// CanDeploy returns if the currently authenticated entity (a machine
// agent) can deploy each passed unit entity.
func (d *DeployerAPI) CanDeploy(args params.Entities) (params.BoolResults, error) {
	result := params.BoolResults{
		Results: make([]params.BoolResult, len(args.Entities)),
	}
	for i, entity := range args.Entities {
		unitName := state.UnitNameFromTag(entity.Tag)
		unit, err := d.st.Unit(unitName)
		if errors.IsNotFoundError(err) {
			// Unit not found, so no need to continue.
			continue
		} else if err != nil {
			// Any other error gets reported back.
			result.Results[i].Error = common.ServerError(err)
			continue
		}
		machineId, err := unit.AssignedMachineId()
		if err != nil && !state.IsNotAssigned(err) && !errors.IsNotFoundError(err) {
			// Any other errors get reported back.
			result.Results[i].Error = common.ServerError(err)
			continue
		} else if err != nil {
			// This means the unit wasn't assigned to the machine
			// agent or it wasn't found. In both cases we just return
			// false so as not to leak information about the existence
			// of a unit to a potentially rogue machine agent.
			continue
		}
		// Finally, check if we're allowed to access this unit.
		// When assigned machineId == "" it will fail.
		result.Results[i].Result = d.authorizer.AuthOwner(state.MachineTag(machineId))
	}
	return result, nil
}
func getMaybeSignedImageIdMetadata(baseURLs []string, indexPath string, ic *ImageConstraint, requireSigned bool) ([]*ImageMetadata, error) {
	var metadata []*ImageMetadata
	for _, baseURL := range baseURLs {
		indexRef, err := getIndexWithFormat(baseURL, indexPath, "index:1.0", requireSigned)
		if err != nil {
			if errors.IsNotFoundError(err) || errors.IsUnauthorizedError(err) {
				logger.Warningf("cannot load index %q/%q: %v", baseURL, indexPath, err)
				continue
			}
			return nil, err
		}
		metadata, err = indexRef.getLatestImageIdMetadataWithFormat(ic, "products:1.0", requireSigned)
		if err != nil {
			if errors.IsNotFoundError(err) {
				logger.Warningf("skipping index because of error getting latest metadata %q/%q: %v", baseURL, indexPath, err)
				continue
			}
			return nil, err
		}
		if len(metadata) > 0 {
			break
		}
	}
	return metadata, nil
}
// watchLoop watches the service's exposed flag for changes.
func (sd *serviceData) watchLoop(exposed bool) {
	defer sd.tomb.Done()
	w := sd.service.Watch()
	defer watcher.Stop(w, &sd.tomb)
	for {
		select {
		case <-sd.tomb.Dying():
			return
		case _, ok := <-w.Changes():
			if !ok {
				sd.fw.tomb.Kill(watcher.MustErr(w))
				return
			}
			if err := sd.service.Refresh(); err != nil {
				if !errors.IsNotFoundError(err) {
					sd.fw.tomb.Kill(err)
				}
				return
			}
			change := sd.service.IsExposed()
			if change == exposed {
				continue
			}
			exposed = change
			select {
			case sd.fw.exposedChange <- &exposedChange{sd, change}:
			case <-sd.tomb.Dying():
				return
			}
		}
	}
}
// Login logs in with the provided credentials.
// All subsequent requests on the connection will
// act as the authenticated user.
func (a *srvAdmin) Login(c params.Creds) error {
	a.mu.Lock()
	defer a.mu.Unlock()
	if a.loggedIn {
		// This can only happen if Login is called concurrently.
		return errAlreadyLoggedIn
	}
	entity0, err := a.root.srv.state.FindEntity(c.AuthTag)
	if err != nil && !errors.IsNotFoundError(err) {
		return err
	}
	// We return the same error when an entity
	// does not exist as for a bad password, so that
	// we don't allow unauthenticated users to find information
	// about existing entities.
	entity, ok := entity0.(taggedAuthenticator)
	if !ok {
		return common.ErrBadCreds
	}
	if err != nil || !entity.PasswordValid(c.Password) {
		return common.ErrBadCreds
	}
	// We have authenticated the user; now choose an appropriate API
	// to serve to them.
	newRoot, err := a.apiRootForEntity(entity, c)
	if err != nil {
		return err
	}
	if err := a.root.rpcConn.Serve(newRoot, serverError); err != nil {
		return err
	}
	return nil
}
// restoreRelations reconciles the supplied relation state dirs with the
// remote state of the corresponding relations.
func (u *Uniter) restoreRelations() error {
	// TODO(dimitern): Get these from state, not from disk.
	dirs, err := relation.ReadAllStateDirs(u.relationsDir)
	if err != nil {
		return err
	}
	for id, dir := range dirs {
		remove := false
		rel, err := u.st.Relation(id)
		if errors.IsNotFoundError(err) {
			remove = true
		} else if err != nil {
			return err
		}
		if err = u.addRelation(rel, dir); err == state.ErrCannotEnterScope {
			remove = true
		} else if err != nil {
			return err
		}
		if remove {
			// If the previous execution was interrupted in the process of
			// joining or departing the relation, the directory will be empty
			// and the state is sane.
			if err := dir.Remove(); err != nil {
				return fmt.Errorf("cannot synchronize relation state: %v", err)
			}
		}
	}
	return nil
}
// Destroy ensures that the service and all its relations will be removed at
// some point; if the service has no units, and no relation involving the
// service has any units in scope, they are all removed immediately.
func (s *Service) Destroy() (err error) {
	defer utils.ErrorContextf(&err, "cannot destroy service %q", s)
	defer func() {
		if err == nil {
			// This is a white lie; the document might actually be removed.
			s.doc.Life = Dying
		}
	}()
	svc := &Service{st: s.st, doc: s.doc}
	for i := 0; i < 5; i++ {
		switch ops, err := svc.destroyOps(); err {
		case errRefresh:
		case errAlreadyDying:
			return nil
		case nil:
			if err := svc.st.runTransaction(ops); err != txn.ErrAborted {
				return err
			}
		default:
			return err
		}
		if err := svc.Refresh(); errors.IsNotFoundError(err) {
			return nil
		} else if err != nil {
			return err
		}
	}
	return ErrExcessiveContention
}
func (mr *Machiner) SetUp() (api.NotifyWatcher, error) {
	// Find which machine we're responsible for.
	m, err := mr.st.Machine(mr.id)
	if errors.IsNotFoundError(err) {
		return nil, worker.ErrTerminateAgent
	} else if err != nil {
		return nil, err
	}
	mr.machine = m

	// Announce our presence to the world.
	mr.pinger, err = m.SetAgentAlive()
	if err != nil {
		return nil, err
	}
	logger.Debugf("agent for machine %q is now alive", m)

	// Mark the machine as started and log it.
	if err := m.SetStatus(params.StatusStarted, ""); err != nil {
		return nil, err
	}
	logger.Infof("machine %q started", m)

	w := m.Watch()
	return w, nil
}
// NextTools returns the next changed tools, waiting
// until the tools are actually set.
func (w *toolsWaiter) NextTools(c *C) (*state.Tools, error) {
	for _ = range w.changes {
		err := w.tooler.Refresh()
		if err != nil {
			return nil, fmt.Errorf("cannot refresh: %v", err)
		}
		if w.tooler.Life() == state.Dead {
			return nil, fmt.Errorf("object is dead")
		}
		tools, err := w.tooler.AgentTools()
		if errors.IsNotFoundError(err) {
			c.Logf("tools not yet set")
			continue
		}
		if err != nil {
			return nil, err
		}
		changed := w.lastTools == nil || *tools != *w.lastTools
		w.lastTools = tools
		if changed {
			return tools, nil
		}
		c.Logf("found same tools")
	}
	return nil, fmt.Errorf("watcher closed prematurely: %v", w.watcher.Err())
}
func (*EnvironSuite) TestStateInfoFailsIfNoStateInstances(c *C) {
	env := makeEnviron(c)
	cleanup := setDummyStorage(c, env)
	defer cleanup()
	_, _, err := env.StateInfo()
	c.Check(errors.IsNotFoundError(err), Equals, true)
}
func (task *provisionerTask) populateMachineMaps(ids []string) error {
	task.instances = make(map[instance.Id]instance.Instance)

	instances, err := task.broker.AllInstances()
	if err != nil {
		logger.Errorf("failed to get all instances from broker: %v", err)
		return err
	}
	for _, i := range instances {
		task.instances[i.Id()] = i
	}

	// Update the machines map with new data for each of the machines in the
	// change list.
	// TODO(thumper): update for API server later to get all machines in one go.
	for _, id := range ids {
		machine, err := task.machineGetter.Machine(id)
		switch {
		case errors.IsNotFoundError(err):
			logger.Debugf("machine %q not found in state", id)
			delete(task.machines, id)
		case err == nil:
			task.machines[id] = machine
		default:
			logger.Errorf("failed to get machine: %v", err)
		}
	}
	return nil
}
// startMachine creates a new data value for tracking details of the
// machine and starts watching the machine for units added or removed.
func (fw *Firewaller) startMachine(id string) error {
	machined := &machineData{
		fw:     fw,
		id:     id,
		unitds: make(map[string]*unitData),
		ports:  make([]instance.Port, 0),
	}
	m, err := machined.machine()
	if errors.IsNotFoundError(err) {
		return nil
	} else if err != nil {
		return fmt.Errorf("worker/firewaller: cannot watch machine units: %v", err)
	}
	unitw := m.WatchUnits()
	select {
	case <-fw.tomb.Dying():
		stop("units watcher", unitw)
		return tomb.ErrDying
	case change, ok := <-unitw.Changes():
		if !ok {
			stop("units watcher", unitw)
			return watcher.MustErr(unitw)
		}
		fw.machineds[id] = machined
		err = fw.unitsChanged(&unitsChange{machined, change})
		if err != nil {
			stop("units watcher", unitw)
			return fmt.Errorf("worker/firewaller: cannot respond to units changes for machine %q: %v", id, err)
		}
	}
	go machined.watchLoop(unitw)
	return nil
}
func (m *backingMachine) updated(st *State, store *multiwatcher.Store, id interface{}) error {
	info := &params.MachineInfo{
		Id: m.Id,
	}
	oldInfo := store.Get(info.EntityId())
	if oldInfo == nil {
		// We're adding the entry for the first time,
		// so fetch the associated machine status.
		sdoc, err := getStatus(st, machineGlobalKey(m.Id))
		if err != nil {
			return err
		}
		info.Status = sdoc.Status
		info.StatusInfo = sdoc.StatusInfo
	} else {
		// The entry already exists, so preserve the current status and instance id.
		oldInfo := oldInfo.(*params.MachineInfo)
		info.Status = oldInfo.Status
		info.StatusInfo = oldInfo.StatusInfo
		info.InstanceId = oldInfo.InstanceId
	}
	// If the machine has been provisioned, fetch the instance id if required.
	if m.Nonce != "" && info.InstanceId == "" {
		instanceData, err := getInstanceData(st, m.Id)
		if err == nil {
			info.InstanceId = string(instanceData.InstanceId)
		} else if !errors.IsNotFoundError(err) {
			return err
		}
	}
	store.Update(info)
	return nil
}
// initVersions collects state relevant to an upgrade decision. The returned
// agent and client versions, and the list of currently available tools, will
// always be accurate; the chosen version, and the flag indicating development
// mode, may remain blank until uploadTools or validate is called.
func (c *UpgradeJujuCommand) initVersions(cfg *config.Config, env environs.Environ) (*upgradeVersions, error) {
	agent, ok := cfg.AgentVersion()
	if !ok {
		// Can't happen. In theory.
		return nil, fmt.Errorf("incomplete environment configuration")
	}
	if c.Version == agent {
		return nil, errUpToDate
	}
	client := version.Current.Number
	available, err := environs.FindAvailableTools(env, client.Major)
	if err != nil {
		if !errors.IsNotFoundError(err) {
			return nil, err
		}
		if !c.UploadTools {
			if c.Version == version.Zero {
				return nil, errUpToDate
			}
			return nil, err
		}
	}
	dev := c.Development || cfg.Development() || agent.IsDev() || client.IsDev()
	return &upgradeVersions{
		dev:    dev,
		agent:  agent,
		client: client,
		chosen: c.Version,
		tools:  available,
	}, nil
}
// watchLoop watches the unit for port changes.
func (ud *unitData) watchLoop(latestPorts []instance.Port) {
	defer ud.tomb.Done()
	w := ud.unit.Watch()
	defer watcher.Stop(w, &ud.tomb)
	for {
		select {
		case <-ud.tomb.Dying():
			return
		case _, ok := <-w.Changes():
			if !ok {
				ud.fw.tomb.Kill(watcher.MustErr(w))
				return
			}
			if err := ud.unit.Refresh(); err != nil {
				if !errors.IsNotFoundError(err) {
					ud.fw.tomb.Kill(err)
				}
				return
			}
			change := ud.unit.OpenedPorts()
			if samePorts(change, latestPorts) {
				continue
			}
			latestPorts = append(latestPorts[:0], change...)
			select {
			case ud.fw.portsChange <- &portsChange{ud, change}:
			case <-ud.tomb.Dying():
				return
			}
		}
	}
}
// Destroy ensures that the relation will be removed at some point; if no units
// are currently in scope, it will be removed immediately.
func (r *Relation) Destroy() (err error) {
	defer utils.ErrorContextf(&err, "cannot destroy relation %q", r)
	if len(r.doc.Endpoints) == 1 && r.doc.Endpoints[0].Role == charm.RolePeer {
		return fmt.Errorf("is a peer relation")
	}
	defer func() {
		if err == nil {
			// This is a white lie; the document might actually be removed.
			r.doc.Life = Dying
		}
	}()
	rel := &Relation{r.st, r.doc}
	// In this context, aborted transactions indicate that the number of units
	// in scope has changed between 0 and not-0. The chances of 5 successive
	// attempts each hitting this change -- which is itself an unlikely one --
	// are considered to be extremely small.
	for attempt := 0; attempt < 5; attempt++ {
		ops, _, err := rel.destroyOps("")
		if err == errAlreadyDying {
			return nil
		} else if err != nil {
			return err
		}
		if err := rel.st.runTransaction(ops); err != txn.ErrAborted {
			return err
		}
		if err := rel.Refresh(); errors.IsNotFoundError(err) {
			return nil
		} else if err != nil {
			return err
		}
	}
	return ErrExcessiveContention
}
// unitChanged responds to changes in the unit.
func (f *filter) unitChanged() error {
	if err := f.unit.Refresh(); err != nil {
		if errors.IsNotFoundError(err) {
			return worker.ErrTerminateAgent
		}
		return err
	}
	if f.life != f.unit.Life() {
		switch f.life = f.unit.Life(); f.life {
		case state.Dying:
			log.Noticef("worker/uniter/filter: unit is dying")
			close(f.outUnitDying)
			f.outUpgrade = nil
		case state.Dead:
			log.Noticef("worker/uniter/filter: unit is dead")
			return worker.ErrTerminateAgent
		}
	}
	if resolved := f.unit.Resolved(); resolved != f.resolved {
		f.resolved = resolved
		if f.resolved != state.ResolvedNone {
			f.outResolved = f.outResolvedOn
		}
	}
	return nil
}
func (w *settingsWatcher) loop(key string) (err error) {
	ch := make(chan watcher.Change)
	revno := int64(-1)
	settings, err := readSettings(w.st, key)
	if err == nil {
		revno = settings.txnRevno
	} else if !errors.IsNotFoundError(err) {
		return err
	}
	w.st.watcher.Watch(w.st.settings.Name, key, revno, ch)
	defer w.st.watcher.Unwatch(w.st.settings.Name, key, ch)
	out := w.out
	if revno == -1 {
		out = nil
	}
	for {
		select {
		case <-w.st.watcher.Dead():
			return watcher.MustErr(w.st.watcher)
		case <-w.tomb.Dying():
			return tomb.ErrDying
		case <-ch:
			settings, err = readSettings(w.st, key)
			if err != nil {
				return err
			}
			out = w.out
		case out <- settings:
			out = nil
		}
	}
	return nil
}
// Remove removes the unit from state, and may remove its service as well, if
// the service is Dying and no other references to it exist. It will fail if
// the unit is not Dead.
func (u *Unit) Remove() (err error) {
	defer utils.ErrorContextf(&err, "cannot remove unit %q", u)
	if u.doc.Life != Dead {
		return stderrors.New("unit is not dead")
	}
	unit := &Unit{st: u.st, doc: u.doc}
	for i := 0; i < 5; i++ {
		switch ops, err := unit.removeOps(isDeadDoc); err {
		case errRefresh:
		case errAlreadyRemoved:
			return nil
		case nil:
			if err := u.st.runTransaction(ops); err != txn.ErrAborted {
				return err
			}
		default:
			return err
		}
		if err := unit.Refresh(); errors.IsNotFoundError(err) {
			return nil
		} else if err != nil {
			return err
		}
	}
	return ErrExcessiveContention
}
// Destroy, when called on an Alive unit, advances its lifecycle as far as
// possible; it otherwise has no effect. In most situations, the unit's
// life is just set to Dying; but if a principal unit that is not assigned
// to a provisioned machine is Destroyed, it will be removed from state
// directly.
func (u *Unit) Destroy() (err error) {
	defer func() {
		if err == nil {
			// This is a white lie; the document might actually be removed.
			u.doc.Life = Dying
		}
	}()
	unit := &Unit{st: u.st, doc: u.doc}
	for i := 0; i < 5; i++ {
		switch ops, err := unit.destroyOps(); err {
		case errRefresh:
		case errAlreadyDying:
			return nil
		case nil:
			if err := unit.st.runTransaction(ops); err != txn.ErrAborted {
				return err
			}
		default:
			return err
		}
		if err := unit.Refresh(); errors.IsNotFoundError(err) {
			return nil
		} else if err != nil {
			return err
		}
	}
	return ErrExcessiveContention
}
func (suite *StateSuite) TestLoadStateReturnsNotFoundErrorForMissingFile(c *C) {
	storage, cleanup := makeDummyStorage(c)
	defer cleanup()
	_, err := environs.LoadState(storage)
	c.Check(errors.IsNotFoundError(err), Equals, true)
}
func (s *deployerSuite) TestRemove(c *gc.C) {
	c.Assert(s.principal0.Life(), gc.Equals, state.Alive)
	c.Assert(s.subordinate0.Life(), gc.Equals, state.Alive)

	// Try removing alive units - should fail.
	args := params.Entities{Entities: []params.Entity{
		{Tag: "unit-mysql-0"},
		{Tag: "unit-mysql-1"},
		{Tag: "unit-logging-0"},
		{Tag: "unit-fake-42"},
	}}
	result, err := s.deployer.Remove(args)
	c.Assert(err, gc.IsNil)
	c.Assert(result, gc.DeepEquals, params.ErrorResults{
		Errors: []*params.Error{
			{Message: `cannot remove entity "unit-mysql-0": still alive`},
			apiservertesting.ErrUnauthorized,
			{Message: `cannot remove entity "unit-logging-0": still alive`},
			apiservertesting.ErrUnauthorized,
		},
	})

	err = s.principal0.Refresh()
	c.Assert(err, gc.IsNil)
	c.Assert(s.principal0.Life(), gc.Equals, state.Alive)
	err = s.subordinate0.Refresh()
	c.Assert(err, gc.IsNil)
	c.Assert(s.subordinate0.Life(), gc.Equals, state.Alive)

	// Now make the subordinate dead and try again.
	err = s.subordinate0.EnsureDead()
	c.Assert(err, gc.IsNil)
	err = s.subordinate0.Refresh()
	c.Assert(err, gc.IsNil)
	c.Assert(s.subordinate0.Life(), gc.Equals, state.Dead)

	args = params.Entities{
		Entities: []params.Entity{{Tag: "unit-logging-0"}},
	}
	result, err = s.deployer.Remove(args)
	c.Assert(err, gc.IsNil)
	c.Assert(result, gc.DeepEquals, params.ErrorResults{
		Errors: []*params.Error{nil},
	})

	err = s.subordinate0.Refresh()
	c.Assert(errors.IsNotFoundError(err), gc.Equals, true)

	// Make sure the subordinate is detected as removed.
	result, err = s.deployer.Remove(args)
	c.Assert(err, gc.IsNil)
	c.Assert(result, gc.DeepEquals, params.ErrorResults{
		Errors: []*params.Error{
			apiservertesting.ErrUnauthorized,
		},
	})
}
func (*StorageSuite) TestGetReturnsNotFoundIf404(c *C) {
	container := "container"
	filename := "blobname"
	response := makeResponse("not found", http.StatusNotFound)
	azStorage, _ := makeAzureStorage(response, container, "account")
	_, err := azStorage.Get(filename)
	c.Assert(err, NotNil)
	c.Check(errors.IsNotFoundError(err), Equals, true)
}
func (s addSubordinateRelation) step(c *C, ctx *context) {
	if _, err := ctx.st.Service("logging"); errors.IsNotFoundError(err) {
		_, err := ctx.st.AddService("logging", ctx.s.AddTestingCharm(c, "logging"))
		c.Assert(err, IsNil)
	}
	eps, err := ctx.st.InferEndpoints([]string{"logging", "u:" + s.ifce})
	c.Assert(err, IsNil)
	_, err = ctx.st.AddRelation(eps...)
	c.Assert(err, IsNil)
}
func isRemoved(st *state.State, name string) func(*C) bool {
	return func(c *C) bool {
		_, err := st.Unit(name)
		if errors.IsNotFoundError(err) {
			return true
		}
		c.Assert(err, IsNil)
		return false
	}
}
// constraints is a helper function to return a unit's deployment constraints.
func (u *Unit) constraints() (*constraints.Value, error) {
	cons, err := readConstraints(u.st, u.globalKey())
	if errors.IsNotFoundError(err) {
		// Lack of constraints indicates lack of unit.
		return nil, errors.NotFoundf("unit")
	} else if err != nil {
		return nil, err
	}
	return &cons, nil
}
// removeOps returns the operations necessary to remove the unit, assuming
// the supplied asserts apply to the unit document.
func (u *Unit) removeOps(asserts D) ([]txn.Op, error) {
	svc, err := u.st.Service(u.doc.Service)
	if errors.IsNotFoundError(err) {
		// If the service has been removed, the unit must already have been.
		return nil, errAlreadyRemoved
	} else if err != nil {
		return nil, err
	}
	return svc.removeUnitOps(u, asserts)
}
// waitRemoved waits for the supplied machine to be removed from state.
func (s *CommonProvisionerSuite) waitRemoved(c *C, m *state.Machine) {
	s.waitMachine(c, m, func() bool {
		err := m.Refresh()
		if errors.IsNotFoundError(err) {
			return true
		}
		c.Assert(err, IsNil)
		c.Logf("machine %v is still %s", m, m.Life())
		return false
	})
}
func getMaybeSignedImageIdMetadata(baseURLs []string, indexPath string, ic *ImageConstraint, requireSigned bool) ([]*ImageMetadata, error) {
	var metadata []*ImageMetadata
	for _, baseURL := range baseURLs {
		indexRef, err := getIndexWithFormat(baseURL, indexPath, "index:1.0", requireSigned)
		if err != nil {
			if errors.IsNotFoundError(err) || errors.IsUnauthorizedError(err) {
				continue
			}
			return nil, err
		}
		metadata, err = indexRef.getLatestImageIdMetadataWithFormat(ic, "products:1.0", requireSigned)
		if err != nil {
			if errors.IsNotFoundError(err) {
				continue
			}
			return nil, err
		}
		if len(metadata) > 0 {
			break
		}
	}
	return metadata, nil
}
// unitsChanged responds to changes to the assigned units.
func (fw *Firewaller) unitsChanged(change *unitsChange) error {
	changed := []*unitData{}
	for _, name := range change.units {
		unit, err := fw.st.Unit(name)
		if err != nil && !errors.IsNotFoundError(err) {
			return err
		}
		var machineId string
		if unit != nil {
			machineId, err = unit.AssignedMachineId()
			if errors.IsNotFoundError(err) {
				continue
			} else if err != nil && !state.IsNotAssigned(err) {
				return err
			}
		}
		if unitd, known := fw.unitds[name]; known {
			knownMachineId := fw.unitds[name].machined.id
			if unit == nil || unit.Life() == state.Dead || machineId != knownMachineId {
				fw.forgetUnit(unitd)
				changed = append(changed, unitd)
				log.Debugf("worker/firewaller: stopped watching unit %s", name)
			}
		} else if unit != nil && unit.Life() != state.Dead && fw.machineds[machineId] != nil {
			err = fw.startUnit(unit, machineId)
			if err != nil {
				return err
			}
			changed = append(changed, fw.unitds[name])
			log.Debugf("worker/firewaller: started watching unit %s", name)
		}
	}
	if err := fw.flushUnits(changed); err != nil {
		return fmt.Errorf("cannot change firewall ports: %v", err)
	}
	return nil
}
// reconcileInstances compares the initially started watcher for machines,
// units and services with the opened and closed ports of the instances and
// opens and closes the appropriate ports for each instance.
func (fw *Firewaller) reconcileInstances() error {
	for _, machined := range fw.machineds {
		m, err := machined.machine()
		if errors.IsNotFoundError(err) {
			if err := fw.forgetMachine(machined); err != nil {
				return err
			}
			continue
		} else if err != nil {
			return err
		}
		instanceId, err := m.InstanceId()
		if err != nil {
			return err
		}
		instances, err := fw.environ.Instances([]instance.Id{instanceId})
		if err == environs.ErrNoInstances {
			return nil
		} else if err != nil {
			return err
		}
		initialPorts, err := instances[0].Ports(machined.id)
		if err != nil {
			return err
		}
		// Check which ports to open or to close.
		toOpen := Diff(machined.ports, initialPorts)
		toClose := Diff(initialPorts, machined.ports)
		if len(toOpen) > 0 {
			log.Infof("worker/firewaller: opening instance ports %v for machine %s", toOpen, machined.id)
			if err := instances[0].OpenPorts(machined.id, toOpen); err != nil {
				// TODO(mue) Add local retry logic.
				return err
			}
			state.SortPorts(toOpen)
		}
		if len(toClose) > 0 {
			log.Infof("worker/firewaller: closing instance ports %v for machine %s", toClose, machined.id)
			if err := instances[0].ClosePorts(machined.id, toClose); err != nil {
				// TODO(mue) Add local retry logic.
				return err
			}
			state.SortPorts(toClose)
		}
	}
	return nil
}