func (mr *Machiner) SetUp() (watcher.NotifyWatcher, error) { // Find which machine we're responsible for. m, err := mr.config.MachineAccessor.Machine(mr.config.Tag) if params.IsCodeNotFoundOrCodeUnauthorized(err) { return nil, worker.ErrTerminateAgent } else if err != nil { return nil, errors.Trace(err) } mr.machine = m if mr.config.ClearMachineAddressesOnStart { logger.Debugf("machine addresses ignored on start - resetting machine addresses") if err := m.SetMachineAddresses(nil); err != nil { return nil, errors.Annotate(err, "reseting machine addresses") } } else { // Set the addresses in state to the host's addresses. if err := setMachineAddresses(mr.config.Tag, m); err != nil { return nil, errors.Annotate(err, "setting machine addresses") } } // Mark the machine as started and log it. if err := m.SetStatus(status.Started, "", nil); err != nil { return nil, errors.Annotatef(err, "%s failed to set status started", mr.config.Tag) } logger.Infof("%q started", mr.config.Tag) return m.Watch() }
// findUnknownInstances finds instances which are not associated with a machine. func (task *provisionerTask) findUnknownInstances(stopping []instance.Instance) ([]instance.Instance, error) { // Make a copy of the instances we know about. instances := make(map[instance.Id]instance.Instance) for k, v := range task.instances { instances[k] = v } for _, m := range task.machines { instId, err := m.InstanceId() switch { case err == nil: delete(instances, instId) case params.IsCodeNotProvisioned(err): case params.IsCodeNotFoundOrCodeUnauthorized(err): default: return nil, err } } // Now remove all those instances that we are stopping already as we // know about those and don't want to include them in the unknown list. for _, inst := range stopping { delete(instances, inst.Id()) } var unknown []instance.Instance for _, inst := range instances { unknown = append(unknown, inst) } return unknown, nil }
func (mr *Machiner) Handle(_ <-chan struct{}) error { if err := mr.machine.Refresh(); params.IsCodeNotFoundOrCodeUnauthorized(err) { // NOTE(axw) we can distinguish between NotFound and CodeUnauthorized, // so we could call NotifyMachineDead here in case the agent failed to // call NotifyMachineDead directly after setting the machine Dead in // the first place. We're not doing that to be cautious: the machine // could be missing from state due to invalid global state. return worker.ErrTerminateAgent } else if err != nil { return err } life := mr.machine.Life() if life == params.Alive { observedConfig, err := getObservedNetworkConfig(networkingcommon.DefaultNetworkConfigSource()) if err != nil { return errors.Annotate(err, "cannot discover observed network config") } else if len(observedConfig) == 0 { logger.Warningf("not updating network config: no observed config found to update") } if len(observedConfig) > 0 { if err := mr.machine.SetObservedNetworkConfig(observedConfig); err != nil { return errors.Annotate(err, "cannot update observed network config") } } logger.Debugf("observed network config updated") return nil } logger.Debugf("%q is now %s", mr.config.Tag, life) if err := mr.machine.SetStatus(status.Stopped, "", nil); err != nil { return errors.Annotatef(err, "%s failed to set status stopped", mr.config.Tag) } // Attempt to mark the machine Dead. If the machine still has units // assigned, or storage attached, this will fail with // CodeHasAssignedUnits or CodeMachineHasAttachedStorage respectively. // Once units or storage are removed, the watcher will trigger again // and we'll reattempt. if err := mr.machine.EnsureDead(); err != nil { if params.IsCodeHasAssignedUnits(err) { return nil } if params.IsCodeMachineHasAttachedStorage(err) { logger.Tracef("machine still has storage attached") return nil } return errors.Annotatef(err, "%s failed to set machine to dead", mr.config.Tag) } // Report on the machine's death. 
It is important that we do this after // the machine is Dead, because this is the mechanism we use to clean up // the machine (uninstall). If we were to report before marking the machine // as Dead, then we would risk uninstalling prematurely. if mr.config.NotifyMachineDead != nil { if err := mr.config.NotifyMachineDead(); err != nil { return errors.Annotate(err, "reporting machine death") } } return worker.ErrTerminateAgent }
// NewActionRunner exists to satisfy the Factory interface. func (f *factory) NewActionRunner(actionId string) (Runner, error) { ch, err := getCharm(f.paths.GetCharmDir()) if err != nil { return nil, errors.Trace(err) } ok := names.IsValidAction(actionId) if !ok { return nil, &badActionError{actionId, "not valid actionId"} } tag := names.NewActionTag(actionId) action, err := f.state.Action(tag) if params.IsCodeNotFoundOrCodeUnauthorized(err) { return nil, ErrActionNotAvailable } else if params.IsCodeActionNotAvailable(err) { return nil, ErrActionNotAvailable } else if err != nil { return nil, errors.Trace(err) } name := action.Name() spec, ok := ch.Actions().ActionSpecs[name] if !ok { return nil, &badActionError{name, "not defined"} } params := action.Params() if err := spec.ValidateParams(params); err != nil { return nil, &badActionError{name, err.Error()} } actionData := newActionData(name, &tag, params) ctx, err := f.contextFactory.ActionContext(actionData) runner := NewRunner(ctx, f.paths) return runner, nil }
// changed ensures that the named unit is deployed, recalled, or removed, as // indicated by its state. func (d *Deployer) changed(unitName string) error { unitTag := names.NewUnitTag(unitName) // Determine unit life state, and whether we're responsible for it. logger.Infof("checking unit %q", unitName) var life params.Life unit, err := d.st.Unit(unitTag) if params.IsCodeNotFoundOrCodeUnauthorized(err) { life = params.Dead } else if err != nil { return err } else { life = unit.Life() } // Deployed units must be removed if they're Dead, or if the deployer // is no longer responsible for them. if d.deployed.Contains(unitName) { if life == params.Dead { if err := d.recall(unitName); err != nil { return err } } } // The only units that should be deployed are those that (1) we are responsible // for and (2) are Alive -- if we're responsible for a Dying unit that is not // yet deployed, we should remove it immediately rather than undergo the hassle // of deploying a unit agent purely so it can set itself to Dead. if !d.deployed.Contains(unitName) { if life == params.Alive { return d.deploy(unit) } else if unit != nil { return d.remove(unit) } } return nil }
func (mr *Machiner) Handle(_ <-chan struct{}) error { if err := mr.machine.Refresh(); params.IsCodeNotFoundOrCodeUnauthorized(err) { return worker.ErrTerminateAgent } else if err != nil { return err } life := mr.machine.Life() if life == params.Alive { return nil } logger.Debugf("%q is now %s", mr.tag, life) if err := mr.machine.SetStatus(params.StatusStopped, "", nil); err != nil { return errors.Annotatef(err, "%s failed to set status stopped", mr.tag) } // Attempt to mark the machine Dead. If the machine still has units // assigned, or storage attached, this will fail with // CodeHasAssignedUnits or CodeMachineHasAttachedStorage respectively. // Once units or storage are removed, the watcher will trigger again // and we'll reattempt. if err := mr.machine.EnsureDead(); err != nil { if params.IsCodeHasAssignedUnits(err) { return nil } if params.IsCodeMachineHasAttachedStorage(err) { logger.Tracef("machine still has storage attached") return nil } return errors.Annotatef(err, "%s failed to set machine to dead", mr.tag) } return worker.ErrTerminateAgent }
// populateMachineMaps updates task.instances. Also updates // task.machines map if a list of IDs is given. func (task *provisionerTask) populateMachineMaps(ids []string) error { task.instances = make(map[instance.Id]instance.Instance) instances, err := task.broker.AllInstances() if err != nil { return errors.Annotate(err, "failed to get all instances from broker") } for _, i := range instances { task.instances[i.Id()] = i } // Update the machines map with new data for each of the machines in the // change list. // TODO(thumper): update for API server later to get all machines in one go. for _, id := range ids { machineTag := names.NewMachineTag(id) machine, err := task.machineGetter.Machine(machineTag) switch { case params.IsCodeNotFoundOrCodeUnauthorized(err): logger.Debugf("machine %q not found in state", id) delete(task.machines, id) case err == nil: task.machines[id] = machine default: return errors.Annotatef(err, "failed to get machine %v", id) } } return nil }
// SetPassword is part of the ConnFacade interface. func (facade *connFacade) SetPassword(entity names.Tag, password string) error { var results params.ErrorResults args := params.EntityPasswords{ Changes: []params.EntityPassword{{ Tag: entity.String(), Password: password, }}, } err := facade.caller.FacadeCall("SetPasswords", args, &results) if err != nil { return errors.Trace(err) } if len(results.Results) != 1 { return errors.Errorf("expected 1 result, got %d", len(results.Results)) } if err := results.Results[0].Error; err != nil { if params.IsCodeDead(err) { return ErrDenied } else if params.IsCodeNotFoundOrCodeUnauthorized(err) { return ErrDenied } return errors.Trace(err) } return nil }
// Life is part of the ConnFacade interface. func (facade *connFacade) Life(entity names.Tag) (Life, error) { var results params.AgentGetEntitiesResults args := params.Entities{ Entities: []params.Entity{{Tag: entity.String()}}, } err := facade.caller.FacadeCall("GetEntities", args, &results) if err != nil { return "", errors.Trace(err) } if len(results.Entities) != 1 { return "", errors.Errorf("expected 1 result, got %d", len(results.Entities)) } if err := results.Entities[0].Error; err != nil { if params.IsCodeNotFoundOrCodeUnauthorized(err) { return "", ErrDenied } return "", errors.Trace(err) } life := Life(results.Entities[0].Life) switch life { case Alive, Dying, Dead: return life, nil } return "", errors.Errorf("unknown life value %q", life) }
// FailAction is part of the operation.Callbacks interface. func (opc *operationCallbacks) FailAction(actionId, message string) error { if !names.IsValidAction(actionId) { return errors.Errorf("invalid action id %q", actionId) } tag := names.NewActionTag(actionId) err := opc.u.st.ActionFinish(tag, params.ActionFailed, nil, message) if params.IsCodeNotFoundOrCodeUnauthorized(err) { err = nil } return err }
func (w *RemoteStateWatcher) init(unitTag names.UnitTag) (err error) { // TODO(dfc) named return value is a time bomb // TODO(axw) move this logic. defer func() { if params.IsCodeNotFoundOrCodeUnauthorized(err) { err = worker.ErrTerminateAgent } }() if w.unit, err = w.st.Unit(unitTag); err != nil { return err } w.service, err = w.unit.Service() if err != nil { return err } return nil }
func (w *RemoteStateWatcher) setUp(unitTag names.UnitTag) (err error) { // TODO(dfc) named return value is a time bomb // TODO(axw) move this logic. defer func() { cause := errors.Cause(err) if params.IsCodeNotFoundOrCodeUnauthorized(cause) { err = worker.ErrTerminateAgent } }() if w.unit, err = w.st.Unit(unitTag); err != nil { return errors.Trace(err) } w.service, err = w.unit.Application() if err != nil { return errors.Trace(err) } return nil }
func (mr *Machiner) Handle() error { if err := mr.machine.Refresh(); params.IsCodeNotFoundOrCodeUnauthorized(err) { return worker.ErrTerminateAgent } else if err != nil { return err } if mr.machine.Life() == params.Alive { return nil } logger.Debugf("%q is now %s", mr.tag, mr.machine.Life()) if err := mr.machine.SetStatus(params.StatusStopped, "", nil); err != nil { return fmt.Errorf("%s failed to set status stopped: %v", mr.tag, err) } // If the machine is Dying, it has no units, // and can be safely set to Dead. if err := mr.machine.EnsureDead(); err != nil { return fmt.Errorf("%s failed to set machine to dead: %v", mr.tag, err) } return worker.ErrTerminateAgent }
// relationsChanged responds to service relation changes.
//
// For each key: if the relation is gone (NotFound/Unauthorized), any
// associated relation-units watcher is stopped and the relation dropped from
// local state; if the relation is already tracked, only its Life in the
// current snapshot is refreshed; otherwise a new relation-units watcher is
// started and handed over to watchRelationUnits. Holds w.mu throughout.
func (w *RemoteStateWatcher) relationsChanged(keys []string) error {
	w.mu.Lock()
	defer w.mu.Unlock()
	for _, key := range keys {
		relationTag := names.NewRelationTag(key)
		rel, err := w.st.Relation(relationTag)
		if params.IsCodeNotFoundOrCodeUnauthorized(err) {
			// If it's actually gone, this unit cannot have entered
			// scope, and therefore never needs to know about it.
			if ruw, ok := w.relations[relationTag]; ok {
				// NOTE(review): the error from worker.Stop is discarded —
				// presumably intentional best-effort cleanup; confirm.
				worker.Stop(ruw)
				delete(w.relations, relationTag)
				delete(w.current.Relations, ruw.relationId)
			}
		} else if err != nil {
			return errors.Trace(err)
		} else {
			if _, ok := w.relations[relationTag]; ok {
				// Already watched: just refresh the snapshot's Life.
				relationSnapshot := w.current.Relations[rel.Id()]
				relationSnapshot.Life = rel.Life()
				w.current.Relations[rel.Id()] = relationSnapshot
				continue
			}
			ruw, err := w.st.WatchRelationUnits(relationTag, w.unit.Tag())
			if err != nil {
				return errors.Trace(err)
			}
			// Because of the delay before handing off responsibility to
			// newRelationUnitsWatcher below, add to our own catacomb to
			// ensure errors get picked up if they happen.
			if err := w.catacomb.Add(ruw); err != nil {
				return errors.Trace(err)
			}
			if err := w.watchRelationUnits(rel, relationTag, ruw); err != nil {
				return errors.Trace(err)
			}
		}
	}
	return nil
}
func (mr *Machiner) SetUp() (watcher.NotifyWatcher, error) { // Find which machine we're responsible for. m, err := mr.st.Machine(mr.tag) if params.IsCodeNotFoundOrCodeUnauthorized(err) { return nil, worker.ErrTerminateAgent } else if err != nil { return nil, err } mr.machine = m // Set the addresses in state to the host's addresses. if err := setMachineAddresses(mr.tag, m); err != nil { return nil, err } // Mark the machine as started and log it. if err := m.SetStatus(params.StatusStarted, "", nil); err != nil { return nil, fmt.Errorf("%s failed to set status started: %v", mr.tag, err) } logger.Infof("%q started", mr.tag) return m.Watch() }
// relationsChanged responds to service relation changes.
//
// For each key: if the relation is gone (NotFound/Unauthorized), any
// associated relation-units watcher is stopped and the relation dropped from
// local state; if the relation is already tracked, only its Life in the
// current snapshot is refreshed; otherwise a new relation-units watcher is
// started and handed to watchRelationUnits. Holds w.mu throughout.
func (w *RemoteStateWatcher) relationsChanged(keys []string) error {
	w.mu.Lock()
	defer w.mu.Unlock()
	for _, key := range keys {
		relationTag := names.NewRelationTag(key)
		rel, err := w.st.Relation(relationTag)
		if params.IsCodeNotFoundOrCodeUnauthorized(err) {
			// If it's actually gone, this unit cannot have entered
			// scope, and therefore never needs to know about it.
			if ruw, ok := w.relations[relationTag]; ok {
				if err := ruw.Stop(); err != nil {
					return errors.Trace(err)
				}
				delete(w.relations, relationTag)
				delete(w.current.Relations, ruw.relationId)
			}
		} else if err != nil {
			return err
		} else {
			if _, ok := w.relations[relationTag]; ok {
				// Already watched: just refresh the snapshot's Life.
				relationSnapshot := w.current.Relations[rel.Id()]
				relationSnapshot.Life = rel.Life()
				w.current.Relations[rel.Id()] = relationSnapshot
				continue
			}
			in, err := w.st.WatchRelationUnits(relationTag, w.unit.Tag())
			if err != nil {
				return errors.Trace(err)
			}
			if err := w.watchRelationUnits(rel, relationTag, in); err != nil {
				// Hand-off failed: stop the watcher via the tomb before
				// reporting the error.
				watcher.Stop(in, &w.tomb)
				return errors.Trace(err)
			}
		}
	}
	return nil
}
// loop is the filter's main event loop: it resolves the unit and service,
// starts all the watchers the filter multiplexes (unit, service, config,
// actions, relations, meter status, addresses, storage, leader settings),
// and then runs a select loop that forwards watcher events onto the filter's
// "out" channels and services explicit requests (set-charm, resolved,
// leader-settings wants/discards). Out channels are enabled by assigning the
// corresponding "On" channel and disabled by setting them to nil, so only
// pending events are ever sendable in the select.
func (f *filter) loop(unitTag names.UnitTag) (err error) {
	// TODO(dfc) named return value is a time bomb
	defer func() {
		// A vanished/unauthorized unit means this agent should terminate.
		if params.IsCodeNotFoundOrCodeUnauthorized(err) {
			err = worker.ErrTerminateAgent
		}
	}()
	if f.unit, err = f.st.Unit(unitTag); err != nil {
		return err
	}
	if err = f.unitChanged(); err != nil {
		return err
	}
	if err = f.meterStatusChanged(); err != nil {
		return err
	}
	f.service, err = f.unit.Service()
	if err != nil {
		return err
	}
	if err = f.serviceChanged(); err != nil {
		return err
	}
	unitw, err := f.unit.Watch()
	if err != nil {
		return err
	}
	defer f.maybeStopWatcher(unitw)
	servicew, err := f.service.Watch()
	if err != nil {
		return err
	}
	defer f.maybeStopWatcher(servicew)
	// configw and relationsw can get restarted, so we need to use
	// their eventual values in the defer calls.
	var configw apiwatcher.NotifyWatcher
	var configChanges <-chan struct{}
	curl, err := f.unit.CharmURL()
	if err == nil {
		configw, err = f.unit.WatchConfigSettings()
		if err != nil {
			return err
		}
		configChanges = configw.Changes()
		f.upgradeFrom.url = curl
	} else if err != uniter.ErrNoCharmURLSet {
		// No charm URL yet is fine (configChanges stays nil); anything
		// else is fatal.
		filterLogger.Errorf("unit charm: %v", err)
		return err
	}
	defer f.maybeStopWatcher(configw)
	actionsw, err := f.unit.WatchActionNotifications()
	if err != nil {
		return err
	}
	f.actionsPending = make([]string, 0)
	defer f.maybeStopWatcher(actionsw)
	relationsw, err := f.service.WatchRelations()
	if err != nil {
		return err
	}
	defer f.maybeStopWatcher(relationsw)
	meterStatusw, err := f.unit.WatchMeterStatus()
	if err != nil {
		return err
	}
	defer f.maybeStopWatcher(meterStatusw)
	addressesw, err := f.unit.WatchAddresses()
	if err != nil {
		return err
	}
	defer watcher.Stop(addressesw, &f.tomb)
	storagew, err := f.unit.WatchStorage()
	if err != nil {
		return err
	}
	defer watcher.Stop(storagew, &f.tomb)
	leaderSettingsw, err := f.st.LeadershipSettings.WatchLeadershipSettings(f.service.Tag().Id())
	if err != nil {
		return err
	}
	defer watcher.Stop(leaderSettingsw, &f.tomb)
	// Ignore external requests for leader settings behaviour until we see
	// the first change.
	var discardLeaderSettings <-chan struct{}
	var wantLeaderSettings <-chan bool
	// By default we send all leaderSettings onwards.
	sendLeaderSettings := true
	// Config events cannot be meaningfully discarded until one is available;
	// once we receive the initial config and address changes, we unblock
	// discard requests by setting this channel to its namesake on f.
	var discardConfig chan struct{}
	var seenConfigChange bool
	var seenAddressChange bool
	// A config event is only emitted once both a config change and an
	// address change have been observed.
	maybePrepareConfigEvent := func() {
		if !seenAddressChange {
			filterLogger.Debugf("no address change seen yet, skipping config event")
			return
		}
		if !seenConfigChange {
			filterLogger.Debugf("no config change seen yet, skipping config event")
			return
		}
		filterLogger.Debugf("preparing new config event")
		f.outConfig = f.outConfigOn
		discardConfig = f.discardConfig
	}
	for {
		var ok bool
		select {
		case <-f.tomb.Dying():
			return tomb.ErrDying
		// Handle watcher changes.
		case _, ok = <-unitw.Changes():
			filterLogger.Debugf("got unit change")
			if !ok {
				return watcher.EnsureErr(unitw)
			}
			if err = f.unitChanged(); err != nil {
				return err
			}
		case _, ok = <-servicew.Changes():
			filterLogger.Debugf("got service change")
			if !ok {
				return watcher.EnsureErr(servicew)
			}
			if err = f.serviceChanged(); err != nil {
				return err
			}
		case _, ok = <-configChanges:
			filterLogger.Debugf("got config change")
			if !ok {
				return watcher.EnsureErr(configw)
			}
			seenConfigChange = true
			maybePrepareConfigEvent()
		case _, ok = <-addressesw.Changes():
			filterLogger.Debugf("got address change")
			if !ok {
				return watcher.EnsureErr(addressesw)
			}
			seenAddressChange = true
			maybePrepareConfigEvent()
		case _, ok = <-meterStatusw.Changes():
			filterLogger.Debugf("got meter status change")
			if !ok {
				return watcher.EnsureErr(meterStatusw)
			}
			if err = f.meterStatusChanged(); err != nil {
				return errors.Trace(err)
			}
		case ids, ok := <-actionsw.Changes():
			filterLogger.Debugf("got %d actions", len(ids))
			if !ok {
				return watcher.EnsureErr(actionsw)
			}
			f.actionsPending = append(f.actionsPending, ids...)
			f.nextAction = f.getNextAction()
		case keys, ok := <-relationsw.Changes():
			filterLogger.Debugf("got relations change")
			if !ok {
				return watcher.EnsureErr(relationsw)
			}
			var ids []int
			for _, key := range keys {
				relationTag := names.NewRelationTag(key)
				rel, err := f.st.Relation(relationTag)
				if params.IsCodeNotFoundOrCodeUnauthorized(err) {
					// If it's actually gone, this unit cannot have entered
					// scope, and therefore never needs to know about it.
				} else if err != nil {
					return err
				} else {
					ids = append(ids, rel.Id())
				}
			}
			f.relationsChanged(ids)
		case ids, ok := <-storagew.Changes():
			filterLogger.Debugf("got storage change")
			if !ok {
				return watcher.EnsureErr(storagew)
			}
			tags := make([]names.StorageTag, len(ids))
			for i, id := range ids {
				tag := names.NewStorageTag(id)
				tags[i] = tag
			}
			f.storageChanged(tags)
		case _, ok = <-leaderSettingsw.Changes():
			filterLogger.Debugf("got leader settings change: ok=%t", ok)
			if !ok {
				return watcher.EnsureErr(leaderSettingsw)
			}
			if sendLeaderSettings {
				// only send the leader settings changed event
				// if it hasn't been explicitly disabled
				f.outLeaderSettings = f.outLeaderSettingsOn
			} else {
				filterLogger.Debugf("not sending leader settings change (want=false)")
			}
			// First change seen: start honouring external requests.
			discardLeaderSettings = f.discardLeaderSettings
			wantLeaderSettings = f.wantLeaderSettings
		// Send events on active out chans.
		case f.outUpgrade <- f.upgrade:
			filterLogger.Debugf("sent upgrade event")
			f.outUpgrade = nil
		case f.outResolved <- f.resolved:
			filterLogger.Debugf("sent resolved event")
			f.outResolved = nil
		case f.outConfig <- nothing:
			filterLogger.Debugf("sent config event")
			f.outConfig = nil
		case f.outLeaderSettings <- nothing:
			filterLogger.Debugf("sent leader settings event")
			f.outLeaderSettings = nil
		case f.outAction <- f.nextAction:
			f.nextAction = f.getNextAction()
			filterLogger.Debugf("sent action event")
		case f.outRelations <- f.relations:
			filterLogger.Debugf("sent relations event")
			f.outRelations = nil
			f.relations = nil
		case f.outMeterStatus <- nothing:
			filterLogger.Debugf("sent meter status change event")
			f.outMeterStatus = nil
		case f.outStorage <- f.storage:
			filterLogger.Debugf("sent storage event")
			f.outStorage = nil
			f.storage = nil
		// Handle explicit requests.
		case curl := <-f.setCharm:
			filterLogger.Debugf("changing charm to %q", curl)
			// We need to restart the config watcher after setting the
			// charm, because service config settings are distinct for
			// different service charms.
			if configw != nil {
				if err := configw.Stop(); err != nil {
					return err
				}
			}
			if err := f.unit.SetCharmURL(curl); err != nil {
				filterLogger.Debugf("failed setting charm url %q: %v", curl, err)
				return err
			}
			select {
			case <-f.tomb.Dying():
				return tomb.ErrDying
			case f.didSetCharm <- nothing:
			}
			configw, err = f.unit.WatchConfigSettings()
			if err != nil {
				return err
			}
			configChanges = configw.Changes()
			// Restart the relations watcher.
			if err := relationsw.Stop(); err != nil {
				return err
			}
			relationsw, err = f.service.WatchRelations()
			if err != nil {
				return err
			}
			f.upgradeFrom.url = curl
			if err = f.upgradeChanged(); err != nil {
				return err
			}
		case force := <-f.wantForcedUpgrade:
			filterLogger.Debugf("want forced upgrade %v", force)
			f.upgradeFrom.force = force
			if err = f.upgradeChanged(); err != nil {
				return err
			}
		case <-f.wantResolved:
			filterLogger.Debugf("want resolved event")
			if f.resolved != params.ResolvedNone {
				f.outResolved = f.outResolvedOn
			}
		case sendEvents := <-wantLeaderSettings:
			filterLogger.Debugf("want leader settings event: %t", sendEvents)
			sendLeaderSettings = sendEvents
			if sendEvents {
				// go ahead and send an event right now,
				// they're waiting for us
				f.outLeaderSettings = f.outLeaderSettingsOn
			} else {
				// Make sure we don't have a pending event
				f.outLeaderSettings = nil
			}
		case <-f.clearResolved:
			filterLogger.Debugf("resolved event handled")
			f.outResolved = nil
			if err := f.unit.ClearResolved(); err != nil {
				return err
			}
			if err := f.unitChanged(); err != nil {
				return err
			}
			select {
			case <-f.tomb.Dying():
				return tomb.ErrDying
			case f.didClearResolved <- nothing:
			}
		case <-discardConfig:
			filterLogger.Debugf("discarded config event")
			f.outConfig = nil
		case <-discardLeaderSettings:
			filterLogger.Debugf("discarded leader settings event")
			f.outLeaderSettings = nil
		}
	}
}
// Update is part of the Relations interface.
//
// For each id: known relations are refreshed and marked dying when their
// life is Dying; unknown Alive relations implemented by the charm are
// joined (state dir created, relationer added, hooks started). Finally, a
// subordinate unit with no remaining live container-scoped relations is
// destroyed.
func (r *relations) Update(ids []int) error {
	for _, id := range ids {
		if relationer, found := r.relationers[id]; found {
			// Known relation: refresh and propagate a Dying life.
			rel := relationer.ru.Relation()
			if err := rel.Refresh(); err != nil {
				return errors.Annotatef(err, "cannot update relation %q", rel)
			}
			if rel.Life() == params.Dying {
				if err := r.setDying(id); err != nil {
					return errors.Trace(err)
				}
			}
			continue
		}
		// Relations that are not alive are simply skipped, because they
		// were not previously known anyway.
		rel, err := r.st.RelationById(id)
		if err != nil {
			if params.IsCodeNotFoundOrCodeUnauthorized(err) {
				continue
			}
			return errors.Trace(err)
		}
		if rel.Life() != params.Alive {
			continue
		}
		// Make sure we ignore relations not implemented by the unit's charm.
		ch, err := corecharm.ReadCharmDir(r.charmDir)
		if err != nil {
			return errors.Trace(err)
		}
		if ep, err := rel.Endpoint(); err != nil {
			return errors.Trace(err)
		} else if !ep.ImplementedBy(ch) {
			logger.Warningf("skipping relation with unknown endpoint %q", ep.Name)
			continue
		}
		dir, err := relation.ReadStateDir(r.relationsDir, id)
		if err != nil {
			return errors.Trace(err)
		}
		err = r.add(rel, dir)
		if err == nil {
			r.relationers[id].StartHooks()
			continue
		}
		// add failed: remove the state dir first, then decide what to
		// report. A CannotEnterScope error is tolerated; any other add
		// error takes precedence over a removal error.
		e := dir.Remove()
		if !params.IsCodeCannotEnterScope(err) {
			return errors.Trace(err)
		}
		if e != nil {
			return errors.Trace(e)
		}
	}
	if ok, err := r.unit.IsPrincipal(); err != nil {
		return errors.Trace(err)
	} else if ok {
		return nil
	}
	// If no Alive relations remain between a subordinate unit's service
	// and its principal's service, the subordinate must become Dying.
	for _, relationer := range r.relationers {
		scope := relationer.ru.Endpoint().Scope
		if scope == corecharm.ScopeContainer && !relationer.dying {
			return nil
		}
	}
	return r.unit.Destroy()
}
// update reconciles the local relationers with the remote relation
// snapshots: known relations are marked dying when their snapshot says so;
// unknown Alive relations implemented by the charm are joined. A subordinate
// unit with no remaining live container-scoped relations is destroyed.
func (r *relations) update(remote map[int]remotestate.RelationSnapshot) error {
	for id, relationSnapshot := range remote {
		if _, found := r.relationers[id]; found {
			// We've seen this relation before. The only changes
			// we care about are to the lifecycle state, and to
			// the member settings versions. We handle differences
			// in settings in nextRelationHook.
			if relationSnapshot.Life == params.Dying {
				if err := r.setDying(id); err != nil {
					return errors.Trace(err)
				}
			}
			continue
		}
		// Relations that are not alive are simply skipped, because they
		// were not previously known anyway.
		if relationSnapshot.Life != params.Alive {
			continue
		}
		rel, err := r.st.RelationById(id)
		if err != nil {
			if params.IsCodeNotFoundOrCodeUnauthorized(err) {
				continue
			}
			return errors.Trace(err)
		}
		// Make sure we ignore relations not implemented by the unit's charm.
		ch, err := corecharm.ReadCharmDir(r.charmDir)
		if err != nil {
			return errors.Trace(err)
		}
		if ep, err := rel.Endpoint(); err != nil {
			return errors.Trace(err)
		} else if !ep.ImplementedBy(ch) {
			logger.Warningf("skipping relation with unknown endpoint %q", ep.Name)
			continue
		}
		dir, err := ReadStateDir(r.relationsDir, id)
		if err != nil {
			return errors.Trace(err)
		}
		addErr := r.add(rel, dir)
		if addErr == nil {
			continue
		}
		// add failed: remove the state dir first, then decide what to
		// report. A CannotEnterScope error is tolerated; any other add
		// error takes precedence over a removal error.
		removeErr := dir.Remove()
		if !params.IsCodeCannotEnterScope(addErr) {
			return errors.Trace(addErr)
		}
		if removeErr != nil {
			return errors.Trace(removeErr)
		}
	}
	if ok, err := r.unit.IsPrincipal(); err != nil {
		return errors.Trace(err)
	} else if ok {
		return nil
	}
	// If no Alive relations remain between a subordinate unit's service
	// and its principal's service, the subordinate must become Dying.
	for _, relationer := range r.relationers {
		scope := relationer.ru.Endpoint().Scope
		if scope == corecharm.ScopeContainer && !relationer.dying {
			return nil
		}
	}
	return r.unit.Destroy()
}
// loop is the filter's main event loop: it resolves the unit and service,
// starts the watchers the filter multiplexes (unit, service, config,
// actions, relations, addresses), and then runs a select loop forwarding
// watcher events onto the filter's "out" channels and servicing explicit
// requests (set-charm, resolved, discard-config). Out channels are enabled
// by assigning the corresponding "On" channel and disabled by setting them
// to nil, so only pending events are ever sendable in the select.
func (f *filter) loop(unitTag string) (err error) {
	// TODO(dfc) named return value is a time bomb
	defer func() {
		// A vanished/unauthorized unit means this agent should terminate.
		if params.IsCodeNotFoundOrCodeUnauthorized(err) {
			err = worker.ErrTerminateAgent
		}
	}()
	tag, err := names.ParseUnitTag(unitTag)
	if err != nil {
		return err
	}
	if f.unit, err = f.st.Unit(tag); err != nil {
		return err
	}
	if err = f.unitChanged(); err != nil {
		return err
	}
	f.service, err = f.unit.Service()
	if err != nil {
		return err
	}
	if err = f.serviceChanged(); err != nil {
		return err
	}
	unitw, err := f.unit.Watch()
	if err != nil {
		return err
	}
	defer f.maybeStopWatcher(unitw)
	servicew, err := f.service.Watch()
	if err != nil {
		return err
	}
	defer f.maybeStopWatcher(servicew)
	// configw and relationsw can get restarted, so we need to use
	// their eventual values in the defer calls.
	var configw apiwatcher.NotifyWatcher
	var configChanges <-chan struct{}
	curl, err := f.unit.CharmURL()
	if err == nil {
		configw, err = f.unit.WatchConfigSettings()
		if err != nil {
			return err
		}
		configChanges = configw.Changes()
		f.upgradeFrom.url = curl
	} else if err != uniter.ErrNoCharmURLSet {
		// No charm URL yet is fine (configChanges stays nil); anything
		// else is fatal.
		filterLogger.Errorf("unit charm: %v", err)
		return err
	}
	defer func() {
		if configw != nil {
			watcher.Stop(configw, &f.tomb)
		}
	}()
	actionsw, err := f.unit.WatchActions()
	if err != nil {
		return err
	}
	f.actionsPending = make([]string, 0)
	defer func() {
		if actionsw != nil {
			watcher.Stop(actionsw, &f.tomb)
		}
	}()
	relationsw, err := f.service.WatchRelations()
	if err != nil {
		return err
	}
	defer func() {
		if relationsw != nil {
			watcher.Stop(relationsw, &f.tomb)
		}
	}()
	var addressChanges <-chan struct{}
	addressesw, err := f.unit.WatchAddresses()
	if err != nil {
		return err
	}
	defer watcher.Stop(addressesw, &f.tomb)
	// Config events cannot be meaningfully discarded until one is available;
	// once we receive the initial change, we unblock discard requests by
	// setting this channel to its namesake on f.
	var discardConfig chan struct{}
	for {
		var ok bool
		select {
		case <-f.tomb.Dying():
			return tomb.ErrDying
		// Handle watcher changes.
		case _, ok = <-unitw.Changes():
			filterLogger.Debugf("got unit change")
			if !ok {
				return watcher.MustErr(unitw)
			}
			if err = f.unitChanged(); err != nil {
				return err
			}
		case _, ok = <-servicew.Changes():
			filterLogger.Debugf("got service change")
			if !ok {
				return watcher.MustErr(servicew)
			}
			if err = f.serviceChanged(); err != nil {
				return err
			}
		case _, ok = <-configChanges:
			filterLogger.Debugf("got config change")
			if !ok {
				return watcher.MustErr(configw)
			}
			if addressChanges == nil {
				// We start reacting to address changes after the
				// first config-changed is processed, ignoring the
				// initial address changed event.
				addressChanges = addressesw.Changes()
				if _, ok := <-addressChanges; !ok {
					return watcher.MustErr(addressesw)
				}
			}
			filterLogger.Debugf("preparing new config event")
			f.outConfig = f.outConfigOn
			discardConfig = f.discardConfig
		case _, ok = <-addressChanges:
			filterLogger.Debugf("got address change")
			if !ok {
				return watcher.MustErr(addressesw)
			}
			// address change causes config-changed event
			filterLogger.Debugf("preparing new config event")
			f.outConfig = f.outConfigOn
		case ids, ok := <-actionsw.Changes():
			filterLogger.Debugf("got %d actions", len(ids))
			if !ok {
				return watcher.MustErr(actionsw)
			}
			f.actionsPending = append(f.actionsPending, ids...)
			f.nextAction = f.getNextAction()
		case keys, ok := <-relationsw.Changes():
			filterLogger.Debugf("got relations change")
			if !ok {
				return watcher.MustErr(relationsw)
			}
			var ids []int
			for _, key := range keys {
				relationTag := names.NewRelationTag(key).String()
				rel, err := f.st.Relation(relationTag)
				if params.IsCodeNotFoundOrCodeUnauthorized(err) {
					// If it's actually gone, this unit cannot have entered
					// scope, and therefore never needs to know about it.
				} else if err != nil {
					return err
				} else {
					ids = append(ids, rel.Id())
				}
			}
			f.relationsChanged(ids)
		// Send events on active out chans.
		case f.outUpgrade <- f.upgrade:
			filterLogger.Debugf("sent upgrade event")
			f.outUpgrade = nil
		case f.outResolved <- f.resolved:
			filterLogger.Debugf("sent resolved event")
			f.outResolved = nil
		case f.outConfig <- nothing:
			filterLogger.Debugf("sent config event")
			f.outConfig = nil
		case f.outAction <- f.nextAction:
			f.nextAction = f.getNextAction()
			filterLogger.Debugf("sent action event")
		case f.outRelations <- f.relations:
			filterLogger.Debugf("sent relations event")
			f.outRelations = nil
			f.relations = nil
		// Handle explicit requests.
		case curl := <-f.setCharm:
			filterLogger.Debugf("changing charm to %q", curl)
			// We need to restart the config watcher after setting the
			// charm, because service config settings are distinct for
			// different service charms.
			if configw != nil {
				if err := configw.Stop(); err != nil {
					return err
				}
			}
			if err := f.unit.SetCharmURL(curl); err != nil {
				filterLogger.Debugf("failed setting charm url %q: %v", curl, err)
				return err
			}
			select {
			case <-f.tomb.Dying():
				return tomb.ErrDying
			case f.didSetCharm <- nothing:
			}
			configw, err = f.unit.WatchConfigSettings()
			if err != nil {
				return err
			}
			configChanges = configw.Changes()
			// Restart the relations watcher.
			if err := relationsw.Stop(); err != nil {
				return err
			}
			relationsw, err = f.service.WatchRelations()
			if err != nil {
				return err
			}
			f.upgradeFrom.url = curl
			if err = f.upgradeChanged(); err != nil {
				return err
			}
		case force := <-f.wantForcedUpgrade:
			filterLogger.Debugf("want forced upgrade %v", force)
			f.upgradeFrom.force = force
			if err = f.upgradeChanged(); err != nil {
				return err
			}
		case <-f.wantResolved:
			filterLogger.Debugf("want resolved event")
			if f.resolved != params.ResolvedNone {
				f.outResolved = f.outResolvedOn
			}
		case <-f.clearResolved:
			filterLogger.Debugf("resolved event handled")
			f.outResolved = nil
			if err := f.unit.ClearResolved(); err != nil {
				return err
			}
			if err := f.unitChanged(); err != nil {
				return err
			}
			select {
			case <-f.tomb.Dying():
				return tomb.ErrDying
			case f.didClearResolved <- nothing:
			}
		case <-discardConfig:
			filterLogger.Debugf("discarded config event")
			f.outConfig = nil
		}
	}
}
// updateRelations responds to changes in the life states of the relations
// with the supplied ids. If any id corresponds to an alive relation not
// known to the unit, the uniter will join that relation and return its
// relationer in the added list.
func (u *Uniter) updateRelations(ids []int) (added []*Relationer, err error) {
	for _, id := range ids {
		if r, found := u.relationers[id]; found {
			// Known relation: refresh and propagate a Dying life;
			// implicit relations are dropped immediately once dying.
			rel := r.ru.Relation()
			if err := rel.Refresh(); err != nil {
				return nil, fmt.Errorf("cannot update relation %q: %v", rel, err)
			}
			if rel.Life() == params.Dying {
				if err := r.SetDying(); err != nil {
					return nil, err
				} else if r.IsImplicit() {
					delete(u.relationers, id)
				}
			}
			continue
		}
		// Relations that are not alive are simply skipped, because they
		// were not previously known anyway.
		rel, err := u.st.RelationById(id)
		if err != nil {
			if params.IsCodeNotFoundOrCodeUnauthorized(err) {
				continue
			}
			return nil, err
		}
		if rel.Life() != params.Alive {
			continue
		}
		// Make sure we ignore relations not implemented by the unit's charm.
		ch, err := corecharm.ReadCharmDir(u.charmPath)
		if err != nil {
			return nil, err
		}
		if ep, err := rel.Endpoint(); err != nil {
			return nil, err
		} else if !ep.ImplementedBy(ch) {
			logger.Warningf("skipping relation with unknown endpoint %q", ep.Name)
			continue
		}
		dir, err := relation.ReadStateDir(u.relationsDir, id)
		if err != nil {
			return nil, err
		}
		err = u.addRelation(rel, dir)
		if err == nil {
			added = append(added, u.relationers[id])
			continue
		}
		// addRelation failed: remove the state dir first, then decide
		// what to report. A CannotEnterScope error is tolerated; any
		// other add error takes precedence over a removal error.
		e := dir.Remove()
		if !params.IsCodeCannotEnterScope(err) {
			return nil, err
		}
		if e != nil {
			return nil, e
		}
	}
	if ok, err := u.unit.IsPrincipal(); err != nil {
		return nil, err
	} else if ok {
		return added, nil
	}
	// If no Alive relations remain between a subordinate unit's service
	// and its principal's service, the subordinate must become Dying.
	keepAlive := false
	for _, r := range u.relationers {
		scope := r.ru.Endpoint().Scope
		if scope == corecharm.ScopeContainer && !r.dying {
			keepAlive = true
			break
		}
	}
	if !keepAlive {
		if err := u.unit.Destroy(); err != nil {
			return nil, err
		}
	}
	return added, nil
}