func (s *FastPeriodSuite) TestMustErr(c *gc.C) {
	err := watcher.MustErr(&dummyWatcher{errors.New("POW")})
	c.Assert(err, gc.ErrorMatches, "POW")

	stillAlive := func() { watcher.MustErr(&dummyWatcher{tomb.ErrStillAlive}) }
	c.Assert(stillAlive, gc.PanicMatches, "watcher is still running")

	noErr := func() { watcher.MustErr(&dummyWatcher{nil}) }
	c.Assert(noErr, gc.PanicMatches, "watcher was stopped cleanly")
}

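// TestMustErr above relies on a dummyWatcher test double that is not shown in
// this section. A minimal sketch of such a stub, assuming MustErr only needs
// the watcher's Err result (nil for a clean stop, tomb.ErrStillAlive while
// running, or the failure reason); the field name and the Stop method are
// illustrative, not taken from the original source:
type dummyWatcher struct {
	err error
}

// Stop pretends to stop the watcher and reports the configured error.
func (w *dummyWatcher) Stop() error {
	return w.err
}

// Err reports the configured error, mimicking a real watcher's final state.
func (w *dummyWatcher) Err() error {
	return w.err
}
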
func (q *AliveHookQueue) loop(initial *State) {
	defer q.tomb.Done()
	defer watcher.Stop(q.w, &q.tomb)

	// Consume initial event, and reconcile with initial state, by inserting
	// a new RelationUnitsChange before the initial event, which schedules
	// every missing unit for immediate departure before anything else happens
	// (apart from a single potential required post-joined changed event).
	ch1, ok := <-q.w.Changes()
	if !ok {
		q.tomb.Kill(watcher.MustErr(q.w))
		return
	}
	if len(ch1.Departed) != 0 {
		panic("AliveHookQueue must be started with a fresh RelationUnitsWatcher")
	}
	q.changedPending = initial.ChangedPending
	ch0 := params.RelationUnitsChange{}
	for unit, version := range initial.Members {
		q.info[unit] = &unitInfo{
			unit:    unit,
			version: version,
			joined:  true,
		}
		if _, found := ch1.Changed[unit]; !found {
			ch0.Departed = append(ch0.Departed, unit)
		}
	}
	q.update(ch0)
	q.update(ch1)

	var next hook.Info
	var out chan<- hook.Info
	for {
		if q.empty() {
			out = nil
		} else {
			out = q.out
			next = q.next()
		}
		select {
		case <-q.tomb.Dying():
			return
		case ch, ok := <-q.w.Changes():
			if !ok {
				q.tomb.Kill(watcher.MustErr(q.w))
				return
			}
			q.update(ch)
		case out <- next:
			q.pop()
		}
	}
}

func (p *ProvisionerAPI) watchOneMachineContainers(arg params.WatchContainer) (params.StringsWatchResult, error) {
	nothing := params.StringsWatchResult{}
	canAccess, err := p.getAuthFunc()
	if err != nil {
		return nothing, err
	}
	if !canAccess(arg.MachineTag) {
		return nothing, common.ErrPerm
	}
	_, id, err := names.ParseTag(arg.MachineTag, names.MachineTagKind)
	if err != nil {
		return nothing, err
	}
	machine, err := p.st.Machine(id)
	if err != nil {
		return nothing, err
	}
	var watch state.StringsWatcher
	if arg.ContainerType != "" {
		watch = machine.WatchContainers(instance.ContainerType(arg.ContainerType))
	} else {
		watch = machine.WatchAllContainers()
	}
	// Consume the initial event and forward it to the result.
	if changes, ok := <-watch.Changes(); ok {
		return params.StringsWatchResult{
			StringsWatcherId: p.resources.Register(watch),
			Changes:          changes,
		}, nil
	}
	return nothing, watcher.MustErr(watch)
}

// WatchForEnvironConfigChanges returns a NotifyWatcher that observes
// changes to the environment configuration.
// Note that although the NotifyWatchResult contains an Error field,
// it's not used because we are only returning a single watcher,
// so we use the regular error return.
func (e *EnvironWatcher) WatchForEnvironConfigChanges() (params.NotifyWatchResult, error) {
	result := params.NotifyWatchResult{}
	canWatch, err := e.getCanWatch()
	if err != nil {
		return result, err
	}
	// TODO(dimitern) If we have multiple environments in state, use a
	// tag argument here and as a method argument.
	if !canWatch("") {
		return result, ErrPerm
	}
	watch := e.st.WatchForEnvironConfigChanges()
	// Consume the initial event. Technically, API
	// calls to Watch 'transmit' the initial event
	// in the Watch response. But NotifyWatchers
	// have no state to transmit.
	if _, ok := <-watch.Changes(); ok {
		result.NotifyWatcherId = e.resources.Register(watch)
	} else {
		return result, watcher.MustErr(watch)
	}
	return result, nil
}

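// The "consume the initial event, then register the watcher" shape above is
// repeated by most of the facade methods below. A minimal sketch of that shared
// step, under the assumption that watcher.MustErr accepts any value exposing
// Err(); the helper name and the local interface are illustrative, not part of
// the original source:
type initialEventWatcher interface {
	Changes() <-chan struct{}
	Err() error
}

// firstNotifyEvent blocks for the watcher's initial event. A closed Changes
// channel means the watcher has died, and the cause is surfaced via MustErr.
func firstNotifyEvent(w initialEventWatcher) error {
	if _, ok := <-w.Changes(); ok {
		return nil
	}
	return watcher.MustErr(w)
}
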
// WatchAuthorisedKeys starts a watcher to track changes to the authorised ssh keys
// for the specified machines.
// The current implementation relies on global authorised keys being stored in the environment config.
// This will change as new user management and authorisation functionality is added.
func (api *KeyUpdaterAPI) WatchAuthorisedKeys(arg params.Entities) (params.NotifyWatchResults, error) {
	results := make([]params.NotifyWatchResult, len(arg.Entities))

	canRead, err := api.getCanRead()
	if err != nil {
		return params.NotifyWatchResults{}, err
	}
	for i, entity := range arg.Entities {
		// 1. Check permissions
		if !canRead(entity.Tag) {
			results[i].Error = common.ServerError(common.ErrPerm)
			continue
		}
		// 2. Check entity exists
		if _, err := api.state.FindEntity(entity.Tag); err != nil {
			if errors.IsNotFound(err) {
				results[i].Error = common.ServerError(common.ErrPerm)
			} else {
				results[i].Error = common.ServerError(err)
			}
			continue
		}
		// 3. Watch for changes
		var err error
		watch := api.state.WatchForEnvironConfigChanges()
		// Consume the initial event.
		if _, ok := <-watch.Changes(); ok {
			results[i].NotifyWatcherId = api.resources.Register(watch)
		} else {
			err = watcher.MustErr(watch)
		}
		results[i].Error = common.ServerError(err)
	}
	return params.NotifyWatchResults{Results: results}, nil
}

// watchMachinesLoop watches for changes provided by the given
// machinesWatcher and starts machine goroutines to deal
// with them, using the provided newMachineContext
// function to create the appropriate context for each new machine id.
func watchMachinesLoop(context updaterContext, w machinesWatcher) (err error) {
	p := &updater{
		context:     context,
		machines:    make(map[string]chan struct{}),
		machineDead: make(chan machine),
	}
	defer func() {
		if stopErr := w.Stop(); stopErr != nil {
			if err == nil {
				err = fmt.Errorf("error stopping watcher: %v", stopErr)
			} else {
				logger.Warningf("ignoring error when stopping watcher: %v", stopErr)
			}
		}
		for len(p.machines) > 0 {
			delete(p.machines, (<-p.machineDead).Id())
		}
	}()
	for {
		select {
		case ids, ok := <-w.Changes():
			if !ok {
				return watcher.MustErr(w)
			}
			if err := p.startMachines(ids); err != nil {
				return err
			}
		case m := <-p.machineDead:
			delete(p.machines, m.Id())
		case <-p.context.dying():
			return nil
		}
	}
}

// addRelation causes the unit agent to join the supplied relation, and to
// store persistent state in the supplied dir.
func (u *Uniter) addRelation(rel *uniter.Relation, dir *relation.StateDir) error {
	logger.Infof("joining relation %q", rel)
	ru, err := rel.Unit(u.unit)
	if err != nil {
		return err
	}
	r := NewRelationer(ru, dir, u.relationHooks)
	w, err := u.unit.Watch()
	if err != nil {
		return err
	}
	defer watcher.Stop(w, &u.tomb)
	for {
		select {
		case <-u.tomb.Dying():
			return tomb.ErrDying
		case _, ok := <-w.Changes():
			if !ok {
				return watcher.MustErr(w)
			}
			err := r.Join()
			if params.IsCodeCannotEnterScopeYet(err) {
				logger.Infof("cannot enter scope for relation %q; waiting for subordinate to be removed", rel)
				continue
			} else if err != nil {
				return err
			}
			logger.Infof("joined relation %q", rel)
			u.relationers[rel.Id()] = r
			return nil
		}
	}
}

func (w *EnvironConfigWatcher) loop() (err error) {
	sw := w.st.watchSettings(environGlobalKey)
	defer sw.Stop()
	out := w.out
	out = nil
	cfg := &config.Config{}
	for {
		select {
		case <-w.st.watcher.Dead():
			return stateWatcherDeadError(w.st.watcher.Err())
		case <-w.tomb.Dying():
			return tomb.ErrDying
		case settings, ok := <-sw.Changes():
			if !ok {
				return watcher.MustErr(sw)
			}
			cfg, err = config.New(config.NoDefaults, settings.Map())
			if err == nil {
				out = w.out
			} else {
				out = nil
			}
		case out <- cfg:
			out = nil
		}
	}
}

func (w *relationUnitsWatcher) loop() (err error) {
	sentInitial := false
	changes := params.RelationUnitsChange{}
	out := w.out
	out = nil
	for {
		select {
		case <-w.st.watcher.Dead():
			return stateWatcherDeadError(w.st.watcher.Err())
		case <-w.tomb.Dying():
			return tomb.ErrDying
		case c, ok := <-w.sw.Changes():
			if !ok {
				return watcher.MustErr(w.sw)
			}
			if err = w.mergeScope(&changes, c); err != nil {
				return err
			}
			if !sentInitial || !emptyRelationUnitsChanges(&changes) {
				out = w.out
			} else {
				out = nil
			}
		case c := <-w.updates:
			if _, err = w.mergeSettings(&changes, c.Id.(string)); err != nil {
				return err
			}
			out = w.out
		case out <- changes:
			sentInitial = true
			changes = params.RelationUnitsChange{}
			out = nil
		}
	}
}

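// The two loops above enable and disable their send case by flipping out
// between w.out and nil: sending on a nil channel blocks forever, so that
// select case is effectively switched off until there is a value worth
// delivering. A self-contained sketch of the idiom (all names here are
// illustrative, not from the original source):
func nilChannelGate(in <-chan int, deliver chan<- int) {
	var pending int
	var out chan<- int // nil, so the send case below starts disabled
	for {
		select {
		case v, ok := <-in:
			if !ok {
				return
			}
			pending = v
			out = deliver // a value is ready: enable the send case
		case out <- pending:
			out = nil // sent: disable again until the next value arrives
		}
	}
}
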
// ModeTerminating marks the unit dead and returns ErrTerminateAgent.
func ModeTerminating(u *Uniter) (next Mode, err error) {
	defer modeContext("ModeTerminating", &err)()
	if err = u.unit.SetStatus(params.StatusStopped, "", nil); err != nil {
		return nil, err
	}
	w, err := u.unit.Watch()
	if err != nil {
		return nil, err
	}
	defer watcher.Stop(w, &u.tomb)
	for {
		select {
		case <-u.tomb.Dying():
			return nil, tomb.ErrDying
		case _, ok := <-w.Changes():
			if !ok {
				return nil, watcher.MustErr(w)
			}
			if err := u.unit.Refresh(); err != nil {
				return nil, err
			}
			if hasSubs, err := u.unit.HasSubordinates(); err != nil {
				return nil, err
			} else if hasSubs {
				continue
			}
			// The unit is known to be Dying; so if it didn't have subordinates
			// just above, it can't acquire new ones before this call.
			if err := u.unit.EnsureDead(); err != nil {
				return nil, err
			}
			return nil, worker.ErrTerminateAgent
		}
	}
}

func (obs *EnvironObserver) loop() error {
	for {
		select {
		case <-obs.tomb.Dying():
			return nil
		case _, ok := <-obs.environWatcher.Changes():
			if !ok {
				return watcher.MustErr(obs.environWatcher)
			}
		}
		config, err := obs.st.EnvironConfig()
		if err != nil {
			logger.Warningf("error reading environment config: %v", err)
			continue
		}
		environ, err := environs.New(config)
		if err != nil {
			logger.Warningf("error creating Environ: %v", err)
			continue
		}
		obs.mu.Lock()
		obs.environ = environ
		obs.mu.Unlock()
	}
}

// WatchAPIHostPorts watches the API server addresses.
func (api *APIAddresser) WatchAPIHostPorts() (params.NotifyWatchResult, error) {
	watch := api.getter.WatchAPIHostPorts()
	if _, ok := <-watch.Changes(); ok {
		return params.NotifyWatchResult{
			NotifyWatcherId: api.resources.Register(watch),
		}, nil
	}
	return params.NotifyWatchResult{}, watcher.MustErr(watch)
}

func (u *UniterAPI) watchOneRelationUnit(relUnit *state.RelationUnit) (params.RelationUnitsWatchResult, error) {
	watch := relUnit.Watch()
	// Consume the initial event and forward it to the result.
	if changes, ok := <-watch.Changes(); ok {
		return params.RelationUnitsWatchResult{
			RelationUnitsWatcherId: u.resources.Register(watch),
			Changes:                changes,
		}, nil
	}
	return params.RelationUnitsWatchResult{}, watcher.MustErr(watch)
}

func (task *provisionerTask) loop() error {
	logger.Infof("Starting up provisioner task %s", task.machineTag)
	defer watcher.Stop(task.machineWatcher, &task.tomb)

	// Don't allow the safe mode to change until we have
	// read at least one set of changes, which will populate
	// the task.machines map. Otherwise we will potentially
	// see all legitimate instances as unknown.
	var safeModeChan chan bool

	// Not all provisioners have a retry channel.
	var retryChan <-chan struct{}
	if task.retryWatcher != nil {
		retryChan = task.retryWatcher.Changes()
	}

	// When the watcher is started, it will have the initial changes be all
	// the machines that are relevant. Also, since this is available straight
	// away, we know there will be some changes right off the bat.
	for {
		select {
		case <-task.tomb.Dying():
			logger.Infof("Shutting down provisioner task %s", task.machineTag)
			return tomb.ErrDying
		case ids, ok := <-task.machineWatcher.Changes():
			if !ok {
				return watcher.MustErr(task.machineWatcher)
			}
			if err := task.processMachines(ids); err != nil {
				return fmt.Errorf("failed to process updated machines: %v", err)
			}
			// We've seen a set of changes. Enable safe mode change.
			safeModeChan = task.safeModeChan
		case safeMode := <-safeModeChan:
			if safeMode == task.safeMode {
				break
			}
			logger.Infof("safe mode changed to %v", safeMode)
			task.safeMode = safeMode
			if !safeMode {
				// Safe mode has been disabled, so process current machines
				// so that unknown machines will be immediately dealt with.
				if err := task.processMachines(nil); err != nil {
					return fmt.Errorf("failed to process machines after safe mode disabled: %v", err)
				}
			}
		case <-retryChan:
			if err := task.processMachinesWithTransientErrors(); err != nil {
				return fmt.Errorf("failed to process machines with transient errors: %v", err)
			}
		}
	}
}

func (u *UnitUpgraderAPI) watchAssignedMachine(unitTag string) (string, error) {
	machine, err := u.getAssignedMachine(unitTag)
	if err != nil {
		return "", err
	}
	watch := machine.Watch()
	// Consume the initial event. Technically, API
	// calls to Watch 'transmit' the initial event
	// in the Watch response. But NotifyWatchers
	// have no state to transmit.
	if _, ok := <-watch.Changes(); ok {
		return u.resources.Register(watch), nil
	}
	return "", watcher.MustErr(watch)
}

func (u *UniterAPI) watchOneServiceRelations(tag string) (params.StringsWatchResult, error) {
	nothing := params.StringsWatchResult{}
	service, err := u.getService(tag)
	if err != nil {
		return nothing, err
	}
	watch := service.WatchRelations()
	// Consume the initial event and forward it to the result.
	if changes, ok := <-watch.Changes(); ok {
		return params.StringsWatchResult{
			StringsWatcherId: u.resources.Register(watch),
			Changes:          changes,
		}, nil
	}
	return nothing, watcher.MustErr(watch)
}

func (p *environProvisioner) loop() error {
	var environConfigChanges <-chan struct{}
	environWatcher, err := p.st.WatchForEnvironConfigChanges()
	if err != nil {
		return err
	}
	environConfigChanges = environWatcher.Changes()
	defer watcher.Stop(environWatcher, &p.tomb)

	p.environ, err = worker.WaitForEnviron(environWatcher, p.st, p.tomb.Dying())
	if err != nil {
		return err
	}
	p.broker = p.environ

	safeMode := p.environ.Config().ProvisionerSafeMode()
	task, err := p.getStartTask(safeMode)
	if err != nil {
		return err
	}
	defer watcher.Stop(task, &p.tomb)

	for {
		select {
		case <-p.tomb.Dying():
			return tomb.ErrDying
		case <-task.Dying():
			err := task.Err()
			logger.Errorf("environ provisioner died: %v", err)
			return err
		case _, ok := <-environConfigChanges:
			if !ok {
				return watcher.MustErr(environWatcher)
			}
			environConfig, err := p.st.EnvironConfig()
			if err != nil {
				logger.Errorf("cannot load environment configuration: %v", err)
				return err
			}
			if err := p.setConfig(environConfig); err != nil {
				logger.Errorf("loaded invalid environment configuration: %v", err)
			}
			task.SetSafeMode(environConfig.ProvisionerSafeMode())
		}
	}
}

// WatchMachineErrorRetry returns a NotifyWatcher that notifies when
// the provisioner should retry provisioning machines with transient errors.
func (p *ProvisionerAPI) WatchMachineErrorRetry() (params.NotifyWatchResult, error) {
	result := params.NotifyWatchResult{}
	canWatch, err := p.getCanWatchMachines()
	if err != nil {
		return params.NotifyWatchResult{}, err
	}
	if !canWatch("") {
		return result, common.ErrPerm
	}
	watch := newWatchMachineErrorRetry()
	// Consume any initial event and forward it to the result.
	if _, ok := <-watch.Changes(); ok {
		result.NotifyWatcherId = p.resources.Register(watch)
	} else {
		return result, watcher.MustErr(watch)
	}
	return result, nil
}

func (u *UniterAPI) watchOneUnitConfigSettings(tag string) (string, error) {
	unit, err := u.getUnit(tag)
	if err != nil {
		return "", err
	}
	watch, err := unit.WatchConfigSettings()
	if err != nil {
		return "", err
	}
	// Consume the initial event. Technically, API
	// calls to Watch 'transmit' the initial event
	// in the Watch response. But NotifyWatchers
	// have no state to transmit.
	if _, ok := <-watch.Changes(); ok {
		return u.resources.Register(watch), nil
	}
	return "", watcher.MustErr(watch)
}

func (a *AgentEntityWatcher) watchEntity(tag string) (string, error) {
	entity0, err := a.st.FindEntity(tag)
	if err != nil {
		return "", err
	}
	entity, ok := entity0.(state.NotifyWatcherFactory)
	if !ok {
		return "", NotSupportedError(tag, "watching")
	}
	watch := entity.Watch()
	// Consume the initial event. Technically, API
	// calls to Watch 'transmit' the initial event
	// in the Watch response. But NotifyWatchers
	// have no state to transmit.
	if _, ok := <-watch.Changes(); ok {
		return a.resources.Register(watch), nil
	}
	return "", watcher.MustErr(watch)
}

// WatchEnvironMachines returns a StringsWatcher that notifies of
// changes to the life cycles of the top level machines in the current
// environment.
func (e *EnvironMachinesWatcher) WatchEnvironMachines() (params.StringsWatchResult, error) {
	result := params.StringsWatchResult{}
	canWatch, err := e.getCanWatch()
	if err != nil {
		return params.StringsWatchResult{}, err
	}
	if !canWatch("") {
		return result, ErrPerm
	}
	watch := e.st.WatchEnvironMachines()
	// Consume the initial event and forward it to the result.
	if changes, ok := <-watch.Changes(); ok {
		result.StringsWatcherId = e.resources.Register(watch)
		result.Changes = changes
	} else {
		err := watcher.MustErr(watch)
		return result, fmt.Errorf("cannot obtain initial environment machines: %v", err)
	}
	return result, nil
}

// WatchLoggingConfig starts a watcher to track changes to the logging config
// for the specified agents. Unfortunately the current infrastructure makes
// watching parts of the config non-trivial, so currently any change to the
// config will cause the watcher to notify the client.
func (api *LoggerAPI) WatchLoggingConfig(arg params.Entities) params.NotifyWatchResults {
	result := make([]params.NotifyWatchResult, len(arg.Entities))
	for i, entity := range arg.Entities {
		err := common.ErrPerm
		if api.authorizer.AuthOwner(entity.Tag) {
			watch := api.state.WatchForEnvironConfigChanges()
			// Consume the initial event. Technically, API calls to Watch
			// 'transmit' the initial event in the Watch response. But
			// NotifyWatchers have no state to transmit.
			if _, ok := <-watch.Changes(); ok {
				result[i].NotifyWatcherId = api.resources.Register(watch)
				err = nil
			} else {
				err = watcher.MustErr(watch)
			}
		}
		result[i].Error = common.ServerError(err)
	}
	return params.NotifyWatchResults{Results: result}
}

// watchLoop watches the unit for port changes.
func (ud *unitData) watchLoop(latestPorts []instance.Port) {
	defer ud.tomb.Done()
	w, err := ud.unit.Watch()
	if err != nil {
		ud.fw.tomb.Kill(err)
		return
	}
	defer watcher.Stop(w, &ud.tomb)
	for {
		select {
		case <-ud.tomb.Dying():
			return
		case _, ok := <-w.Changes():
			if !ok {
				ud.fw.tomb.Kill(watcher.MustErr(w))
				return
			}
			if err := ud.unit.Refresh(); err != nil {
				if !params.IsCodeNotFound(err) {
					ud.fw.tomb.Kill(err)
				}
				return
			}
			change, err := ud.unit.OpenedPorts()
			if err != nil {
				ud.fw.tomb.Kill(err)
				return
			}
			if samePorts(change, latestPorts) {
				continue
			}
			latestPorts = append(latestPorts[:0], change...)
			select {
			case ud.fw.portsChange <- &portsChange{ud, change}:
			case <-ud.tomb.Dying():
				return
			}
		}
	}
}

// watchLoop watches the service's exposed flag for changes.
func (sd *serviceData) watchLoop(exposed bool) {
	defer sd.tomb.Done()
	w, err := sd.service.Watch()
	if err != nil {
		sd.fw.tomb.Kill(err)
		return
	}
	defer watcher.Stop(w, &sd.tomb)
	for {
		select {
		case <-sd.tomb.Dying():
			return
		case _, ok := <-w.Changes():
			if !ok {
				sd.fw.tomb.Kill(watcher.MustErr(w))
				return
			}
			if err := sd.service.Refresh(); err != nil {
				if !params.IsCodeNotFound(err) {
					sd.fw.tomb.Kill(err)
				}
				return
			}
			change, err := sd.service.IsExposed()
			if err != nil {
				sd.fw.tomb.Kill(err)
				return
			}
			if change == exposed {
				continue
			}
			exposed = change
			select {
			case sd.fw.exposedChange <- &exposedChange{sd, change}:
			case <-sd.tomb.Dying():
				return
			}
		}
	}
}

// WaitForEnviron waits for a valid environment to arrive from
// the given watcher. It terminates with tomb.ErrDying if
// it receives a value on dying.
func WaitForEnviron(w apiwatcher.NotifyWatcher, st EnvironConfigGetter, dying <-chan struct{}) (environs.Environ, error) {
	for {
		select {
		case <-dying:
			return nil, tomb.ErrDying
		case _, ok := <-w.Changes():
			if !ok {
				return nil, watcher.MustErr(w)
			}
			config, err := st.EnvironConfig()
			if err != nil {
				return nil, err
			}
			environ, err := environs.New(config)
			if err == nil {
				return environ, nil
			}
			logger.Errorf("loaded invalid environment configuration: %v", err)
			loadedInvalid()
		}
	}
}

// watchLoop watches the machine for units added or removed.
func (md *machineData) watchLoop(unitw apiwatcher.StringsWatcher) {
	defer md.tomb.Done()
	defer watcher.Stop(unitw, &md.tomb)
	for {
		select {
		case <-md.tomb.Dying():
			return
		case change, ok := <-unitw.Changes():
			if !ok {
				_, err := md.machine()
				if !params.IsCodeNotFound(err) {
					md.fw.tomb.Kill(watcher.MustErr(unitw))
				}
				return
			}
			select {
			case md.fw.unitsChange <- &unitsChange{md, change}:
			case <-md.tomb.Dying():
				return
			}
		}
	}
}

func (u *UnitsWatcher) watchOneEntityUnits(canWatch AuthFunc, tag string) (params.StringsWatchResult, error) {
	nothing := params.StringsWatchResult{}
	if !canWatch(tag) {
		return nothing, ErrPerm
	}
	entity0, err := u.st.FindEntity(tag)
	if err != nil {
		return nothing, err
	}
	entity, ok := entity0.(state.UnitsWatcher)
	if !ok {
		return nothing, NotSupportedError(tag, "watching units")
	}
	watch := entity.WatchUnits()
	// Consume the initial event and forward it to the result.
	if changes, ok := <-watch.Changes(); ok {
		return params.StringsWatchResult{
			StringsWatcherId: u.resources.Register(watch),
			Changes:          changes,
		}, nil
	}
	return nothing, watcher.MustErr(watch)
}

// WatchAPIVersion starts a watcher to track if there is a new version
// of the API that we want to upgrade to.
func (u *UpgraderAPI) WatchAPIVersion(args params.Entities) (params.NotifyWatchResults, error) {
	result := params.NotifyWatchResults{
		Results: make([]params.NotifyWatchResult, len(args.Entities)),
	}
	for i, agent := range args.Entities {
		err := common.ErrPerm
		if u.authorizer.AuthOwner(agent.Tag) {
			watch := u.st.WatchForEnvironConfigChanges()
			// Consume the initial event. Technically, API
			// calls to Watch 'transmit' the initial event
			// in the Watch response. But NotifyWatchers
			// have no state to transmit.
			if _, ok := <-watch.Changes(); ok {
				result.Results[i].NotifyWatcherId = u.resources.Register(watch)
				err = nil
			} else {
				err = watcher.MustErr(watch)
			}
		}
		result.Results[i].Error = common.ServerError(err)
	}
	return result, nil
}

// startMachine creates a new data value for tracking details of the
// machine and starts watching the machine for units added or removed.
func (fw *Firewaller) startMachine(tag string) error {
	machined := &machineData{
		fw:     fw,
		tag:    tag,
		unitds: make(map[string]*unitData),
		ports:  make([]instance.Port, 0),
	}
	m, err := machined.machine()
	if params.IsCodeNotFound(err) {
		return nil
	} else if err != nil {
		return errgo.Annotate(err, "cannot watch machine units")
	}
	unitw, err := m.WatchUnits()
	if err != nil {
		return err
	}
	select {
	case <-fw.tomb.Dying():
		stop("units watcher", unitw)
		return tomb.ErrDying
	case change, ok := <-unitw.Changes():
		if !ok {
			stop("units watcher", unitw)
			return watcher.MustErr(unitw)
		}
		fw.machineds[tag] = machined
		err = fw.unitsChanged(&unitsChange{machined, change})
		if err != nil {
			stop("units watcher", unitw)
			delete(fw.machineds, tag)
			return errgo.Annotatef(err, "cannot respond to units changes for %q", tag)
		}
	}
	go machined.watchLoop(unitw)
	return nil
}

func (f *filter) loop(unitTag string) (err error) {
	defer func() {
		if params.IsCodeNotFoundOrCodeUnauthorized(err) {
			err = worker.ErrTerminateAgent
		}
	}()
	if f.unit, err = f.st.Unit(unitTag); err != nil {
		return err
	}
	if err = f.unitChanged(); err != nil {
		return err
	}
	f.service, err = f.unit.Service()
	if err != nil {
		return err
	}
	if err = f.serviceChanged(); err != nil {
		return err
	}
	unitw, err := f.unit.Watch()
	if err != nil {
		return err
	}
	defer f.maybeStopWatcher(unitw)
	servicew, err := f.service.Watch()
	if err != nil {
		return err
	}
	defer f.maybeStopWatcher(servicew)
	// configw and relationsw can get restarted, so we need to use
	// their eventual values in the defer calls.
	var configw apiwatcher.NotifyWatcher
	var configChanges <-chan struct{}
	curl, err := f.unit.CharmURL()
	if err == nil {
		configw, err = f.unit.WatchConfigSettings()
		if err != nil {
			return err
		}
		configChanges = configw.Changes()
		f.upgradeFrom.url = curl
	} else if err != uniter.ErrNoCharmURLSet {
		filterLogger.Errorf("unit charm: %v", err)
		return err
	}
	defer func() {
		if configw != nil {
			watcher.Stop(configw, &f.tomb)
		}
	}()
	relationsw, err := f.service.WatchRelations()
	if err != nil {
		return err
	}
	defer func() {
		if relationsw != nil {
			watcher.Stop(relationsw, &f.tomb)
		}
	}()

	// Config events cannot be meaningfully discarded until one is available;
	// once we receive the initial change, we unblock discard requests by
	// setting this channel to its namesake on f.
	var discardConfig chan struct{}
	for {
		var ok bool
		select {
		case <-f.tomb.Dying():
			return tomb.ErrDying

		// Handle watcher changes.
		case _, ok = <-unitw.Changes():
			filterLogger.Debugf("got unit change")
			if !ok {
				return watcher.MustErr(unitw)
			}
			if err = f.unitChanged(); err != nil {
				return err
			}
		case _, ok = <-servicew.Changes():
			filterLogger.Debugf("got service change")
			if !ok {
				return watcher.MustErr(servicew)
			}
			if err = f.serviceChanged(); err != nil {
				return err
			}
		case _, ok = <-configChanges:
			filterLogger.Debugf("got config change")
			if !ok {
				return watcher.MustErr(configw)
			}
			filterLogger.Debugf("preparing new config event")
			f.outConfig = f.outConfigOn
			discardConfig = f.discardConfig
		case keys, ok := <-relationsw.Changes():
			filterLogger.Debugf("got relations change")
			if !ok {
				return watcher.MustErr(relationsw)
			}
			var ids []int
			for _, key := range keys {
				relationTag := names.RelationTag(key)
				rel, err := f.st.Relation(relationTag)
				if params.IsCodeNotFoundOrCodeUnauthorized(err) {
					// If it's actually gone, this unit cannot have entered
					// scope, and therefore never needs to know about it.
				} else if err != nil {
					return err
				} else {
					ids = append(ids, rel.Id())
				}
			}
			f.relationsChanged(ids)

		// Send events on active out chans.
		case f.outUpgrade <- f.upgrade:
			filterLogger.Debugf("sent upgrade event")
			f.outUpgrade = nil
		case f.outResolved <- f.resolved:
			filterLogger.Debugf("sent resolved event")
			f.outResolved = nil
		case f.outConfig <- nothing:
			filterLogger.Debugf("sent config event")
			f.outConfig = nil
		case f.outRelations <- f.relations:
			filterLogger.Debugf("sent relations event")
			f.outRelations = nil
			f.relations = nil

		// Handle explicit requests.
		case curl := <-f.setCharm:
			filterLogger.Debugf("changing charm to %q", curl)
			// We need to restart the config watcher after setting the
			// charm, because service config settings are distinct for
			// different service charms.
			if configw != nil {
				if err := configw.Stop(); err != nil {
					return err
				}
			}
			if err := f.unit.SetCharmURL(curl); err != nil {
				filterLogger.Debugf("failed setting charm url %q: %v", curl, err)
				return err
			}
			select {
			case <-f.tomb.Dying():
				return tomb.ErrDying
			case f.didSetCharm <- nothing:
			}
			configw, err = f.unit.WatchConfigSettings()
			if err != nil {
				return err
			}
			configChanges = configw.Changes()

			// Restart the relations watcher.
			if err := relationsw.Stop(); err != nil {
				return err
			}
			relationsw, err = f.service.WatchRelations()
			if err != nil {
				return err
			}

			f.upgradeFrom.url = curl
			if err = f.upgradeChanged(); err != nil {
				return err
			}
		case force := <-f.wantForcedUpgrade:
			filterLogger.Debugf("want forced upgrade %v", force)
			f.upgradeFrom.force = force
			if err = f.upgradeChanged(); err != nil {
				return err
			}
		case <-f.wantResolved:
			filterLogger.Debugf("want resolved event")
			if f.resolved != params.ResolvedNone {
				f.outResolved = f.outResolvedOn
			}
		case <-f.clearResolved:
			filterLogger.Debugf("resolved event handled")
			f.outResolved = nil
			if err := f.unit.ClearResolved(); err != nil {
				return err
			}
			if err := f.unitChanged(); err != nil {
				return err
			}
			select {
			case <-f.tomb.Dying():
				return tomb.ErrDying
			case f.didClearResolved <- nothing:
			}
		case <-discardConfig:
			filterLogger.Debugf("discarded config event")
			f.outConfig = nil
		}
	}
}