func (w *RelationUnitsWatcher) loop() (err error) { sentInitial := false changes := RelationUnitsChange{} out := w.out out = nil for { select { case <-w.st.watcher.Dead(): return watcher.MustErr(w.st.watcher) case <-w.tomb.Dying(): return tomb.ErrDying case c, ok := <-w.sw.Changes(): if !ok { return watcher.MustErr(w.sw) } if err = w.mergeScope(&changes, c); err != nil { return err } if !sentInitial || !changes.empty() { out = w.out } else { out = nil } case c := <-w.updates: if _, err = w.mergeSettings(&changes, c.Id.(string)); err != nil { return err } out = w.out case out <- changes: sentInitial = true changes = RelationUnitsChange{} out = nil } } }
func (w *EnvironConfigWatcher) loop() (err error) { sw := w.st.watchSettings(environGlobalKey) defer sw.Stop() out := w.out out = nil cfg := &config.Config{} for { select { case <-w.st.watcher.Dead(): return watcher.MustErr(w.st.watcher) case <-w.tomb.Dying(): return tomb.ErrDying case settings, ok := <-sw.Changes(): if !ok { return watcher.MustErr(sw) } cfg, err = config.New(settings.Map()) if err == nil { out = w.out } else { out = nil } case out <- cfg: out = nil } } return nil }
func (s *WatcherSuite) TestMustErr(c *C) { err := watcher.MustErr(&dummyWatcher{errors.New("POW")}) c.Assert(err, ErrorMatches, "POW") stillAlive := func() { watcher.MustErr(&dummyWatcher{tomb.ErrStillAlive}) } c.Assert(stillAlive, PanicMatches, "watcher is still running") noErr := func() { watcher.MustErr(&dummyWatcher{nil}) } c.Assert(noErr, PanicMatches, "watcher was stopped cleanly") }
// loop reconciles the supplied persisted State with the watcher's
// initial event, then delivers queued hook.Infos on q.out until killed.
func (q *AliveHookQueue) loop(initial *State) {
	defer q.tomb.Done()
	defer watcher.Stop(q.w, &q.tomb)

	// Consume initial event, and reconcile with initial state, by inserting
	// a new RelationUnitsChange before the initial event, which schedules
	// every missing unit for immediate departure before anything else happens
	// (apart from a single potential required post-joined changed event).
	ch1, ok := <-q.w.Changes()
	if !ok {
		q.tomb.Kill(watcher.MustErr(q.w))
		return
	}
	if len(ch1.Departed) != 0 {
		// A fresh watcher's first event contains no departures by
		// construction; anything else indicates misuse.
		panic("AliveHookQueue must be started with a fresh RelationUnitsWatcher")
	}
	q.changedPending = initial.ChangedPending
	ch0 := state.RelationUnitsChange{}
	for unit, version := range initial.Members {
		// Seed known-member info from the persisted state.
		q.info[unit] = &unitInfo{
			unit:    unit,
			version: version,
			joined:  true,
		}
		// Members absent from the initial event must depart first.
		if _, found := ch1.Changed[unit]; !found {
			ch0.Departed = append(ch0.Departed, unit)
		}
	}
	q.update(ch0)
	q.update(ch1)

	var next hook.Info
	var out chan<- hook.Info
	for {
		// Only arm the send case when a hook is queued; next is
		// refreshed each iteration so out always carries q.next().
		if q.empty() {
			out = nil
		} else {
			out = q.out
			next = q.next()
		}
		select {
		case <-q.tomb.Dying():
			return
		case ch, ok := <-q.w.Changes():
			if !ok {
				q.tomb.Kill(watcher.MustErr(q.w))
				return
			}
			q.update(ch)
		case out <- next:
			q.pop()
		}
	}
}
func (p *Provisioner) loop() error { environWatcher := p.st.WatchEnvironConfig() defer watcher.Stop(environWatcher, &p.tomb) var err error p.environ, err = worker.WaitForEnviron(environWatcher, p.tomb.Dying()) if err != nil { return err } // Get a new StateInfo from the environment: the one used to // launch the agent may refer to localhost, which will be // unhelpful when attempting to run an agent on a new machine. if p.stateInfo, p.apiInfo, err = p.environ.StateInfo(); err != nil { return err } // Call processMachines to stop any unknown instances before watching machines. if err := p.processMachines(nil); err != nil { return err } // Start responding to changes in machines, and to any further updates // to the environment config. machinesWatcher := p.st.WatchMachines() defer watcher.Stop(machinesWatcher, &p.tomb) // START OMIT // launchpad.net/juju-core/worker/provisioner/provisioner.go for { select { case <-p.tomb.Dying(): return tomb.ErrDying case cfg, ok := <-environWatcher.Changes(): if !ok { return watcher.MustErr(environWatcher) } if err := p.setConfig(cfg); err != nil { log.Errorf("worker/provisioner: loaded invalid environment configuration: %v", err) } case ids, ok := <-machinesWatcher.Changes(): if !ok { return watcher.MustErr(machinesWatcher) } if err := p.processMachines(ids); err != nil { return err } } } // END OMIT panic("not reached") }
// loop is the provisioner task's main loop: it processes batches of
// machine ids from the machine watcher until the task is killed.
func (task *provisionerTask) loop() error {
	logger.Infof("Starting up provisioner task %s", task.machineId)
	defer watcher.Stop(task.machineWatcher, &task.tomb)

	// When the watcher is started, it will have the initial changes be all
	// the machines that are relevant. Also, since this is available straight
	// away, we know there will be some changes right off the bat.
	for {
		select {
		case <-task.tomb.Dying():
			logger.Infof("Shutting down provisioner task %s", task.machineId)
			return tomb.ErrDying
		case ids, ok := <-task.machineWatcher.Changes():
			if !ok {
				// Channel closed: surface the watcher's own error.
				return watcher.MustErr(task.machineWatcher)
			}
			// TODO(dfc; lp:1042717) fire process machines periodically to shut down unknown
			// instances.
			if err := task.processMachines(ids); err != nil {
				logger.Errorf("Process machines failed: %v", err)
				return err
			}
		}
	}
}
func (w *minUnitsWatcher) loop() (err error) { ch := make(chan watcher.Change) w.st.watcher.WatchCollection(w.st.minUnits.Name, ch) defer w.st.watcher.UnwatchCollection(w.st.minUnits.Name, ch) serviceNames, err := w.initial() if err != nil { return err } out := w.out for { select { case <-w.tomb.Dying(): return tomb.ErrDying case change, ok := <-ch: if !ok { return watcher.MustErr(w.st.watcher) } if err = w.merge(serviceNames, change); err != nil { return err } if !serviceNames.IsEmpty() { out = w.out } case out <- serviceNames.Values(): out = nil serviceNames = new(set.Strings) } } return nil }
func (w *lifecycleWatcher) loop() (err error) { in := make(chan watcher.Change) w.st.watcher.WatchCollectionWithFilter(w.coll.Name, in, w.filter) defer w.st.watcher.UnwatchCollection(w.coll.Name, in) ids, err := w.initial() if err != nil { return err } out := w.out for { select { case <-w.tomb.Dying(): return tomb.ErrDying case <-w.st.watcher.Dead(): return watcher.MustErr(w.st.watcher) case ch := <-in: updates, ok := collect(ch, in, w.tomb.Dying()) if !ok { return tomb.ErrDying } if err := w.merge(ids, updates); err != nil { return err } if !ids.IsEmpty() { out = w.out } case out <- ids.Values(): ids = &set.Strings{} out = nil } } return nil }
func (w *entityWatcher) loop(coll *mgo.Collection, key string) (err error) { doc := &struct { TxnRevno int64 `bson:"txn-revno"` }{} fields := D{{"txn-revno", 1}} if err := coll.FindId(key).Select(fields).One(doc); err == mgo.ErrNotFound { doc.TxnRevno = -1 } else if err != nil { return err } in := make(chan watcher.Change) w.st.watcher.Watch(coll.Name, key, doc.TxnRevno, in) defer w.st.watcher.Unwatch(coll.Name, key, in) out := w.out for { select { case <-w.tomb.Dying(): return tomb.ErrDying case <-w.st.watcher.Dead(): return watcher.MustErr(w.st.watcher) case ch := <-in: if _, ok := collect(ch, in, w.tomb.Dying()); !ok { return tomb.ErrDying } out = w.out case out <- struct{}{}: out = nil } } return nil }
// Watch starts an NotifyWatcher for each given machine. func (m *MachinerAPI) Watch(args params.Entities) (params.NotifyWatchResults, error) { result := params.NotifyWatchResults{ Results: make([]params.NotifyWatchResult, len(args.Entities)), } if len(args.Entities) == 0 { return result, nil } for i, entity := range args.Entities { err := common.ErrPerm if m.auth.AuthOwner(entity.Tag) { var machine *state.Machine machine, err = m.st.Machine(state.MachineIdFromTag(entity.Tag)) if err == nil { watch := machine.Watch() // Consume the initial event. Technically, API // calls to Watch 'transmit' the initial event // in the Watch response. But NotifyWatchers // have no state to transmit. if _, ok := <-watch.Changes(); ok { result.Results[i].NotifyWatcherId = m.resources.Register(watch) } else { err = watcher.MustErr(watch) } } } result.Results[i].Error = common.ServerError(err) } return result, nil }
// addRelation causes the unit agent to join the supplied relation, and to // store persistent state in the supplied dir. func (u *Uniter) addRelation(rel *state.Relation, dir *relation.StateDir) error { log.Printf("worker/uniter: joining relation %q", rel) ru, err := rel.Unit(u.unit) if err != nil { return err } r := NewRelationer(ru, dir, u.relationHooks) w := u.unit.Watch() defer watcher.Stop(w, &u.tomb) for { select { case <-u.tomb.Dying(): return tomb.ErrDying case _, ok := <-w.Changes(): if !ok { return watcher.MustErr(w) } if err := r.Join(); err == state.ErrCannotEnterScopeYet { log.Printf("worker/uniter: cannot enter scope for relation %q; waiting for subordinate to be removed", rel) continue } else if err != nil { return err } log.Printf("worker/uniter: joined relation %q", rel) u.relationers[rel.Id()] = r return nil } } panic("unreachable") }
func (w *ServiceUnitsWatcher) loop() (err error) { ch := make(chan watcher.Change) w.st.watcher.WatchCollection(w.st.units.Name, ch) defer w.st.watcher.UnwatchCollection(w.st.units.Name, ch) changes, err := w.initial() if err != nil { return err } prefix := w.service.doc.Name + "/" out := w.out for { select { case <-w.st.watcher.Dead(): return watcher.MustErr(w.st.watcher) case <-w.tomb.Dying(): return tomb.ErrDying case c := <-ch: name := c.Id.(string) if !strings.HasPrefix(name, prefix) { continue } changes, err = w.merge(changes, name) if err != nil { return err } if len(changes) > 0 { out = w.out } case out <- changes: out = nil changes = nil } } return nil }
func (d *Deployer) loop(w *state.UnitsWatcher) error { deployed, err := d.mgr.DeployedUnits() if err != nil { return err } for _, unitName := range deployed { d.deployed[unitName] = true if err := d.changed(unitName); err != nil { return err } } for { select { case <-d.tomb.Dying(): return tomb.ErrDying case changes, ok := <-w.Changes(): if !ok { return watcher.MustErr(w) } for _, unitName := range changes { if err := d.changed(unitName); err != nil { return err } } } } panic("unreachable") }
func (d *Deployer) loop() error { machine, err := d.st.Machine(d.machineId) if err != nil { return err } machineUnitsWatcher := machine.WatchUnits() defer watcher.Stop(machineUnitsWatcher, &d.tomb) deployed, err := d.ctx.DeployedUnits() if err != nil { return err } for _, unitName := range deployed { d.deployed.Add(unitName) if err := d.changed(unitName); err != nil { return err } } for { select { case <-d.tomb.Dying(): return tomb.ErrDying case changes, ok := <-machineUnitsWatcher.Changes(): if !ok { return watcher.MustErr(machineUnitsWatcher) } for _, unitName := range changes { if err := d.changed(unitName); err != nil { return err } } } } panic("unreachable") }
// startMachine creates a new data value for tracking details of the
// machine and starts watching the machine for units added or removed.
func (fw *Firewaller) startMachine(id string) error {
	machined := &machineData{
		fw:     fw,
		id:     id,
		unitds: make(map[string]*unitData),
		ports:  make([]instance.Port, 0),
	}
	m, err := machined.machine()
	if errors.IsNotFoundError(err) {
		// The machine is already gone; nothing to watch.
		return nil
	} else if err != nil {
		return fmt.Errorf("worker/firewaller: cannot watch machine units: %v", err)
	}
	unitw := m.WatchUnits()
	// Process the initial units event synchronously — and register the
	// machine only on success — before handing the watcher over to the
	// machine's own long-running watch loop.
	select {
	case <-fw.tomb.Dying():
		stop("units watcher", unitw)
		return tomb.ErrDying
	case change, ok := <-unitw.Changes():
		if !ok {
			stop("units watcher", unitw)
			return watcher.MustErr(unitw)
		}
		fw.machineds[id] = machined
		err = fw.unitsChanged(&unitsChange{machined, change})
		if err != nil {
			stop("units watcher", unitw)
			return fmt.Errorf("worker/firewaller: cannot respond to units changes for machine %q: %v", id, err)
		}
	}
	go machined.watchLoop(unitw)
	return nil
}
// watchLoop watches the unit for port changes, forwarding each distinct
// set of opened ports to the firewaller until the unit dies or is removed.
func (ud *unitData) watchLoop(latestPorts []instance.Port) {
	defer ud.tomb.Done()
	w := ud.unit.Watch()
	defer watcher.Stop(w, &ud.tomb)
	for {
		select {
		case <-ud.tomb.Dying():
			return
		case _, ok := <-w.Changes():
			if !ok {
				ud.fw.tomb.Kill(watcher.MustErr(w))
				return
			}
			if err := ud.unit.Refresh(); err != nil {
				// Unit removal ends the loop silently; any other
				// refresh error kills the firewaller.
				if !errors.IsNotFoundError(err) {
					ud.fw.tomb.Kill(err)
				}
				return
			}
			change := ud.unit.OpenedPorts()
			// Skip notifications when the port set is unchanged.
			if samePorts(change, latestPorts) {
				continue
			}
			latestPorts = append(latestPorts[:0], change...)
			select {
			case ud.fw.portsChange <- &portsChange{ud, change}:
			case <-ud.tomb.Dying():
				return
			}
		}
	}
}
func (w *LifecycleWatcher) loop() (err error) { in := make(chan watcher.Change) w.st.watcher.WatchCollection(w.coll.Name, in) defer w.st.watcher.UnwatchCollection(w.coll.Name, in) ids, err := w.initial() if err != nil { return err } out := w.out for { select { case <-w.tomb.Dying(): return tomb.ErrDying case <-w.st.watcher.Dead(): return watcher.MustErr(w.st.watcher) case ch := <-in: if ids, err = w.merge(ids, ch); err != nil { return err } if len(ids) > 0 { out = w.out } case out <- ids: ids = nil out = nil } } return nil }
func (w *settingsWatcher) loop(key string) (err error) { ch := make(chan watcher.Change) revno := int64(-1) settings, err := readSettings(w.st, key) if err == nil { revno = settings.txnRevno } else if !IsNotFound(err) { return err } w.st.watcher.Watch(w.st.settings.Name, key, revno, ch) defer w.st.watcher.Unwatch(w.st.settings.Name, key, ch) out := w.out if revno == -1 { out = nil } for { select { case <-w.st.watcher.Dead(): return watcher.MustErr(w.st.watcher) case <-w.tomb.Dying(): return tomb.ErrDying case <-ch: settings, err = readSettings(w.st, key) if err != nil { return err } out = w.out case out <- settings: out = nil } } return nil }
func (w *RelationScopeWatcher) loop() error { ch := make(chan watcher.Change) w.st.watcher.WatchCollection(w.st.relationScopes.Name, ch) defer w.st.watcher.UnwatchCollection(w.st.relationScopes.Name, ch) changes, err := w.getInitialEvent() if err != nil { return err } out := w.out for { select { case <-w.st.watcher.Dead(): return watcher.MustErr(w.st.watcher) case <-w.tomb.Dying(): return tomb.ErrDying case c := <-ch: if err := w.mergeChange(changes, c); err != nil { return err } if !changes.isEmpty() { out = w.out } case out <- changes: changes = &RelationScopeChange{} out = nil } } return nil }
// watchLoop watches the service's exposed flag for changes, forwarding
// each flip of the flag to the firewaller until the service is removed.
func (sd *serviceData) watchLoop(exposed bool) {
	defer sd.tomb.Done()
	w := sd.service.Watch()
	defer watcher.Stop(w, &sd.tomb)
	for {
		select {
		case <-sd.tomb.Dying():
			return
		case _, ok := <-w.Changes():
			if !ok {
				sd.fw.tomb.Kill(watcher.MustErr(w))
				return
			}
			if err := sd.service.Refresh(); err != nil {
				// Service removal ends the loop silently; any other
				// refresh error kills the firewaller.
				if !errors.IsNotFoundError(err) {
					sd.fw.tomb.Kill(err)
				}
				return
			}
			change := sd.service.IsExposed()
			// Only forward actual transitions of the exposed flag.
			if change == exposed {
				continue
			}
			exposed = change
			select {
			case sd.fw.exposedChange <- &exposedChange{sd, change}:
			case <-sd.tomb.Dying():
				return
			}
		}
	}
}
// ModeTerminating marks the unit dead and returns ErrTerminateAgent.
// It waits for any subordinates to be removed before calling
// EnsureDead.
func ModeTerminating(u *Uniter) (next Mode, err error) {
	defer modeContext("ModeTerminating", &err)()
	if err = u.unit.SetStatus(params.StatusStopped, ""); err != nil {
		return nil, err
	}
	w := u.unit.Watch()
	defer watcher.Stop(w, &u.tomb)
	for {
		select {
		case <-u.tomb.Dying():
			return nil, tomb.ErrDying
		case _, ok := <-w.Changes():
			if !ok {
				return nil, watcher.MustErr(w)
			}
			if err := u.unit.Refresh(); err != nil {
				return nil, err
			}
			// Keep waiting while subordinates remain.
			if len(u.unit.SubordinateNames()) > 0 {
				continue
			}
			// The unit is known to be Dying; so if it didn't have subordinates
			// just above, it can't acquire new ones before this call.
			if err := u.unit.EnsureDead(); err != nil {
				return nil, err
			}
			return nil, worker.ErrTerminateAgent
		}
	}
}
func (p *Provisioner) loop() error { environWatcher := p.st.WatchEnvironConfig() defer watcher.Stop(environWatcher, &p.tomb) var err error p.environ, err = worker.WaitForEnviron(environWatcher, p.tomb.Dying()) if err != nil { return err } auth, err := NewSimpleAuthenticator(p.environ) if err != nil { return err } // Start a new worker for the environment provider. // Start responding to changes in machines, and to any further updates // to the environment config. instanceBroker, err := p.getBroker() if err != nil { return err } machineWatcher, err := p.getWatcher() if err != nil { return err } environmentProvisioner := NewProvisionerTask( p.machineId, p.st, machineWatcher, instanceBroker, auth) defer watcher.Stop(environmentProvisioner, &p.tomb) for { select { case <-p.tomb.Dying(): return tomb.ErrDying case <-environmentProvisioner.Dying(): err := environmentProvisioner.Err() logger.Errorf("environment provisioner died: %v", err) return err case cfg, ok := <-environWatcher.Changes(): if !ok { return watcher.MustErr(environWatcher) } if err := p.setConfig(cfg); err != nil { logger.Errorf("loaded invalid environment configuration: %v", err) } } } panic("not reached") }
func (w *EntityWatcher) loop(ch <-chan watcher.Change) (err error) { out := w.out for { select { case <-w.st.watcher.Dead(): return watcher.MustErr(w.st.watcher) case <-w.tomb.Dying(): return tomb.ErrDying case <-ch: out = w.out case out <- struct{}{}: out = nil } } return nil }
// WaitForEnviron waits for an valid environment to arrive from // the given watcher. It terminates with tomb.ErrDying if // it receives a value on dying. func WaitForEnviron(w *state.EnvironConfigWatcher, dying <-chan struct{}) (environs.Environ, error) { for { select { case <-dying: return nil, tomb.ErrDying case config, ok := <-w.Changes(): if !ok { return nil, watcher.MustErr(w) } environ, err := environs.New(config) if err == nil { return environ, nil } log.Errorf("worker: loaded invalid environment configuration: %v", err) loadedInvalid() } } }
func (a *AgentEntityWatcher) watchEntity(tag string) (string, error) { entity0, err := a.st.FindEntity(tag) if err != nil { return "", err } entity, ok := entity0.(state.NotifyWatcherFactory) if !ok { return "", NotSupportedError(tag, "watching") } watch := entity.Watch() // Consume the initial event. Technically, API // calls to Watch 'transmit' the initial event // in the Watch response. But NotifyWatchers // have no state to transmit. if _, ok := <-watch.Changes(); ok { return a.resources.Register(watch), nil } return "", watcher.MustErr(watch) }
func (w *MachineUnitsWatcher) loop() (err error) { defer func() { for unit := range w.known { w.st.watcher.Unwatch(w.st.units.Name, unit, w.in) } }() machineCh := make(chan watcher.Change) w.st.watcher.Watch(w.st.machines.Name, w.machine.doc.Id, w.machine.doc.TxnRevno, machineCh) defer w.st.watcher.Unwatch(w.st.machines.Name, w.machine.doc.Id, machineCh) changes, err := w.updateMachine([]string(nil)) if err != nil { return err } out := w.out for { select { case <-w.st.watcher.Dead(): return watcher.MustErr(w.st.watcher) case <-w.tomb.Dying(): return tomb.ErrDying case <-machineCh: changes, err = w.updateMachine(changes) if err != nil { return err } if len(changes) > 0 { out = w.out } case c := <-w.in: changes, err = w.merge(changes, c.Id.(string)) if err != nil { return err } if len(changes) > 0 { out = w.out } case out <- changes: out = nil changes = nil } } panic("unreachable") }
// loop emits an empty-struct notification on w.out whenever any document
// in the cleanups collection changes. The send case starts armed, so an
// initial event is always delivered.
func (w *cleanupWatcher) loop() (err error) {
	in := make(chan watcher.Change)
	w.st.watcher.WatchCollection(w.st.cleanups.Name, in)
	defer w.st.watcher.UnwatchCollection(w.st.cleanups.Name, in)
	out := w.out
	for {
		select {
		case <-w.tomb.Dying():
			return tomb.ErrDying
		case <-w.st.watcher.Dead():
			return watcher.MustErr(w.st.watcher)
		case <-in:
			// Simply emit event for each change.
			out = w.out
		case out <- struct{}{}:
			out = nil
		}
	}
}
func (w *UnitsWatcher) loop(coll, id string, revno int64) error { w.st.watcher.Watch(coll, id, revno, w.in) defer func() { w.st.watcher.Unwatch(coll, id, w.in) for name := range w.life { w.st.watcher.Unwatch(w.st.units.Name, name, w.in) } }() changes, err := w.initial() if err != nil { return err } out := w.out for { select { case <-w.st.watcher.Dead(): return watcher.MustErr(w.st.watcher) case <-w.tomb.Dying(): return tomb.ErrDying case c := <-w.in: name := c.Id.(string) if name == id { changes, err = w.update(changes) } else { changes, err = w.merge(changes, name) } if err != nil { return err } if len(changes) > 0 { out = w.out } case out <- changes: out = nil changes = nil } } return nil }
// watchLoop watches the machine for units added or removed.
func (md *machineData) watchLoop(unitw state.StringsWatcher) {
	defer md.tomb.Done()
	defer watcher.Stop(unitw, &md.tomb)
	for {
		select {
		case <-md.tomb.Dying():
			return
		case change, ok := <-unitw.Changes():
			if !ok {
				// The watcher died; only report its error when the
				// machine itself still exists (removal is expected).
				_, err := md.machine()
				if !errors.IsNotFoundError(err) {
					md.fw.tomb.Kill(watcher.MustErr(unitw))
				}
				return
			}
			// Forward the change to the firewaller, unless dying.
			select {
			case md.fw.unitsChange <- &unitsChange{md, change}:
			case <-md.tomb.Dying():
				return
			}
		}
	}
}
func (d *DeployerAPI) watchOneMachineUnits(entity params.Entity) (params.StringsWatchResult, error) { nothing := params.StringsWatchResult{} if !d.authorizer.AuthOwner(entity.Tag) { return nothing, common.ErrPerm } _, id, err := names.ParseTag(entity.Tag, names.MachineTagKind) if err != nil { return nothing, err } machine, err := d.st.Machine(id) if err != nil { return nothing, err } watch := machine.WatchUnits() // Consume the initial event and forward it to the result. if changes, ok := <-watch.Changes(); ok { return params.StringsWatchResult{ StringsWatcherId: d.resources.Register(watch), Changes: changes, }, nil } return nothing, watcher.MustErr(watch) }