func (s *FastPeriodSuite) TestEnsureErr(c *gc.C) {
	err := watcher.EnsureErr(&dummyWatcher{stderrors.New("POW")})
	c.Assert(err, gc.ErrorMatches, "POW")

	err = watcher.EnsureErr(&dummyWatcher{tomb.ErrStillAlive})
	c.Assert(err, gc.ErrorMatches, "expected .* to be stopped: tomb: still alive")

	err = watcher.EnsureErr(&dummyWatcher{nil})
	c.Assert(err, gc.ErrorMatches, "expected an error from .*, got nil")
}
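// NOTE: the test above relies on a dummyWatcher stub that is not shown in
// this collection. A minimal sketch consistent with the assertions might
// look like the following; the exact signature of EnsureErr and its message
// formatting are inferred from the test, not confirmed here. Requires "fmt"
// and the tomb package used elsewhere in these examples.

// dummyWatcher wraps a fixed error so tests can exercise EnsureErr.
type dummyWatcher struct {
	err error
}

// Err returns the stubbed error, satisfying the minimal interface
// EnsureErr needs.
func (w *dummyWatcher) Err() error {
	return w.err
}

// ensureErrSketch mirrors the behaviour the test expects of
// watcher.EnsureErr: pass a real error through, but complain when the
// watcher is still alive or reports no error at all.
func ensureErrSketch(w *dummyWatcher) error {
	switch err := w.Err(); err {
	case nil:
		return fmt.Errorf("expected an error from %v, got nil", w)
	case tomb.ErrStillAlive:
		return fmt.Errorf("expected %v to be stopped: %v", w, err)
	default:
		return err
	}
}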
// WatchLoggingConfig starts a watcher to track changes to the logging config
// for the agents specified. Unfortunately the current infrastructure makes
// watching parts of the config non-trivial, so currently any change to the
// config will cause the watcher to notify the client.
func (api *LoggerAPI) WatchLoggingConfig(arg params.Entities) params.NotifyWatchResults {
	result := make([]params.NotifyWatchResult, len(arg.Entities))
	for i, entity := range arg.Entities {
		tag, err := names.ParseTag(entity.Tag)
		if err != nil {
			result[i].Error = common.ServerError(err)
			continue
		}
		err = common.ErrPerm
		if api.authorizer.AuthOwner(tag) {
			watch := api.state.WatchForModelConfigChanges()
			// Consume the initial event. Technically, API calls to Watch
			// 'transmit' the initial event in the Watch response. But
			// NotifyWatchers have no state to transmit.
			if _, ok := <-watch.Changes(); ok {
				result[i].NotifyWatcherId = api.resources.Register(watch)
				err = nil
			} else {
				err = watcher.EnsureErr(watch)
			}
		}
		result[i].Error = common.ServerError(err)
	}
	return params.NotifyWatchResults{Results: result}
}
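// The consume-the-initial-event dance above recurs almost verbatim in
// WatchAPIVersion and WatchRetryStrategy below. A hypothetical helper
// capturing the shared pattern might look like this; firstNotifyResult is
// an illustrative name, not an existing Juju function.
func firstNotifyResult(resources *common.Resources, w state.NotifyWatcher) (params.NotifyWatchResult, error) {
	// Consume the initial event; register the watcher only if the event
	// arrives, otherwise surface the watcher's error.
	if _, ok := <-w.Changes(); ok {
		return params.NotifyWatchResult{
			NotifyWatcherId: resources.Register(w),
		}, nil
	}
	return params.NotifyWatchResult{}, watcher.EnsureErr(w)
}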
// startMachine creates a new data value for tracking details of the
// machine and starts watching the machine for units added or removed.
func (fw *Firewaller) startMachine(tag names.MachineTag) error {
	machined := &machineData{
		fw:           fw,
		tag:          tag,
		unitds:       make(map[names.UnitTag]*unitData),
		openedPorts:  make([]network.PortRange, 0),
		definedPorts: make(map[network.PortRange]names.UnitTag),
	}
	m, err := machined.machine()
	if params.IsCodeNotFound(err) {
		return nil
	} else if err != nil {
		return errors.Annotate(err, "cannot watch machine units")
	}
	unitw, err := m.WatchUnits()
	if err != nil {
		return err
	}
	select {
	case <-fw.tomb.Dying():
		return tomb.ErrDying
	case change, ok := <-unitw.Changes():
		if !ok {
			return watcher.EnsureErr(unitw)
		}
		fw.machineds[tag] = machined
		err = fw.unitsChanged(&unitsChange{machined, change})
		if err != nil {
			delete(fw.machineds, tag)
			return errors.Annotatef(err, "cannot respond to units changes for %q", tag)
		}
	}
	go machined.watchLoop(unitw)
	return nil
}
func (s *storageSource) loop() error {
	defer close(s.changes)

	var inChanges <-chan struct{}
	var outChanges chan<- hook.SourceChange
	var outChange hook.SourceChange
	ready := make(chan struct{}, 1)
	ready <- struct{}{}

	for {
		select {
		case <-s.tomb.Dying():
			return tomb.ErrDying
		case <-ready:
			inChanges = s.watcher.Changes()
		case _, ok := <-inChanges:
			logger.Debugf("got storage attachment change")
			if !ok {
				return watcher.EnsureErr(s.watcher)
			}
			inChanges = nil
			outChanges = s.changes
			outChange = func() error {
				defer func() {
					ready <- struct{}{}
				}()
				logger.Debugf("processing storage source change")
				return s.update()
			}
		case outChanges <- outChange:
			logger.Debugf("sent storage source change")
			outChanges = nil
			outChange = nil
		}
	}
}
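// The storageSource loop leans on a standard Go idiom: sends and receives
// on a nil channel block forever, so nilling a channel variable disables
// that select case until it is re-armed (the provisioner task below gates
// harvestModeChan the same way). A self-contained illustration, unrelated
// to Juju's types:
package main

import "fmt"

func main() {
	in := make(chan int)
	var out chan int // nil: the send case below is disabled
	var pending int

	go func() { in <- 42 }()

	for done := false; !done; {
		select {
		case v := <-in:
			pending = v
			in = nil                // disarm receive: nothing more to read
			out = make(chan int, 1) // arm the send case
		case out <- pending:
			fmt.Println("forwarded", pending)
			done = true
		}
	}
}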
func (u *Uniter) terminate() error {
	w, err := u.unit.Watch()
	if err != nil {
		return errors.Trace(err)
	}
	defer watcher.Stop(w, &u.tomb)
	for {
		select {
		case <-u.tomb.Dying():
			return tomb.ErrDying
		case _, ok := <-w.Changes():
			if !ok {
				return watcher.EnsureErr(w)
			}
			if err := u.unit.Refresh(); err != nil {
				return errors.Trace(err)
			}
			if hasSubs, err := u.unit.HasSubordinates(); err != nil {
				return errors.Trace(err)
			} else if hasSubs {
				continue
			}
			// The unit is known to be Dying; so if it didn't have subordinates
			// just above, it can't acquire new ones before this call.
			if err := u.unit.EnsureDead(); err != nil {
				return errors.Trace(err)
			}
			return worker.ErrTerminateAgent
		}
	}
}
func (s *StorageAPI) watchOneStorageAttachment(id params.StorageAttachmentId, canAccess func(names.Tag) bool) (params.NotifyWatchResult, error) {
	// Watching a storage attachment is implemented as watching the
	// underlying volume or filesystem attachment. The only thing
	// we don't necessarily see in doing this is the lifecycle state
	// changes, but these may be observed by using the
	// WatchUnitStorageAttachments watcher.
	nothing := params.NotifyWatchResult{}
	unitTag, err := names.ParseUnitTag(id.UnitTag)
	if err != nil || !canAccess(unitTag) {
		return nothing, common.ErrPerm
	}
	storageTag, err := names.ParseStorageTag(id.StorageTag)
	if err != nil {
		return nothing, err
	}
	machineTag, err := s.st.UnitAssignedMachine(unitTag)
	if err != nil {
		return nothing, err
	}
	watch, err := common.WatchStorageAttachment(s.st, storageTag, machineTag, unitTag)
	if err != nil {
		return nothing, errors.Trace(err)
	}
	if _, ok := <-watch.Changes(); ok {
		return params.NotifyWatchResult{
			NotifyWatcherId: s.resources.Register(watch),
		}, nil
	}
	return nothing, watcher.EnsureErr(watch)
}
// WatchBlockDevices watches for changes to the specified machines' block devices.
func (s *StorageProvisionerAPI) WatchBlockDevices(args params.Entities) (params.NotifyWatchResults, error) {
	canAccess, err := s.getBlockDevicesAuthFunc()
	if err != nil {
		return params.NotifyWatchResults{}, common.ServerError(common.ErrPerm)
	}
	results := params.NotifyWatchResults{
		Results: make([]params.NotifyWatchResult, len(args.Entities)),
	}
	one := func(arg params.Entity) (string, error) {
		machineTag, err := names.ParseMachineTag(arg.Tag)
		if err != nil {
			return "", err
		}
		if !canAccess(machineTag) {
			return "", common.ErrPerm
		}
		w := s.st.WatchBlockDevices(machineTag)
		if _, ok := <-w.Changes(); ok {
			return s.resources.Register(w), nil
		}
		return "", watcher.EnsureErr(w)
	}
	for i, arg := range args.Entities {
		var result params.NotifyWatchResult
		id, err := one(arg)
		if err != nil {
			result.Error = common.ServerError(err)
		} else {
			result.NotifyWatcherId = id
		}
		results.Results[i] = result
	}
	return results, nil
}
func (u *UndertakerAPI) environResourceWatcher() params.NotifyWatchResult {
	var nothing params.NotifyWatchResult
	machines, err := u.st.AllMachines()
	if err != nil {
		nothing.Error = common.ServerError(err)
		return nothing
	}
	services, err := u.st.AllServices()
	if err != nil {
		nothing.Error = common.ServerError(err)
		return nothing
	}
	var watchers []state.NotifyWatcher
	for _, machine := range machines {
		watchers = append(watchers, machine.Watch())
	}
	for _, service := range services {
		watchers = append(watchers, service.Watch())
	}
	watch := common.NewMultiNotifyWatcher(watchers...)
	if _, ok := <-watch.Changes(); ok {
		return params.NotifyWatchResult{
			NotifyWatcherId: u.resources.Register(watch),
		}
	}
	nothing.Error = common.ServerError(watcher.EnsureErr(watch))
	return nothing
}
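// common.NewMultiNotifyWatcher (implementation not shown here) merges
// several notify watchers into one. The core fan-in idea can be sketched
// with plain channels; this illustrates the technique only, and omits the
// event coalescing and error propagation a real multi-watcher needs.
// Requires "sync".
func fanIn(sources ...<-chan struct{}) <-chan struct{} {
	out := make(chan struct{})
	var wg sync.WaitGroup
	wg.Add(len(sources))
	for _, src := range sources {
		go func(c <-chan struct{}) {
			defer wg.Done()
			// Forward every notification from this source.
			for range c {
				out <- struct{}{}
			}
		}(src)
	}
	go func() {
		// Close out once every source channel is exhausted.
		wg.Wait()
		close(out)
	}()
	return out
}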
func (p *ProvisionerAPI) watchOneMachineContainers(arg params.WatchContainer) (params.StringsWatchResult, error) {
	nothing := params.StringsWatchResult{}
	canAccess, err := p.getAuthFunc()
	if err != nil {
		return nothing, common.ErrPerm
	}
	tag, err := names.ParseMachineTag(arg.MachineTag)
	if err != nil {
		return nothing, common.ErrPerm
	}
	if !canAccess(tag) {
		return nothing, common.ErrPerm
	}
	machine, err := p.st.Machine(tag.Id())
	if err != nil {
		return nothing, err
	}
	var watch state.StringsWatcher
	if arg.ContainerType != "" {
		watch = machine.WatchContainers(instance.ContainerType(arg.ContainerType))
	} else {
		watch = machine.WatchAllContainers()
	}
	// Consume the initial event and forward it to the result.
	if changes, ok := <-watch.Changes(); ok {
		return params.StringsWatchResult{
			StringsWatcherId: p.resources.Register(watch),
			Changes:          changes,
		}, nil
	}
	return nothing, watcher.EnsureErr(watch)
}
func (obs *EnvironObserver) loop() error {
	for {
		select {
		case <-obs.tomb.Dying():
			return nil
		case _, ok := <-obs.environWatcher.Changes():
			if !ok {
				return watcher.EnsureErr(obs.environWatcher)
			}
		}
		config, err := obs.st.EnvironConfig()
		if err != nil {
			logger.Warningf("error reading environment config: %v", err)
			continue
		}
		environ, err := environs.New(config)
		if err != nil {
			logger.Warningf("error creating an environment: %v", err)
			continue
		}
		obs.mu.Lock()
		obs.environ = environ
		obs.mu.Unlock()
	}
}
func (s *storageAttachmentWatcher) loop() error {
	for {
		select {
		case <-s.tomb.Dying():
			return tomb.ErrDying
		case _, ok := <-s.watcher.Changes():
			if !ok {
				return watcher.EnsureErr(s.watcher)
			}
			snapshot, err := getStorageSnapshot(
				s.st, s.storageTag, s.unitTag,
			)
			if params.IsCodeNotFound(err) {
				// The storage attachment was removed
				// from state, so we can stop watching.
				return nil
			} else if params.IsCodeNotProvisioned(err) {
				// We do not care about unattached
				// storage here.
				continue
			} else if err != nil {
				return err
			}
			change := storageAttachmentChange{
				s.storageTag,
				snapshot,
			}
			select {
			case <-s.tomb.Dying():
				return tomb.ErrDying
			case s.changes <- change:
			}
		}
	}
}
// WatchAPIVersion starts a watcher to track if there is a new version
// of the API that we want to upgrade to.
func (u *UpgraderAPI) WatchAPIVersion(args params.Entities) (params.NotifyWatchResults, error) {
	result := params.NotifyWatchResults{
		Results: make([]params.NotifyWatchResult, len(args.Entities)),
	}
	for i, agent := range args.Entities {
		tag, err := names.ParseTag(agent.Tag)
		if err != nil {
			return params.NotifyWatchResults{}, errors.Trace(err)
		}
		err = common.ErrPerm
		if u.authorizer.AuthOwner(tag) {
			watch := u.st.WatchForModelConfigChanges()
			// Consume the initial event. Technically, API
			// calls to Watch 'transmit' the initial event
			// in the Watch response. But NotifyWatchers
			// have no state to transmit.
			if _, ok := <-watch.Changes(); ok {
				result.Results[i].NotifyWatcherId = u.resources.Register(watch)
				err = nil
			} else {
				err = watcher.EnsureErr(watch)
			}
		}
		result.Results[i].Error = common.ServerError(err)
	}
	return result, nil
}
// WatchRetryStrategy watches for changes to the environment. Currently we only allow
// changes to the boolean that determines whether retries should be attempted or not.
func (h *RetryStrategyAPI) WatchRetryStrategy(args params.Entities) (params.NotifyWatchResults, error) {
	results := params.NotifyWatchResults{
		Results: make([]params.NotifyWatchResult, len(args.Entities)),
	}
	canAccess, err := h.accessUnit()
	if err != nil {
		return params.NotifyWatchResults{}, errors.Trace(err)
	}
	for i, entity := range args.Entities {
		tag, err := names.ParseTag(entity.Tag)
		if err != nil {
			results.Results[i].Error = common.ServerError(err)
			continue
		}
		err = common.ErrPerm
		if canAccess(tag) {
			watch := h.st.WatchForModelConfigChanges()
			// Consume the initial event. Technically, API calls to Watch
			// 'transmit' the initial event in the Watch response. But
			// NotifyWatchers have no state to transmit.
			if _, ok := <-watch.Changes(); ok {
				results.Results[i].NotifyWatcherId = h.resources.Register(watch)
				err = nil
			} else {
				err = watcher.EnsureErr(watch)
			}
		}
		results.Results[i].Error = common.ServerError(err)
	}
	return results, nil
}
// watchStorageAttachment starts watching the storage attachment with
// the specified storage tag, waits for its first event, and records
// the information in the current snapshot.
func (w *RemoteStateWatcher) watchStorageAttachment(
	tag names.StorageTag,
	life params.Life,
	in apiwatcher.NotifyWatcher,
) error {
	var storageSnapshot StorageSnapshot
	select {
	case <-w.tomb.Dying():
		return tomb.ErrDying
	case _, ok := <-in.Changes():
		if !ok {
			return watcher.EnsureErr(in)
		}
		var err error
		storageSnapshot, err = getStorageSnapshot(w.st, tag, w.unit.Tag())
		if params.IsCodeNotProvisioned(err) {
			// If the storage is unprovisioned, we still want to
			// record the attachment, but we'll mark it as
			// unattached. This allows the uniter to wait for
			// pending storage attachments to be provisioned.
			storageSnapshot = StorageSnapshot{Life: life}
		} else if err != nil {
			return errors.Annotatef(err, "processing initial storage attachment change")
		}
	}
	w.current.Storage[tag] = storageSnapshot
	w.storageAttachmentWatchers[tag] = newStorageAttachmentWatcher(
		w.st, in, w.unit.Tag(), tag, w.storageAttachmentChanges,
	)
	return nil
}
// WatchAPIHostPorts watches the API server addresses.
func (api *APIAddresser) WatchAPIHostPorts() (params.NotifyWatchResult, error) {
	watch := api.getter.WatchAPIHostPorts()
	if _, ok := <-watch.Changes(); ok {
		return params.NotifyWatchResult{
			NotifyWatcherId: api.resources.Register(watch),
		}, nil
	}
	return params.NotifyWatchResult{}, watcher.EnsureErr(watch)
}
func (task *provisionerTask) loop() error {
	logger.Infof("Starting up provisioner task %s", task.machineTag)
	defer watcher.Stop(task.machineWatcher, &task.tomb)

	// Don't allow the harvesting mode to change until we have read at
	// least one set of changes, which will populate the task.machines
	// map. Otherwise we will potentially see all legitimate instances
	// as unknown.
	var harvestModeChan chan config.HarvestMode

	// Not all provisioners have a retry channel.
	var retryChan <-chan struct{}
	if task.retryWatcher != nil {
		retryChan = task.retryWatcher.Changes()
	}

	// When the watcher is started, it will have the initial changes be all
	// the machines that are relevant. Also, since this is available straight
	// away, we know there will be some changes right off the bat.
	for {
		select {
		case <-task.tomb.Dying():
			logger.Infof("Shutting down provisioner task %s", task.machineTag)
			return tomb.ErrDying
		case ids, ok := <-task.machineWatcher.Changes():
			if !ok {
				return watcher.EnsureErr(task.machineWatcher)
			}
			if err := task.processMachines(ids); err != nil {
				return errors.Annotate(err, "failed to process updated machines")
			}
			// We've seen a set of changes. Enable modification of
			// harvesting mode.
			harvestModeChan = task.harvestModeChan
		case harvestMode := <-harvestModeChan:
			if harvestMode == task.harvestMode {
				break
			}
			logger.Infof("harvesting mode changed to %s", harvestMode)
			task.harvestMode = harvestMode
			if harvestMode.HarvestUnknown() {
				logger.Infof("harvesting unknown machines")
				if err := task.processMachines(nil); err != nil {
					return errors.Annotate(err, "failed to process machines after safe mode disabled")
				}
			}
		case <-retryChan:
			if err := task.processMachinesWithTransientErrors(); err != nil {
				return errors.Annotate(err, "failed to process machines with transient errors")
			}
		}
	}
}
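// One subtlety in the harvestMode case above: break inside a select exits
// only the select statement, not the enclosing for loop, so an unchanged
// mode simply goes back to waiting. A minimal demonstration of this Go
// behaviour:
package main

import "fmt"

func main() {
	ch := make(chan int, 2)
	ch <- 1
	ch <- 2
	for i := 0; i < 2; i++ {
		select {
		case v := <-ch:
			if v == 1 {
				break // exits the select only; the for loop continues
			}
			fmt.Println("processed", v)
		}
	}
	// Prints: processed 2
}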
// oneWatch does auth, and watcher creation/registration, for a single
// entity.
func (facade *Facade) oneWatch(tagString string) (string, error) {
	if err := facade.auth(tagString); err != nil {
		return "", errors.Trace(err)
	}
	watch := facade.backend.WatchMigrationPhase()
	if _, ok := <-watch.Changes(); ok {
		return facade.resources.Register(watch), nil
	}
	return "", watcher.EnsureErr(watch)
}
// WatchUnitAssignments returns a strings watcher that is notified when new unit
// assignments are added to the db.
func (a *API) WatchUnitAssignments() (params.StringsWatchResult, error) {
	watch := a.st.WatchForUnitAssignment()
	if changes, ok := <-watch.Changes(); ok {
		return params.StringsWatchResult{
			StringsWatcherId: a.res.Register(watch),
			Changes:          changes,
		}, nil
	}
	return params.StringsWatchResult{}, watcher.EnsureErr(watch)
}
func (f *FirewallerAPI) watchOneEnvironOpenedPorts(tag names.Tag) (string, []string, error) {
	// NOTE: tag is ignored, as there is only one environment in the
	// state DB. Once this changes, change the code below accordingly.
	watch := f.st.WatchOpenedPorts()
	// Consume the initial event and forward it to the result.
	if changes, ok := <-watch.Changes(); ok {
		return f.resources.Register(watch), changes, nil
	}
	return "", nil, watcher.EnsureErr(watch)
}
// WatchCleanups watches for cleanups to be performed in state.
func (api *CleanerAPI) WatchCleanups() (params.NotifyWatchResult, error) {
	watch := api.st.WatchCleanups()
	if _, ok := <-watch.Changes(); ok {
		return params.NotifyWatchResult{
			NotifyWatcherId: api.resources.Register(watch),
		}, nil
	}
	return params.NotifyWatchResult{
		Error: common.ServerError(watcher.EnsureErr(watch)),
	}, nil
}
// Watch starts watching for an active migration for the model
// associated with the API connection. The returned id should be used
// with the NotifyWatcher facade to receive events.
func (api *API) Watch() params.NotifyWatchResult {
	watch := api.backend.WatchForMigration()
	if _, ok := <-watch.Changes(); ok {
		return params.NotifyWatchResult{
			NotifyWatcherId: api.resources.Register(watch),
		}
	}
	return params.NotifyWatchResult{
		Error: common.ServerError(watcher.EnsureErr(watch)),
	}
}
func (m *MeterStatusAPI) watchOneUnitMeterStatus(tag names.UnitTag) (string, error) {
	unit, err := m.state.Unit(tag.Id())
	if err != nil {
		return "", err
	}
	watch := unit.WatchMeterStatus()
	if _, ok := <-watch.Changes(); ok {
		return m.resources.Register(watch), nil
	}
	return "", watcher.EnsureErr(watch)
}
func (u *uniterBaseAPI) watchOneUnitMeterStatus(tag names.UnitTag) (string, error) {
	unit, err := u.getUnit(tag)
	if err != nil {
		return "", err
	}
	watch := unit.WatchMeterStatus()
	if _, ok := <-watch.Changes(); ok {
		return u.resources.Register(watch), nil
	}
	return "", watcher.EnsureErr(watch)
}
func (u *UniterAPIV3) watchOneRelationUnit(relUnit *state.RelationUnit) (params.RelationUnitsWatchResult, error) {
	watch := relUnit.Watch()
	// Consume the initial event and forward it to the result.
	if changes, ok := <-watch.Changes(); ok {
		return params.RelationUnitsWatchResult{
			RelationUnitsWatcherId: u.resources.Register(watch),
			Changes:                changes,
		}, nil
	}
	return params.RelationUnitsWatchResult{}, watcher.EnsureErr(watch)
}
// Watch returns a watcher that sends the names of services whose
// unit count may be below their configured minimum.
func (facade *Facade) Watch() (params.StringsWatchResult, error) {
	watch := facade.backend.WatchScaledServices()
	if changes, ok := <-watch.Changes(); ok {
		id := facade.resources.Register(watch)
		return params.StringsWatchResult{
			StringsWatcherId: id,
			Changes:          changes,
		}, nil
	}
	return params.StringsWatchResult{}, watcher.EnsureErr(watch)
}
func (n *NetworkerAPI) watchOneMachineInterfaces(id string) (string, error) {
	machine, err := n.st.Machine(id)
	if err != nil {
		return "", err
	}
	watch := machine.WatchInterfaces()
	// Consume the initial event.
	if _, ok := <-watch.Changes(); ok {
		return n.resources.Register(watch), nil
	}
	return "", watcher.EnsureErr(watch)
}
func (w *activeStatusWorker) loop() error {
	code, info, err := w.stateFile.Read()
	if err != nil {
		return errors.Trace(err)
	}

	// Check current meter status before entering loop.
	currentCode, currentInfo, err := w.status.MeterStatus()
	if err != nil {
		return errors.Trace(err)
	}
	if code != currentCode || info != currentInfo {
		err = w.runHook(currentCode, currentInfo)
		if err != nil {
			return errors.Trace(err)
		}
		code, info = currentCode, currentInfo
	}

	watch, err := w.status.WatchMeterStatus()
	if err != nil {
		return errors.Trace(err)
	}
	defer watcher.Stop(watch, &w.tomb)

	// This function is used in tests to signal entering the worker loop.
	if w.init != nil {
		w.init()
	}
	for {
		select {
		case _, ok := <-watch.Changes():
			logger.Debugf("got meter status change")
			if !ok {
				return watcher.EnsureErr(watch)
			}
			currentCode, currentInfo, err := w.status.MeterStatus()
			if err != nil {
				return errors.Trace(err)
			}
			if currentCode == code && currentInfo == info {
				continue
			}
			err = w.runHook(currentCode, currentInfo)
			if err != nil {
				return errors.Trace(err)
			}
			code, info = currentCode, currentInfo
		case <-w.tomb.Dying():
			return tomb.ErrDying
		}
	}
}
func (m *API) watchRemovalsForTag(tag string) (string, error) {
	err := m.checkModelAuthorization(tag)
	if err != nil {
		return "", errors.Trace(err)
	}
	watch := m.backend.WatchMachineRemovals()
	if _, ok := <-watch.Changes(); ok {
		return m.resources.Register(watch), nil
	}
	return "", watcher.EnsureErr(watch)
}
func leadershipSettingsAccessorFactory(
	st *state.State,
	resources *common.Resources,
	auth common.Authorizer,
) *leadershipapiserver.LeadershipSettingsAccessor {
	registerWatcher := func(serviceId string) (string, error) {
		settingsWatcher := st.WatchLeadershipSettings(serviceId)
		if _, ok := <-settingsWatcher.Changes(); ok {
			return resources.Register(settingsWatcher), nil
		}
		return "", watcher.EnsureErr(settingsWatcher)
	}
	// TODO(katco-): <2015-01-21 Wed>
	// Due to time constraints, we're translating between
	// map[string]interface{} and map[string]string. At some point we
	// should support a native read of this format straight from
	// state.
	getSettings := func(serviceId string) (map[string]string, error) {
		settings, err := st.ReadLeadershipSettings(serviceId)
		if err != nil {
			return nil, err
		}
		// Perform the conversion.
		rawMap := settings.Map()
		leadershipSettings := make(map[string]string)
		for k, v := range rawMap {
			leadershipSettings[k] = v.(string)
		}
		return leadershipSettings, nil
	}
	writeSettings := func(serviceId string, settings map[string]string) error {
		currentSettings, err := st.ReadLeadershipSettings(serviceId)
		if err != nil {
			return err
		}
		rawSettings := make(map[string]interface{})
		for k, v := range settings {
			rawSettings[k] = v
		}
		currentSettings.Update(rawSettings)
		_, err = currentSettings.Write()
		return errors.Annotate(err, "could not write changes")
	}
	ldrMgr := leadership.NewLeadershipManager(lease.Manager())
	return leadershipapiserver.NewLeadershipSettingsAccessor(
		auth,
		registerWatcher,
		getSettings,
		writeSettings,
		ldrMgr.Leader,
	)
}
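// Note that the v.(string) assertion in getSettings above panics if a
// leadership setting is ever stored as a non-string. A defensive variant of
// the conversion (illustrative only; convertSettings is a hypothetical
// name) would use the two-value type assertion:
func convertSettings(rawMap map[string]interface{}) (map[string]string, error) {
	leadershipSettings := make(map[string]string)
	for k, v := range rawMap {
		// The comma-ok form reports an error instead of panicking.
		s, ok := v.(string)
		if !ok {
			return nil, errors.Errorf("unexpected type %T for leadership setting %q", v, k)
		}
		leadershipSettings[k] = s
	}
	return leadershipSettings, nil
}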
// WatchMachineErrorRetry returns a NotifyWatcher that notifies when
// the provisioner should retry provisioning machines with transient errors.
func (p *ProvisionerAPI) WatchMachineErrorRetry() (params.NotifyWatchResult, error) {
	result := params.NotifyWatchResult{}
	if !p.authorizer.AuthModelManager() {
		return result, common.ErrPerm
	}
	watch := newWatchMachineErrorRetry()
	// Consume any initial event and forward it to the result.
	if _, ok := <-watch.Changes(); ok {
		result.NotifyWatcherId = p.resources.Register(watch)
	} else {
		return result, watcher.EnsureErr(watch)
	}
	return result, nil
}