// watchStorageAttachment starts watching the storage attachment with
// the specified storage tag, waits for its first event, and records
// the information in the current snapshot.
func (w *RemoteStateWatcher) watchStorageAttachment(
	tag names.StorageTag,
	life params.Life,
	in apiwatcher.NotifyWatcher,
) error {
	var storageSnapshot StorageSnapshot
	select {
	case <-w.tomb.Dying():
		return tomb.ErrDying
	case _, ok := <-in.Changes():
		if !ok {
			return watcher.EnsureErr(in)
		}
		var err error
		storageSnapshot, err = getStorageSnapshot(w.st, tag, w.unit.Tag())
		if params.IsCodeNotProvisioned(err) {
			// If the storage is unprovisioned, we still want to
			// record the attachment, but we'll mark it as
			// unattached. This allows the uniter to wait for
			// pending storage attachments to be provisioned.
			storageSnapshot = StorageSnapshot{Life: life}
		} else if err != nil {
			return errors.Annotatef(err, "processing initial storage attachment change")
		}
	}
	w.current.Storage[tag] = storageSnapshot
	w.storageAttachmentWatchers[tag] = newStorageAttachmentWatcher(
		w.st, in, w.unit.Tag(), tag, w.storageAttachmentChanges,
	)
	return nil
}
// watchForProxyChanges kicks off a goroutine to listen to the watcher and
// update the proxy settings.
func (u *Uniter) watchForProxyChanges(environWatcher apiwatcher.NotifyWatcher) {
	go func() {
		for {
			select {
			case <-u.tomb.Dying():
				return
			case _, ok := <-environWatcher.Changes():
				logger.Debugf("new environment change")
				if !ok {
					return
				}
				environConfig, err := u.st.EnvironConfig()
				if err != nil {
					logger.Errorf("cannot load environment configuration: %v", err)
				} else {
					u.updatePackageProxy(environConfig)
				}
			}
		}
	}()
}
// WaitForEnviron waits for a valid environment to arrive from
// the given watcher. It terminates with tomb.ErrDying if
// it receives a value on dying.
func WaitForEnviron(w apiwatcher.NotifyWatcher, st EnvironConfigGetter, dying <-chan struct{}) (environs.Environ, error) {
	for {
		select {
		case <-dying:
			return nil, tomb.ErrDying
		case _, ok := <-w.Changes():
			if !ok {
				return nil, watcher.EnsureErr(w)
			}
			config, err := st.EnvironConfig()
			if err != nil {
				return nil, err
			}
			environ, err := environs.New(config)
			if err == nil {
				return environ, nil
			}
			logger.Errorf("loaded invalid environment configuration: %v", err)
			loadedInvalid()
		}
	}
}
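A minimal usage sketch (not part of the original source): a worker that needs a provider connection can block on WaitForEnviron before entering its main loop. The exampleWorker type, its apiWatcher and configGetter fields, and doProviderWork are hypothetical names introduced only for illustration.

// waitThenProvision is a sketch of calling WaitForEnviron from a worker.
// exampleWorker, apiWatcher, configGetter and doProviderWork are assumptions.
func (w *exampleWorker) waitThenProvision() error {
	environ, err := WaitForEnviron(w.apiWatcher, w.configGetter, w.tomb.Dying())
	if err != nil {
		// tomb.ErrDying is returned if the worker is asked to stop while waiting.
		return err
	}
	// environ is now a validated environs.Environ.
	return w.doProviderWork(environ)
}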
func (w *storageprovisioner) loop() error {
	var environConfigChanges <-chan struct{}
	var volumesWatcher apiwatcher.StringsWatcher
	var filesystemsWatcher apiwatcher.StringsWatcher
	var volumesChanges <-chan []string
	var filesystemsChanges <-chan []string
	var volumeAttachmentsWatcher apiwatcher.MachineStorageIdsWatcher
	var filesystemAttachmentsWatcher apiwatcher.MachineStorageIdsWatcher
	var volumeAttachmentsChanges <-chan []params.MachineStorageId
	var filesystemAttachmentsChanges <-chan []params.MachineStorageId
	var machineBlockDevicesWatcher apiwatcher.NotifyWatcher
	var machineBlockDevicesChanges <-chan struct{}
	machineChanges := make(chan names.MachineTag)

	environConfigWatcher, err := w.environ.WatchForEnvironConfigChanges()
	if err != nil {
		return errors.Annotate(err, "watching environ config")
	}
	defer watcher.Stop(environConfigWatcher, &w.tomb)
	environConfigChanges = environConfigWatcher.Changes()

	// Machine-scoped provisioners need to watch block devices, to create
	// volume-backed filesystems.
	if machineTag, ok := w.scope.(names.MachineTag); ok {
		machineBlockDevicesWatcher, err = w.volumes.WatchBlockDevices(machineTag)
		if err != nil {
			return errors.Annotate(err, "watching block devices")
		}
		defer watcher.Stop(machineBlockDevicesWatcher, &w.tomb)
		machineBlockDevicesChanges = machineBlockDevicesWatcher.Changes()
	}

	// The other watchers are started dynamically; stop only if started.
	defer w.maybeStopWatcher(volumesWatcher)
	defer w.maybeStopWatcher(volumeAttachmentsWatcher)
	defer w.maybeStopWatcher(filesystemsWatcher)
	defer w.maybeStopWatcher(filesystemAttachmentsWatcher)

	startWatchers := func() error {
		var err error
		volumesWatcher, err = w.volumes.WatchVolumes()
		if err != nil {
			return errors.Annotate(err, "watching volumes")
		}
		filesystemsWatcher, err = w.filesystems.WatchFilesystems()
		if err != nil {
			return errors.Annotate(err, "watching filesystems")
		}
		volumeAttachmentsWatcher, err = w.volumes.WatchVolumeAttachments()
		if err != nil {
			return errors.Annotate(err, "watching volume attachments")
		}
		filesystemAttachmentsWatcher, err = w.filesystems.WatchFilesystemAttachments()
		if err != nil {
			return errors.Annotate(err, "watching filesystem attachments")
		}
		volumesChanges = volumesWatcher.Changes()
		filesystemsChanges = filesystemsWatcher.Changes()
		volumeAttachmentsChanges = volumeAttachmentsWatcher.Changes()
		filesystemAttachmentsChanges = filesystemAttachmentsWatcher.Changes()
		return nil
	}

	ctx := context{
		scope:                             w.scope,
		storageDir:                        w.storageDir,
		volumeAccessor:                    w.volumes,
		filesystemAccessor:                w.filesystems,
		life:                              w.life,
		machineAccessor:                   w.machines,
		statusSetter:                      w.status,
		time:                              w.clock,
		volumes:                           make(map[names.VolumeTag]storage.Volume),
		volumeAttachments:                 make(map[params.MachineStorageId]storage.VolumeAttachment),
		volumeBlockDevices:                make(map[names.VolumeTag]storage.BlockDevice),
		filesystems:                       make(map[names.FilesystemTag]storage.Filesystem),
		filesystemAttachments:             make(map[params.MachineStorageId]storage.FilesystemAttachment),
		machines:                          make(map[names.MachineTag]*machineWatcher),
		machineChanges:                    machineChanges,
		schedule:                          schedule.NewSchedule(w.clock),
		pendingVolumeBlockDevices:         make(set.Tags),
		incompleteVolumeParams:            make(map[names.VolumeTag]storage.VolumeParams),
		incompleteVolumeAttachmentParams:  make(map[params.MachineStorageId]storage.VolumeAttachmentParams),
		pendingFilesystems:                make(map[names.FilesystemTag]storage.FilesystemParams),
		pendingFilesystemAttachments:      make(map[params.MachineStorageId]storage.FilesystemAttachmentParams),
		pendingDyingFilesystemAttachments: make(map[params.MachineStorageId]storage.FilesystemAttachmentParams),
	}
	ctx.managedFilesystemSource = newManagedFilesystemSource(
		ctx.volumeBlockDevices, ctx.filesystems,
	)
	defer func() {
		for _, w := range ctx.machines {
			w.stop()
		}
	}()

	for {
		// Check if any pending operations can be fulfilled.
		if err := processPending(&ctx); err != nil {
			return errors.Trace(err)
		}

		select {
		case <-w.tomb.Dying():
			return tomb.ErrDying
		case _, ok := <-environConfigChanges:
			if !ok {
				return watcher.EnsureErr(environConfigWatcher)
			}
			environConfig, err := w.environ.EnvironConfig()
			if err != nil {
				return errors.Annotate(err, "getting environ config")
			}
			if ctx.environConfig == nil {
				// We've received the initial environ config,
				// so we can begin provisioning storage.
				if err := startWatchers(); err != nil {
					return err
				}
			}
			ctx.environConfig = environConfig
		case changes, ok := <-volumesChanges:
			if !ok {
				return watcher.EnsureErr(volumesWatcher)
			}
			if err := volumesChanged(&ctx, changes); err != nil {
				return errors.Trace(err)
			}
		case changes, ok := <-volumeAttachmentsChanges:
			if !ok {
				return watcher.EnsureErr(volumeAttachmentsWatcher)
			}
			if err := volumeAttachmentsChanged(&ctx, changes); err != nil {
				return errors.Trace(err)
			}
		case changes, ok := <-filesystemsChanges:
			if !ok {
				return watcher.EnsureErr(filesystemsWatcher)
			}
			if err := filesystemsChanged(&ctx, changes); err != nil {
				return errors.Trace(err)
			}
		case changes, ok := <-filesystemAttachmentsChanges:
			if !ok {
				return watcher.EnsureErr(filesystemAttachmentsWatcher)
			}
			if err := filesystemAttachmentsChanged(&ctx, changes); err != nil {
				return errors.Trace(err)
			}
		case _, ok := <-machineBlockDevicesChanges:
			if !ok {
				return watcher.EnsureErr(machineBlockDevicesWatcher)
			}
			if err := machineBlockDevicesChanged(&ctx); err != nil {
				return errors.Trace(err)
			}
		case machineTag := <-machineChanges:
			if err := refreshMachine(&ctx, machineTag); err != nil {
				return errors.Trace(err)
			}
		case <-ctx.schedule.Next():
			// Ready to pick something(s) off the pending queue.
			if err := processSchedule(&ctx); err != nil {
				return errors.Trace(err)
			}
		}
	}
}
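The loop above defers w.maybeStopWatcher(...) for watchers that are only created once the initial environ config arrives, so the helper must tolerate a watcher that was never started. A minimal sketch of what such a helper might look like, assuming the Stopper interface and the Stop helper from the same watcher package the loop already uses; the exact parameter type and method set are assumptions, not the confirmed implementation.

// maybeStopWatcher is a sketch only: it stops the given watcher if it was
// actually started, and is a no-op for a nil (never-started) watcher.
func (w *storageprovisioner) maybeStopWatcher(wtch watcher.Stopper) {
	if wtch != nil {
		watcher.Stop(wtch, &w.tomb)
	}
}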
func (f *filter) loop(unitTag string) (err error) {
	// TODO(dfc) named return value is a time bomb
	defer func() {
		if params.IsCodeNotFoundOrCodeUnauthorized(err) {
			err = worker.ErrTerminateAgent
		}
	}()
	tag, err := names.ParseUnitTag(unitTag)
	if err != nil {
		return err
	}
	if f.unit, err = f.st.Unit(tag); err != nil {
		return err
	}
	if err = f.unitChanged(); err != nil {
		return err
	}
	f.service, err = f.unit.Service()
	if err != nil {
		return err
	}
	if err = f.serviceChanged(); err != nil {
		return err
	}
	unitw, err := f.unit.Watch()
	if err != nil {
		return err
	}
	defer f.maybeStopWatcher(unitw)
	servicew, err := f.service.Watch()
	if err != nil {
		return err
	}
	defer f.maybeStopWatcher(servicew)
	// configw and relationsw can get restarted, so we need to use
	// their eventual values in the defer calls.
	var configw apiwatcher.NotifyWatcher
	var configChanges <-chan struct{}
	curl, err := f.unit.CharmURL()
	if err == nil {
		configw, err = f.unit.WatchConfigSettings()
		if err != nil {
			return err
		}
		configChanges = configw.Changes()
		f.upgradeFrom.url = curl
	} else if err != uniter.ErrNoCharmURLSet {
		filterLogger.Errorf("unit charm: %v", err)
		return err
	}
	defer func() {
		if configw != nil {
			watcher.Stop(configw, &f.tomb)
		}
	}()
	actionsw, err := f.unit.WatchActions()
	if err != nil {
		return err
	}
	f.actionsPending = make([]string, 0)
	defer func() {
		if actionsw != nil {
			watcher.Stop(actionsw, &f.tomb)
		}
	}()
	relationsw, err := f.service.WatchRelations()
	if err != nil {
		return err
	}
	defer func() {
		if relationsw != nil {
			watcher.Stop(relationsw, &f.tomb)
		}
	}()
	var addressChanges <-chan struct{}
	addressesw, err := f.unit.WatchAddresses()
	if err != nil {
		return err
	}
	defer watcher.Stop(addressesw, &f.tomb)

	// Config events cannot be meaningfully discarded until one is available;
	// once we receive the initial change, we unblock discard requests by
	// setting this channel to its namesake on f.
	var discardConfig chan struct{}
	for {
		var ok bool
		select {
		case <-f.tomb.Dying():
			return tomb.ErrDying

		// Handle watcher changes.
		case _, ok = <-unitw.Changes():
			filterLogger.Debugf("got unit change")
			if !ok {
				return watcher.MustErr(unitw)
			}
			if err = f.unitChanged(); err != nil {
				return err
			}
		case _, ok = <-servicew.Changes():
			filterLogger.Debugf("got service change")
			if !ok {
				return watcher.MustErr(servicew)
			}
			if err = f.serviceChanged(); err != nil {
				return err
			}
		case _, ok = <-configChanges:
			filterLogger.Debugf("got config change")
			if !ok {
				return watcher.MustErr(configw)
			}
			if addressChanges == nil {
				// We start reacting to address changes after the
				// first config-changed is processed, ignoring the
				// initial address changed event.
				addressChanges = addressesw.Changes()
				if _, ok := <-addressChanges; !ok {
					return watcher.MustErr(addressesw)
				}
			}
			filterLogger.Debugf("preparing new config event")
			f.outConfig = f.outConfigOn
			discardConfig = f.discardConfig
		case _, ok = <-addressChanges:
			filterLogger.Debugf("got address change")
			if !ok {
				return watcher.MustErr(addressesw)
			}
			// address change causes config-changed event
			filterLogger.Debugf("preparing new config event")
			f.outConfig = f.outConfigOn
		case ids, ok := <-actionsw.Changes():
			filterLogger.Debugf("got %d actions", len(ids))
			if !ok {
				return watcher.MustErr(actionsw)
			}
			f.actionsPending = append(f.actionsPending, ids...)
			f.nextAction = f.getNextAction()
		case keys, ok := <-relationsw.Changes():
			filterLogger.Debugf("got relations change")
			if !ok {
				return watcher.MustErr(relationsw)
			}
			var ids []int
			for _, key := range keys {
				relationTag := names.NewRelationTag(key).String()
				rel, err := f.st.Relation(relationTag)
				if params.IsCodeNotFoundOrCodeUnauthorized(err) {
					// If it's actually gone, this unit cannot have entered
					// scope, and therefore never needs to know about it.
				} else if err != nil {
					return err
				} else {
					ids = append(ids, rel.Id())
				}
			}
			f.relationsChanged(ids)

		// Send events on active out chans.
		case f.outUpgrade <- f.upgrade:
			filterLogger.Debugf("sent upgrade event")
			f.outUpgrade = nil
		case f.outResolved <- f.resolved:
			filterLogger.Debugf("sent resolved event")
			f.outResolved = nil
		case f.outConfig <- nothing:
			filterLogger.Debugf("sent config event")
			f.outConfig = nil
		case f.outAction <- f.nextAction:
			f.nextAction = f.getNextAction()
			filterLogger.Debugf("sent action event")
		case f.outRelations <- f.relations:
			filterLogger.Debugf("sent relations event")
			f.outRelations = nil
			f.relations = nil

		// Handle explicit requests.
		case curl := <-f.setCharm:
			filterLogger.Debugf("changing charm to %q", curl)
			// We need to restart the config watcher after setting the
			// charm, because service config settings are distinct for
			// different service charms.
			if configw != nil {
				if err := configw.Stop(); err != nil {
					return err
				}
			}
			if err := f.unit.SetCharmURL(curl); err != nil {
				filterLogger.Debugf("failed setting charm url %q: %v", curl, err)
				return err
			}
			select {
			case <-f.tomb.Dying():
				return tomb.ErrDying
			case f.didSetCharm <- nothing:
			}
			configw, err = f.unit.WatchConfigSettings()
			if err != nil {
				return err
			}
			configChanges = configw.Changes()

			// Restart the relations watcher.
			if err := relationsw.Stop(); err != nil {
				return err
			}
			relationsw, err = f.service.WatchRelations()
			if err != nil {
				return err
			}
			f.upgradeFrom.url = curl
			if err = f.upgradeChanged(); err != nil {
				return err
			}
		case force := <-f.wantForcedUpgrade:
			filterLogger.Debugf("want forced upgrade %v", force)
			f.upgradeFrom.force = force
			if err = f.upgradeChanged(); err != nil {
				return err
			}
		case <-f.wantResolved:
			filterLogger.Debugf("want resolved event")
			if f.resolved != params.ResolvedNone {
				f.outResolved = f.outResolvedOn
			}
		case <-f.clearResolved:
			filterLogger.Debugf("resolved event handled")
			f.outResolved = nil
			if err := f.unit.ClearResolved(); err != nil {
				return err
			}
			if err := f.unitChanged(); err != nil {
				return err
			}
			select {
			case <-f.tomb.Dying():
				return tomb.ErrDying
			case f.didClearResolved <- nothing:
			}
		case <-discardConfig:
			filterLogger.Debugf("discarded config event")
			f.outConfig = nil
		}
	}
}
func stopWatcher(c *gc.C, w apiwatcher.NotifyWatcher) {
	err := w.Stop()
	c.Check(err, jc.ErrorIsNil)
}
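A usage sketch for the test helper above (not part of the original source), assuming a gocheck suite with a unit whose Watch method returns an apiwatcher.NotifyWatcher; the suite type and its unit field are hypothetical names.

// TestUnitWatcherStops is a sketch only: exampleSuite and s.unit are assumptions.
func (s *exampleSuite) TestUnitWatcherStops(c *gc.C) {
	w, err := s.unit.Watch()
	c.Assert(err, jc.ErrorIsNil)
	// stopWatcher fails the test if the watcher does not stop cleanly.
	defer stopWatcher(c, w)
	// ... exercise the watcher ...
}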
func (f *filter) loop(unitTag names.UnitTag) (err error) {
	// TODO(dfc) named return value is a time bomb
	defer func() {
		if params.IsCodeNotFoundOrCodeUnauthorized(err) {
			err = worker.ErrTerminateAgent
		}
	}()
	if f.unit, err = f.st.Unit(unitTag); err != nil {
		return err
	}
	if err = f.unitChanged(); err != nil {
		return err
	}
	if err = f.meterStatusChanged(); err != nil {
		return err
	}
	f.service, err = f.unit.Service()
	if err != nil {
		return err
	}
	if err = f.serviceChanged(); err != nil {
		return err
	}
	unitw, err := f.unit.Watch()
	if err != nil {
		return err
	}
	defer f.maybeStopWatcher(unitw)
	servicew, err := f.service.Watch()
	if err != nil {
		return err
	}
	defer f.maybeStopWatcher(servicew)
	// configw and relationsw can get restarted, so we need to use
	// their eventual values in the defer calls.
	var configw apiwatcher.NotifyWatcher
	var configChanges <-chan struct{}
	curl, err := f.unit.CharmURL()
	if err == nil {
		configw, err = f.unit.WatchConfigSettings()
		if err != nil {
			return err
		}
		configChanges = configw.Changes()
		f.upgradeFrom.url = curl
	} else if err != uniter.ErrNoCharmURLSet {
		filterLogger.Errorf("unit charm: %v", err)
		return err
	}
	defer f.maybeStopWatcher(configw)
	actionsw, err := f.unit.WatchActionNotifications()
	if err != nil {
		return err
	}
	f.actionsPending = make([]string, 0)
	defer f.maybeStopWatcher(actionsw)
	relationsw, err := f.service.WatchRelations()
	if err != nil {
		return err
	}
	defer f.maybeStopWatcher(relationsw)
	meterStatusw, err := f.unit.WatchMeterStatus()
	if err != nil {
		return err
	}
	defer f.maybeStopWatcher(meterStatusw)
	addressesw, err := f.unit.WatchAddresses()
	if err != nil {
		return err
	}
	defer watcher.Stop(addressesw, &f.tomb)
	storagew, err := f.unit.WatchStorage()
	if err != nil {
		return err
	}
	defer watcher.Stop(storagew, &f.tomb)
	leaderSettingsw, err := f.st.LeadershipSettings.WatchLeadershipSettings(f.service.Tag().Id())
	if err != nil {
		return err
	}
	defer watcher.Stop(leaderSettingsw, &f.tomb)

	// Ignore external requests for leader settings behaviour until we see the first change.
	var discardLeaderSettings <-chan struct{}
	var wantLeaderSettings <-chan bool
	// By default we send all leaderSettings onwards.
	sendLeaderSettings := true

	// Config events cannot be meaningfully discarded until one is available;
	// once we receive the initial config and address changes, we unblock
	// discard requests by setting this channel to its namesake on f.
	var discardConfig chan struct{}
	var seenConfigChange bool
	var seenAddressChange bool
	maybePrepareConfigEvent := func() {
		if !seenAddressChange {
			filterLogger.Debugf("no address change seen yet, skipping config event")
			return
		}
		if !seenConfigChange {
			filterLogger.Debugf("no config change seen yet, skipping config event")
			return
		}
		filterLogger.Debugf("preparing new config event")
		f.outConfig = f.outConfigOn
		discardConfig = f.discardConfig
	}

	for {
		var ok bool
		select {
		case <-f.tomb.Dying():
			return tomb.ErrDying

		// Handle watcher changes.
		case _, ok = <-unitw.Changes():
			filterLogger.Debugf("got unit change")
			if !ok {
				return watcher.EnsureErr(unitw)
			}
			if err = f.unitChanged(); err != nil {
				return err
			}
		case _, ok = <-servicew.Changes():
			filterLogger.Debugf("got service change")
			if !ok {
				return watcher.EnsureErr(servicew)
			}
			if err = f.serviceChanged(); err != nil {
				return err
			}
		case _, ok = <-configChanges:
			filterLogger.Debugf("got config change")
			if !ok {
				return watcher.EnsureErr(configw)
			}
			seenConfigChange = true
			maybePrepareConfigEvent()
		case _, ok = <-addressesw.Changes():
			filterLogger.Debugf("got address change")
			if !ok {
				return watcher.EnsureErr(addressesw)
			}
			seenAddressChange = true
			maybePrepareConfigEvent()
		case _, ok = <-meterStatusw.Changes():
			filterLogger.Debugf("got meter status change")
			if !ok {
				return watcher.EnsureErr(meterStatusw)
			}
			if err = f.meterStatusChanged(); err != nil {
				return errors.Trace(err)
			}
		case ids, ok := <-actionsw.Changes():
			filterLogger.Debugf("got %d actions", len(ids))
			if !ok {
				return watcher.EnsureErr(actionsw)
			}
			f.actionsPending = append(f.actionsPending, ids...)
			f.nextAction = f.getNextAction()
		case keys, ok := <-relationsw.Changes():
			filterLogger.Debugf("got relations change")
			if !ok {
				return watcher.EnsureErr(relationsw)
			}
			var ids []int
			for _, key := range keys {
				relationTag := names.NewRelationTag(key)
				rel, err := f.st.Relation(relationTag)
				if params.IsCodeNotFoundOrCodeUnauthorized(err) {
					// If it's actually gone, this unit cannot have entered
					// scope, and therefore never needs to know about it.
				} else if err != nil {
					return err
				} else {
					ids = append(ids, rel.Id())
				}
			}
			f.relationsChanged(ids)
		case ids, ok := <-storagew.Changes():
			filterLogger.Debugf("got storage change")
			if !ok {
				return watcher.EnsureErr(storagew)
			}
			tags := make([]names.StorageTag, len(ids))
			for i, id := range ids {
				tag := names.NewStorageTag(id)
				tags[i] = tag
			}
			f.storageChanged(tags)
		case _, ok = <-leaderSettingsw.Changes():
			filterLogger.Debugf("got leader settings change: ok=%t", ok)
			if !ok {
				return watcher.EnsureErr(leaderSettingsw)
			}
			if sendLeaderSettings {
				// only send the leader settings changed event
				// if it hasn't been explicitly disabled
				f.outLeaderSettings = f.outLeaderSettingsOn
			} else {
				filterLogger.Debugf("not sending leader settings change (want=false)")
			}
			discardLeaderSettings = f.discardLeaderSettings
			wantLeaderSettings = f.wantLeaderSettings

		// Send events on active out chans.
		case f.outUpgrade <- f.upgrade:
			filterLogger.Debugf("sent upgrade event")
			f.outUpgrade = nil
		case f.outResolved <- f.resolved:
			filterLogger.Debugf("sent resolved event")
			f.outResolved = nil
		case f.outConfig <- nothing:
			filterLogger.Debugf("sent config event")
			f.outConfig = nil
		case f.outLeaderSettings <- nothing:
			filterLogger.Debugf("sent leader settings event")
			f.outLeaderSettings = nil
		case f.outAction <- f.nextAction:
			f.nextAction = f.getNextAction()
			filterLogger.Debugf("sent action event")
		case f.outRelations <- f.relations:
			filterLogger.Debugf("sent relations event")
			f.outRelations = nil
			f.relations = nil
		case f.outMeterStatus <- nothing:
			filterLogger.Debugf("sent meter status change event")
			f.outMeterStatus = nil
		case f.outStorage <- f.storage:
			filterLogger.Debugf("sent storage event")
			f.outStorage = nil
			f.storage = nil

		// Handle explicit requests.
		case curl := <-f.setCharm:
			filterLogger.Debugf("changing charm to %q", curl)
			// We need to restart the config watcher after setting the
			// charm, because service config settings are distinct for
			// different service charms.
			if configw != nil {
				if err := configw.Stop(); err != nil {
					return err
				}
			}
			if err := f.unit.SetCharmURL(curl); err != nil {
				filterLogger.Debugf("failed setting charm url %q: %v", curl, err)
				return err
			}
			select {
			case <-f.tomb.Dying():
				return tomb.ErrDying
			case f.didSetCharm <- nothing:
			}
			configw, err = f.unit.WatchConfigSettings()
			if err != nil {
				return err
			}
			configChanges = configw.Changes()

			// Restart the relations watcher.
			if err := relationsw.Stop(); err != nil {
				return err
			}
			relationsw, err = f.service.WatchRelations()
			if err != nil {
				return err
			}
			f.upgradeFrom.url = curl
			if err = f.upgradeChanged(); err != nil {
				return err
			}
		case force := <-f.wantForcedUpgrade:
			filterLogger.Debugf("want forced upgrade %v", force)
			f.upgradeFrom.force = force
			if err = f.upgradeChanged(); err != nil {
				return err
			}
		case <-f.wantResolved:
			filterLogger.Debugf("want resolved event")
			if f.resolved != params.ResolvedNone {
				f.outResolved = f.outResolvedOn
			}
		case sendEvents := <-wantLeaderSettings:
			filterLogger.Debugf("want leader settings event: %t", sendEvents)
			sendLeaderSettings = sendEvents
			if sendEvents {
				// go ahead and send an event right now,
				// they're waiting for us
				f.outLeaderSettings = f.outLeaderSettingsOn
			} else {
				// Make sure we don't have a pending event
				f.outLeaderSettings = nil
			}
		case <-f.clearResolved:
			filterLogger.Debugf("resolved event handled")
			f.outResolved = nil
			if err := f.unit.ClearResolved(); err != nil {
				return err
			}
			if err := f.unitChanged(); err != nil {
				return err
			}
			select {
			case <-f.tomb.Dying():
				return tomb.ErrDying
			case f.didClearResolved <- nothing:
			}
		case <-discardConfig:
			filterLogger.Debugf("discarded config event")
			f.outConfig = nil
		case <-discardLeaderSettings:
			filterLogger.Debugf("discarded leader settings event")
			f.outLeaderSettings = nil
		}
	}
}