// NewMachiner returns a Worker that will wait for the identified machine
// to become Dying and make it Dead; or until the machine becomes Dead by
// other means.
//
// The machineDead function will be called immediately after the machine's
// lifecycle is updated to Dead.
func NewMachiner(cfg Config) (worker.Worker, error) {
	if err := cfg.Validate(); err != nil {
		return nil, errors.Annotate(err, "validating config")
	}
	mr := &Machiner{config: cfg}
	return worker.NewNotifyWorker(mr), nil
}
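// Every constructor in this collection hands a handler to worker.NewNotifyWorker,
// which drives a worker.NotifyWatchHandler (see the interface assertion in the
// proxyWorker snippet below). The following is a minimal sketch of such a handler,
// not code from this repository: the exampleHandler type is hypothetical, and the
// exact package and return type of SetUp (assumed here to be watcher.NotifyWatcher)
// have varied between Juju versions.
type exampleHandler struct{}

// SetUp is assumed to acquire resources and return the notify watcher whose
// change events drive Handle.
func (h *exampleHandler) SetUp() (watcher.NotifyWatcher, error) {
	return nil, nil
}

// Handle is assumed to be invoked once for every change event delivered by the
// watcher; returning a non-nil error stops the worker with that error.
func (h *exampleHandler) Handle() error {
	return nil
}

// TearDown is assumed to release whatever SetUp acquired; it runs when the
// worker stops.
func (h *exampleHandler) TearDown() error {
	return nil
}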
// NewWorker returns a worker that keeps track of
// the machine's authorised ssh keys and ensures the
// ~/.ssh/authorized_keys file is up to date.
func NewWorker(st *keyupdater.State, agentConfig agent.Config) worker.Worker {
	if os.HostOS() == os.Windows {
		return worker.NewNoOpWorker()
	}
	kw := &keyupdaterWorker{st: st, tag: agentConfig.Tag().(names.MachineTag)}
	return worker.NewNotifyWorker(kw)
}
// NewConnectedStatusWorker creates a new worker that monitors the meter status of the
// unit and runs the meter-status-changed hook appropriately.
func NewConnectedStatusWorker(cfg ConnectedConfig) (worker.Worker, error) {
	handler, err := NewConnectedStatusHandler(cfg)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return worker.NewNotifyWorker(handler), nil
}
// NewWorker returns a worker that keeps track of
// the machine's authorised ssh keys and ensures the
// ~/.ssh/authorized_keys file is up to date.
func NewWorker(st *keyupdater.State, agentConfig agent.Config) worker.Worker {
	if version.Current.OS == version.Windows {
		return worker.NewNoOpWorker()
	}
	kw := &keyupdaterWorker{st: st, tag: agentConfig.Tag().(names.MachineTag)}
	return worker.NewNotifyWorker(kw)
}
// NewRsyslogConfigWorker returns a worker.Worker that uses
// WatchForRsyslogChanges and updates rsyslog configuration based
// on changes. The worker will remove the configuration file
// on teardown.
func NewRsyslogConfigWorker(st *apirsyslog.State, mode RsyslogMode, tag names.Tag, namespace string, stateServerAddrs []string) (worker.Worker, error) {
	handler, err := newRsyslogConfigHandler(st, mode, tag, namespace, stateServerAddrs)
	if err != nil {
		return nil, err
	}
	logger.Debugf("starting rsyslog worker mode %v for %q %q", mode, tag, namespace)
	return worker.NewNotifyWorker(handler), nil
}
// NewLogger returns a worker.Worker that uses the notify watcher returned
// from the setup.
func NewLogger(api *logger.State, agentConfig agent.Config) worker.Worker {
	logger := &Logger{
		api:         api,
		agentConfig: agentConfig,
		lastConfig:  loggo.LoggerInfo(),
	}
	log.Debugf("initial log config: %q", logger.lastConfig)
	return worker.NewNotifyWorker(logger)
}
// NewRsyslogConfigWorker returns a worker.Worker that uses
// WatchForRsyslogChanges and updates rsyslog configuration based
// on changes. The worker will remove the configuration file
// on teardown.
func NewRsyslogConfigWorker(st *apirsyslog.State, mode RsyslogMode, tag names.Tag, namespace string, stateServerAddrs []string) (worker.Worker, error) {
	if version.Current.OS == version.Windows && mode == RsyslogModeAccumulate {
		return worker.NewNoOpWorker(), nil
	}
	handler, err := newRsyslogConfigHandler(st, mode, tag, namespace, stateServerAddrs)
	if err != nil {
		return nil, err
	}
	logger.Debugf("starting rsyslog worker mode %v for %q %q", mode, tag, namespace)
	return worker.NewNotifyWorker(handler), nil
}
// NewCertificateUpdater returns a worker.Worker that watches for changes to
// machine addresses and then generates a new state server certificate with those
// addresses in the certificate's SAN value.
func NewCertificateUpdater(addressWatcher AddressWatcher, getter StateServingInfoGetter,
	configGetter EnvironConfigGetter, setter StateServingInfoSetter,
	certChanged chan params.StateServingInfo,
) worker.Worker {
	return worker.NewNotifyWorker(&CertificateUpdater{
		addressWatcher: addressWatcher,
		configGetter:   configGetter,
		getter:         getter,
		setter:         setter,
		certChanged:    certChanged,
	})
}
func (s *notifyWorkerSuite) SetUpTest(c *gc.C) {
	s.BaseSuite.SetUpTest(c)
	s.actor = &notifyHandler{
		actions: nil,
		handled: make(chan struct{}, 1),
		watcher: &testNotifyWatcher{
			changes: make(chan struct{}),
		},
	}
	s.worker = worker.NewNotifyWorker(s.actor)
}
// NewCertificateUpdater returns a worker.Worker that watches for changes to
// machine addresses and then generates a new state server certificate with those
// addresses in the certificate's SAN value.
func NewCertificateUpdater(addressWatcher AddressWatcher, getter StateServingInfoGetter,
	configGetter EnvironConfigGetter, hostPortsGetter APIHostPortsGetter,
	setter StateServingInfoSetter,
) worker.Worker {
	return worker.NewNotifyWorker(&CertificateUpdater{
		addressWatcher:  addressWatcher,
		configGetter:    configGetter,
		hostPortsGetter: hostPortsGetter,
		getter:          getter,
		setter:          setter,
	})
}
func NewReboot(st *reboot.State, agentConfig agent.Config, machineLock *fslock.Lock) (worker.Worker, error) {
	tag, ok := agentConfig.Tag().(names.MachineTag)
	if !ok {
		return nil, errors.Errorf("Expected names.MachineTag, got %T: %v", agentConfig.Tag(), agentConfig.Tag())
	}
	r := &Reboot{
		st:          st,
		tag:         tag,
		machineLock: machineLock,
	}
	return worker.NewNotifyWorker(r), nil
}
// NewNetworker returns a Worker that handles machine networking
// configuration. If there is no /etc/network/interfaces file, an
// error is returned.
func NewNetworker(st *apinetworker.State, agentConfig agent.Config) (worker.Worker, error) {
	nw := &networker{
		st:  st,
		tag: agentConfig.Tag().String(),
	}
	// Verify we have /etc/network/interfaces first, otherwise bail out.
	if !CanStart() {
		err := fmt.Errorf("missing %q config file", configFileName)
		logger.Infof("not starting worker: %v", err)
		return nil, err
	}
	return worker.NewNotifyWorker(nw), nil
}
// NewMachineEnvironmentWorker returns a worker.Worker that uses the notify
// watcher returned from the setup.
func NewMachineEnvironmentWorker(api *environment.Facade, agentConfig agent.Config) worker.Worker {
	// We don't write out system files for the local provider on machine zero
	// as that is the host machine.
	writeSystemFiles := (agentConfig.Tag() != names.NewMachineTag("0").String() ||
		agentConfig.Value(agent.ProviderType) != provider.Local)
	logger.Debugf("write system files: %v", writeSystemFiles)
	envWorker := &MachineEnvironmentWorker{
		api:              api,
		writeSystemFiles: writeSystemFiles,
		first:            true,
	}
	return worker.NewNotifyWorker(envWorker)
}
func (s *notifyWorkerSuite) TestHandleErrorStopsWorkerAndWatcher(c *gc.C) {
	s.stopWorker(c)
	actor := &notifyHandler{
		actions:      nil,
		handled:      make(chan struct{}, 1),
		handlerError: fmt.Errorf("my handling error"),
		watcher: &testNotifyWatcher{
			changes: make(chan struct{}),
		},
	}
	w := worker.NewNotifyWorker(actor)
	actor.watcher.TriggerChange(c)
	waitForHandledNotify(c, actor.handled)
	err := waitShort(c, w)
	c.Check(err, gc.ErrorMatches, "my handling error")
	actor.CheckActions(c, "setup", "handler", "teardown")
	c.Check(actor.watcher.stopped, jc.IsTrue)
}
func (s *notifyWorkerSuite) TestSetUpFailureStopsWithTearDown(c *gc.C) {
	// Stop the worker and SetUp again, this time with an error.
	s.stopWorker(c)
	actor := &notifyHandler{
		actions:    nil,
		handled:    make(chan struct{}, 1),
		setupError: fmt.Errorf("my special error"),
		watcher: &testNotifyWatcher{
			changes: make(chan struct{}),
		},
	}
	w := worker.NewNotifyWorker(actor)
	err := waitShort(c, w)
	c.Check(err, gc.ErrorMatches, "my special error")
	// As the test name indicates, TearDown still runs after a SetUp failure.
	actor.CheckActions(c, "setup", "teardown")
	c.Check(actor.watcher.stopped, jc.IsTrue)
}
func newNotifyHandlerWorker(c *gc.C, setupError, handlerError, teardownError error) (*notifyHandler, worker.Worker) {
	nh := &notifyHandler{
		actions:       nil,
		handled:       make(chan struct{}, 1),
		setupError:    setupError,
		teardownError: teardownError,
		handlerError:  handlerError,
		watcher: &testNotifyWatcher{
			changes: make(chan struct{}),
		},
		setupDone: make(chan struct{}),
	}
	w := worker.NewNotifyWorker(nh)
	select {
	case <-nh.setupDone:
	case <-time.After(coretesting.ShortWait):
		c.Error("Failed waiting for notifyHandler.Setup to be called during SetUpTest")
	}
	return nh, w
}
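// A hypothetical usage of the helper above (this test is illustrative and not
// part of the suite): build a handler whose TearDown fails, stop the worker,
// and check that the teardown error is what Wait reports. stopWorker and
// CheckActions are the suite helpers used in the surrounding tests; the test
// name is assumed.
func (s *notifyWorkerSuite) TestTearDownErrorSurfacesExample(c *gc.C) {
	s.stopWorker(c)
	actor, w := newNotifyHandlerWorker(c, nil, nil, fmt.Errorf("teardown failed"))
	// Killing the worker triggers TearDown; its error should surface from Wait.
	w.Kill()
	err := w.Wait()
	c.Check(err, gc.ErrorMatches, "teardown failed")
	actor.CheckActions(c, "setup", "teardown")
}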
// NewAPIAddressUpdater returns a worker.Worker that watches for changes to
// API addresses and then sets them on the APIAddressSetter.
func NewAPIAddressUpdater(addresser APIAddresser, setter APIAddressSetter) worker.Worker {
	return worker.NewNotifyWorker(&APIAddressUpdater{
		addresser: addresser,
		setter:    setter,
	})
}
// NewWorker returns a worker that keeps track of
// the machine's authorised ssh keys and ensures the
// ~/.ssh/authorized_keys file is up to date.
func NewWorker(st *keyupdater.State, agentConfig agent.Config) worker.Worker {
	kw := &keyupdaterWorker{st: st, tag: agentConfig.Tag()}
	return worker.NewNotifyWorker(kw)
}
	// disk.
	first bool
}

var _ worker.NotifyWatchHandler = (*proxyWorker)(nil)

// New returns a worker.Worker that updates proxy environment variables for the
// process; and, if writeSystemFiles is true, for the whole machine.
var New = func(api *environment.Facade, writeSystemFiles bool) worker.Worker {
	logger.Debugf("write system files: %v", writeSystemFiles)
	envWorker := &proxyWorker{
		api:              api,
		writeSystemFiles: writeSystemFiles,
		first:            true,
	}
	return worker.NewNotifyWorker(envWorker)
}

func (w *proxyWorker) writeEnvironmentFile() error {
	// Writing the environment file is handled by executing the script for two
	// primary reasons:
	//
	// 1: In order to have the local provider specify the environment settings
	// for the machine agent running on the host, this worker needs to run,
	// but it shouldn't be touching any files on the disk. If however there is
	// an ubuntu user, it will. This shouldn't be a problem.
	//
	// 2: On cloud-instance ubuntu images, the ubuntu user is uid 1000, but in
	// the situation where the ubuntu user has been created as a part of the
	// manual provisioning process, the user will exist, and will not have the
	// same uid/gid as the default cloud image.
// NewMachiner returns a Worker that will wait for the identified machine
// to become Dying and make it Dead; or until the machine becomes Dead by
// other means.
func NewMachiner(st MachineAccessor, agentConfig agent.Config) worker.Worker {
	mr := &Machiner{st: st, tag: agentConfig.Tag().(names.MachineTag)}
	return worker.NewNotifyWorker(mr)
}
// NewMachiner returns a Worker that will wait for the identified machine
// to become Dying and make it Dead; or until the machine becomes Dead by
// other means.
func NewMachiner(st *machiner.State, agentConfig agent.Config) worker.Worker {
	// TODO(dfc) clearly agentConfig.Tag() can _only_ return a machine tag
	mr := &Machiner{st: st, tag: agentConfig.Tag().(names.MachineTag)}
	return worker.NewNotifyWorker(mr)
}
// NewMachiner returns a Worker that will wait for the identified machine
// to become Dying and make it Dead; or until the machine becomes Dead by
// other means.
func NewMachiner(st MachineAccessor, agentConfig agent.Config, ignoreAddressesOnStart bool) worker.Worker {
	mr := &Machiner{
		st:                     st,
		tag:                    agentConfig.Tag().(names.MachineTag),
		ignoreAddressesOnStart: ignoreAddressesOnStart,
	}
	return worker.NewNotifyWorker(mr)
}
// NewCleaner returns a worker.Worker that runs state.Cleanup()
// if the CleanupWatcher signals documents marked for deletion.
func NewCleaner(st *state.State) worker.Worker {
	return worker.NewNotifyWorker(&Cleaner{st: st})
}
// NewCleaner returns a worker.Worker that runs state.Cleanup()
// if the CleanupWatcher signals documents marked for deletion.
func NewCleaner(st StateCleaner) worker.Worker {
	return worker.NewNotifyWorker(&Cleaner{st})
}
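// For context, a rough sketch of the handler methods that would back the
// StateCleaner-based constructor above. This is not the repository's
// implementation: the WatchCleanups method name, the watcher.NotifyWatcher
// return type, and the decision to log rather than return the cleanup error
// are assumptions made for illustration.
func (c *Cleaner) SetUp() (watcher.NotifyWatcher, error) {
	// Assumed: the watcher fires whenever documents are marked for deletion.
	return c.st.WatchCleanups(), nil
}

func (c *Cleaner) Handle() error {
	if err := c.st.Cleanup(); err != nil {
		// Log and keep running so the next signal retries the cleanup
		// (an illustrative choice, not necessarily what the real worker does).
		logger.Errorf("cannot cleanup state: %v", err)
	}
	return nil
}

func (c *Cleaner) TearDown() error {
	return nil
}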
// NewMachiner returns a Worker that will wait for the identified machine
// to become Dying and make it Dead; or until the machine becomes Dead by
// other means.
func NewMachiner(st *machiner.State, agentConfig agent.Config) worker.Worker {
	mr := &Machiner{st: st, tag: agentConfig.Tag()}
	return worker.NewNotifyWorker(mr)
}