func (s *ContainerSetupSuite) setupContainerWorker(c *gc.C, tag names.MachineTag) (worker.StringsWatchHandler, worker.Runner) { testing.PatchExecutable(c, s, "ubuntu-cloudimg-query", containertesting.FakeLxcURLScript) runner := worker.NewRunner(allFatal, noImportance) pr := s.st.Provisioner() machine, err := pr.Machine(tag) c.Assert(err, jc.ErrorIsNil) err = machine.SetSupportedContainers(instance.ContainerTypes...) c.Assert(err, jc.ErrorIsNil) cfg := s.AgentConfigForTag(c, tag) watcherName := fmt.Sprintf("%s-container-watcher", machine.Id()) params := provisioner.ContainerSetupParams{ Runner: runner, WorkerName: watcherName, SupportedContainers: instance.ContainerTypes, ImageURLGetter: &containertesting.MockURLGetter{}, Machine: machine, Provisioner: pr, Config: cfg, InitLock: s.initLock, } handler := provisioner.NewContainerSetupHandler(params) runner.StartWorker(watcherName, func() (worker.Worker, error) { return worker.NewStringsWorker(handler), nil }) return handler, runner }
// NewDeployer returns a Worker that deploys and recalls unit agents // via ctx, taking a machine id to operate on. func NewDeployer(st *apideployer.State, ctx Context) worker.Worker { d := &Deployer{ st: st, ctx: ctx, } return worker.NewStringsWorker(d) }
func newWorkerWithReleaser(st stateAddresser, releaser releaser) worker.Worker { a := &addresserHandler{ st: st, releaser: releaser, } w := worker.NewStringsWorker(a) return w }
func (s *stringsWorkerSuite) SetUpTest(c *gc.C) { s.BaseSuite.SetUpTest(c) s.actor = &stringsHandler{ actions: nil, handled: make(chan []string, 1), watcher: &testStringsWatcher{ changes: make(chan []string), }, } s.worker = worker.NewStringsWorker(s.actor) }
// updateSupportedContainers records in state that a machine can run the specified containers.
// It starts a watcher and when a container of a given type is first added to the machine,
// the watcher is killed, the machine is set up to be able to start containers of the given type,
// and a suitable provisioner is started.
func (a *MachineAgent) updateSupportedContainers(
	runner worker.Runner,
	st *api.State,
	machineTag string,
	containers []instance.ContainerType,
	agentConfig agent.Config,
) error {
	pr := st.Provisioner()
	tag, err := names.ParseMachineTag(machineTag)
	if err != nil {
		return err
	}
	machine, err := pr.Machine(tag)
	// A machine that is gone or dead means this agent has no work left.
	// Note the precedence: && binds tighter than ||, so machine.Life()
	// is only evaluated when err is nil and machine is usable.
	if errors.IsNotFound(err) || err == nil && machine.Life() == params.Dead {
		return worker.ErrTerminateAgent
	}
	if err != nil {
		return errors.Annotatef(err, "cannot load machine %s from state", tag)
	}
	if len(containers) == 0 {
		// No supported container types: record that explicitly in state
		// and skip starting a container watcher altogether.
		if err := machine.SupportsNoContainers(); err != nil {
			return errors.Annotatef(err, "clearing supported containers for %s", tag)
		}
		return nil
	}
	if err := machine.SetSupportedContainers(containers...); err != nil {
		return errors.Annotatef(err, "setting supported containers for %s", tag)
	}
	// The lock serializes container setup with hook execution on this machine.
	initLock, err := hookExecutionLock(agentConfig.DataDir())
	if err != nil {
		return err
	}
	// Start the watcher to fire when a container is first requested on the machine.
	watcherName := fmt.Sprintf("%s-container-watcher", machine.Id())
	handler := provisioner.NewContainerSetupHandler(
		runner,
		watcherName,
		containers,
		machine,
		pr,
		agentConfig,
		initLock,
	)
	// The worker is deferred until any pending upgrade has completed.
	a.startWorkerAfterUpgrade(runner, watcherName, func() (worker.Worker, error) {
		return worker.NewStringsWorker(handler), nil
	})
	return nil
}
func (s *ContainerSetupSuite) setupContainerWorker(c *gc.C, tag names.MachineTag) (worker.StringsWatchHandler, worker.Runner) { runner := worker.NewRunner(allFatal, noImportance) pr := s.st.Provisioner() machine, err := pr.Machine(tag) c.Assert(err, gc.IsNil) err = machine.SetSupportedContainers(instance.ContainerTypes...) c.Assert(err, gc.IsNil) cfg := s.AgentConfigForTag(c, tag) watcherName := fmt.Sprintf("%s-container-watcher", machine.Id()) handler := provisioner.NewContainerSetupHandler(runner, watcherName, instance.ContainerTypes, machine, pr, cfg, s.initLock) runner.StartWorker(watcherName, func() (worker.Worker, error) { return worker.NewStringsWorker(handler), nil }) return handler, runner }
// NewWorker returns a worker that keeps track of IP address // lifecycles, releaseing and removing dead addresses. func NewWorker(api *apiaddresser.API) (worker.Worker, error) { ok, err := api.CanDeallocateAddresses() if err != nil { return nil, errors.Annotate(err, "checking address deallocation") } if !ok { // Environment does not support IP address // deallocation. logger.Debugf("address deallocation not supported; not starting worker") return worker.FinishedWorker{}, nil } ah := &addresserHandler{ api: api, } aw := worker.NewStringsWorker(ah) return aw, nil }
func (s *stringsWorkerSuite) TestHandleErrorStopsWorkerAndWatcher(c *gc.C) { s.stopWorker(c) actor := &stringsHandler{ actions: nil, handled: make(chan []string, 1), handlerError: fmt.Errorf("my handling error"), watcher: &testStringsWatcher{ changes: make(chan []string), }, } w := worker.NewStringsWorker(actor) actor.watcher.TriggerChange(c, []string{"aa", "bb"}) waitForHandledStrings(c, actor.handled, []string{"aa", "bb"}) err := waitShort(c, w) c.Check(err, gc.ErrorMatches, "my handling error") actor.CheckActions(c, "setup", "handler", "teardown") c.Check(actor.watcher.stopped, jc.IsTrue) }
func (s *stringsWorkerSuite) TestSetUpFailureStopsWithTearDown(c *gc.C) { // Stop the worker and SetUp again, this time with an error s.stopWorker(c) actor := &stringsHandler{ actions: nil, handled: make(chan []string, 1), setupError: fmt.Errorf("my special error"), watcher: &testStringsWatcher{ changes: make(chan []string), }, } w := worker.NewStringsWorker(actor) err := waitShort(c, w) c.Check(err, gc.ErrorMatches, "my special error") // TearDown is not called on SetUp error. actor.CheckActions(c, "setup") c.Check(actor.watcher.stopped, jc.IsTrue) }
func newStringsHandlerWorker(c *gc.C, setupError, handlerError, teardownError error) (*stringsHandler, worker.Worker) { sh := &stringsHandler{ actions: nil, handled: make(chan []string, 1), setupError: setupError, teardownError: teardownError, handlerError: handlerError, watcher: &testStringsWatcher{ changes: make(chan []string), }, setupDone: make(chan struct{}), } w := worker.NewStringsWorker(sh) select { case <-sh.setupDone: case <-time.After(coretesting.ShortWait): c.Error("Failed waiting for stringsHandler.Setup to be called during SetUpTest") } return sh, w }
// updateSupportedContainers records in state that a machine can run the specified containers.
// It starts a watcher and when a container of a given type is first added to the machine,
// the watcher is killed, the machine is set up to be able to start containers of the given type,
// and a suitable provisioner is started.
func (a *MachineAgent) updateSupportedContainers(
	runner worker.Runner,
	st *api.State,
	tag string,
	containers []instance.ContainerType,
	agentConfig agent.Config,
) error {
	pr := st.Provisioner()
	machine, err := pr.Machine(tag)
	if err != nil {
		return fmt.Errorf("%s is not in state: %v", tag, err)
	}
	if len(containers) == 0 {
		// No supported container types: record that explicitly in state
		// and skip starting a container watcher altogether.
		if err := machine.SupportsNoContainers(); err != nil {
			return fmt.Errorf("clearing supported containers for %s: %v", tag, err)
		}
		return nil
	}
	if err := machine.SetSupportedContainers(containers...); err != nil {
		return fmt.Errorf("setting supported containers for %s: %v", tag, err)
	}
	// The lock serializes container setup with hook execution on this machine.
	initLock, err := hookExecutionLock(agentConfig.DataDir())
	if err != nil {
		return err
	}
	// Start the watcher to fire when a container is first requested on the machine.
	watcherName := fmt.Sprintf("%s-container-watcher", machine.Id())
	handler := provisioner.NewContainerSetupHandler(
		runner,
		watcherName,
		containers,
		machine,
		pr,
		agentConfig,
		initLock,
	)
	// The worker is deferred until any pending upgrade has completed.
	a.startWorkerAfterUpgrade(runner, watcherName, func() (worker.Worker, error) {
		return worker.NewStringsWorker(handler), nil
	})
	return nil
}
// New returns a worker that watches for changes reported by the given
// UnitAssigner and passes them to it for processing.
func New(ua UnitAssigner) worker.Worker {
	return worker.NewStringsWorker(unitAssigner{api: ua})
}
// NewMinUnitsWorker returns a Worker that runs service.EnsureMinUnits() // if the number of alive units belonging to a service decreases, or if the // minimum required number of units for a service is increased. func NewMinUnitsWorker(st *state.State) worker.Worker { mu := &MinUnitsWorker{st: st} return worker.NewStringsWorker(mu) }