func (*ManifoldSuite) TestAgentEntity_Error(c *gc.C) {
    manifold := resumer.Manifold(resumer.ManifoldConfig{
        AgentName:     "agent",
        APICallerName: "api-caller",
    })
    stub := &testing.Stub{}
    stub.SetErrors(errors.New("zap"))
    apiCaller := &fakeAPICaller{stub: stub}

    worker, err := manifold.Start(dt.StubContext(nil, map[string]interface{}{
        "agent":      &fakeAgent{},
        "api-caller": apiCaller,
    }))
    workertest.CheckNilOrKill(c, worker)
    c.Check(err, gc.ErrorMatches, "zap")

    stub.CheckCalls(c, []testing.StubCall{{
        FuncName: "Agent.GetEntities",
        Args: []interface{}{params.Entities{
            Entities: []params.Entity{{Tag: "machine-123"}},
        }},
    }})
}

func (s *ManifoldSuite) TestNewWorker_Error(c *gc.C) {
    clock := &fakeClock{}
    facade := &fakeFacade{}
    manifold := resumer.Manifold(resumer.ManifoldConfig{
        AgentName:     "agent",
        APICallerName: "api-caller",
        Clock:         clock,
        Interval:      time.Hour,
        NewFacade: func(base.APICaller) (resumer.Facade, error) {
            return facade, nil
        },
        NewWorker: func(actual resumer.Config) (worker.Worker, error) {
            c.Check(actual, jc.DeepEquals, resumer.Config{
                Facade:   facade,
                Clock:    clock,
                Interval: time.Hour,
            })
            return nil, errors.New("blam")
        },
    })

    worker, err := manifold.Start(dt.StubContext(nil, map[string]interface{}{
        "agent":      &fakeAgent{},
        "api-caller": newFakeAPICaller(multiwatcher.JobManageModel),
    }))
    workertest.CheckNilOrKill(c, worker)
    c.Check(err, gc.ErrorMatches, "blam")
}

func (*ManifoldSuite) TestInputs(c *gc.C) {
    manifold := resumer.Manifold(resumer.ManifoldConfig{
        AgentName:     "bill",
        APICallerName: "ben",
    })
    expect := []string{"bill", "ben"}
    c.Check(manifold.Inputs, jc.DeepEquals, expect)
}

func (s *ManifoldSuite) TestNonAgentErrors(c *gc.C) {
    config := resumer.ManifoldConfig(workertesting.PostUpgradeManifoldTestConfig())
    _, err := workertesting.RunPostUpgradeManifold(
        resumer.Manifold(config),
        &fakeAgent{tag: names.NewUserTag("foo")},
        &fakeAPIConn{})
    c.Assert(err, gc.ErrorMatches, "this manifold may only be used inside a machine agent")
    c.Assert(s.newCalled, jc.IsFalse)
}

func (s *ManifoldSuite) TestMachineNonManagerErrors(c *gc.C) {
    config := resumer.ManifoldConfig(workertesting.PostUpgradeManifoldTestConfig())
    _, err := workertesting.RunPostUpgradeManifold(
        resumer.Manifold(config),
        &fakeAgent{tag: names.NewMachineTag("42")},
        &fakeAPIConn{machineJob: multiwatcher.JobHostUnits})
    c.Assert(err, gc.Equals, dependency.ErrMissing)
    c.Assert(s.newCalled, jc.IsFalse)
}

func (s *ManifoldSuite) TestMachine(c *gc.C) {
    config := resumer.ManifoldConfig(workertesting.PostUpgradeManifoldTestConfig())
    _, err := workertesting.RunPostUpgradeManifold(
        resumer.Manifold(config),
        &fakeAgent{tag: names.NewMachineTag("42")},
        &fakeAPIConn{machineJob: multiwatcher.JobManageModel})
    c.Assert(err, jc.ErrorIsNil)
    c.Assert(s.newCalled, jc.IsTrue)
}

func (*ManifoldSuite) TestMissingAPICaller(c *gc.C) {
    manifold := resumer.Manifold(resumer.ManifoldConfig{
        AgentName:     "agent",
        APICallerName: "api-caller",
    })
    worker, err := manifold.Start(dt.StubContext(nil, map[string]interface{}{
        "agent":      &fakeAgent{},
        "api-caller": dependency.ErrMissing,
    }))
    workertest.CheckNilOrKill(c, worker)
    c.Check(err, gc.Equals, dependency.ErrMissing)
}

func (s *ManifoldSuite) TestNewFacade_Missing(c *gc.C) {
    manifold := resumer.Manifold(resumer.ManifoldConfig{
        AgentName:     "agent",
        APICallerName: "api-caller",
    })
    worker, err := manifold.Start(dt.StubContext(nil, map[string]interface{}{
        "agent":      &fakeAgent{},
        "api-caller": newFakeAPICaller(multiwatcher.JobManageModel),
    }))
    workertest.CheckNilOrKill(c, worker)
    c.Check(err, gc.Equals, dependency.ErrUninstall)
}

func (s *ManifoldSuite) TestNewFacade_Error(c *gc.C) {
    apiCaller := newFakeAPICaller(multiwatcher.JobManageModel)
    manifold := resumer.Manifold(resumer.ManifoldConfig{
        AgentName:     "agent",
        APICallerName: "api-caller",
        NewFacade: func(actual base.APICaller) (resumer.Facade, error) {
            c.Check(actual, gc.Equals, apiCaller)
            return nil, errors.New("pow")
        },
    })
    worker, err := manifold.Start(dt.StubContext(nil, map[string]interface{}{
        "agent":      &fakeAgent{},
        "api-caller": apiCaller,
    }))
    workertest.CheckNilOrKill(c, worker)
    c.Check(err, gc.ErrorMatches, "pow")
}

func (s *ManifoldSuite) TestNewWorker_Success(c *gc.C) {
    expect := &fakeWorker{}
    manifold := resumer.Manifold(resumer.ManifoldConfig{
        AgentName:     "agent",
        APICallerName: "api-caller",
        NewFacade: func(base.APICaller) (resumer.Facade, error) {
            return &fakeFacade{}, nil
        },
        NewWorker: func(actual resumer.Config) (worker.Worker, error) {
            return expect, nil
        },
    })
    actual, err := manifold.Start(dt.StubContext(nil, map[string]interface{}{
        "agent":      &fakeAgent{},
        "api-caller": newFakeAPICaller(multiwatcher.JobManageModel),
    }))
    c.Check(err, jc.ErrorIsNil)
    c.Check(actual, gc.Equals, expect)
}

// Manifolds returns a set of co-configured manifolds covering the
// various responsibilities of a machine agent.
//
// Thou Shalt Not Use String Literals In This Function. Or Else.
func Manifolds(config ManifoldsConfig) dependency.Manifolds {

    // connectFilter exists:
    //  1) to let us retry api connections immediately on password change,
    //     rather than causing the dependency engine to wait for a while;
    //  2) to ensure that certain connection failures correctly trigger
    //     complete agent removal. (It's not safe to let any agent other
    //     than the machine mess around with SetCanUninstall).
    connectFilter := func(err error) error {
        cause := errors.Cause(err)
        if cause == apicaller.ErrConnectImpossible {
            err2 := coreagent.SetCanUninstall(config.Agent)
            if err2 != nil {
                return errors.Trace(err2)
            }
            return worker.ErrTerminateAgent
        } else if cause == apicaller.ErrChangedPassword {
            return dependency.ErrBounce
        }
        return err
    }

    return dependency.Manifolds{

        // The agent manifold references the enclosing agent, and is the
        // foundation stone on which most other manifolds ultimately depend.
        agentName: agent.Manifold(config.Agent),

        // The termination worker returns ErrTerminateAgent if a
        // termination signal is received by the process it's running
        // in. It has no inputs and its only output is the error it
        // returns. It depends on the uninstall file having been
        // written *by the manual provider* at install time; it would
        // be Very Wrong Indeed to use SetCanUninstall in conjunction
        // with this code.
        terminationName: terminationworker.Manifold(),

        // The stateconfigwatcher manifold watches the machine agent's
        // configuration and reports if state serving info is
        // present. It will bounce itself if state serving info is
        // added or removed. It is intended as a dependency just for
        // the state manifold.
        stateConfigWatcherName: stateconfigwatcher.Manifold(stateconfigwatcher.ManifoldConfig{
            AgentName:          agentName,
            AgentConfigChanged: config.AgentConfigChanged,
        }),

        // The state manifold creates a *state.State and makes it
        // available to other manifolds. It pings the mongodb session
        // regularly and will die if pings fail.
        stateName: workerstate.Manifold(workerstate.ManifoldConfig{
            AgentName:              agentName,
            StateConfigWatcherName: stateConfigWatcherName,
            OpenState:              config.OpenState,
        }),

        // The stateworkers manifold starts workers which rely on a
        // *state.State but which haven't been converted to run
        // directly under the dependency engine yet. This manifold
        // will be removed once all such workers have been converted;
        // until then, the workers are expected to handle their own
        // checks for upgrades etc, rather than blocking this whole
        // worker on upgrade completion.
        stateWorkersName: StateWorkersManifold(StateWorkersConfig{
            StateName:         stateName,
            StartStateWorkers: config.StartStateWorkers,
        }),

        // The api-config-watcher manifold monitors the API server
        // addresses in the agent config and bounces when they
        // change. It's required as part of model migrations.
        apiConfigWatcherName: apiconfigwatcher.Manifold(apiconfigwatcher.ManifoldConfig{
            AgentName:          agentName,
            AgentConfigChanged: config.AgentConfigChanged,
        }),

        // The api caller is a thin concurrent wrapper around a connection
        // to some API server. It's used by many other manifolds, which all
        // select their own desired facades. It will be interesting to see
        // how this works when we consolidate the agents; might be best to
        // handle the auth changes server-side..?
        apiCallerName: apicaller.Manifold(apicaller.ManifoldConfig{
            AgentName:            agentName,
            APIConfigWatcherName: apiConfigWatcherName,
            APIOpen:              apicaller.APIOpen,
            NewConnection:        apicaller.ScaryConnect,
            Filter:               connectFilter,
        }),

        // The upgrade steps gate is used to coordinate workers which
        // shouldn't do anything until the upgrade-steps worker has
        // finished running any required upgrade steps. The flag of
        // similar name is used to implement the isFullyUpgraded func
        // that keeps upgrade concerns out of unrelated manifolds.
        upgradeStepsGateName: gate.ManifoldEx(config.UpgradeStepsLock),
        upgradeStepsFlagName: gate.FlagManifold(gate.FlagManifoldConfig{
            GateName:  upgradeStepsGateName,
            NewWorker: gate.NewFlagWorker,
        }),

        // The upgrade check gate is used to coordinate workers which
        // shouldn't do anything until the upgrader worker has
        // completed its first check for a new tools version to
        // upgrade to. The flag of similar name is used to implement
        // the isFullyUpgraded func that keeps upgrade concerns out of
        // unrelated manifolds.
        upgradeCheckGateName: gate.ManifoldEx(config.UpgradeCheckLock),
        upgradeCheckFlagName: gate.FlagManifold(gate.FlagManifoldConfig{
            GateName:  upgradeCheckGateName,
            NewWorker: gate.NewFlagWorker,
        }),

        // The upgrader is a leaf worker that returns a specific error
        // type recognised by the machine agent, causing other workers
        // to be stopped and the agent to be restarted running the new
        // tools. We should only need one of these in a consolidated
        // agent, but we'll need to be careful about behavioural
        // differences, and interactions with the upgrade-steps
        // worker.
        upgraderName: upgrader.Manifold(upgrader.ManifoldConfig{
            AgentName:            agentName,
            APICallerName:        apiCallerName,
            UpgradeStepsGateName: upgradeStepsGateName,
            UpgradeCheckGateName: upgradeCheckGateName,
            PreviousAgentVersion: config.PreviousAgentVersion,
        }),

        // The upgradesteps worker runs soon after the machine agent
        // starts and runs any steps required to upgrade to the
        // running jujud version. Once upgrade steps have run, the
        // upgradesteps gate is unlocked and the worker exits.
        upgradeStepsName: upgradesteps.Manifold(upgradesteps.ManifoldConfig{
            AgentName:            agentName,
            APICallerName:        apiCallerName,
            UpgradeStepsGateName: upgradeStepsGateName,
            OpenStateForUpgrade:  config.OpenStateForUpgrade,
            PreUpgradeSteps:      config.PreUpgradeSteps,
        }),

        // The migration minion handles the agent-side aspects of model migrations.
        migrationFortressName: ifFullyUpgraded(fortress.Manifold()),
        migrationMinionName: ifFullyUpgraded(migrationminion.Manifold(migrationminion.ManifoldConfig{
            AgentName:     agentName,
            APICallerName: apiCallerName,
            FortressName:  migrationFortressName,
            NewFacade:     migrationminion.NewFacade,
            NewWorker:     migrationminion.NewWorker,
        })),

        // The serving-info-setter manifold grabs the state serving
        // info from the API connection and writes it to the agent
        // config.
        servingInfoSetterName: ifFullyUpgraded(ServingInfoSetterManifold(ServingInfoSetterConfig{
            AgentName:     agentName,
            APICallerName: apiCallerName,
        })),

        // The apiworkers manifold starts workers which rely on the
        // machine agent's API connection but have not been converted
        // to work directly under the dependency engine. It waits for
        // upgrades to be finished before starting these workers.
        apiWorkersName: ifFullyUpgraded(APIWorkersManifold(APIWorkersConfig{
            APICallerName:   apiCallerName,
            StartAPIWorkers: config.StartAPIWorkers,
        })),

        // The reboot manifold manages a worker which will reboot the
        // machine when requested. It needs an API connection and
        // waits for upgrades to be complete.
        rebootName: ifFullyUpgraded(reboot.Manifold(reboot.ManifoldConfig{
            AgentName:     agentName,
            APICallerName: apiCallerName,
        })),

        // The logging config updater is a leaf worker that indirectly
        // controls the messages sent via the log sender or rsyslog,
        // according to changes in environment config. We should only need
        // one of these in a consolidated agent.
        loggingConfigUpdaterName: ifFullyUpgraded(logger.Manifold(logger.ManifoldConfig{
            AgentName:     agentName,
            APICallerName: apiCallerName,
        })),

        // The diskmanager worker periodically lists block devices on the
        // machine it runs on. This worker will be run on all Juju-managed
        // machines (one per machine agent).
        diskManagerName: ifFullyUpgraded(diskmanager.Manifold(diskmanager.ManifoldConfig{
            AgentName:     agentName,
            APICallerName: apiCallerName,
        })),

        // The proxy config updater is a leaf worker that sets http/https/apt/etc
        // proxy settings.
        proxyConfigUpdater: ifFullyUpgraded(proxyupdater.Manifold(proxyupdater.ManifoldConfig{
            AgentName:     agentName,
            APICallerName: apiCallerName,
        })),

        // The api address updater is a leaf worker that rewrites agent config
        // as the state server addresses change. We should only need one of
        // these in a consolidated agent.
        apiAddressUpdaterName: ifFullyUpgraded(apiaddressupdater.Manifold(apiaddressupdater.ManifoldConfig{
            AgentName:     agentName,
            APICallerName: apiCallerName,
        })),

        // The machiner Worker will wait for the identified machine to become
        // Dying and make it Dead; or until the machine becomes Dead by other
        // means.
        machinerName: ifFullyUpgraded(machiner.Manifold(machiner.ManifoldConfig{
            AgentName:     agentName,
            APICallerName: apiCallerName,
        })),

        // The log sender is a leaf worker that sends log messages to some
        // API server, when configured so to do. We should only need one of
        // these in a consolidated agent.
        //
        // NOTE: the LogSource will buffer a large number of messages as an upgrade
        // runs; it currently seems better to fill the buffer and send when stable,
        // optimising for stable controller upgrades rather than up-to-the-moment
        // observable normal-machine upgrades.
        logSenderName: ifFullyUpgraded(logsender.Manifold(logsender.ManifoldConfig{
            APICallerName: apiCallerName,
            LogSource:     config.LogSource,
        })),

        // The deployer worker is responsible for deploying and recalling unit
        // agents, according to changes in a set of state units; and for the
        // final removal of its agents' units from state when they are no
        // longer needed.
        deployerName: ifFullyUpgraded(deployer.Manifold(deployer.ManifoldConfig{
            NewDeployContext: config.NewDeployContext,
            AgentName:        agentName,
            APICallerName:    apiCallerName,
        })),

        authenticationWorkerName: ifFullyUpgraded(authenticationworker.Manifold(authenticationworker.ManifoldConfig{
            AgentName:     agentName,
            APICallerName: apiCallerName,
        })),

        // The storageProvisioner worker manages provisioning
        // (deprovisioning), and attachment (detachment) of first-class
        // volumes and filesystems.
        storageProvisionerName: ifFullyUpgraded(storageprovisioner.MachineManifold(storageprovisioner.MachineManifoldConfig{
            AgentName:     agentName,
            APICallerName: apiCallerName,
            Clock:         config.Clock,
        })),

        resumerName: ifFullyUpgraded(resumer.Manifold(resumer.ManifoldConfig{
            AgentName:     agentName,
            APICallerName: apiCallerName,
        })),

        identityFileWriterName: ifFullyUpgraded(identityfilewriter.Manifold(identityfilewriter.ManifoldConfig{
            AgentName:     agentName,
            APICallerName: apiCallerName,
        })),

        toolsVersionCheckerName: ifFullyUpgraded(toolsversionchecker.Manifold(toolsversionchecker.ManifoldConfig{
            AgentName:     agentName,
            APICallerName: apiCallerName,
        })),

        machineActionName: ifFullyUpgraded(machineactions.Manifold(machineactions.ManifoldConfig{
            AgentName:     agentName,
            APICallerName: apiCallerName,
            NewFacade:     machineactions.NewFacade,
            NewWorker:     machineactions.NewMachineActionsWorker,
        })),

        hostKeyReporterName: ifFullyUpgraded(hostkeyreporter.Manifold(hostkeyreporter.ManifoldConfig{
            AgentName:     agentName,
            APICallerName: apiCallerName,
            RootDir:       config.RootDir,
            NewFacade:     hostkeyreporter.NewFacade,
            NewWorker:     hostkeyreporter.NewWorker,
        })),
    }
}

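// For illustration only: a minimal sketch of how a manifolds map like the one
// returned above is typically wired into a dependency engine. The exact
// EngineConfig fields and the isFatal/worstError helpers below are assumptions
// and vary between versions of the dependency package; treat this as a sketch,
// not the actual agent wiring.
//
//    engine, err := dependency.NewEngine(dependency.EngineConfig{
//        IsFatal:     isFatal,    // hypothetical helper: decides which errors kill the agent
//        WorstError:  worstError, // hypothetical helper: picks the more severe of two errors
//        ErrorDelay:  3 * time.Second,
//        BounceDelay: 10 * time.Millisecond,
//    })
//    if err != nil {
//        return errors.Trace(err)
//    }
//    if err := dependency.Install(engine, Manifolds(config)); err != nil {
//        engine.Kill()
//        return errors.Trace(err)
//    }
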
func (*ManifoldSuite) TestOutput(c *gc.C) {
    manifold := resumer.Manifold(resumer.ManifoldConfig{})
    c.Check(manifold.Output, gc.IsNil)
}