Example No. 1
func (s *ManifoldSuite) SetUpTest(c *gc.C) {
	s.IsolationSuite.SetUpTest(c)
	s.Stub = testing.Stub{}
	s.manifold = apicaller.Manifold(apicaller.ManifoldConfig{
		AgentName: "agent-name",
	})

	s.agent = &mockAgent{
		stub: &s.Stub,
		env:  coretesting.EnvironmentTag,
	}
	s.getResource = dt.StubGetResource(dt.StubResources{
		"agent-name": dt.StubResource{Output: s.agent},
	})

	// Watch out for this: it uses its own Stub because Close calls are made from
	// the worker's loop goroutine. You should make sure to stop the worker before
	// checking the mock conn's calls (unless you know the connection will outlive
	// the test -- see setupMutatorTest).
	s.conn = &mockConn{
		stub:   &testing.Stub{},
		broken: make(chan struct{}),
	}
	s.PatchValue(apicaller.OpenConnection, func(a agent.Agent) (api.Connection, error) {
		s.AddCall("openConnection", a)
		if err := s.NextErr(); err != nil {
			return nil, err
		}
		return s.conn, nil
	})
}
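
A companion test can make that ordering concrete. The sketch below is hypothetical (the test name and the jc alias for github.com/juju/testing/checkers are assumptions), but it shows the intended pattern: stop the worker first, then assert against the conn's private Stub.

func (s *ManifoldSuite) TestStopClosesConnection(c *gc.C) {
	// Start the worker under test via the manifold.
	w, err := s.manifold.Start(s.getResource)
	c.Assert(err, jc.ErrorIsNil)

	// Stop it before inspecting the mock conn: Close is called from
	// the worker's loop goroutine, so this avoids a data race.
	c.Assert(worker.Stop(w), jc.ErrorIsNil)
	s.conn.stub.CheckCallNames(c, "Close")
}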
Example No. 2
func (s *ManifoldSuite) SetUpTest(c *gc.C) {
	s.IsolationSuite.SetUpTest(c)
	s.Stub = testing.Stub{}
	s.manifold = apicaller.Manifold(apicaller.ManifoldConfig{
		AgentName: "agent-name",
		APIOpen: func(*api.Info, api.DialOpts) (api.Connection, error) {
			panic("just a fake")
		},
		NewConnection: func(a agent.Agent, apiOpen api.OpenFunc) (api.Connection, error) {
			c.Check(apiOpen, gc.NotNil) // uncomparable
			s.AddCall("NewConnection", a)
			if err := s.NextErr(); err != nil {
				return nil, err
			}
			return s.conn, nil
		},
		Filter: func(err error) error {
			panic(err)
		},
	})
	checkFilter := func() {
		s.manifold.Filter(errors.New("arrgh"))
	}
	c.Check(checkFilter, gc.PanicMatches, "arrgh")

	s.agent = &mockAgent{
		stub:  &s.Stub,
		model: coretesting.ModelTag,
	}
	s.getResource = dt.StubGetResource(dt.StubResources{
		"agent-name": dt.StubResource{Output: s.agent},
	})

	// Watch out for this: it uses its own Stub because Close calls
	// are made from the worker's loop goroutine. You should make
	// sure to stop the worker before checking the mock conn's calls.
	s.conn = &mockConn{
		stub:   &testing.Stub{},
		broken: make(chan struct{}),
	}
}
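
For context, a plausible shape for the mockConn used above -- a hedged sketch, since the real test double lives elsewhere in the suite -- embeds api.Connection to satisfy the interface and records Close on its own Stub:

type mockConn struct {
	api.Connection // embedded only for interface satisfaction
	stub   *testing.Stub
	broken chan struct{}
}

// Broken returns the channel the apicaller worker watches to detect
// connection death.
func (c *mockConn) Broken() <-chan struct{} {
	return c.broken
}

// Close records the call on the conn's own Stub; see the comment in
// SetUpTest for why it doesn't share the suite's Stub.
func (c *mockConn) Close() error {
	c.stub.AddCall("Close")
	return c.stub.NextErr()
}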
Example No. 3
// Manifolds returns a set of co-configured manifolds covering the
// various responsibilities of a machine agent.
//
// Thou Shalt Not Use String Literals In This Function. Or Else.
func Manifolds(config ManifoldsConfig) dependency.Manifolds {

	// connectFilter exists:
	//  1) to let us retry api connections immediately on password change,
	//     rather than causing the dependency engine to wait for a while;
	//  2) to ensure that certain connection failures correctly trigger
	//     complete agent removal. (It's not safe to let any agent other
	//     than the machine agent mess around with SetCanUninstall).
	connectFilter := func(err error) error {
		cause := errors.Cause(err)
		if cause == apicaller.ErrConnectImpossible {
			err2 := coreagent.SetCanUninstall(config.Agent)
			if err2 != nil {
				return errors.Trace(err2)
			}
			return worker.ErrTerminateAgent
		} else if cause == apicaller.ErrChangedPassword {
			return dependency.ErrBounce
		}
		return err
	}

	return dependency.Manifolds{
		// The agent manifold references the enclosing agent, and is the
		// foundation stone on which most other manifolds ultimately depend.
		agentName: agent.Manifold(config.Agent),

		// The termination worker returns ErrTerminateAgent if a
		// termination signal is received by the process it's running
		// in. It has no inputs and its only output is the error it
		// returns. It depends on the uninstall file having been
		// written *by the manual provider* at install time; it would
		// be Very Wrong Indeed to use SetCanUninstall in conjunction
		// with this code.
		terminationName: terminationworker.Manifold(),

		// The stateconfigwatcher manifold watches the machine agent's
		// configuration and reports if state serving info is
		// present. It will bounce itself if state serving info is
		// added or removed. It is intended as a dependency just for
		// the state manifold.
		stateConfigWatcherName: stateconfigwatcher.Manifold(stateconfigwatcher.ManifoldConfig{
			AgentName:          agentName,
			AgentConfigChanged: config.AgentConfigChanged,
		}),

		// The state manifold creates a *state.State and makes it
		// available to other manifolds. It pings the mongodb session
		// regularly and will die if pings fail.
		stateName: workerstate.Manifold(workerstate.ManifoldConfig{
			AgentName:              agentName,
			StateConfigWatcherName: stateConfigWatcherName,
			OpenState:              config.OpenState,
		}),

		// The stateworkers manifold starts workers which rely on a
		// *state.State but which haven't been converted to run
		// directly under the dependency engine yet. This manifold
		// will be removed once all such workers have been converted;
		// until then, the workers are expected to handle their own
		// checks for upgrades etc, rather than blocking this whole
		// worker on upgrade completion.
		stateWorkersName: StateWorkersManifold(StateWorkersConfig{
			StateName:         stateName,
			StartStateWorkers: config.StartStateWorkers,
		}),

		// The api-config-watcher manifold monitors the API server
		// addresses in the agent config and bounces when they
		// change. It's required as part of model migrations.
		apiConfigWatcherName: apiconfigwatcher.Manifold(apiconfigwatcher.ManifoldConfig{
			AgentName:          agentName,
			AgentConfigChanged: config.AgentConfigChanged,
		}),

		// The api caller is a thin concurrent wrapper around a connection
		// to some API server. It's used by many other manifolds, which all
		// select their own desired facades. It will be interesting to see
		// how this works when we consolidate the agents; might be best to
		// handle the auth changes server-side..?
		apiCallerName: apicaller.Manifold(apicaller.ManifoldConfig{
			AgentName:            agentName,
			APIConfigWatcherName: apiConfigWatcherName,
			APIOpen:              apicaller.APIOpen,
			NewConnection:        apicaller.ScaryConnect,
			Filter:               connectFilter,
		}),

		// The upgrade steps gate is used to coordinate workers which
		// shouldn't do anything until the upgrade-steps worker has
		// finished running any required upgrade steps. The flag of
		// similar name is used to implement the isFullyUpgraded func
		// that keeps upgrade concerns out of unrelated manifolds.
		upgradeStepsGateName: gate.ManifoldEx(config.UpgradeStepsLock),
		upgradeStepsFlagName: gate.FlagManifold(gate.FlagManifoldConfig{
			GateName:  upgradeStepsGateName,
			NewWorker: gate.NewFlagWorker,
		}),

		// The upgrade check gate is used to coordinate workers which
		// shouldn't do anything until the upgrader worker has
		// completed its first check for a new tools version to
		// upgrade to. The flag of similar name is used to implement
		// the isFullyUpgraded func that keeps upgrade concerns out of
		// unrelated manifolds.
		upgradeCheckGateName: gate.ManifoldEx(config.UpgradeCheckLock),
		upgradeCheckFlagName: gate.FlagManifold(gate.FlagManifoldConfig{
			GateName:  upgradeCheckGateName,
			NewWorker: gate.NewFlagWorker,
		}),

		// The upgrader is a leaf worker that returns a specific error
		// type recognised by the machine agent, causing other workers
		// to be stopped and the agent to be restarted running the new
		// tools. We should only need one of these in a consolidated
		// agent, but we'll need to be careful about behavioural
		// differences, and interactions with the upgrade-steps
		// worker.
		upgraderName: upgrader.Manifold(upgrader.ManifoldConfig{
			AgentName:            agentName,
			APICallerName:        apiCallerName,
			UpgradeStepsGateName: upgradeStepsGateName,
			UpgradeCheckGateName: upgradeCheckGateName,
			PreviousAgentVersion: config.PreviousAgentVersion,
		}),

		// The upgradesteps worker runs soon after the machine agent
		// starts and runs any steps required to upgrade to the
		// running jujud version. Once upgrade steps have run, the
		// upgradesteps gate is unlocked and the worker exits.
		upgradeStepsName: upgradesteps.Manifold(upgradesteps.ManifoldConfig{
			AgentName:            agentName,
			APICallerName:        apiCallerName,
			UpgradeStepsGateName: upgradeStepsGateName,
			OpenStateForUpgrade:  config.OpenStateForUpgrade,
			PreUpgradeSteps:      config.PreUpgradeSteps,
		}),

		// The migration minion handles the agent side aspects of model migrations.
		migrationFortressName: ifFullyUpgraded(fortress.Manifold()),
		migrationMinionName: ifFullyUpgraded(migrationminion.Manifold(migrationminion.ManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
			FortressName:  migrationFortressName,

			NewFacade: migrationminion.NewFacade,
			NewWorker: migrationminion.NewWorker,
		})),

		// The serving-info-setter manifold grabs the state
		// serving info from the API connection and writes it to the
		// agent config.
		servingInfoSetterName: ifFullyUpgraded(ServingInfoSetterManifold(ServingInfoSetterConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
		})),

		// The apiworkers manifold starts workers which rely on the
		// machine agent's API connection but have not been converted
		// to work directly under the dependency engine. It waits for
		// upgrades to be finished before starting these workers.
		apiWorkersName: ifFullyUpgraded(APIWorkersManifold(APIWorkersConfig{
			APICallerName:   apiCallerName,
			StartAPIWorkers: config.StartAPIWorkers,
		})),

		// The reboot manifold manages a worker which will reboot the
		// machine when requested. It needs an API connection and
		// waits for upgrades to be complete.
		rebootName: ifFullyUpgraded(reboot.Manifold(reboot.ManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
		})),

		// The logging config updater is a leaf worker that indirectly
		// controls the messages sent via the log sender or rsyslog,
		// according to changes in environment config. We should only need
		// one of these in a consolidated agent.
		loggingConfigUpdaterName: ifFullyUpgraded(logger.Manifold(logger.ManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
		})),

		// The diskmanager worker periodically lists block devices on the
		// machine it runs on. This worker will be run on all Juju-managed
		// machines (one per machine agent).
		diskManagerName: ifFullyUpgraded(diskmanager.Manifold(diskmanager.ManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
		})),

		// The proxy config updater is a leaf worker that sets http/https/apt/etc
		// proxy settings.
		proxyConfigUpdater: ifFullyUpgraded(proxyupdater.Manifold(proxyupdater.ManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
		})),

		// The api address updater is a leaf worker that rewrites agent config
		// as the state server addresses change. We should only need one of
		// these in a consolidated agent.
		apiAddressUpdaterName: ifFullyUpgraded(apiaddressupdater.Manifold(apiaddressupdater.ManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
		})),

		// The machiner Worker will wait for the identified machine to become
		// Dying and make it Dead; or until the machine becomes Dead by other
		// means.
		machinerName: ifFullyUpgraded(machiner.Manifold(machiner.ManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
		})),

		// The log sender is a leaf worker that sends log messages to some
		// API server, when configured so to do. We should only need one of
		// these in a consolidated agent.
		//
		// NOTE: the LogSource will buffer a large number of messages as an upgrade
		// runs; it currently seems better to fill the buffer and send when stable,
		// optimising for stable controller upgrades rather than up-to-the-moment
		// observable normal-machine upgrades.
		logSenderName: ifFullyUpgraded(logsender.Manifold(logsender.ManifoldConfig{
			APICallerName: apiCallerName,
			LogSource:     config.LogSource,
		})),

		// The deployer worker is responsible for deploying and recalling unit
		// agents, according to changes in a set of state units; and for the
		// final removal of its agents' units from state when they are no
		// longer needed.
		deployerName: ifFullyUpgraded(deployer.Manifold(deployer.ManifoldConfig{
			NewDeployContext: config.NewDeployContext,
			AgentName:        agentName,
			APICallerName:    apiCallerName,
		})),

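		// The authenticationworker keeps the machine's authorised ssh
		// keys in sync with those recorded in state.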
		authenticationWorkerName: ifFullyUpgraded(authenticationworker.Manifold(authenticationworker.ManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
		})),

		// The storageProvisioner worker manages provisioning
		// (deprovisioning), and attachment (detachment) of first-class
		// volumes and filesystems.
		storageProvisionerName: ifFullyUpgraded(storageprovisioner.MachineManifold(storageprovisioner.MachineManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
			Clock:         config.Clock,
		})),

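		// The resumer worker periodically resumes incomplete
		// transactions.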
		resumerName: ifFullyUpgraded(resumer.Manifold(resumer.ManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
		})),

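		// The identityfilewriter writes out the controller's ssh
		// identity file where required.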
		identityFileWriterName: ifFullyUpgraded(identityfilewriter.Manifold(identityfilewriter.ManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
		})),

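		// The toolsversionchecker periodically checks whether newer
		// agent tools are available.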
		toolsVersionCheckerName: ifFullyUpgraded(toolsversionchecker.Manifold(toolsversionchecker.ManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
		})),

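		// The machineactions worker runs actions targeted at the
		// machine this agent manages.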
		machineActionName: ifFullyUpgraded(machineactions.Manifold(machineactions.ManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
			NewFacade:     machineactions.NewFacade,
			NewWorker:     machineactions.NewMachineActionsWorker,
		})),

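		// The hostkeyreporter reports the machine's ssh host keys to
		// the controller.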
		hostKeyReporterName: ifFullyUpgraded(hostkeyreporter.Manifold(hostkeyreporter.ManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
			RootDir:       config.RootDir,
			NewFacade:     hostkeyreporter.NewFacade,
			NewWorker:     hostkeyreporter.NewWorker,
		})),
	}
}
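
The ifFullyUpgraded decorator used throughout is defined elsewhere; the following is a hedged sketch of the pattern (the local flag interface is an assumption standing in for the real flag output type): add the two upgrade flags as inputs, and refuse to start the wrapped worker until both report true.

// flag stands in for the real output type of the gate flag manifolds.
type flag interface {
	Check() bool
}

func ifFullyUpgraded(manifold dependency.Manifold) dependency.Manifold {
	inner := manifold.Start
	manifold.Inputs = append(manifold.Inputs,
		upgradeStepsFlagName, upgradeCheckFlagName,
	)
	manifold.Start = func(getResource dependency.GetResourceFunc) (worker.Worker, error) {
		for _, name := range []string{upgradeStepsFlagName, upgradeCheckFlagName} {
			var f flag
			if err := getResource(name, &f); err != nil {
				return nil, err
			}
			if !f.Check() {
				// Not fatal: the engine retries once the flag is set.
				return nil, dependency.ErrMissing
			}
		}
		return inner(getResource)
	}
	return manifold
}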
Example No. 4
// Manifolds returns a set of co-configured manifolds covering the
// various responsibilities of a machine agent.
//
// Thou Shalt Not Use String Literals In This Function. Or Else.
func Manifolds(config ManifoldsConfig) dependency.Manifolds {
	return dependency.Manifolds{
		// The agent manifold references the enclosing agent, and is the
		// foundation stone on which most other manifolds ultimately depend.
		agentName: agent.Manifold(config.Agent),

		// The termination worker returns ErrTerminateAgent if a
		// termination signal is received by the process it's running
		// in. It has no inputs and its only output is the error it
		// returns.
		terminationName: terminationworker.Manifold(),

		// The api caller is a thin concurrent wrapper around a connection
		// to some API server. It's used by many other manifolds, which all
		// select their own desired facades. It will be interesting to see
		// how this works when we consolidate the agents; might be best to
		// handle the auth changes server-side..?
		apiCallerName: apicaller.Manifold(apicaller.ManifoldConfig{
			AgentName: agentName,
		}),

		// The upgrade steps gate is used to coordinate workers which
		// shouldn't do anything until the upgrade-steps worker has
		// finished running any required upgrade steps.
		upgradeStepsGateName: gate.ManifoldEx(config.UpgradeStepsLock),

		// The upgrade check gate is used to coordinate workers which
		// shouldn't do anything until the upgrader worker has
		// completed its first check for a new tools version to
		// upgrade to.
		upgradeCheckGateName: gate.ManifoldEx(config.UpgradeCheckLock),

		// The upgrader is a leaf worker that returns a specific error
		// type recognised by the machine agent, causing other workers
		// to be stopped and the agent to be restarted running the new
		// tools. We should only need one of these in a consolidated
		// agent, but we'll need to be careful about behavioural
		// differences, and interactions with the upgrade-steps
		// worker.
		upgraderName: upgrader.Manifold(upgrader.ManifoldConfig{
			AgentName:            agentName,
			APICallerName:        apiCallerName,
			UpgradeStepsGateName: upgradeStepsGateName,
			UpgradeCheckGateName: upgradeCheckGateName,
			PreviousAgentVersion: config.PreviousAgentVersion,
		}),

		// The upgradesteps worker runs soon after the machine agent
		// starts and runs any steps required to upgrade to the
		// running jujud version. Once upgrade steps have run, the
		// upgradesteps gate is unlocked and the worker exits.
		upgradeStepsName: upgradesteps.Manifold(upgradesteps.ManifoldConfig{
			AgentName:            agentName,
			APICallerName:        apiCallerName,
			UpgradeStepsGateName: upgradeStepsGateName,
			OpenStateForUpgrade:  config.OpenStateForUpgrade,
			PreUpgradeSteps:      config.PreUpgradeSteps,
		}),

		// The uninstaller manifold checks whether the machine is dead.
		// If it is, it writes the agent uninstall file and returns
		// ErrTerminateAgent, which causes the agent to remove itself.
		uninstallerName: uninstallerManifold(uninstallerManifoldConfig{
			AgentName:          agentName,
			APICallerName:      apiCallerName,
			WriteUninstallFile: config.WriteUninstallFile,
		}),

		// The serving-info-setter manifold grabs the state
		// serving info from the API connection and writes it to the
		// agent config.
		servingInfoSetterName: ServingInfoSetterManifold(ServingInfoSetterConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
		}),

		// The upgradewaiter manifold aggregates the
		// upgrade-steps-gate and upgrade-check-gate manifolds into
		// one boolean output. It makes it easy to create manifolds
		// which must only run after these upgrade events have
		// occurred.
		upgradeWaiterName: upgradewaiter.Manifold(upgradewaiter.ManifoldConfig{
			UpgradeStepsWaiterName: upgradeStepsGateName,
			UpgradeCheckWaiterName: upgradeCheckGateName,
		}),

		// The apiworkers manifold starts workers which rely on the
		// machine agent's API connection but have not been converted
		// to work directly under the dependency engine. It waits for
		// upgrades to be finished before starting these workers.
		apiWorkersName: APIWorkersManifold(APIWorkersConfig{
			APICallerName:     apiCallerName,
			UpgradeWaiterName: upgradeWaiterName,
			StartAPIWorkers:   config.StartAPIWorkers,
		}),

		// The reboot manifold manages a worker which will reboot the
		// machine when requested. It needs an API connection and
		// waits for upgrades to be complete.
		rebootName: reboot.Manifold(reboot.ManifoldConfig{
			AgentName:         agentName,
			APICallerName:     apiCallerName,
			UpgradeWaiterName: upgradeWaiterName,
		}),

		// The logging config updater is a leaf worker that indirectly
		// controls the messages sent via the log sender or rsyslog,
		// according to changes in environment config. We should only need
		// one of these in a consolidated agent.
		loggingConfigUpdaterName: logger.Manifold(logger.ManifoldConfig{
			AgentName:         agentName,
			APICallerName:     apiCallerName,
			UpgradeWaiterName: upgradeWaiterName,
		}),
	}
}
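
To make the upgradewaiter's boolean output concrete, here is a hedged sketch of a consuming start function (the function and worker constructor names are assumptions):

func exampleStart(getResource dependency.GetResourceFunc) (worker.Worker, error) {
	var upgradesDone bool
	if err := getResource(upgradeWaiterName, &upgradesDone); err != nil {
		return nil, err
	}
	if !upgradesDone {
		// Not an error as such: the engine retries once upgrades finish.
		return nil, dependency.ErrMissing
	}
	return newExampleWorker() // hypothetical constructor
}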
Example No. 5
// Manifolds returns a set of co-configured manifolds covering the various
// responsibilities of a standalone unit agent. It also accepts the logSource
// argument because we haven't figured out how to thread all the logging bits
// through a dependency engine yet.
//
// Thou Shalt Not Use String Literals In This Function. Or Else.
func Manifolds(config ManifoldsConfig) dependency.Manifolds {
	return dependency.Manifolds{

		// The agent manifold references the enclosing agent, and is the
		// foundation stone on which most other manifolds ultimately depend.
		// (Currently, that is "all manifolds", but consider a shared clock.)
		AgentName: agent.Manifold(config.Agent),

		// The machine lock manifold is a thin concurrent wrapper around an
		// FSLock in an agreed location. We expect it to be replaced with an
		// in-memory lock when the unit agent moves into the machine agent.
		MachineLockName: machinelock.Manifold(machinelock.ManifoldConfig{
			AgentName: AgentName,
		}),

		// The api caller is a thin concurrent wrapper around a connection
		// to some API server. It's used by many other manifolds, which all
		// select their own desired facades. It will be interesting to see
		// how this works when we consolidate the agents; might be best to
		// handle the auth changes server-side..?
		APICallerName: apicaller.Manifold(apicaller.ManifoldConfig{
			AgentName:       AgentName,
			APIInfoGateName: APIInfoGateName,
		}),

		// This manifold is used to coordinate between the api caller and the
		// log sender, which share the API credentials that the API caller may
		// update. To avoid surprising races, the log sender waits for the api
		// caller to unblock this, indicating that any password dance has been
		// completed and the log-sender can now connect without confusion.
		APIInfoGateName: gate.Manifold(),

		// The log sender is a leaf worker that sends log messages to some
		// API server, when configured so to do. We should only need one of
		// these in a consolidated agent.
		LogSenderName: logsender.Manifold(logsender.ManifoldConfig{
			AgentName:       AgentName,
			APIInfoGateName: APIInfoGateName,
			LogSource:       config.LogSource,
		}),

		// The rsyslog config updater is a leaf worker that causes rsyslog
		// to send messages to the state servers. We should only need one
		// of these in a consolidated agent.
		RsyslogConfigUpdaterName: rsyslog.Manifold(rsyslog.ManifoldConfig{
			AgentName:     AgentName,
			APICallerName: APICallerName,
		}),

		// The logging config updater is a leaf worker that indirectly
		// controls the messages sent via the log sender or rsyslog,
		// according to changes in environment config. We should only need
		// one of these in a consolidated agent.
		LoggingConfigUpdaterName: logger.Manifold(logger.ManifoldConfig{
			AgentName:     AgentName,
			APICallerName: APICallerName,
		}),

		// The api address updater is a leaf worker that rewrites agent config
		// as the state server addresses change. We should only need one of
		// these in a consolidated agent.
		APIAddressUpdaterName: apiaddressupdater.Manifold(apiaddressupdater.ManifoldConfig{
			AgentName:     AgentName,
			APICallerName: APICallerName,
		}),

		// The proxy config updater is a leaf worker that sets http/https/apt/etc
		// proxy settings.
		// TODO(fwereade): timing of this is suspicious. There was superstitious
		// code trying to run this early; if that ever helped, it was only by
		// coincidence. Probably we ought to be making components that might
		// need proxy config into explicit dependencies of the proxy updater...
		ProxyConfigUpdaterName: proxyupdater.Manifold(proxyupdater.ManifoldConfig{
			APICallerName: APICallerName,
		}),

		// The upgrader is a leaf worker that returns a specific error type
		// recognised by the unit agent, causing other workers to be stopped
		// and the agent to be restarted running the new tools. We should only
		// need one of these in a consolidated agent, but we'll need to be
		// careful about behavioural differences, and interactions with the
		// upgrade-steps worker.
		UpgraderName: upgrader.Manifold(upgrader.ManifoldConfig{
			AgentName:     AgentName,
			APICallerName: APICallerName,
		}),

		// The leadership tracker attempts to secure and retain leadership of
		// the unit's service, and is consulted on such matters by the
		// uniter. As it stands today, we'll need one per unit in a
		// consolidated agent.
		LeadershipTrackerName: leadership.Manifold(leadership.ManifoldConfig{
			AgentName:           AgentName,
			APICallerName:       APICallerName,
			LeadershipGuarantee: config.LeadershipGuarantee,
		}),

		// The uniter installs charms; manages the unit's presence in its
		// relations; creates subordinate units; runs all the hooks; sends
		// metrics; etc etc etc. We expect to break it up further in the
		// coming weeks, and to need one per unit in a consolidated agent
		// (and probably one for each component broken out).
		UniterName: uniter.Manifold(uniter.ManifoldConfig{
			AgentName:             AgentName,
			APICallerName:         APICallerName,
			LeadershipTrackerName: LeadershipTrackerName,
			MachineLockName:       MachineLockName,
			CharmDirName:          CharmDirName,
		}),

		// TODO (mattyw) should be added to machine agent.
		MetricSpoolName: spool.Manifold(spool.ManifoldConfig{
			AgentName: AgentName,
		}),

		// The charmdir resource tracks whether the charm directory is available or
		// not; after 'start' hook and before 'stop' hook executes, and not during
		// upgrades.
		CharmDirName: charmdir.Manifold(),

		// The metric collect worker executes the collect-metrics hook in a
		// restricted context that can safely run concurrently with other hooks.
		MetricCollectName: collect.Manifold(collect.ManifoldConfig{
			AgentName:       AgentName,
			APICallerName:   APICallerName,
			MetricSpoolName: MetricSpoolName,
			CharmDirName:    CharmDirName,
		}),

		// The meter status worker executes the meter-status-changed hook when it detects
		// that the meter status has changed.
		MeterStatusName: meterstatus.Manifold(meterstatus.ManifoldConfig{
			AgentName:       AgentName,
			APICallerName:   APICallerName,
			MachineLockName: MachineLockName,
		}),

		// The metric sender worker periodically sends accumulated metrics to the state server.
		MetricSenderName: sender.Manifold(sender.ManifoldConfig{
			APICallerName:   APICallerName,
			MetricSpoolName: MetricSpoolName,
		}),
	}
}
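
The APIInfoGateName handshake described above looks roughly like this on the log sender's side; a hedged sketch (the enclosing function and abort channel are assumptions; gate.Waiter is the real interface):

func waitForAPIInfoGate(getResource dependency.GetResourceFunc, abort <-chan struct{}) error {
	var waiter gate.Waiter
	if err := getResource(APIInfoGateName, &waiter); err != nil {
		return err
	}
	select {
	case <-waiter.Unlocked():
		// The api caller has finished any password dance; the log
		// sender can now connect without confusion.
		return nil
	case <-abort:
		return errors.New("aborted waiting for api info gate")
	}
}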
Example No. 6
// Manifolds returns a set of interdependent dependency manifolds that will
// run together to administer a model, as configured.
func Manifolds(config ManifoldsConfig) dependency.Manifolds {
	modelTag := config.Agent.CurrentConfig().Model()
	return dependency.Manifolds{

		// The first group are foundational; the agent and clock
		// which wrap those supplied in config, and the api-caller
		// through which everything else communicates with the apiserver.
		agentName: agent.Manifold(config.Agent),
		clockName: clockManifold(config.Clock),
		apiCallerName: apicaller.Manifold(apicaller.ManifoldConfig{
			AgentName:     agentName,
			APIOpen:       apicaller.APIOpen,
			NewConnection: apicaller.OnlyConnect,
		}),

		// The discover spaces gate is used to coordinate workers which
		// shouldn't do anything until the discoverspaces worker has completed
		// its first discovery attempt.
		discoverSpacesCheckGateName: gate.ManifoldEx(config.DiscoverSpacesCheckLock),

		// All other manifolds should depend on at least one of these
		// three, which handle all the tasks that are safe and sane
		// to run in *all* controller machines.
		notDeadFlagName: lifeflag.Manifold(lifeflag.ManifoldConfig{
			APICallerName: apiCallerName,
			Entity:        modelTag,
			Result:        life.IsNotDead,
			Filter:        lifeFilter,

			NewFacade: lifeflag.NewFacade,
			NewWorker: lifeflag.NewWorker,
		}),
		notAliveFlagName: lifeflag.Manifold(lifeflag.ManifoldConfig{
			APICallerName: apiCallerName,
			Entity:        modelTag,
			Result:        life.IsNotAlive,
			Filter:        lifeFilter,

			NewFacade: lifeflag.NewFacade,
			NewWorker: lifeflag.NewWorker,
		}),
		isResponsibleFlagName: singular.Manifold(singular.ManifoldConfig{
			ClockName:     clockName,
			AgentName:     agentName,
			APICallerName: apiCallerName,
			Duration:      config.RunFlagDuration,

			NewFacade: singular.NewFacade,
			NewWorker: singular.NewWorker,
		}),

		// Everything else should be wrapped in ifResponsible,
		// ifNotAlive, or ifNotDead, to ensure that only a single
		// controller is administering this model at a time.
		//
		// NOTE: not perfectly reliable at this stage? i.e. a worker
		// that ignores its stop signal for "too long" might continue
		// to take admin actions after the window of responsibility
		// closes. This *is* a pre-existing problem, but demands some
		// thought/care: e.g. should we make sure the apiserver also
		// closes any connections that lose responsibility..? can we
		// make sure all possible environ operations are either time-
		// bounded or interruptible? etc
		//
		// On the other hand, all workers *should* be written in the
		// expectation of dealing with a sucky infrastructure running
		// things in parallel unexpectedly, just because the universe
		// hates us and will engineer matters such that it happens
		// sometimes, even when we try to avoid it.

		// The environ tracker could/should be used by several other
		// workers (firewaller, provisioners, address-cleaner?).
		environTrackerName: ifResponsible(environ.Manifold(environ.ManifoldConfig{
			APICallerName:  apiCallerName,
			NewEnvironFunc: environs.New,
		})),

		// The undertaker is currently the only ifNotAlive worker.
		undertakerName: ifNotAlive(undertaker.Manifold(undertaker.ManifoldConfig{
			APICallerName: apiCallerName,
			EnvironName:   environTrackerName,
			ClockName:     clockName,
			RemoveDelay:   config.ModelRemoveDelay,

			NewFacade: undertaker.NewFacade,
			NewWorker: undertaker.NewWorker,
		})),

		// All the rest depend on ifNotDead.
		discoverSpacesName: ifNotDead(discoverspaces.Manifold(discoverspaces.ManifoldConfig{
			EnvironName:   environTrackerName,
			APICallerName: apiCallerName,
			UnlockerName:  discoverSpacesCheckGateName,

			NewFacade: discoverspaces.NewFacade,
			NewWorker: discoverspaces.NewWorker,
		})),
		computeProvisionerName: ifNotDead(provisioner.Manifold(provisioner.ManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
		})),
		storageProvisionerName: ifNotDead(storageprovisioner.ModelManifold(storageprovisioner.ModelManifoldConfig{
			APICallerName: apiCallerName,
			ClockName:     clockName,
			Scope:         modelTag,
		})),
		firewallerName: ifNotDead(firewaller.Manifold(firewaller.ManifoldConfig{
			APICallerName: apiCallerName,
		})),
		unitAssignerName: ifNotDead(unitassigner.Manifold(unitassigner.ManifoldConfig{
			APICallerName: apiCallerName,
		})),
		serviceScalerName: ifNotDead(servicescaler.Manifold(servicescaler.ManifoldConfig{
			APICallerName: apiCallerName,
			NewFacade:     servicescaler.NewFacade,
			NewWorker:     servicescaler.New,
		})),
		instancePollerName: ifNotDead(instancepoller.Manifold(instancepoller.ManifoldConfig{
			APICallerName: apiCallerName,
			EnvironName:   environTrackerName,
		})),
		charmRevisionUpdaterName: ifNotDead(charmrevisionmanifold.Manifold(charmrevisionmanifold.ManifoldConfig{
			APICallerName: apiCallerName,
			ClockName:     clockName,
			Period:        config.CharmRevisionUpdateInterval,

			NewFacade: charmrevisionmanifold.NewAPIFacade,
			NewWorker: charmrevision.NewWorker,
		})),
		metricWorkerName: ifNotDead(metricworker.Manifold(metricworker.ManifoldConfig{
			APICallerName: apiCallerName,
		})),
		stateCleanerName: ifNotDead(cleaner.Manifold(cleaner.ManifoldConfig{
			APICallerName: apiCallerName,
		})),
		addressCleanerName: ifNotDead(addresser.Manifold(addresser.ManifoldConfig{
			APICallerName: apiCallerName,
		})),
		statusHistoryPrunerName: ifNotDead(statushistorypruner.Manifold(statushistorypruner.ManifoldConfig{
			APICallerName:    apiCallerName,
			MaxLogsPerEntity: config.EntityStatusHistoryCount,
			PruneInterval:    config.EntityStatusHistoryInterval,
			// TODO(fwereade): 2016-03-17 lp:1558657
			NewTimer: worker.NewTimer,
		})),
	}
}
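
The lifeFilter passed to both lifeflag manifolds isn't shown here; a hedged sketch of its likely shape, translating lifeflag's sentinel errors into signals the dependency engine understands (ErrRemoved is an assumed sentinel):

func lifeFilter(err error) error {
	switch errors.Cause(err) {
	case lifeflag.ErrValueChanged:
		// The entity's life has changed; bounce so the flag (and its
		// dependents) restart against the new value.
		return dependency.ErrBounce
	case lifeflag.ErrNotFound:
		// The model is gone entirely; assumed to unwind the engine.
		return ErrRemoved
	}
	return err
}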
Example No. 7
// Manifolds returns a set of co-configured manifolds covering the various
// responsibilities of a standalone unit agent. It also accepts the logSource
// argument because we haven't figured out how to thread all the logging bits
// through a dependency engine yet.
//
// Thou Shalt Not Use String Literals In This Function. Or Else.
func Manifolds(config ManifoldsConfig) dependency.Manifolds {

	// connectFilter exists to let us retry api connections immediately
	// on password change, rather than causing the dependency engine to
	// wait for a while.
	connectFilter := func(err error) error {
		cause := errors.Cause(err)
		if cause == apicaller.ErrChangedPassword {
			return dependency.ErrBounce
		} else if cause == apicaller.ErrConnectImpossible {
			return worker.ErrTerminateAgent
		}
		return err
	}

	return dependency.Manifolds{

		// The agent manifold references the enclosing agent, and is the
		// foundation stone on which most other manifolds ultimately depend.
		// (Currently, that is "all manifolds", but consider a shared clock.)
		agentName: agent.Manifold(config.Agent),

		// The machine lock manifold is a thin concurrent wrapper around an
		// FSLock in an agreed location. We expect it to be replaced with an
		// in-memory lock when the unit agent moves into the machine agent.
		machineLockName: machinelock.Manifold(machinelock.ManifoldConfig{
			AgentName: agentName,
		}),

		// The api-config-watcher manifold monitors the API server
		// addresses in the agent config and bounces when they
		// change. It's required as part of model migrations.
		apiConfigWatcherName: apiconfigwatcher.Manifold(apiconfigwatcher.ManifoldConfig{
			AgentName:          agentName,
			AgentConfigChanged: config.AgentConfigChanged,
		}),

		// The api caller is a thin concurrent wrapper around a connection
		// to some API server. It's used by many other manifolds, which all
		// select their own desired facades. It will be interesting to see
		// how this works when we consolidate the agents; might be best to
		// handle the auth changes server-side..?
		apiCallerName: apicaller.Manifold(apicaller.ManifoldConfig{
			AgentName:            agentName,
			APIConfigWatcherName: apiConfigWatcherName,
			APIOpen:              apicaller.APIOpen,
			NewConnection:        apicaller.ScaryConnect,
			Filter:               connectFilter,
		}),

		// The log sender is a leaf worker that sends log messages to some
		// API server, when configured so to do. We should only need one of
		// these in a consolidated agent.
		logSenderName: logsender.Manifold(logsender.ManifoldConfig{
			APICallerName: apiCallerName,
			LogSource:     config.LogSource,
		}),

		// The upgrader is a leaf worker that returns a specific error type
		// recognised by the unit agent, causing other workers to be stopped
		// and the agent to be restarted running the new tools. We should only
		// need one of these in a consolidated agent, but we'll need to be
		// careful about behavioural differences, and interactions with the
		// upgradesteps worker.
		upgraderName: upgrader.Manifold(upgrader.ManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
		}),

		migrationFortressName: fortress.Manifold(),

		// The migration minion handles the agent side aspects of model migrations.
		migrationMinionName: migrationminion.Manifold(migrationminion.ManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
			FortressName:  migrationFortressName,

			NewFacade: migrationminion.NewFacade,
			NewWorker: migrationminion.NewWorker,
		}),

		// The logging config updater is a leaf worker that indirectly
		// controls the messages sent via the log sender according to
		// changes in environment config. We should only need one of
		// these in a consolidated agent.
		loggingConfigUpdaterName: logger.Manifold(logger.ManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
		}),

		// The api address updater is a leaf worker that rewrites agent config
		// as the controller addresses change. We should only need one of
		// these in a consolidated agent.
		apiAddressUpdaterName: apiaddressupdater.Manifold(apiaddressupdater.ManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
		}),

		// The proxy config updater is a leaf worker that sets http/https/apt/etc
		// proxy settings.
		// TODO(fwereade): timing of this is suspicious. There was superstitious
		// code trying to run this early; if that ever helped, it was only by
		// coincidence. Probably we ought to be making components that might
		// need proxy config into explicit dependencies of the proxy updater...
		proxyConfigUpdaterName: proxyupdater.Manifold(proxyupdater.ManifoldConfig{
			APICallerName: apiCallerName,
		}),

		// The charmdir resource coordinates whether the charm directory is
		// available or not; after 'start' hook and before 'stop' hook
		// executes, and not during upgrades.
		charmDirName: fortress.Manifold(),

		// The leadership tracker attempts to secure and retain leadership of
		// the unit's service, and is consulted on such matters by the
		// uniter. As it stands today, we'll need one per unit in a
		// consolidated agent.
		leadershipTrackerName: leadership.Manifold(leadership.ManifoldConfig{
			AgentName:           agentName,
			APICallerName:       apiCallerName,
			LeadershipGuarantee: config.LeadershipGuarantee,
		}),

		// HookRetryStrategy uses a retrystrategy worker to get a
		// retry strategy that will be used by the uniter to run its hooks.
		hookRetryStrategyName: retrystrategy.Manifold(retrystrategy.ManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
			NewFacade:     retrystrategy.NewFacade,
			NewWorker:     retrystrategy.NewRetryStrategyWorker,
		}),

		// The uniter installs charms; manages the unit's presence in its
		// relations; creates subordinate units; runs all the hooks; sends
		// metrics; etc etc etc. We expect to break it up further in the
		// coming weeks, and to need one per unit in a consolidated agent
		// (and probably one for each component broken out).
		uniterName: uniter.Manifold(uniter.ManifoldConfig{
			AgentName:             agentName,
			APICallerName:         apiCallerName,
			LeadershipTrackerName: leadershipTrackerName,
			MachineLockName:       machineLockName,
			CharmDirName:          charmDirName,
			HookRetryStrategyName: hookRetryStrategyName,
		}),

		// TODO (mattyw) should be added to machine agent.
		metricSpoolName: spool.Manifold(spool.ManifoldConfig{
			AgentName: agentName,
		}),

		// The metric collect worker executes the collect-metrics hook in a
		// restricted context that can safely run concurrently with other hooks.
		metricCollectName: collect.Manifold(collect.ManifoldConfig{
			AgentName:       agentName,
			MetricSpoolName: metricSpoolName,
			CharmDirName:    charmDirName,
		}),

		// The meter status worker executes the meter-status-changed hook when it detects
		// that the meter status has changed.
		meterStatusName: meterstatus.Manifold(meterstatus.ManifoldConfig{
			AgentName:                agentName,
			APICallerName:            apiCallerName,
			MachineLockName:          machineLockName,
			NewHookRunner:            meterstatus.NewHookRunner,
			NewMeterStatusAPIClient:  msapi.NewClient,
			NewConnectedStatusWorker: meterstatus.NewConnectedStatusWorker,
			NewIsolatedStatusWorker:  meterstatus.NewIsolatedStatusWorker,
		}),

		// The metric sender worker periodically sends accumulated metrics to the controller.
		metricSenderName: sender.Manifold(sender.ManifoldConfig{
			AgentName:       agentName,
			APICallerName:   apiCallerName,
			MetricSpoolName: metricSpoolName,
		}),
	}
}
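
Both migrationFortressName and charmDirName are fortress manifolds; a hedged sketch of how a guest occupies one (the enclosing function is an assumption; Guest, Visit, and Abort are the fortress package's types):

func visitCharmDir(getResource dependency.GetResourceFunc, abort fortress.Abort) error {
	var guest fortress.Guest
	if err := getResource(charmDirName, &guest); err != nil {
		return err
	}
	// Visit blocks while the guard has the fortress locked down (for
	// example during a charm upgrade), then runs the func knowing the
	// charm directory will stay available until it returns.
	return guest.Visit(func() error {
		// ... read from the charm directory ...
		return nil
	}, abort)
}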
Example No. 8
// Manifolds returns a set of interdependent dependency manifolds that will
// run together to administer a model, as configured.
func Manifolds(config ManifoldsConfig) dependency.Manifolds {
	modelTag := config.Agent.CurrentConfig().Model()
	return dependency.Manifolds{

		// The first group are foundational; the agent and clock
		// which wrap those supplied in config, and the api-caller
		// through which everything else communicates with the
		// controller.
		agentName: agent.Manifold(config.Agent),
		clockName: clockManifold(config.Clock),
		apiConfigWatcherName: apiconfigwatcher.Manifold(apiconfigwatcher.ManifoldConfig{
			AgentName:          agentName,
			AgentConfigChanged: config.AgentConfigChanged,
		}),
		apiCallerName: apicaller.Manifold(apicaller.ManifoldConfig{
			AgentName:     agentName,
			APIOpen:       api.Open,
			NewConnection: apicaller.OnlyConnect,
			Filter:        apiConnectFilter,
		}),

		// The spaces-imported gate will be unlocked when space
		// discovery is known to be complete. Various manifolds
		// should also come to depend upon it (or rather, on a
		// Flag depending on it) in the future.
		spacesImportedGateName: gate.ManifoldEx(config.SpacesImportedGate),

		// All other manifolds should depend on at least one of these
		// three, which handle all the tasks that are safe and sane
		// to run in *all* controller machines.
		notDeadFlagName: lifeflag.Manifold(lifeflag.ManifoldConfig{
			APICallerName: apiCallerName,
			Entity:        modelTag,
			Result:        life.IsNotDead,
			Filter:        LifeFilter,

			NewFacade: lifeflag.NewFacade,
			NewWorker: lifeflag.NewWorker,
		}),
		notAliveFlagName: lifeflag.Manifold(lifeflag.ManifoldConfig{
			APICallerName: apiCallerName,
			Entity:        modelTag,
			Result:        life.IsNotAlive,
			Filter:        LifeFilter,

			NewFacade: lifeflag.NewFacade,
			NewWorker: lifeflag.NewWorker,
		}),
		isResponsibleFlagName: singular.Manifold(singular.ManifoldConfig{
			ClockName:     clockName,
			AgentName:     agentName,
			APICallerName: apiCallerName,
			Duration:      config.RunFlagDuration,

			NewFacade: singular.NewFacade,
			NewWorker: singular.NewWorker,
		}),

		// The migration workers collaborate to run migrations;
		// and to create a mechanism for running other workers
		// so they can't accidentally interfere with a migration
		// in progress. Such a manifold should (1) depend on the
		// migration-inactive flag, to know when to start or die;
		// and (2) occupy the migration-fortress, so as to avoid
		// possible interference with the minion (which will not
		// take action until it's gained sole control of the
		// fortress).
		//
		// Note that the fortress and flag will only exist while
		// the model is not dead; this frees their dependencies
		// from model-lifetime concerns.
		migrationFortressName: ifNotDead(fortress.Manifold()),
		migrationInactiveFlagName: ifNotDead(migrationflag.Manifold(migrationflag.ManifoldConfig{
			APICallerName: apiCallerName,
			Check:         migrationflag.IsTerminal,
			NewFacade:     migrationflag.NewFacade,
			NewWorker:     migrationflag.NewWorker,
		})),
		migrationMasterName: ifNotDead(migrationmaster.Manifold(migrationmaster.ManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
			FortressName:  migrationFortressName,
			Clock:         config.Clock,
			NewFacade:     migrationmaster.NewFacade,
			NewWorker:     config.NewMigrationMaster,
		})),

		// Everything else should be wrapped in ifResponsible,
		// ifNotAlive, ifNotDead, or ifNotMigrating (which also
		// implies NotDead), to ensure that only a single
		// controller is attempting to administer this model at
		// any one time.
		//
		// NOTE: not perfectly reliable at this stage? i.e. a
		// worker that ignores its stop signal for "too long"
		// might continue to take admin actions after the window
		// of responsibility closes. This *is* a pre-existing
		// problem, but demands some thought/care: e.g. should
		// we make sure the apiserver also closes any
		// connections that lose responsibility..? can we make
		// sure all possible environ operations are either time-
		// bounded or interruptible? etc
		//
		// On the other hand, all workers *should* be written in
		// the expectation of dealing with sucky infrastructure
		// running things in parallel unexpectedly, just because
		// the universe hates us and will engineer matters such
		// that it happens sometimes, even when we try to avoid
		// it.

		// The environ tracker could/should be used by several other
		// workers (firewaller, provisioners, address-cleaner?).
		environTrackerName: ifResponsible(environ.Manifold(environ.ManifoldConfig{
			APICallerName:  apiCallerName,
			NewEnvironFunc: config.NewEnvironFunc,
		})),

		// The undertaker is currently the only ifNotAlive worker.
		undertakerName: ifNotAlive(undertaker.Manifold(undertaker.ManifoldConfig{
			APICallerName: apiCallerName,
			EnvironName:   environTrackerName,

			NewFacade: undertaker.NewFacade,
			NewWorker: undertaker.NewWorker,
		})),

		// All the rest depend on ifNotMigrating.
		spaceImporterName: ifNotMigrating(discoverspaces.Manifold(discoverspaces.ManifoldConfig{
			EnvironName:   environTrackerName,
			APICallerName: apiCallerName,
			UnlockerName:  spacesImportedGateName,

			NewFacade: discoverspaces.NewFacade,
			NewWorker: discoverspaces.NewWorker,
		})),
		computeProvisionerName: ifNotMigrating(provisioner.Manifold(provisioner.ManifoldConfig{
			AgentName:          agentName,
			APICallerName:      apiCallerName,
			EnvironName:        environTrackerName,
			NewProvisionerFunc: provisioner.NewEnvironProvisioner,
		})),
		storageProvisionerName: ifNotMigrating(storageprovisioner.ModelManifold(storageprovisioner.ModelManifoldConfig{
			APICallerName: apiCallerName,
			ClockName:     clockName,
			EnvironName:   environTrackerName,
			Scope:         modelTag,
		})),
		firewallerName: ifNotMigrating(firewaller.Manifold(firewaller.ManifoldConfig{
			APICallerName: apiCallerName,
		})),
		unitAssignerName: ifNotMigrating(unitassigner.Manifold(unitassigner.ManifoldConfig{
			APICallerName: apiCallerName,
		})),
		applicationScalerName: ifNotMigrating(applicationscaler.Manifold(applicationscaler.ManifoldConfig{
			APICallerName: apiCallerName,
			NewFacade:     applicationscaler.NewFacade,
			NewWorker:     applicationscaler.New,
		})),
		instancePollerName: ifNotMigrating(instancepoller.Manifold(instancepoller.ManifoldConfig{
			APICallerName: apiCallerName,
			EnvironName:   environTrackerName,
			ClockName:     clockName,
			Delay:         config.InstPollerAggregationDelay,
		})),
		charmRevisionUpdaterName: ifNotMigrating(charmrevisionmanifold.Manifold(charmrevisionmanifold.ManifoldConfig{
			APICallerName: apiCallerName,
			ClockName:     clockName,
			Period:        config.CharmRevisionUpdateInterval,

			NewFacade: charmrevisionmanifold.NewAPIFacade,
			NewWorker: charmrevision.NewWorker,
		})),
		metricWorkerName: ifNotMigrating(metricworker.Manifold(metricworker.ManifoldConfig{
			APICallerName: apiCallerName,
		})),
		stateCleanerName: ifNotMigrating(cleaner.Manifold(cleaner.ManifoldConfig{
			APICallerName: apiCallerName,
		})),
		statusHistoryPrunerName: ifNotMigrating(statushistorypruner.Manifold(statushistorypruner.ManifoldConfig{
			APICallerName:  apiCallerName,
			MaxHistoryTime: config.StatusHistoryPrunerMaxHistoryTime,
			MaxHistoryMB:   config.StatusHistoryPrunerMaxHistoryMB,
			PruneInterval:  config.StatusHistoryPrunerInterval,
			// TODO(fwereade): 2016-03-17 lp:1558657
			NewTimer: worker.NewTimer,
		})),
		machineUndertakerName: ifNotMigrating(machineundertaker.Manifold(machineundertaker.ManifoldConfig{
			APICallerName: apiCallerName,
			EnvironName:   environTrackerName,
			NewWorker:     machineundertaker.NewWorker,
		})),
	}
}
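
ifNotMigrating (like ifResponsible, ifNotAlive, and ifNotDead) is a decorator defined elsewhere; a hedged sketch of the pattern it implements, per the comment above -- depend on the migration-inactive flag and occupy the fortress (the local flag interface is an assumption):

func ifNotMigrating(manifold dependency.Manifold) dependency.Manifold {
	inner := manifold.Start
	manifold.Inputs = append(manifold.Inputs,
		migrationInactiveFlagName, migrationFortressName,
	)
	manifold.Start = func(getResource dependency.GetResourceFunc) (worker.Worker, error) {
		var inactive flag // assumed Check() bool interface
		if err := getResource(migrationInactiveFlagName, &inactive); err != nil {
			return nil, err
		}
		if !inactive.Check() {
			return nil, dependency.ErrMissing
		}
		// A full implementation would also occupy the fortress (see
		// fortress.Occupy) so the worker cannot overlap a migration.
		return inner(getResource)
	}
	return manifold
}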