Example #1
func (s *ManifoldSuite) SetUpTest(c *gc.C) {
	s.IsolationSuite.SetUpTest(c)
	s.Stub = testing.Stub{}
	s.manifold = machinelock.Manifold(machinelock.ManifoldConfig{
		AgentName: "agent-name",
	})
	s.context = dt.StubContext(nil, map[string]interface{}{
		"agent-name": &dummyAgent{},
	})

	lock, err := fslock.NewLock(c.MkDir(), "test-lock", fslock.Defaults())
	c.Assert(err, jc.ErrorIsNil)
	s.lock = lock
	s.PatchValue(machinelock.CreateLock, func(dataDir string) (*fslock.Lock, error) {
		s.AddCall("createLock", dataDir)
		if err := s.NextErr(); err != nil {
			return nil, err
		}
		return s.lock, nil
	})
}
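
A short sketch (not part of the original listing; it assumes the juju/testing Stub helpers such as CheckCallNames and the StubContext wiring set up above, and the test name is hypothetical) of how a test built on this SetUpTest might exercise the manifold: start it against the stub context and verify that the patched createLock hook was invoked.

func (s *ManifoldSuite) TestStartCallsCreateLock(c *gc.C) {
	// Start the manifold using the stub context wired up in SetUpTest.
	w, err := s.manifold.Start(s.context)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(w, gc.NotNil)

	// The patched CreateLock should have been called exactly once.
	s.CheckCallNames(c, "createLock")
}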
Example #2
func (s *ManifoldSuite) SetUpTest(c *gc.C) {
	s.IsolationSuite.SetUpTest(c)
	s.Stub = testing.Stub{}
	s.manifold = machinelock.Manifold(machinelock.ManifoldConfig{
		AgentName: "agent-name",
	})
	s.getResource = dt.StubGetResource(dt.StubResources{
		"agent-name": dt.StubResource{Output: &dummyAgent{}},
	})

	lock, err := fslock.NewLock(c.MkDir(), "test-lock")
	c.Assert(err, jc.ErrorIsNil)
	s.lock = lock
	s.PatchValue(machinelock.CreateLock, func(dataDir string) (*fslock.Lock, error) {
		s.AddCall("createLock", dataDir)
		if err := s.NextErr(); err != nil {
			return nil, err
		}
		return s.lock, nil
	})
}
Example #3
// Manifolds returns a set of co-configured manifolds covering the various
// responsibilities of a standalone unit agent. It also accepts the logSource
// argument because we haven't figured out how to thread all the logging bits
// through a dependency engine yet.
//
// Thou Shalt Not Use String Literals In This Function. Or Else.
func Manifolds(config ManifoldsConfig) dependency.Manifolds {
	return dependency.Manifolds{

		// The agent manifold references the enclosing agent, and is the
		// foundation stone on which most other manifolds ultimately depend.
		// (Currently, that is "all manifolds", but consider a shared clock.)
		AgentName: agent.Manifold(config.Agent),

		// The machine lock manifold is a thin concurrent wrapper around an
		// FSLock in an agreed location. We expect it to be replaced with an
		// in-memory lock when the unit agent moves into the machine agent.
		MachineLockName: machinelock.Manifold(machinelock.ManifoldConfig{
			AgentName: AgentName,
		}),

		// The api caller is a thin concurrent wrapper around a connection
		// to some API server. It's used by many other manifolds, which all
		// select their own desired facades. It will be interesting to see
		// how this works when we consolidate the agents; might be best to
		// handle the auth changes server-side..?
		APICallerName: apicaller.Manifold(apicaller.ManifoldConfig{
			AgentName:       AgentName,
			APIInfoGateName: APIInfoGateName,
		}),

		// This manifold is used to coordinate between the api caller and the
		// log sender, which share the API credentials that the API caller may
		// update. To avoid surprising races, the log sender waits for the api
		// caller to unblock this, indicating that any password dance has been
		// completed and the log-sender can now connect without confusion.
		APIInfoGateName: gate.Manifold(),

		// The log sender is a leaf worker that sends log messages to some
		// API server, when configured so to do. We should only need one of
		// these in a consolidated agent.
		LogSenderName: logsender.Manifold(logsender.ManifoldConfig{
			AgentName:       AgentName,
			APIInfoGateName: APIInfoGateName,
			LogSource:       config.LogSource,
		}),

		// The rsyslog config updater is a leaf worker that causes rsyslog
		// to send messages to the state servers. We should only need one
		// of these in a consolidated agent.
		RsyslogConfigUpdaterName: rsyslog.Manifold(rsyslog.ManifoldConfig{
			AgentName:     AgentName,
			APICallerName: APICallerName,
		}),

		// The logging config updater is a leaf worker that indirectly
		// controls the messages sent via the log sender or rsyslog,
		// according to changes in environment config. We should only need
		// one of these in a consolidated agent.
		LoggingConfigUpdaterName: logger.Manifold(logger.ManifoldConfig{
			AgentName:     AgentName,
			APICallerName: APICallerName,
		}),

		// The api address updater is a leaf worker that rewrites agent config
		// as the state server addresses change. We should only need one of
		// these in a consolidated agent.
		APIAdddressUpdaterName: apiaddressupdater.Manifold(apiaddressupdater.ManifoldConfig{
			AgentName:     AgentName,
			APICallerName: APICallerName,
		}),

		// The proxy config updater is a leaf worker that sets http/https/apt/etc
		// proxy settings.
		// TODO(fwereade): timing of this is suspicious. There was superstitious
		// code trying to run this early; if that ever helped, it was only by
		// coincidence. Probably we ought to be making components that might
		// need proxy config into explicit dependencies of the proxy updater...
		ProxyConfigUpdaterName: proxyupdater.Manifold(proxyupdater.ManifoldConfig{
			APICallerName: APICallerName,
		}),

		// The upgrader is a leaf worker that returns a specific error type
		// recognised by the unit agent, causing other workers to be stopped
		// and the agent to be restarted running the new tools. We should only
		// need one of these in a consolidated agent, but we'll need to be
		// careful about behavioural differences, and interactions with the
		// upgrade-steps worker.
		UpgraderName: upgrader.Manifold(upgrader.ManifoldConfig{
			AgentName:     AgentName,
			APICallerName: APICallerName,
		}),

		// The leadership tracker attempts to secure and retain leadership of
		// the unit's service, and is consulted on such matters by the
		// uniter. As it stands today, we'll need one per unit in a
		// consolidated agent.
		LeadershipTrackerName: leadership.Manifold(leadership.ManifoldConfig{
			AgentName:           AgentName,
			APICallerName:       APICallerName,
			LeadershipGuarantee: config.LeadershipGuarantee,
		}),

		// The uniter installs charms; manages the unit's presence in its
		// relations; creates subordinate units; runs all the hooks; sends
		// metrics; etc etc etc. We expect to break it up further in the
		// coming weeks, and to need one per unit in a consolidated agent
		// (and probably one for each component broken out).
		UniterName: uniter.Manifold(uniter.ManifoldConfig{
			AgentName:             AgentName,
			APICallerName:         APICallerName,
			LeadershipTrackerName: LeadershipTrackerName,
			MachineLockName:       MachineLockName,
			CharmDirName:          CharmDirName,
		}),

		// TODO (mattyw) should be added to machine agent.
		MetricSpoolName: spool.Manifold(spool.ManifoldConfig{
			AgentName: AgentName,
		}),

		// The charmdir resource tracks whether the charm directory is available or
		// not; after 'start' hook and before 'stop' hook executes, and not during
		// upgrades.
		CharmDirName: charmdir.Manifold(),

		// The metric collect worker executes the collect-metrics hook in a
		// restricted context that can safely run concurrently with other hooks.
		MetricCollectName: collect.Manifold(collect.ManifoldConfig{
			AgentName:       AgentName,
			APICallerName:   APICallerName,
			MetricSpoolName: MetricSpoolName,
			CharmDirName:    CharmDirName,
		}),

		// The meter status worker executes the meter-status-changed hook when it detects
		// that the meter status has changed.
		MeterStatusName: meterstatus.Manifold(meterstatus.ManifoldConfig{
			AgentName:       AgentName,
			APICallerName:   APICallerName,
			MachineLockName: MachineLockName,
		}),

		// The metric sender worker periodically sends accumulated metrics to the state server.
		MetricSenderName: sender.Manifold(sender.ManifoldConfig{
			APICallerName:   APICallerName,
			MetricSpoolName: MetricSpoolName,
		}),
	}
}
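
A short sketch (hypothetical worker and constructor, assuming the GetResourceFunc-style dependency API of this Juju vintage) of why this function shuns string literals: each consuming manifold lists the same shared name constants as its Inputs, so the wiring in Manifolds and the declarations in each worker cannot drift apart.

func exampleManifold() dependency.Manifold {
	return dependency.Manifold{
		// Inputs name the resources this worker depends on, using the same
		// constants that key the Manifolds map above.
		Inputs: []string{AgentName, APICallerName},
		Start: func(getResource dependency.GetResourceFunc) (worker.Worker, error) {
			var a agent.Agent
			if err := getResource(AgentName, &a); err != nil {
				return nil, err
			}
			var apiCaller base.APICaller
			if err := getResource(APICallerName, &apiCaller); err != nil {
				return nil, err
			}
			return newExampleWorker(a, apiCaller) // hypothetical constructor
		},
	}
}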
Example #4
File: manifolds.go  Project: makyo/juju
// Manifolds returns a set of co-configured manifolds covering the various
// responsibilities of a standalone unit agent. It also accepts the logSource
// argument because we haven't figured out how to thread all the logging bits
// through a dependency engine yet.
//
// Thou Shalt Not Use String Literals In This Function. Or Else.
func Manifolds(config ManifoldsConfig) dependency.Manifolds {

	// connectFilter exists to let us retry api connections immediately
	// on password change, rather than causing the dependency engine to
	// wait for a while.
	connectFilter := func(err error) error {
		cause := errors.Cause(err)
		if cause == apicaller.ErrChangedPassword {
			return dependency.ErrBounce
		} else if cause == apicaller.ErrConnectImpossible {
			return worker.ErrTerminateAgent
		}
		return err
	}

	return dependency.Manifolds{

		// The agent manifold references the enclosing agent, and is the
		// foundation stone on which most other manifolds ultimately depend.
		// (Currently, that is "all manifolds", but consider a shared clock.)
		agentName: agent.Manifold(config.Agent),

		// The machine lock manifold is a thin concurrent wrapper around an
		// FSLock in an agreed location. We expect it to be replaced with an
		// in-memory lock when the unit agent moves into the machine agent.
		machineLockName: machinelock.Manifold(machinelock.ManifoldConfig{
			AgentName: agentName,
		}),

		// The api-config-watcher manifold monitors the API server
		// addresses in the agent config and bounces when they
		// change. It's required as part of model migrations.
		apiConfigWatcherName: apiconfigwatcher.Manifold(apiconfigwatcher.ManifoldConfig{
			AgentName:          agentName,
			AgentConfigChanged: config.AgentConfigChanged,
		}),

		// The api caller is a thin concurrent wrapper around a connection
		// to some API server. It's used by many other manifolds, which all
		// select their own desired facades. It will be interesting to see
		// how this works when we consolidate the agents; might be best to
		// handle the auth changes server-side..?
		apiCallerName: apicaller.Manifold(apicaller.ManifoldConfig{
			AgentName:            agentName,
			APIConfigWatcherName: apiConfigWatcherName,
			APIOpen:              apicaller.APIOpen,
			NewConnection:        apicaller.ScaryConnect,
			Filter:               connectFilter,
		}),

		// The log sender is a leaf worker that sends log messages to some
		// API server, when configured so to do. We should only need one of
		// these in a consolidated agent.
		logSenderName: logsender.Manifold(logsender.ManifoldConfig{
			APICallerName: apiCallerName,
			LogSource:     config.LogSource,
		}),

		// The upgrader is a leaf worker that returns a specific error type
		// recognised by the unit agent, causing other workers to be stopped
		// and the agent to be restarted running the new tools. We should only
		// need one of these in a consolidated agent, but we'll need to be
		// careful about behavioural differences, and interactions with the
		// upgradesteps worker.
		upgraderName: upgrader.Manifold(upgrader.ManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
		}),

		migrationFortressName: fortress.Manifold(),

		// The migration minion handles the agent side aspects of model migrations.
		migrationMinionName: migrationminion.Manifold(migrationminion.ManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
			FortressName:  migrationFortressName,

			NewFacade: migrationminion.NewFacade,
			NewWorker: migrationminion.NewWorker,
		}),

		// The logging config updater is a leaf worker that indirectly
		// controls the messages sent via the log sender according to
		// changes in environment config. We should only need one of
		// these in a consolidated agent.
		loggingConfigUpdaterName: logger.Manifold(logger.ManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
		}),

		// The api address updater is a leaf worker that rewrites agent config
		// as the controller addresses change. We should only need one of
		// these in a consolidated agent.
		apiAddressUpdaterName: apiaddressupdater.Manifold(apiaddressupdater.ManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
		}),

		// The proxy config updater is a leaf worker that sets http/https/apt/etc
		// proxy settings.
		// TODO(fwereade): timing of this is suspicious. There was superstitious
		// code trying to run this early; if that ever helped, it was only by
		// coincidence. Probably we ought to be making components that might
		// need proxy config into explicit dependencies of the proxy updater...
		proxyConfigUpdaterName: proxyupdater.Manifold(proxyupdater.ManifoldConfig{
			APICallerName: apiCallerName,
		}),

		// The charmdir resource coordinates whether the charm directory is
		// available or not; after 'start' hook and before 'stop' hook
		// executes, and not during upgrades.
		charmDirName: fortress.Manifold(),

		// The leadership tracker attempts to secure and retain leadership of
		// the unit's service, and is consulted on such matters by the
		// uniter. As it stands today, we'll need one per unit in a
		// consolidated agent.
		leadershipTrackerName: leadership.Manifold(leadership.ManifoldConfig{
			AgentName:           agentName,
			APICallerName:       apiCallerName,
			LeadershipGuarantee: config.LeadershipGuarantee,
		}),

		// HookRetryStrategy uses a retrystrategy worker to get a
		// retry strategy that will be used by the uniter to run its hooks.
		hookRetryStrategyName: retrystrategy.Manifold(retrystrategy.ManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
			NewFacade:     retrystrategy.NewFacade,
			NewWorker:     retrystrategy.NewRetryStrategyWorker,
		}),

		// The uniter installs charms; manages the unit's presence in its
		// relations; creates subordinate units; runs all the hooks; sends
		// metrics; etc etc etc. We expect to break it up further in the
		// coming weeks, and to need one per unit in a consolidated agent
		// (and probably one for each component broken out).
		uniterName: uniter.Manifold(uniter.ManifoldConfig{
			AgentName:             agentName,
			APICallerName:         apiCallerName,
			LeadershipTrackerName: leadershipTrackerName,
			MachineLockName:       machineLockName,
			CharmDirName:          charmDirName,
			HookRetryStrategyName: hookRetryStrategyName,
		}),

		// TODO (mattyw) should be added to machine agent.
		metricSpoolName: spool.Manifold(spool.ManifoldConfig{
			AgentName: agentName,
		}),

		// The metric collect worker executes the collect-metrics hook in a
		// restricted context that can safely run concurrently with other hooks.
		metricCollectName: collect.Manifold(collect.ManifoldConfig{
			AgentName:       agentName,
			MetricSpoolName: metricSpoolName,
			CharmDirName:    charmDirName,
		}),

		// The meter status worker executes the meter-status-changed hook when it detects
		// that the meter status has changed.
		meterStatusName: meterstatus.Manifold(meterstatus.ManifoldConfig{
			AgentName:                agentName,
			APICallerName:            apiCallerName,
			MachineLockName:          machineLockName,
			NewHookRunner:            meterstatus.NewHookRunner,
			NewMeterStatusAPIClient:  msapi.NewClient,
			NewConnectedStatusWorker: meterstatus.NewConnectedStatusWorker,
			NewIsolatedStatusWorker:  meterstatus.NewIsolatedStatusWorker,
		}),

		// The metric sender worker periodically sends accumulated metrics to the controller.
		metricSenderName: sender.Manifold(sender.ManifoldConfig{
			AgentName:       agentName,
			APICallerName:   apiCallerName,
			MetricSpoolName: metricSpoolName,
		}),
	}
}
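
A minimal sketch (the EngineConfig field names are assumptions and vary across Juju versions, and startEngine is a hypothetical helper) of how the unit agent consumes Manifolds: build a dependency engine, then install every returned manifold into it under its agreed name, after which the engine starts workers as their declared inputs become available.

func startEngine(config ManifoldsConfig) (worker.Worker, error) {
	// Assumed EngineConfig fields; exact names and defaults differ by Juju version.
	engine, err := dependency.NewEngine(dependency.EngineConfig{
		IsFatal:     func(err error) bool { return errors.Cause(err) == worker.ErrTerminateAgent },
		WorstError:  func(err0, _ error) error { return err0 },
		ErrorDelay:  3 * time.Second,
		BounceDelay: 10 * time.Millisecond,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	// Install wires every manifold returned by Manifolds into the engine.
	if err := dependency.Install(engine, Manifolds(config)); err != nil {
		worker.Stop(engine)
		return nil, errors.Trace(err)
	}
	return engine, nil
}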