func (s *ManifoldSuite) TestStartMissingAPICaller(c *gc.C) { manifold := applicationscaler.Manifold(applicationscaler.ManifoldConfig{ APICallerName: "api-caller", }) context := dt.StubContext(nil, map[string]interface{}{ "api-caller": dependency.ErrMissing, }) worker, err := manifold.Start(context) c.Check(errors.Cause(err), gc.Equals, dependency.ErrMissing) c.Check(worker, gc.IsNil) }
func (s *ManifoldSuite) TestStartFacadeError(c *gc.C) { expectCaller := &fakeCaller{} manifold := applicationscaler.Manifold(applicationscaler.ManifoldConfig{ APICallerName: "api-caller", NewFacade: func(apiCaller base.APICaller) (applicationscaler.Facade, error) { c.Check(apiCaller, gc.Equals, expectCaller) return nil, errors.New("blort") }, }) context := dt.StubContext(nil, map[string]interface{}{ "api-caller": expectCaller, }) worker, err := manifold.Start(context) c.Check(err, gc.ErrorMatches, "blort") c.Check(worker, gc.IsNil) }
func (s *ManifoldSuite) TestSuccess(c *gc.C) { expectWorker := &fakeWorker{} manifold := applicationscaler.Manifold(applicationscaler.ManifoldConfig{ APICallerName: "api-caller", NewFacade: func(_ base.APICaller) (applicationscaler.Facade, error) { return &fakeFacade{}, nil }, NewWorker: func(_ applicationscaler.Config) (worker.Worker, error) { return expectWorker, nil }, }) context := dt.StubContext(nil, map[string]interface{}{ "api-caller": &fakeCaller{}, }) worker, err := manifold.Start(context) c.Check(err, jc.ErrorIsNil) c.Check(worker, gc.Equals, expectWorker) }
func (s *ManifoldSuite) TestStartWorkerError(c *gc.C) { expectFacade := &fakeFacade{} manifold := applicationscaler.Manifold(applicationscaler.ManifoldConfig{ APICallerName: "api-caller", NewFacade: func(_ base.APICaller) (applicationscaler.Facade, error) { return expectFacade, nil }, NewWorker: func(config applicationscaler.Config) (worker.Worker, error) { c.Check(config.Validate(), jc.ErrorIsNil) c.Check(config.Facade, gc.Equals, expectFacade) return nil, errors.New("splot") }, }) context := dt.StubContext(nil, map[string]interface{}{ "api-caller": &fakeCaller{}, }) worker, err := manifold.Start(context) c.Check(err, gc.ErrorMatches, "splot") c.Check(worker, gc.IsNil) }
func (s *ManifoldSuite) TestOutput(c *gc.C) { manifold := applicationscaler.Manifold(applicationscaler.ManifoldConfig{}) c.Check(manifold.Output, gc.IsNil) }
func (s *ManifoldSuite) TestInputs(c *gc.C) { manifold := applicationscaler.Manifold(applicationscaler.ManifoldConfig{ APICallerName: "washington the terrible", }) c.Check(manifold.Inputs, jc.DeepEquals, []string{"washington the terrible"}) }
// Manifolds returns a set of interdependent dependency manifolds that will
// run together to administer a model, as configured.
func Manifolds(config ManifoldsConfig) dependency.Manifolds {
	// The model tag identifies the entity whose lifecycle the flag
	// manifolds below watch.
	modelTag := config.Agent.CurrentConfig().Model()
	return dependency.Manifolds{

		// The first group are foundational; the agent and clock
		// which wrap those supplied in config, and the api-caller
		// through which everything else communicates with the
		// controller.
		agentName: agent.Manifold(config.Agent),
		clockName: clockManifold(config.Clock),
		apiConfigWatcherName: apiconfigwatcher.Manifold(apiconfigwatcher.ManifoldConfig{
			AgentName:          agentName,
			AgentConfigChanged: config.AgentConfigChanged,
		}),
		apiCallerName: apicaller.Manifold(apicaller.ManifoldConfig{
			AgentName:     agentName,
			APIOpen:       api.Open,
			NewConnection: apicaller.OnlyConnect,
			Filter:        apiConnectFilter,
		}),

		// The spaces-imported gate will be unlocked when space
		// discovery is known to be complete. Various manifolds
		// should also come to depend upon it (or rather, on a
		// Flag depending on it) in the future.
		spacesImportedGateName: gate.ManifoldEx(config.SpacesImportedGate),

		// All other manifolds should depend on at least one of these
		// three, which handle all the tasks that are safe and sane
		// to run in *all* controller machines.
		notDeadFlagName: lifeflag.Manifold(lifeflag.ManifoldConfig{
			APICallerName: apiCallerName,
			Entity:        modelTag,
			Result:        life.IsNotDead,
			Filter:        LifeFilter,
			NewFacade:     lifeflag.NewFacade,
			NewWorker:     lifeflag.NewWorker,
		}),
		notAliveFlagName: lifeflag.Manifold(lifeflag.ManifoldConfig{
			APICallerName: apiCallerName,
			Entity:        modelTag,
			Result:        life.IsNotAlive,
			Filter:        LifeFilter,
			NewFacade:     lifeflag.NewFacade,
			NewWorker:     lifeflag.NewWorker,
		}),
		isResponsibleFlagName: singular.Manifold(singular.ManifoldConfig{
			ClockName:     clockName,
			AgentName:     agentName,
			APICallerName: apiCallerName,
			Duration:      config.RunFlagDuration,
			NewFacade:     singular.NewFacade,
			NewWorker:     singular.NewWorker,
		}),

		// The migration workers collaborate to run migrations;
		// and to create a mechanism for running other workers
		// so they can't accidentally interfere with a migration
		// in progress. Such a manifold should (1) depend on the
		// migration-inactive flag, to know when to start or die;
		// and (2) occupy the migration-fortress, so as to avoid
		// possible interference with the minion (which will not
		// take action until it's gained sole control of the
		// fortress).
		//
		// Note that the fortress and flag will only exist while
		// the model is not dead; this frees their dependencies
		// from model-lifetime concerns.
		migrationFortressName: ifNotDead(fortress.Manifold()),
		migrationInactiveFlagName: ifNotDead(migrationflag.Manifold(migrationflag.ManifoldConfig{
			APICallerName: apiCallerName,
			Check:         migrationflag.IsTerminal,
			NewFacade:     migrationflag.NewFacade,
			NewWorker:     migrationflag.NewWorker,
		})),
		migrationMasterName: ifNotDead(migrationmaster.Manifold(migrationmaster.ManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
			FortressName:  migrationFortressName,
			Clock:         config.Clock,
			NewFacade:     migrationmaster.NewFacade,
			NewWorker:     config.NewMigrationMaster,
		})),

		// Everything else should be wrapped in ifResponsible,
		// ifNotAlive, ifNotDead, or ifNotMigrating (which also
		// implies NotDead), to ensure that only a single
		// controller is attempting to administer this model at
		// any one time.
		//
		// NOTE: not perfectly reliable at this stage? i.e. a
		// worker that ignores its stop signal for "too long"
		// might continue to take admin actions after the window
		// of responsibility closes. This *is* a pre-existing
		// problem, but demands some thought/care: e.g. should
		// we make sure the apiserver also closes any
		// connections that lose responsibility..? can we make
		// sure all possible environ operations are either time-
		// bounded or interruptible? etc
		//
		// On the other hand, all workers *should* be written in
		// the expectation of dealing with sucky infrastructure
		// running things in parallel unexpectedly, just because
		// the universe hates us and will engineer matters such
		// that it happens sometimes, even when we try to avoid
		// it.

		// The environ tracker could/should be used by several other
		// workers (firewaller, provisioners, address-cleaner?).
		environTrackerName: ifResponsible(environ.Manifold(environ.ManifoldConfig{
			APICallerName:  apiCallerName,
			NewEnvironFunc: config.NewEnvironFunc,
		})),

		// The undertaker is currently the only ifNotAlive worker.
		undertakerName: ifNotAlive(undertaker.Manifold(undertaker.ManifoldConfig{
			APICallerName: apiCallerName,
			EnvironName:   environTrackerName,
			NewFacade:     undertaker.NewFacade,
			NewWorker:     undertaker.NewWorker,
		})),

		// All the rest depend on ifNotMigrating.
		spaceImporterName: ifNotMigrating(discoverspaces.Manifold(discoverspaces.ManifoldConfig{
			EnvironName:   environTrackerName,
			APICallerName: apiCallerName,
			UnlockerName:  spacesImportedGateName,
			NewFacade:     discoverspaces.NewFacade,
			NewWorker:     discoverspaces.NewWorker,
		})),
		computeProvisionerName: ifNotMigrating(provisioner.Manifold(provisioner.ManifoldConfig{
			AgentName:          agentName,
			APICallerName:      apiCallerName,
			EnvironName:        environTrackerName,
			NewProvisionerFunc: provisioner.NewEnvironProvisioner,
		})),
		storageProvisionerName: ifNotMigrating(storageprovisioner.ModelManifold(storageprovisioner.ModelManifoldConfig{
			APICallerName: apiCallerName,
			ClockName:     clockName,
			EnvironName:   environTrackerName,
			Scope:         modelTag,
		})),
		firewallerName: ifNotMigrating(firewaller.Manifold(firewaller.ManifoldConfig{
			APICallerName: apiCallerName,
		})),
		unitAssignerName: ifNotMigrating(unitassigner.Manifold(unitassigner.ManifoldConfig{
			APICallerName: apiCallerName,
		})),
		applicationScalerName: ifNotMigrating(applicationscaler.Manifold(applicationscaler.ManifoldConfig{
			APICallerName: apiCallerName,
			NewFacade:     applicationscaler.NewFacade,
			NewWorker:     applicationscaler.New,
		})),
		instancePollerName: ifNotMigrating(instancepoller.Manifold(instancepoller.ManifoldConfig{
			APICallerName: apiCallerName,
			EnvironName:   environTrackerName,
			ClockName:     clockName,
			Delay:         config.InstPollerAggregationDelay,
		})),
		charmRevisionUpdaterName: ifNotMigrating(charmrevisionmanifold.Manifold(charmrevisionmanifold.ManifoldConfig{
			APICallerName: apiCallerName,
			ClockName:     clockName,
			Period:        config.CharmRevisionUpdateInterval,
			NewFacade:     charmrevisionmanifold.NewAPIFacade,
			NewWorker:     charmrevision.NewWorker,
		})),
		metricWorkerName: ifNotMigrating(metricworker.Manifold(metricworker.ManifoldConfig{
			APICallerName: apiCallerName,
		})),
		stateCleanerName: ifNotMigrating(cleaner.Manifold(cleaner.ManifoldConfig{
			APICallerName: apiCallerName,
		})),
		statusHistoryPrunerName: ifNotMigrating(statushistorypruner.Manifold(statushistorypruner.ManifoldConfig{
			APICallerName:  apiCallerName,
			MaxHistoryTime: config.StatusHistoryPrunerMaxHistoryTime,
			MaxHistoryMB:   config.StatusHistoryPrunerMaxHistoryMB,
			PruneInterval:  config.StatusHistoryPrunerInterval,
			// TODO(fwereade): 2016-03-17 lp:1558657
			NewTimer: worker.NewTimer,
		})),
		machineUndertakerName: ifNotMigrating(machineundertaker.Manifold(machineundertaker.ManifoldConfig{
			APICallerName: apiCallerName,
			EnvironName:   environTrackerName,
			NewWorker:     machineundertaker.NewWorker,
		})),
	}
}