Example #1
func (s *ManifoldSuite) TestStartNewWorkerError(c *gc.C) {
	expectFacade := &fakeFacade{}
	manifold := singular.Manifold(singular.ManifoldConfig{
		ClockName:     "clock",
		APICallerName: "api-caller",
		AgentName:     "agent",
		Duration:      time.Minute,
		NewFacade: func(_ base.APICaller, _ names.MachineTag) (singular.Facade, error) {
			return expectFacade, nil
		},
		NewWorker: func(config singular.FlagConfig) (worker.Worker, error) {
			c.Check(config.Facade, gc.Equals, expectFacade)
			err := config.Validate()
			c.Check(err, jc.ErrorIsNil)
			return nil, errors.New("blomp tik")
		},
	})
	getResource := dt.StubGetResource(dt.StubResources{
		"clock":      dt.StubResource{Output: &fakeClock{}},
		"api-caller": dt.StubResource{Output: &fakeAPICaller{}},
		"agent":      dt.StubResource{Output: &mockAgent{}},
	})

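	// The NewWorker error must be passed through verbatim, with no worker returned.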
	worker, err := manifold.Start(getResource)
	c.Check(err, gc.ErrorMatches, "blomp tik")
	c.Check(worker, gc.IsNil)
}
Example #2
func (s *ManifoldSuite) TestOutputBadWorker(c *gc.C) {
	manifold := singular.Manifold(singular.ManifoldConfig{})
	var out dependency.Flag
	err := manifold.Output(&fakeWorker{}, &out)
	c.Check(err, gc.ErrorMatches, `expected in to be a \*FlagWorker, got a .*`)
	c.Check(out, gc.IsNil)
}
Example #3
func (s *ManifoldSuite) TestOutputBadWorker(c *gc.C) {
	manifold := singular.Manifold(singular.ManifoldConfig{})
	var out engine.Flag
	err := manifold.Output(&fakeWorker{}, &out)
	c.Check(err, gc.ErrorMatches, `expected in to implement Flag; got a .*`)
	c.Check(out, gc.IsNil)
}
Example #4
func (s *ManifoldSuite) TestInputs(c *gc.C) {
	manifold := singular.Manifold(singular.ManifoldConfig{
		ClockName:     "harriet",
		APICallerName: "kim",
		AgentName:     "jenny",
	})
	expectInputs := []string{"harriet", "kim", "jenny"}
	c.Check(manifold.Inputs, jc.DeepEquals, expectInputs)
}
Example #5
func (s *ManifoldSuite) TestOutputSuccess(c *gc.C) {
	manifold := singular.Manifold(singular.ManifoldConfig{})
	fix := newFixture(c)
	fix.Run(c, func(flag *singular.FlagWorker, _ *coretesting.Clock, _ func()) {
		var out dependency.Flag
		err := manifold.Output(flag, &out)
		c.Check(err, jc.ErrorIsNil)
		c.Check(out, gc.Equals, flag)
	})
}
Example #6
func (s *ManifoldSuite) TestOutputBadResult(c *gc.C) {
	manifold := singular.Manifold(singular.ManifoldConfig{})
	fix := newFixture(c)
	fix.Run(c, func(flag *singular.FlagWorker, _ *coretesting.Clock, _ func()) {
		var out interface{}
		err := manifold.Output(flag, &out)
		c.Check(err, gc.ErrorMatches, `expected out to be a \*dependency.Flag, got a .*`)
		c.Check(out, gc.IsNil)
	})
}
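
Examples #2, #3, #5 and #6 all exercise the same Output contract: the manifold's Output function must type-assert both the worker it receives and the pointer it is asked to fill, and fail loudly when either is wrong. The following self-contained sketch shows that pattern; the Flag interface and flagWorker type are stand-ins chosen for illustration, not the real singular, dependency or engine types.

package main

import "fmt"

// Flag mirrors the shape of the flag interface the tests above request.
type Flag interface {
	Check() bool
}

// flagWorker mirrors singular.FlagWorker just enough to satisfy Flag.
type flagWorker struct{ valid bool }

func (w *flagWorker) Check() bool { return w.valid }

// output converts a running *flagWorker into a Flag for downstream
// manifolds, rejecting unexpected worker or result types.
func output(in interface{}, out interface{}) error {
	inWorker, ok := in.(*flagWorker)
	if !ok {
		return fmt.Errorf("expected in to be a *flagWorker, got a %T", in)
	}
	outPointer, ok := out.(*Flag)
	if !ok {
		return fmt.Errorf("expected out to be a *Flag, got a %T", out)
	}
	*outPointer = inWorker
	return nil
}

func main() {
	var flag Flag
	if err := output(&flagWorker{valid: true}, &flag); err != nil {
		panic(err)
	}
	fmt.Println(flag.Check()) // true

	// A wrong out pointer reproduces the kind of failure TestOutputBadResult matches.
	var wrong int
	fmt.Println(output(&flagWorker{}, &wrong))
}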
Example #7
func (s *ManifoldSuite) TestStartMissingClock(c *gc.C) {
	manifold := singular.Manifold(singular.ManifoldConfig{
		ClockName:     "clock",
		APICallerName: "api-caller",
		AgentName:     "agent",
	})
	getResource := dt.StubGetResource(dt.StubResources{
		"clock": dt.StubResource{Error: dependency.ErrMissing},
	})

	worker, err := manifold.Start(getResource)
	c.Check(errors.Cause(err), gc.Equals, dependency.ErrMissing)
	c.Check(worker, gc.IsNil)
}
Example #8
func (s *ManifoldSuite) TestStartMissingClock(c *gc.C) {
	manifold := singular.Manifold(singular.ManifoldConfig{
		ClockName:     "clock",
		APICallerName: "api-caller",
		AgentName:     "agent",
	})
	context := dt.StubContext(nil, map[string]interface{}{
		"clock": dependency.ErrMissing,
	})

	worker, err := manifold.Start(context)
	c.Check(errors.Cause(err), gc.Equals, dependency.ErrMissing)
	c.Check(worker, gc.IsNil)
}
Example #9
func (s *ManifoldSuite) TestStartWrongAgent(c *gc.C) {
	manifold := singular.Manifold(singular.ManifoldConfig{
		ClockName:     "clock",
		APICallerName: "api-caller",
		AgentName:     "agent",
	})
	getResource := dt.StubGetResource(dt.StubResources{
		"clock":      dt.StubResource{Output: &fakeClock{}},
		"api-caller": dt.StubResource{Output: &fakeAPICaller{}},
		"agent":      dt.StubResource{Output: &mockAgent{wrongKind: true}},
	})

	worker, err := manifold.Start(getResource)
	c.Check(err, gc.ErrorMatches, "singular flag expected a machine agent")
	c.Check(worker, gc.IsNil)
}
Example #10
func (s *ManifoldSuite) TestStartWrongAgent(c *gc.C) {
	manifold := singular.Manifold(singular.ManifoldConfig{
		ClockName:     "clock",
		APICallerName: "api-caller",
		AgentName:     "agent",
	})
	context := dt.StubContext(nil, map[string]interface{}{
		"clock":      &fakeClock{},
		"api-caller": &fakeAPICaller{},
		"agent":      &mockAgent{wrongKind: true},
	})

	worker, err := manifold.Start(context)
	c.Check(err, gc.ErrorMatches, "singular flag expected a machine agent")
	c.Check(worker, gc.IsNil)
}
Example #11
func (s *ManifoldSuite) TestStartNewFacadeError(c *gc.C) {
	expectAPICaller := &fakeAPICaller{}
	manifold := singular.Manifold(singular.ManifoldConfig{
		ClockName:     "clock",
		APICallerName: "api-caller",
		AgentName:     "agent",
		NewFacade: func(apiCaller base.APICaller, tag names.MachineTag) (singular.Facade, error) {
			c.Check(apiCaller, gc.Equals, expectAPICaller)
			c.Check(tag.String(), gc.Equals, "machine-123")
			return nil, errors.New("grark plop")
		},
	})
	getResource := dt.StubGetResource(dt.StubResources{
		"clock":      dt.StubResource{Output: &fakeClock{}},
		"api-caller": dt.StubResource{Output: expectAPICaller},
		"agent":      dt.StubResource{Output: &mockAgent{}},
	})

	worker, err := manifold.Start(getResource)
	c.Check(err, gc.ErrorMatches, "grark plop")
	c.Check(worker, gc.IsNil)
}
Example #12
func (s *ManifoldSuite) TestStartNewFacadeError(c *gc.C) {
	expectAPICaller := &fakeAPICaller{}
	manifold := singular.Manifold(singular.ManifoldConfig{
		ClockName:     "clock",
		APICallerName: "api-caller",
		AgentName:     "agent",
		NewFacade: func(apiCaller base.APICaller, tag names.MachineTag) (singular.Facade, error) {
			c.Check(apiCaller, gc.Equals, expectAPICaller)
			c.Check(tag.String(), gc.Equals, "machine-123")
			return nil, errors.New("grark plop")
		},
	})
	context := dt.StubContext(nil, map[string]interface{}{
		"clock":      &fakeClock{},
		"api-caller": expectAPICaller,
		"agent":      &mockAgent{},
	})

	worker, err := manifold.Start(context)
	c.Check(err, gc.ErrorMatches, "grark plop")
	c.Check(worker, gc.IsNil)
}
Example #13
func (s *ManifoldSuite) TestStartSuccess(c *gc.C) {
	expectWorker := &fakeWorker{}
	manifold := singular.Manifold(singular.ManifoldConfig{
		ClockName:     "clock",
		APICallerName: "api-caller",
		AgentName:     "agent",
		NewFacade: func(_ base.APICaller, _ names.MachineTag) (singular.Facade, error) {
			return &fakeFacade{}, nil
		},
		NewWorker: func(_ singular.FlagConfig) (worker.Worker, error) {
			return expectWorker, nil
		},
	})
	getResource := dt.StubGetResource(dt.StubResources{
		"clock":      dt.StubResource{Output: &fakeClock{}},
		"api-caller": dt.StubResource{Output: &fakeAPICaller{}},
		"agent":      dt.StubResource{Output: &mockAgent{}},
	})

	worker, err := manifold.Start(getResource)
	c.Check(err, jc.ErrorIsNil)
	c.Check(worker, gc.Equals, expectWorker)
}
Example #14
func (s *ManifoldSuite) TestStartSuccess(c *gc.C) {
	expectWorker := &fakeWorker{}
	manifold := singular.Manifold(singular.ManifoldConfig{
		ClockName:     "clock",
		APICallerName: "api-caller",
		AgentName:     "agent",
		NewFacade: func(_ base.APICaller, _ names.MachineTag) (singular.Facade, error) {
			return &fakeFacade{}, nil
		},
		NewWorker: func(_ singular.FlagConfig) (worker.Worker, error) {
			return expectWorker, nil
		},
	})
	context := dt.StubContext(nil, map[string]interface{}{
		"clock":      &fakeClock{},
		"api-caller": &fakeAPICaller{},
		"agent":      &mockAgent{},
	})

	worker, err := manifold.Start(context)
	c.Check(err, jc.ErrorIsNil)
	c.Check(worker, gc.Equals, expectWorker)
}
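
Taken together, Examples #7 through #14 pin down the order in which the singular manifold's start function fails: a missing input resource, then a non-machine agent, then a facade construction error, then a worker construction error, and finally success. The sketch below is a hypothetical reconstruction of that shape under those assumptions; every name in it is a stand-in, not the real worker/singular implementation.

package main

import (
	"errors"
	"fmt"
)

var errMissing = errors.New("dependency not available") // stands in for dependency.ErrMissing

// resources stands in for the stub context / getResource used in the tests.
type resources map[string]interface{}

func (r resources) get(name string) (interface{}, error) {
	value, ok := r[name]
	if !ok {
		return nil, errMissing
	}
	return value, nil
}

// machineAgent mirrors the mockAgent{wrongKind: ...} fixture.
type machineAgent struct{ wrongKind bool }

type startConfig struct {
	newFacade func(apiCaller interface{}) (interface{}, error)
	newWorker func(facade interface{}) (interface{}, error)
}

// start resolves its declared inputs, validates the agent, then builds
// the facade and worker, propagating each failure unchanged.
func start(cfg startConfig, res resources) (interface{}, error) {
	clock, err := res.get("clock")
	if err != nil {
		return nil, err // TestStartMissingClock: the missing-resource error surfaces here
	}
	_ = clock
	apiCaller, err := res.get("api-caller")
	if err != nil {
		return nil, err
	}
	rawAgent, err := res.get("agent")
	if err != nil {
		return nil, err
	}
	agent, ok := rawAgent.(*machineAgent)
	if !ok || agent.wrongKind {
		return nil, errors.New("singular flag expected a machine agent") // TestStartWrongAgent
	}
	facade, err := cfg.newFacade(apiCaller)
	if err != nil {
		return nil, err // TestStartNewFacadeError: "grark plop"
	}
	return cfg.newWorker(facade) // TestStartNewWorkerError: "blomp tik", or success
}

func main() {
	_, err := start(startConfig{}, resources{"clock": struct{}{}})
	fmt.Println(err) // "dependency not available", because api-caller is absent
}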
Example #15
// Manifolds returns a set of interdependent dependency manifolds that will
// run together to administer a model, as configured.
func Manifolds(config ManifoldsConfig) dependency.Manifolds {
	modelTag := config.Agent.CurrentConfig().Model()
	return dependency.Manifolds{

		// The first group are foundational; the agent and clock
		// which wrap those supplied in config, and the api-caller
		// through which everything else communicates with the apiserver.
		agentName: agent.Manifold(config.Agent),
		clockName: clockManifold(config.Clock),
		apiCallerName: apicaller.Manifold(apicaller.ManifoldConfig{
			AgentName:     agentName,
			APIOpen:       apicaller.APIOpen,
			NewConnection: apicaller.OnlyConnect,
		}),

		// The discover spaces gate is used to coordinate workers which
		// shouldn't do anything until the discoverspaces worker has completed
		// its first discovery attempt.
		discoverSpacesCheckGateName: gate.ManifoldEx(config.DiscoverSpacesCheckLock),

		// All other manifolds should depend on at least one of these
		// three, which handle all the tasks that are safe and sane
		// to run in *all* controller machines.
		notDeadFlagName: lifeflag.Manifold(lifeflag.ManifoldConfig{
			APICallerName: apiCallerName,
			Entity:        modelTag,
			Result:        life.IsNotDead,
			Filter:        lifeFilter,

			NewFacade: lifeflag.NewFacade,
			NewWorker: lifeflag.NewWorker,
		}),
		notAliveFlagName: lifeflag.Manifold(lifeflag.ManifoldConfig{
			APICallerName: apiCallerName,
			Entity:        modelTag,
			Result:        life.IsNotAlive,
			Filter:        lifeFilter,

			NewFacade: lifeflag.NewFacade,
			NewWorker: lifeflag.NewWorker,
		}),
		isResponsibleFlagName: singular.Manifold(singular.ManifoldConfig{
			ClockName:     clockName,
			AgentName:     agentName,
			APICallerName: apiCallerName,
			Duration:      config.RunFlagDuration,

			NewFacade: singular.NewFacade,
			NewWorker: singular.NewWorker,
		}),

		// Everything else should be wrapped in ifResponsible,
		// ifNotAlive, or ifNotDead, to ensure that only a single
		// controller is administering this model at a time.
		//
		// NOTE: not perfectly reliable at this stage? i.e. a worker
		// that ignores its stop signal for "too long" might continue
		// to take admin actions after the window of responsibility
		// closes. This *is* a pre-existing problem, but demands some
		// thought/care: e.g. should we make sure the apiserver also
		// closes any connections that lose responsibility..? can we
		// make sure all possible environ operations are either time-
		// bounded or interruptible? etc
		//
		// On the other hand, all workers *should* be written in the
		// expectation of dealing with a sucky infrastructure running
		// things in parallel unexpectedly, just because the universe
		// hates us and will engineer matters such that it happens
		// sometimes, even when we try to avoid it.

		// The environ tracker could/should be used by several other
		// workers (firewaller, provisioners, address-cleaner?).
		environTrackerName: ifResponsible(environ.Manifold(environ.ManifoldConfig{
			APICallerName:  apiCallerName,
			NewEnvironFunc: environs.New,
		})),

		// The undertaker is currently the only ifNotAlive worker.
		undertakerName: ifNotAlive(undertaker.Manifold(undertaker.ManifoldConfig{
			APICallerName: apiCallerName,
			EnvironName:   environTrackerName,
			ClockName:     clockName,
			RemoveDelay:   config.ModelRemoveDelay,

			NewFacade: undertaker.NewFacade,
			NewWorker: undertaker.NewWorker,
		})),

		// All the rest depend on ifNotDead.
		discoverSpacesName: ifNotDead(discoverspaces.Manifold(discoverspaces.ManifoldConfig{
			EnvironName:   environTrackerName,
			APICallerName: apiCallerName,
			UnlockerName:  discoverSpacesCheckGateName,

			NewFacade: discoverspaces.NewFacade,
			NewWorker: discoverspaces.NewWorker,
		})),
		computeProvisionerName: ifNotDead(provisioner.Manifold(provisioner.ManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
		})),
		storageProvisionerName: ifNotDead(storageprovisioner.ModelManifold(storageprovisioner.ModelManifoldConfig{
			APICallerName: apiCallerName,
			ClockName:     clockName,
			Scope:         modelTag,
		})),
		firewallerName: ifNotDead(firewaller.Manifold(firewaller.ManifoldConfig{
			APICallerName: apiCallerName,
		})),
		unitAssignerName: ifNotDead(unitassigner.Manifold(unitassigner.ManifoldConfig{
			APICallerName: apiCallerName,
		})),
		serviceScalerName: ifNotDead(servicescaler.Manifold(servicescaler.ManifoldConfig{
			APICallerName: apiCallerName,
			NewFacade:     servicescaler.NewFacade,
			NewWorker:     servicescaler.New,
		})),
		instancePollerName: ifNotDead(instancepoller.Manifold(instancepoller.ManifoldConfig{
			APICallerName: apiCallerName,
			EnvironName:   environTrackerName,
		})),
		charmRevisionUpdaterName: ifNotDead(charmrevisionmanifold.Manifold(charmrevisionmanifold.ManifoldConfig{
			APICallerName: apiCallerName,
			ClockName:     clockName,
			Period:        config.CharmRevisionUpdateInterval,

			NewFacade: charmrevisionmanifold.NewAPIFacade,
			NewWorker: charmrevision.NewWorker,
		})),
		metricWorkerName: ifNotDead(metricworker.Manifold(metricworker.ManifoldConfig{
			APICallerName: apiCallerName,
		})),
		stateCleanerName: ifNotDead(cleaner.Manifold(cleaner.ManifoldConfig{
			APICallerName: apiCallerName,
		})),
		addressCleanerName: ifNotDead(addresser.Manifold(addresser.ManifoldConfig{
			APICallerName: apiCallerName,
		})),
		statusHistoryPrunerName: ifNotDead(statushistorypruner.Manifold(statushistorypruner.ManifoldConfig{
			APICallerName:    apiCallerName,
			MaxLogsPerEntity: config.EntityStatusHistoryCount,
			PruneInterval:    config.EntityStatusHistoryInterval,
			// TODO(fwereade): 2016-03-17 lp:1558657
			NewTimer: worker.NewTimer,
		})),
	}
}
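
The comments in the function above refer repeatedly to ifResponsible, ifNotAlive and ifNotDead without showing them. As a rough illustration of the idea (not Juju's actual helpers), such a wrapper can be thought of as a decorator that makes the wrapped manifold depend on the corresponding flag manifold, so the dependency engine only keeps its worker running while that flag is set. The Manifold type below is a stand-in, not the real dependency.Manifold.

package main

import "fmt"

type Manifold struct {
	Inputs []string
	// Start, Output and the rest are omitted; a real decorator would also
	// have to consult the flag's value when starting the wrapped worker.
}

// requireFlag returns a decorator that prepends the named flag to a
// manifold's declared inputs.
func requireFlag(flagName string) func(Manifold) Manifold {
	return func(m Manifold) Manifold {
		m.Inputs = append([]string{flagName}, m.Inputs...)
		return m
	}
}

func main() {
	ifResponsible := requireFlag("is-responsible-flag")
	decorated := ifResponsible(Manifold{Inputs: []string{"api-caller"}})
	fmt.Println(decorated.Inputs) // [is-responsible-flag api-caller]
}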
Example #16
// Manifolds returns a set of interdependent dependency manifolds that will
// run together to administer a model, as configured.
func Manifolds(config ManifoldsConfig) dependency.Manifolds {
	modelTag := config.Agent.CurrentConfig().Model()
	return dependency.Manifolds{

		// The first group are foundational; the agent and clock
		// which wrap those supplied in config, and the api-caller
		// through which everything else communicates with the
		// controller.
		agentName: agent.Manifold(config.Agent),
		clockName: clockManifold(config.Clock),
		apiConfigWatcherName: apiconfigwatcher.Manifold(apiconfigwatcher.ManifoldConfig{
			AgentName:          agentName,
			AgentConfigChanged: config.AgentConfigChanged,
		}),
		apiCallerName: apicaller.Manifold(apicaller.ManifoldConfig{
			AgentName:     agentName,
			APIOpen:       api.Open,
			NewConnection: apicaller.OnlyConnect,
			Filter:        apiConnectFilter,
		}),

		// The spaces-imported gate will be unlocked when space
		// discovery is known to be complete. Various manifolds
		// should also come to depend upon it (or rather, on a
		// Flag depending on it) in the future.
		spacesImportedGateName: gate.ManifoldEx(config.SpacesImportedGate),

		// All other manifolds should depend on at least one of these
		// three, which handle all the tasks that are safe and sane
		// to run in *all* controller machines.
		notDeadFlagName: lifeflag.Manifold(lifeflag.ManifoldConfig{
			APICallerName: apiCallerName,
			Entity:        modelTag,
			Result:        life.IsNotDead,
			Filter:        LifeFilter,

			NewFacade: lifeflag.NewFacade,
			NewWorker: lifeflag.NewWorker,
		}),
		notAliveFlagName: lifeflag.Manifold(lifeflag.ManifoldConfig{
			APICallerName: apiCallerName,
			Entity:        modelTag,
			Result:        life.IsNotAlive,
			Filter:        LifeFilter,

			NewFacade: lifeflag.NewFacade,
			NewWorker: lifeflag.NewWorker,
		}),
		isResponsibleFlagName: singular.Manifold(singular.ManifoldConfig{
			ClockName:     clockName,
			AgentName:     agentName,
			APICallerName: apiCallerName,
			Duration:      config.RunFlagDuration,

			NewFacade: singular.NewFacade,
			NewWorker: singular.NewWorker,
		}),

		// The migration workers collaborate to run migrations;
		// and to create a mechanism for running other workers
		// so they can't accidentally interfere with a migration
		// in progress. Such a manifold should (1) depend on the
		// migration-inactive flag, to know when to start or die;
		// and (2) occupy the migration-fortress, so as to avoid
		// possible interference with the minion (which will not
		// take action until it's gained sole control of the
		// fortress).
		//
		// Note that the fortress and flag will only exist while
		// the model is not dead; this frees their dependencies
		// from model-lifetime concerns.
		migrationFortressName: ifNotDead(fortress.Manifold()),
		migrationInactiveFlagName: ifNotDead(migrationflag.Manifold(migrationflag.ManifoldConfig{
			APICallerName: apiCallerName,
			Check:         migrationflag.IsTerminal,
			NewFacade:     migrationflag.NewFacade,
			NewWorker:     migrationflag.NewWorker,
		})),
		migrationMasterName: ifNotDead(migrationmaster.Manifold(migrationmaster.ManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
			FortressName:  migrationFortressName,
			Clock:         config.Clock,
			NewFacade:     migrationmaster.NewFacade,
			NewWorker:     config.NewMigrationMaster,
		})),

		// Everything else should be wrapped in ifResponsible,
		// ifNotAlive, ifNotDead, or ifNotMigrating (which also
		// implies NotDead), to ensure that only a single
		// controller is attempting to administer this model at
		// any one time.
		//
		// NOTE: not perfectly reliable at this stage? i.e. a
		// worker that ignores its stop signal for "too long"
		// might continue to take admin actions after the window
		// of responsibility closes. This *is* a pre-existing
		// problem, but demands some thought/care: e.g. should
		// we make sure the apiserver also closes any
		// connections that lose responsibility..? can we make
		// sure all possible environ operations are either time-
		// bounded or interruptible? etc
		//
		// On the other hand, all workers *should* be written in
		// the expectation of dealing with sucky infrastructure
		// running things in parallel unexpectedly, just because
		// the universe hates us and will engineer matters such
		// that it happens sometimes, even when we try to avoid
		// it.

		// The environ tracker could/should be used by several other
		// workers (firewaller, provisioners, address-cleaner?).
		environTrackerName: ifResponsible(environ.Manifold(environ.ManifoldConfig{
			APICallerName:  apiCallerName,
			NewEnvironFunc: config.NewEnvironFunc,
		})),

		// The undertaker is currently the only ifNotAlive worker.
		undertakerName: ifNotAlive(undertaker.Manifold(undertaker.ManifoldConfig{
			APICallerName: apiCallerName,
			EnvironName:   environTrackerName,

			NewFacade: undertaker.NewFacade,
			NewWorker: undertaker.NewWorker,
		})),

		// All the rest depend on ifNotMigrating.
		spaceImporterName: ifNotMigrating(discoverspaces.Manifold(discoverspaces.ManifoldConfig{
			EnvironName:   environTrackerName,
			APICallerName: apiCallerName,
			UnlockerName:  spacesImportedGateName,

			NewFacade: discoverspaces.NewFacade,
			NewWorker: discoverspaces.NewWorker,
		})),
		computeProvisionerName: ifNotMigrating(provisioner.Manifold(provisioner.ManifoldConfig{
			AgentName:          agentName,
			APICallerName:      apiCallerName,
			EnvironName:        environTrackerName,
			NewProvisionerFunc: provisioner.NewEnvironProvisioner,
		})),
		storageProvisionerName: ifNotMigrating(storageprovisioner.ModelManifold(storageprovisioner.ModelManifoldConfig{
			APICallerName: apiCallerName,
			ClockName:     clockName,
			EnvironName:   environTrackerName,
			Scope:         modelTag,
		})),
		firewallerName: ifNotMigrating(firewaller.Manifold(firewaller.ManifoldConfig{
			APICallerName: apiCallerName,
		})),
		unitAssignerName: ifNotMigrating(unitassigner.Manifold(unitassigner.ManifoldConfig{
			APICallerName: apiCallerName,
		})),
		applicationScalerName: ifNotMigrating(applicationscaler.Manifold(applicationscaler.ManifoldConfig{
			APICallerName: apiCallerName,
			NewFacade:     applicationscaler.NewFacade,
			NewWorker:     applicationscaler.New,
		})),
		instancePollerName: ifNotMigrating(instancepoller.Manifold(instancepoller.ManifoldConfig{
			APICallerName: apiCallerName,
			EnvironName:   environTrackerName,
			ClockName:     clockName,
			Delay:         config.InstPollerAggregationDelay,
		})),
		charmRevisionUpdaterName: ifNotMigrating(charmrevisionmanifold.Manifold(charmrevisionmanifold.ManifoldConfig{
			APICallerName: apiCallerName,
			ClockName:     clockName,
			Period:        config.CharmRevisionUpdateInterval,

			NewFacade: charmrevisionmanifold.NewAPIFacade,
			NewWorker: charmrevision.NewWorker,
		})),
		metricWorkerName: ifNotMigrating(metricworker.Manifold(metricworker.ManifoldConfig{
			APICallerName: apiCallerName,
		})),
		stateCleanerName: ifNotMigrating(cleaner.Manifold(cleaner.ManifoldConfig{
			APICallerName: apiCallerName,
		})),
		statusHistoryPrunerName: ifNotMigrating(statushistorypruner.Manifold(statushistorypruner.ManifoldConfig{
			APICallerName:  apiCallerName,
			MaxHistoryTime: config.StatusHistoryPrunerMaxHistoryTime,
			MaxHistoryMB:   config.StatusHistoryPrunerMaxHistoryMB,
			PruneInterval:  config.StatusHistoryPrunerInterval,
			// TODO(fwereade): 2016-03-17 lp:1558657
			NewTimer: worker.NewTimer,
		})),
		machineUndertakerName: ifNotMigrating(machineundertaker.Manifold(machineundertaker.ManifoldConfig{
			APICallerName: apiCallerName,
			EnvironName:   environTrackerName,
			NewWorker:     machineundertaker.NewWorker,
		})),
	}
}
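
The fortress/flag arrangement described in the comments of this second Manifolds version can be pictured with an ordinary reader/writer lock: regular workers occupy the fortress concurrently, while the migration machinery must first gain sole control before acting. The sketch below is only an analogy under that assumption; it bears no relation to the real fortress worker's API.

package main

import (
	"fmt"
	"sync"
)

// fortress illustrates the coordination idea with an RWMutex stand-in:
// ordinary workers hold shared occupancy, the migration machinery holds
// exclusive access, and the two never overlap.
type fortress struct{ mu sync.RWMutex }

// occupy runs fn while holding shared occupancy; many workers can do this
// at once, but none can while a migration guards the fortress.
func (f *fortress) occupy(fn func()) {
	f.mu.RLock()
	defer f.mu.RUnlock()
	fn()
}

// guard runs fn with exclusive access, waiting for all occupants to leave.
func (f *fortress) guard(fn func()) {
	f.mu.Lock()
	defer f.mu.Unlock()
	fn()
}

func main() {
	var f fortress
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			f.occupy(func() { fmt.Println("worker", i, "doing model admin") })
		}(i)
	}
	wg.Wait()
	f.guard(func() { fmt.Println("migration running with sole control") })
}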