Example #1
func (s *ManifoldSuite) TestNewWorkerError(c *gc.C) {
	fakeClock := &fakeClock{}
	fakeFacade := &fakeFacade{}
	fakeAPICaller := &fakeAPICaller{}

	stub := testing.Stub{}
	manifold := charmrevisionmanifold.Manifold(charmrevisionmanifold.ManifoldConfig{
		APICallerName: "api-caller",
		ClockName:     "clock",
		NewFacade: func(apiCaller base.APICaller) (charmrevisionmanifold.Facade, error) {
			stub.AddCall("NewFacade", apiCaller)
			return fakeFacade, nil
		},
		NewWorker: func(config charmrevision.Config) (worker.Worker, error) {
			stub.AddCall("NewWorker", config)
			return nil, errors.New("snrght")
		},
	})

	_, err := manifold.Start(dt.StubGetResource(dt.StubResources{
		"api-caller": dt.StubResource{Output: fakeAPICaller},
		"clock":      dt.StubResource{Output: fakeClock},
	}))
	c.Check(err, gc.ErrorMatches, "cannot create worker: snrght")
	stub.CheckCalls(c, []testing.StubCall{{
		"NewFacade", []interface{}{fakeAPICaller},
	}, {
		"NewWorker", []interface{}{charmrevision.Config{
			RevisionUpdater: fakeFacade,
			Clock:           fakeClock,
		}},
	}})
}
Example #2
func (s *ManifoldSuite) TestManifold(c *gc.C) {
	manifold := charmrevisionmanifold.Manifold(charmrevisionmanifold.ManifoldConfig{
		APICallerName: "billy",
		ClockName:     "bob",
	})

	c.Check(manifold.Inputs, jc.DeepEquals, []string{"billy", "bob"})
	c.Check(manifold.Start, gc.NotNil)
	c.Check(manifold.Output, gc.IsNil)
}
Example #3
func (s *ManifoldSuite) TestMissingClock(c *gc.C) {
	manifold := charmrevisionmanifold.Manifold(charmrevisionmanifold.ManifoldConfig{
		APICallerName: "api-caller",
		ClockName:     "clock",
	})

	_, err := manifold.Start(dt.StubGetResource(dt.StubResources{
		"api-caller": dt.StubResource{Output: fakeAPICaller{}},
		"clock":      dt.StubResource{Error: dependency.ErrMissing},
	}))
	c.Check(errors.Cause(err), gc.Equals, dependency.ErrMissing)
}
Example #4
File: manifold_test.go  Project: bac/juju
func (s *ManifoldSuite) TestMissingClock(c *gc.C) {
	manifold := charmrevisionmanifold.Manifold(charmrevisionmanifold.ManifoldConfig{
		APICallerName: "api-caller",
		ClockName:     "clock",
	})

	_, err := manifold.Start(dt.StubContext(nil, map[string]interface{}{
		"api-caller": fakeAPICaller{},
		"clock":      dependency.ErrMissing,
	}))
	c.Check(errors.Cause(err), gc.Equals, dependency.ErrMissing)
}
Example #5
func (s *ManifoldSuite) TestNewFacadeError(c *gc.C) {
	fakeAPICaller := &fakeAPICaller{}

	stub := testing.Stub{}
	manifold := charmrevisionmanifold.Manifold(charmrevisionmanifold.ManifoldConfig{
		APICallerName: "api-caller",
		ClockName:     "clock",
		NewFacade: func(apiCaller base.APICaller) (charmrevisionmanifold.Facade, error) {
			stub.AddCall("NewFacade", apiCaller)
			return nil, errors.New("blefgh")
		},
	})

	_, err := manifold.Start(dt.StubGetResource(dt.StubResources{
		"api-caller": dt.StubResource{Output: fakeAPICaller},
		"clock":      dt.StubResource{Output: fakeClock{}},
	}))
	c.Check(err, gc.ErrorMatches, "cannot create facade: blefgh")
	stub.CheckCalls(c, []testing.StubCall{{
		"NewFacade", []interface{}{fakeAPICaller},
	}})
}
Example #6
func (s *ManifoldSuite) TestSuccess(c *gc.C) {
	fakeClock := &fakeClock{}
	fakeFacade := &fakeFacade{}
	fakeWorker := &fakeWorker{}
	fakeAPICaller := &fakeAPICaller{}

	stub := testing.Stub{}
	manifold := charmrevisionmanifold.Manifold(charmrevisionmanifold.ManifoldConfig{
		APICallerName: "api-caller",
		ClockName:     "clock",
		Period:        10 * time.Minute,
		NewFacade: func(apiCaller base.APICaller) (charmrevisionmanifold.Facade, error) {
			stub.AddCall("NewFacade", apiCaller)
			return fakeFacade, nil
		},
		NewWorker: func(config charmrevision.Config) (worker.Worker, error) {
			stub.AddCall("NewWorker", config)
			return fakeWorker, nil
		},
	})

	w, err := manifold.Start(dt.StubGetResource(dt.StubResources{
		"api-caller": dt.StubResource{Output: fakeAPICaller},
		"clock":      dt.StubResource{Output: fakeClock},
	}))
	c.Check(w, gc.Equals, fakeWorker)
	c.Check(err, jc.ErrorIsNil)
	stub.CheckCalls(c, []testing.StubCall{{
		"NewFacade", []interface{}{fakeAPICaller},
	}, {
		"NewWorker", []interface{}{charmrevision.Config{
			Period:          10 * time.Minute,
			RevisionUpdater: fakeFacade,
			Clock:           fakeClock,
		}},
	}})
}
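For context on the pattern these tests exercise: the manifold's Start function resolves its named inputs ("api-caller", "clock") from the dependency getter, builds a facade from the API connection, and passes both to the injected NewWorker factory, which is why the stub can intercept and assert on the exact wiring. Below is a minimal, self-contained sketch of that idea using simplified stand-in types; it is illustrative only and does not use juju's real interfaces.

package main

import (
	"fmt"
	"time"
)

// Simplified stand-ins for the real interfaces (hypothetical, illustration only).
type (
	APICaller interface{}
	Clock     interface{}
	Facade    interface{}
	Worker    interface{}
)

// WorkerConfig plays the role of charmrevision.Config in the tests above.
type WorkerConfig struct {
	RevisionUpdater Facade
	Clock           Clock
	Period          time.Duration
}

// ManifoldConfig carries the resource names plus injectable factories;
// the factories are what make the Start path easy to test with a stub.
type ManifoldConfig struct {
	APICallerName string
	ClockName     string
	Period        time.Duration
	NewFacade     func(APICaller) (Facade, error)
	NewWorker     func(WorkerConfig) (Worker, error)
}

// start resolves the named resources from the getter and chains the factories,
// wrapping errors the way the tests above expect.
func (c ManifoldConfig) start(get func(string) (interface{}, error)) (Worker, error) {
	apiCaller, err := get(c.APICallerName)
	if err != nil {
		return nil, err
	}
	clock, err := get(c.ClockName)
	if err != nil {
		return nil, err
	}
	facade, err := c.NewFacade(apiCaller)
	if err != nil {
		return nil, fmt.Errorf("cannot create facade: %w", err)
	}
	w, err := c.NewWorker(WorkerConfig{
		RevisionUpdater: facade,
		Clock:           clock,
		Period:          c.Period,
	})
	if err != nil {
		return nil, fmt.Errorf("cannot create worker: %w", err)
	}
	return w, nil
}

func main() {
	cfg := ManifoldConfig{
		APICallerName: "api-caller",
		ClockName:     "clock",
		Period:        10 * time.Minute,
		NewFacade:     func(APICaller) (Facade, error) { return struct{}{}, nil },
		NewWorker:     func(wc WorkerConfig) (Worker, error) { return wc, nil },
	}
	resources := map[string]interface{}{"api-caller": struct{}{}, "clock": struct{}{}}
	w, err := cfg.start(func(name string) (interface{}, error) { return resources[name], nil })
	fmt.Println(w, err)
}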
Example #7
// Manifolds returns a set of interdependent dependency manifolds that will
// run together to administer a model, as configured.
func Manifolds(config ManifoldsConfig) dependency.Manifolds {
	modelTag := config.Agent.CurrentConfig().Model()
	return dependency.Manifolds{

		// The first group are foundational; the agent and clock
		// which wrap those supplied in config, and the api-caller
		// through which everything else communicates with the apiserver.
		agentName: agent.Manifold(config.Agent),
		clockName: clockManifold(config.Clock),
		apiCallerName: apicaller.Manifold(apicaller.ManifoldConfig{
			AgentName:     agentName,
			APIOpen:       apicaller.APIOpen,
			NewConnection: apicaller.OnlyConnect,
		}),

		// The discover spaces gate is used to coordinate workers which
		// shouldn't do anything until the discoverspaces worker has completed
		// its first discovery attempt.
		discovertSpacesCheckGateName: gate.ManifoldEx(config.DiscoverSpacesCheckLock),

		// All other manifolds should depend on at least one of these
		// three, which handle all the tasks that are safe and sane
		// to run in *all* controller machines.
		notDeadFlagName: lifeflag.Manifold(lifeflag.ManifoldConfig{
			APICallerName: apiCallerName,
			Entity:        modelTag,
			Result:        life.IsNotDead,
			Filter:        lifeFilter,

			NewFacade: lifeflag.NewFacade,
			NewWorker: lifeflag.NewWorker,
		}),
		notAliveFlagName: lifeflag.Manifold(lifeflag.ManifoldConfig{
			APICallerName: apiCallerName,
			Entity:        modelTag,
			Result:        life.IsNotAlive,
			Filter:        lifeFilter,

			NewFacade: lifeflag.NewFacade,
			NewWorker: lifeflag.NewWorker,
		}),
		isResponsibleFlagName: singular.Manifold(singular.ManifoldConfig{
			ClockName:     clockName,
			AgentName:     agentName,
			APICallerName: apiCallerName,
			Duration:      config.RunFlagDuration,

			NewFacade: singular.NewFacade,
			NewWorker: singular.NewWorker,
		}),

		// Everything else should be wrapped in ifResponsible,
		// ifNotAlive, or ifNotDead, to ensure that only a single
		// controller is administering this model at a time.
		//
		// NOTE: not perfectly reliable at this stage? i.e. a worker
		// that ignores its stop signal for "too long" might continue
		// to take admin actions after the window of responsibility
		// closes. This *is* a pre-existing problem, but demands some
		// thought/care: e.g. should we make sure the apiserver also
		// closes any connections that lose responsibility..? can we
		// make sure all possible environ operations are either time-
		// bounded or interruptible? etc
		//
		// On the other hand, all workers *should* be written in the
		// expectation of dealing with a sucky infrastructure running
		// things in parallel unexpectedly, just because the universe
		// hates us and will engineer matters such that it happens
		// sometimes, even when we try to avoid it.

		// The environ tracker could/should be used by several other
		// workers (firewaller, provisioners, address-cleaner?).
		environTrackerName: ifResponsible(environ.Manifold(environ.ManifoldConfig{
			APICallerName:  apiCallerName,
			NewEnvironFunc: environs.New,
		})),

		// The undertaker is currently the only ifNotAlive worker.
		undertakerName: ifNotAlive(undertaker.Manifold(undertaker.ManifoldConfig{
			APICallerName: apiCallerName,
			EnvironName:   environTrackerName,
			ClockName:     clockName,
			RemoveDelay:   config.ModelRemoveDelay,

			NewFacade: undertaker.NewFacade,
			NewWorker: undertaker.NewWorker,
		})),

		// All the rest depend on ifNotDead.
		discoverSpacesName: ifNotDead(discoverspaces.Manifold(discoverspaces.ManifoldConfig{
			EnvironName:   environTrackerName,
			APICallerName: apiCallerName,
			UnlockerName:  discovertSpacesCheckGateName,

			NewFacade: discoverspaces.NewFacade,
			NewWorker: discoverspaces.NewWorker,
		})),
		computeProvisionerName: ifNotDead(provisioner.Manifold(provisioner.ManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
		})),
		storageProvisionerName: ifNotDead(storageprovisioner.ModelManifold(storageprovisioner.ModelManifoldConfig{
			APICallerName: apiCallerName,
			ClockName:     clockName,
			Scope:         modelTag,
		})),
		firewallerName: ifNotDead(firewaller.Manifold(firewaller.ManifoldConfig{
			APICallerName: apiCallerName,
		})),
		unitAssignerName: ifNotDead(unitassigner.Manifold(unitassigner.ManifoldConfig{
			APICallerName: apiCallerName,
		})),
		serviceScalerName: ifNotDead(servicescaler.Manifold(servicescaler.ManifoldConfig{
			APICallerName: apiCallerName,
			NewFacade:     servicescaler.NewFacade,
			NewWorker:     servicescaler.New,
		})),
		instancePollerName: ifNotDead(instancepoller.Manifold(instancepoller.ManifoldConfig{
			APICallerName: apiCallerName,
			EnvironName:   environTrackerName,
		})),
		charmRevisionUpdaterName: ifNotDead(charmrevisionmanifold.Manifold(charmrevisionmanifold.ManifoldConfig{
			APICallerName: apiCallerName,
			ClockName:     clockName,
			Period:        config.CharmRevisionUpdateInterval,

			NewFacade: charmrevisionmanifold.NewAPIFacade,
			NewWorker: charmrevision.NewWorker,
		})),
		metricWorkerName: ifNotDead(metricworker.Manifold(metricworker.ManifoldConfig{
			APICallerName: apiCallerName,
		})),
		stateCleanerName: ifNotDead(cleaner.Manifold(cleaner.ManifoldConfig{
			APICallerName: apiCallerName,
		})),
		addressCleanerName: ifNotDead(addresser.Manifold(addresser.ManifoldConfig{
			APICallerName: apiCallerName,
		})),
		statusHistoryPrunerName: ifNotDead(statushistorypruner.Manifold(statushistorypruner.ManifoldConfig{
			APICallerName:    apiCallerName,
			MaxLogsPerEntity: config.EntityStatusHistoryCount,
			PruneInterval:    config.EntityStatusHistoryInterval,
			// TODO(fwereade): 2016-03-17 lp:1558657
			NewTimer: worker.NewTimer,
		})),
	}
}
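The ifResponsible, ifNotAlive, and ifNotDead helpers referenced in the comments above decorate a manifold so that it also depends on a flag resource and only starts while that flag is set; when the flag is unset, the start attempt fails with a "missing dependency" error and the engine retries later. Below is a rough, hypothetical sketch of that gating idea with simplified types; juju's real implementation lives in its dependency package and differs in detail.

package main

import (
	"errors"
	"fmt"
)

// Simplified stand-ins (hypothetical, illustration only).
type (
	Worker    interface{}
	Getter    func(name string) (interface{}, error)
	StartFunc func(Getter) (Worker, error)
)

type Manifold struct {
	Inputs []string
	Start  StartFunc
}

// errMissing plays the role of dependency.ErrMissing: the worker cannot
// start yet, so the engine should try again when its inputs change.
var errMissing = errors.New("dependency not available")

// withFlag wraps a manifold so it also depends on flagName and only starts
// its worker while that flag resource reports true.
func withFlag(inner Manifold, flagName string) Manifold {
	return Manifold{
		Inputs: append([]string{flagName}, inner.Inputs...),
		Start: func(get Getter) (Worker, error) {
			raw, err := get(flagName)
			if err != nil {
				return nil, err
			}
			if set, ok := raw.(bool); !ok || !set {
				return nil, errMissing
			}
			return inner.Start(get)
		},
	}
}

func main() {
	base := Manifold{Start: func(Getter) (Worker, error) { return struct{}{}, nil }}
	gated := withFlag(base, "is-responsible")
	_, err := gated.Start(func(string) (interface{}, error) { return false, nil })
	fmt.Println(err) // dependency not available
}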
Example #8
File: manifolds.go  Project: bac/juju
// Manifolds returns a set of interdependent dependency manifolds that will
// run together to administer a model, as configured.
func Manifolds(config ManifoldsConfig) dependency.Manifolds {
	modelTag := config.Agent.CurrentConfig().Model()
	return dependency.Manifolds{

		// The first group are foundational; the agent and clock
		// which wrap those supplied in config, and the api-caller
		// through which everything else communicates with the
		// controller.
		agentName: agent.Manifold(config.Agent),
		clockName: clockManifold(config.Clock),
		apiConfigWatcherName: apiconfigwatcher.Manifold(apiconfigwatcher.ManifoldConfig{
			AgentName:          agentName,
			AgentConfigChanged: config.AgentConfigChanged,
		}),
		apiCallerName: apicaller.Manifold(apicaller.ManifoldConfig{
			AgentName:     agentName,
			APIOpen:       api.Open,
			NewConnection: apicaller.OnlyConnect,
			Filter:        apiConnectFilter,
		}),

		// The spaces-imported gate will be unlocked when space
		// discovery is known to be complete. Various manifolds
		// should also come to depend upon it (or rather, on a
		// Flag depending on it) in the future.
		spacesImportedGateName: gate.ManifoldEx(config.SpacesImportedGate),

		// All other manifolds should depend on at least one of these
		// three, which handle all the tasks that are safe and sane
		// to run in *all* controller machines.
		notDeadFlagName: lifeflag.Manifold(lifeflag.ManifoldConfig{
			APICallerName: apiCallerName,
			Entity:        modelTag,
			Result:        life.IsNotDead,
			Filter:        LifeFilter,

			NewFacade: lifeflag.NewFacade,
			NewWorker: lifeflag.NewWorker,
		}),
		notAliveFlagName: lifeflag.Manifold(lifeflag.ManifoldConfig{
			APICallerName: apiCallerName,
			Entity:        modelTag,
			Result:        life.IsNotAlive,
			Filter:        LifeFilter,

			NewFacade: lifeflag.NewFacade,
			NewWorker: lifeflag.NewWorker,
		}),
		isResponsibleFlagName: singular.Manifold(singular.ManifoldConfig{
			ClockName:     clockName,
			AgentName:     agentName,
			APICallerName: apiCallerName,
			Duration:      config.RunFlagDuration,

			NewFacade: singular.NewFacade,
			NewWorker: singular.NewWorker,
		}),

		// The migration workers collaborate to run migrations;
		// and to create a mechanism for running other workers
		// so they can't accidentally interfere with a migration
		// in progress. Such a manifold should (1) depend on the
		// migration-inactive flag, to know when to start or die;
		// and (2) occupy the migration-fortress, so as to avoid
		// possible interference with the minion (which will not
		// take action until it's gained sole control of the
		// fortress).
		//
		// Note that the fortress and flag will only exist while
		// the model is not dead; this frees their dependencies
		// from model-lifetime concerns.
		migrationFortressName: ifNotDead(fortress.Manifold()),
		migrationInactiveFlagName: ifNotDead(migrationflag.Manifold(migrationflag.ManifoldConfig{
			APICallerName: apiCallerName,
			Check:         migrationflag.IsTerminal,
			NewFacade:     migrationflag.NewFacade,
			NewWorker:     migrationflag.NewWorker,
		})),
		migrationMasterName: ifNotDead(migrationmaster.Manifold(migrationmaster.ManifoldConfig{
			AgentName:     agentName,
			APICallerName: apiCallerName,
			FortressName:  migrationFortressName,
			Clock:         config.Clock,
			NewFacade:     migrationmaster.NewFacade,
			NewWorker:     config.NewMigrationMaster,
		})),

		// Everything else should be wrapped in ifResponsible,
		// ifNotAlive, ifNotDead, or ifNotMigrating (which also
		// implies NotDead), to ensure that only a single
		// controller is attempting to administer this model at
		// any one time.
		//
		// NOTE: not perfectly reliable at this stage? i.e. a
		// worker that ignores its stop signal for "too long"
		// might continue to take admin actions after the window
		// of responsibility closes. This *is* a pre-existing
		// problem, but demands some thought/care: e.g. should
		// we make sure the apiserver also closes any
		// connections that lose responsibility..? can we make
		// sure all possible environ operations are either time-
		// bounded or interruptible? etc
		//
		// On the other hand, all workers *should* be written in
		// the expectation of dealing with sucky infrastructure
		// running things in parallel unexpectedly, just because
		// the universe hates us and will engineer matters such
		// that it happens sometimes, even when we try to avoid
		// it.

		// The environ tracker could/should be used by several other
		// workers (firewaller, provisioners, address-cleaner?).
		environTrackerName: ifResponsible(environ.Manifold(environ.ManifoldConfig{
			APICallerName:  apiCallerName,
			NewEnvironFunc: config.NewEnvironFunc,
		})),

		// The undertaker is currently the only ifNotAlive worker.
		undertakerName: ifNotAlive(undertaker.Manifold(undertaker.ManifoldConfig{
			APICallerName: apiCallerName,
			EnvironName:   environTrackerName,

			NewFacade: undertaker.NewFacade,
			NewWorker: undertaker.NewWorker,
		})),

		// All the rest depend on ifNotMigrating.
		spaceImporterName: ifNotMigrating(discoverspaces.Manifold(discoverspaces.ManifoldConfig{
			EnvironName:   environTrackerName,
			APICallerName: apiCallerName,
			UnlockerName:  spacesImportedGateName,

			NewFacade: discoverspaces.NewFacade,
			NewWorker: discoverspaces.NewWorker,
		})),
		computeProvisionerName: ifNotMigrating(provisioner.Manifold(provisioner.ManifoldConfig{
			AgentName:          agentName,
			APICallerName:      apiCallerName,
			EnvironName:        environTrackerName,
			NewProvisionerFunc: provisioner.NewEnvironProvisioner,
		})),
		storageProvisionerName: ifNotMigrating(storageprovisioner.ModelManifold(storageprovisioner.ModelManifoldConfig{
			APICallerName: apiCallerName,
			ClockName:     clockName,
			EnvironName:   environTrackerName,
			Scope:         modelTag,
		})),
		firewallerName: ifNotMigrating(firewaller.Manifold(firewaller.ManifoldConfig{
			APICallerName: apiCallerName,
		})),
		unitAssignerName: ifNotMigrating(unitassigner.Manifold(unitassigner.ManifoldConfig{
			APICallerName: apiCallerName,
		})),
		applicationScalerName: ifNotMigrating(applicationscaler.Manifold(applicationscaler.ManifoldConfig{
			APICallerName: apiCallerName,
			NewFacade:     applicationscaler.NewFacade,
			NewWorker:     applicationscaler.New,
		})),
		instancePollerName: ifNotMigrating(instancepoller.Manifold(instancepoller.ManifoldConfig{
			APICallerName: apiCallerName,
			EnvironName:   environTrackerName,
			ClockName:     clockName,
			Delay:         config.InstPollerAggregationDelay,
		})),
		charmRevisionUpdaterName: ifNotMigrating(charmrevisionmanifold.Manifold(charmrevisionmanifold.ManifoldConfig{
			APICallerName: apiCallerName,
			ClockName:     clockName,
			Period:        config.CharmRevisionUpdateInterval,

			NewFacade: charmrevisionmanifold.NewAPIFacade,
			NewWorker: charmrevision.NewWorker,
		})),
		metricWorkerName: ifNotMigrating(metricworker.Manifold(metricworker.ManifoldConfig{
			APICallerName: apiCallerName,
		})),
		stateCleanerName: ifNotMigrating(cleaner.Manifold(cleaner.ManifoldConfig{
			APICallerName: apiCallerName,
		})),
		statusHistoryPrunerName: ifNotMigrating(statushistorypruner.Manifold(statushistorypruner.ManifoldConfig{
			APICallerName:  apiCallerName,
			MaxHistoryTime: config.StatusHistoryPrunerMaxHistoryTime,
			MaxHistoryMB:   config.StatusHistoryPrunerMaxHistoryMB,
			PruneInterval:  config.StatusHistoryPrunerInterval,
			// TODO(fwereade): 2016-03-17 lp:1558657
			NewTimer: worker.NewTimer,
		})),
		machineUndertakerName: ifNotMigrating(machineundertaker.Manifold(machineundertaker.ManifoldConfig{
			APICallerName: apiCallerName,
			EnvironName:   environTrackerName,
			NewWorker:     machineundertaker.NewWorker,
		})),
	}
}
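The map returned by Manifolds is ultimately handed to a dependency engine, which starts each worker once the workers named in its Inputs are available and restarts dependents when anything changes. The sketch below shows only the start-ordering part of that idea, again with simplified, hypothetical types; it is not juju's actual engine, which also supervises worker lifetimes, handles errors, and restarts workers.

package main

import "fmt"

// Simplified stand-ins (hypothetical, illustration only).
type (
	Worker interface{}
	Getter func(name string) (interface{}, error)
)

type Manifold struct {
	Inputs []string
	Start  func(Getter) (Worker, error)
}
type Manifolds map[string]Manifold

// install starts every manifold whose inputs are already running and keeps
// sweeping until no further progress is made. A real engine also watches for
// worker exits and restarts dependents; this only shows the start ordering.
func install(manifolds Manifolds) map[string]Worker {
	running := map[string]Worker{}
	getter := func(name string) (interface{}, error) {
		w, ok := running[name]
		if !ok {
			return nil, fmt.Errorf("missing %q", name)
		}
		return w, nil
	}
	for progress := true; progress; {
		progress = false
		for name, m := range manifolds {
			if _, ok := running[name]; ok {
				continue
			}
			if w, err := m.Start(getter); err == nil {
				running[name] = w
				progress = true
			}
		}
	}
	return running
}

func main() {
	ms := Manifolds{
		"clock": {Start: func(Getter) (Worker, error) { return "clock", nil }},
		"api-caller": {Inputs: []string{"clock"}, Start: func(get Getter) (Worker, error) {
			if _, err := get("clock"); err != nil {
				return nil, err
			}
			return "api-caller", nil
		}},
	}
	fmt.Println(len(install(ms))) // 2
}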