// APIWorkers returns a dependency.Engine running the unit agent's responsibilities. func (a *UnitAgent) APIWorkers() (worker.Worker, error) { manifolds := unit.Manifolds(unit.ManifoldsConfig{ Agent: agent.APIHostPortsSetter{a}, LogSource: a.bufferedLogs, LeadershipGuarantee: 30 * time.Second, AgentConfigChanged: a.configChangedVal, }) config := dependency.EngineConfig{ IsFatal: cmdutil.IsFatal, WorstError: cmdutil.MoreImportantError, ErrorDelay: 3 * time.Second, BounceDelay: 10 * time.Millisecond, } engine, err := dependency.NewEngine(config) if err != nil { return nil, err } if err := dependency.Install(engine, manifolds); err != nil { if err := worker.Stop(engine); err != nil { logger.Errorf("while stopping engine with bad manifolds: %v", err) } return nil, err } return engine, nil }
// startModelWorkers starts the set of workers that run for every model
// in each controller.
func (a *MachineAgent) startModelWorkers(uuid string) (worker.Worker, error) {
	// Wrap the machine agent so the model workers see an agent scoped
	// to the given model uuid.
	modelAgent, err := model.WrapAgent(a, uuid)
	if err != nil {
		return nil, errors.Trace(err)
	}
	engine, err := dependency.NewEngine(dependency.EngineConfig{
		IsFatal:     model.IsFatal,
		WorstError:  model.WorstError,
		Filter:      model.IgnoreErrRemoved,
		ErrorDelay:  3 * time.Second,
		BounceDelay: 10 * time.Millisecond,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	manifolds := modelManifolds(model.ManifoldsConfig{
		Agent:                       modelAgent,
		AgentConfigChanged:          a.configChangedVal,
		Clock:                       clock.WallClock,
		RunFlagDuration:             time.Minute,
		CharmRevisionUpdateInterval: 24 * time.Hour,
		EntityStatusHistoryCount:    100,
		EntityStatusHistoryInterval: 5 * time.Minute,
		SpacesImportedGate:          a.discoverSpacesComplete,
	})
	if err := dependency.Install(engine, manifolds); err != nil {
		// Don't leak a half-configured engine: stop it before
		// reporting the installation failure.
		if err := worker.Stop(engine); err != nil {
			logger.Errorf("while stopping engine with bad manifolds: %v", err)
		}
		return nil, errors.Trace(err)
	}
	return engine, nil
}
func (s *EngineSuite) startEngine(c *gc.C, isFatal dependency.IsFatalFunc) { config := dependency.EngineConfig{ IsFatal: isFatal, MoreImportant: func(err0, err1 error) error { return err0 }, ErrorDelay: coretesting.ShortWait / 2, BounceDelay: coretesting.ShortWait / 10, } e, err := dependency.NewEngine(config) c.Assert(err, jc.ErrorIsNil) s.engine = e }
func (fix *engineFixture) run(c *gc.C, test func(*dependency.Engine)) { config := dependency.EngineConfig{ IsFatal: fix.isFatalFunc(), WorstError: fix.worstErrorFunc(), Filter: fix.filter, // can be nil anyway ErrorDelay: coretesting.ShortWait / 2, BounceDelay: coretesting.ShortWait / 10, } engine, err := dependency.NewEngine(config) c.Assert(err, jc.ErrorIsNil) defer fix.kill(c, engine) test(engine) }
// makeEngineCreator returns a function that constructs the machine
// agent's dependency engine, installs its manifolds, and starts the
// (best-effort) introspection worker.
func (a *MachineAgent) makeEngineCreator(previousAgentVersion version.Number) func() (worker.Worker, error) {
	return func() (worker.Worker, error) {
		config := dependency.EngineConfig{
			IsFatal:     cmdutil.IsFatal,
			WorstError:  cmdutil.MoreImportantError,
			ErrorDelay:  3 * time.Second,
			BounceDelay: 10 * time.Millisecond,
		}
		engine, err := dependency.NewEngine(config)
		if err != nil {
			return nil, err
		}
		manifolds := machineManifolds(machine.ManifoldsConfig{
			PreviousAgentVersion: previousAgentVersion,
			Agent:                agent.APIHostPortsSetter{Agent: a},
			RootDir:              a.rootDir,
			AgentConfigChanged:   a.configChangedVal,
			UpgradeStepsLock:     a.upgradeComplete,
			UpgradeCheckLock:     a.initialUpgradeCheckComplete,
			OpenState:            a.initState,
			OpenStateForUpgrade:  a.openStateForUpgrade,
			StartStateWorkers:    a.startStateWorkers,
			StartAPIWorkers:      a.startAPIWorkers,
			PreUpgradeSteps:      upgrades.PreUpgradeSteps,
			LogSource:            a.bufferedLogs,
			NewDeployContext:     newDeployContext,
			Clock:                clock.WallClock,
			ValidateMigration:    a.validateMigration,
		})
		if err := dependency.Install(engine, manifolds); err != nil {
			// Don't leak a half-configured engine: stop it before
			// reporting the installation failure.
			if err := worker.Stop(engine); err != nil {
				logger.Errorf("while stopping engine with bad manifolds: %v", err)
			}
			return nil, err
		}
		if err := startIntrospection(introspectionConfig{
			Agent:      a,
			Engine:     engine,
			WorkerFunc: introspection.NewWorker,
		}); err != nil {
			// If the introspection worker failed to start, we just log error
			// but continue. It is very unlikely to happen in the real world
			// as the only issue is connecting to the abstract domain socket
			// and the agent is controlled by the OS to only have one.
			logger.Errorf("failed to start introspection worker: %v", err)
		}
		return engine, nil
	}
}
func (s *engineFixture) startEngine(c *gc.C, isFatal dependency.IsFatalFunc) { if s.engine != nil { c.Fatalf("original engine not stopped") } config := dependency.EngineConfig{ IsFatal: isFatal, WorstError: func(err0, err1 error) error { return err0 }, ErrorDelay: coretesting.ShortWait / 2, BounceDelay: coretesting.ShortWait / 10, } e, err := dependency.NewEngine(config) c.Assert(err, jc.ErrorIsNil) s.engine = e }
// startModelWorkers starts the set of workers that run for every model
// in each controller.
func (a *MachineAgent) startModelWorkers(controllerUUID, modelUUID string) (worker.Worker, error) {
	// Wrap the machine agent so the model workers see an agent scoped
	// to the given controller/model pair.
	modelAgent, err := model.WrapAgent(a, controllerUUID, modelUUID)
	if err != nil {
		return nil, errors.Trace(err)
	}
	engine, err := dependency.NewEngine(dependency.EngineConfig{
		IsFatal:     model.IsFatal,
		WorstError:  model.WorstError,
		Filter:      model.IgnoreErrRemoved,
		ErrorDelay:  3 * time.Second,
		BounceDelay: 10 * time.Millisecond,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	manifolds := modelManifolds(model.ManifoldsConfig{
		Agent:                       modelAgent,
		AgentConfigChanged:          a.configChangedVal,
		Clock:                       clock.WallClock,
		RunFlagDuration:             time.Minute,
		CharmRevisionUpdateInterval: 24 * time.Hour,
		InstPollerAggregationDelay:  3 * time.Second,
		// TODO(perrito666) the status history pruning numbers need
		// to be adjusting, after collecting user data from large install
		// bases, to numbers allowing a rich and useful back history.
		StatusHistoryPrunerMaxHistoryTime: 336 * time.Hour, // 2 weeks
		StatusHistoryPrunerMaxHistoryMB:   5120,            // 5G
		StatusHistoryPrunerInterval:       5 * time.Minute,
		SpacesImportedGate:                a.discoverSpacesComplete,
		NewEnvironFunc:                    newEnvirons,
		NewMigrationMaster:                migrationmaster.NewWorker,
	})
	if err := dependency.Install(engine, manifolds); err != nil {
		// Don't leak a half-configured engine: stop it before
		// reporting the installation failure.
		if err := worker.Stop(engine); err != nil {
			logger.Errorf("while stopping engine with bad manifolds: %v", err)
		}
		return nil, errors.Trace(err)
	}
	return engine, nil
}
func (s *EngineSuite) TestConfigValidate(c *gc.C) { tests := []struct { breakConfig func(*dependency.EngineConfig) err string }{{ func(config *dependency.EngineConfig) { config.IsFatal = nil }, "IsFatal not specified", }, { func(config *dependency.EngineConfig) { config.WorstError = nil }, "WorstError not specified", }, { func(config *dependency.EngineConfig) { config.ErrorDelay = -time.Second }, "ErrorDelay is negative", }, { func(config *dependency.EngineConfig) { config.BounceDelay = -time.Second }, "BounceDelay is negative", }} for i, test := range tests { c.Logf("test %d", i) config := dependency.EngineConfig{ IsFatal: alwaysFatal, WorstError: firstError, ErrorDelay: time.Second, BounceDelay: time.Second, } test.breakConfig(&config) c.Logf("config validation...") validateErr := config.Validate() c.Check(validateErr, gc.ErrorMatches, test.err) c.Logf("engine creation...") engine, createErr := dependency.NewEngine(config) c.Check(engine, gc.IsNil) c.Check(createErr, gc.ErrorMatches, "invalid config: "+test.err) } }
func (s *EngineSuite) TestConfigValidate(c *gc.C) { validIsFatal := func(error) bool { return true } validWorstError := func(err0, err1 error) error { return err0 } validErrorDelay := time.Second validBounceDelay := time.Second tests := []struct { about string config dependency.EngineConfig err string }{{ "IsFatal invalid", dependency.EngineConfig{nil, validWorstError, validErrorDelay, validBounceDelay}, "IsFatal not specified", }, { "WorstError invalid", dependency.EngineConfig{validIsFatal, nil, validErrorDelay, validBounceDelay}, "WorstError not specified", }, { "ErrorDelay invalid", dependency.EngineConfig{validIsFatal, validWorstError, -time.Second, validBounceDelay}, "ErrorDelay is negative", }, { "BounceDelay invalid", dependency.EngineConfig{validIsFatal, validWorstError, validErrorDelay, -time.Second}, "BounceDelay is negative", }} for i, test := range tests { c.Logf("test %d: %v", i, test.about) c.Logf("config validation...") validateErr := test.config.Validate() c.Check(validateErr, gc.ErrorMatches, test.err) c.Logf("engine creation...") engine, createErr := dependency.NewEngine(test.config) c.Check(engine, gc.IsNil) c.Check(createErr, gc.ErrorMatches, "invalid config: "+test.err) } }
func (s *introspectionSuite) TestStartSuccess(c *gc.C) { if runtime.GOOS != "linux" { c.Skip("introspection worker not supported on non-linux") } fake := &dummyWorker{ done: make(chan struct{}), } config := dependency.EngineConfig{ IsFatal: cmdutil.IsFatal, WorstError: cmdutil.MoreImportantError, } engine, err := dependency.NewEngine(config) c.Assert(err, jc.ErrorIsNil) cfg := introspectionConfig{ Agent: &dummyAgent{}, Engine: engine, WorkerFunc: func(cfg introspection.Config) (worker.Worker, error) { fake.config = cfg return fake, nil }, } err = startIntrospection(cfg) c.Assert(err, jc.ErrorIsNil) c.Check(fake.config.Reporter, gc.Equals, engine) c.Check(fake.config.SocketName, gc.Equals, "jujud-machine-42") // Stopping the engine causes the introspection worker to stop. engine.Kill() select { case <-fake.done: case <-time.After(coretesting.LongWait): c.Fatalf("worker did not get stopped") } }
// TestWorstError starts an engine with two manifolds that always error // with fatal errors. We test that the most important error is the one // returned by the engine. // // This test uses manifolds whose workers ignore kill requests. We want // this (dangerous!) behaviour so that we don't race over which fatal // error is seen by the engine first. func (s *EngineSuite) TestWorstError(c *gc.C) { // Setup the errors, their importance, and the function // that decides. importantError := errors.New("an important error") moreImportant := func(_, _ error) error { return importantError } allFatal := func(error) bool { return true } // Start a new engine with moreImportant configured config := dependency.EngineConfig{ IsFatal: allFatal, WorstError: moreImportant, ErrorDelay: coretesting.ShortWait / 2, BounceDelay: coretesting.ShortWait / 10, } engine, err := dependency.NewEngine(config) c.Assert(err, jc.ErrorIsNil) mh1 := newErrorIgnoringManifoldHarness() err = engine.Install("task", mh1.Manifold()) c.Assert(err, jc.ErrorIsNil) mh1.AssertOneStart(c) mh2 := newErrorIgnoringManifoldHarness() err = engine.Install("another task", mh2.Manifold()) c.Assert(err, jc.ErrorIsNil) mh2.AssertOneStart(c) mh1.InjectError(c, errors.New("kerrang")) mh2.InjectError(c, importantError) err = engine.Wait() c.Check(err, gc.ErrorMatches, importantError.Error()) report := engine.Report() c.Check(report["error"], gc.ErrorMatches, importantError.Error()) }
// APIWorkers returns a dependency.Engine running the unit agent's responsibilities. func (a *UnitAgent) APIWorkers() (worker.Worker, error) { manifolds := unitManifolds(unit.ManifoldsConfig{ Agent: agent.APIHostPortsSetter{a}, LogSource: a.bufferedLogs, LeadershipGuarantee: 30 * time.Second, AgentConfigChanged: a.configChangedVal, ValidateMigration: a.validateMigration, }) config := dependency.EngineConfig{ IsFatal: cmdutil.IsFatal, WorstError: cmdutil.MoreImportantError, ErrorDelay: 3 * time.Second, BounceDelay: 10 * time.Millisecond, } engine, err := dependency.NewEngine(config) if err != nil { return nil, err } if err := dependency.Install(engine, manifolds); err != nil { if err := worker.Stop(engine); err != nil { logger.Errorf("while stopping engine with bad manifolds: %v", err) } return nil, err } if err := startIntrospection(introspectionConfig{ Agent: a, Engine: engine, WorkerFunc: introspection.NewWorker, }); err != nil { // If the introspection worker failed to start, we just log error // but continue. It is very unlikely to happen in the real world // as the only issue is connecting to the abstract domain socket // and the agent is controlled by by the OS to only have one. logger.Errorf("failed to start introspection worker: %v", err) } return engine, nil }
// makeEngineCreator returns a function that constructs the machine
// agent's dependency engine and installs its manifolds.
func (a *MachineAgent) makeEngineCreator(previousAgentVersion version.Number) func() (worker.Worker, error) {
	return func() (worker.Worker, error) {
		config := dependency.EngineConfig{
			IsFatal:     cmdutil.IsFatal,
			WorstError:  cmdutil.MoreImportantError,
			ErrorDelay:  3 * time.Second,
			BounceDelay: 10 * time.Millisecond,
		}
		engine, err := dependency.NewEngine(config)
		if err != nil {
			return nil, err
		}
		manifolds := machine.Manifolds(machine.ManifoldsConfig{
			PreviousAgentVersion: previousAgentVersion,
			Agent:                agent.APIHostPortsSetter{Agent: a},
			RootDir:              a.rootDir,
			AgentConfigChanged:   a.configChangedVal,
			UpgradeStepsLock:     a.upgradeComplete,
			UpgradeCheckLock:     a.initialUpgradeCheckComplete,
			OpenState:            a.initState,
			OpenStateForUpgrade:  a.openStateForUpgrade,
			StartStateWorkers:    a.startStateWorkers,
			StartAPIWorkers:      a.startAPIWorkers,
			PreUpgradeSteps:      upgrades.PreUpgradeSteps,
			LogSource:            a.bufferedLogs,
			NewDeployContext:     newDeployContext,
			Clock:                clock.WallClock,
		})
		if err := dependency.Install(engine, manifolds); err != nil {
			// Don't leak a half-configured engine: stop it before
			// reporting the installation failure.
			if err := worker.Stop(engine); err != nil {
				logger.Errorf("while stopping engine with bad manifolds: %v", err)
			}
			return nil, err
		}
		return engine, nil
	}
}
// startEngine constructs the suite's engine using the supplied
// fatality-classification function and short, test-friendly delays.
func (s *EngineSuite) startEngine(c *gc.C, isFatal dependency.IsFatalFunc) {
	errorDelay := coretesting.ShortWait / 2
	bounceDelay := coretesting.ShortWait / 10
	s.engine = dependency.NewEngine(isFatal, errorDelay, bounceDelay)
}