// startModelWorkers starts the set of workers that run for every model
// in each controller.
func (a *MachineAgent) startModelWorkers(uuid string) (worker.Worker, error) {
	modelAgent, err := model.WrapAgent(a, uuid)
	if err != nil {
		return nil, errors.Trace(err)
	}

	engine, err := dependency.NewEngine(dependency.EngineConfig{
		IsFatal:     model.IsFatal,
		WorstError:  model.WorstError,
		Filter:      model.IgnoreErrRemoved,
		ErrorDelay:  3 * time.Second,
		BounceDelay: 10 * time.Millisecond,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}

	manifolds := modelManifolds(model.ManifoldsConfig{
		Agent:                       modelAgent,
		AgentConfigChanged:          a.configChangedVal,
		Clock:                       clock.WallClock,
		RunFlagDuration:             time.Minute,
		CharmRevisionUpdateInterval: 24 * time.Hour,
		EntityStatusHistoryCount:    100,
		EntityStatusHistoryInterval: 5 * time.Minute,
		SpacesImportedGate:          a.discoverSpacesComplete,
	})
	if err := dependency.Install(engine, manifolds); err != nil {
		if err := worker.Stop(engine); err != nil {
			logger.Errorf("while stopping engine with bad manifolds: %v", err)
		}
		return nil, errors.Trace(err)
	}
	return engine, nil
}
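// The EngineConfig hooks above set the engine's restart policy: IsFatal
// reports whether an error should bring down the whole engine rather than
// just bounce the worker that produced it, and WorstError picks which of
// two fatal errors the engine ultimately reports. A minimal sketch of such
// a pair follows; the errModelRemoved sentinel and the ranking policy are
// illustrative assumptions, not the model package's actual implementation.

var errModelRemoved = errors.New("model removed")

func sketchIsFatal(err error) bool {
	// Only the sentinel is fatal; any other error simply restarts the
	// offending worker after ErrorDelay.
	return errors.Cause(err) == errModelRemoved
}

func sketchWorstError(err0, err1 error) error {
	// Trivial ranking: prefer the sentinel, otherwise keep the first error.
	if errors.Cause(err1) == errModelRemoved {
		return err1
	}
	return err0
}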
// APIWorkers returns a dependency.Engine running the unit agent's responsibilities.
func (a *UnitAgent) APIWorkers() (worker.Worker, error) {
	manifolds := unit.Manifolds(unit.ManifoldsConfig{
		Agent:               agent.APIHostPortsSetter{Agent: a},
		LogSource:           a.bufferedLogs,
		LeadershipGuarantee: 30 * time.Second,
		AgentConfigChanged:  a.configChangedVal,
	})

	config := dependency.EngineConfig{
		IsFatal:     cmdutil.IsFatal,
		WorstError:  cmdutil.MoreImportantError,
		ErrorDelay:  3 * time.Second,
		BounceDelay: 10 * time.Millisecond,
	}
	engine, err := dependency.NewEngine(config)
	if err != nil {
		return nil, err
	}
	if err := dependency.Install(engine, manifolds); err != nil {
		if err := worker.Stop(engine); err != nil {
			logger.Errorf("while stopping engine with bad manifolds: %v", err)
		}
		return nil, err
	}
	return engine, nil
}
func (a *MachineAgent) makeEngineCreator(previousAgentVersion version.Number) func() (worker.Worker, error) {
	return func() (worker.Worker, error) {
		config := dependency.EngineConfig{
			IsFatal:     cmdutil.IsFatal,
			WorstError:  cmdutil.MoreImportantError,
			ErrorDelay:  3 * time.Second,
			BounceDelay: 10 * time.Millisecond,
		}
		engine, err := dependency.NewEngine(config)
		if err != nil {
			return nil, err
		}
		manifolds := machineManifolds(machine.ManifoldsConfig{
			PreviousAgentVersion: previousAgentVersion,
			Agent:                agent.APIHostPortsSetter{Agent: a},
			RootDir:              a.rootDir,
			AgentConfigChanged:   a.configChangedVal,
			UpgradeStepsLock:     a.upgradeComplete,
			UpgradeCheckLock:     a.initialUpgradeCheckComplete,
			OpenState:            a.initState,
			OpenStateForUpgrade:  a.openStateForUpgrade,
			StartStateWorkers:    a.startStateWorkers,
			StartAPIWorkers:      a.startAPIWorkers,
			PreUpgradeSteps:      upgrades.PreUpgradeSteps,
			LogSource:            a.bufferedLogs,
			NewDeployContext:     newDeployContext,
			Clock:                clock.WallClock,
			ValidateMigration:    a.validateMigration,
		})
		if err := dependency.Install(engine, manifolds); err != nil {
			if err := worker.Stop(engine); err != nil {
				logger.Errorf("while stopping engine with bad manifolds: %v", err)
			}
			return nil, err
		}
		if err := startIntrospection(introspectionConfig{
			Agent:      a,
			Engine:     engine,
			WorkerFunc: introspection.NewWorker,
		}); err != nil {
			// If the introspection worker fails to start, we just log the
			// error and continue. This is very unlikely to happen in the
			// real world, as the only failure mode is binding the abstract
			// domain socket, and the OS ensures only one agent holds it.
			logger.Errorf("failed to start introspection worker: %v", err)
		}
		return engine, nil
	}
}
func (s *EngineSuite) TestInstallConvenienceWrapper(c *gc.C) {
	mh1 := newManifoldHarness()
	mh2 := newManifoldHarness()
	mh3 := newManifoldHarness()

	err := dependency.Install(s.engine, dependency.Manifolds{
		"mh1": mh1.Manifold(),
		"mh2": mh2.Manifold(),
		"mh3": mh3.Manifold(),
	})
	c.Assert(err, jc.ErrorIsNil)

	mh1.AssertOneStart(c)
	mh2.AssertOneStart(c)
	mh3.AssertOneStart(c)
}
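// The test above exercises dependency.Install, which is a convenience
// wrapper: it installs each named manifold into the engine and stops at
// the first failure, leaving the caller to tear the engine down. A sketch
// of that behaviour, assuming the package exposes an Installer interface
// with Install(name string, manifold Manifold) error:

func sketchInstall(installer dependency.Installer, manifolds dependency.Manifolds) error {
	for name, manifold := range manifolds {
		if err := installer.Install(name, manifold); err != nil {
			// First failure wins; any manifolds already installed keep
			// running until the caller stops the engine.
			return errors.Trace(err)
		}
	}
	return nil
}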
// startModelWorkers starts the set of workers that run for every model
// in each controller.
func (a *MachineAgent) startModelWorkers(controllerUUID, modelUUID string) (worker.Worker, error) {
	modelAgent, err := model.WrapAgent(a, controllerUUID, modelUUID)
	if err != nil {
		return nil, errors.Trace(err)
	}

	engine, err := dependency.NewEngine(dependency.EngineConfig{
		IsFatal:     model.IsFatal,
		WorstError:  model.WorstError,
		Filter:      model.IgnoreErrRemoved,
		ErrorDelay:  3 * time.Second,
		BounceDelay: 10 * time.Millisecond,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}

	manifolds := modelManifolds(model.ManifoldsConfig{
		Agent:                       modelAgent,
		AgentConfigChanged:          a.configChangedVal,
		Clock:                       clock.WallClock,
		RunFlagDuration:             time.Minute,
		CharmRevisionUpdateInterval: 24 * time.Hour,
		InstPollerAggregationDelay:  3 * time.Second,
		// TODO(perrito666) the status history pruning numbers need to be
		// adjusted, after collecting user data from large install bases,
		// to values that allow a rich and useful back history.
		StatusHistoryPrunerMaxHistoryTime: 336 * time.Hour, // 2 weeks
		StatusHistoryPrunerMaxHistoryMB:   5120,            // 5G
		StatusHistoryPrunerInterval:       5 * time.Minute,
		SpacesImportedGate:                a.discoverSpacesComplete,
		NewEnvironFunc:                    newEnvirons,
		NewMigrationMaster:                migrationmaster.NewWorker,
	})
	if err := dependency.Install(engine, manifolds); err != nil {
		if err := worker.Stop(engine); err != nil {
			logger.Errorf("while stopping engine with bad manifolds: %v", err)
		}
		return nil, errors.Trace(err)
	}
	return engine, nil
}
// APIWorkers returns a dependency.Engine running the unit agent's responsibilities.
func (a *UnitAgent) APIWorkers() (worker.Worker, error) {
	manifolds := unitManifolds(unit.ManifoldsConfig{
		Agent:               agent.APIHostPortsSetter{Agent: a},
		LogSource:           a.bufferedLogs,
		LeadershipGuarantee: 30 * time.Second,
		AgentConfigChanged:  a.configChangedVal,
		ValidateMigration:   a.validateMigration,
	})

	config := dependency.EngineConfig{
		IsFatal:     cmdutil.IsFatal,
		WorstError:  cmdutil.MoreImportantError,
		ErrorDelay:  3 * time.Second,
		BounceDelay: 10 * time.Millisecond,
	}
	engine, err := dependency.NewEngine(config)
	if err != nil {
		return nil, err
	}
	if err := dependency.Install(engine, manifolds); err != nil {
		if err := worker.Stop(engine); err != nil {
			logger.Errorf("while stopping engine with bad manifolds: %v", err)
		}
		return nil, err
	}
	if err := startIntrospection(introspectionConfig{
		Agent:      a,
		Engine:     engine,
		WorkerFunc: introspection.NewWorker,
	}); err != nil {
		// If the introspection worker fails to start, we just log the
		// error and continue. This is very unlikely to happen in the
		// real world, as the only failure mode is binding the abstract
		// domain socket, and the OS ensures only one agent holds it.
		logger.Errorf("failed to start introspection worker: %v", err)
	}
	return engine, nil
}
func (a *MachineAgent) makeEngineCreator(previousAgentVersion version.Number) func() (worker.Worker, error) {
	return func() (worker.Worker, error) {
		config := dependency.EngineConfig{
			IsFatal:     cmdutil.IsFatal,
			WorstError:  cmdutil.MoreImportantError,
			ErrorDelay:  3 * time.Second,
			BounceDelay: 10 * time.Millisecond,
		}
		engine, err := dependency.NewEngine(config)
		if err != nil {
			return nil, err
		}
		manifolds := machine.Manifolds(machine.ManifoldsConfig{
			PreviousAgentVersion: previousAgentVersion,
			Agent:                agent.APIHostPortsSetter{Agent: a},
			RootDir:              a.rootDir,
			AgentConfigChanged:   a.configChangedVal,
			UpgradeStepsLock:     a.upgradeComplete,
			UpgradeCheckLock:     a.initialUpgradeCheckComplete,
			OpenState:            a.initState,
			OpenStateForUpgrade:  a.openStateForUpgrade,
			StartStateWorkers:    a.startStateWorkers,
			StartAPIWorkers:      a.startAPIWorkers,
			PreUpgradeSteps:      upgrades.PreUpgradeSteps,
			LogSource:            a.bufferedLogs,
			NewDeployContext:     newDeployContext,
			Clock:                clock.WallClock,
		})
		if err := dependency.Install(engine, manifolds); err != nil {
			if err := worker.Stop(engine); err != nil {
				logger.Errorf("while stopping engine with bad manifolds: %v", err)
			}
			return nil, err
		}
		return engine, nil
	}
}
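// The creator func above is typically handed to a runner that rebuilds the
// whole engine when a fatal error (an upgrade, say) demands a restart. A
// hedged sketch of driving it directly; runEngine and its error handling
// are illustrative, not the agent's actual run loop.

func runEngine(newEngine func() (worker.Worker, error)) error {
	engine, err := newEngine()
	if err != nil {
		return errors.Trace(err)
	}
	// Wait blocks until the engine stops and returns the error it died
	// with; callers that bail out early use worker.Stop(engine) instead.
	return engine.Wait()
}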