func (a *UnitAgent) APIWorkers() (_ worker.Worker, err error) { agentConfig := a.CurrentConfig() dataDir := agentConfig.DataDir() hookLock, err := cmdutil.HookExecutionLock(dataDir) if err != nil { return nil, err } st, entity, err := OpenAPIState(agentConfig, a) if err != nil { return nil, err } unitTag, err := names.ParseUnitTag(entity.Tag()) if err != nil { return nil, errors.Trace(err) } // Ensure that the environment uuid is stored in the agent config. // Luckily the API has it recorded for us after we connect. if agentConfig.Environment().Id() == "" { err := a.ChangeConfig(func(setter agent.ConfigSetter) error { environTag, err := st.EnvironTag() if err != nil { return errors.Annotate(err, "no environment uuid set on api") } return setter.Migrate(agent.MigrateParams{ Environment: environTag, }) }) if err != nil { logger.Warningf("unable to save environment uuid: %v", err) // Not really fatal, just annoying. } } defer func() { if err != nil { st.Close() reportClosedUnitAPI(st) } }() // Before starting any workers, ensure we record the Juju version this unit // agent is running. 
currentTools := &tools.Tools{Version: version.Current} apiStateUpgrader := a.getUpgrader(st) if err := apiStateUpgrader.SetVersion(agentConfig.Tag().String(), currentTools.Version); err != nil { return nil, errors.Annotate(err, "cannot set unit agent version") } runner := worker.NewRunner(cmdutil.ConnectionIsFatal(logger, st), cmdutil.MoreImportant) // start proxyupdater first to ensure proxy settings are correct runner.StartWorker("proxyupdater", func() (worker.Worker, error) { return proxyupdater.New(st.Environment(), false), nil }) runner.StartWorker("upgrader", func() (worker.Worker, error) { return upgrader.NewAgentUpgrader( st.Upgrader(), agentConfig, agentConfig.UpgradedToVersion(), func() bool { return false }, a.initialAgentUpgradeCheckComplete, ), nil }) runner.StartWorker("logger", func() (worker.Worker, error) { return workerlogger.NewLogger(st.Logger(), agentConfig), nil }) runner.StartWorker("uniter", func() (worker.Worker, error) { uniterFacade, err := st.Uniter() if err != nil { return nil, errors.Trace(err) } uniterParams := uniter.UniterParams{ uniterFacade, unitTag, leadership.NewClient(st), dataDir, hookLock, uniter.NewMetricsTimerChooser(), uniter.NewUpdateStatusTimer(), } return uniter.NewUniter(&uniterParams), nil }) runner.StartWorker("apiaddressupdater", func() (worker.Worker, error) { uniterFacade, err := st.Uniter() if err != nil { return nil, errors.Trace(err) } return apiaddressupdater.NewAPIAddressUpdater(uniterFacade, a), nil }) runner.StartWorker("rsyslog", func() (worker.Worker, error) { return cmdutil.NewRsyslogConfigWorker(st.Rsyslog(), agentConfig, rsyslog.RsyslogModeForwarding) }) return cmdutil.NewCloseWorker(logger, runner, st), nil }
// newConnRunner returns a worker.Runner that treats the death of any of
// the given connections as fatal for its workers, using the standard
// cmdutil.MoreImportant ordering and worker.RestartDelay.
func newConnRunner(conns ...cmdutil.Pinger) worker.Runner {
	isFatal := cmdutil.ConnectionIsFatal(logger, conns...)
	return worker.NewRunner(isFatal, cmdutil.MoreImportant, worker.RestartDelay)
}
// startAPIWorkers is called to start workers which rely on the // machine agent's API connection (via the apiworkers manifold). It // returns a Runner with a number of workers attached to it. // // The workers started here need to be converted to run under the // dependency engine. Once they have all been converted, this method - // and the apiworkers manifold - can be removed. func (a *MachineAgent) startAPIWorkers(apiConn api.Connection) (_ worker.Worker, outErr error) { agentConfig := a.CurrentConfig() entity, err := apiagent.NewState(apiConn).Entity(a.Tag()) if err != nil { return nil, errors.Trace(err) } var isModelManager bool for _, job := range entity.Jobs() { switch job { case multiwatcher.JobManageModel: isModelManager = true default: // TODO(dimitern): Once all workers moved over to using // the API, report "unknown job type" here. } } runner := worker.NewRunner( cmdutil.ConnectionIsFatal(logger, apiConn), cmdutil.MoreImportant, worker.RestartDelay, ) defer func() { // If startAPIWorkers exits early with an error, stop the // runner so that any already started runners aren't leaked. if outErr != nil { worker.Stop(runner) } }() // Perform the operations needed to set up hosting for containers. if err := a.setupContainerSupport(runner, apiConn, agentConfig); err != nil { cause := errors.Cause(err) if params.IsCodeDead(cause) || cause == worker.ErrTerminateAgent { return nil, worker.ErrTerminateAgent } return nil, errors.Errorf("setting up container support: %v", err) } if isModelManager { // Published image metadata for some providers are in simple streams. // Providers that do not depend on simple streams do not need this worker. env, err := environs.GetEnviron(apiagent.NewState(apiConn), newEnvirons) if err != nil { return nil, errors.Annotate(err, "getting environ") } if _, ok := env.(simplestreams.HasRegion); ok { // Start worker that stores published image metadata in state. 
runner.StartWorker("imagemetadata", func() (worker.Worker, error) { return newMetadataUpdater(apiConn.MetadataUpdater()), nil }) } // We don't have instance info set and the network config for the // bootstrap machine only, so update it now. All the other machines will // have instance info including network config set at provisioning time. if err := a.setControllerNetworkConfig(apiConn); err != nil { return nil, errors.Annotate(err, "setting controller network config") } } else { runner.StartWorker("stateconverter", func() (worker.Worker, error) { // TODO(fwereade): this worker needs its own facade. facade := apimachiner.NewState(apiConn) handler := conv2state.New(facade, a) w, err := watcher.NewNotifyWorker(watcher.NotifyConfig{ Handler: handler, }) if err != nil { return nil, errors.Annotate(err, "cannot start controller promoter worker") } return w, nil }) } return runner, nil }