Example #1
func newAgentToolsAPI(st *state.State, resources facade.Resources, authorizer facade.Authorizer) (*AgentToolsAPI, error) {
	newEnviron := func() (environs.Environ, error) {
		return stateenvirons.GetNewEnvironFunc(environs.New)(st)
	}
	return NewAgentToolsAPI(st, newEnviron, findTools, envVersionUpdate, authorizer)
}
Example #2
func (s *environSuite) TestGetEnvironment(c *gc.C) {
	env, err := stateenvirons.GetNewEnvironFunc(environs.New)(s.State)
	c.Assert(err, jc.ErrorIsNil)
	config, err := s.State.ModelConfig()
	c.Assert(err, jc.ErrorIsNil)

	c.Check(env.Config().UUID(), jc.DeepEquals, config.UUID())
	c.Check(env, gc.Not(gc.Equals), s.Environ)
}
Example #3
File: shim.go Project: bac/juju
func newStorageProvisionerAPI(st *state.State, resources facade.Resources, authorizer facade.Authorizer) (*StorageProvisionerAPI, error) {
	env, err := stateenvirons.GetNewEnvironFunc(environs.New)(st)
	if err != nil {
		return nil, errors.Annotate(err, "getting environ")
	}
	registry := stateenvirons.NewStorageProviderRegistry(env)
	pm := poolmanager.New(state.NewStateSettings(st), registry)
	return NewStorageProvisionerAPI(stateShim{st}, resources, authorizer, registry, pm)
}
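The same registry and pool-manager setup reappears in example 12's test fixture. A hypothetical helper factoring it out might look like the sketch below; the helper name and the return types are assumptions for illustration, not juju API.

// Hypothetical helper (sketch only): build the storage provider registry
// and pool manager for the model held in st.
func storageRegistryAndPoolManager(st *state.State) (storage.ProviderRegistry, poolmanager.PoolManager, error) {
	env, err := stateenvirons.GetNewEnvironFunc(environs.New)(st)
	if err != nil {
		return nil, nil, errors.Annotate(err, "getting environ")
	}
	registry := stateenvirons.NewStorageProviderRegistry(env)
	return registry, poolmanager.New(state.NewStateSettings(st), registry), nil
}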
Example #4
File: metadata.go Project: bac/juju
// NewAPI returns a new cloud image metadata API facade.
func NewAPI(
	st *state.State,
	resources facade.Resources,
	authorizer facade.Authorizer,
) (*API, error) {
	newEnviron := func() (environs.Environ, error) {
		return stateenvirons.GetNewEnvironFunc(environs.New)(st)
	}
	return createAPI(getState(st), newEnviron, resources, authorizer)
}
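Examples 1 and 4 both wrap the call in a nullary closure so the environ is only constructed when the facade actually needs it. A generic form of that adapter, as a hypothetical helper (not part of juju):

// Hypothetical helper (sketch only): bind st into a nullary environ getter,
// as done inline in examples 1 and 4.
func environGetter(st *state.State) func() (environs.Environ, error) {
	return func() (environs.Environ, error) {
		return stateenvirons.GetNewEnvironFunc(environs.New)(st)
	}
}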
Example #5
File: agent.go Project: bac/juju
func (s *AgentSuite) AssertCanOpenState(c *gc.C, tag names.Tag, dataDir string) {
	config, err := agent.ReadConfig(agent.ConfigPath(dataDir, tag))
	c.Assert(err, jc.ErrorIsNil)
	info, ok := config.MongoInfo()
	c.Assert(ok, jc.IsTrue)
	st, err := state.Open(config.Model(), config.Controller(), info, mongotest.DialOpts(), stateenvirons.GetNewPolicyFunc(
		stateenvirons.GetNewEnvironFunc(environs.New),
	))
	c.Assert(err, jc.ErrorIsNil)
	st.Close()
}
Example #6
func (s *UpgradeSuite) openStateForUpgrade() (*state.State, error) {
	mongoInfo := s.State.MongoConnectionInfo()
	newPolicy := stateenvirons.GetNewPolicyFunc(
		stateenvirons.GetNewEnvironFunc(environs.New),
	)
	st, err := state.Open(s.State.ModelTag(), s.State.ControllerTag(), mongoInfo, mongotest.DialOpts(), newPolicy)
	if err != nil {
		return nil, err
	}
	return st, nil
}
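Opening state with a policy built from GetNewPolicyFunc(GetNewEnvironFunc(environs.New)) recurs throughout these examples (5, 6, 10, 13, 14 and 16). A hypothetical wrapper capturing that pattern (the helper name is illustrative, not a juju API):

// Hypothetical wrapper (sketch only) around the recurring state.Open call:
// the state policy is backed by an environ built from the model in state.
func openStateWithEnvironPolicy(
	modelTag names.ModelTag,
	controllerTag names.ControllerTag,
	info *mongo.MongoInfo,
	dialOpts mongo.DialOpts,
) (*state.State, error) {
	newPolicy := stateenvirons.GetNewPolicyFunc(
		stateenvirons.GetNewEnvironFunc(environs.New),
	)
	return state.Open(modelTag, controllerTag, info, dialOpts, newPolicy)
}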
Example #7
func (s *environSuite) TestGetNewEnvironFunc(c *gc.C) {
	var calls int
	var callArgs environs.OpenParams
	newEnviron := func(args environs.OpenParams) (environs.Environ, error) {
		calls++
		callArgs = args
		return nil, nil
	}
	stateenvirons.GetNewEnvironFunc(newEnviron)(s.State)
	c.Assert(calls, gc.Equals, 1)

	cfg, err := s.State.ModelConfig()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(callArgs.Config, jc.DeepEquals, cfg)
}
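This test pins down the adapter's contract: GetNewEnvironFunc turns a constructor that takes environs.OpenParams into a function that takes *state.State, and the OpenParams.Config it passes is the model config read from state. A minimal sketch of that shape, for illustration only (the real stateenvirons implementation also resolves the model's cloud spec and credential, which is elided here):

// Illustrative sketch only, not the actual stateenvirons implementation.
func getNewEnvironFuncSketch(
	newEnviron func(environs.OpenParams) (environs.Environ, error),
) func(*state.State) (environs.Environ, error) {
	return func(st *state.State) (environs.Environ, error) {
		cfg, err := st.ModelConfig()
		if err != nil {
			return nil, errors.Trace(err)
		}
		// The real implementation also fills in OpenParams.Cloud from the
		// model's cloud definition and credential; that part is omitted.
		return newEnviron(environs.OpenParams{Config: cfg})
	}
}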
Example #8
func (s *WorkerSuite) startWorker(c *gc.C) (worker.Worker, gate.Lock) {
	// create fresh environ to see any injected broken-ness
	environ, err := stateenvirons.GetNewEnvironFunc(environs.New)(s.State)
	c.Assert(err, jc.ErrorIsNil)

	lock := gate.NewLock()
	worker, err := discoverspaces.NewWorker(discoverspaces.Config{
		Facade:   s.API,
		Environ:  environ,
		NewName:  network.ConvertSpaceName,
		Unlocker: lock,
	})
	c.Assert(err, jc.ErrorIsNil)
	return worker, lock
}
Example #9
File: tools.go Project: bac/juju
// fetchAndCacheTools fetches tools with the specified version by searching for a URL
// in simplestreams and GETting it, caching the result in tools storage before returning
// to the caller.
func (h *toolsDownloadHandler) fetchAndCacheTools(v version.Binary, stor binarystorage.Storage, st *state.State) (io.ReadCloser, error) {
	newEnviron := stateenvirons.GetNewEnvironFunc(environs.New)
	env, err := newEnviron(st)
	if err != nil {
		return nil, err
	}
	tools, err := envtools.FindExactTools(env, v.Number, v.Series, v.Arch)
	if err != nil {
		return nil, err
	}

	// No need to verify the server's identity because we verify the SHA-256 hash.
	logger.Infof("fetching %v tools from %v", v, tools.URL)
	resp, err := utils.GetNonValidatingHTTPClient().Get(tools.URL)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		msg := fmt.Sprintf("bad HTTP response: %v", resp.Status)
		if body, err := ioutil.ReadAll(resp.Body); err == nil {
			msg += fmt.Sprintf(" (%s)", bytes.TrimSpace(body))
		}
		return nil, errors.New(msg)
	}
	data, sha256, err := readAndHash(resp.Body)
	if err != nil {
		return nil, err
	}
	if int64(len(data)) != tools.Size {
		return nil, errors.Errorf("size mismatch for %s", tools.URL)
	}
	if sha256 != tools.SHA256 {
		return nil, errors.Errorf("hash mismatch for %s", tools.URL)
	}

	// Cache tarball in tools storage before returning.
	metadata := binarystorage.Metadata{
		Version: v.String(),
		Size:    tools.Size,
		SHA256:  tools.SHA256,
	}
	if err := stor.Add(bytes.NewReader(data), metadata); err != nil {
		return nil, errors.Annotate(err, "error caching tools")
	}
	return ioutil.NopCloser(bytes.NewReader(data)), nil
}
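The readAndHash helper used above is not shown in this excerpt. A purely illustrative sketch matching its usage, reading the body while computing a hex-encoded SHA-256 (assumes crypto/sha256, io, io/ioutil and fmt are imported; not the actual juju code):

// Illustrative sketch of a readAndHash-style helper (not the juju
// implementation): read everything while hashing, return data and hex digest.
func readAndHashSketch(r io.Reader) ([]byte, string, error) {
	hasher := sha256.New()
	data, err := ioutil.ReadAll(io.TeeReader(r, hasher))
	if err != nil {
		return nil, "", err
	}
	return data, fmt.Sprintf("%x", hasher.Sum(nil)), nil
}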
Example #10
// openStateForUpgrade exists to be passed into the upgradesteps
// worker. The upgradesteps worker opens state independently of the
// state worker so that it isn't affected by the state worker's
// lifetime. It ensures the MongoDB server is configured and started,
// and then opens a state connection.
//
// TODO(mjs)- review the need for this once the dependency engine is
// in use. Why can't upgradesteps depend on the main state connection?
func (a *MachineAgent) openStateForUpgrade() (*state.State, error) {
	agentConfig := a.CurrentConfig()
	if err := a.ensureMongoServer(agentConfig); err != nil {
		return nil, errors.Trace(err)
	}
	info, ok := agentConfig.MongoInfo()
	if !ok {
		return nil, errors.New("no state info available")
	}
	st, err := state.Open(agentConfig.Model(), agentConfig.Controller(), info, mongo.DefaultDialOpts(),
		stateenvirons.GetNewPolicyFunc(
			stateenvirons.GetNewEnvironFunc(environs.New),
		),
	)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return st, nil
}
Example #11
File: environs.go Project: bac/juju
// Reset resets the entire dummy environment and forgets any registered
// operation listener. All opened environments after Reset will share
// the same underlying state.
func Reset(c *gc.C) {
	logger.Infof("reset model")
	dummy.mu.Lock()
	dummy.ops = discardOperations
	oldState := dummy.state
	dummy.controllerState = nil
	dummy.state = make(map[string]*environState)
	dummy.newStatePolicy = stateenvirons.GetNewPolicyFunc(
		stateenvirons.GetNewEnvironFunc(environs.New),
	)
	dummy.supportsSpaces = true
	dummy.supportsSpaceDiscovery = false
	dummy.mu.Unlock()

	// NOTE(axw) we must destroy the old states without holding
	// the provider lock, or we risk deadlocking. Destroying
	// state involves closing the embedded API server, which
	// may require waiting on RPC calls that interact with the
	// EnvironProvider (e.g. EnvironProvider.Open).
	for _, s := range oldState {
		if s.apiListener != nil {
			s.apiListener.Close()
		}
		s.destroy()
	}
	if mongoAlive() {
		err := retry.Call(retry.CallArgs{
			Func: gitjujutesting.MgoServer.Reset,
			// Only interested in retrying the intermittent
			// 'unexpected message'.
			IsFatalError: func(err error) bool {
				return !strings.HasSuffix(err.Error(), "unexpected message")
			},
			Delay:    time.Millisecond,
			Clock:    clock.WallClock,
			Attempts: 5,
		})
		c.Assert(err, jc.ErrorIsNil)
	}
}
Example #12
func (s *provisionerSuite) SetUpTest(c *gc.C) {
	s.JujuConnSuite.SetUpTest(c)
	s.factory = factory.NewFactory(s.State)
	// Create the resource registry separately to track invocations to
	// Register.
	s.resources = common.NewResources()
	s.AddCleanup(func(_ *gc.C) { s.resources.StopAll() })

	env, err := stateenvirons.GetNewEnvironFunc(environs.New)(s.State)
	c.Assert(err, jc.ErrorIsNil)
	registry := stateenvirons.NewStorageProviderRegistry(env)
	pm := poolmanager.New(state.NewStateSettings(s.State), registry)

	s.authorizer = &apiservertesting.FakeAuthorizer{
		Tag:            names.NewMachineTag("0"),
		EnvironManager: true,
	}
	backend := storageprovisioner.NewStateBackend(s.State)
	s.api, err = storageprovisioner.NewStorageProvisionerAPI(backend, s.resources, s.authorizer, registry, pm)
	c.Assert(err, jc.ErrorIsNil)
}
Example #13
func openState(agentConfig agent.Config, dialOpts mongo.DialOpts) (_ *state.State, _ *state.Machine, err error) {
	info, ok := agentConfig.MongoInfo()
	if !ok {
		return nil, nil, errors.Errorf("no state info available")
	}
	st, err := state.Open(agentConfig.Model(), agentConfig.Controller(), info, dialOpts,
		stateenvirons.GetNewPolicyFunc(
			stateenvirons.GetNewEnvironFunc(environs.New),
		),
	)
	if err != nil {
		return nil, nil, err
	}
	defer func() {
		if err != nil {
			st.Close()
		}
	}()
	m0, err := st.FindEntity(agentConfig.Tag())
	if err != nil {
		if errors.IsNotFound(err) {
			err = worker.ErrTerminateAgent
		}
		return nil, nil, err
	}
	m := m0.(*state.Machine)
	if m.Life() == state.Dead {
		return nil, nil, worker.ErrTerminateAgent
	}
	// Check the machine nonce as provisioned matches the agent.Conf value.
	if !m.CheckProvisioned(agentConfig.Nonce()) {
		// The agent is running on a different machine to the one it
		// should be according to state. It must stop immediately.
		logger.Errorf("running machine %v agent on inappropriate instance", m)
		return nil, nil, worker.ErrTerminateAgent
	}
	return st, m, nil
}
Example #14
File: conn.go Project: bac/juju
// newState returns a new State that uses the given environment.
// The environment must have already been bootstrapped.
func newState(controllerUUID string, environ environs.Environ, mongoInfo *mongo.MongoInfo) (*state.State, error) {
	if controllerUUID == "" {
		return nil, errors.New("missing controller UUID")
	}
	config := environ.Config()
	password := AdminSecret
	if password == "" {
		return nil, errors.Errorf("cannot connect without admin-secret")
	}
	modelTag := names.NewModelTag(config.UUID())

	mongoInfo.Password = password
	opts := mongotest.DialOpts()
	newPolicyFunc := stateenvirons.GetNewPolicyFunc(
		stateenvirons.GetNewEnvironFunc(environs.New),
	)
	controllerTag := names.NewControllerTag(controllerUUID)
	st, err := state.Open(modelTag, controllerTag, mongoInfo, opts, newPolicyFunc)
	if errors.IsUnauthorized(errors.Cause(err)) {
		// We try for a while because we might succeed in
		// connecting to mongo before the state has been
		// initialized and the initial password set.
		for a := redialStrategy.Start(); a.Next(); {
			st, err = state.Open(modelTag, controllerTag, mongoInfo, opts, newPolicyFunc)
			if !errors.IsUnauthorized(errors.Cause(err)) {
				break
			}
		}
		if err != nil {
			return nil, err
		}
	} else if err != nil {
		return nil, err
	}
	return st, nil
}
Example #15
File: restore.go Project: bac/juju
// newStateConnection tries to connect to the newly restored controller.
func newStateConnection(controllerTag names.ControllerTag, modelTag names.ModelTag, info *mongo.MongoInfo) (*state.State, error) {
	// We need to retry here to allow mongo to come up on the restored controller.
	// The connection might succeed due to the mongo dial retries but there may still
	// be a problem issuing database commands.
	var (
		st  *state.State
		err error
	)
	const (
		newStateConnDelay       = 15 * time.Second
		newStateConnMinAttempts = 8
	)
	// TODO(katco): 2016-08-09: lp:1611427
	attempt := utils.AttemptStrategy{Delay: newStateConnDelay, Min: newStateConnMinAttempts}
	getEnviron := stateenvirons.GetNewEnvironFunc(environs.New)
	for a := attempt.Start(); a.Next(); {
		st, err = state.Open(modelTag, controllerTag, info, mongoDefaultDialOpts(), environsGetNewPolicyFunc(getEnviron))
		if err == nil {
			return st, nil
		}
		logger.Errorf("cannot open state, retrying: %v", err)
	}
	return st, errors.Annotate(err, "cannot open state")
}
Example #16
// Run initializes state for an environment.
func (c *BootstrapCommand) Run(_ *cmd.Context) error {
	bootstrapParamsData, err := ioutil.ReadFile(c.BootstrapParamsFile)
	if err != nil {
		return errors.Annotate(err, "reading bootstrap params file")
	}
	var args instancecfg.StateInitializationParams
	if err := args.Unmarshal(bootstrapParamsData); err != nil {
		return errors.Trace(err)
	}

	err = c.ReadConfig("machine-0")
	if err != nil {
		return errors.Annotate(err, "cannot read config")
	}
	agentConfig := c.CurrentConfig()

	// agent.Jobs is an optional field in the agent config, and was
	// introduced after 1.17.2. We default to allowing units on
	// machine-0 if missing.
	jobs := agentConfig.Jobs()
	if len(jobs) == 0 {
		jobs = []multiwatcher.MachineJob{
			multiwatcher.JobManageModel,
			multiwatcher.JobHostUnits,
		}
	}

	// Get the bootstrap machine's addresses from the provider.
	cloudSpec, err := environs.MakeCloudSpec(
		args.ControllerCloud,
		args.ControllerCloudName,
		args.ControllerCloudRegion,
		args.ControllerCloudCredential,
	)
	if err != nil {
		return errors.Trace(err)
	}
	env, err := environs.New(environs.OpenParams{
		Cloud:  cloudSpec,
		Config: args.ControllerModelConfig,
	})
	if err != nil {
		return errors.Annotate(err, "new environ")
	}
	newConfigAttrs := make(map[string]interface{})

	// Check to see if a newer agent version has been requested
	// by the bootstrap client.
	desiredVersion, ok := args.ControllerModelConfig.AgentVersion()
	if ok && desiredVersion != jujuversion.Current {
		// If we have been asked for a newer version, ensure the newer
		// tools can actually be found, or else bootstrap won't complete.
		stream := envtools.PreferredStream(&desiredVersion, args.ControllerModelConfig.Development(), args.ControllerModelConfig.AgentStream())
		logger.Infof("newer tools requested, looking for %v in stream %v", desiredVersion, stream)
		filter := tools.Filter{
			Number: desiredVersion,
			Arch:   arch.HostArch(),
			Series: series.HostSeries(),
		}
		_, toolsErr := envtools.FindTools(env, -1, -1, stream, filter)
		if toolsErr == nil {
			logger.Infof("tools are available, upgrade will occur after bootstrap")
		}
		if errors.IsNotFound(toolsErr) {
			// Newer tools not available, so revert to using the tools
			// matching the current agent version.
			logger.Warningf("newer tools for %q not available, sticking with version %q", desiredVersion, jujuversion.Current)
			newConfigAttrs["agent-version"] = jujuversion.Current.String()
		} else if toolsErr != nil {
			logger.Errorf("cannot find newer tools: %v", toolsErr)
			return toolsErr
		}
	}

	instances, err := env.Instances([]instance.Id{args.BootstrapMachineInstanceId})
	if err != nil {
		return errors.Annotate(err, "getting bootstrap instance")
	}
	addrs, err := instances[0].Addresses()
	if err != nil {
		return errors.Annotate(err, "bootstrap instance addresses")
	}

	// When machine addresses are reported from state, they have
	// duplicates removed.  We should do the same here so that
	// there is not unnecessary churn in the mongo replicaset.
	// TODO (cherylj) Add explicit unit tests for this - tracked
	// by bug #1544158.
	addrs = network.MergedAddresses([]network.Address{}, addrs)

	// Generate a private SSH key for the controllers, and add
	// the public key to the environment config. We'll add the
	// private key to StateServingInfo below.
	privateKey, publicKey, err := sshGenerateKey(config.JujuSystemKey)
	if err != nil {
		return errors.Annotate(err, "failed to generate system key")
	}
	authorizedKeys := config.ConcatAuthKeys(args.ControllerModelConfig.AuthorizedKeys(), publicKey)
	newConfigAttrs[config.AuthorizedKeysKey] = authorizedKeys

	// Generate a shared secret for the Mongo replica set, and write it out.
	sharedSecret, err := mongo.GenerateSharedSecret()
	if err != nil {
		return err
	}
	info, ok := agentConfig.StateServingInfo()
	if !ok {
		return fmt.Errorf("bootstrap machine config has no state serving info")
	}
	info.SharedSecret = sharedSecret
	info.SystemIdentity = privateKey
	err = c.ChangeConfig(func(agentConfig agent.ConfigSetter) error {
		agentConfig.SetStateServingInfo(info)
		return nil
	})
	if err != nil {
		return fmt.Errorf("cannot write agent config: %v", err)
	}

	agentConfig = c.CurrentConfig()

	// Create system-identity file
	if err := agent.WriteSystemIdentityFile(agentConfig); err != nil {
		return err
	}

	if err := c.startMongo(addrs, agentConfig); err != nil {
		return errors.Annotate(err, "failed to start mongo")
	}

	controllerModelCfg, err := env.Config().Apply(newConfigAttrs)
	if err != nil {
		return errors.Annotate(err, "failed to update model config")
	}
	args.ControllerModelConfig = controllerModelCfg

	// Initialise state, and store any agent config (e.g. password) changes.
	var st *state.State
	var m *state.Machine
	err = c.ChangeConfig(func(agentConfig agent.ConfigSetter) error {
		var stateErr error
		dialOpts := mongo.DefaultDialOpts()

		// Set a longer socket timeout than usual, as the machine
		// will be starting up and disk I/O slower than usual. This
		// has been known to cause timeouts in queries.
		dialOpts.SocketTimeout = c.Timeout
		if dialOpts.SocketTimeout < minSocketTimeout {
			dialOpts.SocketTimeout = minSocketTimeout
		}

		// We shouldn't attempt to dial peers until we have some.
		dialOpts.Direct = true

		adminTag := names.NewLocalUserTag(adminUserName)
		st, m, stateErr = agentInitializeState(
			adminTag,
			agentConfig,
			agentbootstrap.InitializeStateParams{
				StateInitializationParams: args,
				BootstrapMachineAddresses: addrs,
				BootstrapMachineJobs:      jobs,
				SharedSecret:              sharedSecret,
				Provider:                  environs.Provider,
				StorageProviderRegistry:   stateenvirons.NewStorageProviderRegistry(env),
			},
			dialOpts,
			stateenvirons.GetNewPolicyFunc(
				stateenvirons.GetNewEnvironFunc(environs.New),
			),
		)
		return stateErr
	})
	if err != nil {
		return err
	}
	defer st.Close()

	// Populate the tools catalogue.
	if err := c.populateTools(st, env); err != nil {
		return err
	}

	// Populate the GUI archive catalogue.
	if err := c.populateGUIArchive(st, env); err != nil {
		// Do not stop the bootstrapping process for Juju GUI archive errors.
		logger.Warningf("cannot set up Juju GUI: %s", err)
	} else {
		logger.Debugf("Juju GUI successfully set up")
	}

	// Add custom image metadata to environment storage.
	if len(args.CustomImageMetadata) > 0 {
		if err := c.saveCustomImageMetadata(st, env, args.CustomImageMetadata); err != nil {
			return err
		}
	}

	// bootstrap machine always gets the vote
	return m.SetHasVote(true)
}
Example #17
File: environs.go Project: bac/juju
	environs.RegisterProvider("dummy", &dummy)

	// Prime the first ops channel, so that naive clients can use
	// the testing environment by simply importing it.
	go func() {
		for _ = range discardOperations {
		}
	}()
}

// dummy is the dummy environmentProvider singleton.
var dummy = environProvider{
	ops:   discardOperations,
	state: make(map[string]*environState),
	newStatePolicy: stateenvirons.GetNewPolicyFunc(
		stateenvirons.GetNewEnvironFunc(environs.New),
	),
	supportsSpaces:         true,
	supportsSpaceDiscovery: false,
}

// Reset resets the entire dummy environment and forgets any registered
// operation listener. All opened environments after Reset will share
// the same underlying state.
func Reset(c *gc.C) {
	logger.Infof("reset model")
	dummy.mu.Lock()
	dummy.ops = discardOperations
	oldState := dummy.state
	dummy.controllerState = nil
	dummy.state = make(map[string]*environState)
Example #18
// startStateWorkers returns a worker running all the workers that
// require a *state.State connection.
func (a *MachineAgent) startStateWorkers(st *state.State) (worker.Worker, error) {
	agentConfig := a.CurrentConfig()

	m, err := getMachine(st, agentConfig.Tag())
	if err != nil {
		return nil, errors.Annotate(err, "machine lookup")
	}

	runner := newConnRunner(st)
	singularRunner, err := newSingularStateRunner(runner, st, m)
	if err != nil {
		return nil, errors.Trace(err)
	}

	for _, job := range m.Jobs() {
		switch job {
		case state.JobHostUnits:
			// Implemented elsewhere with workers that use the API.
		case state.JobManageModel:
			useMultipleCPUs()
			a.startWorkerAfterUpgrade(runner, "model worker manager", func() (worker.Worker, error) {
				w, err := modelworkermanager.New(modelworkermanager.Config{
					ControllerUUID: st.ControllerUUID(),
					Backend:        st,
					NewWorker:      a.startModelWorkers,
					ErrorDelay:     worker.RestartDelay,
				})
				if err != nil {
					return nil, errors.Annotate(err, "cannot start model worker manager")
				}
				return w, nil
			})
			a.startWorkerAfterUpgrade(runner, "peergrouper", func() (worker.Worker, error) {
				env, err := stateenvirons.GetNewEnvironFunc(environs.New)(st)
				if err != nil {
					return nil, errors.Annotate(err, "getting environ from state")
				}
				supportsSpaces := environs.SupportsSpaces(env)
				w, err := peergrouperNew(st, supportsSpaces)
				if err != nil {
					return nil, errors.Annotate(err, "cannot start peergrouper worker")
				}
				return w, nil
			})
			a.startWorkerAfterUpgrade(runner, "restore", func() (worker.Worker, error) {
				w, err := a.newRestoreStateWatcherWorker(st)
				if err != nil {
					return nil, errors.Annotate(err, "cannot start backup-restorer worker")
				}
				return w, nil
			})
			a.startWorkerAfterUpgrade(runner, "mongoupgrade", func() (worker.Worker, error) {
				return newUpgradeMongoWorker(st, a.machineId, a.maybeStopMongo)
			})

			// certChangedChan is shared by multiple workers; it's up
			// to the agent to close it rather than any one of the
			// workers. It is possible that multiple cert changes
			// come in before the apiserver is up to receive them.
			// Specify a bigger buffer to prevent deadlock when
			// the apiserver isn't up yet.  Use a size of 10 since we
			// allow up to 7 controllers, and might also update the
			// addresses of the local machine (127.0.0.1, ::1, etc).
			//
			// TODO(cherylj/waigani) Remove this workaround when
			// certupdater and apiserver can properly manage dependencies
			// through the dependency engine.
			//
			// TODO(ericsnow) For now we simply do not close the channel.
			certChangedChan := make(chan params.StateServingInfo, 10)
			// Each time apiserver worker is restarted, we need a fresh copy of state due
			// to the fact that state holds lease managers which are killed and need to be reset.
			stateOpener := func() (*state.State, error) {
				logger.Debugf("opening state for apiserver worker")
				st, _, err := openState(agentConfig, stateWorkerDialOpts)
				return st, err
			}
			runner.StartWorker("apiserver", a.apiserverWorkerStarter(stateOpener, certChangedChan))
			var stateServingSetter certupdater.StateServingInfoSetter = func(info params.StateServingInfo, done <-chan struct{}) error {
				return a.ChangeConfig(func(config agent.ConfigSetter) error {
					config.SetStateServingInfo(info)
					logger.Infof("update apiserver worker with new certificate")
					select {
					case certChangedChan <- info:
						return nil
					case <-done:
						return nil
					}
				})
			}
			a.startWorkerAfterUpgrade(runner, "certupdater", func() (worker.Worker, error) {
				return newCertificateUpdater(m, agentConfig, st, st, stateServingSetter), nil
			})

			a.startWorkerAfterUpgrade(singularRunner, "dblogpruner", func() (worker.Worker, error) {
				return dblogpruner.New(st, dblogpruner.NewLogPruneParams()), nil
			})

			a.startWorkerAfterUpgrade(singularRunner, "txnpruner", func() (worker.Worker, error) {
				return txnpruner.New(st, time.Hour*2, clock.WallClock), nil
			})
		default:
			return nil, errors.Errorf("unknown job type %q", job)
		}
	}
	return runner, nil
}