Example 1
// newNetworker creates a Networker worker with the specified arguments.
func newNetworker(
	st *apinetworker.State,
	agentConfig agent.Config,
	configBasePath string,
	canWriteNetworkConfig bool,
) (*Networker, error) {
	tag, ok := agentConfig.Tag().(names.MachineTag)
	if !ok {
		// This should never happen, as there is a check for it in the
		// machine agent.
		return nil, fmt.Errorf("expected names.MachineTag, got %T", agentConfig.Tag())
	}
	nw := &Networker{
		st:                    st,
		tag:                   tag,
		configBasePath:        configBasePath,
		canWriteNetworkConfig: canWriteNetworkConfig,
		configFiles:           make(map[string]*configFile),
		networkInfo:           make(map[string]network.Info),
		interfaces:            make(map[string]net.Interface),
	}
	go func() {
		defer nw.tomb.Done()
		nw.tomb.Kill(nw.loop())
	}()
	return nw, nil
}
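The goroutine at the end of newNetworker is the tomb-based worker lifecycle that several of the older examples share: the loop runs in its own goroutine, Kill records the first error it returns, and Done marks the tomb dead so that Wait can unblock. Below is a minimal, self-contained sketch of that pattern (not Juju code), assuming gopkg.in/tomb.v1; the pinger type and its loop are invented for illustration.

package main

import (
	"fmt"
	"time"

	"gopkg.in/tomb.v1"
)

// pinger is a toy worker using the same tomb lifecycle as Networker above.
type pinger struct {
	tomb tomb.Tomb
}

func newPinger() *pinger {
	p := &pinger{}
	go func() {
		defer p.tomb.Done()   // mark the tomb dead when the goroutine exits
		p.tomb.Kill(p.loop()) // record the loop's error (nil on a clean stop)
	}()
	return p
}

func (p *pinger) loop() error {
	for {
		select {
		case <-p.tomb.Dying():
			return tomb.ErrDying
		case <-time.After(100 * time.Millisecond):
			fmt.Println("ping")
		}
	}
}

// Kill and Wait give callers the usual stop/wait semantics.
func (p *pinger) Kill()       { p.tomb.Kill(nil) }
func (p *pinger) Wait() error { return p.tomb.Wait() }

func main() {
	p := newPinger()
	time.Sleep(300 * time.Millisecond)
	p.Kill()
	fmt.Println("worker stopped:", p.Wait())
}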
Example 2
// NewNetworker returns a Worker that handles machine networking
// configuration. If there is no <configBaseDir>/interfaces file, an
// error is returned.
func NewNetworker(
	st apinetworker.State,
	agentConfig agent.Config,
	intrusiveMode bool,
	configBaseDir string,
) (*Networker, error) {
	tag, ok := agentConfig.Tag().(names.MachineTag)
	if !ok {
		// This should never happen, as there is a check for it in the
		// machine agent.
		return nil, fmt.Errorf("expected names.MachineTag, got %T", agentConfig.Tag())
	}
	nw := &Networker{
		st:            st,
		tag:           tag,
		intrusiveMode: intrusiveMode,
		configBaseDir: configBaseDir,
		configFiles:   make(map[string]*configFile),
		interfaceInfo: make(map[string]network.InterfaceInfo),
		interfaces:    make(map[string]net.Interface),
	}
	err := catacomb.Invoke(catacomb.Plan{
		Site: &nw.catacomb,
		Work: nw.loop,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return nw, nil
}
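Example 2 replaces the hand-rolled goroutine of Example 1 with catacomb.Invoke, which starts the Work function in its own goroutine and ties its lifetime to the Site. A minimal sketch of the same lifecycle follows (not Juju code); the import path is assumed to be github.com/juju/juju/worker/catacomb, matching the era of this code, and the pinger type is invented for illustration.

// pinger is a toy worker driven by a catacomb instead of a tomb.
type pinger struct {
	catacomb catacomb.Catacomb
}

func newPinger() (*pinger, error) {
	p := &pinger{}
	// Invoke runs p.loop in a new goroutine; if it cannot start, the
	// constructor fails instead of returning a half-started worker.
	err := catacomb.Invoke(catacomb.Plan{
		Site: &p.catacomb,
		Work: p.loop,
	})
	if err != nil {
		return nil, err
	}
	return p, nil
}

func (p *pinger) loop() error {
	<-p.catacomb.Dying()
	return p.catacomb.ErrDying()
}

// Kill and Wait satisfy the worker.Worker interface.
func (p *pinger) Kill()       { p.catacomb.Kill(nil) }
func (p *pinger) Wait() error { return p.catacomb.Wait() }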
Example 3
func openState(agentConfig agent.Config, dialOpts mongo.DialOpts) (_ *state.State, _ *state.Machine, err error) {
	info, ok := agentConfig.MongoInfo()
	if !ok {
		return nil, nil, fmt.Errorf("no state info available")
	}
	st, err := state.Open(agentConfig.Model(), info, dialOpts, environs.NewStatePolicy())
	if err != nil {
		return nil, nil, err
	}
	defer func() {
		if err != nil {
			st.Close()
		}
	}()
	m0, err := st.FindEntity(agentConfig.Tag())
	if err != nil {
		if errors.IsNotFound(err) {
			err = worker.ErrTerminateAgent
		}
		return nil, nil, err
	}
	m := m0.(*state.Machine)
	if m.Life() == state.Dead {
		return nil, nil, worker.ErrTerminateAgent
	}
	// Check that the machine nonce as provisioned matches the agent.Conf value.
	if !m.CheckProvisioned(agentConfig.Nonce()) {
		// The agent is running on a different machine to the one it
		// should be according to state. It must stop immediately.
		logger.Errorf("running machine %v agent on inappropriate instance", m)
		return nil, nil, worker.ErrTerminateAgent
	}
	return st, m, nil
}
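openState uses a named error return so that the deferred function can close the freshly opened state only when a later step fails. Here is a minimal, standard-library sketch of that cleanup-on-error pattern; the openTwo function and the file paths are purely illustrative.

package main

import (
	"fmt"
	"os"
)

// openTwo opens two files; if the second open fails, the deferred function
// sees the non-nil named return err and closes the first file.
func openTwo(path1, path2 string) (_ *os.File, _ *os.File, err error) {
	f1, err := os.Open(path1)
	if err != nil {
		return nil, nil, err
	}
	defer func() {
		if err != nil {
			f1.Close() // only runs when a later step failed
		}
	}()
	f2, err := os.Open(path2)
	if err != nil {
		return nil, nil, err
	}
	return f1, f2, nil
}

func main() {
	f1, f2, err := openTwo("/etc/hostname", "/etc/hosts") // illustrative paths
	if err != nil {
		fmt.Println("open failed:", err)
		return
	}
	defer f1.Close()
	defer f2.Close()
	fmt.Println("opened", f1.Name(), "and", f2.Name())
}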
Example 4
// NewWorker returns a worker that keeps track of
// the machine's authorised ssh keys and ensures the
// ~/.ssh/authorized_keys file is up to date.
func NewWorker(st *keyupdater.State, agentConfig agent.Config) worker.Worker {
	if version.Current.OS == version.Windows {
		return worker.NewNoOpWorker()
	}
	kw := &keyupdaterWorker{st: st, tag: agentConfig.Tag().(names.MachineTag)}
	return worker.NewNotifyWorker(kw)
}
Example 5
// NewWorker returns a worker that keeps track of
// the machine's authorised ssh keys and ensures the
// ~/.ssh/authorized_keys file is up to date.
func NewWorker(st *keyupdater.State, agentConfig agent.Config) worker.Worker {
	if os.HostOS() == os.Windows {
		return worker.NewNoOpWorker()
	}
	kw := &keyupdaterWorker{st: st, tag: agentConfig.Tag().(names.MachineTag)}
	return worker.NewNotifyWorker(kw)
}
Example 6
func waitForUpgradeToFinish(c *gc.C, conf agent.Config) {
	success := false
	for attempt := coretesting.LongAttempt.Start(); attempt.Next(); {
		diskConf := readConfigFromDisk(c, conf.DataDir(), conf.Tag())
		success = diskConf.UpgradedToVersion() == version.Current
		if success {
			break
		}
	}
	c.Assert(success, jc.IsTrue)
}
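waitForUpgradeToFinish polls the on-disk agent config until UpgradedToVersion reports the running version, using the test suite's LongAttempt strategy. A standard-library sketch of the same poll-until-true idea follows; the waitFor helper and its arguments are invented for illustration.

package main

import (
	"fmt"
	"time"
)

// waitFor re-checks cond every interval until it returns true or the timeout
// elapses; a final check after the deadline catches a condition that becomes
// true at the last moment.
func waitFor(cond func() bool, timeout, interval time.Duration) bool {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if cond() {
			return true
		}
		time.Sleep(interval)
	}
	return cond()
}

func main() {
	start := time.Now()
	ok := waitFor(func() bool { return time.Since(start) > 200*time.Millisecond },
		time.Second, 50*time.Millisecond)
	fmt.Println("condition met:", ok)
}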
Example 7
func NewReboot(st *reboot.State, agentConfig agent.Config, machineLock *fslock.Lock) (worker.Worker, error) {
	tag, ok := agentConfig.Tag().(names.MachineTag)
	if !ok {
		return nil, errors.Errorf("Expected names.MachineTag, got %T: %v", agentConfig.Tag(), agentConfig.Tag())
	}
	r := &Reboot{
		st:          st,
		tag:         tag,
		machineLock: machineLock,
	}
	return worker.NewNotifyWorker(r), nil
}
Example 8
// NewUpgrader returns a new upgrader worker. It watches changes to the
// current version of the current agent (with the given tag) and tries to
// download the tools for any new version into the given data directory.  If
// an upgrade is needed, the worker will exit with an UpgradeReadyError
// holding details of the requested upgrade. The tools will have been
// downloaded and unpacked.
func NewUpgrader(st *upgrader.State, agentConfig agent.Config) *Upgrader {
	u := &Upgrader{
		st:      st,
		dataDir: agentConfig.DataDir(),
		tag:     agentConfig.Tag(),
	}
	go func() {
		defer u.tomb.Done()
		u.tomb.Kill(u.loop())
	}()
	return u
}
Example 9
// NewNetworker returns a Worker that handles machine networking
// configuration. If there is no /etc/network/interfaces file, an
// error is returned.
func NewNetworker(st *apinetworker.State, agentConfig agent.Config) (worker.Worker, error) {
	nw := &networker{
		st:  st,
		tag: agentConfig.Tag().String(),
	}
	// Verify we have /etc/network/interfaces first, otherwise bail out.
	if !CanStart() {
		err := fmt.Errorf("missing %q config file", configFileName)
		logger.Infof("not starting worker: %v", err)
		return nil, err
	}
	return worker.NewNotifyWorker(nw), nil
}
Example 10
File: main.go Project: zhouqt/juju
// setupAgentLogging redirects logging to rolled log files.
//
// NOTE: do not use this in the bootstrap agent, or
// if you do, change the bootstrap error reporting.
func setupAgentLogging(conf agent.Config) error {
	filename := filepath.Join(conf.LogDir(), conf.Tag().String()+".log")

	log := &lumberjack.Logger{
		Filename:   filename,
		MaxSize:    300, // megabytes
		MaxBackups: 2,
	}

	writer := loggo.NewSimpleWriter(log, &loggo.DefaultFormatter{})
	_, err := loggo.ReplaceDefaultWriter(writer)
	return err
}
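lumberjack.Logger is an io.Writer, which is why it can be handed to loggo.NewSimpleWriter above. The same rolling-file setup works with the standard library logger; a minimal sketch assuming gopkg.in/natefinch/lumberjack.v2 and a hypothetical log path:

package main

import (
	"log"

	"gopkg.in/natefinch/lumberjack.v2"
)

func main() {
	// Route the default logger to a rolled file: at most 300 MB per file and
	// two old files kept, mirroring the settings in setupAgentLogging.
	log.SetOutput(&lumberjack.Logger{
		Filename:   "/tmp/agent-example.log", // hypothetical path
		MaxSize:    300,                      // megabytes
		MaxBackups: 2,
	})
	log.Println("logging to a rolled file")
}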
Example 11
// NewMachineEnvironmentWorker returns a worker.Worker that uses the notify
// watcher returned from the setup.
func NewMachineEnvironmentWorker(api *environment.Facade, agentConfig agent.Config) worker.Worker {
	// We don't write out system files for the local provider on machine zero
	// as that is the host machine.
	writeSystemFiles := (agentConfig.Tag() != names.NewMachineTag("0").String() ||
		agentConfig.Value(agent.ProviderType) != provider.Local)
	logger.Debugf("write system files: %v", writeSystemFiles)
	envWorker := &MachineEnvironmentWorker{
		api:              api,
		writeSystemFiles: writeSystemFiles,
		first:            true,
	}
	return worker.NewNotifyWorker(envWorker)
}
Example 12
func NewReboot(st reboot.State, agentConfig agent.Config, machineLock *fslock.Lock) (worker.Worker, error) {
	tag, ok := agentConfig.Tag().(names.MachineTag)
	if !ok {
		return nil, errors.Errorf("Expected names.MachineTag, got %T: %v", agentConfig.Tag(), agentConfig.Tag())
	}
	r := &Reboot{
		st:          st,
		tag:         tag,
		machineLock: machineLock,
	}
	w, err := watcher.NewNotifyWorker(watcher.NotifyConfig{
		Handler: r,
	})
	return w, errors.Trace(err)
}
Example 13
// updateSupportedContainers records in state that a machine can run the specified containers.
// It starts a watcher and when a container of a given type is first added to the machine,
// the watcher is killed, the machine is set up to be able to start containers of the given type,
// and a suitable provisioner is started.
func (a *MachineAgent) updateSupportedContainers(
	runner worker.Runner,
	st api.Connection,
	containers []instance.ContainerType,
	agentConfig agent.Config,
) error {
	pr := apiprovisioner.NewState(st)
	tag := agentConfig.Tag().(names.MachineTag)
	machine, err := pr.Machine(tag)
	if errors.IsNotFound(err) || err == nil && machine.Life() == params.Dead {
		return worker.ErrTerminateAgent
	}
	if err != nil {
		return errors.Annotatef(err, "cannot load machine %s from state", tag)
	}
	if len(containers) == 0 {
		if err := machine.SupportsNoContainers(); err != nil {
			return errors.Annotatef(err, "clearing supported containers for %s", tag)
		}
		return nil
	}
	if err := machine.SetSupportedContainers(containers...); err != nil {
		return errors.Annotatef(err, "setting supported containers for %s", tag)
	}
	// Start the watcher to fire when a container is first requested on the machine.
	watcherName := fmt.Sprintf("%s-container-watcher", machine.Id())
	params := provisioner.ContainerSetupParams{
		Runner:              runner,
		WorkerName:          watcherName,
		SupportedContainers: containers,
		Machine:             machine,
		Provisioner:         pr,
		Config:              agentConfig,
		InitLockName:        agent.MachineLockName,
	}
	handler := provisioner.NewContainerSetupHandler(params)
	a.startWorkerAfterUpgrade(runner, watcherName, func() (worker.Worker, error) {
		w, err := watcher.NewStringsWorker(watcher.StringsConfig{
			Handler: handler,
		})
		if err != nil {
			return nil, errors.Annotatef(err, "cannot start %s worker", watcherName)
		}
		return w, nil
	})
	return nil
}
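One detail worth calling out in the life check above: Go's && binds more tightly than ||, so the condition parses as errors.IsNotFound(err) || (err == nil && machine.Life() == params.Dead). A tiny standard-library demonstration:

package main

import "fmt"

func main() {
	notFound, noErr, dead := false, true, true
	// Parses as notFound || (noErr && dead), not (notFound || noErr) && dead.
	fmt.Println(notFound || noErr && dead) // true
}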
Example 14
File: restore.go Project: bac/juju
// tagUserCredentials is a convenience function that extracts the
// tag user and apipassword, required to access mongodb.
func tagUserCredentials(conf agent.Config) (string, string, error) {
	username := conf.Tag().String()
	var password string
	// TODO(perrito) we might need an accessor for the actual state password,
	// in case it ever stops matching the API password.
	apiInfo, ok := conf.APIInfo()
	if ok {
		password = apiInfo.Password
	} else {
		// There seems to be no way to reach this inconsistency other than
		// making a backup on a machine where these fields are corrupted, and
		// even then there is no obvious way to reach this state. Since
		// APIInfo allows for it, handle it anyway; we cannot recover from it,
		// as it would mean that the agent.conf is corrupted.
		return "", "", errors.New("cannot obtain password to access the controller")
	}
	return username, password, nil
}
Example 15
// NewWorker returns a worker that keeps track of
// the machine's authorised ssh keys and ensures the
// ~/.ssh/authorized_keys file is up to date.
func NewWorker(st *keyupdater.State, agentConfig agent.Config) (worker.Worker, error) {
	machineTag, ok := agentConfig.Tag().(names.MachineTag)
	if !ok {
		return nil, errors.NotValidf("machine tag %v", agentConfig.Tag())
	}
	if os.HostOS() == os.Windows {
		return worker.NewNoOpWorker(), nil
	}
	w, err := watcher.NewNotifyWorker(watcher.NotifyConfig{
		Handler: &keyupdaterWorker{
			st:  st,
			tag: machineTag,
		},
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return w, nil
}
Example 16
// NewUpgrader returns a new upgrader worker. It watches changes to the
// current version of the current agent (with the given tag) and tries to
// download the tools for any new version into the given data directory.  If
// an upgrade is needed, the worker will exit with an UpgradeReadyError
// holding details of the requested upgrade. The tools will have been
// downloaded and unpacked.
func NewUpgrader(
	st *upgrader.State,
	agentConfig agent.Config,
	origAgentVersion version.Number,
	isUpgradeRunning func() bool,
) *Upgrader {
	u := &Upgrader{
		st:               st,
		dataDir:          agentConfig.DataDir(),
		tag:              agentConfig.Tag(),
		origAgentVersion: origAgentVersion,
		isUpgradeRunning: isUpgradeRunning,
	}
	go func() {
		defer u.tomb.Done()
		u.tomb.Kill(u.loop())
	}()
	return u
}
Example 17
// NewAgentUpgrader returns a new upgrader worker. It watches changes to the
// current version of the current agent (with the given tag) and tries to
// download the tools for any new version into the given data directory.  If
// an upgrade is needed, the worker will exit with an UpgradeReadyError
// holding details of the requested upgrade. The tools will have been
// downloaded and unpacked.
func NewAgentUpgrader(
	st *upgrader.State,
	agentConfig agent.Config,
	origAgentVersion version.Number,
	areUpgradeStepsRunning func() bool,
	agentUpgradeComplete chan struct{},
) *Upgrader {
	u := &Upgrader{
		st:                     st,
		dataDir:                agentConfig.DataDir(),
		tag:                    agentConfig.Tag(),
		origAgentVersion:       origAgentVersion,
		areUpgradeStepsRunning: areUpgradeStepsRunning,
		agentUpgradeComplete:   agentUpgradeComplete,
	}
	go func() {
		defer u.tomb.Done()
		u.tomb.Kill(u.loop())
	}()
	return u
}
Example 18
func (a *MachineAgent) isMaster(st *state.State, agentConfig agent.Config) (bool, error) {
	if st == nil {
		// If there is no state, we aren't a master.
		return false, nil
	}
	// Not calling the agent openState method as it does other checks
	// we really don't care about here.  All we need here is the machine
	// so we can determine if we are the master or not.
	machine, err := st.Machine(agentConfig.Tag().Id())
	if err != nil {
		// This shouldn't happen, and if it does, the state worker will have
		// found out before us, and already errored, or is likely to error out
		// very shortly.  All we do here is return the error. The state worker
		// returns an error that will cause the agent to be terminated.
		return false, errors.Trace(err)
	}
	isMaster, err := mongo.IsMaster(st.MongoSession(), machine)
	if err != nil {
		return false, errors.Trace(err)
	}
	return isMaster, nil
}
Example 19
// newDialInfo returns mgo.DialInfo with the given address using the minimal
// possible setup.
func newDialInfo(privateAddr string, conf agent.Config) (*mgo.DialInfo, error) {
	dialOpts := mongo.DialOpts{Direct: true}
	ssi, ok := conf.StateServingInfo()
	if !ok {
		return nil, errors.Errorf("cannot get state serving info to dial")
	}
	info := mongo.Info{
		Addrs:  []string{net.JoinHostPort(privateAddr, strconv.Itoa(ssi.StatePort))},
		CACert: conf.CACert(),
	}
	dialInfo, err := mongo.DialInfo(info, dialOpts)
	if err != nil {
		return nil, errors.Annotate(err, "cannot produce a dial info")
	}
	oldPassword := conf.OldPassword()
	if oldPassword != "" {
		dialInfo.Username = "******"
		dialInfo.Password = conf.OldPassword()
	} else {
		dialInfo.Username = conf.Tag().String()
		// TODO(perrito) we might need an accessor for the actual state password,
		// in case it ever stops matching the API password.
		apiInfo, ok := conf.APIInfo()
		if ok {
			dialInfo.Password = apiInfo.Password
			logger.Infof("using API password to access state server.")
		} else {
			// There seems to be no way to reach this inconsistency other than
			// making a backup on a machine where these fields are corrupted, and
			// even then there is no obvious way to reach this state. Since
			// APIInfo allows for it, handle it anyway; we cannot recover from it,
			// as it would mean that the agent.conf is corrupted.
			return nil, errors.New("cannot obtain password to access the state server")
		}
	}
	return dialInfo, nil
}
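A hedged usage sketch for the result: the *mgo.DialInfo returned by newDialInfo can be passed to mgo.DialWithInfo from gopkg.in/mgo.v2 to obtain a session. The dialMongo wrapper and its privateAddr argument below are invented for illustration.

// dialMongo is illustrative glue around newDialInfo; it assumes the mgo
// package is imported as gopkg.in/mgo.v2.
func dialMongo(privateAddr string, conf agent.Config) (*mgo.Session, error) {
	dialInfo, err := newDialInfo(privateAddr, conf)
	if err != nil {
		return nil, err
	}
	session, err := mgo.DialWithInfo(dialInfo)
	if err != nil {
		return nil, err
	}
	return session, nil
}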
Example 20
// NewAgentUpgrader returns a new upgrader worker. It watches changes to the
// current version of the current agent (with the given tag) and tries to
// download the tools for any new version into the given data directory.  If
// an upgrade is needed, the worker will exit with an UpgradeReadyError
// holding details of the requested upgrade. The tools will have been
// downloaded and unpacked.
func NewAgentUpgrader(
	st *upgrader.State,
	agentConfig agent.Config,
	origAgentVersion version.Number,
	upgradeStepsWaiter gate.Waiter,
	initialUpgradeCheckComplete gate.Unlocker,
) (*Upgrader, error) {
	u := &Upgrader{
		st:                          st,
		dataDir:                     agentConfig.DataDir(),
		tag:                         agentConfig.Tag(),
		origAgentVersion:            origAgentVersion,
		upgradeStepsWaiter:          upgradeStepsWaiter,
		initialUpgradeCheckComplete: initialUpgradeCheckComplete,
	}
	err := catacomb.Invoke(catacomb.Plan{
		Site: &u.catacomb,
		Work: u.loop,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return u, nil
}
Example 21
func NewRebootWaiter(apistate api.Connection, acfg agent.Config) (*Reboot, error) {
	rebootState, err := apistate.Reboot()
	if err != nil {
		return nil, errors.Trace(err)
	}
	tag, ok := acfg.Tag().(names.MachineTag)
	if !ok {
		return nil, errors.Errorf("Expected names.MachineTag, got: %T --> %v", acfg.Tag(), acfg.Tag())
	}
	return &Reboot{
		acfg:     acfg,
		st:       rebootState,
		tag:      tag,
		apistate: apistate,
	}, nil
}
Example 22
// NewMachiner returns a Worker that will wait for the identified machine
// to become Dying and make it Dead; or until the machine becomes Dead by
// other means.
func NewMachiner(st *machiner.State, agentConfig agent.Config) worker.Worker {
	// TODO(dfc) clearly agentConfig.Tag() can _only_ return a machine tag
	mr := &Machiner{st: st, tag: agentConfig.Tag().(names.MachineTag)}
	return worker.NewNotifyWorker(mr)
}
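This constructor asserts agentConfig.Tag() to names.MachineTag without the ok form, which panics if the dynamic type ever differs; Examples 1, 7, and 15 use the checked form instead. A small standard-library illustration of the difference:

package main

import "fmt"

func main() {
	var v interface{} = "machine-0"

	s := v.(string) // unchecked: panics if v does not hold a string
	fmt.Println(s)

	n, ok := v.(int) // checked: ok reports whether the assertion held
	fmt.Println(n, ok) // 0 false
}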
Example 23
// NewMachiner returns a Worker that will wait for the identified machine
// to become Dying and make it Dead; or until the machine becomes Dead by
// other means.
func NewMachiner(st MachineAccessor, agentConfig agent.Config) worker.Worker {
	mr := &Machiner{st: st, tag: agentConfig.Tag().(names.MachineTag)}
	return worker.NewNotifyWorker(mr)
}
Example 24
// runUpgrades runs the upgrade operations for each job type and
// updates the updatedToVersion on success.
func (c *upgradeWorkerContext) runUpgrades(st *state.State, agentConfig agent.Config) error {
	from := version.Current
	from.Number = agentConfig.UpgradedToVersion()
	if from == version.Current {
		logger.Infof("upgrade to %v already completed.", version.Current)
		return nil
	}

	a := c.agent
	tag := agentConfig.Tag().(names.MachineTag)

	isMaster, err := isMachineMaster(st, tag)
	if err != nil {
		return errors.Trace(err)
	}

	err = a.ChangeConfig(func(agentConfig agent.ConfigSetter) error {
		var upgradeErr error
		a.setMachineStatus(c.apiState, params.StatusStarted,
			fmt.Sprintf("upgrading to %v", version.Current))
		context := upgrades.NewContext(agentConfig, c.apiState, st)
		for _, job := range c.jobs {
			target := upgradeTarget(job, isMaster)
			if target == "" {
				continue
			}
			logger.Infof("starting upgrade from %v to %v for %v %q",
				from, version.Current, target, tag)

			attempts := getUpgradeRetryStrategy()
			for attempt := attempts.Start(); attempt.Next(); {
				upgradeErr = upgradesPerformUpgrade(from.Number, target, context)
				if upgradeErr == nil {
					break
				}
				if connectionIsDead(c.apiState) {
					// API connection has gone away - abort!
					return &apiLostDuringUpgrade{upgradeErr}
				}
				retryText := "will retry"
				if !attempt.HasNext() {
					retryText = "giving up"
				}
				logger.Errorf("upgrade from %v to %v for %v %q failed (%s): %v",
					from, version.Current, target, tag, retryText, upgradeErr)
				a.setMachineStatus(c.apiState, params.StatusError,
					fmt.Sprintf("upgrade to %v failed (%s): %v",
						version.Current, retryText, upgradeErr))
			}
		}
		if upgradeErr != nil {
			return upgradeErr
		}
		agentConfig.SetUpgradedToVersion(version.Current.Number)
		return nil
	})
	if err != nil {
		logger.Errorf("upgrade to %v failed: %v", version.Current, err)
		return err
	}

	logger.Infof("upgrade to %v completed successfully.", version.Current)
	a.setMachineStatus(c.apiState, params.StatusStarted, "")
	return nil
}
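The inner attempt loop above retries each upgrade step according to a retry strategy, logging "will retry" until the final attempt and "giving up" on it. A standard-library sketch of the same bounded-retry shape; the retry helper and its arguments are invented for illustration.

package main

import (
	"errors"
	"fmt"
	"time"
)

// retry runs op up to attempts times, sleeping between failures and
// distinguishing the final failure from the ones that will be retried.
func retry(op func() error, attempts int, delay time.Duration) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = op(); err == nil {
			return nil
		}
		if i == attempts-1 {
			fmt.Printf("attempt %d failed (giving up): %v\n", i+1, err)
			break
		}
		fmt.Printf("attempt %d failed (will retry): %v\n", i+1, err)
		time.Sleep(delay)
	}
	return err
}

func main() {
	count := 0
	err := retry(func() error {
		count++
		if count < 3 {
			return errors.New("not ready")
		}
		return nil
	}, 5, 10*time.Millisecond)
	fmt.Println("final result:", err)
}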
Example 25
// NewMachiner returns a Worker that will wait for the identified machine
// to become Dying and make it Dead; or until the machine becomes Dead by
// other means.
func NewMachiner(st MachineAccessor, agentConfig agent.Config, ignoreAddressesOnStart bool) worker.Worker {
	mr := &Machiner{st: st, tag: agentConfig.Tag().(names.MachineTag), ignoreAddressesOnStart: ignoreAddressesOnStart}
	return worker.NewNotifyWorker(mr)
}
Example 26
// NewMachiner returns a Worker that will wait for the identified machine
// to become Dying and make it Dead; or until the machine becomes Dead by
// other means.
func NewMachiner(st *machiner.State, agentConfig agent.Config) worker.Worker {
	mr := &Machiner{st: st, tag: agentConfig.Tag()}
	return worker.NewNotifyWorker(mr)
}
Example 27
// NewWorker returns a worker that keeps track of
// the machine's authorised ssh keys and ensures the
// ~/.ssh/authorized_keys file is up to date.
func NewWorker(st *keyupdater.State, agentConfig agent.Config) worker.Worker {
	kw := &keyupdaterWorker{st: st, tag: agentConfig.Tag()}
	return worker.NewNotifyWorker(kw)
}
Example 28
func refreshConfig(c *gc.C, config agent.Config) agent.ConfigSetterWriter {
	config1, err := agent.ReadConfig(agent.ConfigPath(config.DataDir(), config.Tag()))
	c.Assert(err, gc.IsNil)
	return config1
}
Example 29
// updateSupportedContainers records in state that a machine can run the specified containers.
// It starts a watcher and when a container of a given type is first added to the machine,
// the watcher is killed, the machine is set up to be able to start containers of the given type,
// and a suitable provisioner is started.
func (a *MachineAgent) updateSupportedContainers(
	runner worker.Runner,
	st api.Connection,
	containers []instance.ContainerType,
	agentConfig agent.Config,
) error {
	pr := st.Provisioner()
	tag := agentConfig.Tag().(names.MachineTag)
	machine, err := pr.Machine(tag)
	if errors.IsNotFound(err) || err == nil && machine.Life() == params.Dead {
		return worker.ErrTerminateAgent
	}
	if err != nil {
		return errors.Annotatef(err, "cannot load machine %s from state", tag)
	}
	if len(containers) == 0 {
		if err := machine.SupportsNoContainers(); err != nil {
			return errors.Annotatef(err, "clearing supported containers for %s", tag)
		}
		return nil
	}
	if err := machine.SetSupportedContainers(containers...); err != nil {
		return errors.Annotatef(err, "setting supported containers for %s", tag)
	}
	initLock, err := cmdutil.HookExecutionLock(agentConfig.DataDir())
	if err != nil {
		return err
	}
	// Start the watcher to fire when a container is first requested on the machine.
	modelUUID, err := st.ModelTag()
	if err != nil {
		return err
	}
	watcherName := fmt.Sprintf("%s-container-watcher", machine.Id())
	// There may not be a CA certificate private key available, and without
	// it we can't ensure that other Juju nodes can connect securely, so only
	// use an image URL getter if there's a private key.
	var imageURLGetter container.ImageURLGetter
	if agentConfig.Value(agent.AllowsSecureConnection) == "true" {
		cfg, err := pr.ModelConfig()
		if err != nil {
			return errors.Annotate(err, "unable to get environ config")
		}
		imageURLGetter = container.NewImageURLGetter(
			// Explicitly call the non-named constructor so if anyone
			// adds additional fields, this fails.
			container.ImageURLGetterConfig{
				ServerRoot:        st.Addr(),
				ModelUUID:         modelUUID.Id(),
				CACert:            []byte(agentConfig.CACert()),
				CloudimgBaseUrl:   cfg.CloudImageBaseURL(),
				Stream:            cfg.ImageStream(),
				ImageDownloadFunc: container.ImageDownloadURL,
			})
	}
	params := provisioner.ContainerSetupParams{
		Runner:              runner,
		WorkerName:          watcherName,
		SupportedContainers: containers,
		ImageURLGetter:      imageURLGetter,
		Machine:             machine,
		Provisioner:         pr,
		Config:              agentConfig,
		InitLock:            initLock,
	}
	handler := provisioner.NewContainerSetupHandler(params)
	a.startWorkerAfterUpgrade(runner, watcherName, func() (worker.Worker, error) {
		w, err := watcher.NewStringsWorker(watcher.StringsConfig{
			Handler: handler,
		})
		if err != nil {
			return nil, errors.Annotatef(err, "cannot start %s worker", watcherName)
		}
		return w, nil
	})
	return nil
}