func containerManagerConfig(
	containerType instance.ContainerType,
	provisioner *apiprovisioner.State,
	agentConfig agent.Config,
) (container.ManagerConfig, error) {
	// Ask the provisioner for the container manager configuration.
	managerConfigResult, err := provisioner.ContainerManagerConfig(
		params.ContainerManagerConfigParams{Type: containerType},
	)
	if params.IsCodeNotImplemented(err) {
		// We currently don't support upgrading;
		// revert to the old configuration.
		managerConfigResult.ManagerConfig = container.ManagerConfig{container.ConfigName: container.DefaultNamespace}
	} else if err != nil {
		return nil, err
	}
	// If a namespace is specified, that should instead be used as the config name.
	if namespace := agentConfig.Value(agent.Namespace); namespace != "" {
		managerConfigResult.ManagerConfig[container.ConfigName] = namespace
	}
	managerConfig := container.ManagerConfig(managerConfigResult.ManagerConfig)

	return managerConfig, nil
}
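
The closing namespace override is plain map manipulation. A minimal, self-contained sketch of the same fallback-then-override pattern — the type and key names here are illustrative stand-ins, not Juju's API:

package main

import "fmt"

// ManagerConfig mirrors the string-map shape of container.ManagerConfig.
type ManagerConfig map[string]string

// configName stands in for container.ConfigName.
const configName = "name"

// managerConfigFor applies the same precedence as above: a default
// (or provisioner-supplied) value first, then the agent's namespace wins.
func managerConfigFor(namespace string) ManagerConfig {
	cfg := ManagerConfig{configName: "juju"}
	if namespace != "" {
		cfg[configName] = namespace
	}
	return cfg
}

func main() {
	fmt.Println(managerConfigFor(""))        // map[name:juju]
	fmt.Println(managerConfigFor("staging")) // map[name:staging]
}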
Example No. 2
func ensureMongoService(agentConfig agent.Config) error {
	var oplogSize int
	if oplogSizeString := agentConfig.Value(agent.MongoOplogSize); oplogSizeString != "" {
		var err error
		if oplogSize, err = strconv.Atoi(oplogSizeString); err != nil {
			return errors.Annotatef(err, "invalid oplog size: %q", oplogSizeString)
		}
	}

	var numaCtlPolicy bool
	if numaCtlString := agentConfig.Value(agent.NumaCtlPreference); numaCtlString != "" {
		var err error
		if numaCtlPolicy, err = strconv.ParseBool(numaCtlString); err != nil {
			return errors.Annotatef(err, "invalid numactl preference: %q", numaCtlString)
		}
	}

	si, ok := agentConfig.StateServingInfo()
	if !ok {
		return errors.Errorf("agent config has no state serving info")
	}

	err := mongo.EnsureServiceInstalled(agentConfig.DataDir(),
		agentConfig.Value(agent.Namespace),
		si.StatePort,
		oplogSize,
		numaCtlPolicy)
	return errors.Annotate(err, "cannot ensure that mongo service start/stop scripts are in place")
}
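
Both lookups follow the same convention: an empty string means "use the default", anything else must parse. A self-contained sketch of that parsing contract, with the two string parameters standing in for the agentConfig.Value lookups:

package main

import (
	"fmt"
	"strconv"
)

// parseAgentValues mimics the convention above: an empty string means
// "use the default"; the parameters stand in for agentConfig.Value lookups.
func parseAgentValues(oplogSizeString, numaCtlString string) (int, bool, error) {
	var oplogSize int
	if oplogSizeString != "" {
		n, err := strconv.Atoi(oplogSizeString)
		if err != nil {
			return 0, false, fmt.Errorf("invalid oplog size: %q", oplogSizeString)
		}
		oplogSize = n
	}
	var numaCtlPolicy bool
	if numaCtlString != "" {
		b, err := strconv.ParseBool(numaCtlString)
		if err != nil {
			return 0, false, fmt.Errorf("invalid numactl preference: %q", numaCtlString)
		}
		numaCtlPolicy = b
	}
	return oplogSize, numaCtlPolicy, nil
}

func main() {
	size, numa, err := parseAgentValues("1024", "true")
	fmt.Println(size, numa, err) // 1024 true <nil>
}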
Example No. 3
// runUpgrades runs the upgrade operations for each job type and updates the agent's upgradedToVersion on success.
func (a *MachineAgent) runUpgrades(
	st *state.State,
	apiState *api.State,
	jobs []params.MachineJob,
	agentConfig agent.Config,
) error {
	from := version.Current
	from.Number = agentConfig.UpgradedToVersion()
	if from == version.Current {
		logger.Infof("upgrade to %v already completed.", version.Current)
		return nil
	}
	var err error
	writeErr := a.ChangeConfig(func(agentConfig agent.ConfigSetter) {
		context := upgrades.NewContext(agentConfig, apiState, st)
		for _, job := range jobs {
			target := upgradeTarget(job)
			if target == "" {
				continue
			}
			logger.Infof("starting upgrade from %v to %v for %v %q", from, version.Current, target, a.Tag())
			if err = upgradesPerformUpgrade(from.Number, target, context); err != nil {
				err = fmt.Errorf("cannot perform upgrade from %v to %v for %v %q: %v", from, version.Current, target, a.Tag(), err)
				return
			}
		}
		agentConfig.SetUpgradedToVersion(version.Current.Number)
	})
	if writeErr != nil {
		return fmt.Errorf("cannot write updated agent configuration: %v", writeErr)
	}
	return err
}
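
The final `return err` matters: in this API version the closure passed to ChangeConfig cannot return an error itself, so the upgrade failure is captured in the enclosing `err` and has to be surfaced after the config write succeeds. A stripped-down sketch of that capture pattern, with changeConfig as a hypothetical stand-in:

package main

import (
	"errors"
	"fmt"
)

// changeConfig is a hypothetical stand-in for a.ChangeConfig: it runs the
// mutation and reports only write failures, so any failure inside the
// closure has to be captured separately.
func changeConfig(fn func()) error {
	fn()
	return nil // pretend the config write succeeded
}

func runWithCapturedError() error {
	var err error
	writeErr := changeConfig(func() {
		// An upgrade failure inside the closure is recorded, not returned.
		err = errors.New("cannot perform upgrade")
	})
	if writeErr != nil {
		return fmt.Errorf("cannot write updated agent configuration: %v", writeErr)
	}
	// Surfacing the captured error here is what the final `return err` above does.
	return err
}

func main() {
	fmt.Println(runWithCapturedError()) // cannot perform upgrade
}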
Example No. 4
func (s *UpgradeSuite) assertUpgradeSteps(c *gc.C, job state.MachineJob) {
	s.agentSuite.PatchValue(&version.Current, s.upgradeToVersion)
	err := s.State.SetEnvironAgentVersion(s.upgradeToVersion.Number)
	c.Assert(err, gc.IsNil)

	oldVersion := s.upgradeToVersion
	oldVersion.Major = 1
	oldVersion.Minor = 16
	var oldConfig agent.Config
	s.machine, oldConfig, _ = s.primeAgent(c, oldVersion, job)

	a := s.newAgent(c, s.machine)
	go func() { c.Check(a.Run(nil), gc.IsNil) }()
	defer func() { c.Check(a.Stop(), gc.IsNil) }()

	// Wait for upgrade steps to run.
	success := false
	for attempt := coretesting.LongAttempt.Start(); attempt.Next(); {
		conf, err := agent.ReadConfig(agent.ConfigPath(oldConfig.DataDir(), s.machine.Tag()))
		c.Assert(err, gc.IsNil)
		success = conf.UpgradedToVersion() == s.upgradeToVersion.Number
		if success {
			break
		}
	}
	// Upgrade worker has completed ok.
	c.Assert(success, jc.IsTrue)
}
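
The LongAttempt loop above is a bounded poll: retry a predicate until it holds or the strategy's time budget runs out. A stdlib-only sketch of the same bounded-poll idea:

package main

import (
	"fmt"
	"time"
)

// waitUntil polls cond the way the test above polls UpgradedToVersion:
// bounded by a deadline, with one final check after time runs out.
func waitUntil(cond func() bool, timeout, interval time.Duration) bool {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if cond() {
			return true
		}
		time.Sleep(interval)
	}
	return cond()
}

func main() {
	start := time.Now()
	ok := waitUntil(
		func() bool { return time.Since(start) > 50*time.Millisecond },
		time.Second, 10*time.Millisecond,
	)
	fmt.Println(ok) // true
}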
Example No. 5
// NewNetworker returns a Worker that handles machine networking
// configuration. If there is no <configBaseDir>/interfaces file, an
// error is returned.
func NewNetworker(
	st apinetworker.State,
	agentConfig agent.Config,
	intrusiveMode bool,
	configBaseDir string,
) (*Networker, error) {
	tag, ok := agentConfig.Tag().(names.MachineTag)
	if !ok {
		// This should never happen, as there is a check for it in the
		// machine agent.
		return nil, fmt.Errorf("expected names.MachineTag, got %T", agentConfig.Tag())
	}
	nw := &Networker{
		st:            st,
		tag:           tag,
		intrusiveMode: intrusiveMode,
		configBaseDir: configBaseDir,
		configFiles:   make(map[string]*configFile),
		interfaceInfo: make(map[string]network.InterfaceInfo),
		interfaces:    make(map[string]net.Interface),
	}
	err := catacomb.Invoke(catacomb.Plan{
		Site: &nw.catacomb,
		Work: nw.loop,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return nw, nil
}
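
The comma-ok type assertion on agentConfig.Tag() is the guard that keeps a non-machine agent from constructing this worker. A self-contained sketch of the same guard, with illustrative tag types rather than the real names package:

package main

import "fmt"

// Tag and MachineTag loosely mimic the names package shapes; they are
// illustrative, not the real API.
type Tag interface{ String() string }

type MachineTag struct{ id string }

func (t MachineTag) String() string { return "machine-" + t.id }

type UnitTag struct{ name string }

func (t UnitTag) String() string { return "unit-" + t.name }

// requireMachineTag is the same comma-ok assertion the constructor uses
// to reject non-machine agents early.
func requireMachineTag(tag Tag) (MachineTag, error) {
	mt, ok := tag.(MachineTag)
	if !ok {
		return MachineTag{}, fmt.Errorf("expected MachineTag, got %T", tag)
	}
	return mt, nil
}

func main() {
	fmt.Println(requireMachineTag(MachineTag{"0"}))
	fmt.Println(requireMachineTag(UnitTag{"wordpress/0"}))
}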
Example No. 6
// NewWorker returns a worker that keeps track of
// the machine's authorised ssh keys and ensures the
// ~/.ssh/authorized_keys file is up to date.
func NewWorker(st *keyupdater.State, agentConfig agent.Config) worker.Worker {
	if os.HostOS() == os.Windows {
		return worker.NewNoOpWorker()
	}
	kw := &keyupdaterWorker{st: st, tag: agentConfig.Tag().(names.MachineTag)}
	return worker.NewNotifyWorker(kw)
}
Example No. 7
// newNetworker creates a Networker worker with the specified arguments.
func newNetworker(
	st *apinetworker.State,
	agentConfig agent.Config,
	configBasePath string,
	canWriteNetworkConfig bool,
) (*Networker, error) {
	tag, ok := agentConfig.Tag().(names.MachineTag)
	if !ok {
		// This should never happen, as there is a check for it in the
		// machine agent.
		return nil, fmt.Errorf("expected names.MachineTag, got %T", agentConfig.Tag())
	}
	nw := &Networker{
		st:                    st,
		tag:                   tag,
		configBasePath:        configBasePath,
		canWriteNetworkConfig: canWriteNetworkConfig,
		configFiles:           make(map[string]*configFile),
		networkInfo:           make(map[string]network.Info),
		interfaces:            make(map[string]net.Interface),
	}
	go func() {
		defer nw.tomb.Done()
		nw.tomb.Kill(nw.loop())
	}()
	return nw, nil
}
Example No. 8
func (s *upgradeSuite) attemptRestrictedAPIAsUser(c *gc.C, conf agent.Config) error {
	info, ok := conf.APIInfo()
	c.Assert(ok, jc.IsTrue)
	info.Tag = s.AdminUserTag(c)
	info.Password = "******"
	info.Nonce = ""

	apiState, err := api.Open(info, upgradeTestDialOpts)
	if err != nil {
		// If space discovery is in progress we'll get an error here
		// and need to retry.
		return err
	}
	defer apiState.Close()

	// This call should always work, but might fail if the apiserver
	// is restarting. If it fails just return the error so retries
	// can continue.
	err = apiState.APICall("Client", 1, "", "FullStatus", nil, new(params.FullStatus))
	if err != nil {
		return errors.Annotate(err, "FullStatus call")
	}

	// This call should only work if the API is not restricted.
	err = apiState.APICall("Client", 1, "", "WatchAll", nil, nil)
	return errors.Annotate(err, "WatchAll call")
}
Example No. 9
// NewWorker returns a worker that keeps track of
// the machine's authorised ssh keys and ensures the
// ~/.ssh/authorized_keys file is up to date.
func NewWorker(st *keyupdater.State, agentConfig agent.Config) worker.Worker {
	if version.Current.OS == version.Windows {
		return worker.NewNoOpWorker()
	}
	kw := &keyupdaterWorker{st: st, tag: agentConfig.Tag().(names.MachineTag)}
	return worker.NewNotifyWorker(kw)
}
Example No. 10
// runUpgrades runs the upgrade operations for each job type and updates the agent's upgradedToVersion on success.
func (a *MachineAgent) runUpgrades(
	st *state.State,
	apiState *api.State,
	jobs []params.MachineJob,
	agentConfig agent.Config,
) error {
	from := version.Current
	from.Number = agentConfig.UpgradedToVersion()
	if from == version.Current {
		logger.Infof("upgrade to %v already completed.", version.Current)
		return nil
	}
	isMaster, err := a.isMaster(st, agentConfig)
	if err != nil {
		return errors.Trace(err)
	}
	err = a.ChangeConfig(func(agentConfig agent.ConfigSetter) error {
		var upgradeErr error
		a.setMachineStatus(apiState, params.StatusStarted,
			fmt.Sprintf("upgrading to %v", version.Current))
		context := upgrades.NewContext(agentConfig, apiState, st)
		for _, job := range jobs {
			target := upgradeTarget(job, isMaster)
			if target == "" {
				continue
			}
			logger.Infof("starting upgrade from %v to %v for %v %q", from, version.Current, target, a.Tag())

			attempts := getUpgradeRetryStrategy()
			for attempt := attempts.Start(); attempt.Next(); {
				upgradeErr = upgradesPerformUpgrade(from.Number, target, context)
				if upgradeErr == nil {
					break
				} else {
					retryText := "will retry"
					if !attempt.HasNext() {
						retryText = "giving up"
					}
					logger.Errorf("upgrade from %v to %v for %v %q failed (%s): %v",
						from, version.Current, target, a.Tag(), retryText, upgradeErr)
					a.setMachineStatus(apiState, params.StatusError,
						fmt.Sprintf("upgrade to %v failed (%s): %v", version.Current, retryText, upgradeErr))
				}
			}
		}
		if upgradeErr != nil {
			return upgradeErr
		}
		agentConfig.SetUpgradedToVersion(version.Current.Number)
		return nil
	})
	if err != nil {
		logger.Errorf("upgrade to %v failed: %v", version.Current, err)
		return &fatalError{err.Error()}
	}

	logger.Infof("upgrade to %v completed successfully.", version.Current)
	a.setMachineStatus(apiState, params.StatusStarted, "")
	return nil
}
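
The attempt loop distinguishes "will retry" from "giving up" via HasNext. A stdlib-only sketch of the same retry shape, with a plain counter and sleep in place of the juju/utils attempt strategy:

package main

import (
	"errors"
	"fmt"
	"time"
)

// retry mirrors the attempt loop above: keep retrying until the budget is
// exhausted, distinguishing "will retry" from "giving up" on the last try.
func retry(op func() error, tries int, delay time.Duration) error {
	var err error
	for i := 0; i < tries; i++ {
		if err = op(); err == nil {
			return nil
		}
		retryText := "will retry"
		if i == tries-1 {
			retryText = "giving up"
		}
		fmt.Printf("attempt %d failed (%s): %v\n", i+1, retryText, err)
		if i < tries-1 {
			time.Sleep(delay)
		}
	}
	return err
}

func main() {
	calls := 0
	err := retry(func() error {
		calls++
		if calls < 3 {
			return errors.New("transient failure")
		}
		return nil
	}, 5, time.Millisecond)
	fmt.Println(err) // <nil> after two retries
}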
Example No. 11
// migrateCharmStorage copies uploaded charms from provider storage
// to environment storage, and then adds the storage path into the
// charm's document in state.
func migrateCharmStorage(st *state.State, agentConfig agent.Config) error {
	logger.Debugf("migrating charms to environment storage")
	charms, err := st.AllCharms()
	if err != nil {
		return err
	}
	storage := storage.NewStorage(st.EnvironUUID(), st.MongoSession())

	// The local and manual providers host storage on the state server's
	// filesystem and serve it over HTTP. The storage worker doesn't run
	// yet, so we just open the files directly.
	fetchCharmArchive := fetchCharmArchive
	providerType := agentConfig.Value(agent.ProviderType)
	if providerType == provider.Local || provider.IsManual(providerType) {
		storageDir := agentConfig.Value(agent.StorageDir)
		fetchCharmArchive = localstorage{storageDir}.fetchCharmArchive
	}

	storagePaths := make(map[*charm.URL]string)
	for _, ch := range charms {
		if ch.IsPlaceholder() {
			logger.Debugf("skipping %s, placeholder charm", ch.URL())
			continue
		}
		if !ch.IsUploaded() {
			logger.Debugf("skipping %s, not uploaded to provider storage", ch.URL())
			continue
		}
		if charmStoragePath(ch) != "" {
			logger.Debugf("skipping %s, already in environment storage", ch.URL())
			continue
		}
		url := charmBundleURL(ch)
		if url == nil {
			logger.Debugf("skipping %s, has no bundle URL", ch.URL())
			continue
		}
		uuid, err := utils.NewUUID()
		if err != nil {
			return err
		}
		data, err := fetchCharmArchive(url)
		if err != nil {
			return err
		}

		curl := ch.URL()
		storagePath := fmt.Sprintf("charms/%s-%s", curl, uuid)
		logger.Debugf("uploading %s to %q in environment storage", curl, storagePath)
		err = storage.Put(storagePath, bytes.NewReader(data), int64(len(data)))
		if err != nil {
			return errors.Annotatef(err, "failed to upload %s to storage", curl)
		}
		storagePaths[curl] = storagePath
	}

	return stateAddCharmStoragePaths(st, storagePaths)
}
Example No. 12
func canLoginToAPIAsMachine(c *gc.C, fromConf, toConf agent.Config) bool {
	info := fromConf.APIInfo()
	info.Addrs = toConf.APIInfo().Addrs
	apiState, err := api.Open(info, upgradeTestDialOpts)
	if apiState != nil {
		apiState.Close()
	}
	return apiState != nil && err == nil
}
Example No. 13
// OpenAPIState opens the API using the given information. The agent's
// password is changed if the fallback password was used to connect to
// the API.
func OpenAPIState(agentConfig agent.Config, a Agent) (_ *api.State, _ *apiagent.Entity, outErr error) {
	info := agentConfig.APIInfo()
	st, usedOldPassword, err := openAPIStateUsingInfo(info, a, agentConfig.OldPassword())
	if err != nil {
		return nil, nil, err
	}
	defer func() {
		if outErr != nil && st != nil {
			st.Close()
		}
	}()

	entity, err := st.Agent().Entity(a.Tag())
	if err == nil && entity.Life() == params.Dead {
		logger.Errorf("agent terminating - entity %q is dead", a.Tag())
		return nil, nil, worker.ErrTerminateAgent
	}
	if params.IsCodeUnauthorized(err) {
		logger.Errorf("agent terminating due to error returned during entity lookup: %v", err)
		return nil, nil, worker.ErrTerminateAgent
	}
	if err != nil {
		return nil, nil, err
	}

	if !usedOldPassword {
		// Call set password with the current password.  If we've recently
		// become a state server, this will fix up our credentials in mongo.
		if err := entity.SetPassword(info.Password); err != nil {
			return nil, nil, errors.Annotate(err, "can't reset agent password")
		}
	} else {
		// We succeeded in connecting with the fallback
		// password, so we need to create a new password
		// for the future.
		newPassword, err := utils.RandomPassword()
		if err != nil {
			return nil, nil, err
		}
		err = setAgentPassword(newPassword, info.Password, a, entity)
		if err != nil {
			return nil, nil, err
		}

		// Reconnect to the API with the new password.
		st.Close()
		info.Password = newPassword
		st, err = apiOpen(info, api.DialOpts{})
		if err != nil {
			return nil, nil, err
		}
	}

	return st, entity, err
}
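
When the fallback password was used, the agent mints a fresh credential before reconnecting. utils.RandomPassword isn't shown here; the sketch below is a plausible stand-in assuming random bytes plus base64 — the 18-byte length is illustrative, not confirmed:

package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

// randomPassword is a stand-in for utils.RandomPassword: random bytes,
// base64-encoded. The 18-byte length is an assumption for illustration.
func randomPassword() (string, error) {
	buf := make([]byte, 18)
	if _, err := rand.Read(buf); err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(buf), nil
}

func main() {
	p, err := randomPassword()
	fmt.Println(len(p), err) // 24 <nil>
}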
Example No. 14
func waitForUpgradeToFinish(c *gc.C, conf agent.Config) {
	success := false
	for attempt := coretesting.LongAttempt.Start(); attempt.Next(); {
		diskConf := readConfigFromDisk(c, conf.DataDir(), conf.Tag())
		success = diskConf.UpgradedToVersion() == version.Current
		if success {
			break
		}
	}
	c.Assert(success, jc.IsTrue)
}
Example No. 15
// NewUpgrader returns a new upgrader worker. It watches changes to the
// current version of the current agent (with the given tag) and tries to
// download the tools for any new version into the given data directory.  If
// an upgrade is needed, the worker will exit with an UpgradeReadyError
// holding details of the requested upgrade. The tools will have been
// downloaded and unpacked.
func NewUpgrader(st *upgrader.State, agentConfig agent.Config) *Upgrader {
	u := &Upgrader{
		st:      st,
		dataDir: agentConfig.DataDir(),
		tag:     agentConfig.Tag(),
	}
	go func() {
		defer u.tomb.Done()
		u.tomb.Kill(u.loop())
	}()
	return u
}
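
Examples No. 7 and No. 15 share the same lifecycle idiom: start the loop in a goroutine, record its exit error in the tomb, and mark the tomb done. A minimal runnable version of just that idiom, assuming the gopkg.in/tomb.v1 package these workers use:

package main

import (
	"fmt"
	"time"

	"gopkg.in/tomb.v1"
)

type worker struct {
	tomb tomb.Tomb
}

func (w *worker) loop() error {
	for {
		select {
		case <-w.tomb.Dying():
			return tomb.ErrDying
		case <-time.After(10 * time.Millisecond):
			// periodic work would happen here
		}
	}
}

// newWorker starts the loop exactly as the constructors above do: the
// goroutine records the loop's exit error and marks the tomb done.
func newWorker() *worker {
	w := &worker{}
	go func() {
		defer w.tomb.Done()
		w.tomb.Kill(w.loop())
	}()
	return w
}

func main() {
	w := newWorker()
	w.tomb.Kill(nil)           // ask the loop to stop
	fmt.Println(w.tomb.Wait()) // <nil>
}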
Example No. 16
func NewReboot(st *reboot.State, agentConfig agent.Config, machineLock *fslock.Lock) (worker.Worker, error) {
	tag, ok := agentConfig.Tag().(names.MachineTag)
	if !ok {
		return nil, errors.Errorf("Expected names.MachineTag, got %T: %v", agentConfig.Tag(), agentConfig.Tag())
	}
	r := &Reboot{
		st:          st,
		tag:         tag,
		machineLock: machineLock,
	}
	return worker.NewNotifyWorker(r), nil
}
Example No. 17
func canLoginToAPIAsMachine(c *gc.C, fromConf, toConf agent.Config) bool {
	fromInfo, ok := fromConf.APIInfo()
	c.Assert(ok, jc.IsTrue)
	toInfo, ok := toConf.APIInfo()
	c.Assert(ok, jc.IsTrue)
	fromInfo.Addrs = toInfo.Addrs
	apiState, err := api.Open(fromInfo, upgradeTestDialOpts)
	if apiState != nil {
		apiState.Close()
	}
	return apiState != nil && err == nil
}
Example No. 18
File: main.go Project: zhouqt/juju
// setupAgentLogging redirects logging to rolled log files.
//
// NOTE: do not use this in the bootstrap agent, or
// if you do, change the bootstrap error reporting.
func setupAgentLogging(conf agent.Config) error {
	filename := filepath.Join(conf.LogDir(), conf.Tag().String()+".log")

	log := &lumberjack.Logger{
		Filename:   filename,
		MaxSize:    300, // megabytes
		MaxBackups: 2,
	}

	writer := loggo.NewSimpleWriter(log, &loggo.DefaultFormatter{})
	_, err := loggo.ReplaceDefaultWriter(writer)
	return err
}
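
loggo's writer API has changed across releases, so the call above may not match the current package; the lumberjack side is stable, since the Logger is just an io.Writer that rotates. A hedged stdlib variant of the same redirection, with illustrative paths:

package main

import (
	"log"
	"path/filepath"

	"gopkg.in/natefinch/lumberjack.v2"
)

// setupRollingLog routes the stdlib logger through the same rotating
// writer; the directory and tag are illustrative.
func setupRollingLog(logDir, tag string) {
	log.SetOutput(&lumberjack.Logger{
		Filename:   filepath.Join(logDir, tag+".log"),
		MaxSize:    300, // megabytes, matching the agent's setting
		MaxBackups: 2,
	})
}

func main() {
	setupRollingLog("/tmp", "machine-0")
	log.Println("agent logging redirected to a rolled file")
}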
Example No. 19
// NewMachineEnvironmentWorker returns a worker.Worker that uses the notify
// watcher returned from the setup.
func NewMachineEnvironmentWorker(api *environment.Facade, agentConfig agent.Config) worker.Worker {
	// We don't write out system files for the local provider on machine zero
	// as that is the host machine.
	writeSystemFiles := (agentConfig.Tag() != names.NewMachineTag("0").String() ||
		agentConfig.Value(agent.ProviderType) != provider.Local)
	logger.Debugf("write system files: %v", writeSystemFiles)
	envWorker := &MachineEnvironmentWorker{
		api:              api,
		writeSystemFiles: writeSystemFiles,
		first:            true,
	}
	return worker.NewNotifyWorker(envWorker)
}
Example No. 20
// NewNetworker returns a Worker that handles machine networking
// configuration. If there is no /etc/network/interfaces file, an
// error is returned.
func NewNetworker(st *apinetworker.State, agentConfig agent.Config) (worker.Worker, error) {
	nw := &networker{
		st:  st,
		tag: agentConfig.Tag().String(),
	}
	// Verify we have /etc/network/interfaces first, otherwise bail out.
	if !CanStart() {
		err := fmt.Errorf("missing %q config file", configFileName)
		logger.Infof("not starting worker: %v", err)
		return nil, err
	}
	return worker.NewNotifyWorker(nw), nil
}
Example No. 21
// upgradeWorker runs the required upgrade operations to upgrade to the current Juju version.
func (a *MachineAgent) upgradeWorker(
	apiState *api.State,
	jobs []params.MachineJob,
	agentConfig agent.Config,
) worker.Worker {
	return worker.NewSimpleWorker(func(stop <-chan struct{}) error {
		select {
		case <-a.upgradeComplete:
			// Our work is already done (we're probably being restarted
			// because the API connection has gone down), so do nothing.
			<-stop
			return nil
		default:
		}
		// If the machine agent is a state server, flag that state
		// needs to be opened before running upgrade steps.
		needsState := false
		for _, job := range jobs {
			if job == params.JobManageEnviron {
				needsState = true
			}
		}
		// We need a *state.State for upgrades. We open it independently
		// of StateWorker, because we have no guarantees about when
		// and how often StateWorker might run.
		var st *state.State
		if needsState {
			if err := a.ensureMongoServer(agentConfig); err != nil {
				return err
			}
			var err error
			info, ok := agentConfig.StateInfo()
			if !ok {
				return fmt.Errorf("no state info available")
			}
			st, err = state.Open(info, mongo.DialOpts{}, environs.NewStatePolicy())
			if err != nil {
				return err
			}
			defer st.Close()
		}
		err := a.runUpgrades(st, apiState, jobs, agentConfig)
		if err != nil {
			return err
		}
		logger.Infof("upgrade to %v completed.", version.Current)
		close(a.upgradeComplete)
		<-stop
		return nil
	})
}
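
The upgradeComplete channel is the standard closed-channel latch: a select with a default tests it without blocking, and close makes every future receive succeed immediately. A small sketch:

package main

import "fmt"

// runOnce shows the upgradeComplete idiom: a closed channel makes the
// first select case ready forever, so restarts become no-ops.
func runOnce(done chan struct{}, work func()) {
	select {
	case <-done:
		return // already completed on a previous run
	default:
	}
	work()
	close(done)
}

func main() {
	done := make(chan struct{})
	runOnce(done, func() { fmt.Println("upgrade steps ran") })
	runOnce(done, func() { fmt.Println("never printed") })
}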
Example No. 22
func (c *BootstrapCommand) startMongo(addrs []network.Address, agentConfig agent.Config) error {
	logger.Debugf("starting mongo")

	info, ok := agentConfig.MongoInfo()
	if !ok {
		return fmt.Errorf("no state info available")
	}
	// When bootstrapping, we need to allow enough time for mongo
	// to start as there's no retry loop in place.
	// 5 minutes should suffice.
	mongoDialOpts := mongo.DialOpts{Timeout: 5 * time.Minute}
	dialInfo, err := mongo.DialInfo(info.Info, mongoDialOpts)
	if err != nil {
		return err
	}
	servingInfo, ok := agentConfig.StateServingInfo()
	if !ok {
		return fmt.Errorf("agent config has no state serving info")
	}
	// Use localhost to dial the mongo server, because it's running in
	// auth mode and will refuse to perform any operations unless
	// we dial that address.
	dialInfo.Addrs = []string{
		net.JoinHostPort("127.0.0.1", fmt.Sprint(servingInfo.StatePort)),
	}

	logger.Debugf("calling ensureMongoServer")
	ensureServerParams, err := cmdutil.NewEnsureServerParams(agentConfig)
	if err != nil {
		return err
	}
	err = cmdutil.EnsureMongoServer(ensureServerParams)
	if err != nil {
		return err
	}

	peerAddr := mongo.SelectPeerAddress(addrs)
	if peerAddr == "" {
		return fmt.Errorf("no appropriate peer address found in %q", addrs)
	}
	peerHostPort := net.JoinHostPort(peerAddr, fmt.Sprint(servingInfo.StatePort))

	if err := initiateMongoServer(peergrouper.InitiateMongoParams{
		DialInfo:       dialInfo,
		MemberHostPort: peerHostPort,
	}); err != nil {
		return err
	}
	logger.Infof("started mongo")
	return nil
}
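
net.JoinHostPort is used twice above for good reason: unlike string concatenation, it brackets IPv6 literals. For instance:

package main

import (
	"fmt"
	"net"
)

func main() {
	// The bootstrap code above builds dial addresses this way; unlike plain
	// concatenation, JoinHostPort bracket-quotes IPv6 hosts correctly.
	fmt.Println(net.JoinHostPort("127.0.0.1", fmt.Sprint(37017))) // 127.0.0.1:37017
	fmt.Println(net.JoinHostPort("::1", "37017"))                 // [::1]:37017
}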
Example No. 23
File: restore.go Project: bac/juju
// newDialInfo returns mgo.DialInfo with the given address using the minimal
// possible setup.
func newDialInfo(privateAddr string, conf agent.Config) (*mgo.DialInfo, error) {
	dialOpts := mongo.DialOpts{Direct: true}
	ssi, ok := conf.StateServingInfo()
	if !ok {
		return nil, errors.Errorf("cannot get state serving info to dial")
	}
	info := mongo.Info{
		Addrs:  []string{net.JoinHostPort(privateAddr, strconv.Itoa(ssi.StatePort))},
		CACert: conf.CACert(),
	}
	dialInfo, err := mongo.DialInfo(info, dialOpts)
	if err != nil {
		return nil, errors.Annotate(err, "cannot produce a dial info")
	}
	oldPassword := conf.OldPassword()
	if oldPassword != "" {
		dialInfo.Username = "******"
		dialInfo.Password = oldPassword
	} else {
		dialInfo.Username, dialInfo.Password, err = tagUserCredentials(conf)
		if err != nil {
			return nil, errors.Trace(err)
		}
	}
	return dialInfo, nil
}
Example No. 24
func openState(agentConfig agent.Config, dialOpts mongo.DialOpts) (_ *state.State, _ *state.Machine, err error) {
	info, ok := agentConfig.MongoInfo()
	if !ok {
		return nil, nil, fmt.Errorf("no state info available")
	}
	st, err := state.Open(agentConfig.Model(), info, dialOpts, environs.NewStatePolicy())
	if err != nil {
		return nil, nil, err
	}
	defer func() {
		if err != nil {
			st.Close()
		}
	}()
	m0, err := st.FindEntity(agentConfig.Tag())
	if err != nil {
		if errors.IsNotFound(err) {
			err = worker.ErrTerminateAgent
		}
		return nil, nil, err
	}
	m := m0.(*state.Machine)
	if m.Life() == state.Dead {
		return nil, nil, worker.ErrTerminateAgent
	}
	// Check the machine nonce as provisioned matches the agent.Conf value.
	if !m.CheckProvisioned(agentConfig.Nonce()) {
		// The agent is running on a different machine to the one it
		// should be according to state. It must stop immediately.
		logger.Errorf("running machine %v agent on inappropriate instance", m)
		return nil, nil, worker.ErrTerminateAgent
	}
	return st, m, nil
}
Example No. 25
// newEnsureServerParams creates an EnsureServerParams from an agent configuration.
func newEnsureServerParams(agentConfig agent.Config) (mongo.EnsureServerParams, error) {
	// If oplog size is specified in the agent configuration, use that.
	// Otherwise leave the default zero value to indicate to EnsureServer
	// that it should calculate the size.
	var oplogSize int
	if oplogSizeString := agentConfig.Value(agent.MongoOplogSize); oplogSizeString != "" {
		var err error
		if oplogSize, err = strconv.Atoi(oplogSizeString); err != nil {
			return mongo.EnsureServerParams{}, fmt.Errorf("invalid oplog size: %q", oplogSizeString)
		}
	}

	servingInfo, ok := agentConfig.StateServingInfo()
	if !ok {
		return mongo.EnsureServerParams{}, fmt.Errorf("agent config has no state serving info")
	}

	params := mongo.EnsureServerParams{
		StateServingInfo: servingInfo,
		DataDir:          agentConfig.DataDir(),
		Namespace:        agentConfig.Value(agent.Namespace),
		OplogSize:        oplogSize,
	}
	return params, nil
}
Example No. 26
// PreUpgradeSteps runs various checks and prepares for performing an upgrade.
// If any check fails, an error is returned which aborts the upgrade.
func PreUpgradeSteps(st *state.State, agentConf agent.Config, isController, isMaster bool) error {
	if err := checkDiskSpace(agentConf.DataDir()); err != nil {
		return errors.Trace(err)
	}
	if isController {
		// Update distro info in case the new Juju controller version
		// is aware of new supported series. We'll keep going if this
		// fails, and the user can manually update it if they need to.
		logger.Infof("updating distro-info")
		err := updateDistroInfo()
		return errors.Annotate(err, "failed to update distro-info")
	}
	return nil
}
Example No. 27
func NewReboot(st reboot.State, agentConfig agent.Config, machineLock *fslock.Lock) (worker.Worker, error) {
	tag, ok := agentConfig.Tag().(names.MachineTag)
	if !ok {
		return nil, errors.Errorf("Expected names.MachineTag, got %T: %v", agentConfig.Tag(), agentConfig.Tag())
	}
	r := &Reboot{
		st:          st,
		tag:         tag,
		machineLock: machineLock,
	}
	w, err := watcher.NewNotifyWorker(watcher.NotifyConfig{
		Handler: r,
	})
	return w, errors.Trace(err)
}
Example No. 28
// updateSupportedContainers records in state that a machine can run the specified containers.
// It starts a watcher and when a container of a given type is first added to the machine,
// the watcher is killed, the machine is set up to be able to start containers of the given type,
// and a suitable provisioner is started.
func (a *MachineAgent) updateSupportedContainers(
	runner worker.Runner,
	st *api.State,
	machineTag string,
	containers []instance.ContainerType,
	agentConfig agent.Config,
) error {
	pr := st.Provisioner()
	tag, err := names.ParseMachineTag(machineTag)
	if err != nil {
		return err
	}
	machine, err := pr.Machine(tag)
	if errors.IsNotFound(err) || err == nil && machine.Life() == params.Dead {
		return worker.ErrTerminateAgent
	}
	if err != nil {
		return errors.Annotatef(err, "cannot load machine %s from state", tag)
	}
	if len(containers) == 0 {
		if err := machine.SupportsNoContainers(); err != nil {
			return errors.Annotatef(err, "clearing supported containers for %s", tag)
		}
		return nil
	}
	if err := machine.SetSupportedContainers(containers...); err != nil {
		return errors.Annotatef(err, "setting supported containers for %s", tag)
	}
	initLock, err := hookExecutionLock(agentConfig.DataDir())
	if err != nil {
		return err
	}
	// Start the watcher to fire when a container is first requested on the machine.
	watcherName := fmt.Sprintf("%s-container-watcher", machine.Id())
	handler := provisioner.NewContainerSetupHandler(
		runner,
		watcherName,
		containers,
		machine,
		pr,
		agentConfig,
		initLock,
	)
	a.startWorkerAfterUpgrade(runner, watcherName, func() (worker.Worker, error) {
		return worker.NewStringsWorker(handler), nil
	})
	return nil
}
Example No. 29
// updateSupportedContainers records in state that a machine can run the specified containers.
// It starts a watcher and when a container of a given type is first added to the machine,
// the watcher is killed, the machine is set up to be able to start containers of the given type,
// and a suitable provisioner is started.
func (a *MachineAgent) updateSupportedContainers(
	runner worker.Runner,
	st api.Connection,
	containers []instance.ContainerType,
	agentConfig agent.Config,
) error {
	pr := apiprovisioner.NewState(st)
	tag := agentConfig.Tag().(names.MachineTag)
	machine, err := pr.Machine(tag)
	if errors.IsNotFound(err) || err == nil && machine.Life() == params.Dead {
		return worker.ErrTerminateAgent
	}
	if err != nil {
		return errors.Annotatef(err, "cannot load machine %s from state", tag)
	}
	if len(containers) == 0 {
		if err := machine.SupportsNoContainers(); err != nil {
			return errors.Annotatef(err, "clearing supported containers for %s", tag)
		}
		return nil
	}
	if err := machine.SetSupportedContainers(containers...); err != nil {
		return errors.Annotatef(err, "setting supported containers for %s", tag)
	}
	// Start the watcher to fire when a container is first requested on the machine.
	watcherName := fmt.Sprintf("%s-container-watcher", machine.Id())
	params := provisioner.ContainerSetupParams{
		Runner:              runner,
		WorkerName:          watcherName,
		SupportedContainers: containers,
		Machine:             machine,
		Provisioner:         pr,
		Config:              agentConfig,
		InitLockName:        agent.MachineLockName,
	}
	handler := provisioner.NewContainerSetupHandler(params)
	a.startWorkerAfterUpgrade(runner, watcherName, func() (worker.Worker, error) {
		w, err := watcher.NewStringsWorker(watcher.StringsConfig{
			Handler: handler,
		})
		if err != nil {
			return nil, errors.Annotatef(err, "cannot start %s worker", watcherName)
		}
		return w, nil
	})
	return nil
}
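
The life check in both versions leans on Go operator precedence: && binds tighter than ||, so the condition reads "not found, or (no error and dead)". A tiny sketch making that explicit:

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

// shouldTerminate spells out the condition above: && binds tighter than ||,
// so it reads "not found, OR (no error AND dead)".
func shouldTerminate(err error, dead bool) bool {
	return errors.Is(err, errNotFound) || err == nil && dead
}

func main() {
	fmt.Println(shouldTerminate(errNotFound, false)) // true
	fmt.Println(shouldTerminate(nil, true))          // true
	fmt.Println(shouldTerminate(nil, false))         // false
}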
Example No. 30
// NewEnsureServerParams creates an EnsureServerParams from an agent
// configuration.
func NewEnsureServerParams(agentConfig agent.Config) (mongo.EnsureServerParams, error) {
	// If oplog size is specified in the agent configuration, use that.
	// Otherwise leave the default zero value to indicate to EnsureServer
	// that it should calculate the size.
	var oplogSize int
	if oplogSizeString := agentConfig.Value(agent.MongoOplogSize); oplogSizeString != "" {
		var err error
		if oplogSize, err = strconv.Atoi(oplogSizeString); err != nil {
			return mongo.EnsureServerParams{}, fmt.Errorf("invalid oplog size: %q", oplogSizeString)
		}
	}

	// If numa ctl preference is specified in the agent configuration, use that.
	// Otherwise leave the default false value to indicate to EnsureServer
	// that numactl should not be used.
	var numaCtlPolicy bool
	if numaCtlString := agentConfig.Value(agent.NumaCtlPreference); numaCtlString != "" {
		var err error
		if numaCtlPolicy, err = strconv.ParseBool(numaCtlString); err != nil {
			return mongo.EnsureServerParams{}, fmt.Errorf("invalid numactl preference: %q", numaCtlString)
		}
	}

	si, ok := agentConfig.StateServingInfo()
	if !ok {
		return mongo.EnsureServerParams{}, fmt.Errorf("agent config has no state serving info")
	}

	params := mongo.EnsureServerParams{
		APIPort:        si.APIPort,
		StatePort:      si.StatePort,
		Cert:           si.Cert,
		PrivateKey:     si.PrivateKey,
		CAPrivateKey:   si.CAPrivateKey,
		SharedSecret:   si.SharedSecret,
		SystemIdentity: si.SystemIdentity,

		DataDir:              agentConfig.DataDir(),
		OplogSize:            oplogSize,
		SetNumaControlPolicy: numaCtlPolicy,

		Version: agentConfig.MongoVersion(),
	}
	return params, nil
}
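
A note on the numactl flag: strconv.ParseBool accepts only a fixed set of spellings, which is exactly what the "invalid numactl preference" error guards against. A quick demonstration:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// ParseBool drives the numactl preference above; only these spellings
	// parse — anything else triggers the "invalid numactl preference" error.
	for _, s := range []string{"1", "t", "TRUE", "true", "0", "f", "FALSE", "false", "yes"} {
		b, err := strconv.ParseBool(s)
		fmt.Printf("%-5s -> %v, %v\n", s, b, err)
	}
}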