Example #1
File: machine.go Project: jameinel/core
// runUpgrades runs the upgrade operations for each job type and updates the upgradedToVersion on success.
func (a *MachineAgent) runUpgrades(
	st *state.State,
	apiState *api.State,
	jobs []params.MachineJob,
	agentConfig agent.Config,
) error {
	from := version.Current
	from.Number = agentConfig.UpgradedToVersion()
	if from == version.Current {
		logger.Infof("upgrade to %v already completed.", version.Current)
		return nil
	}
	var err error
	writeErr := a.ChangeConfig(func(agentConfig agent.ConfigSetter) {
		context := upgrades.NewContext(agentConfig, apiState, st)
		for _, job := range jobs {
			target := upgradeTarget(job)
			if target == "" {
				continue
			}
			logger.Infof("starting upgrade from %v to %v for %v %q", from, version.Current, target, a.Tag())
			if err = upgrades.PerformUpgrade(from.Number, target, context); err != nil {
				err = fmt.Errorf("cannot perform upgrade from %v to %v for %v %q: %v", from, version.Current, target, a.Tag(), err)
				return
			}
		}
		agentConfig.SetUpgradedToVersion(version.Current.Number)
	})
	if writeErr != nil {
		return fmt.Errorf("cannot write updated agent configuration: %v", writeErr)
	}
	return err
}
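
The noteworthy part of runUpgrades is its error handling: the upgrade-step error is captured in a variable outside the ChangeConfig callback and reported separately from the configuration-write error. A minimal, self-contained sketch of that pattern, assuming nothing from Juju (changeConfig and runSteps are made-up stand-ins, not Juju APIs):

package main

import (
	"errors"
	"fmt"
)

// changeConfig stands in for a.ChangeConfig: it runs the mutation callback
// and reports only whether persisting the configuration failed.
func changeConfig(mutate func()) error {
	mutate()
	return nil // pretend the write succeeded
}

// runSteps mirrors the pattern above: the step error lives outside the
// callback, separate from the write error.
func runSteps(steps []func() error) error {
	var stepErr error
	writeErr := changeConfig(func() {
		for _, step := range steps {
			if stepErr = step(); stepErr != nil {
				return // stop before recording the new version
			}
		}
	})
	if writeErr != nil {
		return fmt.Errorf("cannot write updated agent configuration: %v", writeErr)
	}
	return stepErr
}

func main() {
	err := runSteps([]func() error{
		func() error { return nil },
		func() error { return errors.New("step 2 failed") },
	})
	fmt.Println(err) // step 2 failed
}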
Example #2
func (s *UpgradeSuite) assertUpgradeSteps(c *gc.C, job state.MachineJob) {
	s.PatchValue(&version.Current, s.upgradeToVersion)
	err := s.State.SetEnvironAgentVersion(s.upgradeToVersion.Number)
	c.Assert(err, gc.IsNil)

	oldVersion := s.upgradeToVersion
	oldVersion.Major = 1
	oldVersion.Minor = 16
	var oldConfig agent.Config
	s.machine, oldConfig, _ = s.primeAgent(c, oldVersion, job)

	a := s.newAgent(c, s.machine)
	go func() { c.Check(a.Run(nil), gc.IsNil) }()
	defer func() { c.Check(a.Stop(), gc.IsNil) }()

	// Wait for upgrade steps to run.
	success := false
	for attempt := coretesting.LongAttempt.Start(); attempt.Next(); {
		conf, err := agent.ReadConfig(agent.ConfigPath(oldConfig.DataDir(), s.machine.Tag()))
		c.Assert(err, gc.IsNil)
		success = conf.UpgradedToVersion() == s.upgradeToVersion.Number
		if success {
			break
		}
	}
	// Upgrade worker has completed ok.
	c.Assert(success, jc.IsTrue)
}
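
The test waits for the upgrade by repeatedly re-reading the agent configuration inside a bounded attempt loop (coretesting.LongAttempt is a Juju test helper). Roughly the same poll-until-condition idea, using only the standard library and made-up timings:

package main

import (
	"fmt"
	"time"
)

// waitUntil polls cond every interval until it returns true or the timeout
// expires, much like the attempt loop in the test above.
func waitUntil(cond func() bool, timeout, interval time.Duration) bool {
	deadline := time.Now().Add(timeout)
	for {
		if cond() {
			return true
		}
		if time.Now().After(deadline) {
			return false
		}
		time.Sleep(interval)
	}
}

func main() {
	start := time.Now()
	done := waitUntil(
		func() bool { return time.Since(start) > 30*time.Millisecond },
		time.Second, 10*time.Millisecond,
	)
	fmt.Println("condition met:", done)
}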
Example #3
File: upgrader.go Project: jameinel/core
// NewUpgrader returns a new upgrader worker. It watches changes to the
// current version of the agent with the given tag and tries to download
// the tools for any new version into the given data directory. If
// an upgrade is needed, the worker will exit with an UpgradeReadyError
// holding details of the requested upgrade. The tools will have been
// downloaded and unpacked.
func NewUpgrader(st *upgrader.State, agentConfig agent.Config) *Upgrader {
	u := &Upgrader{
		st:      st,
		dataDir: agentConfig.DataDir(),
		tag:     agentConfig.Tag(),
	}
	go func() {
		defer u.tomb.Done()
		u.tomb.Kill(u.loop())
	}()
	return u
}
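
The constructor starts the worker's main loop in a goroutine and records how it exited via the tomb; callers can later wait on it. A rough standard-library sketch of that lifecycle without the tomb package (lifecycle and newWorker are invented names, not Juju APIs):

package main

import (
	"errors"
	"fmt"
)

// lifecycle plays the role of the tomb above: it remembers the loop's exit
// error and lets callers block until the worker has finished.
type lifecycle struct {
	done chan struct{}
	err  error
}

func newWorker(loop func() error) *lifecycle {
	w := &lifecycle{done: make(chan struct{})}
	go func() {
		defer close(w.done)
		w.err = loop()
	}()
	return w
}

// Wait blocks until the loop returns and reports its error.
func (w *lifecycle) Wait() error {
	<-w.done
	return w.err
}

func main() {
	w := newWorker(func() error { return errors.New("upgrade ready") })
	fmt.Println(w.Wait())
}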
Example #4
// NewMachineEnvironmentWorker returns a worker.Worker that uses the notify
// watcher returned from the setup.
func NewMachineEnvironmentWorker(api *environment.Facade, agentConfig agent.Config) worker.Worker {
	// We don't write out system files for the local provider on machine zero
	// as that is the host machine.
	writeSystemFiles := (agentConfig.Tag() != names.MachineTag("0") ||
		agentConfig.Value(agent.ProviderType) != provider.Local)
	logger.Debugf("write system files: %v", writeSystemFiles)
	envWorker := &MachineEnvironmentWorker{
		api:              api,
		writeSystemFiles: writeSystemFiles,
		first:            true,
	}
	return worker.NewNotifyWorker(envWorker)
}
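
The writeSystemFiles condition is De Morgan's form of "not (machine 0 and local provider)". A tiny table-driven sketch with plain strings standing in for names.MachineTag and provider.Local, just to make the truth table visible:

package main

import "fmt"

func main() {
	for _, tc := range []struct {
		tag, providerType string
	}{
		{"machine-0", "local"}, // the local provider's host machine
		{"machine-0", "ec2"},
		{"machine-1", "local"},
	} {
		writeSystemFiles := tc.tag != "machine-0" || tc.providerType != "local"
		fmt.Println(tc.tag, tc.providerType, writeSystemFiles)
	}
	// Only the machine-0/local combination yields false.
}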
Example #5
File: machine.go Project: jameinel/core
// upgradeWorker runs the required upgrade operations to upgrade to the current Juju version.
func (a *MachineAgent) upgradeWorker(
	apiState *api.State,
	jobs []params.MachineJob,
	agentConfig agent.Config,
) worker.Worker {
	return worker.NewSimpleWorker(func(stop <-chan struct{}) error {
		select {
		case <-a.upgradeComplete:
			// Our work is already done (we're probably being restarted
			// because the API connection has gone down), so do nothing.
			<-stop
			return nil
		default:
		}
		// Work out whether this machine agent is a state server, since
		// only state servers need a *state.State connection here.
		needsState := false
		for _, job := range jobs {
			if job == params.JobManageEnviron {
				needsState = true
			}
		}
		// We need a *state.State for upgrades. We open it independently
		// of StateWorker, because we have no guarantees about when
		// and how often StateWorker might run.
		var st *state.State
		if needsState {
			var err error
			info, ok := agentConfig.StateInfo()
			if !ok {
				return fmt.Errorf("no state info available")
			}
			st, err = state.Open(info, state.DialOpts{}, environs.NewStatePolicy())
			if err != nil {
				return err
			}
			defer st.Close()
		}
		err := a.runUpgrades(st, apiState, jobs, agentConfig)
		if err != nil {
			return err
		}
		logger.Infof("upgrade to %v completed.", version.Current)
		close(a.upgradeComplete)
		<-stop
		return nil
	})
}
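
upgradeComplete is used as a one-shot latch: the channel is closed exactly once when upgrades finish, and the select with an empty default is a non-blocking check for that. A minimal standard-library sketch of the idiom:

package main

import "fmt"

func check(upgradeComplete chan struct{}) {
	// A non-blocking receive on an open channel falls through to default;
	// on a closed channel it succeeds immediately.
	select {
	case <-upgradeComplete:
		fmt.Println("upgrade already done")
	default:
		fmt.Println("upgrade still pending")
	}
}

func main() {
	upgradeComplete := make(chan struct{})
	check(upgradeComplete) // upgrade still pending
	close(upgradeComplete) // every future receive now succeeds
	check(upgradeComplete) // upgrade already done
}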
Example #6
File: machine.go Project: jameinel/core
func (a *MachineAgent) uninstallAgent(agentConfig agent.Config) error {
	var errors []error
	agentServiceName := agentConfig.Value(agent.AgentServiceName)
	if agentServiceName == "" {
		// For backwards compatibility, handle lack of AgentServiceName.
		agentServiceName = os.Getenv("UPSTART_JOB")
	}
	if agentServiceName != "" {
		if err := upstart.NewService(agentServiceName).Remove(); err != nil {
			errors = append(errors, fmt.Errorf("cannot remove service %q: %v", agentServiceName, err))
		}
	}
	// Remove the juju-run symlink.
	if err := os.Remove(jujuRun); err != nil && !os.IsNotExist(err) {
		errors = append(errors, err)
	}

	namespace := agentConfig.Value(agent.Namespace)
	if err := mongo.RemoveService(namespace); err != nil {
		errors = append(errors, fmt.Errorf("cannot stop/remove mongo service with namespace %q: %v", namespace, err))
	}
	if err := os.RemoveAll(agentConfig.DataDir()); err != nil {
		errors = append(errors, err)
	}
	if len(errors) == 0 {
		return nil
	}
	return fmt.Errorf("uninstall failed: %v", errors)
}
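
Errors are accumulated in a slice rather than returned immediately, so later cleanup steps still run when an earlier one fails; the final %v renders the whole slice. A small sketch of how that formatting comes out (the messages are made up):

package main

import (
	"errors"
	"fmt"
)

func main() {
	var errs []error
	errs = append(errs, errors.New("cannot remove service"))
	errs = append(errs, errors.New("permission denied"))
	fmt.Println(fmt.Errorf("uninstall failed: %v", errs))
	// uninstall failed: [cannot remove service permission denied]
}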
Example #7
File: machine.go Project: jameinel/core
func openState(agentConfig agent.Config) (_ *state.State, _ *state.Machine, err error) {
	info, ok := agentConfig.StateInfo()
	if !ok {
		return nil, nil, fmt.Errorf("no state info available")
	}
	st, err := state.Open(info, state.DialOpts{}, environs.NewStatePolicy())
	if err != nil {
		return nil, nil, err
	}
	defer func() {
		if err != nil {
			st.Close()
		}
	}()
	m0, err := st.FindEntity(agentConfig.Tag())
	if err != nil {
		if errors.IsNotFound(err) {
			err = worker.ErrTerminateAgent
		}
		return nil, nil, err
	}
	m := m0.(*state.Machine)
	if m.Life() == state.Dead {
		return nil, nil, worker.ErrTerminateAgent
	}
	// Check the machine nonce as provisioned matches the agent.Conf value.
	if !m.CheckProvisioned(agentConfig.Nonce()) {
		// The agent is running on a different machine to the one it
		// should be according to state. It must stop immediately.
		logger.Errorf("running machine %v agent on inappropriate instance", m)
		return nil, nil, worker.ErrTerminateAgent
	}
	return st, m, nil
}
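
openState relies on a classic Go cleanup pattern: with a named error return, the deferred function can close the already-opened resource whenever a later step fails, and stays out of the way on success. A self-contained sketch of the same pattern using files instead of state connections:

package main

import (
	"fmt"
	"os"
)

// openTwo opens two files; if the second open fails, the deferred function
// sees the non-nil named error and closes the first one.
func openTwo(a, b string) (_ *os.File, _ *os.File, err error) {
	fa, err := os.Open(a)
	if err != nil {
		return nil, nil, err
	}
	defer func() {
		if err != nil {
			fa.Close()
		}
	}()
	fb, err := os.Open(b)
	if err != nil {
		return nil, nil, err
	}
	return fa, fb, nil
}

func main() {
	tmp, err := os.CreateTemp("", "first")
	if err != nil {
		panic(err)
	}
	tmp.Close()
	defer os.Remove(tmp.Name())

	_, _, err = openTwo(tmp.Name(), "/no/such/file")
	fmt.Println(err) // the first file was closed by the deferred cleanup
}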
Example #8
File: bootstrap.go Project: jameinel/core
func (c *BootstrapCommand) startMongo(addrs []instance.Address, agentConfig agent.Config) error {
	logger.Debugf("starting mongo")

	info, ok := agentConfig.StateInfo()
	if !ok {
		return fmt.Errorf("no state info available")
	}
	dialInfo, err := state.DialInfo(info, state.DefaultDialOpts())
	if err != nil {
		return err
	}
	servingInfo, ok := agentConfig.StateServingInfo()
	if !ok {
		return fmt.Errorf("agent config has no state serving info")
	}
	// Use localhost to dial the mongo server, because it's running in
	// auth mode and will refuse to perform any operations unless
	// we dial that address.
	dialInfo.Addrs = []string{
		net.JoinHostPort("127.0.0.1", fmt.Sprint(servingInfo.StatePort)),
	}

	logger.Debugf("calling ensureMongoServer")
	withHA := shouldEnableHA(agentConfig)
	err = ensureMongoServer(
		agentConfig.DataDir(),
		agentConfig.Value(agent.Namespace),
		servingInfo,
		withHA,
	)
	if err != nil {
		return err
	}
	// If we are not doing HA, there is no need to set up a replica set.
	if !withHA {
		return nil
	}

	peerAddr := mongo.SelectPeerAddress(addrs)
	if peerAddr == "" {
		return fmt.Errorf("no appropriate peer address found in %q", addrs)
	}
	peerHostPort := net.JoinHostPort(peerAddr, fmt.Sprint(servingInfo.StatePort))

	return maybeInitiateMongoServer(peergrouper.InitiateMongoParams{
		DialInfo:       dialInfo,
		MemberHostPort: peerHostPort,
	})
}
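
net.JoinHostPort is preferred over plain string concatenation because it adds the brackets that IPv6 literals need. A two-line illustration:

package main

import (
	"fmt"
	"net"
)

func main() {
	fmt.Println(net.JoinHostPort("127.0.0.1", fmt.Sprint(37017))) // 127.0.0.1:37017
	fmt.Println(net.JoinHostPort("::1", "37017"))                 // [::1]:37017
}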
Example #9
File: machine.go Project: jameinel/core
func (a *MachineAgent) ensureMongoAdminUser(agentConfig agent.Config) (added bool, err error) {
	stateInfo, ok1 := agentConfig.StateInfo()
	servingInfo, ok2 := agentConfig.StateServingInfo()
	if !ok1 || !ok2 {
		return false, fmt.Errorf("no state serving info configuration")
	}
	dialInfo, err := state.DialInfo(stateInfo, state.DefaultDialOpts())
	if err != nil {
		return false, err
	}
	if len(dialInfo.Addrs) > 1 {
		logger.Infof("more than one state server; admin user must exist")
		return false, nil
	}
	return ensureMongoAdminUser(mongo.EnsureAdminUserParams{
		DialInfo:  dialInfo,
		Namespace: agentConfig.Value(agent.Namespace),
		DataDir:   agentConfig.DataDir(),
		Port:      servingInfo.StatePort,
		User:      stateInfo.Tag,
		Password:  stateInfo.Password,
	})
}
Example #10
File: worker.go Project: jameinel/core
// NewWorker returns a worker that keeps track of
// the machine's authorised ssh keys and ensures the
// ~/.ssh/authorized_keys file is up to date.
func NewWorker(st *keyupdater.State, agentConfig agent.Config) worker.Worker {
	kw := &keyupdaterWorker{st: st, tag: agentConfig.Tag()}
	return worker.NewNotifyWorker(kw)
}
Example #11
File: config.go Project: jameinel/core
func loadConfig(agentConfig agent.Config) (*config, error) {
	config := &config{
		storageDir:  agentConfig.Value(StorageDir),
		storageAddr: agentConfig.Value(StorageAddr),
		authkey:     agentConfig.Value(StorageAuthKey),
	}

	caCertPEM := agentConfig.Value(StorageCACert)
	if len(caCertPEM) > 0 {
		config.caCertPEM = caCertPEM
	}

	caKeyPEM := agentConfig.Value(StorageCAKey)
	if len(caKeyPEM) > 0 {
		config.caKeyPEM = caKeyPEM
	}

	hostnames := agentConfig.Value(StorageHostnames)
	if len(hostnames) > 0 {
		err := goyaml.Unmarshal([]byte(hostnames), &config.hostnames)
		if err != nil {
			return nil, err
		}
	}

	return config, nil
}
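
StorageHostnames is stored in the agent configuration as a YAML document, which is why it goes through goyaml.Unmarshal rather than plain string splitting. A minimal sketch of the round trip using gopkg.in/yaml.v2 in place of the snippet's goyaml import, assuming the target is a []string (the hostnames are made up):

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	raw := "- storage.internal\n- 10.0.0.4\n"
	var hostnames []string
	if err := yaml.Unmarshal([]byte(raw), &hostnames); err != nil {
		panic(err)
	}
	fmt.Println(hostnames) // [storage.internal 10.0.0.4]
}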
Example #12
File: machine.go Project: jameinel/core
// shouldEnableHA reports whether HA should be enabled.
//
// Eventually this should always be true, and ideally
// it should be true before 1.20 is released or we'll
// have more upgrade scenarios on our hands.
func shouldEnableHA(agentConfig agent.Config) bool {
	providerType := agentConfig.Value(agent.ProviderType)
	return providerType != provider.Local
}
Example #13
File: machine.go Project: jameinel/core
// ensureMongoServer ensures that mongo is installed and running,
// and ready for opening a state connection.
func (a *MachineAgent) ensureMongoServer(agentConfig agent.Config) error {
	servingInfo, ok := agentConfig.StateServingInfo()
	if !ok {
		return fmt.Errorf("state worker was started with no state serving info")
	}
	namespace := agentConfig.Value(agent.Namespace)
	withHA := shouldEnableHA(agentConfig)

	// When upgrading from a pre-HA-capable environment,
	// we must add machine-0 to the admin database and
	// initiate its replicaset.
	//
	// TODO(axw) remove this when we no longer need
	// to upgrade from pre-HA-capable environments.
	var shouldInitiateMongoServer bool
	var addrs []instance.Address
	if isPreHAVersion(agentConfig.UpgradedToVersion()) {
		_, err := a.ensureMongoAdminUser(agentConfig)
		if err != nil {
			return err
		}
		if servingInfo.SharedSecret == "" {
			servingInfo.SharedSecret, err = mongo.GenerateSharedSecret()
			if err != nil {
				return err
			}
			if err = a.ChangeConfig(func(config agent.ConfigSetter) {
				config.SetStateServingInfo(servingInfo)
			}); err != nil {
				return err
			}
			agentConfig = a.CurrentConfig()
		}
		st, m, err := openState(agentConfig)
		if err != nil {
			return err
		}
		if err := st.SetStateServingInfo(servingInfo); err != nil {
			st.Close()
			return fmt.Errorf("cannot set state serving info: %v", err)
		}
		st.Close()
		addrs = m.Addresses()
		shouldInitiateMongoServer = withHA
	}

	// ensureMongoServer installs/upgrades the upstart config as necessary.
	if err := ensureMongoServer(
		agentConfig.DataDir(),
		namespace,
		servingInfo,
		withHA,
	); err != nil {
		return err
	}
	if !shouldInitiateMongoServer {
		return nil
	}

	// Initiate the replicaset for upgraded environments.
	//
	// TODO(axw) remove this when we no longer need
	// to upgrade from pre-HA-capable environments.
	stateInfo, ok := agentConfig.StateInfo()
	if !ok {
		return fmt.Errorf("state worker was started with no state serving info")
	}
	dialInfo, err := state.DialInfo(stateInfo, state.DefaultDialOpts())
	if err != nil {
		return err
	}
	peerAddr := mongo.SelectPeerAddress(addrs)
	if peerAddr == "" {
		return fmt.Errorf("no appropriate peer address found in %q", addrs)
	}
	return maybeInitiateMongoServer(peergrouper.InitiateMongoParams{
		DialInfo:       dialInfo,
		MemberHostPort: net.JoinHostPort(peerAddr, fmt.Sprint(servingInfo.StatePort)),
		User:           stateInfo.Tag,
		Password:       stateInfo.Password,
	})
}
Example #14
File: agent.go Project: jameinel/core
// openAPIState opens the API using the given agent configuration and
// returns the opened state and the API entity for the agent's tag. If
// the fallback password had to be used to connect, a new password is
// generated and saved via the agent's ChangeConfig method before being
// set on the entity.
func openAPIState(
	agentConfig agent.Config,
	a Agent,
) (_ *api.State, _ *apiagent.Entity, err error) {
	// We let the API dial fail immediately because the
	// runner's loop outside the caller of openAPIState will
	// keep on retrying. If we block for ages here,
	// then the worker that's calling this cannot
	// be interrupted.
	info := agentConfig.APIInfo()
	st, err := apiOpen(info, api.DialOpts{})
	usedOldPassword := false
	if params.IsCodeUnauthorized(err) {
		// We've perhaps used the wrong password, so
		// try again with the fallback password.
		info := *info
		info.Password = agentConfig.OldPassword()
		usedOldPassword = true
		st, err = apiOpen(&info, api.DialOpts{})
	}
	if err != nil {
		if params.IsCodeNotProvisioned(err) {
			return nil, nil, worker.ErrTerminateAgent
		}
		if params.IsCodeUnauthorized(err) {
			return nil, nil, worker.ErrTerminateAgent
		}
		return nil, nil, err
	}
	defer func() {
		if err != nil {
			st.Close()
		}
	}()
	entity, err := st.Agent().Entity(a.Tag())
	if err == nil && entity.Life() == params.Dead {
		return nil, nil, worker.ErrTerminateAgent
	}
	if err != nil {
		if params.IsCodeUnauthorized(err) {
			return nil, nil, worker.ErrTerminateAgent
		}
		return nil, nil, err
	}
	if usedOldPassword {
		// We succeeded in connecting with the fallback
		// password, so we need to create a new password
		// for the future.

		newPassword, err := utils.RandomPassword()
		if err != nil {
			return nil, nil, err
		}
		// Change the configuration *before* setting the entity
		// password, so that we avoid the possibility that
		// we might successfully change the entity's
		// password but fail to write the configuration,
		// thus locking us out completely.
		if err := a.ChangeConfig(func(c agent.ConfigSetter) {
			c.SetPassword(newPassword)
			c.SetOldPassword(info.Password)
		}); err != nil {
			return nil, nil, err
		}
		if err := entity.SetPassword(newPassword); err != nil {
			return nil, nil, err
		}
	}

	return st, entity, nil
}
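
The ordering matters: the new password is written to the local configuration before it is set on the API entity, so a failure in between cannot lock the agent out. A hypothetical sketch of why that order is safe (localConfig, apiEntity and rotate are invented stand-ins, not Juju types):

package main

import (
	"errors"
	"fmt"
)

type localConfig struct{ password, oldPassword string }
type apiEntity struct{ password string }

func rotate(cfg *localConfig, entity *apiEntity, newPassword string, remoteFails bool) error {
	// 1. Persist the new credential locally, keeping the old one as a fallback.
	cfg.oldPassword, cfg.password = cfg.password, newPassword
	// 2. Only then change it on the server. If this step fails, the server
	//    still accepts the fallback password recorded in step 1, so the agent
	//    can reconnect with the old password and retry later.
	if remoteFails {
		return errors.New("connection lost before entity password changed")
	}
	entity.password = newPassword
	return nil
}

func main() {
	cfg := &localConfig{password: "fallback"}
	entity := &apiEntity{password: "fallback"}
	fmt.Println(rotate(cfg, entity, "new-secret", true))
	fmt.Println("server still accepts the saved fallback:", entity.password == cfg.oldPassword)
}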
Example #15
File: agent_test.go Project: jameinel/core
func refreshConfig(c *gc.C, config agent.Config) agent.ConfigSetterWriter {
	config1, err := agent.ReadConfig(agent.ConfigPath(config.DataDir(), config.Tag()))
	c.Assert(err, gc.IsNil)
	return config1
}
Example #16
File: machiner.go Project: jameinel/core
// NewMachiner returns a Worker that waits for the identified machine to
// become Dying and then sets it to Dead, or finishes once the machine
// becomes Dead by other means.
func NewMachiner(st *machiner.State, agentConfig agent.Config) worker.Worker {
	mr := &Machiner{st: st, tag: agentConfig.Tag()}
	return worker.NewNotifyWorker(mr)
}