func (s *UpgradeSuite) assertUpgradeSteps(c *gc.C, job state.MachineJob) {
	s.agentSuite.PatchValue(&version.Current, s.upgradeToVersion)
	err := s.State.SetEnvironAgentVersion(s.upgradeToVersion.Number)
	c.Assert(err, gc.IsNil)

	oldVersion := s.upgradeToVersion
	oldVersion.Major = 1
	oldVersion.Minor = 16
	var oldConfig agent.Config
	s.machine, oldConfig, _ = s.primeAgent(c, oldVersion, job)

	a := s.newAgent(c, s.machine)
	go func() { c.Check(a.Run(nil), gc.IsNil) }()
	defer func() { c.Check(a.Stop(), gc.IsNil) }()

	// Wait for upgrade steps to run.
	success := false
	for attempt := coretesting.LongAttempt.Start(); attempt.Next(); {
		conf, err := agent.ReadConfig(agent.ConfigPath(oldConfig.DataDir(), s.machine.Tag()))
		c.Assert(err, gc.IsNil)
		success = conf.UpgradedToVersion() == s.upgradeToVersion.Number
		if success {
			break
		}
	}
	// Upgrade worker has completed ok.
	c.Assert(success, jc.IsTrue)
}
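// The loop above polls the agent config on disk until it reports the target
// version, using coretesting.LongAttempt for its retry timings. As a
// stdlib-only illustration of that poll-until-condition pattern, here is a
// hypothetical helper; waitFor and its parameters are illustrative and not
// part of the juju codebase. Assumes "time" is imported.
func waitFor(timeout, interval time.Duration, cond func() bool) bool {
	deadline := time.Now().Add(timeout)
	for {
		if cond() {
			return true
		}
		if time.Now().After(deadline) {
			return false
		}
		time.Sleep(interval)
	}
}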
// uninstallAgent removes the agent's upstart service, the juju-run symlink,
// the agent-managed mongo service and the agent's data directory, collecting
// any failures and reporting them as a single error.
func (a *MachineAgent) uninstallAgent(agentConfig agent.Config) error {
	var errors []error
	agentServiceName := agentConfig.Value(agent.AgentServiceName)
	if agentServiceName == "" {
		// For backwards compatibility, handle lack of AgentServiceName.
		agentServiceName = os.Getenv("UPSTART_JOB")
	}
	if agentServiceName != "" {
		if err := upstart.NewService(agentServiceName).Remove(); err != nil {
			errors = append(errors, fmt.Errorf("cannot remove service %q: %v", agentServiceName, err))
		}
	}
	// Remove the juju-run symlink.
	if err := os.Remove(jujuRun); err != nil && !os.IsNotExist(err) {
		errors = append(errors, err)
	}
	namespace := agentConfig.Value(agent.Namespace)
	if err := mongo.RemoveService(namespace); err != nil {
		errors = append(errors, fmt.Errorf("cannot stop/remove mongo service with namespace %q: %v", namespace, err))
	}
	if err := os.RemoveAll(agentConfig.DataDir()); err != nil {
		errors = append(errors, err)
	}
	if len(errors) == 0 {
		return nil
	}
	return fmt.Errorf("uninstall failed: %v", errors)
}
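// uninstallAgent deliberately keeps going after individual failures and
// reports them all at once. A minimal sketch of that "collect errors, fail
// once at the end" pattern, assuming only the stdlib "fmt" import;
// collectErrors is a hypothetical helper, not part of the juju codebase.
func collectErrors(steps ...func() error) error {
	var errs []error
	for _, step := range steps {
		if err := step(); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return fmt.Errorf("uninstall failed: %v", errs)
}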
// startMongo ensures the mongo server is installed and running on the
// bootstrap machine, then initiates its replicaset using the supplied
// addresses.
func (c *BootstrapCommand) startMongo(addrs []instance.Address, agentConfig agent.Config) error {
	logger.Debugf("starting mongo")

	info, ok := agentConfig.StateInfo()
	if !ok {
		return fmt.Errorf("no state info available")
	}
	// When bootstrapping, we need to allow enough time for mongo
	// to start as there's no retry loop in place.
	// 5 minutes should suffice.
	bootstrapDialOpts := state.DialOpts{Timeout: 5 * time.Minute}
	dialInfo, err := state.DialInfo(info, bootstrapDialOpts)
	if err != nil {
		return err
	}
	servingInfo, ok := agentConfig.StateServingInfo()
	if !ok {
		return fmt.Errorf("agent config has no state serving info")
	}
	// Use localhost to dial the mongo server, because it's running in
	// auth mode and will refuse to perform any operations unless
	// we dial that address.
	dialInfo.Addrs = []string{
		net.JoinHostPort("127.0.0.1", fmt.Sprint(servingInfo.StatePort)),
	}

	logger.Debugf("calling ensureMongoServer")
	err = ensureMongoServer(
		agentConfig.DataDir(),
		agentConfig.Value(agent.Namespace),
		servingInfo,
	)
	if err != nil {
		return err
	}

	peerAddr := mongo.SelectPeerAddress(addrs)
	if peerAddr == "" {
		return fmt.Errorf("no appropriate peer address found in %q", addrs)
	}
	peerHostPort := net.JoinHostPort(peerAddr, fmt.Sprint(servingInfo.StatePort))

	return maybeInitiateMongoServer(peergrouper.InitiateMongoParams{
		DialInfo:       dialInfo,
		MemberHostPort: peerHostPort,
	})
}
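// A tiny stdlib-only sketch of how the dial and peer addresses above are
// composed; memberHostPort is a hypothetical helper and the values in the
// comment are made up. net.JoinHostPort is preferred over plain string
// concatenation because it brackets IPv6 addresses correctly.
// Assumes "fmt" and "net" are imported.
func memberHostPort(addr string, statePort int) string {
	// e.g. memberHostPort("10.0.3.1", 37017) == "10.0.3.1:37017"
	return net.JoinHostPort(addr, fmt.Sprint(statePort))
}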
// updateSupportedContainers records in state that a machine can run the specified containers.
// It starts a watcher and when a container of a given type is first added to the machine,
// the watcher is killed, the machine is set up to be able to start containers of the given type,
// and a suitable provisioner is started.
func (a *MachineAgent) updateSupportedContainers(
	runner worker.Runner,
	st *api.State,
	tag string,
	containers []instance.ContainerType,
	agentConfig agent.Config,
) error {
	pr := st.Provisioner()
	machine, err := pr.Machine(tag)
	if err != nil {
		return fmt.Errorf("%s is not in state: %v", tag, err)
	}
	if len(containers) == 0 {
		if err := machine.SupportsNoContainers(); err != nil {
			return fmt.Errorf("clearing supported containers for %s: %v", tag, err)
		}
		return nil
	}
	if err := machine.SetSupportedContainers(containers...); err != nil {
		return fmt.Errorf("setting supported containers for %s: %v", tag, err)
	}
	initLock, err := hookExecutionLock(agentConfig.DataDir())
	if err != nil {
		return err
	}
	// Start the watcher to fire when a container is first requested on the machine.
	watcherName := fmt.Sprintf("%s-container-watcher", machine.Id())
	handler := provisioner.NewContainerSetupHandler(
		runner,
		watcherName,
		containers,
		machine,
		pr,
		agentConfig,
		initLock,
	)
	a.startWorkerAfterUpgrade(runner, watcherName, func() (worker.Worker, error) {
		return worker.NewStringsWorker(handler), nil
	})
	return nil
}
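// A hypothetical invocation of updateSupportedContainers, shown only to
// illustrate the call shape; the machine tag and container list below are
// made up for the example:
//
//	if err := a.updateSupportedContainers(runner, st, "machine-1",
//		[]instance.ContainerType{instance.LXC, instance.KVM}, agentConfig); err != nil {
//		logger.Errorf("cannot update supported containers: %v", err)
//	}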
// ensureMongoAdminUser adds the agent's user to mongo's admin database,
// unless the environment already has more than one state server, in which
// case the admin user must already exist.
func (a *MachineAgent) ensureMongoAdminUser(agentConfig agent.Config) (added bool, err error) {
	stateInfo, ok1 := agentConfig.StateInfo()
	servingInfo, ok2 := agentConfig.StateServingInfo()
	if !ok1 || !ok2 {
		return false, fmt.Errorf("no state serving info configuration")
	}
	dialInfo, err := state.DialInfo(stateInfo, state.DefaultDialOpts())
	if err != nil {
		return false, err
	}
	if len(dialInfo.Addrs) > 1 {
		logger.Infof("more than one state server; admin user must exist")
		return false, nil
	}
	return ensureMongoAdminUser(mongo.EnsureAdminUserParams{
		DialInfo:  dialInfo,
		Namespace: agentConfig.Value(agent.Namespace),
		DataDir:   agentConfig.DataDir(),
		Port:      servingInfo.StatePort,
		User:      stateInfo.Tag,
		Password:  stateInfo.Password,
	})
}
// refreshConfig re-reads the agent's configuration from disk so that
// assertions see any changes written by the agent under test.
func refreshConfig(c *gc.C, config agent.Config) agent.ConfigSetterWriter {
	config1, err := agent.ReadConfig(agent.ConfigPath(config.DataDir(), config.Tag()))
	c.Assert(err, gc.IsNil)
	return config1
}
// ensureMongoServer ensures that mongo is installed and running,
// and ready for opening a state connection.
func (a *MachineAgent) ensureMongoServer(agentConfig agent.Config) error {
	servingInfo, ok := agentConfig.StateServingInfo()
	if !ok {
		return fmt.Errorf("state worker was started with no state serving info")
	}
	namespace := agentConfig.Value(agent.Namespace)

	// When upgrading from a pre-HA-capable environment,
	// we must add machine-0 to the admin database and
	// initiate its replicaset.
	//
	// TODO(axw) remove this when we no longer need
	// to upgrade from pre-HA-capable environments.
	var shouldInitiateMongoServer bool
	var addrs []instance.Address
	if isPreHAVersion(agentConfig.UpgradedToVersion()) {
		_, err := a.ensureMongoAdminUser(agentConfig)
		if err != nil {
			return err
		}
		if servingInfo.SharedSecret == "" {
			servingInfo.SharedSecret, err = mongo.GenerateSharedSecret()
			if err != nil {
				return err
			}
			if err = a.ChangeConfig(func(config agent.ConfigSetter) {
				config.SetStateServingInfo(servingInfo)
			}); err != nil {
				return err
			}
			agentConfig = a.CurrentConfig()
		}
		st, m, err := openState(agentConfig)
		if err != nil {
			return err
		}
		if err := st.SetStateServingInfo(servingInfo); err != nil {
			st.Close()
			return fmt.Errorf("cannot set state serving info: %v", err)
		}
		st.Close()
		addrs = m.Addresses()
		shouldInitiateMongoServer = true
	}

	// ensureMongoServer installs/upgrades the upstart config as necessary.
	if err := ensureMongoServer(
		agentConfig.DataDir(),
		namespace,
		servingInfo,
	); err != nil {
		return err
	}
	if !shouldInitiateMongoServer {
		return nil
	}

	// Initiate the replicaset for upgraded environments.
	//
	// TODO(axw) remove this when we no longer need
	// to upgrade from pre-HA-capable environments.
	stateInfo, ok := agentConfig.StateInfo()
	if !ok {
		return fmt.Errorf("state worker was started with no state info")
	}
	dialInfo, err := state.DialInfo(stateInfo, state.DefaultDialOpts())
	if err != nil {
		return err
	}
	peerAddr := mongo.SelectPeerAddress(addrs)
	if peerAddr == "" {
		return fmt.Errorf("no appropriate peer address found in %q", addrs)
	}
	return maybeInitiateMongoServer(peergrouper.InitiateMongoParams{
		DialInfo:       dialInfo,
		MemberHostPort: net.JoinHostPort(peerAddr, fmt.Sprint(servingInfo.StatePort)),
		User:           stateInfo.Tag,
		Password:       stateInfo.Password,
	})
}