// Run initializes state for an environment.
func (c *BootstrapCommand) Run(_ *cmd.Context) error {
	envCfg, err := config.New(config.NoDefaults, c.ControllerModelConfig)
	if err != nil {
		return err
	}
	err = c.ReadConfig("machine-0")
	if err != nil {
		return err
	}
	agentConfig := c.CurrentConfig()
	network.SetPreferIPv6(agentConfig.PreferIPv6())

	// agent.Jobs is an optional field in the agent config, and was
	// introduced after 1.17.2. We default to allowing units on
	// machine-0 if missing.
	jobs := agentConfig.Jobs()
	if len(jobs) == 0 {
		jobs = []multiwatcher.MachineJob{
			multiwatcher.JobManageModel,
			multiwatcher.JobHostUnits,
			multiwatcher.JobManageNetworking,
		}
	}

	// Get the bootstrap machine's addresses from the provider.
	env, err := environs.New(envCfg)
	if err != nil {
		return err
	}

	newConfigAttrs := make(map[string]interface{})

	// Check to see if a newer agent version has been requested
	// by the bootstrap client.
	desiredVersion, ok := envCfg.AgentVersion()
	if ok && desiredVersion != jujuversion.Current {
		// If we have been asked for a newer version, ensure the newer
		// tools can actually be found, or else bootstrap won't complete.
		stream := envtools.PreferredStream(&desiredVersion, envCfg.Development(), envCfg.AgentStream())
		logger.Infof("newer tools requested, looking for %v in stream %v", desiredVersion, stream)
		filter := tools.Filter{
			Number: desiredVersion,
			Arch:   arch.HostArch(),
			Series: series.HostSeries(),
		}
		_, toolsErr := envtools.FindTools(env, -1, -1, stream, filter)
		if toolsErr == nil {
			logger.Infof("tools are available, upgrade will occur after bootstrap")
		}
		if errors.IsNotFound(toolsErr) {
			// Newer tools not available, so revert to using the tools
			// matching the current agent version.
			logger.Warningf("newer tools for %q not available, sticking with version %q", desiredVersion, jujuversion.Current)
			newConfigAttrs["agent-version"] = jujuversion.Current.String()
		} else if toolsErr != nil {
			logger.Errorf("cannot find newer tools: %v", toolsErr)
			return toolsErr
		}
	}

	instanceId := instance.Id(c.InstanceId)
	instances, err := env.Instances([]instance.Id{instanceId})
	if err != nil {
		return err
	}
	addrs, err := instances[0].Addresses()
	if err != nil {
		return err
	}

	// When machine addresses are reported from state, they have
	// duplicates removed. We should do the same here so that
	// there is not unnecessary churn in the mongo replicaset.
	// TODO (cherylj) Add explicit unit tests for this - tracked
	// by bug #1544158.
	addrs = network.MergedAddresses([]network.Address{}, addrs)

	// Generate a private SSH key for the controllers, and add
	// the public key to the environment config. We'll add the
	// private key to StateServingInfo below.
	privateKey, publicKey, err := sshGenerateKey(config.JujuSystemKey)
	if err != nil {
		return errors.Annotate(err, "failed to generate system key")
	}
	authorizedKeys := config.ConcatAuthKeys(envCfg.AuthorizedKeys(), publicKey)
	newConfigAttrs[config.AuthKeysConfig] = authorizedKeys

	// Generate a shared secret for the Mongo replica set, and write it out.
	sharedSecret, err := mongo.GenerateSharedSecret()
	if err != nil {
		return err
	}
	info, ok := agentConfig.StateServingInfo()
	if !ok {
		return fmt.Errorf("bootstrap machine config has no state serving info")
	}
	info.SharedSecret = sharedSecret
	info.SystemIdentity = privateKey
	err = c.ChangeConfig(func(agentConfig agent.ConfigSetter) error {
		agentConfig.SetStateServingInfo(info)
		return nil
	})
	if err != nil {
		return fmt.Errorf("cannot write agent config: %v", err)
	}

	err = c.ChangeConfig(func(config agent.ConfigSetter) error {
		// We'll try for mongo 3.2 first and fallback to
		// mongo 2.4 if the newer binaries are not available.
		if mongo.BinariesAvailable(mongo.Mongo32wt) {
			config.SetMongoVersion(mongo.Mongo32wt)
		} else {
			config.SetMongoVersion(mongo.Mongo24)
		}
		return nil
	})
	if err != nil {
		return errors.Annotate(err, "cannot set mongo version")
	}

	agentConfig = c.CurrentConfig()

	// Create system-identity file
	if err := agent.WriteSystemIdentityFile(agentConfig); err != nil {
		return err
	}

	if err := c.startMongo(addrs, agentConfig); err != nil {
		return err
	}
	logger.Infof("started mongo")

	// Initialise state, and store any agent config (e.g. password) changes.
	envCfg, err = env.Config().Apply(newConfigAttrs)
	if err != nil {
		return errors.Annotate(err, "failed to update model config")
	}
	var st *state.State
	var m *state.Machine
	err = c.ChangeConfig(func(agentConfig agent.ConfigSetter) error {
		var stateErr error
		dialOpts := mongo.DefaultDialOpts()

		// Set a longer socket timeout than usual, as the machine
		// will be starting up and disk I/O slower than usual. This
		// has been known to cause timeouts in queries.
		timeouts := envCfg.BootstrapSSHOpts()
		dialOpts.SocketTimeout = timeouts.Timeout
		if dialOpts.SocketTimeout < minSocketTimeout {
			dialOpts.SocketTimeout = minSocketTimeout
		}

		// We shouldn't attempt to dial peers until we have some.
		dialOpts.Direct = true

		adminTag := names.NewLocalUserTag(c.AdminUsername)
		st, m, stateErr = agentInitializeState(
			adminTag,
			agentConfig,
			envCfg,
			c.HostedModelConfig,
			agentbootstrap.BootstrapMachineConfig{
				Addresses:            addrs,
				BootstrapConstraints: c.BootstrapConstraints,
				ModelConstraints:     c.ModelConstraints,
				Jobs:                 jobs,
				InstanceId:           instanceId,
				Characteristics:      c.Hardware,
				SharedSecret:         sharedSecret,
			},
			dialOpts,
			environs.NewStatePolicy(),
		)
		return stateErr
	})
	if err != nil {
		return err
	}
	defer st.Close()

	// Populate the tools catalogue.
	if err := c.populateTools(st, env); err != nil {
		return err
	}

	// Populate the GUI archive catalogue.
	if err := c.populateGUIArchive(st, env); err != nil {
		// Do not stop the bootstrapping process for Juju GUI archive errors.
		logger.Warningf("cannot set up Juju GUI: %s", err)
	} else {
		logger.Debugf("Juju GUI successfully set up")
	}

	// Add custom image metadata to environment storage.
	if c.ImageMetadataDir != "" {
		if err := c.saveCustomImageMetadata(st, env); err != nil {
			return err
		}
		stor := newStateStorage(st.ModelUUID(), st.MongoSession())
		if err := c.storeCustomImageMetadata(stor); err != nil {
			return err
		}
	}

	// Populate the storage pools.
	if err = c.populateDefaultStoragePools(st); err != nil {
		return err
	}

	// bootstrap machine always gets the vote
	return m.SetHasVote(true)
}
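// Standalone sketch (not part of the agent source): the ChangeConfig closure
// above clamps the Mongo socket timeout to a floor so that slow disk I/O at
// bootstrap time does not cause query timeouts. The constant and helper below
// are illustrative stand-ins, using only the standard library; the real floor
// value may differ.
package main

import (
	"fmt"
	"time"
)

// minSocketTimeout mirrors the floor used above (assumed value).
const minSocketTimeout = 1 * time.Minute

// clampSocketTimeout returns the requested timeout, raised to the floor
// when the caller asks for something shorter.
func clampSocketTimeout(requested time.Duration) time.Duration {
	if requested < minSocketTimeout {
		return minSocketTimeout
	}
	return requested
}

func main() {
	fmt.Println(clampSocketTimeout(10 * time.Second)) // 1m0s: raised to the floor
	fmt.Println(clampSocketTimeout(5 * time.Minute))  // 5m0s: left unchanged
}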
		return nil, errors.New("this manifold may only be used inside a machine agent")
	}

	// Get the machine agent's jobs.
	entity, err := apiagent.NewState(apiCaller).Entity(tag)
	if err != nil {
		return nil, err
	}

	var isModelManager bool
	for _, job := range entity.Jobs() {
		if job == multiwatcher.JobManageModel {
			isModelManager = true
			break
		}
	}

	if !isModelManager {
		return nil, dependency.ErrMissing
	}

	return NewWorker(cfg)
}

var NewWorker = func(agentConfig agent.Config) (worker.Worker, error) {
	inner := func(<-chan struct{}) error {
		return agent.WriteSystemIdentityFile(agentConfig)
	}
	return worker.NewSimpleWorker(inner), nil
}
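// Standalone sketch (not part of the agent source): NewWorker above is a
// package-level var rather than a plain func so that tests can substitute a
// fake constructor and restore the original afterwards. The simplified
// constructor type below is a stand-in for the real
// func(agent.Config) (worker.Worker, error) signature.
package main

import "fmt"

// newWorker mirrors the var-based constructor pattern used above.
var newWorker = func(name string) (func() error, error) {
	return func() error {
		fmt.Println("real worker running:", name)
		return nil
	}, nil
}

// patchNewWorker swaps in a fake constructor and returns a restore func,
// the way a test helper typically would.
func patchNewWorker(fake func(string) (func() error, error)) (restore func()) {
	orig := newWorker
	newWorker = fake
	return func() { newWorker = orig }
}

func main() {
	restore := patchNewWorker(func(name string) (func() error, error) {
		return func() error {
			fmt.Println("fake worker for test:", name)
			return nil
		}, nil
	})
	defer restore()

	run, err := newWorker("system-identity")
	if err != nil {
		panic(err)
	}
	_ = run()
}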
// StateWorker returns a worker running all the workers that require
// a *state.State connection.
func (a *MachineAgent) StateWorker() (worker.Worker, error) {
	agentConfig := a.CurrentConfig()

	// Create system-identity file.
	if err := agent.WriteSystemIdentityFile(agentConfig); err != nil {
		return nil, err
	}

	// Start MongoDB server and dial.
	if err := a.ensureMongoServer(agentConfig); err != nil {
		return nil, err
	}
	st, m, err := openState(agentConfig, stateWorkerDialOpts)
	if err != nil {
		return nil, err
	}
	reportOpenedState(st)

	singularStateConn := singularStateConn{st.MongoSession(), m}
	runner := newRunner(connectionIsFatal(st), moreImportant)
	singularRunner, err := newSingularRunner(runner, singularStateConn)
	if err != nil {
		return nil, fmt.Errorf("cannot make singular State Runner: %v", err)
	}

	// Take advantage of special knowledge here in that we will only ever want
	// the storage provider on one machine, and that is the "bootstrap" node.
	providerType := agentConfig.Value(agent.ProviderType)
	if (providerType == provider.Local || provider.IsManual(providerType)) && m.Id() == bootstrapMachineId {
		a.startWorkerAfterUpgrade(runner, "local-storage", func() (worker.Worker, error) {
			// TODO(axw) 2013-09-24 bug #1229507
			// Make another job to enable storage.
			// There's nothing special about this.
			return localstorage.NewWorker(agentConfig), nil
		})
	}
	for _, job := range m.Jobs() {
		switch job {
		case state.JobHostUnits:
			// Implemented in APIWorker.
		case state.JobManageEnviron:
			useMultipleCPUs()
			a.startWorkerAfterUpgrade(runner, "instancepoller", func() (worker.Worker, error) {
				return instancepoller.NewWorker(st), nil
			})
			a.startWorkerAfterUpgrade(runner, "peergrouper", func() (worker.Worker, error) {
				return peergrouperNew(st)
			})
			runner.StartWorker("apiserver", func() (worker.Worker, error) {
				// If the configuration does not have the required information,
				// it is currently not a recoverable error, so we kill the whole
				// agent, potentially enabling human intervention to fix
				// the agent's configuration file. In the future, we may retrieve
				// the state server certificate and key from the state, and
				// this should then change.
				info, ok := agentConfig.StateServingInfo()
				if !ok {
					return nil, &fatalError{"StateServingInfo not available and we need it"}
				}
				cert := []byte(info.Cert)
				key := []byte(info.PrivateKey)
				if len(cert) == 0 || len(key) == 0 {
					return nil, &fatalError{"configuration does not have state server cert/key"}
				}
				dataDir := agentConfig.DataDir()
				logDir := agentConfig.LogDir()

				endpoint := net.JoinHostPort("", strconv.Itoa(info.APIPort))
				listener, err := net.Listen("tcp", endpoint)
				if err != nil {
					return nil, err
				}
				return apiserver.NewServer(st, listener, apiserver.ServerConfig{
					Cert:      cert,
					Key:       key,
					DataDir:   dataDir,
					LogDir:    logDir,
					Validator: a.limitLoginsDuringUpgrade,
				})
			})
			a.startWorkerAfterUpgrade(singularRunner, "cleaner", func() (worker.Worker, error) {
				return cleaner.NewCleaner(st), nil
			})
			a.startWorkerAfterUpgrade(singularRunner, "resumer", func() (worker.Worker, error) {
				// The action of resumer is so subtle that it is not tested,
				// because we can't figure out how to do so without brutalising
				// the transaction log.
				return resumer.NewResumer(st), nil
			})
			a.startWorkerAfterUpgrade(singularRunner, "minunitsworker", func() (worker.Worker, error) {
				return minunitsworker.NewMinUnitsWorker(st), nil
			})
		case state.JobManageStateDeprecated:
			// Legacy environments may set this, but we ignore it.
		default:
			logger.Warningf("ignoring unknown job %q", job)
		}
	}
	return newCloseWorker(runner, st), nil
}
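// Standalone sketch (standard library only, not part of the agent source):
// the apiserver worker above builds its listen address with an empty host,
// so net.JoinHostPort yields ":<port>" and net.Listen binds every interface.
package main

import (
	"fmt"
	"net"
	"strconv"
)

// listenAllInterfaces reproduces that pattern; apiPort stands in for
// info.APIPort above.
func listenAllInterfaces(apiPort int) (net.Listener, error) {
	endpoint := net.JoinHostPort("", strconv.Itoa(apiPort)) // e.g. ":17070"
	return net.Listen("tcp", endpoint)
}

func main() {
	l, err := listenAllInterfaces(0) // port 0 asks the OS for a free port
	if err != nil {
		panic(err)
	}
	defer l.Close()
	fmt.Println("listening on", l.Addr())
}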
// Run initializes state for an environment.
func (c *BootstrapCommand) Run(_ *cmd.Context) error {
	envCfg, err := config.New(config.NoDefaults, c.EnvConfig)
	if err != nil {
		return err
	}
	err = c.ReadConfig("machine-0")
	if err != nil {
		return err
	}
	agentConfig := c.CurrentConfig()
	network.InitializeFromConfig(agentConfig)

	// agent.Jobs is an optional field in the agent config, and was
	// introduced after 1.17.2. We default to allowing units on
	// machine-0 if missing.
	jobs := agentConfig.Jobs()
	if len(jobs) == 0 {
		jobs = []multiwatcher.MachineJob{
			multiwatcher.JobManageEnviron,
			multiwatcher.JobHostUnits,
			multiwatcher.JobManageNetworking,
		}
	}

	// Get the bootstrap machine's addresses from the provider.
	env, err := environs.New(envCfg)
	if err != nil {
		return err
	}
	instanceId := instance.Id(c.InstanceId)
	instances, err := env.Instances([]instance.Id{instanceId})
	if err != nil {
		return err
	}
	addrs, err := instances[0].Addresses()
	if err != nil {
		return err
	}

	// Generate a private SSH key for the state servers, and add
	// the public key to the environment config. We'll add the
	// private key to StateServingInfo below.
	privateKey, publicKey, err := sshGenerateKey(config.JujuSystemKey)
	if err != nil {
		return errors.Annotate(err, "failed to generate system key")
	}
	authorizedKeys := config.ConcatAuthKeys(envCfg.AuthorizedKeys(), publicKey)
	envCfg, err = env.Config().Apply(map[string]interface{}{
		config.AuthKeysConfig: authorizedKeys,
	})
	if err != nil {
		return errors.Annotate(err, "failed to add public key to environment config")
	}

	// Generate a shared secret for the Mongo replica set, and write it out.
	sharedSecret, err := mongo.GenerateSharedSecret()
	if err != nil {
		return err
	}
	info, ok := agentConfig.StateServingInfo()
	if !ok {
		return fmt.Errorf("bootstrap machine config has no state serving info")
	}
	info.SharedSecret = sharedSecret
	info.SystemIdentity = privateKey
	err = c.ChangeConfig(func(agentConfig agent.ConfigSetter) error {
		agentConfig.SetStateServingInfo(info)
		return nil
	})
	if err != nil {
		return fmt.Errorf("cannot write agent config: %v", err)
	}
	agentConfig = c.CurrentConfig()

	// Create system-identity file
	if err := agent.WriteSystemIdentityFile(agentConfig); err != nil {
		return err
	}

	if err := c.startMongo(addrs, agentConfig); err != nil {
		return err
	}
	logger.Infof("started mongo")

	// Initialise state, and store any agent config (e.g. password) changes.
	var st *state.State
	var m *state.Machine
	err = c.ChangeConfig(func(agentConfig agent.ConfigSetter) error {
		var stateErr error
		dialOpts := mongo.DefaultDialOpts()

		// Set a longer socket timeout than usual, as the machine
		// will be starting up and disk I/O slower than usual. This
		// has been known to cause timeouts in queries.
		timeouts := envCfg.BootstrapSSHOpts()
		dialOpts.SocketTimeout = timeouts.Timeout
		if dialOpts.SocketTimeout < minSocketTimeout {
			dialOpts.SocketTimeout = minSocketTimeout
		}

		// We shouldn't attempt to dial peers until we have some.
		dialOpts.Direct = true

		adminTag := names.NewLocalUserTag(c.AdminUsername)
		st, m, stateErr = agentInitializeState(
			adminTag,
			agentConfig,
			envCfg,
			agent.BootstrapMachineConfig{
				Addresses:       addrs,
				Constraints:     c.Constraints,
				Jobs:            jobs,
				InstanceId:      instanceId,
				Characteristics: c.Hardware,
				SharedSecret:    sharedSecret,
			},
			dialOpts,
			environs.NewStatePolicy(),
		)
		return stateErr
	})
	if err != nil {
		return err
	}
	defer st.Close()

	// Populate the tools catalogue.
	if err := c.populateTools(st, env); err != nil {
		return err
	}

	// Add custom image metadata to environment storage.
	if c.ImageMetadataDir != "" {
		if err := c.saveCustomImageMetadata(st); err != nil {
			return err
		}
		// TODO (anastasiamac 2015-09-24) Remove this once search path is updated..
		stor := newStateStorage(st.EnvironUUID(), st.MongoSession())
		if err := c.storeCustomImageMetadata(stor); err != nil {
			return err
		}
	}

	// Populate the storage pools.
	if err := c.populateDefaultStoragePools(st); err != nil {
		return err
	}

	// bootstrap machine always gets the vote
	return m.SetHasVote(true)
}
// Run initializes state for an environment.
func (c *BootstrapCommand) Run(_ *cmd.Context) error {
	envCfg, err := config.New(config.NoDefaults, c.EnvConfig)
	if err != nil {
		return err
	}
	err = c.ReadConfig("machine-0")
	if err != nil {
		return err
	}
	agentConfig := c.CurrentConfig()

	// agent.Jobs is an optional field in the agent config, and was
	// introduced after 1.17.2. We default to allowing units on
	// machine-0 if missing.
	jobs := agentConfig.Jobs()
	if len(jobs) == 0 {
		jobs = []params.MachineJob{
			params.JobManageEnviron,
			params.JobHostUnits,
		}
	}

	// Get the bootstrap machine's addresses from the provider.
	env, err := environs.New(envCfg)
	if err != nil {
		return err
	}
	instanceId := instance.Id(c.InstanceId)
	instances, err := env.Instances([]instance.Id{instanceId})
	if err != nil {
		return err
	}
	addrs, err := instances[0].Addresses()
	if err != nil {
		return err
	}

	// Create system-identity file
	if err := agent.WriteSystemIdentityFile(agentConfig); err != nil {
		return err
	}

	// Generate a shared secret for the Mongo replica set, and write it out.
	sharedSecret, err := mongo.GenerateSharedSecret()
	if err != nil {
		return err
	}
	info, ok := agentConfig.StateServingInfo()
	if !ok {
		return fmt.Errorf("bootstrap machine config has no state serving info")
	}
	info.SharedSecret = sharedSecret
	err = c.ChangeConfig(func(agentConfig agent.ConfigSetter) {
		agentConfig.SetStateServingInfo(info)
	})
	if err != nil {
		return fmt.Errorf("cannot write agent config: %v", err)
	}
	agentConfig = c.CurrentConfig()

	if err := c.startMongo(addrs, agentConfig); err != nil {
		return err
	}
	logger.Infof("started mongo")

	// Initialise state, and store any agent config (e.g. password) changes.
	var st *state.State
	var m *state.Machine
	err = nil
	writeErr := c.ChangeConfig(func(agentConfig agent.ConfigSetter) {
		st, m, err = agent.InitializeState(
			agentConfig,
			envCfg,
			agent.BootstrapMachineConfig{
				Addresses:       addrs,
				Constraints:     c.Constraints,
				Jobs:            jobs,
				InstanceId:      instanceId,
				Characteristics: c.Hardware,
				SharedSecret:    sharedSecret,
			},
			mongo.DefaultDialOpts(),
			environs.NewStatePolicy(),
		)
	})
	if writeErr != nil {
		return fmt.Errorf("cannot write initial configuration: %v", writeErr)
	}
	if err != nil {
		return err
	}
	defer st.Close()

	// bootstrap machine always gets the vote
	return m.SetHasVote(true)
}
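// Standalone sketch (not part of the agent source): the oldest variant above
// uses a ChangeConfig callback with no return value, so the results of
// InitializeState (st, m, err) are captured by the closure and checked only
// after the config write. changeConfig below is an illustrative stand-in for
// that method.
package main

import "fmt"

// changeConfig runs the mutation and reports only whether the config write
// succeeded, mirroring the callback-without-error-return shape above.
func changeConfig(mutate func()) error {
	mutate()
	return nil // pretend the write succeeded
}

func main() {
	var result string
	var initErr error
	writeErr := changeConfig(func() {
		// Captured variables carry the initialisation outcome out of the callback.
		result, initErr = "state initialised", nil
	})
	if writeErr != nil {
		fmt.Println("cannot write initial configuration:", writeErr)
		return
	}
	if initErr != nil {
		fmt.Println("initialisation failed:", initErr)
		return
	}
	fmt.Println(result)
}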