func (e *environ) Bootstrap(ctx environs.BootstrapContext, args environs.BootstrapParams) error {
	selectedTools, err := common.EnsureBootstrapTools(ctx, e, config.PreferredSeries(e.Config()), args.Constraints.Arch)
	if err != nil {
		return err
	}

	defer delay()
	if err := e.checkBroken("Bootstrap"); err != nil {
		return err
	}
	// Refuse to bootstrap without an admin secret or a CA certificate,
	// just like a real provider would.
	password := e.Config().AdminSecret()
	if password == "" {
		return fmt.Errorf("admin-secret is required for bootstrap")
	}
	if _, ok := e.Config().CACert(); !ok {
		return fmt.Errorf("no CA certificate in environment configuration")
	}

	logger.Infof("would pick tools from %s", selectedTools)
	cfg, err := environs.BootstrapConfig(e.Config())
	if err != nil {
		return fmt.Errorf("cannot make bootstrap config: %v", err)
	}

	estate, err := e.state()
	if err != nil {
		return err
	}
	estate.mu.Lock()
	defer estate.mu.Unlock()
	if estate.bootstrapped {
		return fmt.Errorf("environment is already bootstrapped")
	}

	// Write the bootstrap file just like a normal provider. However
	// we need to release the mutex for the save state to work, so regain
	// it after the call.
	estate.mu.Unlock()
	if err := bootstrap.SaveState(e.Storage(), &bootstrap.BootstrapState{StateInstances: []instance.Id{"localhost"}}); err != nil {
		logger.Errorf("failed to save state instances: %v", err)
		estate.mu.Lock() // otherwise deferred unlock will fail
		return err
	}
	estate.mu.Lock() // back at it

	if e.ecfg().stateServer() {
		// TODO(rog) factor out relevant code from cmd/jujud/bootstrap.go
		// so that we can call it here.

		info := stateInfo()
		st, err := state.Initialize(info, cfg, state.DefaultDialOpts(), estate.statePolicy)
		if err != nil {
			panic(err)
		}
		if err := st.SetEnvironConstraints(args.Constraints); err != nil {
			panic(err)
		}
		if err := st.SetAdminMongoPassword(utils.UserPasswordHash(password, utils.CompatSalt)); err != nil {
			panic(err)
		}
		_, err = st.AddUser("admin", password)
		if err != nil {
			panic(err)
		}
		estate.apiServer, err = apiserver.NewServer(st, "localhost:0", []byte(testing.ServerCert), []byte(testing.ServerKey), DataDir, LogDir)
		if err != nil {
			panic(err)
		}
		estate.apiState = st
	}
	estate.bootstrapped = true
	estate.ops <- OpBootstrap{Context: ctx, Env: e.name, Args: args}
	return nil
}
// StateWorker returns a worker running all the workers that require
// a *state.State connection.
func (a *MachineAgent) StateWorker() (worker.Worker, error) {
	agentConfig := a.CurrentConfig()

	// Create the system-identity file.
	if err := agent.WriteSystemIdentityFile(agentConfig); err != nil {
		return nil, err
	}

	// Start the MongoDB server.
	if err := a.ensureMongoServer(agentConfig); err != nil {
		return nil, err
	}
	st, m, err := openState(agentConfig)
	if err != nil {
		return nil, err
	}
	reportOpenedState(st)

	singularStateConn := singularStateConn{st.MongoSession(), m}
	runner := newRunner(connectionIsFatal(st), moreImportant)
	singularRunner, err := newSingularRunner(runner, singularStateConn)
	if err != nil {
		return nil, fmt.Errorf("cannot make singular State Runner: %v", err)
	}

	// Take advantage of special knowledge here in that we will only ever want
	// the storage provider on one machine, and that is the "bootstrap" node.
	providerType := agentConfig.Value(agent.ProviderType)
	if (providerType == provider.Local || provider.IsManual(providerType)) && m.Id() == bootstrapMachineId {
		a.startWorkerAfterUpgrade(runner, "local-storage", func() (worker.Worker, error) {
			// TODO(axw) 2013-09-24 bug #1229507
			// Make another job to enable storage.
			// There's nothing special about this.
			return localstorage.NewWorker(agentConfig), nil
		})
	}
	for _, job := range m.Jobs() {
		switch job {
		case state.JobHostUnits:
			// Implemented in APIWorker.
		case state.JobManageEnviron:
			useMultipleCPUs()
			a.startWorkerAfterUpgrade(runner, "instancepoller", func() (worker.Worker, error) {
				return instancepoller.NewWorker(st), nil
			})
			a.startWorkerAfterUpgrade(runner, "peergrouper", func() (worker.Worker, error) {
				return peergrouperNew(st)
			})
			runner.StartWorker("apiserver", func() (worker.Worker, error) {
				// If the configuration does not have the required information,
				// it is currently not a recoverable error, so we kill the whole
				// agent, potentially enabling human intervention to fix
				// the agent's configuration file. In the future, we may retrieve
				// the state server certificate and key from the state, and
				// this should then change.
				info, ok := agentConfig.StateServingInfo()
				if !ok {
					return nil, &fatalError{"StateServingInfo not available and we need it"}
				}
				port := info.APIPort
				cert := []byte(info.Cert)
				key := []byte(info.PrivateKey)

				if len(cert) == 0 || len(key) == 0 {
					return nil, &fatalError{"configuration does not have state server cert/key"}
				}
				dataDir := agentConfig.DataDir()
				logDir := agentConfig.LogDir()
				return apiserver.NewServer(
					st, fmt.Sprintf(":%d", port), cert, key, dataDir, logDir)
			})
			a.startWorkerAfterUpgrade(singularRunner, "cleaner", func() (worker.Worker, error) {
				return cleaner.NewCleaner(st), nil
			})
			a.startWorkerAfterUpgrade(singularRunner, "resumer", func() (worker.Worker, error) {
				// The action of resumer is so subtle that it is not tested,
				// because we can't figure out how to do so without brutalising
				// the transaction log.
				return resumer.NewResumer(st), nil
			})
			a.startWorkerAfterUpgrade(singularRunner, "minunitsworker", func() (worker.Worker, error) {
				return minunitsworker.NewMinUnitsWorker(st), nil
			})
		case state.JobManageStateDeprecated:
			// Legacy environments may set this, but we ignore it.
		default:
			logger.Warningf("ignoring unknown job %q", job)
		}
	}
	return newCloseWorker(runner, st), nil
}