// Prepare prepares a new environment based on the provided configuration.
// If the environment is already prepared, it behaves like New.
func Prepare(cfg *config.Config, ctx BootstrapContext, store configstore.Storage) (Environ, error) {
	if p, err := Provider(cfg.Type()); err != nil {
		return nil, err
	} else if info, err := store.ReadInfo(cfg.Name()); errors.IsNotFound(errors.Cause(err)) {
		info = store.CreateInfo(cfg.Name())
		if env, err := prepare(ctx, cfg, info, p); err == nil {
			return env, decorateAndWriteInfo(info, env.Config())
		} else {
			if err := info.Destroy(); err != nil {
				logger.Warningf("cannot destroy newly created environment info: %v", err)
			}
			return nil, err
		}
	} else if err != nil {
		return nil, errors.Annotatef(err, "error reading environment info %q", cfg.Name())
	} else if !info.Initialized() {
		return nil, errors.Errorf(
			"found uninitialized environment info for %q; environment preparation probably in progress or interrupted",
			cfg.Name(),
		)
	} else if len(info.BootstrapConfig()) == 0 {
		return nil, errors.New("found environment info but no bootstrap config")
	} else {
		cfg, err = config.New(config.NoDefaults, info.BootstrapConfig())
		if err != nil {
			return nil, errors.Annotate(err, "cannot parse bootstrap config")
		}
		return New(cfg)
	}
}
// finalizeConfig creates the config object from attributes, calls
// PrepareForCreateEnvironment, and then finally validates the config
// before returning it.
func finalizeConfig(isAdmin bool, controllerCfg *config.Config, attrs map[string]interface{}) (*config.Config, error) {
	provider, err := environs.Provider(controllerCfg.Type())
	if err != nil {
		return nil, errors.Trace(err)
	}
	// Controller admins creating models do not have to re-supply new secrets.
	// These may be copied from the controller model if not supplied.
	if isAdmin {
		maybeCopyControllerSecrets(provider, controllerCfg.AllAttrs(), attrs)
	}
	cfg, err := config.New(config.UseDefaults, attrs)
	if err != nil {
		return nil, errors.Annotate(err, "creating config from values failed")
	}
	cfg, err = provider.PrepareForCreateEnvironment(cfg)
	if err != nil {
		return nil, errors.Trace(err)
	}
	cfg, err = provider.Validate(cfg, nil)
	if err != nil {
		return nil, errors.Annotate(err, "provider validation failed")
	}
	return cfg, nil
}
// NewModelConfig returns a new model config given a base (controller) config
// and a set of attributes that will be specific to the new model, overriding
// any non-restricted attributes in the base configuration. The resulting
// config will be suitable for creating a new model in state.
//
// If "attrs" does not include a UUID, a new, random one will be generated
// and added to the config.
//
// The config will be validated with the provider before being returned.
func (c ModelConfigCreator) NewModelConfig(
	isAdmin bool,
	base *config.Config,
	attrs map[string]interface{},
) (*config.Config, error) {

	if err := c.checkVersion(base, attrs); err != nil {
		return nil, errors.Trace(err)
	}

	// Before comparing any values, we need to push the config through
	// the provider validation code. One of the reasons for this is that
	// numbers being serialized through JSON get turned into float64. The
	// schema code used in config will convert these back into integers.
	// However, before we can create a valid config, we need to make sure
	// we copy across fields from the main config that aren't there.
	baseAttrs := base.AllAttrs()
	restrictedFields, err := RestrictedProviderFields(base.Type())
	if err != nil {
		return nil, errors.Trace(err)
	}
	for _, field := range restrictedFields {
		if _, ok := attrs[field]; !ok {
			if baseValue, ok := baseAttrs[field]; ok {
				attrs[field] = baseValue
			}
		}
	}

	// Generate a new UUID for the model as necessary,
	// and finalize the new config.
	if _, ok := attrs[config.UUIDKey]; !ok {
		uuid, err := utils.NewUUID()
		if err != nil {
			return nil, errors.Trace(err)
		}
		attrs[config.UUIDKey] = uuid.String()
	}
	cfg, err := finalizeConfig(isAdmin, base, attrs)
	if err != nil {
		return nil, errors.Trace(err)
	}
	attrs = cfg.AllAttrs()

	// Any values that would normally be copied from the controller
	// config can also be defined, but if they differ from the controller
	// values, an error is returned.
	for _, field := range restrictedFields {
		if value, ok := attrs[field]; ok {
			if serverValue := baseAttrs[field]; value != serverValue {
				return nil, errors.Errorf(
					"specified %s \"%v\" does not match controller \"%v\"",
					field, value, serverValue)
			}
		}
	}

	return cfg, nil
}
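// A hypothetical usage sketch, not taken from the repository: it assumes a
// zero-value ModelConfigCreator is sufficient, that the controller config has
// already been loaded elsewhere, and that the function name and the "name"
// attribute are placeholders. It only exercises the signature shown above.
func newModelConfigExample(controllerCfg *config.Config) (*config.Config, error) {
	creator := ModelConfigCreator{}
	// Only model-specific attributes are supplied; restricted fields and the
	// UUID are filled in by NewModelConfig itself.
	attrs := map[string]interface{}{
		"name": "my-new-model",
	}
	return creator.NewModelConfig(true, controllerCfg, attrs)
}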
// FinishMachineConfig sets fields on a MachineConfig that can be determined by
// inspecting a plain config.Config and the machine constraints at the last
// moment before bootstrapping. It assumes that the supplied Config comes from
// an environment that has passed through all the validation checks in the
// Bootstrap func, and that has set an agent-version (via finding the tools to
// use for bootstrap, or otherwise).
// TODO(fwereade) This function is not meant to be "good" in any serious way:
// it is better that this functionality be collected in one place here than
// that it be spread out across 3 or 4 providers, but this is its only
// redeeming feature.
func FinishMachineConfig(mcfg *cloudinit.MachineConfig, cfg *config.Config, cons constraints.Value) (err error) {
	defer errors.Maskf(&err, "cannot complete machine configuration")

	if err := PopulateMachineConfig(
		mcfg,
		cfg.Type(),
		cfg.AuthorizedKeys(),
		cfg.SSLHostnameVerification(),
		cfg.ProxySettings(),
		cfg.AptProxySettings(),
		cfg.PreferIPv6(),
	); err != nil {
		return err
	}

	// The following settings are only appropriate at bootstrap time. At the
	// moment, the only state server is the bootstrap node, but this
	// will probably change.
	if !mcfg.Bootstrap {
		return nil
	}
	if mcfg.APIInfo != nil || mcfg.MongoInfo != nil {
		return fmt.Errorf("machine configuration already has api/state info")
	}
	caCert, hasCACert := cfg.CACert()
	if !hasCACert {
		return fmt.Errorf("environment configuration has no ca-cert")
	}
	password := cfg.AdminSecret()
	if password == "" {
		return fmt.Errorf("environment configuration has no admin-secret")
	}
	passwordHash := utils.UserPasswordHash(password, utils.CompatSalt)
	mcfg.APIInfo = &api.Info{Password: passwordHash, CACert: caCert}
	mcfg.MongoInfo = &authentication.MongoInfo{Password: passwordHash, Info: mongo.Info{CACert: caCert}}

	// These really are directly relevant to running a state server.
	cert, key, err := cfg.GenerateStateServerCertAndKey()
	if err != nil {
		return errors.Annotate(err, "cannot generate state server certificate")
	}

	srvInfo := params.StateServingInfo{
		StatePort:      cfg.StatePort(),
		APIPort:        cfg.APIPort(),
		Cert:           string(cert),
		PrivateKey:     string(key),
		SystemIdentity: mcfg.SystemPrivateSSHKey,
	}
	mcfg.StateServingInfo = &srvInfo
	mcfg.Constraints = cons
	if mcfg.Config, err = BootstrapConfig(cfg); err != nil {
		return err
	}
	return nil
}
// TODO(anastasiamac) 2014-10-20 Bug#1383116
// This exists to provide more context to the user about
// why they cannot allocate units to machine 0. Remove
// this when the local provider's machine 0 is a container.
// TODO(cherylj) Unexport CheckProvider once deploy is moved under service
func (c *UnitCommandBase) CheckProvider(conf *config.Config) error {
	isMachineZero := c.PlacementSpec == "0"
	for _, p := range c.Placement {
		isMachineZero = isMachineZero || (p.Scope == instance.MachineScope && p.Directive == "0")
	}
	if conf.Type() == provider.Local && isMachineZero {
		return errors.New("machine 0 is the state server for a local environment and cannot host units")
	}
	return nil
}
// Prepare prepares a new environment based on the provided configuration.
// If the environment is already prepared, it behaves like New.
func Prepare(cfg *config.Config, ctx BootstrapContext, store configstore.Storage) (Environ, error) {
	p, err := Provider(cfg.Type())
	if err != nil {
		return nil, err
	}
	info, err := store.ReadInfo(cfg.Name())
	if errors.IsNotFound(errors.Cause(err)) {
		info = store.CreateInfo(cfg.Name())
		env, err := prepare(ctx, cfg, info, p)
		if err != nil {
			if err := info.Destroy(); err != nil {
				logger.Warningf("cannot destroy newly created environment info: %v", err)
			}
			return nil, err
		}
		cfg = env.Config()
		creds := configstore.APICredentials{
			User:     "******", // TODO(waigani) admin@local once we have that set
			Password: cfg.AdminSecret(),
		}
		info.SetAPICredentials(creds)
		endpoint := configstore.APIEndpoint{}
		var ok bool
		endpoint.CACert, ok = cfg.CACert()
		if !ok {
			return nil, errors.Errorf("CACert is not set")
		}
		endpoint.EnvironUUID, ok = cfg.UUID()
		if !ok {
			return nil, errors.Errorf("environment UUID is not set")
		}
		info.SetAPIEndpoint(endpoint)
		info.SetBootstrapConfig(env.Config().AllAttrs())
		if err := info.Write(); err != nil {
			return nil, errors.Annotatef(err, "cannot create environment info %q", env.Config().Name())
		}
		return env, nil
	}
	if err != nil {
		return nil, errors.Annotatef(err, "error reading environment info %q", cfg.Name())
	}
	if !info.Initialized() {
		return nil, errors.Errorf("found uninitialized environment info for %q; environment preparation probably in progress or interrupted", cfg.Name())
	}
	if len(info.BootstrapConfig()) == 0 {
		return nil, errors.New("found environment info but no bootstrap config")
	}
	cfg, err = config.New(config.NoDefaults, info.BootstrapConfig())
	if err != nil {
		return nil, errors.Annotate(err, "cannot parse bootstrap config")
	}
	return New(cfg)
}
// validate calls the state's assigned policy, if non-nil, to obtain
// a ConfigValidator, and calls Validate if a non-nil ConfigValidator is
// returned.
func (st *State) validate(cfg, old *config.Config) (valid *config.Config, err error) {
	if st.policy == nil {
		return cfg, nil
	}
	configValidator, err := st.policy.ConfigValidator(cfg.Type())
	if errors.IsNotImplemented(err) {
		return cfg, nil
	} else if err != nil {
		return nil, err
	}
	if configValidator == nil {
		return nil, fmt.Errorf("policy returned nil configValidator without an error")
	}
	return configValidator.Validate(cfg, old)
}
// New returns a new environment based on the provided configuration.
func New(config *config.Config) (Environ, error) {
	p, err := Provider(config.Type())
	if err != nil {
		return nil, err
	}
	return p.Open(config)
}
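// A minimal usage sketch (not part of the original source): it assumes the
// attribute map already holds a complete, previously prepared configuration,
// and simply pairs config.New with New in the same way the Prepare variants
// above and below do. The function name is a placeholder.
func openEnvironExample(attrs map[string]interface{}) (Environ, error) {
	cfg, err := config.New(config.NoDefaults, attrs)
	if err != nil {
		return nil, errors.Annotate(err, "cannot parse config")
	}
	return New(cfg)
}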
// Prepare prepares a new environment based on the provided configuration.
// If the environment is already prepared, it behaves like New.
func Prepare(cfg *config.Config, ctx BootstrapContext, store configstore.Storage) (Environ, error) {
	p, err := Provider(cfg.Type())
	if err != nil {
		return nil, err
	}
	info, err := store.CreateInfo(cfg.Name())
	if err == configstore.ErrEnvironInfoAlreadyExists {
		logger.Infof("environment info already exists; using New not Prepare")
		info, err := store.ReadInfo(cfg.Name())
		if err != nil {
			return nil, fmt.Errorf("error reading environment info %q: %v", cfg.Name(), err)
		}
		if !info.Initialized() {
			return nil, fmt.Errorf("found uninitialized environment info for %q; environment preparation probably in progress or interrupted", cfg.Name())
		}
		if len(info.BootstrapConfig()) == 0 {
			return nil, fmt.Errorf("found environment info but no bootstrap config")
		}
		cfg, err = config.New(config.NoDefaults, info.BootstrapConfig())
		if err != nil {
			return nil, fmt.Errorf("cannot parse bootstrap config: %v", err)
		}
		return New(cfg)
	}
	if err != nil {
		return nil, fmt.Errorf("cannot create new info for environment %q: %v", cfg.Name(), err)
	}
	env, err := prepare(ctx, cfg, info, p)
	if err != nil {
		if err := info.Destroy(); err != nil {
			logger.Warningf("cannot destroy newly created environment info: %v", err)
		}
		return nil, err
	}
	info.SetBootstrapConfig(env.Config().AllAttrs())
	if err := info.Write(); err != nil {
		return nil, fmt.Errorf("cannot create environment info %q: %v", env.Config().Name(), err)
	}
	return env, nil
}
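// A hedged sketch (not from the original source) of how a caller might drive
// Prepare: the wrapper function name and the way the BootstrapContext is
// obtained are assumptions; only configstore.Default and the Prepare
// signature above are taken from the surrounding code.
func prepareFromDefaultStore(ctx BootstrapContext, cfg *config.Config) (Environ, error) {
	store, err := configstore.Default()
	if err != nil {
		return nil, err
	}
	return Prepare(cfg, ctx, store)
}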
// FinishInstanceConfig sets fields on an InstanceConfig that can be determined by
// inspecting a plain config.Config and the machine constraints at the last
// moment before creating the user-data. It assumes that the supplied Config comes
// from an environment that has passed through all the validation checks in the
// Bootstrap func, and that has set an agent-version (via finding the tools to
// use for bootstrap, or otherwise).
// TODO(fwereade) This function is not meant to be "good" in any serious way:
// it is better that this functionality be collected in one place here than
// that it be spread out across 3 or 4 providers, but this is its only
// redeeming feature.
func FinishInstanceConfig(icfg *InstanceConfig, cfg *config.Config) (err error) {
	defer errors.DeferredAnnotatef(&err, "cannot complete machine configuration")

	if err := PopulateInstanceConfig(
		icfg,
		cfg.Type(),
		cfg.AuthorizedKeys(),
		cfg.SSLHostnameVerification(),
		cfg.ProxySettings(),
		cfg.AptProxySettings(),
		cfg.AptMirror(),
		cfg.EnableOSRefreshUpdate(),
		cfg.EnableOSUpgrade(),
	); err != nil {
		return errors.Trace(err)
	}

	if icfg.Controller != nil {
		// Add NUMACTL preference. Needed to work for both bootstrap and high availability
		// Only makes sense for controller
		logger.Debugf("Setting numa ctl preference to %v", icfg.Controller.Config.NUMACtlPreference())
		// Unfortunately, AgentEnvironment can only take strings as values
		icfg.AgentEnvironment[agent.NUMACtlPreference] = fmt.Sprintf("%v", icfg.Controller.Config.NUMACtlPreference())
	}
	return nil
}
// FinishInstanceConfig sets fields on an InstanceConfig that can be determined by
// inspecting a plain config.Config and the machine constraints at the last
// moment before bootstrapping. It assumes that the supplied Config comes from
// an environment that has passed through all the validation checks in the
// Bootstrap func, and that has set an agent-version (via finding the tools to
// use for bootstrap, or otherwise).
// TODO(fwereade) This function is not meant to be "good" in any serious way:
// it is better that this functionality be collected in one place here than
// that it be spread out across 3 or 4 providers, but this is its only
// redeeming feature.
func FinishInstanceConfig(icfg *InstanceConfig, cfg *config.Config) (err error) {
	defer errors.DeferredAnnotatef(&err, "cannot complete machine configuration")

	if err := PopulateInstanceConfig(
		icfg,
		cfg.Type(),
		cfg.AuthorizedKeys(),
		cfg.SSLHostnameVerification(),
		cfg.ProxySettings(),
		cfg.AptProxySettings(),
		cfg.AptMirror(),
		cfg.PreferIPv6(),
		cfg.EnableOSRefreshUpdate(),
		cfg.EnableOSUpgrade(),
	); err != nil {
		return errors.Trace(err)
	}

	if isStateInstanceConfig(icfg) {
		// Add NUMACTL preference. Needed to work for both bootstrap and high availability
		// Only makes sense for controller
		logger.Debugf("Setting numa ctl preference to %v", cfg.NumaCtlPreference())
		// Unfortunately, AgentEnvironment can only take strings as values
		icfg.AgentEnvironment[agent.NumaCtlPreference] = fmt.Sprintf("%v", cfg.NumaCtlPreference())
	}

	// The following settings are only appropriate at bootstrap time. At the
	// moment, the only controller is the bootstrap node, but this
	// will probably change.
	if !icfg.Bootstrap {
		return nil
	}
	if icfg.APIInfo != nil || icfg.MongoInfo != nil {
		return errors.New("machine configuration already has api/state info")
	}
	caCert, hasCACert := cfg.CACert()
	if !hasCACert {
		return errors.New("model configuration has no ca-cert")
	}
	password := cfg.AdminSecret()
	if password == "" {
		return errors.New("model configuration has no admin-secret")
	}
	icfg.APIInfo = &api.Info{
		Password: password,
		CACert:   caCert,
		ModelTag: names.NewModelTag(cfg.UUID()),
	}
	icfg.MongoInfo = &mongo.MongoInfo{Password: password, Info: mongo.Info{CACert: caCert}}

	// These really are directly relevant to running a controller.
	// Initially, generate a controller certificate with no host IP
	// addresses in the SAN field. Once the controller is up and the
	// NIC addresses become known, the certificate can be regenerated.
	cert, key, err := cfg.GenerateControllerCertAndKey(nil)
	if err != nil {
		return errors.Annotate(err, "cannot generate controller certificate")
	}
	caPrivateKey, hasCAPrivateKey := cfg.CAPrivateKey()
	if !hasCAPrivateKey {
		return errors.New("model configuration has no ca-private-key")
	}
	srvInfo := params.StateServingInfo{
		StatePort:    cfg.StatePort(),
		APIPort:      cfg.APIPort(),
		Cert:         string(cert),
		PrivateKey:   string(key),
		CAPrivateKey: caPrivateKey,
	}
	icfg.StateServingInfo = &srvInfo
	if icfg.Config, err = bootstrapConfig(cfg); err != nil {
		return errors.Trace(err)
	}
	return nil
}
func (c *addCommand) Run(ctx *cmd.Context) error {
	client, err := c.getClientAPI()
	if err != nil {
		return errors.Trace(err)
	}
	defer client.Close()

	var machineManager MachineManagerAPI
	if len(c.Disks) > 0 {
		machineManager, err = c.getMachineManagerAPI()
		if err != nil {
			return errors.Trace(err)
		}
		defer machineManager.Close()
		if machineManager.BestAPIVersion() < 1 {
			return errors.New("cannot add machines with disks: not supported by the API server")
		}
	}

	logger.Infof("load config")
	var config *config.Config
	if defaultStore, err := configstore.Default(); err != nil {
		return err
	} else if config, err = c.Config(defaultStore, client); err != nil {
		return err
	}

	if c.Placement != nil && c.Placement.Scope == "ssh" {
		logger.Infof("manual provisioning")
		args := manual.ProvisionMachineArgs{
			Host:   c.Placement.Directive,
			Client: client,
			Stdin:  ctx.Stdin,
			Stdout: ctx.Stdout,
			Stderr: ctx.Stderr,
			UpdateBehavior: &params.UpdateBehavior{
				config.EnableOSRefreshUpdate(),
				config.EnableOSUpgrade(),
			},
		}
		machineId, err := manualProvisioner(args)
		if err == nil {
			ctx.Infof("created machine %v", machineId)
		}
		return err
	}

	logger.Infof("environment provisioning")
	if c.Placement != nil && c.Placement.Scope == "env-uuid" {
		c.Placement.Scope = client.EnvironmentUUID()
	}

	if c.Placement != nil && c.Placement.Scope == instance.MachineScope {
		// It does not make sense to add-machine <id>.
		return fmt.Errorf("machine-id cannot be specified when adding machines")
	}

	jobs := []multiwatcher.MachineJob{multiwatcher.JobHostUnits}

	envVersion, err := envcmd.GetEnvironmentVersion(client)
	if err != nil {
		return err
	}

	// Servers before 1.21-alpha2 don't have the networker so don't
	// try to use JobManageNetworking with them.
	//
	// In case of MAAS and Joyent JobManageNetworking is not added
	// to ensure the non-intrusive start of a networker like above
	// for the manual provisioning. See this related joyent bug
	// http://pad.lv/1401423
	if envVersion.Compare(version.MustParse("1.21-alpha2")) >= 0 &&
		config.Type() != provider.MAAS &&
		config.Type() != provider.Joyent {
		jobs = append(jobs, multiwatcher.JobManageNetworking)
	}

	machineParams := params.AddMachineParams{
		Placement:   c.Placement,
		Series:      c.Series,
		Constraints: c.Constraints,
		Jobs:        jobs,
		Disks:       c.Disks,
	}
	machines := make([]params.AddMachineParams, c.NumMachines)
	for i := 0; i < c.NumMachines; i++ {
		machines[i] = machineParams
	}

	var results []params.AddMachinesResult
	// If storage is specified, we attempt to use a new API on the service facade.
	if len(c.Disks) > 0 {
		results, err = machineManager.AddMachines(machines)
	} else {
		results, err = client.AddMachines(machines)
		if params.IsCodeNotImplemented(err) {
			if c.Placement != nil {
				containerType, parseErr := instance.ParseContainerType(c.Placement.Scope)
				if parseErr != nil {
					// The user specified a non-container placement directive:
					// return original API not implemented error.
					return err
				}
				machineParams.ContainerType = containerType
				machineParams.ParentId = c.Placement.Directive
				machineParams.Placement = nil
			}
			logger.Infof(
				"AddMachinesWithPlacement not supported by the API server, " +
					"falling back to 1.18 compatibility mode",
			)
			results, err = client.AddMachines1dot18([]params.AddMachineParams{machineParams})
		}
	}
	if params.IsCodeOperationBlocked(err) {
		return block.ProcessBlockedError(err, block.BlockChange)
	}
	if err != nil {
		return errors.Trace(err)
	}

	errs := []error{}
	for _, machineInfo := range results {
		if machineInfo.Error != nil {
			errs = append(errs, machineInfo.Error)
			continue
		}
		machineId := machineInfo.Machine

		if names.IsContainerMachine(machineId) {
			ctx.Infof("created container %v", machineId)
		} else {
			ctx.Infof("created machine %v", machineId)
		}
	}
	if len(errs) == 1 {
		fmt.Fprintf(ctx.Stderr, "failed to create 1 machine\n")
		return errs[0]
	}
	if len(errs) > 1 {
		fmt.Fprintf(ctx.Stderr, "failed to create %d machines\n", len(errs))
		returnErr := []string{}
		for _, e := range errs {
			returnErr = append(returnErr, e.Error())
		}
		return errors.New(strings.Join(returnErr, ", "))
	}
	return nil
}
// TODO(anastasiamac) 2014-10-20 Bug#1383116
// This exists to provide more context to the user about
// why they cannot allocate units to machine 0. Remove
// this when the local provider's machine 0 is a container.
// TODO(cherylj) Unexport CheckProvider once deploy is moved under service
func (c *UnitCommandBase) CheckProvider(conf *config.Config) error {
	if conf.Type() == provider.Local && c.ToMachineSpec == "0" {
		return errors.New("machine 0 is the state server for a local environment and cannot host units")
	}
	return nil
}