func (s *configureSuite) getCloudConfig(c *gc.C, stateServer bool, vers version.Binary) *cloudinit.Config {
	var mcfg *envcloudinit.MachineConfig
	var err error
	if stateServer {
		mcfg, err = environs.NewBootstrapMachineConfig(constraints.Value{}, vers.Series)
		c.Assert(err, gc.IsNil)
		mcfg.InstanceId = "instance-id"
		mcfg.Jobs = []params.MachineJob{params.JobManageEnviron, params.JobHostUnits}
	} else {
		mcfg, err = environs.NewMachineConfig("0", "ya", imagemetadata.ReleasedStream, vers.Series, nil, nil, nil)
		c.Assert(err, gc.IsNil)
		mcfg.Jobs = []params.MachineJob{params.JobHostUnits}
	}
	mcfg.Tools = &tools.Tools{
		Version: vers,
		URL:     "http://testing.invalid/tools.tar.gz",
	}
	environConfig := testConfig(c, stateServer, vers)
	err = environs.FinishMachineConfig(mcfg, environConfig)
	c.Assert(err, gc.IsNil)
	cloudcfg := cloudinit.New()
	udata, err := envcloudinit.NewUserdataConfig(mcfg, cloudcfg)
	c.Assert(err, gc.IsNil)
	err = udata.Configure()
	c.Assert(err, gc.IsNil)
	return cloudcfg
}
// FinishMachineConfig sets fields on a MachineConfig that can be determined by
// inspecting a plain config.Config and the machine constraints at the last
// moment before bootstrapping. It assumes that the supplied Config comes from
// an environment that has passed through all the validation checks in the
// Bootstrap func, and that has set an agent-version (via finding the tools to
// use for bootstrap, or otherwise).
// TODO(fwereade) This function is not meant to be "good" in any serious way:
// it is better that this functionality be collected in one place here than
// that it be spread out across 3 or 4 providers, but this is its only
// redeeming feature.
func FinishMachineConfig(mcfg *cloudinit.MachineConfig, cfg *config.Config, cons constraints.Value) (err error) {
	defer errors.Maskf(&err, "cannot complete machine configuration")

	if err := PopulateMachineConfig(
		mcfg,
		cfg.Type(),
		cfg.AuthorizedKeys(),
		cfg.SSLHostnameVerification(),
		cfg.ProxySettings(),
		cfg.AptProxySettings(),
		cfg.PreferIPv6(),
		cfg.EnableOSRefreshUpdate(),
		cfg.EnableOSUpgrade(),
	); err != nil {
		return err
	}

	// The following settings are only appropriate at bootstrap time. At the
	// moment, the only state server is the bootstrap node, but this
	// will probably change.
	if !mcfg.Bootstrap {
		return nil
	}
	if mcfg.APIInfo != nil || mcfg.MongoInfo != nil {
		return fmt.Errorf("machine configuration already has api/state info")
	}
	caCert, hasCACert := cfg.CACert()
	if !hasCACert {
		return fmt.Errorf("environment configuration has no ca-cert")
	}
	password := cfg.AdminSecret()
	if password == "" {
		return fmt.Errorf("environment configuration has no admin-secret")
	}
	passwordHash := utils.UserPasswordHash(password, utils.CompatSalt)
	mcfg.APIInfo = &api.Info{Password: passwordHash, CACert: caCert}
	mcfg.MongoInfo = &authentication.MongoInfo{Password: passwordHash, Info: mongo.Info{CACert: caCert}}

	// These really are directly relevant to running a state server.
	cert, key, err := cfg.GenerateStateServerCertAndKey()
	if err != nil {
		return errors.Annotate(err, "cannot generate state server certificate")
	}
	srvInfo := params.StateServingInfo{
		StatePort:      cfg.StatePort(),
		APIPort:        cfg.APIPort(),
		Cert:           string(cert),
		PrivateKey:     string(key),
		SystemIdentity: mcfg.SystemPrivateSSHKey,
	}
	mcfg.StateServingInfo = &srvInfo
	mcfg.Constraints = cons
	if mcfg.Config, err = BootstrapConfig(cfg); err != nil {
		return err
	}
	return nil
}
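// A minimal, runnable sketch (not from the juju sources) of the deferred
// error-annotation pattern FinishMachineConfig relies on via errors.Maskf: a
// named return value lets a single deferred call prefix every error path with
// the same context message. The maskf helper below is a stand-in for
// errors.Maskf, written against the standard library only.
package main

import "fmt"

// maskf mimics the shape of errors.Maskf for illustration: it rewrites the
// named return value in place when, and only when, it is non-nil.
func maskf(err *error, msg string) {
	if *err != nil {
		*err = fmt.Errorf("%s: %v", msg, *err)
	}
}

func finish(haveCACert bool) (err error) {
	defer maskf(&err, "cannot complete machine configuration")
	if !haveCACert {
		return fmt.Errorf("environment configuration has no ca-cert")
	}
	return nil
}

func main() {
	// Prints:
	// cannot complete machine configuration: environment configuration has no ca-cert
	fmt.Println(finish(false))
}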
// PopulateMachineConfig is called both from FinishMachineConfig above, which
// does have access to the environment config, and from the container
// provisioners, which don't have access to the environment config. Everything
// that is needed to provision a container needs to be returned to the
// provisioner in the ContainerConfig structure. Those values are then used to
// call this function.
func PopulateMachineConfig(mcfg *cloudinit.MachineConfig,
	providerType, authorizedKeys string,
	sslHostnameVerification bool,
	proxySettings, aptProxySettings proxy.Settings,
	preferIPv6 bool,
	enableOSRefreshUpdates bool,
	enableOSUpgrade bool,
) error {
	if authorizedKeys == "" {
		return fmt.Errorf("environment configuration has no authorized-keys")
	}
	mcfg.AuthorizedKeys = authorizedKeys
	if mcfg.AgentEnvironment == nil {
		mcfg.AgentEnvironment = make(map[string]string)
	}
	mcfg.AgentEnvironment[agent.ProviderType] = providerType
	mcfg.AgentEnvironment[agent.ContainerType] = string(mcfg.MachineContainerType)
	mcfg.DisableSSLHostnameVerification = !sslHostnameVerification
	mcfg.ProxySettings = proxySettings
	mcfg.AptProxySettings = aptProxySettings
	mcfg.PreferIPv6 = preferIPv6
	mcfg.EnableOSRefreshUpdate = enableOSRefreshUpdates
	mcfg.EnableOSUpgrade = enableOSUpgrade
	return nil
}
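// A hedged usage sketch for the container-provisioner path described in the
// comment above: every argument is a value the provisioner received over the
// API rather than read from an environment config. The literal values and the
// proxy endpoint are illustrative only; the call itself follows the signature
// above.
func examplePopulateForContainer(mcfg *cloudinit.MachineConfig) error {
	proxySettings := proxy.Settings{Http: "http://10.0.3.1:8000"} // assumed proxy endpoint
	return PopulateMachineConfig(
		mcfg,
		"lxc",             // providerType
		"ssh-rsa AAAA...", // authorizedKeys (placeholder key)
		true,              // sslHostnameVerification
		proxySettings,     // proxySettings
		proxySettings,     // aptProxySettings
		false,             // preferIPv6
		true,              // enableOSRefreshUpdates
		false,             // enableOSUpgrade
	)
}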
func assocProvInfoAndMachCfg(
	provInfo *params.ProvisioningInfo,
	machineConfig *cloudinit.MachineConfig,
) *provisioningInfo {
	machineConfig.Networks = provInfo.Networks
	if len(provInfo.Jobs) > 0 {
		machineConfig.Jobs = provInfo.Jobs
	}
	return &provisioningInfo{
		Constraints:   provInfo.Constraints,
		Series:        provInfo.Series,
		Placement:     provInfo.Placement,
		MachineConfig: machineConfig,
	}
}
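// A hedged sketch of where assocProvInfoAndMachCfg sits in the provisioner
// flow: a ProvisioningInfo result, as it might arrive from the provisioner
// API (the field values here are illustrative), is folded into the locally
// built MachineConfig, and the merged provisioningInfo is what the broker
// ultimately consumes.
func exampleAssoc(machineConfig *cloudinit.MachineConfig) *provisioningInfo {
	provInfo := &params.ProvisioningInfo{
		Series:    "trusty",
		Placement: "zone=us-east-1a",
	}
	return assocProvInfoAndMachCfg(provInfo, machineConfig)
}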
func (s *configureSuite) getCloudConfig(c *gc.C, stateServer bool, vers version.Binary) *cloudinit.Config {
	var mcfg *envcloudinit.MachineConfig
	if stateServer {
		mcfg = environs.NewBootstrapMachineConfig("private-key")
		mcfg.InstanceId = "instance-id"
		mcfg.Jobs = []params.MachineJob{params.JobManageEnviron, params.JobHostUnits}
	} else {
		mcfg = environs.NewMachineConfig("0", "ya", nil, nil, nil)
		mcfg.Jobs = []params.MachineJob{params.JobHostUnits}
	}
	mcfg.Tools = &tools.Tools{
		Version: vers,
		URL:     "file:///var/lib/juju/storage/" + envtools.StorageName(vers),
	}
	environConfig := testConfig(c, stateServer, vers)
	err := environs.FinishMachineConfig(mcfg, environConfig, constraints.Value{})
	c.Assert(err, gc.IsNil)
	cloudcfg := cloudinit.New()
	err = envcloudinit.Configure(mcfg, cloudcfg)
	c.Assert(err, gc.IsNil)
	return cloudcfg
}
func (manager *containerManager) CreateContainer(
	machineConfig *cloudinit.MachineConfig,
	series string,
	network *container.NetworkConfig,
) (instance.Instance, *instance.HardwareCharacteristics, error) {
	start := time.Now()
	name := names.NewMachineTag(machineConfig.MachineId).String()
	if manager.name != "" {
		name = fmt.Sprintf("%s-%s", manager.name, name)
	}
	// Create the cloud-init.
	directory, err := container.NewDirectory(name)
	if err != nil {
		return nil, nil, err
	}
	logger.Tracef("write cloud-init")
	if manager.createWithClone {
		// If we are using clone, disable the apt-get steps.
		machineConfig.DisablePackageCommands = true
	}
	userDataFilename, err := container.WriteUserData(machineConfig, directory)
	if err != nil {
		logger.Errorf("failed to write user data: %v", err)
		return nil, nil, err
	}
	logger.Tracef("write the lxc.conf file")
	configFile, err := writeLxcConfig(network, directory)
	if err != nil {
		logger.Errorf("failed to write config file: %v", err)
		return nil, nil, err
	}
	var lxcContainer golxc.Container
	if manager.createWithClone {
		templateContainer, err := EnsureCloneTemplate(
			manager.backingFilesystem,
			series,
			network,
			machineConfig.AuthorizedKeys,
			machineConfig.AptProxySettings,
		)
		if err != nil {
			return nil, nil, err
		}
		templateParams := []string{
			"--debug",                      // Debug errors in the cloud image
			"--userdata", userDataFilename, // Our groovy cloud-init
			"--hostid", name, // Use the container name as the hostid
		}
		var extraCloneArgs []string
		if manager.backingFilesystem == Btrfs || manager.useAUFS {
			extraCloneArgs = append(extraCloneArgs, "--snapshot")
		}
		if manager.backingFilesystem != Btrfs && manager.useAUFS {
			extraCloneArgs = append(extraCloneArgs, "--backingstore", "aufs")
		}
		lock, err := AcquireTemplateLock(templateContainer.Name(), "clone")
		if err != nil {
			return nil, nil, fmt.Errorf("failed to acquire lock on template: %v", err)
		}
		defer lock.Unlock()
		lxcContainer, err = templateContainer.Clone(name, extraCloneArgs, templateParams)
		if err != nil {
			logger.Errorf("lxc container cloning failed: %v", err)
			return nil, nil, err
		}
	} else {
		// Note here that the LxcObjectFactory only returns a valid container
		// object, and doesn't actually construct the underlying lxc container
		// on disk.
		lxcContainer = LxcObjectFactory.New(name)
		templateParams := []string{
			"--debug",                      // Debug errors in the cloud image
			"--userdata", userDataFilename, // Our groovy cloud-init
			"--hostid", name, // Use the container name as the hostid
			"-r", series,
		}
		// Create the container.
		logger.Tracef("create the container")
		if err := lxcContainer.Create(configFile, defaultTemplate, nil, templateParams); err != nil {
			logger.Errorf("lxc container creation failed: %v", err)
			return nil, nil, err
		}
		logger.Tracef("lxc container created")
	}
	if err := autostartContainer(name); err != nil {
		return nil, nil, err
	}
	if err := mountHostLogDir(name, manager.logdir); err != nil {
		return nil, nil, err
	}
	// Start the lxc container with the appropriate settings for grabbing the
	// console output and a log file.
	consoleFile := filepath.Join(directory, "console.log")
	lxcContainer.SetLogFile(filepath.Join(directory, "container.log"), golxc.LogDebug)
	logger.Tracef("start the container")
	// We explicitly don't pass through the config file to the container.Start
	// method as we have passed it through at container creation time. This
	// is necessary to get the appropriate rootfs reference without explicitly
	// setting it ourselves.
	if err = lxcContainer.Start("", consoleFile); err != nil {
		logger.Errorf("container failed to start: %v", err)
		return nil, nil, err
	}
	arch := version.Current.Arch
	hardware := &instance.HardwareCharacteristics{
		Arch: &arch,
	}
	logger.Tracef("container %q started: %v", name, time.Since(start))
	return &lxcInstance{lxcContainer, name}, hardware, nil
}
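// A hedged sketch of driving CreateContainer from a broker: the manager is
// assumed to have been built elsewhere, and container.BridgeNetworkConfig
// plus the "lxcbr0" device name are assumptions for illustration; only the
// CreateContainer signature is taken from the method above.
func exampleCreateContainer(manager *containerManager, machineConfig *cloudinit.MachineConfig) (instance.Instance, error) {
	network := container.BridgeNetworkConfig("lxcbr0") // assumed helper and bridge device
	inst, hardware, err := manager.CreateContainer(machineConfig, "trusty", network)
	if err != nil {
		return nil, err
	}
	logger.Infof("created container %q (arch %v)", inst.Id(), *hardware.Arch)
	return inst, nil
}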