func cloudInitUserData(
	machineId, nonce string,
	tools *tools.Tools,
	environConfig *config.Config,
	stateInfo *state.Info,
	apiInfo *api.Info,
) ([]byte, error) {
	machineConfig := &cloudinit.MachineConfig{
		MachineId:            machineId,
		MachineNonce:         nonce,
		MachineContainerType: instance.LXC,
		StateInfo:            stateInfo,
		APIInfo:              apiInfo,
		DataDir:              "/var/lib/juju",
		Tools:                tools,
	}
	if err := environs.FinishMachineConfig(machineConfig, environConfig, constraints.Value{}); err != nil {
		return nil, err
	}
	cloudConfig, err := cloudinit.New(machineConfig)
	if err != nil {
		return nil, err
	}
	data, err := cloudConfig.Render()
	if err != nil {
		return nil, err
	}
	return data, nil
}
func cloudInitUserData(
	machineId, nonce string,
	tools *tools.Tools,
	environConfig *config.Config,
	stateInfo *state.Info,
	apiInfo *api.Info,
) ([]byte, error) {
	machineConfig := &cloudinit.MachineConfig{
		MachineId:            machineId,
		MachineNonce:         nonce,
		MachineContainerType: instance.LXC,
		StateInfo:            stateInfo,
		APIInfo:              apiInfo,
		DataDir:              "/var/lib/juju",
		Tools:                tools,
	}
	if err := environs.FinishMachineConfig(machineConfig, environConfig, constraints.Value{}); err != nil {
		return nil, err
	}
	cloudConfig, err := cloudinit.New(machineConfig)
	if err != nil {
		return nil, err
	}
	// Run apt-config to fetch proxy settings from the host. If no proxy
	// settings are configured, we don't set up any proxy information in
	// the container.
	proxyConfig, err := utils.AptConfigProxy()
	if err != nil {
		return nil, err
	}
	if proxyConfig != "" {
		var proxyLines []string
		for _, line := range strings.Split(proxyConfig, "\n") {
			line = strings.TrimSpace(line)
			if m := aptHTTPProxyRE.FindStringSubmatch(line); m != nil {
				cloudConfig.SetAptProxy(m[1])
			} else {
				proxyLines = append(proxyLines, line)
			}
		}
		if len(proxyLines) > 0 {
			cloudConfig.AddFile(
				"/etc/apt/apt.conf.d/99proxy-extra",
				strings.Join(proxyLines, "\n"),
				0644)
		}
	}
	// Run ifconfig so that the container's addresses at least end up in
	// the host's logs.
	cloudConfig.AddRunCmd("ifconfig")
	data, err := cloudConfig.Render()
	if err != nil {
		return nil, err
	}
	return data, nil
}
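// aptHTTPProxyRE is used above but not defined in this snippet. A minimal
// sketch (requires `import "regexp"`), assuming apt-config prints lines such
// as:
//	Acquire::http::Proxy "http://proxy.example.com:8080";
// The exact pattern is illustrative, not the project's definition:
var aptHTTPProxyRE = regexp.MustCompile(`(?i)^Acquire::HTTP::Proxy\s+"([^"]+)";?$`)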
// internalStartInstance allocates and starts a MAAS node. It is used both
// for the implementation of StartInstance and to initialize the bootstrap
// node.
// The instance will be set up for the same series for which you pass tools.
// All tools in possibleTools must be for the same series.
// machineConfig will be filled out with further details, but should contain
// MachineID, MachineNonce, StateInfo, and APIInfo.
// TODO(bug 1199847): Some of this work can be shared between providers.
func (environ *maasEnviron) internalStartInstance(cons constraints.Value, possibleTools tools.List, machineConfig *cloudinit.MachineConfig) (_ *maasInstance, err error) {
	series := possibleTools.Series()
	if len(series) != 1 {
		panic(fmt.Errorf("should have gotten tools for one series, got %v", series))
	}
	node, tools, err := environ.acquireNode(cons, possibleTools)
	if err != nil {
		return nil, fmt.Errorf("cannot run instances: %v", err)
	}
	instance := &maasInstance{&node, environ}
	machineConfig.Tools = tools
	defer func() {
		if err != nil {
			if err := environ.releaseInstance(instance); err != nil {
				logger.Errorf("error releasing failed instance: %v", err)
			}
		}
	}()
	hostname, err := instance.DNSName()
	if err != nil {
		return nil, err
	}
	info := machineInfo{hostname}
	runCmd, err := info.cloudinitRunCmd()
	if err != nil {
		return nil, err
	}
	if err := environs.FinishMachineConfig(machineConfig, environ.Config(), cons); err != nil {
		return nil, err
	}
	// Explicitly specify that the lxc containers use the br0 network
	// bridge set up by createBridgeNetwork below.
	machineConfig.MachineEnvironment[osenv.JujuLxcBridge] = "br0"
	userdata, err := environs.ComposeUserData(
		machineConfig,
		runCmd,
		createBridgeNetwork(),
		"service networking restart",
	)
	if err != nil {
		return nil, fmt.Errorf("could not compose userdata for bootstrap node: %v", err)
	}
	logger.Debugf("maas user data; %d bytes", len(userdata))
	if err := environ.startNode(*instance.maasObject, series[0], userdata); err != nil {
		return nil, err
	}
	logger.Debugf("started instance %q", instance.Id())
	return instance, nil
}
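// createBridgeNetwork is invoked above but not shown in this snippet. A
// plausible sketch of such a helper, assuming a Debian-style
// /etc/network/interfaces where eth0 is the primary NIC; the exact file
// contents are illustrative, not the project's:
func createBridgeNetwork() string {
	// Returned verbatim as a cloud-init runcmd line; the subsequent
	// "service networking restart" command brings br0 up.
	return `cat > /etc/network/interfaces << EOF
auto lo
iface lo inet loopback

auto eth0
iface eth0 inet manual

auto br0
iface br0 inet dhcp
    bridge_ports eth0
EOF`
}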
func (s *CloudInitSuite) TestFinishBootstrapConfig(c *C) {
	cfg, err := config.New(map[string]interface{}{
		"name":            "barbara",
		"type":            "dummy",
		"admin-secret":    "lisboan-pork",
		"authorized-keys": "we-are-the-keys",
		"agent-version":   "1.2.3",
		"ca-cert":         testing.CACert,
		"ca-private-key":  testing.CAKey,
		"state-server":    false,
		"secret":          "british-horse",
	})
	c.Assert(err, IsNil)
	oldAttrs := cfg.AllAttrs()
	mcfg := &cloudinit.MachineConfig{
		StateServer: true,
	}
	cons := constraints.MustParse("mem=1T cpu-power=999999999")
	err = environs.FinishMachineConfig(mcfg, cfg, cons)
	c.Check(err, IsNil)
	c.Check(mcfg.AuthorizedKeys, Equals, "we-are-the-keys")
	password := utils.PasswordHash("lisboan-pork")
	c.Check(mcfg.APIInfo, DeepEquals, &api.Info{
		Password: password,
		CACert:   []byte(testing.CACert),
	})
	c.Check(mcfg.StateInfo, DeepEquals, &state.Info{
		Password: password,
		CACert:   []byte(testing.CACert),
	})
	c.Check(mcfg.StatePort, Equals, cfg.StatePort())
	c.Check(mcfg.APIPort, Equals, cfg.APIPort())
	c.Check(mcfg.Constraints, DeepEquals, cons)

	oldAttrs["ca-private-key"] = ""
	oldAttrs["admin-secret"] = ""
	delete(oldAttrs, "secret")
	c.Check(mcfg.Config.AllAttrs(), DeepEquals, oldAttrs)

	srvCertPEM := mcfg.StateServerCert
	srvKeyPEM := mcfg.StateServerKey
	_, _, err = cert.ParseCertAndKey(srvCertPEM, srvKeyPEM)
	c.Check(err, IsNil)

	err = cert.Verify(srvCertPEM, []byte(testing.CACert), time.Now())
	c.Assert(err, IsNil)
	err = cert.Verify(srvCertPEM, []byte(testing.CACert), time.Now().AddDate(9, 0, 0))
	c.Assert(err, IsNil)
	err = cert.Verify(srvCertPEM, []byte(testing.CACert), time.Now().AddDate(10, 0, 1))
	c.Assert(err, NotNil)
}
func (s *CloudInitSuite) TestFinishInstanceConfig(c *C) {
	cfg, err := config.New(map[string]interface{}{
		"name":            "barbara",
		"type":            "dummy",
		"authorized-keys": "we-are-the-keys",
		"ca-cert":         testing.CACert,
		"ca-private-key":  "",
	})
	c.Assert(err, IsNil)
	mcfg := &cloudinit.MachineConfig{
		StateInfo: &state.Info{Tag: "not touched"},
		APIInfo:   &api.Info{Tag: "not touched"},
	}
	err = environs.FinishMachineConfig(mcfg, cfg, constraints.Value{})
	c.Assert(err, IsNil)
	c.Assert(mcfg, DeepEquals, &cloudinit.MachineConfig{
		AuthorizedKeys: "we-are-the-keys",
		ProviderType:   "dummy",
		StateInfo:      &state.Info{Tag: "not touched"},
		APIInfo:        &api.Info{Tag: "not touched"},
	})
}
// internalStartInstance is the internal version of StartInstance, used by
// Bootstrap as well as via StartInstance itself.
// machineConfig will be filled out with further details, but should contain
// MachineID, MachineNonce, StateInfo, and APIInfo.
// TODO(bug 1199847): Some of this work can be shared between providers.
func (e *environ) internalStartInstance(cons constraints.Value, possibleTools tools.List, machineConfig *cloudinit.MachineConfig) (instance.Instance, *instance.HardwareCharacteristics, error) {
	series := possibleTools.Series()
	if len(series) != 1 {
		panic(fmt.Errorf("should have gotten tools for one series, got %v", series))
	}
	arches := possibleTools.Arches()
	spec, err := findInstanceSpec(e, &instances.InstanceConstraint{
		Region:      e.ecfg().region(),
		Series:      series[0],
		Arches:      arches,
		Constraints: cons,
	})
	if err != nil {
		return nil, nil, err
	}
	tools, err := possibleTools.Match(tools.Filter{Arch: spec.Image.Arch})
	if err != nil {
		return nil, nil, fmt.Errorf("chosen architecture %v not present in %v", spec.Image.Arch, arches)
	}
	machineConfig.Tools = tools[0]
	if err := environs.FinishMachineConfig(machineConfig, e.Config(), cons); err != nil {
		return nil, nil, err
	}
	userData, err := environs.ComposeUserData(machineConfig)
	if err != nil {
		return nil, nil, fmt.Errorf("cannot make user data: %v", err)
	}
	log.Debugf("environs/openstack: openstack user data; %d bytes", len(userData))
	withPublicIP := e.ecfg().useFloatingIP()
	var publicIP *nova.FloatingIP
	if withPublicIP {
		fip, err := e.allocatePublicIP()
		if err != nil {
			return nil, nil, fmt.Errorf("cannot allocate a public IP as needed: %v", err)
		}
		publicIP = fip
		log.Infof("environs/openstack: allocated public IP %s", publicIP.IP)
	}
	config := e.Config()
	groups, err := e.setUpGroups(machineConfig.MachineId, config.StatePort(), config.APIPort())
	if err != nil {
		return nil, nil, fmt.Errorf("cannot set up groups: %v", err)
	}
	groupNames := make([]nova.SecurityGroupName, len(groups))
	for i, g := range groups {
		groupNames[i] = nova.SecurityGroupName{g.Name}
	}
	var server *nova.Entity
	for a := shortAttempt.Start(); a.Next(); {
		server, err = e.nova().RunServer(nova.RunServerOpts{
			Name:               e.machineFullName(machineConfig.MachineId),
			FlavorId:           spec.InstanceType.Id,
			ImageId:            spec.Image.Id,
			UserData:           userData,
			SecurityGroupNames: groupNames,
		})
		if err == nil || !gooseerrors.IsNotFound(err) {
			break
		}
	}
	if err != nil {
		return nil, nil, fmt.Errorf("cannot run instance: %v", err)
	}
	detail, err := e.nova().GetServer(server.Id)
	if err != nil {
		return nil, nil, fmt.Errorf("cannot get started instance: %v", err)
	}
	inst := &openstackInstance{
		e:            e,
		ServerDetail: detail,
		arch:         &spec.Image.Arch,
		instType:     &spec.InstanceType,
	}
	log.Infof("environs/openstack: started instance %q", inst.Id())
	if withPublicIP {
		if err := e.assignPublicIP(publicIP, string(inst.Id())); err != nil {
			if err := e.terminateInstances([]instance.Id{inst.Id()}); err != nil {
				// Ignore the failure at this stage; just log it.
				log.Debugf("environs/openstack: failed to terminate instance %q: %v", inst.Id(), err)
			}
			return nil, nil, fmt.Errorf("cannot assign public address %s to instance %q: %v", publicIP.IP, inst.Id(), err)
		}
		log.Infof("environs/openstack: assigned public IP %s to %q", publicIP.IP, inst.Id())
	}
	return inst, inst.hardwareCharacteristics(), nil
}
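// shortAttempt governs the RunServer retry loop above but is not defined in
// this snippet. A minimal sketch, assuming juju-core's utils.AttemptStrategy
// helper and a `time` import; the timings are illustrative, not the
// project's values:
var shortAttempt = utils.AttemptStrategy{
	Total: 5 * time.Second,        // give up after this long overall
	Delay: 200 * time.Millisecond, // pause between attempts
}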
// internalStartInstance is the internal version of StartInstance, used by
// Bootstrap as well as via StartInstance itself.
// TODO(bug 1199847): Some of this work can be shared between providers.
func (e *environ) internalStartInstance(cons constraints.Value, possibleTools tools.List, machineConfig *cloudinit.MachineConfig) (instance.Instance, *instance.HardwareCharacteristics, error) {
	series := possibleTools.Series()
	if len(series) != 1 {
		panic(fmt.Errorf("should have gotten tools for one series, got %v", series))
	}
	arches := possibleTools.Arches()
	storage := ebsStorage
	baseURLs, err := e.getImageBaseURLs()
	if err != nil {
		return nil, nil, err
	}
	spec, err := findInstanceSpec(baseURLs, &instances.InstanceConstraint{
		Region:      e.ecfg().region(),
		Series:      series[0],
		Arches:      arches,
		Constraints: cons,
		Storage:     &storage,
	})
	if err != nil {
		return nil, nil, err
	}
	tools, err := possibleTools.Match(tools.Filter{Arch: spec.Image.Arch})
	if err != nil {
		return nil, nil, fmt.Errorf("chosen architecture %v not present in %v", spec.Image.Arch, arches)
	}
	machineConfig.Tools = tools[0]
	if err := environs.FinishMachineConfig(machineConfig, e.Config(), cons); err != nil {
		return nil, nil, err
	}
	userData, err := environs.ComposeUserData(machineConfig)
	if err != nil {
		return nil, nil, fmt.Errorf("cannot make user data: %v", err)
	}
	log.Debugf("environs/ec2: ec2 user data; %d bytes", len(userData))
	config := e.Config()
	groups, err := e.setUpGroups(machineConfig.MachineId, config.StatePort(), config.APIPort())
	if err != nil {
		return nil, nil, fmt.Errorf("cannot set up groups: %v", err)
	}
	var instances *ec2.RunInstancesResp
	for a := shortAttempt.Start(); a.Next(); {
		instances, err = e.ec2().RunInstances(&ec2.RunInstances{
			ImageId:        spec.Image.Id,
			MinCount:       1,
			MaxCount:       1,
			UserData:       userData,
			InstanceType:   spec.InstanceType.Name,
			SecurityGroups: groups,
		})
		if err == nil || ec2ErrCode(err) != "InvalidGroup.NotFound" {
			break
		}
	}
	if err != nil {
		return nil, nil, fmt.Errorf("cannot run instances: %v", err)
	}
	if len(instances.Instances) != 1 {
		return nil, nil, fmt.Errorf("expected 1 started instance, got %d", len(instances.Instances))
	}
	inst := &ec2Instance{
		e:        e,
		Instance: &instances.Instances[0],
		arch:     &spec.Image.Arch,
		instType: &spec.InstanceType,
	}
	log.Infof("environs/ec2: started instance %q", inst.Id())
	return inst, inst.hardwareCharacteristics(), nil
}
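// ec2ErrCode is used in the retry loop above but not defined in this
// snippet. A plausible sketch, assuming goamz's *ec2.Error type, which
// carries the EC2 API error code in its Code field:
func ec2ErrCode(err error) string {
	ec2err, ok := err.(*ec2.Error)
	if !ok {
		return ""
	}
	return ec2err.Code
}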
// internalStartInstance does the provider-specific work of starting an
// instance. The code in StartInstance is actually largely agnostic across
// the EC2/OpenStack/MAAS/Azure providers.
// The instance will be set up for the same series for which you pass tools.
// All tools in possibleTools must be for the same series.
// machineConfig will be filled out with further details, but should contain
// MachineID, MachineNonce, StateInfo, and APIInfo.
// TODO(bug 1199847): Some of this work can be shared between providers.
func (env *azureEnviron) internalStartInstance(cons constraints.Value, possibleTools tools.List, machineConfig *cloudinit.MachineConfig) (_ instance.Instance, err error) {
	// "err" is declared in the function signature so that deferred
	// cleanup can run on error returns.
	series := possibleTools.Series()
	if len(series) != 1 {
		panic(fmt.Errorf("should have gotten tools for one series, got %v", series))
	}
	err = environs.FinishMachineConfig(machineConfig, env.Config(), cons)
	if err != nil {
		return nil, err
	}
	// Pick tools. Needed for the custom data (which is what we normally
	// call userdata).
	machineConfig.Tools = possibleTools[0]
	logger.Infof("picked tools %q", machineConfig.Tools)

	// Compose userdata.
	userData, err := makeCustomData(machineConfig)
	if err != nil {
		return nil, fmt.Errorf("custom data: %v", err)
	}

	azure, err := env.getManagementAPI()
	if err != nil {
		return nil, err
	}
	defer env.releaseManagementAPI(azure)

	service, err := newHostedService(azure.ManagementAPI, env.getEnvPrefix(), env.getAffinityGroupName())
	if err != nil {
		return nil, err
	}
	serviceName := service.ServiceName

	// If we fail after this point, clean up the hosted service.
	defer func() {
		if err != nil {
			azure.DestroyHostedService(
				&gwacl.DestroyHostedServiceRequest{
					ServiceName: serviceName,
				})
		}
	}()

	// TODO: Use simplestreams to get the name of the image, given the
	// constraints provided by Juju. In the meantime we use a temporary
	// Saucy image containing a cloud-init package which supports Azure.
	sourceImageName := "b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-13_10-amd64-server-DEVELOPMENT-20130713-Juju_ALPHA-en-us-30GB"

	// virtualNetworkName is the virtual network to which all the
	// deployments in this environment belong.
	virtualNetworkName := env.getVirtualNetworkName()

	// 1. Create an OS Disk.
	vhd := env.newOSDisk(sourceImageName)

	// 2. Create a Role for a Linux machine.
	role := env.newRole(vhd, userData, roleHostname)

	// 3. Create the Deployment object.
	deployment := env.newDeployment(role, serviceName, serviceName, virtualNetworkName)

	err = azure.AddDeployment(deployment, serviceName)
	if err != nil {
		return nil, err
	}

	var inst instance.Instance
	// From here on, remember to shut down the instance before returning
	// any error.
	defer func() {
		if err != nil && inst != nil {
			if err2 := env.StopInstances([]instance.Instance{inst}); err2 != nil {
				// Failure upon failure. Log the stop error, but
				// return the original error.
				logger.Errorf("error releasing failed instance: %v", err2)
			}
		}
	}()
	// Assign the returned instance to 'inst' so that the deferred
	// function above can perform its check.
	inst, err = env.getInstance(serviceName)
	if err != nil {
		return nil, err
	}
	return inst, nil
}
// internalStartInstance does the provider-specific work of starting an
// instance. The code in StartInstance is actually largely agnostic across
// the EC2/OpenStack/MAAS/Azure providers.
// The instance will be set up for the same series for which you pass tools.
// All tools in possibleTools must be for the same series.
// machineConfig will be filled out with further details, but should contain
// MachineID, MachineNonce, StateInfo, and APIInfo.
// TODO(bug 1199847): Some of this work can be shared between providers.
func (env *azureEnviron) internalStartInstance(cons constraints.Value, possibleTools tools.List, machineConfig *cloudinit.MachineConfig) (_ instance.Instance, err error) {
	// "err" is declared in the function signature so that deferred
	// cleanup can run on error returns.
	series := possibleTools.Series()
	if len(series) != 1 {
		panic(fmt.Errorf("should have gotten tools for one series, got %v", series))
	}
	err = environs.FinishMachineConfig(machineConfig, env.Config(), cons)
	if err != nil {
		return nil, err
	}
	// Pick tools. Needed for the custom data (which is what we normally
	// call userdata).
	machineConfig.Tools = possibleTools[0]
	logger.Infof("picked tools %q", machineConfig.Tools)

	// Compose userdata.
	userData, err := makeCustomData(machineConfig)
	if err != nil {
		return nil, fmt.Errorf("custom data: %v", err)
	}

	azure, err := env.getManagementAPI()
	if err != nil {
		return nil, err
	}
	defer env.releaseManagementAPI(azure)

	snap := env.getSnapshot()
	location := snap.ecfg.location()
	service, err := newHostedService(azure.ManagementAPI, env.getEnvPrefix(), env.getAffinityGroupName(), location)
	if err != nil {
		return nil, err
	}
	serviceName := service.ServiceName

	// If we fail after this point, clean up the hosted service.
	defer func() {
		if err != nil {
			azure.DestroyHostedService(
				&gwacl.DestroyHostedServiceRequest{
					ServiceName: serviceName,
				})
		}
	}()

	instanceType, sourceImageName, err := env.selectInstanceTypeAndImage(cons, series[0], location)
	if err != nil {
		return nil, err
	}

	// virtualNetworkName is the virtual network to which all the
	// deployments in this environment belong.
	virtualNetworkName := env.getVirtualNetworkName()

	// 1. Create an OS Disk.
	vhd := env.newOSDisk(sourceImageName)

	// 2. Create a Role for a Linux machine.
	role := env.newRole(instanceType, vhd, userData, roleHostname)

	// 3. Create the Deployment object.
	deployment := env.newDeployment(role, serviceName, serviceName, virtualNetworkName)

	err = azure.AddDeployment(deployment, serviceName)
	if err != nil {
		return nil, err
	}

	var inst instance.Instance
	// From here on, remember to shut down the instance before returning
	// any error.
	defer func() {
		if err != nil && inst != nil {
			if err2 := env.StopInstances([]instance.Instance{inst}); err2 != nil {
				// Failure upon failure. Log the stop error, but
				// return the original error.
				logger.Errorf("error releasing failed instance: %v", err2)
			}
		}
	}()
	// Assign the returned instance to 'inst' so that the deferred
	// function above can perform its check.
	inst, err = env.getInstance(serviceName)
	if err != nil {
		return nil, err
	}
	return inst, nil
}
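// The cleanup idiom used throughout the functions above, shown in isolation:
// because "err" is a named result, a deferred closure observes the value the
// function is about to return and can undo partial work on failure. A
// minimal sketch with hypothetical stand-in helpers passed as parameters
// (not provider code):
func startWithCleanup(
	acquire func() (string, error),
	release func(string),
	configure func(string) error,
) (_ string, err error) {
	res, err := acquire()
	if err != nil {
		return "", err
	}
	// Runs only when a later step returns a non-nil error.
	defer func() {
		if err != nil {
			release(res)
		}
	}()
	if err = configure(res); err != nil {
		return "", err // the deferred release fires here
	}
	return res, nil
}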