// getMetadata builds the raw "user-defined" metadata for the new
// instance (relative to the provided args) and returns it.
//
// Errors from composing the user data or formatting the authorized
// keys are returned annotated/traced; on success the returned map is
// ready to be attached to the new GCE instance.
func getMetadata(args environs.StartInstanceParams) (map[string]string, error) {
	userData, err := providerinit.ComposeUserData(args.InstanceConfig, nil)
	if err != nil {
		return nil, errors.Annotate(err, "cannot make user data")
	}
	logger.Debugf("GCE user data; %d bytes", len(userData))
	authKeys, err := google.FormatAuthorizedKeys(args.InstanceConfig.AuthorizedKeys, "ubuntu")
	if err != nil {
		return nil, errors.Trace(err)
	}
	b64UserData := base64.StdEncoding.EncodeToString([]byte(userData))
	metadata := map[string]string{
		// Default to "not a state server"; overridden below when
		// args identify a state server.
		metadataKeyIsState: metadataValueFalse,
		// We store a gz snapshot of information that is used by
		// cloud-init and unpacked in to the /var/lib/cloud/instances folder
		// for the instance. Due to a limitation with GCE and binary blobs
		// we base64 encode the data before storing it.
		metadataKeyCloudInit: b64UserData,
		// Valid encoding values are determined by the cloudinit GCE data source.
		// See: http://cloudinit.readthedocs.org
		metadataKeyEncoding: "base64",
		metadataKeySSHKeys:  authKeys,
	}
	if isStateServer(args.InstanceConfig) {
		metadata[metadataKeyIsState] = metadataValueTrue
	}
	return metadata, nil
}
// initInst populates the suite with a bootstrap instance config and the
// metadata/addresses/args fixtures that the GCE provider tests compare
// against (both Ubuntu and Windows metadata variants, plus a base disk).
func (s *BaseSuiteUnpatched) initInst(c *gc.C) {
	tools := []*tools.Tools{{
		Version: version.Binary{Arch: arch.AMD64, Series: "trusty"},
		URL:     "https://example.org",
	}}

	cons := constraints.Value{InstanceType: &allInstanceTypes[0].Name}

	instanceConfig, err := instancecfg.NewBootstrapInstanceConfig(cons, cons, "trusty", "")
	c.Assert(err, jc.ErrorIsNil)
	instanceConfig.Tools = tools[0]
	instanceConfig.AuthorizedKeys = s.Config.AuthorizedKeys()

	// Build the same userdata/keys the provider would, so the expected
	// metadata maps below match getMetadata's output.
	userData, err := providerinit.ComposeUserData(instanceConfig, nil, GCERenderer{})
	c.Assert(err, jc.ErrorIsNil)

	authKeys, err := google.FormatAuthorizedKeys(instanceConfig.AuthorizedKeys, "ubuntu")
	c.Assert(err, jc.ErrorIsNil)

	s.UbuntuMetadata = map[string]string{
		metadataKeyIsState:   metadataValueTrue,
		metadataKeyCloudInit: string(userData),
		metadataKeyEncoding:  "base64",
		metadataKeySSHKeys:   authKeys,
	}
	s.WindowsMetadata = map[string]string{
		metadataKeyWindowsUserdata: string(userData),
		metadataKeyWindowsSysprep:  fmt.Sprintf(winSetHostnameScript, "juju.*"),
	}
	s.Addresses = []network.Address{{
		Value: "10.0.0.1",
		Type:  network.IPv4Address,
		Scope: network.ScopeCloudLocal,
	}}
	s.Instance = s.NewInstance(c, "spam")
	s.BaseInstance = s.Instance.base
	s.InstName = s.Prefix + "machine-spam"

	s.StartInstArgs = environs.StartInstanceParams{
		InstanceConfig: instanceConfig,
		Tools:          tools,
		Constraints:    cons,
		//Placement: "",
		//DistributionGroup: nil,
	}

	s.InstanceType = allInstanceTypes[0]

	// Storage
	eUUID := s.Env.Config().UUID()
	s.BaseDisk = &google.Disk{
		Id:          1234567,
		Name:        "home-zone--c930380d-8337-4bf5-b07a-9dbb5ae771e4",
		Zone:        "home-zone",
		Status:      google.StatusReady,
		Size:        1024,
		Description: eUUID,
	}
}
// getMetadata builds the raw "user-defined" metadata for the new
// instance (relative to the provided args) and returns it.
//
// The userdata is passed to LXD as plain (uncompressed) text; see the
// TODO below for why it is not gzipped like other providers do.
func getMetadata(args environs.StartInstanceParams) (map[string]string, error) {
	renderer := lxdRenderer{}
	uncompressed, err := providerinit.ComposeUserData(args.InstanceConfig, nil, renderer)
	if err != nil {
		return nil, errors.Annotate(err, "cannot make user data")
	}
	logger.Debugf("LXD user data; %d bytes", len(uncompressed))

	// TODO(ericsnow) Looks like LXD does not handle gzipped userdata
	// correctly.  It likely has to do with the HTTP transport, much
	// as we have to b64encode the userdata for GCE.  Until that is
	// resolved we simply pass the plain text.
	//compressed := utils.Gzip(uncompressed)
	userdata := string(uncompressed)
	metadata := map[string]string{
		// Default to "not a state server"; overridden below when
		// args identify a state server.
		metadataKeyIsState: metadataValueFalse,
		// We store a snapshot of information that is used by
		// cloud-init and unpacked in to the /var/lib/cloud/instances folder
		// for the instance.
		metadataKeyCloudInit: userdata,
	}
	if isStateServer(args.InstanceConfig) {
		metadata[metadataKeyIsState] = metadataValueTrue
	}
	return metadata, nil
}
// TestWindowsUserdataEncoding verifies that ComposeUserData for a
// Windows ("win8") instance config produces exactly the bytes obtained
// by rendering the cloud-init YAML manually, gzipping and
// base64-encoding it, and splicing it into the userdata script template.
func (s *CloudInitSuite) TestWindowsUserdataEncoding(c *gc.C) {
	series := "win8"
	metricsSpoolDir := must(paths.MetricsSpoolDir("win8"))
	toolsList := tools.List{
		&tools.Tools{
			URL:     "http://foo.com/tools/released/juju1.2.3-win8-amd64.tgz",
			Version: version.MustParseBinary("1.2.3-win8-amd64"),
			Size:    10,
			SHA256:  "1234",
		},
	}
	dataDir, err := paths.DataDir(series)
	c.Assert(err, jc.ErrorIsNil)
	logDir, err := paths.LogDir(series)
	c.Assert(err, jc.ErrorIsNil)

	cfg := instancecfg.InstanceConfig{
		ControllerTag:    testing.ControllerTag,
		MachineId:        "10",
		AgentEnvironment: map[string]string{agent.ProviderType: "dummy"},
		Series:           series,
		Jobs:             []multiwatcher.MachineJob{multiwatcher.JobHostUnits},
		MachineNonce:     "FAKE_NONCE",
		APIInfo: &api.Info{
			Addrs:    []string{"state-addr.testing.invalid:54321"},
			Password: "******",
			CACert:   "CA CERT\n" + testing.CACert,
			Tag:      names.NewMachineTag("10"),
			ModelTag: testing.ModelTag,
		},
		MachineAgentServiceName: "jujud-machine-10",
		DataDir:                 dataDir,
		LogDir:                  path.Join(logDir, "juju"),
		MetricsSpoolDir:         metricsSpoolDir,
		CloudInitOutputLog:      path.Join(logDir, "cloud-init-output.log"),
	}
	err = cfg.SetTools(toolsList)
	c.Assert(err, jc.ErrorIsNil)

	ci, err := cloudinit.New("win8")
	c.Assert(err, jc.ErrorIsNil)

	udata, err := cloudconfig.NewUserdataConfig(&cfg, ci)
	c.Assert(err, jc.ErrorIsNil)
	err = udata.Configure()
	c.Assert(err, jc.ErrorIsNil)

	data, err := ci.RenderYAML()
	c.Assert(err, jc.ErrorIsNil)

	// Fresh cloud-config for ComposeUserData so it is not polluted by
	// the manual Configure() above.
	cicompose, err := cloudinit.New("win8")
	c.Assert(err, jc.ErrorIsNil)

	base64Data := base64.StdEncoding.EncodeToString(utils.Gzip(data))
	got := []byte(fmt.Sprintf(cloudconfig.UserDataScript, base64Data))

	expected, err := providerinit.ComposeUserData(&cfg, cicompose, openstack.OpenstackRenderer{})
	c.Assert(err, jc.ErrorIsNil)

	c.Assert(string(got), gc.Equals, string(expected))
}
// StartInstance asks for a new instance to be created, associated with
// the provided config in machineConfig. The given config describes the juju
// state for the new instance to connect to. The config MachineNonce, which must be
// unique within an environment, is used by juju to protect against the
// consequences of multiple instances being started with the same machine id.
func (env *environ) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {
	logger.Infof("sigmaEnviron.StartInstance...")

	// Validate the arguments before touching the cloud.
	if args.InstanceConfig == nil {
		return nil, errors.New("instance configuration is nil")
	}

	if args.InstanceConfig.HasNetworks() {
		return nil, errors.New("starting instances with networks is not supported yet")
	}

	if len(args.Tools) == 0 {
		return nil, errors.New("tools not found")
	}

	img, err := findInstanceImage(args.ImageMetadata)
	if err != nil {
		return nil, err
	}

	// Narrow the tools list to the image's architecture.
	tools, err := args.Tools.Match(tools.Filter{Arch: img.Arch})
	if err != nil {
		return nil, errors.Errorf("chosen architecture %v not present in %v", img.Arch, args.Tools.Arches())
	}
	args.InstanceConfig.Tools = tools[0]

	if err := instancecfg.FinishInstanceConfig(args.InstanceConfig, env.Config()); err != nil {
		return nil, err
	}
	userData, err := providerinit.ComposeUserData(args.InstanceConfig, nil, CloudSigmaRenderer{})
	if err != nil {
		return nil, errors.Annotate(err, "cannot make user data")
	}
	logger.Debugf("cloudsigma user data; %d bytes", len(userData))

	client := env.client
	server, rootdrive, arch, err := client.newInstance(args, img, userData)
	if err != nil {
		return nil, errors.Errorf("failed start instance: %v", err)
	}

	inst := &sigmaInstance{server: server}

	// prepare hardware characteristics
	hwch, err := inst.hardware(arch, rootdrive.Size())
	if err != nil {
		return nil, err
	}

	logger.Debugf("hardware: %v", hwch)

	return &environs.StartInstanceResult{
		Instance: inst,
		Hardware: hwch,
	}, nil
}
// makeCustomData produces custom data for Azure. This is a base64-encoded // zipfile of cloudinit userdata. func makeCustomData(cfg *instancecfg.InstanceConfig) (string, error) { zipData, err := providerinit.ComposeUserData(cfg, nil) if err != nil { return "", fmt.Errorf("failure while generating custom data: %v", err) } logger.Debugf("user data; %d bytes", len(zipData)) encodedData := base64.StdEncoding.EncodeToString(zipData) logger.Debugf("base64-encoded custom data: %d bytes", len(encodedData)) return encodedData, nil }
func (*customDataSuite) TestMakeCustomDataEncodesUserData(c *gc.C) { cfg := makeInstanceConfig(c) encodedData, err := makeCustomData(cfg) c.Assert(err, jc.ErrorIsNil) data, err := base64.StdEncoding.DecodeString(encodedData) c.Assert(err, jc.ErrorIsNil) reference, err := providerinit.ComposeUserData(cfg, nil) c.Assert(err, jc.ErrorIsNil) c.Check(data, gc.DeepEquals, reference) }
// initInst populates the suite with a bootstrap instance config and the
// hardware/metadata/addresses/args fixtures that the LXD provider tests
// compare against.
func (s *BaseSuiteUnpatched) initInst(c *gc.C) {
	tools := []*tools.Tools{{
		Version: version.Binary{Arch: arch.AMD64, Series: "trusty"},
		URL:     "https://example.org",
	}}

	cons := constraints.Value{
	// nothing
	}

	instanceConfig, err := instancecfg.NewBootstrapInstanceConfig(cons, cons, "trusty", "")
	c.Assert(err, jc.ErrorIsNil)
	instanceConfig.Tools = tools[0]
	instanceConfig.AuthorizedKeys = s.Config.AuthorizedKeys()

	// Build the same userdata the provider would, so the expected
	// metadata map below matches getMetadata's output.
	userData, err := providerinit.ComposeUserData(instanceConfig, nil, lxdRenderer{})
	c.Assert(err, jc.ErrorIsNil)

	s.Hardware = &lxdclient.InstanceHardware{
		Architecture: arch.AMD64,
		NumCores:     1,
		MemoryMB:     3750,
	}
	var archName string = arch.AMD64
	var numCores uint64 = 1
	var memoryMB uint64 = 3750
	s.HWC = &instance.HardwareCharacteristics{
		Arch:     &archName,
		CpuCores: &numCores,
		Mem:      &memoryMB,
	}

	s.Metadata = map[string]string{ // userdata
		metadataKeyIsState:   metadataValueTrue, // bootstrap
		metadataKeyCloudInit: string(userData),
	}
	s.Addresses = []network.Address{{
		Value: "10.0.0.1",
		Type:  network.IPv4Address,
		Scope: network.ScopeCloudLocal,
	}}
	s.Instance = s.NewInstance(c, "spam")
	s.RawInstance = s.Instance.raw
	s.InstName = s.Prefix + "machine-spam"

	s.StartInstArgs = environs.StartInstanceParams{
		InstanceConfig: instanceConfig,
		Tools:          tools,
		Constraints:    cons,
	}
}
// newOSProfile returns the Azure OS profile for the named VM, derived
// from the instance config, along with the OS type for the config's
// series. The userdata composed here becomes the VM's CustomData.
// randomAdminPassword is only invoked for Windows, which requires a
// password even though Juju never uses it.
func newOSProfile(
	vmName string,
	instanceConfig *instancecfg.InstanceConfig,
	randomAdminPassword func() string,
) (*compute.OSProfile, os.OSType, error) {
	logger.Debugf("creating OS profile for %q", vmName)

	customData, err := providerinit.ComposeUserData(instanceConfig, nil, AzureRenderer{})
	if err != nil {
		return nil, os.Unknown, errors.Annotate(err, "composing user data")
	}

	osProfile := &compute.OSProfile{
		ComputerName: to.StringPtr(vmName),
		CustomData:   to.StringPtr(string(customData)),
	}

	seriesOS, err := jujuseries.GetOSFromSeries(instanceConfig.Series)
	if err != nil {
		return nil, os.Unknown, errors.Trace(err)
	}
	switch seriesOS {
	case os.Ubuntu, os.CentOS:
		// SSH keys are handled by custom data, but must also be
		// specified in order to forego providing a password, and
		// disable password authentication.
		// NOTE(review): the "ubuntu" user and /home/ubuntu path are
		// used for CentOS as well — confirm this is intended.
		publicKeys := []compute.SSHPublicKey{{
			Path:    to.StringPtr("/home/ubuntu/.ssh/authorized_keys"),
			KeyData: to.StringPtr(instanceConfig.AuthorizedKeys),
		}}
		osProfile.AdminUsername = to.StringPtr("ubuntu")
		osProfile.LinuxConfiguration = &compute.LinuxConfiguration{
			DisablePasswordAuthentication: to.BoolPtr(true),
			SSH: &compute.SSHConfiguration{PublicKeys: &publicKeys},
		}
	case os.Windows:
		osProfile.AdminUsername = to.StringPtr("JujuAdministrator")
		// A password is required by Azure, but we will never use it.
		// We generate something sufficiently long and random that it
		// should be infeasible to guess.
		osProfile.AdminPassword = to.StringPtr(randomAdminPassword())
		osProfile.WindowsConfiguration = &compute.WindowsConfiguration{
			ProvisionVMAgent:       to.BoolPtr(true),
			EnableAutomaticUpdates: to.BoolPtr(true),
			// TODO(?) add WinRM configuration here.
		}
	default:
		return nil, os.Unknown, errors.NotSupportedf("%s", seriesOS)
	}
	return osProfile, seriesOS, nil
}
// initInst populates the suite with a bootstrap instance config and the
// metadata/addresses/args fixtures that the GCE provider tests compare
// against. The expected metadata mirrors getMetadata: base64-encoded
// userdata plus formatted SSH keys.
func (s *BaseSuiteUnpatched) initInst(c *gc.C) {
	tools := []*tools.Tools{{
		Version: version.Binary{Arch: arch.AMD64, Series: "trusty"},
		URL:     "https://example.org",
	}}

	cons := constraints.Value{InstanceType: &allInstanceTypes[0].Name}

	instanceConfig, err := instancecfg.NewBootstrapInstanceConfig(cons, "trusty")
	c.Assert(err, jc.ErrorIsNil)
	instanceConfig.Tools = tools[0]
	instanceConfig.AuthorizedKeys = s.Config.AuthorizedKeys()

	userData, err := providerinit.ComposeUserData(instanceConfig, nil)
	c.Assert(err, jc.ErrorIsNil)
	b64UserData := base64.StdEncoding.EncodeToString([]byte(userData))

	authKeys, err := google.FormatAuthorizedKeys(instanceConfig.AuthorizedKeys, "ubuntu")
	c.Assert(err, jc.ErrorIsNil)

	s.Metadata = map[string]string{
		metadataKeyIsState:   metadataValueTrue,
		metadataKeyCloudInit: b64UserData,
		metadataKeyEncoding:  "base64",
		metadataKeySSHKeys:   authKeys,
	}
	s.Addresses = []network.Address{{
		Value: "10.0.0.1",
		Type:  network.IPv4Address,
		Scope: network.ScopeCloudLocal,
	}}
	s.Instance = s.NewInstance(c, "spam")
	s.BaseInstance = s.Instance.base
	s.InstName = s.Prefix + "machine-spam"

	s.StartInstArgs = environs.StartInstanceParams{
		InstanceConfig: instanceConfig,
		Tools:          tools,
		Constraints:    cons,
		//Placement: "",
		//DistributionGroup: nil,
	}

	s.InstanceType = allInstanceTypes[0]
}
// getMetadata builds the raw "user-defined" metadata for the new
// instance (relative to the provided args) and returns it.
//
// The metadata layout differs per OS: Ubuntu gets cloud-init userdata
// (base64, rendered by GCERenderer) plus SSH keys; Windows gets the
// userdata plus a sysprep script that sets a random hostname. Any other
// OS is an error.
func getMetadata(args environs.StartInstanceParams, os jujuos.OSType) (map[string]string, error) {
	userData, err := providerinit.ComposeUserData(args.InstanceConfig, nil, GCERenderer{})
	if err != nil {
		return nil, errors.Annotate(err, "cannot make user data")
	}
	logger.Debugf("GCE user data; %d bytes", len(userData))

	metadata := make(map[string]string)
	if isController(args.InstanceConfig) {
		metadata[metadataKeyIsState] = metadataValueTrue
	} else {
		metadata[metadataKeyIsState] = metadataValueFalse
	}
	switch os {
	case jujuos.Ubuntu:
		// We store a gz snapshot of information that is used by
		// cloud-init and unpacked in to the /var/lib/cloud/instances folder
		// for the instance. Due to a limitation with GCE and binary blobs
		// we base64 encode the data before storing it.
		metadata[metadataKeyCloudInit] = string(userData)
		// Valid encoding values are determined by the cloudinit GCE data source.
		// See: http://cloudinit.readthedocs.org
		metadata[metadataKeyEncoding] = "base64"

		authKeys, err := google.FormatAuthorizedKeys(args.InstanceConfig.AuthorizedKeys, "ubuntu")
		if err != nil {
			return nil, errors.Trace(err)
		}
		metadata[metadataKeySSHKeys] = authKeys
	case jujuos.Windows:
		metadata[metadataKeyWindowsUserdata] = string(userData)

		validChars := append(utils.UpperAlpha, append(utils.LowerAlpha, utils.Digits...)...)

		// The hostname must have maximum 15 characters
		winHostname := "juju" + utils.RandomString(11, validChars)
		metadata[metadataKeyWindowsSysprep] = fmt.Sprintf(winSetHostnameScript, winHostname)
	default:
		return nil, errors.Errorf("cannot pack metadata for os %s on the gce provider", os.String())
	}

	return metadata, nil
}
// getMetadata builds the raw "user-defined" metadata for the new
// instance (relative to the provided args) and returns it.
//
// Besides the cloud-init userdata, only Juju-prefixed instance tags are
// copied through; arbitrary tags are dropped because LXD interprets
// some metadata keys itself.
func getMetadata(cloudcfg cloudinit.CloudConfig, args environs.StartInstanceParams) (map[string]string, error) {
	renderer := lxdRenderer{}
	uncompressed, err := providerinit.ComposeUserData(args.InstanceConfig, cloudcfg, renderer)
	if err != nil {
		return nil, errors.Annotate(err, "cannot make user data")
	}
	logger.Debugf("LXD user data; %d bytes", len(uncompressed))

	// TODO(ericsnow) Looks like LXD does not handle gzipped userdata
	// correctly.  It likely has to do with the HTTP transport, much
	// as we have to b64encode the userdata for GCE.  Until that is
	// resolved we simply pass the plain text.
	//compressed := utils.Gzip(uncompressed)
	userdata := string(uncompressed)
	metadata := map[string]string{
		// store the cloud-config userdata for cloud-init.
		metadataKeyCloudInit: userdata,
	}
	for k, v := range args.InstanceConfig.Tags {
		if !strings.HasPrefix(k, tags.JujuTagPrefix) {
			// Since some metadata is interpreted by LXD,
			// we cannot allow arbitrary tags to be passed
			// in by the user. We currently only pass through
			// Juju-defined tags.
			//
			// TODO(axw) 2016-04-11 #1568666
			// We should reject non-juju tags in config validation.
			logger.Debugf("ignoring non-juju tag: %s=%s", k, v)
			continue
		}
		metadata[k] = v
	}
	return metadata, nil
}
// StartInstance is specified in the InstanceBroker interface.
//
// Overall flow: pick availability zones (placement or automatic
// spread), resolve an instance spec and tools, compose userdata,
// configure networks / floating IP / security groups, then try to boot
// the Nova server across the candidate zones, retrying on
// "no valid hosts" errors.
func (e *Environ) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {
	var availabilityZones []string
	if args.Placement != "" {
		placement, err := e.parsePlacement(args.Placement)
		if err != nil {
			return nil, err
		}
		if !placement.availabilityZone.State.Available {
			return nil, errors.Errorf("availability zone %q is unavailable", placement.availabilityZone.Name)
		}
		availabilityZones = append(availabilityZones, placement.availabilityZone.Name)
	}

	// If no availability zone is specified, then automatically spread across
	// the known zones for optimal spread across the instance distribution
	// group.
	if len(availabilityZones) == 0 {
		var group []instance.Id
		var err error
		if args.DistributionGroup != nil {
			group, err = args.DistributionGroup()
			if err != nil {
				return nil, err
			}
		}
		zoneInstances, err := availabilityZoneAllocations(e, group)
		if errors.IsNotImplemented(err) {
			// Availability zones are an extension, so we may get a
			// not implemented error; ignore these.
		} else if err != nil {
			return nil, err
		} else {
			for _, zone := range zoneInstances {
				availabilityZones = append(availabilityZones, zone.ZoneName)
			}
		}
		if len(availabilityZones) == 0 {
			// No explicitly selectable zones available, so use an unspecified zone.
			availabilityZones = []string{""}
		}
	}

	series := args.Tools.OneSeries()
	arches := args.Tools.Arches()
	spec, err := findInstanceSpec(e, &instances.InstanceConstraint{
		Region:      e.ecfg().region(),
		Series:      series,
		Arches:      arches,
		Constraints: args.Constraints,
	}, args.ImageMetadata)
	if err != nil {
		return nil, err
	}
	tools, err := args.Tools.Match(tools.Filter{Arch: spec.Image.Arch})
	if err != nil {
		return nil, errors.Errorf("chosen architecture %v not present in %v", spec.Image.Arch, arches)
	}

	if err := args.InstanceConfig.SetTools(tools); err != nil {
		return nil, errors.Trace(err)
	}

	if err := instancecfg.FinishInstanceConfig(args.InstanceConfig, e.Config()); err != nil {
		return nil, err
	}
	cloudcfg, err := e.configurator.GetCloudConfig(args)
	if err != nil {
		return nil, errors.Trace(err)
	}
	userData, err := providerinit.ComposeUserData(args.InstanceConfig, cloudcfg, OpenstackRenderer{})
	if err != nil {
		return nil, errors.Annotate(err, "cannot make user data")
	}
	logger.Debugf("openstack user data; %d bytes", len(userData))

	var networks = e.firewaller.InitialNetworks()
	usingNetwork := e.ecfg().network()
	if usingNetwork != "" {
		networkId, err := e.resolveNetwork(usingNetwork)
		if err != nil {
			return nil, err
		}
		logger.Debugf("using network id %q", networkId)
		networks = append(networks, nova.ServerNetworks{NetworkId: networkId})
	}
	withPublicIP := e.ecfg().useFloatingIP()
	var publicIP *nova.FloatingIP
	if withPublicIP {
		// Allocate the floating IP up front so a failure aborts
		// before any server is created.
		logger.Debugf("allocating public IP address for openstack node")
		if fip, err := e.allocatePublicIP(); err != nil {
			return nil, errors.Annotate(err, "cannot allocate a public IP as needed")
		} else {
			publicIP = fip
			logger.Infof("allocated public IP %s", publicIP.IP)
		}
	}

	cfg := e.Config()
	var groupNames = make([]nova.SecurityGroupName, 0)
	groups, err := e.firewaller.SetUpGroups(args.InstanceConfig.MachineId, cfg.APIPort())
	if err != nil {
		return nil, errors.Annotate(err, "cannot set up groups")
	}
	for _, g := range groups {
		groupNames = append(groupNames, nova.SecurityGroupName{g.Name})
	}
	machineName := resourceName(
		names.NewMachineTag(args.InstanceConfig.MachineId),
		e.Config().UUID(),
	)

	// tryStartNovaInstance retries RunServer while the server is
	// reported not found, up to the attempt strategy's limit.
	tryStartNovaInstance := func(
		attempts utils.AttemptStrategy,
		client *nova.Client,
		instanceOpts nova.RunServerOpts,
	) (server *nova.Entity, err error) {
		for a := attempts.Start(); a.Next(); {
			server, err = client.RunServer(instanceOpts)
			if err == nil || gooseerrors.IsNotFound(err) == false {
				break
			}
		}
		return server, err
	}

	// tryStartNovaInstanceAcrossAvailZones walks the candidate zones,
	// moving to the next zone only on "no valid hosts" errors.
	tryStartNovaInstanceAcrossAvailZones := func(
		attempts utils.AttemptStrategy,
		client *nova.Client,
		instanceOpts nova.RunServerOpts,
		availabilityZones []string,
	) (server *nova.Entity, err error) {
		for _, zone := range availabilityZones {
			instanceOpts.AvailabilityZone = zone
			e.configurator.ModifyRunServerOptions(&instanceOpts)
			server, err = tryStartNovaInstance(attempts, client, instanceOpts)
			if err == nil || isNoValidHostsError(err) == false {
				break
			}

			logger.Infof("no valid hosts available in zone %q, trying another availability zone", zone)
		}
		if err != nil {
			err = errors.Annotate(err, "cannot run instance")
		}
		return server, err
	}

	var opts = nova.RunServerOpts{
		Name:               machineName,
		FlavorId:           spec.InstanceType.Id,
		ImageId:            spec.Image.Id,
		UserData:           userData,
		SecurityGroupNames: groupNames,
		Networks:           networks,
		Metadata:           args.InstanceConfig.Tags,
	}
	server, err := tryStartNovaInstanceAcrossAvailZones(shortAttempt, e.nova(), opts, availabilityZones)
	if err != nil {
		return nil, errors.Trace(err)
	}

	detail, err := e.nova().GetServer(server.Id)
	if err != nil {
		return nil, errors.Annotate(err, "cannot get started instance")
	}

	inst := &openstackInstance{
		e:            e,
		serverDetail: detail,
		arch:         &spec.Image.Arch,
		instType:     &spec.InstanceType,
	}
	logger.Infof("started instance %q", inst.Id())
	if withPublicIP {
		if err := e.assignPublicIP(publicIP, string(inst.Id())); err != nil {
			// Assigning the IP failed: tear the instance down again
			// so we don't leak it, then report the assignment error.
			if err := e.terminateInstances([]instance.Id{inst.Id()}); err != nil {
				// ignore the failure at this stage, just log it
				logger.Debugf("failed to terminate instance %q: %v", inst.Id(), err)
			}
			return nil, errors.Annotatef(err, "cannot assign public address %s to instance %q", publicIP.IP, inst.Id())
		}
		inst.floatingIP = publicIP
		logger.Infof("assigned public IP %s to %q", publicIP.IP, inst.Id())
	}
	return &environs.StartInstanceResult{
		Instance: inst,
		Hardware: inst.hardwareCharacteristics(),
	}, nil
}
// StartInstance is specified in the InstanceBroker interface.
//
// Flow: finish the instance config, compose userdata (Azure "custom
// data"), select an instance type and image, derive the cloud service
// name used for availability-set affinity, then create the role and
// instance and report its hardware characteristics.
func (env *azureEnviron) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {
	if args.InstanceConfig.HasNetworks() {
		return nil, errors.New("starting instances with networks is not supported yet")
	}
	err := instancecfg.FinishInstanceConfig(args.InstanceConfig, env.Config())
	if err != nil {
		return nil, err
	}

	// Pick envtools.  Needed for the custom data (which is what we normally
	// call userdata).
	args.InstanceConfig.Tools = args.Tools[0]
	logger.Infof("picked tools %q", args.InstanceConfig.Tools)

	// Compose userdata.
	userData, err := providerinit.ComposeUserData(args.InstanceConfig, nil, AzureRenderer{})
	if err != nil {
		return nil, errors.Annotate(err, "cannot compose user data")
	}

	snapshot := env.getSnapshot()
	location := snapshot.ecfg.location()
	instanceType, sourceImageName, err := env.selectInstanceTypeAndImage(&instances.InstanceConstraint{
		Region:      location,
		Series:      args.Tools.OneSeries(),
		Arches:      args.Tools.Arches(),
		Constraints: args.Constraints,
	})
	if err != nil {
		return nil, err
	}

	// We use the cloud service label as a way to group instances with
	// the same affinity, so that machines can be allocated to the
	// same availability set.
	var cloudServiceName string
	if args.DistributionGroup != nil && snapshot.ecfg.availabilitySetsEnabled() {
		instanceIds, err := args.DistributionGroup()
		if err != nil {
			return nil, err
		}
		for _, id := range instanceIds {
			cloudServiceName, _ = env.splitInstanceId(id)
			if cloudServiceName != "" {
				break
			}
		}
	}

	vhd, err := env.newOSDisk(sourceImageName, args.InstanceConfig.Series)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// If we're creating machine-0, we'll want to expose port 22.
	// All other machines get an auto-generated public port for SSH.
	stateServer := multiwatcher.AnyJobNeedsState(args.InstanceConfig.Jobs...)
	role, err := env.newRole(instanceType.Id, vhd, stateServer, string(userData), args.InstanceConfig.Series, snapshot)
	if err != nil {
		return nil, errors.Trace(err)
	}
	inst, err := createInstance(env, snapshot.api, role, cloudServiceName, stateServer)
	if err != nil {
		return nil, errors.Trace(err)
	}
	hc := &instance.HardwareCharacteristics{
		Mem:      &instanceType.Mem,
		RootDisk: &instanceType.RootDisk,
		CpuCores: &instanceType.CpuCores,
	}
	if len(instanceType.Arches) == 1 {
		// Only report an arch when it is unambiguous.
		hc.Arch = &instanceType.Arches[0]
	}
	return &environs.StartInstanceResult{
		Instance: inst,
		Hardware: hc,
	}, nil
}
// StartInstance creates a new Joyent machine for the given args: it
// selects an instance spec and tools, injects an if-up script into the
// cloud-config to work around Joyent's split 10.x.x.x/21 internal
// networks, creates the machine, and polls until it reports "running".
func (env *joyentEnviron) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {

	if args.InstanceConfig.HasNetworks() {
		return nil, errors.New("starting instances with networks is not supported yet")
	}

	series := args.Tools.OneSeries()
	arches := args.Tools.Arches()
	spec, err := env.FindInstanceSpec(&instances.InstanceConstraint{
		Region:      env.Ecfg().Region(),
		Series:      series,
		Arches:      arches,
		Constraints: args.Constraints,
	}, args.ImageMetadata)
	if err != nil {
		return nil, err
	}
	tools, err := args.Tools.Match(tools.Filter{Arch: spec.Image.Arch})
	if err != nil {
		return nil, errors.Errorf("chosen architecture %v not present in %v", spec.Image.Arch, arches)
	}

	args.InstanceConfig.Tools = tools[0]

	if err := instancecfg.FinishInstanceConfig(args.InstanceConfig, env.Config()); err != nil {
		return nil, err
	}

	// This is a hack that ensures that instances can communicate over
	// the internal network. Joyent sometimes gives instances
	// different 10.x.x.x/21 networks and adding this route allows
	// them to talk despite this. See:
	// https://bugs.launchpad.net/juju-core/+bug/1401130
	cloudcfg, err := cloudinit.New(args.InstanceConfig.Series)
	if err != nil {
		return nil, errors.Annotate(err, "cannot create cloudinit template")
	}
	ifupScript := `
#!/bin/bash

# These guards help to ensure that this hack only runs if Joyent's
# internal network still works as it does at time of writing.
[ "$IFACE" == "eth1" ] || [ "$IFACE" == "--all" ] || exit 0
/sbin/ip -4 --oneline addr show dev eth1 | fgrep --quiet " inet 10." || exit 0

/sbin/ip route add 10.0.0.0/8 dev eth1
`[1:]
	cloudcfg.AddBootTextFile("/etc/network/if-up.d/joyent", ifupScript, 0755)

	userData, err := providerinit.ComposeUserData(args.InstanceConfig, cloudcfg, JoyentRenderer{})
	if err != nil {
		return nil, errors.Annotate(err, "cannot make user data")
	}
	logger.Debugf("joyent user data: %d bytes", len(userData))

	var machine *cloudapi.Machine
	machine, err = env.compute.cloudapi.CreateMachine(cloudapi.CreateMachineOpts{
		//Name:	 env.machineFullName(machineConf.MachineId),
		Package:  spec.InstanceType.Name,
		Image:    spec.Image.Id,
		Metadata: map[string]string{"metadata.cloud-init:user-data": string(userData)},
		Tags:     map[string]string{"tag.group": "juju", "tag.env": env.Config().Name()},
	})
	if err != nil {
		return nil, errors.Annotate(err, "cannot create instances")
	}
	machineId := machine.Id

	logger.Infof("provisioning instance %q", machineId)

	machine, err = env.compute.cloudapi.GetMachine(machineId)
	if err != nil {
		return nil, errors.Annotate(err, "cannot start instances")
	}

	// wait for machine to start
	// NOTE(review): this polls with no timeout — a machine that never
	// reaches "running" blocks forever; confirm acceptable upstream.
	for !strings.EqualFold(machine.State, "running") {
		time.Sleep(1 * time.Second)

		machine, err = env.compute.cloudapi.GetMachine(machineId)
		if err != nil {
			return nil, errors.Annotate(err, "cannot start instances")
		}
	}

	logger.Infof("started instance %q", machineId)

	inst := &joyentInstance{
		machine: machine,
		env:     env,
	}

	if multiwatcher.AnyJobNeedsState(args.InstanceConfig.Jobs...) {
		if err := common.AddStateInstance(env.Storage(), inst.Id()); err != nil {
			logger.Errorf("could not record instance in provider-state: %v", err)
		}
	}

	disk64 := uint64(machine.Disk)
	hc := instance.HardwareCharacteristics{
		Arch:     &spec.Image.Arch,
		Mem:      &spec.InstanceType.Mem,
		CpuCores: &spec.InstanceType.CpuCores,
		CpuPower: spec.InstanceType.CpuPower,
		RootDisk: &disk64,
	}

	return &environs.StartInstanceResult{
		Instance: inst,
		Hardware: &hc,
	}, nil
}
func (*CloudInitSuite) testUserData(c *gc.C, series string, bootstrap bool) { testJujuHome := c.MkDir() defer osenv.SetJujuHome(osenv.SetJujuHome(testJujuHome)) // Use actual series paths instead of local defaults logDir := must(paths.LogDir(series)) metricsSpoolDir := must(paths.MetricsSpoolDir(series)) dataDir := must(paths.DataDir(series)) tools := &tools.Tools{ URL: "http://tools.testing/tools/released/juju.tgz", Version: version.Binary{version.MustParse("1.2.3"), "quantal", "amd64"}, } envConfig, err := config.New(config.NoDefaults, dummySampleConfig()) c.Assert(err, jc.ErrorIsNil) allJobs := []multiwatcher.MachineJob{ multiwatcher.JobManageModel, multiwatcher.JobHostUnits, multiwatcher.JobManageNetworking, } cfg := &instancecfg.InstanceConfig{ MachineId: "10", MachineNonce: "5432", Tools: tools, Series: series, MongoInfo: &mongo.MongoInfo{ Info: mongo.Info{ Addrs: []string{"127.0.0.1:1234"}, CACert: "CA CERT\n" + testing.CACert, }, Password: "******", Tag: names.NewMachineTag("10"), }, APIInfo: &api.Info{ Addrs: []string{"127.0.0.1:1234"}, Password: "******", CACert: "CA CERT\n" + testing.CACert, Tag: names.NewMachineTag("10"), ModelTag: testing.ModelTag, }, DataDir: dataDir, LogDir: path.Join(logDir, "juju"), MetricsSpoolDir: metricsSpoolDir, Jobs: allJobs, CloudInitOutputLog: path.Join(logDir, "cloud-init-output.log"), Config: envConfig, AgentEnvironment: map[string]string{agent.ProviderType: "dummy"}, AuthorizedKeys: "wheredidileavemykeys", MachineAgentServiceName: "jujud-machine-10", EnableOSUpgrade: true, } if bootstrap { cfg.Bootstrap = true cfg.StateServingInfo = ¶ms.StateServingInfo{ StatePort: envConfig.StatePort(), APIPort: envConfig.APIPort(), Cert: testing.ServerCert, PrivateKey: testing.ServerKey, CAPrivateKey: testing.CAKey, } } script1 := "script1" script2 := "script2" cloudcfg, err := cloudinit.New(series) c.Assert(err, jc.ErrorIsNil) cloudcfg.AddRunCmd(script1) cloudcfg.AddRunCmd(script2) result, err := providerinit.ComposeUserData(cfg, 
cloudcfg, &openstack.OpenstackRenderer{}) c.Assert(err, jc.ErrorIsNil) unzipped, err := utils.Gunzip(result) c.Assert(err, jc.ErrorIsNil) config := make(map[interface{}]interface{}) err = goyaml.Unmarshal(unzipped, &config) c.Assert(err, jc.ErrorIsNil) // The scripts given to userData where added as the first // commands to be run. runCmd := config["runcmd"].([]interface{}) c.Check(runCmd[0], gc.Equals, script1) c.Check(runCmd[1], gc.Equals, script2) if bootstrap { // The cloudinit config should have nothing but the basics: // SSH authorized keys, the additional runcmds, and log output. // // Note: the additional runcmds *do* belong here, at least // for MAAS. MAAS needs to configure and then bounce the // network interfaces, which would sever the SSH connection // in the synchronous bootstrap phase. expected := map[interface{}]interface{}{ "output": map[interface{}]interface{}{ "all": "| tee -a /var/log/cloud-init-output.log", }, "runcmd": []interface{}{ "script1", "script2", "set -xe", "install -D -m 644 /dev/null '/etc/init/juju-clean-shutdown.conf'", "printf '%s\\n' '\nauthor \"Juju Team <*****@*****.**>\"\ndescription \"Stop all network interfaces on shutdown\"\nstart on runlevel [016]\ntask\nconsole output\n\nexec /sbin/ifdown -a -v --force\n' > '/etc/init/juju-clean-shutdown.conf'", "install -D -m 644 /dev/null '/var/lib/juju/nonce.txt'", "printf '%s\\n' '5432' > '/var/lib/juju/nonce.txt'", }, } // Series with old cloudinit versions don't support adding // users so need the old way to set SSH authorized keys. 
if series == "precise" { expected["ssh_authorized_keys"] = []interface{}{ "wheredidileavemykeys", } } else { expected["users"] = []interface{}{ map[interface{}]interface{}{ "name": "ubuntu", "lock_passwd": true, "groups": []interface{}{"adm", "audio", "cdrom", "dialout", "dip", "floppy", "netdev", "plugdev", "sudo", "video"}, "shell": "/bin/bash", "sudo": []interface{}{"ALL=(ALL) NOPASSWD:ALL"}, "ssh-authorized-keys": []interface{}{"wheredidileavemykeys"}, }, } } c.Check(config, jc.DeepEquals, expected) } else { // Just check that the cloudinit config looks good, // and that there are more runcmds than the additional // ones we passed into ComposeUserData. c.Check(config["package_upgrade"], jc.IsTrue) c.Check(len(runCmd) > 2, jc.IsTrue) } }
// newRawInstance is where the new physical instance is actually
// provisioned, relative to the provided args and spec. Info for that
// low-level instance is returned.
//
// The instance is attempted in each parsed availability zone in turn;
// the first successful creation wins, and the last error is returned
// if every zone fails.
func (env *environ) newRawInstance(args environs.StartInstanceParams, img *OvaFileMetadata) (*mo.VirtualMachine, *instance.HardwareCharacteristics, error) {
	machineID := common.MachineFullName(env, args.InstanceConfig.MachineId)

	cloudcfg, err := cloudinit.New(args.Tools.OneSeries())
	if err != nil {
		return nil, nil, errors.Trace(err)
	}
	// Packages required on vSphere guests: VMware guest tools and
	// persistent iptables rules.
	cloudcfg.AddPackage("open-vm-tools")
	cloudcfg.AddPackage("iptables-persistent")
	userData, err := providerinit.ComposeUserData(args.InstanceConfig, cloudcfg, VsphereRenderer{})
	if err != nil {
		return nil, nil, errors.Annotate(err, "cannot make user data")
	}
	logger.Debugf("Vmware user data; %d bytes", len(userData))

	// Apply constraints over the provider defaults where given.
	rootDisk := common.MinRootDiskSizeGiB * 1024
	if args.Constraints.RootDisk != nil && *args.Constraints.RootDisk > rootDisk {
		rootDisk = *args.Constraints.RootDisk
	}
	cpuCores := DefaultCpuCores
	if args.Constraints.CpuCores != nil {
		cpuCores = *args.Constraints.CpuCores
	}
	cpuPower := DefaultCpuPower
	if args.Constraints.CpuPower != nil {
		cpuPower = *args.Constraints.CpuPower
	}
	mem := DefaultMemMb
	if args.Constraints.Mem != nil {
		mem = *args.Constraints.Mem
	}
	hwc := &instance.HardwareCharacteristics{
		Arch:     &img.Arch,
		Mem:      &mem,
		CpuCores: &cpuCores,
		CpuPower: &cpuPower,
		RootDisk: &rootDisk,
	}
	zones, err := env.parseAvailabilityZones(args)
	if err != nil {
		return nil, nil, errors.Trace(err)
	}
	var inst *mo.VirtualMachine
	for _, zone := range zones {
		var availZone *vmwareAvailZone
		availZone, err = env.availZone(zone)
		if err != nil {
			logger.Warningf("Error while getting availability zone %s: %s", zone, err)
			continue
		}
		apiPort := 0
		if isStateServer(args.InstanceConfig) {
			apiPort = args.InstanceConfig.StateServingInfo.APIPort
		}
		spec := &instanceSpec{
			machineID: machineID,
			zone:      availZone,
			hwc:       hwc,
			img:       img,
			userData:  userData,
			sshKey:    args.InstanceConfig.AuthorizedKeys,
			isState:   isStateServer(args.InstanceConfig),
			apiPort:   apiPort,
		}
		inst, err = env.client.CreateInstance(env.ecfg, spec)
		if err != nil {
			logger.Warningf("Error while trying to create instance in %s availability zone: %s", zone, err)
			continue
		}
		break
	}
	if err != nil {
		return nil, nil, errors.Annotate(err, "Can't create instance in any of availability zones, last error")
	}
	return inst, hwc, err
}