// parseAvailabilityZones returns the availability zones that should be
// tried for the given instance spec. If a placement argument was
// provided then only that one is returned. Otherwise the environment is
// queried for available zones. In that case, the resulting list is
// roughly ordered such that the environment's instances are spread
// evenly across the region.
func (env *environ) parseAvailabilityZones(args environs.StartInstanceParams) ([]string, error) {
	if args.Placement != "" {
		// args.Placement will always be a zone name or empty.
		placement, err := env.parsePlacement(args.Placement)
		if err != nil {
			return nil, errors.Trace(err)
		}
		// TODO(ericsnow) Fail if placement.Zone is not in the env's configured region?
		return []string{placement.Zone.Name()}, nil
	}

	// If no availability zone is specified, then automatically spread across
	// the known zones for optimal spread across the instance distribution
	// group.
	var group []instance.Id
	var err error
	if args.DistributionGroup != nil {
		group, err = args.DistributionGroup()
		if err != nil {
			return nil, errors.Trace(err)
		}
	}
	zoneInstances, err := availabilityZoneAllocations(env, group)
	if err != nil {
		return nil, errors.Trace(err)
	}
	logger.Infof("found %d zones: %v", len(zoneInstances), zoneInstances)

	var zoneNames []string
	for _, z := range zoneInstances {
		zoneNames = append(zoneNames, z.ZoneName)
	}
	if len(zoneNames) == 0 {
		return nil, errors.NotFoundf("failed to determine availability zones")
	}
	return zoneNames, nil
}
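// Below is a minimal, self-contained sketch of the zone-spreading idea
// behind availabilityZoneAllocations. The zoneAllocation type and
// orderZonesBySpread helper are hypothetical stand-ins, not the provider's
// real API: zones holding fewer of the distribution group's instances sort
// first, which is what makes the returned list "roughly ordered" for an
// even spread across the region.
package main

import (
	"fmt"
	"sort"
)

type zoneAllocation struct {
	ZoneName      string
	InstanceCount int // instances from the distribution group in this zone
}

// orderZonesBySpread sorts zones so the least-populated zone is tried first.
func orderZonesBySpread(zones []zoneAllocation) []string {
	sort.Slice(zones, func(i, j int) bool {
		return zones[i].InstanceCount < zones[j].InstanceCount
	})
	names := make([]string, 0, len(zones))
	for _, z := range zones {
		names = append(names, z.ZoneName)
	}
	return names
}

func main() {
	zones := []zoneAllocation{
		{"us-central1-a", 3},
		{"us-central1-b", 1},
		{"us-central1-c", 2},
	}
	// Prints [us-central1-b us-central1-c us-central1-a].
	fmt.Println(orderZonesBySpread(zones))
}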
// StartInstance is specified in the InstanceBroker interface.
func (env *azureEnviron) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {
	if args.InstanceConfig.HasNetworks() {
		return nil, errors.New("starting instances with networks is not supported yet")
	}
	err := instancecfg.FinishInstanceConfig(args.InstanceConfig, env.Config())
	if err != nil {
		return nil, err
	}

	// Pick envtools. Needed for the custom data (which is what we normally
	// call userdata).
	args.InstanceConfig.Tools = args.Tools[0]
	logger.Infof("picked tools %q", args.InstanceConfig.Tools)

	// Compose userdata.
	userData, err := providerinit.ComposeUserData(args.InstanceConfig, nil, AzureRenderer{})
	if err != nil {
		return nil, errors.Annotate(err, "cannot compose user data")
	}

	snapshot := env.getSnapshot()
	location := snapshot.ecfg.location()
	instanceType, sourceImageName, err := env.selectInstanceTypeAndImage(&instances.InstanceConstraint{
		Region:      location,
		Series:      args.Tools.OneSeries(),
		Arches:      args.Tools.Arches(),
		Constraints: args.Constraints,
	})
	if err != nil {
		return nil, err
	}

	// We use the cloud service label as a way to group instances with
	// the same affinity, so that machines can be allocated to the
	// same availability set.
	var cloudServiceName string
	if args.DistributionGroup != nil && snapshot.ecfg.availabilitySetsEnabled() {
		instanceIds, err := args.DistributionGroup()
		if err != nil {
			return nil, err
		}
		for _, id := range instanceIds {
			cloudServiceName, _ = env.splitInstanceId(id)
			if cloudServiceName != "" {
				break
			}
		}
	}

	vhd, err := env.newOSDisk(sourceImageName, args.InstanceConfig.Series)
	if err != nil {
		return nil, errors.Trace(err)
	}

	// If we're creating machine-0, we'll want to expose port 22.
	// All other machines get an auto-generated public port for SSH.
	stateServer := multiwatcher.AnyJobNeedsState(args.InstanceConfig.Jobs...)
	role, err := env.newRole(instanceType.Id, vhd, stateServer, string(userData), args.InstanceConfig.Series, snapshot)
	if err != nil {
		return nil, errors.Trace(err)
	}
	inst, err := createInstance(env, snapshot.api, role, cloudServiceName, stateServer)
	if err != nil {
		return nil, errors.Trace(err)
	}
	hc := &instance.HardwareCharacteristics{
		Mem:      &instanceType.Mem,
		RootDisk: &instanceType.RootDisk,
		CpuCores: &instanceType.CpuCores,
	}
	if len(instanceType.Arches) == 1 {
		hc.Arch = &instanceType.Arches[0]
	}
	return &environs.StartInstanceResult{
		Instance: inst,
		Hardware: hc,
	}, nil
}
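// A hedged sketch of the availability-set grouping rule used above: every
// instance in a distribution group shares one cloud service, so Azure keeps
// them in the same availability set. splitID stands in for
// env.splitInstanceId, and the "<cloud-service>-<role>" id shape it assumes
// is purely illustrative, not the provider's documented format.
package main

import (
	"fmt"
	"strings"
)

// splitID assumes composite ids shaped like "<cloud-service>-<role>".
func splitID(id string) (service, role string) {
	if i := strings.LastIndex(id, "-"); i >= 0 {
		return id[:i], id[i+1:]
	}
	return "", id
}

// serviceForGroup picks the existing group's cloud service, if any member
// already has one; an empty result means a fresh service will be created.
func serviceForGroup(groupIDs []string) string {
	for _, id := range groupIDs {
		if svc, _ := splitID(id); svc != "" {
			return svc
		}
	}
	return ""
}

func main() {
	// Prints juju-abc123: the new machine joins that cloud service.
	fmt.Println(serviceForGroup([]string{"juju-abc123-0", "juju-abc123-1"}))
}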
// StartInstance is specified in the InstanceBroker interface.
func (e *Environ) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {
	var availabilityZones []string
	if args.Placement != "" {
		placement, err := e.parsePlacement(args.Placement)
		if err != nil {
			return nil, err
		}
		if !placement.availabilityZone.State.Available {
			return nil, errors.Errorf("availability zone %q is unavailable", placement.availabilityZone.Name)
		}
		availabilityZones = append(availabilityZones, placement.availabilityZone.Name)
	}

	// If no availability zone is specified, then automatically spread across
	// the known zones for optimal spread across the instance distribution
	// group.
	if len(availabilityZones) == 0 {
		var group []instance.Id
		var err error
		if args.DistributionGroup != nil {
			group, err = args.DistributionGroup()
			if err != nil {
				return nil, err
			}
		}
		zoneInstances, err := availabilityZoneAllocations(e, group)
		if errors.IsNotImplemented(err) {
			// Availability zones are an extension, so we may get a
			// not implemented error; ignore these.
		} else if err != nil {
			return nil, err
		} else {
			for _, zone := range zoneInstances {
				availabilityZones = append(availabilityZones, zone.ZoneName)
			}
		}
		if len(availabilityZones) == 0 {
			// No explicitly selectable zones available, so use an unspecified zone.
			availabilityZones = []string{""}
		}
	}

	series := args.Tools.OneSeries()
	arches := args.Tools.Arches()
	spec, err := findInstanceSpec(e, &instances.InstanceConstraint{
		Region:      e.ecfg().region(),
		Series:      series,
		Arches:      arches,
		Constraints: args.Constraints,
	}, args.ImageMetadata)
	if err != nil {
		return nil, err
	}
	tools, err := args.Tools.Match(tools.Filter{Arch: spec.Image.Arch})
	if err != nil {
		return nil, errors.Errorf("chosen architecture %v not present in %v", spec.Image.Arch, arches)
	}

	if err := args.InstanceConfig.SetTools(tools); err != nil {
		return nil, errors.Trace(err)
	}
	if err := instancecfg.FinishInstanceConfig(args.InstanceConfig, e.Config()); err != nil {
		return nil, err
	}
	cloudcfg, err := e.configurator.GetCloudConfig(args)
	if err != nil {
		return nil, errors.Trace(err)
	}
	userData, err := providerinit.ComposeUserData(args.InstanceConfig, cloudcfg, OpenstackRenderer{})
	if err != nil {
		return nil, errors.Annotate(err, "cannot make user data")
	}
	logger.Debugf("openstack user data; %d bytes", len(userData))

	var networks = e.firewaller.InitialNetworks()
	usingNetwork := e.ecfg().network()
	if usingNetwork != "" {
		networkId, err := e.resolveNetwork(usingNetwork)
		if err != nil {
			return nil, err
		}
		logger.Debugf("using network id %q", networkId)
		networks = append(networks, nova.ServerNetworks{NetworkId: networkId})
	}
	withPublicIP := e.ecfg().useFloatingIP()
	var publicIP *nova.FloatingIP
	if withPublicIP {
		logger.Debugf("allocating public IP address for openstack node")
		if fip, err := e.allocatePublicIP(); err != nil {
			return nil, errors.Annotate(err, "cannot allocate a public IP as needed")
		} else {
			publicIP = fip
			logger.Infof("allocated public IP %s", publicIP.IP)
		}
	}

	cfg := e.Config()
	var groupNames = make([]nova.SecurityGroupName, 0)
	groups, err := e.firewaller.SetUpGroups(args.InstanceConfig.MachineId, cfg.APIPort())
	if err != nil {
		return nil, errors.Annotate(err, "cannot set up groups")
	}
	for _, g := range groups {
		groupNames = append(groupNames, nova.SecurityGroupName{g.Name})
	}
	machineName := resourceName(
		names.NewMachineTag(args.InstanceConfig.MachineId),
		e.Config().UUID(),
	)

	tryStartNovaInstance := func(
		attempts utils.AttemptStrategy,
		client *nova.Client,
		instanceOpts nova.RunServerOpts,
	) (server *nova.Entity, err error) {
		for a := attempts.Start(); a.Next(); {
			server, err = client.RunServer(instanceOpts)
			if err == nil || gooseerrors.IsNotFound(err) == false {
				break
			}
		}
		return server, err
	}

	tryStartNovaInstanceAcrossAvailZones := func(
		attempts utils.AttemptStrategy,
		client *nova.Client,
		instanceOpts nova.RunServerOpts,
		availabilityZones []string,
	) (server *nova.Entity, err error) {
		for _, zone := range availabilityZones {
			instanceOpts.AvailabilityZone = zone
			e.configurator.ModifyRunServerOptions(&instanceOpts)
			server, err = tryStartNovaInstance(attempts, client, instanceOpts)
			if err == nil || isNoValidHostsError(err) == false {
				break
			}
			logger.Infof("no valid hosts available in zone %q, trying another availability zone", zone)
		}
		if err != nil {
			err = errors.Annotate(err, "cannot run instance")
		}
		return server, err
	}

	var opts = nova.RunServerOpts{
		Name:               machineName,
		FlavorId:           spec.InstanceType.Id,
		ImageId:            spec.Image.Id,
		UserData:           userData,
		SecurityGroupNames: groupNames,
		Networks:           networks,
		Metadata:           args.InstanceConfig.Tags,
	}
	server, err := tryStartNovaInstanceAcrossAvailZones(shortAttempt, e.nova(), opts, availabilityZones)
	if err != nil {
		return nil, errors.Trace(err)
	}

	detail, err := e.nova().GetServer(server.Id)
	if err != nil {
		return nil, errors.Annotate(err, "cannot get started instance")
	}

	inst := &openstackInstance{
		e:            e,
		serverDetail: detail,
		arch:         &spec.Image.Arch,
		instType:     &spec.InstanceType,
	}
	logger.Infof("started instance %q", inst.Id())
	if withPublicIP {
		if err := e.assignPublicIP(publicIP, string(inst.Id())); err != nil {
			if err := e.terminateInstances([]instance.Id{inst.Id()}); err != nil {
				// ignore the failure at this stage, just log it
				logger.Debugf("failed to terminate instance %q: %v", inst.Id(), err)
			}
			return nil, errors.Annotatef(err, "cannot assign public address %s to instance %q", publicIP.IP, inst.Id())
		}
		inst.floatingIP = publicIP
		logger.Infof("assigned public IP %s to %q", publicIP.IP, inst.Id())
	}
	return &environs.StartInstanceResult{
		Instance: inst,
		Hardware: inst.hardwareCharacteristics(),
	}, nil
}
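// The zone fallback above hinges on isNoValidHostsError. A hedged sketch of
// such a predicate follows; isNoValidHostsErrorSketch is a hypothetical
// stand-in, and matching on the Nova scheduler's "No valid host was found"
// message text is an assumption, not the provider's confirmed implementation.
package main

import (
	"errors"
	"fmt"
	"strings"
)

// isNoValidHostsErrorSketch reports whether err looks like the Nova
// scheduler failing to place an instance in the requested zone, the one
// case where trying the next availability zone makes sense.
func isNoValidHostsErrorSketch(err error) bool {
	return err != nil && strings.Contains(err.Error(), "No valid host was found")
}

func main() {
	err := errors.New("cannot run instance: No valid host was found")
	fmt.Println(isNoValidHostsErrorSketch(err)) // true: retry in another zone
}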