Example #1
// distributeUnit takes a unit and a set of clean, possibly empty, instances
// and asks the InstanceDistributor policy (if any) which ones are suitable
// for assigning the unit to. If there is no InstanceDistributor, or the
// distribution group is empty, then all of the candidates will be returned.
func distributeUnit(u *Unit, candidates []instance.Id) ([]instance.Id, error) {
	if len(candidates) == 0 {
		return nil, nil
	}
	if u.st.policy == nil {
		return candidates, nil
	}
	cfg, err := u.st.EnvironConfig()
	if err != nil {
		return nil, err
	}
	distributor, err := u.st.policy.InstanceDistributor(cfg)
	if errors.IsNotImplemented(err) {
		return candidates, nil
	} else if err != nil {
		return nil, err
	}
	if distributor == nil {
		return nil, fmt.Errorf("policy returned nil instance distributor without an error")
	}
	distributionGroup, err := ServiceInstances(u.st, u.doc.Service)
	if err != nil {
		return nil, err
	}
	if len(distributionGroup) == 0 {
		return candidates, nil
	}
	return distributor.DistributeInstances(candidates, distributionGroup)
}
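All of these examples lean on the NotImplemented sentinel from github.com/juju/errors. A minimal runnable sketch of the round trip, with a hypothetical policy hook standing in for InstanceDistributor:

package main

import (
	"fmt"

	"github.com/juju/errors"
)

// lookupDistributor is a hypothetical policy hook that signals the
// feature is unsupported via errors.NotImplementedf.
func lookupDistributor() (interface{}, error) {
	return nil, errors.NotImplementedf("InstanceDistributor")
}

func main() {
	if _, err := lookupDistributor(); errors.IsNotImplemented(err) {
		// Fall back to the default, as distributeUnit does when it
		// returns the candidates unchanged.
		fmt.Println("policy hook missing; using default behaviour")
	}
}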
Example #2
func (st *State) constraintsValidator() (constraints.Validator, error) {
	// Default behaviour is to simply use a standard validator with
	// no environment specific behaviour built in.
	defaultValidator := constraints.NewValidator()
	if st.policy == nil {
		return defaultValidator, nil
	}
	cfg, err := st.EnvironConfig()
	if err != nil {
		return nil, err
	}
	validator, err := st.policy.ConstraintsValidator(
		cfg,
		&cloudimagemetadata.MetadataArchitectureQuerier{st.CloudImageMetadataStorage},
	)
	if errors.IsNotImplemented(err) {
		return defaultValidator, nil
	} else if err != nil {
		return nil, err
	}
	if validator == nil {
		return nil, fmt.Errorf("policy returned nil constraints validator without an error")
	}
	return validator, nil
}
Example #3
func (c *StatusGetCommand) ServiceStatus(ctx *cmd.Context) error {
	serviceStatus, err := c.ctx.ServiceStatus()
	if err != nil {
		if errors.IsNotImplemented(err) {
			return c.out.Write(ctx, params.StatusUnknown)
		}
		return errors.Annotatef(err, "finding service status")
	}
	if !c.includeData && c.out.Name() == "smart" {
		return c.out.Write(ctx, serviceStatus.Service.Status)
	}
	statusDetails := make(map[string]interface{})
	details := toDetails(serviceStatus.Service, c.includeData)

	units := make(map[string]interface{}, len(serviceStatus.Units))
	for _, unit := range serviceStatus.Units {
		units[unit.Tag] = toDetails(unit, c.includeData)
	}
	details["units"] = units
	statusDetails["service-status"] = details
	return c.out.Write(ctx, statusDetails)
}
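Both status commands degrade to params.StatusUnknown when the server predates the query. A hedged sketch of that degrade-on-NotImplemented shape, with fetch standing in for c.ctx.ServiceStatus or c.ctx.UnitStatus:

package statusexample

import "github.com/juju/errors"

// status returns the fetched status, or "unknown" when the server
// does not implement the call; fetch is a hypothetical stand-in.
func status(fetch func() (string, error)) (string, error) {
	s, err := fetch()
	if errors.IsNotImplemented(err) {
		return "unknown", nil // params.StatusUnknown in the examples
	}
	if err != nil {
		return "", errors.Annotate(err, "finding status")
	}
	return s, nil
}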
Example #4
// getStartTask creates a new worker for the provisioner.
func (p *provisioner) getStartTask(safeMode bool) (ProvisionerTask, error) {
	auth, err := authentication.NewAPIAuthenticator(p.st)
	if err != nil {
		return nil, err
	}
	// Start responding to changes in machines, and to any further updates
	// to the environment config.
	machineWatcher, err := p.getMachineWatcher()
	if err != nil {
		return nil, err
	}
	retryWatcher, err := p.getRetryWatcher()
	if err != nil && !errors.IsNotImplemented(err) {
		return nil, err
	}
	tag := p.agentConfig.Tag()
	machineTag, ok := tag.(names.MachineTag)
	if !ok {
		return nil, errors.Errorf("expected names.MachineTag, got %T", tag)
	}
	task := NewProvisionerTask(
		machineTag, safeMode, p.st,
		machineWatcher, retryWatcher, p.broker, auth)
	return task, nil
}
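Because a NotImplemented error from getRetryWatcher is deliberately swallowed, retryWatcher can be nil here, so whatever consumes it presumably has to guard for that. A sketch of such a guard, with an assumed Changes()-channel watcher interface:

package watcherexample

// notifyWatcher is an assumed interface matching the usual
// Changes()-channel shape of Juju notify watchers.
type notifyWatcher interface {
	Changes() <-chan struct{}
}

// watchRetries is a hypothetical consumer: a nil watcher means the
// API never implemented retries, so there is nothing to process.
func watchRetries(w notifyWatcher) {
	if w == nil {
		return
	}
	for range w.Changes() {
		// handle one retry notification
	}
}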
Example #5
// Ports returns the port ranges opened for the whole environment.
// Must only be used if the environment was set up with the
// FwGlobal firewall mode.
func (env *environ) Ports() ([]network.PortRange, error) {
	ports, err := env.raw.Ports(env.globalFirewallName())
	if errors.IsNotImplemented(err) {
		// TODO(ericsnow) for now...
		return nil, nil
	}
	return ports, errors.Trace(err)
}
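The single trailing return works because errors.Trace passes nil through unchanged. A small sketch of the same shape, with fetch standing in for env.raw.Ports and plain strings in place of network.PortRange:

package portsexample

import "github.com/juju/errors"

// listPorts treats a NotImplemented provider as having no open
// ports; errors.Trace(nil) is nil, so the single trailing return
// covers both the success and the failure path.
func listPorts(fetch func() ([]string, error)) ([]string, error) {
	ports, err := fetch()
	if errors.IsNotImplemented(err) {
		return nil, nil
	}
	return ports, errors.Trace(err)
}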
Example #6
// ClosePorts closes the given port ranges for the whole environment.
// Must only be used if the environment was set up with the
// FwGlobal firewall mode.
func (env *environ) ClosePorts(ports []network.PortRange) error {
	err := env.raw.ClosePorts(env.globalFirewallName(), ports...)
	if errors.IsNotImplemented(err) {
		// TODO(ericsnow) for now...
		return nil
	}
	return errors.Trace(err)
}
Example #7
// ClosePorts closes the given ports on the instance, which
// should have been started with the given machine id.
func (inst *environInstance) ClosePorts(machineID string, ports []network.PortRange) error {
	name := common.MachineFullName(inst.env.Config().UUID(), machineID)
	err := inst.env.raw.ClosePorts(name, ports...)
	if errors.IsNotImplemented(err) {
		// TODO(ericsnow) for now...
		return nil
	}
	return errors.Trace(err)
}
Example #8
// Ports returns the set of ports open on the instance, which
// should have been started with the given machine id.
// The ports are returned as sorted by SortPorts.
func (inst *environInstance) Ports(machineID string) ([]network.PortRange, error) {
	name := common.MachineFullName(inst.env.Config().UUID(), machineID)
	ports, err := inst.env.raw.Ports(name)
	if errors.IsNotImplemented(err) {
		// TODO(ericsnow) for now...
		return nil, nil
	}
	return ports, errors.Trace(err)
}
Example #9
func machineLoop(context machineContext, m machine, changed <-chan struct{}) error {
	// Use a short poll interval when initially waiting for
	// a machine's address and machine agent to start, and a long one when it already
	// has an address and the machine agent is started.
	pollInterval := ShortPoll
	pollInstance := true
	for {
		if pollInstance {
			instInfo, err := pollInstanceInfo(context, m)
			if err != nil && !state.IsNotProvisionedError(err) {
				// If the provider doesn't implement Addresses/Status now,
				// it never will until we're upgraded, so don't bother
				// asking any more. We could use less resources
				// by taking down the entire worker, but this is easier for now
				// (and hopefully the local provider will implement
				// Addresses/Status in the not-too-distant future),
				// so we won't need to worry about this case at all.
				if errors.IsNotImplemented(err) {
					pollInterval = 365 * 24 * time.Hour
				} else {
					return err
				}
			}
			machineStatus := params.StatusPending
			if err == nil {
				if machineStatus, _, _, err = m.Status(); err != nil {
					logger.Warningf("cannot get current machine status for machine %v: %v", m.Id(), err)
				}
			}
			if len(instInfo.addresses) > 0 && instInfo.status != "" && machineStatus == params.StatusStarted {
				// We've got at least one address and a status and instance is started, so poll infrequently.
				pollInterval = LongPoll
			} else if pollInterval < LongPoll {
			// We have no addresses yet or the machine is not started,
			// so back off the poll interval until it reaches LongPoll.
				pollInterval = time.Duration(float64(pollInterval) * ShortPollBackoff)
			}
			pollInstance = false
		}
		select {
		case <-time.After(pollInterval):
			pollInstance = true
		case <-context.dying():
			return nil
		case <-changed:
			if err := m.Refresh(); err != nil {
				return err
			}
			if m.Life() == state.Dead {
				return nil
			}
		}
	}
}
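The backoff line grows the interval geometrically until it reaches LongPoll. A runnable sketch with illustrative values for ShortPoll, LongPoll, and ShortPollBackoff (the worker defines its own constants):

package main

import (
	"fmt"
	"time"
)

// Illustrative values only.
const (
	ShortPoll        = 1 * time.Second
	LongPoll         = 15 * time.Minute
	ShortPollBackoff = 2.0
)

func main() {
	pollInterval := ShortPoll
	for pollInterval < LongPoll {
		fmt.Println(pollInterval) // 1s, 2s, 4s, ... capped by the loop condition
		pollInterval = time.Duration(float64(pollInterval) * ShortPollBackoff)
	}
}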
Example #10
// OpenPorts opens the given ports on the instance, which
// should have been started with the given machine id.
func (inst *environInstance) OpenPorts(machineID string, ports []network.PortRange) error {
	// TODO(ericsnow) Make sure machineId matches inst.Id()?
	name := common.MachineFullName(inst.env, machineID)
	env := inst.env.getSnapshot()
	err := env.raw.OpenPorts(name, ports...)
	if errors.IsNotImplemented(err) {
		// TODO(ericsnow) for now...
		return nil
	}
	return errors.Trace(err)
}
Example #11
// getStartTask creates a new worker for the provisioner.
func (p *provisioner) getStartTask(harvestMode config.HarvestMode) (ProvisionerTask, error) {
	auth, err := authentication.NewAPIAuthenticator(p.st)
	if err != nil {
		return nil, err
	}
	// Start responding to changes in machines, and to any further updates
	// to the environment config.
	machineWatcher, err := p.getMachineWatcher()
	if err != nil {
		return nil, err
	}
	retryWatcher, err := p.getRetryWatcher()
	if err != nil && !errors.IsNotImplemented(err) {
		return nil, err
	}
	tag := p.agentConfig.Tag()
	machineTag, ok := tag.(names.MachineTag)
	if !ok {
		return nil, errors.Errorf("expected names.MachineTag, got %T", tag)
	}

	modelCfg, err := p.st.ModelConfig()
	if err != nil {
		return nil, errors.Annotate(err, "could not retrieve the model config")
	}

	controllerCfg, err := p.st.ControllerConfig()
	if err != nil {
		return nil, errors.Annotate(err, "could not retrieve the controller config")
	}

	task, err := NewProvisionerTask(
		controllerCfg.ControllerUUID(),
		machineTag,
		harvestMode,
		p.st,
		p.toolsFinder,
		machineWatcher,
		retryWatcher,
		p.broker,
		auth,
		modelCfg.ImageStream(),
		RetryStrategy{retryDelay: retryStrategyDelay, retryCount: retryStrategyCount},
	)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return task, nil
}
Example #12
// validate calls the state's assigned policy, if non-nil, to obtain
// a ConfigValidator, and calls Validate if a non-nil ConfigValidator is
// returned.
func (st *State) validate(cfg, old *config.Config) (valid *config.Config, err error) {
	if st.policy == nil {
		return cfg, nil
	}
	configValidator, err := st.policy.ConfigValidator(cfg.Type())
	if errors.IsNotImplemented(err) {
		return cfg, nil
	} else if err != nil {
		return nil, err
	}
	if configValidator == nil {
		return nil, fmt.Errorf("policy returned nil configValidator without an error")
	}
	return configValidator.Validate(cfg, old)
}
Example #13
// precheckInstance calls the state's assigned policy, if non-nil, to obtain
// a Prechecker, and calls PrecheckInstance if a non-nil Prechecker is returned.
func (st *State) precheckInstance(series string, cons constraints.Value, placement string) error {
	if st.policy == nil {
		return nil
	}
	prechecker, err := st.policy.Prechecker()
	if errors.IsNotImplemented(err) {
		return nil
	} else if err != nil {
		return err
	}
	if prechecker == nil {
		return errors.New("policy returned nil prechecker without an error")
	}
	return prechecker.PrecheckInstance(series, cons, placement)
}
Example #14
// getStartTask creates a new worker for the provisioner.
func (p *provisioner) getStartTask(harvestMode config.HarvestMode) (ProvisionerTask, error) {
	auth, err := authentication.NewAPIAuthenticator(p.st)
	if err != nil {
		return nil, err
	}
	// Start responding to changes in machines, and to any further updates
	// to the environment config.
	machineWatcher, err := p.getMachineWatcher()
	if err != nil {
		return nil, err
	}
	retryWatcher, err := p.getRetryWatcher()
	if err != nil && !errors.IsNotImplemented(err) {
		return nil, err
	}
	tag := p.agentConfig.Tag()
	machineTag, ok := tag.(names.MachineTag)
	if !ok {
		return nil, errors.Errorf("expected names.MachineTag, got %T", tag)
	}

	envCfg, err := p.st.EnvironConfig()
	if err != nil {
		return nil, errors.Annotate(err, "could not retrieve the environment config")
	}

	secureServerConnection := false
	if info, ok := p.agentConfig.StateServingInfo(); ok {
		secureServerConnection = info.CAPrivateKey != ""
	}
	task, err := NewProvisionerTask(
		machineTag,
		harvestMode,
		p.st,
		p.toolsFinder,
		machineWatcher,
		retryWatcher,
		p.broker,
		auth,
		envCfg.ImageStream(),
		secureServerConnection,
		RetryStrategy{retryDelay: retryStrategyDelay, retryCount: retryStrategyCount},
	)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return task, nil
}
Example #15
func (st *State) constraintsValidator() (constraints.Validator, error) {
	// Default behaviour is to simply use a standard validator with
	// no model specific behaviour built in.
	var validator constraints.Validator
	if st.policy != nil {
		var err error
		validator, err = st.policy.ConstraintsValidator()
		if errors.IsNotImplemented(err) {
			validator = constraints.NewValidator()
		} else if err != nil {
			return nil, err
		} else if validator == nil {
			return nil, errors.New("policy returned nil constraints validator without an error")
		}
	} else {
		validator = constraints.NewValidator()
	}

	// Add supported architectures gleaned from cloud image
	// metadata to the validator's vocabulary.
	model, err := st.Model()
	if err != nil {
		return nil, errors.Annotate(err, "getting model")
	}
	if region := model.CloudRegion(); region != "" {
		cfg, err := st.ModelConfig()
		if err != nil {
			return nil, errors.Trace(err)
		}
		arches, err := st.CloudImageMetadataStorage.SupportedArchitectures(
			cloudimagemetadata.MetadataFilter{
				Stream: cfg.AgentStream(),
				Region: region,
			},
		)
		if err != nil {
			return nil, errors.Annotate(err, "querying supported architectures")
		}
		if len(arches) != 0 {
			validator.UpdateVocabulary(constraints.Arch, arches)
		}
	}
	return validator, nil
}
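The closing UpdateVocabulary call is what restricts which architectures a user may request. A short usage sketch built from the same calls (the import path and architecture list are illustrative; the constraints package path varies across Juju versions):

package constraintsexample

import "github.com/juju/juju/constraints"

// newArchValidator mirrors the vocabulary update above: only the
// listed architectures will pass constraint validation.
func newArchValidator(arches []string) constraints.Validator {
	v := constraints.NewValidator()
	if len(arches) != 0 {
		v.UpdateVocabulary(constraints.Arch, arches)
	}
	return v
}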
Example #16
// supportsUnitPlacement calls the state's assigned policy, if non-nil,
// to obtain an EnvironCapability, and calls SupportsUnitPlacement if a
// non-nil EnvironCapability is returned.
func (st *State) supportsUnitPlacement() error {
	if st.policy == nil {
		return nil
	}
	cfg, err := st.EnvironConfig()
	if err != nil {
		return err
	}
	capability, err := st.policy.EnvironCapability(cfg)
	if errors.IsNotImplemented(err) {
		return nil
	} else if err != nil {
		return err
	}
	if capability == nil {
		return fmt.Errorf("policy returned nil EnvironCapability without an error")
	}
	return capability.SupportsUnitPlacement()
}
Example #17
// getStartTask creates a new worker for the provisioner.
func (p *provisioner) getStartTask(safeMode bool) (ProvisionerTask, error) {
	auth, err := environs.NewAPIAuthenticator(p.st)
	if err != nil {
		return nil, err
	}
	// Start responding to changes in machines, and to any further updates
	// to the environment config.
	machineWatcher, err := p.getMachineWatcher()
	if err != nil {
		return nil, err
	}
	retryWatcher, err := p.getRetryWatcher()
	if err != nil && !errors.IsNotImplemented(err) {
		return nil, err
	}
	task := NewProvisionerTask(
		p.agentConfig.Tag(), safeMode, p.st,
		machineWatcher, retryWatcher, p.broker, auth)
	return task, nil
}
Example #18
func (c *StatusGetCommand) unitOrServiceStatus(ctx *cmd.Context) error {
	if c.serviceWide {
		return c.ServiceStatus(ctx)
	}

	unitStatus, err := c.ctx.UnitStatus()
	if err != nil {
		if errors.IsNotImplemented(err) {
			return c.out.Write(ctx, params.StatusUnknown)
		}
		return errors.Annotatef(err, "finding workload status")
	}
	if !c.includeData && c.out.Name() == "smart" {
		return c.out.Write(ctx, unitStatus.Status)
	}
	return c.out.Write(ctx, toDetails(*unitStatus, c.includeData))
}
Example #19
// defaultInheritedConfig returns config values which are defined
// as defaults in either Juju or the state's environ provider.
func (st *State) defaultInheritedConfig() (attrValues, error) {
	var defaults = make(map[string]interface{})
	for k, v := range config.ConfigDefaults() {
		defaults[k] = v
	}
	providerDefaults, err := st.environsProviderConfigSchemaSource()
	if errors.IsNotImplemented(err) {
		return defaults, nil
	} else if err != nil {
		return nil, errors.Trace(err)
	}
	fields := schema.FieldMap(providerDefaults.ConfigSchema(), providerDefaults.ConfigDefaults())
	if coercedAttrs, err := fields.Coerce(defaults, nil); err != nil {
		return nil, errors.Trace(err)
	} else {
		for k, v := range coercedAttrs.(map[string]interface{}) {
			defaults[k] = v
		}
	}
	return defaults, nil
}
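The Coerce step both fills provider defaults and checks attribute types. A self-contained sketch with github.com/juju/schema, using an illustrative field:

package main

import (
	"fmt"

	"github.com/juju/schema"
)

func main() {
	fields := schema.Fields{"stream": schema.String()}
	defaults := schema.Defaults{"stream": "released"}
	checker := schema.FieldMap(fields, defaults)

	// An empty input is filled from the defaults, just as the
	// provider defaults are folded in above.
	out, err := checker.Coerce(map[string]interface{}{}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out.(map[string]interface{})["stream"]) // released
}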
Example #20
// pollInstanceInfo checks the current provider addresses and status
// for the given machine's instance, and sets them on the machine if they've changed.
func pollInstanceInfo(context machineContext, m machine) (instInfo instanceInfo, err error) {
	instInfo = instanceInfo{}
	instId, err := m.InstanceId()
	// We can't ask the machine for its addresses if it isn't provisioned yet.
	if state.IsNotProvisionedError(err) {
		return instInfo, err
	}
	if err != nil {
		return instInfo, fmt.Errorf("cannot get machine's instance id: %v", err)
	}
	instInfo, err = context.instanceInfo(instId)
	if err != nil {
		if errors.IsNotImplemented(err) {
			return instInfo, err
		}
		logger.Warningf("cannot get instance info for instance %q: %v", instId, err)
		return instInfo, nil
	}
	currentInstStatus, err := m.InstanceStatus()
	if err != nil {
		// This should never occur since the machine is provisioned.
		// But just in case, we reset polled status so we try again next time.
		logger.Warningf("cannot get current instance status for machine %v: %v", m.Id(), err)
		instInfo.status = ""
	} else {
		if instInfo.status != currentInstStatus {
			logger.Infof("machine %q has new instance status: %v", m.Id(), instInfo.status)
			if err = m.SetInstanceStatus(instInfo.status); err != nil {
				logger.Errorf("cannot set instance status on %q: %v", m, err)
			}
		}
	}
	if !addressesEqual(m.Addresses(), instInfo.addresses) {
		logger.Infof("machine %q has new addresses: %v", m.Id(), instInfo.addresses)
		if err = m.SetAddresses(instInfo.addresses...); err != nil {
			logger.Errorf("cannot set addresses on %q: %v", m, err)
		}
	}
	return instInfo, err
}
Example #21
// getStartTask creates a new worker for the provisioner.
func (p *provisioner) getStartTask(harvestMode config.HarvestMode) (ProvisionerTask, error) {
	auth, err := authentication.NewAPIAuthenticator(p.st)
	if err != nil {
		return nil, err
	}
	// Start responding to changes in machines, and to any further updates
	// to the environment config.
	machineWatcher, err := p.getMachineWatcher()
	if err != nil {
		return nil, err
	}
	retryWatcher, err := p.getRetryWatcher()
	if err != nil && !errors.IsNotImplemented(err) {
		return nil, err
	}
	tag := p.agentConfig.Tag()
	machineTag, ok := tag.(names.MachineTag)
	if !ok {
		return nil, errors.Errorf("expected names.MachineTag, got %T", tag)
	}

	envCfg, err := p.st.EnvironConfig()
	if err != nil {
		return nil, errors.Annotate(err, "could not retrieve the environment config")
	}

	task := NewProvisionerTask(
		machineTag,
		harvestMode,
		p.st,
		getToolsFinder(p.st),
		machineWatcher,
		retryWatcher,
		p.broker,
		auth,
		envCfg.ImageStream(),
	)
	return task, nil
}
Example #22
// StartInstance is specified in the InstanceBroker interface.
func (e *Environ) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {
	var availabilityZones []string
	if args.Placement != "" {
		placement, err := e.parsePlacement(args.Placement)
		if err != nil {
			return nil, err
		}
		if !placement.availabilityZone.State.Available {
			return nil, errors.Errorf("availability zone %q is unavailable", placement.availabilityZone.Name)
		}
		availabilityZones = append(availabilityZones, placement.availabilityZone.Name)
	}

	// If no availability zone is specified, then automatically spread across
	// the known zones for optimal spread across the instance distribution
	// group.
	if len(availabilityZones) == 0 {
		var group []instance.Id
		var err error
		if args.DistributionGroup != nil {
			group, err = args.DistributionGroup()
			if err != nil {
				return nil, err
			}
		}
		zoneInstances, err := availabilityZoneAllocations(e, group)
		if errors.IsNotImplemented(err) {
			// Availability zones are an extension, so we may get a
			// not implemented error; ignore these.
		} else if err != nil {
			return nil, err
		} else {
			for _, zone := range zoneInstances {
				availabilityZones = append(availabilityZones, zone.ZoneName)
			}
		}
		if len(availabilityZones) == 0 {
			// No explicitly selectable zones available, so use an unspecified zone.
			availabilityZones = []string{""}
		}
	}

	series := args.Tools.OneSeries()
	arches := args.Tools.Arches()
	spec, err := findInstanceSpec(e, &instances.InstanceConstraint{
		Region:      e.ecfg().region(),
		Series:      series,
		Arches:      arches,
		Constraints: args.Constraints,
	}, args.ImageMetadata)
	if err != nil {
		return nil, err
	}
	tools, err := args.Tools.Match(tools.Filter{Arch: spec.Image.Arch})
	if err != nil {
		return nil, errors.Errorf("chosen architecture %v not present in %v", spec.Image.Arch, arches)
	}

	if err := args.InstanceConfig.SetTools(tools); err != nil {
		return nil, errors.Trace(err)
	}

	if err := instancecfg.FinishInstanceConfig(args.InstanceConfig, e.Config()); err != nil {
		return nil, err
	}
	cloudcfg, err := e.configurator.GetCloudConfig(args)
	if err != nil {
		return nil, errors.Trace(err)
	}
	userData, err := providerinit.ComposeUserData(args.InstanceConfig, cloudcfg, OpenstackRenderer{})
	if err != nil {
		return nil, errors.Annotate(err, "cannot make user data")
	}
	logger.Debugf("openstack user data; %d bytes", len(userData))

	var networks = e.firewaller.InitialNetworks()
	usingNetwork := e.ecfg().network()
	if usingNetwork != "" {
		networkId, err := e.resolveNetwork(usingNetwork)
		if err != nil {
			return nil, err
		}
		logger.Debugf("using network id %q", networkId)
		networks = append(networks, nova.ServerNetworks{NetworkId: networkId})
	}
	withPublicIP := e.ecfg().useFloatingIP()
	var publicIP *nova.FloatingIP
	if withPublicIP {
		logger.Debugf("allocating public IP address for openstack node")
		if fip, err := e.allocatePublicIP(); err != nil {
			return nil, errors.Annotate(err, "cannot allocate a public IP as needed")
		} else {
			publicIP = fip
			logger.Infof("allocated public IP %s", publicIP.IP)
		}
	}

	cfg := e.Config()
	var groupNames = make([]nova.SecurityGroupName, 0)
	groups, err := e.firewaller.SetUpGroups(args.InstanceConfig.MachineId, cfg.APIPort())
	if err != nil {
		return nil, errors.Annotate(err, "cannot set up groups")
	}

	for _, g := range groups {
		groupNames = append(groupNames, nova.SecurityGroupName{g.Name})
	}
	machineName := resourceName(
		names.NewMachineTag(args.InstanceConfig.MachineId),
		e.Config().UUID(),
	)

	tryStartNovaInstance := func(
		attempts utils.AttemptStrategy,
		client *nova.Client,
		instanceOpts nova.RunServerOpts,
	) (server *nova.Entity, err error) {
		for a := attempts.Start(); a.Next(); {
			server, err = client.RunServer(instanceOpts)
			if err == nil || !gooseerrors.IsNotFound(err) {
				break
			}
		}
		return server, err
	}

	tryStartNovaInstanceAcrossAvailZones := func(
		attempts utils.AttemptStrategy,
		client *nova.Client,
		instanceOpts nova.RunServerOpts,
		availabilityZones []string,
	) (server *nova.Entity, err error) {
		for _, zone := range availabilityZones {
			instanceOpts.AvailabilityZone = zone
			e.configurator.ModifyRunServerOptions(&instanceOpts)
			server, err = tryStartNovaInstance(attempts, client, instanceOpts)
			if err == nil || !isNoValidHostsError(err) {
				break
			}

			logger.Infof("no valid hosts available in zone %q, trying another availability zone", zone)
		}

		if err != nil {
			err = errors.Annotate(err, "cannot run instance")
		}

		return server, err
	}

	var opts = nova.RunServerOpts{
		Name:               machineName,
		FlavorId:           spec.InstanceType.Id,
		ImageId:            spec.Image.Id,
		UserData:           userData,
		SecurityGroupNames: groupNames,
		Networks:           networks,
		Metadata:           args.InstanceConfig.Tags,
	}
	server, err := tryStartNovaInstanceAcrossAvailZones(shortAttempt, e.nova(), opts, availabilityZones)
	if err != nil {
		return nil, errors.Trace(err)
	}

	detail, err := e.nova().GetServer(server.Id)
	if err != nil {
		return nil, errors.Annotate(err, "cannot get started instance")
	}

	inst := &openstackInstance{
		e:            e,
		serverDetail: detail,
		arch:         &spec.Image.Arch,
		instType:     &spec.InstanceType,
	}
	logger.Infof("started instance %q", inst.Id())
	if withPublicIP {
		if err := e.assignPublicIP(publicIP, string(inst.Id())); err != nil {
			if err := e.terminateInstances([]instance.Id{inst.Id()}); err != nil {
				// ignore the failure at this stage, just log it
				logger.Debugf("failed to terminate instance %q: %v", inst.Id(), err)
			}
			return nil, errors.Annotatef(err, "cannot assign public address %s to instance %q", publicIP.IP, inst.Id())
		}
		inst.floatingIP = publicIP
		logger.Infof("assigned public IP %s to %q", publicIP.IP, inst.Id())
	}
	return &environs.StartInstanceResult{
		Instance: inst,
		Hardware: inst.hardwareCharacteristics(),
	}, nil
}
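tryStartNovaInstanceAcrossAvailZones is the heart of the zone spreading: return on the first success or on any error that is not a capacity problem, otherwise try the next zone. A stripped-down sketch of that shape, with hypothetical launch and isNoCapacity stand-ins:

package zonesexample

// tryAcrossZones returns the first success, or the first error that
// is not a capacity problem, or the last capacity error if every
// zone is full.
func tryAcrossZones(zones []string, launch func(zone string) error, isNoCapacity func(error) bool) error {
	var err error
	for _, zone := range zones {
		if err = launch(zone); err == nil || !isNoCapacity(err) {
			break
		}
	}
	return err
}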