Example #1
// StartInstanceWithParams is a test helper function that starts an instance
// with the given parameters, and a plausible but invalid configuration, and
// returns the result of Environ.StartInstance. The provided params'
// MachineConfig and Tools field values will be ignored.
func StartInstanceWithParams(
	env environs.Environ, machineId string,
	params environs.StartInstanceParams,
	networks []string,
) (
	instance.Instance, *instance.HardwareCharacteristics, []network.Info, error,
) {
	series := config.PreferredSeries(env.Config())
	agentVersion, ok := env.Config().AgentVersion()
	if !ok {
		return nil, nil, nil, fmt.Errorf("missing agent version in environment config")
	}
	possibleTools, err := tools.FindInstanceTools(
		env, agentVersion, series, params.Constraints.Arch,
	)
	if err != nil {
		return nil, nil, nil, err
	}
	machineNonce := "fake_nonce"
	stateInfo := FakeStateInfo(machineId)
	apiInfo := FakeAPIInfo(machineId)
	machineConfig := environs.NewMachineConfig(
		machineId, machineNonce,
		networks,
		stateInfo, apiInfo)
	params.Tools = possibleTools
	params.MachineConfig = machineConfig
	return env.StartInstance(params)
}
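A minimal sketch of how a test might drive this helper, assuming a gocheck-style checker c (gopkg.in/check.v1 as gc); the machine id "1" and nil networks are illustrative, not part of the helper's contract:

func AssertStartsInstance(c *gc.C, env environs.Environ) {
	// The helper fills in Tools and MachineConfig itself, so an empty
	// params struct is enough for a smoke test.
	inst, hwc, _, err := StartInstanceWithParams(
		env, "1", environs.StartInstanceParams{}, nil,
	)
	c.Assert(err, gc.IsNil)
	c.Check(inst, gc.NotNil)
	c.Check(hwc, gc.NotNil)
}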
Example #2
// StartInstance implements environs.InstanceBroker.
func (env *environ) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {
	// Start a new instance.

	series := args.Tools.OneSeries()
	logger.Debugf("StartInstance: %q, %s", args.InstanceConfig.MachineId, series)

	if err := env.finishInstanceConfig(args); err != nil {
		return nil, errors.Trace(err)
	}

	// TODO(ericsnow) Handle constraints?

	raw, err := env.newRawInstance(args)
	if err != nil {
		if args.StatusCallback != nil {
			args.StatusCallback(status.ProvisioningError, err.Error(), nil)
		}
		return nil, errors.Trace(err)
	}
	logger.Infof("started instance %q", raw.Name)
	inst := newInstance(raw, env)

	// Build the result.
	hwc := env.getHardwareCharacteristics(args, inst)
	result := environs.StartInstanceResult{
		Instance: inst,
		Hardware: hwc,
	}
	return &result, nil
}
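The error path above reports failures through args.StatusCallback when one is supplied. A hedged sketch of wiring up such a callback; its exact signature (error return, trailing data map) is inferred from how these examples invoke it, not confirmed by this snippet:

func startWithStatusLogging(env environs.Environ, cfg *instancecfg.InstanceConfig) (*environs.StartInstanceResult, error) {
	return env.StartInstance(environs.StartInstanceParams{
		InstanceConfig: cfg,
		StatusCallback: func(st status.Status, info string, _ map[string]interface{}) error {
			// Surface every provisioning status transition in the logs.
			logger.Debugf("provisioning status %s: %s", st, info)
			return nil
		},
	})
}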
Example #3
// StartInstanceWithParams is a test helper function that starts an instance
// with the given parameters, and a plausible but invalid configuration, and
// returns the result of Environ.StartInstance. The provided params'
// InstanceConfig and Tools field values will be ignored.
func StartInstanceWithParams(
	env environs.Environ, machineId string,
	params environs.StartInstanceParams,
	networks []string,
) (
	*environs.StartInstanceResult, error,
) {
	preferredSeries := config.PreferredSeries(env.Config())
	agentVersion, ok := env.Config().AgentVersion()
	if !ok {
		return nil, errors.New("missing agent version in model config")
	}
	filter := coretools.Filter{
		Number: agentVersion,
		Series: preferredSeries,
	}
	if params.Constraints.Arch != nil {
		filter.Arch = *params.Constraints.Arch
	}
	stream := tools.PreferredStream(&agentVersion, env.Config().Development(), env.Config().AgentStream())
	possibleTools, err := tools.FindTools(env, -1, -1, stream, filter)
	if err != nil {
		return nil, errors.Trace(err)
	}

	if params.ImageMetadata == nil {
		if err := SetImageMetadata(
			env,
			possibleTools.AllSeries(),
			possibleTools.Arches(),
			&params.ImageMetadata,
		); err != nil {
			return nil, errors.Trace(err)
		}
	}

	machineNonce := "fake_nonce"
	stateInfo := FakeStateInfo(machineId)
	apiInfo := FakeAPIInfo(machineId)
	instanceConfig, err := instancecfg.NewInstanceConfig(
		machineId,
		machineNonce,
		imagemetadata.ReleasedStream,
		preferredSeries,
		"",
		true,
		networks,
		stateInfo,
		apiInfo,
	)
	if err != nil {
		return nil, errors.Trace(err)
	}
	eUUID, _ := env.Config().UUID()
	instanceConfig.Tags[tags.JujuModel] = eUUID
	params.Tools = possibleTools
	params.InstanceConfig = instanceConfig
	return env.StartInstance(params)
}
Example #4
func (task *provisionerTask) maintainMachines(machines []*apiprovisioner.Machine) error {
	for _, m := range machines {
		logger.Infof("maintainMachines: %v", m)
		startInstanceParams := environs.StartInstanceParams{}
		startInstanceParams.InstanceConfig = &instancecfg.InstanceConfig{}
		startInstanceParams.InstanceConfig.MachineId = m.Id()
		if err := task.broker.MaintainInstance(startInstanceParams); err != nil {
			return errors.Annotatef(err, "cannot maintain machine %v", m)
		}
	}
	return nil
}
Example #5
// StartInstanceWithParams is a test helper function that starts an instance
// with the given parameters, and a plausible but invalid configuration, and
// returns the result of Environ.StartInstance. The provided params'
// MachineConfig and Tools field values will be ignored.
func StartInstanceWithParams(
	env environs.Environ, machineId string,
	params environs.StartInstanceParams,
	networks []string,
) (
	instance.Instance, *instance.HardwareCharacteristics, []network.Info, error,
) {
	series := config.PreferredSeries(env.Config())
	agentVersion, ok := env.Config().AgentVersion()
	if !ok {
		return nil, nil, nil, fmt.Errorf("missing agent version in environment config")
	}
	filter := coretools.Filter{
		Number: agentVersion,
		Series: series,
	}
	if params.Constraints.Arch != nil {
		filter.Arch = *params.Constraints.Arch
	}
	possibleTools, err := tools.FindTools(
		env, -1, -1, filter, tools.DoNotAllowRetry,
	)
	if err != nil {
		return nil, nil, nil, err
	}
	machineNonce := "fake_nonce"
	stateInfo := FakeStateInfo(machineId)
	apiInfo := FakeAPIInfo(machineId)
	machineConfig, err := environs.NewMachineConfig(
		machineId,
		machineNonce,
		imagemetadata.ReleasedStream,
		series,
		networks,
		stateInfo,
		apiInfo,
	)
	if err != nil {
		return nil, nil, nil, err
	}
	params.Tools = possibleTools
	params.MachineConfig = machineConfig
	return env.StartInstance(params)
}
Example #6
// StartInstanceWithParams is a test helper function that starts an instance
// with the given parameters, and a plausible but invalid configuration, and
// returns the result of Environ.StartInstance. The provided params'
// InstanceConfig and Tools field values will be ignored.
func StartInstanceWithParams(
	env environs.Environ, machineId string,
	params environs.StartInstanceParams,
	networks []string,
) (
	*environs.StartInstanceResult, error,
) {
	series := config.PreferredSeries(env.Config())
	agentVersion, ok := env.Config().AgentVersion()
	if !ok {
		return nil, errors.New("missing agent version in environment config")
	}
	filter := coretools.Filter{
		Number: agentVersion,
		Series: series,
	}
	if params.Constraints.Arch != nil {
		filter.Arch = *params.Constraints.Arch
	}
	possibleTools, err := tools.FindTools(env, -1, -1, filter)
	if err != nil {
		return nil, errors.Trace(err)
	}
	machineNonce := "fake_nonce"
	stateInfo := FakeStateInfo(machineId)
	apiInfo := FakeAPIInfo(machineId)
	instanceConfig, err := instancecfg.NewInstanceConfig(
		machineId,
		machineNonce,
		imagemetadata.ReleasedStream,
		series,
		true,
		networks,
		stateInfo,
		apiInfo,
	)
	if err != nil {
		return nil, errors.Trace(err)
	}
	params.Tools = possibleTools
	params.InstanceConfig = instanceConfig
	return env.StartInstance(params)
}
Example #7
// parseAvailabilityZones returns the availability zones that should be
// tried for the given instance spec. If a placement argument was
// provided then only that one is returned. Otherwise the environment is
// queried for available zones. In that case, the resulting list is
// roughly ordered such that the environment's instances are spread
// evenly across the region.
func (env *environ) parseAvailabilityZones(args environs.StartInstanceParams) ([]string, error) {
	if args.Placement != "" {
		// args.Placement will always be a zone name or empty.
		placement, err := env.parsePlacement(args.Placement)
		if err != nil {
			return nil, errors.Trace(err)
		}
		// TODO(ericsnow) Fail if placement.Zone is not in the env's configured region?
		return []string{placement.Zone.Name()}, nil
	}

	// If no availability zone is specified, then automatically spread across
	// the known zones for optimal spread across the instance distribution
	// group.
	var group []instance.Id
	var err error
	if args.DistributionGroup != nil {
		group, err = args.DistributionGroup()
		if err != nil {
			return nil, errors.Trace(err)
		}
	}
	zoneInstances, err := availabilityZoneAllocations(env, group)
	if err != nil {
		return nil, errors.Trace(err)
	}
	logger.Infof("found %d zones: %v", len(zoneInstances), zoneInstances)

	var zoneNames []string
	for _, z := range zoneInstances {
		zoneNames = append(zoneNames, z.ZoneName)
	}

	if len(zoneNames) == 0 {
		return nil, errors.NotFoundf("failed to determine availability zones")
	}

	return zoneNames, nil
}
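Stripped of the environ plumbing, the "roughly ordered" spread the comment promises boils down to sorting zones by how many instances of the distribution group they already contain. A minimal stand-alone sketch (the spread type is hypothetical; requires the standard sort package):

type spread struct {
	ZoneName  string
	Instances []instance.Id
}

// orderZonesBySpread returns zone names with the least-populated
// zones first, so new instances land where the group is thinnest.
func orderZonesBySpread(zones []spread) []string {
	sort.Slice(zones, func(i, j int) bool {
		return len(zones[i].Instances) < len(zones[j].Instances)
	})
	names := make([]string, 0, len(zones))
	for _, z := range zones {
		names = append(names, z.ZoneName)
	}
	return names
}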
Example #8
// StartInstance is specified in the Broker interface.
func (broker *lxcBroker) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {
	// TODO: refactor common code out of the container brokers.
	machineId := args.InstanceConfig.MachineId
	lxcLogger.Infof("starting lxc container for machineId: %s", machineId)

	// Default to using the host network until we can configure.
	bridgeDevice := broker.agentConfig.Value(agent.LxcBridge)
	if bridgeDevice == "" {
		bridgeDevice = container.DefaultLxcBridge
	}

	config, err := broker.api.ContainerConfig()
	if err != nil {
		lxcLogger.Errorf("failed to get container config: %v", err)
		return nil, err
	}

	preparedInfo, err := prepareOrGetContainerInterfaceInfo(
		broker.api,
		machineId,
		bridgeDevice,
		true, // allocate if possible, do not maintain existing.
		broker.enableNAT,
		args.NetworkInfo,
		lxcLogger,
		config.ProviderType,
	)
	if err != nil {
		// It's not fatal (yet) if we couldn't pre-allocate addresses for the
		// container.
		logger.Warningf("failed to prepare container %q network config: %v", machineId, err)
	} else {
		args.NetworkInfo = preparedInfo
	}
	network := container.BridgeNetworkConfig(bridgeDevice, broker.defaultMTU, args.NetworkInfo)

	// The provisioner worker will provide all tools it knows about
	// (after applying explicitly specified constraints), which may
	// include tools for architectures other than the host's. We
	// must constrain to the host's architecture for LXC.
	archTools, err := matchHostArchTools(args.Tools)
	if err != nil {
		return nil, errors.Trace(err)
	}

	series := archTools.OneSeries()
	args.InstanceConfig.MachineContainerType = instance.LXC
	if err := args.InstanceConfig.SetTools(archTools); err != nil {
		return nil, errors.Trace(err)
	}

	storageConfig := &container.StorageConfig{
		AllowMount: config.AllowLXCLoopMounts,
	}

	if err := instancecfg.PopulateInstanceConfig(
		args.InstanceConfig,
		config.ProviderType,
		config.AuthorizedKeys,
		config.SSLHostnameVerification,
		config.Proxy,
		config.AptProxy,
		config.AptMirror,
		config.PreferIPv6,
		config.EnableOSRefreshUpdate,
		config.EnableOSUpgrade,
	); err != nil {
		lxcLogger.Errorf("failed to populate machine config: %v", err)
		return nil, err
	}

	inst, hardware, err := broker.manager.CreateContainer(args.InstanceConfig, series, network, storageConfig, args.StatusCallback)
	if err != nil {
		lxcLogger.Errorf("failed to start container: %v", err)
		return nil, err
	}
	lxcLogger.Infof("started lxc container for machineId: %s, %s, %s", machineId, inst.Id(), hardware.String())
	return &environs.StartInstanceResult{
		Instance:    inst,
		Hardware:    hardware,
		NetworkInfo: network.Interfaces,
	}, nil
}
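Example #15 further down inlines what matchHostArchTools does here. A compact sketch of the same host-architecture filter, assuming the coretools alias used elsewhere in these examples and the juju/utils/arch.HostArch helper:

func matchHostArchToolsSketch(list coretools.List) (coretools.List, error) {
	// Containers must run the host's architecture, so drop any tools
	// built for other arches.
	matched, err := list.Match(coretools.Filter{Arch: arch.HostArch()})
	if err == coretools.ErrNoMatches {
		return nil, errors.Errorf(
			"need tools for arch %s, only found %s",
			arch.HostArch(), list.Arches(),
		)
	}
	return matched, err
}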
Example #9
func fillinStartInstanceParams(env environs.Environ, machineId string, isController bool, params *environs.StartInstanceParams) error {
	if params.ControllerUUID == "" {
		return errors.New("missing controller UUID in start instance parameters")
	}
	preferredSeries := config.PreferredSeries(env.Config())
	agentVersion, ok := env.Config().AgentVersion()
	if !ok {
		return errors.New("missing agent version in model config")
	}
	filter := coretools.Filter{
		Number: agentVersion,
		Series: preferredSeries,
	}
	if params.Constraints.Arch != nil {
		filter.Arch = *params.Constraints.Arch
	}
	stream := tools.PreferredStream(&agentVersion, env.Config().Development(), env.Config().AgentStream())
	possibleTools, err := tools.FindTools(env, -1, -1, stream, filter)
	if err != nil {
		return errors.Trace(err)
	}

	if params.ImageMetadata == nil {
		if err := SetImageMetadata(
			env,
			possibleTools.AllSeries(),
			possibleTools.Arches(),
			&params.ImageMetadata,
		); err != nil {
			return errors.Trace(err)
		}
	}

	machineNonce := "fake_nonce"
	apiInfo := FakeAPIInfo(machineId)
	instanceConfig, err := instancecfg.NewInstanceConfig(
		testing.ControllerTag,
		machineId,
		machineNonce,
		imagemetadata.ReleasedStream,
		preferredSeries,
		apiInfo,
	)
	if err != nil {
		return errors.Trace(err)
	}
	if isController {
		instanceConfig.Controller = &instancecfg.ControllerConfig{
			Config: testing.FakeControllerConfig(),
			MongoInfo: &mongo.MongoInfo{
				Info: mongo.Info{
					Addrs:  []string{"127.0.0.1:1234"},
					CACert: "CA CERT\n" + testing.CACert,
				},
				Password: "******",
				Tag:      names.NewMachineTag(machineId),
			},
		}
		instanceConfig.Jobs = []multiwatcher.MachineJob{multiwatcher.JobHostUnits, multiwatcher.JobManageModel}
	}
	cfg := env.Config()
	instanceConfig.Tags = instancecfg.InstanceTags(env.Config().UUID(), params.ControllerUUID, cfg, nil)
	params.Tools = possibleTools
	params.InstanceConfig = instanceConfig
	return nil
}
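A hedged sketch of the intended call sequence: seed the params with a controller UUID, let the helper fill in Tools and InstanceConfig, then hand them to the environ. The machine id "0" is illustrative:

func startTestInstance(env environs.Environ) (*environs.StartInstanceResult, error) {
	params := environs.StartInstanceParams{
		ControllerUUID: testing.ControllerTag.Id(),
	}
	if err := fillinStartInstanceParams(env, "0", false, &params); err != nil {
		return nil, errors.Trace(err)
	}
	return env.StartInstance(params)
}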
Example #10
// StartInstance is specified in the InstanceBroker interface.
func (env *azureEnviron) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {
	if args.InstanceConfig.HasNetworks() {
		return nil, errors.New("starting instances with networks is not supported yet")
	}

	err := instancecfg.FinishInstanceConfig(args.InstanceConfig, env.Config())
	if err != nil {
		return nil, err
	}

	// Pick envtools.  Needed for the custom data (which is what we normally
	// call userdata).
	args.InstanceConfig.Tools = args.Tools[0]
	logger.Infof("picked tools %q", args.InstanceConfig.Tools)

	// Compose userdata.
	userData, err := providerinit.ComposeUserData(args.InstanceConfig, nil, AzureRenderer{})
	if err != nil {
		return nil, errors.Annotate(err, "cannot compose user data")
	}

	snapshot := env.getSnapshot()
	location := snapshot.ecfg.location()
	instanceType, sourceImageName, err := env.selectInstanceTypeAndImage(&instances.InstanceConstraint{
		Region:      location,
		Series:      args.Tools.OneSeries(),
		Arches:      args.Tools.Arches(),
		Constraints: args.Constraints,
	})
	if err != nil {
		return nil, err
	}

	// We use the cloud service label as a way to group instances with
	// the same affinity, so that machines can be allocated to the
	// same availability set.
	var cloudServiceName string
	if args.DistributionGroup != nil && snapshot.ecfg.availabilitySetsEnabled() {
		instanceIds, err := args.DistributionGroup()
		if err != nil {
			return nil, err
		}
		for _, id := range instanceIds {
			cloudServiceName, _ = env.splitInstanceId(id)
			if cloudServiceName != "" {
				break
			}
		}
	}

	vhd, err := env.newOSDisk(sourceImageName, args.InstanceConfig.Series)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// If we're creating machine-0, we'll want to expose port 22.
	// All other machines get an auto-generated public port for SSH.
	stateServer := multiwatcher.AnyJobNeedsState(args.InstanceConfig.Jobs...)
	role, err := env.newRole(instanceType.Id, vhd, stateServer, string(userData), args.InstanceConfig.Series, snapshot)
	if err != nil {
		return nil, errors.Trace(err)
	}
	inst, err := createInstance(env, snapshot.api, role, cloudServiceName, stateServer)
	if err != nil {
		return nil, errors.Trace(err)
	}
	hc := &instance.HardwareCharacteristics{
		Mem:      &instanceType.Mem,
		RootDisk: &instanceType.RootDisk,
		CpuCores: &instanceType.CpuCores,
	}
	if len(instanceType.Arches) == 1 {
		hc.Arch = &instanceType.Arches[0]
	}
	return &environs.StartInstanceResult{
		Instance: inst,
		Hardware: hc,
	}, nil
}
Example #11
// StartInstance is specified in the Broker interface.
func (broker *kvmBroker) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {
	// TODO: refactor common code out of the container brokers.
	machineId := args.InstanceConfig.MachineId
	kvmLogger.Infof("starting kvm container for machineId: %s", machineId)

	// TODO: Default to using the host network until we can configure.  Yes,
	// this is using the LxcBridge value, we should put it in the api call for
	// container config.
	bridgeDevice := broker.agentConfig.Value(agent.LxcBridge)
	if bridgeDevice == "" {
		bridgeDevice = container.DefaultKvmBridge
	}

	config, err := broker.api.ContainerConfig()
	if err != nil {
		kvmLogger.Errorf("failed to get container config: %v", err)
		return nil, err
	}

	preparedInfo, err := prepareOrGetContainerInterfaceInfo(
		broker.api,
		machineId,
		bridgeDevice,
		true, // allocate if possible, do not maintain existing.
		args.NetworkInfo,
		kvmLogger,
	)
	if err != nil {
		// It's not fatal (yet) if we couldn't pre-allocate addresses for the
		// container.
		logger.Warningf("failed to prepare container %q network config: %v", machineId, err)
	} else {
		args.NetworkInfo = preparedInfo
	}

	network := container.BridgeNetworkConfig(bridgeDevice, 0, args.NetworkInfo)
	interfaces, err := finishNetworkConfig(bridgeDevice, args.NetworkInfo)
	if err != nil {
		return nil, errors.Trace(err)
	}
	network.Interfaces = interfaces

	// The provisioner worker will provide all tools it knows about
	// (after applying explicitly specified constraints), which may
	// include tools for architectures other than the host's.
	//
	// container/kvm only allows running container==host arch, so
	// we constrain the tools to host arch here regardless of the
	// constraints specified.
	archTools, err := matchHostArchTools(args.Tools)
	if err != nil {
		return nil, errors.Trace(err)
	}

	series := archTools.OneSeries()
	args.InstanceConfig.MachineContainerType = instance.KVM
	if err := args.InstanceConfig.SetTools(archTools); err != nil {
		return nil, errors.Trace(err)
	}

	if err := instancecfg.PopulateInstanceConfig(
		args.InstanceConfig,
		config.ProviderType,
		config.AuthorizedKeys,
		config.SSLHostnameVerification,
		config.Proxy,
		config.AptProxy,
		config.AptMirror,
		config.EnableOSRefreshUpdate,
		config.EnableOSUpgrade,
	); err != nil {
		kvmLogger.Errorf("failed to populate machine config: %v", err)
		return nil, err
	}

	storageConfig := &container.StorageConfig{
		AllowMount: true,
	}
	inst, hardware, err := broker.manager.CreateContainer(
		args.InstanceConfig, args.Constraints,
		series, network, storageConfig, args.StatusCallback,
	)
	if err != nil {
		kvmLogger.Errorf("failed to start container: %v", err)
		return nil, err
	}
	kvmLogger.Infof("started kvm container for machineId: %s, %s, %s", machineId, inst.Id(), hardware.String())
	return &environs.StartInstanceResult{
		Instance:    inst,
		Hardware:    hardware,
		NetworkInfo: interfaces,
	}, nil
}
Example #12
// StartInstance is specified in the Broker interface.
func (broker *kvmBroker) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {
	if args.InstanceConfig.HasNetworks() {
		return nil, errors.New("starting kvm containers with networks is not supported yet")
	}
	// TODO: refactor common code out of the container brokers.
	machineId := args.InstanceConfig.MachineId
	kvmLogger.Infof("starting kvm container for machineId: %s", machineId)

	// TODO: Default to using the host network until we can configure.  Yes,
	// this is using the LxcBridge value, we should put it in the api call for
	// container config.
	bridgeDevice := broker.agentConfig.Value(agent.LxcBridge)
	if bridgeDevice == "" {
		bridgeDevice = kvm.DefaultKvmBridge
	}
	if !environs.AddressAllocationEnabled() {
		logger.Debugf(
			"address allocation feature flag not enabled; using DHCP for container %q",
			machineId,
		)
	} else {
		logger.Debugf("trying to allocate static IP for container %q", machineId)

		allocatedInfo, err := configureContainerNetwork(
			machineId,
			bridgeDevice,
			broker.api,
			args.NetworkInfo,
			true, // allocate a new address.
			broker.enableNAT,
		)
		if err != nil {
			// It's fine, just ignore it. The effect will be that the
			// container won't have a static address configured.
			logger.Infof("not allocating static IP for container %q: %v", machineId, err)
		} else {
			args.NetworkInfo = allocatedInfo
		}
	}

	// Unlike with LXC, we don't override the default MTU to use.
	network := container.BridgeNetworkConfig(bridgeDevice, 0, args.NetworkInfo)

	series := args.Tools.OneSeries()
	args.InstanceConfig.MachineContainerType = instance.KVM
	args.InstanceConfig.Tools = args.Tools[0]

	config, err := broker.api.ContainerConfig()
	if err != nil {
		kvmLogger.Errorf("failed to get container config: %v", err)
		return nil, err
	}

	if err := instancecfg.PopulateInstanceConfig(
		args.InstanceConfig,
		config.ProviderType,
		config.AuthorizedKeys,
		config.SSLHostnameVerification,
		config.Proxy,
		config.AptProxy,
		config.AptMirror,
		config.PreferIPv6,
		config.EnableOSRefreshUpdate,
		config.EnableOSUpgrade,
	); err != nil {
		kvmLogger.Errorf("failed to populate machine config: %v", err)
		return nil, err
	}

	storageConfig := &container.StorageConfig{
		AllowMount: true,
	}
	inst, hardware, err := broker.manager.CreateContainer(args.InstanceConfig, series, network, storageConfig)
	if err != nil {
		kvmLogger.Errorf("failed to start container: %v", err)
		return nil, err
	}
	kvmLogger.Infof("started kvm container for machineId: %s, %s, %s", machineId, inst.Id(), hardware.String())
	return &environs.StartInstanceResult{
		Instance:    inst,
		Hardware:    hardware,
		NetworkInfo: network.Interfaces,
	}, nil
}
Example #13
// StartInstance is specified in the InstanceBroker interface.
func (e *Environ) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {
	var availabilityZones []string
	if args.Placement != "" {
		placement, err := e.parsePlacement(args.Placement)
		if err != nil {
			return nil, err
		}
		if !placement.availabilityZone.State.Available {
			return nil, errors.Errorf("availability zone %q is unavailable", placement.availabilityZone.Name)
		}
		availabilityZones = append(availabilityZones, placement.availabilityZone.Name)
	}

	// If no availability zone is specified, then automatically spread across
	// the known zones for optimal spread across the instance distribution
	// group.
	if len(availabilityZones) == 0 {
		var group []instance.Id
		var err error
		if args.DistributionGroup != nil {
			group, err = args.DistributionGroup()
			if err != nil {
				return nil, err
			}
		}
		zoneInstances, err := availabilityZoneAllocations(e, group)
		if errors.IsNotImplemented(err) {
			// Availability zones are an extension, so we may get a
			// not implemented error; ignore these.
		} else if err != nil {
			return nil, err
		} else {
			for _, zone := range zoneInstances {
				availabilityZones = append(availabilityZones, zone.ZoneName)
			}
		}
		if len(availabilityZones) == 0 {
			// No explicitly selectable zones available, so use an unspecified zone.
			availabilityZones = []string{""}
		}
	}

	series := args.Tools.OneSeries()
	arches := args.Tools.Arches()
	spec, err := findInstanceSpec(e, &instances.InstanceConstraint{
		Region:      e.ecfg().region(),
		Series:      series,
		Arches:      arches,
		Constraints: args.Constraints,
	}, args.ImageMetadata)
	if err != nil {
		return nil, err
	}
	tools, err := args.Tools.Match(tools.Filter{Arch: spec.Image.Arch})
	if err != nil {
		return nil, errors.Errorf("chosen architecture %v not present in %v", spec.Image.Arch, arches)
	}

	if err := args.InstanceConfig.SetTools(tools); err != nil {
		return nil, errors.Trace(err)
	}

	if err := instancecfg.FinishInstanceConfig(args.InstanceConfig, e.Config()); err != nil {
		return nil, err
	}
	cloudcfg, err := e.configurator.GetCloudConfig(args)
	if err != nil {
		return nil, errors.Trace(err)
	}
	userData, err := providerinit.ComposeUserData(args.InstanceConfig, cloudcfg, OpenstackRenderer{})
	if err != nil {
		return nil, errors.Annotate(err, "cannot make user data")
	}
	logger.Debugf("openstack user data; %d bytes", len(userData))

	var networks = e.firewaller.InitialNetworks()
	usingNetwork := e.ecfg().network()
	if usingNetwork != "" {
		networkId, err := e.resolveNetwork(usingNetwork)
		if err != nil {
			return nil, err
		}
		logger.Debugf("using network id %q", networkId)
		networks = append(networks, nova.ServerNetworks{NetworkId: networkId})
	}
	withPublicIP := e.ecfg().useFloatingIP()
	var publicIP *nova.FloatingIP
	if withPublicIP {
		logger.Debugf("allocating public IP address for openstack node")
		if fip, err := e.allocatePublicIP(); err != nil {
			return nil, errors.Annotate(err, "cannot allocate a public IP as needed")
		} else {
			publicIP = fip
			logger.Infof("allocated public IP %s", publicIP.IP)
		}
	}

	cfg := e.Config()
	var groupNames = make([]nova.SecurityGroupName, 0)
	groups, err := e.firewaller.SetUpGroups(args.InstanceConfig.MachineId, cfg.APIPort())
	if err != nil {
		return nil, errors.Annotate(err, "cannot set up groups")
	}

	for _, g := range groups {
		groupNames = append(groupNames, nova.SecurityGroupName{Name: g.Name})
	}
	machineName := resourceName(
		names.NewMachineTag(args.InstanceConfig.MachineId),
		e.Config().UUID(),
	)

	tryStartNovaInstance := func(
		attempts utils.AttemptStrategy,
		client *nova.Client,
		instanceOpts nova.RunServerOpts,
	) (server *nova.Entity, err error) {
		for a := attempts.Start(); a.Next(); {
			server, err = client.RunServer(instanceOpts)
			if err == nil || !gooseerrors.IsNotFound(err) {
				break
			}
		}
		return server, err
	}

	tryStartNovaInstanceAcrossAvailZones := func(
		attempts utils.AttemptStrategy,
		client *nova.Client,
		instanceOpts nova.RunServerOpts,
		availabilityZones []string,
	) (server *nova.Entity, err error) {
		for _, zone := range availabilityZones {
			instanceOpts.AvailabilityZone = zone
			e.configurator.ModifyRunServerOptions(&instanceOpts)
			server, err = tryStartNovaInstance(attempts, client, instanceOpts)
			if err == nil || !isNoValidHostsError(err) {
				break
			}

			logger.Infof("no valid hosts available in zone %q, trying another availability zone", zone)
		}

		if err != nil {
			err = errors.Annotate(err, "cannot run instance")
		}

		return server, err
	}

	var opts = nova.RunServerOpts{
		Name:               machineName,
		FlavorId:           spec.InstanceType.Id,
		ImageId:            spec.Image.Id,
		UserData:           userData,
		SecurityGroupNames: groupNames,
		Networks:           networks,
		Metadata:           args.InstanceConfig.Tags,
	}
	server, err := tryStartNovaInstanceAcrossAvailZones(shortAttempt, e.nova(), opts, availabilityZones)
	if err != nil {
		return nil, errors.Trace(err)
	}

	detail, err := e.nova().GetServer(server.Id)
	if err != nil {
		return nil, errors.Annotate(err, "cannot get started instance")
	}

	inst := &openstackInstance{
		e:            e,
		serverDetail: detail,
		arch:         &spec.Image.Arch,
		instType:     &spec.InstanceType,
	}
	logger.Infof("started instance %q", inst.Id())
	if withPublicIP {
		if err := e.assignPublicIP(publicIP, string(inst.Id())); err != nil {
			if err := e.terminateInstances([]instance.Id{inst.Id()}); err != nil {
				// ignore the failure at this stage, just log it
				logger.Debugf("failed to terminate instance %q: %v", inst.Id(), err)
			}
			return nil, errors.Annotatef(err, "cannot assign public address %s to instance %q", publicIP.IP, inst.Id())
		}
		inst.floatingIP = publicIP
		logger.Infof("assigned public IP %s to %q", publicIP.IP, inst.Id())
	}
	return &environs.StartInstanceResult{
		Instance: inst,
		Hardware: inst.hardwareCharacteristics(),
	}, nil
}
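The two nested closures above implement a zone-fallback retry: each candidate zone is tried in turn, and only a "no valid hosts" failure advances to the next one. The same control flow in isolation, with both dependencies as hypothetical parameters standing in for tryStartNovaInstance and isNoValidHostsError:

func startAcrossZones(
	zones []string,
	startInZone func(zone string) error,
	isNoValidHosts func(error) bool,
) error {
	var err error
	for _, zone := range zones {
		err = startInZone(zone)
		if err == nil || !isNoValidHosts(err) {
			// Success, or a failure that another zone cannot fix.
			break
		}
	}
	return err
}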
Example #14
// newRawInstance is where the new physical instance is actually
// provisioned, relative to the provided args and spec. Info for that
// low-level instance is returned.
func (env *environ) newRawInstance(args environs.StartInstanceParams) (*lxdclient.Instance, error) {
	hostname, err := env.namespace.Hostname(args.InstanceConfig.MachineId)
	if err != nil {
		return nil, errors.Trace(err)
	}

	// Note: other providers have the ImageMetadata already read for them
	// and passed in as args.ImageMetadata. However, lxd provider doesn't
	// use datatype: image-ids, it uses datatype: image-download, and we
	// don't have a registered cloud/region.
	imageSources, err := env.getImageSources()
	if err != nil {
		return nil, errors.Trace(err)
	}

	series := args.InstanceConfig.Series
	// TODO(jam): We should get this information from EnsureImageExists, or
	// something given to us from 'raw', not assume it ourselves.
	image := "ubuntu-" + series
	// TODO: support args.Constraints.Arch, we'll want to map from

	// Keep track of StatusCallback output so we may clean up later.
	// This is implemented here, close to where the StatusCallback calls
	// are made, instead of at a higher level in the package, so as not to
	// assume that all providers will have the same need to be implemented
	// in the same way.
	longestMsg := 0
	statusCallback := func(currentStatus status.Status, msg string) {
		if args.StatusCallback != nil {
			args.StatusCallback(currentStatus, msg, nil)
		}
		if len(msg) > longestMsg {
			longestMsg = len(msg)
		}
	}
	cleanupCallback := func() {
		if args.CleanupCallback != nil {
			args.CleanupCallback(strings.Repeat(" ", longestMsg))
		}
	}
	defer cleanupCallback()

	imageCallback := func(copyProgress string) {
		statusCallback(status.Allocating, copyProgress)
	}
	if err := env.raw.EnsureImageExists(series, imageSources, imageCallback); err != nil {
		return nil, errors.Trace(err)
	}
	cleanupCallback() // Clean out any long line of completed download status

	cloudcfg, err := cloudinit.New(series)
	if err != nil {
		return nil, errors.Trace(err)
	}

	var certificateFingerprint string
	if args.InstanceConfig.Controller != nil {
		// For controller machines, generate a certificate pair and write
		// them to the instance's disk in a well-defined location, along
		// with the server's certificate.
		certPEM, keyPEM, err := lxdshared.GenerateMemCert(true)
		if err != nil {
			return nil, errors.Trace(err)
		}
		cert := lxdclient.NewCert(certPEM, keyPEM)
		cert.Name = hostname

		// We record the certificate's fingerprint in metadata, so we can
		// remove the certificate along with the instance.
		certificateFingerprint, err = cert.Fingerprint()
		if err != nil {
			return nil, errors.Trace(err)
		}

		if err := env.raw.AddCert(cert); err != nil {
			return nil, errors.Annotatef(err, "adding certificate %q", cert.Name)
		}
		serverState, err := env.raw.ServerStatus()
		if err != nil {
			return nil, errors.Annotate(err, "getting server status")
		}
		cloudcfg.AddRunTextFile(clientCertPath, string(certPEM), 0600)
		cloudcfg.AddRunTextFile(clientKeyPath, string(keyPEM), 0600)
		cloudcfg.AddRunTextFile(serverCertPath, serverState.Environment.Certificate, 0600)
	}

	cloudcfg.SetAttr("hostname", hostname)
	cloudcfg.SetAttr("manage_etc_hosts", true)

	metadata, err := getMetadata(cloudcfg, args)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if certificateFingerprint != "" {
		metadata[metadataKeyCertificateFingerprint] = certificateFingerprint
	}

	// TODO(ericsnow) Use the env ID for the network name (instead of default)?
	// TODO(ericsnow) Make the network name configurable?
	// TODO(ericsnow) Support multiple networks?
	// TODO(ericsnow) Use a different net interface name? Configurable?
	instSpec := lxdclient.InstanceSpec{
		Name:  hostname,
		Image: image,
		//Type:              spec.InstanceType.Name,
		//Disks:             getDisks(spec, args.Constraints),
		//NetworkInterfaces: []string{"ExternalNAT"},
		Metadata: metadata,
		Profiles: []string{
			//TODO(wwitzel3) allow the user to specify lxc profiles to apply. This allows the
			// user to setup any custom devices order config settings for their environment.
			// Also we must ensure that a device with the parent: lxcbr0 exists in at least
			// one of the profiles.
			"default",
			env.profileName(),
		},
		// Network is omitted (left empty).
	}

	logger.Infof("starting instance %q (image %q)...", instSpec.Name, instSpec.Image)

	statusCallback(status.Allocating, "preparing image")
	inst, err := env.raw.AddInstance(instSpec)
	if err != nil {
		return nil, errors.Trace(err)
	}
	statusCallback(status.Running, "container started")
	return inst, nil
}
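The longestMsg bookkeeping above exists so CleanupCallback can blank out whatever progress line was written last. The same trick in isolation, as a standard-library-only sketch (io, fmt, strings) rather than the callback plumbing the provider actually uses:

func newStatusLine(w io.Writer) (report func(msg string), cleanup func()) {
	longest := 0
	report = func(msg string) {
		// Rewrite the current terminal line, remembering the widest
		// message printed so far.
		fmt.Fprintf(w, "\r%s", msg)
		if len(msg) > longest {
			longest = len(msg)
		}
	}
	cleanup = func() {
		// Overwrite the longest line with spaces, then return home.
		fmt.Fprintf(w, "\r%s\r", strings.Repeat(" ", longest))
	}
	return report, cleanup
}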
Example #15
// StartInstance is specified in the Broker interface.
func (broker *lxcBroker) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {
	if args.InstanceConfig.HasNetworks() {
		return nil, errors.New("starting lxc containers with networks is not supported yet")
	}
	// TODO: refactor common code out of the container brokers.
	machineId := args.InstanceConfig.MachineId
	lxcLogger.Infof("starting lxc container for machineId: %s", machineId)

	// Default to using the host network until we can configure.
	bridgeDevice := broker.agentConfig.Value(agent.LxcBridge)
	if bridgeDevice == "" {
		bridgeDevice = lxc.DefaultLxcBridge
	}

	if !environs.AddressAllocationEnabled() {
		logger.Debugf(
			"address allocation feature flag not enabled; using DHCP for container %q",
			machineId,
		)
	} else {
		logger.Debugf("trying to allocate static IP for container %q", machineId)
		allocatedInfo, err := configureContainerNetwork(
			machineId,
			bridgeDevice,
			broker.api,
			args.NetworkInfo,
			true, // allocate a new address.
			broker.enableNAT,
		)
		if err != nil {
			// It's fine, just ignore it. The effect will be that the
			// container won't have a static address configured.
			logger.Infof("not allocating static IP for container %q: %v", machineId, err)
		} else {
			args.NetworkInfo = allocatedInfo
		}
	}
	network := container.BridgeNetworkConfig(bridgeDevice, broker.defaultMTU, args.NetworkInfo)

	// The provisioner worker will provide all tools it knows about
	// (after applying explicitly specified constraints), which may
	// include tools for architectures other than the host's. We
	// must constrain to the host's architecture for LXC.
	archTools, err := args.Tools.Match(tools.Filter{
		Arch: version.Current.Arch,
	})
	if err != nil {
		return nil, errors.Errorf(
			"need tools for arch %s, only found %s",
			version.Current.Arch,
			args.Tools.Arches(),
		)
	}

	series := archTools.OneSeries()
	args.InstanceConfig.MachineContainerType = instance.LXC
	args.InstanceConfig.Tools = archTools[0]

	config, err := broker.api.ContainerConfig()
	if err != nil {
		lxcLogger.Errorf("failed to get container config: %v", err)
		return nil, err
	}
	storageConfig := &container.StorageConfig{
		AllowMount: config.AllowLXCLoopMounts,
	}

	if err := instancecfg.PopulateInstanceConfig(
		args.InstanceConfig,
		config.ProviderType,
		config.AuthorizedKeys,
		config.SSLHostnameVerification,
		config.Proxy,
		config.AptProxy,
		config.AptMirror,
		config.PreferIPv6,
		config.EnableOSRefreshUpdate,
		config.EnableOSUpgrade,
	); err != nil {
		lxcLogger.Errorf("failed to populate machine config: %v", err)
		return nil, err
	}

	inst, hardware, err := broker.manager.CreateContainer(args.InstanceConfig, series, network, storageConfig)
	if err != nil {
		lxcLogger.Errorf("failed to start container: %v", err)
		return nil, err
	}
	lxcLogger.Infof("started lxc container for machineId: %s, %s, %s", machineId, inst.Id(), hardware.String())
	return &environs.StartInstanceResult{
		Instance:    inst,
		Hardware:    hardware,
		NetworkInfo: network.Interfaces,
	}, nil
}
Example #16
func (broker *lxdBroker) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {
	if args.InstanceConfig.HasNetworks() {
		return nil, errors.New("starting lxd containers with networks is not supported yet")
	}
	machineId := args.InstanceConfig.MachineId
	bridgeDevice := broker.agentConfig.Value(agent.LxcBridge)
	if bridgeDevice == "" {
		var err error
		bridgeDevice, err = lxdclient.GetDefaultBridgeName()
		if err != nil {
			return nil, errors.Trace(err)
		}
	}

	preparedInfo, err := prepareOrGetContainerInterfaceInfo(
		broker.api,
		machineId,
		bridgeDevice,
		true, // allocate if possible, do not maintain existing.
		broker.enableNAT,
		args.NetworkInfo,
		lxdLogger,
	)
	if err != nil {
		// It's not fatal (yet) if we couldn't pre-allocate addresses for the
		// container.
		logger.Warningf("failed to prepare container %q network config: %v", machineId, err)
	} else {
		args.NetworkInfo = preparedInfo
	}

	network := container.BridgeNetworkConfig(bridgeDevice, 0, args.NetworkInfo)

	series := args.Tools.OneSeries()
	args.InstanceConfig.MachineContainerType = instance.LXD
	args.InstanceConfig.Tools = args.Tools[0]

	config, err := broker.api.ContainerConfig()
	if err != nil {
		lxdLogger.Errorf("failed to get container config: %v", err)
		return nil, err
	}

	if err := instancecfg.PopulateInstanceConfig(
		args.InstanceConfig,
		config.ProviderType,
		config.AuthorizedKeys,
		config.SSLHostnameVerification,
		config.Proxy,
		config.AptProxy,
		config.AptMirror,
		config.PreferIPv6,
		config.EnableOSRefreshUpdate,
		config.EnableOSUpgrade,
	); err != nil {
		lxdLogger.Errorf("failed to populate machine config: %v", err)
		return nil, err
	}

	storageConfig := &container.StorageConfig{}
	inst, hardware, err := broker.manager.CreateContainer(args.InstanceConfig, series, network, storageConfig, args.StatusCallback)
	if err != nil {
		return nil, err
	}

	return &environs.StartInstanceResult{
		Instance:    inst,
		Hardware:    hardware,
		NetworkInfo: network.Interfaces,
	}, nil
}
Example #17
func (broker *lxdBroker) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {
	machineId := args.InstanceConfig.MachineId
	bridgeDevice := broker.agentConfig.Value(agent.LxdBridge)
	if bridgeDevice == "" {
		bridgeDevice = network.DefaultLXDBridge
	}

	config, err := broker.api.ContainerConfig()
	if err != nil {
		lxdLogger.Errorf("failed to get container config: %v", err)
		return nil, err
	}

	preparedInfo, err := prepareOrGetContainerInterfaceInfo(
		broker.api,
		machineId,
		bridgeDevice,
		true, // allocate if possible, do not maintain existing.
		args.NetworkInfo,
		lxdLogger,
	)
	if err != nil {
		// It's not fatal (yet) if we couldn't pre-allocate addresses for the
		// container.
		logger.Warningf("failed to prepare container %q network config: %v", machineId, err)
	} else {
		args.NetworkInfo = preparedInfo
	}

	network := container.BridgeNetworkConfig(bridgeDevice, 0, args.NetworkInfo)
	interfaces, err := finishNetworkConfig(bridgeDevice, args.NetworkInfo)
	if err != nil {
		return nil, errors.Trace(err)
	}
	network.Interfaces = interfaces

	// The provisioner worker will provide all tools it knows about
	// (after applying explicitly specified constraints), which may
	// include tools for architectures other than the host's. We
	// must constrain to the host's architecture for LXD.
	archTools, err := matchHostArchTools(args.Tools)
	if err != nil {
		return nil, errors.Trace(err)
	}

	series := archTools.OneSeries()
	args.InstanceConfig.MachineContainerType = instance.LXD
	if err := args.InstanceConfig.SetTools(archTools); err != nil {
		return nil, errors.Trace(err)
	}

	if err := instancecfg.PopulateInstanceConfig(
		args.InstanceConfig,
		config.ProviderType,
		config.AuthorizedKeys,
		config.SSLHostnameVerification,
		config.Proxy,
		config.AptProxy,
		config.AptMirror,
		config.EnableOSRefreshUpdate,
		config.EnableOSUpgrade,
	); err != nil {
		lxdLogger.Errorf("failed to populate machine config: %v", err)
		return nil, err
	}

	storageConfig := &container.StorageConfig{}
	inst, hardware, err := broker.manager.CreateContainer(
		args.InstanceConfig, args.Constraints,
		series, network, storageConfig, args.StatusCallback,
	)
	if err != nil {
		return nil, err
	}

	return &environs.StartInstanceResult{
		Instance:    inst,
		Hardware:    hardware,
		NetworkInfo: interfaces,
	}, nil
}
Example #18
// StartInstance is specified in the Broker interface.
func (broker *kvmBroker) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {
	if args.InstanceConfig.HasNetworks() {
		return nil, errors.New("starting kvm containers with networks is not supported yet")
	}
	// TODO: refactor common code out of the container brokers.
	machineId := args.InstanceConfig.MachineId
	kvmLogger.Infof("starting kvm container for machineId: %s", machineId)

	// TODO: Default to using the host network until we can configure.  Yes,
	// this is using the LxcBridge value, we should put it in the api call for
	// container config.
	bridgeDevice := broker.agentConfig.Value(agent.LxcBridge)
	if bridgeDevice == "" {
		bridgeDevice = kvm.DefaultKvmBridge
	}

	preparedInfo, err := prepareOrGetContainerInterfaceInfo(
		broker.api,
		machineId,
		bridgeDevice,
		true, // allocate if possible, do not maintain existing.
		broker.enableNAT,
		args.NetworkInfo,
		kvmLogger,
	)
	if err != nil {
		// It's not fatal (yet) if we couldn't pre-allocate addresses for the
		// container.
		logger.Warningf("failed to prepare container %q network config: %v", machineId, err)
	} else {
		args.NetworkInfo = preparedInfo
	}

	// Unlike with LXC, we don't override the default MTU to use.
	network := container.BridgeNetworkConfig(bridgeDevice, 0, args.NetworkInfo)

	series := args.Tools.OneSeries()
	args.InstanceConfig.MachineContainerType = instance.KVM
	args.InstanceConfig.Tools = args.Tools[0]

	config, err := broker.api.ContainerConfig()
	if err != nil {
		kvmLogger.Errorf("failed to get container config: %v", err)
		return nil, err
	}

	if err := instancecfg.PopulateInstanceConfig(
		args.InstanceConfig,
		config.ProviderType,
		config.AuthorizedKeys,
		config.SSLHostnameVerification,
		config.Proxy,
		config.AptProxy,
		config.AptMirror,
		config.PreferIPv6,
		config.EnableOSRefreshUpdate,
		config.EnableOSUpgrade,
	); err != nil {
		kvmLogger.Errorf("failed to populate machine config: %v", err)
		return nil, err
	}

	storageConfig := &container.StorageConfig{
		AllowMount: true,
	}
	inst, hardware, err := broker.manager.CreateContainer(args.InstanceConfig, series, network, storageConfig, args.StatusCallback)
	if err != nil {
		kvmLogger.Errorf("failed to start container: %v", err)
		return nil, err
	}
	kvmLogger.Infof("started kvm container for machineId: %s, %s, %s", machineId, inst.Id(), hardware.String())
	return &environs.StartInstanceResult{
		Instance:    inst,
		Hardware:    hardware,
		NetworkInfo: network.Interfaces,
	}, nil
}
Example #19
// newRawInstance is where the new physical instance is actually
// provisioned, relative to the provided args and spec. Info for that
// low-level instance is returned.
func (env *environ) newRawInstance(args environs.StartInstanceParams) (*lxdclient.Instance, error) {
	machineID := common.MachineFullName(env.Config().UUID(), args.InstanceConfig.MachineId)

	// Note: other providers have the ImageMetadata already read for them
	// and passed in as args.ImageMetadata. However, lxd provider doesn't
	// use datatype: image-ids, it uses datatype: image-download, and we
	// don't have a registered cloud/region.
	imageSources, err := env.getImageSources()
	if err != nil {
		return nil, errors.Trace(err)
	}

	series := args.Tools.OneSeries()
	// TODO(jam): We should get this information from EnsureImageExists, or
	// something given to us from 'raw', not assume it ourselves.
	image := "ubuntu-" + series
	// TODO: support args.Constraints.Arch, we'll want to map from

	var callback func(string)
	if args.StatusCallback != nil {
		callback = func(copyProgress string) {
			args.StatusCallback(status.StatusAllocating, copyProgress, nil)
		}
	}
	if err := env.raw.EnsureImageExists(series, imageSources, callback); err != nil {
		return nil, errors.Trace(err)
	}

	metadata, err := getMetadata(args)
	if err != nil {
		return nil, errors.Trace(err)
	}
	//tags := []string{
	//	env.globalFirewallName(),
	//	machineID,
	//}
	// TODO(ericsnow) Use the env ID for the network name (instead of default)?
	// TODO(ericsnow) Make the network name configurable?
	// TODO(ericsnow) Support multiple networks?
	// TODO(ericsnow) Use a different net interface name? Configurable?
	instSpec := lxdclient.InstanceSpec{
		Name:  machineID,
		Image: image,
		//Type:              spec.InstanceType.Name,
		//Disks:             getDisks(spec, args.Constraints),
		//NetworkInterfaces: []string{"ExternalNAT"},
		Metadata: metadata,
		Profiles: []string{
			//TODO(wwitzel3) allow the user to specify lxc profiles to apply. This allows the
			// user to setup any custom devices order config settings for their environment.
			// Also we must ensure that a device with the parent: lxcbr0 exists in at least
			// one of the profiles.
			"default",
			env.profileName(),
		},
		//Tags:              tags,
		// Network is omitted (left empty).
	}

	logger.Infof("starting instance %q (image %q)...", instSpec.Name, instSpec.Image)
	if args.StatusCallback != nil {
		args.StatusCallback(status.StatusAllocating, "starting instance", nil)
	}
	inst, err := env.raw.AddInstance(instSpec)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if args.StatusCallback != nil {
		args.StatusCallback(status.StatusRunning, "Container started", nil)
	}
	return inst, nil
}