func (s *environAvailzonesSuite) TestInstancesReturnPartialInstances(c *gc.C) { client := vsphere.ExposeEnvFakeClient(s.Env) client.SetPropertyProxyHandler("FakeDatacenter", vsphere.RetrieveDatacenterProperties) vmName1 := common.MachineFullName(s.Env, "1") vmName2 := common.MachineFullName(s.Env, "2") s.FakeInstancesWithResourcePool(client, vsphere.InstRp{Inst: vmName1, Rp: "rp1"}, vsphere.InstRp{Inst: "Some inst", Rp: "rp2"}) _, err := s.Env.Instances([]instance.Id{instance.Id(vmName1), instance.Id(vmName2)}) c.Assert(err, gc.Equals, environs.ErrPartialInstances) }
func (s *environAvailzonesSuite) TestInstances(c *gc.C) { client := vsphere.ExposeEnvFakeClient(s.Env) client.SetPropertyProxyHandler("FakeDatacenter", vsphere.RetrieveDatacenterProperties) vmName1 := common.MachineFullName(s.Env, "1") vmName2 := common.MachineFullName(s.Env, "2") s.FakeInstancesWithResourcePool(client, vsphere.InstRp{Inst: vmName1, Rp: "rp1"}, vsphere.InstRp{Inst: vmName2, Rp: "rp2"}) instances, err := s.Env.Instances([]instance.Id{instance.Id(vmName1), instance.Id(vmName2)}) c.Assert(err, jc.ErrorIsNil) c.Assert(len(instances), gc.Equals, 2) c.Assert(string(instances[0].Id()), gc.Equals, vmName1) c.Assert(string(instances[1].Id()), gc.Equals, vmName2) }
// OpenPorts opens the given ports on the instance, which // should have been started with the given machine id. func (inst *environInstance) OpenPorts(machineID string, ports []network.PortRange) error { // TODO(ericsnow) Make sure machineId matches inst.Id()? name := common.MachineFullName(inst.env, machineID) env := inst.env.getSnapshot() err := env.gce.OpenPorts(name, ports...) return errors.Trace(err) }
// ControllerInstances returns the IDs of the instances corresponding // to juju controllers. func (env *environ) ControllerInstances() ([]instance.Id, error) { env = env.getSnapshot() prefix := common.MachineFullName(env, "") instances, err := env.client.Instances(prefix) if err != nil { return nil, errors.Trace(err) } var results []instance.Id for _, inst := range instances { metadata := inst.Config.ExtraConfig for _, item := range metadata { value := item.GetOptionValue() if value.Key == metadataKeyIsState && value.Value == metadataValueIsState { results = append(results, instance.Id(inst.Name)) break } } } if len(results) == 0 { return nil, environs.ErrNotBootstrapped } return results, nil }
// newRawInstance is where the new physical instance is actually // provisioned, relative to the provided args and spec. Info for that // low-level instance is returned. func (env *environ) newRawInstance(args environs.StartInstanceParams, spec *instances.InstanceSpec) (*google.Instance, error) { machineID := common.MachineFullName(env, args.InstanceConfig.MachineId) metadata, err := getMetadata(args) if err != nil { return nil, errors.Trace(err) } tags := []string{ env.globalFirewallName(), machineID, } // TODO(ericsnow) Use the env ID for the network name (instead of default)? // TODO(ericsnow) Make the network name configurable? // TODO(ericsnow) Support multiple networks? // TODO(ericsnow) Use a different net interface name? Configurable? instSpec := google.InstanceSpec{ ID: machineID, Type: spec.InstanceType.Name, Disks: getDisks(spec, args.Constraints), NetworkInterfaces: []string{"ExternalNAT"}, Metadata: metadata, Tags: tags, // Network is omitted (left empty). } zones, err := env.parseAvailabilityZones(args) if err != nil { return nil, errors.Trace(err) } inst, err := env.gce.AddInstance(instSpec, zones...) return inst, errors.Trace(err) }
// ClosePorts closes the given ports on the instance, which // should have been started with the given machine id. func (inst *environInstance) ClosePorts(machineID string, ports []network.PortRange) error { name := common.MachineFullName(inst.env.Config().UUID(), machineID) err := inst.env.raw.ClosePorts(name, ports...) if errors.IsNotImplemented(err) { // TODO(ericsnow) for now... return nil } return errors.Trace(err) }
// Ports returns the set of ports open on the instance, which // should have been started with the given machine id. // The ports are returned as sorted by SortPorts. func (inst *environInstance) Ports(machineID string) ([]network.PortRange, error) { name := common.MachineFullName(inst.env.Config().UUID(), machineID) ports, err := inst.env.raw.Ports(name) if errors.IsNotImplemented(err) { // TODO(ericsnow) for now... return nil, nil } return ports, errors.Trace(err) }
// StopInstances implements environs.InstanceBroker. func (env *environ) StopInstances(instances ...instance.Id) error { var ids []string for _, id := range instances { ids = append(ids, string(id)) } prefix := common.MachineFullName(env.Config().UUID(), "") err := env.raw.RemoveInstances(prefix, ids...) return errors.Trace(err) }
// StopInstances implements environs.InstanceBroker. func (env *environ) StopInstances(instances ...instance.Id) error { env = env.getSnapshot() var ids []string for _, id := range instances { ids = append(ids, string(id)) } prefix := common.MachineFullName(env, "") err := env.gce.RemoveInstances(prefix, ids...) return errors.Trace(err) }
func (s *environAvailzonesSuite) TestInstanceAvailabilityZoneNames(c *gc.C) { client := vsphere.ExposeEnvFakeClient(s.Env) client.SetPropertyProxyHandler("FakeDatacenter", vsphere.RetrieveDatacenterProperties) vmName := common.MachineFullName(s.Env, "1") s.FakeInstancesWithResourcePool(client, vsphere.InstRp{Inst: vmName, Rp: "rp1"}) s.FakeAvailabilityZonesWithResourcePool(client, vsphere.ZoneRp{Zone: "z1", Rp: "rp1"}, vsphere.ZoneRp{Zone: "z2", Rp: "rp2"}) zones, err := s.Env.InstanceAvailabilityZoneNames([]instance.Id{instance.Id(vmName)}) c.Assert(err, jc.ErrorIsNil) c.Assert(len(zones), gc.Equals, 1) c.Assert(zones[0], gc.Equals, "z1") }
// instances returns a list of all "alive" instances in the environment. // This means only instances where the IDs match // "juju-<env name>-machine-*". This is important because otherwise juju // will see they are not tracked in state, assume they're stale/rogue, // and shut them down. func (env *environ) instances() ([]instance.Instance, error) { prefix := common.MachineFullName(env.Config().UUID(), "") instances, err := env.client.Instances(prefix) err = errors.Trace(err) // Turn mo.VirtualMachine values into *environInstance values, // whether or not we got an error. var results []instance.Instance for _, base := range instances { inst := newInstance(base, env) results = append(results, inst) } return results, err }
// instances returns a list of all "alive" instances in the environment. // This means only instances where the IDs match // "juju-<env name>-machine-*". This is important because otherwise juju // will see they are not tracked in state, assume they're stale/rogue, // and shut them down. func (env *environ) instances() ([]instance.Instance, error) { prefix := common.MachineFullName(env.Config().UUID(), "") instances, err := env.raw.Instances(prefix, instStatuses...) err = errors.Trace(err) // Turn lxdclient.Instance values into *environInstance values, // whether or not we got an error. var results []instance.Instance for _, base := range instances { // If we don't make a copy then the same pointer is used for the // base of all resulting instances. copied := base inst := newInstance(&copied, env) results = append(results, inst) } return results, err }
// ControllerInstances returns the IDs of the instances corresponding // to juju controllers. func (env *environ) ControllerInstances() ([]instance.Id, error) { prefix := common.MachineFullName(env.Config().ControllerUUID(), "") instances, err := env.raw.Instances(prefix, lxdclient.AliveStatuses...) if err != nil { return nil, errors.Trace(err) } var results []instance.Id for _, inst := range instances { if inst.Metadata()[tags.JujuIsController] == "true" { results = append(results, instance.Id(inst.Name)) } } if len(results) == 0 { return nil, environs.ErrNotBootstrapped } return results, nil }
// newRawInstance is where the new physical instance is actually
// provisioned, relative to the provided args and spec. Info for that
// low-level instance is returned.
func (env *environ) newRawInstance(args environs.StartInstanceParams) (*lxdclient.Instance, error) {
	machineID := common.MachineFullName(env, args.InstanceConfig.MachineId)

	// Derive the image name from the tools series; presumably the LXD
	// remote publishes images under "ubuntu-<series>" — TODO confirm.
	series := args.Tools.OneSeries()
	image := "ubuntu-" + series
	metadata, err := getMetadata(args)
	if err != nil {
		return nil, errors.Trace(err)
	}
	//tags := []string{
	//	env.globalFirewallName(),
	//	machineID,
	//}
	// TODO(ericsnow) Use the env ID for the network name (instead of default)?
	// TODO(ericsnow) Make the network name configurable?
	// TODO(ericsnow) Support multiple networks?
	// TODO(ericsnow) Use a different net interface name? Configurable?
	instSpec := lxdclient.InstanceSpec{
		Name:  machineID,
		Image: image,
		//Type:              spec.InstanceType.Name,
		//Disks:             getDisks(spec, args.Constraints),
		//NetworkInterfaces: []string{"ExternalNAT"},
		Metadata: metadata,
		Profiles: []string{
			//TODO(wwitzel3) move this to environments.yaml allowing the user to specify
			// lxc profiles to apply. This allows the user to setup any custom devices order
			// config settings for their environment. Also we must ensure that a device with
			// the parent: lxcbr0 exists in at least one of the profiles.
			"default",
			env.profileName(),
		},
		//Tags: tags,
		// Network is omitted (left empty).
	}

	logger.Infof("starting instance %q (image %q)...", instSpec.Name, instSpec.Image)
	inst, err := env.raw.AddInstance(instSpec)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return inst, nil
}
// ControllerInstances returns the IDs of the instances corresponding // to juju controllers. func (env *environ) ControllerInstances() ([]instance.Id, error) { prefix := common.MachineFullName(env.Config().ControllerUUID(), "") instances, err := env.raw.Instances(prefix, instStatuses...) if err != nil { return nil, errors.Trace(err) } var results []instance.Id for _, inst := range instances { metadata := inst.Metadata() isState, ok := metadata[metadataKeyIsState] if ok && isState == metadataValueTrue { results = append(results, instance.Id(inst.Name)) } } if len(results) == 0 { return nil, environs.ErrNotBootstrapped } return results, nil }
// StateServerInstances returns the IDs of the instances corresponding // to juju state servers. func (env *environ) StateServerInstances() ([]instance.Id, error) { env = env.getSnapshot() prefix := common.MachineFullName(env, "") instances, err := env.gce.Instances(prefix, instStatuses...) if err != nil { return nil, errors.Trace(err) } var results []instance.Id for _, inst := range instances { metadata := inst.Metadata() isState, ok := metadata[metadataKeyIsState] if ok && isState == metadataValueTrue { results = append(results, instance.Id(inst.ID)) } } if len(results) == 0 { return nil, environs.ErrNotBootstrapped } return results, nil }
// Ports returns the set of ports open on the instance, which // should have been started with the given machine id. // The ports are returned as sorted by SortPorts. func (inst *environInstance) Ports(machineID string) ([]network.PortRange, error) { name := common.MachineFullName(inst.env.Config().UUID(), machineID) ports, err := inst.env.gce.Ports(name) return ports, errors.Trace(err) }
// instances returns a list of all "alive" instances in the environment. // We match machine names to the pattern "juju-<model-UUID>-machine-*" // to ensure that only machines for the environment are returned. This // is necessary to isolate multiple models within the same LXD. func (env *environ) allInstances() ([]*environInstance, error) { prefix := common.MachineFullName(env.Config().UUID(), "") return env.prefixedInstances(prefix) }
// newRawInstance is where the new physical instance is actually // provisioned, relative to the provided args and spec. Info for that // low-level instance is returned. func (env *environ) newRawInstance(args environs.StartInstanceParams, img *OvaFileMetadata) (*mo.VirtualMachine, *instance.HardwareCharacteristics, error) { machineID := common.MachineFullName(env, args.InstanceConfig.MachineId) cloudcfg, err := cloudinit.New(args.Tools.OneSeries()) if err != nil { return nil, nil, errors.Trace(err) } cloudcfg.AddPackage("open-vm-tools") cloudcfg.AddPackage("iptables-persistent") userData, err := providerinit.ComposeUserData(args.InstanceConfig, cloudcfg, VsphereRenderer{}) if err != nil { return nil, nil, errors.Annotate(err, "cannot make user data") } logger.Debugf("Vmware user data; %d bytes", len(userData)) rootDisk := common.MinRootDiskSizeGiB * 1024 if args.Constraints.RootDisk != nil && *args.Constraints.RootDisk > rootDisk { rootDisk = *args.Constraints.RootDisk } cpuCores := DefaultCpuCores if args.Constraints.CpuCores != nil { cpuCores = *args.Constraints.CpuCores } cpuPower := DefaultCpuPower if args.Constraints.CpuPower != nil { cpuPower = *args.Constraints.CpuPower } mem := DefaultMemMb if args.Constraints.Mem != nil { mem = *args.Constraints.Mem } hwc := &instance.HardwareCharacteristics{ Arch: &img.Arch, Mem: &mem, CpuCores: &cpuCores, CpuPower: &cpuPower, RootDisk: &rootDisk, } zones, err := env.parseAvailabilityZones(args) if err != nil { return nil, nil, errors.Trace(err) } var inst *mo.VirtualMachine for _, zone := range zones { var availZone *vmwareAvailZone availZone, err = env.availZone(zone) if err != nil { logger.Warningf("Error while getting availability zone %s: %s", zone, err) continue } apiPort := 0 if isStateServer(args.InstanceConfig) { apiPort = args.InstanceConfig.StateServingInfo.APIPort } spec := &instanceSpec{ machineID: machineID, zone: availZone, hwc: hwc, img: img, userData: userData, sshKey: args.InstanceConfig.AuthorizedKeys, 
isState: isStateServer(args.InstanceConfig), apiPort: apiPort, } inst, err = env.client.CreateInstance(env.ecfg, spec) if err != nil { logger.Warningf("Error while trying to create instance in %s availability zone: %s", zone, err) continue } break } if err != nil { return nil, nil, errors.Annotate(err, "Can't create instance in any of availability zones, last error") } return inst, hwc, err }
// ClosePorts closes the given ports on the instance, which // should have been started with the given machine id. func (inst *environInstance) ClosePorts(machineID string, ports []network.PortRange) error { name := common.MachineFullName(inst.env, machineID) env := inst.env.getSnapshot() err := env.gce.ClosePorts(name, ports...) return errors.Trace(err) }
// newRawInstance is where the new physical instance is actually
// provisioned, relative to the provided args and spec. Info for that
// low-level instance is returned.
func (env *environ) newRawInstance(args environs.StartInstanceParams) (*lxdclient.Instance, error) {
	machineID := common.MachineFullName(env.Config().UUID(), args.InstanceConfig.MachineId)

	// Note: other providers have the ImageMetadata already read for them
	// and passed in as args.ImageMetadata. However, lxd provider doesn't
	// use datatype: image-ids, it uses datatype: image-download, and we
	// don't have a registered cloud/region.
	imageSources, err := env.getImageSources()
	if err != nil {
		return nil, errors.Trace(err)
	}
	series := args.Tools.OneSeries()
	// TODO(jam): We should get this information from EnsureImageExists, or
	// something given to us from 'raw', not assume it ourselves.
	image := "ubuntu-" + series
	// TODO: support args.Constraints.Arch, we'll want to map from

	// Forward image-download progress into the machine's status, but
	// only when the caller supplied a status callback.
	var callback func(string)
	if args.StatusCallback != nil {
		callback = func(copyProgress string) {
			args.StatusCallback(status.StatusAllocating, copyProgress, nil)
		}
	}
	if err := env.raw.EnsureImageExists(series, imageSources, callback); err != nil {
		return nil, errors.Trace(err)
	}

	metadata, err := getMetadata(args)
	if err != nil {
		return nil, errors.Trace(err)
	}
	//tags := []string{
	//	env.globalFirewallName(),
	//	machineID,
	//}
	// TODO(ericsnow) Use the env ID for the network name (instead of default)?
	// TODO(ericsnow) Make the network name configurable?
	// TODO(ericsnow) Support multiple networks?
	// TODO(ericsnow) Use a different net interface name? Configurable?
	instSpec := lxdclient.InstanceSpec{
		Name:  machineID,
		Image: image,
		//Type:              spec.InstanceType.Name,
		//Disks:             getDisks(spec, args.Constraints),
		//NetworkInterfaces: []string{"ExternalNAT"},
		Metadata: metadata,
		Profiles: []string{
			//TODO(wwitzel3) allow the user to specify lxc profiles to apply. This allows the
			// user to setup any custom devices order config settings for their environment.
			// Also we must ensure that a device with the parent: lxcbr0 exists in at least
			// one of the profiles.
			"default",
			env.profileName(),
		},
		//Tags: tags,
		// Network is omitted (left empty).
	}

	logger.Infof("starting instance %q (image %q)...", instSpec.Name, instSpec.Image)
	if args.StatusCallback != nil {
		args.StatusCallback(status.StatusAllocating, "starting instance", nil)
	}
	inst, err := env.raw.AddInstance(instSpec)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if args.StatusCallback != nil {
		args.StatusCallback(status.StatusRunning, "Container started", nil)
	}
	return inst, nil
}
// ClosePorts closes the given ports on the instance, which // should have been started with the given machine id. func (inst *environInstance) ClosePorts(machineID string, ports []network.PortRange) error { name := common.MachineFullName(inst.env.Config().UUID(), machineID) err := inst.env.gce.ClosePorts(name, ports...) return errors.Trace(err) }
// Ports returns the set of ports open on the instance, which // should have been started with the given machine id. // The ports are returned as sorted by SortPorts. func (inst *environInstance) Ports(machineID string) ([]network.PortRange, error) { name := common.MachineFullName(inst.env, machineID) env := inst.env.getSnapshot() ports, err := env.gce.Ports(name) return ports, errors.Trace(err) }