func addVNIC(ui packer.Ui, f *find.Finder, ctx context.Context, c *govmomi.Client, vm *object.VirtualMachine, network string, nwType string) error {

	ui.Say("Adding NIC")

	nets, err := f.NetworkList(ctx, network)
	if err != nil {
		return err
	}
	// TODO: expose a parameter for selecting the DVS-backed network.
	if len(nets) < 2 {
		return fmt.Errorf("expected at least two networks matching %q, found %d", network, len(nets))
	}
	net := nets[1]

	backing, err := net.EthernetCardBackingInfo(ctx)
	if err != nil {
		return err
	}
	device, err := object.EthernetCardTypes().CreateEthernetCard(nwType, backing)
	if err != nil {
		return err
	}
	err = vm.AddDevice(ctx, device)
	if err != nil {
		return err
	}
	ui.Say("Adding NIC Success")

	return nil
}
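A minimal sketch of a call site for addVNIC, assuming a configured packer.Ui, find.Finder, govmomi client, context, and VM handle (all names hypothetical); "vmxnet3" is one of the card types understood by object.EthernetCardTypes():

if err := addVNIC(ui, finder, ctx, client, vm, "*", "vmxnet3"); err != nil {
	return fmt.Errorf("failed to add NIC: %s", err)
}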
Example #2
func (cmd *vmdk) DetachDisk(vm *object.VirtualMachine) (string, error) {
	ctx := context.TODO()
	var mvm mo.VirtualMachine

	pc := property.DefaultCollector(cmd.Client)
	err := pc.RetrieveOne(ctx, vm.Reference(), []string{"config.hardware"}, &mvm)
	if err != nil {
		return "", err
	}

	spec := new(configSpec)
	dsFile := spec.RemoveDisk(&mvm)

	task, err := vm.Reconfigure(ctx, spec.ToSpec())
	if err != nil {
		return "", err
	}

	err = task.Wait(ctx)
	if err != nil {
		return "", err
	}

	return dsFile, nil
}
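The configSpec type is defined elsewhere in the source project; a plausible minimal sketch of it, assuming RemoveDisk marks every virtual disk for removal and reports the backing file of the last one (an illustration, not the project's actual code):

type configSpec types.VirtualMachineConfigSpec

func (s *configSpec) ToSpec() types.VirtualMachineConfigSpec {
	return types.VirtualMachineConfigSpec(*s)
}

// RemoveDisk appends a remove operation for each virtual disk in the VM's
// hardware list and returns the datastore file backing the last one seen.
func (s *configSpec) RemoveDisk(vm *mo.VirtualMachine) string {
	var file string
	for _, d := range vm.Config.Hardware.Device {
		disk, ok := d.(*types.VirtualDisk)
		if !ok {
			continue
		}
		if b, ok := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {
			file = b.FileName
		}
		s.DeviceChange = append(s.DeviceChange, &types.VirtualDeviceConfigSpec{
			Operation: types.VirtualDeviceConfigSpecOperationRemove,
			Device:    disk,
		})
	}
	return file
}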
Example #3
func (vmh *VMHost) FindHosts(targetVM *object.VirtualMachine) (hosts []*object.HostSystem, err error) {
	targetResourcePool, err := targetVM.ResourcePool(vmh.Ctx)
	if err != nil {
		return nil, errors.New("Error with finding Resource Pool of VM")
	}
	var resourcePoolProp mo.ResourcePool
	err = targetResourcePool.Properties(vmh.Ctx, targetResourcePool.Reference(), []string{"owner"}, &resourcePoolProp)

	if err != nil {
		return nil, errors.New("Error with finding Owner of Resource Pool")
	}

	typeOfOwningResource := resourcePoolProp.Owner.Type
	// Scenario in which the VM is part of a cluster (not tied to a single ESXi host) - VMware DRS.
	if typeOfOwningResource == "ClusterComputeResource" {
		cluster := object.NewClusterComputeResource(vmh.client.Client, resourcePoolProp.Owner)
		var clusterProp mo.ClusterComputeResource
		err = cluster.Properties(vmh.Ctx, cluster.Reference(), []string{"host"}, &clusterProp)
		if err != nil {
			return nil, errors.New("Error with finding Hosts of Cluster")
		}

		// Convert managed object references into HostSystem objects to return.
		for _, host := range clusterProp.Host {
			newHost := object.NewHostSystem(vmh.client.Client, host)
			hosts = append(hosts, newHost)
		}
		return hosts, nil
	}

	return nil, errors.New("single (non-clustered) hosts are not supported yet")

}
Example #4
// IpAddress attempts to find the guest IP address using esxcli.
// ESX hosts must have the /Net/GuestIPHack setting enabled.
// For example:
// $ govc host.esxcli -- system settings advanced set -o /Net/GuestIPHack -i 1
func (g *GuestInfo) IpAddress(vm *object.VirtualMachine) (string, error) {
	const anyAddr = "0.0.0.0"
	var mvm mo.VirtualMachine

	pc := property.DefaultCollector(g.c)
	err := pc.RetrieveOne(context.TODO(), vm.Reference(), []string{"runtime.host", "config.uuid"}, &mvm)
	if err != nil {
		return "", err
	}

	h, err := g.hostInfo(mvm.Runtime.Host)
	if err != nil {
		return "", err
	}

	// Normalize the UUID; esxcli and mo.VirtualMachine use different formats.
	uuid := strings.Replace(mvm.Config.Uuid, "-", "", -1)

	if wid, ok := h.wids[uuid]; ok {
		res, err := h.Run([]string{"network", "vm", "port", "list", "--world-id", wid})
		if err != nil {
			return "", err
		}

		for _, val := range res.Values {
			if ip, ok := val["IPAddress"]; ok {
				if ip[0] != anyAddr {
					return ip[0], nil
				}
			}
		}
	}

	return anyAddr, nil
}
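The same setting can also be enabled through the vSphere API instead of esxcli; a sketch using govmomi's OptionManager, where the key name "Net.GuestIPHack" and the int64 value type are assumptions about the host's advanced-option schema:

func enableGuestIPHack(ctx context.Context, host *object.HostSystem) error {
	om, err := host.ConfigManager().OptionManager(ctx)
	if err != nil {
		return err
	}
	// Advanced option keys are dot-separated in the API, unlike the
	// slash-separated esxcli paths.
	return om.Update(ctx, []types.BaseOptionValue{
		&types.OptionValue{Key: "Net.GuestIPHack", Value: int64(1)},
	})
}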
Example #5
func (i *vSphereInstanceManager) instanceForVirtualMachine(ctx context.Context, vm *object.VirtualMachine) (inst *Instance, err error) {
	defer func() {
		if recoverErr := recover(); recoverErr != nil {
			inst = nil
			if e, ok := recoverErr.(error); ok {
				err = e
			} else {
				err = fmt.Errorf("recovered from panic: %v", recoverErr)
			}
		}
	}()

	var mvm mo.VirtualMachine
	err = vm.Properties(ctx, vm.Reference(), []string{"config", "guest", "runtime"}, &mvm)
	if err != nil {
		return nil, err
	}

	var ipAddresses []string
	for _, nic := range mvm.Guest.Net {
		for _, ip := range nic.IpConfig.IpAddress {
			ipAddresses = append(ipAddresses, ip.IpAddress)
		}
	}

	if reflect.DeepEqual(mvm.Runtime, types.VirtualMachineRuntimeInfo{}) {
		return nil, fmt.Errorf("no instance for vm %v", vm)
	}

	return &Instance{
		ID:          mvm.Config.Name,
		IPAddresses: ipAddresses,
		State:       string(mvm.Runtime.PowerState),
	}, nil
}
// buildVMRelocateSpec builds VirtualMachineRelocateSpec to set a place for a new VirtualMachine.
func buildVMRelocateSpec(rp *object.ResourcePool, ds *object.Datastore, vm *object.VirtualMachine) (types.VirtualMachineRelocateSpec, error) {
	var key int

	devices, err := vm.Device(context.TODO())
	if err != nil {
		return types.VirtualMachineRelocateSpec{}, err
	}
	for _, d := range devices {
		if devices.Type(d) == "disk" {
			key = d.GetVirtualDevice().Key
		}
	}

	rpr := rp.Reference()
	dsr := ds.Reference()
	return types.VirtualMachineRelocateSpec{
		Datastore: &dsr,
		Pool:      &rpr,
		Disk: []types.VirtualMachineRelocateSpecDiskLocator{
			{
				Datastore: dsr,
				DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{
					DiskMode:        "persistent",
					ThinProvisioned: types.NewBool(false),
					EagerlyScrub:    types.NewBool(true),
				},
				DiskId: key,
			},
		},
	}, nil
}
Example #7
func (vmh *VMHost) getMACAddressOfVM(vm *object.VirtualMachine) (string, error) {

	vmDeviceList, err := vm.Device(context.TODO())
	if err != nil {
		return "", errors.New("Cannot read VM VirtualDevices")
	}
	return vmDeviceList.PrimaryMacAddress(), nil
}
Example #8
func getVirtualMachineManagedObjectReference(ctx context.Context, c *govmomi.Client, vm *object.VirtualMachine, field string, dst interface{}) error {
	collector := property.DefaultCollector(c.Client)

	// Retrieve required field from VM object
	err := collector.RetrieveOne(ctx, vm.Reference(), []string{field}, dst)
	if err != nil {
		return err
	}
	return nil
}
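Despite its name, the helper simply fetches one property subtree into a destination struct; a usage sketch (the "summary" path and the logged field are illustrative):

var mvm mo.VirtualMachine
if err := getVirtualMachineManagedObjectReference(ctx, client, vm, "summary", &mvm); err != nil {
	return err
}
log.Printf("power state: %s", mvm.Summary.Runtime.PowerState)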
// buildVMRelocateSpec builds VirtualMachineRelocateSpec to set a place for a new VirtualMachine.
func buildVMRelocateSpec(finder *find.Finder, rp *object.ResourcePool, ds *object.Datastore, vm *object.VirtualMachine, linked bool) (types.VirtualMachineRelocateSpec, error) {
	var key int
	var parent *types.VirtualDiskFlatVer2BackingInfo

	devices, err := vm.Device(context.TODO())
	if err != nil {
		return types.VirtualMachineRelocateSpec{}, err
	}

	for _, d := range devices {
		if devices.Type(d) == "disk" {
			vd := d.GetVirtualDevice()
			parent = vd.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
			key = vd.Key
		}
	}

	rpr := rp.Reference()
	relocateSpec := types.VirtualMachineRelocateSpec{}
	// Treat linked clones a bit differently.
	if linked {
		if parent == nil {
			return types.VirtualMachineRelocateSpec{}, fmt.Errorf("no existing disk found to use as parent for the linked clone")
		}
		parentDs := strings.SplitN(parent.FileName[1:], "]", 2)
		parentDsObj, err := finder.Datastore(context.TODO(), parentDs[0])
		if err != nil {
			return types.VirtualMachineRelocateSpec{}, err
		}

		parentDbObjRef := parentDsObj.Reference()
		relocateSpec = types.VirtualMachineRelocateSpec{
			Datastore:    &parentDbObjRef,
			Pool:         &rpr,
			DiskMoveType: "createNewChildDiskBacking",
		}
	} else {
		dsr := ds.Reference()

		relocateSpec = types.VirtualMachineRelocateSpec{
			Datastore: &dsr,
			Pool:      &rpr,
			Disk: []types.VirtualMachineRelocateSpecDiskLocator{
				{
					Datastore: dsr,
					DiskId:    key,
					DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{
						DiskMode:        "persistent",
						ThinProvisioned: types.NewBool(false),
						EagerlyScrub:    types.NewBool(true),
					},
				},
			},
		}
	}

	return relocateSpec, nil
}
// addHardDisk adds a new Hard Disk to the VirtualMachine.
func addHardDisk(vm *object.VirtualMachine, size, iops int64, diskType string, datastore *object.Datastore, diskPath string, controller_type string) error {
	devices, err := vm.Device(context.TODO())
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] vm devices: %#v\n", devices)

	controller, err := devices.FindDiskController(controller_type)
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] disk controller: %#v\n", controller)

	// TODO: check that diskPath and datastore exist.
	if diskPath == "" {
		return fmt.Errorf("[ERROR] addHardDisk - no disk path provided")
	}
	diskPath = fmt.Sprintf("[%v] %v", datastore.Name(), diskPath)
	log.Printf("[DEBUG] addHardDisk - diskPath: %v", diskPath)
	disk := devices.CreateDisk(controller, datastore.Reference(), diskPath)

	existing := devices.SelectByBackingInfo(disk.Backing)
	log.Printf("[DEBUG] disk: %#v\n", disk)

	if len(existing) == 0 {
		disk.CapacityInKB = int64(size * 1024 * 1024)
		if iops != 0 {
			disk.StorageIOAllocation = &types.StorageIOAllocationInfo{
				Limit: iops,
			}
		}
		backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)

		if diskType == "eager_zeroed" {
			// eager zeroed thick virtual disk
			backing.ThinProvisioned = types.NewBool(false)
			backing.EagerlyScrub = types.NewBool(true)
		} else if diskType == "thin" {
			// thin provisioned virtual disk
			backing.ThinProvisioned = types.NewBool(true)
		}

		log.Printf("[DEBUG] addHardDisk: %#v\n", disk)
		log.Printf("[DEBUG] addHardDisk capacity: %#v\n", disk.CapacityInKB)

		return vm.AddDevice(context.TODO(), disk)
	} else {
		log.Printf("[DEBUG] addHardDisk: Disk already present.\n")

		return nil
	}
}
Example #11
// cleanUpController removes the most recently attached SCSI controller from the VM.
func cleanUpController(newSCSIController types.BaseVirtualDevice, vmDevices object.VirtualDeviceList, vm *object.VirtualMachine, ctx context.Context) error {
	ctls := vmDevices.SelectByType(newSCSIController)
	if len(ctls) < 1 {
		return ErrNoDevicesFound
	}
	newScsi := ctls[len(ctls)-1]
	err := vm.RemoveDevice(ctx, true, newScsi)
	if err != nil {
		return err
	}
	return nil
}
Example #12
func (cmd *ovfx) WaitForIP(vm *object.VirtualMachine) error {
	if !cmd.Options.PowerOn || !cmd.Options.WaitForIP {
		return nil
	}

	cmd.Log("Waiting for IP address...\n")
	ip, err := vm.WaitForIP(context.TODO())
	if err != nil {
		return err
	}

	cmd.Log(fmt.Sprintf("Received IP address: %s\n", ip))
	return nil
}
Example #13
func (cmd *ovfx) InjectOvfEnv(vm *object.VirtualMachine) error {
	if !cmd.Options.InjectOvfEnv {
		return nil
	}

	cmd.Log("Injecting OVF environment...\n")

	var opts []types.BaseOptionValue

	a := cmd.Client.ServiceContent.About

	// build up Environment in order to marshal to xml
	var props []ovf.EnvProperty
	for _, p := range cmd.Options.PropertyMapping {
		props = append(props, ovf.EnvProperty{
			Key:   p.Key,
			Value: p.Value,
		})
	}

	env := ovf.Env{
		EsxID: vm.Reference().Value,
		Platform: &ovf.PlatformSection{
			Kind:    a.Name,
			Version: a.Version,
			Vendor:  a.Vendor,
			Locale:  "US",
		},
		Property: &ovf.PropertySection{
			Properties: props,
		},
	}

	opts = append(opts, &types.OptionValue{
		Key:   "guestinfo.ovfEnv",
		Value: env.MarshalManual(),
	})

	ctx := context.Background()

	task, err := vm.Reconfigure(ctx, types.VirtualMachineConfigSpec{
		ExtraConfig: opts,
	})

	if err != nil {
		return err
	}

	return task.Wait(ctx)
}
Example #14
File: ovf.go Project: vmware/vic
func (cmd *ovfx) InjectOvfEnv(vm *object.VirtualMachine) error {
	ctx := context.TODO()
	if !cmd.Options.PowerOn || !cmd.Options.InjectOvfEnv {
		return nil
	}

	a := cmd.Client.ServiceContent.About
	if strings.EqualFold(a.ProductLineId, "esx") || strings.EqualFold(a.ProductLineId, "embeddedEsx") || strings.EqualFold(a.ProductLineId, "vpx") {
		cmd.Log("Injecting OVF environment...\n")

		// build up Environment in order to marshal to xml
		var epa []ovf.EnvProperty
		for _, p := range cmd.Options.PropertyMapping {
			epa = append(epa, ovf.EnvProperty{
				Key:   p.Key,
				Value: p.Value})
		}
		env := ovf.Env{
			EsxID: vm.Reference().Value,
			Platform: &ovf.PlatformSection{
				Kind:    a.Name,
				Version: a.Version,
				Vendor:  a.Vendor,
				Locale:  "US",
			},
			Property: &ovf.PropertySection{
				Properties: epa},
		}

		xenv := env.MarshalManual()
		vmConfigSpec := types.VirtualMachineConfigSpec{
			ExtraConfig: []types.BaseOptionValue{&types.OptionValue{
				Key:   "guestinfo.ovfEnv",
				Value: xenv}}}

		task, err := vm.Reconfigure(ctx, vmConfigSpec)
		if err != nil {
			return err
		}
		if err := task.Wait(ctx); err != nil {
			return err
		}
	}

	return nil
}
Example #15
func (cmd *create) addDevices(vm *object.VirtualMachine) error {
	devices, err := vm.Device(context.TODO())
	if err != nil {
		return err
	}

	var add []types.BaseVirtualDevice

	if cmd.disk != "" {
		controller, err := devices.FindDiskController(cmd.controller)
		if err != nil {
			return err
		}

		disk := devices.CreateDisk(controller, cmd.Datastore.Path(cmd.disk))

		if cmd.link {
			disk = devices.ChildDisk(disk)
		}

		add = append(add, disk)
	}

	if cmd.iso != "" {
		ide, err := devices.FindIDEController("")
		if err != nil {
			return err
		}

		cdrom, err := devices.CreateCdrom(ide)
		if err != nil {
			return err
		}

		add = append(add, devices.InsertIso(cdrom, cmd.Datastore.Path(cmd.iso)))
	}

	netdev, err := cmd.NetworkFlag.Device()
	if err != nil {
		return err
	}

	add = append(add, netdev)

	return vm.AddDevice(context.TODO(), add...)
}
// addHardDisk adds a new Hard Disk to the VirtualMachine.
func addHardDisk(vm *object.VirtualMachine, size, iops int64, diskType string) error {
	devices, err := vm.Device(context.TODO())
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] vm devices: %#v\n", devices)

	controller, err := devices.FindDiskController("scsi")
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] disk controller: %#v\n", controller)

	disk := devices.CreateDisk(controller, "")
	existing := devices.SelectByBackingInfo(disk.Backing)
	log.Printf("[DEBUG] disk: %#v\n", disk)

	if len(existing) == 0 {
		disk.CapacityInKB = int64(size * 1024 * 1024)
		if iops != 0 {
			disk.StorageIOAllocation = &types.StorageIOAllocationInfo{
				Limit: iops,
			}
		}
		backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)

		// The diskType parameter is currently ignored here; disks created by
		// this variant are always thin provisioned.
		backing.ThinProvisioned = types.NewBool(true)

		log.Printf("[DEBUG] addHardDisk: %#v\n", disk)
		log.Printf("[DEBUG] addHardDisk: %#v\n", disk.CapacityInKB)

		return vm.AddDevice(context.TODO(), disk)
	} else {
		log.Printf("[DEBUG] addHardDisk: Disk already present.\n")

		return nil
	}
}
Example #17
func (cmd *ovfx) PowerOn(vm *object.VirtualMachine) error {
	if !cmd.Options.PowerOn {
		return nil
	}

	cmd.Log("Powering on vm...\n")

	task, err := vm.PowerOn(context.TODO())
	if err != nil {
		return err
	}

	if _, err = task.WaitForResult(context.TODO(), nil); err != nil {
		return err
	}

	return nil
}
Example #18
func (cmd *vmdk) DestroyVM(vm *object.VirtualMachine) error {
	_, err := cmd.DetachDisk(vm)
	if err != nil {
		return err
	}

	task, err := vm.Destroy(context.TODO())
	if err != nil {
		return err
	}

	err = task.Wait(context.TODO())
	if err != nil {
		return err
	}

	return nil
}
Example #19
func (vmh *VMHost) getVmScsiDiskDeviceInfo(vm *object.VirtualMachine) ([]types.VirtualMachineScsiDiskDeviceInfo, error) {
	var VM_withProp mo.VirtualMachine
	err := vm.Properties(vmh.Ctx, vm.Reference(), []string{"environmentBrowser"}, &VM_withProp)
	if err != nil {
		return nil, errors.New(fmt.Sprintf("Error finding Environment Browser for VM - %S", err))
	}

	// Query the VM to find devices available for attaching.
	var queryConfigRequest types.QueryConfigTarget
	queryConfigRequest.This = VM_withProp.EnvironmentBrowser
	queryConfigResp, err := methods.QueryConfigTarget(vmh.Ctx, vmh.client.Client, &queryConfigRequest)
	if err != nil {
		return nil, errors.New(fmt.Sprintf("Error Obtaining Configuration Options of Host System that VM is On - %S", err))
	}
	vmConfigOptions := *queryConfigResp.Returnval

	return vmConfigOptions.ScsiDisk, nil
}
Example #20
func (vmh *VMHost) DetachRDM(vm *object.VirtualMachine, deviceNAA string) error {

	scsiLuns, err := vmh.GetSCSILuns()
	if err != nil {
		return err
	}

	mapSDI := make(map[string]*types.ScsiLun)
	for _, d := range scsiLuns {
		mapSDI[d.Uuid] = d
	}

	devices, err := vm.Device(context.TODO())
	if err != nil {
		return err
	}

	for _, device := range devices {
		device2 := device.GetVirtualDevice()
		if device2.Backing != nil {
			elem := reflect.ValueOf(device2.Backing).Elem()
			lunUuid := elem.FieldByName("LunUuid")
			if lunUuid.Kind() == reflect.Invalid {
				continue
			}
			if sd, ok := mapSDI[lunUuid.String()]; ok && strings.Contains(sd.CanonicalName, deviceNAA) {
				deviceName := devices.Name(device)
				newDevice := devices.Find(deviceName)
				if newDevice == nil {
					return fmt.Errorf("device '%s' not found", deviceName)
				}
				if err = vm.RemoveDevice(context.TODO(), newDevice); err != nil {
					return err
				}
				break
			}
		}

	}

	return nil
}
Example #21
// Find the disk by name attached to the given vm.
func findDisk(ctx context.Context, vm *object.VirtualMachine, name string) (*types.VirtualDisk, error) {
	defer trace.End(trace.Begin(vm.String()))

	log.Debugf("Looking for attached disk matching filename %s", name)

	devices, err := vm.Device(ctx)
	if err != nil {
		return nil, fmt.Errorf("Failed to refresh devices for vm: %s", errors.ErrorStack(err))
	}

	candidates := devices.Select(func(device types.BaseVirtualDevice) bool {
		db := device.GetVirtualDevice().Backing
		if db == nil {
			return false
		}

		backing, ok := device.GetVirtualDevice().Backing.(*types.VirtualDiskFlatVer2BackingInfo)
		if !ok {
			return false
		}

		log.Debugf("backing file name %s", backing.VirtualDeviceFileBackingInfo.FileName)
		match := strings.HasSuffix(backing.VirtualDeviceFileBackingInfo.FileName, name)
		if match {
			log.Debugf("Found candidate disk for %s at %s", name, backing.VirtualDeviceFileBackingInfo.FileName)
		}

		return match
	})

	if len(candidates) == 0 {
		log.Warnf("No disks match name: %s", name)
		return nil, os.ErrNotExist
	}

	if len(candidates) > 1 {
		return nil, errors.Errorf("Too many disks match name: %s", name)
	}

	return candidates[0].(*types.VirtualDisk), nil
}
// buildVMRelocateSpec builds VirtualMachineRelocateSpec to set a place for a new VirtualMachine.
func buildVMRelocateSpec(rp *object.ResourcePool, ds *object.Datastore, vm *object.VirtualMachine, linkedClone bool, initType string) (types.VirtualMachineRelocateSpec, error) {
	var key int
	var moveType string
	if linkedClone {
		moveType = "createNewChildDiskBacking"
	} else {
		moveType = "moveAllDiskBackingsAndDisallowSharing"
	}
	log.Printf("[DEBUG] relocate type: [%s]", moveType)

	devices, err := vm.Device(context.TODO())
	if err != nil {
		return types.VirtualMachineRelocateSpec{}, err
	}
	for _, d := range devices {
		if devices.Type(d) == "disk" {
			key = d.GetVirtualDevice().Key
		}
	}

	isThin := initType == "thin"
	rpr := rp.Reference()
	dsr := ds.Reference()
	return types.VirtualMachineRelocateSpec{
		Datastore:    &dsr,
		Pool:         &rpr,
		DiskMoveType: moveType,
		Disk: []types.VirtualMachineRelocateSpecDiskLocator{
			{
				Datastore: dsr,
				DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{
					DiskMode:        "persistent",
					ThinProvisioned: types.NewBool(isThin),
					EagerlyScrub:    types.NewBool(!isThin),
				},
				DiskId: key,
			},
		},
	}, nil
}
func delVNIC(ui packer.Ui, f *find.Finder, ctx context.Context, vm *object.VirtualMachine) error {
	ui.Say("Deleting NIC ")
	devicelst, err := vm.Device(ctx)
	if err != nil {
		return err
	}

	for _, device := range devicelst {

		switch device.(type) {
		case *types.VirtualVmxnet3, *types.VirtualE1000:
			ui.Message(fmt.Sprintf("Removing NIC %s\n", device.GetVirtualDevice().DeviceInfo))
			if err := vm.RemoveDevice(ctx, device); err != nil {
				return err
			}
			return nil
		default:
			fmt.Printf("Type %s\n", reflect.TypeOf(device).Elem())
			fmt.Printf("Device info %s\n", device.GetVirtualDevice().DeviceInfo)
		}

	}

	return nil
}
Example #24
func (cmd *vmdk) CloneVM(vm *object.VirtualMachine, name string) (*object.VirtualMachine, error) {
	folders, err := cmd.Datacenter.Folders(context.TODO())
	if err != nil {
		return nil, err
	}

	spec := types.VirtualMachineCloneSpec{
		Config:   &types.VirtualMachineConfigSpec{},
		Location: types.VirtualMachineRelocateSpec{},
	}

	task, err := vm.Clone(context.TODO(), folders.VmFolder, name, spec)
	if err != nil {
		return nil, err
	}

	info, err := task.WaitForResult(context.TODO(), nil)
	if err != nil {
		return nil, err
	}

	return object.NewVirtualMachine(cmd.Client, info.Result.(types.ManagedObjectReference)), nil
}
// addCdrom adds a new virtual cdrom drive to the VirtualMachine and attaches an image (ISO) to it from a datastore path.
func addCdrom(vm *object.VirtualMachine, datastore, path string) error {
	devices, err := vm.Device(context.TODO())
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] vm devices: %#v", devices)

	controller, err := devices.FindIDEController("")
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] ide controller: %#v", controller)

	c, err := devices.CreateCdrom(controller)
	if err != nil {
		return err
	}

	c = devices.InsertIso(c, fmt.Sprintf("[%s] %s", datastore, path))
	log.Printf("[DEBUG] addCdrom: %#v", c)

	return vm.AddDevice(context.TODO(), c)
}
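A usage sketch for addCdrom; the datastore name and ISO path are hypothetical:

if err := addCdrom(vm, "datastore1", "isos/install.iso"); err != nil {
	return err
}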
Example #26
func newVNCVM(c *vim25.Client, vm *object.VirtualMachine) (*vncVM, error) {
	v := &vncVM{
		c:  c,
		vm: vm,
	}

	virtualMachineProperties := []string{
		"name",
		"config.extraConfig",
		"runtime.host",
	}

	pc := property.DefaultCollector(c)
	err := pc.RetrieveOne(context.TODO(), vm.Reference(), virtualMachineProperties, &v.mvm)
	if err != nil {
		return nil, err
	}

	v.curOptions = vncOptionsFromExtraConfig(v.mvm.Config.ExtraConfig)
	v.newOptions = vncOptionsFromExtraConfig(v.mvm.Config.ExtraConfig)

	return v, nil
}
// buildStoragePlacementSpecClone builds StoragePlacementSpec for clone action.
func buildStoragePlacementSpecClone(c *govmomi.Client, f *object.DatacenterFolders, vm *object.VirtualMachine, rp *object.ResourcePool, storagePod object.StoragePod) types.StoragePlacementSpec {
	vmr := vm.Reference()
	vmfr := f.VmFolder.Reference()
	rpr := rp.Reference()
	spr := storagePod.Reference()

	var o mo.VirtualMachine
	err := vm.Properties(context.TODO(), vmr, []string{"datastore"}, &o)
	if err != nil {
		return types.StoragePlacementSpec{}
	}
	ds := object.NewDatastore(c.Client, o.Datastore[0])
	log.Printf("[DEBUG] findDatastore: datastore: %#v\n", ds)

	devices, err := vm.Device(context.TODO())
	if err != nil {
		return types.StoragePlacementSpec{}
	}

	var key int
	for _, d := range devices.SelectByType((*types.VirtualDisk)(nil)) {
		key = d.GetVirtualDevice().Key
		log.Printf("[DEBUG] findDatastore: virtual devices: %#v\n", d.GetVirtualDevice())
	}

	sps := types.StoragePlacementSpec{
		Type: "clone",
		Vm:   &vmr,
		PodSelectionSpec: types.StorageDrsPodSelectionSpec{
			StoragePod: &spr,
		},
		CloneSpec: &types.VirtualMachineCloneSpec{
			Location: types.VirtualMachineRelocateSpec{
				Disk: []types.VirtualMachineRelocateSpecDiskLocator{
					{
						Datastore:       ds.Reference(),
						DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{},
						DiskId:          key,
					},
				},
				Pool: &rpr,
			},
			PowerOn:  false,
			Template: false,
		},
		CloneName: "dummy",
		Folder:    &vmfr,
	}
	return sps
}
Example #28
func (vmh *VMHost) AttachRDM(vm *object.VirtualMachine, deviceNAA string) (err error) {

	vmScsiDiskDeviceInfo, err := vmh.getVmScsiDiskDeviceInfo(vm)
	if err != nil {
		return err
	}

	// Build a new virtual device to add to the VM from the list of available
	// devices found by our query.
	for _, ScsiDisk := range vmScsiDiskDeviceInfo {
		if !strings.Contains(ScsiDisk.Disk.CanonicalName, deviceNAA) {
			continue
		}

		var rdmBacking types.VirtualDiskRawDiskMappingVer1BackingInfo
		rdmBacking.FileName = ""
		rdmBacking.DiskMode = "independent_persistent"
		rdmBacking.CompatibilityMode = "physicalMode"
		rdmBacking.DeviceName = ScsiDisk.Disk.DeviceName
		for _, descriptor := range ScsiDisk.Disk.Descriptor {
			if strings.HasPrefix(descriptor.Id, "vml.") {
				rdmBacking.LunUuid = descriptor.Id
				break
			}
		}
		var rdmDisk types.VirtualDisk
		rdmDisk.Backing = &rdmBacking
		rdmDisk.CapacityInKB = 1024

		controller, err := vmh.getAvailableSCSIController()
		if err != nil {
			return err
		}

		if controller == nil {
			controllers, err := vmh.getSCSIControllers()
			if err != nil {
				return err
			}

			if len(controllers) == 0 {
				return errors.New("no SCSI controllers found")
			}

			if len(controllers) == 4 {
				return errors.New("no more controllers can be added")
			}

			err = vmh.createController(&controllers[0])
			if err != nil {
				return err
			}

			controller, err = vmh.getAvailableSCSIController()
			if err != nil {
				return err
			}
		}

		rdmDisk.ControllerKey = controller.VirtualController.Key
		rdmDisk.UnitNumber = -1

		err = vm.AddDevice(vmh.Ctx, &rdmDisk)
		if err != nil {
			return errors.New(fmt.Sprintf("Error adding device %+v \n Logged Item:  %s", rdmDisk, err))
		}
		return nil

	}

	scsiLuns, err := vmh.GetSCSILuns()
	if err != nil {
		return goof.WithError("error getting existing LUNs", err)
	}

	for _, sl := range scsiLuns {
		if strings.Contains(sl.CanonicalName, deviceNAA) {
			return nil
		}
	}

	return errors.New("no device detected on VM host to add")
}
func (vm *virtualMachine) setupVirtualMachine(c *govmomi.Client) error {
	dc, err := getDatacenter(c, vm.datacenter)

	if err != nil {
		return err
	}
	finder := find.NewFinder(c.Client, true)
	finder = finder.SetDatacenter(dc)

	var template *object.VirtualMachine
	var template_mo mo.VirtualMachine
	if vm.template != "" {
		template, err = finder.VirtualMachine(context.TODO(), vm.template)
		if err != nil {
			return err
		}
		log.Printf("[DEBUG] template: %#v", template)

		err = template.Properties(context.TODO(), template.Reference(), []string{"parent", "config.template", "config.guestId", "resourcePool", "snapshot", "guest.toolsVersionStatus2", "config.guestFullName"}, &template_mo)
		if err != nil {
			return err
		}
	}

	var resourcePool *object.ResourcePool
	if vm.resourcePool == "" {
		if vm.cluster == "" {
			resourcePool, err = finder.DefaultResourcePool(context.TODO())
			if err != nil {
				return err
			}
		} else {
			resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources")
			if err != nil {
				return err
			}
		}
	} else {
		resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool)
		if err != nil {
			return err
		}
	}
	log.Printf("[DEBUG] resource pool: %#v", resourcePool)

	dcFolders, err := dc.Folders(context.TODO())
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] folder: %#v", vm.folder)

	folder := dcFolders.VmFolder
	if len(vm.folder) > 0 {
		si := object.NewSearchIndex(c.Client)
		folderRef, err := si.FindByInventoryPath(
			context.TODO(), fmt.Sprintf("%v/vm/%v", vm.datacenter, vm.folder))
		if err != nil {
			return fmt.Errorf("Error reading folder %s: %s", vm.folder, err)
		} else if folderRef == nil {
			return fmt.Errorf("Cannot find folder %s", vm.folder)
		} else {
			folder = folderRef.(*object.Folder)
		}
	}

	// make config spec
	configSpec := types.VirtualMachineConfigSpec{
		Name:              vm.name,
		NumCPUs:           vm.vcpu,
		NumCoresPerSocket: 1,
		MemoryMB:          vm.memoryMb,
		MemoryAllocation: &types.ResourceAllocationInfo{
			Reservation: vm.memoryAllocation.reservation,
		},
	}
	if vm.template == "" {
		configSpec.GuestId = "otherLinux64Guest"
	}
	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)

	// make ExtraConfig
	log.Printf("[DEBUG] virtual machine Extra Config spec start")
	if len(vm.customConfigurations) > 0 {
		var ov []types.BaseOptionValue
		for k, v := range vm.customConfigurations {
			key := k
			value := v
			o := types.OptionValue{
				Key:   key,
				Value: &value,
			}
			log.Printf("[DEBUG] virtual machine Extra Config spec: %s,%s", k, v)
			ov = append(ov, &o)
		}
		configSpec.ExtraConfig = ov
		log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig)
	}

	var datastore *object.Datastore
	if vm.datastore == "" {
		datastore, err = finder.DefaultDatastore(context.TODO())
		if err != nil {
			return err
		}
	} else {
		datastore, err = finder.Datastore(context.TODO(), vm.datastore)
		if err != nil {
			// TODO: datastore cluster support in govmomi finder function
			d, err := getDatastoreObject(c, dcFolders, vm.datastore)
			if err != nil {
				return err
			}

			if d.Type == "StoragePod" {
				sp := object.StoragePod{
					Folder: object.NewFolder(c.Client, d),
				}

				var sps types.StoragePlacementSpec
				if vm.template != "" {
					sps = buildStoragePlacementSpecClone(c, dcFolders, template, resourcePool, sp)
				} else {
					sps = buildStoragePlacementSpecCreate(dcFolders, resourcePool, sp, configSpec)
				}

				datastore, err = findDatastore(c, sps)
				if err != nil {
					return err
				}
			} else {
				datastore = object.NewDatastore(c.Client, d)
			}
		}
	}

	log.Printf("[DEBUG] datastore: %#v", datastore)

	// network
	networkDevices := []types.BaseVirtualDeviceConfigSpec{}
	networkConfigs := []types.CustomizationAdapterMapping{}
	for _, network := range vm.networkInterfaces {
		// network device
		var networkDeviceType string
		if vm.template == "" {
			networkDeviceType = "e1000"
		} else {
			networkDeviceType = "vmxnet3"
		}
		nd, err := buildNetworkDevice(finder, network.label, networkDeviceType)
		if err != nil {
			return err
		}
		networkDevices = append(networkDevices, nd)

		if vm.template != "" {
			var ipSetting types.CustomizationIPSettings
			if network.ipv4Address == "" {
				ipSetting.Ip = &types.CustomizationDhcpIpGenerator{}
			} else {
				if network.ipv4PrefixLength == 0 {
					return fmt.Errorf("Error: ipv4_prefix_length argument is empty.")
				}
				m := net.CIDRMask(network.ipv4PrefixLength, 32)
				sm := net.IPv4(m[0], m[1], m[2], m[3])
				subnetMask := sm.String()
				log.Printf("[DEBUG] ipv4 gateway: %v\n", network.ipv4Gateway)
				log.Printf("[DEBUG] ipv4 address: %v\n", network.ipv4Address)
				log.Printf("[DEBUG] ipv4 prefix length: %v\n", network.ipv4PrefixLength)
				log.Printf("[DEBUG] ipv4 subnet mask: %v\n", subnetMask)
				ipSetting.Gateway = []string{
					network.ipv4Gateway,
				}
				ipSetting.Ip = &types.CustomizationFixedIp{
					IpAddress: network.ipv4Address,
				}
				ipSetting.SubnetMask = subnetMask
			}

			ipv6Spec := &types.CustomizationIPSettingsIpV6AddressSpec{}
			if network.ipv6Address == "" {
				ipv6Spec.Ip = []types.BaseCustomizationIpV6Generator{
					&types.CustomizationDhcpIpV6Generator{},
				}
			} else {
				log.Printf("[DEBUG] ipv6 gateway: %v\n", network.ipv6Gateway)
				log.Printf("[DEBUG] ipv6 address: %v\n", network.ipv6Address)
				log.Printf("[DEBUG] ipv6 prefix length: %v\n", network.ipv6PrefixLength)

				ipv6Spec.Ip = []types.BaseCustomizationIpV6Generator{
					&types.CustomizationFixedIpV6{
						IpAddress:  network.ipv6Address,
						SubnetMask: int32(network.ipv6PrefixLength),
					},
				}
				ipv6Spec.Gateway = []string{network.ipv6Gateway}
			}
			ipSetting.IpV6Spec = ipv6Spec

			// network config
			config := types.CustomizationAdapterMapping{
				Adapter: ipSetting,
			}
			networkConfigs = append(networkConfigs, config)
		}
	}
	log.Printf("[DEBUG] network devices: %v", networkDevices)
	log.Printf("[DEBUG] network configs: %v", networkConfigs)

	var task *object.Task
	if vm.template == "" {
		var mds mo.Datastore
		if err = datastore.Properties(context.TODO(), datastore.Reference(), []string{"name"}, &mds); err != nil {
			return err
		}
		log.Printf("[DEBUG] datastore: %#v", mds.Name)
		scsi, err := object.SCSIControllerTypes().CreateSCSIController("scsi")
		if err != nil {
			return err
		}

		configSpec.DeviceChange = append(configSpec.DeviceChange, &types.VirtualDeviceConfigSpec{
			Operation: types.VirtualDeviceConfigSpecOperationAdd,
			Device:    scsi,
		})

		configSpec.Files = &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", mds.Name)}

		task, err = folder.CreateVM(context.TODO(), configSpec, resourcePool, nil)
		if err != nil {
			return err
		}

		err = task.Wait(context.TODO())
		if err != nil {
			return err
		}

	} else {

		relocateSpec, err := buildVMRelocateSpec(resourcePool, datastore, template, vm.linkedClone, vm.hardDisks[0].initType)
		if err != nil {
			return err
		}

		log.Printf("[DEBUG] relocate spec: %v", relocateSpec)

		// make vm clone spec
		cloneSpec := types.VirtualMachineCloneSpec{
			Location: relocateSpec,
			Template: false,
			Config:   &configSpec,
			PowerOn:  false,
		}
		if vm.linkedClone {
			if template_mo.Snapshot == nil {
				return fmt.Errorf("`linkedClone=true`, but image VM has no snapshots")
			}
			cloneSpec.Snapshot = template_mo.Snapshot.CurrentSnapshot
		}
		log.Printf("[DEBUG] clone spec: %v", cloneSpec)

		task, err = template.Clone(context.TODO(), folder, vm.name, cloneSpec)
		if err != nil {
			return err
		}
	}

	err = task.Wait(context.TODO())
	if err != nil {
		return err
	}

	newVM, err := finder.VirtualMachine(context.TODO(), vm.Path())
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] new vm: %v", newVM)

	devices, err := newVM.Device(context.TODO())
	if err != nil {
		log.Printf("[DEBUG] Template devices can't be found")
		return err
	}

	for _, dvc := range devices {
		// Issue 3559/3560: Delete all ethernet devices to add the correct ones later
		if devices.Type(dvc) == "ethernet" {
			err := newVM.RemoveDevice(context.TODO(), false, dvc)
			if err != nil {
				return err
			}
		}
	}
	// Add Network devices
	for _, dvc := range networkDevices {
		err := newVM.AddDevice(
			context.TODO(), dvc.GetVirtualDeviceConfigSpec().Device)
		if err != nil {
			return err
		}
	}

	// Create the cdroms if needed.
	if err := createCdroms(newVM, vm.cdroms); err != nil {
		return err
	}

	firstDisk := 0
	if vm.template != "" {
		firstDisk++
	}
	for i := firstDisk; i < len(vm.hardDisks); i++ {
		log.Printf("[DEBUG] disk index: %v", i)
		err = addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, vm.hardDisks[i].initType, datastore, vm.hardDisks[i].vmdkPath)
		if err != nil {
			return err
		}
	}

	if vm.skipCustomization || vm.template == "" {
		log.Printf("[DEBUG] VM customization skipped")
	} else {
		var identity_options types.BaseCustomizationIdentitySettings
		if strings.HasPrefix(template_mo.Config.GuestId, "win") {
			var timeZone int
			if vm.timeZone == "Etc/UTC" {
				vm.timeZone = "085"
			}
			timeZone, err := strconv.Atoi(vm.timeZone)
			if err != nil {
				return fmt.Errorf("Error converting TimeZone: %s", err)
			}

			guiUnattended := types.CustomizationGuiUnattended{
				AutoLogon:      false,
				AutoLogonCount: 1,
				TimeZone:       int32(timeZone),
			}

			customIdentification := types.CustomizationIdentification{}

			userData := types.CustomizationUserData{
				ComputerName: &types.CustomizationFixedName{
					Name: strings.Split(vm.name, ".")[0],
				},
				ProductId: vm.windowsOptionalConfig.productKey,
				FullName:  "terraform",
				OrgName:   "terraform",
			}

			if vm.windowsOptionalConfig.domainUserPassword != "" && vm.windowsOptionalConfig.domainUser != "" && vm.windowsOptionalConfig.domain != "" {
				customIdentification.DomainAdminPassword = &types.CustomizationPassword{
					PlainText: true,
					Value:     vm.windowsOptionalConfig.domainUserPassword,
				}
				customIdentification.DomainAdmin = vm.windowsOptionalConfig.domainUser
				customIdentification.JoinDomain = vm.windowsOptionalConfig.domain
			}

			if vm.windowsOptionalConfig.adminPassword != "" {
				guiUnattended.Password = &types.CustomizationPassword{
					PlainText: true,
					Value:     vm.windowsOptionalConfig.adminPassword,
				}
			}

			identity_options = &types.CustomizationSysprep{
				GuiUnattended:  guiUnattended,
				Identification: customIdentification,
				UserData:       userData,
			}
		} else {
			identity_options = &types.CustomizationLinuxPrep{
				HostName: &types.CustomizationFixedName{
					Name: strings.Split(vm.name, ".")[0],
				},
				Domain:     vm.domain,
				TimeZone:   vm.timeZone,
				HwClockUTC: types.NewBool(true),
			}
		}

		// create CustomizationSpec
		customSpec := types.CustomizationSpec{
			Identity: identity_options,
			GlobalIPSettings: types.CustomizationGlobalIPSettings{
				DnsSuffixList: vm.dnsSuffixes,
				DnsServerList: vm.dnsServers,
			},
			NicSettingMap: networkConfigs,
		}
		log.Printf("[DEBUG] custom spec: %v", customSpec)

		log.Printf("[DEBUG] VM customization starting")
		taskb, err := newVM.Customize(context.TODO(), customSpec)
		if err != nil {
			return err
		}
		_, err = taskb.WaitForResult(context.TODO(), nil)
		if err != nil {
			return err
		}
		log.Printf("[DEBUG] VM customization finished")
	}

	if vm.bootableVmdk || vm.template != "" {
		if _, err := newVM.PowerOn(context.TODO()); err != nil {
			return err
		}
	}
	return nil
}
Example #30
// verifyParavirtualScsiController ensures that a paravirtual SCSI controller is
// present and determines the base path of disks attached to it. It returns a
// handle to the controller and a format string containing a single decimal verb
// for the disk unit number, which resolves to the /dev/disk/by-path path.
func verifyParavirtualScsiController(ctx context.Context, vm *object.VirtualMachine) (*types.ParaVirtualSCSIController, string, error) {
	devices, err := vm.Device(ctx)
	if err != nil {
		log.Errorf("vmware driver failed to retrieve device list for VM %s: %s", vm, errors.ErrorStack(err))
		return nil, "", errors.Trace(err)
	}

	controller, ok := devices.PickController((*types.ParaVirtualSCSIController)(nil)).(*types.ParaVirtualSCSIController)
	if controller == nil || !ok {
		err = errors.Errorf("vmware driver failed to find a paravirtual SCSI controller - ensure setup ran correctly")
		log.Error(err.Error())
		return nil, "", errors.Trace(err)
	}

	// build the base path
	// first we determine which label we're looking for (requires VMW hardware version >=10)
	targetLabel := fmt.Sprintf("SCSI%d", controller.BusNumber)
	log.Debugf("Looking for scsi controller with label %s", targetLabel)

	pciBase := "/sys/bus/pci/devices"
	pciBus, err := os.Open(pciBase)
	if err != nil {
		log.Errorf("Failed to open %s for reading: %s", pciBase, errors.ErrorStack(err))
		return controller, "", errors.Trace(err)
	}
	defer pciBus.Close()

	pciDevices, err := pciBus.Readdirnames(0)
	if err != nil {
		log.Errorf("Failed to read contents of %s: %s", pciBase, errors.ErrorStack(err))
		return controller, "", errors.Trace(err)
	}

	buf := make([]byte, len(targetLabel))
	var controllerName string

	for _, n := range pciDevices {
		nlabel := fmt.Sprintf("%s/%s/label", pciBase, n)
		flabel, err := os.Open(nlabel)
		if err != nil {
			if !os.IsNotExist(err) {
				log.Errorf("Unable to read label from %s: %s", nlabel, errors.ErrorStack(err))
			}
			continue
		}
		_, err = flabel.Read(buf)
		// Close immediately: a defer here would hold every file open until the
		// function returns, since this runs inside a loop.
		flabel.Close()
		if err != nil {
			log.Errorf("Unable to read label from %s: %s", nlabel, errors.ErrorStack(err))
			continue
		}

		if targetLabel == string(buf) {
			// we've found our controller
			controllerName = n
			log.Debugf("Found pvscsi controller directory: %s", controllerName)

			break
		}
	}

	if controllerName == "" {
		err := errors.Errorf("Failed to locate pvscsi controller directory")
		log.Errorf(err.Error())
		return controller, "", errors.Trace(err)
	}

	formatString := fmt.Sprintf("/dev/disk/by-path/pci-%s-scsi-0:0:%%d:0", controllerName)
	log.Debugf("Disk location format: %s", formatString)
	return controller, formatString, nil
}
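A sketch of how the returned format string might be consumed to locate the block device for a given disk unit number (the unit number is illustrative):

controller, pathFmt, err := verifyParavirtualScsiController(ctx, vm)
if err != nil {
	return err
}
// e.g. /dev/disk/by-path/pci-0000:03:00.0-scsi-0:0:2:0
diskPath := fmt.Sprintf(pathFmt, 2)
log.Debugf("disk for unit 2 on bus %d: %s", controller.BusNumber, diskPath)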