// newVm creates a new virtual machine. func (vm *VirtualMachine) newVm(f *object.Folder, p *object.ResourcePool, ds *object.Datastore, h *object.HostSystem) error { if vm.Hardware == nil { return errors.New("Missing hardware configuration") } Logf("%s creating virtual machine\n", vm.ID()) spec := types.VirtualMachineConfigSpec{ Name: vm.Name, Version: vm.Hardware.Version, GuestId: vm.GuestID, Annotation: vm.Annotation, NumCPUs: vm.Hardware.Cpu, NumCoresPerSocket: vm.Hardware.Cores, MemoryMB: vm.Hardware.Memory, MaxMksConnections: vm.MaxMksConnections, Files: &types.VirtualMachineFileInfo{ VmPathName: ds.Path(vm.Name), }, } task, err := f.CreateVM(vm.ctx, spec, p, h) if err != nil { return err } return task.Wait(vm.ctx) }
// buildVMRelocateSpec builds VirtualMachineRelocateSpec to set a place for a new VirtualMachine. func buildVMRelocateSpec(rp *object.ResourcePool, ds *object.Datastore, vm *object.VirtualMachine) (types.VirtualMachineRelocateSpec, error) { var key int devices, err := vm.Device(context.TODO()) if err != nil { return types.VirtualMachineRelocateSpec{}, err } for _, d := range devices { if devices.Type(d) == "disk" { key = d.GetVirtualDevice().Key } } rpr := rp.Reference() dsr := ds.Reference() return types.VirtualMachineRelocateSpec{ Datastore: &dsr, Pool: &rpr, Disk: []types.VirtualMachineRelocateSpecDiskLocator{ types.VirtualMachineRelocateSpecDiskLocator{ Datastore: dsr, DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{ DiskMode: "persistent", ThinProvisioned: types.NewBool(false), EagerlyScrub: types.NewBool(true), }, DiskId: key, }, }, }, nil }
func (d *Dispatcher) lsFolder(ds *object.Datastore, dsPath string) (*types.HostDatastoreBrowserSearchResults, error) { defer trace.End(trace.Begin(dsPath)) spec := types.HostDatastoreBrowserSearchSpec{ MatchPattern: []string{"*"}, } b, err := ds.Browser(d.ctx) if err != nil { return nil, err } task, err := b.SearchDatastore(d.ctx, dsPath, &spec) if err != nil { return nil, err } info, err := task.WaitForResult(d.ctx, nil) if err != nil { return nil, err } res := info.Result.(types.HostDatastoreBrowserSearchResults) return &res, nil }
func (cmd *create) createVM(ctx context.Context) (*object.Task, error) { var devices object.VirtualDeviceList var err error spec := &types.VirtualMachineConfigSpec{ Name: cmd.name, GuestId: cmd.guestID, NumCPUs: int32(cmd.cpus), MemoryMB: int64(cmd.memory), } devices, err = cmd.addStorage(nil) if err != nil { return nil, err } devices, err = cmd.addNetwork(devices) if err != nil { return nil, err } deviceChange, err := devices.ConfigSpec(types.VirtualDeviceConfigSpecOperationAdd) if err != nil { return nil, err } spec.DeviceChange = deviceChange var datastore *object.Datastore // If storage pod is specified, collect placement recommendations if cmd.StoragePod != nil { datastore, err = cmd.recommendDatastore(ctx, spec) if err != nil { return nil, err } } else { datastore = cmd.Datastore } if !cmd.force { vmxPath := fmt.Sprintf("%s/%s.vmx", cmd.name, cmd.name) _, err := datastore.Stat(ctx, vmxPath) if err == nil { dsPath := cmd.Datastore.Path(vmxPath) return nil, fmt.Errorf("File %s already exists", dsPath) } } folder := cmd.Folder spec.Files = &types.VirtualMachineFileInfo{ VmPathName: fmt.Sprintf("[%s]", datastore.Name()), } return folder.CreateVM(ctx, *spec, cmd.ResourcePool, cmd.HostSystem) }
// buildVMRelocateSpec builds VirtualMachineRelocateSpec to set a place for a new VirtualMachine. func buildVMRelocateSpec(finder *find.Finder, rp *object.ResourcePool, ds *object.Datastore, vm *object.VirtualMachine, linked bool) (types.VirtualMachineRelocateSpec, error) { var key int var parent *types.VirtualDiskFlatVer2BackingInfo devices, err := vm.Device(context.TODO()) if err != nil { return types.VirtualMachineRelocateSpec{}, err } for _, d := range devices { if devices.Type(d) == "disk" { vd := d.GetVirtualDevice() parent = vd.Backing.(*types.VirtualDiskFlatVer2BackingInfo) key = vd.Key } } rpr := rp.Reference() relocateSpec := types.VirtualMachineRelocateSpec{} // Treat linked clones a bit differently. if linked { parentDs := strings.SplitN(parent.FileName[1:], "]", 2) parentDsObj, err := finder.Datastore(context.TODO(), parentDs[0]) if err != nil { return types.VirtualMachineRelocateSpec{}, err } parentDbObjRef := parentDsObj.Reference() relocateSpec = types.VirtualMachineRelocateSpec{ Datastore: &parentDbObjRef, Pool: &rpr, DiskMoveType: "createNewChildDiskBacking", } } else { dsr := ds.Reference() relocateSpec = types.VirtualMachineRelocateSpec{ Datastore: &dsr, Pool: &rpr, Disk: []types.VirtualMachineRelocateSpecDiskLocator{ types.VirtualMachineRelocateSpecDiskLocator{ Datastore: dsr, DiskId: key, DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{ DiskMode: "persistent", ThinProvisioned: types.NewBool(false), EagerlyScrub: types.NewBool(true), }, }, }, } } return relocateSpec, nil }
// addHardDisk adds a new Hard Disk to the VirtualMachine. func addHardDisk(vm *object.VirtualMachine, size, iops int64, diskType string, datastore *object.Datastore, diskPath string, controller_type string) error { devices, err := vm.Device(context.TODO()) if err != nil { return err } log.Printf("[DEBUG] vm devices: %#v\n", devices) controller, err := devices.FindDiskController(controller_type) if err != nil { return err } log.Printf("[DEBUG] disk controller: %#v\n", controller) // TODO Check if diskPath & datastore exist // If diskPath is not specified, pass empty string to CreateDisk() if diskPath == "" { return fmt.Errorf("[ERROR] addHardDisk - No path proided") } else { // TODO Check if diskPath & datastore exist diskPath = fmt.Sprintf("[%v] %v", datastore.Name(), diskPath) } log.Printf("[DEBUG] addHardDisk - diskPath: %v", diskPath) disk := devices.CreateDisk(controller, datastore.Reference(), diskPath) existing := devices.SelectByBackingInfo(disk.Backing) log.Printf("[DEBUG] disk: %#v\n", disk) if len(existing) == 0 { disk.CapacityInKB = int64(size * 1024 * 1024) if iops != 0 { disk.StorageIOAllocation = &types.StorageIOAllocationInfo{ Limit: iops, } } backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo) if diskType == "eager_zeroed" { // eager zeroed thick virtual disk backing.ThinProvisioned = types.NewBool(false) backing.EagerlyScrub = types.NewBool(true) } else if diskType == "thin" { // thin provisioned virtual disk backing.ThinProvisioned = types.NewBool(true) } log.Printf("[DEBUG] addHardDisk: %#v\n", disk) log.Printf("[DEBUG] addHardDisk capacity: %#v\n", disk.CapacityInKB) return vm.AddDevice(context.TODO(), disk) } else { log.Printf("[DEBUG] addHardDisk: Disk already present.\n") return nil } }
func vmCleanup(dc *object.Datacenter, ds *object.Datastore, vmName string) error { client := testAccProvider.Meta().(*govmomi.Client) fileManager := object.NewFileManager(client.Client) task, err := fileManager.DeleteDatastoreFile(context.TODO(), ds.Path(vmName), dc) if err != nil { log.Printf("[ERROR] checkForDisk - Couldn't delete vm folder '%v': %v", vmName, err) return err } _, err = task.WaitForResult(context.TODO(), nil) if err != nil { log.Printf("[ERROR] checForDisk - Failed while deleting vm folder '%v': %v", vmName, err) return err } return nil }
// addHardDisk adds a new Hard Disk to the VirtualMachine. func addHardDisk(vm *object.VirtualMachine, size, iops int64, diskType string, datastore *object.Datastore, diskPath string) error { devices, err := vm.Device(context.TODO()) if err != nil { return err } log.Printf("[DEBUG] vm devices: %#v\n", devices) controller, err := devices.FindDiskController("scsi") if err != nil { return err } log.Printf("[DEBUG] disk controller: %#v\n", controller) disk := devices.CreateDisk(controller, datastore.Reference(), diskPath) existing := devices.SelectByBackingInfo(disk.Backing) log.Printf("[DEBUG] disk: %#v\n", disk) if len(existing) == 0 { disk.CapacityInKB = int64(size * 1024 * 1024) if iops != 0 { disk.StorageIOAllocation = &types.StorageIOAllocationInfo{ Limit: iops, } } backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo) if diskType == "eager_zeroed" { // eager zeroed thick virtual disk backing.ThinProvisioned = types.NewBool(false) backing.EagerlyScrub = types.NewBool(true) } else if diskType == "thin" { // thin provisioned virtual disk backing.ThinProvisioned = types.NewBool(true) } log.Printf("[DEBUG] addHardDisk: %#v\n", disk) log.Printf("[DEBUG] addHardDisk: %#v\n", disk.CapacityInKB) return vm.AddDevice(context.TODO(), disk) } else { log.Printf("[DEBUG] addHardDisk: Disk already present.\n") return nil } }
func (c *configSpec) AddDisk(ds *object.Datastore, path string) { var devices object.VirtualDeviceList controller, err := devices.CreateSCSIController("") if err != nil { panic(err) } devices = append(devices, controller) disk := devices.CreateDisk(controller.(types.BaseVirtualController), ds.Reference(), ds.Path(path)) devices = append(devices, disk) spec, err := devices.ConfigSpec(types.VirtualDeviceConfigSpecOperationAdd) if err != nil { panic(err) } c.DeviceChange = append(c.DeviceChange, spec...) }
func createDatastoreFiles(d *Dispatcher, ds *object.Datastore, t *testing.T) error { tmpfile, err := ioutil.TempFile("", "tempDatastoreFile.vmdk") if err != nil { t.Errorf("Failed to create file: %s", err) return err } defer os.Remove(tmpfile.Name()) // clean up if err = ds.UploadFile(d.ctx, tmpfile.Name(), "Test/folder/data/temp.vmdk", nil); err != nil { t.Errorf("Failed to upload file %q: %s", "Test/folder/data/temp.vmdk", err) return err } if err = ds.UploadFile(d.ctx, tmpfile.Name(), "Test/folder/tempMetadata", nil); err != nil { t.Errorf("Failed to upload file %q: %s", "Test/folder/tempMetadata", err) return err } return nil }
func (c *configSpec) AddDisk(ds *object.Datastore, path string) { controller := &types.VirtualLsiLogicController{ VirtualSCSIController: types.VirtualSCSIController{ SharedBus: types.VirtualSCSISharingNoSharing, VirtualController: types.VirtualController{ BusNumber: 0, VirtualDevice: types.VirtualDevice{ Key: -1, }, }, }, } controllerSpec := &types.VirtualDeviceConfigSpec{ Device: controller, Operation: types.VirtualDeviceConfigSpecOperationAdd, } c.AddChange(controllerSpec) disk := &types.VirtualDisk{ VirtualDevice: types.VirtualDevice{ Key: -1, ControllerKey: -1, UnitNumber: -1, Backing: &types.VirtualDiskFlatVer2BackingInfo{ VirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{ FileName: ds.Path(path), }, DiskMode: string(types.VirtualDiskModePersistent), ThinProvisioned: types.NewBool(true), }, }, } diskSpec := &types.VirtualDeviceConfigSpec{ Device: disk, Operation: types.VirtualDeviceConfigSpecOperationAdd, } c.AddChange(diskSpec) }
// deleteDatastoreFiles removes the files under path on the given datastore.
//
// It returns true when the path ends up deleted or was already absent, and
// false with a nil error when the folder is non-empty and force is not set
// (the files are deliberately left in place). An empty path is refused
// outright so a caller can never wipe the whole datastore, force or not.
func (d *Dispatcher) deleteDatastoreFiles(ds *object.Datastore, path string, force bool) (bool, error) {
	defer trace.End(trace.Begin(fmt.Sprintf("path %q, force %t", path, force)))

	// refuse to delete everything on the datastore, ignore force
	if path == "" {
		dsn, _ := ds.ObjectName(d.ctx)
		msg := fmt.Sprintf("refusing to remove datastore files for path \"\" on datastore %q", dsn)
		return false, errors.New(msg)
	}

	var empty bool
	dsPath := ds.Path(path)

	res, err := d.lsFolder(ds, dsPath)
	if err != nil {
		if !types.IsFileNotFound(err) {
			err = errors.Errorf("Failed to browse folder %q: %s", dsPath, err)
			return empty, err
		}
		// Folder does not exist: nothing to delete, report it as empty.
		log.Debugf("Folder %q is not found", dsPath)
		empty = true
		return empty, nil
	}
	// Non-empty folder without force: leave contents untouched and report
	// empty=false with no error.
	if len(res.File) > 0 && !force {
		log.Debugf("Folder %q is not empty, leave it there", dsPath)
		return empty, nil
	}

	m := object.NewFileManager(ds.Client())
	if err = d.deleteFilesIteratively(m, ds, dsPath); err != nil {
		return empty, err
	}
	return true, nil
}
// buildVMRelocateSpec builds VirtualMachineRelocateSpec to set a place for a new VirtualMachine. func buildVMRelocateSpec(rp *object.ResourcePool, ds *object.Datastore, vm *object.VirtualMachine, linkedClone bool, initType string) (types.VirtualMachineRelocateSpec, error) { var key int var moveType string if linkedClone { moveType = "createNewChildDiskBacking" } else { moveType = "moveAllDiskBackingsAndDisallowSharing" } log.Printf("[DEBUG] relocate type: [%s]", moveType) devices, err := vm.Device(context.TODO()) if err != nil { return types.VirtualMachineRelocateSpec{}, err } for _, d := range devices { if devices.Type(d) == "disk" { key = d.GetVirtualDevice().Key } } isThin := initType == "thin" rpr := rp.Reference() dsr := ds.Reference() return types.VirtualMachineRelocateSpec{ Datastore: &dsr, Pool: &rpr, DiskMoveType: moveType, Disk: []types.VirtualMachineRelocateSpecDiskLocator{ types.VirtualMachineRelocateSpecDiskLocator{ Datastore: dsr, DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{ DiskMode: "persistent", ThinProvisioned: types.NewBool(isThin), EagerlyScrub: types.NewBool(!isThin), }, DiskId: key, }, }, }, nil }
// cloneVm creates the virtual machine using a template. func (vm *VirtualMachine) cloneVm(f *object.Folder, p *object.ResourcePool, ds *object.Datastore, h *object.HostSystem) error { Logf("%s cloning virtual machine from %s\n", vm.ID(), vm.TemplateConfig.Use) obj, err := vm.finder.VirtualMachine(vm.ctx, vm.TemplateConfig.Use) if err != nil { return err } folderRef := f.Reference() datastoreRef := ds.Reference() poolRef := p.Reference() var hostRef *types.ManagedObjectReference if h != nil { ref := h.Reference() hostRef = &ref } spec := types.VirtualMachineCloneSpec{ Location: types.VirtualMachineRelocateSpec{ Folder: &folderRef, Datastore: &datastoreRef, Pool: &poolRef, Host: hostRef, }, Template: vm.TemplateConfig.MarkAsTemplate, PowerOn: vm.TemplateConfig.PowerOn, } task, err := obj.Clone(vm.ctx, f, vm.Name, spec) if err != nil { return err } return task.Wait(vm.ctx) }
func (d *Dispatcher) deleteUpgradeImages(ds *object.Datastore, settings *data.InstallerData) { defer trace.End(trace.Begin("")) log.Infof("Deleting upgrade images") // do clean up aggressively, even the previous operation failed with context deadline excceeded. d.ctx = context.Background() m := object.NewFileManager(ds.Client()) file := ds.Path(path.Join(d.vmPathName, settings.ApplianceISO)) if err := d.deleteVMFSFiles(m, ds, file); err != nil { log.Warnf("Image file %q is not removed for %s. Use the vSphere UI to delete content", file, err) } file = ds.Path(path.Join(d.vmPathName, settings.BootstrapISO)) if err := d.deleteVMFSFiles(m, ds, file); err != nil { log.Warnf("Image file %q is not removed for %s. Use the vSphere UI to delete content", file, err) } }
// createVirtualMchine creates a new VirtualMachine. func (vm *virtualMachine) createVirtualMachine(c *govmomi.Client) error { dc, err := getDatacenter(c, vm.datacenter) if err != nil { return err } finder := find.NewFinder(c.Client, true) finder = finder.SetDatacenter(dc) var resourcePool *object.ResourcePool if vm.resourcePool == "" { if vm.cluster == "" { resourcePool, err = finder.DefaultResourcePool(context.TODO()) if err != nil { return err } } else { resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources") if err != nil { return err } } } else { resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool) if err != nil { return err } } log.Printf("[DEBUG] resource pool: %#v", resourcePool) dcFolders, err := dc.Folders(context.TODO()) if err != nil { return err } // network networkDevices := []types.BaseVirtualDeviceConfigSpec{} for _, network := range vm.networkInterfaces { // network device nd, err := createNetworkDevice(finder, network.label, "e1000") if err != nil { return err } networkDevices = append(networkDevices, nd) } // make config spec configSpec := types.VirtualMachineConfigSpec{ GuestId: "otherLinux64Guest", Name: vm.name, NumCPUs: vm.vcpu, NumCoresPerSocket: 1, MemoryMB: vm.memoryMb, DeviceChange: networkDevices, } log.Printf("[DEBUG] virtual machine config spec: %v", configSpec) var datastore *object.Datastore if vm.datastore == "" { datastore, err = finder.DefaultDatastore(context.TODO()) if err != nil { return err } } else { datastore, err = finder.Datastore(context.TODO(), vm.datastore) if err != nil { // TODO: datastore cluster support in govmomi finder function d, err := getDatastoreObject(c, dcFolders, vm.datastore) if err != nil { return err } if d.Type == "StoragePod" { sp := object.StoragePod{ object.NewFolder(c.Client, d), } sps := createStoragePlacementSpecCreate(dcFolders, resourcePool, sp, configSpec) datastore, err = findDatastore(c, sps) if err != nil { return err } } else { datastore = 
object.NewDatastore(c.Client, d) } } } log.Printf("[DEBUG] datastore: %#v", datastore) var mds mo.Datastore if err = datastore.Properties(context.TODO(), datastore.Reference(), []string{"name"}, &mds); err != nil { return err } log.Printf("[DEBUG] datastore: %#v", mds.Name) scsi, err := object.SCSIControllerTypes().CreateSCSIController("scsi") if err != nil { log.Printf("[ERROR] %s", err) } configSpec.DeviceChange = append(configSpec.DeviceChange, &types.VirtualDeviceConfigSpec{ Operation: types.VirtualDeviceConfigSpecOperationAdd, Device: scsi, }) configSpec.Files = &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", mds.Name)} task, err := dcFolders.VmFolder.CreateVM(context.TODO(), configSpec, resourcePool, nil) if err != nil { log.Printf("[ERROR] %s", err) } err = task.Wait(context.TODO()) if err != nil { log.Printf("[ERROR] %s", err) } newVM, err := finder.VirtualMachine(context.TODO(), vm.name) if err != nil { return err } log.Printf("[DEBUG] new vm: %v", newVM) log.Printf("[DEBUG] add hard disk: %v", vm.hardDisks) for _, hd := range vm.hardDisks { log.Printf("[DEBUG] add hard disk: %v", hd.size) log.Printf("[DEBUG] add hard disk: %v", hd.iops) err = addHardDisk(newVM, hd.size, hd.iops, "thin") if err != nil { return err } } return nil }
func (d *Dispatcher) isVSAN(ds *object.Datastore) bool { dsType, _ := ds.Type(d.ctx) return dsType == types.HostFileSystemVolumeFileSystemTypeVsan }
// setupVirtualMachine provisions a VirtualMachine either from scratch or by
// cloning a template (vm.template non-empty).
//
// It resolves the resource pool, destination folder and datastore (with
// storage-pod placement support), builds the config/clone spec with extra
// config and memory reservation, creates or clones the VM, replaces its
// ethernet devices with the configured ones, attaches cdroms and hard disks,
// optionally runs guest customization (Windows sysprep or LinuxPrep), and
// finally powers the VM on when appropriate. Returns an error on the first
// unrecoverable failure.
func (vm *virtualMachine) setupVirtualMachine(c *govmomi.Client) error {
	dc, err := getDatacenter(c, vm.datacenter)
	if err != nil {
		return err
	}
	finder := find.NewFinder(c.Client, true)
	finder = finder.SetDatacenter(dc)

	// When cloning, fetch the template plus the properties needed later
	// (snapshot for linked clones, guest id for customization choice).
	var template *object.VirtualMachine
	var template_mo mo.VirtualMachine
	if vm.template != "" {
		template, err = finder.VirtualMachine(context.TODO(), vm.template)
		if err != nil {
			return err
		}
		log.Printf("[DEBUG] template: %#v", template)
		err = template.Properties(context.TODO(), template.Reference(), []string{"parent", "config.template", "config.guestId", "resourcePool", "snapshot", "guest.toolsVersionStatus2", "config.guestFullName"}, &template_mo)
		if err != nil {
			return err
		}
	}

	// Resolve the resource pool: explicit name, cluster default, or the
	// datacenter-wide default.
	var resourcePool *object.ResourcePool
	if vm.resourcePool == "" {
		if vm.cluster == "" {
			resourcePool, err = finder.DefaultResourcePool(context.TODO())
			if err != nil {
				return err
			}
		} else {
			resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources")
			if err != nil {
				return err
			}
		}
	} else {
		resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool)
		if err != nil {
			return err
		}
	}
	log.Printf("[DEBUG] resource pool: %#v", resourcePool)

	dcFolders, err := dc.Folders(context.TODO())
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] folder: %#v", vm.folder)

	// Default to the datacenter's VM folder unless a sub-folder was named.
	folder := dcFolders.VmFolder
	if len(vm.folder) > 0 {
		si := object.NewSearchIndex(c.Client)
		folderRef, err := si.FindByInventoryPath(
			context.TODO(), fmt.Sprintf("%v/vm/%v", vm.datacenter, vm.folder))
		if err != nil {
			return fmt.Errorf("Error reading folder %s: %s", vm.folder, err)
		} else if folderRef == nil {
			return fmt.Errorf("Cannot find folder %s", vm.folder)
		} else {
			folder = folderRef.(*object.Folder)
		}
	}

	// make config spec
	configSpec := types.VirtualMachineConfigSpec{
		Name:              vm.name,
		NumCPUs:           vm.vcpu,
		NumCoresPerSocket: 1,
		MemoryMB:          vm.memoryMb,
		MemoryAllocation: &types.ResourceAllocationInfo{
			Reservation: vm.memoryAllocation.reservation,
		},
	}
	// Only set a guest id when building from scratch; clones inherit it.
	if vm.template == "" {
		configSpec.GuestId = "otherLinux64Guest"
	}
	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)

	// make ExtraConfig
	log.Printf("[DEBUG] virtual machine Extra Config spec start")
	if len(vm.customConfigurations) > 0 {
		var ov []types.BaseOptionValue
		for k, v := range vm.customConfigurations {
			// Copy loop variables so the OptionValue does not alias them.
			key := k
			value := v
			o := types.OptionValue{
				Key:   key,
				Value: &value,
			}
			log.Printf("[DEBUG] virtual machine Extra Config spec: %s,%s", k, v)
			ov = append(ov, &o)
		}
		configSpec.ExtraConfig = ov
		log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig)
	}

	// Resolve the datastore, falling back to a storage-pod placement
	// recommendation when the name refers to a StoragePod.
	var datastore *object.Datastore
	if vm.datastore == "" {
		datastore, err = finder.DefaultDatastore(context.TODO())
		if err != nil {
			return err
		}
	} else {
		datastore, err = finder.Datastore(context.TODO(), vm.datastore)
		if err != nil {
			// TODO: datastore cluster support in govmomi finder function
			d, err := getDatastoreObject(c, dcFolders, vm.datastore)
			if err != nil {
				return err
			}
			if d.Type == "StoragePod" {
				sp := object.StoragePod{
					Folder: object.NewFolder(c.Client, d),
				}
				var sps types.StoragePlacementSpec
				if vm.template != "" {
					sps = buildStoragePlacementSpecClone(c, dcFolders, template, resourcePool, sp)
				} else {
					sps = buildStoragePlacementSpecCreate(dcFolders, resourcePool, sp, configSpec)
				}
				datastore, err = findDatastore(c, sps)
				if err != nil {
					return err
				}
			} else {
				datastore = object.NewDatastore(c.Client, d)
			}
		}
	}
	log.Printf("[DEBUG] datastore: %#v", datastore)

	// network
	networkDevices := []types.BaseVirtualDeviceConfigSpec{}
	networkConfigs := []types.CustomizationAdapterMapping{}
	for _, network := range vm.networkInterfaces {
		// network device
		var networkDeviceType string
		if vm.template == "" {
			networkDeviceType = "e1000"
		} else {
			networkDeviceType = "vmxnet3"
		}
		nd, err := buildNetworkDevice(finder, network.label, networkDeviceType)
		if err != nil {
			return err
		}
		networkDevices = append(networkDevices, nd)

		// Customization NIC mappings are only built for clones; scratch VMs
		// skip guest customization entirely.
		if vm.template != "" {
			var ipSetting types.CustomizationIPSettings
			if network.ipv4Address == "" {
				// No static address: fall back to DHCP.
				ipSetting.Ip = &types.CustomizationDhcpIpGenerator{}
			} else {
				if network.ipv4PrefixLength == 0 {
					return fmt.Errorf("Error: ipv4_prefix_length argument is empty.")
				}
				// Convert the prefix length to a dotted-quad subnet mask.
				m := net.CIDRMask(network.ipv4PrefixLength, 32)
				sm := net.IPv4(m[0], m[1], m[2], m[3])
				subnetMask := sm.String()
				log.Printf("[DEBUG] ipv4 gateway: %v\n", network.ipv4Gateway)
				log.Printf("[DEBUG] ipv4 address: %v\n", network.ipv4Address)
				log.Printf("[DEBUG] ipv4 prefix length: %v\n", network.ipv4PrefixLength)
				log.Printf("[DEBUG] ipv4 subnet mask: %v\n", subnetMask)
				ipSetting.Gateway = []string{
					network.ipv4Gateway,
				}
				ipSetting.Ip = &types.CustomizationFixedIp{
					IpAddress: network.ipv4Address,
				}
				ipSetting.SubnetMask = subnetMask
			}

			ipv6Spec := &types.CustomizationIPSettingsIpV6AddressSpec{}
			if network.ipv6Address == "" {
				ipv6Spec.Ip = []types.BaseCustomizationIpV6Generator{
					&types.CustomizationDhcpIpV6Generator{},
				}
			} else {
				log.Printf("[DEBUG] ipv6 gateway: %v\n", network.ipv6Gateway)
				log.Printf("[DEBUG] ipv6 address: %v\n", network.ipv6Address)
				log.Printf("[DEBUG] ipv6 prefix length: %v\n", network.ipv6PrefixLength)
				ipv6Spec.Ip = []types.BaseCustomizationIpV6Generator{
					&types.CustomizationFixedIpV6{
						IpAddress:  network.ipv6Address,
						SubnetMask: int32(network.ipv6PrefixLength),
					},
				}
				ipv6Spec.Gateway = []string{network.ipv6Gateway}
			}
			ipSetting.IpV6Spec = ipv6Spec

			// network config
			config := types.CustomizationAdapterMapping{
				Adapter: ipSetting,
			}
			networkConfigs = append(networkConfigs, config)
		}
	}
	log.Printf("[DEBUG] network devices: %v", networkDevices)
	log.Printf("[DEBUG] network configs: %v", networkConfigs)

	var task *object.Task
	if vm.template == "" {
		// Scratch creation: place files on the datastore and add a SCSI
		// controller before issuing CreateVM.
		var mds mo.Datastore
		if err = datastore.Properties(context.TODO(), datastore.Reference(), []string{"name"}, &mds); err != nil {
			return err
		}
		log.Printf("[DEBUG] datastore: %#v", mds.Name)
		scsi, err := object.SCSIControllerTypes().CreateSCSIController("scsi")
		if err != nil {
			// NOTE(review): this error is only logged; a nil scsi device is
			// then appended below — consider returning the error.
			log.Printf("[ERROR] %s", err)
		}
		configSpec.DeviceChange = append(configSpec.DeviceChange, &types.VirtualDeviceConfigSpec{
			Operation: types.VirtualDeviceConfigSpecOperationAdd,
			Device:    scsi,
		})
		configSpec.Files = &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", mds.Name)}

		task, err = folder.CreateVM(context.TODO(), configSpec, resourcePool, nil)
		if err != nil {
			// NOTE(review): only logged; a failed CreateVM leaves task nil
			// and the task.Wait below panics — consider returning.
			log.Printf("[ERROR] %s", err)
		}
		err = task.Wait(context.TODO())
		if err != nil {
			log.Printf("[ERROR] %s", err)
		}
	} else {
		// Clone path: relocate relative to the first disk's init type.
		relocateSpec, err := buildVMRelocateSpec(resourcePool, datastore, template, vm.linkedClone, vm.hardDisks[0].initType)
		if err != nil {
			return err
		}
		log.Printf("[DEBUG] relocate spec: %v", relocateSpec)

		// make vm clone spec
		cloneSpec := types.VirtualMachineCloneSpec{
			Location: relocateSpec,
			Template: false,
			Config:   &configSpec,
			PowerOn:  false,
		}
		if vm.linkedClone {
			// Linked clones must fork from an existing snapshot.
			if template_mo.Snapshot == nil {
				return fmt.Errorf("`linkedClone=true`, but image VM has no snapshots")
			}
			cloneSpec.Snapshot = template_mo.Snapshot.CurrentSnapshot
		}
		log.Printf("[DEBUG] clone spec: %v", cloneSpec)

		task, err = template.Clone(context.TODO(), folder, vm.name, cloneSpec)
		if err != nil {
			return err
		}
	}

	// NOTE(review): for the scratch path this is a second Wait on the same
	// task; errors here are only logged, not returned.
	err = task.Wait(context.TODO())
	if err != nil {
		log.Printf("[ERROR] %s", err)
	}

	newVM, err := finder.VirtualMachine(context.TODO(), vm.Path())
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] new vm: %v", newVM)

	devices, err := newVM.Device(context.TODO())
	if err != nil {
		log.Printf("[DEBUG] Template devices can't be found")
		return err
	}

	for _, dvc := range devices {
		// Issue 3559/3560: Delete all ethernet devices to add the correct ones later
		if devices.Type(dvc) == "ethernet" {
			err := newVM.RemoveDevice(context.TODO(), false, dvc)
			if err != nil {
				return err
			}
		}
	}
	// Add Network devices
	for _, dvc := range networkDevices {
		err := newVM.AddDevice(
			context.TODO(), dvc.GetVirtualDeviceConfigSpec().Device)
		if err != nil {
			return err
		}
	}

	// Create the cdroms if needed.
	if err := createCdroms(newVM, vm.cdroms); err != nil {
		return err
	}

	// Clones already carry the template's first disk; skip re-adding it.
	firstDisk := 0
	if vm.template != "" {
		firstDisk++
	}
	for i := firstDisk; i < len(vm.hardDisks); i++ {
		log.Printf("[DEBUG] disk index: %v", i)
		err = addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, vm.hardDisks[i].initType, datastore, vm.hardDisks[i].vmdkPath)
		if err != nil {
			return err
		}
	}

	if vm.skipCustomization || vm.template == "" {
		log.Printf("[DEBUG] VM customization skipped")
	} else {
		// Pick sysprep (Windows, by guest id prefix) or LinuxPrep identity.
		var identity_options types.BaseCustomizationIdentitySettings
		if strings.HasPrefix(template_mo.Config.GuestId, "win") {
			var timeZone int
			// "085" is the Windows sysprep index for UTC.
			if vm.timeZone == "Etc/UTC" {
				vm.timeZone = "085"
			}
			timeZone, err := strconv.Atoi(vm.timeZone)
			if err != nil {
				return fmt.Errorf("Error converting TimeZone: %s", err)
			}

			guiUnattended := types.CustomizationGuiUnattended{
				AutoLogon:      false,
				AutoLogonCount: 1,
				TimeZone:       int32(timeZone),
			}

			customIdentification := types.CustomizationIdentification{}

			userData := types.CustomizationUserData{
				// Windows computer names cannot be FQDNs; use the first label.
				ComputerName: &types.CustomizationFixedName{
					Name: strings.Split(vm.name, ".")[0],
				},
				ProductId: vm.windowsOptionalConfig.productKey,
				FullName:  "terraform",
				OrgName:   "terraform",
			}

			// Domain join requires user, password, and domain all set.
			if vm.windowsOptionalConfig.domainUserPassword != "" && vm.windowsOptionalConfig.domainUser != "" && vm.windowsOptionalConfig.domain != "" {
				customIdentification.DomainAdminPassword = &types.CustomizationPassword{
					PlainText: true,
					Value:     vm.windowsOptionalConfig.domainUserPassword,
				}
				customIdentification.DomainAdmin = vm.windowsOptionalConfig.domainUser
				customIdentification.JoinDomain = vm.windowsOptionalConfig.domain
			}

			if vm.windowsOptionalConfig.adminPassword != "" {
				guiUnattended.Password = &types.CustomizationPassword{
					PlainText: true,
					Value:     vm.windowsOptionalConfig.adminPassword,
				}
			}

			identity_options = &types.CustomizationSysprep{
				GuiUnattended:  guiUnattended,
				Identification: customIdentification,
				UserData:       userData,
			}
		} else {
			identity_options = &types.CustomizationLinuxPrep{
				HostName: &types.CustomizationFixedName{
					Name: strings.Split(vm.name, ".")[0],
				},
				Domain:     vm.domain,
				TimeZone:   vm.timeZone,
				HwClockUTC: types.NewBool(true),
			}
		}

		// create CustomizationSpec
		customSpec := types.CustomizationSpec{
			Identity: identity_options,
			GlobalIPSettings: types.CustomizationGlobalIPSettings{
				DnsSuffixList: vm.dnsSuffixes,
				DnsServerList: vm.dnsServers,
			},
			NicSettingMap: networkConfigs,
		}
		log.Printf("[DEBUG] custom spec: %v", customSpec)

		log.Printf("[DEBUG] VM customization starting")
		taskb, err := newVM.Customize(context.TODO(), customSpec)
		if err != nil {
			return err
		}
		_, err = taskb.WaitForResult(context.TODO(), nil)
		if err != nil {
			return err
		}
		log.Printf("[DEBUG] VM customization finished")
	}

	// Power on clones unconditionally; scratch VMs only when a bootable vmdk
	// was attached.
	if vm.bootableVmdk || vm.template != "" {
		newVM.PowerOn(context.TODO())
	}
	return nil
}
// createVirtualMachine creates a new VirtualMachine. func (vm *virtualMachine) createVirtualMachine(c *govmomi.Client) error { dc, err := getDatacenter(c, vm.datacenter) if err != nil { return err } finder := find.NewFinder(c.Client, true) finder = finder.SetDatacenter(dc) var resourcePool *object.ResourcePool if vm.resourcePool == "" { if vm.cluster == "" { resourcePool, err = finder.DefaultResourcePool(context.TODO()) if err != nil { return err } } else { resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources") if err != nil { return err } } } else { resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool) if err != nil { return err } } log.Printf("[DEBUG] resource pool: %#v", resourcePool) dcFolders, err := dc.Folders(context.TODO()) if err != nil { return err } log.Printf("[DEBUG] folder: %#v", vm.folder) folder := dcFolders.VmFolder if len(vm.folder) > 0 { si := object.NewSearchIndex(c.Client) folderRef, err := si.FindByInventoryPath( context.TODO(), fmt.Sprintf("%v/vm/%v", vm.datacenter, vm.folder)) if err != nil { return fmt.Errorf("Error reading folder %s: %s", vm.folder, err) } else if folderRef == nil { return fmt.Errorf("Cannot find folder %s", vm.folder) } else { folder = folderRef.(*object.Folder) } } // network networkDevices := []types.BaseVirtualDeviceConfigSpec{} for _, network := range vm.networkInterfaces { // network device nd, err := buildNetworkDevice(finder, network.label, "e1000") if err != nil { return err } networkDevices = append(networkDevices, nd) } // make config spec configSpec := types.VirtualMachineConfigSpec{ GuestId: "otherLinux64Guest", Name: vm.name, NumCPUs: vm.vcpu, NumCoresPerSocket: 1, MemoryMB: vm.memoryMb, MemoryAllocation: &types.ResourceAllocationInfo{ Reservation: vm.memoryAllocation.reservation, }, DeviceChange: networkDevices, } log.Printf("[DEBUG] virtual machine config spec: %v", configSpec) // make ExtraConfig log.Printf("[DEBUG] virtual machine Extra Config spec start") 
if len(vm.customConfigurations) > 0 { var ov []types.BaseOptionValue for k, v := range vm.customConfigurations { key := k value := v o := types.OptionValue{ Key: key, Value: &value, } log.Printf("[DEBUG] virtual machine Extra Config spec: %s,%s", k, v) ov = append(ov, &o) } configSpec.ExtraConfig = ov log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig) } var datastore *object.Datastore if vm.datastore == "" { datastore, err = finder.DefaultDatastore(context.TODO()) if err != nil { return err } } else { datastore, err = finder.Datastore(context.TODO(), vm.datastore) if err != nil { // TODO: datastore cluster support in govmomi finder function d, err := getDatastoreObject(c, dcFolders, vm.datastore) if err != nil { return err } if d.Type == "StoragePod" { sp := object.StoragePod{ Folder: object.NewFolder(c.Client, d), } sps := buildStoragePlacementSpecCreate(dcFolders, resourcePool, sp, configSpec) datastore, err = findDatastore(c, sps) if err != nil { return err } } else { datastore = object.NewDatastore(c.Client, d) } } } log.Printf("[DEBUG] datastore: %#v", datastore) var mds mo.Datastore if err = datastore.Properties(context.TODO(), datastore.Reference(), []string{"name"}, &mds); err != nil { return err } log.Printf("[DEBUG] datastore: %#v", mds.Name) scsi, err := object.SCSIControllerTypes().CreateSCSIController("scsi") if err != nil { log.Printf("[ERROR] %s", err) } configSpec.DeviceChange = append(configSpec.DeviceChange, &types.VirtualDeviceConfigSpec{ Operation: types.VirtualDeviceConfigSpecOperationAdd, Device: scsi, }) configSpec.Files = &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", mds.Name)} task, err := folder.CreateVM(context.TODO(), configSpec, resourcePool, nil) if err != nil { log.Printf("[ERROR] %s", err) } err = task.Wait(context.TODO()) if err != nil { log.Printf("[ERROR] %s", err) } newVM, err := finder.VirtualMachine(context.TODO(), vm.Path()) if err != nil { return err } log.Printf("[DEBUG] new 
vm: %v", newVM) log.Printf("[DEBUG] add hard disk: %v", vm.hardDisks) for _, hd := range vm.hardDisks { log.Printf("[DEBUG] add hard disk: %v", hd.size) log.Printf("[DEBUG] add hard disk: %v", hd.iops) err = addHardDisk(newVM, hd.size, hd.iops, "thin", datastore, hd.vmdkPath) if err != nil { return err } } // Create the cdroms if needed. if err := createCdroms(newVM, vm.cdroms); err != nil { return err } if vm.bootableVmdk { newVM.PowerOn(context.TODO()) ip, err := newVM.WaitForIP(context.TODO()) if err != nil { return err } log.Printf("[DEBUG] ip address: %v", ip) } return nil }
// createVirtualMchine creates a new VirtualMachine. func (vm *virtualMachine) createVirtualMachine(c *govmomi.Client) error { var dc *object.Datacenter var err error finder := find.NewFinder(c.Client, true) if vm.datacenter != "" { dc, err = finder.Datacenter(context.TODO(), vm.datacenter) if err != nil { return err } } else { dc, err = finder.DefaultDatacenter(context.TODO()) if err != nil { return err } } finder = finder.SetDatacenter(dc) var resourcePool *object.ResourcePool if vm.resourcePool == "" { if vm.cluster == "" { resourcePool, err = finder.DefaultResourcePool(context.TODO()) if err != nil { return err } } else { resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources") if err != nil { return err } } } else { resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool) if err != nil { return err } } log.Printf("[DEBUG] resource pool: %#v", resourcePool) dcFolders, err := dc.Folders(context.TODO()) if err != nil { return err } // network networkDevices := []types.BaseVirtualDeviceConfigSpec{} for _, network := range vm.networkInterfaces { // network device nd, err := createNetworkDevice(finder, network.label, "e1000") if err != nil { return err } networkDevices = append(networkDevices, nd) } // make config spec configSpec := types.VirtualMachineConfigSpec{ GuestId: "otherLinux64Guest", Name: vm.name, NumCPUs: vm.vcpu, NumCoresPerSocket: 1, MemoryMB: vm.memoryMb, DeviceChange: networkDevices, } log.Printf("[DEBUG] virtual machine config spec: %v", configSpec) var datastore *object.Datastore if vm.datastore == "" { datastore, err = finder.DefaultDatastore(context.TODO()) if err != nil { return err } } else { s := object.NewSearchIndex(c.Client) ref, err := s.FindChild(context.TODO(), dcFolders.DatastoreFolder, vm.datastore) if err != nil { return err } log.Printf("[DEBUG] findDatastore: reference: %#v", ref) mor := ref.Reference() if mor.Type == "StoragePod" { storagePod := object.NewFolder(c.Client, mor) vmfr := 
dcFolders.VmFolder.Reference() rpr := resourcePool.Reference() spr := storagePod.Reference() sps := types.StoragePlacementSpec{ Type: "create", ConfigSpec: &configSpec, PodSelectionSpec: types.StorageDrsPodSelectionSpec{ StoragePod: &spr, }, Folder: &vmfr, ResourcePool: &rpr, } log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps) srm := object.NewStorageResourceManager(c.Client) rds, err := srm.RecommendDatastores(context.TODO(), sps) if err != nil { return err } log.Printf("[DEBUG] findDatastore: recommendDatastores: %#v\n", rds) spa := rds.Recommendations[0].Action[0].(*types.StoragePlacementAction) datastore = object.NewDatastore(c.Client, spa.Destination) if err != nil { return err } } else { datastore = object.NewDatastore(c.Client, mor) } } log.Printf("[DEBUG] datastore: %#v", datastore) var mds mo.Datastore if err = datastore.Properties(context.TODO(), datastore.Reference(), []string{"name"}, &mds); err != nil { return err } log.Printf("[DEBUG] datastore: %#v", mds.Name) scsi, err := object.SCSIControllerTypes().CreateSCSIController("scsi") if err != nil { log.Printf("[ERROR] %s", err) } configSpec.DeviceChange = append(configSpec.DeviceChange, &types.VirtualDeviceConfigSpec{ Operation: types.VirtualDeviceConfigSpecOperationAdd, Device: scsi, }) configSpec.Files = &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", mds.Name)} task, err := dcFolders.VmFolder.CreateVM(context.TODO(), configSpec, resourcePool, nil) if err != nil { log.Printf("[ERROR] %s", err) } err = task.Wait(context.TODO()) if err != nil { log.Printf("[ERROR] %s", err) } newVM, err := finder.VirtualMachine(context.TODO(), vm.name) if err != nil { return err } log.Printf("[DEBUG] new vm: %v", newVM) log.Printf("[DEBUG] add hard disk: %v", vm.hardDisks) for _, hd := range vm.hardDisks { log.Printf("[DEBUG] add hard disk: %v", hd.size) log.Printf("[DEBUG] add hard disk: %v", hd.iops) err = addHardDisk(newVM, hd.size, hd.iops, "thin") if err != nil { return err } } 
return nil }