// buildStoragePlacementSpecClone builds a StoragePlacementSpec that asks
// Storage DRS for a datastore recommendation for cloning vm into the given
// storage pod.
//
// NOTE(review): on any property/device lookup failure this returns the
// zero-value StoragePlacementSpec and swallows the error — callers cannot
// distinguish failure from a valid spec. The signature would need to change
// to surface the error.
func buildStoragePlacementSpecClone(c *govmomi.Client, f *object.DatacenterFolders, vm *object.VirtualMachine, rp *object.ResourcePool, storagePod object.StoragePod) types.StoragePlacementSpec {
	// Managed object references used in the spec below.
	vmr := vm.Reference()
	vmfr := f.VmFolder.Reference()
	rpr := rp.Reference()
	spr := storagePod.Reference()

	var o mo.VirtualMachine
	// Fetch only the "datastore" property of the source VM.
	err := vm.Properties(context.TODO(), vmr, []string{"datastore"}, &o)
	if err != nil {
		return types.StoragePlacementSpec{}
	}
	// NOTE(review): o.Datastore[0] panics if the VM reports no datastores —
	// presumably a clone source always has at least one; verify.
	ds := object.NewDatastore(c.Client, o.Datastore[0])
	log.Printf("[DEBUG] findDatastore: datastore: %#v\n", ds)

	devices, err := vm.Device(context.TODO())
	if err != nil {
		return types.StoragePlacementSpec{}
	}

	var key int
	// NOTE(review): key is overwritten on each iteration, so only the LAST
	// virtual disk's device key ends up in the disk locator below.
	for _, d := range devices.SelectByType((*types.VirtualDisk)(nil)) {
		key = d.GetVirtualDevice().Key
		log.Printf("[DEBUG] findDatastore: virtual devices: %#v\n", d.GetVirtualDevice())
	}

	sps := types.StoragePlacementSpec{
		Type: "clone",
		Vm:   &vmr,
		PodSelectionSpec: types.StorageDrsPodSelectionSpec{
			StoragePod: &spr,
		},
		CloneSpec: &types.VirtualMachineCloneSpec{
			Location: types.VirtualMachineRelocateSpec{
				Disk: []types.VirtualMachineRelocateSpecDiskLocator{
					types.VirtualMachineRelocateSpecDiskLocator{
						Datastore:       ds.Reference(),
						DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{},
						DiskId:          key,
					},
				},
				Pool: &rpr,
			},
			PowerOn:  false,
			Template: false,
		},
		// "dummy" clone name: this spec is only used to obtain a datastore
		// recommendation, never to perform an actual clone.
		CloneName: "dummy",
		Folder:    &vmfr,
	}
	return sps
}
// findDatastore finds Datastore object. func findDatastore(c *govmomi.Client, sps types.StoragePlacementSpec) (*object.Datastore, error) { var datastore *object.Datastore log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps) srm := object.NewStorageResourceManager(c.Client) rds, err := srm.RecommendDatastores(context.TODO(), sps) if err != nil { return nil, err } log.Printf("[DEBUG] findDatastore: recommendDatastores: %#v\n", rds) spa := rds.Recommendations[0].Action[0].(*types.StoragePlacementAction) datastore = object.NewDatastore(c.Client, spa.Destination) log.Printf("[DEBUG] findDatastore: datastore: %#v", datastore) return datastore, nil }
func (f *Finder) DatastoreList(ctx context.Context, path string) ([]*object.Datastore, error) { es, err := f.find(ctx, f.datastoreFolder, false, path) if err != nil { return nil, err } var dss []*object.Datastore for _, e := range es { ref := e.Object.Reference() if ref.Type == "Datastore" { ds := object.NewDatastore(f.client, ref) ds.InventoryPath = e.Path dss = append(dss, ds) } } if len(dss) == 0 { return nil, &NotFoundError{"datastore", path} } return dss, nil }
func (cmd *clone) cloneVM(ctx context.Context) (*object.Task, error) { // search for the first network card of the source devices, err := cmd.VirtualMachine.Device(ctx) if err != nil { return nil, err } var card *types.VirtualEthernetCard for _, device := range devices { if c, ok := device.(types.BaseVirtualEthernetCard); ok { card = c.GetVirtualEthernetCard() break } } if card == nil { return nil, fmt.Errorf("No network device found.") } // get the new backing information dev, err := cmd.NetworkFlag.Device() if err != nil { return nil, err } //set backing info card.Backing = dev.(types.BaseVirtualEthernetCard).GetVirtualEthernetCard().Backing // prepare virtual device config spec for network card configSpecs := []types.BaseVirtualDeviceConfigSpec{ &types.VirtualDeviceConfigSpec{ Operation: types.VirtualDeviceConfigSpecOperationEdit, Device: card, }, } folderref := cmd.Folder.Reference() poolref := cmd.ResourcePool.Reference() relocateSpec := types.VirtualMachineRelocateSpec{ DeviceChange: configSpecs, Folder: &folderref, Pool: &poolref, } if cmd.HostSystem != nil { hostref := cmd.HostSystem.Reference() relocateSpec.Host = &hostref } cloneSpec := &types.VirtualMachineCloneSpec{ Location: relocateSpec, PowerOn: false, Template: cmd.template, } // clone to storage pod datastoreref := types.ManagedObjectReference{} if cmd.StoragePod != nil && cmd.Datastore == nil { storagePod := cmd.StoragePod.Reference() // Build pod selection spec from config spec podSelectionSpec := types.StorageDrsPodSelectionSpec{ StoragePod: &storagePod, } // Get the virtual machine reference vmref := cmd.VirtualMachine.Reference() // Build the placement spec storagePlacementSpec := types.StoragePlacementSpec{ Folder: &folderref, Vm: &vmref, CloneName: cmd.name, CloneSpec: cloneSpec, PodSelectionSpec: podSelectionSpec, Type: string(types.StoragePlacementSpecPlacementTypeClone), } // Get the storage placement result storageResourceManager := object.NewStorageResourceManager(cmd.Client) result, 
err := storageResourceManager.RecommendDatastores(ctx, storagePlacementSpec) if err != nil { return nil, err } // Get the recommendations recommendations := result.Recommendations if len(recommendations) == 0 { return nil, fmt.Errorf("no recommendations") } // Get the first recommendation datastoreref = recommendations[0].Action[0].(*types.StoragePlacementAction).Destination } else if cmd.StoragePod == nil && cmd.Datastore != nil { datastoreref = cmd.Datastore.Reference() } else { return nil, fmt.Errorf("Please provide either a datastore or a storagepod") } // Set the destination datastore cloneSpec.Location.Datastore = &datastoreref // Check if vmx already exists if !cmd.force { vmxPath := fmt.Sprintf("%s/%s.vmx", cmd.name, cmd.name) datastore := object.NewDatastore(cmd.Client, datastoreref) _, err := datastore.Stat(ctx, vmxPath) if err == nil { dsPath := cmd.Datastore.Path(vmxPath) return nil, fmt.Errorf("File %s already exists", dsPath) } } // check if customization specification requested if len(cmd.customization) > 0 { // get the customization spec manager customizationSpecManager := object.NewCustomizationSpecManager(cmd.Client) // check if customization specification exists exists, err := customizationSpecManager.DoesCustomizationSpecExist(ctx, cmd.customization) if err != nil { return nil, err } if exists == false { return nil, fmt.Errorf("Customization specification %s does not exists.", cmd.customization) } // get the customization specification customSpecItem, err := customizationSpecManager.GetCustomizationSpec(ctx, cmd.customization) if err != nil { return nil, err } customSpec := customSpecItem.Spec // set the customization cloneSpec.Customization = &customSpec } // clone virtualmachine return cmd.VirtualMachine.Clone(ctx, cmd.Folder, cmd.name, *cloneSpec) }
// deployVirtualMachine deploys a new VirtualMachine by cloning vm.template:
// it resolves the resource pool, folder and datastore (including Storage
// DRS pods), builds relocate/config/customization specs, clones the
// template, swaps the clone's ethernet devices for the configured ones,
// runs guest customization, attaches extra hard disks, and powers the VM
// on.
func (vm *virtualMachine) deployVirtualMachine(c *govmomi.Client) error {
	dc, err := getDatacenter(c, vm.datacenter)
	if err != nil {
		return err
	}
	finder := find.NewFinder(c.Client, true)
	finder = finder.SetDatacenter(dc)

	template, err := finder.VirtualMachine(context.TODO(), vm.template)
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] template: %#v", template)

	// Resolve the resource pool: explicit name, cluster default, or
	// datacenter default.
	var resourcePool *object.ResourcePool
	if vm.resourcePool == "" {
		if vm.cluster == "" {
			resourcePool, err = finder.DefaultResourcePool(context.TODO())
			if err != nil {
				return err
			}
		} else {
			resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources")
			if err != nil {
				return err
			}
		}
	} else {
		resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool)
		if err != nil {
			return err
		}
	}
	log.Printf("[DEBUG] resource pool: %#v", resourcePool)

	dcFolders, err := dc.Folders(context.TODO())
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] folder: %#v", vm.folder)

	// Destination folder: datacenter VM folder by default, or a named
	// sub-folder looked up by inventory path.
	folder := dcFolders.VmFolder
	if len(vm.folder) > 0 {
		si := object.NewSearchIndex(c.Client)
		folderRef, err := si.FindByInventoryPath(
			context.TODO(), fmt.Sprintf("%v/vm/%v", vm.datacenter, vm.folder))
		if err != nil {
			return fmt.Errorf("Error reading folder %s: %s", vm.folder, err)
		} else if folderRef == nil {
			return fmt.Errorf("Cannot find folder %s", vm.folder)
		} else {
			folder = folderRef.(*object.Folder)
		}
	}

	// Resolve the datastore; a name that the finder cannot resolve may be a
	// datastore cluster (StoragePod), handled via Storage DRS.
	var datastore *object.Datastore
	if vm.datastore == "" {
		datastore, err = finder.DefaultDatastore(context.TODO())
		if err != nil {
			return err
		}
	} else {
		datastore, err = finder.Datastore(context.TODO(), vm.datastore)
		if err != nil {
			// TODO: datastore cluster support in govmomi finder function
			d, err := getDatastoreObject(c, dcFolders, vm.datastore)
			if err != nil {
				return err
			}
			if d.Type == "StoragePod" {
				sp := object.StoragePod{
					Folder: object.NewFolder(c.Client, d),
				}
				sps := buildStoragePlacementSpecClone(c, dcFolders, template, resourcePool, sp)
				datastore, err = findDatastore(c, sps)
				if err != nil {
					return err
				}
			} else {
				datastore = object.NewDatastore(c.Client, d)
			}
		}
	}
	log.Printf("[DEBUG] datastore: %#v", datastore)

	// NOTE(review): vm.hardDisks[0] panics if hardDisks is empty — verify
	// the schema guarantees at least one disk.
	relocateSpec, err := buildVMRelocateSpec(resourcePool, datastore, template, vm.linkedClone, vm.hardDisks[0].initType)
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] relocate spec: %v", relocateSpec)

	// network
	networkDevices := []types.BaseVirtualDeviceConfigSpec{}
	networkConfigs := []types.CustomizationAdapterMapping{}
	for _, network := range vm.networkInterfaces {
		// network device
		nd, err := buildNetworkDevice(finder, network.label, "vmxnet3")
		if err != nil {
			return err
		}
		networkDevices = append(networkDevices, nd)
		// TODO: IPv6 support
		var ipSetting types.CustomizationIPSettings
		if network.ipv4Address == "" {
			// No static address configured: fall back to DHCP.
			ipSetting = types.CustomizationIPSettings{
				Ip: &types.CustomizationDhcpIpGenerator{},
			}
		} else {
			if network.ipv4PrefixLength == 0 {
				return fmt.Errorf("Error: ipv4_prefix_length argument is empty.")
			}
			// Convert the prefix length to a dotted-quad subnet mask.
			m := net.CIDRMask(network.ipv4PrefixLength, 32)
			sm := net.IPv4(m[0], m[1], m[2], m[3])
			subnetMask := sm.String()
			log.Printf("[DEBUG] gateway: %v", vm.gateway)
			log.Printf("[DEBUG] ipv4 address: %v", network.ipv4Address)
			log.Printf("[DEBUG] ipv4 prefix length: %v", network.ipv4PrefixLength)
			log.Printf("[DEBUG] ipv4 subnet mask: %v", subnetMask)
			ipSetting = types.CustomizationIPSettings{
				Gateway: []string{
					vm.gateway,
				},
				Ip: &types.CustomizationFixedIp{
					IpAddress: network.ipv4Address,
				},
				SubnetMask: subnetMask,
			}
		}
		// network config
		config := types.CustomizationAdapterMapping{
			Adapter: ipSetting,
		}
		networkConfigs = append(networkConfigs, config)
	}
	// NOTE(review): networkConfigs[0] panics when no network interfaces are
	// configured.
	log.Printf("[DEBUG] network configs: %v", networkConfigs[0].Adapter)

	// make config spec
	configSpec := types.VirtualMachineConfigSpec{
		NumCPUs:           vm.vcpu,
		NumCoresPerSocket: 1,
		MemoryMB:          vm.memoryMb,
	}
	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
	log.Printf("[DEBUG] starting extra custom config spec: %v", vm.customConfigurations)

	// make ExtraConfig
	if len(vm.customConfigurations) > 0 {
		var ov []types.BaseOptionValue
		for k, v := range vm.customConfigurations {
			// Copy loop variables so each OptionValue gets its own value.
			key := k
			value := v
			o := types.OptionValue{
				Key:   key,
				Value: &value,
			}
			ov = append(ov, &o)
		}
		configSpec.ExtraConfig = ov
		log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig)
	}

	var template_mo mo.VirtualMachine
	// NOTE(review): this error is only inspected later, inside the
	// linkedClone branch; template_mo.Config below may be nil if the fetch
	// failed — confirm and consider checking err immediately.
	err = template.Properties(context.TODO(), template.Reference(), []string{"parent", "config.template", "config.guestId", "resourcePool", "snapshot", "guest.toolsVersionStatus2", "config.guestFullName"}, &template_mo)

	// Guest-OS-specific customization identity: Windows sysprep vs. Linux
	// prep, keyed off the template's guest id prefix.
	var identity_options types.BaseCustomizationIdentitySettings
	if strings.HasPrefix(template_mo.Config.GuestId, "win") {
		var timeZone int
		if vm.timeZone == "Etc/UTC" {
			// Windows sysprep uses numeric timezone indexes; 085 is GMT.
			vm.timeZone = "085"
		}
		timeZone, err := strconv.Atoi(vm.timeZone)
		if err != nil {
			return fmt.Errorf("Error converting TimeZone: %s", err)
		}

		guiUnattended := types.CustomizationGuiUnattended{
			AutoLogon:      false,
			AutoLogonCount: 1,
			TimeZone:       timeZone,
		}

		customIdentification := types.CustomizationIdentification{}

		userData := types.CustomizationUserData{
			ComputerName: &types.CustomizationFixedName{
				Name: strings.Split(vm.name, ".")[0],
			},
			ProductId: vm.windowsOptionalConfig.productKey,
			FullName:  "terraform",
			OrgName:   "terraform",
		}

		// Domain join requires all three of user, password, and domain.
		if vm.windowsOptionalConfig.domainUserPassword != "" && vm.windowsOptionalConfig.domainUser != "" && vm.windowsOptionalConfig.domain != "" {
			customIdentification.DomainAdminPassword = &types.CustomizationPassword{
				PlainText: true,
				Value:     vm.windowsOptionalConfig.domainUserPassword,
			}
			customIdentification.DomainAdmin = vm.windowsOptionalConfig.domainUser
			customIdentification.JoinDomain = vm.windowsOptionalConfig.domain
		}

		if vm.windowsOptionalConfig.adminPassword != "" {
			guiUnattended.Password = &types.CustomizationPassword{
				PlainText: true,
				Value:     vm.windowsOptionalConfig.adminPassword,
			}
		}

		identity_options = &types.CustomizationSysprep{
			GuiUnattended:  guiUnattended,
			Identification: customIdentification,
			UserData:       userData,
		}
	} else {
		identity_options = &types.CustomizationLinuxPrep{
			HostName: &types.CustomizationFixedName{
				Name: strings.Split(vm.name, ".")[0],
			},
			Domain:     vm.domain,
			TimeZone:   vm.timeZone,
			HwClockUTC: types.NewBool(true),
		}
	}

	// create CustomizationSpec
	customSpec := types.CustomizationSpec{
		Identity: identity_options,
		GlobalIPSettings: types.CustomizationGlobalIPSettings{
			DnsSuffixList: vm.dnsSuffixes,
			DnsServerList: vm.dnsServers,
		},
		NicSettingMap: networkConfigs,
	}
	log.Printf("[DEBUG] custom spec: %v", customSpec)

	// make vm clone spec
	cloneSpec := types.VirtualMachineCloneSpec{
		Location: relocateSpec,
		Template: false,
		Config:   &configSpec,
		PowerOn:  false,
	}
	if vm.linkedClone {
		// Linked clones require the template properties fetched above and a
		// current snapshot to base the clone on.
		if err != nil {
			return fmt.Errorf("Error reading base VM properties: %s", err)
		}
		if template_mo.Snapshot == nil {
			return fmt.Errorf("`linkedClone=true`, but image VM has no snapshots")
		}
		cloneSpec.Snapshot = template_mo.Snapshot.CurrentSnapshot
	}
	log.Printf("[DEBUG] clone spec: %v", cloneSpec)

	task, err := template.Clone(context.TODO(), folder, vm.name, cloneSpec)
	if err != nil {
		return err
	}
	_, err = task.WaitForResult(context.TODO(), nil)
	if err != nil {
		return err
	}

	newVM, err := finder.VirtualMachine(context.TODO(), vm.Path())
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] new vm: %v", newVM)

	devices, err := newVM.Device(context.TODO())
	if err != nil {
		log.Printf("[DEBUG] Template devices can't be found")
		return err
	}

	for _, dvc := range devices {
		// Issue 3559/3560: Delete all ethernet devices to add the correct ones later
		if devices.Type(dvc) == "ethernet" {
			err := newVM.RemoveDevice(context.TODO(), dvc)
			if err != nil {
				return err
			}
		}
	}
	// Add Network devices
	for _, dvc := range networkDevices {
		err := newVM.AddDevice(
			context.TODO(), dvc.GetVirtualDeviceConfigSpec().Device)
		if err != nil {
			return err
		}
	}

	// Create the cdroms if needed.
	if err := createCdroms(newVM, vm.cdroms); err != nil {
		return err
	}

	taskb, err := newVM.Customize(context.TODO(), customSpec)
	if err != nil {
		return err
	}
	_, err = taskb.WaitForResult(context.TODO(), nil)
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] VM customization finished")

	// Attach the additional hard disks; disk 0 was handled by the relocate
	// spec, so start at index 1.
	for i := 1; i < len(vm.hardDisks); i++ {
		err = addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, vm.hardDisks[i].initType, datastore, vm.hardDisks[i].vmdkPath)
		if err != nil {
			return err
		}
	}
	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)

	// NOTE(review): the PowerOn task (and its error) is ignored here;
	// WaitForIP below will block until the VM is up, but a power-on failure
	// would surface only as a hang/timeout.
	newVM.PowerOn(context.TODO())

	ip, err := newVM.WaitForIP(context.TODO())
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] ip address: %v", ip)

	return nil
}
// createVirtualMachine creates a brand-new (non-clone) VirtualMachine: it
// resolves the resource pool, folder and datastore, assembles a config spec
// with network devices, extra config and a SCSI controller, creates the VM,
// attaches its hard disks and cdroms, and optionally powers it on when a
// bootable vmdk was supplied.
func (vm *virtualMachine) createVirtualMachine(c *govmomi.Client) error {
	dc, err := getDatacenter(c, vm.datacenter)
	if err != nil {
		return err
	}
	finder := find.NewFinder(c.Client, true)
	finder = finder.SetDatacenter(dc)

	// Resolve the resource pool: explicit name, cluster default, or
	// datacenter default.
	var resourcePool *object.ResourcePool
	if vm.resourcePool == "" {
		if vm.cluster == "" {
			resourcePool, err = finder.DefaultResourcePool(context.TODO())
			if err != nil {
				return err
			}
		} else {
			resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources")
			if err != nil {
				return err
			}
		}
	} else {
		resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool)
		if err != nil {
			return err
		}
	}
	log.Printf("[DEBUG] resource pool: %#v", resourcePool)

	dcFolders, err := dc.Folders(context.TODO())
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] folder: %#v", vm.folder)

	// Destination folder: datacenter VM folder by default, or a named
	// sub-folder looked up by inventory path.
	folder := dcFolders.VmFolder
	if len(vm.folder) > 0 {
		si := object.NewSearchIndex(c.Client)
		folderRef, err := si.FindByInventoryPath(
			context.TODO(), fmt.Sprintf("%v/vm/%v", vm.datacenter, vm.folder))
		if err != nil {
			return fmt.Errorf("Error reading folder %s: %s", vm.folder, err)
		} else if folderRef == nil {
			return fmt.Errorf("Cannot find folder %s", vm.folder)
		} else {
			folder = folderRef.(*object.Folder)
		}
	}

	// network
	networkDevices := []types.BaseVirtualDeviceConfigSpec{}
	for _, network := range vm.networkInterfaces {
		// network device
		nd, err := buildNetworkDevice(finder, network.label, "e1000")
		if err != nil {
			return err
		}
		networkDevices = append(networkDevices, nd)
	}

	// make config spec
	configSpec := types.VirtualMachineConfigSpec{
		GuestId:           "otherLinux64Guest",
		Name:              vm.name,
		NumCPUs:           vm.vcpu,
		NumCoresPerSocket: 1,
		MemoryMB:          vm.memoryMb,
		MemoryAllocation: &types.ResourceAllocationInfo{
			Reservation: vm.memoryAllocation.reservation,
		},
		DeviceChange: networkDevices,
	}
	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)

	// make ExtraConfig
	log.Printf("[DEBUG] virtual machine Extra Config spec start")
	if len(vm.customConfigurations) > 0 {
		var ov []types.BaseOptionValue
		for k, v := range vm.customConfigurations {
			// Copy loop variables so each OptionValue gets its own value.
			key := k
			value := v
			o := types.OptionValue{
				Key:   key,
				Value: &value,
			}
			log.Printf("[DEBUG] virtual machine Extra Config spec: %s,%s", k, v)
			ov = append(ov, &o)
		}
		configSpec.ExtraConfig = ov
		log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig)
	}

	// Resolve the datastore; an unresolvable name may be a datastore
	// cluster (StoragePod), handled via Storage DRS.
	var datastore *object.Datastore
	if vm.datastore == "" {
		datastore, err = finder.DefaultDatastore(context.TODO())
		if err != nil {
			return err
		}
	} else {
		datastore, err = finder.Datastore(context.TODO(), vm.datastore)
		if err != nil {
			// TODO: datastore cluster support in govmomi finder function
			d, err := getDatastoreObject(c, dcFolders, vm.datastore)
			if err != nil {
				return err
			}
			if d.Type == "StoragePod" {
				sp := object.StoragePod{
					Folder: object.NewFolder(c.Client, d),
				}
				sps := buildStoragePlacementSpecCreate(dcFolders, resourcePool, sp, configSpec)
				datastore, err = findDatastore(c, sps)
				if err != nil {
					return err
				}
			} else {
				datastore = object.NewDatastore(c.Client, d)
			}
		}
	}
	log.Printf("[DEBUG] datastore: %#v", datastore)

	// Fetch the datastore name to build the VM's VmPathName.
	var mds mo.Datastore
	if err = datastore.Properties(context.TODO(), datastore.Reference(), []string{"name"}, &mds); err != nil {
		return err
	}
	log.Printf("[DEBUG] datastore: %#v", mds.Name)

	scsi, err := object.SCSIControllerTypes().CreateSCSIController("scsi")
	if err != nil {
		// NOTE(review): error is only logged; scsi may be nil below —
		// consider returning here.
		log.Printf("[ERROR] %s", err)
	}

	configSpec.DeviceChange = append(configSpec.DeviceChange, &types.VirtualDeviceConfigSpec{
		Operation: types.VirtualDeviceConfigSpecOperationAdd,
		Device:    scsi,
	})
	configSpec.Files = &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", mds.Name)}

	task, err := folder.CreateVM(context.TODO(), configSpec, resourcePool, nil)
	if err != nil {
		// NOTE(review): error is only logged; task is nil when CreateVM
		// fails, so task.Wait below would panic — consider returning.
		log.Printf("[ERROR] %s", err)
	}

	err = task.Wait(context.TODO())
	if err != nil {
		// NOTE(review): error logged but not returned; the code proceeds to
		// look up a VM that may not exist.
		log.Printf("[ERROR] %s", err)
	}

	newVM, err := finder.VirtualMachine(context.TODO(), vm.Path())
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] new vm: %v", newVM)

	log.Printf("[DEBUG] add hard disk: %v", vm.hardDisks)
	for _, hd := range vm.hardDisks {
		log.Printf("[DEBUG] add hard disk: %v", hd.size)
		log.Printf("[DEBUG] add hard disk: %v", hd.iops)
		err = addHardDisk(newVM, hd.size, hd.iops, "thin", datastore, hd.vmdkPath)
		if err != nil {
			return err
		}
	}

	// Create the cdroms if needed.
	if err := createCdroms(newVM, vm.cdroms); err != nil {
		return err
	}

	if vm.bootableVmdk {
		// Power on the VM and wait for guest networking only when it has a
		// bootable disk. NOTE(review): the PowerOn error is ignored.
		newVM.PowerOn(context.TODO())
		ip, err := newVM.WaitForIP(context.TODO())
		if err != nil {
			return err
		}
		log.Printf("[DEBUG] ip address: %v", ip)
	}

	return nil
}
// deployVirtualMachine deploys a new VirtualMachine by cloning vm.template:
// it resolves the resource pool, folder and datastore (including Storage
// DRS pods), builds relocate/config/Linux-customization specs, clones the
// template, swaps the clone's ethernet devices for the configured ones,
// runs guest customization, attaches extra hard disks, and powers the VM
// on.
func (vm *virtualMachine) deployVirtualMachine(c *govmomi.Client) error {
	dc, err := getDatacenter(c, vm.datacenter)
	if err != nil {
		return err
	}
	finder := find.NewFinder(c.Client, true)
	finder = finder.SetDatacenter(dc)

	template, err := finder.VirtualMachine(context.TODO(), vm.template)
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] template: %#v", template)

	// Resolve the resource pool: explicit name, cluster default, or
	// datacenter default.
	var resourcePool *object.ResourcePool
	if vm.resourcePool == "" {
		if vm.cluster == "" {
			resourcePool, err = finder.DefaultResourcePool(context.TODO())
			if err != nil {
				return err
			}
		} else {
			resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources")
			if err != nil {
				return err
			}
		}
	} else {
		resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool)
		if err != nil {
			return err
		}
	}
	log.Printf("[DEBUG] resource pool: %#v", resourcePool)

	dcFolders, err := dc.Folders(context.TODO())
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] folder: %#v", vm.folder)

	// Destination folder: datacenter VM folder by default, or a named
	// sub-folder looked up by inventory path.
	folder := dcFolders.VmFolder
	if len(vm.folder) > 0 {
		si := object.NewSearchIndex(c.Client)
		folderRef, err := si.FindByInventoryPath(
			context.TODO(), fmt.Sprintf("%v/vm/%v", vm.datacenter, vm.folder))
		if err != nil {
			return fmt.Errorf("Error reading folder %s: %s", vm.folder, err)
		} else if folderRef == nil {
			return fmt.Errorf("Cannot find folder %s", vm.folder)
		} else {
			folder = folderRef.(*object.Folder)
		}
	}

	// Resolve the datastore; an unresolvable name may be a datastore
	// cluster (StoragePod), handled via Storage DRS.
	var datastore *object.Datastore
	if vm.datastore == "" {
		datastore, err = finder.DefaultDatastore(context.TODO())
		if err != nil {
			return err
		}
	} else {
		datastore, err = finder.Datastore(context.TODO(), vm.datastore)
		if err != nil {
			// TODO: datastore cluster support in govmomi finder function
			d, err := getDatastoreObject(c, dcFolders, vm.datastore)
			if err != nil {
				return err
			}
			if d.Type == "StoragePod" {
				// NOTE(review): unkeyed composite literal (go vet warning);
				// the single field being set is Folder.
				sp := object.StoragePod{
					object.NewFolder(c.Client, d),
				}
				sps := buildStoragePlacementSpecClone(c, dcFolders, template, resourcePool, sp)
				datastore, err = findDatastore(c, sps)
				if err != nil {
					return err
				}
			} else {
				datastore = object.NewDatastore(c.Client, d)
			}
		}
	}
	log.Printf("[DEBUG] datastore: %#v", datastore)

	relocateSpec, err := buildVMRelocateSpec(resourcePool, datastore, template)
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] relocate spec: %v", relocateSpec)

	// network
	networkDevices := []types.BaseVirtualDeviceConfigSpec{}
	networkConfigs := []types.CustomizationAdapterMapping{}
	for _, network := range vm.networkInterfaces {
		// network device
		nd, err := buildNetworkDevice(finder, network.label, "vmxnet3")
		if err != nil {
			return err
		}
		networkDevices = append(networkDevices, nd)
		// TODO: IPv6 support
		var ipSetting types.CustomizationIPSettings
		if network.ipv4Address == "" {
			// No static address configured: fall back to DHCP.
			ipSetting = types.CustomizationIPSettings{
				Ip: &types.CustomizationDhcpIpGenerator{},
			}
		} else {
			if network.ipv4PrefixLength == 0 {
				return fmt.Errorf("Error: ipv4_prefix_length argument is empty.")
			}
			// Convert the prefix length to a dotted-quad subnet mask.
			m := net.CIDRMask(network.ipv4PrefixLength, 32)
			sm := net.IPv4(m[0], m[1], m[2], m[3])
			subnetMask := sm.String()
			log.Printf("[DEBUG] gateway: %v", vm.gateway)
			log.Printf("[DEBUG] ipv4 address: %v", network.ipv4Address)
			log.Printf("[DEBUG] ipv4 prefix length: %v", network.ipv4PrefixLength)
			log.Printf("[DEBUG] ipv4 subnet mask: %v", subnetMask)
			ipSetting = types.CustomizationIPSettings{
				Gateway: []string{
					vm.gateway,
				},
				Ip: &types.CustomizationFixedIp{
					IpAddress: network.ipv4Address,
				},
				SubnetMask: subnetMask,
			}
		}
		// network config
		config := types.CustomizationAdapterMapping{
			Adapter: ipSetting,
		}
		networkConfigs = append(networkConfigs, config)
	}
	// NOTE(review): networkConfigs[0] panics when no network interfaces are
	// configured.
	log.Printf("[DEBUG] network configs: %v", networkConfigs[0].Adapter)

	// make config spec
	configSpec := types.VirtualMachineConfigSpec{
		NumCPUs:           vm.vcpu,
		NumCoresPerSocket: 1,
		MemoryMB:          vm.memoryMb,
	}
	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
	log.Printf("[DEBUG] starting extra custom config spec: %v", vm.customConfigurations)

	// make ExtraConfig
	if len(vm.customConfigurations) > 0 {
		var ov []types.BaseOptionValue
		for k, v := range vm.customConfigurations {
			// Copy loop variables so each OptionValue gets its own value.
			key := k
			value := v
			o := types.OptionValue{
				Key:   key,
				Value: &value,
			}
			ov = append(ov, &o)
		}
		configSpec.ExtraConfig = ov
		log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig)
	}

	// create CustomizationSpec (Linux-only in this variant).
	customSpec := types.CustomizationSpec{
		Identity: &types.CustomizationLinuxPrep{
			HostName: &types.CustomizationFixedName{
				Name: strings.Split(vm.name, ".")[0],
			},
			Domain:     vm.domain,
			TimeZone:   vm.timeZone,
			HwClockUTC: types.NewBool(true),
		},
		GlobalIPSettings: types.CustomizationGlobalIPSettings{
			DnsSuffixList: vm.dnsSuffixes,
			DnsServerList: vm.dnsServers,
		},
		NicSettingMap: networkConfigs,
	}
	log.Printf("[DEBUG] custom spec: %v", customSpec)

	// make vm clone spec
	cloneSpec := types.VirtualMachineCloneSpec{
		Location: relocateSpec,
		Template: false,
		Config:   &configSpec,
		PowerOn:  false,
	}
	log.Printf("[DEBUG] clone spec: %v", cloneSpec)

	task, err := template.Clone(context.TODO(), folder, vm.name, cloneSpec)
	if err != nil {
		return err
	}
	_, err = task.WaitForResult(context.TODO(), nil)
	if err != nil {
		return err
	}

	newVM, err := finder.VirtualMachine(context.TODO(), vm.Path())
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] new vm: %v", newVM)

	devices, err := newVM.Device(context.TODO())
	if err != nil {
		log.Printf("[DEBUG] Template devices can't be found")
		return err
	}

	for _, dvc := range devices {
		// Issue 3559/3560: Delete all ethernet devices to add the correct ones later
		if devices.Type(dvc) == "ethernet" {
			err := newVM.RemoveDevice(context.TODO(), dvc)
			if err != nil {
				return err
			}
		}
	}
	// Add Network devices
	for _, dvc := range networkDevices {
		err := newVM.AddDevice(
			context.TODO(), dvc.GetVirtualDeviceConfigSpec().Device)
		if err != nil {
			return err
		}
	}

	taskb, err := newVM.Customize(context.TODO(), customSpec)
	if err != nil {
		return err
	}
	_, err = taskb.WaitForResult(context.TODO(), nil)
	if err != nil {
		return err
	}
	log.Printf("[DEBUG]VM customization finished")

	// Attach the additional hard disks; disk 0 came with the clone, so
	// start at index 1.
	for i := 1; i < len(vm.hardDisks); i++ {
		err = addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, "eager_zeroed")
		if err != nil {
			return err
		}
	}
	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)

	// NOTE(review): the PowerOn task (and its error) is ignored here.
	newVM.PowerOn(context.TODO())

	ip, err := newVM.WaitForIP(context.TODO())
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] ip address: %v", ip)

	return nil
}
func (cmd *create) recommendDatastore(ctx context.Context, spec *types.VirtualMachineConfigSpec) (*object.Datastore, error) { sp := cmd.StoragePod.Reference() // Build pod selection spec from config spec podSelectionSpec := types.StorageDrsPodSelectionSpec{ StoragePod: &sp, } // Keep list of disks that need to be placed var disks []*types.VirtualDisk // Collect disks eligible for placement for _, deviceConfigSpec := range spec.DeviceChange { s := deviceConfigSpec.GetVirtualDeviceConfigSpec() if s.Operation != types.VirtualDeviceConfigSpecOperationAdd { continue } if s.FileOperation != types.VirtualDeviceConfigSpecFileOperationCreate { continue } d, ok := s.Device.(*types.VirtualDisk) if !ok { continue } podConfigForPlacement := types.VmPodConfigForPlacement{ StoragePod: sp, Disk: []types.PodDiskLocator{ { DiskId: d.Key, DiskBackingInfo: d.Backing, }, }, } podSelectionSpec.InitialVmConfig = append(podSelectionSpec.InitialVmConfig, podConfigForPlacement) disks = append(disks, d) } sps := types.StoragePlacementSpec{ Type: string(types.StoragePlacementSpecPlacementTypeCreate), ResourcePool: types.NewReference(cmd.ResourcePool.Reference()), PodSelectionSpec: podSelectionSpec, ConfigSpec: spec, } srm := object.NewStorageResourceManager(cmd.Client) result, err := srm.RecommendDatastores(ctx, sps) if err != nil { return nil, err } // Use result to pin disks to recommended datastores recs := result.Recommendations if len(recs) == 0 { return nil, fmt.Errorf("no recommendations") } ds := recs[0].Action[0].(*types.StoragePlacementAction).Destination var mds mo.Datastore err = property.DefaultCollector(cmd.Client).RetrieveOne(ctx, ds, []string{"name"}, &mds) if err != nil { return nil, err } datastore := object.NewDatastore(cmd.Client, ds) datastore.InventoryPath = mds.Name // Apply recommendation to eligible disks for _, disk := range disks { backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo) backing.Datastore = &ds } return datastore, nil }
// deployVirtualMachine deploys a new VirtualMachine by cloning vm.template
// (oldest variant): it resolves the resource pool and datastore (including
// Storage DRS pods), builds relocate/config/Linux-customization specs,
// clones the template with PowerOn enabled, waits for an IP, then attaches
// extra hard disks.
func (vm *virtualMachine) deployVirtualMachine(c *govmomi.Client) error {
	dc, err := getDatacenter(c, vm.datacenter)
	if err != nil {
		return err
	}
	finder := find.NewFinder(c.Client, true)
	finder = finder.SetDatacenter(dc)

	template, err := finder.VirtualMachine(context.TODO(), vm.template)
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] template: %#v", template)

	// Resolve the resource pool: explicit name, cluster default, or
	// datacenter default.
	var resourcePool *object.ResourcePool
	if vm.resourcePool == "" {
		if vm.cluster == "" {
			resourcePool, err = finder.DefaultResourcePool(context.TODO())
			if err != nil {
				return err
			}
		} else {
			resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources")
			if err != nil {
				return err
			}
		}
	} else {
		resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool)
		if err != nil {
			return err
		}
	}
	log.Printf("[DEBUG] resource pool: %#v", resourcePool)

	dcFolders, err := dc.Folders(context.TODO())
	if err != nil {
		return err
	}

	// Resolve the datastore; an unresolvable name may be a datastore
	// cluster (StoragePod), handled via Storage DRS.
	var datastore *object.Datastore
	if vm.datastore == "" {
		datastore, err = finder.DefaultDatastore(context.TODO())
		if err != nil {
			return err
		}
	} else {
		datastore, err = finder.Datastore(context.TODO(), vm.datastore)
		if err != nil {
			// TODO: datastore cluster support in govmomi finder function
			d, err := getDatastoreObject(c, dcFolders, vm.datastore)
			if err != nil {
				return err
			}
			if d.Type == "StoragePod" {
				// NOTE(review): unkeyed composite literal (go vet warning);
				// the single field being set is Folder.
				sp := object.StoragePod{
					object.NewFolder(c.Client, d),
				}
				sps := createStoragePlacementSpecClone(c, dcFolders, template, resourcePool, sp)
				datastore, err = findDatastore(c, sps)
				if err != nil {
					return err
				}
			} else {
				datastore = object.NewDatastore(c.Client, d)
			}
		}
	}
	log.Printf("[DEBUG] datastore: %#v", datastore)

	relocateSpec, err := createVMRelocateSpec(resourcePool, datastore, template)
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] relocate spec: %v", relocateSpec)

	// network
	networkDevices := []types.BaseVirtualDeviceConfigSpec{}
	networkConfigs := []types.CustomizationAdapterMapping{}
	for _, network := range vm.networkInterfaces {
		// network device
		nd, err := createNetworkDevice(finder, network.label, "vmxnet3")
		if err != nil {
			return err
		}
		networkDevices = append(networkDevices, nd)

		var ipSetting types.CustomizationIPSettings
		if network.ipAddress == "" {
			// No static address configured: fall back to DHCP.
			ipSetting = types.CustomizationIPSettings{
				Ip: &types.CustomizationDhcpIpGenerator{},
			}
		} else {
			log.Printf("[DEBUG] gateway: %v", vm.gateway)
			log.Printf("[DEBUG] ip address: %v", network.ipAddress)
			log.Printf("[DEBUG] subnet mask: %v", network.subnetMask)
			ipSetting = types.CustomizationIPSettings{
				Gateway: []string{
					vm.gateway,
				},
				Ip: &types.CustomizationFixedIp{
					IpAddress: network.ipAddress,
				},
				SubnetMask: network.subnetMask,
			}
		}
		// network config
		config := types.CustomizationAdapterMapping{
			Adapter: ipSetting,
		}
		networkConfigs = append(networkConfigs, config)
	}
	// NOTE(review): networkConfigs[0] panics when no network interfaces are
	// configured.
	log.Printf("[DEBUG] network configs: %v", networkConfigs[0].Adapter)

	// make config spec
	configSpec := types.VirtualMachineConfigSpec{
		NumCPUs:           vm.vcpu,
		NumCoresPerSocket: 1,
		MemoryMB:          vm.memoryMb,
		DeviceChange:      networkDevices,
	}
	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)

	// create CustomizationSpec (Linux-only in this variant).
	customSpec := types.CustomizationSpec{
		Identity: &types.CustomizationLinuxPrep{
			HostName: &types.CustomizationFixedName{
				Name: strings.Split(vm.name, ".")[0],
			},
			Domain:     vm.domain,
			TimeZone:   vm.timeZone,
			HwClockUTC: types.NewBool(true),
		},
		GlobalIPSettings: types.CustomizationGlobalIPSettings{
			DnsSuffixList: vm.dnsSuffixes,
			DnsServerList: vm.dnsServers,
		},
		NicSettingMap: networkConfigs,
	}
	log.Printf("[DEBUG] custom spec: %v", customSpec)

	// make vm clone spec; the clone powers on immediately here.
	cloneSpec := types.VirtualMachineCloneSpec{
		Location:      relocateSpec,
		Template:      false,
		Config:        &configSpec,
		Customization: &customSpec,
		PowerOn:       true,
	}
	log.Printf("[DEBUG] clone spec: %v", cloneSpec)

	task, err := template.Clone(context.TODO(), dcFolders.VmFolder, vm.name, cloneSpec)
	if err != nil {
		return err
	}
	_, err = task.WaitForResult(context.TODO(), nil)
	if err != nil {
		return err
	}

	newVM, err := finder.VirtualMachine(context.TODO(), vm.name)
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] new vm: %v", newVM)

	ip, err := newVM.WaitForIP(context.TODO())
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] ip address: %v", ip)

	// Attach the additional hard disks; disk 0 came with the clone, so
	// start at index 1.
	for i := 1; i < len(vm.hardDisks); i++ {
		err = addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, "eager_zeroed")
		if err != nil {
			return err
		}
	}
	return nil
}
// createVirtualMchine creates a new VirtualMachine. func (vm *virtualMachine) createVirtualMachine(c *govmomi.Client) error { dc, err := getDatacenter(c, vm.datacenter) if err != nil { return err } finder := find.NewFinder(c.Client, true) finder = finder.SetDatacenter(dc) var resourcePool *object.ResourcePool if vm.resourcePool == "" { if vm.cluster == "" { resourcePool, err = finder.DefaultResourcePool(context.TODO()) if err != nil { return err } } else { resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources") if err != nil { return err } } } else { resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool) if err != nil { return err } } log.Printf("[DEBUG] resource pool: %#v", resourcePool) dcFolders, err := dc.Folders(context.TODO()) if err != nil { return err } // network networkDevices := []types.BaseVirtualDeviceConfigSpec{} for _, network := range vm.networkInterfaces { // network device nd, err := createNetworkDevice(finder, network.label, "e1000") if err != nil { return err } networkDevices = append(networkDevices, nd) } // make config spec configSpec := types.VirtualMachineConfigSpec{ GuestId: "otherLinux64Guest", Name: vm.name, NumCPUs: vm.vcpu, NumCoresPerSocket: 1, MemoryMB: vm.memoryMb, DeviceChange: networkDevices, } log.Printf("[DEBUG] virtual machine config spec: %v", configSpec) var datastore *object.Datastore if vm.datastore == "" { datastore, err = finder.DefaultDatastore(context.TODO()) if err != nil { return err } } else { datastore, err = finder.Datastore(context.TODO(), vm.datastore) if err != nil { // TODO: datastore cluster support in govmomi finder function d, err := getDatastoreObject(c, dcFolders, vm.datastore) if err != nil { return err } if d.Type == "StoragePod" { sp := object.StoragePod{ object.NewFolder(c.Client, d), } sps := createStoragePlacementSpecCreate(dcFolders, resourcePool, sp, configSpec) datastore, err = findDatastore(c, sps) if err != nil { return err } } else { datastore = 
object.NewDatastore(c.Client, d) } } } log.Printf("[DEBUG] datastore: %#v", datastore) var mds mo.Datastore if err = datastore.Properties(context.TODO(), datastore.Reference(), []string{"name"}, &mds); err != nil { return err } log.Printf("[DEBUG] datastore: %#v", mds.Name) scsi, err := object.SCSIControllerTypes().CreateSCSIController("scsi") if err != nil { log.Printf("[ERROR] %s", err) } configSpec.DeviceChange = append(configSpec.DeviceChange, &types.VirtualDeviceConfigSpec{ Operation: types.VirtualDeviceConfigSpecOperationAdd, Device: scsi, }) configSpec.Files = &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", mds.Name)} task, err := dcFolders.VmFolder.CreateVM(context.TODO(), configSpec, resourcePool, nil) if err != nil { log.Printf("[ERROR] %s", err) } err = task.Wait(context.TODO()) if err != nil { log.Printf("[ERROR] %s", err) } newVM, err := finder.VirtualMachine(context.TODO(), vm.name) if err != nil { return err } log.Printf("[DEBUG] new vm: %v", newVM) log.Printf("[DEBUG] add hard disk: %v", vm.hardDisks) for _, hd := range vm.hardDisks { log.Printf("[DEBUG] add hard disk: %v", hd.size) log.Printf("[DEBUG] add hard disk: %v", hd.iops) err = addHardDisk(newVM, hd.size, hd.iops, "thin") if err != nil { return err } } return nil }
// createVirtualMchine creates a new VirtualMachine. func (vm *virtualMachine) createVirtualMachine(c *govmomi.Client) error { var dc *object.Datacenter var err error finder := find.NewFinder(c.Client, true) if vm.datacenter != "" { dc, err = finder.Datacenter(context.TODO(), vm.datacenter) if err != nil { return err } } else { dc, err = finder.DefaultDatacenter(context.TODO()) if err != nil { return err } } finder = finder.SetDatacenter(dc) var resourcePool *object.ResourcePool if vm.resourcePool == "" { if vm.cluster == "" { resourcePool, err = finder.DefaultResourcePool(context.TODO()) if err != nil { return err } } else { resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources") if err != nil { return err } } } else { resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool) if err != nil { return err } } log.Printf("[DEBUG] resource pool: %#v", resourcePool) dcFolders, err := dc.Folders(context.TODO()) if err != nil { return err } // network networkDevices := []types.BaseVirtualDeviceConfigSpec{} for _, network := range vm.networkInterfaces { // network device nd, err := createNetworkDevice(finder, network.label, "e1000") if err != nil { return err } networkDevices = append(networkDevices, nd) } // make config spec configSpec := types.VirtualMachineConfigSpec{ GuestId: "otherLinux64Guest", Name: vm.name, NumCPUs: vm.vcpu, NumCoresPerSocket: 1, MemoryMB: vm.memoryMb, DeviceChange: networkDevices, } log.Printf("[DEBUG] virtual machine config spec: %v", configSpec) var datastore *object.Datastore if vm.datastore == "" { datastore, err = finder.DefaultDatastore(context.TODO()) if err != nil { return err } } else { s := object.NewSearchIndex(c.Client) ref, err := s.FindChild(context.TODO(), dcFolders.DatastoreFolder, vm.datastore) if err != nil { return err } log.Printf("[DEBUG] findDatastore: reference: %#v", ref) mor := ref.Reference() if mor.Type == "StoragePod" { storagePod := object.NewFolder(c.Client, mor) vmfr := 
dcFolders.VmFolder.Reference() rpr := resourcePool.Reference() spr := storagePod.Reference() sps := types.StoragePlacementSpec{ Type: "create", ConfigSpec: &configSpec, PodSelectionSpec: types.StorageDrsPodSelectionSpec{ StoragePod: &spr, }, Folder: &vmfr, ResourcePool: &rpr, } log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps) srm := object.NewStorageResourceManager(c.Client) rds, err := srm.RecommendDatastores(context.TODO(), sps) if err != nil { return err } log.Printf("[DEBUG] findDatastore: recommendDatastores: %#v\n", rds) spa := rds.Recommendations[0].Action[0].(*types.StoragePlacementAction) datastore = object.NewDatastore(c.Client, spa.Destination) if err != nil { return err } } else { datastore = object.NewDatastore(c.Client, mor) } } log.Printf("[DEBUG] datastore: %#v", datastore) var mds mo.Datastore if err = datastore.Properties(context.TODO(), datastore.Reference(), []string{"name"}, &mds); err != nil { return err } log.Printf("[DEBUG] datastore: %#v", mds.Name) scsi, err := object.SCSIControllerTypes().CreateSCSIController("scsi") if err != nil { log.Printf("[ERROR] %s", err) } configSpec.DeviceChange = append(configSpec.DeviceChange, &types.VirtualDeviceConfigSpec{ Operation: types.VirtualDeviceConfigSpecOperationAdd, Device: scsi, }) configSpec.Files = &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", mds.Name)} task, err := dcFolders.VmFolder.CreateVM(context.TODO(), configSpec, resourcePool, nil) if err != nil { log.Printf("[ERROR] %s", err) } err = task.Wait(context.TODO()) if err != nil { log.Printf("[ERROR] %s", err) } newVM, err := finder.VirtualMachine(context.TODO(), vm.name) if err != nil { return err } log.Printf("[DEBUG] new vm: %v", newVM) log.Printf("[DEBUG] add hard disk: %v", vm.hardDisks) for _, hd := range vm.hardDisks { log.Printf("[DEBUG] add hard disk: %v", hd.size) log.Printf("[DEBUG] add hard disk: %v", hd.iops) err = addHardDisk(newVM, hd.size, hd.iops, "thin") if err != nil { return err } } 
return nil }
// findDatastore finds Datastore object. func findDatastore(c *govmomi.Client, f *object.DatacenterFolders, vm *object.VirtualMachine, rp *object.ResourcePool, name string) (*object.Datastore, error) { var datastore *object.Datastore s := object.NewSearchIndex(c.Client) ref, err := s.FindChild(context.TODO(), f.DatastoreFolder, name) if err != nil { return nil, err } log.Printf("[DEBUG] findDatastore: reference: %#v", ref) mor := ref.Reference() if mor.Type == "StoragePod" { storagePod := object.NewFolder(c.Client, mor) vmr := vm.Reference() vmfr := f.VmFolder.Reference() rpr := rp.Reference() spr := storagePod.Reference() var o mo.VirtualMachine err := vm.Properties(context.TODO(), vmr, []string{"datastore"}, &o) if err != nil { return nil, err } ds := object.NewDatastore(c.Client, o.Datastore[0]) log.Printf("[DEBUG] findDatastore: datastore: %#v\n", ds) devices, err := vm.Device(context.TODO()) if err != nil { return nil, err } var key int for _, d := range devices.SelectByType((*types.VirtualDisk)(nil)) { key = d.GetVirtualDevice().Key log.Printf("[DEBUG] findDatastore: virtual devices: %#v\n", d.GetVirtualDevice()) } sps := types.StoragePlacementSpec{ Type: "clone", Vm: &vmr, PodSelectionSpec: types.StorageDrsPodSelectionSpec{ StoragePod: &spr, }, CloneSpec: &types.VirtualMachineCloneSpec{ Location: types.VirtualMachineRelocateSpec{ Disk: []types.VirtualMachineRelocateSpecDiskLocator{ types.VirtualMachineRelocateSpecDiskLocator{ Datastore: ds.Reference(), DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{}, DiskId: key, }, }, Pool: &rpr, }, PowerOn: false, Template: false, }, CloneName: "dummy", Folder: &vmfr, } log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps) srm := object.NewStorageResourceManager(c.Client) rds, err := srm.RecommendDatastores(context.TODO(), sps) if err != nil { return nil, err } log.Printf("[DEBUG] findDatastore: recommendDatastores: %#v\n", rds) spa := 
rds.Recommendations[0].Action[0].(*types.StoragePlacementAction) datastore = object.NewDatastore(c.Client, spa.Destination) } else { datastore = object.NewDatastore(c.Client, mor) } log.Printf("[DEBUG] findDatastore: datastore: %#v", datastore) return datastore, nil }
func (vm *virtualMachine) setupVirtualMachine(c *govmomi.Client) error { dc, err := getDatacenter(c, vm.datacenter) if err != nil { return err } finder := find.NewFinder(c.Client, true) finder = finder.SetDatacenter(dc) var template *object.VirtualMachine var template_mo mo.VirtualMachine var vm_mo mo.VirtualMachine if vm.template != "" { template, err = finder.VirtualMachine(context.TODO(), vm.template) if err != nil { return err } log.Printf("[DEBUG] template: %#v", template) err = template.Properties(context.TODO(), template.Reference(), []string{"parent", "config.template", "config.guestId", "resourcePool", "snapshot", "guest.toolsVersionStatus2", "config.guestFullName"}, &template_mo) if err != nil { return err } } var resourcePool *object.ResourcePool if vm.resourcePool == "" { if vm.cluster == "" { resourcePool, err = finder.DefaultResourcePool(context.TODO()) if err != nil { return err } } else { resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources") if err != nil { return err } } } else { resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool) if err != nil { return err } } log.Printf("[DEBUG] resource pool: %#v", resourcePool) dcFolders, err := dc.Folders(context.TODO()) if err != nil { return err } log.Printf("[DEBUG] folder: %#v", vm.folder) folder := dcFolders.VmFolder if len(vm.folder) > 0 { si := object.NewSearchIndex(c.Client) folderRef, err := si.FindByInventoryPath( context.TODO(), fmt.Sprintf("%v/vm/%v", vm.datacenter, vm.folder)) if err != nil { return fmt.Errorf("Error reading folder %s: %s", vm.folder, err) } else if folderRef == nil { return fmt.Errorf("Cannot find folder %s", vm.folder) } else { folder = folderRef.(*object.Folder) } } // make config spec configSpec := types.VirtualMachineConfigSpec{ Name: vm.name, NumCPUs: vm.vcpu, NumCoresPerSocket: 1, MemoryMB: vm.memoryMb, MemoryAllocation: &types.ResourceAllocationInfo{ Reservation: vm.memoryAllocation.reservation, }, } if vm.template == "" 
{ configSpec.GuestId = "otherLinux64Guest" } log.Printf("[DEBUG] virtual machine config spec: %v", configSpec) // make ExtraConfig log.Printf("[DEBUG] virtual machine Extra Config spec start") if len(vm.customConfigurations) > 0 { var ov []types.BaseOptionValue for k, v := range vm.customConfigurations { key := k value := v o := types.OptionValue{ Key: key, Value: &value, } log.Printf("[DEBUG] virtual machine Extra Config spec: %s,%s", k, v) ov = append(ov, &o) } configSpec.ExtraConfig = ov log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig) } var datastore *object.Datastore if vm.datastore == "" { datastore, err = finder.DefaultDatastore(context.TODO()) if err != nil { return err } } else { datastore, err = finder.Datastore(context.TODO(), vm.datastore) if err != nil { // TODO: datastore cluster support in govmomi finder function d, err := getDatastoreObject(c, dcFolders, vm.datastore) if err != nil { return err } if d.Type == "StoragePod" { sp := object.StoragePod{ Folder: object.NewFolder(c.Client, d), } var sps types.StoragePlacementSpec if vm.template != "" { sps = buildStoragePlacementSpecClone(c, dcFolders, template, resourcePool, sp) } else { sps = buildStoragePlacementSpecCreate(dcFolders, resourcePool, sp, configSpec) } datastore, err = findDatastore(c, sps) if err != nil { return err } } else { datastore = object.NewDatastore(c.Client, d) } } } log.Printf("[DEBUG] datastore: %#v", datastore) // network networkDevices := []types.BaseVirtualDeviceConfigSpec{} networkConfigs := []types.CustomizationAdapterMapping{} for _, network := range vm.networkInterfaces { // network device var networkDeviceType string if vm.template == "" { networkDeviceType = "e1000" } else { networkDeviceType = "vmxnet3" } nd, err := buildNetworkDevice(finder, network.label, networkDeviceType) if err != nil { return err } networkDevices = append(networkDevices, nd) if vm.template != "" { var ipSetting types.CustomizationIPSettings if 
network.ipv4Address == "" { ipSetting.Ip = &types.CustomizationDhcpIpGenerator{} } else { if network.ipv4PrefixLength == 0 { return fmt.Errorf("Error: ipv4_prefix_length argument is empty.") } m := net.CIDRMask(network.ipv4PrefixLength, 32) sm := net.IPv4(m[0], m[1], m[2], m[3]) subnetMask := sm.String() log.Printf("[DEBUG] ipv4 gateway: %v\n", network.ipv4Gateway) log.Printf("[DEBUG] ipv4 address: %v\n", network.ipv4Address) log.Printf("[DEBUG] ipv4 prefix length: %v\n", network.ipv4PrefixLength) log.Printf("[DEBUG] ipv4 subnet mask: %v\n", subnetMask) ipSetting.Gateway = []string{ network.ipv4Gateway, } ipSetting.Ip = &types.CustomizationFixedIp{ IpAddress: network.ipv4Address, } ipSetting.SubnetMask = subnetMask } ipv6Spec := &types.CustomizationIPSettingsIpV6AddressSpec{} if network.ipv6Address == "" { ipv6Spec.Ip = []types.BaseCustomizationIpV6Generator{ &types.CustomizationDhcpIpV6Generator{}, } } else { log.Printf("[DEBUG] ipv6 gateway: %v\n", network.ipv6Gateway) log.Printf("[DEBUG] ipv6 address: %v\n", network.ipv6Address) log.Printf("[DEBUG] ipv6 prefix length: %v\n", network.ipv6PrefixLength) ipv6Spec.Ip = []types.BaseCustomizationIpV6Generator{ &types.CustomizationFixedIpV6{ IpAddress: network.ipv6Address, SubnetMask: int32(network.ipv6PrefixLength), }, } ipv6Spec.Gateway = []string{network.ipv6Gateway} } ipSetting.IpV6Spec = ipv6Spec // network config config := types.CustomizationAdapterMapping{ Adapter: ipSetting, } networkConfigs = append(networkConfigs, config) } } log.Printf("[DEBUG] network devices: %v", networkDevices) log.Printf("[DEBUG] network configs: %v", networkConfigs) var task *object.Task if vm.template == "" { var mds mo.Datastore if err = datastore.Properties(context.TODO(), datastore.Reference(), []string{"name"}, &mds); err != nil { return err } log.Printf("[DEBUG] datastore: %#v", mds.Name) scsi, err := object.SCSIControllerTypes().CreateSCSIController("scsi") if err != nil { log.Printf("[ERROR] %s", err) } configSpec.DeviceChange = 
append(configSpec.DeviceChange, &types.VirtualDeviceConfigSpec{ Operation: types.VirtualDeviceConfigSpecOperationAdd, Device: scsi, }) configSpec.Files = &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", mds.Name)} task, err = folder.CreateVM(context.TODO(), configSpec, resourcePool, nil) if err != nil { log.Printf("[ERROR] %s", err) } err = task.Wait(context.TODO()) if err != nil { log.Printf("[ERROR] %s", err) } } else { relocateSpec, err := buildVMRelocateSpec(resourcePool, datastore, template, vm.linkedClone, vm.hardDisks[0].initType) if err != nil { return err } log.Printf("[DEBUG] relocate spec: %v", relocateSpec) // make vm clone spec cloneSpec := types.VirtualMachineCloneSpec{ Location: relocateSpec, Template: false, Config: &configSpec, PowerOn: false, } if vm.linkedClone { if template_mo.Snapshot == nil { return fmt.Errorf("`linkedClone=true`, but image VM has no snapshots") } cloneSpec.Snapshot = template_mo.Snapshot.CurrentSnapshot } log.Printf("[DEBUG] clone spec: %v", cloneSpec) task, err = template.Clone(context.TODO(), folder, vm.name, cloneSpec) if err != nil { return err } } err = task.Wait(context.TODO()) if err != nil { log.Printf("[ERROR] %s", err) } newVM, err := finder.VirtualMachine(context.TODO(), vm.Path()) if err != nil { return err } log.Printf("[DEBUG] new vm: %v", newVM) devices, err := newVM.Device(context.TODO()) if err != nil { log.Printf("[DEBUG] Template devices can't be found") return err } for _, dvc := range devices { // Issue 3559/3560: Delete all ethernet devices to add the correct ones later if devices.Type(dvc) == "ethernet" { err := newVM.RemoveDevice(context.TODO(), false, dvc) if err != nil { return err } } } // Add Network devices for _, dvc := range networkDevices { err := newVM.AddDevice( context.TODO(), dvc.GetVirtualDeviceConfigSpec().Device) if err != nil { return err } } // Create the cdroms if needed. 
if err := createCdroms(newVM, vm.cdroms); err != nil { return err } newVM.Properties(context.TODO(), newVM.Reference(), []string{"summary", "config"}, &vm_mo) firstDisk := 0 if vm.template != "" { firstDisk++ } for i := firstDisk; i < len(vm.hardDisks); i++ { log.Printf("[DEBUG] disk index: %v", i) var diskPath string switch { case vm.hardDisks[i].vmdkPath != "": diskPath = vm.hardDisks[i].vmdkPath case vm.hardDisks[i].name != "": snapshotFullDir := vm_mo.Config.Files.SnapshotDirectory split := strings.Split(snapshotFullDir, " ") if len(split) != 2 { return fmt.Errorf("[ERROR] setupVirtualMachine - failed to split snapshot directory: %v", snapshotFullDir) } vmWorkingPath := split[1] diskPath = vmWorkingPath + vm.hardDisks[i].name default: return fmt.Errorf("[ERROR] setupVirtualMachine - Neither vmdk path nor vmdk name was given: %#v", vm.hardDisks[i]) } err = addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, vm.hardDisks[i].initType, datastore, diskPath, vm.hardDisks[i].controller) if err != nil { return err } } if vm.skipCustomization || vm.template == "" { log.Printf("[DEBUG] VM customization skipped") } else { var identity_options types.BaseCustomizationIdentitySettings if strings.HasPrefix(template_mo.Config.GuestId, "win") { var timeZone int if vm.timeZone == "Etc/UTC" { vm.timeZone = "085" } timeZone, err := strconv.Atoi(vm.timeZone) if err != nil { return fmt.Errorf("Error converting TimeZone: %s", err) } guiUnattended := types.CustomizationGuiUnattended{ AutoLogon: false, AutoLogonCount: 1, TimeZone: int32(timeZone), } customIdentification := types.CustomizationIdentification{} userData := types.CustomizationUserData{ ComputerName: &types.CustomizationFixedName{ Name: strings.Split(vm.name, ".")[0], }, ProductId: vm.windowsOptionalConfig.productKey, FullName: "terraform", OrgName: "terraform", } if vm.windowsOptionalConfig.domainUserPassword != "" && vm.windowsOptionalConfig.domainUser != "" && vm.windowsOptionalConfig.domain != "" { 
customIdentification.DomainAdminPassword = &types.CustomizationPassword{ PlainText: true, Value: vm.windowsOptionalConfig.domainUserPassword, } customIdentification.DomainAdmin = vm.windowsOptionalConfig.domainUser customIdentification.JoinDomain = vm.windowsOptionalConfig.domain } if vm.windowsOptionalConfig.adminPassword != "" { guiUnattended.Password = &types.CustomizationPassword{ PlainText: true, Value: vm.windowsOptionalConfig.adminPassword, } } identity_options = &types.CustomizationSysprep{ GuiUnattended: guiUnattended, Identification: customIdentification, UserData: userData, } } else { identity_options = &types.CustomizationLinuxPrep{ HostName: &types.CustomizationFixedName{ Name: strings.Split(vm.name, ".")[0], }, Domain: vm.domain, TimeZone: vm.timeZone, HwClockUTC: types.NewBool(true), } } // create CustomizationSpec customSpec := types.CustomizationSpec{ Identity: identity_options, GlobalIPSettings: types.CustomizationGlobalIPSettings{ DnsSuffixList: vm.dnsSuffixes, DnsServerList: vm.dnsServers, }, NicSettingMap: networkConfigs, } log.Printf("[DEBUG] custom spec: %v", customSpec) log.Printf("[DEBUG] VM customization starting") taskb, err := newVM.Customize(context.TODO(), customSpec) if err != nil { return err } _, err = taskb.WaitForResult(context.TODO(), nil) if err != nil { return err } log.Printf("[DEBUG] VM customization finished") } if vm.hasBootableVmdk || vm.template != "" { newVM.PowerOn(context.TODO()) } return nil }