func (m *Manager) Attach(op trace.Operation, disk *types.VirtualDisk) error {
    deviceList := object.VirtualDeviceList{}
    deviceList = append(deviceList, disk)

    changeSpec, err := deviceList.ConfigSpec(types.VirtualDeviceConfigSpecOperationAdd)
    if err != nil {
        return err
    }

    machineSpec := types.VirtualMachineConfigSpec{}
    machineSpec.DeviceChange = append(machineSpec.DeviceChange, changeSpec...)

    m.reconfig.Lock()
    _, err = m.vm.WaitForResult(op, func(ctx context.Context) (tasks.Task, error) {
        t, er := m.vm.Reconfigure(ctx, machineSpec)
        // Guard against a nil task when Reconfigure fails before a task is created.
        if t != nil {
            op.Debugf("Attach reconfigure task=%s", t.Reference())
        }
        return t, er
    })
    m.reconfig.Unlock()

    if err != nil {
        op.Errorf("vmdk storage driver failed to attach disk: %s", errors.ErrorStack(err))
        return errors.Trace(err)
    }

    return nil
}
func (v VirtualMachine) configureDevice(ctx context.Context, op types.VirtualDeviceConfigSpecOperation, fop types.VirtualDeviceConfigSpecFileOperation, devices ...types.BaseVirtualDevice) error {
    spec := types.VirtualMachineConfigSpec{}

    for _, device := range devices {
        config := &types.VirtualDeviceConfigSpec{
            Device:    device,
            Operation: op,
        }

        if disk, ok := device.(*types.VirtualDisk); ok {
            config.FileOperation = fop

            // Special case to attach an existing disk
            if op == types.VirtualDeviceConfigSpecOperationAdd && disk.CapacityInKB == 0 {
                childDisk := false
                if b, ok := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {
                    childDisk = b.Parent != nil
                }

                if !childDisk {
                    // existing disk: clear the file operation so no new VMDK is created
                    config.FileOperation = ""
                }
            }
        }

        spec.DeviceChange = append(spec.DeviceChange, config)
    }

    task, err := v.Reconfigure(ctx, spec)
    if err != nil {
        return err
    }

    return task.Wait(ctx)
}
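// The CapacityInKB == 0 special case above is what allows a caller to attach a
// VMDK that already exists on a datastore instead of creating a new one. A
// minimal, self-contained sketch of building such a device change (the path,
// controller key, and helper name are illustrative, not taken from the code above):

package main

import (
    "fmt"

    "github.com/vmware/govmomi/vim25/types"
)

// existingDiskSpec builds an "add" change for a VMDK that already exists on a
// datastore: CapacityInKB is left at zero and FileOperation is left empty, so a
// reconfigure attaches the existing file instead of creating a new one.
func existingDiskSpec(controllerKey int32, path string) *types.VirtualDeviceConfigSpec {
    disk := &types.VirtualDisk{
        VirtualDevice: types.VirtualDevice{
            Key:           -1,
            ControllerKey: controllerKey,
            Backing: &types.VirtualDiskFlatVer2BackingInfo{
                DiskMode: string(types.VirtualDiskModePersistent),
                VirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{
                    FileName: path, // e.g. "[datastore1] vm/existing.vmdk" (placeholder)
                },
            },
        },
        // CapacityInKB deliberately left 0: "attach existing disk".
    }

    return &types.VirtualDeviceConfigSpec{
        Device:    disk,
        Operation: types.VirtualDeviceConfigSpecOperationAdd,
        // FileOperation intentionally empty, matching the special case above.
    }
}

func main() {
    spec := existingDiskSpec(1000, "[datastore1] vm/existing.vmdk")
    fmt.Printf("operation=%s\n", spec.Operation)
}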
func (m *Manager) Detach(op trace.Operation, d *VirtualDisk) error {
    defer trace.End(trace.Begin(d.DevicePath))
    op.Infof("Detaching disk %s", d.DevicePath)

    d.lock()
    defer d.unlock()

    if !d.Attached() {
        op.Infof("Disk %s is already detached", d.DevicePath)
        return nil
    }

    if err := d.canBeDetached(); err != nil {
        return errors.Trace(err)
    }

    spec := types.VirtualMachineConfigSpec{}

    disk, err := findDisk(op, m.vm, d.DatastoreURI)
    if err != nil {
        return errors.Trace(err)
    }

    config := []types.BaseVirtualDeviceConfigSpec{
        &types.VirtualDeviceConfigSpec{
            Device:    disk,
            Operation: types.VirtualDeviceConfigSpecOperationRemove,
        },
    }

    spec.DeviceChange = config

    m.reconfig.Lock()
    _, err = m.vm.WaitForResult(op, func(ctx context.Context) (tasks.Task, error) {
        t, er := m.vm.Reconfigure(ctx, spec)
        // Guard against a nil task when Reconfigure fails before a task is created.
        if t != nil {
            op.Debugf("Detach reconfigure task=%s", t.Reference())
        }
        return t, er
    })
    m.reconfig.Unlock()

    if err != nil {
        op.Errorf(err.Error())
        log.Warnf("detach for %s failed with %s", d.DevicePath, errors.ErrorStack(err))
        return errors.Trace(err)
    }

    // Release a slot on the attached-disk semaphore without blocking if it is already empty.
    select {
    case <-m.maxAttached:
    default:
    }

    return d.setDetached()
}
func (m *Manager) Detach(ctx context.Context, d *VirtualDisk) error {
    defer trace.End(trace.Begin(d.DevicePath))
    log.Infof("Detaching disk %s", d.DevicePath)

    d.lock()
    defer d.unlock()

    if !d.Attached() {
        log.Infof("Disk %s is already detached", d.DevicePath)
        return nil
    }

    if err := d.canBeDetached(); err != nil {
        return errors.Trace(err)
    }

    spec := types.VirtualMachineConfigSpec{}

    disk, err := findDisk(ctx, m.vm, d.DatastoreURI)
    if err != nil {
        return errors.Trace(err)
    }

    config := []types.BaseVirtualDeviceConfigSpec{
        &types.VirtualDeviceConfigSpec{
            Device:    disk,
            Operation: types.VirtualDeviceConfigSpecOperationRemove,
        },
    }

    spec.DeviceChange = config

    err = tasks.Wait(ctx, func(ctx context.Context) (tasks.Waiter, error) {
        return m.vm.Reconfigure(ctx, spec)
    })
    if err != nil {
        log.Warnf("detach for %s failed with %s", d.DevicePath, errors.ErrorStack(err))
        return errors.Trace(err)
    }

    // Release a slot on the attached-disk semaphore without blocking if it is already empty.
    select {
    case <-m.maxAttached:
    default:
    }

    return d.setDetached()
}
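// Both Detach variants above drain m.maxAttached with a non-blocking receive,
// which suggests a buffered channel used as a counting semaphore for the number
// of attached disks. A stand-alone sketch of that idiom (an illustration of the
// pattern, not the Manager's actual implementation):

package main

import (
    "errors"
    "fmt"
)

// diskSemaphore illustrates the maxAttached pattern: a buffered channel whose
// capacity is the maximum number of concurrently attached disks.
type diskSemaphore chan struct{}

// acquire reserves a slot, failing instead of blocking when the limit is reached.
func (s diskSemaphore) acquire() error {
    select {
    case s <- struct{}{}:
        return nil
    default:
        return errors.New("too many attached disks")
    }
}

// release frees a slot; the default case keeps a double release from blocking.
func (s diskSemaphore) release() {
    select {
    case <-s:
    default:
    }
}

func main() {
    sem := make(diskSemaphore, 2)
    fmt.Println(sem.acquire()) // <nil>
    fmt.Println(sem.acquire()) // <nil>
    fmt.Println(sem.acquire()) // too many attached disks
    sem.release()
    fmt.Println(sem.acquire()) // <nil>
}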
func (cmd *create) createVM(ctx context.Context) (*object.Task, error) {
    var devices object.VirtualDeviceList
    var err error

    spec := types.VirtualMachineConfigSpec{
        Name:     cmd.name,
        GuestId:  cmd.guestID,
        Files:    &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", cmd.Datastore.Name())},
        NumCPUs:  cmd.cpus,
        MemoryMB: int64(cmd.memory),
    }

    devices, err = cmd.addStorage(nil)
    if err != nil {
        return nil, err
    }

    devices, err = cmd.addNetwork(devices)
    if err != nil {
        return nil, err
    }

    deviceChange, err := devices.ConfigSpec(types.VirtualDeviceConfigSpecOperationAdd)
    if err != nil {
        return nil, err
    }

    spec.DeviceChange = deviceChange

    if !cmd.force {
        vmxPath := fmt.Sprintf("%s/%s.vmx", cmd.name, cmd.name)

        _, err := cmd.Datastore.Stat(ctx, vmxPath)
        if err == nil {
            dsPath := cmd.Datastore.Path(vmxPath)
            return nil, fmt.Errorf("File %s already exists", dsPath)
        }
    }

    folders, err := cmd.Datacenter.Folders(ctx)
    if err != nil {
        return nil, err
    }

    return folders.VmFolder.CreateVM(ctx, spec, cmd.ResourcePool, cmd.HostSystem)
}
func (cmd *create) createVM(name string) (*object.Task, error) {
    spec := types.VirtualMachineConfigSpec{
        Name:     name,
        GuestId:  cmd.guestID,
        Files:    &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", cmd.Datastore.Name())},
        NumCPUs:  cmd.cpus,
        MemoryMB: int64(cmd.memory),
    }

    if !cmd.force {
        vmxPath := fmt.Sprintf("%s/%s.vmx", name, name)

        _, err := cmd.Datastore.Stat(context.TODO(), vmxPath)
        if err == nil {
            dsPath := cmd.Datastore.Path(vmxPath)
            return nil, fmt.Errorf("File %s already exists", dsPath)
        }
    }

    if cmd.controller != "ide" {
        scsi, err := object.SCSIControllerTypes().CreateSCSIController(cmd.controller)
        if err != nil {
            return nil, err
        }

        spec.DeviceChange = append(spec.DeviceChange, &types.VirtualDeviceConfigSpec{
            Operation: types.VirtualDeviceConfigSpecOperationAdd,
            Device:    scsi,
        })
    }

    folders, err := cmd.Datacenter.Folders(context.TODO())
    if err != nil {
        return nil, err
    }

    return folders.VmFolder.CreateVM(context.TODO(), spec, cmd.ResourcePool, cmd.HostSystem)
}
// Create has the following implementation:
// 1. check whether the docker directory contains the boot2docker ISO
// 2. generate an SSH keypair and bundle it in a tar
// 3. create a virtual machine with the boot2docker ISO mounted
// 4. reconfigure the virtual machine network and disk size
func (d *Driver) Create() error {
    b2dutils := mcnutils.NewB2dUtils(d.StorePath)
    if err := b2dutils.CopyIsoToMachineDir(d.Boot2DockerURL, d.MachineName); err != nil {
        return err
    }

    log.Infof("Generating SSH Keypair...")
    if err := ssh.GenerateSSHKey(d.GetSSHKeyPath()); err != nil {
        return err
    }

    // Create context
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    c, err := d.vsphereLogin(ctx)
    if err != nil {
        return err
    }
    defer c.Logout(ctx)

    // Create a new finder
    f := find.NewFinder(c.Client, true)

    dc, err := f.DatacenterOrDefault(ctx, d.Datacenter)
    if err != nil {
        return err
    }

    f.SetDatacenter(dc)

    dss, err := f.DatastoreOrDefault(ctx, d.Datastore)
    if err != nil {
        return err
    }

    net, err := f.NetworkOrDefault(ctx, d.Network)
    if err != nil {
        return err
    }

    hs, err := f.HostSystemOrDefault(ctx, d.HostSystem)
    if err != nil {
        return err
    }

    var rp *object.ResourcePool
    if d.Pool != "" {
        // Find specified Resource Pool
        rp, err = f.ResourcePool(ctx, d.Pool)
        if err != nil {
            return err
        }
    } else {
        // Pick default Resource Pool for Host System
        rp, err = hs.ResourcePool(ctx)
        if err != nil {
            return err
        }
    }

    spec := types.VirtualMachineConfigSpec{
        Name:     d.MachineName,
        GuestId:  "otherLinux64Guest",
        Files:    &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", dss.Name())},
        NumCPUs:  int32(d.CPU),
        MemoryMB: int64(d.Memory),
    }

    scsi, err := object.SCSIControllerTypes().CreateSCSIController("pvscsi")
    if err != nil {
        return err
    }

    spec.DeviceChange = append(spec.DeviceChange, &types.VirtualDeviceConfigSpec{
        Operation: types.VirtualDeviceConfigSpecOperationAdd,
        Device:    scsi,
    })

    log.Infof("Creating VM...")
    folders, err := dc.Folders(ctx)
    if err != nil {
        return err
    }

    task, err := folders.VmFolder.CreateVM(ctx, spec, rp, hs)
    if err != nil {
        return err
    }

    info, err := task.WaitForResult(ctx, nil)
    if err != nil {
        return err
    }

    log.Infof("Uploading Boot2docker ISO ...")
    dsurl, err := dss.URL(ctx, dc, fmt.Sprintf("%s/%s", d.MachineName, isoFilename))
    if err != nil {
        return err
    }

    p := soap.DefaultUpload
    if err = c.Client.UploadFile(d.ISO, dsurl, &p); err != nil {
        return err
    }

    // Retrieve the new VM
    vm := object.NewVirtualMachine(c.Client, info.Result.(types.ManagedObjectReference))

    devices, err := vm.Device(ctx)
    if err != nil {
        return err
    }

    var add []types.BaseVirtualDevice

    controller, err := devices.FindDiskController("scsi")
    if err != nil {
        return err
    }

    disk := devices.CreateDisk(controller, dss.Reference(),
        dss.Path(fmt.Sprintf("%s/%s.vmdk", d.MachineName, d.MachineName)))

    // Convert MB to KB
    disk.CapacityInKB = int64(d.DiskSize) * 1024

    add = append(add, disk)

    ide, err := devices.FindIDEController("")
    if err != nil {
        return err
    }

    cdrom, err := devices.CreateCdrom(ide)
    if err != nil {
        return err
    }

    add = append(add, devices.InsertIso(cdrom, dss.Path(fmt.Sprintf("%s/%s", d.MachineName, isoFilename))))

    backing, err := net.EthernetCardBackingInfo(ctx)
    if err != nil {
        return err
    }

    netdev, err := object.EthernetCardTypes().CreateEthernetCard("vmxnet3", backing)
    if err != nil {
        return err
    }

    log.Infof("Reconfiguring VM...")
    add = append(add, netdev)
    if err := vm.AddDevice(ctx, add...); err != nil {
        return err
    }

    if err := d.Start(); err != nil {
        return err
    }

    log.Infof("Provisioning certs and ssh keys...")
    // Generate a tar keys bundle
    if err := d.generateKeyBundle(); err != nil {
        return err
    }

    opman := guest.NewOperationsManager(c.Client, vm.Reference())

    fileman, err := opman.FileManager(ctx)
    if err != nil {
        return err
    }

    src := d.ResolveStorePath("userdata.tar")
    s, err := os.Stat(src)
    if err != nil {
        return err
    }

    auth := AuthFlag{}
    flag := FileAttrFlag{}
    auth.auth.Username = B2DUser
    auth.auth.Password = B2DPass
    flag.SetPerms(0, 0, 660)

    url, err := fileman.InitiateFileTransferToGuest(ctx, auth.Auth(), "/home/docker/userdata.tar", flag.Attr(), s.Size(), true)
    if err != nil {
        return err
    }

    u, err := c.Client.ParseURL(url)
    if err != nil {
        return err
    }

    if err = c.Client.UploadFile(src, u, nil); err != nil {
        return err
    }

    procman, err := opman.ProcessManager(ctx)
    if err != nil {
        return err
    }

    var env []string
    guestspec := types.GuestProgramSpec{
        ProgramPath:      "/usr/bin/sudo",
        Arguments:        "/bin/mv /home/docker/userdata.tar /var/lib/boot2docker/userdata.tar && /usr/bin/sudo tar xf /var/lib/boot2docker/userdata.tar -C /home/docker/ > /var/log/userdata.log 2>&1 && /usr/bin/sudo chown -R docker:staff /home/docker",
        WorkingDirectory: "",
        EnvVariables:     env,
    }

    _, err = procman.StartProgram(ctx, auth.Auth(), &guestspec)
    if err != nil {
        return err
    }

    return nil
}
// createVirtualMachine creates a new VirtualMachine.
func (vm *virtualMachine) createVirtualMachine(c *govmomi.Client) error {
    dc, err := getDatacenter(c, vm.datacenter)
    if err != nil {
        return err
    }

    finder := find.NewFinder(c.Client, true)
    finder = finder.SetDatacenter(dc)

    var resourcePool *object.ResourcePool
    if vm.resourcePool == "" {
        if vm.cluster == "" {
            resourcePool, err = finder.DefaultResourcePool(context.TODO())
            if err != nil {
                return err
            }
        } else {
            resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources")
            if err != nil {
                return err
            }
        }
    } else {
        resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool)
        if err != nil {
            return err
        }
    }
    log.Printf("[DEBUG] resource pool: %#v", resourcePool)

    dcFolders, err := dc.Folders(context.TODO())
    if err != nil {
        return err
    }

    log.Printf("[DEBUG] folder: %#v", vm.folder)
    folder := dcFolders.VmFolder
    if len(vm.folder) > 0 {
        si := object.NewSearchIndex(c.Client)
        folderRef, err := si.FindByInventoryPath(
            context.TODO(), fmt.Sprintf("%v/vm/%v", vm.datacenter, vm.folder))
        if err != nil {
            return fmt.Errorf("Error reading folder %s: %s", vm.folder, err)
        } else if folderRef == nil {
            return fmt.Errorf("Cannot find folder %s", vm.folder)
        } else {
            folder = folderRef.(*object.Folder)
        }
    }

    // network
    networkDevices := []types.BaseVirtualDeviceConfigSpec{}
    for _, network := range vm.networkInterfaces {
        // network device
        nd, err := buildNetworkDevice(finder, network.label, "e1000")
        if err != nil {
            return err
        }
        networkDevices = append(networkDevices, nd)
    }

    // make config spec
    configSpec := types.VirtualMachineConfigSpec{
        GuestId:           "otherLinux64Guest",
        Name:              vm.name,
        NumCPUs:           vm.vcpu,
        NumCoresPerSocket: 1,
        MemoryMB:          vm.memoryMb,
        MemoryAllocation: &types.ResourceAllocationInfo{
            Reservation: vm.memoryAllocation.reservation,
        },
        DeviceChange: networkDevices,
    }
    log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)

    // make ExtraConfig
    log.Printf("[DEBUG] virtual machine Extra Config spec start")
    if len(vm.customConfigurations) > 0 {
        var ov []types.BaseOptionValue
        for k, v := range vm.customConfigurations {
            key := k
            value := v
            o := types.OptionValue{
                Key:   key,
                Value: &value,
            }
            log.Printf("[DEBUG] virtual machine Extra Config spec: %s,%s", k, v)
            ov = append(ov, &o)
        }
        configSpec.ExtraConfig = ov
        log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig)
    }

    var datastore *object.Datastore
    if vm.datastore == "" {
        datastore, err = finder.DefaultDatastore(context.TODO())
        if err != nil {
            return err
        }
    } else {
        datastore, err = finder.Datastore(context.TODO(), vm.datastore)
        if err != nil {
            // TODO: datastore cluster support in govmomi finder function
            d, err := getDatastoreObject(c, dcFolders, vm.datastore)
            if err != nil {
                return err
            }

            if d.Type == "StoragePod" {
                sp := object.StoragePod{
                    Folder: object.NewFolder(c.Client, d),
                }
                sps := buildStoragePlacementSpecCreate(dcFolders, resourcePool, sp, configSpec)
                datastore, err = findDatastore(c, sps)
                if err != nil {
                    return err
                }
            } else {
                datastore = object.NewDatastore(c.Client, d)
            }
        }
    }
    log.Printf("[DEBUG] datastore: %#v", datastore)

    var mds mo.Datastore
    if err = datastore.Properties(context.TODO(), datastore.Reference(), []string{"name"}, &mds); err != nil {
        return err
    }
    log.Printf("[DEBUG] datastore: %#v", mds.Name)

    scsi, err := object.SCSIControllerTypes().CreateSCSIController("scsi")
    if err != nil {
        log.Printf("[ERROR] %s", err)
        return err
    }
    configSpec.DeviceChange = append(configSpec.DeviceChange, &types.VirtualDeviceConfigSpec{
        Operation: types.VirtualDeviceConfigSpecOperationAdd,
        Device:    scsi,
    })

    configSpec.Files = &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", mds.Name)}

    task, err := folder.CreateVM(context.TODO(), configSpec, resourcePool, nil)
    if err != nil {
        log.Printf("[ERROR] %s", err)
        return err
    }

    err = task.Wait(context.TODO())
    if err != nil {
        log.Printf("[ERROR] %s", err)
        return err
    }

    newVM, err := finder.VirtualMachine(context.TODO(), vm.Path())
    if err != nil {
        return err
    }
    log.Printf("[DEBUG] new vm: %v", newVM)

    log.Printf("[DEBUG] add hard disk: %v", vm.hardDisks)
    for _, hd := range vm.hardDisks {
        log.Printf("[DEBUG] add hard disk: %v", hd.size)
        log.Printf("[DEBUG] add hard disk: %v", hd.iops)
        err = addHardDisk(newVM, hd.size, hd.iops, "thin", datastore, hd.vmdkPath)
        if err != nil {
            return err
        }
    }

    // Create the cdroms if needed.
    if err := createCdroms(newVM, vm.cdroms); err != nil {
        return err
    }

    if vm.bootableVmdk {
        newVM.PowerOn(context.TODO())
        ip, err := newVM.WaitForIP(context.TODO())
        if err != nil {
            return err
        }
        log.Printf("[DEBUG] ip address: %v", ip)
    }

    return nil
}
// createVirtualMachine creates a new VirtualMachine.
func (vm *virtualMachine) createVirtualMachine(c *govmomi.Client) error {
    dc, err := getDatacenter(c, vm.datacenter)
    if err != nil {
        return err
    }

    finder := find.NewFinder(c.Client, true)
    finder = finder.SetDatacenter(dc)

    var resourcePool *object.ResourcePool
    if vm.resourcePool == "" {
        if vm.cluster == "" {
            resourcePool, err = finder.DefaultResourcePool(context.TODO())
            if err != nil {
                return err
            }
        } else {
            resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources")
            if err != nil {
                return err
            }
        }
    } else {
        resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool)
        if err != nil {
            return err
        }
    }
    log.Printf("[DEBUG] resource pool: %#v", resourcePool)

    dcFolders, err := dc.Folders(context.TODO())
    if err != nil {
        return err
    }

    // network
    networkDevices := []types.BaseVirtualDeviceConfigSpec{}
    for _, network := range vm.networkInterfaces {
        // network device
        nd, err := createNetworkDevice(finder, network.label, "e1000")
        if err != nil {
            return err
        }
        networkDevices = append(networkDevices, nd)
    }

    // make config spec
    configSpec := types.VirtualMachineConfigSpec{
        GuestId:           "otherLinux64Guest",
        Name:              vm.name,
        NumCPUs:           vm.vcpu,
        NumCoresPerSocket: 1,
        MemoryMB:          vm.memoryMb,
        DeviceChange:      networkDevices,
    }
    log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)

    var datastore *object.Datastore
    if vm.datastore == "" {
        datastore, err = finder.DefaultDatastore(context.TODO())
        if err != nil {
            return err
        }
    } else {
        datastore, err = finder.Datastore(context.TODO(), vm.datastore)
        if err != nil {
            // TODO: datastore cluster support in govmomi finder function
            d, err := getDatastoreObject(c, dcFolders, vm.datastore)
            if err != nil {
                return err
            }

            if d.Type == "StoragePod" {
                sp := object.StoragePod{
                    Folder: object.NewFolder(c.Client, d),
                }
                sps := createStoragePlacementSpecCreate(dcFolders, resourcePool, sp, configSpec)
                datastore, err = findDatastore(c, sps)
                if err != nil {
                    return err
                }
            } else {
                datastore = object.NewDatastore(c.Client, d)
            }
        }
    }
    log.Printf("[DEBUG] datastore: %#v", datastore)

    var mds mo.Datastore
    if err = datastore.Properties(context.TODO(), datastore.Reference(), []string{"name"}, &mds); err != nil {
        return err
    }
    log.Printf("[DEBUG] datastore: %#v", mds.Name)

    scsi, err := object.SCSIControllerTypes().CreateSCSIController("scsi")
    if err != nil {
        log.Printf("[ERROR] %s", err)
        return err
    }
    configSpec.DeviceChange = append(configSpec.DeviceChange, &types.VirtualDeviceConfigSpec{
        Operation: types.VirtualDeviceConfigSpecOperationAdd,
        Device:    scsi,
    })

    configSpec.Files = &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", mds.Name)}

    task, err := dcFolders.VmFolder.CreateVM(context.TODO(), configSpec, resourcePool, nil)
    if err != nil {
        log.Printf("[ERROR] %s", err)
        return err
    }

    err = task.Wait(context.TODO())
    if err != nil {
        log.Printf("[ERROR] %s", err)
        return err
    }

    newVM, err := finder.VirtualMachine(context.TODO(), vm.name)
    if err != nil {
        return err
    }
    log.Printf("[DEBUG] new vm: %v", newVM)

    log.Printf("[DEBUG] add hard disk: %v", vm.hardDisks)
    for _, hd := range vm.hardDisks {
        log.Printf("[DEBUG] add hard disk: %v", hd.size)
        log.Printf("[DEBUG] add hard disk: %v", hd.iops)
        err = addHardDisk(newVM, hd.size, hd.iops, "thin")
        if err != nil {
            return err
        }
    }

    return nil
}
func (vm *virtualMachine) setupVirtualMachine(c *govmomi.Client) error {
    dc, err := getDatacenter(c, vm.datacenter)
    if err != nil {
        return err
    }

    finder := find.NewFinder(c.Client, true)
    finder = finder.SetDatacenter(dc)

    var template *object.VirtualMachine
    var template_mo mo.VirtualMachine
    if vm.template != "" {
        template, err = finder.VirtualMachine(context.TODO(), vm.template)
        if err != nil {
            return err
        }
        log.Printf("[DEBUG] template: %#v", template)
        err = template.Properties(context.TODO(), template.Reference(), []string{"parent", "config.template", "config.guestId", "resourcePool", "snapshot", "guest.toolsVersionStatus2", "config.guestFullName"}, &template_mo)
        if err != nil {
            return err
        }
    }

    var resourcePool *object.ResourcePool
    if vm.resourcePool == "" {
        if vm.cluster == "" {
            resourcePool, err = finder.DefaultResourcePool(context.TODO())
            if err != nil {
                return err
            }
        } else {
            resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources")
            if err != nil {
                return err
            }
        }
    } else {
        resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool)
        if err != nil {
            return err
        }
    }
    log.Printf("[DEBUG] resource pool: %#v", resourcePool)

    dcFolders, err := dc.Folders(context.TODO())
    if err != nil {
        return err
    }

    log.Printf("[DEBUG] folder: %#v", vm.folder)
    folder := dcFolders.VmFolder
    if len(vm.folder) > 0 {
        si := object.NewSearchIndex(c.Client)
        folderRef, err := si.FindByInventoryPath(
            context.TODO(), fmt.Sprintf("%v/vm/%v", vm.datacenter, vm.folder))
        if err != nil {
            return fmt.Errorf("Error reading folder %s: %s", vm.folder, err)
        } else if folderRef == nil {
            return fmt.Errorf("Cannot find folder %s", vm.folder)
        } else {
            folder = folderRef.(*object.Folder)
        }
    }

    // make config spec
    configSpec := types.VirtualMachineConfigSpec{
        Name:              vm.name,
        NumCPUs:           vm.vcpu,
        NumCoresPerSocket: 1,
        MemoryMB:          vm.memoryMb,
        MemoryAllocation: &types.ResourceAllocationInfo{
            Reservation: vm.memoryAllocation.reservation,
        },
    }
    if vm.template == "" {
        configSpec.GuestId = "otherLinux64Guest"
    }
    log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)

    // make ExtraConfig
    log.Printf("[DEBUG] virtual machine Extra Config spec start")
    if len(vm.customConfigurations) > 0 {
        var ov []types.BaseOptionValue
        for k, v := range vm.customConfigurations {
            key := k
            value := v
            o := types.OptionValue{
                Key:   key,
                Value: &value,
            }
            log.Printf("[DEBUG] virtual machine Extra Config spec: %s,%s", k, v)
            ov = append(ov, &o)
        }
        configSpec.ExtraConfig = ov
        log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig)
    }

    var datastore *object.Datastore
    if vm.datastore == "" {
        datastore, err = finder.DefaultDatastore(context.TODO())
        if err != nil {
            return err
        }
    } else {
        datastore, err = finder.Datastore(context.TODO(), vm.datastore)
        if err != nil {
            // TODO: datastore cluster support in govmomi finder function
            d, err := getDatastoreObject(c, dcFolders, vm.datastore)
            if err != nil {
                return err
            }

            if d.Type == "StoragePod" {
                sp := object.StoragePod{
                    Folder: object.NewFolder(c.Client, d),
                }

                var sps types.StoragePlacementSpec
                if vm.template != "" {
                    sps = buildStoragePlacementSpecClone(c, dcFolders, template, resourcePool, sp)
                } else {
                    sps = buildStoragePlacementSpecCreate(dcFolders, resourcePool, sp, configSpec)
                }

                datastore, err = findDatastore(c, sps)
                if err != nil {
                    return err
                }
            } else {
                datastore = object.NewDatastore(c.Client, d)
            }
        }
    }
    log.Printf("[DEBUG] datastore: %#v", datastore)

    // network
    networkDevices := []types.BaseVirtualDeviceConfigSpec{}
    networkConfigs := []types.CustomizationAdapterMapping{}
    for _, network := range vm.networkInterfaces {
        // network device
        var networkDeviceType string
        if vm.template == "" {
            networkDeviceType = "e1000"
        } else {
            networkDeviceType = "vmxnet3"
        }
        nd, err := buildNetworkDevice(finder, network.label, networkDeviceType)
        if err != nil {
            return err
        }
        networkDevices = append(networkDevices, nd)

        if vm.template != "" {
            var ipSetting types.CustomizationIPSettings
            if network.ipv4Address == "" {
                ipSetting.Ip = &types.CustomizationDhcpIpGenerator{}
            } else {
                if network.ipv4PrefixLength == 0 {
                    return fmt.Errorf("Error: ipv4_prefix_length argument is empty.")
                }
                m := net.CIDRMask(network.ipv4PrefixLength, 32)
                sm := net.IPv4(m[0], m[1], m[2], m[3])
                subnetMask := sm.String()
                log.Printf("[DEBUG] ipv4 gateway: %v\n", network.ipv4Gateway)
                log.Printf("[DEBUG] ipv4 address: %v\n", network.ipv4Address)
                log.Printf("[DEBUG] ipv4 prefix length: %v\n", network.ipv4PrefixLength)
                log.Printf("[DEBUG] ipv4 subnet mask: %v\n", subnetMask)
                ipSetting.Gateway = []string{
                    network.ipv4Gateway,
                }
                ipSetting.Ip = &types.CustomizationFixedIp{
                    IpAddress: network.ipv4Address,
                }
                ipSetting.SubnetMask = subnetMask
            }

            ipv6Spec := &types.CustomizationIPSettingsIpV6AddressSpec{}
            if network.ipv6Address == "" {
                ipv6Spec.Ip = []types.BaseCustomizationIpV6Generator{
                    &types.CustomizationDhcpIpV6Generator{},
                }
            } else {
                log.Printf("[DEBUG] ipv6 gateway: %v\n", network.ipv6Gateway)
                log.Printf("[DEBUG] ipv6 address: %v\n", network.ipv6Address)
                log.Printf("[DEBUG] ipv6 prefix length: %v\n", network.ipv6PrefixLength)

                ipv6Spec.Ip = []types.BaseCustomizationIpV6Generator{
                    &types.CustomizationFixedIpV6{
                        IpAddress:  network.ipv6Address,
                        SubnetMask: int32(network.ipv6PrefixLength),
                    },
                }
                ipv6Spec.Gateway = []string{network.ipv6Gateway}
            }
            ipSetting.IpV6Spec = ipv6Spec

            // network config
            config := types.CustomizationAdapterMapping{
                Adapter: ipSetting,
            }
            networkConfigs = append(networkConfigs, config)
        }
    }
    log.Printf("[DEBUG] network devices: %v", networkDevices)
    log.Printf("[DEBUG] network configs: %v", networkConfigs)

    var task *object.Task
    if vm.template == "" {
        var mds mo.Datastore
        if err = datastore.Properties(context.TODO(), datastore.Reference(), []string{"name"}, &mds); err != nil {
            return err
        }
        log.Printf("[DEBUG] datastore: %#v", mds.Name)

        scsi, err := object.SCSIControllerTypes().CreateSCSIController("scsi")
        if err != nil {
            log.Printf("[ERROR] %s", err)
            return err
        }

        configSpec.DeviceChange = append(configSpec.DeviceChange, &types.VirtualDeviceConfigSpec{
            Operation: types.VirtualDeviceConfigSpecOperationAdd,
            Device:    scsi,
        })

        configSpec.Files = &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", mds.Name)}

        task, err = folder.CreateVM(context.TODO(), configSpec, resourcePool, nil)
        if err != nil {
            log.Printf("[ERROR] %s", err)
            return err
        }

        err = task.Wait(context.TODO())
        if err != nil {
            log.Printf("[ERROR] %s", err)
            return err
        }
    } else {
        relocateSpec, err := buildVMRelocateSpec(resourcePool, datastore, template, vm.linkedClone, vm.hardDisks[0].initType)
        if err != nil {
            return err
        }
        log.Printf("[DEBUG] relocate spec: %v", relocateSpec)

        // make vm clone spec
        cloneSpec := types.VirtualMachineCloneSpec{
            Location: relocateSpec,
            Template: false,
            Config:   &configSpec,
            PowerOn:  false,
        }
        if vm.linkedClone {
            if template_mo.Snapshot == nil {
                return fmt.Errorf("`linkedClone=true`, but image VM has no snapshots")
            }
            cloneSpec.Snapshot = template_mo.Snapshot.CurrentSnapshot
        }
        log.Printf("[DEBUG] clone spec: %v", cloneSpec)

        task, err = template.Clone(context.TODO(), folder, vm.name, cloneSpec)
        if err != nil {
            return err
        }
    }

    err = task.Wait(context.TODO())
    if err != nil {
        log.Printf("[ERROR] %s", err)
        return err
    }

    newVM, err := finder.VirtualMachine(context.TODO(), vm.Path())
    if err != nil {
        return err
    }
    log.Printf("[DEBUG] new vm: %v", newVM)

    devices, err := newVM.Device(context.TODO())
    if err != nil {
        log.Printf("[DEBUG] Template devices can't be found")
        return err
    }

    for _, dvc := range devices {
        // Issue 3559/3560: Delete all ethernet devices to add the correct ones later
        if devices.Type(dvc) == "ethernet" {
            err := newVM.RemoveDevice(context.TODO(), false, dvc)
            if err != nil {
                return err
            }
        }
    }

    // Add Network devices
    for _, dvc := range networkDevices {
        err := newVM.AddDevice(
            context.TODO(), dvc.GetVirtualDeviceConfigSpec().Device)
        if err != nil {
            return err
        }
    }

    // Create the cdroms if needed.
    if err := createCdroms(newVM, vm.cdroms); err != nil {
        return err
    }

    firstDisk := 0
    if vm.template != "" {
        firstDisk++
    }
    for i := firstDisk; i < len(vm.hardDisks); i++ {
        log.Printf("[DEBUG] disk index: %v", i)
        err = addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, vm.hardDisks[i].initType, datastore, vm.hardDisks[i].vmdkPath)
        if err != nil {
            return err
        }
    }

    if vm.skipCustomization || vm.template == "" {
        log.Printf("[DEBUG] VM customization skipped")
    } else {
        var identity_options types.BaseCustomizationIdentitySettings
        if strings.HasPrefix(template_mo.Config.GuestId, "win") {
            var timeZone int
            if vm.timeZone == "Etc/UTC" {
                vm.timeZone = "085"
            }
            timeZone, err := strconv.Atoi(vm.timeZone)
            if err != nil {
                return fmt.Errorf("Error converting TimeZone: %s", err)
            }

            guiUnattended := types.CustomizationGuiUnattended{
                AutoLogon:      false,
                AutoLogonCount: 1,
                TimeZone:       int32(timeZone),
            }

            customIdentification := types.CustomizationIdentification{}

            userData := types.CustomizationUserData{
                ComputerName: &types.CustomizationFixedName{
                    Name: strings.Split(vm.name, ".")[0],
                },
                ProductId: vm.windowsOptionalConfig.productKey,
                FullName:  "terraform",
                OrgName:   "terraform",
            }

            if vm.windowsOptionalConfig.domainUserPassword != "" && vm.windowsOptionalConfig.domainUser != "" && vm.windowsOptionalConfig.domain != "" {
                customIdentification.DomainAdminPassword = &types.CustomizationPassword{
                    PlainText: true,
                    Value:     vm.windowsOptionalConfig.domainUserPassword,
                }
                customIdentification.DomainAdmin = vm.windowsOptionalConfig.domainUser
                customIdentification.JoinDomain = vm.windowsOptionalConfig.domain
            }

            if vm.windowsOptionalConfig.adminPassword != "" {
                guiUnattended.Password = &types.CustomizationPassword{
                    PlainText: true,
                    Value:     vm.windowsOptionalConfig.adminPassword,
                }
            }

            identity_options = &types.CustomizationSysprep{
                GuiUnattended:  guiUnattended,
                Identification: customIdentification,
                UserData:       userData,
            }
        } else {
            identity_options = &types.CustomizationLinuxPrep{
                HostName: &types.CustomizationFixedName{
                    Name: strings.Split(vm.name, ".")[0],
                },
                Domain:     vm.domain,
                TimeZone:   vm.timeZone,
                HwClockUTC: types.NewBool(true),
            }
        }

        // create CustomizationSpec
        customSpec := types.CustomizationSpec{
            Identity: identity_options,
            GlobalIPSettings: types.CustomizationGlobalIPSettings{
                DnsSuffixList: vm.dnsSuffixes,
                DnsServerList: vm.dnsServers,
            },
            NicSettingMap: networkConfigs,
        }
        log.Printf("[DEBUG] custom spec: %v", customSpec)

        log.Printf("[DEBUG] VM customization starting")
        taskb, err := newVM.Customize(context.TODO(), customSpec)
        if err != nil {
            return err
        }
        _, err = taskb.WaitForResult(context.TODO(), nil)
        if err != nil {
            return err
        }
        log.Printf("[DEBUG] VM customization finished")
    }

    if vm.bootableVmdk || vm.template != "" {
        newVM.PowerOn(context.TODO())
    }

    return nil
}
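// The guest customization code above converts an IPv4 prefix length to a
// dotted-quad subnet mask with net.CIDRMask. Isolated into a small runnable
// sketch (illustrative only, not part of the provider):

package main

import (
    "fmt"
    "net"
)

// subnetMask turns an IPv4 prefix length (e.g. 24) into a dotted-quad mask
// (e.g. "255.255.255.0"), the same conversion used when building the
// CustomizationFixedIp settings above.
func subnetMask(prefixLength int) string {
    m := net.CIDRMask(prefixLength, 32)
    return net.IPv4(m[0], m[1], m[2], m[3]).String()
}

func main() {
    fmt.Println(subnetMask(24)) // 255.255.255.0
    fmt.Println(subnetMask(16)) // 255.255.0.0
}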
// createVirtualMachine creates a new VirtualMachine.
func (vm *virtualMachine) createVirtualMachine(c *govmomi.Client) error {
    var dc *object.Datacenter
    var err error

    finder := find.NewFinder(c.Client, true)

    if vm.datacenter != "" {
        dc, err = finder.Datacenter(context.TODO(), vm.datacenter)
        if err != nil {
            return err
        }
    } else {
        dc, err = finder.DefaultDatacenter(context.TODO())
        if err != nil {
            return err
        }
    }
    finder = finder.SetDatacenter(dc)

    var resourcePool *object.ResourcePool
    if vm.resourcePool == "" {
        if vm.cluster == "" {
            resourcePool, err = finder.DefaultResourcePool(context.TODO())
            if err != nil {
                return err
            }
        } else {
            resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources")
            if err != nil {
                return err
            }
        }
    } else {
        resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool)
        if err != nil {
            return err
        }
    }
    log.Printf("[DEBUG] resource pool: %#v", resourcePool)

    dcFolders, err := dc.Folders(context.TODO())
    if err != nil {
        return err
    }

    // network
    networkDevices := []types.BaseVirtualDeviceConfigSpec{}
    for _, network := range vm.networkInterfaces {
        // network device
        nd, err := createNetworkDevice(finder, network.label, "e1000")
        if err != nil {
            return err
        }
        networkDevices = append(networkDevices, nd)
    }

    // make config spec
    configSpec := types.VirtualMachineConfigSpec{
        GuestId:           "otherLinux64Guest",
        Name:              vm.name,
        NumCPUs:           vm.vcpu,
        NumCoresPerSocket: 1,
        MemoryMB:          vm.memoryMb,
        DeviceChange:      networkDevices,
    }
    log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)

    var datastore *object.Datastore
    if vm.datastore == "" {
        datastore, err = finder.DefaultDatastore(context.TODO())
        if err != nil {
            return err
        }
    } else {
        s := object.NewSearchIndex(c.Client)
        ref, err := s.FindChild(context.TODO(), dcFolders.DatastoreFolder, vm.datastore)
        if err != nil {
            return err
        }
        log.Printf("[DEBUG] findDatastore: reference: %#v", ref)

        mor := ref.Reference()
        if mor.Type == "StoragePod" {
            storagePod := object.NewFolder(c.Client, mor)

            vmfr := dcFolders.VmFolder.Reference()
            rpr := resourcePool.Reference()
            spr := storagePod.Reference()

            sps := types.StoragePlacementSpec{
                Type:       "create",
                ConfigSpec: &configSpec,
                PodSelectionSpec: types.StorageDrsPodSelectionSpec{
                    StoragePod: &spr,
                },
                Folder:       &vmfr,
                ResourcePool: &rpr,
            }
            log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps)

            srm := object.NewStorageResourceManager(c.Client)
            rds, err := srm.RecommendDatastores(context.TODO(), sps)
            if err != nil {
                return err
            }
            log.Printf("[DEBUG] findDatastore: recommendDatastores: %#v\n", rds)

            spa := rds.Recommendations[0].Action[0].(*types.StoragePlacementAction)
            datastore = object.NewDatastore(c.Client, spa.Destination)
        } else {
            datastore = object.NewDatastore(c.Client, mor)
        }
    }
    log.Printf("[DEBUG] datastore: %#v", datastore)

    var mds mo.Datastore
    if err = datastore.Properties(context.TODO(), datastore.Reference(), []string{"name"}, &mds); err != nil {
        return err
    }
    log.Printf("[DEBUG] datastore: %#v", mds.Name)

    scsi, err := object.SCSIControllerTypes().CreateSCSIController("scsi")
    if err != nil {
        log.Printf("[ERROR] %s", err)
        return err
    }
    configSpec.DeviceChange = append(configSpec.DeviceChange, &types.VirtualDeviceConfigSpec{
        Operation: types.VirtualDeviceConfigSpecOperationAdd,
        Device:    scsi,
    })

    configSpec.Files = &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", mds.Name)}

    task, err := dcFolders.VmFolder.CreateVM(context.TODO(), configSpec, resourcePool, nil)
    if err != nil {
        log.Printf("[ERROR] %s", err)
        return err
    }

    err = task.Wait(context.TODO())
    if err != nil {
        log.Printf("[ERROR] %s", err)
        return err
    }

    newVM, err := finder.VirtualMachine(context.TODO(), vm.name)
    if err != nil {
        return err
    }
    log.Printf("[DEBUG] new vm: %v", newVM)

    log.Printf("[DEBUG] add hard disk: %v", vm.hardDisks)
    for _, hd := range vm.hardDisks {
        log.Printf("[DEBUG] add hard disk: %v", hd.size)
        log.Printf("[DEBUG] add hard disk: %v", hd.iops)
        err = addHardDisk(newVM, hd.size, hd.iops, "thin")
        if err != nil {
            return err
        }
    }

    return nil
}