Example 1
func (m *Manager) Attach(op trace.Operation, disk *types.VirtualDisk) error {
	deviceList := object.VirtualDeviceList{}
	deviceList = append(deviceList, disk)

	changeSpec, err := deviceList.ConfigSpec(types.VirtualDeviceConfigSpecOperationAdd)
	if err != nil {
		return err
	}

	machineSpec := types.VirtualMachineConfigSpec{}
	machineSpec.DeviceChange = append(machineSpec.DeviceChange, changeSpec...)

	m.reconfig.Lock()
	_, err = m.vm.WaitForResult(op, func(ctx context.Context) (tasks.Task, error) {
		t, er := m.vm.Reconfigure(ctx, machineSpec)
		if t != nil {
			op.Debugf("Attach reconfigure task=%s", t.Reference())
		}

		return t, er
	})
	m.reconfig.Unlock()

	if err != nil {
		op.Errorf("vmdk storage driver failed to attach disk: %s", errors.ErrorStack(err))
		return errors.Trace(err)
	}
	return nil
}
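The same hot-add flow can be written against plain govmomi without the manager plumbing. A minimal sketch, assuming a connected *object.VirtualMachine (vm), an already-built *types.VirtualDisk (disk), and the usual context/object/types imports; the function name is illustrative:
func attachDisk(ctx context.Context, vm *object.VirtualMachine, disk *types.VirtualDisk) error {
	devices := object.VirtualDeviceList{disk}

	// ConfigSpec wraps each device in a VirtualDeviceConfigSpec with the "add" operation.
	deviceChange, err := devices.ConfigSpec(types.VirtualDeviceConfigSpecOperationAdd)
	if err != nil {
		return err
	}

	spec := types.VirtualMachineConfigSpec{DeviceChange: deviceChange}

	task, err := vm.Reconfigure(ctx, spec)
	if err != nil {
		return err
	}

	// Block until the reconfigure task finishes.
	return task.Wait(ctx)
}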
Example 2
func (v VirtualMachine) configureDevice(ctx context.Context, op types.VirtualDeviceConfigSpecOperation, fop types.VirtualDeviceConfigSpecFileOperation, devices ...types.BaseVirtualDevice) error {
	spec := types.VirtualMachineConfigSpec{}

	for _, device := range devices {
		config := &types.VirtualDeviceConfigSpec{
			Device:    device,
			Operation: op,
		}

		if disk, ok := device.(*types.VirtualDisk); ok {
			config.FileOperation = fop

			// Special case to attach an existing disk
			if op == types.VirtualDeviceConfigSpecOperationAdd && disk.CapacityInKB == 0 {
				childDisk := false
				if b, ok := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {
					childDisk = b.Parent != nil
				}

				if !childDisk {
					config.FileOperation = "" // existing disk
				}
			}
		}

		spec.DeviceChange = append(spec.DeviceChange, config)
	}

	task, err := v.Reconfigure(ctx, spec)
	if err != nil {
		return err
	}

	return task.Wait(ctx)
}
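configureDevice is the unexported core behind govmomi's AddDevice, RemoveDevice and EditDevice helpers. A minimal sketch that drives the same path through the exported API, assuming a connected *object.VirtualMachine (vm) and an *object.Datastore (ds); the VMDK path, size, and function name are illustrative:
func addNewDisk(ctx context.Context, vm *object.VirtualMachine, ds *object.Datastore) error {
	devices, err := vm.Device(ctx)
	if err != nil {
		return err
	}

	controller, err := devices.FindDiskController("scsi")
	if err != nil {
		return err
	}

	// CreateDisk returns a VirtualDisk backed by a file on the given datastore.
	disk := devices.CreateDisk(controller, ds.Reference(), ds.Path("example-vm/extra.vmdk"))
	disk.CapacityInKB = 1 * 1024 * 1024 // 1 GiB

	// AddDevice funnels into configureDevice with the "add" operation; since
	// CapacityInKB is non-zero, the backing file is created rather than reused.
	return vm.AddDevice(ctx, disk)
}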
Example 3
func (m *Manager) Detach(op trace.Operation, d *VirtualDisk) error {
	defer trace.End(trace.Begin(d.DevicePath))
	op.Infof("Detaching disk %s", d.DevicePath)

	d.lock()
	defer d.unlock()

	if !d.Attached() {
		op.Infof("Disk %s is already detached", d.DevicePath)
		return nil
	}

	if err := d.canBeDetached(); err != nil {
		return errors.Trace(err)
	}

	spec := types.VirtualMachineConfigSpec{}

	disk, err := findDisk(op, m.vm, d.DatastoreURI)
	if err != nil {
		return errors.Trace(err)
	}

	config := []types.BaseVirtualDeviceConfigSpec{
		&types.VirtualDeviceConfigSpec{
			Device:    disk,
			Operation: types.VirtualDeviceConfigSpecOperationRemove,
		},
	}

	spec.DeviceChange = config

	m.reconfig.Lock()
	_, err = m.vm.WaitForResult(op, func(ctx context.Context) (tasks.Task, error) {
		t, er := m.vm.Reconfigure(ctx, spec)
		if t != nil {
			op.Debugf("Detach reconfigure task=%s", t.Reference())
		}

		return t, er
	})
	m.reconfig.Unlock()

	if err != nil {
		op.Errorf("%s", err)
		log.Warnf("detach for %s failed with %s", d.DevicePath, errors.ErrorStack(err))
		return errors.Trace(err)
	}

	// Free one slot on the maxAttached channel (a non-blocking semaphore release) now that the disk is detached.
	select {
	case <-m.maxAttached:
	default:
	}

	return d.setDetached()
}
Example 4
// SetBootOptions reconfigures the VirtualMachine with the given options.
func (v VirtualMachine) SetBootOptions(ctx context.Context, options *types.VirtualMachineBootOptions) error {
	spec := types.VirtualMachineConfigSpec{}

	spec.BootOptions = options

	task, err := v.Reconfigure(ctx, spec)
	if err != nil {
		return err
	}

	return task.Wait(ctx)
}
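A short usage sketch for SetBootOptions, assuming a connected *object.VirtualMachine (vm); the boot delay value and function name are illustrative:
func delayBoot(ctx context.Context, vm *object.VirtualMachine) error {
	options := &types.VirtualMachineBootOptions{
		BootDelay:      3000, // milliseconds to pause at the firmware screen
		EnterBIOSSetup: types.NewBool(false),
	}

	// Issues a Reconfigure with only BootOptions set and waits for completion.
	return vm.SetBootOptions(ctx, options)
}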
Example 5
func (m *Manager) Detach(ctx context.Context, d *VirtualDisk) error {
	defer trace.End(trace.Begin(d.DevicePath))
	log.Infof("Detaching disk %s", d.DevicePath)

	d.lock()
	defer d.unlock()

	if !d.Attached() {
		log.Infof("Disk %s is already detached", d.DevicePath)
		return nil
	}

	if err := d.canBeDetached(); err != nil {
		return errors.Trace(err)
	}

	spec := types.VirtualMachineConfigSpec{}

	disk, err := findDisk(ctx, m.vm, d.DatastoreURI)
	if err != nil {
		return errors.Trace(err)
	}

	config := []types.BaseVirtualDeviceConfigSpec{
		&types.VirtualDeviceConfigSpec{
			Device:    disk,
			Operation: types.VirtualDeviceConfigSpecOperationRemove,
		},
	}

	spec.DeviceChange = config

	err = tasks.Wait(ctx, func(ctx context.Context) (tasks.Waiter, error) {
		return m.vm.Reconfigure(ctx, spec)
	})
	if err != nil {
		log.Warnf("detach for %s failed with %s", d.DevicePath, errors.ErrorStack(err))
		return errors.Trace(err)
	}

	// Free one slot on the maxAttached channel (a non-blocking semaphore release) now that the disk is detached.
	select {
	case <-m.maxAttached:
	default:
	}

	return d.setDetached()
}
Example 6
func (cmd *create) createVM(ctx context.Context) (*object.Task, error) {
	var devices object.VirtualDeviceList
	var err error

	spec := types.VirtualMachineConfigSpec{
		Name:     cmd.name,
		GuestId:  cmd.guestID,
		Files:    &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", cmd.Datastore.Name())},
		NumCPUs:  cmd.cpus,
		MemoryMB: int64(cmd.memory),
	}

	devices, err = cmd.addStorage(nil)
	if err != nil {
		return nil, err
	}

	devices, err = cmd.addNetwork(devices)
	if err != nil {
		return nil, err
	}

	deviceChange, err := devices.ConfigSpec(types.VirtualDeviceConfigSpecOperationAdd)
	if err != nil {
		return nil, err
	}

	spec.DeviceChange = deviceChange

	if !cmd.force {
		vmxPath := fmt.Sprintf("%s/%s.vmx", cmd.name, cmd.name)

		_, err := cmd.Datastore.Stat(ctx, vmxPath)
		if err == nil {
			dsPath := cmd.Datastore.Path(vmxPath)
			return nil, fmt.Errorf("File %s already exists", dsPath)
		}
	}

	folders, err := cmd.Datacenter.Folders(ctx)
	if err != nil {
		return nil, err
	}

	return folders.VmFolder.CreateVM(ctx, spec, cmd.ResourcePool, cmd.HostSystem)
}
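A caller would typically wait on the returned task and wrap the result; a sketch under the assumption that the command value exposes its client as cmd.Client (that field name is not shown above):
func createAndWait(ctx context.Context, cmd *create) (*object.VirtualMachine, error) {
	task, err := cmd.createVM(ctx)
	if err != nil {
		return nil, err
	}

	// WaitForResult blocks until CreateVM finishes and yields the new VM's managed object reference.
	info, err := task.WaitForResult(ctx, nil)
	if err != nil {
		return nil, err
	}

	return object.NewVirtualMachine(cmd.Client, info.Result.(types.ManagedObjectReference)), nil
}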
Example 7
func (cmd *create) createVM(name string) (*object.Task, error) {
	spec := types.VirtualMachineConfigSpec{
		Name:     name,
		GuestId:  cmd.guestID,
		Files:    &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", cmd.Datastore.Name())},
		NumCPUs:  cmd.cpus,
		MemoryMB: int64(cmd.memory),
	}

	if !cmd.force {
		vmxPath := fmt.Sprintf("%s/%s.vmx", name, name)

		_, err := cmd.Datastore.Stat(context.TODO(), vmxPath)
		if err == nil {
			dsPath := cmd.Datastore.Path(vmxPath)
			return nil, fmt.Errorf("File %s already exists", dsPath)
		}
	}

	if cmd.controller != "ide" {
		scsi, err := object.SCSIControllerTypes().CreateSCSIController(cmd.controller)
		if err != nil {
			return nil, err
		}

		spec.DeviceChange = append(spec.DeviceChange, &types.VirtualDeviceConfigSpec{
			Operation: types.VirtualDeviceConfigSpecOperationAdd,
			Device:    scsi,
		})
	}

	folders, err := cmd.Datacenter.Folders(context.TODO())
	if err != nil {
		return nil, err
	}

	return folders.VmFolder.CreateVM(context.TODO(), spec, cmd.ResourcePool, cmd.HostSystem)
}
// createVirtualMachine creates a new VirtualMachine.
func (vm *virtualMachine) createVirtualMachine(c *govmomi.Client) error {
	dc, err := getDatacenter(c, vm.datacenter)
	if err != nil {
		return err
	}
	finder := find.NewFinder(c.Client, true)
	finder = finder.SetDatacenter(dc)

	var resourcePool *object.ResourcePool
	if vm.resourcePool == "" {
		if vm.cluster == "" {
			resourcePool, err = finder.DefaultResourcePool(context.TODO())
			if err != nil {
				return err
			}
		} else {
			resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources")
			if err != nil {
				return err
			}
		}
	} else {
		resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool)
		if err != nil {
			return err
		}
	}
	log.Printf("[DEBUG] resource pool: %#v", resourcePool)

	dcFolders, err := dc.Folders(context.TODO())
	if err != nil {
		return err
	}

	// network
	networkDevices := []types.BaseVirtualDeviceConfigSpec{}
	for _, network := range vm.networkInterfaces {
		// network device
		nd, err := createNetworkDevice(finder, network.label, "e1000")
		if err != nil {
			return err
		}
		networkDevices = append(networkDevices, nd)
	}

	// make config spec
	configSpec := types.VirtualMachineConfigSpec{
		GuestId:           "otherLinux64Guest",
		Name:              vm.name,
		NumCPUs:           vm.vcpu,
		NumCoresPerSocket: 1,
		MemoryMB:          vm.memoryMb,
		DeviceChange:      networkDevices,
	}
	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)

	var datastore *object.Datastore
	if vm.datastore == "" {
		datastore, err = finder.DefaultDatastore(context.TODO())
		if err != nil {
			return err
		}
	} else {
		datastore, err = finder.Datastore(context.TODO(), vm.datastore)
		if err != nil {
			// TODO: datastore cluster support in govmomi finder function
			d, err := getDatastoreObject(c, dcFolders, vm.datastore)
			if err != nil {
				return err
			}

			if d.Type == "StoragePod" {
				sp := object.StoragePod{
					Folder: object.NewFolder(c.Client, d),
				}
				sps := createStoragePlacementSpecCreate(dcFolders, resourcePool, sp, configSpec)
				datastore, err = findDatastore(c, sps)
				if err != nil {
					return err
				}
			} else {
				datastore = object.NewDatastore(c.Client, d)
			}
		}
	}

	log.Printf("[DEBUG] datastore: %#v", datastore)

	var mds mo.Datastore
	if err = datastore.Properties(context.TODO(), datastore.Reference(), []string{"name"}, &mds); err != nil {
		return err
	}
	log.Printf("[DEBUG] datastore: %#v", mds.Name)
	scsi, err := object.SCSIControllerTypes().CreateSCSIController("scsi")
	if err != nil {
		return err
	}

	configSpec.DeviceChange = append(configSpec.DeviceChange, &types.VirtualDeviceConfigSpec{
		Operation: types.VirtualDeviceConfigSpecOperationAdd,
		Device:    scsi,
	})
	configSpec.Files = &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", mds.Name)}

	task, err := dcFolders.VmFolder.CreateVM(context.TODO(), configSpec, resourcePool, nil)
	if err != nil {
		return err
	}

	err = task.Wait(context.TODO())
	if err != nil {
		return err
	}

	newVM, err := finder.VirtualMachine(context.TODO(), vm.name)
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] new vm: %v", newVM)

	log.Printf("[DEBUG] add hard disk: %v", vm.hardDisks)
	for _, hd := range vm.hardDisks {
		log.Printf("[DEBUG] add hard disk: %v", hd.size)
		log.Printf("[DEBUG] add hard disk: %v", hd.iops)
		err = addHardDisk(newVM, hd.size, hd.iops, "thin")
		if err != nil {
			return err
		}
	}
	return nil
}
// deployVirtualMachine deploys a new VirtualMachine.
func (vm *virtualMachine) deployVirtualMachine(c *govmomi.Client) error {
	dc, err := getDatacenter(c, vm.datacenter)
	if err != nil {
		return err
	}
	finder := find.NewFinder(c.Client, true)
	finder = finder.SetDatacenter(dc)

	template, err := finder.VirtualMachine(context.TODO(), vm.template)
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] template: %#v", template)

	var resourcePool *object.ResourcePool
	if vm.resourcePool == "" {
		if vm.cluster == "" {
			resourcePool, err = finder.DefaultResourcePool(context.TODO())
			if err != nil {
				return err
			}
		} else {
			resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources")
			if err != nil {
				return err
			}
		}
	} else {
		resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool)
		if err != nil {
			return err
		}
	}
	log.Printf("[DEBUG] resource pool: %#v", resourcePool)

	dcFolders, err := dc.Folders(context.TODO())
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] folder: %#v", vm.folder)
	folder := dcFolders.VmFolder
	if len(vm.folder) > 0 {
		si := object.NewSearchIndex(c.Client)
		folderRef, err := si.FindByInventoryPath(
			context.TODO(), fmt.Sprintf("%v/vm/%v", vm.datacenter, vm.folder))
		if err != nil {
			return fmt.Errorf("Error reading folder %s: %s", vm.folder, err)
		} else if folderRef == nil {
			return fmt.Errorf("Cannot find folder %s", vm.folder)
		} else {
			folder = folderRef.(*object.Folder)
		}
	}

	var datastore *object.Datastore
	if vm.datastore == "" {
		datastore, err = finder.DefaultDatastore(context.TODO())
		if err != nil {
			return err
		}
	} else {
		datastore, err = finder.Datastore(context.TODO(), vm.datastore)
		if err != nil {
			// TODO: datastore cluster support in govmomi finder function
			d, err := getDatastoreObject(c, dcFolders, vm.datastore)
			if err != nil {
				return err
			}

			if d.Type == "StoragePod" {
				sp := object.StoragePod{
					Folder: object.NewFolder(c.Client, d),
				}
				sps := buildStoragePlacementSpecClone(c, dcFolders, template, resourcePool, sp)

				datastore, err = findDatastore(c, sps)
				if err != nil {
					return err
				}
			} else {
				datastore = object.NewDatastore(c.Client, d)
			}
		}
	}
	log.Printf("[DEBUG] datastore: %#v", datastore)

	relocateSpec, err := buildVMRelocateSpec(resourcePool, datastore, template, vm.linkedClone, vm.hardDisks[0].initType)
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] relocate spec: %v", relocateSpec)

	// network
	networkDevices := []types.BaseVirtualDeviceConfigSpec{}
	networkConfigs := []types.CustomizationAdapterMapping{}
	for _, network := range vm.networkInterfaces {
		// network device
		nd, err := buildNetworkDevice(finder, network.label, "vmxnet3")
		if err != nil {
			return err
		}
		networkDevices = append(networkDevices, nd)

		// TODO: IPv6 support
		var ipSetting types.CustomizationIPSettings
		if network.ipv4Address == "" {
			ipSetting = types.CustomizationIPSettings{
				Ip: &types.CustomizationDhcpIpGenerator{},
			}
		} else {
			if network.ipv4PrefixLength == 0 {
				return fmt.Errorf("Error: ipv4_prefix_length argument is empty.")
			}
			m := net.CIDRMask(network.ipv4PrefixLength, 32)
			sm := net.IPv4(m[0], m[1], m[2], m[3])
			subnetMask := sm.String()
			log.Printf("[DEBUG] gateway: %v", vm.gateway)
			log.Printf("[DEBUG] ipv4 address: %v", network.ipv4Address)
			log.Printf("[DEBUG] ipv4 prefix length: %v", network.ipv4PrefixLength)
			log.Printf("[DEBUG] ipv4 subnet mask: %v", subnetMask)
			ipSetting = types.CustomizationIPSettings{
				Gateway: []string{
					vm.gateway,
				},
				Ip: &types.CustomizationFixedIp{
					IpAddress: network.ipv4Address,
				},
				SubnetMask: subnetMask,
			}
		}

		// network config
		config := types.CustomizationAdapterMapping{
			Adapter: ipSetting,
		}
		networkConfigs = append(networkConfigs, config)
	}
	log.Printf("[DEBUG] network configs: %v", networkConfigs[0].Adapter)

	// make config spec
	configSpec := types.VirtualMachineConfigSpec{
		NumCPUs:           vm.vcpu,
		NumCoresPerSocket: 1,
		MemoryMB:          vm.memoryMb,
	}

	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)

	log.Printf("[DEBUG] starting extra custom config spec: %v", vm.customConfigurations)

	// make ExtraConfig
	if len(vm.customConfigurations) > 0 {
		var ov []types.BaseOptionValue
		for k, v := range vm.customConfigurations {
			key := k
			value := v
			o := types.OptionValue{
				Key:   key,
				Value: &value,
			}
			ov = append(ov, &o)
		}
		configSpec.ExtraConfig = ov
		log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig)
	}

	var template_mo mo.VirtualMachine
	err = template.Properties(context.TODO(), template.Reference(), []string{"parent", "config.template", "config.guestId", "resourcePool", "snapshot", "guest.toolsVersionStatus2", "config.guestFullName"}, &template_mo)
	if err != nil {
		return fmt.Errorf("Error reading base VM properties: %s", err)
	}

	var identity_options types.BaseCustomizationIdentitySettings
	if strings.HasPrefix(template_mo.Config.GuestId, "win") {
		var timeZone int
		if vm.timeZone == "Etc/UTC" {
			vm.timeZone = "085"
		}
		timeZone, err := strconv.Atoi(vm.timeZone)
		if err != nil {
			return fmt.Errorf("Error converting TimeZone: %s", err)
		}

		guiUnattended := types.CustomizationGuiUnattended{
			AutoLogon:      false,
			AutoLogonCount: 1,
			TimeZone:       timeZone,
		}

		customIdentification := types.CustomizationIdentification{}

		userData := types.CustomizationUserData{
			ComputerName: &types.CustomizationFixedName{
				Name: strings.Split(vm.name, ".")[0],
			},
			ProductId: vm.windowsOptionalConfig.productKey,
			FullName:  "terraform",
			OrgName:   "terraform",
		}

		if vm.windowsOptionalConfig.domainUserPassword != "" && vm.windowsOptionalConfig.domainUser != "" && vm.windowsOptionalConfig.domain != "" {
			customIdentification.DomainAdminPassword = &types.CustomizationPassword{
				PlainText: true,
				Value:     vm.windowsOptionalConfig.domainUserPassword,
			}
			customIdentification.DomainAdmin = vm.windowsOptionalConfig.domainUser
			customIdentification.JoinDomain = vm.windowsOptionalConfig.domain
		}

		if vm.windowsOptionalConfig.adminPassword != "" {
			guiUnattended.Password = &types.CustomizationPassword{
				PlainText: true,
				Value:     vm.windowsOptionalConfig.adminPassword,
			}
		}

		identity_options = &types.CustomizationSysprep{
			GuiUnattended:  guiUnattended,
			Identification: customIdentification,
			UserData:       userData,
		}
	} else {
		identity_options = &types.CustomizationLinuxPrep{
			HostName: &types.CustomizationFixedName{
				Name: strings.Split(vm.name, ".")[0],
			},
			Domain:     vm.domain,
			TimeZone:   vm.timeZone,
			HwClockUTC: types.NewBool(true),
		}
	}

	// create CustomizationSpec
	customSpec := types.CustomizationSpec{
		Identity: identity_options,
		GlobalIPSettings: types.CustomizationGlobalIPSettings{
			DnsSuffixList: vm.dnsSuffixes,
			DnsServerList: vm.dnsServers,
		},
		NicSettingMap: networkConfigs,
	}
	log.Printf("[DEBUG] custom spec: %v", customSpec)

	// make vm clone spec
	cloneSpec := types.VirtualMachineCloneSpec{
		Location: relocateSpec,
		Template: false,
		Config:   &configSpec,
		PowerOn:  false,
	}
	if vm.linkedClone {
		if template_mo.Snapshot == nil {
			return fmt.Errorf("`linkedClone=true`, but image VM has no snapshots")
		}
		cloneSpec.Snapshot = template_mo.Snapshot.CurrentSnapshot
	}
	log.Printf("[DEBUG] clone spec: %v", cloneSpec)

	task, err := template.Clone(context.TODO(), folder, vm.name, cloneSpec)
	if err != nil {
		return err
	}

	_, err = task.WaitForResult(context.TODO(), nil)
	if err != nil {
		return err
	}

	newVM, err := finder.VirtualMachine(context.TODO(), vm.Path())
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] new vm: %v", newVM)

	devices, err := newVM.Device(context.TODO())
	if err != nil {
		log.Printf("[DEBUG] Template devices can't be found")
		return err
	}

	for _, dvc := range devices {
		// Issue 3559/3560: Delete all ethernet devices to add the correct ones later
		if devices.Type(dvc) == "ethernet" {
			err := newVM.RemoveDevice(context.TODO(), dvc)
			if err != nil {
				return err
			}
		}
	}
	// Add Network devices
	for _, dvc := range networkDevices {
		err := newVM.AddDevice(
			context.TODO(), dvc.GetVirtualDeviceConfigSpec().Device)
		if err != nil {
			return err
		}
	}

	// Create the cdroms if needed.
	if err := createCdroms(newVM, vm.cdroms); err != nil {
		return err
	}

	taskb, err := newVM.Customize(context.TODO(), customSpec)
	if err != nil {
		return err
	}

	_, err = taskb.WaitForResult(context.TODO(), nil)
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] VM customization finished")

	for i := 1; i < len(vm.hardDisks); i++ {
		err = addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, vm.hardDisks[i].initType, datastore, vm.hardDisks[i].vmdkPath)
		if err != nil {
			return err
		}
	}

	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)

	if _, err := newVM.PowerOn(context.TODO()); err != nil {
		return err
	}

	ip, err := newVM.WaitForIP(context.TODO())
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] ip address: %v", ip)

	return nil
}
Example 10
// Create has the following implementation:
// 1. check whether the docker directory contains the boot2docker ISO;
// 2. generate an SSH keypair and bundle it in a tar;
// 3. create a virtual machine with the boot2docker ISO mounted;
// 4. reconfigure the virtual machine network and disk size.
func (d *Driver) Create() error {
	b2dutils := mcnutils.NewB2dUtils(d.StorePath)
	if err := b2dutils.CopyIsoToMachineDir(d.Boot2DockerURL, d.MachineName); err != nil {
		return err
	}

	log.Infof("Generating SSH Keypair...")
	if err := ssh.GenerateSSHKey(d.GetSSHKeyPath()); err != nil {
		return err
	}

	// Create context
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	c, err := d.vsphereLogin(ctx)
	if err != nil {
		return err
	}
	defer c.Logout(ctx)

	// Create a new finder
	f := find.NewFinder(c.Client, true)

	dc, err := f.DatacenterOrDefault(ctx, d.Datacenter)
	if err != nil {
		return err
	}

	f.SetDatacenter(dc)

	dss, err := f.DatastoreOrDefault(ctx, d.Datastore)
	if err != nil {
		return err
	}

	net, err := f.NetworkOrDefault(ctx, d.Network)
	if err != nil {
		return err
	}

	hs, err := f.HostSystemOrDefault(ctx, d.HostSystem)
	if err != nil {
		return err
	}

	var rp *object.ResourcePool
	if d.Pool != "" {
		// Find specified Resource Pool
		rp, err = f.ResourcePool(ctx, d.Pool)
		if err != nil {
			return err
		}
	} else {
		// Pick default Resource Pool for Host System
		rp, err = hs.ResourcePool(ctx)
		if err != nil {
			return err
		}
	}

	spec := types.VirtualMachineConfigSpec{
		Name:     d.MachineName,
		GuestId:  "otherLinux64Guest",
		Files:    &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", dss.Name())},
		NumCPUs:  int32(d.CPU),
		MemoryMB: int64(d.Memory),
	}

	scsi, err := object.SCSIControllerTypes().CreateSCSIController("pvscsi")
	if err != nil {
		return err
	}

	spec.DeviceChange = append(spec.DeviceChange, &types.VirtualDeviceConfigSpec{
		Operation: types.VirtualDeviceConfigSpecOperationAdd,
		Device:    scsi,
	})

	log.Infof("Creating VM...")
	folders, err := dc.Folders(ctx)
	if err != nil {
		return err
	}

	task, err := folders.VmFolder.CreateVM(ctx, spec, rp, hs)
	if err != nil {
		return err
	}

	info, err := task.WaitForResult(ctx, nil)
	if err != nil {
		return err
	}

	log.Infof("Uploading Boot2docker ISO ...")
	dsurl, err := dss.URL(ctx, dc, fmt.Sprintf("%s/%s", d.MachineName, isoFilename))
	if err != nil {
		return err
	}
	p := soap.DefaultUpload
	if err = c.Client.UploadFile(d.ISO, dsurl, &p); err != nil {
		return err
	}

	// Retrieve the new VM
	vm := object.NewVirtualMachine(c.Client, info.Result.(types.ManagedObjectReference))

	devices, err := vm.Device(ctx)
	if err != nil {
		return err
	}

	var add []types.BaseVirtualDevice

	controller, err := devices.FindDiskController("scsi")
	if err != nil {
		return err
	}

	disk := devices.CreateDisk(controller, dss.Reference(),
		dss.Path(fmt.Sprintf("%s/%s.vmdk", d.MachineName, d.MachineName)))

	// Convert MB to KB
	disk.CapacityInKB = int64(d.DiskSize) * 1024

	add = append(add, disk)
	ide, err := devices.FindIDEController("")
	if err != nil {
		return err
	}

	cdrom, err := devices.CreateCdrom(ide)
	if err != nil {
		return err
	}

	add = append(add, devices.InsertIso(cdrom, dss.Path(fmt.Sprintf("%s/%s", d.MachineName, isoFilename))))

	backing, err := net.EthernetCardBackingInfo(ctx)
	if err != nil {
		return err
	}

	netdev, err := object.EthernetCardTypes().CreateEthernetCard("vmxnet3", backing)
	if err != nil {
		return err
	}

	log.Infof("Reconfiguring VM...")
	add = append(add, netdev)
	if err := vm.AddDevice(ctx, add...); err != nil {
		return err
	}

	if err := d.Start(); err != nil {
		return err
	}

	log.Infof("Provisioning certs and ssh keys...")
	// Generate a tar keys bundle
	if err := d.generateKeyBundle(); err != nil {
		return err
	}

	opman := guest.NewOperationsManager(c.Client, vm.Reference())

	fileman, err := opman.FileManager(ctx)
	if err != nil {
		return err
	}

	src := d.ResolveStorePath("userdata.tar")
	s, err := os.Stat(src)
	if err != nil {
		return err
	}

	auth := AuthFlag{}
	flag := FileAttrFlag{}
	auth.auth.Username = B2DUser
	auth.auth.Password = B2DPass
	flag.SetPerms(0, 0, 660)
	url, err := fileman.InitiateFileTransferToGuest(ctx, auth.Auth(), "/home/docker/userdata.tar", flag.Attr(), s.Size(), true)
	if err != nil {
		return err
	}
	u, err := c.Client.ParseURL(url)
	if err != nil {
		return err
	}
	if err = c.Client.UploadFile(src, u, nil); err != nil {
		return err
	}

	procman, err := opman.ProcessManager(ctx)
	if err != nil {
		return err
	}

	var env []string
	guestspec := types.GuestProgramSpec{
		ProgramPath:      "/usr/bin/sudo",
		Arguments:        "/bin/mv /home/docker/userdata.tar /var/lib/boot2docker/userdata.tar && /usr/bin/sudo tar xf /var/lib/boot2docker/userdata.tar -C /home/docker/ > /var/log/userdata.log 2>&1 && /usr/bin/sudo chown -R docker:staff /home/docker",
		WorkingDirectory: "",
		EnvVariables:     env,
	}

	_, err = procman.StartProgram(ctx, auth.Auth(), &guestspec)
	if err != nil {
		return err
	}

	return nil
}
Example 11
func TestCreateVm(t *testing.T) {
	ctx := context.Background()

	for _, model := range []*Model{ESX(), VPX()} {
		defer model.Remove()
		err := model.Create()
		if err != nil {
			t.Fatal(err)
		}

		s := model.Service.NewServer()
		defer s.Close()

		c, err := govmomi.NewClient(ctx, s.URL, true)
		if err != nil {
			t.Fatal(err)
		}

		spec := types.VirtualMachineConfigSpec{
			// Note: real ESX allows the VM to be created without a GuestId,
			// but power on will fail.
			GuestId: string(types.VirtualMachineGuestOsIdentifierOtherGuest),
		}

		steps := []func(){
			func() {
				spec.Name = "test"
			},
			func() {
				spec.Files = &types.VirtualMachineFileInfo{
					VmPathName: fmt.Sprintf("[LocalDS_0] %s/%s.vmx", spec.Name, spec.Name),
				}
			},
		}

		finder := find.NewFinder(c.Client, false)

		dc, err := finder.DefaultDatacenter(ctx)
		if err != nil {
			t.Fatal(err)
		}

		finder.SetDatacenter(dc)

		folders, err := dc.Folders(ctx)
		if err != nil {
			t.Fatal(err)
		}

		hosts, err := finder.HostSystemList(ctx, "*/*")
		if err != nil {
			t.Fatal(err)
		}

		nhosts := len(hosts)
		host := hosts[rand.Intn(nhosts)]
		pool, err := host.ResourcePool(ctx)
		if err != nil {
			t.Fatal(err)
		}

		if nhosts == 1 {
			// test the default path against the ESX model
			host = nil
		}

		vmFolder := folders.VmFolder
		// expecting CreateVM to fail until all steps are taken
		for _, step := range steps {
			task, cerr := vmFolder.CreateVM(ctx, spec, pool, host)
			if cerr != nil {
				t.Fatal(cerr)
			}

			_, cerr = task.WaitForResult(ctx, nil)
			if cerr == nil {
				t.Error("expected error")
			}

			step()
		}

		task, err := vmFolder.CreateVM(ctx, spec, pool, host)
		if err != nil {
			t.Fatal(err)
		}

		info, err := task.WaitForResult(ctx, nil)
		if err != nil {
			t.Fatal(err)
		}

		vm := object.NewVirtualMachine(c.Client, info.Result.(types.ManagedObjectReference))

		name, err := vm.ObjectName(ctx)
		if err != nil {
			t.Fatal(err)
		}

		if name != spec.Name {
			t.Errorf("name=%s", name)
		}

		_, err = vm.Device(ctx)
		if err != nil {
			t.Fatal(err)
		}

		recreate := func(context.Context) (*object.Task, error) {
			return vmFolder.CreateVM(ctx, spec, pool, nil)
		}

		ops := []struct {
			method func(context.Context) (*object.Task, error)
			state  types.VirtualMachinePowerState
			fail   bool
		}{
			// Powered off by default
			{nil, types.VirtualMachinePowerStatePoweredOff, false},
			// Create with same .vmx path should fail
			{recreate, "", true},
			// Off -> On  == ok
			{vm.PowerOn, types.VirtualMachinePowerStatePoweredOn, false},
			// On  -> On  == fail
			{vm.PowerOn, types.VirtualMachinePowerStatePoweredOn, true},
			// On  -> Off == ok
			{vm.PowerOff, types.VirtualMachinePowerStatePoweredOff, false},
			// Off -> Off == fail
			{vm.PowerOff, types.VirtualMachinePowerStatePoweredOff, true},
			// Off -> On  == ok
			{vm.PowerOn, types.VirtualMachinePowerStatePoweredOn, false},
			// Destroy == fail (power is On)
			{vm.Destroy, types.VirtualMachinePowerStatePoweredOn, true},
			// On  -> Off == ok
			{vm.PowerOff, types.VirtualMachinePowerStatePoweredOff, false},
			// Destroy == ok (power is Off)
			{vm.Destroy, "", false},
		}

		for i, op := range ops {
			if op.method != nil {
				task, err = op.method(ctx)
				if err != nil {
					t.Fatal(err)
				}

				err = task.Wait(ctx)
				if op.fail {
					if err == nil {
						t.Errorf("%d: expected error", i)
					}
				} else {
					if err != nil {
						t.Errorf("%d: %s", i, err)
					}
				}
			}

			if len(op.state) != 0 {
				state, err := vm.PowerState(ctx)
				if err != nil {
					t.Fatal(err)
				}

				if state != op.state {
					t.Errorf("state=%s", state)
				}
			}
		}
	}
}
// createVirtualMachine creates a new VirtualMachine.
func (vm *virtualMachine) createVirtualMachine(c *govmomi.Client) error {
	dc, err := getDatacenter(c, vm.datacenter)

	if err != nil {
		return err
	}
	finder := find.NewFinder(c.Client, true)
	finder = finder.SetDatacenter(dc)

	var resourcePool *object.ResourcePool
	if vm.resourcePool == "" {
		if vm.cluster == "" {
			resourcePool, err = finder.DefaultResourcePool(context.TODO())
			if err != nil {
				return err
			}
		} else {
			resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources")
			if err != nil {
				return err
			}
		}
	} else {
		resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool)
		if err != nil {
			return err
		}
	}
	log.Printf("[DEBUG] resource pool: %#v", resourcePool)

	dcFolders, err := dc.Folders(context.TODO())
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] folder: %#v", vm.folder)
	folder := dcFolders.VmFolder
	if len(vm.folder) > 0 {
		si := object.NewSearchIndex(c.Client)
		folderRef, err := si.FindByInventoryPath(
			context.TODO(), fmt.Sprintf("%v/vm/%v", vm.datacenter, vm.folder))
		if err != nil {
			return fmt.Errorf("Error reading folder %s: %s", vm.folder, err)
		} else if folderRef == nil {
			return fmt.Errorf("Cannot find folder %s", vm.folder)
		} else {
			folder = folderRef.(*object.Folder)
		}
	}

	// network
	networkDevices := []types.BaseVirtualDeviceConfigSpec{}
	for _, network := range vm.networkInterfaces {
		// network device
		nd, err := buildNetworkDevice(finder, network.label, "e1000")
		if err != nil {
			return err
		}
		networkDevices = append(networkDevices, nd)
	}

	// make config spec
	configSpec := types.VirtualMachineConfigSpec{
		GuestId:           "otherLinux64Guest",
		Name:              vm.name,
		NumCPUs:           vm.vcpu,
		NumCoresPerSocket: 1,
		MemoryMB:          vm.memoryMb,
		MemoryAllocation: &types.ResourceAllocationInfo{
			Reservation: vm.memoryAllocation.reservation,
		},
		DeviceChange: networkDevices,
	}
	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)

	// make ExtraConfig
	log.Printf("[DEBUG] virtual machine Extra Config spec start")
	if len(vm.customConfigurations) > 0 {
		var ov []types.BaseOptionValue
		for k, v := range vm.customConfigurations {
			key := k
			value := v
			o := types.OptionValue{
				Key:   key,
				Value: &value,
			}
			log.Printf("[DEBUG] virtual machine Extra Config spec: %s,%s", k, v)
			ov = append(ov, &o)
		}
		configSpec.ExtraConfig = ov
		log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig)
	}

	var datastore *object.Datastore
	if vm.datastore == "" {
		datastore, err = finder.DefaultDatastore(context.TODO())
		if err != nil {
			return err
		}
	} else {
		datastore, err = finder.Datastore(context.TODO(), vm.datastore)
		if err != nil {
			// TODO: datastore cluster support in govmomi finder function
			d, err := getDatastoreObject(c, dcFolders, vm.datastore)
			if err != nil {
				return err
			}

			if d.Type == "StoragePod" {
				sp := object.StoragePod{
					Folder: object.NewFolder(c.Client, d),
				}
				sps := buildStoragePlacementSpecCreate(dcFolders, resourcePool, sp, configSpec)
				datastore, err = findDatastore(c, sps)
				if err != nil {
					return err
				}
			} else {
				datastore = object.NewDatastore(c.Client, d)
			}
		}
	}

	log.Printf("[DEBUG] datastore: %#v", datastore)

	var mds mo.Datastore
	if err = datastore.Properties(context.TODO(), datastore.Reference(), []string{"name"}, &mds); err != nil {
		return err
	}
	log.Printf("[DEBUG] datastore: %#v", mds.Name)
	scsi, err := object.SCSIControllerTypes().CreateSCSIController("scsi")
	if err != nil {
		return err
	}

	configSpec.DeviceChange = append(configSpec.DeviceChange, &types.VirtualDeviceConfigSpec{
		Operation: types.VirtualDeviceConfigSpecOperationAdd,
		Device:    scsi,
	})

	configSpec.Files = &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", mds.Name)}

	task, err := folder.CreateVM(context.TODO(), configSpec, resourcePool, nil)
	if err != nil {
		return err
	}

	err = task.Wait(context.TODO())
	if err != nil {
		return err
	}

	newVM, err := finder.VirtualMachine(context.TODO(), vm.Path())
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] new vm: %v", newVM)

	log.Printf("[DEBUG] add hard disk: %v", vm.hardDisks)
	for _, hd := range vm.hardDisks {
		log.Printf("[DEBUG] add hard disk: %v", hd.size)
		log.Printf("[DEBUG] add hard disk: %v", hd.iops)
		err = addHardDisk(newVM, hd.size, hd.iops, "thin", datastore, hd.vmdkPath)
		if err != nil {
			return err
		}
	}

	// Create the cdroms if needed.
	if err := createCdroms(newVM, vm.cdroms); err != nil {
		return err
	}

	if vm.bootableVmdk {
		if _, err := newVM.PowerOn(context.TODO()); err != nil {
			return err
		}
		ip, err := newVM.WaitForIP(context.TODO())
		if err != nil {
			return err
		}
		log.Printf("[DEBUG] ip address: %v", ip)
	}

	return nil
}
// createVirtualMachine creates a new VirtualMachine.
func (vm *virtualMachine) createVirtualMachine(c *govmomi.Client) error {
	var dc *object.Datacenter
	var err error

	finder := find.NewFinder(c.Client, true)

	if vm.datacenter != "" {
		dc, err = finder.Datacenter(context.TODO(), vm.datacenter)
		if err != nil {
			return err
		}
	} else {
		dc, err = finder.DefaultDatacenter(context.TODO())
		if err != nil {
			return err
		}
	}
	finder = finder.SetDatacenter(dc)

	var resourcePool *object.ResourcePool
	if vm.resourcePool == "" {
		if vm.cluster == "" {
			resourcePool, err = finder.DefaultResourcePool(context.TODO())
			if err != nil {
				return err
			}
		} else {
			resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources")
			if err != nil {
				return err
			}
		}
	} else {
		resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool)
		if err != nil {
			return err
		}
	}
	log.Printf("[DEBUG] resource pool: %#v", resourcePool)

	dcFolders, err := dc.Folders(context.TODO())
	if err != nil {
		return err
	}

	// network
	networkDevices := []types.BaseVirtualDeviceConfigSpec{}
	for _, network := range vm.networkInterfaces {
		// network device
		nd, err := createNetworkDevice(finder, network.label, "e1000")
		if err != nil {
			return err
		}
		networkDevices = append(networkDevices, nd)
	}

	// make config spec
	configSpec := types.VirtualMachineConfigSpec{
		GuestId:           "otherLinux64Guest",
		Name:              vm.name,
		NumCPUs:           vm.vcpu,
		NumCoresPerSocket: 1,
		MemoryMB:          vm.memoryMb,
		DeviceChange:      networkDevices,
	}
	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)

	var datastore *object.Datastore
	if vm.datastore == "" {
		datastore, err = finder.DefaultDatastore(context.TODO())
		if err != nil {
			return err
		}
	} else {
		s := object.NewSearchIndex(c.Client)
		ref, err := s.FindChild(context.TODO(), dcFolders.DatastoreFolder, vm.datastore)
		if err != nil {
			return err
		}
		log.Printf("[DEBUG] findDatastore: reference: %#v", ref)

		mor := ref.Reference()
		if mor.Type == "StoragePod" {
			storagePod := object.NewFolder(c.Client, mor)

			vmfr := dcFolders.VmFolder.Reference()
			rpr := resourcePool.Reference()
			spr := storagePod.Reference()

			sps := types.StoragePlacementSpec{
				Type:       "create",
				ConfigSpec: &configSpec,
				PodSelectionSpec: types.StorageDrsPodSelectionSpec{
					StoragePod: &spr,
				},
				Folder:       &vmfr,
				ResourcePool: &rpr,
			}
			log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps)

			srm := object.NewStorageResourceManager(c.Client)
			rds, err := srm.RecommendDatastores(context.TODO(), sps)
			if err != nil {
				return err
			}
			log.Printf("[DEBUG] findDatastore: recommendDatastores: %#v\n", rds)

			spa := rds.Recommendations[0].Action[0].(*types.StoragePlacementAction)
			datastore = object.NewDatastore(c.Client, spa.Destination)
		} else {
			datastore = object.NewDatastore(c.Client, mor)
		}
	}
	log.Printf("[DEBUG] datastore: %#v", datastore)

	var mds mo.Datastore
	if err = datastore.Properties(context.TODO(), datastore.Reference(), []string{"name"}, &mds); err != nil {
		return err
	}
	log.Printf("[DEBUG] datastore: %#v", mds.Name)
	scsi, err := object.SCSIControllerTypes().CreateSCSIController("scsi")
	if err != nil {
		return err
	}

	configSpec.DeviceChange = append(configSpec.DeviceChange, &types.VirtualDeviceConfigSpec{
		Operation: types.VirtualDeviceConfigSpecOperationAdd,
		Device:    scsi,
	})
	configSpec.Files = &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", mds.Name)}

	task, err := dcFolders.VmFolder.CreateVM(context.TODO(), configSpec, resourcePool, nil)
	if err != nil {
		return err
	}

	err = task.Wait(context.TODO())
	if err != nil {
		return err
	}

	newVM, err := finder.VirtualMachine(context.TODO(), vm.name)
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] new vm: %v", newVM)

	log.Printf("[DEBUG] add hard disk: %v", vm.hardDisks)
	for _, hd := range vm.hardDisks {
		log.Printf("[DEBUG] add hard disk: %v", hd.size)
		log.Printf("[DEBUG] add hard disk: %v", hd.iops)
		err = addHardDisk(newVM, hd.size, hd.iops, "thin")
		if err != nil {
			return err
		}
	}
	return nil
}
// deployVirtualMachine deploys a new VirtualMachine.
func (vm *virtualMachine) deployVirtualMachine(c *govmomi.Client) error {
	dc, err := getDatacenter(c, vm.datacenter)
	if err != nil {
		return err
	}
	finder := find.NewFinder(c.Client, true)
	finder = finder.SetDatacenter(dc)

	template, err := finder.VirtualMachine(context.TODO(), vm.template)
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] template: %#v", template)

	var resourcePool *object.ResourcePool
	if vm.resourcePool == "" {
		if vm.cluster == "" {
			resourcePool, err = finder.DefaultResourcePool(context.TODO())
			if err != nil {
				return err
			}
		} else {
			resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources")
			if err != nil {
				return err
			}
		}
	} else {
		resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool)
		if err != nil {
			return err
		}
	}
	log.Printf("[DEBUG] resource pool: %#v", resourcePool)

	dcFolders, err := dc.Folders(context.TODO())
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] folder: %#v", vm.folder)
	folder := dcFolders.VmFolder
	if len(vm.folder) > 0 {
		si := object.NewSearchIndex(c.Client)
		folderRef, err := si.FindByInventoryPath(
			context.TODO(), fmt.Sprintf("%v/vm/%v", vm.datacenter, vm.folder))
		if err != nil {
			return fmt.Errorf("Error reading folder %s: %s", vm.folder, err)
		} else if folderRef == nil {
			return fmt.Errorf("Cannot find folder %s", vm.folder)
		} else {
			folder = folderRef.(*object.Folder)
		}
	}

	var datastore *object.Datastore
	if vm.datastore == "" {
		datastore, err = finder.DefaultDatastore(context.TODO())
		if err != nil {
			return err
		}
	} else {
		datastore, err = finder.Datastore(context.TODO(), vm.datastore)
		if err != nil {
			// TODO: datastore cluster support in govmomi finder function
			d, err := getDatastoreObject(c, dcFolders, vm.datastore)
			if err != nil {
				return err
			}

			if d.Type == "StoragePod" {
				sp := object.StoragePod{
					Folder: object.NewFolder(c.Client, d),
				}
				sps := buildStoragePlacementSpecClone(c, dcFolders, template, resourcePool, sp)
				datastore, err = findDatastore(c, sps)
				if err != nil {
					return err
				}
			} else {
				datastore = object.NewDatastore(c.Client, d)
			}
		}
	}
	log.Printf("[DEBUG] datastore: %#v", datastore)

	relocateSpec, err := buildVMRelocateSpec(resourcePool, datastore, template)
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] relocate spec: %v", relocateSpec)

	// network
	networkDevices := []types.BaseVirtualDeviceConfigSpec{}
	networkConfigs := []types.CustomizationAdapterMapping{}
	for _, network := range vm.networkInterfaces {
		// network device
		nd, err := buildNetworkDevice(finder, network.label, "vmxnet3")
		if err != nil {
			return err
		}
		networkDevices = append(networkDevices, nd)

		// TODO: IPv6 support
		var ipSetting types.CustomizationIPSettings
		if network.ipv4Address == "" {
			ipSetting = types.CustomizationIPSettings{
				Ip: &types.CustomizationDhcpIpGenerator{},
			}
		} else {
			if network.ipv4PrefixLength == 0 {
				return fmt.Errorf("Error: ipv4_prefix_length argument is empty.")
			}
			m := net.CIDRMask(network.ipv4PrefixLength, 32)
			sm := net.IPv4(m[0], m[1], m[2], m[3])
			subnetMask := sm.String()
			log.Printf("[DEBUG] gateway: %v", vm.gateway)
			log.Printf("[DEBUG] ipv4 address: %v", network.ipv4Address)
			log.Printf("[DEBUG] ipv4 prefix length: %v", network.ipv4PrefixLength)
			log.Printf("[DEBUG] ipv4 subnet mask: %v", subnetMask)
			ipSetting = types.CustomizationIPSettings{
				Gateway: []string{
					vm.gateway,
				},
				Ip: &types.CustomizationFixedIp{
					IpAddress: network.ipv4Address,
				},
				SubnetMask: subnetMask,
			}
		}

		// network config
		config := types.CustomizationAdapterMapping{
			Adapter: ipSetting,
		}
		networkConfigs = append(networkConfigs, config)
	}
	log.Printf("[DEBUG] network configs: %v", networkConfigs[0].Adapter)

	// make config spec
	configSpec := types.VirtualMachineConfigSpec{
		NumCPUs:           vm.vcpu,
		NumCoresPerSocket: 1,
		MemoryMB:          vm.memoryMb,
	}
	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)

	log.Printf("[DEBUG] starting extra custom config spec: %v", vm.customConfigurations)

	// make ExtraConfig
	if len(vm.customConfigurations) > 0 {
		var ov []types.BaseOptionValue
		for k, v := range vm.customConfigurations {
			key := k
			value := v
			o := types.OptionValue{
				Key:   key,
				Value: &value,
			}
			ov = append(ov, &o)
		}
		configSpec.ExtraConfig = ov
		log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig)
	}

	// create CustomizationSpec
	customSpec := types.CustomizationSpec{
		Identity: &types.CustomizationLinuxPrep{
			HostName: &types.CustomizationFixedName{
				Name: strings.Split(vm.name, ".")[0],
			},
			Domain:     vm.domain,
			TimeZone:   vm.timeZone,
			HwClockUTC: types.NewBool(true),
		},
		GlobalIPSettings: types.CustomizationGlobalIPSettings{
			DnsSuffixList: vm.dnsSuffixes,
			DnsServerList: vm.dnsServers,
		},
		NicSettingMap: networkConfigs,
	}
	log.Printf("[DEBUG] custom spec: %v", customSpec)

	// make vm clone spec
	cloneSpec := types.VirtualMachineCloneSpec{
		Location: relocateSpec,
		Template: false,
		Config:   &configSpec,
		PowerOn:  false,
	}
	log.Printf("[DEBUG] clone spec: %v", cloneSpec)

	task, err := template.Clone(context.TODO(), folder, vm.name, cloneSpec)
	if err != nil {
		return err
	}

	_, err = task.WaitForResult(context.TODO(), nil)
	if err != nil {
		return err
	}

	newVM, err := finder.VirtualMachine(context.TODO(), vm.Path())
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] new vm: %v", newVM)

	devices, err := newVM.Device(context.TODO())
	if err != nil {
		log.Printf("[DEBUG] Template devices can't be found")
		return err
	}

	for _, dvc := range devices {
		// Issue 3559/3560: Delete all ethernet devices to add the correct ones later
		if devices.Type(dvc) == "ethernet" {
			err := newVM.RemoveDevice(context.TODO(), dvc)
			if err != nil {
				return err
			}
		}
	}
	// Add Network devices
	for _, dvc := range networkDevices {
		err := newVM.AddDevice(
			context.TODO(), dvc.GetVirtualDeviceConfigSpec().Device)
		if err != nil {
			return err
		}
	}

	taskb, err := newVM.Customize(context.TODO(), customSpec)
	if err != nil {
		return err
	}

	_, err = taskb.WaitForResult(context.TODO(), nil)
	if err != nil {
		return err
	}
	log.Printf("[DEBUG]VM customization finished")

	for i := 1; i < len(vm.hardDisks); i++ {
		err = addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, "eager_zeroed")
		if err != nil {
			return err
		}
	}
	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)

	if _, err := newVM.PowerOn(context.TODO()); err != nil {
		return err
	}

	ip, err := newVM.WaitForIP(context.TODO())
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] ip address: %v", ip)

	return nil
}
func resourceVSphereVirtualMachineUpdate(d *schema.ResourceData, meta interface{}) error {
	// flag if changes have to be applied
	hasChanges := false
	// flag if changes have to be done when powered off
	rebootRequired := false

	// make config spec
	configSpec := types.VirtualMachineConfigSpec{}

	if d.HasChange("vcpu") {
		configSpec.NumCPUs = int32(d.Get("vcpu").(int))
		hasChanges = true
		rebootRequired = true
	}

	if d.HasChange("memory") {
		configSpec.MemoryMB = int64(d.Get("memory").(int))
		hasChanges = true
		rebootRequired = true
	}

	client := meta.(*govmomi.Client)
	dc, err := getDatacenter(client, d.Get("datacenter").(string))
	if err != nil {
		return err
	}
	finder := find.NewFinder(client.Client, true)
	finder = finder.SetDatacenter(dc)

	vm, err := finder.VirtualMachine(context.TODO(), vmPath(d.Get("folder").(string), d.Get("name").(string)))
	if err != nil {
		return err
	}

	if d.HasChange("disk") {
		hasChanges = true
		oldDisks, newDisks := d.GetChange("disk")
		oldDiskSet := oldDisks.(*schema.Set)
		newDiskSet := newDisks.(*schema.Set)

		addedDisks := newDiskSet.Difference(oldDiskSet)
		removedDisks := oldDiskSet.Difference(newDiskSet)

		// Removed disks
		for _, diskRaw := range removedDisks.List() {
			if disk, ok := diskRaw.(map[string]interface{}); ok {
				devices, err := vm.Device(context.TODO())
				if err != nil {
					return fmt.Errorf("[ERROR] Update Remove Disk - Could not get virtual device list: %v", err)
				}
				virtualDisk := devices.FindByKey(int32(disk["key"].(int)))

				keep := false
				if v, ok := d.GetOk("keep_on_remove"); ok {
					keep = v.(bool)
				}

				err = vm.RemoveDevice(context.TODO(), keep, virtualDisk)
				if err != nil {
					return fmt.Errorf("[ERROR] Update Remove Disk - Error removing disk: %v", err)
				}
			}
		}
		// Added disks
		for _, diskRaw := range addedDisks.List() {
			if disk, ok := diskRaw.(map[string]interface{}); ok {

				var datastore *object.Datastore
				if disk["datastore"] == "" {
					datastore, err = finder.DefaultDatastore(context.TODO())
					if err != nil {
						return fmt.Errorf("[ERROR] Update Remove Disk - Error finding datastore: %v", err)
					}
				} else {
					datastore, err = finder.Datastore(context.TODO(), disk["datastore"].(string))
					if err != nil {
						log.Printf("[ERROR] Couldn't find datastore %v.  %s", disk["datastore"].(string), err)
						return err
					}
				}

				var size int64
				if disk["size"] == 0 {
					size = 0
				} else {
					size = int64(disk["size"].(int))
				}
				iops := int64(disk["iops"].(int))
				controller_type := disk["controller"].(string)

				var mo mo.VirtualMachine
				if err := vm.Properties(context.TODO(), vm.Reference(), []string{"summary", "config"}, &mo); err != nil {
					return err
				}

				var diskPath string
				switch {
				case disk["vmdk"] != "":
					diskPath = disk["vmdk"].(string)
				case disk["name"] != "":
					snapshotFullDir := mo.Config.Files.SnapshotDirectory
					split := strings.Split(snapshotFullDir, " ")
					if len(split) != 2 {
						return fmt.Errorf("[ERROR] createVirtualMachine - failed to split snapshot directory: %v", snapshotFullDir)
					}
					vmWorkingPath := split[1]
					diskPath = vmWorkingPath + disk["name"].(string)
				default:
					return fmt.Errorf("[ERROR] resourceVSphereVirtualMachineUpdate - Neither vmdk path nor vmdk name was given")
				}

				log.Printf("[INFO] Attaching disk: %v", diskPath)
				err = addHardDisk(vm, size, iops, "thin", datastore, diskPath, controller_type)
				if err != nil {
					log.Printf("[ERROR] Add Hard Disk Failed: %v", err)
					return err
				}
			}
			if err != nil {
				return err
			}
		}
	}

	// do nothing if there are no changes
	if !hasChanges {
		return nil
	}

	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)

	if rebootRequired {
		log.Printf("[INFO] Shutting down virtual machine: %s", d.Id())

		task, err := vm.PowerOff(context.TODO())
		if err != nil {
			return err
		}

		err = task.Wait(context.TODO())
		if err != nil {
			return err
		}
	}

	log.Printf("[INFO] Reconfiguring virtual machine: %s", d.Id())

	task, err := vm.Reconfigure(context.TODO(), configSpec)
	if err != nil {
		return err
	}

	err = task.Wait(context.TODO())
	if err != nil {
		return err
	}

	if rebootRequired {
		task, err = vm.PowerOn(context.TODO())
		if err != nil {
			return err
		}

		err = task.Wait(context.TODO())
		if err != nil {
			log.Printf("[ERROR] %s", err)
		}
	}

	ip, err := vm.WaitForIP(context.TODO())
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] ip address: %v", ip)

	return resourceVSphereVirtualMachineRead(d, meta)
}
func resourceVirtualMachineCreate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*vim25.Client)

	dc_name := d.Get("datacenter").(string)
	if dc_name == "" {
		finder := find.NewFinder(client, false)
		dc, err := finder.DefaultDatacenter(context.TODO())
		if err != nil {
			return fmt.Errorf("Error reading default datacenter: %s", err)
		}
		var dc_mo mo.Datacenter
		err = dc.Properties(context.TODO(), dc.Reference(), []string{"name"}, &dc_mo)
		if err != nil {
			return fmt.Errorf("Error reading datacenter name: %s", err)
		}
		dc_name = dc_mo.Name
		d.Set("datacenter", dc_name)
	}

	image_name := d.Get("image").(string)
	image_ref, err := object.NewSearchIndex(client).FindByInventoryPath(context.TODO(), fmt.Sprintf("%s/vm/%s", dc_name, image_name))
	if err != nil {
		return fmt.Errorf("Error reading vm: %s", err)
	}
	if image_ref == nil {
		return fmt.Errorf("Cannot find image %s", image_name)
	}
	image := image_ref.(*object.VirtualMachine)

	var image_mo mo.VirtualMachine
	err = image.Properties(context.TODO(), image.Reference(), []string{"parent", "config.template", "resourcePool", "snapshot", "guest.toolsVersionStatus2", "config.guestFullName"}, &image_mo)
	if err != nil {
		return fmt.Errorf("Error reading base VM properties: %s", err)
	}

	var folder_ref object.Reference
	var folder *object.Folder
	if d.Get("folder").(string) != "" {
		folder_ref, err = object.NewSearchIndex(client).FindByInventoryPath(context.TODO(), fmt.Sprintf("%v/vm/%v", dc_name, d.Get("folder").(string)))
		if err != nil {
			return fmt.Errorf("Error reading folder: %s", err)
		}
		if folder_ref == nil {
			return fmt.Errorf("Cannot find folder %s", d.Get("folder").(string))
		}

		folder = folder_ref.(*object.Folder)
	} else {
		folder = object.NewFolder(client, *image_mo.Parent)
	}

	host_name := d.Get("host").(string)
	if host_name == "" {
		if image_mo.Config.Template {
			return fmt.Errorf("Image is a template, 'host' is required")
		} else {
			var pool_mo mo.ResourcePool
			err = property.DefaultCollector(client).RetrieveOne(context.TODO(), *image_mo.ResourcePool, []string{"owner"}, &pool_mo)
			if err != nil {
				return fmt.Errorf("Error reading resource pool of base VM: %s", err)
			}

			if strings.Contains(pool_mo.Owner.Value, "domain-s") {
				var host_mo mo.ComputeResource
				err = property.DefaultCollector(client).RetrieveOne(context.TODO(), pool_mo.Owner, []string{"name"}, &host_mo)
				if err != nil {
					return fmt.Errorf("Error reading host of base VM: %s", err)
				}
				host_name = host_mo.Name
			} else if strings.Contains(pool_mo.Owner.Value, "domain-c") {
				var cluster_mo mo.ClusterComputeResource
				err = property.DefaultCollector(client).RetrieveOne(context.TODO(), pool_mo.Owner, []string{"name"}, &cluster_mo)
				if err != nil {
					return fmt.Errorf("Error reading cluster of base VM: %s", err)
				}
				host_name = cluster_mo.Name
			} else {
				return fmt.Errorf("Unknown compute resource format of base VM: %s", pool_mo.Owner.Value)
			}
		}
	}

	pool_name := d.Get("resource_pool").(string)
	pool_ref, err := object.NewSearchIndex(client).FindByInventoryPath(context.TODO(), fmt.Sprintf("%v/host/%v/Resources/%v", dc_name, host_name, pool_name))
	if err != nil {
		return fmt.Errorf("Error reading resource pool: %s", err)
	}
	if pool_ref == nil {
		return fmt.Errorf("Cannot find resource pool %s", pool_name)
	}

	var relocateSpec types.VirtualMachineRelocateSpec
	var pool_mor types.ManagedObjectReference
	pool_mor = pool_ref.Reference()
	relocateSpec.Pool = &pool_mor

	if d.Get("linked_clone").(bool) {
		relocateSpec.DiskMoveType = "createNewChildDiskBacking"
	}
	var confSpec types.VirtualMachineConfigSpec
	if d.Get("cpus") != nil {
		confSpec.NumCPUs = int32(d.Get("cpus").(int))
	}
	if d.Get("memory") != nil {
		confSpec.MemoryMB = int64(d.Get("memory").(int))
	}

	params := d.Get("configuration_parameters").(map[string]interface{})
	var ov []types.BaseOptionValue
	if len(params) > 0 {
		for k, v := range params {
			key := k
			value := v
			o := types.OptionValue{
				Key:   key,
				Value: &value,
			}
			ov = append(ov, &o)
		}
		confSpec.ExtraConfig = ov
	}

	cloneSpec := types.VirtualMachineCloneSpec{
		Location: relocateSpec,
		Config:   &confSpec,
		PowerOn:  d.Get("power_on").(bool),
	}
	if d.Get("linked_clone").(bool) {
		if image_mo.Snapshot == nil {
			return fmt.Errorf("`linked_clone=true`, but image VM has no snapshots")
		}
		cloneSpec.Snapshot = image_mo.Snapshot.CurrentSnapshot
	}

	domain := d.Get("domain").(string)
	ip_address := d.Get("ip_address").(string)
	if domain != "" {
		if image_mo.Guest.ToolsVersionStatus2 == "guestToolsNotInstalled" {
			return fmt.Errorf("VMware tools are not installed in base VM")
		}
		if !strings.Contains(image_mo.Config.GuestFullName, "Linux") && !strings.Contains(image_mo.Config.GuestFullName, "CentOS") {
			return fmt.Errorf("Guest customization is supported only for Linux. Base image OS is: %s", image_mo.Config.GuestFullName)
		}
		customizationSpec := types.CustomizationSpec{
			GlobalIPSettings: types.CustomizationGlobalIPSettings{},
			Identity: &types.CustomizationLinuxPrep{
				HostName: &types.CustomizationVirtualMachineName{},
				Domain:   domain,
			},
			NicSettingMap: []types.CustomizationAdapterMapping{
				{
					Adapter: types.CustomizationIPSettings{},
				},
			},
		}
		if ip_address != "" {
			mask := d.Get("subnet_mask").(string)
			if mask == "" {
				return fmt.Errorf("'subnet_mask' must be set, if static 'ip_address' is specified")
			}
			customizationSpec.NicSettingMap[0].Adapter.Ip = &types.CustomizationFixedIp{
				IpAddress: ip_address,
			}
			customizationSpec.NicSettingMap[0].Adapter.SubnetMask = d.Get("subnet_mask").(string)
			gateway := d.Get("gateway").(string)
			if gateway != "" {
				customizationSpec.NicSettingMap[0].Adapter.Gateway = []string{gateway}
			}
		} else {
			customizationSpec.NicSettingMap[0].Adapter.Ip = &types.CustomizationDhcpIpGenerator{}
		}
		cloneSpec.Customization = &customizationSpec
	} else if ip_address != "" {
		return fmt.Errorf("'domain' must be set, if static 'ip_address' is specified")
	}

	task, err := image.Clone(context.TODO(), folder, d.Get("name").(string), cloneSpec)
	if err != nil {
		return fmt.Errorf("Error clonning vm: %s", err)
	}
	info, err := task.WaitForResult(context.TODO(), nil)
	if err != nil {
		return fmt.Errorf("Error clonning vm: %s", err)
	}

	vm_mor := info.Result.(types.ManagedObjectReference)
	d.SetId(vm_mor.Value)
	vm := object.NewVirtualMachine(client, vm_mor)
	// workaround for https://github.com/vmware/govmomi/issues/218
	if ip_address == "" && d.Get("power_on").(bool) {
		ip, err := vm.WaitForIP(context.TODO())
		if err != nil {
			log.Printf("[ERROR] Cannot read ip address: %s", err)
		} else {
			d.Set("ip_address", ip)
			d.SetConnInfo(map[string]string{
				"type": "ssh",
				"host": ip,
			})
		}
	}

	return nil
}
func resourceVSphereVirtualMachineUpdate(d *schema.ResourceData, meta interface{}) error {
	// flag if changes have to be applied
	hasChanges := false
	// flag if changes have to be done when powered off
	rebootRequired := false

	// make config spec
	configSpec := types.VirtualMachineConfigSpec{}

	if d.HasChange("vcpu") {
		configSpec.NumCPUs = int32(d.Get("vcpu").(int))
		hasChanges = true
		rebootRequired = true
	}

	if d.HasChange("memory") {
		configSpec.MemoryMB = int64(d.Get("memory").(int))
		hasChanges = true
		rebootRequired = true
	}

	// do nothing if there are no changes
	if !hasChanges {
		return nil
	}

	client := meta.(*govmomi.Client)
	dc, err := getDatacenter(client, d.Get("datacenter").(string))
	if err != nil {
		return err
	}
	finder := find.NewFinder(client.Client, true)
	finder = finder.SetDatacenter(dc)

	vm, err := finder.VirtualMachine(context.TODO(), vmPath(d.Get("folder").(string), d.Get("name").(string)))
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)

	if rebootRequired {
		log.Printf("[INFO] Shutting down virtual machine: %s", d.Id())

		task, err := vm.PowerOff(context.TODO())
		if err != nil {
			return err
		}

		err = task.Wait(context.TODO())
		if err != nil {
			return err
		}
	}

	log.Printf("[INFO] Reconfiguring virtual machine: %s", d.Id())

	task, err := vm.Reconfigure(context.TODO(), configSpec)
	if err != nil {
		log.Printf("[ERROR] %s", err)
		return err
	}

	err = task.Wait(context.TODO())
	if err != nil {
		log.Printf("[ERROR] %s", err)
	}

	if rebootRequired {
		task, err = vm.PowerOn(context.TODO())
		if err != nil {
			return err
		}

		err = task.Wait(context.TODO())
		if err != nil {
			log.Printf("[ERROR] %s", err)
		}
	}

	ip, err := vm.WaitForIP(context.TODO())
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] ip address: %v", ip)

	return resourceVSphereVirtualMachineRead(d, meta)
}
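The update path above relies on two helpers that are not shown in this example: getDatacenter and vmPath. Minimal sketches consistent with how they are called, assuming the govmomi imports already used in this file; the provider's actual implementations may differ:
// getDatacenter resolves the named datacenter, or falls back to the
// default datacenter when no name is configured.
func getDatacenter(c *govmomi.Client, dc string) (*object.Datacenter, error) {
	finder := find.NewFinder(c.Client, true)
	if dc != "" {
		return finder.Datacenter(context.TODO(), dc)
	}
	return finder.DefaultDatacenter(context.TODO())
}

// vmPath joins an optional folder with the VM name into the inventory path
// that finder.VirtualMachine expects, e.g. "prod/web-01" or just "web-01".
func vmPath(folder string, name string) string {
	var path string
	if len(folder) > 0 {
		path += folder + "/"
	}
	return path + name
}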
Ejemplo n.º 18
0
func (cmd *clone) Run(ctx context.Context, f *flag.FlagSet) error {
	var err error

	if len(f.Args()) != 1 {
		return flag.ErrHelp
	}

	cmd.name = f.Arg(0)
	if cmd.name == "" {
		return flag.ErrHelp
	}

	cmd.Client, err = cmd.ClientFlag.Client()
	if err != nil {
		return err
	}

	cmd.Datacenter, err = cmd.DatacenterFlag.Datacenter()
	if err != nil {
		return err
	}

	if cmd.StoragePodFlag.Isset() {
		cmd.StoragePod, err = cmd.StoragePodFlag.StoragePod()
		if err != nil {
			return err
		}
	} else {
		cmd.Datastore, err = cmd.DatastoreFlag.Datastore()
		if err != nil {
			return err
		}
	}

	cmd.HostSystem, err = cmd.HostSystemFlag.HostSystemIfSpecified()
	if err != nil {
		return err
	}

	if cmd.HostSystem != nil {
		if cmd.ResourcePool, err = cmd.HostSystem.ResourcePool(context.TODO()); err != nil {
			return err
		}
	} else {
		// -host is optional
		if cmd.ResourcePool, err = cmd.ResourcePoolFlag.ResourcePool(); err != nil {
			return err
		}
	}

	if cmd.Folder, err = cmd.FolderFlag.Folder(); err != nil {
		return err
	}

	if cmd.VirtualMachine, err = cmd.VirtualMachineFlag.VirtualMachine(); err != nil {
		return err
	}

	task, err := cmd.cloneVM(context.TODO())
	if err != nil {
		return err
	}

	info, err := task.WaitForResult(context.TODO(), nil)
	if err != nil {
		return err
	}

	vm := object.NewVirtualMachine(cmd.Client, info.Result.(types.ManagedObjectReference))

	if cmd.cpus > 0 || cmd.memory > 0 {
		vmConfigSpec := types.VirtualMachineConfigSpec{}
		if cmd.cpus > 0 {
			vmConfigSpec.NumCPUs = int32(cmd.cpus)
		}
		if cmd.memory > 0 {
			vmConfigSpec.MemoryMB = int64(cmd.memory)
		}
		task, err := vm.Reconfigure(context.TODO(), vmConfigSpec)
		if err != nil {
			return err
		}
		_, err = task.WaitForResult(context.TODO(), nil)
		if err != nil {
			return err
		}
	}

	if cmd.on {
		task, err := vm.PowerOn(context.TODO())
		if err != nil {
			return err
		}

		_, err = task.WaitForResult(context.TODO(), nil)
		if err != nil {
			return err
		}

		if cmd.waitForIP {
			_, err = vm.WaitForIP(ctx)
			if err != nil {
				return err
			}
		}
	}

	return nil
}
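Run delegates the actual clone to cmd.cloneVM, which is not shown in this example. A minimal sketch of what such a method might do with the resources resolved above (folder, resource pool, datastore); the real govc implementation also handles storage DRS placement and the command's other flags:
// cloneVM, sketched from how Run uses it: build a relocate spec that targets
// the resolved resource pool (and datastore, if one was specified), then clone.
func (cmd *clone) cloneVM(ctx context.Context) (*object.Task, error) {
	poolRef := cmd.ResourcePool.Reference()
	relocateSpec := types.VirtualMachineRelocateSpec{
		Pool: &poolRef,
	}
	if cmd.Datastore != nil {
		dsRef := cmd.Datastore.Reference()
		relocateSpec.Datastore = &dsRef
	}

	cloneSpec := types.VirtualMachineCloneSpec{
		Location: relocateSpec,
		PowerOn:  false, // Run powers the clone on afterwards when requested
	}

	return cmd.VirtualMachine.Clone(ctx, cmd.Folder, cmd.name, cloneSpec)
}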
func (vm *virtualMachine) setupVirtualMachine(c *govmomi.Client) error {
	dc, err := getDatacenter(c, vm.datacenter)

	if err != nil {
		return err
	}
	finder := find.NewFinder(c.Client, true)
	finder = finder.SetDatacenter(dc)

	var template *object.VirtualMachine
	var template_mo mo.VirtualMachine
	var vm_mo mo.VirtualMachine
	if vm.template != "" {
		template, err = finder.VirtualMachine(context.TODO(), vm.template)
		if err != nil {
			return err
		}
		log.Printf("[DEBUG] template: %#v", template)

		err = template.Properties(context.TODO(), template.Reference(), []string{"parent", "config.template", "config.guestId", "resourcePool", "snapshot", "guest.toolsVersionStatus2", "config.guestFullName"}, &template_mo)
		if err != nil {
			return err
		}
	}

	var resourcePool *object.ResourcePool
	if vm.resourcePool == "" {
		if vm.cluster == "" {
			resourcePool, err = finder.DefaultResourcePool(context.TODO())
			if err != nil {
				return err
			}
		} else {
			resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources")
			if err != nil {
				return err
			}
		}
	} else {
		resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool)
		if err != nil {
			return err
		}
	}
	log.Printf("[DEBUG] resource pool: %#v", resourcePool)

	dcFolders, err := dc.Folders(context.TODO())
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] folder: %#v", vm.folder)

	folder := dcFolders.VmFolder
	if len(vm.folder) > 0 {
		si := object.NewSearchIndex(c.Client)
		folderRef, err := si.FindByInventoryPath(
			context.TODO(), fmt.Sprintf("%v/vm/%v", vm.datacenter, vm.folder))
		if err != nil {
			return fmt.Errorf("Error reading folder %s: %s", vm.folder, err)
		} else if folderRef == nil {
			return fmt.Errorf("Cannot find folder %s", vm.folder)
		} else {
			folder = folderRef.(*object.Folder)
		}
	}

	// make config spec
	configSpec := types.VirtualMachineConfigSpec{
		Name:              vm.name,
		NumCPUs:           vm.vcpu,
		NumCoresPerSocket: 1,
		MemoryMB:          vm.memoryMb,
		MemoryAllocation: &types.ResourceAllocationInfo{
			Reservation: vm.memoryAllocation.reservation,
		},
	}
	if vm.template == "" {
		configSpec.GuestId = "otherLinux64Guest"
	}
	log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)

	// make ExtraConfig
	log.Printf("[DEBUG] virtual machine Extra Config spec start")
	if len(vm.customConfigurations) > 0 {
		var ov []types.BaseOptionValue
		for k, v := range vm.customConfigurations {
			key := k
			value := v
			o := types.OptionValue{
				Key:   key,
				Value: &value,
			}
			log.Printf("[DEBUG] virtual machine Extra Config spec: %s,%s", k, v)
			ov = append(ov, &o)
		}
		configSpec.ExtraConfig = ov
		log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig)
	}

	var datastore *object.Datastore
	if vm.datastore == "" {
		datastore, err = finder.DefaultDatastore(context.TODO())
		if err != nil {
			return err
		}
	} else {
		datastore, err = finder.Datastore(context.TODO(), vm.datastore)
		if err != nil {
			// TODO: datastore cluster support in govmomi finder function
			d, err := getDatastoreObject(c, dcFolders, vm.datastore)
			if err != nil {
				return err
			}

			if d.Type == "StoragePod" {
				sp := object.StoragePod{
					Folder: object.NewFolder(c.Client, d),
				}

				var sps types.StoragePlacementSpec
				if vm.template != "" {
					sps = buildStoragePlacementSpecClone(c, dcFolders, template, resourcePool, sp)
				} else {
					sps = buildStoragePlacementSpecCreate(dcFolders, resourcePool, sp, configSpec)
				}

				datastore, err = findDatastore(c, sps)
				if err != nil {
					return err
				}
			} else {
				datastore = object.NewDatastore(c.Client, d)
			}
		}
	}

	log.Printf("[DEBUG] datastore: %#v", datastore)

	// network
	networkDevices := []types.BaseVirtualDeviceConfigSpec{}
	networkConfigs := []types.CustomizationAdapterMapping{}
	for _, network := range vm.networkInterfaces {
		// network device
		var networkDeviceType string
		if vm.template == "" {
			networkDeviceType = "e1000"
		} else {
			networkDeviceType = "vmxnet3"
		}
		nd, err := buildNetworkDevice(finder, network.label, networkDeviceType)
		if err != nil {
			return err
		}
		networkDevices = append(networkDevices, nd)

		if vm.template != "" {
			var ipSetting types.CustomizationIPSettings
			if network.ipv4Address == "" {
				ipSetting.Ip = &types.CustomizationDhcpIpGenerator{}
			} else {
				if network.ipv4PrefixLength == 0 {
					return fmt.Errorf("Error: ipv4_prefix_length argument is empty.")
				}
				m := net.CIDRMask(network.ipv4PrefixLength, 32)
				sm := net.IPv4(m[0], m[1], m[2], m[3])
				subnetMask := sm.String()
				log.Printf("[DEBUG] ipv4 gateway: %v\n", network.ipv4Gateway)
				log.Printf("[DEBUG] ipv4 address: %v\n", network.ipv4Address)
				log.Printf("[DEBUG] ipv4 prefix length: %v\n", network.ipv4PrefixLength)
				log.Printf("[DEBUG] ipv4 subnet mask: %v\n", subnetMask)
				ipSetting.Gateway = []string{
					network.ipv4Gateway,
				}
				ipSetting.Ip = &types.CustomizationFixedIp{
					IpAddress: network.ipv4Address,
				}
				ipSetting.SubnetMask = subnetMask
			}

			ipv6Spec := &types.CustomizationIPSettingsIpV6AddressSpec{}
			if network.ipv6Address == "" {
				ipv6Spec.Ip = []types.BaseCustomizationIpV6Generator{
					&types.CustomizationDhcpIpV6Generator{},
				}
			} else {
				log.Printf("[DEBUG] ipv6 gateway: %v\n", network.ipv6Gateway)
				log.Printf("[DEBUG] ipv6 address: %v\n", network.ipv6Address)
				log.Printf("[DEBUG] ipv6 prefix length: %v\n", network.ipv6PrefixLength)

				ipv6Spec.Ip = []types.BaseCustomizationIpV6Generator{
					&types.CustomizationFixedIpV6{
						IpAddress:  network.ipv6Address,
						SubnetMask: int32(network.ipv6PrefixLength),
					},
				}
				ipv6Spec.Gateway = []string{network.ipv6Gateway}
			}
			ipSetting.IpV6Spec = ipv6Spec

			// network config
			config := types.CustomizationAdapterMapping{
				Adapter: ipSetting,
			}
			networkConfigs = append(networkConfigs, config)
		}
	}
	log.Printf("[DEBUG] network devices: %v", networkDevices)
	log.Printf("[DEBUG] network configs: %v", networkConfigs)

	var task *object.Task
	if vm.template == "" {
		var mds mo.Datastore
		if err = datastore.Properties(context.TODO(), datastore.Reference(), []string{"name"}, &mds); err != nil {
			return err
		}
		log.Printf("[DEBUG] datastore: %#v", mds.Name)
		scsi, err := object.SCSIControllerTypes().CreateSCSIController("scsi")
		if err != nil {
			log.Printf("[ERROR] %s", err)
			return err
		}

		configSpec.DeviceChange = append(configSpec.DeviceChange, &types.VirtualDeviceConfigSpec{
			Operation: types.VirtualDeviceConfigSpecOperationAdd,
			Device:    scsi,
		})

		configSpec.Files = &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", mds.Name)}

		task, err = folder.CreateVM(context.TODO(), configSpec, resourcePool, nil)
		if err != nil {
			log.Printf("[ERROR] %s", err)
			return err
		}

		err = task.Wait(context.TODO())
		if err != nil {
			log.Printf("[ERROR] %s", err)
		}

	} else {

		relocateSpec, err := buildVMRelocateSpec(resourcePool, datastore, template, vm.linkedClone, vm.hardDisks[0].initType)
		if err != nil {
			return err
		}

		log.Printf("[DEBUG] relocate spec: %v", relocateSpec)

		// make vm clone spec
		cloneSpec := types.VirtualMachineCloneSpec{
			Location: relocateSpec,
			Template: false,
			Config:   &configSpec,
			PowerOn:  false,
		}
		if vm.linkedClone {
			if template_mo.Snapshot == nil {
				return fmt.Errorf("`linkedClone=true`, but image VM has no snapshots")
			}
			cloneSpec.Snapshot = template_mo.Snapshot.CurrentSnapshot
		}
		log.Printf("[DEBUG] clone spec: %v", cloneSpec)

		task, err = template.Clone(context.TODO(), folder, vm.name, cloneSpec)
		if err != nil {
			return err
		}
	}

	err = task.Wait(context.TODO())
	if err != nil {
		log.Printf("[ERROR] %s", err)
	}

	newVM, err := finder.VirtualMachine(context.TODO(), vm.Path())
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] new vm: %v", newVM)

	devices, err := newVM.Device(context.TODO())
	if err != nil {
		log.Printf("[DEBUG] Template devices can't be found")
		return err
	}

	for _, dvc := range devices {
		// Issue 3559/3560: Delete all ethernet devices to add the correct ones later
		if devices.Type(dvc) == "ethernet" {
			err := newVM.RemoveDevice(context.TODO(), false, dvc)
			if err != nil {
				return err
			}
		}
	}
	// Add Network devices
	for _, dvc := range networkDevices {
		err := newVM.AddDevice(
			context.TODO(), dvc.GetVirtualDeviceConfigSpec().Device)
		if err != nil {
			return err
		}
	}

	// Create the cdroms if needed.
	if err := createCdroms(newVM, vm.cdroms); err != nil {
		return err
	}

	err = newVM.Properties(context.TODO(), newVM.Reference(), []string{"summary", "config"}, &vm_mo)
	if err != nil {
		return err
	}
	firstDisk := 0
	if vm.template != "" {
		firstDisk++
	}
	for i := firstDisk; i < len(vm.hardDisks); i++ {
		log.Printf("[DEBUG] disk index: %v", i)

		var diskPath string
		switch {
		case vm.hardDisks[i].vmdkPath != "":
			diskPath = vm.hardDisks[i].vmdkPath
		case vm.hardDisks[i].name != "":
			snapshotFullDir := vm_mo.Config.Files.SnapshotDirectory
			split := strings.Split(snapshotFullDir, " ")
			if len(split) != 2 {
				return fmt.Errorf("[ERROR] setupVirtualMachine - failed to split snapshot directory: %v", snapshotFullDir)
			}
			vmWorkingPath := split[1]
			diskPath = vmWorkingPath + vm.hardDisks[i].name
		default:
			return fmt.Errorf("[ERROR] setupVirtualMachine - Neither vmdk path nor vmdk name was given: %#v", vm.hardDisks[i])
		}

		err = addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, vm.hardDisks[i].initType, datastore, diskPath, vm.hardDisks[i].controller)
		if err != nil {
			return err
		}
	}

	if vm.skipCustomization || vm.template == "" {
		log.Printf("[DEBUG] VM customization skipped")
	} else {
		var identity_options types.BaseCustomizationIdentitySettings
		if strings.HasPrefix(template_mo.Config.GuestId, "win") {
			var timeZone int
			if vm.timeZone == "Etc/UTC" {
				vm.timeZone = "085"
			}
			timeZone, err := strconv.Atoi(vm.timeZone)
			if err != nil {
				return fmt.Errorf("Error converting TimeZone: %s", err)
			}

			guiUnattended := types.CustomizationGuiUnattended{
				AutoLogon:      false,
				AutoLogonCount: 1,
				TimeZone:       int32(timeZone),
			}

			customIdentification := types.CustomizationIdentification{}

			userData := types.CustomizationUserData{
				ComputerName: &types.CustomizationFixedName{
					Name: strings.Split(vm.name, ".")[0],
				},
				ProductId: vm.windowsOptionalConfig.productKey,
				FullName:  "terraform",
				OrgName:   "terraform",
			}

			if vm.windowsOptionalConfig.domainUserPassword != "" && vm.windowsOptionalConfig.domainUser != "" && vm.windowsOptionalConfig.domain != "" {
				customIdentification.DomainAdminPassword = &types.CustomizationPassword{
					PlainText: true,
					Value:     vm.windowsOptionalConfig.domainUserPassword,
				}
				customIdentification.DomainAdmin = vm.windowsOptionalConfig.domainUser
				customIdentification.JoinDomain = vm.windowsOptionalConfig.domain
			}

			if vm.windowsOptionalConfig.adminPassword != "" {
				guiUnattended.Password = &types.CustomizationPassword{
					PlainText: true,
					Value:     vm.windowsOptionalConfig.adminPassword,
				}
			}

			identity_options = &types.CustomizationSysprep{
				GuiUnattended:  guiUnattended,
				Identification: customIdentification,
				UserData:       userData,
			}
		} else {
			identity_options = &types.CustomizationLinuxPrep{
				HostName: &types.CustomizationFixedName{
					Name: strings.Split(vm.name, ".")[0],
				},
				Domain:     vm.domain,
				TimeZone:   vm.timeZone,
				HwClockUTC: types.NewBool(true),
			}
		}

		// create CustomizationSpec
		customSpec := types.CustomizationSpec{
			Identity: identity_options,
			GlobalIPSettings: types.CustomizationGlobalIPSettings{
				DnsSuffixList: vm.dnsSuffixes,
				DnsServerList: vm.dnsServers,
			},
			NicSettingMap: networkConfigs,
		}
		log.Printf("[DEBUG] custom spec: %v", customSpec)

		log.Printf("[DEBUG] VM customization starting")
		taskb, err := newVM.Customize(context.TODO(), customSpec)
		if err != nil {
			return err
		}
		_, err = taskb.WaitForResult(context.TODO(), nil)
		if err != nil {
			return err
		}
		log.Printf("[DEBUG] VM customization finished")
	}

	if vm.hasBootableVmdk || vm.template != "" {
		if _, err := newVM.PowerOn(context.TODO()); err != nil {
			return err
		}
	}
	return nil
}
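setupVirtualMachine also depends on a buildNetworkDevice helper that is not shown. A minimal sketch consistent with its call sites (a finder, a network label, and an adapter type such as "vmxnet3" or "e1000") and with the []types.BaseVirtualDeviceConfigSpec slice it feeds; the provider's actual helper may differ:
// buildNetworkDevice looks up the named network and returns an "add" device
// config spec for an ethernet card of the requested adapter type.
func buildNetworkDevice(f *find.Finder, label, adapterType string) (*types.VirtualDeviceConfigSpec, error) {
	network, err := f.Network(context.TODO(), "*"+label)
	if err != nil {
		return nil, err
	}

	backing, err := network.EthernetCardBackingInfo(context.TODO())
	if err != nil {
		return nil, err
	}

	device, err := object.EthernetCardTypes().CreateEthernetCard(adapterType, backing)
	if err != nil {
		return nil, err
	}

	return &types.VirtualDeviceConfigSpec{
		Operation: types.VirtualDeviceConfigSpecOperationAdd,
		Device:    device,
	}, nil
}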