func resourceVirtualMachineRead(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*vim25.Client)
	vm_mor := types.ManagedObjectReference{Type: "VirtualMachine", Value: d.Id()}
	vm := object.NewVirtualMachine(client, vm_mor)

	var vm_mo mo.VirtualMachine
	err := vm.Properties(context.TODO(), vm.Reference(), []string{"summary"}, &vm_mo)
	if err != nil {
		log.Printf("[INFO] Cannot read VM properties: %s", err)
		d.SetId("")
		return nil
	}
	d.Set("name", vm_mo.Summary.Config.Name)
	d.Set("cpus", vm_mo.Summary.Config.NumCpu)
	d.Set("memory", vm_mo.Summary.Config.MemorySizeMB)

	if vm_mo.Summary.Runtime.PowerState == "poweredOn" {
		d.Set("power_on", true)
	} else {
		d.Set("power_on", false)
	}

	if d.Get("power_on").(bool) {
		ip, err := vm.WaitForIP(context.TODO())
		if err != nil {
			log.Printf("[ERROR] Cannot read ip address: %s", err)
		} else {
			d.Set("ip_address", ip)
		}
	}

	return nil
}
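The read, create, and delete functions shown on this page are Terraform CRUD callbacks for the legacy helper/schema SDK. A minimal sketch of how they might be wired into a resource definition; the schema fields listed here are illustrative assumptions, not taken from the actual provider:

func resourceVirtualMachine() *schema.Resource {
	return &schema.Resource{
		Create: resourceVirtualMachineCreate,
		Read:   resourceVirtualMachineRead,
		Delete: resourceVirtualMachineDelete,

		Schema: map[string]*schema.Schema{
			// Illustrative subset of the attributes the callbacks read and set.
			"name":     {Type: schema.TypeString, Required: true, ForceNew: true},
			"image":    {Type: schema.TypeString, Required: true, ForceNew: true},
			"cpus":     {Type: schema.TypeInt, Optional: true, ForceNew: true},
			"memory":   {Type: schema.TypeInt, Optional: true, ForceNew: true},
			"power_on": {Type: schema.TypeBool, Optional: true, Default: true},
		},
	}
}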
Example #2
func (cmd *ovfx) Run(ctx context.Context, f *flag.FlagSet) error {
	fpath, err := cmd.Prepare(f)
	if err != nil {
		return err
	}

	cmd.Archive = &FileArchive{fpath}

	moref, err := cmd.Import(fpath)
	if err != nil {
		return err
	}

	vm := object.NewVirtualMachine(cmd.Client, *moref)
	return cmd.Deploy(vm)
}
Example #3
func (cmd *vmdk) CreateVM(spec *configSpec) (*object.VirtualMachine, error) {
	folders, err := cmd.Datacenter.Folders(context.TODO())
	if err != nil {
		return nil, err
	}

	task, err := folders.VmFolder.CreateVM(context.TODO(), spec.ToSpec(), cmd.ResourcePool, nil)
	if err != nil {
		return nil, err
	}

	info, err := task.WaitForResult(context.TODO(), nil)
	if err != nil {
		return nil, err
	}

	return object.NewVirtualMachine(cmd.Client, info.Result.(types.ManagedObjectReference)), nil
}
Example #4
// GetSelf gets VirtualMachine reference for the VM this process is running on
func GetSelf(ctx context.Context, s *session.Session) (*object.VirtualMachine, error) {
	u, err := sys.UUID()
	if err != nil {
		return nil, err
	}

	search := object.NewSearchIndex(s.Vim25())
	ref, err := search.FindByUuid(ctx, s.Datacenter, u, true, nil)
	if err != nil {
		return nil, err
	}

	if ref == nil {
		return nil, fmt.Errorf("can't find the hosting vm")
	}

	vm := object.NewVirtualMachine(s.Client.Client, ref.Reference())
	return vm, nil
}
func resourceVirtualMachineDelete(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*vim25.Client)
	vm_mor := types.ManagedObjectReference{Type: "VirtualMachine", Value: d.Id()}
	vm := object.NewVirtualMachine(client, vm_mor)

	task, err := vm.PowerOff(context.TODO())
	if err != nil {
		return fmt.Errorf("Error powering vm off: %s", err)
	}
	task.WaitForResult(context.TODO(), nil)

	task, err = vm.Destroy(context.TODO())
	if err != nil {
		return fmt.Errorf("Error deleting vm: %s", err)
	}
	_, err = task.WaitForResult(context.TODO(), nil)
	if err != nil {
		return fmt.Errorf("Error deleting vm: %s", err)
	}

	return nil
}
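The delete function above discards the power-off task result. A hedged variant that skips the power-off entirely when the VM is already off, built only from govmomi calls already used elsewhere on this page:

func powerOffIfNeeded(ctx context.Context, vm *object.VirtualMachine) error {
	state, err := vm.PowerState(ctx)
	if err != nil {
		return err
	}
	// Nothing to do if the VM is not currently running.
	if state != types.VirtualMachinePowerStatePoweredOn {
		return nil
	}
	task, err := vm.PowerOff(ctx)
	if err != nil {
		return err
	}
	// Wait for the power-off task to complete before the caller destroys the VM.
	_, err = task.WaitForResult(ctx, nil)
	return err
}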
Example #6
func (f *Finder) VirtualMachineList(ctx context.Context, path string) ([]*object.VirtualMachine, error) {
	es, err := f.find(ctx, f.vmFolder, false, path)
	if err != nil {
		return nil, err
	}

	var vms []*object.VirtualMachine
	for _, e := range es {
		switch o := e.Object.(type) {
		case mo.VirtualMachine:
			vm := object.NewVirtualMachine(f.client, o.Reference())
			vm.InventoryPath = e.Path
			vms = append(vms, vm)
		}
	}

	if len(vms) == 0 {
		return nil, &NotFoundError{"vm", path}
	}

	return vms, nil
}
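Typical use of the finder method above, mirroring the finder setup in the other examples on this page; the "*" path simply matches every VM directly under the datacenter's vm folder:

func listVMs(ctx context.Context, c *vim25.Client) ([]*object.VirtualMachine, error) {
	finder := find.NewFinder(c, true)

	// Scope the finder to the default datacenter, as the other examples do.
	dc, err := finder.DefaultDatacenter(ctx)
	if err != nil {
		return nil, err
	}
	finder.SetDatacenter(dc)

	return finder.VirtualMachineList(ctx, "*")
}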
Example #7
func (cmd *vmdk) CloneVM(vm *object.VirtualMachine, name string) (*object.VirtualMachine, error) {
	folders, err := cmd.Datacenter.Folders(context.TODO())
	if err != nil {
		return nil, err
	}

	spec := types.VirtualMachineCloneSpec{
		Config:   &types.VirtualMachineConfigSpec{},
		Location: types.VirtualMachineRelocateSpec{},
	}

	task, err := vm.Clone(context.TODO(), folders.VmFolder, name, spec)
	if err != nil {
		return nil, err
	}

	info, err := task.WaitForResult(context.TODO(), nil)
	if err != nil {
		return nil, err
	}

	return object.NewVirtualMachine(cmd.Client, info.Result.(types.ManagedObjectReference)), nil
}
Example #8
// Create has the following implementation:
// 1. check whether the docker directory contains the boot2docker ISO
// 2. generate an SSH keypair and bundle it in a tar
// 3. create a virtual machine with the boot2docker ISO mounted
// 4. reconfigure the virtual machine network and disk size
func (d *Driver) Create() error {
	b2dutils := mcnutils.NewB2dUtils(d.StorePath)
	if err := b2dutils.CopyIsoToMachineDir(d.Boot2DockerURL, d.MachineName); err != nil {
		return err
	}

	log.Infof("Generating SSH Keypair...")
	if err := ssh.GenerateSSHKey(d.GetSSHKeyPath()); err != nil {
		return err
	}

	// Create context
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	c, err := d.vsphereLogin(ctx)
	if err != nil {
		return err
	}
	defer c.Logout(ctx)

	// Create a new finder
	f := find.NewFinder(c.Client, true)

	dc, err := f.DatacenterOrDefault(ctx, d.Datacenter)
	if err != nil {
		return err
	}

	f.SetDatacenter(dc)

	dss, err := f.DatastoreOrDefault(ctx, d.Datastore)
	if err != nil {
		return err
	}

	net, err := f.NetworkOrDefault(ctx, d.Network)
	if err != nil {
		return err
	}

	hs, err := f.HostSystemOrDefault(ctx, d.HostSystem)
	if err != nil {
		return err
	}

	var rp *object.ResourcePool
	if d.Pool != "" {
		// Find specified Resource Pool
		rp, err = f.ResourcePool(ctx, d.Pool)
		if err != nil {
			return err
		}
	} else {
		// Pick default Resource Pool for Host System
		rp, err = hs.ResourcePool(ctx)
		if err != nil {
			return err
		}
	}

	spec := types.VirtualMachineConfigSpec{
		Name:     d.MachineName,
		GuestId:  "otherLinux64Guest",
		Files:    &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", dss.Name())},
		NumCPUs:  int32(d.CPU),
		MemoryMB: int64(d.Memory),
	}

	scsi, err := object.SCSIControllerTypes().CreateSCSIController("pvscsi")
	if err != nil {
		return err
	}

	spec.DeviceChange = append(spec.DeviceChange, &types.VirtualDeviceConfigSpec{
		Operation: types.VirtualDeviceConfigSpecOperationAdd,
		Device:    scsi,
	})

	log.Infof("Creating VM...")
	folders, err := dc.Folders(ctx)
	if err != nil {
		return err
	}

	task, err := folders.VmFolder.CreateVM(ctx, spec, rp, hs)
	if err != nil {
		return err
	}

	info, err := task.WaitForResult(ctx, nil)
	if err != nil {
		return err
	}

	log.Infof("Uploading Boot2docker ISO ...")
	dsurl, err := dss.URL(ctx, dc, fmt.Sprintf("%s/%s", d.MachineName, isoFilename))
	if err != nil {
		return err
	}
	p := soap.DefaultUpload
	if err = c.Client.UploadFile(d.ISO, dsurl, &p); err != nil {
		return err
	}

	// Retrieve the new VM
	vm := object.NewVirtualMachine(c.Client, info.Result.(types.ManagedObjectReference))

	devices, err := vm.Device(ctx)
	if err != nil {
		return err
	}

	var add []types.BaseVirtualDevice

	controller, err := devices.FindDiskController("scsi")
	if err != nil {
		return err
	}

	disk := devices.CreateDisk(controller, dss.Reference(),
		dss.Path(fmt.Sprintf("%s/%s.vmdk", d.MachineName, d.MachineName)))

	// Convert MB to KB
	disk.CapacityInKB = int64(d.DiskSize) * 1024

	add = append(add, disk)
	ide, err := devices.FindIDEController("")
	if err != nil {
		return err
	}

	cdrom, err := devices.CreateCdrom(ide)
	if err != nil {
		return err
	}

	add = append(add, devices.InsertIso(cdrom, dss.Path(fmt.Sprintf("%s/%s", d.MachineName, isoFilename))))

	backing, err := net.EthernetCardBackingInfo(ctx)
	if err != nil {
		return err
	}

	netdev, err := object.EthernetCardTypes().CreateEthernetCard("vmxnet3", backing)
	if err != nil {
		return err
	}

	log.Infof("Reconfiguring VM...")
	add = append(add, netdev)
	if err := vm.AddDevice(ctx, add...); err != nil {
		return err
	}

	if err := d.Start(); err != nil {
		return err
	}

	log.Infof("Provisioning certs and ssh keys...")
	// Generate a tar keys bundle
	if err := d.generateKeyBundle(); err != nil {
		return err
	}

	opman := guest.NewOperationsManager(c.Client, vm.Reference())

	fileman, err := opman.FileManager(ctx)
	if err != nil {
		return err
	}

	src := d.ResolveStorePath("userdata.tar")
	s, err := os.Stat(src)
	if err != nil {
		return err
	}

	auth := AuthFlag{}
	flag := FileAttrFlag{}
	auth.auth.Username = B2DUser
	auth.auth.Password = B2DPass
	flag.SetPerms(0, 0, 660)
	url, err := fileman.InitiateFileTransferToGuest(ctx, auth.Auth(), "/home/docker/userdata.tar", flag.Attr(), s.Size(), true)
	if err != nil {
		return err
	}
	u, err := c.Client.ParseURL(url)
	if err != nil {
		return err
	}
	if err = c.Client.UploadFile(src, u, nil); err != nil {
		return err
	}

	procman, err := opman.ProcessManager(ctx)
	if err != nil {
		return err
	}

	var env []string
	guestspec := types.GuestProgramSpec{
		ProgramPath:      "/usr/bin/sudo",
		Arguments:        "/bin/mv /home/docker/userdata.tar /var/lib/boot2docker/userdata.tar && /usr/bin/sudo tar xf /var/lib/boot2docker/userdata.tar -C /home/docker/ > /var/log/userdata.log 2>&1 && /usr/bin/sudo chown -R docker:staff /home/docker",
		WorkingDirectory: "",
		EnvVariables:     env,
	}

	_, err = procman.StartProgram(ctx, auth.Auth(), &guestspec)
	if err != nil {
		return err
	}

	return nil
}
Example #9
func (cmd *create) Run(ctx context.Context, f *flag.FlagSet) error {
	var err error

	if len(f.Args()) != 1 {
		return flag.ErrHelp
	}

	cmd.name = f.Arg(0)
	if cmd.name == "" {
		return flag.ErrHelp
	}

	cmd.Client, err = cmd.ClientFlag.Client()
	if err != nil {
		return err
	}

	cmd.Datacenter, err = cmd.DatacenterFlag.Datacenter()
	if err != nil {
		return err
	}

	cmd.Datastore, err = cmd.DatastoreFlag.Datastore()
	if err != nil {
		return err
	}

	cmd.HostSystem, err = cmd.HostSystemFlag.HostSystemIfSpecified()
	if err != nil {
		return err
	}

	if cmd.HostSystem != nil {
		if cmd.ResourcePool, err = cmd.HostSystem.ResourcePool(context.TODO()); err != nil {
			return err
		}
	} else {
		// -host is optional
		if cmd.ResourcePool, err = cmd.ResourcePoolFlag.ResourcePool(); err != nil {
			return err
		}
	}

	// Verify ISO exists
	if cmd.iso != "" {
		_, err = cmd.isoDatastoreFlag.Stat(context.TODO(), cmd.iso)
		if err != nil {
			return err
		}

		cmd.isoDatastore, err = cmd.isoDatastoreFlag.Datastore()
		if err != nil {
			return err
		}
	}

	// Verify disk exists
	if cmd.disk != "" {
		_, err = cmd.diskDatastoreFlag.Stat(context.TODO(), cmd.disk)
		if err != nil {
			return err
		}

		cmd.diskDatastore, err = cmd.diskDatastoreFlag.Datastore()
		if err != nil {
			return err
		}
	}

	task, err := cmd.createVM(context.TODO())
	if err != nil {
		return err
	}

	info, err := task.WaitForResult(context.TODO(), nil)
	if err != nil {
		return err
	}

	vm := object.NewVirtualMachine(cmd.Client, info.Result.(types.ManagedObjectReference))

	if cmd.on {
		task, err := vm.PowerOn(context.TODO())
		if err != nil {
			return err
		}

		_, err = task.WaitForResult(context.TODO(), nil)
		if err != nil {
			return err
		}
	}

	return nil
}
Example #10
func TestCreateVm(t *testing.T) {
	ctx := context.Background()

	for _, model := range []*Model{ESX(), VPX()} {
		defer model.Remove()
		err := model.Create()
		if err != nil {
			t.Fatal(err)
		}

		s := model.Service.NewServer()
		defer s.Close()

		c, err := govmomi.NewClient(ctx, s.URL, true)
		if err != nil {
			t.Fatal(err)
		}

		spec := types.VirtualMachineConfigSpec{
			// Note: real ESX allows the VM to be created without a GuestId,
			// but power on will fail.
			GuestId: string(types.VirtualMachineGuestOsIdentifierOtherGuest),
		}

		steps := []func(){
			func() {
				spec.Name = "test"
			},
			func() {
				spec.Files = &types.VirtualMachineFileInfo{
					VmPathName: fmt.Sprintf("[LocalDS_0] %s/%s.vmx", spec.Name, spec.Name),
				}
			},
		}

		finder := find.NewFinder(c.Client, false)

		dc, err := finder.DefaultDatacenter(ctx)
		if err != nil {
			t.Fatal(err)
		}

		finder.SetDatacenter(dc)

		folders, err := dc.Folders(ctx)
		if err != nil {
			t.Fatal(err)
		}

		hosts, err := finder.HostSystemList(ctx, "*/*")
		if err != nil {
			t.Fatal(err)
		}

		nhosts := len(hosts)
		host := hosts[rand.Intn(nhosts)]
		pool, err := host.ResourcePool(ctx)
		if err != nil {
			t.Fatal(err)
		}

		if nhosts == 1 {
			// test the default path against the ESX model
			host = nil
		}

		vmFolder := folders.VmFolder
		// expecting CreateVM to fail until all steps are taken
		for _, step := range steps {
			task, cerr := vmFolder.CreateVM(ctx, spec, pool, host)
			if cerr != nil {
				t.Fatal(cerr)
			}

			_, cerr = task.WaitForResult(ctx, nil)
			if cerr == nil {
				t.Error("expected error")
			}

			step()
		}

		task, err := vmFolder.CreateVM(ctx, spec, pool, host)
		if err != nil {
			t.Fatal(err)
		}

		info, err := task.WaitForResult(ctx, nil)
		if err != nil {
			t.Fatal(err)
		}

		vm := object.NewVirtualMachine(c.Client, info.Result.(types.ManagedObjectReference))

		name, err := vm.ObjectName(ctx)
		if err != nil {
			t.Fatal(err)
		}

		if name != spec.Name {
			t.Errorf("name=%s", name)
		}

		_, err = vm.Device(ctx)
		if err != nil {
			t.Fatal(err)
		}

		recreate := func(context.Context) (*object.Task, error) {
			return vmFolder.CreateVM(ctx, spec, pool, nil)
		}

		ops := []struct {
			method func(context.Context) (*object.Task, error)
			state  types.VirtualMachinePowerState
			fail   bool
		}{
			// Powered off by default
			{nil, types.VirtualMachinePowerStatePoweredOff, false},
			// Create with same .vmx path should fail
			{recreate, "", true},
			// Off -> On  == ok
			{vm.PowerOn, types.VirtualMachinePowerStatePoweredOn, false},
			// On  -> On  == fail
			{vm.PowerOn, types.VirtualMachinePowerStatePoweredOn, true},
			// On  -> Off == ok
			{vm.PowerOff, types.VirtualMachinePowerStatePoweredOff, false},
			// Off -> Off == fail
			{vm.PowerOff, types.VirtualMachinePowerStatePoweredOff, true},
			// Off -> On  == ok
			{vm.PowerOn, types.VirtualMachinePowerStatePoweredOn, false},
			// Destroy == fail (power is On)
			{vm.Destroy, types.VirtualMachinePowerStatePoweredOn, true},
			// On  -> Off == ok
			{vm.PowerOff, types.VirtualMachinePowerStatePoweredOff, false},
			// Destroy == ok (power is Off)
			{vm.Destroy, "", false},
		}

		for i, op := range ops {
			if op.method != nil {
				task, err = op.method(ctx)
				if err != nil {
					t.Fatal(err)
				}

				err = task.Wait(ctx)
				if op.fail {
					if err == nil {
						t.Errorf("%d: expected error", i)
					}
				} else {
					if err != nil {
						t.Errorf("%d: %s", i, err)
					}
				}
			}

			if len(op.state) != 0 {
				state, err := vm.PowerState(ctx)
				if err != nil {
					t.Fatal(err)
				}

				if state != op.state {
					t.Errorf("state=%s", state)
				}
			}
		}
	}
}
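TestCreateVm above runs against govmomi's built-in simulator (vcsim). A minimal sketch of the same setup as a reusable test helper, assuming the simulator Model/Service API exactly as used in that test:

func newSimClient(t *testing.T) (*govmomi.Client, func()) {
	model := simulator.VPX()
	if err := model.Create(); err != nil {
		t.Fatal(err)
	}
	s := model.Service.NewServer()

	c, err := govmomi.NewClient(context.Background(), s.URL, true)
	if err != nil {
		t.Fatal(err)
	}

	// The caller is responsible for shutting down the server and model.
	return c, func() {
		s.Close()
		model.Remove()
	}
}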
func resourceVirtualMachineCreate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*vim25.Client)

	dc_name := d.Get("datacenter").(string)
	if dc_name == "" {
		finder := find.NewFinder(client, false)
		dc, err := finder.DefaultDatacenter(context.TODO())
		if err != nil {
			return fmt.Errorf("Error reading default datacenter: %s", err)
		}
		var dc_mo mo.Datacenter
		err = dc.Properties(context.TODO(), dc.Reference(), []string{"name"}, &dc_mo)
		if err != nil {
			return fmt.Errorf("Error reading datacenter name: %s", err)
		}
		dc_name = dc_mo.Name
		d.Set("datacenter", dc_name)
	}

	image_name := d.Get("image").(string)
	image_ref, err := object.NewSearchIndex(client).FindByInventoryPath(context.TODO(), fmt.Sprintf("%s/vm/%s", dc_name, image_name))
	if err != nil {
		return fmt.Errorf("Error reading vm: %s", err)
	}
	if image_ref == nil {
		return fmt.Errorf("Cannot find image %s", image_name)
	}
	image := image_ref.(*object.VirtualMachine)

	var image_mo mo.VirtualMachine
	err = image.Properties(context.TODO(), image.Reference(), []string{"parent", "config.template", "resourcePool", "snapshot", "guest.toolsVersionStatus2", "config.guestFullName"}, &image_mo)
	if err != nil {
		return fmt.Errorf("Error reading base VM properties: %s", err)
	}

	var folder_ref object.Reference
	var folder *object.Folder
	if d.Get("folder").(string) != "" {
		folder_ref, err = object.NewSearchIndex(client).FindByInventoryPath(context.TODO(), fmt.Sprintf("%v/vm/%v", dc_name, d.Get("folder").(string)))
		if err != nil {
			return fmt.Errorf("Error reading folder: %s", err)
		}
		if folder_ref == nil {
			return fmt.Errorf("Cannot find folder %s", d.Get("folder").(string))
		}

		folder = folder_ref.(*object.Folder)
	} else {
		folder = object.NewFolder(client, *image_mo.Parent)
	}

	host_name := d.Get("host").(string)
	if host_name == "" {
		if image_mo.Config.Template {
			return fmt.Errorf("Image is a template, 'host' is required")
		} else {
			var pool_mo mo.ResourcePool
			err = property.DefaultCollector(client).RetrieveOne(context.TODO(), *image_mo.ResourcePool, []string{"owner"}, &pool_mo)
			if err != nil {
				return fmt.Errorf("Error reading resource pool of base VM: %s", err)
			}

			if strings.Contains(pool_mo.Owner.Value, "domain-s") {
				var host_mo mo.ComputeResource
				err = property.DefaultCollector(client).RetrieveOne(context.TODO(), pool_mo.Owner, []string{"name"}, &host_mo)
				if err != nil {
					return fmt.Errorf("Error reading host of base VM: %s", err)
				}
				host_name = host_mo.Name
			} else if strings.Contains(pool_mo.Owner.Value, "domain-c") {
				var cluster_mo mo.ClusterComputeResource
				err = property.DefaultCollector(client).RetrieveOne(context.TODO(), pool_mo.Owner, []string{"name"}, &cluster_mo)
				if err != nil {
					return fmt.Errorf("Error reading cluster of base VM: %s", err)
				}
				host_name = cluster_mo.Name
			} else {
				return fmt.Errorf("Unknown compute resource format of base VM: %s", pool_mo.Owner.Value)
			}
		}
	}

	pool_name := d.Get("resource_pool").(string)
	pool_ref, err := object.NewSearchIndex(client).FindByInventoryPath(context.TODO(), fmt.Sprintf("%v/host/%v/Resources/%v", dc_name, host_name, pool_name))
	if err != nil {
		return fmt.Errorf("Error reading resource pool: %s", err)
	}
	if pool_ref == nil {
		return fmt.Errorf("Cannot find resource pool %s", pool_name)
	}

	var relocateSpec types.VirtualMachineRelocateSpec
	var pool_mor types.ManagedObjectReference
	pool_mor = pool_ref.Reference()
	relocateSpec.Pool = &pool_mor

	if d.Get("linked_clone").(bool) {
		relocateSpec.DiskMoveType = "createNewChildDiskBacking"
	}
	var confSpec types.VirtualMachineConfigSpec
	if d.Get("cpus") != nil {
		confSpec.NumCPUs = int32(d.Get("cpus").(int))
	}
	if d.Get("memory") != nil {
		confSpec.MemoryMB = int64(d.Get("memory").(int))
	}

	params := d.Get("configuration_parameters").(map[string]interface{})
	var ov []types.BaseOptionValue
	if len(params) > 0 {
		for k, v := range params {
			key := k
			value := v
			o := types.OptionValue{
				Key:   key,
				Value: &value,
			}
			ov = append(ov, &o)
		}
		confSpec.ExtraConfig = ov
	}

	cloneSpec := types.VirtualMachineCloneSpec{
		Location: relocateSpec,
		Config:   &confSpec,
		PowerOn:  d.Get("power_on").(bool),
	}
	if d.Get("linked_clone").(bool) {
		if image_mo.Snapshot == nil {
			return fmt.Errorf("`linked_clone=true`, but image VM has no snapshots")
		}
		cloneSpec.Snapshot = image_mo.Snapshot.CurrentSnapshot
	}

	domain := d.Get("domain").(string)
	ip_address := d.Get("ip_address").(string)
	if domain != "" {
		if image_mo.Guest.ToolsVersionStatus2 == "guestToolsNotInstalled" {
			return fmt.Errorf("VMware tools are not installed in base VM")
		}
		if !strings.Contains(image_mo.Config.GuestFullName, "Linux") && !strings.Contains(image_mo.Config.GuestFullName, "CentOS") {
			return fmt.Errorf("Guest customization is supported only for Linux. Base image OS is: %s", image_mo.Config.GuestFullName)
		}
		customizationSpec := types.CustomizationSpec{
			GlobalIPSettings: types.CustomizationGlobalIPSettings{},
			Identity: &types.CustomizationLinuxPrep{
				HostName: &types.CustomizationVirtualMachineName{},
				Domain:   domain,
			},
			NicSettingMap: []types.CustomizationAdapterMapping{
				{
					Adapter: types.CustomizationIPSettings{},
				},
			},
		}
		if ip_address != "" {
			mask := d.Get("subnet_mask").(string)
			if mask == "" {
				return fmt.Errorf("'subnet_mask' must be set, if static 'ip_address' is specified")
			}
			customizationSpec.NicSettingMap[0].Adapter.Ip = &types.CustomizationFixedIp{
				IpAddress: ip_address,
			}
			customizationSpec.NicSettingMap[0].Adapter.SubnetMask = d.Get("subnet_mask").(string)
			gateway := d.Get("gateway").(string)
			if gateway != "" {
				customizationSpec.NicSettingMap[0].Adapter.Gateway = []string{gateway}
			}
		} else {
			customizationSpec.NicSettingMap[0].Adapter.Ip = &types.CustomizationDhcpIpGenerator{}
		}
		cloneSpec.Customization = &customizationSpec
	} else if ip_address != "" {
		return fmt.Errorf("'domain' must be set, if static 'ip_address' is specified")
	}

	task, err := image.Clone(context.TODO(), folder, d.Get("name").(string), cloneSpec)
	if err != nil {
		return fmt.Errorf("Error clonning vm: %s", err)
	}
	info, err := task.WaitForResult(context.TODO(), nil)
	if err != nil {
		return fmt.Errorf("Error clonning vm: %s", err)
	}

	vm_mor := info.Result.(types.ManagedObjectReference)
	d.SetId(vm_mor.Value)
	vm := object.NewVirtualMachine(client, vm_mor)
	// workaround for https://github.com/vmware/govmomi/issues/218
	if ip_address == "" && d.Get("power_on").(bool) {
		ip, err := vm.WaitForIP(context.TODO())
		if err != nil {
			log.Printf("[ERROR] Cannot read ip address: %s", err)
		} else {
			d.Set("ip_address", ip)
			d.SetConnInfo(map[string]string{
				"type": "ssh",
				"host": ip,
			})
		}
	}

	return nil
}
Example #12
File: vm.go Project: kjplatz/vic
// NewVirtualMachine returns a new VirtualMachine object
func NewVirtualMachine(ctx context.Context, session *session.Session, moref types.ManagedObjectReference) *VirtualMachine {
	return NewVirtualMachineFromVM(ctx, session, object.NewVirtualMachine(session.Client.Client, moref))
}
Example #13
func (i *vSphereInstanceManager) Start(ctx context.Context, baseName string) (*Instance, error) {
	client, err := i.client(ctx)
	if err != nil {
		return nil, err
	}

	vm, snapshotTree, err := i.findBaseVMAndSnapshot(ctx, baseName)
	if err != nil {
		return nil, fmt.Errorf("couldn't get base VM and snapshot: %s", err)
	}

	resourcePool, err := i.resourcePool(ctx)
	if err != nil {
		return nil, fmt.Errorf("couldn't get resource pool: %s", err)
	}

	relocateSpec := types.VirtualMachineRelocateSpec{
		DiskMoveType: string(types.VirtualMachineRelocateDiskMoveOptionsCreateNewChildDiskBacking),
		Pool:         resourcePool,
	}

	cloneSpec := types.VirtualMachineCloneSpec{
		Location: relocateSpec,
		PowerOn:  false,
		Template: false,
		Snapshot: &snapshotTree.Snapshot,
	}

	name := uuid.NewRandom()

	vmFolder, err := i.vmFolder(ctx)
	if err != nil {
		return nil, err
	}

	task, err := vm.Clone(ctx, vmFolder, name.String(), cloneSpec)
	if err != nil {
		return nil, err
	}

	err = task.Wait(ctx)
	if err != nil {
		return nil, err
	}

	var mt mo.Task
	err = task.Properties(ctx, task.Reference(), []string{"info"}, &mt)
	if err != nil {
		return nil, err
	}

	if mt.Info.Result == nil {
		return nil, fmt.Errorf("expected VM, but got nil")
	}

	vmManagedRef, ok := mt.Info.Result.(types.ManagedObjectReference)
	if !ok {
		return nil, fmt.Errorf("expected ManagedObjectReference, but got %T", mt.Info.Result)
	}

	newVM := object.NewVirtualMachine(client.Client, vmManagedRef)

	task, err = newVM.PowerOn(ctx)
	if err != nil {
		return nil, err
	}

	err = task.Wait(ctx)
	if err != nil {
		return nil, err
	}

	return i.instanceForVirtualMachine(ctx, newVM)
}
Example #14
func TestIssue242(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	h := NewHelper(t)
	defer h.Teardown()

	h.RequireVirtualCenter()

	df, err := h.Datacenter().Folders(ctx)
	if err != nil {
		t.Fatal(err)
	}

	cr := h.ComputeResource()

	// Get local datastores for compute resource
	dss, err := h.LocalDatastores(ctx, cr)
	if err != nil {
		t.Fatal(err)
	}
	if len(dss) == 0 {
		t.Fatalf("No local datastores")
	}

	// Get root resource pool for compute resource
	rp, err := cr.ResourcePool(ctx)
	if err != nil {
		t.Fatal(err)
	}

	spec := types.VirtualMachineConfigSpec{
		Name:     fmt.Sprintf("govmomi-test-%s", time.Now().Format(time.RFC3339)),
		Files:    &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", dss[0].Name())},
		NumCPUs:  1,
		MemoryMB: 32,
	}

	// Create new VM
	task, err := df.VmFolder.CreateVM(context.Background(), spec, rp, nil)
	if err != nil {
		t.Fatal(err)
	}

	info, err := task.WaitForResult(context.Background(), nil)
	if err != nil {
		t.Fatal(err)
	}

	vm := object.NewVirtualMachine(h.c, info.Result.(types.ManagedObjectReference))
	defer func() {
		task, err := vm.Destroy(context.Background())
		if err != nil {
			panic(err)
		}
		err = task.Wait(context.Background())
		if err != nil {
			panic(err)
		}
	}()

	// Mark VM as template
	err = vm.MarkAsTemplate(context.Background())
	if err != nil {
		t.Fatal(err)
	}

	// Get "environmentBrowser" property for VM template
	var mvm mo.VirtualMachine
	err = property.DefaultCollector(h.c).RetrieveOne(ctx, vm.Reference(), []string{"environmentBrowser"}, &mvm)
	if err != nil {
		t.Fatal(err)
	}
}
Example #15
func (cmd *clone) Run(ctx context.Context, f *flag.FlagSet) error {
	var err error

	if len(f.Args()) != 1 {
		return flag.ErrHelp
	}

	cmd.name = f.Arg(0)
	if cmd.name == "" {
		return flag.ErrHelp
	}

	cmd.Client, err = cmd.ClientFlag.Client()
	if err != nil {
		return err
	}

	cmd.Datacenter, err = cmd.DatacenterFlag.Datacenter()
	if err != nil {
		return err
	}

	if cmd.StoragePodFlag.Isset() {
		cmd.StoragePod, err = cmd.StoragePodFlag.StoragePod()
		if err != nil {
			return err
		}
	} else {
		cmd.Datastore, err = cmd.DatastoreFlag.Datastore()
		if err != nil {
			return err
		}
	}

	cmd.HostSystem, err = cmd.HostSystemFlag.HostSystemIfSpecified()
	if err != nil {
		return err
	}

	if cmd.HostSystem != nil {
		if cmd.ResourcePool, err = cmd.HostSystem.ResourcePool(context.TODO()); err != nil {
			return err
		}
	} else {
		// -host is optional
		if cmd.ResourcePool, err = cmd.ResourcePoolFlag.ResourcePool(); err != nil {
			return err
		}
	}

	if cmd.Folder, err = cmd.FolderFlag.Folder(); err != nil {
		return err
	}

	if cmd.VirtualMachine, err = cmd.VirtualMachineFlag.VirtualMachine(); err != nil {
		return err
	}

	task, err := cmd.cloneVM(context.TODO())
	if err != nil {
		return err
	}

	info, err := task.WaitForResult(context.TODO(), nil)
	if err != nil {
		return err
	}

	vm := object.NewVirtualMachine(cmd.Client, info.Result.(types.ManagedObjectReference))

	if cmd.cpus > 0 || cmd.memory > 0 {
		vmConfigSpec := types.VirtualMachineConfigSpec{}
		if cmd.cpus > 0 {
			vmConfigSpec.NumCPUs = int32(cmd.cpus)
		}
		if cmd.memory > 0 {
			vmConfigSpec.MemoryMB = int64(cmd.memory)
		}
		task, err := vm.Reconfigure(context.TODO(), vmConfigSpec)
		if err != nil {
			return err
		}
		_, err = task.WaitForResult(context.TODO(), nil)
		if err != nil {
			return err
		}
	}

	if cmd.on {
		task, err := vm.PowerOn(context.TODO())
		if err != nil {
			return err
		}

		_, err = task.WaitForResult(context.TODO(), nil)
		if err != nil {
			return err
		}

		if cmd.waitForIP {
			_, err = vm.WaitForIP(ctx)
			if err != nil {
				return err
			}
		}
	}

	return nil
}
Example #16
//
// Create creates the VM by
// 1. Generates SSH keys
// 2. Logs in to vSphere and gathers dc, datastore, network, resource pool and
//    other information
// 3. Clones the VM template
// 4. Powers on the VM
// 5. Uploads the SSH key bundle
//
// Parameters:
//      None
// Returns:
//      (error): various errors from vSphere
//
func (d *Driver) Create() error {
	var relocateSpec types.VirtualMachineRelocateSpec

	log.Infof("Generating SSH Keypair...")
	if err := ssh.GenerateSSHKey(d.GetSSHKeyPath()); err != nil {
		return err
	}

	// Create context
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Query the inventory
	c, err := d.vsphereLogin(ctx)
	if err != nil {
		return err
	}
	defer c.Logout(ctx)
	// Create a new finder
	f := find.NewFinder(c.Client, true)

	dc, err := f.DatacenterOrDefault(ctx, d.Datacenter)
	if err != nil {
		return err
	}

	folders, err := dc.Folders(ctx)
	if err != nil {
		return err
	}
	folder := folders.VmFolder

	f.SetDatacenter(dc)

	dss, err := f.DatastoreOrDefault(ctx, d.Datastore)
	if err != nil {
		return err
	}

	rp, err := f.ResourcePoolOrDefault(ctx, d.Pool)
	if err != nil {
		// Pick default Resource Pool for Host System
		hs, err := f.HostSystemOrDefault(ctx, d.HostSystem)
		if err != nil {
			log.Warnf("Unable to find host system: %s", err)
		}

		if hs != nil {
			rp, err = hs.ResourcePool(ctx)
			if err != nil {
				return err
			}
			hostref := hs.Reference()
			relocateSpec.Host = &hostref
		} else {
			return err
		}
	}

	dcName, err := d.getDatacenterName(dc)
	if err != nil {
		return err
	}

	image, err := d.getVMTemplate(d.VMTemplate, dcName, c.Client)
	if err != nil {
		return err
	}

	var imageMoRef mo.VirtualMachine
	err = image.Properties(ctx, image.Reference(), []string{"parent", "config.template", "resourcePool", "snapshot", "guest.toolsVersionStatus2", "config.guestFullName"}, &imageMoRef)
	if err != nil {
		return fmt.Errorf("Error reading base VM properties: %s", err)
	}

	// Create a CloneSpec to clone the VM
	datastoreref := dss.Reference()
	folderref := folder.Reference()
	poolref := rp.Reference()

	relocateSpec.Datastore = &datastoreref
	relocateSpec.Folder = &folderref
	relocateSpec.Pool = &poolref

	spec := types.VirtualMachineConfigSpec{
		Name:     d.MachineName,
		GuestId:  "otherLinux64Guest",
		Files:    &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", dss.Name())},
		NumCPUs:  int32(d.CPU),
		MemoryMB: int64(d.Memory),
	}

	cloneSpec := types.VirtualMachineCloneSpec{
		Config: &spec,
	}

	if imageMoRef.Snapshot != nil {
		relocateSpec.DiskMoveType = "createNewChildDiskBacking"
		cloneSpec.Snapshot = imageMoRef.Snapshot.CurrentSnapshot
	} else {
		return fmt.Errorf("No snapshots for template, cannot use for cloning")
	}

	if d.Network != "" {
		// search for the first network card of the source
		devices, err := image.Device(ctx)
		if err != nil {
			return fmt.Errorf("Error reading base VM devices: %s", err)
		}
		var card *types.VirtualEthernetCard
		for _, device := range devices {
			if c, ok := device.(types.BaseVirtualEthernetCard); ok {
				card = c.GetVirtualEthernetCard()
				break
			}
		}
		if card == nil {
			return fmt.Errorf("No network device found for the template.")
		}

		// get the new backing information
		net, err := f.NetworkOrDefault(ctx, d.Network)
		if err != nil {
			return fmt.Errorf("Network not found: %s", err)
		}
		backing, err := net.EthernetCardBackingInfo(ctx)
		if err != nil {
			return fmt.Errorf("Network backing not found: %s", err)
		}
		netdev, err := object.EthernetCardTypes().CreateEthernetCard("vmxnet3", backing)
		if err != nil {
			return fmt.Errorf("Failed to create ethernet card: %s", err)
		}

		//set backing info
		card.Backing = netdev.(types.BaseVirtualEthernetCard).GetVirtualEthernetCard().Backing

		// prepare virtual device config spec for network card
		configSpecs := []types.BaseVirtualDeviceConfigSpec{
			&types.VirtualDeviceConfigSpec{
				Operation: types.VirtualDeviceConfigSpecOperationEdit,
				Device:    card,
			},
		}
		relocateSpec.DeviceChange = configSpecs
	}

	cloneSpec.Location = relocateSpec

	task, err := image.Clone(ctx, folder, d.MachineName, cloneSpec)
	if err != nil {
		return fmt.Errorf("Error cloning vm: %s", err)
	}
	info, err := task.WaitForResult(ctx, nil)
	if err != nil {
		return fmt.Errorf("Error cloning vm: %s", err)
	}

	vmMoRef := info.Result.(types.ManagedObjectReference)
	vm := object.NewVirtualMachine(c.Client, vmMoRef)

	// Power On the VM
	if err := d.Start(); err != nil {
		return err
	}

	// Upload the bundle
	return d.uploadBundle(vm.Reference(), ctx, c.Client)
}
Example #17
func (cmd *create) Run(ctx context.Context, f *flag.FlagSet) error {
	var err error

	if len(f.Args()) != 1 {
		return flag.ErrHelp
	}

	cmd.Client, err = cmd.ClientFlag.Client()
	if err != nil {
		return err
	}

	cmd.Datacenter, err = cmd.DatacenterFlag.Datacenter()
	if err != nil {
		return err
	}

	cmd.Datastore, err = cmd.DatastoreFlag.Datastore()
	if err != nil {
		return err
	}

	cmd.HostSystem, err = cmd.HostSystemFlag.HostSystemIfSpecified()
	if err != nil {
		return err
	}

	if cmd.HostSystem != nil {
		if cmd.ResourcePool, err = cmd.HostSystem.ResourcePool(context.TODO()); err != nil {
			return err
		}
	} else {
		// -host is optional
		if cmd.ResourcePool, err = cmd.ResourcePoolFlag.ResourcePool(); err != nil {
			return err
		}
	}

	for _, file := range []*string{&cmd.iso, &cmd.disk} {
		if *file != "" {
			_, err = cmd.Datastore.Stat(context.TODO(), *file)
			if err != nil {
				return err
			}
		}
	}

	task, err := cmd.createVM(f.Arg(0))
	if err != nil {
		return err
	}

	info, err := task.WaitForResult(context.TODO(), nil)
	if err != nil {
		return err
	}

	vm := object.NewVirtualMachine(cmd.Client, info.Result.(types.ManagedObjectReference))

	if err := cmd.addDevices(vm); err != nil {
		return err
	}

	if cmd.on {
		task, err := vm.PowerOn(context.TODO())
		if err != nil {
			return err
		}

		_, err = task.WaitForResult(context.TODO(), nil)
		if err != nil {
			return err
		}
	}

	return nil
}
Example #18
func (cmd *create) Run(ctx context.Context, f *flag.FlagSet) error {
	var err error

	if len(f.Args()) != 1 {
		return flag.ErrHelp
	}

	cmd.name = f.Arg(0)
	if cmd.name == "" {
		return flag.ErrHelp
	}

	cmd.Client, err = cmd.ClientFlag.Client()
	if err != nil {
		return err
	}

	cmd.Datacenter, err = cmd.DatacenterFlag.Datacenter()
	if err != nil {
		return err
	}

	if cmd.StoragePodFlag.Isset() {
		cmd.StoragePod, err = cmd.StoragePodFlag.StoragePod()
		if err != nil {
			return err
		}
	} else {
		cmd.Datastore, err = cmd.DatastoreFlag.Datastore()
		if err != nil {
			return err
		}
	}

	cmd.HostSystem, err = cmd.HostSystemFlag.HostSystemIfSpecified()
	if err != nil {
		return err
	}

	if cmd.HostSystem != nil {
		if cmd.ResourcePool, err = cmd.HostSystem.ResourcePool(ctx); err != nil {
			return err
		}
	} else {
		// -host is optional
		if cmd.ResourcePool, err = cmd.ResourcePoolFlag.ResourcePool(); err != nil {
			return err
		}
	}

	if cmd.Folder, err = cmd.FolderFlag.Folder(); err != nil {
		return err
	}

	// Verify ISO exists
	if cmd.iso != "" {
		_, err = cmd.isoDatastoreFlag.Stat(ctx, cmd.iso)
		if err != nil {
			return err
		}

		cmd.isoDatastore, err = cmd.isoDatastoreFlag.Datastore()
		if err != nil {
			return err
		}
	}

	// Verify disk exists
	if cmd.disk != "" {
		var b units.ByteSize

		// If disk can be parsed as byte units, don't stat
		err = b.Set(cmd.disk)
		if err == nil {
			cmd.diskByteSize = int64(b)
		} else {
			_, err = cmd.diskDatastoreFlag.Stat(ctx, cmd.disk)
			if err != nil {
				return err
			}

			cmd.diskDatastore, err = cmd.diskDatastoreFlag.Datastore()
			if err != nil {
				return err
			}
		}
	}

	task, err := cmd.createVM(ctx)
	if err != nil {
		return err
	}

	info, err := task.WaitForResult(ctx, nil)
	if err != nil {
		return err
	}

	vm := object.NewVirtualMachine(cmd.Client, info.Result.(types.ManagedObjectReference))

	if cmd.on {
		task, err := vm.PowerOn(ctx)
		if err != nil {
			return err
		}

		_, err = task.WaitForResult(ctx, nil)
		if err != nil {
			return err
		}
	}

	return nil
}
Example #19
// addTestVM will add a pseudo VM to the container
func addTestVM(container *Container) {
	mo := types.ManagedObjectReference{Type: "vm", Value: "12"}
	v := object.NewVirtualMachine(nil, mo)
	container.vm = vm.NewVirtualMachineFromVM(nil, nil, v)
}