// buildVMRelocateSpec builds VirtualMachineRelocateSpec to set a place for a new VirtualMachine. func buildVMRelocateSpec(rp *object.ResourcePool, ds *object.Datastore, vm *object.VirtualMachine) (types.VirtualMachineRelocateSpec, error) { var key int devices, err := vm.Device(context.TODO()) if err != nil { return types.VirtualMachineRelocateSpec{}, err } for _, d := range devices { if devices.Type(d) == "disk" { key = d.GetVirtualDevice().Key } } rpr := rp.Reference() dsr := ds.Reference() return types.VirtualMachineRelocateSpec{ Datastore: &dsr, Pool: &rpr, Disk: []types.VirtualMachineRelocateSpecDiskLocator{ types.VirtualMachineRelocateSpecDiskLocator{ Datastore: dsr, DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{ DiskMode: "persistent", ThinProvisioned: types.NewBool(false), EagerlyScrub: types.NewBool(true), }, DiskId: key, }, }, }, nil }
// createPool creates a child resource pool named name under the resource
// pool found at poolPath, using normal CPU/memory shares and expandable
// reservations. Failures are logged to t and returned to the caller.
func createPool(ctx context.Context, sess *session.Session, poolPath string, name string, t *testing.T) error {
	rp, err := sess.Finder.ResourcePool(ctx, poolPath)
	if err != nil {
		t.Logf("Failed to get parent pool: %s", err)
		return err
	}
	t.Logf("Creating Resource Pool %s", name)
	// Identical CPU and memory allocation: normal shares, expandable
	// reservation. Limit -1 / Reservation 1 — presumably "unlimited" and a
	// minimal reservation per the vSphere API; confirm against the
	// ResourceAllocationInfo docs.
	resSpec := types.ResourceConfigSpec{
		CpuAllocation: &types.ResourceAllocationInfo{
			Shares: &types.SharesInfo{
				Level: types.SharesLevelNormal,
			},
			ExpandableReservation: types.NewBool(true),
			Limit:                 -1,
			Reservation:           1,
		},
		MemoryAllocation: &types.ResourceAllocationInfo{
			Shares: &types.SharesInfo{
				Level: types.SharesLevelNormal,
			},
			ExpandableReservation: types.NewBool(true),
			Limit:                 -1,
			Reservation:           1,
		},
	}
	_, err = rp.Create(ctx, name, resSpec)
	if err != nil {
		t.Logf("Failed to create resource pool %s: %s", name, err)
		return err
	}
	return nil
}
// ListDatacenter lists the child folders of a Datacenter (vm, host,
// datastore and network folders), returning one Element per folder.
func (l Lister) ListDatacenter(ctx context.Context) ([]Element, error) {
	// Skip the datacenter object itself; only traversed children are collected.
	ospec := types.ObjectSpec{
		Obj:  l.Reference,
		Skip: types.NewBool(true),
	}

	// Traverse into each of the datacenter's four well-known folders.
	fields := []string{
		"vmFolder",
		"hostFolder",
		"datastoreFolder",
		"networkFolder",
	}

	for _, f := range fields {
		tspec := types.TraversalSpec{
			Path: f,
			Skip: types.NewBool(false),
			Type: "Datacenter",
		}

		ospec.SelectSet = append(ospec.SelectSet, &tspec)
	}

	// Collect either all folder properties or just "name", depending on l.All.
	pspec := types.PropertySpec{
		Type: "Folder",
	}

	if l.All {
		pspec.All = types.NewBool(true)
	} else {
		pspec.PathSet = []string{"name"}
	}

	req := types.RetrieveProperties{
		SpecSet: []types.PropertyFilterSpec{
			{
				ObjectSet: []types.ObjectSpec{ospec},
				PropSet:   []types.PropertySpec{pspec},
			},
		},
	}

	var dst []interface{}

	err := l.retrieveProperties(ctx, req, &dst)
	if err != nil {
		return nil, err
	}

	es := []Element{}
	for _, v := range dst {
		es = append(es, ToElement(v.(mo.Reference), l.Prefix))
	}

	return es, nil
}
// addHardDisk adds a new Hard Disk to the VirtualMachine. func addHardDisk(vm *object.VirtualMachine, size, iops int64, diskType string, datastore *object.Datastore, diskPath string, controller_type string) error { devices, err := vm.Device(context.TODO()) if err != nil { return err } log.Printf("[DEBUG] vm devices: %#v\n", devices) controller, err := devices.FindDiskController(controller_type) if err != nil { return err } log.Printf("[DEBUG] disk controller: %#v\n", controller) // TODO Check if diskPath & datastore exist // If diskPath is not specified, pass empty string to CreateDisk() if diskPath == "" { return fmt.Errorf("[ERROR] addHardDisk - No path proided") } else { // TODO Check if diskPath & datastore exist diskPath = fmt.Sprintf("[%v] %v", datastore.Name(), diskPath) } log.Printf("[DEBUG] addHardDisk - diskPath: %v", diskPath) disk := devices.CreateDisk(controller, datastore.Reference(), diskPath) existing := devices.SelectByBackingInfo(disk.Backing) log.Printf("[DEBUG] disk: %#v\n", disk) if len(existing) == 0 { disk.CapacityInKB = int64(size * 1024 * 1024) if iops != 0 { disk.StorageIOAllocation = &types.StorageIOAllocationInfo{ Limit: iops, } } backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo) if diskType == "eager_zeroed" { // eager zeroed thick virtual disk backing.ThinProvisioned = types.NewBool(false) backing.EagerlyScrub = types.NewBool(true) } else if diskType == "thin" { // thin provisioned virtual disk backing.ThinProvisioned = types.NewBool(true) } log.Printf("[DEBUG] addHardDisk: %#v\n", disk) log.Printf("[DEBUG] addHardDisk capacity: %#v\n", disk.CapacityInKB) return vm.AddDevice(context.TODO(), disk) } else { log.Printf("[DEBUG] addHardDisk: Disk already present.\n") return nil } }
// buildVMRelocateSpec builds VirtualMachineRelocateSpec to set a place for a new VirtualMachine. func buildVMRelocateSpec(finder *find.Finder, rp *object.ResourcePool, ds *object.Datastore, vm *object.VirtualMachine, linked bool) (types.VirtualMachineRelocateSpec, error) { var key int var parent *types.VirtualDiskFlatVer2BackingInfo devices, err := vm.Device(context.TODO()) if err != nil { return types.VirtualMachineRelocateSpec{}, err } for _, d := range devices { if devices.Type(d) == "disk" { vd := d.GetVirtualDevice() parent = vd.Backing.(*types.VirtualDiskFlatVer2BackingInfo) key = vd.Key } } rpr := rp.Reference() relocateSpec := types.VirtualMachineRelocateSpec{} // Treat linked clones a bit differently. if linked { parentDs := strings.SplitN(parent.FileName[1:], "]", 2) parentDsObj, err := finder.Datastore(context.TODO(), parentDs[0]) if err != nil { return types.VirtualMachineRelocateSpec{}, err } parentDbObjRef := parentDsObj.Reference() relocateSpec = types.VirtualMachineRelocateSpec{ Datastore: &parentDbObjRef, Pool: &rpr, DiskMoveType: "createNewChildDiskBacking", } } else { dsr := ds.Reference() relocateSpec = types.VirtualMachineRelocateSpec{ Datastore: &dsr, Pool: &rpr, Disk: []types.VirtualMachineRelocateSpecDiskLocator{ types.VirtualMachineRelocateSpecDiskLocator{ Datastore: dsr, DiskId: key, DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{ DiskMode: "persistent", ThinProvisioned: types.NewBool(false), EagerlyScrub: types.NewBool(true), }, }, }, } } return relocateSpec, nil }
func (cmd *boot) Register(f *flag.FlagSet) { f.Int64Var(&cmd.BootDelay, "delay", 0, "Delay in ms before starting the boot sequence") f.StringVar(&cmd.order, "order", "", "Boot device order") f.Int64Var(&cmd.BootRetryDelay, "retry-delay", 0, "Delay in ms before a boot retry") cmd.BootRetryEnabled = types.NewBool(false) f.BoolVar(cmd.BootRetryEnabled, "retry", false, "If true, retry boot after retry-delay") cmd.EnterBIOSSetup = types.NewBool(false) f.BoolVar(cmd.EnterBIOSSetup, "setup", false, "If true, enter BIOS setup on next boot") }
func (cmd *configure) Register(f *flag.FlagSet) { cmd.defaults.Enabled = types.NewBool(false) f.BoolVar(cmd.defaults.Enabled, "enabled", false, "") f.IntVar(&cmd.defaults.StartDelay, "start-delay", 0, "") f.StringVar(&cmd.defaults.StopAction, "stop-action", "", "") f.IntVar(&cmd.defaults.StopDelay, "stop-delay", 0, "") cmd.defaults.WaitForHeartbeat = types.NewBool(false) f.BoolVar(cmd.defaults.WaitForHeartbeat, "wait-for-heartbeat", false, "") }
// loadUsedPorts returns the VNC ports already claimed by virtual machines on
// the given host, read from each VM's config.extraConfig options.
func loadUsedPorts(c *vim25.Client, host types.ManagedObjectReference) ([]int, error) {
	ctx := context.TODO()
	// Traverse from the host through its "vm" property to reach every VM on it.
	ospec := types.ObjectSpec{
		Obj: host,
		SelectSet: []types.BaseSelectionSpec{
			&types.TraversalSpec{
				Type: "HostSystem",
				Path: "vm",
				Skip: types.NewBool(false),
			},
		},
		Skip: types.NewBool(false),
	}

	// Only the extraConfig property is needed.
	pspec := types.PropertySpec{
		Type:    "VirtualMachine",
		PathSet: []string{"config.extraConfig"},
	}

	req := types.RetrieveProperties{
		This: c.ServiceContent.PropertyCollector,
		SpecSet: []types.PropertyFilterSpec{
			{
				ObjectSet: []types.ObjectSpec{ospec},
				PropSet:   []types.PropertySpec{pspec},
			},
		},
	}

	var vms []mo.VirtualMachine
	err := mo.RetrievePropertiesForRequest(ctx, c, req, &vms)
	if err != nil {
		return nil, err
	}

	var ports []int
	for _, vm := range vms {
		// VMs without config or extraConfig are skipped.
		if vm.Config == nil || vm.Config.ExtraConfig == nil {
			continue
		}

		options := vncOptionsFromExtraConfig(vm.Config.ExtraConfig)
		if ps, ok := options["port"]; ok && ps != "" {
			pi, err := strconv.Atoi(ps)
			// Unparsable port values are silently skipped, not errors.
			if err == nil {
				ports = append(ports, pi)
			}
		}
	}

	return ports, nil
}
// CreateDisk creates a new VirtualDisk device which can be added to a VM.
func (l VirtualDeviceList) CreateDisk(c types.BaseVirtualController, name string) *types.VirtualDisk {
	// If name is not specified, one will be chosen for you.
	// But when given, make sure it ends in .vmdk, otherwise it will be
	// treated as a directory.
	if len(name) > 0 && filepath.Ext(name) != ".vmdk" {
		name += ".vmdk"
	}

	// Default backing: thin-provisioned, persistent flat file.
	device := &types.VirtualDisk{
		VirtualDevice: types.VirtualDevice{
			Backing: &types.VirtualDiskFlatVer2BackingInfo{
				DiskMode:        string(types.VirtualDiskModePersistent),
				ThinProvisioned: types.NewBool(true),
				VirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{
					FileName: name,
				},
			},
		},
	}

	l.AssignController(device, c)

	if device.UnitNumber == 0 {
		device.UnitNumber = -1 // TODO: this field is annotated as omitempty
	}

	return device
}
// searchByUUID resolves flag.byUUID to a VirtualMachine or HostSystem
// reference using the SearchIndex. Panics if flag.t is neither VM nor host.
func (flag *SearchFlag) searchByUUID(c *vim25.Client, dc *object.Datacenter) (object.Reference, error) {
	isVM := false
	switch flag.t {
	case SearchVirtualMachines:
		isVM = true
	case SearchHosts:
	default:
		panic("unsupported type")
	}

	var ref object.Reference
	var err error

	// Try FindByUuid first with instanceUuid unset, then with
	// instanceUuid=true; an InvalidArgument fault (presumably from servers
	// that do not accept the parameter) falls through to the next attempt.
	// NOTE(review): exact instanceUuid semantics inferred from the call —
	// confirm against the FindByUuid API docs.
	for _, iu := range []*bool{nil, types.NewBool(true)} {
		ref, err = flag.searchIndex(c).FindByUuid(context.TODO(), dc, flag.byUUID, isVM, iu)
		if err != nil {
			if soap.IsSoapFault(err) {
				fault := soap.ToSoapFault(err).VimFault()
				if _, ok := fault.(types.InvalidArgument); ok {
					continue
				}
			}
			return nil, err
		}
		// Stop at the first hit; a nil ref retries with the next mode.
		if ref != nil {
			break
		}
	}

	return ref, nil
}
// RevertToSnapshot reverts to a named snapshot func (v VirtualMachine) RevertToSnapshot(ctx context.Context, name string, suppressPowerOn bool) (*Task, error) { var o mo.VirtualMachine err := v.Properties(ctx, v.Reference(), []string{"snapshot"}, &o) snapshotTree := o.Snapshot.RootSnapshotList if len(snapshotTree) < 1 { return nil, errors.New("No snapshots for this VM") } snapshot, err := traverseSnapshotInTree(snapshotTree, name) if err != nil { return nil, err } req := types.RevertToSnapshot_Task{ This: snapshot, SuppressPowerOn: types.NewBool(suppressPowerOn), } res, err := methods.RevertToSnapshot_Task(ctx, v.c, &req) if err != nil { return nil, err } return NewTask(v.c, res.Returnval), nil }
// Register wires CPU and memory allocation flags (limit, reservation,
// expandable, shares) onto f, using the "cpu." and "mem." prefixes.
func (s *ResourceConfigSpecFlag) Register(f *flag.FlagSet) {
	// Each entry embeds a pointer into s, so writes through opt reach s
	// even though opt itself is a loop copy.
	opts := []struct {
		name  string
		units string
		*types.ResourceAllocationInfo
	}{
		{"CPU", "MHz", &s.CpuAllocation},
		{"Memory", "MB", &s.MemoryAllocation},
	}

	for _, opt := range opts {
		// "cpu" / "mem" flag prefix.
		prefix := strings.ToLower(opt.name)[:3]
		shares := (*sharesInfo)(opt.Shares)

		// Capture any pre-set default before the pointer is replaced below.
		expandableReservation := false
		if v := opt.ExpandableReservation; v != nil {
			expandableReservation = *v
		}
		// Initialize bool pointer
		opt.ExpandableReservation = types.NewBool(false)

		f.Int64Var(&opt.Limit, prefix+".limit", 0, opt.name+" limit in "+opt.units)
		f.Int64Var(&opt.Reservation, prefix+".reservation", 0, opt.name+" reservation in "+opt.units)
		f.BoolVar(opt.ExpandableReservation, prefix+".expandable", expandableReservation, opt.name+" expandable reservation")
		f.Var(shares, prefix+".shares", opt.name+" shares level or number")
	}
}
// MoveVirtualDisk moves a virtual disk. func (m VirtualDiskManager) MoveVirtualDisk( ctx context.Context, sourceName string, sourceDatacenter *Datacenter, destName string, destDatacenter *Datacenter, force bool) (*Task, error) { req := types.MoveVirtualDisk_Task{ This: m.Reference(), SourceName: sourceName, DestName: destName, Force: types.NewBool(force), } if sourceDatacenter != nil { ref := sourceDatacenter.Reference() req.SourceDatacenter = &ref } if destDatacenter != nil { ref := destDatacenter.Reference() req.DestDatacenter = &ref } res, err := methods.MoveVirtualDisk_Task(ctx, m.c, &req) if err != nil { return nil, err } return NewTask(m.c, res.Returnval), nil }
func (f FileManager) CopyDatastoreFile(ctx context.Context, sourceName string, sourceDatacenter *Datacenter, destinationName string, destinationDatacenter *Datacenter, force bool) (*Task, error) { req := types.CopyDatastoreFile_Task{ This: f.Reference(), SourceName: sourceName, DestinationName: destinationName, Force: types.NewBool(force), } if sourceDatacenter != nil { ref := sourceDatacenter.Reference() req.SourceDatacenter = &ref } if destinationDatacenter != nil { ref := destinationDatacenter.Reference() req.DestinationDatacenter = &ref } res, err := methods.CopyDatastoreFile_Task(ctx, f.c, &req) if err != nil { return nil, err } return NewTask(f.c, res.Returnval), nil }
// Run lists the datastore paths given as arguments (defaulting to the
// datastore root). An argument that is not a directory is retried once as a
// match pattern within its parent directory.
func (cmd *ls) Run(f *flag.FlagSet) error {
	ds, err := cmd.Datastore()
	if err != nil {
		return err
	}

	b, err := ds.Browser(context.TODO())
	if err != nil {
		return err
	}

	args := f.Args()
	if len(args) == 0 {
		args = []string{""}
	}

	result := &listOutput{
		rs:   make([]types.HostDatastoreBrowserSearchResults, 0),
		long: cmd.long,
	}

	for _, arg := range args {
		spec := types.HostDatastoreBrowserSearchSpec{
			MatchPattern: []string{"*"},
		}

		// Long listing requests extra file details.
		if cmd.long {
			spec.Details = &types.FileQueryFlags{
				FileType:     true,
				FileSize:     true,
				FileOwner:    types.NewBool(true), // TODO: omitempty is generated, but seems to be required
				Modification: true,
			}
		}

		// Pass 0 treats arg as a directory; on FileNotFound it is split into
		// parent dir + match pattern and retried exactly once (pass 1).
		for i := 0; ; i++ {
			r, err := cmd.ListPath(b, arg, spec)
			if err != nil {
				// Treat the argument as a match pattern if not found as directory
				if i == 0 && types.IsFileNotFound(err) {
					spec.MatchPattern[0] = path.Base(arg)
					arg = path.Dir(arg)

					continue
				}

				return err
			}

			// Treat an empty result against match pattern as file not found
			if i == 1 && len(r.File) == 0 {
				return fmt.Errorf("File %s/%s was not found", r.FolderPath, spec.MatchPattern[0])
			}

			result.add(r)
			break
		}
	}

	return cmd.WriteResult(result)
}
// AddVirtualDisk adds a virtual disk to a virtual machine.
func (s *VirtualMachineConfigSpec) AddVirtualDisk(device *types.VirtualDisk) *VirtualMachineConfigSpec {
	defer trace.End(trace.Begin(s.ID()))

	device.GetVirtualDevice().Key = s.generateNextKey()
	device.CapacityInKB = defaultCapacityInKB

	moref := s.Datastore.Reference()
	// Thin-provisioned flat file backing named "<ID>/<ID>.vmdk" on the
	// spec's datastore.
	device.GetVirtualDevice().Backing = &types.VirtualDiskFlatVer2BackingInfo{
		DiskMode:        string(types.VirtualDiskModePersistent),
		ThinProvisioned: types.NewBool(true),
		VirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{
			FileName:  s.Datastore.Path(fmt.Sprintf("%s/%[1]s.vmdk", s.ID())),
			Datastore: &moref,
		},
	}

	// Add the parent if we set ParentImageID
	backing := device.GetVirtualDevice().Backing.(*types.VirtualDiskFlatVer2BackingInfo)
	if s.ParentImageID() != "" {
		backing.Parent = &types.VirtualDiskFlatVer2BackingInfo{
			VirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{
				// XXX This needs to come from a storage helper in the future
				// and should not be computed here like this.
				FileName: s.Datastore.Path(fmt.Sprintf("VIC/%s/images/%s/%[2]s.vmdk", s.ImageStoreName(), s.ParentImageID())),
			},
		}
	}

	return s.AddAndCreateVirtualDevice(device)
}
// setDefaultFloppyBacking assigns the default device backing for a floppy
// drive, named after the device type and unit number, with auto-detect off.
func (l VirtualDeviceList) setDefaultFloppyBacking(device *types.VirtualFloppy) {
	backing := &types.VirtualFloppyDeviceBackingInfo{
		VirtualDeviceDeviceBackingInfo: types.VirtualDeviceDeviceBackingInfo{
			DeviceName:    fmt.Sprintf("%s-%d", DeviceTypeFloppy, device.UnitNumber),
			UseAutoDetect: types.NewBool(false),
		},
	}
	device.Backing = backing
}
// setDefaultCdromBacking assigns the default ATAPI backing for a CD-ROM
// device, named after the device type, controller key and unit number, with
// auto-detect off.
func (l VirtualDeviceList) setDefaultCdromBacking(device *types.VirtualCdrom) {
	backing := &types.VirtualCdromAtapiBackingInfo{
		VirtualDeviceDeviceBackingInfo: types.VirtualDeviceDeviceBackingInfo{
			DeviceName:    fmt.Sprintf("%s-%d-%d", DeviceTypeCdrom, device.ControllerKey, device.UnitNumber),
			UseAutoDetect: types.NewBool(false),
		},
	}
	device.Backing = backing
}
// Retrieve loads properties for a slice of managed objects. The dst argument // must be a pointer to a []interface{}, which is populated with the instances // of the specified managed objects, with the relevant properties filled in. If // the properties slice is nil, all properties are loaded. func (p *Collector) Retrieve(ctx context.Context, objs []types.ManagedObjectReference, ps []string, dst interface{}) error { var propSpec *types.PropertySpec var objectSet []types.ObjectSpec for _, obj := range objs { // Ensure that all object reference types are the same if propSpec == nil { propSpec = &types.PropertySpec{ Type: obj.Type, } if ps == nil { propSpec.All = types.NewBool(true) } else { propSpec.PathSet = ps } } else { if obj.Type != propSpec.Type { return errors.New("object references must have the same type") } } objectSpec := types.ObjectSpec{ Obj: obj, Skip: types.NewBool(false), } objectSet = append(objectSet, objectSpec) } req := types.RetrieveProperties{ SpecSet: []types.PropertyFilterSpec{ { ObjectSet: objectSet, PropSet: []types.PropertySpec{*propSpec}, }, }, } res, err := p.RetrieveProperties(ctx, req) if err != nil { return err } return mo.LoadRetrievePropertiesResponse(res, dst) }
// addHardDisk adds a new Hard Disk to the VirtualMachine. func addHardDisk(vm *object.VirtualMachine, size, iops int64, diskType string) error { devices, err := vm.Device(context.TODO()) if err != nil { return err } log.Printf("[DEBUG] vm devices: %#v\n", devices) controller, err := devices.FindDiskController("scsi") if err != nil { return err } log.Printf("[DEBUG] disk controller: %#v\n", controller) disk := devices.CreateDisk(controller, "") existing := devices.SelectByBackingInfo(disk.Backing) log.Printf("[DEBUG] disk: %#v\n", disk) if len(existing) == 0 { disk.CapacityInKB = int64(size * 1024 * 1024) if iops != 0 { disk.StorageIOAllocation = &types.StorageIOAllocationInfo{ Limit: iops, } } backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo) if diskType == "eager_zeroed" { // eager zeroed thick virtual disk backing.ThinProvisioned = types.NewBool(false) backing.EagerlyScrub = types.NewBool(true) } else if diskType == "thin" { // thin provisioned virtual disk backing.ThinProvisioned = types.NewBool(true) } log.Printf("[DEBUG] addHardDisk: %#v\n", disk) log.Printf("[DEBUG] addHardDisk: %#v\n", disk.CapacityInKB) return vm.AddDevice(context.TODO(), disk) } else { log.Printf("[DEBUG] addHardDisk: Disk already present.\n") return nil } }
// init registers the "pool.create" CLI command, defaulting both allocations
// to normal shares with an expandable reservation.
func init() {
	spec := NewResourceConfigSpecFlag()
	spec.SetAllocation(func(a *types.ResourceAllocationInfo) {
		a.Shares.Level = types.SharesLevelNormal
		a.ExpandableReservation = types.NewBool(true)
	})
	cli.Register("pool.create", &create{ResourceConfigSpecFlag: spec})
}
func (cmd *reconnect) Register(ctx context.Context, f *flag.FlagSet) { cmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx) cmd.HostSystemFlag.Register(ctx, f) cmd.HostConnectFlag, ctx = flags.NewHostConnectFlag(ctx) cmd.HostConnectFlag.Register(ctx, f) cmd.HostSystemReconnectSpec.SyncState = types.NewBool(false) f.BoolVar(cmd.HostSystemReconnectSpec.SyncState, "sync-state", false, "Sync state") }
func NewResourceConfigSpecFlag() *ResourceConfigSpecFlag { f := new(ResourceConfigSpecFlag) f.MemoryAllocation = new(types.ResourceAllocationInfo) f.CpuAllocation = new(types.ResourceAllocationInfo) f.SetAllocation(func(a types.BaseResourceAllocationInfo) { a.GetResourceAllocationInfo().Shares = new(types.SharesInfo) a.GetResourceAllocationInfo().ExpandableReservation = types.NewBool(false) }) return f }
func (cmd *create) Register(ctx context.Context, f *flag.FlagSet) { cmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx) cmd.DatacenterFlag.Register(ctx, f) cmd.ResourceConfigSpecFlag = NewResourceConfigSpecFlag() cmd.ResourceConfigSpecFlag.SetAllocation(func(a types.BaseResourceAllocationInfo) { ra := a.GetResourceAllocationInfo() ra.Shares.Level = types.SharesLevelNormal ra.ExpandableReservation = types.NewBool(true) }) cmd.ResourceConfigSpecFlag.Register(ctx, f) }
// buildVMRelocateSpec builds VirtualMachineRelocateSpec to set a place for a new VirtualMachine. func buildVMRelocateSpec(rp *object.ResourcePool, ds *object.Datastore, vm *object.VirtualMachine, linkedClone bool, initType string) (types.VirtualMachineRelocateSpec, error) { var key int var moveType string if linkedClone { moveType = "createNewChildDiskBacking" } else { moveType = "moveAllDiskBackingsAndDisallowSharing" } log.Printf("[DEBUG] relocate type: [%s]", moveType) devices, err := vm.Device(context.TODO()) if err != nil { return types.VirtualMachineRelocateSpec{}, err } for _, d := range devices { if devices.Type(d) == "disk" { key = d.GetVirtualDevice().Key } } isThin := initType == "thin" rpr := rp.Reference() dsr := ds.Reference() return types.VirtualMachineRelocateSpec{ Datastore: &dsr, Pool: &rpr, DiskMoveType: moveType, Disk: []types.VirtualMachineRelocateSpecDiskLocator{ types.VirtualMachineRelocateSpecDiskLocator{ Datastore: dsr, DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{ DiskMode: "persistent", ThinProvisioned: types.NewBool(isThin), EagerlyScrub: types.NewBool(!isThin), }, DiskId: key, }, }, }, nil }
// RevertToCurrentSnapshot reverts to the current snapshot func (v VirtualMachine) RevertToCurrentSnapshot(ctx context.Context, suppressPowerOn bool) (*Task, error) { req := types.RevertToCurrentSnapshot_Task{ This: v.Reference(), SuppressPowerOn: types.NewBool(suppressPowerOn), } res, err := methods.RevertToCurrentSnapshot_Task(ctx, v.c, &req) if err != nil { return nil, err } return NewTask(v.c, res.Returnval), nil }
// CreateLocalDatastore handles a CreateLocalDatastore request in the
// simulator: it registers a new local datastore backed by c.Path and mounts
// it read-write on this host.
func (dss *HostDatastoreSystem) CreateLocalDatastore(c *types.CreateLocalDatastore) soap.HasFault {
	r := &methods.CreateLocalDatastoreBody{}

	ds := &Datastore{}
	ds.Name = c.Name
	// The backing path doubles as the managed object reference value.
	ds.Self.Value = c.Path

	ds.Info = &types.LocalDatastoreInfo{
		DatastoreInfo: types.DatastoreInfo{
			Name: c.Name,
			Url:  c.Path,
		},
		Path: c.Path,
	}

	ds.Summary.Type = "local"

	// add may fault (e.g. duplicate); propagate the fault to the caller.
	if err := dss.add(ds); err != nil {
		r.Fault_ = err
		return r
	}

	// Record the mount of this datastore on the owning host.
	ds.Host = append(ds.Host, types.DatastoreHostMount{
		Key: dss.Host.Reference(),
		MountInfo: types.HostMountInfo{
			AccessMode: string(types.HostMountModeReadWrite),
			Mounted:    types.NewBool(true),
			Accessible: types.NewBool(true),
		},
	})

	// Best-effort refresh; the result is deliberately ignored.
	_ = ds.RefreshDatastore(&types.RefreshDatastore{This: ds.Self})

	r.Res = &types.CreateLocalDatastoreResponse{
		Returnval: ds.Self,
	}

	return r
}
// RetrieveProperties retrieves the properties of the managed object specified // as obj and decodes the response struct into the value pointed to by dst. func RetrieveProperties(ctx context.Context, r soap.RoundTripper, pc, obj types.ManagedObjectReference, dst interface{}) error { req := types.RetrieveProperties{ This: pc, SpecSet: []types.PropertyFilterSpec{ { ObjectSet: []types.ObjectSpec{ { Obj: obj, Skip: types.NewBool(false), }, }, PropSet: []types.PropertySpec{ { All: types.NewBool(true), Type: obj.Type, }, }, }, }, } return RetrievePropertiesForRequest(ctx, r, req, dst) }
func (h HostSystem) EnterMaintenanceMode(ctx context.Context, timeout int32, evacuate bool, spec *types.HostMaintenanceSpec) (*Task, error) { req := types.EnterMaintenanceMode_Task{ This: h.Reference(), Timeout: timeout, EvacuatePoweredOffVms: types.NewBool(evacuate), MaintenanceSpec: spec, } res, err := methods.EnterMaintenanceMode_Task(ctx, h.c, &req) if err != nil { return nil, err } return NewTask(h.c, res.Returnval), nil }
// MakeDirectory creates a folder using the specified name. func (f FileManager) MakeDirectory(ctx context.Context, name string, dc *Datacenter, createParentDirectories bool) error { req := types.MakeDirectory{ This: f.Reference(), Name: name, CreateParentDirectories: types.NewBool(createParentDirectories), } if dc != nil { ref := dc.Reference() req.Datacenter = &ref } _, err := methods.MakeDirectory(ctx, f.c, &req) return err }