// run streams property updates for the processor's event collectors until ctx
// is cancelled, an error occurs, or — when tail is false — every collector has
// delivered its initial page of events.
//
// With more than one tailer, a ListView groups the collectors so a single
// WaitForView call can watch them all; with exactly one, a plain Wait on that
// collector suffices.
func (p *eventProcessor) run(ctx context.Context, tail bool) error {
	if len(p.tailers) == 0 {
		return nil
	}

	var err error
	var collectors []types.ManagedObjectReference
	for _, t := range p.tailers {
		collectors = append(collectors, t.collector)
	}

	if len(p.tailers) > 1 {
		// create and populate a ListView
		viewMgr := view.NewManager(p.mgr.Client())
		var listView *view.ListView
		listView, err = viewMgr.CreateListView(ctx, collectors)
		if err != nil {
			return err
		}

		// count tracks how many collectors have produced an update; used to
		// decide when the initial pages have all arrived (non-tail mode).
		count := 0

		// Retrieve the property from the objects in the ListView
		err = property.WaitForView(ctx, property.DefaultCollector(p.mgr.Client()), listView.Reference(), collectors[0], []string{latestPageProp},
			func(c types.ManagedObjectReference, pc []types.PropertyChange) bool {
				// err is captured from the enclosing scope so a process()
				// failure is surfaced by the `return err` below after the
				// wait loop stops.
				if err = p.process(c, pc); err != nil {
					return false
				}
				count++
				if count == len(collectors) && !tail {
					return true
				}
				return false
			})
		return err
	}

	// only one object to follow
	err = property.Wait(ctx, property.DefaultCollector(p.mgr.Client()), collectors[0], []string{latestPageProp},
		func(pc []types.PropertyChange) bool {
			// Same err-capture pattern as above: a process() error ends the
			// wait and is returned by the statement below.
			if err = p.process(collectors[0], pc); err != nil {
				return false
			}
			if !tail {
				return true
			}
			return false
		})
	return err
}
func (cmd *vmdk) DetachDisk(vm *object.VirtualMachine) (string, error) { ctx := context.TODO() var mvm mo.VirtualMachine pc := property.DefaultCollector(cmd.Client) err := pc.RetrieveOne(ctx, vm.Reference(), []string{"config.hardware"}, &mvm) if err != nil { return "", err } spec := new(configSpec) dsFile := spec.RemoveDisk(&mvm) task, err := vm.Reconfigure(ctx, spec.ToSpec()) if err != nil { return "", err } err = task.Wait(ctx) if err != nil { return "", err } return dsFile, nil }
func (cmd *info) Run(ctx context.Context, f *flag.FlagSet) error { client, err := cmd.Client() if err != nil { return err } ns, err := cmd.HostNetworkSystem() if err != nil { return err } var mns mo.HostNetworkSystem pc := property.DefaultCollector(client) err = pc.RetrieveOne(context.TODO(), ns.Reference(), []string{"networkInfo.vswitch"}, &mns) if err != nil { return err } tw := tabwriter.NewWriter(os.Stdout, 2, 0, 2, ' ', 0) for i, s := range mns.NetworkInfo.Vswitch { if i > 0 { fmt.Fprintln(tw) } fmt.Fprintf(tw, "Name:\t%s\n", s.Name) fmt.Fprintf(tw, "Portgroup:\t%s\n", cmd.keys("key-vim.host.PortGroup-", s.Portgroup)) fmt.Fprintf(tw, "Pnic:\t%s\n", cmd.keys("key-vim.host.PhysicalNic-", s.Pnic)) fmt.Fprintf(tw, "MTU:\t%d\n", s.Mtu) fmt.Fprintf(tw, "Ports:\t%d\n", s.NumPorts) fmt.Fprintf(tw, "Ports Available:\t%d\n", s.NumPortsAvailable) } return tw.Flush() }
func (h *Helper) LocalDatastores(ctx context.Context, cr *object.ComputeResource) ([]*object.Datastore, error) { // List datastores for compute resource dss, err := cr.Datastores(ctx) if err != nil { return nil, err } // Filter local datastores var ldss []*object.Datastore for _, ds := range dss { var mds mo.Datastore err = property.DefaultCollector(h.c).RetrieveOne(ctx, ds.Reference(), nil, &mds) if err != nil { return nil, err } switch i := mds.Info.(type) { case *types.VmfsDatastoreInfo: if i.Vmfs.Local != nil && *i.Vmfs.Local == true { break } default: continue } ds.InventoryPath = mds.Name ldss = append(ldss, ds) } return ldss, nil }
func (c ComputeResource) Hosts(ctx context.Context) ([]*HostSystem, error) { var cr mo.ComputeResource err := c.Properties(ctx, c.Reference(), []string{"host"}, &cr) if err != nil { return nil, err } if len(cr.Host) == 0 { return nil, nil } var hs []mo.HostSystem pc := property.DefaultCollector(c.Client()) err = pc.Retrieve(ctx, cr.Host, []string{"name"}, &hs) if err != nil { return nil, err } var hosts []*HostSystem for _, h := range hs { host := NewHostSystem(c.Client(), h.Reference()) host.InventoryPath = path.Join(c.InventoryPath, h.Name) hosts = append(hosts, host) } return hosts, nil }
func waitForNetworkingActive(client *govmomi.Client, datacenter, name string) resource.StateRefreshFunc { return func() (interface{}, string, error) { dc, err := getDatacenter(client, datacenter) if err != nil { log.Printf("[ERROR] %#v", err) return nil, "", err } finder := find.NewFinder(client.Client, true) finder = finder.SetDatacenter(dc) vm, err := finder.VirtualMachine(context.TODO(), name) if err != nil { log.Printf("[ERROR] %#v", err) return nil, "", err } var mvm mo.VirtualMachine collector := property.DefaultCollector(client.Client) if err := collector.RetrieveOne(context.TODO(), vm.Reference(), []string{"summary"}, &mvm); err != nil { log.Printf("[ERROR] %#v", err) return nil, "", err } if mvm.Summary.Guest.IpAddress != "" { log.Printf("[DEBUG] IP address with DHCP: %v", mvm.Summary.Guest.IpAddress) return mvm.Summary, "active", err } else { log.Printf("[DEBUG] Waiting for IP address") return nil, "pending", err } } }
func (v VirtualMachine) WaitForIP(ctx context.Context) (string, error) { var ip string p := property.DefaultCollector(v.c) err := property.Wait(ctx, p, v.Reference(), []string{"guest.ipAddress"}, func(pc []types.PropertyChange) bool { for _, c := range pc { if c.Name != "guest.ipAddress" { continue } if c.Op != types.PropertyChangeOpAssign { continue } if c.Val == nil { continue } ip = c.Val.(string) return true } return false }) if err != nil { return "", err } return ip, nil }
// IpAddress attempts to find the guest IP address using esxcli. // ESX hosts must be configured with the /Net/GuestIPHack enabled. // For example: // $ govc host.esxcli -- system settings advanced set -o /Net/GuestIPHack -i 1 func (g *GuestInfo) IpAddress(vm *object.VirtualMachine) (string, error) { const any = "0.0.0.0" var mvm mo.VirtualMachine pc := property.DefaultCollector(g.c) err := pc.RetrieveOne(context.TODO(), vm.Reference(), []string{"runtime.host", "config.uuid"}, &mvm) if err != nil { return "", err } h, err := g.hostInfo(mvm.Runtime.Host) if err != nil { return "", err } // Normalize uuid, esxcli and mo.VirtualMachine have different formats uuid := strings.Replace(mvm.Config.Uuid, "-", "", -1) if wid, ok := h.wids[uuid]; ok { res, err := h.Run([]string{"network", "vm", "port", "list", "--world-id", wid}) if err != nil { return "", err } for _, val := range res.Values { if ip, ok := val["IPAddress"]; ok { if ip[0] != any { return ip[0], nil } } } } return any, nil }
// WaitForMac will wait until VM get mac for all attached nics.
// Returns map "Virtual Network Name": "nic MAC address"
func (vm VirtualMachine) WaitForMAC(ctx context.Context) (map[string]string, error) {
	devices, err := vm.Device(ctx)
	if err != nil {
		log.Errorf("Unable to get device listing for VM")
		return nil, err
	}

	nics := devices.SelectByType(&types.VirtualEthernetCard{})
	// macs maps network name -> MAC; empty string marks a NIC still waiting.
	macs := make(map[string]string)
	// device name:network name
	nicMappings := make(map[string]string)
	for _, nic := range nics {
		if n, ok := nic.(types.BaseVirtualEthernetCard); ok {
			netName, err := vm.getNetworkName(ctx, n)
			if err != nil {
				log.Errorf("failed to get network name: %s", err)
				return nil, err
			}
			macs[netName] = ""

			nicMappings[devices.Name(nic)] = netName
		} else {
			log.Errorf("Failed to get network name of vNIC: %v", nic)
			// NOTE(review): err is nil on this path (the last assignment was
			// the successful vm.Device call), so this returns (nil, nil)
			// despite logging an error — callers cannot detect the failure.
			// Should return a non-nil error; needs a constructor (e.g.
			// fmt.Errorf) not visible in this block's imports. TODO confirm
			// and fix.
			return nil, err
		}
	}

	p := property.DefaultCollector(vm.Session.Vim25())

	// Wait for all NICs to have a MacAddress, which may not be generated yet.
	err = property.Wait(ctx, p, vm.Reference(), []string{"config.hardware.device"}, func(pc []types.PropertyChange) bool {
		for _, c := range pc {
			if c.Op != types.PropertyChangeOpAssign {
				continue
			}

			changedDevices := c.Val.(types.ArrayOfVirtualDevice).VirtualDevice
			for _, device := range changedDevices {
				if nic, ok := device.(types.BaseVirtualEthernetCard); ok {
					mac := nic.GetVirtualEthernetCard().MacAddress
					if mac == "" {
						continue
					}
					netName := nicMappings[devices.Name(device)]
					macs[netName] = mac
				}
			}
		}
		// Keep waiting while any network still has an empty MAC entry.
		for key, value := range macs {
			if value == "" {
				log.Debugf("Didn't get mac address for nic on %s, continue", key)
				return false
			}
		}
		return true
	})
	return macs, err
}
func TestWaitForUpdates(t *testing.T) { folder := esx.RootFolder s := New(NewServiceInstance(esx.ServiceContent, folder)) ts := s.NewServer() defer ts.Close() ctx := context.Background() c, err := govmomi.NewClient(ctx, ts.URL, true) if err != nil { t.Fatal(err) } cb := func(once bool) func([]types.PropertyChange) bool { return func(pc []types.PropertyChange) bool { if len(pc) != 1 { t.Fail() } c := pc[0] if c.Op != types.PropertyChangeOpAssign { t.Fail() } if c.Name != "name" { t.Fail() } if c.Val.(string) != folder.Name { t.Fail() } return once } } pc := property.DefaultCollector(c.Client) props := []string{"name"} err = property.Wait(ctx, pc, folder.Reference(), props, cb(true)) if err != nil { t.Error(err) } // incremental updates not yet suppported err = property.Wait(ctx, pc, folder.Reference(), props, cb(false)) if err == nil { t.Error("expected error") } // test object not found Map.Remove(folder.Reference()) err = property.Wait(ctx, pc, folder.Reference(), props, cb(true)) if err == nil { t.Error("expected error") } }
func (cmd *info) Run(ctx context.Context, f *flag.FlagSet) error { c, err := cmd.Client() if err != nil { return err } finder, err := cmd.Finder() if err != nil { return err } args := f.Args() if len(args) == 0 { args = []string{"*"} } var props []string res := infoResult{ finder: finder, ctx: ctx, } if !cmd.OutputFlag.JSON { props = []string{ "name", "vmFolder", "hostFolder", "datastoreFolder", "networkFolder", "datastore", "network", } } for _, arg := range args { objects, err := finder.DatacenterList(ctx, arg) if err != nil { return err } res.objects = append(res.objects, objects...) } if len(res.objects) != 0 { refs := make([]types.ManagedObjectReference, 0, len(res.objects)) for _, o := range res.objects { refs = append(refs, o.Reference()) } pc := property.DefaultCollector(c) err = pc.Retrieve(ctx, refs, props, &res.Datacenters) if err != nil { return err } } return cmd.WriteResult(&res) }
func getVirtualMachineManagedObjectReference(ctx context.Context, c *govmomi.Client, vm *object.VirtualMachine, field string, dst interface{}) error { collector := property.DefaultCollector(c.Client) // Retrieve required field from VM object err := collector.RetrieveOne(ctx, vm.Reference(), []string{field}, dst) if err != nil { return err } return nil }
func (m Manager) List(ctx context.Context) ([]types.LicenseManagerLicenseInfo, error) { var mlm mo.LicenseManager err := property.DefaultCollector(m.c).RetrieveOne(ctx, m.Reference(), []string{"licenses"}, &mlm) if err != nil { return nil, err } return mlm.Licenses, nil }
func (cmd *info) Run(f *flag.FlagSet) error { if f.NArg() == 0 { return flag.ErrHelp } c, err := cmd.Client() if err != nil { return err } ctx := context.TODO() finder, err := cmd.Finder() if err != nil { return err } var res infoResult var props []string if cmd.OutputFlag.JSON { props = nil } else { props = []string{ "name", "config.cpuAllocation", "config.memoryAllocation", "runtime.cpu", "runtime.memory", } } for _, arg := range f.Args() { objects, err := finder.ResourcePoolList(ctx, arg) if err != nil { return err } res.objects = append(res.objects, objects...) } if len(res.objects) != 0 { refs := make([]types.ManagedObjectReference, 0, len(res.objects)) for _, o := range res.objects { refs = append(refs, o.Reference()) } pc := property.DefaultCollector(c) err = pc.Retrieve(ctx, refs, props, &res.ResourcePools) if err != nil { return err } } return cmd.WriteResult(&res) }
func (cmd *info) Run(f *flag.FlagSet) error { c, err := cmd.Client() if err != nil { return err } ctx := context.TODO() var res infoResult var props []string if cmd.OutputFlag.JSON { props = nil // Load everything } else { props = []string{"summary"} // Load summary } // We could do without the -host flag, leaving it for compat host, err := cmd.HostSystemIfSpecified() if err != nil { return err } // Default only if there is a single host if host == nil && f.NArg() == 0 { host, err = cmd.HostSystem() if err != nil { return err } } if host != nil { res.objects = append(res.objects, host) } else { res.objects, err = cmd.HostSystems(f.Args()) if err != nil { return err } } if len(res.objects) != 0 { refs := make([]types.ManagedObjectReference, 0, len(res.objects)) for _, o := range res.objects { refs = append(refs, o.Reference()) } pc := property.DefaultCollector(c) err = pc.Retrieve(ctx, refs, props, &res.HostSystems) if err != nil { return err } } return cmd.WriteResult(&res) }
func (c *info) Run(f *flag.FlagSet) error { client, err := c.Client() if err != nil { return err } var hosts []*object.HostSystem // We could do without the -host flag, leaving it for compat host, err := c.HostSystemIfSpecified() if err != nil { return err } // Default only if there is a single host if host == nil && f.NArg() == 0 { host, err = c.HostSystem() if err != nil { return err } } if host != nil { hosts = append(hosts, host) } else { hosts, err = c.HostSystems(f.Args()) if err != nil { return err } } var res infoResult var props []string if c.OutputFlag.JSON { props = nil // Load everything } else { props = []string{"summary"} // Load summary } for _, host := range hosts { var h mo.HostSystem pc := property.DefaultCollector(client) err = pc.RetrieveOne(context.TODO(), host.Reference(), props, &h) if err != nil { return err } res.HostSystems = append(res.HostSystems, h) } return c.WriteResult(&res) }
func NewFinder(client *vim25.Client, all bool) *Finder { f := &Finder{ client: client, recurser: list.Recurser{ Collector: property.DefaultCollector(client), All: all, }, } return f }
// WaitForExtraConfig waits until key shows up with the expected value inside the ExtraConfig func (vm *VirtualMachine) WaitForExtraConfig(ctx context.Context, waitFunc func(pc []types.PropertyChange) bool) error { // Get the default collector p := property.DefaultCollector(vm.Vim25()) // Wait on config.extraConfig // https://www.vmware.com/support/developer/vc-sdk/visdk2xpubs/ReferenceGuide/vim.vm.ConfigInfo.html err := property.Wait(ctx, p, vm.Reference(), []string{"config.extraConfig"}, waitFunc) if err != nil { log.Errorf("Property collector error: %s", err) return err } return nil }
func (cmd *info) Run(ctx context.Context, f *flag.FlagSet) error { if f.NArg() == 0 { return flag.ErrHelp } c, err := cmd.Client() if err != nil { return err } finder, err := cmd.Finder() if err != nil { return err } var res infoResult var props []string if cmd.OutputFlag.JSON { props = nil } else { props = []string{ "name", "config.cpuAllocation", "config.memoryAllocation", "runtime.cpu", "runtime.memory", } } for _, arg := range f.Args() { vapps, err := finder.VirtualAppList(context.TODO(), arg) if err != nil { return err } for _, vapp := range vapps { var p mo.VirtualApp pc := property.DefaultCollector(c) err = pc.RetrieveOne(context.TODO(), vapp.Reference(), props, &p) if err != nil { return err } res.VApps = append(res.VApps, p) } } return cmd.WriteResult(&res) }
// multipleObjectEvents creates one EventHistoryCollector per object, groups
// them in a ListView, and streams each collector's latest page of events to f
// until every collector has delivered an initial page — or indefinitely when
// tail is true.
//
// NOTE(review): the force parameter is not used anywhere in this body —
// confirm whether it should gate anything or be removed.
func multipleObjectEvents(ctx context.Context, m Manager, objects []types.ManagedObjectReference, pageSize int32, tail bool, force bool, prop []string, f func([]types.BaseEvent) error) error {
	// create an EventHistoryCollector for each object
	var collectors []types.ManagedObjectReference

	for _, o := range objects {
		filter := types.EventFilterSpec{
			Entity: &types.EventFilterSpecByEntity{
				Entity:    o,
				Recursion: types.EventFilterSpecRecursionOptionAll,
			},
		}

		collector, err := m.CreateCollectorForEvents(ctx, filter)
		if err != nil {
			return fmt.Errorf("[%#v] %s", o, err)
		}
		// defer inside the loop is intentional: every collector must stay
		// alive until the WaitForView call below has finished.
		defer collector.Destroy(ctx)

		err = collector.SetPageSize(ctx, pageSize)
		if err != nil {
			return err
		}

		collectors = append(collectors, collector.Reference())
	}

	// create and populate a ListView
	viewMgr := view.NewManager(m.Client())

	listView, err := viewMgr.CreateListView(ctx, collectors)
	if err != nil {
		return err
	}

	// count tracks how many updates have arrived; once every collector has
	// produced one (non-tail mode), the wait ends.
	count := 0

	// Retrieve the property from the objects in the ListView
	return property.WaitForView(ctx, property.DefaultCollector(m.Client()), listView.Reference(), collectors[0], prop,
		func(pc []types.PropertyChange) bool {
			for _, u := range pc {
				if u.Name != prop[0] {
					continue
				}
				if u.Val == nil {
					continue
				}
				// NOTE(review): the error returned by f is discarded here, so
				// a failing callback does not stop the stream or surface to
				// the caller — confirm this is intended.
				f(u.Val.(types.ArrayOfEvent).Event)
			}
			count++
			if count == len(collectors) && !tail {
				return true
			}
			return false
		})
}
func (cmd *info) Run(f *flag.FlagSet) error { c, err := cmd.Client() if err != nil { return err } vms, err := cmd.VirtualMachines(f.Args()) if err != nil { if _, ok := err.(*find.NotFoundError); ok { // Continue with empty VM slice } else { return err } } var res infoResult var props []string if cmd.OutputFlag.JSON { props = nil // Load everything } else { props = []string{"summary", "guest.ipAddress"} // Load summary } for _, vm := range vms { for { var mvm mo.VirtualMachine pc := property.DefaultCollector(c) err = pc.RetrieveOne(context.TODO(), vm.Reference(), props, &mvm) if err != nil { return err } if cmd.WaitForIP && mvm.Guest.IpAddress == "" { _, err = vm.WaitForIP(context.TODO()) if err != nil { return err } // Reload virtual machine object continue } res.VirtualMachines = append(res.VirtualMachines, mvm) break } } return cmd.WriteResult(&res) }
func (cmd *info) Run(f *flag.FlagSet) error { c, err := cmd.Client() if err != nil { return err } ctx := context.TODO() finder, err := cmd.Finder() if err != nil { return err } args := f.Args() if len(args) == 0 { args = []string{"*"} } var res infoResult var props []string if cmd.OutputFlag.JSON { props = nil // Load everything } else { props = []string{"info", "summary"} // Load summary } for _, arg := range args { objects, err := finder.DatastoreList(ctx, arg) if err != nil { return err } res.objects = append(res.objects, objects...) } if len(res.objects) != 0 { refs := make([]types.ManagedObjectReference, 0, len(res.objects)) for _, o := range res.objects { refs = append(refs, o.Reference()) } pc := property.DefaultCollector(c) err = pc.Retrieve(ctx, refs, props, &res.Datastores) if err != nil { return err } } return cmd.WriteResult(&res) }
func (cmd *info) Run(ctx context.Context, f *flag.FlagSet) error { c, err := cmd.Client() if err != nil { return err } finder, err := cmd.Finder() if err != nil { return err } args := f.Args() if len(args) == 0 { args = []string{"/"} } var props []string var res infoResult if !cmd.OutputFlag.JSON { props = []string{ "name", "childEntity", "childType", } } for _, arg := range args { object, err := finder.FolderList(ctx, arg) if err != nil { return err } res.objects = append(res.objects, object...) } if len(res.objects) != 0 { refs := make([]types.ManagedObjectReference, 0, len(res.objects)) for _, o := range res.objects { refs = append(refs, o.Reference()) } pc := property.DefaultCollector(c) err = pc.Retrieve(ctx, refs, props, &res.Folders) if err != nil { return err } } return cmd.WriteResult(&res) }
func (cmd *ls) Run(ctx context.Context, f *flag.FlagSet) error { c, err := cmd.Client() if err != nil { return err } var m mo.SessionManager pc := property.DefaultCollector(c) err = pc.RetrieveOne(ctx, *c.ServiceContent.SessionManager, nil, &m) if err != nil { return nil } return cmd.WriteResult(&sessionInfo{cmd, m}) }
func (v *Validator) QueryDatastore(ctx context.Context, vch *config.VirtualContainerHostConfigSpec, sess *session.Session) { var dataStores dsList dsNames := make(map[string]bool) for _, url := range vch.ImageStores { dsNames[url.Host] = true } for _, url := range vch.VolumeLocations { dsNames[url.Host] = true } for _, url := range vch.ContainerStores { dsNames[url.Host] = true } refs := []types.ManagedObjectReference{} for dsName, _ := range dsNames { ds, err := sess.Finder.DatastoreOrDefault(ctx, dsName) if err != nil { log.Errorf("Unable to collect information for datastore %s: %s", dsName, err) } else { refs = append(refs, ds.Reference()) } } pc := property.DefaultCollector(sess.Client.Client) err := pc.Retrieve(ctx, refs, nil, &dataStores) sort.Sort(dataStores) if err != nil { log.Errorf("Error while accessing datastore: %s", err) return } for _, ds := range dataStores { log.Infof("Datastore %s Status: %s", ds.Name, ds.OverallStatus) log.Infof("Datastore %s Free Space: %.1fGB", ds.Name, float64(ds.Summary.FreeSpace)/(1<<30)) log.Infof("Datastore %s Capacity: %.1fGB", ds.Name, float64(ds.Summary.Capacity)/(1<<30)) v.StorageRemaining = template.HTML(fmt.Sprintf(`%s <div class="row card-text"> <div class="sixty">%s:</div> <div class="fourty">%.1f GB remaining</div> </div>`, v.StorageRemaining, ds.Name, float64(ds.Summary.FreeSpace)/(1<<30))) } }
// Wait blocks until the lease leaves the "initializing" state, a fault is
// reported, or ctx is cancelled. When the lease reaches the ready state its
// info is returned; a server-reported fault becomes an error carrying its
// localized message; any other terminal state is reported as unexpected.
func (o HttpNfcLease) Wait(ctx context.Context) (*types.HttpNfcLeaseInfo, error) {
	var lease mo.HttpNfcLease

	pc := property.DefaultCollector(o.c)
	err := property.Wait(ctx, pc, o.Reference(), []string{"state", "info", "error"}, func(pc []types.PropertyChange) bool {
		done := false

		for _, c := range pc {
			if c.Val == nil {
				continue
			}

			switch c.Name {
			case "error":
				// A reported fault ends the wait immediately.
				val := c.Val.(types.LocalizedMethodFault)
				lease.Error = &val
				done = true
			case "info":
				// Info may arrive before the state change; keep the latest.
				val := c.Val.(types.HttpNfcLeaseInfo)
				lease.Info = &val
			case "state":
				// Any state other than "initializing" is terminal for the wait.
				lease.State = c.Val.(types.HttpNfcLeaseState)
				if lease.State != types.HttpNfcLeaseStateInitializing {
					done = true
				}
			}
		}

		return done
	})
	if err != nil {
		return nil, err
	}

	if lease.State == types.HttpNfcLeaseStateReady {
		return lease.Info, nil
	}

	if lease.Error != nil {
		return nil, errors.New(lease.Error.LocalizedMessage)
	}

	return nil, fmt.Errorf("unexpected nfc lease state: %s", lease.State)
}
// CertificateInfo wraps the host CertificateManager certificateInfo property with the HostCertificateInfo helper. // The ThumbprintSHA1 field is set to HostSystem.Summary.Config.SslThumbprint if the host system is managed by a vCenter. func (m HostCertificateManager) CertificateInfo(ctx context.Context) (*HostCertificateInfo, error) { var hs mo.HostSystem var cm mo.HostCertificateManager pc := property.DefaultCollector(m.Client()) err := pc.RetrieveOne(ctx, m.Reference(), []string{"certificateInfo"}, &cm) if err != nil { return nil, err } _ = pc.RetrieveOne(ctx, m.Host.Reference(), []string{"summary.config.sslThumbprint"}, &hs) return &HostCertificateInfo{ HostCertificateManagerCertificateInfo: cm.CertificateInfo, ThumbprintSHA1: hs.Summary.Config.SslThumbprint, }, nil }
func (cmd *question) Run(f *flag.FlagSet) error { c, err := cmd.Client() if err != nil { return err } vm, err := cmd.VirtualMachine() if err != nil { return err } if vm == nil { return errors.New("No VM specified") } var mvm mo.VirtualMachine pc := property.DefaultCollector(c) err = pc.RetrieveOne(context.TODO(), vm.Reference(), []string{"runtime.question"}, &mvm) if err != nil { return err } q := mvm.Runtime.Question if q == nil { fmt.Printf("No pending question\n") return nil } // Print question if no answer is specified if cmd.answer == "" { fmt.Printf("Question:\n%s\n\n", q.Text) fmt.Printf("Possible answers:\n") for _, e := range q.Choice.ChoiceInfo { ed := e.(*types.ElementDescription) fmt.Printf("%s) %s\n", ed.Key, ed.Description.Label) } return nil } // Answer question return vm.Answer(context.TODO(), q.Id, cmd.answer) }
// AttachedHosts returns hosts that have this Datastore attached, accessible and writable. func (d Datastore) AttachedHosts(ctx context.Context) ([]*HostSystem, error) { var ds mo.Datastore var hosts []*HostSystem pc := property.DefaultCollector(d.Client()) err := pc.RetrieveOne(ctx, d.Reference(), []string{"host"}, &ds) if err != nil { return nil, err } for _, host := range ds.Host { info := host.MountInfo if *info.Mounted && *info.Accessible && info.AccessMode == string(types.HostMountModeReadWrite) { hosts = append(hosts, NewHostSystem(d.Client(), host.Key)) } } return hosts, nil }
// UserSession retrieves and returns the SessionManager's CurrentSession field. // Nil is returned if the session is not authenticated. func (sm *Manager) UserSession(ctx context.Context) (*types.UserSession, error) { var mgr mo.SessionManager pc := property.DefaultCollector(sm.client) err := pc.RetrieveOne(ctx, sm.Reference(), []string{"currentSession"}, &mgr) if err != nil { // It's OK if we can't retrieve properties because we're not authenticated if f, ok := err.(types.HasFault); ok { switch f.Fault().(type) { case *types.NotAuthenticated: return nil, nil } } return nil, err } return mgr.CurrentSession, nil }