// upgradeStatusMessage generates a user facing status string about upgrade progress and status
func (l *List) upgradeStatusMessage(ctx context.Context, vch *vm.VirtualMachine, installerVer *version.Build, vchVer *version.Build) string {
	if sameVer := installerVer.Equal(vchVer); sameVer {
		return "Up to date"
	}

	upgrading, _, err := vch.UpgradeInProgress(ctx, management.UpgradePrefix)
	if err != nil {
		return fmt.Sprintf("Unknown: %s", err)
	}
	if upgrading {
		return "Upgrade in progress"
	}

	canUpgrade, err := installerVer.IsNewer(vchVer)
	if err != nil {
		return fmt.Sprintf("Unknown: %s", err)
	}
	if canUpgrade {
		return fmt.Sprintf("Upgradeable to %s", installerVer.ShortVersion())
	}

	oldInstaller, err := installerVer.IsOlder(vchVer)
	if err != nil {
		return fmt.Sprintf("Unknown: %s", err)
	}
	if oldInstaller {
		return "VCH has newer version"
	}

	// can't get here
	return "Invalid upgrade status"
}
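// Example (not part of the original source): a minimal sketch of how the status
// string might feed a listing. The vchs/builds slices and the pairing between
// them are assumptions for illustration only.
func (l *List) exampleStatusColumn(ctx context.Context, vchs []*vm.VirtualMachine, builds []*version.Build) {
	installerVer := version.GetBuild()
	for i, vch := range vchs {
		// one status string per VCH, e.g. "Up to date" or "Upgrade in progress"
		status := l.upgradeStatusMessage(ctx, vch, installerVer, builds[i])
		log.Infof("%s\t%s", vch.Reference(), status)
	}
}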
func (d *Dispatcher) InspectVCH(vch *vm.VirtualMachine, conf *config.VirtualContainerHostConfigSpec) error {
	defer trace.End(trace.Begin(conf.Name))

	state, err := vch.PowerState(d.ctx)
	if err != nil {
		log.Errorf("Failed to get VM power state, service might not be available at this moment.")
	}
	if state != types.VirtualMachinePowerStatePoweredOn {
		err = errors.Errorf("VCH is not powered on, state %s", state)
		log.Errorf("%s", err)
		return err
	}

	clientIP := conf.ExecutorConfig.Networks["client"].Assigned.IP
	externalIP := conf.ExecutorConfig.Networks["external"].Assigned.IP

	if ip.IsUnspecifiedIP(clientIP) {
		err = errors.Errorf("No client IP address assigned")
		log.Errorf("%s", err)
		return err
	}

	if ip.IsUnspecifiedIP(externalIP) {
		err = errors.Errorf("No external IP address assigned")
		log.Errorf("%s", err)
		return err
	}

	d.HostIP = clientIP.String()
	log.Debugf("IP address for client interface: %s", d.HostIP)

	if !conf.HostCertificate.IsNil() {
		d.VICAdminProto = "https"
		d.DockerPort = fmt.Sprintf("%d", opts.DefaultTLSHTTPPort)
	} else {
		d.VICAdminProto = "http"
		d.DockerPort = fmt.Sprintf("%d", opts.DefaultHTTPPort)
	}

	// try looking up preferred name, irrespective of CAs
	if cert, err := conf.HostCertificate.X509Certificate(); err == nil {
		name, _ := viableHostAddress([]net.IP{clientIP}, cert, conf.CertificateAuthorities)
		if name != "" {
			log.Debugf("Retrieved proposed name from host certificate: %q", name)
			log.Debugf("Assigning first name from set: %s", name)

			if name != d.HostIP {
				log.Infof("Using address from host certificate over allocated IP: %s", d.HostIP)
				// reassign
				d.HostIP = name
			}
		} else {
			log.Warnf("Unable to identify address acceptable to host certificate")
		}
	} else {
		log.Debugf("Failed to load host cert: %s", err)
	}

	d.ShowVCH(conf, "", "", "", "")
	return nil
}
func (d *Dispatcher) InspectVCH(vch *vm.VirtualMachine, conf *config.VirtualContainerHostConfigSpec) error {
	defer trace.End(trace.Begin(conf.Name))

	state, err := vch.PowerState(d.ctx)
	if err != nil {
		log.Errorf("Failed to get VM power state, service might not be available at this moment.")
	}
	if state != types.VirtualMachinePowerStatePoweredOn {
		err = errors.Errorf("VCH is not powered on, state %s", state)
		log.Errorf("%s", err)
		return err
	}
	if ip.IsUnspecifiedIP(conf.ExecutorConfig.Networks["client"].Assigned.IP) {
		err = errors.Errorf("No client IP address assigned")
		log.Errorf("%s", err)
		return err
	}

	d.HostIP = conf.ExecutorConfig.Networks["client"].Assigned.IP.String()
	log.Debugf("IP address for client interface: %s", d.HostIP)

	if !conf.HostCertificate.IsNil() {
		d.VICAdminProto = "https"
		d.DockerPort = fmt.Sprintf("%d", opts.DefaultTLSHTTPPort)
	} else {
		d.VICAdminProto = "http"
		d.DockerPort = fmt.Sprintf("%d", opts.DefaultHTTPPort)
	}
	d.ShowVCH(conf, "", "")
	return nil
}
func (d *Dispatcher) deleteNetworkDevices(vmm *vm.VirtualMachine, conf *config.VirtualContainerHostConfigSpec) error {
	defer trace.End(trace.Begin(conf.Name))

	log.Infof("Removing appliance VM network devices")

	power, err := vmm.PowerState(d.ctx)
	if err != nil {
		log.Errorf("Failed to get vm power status %q: %s", vmm.Reference(), err)
		return err
	}
	if power != types.VirtualMachinePowerStatePoweredOff {
		if _, err = vmm.WaitForResult(d.ctx, func(ctx context.Context) (tasks.Task, error) {
			return vmm.PowerOff(ctx)
		}); err != nil {
			log.Errorf("Failed to power off existing appliance: %s", err)
			return err
		}
	}

	devices, err := d.networkDevices(vmm)
	if err != nil {
		log.Errorf("Unable to get network devices: %s", err)
		return err
	}

	if len(devices) == 0 {
		log.Infof("No network device attached")
		return nil
	}
	// remove devices
	return vmm.RemoveDevice(d.ctx, false, devices...)
}
func (d *Dispatcher) NewVCHFromComputePath(computePath string, name string, v *validate.Validator) (*vm.VirtualMachine, error) {
	defer trace.End(trace.Begin(fmt.Sprintf("path %q, name %q", computePath, name)))

	var err error

	parent, err := v.ResourcePoolHelper(d.ctx, computePath)
	if err != nil {
		return nil, err
	}
	d.vchPoolPath = path.Join(parent.InventoryPath, name)
	var vchPool *object.ResourcePool
	if d.isVC {
		vapp, err := d.findVirtualApp(d.vchPoolPath)
		if err != nil {
			log.Errorf("Failed to get VCH virtual app %q: %s", d.vchPoolPath, err)
			return nil, err
		}
		if vapp != nil {
			vchPool = vapp.ResourcePool
		}
	}
	if vchPool == nil {
		vchPool, err = d.session.Finder.ResourcePool(d.ctx, d.vchPoolPath)
		if err != nil {
			log.Errorf("Failed to get VCH resource pool %q: %s", d.vchPoolPath, err)
			return nil, err
		}
	}

	rp := compute.NewResourcePool(d.ctx, d.session, vchPool.Reference())
	var vmm *vm.VirtualMachine
	if vmm, err = rp.GetChildVM(d.ctx, d.session, name); err != nil {
		log.Errorf("Failed to get VCH VM: %s", err)
		return nil, err
	}
	if vmm == nil {
		err = errors.Errorf("Didn't find VM %q in resource pool %q", name, rp.Name())
		log.Error(err)
		return nil, err
	}
	vmm.InventoryPath = path.Join(d.vchPoolPath, name)

	// check if it's VCH
	var ok bool
	if ok, err = d.isVCH(vmm); err != nil {
		log.Error(err)
		return nil, err
	}
	if !ok {
		err = errors.Errorf("Not a VCH")
		log.Error(err)
		return nil, err
	}
	return vmm, nil
}
// retrieves the uuid of the appliance vm to create a unique vsphere extension name
func (d *Dispatcher) GenerateExtensionName(conf *config.VirtualContainerHostConfigSpec, vm *vm.VirtualMachine) error {
	defer trace.End(trace.Begin(conf.ExtensionName))

	var o mo.VirtualMachine
	err := vm.Properties(d.ctx, vm.Reference(), []string{"config.uuid"}, &o)
	if err != nil {
		return errors.Errorf("Could not get VM UUID from appliance VM due to error: %s", err)
	}

	conf.ExtensionName = "com.vmware.vic." + o.Config.Uuid
	return nil
}
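// Illustration (assumed UUID value, not from the source): for an appliance VM
// whose config.uuid is "423e1a2b-0c3d-4e5f-8a9b-0123456789ab", the generated
// extension name would be "com.vmware.vic.423e1a2b-0c3d-4e5f-8a9b-0123456789ab".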
func (d *Dispatcher) configIso(conf *metadata.VirtualContainerHostConfigSpec, vm *vm.VirtualMachine) (object.VirtualDeviceList, error) {
	defer trace.End(trace.Begin(""))

	var devices object.VirtualDeviceList
	var err error

	vmDevices, err := vm.Device(d.ctx)
	if err != nil {
		log.Errorf("Failed to get vm devices for appliance: %s", err)
		return nil, err
	}
	ide, err := vmDevices.FindIDEController("")
	if err != nil {
		log.Errorf("Failed to find IDE controller for appliance: %s", err)
		return nil, err
	}
	cdrom, err := devices.CreateCdrom(ide)
	if err != nil {
		log.Errorf("Failed to create Cdrom device for appliance: %s", err)
		return nil, err
	}
	cdrom = devices.InsertIso(cdrom, fmt.Sprintf("[%s] %s/appliance.iso", conf.ImageStores[0].Host, d.vmPathName))
	devices = append(devices, cdrom)
	return devices, nil
}
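// Illustration (assumed values, not from the source): with an image store host
// of "datastore1" and a vmPathName of "vch1", the ISO backing path inserted
// above would be "[datastore1] vch1/appliance.iso".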
func (d *Dispatcher) getName(vm *vm.VirtualMachine) string {
	name, err := vm.Name(d.ctx)
	if err != nil {
		log.Errorf("VM name not found: %s", err)
		return ""
	}
	return name
}
func (d *Dispatcher) networkDevices(vmm *vm.VirtualMachine) ([]types.BaseVirtualDevice, error) {
	defer trace.End(trace.Begin(""))

	var err error
	vmDevices, err := vmm.Device(d.ctx)
	if err != nil {
		log.Errorf("Failed to get vm devices for appliance: %s", err)
		return nil, err
	}
	var devices []types.BaseVirtualDevice
	for _, device := range vmDevices {
		if _, ok := device.(types.BaseVirtualEthernetCard); ok {
			devices = append(devices, device)
		}
	}
	return devices, nil
}
// upgradeStatusMessage logs a user facing message about upgrade progress and status
func (i *Inspect) upgradeStatusMessage(ctx context.Context, vch *vm.VirtualMachine, installerVer *version.Build, vchVer *version.Build) {
	if sameVer := installerVer.Equal(vchVer); sameVer {
		log.Info("Installer has same version as VCH")
		log.Info("No upgrade available with this installer version")
		return
	}

	upgrading, _, err := vch.UpgradeInProgress(ctx, management.UpgradePrefix)
	if err != nil {
		log.Errorf("Unable to determine if upgrade is in progress: %s", err)
		return
	}
	if upgrading {
		log.Info("Upgrade in progress")
		return
	}

	canUpgrade, err := installerVer.IsNewer(vchVer)
	if err != nil {
		log.Errorf("Unable to determine if upgrade is available: %s", err)
		return
	}
	if canUpgrade {
		log.Info("Upgrade available")
		return
	}

	oldInstaller, err := installerVer.IsOlder(vchVer)
	if err != nil {
		log.Errorf("Unable to determine if upgrade is available: %s", err)
		return
	}
	if oldInstaller {
		log.Info("Installer has older version than VCH")
		log.Info("No upgrade available with this installer version")
		return
	}

	// can't get here
	log.Warn("Invalid upgrade status")
}
// Find the disk by name attached to the given vm.
func findDisk(op trace.Operation, vm *vm.VirtualMachine, name string) (*types.VirtualDisk, error) {
	defer trace.End(trace.Begin(vm.String()))

	log.Debugf("Looking for attached disk matching filename %s", name)

	devices, err := vm.Device(op)
	if err != nil {
		return nil, fmt.Errorf("Failed to refresh devices for vm: %s", errors.ErrorStack(err))
	}

	candidates := devices.Select(func(device types.BaseVirtualDevice) bool {
		db := device.GetVirtualDevice().Backing
		if db == nil {
			return false
		}

		backing, ok := device.GetVirtualDevice().Backing.(*types.VirtualDiskFlatVer2BackingInfo)
		if !ok {
			return false
		}

		log.Debugf("backing file name %s", backing.VirtualDeviceFileBackingInfo.FileName)
		match := strings.HasSuffix(backing.VirtualDeviceFileBackingInfo.FileName, name)
		if match {
			log.Debugf("Found candidate disk for %s at %s", name, backing.VirtualDeviceFileBackingInfo.FileName)
		}

		return match
	})

	if len(candidates) == 0 {
		log.Warnf("No disks match name: %s", name)
		return nil, os.ErrNotExist
	}

	if len(candidates) > 1 {
		return nil, errors.Errorf("Too many disks match name: %s", name)
	}

	return candidates[0].(*types.VirtualDisk), nil
}
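// Example (not part of the original source): a sketch of locating a disk by a
// datastore-relative file name and reading its capacity; "scratch.vmdk" is an
// assumed name for illustration.
func exampleFindScratchDisk(op trace.Operation, v *vm.VirtualMachine) (int64, error) {
	disk, err := findDisk(op, v, "scratch.vmdk")
	if err != nil {
		return 0, err
	}
	// the returned *types.VirtualDisk carries the backing metadata, including capacity
	return disk.CapacityInKB, nil
}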
func (d *Dispatcher) deleteVM(vm *vm.VirtualMachine, force bool) error {
	defer trace.End(trace.Begin(""))

	var err error
	power, err := vm.PowerState(d.ctx)
	if err != nil || power != types.VirtualMachinePowerStatePoweredOff {
		if err != nil {
			log.Warnf("Failed to get vm power status %s: %s", vm.Reference(), err)
		}
		if !force {
			if err != nil {
				return err
			}
			name, err := vm.Name(d.ctx)
			if err != nil {
				log.Errorf("VM name not found: %s", err)
			}
			if name != "" {
				err = errors.Errorf("VM %s is powered on", name)
			} else {
				err = errors.Errorf("VM %s is powered on", vm.Reference())
			}
			return err
		}
		if _, err = tasks.WaitForResult(d.ctx, func(ctx context.Context) (tasks.ResultWaiter, error) {
			return vm.PowerOff(ctx)
		}); err != nil {
			log.Debugf("Failed to power off existing appliance: %s. Attempting to remove anyway", err)
		}
	}

	// get the actual folder name before we delete it
	folder, err := vm.FolderName(d.ctx)
	if err != nil {
		log.Warnf("Failed to get actual folder name for VM. Will not attempt to delete additional data files in VM directory: %s", err)
	}

	_, err = tasks.WaitForResult(d.ctx, func(ctx context.Context) (tasks.ResultWaiter, error) {
		return vm.Destroy(ctx)
	})
	if err != nil {
		err = errors.Errorf("Failed to destroy vm %s: %s", vm.Reference(), err)
		return err
	}
	if _, err = d.deleteDatastoreFiles(d.session.Datastore, folder, true); err != nil {
		log.Warnf("VM path %s was not removed: %s", folder, err)
	}
	return nil
}
func (d *Dispatcher) isVCH(vm *vm.VirtualMachine) (bool, error) {
	if vm == nil {
		return false, errors.New("nil parameter")
	}
	defer trace.End(trace.Begin(vm.InventoryPath))

	info, err := vm.FetchExtraConfig(d.ctx)
	if err != nil {
		err = errors.Errorf("Failed to fetch guest info of appliance vm: %s", err)
		return false, err
	}

	var remoteConf config.VirtualContainerHostConfigSpec
	extraconfig.Decode(extraconfig.MapSource(info), &remoteConf)

	// if the moref of the target matches where we expect to find it for a VCH, run with it
	if remoteConf.ExecutorConfig.ID == vm.Reference().String() {
		return true, nil
	}
	return false, nil
}
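// Illustration (assumed moref value, not from the source): a VCH endpoint
// records its own moref as the executor ID in extraconfig, so for a VM whose
// reference is "VirtualMachine:vm-101", isVCH returns true only when the
// decoded config carries ExecutorConfig.ID == "VirtualMachine:vm-101".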
func (d *Dispatcher) GetVCHConfig(vm *vm.VirtualMachine) (*metadata.VirtualContainerHostConfigSpec, error) {
	defer trace.End(trace.Begin(""))

	// this is the appliance vm
	mapConfig, err := vm.FetchExtraConfig(d.ctx)
	if err != nil {
		err = errors.Errorf("Failed to get VM extra config of %s, %s", vm.Reference(), err)
		log.Errorf("%s", err)
		return nil, err
	}
	data := extraconfig.MapSource(mapConfig)
	vchConfig := &metadata.VirtualContainerHostConfigSpec{}
	result := extraconfig.Decode(data, vchConfig)
	if result == nil {
		// err is nil at this point, so report the decode failure directly
		err = errors.Errorf("Failed to decode VM configuration %s", vm.Reference())
		log.Errorf("%s", err)
		return nil, err
	}

	// vchConfig.ID
	return vchConfig, nil
}
func (d *Debug) Run(cli *cli.Context) error {
	var err error
	if err = d.processParams(); err != nil {
		return err
	}

	if d.Debug.Debug > 0 {
		log.SetLevel(log.DebugLevel)
		trace.Logger.Level = log.DebugLevel
	}

	if len(cli.Args()) > 0 {
		log.Errorf("Unknown argument: %s", cli.Args()[0])
		return errors.New("invalid CLI arguments")
	}

	log.Infof("### Configuring VCH for debug ###")

	ctx, cancel := context.WithTimeout(context.Background(), d.Timeout)
	defer cancel()

	validator, err := validate.NewValidator(ctx, d.Data)
	if err != nil {
		log.Errorf("Debug cannot continue - failed to create validator: %s", err)
		return errors.New("Debug failed")
	}
	executor := management.NewDispatcher(validator.Context, validator.Session, nil, d.Force)

	var vch *vm.VirtualMachine
	if d.Data.ID != "" {
		vch, err = executor.NewVCHFromID(d.Data.ID)
	} else {
		vch, err = executor.NewVCHFromComputePath(d.Data.ComputeResourcePath, d.Data.DisplayName, validator)
	}
	if err != nil {
		log.Errorf("Failed to get Virtual Container Host %s", d.DisplayName)
		log.Error(err)
		return errors.New("Debug failed")
	}

	log.Infof("")
	log.Infof("VCH ID: %s", vch.Reference().String())

	vchConfig, err := executor.GetVCHConfig(vch)
	if err != nil {
		log.Error("Failed to get Virtual Container Host configuration")
		log.Error(err)
		return errors.New("Debug failed")
	}
	executor.InitDiagnosticLogs(vchConfig)

	installerVer := version.GetBuild()

	log.Info("")
	log.Infof("Installer version: %s", installerVer.ShortVersion())
	log.Infof("VCH version: %s", vchConfig.Version.ShortVersion())

	// load the key file if set
	var key []byte
	if d.authorizedKey != "" {
		key, err = ioutil.ReadFile(d.authorizedKey)
		if err != nil {
			log.Errorf("Unable to read public key from %s: %s", d.authorizedKey, err)
			return errors.New("unable to load public key")
		}
	}

	if err = executor.DebugVCH(vch, vchConfig, d.password, string(key)); err != nil {
		executor.CollectDiagnosticLogs()
		log.Errorf("%s", err)
		return errors.New("Debug failed")
	}

	// display the VCH endpoints again for convenience
	if err = executor.InspectVCH(vch, vchConfig); err != nil {
		executor.CollectDiagnosticLogs()
		log.Errorf("%s", err)
		return errors.New("inspect failed")
	}

	log.Infof("Completed successfully")
	return nil
}
func (d *Uninstall) Run(cli *cli.Context) (err error) {
	if err = d.processParams(); err != nil {
		return err
	}

	if d.Debug.Debug > 0 {
		log.SetLevel(log.DebugLevel)
		trace.Logger.Level = log.DebugLevel
	}

	if len(cli.Args()) > 0 {
		log.Errorf("Unknown argument: %s", cli.Args()[0])
		return errors.New("invalid CLI arguments")
	}

	log.Infof("### Removing VCH ###")

	ctx, cancel := context.WithTimeout(context.Background(), d.Timeout)
	defer cancel()
	defer func() {
		if ctx.Err() != nil && ctx.Err() == context.DeadlineExceeded {
			// context deadline exceeded, replace returned error message
			err = errors.Errorf("Delete timed out: use --timeout to add more time")
		}
	}()

	validator, err := validate.NewValidator(ctx, d.Data)
	if err != nil {
		log.Errorf("Delete cannot continue - failed to create validator: %s", err)
		return errors.New("delete failed")
	}
	executor := management.NewDispatcher(validator.Context, validator.Session, nil, d.Force)

	var vch *vm.VirtualMachine
	if d.Data.ID != "" {
		vch, err = executor.NewVCHFromID(d.Data.ID)
	} else {
		vch, err = executor.NewVCHFromComputePath(d.Data.ComputeResourcePath, d.Data.DisplayName, validator)
	}
	if err != nil {
		log.Errorf("Failed to get Virtual Container Host %s", d.DisplayName)
		log.Error(err)
		return errors.New("delete failed")
	}

	log.Infof("")
	log.Infof("VCH ID: %s", vch.Reference().String())

	vchConfig, err := executor.GetVCHConfig(vch)
	if err != nil {
		log.Error("Failed to get Virtual Container Host configuration")
		log.Error(err)
		return errors.New("delete failed")
	}
	executor.InitDiagnosticLogs(vchConfig)

	if err = executor.DeleteVCH(vchConfig); err != nil {
		executor.CollectDiagnosticLogs()
		log.Errorf("%s", err)
		return errors.New("delete failed")
	}

	log.Infof("Completed successfully")
	return nil
}
func (i *Inspect) Run(cli *cli.Context) error {
	var err error
	if err = i.processParams(); err != nil {
		return err
	}

	if i.Debug.Debug > 0 {
		log.SetLevel(log.DebugLevel)
		trace.Logger.Level = log.DebugLevel
	}

	if len(cli.Args()) > 0 {
		log.Errorf("Unknown argument: %s", cli.Args()[0])
		return errors.New("invalid CLI arguments")
	}

	log.Infof("### Inspecting VCH ###")

	ctx, cancel := context.WithTimeout(context.Background(), i.Timeout)
	defer cancel()

	validator, err := validate.NewValidator(ctx, i.Data)
	if err != nil {
		log.Errorf("Inspect cannot continue - failed to create validator: %s", err)
		return errors.New("inspect failed")
	}
	executor := management.NewDispatcher(validator.Context, validator.Session, nil, i.Force)

	var vch *vm.VirtualMachine
	if i.Data.ID != "" {
		vch, err = executor.NewVCHFromID(i.Data.ID)
	} else {
		vch, err = executor.NewVCHFromComputePath(i.Data.ComputeResourcePath, i.Data.DisplayName, validator)
	}
	if err != nil {
		log.Errorf("Failed to get Virtual Container Host %s", i.DisplayName)
		log.Error(err)
		return errors.New("inspect failed")
	}

	log.Infof("")
	log.Infof("VCH ID: %s", vch.Reference().String())

	vchConfig, err := executor.GetVCHConfig(vch)
	if err != nil {
		log.Error("Failed to get Virtual Container Host configuration")
		log.Error(err)
		return errors.New("inspect failed")
	}
	executor.InitDiagnosticLogs(vchConfig)

	if err = executor.InspectVCH(vch, vchConfig); err != nil {
		executor.CollectDiagnosticLogs()
		log.Errorf("%s", err)
		return errors.New("inspect failed")
	}

	log.Infof("Completed successfully")
	return nil
}
func (d *Dispatcher) deleteVM(vm *vm.VirtualMachine, force bool) error {
	defer trace.End(trace.Begin(fmt.Sprintf("vm %q, force %t", vm.String(), force)))

	var err error
	power, err := vm.PowerState(d.ctx)
	if err != nil || power != types.VirtualMachinePowerStatePoweredOff {
		if err != nil {
			log.Warnf("Failed to get vm power status %q: %s", vm.Reference(), err)
		}
		if !force {
			if err != nil {
				return err
			}
			name := d.getName(vm)
			if name != "" {
				err = errors.Errorf("VM %q is powered on", name)
			} else {
				err = errors.Errorf("VM %q is powered on", vm.Reference())
			}
			return err
		}
		if _, err = vm.WaitForResult(d.ctx, func(ctx context.Context) (tasks.Task, error) {
			return vm.PowerOff(ctx)
		}); err != nil {
			log.Debugf("Failed to power off existing appliance: %s. Attempting to remove anyway", err)
		}
	}

	// get the actual folder name before we delete it
	folder, err := vm.FolderName(d.ctx)
	if err != nil {
		// failed to get folder name, might not be able to remove files for this VM
		name := d.getName(vm)
		if name == "" {
			log.Errorf("Unable to automatically remove all files in datastore for VM %q", vm.Reference())
		} else {
			// try to use the vm name in place of folder
			log.Infof("Delete will attempt to remove datastore files for VM %q", name)
			folder = name
		}
	}

	_, err = vm.WaitForResult(d.ctx, func(ctx context.Context) (tasks.Task, error) {
		return vm.DeleteExceptDisks(ctx)
	})
	if err != nil {
		err = errors.Errorf("Failed to destroy VM %q: %s", vm.Reference(), err)
		err2 := vm.Unregister(d.ctx)
		if err2 != nil {
			return errors.Errorf("%s then failed to unregister VM: %s", err, err2)
		}
		log.Infof("Unregistered VM to clean up after failed destroy: %q", vm.Reference())
	}
	if _, err = d.deleteDatastoreFiles(d.session.Datastore, folder, true); err != nil {
		log.Warnf("Failed to remove datastore files for VM path %q: %s", folder, err)
	}

	return nil
}
func (d *Dispatcher) enableSSH(ctx context.Context, vch *vm.VirtualMachine, password, authorizedKey string) error {
	op, err := trace.FromContext(ctx)
	if err != nil {
		op = trace.NewOperation(ctx, "enable ssh in appliance")
	}

	state, err := vch.PowerState(op)
	if err != nil {
		log.Errorf("Failed to get appliance power state, service might not be available at this moment.")
	}
	if state != types.VirtualMachinePowerStatePoweredOn {
		err = errors.Errorf("VCH appliance is not powered on, state %s", state)
		op.Errorf("%s", err)
		return err
	}

	running, err := vch.IsToolsRunning(op)
	if err != nil || !running {
		err = errors.New("Tools is not running in the appliance, unable to continue")
		op.Errorf("%s", err)
		return err
	}

	manager := guest.NewOperationsManager(d.session.Client.Client, vch.Reference())
	processManager, err := manager.ProcessManager(op)
	if err != nil {
		err = errors.Errorf("Unable to manage processes in appliance VM: %s", err)
		op.Errorf("%s", err)
		return err
	}

	auth := types.NamePasswordAuthentication{}

	spec := types.GuestProgramSpec{
		ProgramPath:      "enable-ssh",
		Arguments:        authorizedKey,
		WorkingDirectory: "/",
		EnvVariables:     []string{},
	}

	_, err = processManager.StartProgram(op, &auth, &spec)
	if err != nil {
		err = errors.Errorf("Unable to enable SSH in appliance VM: %s", err)
		op.Errorf("%s", err)
		return err
	}

	if password == "" {
		return nil
	}

	// set the password as well
	spec = types.GuestProgramSpec{
		ProgramPath:      "passwd",
		Arguments:        password,
		WorkingDirectory: "/",
		EnvVariables:     []string{},
	}

	_, err = processManager.StartProgram(op, &auth, &spec)
	if err != nil {
		err = errors.Errorf("Unable to set password in appliance VM: %s", err)
		op.Errorf("%s", err)
		return err
	}

	return nil
}
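// Example (not part of the original source): a sketch of enabling SSH on a
// running appliance with both a password and a public key; the key path and
// password are assumptions for illustration only.
func (d *Dispatcher) exampleEnableSSH(ctx context.Context, vch *vm.VirtualMachine) error {
	key, err := ioutil.ReadFile("/home/user/.ssh/id_rsa.pub")
	if err != nil {
		return err
	}
	return d.enableSSH(ctx, vch, "newRootPassword", string(key))
}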
func (u *Upgrade) Run(cli *cli.Context) error {
	var err error
	if err = u.processParams(); err != nil {
		return err
	}

	if u.Debug.Debug > 0 {
		log.SetLevel(log.DebugLevel)
		trace.Logger.Level = log.DebugLevel
	}

	if len(cli.Args()) > 0 {
		log.Errorf("Unknown argument: %s", cli.Args()[0])
		return errors.New("invalid CLI arguments")
	}

	var images map[string]string
	if images, err = u.CheckImagesFiles(u.Force); err != nil {
		return err
	}

	log.Infof("### Upgrading VCH ###")

	ctx, cancel := context.WithTimeout(context.Background(), u.Timeout)
	defer cancel()

	validator, err := validate.NewValidator(ctx, u.Data)
	if err != nil {
		log.Errorf("Upgrade cannot continue - failed to create validator: %s", err)
		return errors.New("upgrade failed")
	}
	executor := management.NewDispatcher(validator.Context, validator.Session, nil, u.Force)

	var vch *vm.VirtualMachine
	if u.Data.ID != "" {
		vch, err = executor.NewVCHFromID(u.Data.ID)
	} else {
		vch, err = executor.NewVCHFromComputePath(u.Data.ComputeResourcePath, u.Data.DisplayName, validator)
	}
	if err != nil {
		log.Errorf("Failed to get Virtual Container Host %s", u.DisplayName)
		log.Error(err)
		return errors.New("upgrade failed")
	}

	log.Infof("")
	log.Infof("VCH ID: %s", vch.Reference().String())

	vchConfig, err := executor.GetVCHConfig(vch)
	if err != nil {
		log.Error("Failed to get Virtual Container Host configuration")
		log.Error(err)
		return errors.New("upgrade failed")
	}
	executor.InitDiagnosticLogs(vchConfig)

	vConfig := validator.AddDeprecatedFields(ctx, vchConfig, u.Data)
	vConfig.ImageFiles = images
	vConfig.ApplianceISO = path.Base(u.ApplianceISO)
	vConfig.BootstrapISO = path.Base(u.BootstrapISO)
	vConfig.RollbackTimeout = u.Timeout

	if vchConfig, err = validator.MigrateConfig(ctx, vchConfig); err != nil {
		log.Errorf("Failed to migrate Virtual Container Host configuration %s", u.DisplayName)
		log.Error(err)
		return errors.New("upgrade failed")
	}

	if err = executor.Upgrade(vch, vchConfig, vConfig); err != nil {
		// upgrade failed
		executor.CollectDiagnosticLogs()
		return err
	}

	// check the docker endpoint is responsive
	if err = executor.CheckDockerAPI(vchConfig, nil); err != nil {
		executor.CollectDiagnosticLogs()
		return err
	}

	log.Infof("Completed successfully")
	return nil
}
// ensures that a paravirtual scsi controller is present and determines the
// base path of disks attached to it. returns a handle to the controller and a
// format string, with a single decimal for the disk unit number, which will
// result in the /dev/disk/by-path path
func verifyParavirtualScsiController(op trace.Operation, vm *vm.VirtualMachine) (*types.ParaVirtualSCSIController, string, error) {
	devices, err := vm.Device(op)
	if err != nil {
		log.Errorf("vmware driver failed to retrieve device list for VM %s: %s", vm, errors.ErrorStack(err))
		return nil, "", errors.Trace(err)
	}

	controller, ok := devices.PickController((*types.ParaVirtualSCSIController)(nil)).(*types.ParaVirtualSCSIController)
	if controller == nil || !ok {
		err = errors.Errorf("vmware driver failed to find a paravirtual SCSI controller - ensure setup ran correctly")
		log.Error(err.Error())
		return nil, "", errors.Trace(err)
	}

	// build the base path
	// first we determine which label we're looking for (requires VMW hardware version >=10)
	targetLabel := fmt.Sprintf("SCSI%d", controller.BusNumber)
	log.Debugf("Looking for scsi controller with label %s", targetLabel)

	pciBase := "/sys/bus/pci/devices"
	pciBus, err := os.Open(pciBase)
	if err != nil {
		log.Errorf("Failed to open %s for reading: %s", pciBase, errors.ErrorStack(err))
		return controller, "", errors.Trace(err)
	}
	defer pciBus.Close()

	pciDevices, err := pciBus.Readdirnames(0)
	if err != nil {
		log.Errorf("Failed to read contents of %s: %s", pciBase, errors.ErrorStack(err))
		return controller, "", errors.Trace(err)
	}

	var buf = make([]byte, len(targetLabel))
	var controllerName string

	for _, n := range pciDevices {
		nlabel := fmt.Sprintf("%s/%s/label", pciBase, n)
		flabel, err := os.Open(nlabel)
		if err != nil {
			if !os.IsNotExist(err) {
				log.Errorf("Unable to read label from %s: %s", nlabel, errors.ErrorStack(err))
			}
			continue
		}

		_, err = flabel.Read(buf)
		// close immediately rather than deferring inside the loop
		flabel.Close()
		if err != nil {
			log.Errorf("Unable to read label from %s: %s", nlabel, errors.ErrorStack(err))
			continue
		}

		if targetLabel == string(buf) {
			// we've found our controller
			controllerName = n
			log.Debugf("Found pvscsi controller directory: %s", controllerName)
			break
		}
	}

	if controllerName == "" {
		err := errors.Errorf("Failed to locate pvscsi controller directory")
		log.Error(err.Error())
		return controller, "", errors.Trace(err)
	}

	formatString := fmt.Sprintf("/dev/disk/by-path/pci-%s-scsi-0:0:%%d:0", controllerName)
	log.Debugf("Disk location format: %s", formatString)
	return controller, formatString, nil
}
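// Example (not part of the original source): a sketch that resolves the guest
// device node for an attached disk using the format string returned above;
// unit number 2 and the resulting path shown are assumptions for illustration.
func exampleDiskDevicePath(op trace.Operation, v *vm.VirtualMachine) (string, error) {
	_, format, err := verifyParavirtualScsiController(op, v)
	if err != nil {
		return "", err
	}
	// yields e.g. "/dev/disk/by-path/pci-0000:03:00.0-scsi-0:0:2:0"
	return fmt.Sprintf(format, 2), nil
}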