func (v *Validator) target(ctx context.Context, input *data.Data, conf *config.VirtualContainerHostConfigSpec) { defer trace.End(trace.Begin("")) targetURL := input.Target.URLWithoutPassword() if !v.IsVC() { var err error targetURL, err = url.Parse(v.Session.Service) if err != nil { v.NoteIssue(fmt.Errorf("Error processing target after transformation to SOAP endpoint: %q: %s", v.Session.Service, err)) return } // ESXi requires user/password to be encoded in the Target URL // However, this gets lost when the URL is Marshaled conf.UserPassword = targetURL.User.String() } // check if host is managed by VC v.managedbyVC(ctx) conf.Target = *targetURL conf.TargetThumbprint = input.Thumbprint // TODO: more checks needed here if specifying service account for VCH }
func (v *Validator) addNetworkHelper(ctx context.Context, conf *config.VirtualContainerHostConfigSpec, netName, epName, contNetName string, def bool) error { defer trace.End(trace.Begin("")) moid, err := v.networkHelper(ctx, netName) if err != nil { return err } if epName != "" { conf.AddNetwork(&executor.NetworkEndpoint{ Common: executor.Common{ Name: epName, }, Network: executor.ContainerNetwork{ Common: executor.Common{ Name: contNetName, ID: moid, }, Default: def, }, }) } else { conf.AddNetwork(&executor.NetworkEndpoint{ Network: executor.ContainerNetwork{ Common: executor.Common{ Name: contNetName, ID: moid, }, }, }) } return nil }
func createAppliance(ctx context.Context, sess *session.Session, conf *config.VirtualContainerHostConfigSpec, vConf *data.InstallerData, hasErr bool, t *testing.T) { var err error d := &Dispatcher{ session: sess, ctx: ctx, isVC: sess.IsVC(), force: false, } delete(conf.Networks, "bridge") // FIXME: cannot create bridge network in simulator if d.isVC { if d.vchVapp, err = d.createVApp(conf, vConf); err != nil { // FIXME: Got error: ServerFaultCode: ResourcePool:resourcepool-14 does not implement: CreateVApp. Simulator need to implement CreateVApp // t.Errorf("Unable to create virtual app: %s", err) } } if d.vchPool, err = d.createResourcePool(conf, vConf); err != nil { t.Errorf("Unable to create resource pool: %s", err) } spec, err := d.createApplianceSpec(conf, vConf) if err != nil { t.Errorf("Unable to create appliance spec: %s", err) return } // create appliance VM info, err := tasks.WaitForResult(d.ctx, func(ctx context.Context) (tasks.ResultWaiter, error) { return d.session.Folders(ctx).VmFolder.CreateVM(ctx, *spec, d.vchPool, d.session.Host) }) // get VM reference and save it moref := info.Result.(types.ManagedObjectReference) conf.SetMoref(&moref) obj, err := d.session.Finder.ObjectReference(d.ctx, moref) if err != nil { t.Errorf("Failed to reacquire reference to appliance VM after creation: %s", err) return } gvm, ok := obj.(*object.VirtualMachine) if !ok { t.Errorf("Required reference after appliance creation was not for a VM: %T", obj) return } vm2 := vm.NewVirtualMachineFromVM(d.ctx, d.session, gvm) uuid, err := vm2.UUID(d.ctx) if err != nil { t.Errorf("Failed to get VM UUID: %s", err) return } t.Logf("uuid: %s", uuid) // leverage create volume method to create image datastore conf.VolumeLocations["images-store"], _ = url.Parse(fmt.Sprintf("ds://LocalDS_0/VIC/%s/images", uuid)) if err := d.createVolumeStores(conf); err != nil { t.Errorf("Unable to create volume stores: %s", err) return } }
func (v *Validator) target(ctx context.Context, input *data.Data, conf *config.VirtualContainerHostConfigSpec) { defer trace.End(trace.Begin("")) targetURL := input.Target.URLWithoutPassword() if !v.IsVC() { var err error targetURL, err = url.Parse(v.Session.Service) if err != nil { v.NoteIssue(fmt.Errorf("Error processing target after transformation to SOAP endpoint: %q: %s", v.Session.Service, err)) return } // ESXi requires user/password to be encoded in the Target URL // However, this gets lost when the URL is Marshaled conf.UserPassword = targetURL.User.String() } // bridge network params var err error _, conf.BridgeIPRange, err = net.ParseCIDR(input.BridgeIPRange) if err != nil { v.NoteIssue(fmt.Errorf("Error parsing bridge network ip range: %s. Range must be in CIDR format, e.g., 172.16.0.0/12", err)) } conf.Target = *targetURL conf.Insecure = input.Insecure // TODO: more checks needed here if specifying service account for VCH }
func (v *Validator) storage(ctx context.Context, input *data.Data, conf *config.VirtualContainerHostConfigSpec) { defer trace.End(trace.Begin("")) // Image Store imageDSpath, ds, err := v.DatastoreHelper(ctx, input.ImageDatastorePath, "", "--image-store") if imageDSpath == nil { v.NoteIssue(err) return } // provide a default path if only a DS name is provided if imageDSpath.Path == "" { imageDSpath.Path = input.DisplayName } v.NoteIssue(err) if ds != nil { v.SetDatastore(ds, imageDSpath) conf.AddImageStore(imageDSpath) } if conf.VolumeLocations == nil { conf.VolumeLocations = make(map[string]*url.URL) } // TODO: add volume locations for label, volDSpath := range input.VolumeLocations { dsURL, _, err := v.DatastoreHelper(ctx, volDSpath, label, "--volume-store") v.NoteIssue(err) if dsURL != nil { conf.VolumeLocations[label] = dsURL } } }
func (v *Validator) basics(ctx context.Context, input *data.Data, conf *config.VirtualContainerHostConfigSpec) { defer trace.End(trace.Begin("")) // TODO: ensure that displayname doesn't violate constraints (length, characters, etc) conf.SetName(input.DisplayName) conf.SetDebug(input.Debug.Debug) conf.Name = input.DisplayName }
func (d *Dispatcher) CreateVCH(conf *config.VirtualContainerHostConfigSpec, settings *data.InstallerData) error { defer trace.End(trace.Begin(conf.Name)) var err error if err = d.checkExistence(conf, settings); err != nil { return err } if d.isVC && !settings.UseRP { if d.vchVapp, err = d.createVApp(conf, settings); err != nil { detail := fmt.Sprintf("Creating virtual app failed: %s", err) if !d.force { return errors.New(detail) } log.Error(detail) log.Errorf("Deploying vch under parent pool %q, (--force=true)", settings.ResourcePoolPath) d.vchPool = d.session.Pool conf.ComputeResources = append(conf.ComputeResources, d.vchPool.Reference()) } } else { if d.vchPool, err = d.createResourcePool(conf, settings); err != nil { detail := fmt.Sprintf("Creating resource pool failed: %s", err) if !d.force { return errors.New(detail) } log.Error(detail) log.Errorf("Deploying vch under parent pool %q, (--force=true)", settings.ResourcePoolPath) d.vchPool = d.session.Pool conf.ComputeResources = append(conf.ComputeResources, d.vchPool.Reference()) } } if err = d.createBridgeNetwork(conf); err != nil { return err } if err = d.createVolumeStores(conf); err != nil { return errors.Errorf("Exiting because we could not create volume stores due to error: %s", err) } if err = d.createAppliance(conf, settings); err != nil { return errors.Errorf("Creating the appliance failed with %s. Exiting...", err) } if err = d.uploadImages(settings.ImageFiles); err != nil { return errors.Errorf("Uploading images failed with %s. Exiting...", err) } if d.session.IsVC() { if err = d.RegisterExtension(conf, settings.Extension); err != nil { return errors.Errorf("Error registering VCH vSphere extension: %s", err) } } return d.startAppliance(conf) }
func (d *Dispatcher) createBridgeNetwork(conf *config.VirtualContainerHostConfigSpec) error { defer trace.End(trace.Begin("")) // if the bridge network is already extant there's nothing to do bnet := conf.ExecutorConfig.Networks[conf.BridgeNetwork] if bnet != nil && bnet.ID != "" { return nil } // network didn't exist during validation given we don't have a moref, so create it if d.session.Client.IsVC() { // double check return errors.New("bridge network must already exist for vCenter environments") } // in this case the name to use is held in container network ID name := bnet.Network.ID log.Infof("Creating VirtualSwitch") hostNetSystem, err := d.session.Host.ConfigManager().NetworkSystem(d.ctx) if err != nil { err = errors.Errorf("Failed to retrieve host network system: %s", err) return err } if err = hostNetSystem.AddVirtualSwitch(d.ctx, name, &types.HostVirtualSwitchSpec{ NumPorts: 1024, }); err != nil { err = errors.Errorf("Failed to add virtual switch (%q): %s", name, err) return err } log.Infof("Creating Portgroup") if err = hostNetSystem.AddPortGroup(d.ctx, types.HostPortGroupSpec{ Name: name, VlanId: 1, // TODO: expose this for finer grained grouping within the switch VswitchName: name, Policy: types.HostNetworkPolicy{}, }); err != nil { err = errors.Errorf("Failed to add port group (%q): %s", name, err) return err } net, err := d.session.Finder.Network(d.ctx, name) if err != nil { _, ok := err.(*find.NotFoundError) if !ok { err = errors.Errorf("Failed to query virtual switch (%q): %s", name, err) return err } } // assign the moref to the bridge network config on the appliance bnet.ID = net.Reference().String() bnet.Network.ID = net.Reference().String() conf.CreateBridgeNetwork = true return nil }
// retrieves the uuid of the appliance vm to create a unique vsphere extension name func (d *Dispatcher) GenerateExtensionName(conf *config.VirtualContainerHostConfigSpec, vm *vm.VirtualMachine) error { defer trace.End(trace.Begin(conf.ExtensionName)) var o mo.VirtualMachine err := vm.Properties(d.ctx, vm.Reference(), []string{"config.uuid"}, &o) if err != nil { return errors.Errorf("Could not get VM UUID from appliance VM due to error: %s", err) } conf.ExtensionName = "com.vmware.vic." + o.Config.Uuid return nil }
func (v *Validator) certificate(ctx context.Context, input *data.Data, conf *config.VirtualContainerHostConfigSpec) { defer trace.End(trace.Begin("")) if len(input.CertPEM) == 0 && len(input.KeyPEM) == 0 { // if there's no data supplied then we're configuring without TLS log.Debug("Configuring without TLS due to empty key and cert buffers") return } // check the cert can be loaded _, err := tls.X509KeyPair(input.CertPEM, input.KeyPEM) v.NoteIssue(err) conf.HostCertificate = &config.RawCertificate{ Key: input.KeyPEM, Cert: input.CertPEM, } }
func (v *Validator) basics(ctx context.Context, input *data.Data, conf *config.VirtualContainerHostConfigSpec) { defer trace.End(trace.Begin("")) // TODO: ensure that displayname doesn't violate constraints (length, characters, etc) conf.SetName(input.DisplayName) conf.SetDebug(input.Debug.Debug) conf.Name = input.DisplayName conf.Version = version.GetBuild() scratchSize, err := units.FromHumanSize(input.ScratchSize) if err != nil { // TODO set minimum size of scratch disk v.NoteIssue(errors.Errorf("Invalid default image size %s provided; error from parser: %s", input.ScratchSize, err.Error())) } else { conf.ScratchSize = scratchSize / units.KB log.Debugf("Setting scratch image size to %d KB in VCHConfig", conf.ScratchSize) } }
func (v *Validator) certificateAuthorities(ctx context.Context, input *data.Data, conf *config.VirtualContainerHostConfigSpec) { defer trace.End(trace.Begin("")) if len(input.ClientCAs) == 0 { // if there's no data supplied then we're configuring without client verification log.Debug("Configuring without client verification due to empty certificate authorities") return } // ensure TLS is configurable if len(input.CertPEM) == 0 { v.NoteIssue(errors.New("Certificate authority specified, but no TLS certificate provided")) return } // check a CA can be loaded pool := x509.NewCertPool() if !pool.AppendCertsFromPEM(input.ClientCAs) { v.NoteIssue(errors.New("Unable to load certificate authority data")) return } conf.CertificateAuthorities = input.ClientCAs }
// Upgrade will try to upgrade vch appliance to new version. If failed will try to roll back to original status.
//
// Sequence: resolve appliance folder/datastore, upload new images, clear
// session start-state so component startup is re-awaited, snapshot the
// appliance, then apply the update; on update failure the appliance is
// rolled back to the snapshot under a fresh timeout. The named return err is
// read by the deferred snapshot cleanup below.
func (d *Dispatcher) Upgrade(vch *vm.VirtualMachine, conf *config.VirtualContainerHostConfigSpec, settings *data.InstallerData) (err error) {
	defer trace.End(trace.Begin(conf.Name))

	d.appliance = vch

	// update the displayname to the actual folder name used
	if d.vmPathName, err = d.appliance.FolderName(d.ctx); err != nil {
		log.Errorf("Failed to get canonical name for appliance: %s", err)
		return err
	}
	ds, err := d.session.Finder.Datastore(d.ctx, conf.ImageStores[0].Host)
	if err != nil {
		err = errors.Errorf("Failed to find image datastore %q", conf.ImageStores[0].Host)
		return err
	}
	d.session.Datastore = ds

	// Select admin/docker endpoints based on whether TLS is configured.
	if !conf.HostCertificate.IsNil() {
		d.VICAdminProto = "https"
		d.DockerPort = fmt.Sprintf("%d", opts.DefaultTLSHTTPPort)
	} else {
		d.VICAdminProto = "http"
		d.DockerPort = fmt.Sprintf("%d", opts.DefaultHTTPPort)
	}

	if err = d.uploadImages(settings.ImageFiles); err != nil {
		return errors.Errorf("Uploading images failed with %s. Exiting...", err)
	}
	conf.BootstrapImagePath = fmt.Sprintf("[%s] %s/%s", conf.ImageStores[0].Host, d.vmPathName, settings.BootstrapISO)

	// ensure that we wait for components to come up
	for _, s := range conf.ExecutorConfig.Sessions {
		s.Started = ""
	}

	snapshotName := fmt.Sprintf("%s %s", UpgradePrefix, conf.Version.BuildNumber)
	snapshotName = strings.TrimSpace(snapshotName)
	snapshotRefID, err := d.createSnapshot(snapshotName, "upgrade snapshot")
	if err != nil {
		// Snapshot failed: remove the already-uploaded images before bailing.
		d.deleteUpgradeImages(ds, settings)
		return err
	}
	defer func() {
		// Runs on successful upgrade only (reads the named return err);
		// the snapshot is no longer needed once the update has succeeded.
		if err == nil {
			// do clean up aggressively, even the previous operation failed with context deadline exceeded.
			d.deleteSnapshot(*snapshotRefID, snapshotName, conf.Name)
		}
	}()

	if err = d.update(conf, settings); err == nil {
		return nil
	}
	log.Errorf("Failed to upgrade: %s", err)
	log.Infof("Rolling back upgrade")

	// reset timeout, to make sure rollback still happens in case of deadline exceeded error in previous step
	var cancel context.CancelFunc
	d.ctx, cancel = context.WithTimeout(context.Background(), settings.RollbackTimeout)
	defer cancel()

	if rerr := d.rollback(conf, snapshotName); rerr != nil {
		log.Errorf("Failed to revert appliance to snapshot: %s", rerr)
		// return the error message for upgrade, instead of rollback
		return err
	}
	d.deleteUpgradeImages(ds, settings)
	log.Infof("Appliance is rolled back to old version")
	return err
}
// network validates all network roles for the VCH — external, client,
// management, bridge, and user-mapped container networks — recording issues
// rather than failing fast so the user sees every problem at once. Client
// defaults to external and management defaults to client when unspecified.
func (v *Validator) network(ctx context.Context, input *data.Data, conf *config.VirtualContainerHostConfigSpec) {
	defer trace.End(trace.Begin(""))

	// External net
	// external network is default for appliance
	err := v.addNetworkHelper(ctx, conf, input.ExternalNetworkName, "external", "external", true)
	if err != nil {
		v.NoteIssue(fmt.Errorf("Error checking network for --external-network: %s", err))
		v.suggestNetwork("--external-network", true)
	}
	// Bridge network should be different than all other networks
	v.checkNetworkConflict(input.BridgeNetworkName, input.ExternalNetworkName, "external")

	// Client net
	if input.ClientNetworkName == "" {
		input.ClientNetworkName = input.ExternalNetworkName
	}
	err = v.addNetworkHelper(ctx, conf, input.ClientNetworkName, "client", "client", false)
	if err != nil {
		v.NoteIssue(fmt.Errorf("Error checking network for --client-network: %s", err))
		v.suggestNetwork("--client-network", true)
	}
	v.checkNetworkConflict(input.BridgeNetworkName, input.ClientNetworkName, "client")

	// Management net
	if input.ManagementNetworkName == "" {
		input.ManagementNetworkName = input.ClientNetworkName
	}
	// NOTE: empty endpoint name — the management role gets a container
	// network mapping but no named appliance endpoint.
	err = v.addNetworkHelper(ctx, conf, input.ManagementNetworkName, "", "management", false)
	if err != nil {
		v.NoteIssue(fmt.Errorf("Error checking network for --management-network: %s", err))
		v.suggestNetwork("--management-network", true)
	}
	v.checkNetworkConflict(input.BridgeNetworkName, input.ManagementNetworkName, "management")

	// Bridge net -
	// vCenter: must exist and must be a DPG
	// ESX: doesn't need to exist - we will create with default value
	//
	// for now we're hardcoded to "bridge" for the container host name
	conf.BridgeNetwork = "bridge"
	endpointMoref, err := v.dpgHelper(ctx, input.BridgeNetworkName)

	var bridgeID, netMoid string
	if err != nil {
		bridgeID = ""
		netMoid = ""
	} else {
		bridgeID = endpointMoref.String()
		netMoid = endpointMoref.String()
	}

	checkBridgeVDS := true
	if err != nil {
		// NotFoundError is tolerable on ESXi (the dispatcher will create the
		// network); anything else, or any error on vCenter, is an issue.
		if _, ok := err.(*find.NotFoundError); !ok || v.IsVC() {
			v.NoteIssue(fmt.Errorf("An existing distributed port group must be specified for bridge network on vCenter: %s", err))
			v.suggestNetwork("--bridge-network", false)
			checkBridgeVDS = false // prevent duplicate error output
		}

		// this allows the dispatcher to create the network with corresponding name
		// if BridgeNetworkName doesn't already exist then we set the ContainerNetwork
		// ID to the name, but leaving the NetworkEndpoint moref as ""
		netMoid = input.BridgeNetworkName
	}

	bridgeNet := &executor.NetworkEndpoint{
		Common: executor.Common{
			Name: "bridge",
			ID:   bridgeID,
		},
		Static: &net.IPNet{IP: net.IPv4zero}, // static but managed externally
		Network: executor.ContainerNetwork{
			Common: executor.Common{
				Name: "bridge",
				ID:   netMoid,
			},
		},
	}
	// we need to have the bridge network identified as an available container network
	conf.AddContainerNetwork(&bridgeNet.Network)
	// we also need to have the appliance attached to the bridge network to allow
	// port forwarding
	conf.AddNetwork(bridgeNet)

	err = v.checkVDSMembership(ctx, endpointMoref, input.BridgeNetworkName)
	if err != nil && checkBridgeVDS {
		v.NoteIssue(fmt.Errorf("Unable to check hosts in vDS for %q: %s", input.BridgeNetworkName, err))
	}

	// add mapped networks (from --container-network)
	// these should be a distributed port groups in vCenter
	suggestedMapped := false // only suggest mapped nets once
	for name, net := range input.MappedNetworks {
		checkMappedVDS := true
		// "bridge" is reserved
		if name == "bridge" {
			v.NoteIssue(fmt.Errorf("Cannot use reserved name \"bridge\" for container network"))
			continue
		}

		gw := input.MappedNetworksGateways[name]
		pools := input.MappedNetworksIPRanges[name]
		dns := input.MappedNetworksDNS[name]
		if len(pools) != 0 && ip.IsUnspecifiedSubnet(&gw) {
			v.NoteIssue(fmt.Errorf("IP range specified without gateway for container network %q", name))
			continue
		}

		if !ip.IsUnspecifiedSubnet(&gw) && !ip.IsRoutableIP(gw.IP, &gw) {
			v.NoteIssue(fmt.Errorf("Gateway %s is not a routable address", gw.IP))
			continue
		}

		err = nil
		// verify ip ranges are within subnet,
		// and don't overlap with each other
		for i, r := range pools {
			if !gw.Contains(r.FirstIP) || !gw.Contains(r.LastIP) {
				err = fmt.Errorf("IP range %q is not in subnet %q", r, gw)
				break
			}

			for _, r2 := range pools[i+1:] {
				if r2.Overlaps(r) {
					err = fmt.Errorf("Overlapping ip ranges: %q %q", r2, r)
					break
				}
			}

			if err != nil {
				break
			}
		}

		if err != nil {
			v.NoteIssue(err)
			continue
		}

		moref, err := v.dpgHelper(ctx, net)
		if err != nil {
			v.NoteIssue(fmt.Errorf("Error adding container network %q: %s", name, err))
			checkMappedVDS = false
			if !suggestedMapped {
				v.suggestNetwork("--container-network", true)
				suggestedMapped = true
			}
		}
		mappedNet := &executor.ContainerNetwork{
			Common: executor.Common{
				Name: name,
				ID:   moref.String(),
			},
			Gateway:     gw,
			Nameservers: dns,
			Pools:       pools,
		}
		if input.BridgeNetworkName == net {
			v.NoteIssue(errors.Errorf("the bridge network must not be shared with another network role - %q also mapped as container network %q", input.BridgeNetworkName, name))
		}

		err = v.checkVDSMembership(ctx, moref, net)
		if err != nil && checkMappedVDS {
			v.NoteIssue(fmt.Errorf("Unable to check hosts in vDS for %q: %s", net, err))
		}

		conf.AddContainerNetwork(mappedNet)
	}
}
// createVApp creates a vApp named for the VCH under the session's resource
// pool, applying the CPU/memory limits, reservations and shares from
// settings (with -1/unlimited and reservation 1 as fallbacks), and records
// the vApp reference on conf.ComputeResources.
func (d *Dispatcher) createVApp(conf *config.VirtualContainerHostConfigSpec, settings *data.InstallerData) (*object.VirtualApp, error) {
	defer trace.End(trace.Begin(""))
	var err error

	log.Infof("Creating virtual app %q", conf.Name)

	resSpec := types.ResourceConfigSpec{
		CpuAllocation: &types.ResourceAllocationInfo{
			Shares: &types.SharesInfo{
				Level: types.SharesLevelNormal,
			},
			ExpandableReservation: types.NewBool(true),
		},
		MemoryAllocation: &types.ResourceAllocationInfo{
			Shares: &types.SharesInfo{
				Level: types.SharesLevelNormal,
			},
			ExpandableReservation: types.NewBool(true),
		},
	}

	// CPU allocation: caller-supplied values override the defaults.
	cpu := resSpec.CpuAllocation.GetResourceAllocationInfo()
	cpu.Limit = -1
	if settings.VCHSize.CPU.Limit != 0 {
		cpu.Limit = settings.VCHSize.CPU.Limit
	}
	// FIXME: govmomi omitempty
	cpu.Reservation = 1
	if settings.VCHSize.CPU.Reservation != 0 {
		cpu.Reservation = settings.VCHSize.CPU.Reservation
	}
	if settings.VCHSize.CPU.Shares != nil {
		cpu.Shares = settings.VCHSize.CPU.Shares
	}

	// Memory allocation: same override pattern as CPU above.
	memory := resSpec.MemoryAllocation.GetResourceAllocationInfo()
	memory.Limit = -1
	if settings.VCHSize.Memory.Limit != 0 {
		memory.Limit = settings.VCHSize.Memory.Limit
	}
	// FIXME: govmomi omitempty
	memory.Reservation = 1
	if settings.VCHSize.Memory.Reservation != 0 {
		memory.Reservation = settings.VCHSize.Memory.Reservation
	}
	if settings.VCHSize.Memory.Shares != nil {
		memory.Shares = settings.VCHSize.Memory.Shares
	}

	// Product metadata shown in the vSphere UI for the vApp.
	prodSpec := types.VAppProductSpec{
		Info: &types.VAppProductInfo{
			Name:      "vSphere Integrated Containers",
			Vendor:    "VMware",
			VendorUrl: "http://www.vmware.com/",
			Version:   version.Version,
		},
		ArrayUpdateSpec: types.ArrayUpdateSpec{
			Operation: types.ArrayUpdateOperationAdd,
		},
	}

	configSpec := types.VAppConfigSpec{
		Annotation: "vSphere Integrated Containers",
		VmConfigSpec: types.VmConfigSpec{
			Product: []types.VAppProductSpec{prodSpec},
		},
	}

	app, err := d.session.Pool.CreateVApp(d.ctx, conf.Name, resSpec, configSpec, d.session.Folders(d.ctx).VmFolder)
	if err != nil {
		log.Debugf("Failed to create virtual app %q: %s", conf.Name, err)
		return nil, err
	}
	conf.ComputeResources = append(conf.ComputeResources, app.Reference())
	return app, nil
}
func (v *Validator) migrateData(ctx context.Context, conf *config.VirtualContainerHostConfigSpec) (*config.VirtualContainerHostConfigSpec, error) { conf.Version = version.GetBuild() return conf, nil }
// createResourcePool finds or creates the resource pool for the VCH under
// settings.ResourcePoolPath, applying the CPU/memory limits, reservations
// and shares from settings (with -1/unlimited and reservation 1 as
// fallbacks), and records the pool reference on conf.ComputeResources.
func (d *Dispatcher) createResourcePool(conf *config.VirtualContainerHostConfigSpec, settings *data.InstallerData) (*object.ResourcePool, error) {
	defer trace.End(trace.Begin(""))

	d.vchPoolPath = path.Join(settings.ResourcePoolPath, conf.Name)
	rp, err := d.session.Finder.ResourcePool(d.ctx, d.vchPoolPath)
	if err != nil {
		// NotFoundError means we fall through and create the pool; anything
		// else is fatal.
		_, ok := err.(*find.NotFoundError)
		if !ok {
			err = errors.Errorf("Failed to query compute resource (%q): %q", d.vchPoolPath, err)
			return nil, err
		}
	} else {
		// Pool already exists — reuse it.
		conf.ComputeResources = append(conf.ComputeResources, rp.Reference())
		return rp, nil
	}

	log.Infof("Creating Resource Pool %q", conf.Name)
	// TODO: expose the limits and reservation here via options
	resSpec := types.ResourceConfigSpec{
		CpuAllocation: &types.ResourceAllocationInfo{
			Shares: &types.SharesInfo{
				Level: types.SharesLevelNormal,
			},
			ExpandableReservation: types.NewBool(true),
		},
		MemoryAllocation: &types.ResourceAllocationInfo{
			Shares: &types.SharesInfo{
				Level: types.SharesLevelNormal,
			},
			ExpandableReservation: types.NewBool(true),
		},
	}

	// CPU allocation: caller-supplied values override the defaults.
	cpu := resSpec.CpuAllocation.GetResourceAllocationInfo()
	cpu.Limit = -1
	if settings.VCHSize.CPU.Limit != 0 {
		cpu.Limit = settings.VCHSize.CPU.Limit
	}
	// FIXME: govmomi omitempty
	cpu.Reservation = 1
	if settings.VCHSize.CPU.Reservation != 0 {
		cpu.Reservation = settings.VCHSize.CPU.Reservation
	}
	if settings.VCHSize.CPU.Shares != nil {
		cpu.Shares = settings.VCHSize.CPU.Shares
	}

	// Memory allocation: same override pattern as CPU above.
	memory := resSpec.MemoryAllocation.GetResourceAllocationInfo()
	memory.Limit = -1
	if settings.VCHSize.Memory.Limit != 0 {
		memory.Limit = settings.VCHSize.Memory.Limit
	}
	// FIXME: govmomi omitempty
	memory.Reservation = 1
	if settings.VCHSize.Memory.Reservation != 0 {
		memory.Reservation = settings.VCHSize.Memory.Reservation
	}
	if settings.VCHSize.Memory.Shares != nil {
		memory.Shares = settings.VCHSize.Memory.Shares
	}

	rp, err = d.session.Pool.Create(d.ctx, conf.Name, resSpec)
	if err != nil {
		log.Debugf("Failed to create resource pool %q: %s", d.vchPoolPath, err)
		return nil, err
	}

	conf.ComputeResources = append(conf.ComputeResources, rp.Reference())
	return rp, nil
}
func (d *Dispatcher) createAppliance(conf *config.VirtualContainerHostConfigSpec, settings *data.InstallerData) error { defer trace.End(trace.Begin("")) log.Infof("Creating appliance on target") spec, err := d.createApplianceSpec(conf, settings) if err != nil { log.Errorf("Unable to create appliance spec: %s", err) return err } var info *types.TaskInfo // create appliance VM if d.isVC && d.vchVapp != nil { info, err = tasks.WaitForResult(d.ctx, func(ctx context.Context) (tasks.ResultWaiter, error) { return d.vchVapp.CreateChildVM_Task(ctx, *spec, d.session.Host) }) } else { // if vapp is not created, fall back to create VM under default resource pool info, err = tasks.WaitForResult(d.ctx, func(ctx context.Context) (tasks.ResultWaiter, error) { return d.session.Folders(ctx).VmFolder.CreateVM(ctx, *spec, d.vchPool, d.session.Host) }) } if err != nil { log.Errorf("Unable to create appliance VM: %s", err) return err } if info.Error != nil || info.State != types.TaskInfoStateSuccess { log.Errorf("Create appliance reported: %s", info.Error.LocalizedMessage) } // get VM reference and save it moref := info.Result.(types.ManagedObjectReference) conf.SetMoref(&moref) obj, err := d.session.Finder.ObjectReference(d.ctx, moref) if err != nil { log.Errorf("Failed to reacquire reference to appliance VM after creation: %s", err) return err } gvm, ok := obj.(*object.VirtualMachine) if !ok { return fmt.Errorf("Required reference after appliance creation was not for a VM: %T", obj) } vm2 := vm.NewVirtualMachineFromVM(d.ctx, d.session, gvm) // update the displayname to the actual folder name used if d.vmPathName, err = vm2.FolderName(d.ctx); err != nil { log.Errorf("Failed to get canonical name for appliance: %s", err) return err } log.Debugf("vm folder name: %q", d.vmPathName) log.Debugf("vm inventory path: %q", vm2.InventoryPath) // create an extension to register the appliance as if err = d.GenerateExtensionName(conf, vm2); err != nil { return errors.Errorf("Could not generate 
extension name during appliance creation due to error: %s", err) } settings.Extension = types.Extension{ Description: &types.Description{ Label: "VIC", Summary: "vSphere Integrated Containers Virtual Container Host", }, Company: "VMware, Inc.", Version: "0.0", Key: conf.ExtensionName, } conf.AddComponent("vicadmin", &executor.SessionConfig{ User: "******", Group: "vicadmin", Cmd: executor.Cmd{ Path: "/sbin/vicadmin", Args: []string{ "/sbin/vicadmin", "-docker-host=unix:///var/run/docker.sock", // FIXME: hack during config migration "-insecure", "-ds=" + conf.ImageStores[0].Host, "-cluster=" + settings.ClusterPath, "-pool=" + settings.ResourcePoolPath, "-vm-path=" + vm2.InventoryPath, }, Env: []string{ "PATH=/sbin:/bin", }, Dir: "/home/vicadmin", }, Restart: true, }, ) if conf.HostCertificate != nil { d.VICAdminProto = "https" d.DockerPort = fmt.Sprintf("%d", opts.DefaultTLSHTTPPort) } else { d.VICAdminProto = "http" d.DockerPort = fmt.Sprintf("%d", opts.DefaultHTTPPort) } conf.AddComponent("docker-personality", &executor.SessionConfig{ Cmd: executor.Cmd{ Path: "/sbin/docker-engine-server", Args: []string{ "/sbin/docker-engine-server", //FIXME: hack during config migration "-serveraddr=0.0.0.0", "-port=" + d.DockerPort, "-port-layer-port=8080", }, Env: []string{ "PATH=/sbin", }, }, Restart: true, }, ) conf.AddComponent("port-layer", &executor.SessionConfig{ Cmd: executor.Cmd{ Path: "/sbin/port-layer-server", Args: []string{ "/sbin/port-layer-server", //FIXME: hack during config migration "--host=localhost", "--port=8080", "--insecure", "--sdk=" + conf.Target.String(), "--datacenter=" + settings.DatacenterName, "--cluster=" + settings.ClusterPath, "--pool=" + settings.ResourcePoolPath, "--datastore=" + conf.ImageStores[0].Host, "--vch=" + conf.ExecutorConfig.Name, }, }, Restart: true, }, ) conf.BootstrapImagePath = fmt.Sprintf("[%s] %s/%s", conf.ImageStores[0].Host, d.vmPathName, settings.BootstrapISO) spec, err = d.reconfigureApplianceSpec(vm2, conf, settings) if 
err != nil { log.Errorf("Error while getting appliance reconfig spec: %s", err) return err } // reconfig info, err = tasks.WaitForResult(d.ctx, func(ctx context.Context) (tasks.ResultWaiter, error) { return vm2.Reconfigure(ctx, *spec) }) if err != nil { log.Errorf("Error while setting component parameters to appliance: %s", err) return err } if info.State != types.TaskInfoStateSuccess { log.Errorf("Setting parameters to appliance reported: %s", info.Error.LocalizedMessage) return err } d.appliance = vm2 return nil }
// testStorage drives Validator.storage through a table of image/volume store
// inputs against the shared conf, asserting either a clean run with the
// expected normalized ds:// URLs or at least one recorded issue. Validator
// issues are cleared between cases; successful image stores are popped off
// conf.ImageStores so cases remain independent.
func testStorage(v *Validator, input *data.Data, conf *config.VirtualContainerHostConfigSpec, t *testing.T) {
	// Each case: raw image store path, raw volume store paths, whether
	// validation should record issues, and the expected normalized URLs.
	tests := []struct {
		image         string
		volumes       map[string]string
		hasErr        bool
		expectImage   string
		expectVolumes map[string]string
	}{
		{"LocalDS_0",
			map[string]string{"volume1": "LocalDS_0/volumes/volume1", "volume2": "ds://LocalDS_0/volumes/volume2"},
			false,
			"ds://LocalDS_0/test001",
			map[string]string{"volume1": "ds://LocalDS_0/volumes/volume1", "volume2": "ds://LocalDS_0/volumes/volume2"}},
		{"LocalDS_0/images",
			map[string]string{"volume1": "LocalDS_0/volumes/volume1", "volume2": "ds://LocalDS_0/volumes/volume2"},
			false,
			"ds://LocalDS_0/images",
			map[string]string{"volume1": "ds://LocalDS_0/volumes/volume1", "volume2": "ds://LocalDS_0/volumes/volume2"}},
		{"ds://LocalDS_0/images",
			map[string]string{"volume1": "LocalDS_0/volumes/volume1", "volume2": "ds://LocalDS_0/volumes/volume2"},
			false,
			"ds://LocalDS_0/images",
			map[string]string{"volume1": "ds://LocalDS_0/volumes/volume1", "volume2": "ds://LocalDS_0/volumes/volume2"}},
		{"ds://LocalDS_0/images/xyz",
			map[string]string{"volume1": "LocalDS_0/volumes/volume1", "volume2": "ds://LocalDS_0/volumes/volume2"},
			false,
			"ds://LocalDS_0/images/xyz",
			map[string]string{"volume1": "ds://LocalDS_0/volumes/volume1", "volume2": "ds://LocalDS_0/volumes/volume2"}},
		{"ds://😗",
			map[string]string{"volume1": "😗/volumes/volume1", "volume2": "ds://😗/volumes/volume2"},
			true,
			"ds://😗/test001",
			nil},
		{"ds://LocalDS_0",
			map[string]string{"volume1": "LocalDS_1/volumes/volume1", "volume2": "ds://LocalDS_1/volumes/volume2"},
			true,
			"ds://LocalDS_0/test001",
			nil},
		{"LocalDS_0",
			map[string]string{"volume1": "LocalDS_1/volumes/volume1", "volume2": "ds://LocalDS_1/volumes/volume2"},
			true,
			"ds://LocalDS_0/test001",
			nil},
		{"LocalDS_0",
			map[string]string{"volume1": "LocalDS_1/volumes/volume1", "volume2": "ds://LocalDS_1/volumes/volume2"},
			true,
			"ds://LocalDS_0/test001",
			nil},
		{"",
			map[string]string{"volume1": "", "volume2": "ds://"},
			true,
			"",
			nil},
		{"ds://",
			map[string]string{"volume1": "", "volume2": "ds://"},
			true,
			"",
			nil},
	}
	for _, test := range tests {
		t.Logf("%+v", test)
		input.ImageDatastorePath = test.image
		input.VolumeLocations = test.volumes
		v.storage(v.Context, input, conf)
		v.ListIssues()
		if !test.hasErr {
			assert.Equal(t, 0, len(v.issues))
			assert.Equal(t, test.expectImage, conf.ImageStores[0].String())
			// Pop the store just validated so the next case checks index 0.
			conf.ImageStores = conf.ImageStores[1:]
			for key, volume := range conf.VolumeLocations {
				assert.Equal(t, test.expectVolumes[key], volume.String())
			}
		} else {
			assert.True(t, len(v.issues) > 0, "Should have errors")
		}
		// Reset recorded issues so cases stay independent.
		v.issues = nil
	}
}