func (m *Manager) Attach(op trace.Operation, disk *types.VirtualDisk) error {
	deviceList := object.VirtualDeviceList{}
	deviceList = append(deviceList, disk)

	changeSpec, err := deviceList.ConfigSpec(types.VirtualDeviceConfigSpecOperationAdd)
	if err != nil {
		return err
	}

	machineSpec := types.VirtualMachineConfigSpec{}
	machineSpec.DeviceChange = append(machineSpec.DeviceChange, changeSpec...)

	m.reconfig.Lock()
	_, err = m.vm.WaitForResult(op, func(ctx context.Context) (tasks.Task, error) {
		t, er := m.vm.Reconfigure(ctx, machineSpec)
		op.Debugf("Attach reconfigure task=%s", t.Reference())
		return t, er
	})
	m.reconfig.Unlock()

	if err != nil {
		op.Errorf("vmdk storage driver failed to attach disk: %s", errors.ErrorStack(err))
		return errors.Trace(err)
	}

	return nil
}
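For comparison, the inverse operation follows the same pattern with VirtualDeviceConfigSpecOperationRemove. The sketch below is hypothetical rather than the project's actual detach code; it assumes the same Manager fields (m.vm, m.reconfig) used in Attach above.

// Hypothetical sketch: detach the disk by issuing a Remove device change.
// Assumes the same Manager fields (m.vm, m.reconfig) as Attach above.
func (m *Manager) Detach(op trace.Operation, disk *types.VirtualDisk) error {
	deviceList := object.VirtualDeviceList{}
	deviceList = append(deviceList, disk)

	// Remove instead of Add; the rest of the flow is identical.
	changeSpec, err := deviceList.ConfigSpec(types.VirtualDeviceConfigSpecOperationRemove)
	if err != nil {
		return err
	}

	machineSpec := types.VirtualMachineConfigSpec{DeviceChange: changeSpec}

	m.reconfig.Lock()
	defer m.reconfig.Unlock()

	_, err = m.vm.WaitForResult(op, func(ctx context.Context) (tasks.Task, error) {
		return m.vm.Reconfigure(ctx, machineSpec)
	})
	return err
}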
func (d *Dispatcher) reconfigureApplianceSpec(vm *vm.VirtualMachine, conf *metadata.VirtualContainerHostConfigSpec) (*types.VirtualMachineConfigSpec, error) {
	defer trace.End(trace.Begin(""))

	var devices object.VirtualDeviceList
	var err error

	spec := &types.VirtualMachineConfigSpec{
		Name:    conf.Name,
		GuestId: "other3xLinux64Guest",
		Files:   &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", conf.ImageStores[0].Host)},
	}

	if devices, err = d.configIso(conf, vm); err != nil {
		return nil, err
	}

	deviceChange, err := devices.ConfigSpec(types.VirtualDeviceConfigSpecOperationAdd)
	if err != nil {
		log.Errorf("Failed to create config spec for appliance: %s", err)
		return nil, err
	}

	spec.DeviceChange = deviceChange

	cfg := make(map[string]string)
	extraconfig.Encode(extraconfig.MapSink(cfg), conf)
	spec.ExtraConfig = append(spec.ExtraConfig, extraconfig.OptionValueFromMap(cfg)...)

	return spec, nil
}
func (cmd *create) createVM(ctx context.Context) (*object.Task, error) {
	var devices object.VirtualDeviceList
	var err error

	spec := &types.VirtualMachineConfigSpec{
		Name:     cmd.name,
		GuestId:  cmd.guestID,
		NumCPUs:  int32(cmd.cpus),
		MemoryMB: int64(cmd.memory),
	}

	devices, err = cmd.addStorage(nil)
	if err != nil {
		return nil, err
	}

	devices, err = cmd.addNetwork(devices)
	if err != nil {
		return nil, err
	}

	deviceChange, err := devices.ConfigSpec(types.VirtualDeviceConfigSpecOperationAdd)
	if err != nil {
		return nil, err
	}

	spec.DeviceChange = deviceChange

	var datastore *object.Datastore

	// If storage pod is specified, collect placement recommendations
	if cmd.StoragePod != nil {
		datastore, err = cmd.recommendDatastore(ctx, spec)
		if err != nil {
			return nil, err
		}
	} else {
		datastore = cmd.Datastore
	}

	if !cmd.force {
		vmxPath := fmt.Sprintf("%s/%s.vmx", cmd.name, cmd.name)

		_, err := datastore.Stat(ctx, vmxPath)
		if err == nil {
			dsPath := cmd.Datastore.Path(vmxPath)
			return nil, fmt.Errorf("File %s already exists", dsPath)
		}
	}

	folder := cmd.Folder

	spec.Files = &types.VirtualMachineFileInfo{
		VmPathName: fmt.Sprintf("[%s]", datastore.Name()),
	}

	return folder.CreateVM(ctx, *spec, cmd.ResourcePool, cmd.HostSystem)
}
func (cmd *create) createVM(ctx context.Context) (*object.Task, error) {
	var devices object.VirtualDeviceList
	var err error

	spec := types.VirtualMachineConfigSpec{
		Name:     cmd.name,
		GuestId:  cmd.guestID,
		Files:    &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", cmd.Datastore.Name())},
		NumCPUs:  cmd.cpus,
		MemoryMB: int64(cmd.memory),
	}

	devices, err = cmd.addStorage(nil)
	if err != nil {
		return nil, err
	}

	devices, err = cmd.addNetwork(devices)
	if err != nil {
		return nil, err
	}

	deviceChange, err := devices.ConfigSpec(types.VirtualDeviceConfigSpecOperationAdd)
	if err != nil {
		return nil, err
	}

	spec.DeviceChange = deviceChange

	if !cmd.force {
		vmxPath := fmt.Sprintf("%s/%s.vmx", cmd.name, cmd.name)

		_, err := cmd.Datastore.Stat(ctx, vmxPath)
		if err == nil {
			dsPath := cmd.Datastore.Path(vmxPath)
			return nil, fmt.Errorf("File %s already exists", dsPath)
		}
	}

	folders, err := cmd.Datacenter.Folders(ctx)
	if err != nil {
		return nil, err
	}

	return folders.VmFolder.CreateVM(ctx, spec, cmd.ResourcePool, cmd.HostSystem)
}
func (d *Dispatcher) createApplianceSpec(conf *config.VirtualContainerHostConfigSpec, vConf *data.InstallerData) (*types.VirtualMachineConfigSpec, error) {
	defer trace.End(trace.Begin(""))

	var devices object.VirtualDeviceList
	var err error

	cfg, err := d.encodeConfig(conf)
	if err != nil {
		return nil, err
	}

	spec := &spec.VirtualMachineConfigSpec{
		VirtualMachineConfigSpec: &types.VirtualMachineConfigSpec{
			Name:     conf.Name,
			GuestId:  "other3xLinux64Guest",
			Files:    &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", conf.ImageStores[0].Host)},
			NumCPUs:  int32(vConf.ApplianceSize.CPU.Limit),
			MemoryMB: vConf.ApplianceSize.Memory.Limit,
			// Encode the config both here and after the VM is created so that it can be
			// identified as a VCH appliance as soon as creation is complete.
			ExtraConfig: vmomi.OptionValueFromMap(cfg),
		},
	}

	if devices, err = d.addIDEController(devices); err != nil {
		return nil, err
	}

	if devices, err = d.addParaVirtualSCSIController(devices); err != nil {
		return nil, err
	}

	if devices, err = d.addNetworkDevices(conf, spec, devices); err != nil {
		return nil, err
	}

	deviceChange, err := devices.ConfigSpec(types.VirtualDeviceConfigSpecOperationAdd)
	if err != nil {
		return nil, err
	}

	spec.DeviceChange = deviceChange

	return spec.VirtualMachineConfigSpec, nil
}
func (c *configSpec) AddDisk(ds *object.Datastore, path string) {
	var devices object.VirtualDeviceList

	controller, err := devices.CreateSCSIController("")
	if err != nil {
		panic(err)
	}
	devices = append(devices, controller)

	disk := devices.CreateDisk(controller.(types.BaseVirtualController), ds.Reference(), ds.Path(path))
	devices = append(devices, disk)

	spec, err := devices.ConfigSpec(types.VirtualDeviceConfigSpecOperationAdd)
	if err != nil {
		panic(err)
	}

	c.DeviceChange = append(c.DeviceChange, spec...)
}
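A minimal usage sketch for AddDisk above, assuming configSpec is defined on top of types.VirtualMachineConfigSpec (which is why c.DeviceChange is addressable) and that ds is an *object.Datastore the caller has already looked up; the disk path is illustrative.

// Hypothetical caller: build up a configSpec before creating or reconfiguring a VM.
spec := &configSpec{}
spec.AddDisk(ds, "example-vm/example-vm.vmdk") // path is illustrative
// spec.DeviceChange now carries the Add operations for the SCSI controller and
// the disk, ready to include in a CreateVM or Reconfigure call.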
func (c *Context) RemoveContainer(h *exec.Handle, scope string) error {
	defer trace.End(trace.Begin(""))
	c.Lock()
	defer c.Unlock()

	if h == nil {
		return fmt.Errorf("handle is required")
	}

	if con, _ := c.container(h); con != nil {
		return fmt.Errorf("container is bound")
	}

	var err error
	s, err := c.resolveScope(scope)
	if err != nil {
		return err
	}

	var ne *executor.NetworkEndpoint
	ne, ok := h.ExecConfig.Networks[s.Name()]
	if !ok {
		return fmt.Errorf("container %s not part of network %s", h.ExecConfig.ID, s.Name())
	}

	// figure out if any other networks are using the NIC
	removeNIC := true
	for _, ne2 := range h.ExecConfig.Networks {
		if ne2 == ne {
			continue
		}

		if ne2.ID == ne.ID {
			removeNIC = false
			break
		}
	}

	if removeNIC {
		var devices object.VirtualDeviceList

		backing, err := s.network.EthernetCardBackingInfo(context.Background())
		if err != nil {
			return err
		}

		d, err := devices.CreateEthernetCard("vmxnet3", backing)
		if err != nil {
			return err
		}

		devices = append(devices, d)
		spec, err := devices.ConfigSpec(types.VirtualDeviceConfigSpecOperationRemove)
		if err != nil {
			return err
		}

		h.Spec.DeviceChange = append(h.Spec.DeviceChange, spec...)
	}

	delete(h.ExecConfig.Networks, s.Name())

	return nil
}
		slots := make(map[int32]bool)
		for _, e := range h.ExecConfig.Networks {
			if e.Common.ID != "" {
				slot, err := strconv.Atoi(e.Common.ID)
				if err == nil {
					slots[int32(slot)] = true
				}
			}
		}

		h.Spec.AssignSlotNumber(d, slots)
	}

	if dc == nil {
		devices = append(devices, d)

		deviceSpecs, err := devices.ConfigSpec(types.VirtualDeviceConfigSpecOperationAdd)
		if err != nil {
			return nil, err
		}

		h.Spec.DeviceChange = append(h.Spec.DeviceChange, deviceSpecs...)
	}

	return d, nil
}

func (c *Context) resolveScope(scope string) (*Scope, error) {
	scopes, err := c.findScopes(&scope)
	if err != nil || len(scopes) != 1 {
		return nil, err
	}
func TestContextAddContainer(t *testing.T) {
	ctx, err := NewContext(net.IPNet{IP: net.IPv4(172, 16, 0, 0), Mask: net.CIDRMask(12, 32)}, net.CIDRMask(16, 32))
	if err != nil {
		t.Fatalf("NewContext() => (nil, %s), want (ctx, nil)", err)
		return
	}

	h := exec.NewContainer("foo")

	var devices object.VirtualDeviceList
	backing, _ := ctx.DefaultScope().Network().EthernetCardBackingInfo(context.TODO())

	specWithEthCard := &spec.VirtualMachineConfigSpec{
		VirtualMachineConfigSpec: &types.VirtualMachineConfigSpec{},
	}

	var d types.BaseVirtualDevice
	if d, err = devices.CreateEthernetCard("vmxnet3", backing); err == nil {
		d.GetVirtualDevice().SlotInfo = &types.VirtualDevicePciBusSlotInfo{
			PciSlotNumber: 1111,
		}
		devices = append(devices, d)
		var cs []types.BaseVirtualDeviceConfigSpec
		if cs, err = devices.ConfigSpec(types.VirtualDeviceConfigSpecOperationAdd); err == nil {
			specWithEthCard.DeviceChange = cs
		}
	}

	if err != nil {
		t.Fatalf(err.Error())
	}

	aecErr := func(_ *exec.Handle, _ *Scope) (types.BaseVirtualDevice, error) {
		return nil, fmt.Errorf("error")
	}

	otherScope, err := ctx.NewScope(BridgeScopeType, "other", nil, net.IPv4(0, 0, 0, 0), nil, nil)
	if err != nil {
		t.Fatalf("failed to add scope")
	}

	hBar := exec.NewContainer(uid.New())

	var tests = []struct {
		aec   func(h *exec.Handle, s *Scope) (types.BaseVirtualDevice, error)
		h     *exec.Handle
		s     *spec.VirtualMachineConfigSpec
		scope string
		ip    *net.IP
		err   error
	}{
		// nil handle
		{nil, nil, nil, "", nil, fmt.Errorf("")},
		// scope not found
		{nil, h, nil, "foo", nil, ResourceNotFoundError{}},
		// addEthernetCard returns error
		{aecErr, h, nil, "default", nil, fmt.Errorf("")},
		// add a container
		{nil, h, nil, "default", nil, nil},
		// container already added
		{nil, h, nil, "default", nil, nil},
		{nil, hBar, specWithEthCard, "default", nil, nil},
		{nil, hBar, nil, otherScope.Name(), nil, nil},
	}

	origAEC := addEthernetCard
	defer func() { addEthernetCard = origAEC }()

	for i, te := range tests {
		// setup
		addEthernetCard = origAEC
		scopy := &spec.VirtualMachineConfigSpec{}
		if te.h != nil {
			te.h.SetSpec(te.s)
			if te.h.Spec != nil {
				*scopy = *te.h.Spec
			}
		}
		if te.aec != nil {
			addEthernetCard = te.aec
		}

		options := &AddContainerOptions{
			Scope: te.scope,
			IP:    te.ip,
		}
		err := ctx.AddContainer(te.h, options)

		if te.err != nil {
			// expect an error
			if err == nil {
				t.Fatalf("case %d: ctx.AddContainer(%v, %s, %s) => nil want err", i, te.h, te.scope, te.ip)
			}

			if reflect.TypeOf(err) != reflect.TypeOf(te.err) {
				t.Fatalf("case %d: ctx.AddContainer(%v, %s, %s) => (%v, %v) want (%v, %v)", i, te.h, te.scope, te.ip, err, te.err, err, te.err)
			}

			if _, ok := te.err.(DuplicateResourceError); ok {
				continue
			}

			// verify no device changes in the spec
			if te.s != nil {
				if len(scopy.DeviceChange) != len(h.Spec.DeviceChange) {
					t.Fatalf("case %d: ctx.AddContainer(%v, %s, %s) added device", i, te.h, te.scope, te.ip)
				}
			}

			continue
		}

		if err != nil {
			t.Fatalf("case %d: ctx.AddContainer(%v, %s, %s) => %s want nil", i, te.h, te.scope, te.ip, err)
		}

		// verify the container was not added to the scope
		s, _ := ctx.resolveScope(te.scope)
		if s != nil && te.h != nil {
			c := s.Container(uid.Parse(te.h.Container.ExecConfig.ID))
			if c != nil {
				t.Fatalf("case %d: ctx.AddContainer(%v, %s, %s) added container", i, te.h, te.scope, te.ip)
			}
		}

		// spec should have a nic attached to the scope's network
		var dev types.BaseVirtualDevice
		dcs, err := te.h.Spec.FindNICs(context.TODO(), s.Network())
		if len(dcs) != 1 {
			t.Fatalf("case %d: ctx.AddContainer(%v, %s, %s) more than one NIC added for scope %s", i, te.h, te.scope, te.ip, s.Network())
		}

		dev = dcs[0].GetVirtualDeviceConfigSpec().Device
		if spec.VirtualDeviceSlotNumber(dev) == spec.NilSlot {
			t.Fatalf("case %d: ctx.AddContainer(%v, %s, %s) NIC added has nil pci slot", i, te.h, te.scope, te.ip)
		}

		// spec metadata should be updated with endpoint info
		ne, ok := te.h.ExecConfig.Networks[s.Name()]
		if !ok {
			t.Fatalf("case %d: ctx.AddContainer(%v, %s, %s) no network endpoint info added", i, te.h, te.scope, te.ip)
		}

		if spec.VirtualDeviceSlotNumber(dev) != atoiOrZero(ne.ID) {
			t.Fatalf("case %d; ctx.AddContainer(%v, %s, %s) => ne.ID == %d, want %d", i, te.h, te.scope, te.ip, atoiOrZero(ne.ID), spec.VirtualDeviceSlotNumber(dev))
		}

		if ne.Network.Name != s.Name() {
			t.Fatalf("case %d; ctx.AddContainer(%v, %s, %s) => ne.NetworkName == %s, want %s", i, te.h, te.scope, te.ip, ne.Network.Name, s.Name())
		}

		if te.ip != nil && !te.ip.Equal(ne.Static.IP) {
			t.Fatalf("case %d; ctx.AddContainer(%v, %s, %s) => ne.Static.IP == %s, want %s", i, te.h, te.scope, te.ip, ne.Static.IP, te.ip)
		}

		if te.ip == nil && ne.Static != nil {
			t.Fatalf("case %d; ctx.AddContainer(%v, %s, %s) => ne.Static.IP == %s, want %s", i, te.h, te.scope, te.ip, ne.Static.IP, net.IPv4zero)
		}
	}
}
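The snippets above share one pattern: build an object.VirtualDeviceList, turn it into device-change specs with ConfigSpec, and apply the result to a VM. Below is a self-contained sketch of that pattern using plain govmomi; the addNIC name and its arguments are assumptions for illustration, not taken from the code above.

// Package example sketches the common device-change flow, assuming the caller
// already has a *object.VirtualMachine and an object.NetworkReference.
package example

import (
	"context"

	"github.com/vmware/govmomi/object"
	"github.com/vmware/govmomi/vim25/types"
)

// addNIC attaches a vmxnet3 NIC backed by the given network to the VM.
func addNIC(ctx context.Context, vm *object.VirtualMachine, network object.NetworkReference) error {
	// Resolve the backing info for the target network (portgroup, DVS, etc).
	backing, err := network.EthernetCardBackingInfo(ctx)
	if err != nil {
		return err
	}

	// Build the device list with the new NIC.
	var devices object.VirtualDeviceList
	nic, err := devices.CreateEthernetCard("vmxnet3", backing)
	if err != nil {
		return err
	}
	devices = append(devices, nic)

	// Convert the device list into Add operations.
	deviceChange, err := devices.ConfigSpec(types.VirtualDeviceConfigSpecOperationAdd)
	if err != nil {
		return err
	}

	// Apply the change via a reconfigure task and wait for it to complete.
	task, err := vm.Reconfigure(ctx, types.VirtualMachineConfigSpec{DeviceChange: deviceChange})
	if err != nil {
		return err
	}
	return task.Wait(ctx)
}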