Example #1
func createNodes(ctx context.Context, sess *session.Session, pool *object.ResourcePool, node *Node, base string) error {
	log.Debugf("create node %+v", node)
	if node == nil {
		return nil
	}
	spec := simulator.NewResourceConfigSpec()
	node.Name = fmt.Sprintf("%s-%s", base, node.Name)
	switch node.Kind {
	case rpNode:
		child, err := pool.Create(ctx, node.Name, spec)
		if err != nil {
			return err
		}
		for _, childNode := range node.Children {
			if err := createNodes(ctx, sess, child, childNode, base); err != nil {
				return err
			}
		}
	case vappNode:
		confSpec := types.VAppConfigSpec{
			VmConfigSpec: types.VmConfigSpec{},
		}
		vapp, err := pool.CreateVApp(ctx, node.Name, spec, confSpec, nil)
		if err != nil {
			return err
		}
		config := types.VirtualMachineConfigSpec{
			Name:    node.Name,
			GuestId: string(types.VirtualMachineGuestOsIdentifierOtherGuest),
			Files: &types.VirtualMachineFileInfo{
				VmPathName: fmt.Sprintf("[LocalDS_0] %s", node.Name),
			},
		}
		if _, err = tasks.WaitForResult(ctx, func(ctx context.Context) (tasks.Task, error) {
			return vapp.CreateChildVM_Task(ctx, config, nil)
		}); err != nil {
			return err
		}
	case vmNode:
		config := types.VirtualMachineConfigSpec{
			Name:    node.Name,
			GuestId: string(types.VirtualMachineGuestOsIdentifierOtherGuest),
			Files: &types.VirtualMachineFileInfo{
				VmPathName: fmt.Sprintf("[LocalDS_0] %s", node.Name),
			},
		}
		folder := sess.Folders(ctx).VmFolder
		if _, err := tasks.WaitForResult(ctx, func(ctx context.Context) (tasks.Task, error) {
			return folder.CreateVM(ctx, config, pool, nil)
		}); err != nil {
			return err
		}
	default:
		return nil
	}
	return nil
}
Example #2
File: vm.go Project: vmware/vic
// WaitForResult is designed to handle the VM invalid-state error for any VM operation.
// It will call tasks.WaitForResult, which retries if there is a task-in-progress error.
func (vm *VirtualMachine) WaitForResult(ctx context.Context, f func(context.Context) (tasks.Task, error)) (*types.TaskInfo, error) {
	info, err := tasks.WaitForResult(ctx, f)
	if err == nil || !vm.needsFix(err) {
		return info, err
	}
	log.Debugf("Try to fix task failure %s", err)
	if nerr := vm.FixInvalidState(ctx); nerr != nil {
		log.Errorf("Failed to fix task failure: %s", nerr)
		return info, err
	}
	log.Debugf("Fixed")
	return tasks.WaitForResult(ctx, f)
}
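A minimal usage sketch of the wrapper above (illustrative, not taken from the project): it assumes the *VirtualMachine type defined in this file and that PowerOn returns a value satisfying tasks.Task in this version of the code.

func powerOnWithFix(ctx context.Context, vm *VirtualMachine) error {
	// Route the power-on through the retrying wrapper so that a fixable
	// invalid-state failure is repaired once and the task is retried.
	info, err := vm.WaitForResult(ctx, func(ctx context.Context) (tasks.Task, error) {
		return vm.PowerOn(ctx)
	})
	if err != nil {
		return err
	}
	if info.State != types.TaskInfoStateSuccess {
		return fmt.Errorf("power on did not succeed: %s", info.State)
	}
	return nil
}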
Example #3
func (d *Dispatcher) deleteVM(vm *vm.VirtualMachine, force bool) error {
	defer trace.End(trace.Begin(""))

	var err error
	power, err := vm.PowerState(d.ctx)
	if err != nil || power != types.VirtualMachinePowerStatePoweredOff {
		if err != nil {
			log.Warnf("Failed to get vm power status %s: %s", vm.Reference(), err)
		}
		if !force {
			if err != nil {
				return err
			}
			name, err := vm.Name(d.ctx)
			if err != nil {
				log.Errorf("VM name is not found, %s", err)
			}
			if name != "" {
				err = errors.Errorf("VM %s is powered on", name)
			} else {
				err = errors.Errorf("VM %s is powered on", vm.Reference())
			}
			return err
		}
		if _, err = tasks.WaitForResult(d.ctx, func(ctx context.Context) (tasks.ResultWaiter, error) {
			return vm.PowerOff(ctx)
		}); err != nil {
			log.Debugf("Failed to power off existing appliance for %s, try to remove anyway", err)
		}
	}
	// get the actual folder name before we delete it
	folder, err := vm.FolderName(d.ctx)
	if err != nil {
		log.Warnf("Failed to get actual folder name for VM. Will not attempt to delete additional data files in VM directory: %s", err)
	}

	_, err = tasks.WaitForResult(d.ctx, func(ctx context.Context) (tasks.ResultWaiter, error) {
		return vm.Destroy(ctx)
	})
	if err != nil {
		err = errors.Errorf("Failed to destroy vm %s: %s", vm.Reference(), err)
		return err
	}
	if _, err = d.deleteDatastoreFiles(d.session.Datastore, folder, true); err != nil {
		log.Warnf("VM path %s is not removed, %s", folder, err)
	}

	return nil
}
Example #4
func (d *Dispatcher) deleteNetworkDevices(vmm *vm.VirtualMachine, conf *config.VirtualContainerHostConfigSpec) error {
	defer trace.End(trace.Begin(""))

	log.Infof("Removing appliance VM network devices")

	power, err := vmm.PowerState(d.ctx)
	if err != nil {
		log.Errorf("Failed to get vm power status %q: %s", vmm.Reference(), err)
		return err

	}
	if power != types.VirtualMachinePowerStatePoweredOff {
		if _, err = tasks.WaitForResult(d.ctx, func(ctx context.Context) (tasks.ResultWaiter, error) {
			return vmm.PowerOff(ctx)
		}); err != nil {
			log.Errorf("Failed to power off existing appliance for %s", err)
			return err
		}
	}

	devices, err := d.networkDevices(vmm)
	if err != nil {
		log.Errorf("Unable to get network devices: %s", err)
		return err
	}

	if len(devices) == 0 {
		log.Infof("No network device attached")
		return nil
	}
	// remove devices
	return vmm.RemoveDevice(d.ctx, false, devices...)
}
Example #5
func TestDeleteExceptDisk(t *testing.T) {
	s := os.Getenv("DRONE")
	if s != "" {
		t.Skip("Skipping: test must be run in a VM")
	}

	ctx := context.Background()

	session := test.Session(ctx, t)
	defer session.Logout(ctx)

	host := test.PickRandomHost(ctx, session, t)

	uuid, err := sys.UUID()
	if err != nil {
		t.Fatalf("unable to get UUID for guest - used for VM name: %s", err)
	}
	name := fmt.Sprintf("%s-%d", uuid, rand.Intn(math.MaxInt32))

	moref, err := CreateVM(ctx, session, host, name)
	if err != nil {
		t.Fatalf("ERROR: %s", err)
	}
	// Wrap the result with our version of VirtualMachine
	vm := NewVirtualMachine(ctx, session, *moref)

	folder, err := vm.FolderName(ctx)
	if err != nil {
		t.Fatalf("ERROR: %s", err)
	}

	// generate the disk name
	diskName := fmt.Sprintf("%s/%s.vmdk", folder, folder)

	// Delete the VM but not its disk
	_, err = tasks.WaitForResult(ctx, func(ctx context.Context) (tasks.ResultWaiter, error) {
		return vm.DeleteExceptDisks(ctx)
	})
	if err != nil {
		t.Fatalf("ERROR: %s", err)
	}

	// check that the disk still exists
	_, err = session.Datastore.Stat(ctx, diskName)
	if err != nil {
		t.Fatalf("Disk does not exist: %s", err)
	}

	// clean up
	dm := object.NewVirtualDiskManager(session.Client.Client)

	task, err := dm.DeleteVirtualDisk(context.TODO(), diskName, nil)
	if err != nil {
		t.Fatalf("Unable to locate orphan vmdk: %s", err)
	}

	if err = task.Wait(context.TODO()); err != nil {
		t.Fatalf("Unable to remove orphan vmdk: %s", err)
	}
}
Example #6
func (d *Dispatcher) destroyResourcePoolIfEmpty(conf *config.VirtualContainerHostConfigSpec) error {
	defer trace.End(trace.Begin(""))

	log.Infof("Removing Resource Pool %q", conf.Name)

	rpRef := conf.ComputeResources[len(conf.ComputeResources)-1]
	rp := compute.NewResourcePool(d.ctx, d.session, rpRef)

	var vms []*vm.VirtualMachine
	var err error
	if vms, err = rp.GetChildrenVMs(d.ctx, d.session); err != nil {
		err = errors.Errorf("Unable to get children vm of resource pool %q: %s", rp.Name(), err)
		return err
	}
	if len(vms) != 0 {
		err = errors.Errorf("Resource pool is not empty: %q", rp.Name())
		return err
	}
	if _, err := tasks.WaitForResult(d.ctx, func(ctx context.Context) (tasks.ResultWaiter, error) {
		return rp.Destroy(ctx)
	}); err != nil {
		return err
	}
	return nil
}
Example #7
func createAppliance(ctx context.Context, sess *session.Session, conf *config.VirtualContainerHostConfigSpec, vConf *data.InstallerData, hasErr bool, t *testing.T) {
	var err error

	d := &Dispatcher{
		session: sess,
		ctx:     ctx,
		isVC:    sess.IsVC(),
		force:   false,
	}
	delete(conf.Networks, "bridge") // FIXME: cannot create bridge network in simulator
	if d.isVC {
		if d.vchVapp, err = d.createVApp(conf, vConf); err != nil {
			// FIXME: Got error: ServerFaultCode: ResourcePool:resourcepool-14 does not implement: CreateVApp. Simulator need to implement CreateVApp
			//			t.Errorf("Unable to create virtual app: %s", err)
		}
	}
	if d.vchPool, err = d.createResourcePool(conf, vConf); err != nil {
		t.Errorf("Unable to create resource pool: %s", err)
	}

	spec, err := d.createApplianceSpec(conf, vConf)
	if err != nil {
		t.Errorf("Unable to create appliance spec: %s", err)
		return
	}

	// create appliance VM
	info, err := tasks.WaitForResult(d.ctx, func(ctx context.Context) (tasks.ResultWaiter, error) {
		return d.session.Folders(ctx).VmFolder.CreateVM(ctx, *spec, d.vchPool, d.session.Host)
	})
	if err != nil {
		t.Errorf("Unable to create appliance VM: %s", err)
		return
	}
	// get VM reference and save it
	moref := info.Result.(types.ManagedObjectReference)
	conf.SetMoref(&moref)
	obj, err := d.session.Finder.ObjectReference(d.ctx, moref)
	if err != nil {
		t.Errorf("Failed to reacquire reference to appliance VM after creation: %s", err)
		return
	}
	gvm, ok := obj.(*object.VirtualMachine)
	if !ok {
		t.Errorf("Required reference after appliance creation was not for a VM: %T", obj)
		return
	}

	vm2 := vm.NewVirtualMachineFromVM(d.ctx, d.session, gvm)
	uuid, err := vm2.UUID(d.ctx)
	if err != nil {
		t.Errorf("Failed to get VM UUID: %s", err)
		return
	}
	t.Logf("uuid: %s", uuid)

	// leverage create volume method to create image datastore
	conf.VolumeLocations["images-store"], _ = url.Parse(fmt.Sprintf("ds://LocalDS_0/VIC/%s/images", uuid))

	if err := d.createVolumeStores(conf); err != nil {
		t.Errorf("Unable to create volume stores: %s", err)
		return
	}
}
Example #8
func CreateVM(ctx context.Context, session *session.Session, host *object.HostSystem, name string) (*types.ManagedObjectReference, error) {
	// Create the spec config
	specconfig := test.SpecConfig(session, name)

	// Create a linux guest
	linux, err := guest.NewLinuxGuest(ctx, session, specconfig)
	if err != nil {
		return nil, err
	}

	// Find the Virtual Machine folder that we use
	folders, err := session.Datacenter.Folders(ctx)
	if err != nil {
		return nil, err
	}
	parent := folders.VmFolder

	// Create the vm
	info, err := tasks.WaitForResult(ctx, func(ctx context.Context) (tasks.ResultWaiter, error) {
		return parent.CreateVM(ctx, *linux.Spec().Spec(), session.Pool, host)
	})
	if err != nil {
		return nil, err
	}

	moref := info.Result.(types.ManagedObjectReference)
	// Return the moRef
	return &moref, nil
}
Example #9
func TestVM(t *testing.T) {

	s := os.Getenv("DRONE")
	if s != "" {
		t.Skip("Skipping: test must be run in a VM")
	}

	ctx := context.Background()

	session := test.Session(ctx, t)
	defer session.Logout(ctx)

	host := test.PickRandomHost(ctx, session, t)

	uuid, err := sys.UUID()
	if err != nil {
		t.Fatalf("unable to get UUID for guest - used for VM name: %s", err)
		return
	}
	name := fmt.Sprintf("%s-%d", uuid, rand.Intn(math.MaxInt32))

	moref, err := CreateVM(ctx, session, host, name)
	if err != nil {
		t.Fatalf("ERROR: %s", err)
	}
	// Wrap the result with our version of VirtualMachine
	vm := NewVirtualMachine(ctx, session, *moref)

	// Check the state
	state, err := vm.PowerState(ctx)
	if err != nil {
		t.Fatalf("ERROR: %s", err)
	}

	assert.Equal(t, types.VirtualMachinePowerStatePoweredOff, state)

	// Check VM name
	rname, err := vm.Name(ctx)
	if err != nil {
		t.Errorf("Failed to load VM name: %s", err)
	}
	assert.Equal(t, name, rname)

	// Get VM UUID
	ruuid, err := vm.UUID(ctx)
	if err != nil {
		t.Errorf("Failed to load VM UUID: %s", err)
	}
	t.Logf("Got UUID: %s", ruuid)

	// Destroy the vm
	_, err = tasks.WaitForResult(ctx, func(ctx context.Context) (tasks.ResultWaiter, error) {
		return vm.Destroy(ctx)
	})
	if err != nil {
		t.Fatalf("ERROR: %s", err)
	}
}
Example #10
func (d *Dispatcher) deleteVMFSFiles(m *object.FileManager, ds *object.Datastore, dsPath string) error {
	defer trace.End(trace.Begin(dsPath))

	if _, err := tasks.WaitForResult(d.ctx, func(ctx context.Context) (tasks.Task, error) {
		return m.DeleteDatastoreFile(ctx, dsPath, d.session.Datacenter)
	}); err != nil {
		log.Debugf("Failed to delete %q: %s", dsPath, err)
	}
	return nil
}
Example #11
// Start starts a container vm with the given params
func (c *Container) start(ctx context.Context) error {
	defer trace.End(trace.Begin("Container.start"))

	if c.vm == nil {
		return fmt.Errorf("vm not set")
	}
	// get existing state and set to starting
	// if there's a failure we'll revert to existing
	existingState := c.State
	c.State = StateStarting

	// Power on
	_, err := tasks.WaitForResult(ctx, func(ctx context.Context) (tasks.ResultWaiter, error) {
		return c.vm.PowerOn(ctx)
	})
	if err != nil {
		c.State = existingState
		return err
	}

	// guestinfo key that we want to wait for
	key := fmt.Sprintf("guestinfo..sessions|%s.started", c.ExecConfig.ID)
	var detail string

	// Wait some before giving up...
	ctx, cancel := context.WithTimeout(ctx, propertyCollectorTimeout)
	defer cancel()

	detail, err = c.vm.WaitForKeyInExtraConfig(ctx, key)
	if err != nil {
		c.State = existingState
		return fmt.Errorf("unable to wait for process launch status: %s", err.Error())
	}

	if detail != "true" {
		c.State = existingState
		return errors.New(detail)
	}

	return nil
}
Example #12
func (c *Container) stop(ctx context.Context, waitTime *int32) error {
	defer trace.End(trace.Begin("Container.stop"))

	if c.vm == nil {
		return fmt.Errorf("vm not set")
	}
	// get existing state and set to stopping
	// if there's a failure we'll revert to existing
	existingState := c.State
	c.State = StateStopping

	err := c.shutdown(ctx, waitTime)
	if err == nil {
		return nil
	}

	log.Warnf("stopping %s via hard power off due to: %s", c.ExecConfig.ID, err)

	_, err = tasks.WaitForResult(ctx, func(ctx context.Context) (tasks.ResultWaiter, error) {
		return c.vm.PowerOff(ctx)
	})

	if err != nil {
		// It is possible the VM has finally shutdown in between, ignore the error in that case
		if terr, ok := err.(task.Error); ok {
			if serr, ok := terr.Fault().(*types.InvalidPowerState); ok {
				if serr.ExistingState == types.VirtualMachinePowerStatePoweredOff {
					log.Warnf("power off %s task skipped (state was already %s)", c.ExecConfig.ID, serr.ExistingState)
					return nil
				}
			}
		}
		c.State = existingState
	}
	return err
}
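The fault inspection in stop above can be factored into a small predicate; a sketch under the same govmomi types (isAlreadyPoweredOff is a hypothetical helper name, not part of the source):

// isAlreadyPoweredOff reports whether a power-off task failed only because
// the VM was already powered off, mirroring the check in Container.stop.
func isAlreadyPoweredOff(err error) bool {
	terr, ok := err.(task.Error)
	if !ok {
		return false
	}
	serr, ok := terr.Fault().(*types.InvalidPowerState)
	return ok && serr.ExistingState == types.VirtualMachinePowerStatePoweredOff
}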
Example #13
// Create a lineage of disks inheriting from each other, write a portion of a
// string to each, then confirm the result is the whole string
func TestCreateAndDetach(t *testing.T) {
	log.SetLevel(log.DebugLevel)

	client := Session(context.Background(), t)
	if client == nil {
		return
	}

	imagestore := client.Datastore.Path(datastore.TestName("diskManagerTest"))

	fm := object.NewFileManager(client.Vim25())

	// create a directory in the datastore
	// eat the error because we don't care if it exists
	fm.MakeDirectory(context.TODO(), imagestore, nil, true)

	vdm, err := NewDiskManager(context.TODO(), client)
	if err != nil && err.Error() == "can't find the hosting vm" {
		t.Skip("Skipping: test must be run in a VM")
	}

	if !assert.NoError(t, err) || !assert.NotNil(t, vdm) {
		return
	}

	diskSize := int64(1 << 10)
	parent, err := vdm.Create(context.TODO(), path.Join(imagestore, "scratch.vmdk"), diskSize)
	if !assert.NoError(t, err) {
		return
	}

	numChildren := 3
	children := make([]*VirtualDisk, numChildren)

	testString := "Ground control to Major Tom"
	writeSize := len(testString) / numChildren
	// Create children which inherit from each other
	for i := 0; i < numChildren; i++ {

		p := path.Join(imagestore, fmt.Sprintf("child%d.vmdk", i))
		child, cerr := vdm.CreateAndAttach(context.TODO(), p, parent.DatastoreURI, 0, os.O_RDWR)
		if !assert.NoError(t, cerr) {
			return
		}

		children[i] = child

		// Write directly to the disk
		f, cerr := os.OpenFile(child.DevicePath, os.O_RDWR, os.FileMode(0777))
		if !assert.NoError(t, cerr) {
			return
		}

		start := i * writeSize
		end := start + writeSize

		if i == numChildren-1 {
			// last chunk, write to the end.
			_, cerr = f.WriteAt([]byte(testString[start:]), int64(start))
			if !assert.NoError(t, cerr) || !assert.NoError(t, f.Sync()) {
				return
			}

			// Try to read the whole string
			b := make([]byte, len(testString))
			f.Seek(0, 0)
			_, cerr = f.Read(b)
			if !assert.NoError(t, cerr) {
				return
			}

			//check against the test string
			if !assert.Equal(t, testString, string(b)) {
				return
			}

		} else {
			_, cerr = f.WriteAt([]byte(testString[start:end]), int64(start))
			if !assert.NoError(t, cerr) || !assert.NoError(t, f.Sync()) {
				return
			}
		}

		f.Close()

		cerr = vdm.Detach(context.TODO(), child)
		if !assert.NoError(t, cerr) {
			return
		}

		// use this image as the next parent
		parent = child
	}

	//	// Nuke the images
	//	for i := len(children) - 1; i >= 0; i-- {
	//		err = vdm.Delete(context.TODO(), children[i])
	//		if !assert.NoError(t, err) {
	//			return
	//		}
	//	}

	// Nuke the image store
	_, err = tasks.WaitForResult(context.TODO(), func(ctx context.Context) (tasks.ResultWaiter, error) {
		return fm.DeleteDatastoreFile(ctx, imagestore, nil)
	})

	if !assert.NoError(t, err) {
		return
	}
}
Example #14
func (d *Dispatcher) createAppliance(conf *config.VirtualContainerHostConfigSpec, settings *data.InstallerData) error {
	defer trace.End(trace.Begin(""))

	log.Infof("Creating appliance on target")

	spec, err := d.createApplianceSpec(conf, settings)
	if err != nil {
		log.Errorf("Unable to create appliance spec: %s", err)
		return err
	}

	var info *types.TaskInfo
	// create appliance VM
	if d.isVC && d.vchVapp != nil {
		info, err = tasks.WaitForResult(d.ctx, func(ctx context.Context) (tasks.Task, error) {
			return d.vchVapp.CreateChildVM_Task(ctx, *spec, d.session.Host)
		})
	} else {
		// if vapp is not created, fall back to create VM under default resource pool
		folder := d.session.Folders(d.ctx).VmFolder
		info, err = tasks.WaitForResult(d.ctx, func(ctx context.Context) (tasks.Task, error) {
			return folder.CreateVM(ctx, *spec, d.vchPool, d.session.Host)
		})
	}

	if err != nil {
		log.Errorf("Unable to create appliance VM: %s", err)
		return err
	}
	if info.Error != nil || info.State != types.TaskInfoStateSuccess {
		log.Errorf("Create appliance reported: %s", info.Error.LocalizedMessage)
	}

	// get VM reference and save it
	moref := info.Result.(types.ManagedObjectReference)
	conf.SetMoref(&moref)
	obj, err := d.session.Finder.ObjectReference(d.ctx, moref)
	if err != nil {
		log.Errorf("Failed to reacquire reference to appliance VM after creation: %s", err)
		return err
	}
	gvm, ok := obj.(*object.VirtualMachine)
	if !ok {
		return fmt.Errorf("Required reference after appliance creation was not for a VM: %T", obj)
	}
	vm2 := vm.NewVirtualMachineFromVM(d.ctx, d.session, gvm)

	// update the displayname to the actual folder name used
	if d.vmPathName, err = vm2.FolderName(d.ctx); err != nil {
		log.Errorf("Failed to get canonical name for appliance: %s", err)
		return err
	}
	log.Debugf("vm folder name: %q", d.vmPathName)
	log.Debugf("vm inventory path: %q", vm2.InventoryPath)

	// create an extension to register the appliance as
	if err = d.GenerateExtensionName(conf, vm2); err != nil {
		return errors.Errorf("Could not generate extension name during appliance creation due to error: %s", err)
	}

	settings.Extension = types.Extension{
		Description: &types.Description{
			Label:   "VIC",
			Summary: "vSphere Integrated Containers Virtual Container Host",
		},
		Company: "VMware, Inc.",
		Version: "0.0",
		Key:     conf.ExtensionName,
	}

	conf.AddComponent("vicadmin", &executor.SessionConfig{
		User:  "******",
		Group: "vicadmin",
		Cmd: executor.Cmd{
			Path: "/sbin/vicadmin",
			Args: []string{
				"/sbin/vicadmin",
				"--dc=" + settings.DatacenterName,
				"--pool=" + settings.ResourcePoolPath,
				"--cluster=" + settings.ClusterPath,
			},
			Env: []string{
				"PATH=/sbin:/bin",
				"GOTRACEBACK=all",
			},
			Dir: "/home/vicadmin",
		},
		Restart: true,
	},
	)

	if conf.HostCertificate != nil {
		d.VICAdminProto = "https"
		d.DockerPort = fmt.Sprintf("%d", opts.DefaultTLSHTTPPort)
	} else {
		d.VICAdminProto = "http"
		d.DockerPort = fmt.Sprintf("%d", opts.DefaultHTTPPort)
	}

	personality := executor.Cmd{
		Path: "/sbin/docker-engine-server",
		Args: []string{
			"/sbin/docker-engine-server",
			//FIXME: hack during config migration
			"-port=" + d.DockerPort,
			fmt.Sprintf("-port-layer-port=%d", portLayerPort),
		},
		Env: []string{
			"PATH=/sbin",
			"GOTRACEBACK=all",
		},
	}
	if settings.HTTPProxy != nil {
		personality.Env = append(personality.Env, fmt.Sprintf("HTTP_PROXY=%s", settings.HTTPProxy.String()))
	}
	if settings.HTTPSProxy != nil {
		personality.Env = append(personality.Env, fmt.Sprintf("HTTPS_PROXY=%s", settings.HTTPSProxy.String()))
	}

	conf.AddComponent("docker-personality", &executor.SessionConfig{
		// currently needed for iptables interaction
		// User:  "******",
		// Group: "nobody",
		Cmd:     personality,
		Restart: true,
	},
	)

	conf.AddComponent("port-layer", &executor.SessionConfig{
		Cmd: executor.Cmd{
			Path: "/sbin/port-layer-server",
			Args: []string{
				"/sbin/port-layer-server",
				"--host=localhost",
				fmt.Sprintf("--port=%d", portLayerPort),
			},
			Env: []string{
				//FIXME: hack during config migration
				"VC_URL=" + conf.Target.String(),
				"DC_PATH=" + settings.DatacenterName,
				"CS_PATH=" + settings.ClusterPath,
				"POOL_PATH=" + settings.ResourcePoolPath,
				"DS_PATH=" + conf.ImageStores[0].Host,
			},
		},
		Restart: true,
	},
	)

	conf.BootstrapImagePath = fmt.Sprintf("[%s] %s/%s", conf.ImageStores[0].Host, d.vmPathName, settings.BootstrapISO)

	spec, err = d.reconfigureApplianceSpec(vm2, conf, settings)
	if err != nil {
		log.Errorf("Error while getting appliance reconfig spec: %s", err)
		return err
	}

	// reconfig
	info, err = vm2.WaitForResult(d.ctx, func(ctx context.Context) (tasks.Task, error) {
		return vm2.Reconfigure(ctx, *spec)
	})

	if err != nil {
		log.Errorf("Error while setting component parameters to appliance: %s", err)
		return err
	}
	if info.State != types.TaskInfoStateSuccess {
		log.Errorf("Setting parameters to appliance reported: %s", info.Error.LocalizedMessage)
		return errors.Errorf("setting parameters to appliance failed: %s", info.Error.LocalizedMessage)
	}

	d.appliance = vm2
	return nil
}
Example #15
func (d *Dispatcher) createAppliance(conf *metadata.VirtualContainerHostConfigSpec, settings *data.InstallerData) error {
	defer trace.End(trace.Begin(""))

	log.Infof("Creating appliance on target")

	spec, err := d.createApplianceSpec(conf, settings)
	if err != nil {
		log.Errorf("Unable to create appliance spec: %s", err)
		return err
	}

	// create test VM
	info, err := tasks.WaitForResult(d.ctx, func(ctx context.Context) (tasks.ResultWaiter, error) {
		return d.session.Folders(ctx).VmFolder.CreateVM(ctx, *spec, d.vchPool, d.session.Host)
	})

	if err != nil {
		log.Errorf("Unable to create appliance VM: %s", err)
		return err
	}
	if info.Error != nil || info.State != types.TaskInfoStateSuccess {
		log.Errorf("Create appliance reported: %s", info.Error.LocalizedMessage)
	}

	// get VM reference and save it
	moref := info.Result.(types.ManagedObjectReference)
	conf.SetMoref(&moref)
	obj, err := d.session.Finder.ObjectReference(d.ctx, moref)
	if err != nil {
		log.Errorf("Failed to reacquire reference to appliance VM after creation: %s", err)
		return err
	}
	gvm, ok := obj.(*object.VirtualMachine)
	if !ok {
		return fmt.Errorf("Required reference after appliance creation was not for a VM: %T", obj)
	}
	vm2 := vm.NewVirtualMachineFromVM(d.ctx, d.session, gvm)

	// update the displayname to the actual folder name used
	if d.vmPathName, err = vm2.FolderName(d.ctx); err != nil {
		log.Errorf("Failed to get canonical name for appliance: %s", err)
		return err
	}
	log.Debugf("vm folder name: %s", d.vmPathName)
	log.Debugf("vm inventory path: %s", vm2.InventoryPath)

	// create an extension to register the appliance as
	if err = d.GenerateExtensionName(conf); err != nil {
		return errors.Errorf("Could not generate extension name during appliance creation due to error: %s", err)
	}

	settings.Extension = types.Extension{
		Description: &types.Description{
			Label:   "VIC",
			Summary: "vSphere Integrated Containers Virtual Container Host",
		},
		Company: "VMware, Inc.",
		Version: "0.0",
		Key:     conf.ExtensionName,
	}

	conf.AddComponent("vicadmin", &metadata.SessionConfig{
		Cmd: metadata.Cmd{
			Path: "/sbin/vicadmin",
			Args: []string{
				"/sbin/vicadmin",
				"-docker-host=unix:///var/run/docker.sock",
				// FIXME: hack during config migration
				"-insecure",
				"-sdk=" + conf.Target.String(),
				"-ds=" + conf.ImageStores[0].Host,
				"-cluster=" + settings.ClusterPath,
				"-pool=" + settings.ResourcePoolPath,
				"-vm-path=" + vm2.InventoryPath,
			},
			Env: []string{
				"PATH=/sbin:/bin",
			},
		},
	},
	)

	if conf.HostCertificate != nil {
		d.VICAdminProto = "https"
		d.DockerPort = "2376"
	} else {
		d.VICAdminProto = "http"
		d.DockerPort = "2375"
	}

	conf.AddComponent("docker-personality", &metadata.SessionConfig{
		Cmd: metadata.Cmd{
			Path: "/sbin/docker-engine-server",
			Args: []string{
				"/sbin/docker-engine-server",
				//FIXME: hack during config migration
				"-serveraddr=0.0.0.0",
				"-port=" + d.DockerPort,
				"-port-layer-port=8080",
			},
			Env: []string{
				"PATH=/sbin",
			},
		},
	},
	)

	conf.AddComponent("port-layer", &metadata.SessionConfig{
		Cmd: metadata.Cmd{
			Path: "/sbin/port-layer-server",
			Args: []string{
				"/sbin/port-layer-server",
				//FIXME: hack during config migration
				"--host=localhost",
				"--port=8080",
				"--insecure",
				"--sdk=" + conf.Target.String(),
				"--datacenter=" + settings.DatacenterName,
				"--cluster=" + settings.ClusterPath,
				"--pool=" + settings.ResourcePoolPath,
				"--datastore=" + conf.ImageStores[0].Host,
				"--vch=" + conf.ExecutorConfig.Name,
			},
		},
	},
	)

	spec, err = d.reconfigureApplianceSpec(vm2, conf)
	if err != nil {
		log.Errorf("Error while getting appliance reconfig spec: %s", err)
		return err
	}

	// reconfig
	info, err = tasks.WaitForResult(d.ctx, func(ctx context.Context) (tasks.ResultWaiter, error) {
		return vm2.Reconfigure(ctx, *spec)
	})

	if err != nil {
		log.Errorf("Error while setting component parameters to appliance: %s", err)
		return err
	}
	if info.State != types.TaskInfoStateSuccess {
		log.Errorf("Setting parameters to appliance reported: %s", info.Error.LocalizedMessage)
		return errors.Errorf("setting parameters to appliance failed: %s", info.Error.LocalizedMessage)
	}

	d.appliance = vm2
	return nil
}
Example #16
func TestVMAttributes(t *testing.T) {

	ctx := context.Background()

	session := test.Session(ctx, t)
	defer session.Logout(ctx)

	host := test.PickRandomHost(ctx, session, t)

	uuid, err := sys.UUID()
	if err != nil {
		t.Fatalf("unable to get UUID for guest - used for VM name: %s", err)
		return
	}
	ID := fmt.Sprintf("%s-%d", uuid, rand.Intn(math.MaxInt32))

	moref, err := CreateVM(ctx, session, host, ID)
	if err != nil {
		t.Fatalf("ERROR: %s", err)
	}
	// Wrap the result with our version of VirtualMachine
	vm := NewVirtualMachine(ctx, session, *moref)

	folder, err := vm.FolderName(ctx)
	if err != nil {
		t.Fatalf("ERROR: %s", err)
	}

	name, err := vm.Name(ctx)
	if err != nil {
		t.Fatalf("ERROR: %s", err)
	}
	assert.Equal(t, name, folder)

	_, err = tasks.WaitForResult(ctx, func(ctx context.Context) (tasks.ResultWaiter, error) {
		return vm.PowerOn(ctx)
	})
	if err != nil {
		t.Fatalf("ERROR: %s", err)
	}

	if guest, err := vm.FetchExtraConfig(ctx); err != nil {
		t.Fatalf("ERROR: %s", err)
	} else {
		assert.NotEmpty(t, guest)
	}
	defer func() {
		// Destroy the vm
		_, err = tasks.WaitForResult(ctx, func(ctx context.Context) (tasks.ResultWaiter, error) {
			return vm.PowerOff(ctx)
		})
		if err != nil {
			t.Fatalf("ERROR: %s", err)
		}
		_, err = tasks.WaitForResult(ctx, func(ctx context.Context) (tasks.ResultWaiter, error) {
			return vm.Destroy(ctx)
		})
		if err != nil {
			t.Fatalf("ERROR: %s", err)
		}
	}()
}
Example #17
func (c *Container) Remove(ctx context.Context, sess *session.Session) error {
	defer trace.End(trace.Begin("Container.Remove"))
	c.Lock()
	defer c.Unlock()

	if c.vm == nil {
		return NotFoundError{}
	}

	// check state first
	if c.State == StateRunning {
		return RemovePowerError{fmt.Errorf("Container is powered on")}
	}

	// get existing state and set to removing
	// if there's a failure we'll revert to existing
	existingState := c.State
	c.State = StateRemoving

	// get the folder the VM is in
	url, err := c.vm.DSPath(ctx)
	if err != nil {

		// handle the out-of-band removal case
		if soap.IsSoapFault(err) {
			fault := soap.ToSoapFault(err).VimFault()
			if _, ok := fault.(types.ManagedObjectNotFound); ok {
				containers.Remove(c.ExecConfig.ID)
				return NotFoundError{}
			}
		}

		log.Errorf("Failed to get datastore path for %s: %s", c.ExecConfig.ID, err)
		c.State = existingState
		return err
	}
	// FIXME: was expecting to find a utility function to convert to/from datastore/url given
	// how widely it's used but couldn't - will ask around.
	dsPath := fmt.Sprintf("[%s] %s", url.Host, url.Path)

	//removes the vm from vsphere, but detaches the disks first
	_, err = tasks.WaitForResult(ctx, func(ctx context.Context) (tasks.ResultWaiter, error) {
		return c.vm.DeleteExceptDisks(ctx)
	})
	if err != nil {
		c.State = existingState
		return err
	}

	// remove from datastore
	fm := object.NewFileManager(c.vm.Client.Client)

	if _, err = tasks.WaitForResult(ctx, func(ctx context.Context) (tasks.ResultWaiter, error) {
		return fm.DeleteDatastoreFile(ctx, dsPath, sess.Datacenter)
	}); err != nil {
		c.State = existingState
		log.Debugf("Failed to delete %s, %s", dsPath, err)
	}

	//remove container from cache
	containers.Remove(c.ExecConfig.ID)
	return nil
}
Example #18
func (d *Dispatcher) CreateVCH(conf *config.VirtualContainerHostConfigSpec, settings *data.InstallerData) error {
	defer trace.End(trace.Begin(conf.Name))

	var err error

	if err = d.checkExistence(conf, settings); err != nil {
		return err
	}

	if d.isVC && !settings.UseRP {
		if d.vchVapp, err = d.createVApp(conf, settings); err != nil {
			detail := fmt.Sprintf("Creating virtual app failed: %s", err)
			if !d.force {
				return errors.New(detail)
			}

			log.Error(detail)
			log.Errorf("Deploying vch under parent pool %q, (--force=true)", settings.ResourcePoolPath)
			d.vchPool = d.session.Pool
			conf.ComputeResources = append(conf.ComputeResources, d.vchPool.Reference())
		}
	} else {
		if d.vchPool, err = d.createResourcePool(conf, settings); err != nil {
			detail := fmt.Sprintf("Creating resource pool failed: %s", err)
			if !d.force {
				return errors.New(detail)
			}

			log.Error(detail)
			log.Errorf("Deploying vch under parent pool %q, (--force=true)", settings.ResourcePoolPath)
			d.vchPool = d.session.Pool
			conf.ComputeResources = append(conf.ComputeResources, d.vchPool.Reference())
		}
	}

	if err = d.createBridgeNetwork(conf); err != nil {
		return err
	}

	if err = d.createVolumeStores(conf); err != nil {
		return errors.Errorf("Exiting because we could not create volume stores due to error: %s", err)
	}

	if err = d.createAppliance(conf, settings); err != nil {
		return errors.Errorf("Creating the appliance failed with %s. Exiting...", err)
	}

	if err = d.uploadImages(settings.ImageFiles); err != nil {
		return errors.Errorf("Uploading images failed with %s. Exiting...", err)
	}

	if d.session.IsVC() {
		if err = d.RegisterExtension(conf, settings.Extension); err != nil {
			return errors.Errorf("Error registering VCH vSphere extension: %s", err)
		}
	}
	_, err = tasks.WaitForResult(d.ctx, func(ctx context.Context) (tasks.ResultWaiter, error) {
		return d.appliance.PowerOn(ctx)
	})

	if err != nil {
		return errors.Errorf("Failed to power on appliance %s. Exiting...", err)
	}

	if err = d.ensureApplianceInitializes(conf); err != nil {
		return errors.Errorf("%s. Exiting...", err)
	}

	// wait till the appliance components are fully initialized
	if err = d.ensureComponentsInitialize(conf); err != nil {
		return errors.Errorf("%s. Exiting...", err)
	}

	return nil
}
Example #19
func TestVolumeCreateListAndRestart(t *testing.T) {
	client := datastore.Session(context.TODO(), t)
	if client == nil {
		return
	}

	ctx := context.TODO()

	// Create the backing store on vsphere
	vsVolumeStore, err := NewVolumeStore(ctx, client)
	if !assert.NoError(t, err) || !assert.NotNil(t, vsVolumeStore) {
		return
	}

	// Root our datastore
	testStorePath := datastore.TestName("voltest")
	ds, err := datastore.NewHelper(ctx, client, client.Datastore, testStorePath)
	if !assert.NoError(t, err) || !assert.NotNil(t, ds) {
		return
	}

	// Add a volume store and give it a name ("testStoreName")
	volumeStore, err := vsVolumeStore.AddStore(ctx, ds, "testStoreName")
	if !assert.NoError(t, err) || !assert.NotNil(t, volumeStore) {
		return
	}

	// test we can list it
	m, err := vsVolumeStore.VolumeStoresList(ctx)
	if !assert.NoError(t, err) || !assert.NotNil(t, m) {
		return
	}

	// test the returned url matches
	s, ok := m["testStoreName"]
	if !assert.True(t, ok) || !assert.Equal(t, testStorePath, filepath.Base(s.String())) {
		return
	}

	// Clean up the mess
	defer func() {
		fm := object.NewFileManager(client.Vim25())
		tasks.WaitForResult(context.TODO(), func(ctx context.Context) (tasks.ResultWaiter, error) {
			return fm.DeleteDatastoreFile(ctx, client.Datastore.Path(testStorePath), client.Datacenter)
		})
	}()

	// Create the cache
	cache, err := portlayer.NewVolumeLookupCache(ctx, vsVolumeStore)
	if !assert.NoError(t, err) || !assert.NotNil(t, cache) {
		return
	}

	// Create the volumes (in parallel)
	numVols := 5
	wg := &sync.WaitGroup{}
	wg.Add(numVols)
	volumes := make(map[string]*portlayer.Volume)
	var mu sync.Mutex // protects volumes; the goroutines below write to it concurrently
	for i := 0; i < numVols; i++ {
		go func(idx int) {
			defer wg.Done()
			ID := fmt.Sprintf("testvolume-%d", idx)

			// add some metadata if i is even
			var info map[string][]byte

			if idx%2 == 0 {
				info = make(map[string][]byte)
				info[ID] = []byte(ID)
			}

			outVol, err := cache.VolumeCreate(ctx, ID, volumeStore, 10240, info)
			if !assert.NoError(t, err) || !assert.NotNil(t, outVol) {
				return
			}

			mu.Lock()
			volumes[ID] = outVol
			mu.Unlock()
		}(i)
	}

	wg.Wait()

	// list using the datastore (skipping the cache)
	outVols, err := vsVolumeStore.VolumesList(ctx)
	if !assert.NoError(t, err) || !assert.NotNil(t, outVols) || !assert.Equal(t, numVols, len(outVols)) {
		return
	}

	for _, outVol := range outVols {
		if !assert.Equal(t, volumes[outVol.ID], outVol) {
			return
		}
	}

	// Test restart

	// Create a new vs and cache to the same datastore (simulating restart) and compare
	secondVStore, err := NewVolumeStore(ctx, client)
	if !assert.NoError(t, err) || !assert.NotNil(t, secondVStore) {
		return
	}

	volumeStore, err = secondVStore.AddStore(ctx, ds, "testStoreName")
	if !assert.NoError(t, err) || !assert.NotNil(t, volumeStore) {
		return
	}
	secondCache, err := portlayer.NewVolumeLookupCache(ctx, secondVStore)
	if !assert.NoError(t, err) || !assert.NotNil(t, secondCache) {
		return
	}

	secondOutVols, err := secondCache.VolumesList(ctx)
	if !assert.NoError(t, err) || !assert.NotNil(t, secondOutVols) || !assert.Equal(t, numVols, len(secondOutVols)) {
		return
	}

	for _, outVol := range secondOutVols {
		// XXX we could compare the Volumes, but the paths are different the
		// second time around on vsan since the vsan UUID is not included.
		if !assert.NotEmpty(t, volumes[outVol.ID].Device.DiskPath()) {
			return
		}
	}
}
Example #20
// Remove removes a containerVM after detaching the disks
func (c *Container) Remove(ctx context.Context, sess *session.Session) error {
	defer trace.End(trace.Begin(c.ExecConfig.ID))
	c.m.Lock()
	defer c.m.Unlock()

	if c.vm == nil {
		return NotFoundError{}
	}

	// check state first
	if c.state == StateRunning {
		return RemovePowerError{fmt.Errorf("Container is powered on")}
	}

	// get existing state and set to removing
	// if there's a failure we'll revert to existing
	existingState := c.updateState(StateRemoving)

	// get the folder the VM is in
	url, err := c.vm.DSPath(ctx)
	if err != nil {

		// handle the out-of-band removal case
		if soap.IsSoapFault(err) {
			fault := soap.ToSoapFault(err).VimFault()
			if _, ok := fault.(types.ManagedObjectNotFound); ok {
				Containers.Remove(c.ExecConfig.ID)
				return NotFoundError{}
			}
		}

		log.Errorf("Failed to get datastore path for %s: %s", c.ExecConfig.ID, err)
		c.updateState(existingState)
		return err
	}
	// FIXME: was expecting to find a utility function to convert to/from datastore/url given
	// how widely it's used but couldn't - will ask around.
	dsPath := fmt.Sprintf("[%s] %s", url.Host, url.Path)

	//removes the vm from vsphere, but detaches the disks first
	_, err = c.vm.WaitForResult(ctx, func(ctx context.Context) (tasks.Task, error) {
		return c.vm.DeleteExceptDisks(ctx)
	})
	if err != nil {
		f, ok := err.(types.HasFault)
		if !ok {
			c.updateState(existingState)
			return err
		}
		switch f.Fault().(type) {
		case *types.InvalidState:
			log.Warnf("container VM is in invalid state, unregistering")
			if err := c.vm.Unregister(ctx); err != nil {
				log.Errorf("Error while attempting to unregister container VM: %s", err)
				return err
			}
		default:
			log.Debugf("Fault while attempting to destroy vm: %#v", f.Fault())
			c.updateState(existingState)
			return err
		}
	}

	// remove from datastore
	fm := object.NewFileManager(c.vm.Client.Client)

	if _, err = tasks.WaitForResult(ctx, func(ctx context.Context) (tasks.Task, error) {
		return fm.DeleteDatastoreFile(ctx, dsPath, sess.Datacenter)
	}); err != nil {
		// at this phase error doesn't matter. Just log it.
		log.Debugf("Failed to delete %s, %s", dsPath, err)
	}

	//remove container from cache
	Containers.Remove(c.ExecConfig.ID)
	return nil
}
Example #21
File: commit.go Project: vmware/vic
// Commit executes the required steps on the handle
func Commit(ctx context.Context, sess *session.Session, h *Handle, waitTime *int32) error {
	defer trace.End(trace.Begin(h.ExecConfig.ID))

	c := Containers.Container(h.ExecConfig.ID)
	creation := h.vm == nil
	if creation {
		if h.Spec == nil {
			return fmt.Errorf("a spec must be provided for create operations")
		}

		if sess == nil {
			// session must not be nil
			return fmt.Errorf("no session provided for create operations")
		}

		// the only permissible operation is to create a VM
		if h.Spec == nil {
			return fmt.Errorf("only create operations can be committed without an existing VM")
		}

		if c != nil {
			return fmt.Errorf("a container already exists in the cache with this ID")
		}

		var res *types.TaskInfo
		var err error
		if sess.IsVC() && Config.VirtualApp.ResourcePool != nil {
			// Create the vm
			res, err = tasks.WaitForResult(ctx, func(ctx context.Context) (tasks.Task, error) {
				return Config.VirtualApp.CreateChildVM_Task(ctx, *h.Spec.Spec(), nil)
			})
		} else {
			// Find the Virtual Machine folder that we use
			var folders *object.DatacenterFolders
			folders, err = sess.Datacenter.Folders(ctx)
			if err != nil {
				log.Errorf("Could not get folders")
				return err
			}
			parent := folders.VmFolder

			// Create the vm
			res, err = tasks.WaitForResult(ctx, func(ctx context.Context) (tasks.Task, error) {
				return parent.CreateVM(ctx, *h.Spec.Spec(), Config.ResourcePool, nil)
			})
		}

		if err != nil {
			log.Errorf("Something failed. Spec was %+v", *h.Spec.Spec())
			return err
		}

		h.vm = vm.NewVirtualMachine(ctx, sess, res.Result.(types.ManagedObjectReference))
		c = newContainer(&h.containerBase)
		Containers.Put(c)
		// inform of creation irrespective of remaining operations
		publishContainerEvent(c.ExecConfig.ID, time.Now().UTC(), events.ContainerCreated)

		// clear the spec as we've acted on it - this prevents a reconfigure from occurring in follow-on
		// processing
		h.Spec = nil
	}

	// if we're stopping the VM, do so before the reconfigure to preserve the extraconfig
	refresh := true
	if h.TargetState() == StateStopped {
		if h.Runtime == nil {
			log.Warnf("Commit called with incomplete runtime state for %s", h.ExecConfig.ID)
		}

		if h.Runtime != nil && h.Runtime.PowerState == types.VirtualMachinePowerStatePoweredOff {
			log.Infof("Dropping duplicate power off operation for %s", h.ExecConfig.ID)
		} else {
			// stop the container
			if err := c.stop(ctx, waitTime); err != nil {
				return err
			}

			// inform of the stop irrespective of remaining operations
			publishContainerEvent(h.ExecConfig.ID, time.Now().UTC(), events.ContainerStopped)

			// we must refresh now to get the new ChangeVersion - this is used to gate on powerstate in the reconfigure
			// because we cannot set the ExtraConfig if the VM is powered on. There is still a race here unfortunately because
			// tasks don't appear to contain the new ChangeVersion
			// we don't use refresh because we want to keep the extraconfig state
			base, err := h.updates(ctx)
			if err != nil {
				// TODO: can we recover here, or at least set useful state for inspection?
				return err
			}
			h.Runtime = base.Runtime
			h.Config = base.Config

			refresh = false
		}
	}

	// reconfigure operation
	if h.Spec != nil {
		if h.Runtime == nil {
			log.Errorf("Refusing to perform reconfigure operation with incomplete runtime state for %s", h.ExecConfig.ID)
		} else {
			// ensure that our logic based on Runtime state remains valid

			// NOTE: this inline refresh can be removed when switching away from guestinfo where we have non-persistence issues
			// when updating ExtraConfig via the API with a powered on VM - we therefore have to be absolutely certain about the
			// power state to decide if we can continue without nilifying extraconfig

			for s := h.Spec.Spec(); ; refresh, s = true, h.Spec.Spec() {
				// FIXME!!! this is a temporary hack until the concurrent modification retry logic is in place
				if refresh {
					base, err := h.updates(ctx)
					if err == nil {
						h.Runtime = base.Runtime
						h.Config = base.Config
					}
				}

				s.ChangeVersion = h.Config.ChangeVersion

				// nilify ExtraConfig if vm is running
				if h.Runtime.PowerState == types.VirtualMachinePowerStatePoweredOn {
					log.Errorf("Nilifying ExtraConfig as we are running")
					s.ExtraConfig = nil
				}

				_, err := h.vm.WaitForResult(ctx, func(ctx context.Context) (tasks.Task, error) {
					return h.vm.Reconfigure(ctx, *s)
				})
				if err != nil {
					log.Errorf("Reconfigure failed with %#+v", err)

					// Check whether we get ConcurrentAccess and wrap it if needed
					if f, ok := err.(types.HasFault); ok {
						switch f.Fault().(type) {
						case *types.ConcurrentAccess:
							log.Errorf("We have ConcurrentAccess for version %s", s.ChangeVersion)

							continue
							// return ConcurrentAccessError{err}
						}
					}
					return err
				}

				break
			}
		}
	}

	// best effort update of container cache using committed state - this will not reflect the power on below, however
	// this is primarily for updating ExtraConfig state.
	if !creation {
		defer c.RefreshFromHandle(ctx, h)
	}

	if h.TargetState() == StateRunning {
		if h.Runtime != nil && h.Runtime.PowerState == types.VirtualMachinePowerStatePoweredOn {
			log.Infof("Dropping duplicate power on operation for %s", h.ExecConfig.ID)
			return nil
		}

		if h.Runtime == nil && !creation {
			log.Warnf("Commit called with incomplete runtime state for %s", h.ExecConfig.ID)
		}

		// start the container
		if err := c.start(ctx); err != nil {
			return err
		}

		// inform of the start irrespective of remaining operations
		publishContainerEvent(h.ExecConfig.ID, time.Now().UTC(), events.ContainerStarted)
	}

	return nil
}
Example #22
func (c *Container) Commit(ctx context.Context, sess *session.Session, h *Handle, waitTime *int32) error {
	defer trace.End(trace.Begin("Committing handle"))

	c.Lock()
	defer c.Unlock()

	if c.vm == nil {
		// the only permissible operation is to create a VM
		if h.Spec == nil {
			return fmt.Errorf("only create operations can be committed without an existing VM")
		}

		var res *types.TaskInfo
		var err error

		if sess.IsVC() && VCHConfig.VirtualApp != nil {
			// Create the vm
			res, err = tasks.WaitForResult(ctx, func(ctx context.Context) (tasks.ResultWaiter, error) {
				return VCHConfig.VirtualApp.CreateChildVM_Task(ctx, *h.Spec.Spec(), nil)
			})

			c.State = StateCreated
		} else {
			// Find the Virtual Machine folder that we use
			var folders *object.DatacenterFolders
			folders, err = sess.Datacenter.Folders(ctx)
			if err != nil {
				log.Errorf("Could not get folders")
				return err
			}
			parent := folders.VmFolder

			// Create the vm
			res, err = tasks.WaitForResult(ctx, func(ctx context.Context) (tasks.ResultWaiter, error) {
				return parent.CreateVM(ctx, *h.Spec.Spec(), VCHConfig.ResourcePool, nil)
			})

			c.State = StateCreated
		}

		if err != nil {
			log.Errorf("Something failed. Spec was %+v", *h.Spec.Spec())
			return err
		}

		c.vm = vm.NewVirtualMachine(ctx, sess, res.Result.(types.ManagedObjectReference))

		// clear the spec as we've acted on it
		h.Spec = nil
	}

	// if we're stopping the VM, do so before the reconfigure to preserve the extraconfig
	if h.State != nil && *h.State == StateStopped {
		// stop the container
		if err := h.Container.stop(ctx, waitTime); err != nil {
			return err
		}

		c.State = *h.State
	}

	if h.Spec != nil {
		// FIXME: add check that the VM is powered off - it should be, but this will destroy the
		// extraconfig if it's not.

		s := h.Spec.Spec()
		_, err := tasks.WaitForResult(ctx, func(ctx context.Context) (tasks.ResultWaiter, error) {
			return c.vm.Reconfigure(ctx, *s)
		})

		if err != nil {
			return err
		}
	}

	if h.State != nil && *h.State == StateRunning {
		// start the container
		if err := h.Container.start(ctx); err != nil {
			return err
		}

		c.State = *h.State
	}

	c.ExecConfig = &h.ExecConfig

	// add or overwrite the container in the cache
	containers.Put(c)
	return nil
}