Example #1
func TestSimulator(t *testing.T) {

	ctx := context.Background()

	for i, model := range []*simulator.Model{simulator.ESX(), simulator.VPX()} {
		t.Logf("%d", i)
		defer model.Remove()
		err := model.Create()
		if err != nil {
			t.Fatal(err)
		}

		s := model.Service.NewServer()
		defer s.Close()

		s.URL.User = url.UserPassword("user", "pass")
		t.Logf("server URL: %s", s.URL)

		var sess *session.Session
		if i == 0 {
			sess, err = getESXSession(ctx, s.URL.String())
		} else {
			sess, err = getVPXSession(ctx, s.URL.String())
		}
		if err != nil {
			t.Fatal(err)
		}
		defer sess.Logout(ctx)
		testGetChildrenVMs(ctx, sess, t)
		testGetChildVM(ctx, sess, t)
		testFindResourcePool(ctx, sess, t)
		testGetCluster(ctx, sess, t)
	}
}
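The getESXSession and getVPXSession helpers are not part of this example. Below is a minimal sketch of what getESXSession might look like, assuming vic's session.Config / NewSession / Connect / Populate API; the config field names and inventory paths are assumptions, not code from the project:

func getESXSession(ctx context.Context, service string) (*session.Session, error) {
	// hypothetical helper: Config fields and inventory paths are assumptions
	config := &session.Config{
		Service:        service,
		Insecure:       true,
		Keepalive:      5 * time.Minute, // assumes "time" is imported
		DatacenterPath: "/ha-datacenter",
		DatastorePath:  "/ha-datacenter/datastore/*",
		PoolPath:       "/ha-datacenter/host/*/Resources",
	}

	s, err := session.NewSession(config).Connect(ctx)
	if err != nil {
		return nil, err
	}
	// Populate resolves the inventory paths configured above
	return s.Populate(ctx)
}

getVPXSession would differ only in its inventory paths (e.g. a named datacenter and cluster under a vCenter-style inventory).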
Example #2
func findDiagnosticLogs(c *session.Session) ([]entryReader, error) {
	defer trace.End(trace.Begin(""))

	// When connected to VC, we collect vpxd.log and hostd.log for all cluster hosts attached to the datastore.
	// When connected to ESX, we just collect hostd.log.
	const (
		vpxdKey  = "vpxd:vpxd.log"
		hostdKey = "hostd"
	)

	var logs []entryReader
	var err error

	if c.IsVC() {
		logs = append(logs, dlogReader{c, vpxdKey, nil})

		var hosts []*object.HostSystem
		if c.Cluster == nil && c.Host != nil {
			hosts = []*object.HostSystem{c.Host}
		} else {
			hosts, err = c.Datastore.AttachedClusterHosts(context.TODO(), c.Cluster)
			if err != nil {
				return nil, err
			}
		}

		for _, host := range hosts {
			logs = append(logs, dlogReader{c, hostdKey, host})
		}
	} else {
		logs = append(logs, dlogReader{c, hostdKey, nil})
	}

	return logs, nil
}
Example #3
func createAppliance(ctx context.Context, sess *session.Session, conf *config.VirtualContainerHostConfigSpec, vConf *data.InstallerData, hasErr bool, t *testing.T) {
	var err error

	d := &Dispatcher{
		session: sess,
		ctx:     ctx,
		isVC:    sess.IsVC(),
		force:   false,
	}
	delete(conf.Networks, "bridge") // FIXME: cannot create bridge network in simulator
	if d.isVC {
		if d.vchVapp, err = d.createVApp(conf, vConf); err != nil {
			// FIXME: Got error: ServerFaultCode: ResourcePool:resourcepool-14 does not implement: CreateVApp.
			// The simulator needs to implement CreateVApp before this can be asserted:
			// t.Errorf("Unable to create virtual app: %s", err)
		}
	}
	if d.vchPool, err = d.createResourcePool(conf, vConf); err != nil {
		t.Errorf("Unable to create resource pool: %s", err)
	}

	spec, err := d.createApplianceSpec(conf, vConf)
	if err != nil {
		t.Errorf("Unable to create appliance spec: %s", err)
		return
	}

	// create appliance VM
	info, err := tasks.WaitForResult(d.ctx, func(ctx context.Context) (tasks.ResultWaiter, error) {
		return d.session.Folders(ctx).VmFolder.CreateVM(ctx, *spec, d.vchPool, d.session.Host)
	})
	if err != nil {
		t.Errorf("Unable to create appliance VM: %s", err)
		return
	}

	// get VM reference and save it
	moref := info.Result.(types.ManagedObjectReference)
	conf.SetMoref(&moref)
	obj, err := d.session.Finder.ObjectReference(d.ctx, moref)
	if err != nil {
		t.Errorf("Failed to reacquire reference to appliance VM after creation: %s", err)
		return
	}
	gvm, ok := obj.(*object.VirtualMachine)
	if !ok {
		t.Errorf("Required reference after appliance creation was not for a VM: %T", obj)
		return
	}

	vm2 := vm.NewVirtualMachineFromVM(d.ctx, d.session, gvm)
	uuid, err := vm2.UUID(d.ctx)
	if err != nil {
		t.Errorf("Failed to get VM UUID: %s", err)
		return
	}
	t.Logf("uuid: %s", uuid)

	// leverage create volume method to create image datastore
	conf.VolumeLocations["images-store"], _ = url.Parse(fmt.Sprintf("ds://LocalDS_0/VIC/%s/images", uuid))

	if err := d.createVolumeStores(conf); err != nil {
		t.Errorf("Unable to create volume stores: %s", err)
		return
	}
}
Example #4
// NewDatastore returns a Datastore.
// ctx is a context,
// s is an authenticated session
// ds is the vsphere datastore
// rootdir is the top-level directory under which all data is rooted. If it
// does not exist it is created; if it already exists this is a no-op. rootdir cannot be empty.
func NewDatastore(ctx context.Context, s *session.Session, ds *object.Datastore, rootdir string) (*Datastore, error) {

	d := &Datastore{
		ds: ds,
		s:  s,
		fm: object.NewFileManager(s.Vim25()),
	}

	rootdir = strings.TrimPrefix(rootdir, "/")

	// Get the root directory element split from the rest of the path (if there is one)
	root := strings.SplitN(rootdir, "/", 2)

	// Create the first element.  This handles vsan vmfs top level dirs.
	if err := d.mkRootDir(ctx, root[0]); err != nil {
		log.Infof("error creating root directory %s: %s", rootdir, err)
		return nil, err
	}

	// Create the rest conventionally
	if len(root) > 1 {
		r, err := d.Mkdir(ctx, true, root[1])
		if err != nil {
			return nil, err
		}
		d.RootURL = r
	}

	log.Infof("Datastore path is %s", d.RootURL)
	return d, nil
}
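A possible call site, assuming a connected session whose Datastore field is already populated; the root path here is illustrative:

// hypothetical usage: root all datastore data under "VIC/my-vch"
ds, err := NewDatastore(ctx, sess, sess.Datastore, "VIC/my-vch")
if err != nil {
	return err
}
log.Infof("datastore helper rooted at %s", ds.RootURL)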
Example #5
func Init(ctx context.Context, sess *session.Session, source extraconfig.DataSource, sink extraconfig.DataSink) error {
	trace.End(trace.Begin(""))

	initializer.once.Do(func() {
		var err error
		defer func() {
			initializer.err = err
		}()

		f := find.NewFinder(sess.Vim25(), false)

		var config Configuration
		config.sink = sink
		config.source = source
		config.Decode()
		config.PortGroups = make(map[string]object.NetworkReference)

		log.Debugf("Decoded VCH config for network: %#v", config)
		for nn, n := range config.ContainerNetworks {
			pgref := new(types.ManagedObjectReference)
			if !pgref.FromString(n.ID) {
				log.Warnf("Could not reacquire object reference from id for network %s: %s", nn, n.ID)
			}

			var r object.Reference
			if r, err = f.ObjectReference(ctx, *pgref); err != nil {
				log.Warnf("could not get network reference for %s network: %s", nn, err)
				err = nil
				continue
			}

			config.PortGroups[nn] = r.(object.NetworkReference)
		}

		// make sure a NIC attached to the bridge network exists
		config.BridgeLink, err = getBridgeLink(&config)
		if err != nil {
			return
		}

		var kv kvstore.KeyValueStore
		kv, err = store.NewDatastoreKeyValue(ctx, sess, "network.contexts.default")
		if err != nil {
			return
		}

		var netctx *Context
		if netctx, err = NewContext(&config, kv); err != nil {
			return
		}

		if err = engageContext(ctx, netctx, exec.Config.EventManager); err == nil {
			DefaultContext = netctx
			log.Infof("Default network context allocated")
		}
	})

	return initializer.err
}
Example #6
func rm(t *testing.T, client *session.Session, name string) {
	t.Logf("deleting %s", name)
	fm := object.NewFileManager(client.Vim25())
	task, err := fm.DeleteDatastoreFile(context.TODO(), name, client.Datacenter)
	if !assert.NoError(t, err) {
		return
	}
	_, _ = task.WaitForResult(context.TODO(), nil)
}
Example #7
// populate the vm attributes for the specified morefs
func populateVMAttributes(ctx context.Context, sess *session.Session, refs []types.ManagedObjectReference) ([]mo.VirtualMachine, error) {
	var vms []mo.VirtualMachine

	// current attributes we care about
	attrib := []string{"config", "runtime.powerState", "summary"}

	// populate the vm properties
	err := sess.Retrieve(ctx, refs, attrib, &vms)
	return vms, err
}
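A sketch of how the returned properties might be consumed; refs is assumed to come from an earlier inventory query (hypothetical surrounding code):

// hypothetical usage: refs is a previously obtained slice of VM morefs
vms, err := populateVMAttributes(ctx, sess, refs)
if err != nil {
	return err
}
for i := range vms {
	if vms[i].Config == nil {
		continue
	}
	log.Debugf("vm %s power state: %s", vms[i].Config.Name, vms[i].Runtime.PowerState)
}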
Example #8
func createNodes(ctx context.Context, sess *session.Session, pool *object.ResourcePool, node *Node, base string) error {
	log.Debugf("create node %+v", node)
	if node == nil {
		return nil
	}
	spec := simulator.NewResourceConfigSpec()
	node.Name = fmt.Sprintf("%s-%s", base, node.Name)
	switch node.Kind {
	case rpNode:
		child, err := pool.Create(ctx, node.Name, spec)
		if err != nil {
			return err
		}
		for _, childNode := range node.Children {
			if err := createNodes(ctx, sess, child, childNode, base); err != nil {
				return err
			}
		}
	case vappNode:
		confSpec := types.VAppConfigSpec{
			VmConfigSpec: types.VmConfigSpec{},
		}
		vapp, err := pool.CreateVApp(ctx, node.Name, spec, confSpec, nil)
		if err != nil {
			return err
		}
		config := types.VirtualMachineConfigSpec{
			Name:    node.Name,
			GuestId: string(types.VirtualMachineGuestOsIdentifierOtherGuest),
			Files: &types.VirtualMachineFileInfo{
				VmPathName: fmt.Sprintf("[LocalDS_0] %s", node.Name),
			},
		}
		if _, err = tasks.WaitForResult(ctx, func(ctx context.Context) (tasks.Task, error) {
			return vapp.CreateChildVM_Task(ctx, config, nil)
		}); err != nil {
			return err
		}
	case vmNode:
		config := types.VirtualMachineConfigSpec{
			Name:    node.Name,
			GuestId: string(types.VirtualMachineGuestOsIdentifierOtherGuest),
			Files: &types.VirtualMachineFileInfo{
				VmPathName: fmt.Sprintf("[LocalDS_0] %s", node.Name),
			},
		}
		folder := sess.Folders(ctx).VmFolder
		if _, err := tasks.WaitForResult(ctx, func(ctx context.Context) (tasks.Task, error) {
			return folder.CreateVM(ctx, config, pool, nil)
		}); err != nil {
			return err
		}
	default:
		return nil
	}
	return nil
}
Example #9
func testDeleteVolumeStores(ctx context.Context, sess *session.Session, conf *config.VirtualContainerHostConfigSpec, numVols int, t *testing.T) {
	d := &Dispatcher{
		session: sess,
		ctx:     ctx,
		isVC:    sess.IsVC(),
		force:   true,
	}

	if removed := d.deleteVolumeStoreIfForced(conf); removed != numVols {
		t.Errorf("Did not successfully remove all specified volumes")
	}
}
Example #10
// NewDispatcher creates a dispatcher that can act upon VIC management operations.
// conf is an optional VCH config; when provided, diagnostic logs are initialized.
// force will cause some errors to be ignored.
func NewDispatcher(ctx context.Context, s *session.Session, conf *config.VirtualContainerHostConfigSpec, force bool) *Dispatcher {
	defer trace.End(trace.Begin(""))
	isVC := s.IsVC()
	e := &Dispatcher{
		session: s,
		ctx:     ctx,
		isVC:    isVC,
		force:   force,
	}
	if conf != nil {
		e.InitDiagnosticLogs(conf)
	}
	return e
}
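A minimal usage sketch; conf may be nil when no diagnostic logs are wanted, and force corresponds to the forced-deletion behaviour seen in Example #9 (values here are illustrative):

// hypothetical usage: force=true tolerates some errors during teardown
d := NewDispatcher(ctx, sess, conf, true)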
Example #11
// FIXME: the simulator has no IDE controller, so creating the appliance fails
func testCreateAppliance(ctx context.Context, sess *session.Session, conf *config.VirtualContainerHostConfigSpec, vConf *data.InstallerData, hasErr bool, t *testing.T) {
	d := &Dispatcher{
		session: sess,
		ctx:     ctx,
		isVC:    sess.IsVC(),
		force:   false,
	}
	delete(conf.Networks, "bridge") // FIXME: cannot create bridge network right now
	d.vchPool = d.session.Pool
	err := d.createAppliance(conf, vConf)
	if err != nil {
		t.Logf("Expected error: %s", err)
	}
}
Example #12
// NewDiagnosticManager returns a new diagnostic log Manager
func NewDiagnosticManager(session *session.Session) *Manager {
	return &Manager{
		DiagnosticManager: object.NewDiagnosticManager(
			session.Vim25(),
		),
		Session: session,
	}
}
Example #13
File: vapp.go Project: jak-atx/vic
// NewVirtualApp returns a new VirtualApp object
func NewVirtualApp(ctx context.Context, session *session.Session, moref types.ManagedObjectReference) *VirtualApp {
	return &VirtualApp{
		VirtualApp: object.NewVirtualApp(
			session.Vim25(),
			moref,
		),
		Session: session,
	}
}
Example #14
// GetSelf returns a VirtualMachine reference for the VM this process is running on
func GetSelf(ctx context.Context, s *session.Session) (*vm.VirtualMachine, error) {
	u, err := UUID()
	if err != nil {
		return nil, err
	}

	search := object.NewSearchIndex(s.Vim25())
	ref, err := search.FindByUuid(ctx, s.Datacenter, u, true, nil)
	if err != nil {
		return nil, err
	}

	if ref == nil {
		return nil, fmt.Errorf("can't find the hosting vm")
	}

	return vm.NewVirtualMachine(ctx, s, ref.Reference()), nil
}
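A sketch of the typical call, mirroring how Example #24 uses guest.GetSelf to blacklist the VCH's own VM from event monitoring (error message wording is illustrative):

// hypothetical usage: exclude our own VM from event monitoring
self, err := GetSelf(ctx, sess)
if err != nil {
	return fmt.Errorf("unable to get a reference to the VCH: %s", err)
}
exec.VCHConfig.EventManager.Blacklist(self.Reference().String())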
Example #15
File: rp.go Project: kjplatz/vic
// NewResourcePool returns a new ResourcePool object
func NewResourcePool(ctx context.Context, session *session.Session, moref types.ManagedObjectReference) *ResourcePool {
	return &ResourcePool{
		ResourcePool: object.NewResourcePool(
			session.Vim25(),
			moref,
		),
		Session: session,
	}
}
Example #16
File: exec.go Project: vmware/vic
func isManagedbyVCH(sess *session.Session, moref types.ManagedObjectReference) bool {
	var vm mo.VirtualMachine

	// current attributes we care about
	attrib := []string{"resourcePool"}

	// populate the vm properties
	ctx := context.Background()
	if err := sess.RetrieveOne(ctx, moref, attrib, &vm); err != nil {
		log.Errorf("Failed to query registered vm object %s: %s", moref.String(), err)
		return false
	}
	if vm.ResourcePool == nil || *vm.ResourcePool != Config.ResourcePool.Reference() {
		log.Debugf("container vm %q does not belong to this VCH, ignoring", vm.Config.Name)
		return false
	}
	return true
}
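A plausible call site, e.g. gating the registeredVMEvent handling wired up in Example #29; moref here is a hypothetical VM reference taken from the event:

// hypothetical usage: ignore VMs parented outside this VCH's resource pool
if !isManagedbyVCH(sess, moref) {
	log.Debugf("ignoring vm %s: not managed by this VCH", moref.String())
	return
}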
Example #17
func (c *Container) Update(ctx context.Context, sess *session.Session) (*executor.ExecutorConfig, error) {
	defer trace.End(trace.Begin("Container.Update"))
	c.Lock()
	defer c.Unlock()

	if c.vm == nil {
		return nil, fmt.Errorf("container does not have a vm")
	}

	var vm []mo.VirtualMachine

	if err := sess.Retrieve(ctx, []types.ManagedObjectReference{c.vm.Reference()}, []string{"config"}, &vm); err != nil {
		return nil, err
	}

	extraconfig.Decode(vmomi.OptionValueSource(vm[0].Config.ExtraConfig), c.ExecConfig)
	return c.ExecConfig, nil
}
Example #18
func testCreateNetwork(ctx context.Context, sess *session.Session, conf *config.VirtualContainerHostConfigSpec, t *testing.T) {
	d := &Dispatcher{
		session: sess,
		ctx:     ctx,
		isVC:    sess.IsVC(),
		force:   false,
	}

	err := d.createBridgeNetwork(conf)
	if d.isVC && err != nil {
		t.Logf("Got exepcted err: %s", err)
		return
	}
	if d.isVC {
		t.Errorf("Should not create network in VC")
		return
	}
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
}
Example #19
func testCreateVolumeStores(ctx context.Context, sess *session.Session, conf *config.VirtualContainerHostConfigSpec, hasErr bool, t *testing.T) {
	d := &Dispatcher{
		session: sess,
		ctx:     ctx,
		isVC:    sess.IsVC(),
		force:   false,
	}

	err := d.createVolumeStores(conf)
	if hasErr && err != nil {
		t.Logf("Got exepcted err: %s", err)
		return
	}
	if hasErr {
		t.Errorf("Should have error, but got success")
		return
	}
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
}
Example #20
func createTestData(ctx context.Context, sess *session.Session, prefix string) error {
	dcs, err := sess.Finder.DatacenterList(ctx, "*")
	if err != nil {
		return err
	}
	for _, dc := range dcs {
		sess.Finder.SetDatacenter(dc)
		sess.Datacenter = dc
		resources := &Node{
			Kind: rpNode,
			Name: prefix + "Root",
			Children: []*Node{
				{
					Kind: rpNode,
					Name: prefix + "pool1",
					Children: []*Node{
						{
							Kind: vmNode,
							Name: prefix + "pool1",
						},
						{
							Kind: rpNode,
							Name: prefix + "pool1-2",
							Children: []*Node{
								{
									Kind: rpNode,
									Name: prefix + "pool1-2-1",
									Children: []*Node{
										{
											Kind: vmNode,
											Name: prefix + "vch1-2-1",
										},
									},
								},
							},
						},
					},
				},
				{
					Kind: vmNode,
					Name: prefix + "vch2",
				},
			},
		}
		if err = createResources(ctx, sess, resources); err != nil {
			return err
		}
	}
	return nil
}
Example #21
// GetDatastores returns a map of datastore helpers given a map of names and URLs
func GetDatastores(ctx context.Context, s *session.Session, dsURLs map[string]*url.URL) (map[string]*Helper, error) {
	stores := make(map[string]*Helper)

	fm := object.NewFileManager(s.Vim25())
	for name, dsURL := range dsURLs {

		// NOTE: the datastore object is resolved from the session's default
		// DatastorePath; the supplied URL only contributes the store's root path below
		vsDs, err := s.Finder.DatastoreOrDefault(ctx, s.DatastorePath)
		if err != nil {
			return nil, err
		}

		d := &Helper{
			ds:      vsDs,
			s:       s,
			fm:      fm,
			RootURL: dsURL.Path,
		}

		stores[name] = d
	}

	return stores, nil
}
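A usage sketch; the ds:// URL format is borrowed from Example #3 and the store name is illustrative:

// hypothetical usage: map a volume store name to a datastore URL
u, err := url.Parse("ds://LocalDS_0/volumes")
if err != nil {
	return err
}
stores, err := GetDatastores(ctx, sess, map[string]*url.URL{"default": u})
if err != nil {
	return err
}
log.Infof("volume store rooted at %s", stores["default"].RootURL)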
Example #22
File: logs.go Project: vmware/vic
func findDiagnosticLogs(c *session.Session) (map[string]entryReader, error) {
	defer trace.End(trace.Begin(""))

	// When connected to VC, we collect vpxd.log and hostd.log for all cluster hosts attached to the datastore.
	// When connected to ESX, we just collect hostd.log.
	const (
		vpxdKey  = "vpxd:vpxd.log"
		hostdKey = "hostd"
	)

	logs := map[string]entryReader{}
	var err error

	if c.IsVC() {
		logs[vpxdKey] = dlogReader{c, vpxdKey, nil}

		var hosts []*object.HostSystem
		if c.Cluster == nil && c.Host != nil {
			hosts = []*object.HostSystem{c.Host}
		} else {
			hosts, err = c.Datastore.AttachedClusterHosts(context.TODO(), c.Cluster)
			if err != nil {
				return nil, err
			}
		}

		for _, host := range hosts {
			lname := fmt.Sprintf("%s/%s", hostdKey, host)
			logs[lname] = dlogReader{c, hostdKey, host}
		}
	} else {
		logs[hostdKey] = dlogReader{c, hostdKey, nil}
	}

	return logs, nil
}
Example #23
// NewHelper returns a Helper.
// ctx is a context,
// s is an authenticated session
// ds is the vsphere datastore
// rootdir is the top-level directory under which all data is rooted. If it
// does not exist it is created; if it already exists this is a no-op. rootdir cannot be empty.
func NewHelper(ctx context.Context, s *session.Session, ds *object.Datastore, rootdir string) (*Helper, error) {

	d := &Helper{
		ds: ds,
		s:  s,
		fm: object.NewFileManager(s.Vim25()),
	}

	if path.IsAbs(rootdir) {
		rootdir = rootdir[1:]
	}

	if err := d.mkRootDir(ctx, rootdir); err != nil {
		log.Infof("error creating root directory %s: %s", rootdir, err)
		return nil, err
	}

	if d.RootURL == "" {
		return nil, fmt.Errorf("failed to create root directory")
	}

	log.Infof("Datastore path is %s", d.RootURL)
	return d, nil
}
Example #24
func Init(ctx context.Context, sess *session.Session) error {
	source, err := extraconfig.GuestInfoSource()
	if err != nil {
		return err
	}

	f := find.NewFinder(sess.Vim25(), false)

	extraconfig.Decode(source, &exec.VCHConfig)
	log.Debugf("Decoded VCH config for execution: %#v", exec.VCHConfig)
	ccount := len(exec.VCHConfig.ComputeResources)
	if ccount != 1 {
		detail := fmt.Sprintf("expected singular compute resource element, found %d", ccount)
		log.Errorf(detail)
		return errors.New(detail)
	}

	cr := exec.VCHConfig.ComputeResources[0]
	r, err := f.ObjectReference(ctx, cr)
	if err != nil {
		detail := fmt.Sprintf("could not get resource pool or virtual app reference from %q: %s", cr.String(), err)
		log.Errorf(detail)
		return err
	}
	switch o := r.(type) {
	case *object.VirtualApp:
		exec.VCHConfig.VirtualApp = o
		exec.VCHConfig.ResourcePool = o.ResourcePool
	case *object.ResourcePool:
		exec.VCHConfig.ResourcePool = o
	default:
		detail := fmt.Sprintf("could not get resource pool or virtual app from reference %q: object type is wrong", cr.String())
		log.Errorf(detail)
		return errors.New(detail)
	}

	// we have a resource pool, so lets create the event manager for monitoring
	exec.VCHConfig.EventManager = vsphere.NewEventManager(sess)
	// configure event manager to monitor the resource pool
	exec.VCHConfig.EventManager.AddMonitoredObject(exec.VCHConfig.ResourcePool.Reference().String())

	// instantiate the container cache now
	exec.NewContainerCache()

	// need to blacklist the VCH from eventlistening - too many reconfigures
	vch, err := guest.GetSelf(ctx, sess)
	if err != nil {
		return fmt.Errorf("Unable to get a reference to the VCH: %s", err.Error())
	}
	exec.VCHConfig.EventManager.Blacklist(vch.Reference().String())

	// other managed objects could be added for the event stream, but for now the resource pool will do
	exec.VCHConfig.EventManager.Start()

	//FIXME: temporary injection of debug network for debug nic
	ne := exec.VCHConfig.Networks["client"]
	if ne == nil {
		detail := "could not get client network reference for debug nic - this code can be removed once network mapping/dhcp client is present"
		log.Errorf(detail)
		return errors.New(detail)
	}
	nr := new(types.ManagedObjectReference)
	nr.FromString(ne.Network.ID)
	r, err = f.ObjectReference(ctx, *nr)
	if err != nil {
		detail := fmt.Sprintf("could not get client network reference from %s: %s", nr.String(), err)
		log.Errorf(detail)
		return err
	}
	exec.VCHConfig.DebugNetwork = r.(object.NetworkReference)

	extraconfig.Decode(source, &network.Config)
	log.Debugf("Decoded VCH config for network: %#v", network.Config)
	for nn, n := range network.Config.ContainerNetworks {
		pgref := new(types.ManagedObjectReference)
		if !pgref.FromString(n.ID) {
			log.Errorf("Could not reacquire object reference from id for network %s: %s", nn, n.ID)
		}

		r, err = f.ObjectReference(ctx, *pgref)
		if err != nil {
			log.Warnf("could not get network reference for %s network", nn)
			continue
		}

		n.PortGroup = r.(object.NetworkReference)
	}

	// Grab the storage layer config blobs from extra config
	extraconfig.Decode(source, &storage.Config)
	log.Debugf("Decoded VCH config for storage: %#v", storage.Config)

	// Grab the AboutInfo about our host environment
	about := sess.Vim25().ServiceContent.About
	exec.VCHConfig.VCHMhz = exec.NCPU(ctx)
	exec.VCHConfig.VCHMemoryLimit = exec.MemTotal(ctx)
	exec.VCHConfig.HostOS = about.OsType
	exec.VCHConfig.HostOSVersion = about.Version
	exec.VCHConfig.HostProductName = about.Name
	log.Debugf("Host - OS (%s), version (%s), name (%s)", about.OsType, about.Version, about.Name)
	log.Debugf("VCH limits - %d Mhz, %d MB", exec.VCHConfig.VCHMhz, exec.VCHConfig.VCHMemoryLimit)
	return nil
}
Example #25
func (c *Container) Commit(ctx context.Context, sess *session.Session, h *Handle, waitTime *int32) error {
	defer trace.End(trace.Begin("Committing handle"))

	c.Lock()
	defer c.Unlock()

	if c.vm == nil {
		// the only permissible operation is to create a VM
		if h.Spec == nil {
			return fmt.Errorf("only create operations can be committed without an existing VM")
		}

		var res *types.TaskInfo
		var err error

		if sess.IsVC() && VCHConfig.VirtualApp != nil {
			// Create the vm
			res, err = tasks.WaitForResult(ctx, func(ctx context.Context) (tasks.ResultWaiter, error) {
				return VCHConfig.VirtualApp.CreateChildVM_Task(ctx, *h.Spec.Spec(), nil)
			})

			c.State = StateCreated
		} else {
			// Find the Virtual Machine folder that we use
			var folders *object.DatacenterFolders
			folders, err = sess.Datacenter.Folders(ctx)
			if err != nil {
				log.Errorf("Could not get folders")
				return err
			}
			parent := folders.VmFolder

			// Create the vm
			res, err = tasks.WaitForResult(ctx, func(ctx context.Context) (tasks.ResultWaiter, error) {
				return parent.CreateVM(ctx, *h.Spec.Spec(), VCHConfig.ResourcePool, nil)
			})

			c.State = StateCreated
		}

		if err != nil {
			log.Errorf("Something failed. Spec was %+v", *h.Spec.Spec())
			return err
		}

		c.vm = vm.NewVirtualMachine(ctx, sess, res.Result.(types.ManagedObjectReference))

		// clear the spec as we've acted on it
		h.Spec = nil
	}

	// if we're stopping the VM, do so before the reconfigure to preserve the extraconfig
	if h.State != nil && *h.State == StateStopped {
		// stop the container
		if err := h.Container.stop(ctx, waitTime); err != nil {
			return err
		}

		c.State = *h.State
	}

	if h.Spec != nil {
		// FIXME: add check that the VM is powered off - it should be, but this will destroy the
		// extraconfig if it's not.

		s := h.Spec.Spec()
		_, err := tasks.WaitForResult(ctx, func(ctx context.Context) (tasks.ResultWaiter, error) {
			return c.vm.Reconfigure(ctx, *s)
		})

		if err != nil {
			return err
		}
	}

	if h.State != nil && *h.State == StateRunning {
		// start the container
		if err := h.Container.start(ctx); err != nil {
			return err
		}

		c.State = *h.State
	}

	c.ExecConfig = &h.ExecConfig

	// add or overwrite the container in the cache
	containers.Put(c)
	return nil
}
Example #26
File: spec.go Project: kjplatz/vic
// NewVirtualMachineConfigSpec returns a VirtualMachineConfigSpec
func NewVirtualMachineConfigSpec(ctx context.Context, session *session.Session, config *VirtualMachineConfigSpecConfig) (*VirtualMachineConfigSpec, error) {
	defer trace.End(trace.Begin(config.ID))

	log.Debugf("Adding metadata to the configspec: %+v", config.Metadata)
	// TEMPORARY

	// set VM name to prettyname-ID, to make it readable a little bit
	// if prettyname-ID is longer than max vm name length, truncate pretty name, instead of UUID, to make it unique
	nameMaxLen := maxVMNameLength - len(config.ID)
	prettyName := config.Name
	if len(prettyName) > nameMaxLen-1 {
		prettyName = prettyName[:nameMaxLen-1]
	}
	fullName := fmt.Sprintf("%s-%s", prettyName, config.ID)
	config.VMFullName = fullName

	VMPathName := config.VMPathName
	if !session.IsVSAN(ctx) {
		// VMFS requires the full path to vmx or everything but the datastore is ignored
		VMPathName = fmt.Sprintf("%s/%s/%s.vmx", config.VMPathName, config.VMFullName, config.ID)
	}

	s := &types.VirtualMachineConfigSpec{
		Name: fullName,
		Uuid: config.BiosUUID,
		Files: &types.VirtualMachineFileInfo{
			VmPathName: VMPathName,
		},
		NumCPUs:             config.NumCPUs,
		CpuHotAddEnabled:    &config.VMForkEnabled, // this disables vNUMA when true
		MemoryMB:            config.MemoryMB,
		MemoryHotAddEnabled: &config.VMForkEnabled,

		ExtraConfig: []types.BaseOptionValue{
			// lets us see the UUID for the containerfs disk (hidden from daemon)
			&types.OptionValue{Key: "disk.EnableUUID", Value: "true"},
			// needed to avoid the questions that occur when attaching multiple disks with the same uuid (bugzilla 1362918)
			&types.OptionValue{Key: "answer.msg.disk.duplicateUUID", Value: "Yes"},
			// needed to avoid the question that occur when opening a file backed serial port
			&types.OptionValue{Key: "answer.msg.serial.file.open", Value: "Append"},

			&types.OptionValue{Key: "sched.mem.lpage.maxSharedPages", Value: "256"},
			// seems to be needed to avoid children hanging shortly after fork
			&types.OptionValue{Key: "vmotion.checkpointSVGAPrimarySize", Value: "4194304"},

			// trying this out - if it works then we need to determine if we can rely on serial0 being the correct index.
			&types.OptionValue{Key: "serial0.hardwareFlowControl", Value: "TRUE"},

			// https://enatai-jira.eng.vmware.com/browse/BON-257
			// Hotadd memory above 3 GB not working
			&types.OptionValue{Key: "memory.noHotAddOver4GB", Value: "FALSE"},
			&types.OptionValue{Key: "memory.maxGrow", Value: "512"},

			// http://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2030189
			&types.OptionValue{Key: "tools.remindInstall", Value: "FALSE"},
			&types.OptionValue{Key: "tools.upgrade.policy", Value: "manual"},
		},
	}

	// encode the config as optionvalues
	cfg := map[string]string{}
	extraconfig.Encode(extraconfig.MapSink(cfg), config.Metadata)
	metaCfg := vmomi.OptionValueFromMap(cfg)

	// merge it with the spec
	s.ExtraConfig = append(s.ExtraConfig, metaCfg...)

	vmcs := &VirtualMachineConfigSpec{
		Session:                  session,
		VirtualMachineConfigSpec: s,
		config: config,
	}

	log.Debugf("Virtual machine config spec created: %+v", vmcs)
	return vmcs, nil
}
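A sketch of building a spec; every field value below is illustrative, and the config fields are the ones referenced by the function above:

// hypothetical usage
conf := &VirtualMachineConfigSpecConfig{
	ID:         "0123456789abcdef",
	Name:       "my-container",
	NumCPUs:    2,
	MemoryMB:   2048,
	VMPathName: "[LocalDS_0]",
}
spec, err := NewVirtualMachineConfigSpec(ctx, sess, conf)
if err != nil {
	return err
}
log.Debugf("VM will be created as %q", spec.VirtualMachineConfigSpec.Name)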
Example #27
func Init(ctx context.Context, sess *session.Session) error {
	source, err := extraconfig.GuestInfoSource()
	if err != nil {
		return err
	}

	f := find.NewFinder(sess.Vim25(), false)

	extraconfig.Decode(source, &exec.Config)
	log.Debugf("Decoded VCH config for execution: %#v", exec.Config)
	ccount := len(exec.Config.ComputeResources)
	if ccount != 1 {
		detail := fmt.Sprintf("expected singular compute resource element, found %d", ccount)
		log.Errorf(detail)
		return errors.New(detail)
	}

	cr := exec.Config.ComputeResources[0]
	r, err := f.ObjectReference(ctx, cr)
	if err != nil {
		detail := fmt.Sprintf("could not get resource pool reference from %s: %s", cr.String(), err)
		log.Errorf(detail)
		return err
	}
	exec.Config.ResourcePool = r.(*object.ResourcePool)
	//FIXME: temporary injection of debug network for debug nic
	ne := exec.Config.Networks["client"]
	if ne == nil {
		detail := "could not get client network reference for debug nic - this code can be removed once network mapping/dhcp client is present"
		log.Errorf(detail)
		return errors.New(detail)
	}
	nr := new(types.ManagedObjectReference)
	nr.FromString(ne.Network.ID)
	r, err = f.ObjectReference(ctx, *nr)
	if err != nil {
		detail := fmt.Sprintf("could not get client network reference from %s: %s", nr.String(), err)
		log.Errorf(detail)
		return err
	}
	exec.Config.DebugNetwork = r.(object.NetworkReference)

	extraconfig.Decode(source, &network.Config)
	log.Debugf("Decoded VCH config for network: %#v", network.Config)
	for nn, n := range network.Config.ContainerNetworks {
		pgref := new(types.ManagedObjectReference)
		if !pgref.FromString(n.ID) {
			log.Errorf("Could not reacquire object reference from id for network %s: %s", nn, n.ID)
		}

		r, err = f.ObjectReference(ctx, *pgref)
		if err != nil {
			log.Warnf("could not get network reference for %s network", nn)
			continue
		}

		n.PortGroup = r.(object.NetworkReference)
	}

	return nil
}
Example #28
File: commit.go Project: vmware/vic
// Commit executes the required steps on the handle
func Commit(ctx context.Context, sess *session.Session, h *Handle, waitTime *int32) error {
	defer trace.End(trace.Begin(h.ExecConfig.ID))

	c := Containers.Container(h.ExecConfig.ID)
	creation := h.vm == nil
	if creation {
		if h.Spec == nil {
			return fmt.Errorf("a spec must be provided for create operations")
		}

		if sess == nil {
			// session must not be nil
			return fmt.Errorf("no session provided for create operations")
		}


		if c != nil {
			return fmt.Errorf("a container already exists in the cache with this ID")
		}

		var res *types.TaskInfo
		var err error
		if sess.IsVC() && Config.VirtualApp.ResourcePool != nil {
			// Create the vm
			res, err = tasks.WaitForResult(ctx, func(ctx context.Context) (tasks.Task, error) {
				return Config.VirtualApp.CreateChildVM_Task(ctx, *h.Spec.Spec(), nil)
			})
		} else {
			// Find the Virtual Machine folder that we use
			var folders *object.DatacenterFolders
			folders, err = sess.Datacenter.Folders(ctx)
			if err != nil {
				log.Errorf("Could not get folders")
				return err
			}
			parent := folders.VmFolder

			// Create the vm
			res, err = tasks.WaitForResult(ctx, func(ctx context.Context) (tasks.Task, error) {
				return parent.CreateVM(ctx, *h.Spec.Spec(), Config.ResourcePool, nil)
			})
		}

		if err != nil {
			log.Errorf("Something failed. Spec was %+v", *h.Spec.Spec())
			return err
		}

		h.vm = vm.NewVirtualMachine(ctx, sess, res.Result.(types.ManagedObjectReference))
		c = newContainer(&h.containerBase)
		Containers.Put(c)
		// inform of creation irrespective of remaining operations
		publishContainerEvent(c.ExecConfig.ID, time.Now().UTC(), events.ContainerCreated)

		// clear the spec as we've acted on it - this prevents a reconfigure from occurring in follow-on
		// processing
		h.Spec = nil
	}

	// if we're stopping the VM, do so before the reconfigure to preserve the extraconfig
	refresh := true
	if h.TargetState() == StateStopped {
		if h.Runtime == nil {
			log.Warnf("Commit called with incomplete runtime state for %s", h.ExecConfig.ID)
		}

		if h.Runtime != nil && h.Runtime.PowerState == types.VirtualMachinePowerStatePoweredOff {
			log.Infof("Dropping duplicate power off operation for %s", h.ExecConfig.ID)
		} else {
			// stop the container
			if err := c.stop(ctx, waitTime); err != nil {
				return err
			}

			// inform of creation irrespective of remaining operations
			publishContainerEvent(h.ExecConfig.ID, time.Now().UTC(), events.ContainerStopped)

			// we must refresh now to get the new ChangeVersion - this is used to gate on powerstate in the reconfigure
			// because we cannot set the ExtraConfig if the VM is powered on. There is still a race here unfortunately because
			// tasks don't appear to contain the new ChangeVersion
			// we don't use refresh because we want to keep the extraconfig state
			base, err := h.updates(ctx)
			if err != nil {
				// TODO: can we recover here, or at least set useful state for inspection?
				return err
			}
			h.Runtime = base.Runtime
			h.Config = base.Config

			refresh = false
		}
	}

	// reconfigure operation
	if h.Spec != nil {
		if h.Runtime == nil {
			log.Errorf("Refusing to perform reconfigure operation with incomplete runtime state for %s", h.ExecConfig.ID)
		} else {
			// ensure that our logic based on Runtime state remains valid

			// NOTE: this inline refresh can be removed when switching away from guestinfo where we have non-persistence issues
			// when updating ExtraConfig via the API with a powered on VM - we therefore have to be absolutely certain about the
			// power state to decide if we can continue without nilifying extraconfig

			for s := h.Spec.Spec(); ; refresh, s = true, h.Spec.Spec() {
				// FIXME!!! this is a temporary hack until the concurrent modification retry logic is in place
				if refresh {
					base, err := h.updates(ctx)
					if err == nil {
						h.Runtime = base.Runtime
						h.Config = base.Config
					}
				}

				s.ChangeVersion = h.Config.ChangeVersion

				// nilify ExtraConfig if vm is running
				if h.Runtime.PowerState == types.VirtualMachinePowerStatePoweredOn {
					log.Errorf("Nilifying ExtraConfig as we are running")
					s.ExtraConfig = nil
				}

				_, err := h.vm.WaitForResult(ctx, func(ctx context.Context) (tasks.Task, error) {
					return h.vm.Reconfigure(ctx, *s)
				})
				if err != nil {
					log.Errorf("Reconfigure failed with %#+v", err)

					// Check whether we get ConcurrentAccess and wrap it if needed
					if f, ok := err.(types.HasFault); ok {
						switch f.Fault().(type) {
						case *types.ConcurrentAccess:
							log.Errorf("We have ConcurrentAccess for version %s", s.ChangeVersion)

							continue
							// return ConcurrentAccessError{err}
						}
					}
					return err
				}

				break
			}
		}
	}

	// best effort update of container cache using committed state - this will not reflect the power on below, however
	// this is primarily for updating ExtraConfig state.
	if !creation {
		defer c.RefreshFromHandle(ctx, h)
	}

	if h.TargetState() == StateRunning {
		if h.Runtime != nil && h.Runtime.PowerState == types.VirtualMachinePowerStatePoweredOn {
			log.Infof("Dropping duplicate power on operation for %s", h.ExecConfig.ID)
			return nil
		}

		if h.Runtime == nil && !creation {
			log.Warnf("Commit called with incomplete runtime state for %s", h.ExecConfig.ID)
		}

		// start the container
		if err := c.start(ctx); err != nil {
			return err
		}

		// inform of creation irrespective of remaining operations
		publishContainerEvent(h.ExecConfig.ID, time.Now().UTC(), events.ContainerStarted)
	}

	return nil
}
Example #29
File: exec.go Project: vmware/vic
func Init(ctx context.Context, sess *session.Session, source extraconfig.DataSource, _ extraconfig.DataSink) error {
	initializer.once.Do(func() {
		var err error
		defer func() {
			if err != nil {
				initializer.err = err
			}
		}()
		f := find.NewFinder(sess.Vim25(), false)

		extraconfig.Decode(source, &Config)

		log.Debugf("Decoded VCH config for execution: %#v", Config)
		ccount := len(Config.ComputeResources)
		if ccount != 1 {
			err = fmt.Errorf("expected singular compute resource element, found %d", ccount)
			log.Error(err)
			return
		}

		cr := Config.ComputeResources[0]
		var r object.Reference
		r, err = f.ObjectReference(ctx, cr)
		if err != nil {
			err = fmt.Errorf("could not get resource pool or virtual app reference from %q: %s", cr.String(), err)
			log.Error(err)
			return
		}
		switch o := r.(type) {
		case *object.VirtualApp:
			Config.VirtualApp = o
			Config.ResourcePool = o.ResourcePool
		case *object.ResourcePool:
			Config.ResourcePool = o
		default:
			err = fmt.Errorf("could not get resource pool or virtual app from reference %q: object type is wrong", cr.String())
			log.Error(err)
			return
		}

		// we want to monitor the cluster, so create a vSphere Event Collector
		// The cluster managed object will either be a proper vSphere Cluster or
		// a specific host when standalone mode
		ec := vsphere.NewCollector(sess.Vim25(), sess.Cluster.Reference().String())

		// start the collection of vsphere events
		err = ec.Start()
		if err != nil {
			err = fmt.Errorf("%s failed to start: %s", ec.Name(), err)
			log.Error(err)
			return
		}

		// create the event manager &  register the existing collector
		Config.EventManager = event.NewEventManager(ec)

		// subscribe the exec layer to the event stream for Vm events
		Config.EventManager.Subscribe(events.NewEventType(vsphere.VMEvent{}).Topic(), "exec", eventCallback)
		// subscribe callback to handle vm registered event
		Config.EventManager.Subscribe(events.NewEventType(vsphere.VMEvent{}).Topic(), "registeredVMEvent", func(ie events.Event) {
			registeredVMCallback(sess, ie)
		})

		// instantiate the container cache now
		NewContainerCache()

		// Grab the AboutInfo about our host environment
		about := sess.Vim25().ServiceContent.About
		Config.VCHMhz = NCPU(ctx)
		Config.VCHMemoryLimit = MemTotal(ctx)
		Config.HostOS = about.OsType
		Config.HostOSVersion = about.Version
		Config.HostProductName = about.Name
		log.Debugf("Host - OS (%s), version (%s), name (%s)", about.OsType, about.Version, about.Name)
		log.Debugf("VCH limits - %d Mhz, %d MB", Config.VCHMhz, Config.VCHMemoryLimit)

		// sync container cache
		if err = Containers.sync(ctx, sess); err != nil {
			return
		}
	})
	return initializer.err
}
Example #30
File: vm.go Project: vmware/vic
// NewVirtualMachine returns a new VirtualMachine object
func NewVirtualMachine(ctx context.Context, session *session.Session, moref types.ManagedObjectReference) *VirtualMachine {
	return NewVirtualMachineFromVM(ctx, session, object.NewVirtualMachine(session.Vim25(), moref))
}
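A sketch tying this constructor to the task results seen in Examples #25 and #28, where a CreateVM task returns the new moref; the surrounding variables are hypothetical:

// hypothetical usage: res is a *types.TaskInfo from a completed CreateVM task
moref := res.Result.(types.ManagedObjectReference)
v := NewVirtualMachine(ctx, sess, moref)
uuid, err := v.UUID(ctx)
if err != nil {
	return err
}
log.Infof("created VM with BIOS UUID %s", uuid)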