Example #1
func Init(ctx context.Context, sess *session.Session) error {
	source, err := extraconfig.GuestInfoSource()
	if err != nil {
		return err
	}

	sink, err := extraconfig.GuestInfoSink()
	if err != nil {
		return err
	}

	// Grab the storage layer config blobs from extra config
	extraconfig.Decode(source, &storage.Config)
	log.Debugf("Decoded VCH config for storage: %#v", storage.Config)

	// create or restore a portlayer k/v store in the VCH's directory.
	vch, err := guest.GetSelf(ctx, sess)
	if err != nil {
		return err
	}

	vchvm := vm.NewVirtualMachineFromVM(ctx, sess, vch)
	vmPath, err := vchvm.VMPathName(ctx)
	if err != nil {
		return err
	}

	// vmPath is set to the vmx.  Grab the directory from that.
	vmFolder, err := datastore.ToURL(path.Dir(vmPath))
	if err != nil {
		return err
	}

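	// store.Init creates or reopens the portlayer k/v store in the VCH folder resolved above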
	if err = store.Init(ctx, sess, vmFolder); err != nil {
		return err
	}

	if err = exec.Init(ctx, sess, source, sink); err != nil {
		return err
	}

	if err = network.Init(ctx, sess, source, sink); err != nil {
		return err
	}

	return nil
}
Example #2
func NewDiskManager(ctx context.Context, session *session.Session) (*Manager, error) {
	vm, err := guest.GetSelf(ctx, session)
	if err != nil {
		return nil, errors.Trace(err)
	}

	// create handle to the docker daemon VM as we need to mount disks on it
	controller, byPathFormat, err := verifyParavirtualScsiController(ctx, vm)
	if err != nil {
		return nil, errors.Trace(err)
	}

	d := &Manager{
		maxAttached:  make(chan bool, MaxAttachedDisks),
		vm:           vm,
		controller:   controller,
		byPathFormat: byPathFormat,
	}
	return d, nil
}
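A minimal, hypothetical sketch of how this constructor might be called. It assumes the same package and imports as the snippet above, an already-connected *session.Session, and that the code runs on the VM whose paravirtual SCSI controller is being verified; newManagerExample and its body are illustrative only, not part of the original example.
func newManagerExample(ctx context.Context, sess *session.Session) (*Manager, error) {
	// NewDiskManager resolves the calling VM via guest.GetSelf and verifies its
	// paravirtual SCSI controller, so it only succeeds when run on that VM with
	// a live vSphere session.
	mgr, err := NewDiskManager(ctx, sess)
	if err != nil {
		return nil, err
	}

	// mgr would subsequently be used to create, attach and detach disks; those
	// methods are not shown in the example above.
	return mgr, nil
}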
Example #3
func Init(ctx context.Context, sess *session.Session) error {
	source, err := extraconfig.GuestInfoSource()
	if err != nil {
		return err
	}

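	// the finder is used below to resolve managed object references (compute resource, networks) from the decoded config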
	f := find.NewFinder(sess.Vim25(), false)

	extraconfig.Decode(source, &exec.VCHConfig)
	log.Debugf("Decoded VCH config for execution: %#v", exec.VCHConfig)
	ccount := len(exec.VCHConfig.ComputeResources)
	if ccount != 1 {
		detail := fmt.Sprintf("expected a single compute resource element, found %d", ccount)
		log.Errorf(detail)
		return errors.New(detail)
	}

	cr := exec.VCHConfig.ComputeResources[0]
	r, err := f.ObjectReference(ctx, cr)
	if err != nil {
		detail := fmt.Sprintf("could not get resource pool or virtual app reference from %q: %s", cr.String(), err)
		log.Errorf(detail)
		return err
	}
	switch o := r.(type) {
	case *object.VirtualApp:
		exec.VCHConfig.VirtualApp = o
		exec.VCHConfig.ResourcePool = o.ResourcePool
	case *object.ResourcePool:
		exec.VCHConfig.ResourcePool = o
	default:
		detail := fmt.Sprintf("could not get resource pool or virtual app from reference %q: object type is wrong", cr.String())
		log.Errorf(detail)
		return errors.New(detail)
	}

	// we have a resource pool, so let's create the event manager for monitoring
	exec.VCHConfig.EventManager = vsphere.NewEventManager(sess)
	// configure event manager to monitor the resource pool
	exec.VCHConfig.EventManager.AddMonitoredObject(exec.VCHConfig.ResourcePool.Reference().String())

	// instantiate the container cache now
	exec.NewContainerCache()

	// need to blacklist the VCH from event listening - too many reconfigures
	vch, err := guest.GetSelf(ctx, sess)
	if err != nil {
		return fmt.Errorf("Unable to get a reference to the VCH: %s", err.Error())
	}
	exec.VCHConfig.EventManager.Blacklist(vch.Reference().String())

	// other managed objects could be added for the event stream, but for now the resource pool will do
	exec.VCHConfig.EventManager.Start()

	// FIXME: temporary injection of debug network for debug nic
	ne := exec.VCHConfig.Networks["client"]
	if ne == nil {
		detail := "could not get client network reference for debug nic - this code can be removed once network mapping/dhcp client is present"
		log.Errorf(detail)
		return errors.New(detail)
	}
	nr := new(types.ManagedObjectReference)
	if !nr.FromString(ne.Network.ID) {
		return fmt.Errorf("could not parse managed object reference from client network ID %q", ne.Network.ID)
	}
	r, err = f.ObjectReference(ctx, *nr)
	if err != nil {
		detail := fmt.Sprintf("could not get client network reference from %s: %s", nr.String(), err)
		log.Errorf(detail)
		return err
	}
	exec.VCHConfig.DebugNetwork = r.(object.NetworkReference)

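	// decode the network layer config and resolve each container network's port group reference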
	extraconfig.Decode(source, &network.Config)
	log.Debugf("Decoded VCH config for network: %#v", network.Config)
	for nn, n := range network.Config.ContainerNetworks {
		pgref := new(types.ManagedObjectReference)
		if !pgref.FromString(n.ID) {
			log.Errorf("Could not reacquire object reference from id for network %s: %s", nn, n.ID)
			continue
		}

		r, err = f.ObjectReference(ctx, *pgref)
		if err != nil {
			log.Warnf("could not get network reference for %s network", nn)
			continue
		}

		n.PortGroup = r.(object.NetworkReference)
	}

	// Grab the storage layer config blobs from extra config
	extraconfig.Decode(source, &storage.Config)
	log.Debugf("Decoded VCH config for storage: %#v", storage.Config)

	// Grab the AboutInfo about our host environment
	about := sess.Vim25().ServiceContent.About
	exec.VCHConfig.VCHMhz = exec.NCPU(ctx)
	exec.VCHConfig.VCHMemoryLimit = exec.MemTotal(ctx)
	exec.VCHConfig.HostOS = about.OsType
	exec.VCHConfig.HostOSVersion = about.Version
	exec.VCHConfig.HostProductName = about.Name
	log.Debugf("Host - OS (%s), version (%s), name (%s)", about.OsType, about.Version, about.Name)
	log.Debugf("VCH limits - %d Mhz, %d MB", exec.VCHConfig.VCHMhz, exec.VCHConfig.VCHMemoryLimit)
	return nil
}
Example #4
// listVMPaths returns an array of datastore paths for VMs associated with the
// VCH - this includes containerVMs and the appliance
func listVMPaths(ctx context.Context, s *session.Session) ([]logfile, error) {
	defer trace.End(trace.Begin(""))

	var err error
	var children []*vm.VirtualMachine

	if len(vchConfig.ComputeResources) == 0 {
		return nil, errors.New("compute resources is empty")
	}

	ref := vchConfig.ComputeResources[0]
	rp := compute.NewResourcePool(ctx, s, ref)
	if children, err = rp.GetChildrenVMs(ctx, s); err != nil {
		return nil, err
	}

	self, err := guest.GetSelf(ctx, s)
	if err != nil {
		log.Errorf("Unable to get handle to self for log filtering: %s", err)
	}

	log.Infof("Found %d candidate VMs in resource pool %s for log collection", len(children), ref.String())

	logfiles := []logfile{}
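	// collect the datastore path and display name of each child VM, skipping the appliance itself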
	for _, child := range children {
		path, err := child.DSPath(ctx)
		if err != nil {
			log.Errorf("Unable to get datastore path for child VM %s: %s", child.Reference(), err)
			// we need to get as many logs as possible
			continue
		}

		logname, err := child.Name(ctx)
		if err != nil {
			log.Errorf("Unable to get the vm name for %s: %s", child.Reference(), err)
			continue
		}

		if self != nil && child.Reference().String() == self.Reference().String() {
			// FIXME: until #2630 is addressed, and we confirm this filters secrets from appliance vmware.log as well,
			// we're skipping direct collection of those logs.
			log.Info("Skipping collection for appliance VM (moref match)")
			continue
		}

		// backup check if we were unable to initialize self for some reason
		if self == nil && logname == vchConfig.Name {
			log.Info("Skipping collection for appliance VM (string match)")
			continue
		}

		log.Debugf("Adding VM for log collection: %s", path.String())

		lf := logfile{
			URL:    path,
			VMName: logname,
		}

		logfiles = append(logfiles, lf)
	}

	log.Infof("Collecting logs from %d VMs", len(logfiles))
	log.Infof("Found VM paths: %#v", logfiles)
	return logfiles, nil
}