Example #1
func (v *Validator) inventoryPathToComputePath(path string) string {
	defer trace.End(trace.Begin(path))

	// sanity check datacenter
	if !strings.HasPrefix(path, v.DatacenterPath) {
		log.Debugf("Expected path to be within target datacenter %q: %q", v.DatacenterPath, path)
		v.NoteIssue(errors.New("inventory path was not in datacenter scope"))
		return ""
	}

	// inventory path is always /dc/host/computeResource/Resources/path/to/pool
	// NOTE: all of the indexes are +1 because the leading / means we have an empty string for [0]
	pElems := strings.Split(path, "/")
	if len(pElems) < 4 {
		log.Debugf("Expected path to be fully qualified, e.g. /dcName/host/clusterName/Resources/poolName: %s", path)
		v.NoteIssue(errors.New("inventory path format was not recognised"))
		return ""
	}

	if len(pElems) == 4 || len(pElems) == 5 {
		// cluster only or cluster/Resources
		return pElems[3]
	}

	// messy but avoid reallocation - overwrite Resources with cluster name
	pElems[4] = pElems[3]

	// /dc/host/cluster/Resources/path/to/pool
	return strings.Join(pElems[4:], "/")
}
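The index arithmetic above is easiest to see with concrete paths. A standalone sketch (hypothetical paths, not from the vic test suite) that mirrors the slicing logic:

package main

import (
	"fmt"
	"strings"
)

// computePath mirrors inventoryPathToComputePath's slicing, minus the
// validation and issue reporting, for illustration only.
func computePath(path string) string {
	pElems := strings.Split(path, "/")
	if len(pElems) < 4 {
		return ""
	}
	if len(pElems) == 4 || len(pElems) == 5 {
		return pElems[3] // cluster only or cluster/Resources
	}
	pElems[4] = pElems[3] // overwrite "Resources" with the cluster name
	return strings.Join(pElems[4:], "/")
}

func main() {
	fmt.Println(computePath("/dc1/host/cluster1"))                      // cluster1
	fmt.Println(computePath("/dc1/host/cluster1/Resources"))            // cluster1
	fmt.Println(computePath("/dc1/host/cluster1/Resources/prod/pool1")) // cluster1/prod/pool1
}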
Example #2
func (v *Validator) dpgMorefHelper(ctx context.Context, ref string) (string, error) {
	defer trace.End(trace.Begin(ref))

	moref := new(types.ManagedObjectReference)
	ok := moref.FromString(ref)
	if !ok {
		// TODO: error message about no such match and how to get a network list
		return "", errors.New("could not restore serialized managed object reference: " + ref)
	}

	net, err := v.Session.Finder.ObjectReference(ctx, *moref)
	if err != nil {
		// TODO: error message about no such match and how to get a network list
		return "", errors.New("unable to locate network from moref: " + ref)
	}

	// ensure that the type of the network is a Distributed Port Group if the target is a vCenter
	// if it's not then any network suffices
	if v.IsVC() {
		_, dpg := net.(*object.DistributedVirtualPortgroup)
		if !dpg {
			return "", fmt.Errorf("%q is not a Distributed Port Group", ref)
		}
	}

	return ref, nil
}
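The serialized reference handled here is govmomi's "Type:Value" form. A minimal round-trip sketch (the moref value is hypothetical):

package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	ref := "DistributedVirtualPortgroup:dvportgroup-42"

	moref := new(types.ManagedObjectReference)
	if ok := moref.FromString(ref); !ok {
		fmt.Println("not a valid moref string")
		return
	}
	fmt.Println(moref.Type)            // DistributedVirtualPortgroup
	fmt.Println(moref.Value)           // dvportgroup-42
	fmt.Println(moref.String() == ref) // true
}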
Example #3
File: create.go Project: vmware/vic
func (d *Dispatcher) CreateVCH(conf *config.VirtualContainerHostConfigSpec, settings *data.InstallerData) error {
	defer trace.End(trace.Begin(conf.Name))

	var err error

	if err = d.checkExistence(conf, settings); err != nil {
		return err
	}

	if d.isVC && !settings.UseRP {
		if d.vchVapp, err = d.createVApp(conf, settings); err != nil {
			detail := fmt.Sprintf("Creating virtual app failed: %s", err)
			if !d.force {
				return errors.New(detail)
			}

			log.Error(detail)
			log.Errorf("Deploying vch under parent pool %q, (--force=true)", settings.ResourcePoolPath)
			d.vchPool = d.session.Pool
			conf.ComputeResources = append(conf.ComputeResources, d.vchPool.Reference())
		}
	} else {
		if d.vchPool, err = d.createResourcePool(conf, settings); err != nil {
			detail := fmt.Sprintf("Creating resource pool failed: %s", err)
			if !d.force {
				return errors.New(detail)
			}

			log.Error(detail)
			log.Errorf("Deploying vch under parent pool %q, (--force=true)", settings.ResourcePoolPath)
			d.vchPool = d.session.Pool
			conf.ComputeResources = append(conf.ComputeResources, d.vchPool.Reference())
		}
	}

	if err = d.createBridgeNetwork(conf); err != nil {
		return err
	}

	if err = d.createVolumeStores(conf); err != nil {
		return errors.Errorf("Exiting because we could not create volume stores due to error: %s", err)
	}

	if err = d.createAppliance(conf, settings); err != nil {
		return errors.Errorf("Creating the appliance failed with %s. Exiting...", err)
	}

	if err = d.uploadImages(settings.ImageFiles); err != nil {
		return errors.Errorf("Uploading images failed with %s. Exiting...", err)
	}

	if d.session.IsVC() {
		if err = d.RegisterExtension(conf, settings.Extension); err != nil {
			return errors.Errorf("Error registering VCH vSphere extension: %s", err)
		}
	}
	return d.startAppliance(conf)
}
Example #4
File: list.go Project: vmware/vic
func (l *List) Run(cli *cli.Context) (err error) {
	if err = l.processParams(); err != nil {
		return err
	}

	if l.Debug.Debug > 0 {
		log.SetLevel(log.DebugLevel)
		trace.Logger.Level = log.DebugLevel
	}

	if len(cli.Args()) > 0 {
		log.Errorf("Unknown argument: %s", cli.Args()[0])
		return errors.New("invalid CLI arguments")
	}

	log.Infof("### Listing VCHs ####")

	ctx, cancel := context.WithTimeout(context.Background(), l.Timeout)
	defer cancel()
	defer func() {
		if ctx.Err() != nil && ctx.Err() == context.DeadlineExceeded {
			//context deadline exceeded, replace returned error message
			err = errors.Errorf("List timed out: use --timeout to add more time")
		}
	}()

	var validator *validate.Validator
	if l.Data.ComputeResourcePath == "" {
		validator, err = validate.CreateNoDCCheck(ctx, l.Data)
	} else {
		validator, err = validate.NewValidator(ctx, l.Data)
	}
	if err != nil {
		log.Errorf("List cannot continue - failed to create validator: %s", err)
		return errors.New("list failed")
	}

	_, err = validator.ValidateTarget(ctx, l.Data)
	if err != nil {
		log.Errorf("List cannot continue - target validation failed: %s", err)
		return err
	}
	_, err = validator.ValidateCompute(ctx, l.Data)
	if err != nil {
		log.Errorf("List cannot continue - compute resource validation failed: %s", err)
		return err
	}
	executor := management.NewDispatcher(validator.Context, validator.Session, nil, false)
	vchs, err := executor.SearchVCHs(validator.ResourcePoolPath)
	if err != nil {
		log.Errorf("List cannot continue - failed to search VCHs in %s: %s", validator.ResourcePoolPath, err)
		return err
	}
	l.prettyPrint(cli, ctx, vchs, executor)
	return nil
}
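Because err is a named return value, the deferred closure above can swap the generic context.DeadlineExceeded for a friendlier message after the function body has returned. A standalone sketch of the same pattern (doWork and its durations are hypothetical):

package main

import (
	"context"
	"fmt"
	"time"
)

// doWork demonstrates replacing a deadline error via a named return value.
func doWork(timeout time.Duration) (err error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	defer func() {
		if ctx.Err() == context.DeadlineExceeded {
			// deadline exceeded - replace the returned error message
			err = fmt.Errorf("operation timed out: use a longer timeout")
		}
	}()

	select {
	case <-time.After(time.Second): // simulated slow work
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	fmt.Println(doWork(10 * time.Millisecond)) // operation timed out: use a longer timeout
}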
Example #5
File: update.go Project: vmware/vic
func (v *Validator) assertTarget(conf *config.VirtualContainerHostConfigSpec) {
	defer trace.End(trace.Begin(""))
	if conf.Target.User != nil {
		if _, set := conf.Target.User.Password(); set {
			v.NoteIssue(errors.New("Password should not be set in target URL"))
		}
	}

	if !v.IsVC() && conf.UserPassword == "" {
		v.NoteIssue(errors.New("ESX credential is not set"))
	}
}
Example #6
func (d *Dispatcher) deleteDatastoreFiles(ds *object.Datastore, path string, force bool) (bool, error) {
	defer trace.End(trace.Begin(fmt.Sprintf("path %q, force %t", path, force)))

	// refuse to delete everything on the datastore, ignore force
	if path == "" {
		dsn, _ := ds.ObjectName(d.ctx)
		msg := fmt.Sprintf("refusing to remove datastore files for path \"\" on datastore %q", dsn)
		return false, errors.New(msg)
	}

	var empty bool
	dsPath := ds.Path(path)

	res, err := d.lsFolder(ds, dsPath)
	if err != nil {
		if !types.IsFileNotFound(err) {
			err = errors.Errorf("Failed to browse folder %q: %s", dsPath, err)
			return empty, err
		}
		log.Debugf("Folder %q is not found", dsPath)
		empty = true
		return empty, nil
	}
	if len(res.File) > 0 && !force {
		log.Debugf("Folder %q is not empty, leave it there", dsPath)
		return empty, nil
	}

	m := object.NewFileManager(ds.Client())
	if err = d.deleteFilesIteratively(m, ds, dsPath); err != nil {
		return empty, err
	}
	return true, nil
}
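For reference, govmomi's Datastore.Path prefixes the datastore name in square brackets, so the folder browsed above is addressed in the usual vSphere form (datastore name hypothetical):

// with a datastore named "datastore1":
//   ds.Path("vic/containers")  ->  "[datastore1] vic/containers"
// this is the dsPath handed to lsFolder and deleteFilesIteratively above.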
Example #7
File: config.go Project: vmware/vic
func (v *Validator) checkLicense(ctx context.Context) error {
	var invalidLic []string
	client := v.Session.Client.Client

	lm := license.NewManager(client)
	licenses, err := lm.List(ctx)
	if err != nil {
		return err
	}
	v.checkEvalLicense(licenses)

	features := []string{"serialuri"}

	for _, feature := range features {
		if len(licenses.WithFeature(feature)) == 0 {
			msg := fmt.Sprintf("Host license missing feature %q", feature)
			invalidLic = append(invalidLic, msg)
		}
	}

	if len(invalidLic) > 0 {
		log.Errorf("License check FAILED:")
		for _, h := range invalidLic {
			log.Errorf("  %q", h)
		}
		msg := "License does not meet minimum requirements to use VIC"
		return errors.New(msg)
	}
	log.Infof("License check OK")
	return nil
}
Example #8
func (d *Dispatcher) findApplianceByID(conf *metadata.VirtualContainerHostConfigSpec) (*vm.VirtualMachine, error) {
	defer trace.End(trace.Begin(""))

	var err error
	var vmm *vm.VirtualMachine

	moref := new(types.ManagedObjectReference)
	if ok := moref.FromString(conf.ID); !ok {
		message := "Failed to get appliance VM mob reference"
		log.Errorf(message)
		return nil, errors.New(message)
	}
	ref, err := d.session.Finder.ObjectReference(d.ctx, *moref)
	if err != nil {
		if _, ok := err.(*find.NotFoundError); !ok {
			err = errors.Errorf("Failed to query appliance (%s): %s", moref, err)
			return nil, err
		}
		log.Debugf("Appliance is not found")
		return nil, nil

	}
	ovm, ok := ref.(*object.VirtualMachine)
	if !ok {
		log.Errorf("Failed to find VM %s, %s", moref, err)
		return nil, err
	}
	vmm = vm.NewVirtualMachine(d.ctx, d.session, ovm.Reference())
	return vmm, nil
}
Example #9
func (d *Dispatcher) DeleteVCHInstances(vmm *vm.VirtualMachine, conf *config.VirtualContainerHostConfigSpec) error {
	defer trace.End(trace.Begin(""))

	log.Infof("Removing VMs")
	var errs []string

	var err error
	var children []*vm.VirtualMachine

	rpRef := conf.ComputeResources[len(conf.ComputeResources)-1]
	ref, err := d.session.Finder.ObjectReference(d.ctx, rpRef)
	if err != nil {
		err = errors.Errorf("Failed to get VCH resource pool %q: %s", rpRef, err)
		return err
	}
	switch ref.(type) {
	case *object.VirtualApp, *object.ResourcePool:
		// ok
	default:
		// err is nil here - the reference resolved to an unexpected type
		err = errors.Errorf("%q is neither a virtual app nor a resource pool", rpRef)
		log.Error(err)
		return err
	}

	rp := compute.NewResourcePool(d.ctx, d.session, ref.Reference())
	if children, err = rp.GetChildrenVMs(d.ctx, d.session); err != nil {
		return err
	}

	ds, err := d.session.Finder.Datastore(d.ctx, conf.ImageStores[0].Host)
	if err != nil {
		err = errors.Errorf("Failed to find image datastore %q", conf.ImageStores[0].Host)
		return err
	}
	d.session.Datastore = ds

	for _, child := range children {
		name, err := child.Name(d.ctx)
		if err != nil {
			errs = append(errs, err.Error())
			continue
		}
		// Leave the VCH appliance in place until everything else is removed, because it holds the VCH configuration; the user can then retry the delete if anything fails.
		if name == conf.Name {
			continue
		}
		if err = d.deleteVM(child, d.force); err != nil {
			errs = append(errs, err.Error())
		}
	}

	if len(errs) > 0 {
		log.Debugf("Error deleting container VMs %s", errs)
		return errors.New(strings.Join(errs, "\n"))
	}

	return nil
}
Example #10
func (d *Dispatcher) createBridgeNetwork(conf *config.VirtualContainerHostConfigSpec) error {
	defer trace.End(trace.Begin(""))

	// if the bridge network is already extant there's nothing to do
	bnet := conf.ExecutorConfig.Networks[conf.BridgeNetwork]
	if bnet != nil && bnet.ID != "" {
		return nil
	}

	// network didn't exist during validation given we don't have a moref, so create it
	if d.session.Client.IsVC() {
		// double check
		return errors.New("bridge network must already exist for vCenter environments")
	}

	// in this case the name to use is held in container network ID
	name := bnet.Network.ID

	log.Infof("Creating VirtualSwitch")
	hostNetSystem, err := d.session.Host.ConfigManager().NetworkSystem(d.ctx)
	if err != nil {
		err = errors.Errorf("Failed to retrieve host network system: %s", err)
		return err
	}

	if err = hostNetSystem.AddVirtualSwitch(d.ctx, name, &types.HostVirtualSwitchSpec{
		NumPorts: 1024,
	}); err != nil {
		err = errors.Errorf("Failed to add virtual switch (%q): %s", name, err)
		return err
	}

	log.Infof("Creating Portgroup")
	if err = hostNetSystem.AddPortGroup(d.ctx, types.HostPortGroupSpec{
		Name:        name,
		VlanId:      1, // TODO: expose this for finer grained grouping within the switch
		VswitchName: name,
		Policy:      types.HostNetworkPolicy{},
	}); err != nil {
		err = errors.Errorf("Failed to add port group (%q): %s", name, err)
		return err
	}

	net, err := d.session.Finder.Network(d.ctx, name)
	if err != nil {
		// a NotFoundError here means the port group we just created is not yet
		// visible; either way we cannot proceed to take a reference on a nil net
		if _, ok := err.(*find.NotFoundError); !ok {
			err = errors.Errorf("Failed to query virtual switch (%q): %s", name, err)
		}
		return err
	}

	// assign the moref to the bridge network config on the appliance
	bnet.ID = net.Reference().String()
	bnet.Network.ID = net.Reference().String()
	conf.CreateBridgeNetwork = true
	return nil
}
Example #11
func (d *Dispatcher) DeleteVCH(conf *config.VirtualContainerHostConfigSpec) error {
	defer trace.End(trace.Begin(conf.Name))

	var errs []string

	var err error
	var vmm *vm.VirtualMachine

	if vmm, err = d.findApplianceByID(conf); err != nil {
		return err
	}
	if vmm == nil {
		return nil
	}

	if err = d.DeleteVCHInstances(vmm, conf); err != nil {
		// if container delete failed, do not remove anything else
		log.Infof("Specify --force to force delete")
		return err
	}

	if err = d.deleteImages(conf); err != nil {
		errs = append(errs, err.Error())
	}

	d.deleteVolumeStoreIfForced(conf) // logs errors but doesn't ever bail out if it has an issue

	if err = d.deleteNetworkDevices(vmm, conf); err != nil {
		errs = append(errs, err.Error())
	}
	if err = d.removeNetwork(conf); err != nil {
		errs = append(errs, err.Error())
	}
	if len(errs) > 0 {
		// stop here and leave the VCH appliance in place so the delete can be retried
		return errors.New(strings.Join(errs, "\n"))
	}

	if d.isVC {
		log.Infoln("Removing VCH vSphere extension")
		if err = d.GenerateExtensionName(conf, vmm); err != nil {
			log.Warnf("Failed to get extension name during VCH deletion: %s", err)
		}
		if err = d.UnregisterExtension(conf.ExtensionName); err != nil {
			log.Warnf("Failed to remove extension %q: %s", conf.ExtensionName, err)
		}
	}

	err = d.deleteVM(vmm, true)
	if err != nil {
		log.Debugf("Error deleting appliance VM %s", err)
		return err
	}
	if err = d.destroyResourcePoolIfEmpty(conf); err != nil {
		log.Warnf("VCH resource pool is not removed: %s", err)
	}
	return nil
}
Example #12
File: config.go Project: vmware/vic
func (v *Validator) checkAssignedLicenses(ctx context.Context) error {
	var hosts []*object.HostSystem
	var invalidLic []string
	var validLic []string
	var err error
	client := v.Session.Client.Client

	if hosts, err = v.Session.Datastore.AttachedClusterHosts(ctx, v.Session.Cluster); err != nil {
		log.Errorf("Unable to get the list of hosts attached to given storage: %s", err)
		return err
	}

	lm := license.NewManager(client)

	am, err := lm.AssignmentManager(ctx)
	if err != nil {
		return err
	}

	features := []string{"serialuri", "dvs"}

	for _, host := range hosts {
		valid := true
		la, err := am.QueryAssigned(ctx, host.Reference().Value)
		if err != nil {
			return err
		}

		for _, feature := range features {
			if !v.assignedLicenseHasFeature(la, feature) {
				valid = false
				msg := fmt.Sprintf("%q - license missing feature %q", host.InventoryPath, feature)
				invalidLic = append(invalidLic, msg)
			}
		}

		if valid {
			validLic = append(validLic, host.InventoryPath)
		}
	}

	if len(validLic) > 0 {
		log.Infof("License check OK on hosts:")
		for _, h := range validLic {
			log.Infof("  %q", h)
		}
	}
	if len(invalidLic) > 0 {
		log.Errorf("License check FAILED on hosts:")
		for _, h := range invalidLic {
			log.Errorf("  %q", h)
		}
		msg := "License does not meet minimum requirements to use VIC"
		return errors.New(msg)
	}
	return nil
}
Example #13
func CreateNoDCCheck(ctx context.Context, input *data.Data) (*Validator, error) {
	defer trace.End(trace.Begin(""))
	var err error

	v := &Validator{}
	v.Context = ctx
	tURL := input.URL

	// default to https scheme
	if tURL.Scheme == "" {
		tURL.Scheme = "https"
	}

	// if only an IP address was specified, net/url parses it as a path rather than a host
	if tURL.Host == "" {
		tURL.Host = tURL.Path
		tURL.Path = ""
	}

	sessionconfig := &session.Config{
		Insecure: input.Insecure,
	}

	// if a datacenter was specified, set it
	v.DatacenterPath = tURL.Path
	if v.DatacenterPath != "" {
		sessionconfig.DatacenterPath = v.DatacenterPath
		// path needs to be stripped before we can use it as a service url
		tURL.Path = ""
	}

	sessionconfig.Service = tURL.String()

	v.Session = session.NewSession(sessionconfig)
	v.Session, err = v.Session.Connect(v.Context)
	if err != nil {
		return nil, err
	}

	// cached here to allow a modicum of testing while session is still in use.
	v.isVC = v.Session.IsVC()
	finder := find.NewFinder(v.Session.Client.Client, false)
	v.Session.Finder = finder

	v.Session.Populate(ctx)

	// only allow the datacenter to be specified in the target url, if any
	pElems := strings.Split(v.DatacenterPath, "/")
	if len(pElems) > 2 {
		detail := "--target should only specify datacenter in the path (e.g. https://addr/datacenter) - specify cluster, resource pool, or folder with --compute-resource"
		log.Error(detail)
		v.suggestDatacenter()
		return nil, errors.New(detail)
	}

	return v, nil
}
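The Host/Path swap near the top of this function works around a net/url quirk: a bare address with no scheme parses as a relative path, not a host. A runnable illustration (addresses hypothetical):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	u1, _ := url.Parse("10.17.109.10") // no scheme: the address lands in Path
	fmt.Printf("host=%q path=%q\n", u1.Host, u1.Path) // host="" path="10.17.109.10"

	u2, _ := url.Parse("https://10.17.109.10/dc1") // with a scheme it parses as expected
	fmt.Printf("host=%q path=%q\n", u2.Host, u2.Path) // host="10.17.109.10" path="/dc1"
}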
Example #14
// checkVDSMembership verifies that all hosts in the cluster are connected to the vDS
func (v *Validator) checkVDSMembership(ctx context.Context, network types.ManagedObjectReference, netName string) error {
	defer trace.End(trace.Begin(network.Value))

	var dvp mo.DistributedVirtualPortgroup
	var nonMembers []string

	if !v.IsVC() {
		return nil
	}

	if v.Session.Cluster == nil {
		return errors.New("Invalid cluster. Check --compute-resource")
	}

	clusterHosts, err := v.Session.Cluster.Hosts(ctx)
	if err != nil {
		return err
	}

	r := object.NewDistributedVirtualPortgroup(v.Session.Client.Client, network)
	if err := r.Properties(ctx, r.Reference(), []string{"name", "host"}, &dvp); err != nil {
		return err
	}

	for _, h := range clusterHosts {
		if !v.inDVP(h.Reference(), dvp.Host) {
			nonMembers = append(nonMembers, h.InventoryPath)
		}
	}

	if len(nonMembers) > 0 {
		log.Errorf("vDS configuration incorrect on %q. All cluster hosts must be in the vDS.", netName)
		log.Errorf("  %q is missing hosts:", netName)
		for _, hs := range nonMembers {
			log.Errorf("    %q", hs)
		}

		errMsg := fmt.Sprintf("All cluster hosts must be in the vDS. %q is missing hosts: %s", netName, nonMembers)
		v.NoteIssue(errors.New(errMsg))
	} else {
		log.Infof("vDS configuration OK on %q", netName)
	}
	return nil
}
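The inDVP helper isn't shown in this listing; a minimal sketch of the membership test it performs, assuming the port group's host property is a plain list of HostSystem references (the actual vic helper may differ):

// inDVP reports whether the given host moref appears in the port group's host list.
func (v *Validator) inDVP(host types.ManagedObjectReference, dvpHosts []types.ManagedObjectReference) bool {
	for _, h := range dvpHosts {
		if host == h {
			return true
		}
	}
	return false
}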
Example #15
func (v *Validator) datacenter() error {
	if v.Session.Datacenter == nil {
		detail := "Datacenter must be specified in --target (e.g. https://addr/datacenter)"
		log.Error(detail)
		v.suggestDatacenter()
		return errors.New(detail)
	}
	v.DatacenterPath = v.Session.Datacenter.InventoryPath
	return nil
}
Example #16
// Populate resolves the set of cached resources that should be presented.
// It returns accumulated error detail if there is ambiguity, but sets all
// unambiguous or correct resources.
func (s *Session) Populate(ctx context.Context) (*Session, error) {
	// Populate s
	var errs []string
	var err error

	finder := s.Finder

	log.Debug("vSphere resource cache populating...")
	s.Datacenter, err = finder.DatacenterOrDefault(ctx, s.DatacenterPath)
	if err != nil {
		errs = append(errs, fmt.Sprintf("Failure finding dc (%s): %s", s.DatacenterPath, err.Error()))
	} else {
		finder.SetDatacenter(s.Datacenter)
		log.Debugf("Cached dc: %s", s.DatacenterPath)
	}

	s.Cluster, err = finder.ComputeResourceOrDefault(ctx, s.ClusterPath)
	if err != nil {
		errs = append(errs, fmt.Sprintf("Failure finding cluster (%s): %s", s.ClusterPath, err.Error()))
	} else {
		log.Debugf("Cached cluster: %s", s.ClusterPath)
	}

	s.Datastore, err = finder.DatastoreOrDefault(ctx, s.DatastorePath)
	if err != nil {
		errs = append(errs, fmt.Sprintf("Failure finding ds (%s): %s", s.DatastorePath, err.Error()))
	} else {
		log.Debugf("Cached ds: %s", s.DatastorePath)
	}

	s.Host, err = finder.HostSystemOrDefault(ctx, s.HostPath)
	if err != nil {
		if _, ok := err.(*find.DefaultMultipleFoundError); !ok || !s.IsVC() {
			errs = append(errs, fmt.Sprintf("Failure finding host (%s): %s", s.HostPath, err.Error()))
		}
	} else {
		log.Debugf("Cached host: %s", s.HostPath)
	}

	s.Pool, err = finder.ResourcePoolOrDefault(ctx, s.PoolPath)
	if err != nil {
		errs = append(errs, fmt.Sprintf("Failure finding pool (%s): %s", s.PoolPath, err.Error()))
	} else {
		log.Debugf("Cached pool: %s", s.PoolPath)
	}

	if len(errs) > 0 {
		log.Debugf("Error count populating vSphere cache: (%d)", len(errs))
		return nil, errors.New(strings.Join(errs, "\n"))
	}
	log.Debug("vSphere resource cache populated...")
	return s, nil
}
Example #17
func (v *Validator) sessionValid(errMsg string) bool {
	if c := v.checkSessionSet(); len(c) > 0 {
		log.Error(errMsg)
		for _, e := range c {
			log.Errorf("  %s", e)
		}
		v.NoteIssue(errors.New(errMsg))
		return false
	}
	return true
}
Example #18
// Certificate turns the KeyPair back into useful TLS constructs
func (kp *KeyPair) Certificate() (*tls.Certificate, error) {
	if kp.CertPEM == nil || kp.KeyPEM == nil {
		return nil, errors.New("KeyPair has no data")
	}

	cert, err := tls.X509KeyPair(kp.CertPEM, kp.KeyPEM)
	if err != nil {
		return nil, err
	}

	return &cert, nil
}
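A typical use, as a minimal sketch assuming kp already holds valid PEM data (the listen address is illustrative):

	cert, err := kp.Certificate()
	if err != nil {
		return err
	}
	cfg := &tls.Config{Certificates: []tls.Certificate{*cert}}
	listener, err := tls.Listen("tcp", ":2376", cfg)
	if err != nil {
		return err
	}
	defer listener.Close()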
Example #19
File: create.go Project: vmware/vic
func (c *Create) processVolumeStores() error {
	defer trace.End(trace.Begin(""))
	c.VolumeLocations = make(map[string]string)
	for _, arg := range c.volumeStores {
		splitMeta := strings.SplitN(arg, ":", 2)
		if len(splitMeta) != 2 {
			return errors.New("Volume store input must be in format datastore/path:label")
		}
		c.VolumeLocations[splitMeta[1]] = splitMeta[0]
	}

	return nil
}
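So an argument such as "datastore1/volumes:default" (hypothetical) splits into location and label; SplitN with n=2 also keeps any later colons inside the label rather than producing extra fields:

	arg := "datastore1/volumes:default"
	splitMeta := strings.SplitN(arg, ":", 2)
	// splitMeta[0] == "datastore1/volumes"  - the datastore path (map value)
	// splitMeta[1] == "default"             - the volume store label (map key)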
Example #20
File: config.go Project: vmware/vic
// CheckDrs checks that DRS is enabled
func (v *Validator) CheckDrs(ctx context.Context) {
	if v.DisableDRSCheck {
		return
	}
	defer trace.End(trace.Begin(""))

	errMsg := "DRS check SKIPPED"
	if !v.sessionValid(errMsg) {
		return
	}

	cl := v.Session.Cluster
	ref := cl.Reference()

	if v.isStandaloneHost() {
		log.Info("DRS check SKIPPED - target is standalone host")
		return
	}

	var ccr mo.ClusterComputeResource

	err := cl.Properties(ctx, ref, []string{"configurationEx"}, &ccr)
	if err != nil {
		msg := fmt.Sprintf("Failed to validate DRS config: %s", err)
		v.NoteIssue(errors.New(msg))
		return
	}

	drs := ccr.ConfigurationEx.(*types.ClusterConfigInfoEx).DrsConfig

	if drs.Enabled == nil || !*drs.Enabled {
		log.Error("DRS check FAILED")
		log.Errorf("  DRS must be enabled on cluster %q", v.Session.Pool.InventoryPath)
		v.NoteIssue(errors.New("DRS must be enabled to use VIC"))
		return
	}
	log.Info("DRS check OK on:")
	log.Infof("  %q", v.Session.Pool.InventoryPath)
}
Example #21
func (v *Validator) ListIssues() error {
	defer trace.End(trace.Begin(""))

	if len(v.issues) == 0 {
		return nil
	}

	log.Error("--------------------")
	for _, err := range v.issues {
		log.Error(err)
	}

	return errors.New("validation of configuration failed")
}
Example #22
// inventoryPathToCluster is a convenience method that will return the cluster
// path prefix or "" in the case of unexpected path structure
func (v *Validator) inventoryPathToCluster(path string) string {
	defer trace.End(trace.Begin(path))

	// inventory path is always /dc/host/computeResource/Resources/path/to/pool
	pElems := strings.Split(path, "/")
	// need at least /dc/host/cluster - 4 elements, since the leading / yields an empty [0]
	if len(pElems) < 4 {
		log.Debugf("Expected path to be fully qualified, e.g. /dcName/host/clusterName/Resources/poolName: %s", path)
		v.NoteIssue(errors.New("inventory path format was not recognised"))
		return ""
	}

	// /dc/host/cluster/Resources/path/to/pool
	return strings.Join(pElems[:4], "/")
}
Example #23
// ensureComponentsInitialize checks if the appliance components are initialized by issuing
// `docker info` to the appliance
func (d *Dispatcher) ensureComponentsInitialize(conf *config.VirtualContainerHostConfigSpec) error {
	var (
		proto  string
		client *http.Client
		res    *http.Response
		err    error
		req    *http.Request
	)

	if conf.HostCertificate.IsNil() {
		// TLS disabled
		proto = "http"
		client = &http.Client{}
	} else {
		// TLS enabled
		proto = "https"
		// TODO: configure this when support is added for user-signed certs
		tr := &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		}
		client = &http.Client{Transport: tr}
	}

	dockerInfoURL := fmt.Sprintf("%s://%s:%s/info", proto, d.HostIP, d.DockerPort)
	req, err = http.NewRequest("GET", dockerInfoURL, nil)
	if err != nil {
		return errors.New("invalid HTTP request for docker info")
	}
	req = req.WithContext(d.ctx)

	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		res, err = client.Do(req)
		if err == nil {
			running := res.StatusCode == http.StatusOK && isPortLayerRunning(res)
			// close each response body so the underlying connection can be reused
			res.Body.Close()
			if running {
				break
			}
		}

		select {
		case <-ticker.C:
		case <-d.ctx.Done():
			return d.ctx.Err()
		}
		log.Debug("Components not initialized yet, retrying docker info request")
	}

	return nil
}
Example #24
func (v *Validator) certificateAuthorities(ctx context.Context, input *data.Data, conf *config.VirtualContainerHostConfigSpec) {
	defer trace.End(trace.Begin(""))

	if len(input.ClientCAs) == 0 {
		// if there's no data supplied then we're configuring without client verification
		log.Debug("Configuring without client verification due to empty certificate authorities")
		return
	}

	// ensure TLS is configurable
	if len(input.CertPEM) == 0 {
		v.NoteIssue(errors.New("Certificate authority specified, but no TLS certificate provided"))
		return
	}

	// check a CA can be loaded
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(input.ClientCAs) {
		v.NoteIssue(errors.New("Unable to load certificate authority data"))
		return
	}

	conf.CertificateAuthorities = input.ClientCAs
}
Example #25
func (v *Validator) getNetwork(ctx context.Context, path string) (object.NetworkReference, error) {
	defer trace.End(trace.Begin(path))

	nets, err := v.Session.Finder.NetworkList(ctx, path)
	if err != nil {
		log.Debugf("no such network %q", path)
		// TODO: error message about no such match and how to get a network list
		// we return err directly here so we can check the type
		return nil, err
	}
	if len(nets) > 1 {
		// TODO: error about required disambiguation and list entries in nets
		return nil, errors.New("ambiguous network " + path)
	}
	return nets[0], nil
}
Example #26
File: create.go Project: vmware/vic
func (c *Create) processCertificates() error {
	// check for insecure case
	if c.noTLS {
		log.Warn("Configuring without TLS - all communications will be insecure")
		return nil
	}

	// if one or more CAs are provided, then so must the key and cert for host certificate
	cas, keypair, err := c.loadCertificates()
	if err != nil {
		log.Error("Create cannot continue: unable to load certificates")
		return err
	}

	if len(cas) != 0 && keypair == nil {
		log.Error("Create cannot continue: specifying a CA requires --key and --cert parameters")
		return errors.New("If supplying a CA, certificate and key for TLS must also be supplied")
	}

	if len(cas) == 0 && keypair == nil {
		// if we get here we didn't load a CA or keys, so we're generating
		cas, keypair, err = c.generateCertificates(!c.noTLSverify)
		if err != nil {
			log.Error("Create cannot continue: unable to generate certificates")
			return err
		}
	}

	if keypair == nil {
		// this should be caught by the earlier error returns, but sanity check -
		// err is nil at this point so an explicit error must be returned
		log.Error("Create cannot continue: unable to load or generate TLS certificates and --no-tls was not specified")
		return errors.New("unable to load or generate TLS certificates")
	}

	c.KeyPEM = keypair.KeyPEM
	c.CertPEM = keypair.CertPEM

	// do we have key, cert, and --no-tlsverify
	if c.noTLSverify || len(cas) == 0 {
		log.Warnf("Configuring without TLS verify - client authentication disabled")
		return nil
	}

	c.ClientCAs = cas
	return nil
}
Example #27
func (v *Validator) checkDatastoresAreWriteable(ctx context.Context, conf *config.VirtualContainerHostConfigSpec) {
	defer trace.End(trace.Begin(""))

	// gather compute host references
	clusterDatastores, err := v.Session.Cluster.Datastores(ctx)
	v.NoteIssue(err)

	// check that the cluster can see all of the datastores in question
	requestedDatastores := v.getAllDatastores(ctx, conf)
	validStores := make(map[types.ManagedObjectReference]*object.Datastore)
	// remove any found datastores from requested datastores
	for _, cds := range clusterDatastores {
		if requestedDatastores[cds.Reference()] != nil {
			delete(requestedDatastores, cds.Reference())
			validStores[cds.Reference()] = cds
		}
	}
	// if requestedDatastores is not empty, some requested datastores are not accessible by the compute resource
	for _, store := range requestedDatastores {
		v.NoteIssue(errors.Errorf("Datastore %q is not accessible by the compute resource", store.Name()))
	}

	clusterHosts, err := v.Session.Cluster.Hosts(ctx)
	justOneHost := len(clusterHosts) == 1
	v.NoteIssue(err)

	for _, store := range validStores {
		aHosts, err := store.AttachedHosts(ctx)
		v.NoteIssue(err)
		clusterHosts = intersect(clusterHosts, aHosts)
	}

	if len(clusterHosts) == 0 {
		v.NoteIssue(errors.New("No single host can access all of the requested datastores. Installation cannot continue."))
	}

	if len(clusterHosts) == 1 && v.Session.IsVC() && !justOneHost {
		// if we have a cluster with >1 host to begin with, on VC, and only one host can talk to all the datastores, warn
		// on ESX and clusters with only one host to begin with, this warning would be redundant/irrelevant
		log.Warnf("Only one host can access all of the image/container/volume datastores. This may be a point of contention/performance degradation and HA/DRS may not work as intended.")
	}
}
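The intersect helper isn't shown in this listing; a minimal sketch of the set intersection it performs, assuming hosts compare equal by managed object reference (the actual vic helper may differ):

// intersect returns the hosts present in both lists, comparing by moref.
func intersect(a, b []*object.HostSystem) []*object.HostSystem {
	inB := make(map[types.ManagedObjectReference]bool, len(b))
	for _, h := range b {
		inB[h.Reference()] = true
	}
	var common []*object.HostSystem
	for _, h := range a {
		if inB[h.Reference()] {
			common = append(common, h)
		}
	}
	return common
}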
Example #28
// start starts a container vm with the given params
func (c *Container) start(ctx context.Context) error {
	defer trace.End(trace.Begin("Container.start"))

	if c.vm == nil {
		return fmt.Errorf("vm not set")
	}
	// get existing state and set to starting
	// if there's a failure we'll revert to existing
	existingState := c.State
	c.State = StateStarting

	// Power on
	_, err := tasks.WaitForResult(ctx, func(ctx context.Context) (tasks.ResultWaiter, error) {
		return c.vm.PowerOn(ctx)
	})
	if err != nil {
		c.State = existingState
		return err
	}

	// guestinfo key that we want to wait for
	key := fmt.Sprintf("guestinfo..sessions|%s.started", c.ExecConfig.ID)
	var detail string

	// Wait some before giving up...
	ctx, cancel := context.WithTimeout(ctx, propertyCollectorTimeout)
	defer cancel()

	detail, err = c.vm.WaitForKeyInExtraConfig(ctx, key)
	if err != nil {
		c.State = existingState
		return fmt.Errorf("unable to wait for process launch status: %s", err.Error())
	}

	if detail != "true" {
		c.State = existingState
		return errors.New(detail)
	}

	return nil
}
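The guestinfo key is how the tether inside the container VM reports the launch result. For a container whose ExecConfig.ID is, say, "deadbeef" (hypothetical value), the key polled above would be:

// guestinfo..sessions|deadbeef.started
//
// the tether writes "true" to this key once the process has launched;
// any other value is surfaced by start as the launch error.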
Example #29
func (d *Dispatcher) NewVCHFromID(id string) (*vm.VirtualMachine, error) {
	defer trace.End(trace.Begin(id))

	var err error
	var vmm *vm.VirtualMachine

	moref := new(types.ManagedObjectReference)
	if ok := moref.FromString(id); !ok {
		message := "Failed to get appliance VM mob reference"
		log.Errorf(message)
		return nil, errors.New(message)
	}
	ref, err := d.session.Finder.ObjectReference(d.ctx, *moref)
	if err != nil {
		if _, ok := err.(*find.NotFoundError); !ok {
			err = errors.Errorf("Failed to query appliance (%s): %s", moref, err)
			return nil, err
		}
		log.Debugf("Appliance is not found")
		return nil, nil
	}
	ovm, ok := ref.(*object.VirtualMachine)
	if !ok {
		log.Errorf("Failed to find VM %s, %s", moref, err)
		return nil, err
	}
	vmm = vm.NewVirtualMachine(d.ctx, d.session, ovm.Reference())

	// check if it's VCH
	if ok, err = d.isVCH(vmm); err != nil {
		log.Errorf("%s", err)
		return nil, err
	}
	if !ok {
		err = errors.Errorf("Not a VCH")
		log.Errorf("%s", err)
		return nil, err
	}
	return vmm, nil
}
Example #30
func (d *Dispatcher) isVCH(vm *vm.VirtualMachine) (bool, error) {
	if vm == nil {
		return false, errors.New("nil parameter")
	}
	defer trace.End(trace.Begin(vm.InventoryPath))

	info, err := vm.FetchExtraConfig(d.ctx)
	if err != nil {
		err = errors.Errorf("Failed to fetch guest info of appliance vm: %s", err)
		return false, err
	}

	var remoteConf config.VirtualContainerHostConfigSpec
	extraconfig.Decode(extraconfig.MapSource(info), &remoteConf)

	// if the moref of the target matches where we expect to find it for a VCH, run with it
	if remoteConf.ExecutorConfig.ID == vm.Reference().String() {
		return true, nil
	}

	return false, nil
}