Example #1
func findDiagnosticLogs(c *session.Session) ([]entryReader, error) {
	defer trace.End(trace.Begin(""))

	// When connected to VC, we collect vpxd.log and hostd.log for all cluster hosts attached to the datastore.
	// When connected to ESX, we just collect hostd.log.
	const (
		vpxdKey  = "vpxd:vpxd.log"
		hostdKey = "hostd"
	)

	var logs []entryReader
	var err error

	if c.IsVC() {
		logs = append(logs, dlogReader{c, vpxdKey, nil})

		var hosts []*object.HostSystem
		if c.Cluster == nil && c.Host != nil {
			hosts = []*object.HostSystem{c.Host}
		} else {
			hosts, err = c.Datastore.AttachedClusterHosts(context.TODO(), c.Cluster)
			if err != nil {
				return nil, err
			}
		}

		for _, host := range hosts {
			logs = append(logs, dlogReader{c, hostdKey, host})
		}
	} else {
		logs = append(logs, dlogReader{c, hostdKey, nil})
	}

	return logs, nil
}
Example #2
func createAppliance(ctx context.Context, sess *session.Session, conf *config.VirtualContainerHostConfigSpec, vConf *data.InstallerData, hasErr bool, t *testing.T) {
	var err error

	d := &Dispatcher{
		session: sess,
		ctx:     ctx,
		isVC:    sess.IsVC(),
		force:   false,
	}
	delete(conf.Networks, "bridge") // FIXME: cannot create bridge network in simulator
	if d.isVC {
		if d.vchVapp, err = d.createVApp(conf, vConf); err != nil {
			// FIXME: got "ServerFaultCode: ResourcePool:resourcepool-14 does not implement: CreateVApp";
			// the simulator needs to implement CreateVApp before this can be asserted:
			// t.Errorf("Unable to create virtual app: %s", err)
		}
	}
	if d.vchPool, err = d.createResourcePool(conf, vConf); err != nil {
		t.Errorf("Unable to create resource pool: %s", err)
	}

	spec, err := d.createApplianceSpec(conf, vConf)
	if err != nil {
		t.Errorf("Unable to create appliance spec: %s", err)
		return
	}

	// create appliance VM
	info, err := tasks.WaitForResult(d.ctx, func(ctx context.Context) (tasks.ResultWaiter, error) {
		return d.session.Folders(ctx).VmFolder.CreateVM(ctx, *spec, d.vchPool, d.session.Host)
	})
	if err != nil {
		t.Errorf("Unable to create appliance VM: %s", err)
		return
	}

	// get VM reference and save it
	moref := info.Result.(types.ManagedObjectReference)
	conf.SetMoref(&moref)
	obj, err := d.session.Finder.ObjectReference(d.ctx, moref)
	if err != nil {
		t.Errorf("Failed to reacquire reference to appliance VM after creation: %s", err)
		return
	}
	gvm, ok := obj.(*object.VirtualMachine)
	if !ok {
		t.Errorf("Required reference after appliance creation was not for a VM: %T", obj)
		return
	}

	vm2 := vm.NewVirtualMachineFromVM(d.ctx, d.session, gvm)
	uuid, err := vm2.UUID(d.ctx)
	if err != nil {
		t.Errorf("Failed to get VM UUID: %s", err)
		return
	}
	t.Logf("uuid: %s", uuid)

	// leverage create volume method to create image datastore
	conf.VolumeLocations["images-store"], _ = url.Parse(fmt.Sprintf("ds://LocalDS_0/VIC/%s/images", uuid))

	if err := d.createVolumeStores(conf); err != nil {
		t.Errorf("Unable to create volume stores: %s", err)
		return
	}
}
Example #3
func testDeleteVolumeStores(ctx context.Context, sess *session.Session, conf *config.VirtualContainerHostConfigSpec, numVols int, t *testing.T) {
	d := &Dispatcher{
		session: sess,
		ctx:     ctx,
		isVC:    sess.IsVC(),
		force:   true,
	}

	if removed := d.deleteVolumeStoreIfForced(conf); removed != numVols {
		t.Errorf("Did not successfully remove all specified volumes")
	}

}
Example #4
// FIXME: no IDE controller can be found in the simulator, so creating the appliance fails
func testCreateAppliance(ctx context.Context, sess *session.Session, conf *config.VirtualContainerHostConfigSpec, vConf *data.InstallerData, hasErr bool, t *testing.T) {
	d := &Dispatcher{
		session: sess,
		ctx:     ctx,
		isVC:    sess.IsVC(),
		force:   false,
	}
	delete(conf.Networks, "bridge") // FIXME: cannot create bridge network right now
	d.vchPool = d.session.Pool
	err := d.createAppliance(conf, vConf)
	if err != nil {
		t.Logf("Expected error: %s", err)
	}
}
Example #5
// NewDispatcher creates a dispatcher that can act upon VIC management operations.
// force, when set, causes some errors to be ignored.
func NewDispatcher(ctx context.Context, s *session.Session, conf *config.VirtualContainerHostConfigSpec, force bool) *Dispatcher {
	defer trace.End(trace.Begin(""))
	isVC := s.IsVC()
	e := &Dispatcher{
		session: s,
		ctx:     ctx,
		isVC:    isVC,
		force:   force,
	}
	if conf != nil {
		e.InitDiagnosticLogs(conf)
	}
	return e
}
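
The test helpers in the other examples build their Dispatcher with a struct literal. As a minimal sketch (an assumption, not taken from the source), the same setup can go through NewDispatcher instead, which also runs InitDiagnosticLogs when a non-nil config is supplied; the helper name below is hypothetical.

// newTestDispatcher is a hypothetical wrapper (not in the source) that constructs the
// Dispatcher via NewDispatcher rather than a struct literal. Because conf is non-nil,
// InitDiagnosticLogs runs as part of construction.
func newTestDispatcher(ctx context.Context, sess *session.Session, conf *config.VirtualContainerHostConfigSpec) *Dispatcher {
	return NewDispatcher(ctx, sess, conf, false) // force=false, matching the test helpers in the other examples
}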
Example #6
func testCreateVolumeStores(ctx context.Context, sess *session.Session, conf *config.VirtualContainerHostConfigSpec, hasErr bool, t *testing.T) {
	d := &Dispatcher{
		session: sess,
		ctx:     ctx,
		isVC:    sess.IsVC(),
		force:   false,
	}

	err := d.createVolumeStores(conf)
	if hasErr && err != nil {
		t.Logf("Got exepcted err: %s", err)
		return
	}
	if hasErr {
		t.Errorf("Should have error, but got success")
		return
	}
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
}
Example #7
func testCreateNetwork(ctx context.Context, sess *session.Session, conf *config.VirtualContainerHostConfigSpec, t *testing.T) {
	d := &Dispatcher{
		session: sess,
		ctx:     ctx,
		isVC:    sess.IsVC(),
		force:   false,
	}

	err := d.createBridgeNetwork(conf)
	if d.isVC && err != nil {
		t.Logf("Got exepcted err: %s", err)
		return
	}
	if d.isVC {
		t.Errorf("Should not create network in VC")
		return
	}
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
}
Example #8
func findDiagnosticLogs(c *session.Session) (map[string]entryReader, error) {
	defer trace.End(trace.Begin(""))

	// When connected to VC, we collect vpxd.log and hostd.log for all cluster hosts attached to the datastore.
	// When connected to ESX, we just collect hostd.log.
	const (
		vpxdKey  = "vpxd:vpxd.log"
		hostdKey = "hostd"
	)

	logs := map[string]entryReader{}
	var err error

	if c.IsVC() {
		logs[vpxdKey] = dlogReader{c, vpxdKey, nil}

		var hosts []*object.HostSystem
		if c.Cluster == nil && c.Host != nil {
			hosts = []*object.HostSystem{c.Host}
		} else {
			hosts, err = c.Datastore.AttachedClusterHosts(context.TODO(), c.Cluster)
			if err != nil {
				return nil, err
			}
		}

		for _, host := range hosts {
			lname := fmt.Sprintf("%s/%s", hostdKey, host)
			logs[lname] = dlogReader{c, hostdKey, host}
		}
	} else {
		logs[hostdKey] = dlogReader{c, hostdKey, nil}
	}

	return logs, nil
}
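
Because this variant keys each reader by a log name, a caller can report which diagnostic logs will be collected before reading them. A minimal sketch of such a consumer follows; it only calls findDiagnosticLogs as defined above, and the helper name is hypothetical.

// collectDiagnosticLogNames is a hypothetical consumer (not in the source) of the
// map-based findDiagnosticLogs above: it returns the names of the diagnostic logs
// that would be collected for the given session.
func collectDiagnosticLogNames(c *session.Session) ([]string, error) {
	logs, err := findDiagnosticLogs(c)
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(logs))
	for name := range logs {
		names = append(names, name)
	}
	return names, nil
}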
Example #9
func (c *Container) Commit(ctx context.Context, sess *session.Session, h *Handle, waitTime *int32) error {
	defer trace.End(trace.Begin("Committing handle"))

	c.Lock()
	defer c.Unlock()

	if c.vm == nil {
		// the only permissible operation is to create a VM
		if h.Spec == nil {
			return fmt.Errorf("only create operations can be committed without an existing VM")
		}

		var res *types.TaskInfo
		var err error

		if sess.IsVC() && VCHConfig.VirtualApp != nil {
			// Create the vm
			res, err = tasks.WaitForResult(ctx, func(ctx context.Context) (tasks.ResultWaiter, error) {
				return VCHConfig.VirtualApp.CreateChildVM_Task(ctx, *h.Spec.Spec(), nil)
			})

			c.State = StateCreated
		} else {
			// Find the Virtual Machine folder that we use
			var folders *object.DatacenterFolders
			folders, err = sess.Datacenter.Folders(ctx)
			if err != nil {
				log.Errorf("Could not get folders")
				return err
			}
			parent := folders.VmFolder

			// Create the vm
			res, err = tasks.WaitForResult(ctx, func(ctx context.Context) (tasks.ResultWaiter, error) {
				return parent.CreateVM(ctx, *h.Spec.Spec(), VCHConfig.ResourcePool, nil)
			})

			c.State = StateCreated
		}

		if err != nil {
			log.Errorf("Something failed. Spec was %+v", *h.Spec.Spec())
			return err
		}

		c.vm = vm.NewVirtualMachine(ctx, sess, res.Result.(types.ManagedObjectReference))

		// clear the spec as we've acted on it
		h.Spec = nil
	}

	// if we're stopping the VM, do so before the reconfigure to preserve the extraconfig
	if h.State != nil && *h.State == StateStopped {
		// stop the container
		if err := h.Container.stop(ctx, waitTime); err != nil {
			return err
		}

		c.State = *h.State
	}

	if h.Spec != nil {
		// FIXME: add check that the VM is powered off - it should be, but this will destroy the
		// extraconfig if it's not.

		s := h.Spec.Spec()
		_, err := tasks.WaitForResult(ctx, func(ctx context.Context) (tasks.ResultWaiter, error) {
			return c.vm.Reconfigure(ctx, *s)
		})

		if err != nil {
			return err
		}
	}

	if h.State != nil && *h.State == StateRunning {
		// start the container
		if err := h.Container.start(ctx); err != nil {
			return err
		}

		c.State = *h.State
	}

	c.ExecConfig = &h.ExecConfig

	// add or overwrite the container in the cache
	containers.Put(c)
	return nil
}
Example #10
// Commit executes the requires steps on the handle
func Commit(ctx context.Context, sess *session.Session, h *Handle, waitTime *int32) error {
	defer trace.End(trace.Begin(h.ExecConfig.ID))

	c := Containers.Container(h.ExecConfig.ID)
	creation := h.vm == nil
	if creation {
		if h.Spec == nil {
			return fmt.Errorf("a spec must be provided for create operations")
		}

		if sess == nil {
			// session must not be nil
			return fmt.Errorf("no session provided for create operations")
		}

		// the only permissible operation is to create a VM
		if h.Spec == nil {
			return fmt.Errorf("only create operations can be committed without an existing VM")
		}

		if c != nil {
			return fmt.Errorf("a container already exists in the cache with this ID")
		}

		var res *types.TaskInfo
		var err error
		if sess.IsVC() && Config.VirtualApp.ResourcePool != nil {
			// Create the vm
			res, err = tasks.WaitForResult(ctx, func(ctx context.Context) (tasks.Task, error) {
				return Config.VirtualApp.CreateChildVM_Task(ctx, *h.Spec.Spec(), nil)
			})
		} else {
			// Find the Virtual Machine folder that we use
			var folders *object.DatacenterFolders
			folders, err = sess.Datacenter.Folders(ctx)
			if err != nil {
				log.Errorf("Could not get folders")
				return err
			}
			parent := folders.VmFolder

			// Create the vm
			res, err = tasks.WaitForResult(ctx, func(ctx context.Context) (tasks.Task, error) {
				return parent.CreateVM(ctx, *h.Spec.Spec(), Config.ResourcePool, nil)
			})
		}

		if err != nil {
			log.Errorf("Something failed. Spec was %+v", *h.Spec.Spec())
			return err
		}

		h.vm = vm.NewVirtualMachine(ctx, sess, res.Result.(types.ManagedObjectReference))
		c = newContainer(&h.containerBase)
		Containers.Put(c)
		// inform of creation irrespective of remaining operations
		publishContainerEvent(c.ExecConfig.ID, time.Now().UTC(), events.ContainerCreated)

		// clear the spec as we've acted on it - this prevents a reconfigure from occurring in follow-on
		// processing
		h.Spec = nil
	}

	// if we're stopping the VM, do so before the reconfigure to preserve the extraconfig
	refresh := true
	if h.TargetState() == StateStopped {
		if h.Runtime == nil {
			log.Warnf("Commit called with incomplete runtime state for %s", h.ExecConfig.ID)
		}

		if h.Runtime != nil && h.Runtime.PowerState == types.VirtualMachinePowerStatePoweredOff {
			log.Infof("Dropping duplicate power off operation for %s", h.ExecConfig.ID)
		} else {
			// stop the container
			if err := c.stop(ctx, waitTime); err != nil {
				return err
			}

			// inform of the stop irrespective of remaining operations
			publishContainerEvent(h.ExecConfig.ID, time.Now().UTC(), events.ContainerStopped)

			// we must refresh now to get the new ChangeVersion - this is used to gate on powerstate in the reconfigure
			// because we cannot set the ExtraConfig if the VM is powered on. There is still a race here unfortunately because
			// tasks don't appear to contain the new ChangeVersion
			// we don't use refresh because we want to keep the extraconfig state
			base, err := h.updates(ctx)
			if err != nil {
				// TODO: can we recover here, or at least set useful state for inspection?
				return err
			}
			h.Runtime = base.Runtime
			h.Config = base.Config

			refresh = false
		}
	}

	// reconfigure operation
	if h.Spec != nil {
		if h.Runtime == nil {
			log.Errorf("Refusing to perform reconfigure operation with incomplete runtime state for %s", h.ExecConfig.ID)
		} else {
			// ensure that our logic based on Runtime state remains valid

			// NOTE: this inline refresh can be removed when switching away from guestinfo where we have non-persistence issues
			// when updating ExtraConfig via the API with a powered on VM - we therefore have to be absolutely certain about the
			// power state to decide if we can continue without nilifying extraconfig

			for s := h.Spec.Spec(); ; refresh, s = true, h.Spec.Spec() {
				// FIXME!!! this is a temporary hack until the concurrent modification retry logic is in place
				if refresh {
					base, err := h.updates(ctx)
					if err == nil {
						h.Runtime = base.Runtime
						h.Config = base.Config
					}
				}

				s.ChangeVersion = h.Config.ChangeVersion

				// nilify ExtraConfig if vm is running
				if h.Runtime.PowerState == types.VirtualMachinePowerStatePoweredOn {
					log.Errorf("Nilifying ExtraConfig as we are running")
					s.ExtraConfig = nil
				}

				_, err := h.vm.WaitForResult(ctx, func(ctx context.Context) (tasks.Task, error) {
					return h.vm.Reconfigure(ctx, *s)
				})
				if err != nil {
					log.Errorf("Reconfigure failed with %#+v", err)

					// Check whether we get ConcurrentAccess and wrap it if needed
					if f, ok := err.(types.HasFault); ok {
						switch f.Fault().(type) {
						case *types.ConcurrentAccess:
							log.Errorf("We have ConcurrentAccess for version %s", s.ChangeVersion)

							continue
							// return ConcurrentAccessError{err}
						}
					}
					return err
				}

				break
			}
		}
	}

	// best effort update of container cache using committed state - this will not reflect the power on below, however
	// this is primarily for updating ExtraConfig state.
	if !creation {
		defer c.RefreshFromHandle(ctx, h)
	}

	if h.TargetState() == StateRunning {
		if h.Runtime != nil && h.Runtime.PowerState == types.VirtualMachinePowerStatePoweredOn {
			log.Infof("Dropping duplicate power on operation for %s", h.ExecConfig.ID)
			return nil
		}

		if h.Runtime == nil && !creation {
			log.Warnf("Commit called with incomplete runtime state for %s", h.ExecConfig.ID)
		}

		// start the container
		if err := c.start(ctx); err != nil {
			return err
		}

		// inform of the start irrespective of remaining operations
		publishContainerEvent(h.ExecConfig.ID, time.Now().UTC(), events.ContainerStarted)
	}

	return nil
}