Example No. 1
func (handler *ScopesHandlersImpl) ScopesAddContainer(params scopes.AddContainerParams) middleware.Responder {
	defer trace.End(trace.Begin("ScopesAddContainer"))

	h := exec.GetHandle(params.Config.Handle)
	if h == nil {
		return scopes.NewAddContainerNotFound().WithPayload(&models.Error{Message: "container not found"})
	}

	err := func() error {
		var ip *net.IP
		if params.Config.NetworkConfig.Address != nil {
			i := net.ParseIP(*params.Config.NetworkConfig.Address)
			if i == nil {
				return fmt.Errorf("invalid ip address")
			}

			ip = &i
		}

		return handler.netCtx.AddContainer(h, params.Config.NetworkConfig.NetworkName, ip)
	}()

	if err != nil {
		if _, ok := err.(*network.ResourceNotFoundError); ok {
			return scopes.NewAddContainerNotFound().WithPayload(errorPayload(err))
		}

		return scopes.NewAddContainerInternalServerError().WithPayload(errorPayload(err))
	}

	return scopes.NewAddContainerOK().WithPayload(h.String())
}
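
All of the examples in this listing open with the same instrumentation idiom: defer trace.End(trace.Begin("...")). The argument to defer is evaluated immediately, so Begin runs on entry, while End is deferred until the function returns and can log the elapsed time. The sketch below is a minimal, self-contained re-implementation of that pattern for illustration only; the Message type, its fields, and the log format are assumptions, not the actual github.com/vmware/vic/pkg/trace API.

package main

import (
	"log"
	"time"
)

// Message records where and when a traced function started.
type Message struct {
	Name  string
	Start time.Time
}

// Begin runs at function entry (its argument is evaluated immediately,
// even when it appears inside a defer statement) and captures the start time.
func Begin(name string) *Message {
	log.Printf("BEGIN %s", name)
	return &Message{Name: name, Start: time.Now()}
}

// End runs when the surrounding function returns (via defer) and logs the
// elapsed time since the matching Begin.
func End(m *Message) {
	log.Printf("END %s (%s)", m.Name, time.Since(m.Start))
}

func doWork() {
	defer End(Begin("doWork")) // same shape as the vic handlers above
	time.Sleep(50 * time.Millisecond)
}

func main() {
	doWork()
}
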
Example No. 2
func (u *URLFetcher) FetchAuthToken(url *url.URL) (*Token, error) {
	defer trace.End(trace.Begin(url.String()))

	data, err := u.Fetch(context.Background(), url, false, nil)
	if err != nil {
		log.Errorf("Download failed: %v", err)
		return nil, err
	}

	token := &Token{}

	err = json.Unmarshal([]byte(data), token)
	if err != nil {
		log.Errorf("Incorrect token format: %v", err)
		return nil, err
	}

	if token.ExpiresIn == 0 {
		token.Expires = time.Now().Add(DefaultTokenExpirationDuration)
	} else {
		token.Expires = time.Now().Add(time.Duration(token.ExpiresIn) * time.Second)
	}

	return token, nil
}
Example No. 3
func (d *Dispatcher) configIso(conf *metadata.VirtualContainerHostConfigSpec, vm *vm.VirtualMachine) (object.VirtualDeviceList, error) {
	defer trace.End(trace.Begin(""))

	var devices object.VirtualDeviceList
	var err error

	vmDevices, err := vm.Device(d.ctx)
	if err != nil {
		log.Errorf("Failed to get vm devices for appliance: %s", err)
		return nil, err
	}
	ide, err := vmDevices.FindIDEController("")
	if err != nil {
		log.Errorf("Failed to find IDE controller for appliance: %s", err)
		return nil, err
	}
	cdrom, err := devices.CreateCdrom(ide)
	if err != nil {
		log.Errorf("Failed to create Cdrom device for appliance: %s", err)
		return nil, err
	}
	cdrom = devices.InsertIso(cdrom, fmt.Sprintf("[%s] %s/appliance.iso", conf.ImageStores[0].Host, d.vmPathName))
	devices = append(devices, cdrom)
	return devices, nil
}
Example No. 4
// SessionLog returns a writer that will persist the session output
func (t *operations) SessionLog(session *tether.SessionConfig) (dio.DynamicMultiWriter, error) {
	com := "COM3"

	defer trace.End(trace.Begin("configure session log writer"))

	if t.logging {
		detail := "unable to log more than one session concurrently"
		log.Error(detail)
		return nil, errors.New(detail)
	}

	t.logging = true

	// redirect backchannel to the serial connection
	log.Infof("opening %s%s for session logging", pathPrefix, com)
	f, err := OpenPort(fmt.Sprintf("%s%s", pathPrefix, com))
	if err != nil {
		detail := fmt.Sprintf("failed to open serial port for session log: %s", err)
		log.Error(detail)
		return nil, errors.New(detail)
	}

	// use multi-writer so it goes to both screen and session log
	return dio.MultiWriter(f, os.Stdout), nil
}
Example No. 5
func (d *Dispatcher) lsFolder(ds *object.Datastore, dsPath string) (*types.HostDatastoreBrowserSearchResults, error) {
	defer trace.End(trace.Begin(dsPath))

	spec := types.HostDatastoreBrowserSearchSpec{
		MatchPattern: []string{"*"},
	}

	b, err := ds.Browser(d.ctx)
	if err != nil {
		return nil, err
	}

	task, err := b.SearchDatastore(d.ctx, dsPath, &spec)
	if err != nil {
		return nil, err
	}

	info, err := task.WaitForResult(d.ctx, nil)
	if err != nil {
		return nil, err
	}

	res := info.Result.(types.HostDatastoreBrowserSearchResults)
	return &res, nil
}
Example No. 6
func (c *Context) newBridgeScope(id uid.UID, name string, subnet *net.IPNet, gateway net.IP, dns []net.IP, pools []string) (newScope *Scope, err error) {
	defer trace.End(trace.Begin(""))
	bnPG, ok := c.config.PortGroups[c.config.BridgeNetwork]
	if !ok || bnPG == nil {
		return nil, fmt.Errorf("bridge network not set")
	}

	if ip.IsUnspecifiedSubnet(subnet) {
		// get the next available subnet from the default bridge pool
		var err error
		subnet, err = c.defaultBridgePool.NextIP4Net(c.defaultBridgeMask)
		if err != nil {
			return nil, err
		}
	}

	s, err := c.newScopeCommon(id, name, constants.BridgeScopeType, subnet, gateway, dns, pools, bnPG)
	if err != nil {
		return nil, err
	}

	// add the gateway address to the bridge interface
	if err = c.config.BridgeLink.AddrAdd(net.IPNet{IP: s.Gateway(), Mask: s.Subnet().Mask}); err != nil {
		if errno, ok := err.(syscall.Errno); !ok || errno != syscall.EEXIST {
			log.Warnf("failed to add gateway address %s to bridge interface: %s", s.Gateway(), err)
		}
	}

	return s, nil
}
Example No. 7
func (c *Context) Scopes(ctx context.Context, idName *string) ([]*Scope, error) {
	defer trace.End(trace.Begin(""))

	c.Lock()
	defer c.Unlock()

	scopes, err := c.findScopes(idName)
	if err != nil {
		return nil, err
	}

	// collate the containers to update
	containers := make(map[uid.UID]*Container)
	for _, s := range scopes {
		if !s.isDynamic() {
			continue
		}

		for _, c := range s.Containers() {
			containers[c.ID()] = c
		}
	}

	for _, c := range containers {
		c.Refresh(ctx)
	}

	return scopes, nil
}
Example No. 8
File: base.go Project: vmware/vic
// updates acquires updates from the infrastructure without holding a lock
func (c *containerBase) updates(ctx context.Context) (*containerBase, error) {
	defer trace.End(trace.Begin(c.ExecConfig.ID))

	var o mo.VirtualMachine

	// make sure we have vm
	if c.vm == nil {
		return nil, NotYetExistError{c.ExecConfig.ID}
	}

	if err := c.vm.Properties(ctx, c.vm.Reference(), []string{"config", "runtime"}, &o); err != nil {
		return nil, err
	}

	base := &containerBase{
		vm:         c.vm,
		Config:     o.Config,
		Runtime:    &o.Runtime,
		ExecConfig: &executor.ExecutorConfig{},
	}

	// Get the ExtraConfig
	extraconfig.Decode(vmomi.OptionValueSource(o.Config.ExtraConfig), base.ExecConfig)

	return base, nil
}
Example No. 9
// Reload - tether.Extension implementation
func (t *attachServerSSH) Reload(config *tether.ExecutorConfig) error {
	defer trace.End(trace.Begin("attach reload"))

	t.config = config
	// process the sessions and launch if needed
	for id, session := range config.Sessions {
		log.Infof("Processing config for session %s", id)
		if session.Attach {
			log.Infof("Session %s is configured for attach", id)
			// this will return nil if already running - calling server.start not t.start so that
			// test impl gets invoked (couldn't find a better way of doing this without full polymorphism)
			err := server.start()
			if err != nil {
				detail := fmt.Sprintf("unable to start attach server: %s", err)
				log.Error(detail)
				return errors.New(detail)
			}

			return nil
		}
	}

	// none of the sessions allows attach, so stop the server - calling server.start not t.start so that
	// test impl gets invoked
	server.stop()
	return nil
}
Example No. 10
//VolumeCreate : docker personality implementation for VIC
func (v *Volume) VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) {
	defer trace.End(trace.Begin("Volume.VolumeCreate"))
	result := &types.Volume{}

	//TODO: design a way to have better error returns.

	client := PortLayerClient()
	if client == nil {
		return nil, derr.NewErrorWithStatusCode(fmt.Errorf("Failed to get a portlayer client"), http.StatusInternalServerError)
	}

	//TODO: support having another driver besides vsphere.
	//assign the values of the model to be passed to the portlayer handler
	model, varErr := translateInputsToPortlayerRequestModel(name, driverName, opts, labels)
	if varErr != nil {
		return result, derr.NewErrorWithStatusCode(fmt.Errorf("Bad Driver Arg: %s", varErr), http.StatusBadRequest)
	}

	//TODO: setup name randomization if name == nil

	res, err := client.Storage.CreateVolume(storage.NewCreateVolumeParams().WithVolumeRequest(&model))
	if err != nil {
		return result, derr.NewErrorWithStatusCode(fmt.Errorf("Server error from Portlayer: %s", err), http.StatusInternalServerError)
	}

	result = fillDockerVolumeModel(res.Payload, labels)
	return result, nil
}
Example No. 11
File: base.go Project: vmware/vic
func (c *containerBase) startGuestProgram(ctx context.Context, name string, args string) error {
	// make sure we have vm
	if c.vm == nil {
		return NotYetExistError{c.ExecConfig.ID}
	}

	defer trace.End(trace.Begin(c.ExecConfig.ID))
	o := guest.NewOperationsManager(c.vm.Client.Client, c.vm.Reference())
	m, err := o.ProcessManager(ctx)
	if err != nil {
		return err
	}

	spec := types.GuestProgramSpec{
		ProgramPath: name,
		Arguments:   args,
	}

	auth := types.NamePasswordAuthentication{
		Username: c.ExecConfig.ID,
	}

	_, err = m.StartProgram(ctx, &auth, &spec)

	return err
}
Example No. 12
func (t *Mocker) LinkSetAlias(link netlink.Link, alias string) error {
	defer trace.End(trace.Begin(fmt.Sprintf("Adding alias %s to %s", alias, link.Attrs().Name)))

	iface := link.(*Interface)
	iface.Alias = alias
	return nil
}
Example No. 13
func (t *Mocker) LinkSetUp(link netlink.Link) error {
	defer trace.End(trace.Begin(fmt.Sprintf("Bringing %s up", link.Attrs().Name)))

	iface := link.(*Interface)
	iface.Up = true
	return nil
}
Example No. 14
func (v *Validator) storage(ctx context.Context, input *data.Data, conf *config.VirtualContainerHostConfigSpec) {
	defer trace.End(trace.Begin(""))

	// Image Store
	imageDSpath, ds, err := v.DatastoreHelper(ctx, input.ImageDatastorePath, "", "--image-store")

	if imageDSpath == nil {
		v.NoteIssue(err)
		return
	}

	// provide a default path if only a DS name is provided
	if imageDSpath.Path == "" {
		imageDSpath.Path = input.DisplayName
	}

	v.NoteIssue(err)
	if ds != nil {
		v.SetDatastore(ds, imageDSpath)
		conf.AddImageStore(imageDSpath)
	}

	if conf.VolumeLocations == nil {
		conf.VolumeLocations = make(map[string]*url.URL)
	}

	// TODO: add volume locations
	for label, volDSpath := range input.VolumeLocations {
		dsURL, _, err := v.DatastoreHelper(ctx, volDSpath, label, "--volume-store")
		v.NoteIssue(err)
		if dsURL != nil {
			conf.VolumeLocations[label] = dsURL
		}
	}
}
Example No. 15
func (handler *ContainersHandlersImpl) ContainerWaitHandler(params containers.ContainerWaitParams) middleware.Responder {
	defer trace.End(trace.Begin(fmt.Sprintf("%s:%d", params.ID, params.Timeout)))

	// default context timeout in seconds
	defaultTimeout := int64(containerWaitTimeout.Seconds())

	// if we have a positive timeout specified then use it
	if params.Timeout > 0 {
		defaultTimeout = params.Timeout
	}

	timeout := time.Duration(defaultTimeout) * time.Second

	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	c := exec.Containers.Container(uid.Parse(params.ID).String())
	if c == nil {
		return containers.NewContainerWaitNotFound().WithPayload(&models.Error{
			Message: fmt.Sprintf("container %s not found", params.ID),
		})
	}

	select {
	case <-c.WaitForState(exec.StateStopped):
		c.Refresh(context.Background())
		containerInfo := convertContainerToContainerInfo(c.Info())

		return containers.NewContainerWaitOK().WithPayload(containerInfo)
	case <-ctx.Done():
		return containers.NewContainerWaitInternalServerError().WithPayload(&models.Error{
			Message: fmt.Sprintf("ContainerWaitHandler(%s) Error: %s", params.ID, ctx.Err()),
		})
	}
}
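
ContainerWaitHandler combines context.WithTimeout with a select over the container's state channel, so the wait either completes when the container reaches StateStopped or fails with ctx.Err() once the deadline passes. Below is a stripped-down, standard-library-only sketch of that wait pattern; waitForStop is a hypothetical stand-in for c.WaitForState(exec.StateStopped).

package main

import (
	"context"
	"fmt"
	"time"
)

// waitForStop stands in for c.WaitForState(exec.StateStopped): it returns a
// channel that is closed once the simulated container has stopped.
func waitForStop(after time.Duration) <-chan struct{} {
	done := make(chan struct{})
	go func() {
		time.Sleep(after)
		close(done)
	}()
	return done
}

func main() {
	// default timeout; the handler overrides it with a caller-supplied positive value
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	select {
	case <-waitForStop(500 * time.Millisecond):
		fmt.Println("container stopped")
	case <-ctx.Done():
		fmt.Println("wait aborted:", ctx.Err())
	}
}
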
Example No. 16
func (path fileReader) open() (entry, error) {
	defer trace.End(trace.Begin(string(path)))

	f, err := os.Open(string(path))
	if err != nil {
		return nil, err
	}

	s, err := os.Stat(string(path))
	if err != nil {
		return nil, err
	}

	// Files in /proc always have struct stat.st_size==0, so just read it into memory.
	if s.Size() == 0 && strings.HasPrefix(f.Name(), "/proc/") {
		b, err := ioutil.ReadAll(f)
		_ = f.Close()
		if err != nil {
			return nil, err
		}

		return newBytesEntry(f.Name(), b), nil
	}

	return &fileEntry{
		ReadCloser: f,
		FileInfo:   s,
	}, nil
}
Example No. 17
func (c *Context) DeleteScope(ctx context.Context, name string) error {
	defer trace.End(trace.Begin(""))

	c.Lock()
	defer c.Unlock()

	s, err := c.resolveScope(name)
	if err != nil {
		return err
	}

	if s == nil {
		return ResourceNotFoundError{}
	}

	if s.builtin {
		return fmt.Errorf("cannot remove builtin scope")
	}

	if len(s.Endpoints()) != 0 {
		return fmt.Errorf("%s has active endpoints", s.Name())
	}

	if c.kv != nil {
		if err = c.kv.Delete(ctx, scopeKey(s.Name())); err != nil && err != kvstore.ErrKeyNotFound {
			return err
		}
	}

	c.deleteScope(s)
	return nil
}
Example No. 18
// listVMPaths returns an array of datastore paths for VMs associated with the
// VCH - this includes containerVMs and the appliance
func listVMPaths(ctx context.Context, s *session.Session) ([]url.URL, error) {
	defer trace.End(trace.Begin(""))

	var err error
	var children []*vm.VirtualMachine

	if len(vchConfig.ComputeResources) == 0 {
		return nil, errors.New("compute resources is empty")
	}

	ref := vchConfig.ComputeResources[0]
	rp := compute.NewResourcePool(ctx, s, ref)
	if children, err = rp.GetChildrenVMs(ctx, s); err != nil {
		return nil, err
	}

	log.Infof("Found %d candidate VMs in resource pool %s for log collection", len(children), ref.String())

	directories := []url.URL{}
	for _, child := range children {
		path, err := child.DSPath(ctx)
		if err != nil {
			log.Errorf("Unable to get datastore path for child VM %s: %s", child.Reference(), err)
			// we need to get as many logs as possible
			continue
		}

		log.Debugf("Adding VM for log collection: %s", path.String())
		directories = append(directories, path)
	}

	log.Infof("Collecting logs from %d VMs", len(directories))
	return directories, nil
}
Example No. 19
func (c *Context) NewScope(ctx context.Context, scopeType, name string, subnet *net.IPNet, gateway net.IP, dns []net.IP, pools []string) (*Scope, error) {
	defer trace.End(trace.Begin(""))

	c.Lock()
	defer c.Unlock()

	s, err := c.newScope(scopeType, name, subnet, gateway, dns, pools)
	if err != nil {
		return nil, err
	}
	defer func() {
		if err != nil {
			c.deleteScope(s)
		}
	}()

	// save the scope in the kv store
	if c.kv != nil {
		var d []byte
		d, err = s.MarshalJSON()
		if err != nil {
			return nil, err
		}

		if err = c.kv.Put(ctx, scopeKey(s.Name()), d); err != nil {
			return nil, err
		}
	}

	return s, nil
}
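
NewScope rolls back on failure with a deferred closure that inspects err at return time: because every later failure assigns to the same err variable rather than shadowing it, the deferred check sees the final error and deletes the half-created scope. A minimal sketch of that idiom follows; createResource, persist, and cleanup are hypothetical helpers used only to show the shape of the pattern.

package main

import (
	"errors"
	"fmt"
)

// Hypothetical helpers: create something, persist it, undo it on failure.
func createResource() (string, error) { return "scope-1", nil }
func persist(name string) error       { return errors.New("kv store unavailable") }
func cleanup(name string)             { fmt.Println("rolled back", name) }

func newResource() (string, error) {
	res, err := createResource()
	if err != nil {
		return "", err
	}

	// The deferred closure captures err; any later failure that assigns to
	// this same err variable (not a shadowed copy) triggers the rollback.
	defer func() {
		if err != nil {
			cleanup(res)
		}
	}()

	if err = persist(res); err != nil {
		return "", err
	}
	return res, nil
}

func main() {
	if _, err := newResource(); err != nil {
		fmt.Println("create failed:", err)
	}
}
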
Example No. 20
File: attach.go Project: vmware/vic
// Join adds a network-backed serial port to the caller's handle and configures it
func Join(h interface{}) (interface{}, error) {
	defer trace.End(trace.Begin(""))

	handle, ok := h.(*exec.Handle)
	if !ok {
		return nil, fmt.Errorf("Type assertion failed for %#+v", handle)
	}

	// Tether serial port - backed by network
	serial := &types.VirtualSerialPort{
		VirtualDevice: types.VirtualDevice{
			Backing: &types.VirtualSerialPortURIBackingInfo{
				VirtualDeviceURIBackingInfo: types.VirtualDeviceURIBackingInfo{
					Direction: string(types.VirtualDeviceURIBackingOptionDirectionClient),
					// Set it to 0.0.0.0 during Join call, VCH IP will be set when we call Bind
					ServiceURI: fmt.Sprintf("tcp://0.0.0.0:%d", constants.SerialOverLANPort),
				},
			},
			Connectable: &types.VirtualDeviceConnectInfo{
				Connected:         false,
				StartConnected:    false,
				AllowGuestControl: true,
			},
		},
		YieldOnPoll: true,
	}
	config := &types.VirtualDeviceConfigSpec{
		Device:    serial,
		Operation: types.VirtualDeviceConfigSpecOperationAdd,
	}
	handle.Spec.DeviceChange = append(handle.Spec.DeviceChange, config)

	return handle, nil
}
Example No. 21
func (c *Context) BindContainer(h *exec.Handle) ([]*Endpoint, error) {
	defer trace.End(trace.Begin(""))
	c.Lock()
	defer c.Unlock()

	return c.bindContainer(h)
}
Example No. 22
File: disk.go Project: jak-atx/vic
// AddVirtualDisk adds a virtual disk to a virtual machine.
func (s *VirtualMachineConfigSpec) AddVirtualDisk(device *types.VirtualDisk) *VirtualMachineConfigSpec {
	defer trace.End(trace.Begin(s.ID()))

	device.GetVirtualDevice().Key = s.generateNextKey()

	device.CapacityInKB = defaultCapacityInKB

	moref := s.Datastore.Reference()

	device.GetVirtualDevice().Backing = &types.VirtualDiskFlatVer2BackingInfo{
		DiskMode:        string(types.VirtualDiskModePersistent),
		ThinProvisioned: types.NewBool(true),

		VirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{
			FileName:  s.Datastore.Path(fmt.Sprintf("%s/%[1]s.vmdk", s.ID())),
			Datastore: &moref,
		},
	}

	// Add the parent if we set ParentImageID
	backing := device.GetVirtualDevice().Backing.(*types.VirtualDiskFlatVer2BackingInfo)
	if s.ParentImageID() != "" {
		backing.Parent = &types.VirtualDiskFlatVer2BackingInfo{
			VirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{
				// XXX This needs to come from a storage helper in the future
				// and should not be computed here like this.
				FileName: s.Datastore.Path(fmt.Sprintf("VIC/%s/images/%s/%[2]s.vmdk",
					s.ImageStoreName(),
					s.ParentImageID())),
			},
		}
	}

	return s.AddAndCreateVirtualDevice(device)
}
Example No. 23
func (d *Dispatcher) deleteDatastoreFiles(ds *object.Datastore, path string, force bool) (bool, error) {
	defer trace.End(trace.Begin(fmt.Sprintf("path %q, force %t", path, force)))

	// refuse to delete everything on the datastore, ignore force
	if path == "" {
		dsn, _ := ds.ObjectName(d.ctx)
		msg := fmt.Sprintf("refusing to remove datastore files for path \"\" on datastore %q", dsn)
		return false, errors.New(msg)
	}

	var empty bool
	dsPath := ds.Path(path)

	res, err := d.lsFolder(ds, dsPath)
	if err != nil {
		if !types.IsFileNotFound(err) {
			err = errors.Errorf("Failed to browse folder %q: %s", dsPath, err)
			return empty, err
		}
		log.Debugf("Folder %q is not found", dsPath)
		empty = true
		return empty, nil
	}
	if len(res.File) > 0 && !force {
		log.Debugf("Folder %q is not empty, leave it there", dsPath)
		return empty, nil
	}

	m := object.NewFileManager(ds.Client())
	if err = d.deleteFilesIteratively(m, ds, dsPath); err != nil {
		return empty, err
	}
	return true, nil
}
Example No. 24
func (handler *ContainersHandlersImpl) GetStateHandler(params containers.GetStateParams) middleware.Responder {
	defer trace.End(trace.Begin(fmt.Sprintf("handle(%s)", params.Handle)))

	// NOTE: I've no idea why GetStateHandler takes a handle instead of an ID - hopefully there was a reason for an inspection
	// operation to take this path
	h := exec.GetHandle(params.Handle)
	if h == nil || h.ExecConfig == nil {
		return containers.NewGetStateNotFound()
	}

	container := exec.Containers.Container(h.ExecConfig.ID)
	if container == nil {
		return containers.NewGetStateNotFound()
	}

	var state string
	switch container.CurrentState() {
	case exec.StateRunning:
		state = "RUNNING"

	case exec.StateStopped:
		state = "STOPPED"

	case exec.StateCreated:
		state = "CREATED"

	default:
		return containers.NewGetStateDefault(http.StatusServiceUnavailable)
	}

	return containers.NewGetStateOK().WithPayload(&models.ContainerGetStateResponse{Handle: h.String(), State: state})
}
Example No. 25
func (p *Pluginator) connect() error {
	defer trace.End(trace.Begin(""))
	var err error

	sessionconfig := &session.Config{
		Insecure: true,
	}
	sessionconfig.Service = p.tURL.String()

	p.Session = session.NewSession(sessionconfig)
	p.Session, err = p.Session.Connect(p.Context)
	if err != nil {
		return fmt.Errorf("failed to connect: %s", err)
	}

	p.Session.Populate(p.Context)

	em, err := object.GetExtensionManager(p.Session.Client.Client)
	if err != nil {
		return fmt.Errorf("failed to get extension manager: %s", err)
	}
	p.ExtensionManager = em

	p.connected = true
	return nil
}
Example No. 26
func (handler *ContainersHandlersImpl) RemoveContainerHandler(params containers.ContainerRemoveParams) middleware.Responder {
	defer trace.End(trace.Begin(params.ID))

	// get the indicated container for removal
	cID := uid.Parse(params.ID)
	h := exec.GetContainer(context.Background(), cID)
	if h == nil || h.ExecConfig == nil {
		return containers.NewContainerRemoveNotFound()
	}

	container := exec.Containers.Container(h.ExecConfig.ID)
	if container == nil {
		return containers.NewContainerRemoveNotFound()
	}

	// NOTE: this should allow batching of operations, as with Create, Start, Stop, et al
	err := container.Remove(context.Background(), handler.handlerCtx.Session)
	if err != nil {
		switch err := err.(type) {
		case exec.NotFoundError:
			return containers.NewContainerRemoveNotFound()
		case exec.RemovePowerError:
			return containers.NewContainerRemoveConflict().WithPayload(&models.Error{Message: err.Error()})
		default:
			return containers.NewContainerRemoveInternalServerError()
		}
	}

	return containers.NewContainerRemoveOK()
}
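
RemoveContainerHandler translates typed errors from the port layer into distinct responders with a type switch. The same dispatch, reduced to plain error types and HTTP status codes, looks like the sketch below; NotFoundError and ConflictError here are hypothetical stand-ins for exec.NotFoundError and exec.RemovePowerError.

package main

import (
	"errors"
	"fmt"
	"net/http"
)

// Hypothetical error types standing in for exec.NotFoundError and exec.RemovePowerError.
type NotFoundError struct{ ID string }
type ConflictError struct{ Reason string }

func (e NotFoundError) Error() string { return "container not found: " + e.ID }
func (e ConflictError) Error() string { return "remove conflict: " + e.Reason }

// statusFor mirrors the switch in RemoveContainerHandler: pick a response
// (here just a status code) based on the concrete error type.
func statusFor(err error) int {
	switch err.(type) {
	case NotFoundError:
		return http.StatusNotFound
	case ConflictError:
		return http.StatusConflict
	default:
		return http.StatusInternalServerError
	}
}

func main() {
	fmt.Println(statusFor(NotFoundError{ID: "c1"}))        // 404
	fmt.Println(statusFor(ConflictError{Reason: "power"})) // 409
	fmt.Println(statusFor(errors.New("boom")))             // 500
}
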
Example No. 27
func (d *Dispatcher) findApplianceByID(conf *metadata.VirtualContainerHostConfigSpec) (*vm.VirtualMachine, error) {
	defer trace.End(trace.Begin(""))

	var err error
	var vmm *vm.VirtualMachine

	moref := new(types.ManagedObjectReference)
	if ok := moref.FromString(conf.ID); !ok {
		message := "Failed to get appliance VM mob reference"
		log.Errorf(message)
		return nil, errors.New(message)
	}
	ref, err := d.session.Finder.ObjectReference(d.ctx, *moref)
	if err != nil {
		if _, ok := err.(*find.NotFoundError); !ok {
			err = errors.Errorf("Failed to query appliance (%s): %s", moref, err)
			return nil, err
		}
		log.Debugf("Appliance is not found")
		return nil, nil

	}
	ovm, ok := ref.(*object.VirtualMachine)
	if !ok {
		log.Errorf("Failed to find VM %s, %s", moref, err)
		return nil, err
	}
	vmm = vm.NewVirtualMachine(d.ctx, d.session, ovm.Reference())
	return vmm, nil
}
Example No. 28
func (handler *ContainersHandlersImpl) GetContainerLogsHandler(params containers.GetContainerLogsParams) middleware.Responder {
	defer trace.End(trace.Begin(params.ID))

	container := exec.Containers.Container(params.ID)
	if container == nil {
		return containers.NewGetContainerLogsNotFound().WithPayload(&models.Error{
			Message: fmt.Sprintf("container %s not found", params.ID),
		})
	}

	follow := false
	tail := -1

	if params.Follow != nil {
		follow = *params.Follow
	}

	if params.Taillines != nil {
		tail = int(*params.Taillines)
	}

	reader, err := container.LogReader(context.Background(), tail, follow)
	if err != nil {
		return containers.NewGetContainerLogsInternalServerError().WithPayload(&models.Error{Message: err.Error()})
	}

	detachableOut := NewFlushingReader(reader)

	return NewContainerOutputHandler("logs").WithPayload(detachableOut, params.ID)
}
Example No. 29
func (d *Dispatcher) reconfigureApplianceSpec(vm *vm.VirtualMachine, conf *metadata.VirtualContainerHostConfigSpec) (*types.VirtualMachineConfigSpec, error) {
	defer trace.End(trace.Begin(""))

	var devices object.VirtualDeviceList
	var err error

	spec := &types.VirtualMachineConfigSpec{
		Name:    conf.Name,
		GuestId: "other3xLinux64Guest",
		Files:   &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", conf.ImageStores[0].Host)},
	}

	if devices, err = d.configIso(conf, vm); err != nil {
		return nil, err
	}

	deviceChange, err := devices.ConfigSpec(types.VirtualDeviceConfigSpecOperationAdd)
	if err != nil {
		log.Errorf("Failed to create config spec for appliance: %s", err)
		return nil, err
	}

	spec.DeviceChange = deviceChange

	cfg := make(map[string]string)
	extraconfig.Encode(extraconfig.MapSink(cfg), conf)
	spec.ExtraConfig = append(spec.ExtraConfig, extraconfig.OptionValueFromMap(cfg)...)
	return spec, nil
}
Example No. 30
File: volume.go Project: vmware/vic
// VolumeInspect : docker personality implementation for VIC
func (v *Volume) VolumeInspect(name string) (*types.Volume, error) {
	defer trace.End(trace.Begin(name))

	client := PortLayerClient()
	if client == nil {
		return nil, fmt.Errorf("failed to get a portlayer client")
	}

	if name == "" {
		return nil, nil
	}

	param := storage.NewGetVolumeParamsWithContext(ctx).WithName(name)
	res, err := client.Storage.GetVolume(param)
	if err != nil {
		switch err := err.(type) {
		case *storage.GetVolumeNotFound:
			return nil, VolumeNotFoundError(name)
		default:
			return nil, derr.NewErrorWithStatusCode(fmt.Errorf("error from portlayer server: %s", err.Error()), http.StatusInternalServerError)
		}
	}

	volumeMetadata, err := extractDockerMetadata(res.Payload.Metadata)
	if err != nil {
		return nil, derr.NewErrorWithStatusCode(fmt.Errorf("error unmarshalling docker metadata: %s", err), http.StatusInternalServerError)
	}
	volume := NewVolumeModel(res.Payload, volumeMetadata.Labels)

	return volume, nil
}