Example #1
File: volume.go Project: vmware/vic
// volumeCreate issues a CreateVolume request to the portlayer
func (v *Volume) volumeCreate(name, driverName string, volumeData, labels map[string]string) (*types.Volume, error) {
	defer trace.End(trace.Begin(""))
	result := &types.Volume{}

	client := PortLayerClient()
	if client == nil {
		return nil, fmt.Errorf("failed to get a portlayer client")
	}

	if name == "" {
		name = uuid.New().String()
	}

	// TODO: support having another driver besides vsphere.
	// assign the values of the model to be passed to the portlayer handler
	req, varErr := newVolumeCreateReq(name, driverName, volumeData, labels)
	if varErr != nil {
		return result, varErr
	}
	log.Infof("Finalized model for volume create request to portlayer: %#v", req)

	res, err := client.Storage.CreateVolume(storage.NewCreateVolumeParamsWithContext(ctx).WithVolumeRequest(req))
	if err != nil {
		return result, err
	}
	result = NewVolumeModel(res.Payload, labels)
	return result, nil
}
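Note that an empty name is not an error: the volume simply gets a fresh UUID as its name, the same pattern Docker applies to anonymous volumes.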
Example #2
func (p *PortLayerRPCClient) Connect() error {
	// Ignore Init args on the client - that is the server's responsibility
	var err error
	gob.Register(uuid.New())
	p.client, err = rpc.DialHTTP("tcp", serverAddress+":1234")
	return err
}
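The gob.Register call matters because net/rpc serializes with encoding/gob, and gob refuses to encode a value carried inside an interface unless its concrete type was registered first. A minimal round-trip sketch, assuming github.com/google/uuid for the uuid import (the example's actual import is not shown):

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"log"

	"github.com/google/uuid" // assumed; the original uuid import is not shown
)

func main() {
	// Register the concrete UUID type once, exactly as the RPC client does.
	gob.Register(uuid.New())

	// Encode a UUID hidden behind an interface, the shape it takes inside an
	// interface-typed field of an RPC argument struct.
	var in interface{} = uuid.New()
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(&in); err != nil {
		log.Fatal(err) // without Register: "type not registered for interface"
	}

	var out interface{}
	if err := gob.NewDecoder(&buf).Decode(&out); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("round-tripped %T: %v\n", out, out)
}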
Example #3
func TestMain(m *testing.M) {
	Sys = system.System{
		UUID: uuid.New().String(),
	}

	os.Exit(m.Run())
}
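TestMain runs once for the whole test binary, so seeding the package-level Sys with a random host UUID here gives every test in the package the same simulated system identity; os.Exit(m.Run()) propagates the aggregate test result as the process exit code.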
Example #4
func (s *SessionManager) AcquireGenericServiceTicket(ticket *types.AcquireGenericServiceTicket) soap.HasFault {
	return &methods.AcquireGenericServiceTicketBody{
		Res: &types.AcquireGenericServiceTicketResponse{
			Returnval: types.SessionManagerGenericServiceTicket{
				Id:       uuid.New().String(),
				HostName: s.ServiceHostName,
			},
		},
	}
}
Example #5
func main() {
	gob.Register(uuid.New())
	rpcServer := new(PortLayerRPCServer)
	rpc.Register(rpcServer)
	rpc.HandleHTTP()
	l, e := net.Listen("tcp", ":1234")
	if e != nil {
		log.Fatal("listen error:", e)
	}
	fmt.Println("Server listening")
	log.Fatal(http.Serve(l, nil))
}
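This is the server half of Example 2's client: rpc.Register exposes every exported method of PortLayerRPCServer matching net/rpc's required shape, func (t *T) Method(args A, reply *R) error. A hypothetical method sketch (Ping is an assumption for illustration, not part of the original server):

// Hypothetical: any method with this shape becomes callable from the
// client in Example 2 as "PortLayerRPCServer.Ping".
func (s *PortLayerRPCServer) Ping(msg string, reply *string) error {
	*reply = "pong: " + msg
	return nil
}

The client would then invoke it with p.client.Call("PortLayerRPCServer.Ping", "hi", &reply).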
Example #6
func NewVirtualMachine(spec *types.VirtualMachineConfigSpec) (*VirtualMachine, types.BaseMethodFault) {
	vm := &VirtualMachine{}

	if spec.Name == "" {
		return nil, &types.InvalidVmConfig{Property: "configSpec.name"}
	}

	if spec.Files == nil || spec.Files.VmPathName == "" {
		return nil, &types.InvalidVmConfig{Property: "configSpec.files.vmPathName"}
	}

	vm.Config = &types.VirtualMachineConfigInfo{}
	vm.Summary.Guest = &types.VirtualMachineGuestSummary{}
	vm.Summary.Storage = &types.VirtualMachineStorageSummary{}

	// Add the default devices
	devices, _ := object.VirtualDeviceList(esx.VirtualDevice).ConfigSpec(types.VirtualDeviceConfigSpecOperationAdd)

	if !strings.HasSuffix(spec.Files.VmPathName, ".vmx") {
		spec.Files.VmPathName = path.Join(spec.Files.VmPathName, spec.Name+".vmx")
	}

	dsPath := path.Dir(spec.Files.VmPathName)

	defaults := types.VirtualMachineConfigSpec{
		NumCPUs:           1,
		NumCoresPerSocket: 1,
		MemoryMB:          32,
		Uuid:              uuid.New().String(),
		Version:           "vmx-11",
		Files: &types.VirtualMachineFileInfo{
			SnapshotDirectory: dsPath,
			SuspendDirectory:  dsPath,
			LogDirectory:      dsPath,
		},
		DeviceChange: devices,
	}

	err := vm.configure(&defaults)
	if err != nil {
		return nil, err
	}

	vm.Runtime.PowerState = types.VirtualMachinePowerStatePoweredOff
	vm.Summary.Runtime = vm.Runtime

	err = vm.configure(spec)
	if err != nil {
		return nil, err
	}

	return vm, nil
}
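The two configure passes are deliberate: the defaults are applied first and the caller's spec second, so any field the caller sets wins. A minimal valid spec therefore needs only Name and Files.VmPathName; a sketch, assuming govmomi's vim25/types:

spec := &types.VirtualMachineConfigSpec{
	Name: "test-vm",
	Files: &types.VirtualMachineFileInfo{
		// "/test-vm.vmx" is appended because the ".vmx" suffix is missing
		VmPathName: "[datastore1] test-vm",
	},
}
vm, fault := NewVirtualMachine(spec)
if fault != nil {
	// a fault such as *types.InvalidVmConfig names the offending property
}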
Example #7
/* This is where we do the actual location monitoring. This
   is started as a concurrent goroutine, and monitors loc_id
   indefinitely.
   - loc_id is the index into the config.Locations array to
     monitor */
func monitor(loc_id int, cont chan bool) {
	log.Println("Spinning up monitor on location ID:", loc_id)

	stop := false
	location := config.Locations[loc_id]
	st, err := checkPermissions(location)
	if err != nil {
		log.Error(err)
		return
	}
	log.Infoln("Permissions check for", location, "passed:", st.Mode())
	watcher, err := inotify.NewWatcher()
	if err != nil {
		log.Error(err)
		return
	}
	err = watcher.Watch(location)
	if err != nil {
		log.Error(err)
		return
	}
	log.Infoln("Watcher up; monitoring:", location)
	for !stop {
		cached_id := uuid.New().String() // cache a new UUID for this iteration
		select {
		case ev := <-watcher.Event:
			log.Debugln("monitored directory event:", ev)
			if ev.Mask == inotify.IN_CLOSE_WRITE {
				log.Info("Found; ", path.Base(ev.Name), " Moving to staging")
				os.Rename(ev.Name, config.Staging_loc+"/"+cached_id)
				var op Operation
				op.Code = ProcessFile
				op.Id = cached_id
				op.Name = path.Base(ev.Name)
				op.Location = path.Dir(ev.Name)
				op.Overwrite = false //TODO determine if this should be gleaned from the file name
				meta.stash <- op
			}
		case err := <-watcher.Error:
			log.Error("Monitor error;", err)
			continue
		case stop = <-cont:
			log.Infoln("Spinning down monitor on ", location)
			break
		}
	}
}
Example #8
// VolumeCreate : docker personality implementation for VIC
func (v *Volume) VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) {
	defer trace.End(trace.Begin("Volume.VolumeCreate"))
	result := &types.Volume{}

	client := PortLayerClient()
	if client == nil {
		return nil, derr.NewErrorWithStatusCode(fmt.Errorf("Failed to get a portlayer client"), http.StatusInternalServerError)
	}

	// TODO: support having another driver besides vsphere.
	// assign the values of the model to be passed to the portlayer handler
	model, varErr := translateInputsToPortlayerRequestModel(name, driverName, opts, labels)
	if varErr != nil {
		return result, derr.NewErrorWithStatusCode(fmt.Errorf("Bad Driver Arg: %s", varErr), http.StatusBadRequest)
	}

	if model.Name == "" {
		model.Name = uuid.New().String()
	}

	res, err := client.Storage.CreateVolume(storage.NewCreateVolumeParamsWithContext(ctx).WithVolumeRequest(model))
	if err != nil {
		switch err := err.(type) {

		case *storage.CreateVolumeInternalServerError:
			// FIXME: right now this does not return an error model...
			return result, derr.NewErrorWithStatusCode(fmt.Errorf("%s", err.Error()), http.StatusInternalServerError)

		case *storage.CreateVolumeDefault:
			return result, derr.NewErrorWithStatusCode(fmt.Errorf("%s", err.Payload.Message), http.StatusInternalServerError)

		default:
			return result, derr.NewErrorWithStatusCode(fmt.Errorf("%s", err), http.StatusInternalServerError)
		}
	}

	result = NewVolumeModel(res.Payload, labels)
	return result, nil
}
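Compare with Example 1: the flow is identical, but each failure here is wrapped with derr.NewErrorWithStatusCode so the Docker personality can return a proper HTTP status, 400 for bad driver arguments and 500 for portlayer failures, rather than a bare error.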
Example #9
// A sparse handle is simply a random string
func newSparseHandle() SparseHandle {
	return SparseHandle(uuid.New())
}
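The direct conversion only compiles when uuid.New returns a string-backed value, as github.com/pborman/uuid's New() string does; with github.com/google/uuid, whose New() returns a [16]byte UUID, this would need to be SparseHandle(uuid.New().String()).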
Example #10
func TestResourcePool(t *testing.T) {
	ctx := context.Background()

	m := &Model{
		ServiceContent: esx.ServiceContent,
		RootFolder:     esx.RootFolder,
	}

	err := m.Create()
	if err != nil {
		t.Fatal(err)
	}

	c := m.Service.client

	finder := find.NewFinder(c, false)
	finder.SetDatacenter(object.NewDatacenter(c, esx.Datacenter.Reference()))

	spec := NewResourceConfigSpec()

	parent := object.NewResourcePool(c, esx.ResourcePool.Self)

	// can't destroy a root pool
	task, err := parent.Destroy(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if err = task.Wait(ctx); err == nil {
		t.Fatal("expected error destroying a root pool")
	}

	// create a child pool
	childName := uuid.New().String()

	child, err := parent.Create(ctx, childName, spec)
	if err != nil {
		t.Fatal(err)
	}

	if child.Reference() == esx.ResourcePool.Self {
		t.Error("expected new pool Self reference")
	}

	// create a grandchild pool
	grandChildName := uuid.New().String()
	_, err = child.Create(ctx, grandChildName, spec)
	if err != nil {
		t.Fatal(err)
	}

	// create sibling (of the grand child) pool
	siblingName := uuid.New().String()
	_, err = child.Create(ctx, siblingName, spec)
	if err != nil {
		t.Fatal(err)
	}

	// finder should return the 2 grand children
	pools, err := finder.ResourcePoolList(ctx, "*/Resources/"+childName+"/*")
	if err != nil {
		t.Fatal(err)
	}
	if len(pools) != 2 {
		t.Fatalf("len(pools) == %d", len(pools))
	}

	// destroy the child
	task, err = child.Destroy(ctx)
	if err != nil {
		t.Fatal(err)
	}
	err = task.Wait(ctx)
	if err != nil {
		t.Fatal(err)
	}

	// finder should error not found after destroying the child
	_, err = finder.ResourcePoolList(ctx, "*/Resources/"+childName+"/*")
	if err == nil {
		t.Fatal("expected not found error")
	}

	// since the child was destroyed, grand child pools should now be children of the root pool
	pools, err = finder.ResourcePoolList(ctx, "*/Resources/*")
	if err != nil {
		t.Fatal(err)
	}

	if len(pools) != 2 {
		t.Fatalf("len(pools) == %d", len(pools))
	}
}
Example #11
func GenerateID() ID {
	return ID(uuid.New())
}
Example #12
// TestName builds a unique datastore name
func TestName(suffix string) string {
	return uuid.New().String()[0:16] + "-" + suffix
}