Example #1
func TestVolumeCreate(t *testing.T) {

	// Back a fresh volume lookup cache with an in-memory mock store
	testStore := NewMockVolumeStore()
	op := trace.NewOperation(context.Background(), "test")
	volCache, err := spl.NewVolumeLookupCache(op, testStore)
	if !assert.NoError(t, err) {
		return
	}

	handler := StorageHandlersImpl{
		volumeCache: volCache,
	}

	// Build the volume creation request
	model := models.VolumeRequest{}
	model.Store = "blah"
	model.Name = "testVolume"
	model.Capacity = 1
	model.Driver = "vsphere"
	model.DriverArgs = make(map[string]string)
	model.DriverArgs["stuff"] = "things"
	model.Metadata = make(map[string]string)
	params := storage.NewCreateVolumeParams()
	params.VolumeRequest = &model

	// Create the volume through the handler, then verify it reached the backing store
	handler.CreateVolume(params)
	testVolume, err := testStore.VolumeGet(op, model.Name)
	if !assert.NoError(t, err) {
		return
	}

	if !assert.NotNil(t, testVolume) {
		return
	}
	testVolumeStoreName, err := util.VolumeStoreName(testVolume.Store)
	if !assert.NoError(t, err) {
		return
	}
	if !assert.Equal(t, "blah", testVolumeStoreName) {
		return
	}
	if !assert.Equal(t, "testVolume", testVolume.ID) {
		return
	}
}
Example #2
func TestVolumeCreate(t *testing.T) {

	var err error
	testStore := NewMockVolumeStore()
	storageVolumeLayer, err = spl.NewVolumeLookupCache(context.TODO(), testStore)
	if !assert.NoError(t, err) {
		return
	}

	model := models.VolumeRequest{}
	model.Store = "blah"
	model.Name = "testVolume"
	model.Capacity = 1
	model.Driver = "vsphere"
	model.DriverArgs = make(map[string]string)
	model.DriverArgs["stuff"] = "things"
	model.Metadata = make(map[string]string)
	params := storage.NewCreateVolumeParams()
	params.VolumeRequest = &model
	implementationHandler := StorageHandlersImpl{}

	implementationHandler.CreateVolume(params)
	testVolume, err := testStore.VolumeGet(context.TODO(), model.Name)
	if !assert.NoError(t, err) {
		return
	}

	if !assert.NotNil(t, testVolume) {
		return
	}
	testVolumeStoreName, err := util.VolumeStoreName(testVolume.Store)
	if !assert.NoError(t, err) {
		return
	}
	if !assert.Equal(t, "blah", testVolumeStoreName) {
		return
	}
	if !assert.Equal(t, "testVolume", testVolume.ID) {
		return
	}
}
Example #3
func TestVolumeCreateListAndRestart(t *testing.T) {
	client := datastore.Session(context.TODO(), t)
	if client == nil {
		return
	}

	ctx := context.TODO()

	// Create the backing store on vsphere
	vsVolumeStore, err := NewVolumeStore(ctx, client)
	if !assert.NoError(t, err) || !assert.NotNil(t, vsVolumeStore) {
		return
	}

	// Root our datastore
	testStorePath := datastore.TestName("voltest")
	ds, err := datastore.NewHelper(ctx, client, client.Datastore, testStorePath)
	if !assert.NoError(t, err) || !assert.NotNil(t, ds) {
		return
	}

	// Add a volume store and give it a name ("testStoreName")
	volumeStore, err := vsVolumeStore.AddStore(ctx, ds, "testStoreName")
	if !assert.NoError(t, err) || !assert.NotNil(t, volumeStore) {
		return
	}

	// test we can list it
	m, err := vsVolumeStore.VolumeStoresList(ctx)
	if !assert.NoError(t, err) || !assert.NotNil(t, m) {
		return
	}

	// test the returned url matches
	s, ok := m["testStoreName"]
	if !assert.True(t, ok) || !assert.Equal(t, testStorePath, filepath.Base(s.String())) {
		return
	}

	// Clean up the mess
	defer func() {
		fm := object.NewFileManager(client.Vim25())
		tasks.WaitForResult(context.TODO(), func(ctx context.Context) (tasks.ResultWaiter, error) {
			return fm.DeleteDatastoreFile(ctx, client.Datastore.Path(testStorePath), client.Datacenter)
		})
	}()

	// Create the cache
	cache, err := portlayer.NewVolumeLookupCache(ctx, vsVolumeStore)
	if !assert.NoError(t, err) || !assert.NotNil(t, cache) {
		return
	}

	// Create the volumes (in parallel)
	numVols := 5
	wg := &sync.WaitGroup{}
	wg.Add(numVols)
	// volumes is written from multiple goroutines below, so guard it with a mutex
	mu := &sync.Mutex{}
	volumes := make(map[string]*portlayer.Volume)
	for i := 0; i < numVols; i++ {
		go func(idx int) {
			defer wg.Done()
			ID := fmt.Sprintf("testvolume-%d", idx)

			// add some metadata if i is even
			var info map[string][]byte

			if idx%2 == 0 {
				info = make(map[string][]byte)
				info[ID] = []byte(ID)
			}

			outVol, err := cache.VolumeCreate(ctx, ID, volumeStore, 10240, info)
			if !assert.NoError(t, err) || !assert.NotNil(t, outVol) {
				return
			}

			mu.Lock()
			volumes[ID] = outVol
			mu.Unlock()
		}(i)
	}

	wg.Wait()

	// list using the datastore (skipping the cache)
	outVols, err := vsVolumeStore.VolumesList(ctx)
	if !assert.NoError(t, err) || !assert.NotNil(t, outVols) || !assert.Equal(t, numVols, len(outVols)) {
		return
	}

	for _, outVol := range outVols {
		if !assert.Equal(t, volumes[outVol.ID], outVol) {
			return
		}
	}

	// Test restart

	// Create a new vs and cache to the same datastore (simulating restart) and compare
	secondVStore, err := NewVolumeStore(ctx, client)
	if !assert.NoError(t, err) || !assert.NotNil(t, secondVStore) {
		return
	}

	volumeStore, err = secondVStore.AddStore(ctx, ds, "testStoreName")
	if !assert.NoError(t, err) || !assert.NotNil(t, volumeStore) {
		return
	}
	secondCache, err := portlayer.NewVolumeLookupCache(ctx, secondVStore)
	if !assert.NoError(t, err) || !assert.NotNil(t, secondCache) {
		return
	}

	secondOutVols, err := secondCache.VolumesList(ctx)
	if !assert.NoError(t, err) || !assert.NotNil(t, secondOutVols) || !assert.Equal(t, numVols, len(secondOutVols)) {
		return
	}

	for _, outVol := range secondOutVols {
		// XXX we could compare the Volumes, but the paths are different the
		// second time around on vsan since the vsan UUID is not included.
		if !assert.NotEmpty(t, volumes[outVol.ID].Device.DiskPath()) {
			return
		}
	}
}
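
The parallel create step in Example #3 collects results from several goroutines into a shared map. Below is a minimal, self-contained sketch of the WaitGroup-plus-mutex pattern that keeps that collection race-free; it uses only the standard library and stand-in values rather than the portlayer volume types.

package main

import (
	"fmt"
	"sync"
)

func main() {
	const numVols = 5

	var (
		mu      sync.Mutex
		wg      sync.WaitGroup
		volumes = make(map[string]int)
	)

	wg.Add(numVols)
	for i := 0; i < numVols; i++ {
		go func(idx int) {
			defer wg.Done()
			id := fmt.Sprintf("testvolume-%d", idx)

			// The map is shared across goroutines, so every write is
			// guarded by the mutex; unsynchronized writes are a data race.
			mu.Lock()
			volumes[id] = idx
			mu.Unlock()
		}(i)
	}

	wg.Wait()
	fmt.Println(len(volumes)) // prints 5 once all workers have finished
}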
Example #4
// Configure assigns functions to all the storage api handlers
func (handler *StorageHandlersImpl) Configure(api *operations.PortLayerAPI, handlerCtx *HandlerContext) {
	var err error

	ctx := context.Background()

	sessionconfig := &session.Config{
		Service:        options.PortLayerOptions.SDK,
		Insecure:       options.PortLayerOptions.Insecure,
		Keepalive:      options.PortLayerOptions.Keepalive,
		DatacenterPath: options.PortLayerOptions.DatacenterPath,
		ClusterPath:    options.PortLayerOptions.ClusterPath,
		PoolPath:       options.PortLayerOptions.PoolPath,
		DatastorePath:  options.PortLayerOptions.DatastorePath,
	}

	storageSession, err := session.NewSession(sessionconfig).Create(ctx)
	if err != nil {
		log.Fatalf("StorageHandler ERROR: %s", err)
	}
	if len(spl.Config.ImageStores) == 0 {
		log.Panicf("No image stores provided; unable to instantiate storage layer")
	}
	imageStoreURL := spl.Config.ImageStores[0]
	// TODO: support multiple image stores. Right now we only support the first one
	if len(spl.Config.ImageStores) > 1 {
		log.Warningf("Multiple image stores found. Multiple image stores are not yet supported. Using [%s] %s", imageStoreURL.Host, imageStoreURL.Path)
	}
	ds, err := vsphereSpl.NewImageStore(ctx, storageSession, &imageStoreURL)
	if err != nil {
		log.Panicf("Cannot instantiate storage layer: %s", err)
	}

	// The imagestore is implemented via a cache which is backed via an
	// implementation that writes to disks.  The cache is used to avoid
	// expensive metadata lookups.
	storageImageLayer = spl.NewLookupCache(ds)
	vsVolumeStore, err := vsphereSpl.NewVolumeStore(context.TODO(), storageSession)
	if err != nil {
		log.Panicf("Cannot instantiate the volume store: %s", err)
	}

	// Get the datastores for volumes.
	// Each volume store name maps to a datastore + path, which can be referred to by the name.
	dstores, err := datastore.GetDatastores(context.TODO(), storageSession, spl.Config.VolumeLocations)
	if err != nil {
		log.Panicf("Cannot find datastores: %s", err)
	}

	// Add datastores to the vsphere volume store impl
	for volStoreName, volDatastore := range dstores {
		log.Infof("Adding volume store %s (%s)", volStoreName, volDatastore.RootURL)
		_, err := vsVolumeStore.AddStore(context.TODO(), volDatastore, volStoreName)
		if err != nil {
			log.Errorf("volume addition error %s", err)
		}
	}

	storageVolumeLayer, err = spl.NewVolumeLookupCache(context.TODO(), vsVolumeStore)
	if err != nil {
		log.Panicf("Cannot instantiate the Volume Lookup cache: %s", err)
	}

	api.StorageCreateImageStoreHandler = storage.CreateImageStoreHandlerFunc(handler.CreateImageStore)
	api.StorageGetImageHandler = storage.GetImageHandlerFunc(handler.GetImage)
	api.StorageGetImageTarHandler = storage.GetImageTarHandlerFunc(handler.GetImageTar)
	api.StorageListImagesHandler = storage.ListImagesHandlerFunc(handler.ListImages)
	api.StorageWriteImageHandler = storage.WriteImageHandlerFunc(handler.WriteImage)

	api.StorageVolumeStoresListHandler = storage.VolumeStoresListHandlerFunc(handler.VolumeStoresList)
	api.StorageCreateVolumeHandler = storage.CreateVolumeHandlerFunc(handler.CreateVolume)
	api.StorageRemoveVolumeHandler = storage.RemoveVolumeHandlerFunc(handler.RemoveVolume)
	api.StorageVolumeJoinHandler = storage.VolumeJoinHandlerFunc(handler.VolumeJoin)
	api.StorageListVolumesHandler = storage.ListVolumesHandlerFunc(handler.VolumesList)
}
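
The comment in Example #4 describes the image store as a cache in front of a disk-backed implementation, used to avoid expensive metadata lookups. The sketch below illustrates that read-through idea with hypothetical types; it is not the spl.NewLookupCache API, just the general pattern: serve hits from memory, fall through to the backing store on a miss, and remember the result.

package main

import (
	"fmt"
	"sync"
)

// backingStore stands in for the slow, disk-backed implementation.
type backingStore interface {
	Get(name string) (string, error)
}

// lookupCache answers repeated lookups from memory.
type lookupCache struct {
	mu    sync.Mutex
	data  map[string]string
	store backingStore
}

func newLookupCache(store backingStore) *lookupCache {
	return &lookupCache{data: make(map[string]string), store: store}
}

func (c *lookupCache) Get(name string) (string, error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	if v, ok := c.data[name]; ok {
		return v, nil // hit: no call to the backing store
	}

	v, err := c.store.Get(name) // miss: do the expensive lookup once
	if err != nil {
		return "", err
	}
	c.data[name] = v
	return v, nil
}

// mapStore is a trivial in-memory stand-in for the real store.
type mapStore map[string]string

func (m mapStore) Get(name string) (string, error) {
	v, ok := m[name]
	if !ok {
		return "", fmt.Errorf("%s not found", name)
	}
	return v, nil
}

func main() {
	cache := newLookupCache(mapStore{"scratch": "[datastore1] volumes/scratch"})
	for i := 0; i < 2; i++ {
		path, err := cache.Get("scratch") // the second call is served from the cache
		fmt.Println(path, err)
	}
}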
Example #5
// Configure assigns functions to all the storage api handlers
func (h *StorageHandlersImpl) Configure(api *operations.PortLayerAPI, handlerCtx *HandlerContext) {
	var err error

	ctx := context.Background()
	op := trace.NewOperation(ctx, "configure")

	if len(spl.Config.ImageStores) == 0 {
		log.Panicf("No image stores provided; unable to instantiate storage layer")
	}
	imageStoreURL := spl.Config.ImageStores[0]
	// TODO: support multiple image stores. Right now we only support the first one
	if len(spl.Config.ImageStores) > 1 {
		log.Warningf("Multiple image stores found. Multiple image stores are not yet supported. Using [%s] %s", imageStoreURL.Host, imageStoreURL.Path)
	}

	ds, err := vsphereSpl.NewImageStore(op, handlerCtx.Session, &imageStoreURL)
	if err != nil {
		log.Panicf("Cannot instantiate storage layer: %s", err)
	}

	// The imagestore is implemented via a cache which is backed via an
	// implementation that writes to disks.  The cache is used to avoid
	// expensive metadata lookups.
	h.imageCache = spl.NewLookupCache(ds)

	// The same is done for volumes.  It's implemented via a cache which writes
	// to an implementation that takes a datastore to write to.
	vsVolumeStore, err := vsphereSpl.NewVolumeStore(op, handlerCtx.Session)
	if err != nil {
		log.Panicf("Cannot instantiate the volume store: %s", err)
	}

	// Get the datastores for volumes.
	// Each volume store name maps to a datastore + path, which can be referred to by the name.
	dstores, err := datastore.GetDatastores(context.TODO(), handlerCtx.Session, spl.Config.VolumeLocations)
	if err != nil {
		log.Panicf("Cannot find datastores: %s", err)
	}

	// Add datastores to the vsphere volume store impl
	for volStoreName, volDatastore := range dstores {
		log.Infof("Adding volume store %s (%s)", volStoreName, volDatastore.RootURL)
		_, err := vsVolumeStore.AddStore(op, volDatastore, volStoreName)
		if err != nil {
			log.Errorf("volume addition error %s", err)
		}
	}

	h.volumeCache, err = spl.NewVolumeLookupCache(op, vsVolumeStore)
	if err != nil {
		log.Panicf("Cannot instantiate the Volume Lookup cache: %s", err)
	}

	api.StorageCreateImageStoreHandler = storage.CreateImageStoreHandlerFunc(h.CreateImageStore)
	api.StorageGetImageHandler = storage.GetImageHandlerFunc(h.GetImage)
	api.StorageGetImageTarHandler = storage.GetImageTarHandlerFunc(h.GetImageTar)
	api.StorageListImagesHandler = storage.ListImagesHandlerFunc(h.ListImages)
	api.StorageWriteImageHandler = storage.WriteImageHandlerFunc(h.WriteImage)
	api.StorageDeleteImageHandler = storage.DeleteImageHandlerFunc(h.DeleteImage)

	api.StorageVolumeStoresListHandler = storage.VolumeStoresListHandlerFunc(h.VolumeStoresList)
	api.StorageCreateVolumeHandler = storage.CreateVolumeHandlerFunc(h.CreateVolume)
	api.StorageRemoveVolumeHandler = storage.RemoveVolumeHandlerFunc(h.RemoveVolume)
	api.StorageVolumeJoinHandler = storage.VolumeJoinHandlerFunc(h.VolumeJoin)
	api.StorageListVolumesHandler = storage.ListVolumesHandlerFunc(h.VolumesList)
	api.StorageGetVolumeHandler = storage.GetVolumeHandlerFunc(h.GetVolume)
}