//RemoveVolume : Remove a Volume from existence func (h *StorageHandlersImpl) RemoveVolume(params storage.RemoveVolumeParams) middleware.Responder { defer trace.End(trace.Begin("storage_handlers.RemoveVolume")) op := trace.NewOperation(context.Background(), fmt.Sprintf("VolumeDestroy(%s)", params.Name)) err := h.volumeCache.VolumeDestroy(op, params.Name) if err != nil { switch { case os.IsNotExist(err): return storage.NewRemoveVolumeNotFound().WithPayload(&models.Error{ Message: err.Error(), }) case spl.IsErrVolumeInUse(err): return storage.NewRemoveVolumeConflict().WithPayload(&models.Error{ Message: err.Error(), }) default: return storage.NewRemoveVolumeInternalServerError().WithPayload(&models.Error{ Message: err.Error(), }) } } return storage.NewRemoveVolumeOK() }
// WriteImage writes an image to an image store func (h *StorageHandlersImpl) WriteImage(params storage.WriteImageParams) middleware.Responder { u, err := util.ImageStoreNameToURL(params.StoreName) if err != nil { return storage.NewWriteImageDefault(http.StatusInternalServerError).WithPayload( &models.Error{ Code: swag.Int64(http.StatusInternalServerError), Message: err.Error(), }) } parent := &spl.Image{ Store: u, ID: params.ParentID, } var meta map[string][]byte if params.Metadatakey != nil && params.Metadataval != nil { meta = map[string][]byte{*params.Metadatakey: []byte(*params.Metadataval)} } op := trace.NewOperation(context.Background(), fmt.Sprintf("WriteImage(%s)", params.ImageID)) image, err := h.imageCache.WriteImage(op, parent, params.ImageID, meta, params.Sum, params.ImageFile) if err != nil { return storage.NewWriteImageDefault(http.StatusInternalServerError).WithPayload( &models.Error{ Code: swag.Int64(http.StatusInternalServerError), Message: err.Error(), }) } i := convertImage(image) return storage.NewWriteImageCreated().WithPayload(i) }
//VolumeJoin : modifies the config spec of a container to mount the specified container func (h *StorageHandlersImpl) VolumeJoin(params storage.VolumeJoinParams) middleware.Responder { defer trace.End(trace.Begin("")) op := trace.NewOperation(context.Background(), fmt.Sprintf("VolumeJoin(%s)", params.Name)) actualHandle := epl.GetHandle(params.JoinArgs.Handle) //Note: Name should already be populated by now. volume, err := h.volumeCache.VolumeGet(op, params.Name) if err != nil { log.Errorf("Volumes: StorageHandler : %#v", err) return storage.NewVolumeJoinInternalServerError().WithPayload(&models.Error{ Code: swag.Int64(http.StatusInternalServerError), Message: err.Error(), }) } actualHandle, err = vsphereSpl.VolumeJoin(op, actualHandle, volume, params.JoinArgs.MountPath, params.JoinArgs.Flags) if err != nil { log.Errorf("Volumes: StorageHandler : %#v", err) return storage.NewVolumeJoinInternalServerError().WithPayload(&models.Error{ Code: swag.Int64(http.StatusInternalServerError), Message: err.Error(), }) } log.Infof("volume %s has been joined to a container", volume.ID) return storage.NewVolumeJoinOK().WithPayload(actualHandle.String()) }
// Create an image store then test it exists
func TestCreateAndGetImageStore(t *testing.T) {
	vsis, client, parentPath, err := setup(t)
	if !assert.NoError(t, err) {
		return
	}

	// Nuke the parent image store directory
	defer rm(t, client, client.Datastore.Path(parentPath))

	storeName := "bogusStoreName"
	op := trace.NewOperation(context.Background(), "test")
	u, err := vsis.CreateImageStore(op, storeName)
	if !assert.NoError(t, err) || !assert.NotNil(t, u) {
		return
	}

	// The store just created must be retrievable by the same name.
	u, err = vsis.GetImageStore(op, storeName)
	if !assert.NoError(t, err) || !assert.NotNil(t, u) {
		return
	}

	// Negative test. Check for a dir that doesn't exist
	u, err = vsis.GetImageStore(op, storeName+"garbage")
	if !assert.Error(t, err) || !assert.Nil(t, u) {
		return
	}

	// Test for a store that already exists -- creation must fail with os.ErrExist.
	u, err = vsis.CreateImageStore(op, storeName)
	if !assert.Error(t, err) || !assert.Nil(t, u) || !assert.Equal(t, err, os.ErrExist) {
		return
	}
}
func TestListImageStore(t *testing.T) { vsis, client, parentPath, err := setup(t) if !assert.NoError(t, err) { return } // Nuke the parent image store directory defer rm(t, client, client.Datastore.Path(parentPath)) op := trace.NewOperation(context.Background(), "test") count := 3 for i := 0; i < count; i++ { storeName := fmt.Sprintf("storeName%d", i) u, err := vsis.CreateImageStore(op, storeName) if !assert.NoError(t, err) || !assert.NotNil(t, u) { return } } images, err := vsis.ListImageStores(op) if !assert.NoError(t, err) || !assert.Equal(t, len(images), count) { return } }
// ListImages returns a list of images in a store func (h *StorageHandlersImpl) ListImages(params storage.ListImagesParams) middleware.Responder { u, err := util.ImageStoreNameToURL(params.StoreName) if err != nil { return storage.NewListImagesDefault(http.StatusInternalServerError).WithPayload( &models.Error{ Code: swag.Int64(http.StatusInternalServerError), Message: err.Error(), }) } op := trace.NewOperation(context.Background(), fmt.Sprintf("ListImages(%s, %q)", u.String(), params.Ids)) images, err := h.imageCache.ListImages(op, u, params.Ids) if err != nil { return storage.NewListImagesNotFound().WithPayload( &models.Error{ Code: swag.Int64(http.StatusNotFound), Message: err.Error(), }) } result := make([]*models.Image, 0, len(images)) for _, image := range images { result = append(result, convertImage(image)) } return storage.NewListImagesOK().WithPayload(result) }
// setup creates a vSphere-backed image store wrapped in a NameLookupCache for
// the tests in this file.  It returns the cache, the datastore session, and
// the datastore path of the store so callers can clean it up afterwards.
// When no datastore session is available it returns an error whose message is
// "skip" so callers bail out early.
func setup(t *testing.T) (*portlayer.NameLookupCache, *session.Session, string, error) {
	logrus.SetLevel(logrus.DebugLevel)

	client := datastore.Session(context.TODO(), t)
	if client == nil {
		// No usable vSphere session in this environment; signal the caller to skip.
		return nil, nil, "", fmt.Errorf("skip")
	}

	// Unique per-run store path under the session's datastore.
	storeURL := &url.URL{
		Path: datastore.TestName("imageTests"),
		Host: client.DatastorePath}

	op := trace.NewOperation(context.Background(), "setup")
	vsImageStore, err := NewImageStore(op, client, storeURL)
	if err != nil {
		// NewImageStore requires running inside a VM; skip rather than fail.
		if err.Error() == "can't find the hosting vm" {
			t.Skip("Skipping: test must be run in a VM")
		}
		return nil, nil, "", err
	}

	s := portlayer.NewLookupCache(vsImageStore)

	return s, client, storeURL.Path, nil
}
// NewDatastoreKeyValue will validate the supplied name and create a datastore
// backed key / value store
//
// The file will be located at the init datastoreURL -- currently that's in the
// appliance directory under the {dsFolder} folder (i.e. [datastore]vch-appliance/{dsFolder}/{name})
func NewDatastoreKeyValue(ctx context.Context, session *session.Session, name string) (kvstore.KeyValueStore, error) {
	defer trace.End(trace.Begin(name))

	// Serialize store creation with updates to the shared mgr.dsStores map.
	mgr.m.Lock()
	defer mgr.m.Unlock()

	// validate the name
	err := validateStoreName(name)
	if err != nil {
		return nil, err
	}

	// get a ds helper for this ds url
	dsHelper, err := datastore.NewHelper(trace.NewOperation(ctx, "datastore helper creation"), session, session.Datastore, fmt.Sprintf("%s/%s", mgr.datastoreURL.Path, KVStoreFolder))
	if err != nil {
		return nil, fmt.Errorf("unable to get datastore helper for %s store creation: %s", name, err.Error())
	}

	// create or restore the specified K/V store; an "already exists" error is
	// deliberately treated as success (the restore path).
	keyVal, err := kvstore.NewKeyValueStore(ctx, kvstore.NewDatastoreBackend(dsHelper), name)
	if err != nil && !os.IsExist(err) {
		return nil, fmt.Errorf("unable to create %s datastore backed store: %s", name, err.Error())
	}

	// throw it in the store map
	mgr.dsStores[name] = keyVal

	return keyVal, nil
}
//VolumesList : Lists available volumes for use func (h *StorageHandlersImpl) VolumesList(params storage.ListVolumesParams) middleware.Responder { defer trace.End(trace.Begin("")) var result []*models.VolumeResponse op := trace.NewOperation(context.Background(), "VolumeList") portlayerVolumes, err := h.volumeCache.VolumesList(op) if err != nil { log.Error(err) return storage.NewListVolumesInternalServerError().WithPayload(&models.Error{ Code: swag.Int64(http.StatusInternalServerError), Message: err.Error(), }) } log.Debugf("volumes fetched from list call : %#v", portlayerVolumes) for i := range portlayerVolumes { model, err := fillVolumeModel(portlayerVolumes[i]) if err != nil { log.Error(err) return storage.NewListVolumesInternalServerError().WithPayload(&models.Error{ Code: swag.Int64(http.StatusInternalServerError), Message: err.Error(), }) } result = append(result, &model) } log.Debugf("volumes returned from list call : %#v", result) return storage.NewListVolumesOK().WithPayload(result) }
func save(t *testing.T, kv KeyValueStore, key string, expectedvalue []byte) { op := trace.NewOperation(context.Background(), "save") if !assert.NoError(t, kv.Put(op, key, expectedvalue)) { return } }
// Creates numLayers layers in parallel using the same parent to exercise parallel reconfigures
func TestParallel(t *testing.T) {
	numLayers := 10
	cacheStore, client, parentPath, err := setup(t)
	if !assert.NoError(t, err) {
		return
	}

	vsStore := cacheStore.DataStore.(*ImageStore)
	defer cleanup(t, client, vsStore, parentPath)

	op := trace.NewOperation(context.Background(), "test")
	storeURL, err := cacheStore.CreateImageStore(op, "testStore")
	if !assert.NoError(t, err) {
		return
	}

	// base this image off scratch
	parent, err := cacheStore.GetImage(op, storeURL, portlayer.Scratch.ID)
	if !assert.NoError(t, err) {
		return
	}

	wg := sync.WaitGroup{}
	wg.Add(numLayers)
	for i := 0; i < numLayers; i++ {
		go func(idx int) {
			defer wg.Done()
			imageID := fmt.Sprintf("testStore-%d", idx)
			op := trace.NewOperation(context.Background(), imageID)
			// Write the image via the cache (which writes to the vsphere
			// impl).  The sum is sha256 of empty input, matching the empty
			// buffer passed, so each write is expected to succeed.
			// NOTE(review): the original comment here claimed "a bogus sum so
			// the image should fail to save", contradicting the NoError
			// assertion below -- it was copy-pasted from TestBrokenPull.
			writtenImage, err := cacheStore.WriteImage(op, parent, imageID, nil, "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", new(bytes.Buffer))
			if !assert.NoError(t, err) || !assert.NotNil(t, writtenImage) {
				t.FailNow()
				return
			}
		}(i)
	}

	wg.Wait()
}
//CreateVolume : Create a Volume
func (h *StorageHandlersImpl) CreateVolume(params storage.CreateVolumeParams) middleware.Responder {
	defer trace.End(trace.Begin("storage_handlers.CreateVolume"))

	//TODO: FIXME: add more errorcodes as we identify error scenarios.
	storeURL, err := util.VolumeStoreNameToURL(params.VolumeRequest.Store)
	if err != nil {
		log.Errorf("storagehandler: VolumeStoreName error: %s", err)
		return storage.NewCreateVolumeInternalServerError().WithPayload(&models.Error{
			Code:    swag.Int64(http.StatusInternalServerError),
			Message: err.Error(),
		})
	}

	// Convert the request's string metadata into the byte-slice form the
	// portlayer cache expects.
	byteMap := make(map[string][]byte)
	for key, value := range params.VolumeRequest.Metadata {
		byteMap[key] = []byte(value)
	}

	capacity := uint64(0)
	if params.VolumeRequest.Capacity < 0 {
		// Negative capacity means "unspecified"; substitute a default.
		capacity = uint64(1024) //FIXME: this should look for a default cap and set or fail here.
	} else {
		capacity = uint64(params.VolumeRequest.Capacity)
	}

	op := trace.NewOperation(context.Background(), fmt.Sprintf("VolumeCreate(%s)", params.VolumeRequest.Name))
	// capacity*1024: presumably converts the request's unit to the smaller one
	// VolumeCreate expects (e.g. MB -> KB) -- TODO confirm against VolumeCreate.
	volume, err := h.volumeCache.VolumeCreate(op, params.VolumeRequest.Name, storeURL, capacity*1024, byteMap)
	if err != nil {
		log.Errorf("storagehandler: VolumeCreate error: %#v", err)

		// Duplicate name -> 409.
		if os.IsExist(err) {
			return storage.NewCreateVolumeConflict().WithPayload(&models.Error{
				Code:    swag.Int64(http.StatusConflict),
				Message: err.Error(),
			})
		}

		// Unknown volume store -> 404.
		if _, ok := err.(spl.VolumeStoreNotFoundError); ok {
			return storage.NewCreateVolumeNotFound().WithPayload(&models.Error{
				Code:    swag.Int64(http.StatusNotFound),
				Message: err.Error(),
			})
		}

		return storage.NewCreateVolumeInternalServerError().WithPayload(&models.Error{
			Code:    swag.Int64(http.StatusInternalServerError),
			Message: err.Error(),
		})
	}

	response := volumeToCreateResponse(volume, params.VolumeRequest)
	return storage.NewCreateVolumeCreated().WithPayload(&response)
}
// Create an image on the datastore directly and try to WriteImage via the
// cache. The datastore should reflect the image already exists and bale out
// without an error.
func TestOutsideCacheWriteImage(t *testing.T) {
	s := NewLookupCache(NewMockDataStore())
	op := trace.NewOperation(context.Background(), "test")

	storeURL, err := s.CreateImageStore(op, "testStore")
	if !assert.NoError(t, err) {
		return
	}
	if !assert.NotNil(t, storeURL) {
		return
	}

	// Create a set of images
	images := make(map[string]*Image)
	parent := Scratch
	parent.Store = storeURL
	for i := 1; i < 50; i++ {
		id := fmt.Sprintf("ID-%d", i)

		// Write to the datastore directly, bypassing the cache, creating images
		img, werr := s.DataStore.WriteImage(op, &parent, id, nil, "", nil)
		if !assert.NoError(t, werr) {
			return
		}
		if !assert.NotNil(t, img) {
			return
		}

		images[id] = img
	}

	// sha256 of empty input -- matches the nil reader passed below.
	testSum := "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
	// Try to write the same images as above, but this time via the cache. WriteImage should return right away without any data written.
	for i := 1; i < 50; i++ {
		id := fmt.Sprintf("ID-%d", i)

		img, werr := s.WriteImage(op, &parent, id, nil, testSum, nil)
		if !assert.NoError(t, werr) {
			return
		}
		if !assert.NotNil(t, img) {
			return
		}

		// assert it's the same image
		if !assert.Equal(t, images[img.ID], img) {
			return
		}
	}
}
func TestParentEmptyRestore(t *testing.T) { ctx, ds, cleanupfunc := datastore.DSsetup(t) if t.Failed() { return } defer cleanupfunc() op := trace.NewOperation(ctx, "test") par, err := restoreParentMap(op, ds, testStore) if !assert.NoError(t, err) && !assert.NotNil(t, par) { return } }
// Cache population should be happening in order starting from parent(id1) to children(id4)
func TestPopulateCacheInExpectedOrder(t *testing.T) {
	logrus.SetLevel(logrus.DebugLevel)

	st := NewMockDataStore()
	op := trace.NewOperation(context.Background(), "test")

	storeURL, _ := util.ImageStoreNameToURL("testStore")
	storageURLStr := storeURL.String()

	// Build a four-deep parent chain: scratch <- id1 <- id2 <- id3 <- id4.
	url1, _ := url.Parse(storageURLStr + "/id1")
	url2, _ := url.Parse(storageURLStr + "/id2")
	url3, _ := url.Parse(storageURLStr + "/id3")
	url4, _ := url.Parse(storageURLStr + "/id4")
	// NOTE(review): unlike the image URLs above, no "/" separator is inserted
	// before Scratch.ID -- consistent within this test (SelfLink and
	// ParentLink both use it), but verify it matches what the cache produces.
	scratchURL, _ := url.Parse(storageURLStr + Scratch.ID)

	img1 := &Image{ID: "id1", SelfLink: url1, ParentLink: scratchURL, Store: storeURL}
	img2 := &Image{ID: "id2", SelfLink: url2, ParentLink: url1, Store: storeURL}
	img3 := &Image{ID: "id3", SelfLink: url3, ParentLink: url2, Store: storeURL}
	img4 := &Image{ID: "id4", SelfLink: url4, ParentLink: url3, Store: storeURL}

	scratchImg := &Image{
		ID:         Scratch.ID,
		SelfLink:   scratchURL,
		ParentLink: scratchURL,
		Store:      storeURL,
	}

	// Order does matter for some reason.
	imageMap := map[string]*Image{
		img1.ID:       img1,
		img4.ID:       img4,
		img2.ID:       img2,
		img3.ID:       img3,
		scratchImg.ID: scratchImg,
	}

	st.db[*storeURL] = imageMap

	imageCache := NewLookupCache(st)
	// GetImageStore triggers population of the cache from the mock datastore.
	imageCache.GetImageStore(op, "testStore")

	// Check if all images are available.
	imageIds := []string{"id1", "id2", "id3", "id4"}
	for _, imageID := range imageIds {
		v, _ := imageCache.GetImage(op, storeURL, imageID)
		assert.NotNil(t, v)
	}
}
func (d *Dispatcher) DebugVCH(vch *vm.VirtualMachine, conf *config.VirtualContainerHostConfigSpec, password, authorizedKey string) error { defer trace.End(trace.Begin(conf.Name)) op, err := trace.FromContext(d.ctx) if err != nil { op = trace.NewOperation(d.ctx, "enable appliance debug") } err = d.enableSSH(op, vch, password, authorizedKey) if err != nil { op.Errorf("Unable to enable ssh on the VCH appliance VM: %s", err) return err } d.sshEnabled = true return nil }
func (handler *KvHandlersImpl) DeleteValueHandler(params kv.DeleteValueParams) middleware.Responder { defer trace.End(trace.Begin(params.Key)) err := handler.defaultStore.Delete(trace.NewOperation(context.Background(), "DeleteValue"), params.Key) if err != nil { switch err { case kvstore.ErrKeyNotFound: return kv.NewDeleteValueNotFound() default: log.Errorf("Error deleting Key/Value: %s", err.Error()) return kv.NewGetValueInternalServerError().WithPayload(&models.Error{ Code: swag.Int64(http.StatusInternalServerError), Message: err.Error(), }) } } return kv.NewDeleteValueOK() }
// TestVolumeCreate drives the CreateVolume handler end-to-end against a mock
// volume store and verifies the volume lands in the store with the requested
// name and store name.
func TestVolumeCreate(t *testing.T) {
	testStore := NewMockVolumeStore()
	op := trace.NewOperation(context.Background(), "test")
	volCache, err := spl.NewVolumeLookupCache(op, testStore)
	if !assert.NoError(t, err) {
		return
	}

	handler := StorageHandlersImpl{
		volumeCache: volCache,
	}

	// Build a minimal create request.
	model := models.VolumeRequest{}
	model.Store = "blah"
	model.Name = "testVolume"
	model.Capacity = 1
	model.Driver = "vsphere"
	model.DriverArgs = make(map[string]string)
	model.DriverArgs["stuff"] = "things"
	model.Metadata = make(map[string]string)
	params := storage.NewCreateVolumeParams()
	params.VolumeRequest = &model

	// NOTE(review): the responder is discarded here; success is verified by
	// inspecting the mock store directly below.
	handler.CreateVolume(params)
	testVolume, err := testStore.VolumeGet(op, model.Name)
	if !assert.NoError(t, err) {
		return
	}

	if !assert.NotNil(t, testVolume) {
		return
	}

	testVolumeStoreName, err := util.VolumeStoreName(testVolume.Store)
	if !assert.NoError(t, err) {
		return
	}

	if !assert.Equal(t, "blah", testVolumeStoreName) {
		return
	}

	if !assert.Equal(t, "testVolume", testVolume.ID) {
		return
	}
}
// TestRestartImageStore simulates a restart: create an image store, then open
// a second ImageStore over the same backing datastore and verify it resolves
// to the identical store (same root, same URL).
func TestRestartImageStore(t *testing.T) {
	t.Skip("this test needs TLC")

	// Start the image store once
	cacheStore, client, parentPath, err := setup(t)
	if !assert.NoError(t, err) {
		return
	}

	origVsStore := cacheStore.DataStore.(*ImageStore)
	defer cleanup(t, client, origVsStore, parentPath)

	storeName := "bogusStoreName"
	op := trace.NewOperation(context.Background(), "test")
	origStore, err := cacheStore.CreateImageStore(op, storeName)
	if !assert.NoError(t, err) || !assert.NotNil(t, origStore) {
		return
	}

	imageStoreURL := &url.URL{
		Path: StorageParentDir,
		Host: client.DatastorePath}

	// now start it again
	restartedVsStore, err := NewImageStore(op, client, imageStoreURL)
	if !assert.NoError(t, err) || !assert.NotNil(t, restartedVsStore) {
		return
	}

	// Check we didn't create a new UUID directory (relevant if vsan)
	if !assert.Equal(t, origVsStore.ds.RootURL, restartedVsStore.ds.RootURL) {
		return
	}

	restartedStore, err := restartedVsStore.GetImageStore(op, storeName)
	if !assert.NoError(t, err) || !assert.NotNil(t, restartedStore) {
		return
	}

	if !assert.Equal(t, origStore.String(), restartedStore.String()) {
		return
	}
}
// Write some child -> parent mappings and see if we can read them. func TestParentSaveRestore(t *testing.T) { ctx, ds, cleanupfunc := datastore.DSsetup(t) if t.Failed() { return } defer cleanupfunc() op := trace.NewOperation(ctx, "test") par, err := restoreParentMap(op, ds, testStore) if !assert.NoError(t, err) && !assert.NotNil(t, par) { return } expected := make(map[string]string) for i := 0; i < 10; i++ { child := fmt.Sprintf("c%d", i) parent := fmt.Sprintf("p%d", i) expected[child] = parent par.Add(child, parent) } err = par.Save(op) if !assert.NoError(t, err) { return } // load into a different map p, err := restoreParentMap(op, ds, testStore) if !assert.NoError(t, err) && !assert.NotNil(t, p) { return } // check if the 2nd map loaded everything correctly if !assert.Equal(t, expected, p.db) { return } // Now save it to be extra paranoid err = p.Save(op) if !assert.NoError(t, err) { return } }
// CreateImageStore creates a new image store func (h *StorageHandlersImpl) CreateImageStore(params storage.CreateImageStoreParams) middleware.Responder { op := trace.NewOperation(context.Background(), fmt.Sprintf("CreateImageStore(%s)", params.Body.Name)) url, err := h.imageCache.CreateImageStore(op, params.Body.Name) if err != nil { if os.IsExist(err) { return storage.NewCreateImageStoreConflict().WithPayload( &models.Error{ Code: swag.Int64(http.StatusConflict), Message: "An image store with that name already exists", }) } return storage.NewCreateImageStoreDefault(http.StatusInternalServerError).WithPayload( &models.Error{ Code: swag.Int64(http.StatusInternalServerError), Message: err.Error(), }) } s := &models.StoreURL{Code: swag.Int64(http.StatusCreated), URL: url.String()} return storage.NewCreateImageStoreCreated().WithPayload(s) }
// Create 2 volume caches but use the same backing volume store. Create
// volumes with the first cache, then get them with the second. This simulates
// restart since the second cache is empty and has to go to the backing store.
// (Comment corrected: this test exercises volumes, not images.)
func TestVolumeCacheRestart(t *testing.T) {
	mvs := NewMockVolumeStore()
	op := trace.NewOperation(context.Background(), "test")

	firstCache, err := NewVolumeLookupCache(op, mvs)
	if !assert.NoError(t, err) || !assert.NotNil(t, firstCache) {
		return
	}

	storeURL, err := util.VolumeStoreNameToURL("testStore")
	if !assert.NoError(t, err) || !assert.NotNil(t, storeURL) {
		return
	}

	// Create a set of volumes
	inVols := make(map[string]*Volume)
	for i := 1; i < 50; i++ {
		id := fmt.Sprintf("ID-%d", i)

		// Write to the datastore
		vol, err := firstCache.VolumeCreate(op, id, storeURL, 0, nil)
		if !assert.NoError(t, err) || !assert.NotNil(t, vol) {
			return
		}

		inVols[id] = vol
	}

	// A fresh cache over the same backing store -- the "restarted" instance.
	secondCache, err := NewVolumeLookupCache(op, mvs)
	if !assert.NoError(t, err) || !assert.NotNil(t, secondCache) {
		return
	}

	// get the vols from the second cache to ensure it goes to the ds
	for _, expectedVol := range inVols {
		vol, err := secondCache.VolumeGet(op, expectedVol.ID)
		if !assert.NoError(t, err) || !assert.Equal(t, expectedVol, vol) {
			return
		}
	}
}
// GetImage retrieves an image from a store func (h *StorageHandlersImpl) GetImage(params storage.GetImageParams) middleware.Responder { id := params.ID url, err := util.ImageStoreNameToURL(params.StoreName) if err != nil { return storage.NewGetImageDefault(http.StatusInternalServerError).WithPayload( &models.Error{ Code: swag.Int64(http.StatusInternalServerError), Message: err.Error(), }) } op := trace.NewOperation(context.Background(), fmt.Sprintf("GetImage(%s)", id)) image, err := h.imageCache.GetImage(op, url, id) if err != nil { e := &models.Error{Code: swag.Int64(http.StatusNotFound), Message: err.Error()} return storage.NewGetImageNotFound().WithPayload(e) } result := convertImage(image) return storage.NewGetImageOK().WithPayload(result) }
// mountLayerRO creates and attaches a read-only child disk backed by the
// parent image's vmdk, mounts it on a fresh temp directory, and returns the
// attached disk.
// NOTE(review): if TempDir or Mount fails, the already-attached disk is not
// detached and the temp directory is never removed -- tolerable in a test
// helper, but a leak if reused elsewhere; confirm against the disk manager API.
func mountLayerRO(v *ImageStore, parent *portlayer.Image) (*disk.VirtualDisk, error) {
	// Child disk path derived from the parent's on-datastore vmdk path.
	roName := v.imageDiskDSPath("testStore", parent.ID) + "-ro.vmdk"
	parentDsURI := v.imageDiskDSPath("testStore", parent.ID)

	op := trace.NewOperation(context.TODO(), "ro")
	roDisk, err := v.dm.CreateAndAttach(op, roName, parentDsURI, 0, os.O_RDONLY)
	if err != nil {
		return nil, err
	}

	dir, err := ioutil.TempDir("", parent.ID+"ro")
	if err != nil {
		return nil, err
	}

	if err := roDisk.Mount(dir, nil); err != nil {
		return nil, err
	}

	return roDisk, nil
}
// VolumeStoresList lists the configured volume stores and their datastore path URIs. func (h *StorageHandlersImpl) VolumeStoresList() middleware.Responder { defer trace.End(trace.Begin("storage_handlers.VolumeStoresList")) op := trace.NewOperation(context.Background(), "VolumeStoresList") stores, err := h.volumeCache.VolumeStoresList(op) if err != nil { return storage.NewVolumeStoresListInternalServerError().WithPayload( &models.Error{ Code: swag.Int64(http.StatusInternalServerError), Message: err.Error(), }) } resp := &models.VolumeStoresListResponse{ Stores: make(map[string]string), } for name, ds := range stores { resp.Stores[name] = ds.String() } return storage.NewVolumeStoresListOK().WithPayload(resp) }
//GetVolume : Gets a handle to a volume func (h *StorageHandlersImpl) GetVolume(params storage.GetVolumeParams) middleware.Responder { defer trace.End(trace.Begin(params.Name)) op := trace.NewOperation(context.Background(), fmt.Sprintf("VolumeGet(%s)", params.Name)) data, err := h.volumeCache.VolumeGet(op, params.Name) if err == os.ErrNotExist { return storage.NewGetVolumeNotFound().WithPayload(&models.Error{ Code: swag.Int64(http.StatusNotFound), Message: err.Error(), }) } response, err := fillVolumeModel(data) if err != nil { return storage.NewListVolumesInternalServerError().WithPayload(&models.Error{ Code: swag.Int64(http.StatusInternalServerError), Message: err.Error(), }) } log.Debugf("VolumeGet returned : %#v", response) return storage.NewGetVolumeOK().WithPayload(&response) }
// DeleteImage deletes an image from a store func (h *StorageHandlersImpl) DeleteImage(params storage.DeleteImageParams) middleware.Responder { ferr := func(err error, code int) middleware.Responder { log.Errorf("DeleteImage: error %s", err.Error()) return storage.NewDeleteImageDefault(code).WithPayload( &models.Error{ Code: swag.Int64(int64(code)), Message: err.Error(), }) } imageURL, err := util.ImageURL(params.StoreName, params.ID) if err != nil { return ferr(err, http.StatusInternalServerError) } image, err := spl.Parse(imageURL) if err != nil { return ferr(err, http.StatusInternalServerError) } op := trace.NewOperation(context.Background(), fmt.Sprintf("DeleteImage(%s)", image.ID)) if err = h.imageCache.DeleteImage(op, image); err != nil { switch { case spl.IsErrImageInUse(err): return ferr(err, http.StatusLocked) case os.IsNotExist(err): return ferr(err, http.StatusNotFound) default: return ferr(err, http.StatusInternalServerError) } } return storage.NewDeleteImageOK() }
// TestBrokenPull verifies WriteImage rejects a payload whose checksum doesn't
// match the declared sum, then accepts the same payload with the correct sum.
func TestBrokenPull(t *testing.T) {
	cacheStore, client, parentPath, err := setup(t)
	if !assert.NoError(t, err) {
		return
	}

	vsStore := cacheStore.DataStore.(*ImageStore)
	defer cleanup(t, client, vsStore, parentPath)

	op := trace.NewOperation(context.Background(), "test")
	storeURL, err := cacheStore.CreateImageStore(op, "testStore")
	if !assert.NoError(t, err) {
		return
	}

	// base this image off scratch
	parent, err := cacheStore.GetImage(op, storeURL, portlayer.Scratch.ID)
	if !assert.NoError(t, err) {
		return
	}

	imageID := "dir0"

	// Add some files to the archive.
	var files = []tarFile{
		{imageID, tar.TypeDir, ""},
		{imageID + "/readme.txt", tar.TypeReg, "This archive contains some text files."},
		{imageID + "/gopher.txt", tar.TypeReg, "Gopher names:\nGeorge\nGeoffrey\nGonzo"},
		{imageID + "/todo.txt", tar.TypeReg, "Get animal handling license."},
	}

	// meta for the image
	meta := make(map[string][]byte)
	meta[imageID+"_meta"] = []byte("Some Meta")
	meta[imageID+"_moreMeta"] = []byte("Some More Meta")
	meta[imageID+"_scorpions"] = []byte("Here I am, rock you like a hurricane")

	// Tar the files
	buf, terr := tarFiles(files)
	if !assert.NoError(t, terr) {
		return
	}

	// Calculate the checksum
	h := sha256.New()
	h.Write(buf.Bytes())
	actualsum := fmt.Sprintf("sha256:%x", h.Sum(nil))

	// Write the image via the cache (which writes to the vsphere impl).  We're passing a bogus sum so the image should fail to save.
	writtenImage, err := cacheStore.WriteImage(op, parent, imageID, meta, "bogusSum", new(bytes.Buffer))
	if !assert.Error(t, err) || !assert.Nil(t, writtenImage) {
		return
	}

	// Now try again with the right sum and there shouldn't be an error.
	writtenImage, err = cacheStore.WriteImage(op, parent, imageID, meta, actualsum, buf)
	if !assert.NoError(t, err) || !assert.NotNil(t, writtenImage) {
		return
	}
}
func (d *Dispatcher) enableSSH(ctx context.Context, vch *vm.VirtualMachine, password, authorizedKey string) error { op, err := trace.FromContext(ctx) if err != nil { op = trace.NewOperation(ctx, "enable ssh in appliance") } state, err := vch.PowerState(op) if err != nil { log.Errorf("Failed to get appliance power state, service might not be available at this moment.") } if state != types.VirtualMachinePowerStatePoweredOn { err = errors.Errorf("VCH appliance is not powered on, state %s", state) op.Errorf("%s", err) return err } running, err := vch.IsToolsRunning(op) if err != nil || !running { err = errors.New("Tools is not running in the appliance, unable to continue") op.Errorf("%s", err) return err } manager := guest.NewOperationsManager(d.session.Client.Client, vch.Reference()) processManager, err := manager.ProcessManager(op) if err != nil { err = errors.Errorf("Unable to manage processes in appliance VM: %s", err) op.Errorf("%s", err) return err } auth := types.NamePasswordAuthentication{} spec := types.GuestProgramSpec{ ProgramPath: "enable-ssh", Arguments: string(authorizedKey), WorkingDirectory: "/", EnvVariables: []string{}, } _, err = processManager.StartProgram(op, &auth, &spec) if err != nil { err = errors.Errorf("Unable to enable SSH in appliance VM: %s", err) op.Errorf("%s", err) return err } if password == "" { return nil } // set the password as well spec = types.GuestProgramSpec{ ProgramPath: "passwd", Arguments: password, WorkingDirectory: "/", EnvVariables: []string{}, } _, err = processManager.StartProgram(op, &auth, &spec) if err != nil { err = errors.Errorf("Unable to enable in appliance VM: %s", err) op.Errorf("%s", err) return err } return nil }
// TestInProgressCleanup writes a complete image, deletes its manifest (the
// "done" marker) to simulate an interrupted pull, and verifies cleanup removes
// the half-written image, leaving only scratch in the store.
func TestInProgressCleanup(t *testing.T) {
	cacheStore, client, parentPath, err := setup(t)
	if !assert.NoError(t, err) {
		return
	}

	vsStore := cacheStore.DataStore.(*ImageStore)
	defer cleanup(t, client, vsStore, parentPath)

	op := trace.NewOperation(context.Background(), "test")
	storeURL, err := cacheStore.CreateImageStore(op, "testStore")
	if !assert.NoError(t, err) {
		return
	}

	// base this image off scratch
	parent, err := cacheStore.GetImage(op, storeURL, portlayer.Scratch.ID)
	if !assert.NoError(t, err) {
		return
	}

	// create a test image
	imageID := "testImage"

	// meta for the image
	meta := make(map[string][]byte)
	meta[imageID+"_meta"] = []byte("Some Meta")

	// Tar the files (empty archive -- content is irrelevant here)
	buf, err := tarFiles([]tarFile{})
	if !assert.NoError(t, err) {
		return
	}

	// Calculate the checksum
	h := sha256.New()
	h.Write(buf.Bytes())
	sum := fmt.Sprintf("sha256:%x", h.Sum(nil))

	writtenImage, err := cacheStore.WriteImage(op, parent, imageID, meta, sum, buf)
	if !assert.NoError(t, err) || !assert.NotNil(t, writtenImage) {
		return
	}

	// nuke the done file.
	rm(t, client, path.Join(vsStore.ds.RootURL, vsStore.imageDirPath("testStore", imageID), manifest))

	// ensure GetImage doesn't find this image now
	if _, err = vsStore.GetImage(op, storeURL, imageID); !assert.Error(t, err) {
		return
	}

	// call cleanup
	if err = vsStore.cleanup(op, storeURL); !assert.NoError(t, err) {
		return
	}

	// Make sure list is now empty -- only scratch should remain.
	listedImages, err := vsStore.ListImages(op, parent.Store, nil)
	if !assert.NoError(t, err) || !assert.Equal(t, len(listedImages), 1) || !assert.Equal(t, listedImages[0].ID, portlayer.Scratch.ID) {
		return
	}
}