func setup(t *testing.T) (*portlayer.NameLookupCache, *session.Session, string, error) { logrus.SetLevel(logrus.DebugLevel) client := datastore.Session(context.TODO(), t) if client == nil { return nil, nil, "", fmt.Errorf("skip") } storeURL := &url.URL{ Path: datastore.TestName("imageTests"), Host: client.DatastorePath} op := trace.NewOperation(context.Background(), "setup") vsImageStore, err := NewImageStore(op, client, storeURL) if err != nil { if err.Error() == "can't find the hosting vm" { t.Skip("Skipping: test must be run in a VM") } return nil, nil, "", err } s := portlayer.NewLookupCache(vsImageStore) return s, client, storeURL.Path, nil }
func setup(t *testing.T) (*portlayer.NameLookupCache, *session.Session, error) { logrus.SetLevel(logrus.DebugLevel) StorageParentDir = datastore.TestName("imageTests") client := datastore.Session(context.TODO(), t) if client == nil { return nil, nil, fmt.Errorf("skip") } storeURL := &url.URL{ Path: StorageParentDir, Host: client.DatastorePath} vsImageStore, err := NewImageStore(context.TODO(), client, storeURL) if err != nil { if err.Error() == "can't find the hosting vm" { t.Skip("Skipping: test must be run in a VM") } return nil, nil, err } s := portlayer.NewLookupCache(vsImageStore) return s, client, nil }
func TestVolumeCreateListAndRestart(t *testing.T) { client := datastore.Session(context.TODO(), t) if client == nil { return } ctx := context.TODO() // Create the backing store on vsphere vsVolumeStore, err := NewVolumeStore(ctx, client) if !assert.NoError(t, err) || !assert.NotNil(t, vsVolumeStore) { return } // Root our datastore testStorePath := datastore.TestName("voltest") ds, err := datastore.NewHelper(ctx, client, client.Datastore, testStorePath) if !assert.NoError(t, err) || !assert.NotNil(t, ds) { return } // Add a volume store and give it a name ("testStoreName") volumeStore, err := vsVolumeStore.AddStore(ctx, ds, "testStoreName") if !assert.NoError(t, err) || !assert.NotNil(t, volumeStore) { return } // test we can list it m, err := vsVolumeStore.VolumeStoresList(ctx) if !assert.NoError(t, err) || !assert.NotNil(t, m) { return } // test the returned url matches s, ok := m["testStoreName"] if !assert.True(t, ok) || !assert.Equal(t, testStorePath, filepath.Base(s.String())) { return } // Clean up the mess defer func() { fm := object.NewFileManager(client.Vim25()) tasks.WaitForResult(context.TODO(), func(ctx context.Context) (tasks.ResultWaiter, error) { return fm.DeleteDatastoreFile(ctx, client.Datastore.Path(testStorePath), client.Datacenter) }) }() // Create the cache cache, err := portlayer.NewVolumeLookupCache(ctx, vsVolumeStore) if !assert.NoError(t, err) || !assert.NotNil(t, cache) { return } // Create the volumes (in parallel) numVols := 5 wg := &sync.WaitGroup{} wg.Add(numVols) volumes := make(map[string]*portlayer.Volume) for i := 0; i < numVols; i++ { go func(idx int) { defer wg.Done() ID := fmt.Sprintf("testvolume-%d", idx) // add some metadata if i is even var info map[string][]byte if idx%2 == 0 { info = make(map[string][]byte) info[ID] = []byte(ID) } outVol, err := cache.VolumeCreate(ctx, ID, volumeStore, 10240, info) if !assert.NoError(t, err) || !assert.NotNil(t, outVol) { return } volumes[ID] = outVol }(i) } wg.Wait() // list using the 
datastore (skipping the cache) outVols, err := vsVolumeStore.VolumesList(ctx) if !assert.NoError(t, err) || !assert.NotNil(t, outVols) || !assert.Equal(t, numVols, len(outVols)) { return } for _, outVol := range outVols { if !assert.Equal(t, volumes[outVol.ID], outVol) { return } } // Test restart // Create a new vs and cache to the same datastore (simulating restart) and compare secondVStore, err := NewVolumeStore(ctx, client) if !assert.NoError(t, err) || !assert.NotNil(t, vsVolumeStore) { return } volumeStore, err = secondVStore.AddStore(ctx, ds, "testStoreName") if !assert.NoError(t, err) || !assert.NotNil(t, volumeStore) { return } secondCache, err := portlayer.NewVolumeLookupCache(ctx, secondVStore) if !assert.NoError(t, err) || !assert.NotNil(t, cache) { return } secondOutVols, err := secondCache.VolumesList(ctx) if !assert.NoError(t, err) || !assert.NotNil(t, secondOutVols) || !assert.Equal(t, numVols, len(secondOutVols)) { return } for _, outVol := range secondOutVols { // XXX we could compare the Volumes, but the paths are different the // second time around on vsan since the vsan UUID is not included. if !assert.NotEmpty(t, volumes[outVol.ID].Device.DiskPath()) { return } } }
// Create a disk, make an ext filesystem on it, set the label, mount it,
// unmount it, then clean up.
func TestCreateFS(t *testing.T) {
	log.SetLevel(log.DebugLevel)

	client := Session(context.Background(), t)
	if client == nil {
		// no test session available; nothing to do
		return
	}

	// Datastore path that will hold the scratch disk for this test run.
	imagestore := client.Datastore.Path(datastore.TestName("diskTest"))

	fm := object.NewFileManager(client.Vim25())

	// create a directory in the datastore
	// eat the error because we dont care if it exists
	fm.MakeDirectory(context.TODO(), imagestore, nil, true)

	// Nuke the image store
	// NOTE(review): t.Skip inside a deferred func fires during teardown, not
	// before the body runs — confirm this skip placement is intentional.
	defer func() {
		task, err := fm.DeleteDatastoreFile(context.TODO(), imagestore, nil)
		if err != nil && err.Error() == "can't find the hosting vm" {
			t.Skip("Skipping: test must be run in a VM")
		}
		if !assert.NoError(t, err) {
			return
		}
		_, err = task.WaitForResult(context.TODO(), nil)
		if !assert.NoError(t, err) {
			return
		}
	}()

	op := trace.NewOperation(context.TODO(), "test")

	// The disk manager needs the hosting VM; outside one, skip.
	vdm, err := NewDiskManager(op, client)
	if err != nil && err.Error() == "can't find the hosting vm" {
		t.Skip("Skipping: test must be run in a VM")
	}
	if !assert.NoError(t, err) || !assert.NotNil(t, vdm) {
		return
	}

	// 1 KiB scratch disk, created and attached to this VM read-write.
	diskSize := int64(1 << 10)
	d, err := vdm.CreateAndAttach(op, path.Join(imagestore, "scratch.vmdk"), "", diskSize, os.O_RDWR)
	if !assert.NoError(t, err) {
		return
	}

	// make the filesysetem
	if err = d.Mkfs("foo"); !assert.NoError(t, err) {
		return
	}

	// set the label ("foo" is an arbitrary test label)
	if err = d.SetLabel("foo"); !assert.NoError(t, err) {
		return
	}

	// make a tempdir to mount the fs to
	dir, err := ioutil.TempDir("", "mnt")
	if !assert.NoError(t, err) {
		return
	}
	defer os.RemoveAll(dir)

	// do the mount
	err = d.Mount(dir, nil)
	if !assert.NoError(t, err) {
		return
	}

	// boom — verify the kernel actually sees the mount
	if mounted, err := mount.Mounted(dir); !assert.NoError(t, err) || !assert.True(t, mounted) {
		return
	}

	// clean up
	err = d.Unmount()
	if !assert.NoError(t, err) {
		return
	}

	err = vdm.Detach(op, d)
	if !assert.NoError(t, err) {
		return
	}
}
// Create a lineage of disks inheriting from eachother, write portion of a // string to each, the confirm the result is the whole string func TestCreateAndDetach(t *testing.T) { log.SetLevel(log.DebugLevel) client := Session(context.Background(), t) if client == nil { return } imagestore := client.Datastore.Path(datastore.TestName("diskManagerTest")) fm := object.NewFileManager(client.Vim25()) // create a directory in the datastore // eat the error because we dont care if it exists fm.MakeDirectory(context.TODO(), imagestore, nil, true) vdm, err := NewDiskManager(context.TODO(), client) if err != nil && err.Error() == "can't find the hosting vm" { t.Skip("Skipping: test must be run in a VM") } if !assert.NoError(t, err) || !assert.NotNil(t, vdm) { return } diskSize := int64(1 << 10) parent, err := vdm.Create(context.TODO(), path.Join(imagestore, "scratch.vmdk"), diskSize) if !assert.NoError(t, err) { return } numChildren := 3 children := make([]*VirtualDisk, numChildren) testString := "Ground control to Major Tom" writeSize := len(testString) / numChildren // Create children which inherit from eachother for i := 0; i < numChildren; i++ { p := path.Join(imagestore, fmt.Sprintf("child%d.vmdk", i)) child, cerr := vdm.CreateAndAttach(context.TODO(), p, parent.DatastoreURI, 0, os.O_RDWR) if !assert.NoError(t, cerr) { return } children[i] = child // Write directly to the disk f, cerr := os.OpenFile(child.DevicePath, os.O_RDWR, os.FileMode(0777)) if !assert.NoError(t, cerr) { return } start := i * writeSize end := start + writeSize if i == numChildren-1 { // last chunk, write to the end. 
_, cerr = f.WriteAt([]byte(testString[start:]), int64(start)) if !assert.NoError(t, cerr) || !assert.NoError(t, f.Sync()) { return } // Try to read the whole string b := make([]byte, len(testString)) f.Seek(0, 0) _, cerr = f.Read(b) if !assert.NoError(t, cerr) { return } //check against the test string if !assert.Equal(t, testString, string(b)) { return } } else { _, cerr = f.WriteAt([]byte(testString[start:end]), int64(start)) if !assert.NoError(t, cerr) || !assert.NoError(t, f.Sync()) { return } } f.Close() cerr = vdm.Detach(context.TODO(), child) if !assert.NoError(t, cerr) { return } // use this image as the next parent parent = child } // // Nuke the images // for i := len(children) - 1; i >= 0; i-- { // err = vdm.Delete(context.TODO(), children[i]) // if !assert.NoError(t, err) { // return // } // } // Nuke the image store _, err = tasks.WaitForResult(context.TODO(), func(ctx context.Context) (tasks.ResultWaiter, error) { return fm.DeleteDatastoreFile(ctx, imagestore, nil) }) if !assert.NoError(t, err) { return } }