func TestRollback(t *testing.T) {
	zpoolTest(t, func() {
		f, err := zfs.CreateFilesystem("test/snapshot-test", nil)
		ok(t, err)

		filesystems, err := zfs.Filesystems("")
		ok(t, err)

		for _, filesystem := range filesystems {
			equals(t, zfs.DatasetFilesystem, filesystem.Type)
		}

		s1, err := f.Snapshot("test", false)
		ok(t, err)

		_, err = f.Snapshot("test2", false)
		ok(t, err)

		s3, err := f.Snapshot("test3", false)
		ok(t, err)

		err = s3.Rollback(false)
		ok(t, err)

		err = s1.Rollback(false)
		assert(t, err != nil, "should error when rolling back beyond most recent without destroyMoreRecent = true")

		err = s1.Rollback(true)
		ok(t, err)

		ok(t, s1.Destroy(zfs.DestroyDefault))
		ok(t, f.Destroy(zfs.DestroyDefault))
	})
}
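// The tests in this section lean on a zpoolTest harness and small assertion
// helpers that are not shown here. Below is a minimal sketch, assuming a
// throwaway zpool backed by sparse temp files and plain *testing.T failure
// reporting; the helper names match the call sites above, but the bodies are
// illustrative, not the library's actual implementations.
func zpoolTest(t *testing.T, fn func()) {
	// Back the test pool with three sparse 1 GiB files.
	tempfiles := make([]string, 3)
	for i := range tempfiles {
		f, err := ioutil.TempFile("/tmp/", "zfs-")
		if err != nil {
			t.Fatal(err)
		}
		if err := f.Truncate(int64(1) << 30); err != nil {
			t.Fatal(err)
		}
		f.Close()
		defer os.Remove(f.Name())
		tempfiles[i] = f.Name()
	}

	pool, err := zfs.CreateZpool("test", nil, tempfiles...)
	if err != nil {
		t.Fatal(err)
	}
	defer pool.Destroy()
	fn()
}

func ok(t *testing.T, err error) {
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
}

func equals(t *testing.T, expected, actual interface{}) {
	if !reflect.DeepEqual(expected, actual) {
		t.Fatalf("expected %#v, got %#v", expected, actual)
	}
}

func assert(t *testing.T, condition bool, msg string) {
	if !condition {
		t.Fatal(msg)
	}
}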
// create makes a new filesystem for id under the driver's zfs root. With no
// parent it creates a fresh legacy-mounted dataset; otherwise it clones the
// parent. Any quota given in storageOpt is applied in both paths.
func (d *Driver) create(id, parent string, storageOpt map[string]string) error {
	name := d.zfsPath(id)
	quota, err := parseStorageOpt(storageOpt)
	if err != nil {
		return err
	}
	if parent == "" {
		mountoptions := map[string]string{"mountpoint": "legacy"}
		fs, err := zfs.CreateFilesystem(name, mountoptions)
		if err == nil {
			err = setQuota(name, quota)
			if err == nil {
				d.Lock()
				d.filesystemsCache[fs.Name] = true
				d.Unlock()
			}
		}
		return err
	}
	err = d.cloneFilesystem(name, d.zfsPath(parent))
	if err == nil {
		err = setQuota(name, quota)
	}
	return err
}
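// parseStorageOpt and setQuota are assumed by create above but not shown. A
// plausible sketch follows, assuming the driver accepts a "size" option and
// applies it as the dataset's quota via go-zfs's SetProperty; the option name
// and pass-through value handling are assumptions about the real helpers.
func parseStorageOpt(storageOpt map[string]string) (string, error) {
	for key, val := range storageOpt {
		switch strings.ToLower(key) {
		case "size":
			return val, nil
		default:
			return "0", fmt.Errorf("unknown storage option %q", key)
		}
	}
	return "0", nil // no quota requested
}

func setQuota(name string, quota string) error {
	if quota == "0" {
		return nil
	}
	fs, err := zfs.GetDataset(name)
	if err != nil {
		return err
	}
	return fs.SetProperty("quota", quota)
}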
func TestSendSnapshot(t *testing.T) {
	zpoolTest(t, func() {
		f, err := zfs.CreateFilesystem("test/snapshot-test", nil)
		ok(t, err)

		filesystems, err := zfs.Filesystems("")
		ok(t, err)

		for _, filesystem := range filesystems {
			equals(t, zfs.DatasetFilesystem, filesystem.Type)
		}

		s, err := f.Snapshot("test", false)
		ok(t, err)

		// The original flattened text discarded TempFile's error; check it.
		file, err := ioutil.TempFile("/tmp/", "zfs-")
		ok(t, err)
		defer file.Close()

		err = file.Truncate(pow2(30))
		ok(t, err)
		defer os.Remove(file.Name())

		err = s.SendSnapshot(file)
		ok(t, err)

		ok(t, s.Destroy(zfs.DestroyDefault))
		ok(t, f.Destroy(zfs.DestroyDefault))
	})
}
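// pow2 is another helper the tests assume; a one-line sketch returning 2^n as
// an int64 (the real helper may be implemented differently, e.g. via math.Pow).
func pow2(n int) int64 {
	return int64(1) << uint(n)
}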
func TestClone(t *testing.T) {
	zpoolTest(t, func() {
		f, err := zfs.CreateFilesystem("test/snapshot-test", nil)
		ok(t, err)

		filesystems, err := zfs.Filesystems("")
		ok(t, err)

		for _, filesystem := range filesystems {
			equals(t, zfs.DatasetFilesystem, filesystem.Type)
		}

		s, err := f.Snapshot("test", false)
		ok(t, err)
		equals(t, zfs.DatasetSnapshot, s.Type)
		equals(t, "test/snapshot-test@test", s.Name)

		c, err := s.Clone("test/clone-test", nil)
		ok(t, err)
		equals(t, zfs.DatasetFilesystem, c.Type)

		ok(t, c.Destroy(zfs.DestroyDefault))
		ok(t, s.Destroy(zfs.DestroyDefault))
		ok(t, f.Destroy(zfs.DestroyDefault))
	})
}
func (ZpoolTests) TestOrphanedZpoolFileAdoption(c *C) {
	dataset := "testpool-bananagram"

	backingFilePath := fmt.Sprintf("/tmp/zfs-%s", random.String(12))
	defer os.Remove(backingFilePath)

	provider, err := NewProvider(&ProviderConfig{
		DatasetName: dataset,
		Make: &MakeDev{
			BackingFilename: backingFilePath,
			Size:            one_gig,
		},
	})
	defer func() {
		pool, _ := gzfs.GetZpool(dataset)
		if pool != nil {
			pool.Destroy()
		}
	}()
	c.Assert(err, IsNil)
	c.Assert(provider, NotNil)

	// add a dataset to this zpool, so we can check for it on the flip side
	markerDatasetName := path.Join(dataset, "testfs")
	_, err = gzfs.CreateFilesystem(markerDatasetName, nil)
	c.Assert(err, IsNil)

	// do a 'zpool export'
	// this roughly approximates what we see after a host reboot
	// (though host reboots may leave it in an even more unclean state than this)
	err = exec.Command("zpool", "export", "-f", dataset).Run()
	c.Assert(err, IsNil)

	// sanity check that our test is doing the right thing: zpool forgot about these
	_, err = gzfs.GetDataset(dataset)
	c.Assert(err, NotNil)
	_, err = gzfs.GetDataset(markerDatasetName)
	c.Assert(err, NotNil)

	// if we create another provider with the same file vdev path, it should
	// pick up that file again without wrecking the dataset
	provider, err = NewProvider(&ProviderConfig{
		DatasetName: dataset,
		Make: &MakeDev{
			BackingFilename: backingFilePath,
			Size:            one_gig,
		},
	})
	c.Assert(err, IsNil)
	c.Assert(provider, NotNil)

	_, err = gzfs.GetDataset(markerDatasetName)
	c.Assert(err, IsNil)
}
func TestDiff(t *testing.T) {
	zpoolTest(t, func() {
		fs, err := zfs.CreateFilesystem("test/origin", nil)
		ok(t, err)

		linkedFile, err := os.Create(filepath.Join(fs.Mountpoint, "linked"))
		ok(t, err)

		movedFile, err := os.Create(filepath.Join(fs.Mountpoint, "file"))
		ok(t, err)

		snapshot, err := fs.Snapshot("snapshot", false)
		ok(t, err)

		unicodeFile, err := os.Create(filepath.Join(fs.Mountpoint, "i ❤ unicode"))
		ok(t, err)

		err = os.Rename(movedFile.Name(), movedFile.Name()+"-new")
		ok(t, err)

		err = os.Link(linkedFile.Name(), linkedFile.Name()+"_hard")
		ok(t, err)

		inodeChanges, err := fs.Diff(snapshot.Name)
		ok(t, err)
		equals(t, 4, len(inodeChanges))

		equals(t, "/test/origin/", inodeChanges[0].Path)
		equals(t, zfs.Directory, inodeChanges[0].Type)
		equals(t, zfs.Modified, inodeChanges[0].Change)

		equals(t, "/test/origin/linked", inodeChanges[1].Path)
		equals(t, zfs.File, inodeChanges[1].Type)
		equals(t, zfs.Modified, inodeChanges[1].Change)
		equals(t, 1, inodeChanges[1].ReferenceCountChange)

		equals(t, "/test/origin/file", inodeChanges[2].Path)
		equals(t, "/test/origin/file-new", inodeChanges[2].NewPath)
		equals(t, zfs.File, inodeChanges[2].Type)
		equals(t, zfs.Renamed, inodeChanges[2].Change)

		equals(t, "/test/origin/i ❤ unicode", inodeChanges[3].Path)
		equals(t, zfs.File, inodeChanges[3].Type)
		equals(t, zfs.Created, inodeChanges[3].Change)

		ok(t, movedFile.Close())
		ok(t, unicodeFile.Close())
		ok(t, linkedFile.Close())
		ok(t, snapshot.Destroy(zfs.DestroyForceUmount))
		ok(t, fs.Destroy(zfs.DestroyForceUmount))
	})
}
// create makes a new filesystem for id under the driver's zfs root, cloning
// from parent when one is given. This earlier revision has no quota handling.
func (d *Driver) create(id, parent string) error {
	name := d.ZfsPath(id)
	if parent == "" {
		mountoptions := map[string]string{"mountpoint": "legacy"}
		fs, err := zfs.CreateFilesystem(name, mountoptions)
		if err == nil {
			d.Lock()
			d.filesystemsCache[fs.Name] = true
			d.Unlock()
		}
		return err
	}
	return d.cloneFilesystem(name, d.ZfsPath(parent))
}
func TestFilesystems(t *testing.T) {
	zpoolTest(t, func() {
		f, err := zfs.CreateFilesystem("test/filesystem-test", nil)
		ok(t, err)

		filesystems, err := zfs.Filesystems("")
		ok(t, err)

		for _, filesystem := range filesystems {
			equals(t, zfs.DatasetFilesystem, filesystem.Type)
		}

		ok(t, f.Destroy(zfs.DestroyDefault))
	})
}
// NewVolume creates a new ZFS-backed volume: a child dataset under the
// provider's root dataset, mounted at the volume's basemount.
func (p *Provider) NewVolume() (volume.Volume, error) {
	id := random.UUID()
	v := &zfsVolume{
		info:      &volume.Info{ID: id},
		provider:  p,
		basemount: p.mountPath(id),
	}
	var err error
	v.dataset, err = zfs.CreateFilesystem(path.Join(v.provider.dataset.Name, id), map[string]string{
		"mountpoint": v.basemount,
	})
	if err != nil {
		return nil, err
	}
	p.volumes[id] = v
	return v, nil
}
func TestChildren(t *testing.T) {
	zpoolTest(t, func() {
		f, err := zfs.CreateFilesystem("test/snapshot-test", nil)
		ok(t, err)

		s, err := f.Snapshot("test", false)
		ok(t, err)
		equals(t, zfs.DatasetSnapshot, s.Type)
		equals(t, "test/snapshot-test@test", s.Name)

		children, err := f.Children(0)
		ok(t, err)
		equals(t, 1, len(children))
		equals(t, "test/snapshot-test@test", children[0].Name)

		ok(t, s.Destroy(zfs.DestroyDefault))
		ok(t, f.Destroy(zfs.DestroyDefault))
	})
}
func TestCreateFilesystemWithProperties(t *testing.T) {
	zpoolTest(t, func() {
		props := map[string]string{
			"compression": "lz4",
		}

		f, err := zfs.CreateFilesystem("test/filesystem-test", props)
		ok(t, err)
		equals(t, "lz4", f.Compression)

		filesystems, err := zfs.Filesystems("")
		ok(t, err)

		for _, filesystem := range filesystems {
			equals(t, zfs.DatasetFilesystem, filesystem.Type)
		}

		ok(t, f.Destroy(zfs.DestroyDefault))
	})
}
// NewVolume creates a new data volume: a typed dataset under the provider's
// root dataset, stamped with a creation time and mounted at its basemount.
// This later revision routes naming through datasetPath/mountPath helpers.
func (p *Provider) NewVolume() (volume.Volume, error) {
	id := random.UUID()
	info := &volume.Info{
		ID:        id,
		Type:      volume.VolumeTypeData,
		CreatedAt: time.Now(),
	}
	v := &zfsVolume{
		info:      info,
		provider:  p,
		basemount: p.mountPath(info),
	}
	var err error
	v.dataset, err = zfs.CreateFilesystem(p.datasetPath(info), map[string]string{
		"mountpoint": v.basemount,
	})
	if err != nil {
		return nil, err
	}
	p.volumes[id] = v
	return v, nil
}
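// datasetPath and mountPath are provider helpers not shown in this section.
// A hedged sketch consistent with the WorkingDir/mnt/<type> layout that
// NewProvider creates below; the exact path scheme is an assumption.
func (p *Provider) datasetPath(info *volume.Info) string {
	return path.Join(p.dataset.Name, string(info.Type), info.ID)
}

func (p *Provider) mountPath(info *volume.Info) string {
	return filepath.Join(p.config.WorkingDir, "mnt", string(info.Type), info.ID)
}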
func NewProvider(config *ProviderConfig) (volume.Provider, error) {
	if _, err := exec.LookPath("zfs"); err != nil {
		return nil, fmt.Errorf("zfs command is not available")
	}
	dataset, err := zfs.GetDataset(config.DatasetName)
	if err != nil {
		if isDatasetNotExistsError(err) {
			// if the dataset doesn't exist...
			if config.Make == nil {
				// not much we can do without a dataset or pool to contain data
				return nil, err
			}
			// make a zpool backed by a sparse file. it's the most portable thing we can do.
			if err := os.MkdirAll(filepath.Dir(config.Make.BackingFilename), 0755); err != nil {
				return nil, err
			}
			f, err := os.OpenFile(config.Make.BackingFilename, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0600)
			if err == nil {
				// if we've created a new file, size it and create a new zpool
				if err = f.Truncate(config.Make.Size); err != nil {
					return nil, err
				}
				f.Close()
				if _, err = zfs.CreateZpool(
					config.DatasetName,
					nil,
					"-mnone", // do not mount the root dataset. (we'll mount our own datasets as necessary.)
					config.Make.BackingFilename,
				); err != nil {
					return nil, err
				}
			} else if err.(*os.PathError).Err == syscall.EEXIST {
				// if the file already exists, check it for an existing zpool
				if err := zpoolImportFile(config.Make.BackingFilename); err != nil {
					// if 'zpool import' didn't believe it, halt here:
					// we could overwrite, but we'd rather stop and avoid potential data loss.
					return nil, fmt.Errorf("error attempting import of existing zpool file: %s", err)
				}
				// note: 'zpool import' recreated *all* the volume datasets in that pool.
				// currently, even if they're not known to a volume manager, they're not garbage collected.
			} else {
				return nil, err
			}
			// get the dataset again... `zfs.Zpool` isn't a `zfs.Dataset`
			dataset, err = zfs.GetDataset(config.DatasetName)
			if err != nil {
				return nil, err
			}
		} else {
			// any error more complicated than not_exists, and we're out of our depth
			return nil, err
		}
	}
	if config.WorkingDir == "" {
		config.WorkingDir = "/var/lib/flynn/volumes/zfs/"
	}
	// ensure a mount directory and a child dataset exist for each volume type
	for _, typ := range volume.VolumeTypes {
		if err := os.MkdirAll(filepath.Join(config.WorkingDir, "mnt", string(typ)), 0755); err != nil {
			return nil, err
		}
		typeDataset := filepath.Join(config.DatasetName, string(typ))
		if _, err := zfs.GetDataset(typeDataset); err != nil {
			if _, err := zfs.CreateFilesystem(typeDataset, nil); err != nil {
				return nil, err
			}
		}
	}
	return &Provider{
		config:  config,
		dataset: dataset,
		volumes: make(map[string]*zfsVolume),
	}, nil
}
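// isDatasetNotExistsError and zpoolImportFile are referenced by NewProvider
// but not shown. A minimal sketch, assuming go-zfs surfaces the zfs CLI's
// stderr text in the error and that a plain `zpool import -d` over the
// backing file's directory suffices; both are assumptions about the real
// helpers, not their actual implementations.
func isDatasetNotExistsError(err error) bool {
	return err != nil && strings.Contains(err.Error(), "dataset does not exist")
}

func zpoolImportFile(backingFile string) error {
	// `zpool import -d <dir>` scans a directory for vdevs; pointing it at
	// the backing file's directory lets it rediscover an exported pool.
	out, err := exec.Command("zpool", "import", "-a", "-d", filepath.Dir(backingFile)).CombinedOutput()
	if err != nil {
		return fmt.Errorf("zpool import failed: %s: %s", err, out)
	}
	return nil
}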