Example #1
func (ZpoolTests) TestOrphanedZpoolFileAdoption(c *C) {
	dataset := "testpool-bananagram"

	backingFilePath := fmt.Sprintf("/tmp/zfs-%s", random.String(12))
	defer os.Remove(backingFilePath)

	provider, err := NewProvider(&ProviderConfig{
		DatasetName: dataset,
		Make: &MakeDev{
			BackingFilename: backingFilePath,
			Size:            one_gig,
		},
	})
	// ensure the pool is torn down even if an assertion fails mid-test
	defer func() {
		pool, _ := gzfs.GetZpool(dataset)
		if pool != nil {
			pool.Destroy()
		}
	}()
	c.Assert(err, IsNil)
	c.Assert(provider, NotNil)

	// add a dataset to this zpool, so we can check for it on the flip side
	markerDatasetName := path.Join(dataset, "testfs")
	_, err = gzfs.CreateFilesystem(markerDatasetName, nil)
	c.Assert(err, IsNil)

	// do a 'zpool export'
	// this roughly approximates what we see after a host reboot
	// (though host reboots may leave it in an even more unclean state than this)
	err = exec.Command("zpool", "export", "-f", dataset).Run()
	c.Assert(err, IsNil)

	// sanity check that the export worked: zfs should no longer know about either dataset
	_, err = gzfs.GetDataset(dataset)
	c.Assert(err, NotNil)
	_, err = gzfs.GetDataset(markerDatasetName)
	c.Assert(err, NotNil)

	// if we create another provider with the same file vdev path, it should
	// pick up that file again without wrecking the dataset
	provider, err = NewProvider(&ProviderConfig{
		DatasetName: dataset,
		Make: &MakeDev{
			BackingFilename: backingFilePath,
			Size:            one_gig,
		},
	})
	c.Assert(err, IsNil)
	c.Assert(provider, NotNil)
	_, err = gzfs.GetDataset(markerDatasetName)
	c.Assert(err, IsNil)
}
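What makes the second NewProvider call succeed is the adoption step: a pool whose vdev is a plain file can be re-imported after an export by pointing zpool import at the directory holding the backing file. The sketch below shows that step in isolation, assuming the provider shells out to the zpool CLI the same way the test does; the function name, package layout, and error wrapping are illustrative, not taken from the source.

package zfs

import (
	"fmt"
	"os/exec"
	"path/filepath"
)

// importExistingZpool re-adopts a pool whose file vdev was previously
// `zpool export`ed. `zpool import -d <dir>` scans that directory for pool
// labels instead of the default /dev search path, which is what lets it
// find file-backed vdevs like the /tmp/zfs-* file in the test above.
func importExistingZpool(poolName, backingFilePath string) error {
	out, err := exec.Command(
		"zpool", "import",
		"-d", filepath.Dir(backingFilePath),
		poolName,
	).CombinedOutput()
	if err != nil {
		return fmt.Errorf("zpool import of %q failed: %s (output: %q)", poolName, err, out)
	}
	return nil
}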
Example #2
func (b *Provider) NewVolume() (volume.Volume, error) {
	id := random.UUID()
	v := &zfsVolume{
		info:      &volume.Info{ID: id},
		provider:  b,
		basemount: b.mountPath(id),
	}
	// Create a child dataset under the provider's parent dataset and mount
	// it at the volume's base mount path.
	var err error
	v.dataset, err = zfs.CreateFilesystem(path.Join(v.provider.dataset.Name, id), map[string]string{
		"mountpoint": v.basemount,
	})
	if err != nil {
		return nil, err
	}
	// Register the volume so the provider can find it later.
	b.volumes[id] = v
	return v, nil
}
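NewVolume leans on several names defined elsewhere in the package. The sketch below reconstructs the shapes it assumes, inferred purely from usage; the field layout, import paths, and the mountPath scheme are guesses for illustration, not the package's actual definitions.

package zfs

import (
	"path/filepath"

	"github.com/flynn/flynn/volume"
	zfs "github.com/mistifyio/go-zfs"
)

// Assumed shapes, inferred from how NewVolume uses them; the real
// definitions live elsewhere in this package and may differ.
type Provider struct {
	dataset *zfs.Dataset          // parent dataset backing the whole pool
	volumes map[string]*zfsVolume // volume ID -> volume registry
}

type zfsVolume struct {
	info      *volume.Info
	provider  *Provider
	dataset   *zfs.Dataset // set once CreateFilesystem succeeds
	basemount string       // mountpoint assigned to the dataset
}

// mountPath maps a volume ID to a mount directory. The layout here is a
// guess for illustration, not the package's actual scheme.
func (b *Provider) mountPath(id string) string {
	return filepath.Join("/var/lib/zfs-volumes/mounts", id)
}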