Example #1
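// TestProviderAutomaticFileVdevZpoolCreation checks that NewProvider creates the
// file-backed vdev and its zpool when the backing file does not already exist,
// and that no mountpoint directory shows up at the filesystem root.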
func (ZpoolTests) TestProviderAutomaticFileVdevZpoolCreation(c *C) {
	dataset := "testpool-dinosaur"

	// don't actually use ioutil.TempFile;
	// we want to exercise the path where the file doesn't exist.
	backingFilePath := fmt.Sprintf("/tmp/zfs-%s", random.String(12))
	defer os.Remove(backingFilePath)

	provider, err := NewProvider(&ProviderConfig{
		DatasetName: dataset,
		Make: &MakeDev{
			BackingFilename: backingFilePath,
			Size:            one_gig,
		},
	})
	defer func() {
		pool, _ := gzfs.GetZpool(dataset)
		if pool != nil {
			pool.Destroy()
		}
	}()
	c.Assert(err, IsNil)
	c.Assert(provider, NotNil)

	// also, we shouldn't get any '/testpool-dinosaur' mountpoint dir at root
	_, err = os.Stat("/" + dataset)
	c.Assert(err, NotNil)
	c.Assert(os.IsNotExist(err), Equals, true)
}
Example #2
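// TestOrphanedZpoolFileAdoption checks that a provider pointed at an existing but
// exported file vdev re-imports the zpool and keeps the datasets already on it,
// rather than recreating the pool from scratch.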
func (ZpoolTests) TestOrphanedZpoolFileAdoption(c *C) {
	dataset := "testpool-bananagram"

	backingFilePath := fmt.Sprintf("/tmp/zfs-%s", random.String(12))
	defer os.Remove(backingFilePath)

	provider, err := NewProvider(&ProviderConfig{
		DatasetName: dataset,
		Make: &MakeDev{
			BackingFilename: backingFilePath,
			Size:            one_gig,
		},
	})
	defer func() {
		pool, _ := gzfs.GetZpool(dataset)
		if pool != nil {
			pool.Destroy()
		}
	}()
	c.Assert(err, IsNil)
	c.Assert(provider, NotNil)

	// add a dataset to this zpool, so we can check for it on the flip side
	markerDatasetName := path.Join(dataset, "testfs")
	_, err = gzfs.CreateFilesystem(markerDatasetName, nil)
	c.Assert(err, IsNil)

	// do a 'zpool export'
	// this roughly approximates what we see after a host reboot
	// (though host reboots may leave it in an even more unclean state than this)
	err = exec.Command("zpool", "export", "-f", dataset).Run()
	c.Assert(err, IsNil)

	// sanity check that our test is doing the right thing: zpool forgot about these
	_, err = gzfs.GetDataset(dataset)
	c.Assert(err, NotNil)
	_, err = gzfs.GetDataset(markerDatasetName)
	c.Assert(err, NotNil)

	// if we create another provider with the same file vdev path, it should
	// pick up that file again without wrecking the dataset
	provider, err = NewProvider(&ProviderConfig{
		DatasetName: dataset,
		Make: &MakeDev{
			BackingFilename: backingFilePath,
			Size:            one_gig,
		},
	})
	c.Assert(err, IsNil)
	c.Assert(provider, NotNil)
	_, err = gzfs.GetDataset(markerDatasetName)
	c.Assert(err, IsNil)
}
Example #3
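// TearDownTest cleans up the temporary zpool used by a test: it removes the
// backing vdev file, destroys any datasets left in the pool (and their
// mountpoint dirs), and finally destroys the pool itself.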
func (s *TempZpool) TearDownTest(c *C) {
	if s.ZpoolVdevFilePath != "" {
		os.Remove(s.ZpoolVdevFilePath)
	}
	pool, _ := gzfs.GetZpool(s.ZpoolName)
	if pool != nil {
		if datasets, err := pool.Datasets(); err == nil {
			for _, dataset := range datasets {
				dataset.Destroy(gzfs.DestroyRecursive | gzfs.DestroyForceUmount)
				os.Remove(dataset.Mountpoint)
			}
		}
		err := pool.Destroy()
		c.Assert(err, IsNil)
	}
}
Example #4
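// TestProviderExistingZpoolDetection checks that constructing a second provider
// for an already-existing zpool reuses that pool instead of creating the vdev
// file named in its MakeDev config.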
func (ZpoolTests) TestProviderExistingZpoolDetection(c *C) {
	dataset := "testpool-festival"

	backingFilePath := fmt.Sprintf("/tmp/zfs-%s", random.String(12))
	defer os.Remove(backingFilePath)

	provider, err := NewProvider(&ProviderConfig{
		DatasetName: dataset,
		Make: &MakeDev{
			BackingFilename: backingFilePath,
			Size:            one_gig,
		},
	})
	defer func() {
		pool, _ := gzfs.GetZpool(dataset)
		if pool != nil {
			pool.Destroy()
		}
	}()
	c.Assert(err, IsNil)
	c.Assert(provider, NotNil)

	// if we create another provider with the same dataset, it should
	// see the existing one and thus shouldn't hit the MakeDev path
	badFilePath := "/tmp/zfs-test-should-not-exist"
	provider, err = NewProvider(&ProviderConfig{
		DatasetName: dataset,
		Make: &MakeDev{
			BackingFilename: badFilePath,
			Size:            one_gig,
		},
	})
	c.Assert(err, IsNil)
	c.Assert(provider, NotNil)
	_, err = os.Stat(badFilePath)
	c.Assert(err, NotNil)
	c.Assert(os.IsNotExist(err), Equals, true)
}
Example #5
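// TestNonZpoolFilesFailImport checks that pointing a provider at an existing file
// that is not a valid zpool vdev yields an error rather than a provider.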
func (ZpoolTests) TestNonZpoolFilesFailImport(c *C) {
	dataset := "testpool-landslide"

	// write a small junk file that is definitely not a valid zpool vdev
	backingFile, err := ioutil.TempFile("/tmp/", "zfs-")
	c.Assert(err, IsNil)
	backingFile.Write([]byte{'a', 'b', 'c'})
	backingFile.Close()
	defer os.Remove(backingFile.Name())

	provider, err := NewProvider(&ProviderConfig{
		DatasetName: dataset,
		Make: &MakeDev{
			BackingFilename: backingFile.Name(),
			Size:            one_gig,
		},
	})
	defer func() {
		pool, _ := gzfs.GetZpool(dataset)
		if pool != nil {
			pool.Destroy()
		}
	}()
	c.Assert(err, NotNil)
	c.Assert(provider, IsNil)
}
Example #6
// covers basic volume persistence across a volume manager restart
func (s *PersistenceTests) TestPersistence(c *C) {
	idString := random.String(12)
	vmanDBfilePath := fmt.Sprintf("/tmp/flynn-volumes-%s.bolt", idString)
	zfsDatasetName := fmt.Sprintf("flynn-test-dataset-%s", idString)
	zfsVdevFilePath := fmt.Sprintf("/tmp/flynn-test-zpool-%s.vdev", idString)
	defer os.Remove(vmanDBfilePath)
	defer os.Remove(zfsVdevFilePath)
	defer func() {
		pool, _ := gzfs.GetZpool(zfsDatasetName)
		if pool != nil {
			if datasets, err := pool.Datasets(); err == nil {
				for _, dataset := range datasets {
					dataset.Destroy(gzfs.DestroyRecursive | gzfs.DestroyForceUmount)
					os.Remove(dataset.Mountpoint)
				}
			}
			err := pool.Destroy()
			c.Assert(err, IsNil)
		}
	}()

	// new zfs provider with a brand-new backing vdev file
	volProv, err := zfs.NewProvider(&zfs.ProviderConfig{
		DatasetName: zfsDatasetName,
		Make: &zfs.MakeDev{
			BackingFilename: zfsVdevFilePath,
			Size:            int64(math.Pow(2, float64(30))),
		},
	})
	c.Assert(err, IsNil)

	// new volume manager with that shiny new backing zfs vdev file and a new boltdb
	vman := volumemanager.New(
		vmanDBfilePath,
		func() (volume.Provider, error) { return volProv, nil },
	)
	c.Assert(vman.OpenDB(), IsNil)

	// make a couple of volumes (the restore below expects to find two)
	vol1, err := vman.NewVolume()
	c.Assert(err, IsNil)
	_, err = vman.NewVolume()
	c.Assert(err, IsNil)

	// assert existence of filesystems; emplace some data
	f, err := os.Create(filepath.Join(vol1.Location(), "alpha"))
	c.Assert(err, IsNil)
	f.Close()

	// close persistence
	c.Assert(vman.CloseDB(), IsNil)

	// hack zfs export/umounting to emulate host shutdown
	err = exec.Command("zpool", "export", "-f", zfsDatasetName).Run()
	c.Assert(err, IsNil)

	// sanity check: assert the filesystems are gone
	// note that the directories remain present after 'zpool export'
	_, err = os.Stat(filepath.Join(vol1.Location(), "alpha"))
	c.Assert(os.IsNotExist(err), Equals, true)

	// restore
	vman = volumemanager.New(
		vmanDBfilePath,
		func() (volume.Provider, error) {
			c.Fatal("default provider setup should not be called if the previous provider was restored")
			return nil, nil
		},
	)
	c.Assert(vman.OpenDB(), IsNil)

	// assert volumes
	restoredVolumes := vman.Volumes()
	c.Assert(restoredVolumes, HasLen, 2)
	c.Assert(restoredVolumes[vol1.Info().ID], NotNil)

	// switch to the new volume references; do a bunch of smell checks on those
	vol1restored := restoredVolumes[vol1.Info().ID]
	c.Assert(vol1restored.Info(), DeepEquals, vol1.Info())
	c.Assert(vol1restored.Provider(), NotNil)

	// assert existences of filesystems and previous data
	c.Assert(vol1restored.Location(), testutils.DirContains, []string{"alpha"})
}