func (ZpoolTests) TestOrphanedZpoolFileAdoption(c *C) {
	dataset := "testpool-bananagram"

	backingFilePath := fmt.Sprintf("/tmp/zfs-%s", random.String(12))
	defer os.Remove(backingFilePath)

	provider, err := NewProvider(&ProviderConfig{
		DatasetName: dataset,
		Make: &MakeDev{
			BackingFilename: backingFilePath,
			Size:            one_gig,
		},
	})
	defer func() {
		pool, _ := gzfs.GetZpool(dataset)
		if pool != nil {
			pool.Destroy()
		}
	}()
	c.Assert(err, IsNil)
	c.Assert(provider, NotNil)

	// add a dataset to this zpool, so we can check for it on the flip side
	markerDatasetName := path.Join(dataset, "testfs")
	_, err = gzfs.CreateFilesystem(markerDatasetName, nil)
	c.Assert(err, IsNil)

	// do a 'zpool export'
	// this roughly approximates what we see after a host reboot
	// (though host reboots may leave it in an even more unclean state than this)
	err = exec.Command("zpool", "export", "-f", dataset).Run()
	c.Assert(err, IsNil)

	// sanity check that our test is doing the right thing: zpool forgot about these
	_, err = gzfs.GetDataset(dataset)
	c.Assert(err, NotNil)
	_, err = gzfs.GetDataset(markerDatasetName)
	c.Assert(err, NotNil)

	// if we create another provider with the same file vdev path, it should
	// pick up that file again without wrecking the dataset
	provider, err = NewProvider(&ProviderConfig{
		DatasetName: dataset,
		Make: &MakeDev{
			BackingFilename: backingFilePath,
			Size:            one_gig,
		},
	})
	c.Assert(err, IsNil)
	c.Assert(provider, NotNil)
	_, err = gzfs.GetDataset(markerDatasetName)
	c.Assert(err, IsNil)
}
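// Hypothetical test-scope definitions assumed by the test above (not part of the
// original excerpt): a one-gigabyte size for the sparse backing file, and `gzfs`
// as the import alias the test uses for the go-zfs library it asserts against
// directly.
const one_gig = 1024 * 1024 * 1024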
func (b *Provider) SendSnapshot(vol volume.Volume, haves []json.RawMessage, output io.Writer) error {
	zvol, err := b.owns(vol)
	if err != nil {
		return err
	}
	if !vol.IsSnapshot() {
		return fmt.Errorf("can only send a snapshot")
	}

	// zfs recv can only really accept snapshots that apply to the current tip
	var latestRemote string
	if len(haves) > 0 {
		have := &zfsHaves{}
		if err := json.Unmarshal(haves[len(haves)-1], have); err == nil {
			latestRemote = have.SnapID
		}
	}

	// look for intersection of existing snapshots on this volume; if so do incremental
	parentName := strings.SplitN(zvol.dataset.Name, "@", 2)[0]
	parentDataset, err := zfs.GetDataset(parentName)
	if err != nil {
		return err
	}
	snapshots, err := parentDataset.Snapshots()
	if err != nil {
		return err
	}

	// we can fly incremental iff the latest snap on the remote is available here
	useIncremental := false
	if latestRemote != "" {
		for _, snap := range snapshots {
			if strings.SplitN(snap.Name, "@", 2)[1] == latestRemote {
				useIncremental = true
				break
			}
		}
	}

	// at last, send:
	if useIncremental {
		sendCmd := exec.Command("zfs", "send", "-i", latestRemote, zvol.dataset.Name)
		sendCmd.Stdout = output
		return sendCmd.Run()
	}
	return zvol.dataset.SendSnapshot(output)
}
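// A minimal sketch of the zfsHaves record that SendSnapshot unmarshals above,
// assuming the receiving side reports the snapshot IDs it already has so the
// sender can pick an incremental base. The field name SnapID matches its usage
// here; the JSON tag is an assumption.
type zfsHaves struct {
	SnapID string `json:"snap_id"`
}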
func (b *Provider) RestoreVolumeState(volInfo *volume.Info, data json.RawMessage) (volume.Volume, error) {
	record := &zfsVolumeRecord{}
	if err := json.Unmarshal(data, record); err != nil {
		return nil, fmt.Errorf("cannot restore volume %q: %s", volInfo.ID, err)
	}
	dataset, err := zfs.GetDataset(record.Dataset)
	if err != nil {
		return nil, fmt.Errorf("cannot restore volume %q: %s", volInfo.ID, err)
	}
	v := &zfsVolume{
		info:      volInfo,
		provider:  b,
		dataset:   dataset,
		basemount: record.Basemount,
	}
	if err := b.mountDataset(v); err != nil {
		return nil, err
	}
	b.volumes[volInfo.ID] = v
	return v, nil
}
func (b *Provider) RestoreVolumeState(volInfo *volume.Info, data json.RawMessage) (volume.Volume, error) {
	record := &zfsVolumeRecord{}
	if err := json.Unmarshal(data, record); err != nil {
		return nil, fmt.Errorf("cannot restore volume %q: %s", volInfo.ID, err)
	}
	dataset, err := zfs.GetDataset(record.Dataset)
	if err != nil {
		return nil, fmt.Errorf("cannot restore volume %q: %s", volInfo.ID, err)
	}
	v := &zfsVolume{
		info:      volInfo,
		provider:  b,
		dataset:   dataset,
		basemount: record.Basemount,
	}
	// zfs should have already remounted filesystems; special remount case for snapshots
	if v.IsSnapshot() {
		if err := b.mountSnapshot(v); err != nil {
			return nil, err
		}
	}
	b.volumes[volInfo.ID] = v
	return v, nil
}
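// A minimal sketch of the persisted record that RestoreVolumeState decodes above,
// assuming it only needs to carry the dataset name and the base mountpoint. The
// field names follow their usage in the function; the JSON tags are assumptions.
type zfsVolumeRecord struct {
	Dataset   string `json:"dataset"`
	Basemount string `json:"basemount"`
}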
func NewProvider(config *ProviderConfig) (volume.Provider, error) {
	if _, err := exec.LookPath("zfs"); err != nil {
		return nil, fmt.Errorf("zfs command is not available")
	}
	dataset, err := zfs.GetDataset(config.DatasetName)
	if err != nil {
		if isDatasetNotExistsError(err) {
			// if the dataset doesn't exist...
			if config.Make == nil {
				// not much we can do without a dataset or pool to contain data
				return nil, err
			}
			// make a zpool backed by a sparse file.  it's the most portable thing we can do.
			if err := os.MkdirAll(filepath.Dir(config.Make.BackingFilename), 0755); err != nil {
				return nil, err
			}
			f, err := os.OpenFile(config.Make.BackingFilename, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0600)
			if err == nil {
				// if we've created a new file, size it and create a new zpool
				if err = f.Truncate(config.Make.Size); err != nil {
					return nil, err
				}
				f.Close()
				if _, err = zfs.CreateZpool(
					config.DatasetName,
					nil,
					"-mnone", // do not mount the root dataset.  (we'll mount our own datasets as necessary.)
					config.Make.BackingFilename,
				); err != nil {
					return nil, err
				}
			} else if err.(*os.PathError).Err == syscall.EEXIST {
				// if the file already exists, check it for an existing zpool
				if err := zpoolImportFile(config.Make.BackingFilename); err != nil {
					// if 'zpool import' didn't believe it... halt here.
					// we could overwrite, but we'd rather stop and avoid potential data loss.
					return nil, fmt.Errorf("error attempting import of existing zpool file: %s", err)
				}
				// note: 'zpool import' recreated *all* the volume datasets in that pool.
				// currently, even if they're not known to a volume manager, they're not garbage collected.
			} else {
				return nil, err
			}
			// get the dataset again... `zfs.Zpool` isn't a `zfs.Dataset`
			dataset, err = zfs.GetDataset(config.DatasetName)
			if err != nil {
				return nil, err
			}
		} else {
			// any error more complicated than not_exists, and we're out of our depth
			return nil, err
		}
	}
	if config.WorkingDir == "" {
		config.WorkingDir = "/var/lib/flynn/volumes/zfs/"
	}
	return &Provider{
		config:  config,
		dataset: dataset,
		volumes: make(map[string]*zfsVolume),
	}, nil
}
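// A minimal sketch of what zpoolImportFile (called from NewProvider above) might
// do, assuming it simply asks `zpool import` to scan the directory containing the
// file vdev and adopt any pools it finds there: `-d` points the scan at that
// directory and `-a` imports everything discovered. The real implementation may
// inspect the file more carefully before importing; this sketch reuses the same
// os/exec, path/filepath, and fmt imports as the surrounding code.
func zpoolImportFile(fileVdevPath string) error {
	cmd := exec.Command("zpool", "import", "-a", "-d", filepath.Dir(fileVdevPath))
	if out, err := cmd.CombinedOutput(); err != nil {
		return fmt.Errorf("zpool import failed: %s (%s)", err, out)
	}
	return nil
}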