Code example #1
File: zpool_test.go Project: ably-forks/flynn
func (ZpoolTests) TestOrphanedZpoolFileAdoption(c *C) {
	dataset := "testpool-bananagram"

	backingFilePath := fmt.Sprintf("/tmp/zfs-%s", random.String(12))
	defer os.Remove(backingFilePath)

	provider, err := NewProvider(&ProviderConfig{
		DatasetName: dataset,
		Make: &MakeDev{
			BackingFilename: backingFilePath,
			Size:            one_gig,
		},
	})
	defer func() {
		pool, _ := gzfs.GetZpool(dataset)
		if pool != nil {
			pool.Destroy()
		}
	}()
	c.Assert(err, IsNil)
	c.Assert(provider, NotNil)

	// add a dataset to this zpool, so we can check for it on the flip side
	markerDatasetName := path.Join(dataset, "testfs")
	_, err = gzfs.CreateFilesystem(markerDatasetName, nil)
	c.Assert(err, IsNil)

	// do a 'zpool export'
	// this roughly approximates what we see after a host reboot
	// (though host reboots may leave it in an even more unclean state than this)
	err = exec.Command("zpool", "export", "-f", dataset).Run()
	c.Assert(err, IsNil)

	// sanity check that our test is doing the right thing: zpool forgot about these
	_, err = gzfs.GetDataset(dataset)
	c.Assert(err, NotNil)
	_, err = gzfs.GetDataset(markerDatasetName)
	c.Assert(err, NotNil)

	// if we create another provider with the same file vdev path, it should
	// pick up that file again without wrecking the dataset
	provider, err = NewProvider(&ProviderConfig{
		DatasetName: dataset,
		Make: &MakeDev{
			BackingFilename: backingFilePath,
			Size:            one_gig,
		},
	})
	c.Assert(err, IsNil)
	c.Assert(provider, NotNil)
	_, err = gzfs.GetDataset(markerDatasetName)
	c.Assert(err, IsNil)
}
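Two identifiers in this test come from elsewhere in the package: gzfs, an import alias for the go-zfs bindings, and one_gig. A plausible declaration for the latter, offered as an assumption rather than Flynn's actual source:

// Assumed declaration of the one_gig constant used above to size the sparse
// backing file; the snippet does not show it, so the exact form is a guess.
const one_gig int64 = 1 << 30 // 1 GiB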
Code example #2
File: zfs.go Project: imjorge/flynn
func (p *Provider) RestoreVolumeState(volInfo *volume.Info, data json.RawMessage) (volume.Volume, error) {
	record := &zfsVolumeRecord{}
	if err := json.Unmarshal(data, record); err != nil {
		return nil, fmt.Errorf("cannot restore volume %q: %s", volInfo.ID, err)
	}
	// handle legacy info that doesn't have a Type
	if volInfo.Type == "" {
		if record.Filesystem != nil {
			volInfo.Type = record.Filesystem.Type
		} else {
			volInfo.Type = volume.VolumeTypeData
		}
	}
	dataset, err := zfs.GetDataset(record.Dataset)
	if err != nil {
		if isDatasetNotExistsError(err) {
			return nil, volume.ErrNoSuchVolume
		}
		return nil, fmt.Errorf("cannot restore volume %q: %s", volInfo.ID, err)
	}
	v := &zfsVolume{
		info:       volInfo,
		provider:   p,
		dataset:    dataset,
		basemount:  record.Basemount,
		filesystem: record.Filesystem,
	}
	if err := p.mountDataset(v); err != nil {
		return nil, err
	}
	p.volumes[volInfo.ID] = v
	return v, nil
}
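For orientation, this is a hypothetical reconstruction of the record being unmarshalled; the Dataset, Basemount, and Filesystem fields follow the snippet's accesses, but the exact types and JSON tags are assumptions:

// Hypothetical shape of the persisted zfsVolumeRecord; only the three field
// names are known from the snippet, everything else is an assumption.
type zfsVolumeRecord struct {
	Dataset    string             `json:"dataset"`
	Basemount  string             `json:"basemount"`
	Filesystem *volume.Filesystem `json:"filesystem,omitempty"`
}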
Code example #3
File: zfs.go Project: rlugojr/docker
func setQuota(name string, quota string) error {
	if quota == "0" {
		return nil
	}
	fs, err := zfs.GetDataset(name)
	if err != nil {
		return err
	}
	return fs.SetProperty("quota", quota)
}
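A hedged usage sketch: how a caller might create a dataset and cap it at 10 GiB. The function and dataset names here are made up for illustration; only setQuota comes from the snippet above.

// Illustrative caller: create a dataset for a layer and apply a quota. Quota
// values are passed as strings in bytes, as `zfs set quota=` accepts them;
// setQuota treats "0" as "no quota".
func createLayerDataset(name string) error {
	if _, err := zfs.CreateFilesystem(name, nil); err != nil {
		return err
	}
	return setQuota(name, "10737418240") // 10 GiB
}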
Code example #4
File: fs.go Project: kubernetes/kubernetes
// getZfstats returns ZFS mount stats using zfsutils
func getZfstats(poolName string) (uint64, uint64, uint64, error) {
	dataset, err := zfs.GetDataset(poolName)
	if err != nil {
		return 0, 0, 0, err
	}

	total := dataset.Used + dataset.Avail + dataset.Usedbydataset

	// free and avail are reported identically here: both slots return dataset.Avail
	return total, dataset.Avail, dataset.Avail, nil
}
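A short usage sketch; the pool name "rpool" is illustrative, and the function assumes getZfstats from above is in scope:

// Sketch: report pool capacity via getZfstats.
func reportPoolUsage() error {
	total, free, avail, err := getZfstats("rpool")
	if err != nil {
		return err
	}
	fmt.Printf("zfs pool: total=%d free=%d avail=%d bytes\n", total, free, avail)
	return nil
}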
Code example #5
File: zfs_test.go Project: amitkris/go-zfs
func TestDatasets(t *testing.T) {
	zpoolTest(t, func() {
		_, err := zfs.Datasets("")
		ok(t, err)

		ds, err := zfs.GetDataset("test")
		ok(t, err)
		equals(t, zfs.DatasetFilesystem, ds.Type)
		equals(t, "", ds.Origin)
		if runtime.GOOS != "solaris" {
			assert(t, ds.Logicalused != 0, "Logicalused is not greater than 0")
		}
	})
}
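The zpoolTest helper wraps the test body in a throwaway pool named "test" but is not shown in the snippet. The following is a hedged sketch of what such a helper does; the real go-zfs helper differs in detail (it uses several backing files):

// Hedged sketch of zpoolTest: build a file-backed pool named "test", run the
// test body, then destroy the pool. ok is the assertion helper used above.
func zpoolTest(t *testing.T, fn func()) {
	f, err := ioutil.TempFile("", "zfs-")
	ok(t, err)
	defer os.Remove(f.Name())
	ok(t, f.Truncate(1<<30)) // 1 GiB sparse backing file
	f.Close()

	pool, err := zfs.CreateZpool("test", nil, f.Name())
	ok(t, err)
	defer pool.Destroy()

	fn()
}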
Code example #6
File: zfs.go Project: ably-forks/flynn
func (p *Provider) SendSnapshot(vol volume.Volume, haves []json.RawMessage, output io.Writer) error {
	zvol, err := p.owns(vol)
	if err != nil {
		return err
	}
	if !vol.IsSnapshot() {
		return fmt.Errorf("can only send a snapshot")
	}
	// zfs recv can only really accept snapshots that apply to the current tip
	var latestRemote string
	if len(haves) > 0 { // len of a nil slice is 0, so no separate nil check is needed
		have := &zfsHaves{}
		if err := json.Unmarshal(haves[len(haves)-1], have); err == nil {
			latestRemote = have.SnapID
		}
	}
	// look for intersection of existing snapshots on this volume; if so do incremental
	parentName := strings.SplitN(zvol.dataset.Name, "@", 2)[0]
	parentDataset, err := zfs.GetDataset(parentName)
	if err != nil {
		return err
	}
	snapshots, err := parentDataset.Snapshots()
	if err != nil {
		return err
	}
	// we can fly incremental iff the latest snap on the remote is available here
	useIncremental := false
	if latestRemote != "" {
		for _, snap := range snapshots {
			if strings.SplitN(snap.Name, "@", 2)[1] == latestRemote {
				useIncremental = true
				break
			}
		}
	}
	// at last, send:
	if useIncremental {
		sendCmd := exec.Command("zfs", "send", "-i", latestRemote, zvol.dataset.Name)
		sendCmd.Stdout = output
		return sendCmd.Run()
	}
	return zvol.dataset.SendSnapshot(output)
}
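The stream written to output is what `zfs recv` consumes on the destination. A sketch of a matching receiver, offered as an assumption since Flynn's actual receive path is not part of this snippet:

// Sketch of the consuming end: pipe the stream produced by SendSnapshot into
// `zfs recv`. The function name is illustrative; -F forces the target to roll
// back to the incremental base before applying the stream.
func receiveSnapshot(targetDataset string, stream io.Reader) error {
	recvCmd := exec.Command("zfs", "recv", "-F", targetDataset)
	recvCmd.Stdin = stream
	return recvCmd.Run()
}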
Code example #7
File: zfs.go Project: ably-forks/flynn
func (p *Provider) RestoreVolumeState(volInfo *volume.Info, data json.RawMessage) (volume.Volume, error) {
	record := &zfsVolumeRecord{}
	if err := json.Unmarshal(data, record); err != nil {
		return nil, fmt.Errorf("cannot restore volume %q: %s", volInfo.ID, err)
	}
	dataset, err := zfs.GetDataset(record.Dataset)
	if err != nil {
		return nil, fmt.Errorf("cannot restore volume %q: %s", volInfo.ID, err)
	}
	v := &zfsVolume{
		info:      volInfo,
		provider:  p,
		dataset:   dataset,
		basemount: record.Basemount,
	}
	if err := p.mountDataset(v); err != nil {
		return nil, err
	}
	p.volumes[volInfo.ID] = v
	return v, nil
}
Code example #8
File: zfs.go Project: ably-forks/flynn
func NewProvider(config *ProviderConfig) (volume.Provider, error) {
	if _, err := exec.LookPath("zfs"); err != nil {
		return nil, fmt.Errorf("zfs command is not available")
	}
	dataset, err := zfs.GetDataset(config.DatasetName)
	if err != nil {
		if isDatasetNotExistsError(err) {
			// if the dataset doesn't exist...
			if config.Make == nil {
				// not much we can do without a dataset or pool to contain data
				return nil, err
			}
			// make a zpool backed by a sparse file.  it's the most portable thing we can do.
			if err := os.MkdirAll(filepath.Dir(config.Make.BackingFilename), 0755); err != nil {
				return nil, err
			}
			f, err := os.OpenFile(config.Make.BackingFilename, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0600)
			if err == nil {
				// if we've created a new file, size it and create a new zpool
				if err = f.Truncate(config.Make.Size); err != nil {
					return nil, err
				}
				f.Close()
				if _, err = zfs.CreateZpool(
					config.DatasetName,
					nil,
					"-mnone", // do not mount the root dataset.  (we'll mount our own datasets as necessary.)
					config.Make.BackingFilename,
				); err != nil {
					return nil, err
				}
			} else if os.IsExist(err) { // avoids the panic-prone *os.PathError type assertion
				// if the file already exists, check it for existing zpool
				if err := zpoolImportFile(config.Make.BackingFilename); err != nil {
					// if 'zpool import' didn't believe it... halt here
					// we could overwrite but we'd rather stop and avoid potential data loss.
					return nil, fmt.Errorf("error attempting import of existing zpool file: %s", err)
				}
				// note: 'zpool import' recreated *all* the volume datasets in that pool.
				// currently, even if they're not known to a volume manager, they're not garbage collected.
			} else {
				return nil, err
			}
			// get the dataset again... `zfs.Zpool` isn't a `zfs.Dataset`
			dataset, err = zfs.GetDataset(config.DatasetName)
			if err != nil {
				return nil, err
			}
		} else {
			// any error more complicated than not_exists, and we're out of our depth
			return nil, err
		}
	}
	if config.WorkingDir == "" {
		config.WorkingDir = "/var/lib/flynn/volumes/zfs/"
	}
	return &Provider{
		config:  config,
		dataset: dataset,
		volumes: make(map[string]*zfsVolume),
	}, nil
}
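zpoolImportFile is called above but not defined in the snippet. A minimal sketch of what it plausibly does, under the assumption that it points `zpool import` at the directory holding the backing file:

// Hedged sketch only: the helper name comes from the snippet, the body is an
// assumption. -d restricts zpool's device scan to the given directory, and -a
// imports every pool discovered there.
func zpoolImportFile(backingFilename string) error {
	return exec.Command("zpool", "import", "-d", filepath.Dir(backingFilename), "-a").Run()
}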