Example #1
File: zfs.go Project: imjorge/flynn
func (p *Provider) ForkVolume(vol volume.Volume) (volume.Volume, error) {
	zvol, err := p.owns(vol)
	if err != nil {
		return nil, err
	}
	if !vol.IsSnapshot() {
		return nil, fmt.Errorf("can only fork a snapshot")
	}
	id := random.UUID()
	info := &volume.Info{ID: id, Type: vol.Info().Type}
	v2 := &zfsVolume{
		info:      info,
		provider:  zvol.provider,
		basemount: p.mountPath(info),
	}
	cloneID := fmt.Sprintf("%s/%s", zvol.provider.dataset.Name, id)
	v2.dataset, err = zvol.dataset.Clone(cloneID, map[string]string{
		"mountpoint": v2.basemount,
	})
	if err != nil {
		return nil, fmt.Errorf("could not fork volume: %s", err)
	}
	p.volumes[id] = v2
	return v2, nil
}
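ForkVolume only accepts snapshots, so forking a live volume goes through CreateSnapshot first (see Example #6). A minimal usage sketch, assuming it lives in the same package as the Provider above; forkFromVolume is a hypothetical helper, not part of flynn:

func forkFromVolume(p *Provider, vol volume.Volume) (volume.Volume, error) {
	// snapshot the live volume, then clone that snapshot into a new volume
	snap, err := p.CreateSnapshot(vol)
	if err != nil {
		return nil, err
	}
	return p.ForkVolume(snap)
}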
Example #2
func (b *Provider) owns(vol volume.Volume) (*zfsVolume, error) {
	zvol := b.volumes[vol.Info().ID]
	if zvol == nil {
		return nil, fmt.Errorf("volume does not belong to this provider")
	}
	if zvol != vol { // these pointers should be canonical
		panic(fmt.Errorf("volume does not belong to this provider"))
	}
	return zvol, nil
}
Example #3
func (b *Provider) DestroyVolume(vol volume.Volume) error {
	zvol, err := b.owns(vol)
	if err != nil {
		return err
	}
	if vol.IsSnapshot() {
		if err := syscall.Unmount(vol.Location(), 0); err != nil {
			return err
		}
		os.Remove(vol.Location())
	}
	if err := zvol.dataset.Destroy(zfs.DestroyForceUmount); err != nil {
		for i := 0; i < 5 && err != nil && IsDatasetBusyError(err); i++ {
			// sometimes zfs will claim to be busy as if files are still open even when all container processes are dead.
			// usually this goes away, so retry a few times.
			time.Sleep(1 * time.Second)
			err = zvol.dataset.Destroy(zfs.DestroyForceUmount)
		}
		if err != nil {
			return err
		}
	}
	os.Remove(zvol.basemount)
	delete(b.volumes, vol.Info().ID)
	return nil
}
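The bounded retry on "dataset is busy" above is a generic pattern; a standalone sketch of the same idea, reusing the package's existing time import (retryWhile is hypothetical, not part of flynn):

func retryWhile(attempts int, delay time.Duration, shouldRetry func(error) bool, op func() error) error {
	err := op()
	for i := 0; i < attempts && err != nil && shouldRetry(err); i++ {
		// the failure is expected to be transient; back off briefly and try again
		time.Sleep(delay)
		err = op()
	}
	return err
}

DestroyVolume could then express its loop as retryWhile(5, time.Second, IsDatasetBusyError, func() error { return zvol.dataset.Destroy(zfs.DestroyForceUmount) }).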
Example #4
func (b *Provider) SendSnapshot(vol volume.Volume, haves []json.RawMessage, output io.Writer) error {
	zvol, err := b.owns(vol)
	if err != nil {
		return err
	}
	if !vol.IsSnapshot() {
		return fmt.Errorf("can only send a snapshot")
	}
	// zfs recv can only really accept snapshots that apply to the current tip
	var latestRemote string
	if len(haves) > 0 {
		have := &zfsHaves{}
		if err := json.Unmarshal(haves[len(haves)-1], have); err == nil {
			latestRemote = have.SnapID
		}
	}
	// look for intersection of existing snapshots on this volume; if so do incremental
	parentName := strings.SplitN(zvol.dataset.Name, "@", 2)[0]
	parentDataset, err := zfs.GetDataset(parentName)
	if err != nil {
		return err
	}
	snapshots, err := parentDataset.Snapshots()
	if err != nil {
		return err
	}
	// we can send an incremental stream iff the remote's latest snapshot is also present here
	useIncremental := false
	if latestRemote != "" {
		for _, snap := range snapshots {
			if strings.SplitN(snap.Name, "@", 2)[1] == latestRemote {
				useIncremental = true
				break
			}
		}
	}
	// at last, send:
	if useIncremental {
		sendCmd := exec.Command("zfs", "send", "-i", latestRemote, zvol.dataset.Name)
		sendCmd.Stdout = output
		return sendCmd.Run()
	}
	return zvol.dataset.SendSnapshot(output)
}
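A minimal usage sketch, assuming p owns snap (a snapshot volume): dump a full snapshot stream to a local file. Passing a non-empty haves list (most recent entry last) is what lets SendSnapshot fall back to an incremental zfs send -i; dumpSnapshot is a hypothetical helper, not part of flynn:

func dumpSnapshot(p *Provider, snap volume.Volume, path string) error {
	// stream the snapshot into a file; with nil haves this is always a full send
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()
	return p.SendSnapshot(snap, nil, f)
}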
Example #5
File: zfs.go Project: imjorge/flynn
/*
	ReceiveSnapshot both accepts a snapshotted filesystem as a byte stream,
	and applies that state to the given `vol` (i.e., if this were git, it's like
	`git fetch && git pull` at the same time; regrettably, it's pretty hard to get
	zfs to separate those operations).  If there are local working changes in
	the volume, they will be overwritten.

	In addition to the given volume being mutated on disk, a reference to the
	new snapshot will be returned (this can be used for cleanup, though be aware
	that with zfs, removing snapshots may impact the ability to use incremental
	deltas when receiving future snapshots).

	Also note that ZFS is *extremely* picky about receiving snapshots; in
	addition to obvious failure modes like an incremental snapshot with
	insufficient data, the following complications apply:
	- Sending an incremental snapshot with too much history will fail.
	- Sending a full snapshot to a volume with any other snapshots will fail.
	In the former case, you can renegotiate; in the latter, you will have to
	either *destroy snapshots* or make a new volume.
*/
func (p *Provider) ReceiveSnapshot(vol volume.Volume, input io.Reader) (volume.Volume, error) {
	zvol, err := p.owns(vol)
	if err != nil {
		return nil, err
	}
	// recv does the right thing with input either fresh or incremental.
	// recv with the dataset name and no snapshot suffix means the snapshot name from the far side is kept;
	// this is important because though we've assigned it a new UUID, the zfs dataset name match is used for incremental hinting.
	var buf bytes.Buffer
	recvCmd := exec.Command("zfs", "recv", "-F", zvol.dataset.Name)
	recvCmd.Stdin = input
	recvCmd.Stderr = &buf
	if err := recvCmd.Run(); err != nil {
		return nil, fmt.Errorf("zfs recv rejected snapshot data: %s (%s)", err, strings.TrimSpace(buf.String()))
	}
	// get the dataset reference back; whatever the latest snapshot is must be what we received
	snapshots, err := zvol.dataset.Snapshots()
	if err != nil {
		return nil, err
	}
	if len(snapshots) == 0 {
		// should never happen, unless someone else is racing the zfs controls
		return nil, fmt.Errorf("zfs recv misplaced snapshot data")
	}
	snapds := snapshots[len(snapshots)-1]
	// reassemble as a flynn volume for return
	id := random.UUID()
	info := &volume.Info{ID: id, Type: vol.Info().Type}
	snap := &zfsVolume{
		info:      info,
		provider:  zvol.provider,
		dataset:   snapds,
		basemount: p.mountPath(info),
	}
	if err := p.mountDataset(snap); err != nil {
		return nil, err
	}
	p.volumes[id] = snap
	return snap, nil
}
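SendSnapshot and ReceiveSnapshot are designed to be wired together; a minimal sketch of a local transfer through an in-memory pipe, assuming both providers live in this package (transferSnapshot is hypothetical, not part of flynn, and error handling is simplified):

func transferSnapshot(src, dst *Provider, srcVol, dstVol volume.Volume) (volume.Volume, error) {
	// snapshot the source so there is a stable stream to send
	snap, err := src.CreateSnapshot(srcVol)
	if err != nil {
		return nil, err
	}
	pr, pw := io.Pipe()
	go func() {
		// a full send; pass the remote's haves instead of nil to go incremental
		pw.CloseWithError(src.SendSnapshot(snap, nil, pw))
	}()
	return dst.ReceiveSnapshot(dstVol, pr)
}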
Example #6
File: zfs.go Project: imjorge/flynn
func (p *Provider) CreateSnapshot(vol volume.Volume) (volume.Volume, error) {
	zvol, err := p.owns(vol)
	if err != nil {
		return nil, err
	}
	id := random.UUID()
	info := &volume.Info{ID: id, Type: vol.Info().Type}
	snap := &zfsVolume{
		info:      info,
		provider:  zvol.provider,
		basemount: p.mountPath(info),
	}
	snap.dataset, err = zvol.dataset.Snapshot(id, false)
	if err != nil {
		return nil, err
	}
	if err := p.mountDataset(snap); err != nil {
		return nil, err
	}
	p.volumes[id] = snap
	return snap, nil
}
Example #7
// Called to sync changes to disk when a volume is updated
func (m *Manager) persistVolume(tx *bolt.Tx, vol volume.Volume) error {
	// Save the general volume info
	volumesBucket := tx.Bucket([]byte("volumes"))
	id := vol.Info().ID
	k := []byte(id)
	_, volExists := m.volumes[id]
	if !volExists {
		volumesBucket.Delete(k)
	} else {
		b, err := json.Marshal(vol.Info())
		if err != nil {
			return fmt.Errorf("failed to serialize volume info: %s", err)
		}
		err = volumesBucket.Put(k, b)
		if err != nil {
			return fmt.Errorf("could not persist volume info to boltdb: %s", err)
		}
	}
	// Save any provider-specific metadata associated with the volume.
	// These are saved per-provider since the deserialization is also only defined per-provider implementation.
	providerBucket, err := m.getProviderBucket(tx, m.providerIDs[vol.Provider()])
	if err != nil {
		return fmt.Errorf("could not persist provider volume info to boltdb: %s", err)
	}
	providerVolumesBucket := providerBucket.Bucket([]byte("volumes"))
	if !volExists {
		providerVolumesBucket.Delete(k)
	} else {
		b, err := vol.Provider().MarshalVolumeState(id)
		if err != nil {
			return fmt.Errorf("failed to serialize provider volume info: %s", err)
		}
		err = providerVolumesBucket.Put(k, b)
		if err != nil {
			return fmt.Errorf("could not persist provider volume info to boltdb: %s", err)
		}
	}
	return nil
}
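persistVolume expects to run inside an open bolt write transaction; a minimal sketch of the surrounding call, assuming access to the Manager's *bolt.DB (the db parameter and the persistVolumeTx name are hypothetical):

func (m *Manager) persistVolumeTx(db *bolt.DB, vol volume.Volume) error {
	// bolt serializes writers, so persistVolume sees a consistent view inside Update
	return db.Update(func(tx *bolt.Tx) error {
		return m.persistVolume(tx, vol)
	})
}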
Example #8
func assertInfoEqual(c *C, volA, volB volume.Volume) {
	c.Assert(volA.Info().ID, Equals, volB.Info().ID)
	c.Assert(volA.Info().Type, Equals, volB.Info().Type)
	c.Assert(volA.Info().Meta, DeepEquals, volB.Info().Meta)
	c.Assert(volA.Info().CreatedAt.Equal(volB.Info().CreatedAt), Equals, true)
}