Example #1
// connectVolume creates a volume on app1 and connects app2 to it. It
// does all the necessary setup to authorize the client to use resources
// on the server.
func connectVolume(t testing.TB, app1 *server.App, volumeName1 string, app2 *server.App, volumeName2 string) {
	pub1 := (*peer.PublicKey)(app1.Keys.Sign.Pub)
	pub2 := (*peer.PublicKey)(app2.Keys.Sign.Pub)

	sharingKey := [32]byte{1, 2, 3, 4, 5}

	var volID db.VolumeID
	setup1 := func(tx *db.Tx) error {
		peer, err := tx.Peers().Make(pub2)
		if err != nil {
			return err
		}
		if err := peer.Storage().Allow("local"); err != nil {
			return err
		}
		sharingKey, err := tx.SharingKeys().Add("friends", &sharingKey)
		if err != nil {
			return err
		}
		v, err := tx.Volumes().Create(volumeName1, "local", sharingKey)
		if err != nil {
			return err
		}
		if err := peer.Volumes().Allow(v); err != nil {
			return err
		}
		v.VolumeID(&volID)
		return nil
	}
	if err := app1.DB.Update(setup1); err != nil {
		t.Fatalf("app1 setup: %v", err)
	}

	setup2 := func(tx *db.Tx) error {
		if _, err := tx.Peers().Make(pub1); err != nil {
			return err
		}
		sharingKey, err := tx.SharingKeys().Add("friends", &sharingKey)
		if err != nil {
			return err
		}
		v, err := tx.Volumes().Add(volumeName2, &volID, "local", sharingKey)
		if err != nil {
			return err
		}
		if err := v.Storage().Add("jdoe", "peerkey:"+pub1.String(), sharingKey); err != nil {
			return err
		}
		return nil
	}
	if err := app2.DB.Update(setup2); err != nil {
		t.Fatalf("app2 setup location: %v", err)
	}
}
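
For orientation, a test might drive connectVolume roughly as sketched below. The newTestApp fixture and the test name are hypothetical stand-ins for however the surrounding suite constructs *server.App values; only the connectVolume signature comes from the example above.

// Hypothetical sketch of a caller; newTestApp is an assumed fixture
// helper returning a ready-to-use *server.App, not part of the example.
func TestSyncBetweenPeers(t *testing.T) {
	app1 := newTestApp(t)
	app2 := newTestApp(t)

	// Authorize app2 on app1 and register the shared volume on both
	// sides under their local names.
	connectVolume(t, app1, "source", app2, "mirror")

	// ... exercise syncing between app1 and app2 here ...
}
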
Example #2
func (v *Volume) SyncSend(ctx context.Context, dirPath string, send func(*wirepeer.VolumeSyncPullItem) error) error {
	dirPath = path.Clean("/" + dirPath)[1:]

	// First, start a new epoch so all mutations happen after the
	// clocks that are included in the snapshot.
	//
	// We hold the lock to prevent mutations from using the new epoch
	// until we have a snapshot started.
	v.epoch.mu.Lock()
	locked := true
	defer func() {
		if locked {
			v.epoch.mu.Unlock()
		}
	}()
	if _, err := v.cleanEpoch(); err != nil {
		return err
	}
	sync := func(tx *db.Tx) error {
		v.epoch.mu.Unlock()
		locked = false

		// NOT HOLDING THE LOCK, accessing database snapshot ONLY

		bucket := v.bucket(tx)
		dirs := bucket.Dirs()
		clocks := bucket.Clock()

		dirInode := v.root.inode
		// Keep track of the parent of the directory, to access the
		// clock for the directory itself. Starts off as 0:"", the
		// convention for storing data about the root directory.
		parentDirInode := uint64(0)
		dirName := ""
		var dirDE *wire.Dirent

		for dirPath != "" {
			dirName, dirPath = splitPath(dirPath)

			de, err := dirs.Get(dirInode, dirName)
			if err != nil {
				return err
			}
			// Might not be a dir anymore but that'll just trigger
			// ENOENT on the next round.
			parentDirInode = dirInode
			dirInode = de.Inode
			dirDE = de
		}

		// If it's not the root, make sure it's a directory; the List call below doesn't check.
		if dirDE != nil && dirDE.Dir == nil {
			msg := &wirepeer.VolumeSyncPullItem{
				Error: wirepeer.VolumeSyncPullItem_NOT_A_DIRECTORY,
			}
			if err := send(msg); err != nil {
				return err
			}
			return nil
		}

		// TODO more complex db api would avoid unmarshal-marshal
		dirClock, err := clocks.Get(parentDirInode, dirName)
		if err != nil {
			return err
		}
		dirClockBuf, err := dirClock.MarshalBinary()
		if err != nil {
			return err
		}

		msg := &wirepeer.VolumeSyncPullItem{
			Peers: map[uint32][]byte{
				// PeerID 0 always refers to myself.
				0: v.pubKey[:],
			},
			DirClock: dirClockBuf,
		}

		cursor := tx.Peers().Cursor()
		for peer := cursor.First(); peer != nil; peer = cursor.Next() {
			// Filter which IDs are returned here to include only peers
			// authorized for the current volume; this avoids leaking
			// information about all of our peers.
			if !peer.Volumes().IsAllowed(bucket) {
				continue
			}

			// TODO hardcoded knowledge of size of peer.ID
			msg.Peers[uint32(peer.ID())] = peer.Pub()[:]
		}

		c := dirs.List(dirInode)
		const maxBatch = 1000
		for item := c.First(); item != nil; item = c.Next() {
			name := item.Name()

			var tmp wire.Dirent
			if err := item.Unmarshal(&tmp); err != nil {
				return err
			}

			de := &wirepeer.Dirent{
				Name: name,
			}
			switch {
			case tmp.File != nil:
				de.File = &wirepeer.File{
					Manifest: tmp.File.Manifest,
				}
			case tmp.Dir != nil:
				de.Dir = &wirepeer.Dir{}
			default:
				return fmt.Errorf("unknown dirent type: %v", tmp)
			}

			clock, err := clocks.Get(dirInode, name)
			if err != nil {
				return err
			}
			// TODO more complex db api would avoid unmarshal-marshal
			// hoops
			clockBuf, err := clock.MarshalBinary()
			if err != nil {
				return err
			}
			de.Clock = clockBuf

			// TODO executable, xattr, acl
			// TODO mtime

			msg.Children = append(msg.Children, de)

			if len(msg.Children) > maxBatch {
				if err := send(msg); err != nil {
					return err
				}
				msg.Reset()
			}
		}

		if len(msg.Children) > 0 || msg.Peers != nil {
			if err := send(msg); err != nil {
				return err
			}
		}

		return nil
	}
	if err := v.db.View(sync); err != nil {
		return err
	}
	return nil
}
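
The send callback decides what happens to each batch: in the real system it presumably streams items to a peer, while a test can simply collect them. Below is a minimal sketch of such a caller, written as if it lived in the same package as Volume; collectSync is a hypothetical name, and only the SyncSend signature and the VolumeSyncPullItem fields used above are taken from the example.

// collectSync drives SyncSend and gathers every batch in memory instead
// of streaming it to a peer. Hypothetical helper, not part of the example.
func collectSync(ctx context.Context, v *Volume, dir string) ([]*wirepeer.VolumeSyncPullItem, error) {
	var batches []*wirepeer.VolumeSyncPullItem
	send := func(item *wirepeer.VolumeSyncPullItem) error {
		// SyncSend resets and reuses its message between batches, so copy
		// the fields out rather than holding on to the pointer it passes.
		batches = append(batches, &wirepeer.VolumeSyncPullItem{
			Error:    item.Error,
			Peers:    item.Peers,
			DirClock: item.DirClock,
			Children: append([]*wirepeer.Dirent(nil), item.Children...),
		})
		return nil
	}
	if err := v.SyncSend(ctx, dir, send); err != nil {
		return nil, err
	}
	return batches, nil
}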