Example #1
func (n *IpfsNode) loadFilesRoot() error {
	dsk := ds.NewKey("/local/filesroot")
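	// pf persists the given key under /local/filesroot; mfs calls it to
	// publish each new files root.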
	pf := func(ctx context.Context, k key.Key) error {
		return n.Repo.Datastore().Put(dsk, []byte(k))
	}

	var nd *merkledag.Node
	val, err := n.Repo.Datastore().Get(dsk)

	switch {
	case err == ds.ErrNotFound || val == nil:
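		// nothing stored yet (or the value was empty): start from an empty unixfs directory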
		nd = uio.NewEmptyDirectory()
		_, err := n.DAG.Add(nd)
		if err != nil {
			return fmt.Errorf("failure writing to dagstore: %s", err)
		}
	case err == nil:
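		// a root key was persisted earlier: reload that node from the DAG service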
		k := key.Key(val.([]byte))
		nd, err = n.DAG.Get(n.Context(), k)
		if err != nil {
			return fmt.Errorf("error loading filesroot from DAG: %s", err)
		}
	default:
		return err
	}

	mr, err := mfs.NewRoot(n.Context(), n.DAG, nd, pf)
	if err != nil {
		return err
	}

	n.FilesRoot = mr
	return nil
}
Example #2
func TestDatastoreGetNotAllowedAfterClose(t *testing.T) {
	t.Parallel()
	path := testRepoPath("test", t)

	assert.True(!IsInitialized(path), t, "should NOT be initialized")
	assert.Nil(Init(path, &config.Config{}), t, "should initialize successfully")
	r, err := Open(path)
	assert.Nil(err, t, "should open successfully")

	k := "key"
	data := []byte(k)
	assert.Nil(r.Datastore().Put(datastore.NewKey(k), data), t, "Put should be successful")

	assert.Nil(r.Close(), t)
	_, err = r.Datastore().Get(datastore.NewKey(k))
	assert.Err(err, t, "after close, Get should fail")
}
Example #3
func openDefaultDatastore(r *FSRepo) (repo.Datastore, error) {
	leveldbPath := path.Join(r.path, leveldbDirectory)

	// save leveldb reference so it can be neatly closed afterward
	leveldbDS, err := levelds.NewDatastore(leveldbPath, &levelds.Options{
		Compression: ldbopts.NoCompression,
	})
	if err != nil {
		return nil, fmt.Errorf("unable to open leveldb datastore: %v", err)
	}

	syncfs := !r.config.Datastore.NoSync
	// 5 bytes of prefix gives us 25 bits of freedom, 16 of which are taken
	// by the Qm prefix, leaving us with 9 bits, or 512-way sharding
	blocksDS, err := flatfs.New(path.Join(r.path, flatfsDirectory), 5, syncfs)
	if err != nil {
		return nil, fmt.Errorf("unable to open flatfs datastore: %v", err)
	}

	// Add our PeerID to metrics paths to keep them unique
	//
	// As some tests just pass a zero-value Config to fsrepo.Init,
	// cope with missing PeerID.
	id := r.config.Identity.PeerID
	if id == "" {
		// the tests pass in a zero Config; cope with it
		id = fmt.Sprintf("uninitialized_%p", r)
	}
	prefix := "fsrepo." + id + ".datastore."
	metricsBlocks := measure.New(prefix+"blocks", blocksDS)
	metricsLevelDB := measure.New(prefix+"leveldb", leveldbDS)
	mountDS := mount.New([]mount.Mount{
		{
			Prefix:    ds.NewKey("/blocks"),
			Datastore: metricsBlocks,
		},
		{
			Prefix:    ds.NewKey("/"),
			Datastore: metricsLevelDB,
		},
	})

	return mountDS, nil
}
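For context, a minimal routing sketch (not from the repository): it substitutes in-memory map datastores for flatfs and leveldb, keeps the context-free Put API used above, and assumes the plain github.com/ipfs/go-datastore import paths rather than the gx ones. The mount with the longest matching prefix wins, so block data lands in the /blocks mount while keys such as /local/filesroot fall through to the root mount.

package main

import (
	ds "github.com/ipfs/go-datastore"
	mount "github.com/ipfs/go-datastore/mount"
)

func main() {
	blocksDS := ds.NewMapDatastore() // stands in for the flatfs blocks store
	rootDS := ds.NewMapDatastore()   // stands in for the leveldb store

	d := mount.New([]mount.Mount{
		{Prefix: ds.NewKey("/blocks"), Datastore: blocksDS},
		{Prefix: ds.NewKey("/"), Datastore: rootDS},
	})

	// routed to blocksDS because /blocks is the longest matching prefix
	_ = d.Put(ds.NewKey("/blocks/CIQEXAMPLE"), []byte("block bytes"))

	// no more specific mount matches, so this falls through to rootDS
	_ = d.Put(ds.NewKey("/local/filesroot"), []byte("root key"))
}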
Example #4
func TestDatastorePersistsFromRepoToRepo(t *testing.T) {
	t.Parallel()
	path := testRepoPath("test", t)

	assert.Nil(Init(path, &config.Config{}), t)
	r1, err := Open(path)
	assert.Nil(err, t)

	k := "key"
	expected := []byte(k)
	assert.Nil(r1.Datastore().Put(datastore.NewKey(k), expected), t, "using first repo, Put should be successful")
	assert.Nil(r1.Close(), t)

	r2, err := Open(path)
	assert.Nil(err, t)
	v, err := r2.Datastore().Get(datastore.NewKey(k))
	assert.Nil(err, t, "using second repo, Get should be successful")
	actual, ok := v.([]byte)
	assert.True(ok, t, "value should be the []byte from r1's Put")
	assert.Nil(r2.Close(), t)
	assert.True(bytes.Equal(expected, actual), t, "data should match")
}
Example #5
func (pm *ProviderManager) deleteProvSet(k key.Key) error {
	pm.providers.Remove(k)

	res, err := pm.dstore.Query(dsq.Query{
		KeysOnly: true,
		Prefix:   mkProvKey(k).String(),
	})
	if err != nil {
		return err
	}

	entries, err := res.Rest()
	if err != nil {
		return err
	}

	for _, e := range entries {
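		// best-effort cleanup: a failed delete is logged and the loop continues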
		err := pm.dstore.Delete(ds.NewKey(e.Key))
		if err != nil {
			log.Error("deleting provider set: ", err)
		}
	}
	return nil
}
Example #6
import (
	"fmt"
	"sync"
	"time"

	key "github.com/ipfs/go-ipfs/blocks/key"
	"github.com/ipfs/go-ipfs/blocks/set"
	mdag "github.com/ipfs/go-ipfs/merkledag"
	logging "gx/ipfs/QmNQynaz7qfriSUJkiEZUrm2Wen1u3Kj9goZzWtrPyu7XR/go-log"
	ds "gx/ipfs/QmTxLSvdhwg68WJimdS6icLPhZi28aTp6b7uihC2Yb47Xk/go-datastore"
	context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
)

var log = logging.Logger("pin")

var pinDatastoreKey = ds.NewKey("/local/pins")

var emptyKey = key.B58KeyDecode("QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n")

const (
	linkRecursive = "recursive"
	linkDirect    = "direct"
	linkIndirect  = "indirect"
	linkInternal  = "internal"
	linkNotPinned = "not pinned"
	linkAny       = "any"
	linkAll       = "all"
)

type PinMode int
Example #7
// AllKeysChan runs a query for keys from the blockstore.
// This is very simplistic; in the future it could take a dsq.Query as a parameter.
//
// AllKeysChan respects the context.
func (bs *blockstore) AllKeysChan(ctx context.Context) (<-chan key.Key, error) {

	// KeysOnly, because fetching the values as well would be _a lot_ of data.
	q := dsq.Query{KeysOnly: true}
	// datastore/namespace does *NOT* fix up Query.Prefix
	q.Prefix = BlockPrefix.String()
	res, err := bs.datastore.Query(q)
	if err != nil {
		return nil, err
	}

	// get pulls the next key from the query results; the bool reports whether iteration should continue
	get := func() (key.Key, bool) {
		select {
		case <-ctx.Done():
			return "", false
		case e, more := <-res.Next():
			if !more {
				return "", false
			}
			if e.Error != nil {
				log.Debug("blockstore.AllKeysChan got err:", e.Error)
				return "", false
			}

			// need to convert to key.Key using key.KeyFromDsKey.
			k, err := key.KeyFromDsKey(ds.NewKey(e.Key))
			if err != nil {
				log.Warningf("error parsing key from DsKey: ", err)
				return "", true
			}
			log.Debug("blockstore: query got key", k)

			// key must be a multihash. else ignore it.
			_, err = mh.Cast([]byte(k))
			if err != nil {
				log.Warningf("key from datastore was not a multihash: ", err)
				return "", true
			}

			return k, true
		}
	}

	output := make(chan key.Key, dsq.KeysOnlyBufSize)
	go func() {
		defer func() {
			res.Process().Close() // ensure exit (signals early exit, too)
			close(output)
		}()

		for {
			k, ok := get()
			if !ok {
				return
			}
			if k == "" {
				continue
			}

			select {
			case <-ctx.Done():
				return
			case output <- k:
			}
		}
	}()

	return output, nil
}
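As a usage note (a sketch, not code from the repository): a caller holding a bs value that satisfies the Blockstore interface from Example #8 can drain the channel until the query is exhausted or the context is cancelled; cancelling the context is what stops the producer goroutine above. The fmt import and the B58String method on key.Key are assumed from the surrounding codebase.

func printAllKeys(ctx context.Context, bs Blockstore) error {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel() // stops the producer goroutine if we return before draining the channel

	keys, err := bs.AllKeysChan(ctx)
	if err != nil {
		return err
	}

	// AllKeysChan closes the channel once the query is exhausted or ctx is done.
	for k := range keys {
		fmt.Println(k.B58String())
	}
	return nil
}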
Example #8
	"sync/atomic"

	blocks "github.com/ipfs/go-ipfs/blocks"
	key "github.com/ipfs/go-ipfs/blocks/key"
	logging "gx/ipfs/QmNQynaz7qfriSUJkiEZUrm2Wen1u3Kj9goZzWtrPyu7XR/go-log"
	ds "gx/ipfs/QmTxLSvdhwg68WJimdS6icLPhZi28aTp6b7uihC2Yb47Xk/go-datastore"
	dsns "gx/ipfs/QmTxLSvdhwg68WJimdS6icLPhZi28aTp6b7uihC2Yb47Xk/go-datastore/namespace"
	dsq "gx/ipfs/QmTxLSvdhwg68WJimdS6icLPhZi28aTp6b7uihC2Yb47Xk/go-datastore/query"
	mh "gx/ipfs/QmYf7ng2hG5XBtJA3tN34DQ2GUN5HNksEw1rLDkmr6vGku/go-multihash"
	context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
)

var log = logging.Logger("blockstore")

// BlockPrefix namespaces blockstore datastores
var BlockPrefix = ds.NewKey("blocks")

var ValueTypeMismatch = errors.New("the retrieved value is not a Block")
var ErrHashMismatch = errors.New("block in storage has different hash than requested")

var ErrNotFound = errors.New("blockstore: block not found")

// Blockstore wraps a Datastore
type Blockstore interface {
	DeleteBlock(key.Key) error
	Has(key.Key) (bool, error)
	Get(key.Key) (blocks.Block, error)
	Put(blocks.Block) error
	PutMany([]blocks.Block) error

	AllKeysChan(ctx context.Context) (<-chan key.Key, error)
Example #9
func mkProvKey(k key.Key) ds.Key {
	return ds.NewKey(providersKeyPrefix + base32.RawStdEncoding.EncodeToString([]byte(k)))
}
Example #10
// DsKey returns a Datastore key
func (k Key) DsKey() ds.Key {
	return ds.NewKey(base32.RawStdEncoding.EncodeToString([]byte(k)))
}
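As a usage note (a sketch, not repository code): DsKey pairs with key.KeyFromDsKey, which Example #7 uses to recover a key.Key from a query result; base32 RawStdEncoding contains no "/" characters, so the encoded key stays a single datastore path component.

// roundTrip is a hypothetical helper showing that the base32 encoding is reversible.
func roundTrip(k key.Key) (key.Key, error) {
	dsk := k.DsKey()             // base32-encode the raw key bytes into a ds.Key
	return key.KeyFromDsKey(dsk) // decode back to the original key.Key
}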