func (d *Datastore) lookup(key ds.Key) (ds.Datastore, ds.Key, ds.Key) {
	d.lk.Lock()
	defer d.lk.Unlock()
	for _, m := range d.mounts {
		if m.Prefix.Equal(key) || m.Prefix.IsAncestorOf(key) {
			s := strings.TrimPrefix(key.String(), m.Prefix.String())
			k := ds.NewKey(s)
			return m.Datastore, m.Prefix, k
		}
	}
	return nil, ds.NewKey("/"), key
}
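// Illustrative sketch (not from the source) of the prefix arithmetic lookup
// performs above; it assumes ds is the same go-datastore package used
// throughout, plus the standard library strings and fmt packages.
func exampleMountLookupTrim() {
	prefix := ds.NewKey("/blocks")
	key := ds.NewKey("/blocks/FOO")
	if prefix.IsAncestorOf(key) {
		rest := ds.NewKey(strings.TrimPrefix(key.String(), prefix.String()))
		fmt.Println(rest) // "/FOO" -- the key handed to the mounted datastore
	}
}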
func openDefaultDatastore(r *FSRepo) (repo.Datastore, error) {
	leveldbPath := path.Join(r.path, leveldbDirectory)

	// save leveldb reference so it can be neatly closed afterward
	leveldbDS, err := levelds.NewDatastore(leveldbPath, &levelds.Options{
		Compression: ldbopts.NoCompression,
	})
	if err != nil {
		return nil, fmt.Errorf("unable to open leveldb datastore: %v", err)
	}

	// 4TB of 256kB objects ~=17M objects, splitting that 256-way
	// leads to ~66k objects per dir, splitting 256*256-way leads to
	// only 256.
	//
	// The keys seen by the block store have predictable prefixes,
	// including "/" from datastore.Key and 2 bytes from multihash. To
	// reach a uniform 256-way split, we need approximately 4 bytes of
	// prefix.
	syncfs := !r.config.Datastore.NoSync
	blocksDS, err := flatfs.New(path.Join(r.path, flatfsDirectory), 4, syncfs)
	if err != nil {
		return nil, fmt.Errorf("unable to open flatfs datastore: %v", err)
	}

	// Add our PeerID to metrics paths to keep them unique
	//
	// As some tests just pass a zero-value Config to fsrepo.Init,
	// cope with missing PeerID.
	id := r.config.Identity.PeerID
	if id == "" {
		// the tests pass in a zero Config; cope with it
		id = fmt.Sprintf("uninitialized_%p", r)
	}
	prefix := "fsrepo." + id + ".datastore."
	metricsBlocks := measure.New(prefix+"blocks", blocksDS)
	metricsLevelDB := measure.New(prefix+"leveldb", leveldbDS)
	mountDS := mount.New([]mount.Mount{
		{
			Prefix:    ds.NewKey("/blocks"),
			Datastore: metricsBlocks,
		},
		{
			Prefix:    ds.NewKey("/"),
			Datastore: metricsLevelDB,
		},
	})

	return mountDS, nil
}
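// Hypothetical sketch of the routing behaviour of the mount built above,
// using in-memory MapDatastores in place of flatfs and leveldb; the keys and
// values here are made up for illustration.
func exampleMountRouting() error {
	blocks := ds.NewMapDatastore()
	root := ds.NewMapDatastore()
	m := mount.New([]mount.Mount{
		{Prefix: ds.NewKey("/blocks"), Datastore: blocks},
		{Prefix: ds.NewKey("/"), Datastore: root},
	})

	// keys under /blocks land in the first mount, everything else falls
	// through to the root mount
	if err := m.Put(ds.NewKey("/blocks/FOO"), []byte("block data")); err != nil {
		return err
	}
	return m.Put(ds.NewKey("/local/filesroot"), []byte("root hash"))
}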
// ConvertKey returns a B58 encoded Datastore key
// TODO: this is hacky because it encodes every path component. some
// path components may be proper strings already...
func (b58KeyConverter) ConvertKey(dsk ds.Key) ds.Key {
	k := ds.NewKey("/")
	for _, n := range dsk.Namespaces() {
		k = k.ChildString(b58.Encode([]byte(n)))
	}
	return k
}
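// Hypothetical usage sketch: ConvertKey rewrites each path component of a key
// as its base58 encoding, so "/hello/world" becomes a two-component key whose
// namespaces are the base58 encodings of "hello" and "world".
func exampleB58ConvertKey() {
	var c b58KeyConverter
	k := c.ConvertKey(ds.NewKey("/hello/world"))
	fmt.Println(k)
}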
// Query implements Datastore.Query
func (d *Datastore) Query(q query.Query) (query.Results, error) {
	results := make(chan query.Result)

	walkFn := func(path string, info os.FileInfo, err error) error {
		// remove ds path prefix
		if strings.HasPrefix(path, d.path) {
			path = path[len(d.path):]
		}

		if !info.IsDir() {
			if strings.HasSuffix(path, ObjectKeySuffix) {
				path = path[:len(path)-len(ObjectKeySuffix)]
			}
			key := ds.NewKey(path)
			entry := query.Entry{Key: key.String(), Value: query.NotFetched}
			results <- query.Result{Entry: entry}
		}
		return nil
	}

	go func() {
		filepath.Walk(d.path, walkFn)
		close(results)
	}()

	r := query.ResultsWithChan(q, results)
	r = query.NaiveQueryApply(q, r)
	return r, nil
}
// Query implements Query, inverting keys on the way back out.
func (d *datastore) Query(q dsq.Query) (dsq.Results, error) {
	qr, err := d.raw.Query(q)
	if err != nil {
		return nil, err
	}

	ch := make(chan dsq.Result)
	go func() {
		defer close(ch)
		defer qr.Close()

		for r := range qr.Next() {
			if r.Error != nil {
				ch <- r
				continue
			}

			k := ds.NewKey(r.Entry.Key)
			if !d.prefix.IsAncestorOf(k) {
				continue
			}

			r.Entry.Key = d.Datastore.InvertKey(k).String()
			ch <- r
		}
	}()

	return dsq.DerivedResults(qr, ch), nil
}
func (ds *S3Datastore) decode(raw string) (datastore.Key, bool) {
	k, err := hex.DecodeString(raw)
	if err != nil {
		return datastore.Key{}, false
	}
	return datastore.NewKey(string(k)), true
}
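// Round-trip sketch (assumption: the matching encode step hex-encodes the raw
// key bytes, which is not shown in the source; the function name is made up).
func exampleS3KeyRoundTrip(s3ds *S3Datastore) {
	raw := hex.EncodeToString(datastore.NewKey("/blocks/FOO").Bytes())
	if k, ok := s3ds.decode(raw); ok {
		fmt.Println(k) // "/blocks/FOO"
	}
}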
func (n *IpfsNode) loadFilesRoot() error {
	dsk := ds.NewKey("/local/filesroot")
	pf := func(ctx context.Context, k key.Key) error {
		return n.Repo.Datastore().Put(dsk, []byte(k))
	}

	var nd *merkledag.Node
	val, err := n.Repo.Datastore().Get(dsk)

	switch {
	case err == ds.ErrNotFound || val == nil:
		nd = uio.NewEmptyDirectory()
		_, err := n.DAG.Add(nd)
		if err != nil {
			return fmt.Errorf("failure writing to dagstore: %s", err)
		}
	case err == nil:
		k := key.Key(val.([]byte))
		nd, err = n.DAG.Get(n.Context(), k)
		if err != nil {
			return fmt.Errorf("error loading filesroot from DAG: %s", err)
		}
	default:
		return err
	}

	mr, err := mfs.NewRoot(n.Context(), n.DAG, nd, pf)
	if err != nil {
		return err
	}

	n.FilesRoot = mr
	return nil
}
func TestDatastoreGetNotAllowedAfterClose(t *testing.T) {
	t.Parallel()
	path := testRepoPath("test", t)

	assert.True(!IsInitialized(path), t, "should NOT be initialized")
	assert.Nil(Init(path, &config.Config{}), t, "should initialize successfully")
	r, err := Open(path)
	assert.Nil(err, t, "should open successfully")

	k := "key"
	data := []byte(k)
	assert.Nil(r.Datastore().Put(datastore.NewKey(k), data), t, "Put should be successful")

	assert.Nil(r.Close(), t)
	_, err = r.Datastore().Get(datastore.NewKey(k))
	assert.Err(err, t, "after close, Get should fail")
}
func (mt *mountBatch) lookupBatch(key ds.Key) (ds.Batch, ds.Key, error) {
	mt.lk.Lock()
	defer mt.lk.Unlock()

	child, loc, rest := mt.d.lookup(key)
	t, ok := mt.mounts[loc.String()]
	if !ok {
		bds, ok := child.(ds.Batching)
		if !ok {
			return nil, ds.NewKey(""), ds.ErrBatchUnsupported
		}

		var err error
		t, err = bds.Batch()
		if err != nil {
			return nil, ds.NewKey(""), err
		}

		mt.mounts[loc.String()] = t
	}

	return t, rest, nil
}
func TestDatastorePersistsFromRepoToRepo(t *testing.T) {
	t.Parallel()
	path := testRepoPath("test", t)

	assert.Nil(Init(path, &config.Config{}), t)
	r1, err := Open(path)
	assert.Nil(err, t)

	k := "key"
	expected := []byte(k)
	assert.Nil(r1.Datastore().Put(datastore.NewKey(k), expected), t, "using first repo, Put should be successful")
	assert.Nil(r1.Close(), t)

	r2, err := Open(path)
	assert.Nil(err, t)
	v, err := r2.Datastore().Get(datastore.NewKey(k))
	assert.Nil(err, t, "using second repo, Get should be successful")
	actual, ok := v.([]byte)
	assert.True(ok, t, "value should be the []byte from r1's Put")
	assert.Nil(r2.Close(), t)
	assert.True(bytes.Compare(expected, actual) == 0, t, "data should match")
}
func RunBatchTest(t *testing.T, ds dstore.Batching) {
	batch, err := ds.Batch()
	if err != nil {
		t.Fatal(err)
	}

	r := rand.New()
	var blocks [][]byte
	var keys []dstore.Key
	for i := 0; i < 20; i++ {
		blk := make([]byte, 256*1024)
		r.Read(blk)
		blocks = append(blocks, blk)

		key := dstore.NewKey(base32.StdEncoding.EncodeToString(blk[:8]))
		keys = append(keys, key)

		err := batch.Put(key, blk)
		if err != nil {
			t.Fatal(err)
		}
	}

	// Ensure they are not in the datastore before committing
	for _, k := range keys {
		_, err := ds.Get(k)
		if err == nil {
			t.Fatal("should not have found this block")
		}
	}

	// commit, write them to the datastore
	err = batch.Commit()
	if err != nil {
		t.Fatal(err)
	}

	for i, k := range keys {
		blk, err := ds.Get(k)
		if err != nil {
			t.Fatal(err)
		}

		if !bytes.Equal(blk.([]byte), blocks[i]) {
			t.Fatal("blocks not correct!")
		}
	}
}
func (d *datastore) runQuery(worker goprocess.Process, qrb *dsq.ResultBuilder) {
	var rnge *util.Range
	if qrb.Query.Prefix != "" {
		rnge = util.BytesPrefix([]byte(qrb.Query.Prefix))
	}
	i := d.DB.NewIterator(rnge, nil)
	defer i.Release()

	// advance iterator for offset
	if qrb.Query.Offset > 0 {
		for j := 0; j < qrb.Query.Offset; j++ {
			i.Next()
		}
	}

	// iterate, and handle limit, too
	for sent := 0; i.Next(); sent++ {
		// end early if we hit the limit
		if qrb.Query.Limit > 0 && sent >= qrb.Query.Limit {
			break
		}

		k := ds.NewKey(string(i.Key())).String()
		e := dsq.Entry{Key: k}
		if !qrb.Query.KeysOnly {
			buf := make([]byte, len(i.Value()))
			copy(buf, i.Value())
			e.Value = buf
		}

		select {
		case qrb.Output <- dsq.Result{Entry: e}: // we sent it out
		case <-worker.Closing():
			// client told us to end early; a bare break would only
			// exit the select, so return instead
			return
		}
	}

	if err := i.Error(); err != nil {
		select {
		case qrb.Output <- dsq.Result{Error: err}: // client read our error
		case <-worker.Closing():
			// client told us to end.
			return
		}
	}
}
// PrefixTransform constructs a KeyTransform with a pair of functions that
// add or remove the given prefix key.
//
// Warning: Will panic if prefix not found when it should be there. This is
// to avoid insidious data inconsistency errors.
func PrefixTransform(prefix ds.Key) ktds.KeyTransform {
	return &ktds.Pair{
		// Convert adds the prefix
		Convert: func(k ds.Key) ds.Key {
			return prefix.Child(k)
		},

		// Invert removes the prefix. panics if prefix not found.
		Invert: func(k ds.Key) ds.Key {
			if !prefix.IsAncestorOf(k) {
				panic(fmt.Sprintf("expected prefix (%s) not found in key (%s)", prefix, k))
			}
			s := strings.TrimPrefix(k.String(), prefix.String())
			return ds.NewKey(s)
		},
	}
}
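// Round-trip sketch for the transform above; assumes the same ds and ktds
// packages and a hypothetical "/mount" prefix.
func examplePrefixTransform() {
	t := PrefixTransform(ds.NewKey("/mount"))
	wrapped := t.ConvertKey(ds.NewKey("/object")) // "/mount/object"
	original := t.InvertKey(wrapped)              // "/object"
	fmt.Println(wrapped, original)
}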
func main() {
	bkp, err := fsrepo.BestKnownPath()
	if err != nil {
		panic(err)
	}

	r, err := fsrepo.Open(bkp)
	if err != nil {
		panic(err)
	}

	k := ds.NewKey(os.Args[1])
	val, err := r.Datastore().Get(k)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	fmt.Println(string(val.([]byte)))
}
// Query implements Query, inverting keys on the way back out.
func (d *ktds) Query(q dsq.Query) (dsq.Results, error) {
	qr, err := d.child.Query(q)
	if err != nil {
		return nil, err
	}

	ch := make(chan dsq.Result)
	go func() {
		defer close(ch)
		defer qr.Close()

		for r := range qr.Next() {
			if r.Error == nil {
				r.Entry.Key = d.InvertKey(ds.NewKey(r.Entry.Key)).String()
			}
			ch <- r
		}
	}()

	return dsq.DerivedResults(qr, ch), nil
}
func RunBatchDeleteTest(t *testing.T, ds dstore.Batching) {
	r := rand.New()
	var keys []dstore.Key
	for i := 0; i < 20; i++ {
		blk := make([]byte, 16)
		r.Read(blk)

		key := dstore.NewKey(base32.StdEncoding.EncodeToString(blk[:8]))
		keys = append(keys, key)

		err := ds.Put(key, blk)
		if err != nil {
			t.Fatal(err)
		}
	}

	batch, err := ds.Batch()
	if err != nil {
		t.Fatal(err)
	}

	for _, k := range keys {
		err := batch.Delete(k)
		if err != nil {
			t.Fatal(err)
		}
	}

	err = batch.Commit()
	if err != nil {
		t.Fatal(err)
	}

	for _, k := range keys {
		_, err := ds.Get(k)
		if err == nil {
			t.Fatal("shouldn't have found block")
		}
	}
}
func (d *Datastore) Query(q query.Query) (query.Results, error) {
	if len(q.Filters) > 0 || len(q.Orders) > 0 || q.Limit > 0 || q.Offset > 0 {
		// TODO this is overly simplistic, but the only caller is
		// `ipfs refs local` for now, and this gets us moving.
		return nil, errors.New("mount only supports listing all prefixed keys in random order")
	}

	key := ds.NewKey(q.Prefix)
	cds, mount, k := d.lookup(key)
	if cds == nil {
		return nil, errors.New("mount only supports listing a mount point")
	}

	// TODO support listing cross mount points too

	// delegate the query to the mounted datastore, while adjusting
	// keys in and out
	q2 := q
	q2.Prefix = k.String()
	wrapDS := keytransform.Wrap(cds, &keytransform.Pair{
		Convert: func(ds.Key) ds.Key {
			panic("this should never be called")
		},
		Invert: func(k ds.Key) ds.Key {
			return mount.Child(k)
		},
	})

	r, err := wrapDS.Query(q2)
	if err != nil {
		return nil, err
	}
	r = query.ResultsReplaceQuery(r, q)
	return r, nil
}
import (
	"fmt"
	"sync"
	"time"

	ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore"
	context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
	key "github.com/ipfs/go-ipfs/blocks/key"
	"github.com/ipfs/go-ipfs/blocks/set"
	mdag "github.com/ipfs/go-ipfs/merkledag"
	logging "github.com/ipfs/go-ipfs/vendor/QmQg1J6vikuXF9oDvm4wpdeAUvvkVEKW1EYDw9HhTMnP2b/go-log"
)

var log = logging.Logger("pin")

var pinDatastoreKey = ds.NewKey("/local/pins")

var emptyKey = key.B58KeyDecode("QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n")

const (
	linkDirect    = "direct"
	linkRecursive = "recursive"
)

type PinMode int

const (
	Recursive PinMode = iota
	Direct
	NotPinned
)
func (ps *peerstore) Put(p ID, key string, val interface{}) error {
	dsk := ds.NewKey(string(p) + "/" + key)
	return ps.ds.Put(dsk, val)
}
func (ps *peerstore) Get(p ID, key string) (interface{}, error) {
	dsk := ds.NewKey(string(p) + "/" + key)
	return ps.ds.Get(dsk)
}
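// Sketch of the flat key layout Put and Get share above: the peer ID and the
// property name are joined into a single datastore key ("QmPeerID" and
// "latency" are hypothetical values).
func examplePeerstoreKey() {
	p := ID("QmPeerID")
	dsk := ds.NewKey(string(p) + "/" + "latency")
	fmt.Println(dsk) // "/QmPeerID/latency" -- NewKey adds the leading slash
}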
// DsKey returns a Datastore key
func (k Key) DsKey() ds.Key {
	return ds.NewKey(string(k))
}
// AllKeysChan runs a query for keys from the blockstore.
// this is very simplistic, in the future, take dsq.Query as a param?
//
// AllKeysChan respects context
func (bs *blockstore) AllKeysChan(ctx context.Context) (<-chan key.Key, error) {

	// KeysOnly, because that would be _a lot_ of data.
	q := dsq.Query{KeysOnly: true}
	// datastore/namespace does *NOT* fix up Query.Prefix
	q.Prefix = BlockPrefix.String()
	res, err := bs.datastore.Query(q)
	if err != nil {
		return nil, err
	}

	// this function is here to compartmentalize
	get := func() (k key.Key, ok bool) {
		select {
		case <-ctx.Done():
			return k, false
		case e, more := <-res.Next():
			if !more {
				return k, false
			}
			if e.Error != nil {
				log.Debug("blockstore.AllKeysChan got err:", e.Error)
				return k, false
			}

			// need to convert to key.Key using key.KeyFromDsKey.
			k = key.KeyFromDsKey(ds.NewKey(e.Key))
			log.Debug("blockstore: query got key", k)

			// key must be a multihash. else ignore it.
			_, err := mh.Cast([]byte(k))
			if err != nil {
				return "", true
			}

			return k, true
		}
	}

	output := make(chan key.Key)
	go func() {
		defer func() {
			res.Process().Close() // ensure exit (signals early exit, too)
			close(output)
		}()
		for {
			k, ok := get()
			if !ok {
				return
			}
			if k == "" {
				continue
			}

			select {
			case <-ctx.Done():
				return
			case output <- k:
			}
		}
	}()

	return output, nil
}
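// Minimal consumer sketch for AllKeysChan; the function name is hypothetical,
// and it relies only on the Blockstore interface defined below.
func listAllBlockKeys(ctx context.Context, bs Blockstore) error {
	ch, err := bs.AllKeysChan(ctx)
	if err != nil {
		return err
	}
	for k := range ch {
		fmt.Println(k)
	}
	return nil
}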
"sync/atomic" ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore" dsns "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/namespace" dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query" blocks "github.com/ipfs/go-ipfs/blocks" key "github.com/ipfs/go-ipfs/blocks/key" mh "gx/ipfs/QmYf7ng2hG5XBtJA3tN34DQ2GUN5HNksEw1rLDkmr6vGku/go-multihash" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/QmaDNZ4QMdBdku1YZWBysufYyoQt1negQGNav6PLYarbY8/go-log" ) var log = logging.Logger("blockstore") // BlockPrefix namespaces blockstore datastores var BlockPrefix = ds.NewKey("blocks") var ValueTypeMismatch = errors.New("The retrieved value is not a Block") var ErrNotFound = errors.New("blockstore: block not found") // Blockstore wraps a Datastore type Blockstore interface { DeleteBlock(key.Key) error Has(key.Key) (bool, error) Get(key.Key) (blocks.Block, error) Put(blocks.Block) error PutMany([]blocks.Block) error AllKeysChan(ctx context.Context) (<-chan key.Key, error) }