func BenchmarkHandle10KBlocks(b *testing.B) {
	bstore := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
	var testdata []*blocks.Block
	for i := 0; i < 10000; i++ {
		testdata = append(testdata, blocks.NewBlock([]byte(string(i))))
	}
	b.ResetTimer()
	b.SetBytes(10000)
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		w := NewWorker(offline.Exchange(bstore), Config{
			NumWorkers:       1,
			ClientBufferSize: 0,
			WorkerBufferSize: 0,
		})
		b.StartTimer()

		for _, block := range testdata {
			if err := w.HasBlock(block); err != nil {
				b.Fatal(err)
			}
		}

		b.StopTimer()
		w.Close()
		b.StartTimer()
	}
}
func getMockDagServ(t testing.TB) mdag.DAGService {
	dstore := ds.NewMapDatastore()
	tsds := sync.MutexWrap(dstore)
	bstore := blockstore.NewBlockstore(tsds)
	bserv := bs.New(bstore, offline.Exchange(bstore))
	return mdag.NewDAGService(bserv)
}
func setupNode(ctx context.Context, n *IpfsNode, cfg *BuildCfg) error {
	// setup local peer ID (private key is loaded in online setup)
	if err := n.loadID(); err != nil {
		return err
	}

	var err error
	n.Blockstore, err = bstore.WriteCached(bstore.NewBlockstore(n.Repo.Datastore()), kSizeBlockstoreWriteCache)
	if err != nil {
		return err
	}

	if cfg.Online {
		do := setupDiscoveryOption(n.Repo.Config().Discovery)
		if err := n.startOnlineServices(ctx, cfg.Routing, cfg.Host, do); err != nil {
			return err
		}
	} else {
		n.Exchange = offline.Exchange(n.Blockstore)
	}

	n.Blocks = bserv.New(n.Blockstore, n.Exchange)
	n.DAG = dag.NewDAGService(n.Blocks)

	n.Pinning, err = pin.LoadPinner(n.Repo.Datastore(), n.DAG)
	if err != nil {
		// TODO: we should move towards only running 'NewPinner' explicitly on
		// node init instead of implicitly here as a result of the pinner keys
		// not being found in the datastore.
		// This is kinda sketchy and could cause data loss.
		n.Pinning = pin.NewPinner(n.Repo.Datastore(), n.DAG)
	}
	n.Resolver = &path.Resolver{DAG: n.DAG}

	return nil
}
func TestDuplicateSemantics(t *testing.T) {
	ctx := context.Background()
	dstore := dssync.MutexWrap(ds.NewMapDatastore())
	bstore := blockstore.NewBlockstore(dstore)
	bserv := bs.New(bstore, offline.Exchange(bstore))

	dserv := mdag.NewDAGService(bserv)

	// TODO does pinner need to share datastore with blockservice?
	p := NewPinner(dstore, dserv, dserv)

	a, _ := randNode()
	_, err := dserv.Add(a)
	if err != nil {
		t.Fatal(err)
	}

	// pin it recursively
	err = p.Pin(ctx, a, true)
	if err != nil {
		t.Fatal(err)
	}

	// pinning directly should fail
	err = p.Pin(ctx, a, false)
	if err == nil {
		t.Fatal("expected direct pin to fail")
	}

	// pinning recursively again should succeed
	err = p.Pin(ctx, a, true)
	if err != nil {
		t.Fatal(err)
	}
}
func TestPinRecursiveFail(t *testing.T) {
	ctx := context.Background()
	dstore := dssync.MutexWrap(ds.NewMapDatastore())
	bstore := blockstore.NewBlockstore(dstore)
	bserv := bs.New(bstore, offline.Exchange(bstore))
	dserv := mdag.NewDAGService(bserv)

	p := NewPinner(dstore, dserv)

	a, _ := randNode()
	b, _ := randNode()
	err := a.AddNodeLinkClean("child", b)
	if err != nil {
		t.Fatal(err)
	}

	// NOTE: this isn't a time-based test; we expect the pin to fail
	mctx, _ := context.WithTimeout(ctx, time.Millisecond)
	err = p.Pin(mctx, a, true)
	if err == nil {
		t.Fatal("should have failed to pin here")
	}

	_, err = dserv.Add(b)
	if err != nil {
		t.Fatal(err)
	}

	// this one is time-based... but shouldn't cause any issues
	mctx, _ = context.WithTimeout(ctx, time.Second)
	err = p.Pin(mctx, a, true)
	if err != nil {
		t.Fatal(err)
	}
}
func TestFetchGraph(t *testing.T) {
	var dservs []DAGService
	bsis := bstest.Mocks(2)
	for _, bsi := range bsis {
		dservs = append(dservs, NewDAGService(bsi))
	}

	read := io.LimitReader(u.NewTimeSeededRand(), 1024*32)
	root, err := imp.BuildDagFromReader(dservs[0], chunk.NewSizeSplitter(read, 512))
	if err != nil {
		t.Fatal(err)
	}

	err = FetchGraph(context.TODO(), root, dservs[1])
	if err != nil {
		t.Fatal(err)
	}

	// create an offline dagstore and ensure all blocks were fetched
	bs := bserv.New(bsis[1].Blockstore, offline.Exchange(bsis[1].Blockstore))

	offline_ds := NewDAGService(bs)
	ks := key.NewKeySet()

	err = EnumerateChildren(context.Background(), offline_ds, root, ks, false)
	if err != nil {
		t.Fatal(err)
	}
}
func (n *dagService) GetOfflineLinkService() LinkService {
	if n.Blocks.Exchange().IsOnline() {
		bsrv := bserv.New(n.Blocks.Blockstore(), offline.Exchange(n.Blocks.Blockstore()))
		return NewDAGService(bsrv)
	} else {
		return n
	}
}
func getMockDagServAndBstore(t testing.TB) (mdag.DAGService, blockstore.Blockstore, pin.ManualPinner) {
	dstore := ds.NewMapDatastore()
	tsds := sync.MutexWrap(dstore)
	bstore := blockstore.NewBlockstore(tsds)
	bserv := bs.New(bstore, offline.Exchange(bstore))
	dserv := mdag.NewDAGService(bserv)
	return dserv, bstore, pin.NewPinner(tsds, dserv).GetManual()
}
func getMockDagServAndBstore(t testing.TB) (mdag.DAGService, blockstore.GCBlockstore) {
	dstore := ds.NewMapDatastore()
	tsds := sync.MutexWrap(dstore)
	bstore := blockstore.NewBlockstore(tsds)
	bserv := bs.New(bstore, offline.Exchange(bstore))
	dserv := mdag.NewDAGService(bserv)
	return dserv, bstore
}
func Mock(t testing.TB) dag.DAGService {
	bstore := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
	bserv, err := bsrv.New(bstore, offline.Exchange(bstore))
	if err != nil {
		t.Fatal(err)
	}
	return dag.NewDAGService(bserv)
}
func getDagserv(t *testing.T) merkledag.DAGService {
	db := dssync.MutexWrap(ds.NewMapDatastore())
	bs := bstore.NewBlockstore(db)
	blockserv, err := bserv.New(bs, offline.Exchange(bs))
	if err != nil {
		t.Fatal(err)
	}
	return merkledag.NewDAGService(blockserv)
}
// NewMockNode constructs an IpfsNode for use in tests.
func NewMockNode() (*core.IpfsNode, error) {
	ctx := context.Background()

	// Generate Identity
	ident, err := testutil.RandIdentity()
	if err != nil {
		return nil, err
	}
	p := ident.ID()

	c := config.Config{
		Identity: config.Identity{
			PeerID: p.String(),
		},
	}

	nd, err := core.Offline(&repo.Mock{
		C: c,
		D: ds2.CloserWrap(syncds.MutexWrap(datastore.NewMapDatastore())),
	})(ctx)
	if err != nil {
		return nil, err
	}

	nd.PrivateKey = ident.PrivateKey()
	nd.Peerstore = peer.NewPeerstore()
	nd.Peerstore.AddPrivKey(p, ident.PrivateKey())
	nd.Peerstore.AddPubKey(p, ident.PublicKey())
	nd.Identity = p

	nd.PeerHost, err = mocknet.New(nd.Context()).AddPeer(ident.PrivateKey(), ident.Address()) // effectively offline
	if err != nil {
		return nil, err
	}

	// Routing
	nd.Routing = offrt.NewOfflineRouter(nd.Repo.Datastore(), nd.PrivateKey)

	// Bitswap
	bstore := blockstore.NewBlockstore(nd.Repo.Datastore())
	bserv, err := blockservice.New(bstore, offline.Exchange(bstore))
	if err != nil {
		return nil, err
	}

	nd.DAG = mdag.NewDAGService(bserv)

	nd.Pinning = pin.NewPinner(nd.Repo.Datastore(), nd.DAG)

	// Namespace resolver
	nd.Namesys = nsys.NewNameSystem(nd.Routing)

	// Path resolver
	nd.Resolver = &path.Resolver{DAG: nd.DAG}

	return nd, nil
}
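// Hedged usage sketch, not part of the original source: it only shows how a test might
// construct the mock node defined above and reach its offline DAG service. The test
// name and the Close call are assumptions.
func TestMockNodeIsOffline(t *testing.T) {
	nd, err := NewMockNode()
	if err != nil {
		t.Fatal(err)
	}
	defer nd.Close() // assumes IpfsNode exposes Close for teardown

	// The DAG service is backed by an in-memory blockstore and an offline
	// exchange, so resolving through nd.Resolver never touches the network.
	if nd.DAG == nil {
		t.Fatal("expected mock node to have a DAG service")
	}
}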
func TestRecurivePathResolution(t *testing.T) {
	ctx := context.Background()

	dstore := sync.MutexWrap(datastore.NewMapDatastore())
	bstore := blockstore.NewBlockstore(dstore)
	bserv, err := blockservice.New(bstore, offline.Exchange(bstore))
	if err != nil {
		t.Fatal(err)
	}
	dagService := merkledag.NewDAGService(bserv)

	a, _ := randNode()
	b, _ := randNode()
	c, cKey := randNode()

	err = b.AddNodeLink("grandchild", c)
	if err != nil {
		t.Fatal(err)
	}

	err = a.AddNodeLink("child", b)
	if err != nil {
		t.Fatal(err)
	}

	err = dagService.AddRecursive(a)
	if err != nil {
		t.Fatal(err)
	}

	aKey, err := a.Key()
	if err != nil {
		t.Fatal(err)
	}

	segments := []string{aKey.String(), "child", "grandchild"}
	p, err := path.FromSegments("/ipfs/", segments...)
	if err != nil {
		t.Fatal(err)
	}

	resolver := &path.Resolver{DAG: dagService}
	node, err := resolver.ResolvePath(ctx, p)
	if err != nil {
		t.Fatal(err)
	}

	key, err := node.Key()
	if err != nil {
		t.Fatal(err)
	}
	if key.String() != cKey.String() {
		t.Fatal(fmt.Errorf(
			"recursive path resolution failed for %s: %s != %s",
			p.String(), key.String(), cKey.String()))
	}
}
// TODO refactor so maybeRouter isn't special-cased in this way
func standardWithRouting(r repo.Repo, online bool, routingOption RoutingOption, hostOption HostOption) ConfigOption {
	return func(ctx context.Context) (n *IpfsNode, err error) {
		// FIXME perform node construction in the main constructor so it isn't
		// necessary to perform this teardown in this scope.
		success := false
		defer func() {
			if !success && n != nil {
				n.teardown()
			}
		}()

		// TODO move as much of node initialization as possible into
		// NewIPFSNode. The larger these config options are, the harder it is
		// to test all node construction code paths.
		if r == nil {
			return nil, fmt.Errorf("repo required")
		}
		n = &IpfsNode{
			mode: func() mode {
				if online {
					return onlineMode
				}
				return offlineMode
			}(),
			Repo: r,
		}

		n.ctx = ctx
		n.proc = goprocessctx.WithContextAndTeardown(ctx, n.teardown)

		// setup Peerstore
		n.Peerstore = peer.NewPeerstore()

		// setup local peer ID (private key is loaded in online setup)
		if err := n.loadID(); err != nil {
			return nil, err
		}

		n.Blockstore, err = bstore.WriteCached(bstore.NewBlockstore(n.Repo.Datastore()), kSizeBlockstoreWriteCache)
		if err != nil {
			return nil, err
		}

		if online {
			do := setupDiscoveryOption(n.Repo.Config().Discovery)
			if err := n.startOnlineServices(ctx, routingOption, hostOption, do); err != nil {
				return nil, err
			}
		} else {
			n.Exchange = offline.Exchange(n.Blockstore)
		}

		success = true
		return n, nil
	}
}
func TestMultisetRoundtrip(t *testing.T) {
	dstore := dssync.MutexWrap(datastore.NewMapDatastore())
	bstore := blockstore.NewBlockstore(dstore)
	bserv := blockservice.New(bstore, offline.Exchange(bstore))
	dag := merkledag.NewDAGService(bserv)

	fn := func(m map[key.Key]uint16) bool {
		// Generate a smaller range for refcounts than full uint64, as
		// otherwise this just becomes overly cpu heavy, splitting it
		// out into too many items. That means we need to convert to
		// the right kind of map. As storeMultiset mutates the map as
		// part of its bookkeeping, this is actually good.
		refcounts := copyMap(m)

		ctx := context.Background()
		n, err := storeMultiset(ctx, dag, refcounts, ignoreKeys)
		if err != nil {
			t.Fatalf("storing multiset: %v", err)
		}

		root := &merkledag.Node{}
		const linkName = "dummylink"
		if err := root.AddNodeLink(linkName, n); err != nil {
			t.Fatalf("adding link to root node: %v", err)
		}

		roundtrip, err := loadMultiset(ctx, dag, root, linkName, ignoreKeys)
		if err != nil {
			t.Fatalf("loading multiset: %v", err)
		}

		orig := copyMap(m)
		success := true
		for k, want := range orig {
			if got, ok := roundtrip[k]; ok {
				if got != want {
					success = false
					t.Logf("refcount changed: %v -> %v for %q", want, got, k)
				}
				delete(orig, k)
				delete(roundtrip, k)
			}
		}
		for k, v := range orig {
			success = false
			t.Logf("refcount missing: %v for %q", v, k)
		}
		for k, v := range roundtrip {
			success = false
			t.Logf("refcount extra: %v for %q", v, k)
		}
		return success
	}
	if err := quick.Check(fn, nil); err != nil {
		t.Fatal(err)
	}
}
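// Hedged sketch, not part of the original source: a plausible shape for the copyMap
// helper the test above relies on, matching its comment about converting the
// quick-check map of uint16 refcounts into the kind of map storeMultiset mutates.
// The uint64 value type is an assumption.
func copyMap(m map[key.Key]uint16) map[key.Key]uint64 {
	c := make(map[key.Key]uint64, len(m))
	for k, v := range m {
		c[k] = uint64(v)
	}
	return c
}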
func getDagservAndPinner(t *testing.T) dagservAndPinner {
	db := dssync.MutexWrap(ds.NewMapDatastore())
	bs := bstore.NewBlockstore(db)
	blockserv := bserv.New(bs, offline.Exchange(bs))
	dserv := NewDAGService(blockserv)
	mpin := pin.NewPinner(db, dserv)
	return dagservAndPinner{
		ds: dserv,
		mp: mpin,
	}
}
func TestFlush(t *testing.T) {
	dstore := dssync.MutexWrap(ds.NewMapDatastore())
	bstore := blockstore.NewBlockstore(dstore)
	bserv := bs.New(bstore, offline.Exchange(bstore))

	dserv := mdag.NewDAGService(bserv)
	p := NewPinner(dstore, dserv, dserv)
	_, k := randNode()

	p.PinWithMode(k, Recursive)
	if err := p.Flush(); err != nil {
		t.Fatal(err)
	}
	assertPinned(t, p, k, "expected key to still be pinned")
}
// GC performs a mark and sweep garbage collection of the blocks in the blockstore.
// First, it creates a 'marked' set and adds to it the following:
// - all recursively pinned blocks, plus all of their descendants (recursively)
// - bestEffortRoots, plus all of their descendants (recursively)
// - all directly pinned blocks
// - all blocks utilized internally by the pinner
//
// The routine then iterates over every block in the blockstore and
// deletes any block that is not found in the marked set.
func GC(ctx context.Context, bs bstore.GCBlockstore, pn pin.Pinner, bestEffortRoots []key.Key) (<-chan key.Key, error) {
	unlocker := bs.GCLock()

	bsrv := bserv.New(bs, offline.Exchange(bs))
	ds := dag.NewDAGService(bsrv)

	gcs, err := ColoredSet(ctx, pn, ds, bestEffortRoots)
	if err != nil {
		return nil, err
	}

	keychan, err := bs.AllKeysChan(ctx)
	if err != nil {
		return nil, err
	}

	output := make(chan key.Key)
	go func() {
		defer close(output)
		defer unlocker.Unlock()
		for {
			select {
			case k, ok := <-keychan:
				if !ok {
					return
				}
				if !gcs.Has(k) {
					err := bs.DeleteBlock(k)
					if err != nil {
						log.Debugf("Error removing key from blockstore: %s", err)
						return
					}
					select {
					case output <- k:
					case <-ctx.Done():
						return
					}
				}
			case <-ctx.Done():
				return
			}
		}
	}()

	return output, nil
}
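// Hedged usage sketch, not part of the original source: it shows how a caller might
// drive the GC function above. The helper name is an assumption. The returned channel
// should be drained until it closes, because the GC goroutine only releases the
// blockstore's GC lock when it exits.
func runGC(ctx context.Context, bs bstore.GCBlockstore, pn pin.Pinner) (int, error) {
	removed, err := GC(ctx, bs, pn, nil)
	if err != nil {
		return 0, err
	}

	count := 0
	for k := range removed {
		// each key received here has already been deleted from the blockstore
		log.Debugf("gc removed %s", k)
		count++
	}
	return count, nil
}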
func TestBlocks(t *testing.T) {
	bstore := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
	bs := New(bstore, offline.Exchange(bstore))
	defer bs.Close()

	_, err := bs.GetBlock(context.Background(), key.Key(""))
	if err != ErrNotFound {
		t.Error("Empty String Key should error", err)
	}

	b := blocks.NewBlock([]byte("beep boop"))
	h := u.Hash([]byte("beep boop"))
	if !bytes.Equal(b.Multihash(), h) {
		t.Error("Block Multihash and data multihash not equal")
	}

	if b.Key() != key.Key(h) {
		t.Error("Block key and data multihash key not equal")
	}

	k, err := bs.AddBlock(b)
	if err != nil {
		t.Error("failed to add block to BlockService", err)
		return
	}

	if k != b.Key() {
		t.Error("returned key is not equal to block key", err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
	defer cancel()
	b2, err := bs.GetBlock(ctx, b.Key())
	if err != nil {
		t.Error("failed to retrieve block from BlockService", err)
		return
	}

	if b.Key() != b2.Key() {
		t.Error("Block keys not equal.")
	}

	if !bytes.Equal(b.Data(), b2.Data()) {
		t.Error("Block data is not equal.")
	}
}
func TestBlocks(t *testing.T) {
	bstore := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
	bs, err := New(bstore, offline.Exchange(bstore))
	if err != nil {
		t.Error("failed to construct block service", err)
		return
	}
	defer bs.Close()

	b := blocks.NewBlock([]byte("beep boop"))
	h := u.Hash([]byte("beep boop"))
	if !bytes.Equal(b.Multihash, h) {
		t.Error("Block Multihash and data multihash not equal")
	}

	if b.Key() != key.Key(h) {
		t.Error("Block key and data multihash key not equal")
	}

	k, err := bs.AddBlock(b)
	if err != nil {
		t.Error("failed to add block to BlockService", err)
		return
	}

	if k != b.Key() {
		t.Error("returned key is not equal to block key", err)
	}

	ctx, _ := context.WithTimeout(context.TODO(), time.Second*5)
	b2, err := bs.GetBlock(ctx, b.Key())
	if err != nil {
		t.Error("failed to retrieve block from BlockService", err)
		return
	}

	if b.Key() != b2.Key() {
		t.Error("Block keys not equal.")
	}

	if !bytes.Equal(b.Data, b2.Data) {
		t.Error("Block data is not equal.")
	}
}
func TestWriteThroughWorks(t *testing.T) {
	bstore := &PutCountingBlockstore{
		blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())),
		0,
	}
	bstore2 := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
	exch := offline.Exchange(bstore2)
	bserv := NewWriteThrough(bstore, exch)
	bgen := butil.NewBlockGenerator()

	block := bgen.Next()

	t.Logf("PutCounter: %d", bstore.PutCounter)
	bserv.AddBlock(block)
	if bstore.PutCounter != 1 {
		t.Fatalf("expected just one Put call, have: %d", bstore.PutCounter)
	}

	bserv.AddBlock(block)
	if bstore.PutCounter != 2 {
		t.Fatalf("Put should have been called again, should be 2, is: %d", bstore.PutCounter)
	}
}
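// Hedged sketch, not part of the original source: one plausible shape for the
// PutCountingBlockstore helper that the test above constructs with a positional
// literal (embedded Blockstore first, counter second). The actual helper in the
// codebase, and the blocks.Block parameter type, are assumptions.
type PutCountingBlockstore struct {
	blockstore.Blockstore
	PutCounter int
}

// Put increments the counter and forwards to the embedded Blockstore.
func (bs *PutCountingBlockstore) Put(block blocks.Block) error {
	bs.PutCounter++
	return bs.Blockstore.Put(block)
}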
func BenchmarkWithConfig(c worker.Config) func(b *testing.B) {
	return func(b *testing.B) {
		routingDelay := delay.Fixed(0) // during setup
		dstore := ds_sync.MutexWrap(datastore2.WithDelay(ds.NewMapDatastore(), routingDelay))
		bstore := blockstore.NewBlockstore(dstore)
		var testdata []*blocks.Block
		var i int64
		for i = 0; i < kBlocksPerOp; i++ {
			testdata = append(testdata, blocks.NewBlock([]byte(string(i))))
		}
		b.ResetTimer()
		b.SetBytes(kBlocksPerOp)
		for i := 0; i < b.N; i++ {
			b.StopTimer()
			w := worker.NewWorker(offline.Exchange(bstore), c)
			b.StartTimer()

			prev := routingDelay.Set(kEstRoutingDelay) // during measured section

			for _, block := range testdata {
				if err := w.HasBlock(block); err != nil {
					b.Fatal(err)
				}
			}

			routingDelay.Set(prev) // to hasten the unmeasured close period

			b.StopTimer()
			w.Close()
			b.StartTimer()
		}
	}
}
func TestBlocks(t *testing.T) {
	bstore := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
	bs := New(bstore, offline.Exchange(bstore))
	defer bs.Close()

	o := newObject([]byte("beep boop"))
	h := cid.NewCidV0(u.Hash([]byte("beep boop")))
	if !o.Cid().Equals(h) {
		t.Error("Block key and data multihash key not equal")
	}

	k, err := bs.AddBlock(o)
	if err != nil {
		t.Error("failed to add block to BlockService", err)
		return
	}

	if !k.Equals(o.Cid()) {
		t.Error("returned key is not equal to block key", err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
	defer cancel()
	b2, err := bs.GetBlock(ctx, o.Cid())
	if err != nil {
		t.Error("failed to retrieve block from BlockService", err)
		return
	}

	if !o.Cid().Equals(b2.Cid()) {
		t.Error("Block keys not equal.")
	}

	if !bytes.Equal(o.RawData(), b2.RawData()) {
		t.Error("Block data is not equal.")
	}
}
func setupNode(ctx context.Context, n *IpfsNode, cfg *BuildCfg) error {
	// setup local peer ID (private key is loaded in online setup)
	if err := n.loadID(); err != nil {
		return err
	}

	rds := &retry.Datastore{
		Batching:    n.Repo.Datastore(),
		Delay:       time.Millisecond * 200,
		Retries:     6,
		TempErrFunc: isTooManyFDError,
	}

	var err error
	bs := bstore.NewBlockstore(rds)
	opts := bstore.DefaultCacheOpts()
	conf, err := n.Repo.Config()
	if err != nil {
		return err
	}

	opts.HasBloomFilterSize = conf.Datastore.BloomFilterSize
	if !cfg.Permament {
		opts.HasBloomFilterSize = 0
	}

	cbs, err := bstore.CachedBlockstore(bs, ctx, opts)
	if err != nil {
		return err
	}

	n.Blockstore = bstore.NewGCBlockstore(cbs, bstore.NewGCLocker())

	rcfg, err := n.Repo.Config()
	if err != nil {
		return err
	}

	if rcfg.Datastore.HashOnRead {
		bs.HashOnRead(true)
	}

	if cfg.Online {
		do := setupDiscoveryOption(rcfg.Discovery)
		if err := n.startOnlineServices(ctx, cfg.Routing, cfg.Host, do, cfg.getOpt("pubsub"), cfg.getOpt("mplex")); err != nil {
			return err
		}
	} else {
		n.Exchange = offline.Exchange(n.Blockstore)
	}

	n.Blocks = bserv.New(n.Blockstore, n.Exchange)
	n.DAG = dag.NewDAGService(n.Blocks)

	internalDag := dag.NewDAGService(bserv.New(n.Blockstore, offline.Exchange(n.Blockstore)))
	n.Pinning, err = pin.LoadPinner(n.Repo.Datastore(), n.DAG, internalDag)
	if err != nil {
		// TODO: we should move towards only running 'NewPinner' explicitly on
		// node init instead of implicitly here as a result of the pinner keys
		// not being found in the datastore.
		// This is kinda sketchy and could cause data loss.
		n.Pinning = pin.NewPinner(n.Repo.Datastore(), n.DAG, internalDag)
	}
	n.Resolver = path.NewBasicResolver(n.DAG)

	err = n.loadFilesRoot()
	if err != nil {
		return err
	}

	return nil
}
func TestPinnerBasic(t *testing.T) {
	ctx := context.Background()

	dstore := dssync.MutexWrap(ds.NewMapDatastore())
	bstore := blockstore.NewBlockstore(dstore)
	bserv := bs.New(bstore, offline.Exchange(bstore))

	dserv := mdag.NewDAGService(bserv)

	// TODO does pinner need to share datastore with blockservice?
	p := NewPinner(dstore, dserv, dserv)

	a, ak := randNode()
	_, err := dserv.Add(a)
	if err != nil {
		t.Fatal(err)
	}

	// Pin A{}
	err = p.Pin(ctx, a, false)
	if err != nil {
		t.Fatal(err)
	}

	assertPinned(t, p, ak, "Failed to find key")

	// create new node c, to be indirectly pinned through b
	c, _ := randNode()
	ck, err := dserv.Add(c)
	if err != nil {
		t.Fatal(err)
	}

	// Create new node b, to be parent to a and c
	b, _ := randNode()
	err = b.AddNodeLink("child", a)
	if err != nil {
		t.Fatal(err)
	}

	err = b.AddNodeLink("otherchild", c)
	if err != nil {
		t.Fatal(err)
	}

	_, err = dserv.Add(b)
	if err != nil {
		t.Fatal(err)
	}

	// recursively pin B{A,C}
	err = p.Pin(ctx, b, true)
	if err != nil {
		t.Fatal(err)
	}

	assertPinned(t, p, ck, "child of recursively pinned node not found")

	bk := b.Cid()
	assertPinned(t, p, bk, "Recursively pinned node not found..")

	d, _ := randNode()
	d.AddNodeLink("a", a)
	d.AddNodeLink("c", c)

	e, _ := randNode()
	d.AddNodeLink("e", e)

	// Must be in dagserv for unpin to work
	_, err = dserv.Add(e)
	if err != nil {
		t.Fatal(err)
	}

	_, err = dserv.Add(d)
	if err != nil {
		t.Fatal(err)
	}

	// Add D{A,C,E}
	err = p.Pin(ctx, d, true)
	if err != nil {
		t.Fatal(err)
	}

	dk := d.Cid()
	assertPinned(t, p, dk, "pinned node not found.")

	// Test recursive unpin
	err = p.Unpin(ctx, dk, true)
	if err != nil {
		t.Fatal(err)
	}

	err = p.Flush()
	if err != nil {
		t.Fatal(err)
	}

	np, err := LoadPinner(dstore, dserv, dserv)
	if err != nil {
		t.Fatal(err)
	}

	// Test directly pinned
	assertPinned(t, np, ak, "Could not find pinned node!")

	// Test recursively pinned
	assertPinned(t, np, bk, "could not find recursively pinned node")
}
func Mock() dag.DAGService {
	bstore := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
	bserv := bsrv.New(bstore, offline.Exchange(bstore))
	return dag.NewDAGService(bserv)
}
	nilnode, err := core.NewNode(n.Context(), &core.BuildCfg{
		// TODO: need this to be true or all files
		// hashed will be stored in memory!
		NilRepo: true,
	})
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}
	n = nilnode
}

dserv := n.DAG
local, _, _ := req.Option("local").Bool()
if local {
	offlineexch := offline.Exchange(n.Blockstore)
	bserv := blockservice.New(n.Blockstore, offlineexch)
	dserv = dag.NewDAGService(bserv)
}

outChan := make(chan interface{}, 8)
res.SetOutput((<-chan interface{})(outChan))

fileAdder, err := coreunix.NewAdder(req.Context(), n.Pinning, n.Blockstore, dserv)
if err != nil {
	res.SetError(err, cmds.ErrNormal)
	return
}
fileAdder.Out = outChan
fileAdder.Chunker = chunker
func getDagserv(t *testing.T) dag.DAGService {
	db := dssync.MutexWrap(ds.NewMapDatastore())
	bs := bstore.NewBlockstore(db)
	blockserv := bserv.New(bs, offline.Exchange(bs))
	return dag.NewDAGService(blockserv)
}
func NewMemoryDagService() dag.DAGService {
	// build mem-datastore for editor's intermediary nodes
	bs := bstore.NewBlockstore(syncds.MutexWrap(ds.NewMapDatastore()))
	bsrv := bserv.New(bs, offline.Exchange(bs))
	return dag.NewDAGService(bsrv)
}
func setupNode(ctx context.Context, n *IpfsNode, cfg *BuildCfg) error {
	// setup local peer ID (private key is loaded in online setup)
	if err := n.loadID(); err != nil {
		return err
	}

	var err error
	bs := bstore.NewBlockstore(n.Repo.Datastore())
	opts := bstore.DefaultCacheOpts()
	conf, err := n.Repo.Config()
	if err != nil {
		return err
	}

	opts.HasBloomFilterSize = conf.Datastore.BloomFilterSize
	if !cfg.Permament {
		opts.HasBloomFilterSize = 0
	}

	n.Blockstore, err = bstore.CachedBlockstore(bs, ctx, opts)
	if err != nil {
		return err
	}

	rcfg, err := n.Repo.Config()
	if err != nil {
		return err
	}

	if rcfg.Datastore.HashOnRead {
		bs.RuntimeHashing(true)
	}

	if cfg.Online {
		do := setupDiscoveryOption(rcfg.Discovery)
		if err := n.startOnlineServices(ctx, cfg.Routing, cfg.Host, do); err != nil {
			return err
		}
	} else {
		n.Exchange = offline.Exchange(n.Blockstore)
	}

	n.Blocks = bserv.New(n.Blockstore, n.Exchange)
	n.DAG = dag.NewDAGService(n.Blocks)

	n.Pinning, err = pin.LoadPinner(n.Repo.Datastore(), n.DAG)
	if err != nil {
		// TODO: we should move towards only running 'NewPinner' explicitly on
		// node init instead of implicitly here as a result of the pinner keys
		// not being found in the datastore.
		// This is kinda sketchy and could cause data loss.
		n.Pinning = pin.NewPinner(n.Repo.Datastore(), n.DAG)
	}
	n.Resolver = &path.Resolver{DAG: n.DAG}

	err = n.loadFilesRoot()
	if err != nil {
		return err
	}

	return nil
}