func TestRoutingResolve(t *testing.T) {
	dstore := dssync.MutexWrap(ds.NewMapDatastore())
	serv := mockrouting.NewServer()
	id := testutil.RandIdentityOrFatal(t)
	d := serv.ClientWithDatastore(context.Background(), id, dstore)

	resolver := NewRoutingResolver(d, 0)
	publisher := NewRoutingPublisher(d, dstore)

	privk, pubk, err := testutil.RandTestKeyPair(512)
	if err != nil {
		t.Fatal(err)
	}

	h := path.FromString("/ipfs/QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN")
	err = publisher.Publish(context.Background(), privk, h)
	if err != nil {
		t.Fatal(err)
	}

	pubkb, err := pubk.Bytes()
	if err != nil {
		t.Fatal(err)
	}

	pkhash := u.Hash(pubkb)
	res, err := resolver.Resolve(context.Background(), key.Key(pkhash).B58String())
	if err != nil {
		t.Fatal(err)
	}

	if res != h {
		t.Fatal("Got back incorrect value.")
	}
}
func TestDuplicateSemantics(t *testing.T) {
	ctx := context.Background()
	dstore := dssync.MutexWrap(ds.NewMapDatastore())
	bstore := blockstore.NewBlockstore(dstore)
	bserv := bs.New(bstore, offline.Exchange(bstore))

	dserv := mdag.NewDAGService(bserv)

	// TODO does pinner need to share datastore with blockservice?
	p := NewPinner(dstore, dserv, dserv)

	a, _ := randNode()
	_, err := dserv.Add(a)
	if err != nil {
		t.Fatal(err)
	}

	// pin it recursively
	err = p.Pin(ctx, a, true)
	if err != nil {
		t.Fatal(err)
	}

	// pinning directly should fail
	err = p.Pin(ctx, a, false)
	if err == nil {
		t.Fatal("expected direct pin to fail")
	}

	// pinning recursively again should succeed
	err = p.Pin(ctx, a, true)
	if err != nil {
		t.Fatal(err)
	}
}
func TestReturnsErrorWhenSizeNegative(t *testing.T) {
	bs := NewBlockstore(syncds.MutexWrap(ds.NewMapDatastore()))
	_, err := bloomCached(bs, context.TODO(), -1, 1)
	if err == nil {
		t.Fail()
	}
}
func TestPutManyAddsToBloom(t *testing.T) {
	bs := NewBlockstore(syncds.MutexWrap(ds.NewMapDatastore()))

	ctx, _ := context.WithTimeout(context.Background(), 1*time.Second)
	cachedbs, err := testBloomCached(bs, ctx)
	if err != nil {
		t.Fatal(err)
	}

	select {
	case <-cachedbs.rebuildChan:
	case <-ctx.Done():
		t.Fatalf("Timeout waiting for rebuild: %d", cachedbs.bloom.ElementsAdded())
	}

	block1 := blocks.NewBlock([]byte("foo"))
	block2 := blocks.NewBlock([]byte("bar"))

	cachedbs.PutMany([]blocks.Block{block1})

	has, err := cachedbs.Has(block1.Cid())
	if err != nil {
		t.Fatal(err)
	}
	if has == false {
		t.Fatal("added block is reported missing")
	}

	has, err = cachedbs.Has(block2.Cid())
	if err != nil {
		t.Fatal(err)
	}
	if has == true {
		t.Fatal("not added block is reported to be in blockstore")
	}
}
func (cfg *BuildCfg) fillDefaults() error {
	if cfg.Repo != nil && cfg.NilRepo {
		return errors.New("cannot set a repo and specify nilrepo at the same time")
	}

	if cfg.Repo == nil {
		var d ds.Datastore
		d = ds.NewMapDatastore()
		if cfg.NilRepo {
			d = ds.NewNullDatastore()
		}
		r, err := defaultRepo(dsync.MutexWrap(d))
		if err != nil {
			return err
		}
		cfg.Repo = r
	}

	if cfg.Routing == nil {
		cfg.Routing = DHTOption
	}

	if cfg.Host == nil {
		cfg.Host = DefaultHostOption
	}

	return nil
}
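// A minimal usage sketch (not part of the original source; buildCfgExample is
// a hypothetical name) showing how a caller in the same package might rely on
// fillDefaults. Only the BuildCfg fields referenced above (Repo, NilRepo,
// Routing, Host) are assumed to exist.
func buildCfgExample() error {
	cfg := &BuildCfg{NilRepo: true}
	if err := cfg.fillDefaults(); err != nil {
		return err
	}
	// At this point cfg.Repo is backed by a null datastore, and cfg.Routing
	// and cfg.Host hold the default DHT and host options.
	return nil
}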
// Session creates a test bitswap session.
//
// NB: It's easy to make mistakes by providing the same peer ID to two
// different sessions. To safeguard, use the SessionGenerator to generate
// sessions. It's just a much better idea.
func Session(ctx context.Context, net tn.Network, p testutil.Identity) Instance {
	bsdelay := delay.Fixed(0)
	const bloomSize = 512
	const writeCacheElems = 100

	adapter := net.Adapter(p)
	dstore := ds_sync.MutexWrap(datastore2.WithDelay(ds.NewMapDatastore(), bsdelay))

	bstore, err := blockstore.CachedBlockstore(blockstore.NewBlockstore(
		ds_sync.MutexWrap(dstore)), ctx, blockstore.DefaultCacheOpts())
	if err != nil {
		panic(err.Error()) // FIXME perhaps change signature and return error.
	}

	const alwaysSendToPeer = true

	bs := New(ctx, p.ID(), adapter, bstore, alwaysSendToPeer).(*Bitswap)

	return Instance{
		Peer:            p.ID(),
		Exchange:        bs,
		blockstore:      bstore,
		blockstoreDelay: bsdelay,
	}
}
func (pn *peernet) Adapter(p testutil.Identity) bsnet.BitSwapNetwork {
	client, err := pn.Mocknet.AddPeer(p.PrivateKey(), p.Address())
	if err != nil {
		panic(err.Error())
	}
	routing := pn.routingserver.ClientWithDatastore(context.TODO(), p, ds.NewMapDatastore())
	return bsnet.NewFromIpfsHost(client, routing)
}
func getMockDagServAndBstore(t testing.TB) (mdag.DAGService, blockstore.Blockstore) {
	dstore := ds.NewMapDatastore()
	tsds := sync.MutexWrap(dstore)
	bstore := blockstore.NewBlockstore(tsds)
	bserv := bs.New(bstore, offline.Exchange(bstore))
	dserv := mdag.NewDAGService(bserv)
	return dserv, bstore
}
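// A minimal sketch (hypothetical test, not part of the suite) of how the
// helper above can be used. It only exercises Put/Has calls that already
// appear in the other tests in this section.
func TestMockDagServAndBstoreUsage(t *testing.T) {
	_, bstore := getMockDagServAndBstore(t)

	blk := blocks.NewBlock([]byte("this is a test"))
	if err := bstore.Put(blk); err != nil {
		t.Fatal(err)
	}

	has, err := bstore.Has(blk.Cid())
	if err != nil {
		t.Fatal(err)
	}
	if !has {
		t.Fatal("expected block to be present in the mock blockstore")
	}
}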
func newEngine(ctx context.Context, idStr string) peerAndEngine {
	return peerAndEngine{
		Peer: peer.ID(idStr),
		//Strategy: New(true),
		Engine: NewEngine(ctx,
			blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))),
	}
}
func TestHasIsBloomCached(t *testing.T) {
	cd := &callbackDatastore{f: func() {}, ds: ds.NewMapDatastore()}
	bs := NewBlockstore(syncds.MutexWrap(cd))

	for i := 0; i < 1000; i++ {
		bs.Put(blocks.NewBlock([]byte(fmt.Sprintf("data: %d", i))))
	}
	ctx, _ := context.WithTimeout(context.Background(), 1*time.Second)
	cachedbs, err := testBloomCached(bs, ctx)
	if err != nil {
		t.Fatal(err)
	}

	select {
	case <-cachedbs.rebuildChan:
	case <-ctx.Done():
		t.Fatalf("Timeout waiting for rebuild: %d", cachedbs.bloom.ElementsAdded())
	}

	cacheFails := 0
	cd.SetFunc(func() {
		cacheFails++
	})

	for i := 0; i < 1000; i++ {
		cachedbs.Has(blocks.NewBlock([]byte(fmt.Sprintf("data: %d", i+2000))).Cid())
	}

	if float64(cacheFails)/float64(1000) > float64(0.05) {
		t.Fatal("Bloom filter has cache miss rate of more than 5%")
	}

	cacheFails = 0
	block := blocks.NewBlock([]byte("newBlock"))

	cachedbs.PutMany([]blocks.Block{block})
	if cacheFails != 2 {
		t.Fatalf("expected two datastore hits: %d", cacheFails)
	}
	cachedbs.Put(block)
	if cacheFails != 3 {
		t.Fatalf("expected datastore hit: %d", cacheFails)
	}

	if has, err := cachedbs.Has(block.Cid()); !has || err != nil {
		t.Fatal("has gave wrong response")
	}

	bl, err := cachedbs.Get(block.Cid())
	if err != nil {
		t.Fatal("there shouldn't be an error")
	}
	if bl.String() != block.String() {
		t.Fatal("block data doesn't match")
	}
}
func createStores(t *testing.T) (*arccache, *blockstore, *callbackDatastore) {
	cd := &callbackDatastore{f: func() {}, ds: ds.NewMapDatastore()}
	bs := NewBlockstore(syncds.MutexWrap(cd))
	arc, err := testArcCached(bs, nil)
	if err != nil {
		t.Fatal(err)
	}
	return arc, bs, cd
}
func TestWriteThroughWorks(t *testing.T) {
	bstore := &PutCountingBlockstore{
		blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())),
		0,
	}
	bstore2 := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
	exch := offline.Exchange(bstore2)
	bserv := NewWriteThrough(bstore, exch)
	bgen := butil.NewBlockGenerator()

	block := bgen.Next()

	t.Logf("PutCounter: %d", bstore.PutCounter)
	bserv.AddBlock(block)
	if bstore.PutCounter != 1 {
		t.Fatalf("expected just one Put call, have: %d", bstore.PutCounter)
	}

	bserv.AddBlock(block)
	if bstore.PutCounter != 2 {
		t.Fatalf("Put should have been called again; expected 2, got: %d", bstore.PutCounter)
	}
}
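// PutCountingBlockstore is referenced above but not defined in this section.
// A minimal sketch of such a wrapper, assuming it embeds blockstore.Blockstore
// (matching the two-field composite literal used in the test) and counts calls
// to Put, could look like this:
type PutCountingBlockstore struct {
	blockstore.Blockstore
	PutCounter int
}

// Put increments the counter, then delegates to the wrapped blockstore.
func (bs *PutCountingBlockstore) Put(block blocks.Block) error {
	bs.PutCounter++
	return bs.Blockstore.Put(block)
}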
func TestFlush(t *testing.T) {
	dstore := dssync.MutexWrap(ds.NewMapDatastore())
	bstore := blockstore.NewBlockstore(dstore)
	bserv := bs.New(bstore, offline.Exchange(bstore))

	dserv := mdag.NewDAGService(bserv)
	p := NewPinner(dstore, dserv, dserv)
	_, k := randNode()

	p.PinWithMode(k, Recursive)
	if err := p.Flush(); err != nil {
		t.Fatal(err)
	}
	assertPinned(t, p, k, "expected key to still be pinned")
}
func TestOutboxClosedWhenEngineClosed(t *testing.T) {
	t.SkipNow() // TODO implement *Engine.Close
	e := NewEngine(context.Background(),
		blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())))
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		for nextEnvelope := range e.Outbox() {
			<-nextEnvelope
		}
		wg.Done()
	}()
	// e.Close()
	wg.Wait()
	if _, ok := <-e.Outbox(); ok {
		t.Fatal("channel should be closed")
	}
}
func TestPartnerWantsThenCancels(t *testing.T) {
	numRounds := 10
	if testing.Short() {
		numRounds = 1
	}
	alphabet := strings.Split("abcdefghijklmnopqrstuvwxyz", "")
	vowels := strings.Split("aeiou", "")

	type testCase [][]string
	testcases := []testCase{
		{
			alphabet, vowels,
		},
		{
			alphabet, stringsComplement(alphabet, vowels),
		},
	}

	bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
	for _, letter := range alphabet {
		block := blocks.NewBlock([]byte(letter))
		if err := bs.Put(block); err != nil {
			t.Fatal(err)
		}
	}

	for i := 0; i < numRounds; i++ {
		for _, testcase := range testcases {
			set := testcase[0]
			cancels := testcase[1]
			keeps := stringsComplement(set, cancels)

			e := NewEngine(context.Background(), bs)
			partner := testutil.RandPeerIDFatal(t)

			partnerWants(e, set, partner)
			partnerCancels(e, cancels, partner)
			if err := checkHandledInOrder(t, e, keeps); err != nil {
				t.Logf("run #%d of %d", i, numRounds)
				t.Fatal(err)
			}
		}
	}
}
func TestPinRecursiveFail(t *testing.T) {
	ctx := context.Background()
	dstore := dssync.MutexWrap(ds.NewMapDatastore())
	bstore := blockstore.NewBlockstore(dstore)
	bserv := bs.New(bstore, offline.Exchange(bstore))
	dserv := mdag.NewDAGService(bserv)

	p := NewPinner(dstore, dserv, dserv)

	a, _ := randNode()
	b, _ := randNode()
	err := a.AddNodeLinkClean("child", b)
	if err != nil {
		t.Fatal(err)
	}

	// NOTE: This isn't a time-based test, we expect the pin to fail
	mctx, _ := context.WithTimeout(ctx, time.Millisecond)
	err = p.Pin(mctx, a, true)
	if err == nil {
		t.Fatal("should have failed to pin here")
	}

	_, err = dserv.Add(b)
	if err != nil {
		t.Fatal(err)
	}

	_, err = dserv.Add(a)
	if err != nil {
		t.Fatal(err)
	}

	// this one is time-based... but shouldn't cause any issues
	mctx, _ = context.WithTimeout(ctx, time.Second)
	err = p.Pin(mctx, a, true)
	if err != nil {
		t.Fatal(err)
	}
}
func TestPutProviderDoesntResultInDuplicates(t *testing.T) {
	routingBackend := datastore.NewMapDatastore()
	k := key.Key("foo")
	put := []*dhtpb.Message_Peer{
		convPeer("bob", "127.0.0.1/tcp/4001"),
		convPeer("alice", "10.0.0.10/tcp/4001"),
	}
	if err := putRoutingProviders(routingBackend, k, put); err != nil {
		t.Fatal(err)
	}
	if err := putRoutingProviders(routingBackend, k, put); err != nil {
		t.Fatal(err)
	}

	got, err := getRoutingProviders(routingBackend, k)
	if err != nil {
		t.Fatal(err)
	}
	if len(got) != 2 {
		t.Fatal("should be 2 values, but there are", len(got))
	}
}
func TestBlocks(t *testing.T) {
	bstore := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
	bs := New(bstore, offline.Exchange(bstore))
	defer bs.Close()

	o := newObject([]byte("beep boop"))
	h := cid.NewCidV0(u.Hash([]byte("beep boop")))
	if !o.Cid().Equals(h) {
		t.Error("Block key and data multihash key not equal")
	}

	k, err := bs.AddBlock(o)
	if err != nil {
		t.Error("failed to add block to BlockService", err)
		return
	}

	if !k.Equals(o.Cid()) {
		t.Error("returned key is not equal to block key", err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
	defer cancel()
	b2, err := bs.GetBlock(ctx, o.Cid())
	if err != nil {
		t.Error("failed to retrieve block from BlockService", err)
		return
	}

	if !o.Cid().Equals(b2.Cid()) {
		t.Error("Block keys not equal.")
	}

	if !bytes.Equal(o.RawData(), b2.RawData()) {
		t.Error("Block data is not equal.")
	}
}
func TestPrexistingRecord(t *testing.T) {
	dstore := dssync.MutexWrap(ds.NewMapDatastore())
	d := mockrouting.NewServer().ClientWithDatastore(context.Background(), testutil.RandIdentityOrFatal(t), dstore)

	resolver := NewRoutingResolver(d, 0)
	publisher := NewRoutingPublisher(d, dstore)

	privk, pubk, err := testutil.RandTestKeyPair(512)
	if err != nil {
		t.Fatal(err)
	}

	id, err := peer.IDFromPublicKey(pubk)
	if err != nil {
		t.Fatal(err)
	}

	// Make a good record and put it in the datastore
	h := path.FromString("/ipfs/QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN")
	eol := time.Now().Add(time.Hour)
	err = PutRecordToRouting(context.Background(), privk, h, 0, eol, d, id)
	if err != nil {
		t.Fatal(err)
	}

	// Now, with an old record in the system already, try and publish a new one
	err = publisher.Publish(context.Background(), privk, h)
	if err != nil {
		t.Fatal(err)
	}

	err = verifyCanResolve(resolver, id.Pretty(), h)
	if err != nil {
		t.Fatal(err)
	}
}
func TestReprovide(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	mrserv := mock.NewServer()

	idA := testutil.RandIdentityOrFatal(t)
	idB := testutil.RandIdentityOrFatal(t)

	clA := mrserv.Client(idA)
	clB := mrserv.Client(idB)

	bstore := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))

	blk := blocks.NewBlock([]byte("this is a test"))
	bstore.Put(blk)

	reprov := NewReprovider(clA, bstore)
	err := reprov.Reprovide(ctx)
	if err != nil {
		t.Fatal(err)
	}

	provs, err := clB.FindProviders(ctx, blk.Cid())
	if err != nil {
		t.Fatal(err)
	}

	if len(provs) == 0 {
		t.Fatal("Should have gotten a provider")
	}

	if provs[0].ID != idA.ID() {
		t.Fatal("Somehow got the wrong peer back as a provider.")
	}
}
func (rs *s) Client(p testutil.Identity) Client {
	return rs.ClientWithDatastore(context.Background(), p, dssync.MutexWrap(ds.NewMapDatastore()))
}
func NewMemoryDagService() dag.DAGService {
	// build mem-datastore for editor's intermediary nodes
	bs := bstore.NewBlockstore(syncds.MutexWrap(ds.NewMapDatastore()))
	bsrv := bserv.New(bs, offline.Exchange(bs))
	return dag.NewDAGService(bsrv)
}
func getDagserv(t *testing.T) merkledag.DAGService {
	db := dssync.MutexWrap(ds.NewMapDatastore())
	bs := bstore.NewBlockstore(db)
	blockserv := bserv.New(bs, offline.Exchange(bs))
	return merkledag.NewDAGService(blockserv)
}
func (rs *mocknetserver) Client(p testutil.Identity) Client {
	return rs.ClientWithDatastore(context.TODO(), p, ds.NewMapDatastore())
}
func pubFunc(c *cli.Context) error {
	var priv ci.PrivKey
	if kf := c.String("key"); kf != "" {
		pk, err := loadKeyFile(kf)
		if err != nil {
			return err
		}
		priv = pk
	} else {
		return fmt.Errorf("must specify key file with '--key'")
	}

	if !c.Args().Present() {
		return fmt.Errorf("must specify path to publish")
	}

	p, err := path.ParsePath(c.Args().First())
	if err != nil {
		return err
	}

	interv := c.String("interval")
	var ticktime time.Duration = time.Hour * 12
	if interv != "" {
		d, err := time.ParseDuration(interv)
		if err != nil {
			return err
		}
		ticktime = d
	}

	bs, err := getBootstrapAddrs()
	if err != nil {
		return err
	}

	dstore := ds.NewMapDatastore()
	dht, err := spawnDHT(priv, dstore, bs)
	if err != nil {
		return err
	}

	nsys := namesys.NewRoutingPublisher(dht, dstore)

	// publish once in either case
	err = nsys.Publish(context.TODO(), priv, p)
	if err != nil {
		return err
	}

	if !c.Bool("daemon") {
		return nil
	}

	for range time.Tick(ticktime) {
		fmt.Println("publishing...")
		before := time.Now()
		err = nsys.Publish(context.TODO(), priv, p)
		if err != nil {
			// TODO: probably don't want to actually error out and die here
			return err
		}
		fmt.Println("publish took: ", time.Now().Sub(before))
	}
	return nil
}
func TestPinnerBasic(t *testing.T) {
	ctx := context.Background()

	dstore := dssync.MutexWrap(ds.NewMapDatastore())
	bstore := blockstore.NewBlockstore(dstore)
	bserv := bs.New(bstore, offline.Exchange(bstore))

	dserv := mdag.NewDAGService(bserv)

	// TODO does pinner need to share datastore with blockservice?
	p := NewPinner(dstore, dserv, dserv)

	a, ak := randNode()
	_, err := dserv.Add(a)
	if err != nil {
		t.Fatal(err)
	}

	// Pin A{}
	err = p.Pin(ctx, a, false)
	if err != nil {
		t.Fatal(err)
	}

	assertPinned(t, p, ak, "Failed to find key")

	// create new node c, to be indirectly pinned through b
	c, _ := randNode()
	ck, err := dserv.Add(c)
	if err != nil {
		t.Fatal(err)
	}

	// Create new node b, to be parent to a and c
	b, _ := randNode()
	err = b.AddNodeLink("child", a)
	if err != nil {
		t.Fatal(err)
	}

	err = b.AddNodeLink("otherchild", c)
	if err != nil {
		t.Fatal(err)
	}

	_, err = dserv.Add(b)
	if err != nil {
		t.Fatal(err)
	}

	// recursively pin B{A,C}
	err = p.Pin(ctx, b, true)
	if err != nil {
		t.Fatal(err)
	}

	assertPinned(t, p, ck, "child of recursively pinned node not found")

	bk := b.Cid()
	assertPinned(t, p, bk, "Recursively pinned node not found..")

	d, _ := randNode()
	d.AddNodeLink("a", a)
	d.AddNodeLink("c", c)

	e, _ := randNode()
	d.AddNodeLink("e", e)

	// Must be in dagserv for unpin to work
	_, err = dserv.Add(e)
	if err != nil {
		t.Fatal(err)
	}

	_, err = dserv.Add(d)
	if err != nil {
		t.Fatal(err)
	}

	// Add D{A,C,E}
	err = p.Pin(ctx, d, true)
	if err != nil {
		t.Fatal(err)
	}

	dk := d.Cid()
	assertPinned(t, p, dk, "pinned node not found.")

	// Test recursive unpin
	err = p.Unpin(ctx, dk, true)
	if err != nil {
		t.Fatal(err)
	}

	err = p.Flush()
	if err != nil {
		t.Fatal(err)
	}

	np, err := LoadPinner(dstore, dserv, dserv)
	if err != nil {
		t.Fatal(err)
	}

	// Test directly pinned
	assertPinned(t, np, ak, "Could not find pinned node!")

	// Test recursively pinned
	assertPinned(t, np, bk, "could not find recursively pinned node")
}
func bstore() blockstore.Blockstore {
	return blockstore.NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore()))
}