// Session creates a test bitswap session.
//
// NB: It's easy to make mistakes by providing the same peer ID to two
// different sessions. To safeguard against this, use the SessionGenerator to
// generate sessions. It's just a much better idea.
func Session(ctx context.Context, net tn.Network, p testutil.Identity) Instance {
	bsdelay := delay.Fixed(0)
	const bloomSize = 512
	const writeCacheElems = 100

	adapter := net.Adapter(p)
	dstore := ds_sync.MutexWrap(datastore2.WithDelay(ds.NewMapDatastore(), bsdelay))

	bstore, err := blockstore.CachedBlockstore(blockstore.NewBlockstore(
		ds_sync.MutexWrap(dstore)), ctx, blockstore.DefaultCacheOpts())
	if err != nil {
		panic(err.Error()) // FIXME perhaps change signature and return error.
	}

	const alwaysSendToPeer = true

	bs := New(ctx, p.ID(), adapter, bstore, alwaysSendToPeer).(*Bitswap)

	return Instance{
		Peer:            p.ID(),
		Exchange:        bs,
		blockstore:      bstore,
		blockstoreDelay: bsdelay,
	}
}
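// The sketch below illustrates the safer path the note above recommends. It is
// not part of the original file and assumes the SessionGenerator API exposed by
// this test-util package (NewTestSessionGenerator, Instances, Close); the exact
// names may differ between versions.
func exampleSessionGenerator(t *testing.T, net tn.Network) {
	g := NewTestSessionGenerator(net) // hands out a fresh identity per session
	defer g.Close()

	inst := g.Instances(2) // two sessions with guaranteed-distinct peer IDs
	if inst[0].Peer == inst[1].Peer {
		t.Fatal("generator should never reuse a peer ID")
	}
}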
func TestRoutingResolve(t *testing.T) {
	dstore := dssync.MutexWrap(ds.NewMapDatastore())
	serv := mockrouting.NewServer()
	id := testutil.RandIdentityOrFatal(t)
	d := serv.ClientWithDatastore(context.Background(), id, dstore)

	resolver := NewRoutingResolver(d, 0)
	publisher := NewRoutingPublisher(d, dstore)

	privk, pubk, err := testutil.RandTestKeyPair(512)
	if err != nil {
		t.Fatal(err)
	}

	h := path.FromString("/ipfs/QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN")
	err = publisher.Publish(context.Background(), privk, h)
	if err != nil {
		t.Fatal(err)
	}

	pubkb, err := pubk.Bytes()
	if err != nil {
		t.Fatal(err)
	}

	pkhash := u.Hash(pubkb)
	res, err := resolver.Resolve(context.Background(), key.Key(pkhash).B58String())
	if err != nil {
		t.Fatal(err)
	}

	if res != h {
		t.Fatal("Got back incorrect value.")
	}
}
func TestHasIsBloomCached(t *testing.T) {
	cd := &callbackDatastore{f: func() {}, ds: ds.NewMapDatastore()}
	bs := NewBlockstore(syncds.MutexWrap(cd))

	for i := 0; i < 1000; i++ {
		bs.Put(blocks.NewBlock([]byte(fmt.Sprintf("data: %d", i))))
	}

	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
	defer cancel()

	cachedbs, err := testBloomCached(bs, ctx)
	if err != nil {
		t.Fatal(err)
	}

	select {
	case <-cachedbs.rebuildChan:
	case <-ctx.Done():
		t.Fatalf("Timeout waiting for rebuild: %d", cachedbs.bloom.ElementsAdded())
	}

	cacheFails := 0
	cd.SetFunc(func() {
		cacheFails++
	})

	for i := 0; i < 1000; i++ {
		cachedbs.Has(blocks.NewBlock([]byte(fmt.Sprintf("data: %d", i+2000))).Key())
	}

	if float64(cacheFails)/float64(1000) > float64(0.05) {
		t.Fatal("Bloom filter has a cache miss rate of more than 5%")
	}
}
func TestGetWhenKeyIsEmptyString(t *testing.T) {
	bs := NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore()))
	_, err := bs.Get(key.Key(""))
	if err != ErrNotFound {
		t.Fail()
	}
}
func TestAllKeysRespectsContext(t *testing.T) {
	N := 100

	d := &queryTestDS{ds: ds.NewMapDatastore()}
	bs, _ := newBlockStoreWithKeys(t, d, N)

	started := make(chan struct{}, 1)
	done := make(chan struct{}, 1)
	errors := make(chan error, 100)

	getKeys := func(ctx context.Context) {
		started <- struct{}{}
		ch, err := bs.AllKeysChan(ctx) // once without cancelling
		if err != nil {
			errors <- err
		}
		_ = collect(ch)
		done <- struct{}{}
		errors <- nil // a nil one to signal break
	}

	var results dsq.Results
	var resultsmu = make(chan struct{})
	resultChan := make(chan dsq.Result)
	d.SetFunc(func(q dsq.Query) (dsq.Results, error) {
		results = dsq.ResultsWithChan(q, resultChan)
		resultsmu <- struct{}{}
		return results, nil
	})

	go getKeys(context.Background())

	// make sure it's waiting.
	<-started
	<-resultsmu
	select {
	case <-done:
		t.Fatal("sync is wrong")
	case <-results.Process().Closing():
		t.Fatal("should not be closing")
	case <-results.Process().Closed():
		t.Fatal("should not be closed")
	default:
	}

	e := dsq.Entry{Key: BlockPrefix.ChildString("foo").String()}
	resultChan <- dsq.Result{Entry: e} // let it go.
	close(resultChan)
	<-done                       // should be done now.
	<-results.Process().Closed() // should be closed now

	// print any errors
	for err := range errors {
		if err == nil {
			break
		}
		t.Error(err)
	}
}
func (cfg *BuildCfg) fillDefaults() error {
	if cfg.Repo != nil && cfg.NilRepo {
		return errors.New("cannot set a repo and specify nilrepo at the same time")
	}

	if cfg.Repo == nil {
		var d ds.Datastore
		d = ds.NewMapDatastore()
		if cfg.NilRepo {
			d = ds.NewNullDatastore()
		}
		r, err := defaultRepo(dsync.MutexWrap(d))
		if err != nil {
			return err
		}
		cfg.Repo = r
	}

	if cfg.Routing == nil {
		cfg.Routing = DHTOption
	}

	if cfg.Host == nil {
		cfg.Host = DefaultHostOption
	}

	return nil
}
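// Hedged usage sketch, not part of the original source: it assumes the
// exported NewNode constructor in this package applies fillDefaults, so a
// mostly zero-value BuildCfg with NilRepo set yields an in-memory node backed
// by a null datastore, with DHTOption routing and DefaultHostOption supplied
// as defaults. Field and constructor names are taken from the surrounding
// package and may differ between versions.
func exampleOfflineNode(ctx context.Context) (*IpfsNode, error) {
	cfg := &BuildCfg{
		NilRepo: true, // no persistent repo; fillDefaults wraps a NullDatastore
		Online:  false,
	}
	return NewNode(ctx, cfg)
}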
func TestPinRecursiveFail(t *testing.T) {
	ctx := context.Background()
	dstore := dssync.MutexWrap(ds.NewMapDatastore())
	bstore := blockstore.NewBlockstore(dstore)
	bserv := bs.New(bstore, offline.Exchange(bstore))

	dserv := mdag.NewDAGService(bserv)

	p := NewPinner(dstore, dserv)

	a, _ := randNode()
	b, _ := randNode()
	err := a.AddNodeLinkClean("child", b)
	if err != nil {
		t.Fatal(err)
	}

	// NOTE: this isn't a time-based test; we expect the pin to fail.
	mctx, cancel := context.WithTimeout(ctx, time.Millisecond)
	defer cancel()
	err = p.Pin(mctx, a, true)
	if err == nil {
		t.Fatal("should have failed to pin here")
	}

	_, err = dserv.Add(b)
	if err != nil {
		t.Fatal(err)
	}

	// this one is time-based... but shouldn't cause any issues
	mctx, cancel = context.WithTimeout(ctx, time.Second)
	defer cancel()
	err = p.Pin(mctx, a, true)
	if err != nil {
		t.Fatal(err)
	}
}
func getMockDagServ(t testing.TB) mdag.DAGService {
	dstore := ds.NewMapDatastore()
	tsds := sync.MutexWrap(dstore)
	bstore := blockstore.NewBlockstore(tsds)
	bserv := bs.New(bstore, offline.Exchange(bstore))
	return mdag.NewDAGService(bserv)
}
func TestDuplicateSemantics(t *testing.T) {
	ctx := context.Background()
	dstore := dssync.MutexWrap(ds.NewMapDatastore())
	bstore := blockstore.NewBlockstore(dstore)
	bserv := bs.New(bstore, offline.Exchange(bstore))

	dserv := mdag.NewDAGService(bserv)

	// TODO does pinner need to share datastore with blockservice?
	p := NewPinner(dstore, dserv)

	a, _ := randNode()
	_, err := dserv.Add(a)
	if err != nil {
		t.Fatal(err)
	}

	// pin it recursively
	err = p.Pin(ctx, a, true)
	if err != nil {
		t.Fatal(err)
	}

	// pinning directly should fail
	err = p.Pin(ctx, a, false)
	if err == nil {
		t.Fatal("expected direct pin to fail")
	}

	// pinning recursively again should succeed
	err = p.Pin(ctx, a, true)
	if err != nil {
		t.Fatal(err)
	}
}
func TestProvidersDatastore(t *testing.T) {
	old := lruCacheSize
	lruCacheSize = 10
	defer func() { lruCacheSize = old }()

	ctx := context.Background()
	mid := peer.ID("testing")
	p := NewProviderManager(ctx, mid, ds.NewMapDatastore())
	defer p.proc.Close()

	friend := peer.ID("friend")
	var keys []key.Key
	for i := 0; i < 100; i++ {
		k := key.Key(fmt.Sprint(i))
		keys = append(keys, k)
		p.AddProvider(ctx, k, friend)
	}

	for _, k := range keys {
		resp := p.GetProviders(ctx, k)
		if len(resp) != 1 {
			t.Fatal("Could not retrieve provider.")
		}
		if resp[0] != friend {
			t.Fatal("expected provider to be 'friend'")
		}
	}
}
func getMockDagServAndBstore(t testing.TB) (mdag.DAGService, blockstore.GCBlockstore) {
	dstore := ds.NewMapDatastore()
	tsds := sync.MutexWrap(dstore)
	bstore := blockstore.NewBlockstore(tsds)
	bserv := bs.New(bstore, offline.Exchange(bstore))
	dserv := mdag.NewDAGService(bserv)
	return dserv, bstore
}
func (pn *peernet) Adapter(p testutil.Identity) bsnet.BitSwapNetwork {
	client, err := pn.Mocknet.AddPeer(p.PrivateKey(), p.Address())
	if err != nil {
		panic(err.Error())
	}
	routing := pn.routingserver.ClientWithDatastore(context.TODO(), p, ds.NewMapDatastore())
	return bsnet.NewFromIpfsHost(client, routing)
}
func newEngine(ctx context.Context, idStr string) peerAndEngine {
	return peerAndEngine{
		Peer: peer.ID(idStr),
		//Strategy: New(true),
		Engine: NewEngine(ctx,
			blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))),
	}
}
func TestGetWhenKeyNotPresent(t *testing.T) {
	bs := NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore()))
	_, err := bs.Get(key.Key("not present"))

	if err != nil {
		t.Log("As expected, block is not present")
		return
	}
	t.Fail()
}
func TestReturnsErrorWhenSizeNegative(t *testing.T) {
	bs := NewBlockstore(syncds.MutexWrap(ds.NewMapDatastore()))
	_, err := bloomCached(bs, context.TODO(), 100, 1, -1)
	if err == nil {
		t.Fail()
	}
	_, err = bloomCached(bs, context.TODO(), -1, 1, 100)
	if err == nil {
		t.Fail()
	}
}
func getDagservAndPinner(t *testing.T) dagservAndPinner {
	db := dssync.MutexWrap(ds.NewMapDatastore())
	bs := bstore.NewBlockstore(db)
	blockserv := bserv.New(bs, offline.Exchange(bs))
	dserv := NewDAGService(blockserv)
	mpin := pin.NewPinner(db, dserv)
	return dagservAndPinner{
		ds: dserv,
		mp: mpin,
	}
}
func TestProviderManager(t *testing.T) {
	ctx := context.Background()
	mid := peer.ID("testing")
	p := NewProviderManager(ctx, mid, ds.NewMapDatastore())
	a := key.Key("test")
	p.AddProvider(ctx, a, peer.ID("testingprovider"))
	resp := p.GetProviders(ctx, a)
	if len(resp) != 1 {
		t.Fatal("Could not retrieve provider.")
	}
	p.proc.Close()
}
func TestProvidesExpire(t *testing.T) {
	pval := ProvideValidity
	cleanup := defaultCleanupInterval
	ProvideValidity = time.Second / 2
	defaultCleanupInterval = time.Second / 2
	defer func() {
		ProvideValidity = pval
		defaultCleanupInterval = cleanup
	}()

	ctx := context.Background()
	mid := peer.ID("testing")
	p := NewProviderManager(ctx, mid, ds.NewMapDatastore())

	peers := []peer.ID{"a", "b"}
	var keys []key.Key
	for i := 0; i < 10; i++ {
		k := key.Key(fmt.Sprint(i))
		keys = append(keys, k)
		p.AddProvider(ctx, k, peers[0])
		p.AddProvider(ctx, k, peers[1])
	}

	for i := 0; i < 10; i++ {
		out := p.GetProviders(ctx, keys[i])
		if len(out) != 2 {
			t.Fatal("expected providers to still be there")
		}
	}

	time.Sleep(time.Second)
	for i := 0; i < 10; i++ {
		out := p.GetProviders(ctx, keys[i])
		if len(out) > 2 {
			t.Fatal("expected providers to be cleaned up")
		}
	}

	if p.providers.Len() != 0 {
		t.Fatal("providers map not cleaned up")
	}

	allprovs, err := p.getAllProvKeys()
	if err != nil {
		t.Fatal(err)
	}

	if len(allprovs) != 0 {
		t.Fatal("expected everything to be cleaned out of the datastore")
	}
}
func TestValueTypeMismatch(t *testing.T) {
	block := blocks.NewBlock([]byte("some data"))

	datastore := ds.NewMapDatastore()
	k := BlockPrefix.Child(block.Key().DsKey())
	datastore.Put(k, "data that isn't a block!")

	blockstore := NewBlockstore(ds_sync.MutexWrap(datastore))

	_, err := blockstore.Get(block.Key())
	if err != ValueTypeMismatch {
		t.Fatal(err)
	}
}
func setupDHT(ctx context.Context, t *testing.T) *IpfsDHT {
	h := netutil.GenHostSwarm(t, ctx)

	dss := dssync.MutexWrap(ds.NewMapDatastore())
	d := NewDHT(ctx, h, dss)

	d.Validator["v"] = &record.ValidChecker{
		Func: func(key.Key, []byte) error {
			return nil
		},
		Sign: false,
	}
	return d
}
func TestRuntimeHashing(t *testing.T) {
	bs := NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore()))
	bl := blocks.NewBlock([]byte("some data"))
	blBad, err := blocks.NewBlockWithHash([]byte("some other data"), bl.Key().ToMultihash())
	if err != nil {
		t.Fatal("Debug is enabled")
	}

	bs.Put(blBad)
	bs.RuntimeHashing(true)

	if _, err := bs.Get(bl.Key()); err != ErrHashMismatch {
		t.Fatalf("Expected '%v' got '%v'\n", ErrHashMismatch, err)
	}
}
func TestFlush(t *testing.T) {
	dstore := dssync.MutexWrap(ds.NewMapDatastore())
	bstore := blockstore.NewBlockstore(dstore)
	bserv := bs.New(bstore, offline.Exchange(bstore))

	dserv := mdag.NewDAGService(bserv)
	p := NewPinner(dstore, dserv)
	_, k := randNode()

	p.PinWithMode(k, Recursive)
	if err := p.Flush(); err != nil {
		t.Fatal(err)
	}
	assertPinned(t, p, k, "expected key to still be pinned")
}
func TestElideDuplicateWrite(t *testing.T) {
	cd := &callbackDatastore{f: func() {}, ds: ds.NewMapDatastore()}
	bs := NewBlockstore(syncds.MutexWrap(cd))
	cachedbs, err := testBloomCached(bs, nil)
	if err != nil {
		t.Fatal(err)
	}

	b1 := blocks.NewBlock([]byte("foo"))

	cachedbs.Put(b1)
	cd.SetFunc(func() {
		t.Fatal("write hit the datastore")
	})
	cachedbs.Put(b1)
}
func TestPutThenGetBlock(t *testing.T) {
	bs := NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore()))
	block := blocks.NewBlock([]byte("some data"))

	err := bs.Put(block)
	if err != nil {
		t.Fatal(err)
	}

	blockFromBlockstore, err := bs.Get(block.Key())
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(block.Data(), blockFromBlockstore.Data()) {
		t.Fail()
	}
}
func newBlockStoreWithKeys(t *testing.T, d ds.Datastore, N int) (Blockstore, []key.Key) {
	if d == nil {
		d = ds.NewMapDatastore()
	}
	bs := NewBlockstore(ds_sync.MutexWrap(d))

	keys := make([]key.Key, N)
	for i := 0; i < N; i++ {
		block := blocks.NewBlock([]byte(fmt.Sprintf("some data %d", i)))
		err := bs.Put(block)
		if err != nil {
			t.Fatal(err)
		}
		keys[i] = block.Key()
	}
	return bs, keys
}
func TestBlocks(t *testing.T) {
	bstore := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
	bs := New(bstore, offline.Exchange(bstore))
	defer bs.Close()

	_, err := bs.GetBlock(context.Background(), key.Key(""))
	if err != ErrNotFound {
		t.Error("Empty String Key should error", err)
	}

	b := blocks.NewBlock([]byte("beep boop"))
	h := u.Hash([]byte("beep boop"))
	if !bytes.Equal(b.Multihash(), h) {
		t.Error("Block Multihash and data multihash not equal")
	}

	if b.Key() != key.Key(h) {
		t.Error("Block key and data multihash key not equal")
	}

	k, err := bs.AddBlock(b)
	if err != nil {
		t.Error("failed to add block to BlockService", err)
		return
	}

	if k != b.Key() {
		t.Error("returned key is not equal to block key", err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
	defer cancel()
	b2, err := bs.GetBlock(ctx, b.Key())
	if err != nil {
		t.Error("failed to retrieve block from BlockService", err)
		return
	}

	if b.Key() != b2.Key() {
		t.Error("Block keys not equal.")
	}

	if !bytes.Equal(b.Data(), b2.Data()) {
		t.Error("Block data is not equal.")
	}
}
func TestOutboxClosedWhenEngineClosed(t *testing.T) {
	t.SkipNow() // TODO implement *Engine.Close
	e := NewEngine(context.Background(), blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())))
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		for nextEnvelope := range e.Outbox() {
			<-nextEnvelope
		}
		wg.Done()
	}()
	// e.Close()
	wg.Wait()
	if _, ok := <-e.Outbox(); ok {
		t.Fatal("channel should be closed")
	}
}
func TestPartnerWantsThenCancels(t *testing.T) {
	numRounds := 10
	if testing.Short() {
		numRounds = 1
	}
	alphabet := strings.Split("abcdefghijklmnopqrstuvwxyz", "")
	vowels := strings.Split("aeiou", "")

	type testCase [][]string
	testcases := []testCase{
		{
			alphabet, vowels,
		},
		{
			alphabet, stringsComplement(alphabet, vowels),
		},
	}

	bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
	for _, letter := range alphabet {
		block := blocks.NewBlock([]byte(letter))
		if err := bs.Put(block); err != nil {
			t.Fatal(err)
		}
	}

	for i := 0; i < numRounds; i++ {
		for _, testcase := range testcases {
			set := testcase[0]
			cancels := testcase[1]
			keeps := stringsComplement(set, cancels)

			e := NewEngine(context.Background(), bs)
			partner := testutil.RandPeerIDFatal(t)

			partnerWants(e, set, partner)
			partnerCancels(e, cancels, partner)
			if err := checkHandledInOrder(t, e, keeps); err != nil {
				t.Logf("run #%d of %d", i, numRounds)
				t.Fatal(err)
			}
		}
	}
}
func TestProvidersSerialization(t *testing.T) {
	dstore := ds.NewMapDatastore()

	k := key.Key("my key!")
	p1 := peer.ID("peer one")
	p2 := peer.ID("peer two")
	pt1 := time.Now()
	pt2 := pt1.Add(time.Hour)

	err := writeProviderEntry(dstore, k, p1, pt1)
	if err != nil {
		t.Fatal(err)
	}

	err = writeProviderEntry(dstore, k, p2, pt2)
	if err != nil {
		t.Fatal(err)
	}

	pset, err := loadProvSet(dstore, k)
	if err != nil {
		t.Fatal(err)
	}

	lt1, ok := pset.set[p1]
	if !ok {
		t.Fatal("failed to load set correctly")
	}

	if pt1 != lt1 {
		t.Fatal("time wasn't serialized correctly")
	}

	lt2, ok := pset.set[p2]
	if !ok {
		t.Fatal("failed to load set correctly")
	}

	if pt2 != lt2 {
		t.Fatal("time wasn't serialized correctly")
	}
}
func TestPutProviderDoesntResultInDuplicates(t *testing.T) {
	routingBackend := datastore.NewMapDatastore()
	k := key.Key("foo")
	put := []*dhtpb.Message_Peer{
		convPeer("bob", "127.0.0.1/tcp/4001"),
		convPeer("alice", "10.0.0.10/tcp/4001"),
	}
	if err := putRoutingProviders(routingBackend, k, put); err != nil {
		t.Fatal(err)
	}
	if err := putRoutingProviders(routingBackend, k, put); err != nil {
		t.Fatal(err)
	}

	got, err := getRoutingProviders(routingBackend, k)
	if err != nil {
		t.Fatal(err)
	}
	if len(got) != 2 {
		t.Fatal("should be 2 values, but there are", len(got))
	}
}