func TestValidAfter(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	pi := testutil.RandIdentityOrFatal(t)
	key := cid.NewCidV0(u.Hash([]byte("mock key")))
	conf := DelayConfig{
		ValueVisibility: delay.Fixed(1 * time.Hour),
		Query:           delay.Fixed(0),
	}

	rs := NewServerWithDelay(conf)

	rs.Client(pi).Provide(ctx, key)

	var providers []pstore.PeerInfo
	providers, err := rs.Client(pi).FindProviders(ctx, key)
	if err != nil {
		t.Fatal(err)
	}
	if len(providers) > 0 {
		t.Fail()
	}

	conf.ValueVisibility.Set(0)
	providers, err = rs.Client(pi).FindProviders(ctx, key)
	if err != nil {
		t.Fatal(err)
	}
	t.Log("providers", providers)
	if len(providers) != 1 {
		t.Fail()
	}
}
func TestValidAfter(t *testing.T) {
	pi := testutil.RandIdentityOrFatal(t)
	var key = key.Key("mock key")
	var ctx = context.Background()
	conf := DelayConfig{
		ValueVisibility: delay.Fixed(1 * time.Hour),
		Query:           delay.Fixed(0),
	}

	rs := NewServerWithDelay(conf)

	rs.Client(pi).Provide(ctx, key)

	var providers []peer.PeerInfo
	providers, err := rs.Client(pi).FindProviders(ctx, key)
	if err != nil {
		t.Fatal(err)
	}
	if len(providers) > 0 {
		t.Fail()
	}

	conf.ValueVisibility.Set(0)
	providers, err = rs.Client(pi).FindProviders(ctx, key)
	if err != nil {
		t.Fatal(err)
	}
	t.Log("providers", providers)
	if len(providers) != 1 {
		t.Fail()
	}
}
func TestBasicBitswap(t *testing.T) {
	net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
	sg := NewTestSessionGenerator(net)
	defer sg.Close()
	bg := blocksutil.NewBlockGenerator()

	t.Log("Test one node trying to get one block from another")

	instances := sg.Instances(2)
	blocks := bg.Blocks(1)
	err := instances[0].Exchange.HasBlock(blocks[0])
	if err != nil {
		t.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
	defer cancel()
	blk, err := instances[1].Exchange.GetBlock(ctx, blocks[0].Key())
	if err != nil {
		t.Fatal(err)
	}

	t.Log(blk)
	for _, inst := range instances {
		err := inst.Exchange.Close()
		if err != nil {
			t.Fatal(err)
		}
	}
}
func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) {
	net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
	block := blocks.NewBlock([]byte("block"))
	g := NewTestSessionGenerator(net)
	defer g.Close()

	peers := g.Instances(2)
	hasBlock := peers[0]
	defer hasBlock.Exchange.Close()

	if err := hasBlock.Exchange.HasBlock(block); err != nil {
		t.Fatal(err)
	}

	wantsBlock := peers[1]
	defer wantsBlock.Exchange.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	received, err := wantsBlock.Exchange.GetBlock(ctx, block.Key())
	if err != nil {
		t.Log(err)
		t.Fatal("Expected to succeed")
	}

	if !bytes.Equal(block.Data, received.Data) {
		t.Fatal("Data doesn't match")
	}
}
// Session creates a test bitswap session.
//
// NB: It's easy to make mistakes by providing the same peer ID to two
// different sessions. To safeguard, use the SessionGenerator to generate
// sessions. It's just a much better idea.
func Session(ctx context.Context, net tn.Network, p testutil.Identity) Instance {
	bsdelay := delay.Fixed(0)
	const bloomSize = 512
	const writeCacheElems = 100

	adapter := net.Adapter(p)
	dstore := ds_sync.MutexWrap(datastore2.WithDelay(ds.NewMapDatastore(), bsdelay))

	bstore, err := blockstore.CachedBlockstore(blockstore.NewBlockstore(
		ds_sync.MutexWrap(dstore)), ctx, blockstore.DefaultCacheOpts())
	if err != nil {
		panic(err.Error()) // FIXME perhaps change signature and return error.
	}

	const alwaysSendToPeer = true

	bs := New(ctx, p.ID(), adapter, bstore, alwaysSendToPeer).(*Bitswap)

	return Instance{
		Peer:            p.ID(),
		Exchange:        bs,
		blockstore:      bstore,
		blockstoreDelay: bsdelay,
	}
}
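// The doc comment above recommends the SessionGenerator over calling Session
// directly, because the generator hands every instance a fresh identity. A
// minimal sketch of that pattern, reusing only helpers that appear elsewhere
// in these files (VirtualNetwork, NewTestSessionGenerator, Instances); the
// test name is illustrative and not part of the original code.
func TestSessionsHaveDistinctPeers(t *testing.T) {
	net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0))
	sg := NewTestSessionGenerator(net)
	defer sg.Close()

	// Instances hands back sessions with distinct peer IDs, so two
	// instances never accidentally share an identity.
	instances := sg.Instances(2)
	if instances[0].Peer == instances[1].Peer {
		t.Fatal("SessionGenerator should never reuse a peer ID")
	}
}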
func MocknetTestRepo(p peer.ID, h host.Host, conf testutil.LatencyConfig, routing core.RoutingOption) core.ConfigOption {
	return func(ctx context.Context) (*core.IpfsNode, error) {
		const kWriteCacheElems = 100
		const alwaysSendToPeer = true
		dsDelay := delay.Fixed(conf.BlockstoreLatency)
		r := &repo.Mock{
			D: ds2.CloserWrap(syncds.MutexWrap(ds2.WithDelay(datastore.NewMapDatastore(), dsDelay))),
		}
		ds := r.Datastore()

		n := &core.IpfsNode{
			Peerstore: h.Peerstore(),
			Repo:      r,
			PeerHost:  h,
			Identity:  p,
		}
		dhtt, err := routing(ctx, n.PeerHost, n.Repo.Datastore())
		if err != nil {
			return nil, err
		}

		bsn := bsnet.NewFromIpfsHost(h, dhtt)
		bstore, err := blockstore.WriteCached(blockstore.NewBlockstore(ds), kWriteCacheElems)
		if err != nil {
			return nil, err
		}
		exch := bitswap.New(ctx, p, bsn, bstore, alwaysSendToPeer)
		n.Blockstore = bstore
		n.Exchange = exch
		n.Routing = dhtt
		return n, nil
	}
}
// convertTimeField converts a config field, given in milliseconds, to a delay.D.
func convertTimeField(field string) delay.D {
	val, err := strconv.Atoi(config[field])
	if err != nil {
		log.Fatalf("Invalid value for %s.", field)
	}
	return delay.Fixed(time.Duration(val) * time.Millisecond)
}
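// A minimal sketch of how convertTimeField might feed a mockrouting
// DelayConfig, assuming the package-level config map read above holds
// millisecond values under the hypothetical keys "query_delay_ms" and
// "visibility_delay_ms". Only DelayConfig, its ValueVisibility/Query fields,
// and delay.D come from the surrounding code; the key names and this helper
// are illustrative.
func delayConfigFromFields() mockrouting.DelayConfig {
	return mockrouting.DelayConfig{
		Query:           convertTimeField("query_delay_ms"),
		ValueVisibility: convertTimeField("visibility_delay_ms"),
	}
}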
func TestDoubleGet(t *testing.T) {
	net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
	sg := NewTestSessionGenerator(net)
	defer sg.Close()
	bg := blocksutil.NewBlockGenerator()

	t.Log("Test one node trying to get one block from another")

	instances := sg.Instances(2)
	blocks := bg.Blocks(1)

	ctx1, cancel1 := context.WithCancel(context.Background())
	blkch1, err := instances[1].Exchange.GetBlocks(ctx1, []*cid.Cid{blocks[0].Cid()})
	if err != nil {
		t.Fatal(err)
	}

	ctx2, cancel2 := context.WithCancel(context.Background())
	defer cancel2()
	blkch2, err := instances[1].Exchange.GetBlocks(ctx2, []*cid.Cid{blocks[0].Cid()})
	if err != nil {
		t.Fatal(err)
	}

	// ensure both requests make it into the wantlist at the same time
	time.Sleep(time.Millisecond * 100)
	cancel1()

	_, ok := <-blkch1
	if ok {
		t.Fatal("expected channel to be closed")
	}

	err = instances[0].Exchange.HasBlock(blocks[0])
	if err != nil {
		t.Fatal(err)
	}

	select {
	case blk, ok := <-blkch2:
		if !ok {
			t.Fatal("expected to get the block here")
		}
		t.Log(blk)
	case <-time.After(time.Second * 5):
		t.Fatal("timed out waiting on block")
	}

	for _, inst := range instances {
		err := inst.Exchange.Close()
		if err != nil {
			t.Fatal(err)
		}
	}
}
func TestEmptyKey(t *testing.T) {
	net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
	sg := NewTestSessionGenerator(net)
	defer sg.Close()
	bs := sg.Instances(1)[0].Exchange

	_, err := bs.GetBlock(context.Background(), key.Key(""))
	if err != blockstore.ErrNotFound {
		t.Error("empty str key should return ErrNotFound")
	}
}
func TestClose(t *testing.T) {
	vnet := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
	sesgen := NewTestSessionGenerator(vnet)
	defer sesgen.Close()
	bgen := blocksutil.NewBlockGenerator()

	block := bgen.Next()
	bitswap := sesgen.Next()

	bitswap.Exchange.Close()
	bitswap.Exchange.GetBlock(context.Background(), block.Key())
}
// Mocks returns |n| connected mock Blockservices
func Mocks(n int) []BlockService {
	net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0))
	sg := bitswap.NewTestSessionGenerator(net)

	instances := sg.Instances(n)

	var servs []BlockService
	for _, i := range instances {
		servs = append(servs, New(i.Blockstore(), i.Exchange))
	}
	return servs
}
// TODO simplify this test. get to the _essence_!
func TestSendToWantingPeer(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
	sg := NewTestSessionGenerator(net)
	defer sg.Close()
	bg := blocksutil.NewBlockGenerator()

	prev := rebroadcastDelay.Set(time.Second / 2)
	defer func() { rebroadcastDelay.Set(prev) }()

	peers := sg.Instances(2)
	peerA := peers[0]
	peerB := peers[1]

	t.Logf("Session %v\n", peerA.Peer)
	t.Logf("Session %v\n", peerB.Peer)

	timeout := time.Second
	waitTime := time.Second * 5

	alpha := bg.Next()
	// peerA requests and waits for block alpha
	ctx, cancel := context.WithTimeout(context.Background(), waitTime)
	defer cancel()
	alphaPromise, err := peerA.Exchange.GetBlocks(ctx, []key.Key{alpha.Key()})
	if err != nil {
		t.Fatal(err)
	}

	// peerB announces to the network that it has block alpha
	ctx, cancel = context.WithTimeout(context.Background(), timeout)
	defer cancel()
	err = peerB.Exchange.HasBlock(ctx, alpha)
	if err != nil {
		t.Fatal(err)
	}

	// At some point, peerA should get alpha (or timeout)
	blkrecvd, ok := <-alphaPromise
	if !ok {
		t.Fatal("context timed out and broke promise channel!")
	}

	if blkrecvd.Key() != alpha.Key() {
		t.Fatal("Wrong block!")
	}
}
func TestSendMessageAsyncButWaitForResponse(t *testing.T) {
	net := VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0))
	responderPeer := testutil.RandIdentityOrFatal(t)
	waiter := net.Adapter(testutil.RandIdentityOrFatal(t))
	responder := net.Adapter(responderPeer)

	var wg sync.WaitGroup

	wg.Add(1)

	expectedStr := "received async"

	responder.SetDelegate(lambda(func(
		ctx context.Context,
		fromWaiter peer.ID,
		msgFromWaiter bsmsg.BitSwapMessage) {

		msgToWaiter := bsmsg.New(true)
		msgToWaiter.AddBlock(blocks.NewBlock([]byte(expectedStr)))
		waiter.SendMessage(ctx, fromWaiter, msgToWaiter)
	}))

	waiter.SetDelegate(lambda(func(
		ctx context.Context,
		fromResponder peer.ID,
		msgFromResponder bsmsg.BitSwapMessage) {

		// TODO assert that this came from the correct peer and that the
		// message contents are as expected
		ok := false
		for _, b := range msgFromResponder.Blocks() {
			if string(b.Data) == expectedStr {
				wg.Done()
				ok = true
			}
		}

		if !ok {
			t.Fatal("Message not received from the responder")
		}
	}))

	messageSentAsync := bsmsg.New(true)
	messageSentAsync.AddBlock(blocks.NewBlock([]byte("data")))
	errSending := waiter.SendMessage(
		context.Background(), responderPeer.ID(), messageSentAsync)
	if errSending != nil {
		t.Fatal(errSending)
	}

	wg.Wait() // until waiter delegate function is executed
}
func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) {
	ctx := context.Background()
	if testing.Short() {
		t.SkipNow()
	}
	net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
	sg := NewTestSessionGenerator(net)
	defer sg.Close()
	bg := blocksutil.NewBlockGenerator()

	instances := sg.Instances(numInstances)
	blocks := bg.Blocks(numBlocks)

	t.Log("Give the blocks to the first instance")

	var blkeys []key.Key
	first := instances[0]
	for _, b := range blocks {
		blkeys = append(blkeys, b.Key())
		first.Exchange.HasBlock(ctx, b)
	}

	t.Log("Distribute!")

	wg := sync.WaitGroup{}
	for _, inst := range instances[1:] {
		wg.Add(1)
		go func(inst Instance) {
			defer wg.Done()
			outch, err := inst.Exchange.GetBlocks(ctx, blkeys)
			if err != nil {
				t.Fatal(err)
			}
			for _ = range outch {
			}
		}(inst)
	}
	wg.Wait()

	t.Log("Verify!")

	for _, inst := range instances {
		for _, b := range blocks {
			if _, err := inst.Blockstore().Get(b.Key()); err != nil {
				t.Fatal(err)
			}
		}
	}
}
// Mocks returns |n| connected mock Blockservices
func Mocks(t fataler, n int) []*BlockService {
	net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0))
	sg := bitswap.NewTestSessionGenerator(net)

	instances := sg.Instances(n)

	var servs []*BlockService
	for _, i := range instances {
		bserv, err := New(i.Blockstore(), i.Exchange)
		if err != nil {
			t.Fatal(err)
		}
		servs = append(servs, bserv)
	}
	return servs
}
func TestProviderForKeyButNetworkCannotFind(t *testing.T) { // TODO revisit this
	rs := mockrouting.NewServer()
	net := tn.VirtualNetwork(rs, delay.Fixed(kNetworkDelay))
	g := NewTestSessionGenerator(net)
	defer g.Close()

	block := blocks.NewBlock([]byte("block"))
	pinfo := p2ptestutil.RandTestBogusIdentityOrFatal(t)
	rs.Client(pinfo).Provide(context.Background(), block.Key()) // but not on network

	solo := g.Next()
	defer solo.Exchange.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
	defer cancel()
	_, err := solo.Exchange.GetBlock(ctx, block.Key())

	if err != context.DeadlineExceeded {
		t.Fatal("Expected DeadlineExceeded error")
	}
}
func BenchmarkWithConfig(c worker.Config) func(b *testing.B) {
	return func(b *testing.B) {

		routingDelay := delay.Fixed(0) // during setup
		dstore := ds_sync.MutexWrap(datastore2.WithDelay(ds.NewMapDatastore(), routingDelay))

		bstore := blockstore.NewBlockstore(dstore)

		var testdata []*blocks.Block
		var i int64
		for i = 0; i < kBlocksPerOp; i++ {
			testdata = append(testdata, blocks.NewBlock([]byte(string(i))))
		}
		b.ResetTimer()
		b.SetBytes(kBlocksPerOp)
		for i := 0; i < b.N; i++ {

			b.StopTimer()
			w := worker.NewWorker(offline.Exchange(bstore), c)
			b.StartTimer()

			prev := routingDelay.Set(kEstRoutingDelay) // during measured section

			for _, block := range testdata {
				if err := w.HasBlock(block); err != nil {
					b.Fatal(err)
				}
			}

			routingDelay.Set(prev) // to hasten the unmeasured close period

			b.StopTimer()
			w.Close()
			b.StartTimer()
		}
	}
}
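// The benchmark above relies on delay.D.Set returning the value it replaces,
// which lets a measured section raise the simulated latency and restore it
// afterwards. A minimal sketch of that pattern as a helper; the helper itself
// is illustrative and not part of the original code.
func withTemporaryDelay(d delay.D, during time.Duration, fn func()) {
	prev := d.Set(during) // raise the delay only for the duration of fn
	defer d.Set(prev)     // restore whatever was configured before
	fn()
}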
func getVirtualNetwork() tn.Network {
	return tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
}
var (
	HasBlockBufferSize    = 256
	provideKeysBufferSize = 2048
	provideWorkerMax      = 512
)

func init() {
	if flags.LowMemMode {
		HasBlockBufferSize = 64
		provideKeysBufferSize = 512
		provideWorkerMax = 16
	}
}

var rebroadcastDelay = delay.Fixed(time.Minute)

// New initializes a BitSwap instance that communicates over the provided
// BitSwapNetwork. This function registers the returned instance as the network
// delegate.
// Runs until context is cancelled.
func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork,
	bstore blockstore.Blockstore, nice bool) exchange.Interface {

	// important to use provided parent context (since it may include important
	// loggable data). It's probably not a good idea to allow bitswap to be
	// coupled to the concerns of the ipfs daemon in this way.
	//
	// FIXME(btc) Now that bitswap manages itself using a process, it probably
	// shouldn't accept a context anymore. Clients should probably use Close()
	// exclusively. We should probably find another way to share logging data
// NewServer returns a mockrouting Server
func NewServer() Server {
	return NewServerWithDelay(DelayConfig{
		ValueVisibility: delay.Fixed(0),
		Query:           delay.Fixed(0),
	})
}
	// results.
	// TODO: if a 'non-nice' strategy is implemented, consider increasing this value
	maxProvidersPerRequest = 3
	providerRequestTimeout = time.Second * 10
	hasBlockTimeout        = time.Second * 15
	provideTimeout         = time.Second * 15
	sizeBatchRequestChan   = 32
	// kMaxPriority is the max priority as defined by the bitswap protocol
	kMaxPriority = math.MaxInt32

	HasBlockBufferSize    = 256
	provideKeysBufferSize = 2048
	provideWorkerMax      = 512
)

var rebroadcastDelay = delay.Fixed(time.Second * 10)

// New initializes a BitSwap instance that communicates over the provided
// BitSwapNetwork. This function registers the returned instance as the network
// delegate.
// Runs until context is cancelled.
func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork,
	bstore blockstore.Blockstore, nice bool) exchange.Interface {

	// important to use provided parent context (since it may include important
	// loggable data). It's probably not a good idea to allow bitswap to be
	// coupled to the concerns of the IPFS daemon in this way.
	//
	// FIXME(btc) Now that bitswap manages itself using a process, it probably
	// shouldn't accept a context anymore. Clients should probably use Close()
	// exclusively. We should probably find another way to share logging data
func TestWantlistCleanup(t *testing.T) {
	net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
	sg := NewTestSessionGenerator(net)
	defer sg.Close()
	bg := blocksutil.NewBlockGenerator()

	instances := sg.Instances(1)[0]
	bswap := instances.Exchange
	blocks := bg.Blocks(20)

	var keys []*cid.Cid
	for _, b := range blocks {
		keys = append(keys, b.Cid())
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*50)
	defer cancel()
	_, err := bswap.GetBlock(ctx, keys[0])
	if err != context.DeadlineExceeded {
		t.Fatal("shouldn't have fetched any blocks")
	}

	time.Sleep(time.Millisecond * 50)

	if len(bswap.GetWantlist()) > 0 {
		t.Fatal("should not have anything in wantlist")
	}

	ctx, cancel = context.WithTimeout(context.Background(), time.Millisecond*50)
	defer cancel()
	_, err = bswap.GetBlocks(ctx, keys[:10])
	if err != nil {
		t.Fatal(err)
	}

	<-ctx.Done()
	time.Sleep(time.Millisecond * 50)

	if len(bswap.GetWantlist()) > 0 {
		t.Fatal("should not have anything in wantlist")
	}

	_, err = bswap.GetBlocks(context.Background(), keys[:1])
	if err != nil {
		t.Fatal(err)
	}

	ctx, cancel = context.WithCancel(context.Background())
	_, err = bswap.GetBlocks(ctx, keys[10:])
	if err != nil {
		t.Fatal(err)
	}

	time.Sleep(time.Millisecond * 50)
	if len(bswap.GetWantlist()) != 11 {
		t.Fatal("should have 11 keys in wantlist")
	}

	cancel()
	time.Sleep(time.Millisecond * 50)
	if !(len(bswap.GetWantlist()) == 1 && bswap.GetWantlist()[0] == keys[0]) {
		t.Fatal("should only have keys[0] in wantlist")
	}
}
func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) {
	ctx := context.Background()
	if testing.Short() {
		t.SkipNow()
	}
	net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
	sg := NewTestSessionGenerator(net)
	defer sg.Close()
	bg := blocksutil.NewBlockGenerator()

	instances := sg.Instances(numInstances)
	blocks := bg.Blocks(numBlocks)

	t.Log("Give the blocks to the first instance")

	nump := len(instances) - 1
	// assert we're properly connected
	for _, inst := range instances {
		peers := inst.Exchange.wm.ConnectedPeers()
		for i := 0; i < 10 && len(peers) != nump; i++ {
			time.Sleep(time.Millisecond * 50)
			peers = inst.Exchange.wm.ConnectedPeers()
		}
		if len(peers) != nump {
			t.Fatal("not enough peers connected to instance")
		}
	}

	var blkeys []key.Key
	first := instances[0]
	for _, b := range blocks {
		blkeys = append(blkeys, b.Key())
		first.Exchange.HasBlock(b)
	}

	t.Log("Distribute!")

	wg := sync.WaitGroup{}
	errs := make(chan error)
	for _, inst := range instances[1:] {
		wg.Add(1)
		go func(inst Instance) {
			defer wg.Done()
			outch, err := inst.Exchange.GetBlocks(ctx, blkeys)
			if err != nil {
				errs <- err
			}
			for _ = range outch {
			}
		}(inst)
	}

	go func() {
		wg.Wait()
		close(errs)
	}()

	for err := range errs {
		if err != nil {
			t.Fatal(err)
		}
	}

	t.Log("Verify!")

	for _, inst := range instances {
		for _, b := range blocks {
			if _, err := inst.Blockstore().Get(b.Key()); err != nil {
				t.Fatal(err)
			}
		}
	}
}