func getMockDagServ(t testing.TB) mdag.DAGService {
	dstore := ds.NewMapDatastore()
	tsds := sync.MutexWrap(dstore)
	bstore := blockstore.NewBlockstore(tsds)
	bserv := bs.New(bstore, offline.Exchange(bstore))
	return mdag.NewDAGService(bserv)
}
func TestGetWhenKeyIsEmptyString(t *testing.T) {
	bs := NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore()))
	_, err := bs.Get(key.Key(""))
	if err != ErrNotFound {
		t.Fail()
	}
}
func TestRoutingResolve(t *testing.T) {
	dstore := dssync.MutexWrap(ds.NewMapDatastore())
	serv := mockrouting.NewServer()
	id := testutil.RandIdentityOrFatal(t)
	d := serv.ClientWithDatastore(context.Background(), id, dstore)

	resolver := NewRoutingResolver(d, 0)
	publisher := NewRoutingPublisher(d, dstore)

	privk, pubk, err := testutil.RandTestKeyPair(512)
	if err != nil {
		t.Fatal(err)
	}

	h := path.FromString("/ipfs/QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN")
	err = publisher.Publish(context.Background(), privk, h)
	if err != nil {
		t.Fatal(err)
	}

	pubkb, err := pubk.Bytes()
	if err != nil {
		t.Fatal(err)
	}

	pkhash := u.Hash(pubkb)
	res, err := resolver.Resolve(context.Background(), key.Key(pkhash).B58String())
	if err != nil {
		t.Fatal(err)
	}

	if res != h {
		t.Fatal("Got back incorrect value.")
	}
}
func TestAllKeysRespectsContext(t *testing.T) {
	N := 100

	d := &queryTestDS{ds: ds.NewMapDatastore()}
	bs, _ := newBlockStoreWithKeys(t, d, N)

	started := make(chan struct{}, 1)
	done := make(chan struct{}, 1)
	errors := make(chan error, 100)

	getKeys := func(ctx context.Context) {
		started <- struct{}{}
		ch, err := bs.AllKeysChan(ctx) // once without cancelling
		if err != nil {
			errors <- err
		}
		_ = collect(ch)
		done <- struct{}{}
		errors <- nil // a nil one to signal break
	}

	var results dsq.Results
	var resultsmu = make(chan struct{})
	resultChan := make(chan dsq.Result)
	d.SetFunc(func(q dsq.Query) (dsq.Results, error) {
		results = dsq.ResultsWithChan(q, resultChan)
		resultsmu <- struct{}{}
		return results, nil
	})

	go getKeys(context.Background())

	// make sure it's waiting.
	<-started
	<-resultsmu
	select {
	case <-done:
		t.Fatal("sync is wrong")
	case <-results.Process().Closing():
		t.Fatal("should not be closing")
	case <-results.Process().Closed():
		t.Fatal("should not be closed")
	default:
	}

	e := dsq.Entry{Key: BlockPrefix.ChildString("foo").String()}
	resultChan <- dsq.Result{Entry: e} // let it go.
	close(resultChan)
	<-done                       // should be done now.
	<-results.Process().Closed() // should be closed now

	// print any errors
	for err := range errors {
		if err == nil {
			break
		}
		t.Error(err)
	}
}
func (pn *peernet) Adapter(p testutil.Identity) bsnet.BitSwapNetwork {
	client, err := pn.Mocknet.AddPeer(p.PrivateKey(), p.Address())
	if err != nil {
		panic(err.Error())
	}
	routing := pn.routingserver.ClientWithDatastore(context.TODO(), p, ds.NewMapDatastore())
	return bsnet.NewFromIpfsHost(client, routing)
}
func getMockDagServAndBstore(t testing.TB) (mdag.DAGService, blockstore.GCBlockstore) {
	dstore := ds.NewMapDatastore()
	tsds := sync.MutexWrap(dstore)
	bstore := blockstore.NewBlockstore(tsds)
	bserv := bs.New(bstore, offline.Exchange(bstore))
	dserv := mdag.NewDAGService(bserv)
	return dserv, bstore
}
func TestGetWhenKeyNotPresent(t *testing.T) {
	bs := NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore()))
	_, err := bs.Get(key.Key("not present"))

	if err != nil {
		t.Log("As expected, block is not present")
		return
	}
	t.Fail()
}
func getDagservAndPinner(t *testing.T) dagservAndPinner {
	db := dssync.MutexWrap(ds.NewMapDatastore())
	bs := bstore.NewBlockstore(db)
	blockserv := bserv.New(bs, offline.Exchange(bs))
	dserv := NewDAGService(blockserv)
	mpin := pin.NewPinner(db, dserv)
	return dagservAndPinner{
		ds: dserv,
		mp: mpin,
	}
}
func TestValueTypeMismatch(t *testing.T) {
	block := blocks.NewBlock([]byte("some data"))

	datastore := ds.NewMapDatastore()
	k := BlockPrefix.Child(block.Key().DsKey())
	datastore.Put(k, "data that isn't a block!")

	blockstore := NewBlockstore(ds_sync.MutexWrap(datastore))

	_, err := blockstore.Get(block.Key())
	if err != ValueTypeMismatch {
		t.Fatal(err)
	}
}
func setupDHT(ctx context.Context, t *testing.T) *IpfsDHT {
	h := netutil.GenHostSwarm(t, ctx)

	dss := dssync.MutexWrap(ds.NewMapDatastore())
	d := NewDHT(ctx, h, dss)

	d.Validator["v"] = &record.ValidChecker{
		Func: func(key.Key, []byte) error {
			return nil
		},
		Sign: false,
	}
	return d
}
func TestPutThenGetBlock(t *testing.T) {
	bs := NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore()))
	block := blocks.NewBlock([]byte("some data"))

	err := bs.Put(block)
	if err != nil {
		t.Fatal(err)
	}

	blockFromBlockstore, err := bs.Get(block.Key())
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(block.Data(), blockFromBlockstore.Data()) {
		t.Fail()
	}
}
func TestBlocks(t *testing.T) {
	bstore := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
	bs := New(bstore, offline.Exchange(bstore))
	defer bs.Close()

	_, err := bs.GetBlock(context.Background(), key.Key(""))
	if err != ErrNotFound {
		t.Error("Empty String Key should error", err)
	}

	b := blocks.NewBlock([]byte("beep boop"))
	h := u.Hash([]byte("beep boop"))
	if !bytes.Equal(b.Multihash(), h) {
		t.Error("Block Multihash and data multihash not equal")
	}

	if b.Key() != key.Key(h) {
		t.Error("Block key and data multihash key not equal")
	}

	k, err := bs.AddBlock(b)
	if err != nil {
		t.Error("failed to add block to BlockService", err)
		return
	}

	if k != b.Key() {
		t.Error("returned key is not equal to block key", err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
	defer cancel()
	b2, err := bs.GetBlock(ctx, b.Key())
	if err != nil {
		t.Error("failed to retrieve block from BlockService", err)
		return
	}

	if b.Key() != b2.Key() {
		t.Error("Block keys not equal.")
	}

	if !bytes.Equal(b.Data(), b2.Data()) {
		t.Error("Block data is not equal.")
	}
}
func newBlockStoreWithKeys(t *testing.T, d ds.Datastore, N int) (Blockstore, []key.Key) {
	if d == nil {
		d = ds.NewMapDatastore()
	}
	bs := NewBlockstore(ds_sync.MutexWrap(d))

	keys := make([]key.Key, N)
	for i := 0; i < N; i++ {
		block := blocks.NewBlock([]byte(fmt.Sprintf("some data %d", i)))
		err := bs.Put(block)
		if err != nil {
			t.Fatal(err)
		}
		keys[i] = block.Key()
	}
	return bs, keys
}
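// A minimal usage sketch (not part of the original suite) showing how
// newBlockStoreWithKeys pairs with AllKeysChan: every key the helper stores
// should come back out of the channel. It reuses the collect() helper called
// in TestAllKeysRespectsContext above; the test name itself is illustrative.
func TestAllKeysRoundTripSketch(t *testing.T) {
	N := 100
	bs, keys := newBlockStoreWithKeys(t, nil, N)

	ch, err := bs.AllKeysChan(context.Background())
	if err != nil {
		t.Fatal(err)
	}

	got := make(map[key.Key]bool)
	for _, k := range collect(ch) {
		got[k] = true
	}

	if len(got) != N {
		t.Fatalf("expected %d distinct keys, got %d", N, len(got))
	}
	for _, k := range keys {
		if !got[k] {
			t.Fatalf("key %s missing from AllKeysChan output", k)
		}
	}
}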
func TestPutProviderDoesntResultInDuplicates(t *testing.T) {
	routingBackend := datastore.NewMapDatastore()
	k := key.Key("foo")
	put := []*dhtpb.Message_Peer{
		convPeer("bob", "127.0.0.1/tcp/4001"),
		convPeer("alice", "10.0.0.10/tcp/4001"),
	}
	if err := putRoutingProviders(routingBackend, k, put); err != nil {
		t.Fatal(err)
	}
	if err := putRoutingProviders(routingBackend, k, put); err != nil {
		t.Fatal(err)
	}

	got, err := getRoutingProviders(routingBackend, k)
	if err != nil {
		t.Fatal(err)
	}
	if len(got) != 2 {
		t.Fatal("should be 2 values, but there are", len(got))
	}
}
func TestPrexistingRecord(t *testing.T) {
	dstore := dssync.MutexWrap(ds.NewMapDatastore())
	d := mockrouting.NewServer().ClientWithDatastore(context.Background(), testutil.RandIdentityOrFatal(t), dstore)

	resolver := NewRoutingResolver(d, 0)
	publisher := NewRoutingPublisher(d, dstore)

	privk, pubk, err := testutil.RandTestKeyPair(512)
	if err != nil {
		t.Fatal(err)
	}

	id, err := peer.IDFromPublicKey(pubk)
	if err != nil {
		t.Fatal(err)
	}

	// Make a good record and put it in the datastore
	h := path.FromString("/ipfs/QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN")
	eol := time.Now().Add(time.Hour)
	err = PutRecordToRouting(context.Background(), privk, h, 0, eol, d, id)
	if err != nil {
		t.Fatal(err)
	}

	// Now, with an old record in the system already, try and publish a new one
	err = publisher.Publish(context.Background(), privk, h)
	if err != nil {
		t.Fatal(err)
	}

	err = verifyCanResolve(resolver, id.Pretty(), h)
	if err != nil {
		t.Fatal(err)
	}
}
func TestReprovide(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	mrserv := mock.NewServer()

	idA := testutil.RandIdentityOrFatal(t)
	idB := testutil.RandIdentityOrFatal(t)

	clA := mrserv.Client(idA)
	clB := mrserv.Client(idB)

	bstore := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))

	blk := blocks.NewBlock([]byte("this is a test"))
	bstore.Put(blk)

	reprov := NewReprovider(clA, bstore)
	err := reprov.Reprovide(ctx)
	if err != nil {
		t.Fatal(err)
	}

	provs, err := clB.FindProviders(ctx, blk.Key())
	if err != nil {
		t.Fatal(err)
	}

	if len(provs) == 0 {
		t.Fatal("Should have gotten a provider")
	}

	if provs[0].ID != idA.ID() {
		t.Fatal("Somehow got the wrong peer back as a provider.")
	}
}
// If less than K nodes are in the entire network, it should fail when we make
// a GET rpc and nobody has the value
func TestLessThanKResponses(t *testing.T) {
	// t.Skip("skipping test to debug another")
	// t.Skip("skipping test because it makes a lot of output")

	ctx := context.Background()
	mn, err := mocknet.FullMeshConnected(ctx, 6)
	if err != nil {
		t.Fatal(err)
	}
	hosts := mn.Hosts()

	tsds := dssync.MutexWrap(ds.NewMapDatastore())
	d := NewDHT(ctx, hosts[0], tsds)

	for i := 1; i < 5; i++ {
		d.Update(ctx, hosts[i].ID())
	}

	// Reply with the same closer peer to every GET_VALUE message
	for _, host := range hosts {
		host := host // shadow loop var
		host.SetStreamHandler(ProtocolDHT, func(s inet.Stream) {
			defer s.Close()

			pbr := ggio.NewDelimitedReader(s, inet.MessageSizeMax)
			pbw := ggio.NewDelimitedWriter(s)

			pmes := new(pb.Message)
			if err := pbr.ReadMsg(pmes); err != nil {
				panic(err)
			}

			switch pmes.GetType() {
			case pb.Message_GET_VALUE:
				pi := host.Peerstore().PeerInfo(hosts[1].ID())
				resp := &pb.Message{
					Type:        pmes.Type,
					CloserPeers: pb.PeerInfosToPBPeers(d.host.Network(), []pstore.PeerInfo{pi}),
				}

				if err := pbw.WriteMsg(resp); err != nil {
					panic(err)
				}
			default:
				panic("Shouldn't receive this.")
			}
		})
	}

	ctx, cancel := context.WithTimeout(ctx, time.Second*30)
	defer cancel()
	if _, err := d.GetValue(ctx, key.Key("hello")); err != nil {
		switch err {
		case routing.ErrNotFound:
			// Success!
			return
		case u.ErrTimeout:
			t.Fatal("Should not have gotten timeout!")
		default:
			t.Fatalf("Got unexpected error: %s", err)
		}
	}
	t.Fatal("Expected to receive an error.")
}
func TestNotFound(t *testing.T) {
	// t.Skip("skipping test to debug another")
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()
	mn, err := mocknet.FullMeshConnected(ctx, 16)
	if err != nil {
		t.Fatal(err)
	}
	hosts := mn.Hosts()
	tsds := dssync.MutexWrap(ds.NewMapDatastore())
	d := NewDHT(ctx, hosts[0], tsds)

	for _, p := range hosts {
		d.Update(ctx, p.ID())
	}

	// Reply with random peers to every message
	for _, host := range hosts {
		host := host // shadow loop var
		host.SetStreamHandler(ProtocolDHT, func(s inet.Stream) {
			defer s.Close()

			pbr := ggio.NewDelimitedReader(s, inet.MessageSizeMax)
			pbw := ggio.NewDelimitedWriter(s)

			pmes := new(pb.Message)
			if err := pbr.ReadMsg(pmes); err != nil {
				panic(err)
			}

			switch pmes.GetType() {
			case pb.Message_GET_VALUE:
				resp := &pb.Message{Type: pmes.Type}

				ps := []pstore.PeerInfo{}
				for i := 0; i < 7; i++ {
					p := hosts[rand.Intn(len(hosts))].ID()
					pi := host.Peerstore().PeerInfo(p)
					ps = append(ps, pi)
				}

				resp.CloserPeers = pb.PeerInfosToPBPeers(d.host.Network(), ps)
				if err := pbw.WriteMsg(resp); err != nil {
					panic(err)
				}
			default:
				panic("Shouldn't receive this.")
			}
		})
	}

	// long timeout to ensure timing is not at play.
	ctx, cancel := context.WithTimeout(ctx, time.Second*20)
	defer cancel()
	v, err := d.GetValue(ctx, key.Key("hello"))
	log.Debugf("get value got %v", v)
	if err != nil {
		if merr, ok := err.(u.MultiErr); ok && len(merr) > 0 {
			err = merr[0]
		}
		switch err {
		case routing.ErrNotFound:
			// Success!
			return
		case u.ErrTimeout:
			t.Fatal("Should not have gotten timeout!")
		default:
			t.Fatalf("Got unexpected error: %s", err)
		}
	}
	t.Fatal("Expected to receive an error.")
}
func TestGetFailures(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()
	mn, err := mocknet.FullMeshConnected(ctx, 2)
	if err != nil {
		t.Fatal(err)
	}
	hosts := mn.Hosts()

	tsds := dssync.MutexWrap(ds.NewMapDatastore())
	d := NewDHT(ctx, hosts[0], tsds)
	d.Update(ctx, hosts[1].ID())

	// Reply with failures to every message
	hosts[1].SetStreamHandler(ProtocolDHT, func(s inet.Stream) {
		s.Close()
	})

	// This one should time out
	ctx1, _ := context.WithTimeout(context.Background(), 200*time.Millisecond)
	if _, err := d.GetValue(ctx1, key.Key("test")); err != nil {
		if merr, ok := err.(u.MultiErr); ok && len(merr) > 0 {
			err = merr[0]
		}

		if err != io.EOF {
			t.Fatal("Got different error than we expected", err)
		}
	} else {
		t.Fatal("Did not get expected error!")
	}

	t.Log("Timeout test passed.")

	// Reply with failures to every message
	hosts[1].SetStreamHandler(ProtocolDHT, func(s inet.Stream) {
		defer s.Close()

		pbr := ggio.NewDelimitedReader(s, inet.MessageSizeMax)
		pbw := ggio.NewDelimitedWriter(s)

		pmes := new(pb.Message)
		if err := pbr.ReadMsg(pmes); err != nil {
			panic(err)
		}

		resp := &pb.Message{
			Type: pmes.Type,
		}
		if err := pbw.WriteMsg(resp); err != nil {
			panic(err)
		}
	})

	// This one should fail with NotFound.
	// long context timeout to ensure we don't end too early.
	// the dht should be exhausting its query and returning not found.
	// (was 3 seconds before which should be _plenty_ of time, but maybe
	// travis machines really have a hard time...)
	ctx2, _ := context.WithTimeout(context.Background(), 20*time.Second)
	_, err = d.GetValue(ctx2, key.Key("test"))
	if err != nil {
		if merr, ok := err.(u.MultiErr); ok && len(merr) > 0 {
			err = merr[0]
		}
		if err != routing.ErrNotFound {
			t.Fatalf("Expected ErrNotFound, got: %s", err)
		}
	} else {
		t.Fatal("expected error, got none.")
	}

	t.Log("ErrNotFound check passed!")

	// Now we test this DHT's handleGetValue failure
	{
		typ := pb.Message_GET_VALUE
		str := "hello"

		sk, err := d.getOwnPrivateKey()
		if err != nil {
			t.Fatal(err)
		}

		rec, err := record.MakePutRecord(sk, key.Key(str), []byte("blah"), true)
		if err != nil {
			t.Fatal(err)
		}
		req := pb.Message{
			Type:   &typ,
			Key:    &str,
			Record: rec,
		}

		s, err := hosts[1].NewStream(context.Background(), ProtocolDHT, hosts[0].ID())
		if err != nil {
			t.Fatal(err)
		}
		defer s.Close()

		pbr := ggio.NewDelimitedReader(s, inet.MessageSizeMax)
		pbw := ggio.NewDelimitedWriter(s)

		if err := pbw.WriteMsg(&req); err != nil {
			t.Fatal(err)
		}

		pmes := new(pb.Message)
		if err := pbr.ReadMsg(pmes); err != nil {
			t.Fatal(err)
		}
		if pmes.GetRecord() != nil {
			t.Fatal("shouldn't have value")
		}
		if pmes.GetProviderPeers() != nil {
			t.Fatal("shouldn't have provider peers")
		}
	}
}
func (rs *mocknetserver) Client(p testutil.Identity) Client {
	return rs.ClientWithDatastore(context.TODO(), p, ds.NewMapDatastore())
}
func Mock() dag.DAGService {
	bstore := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
	bserv := bsrv.New(bstore, offline.Exchange(bstore))
	return dag.NewDAGService(bserv)
}
func getDagserv(t *testing.T) dag.DAGService {
	db := dssync.MutexWrap(ds.NewMapDatastore())
	bs := bstore.NewBlockstore(db)
	blockserv := bserv.New(bs, offline.Exchange(bs))
	return dag.NewDAGService(blockserv)
}
func bstore() blockstore.Blockstore {
	return blockstore.NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore()))
}
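// A minimal usage sketch (not part of the original suite) for the bstore()
// helper above: put a block and read it back, mirroring the Put/Get calls
// exercised in TestPutThenGetBlock. The test name is illustrative, and it
// assumes the blocks and bytes packages are imported in this file.
func TestBstoreHelperRoundTripSketch(t *testing.T) {
	bs := bstore()
	blk := blocks.NewBlock([]byte("round trip"))

	if err := bs.Put(blk); err != nil {
		t.Fatal(err)
	}

	out, err := bs.Get(blk.Key())
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(blk.Data(), out.Data()) {
		t.Fatal("data mismatch after round trip")
	}
}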