func (ks *DSSuite) TestBasic(c *C) {
	pair := &kt.Pair{
		Convert: func(k ds.Key) ds.Key {
			return ds.NewKey("/abc").Child(k)
		},
		Invert: func(k ds.Key) ds.Key {
			// remove abc prefix
			l := k.List()
			if l[0] != "abc" {
				panic("key does not have prefix. convert failed?")
			}
			return ds.KeyWithNamespaces(l[1:])
		},
	}

	mpds := ds.NewMapDatastore()
	ktds := kt.Wrap(mpds, pair)

	keys := strsToKeys([]string{
		"foo",
		"foo/bar",
		"foo/bar/baz",
		"foo/barb",
		"foo/bar/bazb",
		"foo/bar/baz/barb",
	})

	for _, k := range keys {
		err := ktds.Put(k, []byte(k.String()))
		c.Check(err, Equals, nil)
	}

	for _, k := range keys {
		v1, err := ktds.Get(k)
		c.Check(err, Equals, nil)
		c.Check(bytes.Equal(v1.([]byte), []byte(k.String())), Equals, true)

		v2, err := mpds.Get(ds.NewKey("abc").Child(k))
		c.Check(err, Equals, nil)
		c.Check(bytes.Equal(v2.([]byte), []byte(k.String())), Equals, true)
	}

	listA, errA := mpds.KeyList()
	listB, errB := ktds.KeyList()
	c.Check(errA, Equals, nil)
	c.Check(errB, Equals, nil)
	c.Check(len(listA), Equals, len(listB))

	// sort them cause yeah.
	sort.Sort(ds.KeySlice(listA))
	sort.Sort(ds.KeySlice(listB))

	for i, kA := range listA {
		kB := listB[i]
		c.Check(pair.Invert(kA), Equals, kB)
		c.Check(kA, Equals, pair.Convert(kB))
	}
}
func getMockDagServ(t *testing.T) mdag.DAGService {
	dstore := ds.NewMapDatastore()
	bserv, err := bs.NewBlockService(dstore, nil)
	if err != nil {
		t.Fatal(err)
	}
	return mdag.NewDAGService(bserv)
}
func GetDAGServ(t testing.TB) dag.DAGService {
	dstore := ds.NewMapDatastore()
	bserv, err := bsrv.NewBlockService(dstore, nil)
	if err != nil {
		t.Fatal(err)
	}
	return dag.NewDAGService(bserv)
}
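// A hypothetical caller of the GetDAGServ helper above, sketched for illustration
// only: the test name and the nil check are assumptions, not part of the helper's
// package. Construction errors already fail the test via t.Fatal inside the helper,
// so only a sanity check remains here.
func TestGetDAGServSketch(t *testing.T) {
	dserv := GetDAGServ(t)
	if dserv == nil {
		t.Fatal("expected a DAGService backed by an in-memory MapDatastore")
	}
}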
func TestGetWhenKeyNotPresent(t *testing.T) {
	bs := NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore()))
	_, err := bs.Get(u.Key("not present"))

	if err != nil {
		t.Log("As expected, block is not present")
		return
	}
	t.Fail()
}
func TestValueTypeMismatch(t *testing.T) {
	block := blocks.NewBlock([]byte("some data"))

	datastore := ds.NewMapDatastore()
	datastore.Put(block.Key().DsKey(), "data that isn't a block!")

	blockstore := NewBlockstore(ds_sync.MutexWrap(datastore))

	_, err := blockstore.Get(block.Key())
	if err != ValueTypeMismatch {
		t.Fatal(err)
	}
}
func TestPutThenGetBlock(t *testing.T) {
	bs := NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore()))
	block := blocks.NewBlock([]byte("some data"))

	err := bs.Put(block)
	if err != nil {
		t.Fatal(err)
	}

	blockFromBlockstore, err := bs.Get(block.Key())
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(block.Data, blockFromBlockstore.Data) {
		t.Fail()
	}
}
// NewMockNode constructs an IpfsNode for use in tests.
func NewMockNode() (*IpfsNode, error) {
	nd := new(IpfsNode)

	// Generate Identity
	sk, pk, err := ci.GenerateKeyPair(ci.RSA, 1024)
	if err != nil {
		return nil, err
	}

	p, err := peer.WithKeyPair(sk, pk)
	if err != nil {
		return nil, err
	}

	nd.Peerstore = peer.NewPeerstore()
	nd.Identity, err = nd.Peerstore.Add(p)
	if err != nil {
		return nil, err
	}

	// Temp Datastore
	dstore := ds.NewMapDatastore()
	nd.Datastore = util.CloserWrap(syncds.MutexWrap(dstore))

	// Routing
	dht := mdht.NewMockRouter(nd.Identity, nd.Datastore)
	nd.Routing = dht

	// Bitswap
	bserv, err := bs.NewBlockService(nd.Datastore, nil)
	if err != nil {
		return nil, err
	}

	nd.DAG = mdag.NewDAGService(bserv)

	// Namespace resolver
	nd.Namesys = nsys.NewNameSystem(dht)

	// Path resolver
	nd.Resolver = &path.Resolver{DAG: nd.DAG}

	return nd, nil
}
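// A hypothetical test exercising NewMockNode, sketched here for illustration.
// The test name and assertions are assumptions; the field checks only mirror
// what the constructor above wires up (DAG, Namesys, Resolver).
func TestNewMockNodeSketch(t *testing.T) {
	nd, err := NewMockNode()
	if err != nil {
		t.Fatal(err)
	}
	if nd.DAG == nil || nd.Namesys == nil || nd.Resolver == nil {
		t.Fatal("mock node is missing a service it should have constructed")
	}
}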
func (ks *DSSuite) TestBasic(c *C) {
	mpds := ds.NewMapDatastore()
	nsds := ns.Wrap(mpds, ds.NewKey("abc"))

	keys := strsToKeys([]string{
		"foo",
		"foo/bar",
		"foo/bar/baz",
		"foo/barb",
		"foo/bar/bazb",
		"foo/bar/baz/barb",
	})

	for _, k := range keys {
		err := nsds.Put(k, []byte(k.String()))
		c.Check(err, Equals, nil)
	}

	for _, k := range keys {
		v1, err := nsds.Get(k)
		c.Check(err, Equals, nil)
		c.Check(bytes.Equal(v1.([]byte), []byte(k.String())), Equals, true)

		v2, err := mpds.Get(ds.NewKey("abc").Child(k))
		c.Check(err, Equals, nil)
		c.Check(bytes.Equal(v2.([]byte), []byte(k.String())), Equals, true)
	}

	listA, errA := mpds.KeyList()
	listB, errB := nsds.KeyList()
	c.Check(errA, Equals, nil)
	c.Check(errB, Equals, nil)
	c.Check(len(listA), Equals, len(listB))

	// sort them cause yeah.
	sort.Sort(ds.KeySlice(listA))
	sort.Sort(ds.KeySlice(listB))

	for i, kA := range listA {
		kB := listB[i]
		c.Check(nsds.InvertKey(kA), Equals, kB)
		c.Check(kA, Equals, nsds.ConvertKey(kB))
	}
}
func setupDHT(ctx context.Context, t *testing.T, p peer.Peer) *IpfsDHT {
	peerstore := peer.NewPeerstore()

	dhts := netservice.NewService(ctx, nil) // nil handler for now, need to patch it

	net, err := inet.NewIpfsNetwork(ctx, p.Addresses(), p, peerstore, &mux.ProtocolMap{
		mux.ProtocolID_Routing: dhts,
	})
	if err != nil {
		t.Fatal(err)
	}

	d := NewDHT(ctx, p, peerstore, net, dhts, ds.NewMapDatastore())
	dhts.SetHandler(d)
	d.Validators["v"] = func(u.Key, []byte) error {
		return nil
	}
	return d
}
func TestBlocks(t *testing.T) {
	d := ds.NewMapDatastore()
	bs, err := NewBlockService(d, nil)
	if err != nil {
		t.Error("failed to construct block service", err)
		return
	}

	b := blocks.NewBlock([]byte("beep boop"))
	h := u.Hash([]byte("beep boop"))
	if !bytes.Equal(b.Multihash, h) {
		t.Error("Block Multihash and data multihash not equal")
	}

	if b.Key() != u.Key(h) {
		t.Error("Block key and data multihash key not equal")
	}

	k, err := bs.AddBlock(b)
	if err != nil {
		t.Error("failed to add block to BlockService", err)
		return
	}

	if k != b.Key() {
		t.Error("returned key is not equal to block key")
	}

	ctx, _ := context.WithTimeout(context.TODO(), time.Second*5)
	b2, err := bs.GetBlock(ctx, b.Key())
	if err != nil {
		t.Error("failed to retrieve block from BlockService", err)
		return
	}

	if b.Key() != b2.Key() {
		t.Error("Block keys not equal.")
	}

	if !bytes.Equal(b.Data, b2.Data) {
		t.Error("Block data is not equal.")
	}
}
func Example() {
	mp := ds.NewMapDatastore()
	ns := nsds.Wrap(mp, ds.NewKey("/foo/bar"))

	k := ds.NewKey("/beep")
	v := "boop"

	ns.Put(k, v)
	fmt.Printf("ns.Put %s %s\n", k, v)

	v2, _ := ns.Get(k)
	fmt.Printf("ns.Get %s -> %s\n", k, v2)

	k3 := ds.NewKey("/foo/bar/beep")
	v3, _ := mp.Get(k3)
	fmt.Printf("mp.Get %s -> %s\n", k3, v3)
	// Output:
	// ns.Put /beep boop
	// ns.Get /beep -> boop
	// mp.Get /foo/bar/beep -> boop
}
func TestRoutingResolve(t *testing.T) {
	local := peer.WithIDString("testID")
	lds := ds.NewMapDatastore()
	d := mock.NewMockRouter(local, lds)

	resolver := NewRoutingResolver(d)
	publisher := NewRoutingPublisher(d)

	privk, pubk, err := ci.GenerateKeyPair(ci.RSA, 512)
	if err != nil {
		t.Fatal(err)
	}

	err = publisher.Publish(privk, "Hello")
	if err == nil {
		t.Fatal("should have errored out when publishing a non-multihash val")
	}

	h := u.Key(u.Hash([]byte("Hello"))).Pretty()
	err = publisher.Publish(privk, h)
	if err != nil {
		t.Fatal(err)
	}

	pubkb, err := pubk.Bytes()
	if err != nil {
		t.Fatal(err)
	}

	pkhash := u.Hash(pubkb)
	res, err := resolver.Resolve(u.Key(pkhash).Pretty())
	if err != nil {
		t.Fatal(err)
	}

	if res != h {
		t.Fatal("Got back incorrect value.")
	}
}
// session creates a test bitswap session.
//
// NB: It's easy to make mistakes by providing the same peer ID to two
// different sessions. To safeguard, use the SessionGenerator to generate
// sessions. It's just a much better idea.
func session(net tn.Network, rs mock.RoutingServer, id peer.ID) instance {
	p := peer.WithID(id)

	adapter := net.Adapter(p)
	htc := rs.Client(p)

	blockstore := bstore.NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore()))

	const alwaysSendToPeer = true
	bs := &bitswap{
		blockstore:    blockstore,
		notifications: notifications.New(),
		strategy:      strategy.New(alwaysSendToPeer),
		routing:       htc,
		sender:        adapter,
		wantlist:      util.NewKeySet(),
	}

	adapter.SetDelegate(bs)
	return instance{
		peer:       p,
		exchange:   bs,
		blockstore: blockstore,
	}
}
func makeDatastore(cfg config.Datastore) (u.ThreadSafeDatastoreCloser, error) {
	if len(cfg.Type) == 0 {
		return nil, debugerror.Errorf("config datastore.type required")
	}

	switch cfg.Type {
	case "leveldb":
		return makeLevelDBDatastore(cfg)

	case "memory":
		return u.CloserWrap(syncds.MutexWrap(ds.NewMapDatastore())), nil

	case "fs":
		log.Warning("using fs.Datastore at .datastore for testing.")
		d, err := fsds.NewDatastore(".datastore") // for testing!!
		if err != nil {
			return nil, err
		}
		ktd := ktds.Wrap(d, u.B58KeyConverter)
		return u.CloserWrap(syncds.MutexWrap(ktd)), nil
	}

	return nil, debugerror.Errorf("Unknown datastore type: %s", cfg.Type)
}
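// A hypothetical sketch of makeDatastore's "memory" branch in use. The test
// name is illustrative, and the Put/Close calls assume the returned
// ThreadSafeDatastoreCloser exposes the standard datastore and io.Closer
// methods used elsewhere in this code.
func TestMakeMemoryDatastoreSketch(t *testing.T) {
	d, err := makeDatastore(config.Datastore{Type: "memory"})
	if err != nil {
		t.Fatal(err)
	}
	defer d.Close()

	if err := d.Put(ds.NewKey("/hello"), []byte("world")); err != nil {
		t.Fatal(err)
	}
}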
// NewPeerstore creates a threadsafe collection of peers.
func NewPeerstore() Peerstore {
	return &peerstore{
		peers: ds.NewMapDatastore(),
	}
}
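// A hypothetical wiring sketch in the style of the DHT tests below: build a
// peerstore and register the local peer before constructing a DHT. makePeer is
// the helper those tests already use; the function name here is illustrative.
func newTestPeerstoreSketch() (peer.Peerstore, peer.Peer) {
	peerstore := peer.NewPeerstore()
	local := makePeer(nil)
	peerstore.Add(local) // return values discarded, as in TestNotFound
	return peerstore, local
}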
func TestPinnerBasic(t *testing.T) {
	dstore := ds.NewMapDatastore()
	bserv, err := bs.NewBlockService(dstore, nil)
	if err != nil {
		t.Fatal(err)
	}

	dserv := mdag.NewDAGService(bserv)

	p := NewPinner(dstore, dserv)

	a, ak := randNode()

	// Pin A{}
	err = p.Pin(a, false)
	if err != nil {
		t.Fatal(err)
	}

	if !p.IsPinned(ak) {
		t.Fatal("Failed to find key")
	}

	b, _ := randNode()
	err = b.AddNodeLink("child", a)
	if err != nil {
		t.Fatal(err)
	}

	c, ck := randNode()
	err = b.AddNodeLink("otherchild", c)
	if err != nil {
		t.Fatal(err)
	}

	// recursively pin B{A,C}
	err = p.Pin(b, true)
	if err != nil {
		t.Fatal(err)
	}

	if !p.IsPinned(ck) {
		t.Fatal("Child of recursively pinned node not found")
	}

	bk, _ := b.Key()
	if !p.IsPinned(bk) {
		t.Fatal("Recursively pinned node not found..")
	}

	d, _ := randNode()
	d.AddNodeLink("a", a)
	d.AddNodeLink("c", c)

	e, ek := randNode()
	d.AddNodeLink("e", e)

	// Must be in dagserv for unpin to work
	err = dserv.AddRecursive(d)
	if err != nil {
		t.Fatal(err)
	}

	// Add D{A,C,E}
	err = p.Pin(d, true)
	if err != nil {
		t.Fatal(err)
	}

	if !p.IsPinned(ek) {
		t.Fatal("Pinned node not found")
	}

	dk, _ := d.Key()
	if !p.IsPinned(dk) {
		t.Fatal("pinned node not found.")
	}

	// Test recursive unpin
	err = p.Unpin(dk, true)
	if err != nil {
		t.Fatal(err)
	}

	// c should still be pinned under b
	if !p.IsPinned(ck) {
		t.Fatal("Recursive / indirect unpin fail.")
	}

	err = p.Flush()
	if err != nil {
		t.Fatal(err)
	}

	np, err := LoadPinner(dstore, dserv)
	if err != nil {
		t.Fatal(err)
	}

	// Test directly pinned
	if !np.IsPinned(ak) {
		t.Fatal("Could not find pinned node!")
	}

	// Test indirectly pinned
	if !np.IsPinned(ck) {
		t.Fatal("could not find indirectly pinned node")
	}

	// Test recursively pinned
	if !np.IsPinned(bk) {
		t.Fatal("could not find recursively pinned node")
	}
}
func TestGetFailures(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()
	fn := &fauxNet{}
	fs := &fauxSender{}

	peerstore := peer.NewPeerstore()
	local := makePeer(nil)

	d := NewDHT(ctx, local, peerstore, fn, fs, ds.NewMapDatastore())
	other := makePeer(nil)
	d.Update(ctx, other)

	// This one should time out
	// u.POut("Timeout Test\n")
	ctx1, _ := context.WithTimeout(context.Background(), time.Second)
	_, err := d.GetValue(ctx1, u.Key("test"))
	if err != nil {
		if err != context.DeadlineExceeded {
			t.Fatal("Got different error than we expected", err)
		}
	} else {
		t.Fatal("Did not get expected error!")
	}

	// u.POut("NotFound Test\n")
	// Reply with failures to every message
	fs.AddHandler(func(mes msg.NetMessage) msg.NetMessage {
		pmes := new(pb.Message)
		err := proto.Unmarshal(mes.Data(), pmes)
		if err != nil {
			t.Fatal(err)
		}

		resp := &pb.Message{
			Type: pmes.Type,
		}
		m, err := msg.FromObject(mes.Peer(), resp)
		return m
	})

	// This one should fail with NotFound
	ctx2, _ := context.WithTimeout(context.Background(), time.Second)
	_, err = d.GetValue(ctx2, u.Key("test"))
	if err != nil {
		if err != routing.ErrNotFound {
			t.Fatalf("Expected ErrNotFound, got: %s", err)
		}
	} else {
		t.Fatal("expected error, got none.")
	}

	fs.handlers = nil

	// Now we test this DHT's handleGetValue failure
	typ := pb.Message_GET_VALUE
	str := "hello"

	rec, err := d.makePutRecord(u.Key(str), []byte("blah"))
	if err != nil {
		t.Fatal(err)
	}
	req := pb.Message{
		Type:   &typ,
		Key:    &str,
		Record: rec,
	}

	// u.POut("handleGetValue Test\n")
	mes, err := msg.FromObject(other, &req)
	if err != nil {
		t.Error(err)
	}

	mes = d.HandleMessage(ctx, mes)

	pmes := new(pb.Message)
	err = proto.Unmarshal(mes.Data(), pmes)
	if err != nil {
		t.Fatal(err)
	}

	if pmes.GetRecord() != nil {
		t.Fatal("shouldn't have value")
	}

	if pmes.GetProviderPeers() != nil {
		t.Fatal("shouldn't have provider peers")
	}
}
func TestNotFound(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()
	fn := &fauxNet{}
	fs := &fauxSender{}

	local := makePeer(nil)
	peerstore := peer.NewPeerstore()
	peerstore.Add(local)

	d := NewDHT(ctx, local, peerstore, fn, fs, ds.NewMapDatastore())

	var ps []peer.Peer
	for i := 0; i < 5; i++ {
		ps = append(ps, _randPeer())
		d.Update(ctx, ps[i])
	}

	// Reply with random peers to every message
	fs.AddHandler(func(mes msg.NetMessage) msg.NetMessage {
		pmes := new(pb.Message)
		err := proto.Unmarshal(mes.Data(), pmes)
		if err != nil {
			t.Fatal(err)
		}

		switch pmes.GetType() {
		case pb.Message_GET_VALUE:
			resp := &pb.Message{Type: pmes.Type}

			peers := []peer.Peer{}
			for i := 0; i < 7; i++ {
				peers = append(peers, _randPeer())
			}
			resp.CloserPeers = pb.PeersToPBPeers(peers)
			mes, err := msg.FromObject(mes.Peer(), resp)
			if err != nil {
				t.Error(err)
			}
			return mes
		default:
			panic("Shouldn't receive this.")
		}
	})

	ctx, _ = context.WithTimeout(ctx, time.Second*5)
	v, err := d.GetValue(ctx, u.Key("hello"))
	log.Debugf("get value got %v", v)
	if err != nil {
		switch err {
		case routing.ErrNotFound:
			// Success!
			return
		case u.ErrTimeout:
			t.Fatal("Should not have gotten timeout!")
		default:
			t.Fatalf("Got unexpected error: %s", err)
		}
	}
	t.Fatal("Expected to receive an error.")
}
// If fewer than K nodes are in the entire network, a GET RPC should fail when
// nobody has the value.
func TestLessThanKResponses(t *testing.T) {
	// t.Skip("skipping test because it makes a lot of output")

	ctx := context.Background()
	u.Debug = false
	fn := &fauxNet{}
	fs := &fauxSender{}
	local := makePeer(nil)
	peerstore := peer.NewPeerstore()
	peerstore.Add(local)

	d := NewDHT(ctx, local, peerstore, fn, fs, ds.NewMapDatastore())

	var ps []peer.Peer
	for i := 0; i < 5; i++ {
		ps = append(ps, _randPeer())
		d.Update(ctx, ps[i])
	}
	other := _randPeer()

	// Reply with random peers to every message
	fs.AddHandler(func(mes msg.NetMessage) msg.NetMessage {
		pmes := new(pb.Message)
		err := proto.Unmarshal(mes.Data(), pmes)
		if err != nil {
			t.Fatal(err)
		}

		switch pmes.GetType() {
		case pb.Message_GET_VALUE:
			resp := &pb.Message{
				Type:        pmes.Type,
				CloserPeers: pb.PeersToPBPeers([]peer.Peer{other}),
			}

			mes, err := msg.FromObject(mes.Peer(), resp)
			if err != nil {
				t.Error(err)
			}
			return mes
		default:
			panic("Shouldn't receive this.")
		}
	})

	ctx, _ = context.WithTimeout(ctx, time.Second*30)
	_, err := d.GetValue(ctx, u.Key("hello"))
	if err != nil {
		switch err {
		case routing.ErrNotFound:
			// Success!
			return
		case u.ErrTimeout:
			t.Fatal("Should not have gotten timeout!")
		default:
			t.Fatalf("Got unexpected error: %s", err)
		}
	}
	t.Fatal("Expected to receive an error.")
}