func TestPushPop(t *testing.T) {
	prq := newPRQ()
	partner := testutil.RandPeerIDFatal(t)
	alphabet := strings.Split("abcdefghijklmnopqrstuvwxyz", "")
	vowels := strings.Split("aeiou", "")

	consonants := func() []string {
		var out []string
		for _, letter := range alphabet {
			skip := false
			for _, vowel := range vowels {
				if letter == vowel {
					skip = true
				}
			}
			if !skip {
				out = append(out, letter)
			}
		}
		return out
	}()
	sort.Strings(alphabet)
	sort.Strings(vowels)
	sort.Strings(consonants)

	// Add a bunch of blocks, cancel some, then drain the queue.
	// The queue should only contain the kept entries.
	for _, index := range rand.Perm(len(alphabet)) { // add blocks for all letters
		letter := alphabet[index]
		t.Log(partner.String())

		c := cid.NewCidV0(u.Hash([]byte(letter)))
		prq.Push(&wantlist.Entry{Cid: c, Priority: math.MaxInt32 - index}, partner)
	}
	for _, consonant := range consonants {
		c := cid.NewCidV0(u.Hash([]byte(consonant)))
		prq.Remove(c, partner)
	}

	prq.fullThaw()

	var out []string
	for {
		received := prq.Pop()
		if received == nil {
			break
		}
		out = append(out, received.Entry.Cid.String())
	}

	// Entries popped should already be in the correct order.
	for i, expected := range vowels {
		exp := cid.NewCidV0(u.Hash([]byte(expected))).String()
		if out[i] != exp {
			t.Fatal("received", out[i], "expected", expected)
		}
	}
}
func TestClientOverMax(t *testing.T) {
	rs := NewServer()
	k := cid.NewCidV0(u.Hash([]byte("hello")))
	numProvidersForHelloKey := 100
	for i := 0; i < numProvidersForHelloKey; i++ {
		pi := testutil.RandIdentityOrFatal(t)
		err := rs.Client(pi).Provide(context.Background(), k)
		if err != nil {
			t.Fatal(err)
		}
	}

	max := 10
	pi := testutil.RandIdentityOrFatal(t)
	client := rs.Client(pi)

	providersFromClient := client.FindProvidersAsync(context.Background(), k, max)
	i := 0
	for range providersFromClient {
		i++
	}
	if i != max {
		t.Fatal("Too many providers returned")
	}
}
func TestClientFindProviders(t *testing.T) {
	pi := testutil.RandIdentityOrFatal(t)
	rs := NewServer()
	client := rs.Client(pi)

	k := cid.NewCidV0(u.Hash([]byte("hello")))
	err := client.Provide(context.Background(), k)
	if err != nil {
		t.Fatal(err)
	}

	// This is bad... but simulating networks is hard.
	time.Sleep(time.Millisecond * 300)
	max := 100

	providersFromClient := client.FindProvidersAsync(context.Background(), k, max)
	isInClient := false
	// Don't shadow the outer identity: compare each returned provider against
	// the providing client's peer ID.
	for p := range providersFromClient {
		if p.ID == pi.ID() {
			isInClient = true
		}
	}
	if !isInClient {
		t.Fatal("Despite client providing key, client didn't receive peer when finding providers")
	}
}
func TestValidAfter(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	pi := testutil.RandIdentityOrFatal(t)
	key := cid.NewCidV0(u.Hash([]byte("mock key")))
	conf := DelayConfig{
		ValueVisibility: delay.Fixed(1 * time.Hour),
		Query:           delay.Fixed(0),
	}

	rs := NewServerWithDelay(conf)

	rs.Client(pi).Provide(ctx, key)

	var providers []pstore.PeerInfo
	providers, err := rs.Client(pi).FindProviders(ctx, key)
	if err != nil {
		t.Fatal(err)
	}
	if len(providers) > 0 {
		t.Fail()
	}

	conf.ValueVisibility.Set(0)
	providers, err = rs.Client(pi).FindProviders(ctx, key)
	if err != nil {
		t.Fatal(err)
	}
	t.Log("providers", providers)
	if len(providers) != 1 {
		t.Fail()
	}
}
func TestRoutingResolve(t *testing.T) {
	dstore := dssync.MutexWrap(ds.NewMapDatastore())
	serv := mockrouting.NewServer()
	id := testutil.RandIdentityOrFatal(t)
	d := serv.ClientWithDatastore(context.Background(), id, dstore)

	resolver := NewRoutingResolver(d, 0)
	publisher := NewRoutingPublisher(d, dstore)

	privk, pubk, err := testutil.RandTestKeyPair(512)
	if err != nil {
		t.Fatal(err)
	}

	h := path.FromString("/ipfs/QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN")
	err = publisher.Publish(context.Background(), privk, h)
	if err != nil {
		t.Fatal(err)
	}

	pubkb, err := pubk.Bytes()
	if err != nil {
		t.Fatal(err)
	}

	pkhash := u.Hash(pubkb)
	res, err := resolver.Resolve(context.Background(), key.Key(pkhash).B58String())
	if err != nil {
		t.Fatal(err)
	}

	if res != h {
		t.Fatal("Got back incorrect value.")
	}
}
// NewRawNode creates a RawNode addressed by a raw-codec CIDv1 built from the
// default multihash of the data.
func NewRawNode(data []byte) *RawNode {
	h := u.Hash(data)
	c := cid.NewCidV1(cid.Raw, h)
	// The error can only occur if the CID does not match the data, which
	// cannot happen here since we just hashed the data ourselves.
	blk, _ := blocks.NewBlockWithCid(data, c)

	return &RawNode{blk}
}
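// Hypothetical usage sketch for NewRawNode, not part of the original tests.
// It assumes the bytes package is imported and that RawNode exposes Cid() and
// RawData() via its embedded block, as the constructor above suggests.
func TestNewRawNodeSketch(t *testing.T) {
	data := []byte("some raw bytes")
	nd := NewRawNode(data)

	// Raw nodes should be addressed by a CIDv1 with the raw codec.
	if nd.Cid().Prefix().Version != 1 || nd.Cid().Prefix().Codec != cid.Raw {
		t.Fatal("expected a raw-codec CIDv1")
	}
	// The stored bytes should round-trip unchanged.
	if !bytes.Equal(nd.RawData(), data) {
		t.Fatal("raw data did not round-trip")
	}
}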
// RandPeerID generates random "valid" peer IDs. It does not NEED to generate
// keys, because it is as if we lost the key right away; it is fine to read
// randomness and hash it. To generate proper keys and a corresponding PeerID,
// use:
//	sk, pk, _ := testutil.RandKeyPair()
//	id, _ := peer.IDFromPublicKey(pk)
func RandPeerID() (peer.ID, error) {
	buf := make([]byte, 16)
	if _, err := io.ReadFull(u.NewTimeSeededRand(), buf); err != nil {
		return "", err
	}
	h := u.Hash(buf)
	return peer.ID(h), nil
}
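// Hypothetical sketch contrasting the two approaches described in the comment
// above, written as if calling this package from an external test. It assumes
// the package is imported as testutil, that RandTestKeyPair (used elsewhere in
// these tests) is available, and that peer.IDFromPublicKey is imported from
// the peer package; treat the exact helper names as assumptions.
func randPeerIDSketch() error {
	// Key-less ID: hashed randomness, fine when the private key is never needed.
	id, err := testutil.RandPeerID()
	if err != nil {
		return err
	}
	_ = id

	// Key-backed ID: derive the ID from a real public key, so the private key
	// remains available for signing.
	sk, pk, err := testutil.RandTestKeyPair(512)
	if err != nil {
		return err
	}
	id2, err := peer.IDFromPublicKey(pk)
	if err != nil {
		return err
	}
	_, _ = sk, id2
	return nil
}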
func TestBlockReturnsErr(t *testing.T) {
	off := Exchange(bstore())
	c := cid.NewCidV0(u.Hash([]byte("foo")))
	_, err := off.GetBlock(context.Background(), c)
	if err != nil {
		return // as desired
	}
	t.Fail()
}
// TODO: does the dht ensure a node won't receive itself as a provider? Probably not.
func TestCanceledContext(t *testing.T) {
	rs := NewServer()
	k := cid.NewCidV0(u.Hash([]byte("hello")))

	// Avoid leaking the goroutine, without using the context to signal it.
	// (We want the goroutine to keep trying to publish on a cancelled context
	// until we've tested that it doesn't do anything.)
	done := make(chan struct{})
	defer func() { done <- struct{}{} }()

	t.Log("async'ly announce infinite stream of providers for key")
	i := 0
	go func() { // infinite stream
		for {
			select {
			case <-done:
				t.Log("exiting async worker")
				return
			default:
			}

			pi, err := testutil.RandIdentity()
			if err != nil {
				t.Error(err)
			}
			err = rs.Client(pi).Provide(context.Background(), k)
			if err != nil {
				t.Error(err)
			}
			i++
		}
	}()

	local := testutil.RandIdentityOrFatal(t)
	client := rs.Client(local)

	t.Log("warning: max is finite so this test is non-deterministic")
	t.Log("context cancellation could simply take lower priority")
	t.Log("and result in receiving the max number of results")
	max := 1000

	t.Log("cancel the context before consuming")
	ctx, cancelFunc := context.WithCancel(context.Background())
	cancelFunc()
	providers := client.FindProvidersAsync(ctx, k, max)

	numProvidersReturned := 0
	for range providers {
		numProvidersReturned++
	}
	t.Log(numProvidersReturned)

	if numProvidersReturned == max {
		t.Fatal("Context cancel had no effect")
	}
}
// This test checks that peers won't starve out other peers.
func TestPeerRepeats(t *testing.T) {
	prq := newPRQ()
	a := testutil.RandPeerIDFatal(t)
	b := testutil.RandPeerIDFatal(t)
	c := testutil.RandPeerIDFatal(t)
	d := testutil.RandPeerIDFatal(t)

	// Have each peer push some blocks.
	for i := 0; i < 5; i++ {
		elcid := cid.NewCidV0(u.Hash([]byte(fmt.Sprint(i))))
		prq.Push(&wantlist.Entry{Cid: elcid}, a)
		prq.Push(&wantlist.Entry{Cid: elcid}, b)
		prq.Push(&wantlist.Entry{Cid: elcid}, c)
		prq.Push(&wantlist.Entry{Cid: elcid}, d)
	}

	// Now pop off four entries; there should be one from each peer.
	var targets []string
	var tasks []*peerRequestTask
	for i := 0; i < 4; i++ {
		t := prq.Pop()
		targets = append(targets, t.Target.Pretty())
		tasks = append(tasks, t)
	}

	expected := []string{a.Pretty(), b.Pretty(), c.Pretty(), d.Pretty()}
	sort.Strings(expected)
	sort.Strings(targets)

	t.Log(targets)
	t.Log(expected)

	for i, s := range targets {
		if expected[i] != s {
			t.Fatal("unexpected peer", s, expected[i])
		}
	}

	// Now, if one of the tasks gets finished, the next task off the queue
	// should be for the same peer.
	for blockI := 0; blockI < 4; blockI++ {
		for i := 0; i < 4; i++ {
			// It's okay to mark the same task done multiple times here (JUST FOR TESTING).
			tasks[i].Done()

			ntask := prq.Pop()
			if ntask.Target != tasks[i].Target {
				t.Fatal("Expected task from peer with lowest active count")
			}
		}
	}
}
func TestKeyNotFound(t *testing.T) {
	var pi = testutil.RandIdentityOrFatal(t)
	var key = cid.NewCidV0(u.Hash([]byte("mock key")))
	var ctx = context.Background()

	rs := NewServer()
	providers := rs.Client(pi).FindProvidersAsync(ctx, key, 10)
	_, ok := <-providers
	if ok {
		t.Fatal("should be closed")
	}
}
// FWIW: At the time of this commit, including a timestamp in the task
// increases the time cost of Push by 3%.
func BenchmarkTaskQueuePush(b *testing.B) {
	q := newPRQ()
	peers := []peer.ID{
		testutil.RandPeerIDFatal(b),
		testutil.RandPeerIDFatal(b),
		testutil.RandPeerIDFatal(b),
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		c := cid.NewCidV0(u.Hash([]byte(fmt.Sprint(i))))
		q.Push(&wantlist.Entry{Cid: c, Priority: math.MaxInt32}, peers[i%len(peers)])
	}
}
// EncodeProtobuf returns the encoded raw data version of a Node instance.
// It may use a cached encoded version, unless the force flag is given.
func (n *ProtoNode) EncodeProtobuf(force bool) ([]byte, error) {
	sort.Stable(LinkSlice(n.links)) // keep links sorted
	if n.encoded == nil || force {
		n.cached = nil
		var err error
		n.encoded, err = n.Marshal()
		if err != nil {
			return nil, err
		}
	}

	if n.cached == nil {
		n.cached = cid.NewCidV0(u.Hash(n.encoded))
	}

	return n.encoded, nil
}
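// Hypothetical sketch of the caching contract implied by EncodeProtobuf, not
// part of the original tests: the first call marshals and caches, a second
// call without force should return identical bytes, and force triggers a
// re-marshal (useful after mutating the node). It assumes a ProtoNode
// constructor like NodeWithData and that the bytes package is imported.
func TestEncodeProtobufCachingSketch(t *testing.T) {
	n := NodeWithData([]byte("cache me"))

	first, err := n.EncodeProtobuf(false)
	if err != nil {
		t.Fatal(err)
	}
	second, err := n.EncodeProtobuf(false) // should reuse the cached encoding
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(first, second) {
		t.Fatal("cached encoding differs from the original encoding")
	}

	forced, err := n.EncodeProtobuf(true) // force a re-marshal
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(first, forced) {
		t.Fatal("re-marshalling an unchanged node should be deterministic")
	}
}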
func TestBlocks(t *testing.T) {
	bstore := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
	bs := New(bstore, offline.Exchange(bstore))
	defer bs.Close()

	o := newObject([]byte("beep boop"))
	h := cid.NewCidV0(u.Hash([]byte("beep boop")))
	if !o.Cid().Equals(h) {
		t.Error("Block key and data multihash key not equal")
	}

	k, err := bs.AddBlock(o)
	if err != nil {
		t.Error("failed to add block to BlockService", err)
		return
	}

	if !k.Equals(o.Cid()) {
		t.Error("returned key is not equal to block key", err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
	defer cancel()
	b2, err := bs.GetBlock(ctx, o.Cid())
	if err != nil {
		t.Error("failed to retrieve block from BlockService", err)
		return
	}

	if !o.Cid().Equals(b2.Cid()) {
		t.Error("Block keys not equal.")
	}

	if !bytes.Equal(o.RawData(), b2.RawData()) {
		t.Error("Block data is not equal.")
	}
}
// NewBlock creates a Block object from opaque data. It will hash the data.
func NewBlock(data []byte) *BasicBlock {
	// TODO: fix assumptions
	return &BasicBlock{data: data, cid: cid.NewCidV0(u.Hash(data))}
}
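// Hypothetical usage sketch for NewBlock, not part of the original tests: the
// CID assigned by NewBlock is simply the CIDv0 wrapping of the data's default
// multihash, so it should match a CID computed by hand from the same bytes.
// Assumes the bytes package is imported.
func TestNewBlockCidSketch(t *testing.T) {
	data := []byte("some data")
	blk := NewBlock(data)

	want := cid.NewCidV0(u.Hash(data))
	if !blk.Cid().Equals(want) {
		t.Fatal("NewBlock CID does not match the hand-computed CIDv0")
	}
	if !bytes.Equal(blk.RawData(), data) {
		t.Fatal("block data did not round-trip")
	}
}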
func mkFakeCid(s string) *cid.Cid {
	return cid.NewCidV0(u.Hash([]byte(s)))
}