// This test checks that peers won't starve out other peers.
func TestPeerRepeats(t *testing.T) {
	prq := newPRQ()
	a := testutil.RandPeerIDFatal(t)
	b := testutil.RandPeerIDFatal(t)
	c := testutil.RandPeerIDFatal(t)
	d := testutil.RandPeerIDFatal(t)

	// Have each peer push some blocks. strconv.Itoa avoids the int-to-string
	// rune conversion that go vet flags; the keys only need to be distinct.
	for i := 0; i < 5; i++ {
		k := key.Key(strconv.Itoa(i))
		prq.Push(wantlist.Entry{Key: k}, a)
		prq.Push(wantlist.Entry{Key: k}, b)
		prq.Push(wantlist.Entry{Key: k}, c)
		prq.Push(wantlist.Entry{Key: k}, d)
	}

	// Now pop off four entries; there should be one from each peer.
	// (Named task, not t, so the *testing.T is not shadowed.)
	var targets []string
	var tasks []*peerRequestTask
	for i := 0; i < 4; i++ {
		task := prq.Pop()
		targets = append(targets, task.Target.Pretty())
		tasks = append(tasks, task)
	}

	expected := []string{a.Pretty(), b.Pretty(), c.Pretty(), d.Pretty()}
	sort.Strings(expected)
	sort.Strings(targets)

	t.Log(targets)
	t.Log(expected)
	for i, s := range targets {
		if expected[i] != s {
			t.Fatal("unexpected peer", s, expected[i])
		}
	}

	// Now, if one of the tasks gets finished, the next task off the queue
	// should be for the same peer.
	for blockI := 0; blockI < 4; blockI++ {
		for i := 0; i < 4; i++ {
			// It's okay to mark the same task done multiple times here (JUST FOR TESTING).
			tasks[i].Done()

			ntask := prq.Pop()
			if ntask.Target != tasks[i].Target {
				t.Fatal("Expected task from peer with lowest active count")
			}
		}
	}
}
func TestDialBadAddrs(t *testing.T) {
	m := func(s string) ma.Multiaddr {
		maddr, err := ma.NewMultiaddr(s)
		if err != nil {
			t.Fatal(err)
		}
		return maddr
	}

	ctx := context.Background()
	s := makeSwarms(ctx, t, 1)[0]

	test := func(a ma.Multiaddr) {
		p := testutil.RandPeerIDFatal(t)
		s.peers.AddAddr(p, a, peer.PermanentAddrTTL)
		if _, err := s.Dial(ctx, p); err == nil {
			// t.Errorf (not t.Error) is needed for the format verb, and the
			// value to report is the address under test, not the constructor m.
			t.Errorf("swarm should not dial: %s", a)
		}
	}

	test(m("/ip6/fe80::1"))                // link local
	test(m("/ip6/fe80::100"))              // link local
	test(m("/ip4/127.0.0.1/udp/1234/utp")) // utp
}
func TestTableFindMultiple(t *testing.T) {
	local := tu.RandPeerIDFatal(t)
	m := peer.NewMetrics()
	rt := NewRoutingTable(20, ConvertPeerID(local), time.Hour, m)

	peers := make([]peer.ID, 100)
	for i := 0; i < 18; i++ {
		peers[i] = tu.RandPeerIDFatal(t)
		rt.Update(peers[i])
	}

	t.Logf("Searching for peer: '%s'", peers[2])
	found := rt.NearestPeers(ConvertPeerID(peers[2]), 15)
	if len(found) != 15 {
		t.Fatalf("Got back different number of peers than we expected.")
	}
}
func TestTableFind(t *testing.T) {
	local := tu.RandPeerIDFatal(t)
	m := peer.NewMetrics()
	rt := NewRoutingTable(10, ConvertPeerID(local), time.Hour, m)

	peers := make([]peer.ID, 100)
	for i := 0; i < 5; i++ {
		peers[i] = tu.RandPeerIDFatal(t)
		rt.Update(peers[i])
	}

	t.Logf("Searching for peer: '%s'", peers[2])
	found := rt.NearestPeer(ConvertPeerID(peers[2]))
	if found != peers[2] {
		t.Fatalf("Failed to lookup known node...")
	}
}
func TestPushPop(t *testing.T) {
	prq := newPRQ()
	partner := testutil.RandPeerIDFatal(t)
	alphabet := strings.Split("abcdefghijklmnopqrstuvwxyz", "")
	vowels := strings.Split("aeiou", "")

	consonants := func() []string {
		var out []string
		for _, letter := range alphabet {
			skip := false
			for _, vowel := range vowels {
				if letter == vowel {
					skip = true
				}
			}
			if !skip {
				out = append(out, letter)
			}
		}
		return out
	}()

	sort.Strings(alphabet)
	sort.Strings(vowels)
	sort.Strings(consonants)

	// Add a bunch of blocks, cancel some, then drain the queue.
	// The queue should only have the kept entries.
	for _, index := range rand.Perm(len(alphabet)) { // add blocks for all letters
		letter := alphabet[index]
		t.Log(partner.String())
		prq.Push(wantlist.Entry{Key: key.Key(letter), Priority: math.MaxInt32 - index}, partner)
	}
	for _, consonant := range consonants {
		prq.Remove(key.Key(consonant), partner)
	}

	var out []string
	for {
		received := prq.Pop()
		if received == nil {
			break
		}
		out = append(out, string(received.Entry.Key))
	}

	// Entries popped should already be in the correct (priority) order.
	for i, expected := range vowels {
		if out[i] != expected {
			t.Fatal("received", out[i], "expected", expected)
		}
	}
}
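// newPRQ and the queue it returns are not shown in this file. From their use
// in TestPeerRepeats and TestPushPop above, the queue exposes roughly this
// surface; a sketch of the assumed shape, not the real declaration:
type peerRequestQueueSketch interface {
	Push(entry wantlist.Entry, to peer.ID) // enqueue a wanted block for a peer
	Pop() *peerRequestTask                 // dequeue the next task, fairly across peers
	Remove(k key.Key, p peer.ID)           // drop a pending entry (e.g. on cancel)
}

// The popped *peerRequestTask carries Target (the requesting peer) and Entry
// (the wantlist entry), and its Done() method reports the task finished so
// the peer's active count drops.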
// Right now, this just makes sure that it doesn't hang or crash.
func TestTableUpdate(t *testing.T) {
	local := tu.RandPeerIDFatal(t)
	m := peer.NewMetrics()
	rt := NewRoutingTable(10, ConvertPeerID(local), time.Hour, m)

	peers := make([]peer.ID, 100)
	for i := 0; i < 100; i++ {
		peers[i] = tu.RandPeerIDFatal(t)
	}

	// Testing Update
	for i := 0; i < 10000; i++ {
		rt.Update(peers[rand.Intn(len(peers))])
	}

	for i := 0; i < 100; i++ {
		id := ConvertPeerID(tu.RandPeerIDFatal(t))
		ret := rt.NearestPeers(id, 5)
		if len(ret) == 0 {
			t.Fatal("Failed to find node near ID.")
		}
	}
}
// Test basic features of the bucket struct.
func TestBucket(t *testing.T) {
	b := newBucket()

	peers := make([]peer.ID, 100)
	for i := 0; i < 100; i++ {
		peers[i] = tu.RandPeerIDFatal(t)
		b.PushFront(peers[i])
	}

	local := tu.RandPeerIDFatal(t)
	localID := ConvertPeerID(local)

	i := rand.Intn(len(peers))
	if !b.Has(peers[i]) {
		t.Errorf("Failed to find peer: %v", peers[i])
	}

	spl := b.Split(0, ConvertPeerID(local))
	llist := b.list
	for e := llist.Front(); e != nil; e = e.Next() {
		p := ConvertPeerID(e.Value.(peer.ID))
		cpl := commonPrefixLen(p, localID)
		if cpl > 0 {
			t.Fatalf("Split failed. found id with cpl > 0 in 0 bucket")
		}
	}

	rlist := spl.list
	for e := rlist.Front(); e != nil; e = e.Next() {
		p := ConvertPeerID(e.Value.(peer.ID))
		cpl := commonPrefixLen(p, localID)
		if cpl == 0 {
			t.Fatalf("Split failed. found id with cpl == 0 in non 0 bucket")
		}
	}
}
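// commonPrefixLen is not shown in this file. A minimal sketch of the behavior
// TestBucket assumes (the number of leading bits two XOR-keyspace IDs share),
// assuming ID is a byte-slice-backed type of equal length for both arguments;
// the package's real implementation may differ. Needs "math/bits".
func commonPrefixLenSketch(a, b ID) int {
	for i := 0; i < len(a); i++ {
		if x := a[i] ^ b[i]; x != 0 {
			return i*8 + bits.LeadingZeros8(x) // index of the first differing bit
		}
	}
	return len(a) * 8 // identical IDs share every bit
}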
func BenchmarkUpdates(b *testing.B) {
	b.StopTimer()
	local := ConvertKey("localKey")
	m := peer.NewMetrics()
	tab := NewRoutingTable(20, local, time.Hour, m)

	var peers []peer.ID
	for i := 0; i < b.N; i++ {
		peers = append(peers, tu.RandPeerIDFatal(b))
	}

	b.StartTimer()
	for i := 0; i < b.N; i++ {
		tab.Update(peers[i])
	}
}
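// A companion lookup benchmark; a sketch, not part of the original file. It
// assumes only APIs already exercised in these tests (ConvertKey, Update, and
// the Find method used by TestTableMultithreaded below).
func BenchmarkFinds(b *testing.B) {
	b.StopTimer()
	local := ConvertKey("localKey")
	m := peer.NewMetrics()
	tab := NewRoutingTable(20, local, time.Hour, m)

	// Populate the table before the timer starts so only lookups are measured.
	var peers []peer.ID
	for i := 0; i < b.N; i++ {
		peers = append(peers, tu.RandPeerIDFatal(b))
		tab.Update(peers[i])
	}

	b.StartTimer()
	for i := 0; i < b.N; i++ {
		tab.Find(peers[i])
	}
}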
func newSilentPeer(t *testing.T) (peer.ID, ma.Multiaddr, net.Listener) {
	dst := testutil.RandPeerIDFatal(t)

	lst, err := net.Listen("tcp", ":0")
	if err != nil {
		t.Fatal(err)
	}

	addr, err := manet.FromNetAddr(lst.Addr())
	if err != nil {
		t.Fatal(err)
	}

	addrs := []ma.Multiaddr{addr}
	addrs, err = addrutil.ResolveUnspecifiedAddresses(addrs, nil)
	if err != nil {
		t.Fatal(err)
	}

	t.Log("new silent peer:", dst, addrs[0])
	return dst, addrs[0], lst
}
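// A hypothetical use of newSilentPeer in a dial test. acceptAndHang is an
// assumed helper (not shown here) that accepts inbound connections and never
// responds, so dials to the silent peer stall until they time out:
//
//	p, addr, lst := newSilentPeer(t)
//	go acceptAndHang(lst)
//	defer lst.Close()
//	s.peers.AddAddr(p, addr, peer.PermanentAddrTTL)
//	_, err := s.Dial(ctx, p) // expected to block, then fail with a timeout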
func TestPartnerWantsThenCancels(t *testing.T) {
	numRounds := 10
	if testing.Short() {
		numRounds = 1
	}
	alphabet := strings.Split("abcdefghijklmnopqrstuvwxyz", "")
	vowels := strings.Split("aeiou", "")

	type testCase [][]string
	testcases := []testCase{
		{
			alphabet, vowels,
		},
		{
			alphabet, stringsComplement(alphabet, vowels),
		},
	}

	bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
	for _, letter := range alphabet {
		block := blocks.NewBlock([]byte(letter))
		if err := bs.Put(block); err != nil {
			t.Fatal(err)
		}
	}

	for i := 0; i < numRounds; i++ {
		for _, testcase := range testcases {
			set := testcase[0]
			cancels := testcase[1]
			keeps := stringsComplement(set, cancels)

			e := NewEngine(context.Background(), bs)
			partner := testutil.RandPeerIDFatal(t)

			partnerWants(e, set, partner)
			partnerCancels(e, cancels, partner)
			if err := checkHandledInOrder(t, e, keeps); err != nil {
				t.Logf("run #%d of %d", i, numRounds)
				t.Fatal(err)
			}
		}
	}
}
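// partnerWants, partnerCancels, and checkHandledInOrder are helpers assumed to
// be defined elsewhere in this test file. stringsComplement is not shown in
// this excerpt either, but its required behavior is unambiguous from how the
// test uses it: the elements of set that do not appear in subset, with order
// preserved. A minimal implementation consistent with that use:
func stringsComplement(set, subset []string) []string {
	m := make(map[string]struct{}, len(subset))
	for _, s := range subset {
		m[s] = struct{}{}
	}
	var out []string
	for _, s := range set {
		if _, ok := m[s]; !ok {
			out = append(out, s)
		}
	}
	return out
}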
// Looks for race conditions in table operations. For a more 'certain'
// test, increase the loop counter from 1000 to a much higher number
// and set GOMAXPROCS above 1.
func TestTableMultithreaded(t *testing.T) {
	local := peer.ID("localPeer")
	m := peer.NewMetrics()
	tab := NewRoutingTable(20, ConvertPeerID(local), time.Hour, m)

	var peers []peer.ID
	for i := 0; i < 500; i++ {
		peers = append(peers, tu.RandPeerIDFatal(t))
	}

	done := make(chan struct{})
	go func() {
		for i := 0; i < 1000; i++ {
			n := rand.Intn(len(peers))
			tab.Update(peers[n])
		}
		done <- struct{}{}
	}()

	go func() {
		for i := 0; i < 1000; i++ {
			n := rand.Intn(len(peers))
			tab.Update(peers[n])
		}
		done <- struct{}{}
	}()

	go func() {
		for i := 0; i < 1000; i++ {
			n := rand.Intn(len(peers))
			tab.Find(peers[n])
		}
		done <- struct{}{}
	}()

	<-done
	<-done
	<-done
}