func TestClientOverMax(t *testing.T) {
	rs := NewServer()
	k := key.Key("hello")
	numProvidersForHelloKey := 100
	for i := 0; i < numProvidersForHelloKey; i++ {
		pi := testutil.RandIdentityOrFatal(t)
		err := rs.Client(pi).Provide(context.Background(), k)
		if err != nil {
			t.Fatal(err)
		}
	}

	max := 10
	pi := testutil.RandIdentityOrFatal(t)
	client := rs.Client(pi)

	providersFromClient := client.FindProvidersAsync(context.Background(), k, max)
	i := 0
	for range providersFromClient {
		i++
	}
	if i != max {
		t.Fatal("Too many providers returned")
	}
}
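// TestRoutingResolve publishes a path under a freshly generated keypair and
// checks that resolving the hash of the public key returns the same path.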
func TestRoutingResolve(t *testing.T) {
	d := mockrouting.NewServer().Client(testutil.RandIdentityOrFatal(t))
	resolver := NewRoutingResolver(d)
	publisher := NewRoutingPublisher(d)

	privk, pubk, err := testutil.RandTestKeyPair(512)
	if err != nil {
		t.Fatal(err)
	}

	h := path.FromString("/ipfs/QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN")
	err = publisher.Publish(context.Background(), privk, h)
	if err != nil {
		t.Fatal(err)
	}

	pubkb, err := pubk.Bytes()
	if err != nil {
		t.Fatal(err)
	}

	pkhash := u.Hash(pubkb)
	res, err := resolver.Resolve(context.Background(), key.Key(pkhash).Pretty())
	if err != nil {
		t.Fatal(err)
	}

	if res != h {
		t.Fatal("Got back incorrect value.")
	}
}
func TestAddLink(t *testing.T) {
	ds := mdtest.Mock()
	fishnode := &dag.Node{
		Data: []byte("fishcakes!"),
	}

	fk, err := ds.Add(fishnode)
	if err != nil {
		t.Fatal(err)
	}

	nd := new(dag.Node)
	nnode, err := addLink(context.Background(), ds, nd, "fish", fishnode)
	if err != nil {
		t.Fatal(err)
	}

	fnprime, err := nnode.GetLinkedNode(context.Background(), ds, "fish")
	if err != nil {
		t.Fatal(err)
	}

	fnpkey, err := fnprime.Key()
	if err != nil {
		t.Fatal(err)
	}

	if fnpkey != fk {
		t.Fatal("wrong child node found!")
	}
}
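// TestClientFindProviders has a client provide a key and then checks that the
// same peer is returned when asking the mock router for providers of that key.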
func TestClientFindProviders(t *testing.T) {
	pi := testutil.RandIdentityOrFatal(t)
	rs := NewServer()
	client := rs.Client(pi)

	k := key.Key("hello")
	err := client.Provide(context.Background(), k)
	if err != nil {
		t.Fatal(err)
	}

	// This is bad... but simulating networks is hard
	time.Sleep(time.Millisecond * 300)

	max := 100
	providersFromClient := client.FindProvidersAsync(context.Background(), key.Key("hello"), max)
	isInClient := false
	// compare each returned provider against the providing identity
	// (the original loop shadowed pi and compared it to itself)
	for p := range providersFromClient {
		if p.ID == pi.ID() {
			isInClient = true
		}
	}
	if !isInClient {
		t.Fatal("Despite client providing key, client didn't receive peer when finding providers")
	}
}
// TODO does dht ensure won't receive self as a provider? probably not.
func TestCanceledContext(t *testing.T) {
	rs := NewServer()
	k := key.Key("hello")

	// avoid leaking goroutine, without using the context to signal
	// (we want the goroutine to keep trying to publish on a
	// cancelled context until we've tested it doesn't do anything.)
	done := make(chan struct{})
	defer func() { done <- struct{}{} }()

	t.Log("async'ly announce infinite stream of providers for key")
	i := 0
	go func() { // infinite stream
		for {
			select {
			case <-done:
				t.Log("exiting async worker")
				return
			default:
			}

			pi, err := testutil.RandIdentity()
			if err != nil {
				t.Error(err)
			}
			err = rs.Client(pi).Provide(context.Background(), k)
			if err != nil {
				t.Error(err)
			}
			i++
		}
	}()

	local := testutil.RandIdentityOrFatal(t)
	client := rs.Client(local)

	t.Log("warning: max is finite so this test is non-deterministic")
	t.Log("context cancellation could simply take lower priority")
	t.Log("and result in receiving the max number of results")
	max := 1000

	t.Log("cancel the context before consuming")
	ctx, cancelFunc := context.WithCancel(context.Background())
	cancelFunc()
	providers := client.FindProvidersAsync(ctx, k, max)

	numProvidersReturned := 0
	for range providers {
		numProvidersReturned++
	}
	t.Log(numProvidersReturned)

	if numProvidersReturned == max {
		t.Fatal("Context cancel had no effect")
	}
}
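// ExampleEventLogger shows the two equivalent ways of ending an event:
// calling Done, or Close via the io.Closer interface.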
func ExampleEventLogger() {
	{
		log := EventLogger(nil)
		e := log.EventBegin(context.Background(), "dial")
		e.Done()
	}
	{
		log := EventLogger(nil)
		e := log.EventBegin(context.Background(), "dial")
		_ = e.Close() // implements io.Closer for convenience
	}
}
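// TestMultiWrite writes a buffer through the DagModifier one byte at a time,
// checking the reported size after each write, and then reads the resulting
// DAG back to verify the final contents.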
func TestMultiWrite(t *testing.T) {
	dserv, pins := getMockDagServ(t)
	_, n := getNode(t, dserv, 0, pins)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	dagmod, err := NewDagModifier(ctx, n, dserv, pins, sizeSplitterGen(512))
	if err != nil {
		t.Fatal(err)
	}

	data := make([]byte, 4000)
	u.NewTimeSeededRand().Read(data)

	for i := 0; i < len(data); i++ {
		n, err := dagmod.WriteAt(data[i:i+1], int64(i))
		if err != nil {
			t.Fatal(err)
		}
		if n != 1 {
			t.Fatal("Somehow wrote the wrong number of bytes! (n != 1)")
		}

		size, err := dagmod.Size()
		if err != nil {
			t.Fatal(err)
		}

		if size != int64(i+1) {
			t.Fatal("Size was reported incorrectly")
		}
	}

	nd, err := dagmod.GetNode()
	if err != nil {
		t.Fatal(err)
	}

	read, err := uio.NewDagReader(context.Background(), nd, dserv)
	if err != nil {
		t.Fatal(err)
	}

	rbuf, err := ioutil.ReadAll(read)
	if err != nil {
		t.Fatal(err)
	}

	err = arrComp(rbuf, data)
	if err != nil {
		t.Fatal(err)
	}
}
func TestGetBlocks(t *testing.T) {
	store := bstore()
	ex := Exchange(store)
	g := blocksutil.NewBlockGenerator()

	expected := g.Blocks(2)

	for _, b := range expected {
		if err := ex.HasBlock(b); err != nil {
			t.Fail()
		}
	}

	request := func() []key.Key {
		var ks []key.Key
		for _, b := range expected {
			ks = append(ks, b.Key())
		}
		return ks
	}()

	received, err := ex.GetBlocks(context.Background(), request)
	if err != nil {
		t.Fatal(err)
	}

	var count int
	for range received {
		count++
	}
	if len(expected) != count {
		t.Fail()
	}
}
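// TestConnHandler registers a connection handler on one swarm, connects all
// swarms, and verifies the handler observed the expected number of connections.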
func TestConnHandler(t *testing.T) {
	// t.Skip("skipping for another test")
	t.Parallel()

	ctx := context.Background()
	swarms := makeSwarms(ctx, t, 5)

	gotconn := make(chan struct{}, 10)
	swarms[0].SetConnHandler(func(conn *Conn) {
		gotconn <- struct{}{}
	})

	connectSwarms(t, ctx, swarms)

	<-time.After(time.Millisecond)
	// should've gotten 5 by now.

	swarms[0].SetConnHandler(nil)

	expect := 4
	for i := 0; i < expect; i++ {
		select {
		case <-time.After(time.Second):
			t.Fatal("failed to get connections")
		case <-gotconn:
		}
	}

	select {
	case <-gotconn:
		t.Fatalf("should have connected to %d swarms", expect)
	default:
	}
}
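// TestFindPeer connects four DHT nodes through dhts[1] and checks that
// dhts[0] can locate peers[2] via FindPeer.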
func TestFindPeer(t *testing.T) {
	// t.Skip("skipping test to debug another")
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()

	_, peers, dhts := setupDHTS(ctx, 4, t)
	defer func() {
		for i := 0; i < 4; i++ {
			dhts[i].Close()
			dhts[i].host.Close()
		}
	}()

	connect(t, ctx, dhts[0], dhts[1])
	connect(t, ctx, dhts[1], dhts[2])
	connect(t, ctx, dhts[1], dhts[3])

	ctxT, cancel := context.WithTimeout(ctx, time.Second)
	defer cancel()
	p, err := dhts[0].FindPeer(ctxT, peers[2])
	if err != nil {
		t.Fatal(err)
	}

	if p.ID == "" {
		t.Fatal("Failed to find peer.")
	}

	if p.ID != peers[2] {
		t.Fatal("Didn't find expected peer.")
	}
}
func BenchmarkDagmodWrite(b *testing.B) {
	b.StopTimer()
	dserv, pins := getMockDagServ(b)
	_, n := getNode(b, dserv, 0, pins)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	wrsize := 4096

	dagmod, err := NewDagModifier(ctx, n, dserv, pins, sizeSplitterGen(512))
	if err != nil {
		b.Fatal(err)
	}

	buf := make([]byte, b.N*wrsize)
	u.NewTimeSeededRand().Read(buf)
	b.StartTimer()
	b.SetBytes(int64(wrsize))
	for i := 0; i < b.N; i++ {
		n, err := dagmod.Write(buf[i*wrsize : (i+1)*wrsize])
		if err != nil {
			b.Fatal(err)
		}
		if n != wrsize {
			b.Fatal("Wrote bad size")
		}
	}
}
func TestBuilderConsistency(t *testing.T) {
	nbytes := 100000
	buf := new(bytes.Buffer)
	io.CopyN(buf, u.NewTimeSeededRand(), int64(nbytes))
	should := dup(buf.Bytes())
	dagserv := mdtest.Mock()
	nd, err := buildTestDag(dagserv, chunk.DefaultSplitter(buf))
	if err != nil {
		t.Fatal(err)
	}

	r, err := uio.NewDagReader(context.Background(), nd, dagserv)
	if err != nil {
		t.Fatal(err)
	}

	out, err := ioutil.ReadAll(r)
	if err != nil {
		t.Fatal(err)
	}

	err = arrComp(out, should)
	if err != nil {
		t.Fatal(err)
	}
}
func TestSeekEndSingleBlockFile(t *testing.T) {
	nbytes := int64(100)
	should := make([]byte, nbytes)
	u.NewTimeSeededRand().Read(should)

	read := bytes.NewReader(should)
	ds := mdtest.Mock()
	nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 5000))
	if err != nil {
		t.Fatal(err)
	}

	rs, err := uio.NewDagReader(context.Background(), nd, ds)
	if err != nil {
		t.Fatal(err)
	}

	seeked, err := rs.Seek(0, os.SEEK_END)
	if err != nil {
		t.Fatal(err)
	}
	if seeked != nbytes {
		t.Fatal("Failed to seek to end")
	}
}
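// testInsert adds a child node at the given path, optionally creating
// intermediate nodes, and asserts either the expected error or that the
// child ends up linked at that path.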
func testInsert(t *testing.T, e *Editor, path, data string, create bool, experr string) {
	child := &dag.Node{Data: []byte(data)}
	ck, err := e.ds.Add(child)
	if err != nil {
		t.Fatal(err)
	}

	var c func() *dag.Node
	if create {
		c = func() *dag.Node {
			return &dag.Node{}
		}
	}

	err = e.InsertNodeAtPath(context.Background(), path, child, c)
	if experr != "" {
		var got string
		if err != nil {
			got = err.Error()
		}
		if got != experr {
			t.Fatalf("expected '%s' but got '%s'", experr, got)
		}
		return
	}

	if err != nil {
		t.Fatal(err)
	}

	assertNodeAtPath(t, e.ds, e.root, path, ck)
}
func TestDeadlineFractionCancel(t *testing.T) {
	ctx1, cancel1 := context.WithTimeout(context.Background(), 10*time.Millisecond)
	ctx2, cancel2 := WithDeadlineFraction(ctx1, 0.5)

	select {
	case <-ctx1.Done():
		t.Fatal("ctx1 ended too early")
	case <-ctx2.Done():
		t.Fatal("ctx2 ended too early")
	default:
	}

	cancel2()

	select {
	case <-ctx1.Done():
		t.Fatal("ctx1 should NOT be cancelled")
	case <-ctx2.Done():
	default:
		t.Fatal("ctx2 should be cancelled")
	}

	cancel1()

	select {
	case <-ctx1.Done():
	case <-ctx2.Done():
	default:
		t.Fatal("ctx1 should be cancelled")
	}
}
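// TestDialBadAddrs verifies that dialing peers known only by undialable
// addresses (link-local and utp) fails.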
func TestDialBadAddrs(t *testing.T) {
	m := func(s string) ma.Multiaddr {
		maddr, err := ma.NewMultiaddr(s)
		if err != nil {
			t.Fatal(err)
		}
		return maddr
	}

	ctx := context.Background()
	s := makeSwarms(ctx, t, 1)[0]

	test := func(a ma.Multiaddr) {
		p := testutil.RandPeerIDFatal(t)
		s.peers.AddAddr(p, a, peer.PermanentAddrTTL)
		if _, err := s.Dial(ctx, p); err == nil {
			t.Errorf("swarm should not dial: %s", a)
		}
	}

	test(m("/ip6/fe80::1"))                  // link local
	test(m("/ip6/fe80::100"))                // link local
	test(m("/ip4/127.0.0.1/udp/1234/utp"))   // utp
}
func TestFilterBounds(t *testing.T) {
	ctx := context.Background()
	swarms := makeSwarms(ctx, t, 2)

	conns := make(chan struct{}, 8)
	swarms[0].SetConnHandler(func(conn *Conn) {
		conns <- struct{}{}
	})

	// Address that we won't be dialing from
	_, block, err := net.ParseCIDR("192.0.0.1/8")
	if err != nil {
		t.Fatal(err)
	}

	// set filter on both sides, shouldn't matter
	swarms[1].Filters.AddDialFilter(block)
	swarms[0].Filters.AddDialFilter(block)

	connectSwarms(t, ctx, swarms)

	select {
	case <-time.After(time.Second):
		t.Fatal("should have gotten connection")
	case <-conns:
		t.Log("got connect")
	}
}
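// TestAddrBlocking verifies that an address filter on one swarm prevents both
// its outgoing dials to, and incoming connections from, the blocked range.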
func TestAddrBlocking(t *testing.T) {
	ctx := context.Background()
	swarms := makeSwarms(ctx, t, 2)

	swarms[0].SetConnHandler(func(conn *Conn) {
		t.Fatal("no connections should happen!")
	})

	_, block, err := net.ParseCIDR("127.0.0.1/8")
	if err != nil {
		t.Fatal(err)
	}

	swarms[1].Filters.AddDialFilter(block)

	swarms[1].peers.AddAddr(swarms[0].LocalPeer(), swarms[0].ListenAddresses()[0], peer.PermanentAddrTTL)
	_, err = swarms[1].Dial(ctx, swarms[0].LocalPeer())
	if err == nil {
		t.Fatal("dial should have failed")
	}

	swarms[0].peers.AddAddr(swarms[1].LocalPeer(), swarms[1].ListenAddresses()[0], peer.PermanentAddrTTL)
	_, err = swarms[0].Dial(ctx, swarms[1].LocalPeer())
	if err == nil {
		t.Fatal("dial should have failed")
	}
}
func TestIndirectBlocks(t *testing.T) {
	splitter := chunk.SizeSplitterGen(512)
	nbytes := 1024 * 1024
	buf := make([]byte, nbytes)
	u.NewTimeSeededRand().Read(buf)

	read := bytes.NewReader(buf)

	ds := mdtest.Mock()
	dag, err := buildTestDag(ds, splitter(read))
	if err != nil {
		t.Fatal(err)
	}

	reader, err := uio.NewDagReader(context.Background(), dag, ds)
	if err != nil {
		t.Fatal(err)
	}

	out, err := ioutil.ReadAll(reader)
	if err != nil {
		t.Fatal(err)
	}

	if !bytes.Equal(out, buf) {
		t.Fatal("Not equal!")
	}
}
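// addDefaultAssets seeds the init documentation into a freshly opened repo
// and prints instructions for reading it.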
func addDefaultAssets(out io.Writer, repoRoot string) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	r, err := fsrepo.Open(repoRoot)
	if err != nil { // NB: repo is owned by the node
		return err
	}

	nd, err := core.NewNode(ctx, &core.BuildCfg{Repo: r})
	if err != nil {
		return err
	}
	defer nd.Close()

	dkey, err := assets.SeedInitDocs(nd)
	if err != nil {
		return fmt.Errorf("init: seeding init docs failed: %s", err)
	}
	log.Debugf("init: seeded init docs %s", dkey)

	if _, err = fmt.Fprintf(out, "to get started, enter:\n"); err != nil {
		return err
	}

	_, err = fmt.Fprintf(out, "\n\tipfs cat /ipfs/%s/readme\n\n", dkey)
	return err
}
func testFileConsistency(t *testing.T, bs chunk.SplitterGen, nbytes int) {
	should := make([]byte, nbytes)
	u.NewTimeSeededRand().Read(should)

	read := bytes.NewReader(should)
	ds := mdtest.Mock()
	nd, err := buildTestDag(ds, bs(read))
	if err != nil {
		t.Fatal(err)
	}

	r, err := uio.NewDagReader(context.Background(), nd, ds)
	if err != nil {
		t.Fatal(err)
	}

	out, err := ioutil.ReadAll(r)
	if err != nil {
		t.Fatal(err)
	}

	err = arrComp(out, should)
	if err != nil {
		t.Fatal(err)
	}
}
func TestSimultOpen(t *testing.T) {
	// t.Skip("skipping for another test")
	t.Parallel()

	ctx := context.Background()
	swarms := makeSwarms(ctx, t, 2)

	// connect everyone
	{
		var wg sync.WaitGroup
		connect := func(s *Swarm, dst peer.ID, addr ma.Multiaddr) {
			// copy for other peer
			log.Debugf("TestSimultOpen: connecting: %s --> %s (%s)", s.local, dst, addr)
			s.peers.AddAddr(dst, addr, peer.PermanentAddrTTL)
			if _, err := s.Dial(ctx, dst); err != nil {
				t.Fatal("error swarm dialing to peer", err)
			}
			wg.Done()
		}

		log.Info("Connecting swarms simultaneously.")
		wg.Add(2)
		go connect(swarms[0], swarms[1].local, swarms[1].ListenAddresses()[0])
		go connect(swarms[1], swarms[0].local, swarms[0].ListenAddresses()[0])
		wg.Wait()
	}

	for _, s := range swarms {
		s.Close()
	}
}
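// RunSupernodePutRecordGetRecord spins up a small supernode network, puts a
// record from one client, and verifies another client can get it back intact.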
func RunSupernodePutRecordGetRecord(conf testutil.LatencyConfig) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	servers, clients, err := InitializeSupernodeNetwork(ctx, 2, 2, conf)
	if err != nil {
		return err
	}
	for _, n := range append(servers, clients...) {
		defer n.Close()
	}

	putter := clients[0]
	getter := clients[1]

	k := key.Key("key")
	note := []byte("a note from putter")

	if err := putter.Routing.PutValue(ctx, k, note); err != nil {
		return fmt.Errorf("failed to put value: %s", err)
	}

	received, err := getter.Routing.GetValue(ctx, k)
	if err != nil {
		return fmt.Errorf("failed to get value: %s", err)
	}

	if !bytes.Equal(note, received) {
		return errors.New("record doesn't match")
	}
	return nil
}
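// MockCmdsCtx builds a commands.Context backed by an in-memory repo and a
// random identity, for use in tests.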
func MockCmdsCtx() (commands.Context, error) {
	// Generate Identity
	ident, err := testutil.RandIdentity()
	if err != nil {
		return commands.Context{}, err
	}
	p := ident.ID()

	conf := config.Config{
		Identity: config.Identity{
			PeerID: p.String(),
		},
	}

	r := &repo.Mock{
		D: ds2.CloserWrap(syncds.MutexWrap(datastore.NewMapDatastore())),
		C: conf,
	}

	node, err := core.NewNode(context.Background(), &core.BuildCfg{
		Repo: r,
	})
	if err != nil {
		return commands.Context{}, err
	}

	return commands.Context{
		Online:     true,
		ConfigRoot: "/tmp/.mockipfsconfig",
		LoadConfig: func(path string) (*config.Config, error) {
			return &conf, nil
		},
		ConstructNode: func() (*core.IpfsNode, error) {
			return node, nil
		},
	}, nil
}
func TestDagTruncate(t *testing.T) {
	dserv, pins := getMockDagServ(t)
	b, n := getNode(t, dserv, 50000, pins)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	dagmod, err := NewDagModifier(ctx, n, dserv, pins, sizeSplitterGen(512))
	if err != nil {
		t.Fatal(err)
	}

	err = dagmod.Truncate(12345)
	if err != nil {
		t.Fatal(err)
	}

	_, err = dagmod.Seek(0, os.SEEK_SET)
	if err != nil {
		t.Fatal(err)
	}

	out, err := ioutil.ReadAll(dagmod)
	if err != nil {
		t.Fatal(err)
	}

	if err = arrComp(out, b[:12345]); err != nil {
		t.Fatal(err)
	}
}
func TestGetBlocksSequential(t *testing.T) {
	var servs = Mocks(4)
	for _, s := range servs {
		defer s.Close()
	}
	bg := blocksutil.NewBlockGenerator()
	blks := bg.Blocks(50)

	var keys []key.Key
	for _, blk := range blks {
		keys = append(keys, blk.Key())
		servs[0].AddBlock(blk)
	}

	t.Log("one instance at a time, get blocks concurrently")

	for i := 1; i < len(servs); i++ {
		ctx, cancel := context.WithTimeout(context.Background(), time.Second*50)
		defer cancel()
		out := servs[i].GetBlocks(ctx, keys)
		gotten := make(map[key.Key]*blocks.Block)
		for blk := range out {
			if _, ok := gotten[blk.Key()]; ok {
				t.Fatal("Got duplicate block!")
			}
			gotten[blk.Key()] = blk
		}
		if len(gotten) != len(blks) {
			t.Fatalf("Didn't get enough blocks back: %d/%d", len(gotten), len(blks))
		}
	}
}
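// printDag prints a node's file metadata (size, type, child count) and
// recurses into its children, indenting each level.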
func printDag(nd *mdag.Node, ds mdag.DAGService, indent int) {
	pbd, err := ft.FromBytes(nd.Data)
	if err != nil {
		panic(err)
	}

	for i := 0; i < indent; i++ {
		fmt.Print(" ")
	}
	fmt.Printf("{size = %d, type = %s, children = %d", pbd.GetFilesize(), pbd.GetType().String(), len(pbd.GetBlocksizes()))
	if len(nd.Links) > 0 {
		fmt.Println()
	}
	for _, lnk := range nd.Links {
		child, err := lnk.GetNode(context.Background(), ds)
		if err != nil {
			panic(err)
		}
		printDag(child, ds, indent+1)
	}
	if len(nd.Links) > 0 {
		for i := 0; i < indent; i++ {
			fmt.Print(" ")
		}
	}
	fmt.Println("}")
}
func TestBalancedDag(t *testing.T) {
	ds := mdtest.Mock()
	buf := make([]byte, 10000)
	u.NewTimeSeededRand().Read(buf)
	r := bytes.NewReader(buf)

	nd, err := BuildDagFromReader(ds, chunk.DefaultSplitter(r), nil)
	if err != nil {
		t.Fatal(err)
	}

	dr, err := uio.NewDagReader(context.Background(), nd, ds)
	if err != nil {
		t.Fatal(err)
	}

	out, err := ioutil.ReadAll(dr)
	if err != nil {
		t.Fatal(err)
	}

	if !bytes.Equal(out, buf) {
		t.Fatal("bad read")
	}
}
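// TestValidAfter checks that a provider record only becomes visible to
// FindProviders after the configured value-visibility delay has elapsed.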
func TestValidAfter(t *testing.T) {
	pi := testutil.RandIdentityOrFatal(t)
	var key = key.Key("mock key")
	var ctx = context.Background()
	conf := DelayConfig{
		ValueVisibility: delay.Fixed(1 * time.Hour),
		Query:           delay.Fixed(0),
	}

	rs := NewServerWithDelay(conf)

	rs.Client(pi).Provide(ctx, key)

	var providers []peer.PeerInfo
	providers, err := rs.Client(pi).FindProviders(ctx, key)
	if err != nil {
		t.Fatal(err)
	}
	if len(providers) > 0 {
		t.Fail()
	}

	conf.ValueVisibility.Set(0)
	providers, err = rs.Client(pi).FindProviders(ctx, key)
	if err != nil {
		t.Fatal(err)
	}
	t.Log("providers", providers)
	if len(providers) != 1 {
		t.Fail()
	}
}
func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) {
	net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
	block := blocks.NewBlock([]byte("block"))
	g := NewTestSessionGenerator(net)
	defer g.Close()

	peers := g.Instances(2)
	hasBlock := peers[0]
	defer hasBlock.Exchange.Close()

	if err := hasBlock.Exchange.HasBlock(block); err != nil {
		t.Fatal(err)
	}

	wantsBlock := peers[1]
	defer wantsBlock.Exchange.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	received, err := wantsBlock.Exchange.GetBlock(ctx, block.Key())
	if err != nil {
		t.Log(err)
		t.Fatal("Expected to succeed")
	}

	if !bytes.Equal(block.Data, received.Data) {
		t.Fatal("Data doesn't match")
	}
}