Code Example #1
File: mdns_test.go Project: libp2p/go-libp2p
func TestMdnsDiscovery(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	a := bhost.New(netutil.GenSwarmNetwork(t, ctx))
	b := bhost.New(netutil.GenSwarmNetwork(t, ctx))

	sa, err := NewMdnsService(ctx, a, time.Second)
	if err != nil {
		t.Fatal(err)
	}

	sb, err := NewMdnsService(ctx, b, time.Second)
	if err != nil {
		t.Fatal(err)
	}

	_ = sb // keep b's service running; no notifee is registered on it

	n := &DiscoveryNotifee{a}

	sa.RegisterNotifee(n)

	// give mdns a moment to discover the peer and populate a's peerstore
	time.Sleep(time.Second * 2)

	// no addrs supplied: this connect only succeeds if discovery stored b's addrs
	err = a.Connect(ctx, pstore.PeerInfo{ID: b.ID()})
	if err != nil {
		t.Fatal(err)
	}
}
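The test registers a DiscoveryNotifee, defined elsewhere in the same package. A minimal sketch of what such a notifee plausibly looks like, assuming the mdns Notifee interface asks for a single HandlePeerFound(pstore.PeerInfo) method; the field name and connect-on-discovery behavior are illustrative:

// DiscoveryNotifee connects to every peer the mdns service reports.
type DiscoveryNotifee struct {
	h host.Host
}

// HandlePeerFound satisfies the mdns Notifee interface (assumed signature).
func (n *DiscoveryNotifee) HandlePeerFound(pi pstore.PeerInfo) {
	// best-effort connect; errors are deliberately ignored in this sketch
	n.h.Connect(context.Background(), pi)
}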
Code Example #2
File: reconnect_test.go Project: libp2p/go-libp2p
// TestReconnect tests whether hosts are able to disconnect and reconnect.
func TestReconnect2(t *testing.T) {
	ctx := context.Background()
	h1 := bhost.New(testutil.GenSwarmNetwork(t, ctx))
	h2 := bhost.New(testutil.GenSwarmNetwork(t, ctx))
	hosts := []host.Host{h1, h2}

	h1.SetStreamHandler(protocol.TestingID, EchoStreamHandler)
	h2.SetStreamHandler(protocol.TestingID, EchoStreamHandler)

	rounds := 8
	if testing.Short() {
		rounds = 4
	}
	for i := 0; i < rounds; i++ {
		log.Debugf("TestReconnect: %d/%d\n", i, rounds)
		SubtestConnSendDisc(t, hosts)
	}
}
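Both hosts install EchoStreamHandler, a helper defined alongside this test. A minimal sketch with the same shape, assuming the inet.Stream interface used throughout these examples (the repo's actual helper adds logging around the copy):

// EchoStreamHandler writes every byte it reads back to the sender,
// then closes the stream.
func EchoStreamHandler(s inet.Stream) {
	go func() {
		defer s.Close()
		io.Copy(s, s) // echo until the remote side closes or errors
	}()
}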
Code Example #3
File: ping_test.go Project: libp2p/go-libp2p
func TestPing(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	h1 := bhost.New(netutil.GenSwarmNetwork(t, ctx))
	h2 := bhost.New(netutil.GenSwarmNetwork(t, ctx))

	err := h1.Connect(ctx, pstore.PeerInfo{
		ID:    h2.ID(),
		Addrs: h2.Addrs(),
	})

	if err != nil {
		t.Fatal(err)
	}

	ps1 := NewPingService(h1)
	ps2 := NewPingService(h2)

	testPing(t, ps1, h2.ID())
	testPing(t, ps2, h1.ID())
}
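The testPing helper lives next to this test. A hedged sketch of what it plausibly does, assuming PingService.Ping returns a channel of round-trip times (<-chan time.Duration, error), the ping API of this era; the sample count and timeout below are illustrative:

func testPing(t *testing.T, ps *PingService, p peer.ID) {
	pctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	ts, err := ps.Ping(pctx, p)
	if err != nil {
		t.Fatal(err)
	}

	// take a few RTT samples, failing if any one of them stalls
	for i := 0; i < 5; i++ {
		select {
		case took := <-ts:
			t.Log("ping took:", took)
		case <-time.After(time.Second * 4):
			t.Fatal("failed to receive ping")
		}
	}
}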
Code Example #4
File: mock_net.go Project: peckjerry/go-libp2p
func (mn *mocknet) AddPeerWithPeerstore(p peer.ID, ps pstore.Peerstore) (host.Host, error) {
	n, err := newPeernet(mn.ctx, mn, p, ps)
	if err != nil {
		return nil, err
	}

	h := bhost.New(n)

	mn.proc.AddChild(n.proc)

	mn.Lock()
	mn.nets[n.peer] = n
	mn.hosts[n.peer] = h
	mn.Unlock()
	return h, nil
}
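A hedged usage sketch for this method: build a mocknet, add two peers with fresh peerstores, then link and connect them. mocknet.New, LinkAll, and ConnectPeers are assumed from the Mocknet interface of this era; testutil.RandPeerID and pstore.NewPeerstore are the same helpers used in the other examples on this page:

ctx := context.Background()
mn := mocknet.New(ctx)

id1, _ := testutil.RandPeerID()
id2, _ := testutil.RandPeerID()

h1, err := mn.AddPeerWithPeerstore(id1, pstore.NewPeerstore())
if err != nil {
	panic(err)
}
h2, err := mn.AddPeerWithPeerstore(id2, pstore.NewPeerstore())
if err != nil {
	panic(err)
}

// wire every peer to every other, then actually open a connection
if err := mn.LinkAll(); err != nil {
	panic(err)
}
if _, err := mn.ConnectPeers(h1.ID(), h2.ID()); err != nil {
	panic(err)
}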
Code Example #5
File: main.go Project: libp2p/go-libp2p
// makeBasicHost creates a 'Host' with a random peer ID that listens on the given address
func makeBasicHost(listen string, secio bool) (host.Host, error) {
	addr, err := ma.NewMultiaddr(listen)
	if err != nil {
		return nil, err
	}

	ps := pstore.NewPeerstore()
	var pid peer.ID

	if secio {
		ident, err := testutil.RandIdentity()
		if err != nil {
			return nil, err
		}

		ps.AddPrivKey(ident.ID(), ident.PrivateKey())
		ps.AddPubKey(ident.ID(), ident.PublicKey())
		pid = ident.ID()
	} else {
		fakepid, err := testutil.RandPeerID()
		if err != nil {
			return nil, err
		}
		pid = fakepid
	}

	ctx := context.Background()

	// create a new swarm to be used by the service host
	netw, err := swarm.NewNetwork(ctx, []ma.Multiaddr{addr}, pid, ps, nil)
	if err != nil {
		return nil, err
	}

	log.Printf("I am %s/ipfs/%s\n", addr, pid.Pretty())
	return bhost.New(netw), nil
}
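A hedged usage sketch for makeBasicHost, in the spirit of the echo example this file comes from: create the host, register a stream handler, and serve. The "/echo/1.0.0" protocol ID and the handler body are illustrative assumptions, not the file's exact code:

ha, err := makeBasicHost("/ip4/127.0.0.1/tcp/10000", true)
if err != nil {
	log.Fatal(err)
}

// echo every stream opened on the assumed protocol ID
ha.SetStreamHandler("/echo/1.0.0", func(s inet.Stream) {
	defer s.Close()
	io.Copy(s, s)
})

select {} // serve forever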
Code Example #6
File: backpressure_test.go Project: libp2p/go-libp2p
// TestBackpressureStreamHandler tests whether mux handler
// ratelimiting works. Meaning, since the handler is sequential
// it should block senders.
//
// Important note: spdystream (which peerstream uses) has a set
// of n workers (n=spdystream.FRAME_WORKERS) which handle new
// frames, including those starting new streams. So all of them
// can be in the handler at one time. Also, the sending side
// does not rate limit unless we call stream.Wait()
//
// Note: right now, this happens muxer-wide. the muxer should
// learn to flow control, so handlers can't block each other.
func TestBackpressureStreamHandler(t *testing.T) {
	t.Skip(`Sadly, as cool as this test is, it doesn't work
Because spdystream doesn't handle stream open backpressure
well IMO. I'll see about rewriting that part when it becomes
a problem.
`)

	// a number of concurrent request handlers
	limit := 10

	// our way to signal that we're done with 1 request
	requestHandled := make(chan struct{})

	// handler rate limiting
	receiverRatelimit := make(chan struct{}, limit)
	for i := 0; i < limit; i++ {
		receiverRatelimit <- struct{}{}
	}

	// sender counter of successfully opened streams
	senderOpened := make(chan struct{}, limit*100)

	// sender signals it's done (errored out)
	senderDone := make(chan struct{})

	// the receiver handles requests with some rate limiting
	receiver := func(s inet.Stream) {
		log.Debug("receiver received a stream")

		<-receiverRatelimit // acquire
		go func() {
			// our request handler. can do stuff here. we
			// simulate something taking time by waiting
			// on requestHandled
			log.Debug("request worker handling...")
			<-requestHandled
			log.Debug("request worker done!")
			receiverRatelimit <- struct{}{} // release
		}()
	}

	// the sender opens streams as fast as possible
	sender := func(host host.Host, remote peer.ID) {
		var s inet.Stream
		var err error
		defer func() {
			t.Error(err)
			log.Debug("sender error. exiting.")
			senderDone <- struct{}{}
		}()

		for {
			s, err = host.NewStream(context.Background(), remote, protocol.TestingID)
			if err != nil {
				return
			}

			_ = s
			// if err = s.SwarmStream().Stream().Wait(); err != nil {
			// 	return
			// }

			// "count" another successfully opened stream
			// (large buffer so shouldn't block in normal operation)
			log.Debug("sender opened another stream!")
			senderOpened <- struct{}{}
		}
	}

	// count our senderOpened events
	countStreamsOpenedBySender := func(min int) int {
		opened := 0
		for opened < min {
			log.Debugf("countStreamsOpenedBySender got %d (min %d)", opened, min)
			select {
			case <-senderOpened:
				opened++
			case <-time.After(10 * time.Millisecond):
			}
		}
		return opened
	}

	// count our received events
	// waitForNReceivedStreams := func(n int) {
	// 	for n > 0 {
	// 		log.Debugf("waiting for %d received streams...", n)
	// 		select {
	// 		case <-receiverRatelimit:
	// 			n--
	// 		}
	// 	}
	// }

	testStreamsOpened := func(expected int) {
		log.Debugf("testing rate limited to %d streams", expected)
		if n := countStreamsOpenedBySender(expected); n != expected {
			t.Fatalf("rate limiting did not work :( -- %d != %d", expected, n)
		}
	}

	// ok that's enough setup. let's do it!

	ctx := context.Background()
	h1 := bhost.New(testutil.GenSwarmNetwork(t, ctx))
	h2 := bhost.New(testutil.GenSwarmNetwork(t, ctx))

	// setup receiver handler
	h1.SetStreamHandler(protocol.TestingID, receiver)

	h2pi := h2.Peerstore().PeerInfo(h2.ID())
	log.Debugf("dialing %s", h2pi.Addrs)
	if err := h1.Connect(ctx, h2pi); err != nil {
		t.Fatalf("Failed to connect:", err)
	}

	// launch sender!
	go sender(h2, h1.ID())

	// ok, what do we expect to happen? the receiver should
	// receive 10 requests and stop receiving, blocking the sender.
	// we can test this by counting 10x senderOpened requests

	<-senderOpened // wait for the sender to successfully open some.
	testStreamsOpened(limit - 1)

	// let's "handle" 3 requests.
	<-requestHandled
	<-requestHandled
	<-requestHandled
	// the sender should've now been able to open exactly 3 more.

	testStreamsOpened(3)

	// shouldn't have opened anything more
	testStreamsOpened(0)

	// let's "handle" 100 requests in batches of 5
	for i := 0; i < 20; i++ {
		<-requestHandled
		<-requestHandled
		<-requestHandled
		<-requestHandled
		<-requestHandled
		testStreamsOpened(5)
	}

	// success!

	// now for the sugar on top: let's tear down the receiver. it should
	// exit the sender.
	h1.Close()

	// shouldn't have opened anything more
	testStreamsOpened(0)

	select {
	case <-time.After(100 * time.Millisecond):
		t.Error("receiver shutdown failed to exit sender")
	case <-senderDone:
		log.Info("handler backpressure works!")
	}
}
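The receiver above uses a buffered channel as a counting semaphore: fill it with limit tokens, take one to start work, put one back when done. A standalone illustration of the same pattern, independent of any libp2p types:

package main

import (
	"fmt"
	"sync"
)

func main() {
	const limit = 3
	sem := make(chan struct{}, limit)
	for i := 0; i < limit; i++ {
		sem <- struct{}{} // fill: limit slots available
	}

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			<-sem                                // acquire (blocks once limit workers run)
			defer func() { sem <- struct{}{} }() // release
			fmt.Println("handling request", id)
		}(i)
	}
	wg.Wait()
}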
Code Example #7
File: backpressure_test.go Project: libp2p/go-libp2p
// TestStBackpressureStreamWrite tests whether streams see proper
// backpressure when writing data over the network streams.
func TestStBackpressureStreamWrite(t *testing.T) {

	// senderWrote signals that the sender wrote bytes to remote.
	// the value is the count of bytes written.
	senderWrote := make(chan int, 10000)

	// sender signals it's done (errored out)
	senderDone := make(chan struct{})

	// writeStats lets us listen to all the writes and return
	// how many happened and how much was written
	writeStats := func() (int, int) {
		writes := 0
		bytes := 0
		for {
			select {
			case n := <-senderWrote:
				writes++
				bytes = bytes + n
			default:
				log.Debugf("stats: sender wrote %d bytes, %d writes", bytes, writes)
				return bytes, writes
			}
		}
	}

	// sender attempts to write as fast as possible, signaling on the
	// completion of every write. This makes it possible to see how
	// fast it's actually writing. We pair this with a receiver
	// that waits for a signal to read.
	sender := func(s inet.Stream) {
		defer func() {
			s.Close()
			senderDone <- struct{}{}
		}()

		// ready a buffer of random data
		buf := make([]byte, 65536)
		u.NewTimeSeededRand().Read(buf)

		for {
			// send a randomly sized subchunk
			from := rand.Intn(len(buf) / 2)
			to := rand.Intn(len(buf) / 2)
			sendbuf := buf[from : from+to]

			n, err := s.Write(sendbuf)
			if err != nil {
				log.Debug("sender error. exiting:", err)
				return
			}

			log.Debugf("sender wrote %d bytes", n)
			senderWrote <- n
		}
	}

	// receive reads exactly expect bytes from the stream,
	// failing the test on error or short read.
	receive := func(s inet.Stream, expect int) {
		log.Debugf("receiver to read %d bytes", expect)
		rbuf := make([]byte, expect)
		n, err := io.ReadFull(s, rbuf)
		if err != nil {
			t.Error("read failed:", err)
		}
		if expect != n {
			t.Error("read len differs: %d != %d", expect, n)
		}
	}

	// ok let's do it!

	// setup the networks
	ctx := context.Background()
	h1 := bhost.New(testutil.GenSwarmNetwork(t, ctx))
	h2 := bhost.New(testutil.GenSwarmNetwork(t, ctx))

	// setup sender handler on 1
	h1.SetStreamHandler(protocol.TestingID, sender)

	h2pi := h2.Peerstore().PeerInfo(h2.ID())
	log.Debugf("dialing %s", h2pi.Addrs)
	if err := h1.Connect(ctx, h2pi); err != nil {
		t.Fatalf("Failed to connect:", err)
	}

	// open a stream, from 2->1, this is our reader
	s, err := h2.NewStream(context.Background(), h1.ID(), protocol.TestingID)
	if err != nil {
		t.Fatal(err)
	}

	// let's make sure r/w works.
	testSenderWrote := func(bytesE int) {
		bytesA, writesA := writeStats()
		if bytesA != bytesE {
			t.Errorf("numbers failed: %d =?= %d bytes, via %d writes", bytesA, bytesE, writesA)
		}
	}

	// trigger lazy connection handshaking
	_, err = s.Read(nil)
	if err != nil {
		t.Fatal(err)
	}

	// 500ms rounds of lockstep write + drain
	roundsStart := time.Now()
	roundsTotal := 0
	for roundsTotal < (2 << 20) {
		// let the sender fill its buffers, it will stop sending.
		<-time.After(300 * time.Millisecond)
		b, _ := writeStats()
		testSenderWrote(0)
		testSenderWrote(0)

		// drain it all, wait again
		receive(s, b)
		roundsTotal = roundsTotal + b
	}
	roundsTime := time.Since(roundsStart)

	// now read continuously, while we measure stats.
	stop := make(chan struct{})
	contStart := time.Now()

	go func() {
		for {
			select {
			case <-stop:
				return
			default:
				receive(s, 2<<15)
			}
		}
	}()

	contTotal := 0
	for contTotal < (2 << 20) {
		n := <-senderWrote
		contTotal += n
	}
	stop <- struct{}{}
	contTime := time.Since(contStart)

	// now compare! continuous should've been faster AND larger
	if roundsTime < contTime {
		t.Error("continuous should have been faster")
	}

	if contTotal < roundsTotal {
		t.Error("continuous should have been larger, too!")
	}

	// and a couple rounds more for good measure ;)
	for i := 0; i < 3; i++ {
		// let the sender fill its buffers, it will stop sending.
		<-time.After(300 * time.Millisecond)
		b, _ := writeStats()
		testSenderWrote(0)
		testSenderWrote(0)

		// drain it all, wait again
		receive(s, b)
	}

	// this doesn't work :(:
	// // now for the sugar on top: let's tear down the receiver. it should
	// // exit the sender.
	// n1.Close()
	// testSenderWrote(0)
	// testSenderWrote(0)
	// select {
	// case <-time.After(2 * time.Second):
	// 	t.Error("receiver shutdown failed to exit sender")
	// case <-senderDone:
	// 	log.Info("handler backpressure works!")
	// }
}
Code Example #8
File: relay_test.go Project: libp2p/go-libp2p
func TestRelayStress(t *testing.T) {
	buflen := 1 << 18
	iterations := 10

	ctx := context.Background()

	// these networks have the relay service wired in already.
	n1 := bhost.New(testutil.GenSwarmNetwork(t, ctx))
	n2 := bhost.New(testutil.GenSwarmNetwork(t, ctx))
	n3 := bhost.New(testutil.GenSwarmNetwork(t, ctx))

	n1p := n1.ID()
	n2p := n2.ID()
	n3p := n3.ID()

	n2pi := n2.Peerstore().PeerInfo(n2p)
	if err := n1.Connect(ctx, n2pi); err != nil {
		t.Fatalf("Failed to dial:", err)
	}
	if err := n3.Connect(ctx, n2pi); err != nil {
		t.Fatalf("Failed to dial:", err)
	}

	// setup handler on n3 to copy everything over to the pipe.
	piper, pipew := io.Pipe()
	n3.SetStreamHandler(protocol.TestingID, func(s inet.Stream) {
		log.Debug("relay stream opened to n3!")
		log.Debug("piping and echoing everything")
		w := io.MultiWriter(s, pipew)
		io.Copy(w, s)
		log.Debug("closing stream")
		s.Close()
	})

	// ok, now we can try to relay n1--->n2--->n3.
	log.Debug("open relay stream")
	s, err := n1.NewStream(ctx, n2p, relay.ID)
	if err != nil {
		t.Fatal(err)
	}

	// ok first thing we write the relay header n1->n3
	log.Debug("write relay header")
	if err := relay.WriteHeader(s, n1p, n3p); err != nil {
		t.Fatal(err)
	}

	// ok now the header's there, we can write the next protocol header.
	log.Debug("write testing header")
	if err := msmux.SelectProtoOrFail(string(protocol.TestingID), s); err != nil {
		t.Fatal(err)
	}

	// okay, now write lots of text and read it back out from both
	// the pipe and the stream.
	buf1 := make([]byte, buflen)
	buf2 := make([]byte, len(buf1))
	buf3 := make([]byte, len(buf1))

	fillbuf := func(buf []byte, b byte) {
		for i := range buf {
			buf[i] = b
		}
	}

	for i := 0; i < iterations; i++ {
		fillbuf(buf1, byte(int('a')+i))
		log.Debugf("writing %d bytes (%d/%d)", len(buf1), i, iterations)
		if _, err := s.Write(buf1); err != nil {
			t.Fatal(err)
		}

		log.Debug("read it out from the pipe.")
		if _, err := io.ReadFull(piper, buf2); err != nil {
			t.Fatal(err)
		}
		if string(buf1) != string(buf2) {
			t.Fatal("should've gotten that text out of the pipe")
		}

		// read it out from the stream (echoed)
		log.Debug("read it out from the stream (echoed).")
		if _, err := io.ReadFull(s, buf3); err != nil {
			t.Fatal(err)
		}
		if string(buf1) != string(buf3) {
			t.Fatal("should've gotten that text out of the stream")
		}
	}

	log.Debug("sweet, relay works under stress.")
	s.Close()
}
Code Example #9
File: relay_test.go Project: libp2p/go-libp2p
func TestRelaySimple(t *testing.T) {

	ctx := context.Background()

	// these networks have the relay service wired in already.
	n1 := bhost.New(testutil.GenSwarmNetwork(t, ctx))
	n2 := bhost.New(testutil.GenSwarmNetwork(t, ctx))
	n3 := bhost.New(testutil.GenSwarmNetwork(t, ctx))

	n1p := n1.ID()
	n2p := n2.ID()
	n3p := n3.ID()

	n2pi := n2.Peerstore().PeerInfo(n2p)
	if err := n1.Connect(ctx, n2pi); err != nil {
		t.Fatal("Failed to connect:", err)
	}
	if err := n3.Connect(ctx, n2pi); err != nil {
		t.Fatal("Failed to connect:", err)
	}

	// setup handler on n3 to copy everything over to the pipe.
	piper, pipew := io.Pipe()
	n3.SetStreamHandler(protocol.TestingID, func(s inet.Stream) {
		log.Debug("relay stream opened to n3!")
		log.Debug("piping and echoing everything")
		w := io.MultiWriter(s, pipew)
		io.Copy(w, s)
		log.Debug("closing stream")
		s.Close()
	})

	// ok, now we can try to relay n1--->n2--->n3.
	log.Debug("open relay stream")
	s, err := n1.NewStream(ctx, n2p, relay.ID)
	if err != nil {
		t.Fatal(err)
	}

	// ok first thing we write the relay header n1->n3
	log.Debug("write relay header")
	if err := relay.WriteHeader(s, n1p, n3p); err != nil {
		t.Fatal(err)
	}

	// ok now the header's there, we can write the next protocol header.
	log.Debug("write testing header")
	if err := msmux.SelectProtoOrFail(string(protocol.TestingID), s); err != nil {
		t.Fatal(err)
	}

	// okay, now we should be able to write text, and read it out.
	buf1 := []byte("abcdefghij")
	buf2 := make([]byte, 10)
	buf3 := make([]byte, 10)
	log.Debug("write in some text.")
	if _, err := s.Write(buf1); err != nil {
		t.Fatal(err)
	}

	// read it out from the pipe.
	log.Debug("read it out from the pipe.")
	if _, err := io.ReadFull(piper, buf2); err != nil {
		t.Fatal(err)
	}
	if string(buf1) != string(buf2) {
		t.Fatal("should've gotten that text out of the pipe")
	}

	// read it out from the stream (echoed)
	log.Debug("read it out from the stream (echoed).")
	if _, err := io.ReadFull(s, buf3); err != nil {
		t.Fatal(err)
	}
	if string(buf1) != string(buf3) {
		t.Fatal("should've gotten that text out of the stream")
	}

	// sweet. relay works.
	log.Debug("sweet, relay works.")
	s.Close()
}
Code Example #10
File: util.go Project: peckjerry/go-libp2p
func GenHostSwarm(t *testing.T, ctx context.Context) *bhost.BasicHost {
	n := GenSwarmNetwork(t, ctx)
	return bhost.New(n)
}
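A hedged usage sketch: GenHostSwarm collapses the two-step setup repeated throughout the tests above, so connecting a pair of hosts could look like this (the connect pattern mirrors the ping test; the test name is illustrative):

func TestTwoHosts(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	h1 := GenHostSwarm(t, ctx)
	h2 := GenHostSwarm(t, ctx)

	// dial h2 from h1 using the peerstore's own PeerInfo
	h2pi := h2.Peerstore().PeerInfo(h2.ID())
	if err := h1.Connect(ctx, h2pi); err != nil {
		t.Fatal(err)
	}
}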