Code Example #1
func TestHostSimple(t *testing.T) {

	ctx := context.Background()
	h1 := testutil.GenHostSwarm(t, ctx)
	h2 := testutil.GenHostSwarm(t, ctx)
	defer h1.Close()
	defer h2.Close()

	h2pi := h2.Peerstore().PeerInfo(h2.ID())
	if err := h1.Connect(ctx, h2pi); err != nil {
		t.Fatal(err)
	}

	piper, pipew := io.Pipe()
	h2.SetStreamHandler(protocol.TestingID, func(s inet.Stream) {
		defer s.Close()
		w := io.MultiWriter(s, pipew)
		io.Copy(w, s) // mirror everything
	})

	s, err := h1.NewStream(protocol.TestingID, h2pi.ID)
	if err != nil {
		t.Fatal(err)
	}

	// write to the stream
	buf1 := []byte("abcdefghijkl")
	if _, err := s.Write(buf1); err != nil {
		t.Fatal(err)
	}

	// get it from the stream (echoed)
	buf2 := make([]byte, len(buf1))
	if _, err := io.ReadFull(s, buf2); err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(buf1, buf2) {
		t.Fatal("buf1 != buf2 -- %x != %x", buf1, buf2)
	}

	// get it from the pipe (tee)
	buf3 := make([]byte, len(buf1))
	if _, err := io.ReadFull(piper, buf3); err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(buf1, buf3) {
		t.Fatal("buf1 != buf3 -- %x != %x", buf1, buf3)
	}
}
Code Example #2
// TestReconnect2 tests whether hosts are able to disconnect and reconnect.
func TestReconnect2(t *testing.T) {
	ctx := context.Background()
	h1 := testutil.GenHostSwarm(t, ctx)
	h2 := testutil.GenHostSwarm(t, ctx)
	hosts := []host.Host{h1, h2}

	h1.SetStreamHandler(protocol.TestingID, EchoStreamHandler)
	h2.SetStreamHandler(protocol.TestingID, EchoStreamHandler)

	rounds := 8
	if testing.Short() {
		rounds = 4
	}
	for i := 0; i < rounds; i++ {
		log.Debugf("TestReconnect: %d/%d\n", i, rounds)
		SubtestConnSendDisc(t, hosts)
	}
}
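
EchoStreamHandler and SubtestConnSendDisc are helpers defined elsewhere in this test package and are not shown here. As a rough sketch, assuming only the inet.Stream interface used in the examples above, an echo handler could be as small as:

// echoHandlerSketch is a hypothetical stand-in for EchoStreamHandler:
// it copies everything read from the stream back to the sender, then
// closes the stream.
func echoHandlerSketch(s inet.Stream) {
	defer s.Close()
	io.Copy(s, s) // echo until the remote side closes or errors
}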
Code Example #3
File: ping_test.go  Project: andradeandrey/go-ipfs
func TestPing(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	h1 := netutil.GenHostSwarm(t, ctx)
	h2 := netutil.GenHostSwarm(t, ctx)

	err := h1.Connect(ctx, peer.PeerInfo{
		ID:    h2.ID(),
		Addrs: h2.Addrs(),
	})

	if err != nil {
		t.Fatal(err)
	}

	ps1 := NewPingService(h1)
	ps2 := NewPingService(h2)

	testPing(t, ps1, h2.ID())
	testPing(t, ps2, h1.ID())
}
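
testPing is a helper defined alongside this test and is not shown here. A minimal sketch, assuming PingService.Ping returns a channel of round-trip times plus an error:

// testPingSketch is illustrative only: the name, loop count, and timeout
// are assumptions, not taken from the source.
func testPingSketch(t *testing.T, ps *PingService, p peer.ID) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	rtts, err := ps.Ping(ctx, p)
	if err != nil {
		t.Fatal(err)
	}
	for i := 0; i < 5; i++ {
		select {
		case rtt := <-rtts:
			t.Log("ping took:", rtt)
		case <-time.After(4 * time.Second):
			t.Fatal("failed to receive ping")
		}
	}
}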
Code Example #4
File: id_test.go  Project: andradeandrey/go-ipfs
func subtestIDService(t *testing.T, postDialWait time.Duration) {

	// the generated networks should have the id service wired in.
	ctx := context.Background()
	h1 := testutil.GenHostSwarm(t, ctx)
	h2 := testutil.GenHostSwarm(t, ctx)

	h1p := h1.ID()
	h2p := h2.ID()

	testKnowsAddrs(t, h1, h2p, []ma.Multiaddr{}) // nothing
	testKnowsAddrs(t, h2, h1p, []ma.Multiaddr{}) // nothing

	h2pi := h2.Peerstore().PeerInfo(h2p)
	if err := h1.Connect(ctx, h2pi); err != nil {
		t.Fatal(err)
	}

	// we need to wait here if Dial returns before ID service is finished.
	if postDialWait > 0 {
		<-time.After(postDialWait)
	}

	// the IDService should be opened automatically, by the network.
	// what we should see now is that both peers know about each other's listen addresses.
	testKnowsAddrs(t, h1, h2p, h2.Peerstore().Addrs(h2p)) // has them
	testHasProtocolVersions(t, h1, h2p)

	// now, this wait we do have to do. it's the wait for the Listening side
	// to be done identifying the connection.
	c := h2.Network().ConnsToPeer(h1.ID())
	if len(c) < 1 {
		t.Fatal("should have connection by now at least.")
	}
	<-h2.IDService().IdentifyWait(c[0])

	// and the protocol versions.
	testKnowsAddrs(t, h2, h1p, h1.Peerstore().Addrs(h1p)) // has them
	testHasProtocolVersions(t, h2, h1p)
}
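
testKnowsAddrs and testHasProtocolVersions are helpers from the same file and are not shown here. A hedged sketch of the address check, comparing what a host's peerstore records for a peer against an expected set:

// testKnowsAddrsSketch is illustrative only; the real helper lives in
// id_test.go. It checks that h records exactly the expected addresses
// for peer p.
func testKnowsAddrsSketch(t *testing.T, h host.Host, p peer.ID, expected []ma.Multiaddr) {
	actual := h.Peerstore().Addrs(p)
	if len(actual) != len(expected) {
		t.Errorf("expected %d addrs for %s, got %d", len(expected), p, len(actual))
	}
	have := make(map[string]bool, len(actual))
	for _, addr := range actual {
		have[addr.String()] = true
	}
	for _, addr := range expected {
		if !have[addr.String()] {
			t.Errorf("%s is missing addr %s for peer %s", h.ID(), addr, p)
		}
	}
}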
Code Example #5
File: dht_test.go  Project: avbalu/go-ipfs
func setupDHT(ctx context.Context, t *testing.T) *IpfsDHT {
	h := netutil.GenHostSwarm(t, ctx)

	dss := dssync.MutexWrap(ds.NewMapDatastore())
	d := NewDHT(ctx, h, dss)

	d.Validator["v"] = &record.ValidChecker{
		Func: func(key.Key, []byte) error {
			return nil
		},
		Sign: false,
	}
	return d
}
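
A hypothetical caller, to show how setupDHT is meant to be used; the test name below is illustrative and not part of the source:

func TestSetupDHTSketch(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// build two DHT nodes, each backed by its own in-memory datastore
	dhtA := setupDHT(ctx, t)
	dhtB := setupDHT(ctx, t)

	// assumes IpfsDHT exposes a Close method for teardown
	defer dhtA.Close()
	defer dhtB.Close()

	// the real tests in this package go on to connect the underlying
	// hosts and exercise routing calls against the permissive "v"
	// validator registered in setupDHT.
}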
Code Example #6
File: relay_test.go  Project: andradeandrey/go-ipfs
func TestRelayStress(t *testing.T) {
	buflen := 1 << 18
	iterations := 10

	ctx := context.Background()

	// these networks have the relay service wired in already.
	n1 := testutil.GenHostSwarm(t, ctx)
	n2 := testutil.GenHostSwarm(t, ctx)
	n3 := testutil.GenHostSwarm(t, ctx)

	n1p := n1.ID()
	n2p := n2.ID()
	n3p := n3.ID()

	n2pi := n2.Peerstore().PeerInfo(n2p)
	if err := n1.Connect(ctx, n2pi); err != nil {
		t.Fatalf("Failed to dial:", err)
	}
	if err := n3.Connect(ctx, n2pi); err != nil {
		t.Fatalf("Failed to dial:", err)
	}

	// setup handler on n3 to copy everything over to the pipe.
	piper, pipew := io.Pipe()
	n3.SetStreamHandler(protocol.TestingID, func(s inet.Stream) {
		log.Debug("relay stream opened to n3!")
		log.Debug("piping and echoing everything")
		w := io.MultiWriter(s, pipew)
		io.Copy(w, s)
		log.Debug("closing stream")
		s.Close()
	})

	// ok, now we can try to relay n1--->n2--->n3.
	log.Debug("open relay stream")
	s, err := n1.NewStream(relay.ID, n2p)
	if err != nil {
		t.Fatal(err)
	}

	// ok first thing we write the relay header n1->n3
	log.Debug("write relay header")
	if err := relay.WriteHeader(s, n1p, n3p); err != nil {
		t.Fatal(err)
	}

	// ok now the header's there, we can write the next protocol header.
	log.Debug("write testing header")
	if err := protocol.WriteHeader(s, protocol.TestingID); err != nil {
		t.Fatal(err)
	}

	// okay, now write lots of text and read it back out from both
	// the pipe and the stream.
	buf1 := make([]byte, buflen)
	buf2 := make([]byte, len(buf1))
	buf3 := make([]byte, len(buf1))

	fillbuf := func(buf []byte, b byte) {
		for i := range buf {
			buf[i] = b
		}
	}

	for i := 0; i < iterations; i++ {
		fillbuf(buf1, byte(int('a')+i))
		log.Debugf("writing %d bytes (%d/%d)", len(buf1), i, iterations)
		if _, err := s.Write(buf1); err != nil {
			t.Fatal(err)
		}

		log.Debug("read it out from the pipe.")
		if _, err := io.ReadFull(piper, buf2); err != nil {
			t.Fatal(err)
		}
		if string(buf1) != string(buf2) {
			t.Fatal("should've gotten that text out of the pipe")
		}

		// read it out from the stream (echoed)
		log.Debug("read it out from the stream (echoed).")
		if _, err := io.ReadFull(s, buf3); err != nil {
			t.Fatal(err)
		}
		if string(buf1) != string(buf3) {
			t.Fatal("should've gotten that text out of the stream")
		}
	}

	log.Debug("sweet, relay works under stress.")
	s.Close()
}
Code Example #7
File: relay_test.go  Project: andradeandrey/go-ipfs
func TestRelaySimple(t *testing.T) {

	ctx := context.Background()

	// these networks have the relay service wired in already.
	n1 := testutil.GenHostSwarm(t, ctx)
	n2 := testutil.GenHostSwarm(t, ctx)
	n3 := testutil.GenHostSwarm(t, ctx)

	n1p := n1.ID()
	n2p := n2.ID()
	n3p := n3.ID()

	n2pi := n2.Peerstore().PeerInfo(n2p)
	if err := n1.Connect(ctx, n2pi); err != nil {
		t.Fatal("Failed to connect:", err)
	}
	if err := n3.Connect(ctx, n2pi); err != nil {
		t.Fatal("Failed to connect:", err)
	}

	// setup handler on n3 to copy everything over to the pipe.
	piper, pipew := io.Pipe()
	n3.SetStreamHandler(protocol.TestingID, func(s inet.Stream) {
		log.Debug("relay stream opened to n3!")
		log.Debug("piping and echoing everything")
		w := io.MultiWriter(s, pipew)
		io.Copy(w, s)
		log.Debug("closing stream")
		s.Close()
	})

	// ok, now we can try to relay n1--->n2--->n3.
	log.Debug("open relay stream")
	s, err := n1.NewStream(relay.ID, n2p)
	if err != nil {
		t.Fatal(err)
	}

	// ok first thing we write the relay header n1->n3
	log.Debug("write relay header")
	if err := relay.WriteHeader(s, n1p, n3p); err != nil {
		t.Fatal(err)
	}

	// ok now the header's there, we can write the next protocol header.
	log.Debug("write testing header")
	if err := protocol.WriteHeader(s, protocol.TestingID); err != nil {
		t.Fatal(err)
	}

	// okay, now we should be able to write text, and read it out.
	buf1 := []byte("abcdefghij")
	buf2 := make([]byte, 10)
	buf3 := make([]byte, 10)
	log.Debug("write in some text.")
	if _, err := s.Write(buf1); err != nil {
		t.Fatal(err)
	}

	// read it out from the pipe.
	log.Debug("read it out from the pipe.")
	if _, err := io.ReadFull(piper, buf2); err != nil {
		t.Fatal(err)
	}
	if string(buf1) != string(buf2) {
		t.Fatal("should've gotten that text out of the pipe")
	}

	// read it out from the stream (echoed)
	log.Debug("read it out from the stream (echoed).")
	if _, err := io.ReadFull(s, buf3); err != nil {
		t.Fatal(err)
	}
	if string(buf1) != string(buf3) {
		t.Fatal("should've gotten that text out of the stream")
	}

	// sweet. relay works.
	log.Debug("sweet, relay works.")
	s.Close()
}
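
Both relay tests open the relayed stream with the same three steps: dial the relay protocol on the middle node, write the relay header naming source and destination, then write the header of the protocol to speak end to end. As a sketch, those steps could be factored into a helper; openRelayedStream is a hypothetical name, not part of the source:

// openRelayedStream opens a stream from n1 to dst, relayed through relayPeer.
func openRelayedStream(n1 host.Host, relayPeer, src, dst peer.ID) (inet.Stream, error) {
	s, err := n1.NewStream(relay.ID, relayPeer) // step 1: reach the relay node
	if err != nil {
		return nil, err
	}
	if err := relay.WriteHeader(s, src, dst); err != nil { // step 2: who relays to whom
		s.Close()
		return nil, err
	}
	if err := protocol.WriteHeader(s, protocol.TestingID); err != nil { // step 3: end-to-end protocol
		s.Close()
		return nil, err
	}
	return s, nil
}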
Code Example #8
File: backpressure_test.go  Project: noffle/go-ipfs
// TestBackpressureStreamHandler tests whether mux handler
// ratelimiting works. Meaning, since the handler is sequential
// it should block senders.
//
// Important note: spdystream (which peerstream uses) has a set
// of n workers (n=spdystream.FRAME_WORKERS) which handle new
// frames, including those starting new streams. So all of them
// can be in the handler at one time. Also, the sending side
// does not rate limit unless we call stream.Wait()
//
//
// Note: right now, this happens muxer-wide. the muxer should
// learn to flow control, so handlers can't block each other.
func TestBackpressureStreamHandler(t *testing.T) {
	t.Skip(`Sadly, as cool as this test is, it doesn't work
Because spdystream doesnt handle stream open backpressure
well IMO. I'll see about rewriting that part when it becomes
a problem.
`)

	// a number of concurrent request handlers
	limit := 10

	// our way to signal that we're done with 1 request
	requestHandled := make(chan struct{})

	// handler rate limiting
	receiverRatelimit := make(chan struct{}, limit)
	for i := 0; i < limit; i++ {
		receiverRatelimit <- struct{}{}
	}

	// sender counter of successfully opened streams
	senderOpened := make(chan struct{}, limit*100)

	// sender signals it's done (errored out)
	senderDone := make(chan struct{})

	// the receiver handles requests with some rate limiting
	receiver := func(s inet.Stream) {
		log.Debug("receiver received a stream")

		<-receiverRatelimit // acquire
		go func() {
			// our request handler. can do stuff here. we
			// simulate something taking time by waiting
			// on requestHandled
			log.Debug("request worker handling...")
			<-requestHandled
			log.Debug("request worker done!")
			receiverRatelimit <- struct{}{} // release
		}()
	}

	// the sender opens streams as fast as possible
	sender := func(host host.Host, remote peer.ID) {
		var s inet.Stream
		var err error
		defer func() {
			t.Error(err)
			log.Debug("sender error. exiting.")
			senderDone <- struct{}{}
		}()

		for {
			s, err = host.NewStream(protocol.TestingID, remote)
			if err != nil {
				return
			}

			_ = s
			// if err = s.SwarmStream().Stream().Wait(); err != nil {
			// 	return
			// }

			// "count" another successfully opened stream
			// (large buffer so shouldn't block in normal operation)
			log.Debug("sender opened another stream!")
			senderOpened <- struct{}{}
		}
	}

	// count our senderOpened events
	countStreamsOpenedBySender := func(min int) int {
		opened := 0
		for opened < min {
			log.Debugf("countStreamsOpenedBySender got %d (min %d)", opened, min)
			select {
			case <-senderOpened:
				opened++
			case <-time.After(10 * time.Millisecond):
			}
		}
		return opened
	}

	// count our received events
	// waitForNReceivedStreams := func(n int) {
	// 	for n > 0 {
	// 		log.Debugf("waiting for %d received streams...", n)
	// 		select {
	// 		case <-receiverRatelimit:
	// 			n--
	// 		}
	// 	}
	// }

	testStreamsOpened := func(expected int) {
		log.Debugf("testing rate limited to %d streams", expected)
		if n := countStreamsOpenedBySender(expected); n != expected {
			t.Fatalf("rate limiting did not work :( -- %d != %d", expected, n)
		}
	}

	// ok that's enough setup. let's do it!

	ctx := context.Background()
	h1 := testutil.GenHostSwarm(t, ctx)
	h2 := testutil.GenHostSwarm(t, ctx)

	// setup receiver handler
	h1.SetStreamHandler(protocol.TestingID, receiver)

	h2pi := h2.Peerstore().PeerInfo(h2.ID())
	log.Debugf("dialing %s", h2pi.Addrs)
	if err := h1.Connect(ctx, h2pi); err != nil {
		t.Fatalf("Failed to connect:", err)
	}

	// launch sender!
	go sender(h2, h1.ID())

	// ok, what do we expect to happen? the receiver should
	// receive 10 requests and stop receiving, blocking the sender.
	// we can test this by counting 10x senderOpened requests

	<-senderOpened // wait for the sender to successfully open some.
	testStreamsOpened(limit - 1)

	// let's "handle" 3 requests.
	<-requestHandled
	<-requestHandled
	<-requestHandled
	// the sender should've now been able to open exactly 3 more.

	testStreamsOpened(3)

	// shouldn't have opened anything more
	testStreamsOpened(0)

	// let's "handle" 100 requests in batches of 5
	for i := 0; i < 20; i++ {
		<-requestHandled
		<-requestHandled
		<-requestHandled
		<-requestHandled
		<-requestHandled
		testStreamsOpened(5)
	}

	// success!

	// now for the sugar on top: let's tear down the receiver. it should
	// exit the sender.
	h1.Close()

	// shouldn't have opened anything more
	testStreamsOpened(0)

	select {
	case <-time.After(100 * time.Millisecond):
		t.Error("receiver shutdown failed to exit sender")
	case <-senderDone:
		log.Info("handler backpressure works!")
	}
}
Code Example #9
File: backpressure_test.go  Project: noffle/go-ipfs
// TestStBackpressureStreamWrite tests whether streams see proper
// backpressure when writing data over the network streams.
func TestStBackpressureStreamWrite(t *testing.T) {

	// senderWrote signals that the sender wrote bytes to remote.
	// the value is the count of bytes written.
	senderWrote := make(chan int, 10000)

	// sender signals it's done (errored out)
	senderDone := make(chan struct{})

	// writeStats drains all pending write signals and returns
	// how much was written and how many writes happened
	writeStats := func() (int, int) {
		writes := 0
		bytes := 0
		for {
			select {
			case n := <-senderWrote:
				writes++
				bytes = bytes + n
			default:
				log.Debugf("stats: sender wrote %d bytes, %d writes", bytes, writes)
				return bytes, writes
			}
		}
	}

	// sender attempts to write as fast as possible, signaling on the
	// completion of every write. This makes it possible to see how
	// fast it's actually writing. We pair this with a receiver
	// that waits for a signal to read.
	sender := func(s inet.Stream) {
		defer func() {
			s.Close()
			senderDone <- struct{}{}
		}()

		// ready a buffer of random data
		buf := make([]byte, 65536)
		u.NewTimeSeededRand().Read(buf)

		for {
			// send a randomly sized subchunk
			from := rand.Intn(len(buf) / 2)
			to := rand.Intn(len(buf) / 2)
			sendbuf := buf[from : from+to]

			n, err := s.Write(sendbuf)
			if err != nil {
				log.Debug("sender error. exiting:", err)
				return
			}

			log.Debugf("sender wrote %d bytes", n)
			senderWrote <- n
		}
	}

	// receive a number of bytes from a stream and verify that
	// exactly that many bytes were read.
	receive := func(s inet.Stream, expect int) {
		log.Debugf("receiver to read %d bytes", expect)
		rbuf := make([]byte, expect)
		n, err := io.ReadFull(s, rbuf)
		if err != nil {
			t.Error("read failed:", err)
		}
		if expect != n {
			t.Error("read len differs: %d != %d", expect, n)
		}
	}

	// ok let's do it!

	// setup the networks
	ctx := context.Background()
	h1 := testutil.GenHostSwarm(t, ctx)
	h2 := testutil.GenHostSwarm(t, ctx)

	// setup sender handler on 1
	h1.SetStreamHandler(protocol.TestingID, sender)

	h2pi := h2.Peerstore().PeerInfo(h2.ID())
	log.Debugf("dialing %s", h2pi.Addrs)
	if err := h1.Connect(ctx, h2pi); err != nil {
		t.Fatalf("Failed to connect:", err)
	}

	// open a stream, from 2->1, this is our reader
	s, err := h2.NewStream(protocol.TestingID, h1.ID())
	if err != nil {
		t.Fatal(err)
	}

	// let's make sure r/w works.
	testSenderWrote := func(bytesE int) {
		bytesA, writesA := writeStats()
		if bytesA != bytesE {
			t.Errorf("numbers failed: %d =?= %d bytes, via %d writes", bytesA, bytesE, writesA)
		}
	}

	// trigger lazy connection handshaking
	_, err = s.Read(nil)
	if err != nil {
		t.Fatal(err)
	}

	// 500ms rounds of lockstep write + drain
	roundsStart := time.Now()
	roundsTotal := 0
	for roundsTotal < (2 << 20) {
		// let the sender fill its buffers, it will stop sending.
		<-time.After(300 * time.Millisecond)
		b, _ := writeStats()
		testSenderWrote(0)
		testSenderWrote(0)

		// drain it all, wait again
		receive(s, b)
		roundsTotal = roundsTotal + b
	}
	roundsTime := time.Since(roundsStart)

	// now read continuously, while we measure stats.
	stop := make(chan struct{})
	contStart := time.Now()

	go func() {
		for {
			select {
			case <-stop:
				return
			default:
				receive(s, 2<<15)
			}
		}
	}()

	contTotal := 0
	for contTotal < (2 << 20) {
		n := <-senderWrote
		contTotal += n
	}
	stop <- struct{}{}
	contTime := time.Since(contStart)

	// now compare! continuous should've been faster AND larger
	if roundsTime < contTime {
		t.Error("continuous should have been faster")
	}

	if roundsTotal < contTotal {
		t.Error("continuous should have been larger, too!")
	}

	// and a couple rounds more for good measure ;)
	for i := 0; i < 3; i++ {
		// let the sender fill its buffers, it will stop sending.
		<-time.After(300 * time.Millisecond)
		b, _ := writeStats()
		testSenderWrote(0)
		testSenderWrote(0)

		// drain it all, wait again
		receive(s, b)
	}

	// this doesn't work :(:
	// // now for the sugar on top: let's tear down the receiver. it should
	// // exit the sender.
	// n1.Close()
	// testSenderWrote(0)
	// testSenderWrote(0)
	// select {
	// case <-time.After(2 * time.Second):
	// 	t.Error("receiver shutdown failed to exit sender")
	// case <-senderDone:
	// 	log.Info("handler backpressure works!")
	// }
}