func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) {

	net := tn.VirtualNetwork()
	rs := mock.VirtualRoutingServer()
	block := blocks.NewBlock([]byte("block"))
	g := NewSessionGenerator(net, rs)

	hasBlock := g.Next()

	if err := hasBlock.blockstore.Put(block); err != nil {
		t.Fatal(err)
	}
	if err := hasBlock.exchange.HasBlock(context.Background(), *block); err != nil {
		t.Fatal(err)
	}

	wantsBlock := g.Next()

	ctx, _ := context.WithTimeout(context.Background(), time.Second)
	received, err := wantsBlock.exchange.Block(ctx, block.Key())
	if err != nil {
		t.Log(err)
		t.Fatal("Expected to succeed")
	}

	if !bytes.Equal(block.Data, received.Data) {
		t.Fatal("Data doesn't match")
	}
}
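// TestClientFindProviders checks that once a client provides a key, the peer
// appears both in the routing server's provider table and in the client's
// own FindProvidersAsync results.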
func TestClientFindProviders(t *testing.T) {
	peer := peer.WithIDString("42")
	rs := VirtualRoutingServer()
	client := rs.Client(peer)

	k := u.Key("hello")
	err := client.Provide(context.Background(), k)
	if err != nil {
		t.Fatal(err)
	}
	max := 100

	providersFromHashTable := rs.Providers(k)

	isInHT := false
	for _, p := range providersFromHashTable {
		if bytes.Equal(p.ID(), peer.ID()) {
			isInHT = true
		}
	}
	if !isInHT {
		t.Fatal("Despite client providing key, peer wasn't in hash table as a provider")
	}
	providersFromClient := client.FindProvidersAsync(context.Background(), u.Key("hello"), max)
	isInClient := false
	for p := range providersFromClient {
		if bytes.Equal(p.ID(), peer.ID()) {
			isInClient = true
		}
	}
	if !isInClient {
		t.Fatal("Despite client providing key, client didn't receive peer when finding providers")
	}
}
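// TestServiceHandler pushes a raw message through the service pipe and checks
// that the ReverseHandler reverses the payload and attaches no request ID.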
func TestServiceHandler(t *testing.T) {
	ctx := context.Background()
	h := &ReverseHandler{}
	s := NewService(ctx, h)
	peer1 := newPeer(t, "11140beec7b5ea3f0fdbc95d0dd47f3c5bc275aaaaaa")

	d, err := wrapData([]byte("beep"), nil)
	if err != nil {
		t.Error(err)
	}

	m1 := msg.New(peer1, d)
	s.GetPipe().Incoming <- m1
	m2 := <-s.GetPipe().Outgoing

	d, rid, err := unwrapData(m2.Data())
	if err != nil {
		t.Error(err)
	}

	if rid != nil {
		t.Error("RequestID should be nil")
	}

	if !bytes.Equal(d, []byte("peeb")) {
		t.Errorf("service handler data incorrect: %v != %v", d, "oof")
	}
}
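// TestClientOverMax announces many providers for one key and checks that
// FindProvidersAsync caps its results at the requested maximum.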
func TestClientOverMax(t *testing.T) {
	rs := VirtualRoutingServer()
	k := u.Key("hello")
	numProvidersForHelloKey := 100
	for i := 0; i < numProvidersForHelloKey; i++ {
		peer := peer.WithIDString(strconv.Itoa(i))
		err := rs.Announce(peer, k)
		if err != nil {
			t.Fatal(err)
		}
	}
	providersFromHashTable := rs.Providers(k)
	if len(providersFromHashTable) != numProvidersForHelloKey {
		t.Logf("expected %d providers, got %d", numProvidersForHelloKey, len(providersFromHashTable))
		t.Fatal("not all providers were returned")
	}

	max := 10
	peer := peer.WithIDString("TODO")
	client := rs.Client(peer)

	providersFromClient := client.FindProvidersAsync(context.Background(), k, max)
	i := 0
	for range providersFromClient {
		i++
	}
	if i != max {
		t.Fatal("Too many providers returned")
	}
}
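// TestMulticonnClose closes every underlying connection and expects both
// multiconns to report themselves closed shortly afterwards.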
func TestMulticonnClose(t *testing.T) {
	// t.Skip("fooo")

	log.Info("TestMulticonnSendUnderlying")
	ctx := context.Background()
	c1, c2 := setupMultiConns(t, ctx)

	for _, c := range c1.conns {
		c.Close()
	}

	for _, c := range c2.conns {
		c.Close()
	}

	timeout := time.After(100 * time.Millisecond)
	select {
	case <-c1.Closed():
	case <-timeout:
		t.Fatal("timeout")
	}

	select {
	case <-c2.Closed():
	case <-timeout:
		t.Fatal("timeout")
	}
}
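// TestServiceRequest patches two services together and checks that a request
// sent by one is answered by the other's ReverseHandler.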
func TestServiceRequest(t *testing.T) {
	ctx := context.Background()
	s1 := NewService(ctx, &ReverseHandler{})
	s2 := NewService(ctx, &ReverseHandler{})

	peer1 := newPeer(t, "11140beec7b5ea3f0fdbc95d0dd47f3c5bc275aaaaaa")

	// patch services together
	go func() {
		for {
			select {
			case m := <-s1.GetPipe().Outgoing:
				s2.GetPipe().Incoming <- m
			case m := <-s2.GetPipe().Outgoing:
				s1.GetPipe().Incoming <- m
			case <-ctx.Done():
				return
			}
		}
	}()

	m1 := msg.New(peer1, []byte("beep"))
	m2, err := s1.SendRequest(ctx, m1)
	if err != nil {
		t.Error(err)
	}

	if !bytes.Equal(m2.Data(), []byte("peeb")) {
		t.Errorf("service handler data incorrect: %v != %v", m2.Data(), "oof")
	}
}
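// TestServiceRequestTimeout delays message delivery past the context deadline
// and expects SendRequest to fail instead of returning a response.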
func TestServiceRequestTimeout(t *testing.T) {
	ctx, _ := context.WithTimeout(context.Background(), time.Millisecond)
	s1 := NewService(ctx, &ReverseHandler{})
	s2 := NewService(ctx, &ReverseHandler{})
	peer1 := newPeer(t, "11140beec7b5ea3f0fdbc95d0dd47f3c5bc275aaaaaa")

	// patch services together
	go func() {
		for {
			<-time.After(time.Millisecond)
			select {
			case m := <-s1.GetPipe().Outgoing:
				s2.GetPipe().Incoming <- m
			case m := <-s2.GetPipe().Outgoing:
				s1.GetPipe().Incoming <- m
			case <-ctx.Done():
				return
			}
		}
	}()

	m1 := msg.New(peer1, []byte("beep"))
	m2, err := s1.SendRequest(ctx, m1)
	if err == nil || m2 != nil {
		t.Error("should've timed out")
	}
}
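// TestCancel closes both connections and cancels the listener context, then
// verifies that each connection reports itself closed.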
func TestCancel(t *testing.T) {
	// t.Skip("Skipping in favor of another test")

	ctx, cancel := context.WithCancel(context.Background())
	c1, c2 := setupConn(t, ctx, "/ip4/127.0.0.1/tcp/5534", "/ip4/127.0.0.1/tcp/5545")

	select {
	case <-c1.Closed():
		t.Fatal("done before close")
	case <-c2.Closed():
		t.Fatal("done before close")
	default:
	}

	c1.Close()
	c2.Close()
	cancel() // listener

	// wait to ensure other goroutines run and close things.
	<-time.After(time.Microsecond * 10)
	// test that cancel called Close.

	select {
	case <-c1.Closed():
	default:
		t.Fatal("not done after cancel")
	}

	select {
	case <-c2.Closed():
	default:
		t.Fatal("not done after cancel")
	}

}
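// TestClose verifies that closing each connection individually makes its
// Closed channel fire.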
func TestClose(t *testing.T) {
	// t.Skip("Skipping in favor of another test")

	ctx, cancel := context.WithCancel(context.Background())
	c1, c2 := setupConn(t, ctx, "/ip4/127.0.0.1/tcp/5534", "/ip4/127.0.0.1/tcp/5545")

	select {
	case <-c1.Closed():
		t.Fatal("done before close")
	case <-c2.Closed():
		t.Fatal("done before close")
	default:
	}

	c1.Close()

	select {
	case <-c1.Closed():
	default:
		t.Fatal("not done after cancel")
	}

	c2.Close()

	select {
	case <-c2.Closed():
	default:
		t.Fatal("not done after cancel")
	}

	cancel() // close the listener :P
}
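// TestProvides announces a provider from one DHT node and checks that another
// node in the small test network can discover it.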
func TestProvides(t *testing.T) {
	// t.Skip("skipping test to debug another")
	ctx := context.Background()

	u.Debug = false

	_, peers, dhts := setupDHTS(ctx, 4, t)
	defer func() {
		for i := 0; i < 4; i++ {
			dhts[i].Close()
			defer dhts[i].dialer.(inet.Network).Close()
		}
	}()

	_, err := dhts[0].Connect(ctx, peers[1])
	if err != nil {
		t.Fatal(err)
	}

	_, err = dhts[1].Connect(ctx, peers[2])
	if err != nil {
		t.Fatal(err)
	}

	_, err = dhts[1].Connect(ctx, peers[3])
	if err != nil {
		t.Fatal(err)
	}

	err = dhts[3].putLocal(u.Key("hello"), []byte("world"))
	if err != nil {
		t.Fatal(err)
	}

	bits, err := dhts[3].getLocal(u.Key("hello"))
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(bits, []byte("world")) {
		t.Fatal("getLocal returned the wrong value")
	}

	err = dhts[3].Provide(ctx, u.Key("hello"))
	if err != nil {
		t.Fatal(err)
	}

	time.Sleep(time.Millisecond * 60)

	ctxT, _ := context.WithTimeout(ctx, time.Second)
	provchan := dhts[0].FindProvidersAsync(ctxT, u.Key("hello"), 1)

	after := time.After(time.Second)
	select {
	case prov := <-provchan:
		if prov == nil {
			t.Fatal("Got back nil provider")
		}
	case <-after:
		t.Fatal("Did not get a provider back.")
	}
}
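// TestValueGetSet puts a value through one DHT node and expects both connected
// nodes to read the same value back.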
func TestValueGetSet(t *testing.T) {
	// t.Skip("skipping test to debug another")

	ctx := context.Background()
	u.Debug = false
	addrA, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/11235")
	if err != nil {
		t.Fatal(err)
	}
	addrB, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/15679")
	if err != nil {
		t.Fatal(err)
	}

	peerA := makePeer(addrA)
	peerB := makePeer(addrB)

	dhtA := setupDHT(ctx, t, peerA)
	dhtB := setupDHT(ctx, t, peerB)

	vf := func(u.Key, []byte) error {
		return nil
	}
	dhtA.Validators["v"] = vf
	dhtB.Validators["v"] = vf

	defer dhtA.Close()
	defer dhtB.Close()
	defer dhtA.dialer.(inet.Network).Close()
	defer dhtB.dialer.(inet.Network).Close()

	_, err = dhtA.Connect(ctx, peerB)
	if err != nil {
		t.Fatal(err)
	}

	ctxT, _ := context.WithTimeout(ctx, time.Second)
	if err := dhtA.PutValue(ctxT, "/v/hello", []byte("world")); err != nil {
		t.Fatal(err)
	}

	ctxT, _ = context.WithTimeout(ctx, time.Second*2)
	val, err := dhtA.GetValue(ctxT, "/v/hello")
	if err != nil {
		t.Fatal(err)
	}

	if string(val) != "world" {
		t.Fatalf("Expected 'world' got '%s'", string(val))
	}

	ctxT, _ = context.WithTimeout(ctx, time.Second*2)
	val, err = dhtB.GetValue(ctxT, "/v/hello")
	if err != nil {
		t.Fatal(err)
	}

	if string(val) != "world" {
		t.Fatalf("Expected 'world' got '%s'", string(val))
	}
}
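// TestCloseLeak runs many connection pairs, exchanges messages on each, closes
// everything, and then checks that no goroutines have leaked.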
func TestCloseLeak(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	if os.Getenv("TRAVIS") == "true" {
		t.Skip("this doesn't work well on travis")
	}

	var wg sync.WaitGroup

	runPair := func(p1, p2, num int) {
		a1 := strconv.Itoa(p1)
		a2 := strconv.Itoa(p2)
		ctx, cancel := context.WithCancel(context.Background())
		c1, c2 := setupConn(t, ctx, "/ip4/127.0.0.1/tcp/"+a1, "/ip4/127.0.0.1/tcp/"+a2)

		for i := 0; i < num; i++ {
			b1 := []byte("beep")
			c1.Out() <- b1
			b2 := <-c2.In()
			if !bytes.Equal(b1, b2) {
				panic("bytes not equal")
			}

			b2 = []byte("boop")
			c2.Out() <- b2
			b1 = <-c1.In()
			if !bytes.Equal(b1, b2) {
				panic("bytes not equal")
			}

			<-time.After(time.Microsecond * 5)
		}

		c1.Close()
		c2.Close()
		cancel() // close the listener
		wg.Done()
	}

	var cons = 20
	var msgs = 100
	fmt.Printf("Running %d connections * %d msgs.\n", cons, msgs)
	for i := 0; i < cons; i++ {
		wg.Add(1)
		// space the port pairs so concurrent pairs never share a listen address
		go runPair(2000+i*2, 2001+i*2, msgs)
	}

	fmt.Printf("Waiting...\n")
	wg.Wait()
	// done!

	<-time.After(time.Millisecond * 150)
	if runtime.NumGoroutine() > 20 {
		// panic("uncomment me to debug")
		t.Fatal("leaking goroutines:", runtime.NumGoroutine())
	}
}
func TestBlockReturnsErr(t *testing.T) {
	off := NewOfflineExchange()
	_, err := off.Block(context.Background(), u.Key("foo"))
	if err != nil {
		return // as desired
	}
	t.Fail()
}
func TestHasBlockReturnsNil(t *testing.T) {
	off := NewOfflineExchange()
	block := blocks.NewBlock([]byte("data"))
	err := off.HasBlock(context.Background(), *block)
	if err != nil {
		t.Fatal("")
	}
}
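// getOrFail fetches the block through the exchange when the instance's
// blockstore does not already have it, then marks the wait group done.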
func getOrFail(bitswap instance, b *blocks.Block, t *testing.T, wg *sync.WaitGroup) {
	if _, err := bitswap.blockstore.Get(b.Key()); err != nil {
		_, err := bitswap.exchange.Block(context.Background(), b.Key())
		if err != nil {
			t.Fatal(err)
		}
	}
	wg.Done()
}
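// TestStopping exercises the muxer before and after Close: messages flow while
// it is running and are no longer delivered once it has been closed.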
func TestStopping(t *testing.T) {
	ctx := context.Background()

	// setup
	p1 := &TestProtocol{Pipe: msg.NewPipe(10)}
	p2 := &TestProtocol{Pipe: msg.NewPipe(10)}
	pid1 := pb.ProtocolID_Test
	pid2 := pb.ProtocolID_Identify
	mux1 := NewMuxer(ctx, ProtocolMap{
		pid1: p1,
		pid2: p2,
	})
	peer1 := newPeer(t, "11140beec7b5ea3f0fdbc95d0dd47f3c5bc275aaaaaa")
	// peer2 := newPeer(t, "11140beec7b5ea3f0fdbc95d0dd47f3c5bc275bbbbbb")

	// test outgoing p1
	for _, s := range []string{"foo1", "bar1", "baz1"} {
		p1.Outgoing <- msg.New(peer1, []byte(s))
		testWrappedMsg(t, <-mux1.Outgoing, pid1, []byte(s))
	}

	// test incoming p1
	for _, s := range []string{"foo2", "bar2", "baz2"} {
		d, err := wrapData([]byte(s), pid1)
		if err != nil {
			t.Error(err)
		}
		mux1.Incoming <- msg.New(peer1, d)
		testMsg(t, <-p1.Incoming, []byte(s))
	}

	mux1.Close() // waits

	// test outgoing p1
	for _, s := range []string{"foo3", "bar3", "baz3"} {
		p1.Outgoing <- msg.New(peer1, []byte(s))
		select {
		case m := <-mux1.Outgoing:
			t.Errorf("should not have received anything. Got: %v", string(m.Data()))
		case <-time.After(time.Millisecond):
		}
	}

	// test incoming p1
	for _, s := range []string{"foo4", "bar4", "baz4"} {
		d, err := wrapData([]byte(s), pid1)
		if err != nil {
			t.Error(err)
		}
		mux1.Incoming <- msg.New(peer1, d)
		select {
		case <-p1.Incoming:
			t.Error("should not have received anything.")
		case <-time.After(time.Millisecond):
		}
	}
}
func TestDoReturnsNil(t *testing.T) {
	ctx := context.Background()
	err := ContextDo(ctx, func() error {
		return nil
	})
	if err != nil {
		t.Fail()
	}
}
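// TestSendMessageAsyncButWaitForResponse sends a message to a responder peer
// and waits until the responder's reply reaches the waiter's delegate.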
func TestSendMessageAsyncButWaitForResponse(t *testing.T) {
	net := VirtualNetwork()
	idOfResponder := []byte("responder")
	waiter := net.Adapter(peer.WithIDString("waiter"))
	responder := net.Adapter(peer.WithID(idOfResponder))

	var wg sync.WaitGroup

	wg.Add(1)

	expectedStr := "received async"

	responder.SetDelegate(lambda(func(
		ctx context.Context,
		fromWaiter peer.Peer,
		msgFromWaiter bsmsg.BitSwapMessage) (
		peer.Peer, bsmsg.BitSwapMessage) {

		msgToWaiter := bsmsg.New()
		msgToWaiter.AddBlock(*blocks.NewBlock([]byte(expectedStr)))

		return fromWaiter, msgToWaiter
	}))

	waiter.SetDelegate(lambda(func(
		ctx context.Context,
		fromResponder peer.Peer,
		msgFromResponder bsmsg.BitSwapMessage) (
		peer.Peer, bsmsg.BitSwapMessage) {

		// TODO assert that this came from the correct peer and that the message contents are as expected
		ok := false
		for _, b := range msgFromResponder.Blocks() {
			if string(b.Data) == expectedStr {
				wg.Done()
				ok = true
			}
		}

		if !ok {
			// t.Fatal must not be called from a non-test goroutine; report the
			// failure and release the waiter instead.
			t.Error("Message not received from the responder")
			wg.Done()
		}
		return nil, nil
	}))

	messageSentAsync := bsmsg.New()
	messageSentAsync.AddBlock(*blocks.NewBlock([]byte("data")))
	errSending := waiter.SendMessage(
		context.Background(), peer.WithID(idOfResponder), messageSentAsync)
	if errSending != nil {
		t.Fatal(errSending)
	}

	wg.Wait() // until waiter delegate function is executed
}
func TestDoReturnsFuncError(t *testing.T) {
	ctx := context.Background()
	expected := errors.New("expected to be returned by ContextDo")
	err := ContextDo(ctx, func() error {
		return expected
	})
	if err != expected {
		t.Fail()
	}
}
func TestDoReturnsContextErr(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	ch := make(chan struct{})
	err := ContextDo(ctx, func() error {
		cancel()
		ch <- struct{}{} // won't return
		return nil
	})
	if err != ctx.Err() {
		t.Fail()
	}
}
func TestProviderManager(t *testing.T) {
	ctx := context.Background()
	mid := peer.ID("testing")
	p := NewProviderManager(ctx, mid)
	a := u.Key("test")
	p.AddProvider(a, peer.WithIDString("testingprovider"))
	resp := p.GetProviders(a)
	if len(resp) != 1 {
		t.Fatal("Could not retrieve provider.")
	}
	p.Close()
}
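// TestCarryOnWhenDeadlineExpires subscribes with an already-expired context
// and expects no block to be delivered on the returned channel.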
func TestCarryOnWhenDeadlineExpires(t *testing.T) {

	impossibleDeadline := time.Nanosecond
	fastExpiringCtx, _ := context.WithTimeout(context.Background(), impossibleDeadline)

	n := New()
	defer n.Shutdown()
	block := blocks.NewBlock([]byte("A Missed Connection"))
	blockChannel := n.Subscribe(fastExpiringCtx, block.Key())

	assertBlockChannelNil(t, blockChannel)
}
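// TestLayeredGet stores a value on one DHT node, announces it as a provider,
// and checks that a node connected only through intermediaries can retrieve it.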
func TestLayeredGet(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()
	u.Debug = false
	_, peers, dhts := setupDHTS(ctx, 4, t)
	defer func() {
		for i := 0; i < 4; i++ {
			dhts[i].Close()
			defer dhts[i].dialer.(inet.Network).Close()
		}
	}()

	_, err := dhts[0].Connect(ctx, peers[1])
	if err != nil {
		t.Fatalf("Failed to connect: %s", err)
	}

	_, err = dhts[1].Connect(ctx, peers[2])
	if err != nil {
		t.Fatal(err)
	}

	_, err = dhts[1].Connect(ctx, peers[3])
	if err != nil {
		t.Fatal(err)
	}

	err = dhts[3].putLocal(u.Key("/v/hello"), []byte("world"))
	if err != nil {
		t.Fatal(err)
	}

	err = dhts[3].Provide(ctx, u.Key("/v/hello"))
	if err != nil {
		t.Fatal(err)
	}

	time.Sleep(time.Millisecond * 60)

	ctxT, _ := context.WithTimeout(ctx, time.Second)
	val, err := dhts[0].GetValue(ctxT, u.Key("/v/hello"))
	if err != nil {
		t.Fatal(err)
	}

	if string(val) != "world" {
		t.Fatal("Got incorrect value.")
	}

}
func ExampleWithTimeout() {
	// Pass a context with a timeout to tell a blocking function that it
	// should abandon its work after the timeout elapses.
	ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond)
	select {
	case <-time.After(200 * time.Millisecond):
		fmt.Println("overslept")
	case <-ctx.Done():
		fmt.Println(ctx.Err()) // prints "context deadline exceeded"
	}
	// Output:
	// context deadline exceeded
}
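// TestSwarm seeds a few blocks on one bitswap instance and checks that every
// other instance in a large session can fetch and store them.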
func TestSwarm(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	net := tn.VirtualNetwork()
	rs := mock.VirtualRoutingServer()
	sg := NewSessionGenerator(net, rs)
	bg := NewBlockGenerator()

	t.Log("Create a ton of instances, and just a few blocks")

	numInstances := 500
	numBlocks := 2

	instances := sg.Instances(numInstances)
	blocks := bg.Blocks(numBlocks)

	t.Log("Give the blocks to the first instance")

	first := instances[0]
	for _, b := range blocks {
		first.blockstore.Put(b)
		first.exchange.HasBlock(context.Background(), *b)
		rs.Announce(first.peer, b.Key())
	}

	t.Log("Distribute!")

	var wg sync.WaitGroup

	for _, inst := range instances {
		for _, b := range blocks {
			wg.Add(1)
			// NB: executing getOrFail concurrently puts tremendous pressure on
			// the goroutine scheduler
			getOrFail(inst, b, t, &wg)
		}
	}
	wg.Wait()

	t.Log("Verify!")

	for _, inst := range instances {
		for _, b := range blocks {
			if _, err := inst.blockstore.Get(b.Key()); err != nil {
				t.Fatal(err)
			}
		}
	}
}
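// TestLogErrorReceivedByParent checks that an error logged against a child
// context is delivered on the parent's error channel.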
func TestLogErrorReceivedByParent(t *testing.T) {

	expected := errors.New("From child to parent")

	ctx, errs := ContextWithErrorLog(context.Background())

	go func() {
		LogError(ctx, expected)
	}()

	if err := <-errs; err != expected {
		t.Fatal("didn't receive the expected error")
	}
}
func TestContextContainsMetadata(t *testing.T) {
	t.Parallel()

	m := Metadata{"foo": "bar"}
	ctx := ContextWithLoggable(context.Background(), m)
	got, err := MetadataFromContext(ctx)
	if err != nil {
		t.Fatal(err)
	}

	_, exists := got["foo"]
	if !exists {
		t.Fail()
	}
}
func TestPublishSubscribe(t *testing.T) {
	blockSent := blocks.NewBlock([]byte("Greetings from The Interval"))

	n := New()
	defer n.Shutdown()
	ch := n.Subscribe(context.Background(), blockSent.Key())

	n.Publish(*blockSent)
	blockRecvd, ok := <-ch
	if !ok {
		t.Fail()
	}

	assertBlocksEqual(t, blockRecvd, *blockSent)

}
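// TestGetBlockTimeout requests a block nobody has using a tiny deadline and
// expects a context.DeadlineExceeded error.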
func TestGetBlockTimeout(t *testing.T) {

	net := tn.VirtualNetwork()
	rs := mock.VirtualRoutingServer()
	g := NewSessionGenerator(net, rs)

	self := g.Next()

	ctx, _ := context.WithTimeout(context.Background(), time.Nanosecond)
	block := blocks.NewBlock([]byte("block"))
	_, err := self.exchange.Block(ctx, block.Key())

	if err != context.DeadlineExceeded {
		t.Fatal("Expected DeadlineExceeded error")
	}
}
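// TestSimpleMuxer routes messages for two protocols through one muxer and
// checks they are wrapped and unwrapped with the correct protocol IDs.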
func TestSimpleMuxer(t *testing.T) {
	ctx := context.Background()

	// setup
	p1 := &TestProtocol{Pipe: msg.NewPipe(10)}
	p2 := &TestProtocol{Pipe: msg.NewPipe(10)}
	pid1 := pb.ProtocolID_Test
	pid2 := pb.ProtocolID_Routing
	mux1 := NewMuxer(ctx, ProtocolMap{
		pid1: p1,
		pid2: p2,
	})
	peer1 := newPeer(t, "11140beec7b5ea3f0fdbc95d0dd47f3c5bc275aaaaaa")
	// peer2 := newPeer(t, "11140beec7b5ea3f0fdbc95d0dd47f3c5bc275bbbbbb")

	// test outgoing p1
	for _, s := range []string{"foo", "bar", "baz"} {
		p1.Outgoing <- msg.New(peer1, []byte(s))
		testWrappedMsg(t, <-mux1.Outgoing, pid1, []byte(s))
	}

	// test incoming p1
	for _, s := range []string{"foo", "bar", "baz"} {
		d, err := wrapData([]byte(s), pid1)
		if err != nil {
			t.Error(err)
		}
		mux1.Incoming <- msg.New(peer1, d)
		testMsg(t, <-p1.Incoming, []byte(s))
	}

	// test outgoing p2
	for _, s := range []string{"foo", "bar", "baz"} {
		p2.Outgoing <- msg.New(peer1, []byte(s))
		testWrappedMsg(t, <-mux1.Outgoing, pid2, []byte(s))
	}

	// test incoming p2
	for _, s := range []string{"foo", "bar", "baz"} {
		d, err := wrapData([]byte(s), pid2)
		if err != nil {
			t.Error(err)
		}
		mux1.Incoming <- msg.New(peer1, d)
		testMsg(t, <-p2.Incoming, []byte(s))
	}
}