Code example #1
File: pipe.go Project: carriercomm/interplanetary
// NewSecurePipe constructs a pipe with channels of a given buffer size.
func NewSecurePipe(ctx context.Context, bufsize int, local peer.Peer,
	peers peer.Peerstore, insecure pipes.Duplex) (*SecurePipe, error) {

	ctx, cancel := context.WithCancel(ctx)

	sp := &SecurePipe{
		Duplex: pipes.Duplex{
			In:  make(chan []byte, bufsize),
			Out: make(chan []byte, bufsize),
		},
		local:    local,
		peers:    peers,
		insecure: insecure,

		ctx:    ctx,
		cancel: cancel,
	}

	if err := sp.handshake(); err != nil {
		sp.Close()
		return nil, err
	}

	return sp, nil
}
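A minimal caller-side sketch of the constructor above. The ctx, local, peers, and insecure values are assumed to already exist in the caller's scope, and the channel usage relies on the embedded pipes.Duplex shown in the struct literal; this is an illustration, not code from the project.

func useSecurePipe(ctx context.Context, local peer.Peer, peers peer.Peerstore, insecure pipes.Duplex) error {
	sp, err := NewSecurePipe(ctx, 10, local, peers, insecure)
	if err != nil {
		return err
	}
	defer sp.Close()

	sp.Out <- []byte("hello") // outbound plaintext, secured on the way out
	reply := <-sp.In          // inbound messages arrive decrypted
	_ = reply
	return nil
}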
Code example #2
func TestCancel(t *testing.T) {
	// t.Skip("Skipping in favor of another test")

	ctx, cancel := context.WithCancel(context.Background())
	c1, c2 := setupConn(t, ctx, "/ip4/127.0.0.1/tcp/5534", "/ip4/127.0.0.1/tcp/5545")

	select {
	case <-c1.Closed():
		t.Fatal("done before close")
	case <-c2.Closed():
		t.Fatal("done before close")
	default:
	}

	c1.Close()
	c2.Close()
	cancel() // listener

	// wait to ensure other goroutines run and close things.
	<-time.After(time.Microsecond * 10)
	// test that cancel called Close.

	select {
	case <-c1.Closed():
	default:
		t.Fatal("not done after cancel")
	}

	select {
	case <-c2.Closed():
	default:
		t.Fatal("not done after cancel")
	}

}
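The select with an empty default above is the standard non-blocking probe of a done channel. Distilled into a standalone helper (a sketch of the idiom, not project code):

func isClosed(done <-chan struct{}) bool {
	select {
	case <-done:
		return true // the channel is closed, i.e. the connection shut down
	default:
		return false // still open; return without blocking
	}
}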
Code example #3
func TestClose(t *testing.T) {
	// t.Skip("Skipping in favor of another test")

	ctx, cancel := context.WithCancel(context.Background())
	c1, c2 := setupConn(t, ctx, "/ip4/127.0.0.1/tcp/5534", "/ip4/127.0.0.1/tcp/5545")

	select {
	case <-c1.Closed():
		t.Fatal("done before close")
	case <-c2.Closed():
		t.Fatal("done before close")
	default:
	}

	c1.Close()

	select {
	case <-c1.Closed():
	default:
		t.Fatal("not done after cancel")
	}

	c2.Close()

	select {
	case <-c2.Closed():
	default:
		t.Fatal("not done after cancel")
	}

	cancel() // close the listener :P
}
Code example #4
File: listen.go Project: carriercomm/interplanetary
// Listen listens on the particular multiaddr, with given peer and peerstore.
func Listen(ctx context.Context, addr ma.Multiaddr, local peer.Peer, peers peer.Peerstore) (Listener, error) {

	ml, err := manet.Listen(addr)
	if err != nil {
		return nil, fmt.Errorf("Failed to listen on %s: %s", addr, err)
	}

	// todo make this a variable
	chansize := 10

	l := &listener{
		Listener: ml,
		maddr:    addr,
		peers:    peers,
		local:    local,
		conns:    make(chan Conn, chansize),
		chansize: chansize,
		ctx:      ctx,
	}

	// need a separate context to use for the context closer.
	// This is because the parent context will be given to all connections too,
	// and if we close the listener, the connections shouldn't share the fate.
	ctx2, _ := context.WithCancel(ctx) // note: the cancel func is dropped; ctx2's shutdown is driven by the ContextCloser below
	l.ContextCloser = ctxc.NewContextCloser(ctx2, l.close)

	l.Children().Add(1)
	go l.listen()

	return l, nil
}
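The fate-sharing comment is the key design point: cancellation flows from parent to child, never upward. A self-contained sketch of that property of context.WithCancel:

package main

import (
	"context"
	"fmt"
)

func main() {
	parent, cancelParent := context.WithCancel(context.Background())
	defer cancelParent()

	child, cancelChild := context.WithCancel(parent)
	cancelChild() // cancel the child (the "listener" context)

	fmt.Println(child.Err())  // context.Canceled
	fmt.Println(parent.Err()) // <nil>: the parent, and any connections on it, survive
}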
Code example #5
func TestCloseLeak(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	if os.Getenv("TRAVIS") == "true" {
		t.Skip("this doesn't work well on travis")
	}

	var wg sync.WaitGroup

	runPair := func(p1, p2, num int) {
		a1 := strconv.Itoa(p1)
		a2 := strconv.Itoa(p2)
		ctx, cancel := context.WithCancel(context.Background())
		c1, c2 := setupConn(t, ctx, "/ip4/127.0.0.1/tcp/"+a1, "/ip4/127.0.0.1/tcp/"+a2)

		for i := 0; i < num; i++ {
			b1 := []byte("beep")
			c1.Out() <- b1
			b2 := <-c2.In()
			if !bytes.Equal(b1, b2) {
				panic("bytes not equal")
			}

			b2 = []byte("boop")
			c2.Out() <- b2
			b1 = <-c1.In()
			if !bytes.Equal(b1, b2) {
				panic("bytes not equal")
			}

			<-time.After(time.Microsecond * 5)
		}

		c1.Close()
		c2.Close()
		cancel() // close the listener
		wg.Done()
	}

	var cons = 20
	var msgs = 100
	fmt.Printf("Running %d connections * %d msgs.\n", cons, msgs)
	for i := 0; i < cons; i++ {
		wg.Add(1)
		go runPair(2000+i*2, 2001+i*2, msgs) // step ports by 2 so adjacent pairs don't bind the same port
	}

	fmt.Printf("Waiting...\n")
	wg.Wait()
	// done!

	<-time.After(time.Millisecond * 150)
	if runtime.NumGoroutine() > 20 {
		// panic("uncomment me to debug")
		t.Fatal("leaking goroutines:", runtime.NumGoroutine())
	}
}
Code example #6
File: bitswap.go Project: carriercomm/interplanetary
// Block attempts to retrieve a particular block from peers within the
// deadline enforced by the context
//
// TODO ensure only one active request per key
func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) {
	log.Debugf("Get Block %v", k)
	now := time.Now()
	defer func() {
		log.Debugf("GetBlock took %f secs", time.Now().Sub(now).Seconds())
	}()

	ctx, cancelFunc := context.WithCancel(parent)
	defer cancelFunc()

	bs.wantlist.Add(k)
	promise := bs.notifications.Subscribe(ctx, k)

	const maxProviders = 20
	peersToQuery := bs.routing.FindProvidersAsync(ctx, k, maxProviders)

	go func() {
		message := bsmsg.New()
		for _, wanted := range bs.wantlist.Keys() {
			message.AddWanted(wanted)
		}
		for peerToQuery := range peersToQuery {
			log.Debugf("bitswap got peersToQuery: %s", peerToQuery)
			go func(p peer.Peer) {

				log.Debugf("bitswap dialing peer: %s", p)
				err := bs.sender.DialPeer(ctx, p)
				if err != nil {
					log.Errorf("Error sender.DialPeer(%s)", p)
					return
				}

				response, err := bs.sender.SendRequest(ctx, p, message)
				if err != nil {
					log.Errorf("Error sender.SendRequest(%s) = %s", p, err)
					return
				}
				// FIXME ensure accounting is handled correctly when
				// communication fails. May require slightly different API to
				// get better guarantees. May need shared sequence numbers.
				bs.strategy.MessageSent(p, message)

				if response == nil {
					return
				}
				bs.ReceiveMessage(ctx, p, response)
			}(peerToQuery)
		}
	}()

	select {
	case block := <-promise:
		bs.wantlist.Remove(k)
		return &block, nil
	case <-parent.Done():
		return nil, parent.Err()
	}
}
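Because the deadline is enforced through the caller's context, a typical invocation wraps the parent in WithTimeout. A hypothetical caller, reusing the bs and k names from the snippet above:

func getBlockWithDeadline(bs *bitswap, k u.Key) (*blocks.Block, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	return bs.Block(ctx, k) // returns ctx.Err() if the deadline fires before a block arrives
}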
Code example #7
File: do_test.go Project: carriercomm/interplanetary
func TestDoReturnsContextErr(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	ch := make(chan struct{})
	err := ContextDo(ctx, func() error {
		cancel()
		ch <- struct{}{} // won't return
		return nil
	})
	if err != ctx.Err() {
		t.Fail()
	}
}
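ContextDo itself is not part of this listing. For the test to pass, it only needs to prefer ctx.Err() once the context is done while f is still blocked; a plausible shape for such a helper (an assumption, not the project's actual implementation):

func ContextDo(ctx context.Context, f func() error) error {
	ch := make(chan error, 1) // buffered, so f's goroutine can finish even if we stop listening
	go func() { ch <- f() }()
	select {
	case <-ctx.Done():
		return ctx.Err() // f may still be running, as in the test above
	case err := <-ch:
		return err
	}
}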
Code example #8
File: query.go Project: carriercomm/interplanetary
func newQueryRunner(ctx context.Context, q *dhtQuery) *dhtQueryRunner {
	ctx, cancel := context.WithCancel(ctx)

	return &dhtQueryRunner{
		ctx:            ctx,
		cancel:         cancel,
		query:          q,
		peersToQuery:   queue.NewChanQueue(ctx, queue.NewXORDistancePQ(q.key)),
		peersRemaining: todoctr.NewSyncCounter(),
		peersSeen:      peer.Map{},
		rateLimit:      make(chan struct{}, q.concurrency),
	}
}
Code example #9
File: closer.go Project: carriercomm/interplanetary
// NewContextCloser constructs and returns a ContextCloser. It calls the
// CloseFunc cf before its Done() wait signal fires.
func NewContextCloser(ctx context.Context, cf CloseFunc) ContextCloser {
	if cf == nil {
		cf = nilCloseFunc
	}
	ctx, cancel := context.WithCancel(ctx)
	c := &contextCloser{
		ctx:       ctx,
		cancel:    cancel,
		closeFunc: cf,
		closed:    make(chan struct{}),
	}

	c.Children().Add(1) // we're a child goroutine, to be waited upon.
	go c.closeOnContextDone()
	return c
}
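A caller-side sketch. The Close method and the error-returning CloseFunc signature are inferred from how ContextCloser is used elsewhere in this listing (examples #1 and #4), so treat the details as assumptions:

func newComponent(ctx context.Context) ContextCloser {
	cleanup := func() error {
		// release sockets, flush buffers, etc.
		return nil
	}
	cc := NewContextCloser(ctx, cleanup)
	// Either calling cc.Close() or cancelling ctx should trigger cleanup,
	// the latter via the closeOnContextDone goroutine started above.
	return cc
}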
Code example #10
// TODO does dht ensure won't receive self as a provider? probably not.
func TestCanceledContext(t *testing.T) {
	rs := VirtualRoutingServer()
	k := u.Key("hello")

	t.Log("async'ly announce infinite stream of providers for key")
	i := 0
	go func() { // infinite stream
		for {
			peer := peer.WithIDString(strconv.Itoa(i)) // string(i) would yield a rune, not decimal digits
			err := rs.Announce(peer, k)
			if err != nil {
				t.Error(err) // t.Fatal must not be called from a non-test goroutine
				return
			}
			i++
		}
	}()

	local := peer.WithIDString("peer id doesn't matter")
	client := rs.Client(local)

	t.Log("warning: max is finite so this test is non-deterministic")
	t.Log("context cancellation could simply take lower priority")
	t.Log("and result in receiving the max number of results")
	max := 1000

	t.Log("cancel the context before consuming")
	ctx, cancelFunc := context.WithCancel(context.Background())
	cancelFunc()
	providers := client.FindProvidersAsync(ctx, k, max)

	numProvidersReturned := 0
	for range providers {
		numProvidersReturned++
	}
	t.Log(numProvidersReturned)

	if numProvidersReturned == max {
		t.Fatal("Context cancel had no effect")
	}
}
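The test hinges on one property of context: after cancelFunc() runs, ctx.Done() is already closed, so a well-behaved producer can notice immediately. In isolation:

func alreadyDone() bool {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // Done() is closed from this point on
	select {
	case <-ctx.Done():
		return true // a canceled context reports readiness without waiting
	default:
		return false // not reached
	}
}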
Code example #11
func SubtestSwarm(t *testing.T, addrs []string, MsgNum int) {
	// t.Skip("skipping for another test")

	ctx := context.Background()
	swarms, peers := makeSwarms(ctx, t, addrs)

	// connect everyone
	{
		var wg sync.WaitGroup
		connect := func(s *Swarm, dst peer.Peer) {
			// copy for other peer

			cp, err := s.peers.Get(dst.ID())
			if err != nil {
				t.Fatal(err)
			}
			cp.AddAddress(dst.Addresses()[0])

			log.Info("SWARM TEST: %s dialing %s", s.local, dst)
			if _, err := s.Dial(cp); err != nil {
				t.Fatal("error swarm dialing to peer", err)
			}
			log.Info("SWARM TEST: %s connected to %s", s.local, dst)
			wg.Done()
		}

		log.Info("Connecting swarms simultaneously.")
		for _, s := range swarms {
			for _, p := range peers {
				if p != s.local { // don't connect to self.
					wg.Add(1)
					connect(s, p)
				}
			}
		}
		wg.Wait()
	}

	// ping/pong
	for _, s1 := range swarms {
		ctx, cancel := context.WithCancel(ctx)

		// setup all others to pong
		for _, s2 := range swarms {
			if s1 == s2 {
				continue
			}

			go pong(ctx, s2)
		}

		peers, err := s1.peers.All()
		if err != nil {
			t.Fatal(err)
		}

		for k := 0; k < MsgNum; k++ {
			for _, p := range *peers {
				log.Debugf("%s ping %s (%d)", s1.local, p, k)
				s1.Outgoing <- msg.New(p, []byte("ping"))
			}
		}

		got := map[u.Key]int{}
		for k := 0; k < (MsgNum * len(*peers)); k++ {
			log.Debugf("%s waiting for pong (%d)", s1.local, k)
			msg := <-s1.Incoming
			if string(msg.Data()) != "pong" {
				t.Error("unexpected conn output", msg.Data)
			}

			n := got[msg.Peer().Key()]
			got[msg.Peer().Key()] = n + 1
		}

		if len(*peers) != len(got) {
			t.Error("got less messages than sent")
		}

		for p, n := range got {
			if n != MsgNum {
				t.Error("peer did not get all msgs", p, n, "/", MsgNum)
			}
		}

		cancel()
		<-time.After(50 * time.Microsecond)
	}

	for _, s := range swarms {
		s.Close()
	}
}
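The pong helper used above is not included in this listing. A plausible sketch against the same Swarm message API the test uses (s.Incoming, s.Outgoing, msg.New), offered as an assumption rather than the project's code:

func pong(ctx context.Context, s *Swarm) {
	for {
		select {
		case m := <-s.Incoming:
			if string(m.Data()) == "ping" {
				s.Outgoing <- msg.New(m.Peer(), []byte("pong"))
			}
		case <-ctx.Done():
			return
		}
	}
}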
Code example #12
func TestSimultMuxer(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	// run muxer
	ctx, cancel := context.WithCancel(context.Background())

	// setup
	p1 := &TestProtocol{Pipe: msg.NewPipe(10)}
	p2 := &TestProtocol{Pipe: msg.NewPipe(10)}
	pid1 := pb.ProtocolID_Test
	pid2 := pb.ProtocolID_Identify
	mux1 := NewMuxer(ctx, ProtocolMap{
		pid1: p1,
		pid2: p2,
	})
	peer1 := newPeer(t, "11140beec7b5ea3f0fdbc95d0dd47f3c5bc275aaaaaa")
	// peer2 := newPeer(t, "11140beec7b5ea3f0fdbc95d0dd47f3c5bc275bbbbbb")

	// counts
	total := 10000
	speed := time.Microsecond * 1
	counts := [2][2][2]int{}
	var countsLock sync.Mutex

	// run producers at every end sending incrementing messages
	produceOut := func(pid pb.ProtocolID, size int) {
		limiter := time.Tick(speed)
		for i := 0; i < size; i++ {
			<-limiter
			s := fmt.Sprintf("proto %v out %v", pid, i)
			m := msg.New(peer1, []byte(s))
			mux1.Protocols[pid].GetPipe().Outgoing <- m
			countsLock.Lock()
			counts[pid][0][0]++
			countsLock.Unlock()
			// log.Debug("sent %v", s)
		}
	}

	produceIn := func(pid pb.ProtocolID, size int) {
		limiter := time.Tick(speed)
		for i := 0; i < size; i++ {
			<-limiter
			s := fmt.Sprintf("proto %v in %v", pid, i)
			d, err := wrapData([]byte(s), pid)
			if err != nil {
				t.Error(err)
			}

			m := msg.New(peer1, d)
			mux1.Incoming <- m
			countsLock.Lock()
			counts[pid][1][0]++
			countsLock.Unlock()
			// log.Debug("sent %v", s)
		}
	}

	consumeOut := func() {
		for {
			select {
			case m := <-mux1.Outgoing:
				data, pid, err := unwrapData(m.Data())
				if err != nil {
					t.Error(err)
				}

				// log.Debug("got %v", string(data))
				_ = data
				countsLock.Lock()
				counts[pid][1][1]++
				countsLock.Unlock()

			case <-ctx.Done():
				return
			}
		}
	}

	consumeIn := func(pid pb.ProtocolID) {
		for {
			select {
			case m := <-mux1.Protocols[pid].GetPipe().Incoming:
				countsLock.Lock()
				counts[pid][0][1]++
				countsLock.Unlock()
				// log.Debug("got %v", string(m.Data()))
				_ = m
			case <-ctx.Done():
				return
			}
		}
	}

	go produceOut(pid1, total)
	go produceOut(pid2, total)
	go produceIn(pid1, total)
	go produceIn(pid2, total)
	go consumeOut()
	go consumeIn(pid1)
	go consumeIn(pid2)

	limiter := time.Tick(speed)
	for {
		<-limiter
		countsLock.Lock()
		got := counts[0][0][0] + counts[0][0][1] +
			counts[0][1][0] + counts[0][1][1] +
			counts[1][0][0] + counts[1][0][1] +
			counts[1][1][0] + counts[1][1][1]
		countsLock.Unlock()

		if got == total*8 {
			cancel()
			return
		}
	}

}
Code example #13
func TestDialer(t *testing.T) {
	// t.Skip("Skipping in favor of another test")

	p1, err := setupPeer("/ip4/127.0.0.1/tcp/4234")
	if err != nil {
		t.Fatal("error setting up peer", err)
	}

	p2, err := setupPeer("/ip4/127.0.0.1/tcp/4235")
	if err != nil {
		t.Fatal("error setting up peer", err)
	}

	ctx, cancel := context.WithCancel(context.Background())

	laddr := p1.NetAddress("tcp")
	if laddr == nil {
		t.Fatal("Listen address is nil.")
	}

	ps1 := peer.NewPeerstore()
	ps2 := peer.NewPeerstore()
	ps1.Add(p1)
	ps2.Add(p2)

	l, err := Listen(ctx, laddr, p1, ps1)
	if err != nil {
		t.Fatal(err)
	}

	go echoListen(ctx, l)

	d := &Dialer{
		Peerstore: ps2,
		LocalPeer: p2,
	}

	c, err := d.Dial(ctx, "tcp", p1)
	if err != nil {
		t.Fatal("error dialing peer", err)
	}

	// fmt.Println("sending")
	c.Out() <- []byte("beep")
	c.Out() <- []byte("boop")

	out := <-c.In()
	// fmt.Println("recving", string(out))
	data := string(out)
	if data != "beep" {
		t.Error("unexpected conn output", data)
	}

	out = <-c.In()
	data = string(out)
	if data != "boop" {
		t.Error("unexpected conn output", data)
	}

	// fmt.Println("closing")
	c.Close()
	l.Close()
	cancel()
}
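echoListen is also not shown. Per accepted connection it presumably reduces to an echo loop over the same In/Out channels the test reads and writes; a sketch under that assumption:

func echo(ctx context.Context, c Conn) {
	for {
		select {
		case m, ok := <-c.In():
			if !ok {
				return // inbound channel closed by the connection
			}
			c.Out() <- m // echo the payload back unchanged
		case <-ctx.Done():
			return
		}
	}
}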
Code example #14
func TestMulticonnSendUnderlying(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	log.Info("TestMulticonnSendUnderlying")
	ctx := context.Background()
	ctxC, cancel := context.WithCancel(ctx)

	c1, c2 := setupMultiConns(t, ctx)

	log.Info("gen msgs")
	num := 100
	msgsFrom1 := genMessages(num, "from p1 to p2")
	msgsFrom2 := genMessages(num, "from p2 to p1")

	var wg sync.WaitGroup

	send := func(c *MultiConn, msgs *msgMap) {
		defer wg.Done()

		conns := make([]Conn, 0, len(c.conns))
		for _, c1 := range c.conns {
			conns = append(conns, c1)
		}

		i := 0
		for _, m := range msgs.msgs {
			log.Info("send: %s", m.payload)
			switch i % 3 {
			case 0:
				conns[0].Out() <- []byte(m.payload)
			case 1:
				conns[1].Out() <- []byte(m.payload)
			case 2:
				c.Out() <- []byte(m.payload)
			}
			msgs.Sent(t, m.payload)
			<-time.After(time.Microsecond * 10)
			i++
		}
	}

	recv := func(ctx context.Context, c *MultiConn, msgs *msgMap) {
		defer wg.Done()

		for {
			select {
			case payload := <-c.In():
				msgs.Received(t, string(payload))
				log.Info("recv: %s", payload)
				if msgs.recv == len(msgs.msgs) {
					return
				}

			case <-ctx.Done():
				return

			}
		}

	}

	log.Info("msg send + recv")

	wg.Add(4)
	go send(c1, msgsFrom1)
	go send(c2, msgsFrom2)
	go recv(ctxC, c1, msgsFrom2)
	go recv(ctxC, c2, msgsFrom1)
	wg.Wait()
	cancel()
	c1.Close()
	c2.Close()

	msgsFrom1.CheckDone(t)
	msgsFrom2.CheckDone(t)
}