Example #1
func TestDoesNotDeadLockIfContextCancelledBeforePublish(t *testing.T) {

	g := blocksutil.NewBlockGenerator()
	ctx, cancel := context.WithCancel(context.Background())
	n := New()
	defer n.Shutdown()

	t.Log("generate a large number of blocks. exceed default buffer")
	bs := g.Blocks(1000)
	ks := func() []key.Key {
		var keys []key.Key
		for _, b := range bs {
			keys = append(keys, b.Key())
		}
		return keys
	}()

	_ = n.Subscribe(ctx, ks...) // ignore received channel

	t.Log("cancel context before any blocks published")
	cancel()
	for _, b := range bs {
		n.Publish(b)
	}

	t.Log("publishing the large number of blocks to the ignored channel must not deadlock")
}
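One common way a publish path avoids this deadlock is to race the send against the subscriber's Done channel. The sketch below shows that general shape only; publishNonBlocking and the int channel are illustrative stand-ins, not the notifications package's actual code.

// publishNonBlocking is the general shape of a deadlock-free publish: the send
// races the subscriber's Done channel, so a cancelled subscriber never wedges
// the publisher. (Sketch only; the notifications package may differ.)
func publishNonBlocking(subCtx context.Context, out chan<- int, v int) {
	select {
	case out <- v: // subscriber is keeping up
	case <-subCtx.Done(): // subscriber gave up; drop the value instead of blocking
	}
}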
Example #2
func TestConsistentAccounting(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	sender := newEngine(ctx, "Ernie")
	receiver := newEngine(ctx, "Bert")

	// Send messages from Ernie to Bert
	for i := 0; i < 1000; i++ {

		m := message.New(false)
		content := []string{"this", "is", "message", "i"}
		m.AddBlock(blocks.NewBlock([]byte(strings.Join(content, " "))))

		sender.Engine.MessageSent(receiver.Peer, m)
		receiver.Engine.MessageReceived(sender.Peer, m)
	}

	// Ensure sender records the change
	if sender.Engine.numBytesSentTo(receiver.Peer) == 0 {
		t.Fatal("Sent bytes were not recorded")
	}

	// Ensure sender and receiver have the same values
	if sender.Engine.numBytesSentTo(receiver.Peer) != receiver.Engine.numBytesReceivedFrom(sender.Peer) {
		t.Fatal("Inconsistent book-keeping. Strategies don't agree")
	}

	// Ensure the sender didn't record receiving anything, and that the
	// receiver didn't record sending anything
	if receiver.Engine.numBytesSentTo(sender.Peer) != 0 || sender.Engine.numBytesReceivedFrom(receiver.Peer) != 0 {
		t.Fatal("Bert should not have sent bytes to Ernie, nor Ernie received bytes from Bert")
	}
}
Example #3
File: workers.go Project: rht/bssim
func (bs *Bitswap) rebroadcastWorker(parent context.Context) {
	ctx, cancel := context.WithCancel(parent)
	defer cancel()

	broadcastSignal := time.NewTicker(rebroadcastDelay.Get())
	defer broadcastSignal.Stop()

	tick := time.NewTicker(10 * time.Second)
	defer tick.Stop()

	for {
		log.Event(ctx, "Bitswap.Rebroadcast.idle")
		select {
		case <-tick.C:
			n := bs.wm.wl.Len()
			if n > 0 {
				log.Debug(n, "keys in bitswap wantlist")
			}
		case <-broadcastSignal.C: // resend unfulfilled wantlist keys
			log.Event(ctx, "Bitswap.Rebroadcast.active")
			entries := bs.wm.wl.Entries()
			if len(entries) > 0 {
				bs.connectToProviders(ctx, entries)
			}
		case <-parent.Done():
			return
		}
	}
}
Example #4
File: bitswap.go Project: rht/bssim
func (bs *Bitswap) connectToProviders(ctx context.Context, entries []wantlist.Entry) {

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Get providers for all entries in wantlist (could take a while)
	wg := sync.WaitGroup{}
	for _, e := range entries {
		wg.Add(1)
		go func(k key.Key) {
			defer wg.Done()

			child, cancel := context.WithTimeout(ctx, providerRequestTimeout)
			defer cancel()
			providers := bs.network.FindProvidersAsync(child, k, maxProvidersPerRequest)
			for prov := range providers {
				go func(p peer.ID) {
					bs.network.ConnectTo(ctx, p)
				}(prov)
			}
		}(e.Key)
	}

	wg.Wait() // make sure all our children do finish.
}
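The same shape, condensed: join every item with a WaitGroup and bound each item with its own child timeout. This is only a sketch; forEachWithTimeout, lookupOne, and itemTimeout are hypothetical stand-ins, not bssim APIs.

// forEachWithTimeout runs lookupOne for every item, each under its own
// timeout-bounded child context, and waits for all of them to finish.
func forEachWithTimeout(ctx context.Context, items []string, itemTimeout time.Duration,
	lookupOne func(context.Context, string)) {

	var wg sync.WaitGroup
	for _, it := range items {
		wg.Add(1)
		go func(it string) {
			defer wg.Done()
			child, cancel := context.WithTimeout(ctx, itemTimeout)
			defer cancel() // release the child's timer as soon as this item finishes
			lookupOne(child, it)
		}(it)
	}
	wg.Wait() // all items either finished or hit their timeout
}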
Example #5
File: fracctx.go Project: rht/bssim
// WithDeadlineFraction returns a Context with a fraction of the
// original context's timeout. This is useful in sequential pipelines
// of work, where one might try options and fall back to others
// depending on the time available, or failure to respond. For example:
//
//  // getPicture returns a picture from our encrypted database
//  // we have a pipeline of multiple steps. we need to:
//  // - get the data from a database
//  // - decrypt it
//  // - apply many transforms
//  //
//  // we **know** that each step takes increasingly more time.
//  // The transforms are much more expensive than decryption, and
//  // decryption is more expensive than the database lookup.
//  // If our database takes too long (i.e. >0.2 of available time),
//  // there's no use in continuing.
//  func getPicture(ctx context.Context, key string) ([]byte, error) {
//    // fractional timeout contexts to the rescue!
//
//    // try the database with 0.2 of remaining time.
//    ctx1, _ := ctxext.WithDeadlineFraction(ctx, 0.2)
//    val, err := db.Get(ctx1, key)
//    if err != nil {
//      return nil, err
//    }
//
//    // try decryption with 0.3 of remaining time.
//    ctx2, _ := ctxext.WithDeadlineFraction(ctx, 0.3)
//    if val, err = decryptor.Decrypt(ctx2, val); err != nil {
//      return nil, err
//    }
//
//    // try transforms with all remaining time. hopefully it's enough!
//    return transformer.Transform(ctx, val)
//  }
//
func WithDeadlineFraction(ctx context.Context, fraction float64) (
	context.Context, context.CancelFunc) {

	d, found := ctx.Deadline()
	if !found { // no deadline
		return context.WithCancel(ctx)
	}

	left := d.Sub(time.Now())
	if left < 0 { // already passed...
		return context.WithCancel(ctx)
	}

	left = time.Duration(float64(left) * fraction)
	return context.WithTimeout(ctx, left)
}
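A minimal usage sketch, assuming it sits in the same package as WithDeadlineFraction: the fraction applies to the time remaining on the parent at call time, so sequential calls operate on a shrinking budget. exampleFraction is a hypothetical caller, not part of the package.

// exampleFraction: with roughly one second left on the parent, a 0.5 fraction
// yields roughly a 500ms deadline on the child.
func exampleFraction() {
	parent, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	child, childCancel := WithDeadlineFraction(parent, 0.5)
	defer childCancel()

	if d, ok := child.Deadline(); ok {
		_ = d // d is about 500ms away: half of the parent's remaining time
	}
}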
Example #6
File: context.go Project: rht/bssim
// WithProcessClosing returns a context.Context derived from ctx that
// is cancelled as p is Closing (after: <-p.Closing()). It is simply:
//
//   func WithProcessClosing(ctx context.Context, p goprocess.Process) context.Context {
//     ctx, cancel := context.WithCancel(ctx)
//     go func() {
//       <-p.Closing()
//       cancel()
//     }()
//     return ctx
//   }
//
func WithProcessClosing(ctx context.Context, p goprocess.Process) context.Context {
	ctx, cancel := context.WithCancel(ctx)
	go func() {
		<-p.Closing()
		cancel()
	}()
	return ctx
}
Example #7
// Context returns a context that cancels when the waitable is closing.
func Context(w Waitable) context.Context {
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		<-w.Closing()
		cancel()
	}()
	return ctx
}
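Context only uses w.Closing(), so the sketch below assumes Waitable is satisfied by any type with a Closing() <-chan struct{} method (e.g. a goprocess.Process). closingStub and exampleWaitableContext are illustrative stand-ins.

// closingStub satisfies the assumed Waitable shape: anything exposing a
// Closing() <-chan struct{} channel that is closed on shutdown.
type closingStub struct{ ch chan struct{} }

func (c *closingStub) Closing() <-chan struct{} { return c.ch }

func exampleWaitableContext() {
	w := &closingStub{ch: make(chan struct{})}
	ctx := Context(w) // ctx stays live until w starts closing

	close(w.ch)  // simulate the waitable shutting down
	<-ctx.Done() // unblocks once the goroutine inside Context sees Closing() fire
}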
Example #8
// TODO does dht ensure won't receive self as a provider? probably not.
func TestCanceledContext(t *testing.T) {
	rs := NewServer()
	k := key.Key("hello")

	// avoid leaking the goroutine without using the context to signal
	// (we want the goroutine to keep trying to publish on a cancelled
	// context until we've tested that it doesn't do anything.)
	done := make(chan struct{})
	defer func() { done <- struct{}{} }()

	t.Log("async'ly announce infinite stream of providers for key")
	i := 0
	go func() { // infinite stream
		for {
			select {
			case <-done:
				t.Log("exiting async worker")
				return
			default:
			}

			pi, err := testutil.RandIdentity()
			if err != nil {
				t.Error(err)
			}
			err = rs.Client(pi).Provide(context.Background(), k)
			if err != nil {
				t.Error(err)
			}
			i++
		}
	}()

	local := testutil.RandIdentityOrFatal(t)
	client := rs.Client(local)

	t.Log("warning: max is finite so this test is non-deterministic")
	t.Log("context cancellation could simply take lower priority")
	t.Log("and result in receiving the max number of results")
	max := 1000

	t.Log("cancel the context before consuming")
	ctx, cancelFunc := context.WithCancel(context.Background())
	cancelFunc()
	providers := client.FindProvidersAsync(ctx, k, max)

	numProvidersReturned := 0
	for range providers {
		numProvidersReturned++
	}
	t.Log(numProvidersReturned)

	if numProvidersReturned == max {
		t.Fatal("Context cancel had no effect")
	}
}
Example #9
// WARNING: this uses RandTestBogusIdentity. DO NOT USE outside of tests!
func NewTestSessionGenerator(
	net tn.Network) SessionGenerator {
	ctx, cancel := context.WithCancel(context.TODO())
	return SessionGenerator{
		net:    net,
		seq:    0,
		ctx:    ctx, // TODO take ctx as param to Next, Instances
		cancel: cancel,
	}
}
Example #10
File: do_test.go Project: rht/bssim
func TestDoReturnsContextErr(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	ch := make(chan struct{})
	err := ContextDo(ctx, func() error {
		cancel()
		ch <- struct{}{} // won't return
		return nil
	})
	if err != ctx.Err() {
		t.Fail()
	}
}
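The test depends on ContextDo returning ctx.Err() as soon as the context is done, even while the supplied function is still blocked. One plausible shape for such a helper is sketched below; contextDoSketch is a guess at the pattern, not the project's actual implementation.

// contextDoSketch runs f in its own goroutine and returns early with ctx.Err()
// if the context ends first. As the test above shows, f may then stay blocked
// forever; the helper does not (and cannot) force it to stop.
func contextDoSketch(ctx context.Context, f func() error) error {
	done := make(chan error, 1) // buffered: f's goroutine never blocks on send
	go func() { done <- f() }()

	select {
	case <-ctx.Done():
		return ctx.Err()
	case err := <-done:
		return err
	}
}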
Example #11
File: bitswap.go Project: rht/bssim
// New initializes a BitSwap instance that communicates over the provided
// BitSwapNetwork. This function registers the returned instance as the network
// delegate.
// Runs until context is cancelled.
func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork,
	bstore blockstore.Blockstore, nice bool) exchange.Interface {

	// It is important to use the provided parent context, since it may include
	// loggable data. That said, it's probably not a good idea to couple bitswap
	// to the concerns of the IPFS daemon in this way.
	//
	// FIXME(btc) Now that bitswap manages itself using a process, it probably
	// shouldn't accept a context anymore. Clients should probably use Close()
	// exclusively. We should probably find another way to share logging data
	ctx, cancelFunc := context.WithCancel(parent)

	notif := notifications.New()
	px := process.WithTeardown(func() error {
		notif.Shutdown()
		return nil
	})

	go func() {
		<-px.Closing() // process closes first
		cancelFunc()
	}()
	go func() {
		<-ctx.Done() // parent cancelled first
		px.Close()
	}()

	bs := &Bitswap{
		self:          p,
		blockstore:    bstore,
		notifications: notif,
		engine:        decision.NewEngine(ctx, bstore), // TODO close the engine with Close() method
		network:       network,
		findKeys:      make(chan *blockRequest, sizeBatchRequestChan),
		process:       px,
		newBlocks:     make(chan *blocks.Block, HasBlockBufferSize),
		provideKeys:   make(chan key.Key),
		wm:            NewWantManager(ctx, network),
	}
	go bs.wm.Run()
	network.SetDelegate(bs)

	// Start up bitswap's async worker routines
	bs.startWorkers(px, ctx)
	return bs
}
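The two goroutines above tie the lifetimes together in both directions: closing the process cancels the context, and a cancelled context closes the process. Below is a dependency-free sketch of the same pattern, with goprocess replaced by a plain closing channel and an idempotent close function; bindLifetimes is a hypothetical helper, not bssim code.

// bindLifetimes derives a context that is cancelled when closing fires, and
// calls closeFn (expected to be idempotent, like px.Close) when the context
// ends first. Either side can initiate shutdown and the other follows.
func bindLifetimes(parent context.Context, closing <-chan struct{}, closeFn func()) (context.Context, context.CancelFunc) {
	ctx, cancel := context.WithCancel(parent)
	go func() {
		<-closing // the process side is shutting down: cancel the context
		cancel()
	}()
	go func() {
		<-ctx.Done() // the context side ended first: shut the process down
		closeFn()
	}()
	return ctx, cancel
}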
Example #12
func TestSecureHandshakeFailsWithWrongKeys(t *testing.T) {
	// t.Skip("Skipping in favor of another test")

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	c1, c2, p1, p2 := setupSingleConn(t, ctx)

	done := make(chan error)
	go secureHandshake(t, ctx, p2.PrivKey, c1, done)
	go secureHandshake(t, ctx, p1.PrivKey, c2, done)

	for i := 0; i < 2; i++ {
		if err := <-done; err == nil {
			t.Fatal("wrong keys should've errored out.")
		}
	}
}
Example #13
func TestClose(t *testing.T) {
	// t.Skip("Skipping in favor of another test")

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	c1, c2, _, _ := setupSingleConn(t, ctx)

	testOneSendRecv(t, c1, c2)
	testOneSendRecv(t, c2, c1)

	c1.Close()
	testNotOneSendRecv(t, c1, c2)

	c2.Close()
	testNotOneSendRecv(t, c2, c1)
	testNotOneSendRecv(t, c1, c2)
}
Example #14
func TestSecureCancelHandshake(t *testing.T) {
	// t.Skip("Skipping in favor of another test")

	ctx, cancel := context.WithCancel(context.Background())
	c1, c2, p1, p2 := setupSingleConn(t, ctx)

	done := make(chan error)
	go secureHandshake(t, ctx, p1.PrivKey, c1, done)
	<-time.After(time.Millisecond)
	cancel() // cancel ctx
	go secureHandshake(t, ctx, p2.PrivKey, c2, done)

	for i := 0; i < 2; i++ {
		if err := <-done; err == nil {
			t.Error("cancel should've errored out")
		}
	}
}
Example #15
func testPing(t *testing.T, ps *PingService, p peer.ID) {
	pctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	ts, err := ps.Ping(pctx, p)
	if err != nil {
		t.Fatal(err)
	}

	for i := 0; i < 5; i++ {
		select {
		case took := <-ts:
			t.Log("ping took: ", took)
		case <-time.After(time.Second * 4):
			t.Fatal("failed to receive ping")
		}
	}

}
Example #16
func newSecureSession(ctx context.Context, local peer.ID, key ci.PrivKey, insecure io.ReadWriteCloser) (*secureSession, error) {
	s := &secureSession{localPeer: local, localKey: key}
	s.ctx, s.cancel = context.WithCancel(ctx)

	switch {
	case s.localPeer == "":
		return nil, errors.New("no local id provided")
	case s.localKey == nil:
		return nil, errors.New("no local private key provided")
	case !s.localPeer.MatchesPrivateKey(s.localKey):
		return nil, fmt.Errorf("peer.ID does not match PrivateKey")
	case insecure == nil:
		return nil, fmt.Errorf("insecure ReadWriter is nil")
	}

	s.insecure = insecure
	s.insecureM = msgio.NewReadWriter(insecure)
	return s, nil
}
Example #17
func bootstrap(t *testing.T, ctx context.Context, dhts []*IpfsDHT) {

	ctx, cancel := context.WithCancel(ctx)
	log.Debugf("bootstrapping dhts...")

	// tried async. sequential fares much better. compare:
	// 100 async https://gist.github.com/jbenet/56d12f0578d5f34810b2
	// 100 sync https://gist.github.com/jbenet/6c59e7c15426e48aaedd
	// probably because results compound

	cfg := DefaultBootstrapConfig
	cfg.Queries = 3

	start := rand.Intn(len(dhts)) // randomize to decrease bias.
	for i := range dhts {
		dht := dhts[(start+i)%len(dhts)]
		dht.runBootstrap(ctx, cfg)
	}
	cancel()
}
Example #18
func TestPing(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	h1 := netutil.GenHostSwarm(t, ctx)
	h2 := netutil.GenHostSwarm(t, ctx)

	err := h1.Connect(ctx, peer.PeerInfo{
		ID:    h2.ID(),
		Addrs: h2.Addrs(),
	})

	if err != nil {
		t.Fatal(err)
	}

	ps1 := NewPingService(h1)
	ps2 := NewPingService(h2)

	testPing(t, ps1, h2.ID())
	testPing(t, ps2, h1.ID())
}
Example #19
File: bitswap.go Project: rht/bssim
// GetBlock attempts to retrieve a particular block from peers within the
// deadline enforced by the context.
func (bs *Bitswap) GetBlock(parent context.Context, k key.Key) (*blocks.Block, error) {

	// Any async work initiated by this function must end when this function
	// returns. To ensure this, derive a new context. Note that it is okay to
	// listen on parent in this scope, but NOT okay to pass |parent| to
	// functions called by this one. Otherwise those functions won't return
	// when this context's cancel func is executed. This is difficult to
	// enforce. May this comment keep you safe.

	ctx, cancelFunc := context.WithCancel(parent)

	ctx = eventlog.ContextWithLoggable(ctx, eventlog.Uuid("GetBlockRequest"))
	log.Event(ctx, "Bitswap.GetBlockRequest.Start", &k)
	defer log.Event(ctx, "Bitswap.GetBlockRequest.End", &k)

	defer cancelFunc()

	promise, err := bs.GetBlocks(ctx, []key.Key{k})
	if err != nil {
		return nil, err
	}

	select {
	case block, ok := <-promise:
		if !ok {
			select {
			case <-ctx.Done():
				return nil, ctx.Err()
			default:
				return nil, errors.New("promise channel was closed")
			}
		}
		return block, nil
	case <-parent.Done():
		return nil, parent.Err()
	}
}
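The comment above is the key discipline: derive a child context, defer its cancel, and hand the child (never the parent) to anything that may keep working after the happy path returns. A compressed sketch of that shape follows, with a hypothetical slowLookup standing in for GetBlocks.

// fetchOne derives a child context so that anything started by slowLookup is
// released as soon as fetchOne returns, while the caller's parent context
// still governs overall cancellation.
func fetchOne(parent context.Context, k string,
	slowLookup func(context.Context, string) (<-chan []byte, error)) ([]byte, error) {

	ctx, cancel := context.WithCancel(parent)
	defer cancel() // stops slowLookup's background work when we return

	results, err := slowLookup(ctx, k) // pass ctx, NOT parent
	if err != nil {
		return nil, err
	}

	select {
	case b, ok := <-results:
		if !ok {
			return nil, errors.New("result channel was closed")
		}
		return b, nil
	case <-parent.Done():
		return nil, parent.Err()
	}
}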
Example #20
func TestPeerIsAddedToPeersWhenMessageReceivedOrSent(t *testing.T) {

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	sanfrancisco := newEngine(ctx, "sf")
	seattle := newEngine(ctx, "sea")

	m := message.New(true)

	sanfrancisco.Engine.MessageSent(seattle.Peer, m)
	seattle.Engine.MessageReceived(sanfrancisco.Peer, m)

	if seattle.Peer == sanfrancisco.Peer {
		t.Fatal("Sanity Check: Peers have same Key!")
	}

	if !peerIsPartner(seattle.Peer, sanfrancisco.Engine) {
		t.Fatal("Peer wasn't added as a Partner")
	}

	if !peerIsPartner(sanfrancisco.Peer, seattle.Engine) {
		t.Fatal("Peer wasn't added as a Partner")
	}
}
Example #21
func TestReprovide(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	mrserv := mock.NewServer()

	idA := testutil.RandIdentityOrFatal(t)
	idB := testutil.RandIdentityOrFatal(t)

	clA := mrserv.Client(idA)
	clB := mrserv.Client(idB)

	bstore := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))

	blk := blocks.NewBlock([]byte("this is a test"))
	bstore.Put(blk)

	reprov := NewReprovider(clA, bstore)
	err := reprov.Reprovide(ctx)
	if err != nil {
		t.Fatal(err)
	}

	provs, err := clB.FindProviders(ctx, blk.Key())
	if err != nil {
		t.Fatal(err)
	}

	if len(provs) == 0 {
		t.Fatal("Should have gotten a provider")
	}

	if provs[0].ID != idA.ID() {
		t.Fatal("Somehow got the wrong peer back as a provider.")
	}
}
Example #22
func TestCloseLeak(t *testing.T) {
	// t.Skip("Skipping in favor of another test")
	if testing.Short() {
		t.SkipNow()
	}

	if travis.IsRunning() {
		t.Skip("this doesn't work well on travis")
	}

	var wg sync.WaitGroup

	runPair := func(num int) {
		ctx, cancel := context.WithCancel(context.Background())
		c1, c2, _, _ := setupSingleConn(t, ctx)

		for i := 0; i < num; i++ {
			b1 := []byte(fmt.Sprintf("beep%d", i))
			c1.WriteMsg(b1)
			b2, err := c2.ReadMsg()
			if err != nil {
				panic(err)
			}
			if !bytes.Equal(b1, b2) {
				panic(fmt.Errorf("bytes not equal: %s != %s", b1, b2))
			}

			b2 = []byte(fmt.Sprintf("boop%d", i))
			c2.WriteMsg(b2)
			b1, err = c1.ReadMsg()
			if err != nil {
				panic(err)
			}
			if !bytes.Equal(b1, b2) {
				panic(fmt.Errorf("bytes not equal: %s != %s", b1, b2))
			}

			<-time.After(time.Microsecond * 5)
		}

		c1.Close()
		c2.Close()
		cancel() // close the listener
		wg.Done()
	}

	var cons = 5
	var msgs = 50
	log.Debugf("Running %d connections * %d msgs.\n", cons, msgs)
	for i := 0; i < cons; i++ {
		wg.Add(1)
		go runPair(msgs)
	}

	log.Debugf("Waiting...\n")
	wg.Wait()
	// done!

	<-time.After(time.Millisecond * 150)
	if runtime.NumGoroutine() > 20 {
		// panic("uncomment me to debug")
		t.Fatal("leaking goroutines:", runtime.NumGoroutine())
	}
}
Example #23
func (s *Swarm) dialAddrs(ctx context.Context, d *conn.Dialer, p peer.ID, remoteAddrs []ma.Multiaddr) (conn.Conn, error) {

	// try to connect to one of the peer's known addresses.
	// we dial concurrently to each of the addresses, which:
	// * makes the process faster overall
	// * attempts to get the fastest connection available.
	// * mitigates the waste of trying bad addresses
	log.Debugf("%s swarm dialing %s %s", s.local, p, remoteAddrs)

	ctx, cancel := context.WithCancel(ctx)
	defer cancel() // cancel work when we exit func

	foundConn := make(chan struct{})
	conns := make(chan conn.Conn, len(remoteAddrs))
	errs := make(chan error, len(remoteAddrs))

	// dialSingleAddr is used in the rate-limited fan-out below.
	dialSingleAddr := func(addr ma.Multiaddr) {
		connC, err := s.dialAddr(ctx, d, p, addr)

		// check parent still wants our results
		select {
		case <-foundConn:
			if connC != nil {
				connC.Close()
			}
			return
		default:
		}

		if err != nil {
			errs <- err
		} else if connC == nil {
			errs <- fmt.Errorf("failed to dial %s %s", p, addr)
		} else {
			conns <- connC
		}
	}

	// this whole thing is in a goroutine so we can use foundConn
	// to end early.
	go func() {
		// rate limiting just in case. at most 10 addrs at once.
		limiter := ratelimit.NewRateLimiter(process.Background(), 10)
		limiter.Go(func(worker process.Process) {
			// permute addrs so we try different sets first each time.
			for _, i := range rand.Perm(len(remoteAddrs)) {
				select {
				case <-foundConn: // if one of them succeeded already
					return // a plain break here would only exit the select, not the loop
				case <-worker.Closing(): // our context was cancelled
					return
				default:
				}

				workerAddr := remoteAddrs[i] // shadow variable to avoid race
				limiter.LimitedGo(func(worker process.Process) {
					dialSingleAddr(workerAddr)
				})
			}
		})

		processctx.CloseAfterContext(limiter, ctx)
	}()

	// wait for the results.
	exitErr := fmt.Errorf("failed to dial %s", p)
	for i := 0; i < len(remoteAddrs); i++ {
		select {
		case exitErr = <-errs: //
			log.Debug("dial error: ", exitErr)
		case connC := <-conns:
			// take the first + return asap
			close(foundConn)
			return connC, nil
		}
	}
	return nil, exitErr
}
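Stripped of the rate limiter, dialAddrs is a fan-out where the first success wins and cancellation stops the stragglers. A minimal sketch of that core shape, with a hypothetical dial function and string addresses in place of the libp2p types:

// firstSuccess dials every address concurrently and returns the first
// successful result; the deferred cancel tells the remaining dials to stop.
// Buffered channels let the losers exit without blocking.
func firstSuccess(ctx context.Context, addrs []string,
	dial func(context.Context, string) (string, error)) (string, error) {

	ctx, cancel := context.WithCancel(ctx)
	defer cancel() // once we return, abandon any in-flight dials

	conns := make(chan string, len(addrs))
	errs := make(chan error, len(addrs))

	for _, a := range addrs {
		go func(addr string) {
			c, err := dial(ctx, addr)
			if err != nil {
				errs <- err
				return
			}
			conns <- c
		}(a)
	}

	lastErr := fmt.Errorf("no addresses to dial")
	for range addrs {
		select {
		case c := <-conns:
			return c, nil // first success wins
		case lastErr = <-errs:
		}
	}
	return "", lastErr
}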
Example #24
func TestSecureCloseLeak(t *testing.T) {
	// t.Skip("Skipping in favor of another test")

	if testing.Short() {
		t.SkipNow()
	}
	if travis.IsRunning() {
		t.Skip("this doesn't work well on travis")
	}

	runPair := func(c1, c2 Conn, num int) {
		log.Debugf("runPair %d", num)

		for i := 0; i < num; i++ {
			log.Debugf("runPair iteration %d", i)
			b1 := []byte("beep")
			c1.WriteMsg(b1)
			b2, err := c2.ReadMsg()
			if err != nil {
				panic(err)
			}
			if !bytes.Equal(b1, b2) {
				panic("bytes not equal")
			}

			b2 = []byte("beep")
			c2.WriteMsg(b2)
			b1, err = c1.ReadMsg()
			if err != nil {
				panic(err)
			}
			if !bytes.Equal(b1, b2) {
				panic("bytes not equal")
			}

			<-time.After(time.Microsecond * 5)
		}
	}

	var cons = 5
	var msgs = 50
	log.Debugf("Running %d connections * %d msgs.\n", cons, msgs)

	var wg sync.WaitGroup
	for i := 0; i < cons; i++ {
		wg.Add(1)

		ctx, cancel := context.WithCancel(context.Background())
		c1, c2, _, _ := setupSecureConn(t, ctx)
		go func(c1, c2 Conn) {

			defer func() {
				c1.Close()
				c2.Close()
				cancel()
				wg.Done()
			}()

			runPair(c1, c2, msgs)
		}(c1, c2)
	}

	log.Debugf("Waiting...\n")
	wg.Wait()
	// done!

	<-time.After(time.Millisecond * 150)
	if runtime.NumGoroutine() > 20 {
		// panic("uncomment me to debug")
		t.Fatal("leaking goroutines:", runtime.NumGoroutine())
	}
}
Example #25
func testDialerCloseEarly(t *testing.T, secure bool) {
	// t.Skip("Skipping in favor of another test")

	p1 := tu.RandPeerNetParamsOrFatal(t)
	p2 := tu.RandPeerNetParamsOrFatal(t)

	key1 := p1.PrivKey
	if !secure {
		key1 = nil
		t.Log("testing insecurely")
	} else {
		t.Log("testing securely")
	}

	ctx, cancel := context.WithCancel(context.Background())
	l1, err := Listen(ctx, p1.Addr, p1.ID, key1)
	if err != nil {
		t.Fatal(err)
	}
	p1.Addr = l1.Multiaddr() // Addr has been determined by kernel.

	// lol nesting
	d2 := &Dialer{
		LocalPeer: p2.ID,
		// PrivateKey: key2, -- don't give it a key. we'll just close the conn.
	}

	errs := make(chan error, 100)
	done := make(chan struct{}, 1)
	gotclosed := make(chan struct{}, 1)
	go func() {
		defer func() { done <- struct{}{} }()

		c, err := l1.Accept()
		if err != nil {
			if strings.Contains(err.Error(), "closed") {
				gotclosed <- struct{}{}
				return
			}
			errs <- err
		}

		if _, err := c.Write([]byte("hello")); err != nil {
			gotclosed <- struct{}{}
			return
		}

		errs <- fmt.Errorf("wrote to conn")
	}()

	c, err := d2.Dial(ctx, p1.Addr, p1.ID)
	if err != nil {
		errs <- err
	}
	c.Close() // close it early.

	readerrs := func() {
		for {
			select {
			case e := <-errs:
				t.Error(e)
			default:
				return
			}
		}
	}
	readerrs()

	l1.Close()
	<-done
	cancel()
	readerrs()
	close(errs)

	select {
	case <-gotclosed:
	default:
		t.Error("did not get closed")
	}
}
Example #26
func SubtestSwarm(t *testing.T, SwarmNum int, MsgNum int) {
	// t.Skip("skipping for another test")

	ctx := context.Background()
	swarms := makeSwarms(ctx, t, SwarmNum)

	// connect everyone
	connectSwarms(t, ctx, swarms)

	// ping/pong
	for _, s1 := range swarms {
		log.Debugf("-------------------------------------------------------")
		log.Debugf("%s ping pong round", s1.local)
		log.Debugf("-------------------------------------------------------")

		_, cancel := context.WithCancel(ctx)
		got := map[peer.ID]int{}
		errChan := make(chan error, MsgNum*len(swarms))
		streamChan := make(chan *Stream, MsgNum)

		// send out "ping" x MsgNum to every peer
		go func() {
			defer close(streamChan)

			var wg sync.WaitGroup
			send := func(p peer.ID) {
				defer wg.Done()

				// first, one stream per peer (nice)
				stream, err := s1.NewStreamWithPeer(p)
				if err != nil {
					errChan <- err
					return
				}

				// send out ping!
				for k := 0; k < MsgNum; k++ { // with k messages
					msg := "ping"
					log.Debugf("%s %s %s (%d)", s1.local, msg, p, k)
					if _, err := stream.Write([]byte(msg)); err != nil {
						errChan <- err
						continue
					}
				}

				// read it later
				streamChan <- stream
			}

			for _, s2 := range swarms {
				if s2.local == s1.local {
					continue // don't send to self...
				}

				wg.Add(1)
				go send(s2.local)
			}
			wg.Wait()
		}()

		// receive "pong" x MsgNum from every peer
		go func() {
			defer close(errChan)
			count := 0
			countShouldBe := MsgNum * (len(swarms) - 1)
			for stream := range streamChan { // one per peer
				defer stream.Close()

				// get peer on the other side
				p := stream.Conn().RemotePeer()

				// receive pings
				msgCount := 0
				msg := make([]byte, 4)
				for k := 0; k < MsgNum; k++ { // with k messages

					// read from the stream
					if _, err := stream.Read(msg); err != nil {
						errChan <- err
						continue
					}

					if string(msg) != "pong" {
						errChan <- fmt.Errorf("unexpected message: %s", msg)
						continue
					}

					log.Debugf("%s %s %s (%d)", s1.local, msg, p, k)
					msgCount++
				}

				got[p] = msgCount
				count += msgCount
			}

			if count != countShouldBe {
				errChan <- fmt.Errorf("count mismatch: %d != %d", count, countShouldBe)
			}
		}()

		// check any errors (blocks till consumer is done)
		for err := range errChan {
			if err != nil {
				t.Error(err.Error())
			}
		}

		log.Debugf("%s got pongs", s1.local)
		if (len(swarms) - 1) != len(got) {
			t.Errorf("got (%d) less messages than sent (%d).", len(got), len(swarms))
		}

		for p, n := range got {
			if n != MsgNum {
				t.Error("peer did not get all msgs", p, n, "/", MsgNum)
			}
		}

		cancel()
		<-time.After(10 * time.Millisecond)
	}

	for _, s := range swarms {
		s.Close()
	}
}
Example #27
func testDialer(t *testing.T, secure bool) {
	// t.Skip("Skipping in favor of another test")

	p1 := tu.RandPeerNetParamsOrFatal(t)
	p2 := tu.RandPeerNetParamsOrFatal(t)

	key1 := p1.PrivKey
	key2 := p2.PrivKey
	if !secure {
		key1 = nil
		key2 = nil
		t.Log("testing insecurely")
	} else {
		t.Log("testing securely")
	}

	ctx, cancel := context.WithCancel(context.Background())
	l1, err := Listen(ctx, p1.Addr, p1.ID, key1)
	if err != nil {
		t.Fatal(err)
	}
	p1.Addr = l1.Multiaddr() // Addr has been determined by kernel.

	d2 := &Dialer{
		LocalPeer:  p2.ID,
		PrivateKey: key2,
	}

	go echoListen(ctx, l1)

	c, err := d2.Dial(ctx, p1.Addr, p1.ID)
	if err != nil {
		t.Fatal("error dialing peer", err)
	}

	// fmt.Println("sending")
	c.WriteMsg([]byte("beep"))
	c.WriteMsg([]byte("boop"))

	out, err := c.ReadMsg()
	if err != nil {
		t.Fatal(err)
	}

	// fmt.Println("recving", string(out))
	data := string(out)
	if data != "beep" {
		t.Error("unexpected conn output", data)
	}

	out, err = c.ReadMsg()
	if err != nil {
		t.Fatal(err)
	}

	data = string(out)
	if string(out) != "boop" {
		t.Error("unexpected conn output", data)
	}

	// fmt.Println("closing")
	c.Close()
	l1.Close()
	cancel()
}