Example #1
func TestConnHandler(t *testing.T) {
	// t.Skip("skipping for another test")
	t.Parallel()

	ctx := context.Background()
	swarms := makeSwarms(ctx, t, 5)

	gotconn := make(chan struct{}, 10)
	swarms[0].SetConnHandler(func(conn *Conn) {
		gotconn <- struct{}{}
	})

	connectSwarms(t, ctx, swarms)

	<-time.After(time.Millisecond)
	// should've gotten 4 by now (one connection from each other swarm).

	swarms[0].SetConnHandler(nil)

	expect := 4
	for i := 0; i < expect; i++ {
		select {
		case <-time.After(time.Second):
			t.Fatal("failed to get connections")
		case <-gotconn:
		}
	}

	select {
	case <-gotconn:
		t.Fatalf("should have connected to %d swarms", expect)
	default:
	}
}
Example #2
func TestClientFindProviders(t *testing.T) {
	pi := testutil.RandIdentityOrFatal(t)
	rs := NewServer()
	client := rs.Client(pi)

	k := key.Key("hello")
	err := client.Provide(context.Background(), k)
	if err != nil {
		t.Fatal(err)
	}

	// This is bad... but simulating networks is hard
	time.Sleep(time.Millisecond * 300)
	max := 100

	providersFromClient := client.FindProvidersAsync(context.Background(), key.Key("hello"), max)
	isInClient := false
	for p := range providersFromClient {
		if p.ID == pi.ID {
			isInClient = true
		}
	}
	if !isInClient {
		t.Fatal("Despite client providing key, client didn't receive peer when finding providers")
	}
}
Example #3
func TestGetBlocks(t *testing.T) {
	store := bstore()
	ex := Exchange(store)
	g := blocksutil.NewBlockGenerator()

	expected := g.Blocks(2)

	for _, b := range expected {
		if err := ex.HasBlock(context.Background(), b); err != nil {
			t.Fail()
		}
	}

	request := func() []key.Key {
		var ks []key.Key

		for _, b := range expected {
			ks = append(ks, b.Key())
		}
		return ks
	}()

	received, err := ex.GetBlocks(context.Background(), request)
	if err != nil {
		t.Fatal(err)
	}

	var count int
	for range received {
		count++
	}
	if len(expected) != count {
		t.Fail()
	}
}
Example #4
func TestClientOverMax(t *testing.T) {
	rs := NewServer()
	k := key.Key("hello")
	numProvidersForHelloKey := 100
	for i := 0; i < numProvidersForHelloKey; i++ {
		pi := testutil.RandIdentityOrFatal(t)
		err := rs.Client(pi).Provide(context.Background(), k)
		if err != nil {
			t.Fatal(err)
		}
	}

	max := 10
	pi := testutil.RandIdentityOrFatal(t)
	client := rs.Client(pi)

	providersFromClient := client.FindProvidersAsync(context.Background(), k, max)
	i := 0
	for range providersFromClient {
		i++
	}
	if i != max {
		t.Fatal("Too many providers returned")
	}
}
Example #5
func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) {

	net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
	block := blocks.NewBlock([]byte("block"))
	g := NewTestSessionGenerator(net)
	defer g.Close()

	peers := g.Instances(2)
	hasBlock := peers[0]
	defer hasBlock.Exchange.Close()

	if err := hasBlock.Exchange.HasBlock(context.Background(), block); err != nil {
		t.Fatal(err)
	}

	wantsBlock := peers[1]
	defer wantsBlock.Exchange.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	received, err := wantsBlock.Exchange.GetBlock(ctx, block.Key())
	if err != nil {
		t.Log(err)
		t.Fatal("Expected to succeed")
	}

	if !bytes.Equal(block.Data, received.Data) {
		t.Fatal("Data doesn't match")
	}
}
Example #6
// TODO does dht ensure won't receive self as a provider? probably not.
func TestCanceledContext(t *testing.T) {
	rs := NewServer()
	k := key.Key("hello")

	// avoid leaking goroutine, without using the context to signal
	// (we want the goroutine to keep trying to publish on a
	// cancelled context until we've tested that it doesn't do anything.)
	done := make(chan struct{})
	defer func() { done <- struct{}{} }()

	t.Log("async'ly announce infinite stream of providers for key")
	i := 0
	go func() { // infinite stream
		for {
			select {
			case <-done:
				t.Log("exiting async worker")
				return
			default:
			}

			pi, err := testutil.RandIdentity()
			if err != nil {
				t.Error(err)
			}
			err = rs.Client(pi).Provide(context.Background(), k)
			if err != nil {
				t.Error(err)
			}
			i++
		}
	}()

	local := testutil.RandIdentityOrFatal(t)
	client := rs.Client(local)

	t.Log("warning: max is finite so this test is non-deterministic")
	t.Log("context cancellation could simply take lower priority")
	t.Log("and result in receiving the max number of results")
	max := 1000

	t.Log("cancel the context before consuming")
	ctx, cancelFunc := context.WithCancel(context.Background())
	cancelFunc()
	providers := client.FindProvidersAsync(ctx, k, max)

	numProvidersReturned := 0
	for range providers {
		numProvidersReturned++
	}
	t.Log(numProvidersReturned)

	if numProvidersReturned == max {
		t.Fatal("Context cancel had no effect")
	}
}
Example #7
func ExampleEventLogger() {
	{
		log := EventLogger(nil)
		e := log.EventBegin(context.Background(), "dial")
		e.Done()
	}
	{
		log := EventLogger(nil)
		e := log.EventBegin(context.Background(), "dial")
		_ = e.Close() // implements io.Closer for convenience
	}
}
Example #8
File: main.go Project: rht/bssim
func getFileCmd(nodes []int, file string) error {
	file = normalizePath(file)
	blocks, ok := files[file]
	if !ok {
		return fmt.Errorf("Tried to get file, '%s', which has not been added.\n", file)
	}
	var wg sync.WaitGroup
	//  Get blocks and then Has them
	for _, node := range nodes {
		//  Remove blocks the peer already has?
		//  I'm assuming that peers with the first block of the file have the whole file,
		//  which I think is OK for the simulation, but I might have to change this later.
		alreadyhas, err := peers[node].Blockstore().Has(files[file][0])
		check(err)

		if alreadyhas {
			continue
		}
		wg.Add(1)
		go func(i int) {
			timer := recorder.NewTimer()
			ctx, cancel := context.WithTimeout(context.Background(), deadline)
			defer cancel()
			received, _ := peers[i].Exchange.GetBlocks(ctx, blocks)

			for j := 0; j < len(blocks); j++ {
				blockTimer := recorder.NewTimer()
				x := <-received
				if x == nil {
					wg.Done()
					return
				}
				recorder.EndBlockTime(blockTimer, peers[i].Peer.Pretty())
				fmt.Println(i, x, j)
				ctx, cancel := context.WithTimeout(context.Background(), time.Second)
				err := peers[i].Exchange.HasBlock(ctx, x)
				if err != nil {
					fmt.Println("error when adding block", i, err)
				}
				cancel()
			}
			recorder.EndFileTime(timer, peers[i].Peer.Pretty(), file)

			//	peers[i].Exchange.Close()
			wg.Done()
		}(node)
	}

	wg.Wait()
	testGet(nodes, file)
	return nil
}
Example #9
File: main.go Project: rht/bssim
//  Creates a test network using the delays in config.
//  Returns a fully connected mocknet and a slice of the instances in the network.
func createTestNetwork() (mocknet.Mocknet, []bs.Instance) {
	vv := convertTimeField("visibility_delay")
	q := convertTimeField("query_delay")
	//md := convertTimeField("message_delay")

	delayCfg := mockrouting.DelayConfig{ValueVisibility: vv, Query: q}
	n, err := strconv.Atoi(config["node_count"])
	check(err)
	mn := mocknet.New(context.Background())
	snet, err := tn.StreamNet(context.Background(), mn, mockrouting.NewServerWithDelay(delayCfg))
	check(err)
	instances := genInstances(n, &mn, &snet)
	return mn, instances
}
Example #10
func TestDoesNotDeadLockIfContextCancelledBeforePublish(t *testing.T) {

	g := blocksutil.NewBlockGenerator()
	ctx, cancel := context.WithCancel(context.Background())
	n := New()
	defer n.Shutdown()

	t.Log("generate a large number of blocks. exceed default buffer")
	bs := g.Blocks(1000)
	ks := func() []key.Key {
		var keys []key.Key
		for _, b := range bs {
			keys = append(keys, b.Key())
		}
		return keys
	}()

	_ = n.Subscribe(ctx, ks...) // ignore received channel

	t.Log("cancel context before any blocks published")
	cancel()
	for _, b := range bs {
		n.Publish(b)
	}

	t.Log("publishing the large number of blocks to the ignored channel must not deadlock")
}
Example #11
func TestFindPeer(t *testing.T) {
	// t.Skip("skipping test to debug another")
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()

	_, peers, dhts := setupDHTS(ctx, 4, t)
	defer func() {
		for i := 0; i < 4; i++ {
			dhts[i].Close()
			dhts[i].host.Close()
		}
	}()

	connect(t, ctx, dhts[0], dhts[1])
	connect(t, ctx, dhts[1], dhts[2])
	connect(t, ctx, dhts[1], dhts[3])

	ctxT, cancel := context.WithTimeout(ctx, time.Second)
	defer cancel()
	p, err := dhts[0].FindPeer(ctxT, peers[2])
	if err != nil {
		t.Fatal(err)
	}

	if p.ID == "" {
		t.Fatal("Failed to find peer.")
	}

	if p.ID != peers[2] {
		t.Fatal("Didnt find expected peer.")
	}
}
Example #12
func TestDeadlineFractionCancel(t *testing.T) {

	ctx1, cancel1 := context.WithTimeout(context.Background(), 10*time.Millisecond)
	ctx2, cancel2 := WithDeadlineFraction(ctx1, 0.5)

	select {
	case <-ctx1.Done():
		t.Fatal("ctx1 ended too early")
	case <-ctx2.Done():
		t.Fatal("ctx2 ended too early")
	default:
	}

	cancel2()

	select {
	case <-ctx1.Done():
		t.Fatal("ctx1 should NOT be cancelled")
	case <-ctx2.Done():
	default:
		t.Fatal("ctx2 should be cancelled")
	}

	cancel1()

	select {
	case <-ctx1.Done():
	case <-ctx2.Done():
	default:
		t.Fatal("ctx1 should be cancelled")
	}

}
Example #13
func TestDialBadAddrs(t *testing.T) {

	m := func(s string) ma.Multiaddr {
		maddr, err := ma.NewMultiaddr(s)
		if err != nil {
			t.Fatal(err)
		}
		return maddr
	}

	ctx := context.Background()
	s := makeSwarms(ctx, t, 1)[0]

	test := func(a ma.Multiaddr) {
		p := testutil.RandPeerIDFatal(t)
		s.peers.AddAddr(p, a, peer.PermanentAddrTTL)
		if _, err := s.Dial(ctx, p); err == nil {
			t.Error("swarm should not dial: %s", m)
		}
	}

	test(m("/ip6/fe80::1"))                // link local
	test(m("/ip6/fe80::100"))              // link local
	test(m("/ip4/127.0.0.1/udp/1234/utp")) // utp
}
Example #14
func TestSimultOpen(t *testing.T) {
	// t.Skip("skipping for another test")
	t.Parallel()

	ctx := context.Background()
	swarms := makeSwarms(ctx, t, 2)

	// connect everyone
	{
		var wg sync.WaitGroup
		connect := func(s *Swarm, dst peer.ID, addr ma.Multiaddr) {
			defer wg.Done()
			// copy for other peer
			log.Debugf("TestSimultOpen: connecting: %s --> %s (%s)", s.local, dst, addr)
			s.peers.AddAddr(dst, addr, peer.PermanentAddrTTL)
			if _, err := s.Dial(ctx, dst); err != nil {
				// t.Fatal must not be called from a non-test goroutine
				t.Error("error swarm dialing to peer", err)
			}
		}

		log.Info("Connecting swarms simultaneously.")
		wg.Add(2)
		go connect(swarms[0], swarms[1].local, swarms[1].ListenAddresses()[0])
		go connect(swarms[1], swarms[0].local, swarms[0].ListenAddresses()[0])
		wg.Wait()
	}

	for _, s := range swarms {
		s.Close()
	}
}
Example #15
func TestAddrBlocking(t *testing.T) {
	ctx := context.Background()
	swarms := makeSwarms(ctx, t, 2)

	swarms[0].SetConnHandler(func(conn *Conn) {
		t.Fatal("no connections should happen!")
	})

	_, block, err := net.ParseCIDR("127.0.0.1/8")
	if err != nil {
		t.Fatal(err)
	}

	swarms[1].Filters.AddDialFilter(block)

	swarms[1].peers.AddAddr(swarms[0].LocalPeer(), swarms[0].ListenAddresses()[0], peer.PermanentAddrTTL)
	_, err = swarms[1].Dial(context.TODO(), swarms[0].LocalPeer())
	if err == nil {
		t.Fatal("dial should have failed")
	}

	swarms[0].peers.AddAddr(swarms[1].LocalPeer(), swarms[1].ListenAddresses()[0], peer.PermanentAddrTTL)
	_, err = swarms[0].Dial(context.TODO(), swarms[1].LocalPeer())
	if err == nil {
		t.Fatal("dial should have failed")
	}
}
Example #16
func TestFilterBounds(t *testing.T) {
	ctx := context.Background()
	swarms := makeSwarms(ctx, t, 2)

	conns := make(chan struct{}, 8)
	swarms[0].SetConnHandler(func(conn *Conn) {
		conns <- struct{}{}
	})

	// Address that we won't be dialing from
	_, block, err := net.ParseCIDR("192.0.0.1/8")
	if err != nil {
		t.Fatal(err)
	}

	// set filter on both sides, shouldn't matter
	swarms[1].Filters.AddDialFilter(block)
	swarms[0].Filters.AddDialFilter(block)

	connectSwarms(t, ctx, swarms)

	select {
	case <-time.After(time.Second):
		t.Fatal("should have gotten connection")
	case <-conns:
		t.Log("got connect")
	}
}
Example #17
func TestValidAfter(t *testing.T) {

	pi := testutil.RandIdentityOrFatal(t)
	var key = key.Key("mock key")
	var ctx = context.Background()
	conf := DelayConfig{
		ValueVisibility: delay.Fixed(1 * time.Hour),
		Query:           delay.Fixed(0),
	}

	rs := NewServerWithDelay(conf)

	rs.Client(pi).Provide(ctx, key)

	var providers []peer.PeerInfo
	providers, err := rs.Client(pi).FindProviders(ctx, key)
	if err != nil {
		t.Fatal(err)
	}
	if len(providers) > 0 {
		t.Fail()
	}

	conf.ValueVisibility.Set(0)
	providers, err = rs.Client(pi).FindProviders(ctx, key)
	if err != nil {
		t.Fatal(err)
	}
	t.Log("providers", providers)
	if len(providers) != 1 {
		t.Fail()
	}
}
Example #18
func TestConsistentAccounting(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	sender := newEngine(ctx, "Ernie")
	receiver := newEngine(ctx, "Bert")

	// Send messages from Ernie to Bert
	for i := 0; i < 1000; i++ {

		m := message.New(false)
		content := []string{"this", "is", "message", "i"}
		m.AddBlock(blocks.NewBlock([]byte(strings.Join(content, " "))))

		sender.Engine.MessageSent(receiver.Peer, m)
		receiver.Engine.MessageReceived(sender.Peer, m)
	}

	// Ensure sender records the change
	if sender.Engine.numBytesSentTo(receiver.Peer) == 0 {
		t.Fatal("Sent bytes were not recorded")
	}

	// Ensure sender and receiver have the same values
	if sender.Engine.numBytesSentTo(receiver.Peer) != receiver.Engine.numBytesReceivedFrom(sender.Peer) {
		t.Fatal("Inconsistent book-keeping. Strategies don't agree")
	}

	// Ensure sender didn't record receiving anything. And that the receiver
	// didn't record sending anything
	if receiver.Engine.numBytesSentTo(sender.Peer) != 0 || sender.Engine.numBytesReceivedFrom(receiver.Peer) != 0 {
		t.Fatal("Bert didn't send bytes to Ernie")
	}
}
Example #19
func TestSecureClose(t *testing.T) {
	// t.Skip("Skipping in favor of another test")

	ctx := context.Background()
	c1, c2, p1, p2 := setupSingleConn(t, ctx)

	done := make(chan error)
	go secureHandshake(t, ctx, p1.PrivKey, c1, done)
	go secureHandshake(t, ctx, p2.PrivKey, c2, done)

	for i := 0; i < 2; i++ {
		if err := <-done; err != nil {
			t.Fatal(err)
		}
	}

	testOneSendRecv(t, c1, c2)

	c1.Close()
	testNotOneSendRecv(t, c1, c2)

	c2.Close()
	testNotOneSendRecv(t, c1, c2)
	testNotOneSendRecv(t, c2, c1)

}
Example #20
// Context returns a context that cancels when the waitable is closing.
func Context(w Waitable) context.Context {
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		<-w.Closing()
		cancel()
	}()
	return ctx
}
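
A minimal usage sketch for the helper above; it assumes Waitable is an interface whose Closing() method returns a channel of struct{} that is closed on shutdown. The fakeWaitable type, its field, and the printed output are illustrative assumptions, not part of the original package.

// fakeWaitable is a hypothetical stand-in satisfying the assumed Waitable
// interface: Closing() reports shutdown by closing a channel.
type fakeWaitable struct {
	closing chan struct{}
}

func (w fakeWaitable) Closing() <-chan struct{} { return w.closing }

func ExampleContext() {
	w := fakeWaitable{closing: make(chan struct{})}
	ctx := Context(w)

	close(w.closing) // simulate the waitable shutting down
	<-ctx.Done()     // the derived context is cancelled shortly afterwards
	fmt.Println(ctx.Err())
	// Output: context canceled
}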
Example #21
func TestBlockReturnsErr(t *testing.T) {
	off := Exchange(bstore())
	_, err := off.GetBlock(context.Background(), key.Key("foo"))
	if err != nil {
		return // as desired
	}
	t.Fail()
}
Example #22
func TestLimitedStreams(t *testing.T) {
	mn, err := FullMeshConnected(context.Background(), 2)
	if err != nil {
		t.Fatal(err)
	}

	var wg sync.WaitGroup
	messages := 4
	messageSize := 500
	handler := func(s inet.Stream) {
		b := make([]byte, messageSize)
		for i := 0; i < messages; i++ {
			if _, err := io.ReadFull(s, b); err != nil {
				log.Fatal(err)
			}
			if !bytes.Equal(b[:4], []byte("ping")) {
				log.Fatal("bytes mismatch")
			}
			wg.Done()
		}
		s.Close()
	}

	hosts := mn.Hosts()
	for _, h := range hosts {
		h.SetStreamHandler(protocol.TestingID, handler)
	}

	peers := mn.Peers()
	links := mn.LinksBetweenPeers(peers[0], peers[1])
	//  1000 bytes per second bandwidth
	bps := float64(1000)
	opts := links[0].Options()
	opts.Bandwidth = bps
	for _, link := range links {
		link.SetOptions(opts)
	}

	s, err := hosts[0].NewStream(protocol.TestingID, hosts[1].ID())
	if err != nil {
		t.Fatal(err)
	}

	filler := make([]byte, messageSize-4)
	data := append([]byte("ping"), filler...)
	before := time.Now()
	for i := 0; i < messages; i++ {
		wg.Add(1)
		if _, err := s.Write(data); err != nil {
			panic(err)
		}
	}

	wg.Wait()
	if !within(time.Since(before), time.Duration(time.Second*2), time.Second/3) {
		t.Fatal("Expected 2ish seconds but got ", time.Since(before))
	}
}
Example #23
func TestDoReturnsNil(t *testing.T) {
	ctx := context.Background()
	err := ContextDo(ctx, func() error {
		return nil
	})
	if err != nil {
		t.Fail()
	}
}
Example #24
File: main.go Project: rht/bssim
func putCmd(nodes []int, block *blocks.Block) error {
	for _, node := range nodes {
		err := peers[node].Exchange.HasBlock(context.Background(), block)
		if err != nil {
			return err
		}
	}
	return nil
}
Example #25
func getOrFail(bitswap Instance, b *blocks.Block, t *testing.T, wg *sync.WaitGroup) {
	if _, err := bitswap.Blockstore().Get(b.Key()); err != nil {
		_, err := bitswap.Exchange.GetBlock(context.Background(), b.Key())
		if err != nil {
			t.Fatal(err)
		}
	}
	wg.Done()
}
Example #26
func TestDeadlineFractionForever(t *testing.T) {

	ctx, _ := WithDeadlineFraction(context.Background(), 0.5)

	_, found := ctx.Deadline()
	if found {
		t.Fatal("should last forever")
	}
}
Example #27
func TestDoReturnsFuncError(t *testing.T) {
	ctx := context.Background()
	expected := errors.New("expected to be returned by ContextDo")
	err := ContextDo(ctx, func() error {
		return expected
	})
	if err != expected {
		t.Fail()
	}
}
Example #28
func TestProviderForKeyButNetworkCannotFind(t *testing.T) { // TODO revisit this

	rs := mockrouting.NewServer()
	net := tn.VirtualNetwork(rs, delay.Fixed(kNetworkDelay))
	g := NewTestSessionGenerator(net)
	defer g.Close()

	block := blocks.NewBlock([]byte("block"))
	pinfo := p2ptestutil.RandTestBogusIdentityOrFatal(t)
	rs.Client(pinfo).Provide(context.Background(), block.Key()) // but not on network

	solo := g.Next()
	defer solo.Exchange.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
	defer cancel()
	_, err := solo.Exchange.GetBlock(ctx, block.Key())

	if err != context.DeadlineExceeded {
		t.Fatal("Expected DeadlineExceeded error")
	}
}
Example #29
func TestProviderManager(t *testing.T) {
	ctx := context.Background()
	mid := peer.ID("testing")
	p := NewProviderManager(ctx, mid)
	a := key.Key("test")
	p.AddProvider(ctx, a, peer.ID("testingprovider"))
	resp := p.GetProviders(ctx, a)
	if len(resp) != 1 {
		t.Fatal("Could not retrieve provider.")
	}
	p.proc.Close()
}
Example #30
func TestClose(t *testing.T) {
	vnet := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
	sesgen := NewTestSessionGenerator(vnet)
	defer sesgen.Close()
	bgen := blocksutil.NewBlockGenerator()

	block := bgen.Next()
	bitswap := sesgen.Next()

	bitswap.Exchange.Close()
	bitswap.Exchange.GetBlock(context.Background(), block.Key())
}