Code example #1
File: ci.go  Project: andradeandrey/go-ipfs
// IsRunning attempts to determine whether this process is
// running on CI. This is done by checking any of:
//
//  CI=true
//  travis.IsRunning()
//  jenkins.IsRunning()
//
func IsRunning() bool {
	if os.Getenv(string(VarCI)) == "true" {
		return true
	}

	return travis.IsRunning() || jenkins.IsRunning()
}
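
The comment above describes a composite check: the generic CI environment variable plus per-provider detectors. As a usage sketch (the import path and test are hypothetical, not taken from the listing), a timing-sensitive test can gate itself on it:

import (
	"testing"

	ci "github.com/ipfs/go-ipfs/util/testutil/ci" // assumed import path
)

// TestTimingSensitive is a hypothetical example: skip whenever any
// supported CI environment is detected, since shared runners make
// timing assertions unreliable.
func TestTimingSensitive(t *testing.T) {
	if ci.IsRunning() {
		t.Skip("timing-sensitive; skipping on CI")
	}
	// ... assertions that need a quiet machine ...
}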
Code example #2
File: bitswap_test.go  Project: musha68k/go-ipfs
func TestLargeFile(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	if !travis.IsRunning() {
		t.Parallel()
	}

	numInstances := 10
	numBlocks := 100
	PerformDistributionTest(t, numInstances, numBlocks)
}
Code example #3
File: bitswap_test.go  Project: musha68k/go-ipfs
func TestLargeSwarm(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	numInstances := 500
	numBlocks := 2
	if detectrace.WithRace() {
		// when running with the race detector, 500 instances launch
		// well over 8k goroutines. This hits a race detector limit.
		numInstances = 100
	} else if travis.IsRunning() {
		numInstances = 200
	} else {
		t.Parallel()
	}
	PerformDistributionTest(t, numInstances, numBlocks)
}
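
detectrace.WithRace() reports whether the binary was built with -race. A common way to implement such a switch is a pair of build-tagged files; this is a sketch under that assumption, not necessarily how the detectrace package is actually laid out:

// race_on.go: compiled only when the race detector is enabled.
//go:build race

package detectrace

const withRace = true

// race_off.go: compiled otherwise.
//go:build !race

package detectrace

const withRace = false

// detectrace.go: always compiled.
package detectrace

// WithRace reports whether this binary was built with -race.
func WithRace() bool { return withRace }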
Code example #4
File: fracctx_test.go  Project: avbalu/go-ipfs
func TestDeadlineFractionHalf(t *testing.T) {
	if travis.IsRunning() {
		t.Skip("timeouts don't work reliably on travis")
	}

	ctx1, _ := context.WithTimeout(context.Background(), 10*time.Millisecond)
	ctx2, _ := WithDeadlineFraction(ctx1, 0.5)

	select {
	case <-ctx1.Done():
		t.Fatal("ctx1 ended too early")
	case <-ctx2.Done():
		t.Fatal("ctx2 ended too early")
	default:
	}

	<-time.After(2 * time.Millisecond)

	select {
	case <-ctx1.Done():
		t.Fatal("ctx1 ended too early")
	case <-ctx2.Done():
		t.Fatal("ctx2 ended too early")
	default:
	}

	<-time.After(4 * time.Millisecond)

	select {
	case <-ctx1.Done():
		t.Fatal("ctx1 ended too early")
	case <-ctx2.Done():
	default:
		t.Fatal("ctx2 ended too late")
	}

	<-time.After(6 * time.Millisecond)

	select {
	case <-ctx1.Done():
	default:
		t.Fatal("ctx1 ended too late")
	}
}
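
The timing script above pins down WithDeadlineFraction's contract: with a 10 ms parent and fraction 0.5, the child must expire at roughly 5 ms and the parent at 10 ms. A minimal implementation consistent with that contract (a sketch using the standard context package, not the verbatim go-ipfs code):

import (
	"context"
	"time"
)

// WithDeadlineFraction derives a context whose deadline sits at the
// given fraction of the parent's remaining lifetime. If the parent
// has no deadline there is nothing to take a fraction of, so the
// child only inherits cancellation.
func WithDeadlineFraction(ctx context.Context, fraction float64) (context.Context, context.CancelFunc) {
	deadline, ok := ctx.Deadline()
	if !ok {
		return context.WithCancel(ctx)
	}
	left := time.Until(deadline)
	if left < 0 {
		left = 0 // parent already expired
	}
	return context.WithTimeout(ctx, time.Duration(float64(left)*fraction))
}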
Code example #5
File: dial_test.go  Project: avbalu/go-ipfs
func TestDialWait(t *testing.T) {
	// t.Skip("skipping for another test")
	t.Parallel()

	ctx := context.Background()
	swarms := makeSwarms(ctx, t, 1)
	s1 := swarms[0]
	defer s1.Close()

	s1.dialT = time.Millisecond * 300 // lower timeout for tests.
	if travis.IsRunning() {
		s1.dialT = time.Second
	}

	// dial to a non-existent peer.
	s2p, s2addr, s2l := newSilentPeer(t)
	go acceptAndHang(s2l)
	defer s2l.Close()
	s1.peers.AddAddr(s2p, s2addr, peer.PermanentAddrTTL)

	before := time.Now()
	if c, err := s1.Dial(ctx, s2p); err == nil {
		defer c.Close()
		t.Fatal("error swarm dialing to unknown peer worked...", err)
	} else {
		t.Log("correctly got error:", err)
	}
	duration := time.Now().Sub(before)

	dt := s1.dialT
	if duration < dt*dialAttempts {
		t.Error("< DialTimeout * dialAttempts not being respected", duration, dt*dialAttempts)
	}
	if duration > 2*dt*dialAttempts {
		t.Error("> 2*DialTimeout * dialAttempts not being respected", duration, 2*dt*dialAttempts)
	}

	if !s1.backf.Backoff(s2p) {
		t.Error("s2 should now be on backoff")
	}
}
Code example #6
File: fracctx_test.go  Project: avbalu/go-ipfs
// this test is on the context tool itself, not our stuff. it's for sanity on ours.
func TestDeadline(t *testing.T) {
	if travis.IsRunning() {
		t.Skip("timeouts don't work reliably on travis")
	}

	ctx, _ := context.WithTimeout(context.Background(), 5*time.Millisecond)

	select {
	case <-ctx.Done():
		t.Fatal("ended too early")
	default:
	}

	<-time.After(6 * time.Millisecond)

	select {
	case <-ctx.Done():
	default:
		t.Fatal("ended too late")
	}
}
Code example #7
File: conn_test.go  Project: noffle/go-ipfs
func TestCloseLeak(t *testing.T) {
	// t.Skip("Skipping in favor of another test")
	if testing.Short() {
		t.SkipNow()
	}

	if travis.IsRunning() {
		t.Skip("this doesn't work well on travis")
	}

	var wg sync.WaitGroup

	runPair := func(num int) {
		ctx, cancel := context.WithCancel(context.Background())
		c1, c2, _, _ := setupSingleConn(t, ctx)

		mc1 := msgioWrap(c1)
		mc2 := msgioWrap(c2)

		for i := 0; i < num; i++ {
			b1 := []byte(fmt.Sprintf("beep%d", i))
			mc1.WriteMsg(b1)
			b2, err := mc2.ReadMsg()
			if err != nil {
				panic(err)
			}
			if !bytes.Equal(b1, b2) {
				panic(fmt.Errorf("bytes not equal: %s != %s", b1, b2))
			}

			b2 = []byte(fmt.Sprintf("boop%d", i))
			mc2.WriteMsg(b2)
			b1, err = mc1.ReadMsg()
			if err != nil {
				panic(err)
			}
			if !bytes.Equal(b1, b2) {
				panic(fmt.Errorf("bytes not equal: %s != %s", b1, b2))
			}

			<-time.After(time.Microsecond * 5)
		}

		c1.Close()
		c2.Close()
		cancel() // close the listener
		wg.Done()
	}

	var cons = 5
	var msgs = 50
	log.Debugf("Running %d connections * %d msgs.\n", cons, msgs)
	for i := 0; i < cons; i++ {
		wg.Add(1)
		go runPair(msgs)
	}

	log.Debugf("Waiting...\n")
	wg.Wait()
	// done!

	<-time.After(time.Millisecond * 150)
	if runtime.NumGoroutine() > 20 {
		// panic("uncomment me to debug")
		t.Fatal("leaking goroutines:", runtime.NumGoroutine())
	}
}
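
The fixed 150 ms sleep before the goroutine count check is itself a timing guess; a polling helper (hypothetical, not part of the listing) would tolerate slow teardown without stretching the common case:

// waitForGoroutines polls until the goroutine count drops to max or
// the timeout elapses, returning the last observed count.
func waitForGoroutines(max int, timeout time.Duration) int {
	deadline := time.Now().Add(timeout)
	for {
		n := runtime.NumGoroutine()
		if n <= max || time.Now().After(deadline) {
			return n
		}
		time.Sleep(10 * time.Millisecond)
	}
}

The tail of TestCloseLeak would then read: if n := waitForGoroutines(20, time.Second); n > 20 { t.Fatal("leaking goroutines:", n) }.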
Code example #8
File: dht_test.go  Project: avbalu/go-ipfs
func TestConnectCollision(t *testing.T) {
	// t.Skip("skipping test to debug another")
	if testing.Short() {
		t.SkipNow()
	}
	if travisci.IsRunning() {
		t.Skip("Skipping on Travis-CI.")
	}

	runTimes := 10

	for rtime := 0; rtime < runTimes; rtime++ {
		log.Info("Running Time: ", rtime)

		ctx := context.Background()

		dhtA := setupDHT(ctx, t)
		dhtB := setupDHT(ctx, t)

		addrA := dhtA.peerstore.Addrs(dhtA.self)[0]
		addrB := dhtB.peerstore.Addrs(dhtB.self)[0]

		peerA := dhtA.self
		peerB := dhtB.self

		errs := make(chan error)
		go func() {
			dhtA.peerstore.AddAddr(peerB, addrB, peer.TempAddrTTL)
			err := dhtA.Connect(ctx, peerB)
			errs <- err
		}()
		go func() {
			dhtB.peerstore.AddAddr(peerA, addrA, peer.TempAddrTTL)
			err := dhtB.Connect(ctx, peerA)
			errs <- err
		}()

		timeout := time.After(5 * time.Second)
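		// Note: both selects below read from this single channel, so the
		// 5-second budget is shared across both Connect results rather
		// than restarted for the second one.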
		select {
		case e := <-errs:
			if e != nil {
				t.Fatal(e)
			}
		case <-timeout:
			t.Fatal("Timeout received!")
		}
		select {
		case e := <-errs:
			if e != nil {
				t.Fatal(e)
			}
		case <-timeout:
			t.Fatal("Timeout received!")
		}

		dhtA.Close()
		dhtB.Close()
		dhtA.host.Close()
		dhtB.host.Close()
	}
}
Code example #9
func TestSecureCloseLeak(t *testing.T) {
	// t.Skip("Skipping in favor of another test")

	if testing.Short() {
		t.SkipNow()
	}
	if travis.IsRunning() {
		t.Skip("this doesn't work well on travis")
	}

	runPair := func(c1, c2 Conn, num int) {
		log.Debugf("runPair %d", num)

		for i := 0; i < num; i++ {
			log.Debugf("runPair iteration %d", i)
			b1 := []byte("beep")
			c1.WriteMsg(b1)
			b2, err := c2.ReadMsg()
			if err != nil {
				panic(err)
			}
			if !bytes.Equal(b1, b2) {
				panic("bytes not equal")
			}

			b2 = []byte("beep")
			c2.WriteMsg(b2)
			b1, err = c1.ReadMsg()
			if err != nil {
				panic(err)
			}
			if !bytes.Equal(b1, b2) {
				panic("bytes not equal")
			}

			<-time.After(time.Microsecond * 5)
		}
	}

	var cons = 5
	var msgs = 50
	log.Debugf("Running %d connections * %d msgs.\n", cons, msgs)

	var wg sync.WaitGroup
	for i := 0; i < cons; i++ {
		wg.Add(1)

		ctx, cancel := context.WithCancel(context.Background())
		c1, c2, _, _ := setupSecureConn(t, ctx)
		go func(c1, c2 Conn) {

			defer func() {
				c1.Close()
				c2.Close()
				cancel()
				wg.Done()
			}()

			runPair(c1, c2, msgs)
		}(c1, c2)
	}

	log.Debugf("Waiting...\n")
	wg.Wait()
	// done!

	<-time.After(time.Millisecond * 150)
	if runtime.NumGoroutine() > 20 {
		// panic("uncomment me to debug")
		t.Fatal("leaking goroutines:", runtime.NumGoroutine())
	}
}
Code example #10
File: ci.go  Project: thomas-gardner/go-ipfs
// IsRunning attempts to determine whether this process is
// running on CI. This is done by checking any of:
//
//  CI=true
//  travis.IsRunning()
//
func IsRunning() bool {
	return os.Getenv(string(VarCI)) == "true" || travis.IsRunning()
}
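
Travis-CI sets both CI=true and TRAVIS=true in its build environment, so travis.IsRunning() can plausibly be a pure environment check. A sketch under that assumption (the actual go-ipfs package may differ in detail):

package travis

import "os"

// Environment variables Travis-CI sets during builds.
const (
	varCI     = "CI"
	varTravis = "TRAVIS"
)

// IsRunning attempts to determine whether this process is running
// on Travis-CI.
func IsRunning() bool {
	return os.Getenv(varCI) == "true" && os.Getenv(varTravis) == "true"
}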
Code example #11
File: dial_test.go  Project: avbalu/go-ipfs
func TestDialBackoff(t *testing.T) {
	// t.Skip("skipping for another test")
	if travis.IsRunning() || jenkins.IsRunning() {
		t.Skip("travis and jenkins will never have fun with this test")
	}

	t.Parallel()

	ctx := context.Background()
	swarms := makeSwarms(ctx, t, 2)
	s1 := swarms[0]
	s2 := swarms[1]
	defer s1.Close()
	defer s2.Close()

	s1.dialT = time.Second // lower timeout for tests.
	s2.dialT = time.Second // lower timeout for tests.

	s2addrs, err := s2.InterfaceListenAddresses()
	if err != nil {
		t.Fatal(err)
	}
	s1.peers.AddAddrs(s2.local, s2addrs, peer.PermanentAddrTTL)

	// dial to a non-existent peer.
	s3p, s3addr, s3l := newSilentPeer(t)
	go acceptAndHang(s3l)
	defer s3l.Close()
	s1.peers.AddAddr(s3p, s3addr, peer.PermanentAddrTTL)

	// in this test we will:
	//   1) dial 10x to each node.
	//   2) all dials should hang
	//   3) s1->s2 should succeed.
	//   4) s1->s3 should not (and should place s3 on backoff)
	//   5) disconnect entirely
	//   6) dial 10x to each node again
	//   7) s3 dials should all return immediately (except 1)
	//   8) s2 dials should all hang, and succeed
	//   9) last s3 dial ends, unsuccessful

	dialOnlineNode := func(dst peer.ID, times int) <-chan bool {
		ch := make(chan bool)
		for i := 0; i < times; i++ {
			go func() {
				if _, err := s1.Dial(ctx, dst); err != nil {
					t.Error("error dialing", dst, err)
					ch <- false
				} else {
					ch <- true
				}
			}()
		}
		return ch
	}

	dialOfflineNode := func(dst peer.ID, times int) <-chan bool {
		ch := make(chan bool)
		for i := 0; i < times; i++ {
			go func() {
				if c, err := s1.Dial(ctx, dst); err != nil {
					ch <- false
				} else {
					t.Error("succeeded in dialing", dst)
					ch <- true
					c.Close()
				}
			}()
		}
		return ch
	}

	{
		// 1) dial 10x to each node.
		N := 10
		s2done := dialOnlineNode(s2.local, N)
		s3done := dialOfflineNode(s3p, N)

		// when all dials should be done by:
		dialTimeout1x := time.After(s1.dialT)
		// dialTimeout1Ax := time.After(s1.dialT * 2)       // dialAttempts)
		dialTimeout10Ax := time.After(s1.dialT * 2 * 10) // dialAttempts * 10)

		// 2) all dials should hang
		select {
		case <-s2done:
			t.Error("s2 should not happen immediately")
		case <-s3done:
			t.Error("s3 should not happen yet")
		case <-time.After(time.Millisecond):
			// s2 may finish very quickly, so let's get out.
		}

		// 3) s1->s2 should succeed.
		for i := 0; i < N; i++ {
			select {
			case r := <-s2done:
				if !r {
					t.Error("s2 should not fail")
				}
			case <-s3done:
				t.Error("s3 should not happen yet")
			case <-dialTimeout1x:
				t.Error("s2 took too long")
			}
		}

		select {
		case <-s2done:
			t.Error("s2 should have no more")
		case <-s3done:
			t.Error("s3 should not happen yet")
		case <-dialTimeout1x: // let it pass
		}

		// 4) s1->s3 should not (and should place s3 on backoff)
		// N-1 should finish before dialTimeout1x * 2
		for i := 0; i < N; i++ {
			select {
			case <-s2done:
				t.Error("s2 should have no more")
			case r := <-s3done:
				if r {
					t.Error("s3 should not succeed")
				}
			case <-(dialTimeout1x):
				if i < (N - 1) {
					t.Fatal("s3 took too long")
				}
				t.Log("dialTimeout1x * 1.3 hit for last peer")
			case <-dialTimeout10Ax:
				t.Fatal("s3 took too long")
			}
		}

		// check backoff state
		if s1.backf.Backoff(s2.local) {
			t.Error("s2 should not be on backoff")
		}
		if !s1.backf.Backoff(s3p) {
			t.Error("s3 should be on backoff")
		}

		// 5) disconnect entirely

		for _, c := range s1.Connections() {
			c.Close()
		}
		for i := 0; i < 100 && len(s1.Connections()) > 0; i++ {
			<-time.After(time.Millisecond)
		}
		if len(s1.Connections()) > 0 {
			t.Fatal("s1 conns must exit")
		}
	}

	{
		// 6) dial 10x to each node again
		N := 10
		s2done := dialOnlineNode(s2.local, N)
		s3done := dialOfflineNode(s3p, N)

		// when all dials should be done by:
		dialTimeout1x := time.After(s1.dialT)
		// dialTimeout1Ax := time.After(s1.dialT * 2)       // dialAttempts)
		dialTimeout10Ax := time.After(s1.dialT * 2 * 10) // dialAttempts * 10)

		// 7) s3 dials should all return immediately (except 1)
		for i := 0; i < N-1; i++ {
			select {
			case <-s2done:
				t.Error("s2 should not succeed yet")
			case r := <-s3done:
				if r {
					t.Error("s3 should not succeed")
				}
			case <-dialTimeout1x:
				t.Fatal("s3 took too long")
			}
		}

		// 8) s2 dials should all hang, and succeed
		for i := 0; i < N; i++ {
			select {
			case r := <-s2done:
				if !r {
					t.Error("s2 should succeed")
				}
			// case <-s3done:
			case <-(dialTimeout1x):
				t.Fatal("s3 took too long")
			}
		}

		// 9) the last s3 should return, failed.
		select {
		case <-s2done:
			t.Error("s2 should have no more")
		case r := <-s3done:
			if r {
				t.Error("s3 should not succeed")
			}
		case <-dialTimeout10Ax:
			t.Fatal("s3 took too long")
		}

		// check backoff state (the same)
		if s1.backf.Backoff(s2.local) {
			t.Error("s2 should not be on backoff")
		}
		if !s1.backf.Backoff(s3p) {
			t.Error("s3 should be on backoff")
		}

	}
}
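
The backf field consulted in steps 4 and 9 records peers whose dials failed, so later attempts can fail fast instead of re-dialing. A minimal sketch of such a tracker (hypothetical, shown only to explain the Backoff/AddBackoff semantics the tests rely on; the real go-ipfs type is more elaborate):

// dialBackoff is a hypothetical minimal backoff set; assumes
// "sync" and the peer package (for peer.ID) are imported.
type dialBackoff struct {
	mu      sync.Mutex
	entries map[peer.ID]struct{}
}

// AddBackoff marks p so later dial attempts can fail fast.
func (db *dialBackoff) AddBackoff(p peer.ID) {
	db.mu.Lock()
	defer db.mu.Unlock()
	if db.entries == nil {
		db.entries = make(map[peer.ID]struct{})
	}
	db.entries[p] = struct{}{}
}

// Backoff reports whether p is currently marked.
func (db *dialBackoff) Backoff(p peer.ID) bool {
	db.mu.Lock()
	defer db.mu.Unlock()
	_, ok := db.entries[p]
	return ok
}

// Clear removes the mark, e.g. after a successful dial.
func (db *dialBackoff) Clear(p peer.ID) {
	db.mu.Lock()
	defer db.mu.Unlock()
	delete(db.entries, p)
}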