Example #1
func TestChan3() {
	fmt.Println("@@@@@@@@@@@@ TestChan 3")

	fmt.Printf("cpu num: %d\n", runtime.NumCPU()) // 8核cpu

	// 虽然goroutine是并发执行的,但是它们并不是并行运行的。如果不告诉Go额外的东西,同
	// 一时刻只会有一个goroutine执行。利用runtime.GOMAXPROCS(n)可以设置goroutine
	// 并行执行的数量。GOMAXPROCS 设置了同时运行的CPU 的最大数量,并返回之前的设置。
	val := runtime.GOMAXPROCS(runtime.NumCPU() * 4)
	fmt.Printf("last goroutine num: %d\n", val) // 8个

	fmt.Printf("goroutine num: %d\n", runtime.NumGoroutine()) // 4个goroutine同时运行

	ch1 := make(chan int)
	ch2 := make(chan int)
	ch3 := make(chan int)

	go write(ch1, 22)
	go write(ch2, 33)
	go write(ch3, 44)
	go read(ch1)
	go read(ch2)
	go read(ch3)

	fmt.Printf("goroutine num: %d\n", runtime.NumGoroutine()) // 10个goroutine同时运行
	sleep("TestChan3", 3)
}
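The write, read, and sleep helpers called above are not shown on this page; they are assumptions about the original repository. A minimal sketch (fmt and time imported as in the other examples) might look like this:

// Hypothetical helpers assumed by Example #1.
func write(ch chan<- int, v int) {
	ch <- v
	fmt.Printf("wrote %d\n", v)
}

func read(ch <-chan int) {
	fmt.Printf("read %d\n", <-ch)
}

func sleep(name string, seconds int) {
	time.Sleep(time.Duration(seconds) * time.Second)
	fmt.Printf("%s slept for %ds\n", name, seconds)
}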
Example #2
func TestParallelSimple(t *testing.T) {
	On("p-1", func() {
		for i := 1; i <= 10000; i++ {

		}
	})

	On("p-2", func() {
		for i := 1; i <= 10000; i++ {

		}
	})
	prev := runtime.NumGoroutine()
	FireBackground("p-1")
	FireBackground("p-2")
	FireBackground("p-2")
	FireBackground("p-2")
	FireBackground("p-2")
	FireBackground("p-2")
	FireBackground("p-2")
	FireBackground("p-2")

	now := runtime.NumGoroutine()
	fmt.Println("Number of go routine running ", now-prev)
	Equal(t, 8, now-prev)
	ClearEvents()
}
Example #3
func main() {
	fmt.Println("Please wait....")
	URL := flag.String("i", "https://127.0.0.1/", "dst server URL")
	num := flag.Int("n", 1, "process num")
	loop := flag.Bool("l", false, "loop flag")
	ecdh := flag.Bool("e", false, "ecdh flag")
	show := flag.Bool("s", false, "show body flag")
	flag.Parse()
	result := make(chan int)
	// goroutine that collects the results of each connection
	go recieving(result, *loop, *num)

	t := time.Now()
	for i := 0; i < *num; i++ { //通信プロセスの作成
		go connect(*URL, i, *loop, *show, *ecdh, result)
	}
	f := time.Now()
	// time taken to spawn the connection goroutines
	fmt.Println("Create process time: ", f.Sub(t))
	// number of goroutines currently running
	fmt.Println("Current Process num: ", runtime.NumGoroutine())
	// keep the main goroutine waiting
	time.Sleep(3 * time.Second) // wait out the spawn time
	// keep waiting while worker goroutines remain
	for runtime.NumGoroutine() > 4 || *loop {
		time.Sleep(1 * time.Second)
	}
	// goroutine count at exit
	fmt.Println("Current Process num: ", runtime.NumGoroutine())
}
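The recieving and connect functions are defined elsewhere in the original program; only their call signatures are visible here. A rough sketch of the result collector, under those assumptions (fmt imported):

// Hypothetical sketch of the collector goroutine used by Example #3.
// It drains the result channel until every connection goroutine has
// reported once, or keeps draining forever when the loop flag is set.
func recieving(result <-chan int, loop bool, num int) {
	done := 0
	for id := range result {
		fmt.Printf("connection %d finished\n", id)
		done++
		if !loop && done >= num {
			return
		}
	}
}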
Example #4
func TestQueueConsumerRunStopsGracefullyWhenCancelled(t *testing.T) {
	// log to /dev/null because the deleter is chatty
	log.SetOutput(ioutil.Discard)
	defer func() {
		log.SetOutput(os.Stderr)
	}()

	ctl := gomock.NewController(t)
	defer ctl.Finish()

	// delay so that the cancel occurs mid-receive
	delay := func(x interface{}) {
		time.Sleep(10 * time.Millisecond)
	}
	m := mock.NewMockSQSAPI(ctl)
	m.EXPECT().ReceiveMessage(gomock.Any()).Do(delay).Return(&sqs.ReceiveMessageOutput{}, nil).AnyTimes()
	m.EXPECT().DeleteMessageBatch(gomock.Any()).AnyTimes().Return(&sqs.DeleteMessageBatchOutput{}, nil)
	m.EXPECT().ChangeMessageVisibilityBatch(gomock.Any()).AnyTimes()

	s := &SQSService{Svc: m}
	q := NewConsumer(s, noop)
	q.delayAfterReceiveError = time.Millisecond

	ngo := runtime.NumGoroutine()

	// wait long enough to ensure ReceiveMessage is running
	ctx, _ := context.WithTimeout(context.Background(), 5*time.Millisecond)
	err := q.Run(ctx)

	assert.Error(t, err)

	time.Sleep(time.Millisecond) // time for goroutines to end
	assert.InDelta(t, ngo, runtime.NumGoroutine(), 2, "Should not leak goroutines")
}
Example #5
// For issue 1791
func (s *testTableCodecSuite) TestGoroutineLeak(c *C) {
	var sr SelectResult
	countBefore := runtime.NumGoroutine()

	sr = &selectResult{
		resp:    &mockResponse{},
		results: make(chan PartialResult, 5),
		done:    make(chan error, 1),
		closed:  make(chan struct{}),
	}
	go sr.Fetch()
	for {
		// mock test will generate some partial result then return error
		_, err := sr.Next()
		if err != nil {
			// close selectResult on error, partialResult's fetch goroutine may leak
			sr.Close()
			break
		}
	}

	tick := 10 * time.Millisecond
	totalSleep := time.Duration(0)
	for totalSleep < 3*time.Second {
		time.Sleep(tick)
		totalSleep += tick
		countAfter := runtime.NumGoroutine()

		if countAfter-countBefore < 5 {
			return
		}
	}

	c.Error("distsql goroutine leak!")
}
Example #6
func TestStartStop(t *testing.T) {
	assert := assert.New(t)

	startGoroutineNum := runtime.NumGoroutine()

	for i := 0; i < 10; i++ {
		qa.Root(t, func(root string) {
			configFile := TestConfig(root)

			app := carbon.New(configFile)

			assert.NoError(app.ParseConfig())
			assert.NoError(app.Start())

			app.Stop()
		})
	}

	endGoroutineNum := runtime.NumGoroutine()

	// GC worker etc
	if !assert.InDelta(startGoroutineNum, endGoroutineNum, 3) {
		p := pprof.Lookup("goroutine")
		p.WriteTo(os.Stdout, 1)
	}

}
Example #7
func TestClient(t *testing.T) {
	log.Println("----------------TestClient begins----------------")
	cli, err := NewClient(s)

	if err != nil {
		t.Fatal(err)
	}

	if err = cli.Connect(); err != nil {
		t.Fatal(err)
	}
	n := runtime.NumGoroutine()
	go cli.Spin()
	if n == runtime.NumGoroutine() {
		t.Fatalf("no goroutine created!!!")
	}
	time.Sleep(10 * time.Second)
	cli.Stop()
	time.Sleep(2 * time.Second) // let goroutines die
	log.Println("stopped spinning")
	if n != runtime.NumGoroutine() {
		t.Fatalf("%d goroutines still running!!!", runtime.NumGoroutine()-n)
	}
	log.Println("----------------TestClient ends----------------")
}
Example #8
func TestRecvChanLeakGoRoutines(t *testing.T) {
	s := RunDefaultServer()
	defer s.Shutdown()

	ec := NewEConn(t)
	defer ec.Close()

	// Call this to make sure that we have everything setup connection wise
	ec.Flush()

	before := runtime.NumGoroutine()

	ch := make(chan int)

	sub, err := ec.BindRecvChan("foo", ch)
	if err != nil {
		t.Fatalf("Failed to bind to a send channel: %v\n", err)
	}
	sub.Unsubscribe()

	// Sleep a bit to wait for the Go routine to exit.
	time.Sleep(500 * time.Millisecond)

	delta := (runtime.NumGoroutine() - before)

	if delta > 0 {
		t.Fatalf("Leaked Go routine(s) : %d, closing channel should have closed them\n", delta)
	}
}
Example #9
func TestRecvChanAsyncLeakGoRoutines(t *testing.T) {
	ec := NewEConn(t)
	defer ec.Close()

	before := runtime.NumGoroutine()

	ch := make(chan int)

	if _, err := ec.BindRecvChan("foo", ch); err != nil {
		t.Fatalf("Failed to bind to a send channel: %v\n", err)
	}

	// Close the receive Channel
	close(ch)

	// The publish will trigger the close and shutdown of the Go routines
	ec.Publish("foo", 22)
	ec.Flush()

	time.Sleep(50 * time.Millisecond)

	after := runtime.NumGoroutine()

	if before != after {
		t.Fatalf("Leaked Go routine(s) : %d, closing channel should have closed them\n", after-before)
	}
}
Example #10
func main() {
	fmt.Println("Please wait 5s....")
	ip := flag.String("i", "127.0.0.1:443", "dst server ip address(default 127.0.0.1:443)")
	num := flag.Int("n", 1, "process num")
	loop := flag.Bool("l", false, "loop flag")
	flag.Parse()
	result := make(chan int)

	go recieving(result, *loop, *num)

	t := time.Now()
	for i := 0; i < *num; i++ { //通信プロセスの作成
		go loopconnection(*ip, i, *loop, result)
	}
	f := time.Now()
	// time taken to spawn the connection goroutines
	fmt.Println("Create process time: ", f.Sub(t))
	// number of goroutines currently running
	fmt.Println("Current Process num: ", runtime.NumGoroutine())
	// keep the main goroutine waiting
	time.Sleep(3 * time.Second)
	for runtime.NumGoroutine() > 2 {
		time.Sleep(1 * time.Second)
		fmt.Println("Current Process num: ", runtime.NumGoroutine())
	}
}
Example #11
func TestRecvChanAsyncLeakGoRoutines(t *testing.T) {
	s := RunDefaultServer()
	defer s.Shutdown()

	ec := NewEConn(t)
	defer ec.Close()

	// Call this to make sure that we have everything setup connection wise
	ec.Flush()

	before := runtime.NumGoroutine()

	ch := make(chan int)

	if _, err := ec.BindRecvChan("foo", ch); err != nil {
		t.Fatalf("Failed to bind to a send channel: %v\n", err)
	}

	// Close the receive Channel
	close(ch)

	// The publish will trigger the close and shutdown of the Go routines
	ec.Publish("foo", 22)
	ec.Flush()

	time.Sleep(100 * time.Millisecond)

	delta := (runtime.NumGoroutine() - before)

	if delta > 0 {
		t.Fatalf("Leaked Go routine(s) : %d, closing channel should have closed them\n", delta)
	}
}
Example #12
func TestCloseLeakingGoRoutines(t *testing.T) {
	s := RunDefaultServer()
	defer s.Shutdown()

	// Give time for things to settle before capturing the number of
	// go routines
	time.Sleep(500 * time.Millisecond)

	base := runtime.NumGoroutine()

	nc := NewDefaultConnection(t)

	nc.Flush()
	nc.Close()

	// Give time for things to settle before capturing the number of
	// go routines
	time.Sleep(500 * time.Millisecond)

	delta := (runtime.NumGoroutine() - base)
	if delta > 0 {
		t.Fatalf("%d Go routines still exist post Close()", delta)
	}
	// Make sure we can call Close() multiple times
	nc.Close()
}
Example #13
func TestActiveClose(t *testing.T) {
	go func() {
		log.Println(http.ListenAndServe("localhost:6060", nil))
	}()
	s := New()
	s.RegCb(&testcb{s})

	p := make([]runtime.StackRecord, 100)
	fn, _ := runtime.GoroutineProfile(p)
	Println(p[:fn])
	Println("runtime.NumGoroutine()", runtime.NumGoroutine())

	connid, err := s.ConnectBlock("10.1.9.34:9889", time.Second*10)
	if err != nil {
		return
	}
	time.Sleep(time.Second * 50)
	fn, _ = runtime.GoroutineProfile(p)
	Println(p[:fn])
	Println("runtime.NumGoroutine()", runtime.NumGoroutine())

	s.CloseConn(connid)

	time.Sleep(time.Second * 50)
	fn, _ = runtime.GoroutineProfile(p)
	Println(p[:fn])
	Println("runtime.NumGoroutine()", runtime.NumGoroutine())
}
Example #14
func (this *Matrix) parallel_Traspose(i0, i1, j0, j1 int, res *Matrix, done chan<- bool, conjugate bool) {
	di := i1 - i0
	dj := j1 - j0
	done2 := make(chan bool, THRESHOLD)
	if di >= dj && di >= THRESHOLD && runtime.NumGoroutine() < maxGoRoutines {
		mi := i0 + di/2
		go this.parallel_Traspose(i0, mi, j0, j1, res, done2, conjugate)
		this.parallel_Traspose(mi, i1, j0, j1, res, done2, conjugate)
		<-done2
		<-done2
	} else if dj >= THRESHOLD && runtime.NumGoroutine() < maxGoRoutines {
		mj := j0 + dj/2

		go this.parallel_Traspose(i0, i1, j0, mj, res, done2, conjugate)
		this.parallel_Traspose(i0, i1, mj, j1, res, done2, conjugate)
		<-done2
		<-done2
	} else {

		if !conjugate {
			for i := i0; i <= i1; i++ {
				for j := j0; j <= j1; j++ {
					res.SetValue(j, i, this.GetValue(i, j))
				}
			}
		} else {
			for i := i0; i <= i1; i++ {
				for j := j0; j <= j1; j++ {
					res.SetValue(j, i, cmplx.Conj(this.GetValue(i, j)))
				}
			}
		}
	}
	done <- true
}
Example #15
func main() {
	fmt.Println(runtime.NumGoroutine())
	//runtime.GOMAXPROCS(1)

	// seed the random number generator
	rand.Seed(time.Now().Unix())

	begin := make(chan bool)
	go busy(begin)
	fmt.Println(runtime.NumGoroutine())
	<-begin
	fmt.Println("is f*****g!")

	var input string
	fmt.Scanln(&input)

	fmt.Println("done1")

	var name string
	for i := 0; i < 3; i++ {
		name = fmt.Sprintf("go_%02d", i) // generate an ID
		// random wait time between 0 and 4 seconds
		go routine(name, time.Duration(rand.Intn(5))*time.Second)
	}

	fmt.Println(runtime.NumGoroutine())
	// keep the main goroutine alive; if main exits, all goroutines exit with it

	fmt.Scanln(&input)
	fmt.Println("done")
	fmt.Println(runtime.NumGoroutine())
}
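busy and routine are helpers from the original source that are not shown here. A plausible sketch, with their behaviour guessed from how main uses them (fmt and time imported):

// Hypothetical helpers assumed by Example #15.
func busy(begin chan bool) {
	begin <- true // unblock main, then keep burning CPU
	for {
	}
}

func routine(name string, delay time.Duration) {
	time.Sleep(delay)
	fmt.Printf("%s woke up after %v\n", name, delay)
}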
Example #16
func TestParallel(t *testing.T) {
	On("p-1", func() {
		for i := 1; i <= 10000; i++ {

		}
	})

	On("p-2", func() {
		for i := 1; i <= 10000; i++ {

		}
	})
	prev := runtime.NumGoroutine()
	FireBackground("p-1")
	FireBackground("p-2")
	FireBackground("p-2")
	FireBackground("p-2")
	FireBackground("p-2")
	FireBackground("p-2")
	FireBackground("p-2")
	FireBackground("p-2")

	now := runtime.NumGoroutine()
	assert.Equal(t, 8, now-prev)
	ClearEvents()
}
Example #17
func main() {
	fmt.Println("Start: Number of running goroutines: %d", runtime.NumGoroutine())

	urls := []string{
		"http://ip.jsontest.com/",
		"http://date.jsontest.com",
		"http://md5.jsontest.com/?text=foo",
		"http://echo.jsontest.com/key/value/one/two",
		"http://headers.jsontest.com/"}

	input := make(chan string, 5)
	output := make(chan string, 5)
	quit := make(chan bool)

	for i, v := range urls {
		input <- v
		go func(i int) {
			getUrl(i, input, output, quit)
		}(i)
	}

	fmt.Println("After loop: Number of running goroutines: %d", runtime.NumGoroutine())

	close(quit)

	for i := 0; i < 5; i++ {
		msg := <-output
		fmt.Println(msg)
	}

}
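getUrl is not part of this listing; a minimal sketch of a worker with that signature (net/http, io/ioutil, and fmt assumed to be imported) could be:

// Hypothetical worker assumed by Example #17: fetch one URL taken from the
// input channel and report the outcome on the output channel.
func getUrl(id int, input <-chan string, output chan<- string, quit <-chan bool) {
	url := <-input
	resp, err := http.Get(url)
	if err != nil {
		output <- fmt.Sprintf("worker %d: %s failed: %v", id, url, err)
		return
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	output <- fmt.Sprintf("worker %d: %s -> %d bytes", id, url, len(body))
	<-quit // returns immediately once main closes the quit channel
}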
Example #18
func printGC(memStats *runtime.MemStats, gcstats *debug.GCStats) {

	if gcstats.NumGC > 0 {
		lastPause := gcstats.Pause[0]
		elapsed := time.Since(startTime)
		overhead := float64(gcstats.PauseTotal) / float64(elapsed) * 100
		allocatedRate := float64(memStats.TotalAlloc) / elapsed.Seconds()
		log.Printf("[GC-Enabled]Connection:%d Goroutine:%d NumGC:%d Pause:%s Pause(Avg):%s Overhead:%3.2f%% Alloc:%s Sys:%s Alloc(Rate):%s/s Histogram:%s %s %s \n",
			ProxyConn,
			runtime.NumGoroutine(),
			gcstats.NumGC,
			toS(lastPause),
			toS(avg(gcstats.Pause)),
			overhead,
			toH(memStats.Alloc),
			toH(memStats.Sys),
			toH(uint64(allocatedRate)),
			toS(gcstats.PauseQuantiles[94]),
			toS(gcstats.PauseQuantiles[98]),
			toS(gcstats.PauseQuantiles[99]))
	} else {
		// while GC is disabled
		elapsed := time.Since(startTime)
		allocatedRate := float64(memStats.TotalAlloc) / elapsed.Seconds()

		log.Printf("[GC-Disabled]Connection:%d Goroutine:%d Alloc:%s Sys:%s Alloc(Rate):%s/s\n",
			ProxyConn,
			runtime.NumGoroutine(),
			toH(memStats.Alloc),
			toH(memStats.Sys),
			toH(uint64(allocatedRate)))
	}
}
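toS, toH, and avg (along with the package-level startTime and ProxyConn) come from the surrounding program. Rough sketches of the formatting helpers, with their signatures assumed from the call sites (fmt and time imported):

// Hypothetical formatting helpers assumed by Example #18.
func toS(d time.Duration) string { return d.String() }

func toH(bytes uint64) string {
	const unit = 1024
	if bytes < unit {
		return fmt.Sprintf("%dB", bytes)
	}
	div, exp := uint64(unit), 0
	for n := bytes / unit; n >= unit; n /= unit {
		div *= unit
		exp++
	}
	return fmt.Sprintf("%.1f%cB", float64(bytes)/float64(div), "KMGTPE"[exp])
}

func avg(durations []time.Duration) time.Duration {
	if len(durations) == 0 {
		return 0
	}
	var total time.Duration
	for _, d := range durations {
		total += d
	}
	return total / time.Duration(len(durations))
}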
Example #19
func main() {
	fmt.Printf("At start there are %d goroutines running\n",
		runtime.NumGoroutine())
	NUM_CPUS := runtime.NumCPU()
	loops_to_do := NUM_CPUS * 5
	estTime := (BigRunTime * loops_to_do) / NUM_CPUS
	fmt.Printf("est runtime %d to %d seconds...\n", estTime, estTime*2)
	throttle := make(chan int, NUM_CPUS)
	fmt.Printf("starting %d goroutines in total\n", loops_to_do)
	var wg sync.WaitGroup
	for i := 1; i <= loops_to_do; i++ {
		throttle <- 1
		wg.Add(1)
		go func(z int) {
			fmt.Printf("goroutine number[%d] started; %d are running now\n",
				z, runtime.NumGoroutine())

			rv := cpu_hog(z)
			_ = <-throttle
			wg.Done()
			fmt.Printf("cpu_hog(%d) = %.2f\n", z, rv)
		}(i)
	}
	fmt.Printf("%d new goroutines were started, %d are still running\n",
		loops_to_do, runtime.NumGoroutine())
	wg.Wait()
	fmt.Printf("At finish there are %d goroutines running\n",
		runtime.NumGoroutine())
}
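BigRunTime and cpu_hog are defined in the original source; only their rough shape is implied here. A sketch that burns CPU for roughly BigRunTime seconds per call (math and time imported), purely as an assumption:

// Hypothetical CPU-bound worker assumed by Example #19.
const BigRunTime = 5 // approximate per-goroutine runtime in seconds

func cpu_hog(seed int) float64 {
	deadline := time.Now().Add(BigRunTime * time.Second)
	x := float64(seed)
	for time.Now().Before(deadline) {
		x = math.Sqrt(x + 1)
	}
	return x
}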
Example #20
func TestCloseLeak(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	if os.Getenv("TRAVIS") == "true" {
		t.Skip("this doesn't work well on travis")
	}

	var wg sync.WaitGroup

	runPair := func(p1, p2, num int) {
		a1 := strconv.Itoa(p1)
		a2 := strconv.Itoa(p2)
		ctx, cancel := context.WithCancel(context.Background())
		c1, c2 := setupConn(t, ctx, "/ip4/127.0.0.1/tcp/"+a1, "/ip4/127.0.0.1/tcp/"+a2)

		for i := 0; i < num; i++ {
			b1 := []byte("beep")
			c1.Out() <- b1
			b2 := <-c2.In()
			if !bytes.Equal(b1, b2) {
				panic("bytes not equal")
			}

			b2 = []byte("boop")
			c2.Out() <- b2
			b1 = <-c1.In()
			if !bytes.Equal(b1, b2) {
				panic("bytes not equal")
			}

			<-time.After(time.Microsecond * 5)
		}

		c1.Close()
		c2.Close()
		cancel() // close the listener
		wg.Done()
	}

	var cons = 20
	var msgs = 100
	fmt.Printf("Running %d connections * %d msgs.\n", cons, msgs)
	for i := 0; i < cons; i++ {
		wg.Add(1)
		go runPair(2000+i, 2001+i, msgs)
	}

	fmt.Printf("Waiting...\n")
	wg.Wait()
	// done!

	<-time.After(time.Millisecond * 150)
	if runtime.NumGoroutine() > 20 {
		// panic("uncomment me to debug")
		t.Fatal("leaking goroutines:", runtime.NumGoroutine())
	}
}
Example #21
// Test that we can open and close a file
func TestOpenClose(test *testing.T) {
	fs, proc := OpenMinixImage(test)

	file, err := proc.Open("/sample/europarl-en.txt", common.O_RDONLY, 0666)
	if err != nil {
		testutils.FatalHere(test, "Failed opening file: %s", err)
	}

	found, count := checkFileAndCount(proc, file)

	if !found {
		testutils.FatalHere(test, "Did not find open file in proc.files")
	}
	if count != 1 {
		testutils.FatalHere(test, "Open file count incorrect got %d, expected %d", count, 1)
	}

	// Now close the file and make sure things are cleaned up
	err = proc.Close(file)
	if err != nil {
		testutils.FatalHere(test, "Failed closing file: %s", err)
	}

	found, count = checkFileAndCount(proc, file)

	if found {
		testutils.FatalHere(test, "Found file in process table, should not have")
	}
	if count != 0 {
		testutils.FatalHere(test, "Open file count mismatch got %d, expected %d", count, 0)
	}

	// How many goroutines are open right now?
	numgoros := runtime.NumGoroutine()
	stacknow := make([]byte, 4096)
	runtime.Stack(stacknow, true)

	fs.Exit(proc)
	err = fs.Shutdown()
	if err != nil {
		testutils.FatalHere(test, "Failed when shutting down filesystem: %s", err)
	}

	// We expect shutdown to have killed the following goroutines
	//  * device
	//  * block cache
	//  * inode cache
	//  * allocation table
	//  * file server

	// This test is fragile, so be careful with it!
	expected := numgoros - 5
	if runtime.NumGoroutine() != expected {
		test.Logf("Original stack:\n%s\n", stacknow)
		newstack := make([]byte, 4096)
		runtime.Stack(newstack, true)
		test.Logf("Current stack:\n%s\n", newstack)
		testutils.FatalHere(test, "Goroutine count mismatch got %d, expected %d", expected, runtime.NumGoroutine())
	}
}
Example #22
func TestFullWriteRead(t *testing.T) {
	const num = 1000
	const maxmax = 1e6
	const size = 1e6
	bufs := make([][]byte, num)
	numGoStart := runtime.NumGoroutine()
	buf := NewBuffer()
	defer func() {
		buf.Close()
		numGoStop := runtime.NumGoroutine()
		t.Log("NumGoroutine:", numGoStart, numGoStop, numGoStop-numGoStart)
	}()
	for i := 0; i < num; i++ {
		n, err := rand.Int63n(size, "go")
		if err != nil {
			t.Fatal(e.Trace(e.Forward(err)))
		}
		in, err := rand.Bytes(int(n)+1, "go")
		if err != nil {
			t.Fatal(e.Trace(e.Forward(err)))
		}
		bufs[i] = in
		_, err = buf.Write(in)
		if err != nil {
			t.Fatal(e.Trace(e.Forward(err)))
		}
	}
	for i := 0; i < num; i++ {
		n, err := rand.Int63n(maxmax, "go")
		if err != nil {
			t.Fatal(e.Trace(e.Forward(err)))
		}
		max := int(n) + 1

		in := bufs[i]
		size := len(in)
		if size > max {
			size = max
		}
		b := make([]byte, len(in))

		for count := 0; count < len(in); {
			end := len(b)
			if count+size < end {
				end = count + size
			}
			n, err := buf.Read(b[count:end])
			if err != nil {
				t.Fatal(e.Trace(e.Forward(err)))
			}
			count += n
		}
		if !bytes.Equal(in, b) {
			t.Fatal("data not equal")
		}
	}
}
Example #23
func main() {
	defer func() {
		if r := recover(); r != nil {
			log.Printf("PANIC! %s\n", r)
		}
	}()
	configFile := flag.String("config", "config.json", "Config file")
	flag.Parse()

	config := parseConfig(*configFile)
	if config == nil {
		log.Fatal("No config")
		return
	}

	// This is an odd value full of voodoo.
	// The docs say that this should match the number of CPUs, only if you
	// set it to 1, go appears to not actually spawn any threads. (None of
	// the poundSock() calls are made.) If you give it something too excessive,
	// the scheduler blows chunks. 8 per CPU, while fairly arbitrary, seems
	// to provide the greatest stability.
	//
	// Go is a fun toy, but this is why you don't build hospitals out of lego.
	runtime.GOMAXPROCS(runtime.NumCPU() * 8)

	chans := make(map[int]chan int)
	cmd := make(chan int, config.Clients)

	// run as many clients as specified
	totalClients := config.Clients
	for cli := 0; cli < totalClients; cli++ {

		ctrl := make(chan int)
		chans[cli] = ctrl
		// open a socket to the Target
		log.Printf("Spawning %d\n", cli)

		go func(cli int) {
			poundSock(config.Target, config, cmd, ctrl, cli)
		}(cli)
	}
	lastTot := runtime.NumGoroutine()
	tc := time.NewTicker(time.Duration(time.Second * 5))
	for {
		select {
		case x := <-cmd:
			log.Printf("Exiting %d \n", x)
			totalClients = runtime.NumGoroutine()
		case <-tc.C:
			if totalClients != lastTot {
				log.Printf("Info: Active Clients: %d \n", totalClients)
				lastTot = totalClients
			}
		}
	}
}
Example #24
func TestGracefullyStop(t *testing.T) {
	assert := assert.New(t)

	do := func(maxUpdatesPerSecond int, workers int) {
		ch := make(chan *points.Points, 1000)

		qa.Root(t, func(root string) {
			p := NewWhisper(root, nil, nil, ch)
			p.SetMaxUpdatesPerSecond(maxUpdatesPerSecond)
			p.SetWorkers(workers)

			storeWait := make(chan bool)
			var storeCount uint32

			p.mockStore = func(p *Whisper, values *points.Points) {
				<-storeWait
				atomic.AddUint32(&storeCount, 1)
			}

			var sentCount int

			p.Start()

		SEND_LOOP:
			for {
				select {
				case ch <- points.NowPoint(fmt.Sprintf("%d", sentCount), float64(sentCount)):
					sentCount++
				default:
					break SEND_LOOP
				}
			}

			time.Sleep(10 * time.Millisecond)
			close(storeWait)

			p.Stop()

			storeCount += uint32(len(ch))
			assert.Equal(sentCount, int(storeCount), "maxUpdatesPerSecond: %d, workers: %d", maxUpdatesPerSecond, workers)

		})
	}

	startGoroutineNum := runtime.NumGoroutine()
	for i := 0; i < 5; i++ {
		for _, maxUpdatesPerSecond := range []int{0, 4000} {
			for _, workers := range []int{1, 4} {
				do(maxUpdatesPerSecond, workers)
			}
		}
	}
	endGoroutineNum := runtime.NumGoroutine()
	// GC worker etc
	assert.InDelta(startGoroutineNum, endGoroutineNum, 2)
}
Example #25
func TestSimpleGoServerShutdown(t *testing.T) {
	base := runtime.NumGoroutine()
	s := runDefaultServer()
	s.Shutdown()
	time.Sleep(10 * time.Millisecond)
	delta := (runtime.NumGoroutine() - base)
	if delta > 1 {
		t.Fatalf("%d Go routines still exist post Shutdown()", delta)
	}
}
Example #26
func TestRouteGoServerShutdown(t *testing.T) {
	base := runtime.NumGoroutine()
	s, _ := runRouteServer(t)
	s.Shutdown()
	time.Sleep(50 * time.Millisecond)
	delta := (runtime.NumGoroutine() - base)
	if delta > 1 {
		t.Fatalf("%d Go routines still exist post Shutdown()", delta)
	}
}
Example #27
func TestQueueConsumerRunProcessesMessages(t *testing.T) {
	// log to /dev/null because the deleter is chatty
	log.SetOutput(ioutil.Discard)
	defer func() {
		log.SetOutput(os.Stderr)
	}()

	ctl := gomock.NewController(t)
	defer ctl.Finish()

	// delay so that the cancel occurs during 2nd receive
	delay := func(x interface{}) {
		time.Sleep(10 * time.Millisecond)
	}
	m := mock.NewMockSQSAPI(ctl)
	received := &sqs.ReceiveMessageOutput{
		Messages: []*sqs.Message{
			&sqs.Message{MessageId: aws.String("i1"), ReceiptHandle: aws.String("r1")},
			&sqs.Message{MessageId: aws.String("i2"), ReceiptHandle: aws.String("r2")},
		},
	}

	// return 2 messages the first time, and an error the second time
	first := m.EXPECT().ReceiveMessage(gomock.Any()).Do(delay).Return(received, nil)
	m.EXPECT().ReceiveMessage(gomock.Any()).Do(delay).Return(nil, assert.AnError).After(first).AnyTimes()
	m.EXPECT().DeleteMessageBatch(gomock.Any()).AnyTimes().Return(&sqs.DeleteMessageBatchOutput{}, nil)

	// count messages processed
	var callCount int64
	fn := func(ctx context.Context, msg string) error {
		atomic.AddInt64(&callCount, 1)
		return nil
	}

	s := &SQSService{Svc: m}
	q := NewConsumer(s, fn)
	q.delayAfterReceiveError = time.Millisecond
	q.DeleteMessageDrainTimeout = 25 * time.Millisecond

	// wait long enough to ensure ReceiveMessage is running
	ctx, _ := context.WithTimeout(context.Background(), 15*time.Millisecond)

	// record number of goroutines before run to ensure no leaks
	ngo := runtime.NumGoroutine()

	// run the fetcher
	q.Run(ctx)

	// ensure no routines were leaked other than the receive messages goroutine (leaks on purpose)
	time.Sleep(time.Millisecond)
	assert.InDelta(t, ngo, runtime.NumGoroutine(), 2, "Should not leak goroutines")

	// ensure all messages were processed
	assert.Equal(t, int64(2), callCount)
}
Example #28
func TestQueueConsumerRunDoesNotFetchMoreMessagesThanItCanProcess(t *testing.T) {
	// log to /dev/null because the deleter is chatty
	log.SetOutput(ioutil.Discard)
	defer func() {
		log.SetOutput(os.Stderr)
	}()

	ctl := gomock.NewController(t)
	defer ctl.Finish()

	m := mock.NewMockSQSAPI(ctl)
	received := &sqs.ReceiveMessageOutput{
		Messages: []*sqs.Message{
			&sqs.Message{MessageId: aws.String("i1"), ReceiptHandle: aws.String("r1")},
			&sqs.Message{MessageId: aws.String("i2"), ReceiptHandle: aws.String("r2")},
			&sqs.Message{MessageId: aws.String("i3"), ReceiptHandle: aws.String("r3")},
			&sqs.Message{MessageId: aws.String("i4"), ReceiptHandle: aws.String("r4")},
			&sqs.Message{MessageId: aws.String("i5"), ReceiptHandle: aws.String("r5")},
			&sqs.Message{MessageId: aws.String("i6"), ReceiptHandle: aws.String("r6")},
			&sqs.Message{MessageId: aws.String("i7"), ReceiptHandle: aws.String("r7")},
			&sqs.Message{MessageId: aws.String("i8"), ReceiptHandle: aws.String("r8")},
			&sqs.Message{MessageId: aws.String("i9"), ReceiptHandle: aws.String("r9")},
			&sqs.Message{MessageId: aws.String("i10"), ReceiptHandle: aws.String("r10")},
		},
	}

	// return 10 messages - the first 10 will never finish so the second batch will block and there will be no third request
	m.EXPECT().ReceiveMessage(gomock.Any()).Return(received, nil).Times(2)
	m.EXPECT().DeleteMessageBatch(gomock.Any()).AnyTimes().Return(&sqs.DeleteMessageBatchOutput{}, nil)
	m.EXPECT().ChangeMessageVisibilityBatch(gomock.Any()).AnyTimes()

	// hang until cancelled
	fn := func(ctx context.Context, msg string) error {
		<-ctx.Done()
		return nil
	}

	s := &SQSService{Svc: m}
	q := NewConsumer(s, fn)
	q.delayAfterReceiveError = time.Millisecond
	q.DeleteMessageDrainTimeout = 25 * time.Millisecond

	// wait long enough to ensure ReceiveMessage would have been invoked multiple times if it was too greedy
	ctx, _ := context.WithTimeout(context.Background(), 500*time.Millisecond)

	// record number of goroutines before run to ensure no leaks
	ngo := runtime.NumGoroutine()

	// run the fetcher
	q.Run(ctx)

	// ensure no routines were leaked
	time.Sleep(time.Millisecond)
	assert.InDelta(t, ngo, runtime.NumGoroutine(), 2, "Should not leak goroutines")
}
Example #29
func main() {
	fmt.Println("CPU number:", runtime.NumCPU())
	fmt.Println("Goroutines start:", runtime.NumGoroutine())
	for i := 0; i < 5; i++ {
		go func(n int) {
			fmt.Println(n, runtime.NumGoroutine())
		}(i)
	}
	time.Sleep(5 * time.Second)
	fmt.Println("Goroutines over:", runtime.NumGoroutine())
}
Example #30
func TestDumpRequest(t *testing.T) {
	numg0 := runtime.NumGoroutine()
	for i, tt := range dumpTests {
		setBody := func() {
			if tt.Body == nil {
				return
			}
			switch b := tt.Body.(type) {
			case []byte:
				tt.Req.Body = ioutil.NopCloser(bytes.NewReader(b))
			case func() io.ReadCloser:
				tt.Req.Body = b()
			default:
				t.Fatalf("Test %d: unsupported Body of %T", i, tt.Body)
			}
		}
		setBody()
		if tt.Req.Header == nil {
			tt.Req.Header = make(http.Header)
		}

		if tt.WantDump != "" {
			setBody()
			dump, err := DumpRequest(&tt.Req, !tt.NoBody)
			if err != nil {
				t.Errorf("DumpRequest #%d: %s", i, err)
				continue
			}
			if string(dump) != tt.WantDump {
				t.Errorf("DumpRequest %d, expecting:\n%s\nGot:\n%s\n", i, tt.WantDump, string(dump))
				continue
			}
		}

		if tt.WantDumpOut != "" {
			setBody()
			dump, err := DumpRequestOut(&tt.Req, !tt.NoBody)
			if err != nil {
				t.Errorf("DumpRequestOut #%d: %s", i, err)
				continue
			}
			if string(dump) != tt.WantDumpOut {
				t.Errorf("DumpRequestOut %d, expecting:\n%s\nGot:\n%s\n", i, tt.WantDumpOut, string(dump))
				continue
			}
		}
	}
	if dg := runtime.NumGoroutine() - numg0; dg > 4 {
		buf := make([]byte, 4096)
		buf = buf[:runtime.Stack(buf, true)]
		t.Errorf("Unexpectedly large number of new goroutines: %d new: %s", dg, buf)
	}
}