Example #1
func BenchmarkHbasePut(b *testing.B) {
	var (
		err error
		h   = NewHBaseClient()
	)
	ch := make(chan int64, 1000000)
	if err = Init("172.16.13.90:9090", 5*time.Second, 200, 200); err != nil {
		b.Errorf("Init failed")
		b.FailNow()
	}
	for j := 0; j < 1000000; j++ {
		k := int64(time.Now().UnixNano())
		ch <- k
	}
	b.ResetTimer()
	b.SetParallelism(8)
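	// SetParallelism(8) makes RunParallel use 8*GOMAXPROCS goroutines.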
	b.RunParallel(func(pb *testing.PB) {
		// Use a per-goroutine needle and a local error to avoid a data race on shared state.
		m := &meta.Needle{}
		for pb.Next() {
			m.Key = <-ch
			if err := h.Put(m); err != nil {
				continue
			}
		}
	})
}
Example #2
func benchPutItemParallel(p, c int, b *testing.B) {
	svc := dynamodb.New(&aws.Config{
		DisableSSL: aws.Bool(true),
	})

	av, err := dynamodbattribute.ConvertToMap(dbItem{Key: "MyKey", Data: "MyData"})
	if err != nil {
		b.Fatal("expect no ConvertToMap errors", err)
	}
	params := &dynamodb.PutItemInput{
		Item:      av,
		TableName: aws.String(testTableName),
	}
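	// Assigning b.N directly pins the iteration count to c instead of letting
	// the testing framework choose it.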
	b.N = c

	b.ResetTimer()
	b.SetParallelism(p)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			_, err := svc.PutItem(params)
			if err != nil {
				b.Error("expect no request errors", err)
			}
		}
	})
}
Example #3
func benchmarkStrconvComparison(i int, b *testing.B) {
	var IDMutex sync.Mutex
	highestID := "122222222"

	if i == 0 {
		for n := 0; n < b.N; n++ {
			postIDint, _ := strconv.Atoi("110312919")

			IDMutex.Lock()
			highestIDint, _ := strconv.Atoi(highestID)
			if postIDint >= highestIDint {
				highestID = "110312919"
			}
			IDMutex.Unlock()
		}
	} else {
		b.SetParallelism(i)
		b.RunParallel(func(pb *testing.PB) {
			for pb.Next() {
				postIDint, _ := strconv.Atoi("110312919")

				IDMutex.Lock()
				highestIDint, _ := strconv.Atoi(highestID)
				if postIDint >= highestIDint {
					highestID = "110312919"
				}
				IDMutex.Unlock()
			}
		})
	}
}
Example #4
func BenchmarkQuotaRequests(b *testing.B) {
	fmt.Println("Starting example client.")
	serverAddr := "127.0.0.1:10990"
	var opts []grpc.DialOption
	opts = append(opts, grpc.WithInsecure())
	conn, err := grpc.Dial(serverAddr, opts...)
	if err != nil {
		grpclog.Fatalf("fail to dial: %v", err)
	}
	defer conn.Close()

	client := pb.NewQuotaServiceClient(conn)

	req := &pb.AllowRequest{
		Namespace:       "test.namespace",
		BucketName:      "one",
		TokensRequested: 1}
	b.ResetTimer()
	b.SetParallelism(8)
	b.RunParallel(
		func(pb *testing.PB) {
			for pb.Next() {
				client.Allow(context.TODO(), req)
			}
		})
}
Example #5
func benchmarkInt64Comparison(i int, b *testing.B) {
	var IDMutex sync.Mutex
	highestID := int64(122222222)

	if i == 0 {
		for n := 0; n < b.N; n++ {
			idNew := int64(132145174)
			IDMutex.Lock()
			if highestID < idNew {
				highestID = idNew
			}
			IDMutex.Unlock()
		}
	} else {
		b.SetParallelism(i)
		b.RunParallel(func(pb *testing.PB) {
			for pb.Next() {
				idNew := int64(132145174)
				IDMutex.Lock()
				if highestID < idNew {
					highestID = idNew
				}
				IDMutex.Unlock()
			}
		})
	}

}
Example #6
File: store.go Project: raiqub/data
func BenchmarkAtomicIncrement(store data.Store, b *testing.B) {
	if err := store.SetLifetime(time.Second*30, data.ScopeAll); err != nil {
		b.Skip("Setting a lifetime for all items is not supported")
	}

	b.ResetTimer()

	b.SetParallelism(50)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			if _, err := store.Increment("key001"); err != nil {
				b.Errorf("Could not increment value: %v", err)
			}
		}
	})

	b.StopTimer()
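	// All b.N parallel increments should now be reflected in the stored value.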

	var result int
	if err := store.Get("key001", &result); err != nil {
		b.Errorf("Could not get stored value: %v", err)
	}
	if result != b.N {
		b.Errorf("Unexpected value: got %d instead of %d", result, b.N)
	}
}
Example #7
func BThreadsAtomicComplex(b *testing.B, sm api.StoredMap) {
	l := len(UniqKey)
	inserter := func(key string) {
		sm.Atomic(func(m api.Mapper) {
			if value, found := m.Find(key); found {
				_ = value.(string)
				return
			}
			m.SetKey(key)
			m.Update(key)
		})
	}
	b.SetParallelism(CntBenchWorks)
	b.ReportAllocs()
	b.SetBytes(2)
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		var k string
		var i int
		for pb.Next() {
			k = UniqKey[i%l]
			inserter(k)
			i++
		}
	})
}
Example #8
func BenchmarkNetrpcByteSlice(b *testing.B) {
	connC, connS := getTcpPipe(b)
	defer connC.Close()
	defer connS.Close()

	s := rpc.NewServer()
	if err := s.Register(&NetrpcService{}); err != nil {
		b.Fatalf("Error when registering rpc service: %s", err)
	}
	go s.ServeConn(connS)

	c := rpc.NewClient(connC)
	defer c.Close()

	req := []byte("byte slice byte slice aaa bbb ccc foobar")
	b.SetParallelism(250)
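	// The net/rpc client is safe for concurrent use, so 250*GOMAXPROCS goroutines
	// multiplex their calls over this single connection.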
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		var resp []byte
		for i := 0; pb.Next(); i++ {
			if err := c.Call("NetrpcService.ByteSlice", req, &resp); err != nil {
				b.Fatalf("Unexpected error when calling NetrpcService.ByteSlice(%q): %s", req, err)
			}
			if !bytes.Equal(resp, req) {
				b.Fatalf("Unexpected response: %q. Expected %q", resp, req)
			}
		}
	})
}
Example #9
func BenchmarkBatch200RandomWritesParallel10(b *testing.B) {

	b.SetParallelism(10)

	b.RunParallel(func(pb *testing.PB) {

		for pb.Next() {
			// Build a fresh batch of 200 random rows for this iteration.
			data := make([]map[string]interface{}, 0, 200)
			for is := 0; is < 200; is++ {
				r := rand.New(rand.NewSource(time.Now().UnixNano()))
				cid := map[string]interface{}{
					"customer_id": strconv.FormatInt(r.Int63(), 10),
				}
				data = append(data, cid)
			}

			// Build the insert query
			term := DB("benchmarks").Table("benchmarks").Insert(data)

			// Insert the new batch into the database
			_, err := term.RunWrite(session, RunOpts{
				MinBatchRows: 200,
				MaxBatchRows: 200,
			})
			if err != nil {
				b.Errorf("insert failed [%s] ", err)
			}
		}
	})

}
Example #10
func BenchmarkSequentialWritesParallel10(b *testing.B) {

	var mu sync.Mutex
	si := 0

	// p*GOMAXPROCS
	b.SetParallelism(10)

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			mu.Lock()
			si++
			mu.Unlock()

			data := map[string]interface{}{
				"customer_id": si,
			}

			// Insert the new item into the database
			_, err := Table(bTableName).Insert(data).RunWrite(bSess)
			if err != nil {
				b.Errorf("insert failed [%s] ", err)
				return
			}
		}
	})

}
Example #11
func Benchmark_TerrorParallelGetting(b *testing.B) {
	errs := []Terror{
		E_IO{},
		E_Network{},
		TE0{},
		TE1{},
		TE2{},
		TE3{},
		TE4{},
		TE5{},
		TE6{},
		TE7{},
		TE8{},
		TE9{},
	}
	b.SetParallelism(4)
	b.RunParallel(func(pb *testing.PB) {
		var which int
		for pb.Next() {
			which = (which + 1) % 12
			err := errs[which]
			hier := GetHierarchy(reflect.TypeOf(err))
			_ = hier
		}
	})
}
Example #12
func BenchmarkMutexNoSpin(b *testing.B) {
	// This benchmark models a situation where spinning in the mutex should be
	// unprofitable, and lets us confirm that spinning does no harm.
	// To achieve this we create an excess of goroutines, most of which do local work.
	// These goroutines yield during local work, so that switching from
	// a blocked goroutine to other goroutines is profitable.
	// In fact, this benchmark still triggers some spinning in the mutex.
	var m Mutex
	var acc0, acc1 uint64
	b.SetParallelism(4)
	b.RunParallel(func(pb *testing.PB) {
		c := make(chan bool)
		var data [4 << 10]uint64
		for i := 0; pb.Next(); i++ {
			if i%4 == 0 {
				m.Lock()
				acc0 -= 100
				acc1 += 100
				m.Unlock()
			} else {
				for i := 0; i < len(data); i += 4 {
					data[i]++
				}
				// Elaborate way to say runtime.Gosched
				// that does not put the goroutine onto global runq.
				go func() {
					c <- true
				}()
				<-c
			}
		}
	})
}
Example #13
func BenchmarkRemoteClient(t *testing.B) {
	t.SetParallelism(4)

	t.RunParallel(func(pb *testing.PB) {

		// pb.Next already distributes the benchmark's b.N iterations across the
		// worker goroutines, so no extra loop over t.N is needed here.
		for pb.Next() {
			p := packet.NewPacket(1, []byte("echo"))
			tmp := clientManager.FindRemoteClients([]string{"a"}, func(groupid string, c *client.RemotingClient) bool {
				return false
			})

			_, err := tmp["a"][0].WriteAndGet(*p, 500*time.Millisecond)
			clientf.WriteFlow.Incr(1)
			if nil != err {
				t.Fail()
				log.Printf("WAIT RESPONSE FAIL|%s\n", err)
			} else {
				// log.Printf("WAIT RESPONSE SUCC|%s\n", string(resp.([]byte)))
			}
		}

	})
}
Example #14
func benchmarkMutex(b *testing.B, slack, work bool) {
	client := etcd.NewClient([]string{"http://127.0.0.1:4001"})
	client.Delete(key, true)

	mu := NewMutexFromClient(client, key, 0)
	if slack {
		b.SetParallelism(10)
	}
	b.RunParallel(func(pb *testing.PB) {
		foo := 0
		for pb.Next() {
			err := mu.Lock()
			if err != nil {
				b.Fatal("could not acquire lock, is etcd running?", err)
			}

			mu.Unlock()
			if work {
				for i := 0; i < 100; i++ {
					foo *= 2
					foo /= 2
				}
			}
		}
		_ = foo
	})
}
Example #15
func BenchmarkManyConcurrentQueries(b *testing.B) {
	b.ReportAllocs()
	// To see lock contention in Go 1.4, roughly 16 cores and 128 goroutines are required.
	const parallelism = 16

	db := newTestDB(b, "magicquery")
	defer closeDB(b, db)
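	// Size the idle pool to one connection per benchmark goroutine
	// (parallelism * GOMAXPROCS).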
	db.SetMaxIdleConns(runtime.GOMAXPROCS(0) * parallelism)

	stmt, err := db.Prepare("SELECT|magicquery|op|op=?,millis=?")
	if err != nil {
		b.Fatal(err)
	}
	defer stmt.Close()

	b.SetParallelism(parallelism)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			rows, err := stmt.Query("sleep", 1)
			if err != nil {
				b.Error(err)
				return
			}
			rows.Close()
		}
	})
}
Example #16
func benchmarkClientServerParallel(b *testing.B, parallelism int) {
	b.ReportAllocs()
	b.StopTimer()
	handler, err := NewHandler(func(in Ping, out *Pong) error {
		if in.Data != "ping" {
			b.Fatalf("expected ping, got %q", in.Data)
		}
		out.Data = "pong"
		return nil
	})
	if err != nil {
		b.Fatalf("NewHandler: %v", err)
	}

	s := npctest.NewServer(handler)
	defer s.Close()

	c := NewClient([]string{s.Listener.Addr().String()})
	defer c.Close()
	b.SetParallelism(parallelism)
	b.StartTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			var pong Pong
			err := c.Call(Ping{"ping"}, &pong)
			if err != nil {
				b.Fatalf("Call: %v", err)
			}
			if pong.Data != "pong" {
				b.Fatalf("expected pong, got %q", pong.Data)
			}
		}
	})
}
Example #17
func BenchmarkConnStress(b *testing.B) {
	const workers = 16

	cluster := createCluster()
	cluster.NumConns = 1
	session := createSessionFromCluster(cluster, b)
	defer session.Close()

	if err := createTable(session, "CREATE TABLE IF NOT EXISTS conn_stress (id int primary key)"); err != nil {
		b.Fatal(err)
	}

	var seed uint64
	writer := func(pb *testing.PB) {
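		// The shadowed seed gives each worker goroutine its own unique value.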
		seed := atomic.AddUint64(&seed, 1)
		var i uint64 = 0
		for pb.Next() {
			if err := session.Query("insert into conn_stress (id) values (?)", i*seed).Exec(); err != nil {
				b.Error(err)
				return
			}
			i++
		}
	}

	b.SetParallelism(workers)
	b.RunParallel(writer)
}
Example #18
func BenchmarkConnRoutingKey(b *testing.B) {
	const workers = 16

	cluster := createCluster()
	cluster.NumConns = 1
	cluster.PoolConfig.HostSelectionPolicy = TokenAwareHostPolicy(RoundRobinHostPolicy())
	session := createSessionFromCluster(cluster, b)
	defer session.Close()

	if err := createTable(session, "CREATE TABLE IF NOT EXISTS routing_key_stress (id int primary key)"); err != nil {
		b.Fatal(err)
	}

	var seed uint64
	writer := func(pb *testing.PB) {
		seed := atomic.AddUint64(&seed, 1)
		var i uint64 = 0
		query := session.Query("insert into routing_key_stress (id) values (?)")

		for pb.Next() {
			if _, err := query.Bind(i * seed).GetRoutingKey(); err != nil {
				b.Error(err)
				return
			}
			i++
		}
	}

	b.SetParallelism(workers)
	b.RunParallel(writer)
}
Example #19
func BenchmarkNetrpcInt(b *testing.B) {
	connC, connS := getTcpPipe(b)
	defer connC.Close()
	defer connS.Close()

	s := rpc.NewServer()
	if err := s.Register(&NetrpcService{}); err != nil {
		b.Fatalf("Error when registering rpc service: %s", err)
	}
	go s.ServeConn(connS)

	c := rpc.NewClient(connC)
	defer c.Close()

	b.SetParallelism(250)
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		var x int
		for i := 0; pb.Next(); i++ {
			if err := c.Call("NetrpcService.Int", i, &x); err != nil {
				b.Fatalf("Unexpected error when calling NetrpcService.Int(%d): %s", i, err)
			}
			if i != x {
				b.Fatalf("Unexpected response: %d. Expected %d", x, i)
			}
		}
	})
}
Example #20
func BenchmarkSequentialSoftWritesParallel10(b *testing.B) {

	var mu sync.Mutex
	si := 0

	// p*GOMAXPROCS
	b.SetParallelism(10)

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			mu.Lock()
			si++
			mu.Unlock()

			data := map[string]interface{}{
				"customer_id": si,
			}

			opts := InsertOpts{Durability: "soft"}

			// Insert the new item into the database
			_, err := Table("benchmarks").Insert(data, opts).RunWrite(session)
			if err != nil {
				b.Errorf("insert failed [%s] ", err)
				return
			}
		}
	})

}
Example #21
func benchmarkNetHTTPClientEndToEndBigResponseInmemory(b *testing.B, parallelism int) {
	bigResponse := createFixedBody(1024 * 1024)
	h := func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "text/plain")
		w.Write(bigResponse)
	}
	ln := fasthttputil.NewInmemoryListener()

	ch := make(chan struct{})
	go func() {
		if err := http.Serve(ln, http.HandlerFunc(h)); err != nil && !strings.Contains(
			err.Error(), "use of closed network connection") {
			b.Fatalf("error when serving requests: %s", err)
		}
		close(ch)
	}()

	c := &http.Client{
		Transport: &http.Transport{
			Dial:                func(_, _ string) (net.Conn, error) { return ln.Dial() },
			MaxIdleConnsPerHost: parallelism * runtime.GOMAXPROCS(-1),
		},
		Timeout: 5 * time.Second,
	}

	requestURI := "/foo/bar?baz=123"
	url := "http://unused.host" + requestURI
	b.SetParallelism(parallelism)
	b.RunParallel(func(pb *testing.PB) {
		req, err := http.NewRequest("GET", url, nil)
		if err != nil {
			b.Fatalf("unexpected error: %s", err)
		}
		for pb.Next() {
			resp, err := c.Do(req)
			if err != nil {
				b.Fatalf("unexpected error: %s", err)
			}
			if resp.StatusCode != http.StatusOK {
				b.Fatalf("unexpected status code: %d. Expecting %d", resp.StatusCode, http.StatusOK)
			}
			body, err := ioutil.ReadAll(resp.Body)
			resp.Body.Close()
			if err != nil {
				b.Fatalf("unexpected error when reading response body: %s", err)
			}
			if !bytes.Equal(bigResponse, body) {
				b.Fatalf("unexpected response %q. Expecting %q", body, bigResponse)
			}
		}
	})

	ln.Close()
	select {
	case <-ch:
	case <-time.After(time.Second):
		b.Fatalf("server wasn't stopped")
	}
}
Example #22
func Benchmark_Chat_Parallel(b *testing.B) {
	b.SetParallelism(10)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			performChat(b, peerClientConn)
		}
	})
}
Example #23
func BenchmarkVolumeWrite(b *testing.B) {
	var (
		v     *Volume
		err   error
		file  = "./test/testb2"
		ifile = "./test/testb2.idx"
		data  = make([]byte, _16kb) // 16kb
	)
	os.Remove(file)
	os.Remove(ifile)
	defer os.Remove(file)
	defer os.Remove(ifile)
	if _, err = rand.Read(data); err != nil {
		b.Errorf("rand.Read() error(%v)", err)
		b.FailNow()
	}
	if v, err = NewVolume(1, file, ifile, testConf); err != nil {
		b.Errorf("NewVolume() error(%v)", err)
		b.FailNow()
	}
	defer v.Close()
	b.SetParallelism(8)
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		var (
			i, j int
			ts   int32
			t    int64
			err1 error
			n    *needle.Needle
			ns   = make([]needle.Needle, 9)
			buf  = make([]byte, 163840) // 160kb, room for 9 needles
		)
		for i = 0; i < 9; i++ {
			t = mrand.Int63()
			n = &ns[i]
			n.Init(t, 1, data)
			n.Write(buf[ts:])
			ts += n.TotalSize
		}
		for pb.Next() {
			for j = 0; j < 9; j++ {
				t = mrand.Int63()
				n = &ns[j]
				n.Key = t
				binary.BigEndian.PutInt64(buf[n.TotalSize+needle.KeyOffset:], n.Key)
			}
			if err1 = v.Write(ns, buf[:ts]); err1 != nil {
				b.Errorf("Add() error(%v)", err1)
				v.Unlock()
				b.FailNow()
			}
			b.SetBytes(int64(ts))
		}
	})
	os.Remove(file)
	os.Remove(ifile)
}
Example #24
func BenchmarkVolumeWrite(b *testing.B) {
	var (
		v     *Volume
		err   error
		file  = "./test/testb2"
		ifile = "./test/testb2.idx"
		data  = make([]byte, 1*1024)
	)
	os.Remove(file)
	os.Remove(ifile)
	defer os.Remove(file)
	defer os.Remove(ifile)
	if _, err = rand.Read(data); err != nil {
		b.Errorf("rand.Read() error(%v)", err)
		b.FailNow()
	}
	if v, err = NewVolume(1, file, ifile); err != nil {
		b.Errorf("NewVolume() error(%v)", err)
		b.FailNow()
	}
	defer v.Close()
	b.SetParallelism(8)
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		var (
			// i is per-goroutine to avoid a data race between parallel workers.
			i    int
			t    int64
			err1 error
			n    = &Needle{}
		)
		if err1 = n.Parse(t, t, data); err1 != nil {
			b.Errorf("n.Parse() error(%v)", err1)
			b.FailNow()
		}
		for pb.Next() {
			v.Lock()
			for i = 0; i < 9; i++ {
				t = mrand.Int63()
				n.Key = t
				n.Cookie = t
				if err1 = v.Write(n); err1 != nil {
					b.Errorf("Add() error(%v)", err1)
					v.Unlock()
					b.FailNow()
				}
				if err1 = v.Flush(); err1 != nil {
					b.Errorf("Flush() error(%v)", err1)
					v.Unlock()
					b.FailNow()
				}
			}
			v.Unlock()
			b.SetBytes(int64(n.TotalSize) * 9)
		}
	})
	os.Remove(file)
	os.Remove(ifile)
}
Example #25
func benchmarkPipelineClient(b *testing.B, parallelism int) {
	h := func(ctx *RequestCtx) {
		ctx.WriteString("foobar")
	}
	ln := fasthttputil.NewInmemoryListener()

	ch := make(chan struct{})
	go func() {
		if err := Serve(ln, h); err != nil {
			b.Fatalf("error when serving requests: %s", err)
		}
		close(ch)
	}()

	var clients []*PipelineClient
	for i := 0; i < runtime.GOMAXPROCS(-1); i++ {
		c := &PipelineClient{
			Dial:               func(addr string) (net.Conn, error) { return ln.Dial() },
			ReadBufferSize:     1024 * 1024,
			WriteBufferSize:    1024 * 1024,
			MaxPendingRequests: parallelism,
		}
		clients = append(clients, c)
	}

	clientID := uint32(0)
	requestURI := "/foo/bar?baz=123"
	url := "http://unused.host" + requestURI
	b.SetParallelism(parallelism)
	b.RunParallel(func(pb *testing.PB) {
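		// Each worker goroutine picks one of the pipeline clients round-robin.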
		n := atomic.AddUint32(&clientID, 1)
		c := clients[n%uint32(len(clients))]
		var req Request
		req.SetRequestURI(url)
		var resp Response
		for pb.Next() {
			if err := c.Do(&req, &resp); err != nil {
				b.Fatalf("unexpected error: %s", err)
			}
			if resp.StatusCode() != StatusOK {
				b.Fatalf("unexpected status code: %d. Expecting %d", resp.StatusCode(), StatusOK)
			}
			body := resp.Body()
			if string(body) != "foobar" {
				b.Fatalf("unexpected response %q. Expecting %q", body, "foobar")
			}
		}
	})

	ln.Close()
	select {
	case <-ch:
	case <-time.After(time.Second):
		b.Fatalf("server wasn't stopped")
	}
}
Example #26
func BenchmarkRWMutexW(b *testing.B) {
	b.SetParallelism(10)
	var mtx sync.RWMutex
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			mtx.Lock()
			mtx.Unlock()
		}
	})
}
Example #27
func BenchmarkWithAtomic(b *testing.B) {
	var f float64

	b.SetParallelism(1000)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			AddFloat64(&f, 0.1)
		}
	})
}
Example #28
func BenchmarkLockW(b *testing.B) {
	b.SetParallelism(10)
	var l lock
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			if l.TryLock() {
				l.WUnlock()
			}
		}
	})
}
Example #29
func BenchmarkParallelSet1024(b *testing.B) {
	b.StopTimer()
	h := NewHashMap()
	i := 0

	b.SetParallelism(1024)

	b.StartTimer()

	ParallelSet(b, i, h)
}
Example #30
func benchmarkNetHTTPClientGetEndToEndTCP(b *testing.B, parallelism int) {
	addr := "127.0.0.1:8542"

	ln, err := net.Listen("tcp4", addr)
	if err != nil {
		b.Fatalf("cannot listen %q: %s", addr, err)
	}

	ch := make(chan struct{})
	go func() {
		if err := http.Serve(ln, http.HandlerFunc(nethttpEchoHandler)); err != nil && !strings.Contains(
			err.Error(), "use of closed network connection") {
			b.Fatalf("error when serving requests: %s", err)
		}
		close(ch)
	}()

	c := &http.Client{
		Transport: &http.Transport{
			MaxIdleConnsPerHost: parallelism * runtime.GOMAXPROCS(-1),
		},
	}

	requestURI := "/foo/bar?baz=123"
	url := "http://" + addr + requestURI
	b.SetParallelism(parallelism)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			resp, err := c.Get(url)
			if err != nil {
				b.Fatalf("unexpected error: %s", err)
			}
			if resp.StatusCode != http.StatusOK {
				b.Fatalf("unexpected status code: %d. Expecting %d", resp.StatusCode, http.StatusOK)
			}
			body, err := ioutil.ReadAll(resp.Body)
			resp.Body.Close()
			if err != nil {
				b.Fatalf("unexpected error when reading response body: %s", err)
			}
			if string(body) != requestURI {
				b.Fatalf("unexpected response %q. Expecting %q", body, requestURI)
			}
		}
	})

	ln.Close()
	select {
	case <-ch:
	case <-time.After(time.Second):
		b.Fatalf("server wasn't stopped")
	}
}