func BenchmarkHeaderWriteSubset(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		buf.Reset()
		testHeader.WriteSubset(&buf, nil)
	}
}
// Benchmark_MoneyNewGetf	2000000	771 ns/op	208 B/op	7 allocs/op => Go 1.4.2
// Benchmark_MoneyNewGetf	3000000	404 ns/op	160 B/op	3 allocs/op => Go 1.5.0
func Benchmark_MoneyNewGetf(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		c := money.New(money.WithPrecision(100)).Setf(-123456.789)
		benchmarkMoneyNewGetf = c.Getf()
	}
}
func BenchmarkConcurrentRandom(b *testing.B) {
	b.ReportAllocs()
	ct := new(concurrentRandomTest)
	for i := 0; i < b.N; i++ {
		doConcurrentTest(b, ct)
	}
}
func BenchmarkClientServer(b *testing.B) {
	b.ReportAllocs()
	b.StopTimer()
	handler, err := NewHandler(func(in Ping, out *Pong) error {
		if in.Data != "ping" {
			b.Fatalf("expected ping, got %q", in.Data)
		}
		out.Data = "pong"
		return nil
	})
	if err != nil {
		b.Fatalf("NewHandler: %v", err)
	}

	s := npctest.NewServer(handler)
	defer s.Close()

	c := NewClient([]string{s.Listener.Addr().String()})
	defer c.Close()

	b.StartTimer()

	for i := 0; i < b.N; i++ {
		var pong Pong
		err = c.Call(Ping{"ping"}, &pong)
		if err != nil {
			b.Fatalf("Call: %v", err)
		}
		if pong.Data != "pong" {
			b.Fatalf("expected pong, got %q", pong.Data)
		}
	}
}
func BenchmarkHandlerCall(b *testing.B) {
	fn := func(params *jsonrpc2.EmptyParams) (interface{}, error) {
		return nil, nil
	}
	handler := handler{
		method: reflect.ValueOf(fn),
		params: reflect.New(reflect.ValueOf(fn).Type().In(0).Elem()).Interface().(jsonrpc2.Params),
	}

	data, _ := json.Marshal(&jsonrpc2.EmptyParams{})
	params, err := handler.DecodeParams(data)
	if err != nil {
		b.Fatal(err.Error())
	}

	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		handler.Call(params)
	}
}
func BenchmarkNew(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		hdrhistogram.New(1, 120000, 3) // this could track 1ms-2min
	}
}
func benchmarkDecodeFile(path string, b *testing.B) {
	b.ReportAllocs()
	p := "fixtures/" + path
	for n := 0; n < b.N; n++ {
		DecodeFile(p)
	}
}
func BenchmarkManyConcurrentQueries(b *testing.B) {
	b.ReportAllocs()
	// To see lock contention in Go 1.4, ~16 cores and ~128 goroutines are required.
	const parallelism = 16

	db := newTestDB(b, "magicquery")
	defer closeDB(b, db)
	db.SetMaxIdleConns(runtime.GOMAXPROCS(0) * parallelism)

	stmt, err := db.Prepare("SELECT|magicquery|op|op=?,millis=?")
	if err != nil {
		b.Fatal(err)
	}
	defer stmt.Close()

	b.SetParallelism(parallelism)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			rows, err := stmt.Query("sleep", 1)
			if err != nil {
				b.Error(err)
				return
			}
			rows.Close()
		}
	})
}
func benchmarkConnections(b *testing.B) {
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		cbConnections(false)
	}
}
func BenchmarkSendWithContext(b *testing.B) {
	incrHandler := IncrHandler{}
	destination := HandlerFunc(func(ctx context.Context, rw http.ResponseWriter, r *http.Request) {
		b.StopTimer()
		rw.Write(ctx.Value("towrite").([]byte))
		b.StartTimer()
	})
	ctx := context.Background()
	h := NewHandler(ctx, HandlerFunc(destination)).Add(NextConstructor(addTowrite), HTTPConstructor(incrHandler.makeHTTP))

	b.ReportAllocs()
	b.ResetTimer()
	b.StopTimer()
	for j := 0; j < b.N; j++ {
		rw := httptest.NewRecorder()
		req, _ := http.NewRequest("POST", "/", nil)
		req.Header.Add("towrite", fmt.Sprintf("%d", j))
		b.StartTimer()
		h.ServeHTTP(rw, req)
		b.StopTimer()
		assert.Equal(b, fmt.Sprintf("%d", j), rw.Body.String())
	}
}
func benchmarkServerQueryCount(b *testing.B, pointN int) {
	s := OpenDefaultServer(NewConfig())
	defer s.Close()

	// Write data into server.
	var buf bytes.Buffer
	for i := 0; i < pointN; i++ {
		fmt.Fprintf(&buf, `cpu value=100 %d`, i+1)
		if i != pointN-1 {
			fmt.Fprint(&buf, "\n")
		}
	}
	s.MustWrite("db0", "rp0", buf.String(), nil)

	// Query simple count from server.
	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		if results, err := s.Query(`SELECT count(value) FROM db0.rp0.cpu`); err != nil {
			b.Fatal(err)
		} else if results != fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",%d]]}]}]}`, pointN) {
			b.Fatalf("unexpected result: %s", results)
		}
	}
}
// Benchmark_WithInitStoreByFormCookie-4	  3000	481881 ns/op	189103 B/op	232 allocs/op => with debug enabled
// Benchmark_WithInitStoreByFormCookie-4	300000	  4797 ns/op	  1016 B/op	 16 allocs/op => debug disabled
func Benchmark_WithInitStoreByFormCookie(b *testing.B) {
	store.PkgLog.SetLevel(log.StdLevelInfo)
	b.ReportAllocs()
	wantStoreCode := "nz"
	ctx := store.NewContextReader(context.Background(), getInitializedStoreService(scope.Option{Website: scope.MockID(2)}))
	mw := store.WithInitStoreByFormCookie()(benchValidationHandler(b, wantStoreCode))
	rec := httptest.NewRecorder()
	req, err := http.NewRequest(httputils.MethodGet, "https://corestore.io/store/list/", nil)
	if err != nil {
		b.Fatal(err)
	}
	req.AddCookie(&http.Cookie{
		Name:  store.ParamName,
		Value: wantStoreCode,
	})
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if err := mw.ServeHTTPContext(ctx, rec, req); err != nil {
			b.Error(err)
		}
	}
}
// Benchmark_WithValidateBaseUrl-4	  3000	489089 ns/op	188333 B/op	272 allocs/op => with debug enabled
// Benchmark_WithValidateBaseUrl-4	200000	  8925 ns/op	  2924 B/op	 49 allocs/op => debug disabled
func Benchmark_WithValidateBaseUrl(b *testing.B) {
	// TODO: there is room for optimization with debugging disabled; too many allocs.
	store.PkgLog.SetLevel(log.StdLevelInfo)
	req, err := http.NewRequest(httputils.MethodGet, "https://corestore.io/customer/comments/view?id=1916#tab=ratings", nil)
	if err != nil {
		b.Fatal(err)
	}
	finalHandler := store.WithValidateBaseURL(middlewareConfigReader)(ctxhttp.HandlerFunc(func(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
		return errors.New("This handler should not be called!")
	}))
	want := "https://www.corestore.io/customer/comments/view?id=1916#tab=ratings"
	rec := httptest.NewRecorder()
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if err := finalHandler.ServeHTTPContext(middlewareCtxStoreService, rec, req); err != nil {
			b.Error(err)
		}
		if rec.HeaderMap.Get("Location") != want {
			b.Errorf("Have: %s\nWant: %s", rec.HeaderMap.Get("Location"), want)
		}
		rec.HeaderMap = nil
	}
}
func BenchmarkIsNotIoError(b *testing.B) {
	b.ReportAllocs()
	err := errors.New("blah")
	for i := 0; i < b.N; i++ {
		IsIoError(err)
	}
}
func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
	// Create the database in memory or in a temporary directory.
	var db ethdb.Database
	if !disk {
		db, _ = ethdb.NewMemDatabase()
	} else {
		dir, err := ioutil.TempDir("", "exp-core-bench")
		if err != nil {
			b.Fatalf("cannot create temporary directory: %v", err)
		}
		defer os.RemoveAll(dir)
		db, err = ethdb.NewLDBDatabase(dir, 0)
		if err != nil {
			b.Fatalf("cannot create temporary database: %v", err)
		}
		defer db.Close()
	}

	// Generate a chain of b.N blocks using the supplied block
	// generator function.
	genesis := WriteGenesisBlockForTesting(db, GenesisAccount{benchRootAddr, benchRootFunds})
	chain, _ := GenerateChain(genesis, db, b.N, gen)

	// Time the insertion of the new chain.
	// State and blocks are stored in the same DB.
	evmux := new(event.TypeMux)
	chainman, _ := NewBlockChain(db, FakePow{}, evmux)
	defer chainman.Stop()
	b.ReportAllocs()
	b.ResetTimer()
	if i, err := chainman.InsertChain(chain); err != nil {
		b.Fatalf("insert error (block %d): %v\n", i, err)
	}
}
func BenchmarkSharedWriterReserveOne(b *testing.B) {
	defer time.Sleep(DisruptorCleanup)
	runtime.GOMAXPROCS(2)
	defer runtime.GOMAXPROCS(1)

	controller := disruptor.
		Configure(RingBufferSize).
		WithConsumerGroup(SampleConsumer{}).
		BuildShared()
	controller.Start()
	defer controller.Stop()
	writer := controller.Writer()

	iterations := int64(b.N)
	sequence := disruptor.InitialSequenceValue

	b.ReportAllocs()
	b.ResetTimer()

	for sequence < iterations {
		sequence = writer.Reserve(ReserveOne)
		ringBuffer[sequence&RingBufferMask] = sequence
		writer.Commit(sequence, sequence)
	}

	b.StopTimer()
}
func BenchmarkUpdateOneFile(b *testing.B) {
	local0 := fileList{
		protocol.FileInfo{Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(1)},
		protocol.FileInfo{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(2)},
		protocol.FileInfo{Name: "c", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(3)},
		protocol.FileInfo{Name: "d", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(4)},
		// A longer name is more realistic and causes more allocations
		protocol.FileInfo{Name: "zajksdhaskjdh/askjdhaskjdashkajshd/kasjdhaskjdhaskdjhaskdjash/dkjashdaksjdhaskdjahskdjh", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(8)},
	}

	ldb, err := db.Open("testdata/benchmarkupdate.db")
	if err != nil {
		b.Fatal(err)
	}
	defer func() {
		ldb.Close()
		os.RemoveAll("testdata/benchmarkupdate.db")
	}()

	m := db.NewFileSet("test", ldb)
	m.Replace(protocol.LocalDeviceID, local0)
	l := local0[4:5]

	for i := 0; i < b.N; i++ {
		l[0].Version = l[0].Version.Update(myID)
		m.Update(protocol.LocalDeviceID, local0)
	}

	b.ReportAllocs()
}
func BenchmarkSharedWriterReserveMany(b *testing.B) {
	defer time.Sleep(DisruptorCleanup)
	runtime.GOMAXPROCS(2)
	defer runtime.GOMAXPROCS(1)

	controller := disruptor.
		Configure(RingBufferSize).
		WithConsumerGroup(SampleConsumer{}).
		BuildShared()
	controller.Start()
	defer controller.Stop()
	writer := controller.Writer()

	iterations := int64(b.N)

	b.ReportAllocs()
	b.ResetTimer()

	previous, current := disruptor.InitialSequenceValue, disruptor.InitialSequenceValue
	for current < iterations {
		current = writer.Reserve(ReserveMany)
		for i := previous + 1; i <= current; i++ {
			ringBuffer[i&RingBufferMask] = i
		}
		writer.Commit(previous+1, current)
		previous = current
	}

	b.StopTimer()
}
func BenchmarkParseKeyHash(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		ParseIdCookie("4ed44ed44ed44ed4c8116e41")
	}
}
func BenchmarkNodeSetMerge(b *testing.B) {
	n, other := report.NodeSet{}, report.NodeSet{}
	for i := 0; i < 600; i++ {
		n = n.Add(
			report.MakeNode().WithID(fmt.Sprint(i)).WithLatests(map[string]string{
				"a": "1",
				"b": "2",
			}),
		)
	}
	for i := 400; i < 1000; i++ {
		other = other.Add(
			report.MakeNode().WithID(fmt.Sprint(i)).WithLatests(map[string]string{
				"c": "1",
				"d": "2",
			}),
		)
	}

	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		benchmarkResult = n.Merge(other)
	}
}
func runBenchmarkPub(b *testing.B, store string, msgSize int64) {
	if gw == nil {
		gw = newGatewayForTest(b, store)
	}

	b.ReportAllocs()
	httpReqRaw := strings.TrimSpace(fmt.Sprintf(`
POST /topics/foobar/v1 HTTP/1.1
Host: localhost:9191
User-Agent: Go-http-client/1.1
Content-Length: %d
Content-Type: application/x-www-form-urlencoded
Appid: myappid
Pubkey: mypubkey
Accept-Encoding: gzip`, msgSize)) + "\r\n\r\n"
	b.SetBytes(msgSize + int64(len(httpReqRaw)))

	param := fasthttprouter.Params{
		fasthttprouter.Param{Key: "topic", Value: "foobar"},
		fasthttprouter.Param{Key: "ver", Value: "v1"},
	}
	req := &fasthttp.Request{}
	req.SetRequestURI("/topics/foobar/v1")
	ctx := &fasthttp.RequestCtx{}
	ctx.Init(req, nil, nil)
	ctx.Request.SetBodyString(strings.Repeat("X", int(msgSize)))
	ctx.Request.Header.SetMethod("POST")

	for i := 0; i < b.N; i++ {
		ctx.Response.Reset()
		gw.pubHandler(ctx, param)
	}
}
func BenchmarkRoundTrip(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		testdata := random(1 << 16)
		testRoundTrip(b, testdata, 8, 4)
	}
}
func benchmarkClientServerParallel(b *testing.B, parallelism int) {
	b.ReportAllocs()
	b.StopTimer()
	handler, err := NewHandler(func(in Ping, out *Pong) error {
		if in.Data != "ping" {
			b.Fatalf("expected ping, got %q", in.Data)
		}
		out.Data = "pong"
		return nil
	})
	if err != nil {
		b.Fatalf("NewHandler: %v", err)
	}

	s := npctest.NewServer(handler)
	defer s.Close()

	c := NewClient([]string{s.Listener.Addr().String()})
	defer c.Close()

	b.SetParallelism(parallelism)
	b.StartTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			var pong Pong
			// Use a local err to avoid a data race on the outer variable
			// across the parallel goroutines.
			err := c.Call(Ping{"ping"}, &pong)
			if err != nil {
				b.Fatalf("Call: %v", err)
			}
			if pong.Data != "pong" {
				b.Fatalf("expected pong, got %q", pong.Data)
			}
		}
	})
}
// BenchmarkAcceptEmptyBlocks is a wrapper for benchmarkAcceptEmptyBlocks,
// which handles error catching.
func BenchmarkAcceptEmptyBlocks(b *testing.B) {
	b.ReportAllocs()
	err := benchmarkAcceptEmptyBlocks(b)
	if err != nil {
		b.Fatal(err)
	}
}
func BenchmarkConcurrentTxStmtQuery(b *testing.B) {
	b.ReportAllocs()
	ct := new(concurrentTxStmtQueryTest)
	for i := 0; i < b.N; i++ {
		doConcurrentTest(b, ct)
	}
}
// Benchmark utility function for ObjectLayer.PutObject().
// Creates the object layer setup (MakeBucket), then runs the PutObject benchmark.
func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
	var err error
	// Obtain a random bucket name.
	bucket := getRandomBucketName()
	// Create the bucket.
	err = obj.MakeBucket(bucket)
	if err != nil {
		b.Fatal(err)
	}

	// Generate text data of a size equal to the object size.
	textData := generateBytesData(objSize)
	// Generate the md5sum of the data; the md5sum of the data to be
	// written is required as input for PutObject.
	metadata := make(map[string]string)
	metadata["md5Sum"] = getMD5Hash(textData)
	sha256sum := ""

	// Report the number of allocations and bytes allocated per op.
	b.ReportAllocs()
	// The actual benchmark for PutObject starts here. Reset the benchmark timer.
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// Insert the object.
		objInfo, err := obj.PutObject(bucket, "object"+strconv.Itoa(i), int64(len(textData)), bytes.NewBuffer(textData), metadata, sha256sum)
		if err != nil {
			b.Fatal(err)
		}
		if objInfo.MD5Sum != metadata["md5Sum"] {
			b.Fatalf("Write no: %d: MD5Sum mismatch during object write into the bucket: expected %s, got %s", i+1, metadata["md5Sum"], objInfo.MD5Sum)
		}
	}
	// Benchmark ends here. Stop the timer.
	b.StopTimer()
}
func BenchmarkMiddleware(b *testing.B) {
	g := New()
	g.Sub("/a", func(a *Composer) {
		a.Use().Func(func(rw http.ResponseWriter, r *http.Request) {})
		a.Use().Func(func(rw http.ResponseWriter, r *http.Request) {})
		a.Sub("/c", func(c *Composer) {
			c.Use().Func(func(rw http.ResponseWriter, r *http.Request) {})
			c.Use().Func(func(rw http.ResponseWriter, r *http.Request) {})
			c.Get("/action", http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
				fmt.Fprintf(rw, "hello")
			}))
		})
	})

	routes := g.BuildRoutes()
	router := httprouter.New()
	for _, route := range routes {
		route := route // capture the range variable for the closure below
		router.Handle(route.Method, route.Pattern, func(rw http.ResponseWriter, r *http.Request, params httprouter.Params) {
			route.Handler.ServeHTTP(rw, r)
		})
	}

	b.ReportAllocs()
	b.ResetTimer()
	recorder := httptest.NewRecorder()
	request, _ := http.NewRequest("GET", "/a/c/action", nil)
	for i := 0; i < b.N; i++ {
		router.ServeHTTP(recorder, request)
	}
}
func BenchmarkUnmarshal(b *testing.B) {
	b.ReportAllocs()
	xml := []byte(atomXml)
	for i := 0; i < b.N; i++ {
		Unmarshal(xml, &Feed{})
	}
}
func BenchmarkExist(b *testing.B) {
	b.StopTimer()
	var d Interface
	if opt == "slice" {
		d = newMapToSlice()
	} else {
		d = newMapToMap()
	}
	k := "A"
	for _, v := range testVals {
		d.set(k, v)
	}
	// Shuffle the lookup order so the benchmark is not biased towards
	// data structures with an order, such as slice.
	ix := rand.Perm(testN)
	b.StartTimer()
	b.ReportAllocs()
	for _, idx := range ix {
		if !d.exist(k, testVals[idx]) {
			b.Errorf("%s should have existed!", testVals[idx])
		}
	}
}
func benchmarkLogEntryDecoderDecode(b *testing.B, sz int) {
	var buf bytes.Buffer
	enc := raft.NewLogEntryEncoder(&buf)
	dec := raft.NewLogEntryDecoder(&buf)

	// Encode a single record and record its size.
	enc.Encode(&raft.LogEntry{Data: make([]byte, sz)})
	b.SetBytes(int64(buf.Len()))

	// Encode all the records on the buffer first.
	buf.Reset()
	for i := 0; i < b.N; i++ {
		if err := enc.Encode(&raft.LogEntry{Data: make([]byte, sz)}); err != nil {
			b.Fatalf("encode: %s", err)
		}
	}
	b.ReportAllocs()

	// Decode from the buffer.
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		var entry raft.LogEntry
		if err := dec.Decode(&entry); err != nil {
			b.Fatal(err)
		}
	}
	b.StopTimer()
	runtime.GC()
}