func getParseChi(b *testing.B) xhandler.HandlerC { defer b.ResetTimer() if parseChi == nil { parseChi = loadChi(parseAPI) } return parseChi }
func getParseHTTPRouter(b *testing.B) http.Handler { defer b.ResetTimer() if parseHTTPRouter == nil { parseHTTPRouter = loadHTTPRouter(parseAPI) } return parseHTTPRouter }
func BenchmarkTarUntarWithLinks(b *testing.B) { origin, err := ioutil.TempDir("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, true) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := TarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } }
func BenchmarkCopyImagePaletted(b *testing.B) { img := image.NewPaletted(image.Rect(0, 0, 4096, 4096), palette.Plan9) b.ResetTimer() for i := 0; i < b.N; i++ { CopyImage(img) } }
func BenchmarkCopyImageNRGBA(b *testing.B) { img := image.NewNRGBA(image.Rect(0, 0, 4096, 4096)) b.ResetTimer() for i := 0; i < b.N; i++ { CopyImage(img) } }
// runMVCCScan first creates test data (and resets the benchmarking
// timer). It then performs b.N MVCCScans in increments of numRows
// keys over all of the data in the Engine instance, restarting at
// the beginning of the keyspace, as many times as necessary.
func runMVCCScan(emk engineMaker, numRows, numVersions, valueSize int, b *testing.B) {
	// Use the same number of keys for all of the mvcc scan
	// benchmarks. Using a different number of keys per test gives
	// preferential treatment to tests with fewer keys. Note that the
	// datasets all fit in cache and the cache is pre-warmed.
	const numKeys = 100000

	eng, _ := setupMVCCData(emk, numVersions, numKeys, valueSize, b)
	defer eng.Close()

	// Throughput is reported in terms of the logical value bytes
	// returned per scan.
	b.SetBytes(int64(numRows * valueSize))
	b.ResetTimer()

	// keyBuf carries the 4-byte "key-" prefix; the varint key index is
	// re-encoded into keyBuf[:4] each iteration to avoid reallocating.
	keyBuf := append(make([]byte, 0, 64), []byte("key-")...)
	for i := 0; i < b.N; i++ {
		// Choose a random key to start scan.
		keyIdx := rand.Int31n(int32(numKeys - numRows))
		startKey := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(keyIdx)))
		// Read at a randomly chosen existing version timestamp.
		walltime := int64(5 * (rand.Int31n(int32(numVersions)) + 1))
		ts := makeTS(walltime, 0)
		kvs, _, _, err := MVCCScan(context.Background(), eng, startKey, keyMax, int64(numRows), ts, true, nil)
		if err != nil {
			b.Fatalf("failed scan: %s", err)
		}
		if len(kvs) != numRows {
			b.Fatalf("failed to scan: %d != %d", len(kvs), numRows)
		}
	}

	b.StopTimer()
}
// runMVCCConditionalPut benchmarks b.N MVCCConditionalPut calls of
// valueSize-byte values. If createFirst is true, all keys are written
// first (untimed) and the conditional puts then expect that existing
// value; otherwise the conditional puts run against empty keys with a
// nil expectation.
func runMVCCConditionalPut(emk engineMaker, valueSize int, createFirst bool, b *testing.B) {
	rng, _ := randutil.NewPseudoRand()
	value := roachpb.MakeValueFromBytes(randutil.RandBytes(rng, valueSize))
	// keyBuf carries the "key-" prefix; the varint index is re-encoded
	// into keyBuf[:4] each iteration to avoid reallocating.
	keyBuf := append(make([]byte, 0, 64), []byte("key-")...)

	eng := emk(b, fmt.Sprintf("cput_%d", valueSize))
	defer eng.Close()

	b.SetBytes(int64(valueSize))
	var expected *roachpb.Value
	if createFirst {
		// Seed every key so the timed loop below exercises the
		// "condition matches existing value" path.
		for i := 0; i < b.N; i++ {
			key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(i)))
			ts := makeTS(timeutil.Now().UnixNano(), 0)
			if err := MVCCPut(context.Background(), eng, nil, key, ts, value, nil); err != nil {
				b.Fatalf("failed put: %s", err)
			}
		}
		expected = &value
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(i)))
		ts := makeTS(timeutil.Now().UnixNano(), 0)
		if err := MVCCConditionalPut(context.Background(), eng, nil, key, ts, value, expected, nil); err != nil {
			b.Fatalf("failed put: %s", err)
		}
	}

	b.StopTimer()
}
func getParseXmux(b *testing.B) xhandler.HandlerC { defer b.ResetTimer() if parseXmux == nil { parseXmux = loadXmux(parseAPI) } return parseXmux }
func BenchmarkMiddleware(b *testing.B) { g := New() g.Sub("/a", func(a *Composer) { a.Use().Func(func(rw http.ResponseWriter, r *http.Request) { }) a.Use().Func(func(rw http.ResponseWriter, r *http.Request) { }) a.Sub("/c", func(c *Composer) { c.Use().Func(func(rw http.ResponseWriter, r *http.Request) { }) c.Use().Func(func(rw http.ResponseWriter, r *http.Request) { }) c.Get("/action", http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { fmt.Fprintf(rw, "hello") })) }) }) routes := g.BuildRoutes() router := httprouter.New() for _, route := range routes { router.Handle(route.Method, route.Pattern, func(rw http.ResponseWriter, r *http.Request, params httprouter.Params) { route.Handler.ServeHTTP(rw, r) }) } b.ReportAllocs() b.ResetTimer() recorder := httptest.NewRecorder() request, _ := http.NewRequest("GET", "/a/c/action", nil) for i := 0; i < b.N; i++ { router.ServeHTTP(recorder, request) } }
func BenchmarkAddAndQueryPost(b *testing.B) { // Pre-build posts and queries so we're not measuring that. rand := rand.New(rand.NewSource(time.Now().UnixNano())) idx := &PostIndex{} posts := createPosts(aliceChain, 100000, rand) // Large number of posts to query for _, v := range posts { idx.AddPost(v.id, v.words) } posts = createPosts(aliceChain, b.N, rand) // New posts! queries := make([]string, b.N) for i := 0; i < len(queries); i++ { ql := rand.Intn(4) + 1 t := aliceChain.Generate(ql, rand) w := splitToWords(t) queries[i] = randomQuery(w, rand) } var index int32 = -1 // Count up to N but atomically b.ResetTimer() b.RunParallel(func(pb *testing.PB) { for pb.Next() { i := atomic.AddInt32(&index, 1) if rand.Intn(5) == 0 { p := posts[i] idx.AddPost(p.id, p.words) } else { q := queries[i] idx.QueryPosts(q, 100) } } }) }
func BenchmarkExecuteInParallel(b *testing.B) { numItems := int64(1000) qs := make([]*Queue, 0, b.N) for i := 0; i < b.N; i++ { q := New(numItems) for j := int64(0); j < numItems; j++ { q.Put(j) } qs = append(qs, q) } var counter int64 fn := func(ifc interface{}) { c := ifc.(int64) atomic.AddInt64(&counter, c) } b.ResetTimer() for i := 0; i < b.N; i++ { q := qs[i] ExecuteInParallel(q, fn) } }
// BenchmarkSingleConn measures parallel query execution over a session
// restricted to a single connection against a 3-node test server.
func BenchmarkSingleConn(b *testing.B) {
	srv := NewTestServer(b, 3)
	defer srv.Stop()
	cluster := testCluster(srv.Address, 3)
	// Set the timeout arbitrarily low so that the query hits the timeout in a
	// timely manner.
	cluster.Timeout = 500 * time.Millisecond
	cluster.NumConns = 1
	db, err := cluster.CreateSession()
	if err != nil {
		b.Fatalf("NewCluster: %v", err)
	}
	defer db.Close()

	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			err := db.Query("void").Exec()
			if err != nil {
				b.Error(err)
				return
			}
		}
	})
}
// BenchmarkReadInts measures Read of every fixed-size integer field of
// Struct (signed and unsigned, 8 through 64 bits, big-endian) from an
// in-memory reader, then verifies the decoded values once the timer is
// stopped.
func BenchmarkReadInts(b *testing.B) {
	var ls Struct
	bsr := &byteSliceReader{}
	var r io.Reader = bsr
	// 2*(1+2+4+8) bytes per iteration: one signed and one unsigned
	// integer of each width.
	b.SetBytes(2 * (1 + 2 + 4 + 8))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// Rewind the reader to the shared fixture for each pass.
		bsr.remain = big
		Read(r, BigEndian, &ls.Int8)
		Read(r, BigEndian, &ls.Int16)
		Read(r, BigEndian, &ls.Int32)
		Read(r, BigEndian, &ls.Int64)
		Read(r, BigEndian, &ls.Uint8)
		Read(r, BigEndian, &ls.Uint16)
		Read(r, BigEndian, &ls.Uint32)
		Read(r, BigEndian, &ls.Uint64)
	}
	b.StopTimer()

	// Compare against the reference struct s with every non-integer
	// field zeroed, since only the integer fields were read above.
	want := s
	want.Float32 = 0
	want.Float64 = 0
	want.Complex64 = 0
	want.Complex128 = 0
	want.Array = [4]uint8{0, 0, 0, 0}
	want.Bool = false
	want.BoolArray = [4]bool{false, false, false, false}
	if b.N > 0 && !reflect.DeepEqual(ls, want) {
		b.Fatalf("struct doesn't match:\ngot %v;\nwant %v", ls, want)
	}
}
// benchmarkIterOnBatch measures creating a batch iterator and seeking
// it to a random key, where the batch holds `writes` cleared keys
// layered over an engine that contains those same keys.
func benchmarkIterOnBatch(b *testing.B, writes int) {
	stopper := stop.NewStopper()
	defer stopper.Stop()
	engine := createTestEngine(stopper)

	// Populate the underlying engine.
	for i := 0; i < writes; i++ {
		if err := engine.Put(makeKey(i), []byte(strconv.Itoa(i))); err != nil {
			b.Fatal(err)
		}
	}

	// Clear every key in the batch so iteration must merge the batch's
	// deletions with the engine's contents.
	batch := engine.NewBatch()
	defer batch.Close()
	for i := 0; i < writes; i++ {
		if err := batch.Clear(makeKey(i)); err != nil {
			b.Fatal(err)
		}
	}

	// Fixed seed keeps the key sequence reproducible across runs.
	r := rand.New(rand.NewSource(5))

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		key := makeKey(r.Intn(writes))
		iter := batch.NewIterator(true)
		iter.Seek(key)
		iter.Close()
	}
}
// BenchmarkFirst16 measures db.First on a temporary DB pre-filled with
// n random 8-byte-key/8-byte-value pairs.
func BenchmarkFirst16(b *testing.B) {
	const n = 5000

	// Pin GOMAXPROCS to its current value for the duration of the run.
	g := runtime.GOMAXPROCS(0)
	defer runtime.GOMAXPROCS(g)

	o := &Options{noClone: true}
	db, err := CreateTemp("_testdata", "temp", ".db", o)
	if err != nil {
		b.Fatal(err)
	}

	// On exit, close the DB and remove both its file and its WAL.
	dbname := db.Name()
	defer func(n string) {
		db.Close()
		os.Remove(n)
		os.Remove(o._WAL)
	}(dbname)

	rng := fc()
	for i := 0; i < n; i++ {
		if err := db.Set(n2b(rng.Next()), n2b(rng.Next())); err != nil {
			b.Fatal(err)
		}
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		db.First()
	}
	b.StopTimer()
}
// runMVCCDeleteRange benchmarks MVCCDeleteRange over an entire
// keyspace. Because a delete-range destroys its input, each iteration
// copies the pristine on-disk data into a scratch directory and opens
// a fresh engine on it; only the MVCCDeleteRange call itself is inside
// the timed region (via StartTimer/StopTimer).
func runMVCCDeleteRange(emk engineMaker, valueBytes int, b *testing.B) {
	// 512 KB ranges so the benchmark doesn't take forever
	const rangeBytes = 512 * 1024
	const overhead = 48 // Per key/value overhead (empirically determined)
	numKeys := rangeBytes / (overhead + valueBytes)
	eng, dir := setupMVCCData(emk, 1, numKeys, valueBytes, b)
	defer eng.Close()

	b.SetBytes(rangeBytes)
	b.StopTimer()
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		// Rebuild a dirty copy of the source data for this iteration
		// (untimed).
		locDirty := dir + "_dirty"
		if err := os.RemoveAll(locDirty); err != nil {
			b.Fatal(err)
		}
		if err := shutil.CopyTree(dir, locDirty, nil); err != nil {
			b.Fatal(err)
		}
		dupEng := emk(b, locDirty)

		b.StartTimer()
		_, _, _, err := MVCCDeleteRange(context.Background(), dupEng, &enginepb.MVCCStats{}, roachpb.KeyMin, roachpb.KeyMax, math.MaxInt64, hlc.MaxTimestamp, nil, false)
		if err != nil {
			b.Fatal(err)
		}
		b.StopTimer()

		dupEng.Close()
	}
}
// BenchmarkRequest measures requestGlobal latency against a model
// backed by a fake connection, cycling through n indexed files.
func BenchmarkRequest(b *testing.B) {
	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
	m := NewModel("/tmp", nil, "syncthing", "dev", db)
	m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
	m.ScanRepo("default")

	// Build an index of n single-block files, all sharing one
	// modification time.
	const n = 1000
	files := make([]protocol.FileInfo, n)
	t := time.Now().Unix()
	for i := 0; i < n; i++ {
		files[i] = protocol.FileInfo{
			Name:     fmt.Sprintf("file%d", i),
			Modified: t,
			Blocks:   []protocol.BlockInfo{{0, 100, []byte("some hash bytes")}},
		}
	}

	// The fake connection answers every request with canned data.
	fc := FakeConnection{
		id:          node1,
		requestData: []byte("some data to return"),
	}
	m.AddConnection(fc, fc)
	m.Index(node1, "default", files)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		data, err := m.requestGlobal(node1, "default", files[i%n].Name, 0, 32, nil)
		if err != nil {
			b.Error(err)
		}
		if data == nil {
			b.Error("nil data")
		}
	}
}
func BenchmarkFilterLines(b *testing.B) { pattern := "sdk/*/cpp/*/*vars.html" lines := extractTestLines(b) var c uint b.ResetTimer() for i := 0; i < b.N; i++ { c = 0 for _, line := range lines { match, err := filter.Match(pattern, line) if err != nil { b.Fatal(err) } if match { c++ } } if c != 3 { b.Fatalf("wrong number of matches: expected 3, got %d", c) } } }
func BenchmarkTruncIndexNew(b *testing.B) { ids := []string{"banana", "bananaa", "bananab"} b.ResetTimer() for i := 0; i < b.N; i++ { NewTruncIndex(ids) } }
func BenchmarkFilterPatterns(b *testing.B) { patterns := []string{ "sdk/*", "*.html", } lines := extractTestLines(b) var c uint b.ResetTimer() for i := 0; i < b.N; i++ { c = 0 for _, line := range lines { match, err := filter.List(patterns, line) if err != nil { b.Fatal(err) } if match { c++ } } if c != 22185 { b.Fatalf("wrong number of matches: expected 22185, got %d", c) } } }
// BenchmarkReplicaSnapshot measures generating (and releasing) a
// replica snapshot on a range filled up to its max-bytes threshold.
func BenchmarkReplicaSnapshot(b *testing.B) {
	defer tracing.Disable()()
	storeCfg := TestStoreConfig(nil)
	// Disable the split queue so the range keeps a single, predictable
	// size for the whole run.
	storeCfg.TestingKnobs.DisableSplitQueue = true
	stopper := stop.NewStopper()
	defer stopper.Stop()
	store := createTestStoreWithConfig(b, stopper, &storeCfg)
	// We want to manually control the size of the raft log.
	store.SetRaftLogQueueActive(false)

	rep, err := store.GetReplica(rangeID)
	if err != nil {
		b.Fatal(err)
	}

	// Fill the range to its max-bytes size and report throughput in
	// terms of that snapshot size.
	snapSize := rep.GetMaxBytes()
	if err := fillTestRange(rep, snapSize); err != nil {
		b.Fatal(err)
	}
	b.SetBytes(snapSize)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if _, err := rep.GetSnapshot(context.Background(), "bench"); err != nil {
			b.Fatal(err)
		}
		// Release the outstanding snapshot before taking the next one.
		rep.CloseOutSnap()
	}
}
func getParseGoji(b *testing.B) http.Handler { defer b.ResetTimer() if parseGoji == nil { parseGoji = loadGoji(parseAPI) } return parseGoji }
// BenchmarkJSONFileLogger measures writing messages through the JSON
// file logger, 30 messages per benchmark iteration.
func BenchmarkJSONFileLogger(b *testing.B) {
	cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657"
	tmp, err := ioutil.TempDir("", "docker-logger-")
	if err != nil {
		b.Fatal(err)
	}
	defer os.RemoveAll(tmp)
	filename := filepath.Join(tmp, "container.log")
	l, err := New(logger.Info{
		ContainerID: cid,
		LogPath:     filename,
	})
	if err != nil {
		b.Fatal(err)
	}
	defer l.Close()

	testLine := "Line that thinks that it is log line from docker\n"
	msg := &logger.Message{Line: []byte(testLine), Source: "stderr", Timestamp: time.Now().UTC()}
	// Marshal one message up front so SetBytes can report throughput in
	// serialized-JSON bytes (30 messages per iteration, each followed
	// by a newline — hence the +1).
	jsonlog, err := (&jsonlog.JSONLog{Log: string(msg.Line) + "\n", Stream: msg.Source, Created: msg.Timestamp}).MarshalJSON()
	if err != nil {
		b.Fatal(err)
	}
	b.SetBytes(int64(len(jsonlog)+1) * 30)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		for j := 0; j < 30; j++ {
			if err := l.Log(msg); err != nil {
				b.Fatal(err)
			}
		}
	}
}
// benchPutItemParallel issues c PutItem requests against DynamoDB with
// parallelism p.
//
// NOTE(review): this overrides b.N with c, bypassing the testing
// package's automatic iteration scaling — presumably intentional so
// the caller fixes the exact request count, but reported ns/op is only
// meaningful for that fixed N; confirm callers expect this.
func benchPutItemParallel(p, c int, b *testing.B) {
	svc := dynamodb.New(&aws.Config{
		DisableSSL: aws.Bool(true),
	})

	av, err := dynamodbattribute.ConvertToMap(dbItem{Key: "MyKey", Data: "MyData"})
	if err != nil {
		b.Fatal("expect no ConvertToMap errors", err)
	}
	params := &dynamodb.PutItemInput{
		Item:      av,
		TableName: aws.String(testTableName),
	}
	b.N = c

	b.ResetTimer()
	b.SetParallelism(p)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			_, err = svc.PutItem(params)
			if err != nil {
				b.Error("expect no request errors", err)
			}
		}
	})
}
// BenchmarkDecodeNotFound performs a benchmark on how long it takes to decode // a notfound message with the maximum number of entries. func BenchmarkDecodeNotFound(b *testing.B) { // Create a message with the maximum number of entries. pver := ProtocolVersion var m MsgNotFound for i := 0; i < MaxInvPerMsg; i++ { hash, err := chainhash.NewHashFromStr(fmt.Sprintf("%x", i)) if err != nil { b.Fatalf("NewHashFromStr: unexpected error: %v", err) } m.AddInvVect(NewInvVect(InvTypeBlock, hash)) } // Serialize it so the bytes are available to test the decode below. var bb bytes.Buffer if err := m.BtcEncode(&bb, pver, LatestEncoding); err != nil { b.Fatalf("MsgNotFound.BtcEncode: unexpected error: %v", err) } buf := bb.Bytes() r := bytes.NewReader(buf) var msg MsgNotFound b.ResetTimer() for i := 0; i < b.N; i++ { r.Seek(0, 0) msg.BtcDecode(r, pver, LatestEncoding) } }
// runMVCCGet first creates test data (and resets the benchmarking
// timer). It then performs b.N MVCCGets.
func runMVCCGet(emk engineMaker, numVersions, valueSize int, b *testing.B) {
	const overhead = 48          // Per key/value overhead (empirically determined)
	const targetSize = 512 << 20 // 512 MB
	// Adjust the number of keys so that each test has approximately the same
	// amount of data.
	numKeys := targetSize / ((overhead + valueSize) * (1 + (numVersions-1)/2))
	eng, _ := setupMVCCData(emk, numVersions, numKeys, valueSize, b)
	defer eng.Close()

	b.SetBytes(int64(valueSize))
	b.ResetTimer()

	// keyBuf carries the "key-" prefix; the varint key index is
	// re-encoded into keyBuf[:4] each iteration to avoid reallocating.
	keyBuf := append(make([]byte, 0, 64), []byte("key-")...)
	for i := 0; i < b.N; i++ {
		// Choose a random key to retrieve.
		keyIdx := rand.Int31n(int32(numKeys))
		key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(keyIdx)))
		// Read at a randomly chosen existing version timestamp.
		walltime := int64(5 * (rand.Int31n(int32(numVersions)) + 1))
		ts := makeTS(walltime, 0)
		// Verify the read actually returned a value of the expected
		// size so the benchmark never measures a broken read path.
		if v, _, err := MVCCGet(context.Background(), eng, key, ts, true, nil); err != nil {
			b.Fatalf("failed get: %s", err)
		} else if v == nil {
			b.Fatalf("failed get (key not found): %d@%d", keyIdx, walltime)
		} else if valueBytes, err := v.GetBytes(); err != nil {
			b.Fatal(err)
		} else if len(valueBytes) != valueSize {
			b.Fatalf("unexpected value size: %d", len(valueBytes))
		}
	}

	b.StopTimer()
}
// BenchmarkDecodeMerkleBlock performs a benchmark on how long it takes to // decode a reasonably sized merkleblock message. func BenchmarkDecodeMerkleBlock(b *testing.B) { // Create a message with random data. pver := ProtocolVersion var m MsgMerkleBlock hash, err := chainhash.NewHashFromStr(fmt.Sprintf("%x", 10000)) if err != nil { b.Fatalf("NewHashFromStr: unexpected error: %v", err) } m.Header = *NewBlockHeader(1, hash, hash, 0, uint32(10000)) for i := 0; i < 105; i++ { hash, err := chainhash.NewHashFromStr(fmt.Sprintf("%x", i)) if err != nil { b.Fatalf("NewHashFromStr: unexpected error: %v", err) } m.AddTxHash(hash) if i%8 == 0 { m.Flags = append(m.Flags, uint8(i)) } } // Serialize it so the bytes are available to test the decode below. var bb bytes.Buffer if err := m.BtcEncode(&bb, pver, LatestEncoding); err != nil { b.Fatalf("MsgMerkleBlock.BtcEncode: unexpected error: %v", err) } buf := bb.Bytes() r := bytes.NewReader(buf) var msg MsgMerkleBlock b.ResetTimer() for i := 0; i < b.N; i++ { r.Seek(0, 0) msg.BtcDecode(r, pver, LatestEncoding) } }
func BenchmarkHandlerCall(b *testing.B) { fn := func(params *jsonrpc2.EmptyParams) (interface{}, error) { return nil, nil } handler := handler{ method: reflect.ValueOf(fn), params: reflect.New(reflect.ValueOf(fn).Type().In(0).Elem()).Interface().(jsonrpc2.Params), } data, _ := json.Marshal(&jsonrpc2.EmptyParams{}) params, err := handler.DecodeParams(data) if err != nil { b.Fatal(err.Error()) } b.ResetTimer() b.ReportAllocs() for i := 0; i < b.N; i++ { handler.Call(params) } }
// Benchmarking raw database/sql.
// BenchmarkAppendTxRawSQL measures b.N INSERTs executed on a single
// raw *sql.Tx: one transaction, truncated table up front, committed
// once after the timed loop.
func BenchmarkAppendTxRawSQL(b *testing.B) {
	var err error
	var sess db.Database
	var tx *sql.Tx

	if sess, err = db.Open(Adapter, settings); err != nil {
		b.Fatal(err)
	}
	defer sess.Close()

	// Drop down to the underlying database/sql handle.
	driver := sess.Driver().(*sql.DB)

	if tx, err = driver.Begin(); err != nil {
		b.Fatal(err)
	}

	// Start from an empty table so every run inserts into like state.
	if _, err = tx.Exec("TRUNCATE TABLE `artist`"); err != nil {
		b.Fatal(err)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if _, err = tx.Exec("INSERT INTO `artist` (`name`) VALUES('Hayao Miyazaki')"); err != nil {
			b.Fatal(err)
		}
	}

	// Commit outside the timed loop (timer still running here, as in
	// the original; commit cost is included in total run time only).
	if err = tx.Commit(); err != nil {
		b.Fatal(err)
	}
}
func BenchmarkDecode(b *testing.B) { fix := testMap(100) env := &Env{} type X struct { f string } // half a json for i, kv := range fix { if i%2 != 0 { if err := env.SetJson(kv[0], X{kv[1]}); err != nil { b.Fatal(err) } continue } env.Set(kv[0], kv[1]) } var writer bytes.Buffer env.Encode(&writer) denv := &Env{} reader := bytes.NewReader(writer.Bytes()) b.ResetTimer() for i := 0; i < b.N; i++ { err := denv.Decode(reader) if err != nil { b.Fatal(err) } reader.Seek(0, 0) } }