func BenchmarkTarUntarWithLinks(b *testing.B) {
	origin, err := ioutil.TempDir("", "docker-test-untar-origin")
	if err != nil {
		b.Fatal(err)
	}
	tempDir, err := ioutil.TempDir("", "docker-test-untar-destination")
	if err != nil {
		b.Fatal(err)
	}
	target := filepath.Join(tempDir, "dest")
	n, err := prepareUntarSourceDirectory(100, origin, true)
	if err != nil {
		b.Fatal(err)
	}
	defer os.RemoveAll(origin)
	defer os.RemoveAll(tempDir)

	b.ResetTimer()
	b.SetBytes(int64(n))
	for n := 0; n < b.N; n++ {
		err := TarUntar(origin, target)
		if err != nil {
			b.Fatal(err)
		}
		os.RemoveAll(target)
	}
}
func BenchmarkParseSize(b *testing.B) {
	b.SetBytes(1)
	n := []byte("12345678")
	for i := 0; i < b.N; i++ {
		parseSize(n)
	}
}
func BenchmarkReplicaSnapshot(b *testing.B) {
	defer tracing.Disable()()
	storeCfg := TestStoreConfig(nil)
	storeCfg.TestingKnobs.DisableSplitQueue = true
	stopper := stop.NewStopper()
	defer stopper.Stop()
	store := createTestStoreWithConfig(b, stopper, &storeCfg)
	// We want to manually control the size of the raft log.
	store.SetRaftLogQueueActive(false)

	rep, err := store.GetReplica(rangeID)
	if err != nil {
		b.Fatal(err)
	}
	snapSize := rep.GetMaxBytes()
	if err := fillTestRange(rep, snapSize); err != nil {
		b.Fatal(err)
	}
	b.SetBytes(snapSize)
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		if _, err := rep.GetSnapshot(context.Background(), "bench"); err != nil {
			b.Fatal(err)
		}
		rep.CloseOutSnap()
	}
}
func BenchmarkUnmarshalText(b *testing.B) {
	pb := new(MyMessage)
	for i := 0; i < b.N; i++ {
		UnmarshalText(benchInput, pb)
	}
	b.SetBytes(int64(len(benchInput)))
}
func BenchmarkIndex1024(b *testing.B) {
	i := ChecksumIndex{}
	i.weakChecksumLookup = make([]map[uint32]StrongChecksumList, 256)

	for x := 0; x < 1024; x++ {
		w := rand.Uint32()
		if i.weakChecksumLookup[w&255] == nil {
			i.weakChecksumLookup[w&255] = make(map[uint32]StrongChecksumList)
		}
		i.weakChecksumLookup[w&255][w] = append(
			i.weakChecksumLookup[w&255][w],
			chunks.ChunkChecksum{},
		)
	}

	b.SetBytes(1)
	b.StartTimer()
	for x := 0; x < b.N; x++ {
		i.FindWeakChecksum2(T)
	}
	b.StopTimer()
}
func BenchmarkNoHack(b *testing.B) {
	client, srv := net.Pipe()
	done := make(chan struct{})
	req := []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\n")
	read := make([]byte, 4096)
	b.SetBytes(int64(len(req) * 30))

	go func() {
		for {
			if _, err := srv.Write(req); err != nil {
				srv.Close()
				break
			}
		}
		close(done)
	}()

	for i := 0; i < b.N; i++ {
		for i := 0; i < 30; i++ {
			if _, err := client.Read(read); err != nil && err != io.EOF {
				b.Fatal(err)
			}
		}
	}
	client.Close()
	<-done
}
func BenchmarkReadInts(b *testing.B) {
	var ls Struct
	bsr := &byteSliceReader{}
	var r io.Reader = bsr
	b.SetBytes(2 * (1 + 2 + 4 + 8))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bsr.remain = big
		Read(r, BigEndian, &ls.Int8)
		Read(r, BigEndian, &ls.Int16)
		Read(r, BigEndian, &ls.Int32)
		Read(r, BigEndian, &ls.Int64)
		Read(r, BigEndian, &ls.Uint8)
		Read(r, BigEndian, &ls.Uint16)
		Read(r, BigEndian, &ls.Uint32)
		Read(r, BigEndian, &ls.Uint64)
	}
	b.StopTimer()

	want := s
	want.Float32 = 0
	want.Float64 = 0
	want.Complex64 = 0
	want.Complex128 = 0
	want.Array = [4]uint8{0, 0, 0, 0}
	want.Bool = false
	want.BoolArray = [4]bool{false, false, false, false}
	if b.N > 0 && !reflect.DeepEqual(ls, want) {
		b.Fatalf("struct doesn't match:\ngot %v;\nwant %v", ls, want)
	}
}
func BenchmarkBenchReadliner(b *testing.B) {
	br := BenchReadliner{line: testLines[0], count: b.N}
	b.SetBytes(int64(len(br.line)))
	for i := 0; i < b.N; i++ {
		br.ReadLine()
	}
}
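// BenchReadliner and testLines are helpers defined elsewhere in the original
// test file and are not reproduced here. As an illustration only, a minimal
// readliner used the way the benchmarks above and BenchmarkPipe below use it
// could look roughly like the hypothetical sketch that follows: it hands back
// the same preset line until its count is exhausted. The type, field, and
// method names in this sketch are assumptions, not the project's actual code.
type benchReadlinerSketch struct {
	line  []byte
	count int
}

// ReadLine returns the stored line until count reaches zero, then io.EOF.
func (r *benchReadlinerSketch) ReadLine() ([]byte, error) {
	if r.count <= 0 {
		return nil, io.EOF
	}
	r.count--
	return r.line, nil
}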
func BenchmarkUsersProd(b *testing.B) {
	prodfile := "./prods/users.prod"
	text, err := ioutil.ReadFile(prodfile)
	if err != nil {
		b.Fatal(err)
	}
	seed := uint64(time.Now().UnixNano())
	s := parsec.NewScanner(text)
	root, _ := Y(s)
	scope := root.(common.Scope)
	nterms := scope["_nonterminals"].(common.NTForms)
	scope = BuildContext(scope, seed, "./bags", prodfile)

	b.ResetTimer()
	out := 0
	for i := 0; i < b.N; i++ {
		scope = scope.RebuildContext()
		if val, ok := EvalForms("root", scope, nterms["s"]).(string); !ok {
			b.Fatalf("not string `%v`\n", val)
		} else {
			out += len(val)
		}
	}
	b.SetBytes(int64(float64(out) / float64(b.N)))
}
// Benchmark sealing a 1MiB message.
func BenchmarkSealLongMsg(b *testing.B) {
	b.StopTimer()
	msg, err := ioutil.ReadFile("testdata/long.dat")
	if err != nil {
		fmt.Println("secretbox: failed to read long test message")
		fmt.Printf("%v\n", err)
		b.FailNow()
	}
	longMsg = msg
	b.StartTimer()

	for i := 0; i < b.N; i++ {
		_, ok := Seal(msg, testKey)
		if !ok {
			fmt.Println("secretbox: failed to seal message")
			b.FailNow()
		}
		b.SetBytes(int64(len(msg)))
	}
	b.StopTimer()

	var ok bool
	longBox, ok = Seal(msg, testKey)
	if !ok {
		fmt.Println("secretbox: failed to seal message")
		b.FailNow()
	}
}
func BenchmarkUnma(b *testing.B) {
	popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
	total := 0
	data := new(bytes.Buffer)
	enc := gob.NewEncoder(data)
	dec := gob.NewDecoder(data)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		p := NewPopulatedPreAccept(popr)
		enc.Encode(p)
		total += len(data.Bytes())
		msg := &PreAccept{}
		b.StartTimer()
		dec.Decode(&msg)
	}
	b.SetBytes(int64(total / b.N))
}
func BenchmarkFixedStreamWriter4K(t *testing.B) {
	const totalinput = 10 << 20
	input := getBufferSize(totalinput)

	const size = 4 << 10
	b := input.Bytes()
	// Create some duplicates
	for i := 0; i < 500; i++ {
		// Read from 10 first blocks
		src := b[(i%10)*size : (i%10)*size+size]
		// Write into the following ones
		dst := b[(10+i)*size : (i+10)*size+size]
		copy(dst, src)
	}
	t.ResetTimer()
	t.SetBytes(totalinput)
	for i := 0; i < t.N; i++ {
		input = bytes.NewBuffer(b)
		w, _ := dedup.NewStreamWriter(ioutil.Discard, dedup.ModeFixed, size, 10*size)
		io.Copy(w, input)
		err := w.Close()
		if err != nil {
			t.Fatal(err)
		}
	}
}
// Maximum block size: 64k
func BenchmarkDynamicFragments64K(t *testing.B) {
	const totalinput = 10 << 20
	input := getBufferSize(totalinput)

	const size = 64 << 10
	b := input.Bytes()
	// Create some duplicates
	for i := 0; i < 50; i++ {
		// Read from 10 first blocks
		src := b[(i%10)*size : (i%10)*size+size]
		// Write into the following ones
		dst := b[(10+i)*size : (i+10)*size+size]
		copy(dst, src)
	}
	t.ResetTimer()
	t.SetBytes(totalinput)
	for i := 0; i < t.N; i++ {
		out := make(chan dedup.Fragment, 10)
		go func() {
			for range out {
			}
		}()
		input = bytes.NewBuffer(b)
		w, _ := dedup.NewSplitter(out, dedup.ModeDynamic, size)
		io.Copy(w, input)
		err := w.Close()
		if err != nil {
			t.Fatal(err)
		}
	}
}
func BenchmarkEncode(b *testing.B) {
	data := &Block{Bytes: make([]byte, 65536)}
	b.SetBytes(int64(len(data.Bytes)))
	for i := 0; i < b.N; i++ {
		Encode(ioutil.Discard, data)
	}
}
func BenchmarkNoDeferMutex(b *testing.B) {
	var mu sync.Mutex
	b.SetBytes(1)
	for i := 0; i < b.N; i++ {
		noDeferUnlock(&mu)
	}
}
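// noDeferUnlock is defined elsewhere in the original test file. As an
// illustration only (the real helpers may differ), a benchmark like this one
// is typically contrasting a plain unlock against a deferred unlock, along
// the lines of the hypothetical pair sketched below; both names here are
// assumptions for the sketch, not the project's actual functions.
func noDeferUnlockSketch(mu *sync.Mutex) {
	mu.Lock()
	mu.Unlock() // unlock inline, no defer overhead
}

func deferUnlockSketch(mu *sync.Mutex) {
	mu.Lock()
	defer mu.Unlock() // the defer cost is what a paired benchmark would measure
}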
func runMVCCDeleteRange(emk engineMaker, valueBytes int, b *testing.B) {
	// 512 KB ranges so the benchmark doesn't take forever
	const rangeBytes = 512 * 1024
	const overhead = 48 // Per key/value overhead (empirically determined)
	numKeys := rangeBytes / (overhead + valueBytes)
	eng, dir := setupMVCCData(emk, 1, numKeys, valueBytes, b)
	defer eng.Close()

	b.SetBytes(rangeBytes)
	b.StopTimer()
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		locDirty := dir + "_dirty"
		if err := os.RemoveAll(locDirty); err != nil {
			b.Fatal(err)
		}
		if err := shutil.CopyTree(dir, locDirty, nil); err != nil {
			b.Fatal(err)
		}
		dupEng := emk(b, locDirty)

		b.StartTimer()
		_, _, _, err := MVCCDeleteRange(context.Background(), dupEng,
			&enginepb.MVCCStats{}, roachpb.KeyMin, roachpb.KeyMax, math.MaxInt64,
			hlc.MaxTimestamp, nil, false)
		if err != nil {
			b.Fatal(err)
		}
		b.StopTimer()

		dupEng.Close()
	}
}
func BenchmarkEncode(b *testing.B) {
	b.StopTimer()

	bo := image.Rect(0, 0, 640, 480)
	rnd := rand.New(rand.NewSource(123))

	// Restrict to a 256-color paletted image to avoid quantization path.
	palette := make(color.Palette, 256)
	for i := range palette {
		palette[i] = color.RGBA{
			uint8(rnd.Intn(256)),
			uint8(rnd.Intn(256)),
			uint8(rnd.Intn(256)),
			255,
		}
	}
	img := image.NewPaletted(image.Rect(0, 0, 640, 480), palette)
	for y := bo.Min.Y; y < bo.Max.Y; y++ {
		for x := bo.Min.X; x < bo.Max.X; x++ {
			img.Set(x, y, palette[rnd.Intn(256)])
		}
	}

	b.SetBytes(640 * 480 * 4)
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		Encode(ioutil.Discard, img, nil)
	}
}
func BenchmarkDagmodWrite(b *testing.B) {
	b.StopTimer()
	dserv, pins := getMockDagServ(b)
	_, n := getNode(b, dserv, 0, pins)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	wrsize := 4096

	dagmod, err := NewDagModifier(ctx, n, dserv, pins, sizeSplitterGen(512))
	if err != nil {
		b.Fatal(err)
	}

	buf := make([]byte, b.N*wrsize)
	u.NewTimeSeededRand().Read(buf)
	b.StartTimer()
	b.SetBytes(int64(wrsize))
	for i := 0; i < b.N; i++ {
		n, err := dagmod.Write(buf[i*wrsize : (i+1)*wrsize])
		if err != nil {
			b.Fatal(err)
		}
		if n != wrsize {
			b.Fatal("Wrote bad size")
		}
	}
}
func BenchmarkPipe(b *testing.B) {
	br := BenchReadliner{line: testLines[0], count: b.N}
	bs := BenchSender{}
	b.SetBytes(int64(len(br.line)))
	Pipe(&br, &bs, func([]byte) bool { return true })
}
func BenchmarkSiprng(b *testing.B) {
	b.SetBytes(8)
	p := &siprng{}
	for i := 0; i < b.N; i++ {
		p.Uint64()
	}
}
func BenchmarkWithHack(b *testing.B) {
	client, srv := net.Pipe()
	done := make(chan struct{})
	req := []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\n")
	read := make([]byte, 4096)
	b.SetBytes(int64(len(req) * 30))

	l := MalformedHostHeaderOverrideConn{client, true}
	go func() {
		for {
			if _, err := srv.Write(req); err != nil {
				srv.Close()
				break
			}
			l.first = true // make sure each subsequent run uses the hack parsing
		}
		close(done)
	}()

	for i := 0; i < b.N; i++ {
		for i := 0; i < 30; i++ {
			if n, err := l.Read(read); err != nil && err != io.EOF {
				b.Fatalf("read: %d - %d, err: %v\n%s", n, len(req), err, string(read[:n]))
			}
		}
	}
	l.Close()
	<-done
}
func BenchmarkReceiveRequestNoBuf(b *testing.B) {
	req := MCRequest{
		Opcode:  SET,
		Cas:     0,
		Opaque:  7242,
		VBucket: 824,
		Extras:  []byte{1},
		Key:     []byte("somekey"),
		Body:    []byte("somevalue"),
	}
	data := req.Bytes()
	data[0] = REQ_MAGIC
	rdr := bytes.NewReader(data)

	b.SetBytes(int64(len(data)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		req2 := MCRequest{}
		rdr.Seek(0, 0)
		_, err := req2.Receive(rdr, nil)
		if err != nil {
			b.Fatalf("Error receiving: %v", err)
		}
	}
}
func benchFunc(b *testing.B, f func(b []byte) []byte, s string) {
	src := []byte(s)
	b.SetBytes(int64(len(src)))
	for i := 0; i < b.N; i++ {
		f(src)
	}
}
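// benchFunc above is a small harness: callers hand it a []byte -> []byte
// transform and an input string, and it reports throughput via SetBytes.
// A hypothetical caller (the benchmark name and input string below are made
// up for illustration; bytes.ToUpper just happens to fit the signature)
// might look like this:
func BenchmarkToUpperSketch(b *testing.B) {
	benchFunc(b, bytes.ToUpper, "the quick brown fox jumps over the lazy dog")
}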
func BenchmarkEncodeRGBOpaque(b *testing.B) {
	b.StopTimer()
	img := image.NewRGBA(image.Rect(0, 0, 640, 480))
	// Set all pixels to 0xFF alpha to force opaque mode.
	bo := img.Bounds()
	rnd := rand.New(rand.NewSource(123))
	for y := bo.Min.Y; y < bo.Max.Y; y++ {
		for x := bo.Min.X; x < bo.Max.X; x++ {
			img.Set(x, y, color.RGBA{
				uint8(rnd.Intn(256)),
				uint8(rnd.Intn(256)),
				uint8(rnd.Intn(256)),
				255})
		}
	}
	if !img.Opaque() {
		b.Fatal("expected image to be opaque")
	}
	b.SetBytes(640 * 480 * 4)
	b.StartTimer()
	options := &Options{Quality: 90}
	for i := 0; i < b.N; i++ {
		Encode(ioutil.Discard, img, options)
	}
}
func runBenchmarkPub(b *testing.B, store string, msgSize int64) {
	if gw == nil {
		gw = newGatewayForTest(b, store)
	}

	b.ReportAllocs()
	httpReqRaw := strings.TrimSpace(fmt.Sprintf(`
POST /topics/foobar/v1 HTTP/1.1
Host: localhost:9191
User-Agent: Go-http-client/1.1
Content-Length: %d
Content-Type: application/x-www-form-urlencoded
Appid: myappid
Pubkey: mypubkey
Accept-Encoding: gzip`, msgSize)) + "\r\n\r\n"
	b.SetBytes(msgSize + int64(len(httpReqRaw)))

	param := fasthttprouter.Params{
		fasthttprouter.Param{Key: "topic", Value: "foobar"},
		fasthttprouter.Param{Key: "ver", Value: "v1"},
	}
	req := &fasthttp.Request{}
	req.SetRequestURI("/topics/foobar/v1")
	ctx := &fasthttp.RequestCtx{}
	ctx.Init(req, nil, nil)
	ctx.Request.SetBodyString(strings.Repeat("X", int(msgSize)))
	ctx.Request.Header.SetMethod("POST")

	for i := 0; i < b.N; i++ {
		ctx.Response.Reset()
		gw.pubHandler(ctx, param)
	}
}
// runMVCCScan first creates test data (and resets the benchmarking
// timer). It then performs b.N MVCCScans in increments of numRows
// keys over all of the data in the Engine instance, restarting at
// the beginning of the keyspace, as many times as necessary.
func runMVCCScan(emk engineMaker, numRows, numVersions, valueSize int, b *testing.B) {
	// Use the same number of keys for all of the mvcc scan
	// benchmarks. Using a different number of keys per test gives
	// preferential treatment to tests with fewer keys. Note that the
	// datasets all fit in cache and the cache is pre-warmed.
	const numKeys = 100000

	eng, _ := setupMVCCData(emk, numVersions, numKeys, valueSize, b)
	defer eng.Close()

	b.SetBytes(int64(numRows * valueSize))
	b.ResetTimer()

	keyBuf := append(make([]byte, 0, 64), []byte("key-")...)
	for i := 0; i < b.N; i++ {
		// Choose a random key to start scan.
		keyIdx := rand.Int31n(int32(numKeys - numRows))
		startKey := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(keyIdx)))
		walltime := int64(5 * (rand.Int31n(int32(numVersions)) + 1))
		ts := makeTS(walltime, 0)
		kvs, _, _, err := MVCCScan(context.Background(), eng, startKey, keyMax, int64(numRows), ts, true, nil)
		if err != nil {
			b.Fatalf("failed scan: %s", err)
		}
		if len(kvs) != numRows {
			b.Fatalf("failed to scan: %d != %d", len(kvs), numRows)
		}
	}

	b.StopTimer()
}
// runMVCCGet first creates test data (and resets the benchmarking
// timer). It then performs b.N MVCCGets.
func runMVCCGet(emk engineMaker, numVersions, valueSize int, b *testing.B) {
	const overhead = 48          // Per key/value overhead (empirically determined)
	const targetSize = 512 << 20 // 512 MB
	// Adjust the number of keys so that each test has approximately the same
	// amount of data.
	numKeys := targetSize / ((overhead + valueSize) * (1 + (numVersions-1)/2))

	eng, _ := setupMVCCData(emk, numVersions, numKeys, valueSize, b)
	defer eng.Close()

	b.SetBytes(int64(valueSize))
	b.ResetTimer()

	keyBuf := append(make([]byte, 0, 64), []byte("key-")...)
	for i := 0; i < b.N; i++ {
		// Choose a random key to retrieve.
		keyIdx := rand.Int31n(int32(numKeys))
		key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(keyIdx)))
		walltime := int64(5 * (rand.Int31n(int32(numVersions)) + 1))
		ts := makeTS(walltime, 0)
		if v, _, err := MVCCGet(context.Background(), eng, key, ts, true, nil); err != nil {
			b.Fatalf("failed get: %s", err)
		} else if v == nil {
			b.Fatalf("failed get (key not found): %d@%d", keyIdx, walltime)
		} else if valueBytes, err := v.GetBytes(); err != nil {
			b.Fatal(err)
		} else if len(valueBytes) != valueSize {
			b.Fatalf("unexpected value size: %d", len(valueBytes))
		}
	}

	b.StopTimer()
}
func runMVCCConditionalPut(emk engineMaker, valueSize int, createFirst bool, b *testing.B) {
	rng, _ := randutil.NewPseudoRand()
	value := roachpb.MakeValueFromBytes(randutil.RandBytes(rng, valueSize))
	keyBuf := append(make([]byte, 0, 64), []byte("key-")...)

	eng := emk(b, fmt.Sprintf("cput_%d", valueSize))
	defer eng.Close()

	b.SetBytes(int64(valueSize))
	var expected *roachpb.Value
	if createFirst {
		for i := 0; i < b.N; i++ {
			key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(i)))
			ts := makeTS(timeutil.Now().UnixNano(), 0)
			if err := MVCCPut(context.Background(), eng, nil, key, ts, value, nil); err != nil {
				b.Fatalf("failed put: %s", err)
			}
		}
		expected = &value
	}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(i)))
		ts := makeTS(timeutil.Now().UnixNano(), 0)
		if err := MVCCConditionalPut(context.Background(), eng, nil, key, ts, value, expected, nil); err != nil {
			b.Fatalf("failed put: %s", err)
		}
	}

	b.StopTimer()
}
func BenchmarkJSONFileLogger(b *testing.B) {
	cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657"
	tmp, err := ioutil.TempDir("", "docker-logger-")
	if err != nil {
		b.Fatal(err)
	}
	defer os.RemoveAll(tmp)
	filename := filepath.Join(tmp, "container.log")
	l, err := New(logger.Info{
		ContainerID: cid,
		LogPath:     filename,
	})
	if err != nil {
		b.Fatal(err)
	}
	defer l.Close()

	testLine := "Line that thinks that it is log line from docker\n"
	msg := &logger.Message{Line: []byte(testLine), Source: "stderr", Timestamp: time.Now().UTC()}
	jsonlog, err := (&jsonlog.JSONLog{Log: string(msg.Line) + "\n", Stream: msg.Source, Created: msg.Timestamp}).MarshalJSON()
	if err != nil {
		b.Fatal(err)
	}
	b.SetBytes(int64(len(jsonlog)+1) * 30)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		for j := 0; j < 30; j++ {
			if err := l.Log(msg); err != nil {
				b.Fatal(err)
			}
		}
	}
}
func BenchmarkParseInt(b *testing.B) {
	b.SetBytes(1)
	n := "12345678"
	for i := 0; i < b.N; i++ {
		strconv.ParseInt(n, 10, 0)
	}
}