func BenchmarkOutMessageGrowShrink(b *testing.B) {
	// A single buffer, which should fit in some level of CPU cache.
	b.Run("Single buffer", func(b *testing.B) {
		var om OutMessage
		for i := 0; i < b.N; i++ {
			om.Grow(MaxReadSize)
			om.ShrinkTo(OutMessageHeaderSize)
		}
		b.SetBytes(int64(MaxReadSize))
	})

	// Many megabytes worth of buffers, which should defeat the CPU cache.
	b.Run("Many buffers", func(b *testing.B) {
		// The number of messages; intentionally a power of two.
		const numMessages = 128

		var oms [numMessages]OutMessage
		if s := unsafe.Sizeof(oms); s < 128<<20 {
			panic(fmt.Sprintf("Array is too small; total size: %d", s))
		}

		for i := 0; i < b.N; i++ {
			oms[i%numMessages].Grow(MaxReadSize)
			oms[i%numMessages].ShrinkTo(OutMessageHeaderSize)
		}

		b.SetBytes(int64(MaxReadSize))
	})
}
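// Note: individual b.Run sub-benchmarks can be selected with the -bench flag,
// and spaces in sub-benchmark names are reported as underscores, so something
// like
//
//	go test -bench 'BenchmarkOutMessageGrowShrink/Many_buffers'
//
// should run only the many-buffers case.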
func BenchmarkScan(b *testing.B) {
	const x = 10
	for _, base := range []int{2, 8, 10, 16} {
		for _, y := range []Word{10, 100, 1000, 10000, 100000} {
			if isRaceBuilder && y > 1000 {
				continue
			}
			b.Run(fmt.Sprintf("%d/Base%d", y, base), func(b *testing.B) {
				b.StopTimer()
				var z nat
				z = z.expWW(x, y)

				s := z.utoa(base)
				if t := itoa(z, base); !bytes.Equal(s, t) {
					b.Fatalf("scanning: got %s; want %s", s, t)
				}
				b.StartTimer()

				for i := 0; i < b.N; i++ {
					z.scan(bytes.NewReader(s), base, false)
				}
			})
		}
	}
}
func BenchmarkWrite(b *testing.B) {
	for _, p := range hashPairs {
		b.Run(p.label, func(b *testing.B) {
			MultiWrite(*benchn, *benchq, p.newFunc, b)
		})
	}
}
func BenchmarkXCryptoSum(b *testing.B) {
	for _, size := range sizes {
		b.Run(size.name, func(b *testing.B) {
			benchmarkSum(b, ref.Sum, size.l)
		})
	}
}
func BenchmarkResize(b *testing.B) {
	testCheckAvailable(b)
	hdr := &Handler{
		Executable: testExecutable,
	}
	params := imageserver.Params{
		param: imageserver.Params{
			"width": 100,
		},
	}
	for _, tc := range []struct {
		name string
		im   *imageserver.Image
	}{
		{"Small", testdata.Small},
		{"Medium", testdata.Medium},
		{"Large", testdata.Large},
		{"Huge", testdata.Huge},
	} {
		b.Run(tc.name, func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				_, err := hdr.Handle(tc.im, params)
				if err != nil {
					b.Fatal(err)
				}
			}
		})
	}
}
func BenchmarkErrors(b *testing.B) {
	var toperr error
	type run struct {
		stack int
		std   bool
	}
	runs := []run{
		{10, false},
		{10, true},
		{100, false},
		{100, true},
		{1000, false},
		{1000, true},
	}
	for _, r := range runs {
		part := "pkg/errors"
		if r.std {
			part = "errors"
		}
		name := fmt.Sprintf("%s-stack-%d", part, r.stack)
		b.Run(name, func(b *testing.B) {
			var err error
			f := yesErrors
			if r.std {
				f = noErrors
			}
			b.ReportAllocs()
			for i := 0; i < b.N; i++ {
				err = f(0, r.stack)
			}
			b.StopTimer()
			// Keep the result alive so the compiler cannot optimize the loop away.
			toperr = err
		})
	}
}
// BenchmarkVarint64ArrayMixed shows the performance of lots of small messages, each
// containing a small number of large (3, 4, and 5 byte) repeated int64s.
func BenchmarkVarint64ArrayMixed(b *testing.B) {
	for i := uint(1); i <= 1<<5; i <<= 1 {
		dist := genUint64Dist([11]int{0, 0, 0, 4, 6, 4, 0, 0, 0, 0, 0}, int(i))
		// number of sub fields
		for k := uint(1); k <= 1<<10; k <<= 2 {
			msg := &tpb.Message{}
			for m := uint(0); m < k; m++ {
				msg.Children = append(msg.Children, &tpb.Message{
					Key: dist,
				})
			}
			raw, err := proto.Marshal(msg)
			if err != nil {
				b.Error("wrong encode", err)
			}
			b.Run(fmt.Sprintf("Fields%vLen%v", k, i), func(b *testing.B) {
				scratchBuf := proto.NewBuffer(nil)
				b.ResetTimer()
				for k := 0; k < b.N; k++ {
					scratchBuf.SetBuf(raw)
					msgBlackhole.Reset()
					if err := scratchBuf.Unmarshal(msgBlackhole); err != nil {
						b.Error("wrong decode", err)
					}
				}
			})
		}
	}
}
func BenchmarkClusterRestore(b *testing.B) {
	defer tracing.Disable()()

	// TODO(dan): count=10000 has some issues replicating. Investigate.
	for _, numAccounts := range []int{10, 100, 1000} {
		b.Run(strconv.Itoa(numAccounts), func(b *testing.B) {
			ctx, dir, tc, kvDB, _, cleanupFn := backupRestoreTestSetup(b, numAccounts)
			defer cleanupFn()

			// TODO(dan): Once mjibson's sql -> kv function is committed, use it
			// here on the output of bankDataInsert to generate the backup data
			// instead of this call.
			desc, err := sql.Backup(ctx, *kvDB, dir, tc.Server(0).Clock().Now())
			if err != nil {
				b.Fatal(err)
			}
			b.SetBytes(desc.DataSize)

			rebalanceLeases(b, tc)

			b.ResetTimer()
			table := parser.TableName{DatabaseName: "bench", TableName: "bank"}
			for i := 0; i < b.N; i++ {
				if _, err := sql.Restore(ctx, *kvDB, dir, table); err != nil {
					b.Fatal(err)
				}
			}
		})
	}
}
func BenchmarkEncoder(b *testing.B) {
	buf, err := ioutil.ReadFile("../testdata/e.txt")
	if err != nil {
		b.Fatal(err)
	}
	if len(buf) == 0 {
		b.Fatalf("test file has no data")
	}
	for e := 4; e <= 6; e++ {
		n := int(math.Pow10(e))
		// Build a buffer of exactly n bytes by repeating the test data.
		buf0 := buf
		buf1 := make([]byte, n)
		for i := 0; i < n; i += len(buf0) {
			if len(buf0) > n-i {
				buf0 = buf0[:n-i]
			}
			copy(buf1[i:], buf0)
		}
		buf0 = nil
		runtime.GC()
		b.Run(fmt.Sprint("1e", e), func(b *testing.B) {
			b.SetBytes(int64(n))
			for i := 0; i < b.N; i++ {
				w := NewWriter(ioutil.Discard, LSB, 8)
				w.Write(buf1)
				w.Close()
			}
		})
	}
}
func BenchmarkByteBuffer(b *testing.B) {
	input := []byte("test writing this sentence to a buffer")

	b.Run("byteBuffer", func(b *testing.B) {
		buf := NewByteBuffer(1024)
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			buf.Write(input)
			buf.Bytes()
			buf.Reset()
		}
	})

	b.Run("bytes.Buffer", func(b *testing.B) {
		buf := bytes.NewBuffer(make([]byte, 0, 1024))
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			buf.Write(input)
			buf.Bytes()
			buf.Reset()
		}
	})
}
func BenchmarkSubnetLookup(b *testing.B) {
	var subnetLookup SubnetLookup

	b.Run("load routes file", func(b *testing.B) {
		routesData, err := ioutil.ReadFile("test_routes.dat")
		if err != nil {
			b.Skipf("can't load test routes file: %s", err)
		}
		for n := 0; n < b.N; n++ {
			subnetLookup, err = NewSubnetLookupFromRoutes(routesData)
			if err != nil {
				b.Fatalf("NewSubnetLookup failed: %s", err)
			}
		}
	})

	if subnetLookup == nil {
		b.Skipf("no test routes file")
	}

	b.Run("lookup random IP address", func(b *testing.B) {
		for n := 0; n < b.N; n++ {
			ip := make([]byte, 4)
			binary.BigEndian.PutUint32(ip, rand.Uint32())
			_ = subnetLookup.ContainsIPAddress(net.IP(ip))
		}
	})
}
func BenchmarkDecoder(b *testing.B) {
	buf, err := ioutil.ReadFile("../testdata/e.txt")
	if err != nil {
		b.Fatal(err)
	}
	if len(buf) == 0 {
		b.Fatalf("test file has no data")
	}
	for e := 4; e <= 6; e++ {
		n := int(math.Pow10(e))
		b.Run(fmt.Sprint("1e", e), func(b *testing.B) {
			b.StopTimer()
			b.SetBytes(int64(n))
			// Compress exactly n bytes of repeated test data, then measure
			// only the decode path below.
			buf0 := buf
			compressed := new(bytes.Buffer)
			w := NewWriter(compressed, LSB, 8)
			for i := 0; i < n; i += len(buf0) {
				if len(buf0) > n-i {
					buf0 = buf0[:n-i]
				}
				w.Write(buf0)
			}
			w.Close()
			buf1 := compressed.Bytes()
			buf0, compressed, w = nil, nil, nil
			runtime.GC()
			b.StartTimer()
			for i := 0; i < b.N; i++ {
				io.Copy(ioutil.Discard, NewReader(bytes.NewReader(buf1), LSB, 8))
			}
		})
	}
}
func BenchmarkServerGet(b *testing.B) {
	srv := &Server{
		Root: testdata.Dir,
	}
	for _, tc := range []struct {
		name     string
		filename string
	}{
		{"Small", testdata.SmallFileName},
		{"Medium", testdata.MediumFileName},
		{"Large", testdata.LargeFileName},
		{"Huge", testdata.HugeFileName},
	} {
		b.Run(tc.name, func(b *testing.B) {
			params := imageserver.Params{
				imageserver_source.Param: tc.filename,
			}
			var bs int
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				im, err := srv.Get(params)
				if err != nil {
					b.Fatal(err)
				}
				bs = len(im.Data)
			}
			b.SetBytes(int64(bs))
		})
	}
}
func benchmarkSizes(b *testing.B, sizes []int, fn func(b *testing.B, n int)) {
	for _, n := range sizes {
		b.Run(fmt.Sprint(n), func(b *testing.B) {
			b.SetBytes(int64(n))
			fn(b, n)
		})
	}
}
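// A minimal sketch of how benchmarkSizes might be invoked. The wrapper name,
// the size list, and the sha256 body are illustrative assumptions, not part
// of the original package (requires importing "crypto/sha256").
func BenchmarkHashSizes(b *testing.B) {
	benchmarkSizes(b, []int{32, 1024, 64 * 1024}, func(b *testing.B, n int) {
		data := make([]byte, n)
		for i := 0; i < b.N; i++ {
			sha256.Sum256(data)
		}
	})
}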
func runGCD(b *testing.B, aSize, bSize uint) {
	b.Run("WithoutXY", func(b *testing.B) {
		runGCDExt(b, aSize, bSize, false)
	})
	b.Run("WithXY", func(b *testing.B) {
		runGCDExt(b, aSize, bSize, true)
	})
}
func BenchmarkNewTensor(b *testing.B) {
	var (
		// Some sample sizes from the Inception image labeling model,
		// where input tensors correspond to a 224x224 RGB image
		// flattened into a vector.
		vector [224 * 224 * 3]int32
	)
	b.Run("[150528]", func(b *testing.B) { benchmarkNewTensor(b, vector) })
}
func BenchmarkLeafSize(b *testing.B) {
	for n := 0; n <= 16; n++ {
		b.Run(fmt.Sprint(n), func(b *testing.B) { LeafSizeHelper(b, 10, n) })
	}
	// Try some large lengths.
	for _, n := range []int{32, 64} {
		b.Run(fmt.Sprint(n), func(b *testing.B) { LeafSizeHelper(b, 10, n) })
	}
}
func BenchmarkFormatFloat(b *testing.B) {
	for _, c := range ftoaBenches {
		b.Run(c.name, func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				FormatFloat(c.float, c.fmt, c.prec, c.bitSize)
			}
		})
	}
}
func benchmarkGet(b *testing.B, name string, image *imageserver.Image, parallelism int) {
	b.Run(name, func(b *testing.B) {
		cch := newTestCache(b)
		defer func() {
			_ = cch.Pool.Close()
		}()
		cachetest.BenchmarkGet(b, cch, parallelism, image)
	})
}
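// Hypothetical entry points for benchmarkGet; the wrapper names and
// parallelism values are assumptions, though testdata.Small and testdata.Huge
// also appear in BenchmarkResize above.
func BenchmarkGetSmall(b *testing.B) { benchmarkGet(b, "Small", testdata.Small, 1) }
func BenchmarkGetHuge(b *testing.B)  { benchmarkGet(b, "Huge", testdata.Huge, 16) }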
func doBench(b *testing.B, f func(b *testing.B, p *Profile, s string)) {
	for _, bp := range benchProfiles {
		for _, d := range benchData {
			b.Run(bp.name+"/"+d.name, func(b *testing.B) {
				f(b, bp.p, d.str)
			})
		}
	}
}
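// A sketch of a doBench caller, assuming Profile exposes a String method as
// in golang.org/x/text/secure/precis; the benchmark name is hypothetical.
func BenchmarkProfileString(b *testing.B) {
	doBench(b, func(b *testing.B, p *Profile, s string) {
		for i := 0; i < b.N; i++ {
			p.String(s)
		}
	})
}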
func benchmarkReadWrite(b *testing.B, fracRead float64) {
	numReadGoRoutines := int(fracRead * float64(*benchq))
	for _, p := range hashPairs {
		b.Run(p.label, func(b *testing.B) {
			ReadWrite(*benchn, numReadGoRoutines, *benchq-numReadGoRoutines, p.newFunc, b)
		})
	}
}
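// Hypothetical wrappers showing how the read fraction might be varied; the
// names and the 0.75/0.25 splits are illustrative.
func BenchmarkReadMostly(b *testing.B)  { benchmarkReadWrite(b, 0.75) }
func BenchmarkWriteMostly(b *testing.B) { benchmarkReadWrite(b, 0.25) }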
func BenchmarkLatency(b *testing.B) {
	for _, mode := range []string{"Max", "Dynamic"} {
		for _, kbps := range []int{200, 500, 1000, 2000, 5000} {
			name := fmt.Sprintf("%sPacket/%dkbps", mode, kbps)
			b.Run(name, func(b *testing.B) {
				latency(b, kbps*1000, mode == "Max")
			})
		}
	}
}
func BenchmarkThroughput(b *testing.B) {
	for _, mode := range []string{"Max", "Dynamic"} {
		for size := 1; size <= 64; size <<= 1 {
			name := fmt.Sprintf("%sPacket/%dMB", mode, size)
			b.Run(name, func(b *testing.B) {
				throughput(b, int64(size<<20), mode == "Max")
			})
		}
	}
}
func BenchmarkProbablyPrime(b *testing.B) {
	p, _ := new(Int).SetString("203956878356401977405765866929034577280193993314348263094772646453283062722701277632936616063144088173312372882677123879538709400158306567338328279154499698366071906766440037074217117805690872792848149112022286332144876183376326512083574821647933992961249917319836219304274280243803104015000563790123", 10)
	for _, rep := range []int{1, 5, 10, 20} {
		b.Run(fmt.Sprintf("Rep=%d", rep), func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				p.ProbablyPrime(rep)
			}
		})
	}
}
func BenchmarkAppendFloat(b *testing.B) {
	dst := make([]byte, 30)
	for _, c := range ftoaBenches {
		b.Run(c.name, func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				AppendFloat(dst[:0], c.float, c.fmt, c.prec, c.bitSize)
			}
		})
	}
}
func benchmarkHMAC(b *testing.B, fn func() hash.Hash) {
	for _, size := range sizes {
		b.Run(size.name, func(b *testing.B) {
			var key [KeySize]byte
			h := hmac.New(fn, key[:])
			benchmarkHash(b, h, size.l)
		})
	}
}
func BenchmarkToAndFrom(b *testing.B) {
	benchmarks := []struct {
		k int
	}{
		{10},
		{100},
		{1000},
		{10000},
		{100000},
		{1000000},
		{10000000},
	}
	for _, bm := range benchmarks {
		for which := 0; which < 3; which++ {
			var name string
			switch which {
			case 0:
				fmt.Println()
				name = "Flatb"
			case 1:
				name = "Fixed"
			case 2:
				name = "Proto"
			}
			b.Run(fmt.Sprintf("%s-%d", name, bm.k), func(b *testing.B) {
				uids := make([]uint64, bm.k)
				for i := 0; i < bm.k; i++ {
					uids[i] = uint64(rand.Int63())
				}
				var max, sz int
				b.ResetTimer()
				for i := 0; i < b.N; i++ {
					var err error
					switch which {
					case 0:
						err, sz = ToAndFromFlat(uids)
					case 1:
						err, sz = ToAndFromProtoAlt(uids)
					case 2:
						err, sz = ToAndFromProto(uids)
					}
					if err != nil {
						b.Error(err)
					}
					if max < sz {
						max = sz
					}
					// runtime.GC() -- actually makes FB look worse.
				}
			})
		}
	}
}
func benchBytes(b *testing.B, sizes []int, f func(b *testing.B, n int)) {
	for _, n := range sizes {
		b.Run(valName(n), func(b *testing.B) {
			if len(bmbuf) < n {
				bmbuf = make([]byte, n)
			}
			b.SetBytes(int64(n))
			f(b, n)
		})
	}
}
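// A sketch of a benchBytes caller, assuming it lives in the bytes package
// tests where IndexByte and the shared bmbuf buffer are in scope; the sizes
// and the wrapper name are illustrative. benchBytes grows bmbuf to at least
// n before invoking the body, so the slice below is always valid.
func BenchmarkIndexByteSketch(b *testing.B) {
	benchBytes(b, []int{32, 4 << 10, 4 << 20}, func(b *testing.B, n int) {
		buf := bmbuf[0:n]
		buf[n-1] = 'x' // place the target at the end so the whole buffer is scanned
		for i := 0; i < b.N; i++ {
			if p := IndexByte(buf, 'x'); p != n-1 {
				b.Fatalf("IndexByte: found at %d, want %d", p, n-1)
			}
		}
		buf[n-1] = '\x00'
	})
}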
func runGCD(b *testing.B, aSize, bSize uint) {
	if isRaceBuilder && (aSize > 1000 || bSize > 1000) {
		b.Skip("skipping on race builder")
	}
	b.Run("WithoutXY", func(b *testing.B) {
		runGCDExt(b, aSize, bSize, false)
	})
	b.Run("WithXY", func(b *testing.B) {
		runGCDExt(b, aSize, bSize, true)
	})
}
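// Plausible wrappers in the style of math/big's GCD benchmarks; the exact
// operand sizes are illustrative.
func BenchmarkGCD10x10(b *testing.B)    { runGCD(b, 10, 10) }
func BenchmarkGCD100x1000(b *testing.B) { runGCD(b, 100, 1000) }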
func BenchmarkAppendSliceLarge(b *testing.B) {
	for _, length := range []int{1 << 10, 4 << 10, 16 << 10, 64 << 10, 256 << 10, 1024 << 10} {
		y := make([]byte, length)
		b.Run(fmt.Sprint(length, "Bytes"), func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				// blackhole is a package-level sink; resetting it each
				// iteration forces a fresh allocation for the append.
				blackhole = nil
				blackhole = append(blackhole, y...)
			}
		})
	}
}