func BenchmarkCacheComplete(b *testing.B) { b.StopTimer() paths := DefaultPaths() if len(paths) == 0 { b.Skip("No default paths available") } tests := []string{"mscorlib.dll", "System.dll"} c := Cache{paths: paths} for _, test := range tests { if _, err := c.Load(test); err != nil { b.Error(err) } } tests2 := []content.Type{ content.Type{Name: content.FullyQualifiedName{Absolute: "net://type/System.String"}}, } b.StartTimer() for i := 0; i < b.N; i++ { for _, test := range tests2 { if _, err := c.Complete(&test); err != nil { b.Error(err) } } } }
func Benchmark_Proto_Binary(b *testing.B) { b.Skip() srv := &binaryTestServer{&baseTestServer{}} port, err := srv.listen() assert.NoError(b, err) srv.start() defer srv.Stop() client, err := rpc.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", port)) assert.NoError(b, err) defer client.Close() b.Log(port) b.ResetTimer() for i := 0; i < b.N; i++ { b.StartTimer() var res TestMessage req := TestMessage{ "12345678910", []byte("mama myla ramu"), } err = client.Call("ServiceFixture.Ping", &req, &res) b.StopTimer() assert.NoError(b, err) assert.Equal(b, req, res) b.SetBytes(int64(len(req.Data) * 2)) } }
func Benchmark_Proto_HTTP_Large(b *testing.B) { b.Skip() srv := &httpTestServer{&baseTestServer{}} port, err := srv.listen() assert.NoError(b, err) srv.start() defer srv.Stop() client, err := rpc.DialHTTPPath("tcp", fmt.Sprintf("127.0.0.1:%d", port), "/v1/rpc") assert.NoError(b, err) defer client.Close() b.Log(port, b.N) b.ResetTimer() for i := 0; i < b.N; i++ { b.StartTimer() var res TestMessage req := TestMessage{ fmt.Sprintf("%d", i), []byte(strings.Repeat("0", 1024*1024)), } err = client.Call("ServiceFixture.Ping", &req, &res) b.StopTimer() assert.NoError(b, err) assert.Equal(b, req, res) b.SetBytes(int64(len(req.Data) * 2)) } }
// bench runs a sorting benchmark of algo on pseudo-random data of sizes
// size-3 through size+3 (to exercise both parities and off-by-one
// boundaries). name is used in failure messages; when name is "Stable" the
// result is additionally checked for stability.
func bench(b *testing.B, size int, algo func(Interface), name string) {
	if stringspkg.HasSuffix(testenv.Builder(), "-race") && size > 1e4 {
		b.Skip("skipping slow benchmark on race builder")
	}
	b.StopTimer()
	data := make(intPairs, size)
	x := ^uint32(0) // xorshift-style PRNG state; deterministic across runs
	for i := 0; i < b.N; i++ {
		for n := size - 3; n <= size+3; n++ {
			// Refill data with pseudo-random keys; only the call to algo
			// runs with the timer started.
			for i := 0; i < len(data); i++ {
				x += x
				x ^= 1
				if int32(x) < 0 {
					x ^= 0x88888eef
				}
				// Mod n/5 produces many duplicate keys, which matters for
				// the stability check below.
				data[i].a = int(x % uint32(n/5))
			}
			data.initB()
			b.StartTimer()
			algo(data)
			b.StopTimer()
			if !IsSorted(data) {
				b.Errorf("%s did not sort %d ints", name, n)
			}
			if name == "Stable" && !data.inOrder() {
				b.Errorf("%s unstable on %d ints", name, n)
			}
		}
	}
}
func BenchmarkRequestsTLSoTCP(b *testing.B) { // Benchmarks the rate at which we can serve requests over a single, // TLS encrypted TCP channel over the loopback interface. // Load a certificate, skipping this benchmark if it doesn't exist cert, err := tls.LoadX509KeyPair("../../test/h1/cert.pem", "../../test/h1/key.pem") if err != nil { b.Skip(err) return } // Get a connected TCP pair conn0, conn1, err := getTCPConnectionPair() if err != nil { b.Fatal(err) } /// TLSify them conn0, conn1 = negotiateTLS(cert, conn0, conn1) defer conn0.Close() defer conn1.Close() // Bench it benchmarkRequestsConnPair(b, conn0, conn1) }
func BenchmarkAppendLog(b *testing.B) { bucketName := "bucketName" ownerName := "ownerName" artifactName := "artifactName" if testing.Short() { b.Skip("Skipping end-to-end test in short mode.") } client := setup(b) bucket, _ := client.NewBucket(bucketName, ownerName, 31) require.NotNil(b, bucket) artifact, _ := bucket.NewChunkedArtifact(artifactName) require.NotNil(b, artifact) // Typical logchunk of size ~4KB str := gen4KString() // Each 4K chunks sent takes ~1.3ms on a 4-core laptop (with synchronous_commit=off,fsync=off in // Postgres) b.ResetTimer() for i := 0; i < b.N; i++ { artifact.AppendLog(str) } // Make sure all logchunks are sent to API/DB. artifact.Flush() }
func BenchmarkStoreLeveldbDocs720(b *testing.B) { if testing.Short() { b.Skip("Skipping large store test in short mode") } runStoreBench(b, "leveldb", 720) }
func BenchmarkRedisClusterPing(b *testing.B) { if testing.Short() { b.Skip("skipping in short mode") } cluster := &clusterScenario{ ports: []string{"8220", "8221", "8222", "8223", "8224", "8225"}, nodeIds: make([]string, 6), processes: make(map[string]*redisProcess, 6), clients: make(map[string]*redis.Client, 6), } if err := startCluster(cluster); err != nil { b.Fatal(err) } defer stopCluster(cluster) client := cluster.clusterClient(nil) defer client.Close() b.ResetTimer() b.RunParallel(func(pb *testing.PB) { for pb.Next() { if err := client.Ping().Err(); err != nil { b.Fatal(err) } } }) }
func Benchmark_Proto_GRPC_Large(b *testing.B) { b.Skip() logx.SetLevel(logx.DEBUG) srv1 := new(GRPCServer) srv1.Start() defer srv1.srv.Stop() conn, err := grpc.Dial(fmt.Sprintf("localhost:%d", srv1.Port), grpc.WithInsecure()) assert.NoError(b, err) defer conn.Close() c := pb3.NewBarClient(conn) b.Log(srv1.Port, b.N) b.ResetTimer() for i := 0; i < b.N; i++ { b.StartTimer() req := pb3.TestRep{ fmt.Sprintf("%d", i), []byte(strings.Repeat("m", 1024*1024)), } res, err := c.Test(context.Background(), &req) b.StopTimer() assert.NoError(b, err) assert.EqualValues(b, req, *res) b.SetBytes(int64(len(req.Body) * 2)) } }
func BenchmarkRoundtripFileBoom(b *testing.B) { if *file == "" { b.Skip("no file specified") } for i := 0; i < b.N; i++ { br, err := boom.OpenBAM(*file) if err != nil { b.Fatalf("Open failed: %v", err) } f, err := os.OpenFile("/dev/null", os.O_APPEND|os.O_RDWR, 0666) if err != nil { b.Fatalf("Open failed: %v", err) } bw, err := boom.OpenBAMFile(f, "bw", br.Header()) if err != nil { b.Fatalf("NewWriter failed: %v", err) } for { r, _, err := br.Read() if err != nil { break } _, err = bw.Write(r) if err != nil { b.Fatalf("Write failed: %v", err) } } br.Close() f.Close() } }
func BenchmarkAtomicIncrement(store data.Store, b *testing.B) { if err := store.SetLifetime(time.Second*30, data.ScopeAll); err != nil { b.Skip("Set lifetime to all items is not supported") } b.ResetTimer() b.SetParallelism(50) b.RunParallel(func(pb *testing.PB) { for pb.Next() { if _, err := store.Increment("key001"); err != nil { b.Errorf("Could not increment value: %v", err) } } }) b.StopTimer() var result int if err := store.Get("key001", &result); err != nil { b.Errorf("Could not get stored value: %v", err) } if result != b.N { b.Errorf("Unexpected value: got %d instead of %d", result, b.N) } }
// Benchmark_words_load loads a word list from a hard-coded Windows path,
// builds a prefix tree from it, dumps the tree to d:\wordlist.json, and logs
// fanout statistics. NOTE(review): this never uses t.N, so it is a one-shot
// diagnostic rather than a real benchmark, and it is silently skipped when
// the word list is absent.
func Benchmark_words_load(t *testing.B) {
	// wordlist was downloaded from http://www-personal.umich.edu/~jlawler/wordlist
	f, err := os.Open("d:\\wordlist.txt")
	if err != nil {
		t.Skip()
		return
	}
	defer f.Close()
	s := bufio.NewScanner(f)
	words := make([]string, 0, 80000)
	for s.Scan() {
		txt := s.Text()
		words = append(words, txt)
	}
	// Insert longest words first — presumably to shape the tree; confirm
	// with the author whether insertion order matters for NewPrefixedTree.
	sort.Stable(byStringLengthDesc(words))
	tree := NewPrefixedTree("")
	for i := range words {
		// The stored value is the word's index into words.
		tree.Put(words[i], i)
	}
	of, err := os.Create("d:\\wordlist.json")
	panicif(err)
	defer of.Close()
	of.WriteString(tree.String())
	t.Log("Nodes: ", tree.NodeCount())
	t.Log("Max fanout: ", tree.root.MaxFanout())
	// Histogram of child counts per node (fanout distribution).
	histogram := make(map[int]int)
	tree.root.visit(func(n *Node) bool {
		count := len(n.Nodes)
		histogram[count] += 1
		return true
	})
	kvs := make([]kv, len(histogram))
	i := 0
	for k, v := range histogram {
		kvs[i] = kv{k, v}
		i++
	}
	// Log the histogram twice: once ordered by fanout, once by count.
	sort.Stable(byFanout(kvs))
	for i := range kvs {
		t.Logf("%+v", kvs[i])
	}
	t.Logf("=====")
	sort.Stable(byCount(kvs))
	for i := range kvs {
		t.Logf("%+v", kvs[i])
	}
	// Walk the stored indices along the path of "abating", logging each word.
	tree.root.Walk("abating", func(path string, h int) bool {
		t.Logf("%d(%s): %s ", h, path, words[h])
		return true
	})
}
func BenchmarkArchiveDirectory(b *testing.B) { if BenchArchiveDirectory == "" { b.Skip("benchdir not set, skipping BenchmarkArchiveDirectory") } for i := 0; i < b.N; i++ { archiveDirectory(b) } }
// Testing EnumerateBlobMeta because that's one of the few non-corpus index reading ops we still actually use. func BenchmarkEnumerateBlobMetaSQLite(b *testing.B) { if *flagBenchDir == "" { b.Skip("Enumerating benchmark needs -benchDir") } dbfile := filepath.Join(*flagBenchDir, "sqlite.db") enumerateMeta(b, dbfile, func(dbfile string) (sorted.KeyValue, error) { return sqlite.NewStorage(dbfile) }) }
func BenchmarkInterruptSQLite(b *testing.B) { if *flagBenchDir == "" { b.Skip("Interrupt benchmark needs -benchDir") } dbfile := filepath.Join(*flagBenchDir, "sqlite.db") benchmarkKillReindex(b, 15, dbfile, func(dbfile string) (sorted.KeyValue, error) { return sqlite.NewStorage(dbfile) }) }
func benchQuery(b *testing.B, query string, result interface{}) { b.Skip("current pq database-backed benchmarks are inconsistent") b.StopTimer() db := openTestConn(b) defer db.Close() b.StartTimer() for i := 0; i < b.N; i++ { benchQueryLoop(b, db, query, result) } }
func BenchmarkInterfacesAndMulticastAddrs(b *testing.B) { ifi := loopbackInterface() if ifi == nil { b.Skip("loopback interface not found") } for i := 0; i < b.N; i++ { if _, err := ifi.MulticastAddrs(); err != nil { b.Fatal(err) } } }
func BenchmarkInterfaceByName(b *testing.B) { ifi := loopbackInterface() if ifi == nil { b.Skip("loopback interface not found") } for i := 0; i < b.N; i++ { if _, err := InterfaceByName(ifi.Name); err != nil { b.Fatal(err) } } }
func BenchmarkInterfacesAndAddrs(b *testing.B) { ifi := loopbackInterface() if ifi == nil { b.Skip("loopback interface not found") } for i := 0; i < b.N; i++ { if _, err := ifi.Addrs(); err != nil { b.Fatalf("Interface.Addrs failed: %v", err) } } }
func BenchmarkInterfaceByIndex(b *testing.B) { ifi := loopbackInterface() if ifi == nil { b.Skip("loopback interface not found") } for i := 0; i < b.N; i++ { if _, err := InterfaceByIndex(ifi.Index); err != nil { b.Fatalf("InterfaceByIndex failed: %v", err) } } }
func runGCD(b *testing.B, aSize, bSize uint) { if isRaceBuilder && (aSize > 1000 || bSize > 1000) { b.Skip("skipping on race builder") } b.Run("WithoutXY", func(b *testing.B) { runGCDExt(b, aSize, bSize, false) }) b.Run("WithXY", func(b *testing.B) { runGCDExt(b, aSize, bSize, true) }) }
func BenchmarkModSqrt5430_3Mod4(b *testing.B) { if isRaceBuilder { b.Skip("skipping on race builder") } p := tri(5430) x := new(Int).SetUint64(2) for i := 0; i < b.N; i++ { x.SetUint64(2) x.modSqrt3Mod4Prime(x, p) } }
func BenchmarkGarden_Plants(b *testing.B) { g, err := NewGarden(test5.diagram, test5.children) if err != nil { b.Skip("BenchmarkGarden_Plants requires valid garden") } b.ResetTimer() for i := 0; i < b.N; i++ { for _, l := range test5.lookups { g.Plants(l.child) } } }
// BenchmarkBuildVocabAndVSharder runs BuildVocabAndVSharder once over a
// testdata corpus. NOTE(review): this ignores t.N, so it executes a single
// iteration regardless of the benchmark timing loop, and it skips — rather
// than fails — when the function under test errors; confirm with the author
// whether the second Skip should be t.Fatal.
func BenchmarkBuildVocabAndVSharder(t *testing.B) {
	f, e := fs.Open("testdata/internet-zh.num")
	if e != nil {
		// Missing testdata: skip rather than fail.
		t.Skip(e)
	}
	defer f.Close()
	_, _, e = BuildVocabAndVSharder(f, 10, true)
	if e != nil {
		t.Skip(e)
	}
}
func Benchmark_Manifest_NewFromBLOB_500MB(b *testing.B) { b.Skip() bn, err := fixtures.MakeBLOBPure(1024 * 1024 * 500) if err != nil { b.Fail() } defer fixtures.KillBLOB(bn) b.ResetTimer() for i := 0; i < b.N; i++ { _, _ = fixtures.NewShadowFromFile(bn) } }
func BenchmarkInterfaceByName(b *testing.B) { testHookUninstaller.Do(func() { uninstallTestHooks() }) ifi := loopbackInterface() if ifi == nil { b.Skip("loopback interface not found") } for i := 0; i < b.N; i++ { if _, err := InterfaceByName(ifi.Name); err != nil { b.Fatal(err) } } }
func BenchmarkInterfacesAndAddrs(b *testing.B) { testHookUninstaller.Do(uninstallTestHooks) ifi := loopbackInterface() if ifi == nil { b.Skip("loopback interface not found") } for i := 0; i < b.N; i++ { if _, err := ifi.Addrs(); err != nil { b.Fatal(err) } } }
func BenchmarkQueryDB(b *testing.B) { if testDB == nil { b.Skip("DB is not available") } ip := net.ParseIP("8.8.8.8") nIP, _ := ip2int(ip) for i := 0; i < b.N; i++ { _, err := testDB.Lookup(ip, nIP) if err != nil { b.Fatal(err) } } }
func BenchmarkInterruptCznic(b *testing.B) { if *flagBenchDir == "" { b.Skip("Interrupt benchmark needs -benchDir") } dbfile := filepath.Join(*flagBenchDir, "kvfile.db") // since cznic is much slower than levelDB at reindexing, we interrupt // it way less often. otherwise we might even blow up the max test run time // (10m) anyway. benchmarkKillReindex(b, 10, dbfile, func(dbfile string) (sorted.KeyValue, error) { return kvfile.NewStorage(dbfile) }) }
func benchmarkReindex(b *testing.B, dbname string, sortedProvider func(dbfile string) (sorted.KeyValue, error)) { if *flagBenchDir == "" { b.Skip("Reindexing benchmark needs -benchDir") } dbfile := filepath.Join(*flagBenchDir, dbname) idx := reindex(b, dbfile, sortedProvider) if _, err := idx.KeepInMemory(); err != nil { b.Fatal(err) } if err := idx.Close(); err != nil { b.Fatal(err) } }