func BenchmarkCacheComplete(b *testing.B) {
	b.StopTimer()
	paths := DefaultPaths()
	if len(paths) == 0 {
		b.Skip("No default paths available")
	}
	tests := []string{"mscorlib.dll", "System.dll"}
	c := Cache{paths: paths}
	for _, test := range tests {
		if _, err := c.Load(test); err != nil {
			b.Error(err)
		}
	}
	tests2 := []content.Type{
		content.Type{Name: content.FullyQualifiedName{Absolute: "net://type/System.String"}},
	}
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		for _, test := range tests2 {
			if _, err := c.Complete(&test); err != nil {
				b.Error(err)
			}
		}
	}
}
func BenchmarkJsonUnmarshal(b *testing.B) {
	b.ReportAllocs()
	data, err := json.Marshal(&TestStruct)
	if err != nil {
		b.Error(err)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		var result CommonStruct
		err := json.Unmarshal(data, &result)
		if err != nil {
			b.Error(err)
		}
	}
}
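// The JSON benchmark above and the gob benchmark at the end of this file both
// marshal the package-level fixture TestStruct and decode into CommonStruct.
// Those definitions are not part of this listing; the sketch below is a purely
// hypothetical stand-in (field names, tags, and types are assumptions) shown
// only to make the shape of the fixtures concrete, not the package's actual
// declarations.
type CommonStruct struct {
	ID   int64    `json:"id"`
	Name string   `json:"name"`
	Tags []string `json:"tags"`
}

var TestStruct = CommonStruct{ID: 1, Name: "bench", Tags: []string{"a", "b", "c"}}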
func BenchmarkConnStress(b *testing.B) {
	const workers = 16

	cluster := createCluster()
	cluster.NumConns = 1
	session := createSessionFromCluster(cluster, b)
	defer session.Close()

	if err := createTable(session, "CREATE TABLE IF NOT EXISTS conn_stress (id int primary key)"); err != nil {
		b.Fatal(err)
	}

	var seed uint64
	writer := func(pb *testing.PB) {
		seed := atomic.AddUint64(&seed, 1)
		var i uint64 = 0
		for pb.Next() {
			if err := session.Query("insert into conn_stress (id) values (?)", i*seed).Exec(); err != nil {
				b.Error(err)
				return
			}
			i++
		}
	}

	b.SetParallelism(workers)
	b.RunParallel(writer)
}
func BenchmarkSingleConn(b *testing.B) {
	srv := NewTestServer(b, 3)
	defer srv.Stop()
	cluster := testCluster(srv.Address, 3)
	// Set the timeout arbitrarily low so that the query hits the timeout in a
	// timely manner.
	cluster.Timeout = 500 * time.Millisecond
	cluster.NumConns = 1

	db, err := cluster.CreateSession()
	if err != nil {
		b.Fatalf("NewCluster: %v", err)
	}
	defer db.Close()

	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			err := db.Query("void").Exec()
			if err != nil {
				b.Error(err)
				return
			}
		}
	})
}
func benchmarkBTreeSetACIDFiler(b *testing.B, sz int) {
	dir, testDbName := temp()
	defer os.RemoveAll(dir)

	f, err := os.OpenFile(testDbName, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0600)
	if err != nil {
		b.Fatal(err)
	}
	defer f.Close()

	wal, err := os.OpenFile(testDbName+".wal", os.O_CREATE|os.O_EXCL|os.O_RDWR, 0600)
	if err != nil {
		b.Fatal(err)
	}
	defer wal.Close()

	filer, err := NewACIDFiler(NewSimpleFileFiler(f), wal)
	if err != nil {
		b.Error(err)
		return
	}

	benchmarkBTreeSetFiler(b, filer, sz)
}
func BenchmarkDomainCreateSnapshot(b *testing.B) {
	env := newTestEnvironment(b).withDomain()
	defer env.cleanUp()

	var xml bytes.Buffer
	data := newTestSnapshotData()
	if err := testSnapshotTmpl.Execute(&xml, data); err != nil {
		b.Fatal(err)
	}
	xmlStr := xml.String()

	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		snap, err := env.dom.CreateSnapshot(xmlStr, SnapCreateDefault)
		if err != nil {
			b.Error(err)
		}
		defer snap.Free()

		if err := snap.Delete(SnapDeleteDefault); err != nil {
			b.Error(err)
		}
	}
	b.StopTimer()
}
func BenchmarkQuery(b *testing.B) {
	dir1, dir2, ps, clog, err := prepare()
	if err != nil {
		b.Error(err)
		return
	}
	defer closeAll(dir1, dir2, clog)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		gq, _, err := gql.Parse(q1)
		if err != nil {
			b.Error(err)
			return
		}
		g, err := query.ToSubGraph(gq, ps)
		if err != nil {
			b.Error(err)
			return
		}
		ch := make(chan error)
		go query.ProcessGraph(g, ch, ps)
		if err := <-ch; err != nil {
			b.Error(err)
			return
		}
		var l query.Latency
		_, err = g.ToJson(&l)
		if err != nil {
			b.Error(err)
			return
		}
	}
}
func BenchmarkRequest(b *testing.B) {
	m := NewModel(1e6)
	m.AddRepo("default", "testdata", nil)
	m.ScanRepo("default")

	const n = 1000
	files := make([]protocol.FileInfo, n)
	t := time.Now().Unix()
	for i := 0; i < n; i++ {
		files[i] = protocol.FileInfo{
			Name:     fmt.Sprintf("file%d", i),
			Modified: t,
			Blocks:   []protocol.BlockInfo{{100, []byte("some hash bytes")}},
		}
	}

	fc := FakeConnection{
		id:          "42",
		requestData: []byte("some data to return"),
	}
	m.AddConnection(fc, fc)
	m.Index("42", "default", files)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		data, err := m.requestGlobal("42", "default", files[i%n].Name, 0, 32, nil)
		if err != nil {
			b.Error(err)
		}
		if data == nil {
			b.Error("nil data")
		}
	}
}
// Benchmark_WithValidateBaseUrl-4	  3000	488089 ns/op	188333 B/op	272 allocs/op => with debug enabled
// Benchmark_WithValidateBaseUrl-4	200000	  8925 ns/op	  2924 B/op	 49 allocs/op => no debug
func Benchmark_WithValidateBaseUrl(b *testing.B) {
	// todo: there is room for optimization with disabled debugging. too many allocs
	store.PkgLog.SetLevel(log.StdLevelInfo)
	req, err := http.NewRequest(httputils.MethodGet, "https://corestore.io/customer/comments/view?id=1916#tab=ratings", nil)
	if err != nil {
		b.Fatal(err)
	}

	finalHandler := store.WithValidateBaseURL(middlewareConfigReader)(ctxhttp.HandlerFunc(func(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
		return errors.New("This handler should not be called!")
	}))
	want := "https://www.corestore.io/customer/comments/view?id=1916#tab=ratings"
	rec := httptest.NewRecorder()
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if err := finalHandler.ServeHTTPContext(middlewareCtxStoreService, rec, req); err != nil {
			b.Error(err)
		}
		if rec.HeaderMap.Get("Location") != want {
			b.Errorf("Have: %s\nWant: %s", rec.HeaderMap.Get("Location"), want)
		}
		rec.HeaderMap = nil
	}
}
func BenchmarkClientFutureParallel(b *testing.B) {
	var err error

	conn, err := Connect(server, opts)
	if err != nil {
		b.Errorf("No connection available")
	}

	_, err = conn.Replace(spaceNo, []interface{}{uint(1), "hello", "world"})
	if err != nil {
		b.Errorf("Replace failed: %s", err)
	}

	b.RunParallel(func(pb *testing.PB) {
		exit := false
		for !exit {
			var fs [N]*Future
			var j int
			for j = 0; j < N && pb.Next(); j++ {
				fs[j] = conn.SelectAsync(spaceNo, indexNo, 0, 1, IterAll, []interface{}{uint(1)})
			}
			exit = j < N
			for j > 0 {
				j--
				_, err = fs[j].Get()
				if err != nil {
					b.Error(err)
				}
			}
		}
	})
}
func BenchmarkRemoteCollector1000(b *testing.B) {
	const (
		nCollections = 1000
		nAnnotations = 100
	)

	l, err := net.Listen("tcp", ":0")
	if err != nil {
		b.Fatal(err)
	}
	cs := NewServer(l, collectorFunc(func(span SpanID, anns ...Annotation) error {
		return nil
	}))
	go cs.Start()

	c := NewRemoteCollector(l.Addr().String())
	// cc.Collector.(*RemoteCollector).Debug = true

	for n := 0; n < b.N; n++ {
		for nc := 0; nc < nCollections; nc++ {
			id := NewRootSpanID()
			anns := make([]Annotation, nAnnotations)
			for a := range anns {
				anns[a] = Annotation{"k1", []byte("v1")}
			}
			if err := c.Collect(NewRootSpanID(), anns...); err != nil {
				b.Fatalf("Collect(%+v, %v): %s", id, anns, err)
			}
		}
	}

	if err := c.Close(); err != nil {
		b.Error(err)
	}
}
func BenchmarkClientFutureTyped(b *testing.B) {
	var err error

	conn, err := Connect(server, opts)
	if err != nil {
		b.Errorf("No connection available")
	}

	_, err = conn.Replace(spaceNo, []interface{}{uint(1), "hello", "world"})
	if err != nil {
		b.Errorf("Replace failed: %s", err)
	}

	for i := 0; i < b.N; i += N {
		var fs [N]*Future
		for j := 0; j < N; j++ {
			fs[j] = conn.SelectAsync(spaceNo, indexNo, 0, 1, IterAll, []interface{}{uint(1)})
		}
		for j := 0; j < N; j++ {
			var r []tuple
			err = fs[j].GetTyped(&r)
			if err != nil {
				b.Error(err)
			}
			if len(r) != 1 || r[0].Id != 12 {
				b.Errorf("Doesn't match %v", r)
			}
		}
	}
}
func BenchmarkTimedAcquire(b *testing.B) {
	const P = 10
	const UnitSize = 1000
	N := int64(b.N / UnitSize / 4)
	s := newTestSemaphore(P)
	done := make(chan struct{}, P)
	for i := 0; i < P; i++ {
		go func() {
			for atomic.AddInt64(&N, int64(-1)) > 0 {
				for j := 0; j < UnitSize; j++ {
					if s.TimedAcquire(1, time.Duration(100)*time.Millisecond) {
						s.Release(1)
					}
					if s.TimedAcquire(2, time.Duration(100)*time.Millisecond) {
						s.Release(2)
					}
				}
			}
			done <- struct{}{}
		}()
	}
	for i := 0; i < P; i++ {
		<-done
	}
	if err := s.Error(); err != "" {
		b.Error(err)
	}
}
func BenchmarkRpc(b *testing.B) {
	var d Daemon
	if err := d.init(); err != nil {
		b.Fatal(err)
	}
	go d.serverloop()
	defer d.close()

	if c, err := jsonrpc.Dial("tcp", fmt.Sprintf("127.0.0.1%s", port)); err != nil {
		b.Error(err)
	} else {
		defer c.Close()
		s := time.Now()
		for i := 0; i < b.N; i++ {
			var a content.CompleteAtArgs
			a.Location.Line = 95
			a.Location.Column = 45
			a.Location.File.Name = "../net/testdata/CompleteSharp.cs"
			var cmp1 content.CompletionResult
			if err := c.Call("Net.CompleteAt", &a, &cmp1); err != nil {
				b.Error(err)
			}
		}
		e := time.Since(s).Seconds() * 1000
		b.Logf("%d calls in %f ms = %f ms/call", b.N, e, e/float64(b.N))
	}
}
func BenchmarkParsePrefix(b *testing.B) {
	for n := 0; n < b.N; n++ {
		if p := ParsePrefix(prefix); p == nil {
			b.Error("parsed prefix should not be nil")
		}
	}
}
// Benchmark_WithInitStoreByFormCookie-4	  3000	481881 ns/op	189103 B/op	232 allocs/op => with debug enabled
// Benchmark_WithInitStoreByFormCookie-4	300000	  4797 ns/op	  1016 B/op	 16 allocs/op => debug disabled
func Benchmark_WithInitStoreByFormCookie(b *testing.B) {
	store.PkgLog.SetLevel(log.StdLevelInfo)
	b.ReportAllocs()

	wantStoreCode := "nz"
	ctx := store.NewContextReader(context.Background(), getInitializedStoreService(scope.Option{Website: scope.MockID(2)}))
	mw := store.WithInitStoreByFormCookie()(benchValidationHandler(b, wantStoreCode))
	rec := httptest.NewRecorder()
	req, err := http.NewRequest(httputils.MethodGet, "https://corestore.io/store/list/", nil)
	if err != nil {
		b.Fatal(err)
	}
	req.AddCookie(&http.Cookie{
		Name:  store.ParamName,
		Value: wantStoreCode,
	})

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if err := mw.ServeHTTPContext(ctx, rec, req); err != nil {
			b.Error(err)
		}
	}
}
func BenchmarkDomainSaveRestore(b *testing.B) {
	env := newTestEnvironment(b).withDomain()
	defer env.cleanUp()

	if err := env.dom.Create(DomCreateDefault); err != nil {
		b.Fatal(err)
	}

	file, ioerr := ioutil.TempFile("", fmt.Sprintf("%v-bench-save-restore_", env.domData.Name))
	if ioerr != nil {
		b.Fatal(ioerr)
	}
	defer os.Remove(file.Name())

	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		if err := env.dom.Save(file.Name(), "", DomSaveDefault); err != nil {
			b.Error(err)
		}
		if err := env.conn.RestoreDomain(file.Name(), "", DomSaveDefault); err != nil {
			b.Error(err)
		}
	}
	b.StopTimer()
}
func benchmarkAllocatorRndGetRollbackFiler(b *testing.B, sz int) {
	os.Remove(testDbName)
	f, err := os.OpenFile(testDbName, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0600)
	if err != nil {
		b.Fatal(err)
	}

	defer func() {
		f.Close()
		os.Remove(testDbName)
	}()

	g := NewSimpleFileFiler(f)
	var filer *RollbackFiler
	if filer, err = NewRollbackFiler(
		g,
		func(sz int64) error {
			if err = g.Truncate(sz); err != nil {
				return err
			}
			return g.Sync()
		},
		g,
	); err != nil {
		b.Error(err)
		return
	}

	benchmarkAllocatorRndGet(b, filer, sz)
}
func BenchmarkRoute2DB1(b *testing.B) {
	b.StopTimer()
	var sp Sparrow
	sp.Initialize(nil)
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		rc, err := sp.Route2DB("db", "table", "user_id", "id0041", false, false)
		if err != nil {
			b.Error(err)
		}
		if rc == nil || rc.DBNode == nil || len(rc.DBName) <= 0 || len(rc.TableName) <= 0 {
			b.Errorf("dbatom:%v, dbname: %v, tablename:%v\n", rc.DBNode, rc.DBName, rc.TableName)
		}
		if rc.DBNode.IsMaster {
			b.Errorf("node %s:%d should not be a master.", rc.DBNode.Ip, rc.DBNode.Port)
		}
		if !rc.DBNode.DBEnable {
			b.Errorf("node %s:%d is disabled.", rc.DBNode.Ip, rc.DBNode.Port)
		}
	}
}
func benchmarkAllocatorRndGetACIDFiler(b *testing.B, sz int) {
	os.Remove(testDbName)
	os.Remove(walName)
	f, err := os.OpenFile(testDbName, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0600)
	if err != nil {
		b.Fatal(err)
	}

	defer func() {
		f.Close()
		os.Remove(testDbName)
	}()

	wal, err := os.OpenFile(walName, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0600)
	if err != nil {
		b.Fatal(err)
	}

	defer func() {
		wal.Close()
		os.Remove(walName)
	}()

	filer, err := NewACIDFiler(NewSimpleFileFiler(f), wal)
	if err != nil {
		b.Error(err)
		return
	}

	benchmarkAllocatorRndGet(b, filer, sz)
}
func BenchmarkConfigBasic(b *testing.B) {
	pc := NewConfig()
	cc := NewConfig()
	if err := pc.Reload(cc); err != nil {
		b.Error("reload failed.")
	}
}
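// BenchmarkConfigBasic above performs a single Reload rather than looping over
// b.N, so it measures one-off cost rather than per-reload cost. The sketch
// below (hypothetical name, not part of the original suite) shows the
// conventional form that drives Reload from the benchmark loop, using only the
// NewConfig/Reload identifiers already present above.
func BenchmarkConfigReloadLoop(b *testing.B) {
	// Hypothetical variant: same setup, but Reload runs b.N times.
	pc := NewConfig()
	cc := NewConfig()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if err := pc.Reload(cc); err != nil {
			b.Error("reload failed.")
		}
	}
}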
func validateBatchBench(b *testing.B, db *DB) {
	var rollback = errors.New("sentinel error to cause rollback")

	validate := func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte("bench"))
		h := fnv.New32a()
		buf := make([]byte, 4)
		for id := uint32(0); id < 1000; id++ {
			binary.LittleEndian.PutUint32(buf, id)
			h.Reset()
			_, _ = h.Write(buf[:])
			k := h.Sum(nil)
			v := bucket.Get(k)
			if v == nil {
				b.Errorf("not found id=%d key=%x", id, k)
				continue
			}
			if g, e := v, []byte("filler"); !bytes.Equal(g, e) {
				b.Errorf("bad value for id=%d key=%x: %s != %q", id, k, g, e)
			}
			if err := bucket.Delete(k); err != nil {
				return err
			}
		}
		// should be empty now
		c := bucket.Cursor()
		for k, v := c.First(); k != nil; k, v = c.Next() {
			b.Errorf("unexpected key: %x = %q", k, v)
		}
		return rollback
	}

	if err := db.Update(validate); err != nil && err != rollback {
		b.Error(err)
	}
}
func benchmarkBTreeSetRollbackFiler(b *testing.B, sz int) {
	dir, testDbName := temp()
	defer os.RemoveAll(dir)

	f, err := os.OpenFile(testDbName, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0600)
	if err != nil {
		b.Fatal(err)
	}
	defer f.Close()

	g := NewSimpleFileFiler(f)
	var filer *RollbackFiler
	if filer, err = NewRollbackFiler(
		g,
		func(sz int64) error {
			if err = g.Truncate(sz); err != nil {
				return err
			}
			return g.Sync()
		},
		g,
	); err != nil {
		b.Error(err)
		return
	}

	benchmarkBTreeSetFiler(b, filer, sz)
}
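// The filer helpers above (benchmarkBTreeSetACIDFiler,
// benchmarkAllocatorRndGetRollbackFiler, benchmarkAllocatorRndGetACIDFiler,
// benchmarkBTreeSetRollbackFiler) all take a size parameter and are meant to
// be invoked from named benchmarks. The wrappers below are a hypothetical
// sketch of such call sites; the names and sizes are assumptions, not the
// package's actual drivers.
func BenchmarkBTreeSetACIDFiler1e3(b *testing.B)     { benchmarkBTreeSetACIDFiler(b, 1000) }     // hypothetical
func BenchmarkBTreeSetRollbackFiler1e3(b *testing.B) { benchmarkBTreeSetRollbackFiler(b, 1000) } // hypothetical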
func BenchmarkCallAsync(b *testing.B) {
	b.StopTimer()
	bus, err := SessionBus()
	if err != nil {
		b.Fatal(err)
	}
	name := bus.Names()[0]
	obj := bus.BusObject()
	c := make(chan *Call, 50)
	done := make(chan struct{})
	go func() {
		for i := 0; i < b.N; i++ {
			v := <-c
			if v.Err != nil {
				b.Error(v.Err)
			}
			s := v.Body[0].(string)
			if s != name {
				b.Errorf("got %s, wanted %s", s, name)
			}
		}
		close(done)
	}()
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		obj.Go("org.freedesktop.DBus.GetNameOwner", 0, c, name)
	}
	<-done
}
func BenchmarkRequest(b *testing.B) {
	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
	m := NewModel("/tmp", nil, "syncthing", "dev", db)
	m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
	m.ScanRepo("default")

	const n = 1000
	files := make([]protocol.FileInfo, n)
	t := time.Now().Unix()
	for i := 0; i < n; i++ {
		files[i] = protocol.FileInfo{
			Name:     fmt.Sprintf("file%d", i),
			Modified: t,
			Blocks:   []protocol.BlockInfo{{0, 100, []byte("some hash bytes")}},
		}
	}

	fc := FakeConnection{
		id:          node1,
		requestData: []byte("some data to return"),
	}
	m.AddConnection(fc, fc)
	m.Index(node1, "default", files)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		data, err := m.requestGlobal(node1, "default", files[i%n].Name, 0, 32, nil)
		if err != nil {
			b.Error(err)
		}
		if data == nil {
			b.Error("nil data")
		}
	}
}
func BenchmarkManyConcurrentQueries(b *testing.B) {
	b.ReportAllocs()
	// To see lock contention in Go 1.4, 16~ cores and 128~ goroutines are required.
	const parallelism = 16

	db := newTestDB(b, "magicquery")
	defer closeDB(b, db)
	db.SetMaxIdleConns(runtime.GOMAXPROCS(0) * parallelism)

	stmt, err := db.Prepare("SELECT|magicquery|op|op=?,millis=?")
	if err != nil {
		b.Fatal(err)
	}
	defer stmt.Close()

	b.SetParallelism(parallelism)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			rows, err := stmt.Query("sleep", 1)
			if err != nil {
				b.Error(err)
				return
			}
			rows.Close()
		}
	})
}
func BenchmarkConnRoutingKey(b *testing.B) {
	const workers = 16

	cluster := createCluster()
	cluster.NumConns = 1
	cluster.PoolConfig.HostSelectionPolicy = TokenAwareHostPolicy(RoundRobinHostPolicy())
	session := createSessionFromCluster(cluster, b)
	defer session.Close()

	if err := createTable(session, "CREATE TABLE IF NOT EXISTS routing_key_stress (id int primary key)"); err != nil {
		b.Fatal(err)
	}

	var seed uint64
	writer := func(pb *testing.PB) {
		seed := atomic.AddUint64(&seed, 1)
		var i uint64 = 0
		query := session.Query("insert into routing_key_stress (id) values (?)")

		for pb.Next() {
			if _, err := query.Bind(i * seed).GetRoutingKey(); err != nil {
				b.Error(err)
				return
			}
			i++
		}
	}

	b.SetParallelism(workers)
	b.RunParallel(writer)
}
func BenchmarkMatch(b *testing.B) {
	stignore := `
.frog
.frog*
.frogfox
.whale
.whale/*
.dolphin
.dolphin/*
~ferret~.*
.ferret.*
flamingo.*
flamingo
*.crow
*.crow
`
	pats := New(false)
	err := pats.Parse(bytes.NewBufferString(stignore), ".stignore")
	if err != nil {
		b.Error(err)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		result = pats.Match("filename")
	}
}
func benchPutItemParallel(p, c int, b *testing.B) {
	svc := dynamodb.New(&aws.Config{
		DisableSSL: aws.Bool(true),
	})

	av, err := dynamodbattribute.ConvertToMap(dbItem{Key: "MyKey", Data: "MyData"})
	if err != nil {
		b.Fatal("expect no ConvertToMap errors", err)
	}
	params := &dynamodb.PutItemInput{
		Item:      av,
		TableName: aws.String(testTableName),
	}
	b.N = c

	b.ResetTimer()
	b.SetParallelism(p)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			_, err = svc.PutItem(params)
			if err != nil {
				b.Error("expect no request errors", err)
			}
		}
	})
}
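// benchPutItemParallel fixes b.N itself (b.N = c) and takes a parallelism
// level, so it is intended to be called from named wrappers rather than run
// directly. The wrappers below are a hypothetical sketch of such call sites;
// the names, parallelism levels, and request counts are assumptions and may
// differ from the real driver benchmarks.
func BenchmarkPutItem_Parallel1_10(b *testing.B)   { benchPutItemParallel(1, 10, b) }   // hypothetical
func BenchmarkPutItem_Parallel10_100(b *testing.B) { benchPutItemParallel(10, 100, b) } // hypothetical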
func BenchmarkGobDecoder(b *testing.B) {
	b.ReportAllocs()
	dataBuff := &bytes.Buffer{}
	err := gob.NewEncoder(dataBuff).Encode(&TestStruct)
	if err != nil {
		b.Error(err)
	}
	data := dataBuff.Bytes()
	buff := bytes.NewBuffer(data)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		result := CommonStruct{}
		err := gob.NewDecoder(bufio.NewReader(buff)).Decode(&result)
		if err != nil {
			b.Error(err)
		}
		// Refill the buffer so the next iteration has an encoded value to decode.
		buff.Write(data)
	}
}