// BenchmarkDAWG ...
func BenchmarkDAWG(b *testing.B) {
	b.N = 0
	initBenchCtx()
	dict := loadDict(benchCtx.Words100k)
	DAWG := loadDAWG()
	bytesDAWG := loadBytesDAWG()
	recordDAWG := loadRecordDAWG()
	intDAWG := loadIntDAWG()
	tests := []testOp{
		{"get() (hits)", "M ops/sec", 10, 3, 5, getHitBench},
		{"get() (misses)", "M ops/sec", 100, 5, 5, getMissesBench},
		{"__contains__ (hits)", "M ops/sec", 10, 3, 5, containsHitBench},
		{"__contains__ (misses)", "M ops/sec", 10, 3, 5, containsMissesBench},
		{"items()", " ops/sec", 1, 1, 5, itemsBench},
		{"keys()", " ops/sec", 1, 1, 5, keysBench},
	}
	fmt.Printf("\n====== Benchmarks (100k unique unicode words) =======\n\n")
	for _, test := range tests {
		bench(dict, " dict", test)
		bench(DAWG, " DAWG", test)
		bench(bytesDAWG, " BytesDAWG", test)
		bench(recordDAWG, "RecordDAWG", test)
		bench(intDAWG, " IntDAWG", test)
		fmt.Println()
	}
	b.SkipNow()
}
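BenchmarkDAWG zeroes b.N and ends with b.SkipNow(), so `go test -bench` only acts as an entry point for the hand-rolled bench/testOp harness that prints its own ops/sec table. The same comparison could be expressed with sub-benchmarks so the testing package sizes the iteration count itself. A minimal sketch under assumptions: the lookuper interface, its Get signature, and loadWords are hypothetical stand-ins, not part of the original harness.

// Sketch only: lookuper, Get, and loadWords are hypothetical; the original
// structures (dict, DAWG, BytesDAWG, ...) may expose different APIs.
type lookuper interface {
	Get(word string) (interface{}, bool)
}

func BenchmarkGetHits(b *testing.B) {
	words := loadWords() // hypothetical: the 100k-word corpus as a []string
	structures := map[string]lookuper{
		"DAWG":    loadDAWG(),
		"IntDAWG": loadIntDAWG(),
	}
	for name, s := range structures {
		b.Run(name, func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				s.Get(words[i%len(words)]) // every key is present, so each lookup is a hit
			}
		})
	}
}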
func BenchmarkJournalQueuePop(b *testing.B) {
	b.N = 1000000
	var err error
	var p []byte
	var fq Queue
	if fq, err = NewJournalQueue(fpath); err != nil {
		panic(err)
	}
	defer fq.Close()
	defer os.RemoveAll(fpath)
	b.ResetTimer()
	b.StartTimer()
	var total int64 = 0
	for i := 0; i < b.N; i++ {
		if p, err = fq.Pop(); err != nil {
			b.FailNow()
			return
		}
		total += int64(len(p))
		c := int(binary.LittleEndian.Uint32(p))
		l := int(binary.LittleEndian.Uint32(p[4:]))
		if c != i || l != len(p) {
			b.FailNow()
		}
	}
	b.SetBytes(total / int64(b.N))
	b.StopTimer()
}
func BenchmarkLinearSearch(b *testing.B) {
	b.N = 10000
	var haystack []int
	b.StopTimer()
	for i := 0; i < b.N; i++ {
		haystack = append(haystack, i)
	}
	needles := rand.Perm(b.N)
	b.StartTimer()
	for _, n := range needles { // search for the permuted values, not the loop index
		if v := LinearSearch(n, haystack); v == -1 {
			b.Fatalf("%d not found", n)
		}
	}
}
func BenchmarkPush(b *testing.B) {
	stack := NewStack()
	b.N = 102400
	for i := 0; i < b.N; i++ {
		stack.Push(i)
	}
}
func Benchmark_StringValue(b *testing.B) {
	b.N = 1e6
	str := strings.Repeat("a", 1000)
	for i := 0; i < b.N; i++ {
		__value = NewStringValue(str)
	}
}
func benchPutItemParallel(p, c int, b *testing.B) {
	svc := dynamodb.New(&aws.Config{
		DisableSSL: aws.Bool(true),
	})

	av, err := dynamodbattribute.ConvertToMap(dbItem{Key: "MyKey", Data: "MyData"})
	if err != nil {
		b.Fatal("expect no ConvertToMap errors", err)
	}
	params := &dynamodb.PutItemInput{
		Item:      av,
		TableName: aws.String(testTableName),
	}
	b.N = c

	b.ResetTimer()
	b.SetParallelism(p)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			_, err = svc.PutItem(params)
			if err != nil {
				b.Error("expect no request errors", err)
			}
		}
	})
}
func Benchmark_BytesValue(b *testing.B) {
	b.N = 1e6
	barr := bytes.Repeat([]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0}, 1000)
	for i := 0; i < b.N; i++ {
		__value = NewBytesValue(barr)
	}
}
func BenchmarkGoRadixLookup(b *testing.B) {
	var urls []string
	tr := radix.New()
	f, err := os.Open("testdata/url.txt")
	if err != nil {
		b.Fatal(err)
	}
	defer f.Close()
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		s := scanner.Text()
		tr.Insert(s, len(urls))
		urls = append(urls, s)
	}
	if err := scanner.Err(); err != nil {
		b.Fatal(err)
	}
	b.N = len(urls)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if v, _ := tr.Get(urls[i]); v != i {
			b.Errorf("expect %d, got %d\n", i, v)
		}
	}
}
func Benchmark_LongValue(b *testing.B) {
	b.N = 1e6
	in := int64(10916927583729485)
	for i := 0; i < b.N; i++ {
		__value = NewLongValue(in)
	}
}
func BenchmarkMapLookup(b *testing.B) {
	var urls []string
	m := map[string]int{}
	f, err := os.Open("testdata/url.txt")
	if err != nil {
		b.Fatal(err)
	}
	defer f.Close()
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		s := scanner.Text()
		m[s] = len(urls)
		urls = append(urls, s)
	}
	if err := scanner.Err(); err != nil {
		b.Fatal(err)
	}
	b.N = len(urls)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if v, ok := m[urls[i]]; !ok || v != i {
			b.Errorf("expect %d, got %d\n", i, v)
		}
	}
}
func Benchmark_IntegerValue(b *testing.B) {
	b.N = 1e6
	in := 1091
	for i := 0; i < b.N; i++ {
		__value = NewIntegerValue(in)
	}
}
func BenchmarkSafeSetContainsConcurrent(b *testing.B) {
	b.StopTimer()
	b.N = 1000000
	ss := NewSafeSet()
	for i := 0; i < b.N; i++ {
		ss.Add(fmt.Sprintf("%d", i))
	}
	chars := []string{}
	for i := 0; i < b.N; i++ {
		chars = append(chars, fmt.Sprintf("%d", i))
	}
	wg := sync.WaitGroup{}
	workers := runtime.NumCPU()
	each := b.N / workers
	wg.Add(workers)
	b.StartTimer()
	for i := 0; i < workers; i++ {
		go func(base int) {
			// each worker probes its own slice of keys
			for i := 0; i < each; i++ {
				ss.Contains(chars[base+i])
			}
			wg.Done()
		}(i * each)
	}
	wg.Wait()
}
func bench(b *testing.B, grammar *ast.Grammar, parsers []testsuite.ResetParser, record bool) {
	num := len(parsers)
	if *bN != 0 {
		b.N = *bN
	}
	var m *mem.Mem
	var err error
	if record {
		m, err = mem.NewRecord(grammar)
	} else {
		m, err = mem.New(grammar)
	}
	if err != nil {
		b.Fatal(err)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if err := parsers[i%num].Reset(); err != nil {
			b.Fatal(err)
		}
		if _, err := m.Validate(parsers[i%num]); err != nil {
			b.Fatal(err)
		}
	}
}
func BenchmarkRawSql(b *testing.B) {
	DB, _ := sql.Open("postgres", "user=gorm dbname=gorm sslmode=disable")
	DB.SetMaxIdleConns(10)
	insertSql := "INSERT INTO emails (user_id,email,user_agent,registered_at,created_at,updated_at) VALUES ($1,$2,$3,$4,$5,$6) RETURNING id"
	querySql := "SELECT * FROM emails WHERE email = $1 ORDER BY id LIMIT 1"
	updateSql := "UPDATE emails SET email = $1, updated_at = $2 WHERE id = $3"
	deleteSql := "DELETE FROM emails WHERE id = $1"
	b.N = 2000
	for x := 0; x < b.N; x++ {
		var id int64
		e := strconv.Itoa(x) + "*****@*****.**"
		now := time.Now()
		email := BigEmail{Email: e, UserAgent: "pc", RegisteredAt: &now}
		// Insert
		DB.QueryRow(insertSql, email.UserId, email.Email, email.UserAgent, email.RegisteredAt, time.Now(), time.Now()).Scan(&id)
		// Query
		rows, _ := DB.Query(querySql, email.Email)
		rows.Close()
		// Update
		DB.Exec(updateSql, "new-"+e, time.Now(), id)
		// Delete
		DB.Exec(deleteSql, id)
	}
}
func BenchmarkAddSettingNLimit(bt *testing.B) {
	bt.N = 1000
	for i := 0; i < bt.N; i++ {
		add(a, b)
	}
}
func BenchmarkAdd(b *testing.B) {
	b.StopTimer()
	if err := exec.Command("rm", "-rf", "/tmp/rrd").Run(); err != nil {
		b.Fatal(err)
	}
	if err := os.Mkdir("/tmp/rrd", 0755); err != nil {
		b.Fatal(err)
	}
	for i := 0; i < 256; i++ {
		if err := os.Mkdir(fmt.Sprintf("/tmp/rrd/%d", i), 0755); err != nil {
			b.Fatal(err)
		}
	}
	b.StartTimer()
	b.N = b_size
	n := b.N / work_size
	for j := 0; j < work_size; j++ {
		wg.Add(1)
		go func(j int) {
			defer wg.Done()
			for i := 0; i < n; i++ {
				filename := fmt.Sprintf("/tmp/rrd/%d/%d-%d.rrd", i%256, j, i)
				add(b, filename)
			}
		}(j)
	}
	wg.Wait()
}
func BenchmarkCMSReadDataFrom(b *testing.B) {
	b.StopTimer()
	b.N = 10000
	freq := 73
	epsilon, delta := 0.001, 0.99
	cms := NewCountMinSketch(epsilon, delta)
	a := []byte(`a`)
	for i := 0; i < freq; i++ {
		cms.Add(a)
	}
	var buf bytes.Buffer
	_, err := cms.WriteDataTo(&buf)
	if err != nil {
		b.Errorf("unexpected error %s\n", err)
	}
	// concatenate b.N copies of the serialized sketch so each timed
	// ReadDataFrom call has a fresh chunk to consume
	data := make([]byte, 0)
	for i := 0; i < b.N; i++ {
		data = append(data, buf.Bytes()...)
	}
	rd := bytes.NewReader(data)
	newCMS := NewCountMinSketch(epsilon, delta)
	b.StartTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		_, err := newCMS.ReadDataFrom(rd)
		if err != nil {
			b.Errorf("unexpected error %s\n", err)
		}
	}
}
func Benchmark_Pack_String____100(b *testing.B) {
	val := strings.Repeat("s", 100)
	b.N = 1000
	runtime.GC()
	b.ResetTimer()
	doPack(val, b)
}
func BenchmarkUpdate(b *testing.B) {
	b.N = b_size
	for i := 0; i < b.N; i++ {
		update(b, fmt.Sprintf("/tmp/rrd/%d/%d.rrd", i&0xff, i))
	}
}
func Benchmark_Pack_________Int32(b *testing.B) {
	val := rand.Int31()
	b.N = 1000
	runtime.GC()
	b.ResetTimer()
	doPack(val, b)
}
func Benchmark_Pack_Complex_Array_ListIter(b *testing.B) {
	val := myList([]string{
		strings.Repeat("s", 1),
		strings.Repeat("s", 2),
		strings.Repeat("s", 3),
		strings.Repeat("s", 4),
		strings.Repeat("s", 5),
		strings.Repeat("s", 6),
		strings.Repeat("s", 7),
		strings.Repeat("s", 8),
		strings.Repeat("s", 9),
		strings.Repeat("s", 10),
	})
	b.N = 1000
	runtime.GC()
	b.ResetTimer()
	doPack(val, b)
}
func Benchmark_Pack_Complex_ValueArray(b *testing.B) {
	val := []Value{NewValue(1), NewValue(strings.Repeat("s", 100000)), NewValue(1.75), NewValue(nil)}
	b.N = 1000
	runtime.GC()
	b.ResetTimer()
	doPack(val, b)
}
func Benchmark_Pack_Complex_Array_Direct(b *testing.B) {
	val := []interface{}{1, 1, 1, "a simple string", nil, rand.Int63(), []byte{12, 198, 211}}
	b.N = 1000
	runtime.GC()
	b.ResetTimer()
	doPack(val, b)
}
func BenchmarkFilterConcurrent(b *testing.B) {
	b.StopTimer()
	b.N = 5000000
	f := NewDataFilter("name", 3)
	f.SetFilter("1000", "gt", 2000.0)
	strmap := make([]string, 0)
	for i := 0; i < 10000; i++ {
		strmap = append(strmap, strconv.Itoa(i))
	}
	wg := sync.WaitGroup{}
	workers := runtime.NumCPU()
	each := b.N / workers
	wg.Add(workers)
	b.StartTimer()
	for i := 0; i < workers; i++ {
		go func() {
			for i := 0; i < each; i++ {
				f.Filter(strmap[i%10000], float64(i), i)
			}
			wg.Done()
		}()
	}
	wg.Wait()
	if len(f.GetAllFiltered()) != 3 {
		b.Error("error, Filter")
	}
}
func Benchmark_WriteCommand_String___1000(b *testing.B) {
	set := "put_bench_str_1000"
	value := strings.Repeat("s", 1000)
	b.N = 1000
	runtime.GC()
	b.ResetTimer()
	doPut(set, value, b)
}
func BenchmarkStandardFileMillion(b *testing.B) {
	f, _ := os.OpenFile("testdata/logfilemillion.log", os.O_CREATE|os.O_RDWR|os.O_APPEND, 0666)
	log := logg.New(f, "", logg.LstdFlags)
	b.N = 1000000
	for i := 0; i < b.N; i++ {
		log.Print("testing this is a testing about benchmark")
	}
}
func Benchmark_WriteCommand_Complex_Array(b *testing.B) {
	set := "put_bench_str_10000"
	value := []interface{}{1, 1, 1, "a simple string", nil, rand.Int63(), []byte{12, 198, 211}}
	b.N = 1000
	runtime.GC()
	b.ResetTimer()
	doPut(set, value, b)
}
func Benchmark_PackFloat64(b *testing.B) {
	b.N = 1e6
	testPacker := newPacker()
	for i := 0; i < b.N; i++ {
		testPacker.buffer.Reset()
		testPacker.PackFloat64(float64(i))
	}
}
func Benchmark_WriteCommand_________Int32(b *testing.B) {
	set := "put_bench_integer"
	value := rand.Int31()
	b.N = 1000
	runtime.GC()
	b.ResetTimer()
	doPut(set, value, b)
}
func Benchmark_PackInt16(b *testing.B) {
	b.N = 1e6
	testPacker := newPacker()
	for i := 0; i < b.N; i++ {
		testPacker.buffer.Reset()
		testPacker.PackShort(0, int16(i))
	}
}
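Every benchmark above assigns b.N directly, pinning the iteration count instead of letting the testing package grow it until timings stabilise; the concurrent variants also split work across goroutines by hand, whereas benchPutItemParallel already shows the b.RunParallel route. For contrast, a minimal sketch of the conventional shape; doWork and the payload are placeholders, not code from this suite.

// Conventional shape: the framework chooses b.N (or drives b.Loop in newer
// Go releases), setup stays outside the timed region via ResetTimer, and
// SetBytes/ReportAllocs attach throughput and allocation figures to each run.
func BenchmarkDoWork(b *testing.B) {
	payload := bytes.Repeat([]byte{0xAB}, 1024) // setup, excluded from timing below
	b.SetBytes(int64(len(payload)))
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		doWork(payload) // placeholder for the operation under test
	}
}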