func BenchmarkSubnetLookup(b *testing.B) { var subnetLookup SubnetLookup b.Run("load routes file", func(b *testing.B) { routesData, err := ioutil.ReadFile("test_routes.dat") if err != nil { b.Skipf("can't load test routes file: %s", err) } for n := 0; n < b.N; n++ { subnetLookup, err = NewSubnetLookupFromRoutes(routesData) if err != nil { b.Fatalf("NewSubnetLookup failed: %s", err) } } }) if subnetLookup == nil { b.Skipf("no test routes file") } b.Run("lookup random IP address", func(b *testing.B) { for n := 0; n < b.N; n++ { ip := make([]byte, 4) binary.BigEndian.PutUint32(ip, rand.Uint32()) _ = subnetLookup.ContainsIPAddress(net.IP(ip)) } }) }
func benchmarkRez(b *testing.B, filter rez.Filter) { defer func() { err := recover() if err != nil { b.Skipf("skip: %s", err) } }() benchmark(b, newResizeFuncRez(filter)) }
func Benchmark_NewNetworkList(b *testing.B) { routesData, err := ioutil.ReadFile("test_routes.dat") if err != nil { b.Skipf("can't load test routes file: %s", err) } for n := 0; n < b.N; n++ { list, _ = NewNetworkList(routesData) } }
func BenchmarkExport2(b *testing.B) { b.StopTimer() r, err := http.Get("http://www.imdb.com/title/tt0118715/") if err != nil { b.Skipf("skipping, net/http err: %s", err.Error()) } b.StartTimer() for i := 0; i < b.N; i++ { Extract(r.Body) } }
func BenchmarkAppenderPostgresql(b *testing.B) { b.Skipf("Postgresql benchmark!") storage, err := NewStorage(PostgreSQL, "postgres://*****:*****@server/linear?sslmode=disable", "linearevents") if err != nil { b.Fatalf("Fatal error %s", err.Error()) } runDatabaseBenchmark(b, storage) }
func BenchmarkAppenderMsSql(b *testing.B) { b.Skipf("SQL Server benchmark!") db, err := NewStorage(MSSQL, "Server=xxx;Database=sss;User Id=xx;Password=xxx;", "Event") if err != nil { b.Fatalf("Fatal error %s", err.Error()) } runDatabaseBenchmark(b, db) }
func Benchmark_containsRandomAddr(b *testing.B) { if list == nil { b.Skipf("no test routes file") } rand.Seed(0) for n := 0; n < b.N; n++ { ip := make([]byte, 4) binary.BigEndian.PutUint32(ip, rand.Uint32()) isLocalAddr = list.ContainsIpAddress(net.IP(ip)) } }
func BenchmarkCounter(b *testing.B) { once.Do(func() { var buf []byte buf, err = ioutil.ReadFile("/usr/share/dict/connectives") if err != nil { return } for _, b := range bytes.Fields(buf) { words = append(words, string(b)) } }) if err != nil { b.Skipf("could not open dictionary: %v", err) } c := NewCounter() r := rand.New(rand.NewSource(1234)) z := rand.NewZipf(r, 2, 1, uint64(len(words)-1)) var seq [1024]int for i := 0; i < len(seq); i++ { // seq[i] = rand.Intn(len(words)) seq[i] = int(z.Uint64()) } _ = z b.ResetTimer() // for i := 0; i < b.N; i++ { // c.Add(words[b.N%len(words)]) // c.Add(words[rand.Intn(len(words))]) // } b.RunParallel(func(pb *testing.PB) { var i int for pb.Next() { // c.Add(words[rand.Intn(len(words))]) // c.Add(words[int(z.Uint64())]) c.Add(words[seq[i]]) i = (i + 1) % 1024 } }) b.StopTimer() if got := c.Sum(); got != b.N { b.Errorf("Sum=%d want %d", got, b.N) } }
func BenchmarkS3Get(b *testing.B) { if testutil.SkipS3 { b.SkipNow() } if !benchPutOk { b.Skipf("did not run BenchmarkS3Put") } destDir := filepath.Join(testutil.TempDir, "dest-s3-benchmark") for i := 0; i < b.N; i++ { n, err := Get(benchBucket, destDir) if err != nil { b.Fatalf("Get failed: %s", err) } b.SetBytes(n) } }
// benchmarkPostgres probes for a local PostgreSQL server, opens an
// SSL-required connection, ensures the "bench" schema exists, and hands
// the connection to f. Skips when no server is listening.
//
// Note: this requires SSL on your local Postgres server. To use
// Cockroach's checked-in testing certificates for Postgres' SSL, first
// find your server's configuration file:
// ```
// $ psql -h localhost -p 5432 -c 'SHOW config_file'
//                config_file
// -----------------------------------------
//  /usr/local/var/postgres/postgresql.conf
// (1 row)
// ```
// Then set the following values (restart Postgres afterwards):
// ```
// ssl = on
// ssl_cert_file = '$GOPATH/src/github.com/cockroachdb/cockroach/resource/test_certs/node.server.crt'
// ssl_key_file = '$GOPATH/src/github.com/cockroachdb/cockroach/resource/test_certs/node.server.key'
// ssl_ca_file = '$GOPATH/src/github.com/cockroachdb/cockroach/resource/test_certs/ca.crt'
// ```
// where `$GOPATH/src/github.com/cockroachdb/cockroach` is your local
// Cockroach source directory.
func benchmarkPostgres(b *testing.B, f func(b *testing.B, db *gosql.DB)) {
	const addr = "localhost:5432"

	// Quick TCP probe: skip (rather than fail) when no server is up.
	conn, dialErr := net.Dial("tcp", addr)
	if dialErr != nil {
		b.Skipf("unable to connect to postgres server on %s: %s", addr, dialErr)
	}
	conn.Close()

	db, err := gosql.Open("postgres", "sslmode=require host=localhost port=5432")
	if err != nil {
		b.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Exec(`CREATE SCHEMA IF NOT EXISTS bench`); err != nil {
		b.Fatal(err)
	}

	f(b, db)
}
// benchmarkMySQL probes for a local MySQL server, opens a root
// connection, ensures the "bench" database exists, and hands the
// connection to f. Skips when no server is listening.
func benchmarkMySQL(b *testing.B, f func(b *testing.B, db *gosql.DB)) {
	const addr = "localhost:3306"

	// Quick TCP probe: skip (rather than fail) when no server is up.
	conn, dialErr := net.Dial("tcp", addr)
	if dialErr != nil {
		b.Skipf("unable to connect to mysql server on %s: %s", addr, dialErr)
	}
	conn.Close()

	db, err := gosql.Open("mysql", "root@tcp(localhost:3306)/")
	if err != nil {
		b.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Exec(`CREATE DATABASE IF NOT EXISTS bench`); err != nil {
		b.Fatal(err)
	}

	f(b, db)
}
// benchTemplate runs testFunc once per iteration over each
// fixtures/bench*.tar file, timing only the testFunc call itself
// (file open/close happen with the timer stopped).
func benchTemplate(b *testing.B, testFunc func(b *testing.B, file io.Reader)) {
	matches, err := filepath.Glob("./fixtures/bench[0-9]*.tar")
	if err != nil {
		b.Skipf("Error while getting benchmark fixtures:\n%s", err)
	}
	if len(matches) == 0 {
		b.Skip("No benchmark fixtures found!\nPut them in './fixtures/' with names like bench1.tar bench2.tar ...")
	}
	for _, match := range matches {
		b.StopTimer()
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			file, err := os.Open(match)
			if err != nil {
				// Previously ignored with `_`; an unreadable fixture would
				// have handed testFunc an unusable reader.
				b.Fatalf("can't open fixture %s: %s", match, err)
			}
			b.StartTimer()
			testFunc(b, file)
			b.StopTimer()
			file.Close()
		}
		b.StopTimer()
	}
}
// downloadTestdata ensures the named reference test file exists under the
// *testdata directory, downloading it from baseURL when absent. When the
// file is missing and the -download flag is not set, the benchmark is
// skipped (Skipf does not return). A non-nil return removes any partially
// written file via the deferred cleanup on the named result errRet.
func downloadTestdata(b *testing.B, basename string) (errRet error) {
	filename := filepath.Join(*testdata, basename)
	// Treat an existing, non-empty file as already downloaded.
	if stat, err := os.Stat(filename); err == nil && stat.Size() != 0 {
		return nil
	}
	if !*download {
		b.Skipf("test data not found; skipping benchmark without the -download flag")
	}
	// Download the official snappy C++ implementation reference test data
	// files for benchmarking.
	if err := os.Mkdir(*testdata, 0777); err != nil && !os.IsExist(err) {
		return fmt.Errorf("failed to create testdata: %s", err)
	}
	f, err := os.Create(filename)
	if err != nil {
		return fmt.Errorf("failed to create %s: %s", filename, err)
	}
	defer f.Close()
	// Registered after f.Close's defer so the file is closed before it is
	// removed; fires only when a later step sets errRet.
	defer func() {
		if errRet != nil {
			os.Remove(filename)
		}
	}()
	url := baseURL + basename
	resp, err := http.Get(url)
	if err != nil {
		return fmt.Errorf("failed to download %s: %s", url, err)
	}
	defer resp.Body.Close()
	if s := resp.StatusCode; s != http.StatusOK {
		return fmt.Errorf("downloading %s: HTTP status code %d (%s)", url, s, http.StatusText(s))
	}
	// Stream the payload straight to disk.
	_, err = io.Copy(f, resp.Body)
	if err != nil {
		return fmt.Errorf("failed to download %s to %s: %s", url, filename, err)
	}
	return nil
}
// BenchmarkPipelineWalker measures a full pipe.Walk over *testWalkerPath
// with *maxWorkers pairs of dir/file workers, reporting the longest time
// any single directory waited for its entries. Skipped unless the
// walkerpath flag is set.
func BenchmarkPipelineWalker(b *testing.B) {
	if *testWalkerPath == "" {
		b.Skipf("walkerpath not set, skipping BenchPipelineWalker")
	}

	// max records the slowest per-directory wait; guarded by m because
	// dirWorker goroutines update it concurrently.
	var max time.Duration
	m := sync.Mutex{}

	// fileWorker drains file entries, acknowledging each via its Result
	// channel, until the entry channel closes or the pipeline is cancelled.
	fileWorker := func(wg *sync.WaitGroup, done <-chan struct{}, ch <-chan pipe.Entry) {
		defer wg.Done()
		for {
			select {
			case e, ok := <-ch:
				if !ok {
					// channel is closed
					return
				}

				// simulate backup
				//time.Sleep(10 * time.Millisecond)
				e.Result() <- true
			case <-done:
				// pipeline was cancelled
				return
			}
		}
	}

	// dirWorker waits for every entry of a directory to complete, tracks
	// the longest such wait in max, then acknowledges the directory.
	dirWorker := func(wg *sync.WaitGroup, done <-chan struct{}, ch <-chan pipe.Dir) {
		defer wg.Done()
		for {
			select {
			case dir, ok := <-ch:
				if !ok {
					// channel is closed
					return
				}

				start := time.Now()

				// wait for all content
				for _, ch := range dir.Entries {
					<-ch
				}

				d := time.Since(start)
				m.Lock()
				if d > max {
					max = d
				}
				m.Unlock()

				dir.Result() <- true
			case <-done:
				// pipeline was cancelled
				return
			}
		}
	}

	for i := 0; i < b.N; i++ {
		// Fresh channels and workers per iteration; max resets so each
		// run reports its own slowest directory.
		max = 0
		done := make(chan struct{})
		entCh := make(chan pipe.Entry, 200)
		dirCh := make(chan pipe.Dir, 200)

		var wg sync.WaitGroup
		b.Logf("starting %d workers", *maxWorkers)
		for i := 0; i < *maxWorkers; i++ {
			wg.Add(2)
			go dirWorker(&wg, done, dirCh)
			go fileWorker(&wg, done, entCh)
		}

		// Split feeds jobs into the dir/entry channels; only this sender
		// closes them, after Split returns.
		jobs := make(chan pipe.Job, 200)
		wg.Add(1)
		go func() {
			pipe.Split(jobs, dirCh, entCh)
			close(entCh)
			close(dirCh)
			wg.Done()
		}()

		resCh := make(chan pipe.Result, 1)
		err := pipe.Walk([]string{*testWalkerPath}, done, jobs, resCh)
		OK(b, err)

		// wait for all workers to terminate
		wg.Wait()

		// wait for final result
		<-resCh

		b.Logf("max duration for a dir: %v", max)
	}
}