Example #1
func BenchmarkVersion(b *testing.B) {
	ufs := new(ufs.Ufs)
	ufs.Dotu = false
	ufs.Id = "ufs"
	ufs.Debuglevel = *debug
	ufs.Msize = 8192
	ufs.Start(ufs)

	l, err := net.Listen("unix", "")
	if err != nil {
		b.Fatalf("Can not start listener: %v", err)
	}
	srvAddr := l.Addr().String()
	b.Logf("Server is at %v", srvAddr)
	go func() {
		if err = ufs.StartListener(l); err != nil {
			b.Fatalf("Can not start listener: %v", err)
		}
		b.Fatalf("Listener returned")
	}()
	var conn net.Conn
	for i := 0; i < b.N; i++ {
		if conn, err = net.Dial("unix", srvAddr); err != nil {
			// Sometimes, things just happen.
			//b.Logf("%v", err)
		} else {
			conn.Close()
		}
	}

}
Example #2
func BenchmarkTop10of100000Scores(b *testing.B) {

	matches := make(search.DocumentMatchCollection, 0, 100000)
	for i := 0; i < 100000; i++ {
		matches = append(matches, &search.DocumentMatch{
			ID:    strconv.Itoa(i),
			Score: rand.Float64(),
		})
	}
	searcher := &stubSearcher{
		matches: matches,
	}

	collector := NewTopScorerCollector(10)
	b.ResetTimer()

	err := collector.Collect(searcher)
	if err != nil {
		b.Fatal(err)
	}
	res := collector.Results()
	for _, dm := range res {
		b.Logf("%s - %f\n", dm.ID, dm.Score)
	}
}
Example #3
func BenchmarkGraphiteMemory(b *testing.B) {
	cfg := config.ConfigFile{}
	cfg.Graphite = config.GraphiteConfig{
		UDPListenPort: ":2000",
	}

	errorChannel := make(chan error, 1)

	go func() {
		for e := range errorChannel {
			b.Logf("%s", e)
		}
	}()

	p := ":2000"
	l := "/tmp/agent.db"
	ttl := "1h"
	aggregations.Init(&p, &l, &ttl, errorChannel)

	Init(&cfg, errorChannel)

	for n := 0; n < b.N; n++ {
		testGraphiteMemoryRun(b, 1)
	}

}
Example #4
func BenchmarkAPIHandler(b *testing.B) {
	app := testMemoryAppWithClients(1, 100)
	nCommands := 1000
	b.Logf("num channels: %v, num clients: %v, num unique clients %v, num commands: %v", app.clients.nChannels(), app.clients.nClients(), app.clients.nUniqueClients(), nCommands)
	commands := make([]map[string]interface{}, nCommands)
	command := map[string]interface{}{
		"method": "publish",
		"params": map[string]interface{}{
			"channel": "channel-0",
			"data":    map[string]bool{"benchmarking": true},
		},
	}
	for i := 0; i < nCommands; i++ {
		commands[i] = command
	}
	jsonData, _ := json.Marshal(commands)
	sign := auth.GenerateApiSign("secret", "test1", jsonData)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		rec := httptest.NewRecorder()
		req, _ := http.NewRequest("POST", "/api/test1", bytes.NewBuffer(jsonData))
		req.Header.Add("X-API-Sign", sign)
		req.Header.Add("Content-Type", "application/json")
		app.APIHandler(rec, req)
	}
	b.StopTimer()
}
Example #5
func BenchmarkPublish(b *testing.B) {
	ps := &PubSub{}
	c := make(chan int)
	ready := make(chan int)
	go func() {
		total := 0
		for i := range c {
			total += i
		}
		ready <- total
	}()
	s := ps.Subscribe(func(int) {
		c <- 1
	})
	for i := 0; i < b.N; i++ {
		ps.Publish(10)
		time.Sleep(1 * time.Microsecond)
	}
	s.Close()
	close(c)

	timer := time.NewTicker(5 * time.Second)
	var total int
	select {
	case <-timer.C:
		b.Fatal("timeout waiting for total")
	case total = <-ready:
	}
	b.Logf("%d: %+v", total, ps.Stats)
}
Example #6
// Ideally should perform much better than both BenchmarkUpdateDumb and
// BenchmarkUpdateWorst.
func BenchmarkUpdateFast(b *testing.B) {
	objs := benchObjs
	for len(objs) < nUpdateObjects {
		objs = append(objs, random())
	}
	benchObjs = objs

	tree := New()
	for _, o := range objs {
		tree.Add(o)
	}

	b.ResetTimer()

	fail := 0
	success := 0
	for n := 0; n < b.N; n++ {
		oldIndex := n % len(objs)
		oldObj := objs[oldIndex]
		newObj := offset(oldObj.(gfx.Bounds))
		if !tree.Update(oldObj, newObj) {
			fail++
		} else {
			success++
		}
		objs[oldIndex] = newObj
	}
	if fail > 0 {
		b.Logf("Update failure ratio: fail=%d success=%d.", fail, success)
		b.Fail()
	}
}
Example #7
func benchmarkChunker(b *testing.B, hash hash.Hash) {
	size := 10 * 1024 * 1024
	rd := bytes.NewReader(getRandom(23, size))

	b.ResetTimer()
	b.SetBytes(int64(size))

	var chunks int
	for i := 0; i < b.N; i++ {
		chunks = 0

		rd.Seek(0, 0)
		ch := chunker.New(rd, testPol, hash)

		for {
			_, err := ch.Next()

			if err == io.EOF {
				break
			}

			if err != nil {
				b.Fatalf("Unexpected error occurred: %v", err)
			}

			chunks++
		}
	}

	b.Logf("%d chunks, average chunk size: %d bytes", chunks, size/chunks)
}
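benchmarkChunker is a helper parameterized by the per-chunk hash, so concrete benchmarks differ only in the hash they pass in. A minimal sketch of such wrappers; the wrapper names and hash choices are illustrative assumptions, not code from the original package:
// Assumes crypto/sha256 and crypto/md5 are imported; any hash.Hash works.
func BenchmarkChunkerWithSHA256(b *testing.B) {
	benchmarkChunker(b, sha256.New())
}

func BenchmarkChunkerWithMD5(b *testing.B) {
	benchmarkChunker(b, md5.New())
}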
Example #8
func BenchmarkDelete(b *testing.B) {
	var objmap interface{}
	client := Client{Organization: "yourorgname", Application: "sandbox", Uri: "https://api.usergrid.com"}
	params := map[string]string{"limit": strconv.Itoa(500)}
	err := client.Get("benchmark", params, JSONResponseHandler(&objmap))
	if err != nil {
		b.Logf("Test failed: %s\n", err)
		b.Fail()
	}
	omap := objmap.(map[string]interface{})
	if omap["entities"] == nil || len(omap["entities"].([]interface{})) == 0 {
		b.Logf("Test failed: no entities to delete\n")
		b.Fail()
	}
	entities := omap["entities"].([]interface{})
	var entity map[string]interface{}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if len(entities) == 0 {
			b.Logf("Test failed: we ran out of entities\n")
			b.Fail()
			continue
		}
		entity, entities = entities[len(entities)-1].(map[string]interface{}), entities[:len(entities)-1]
		err := client.Delete("benchmark/"+entity["uuid"].(string), nil, NOOPResponseHandler(&objmap))
		if err != nil {
			b.Logf("BenchmarkDelete failed: %s\n", err)
			b.Fail()
		}
	}
	if b.Failed() {
		str, _ := json.MarshalIndent(objmap, "", "  ")
		b.Logf("RESPONSE: %s", str)
	}
}
Example #9
func BenchmarkConcurrentMap(b *testing.B) {
	keyType := reflect.TypeOf(int32(2))
	elemType := keyType
	cmap := NewConcurrentMap(keyType, elemType)
	var key, elem int32
	fmt.Printf("N=%d.\n", b.N)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		seed := int32(i)
		key = seed
		elem = seed << 10
		b.StartTimer()
		cmap.Put(key, elem)
		_ = cmap.Get(key)
		b.StopTimer()
		b.SetBytes(8)
		b.StartTimer()
	}
	ml := cmap.Len()
	b.StopTimer()
	mapType := fmt.Sprintf("ConcurrentMap<%s, %s>",
		keyType.Kind().String(), elemType.Kind().String())
	b.Logf("The length of %s value is %d.\n", mapType, ml)
	b.StartTimer()
}
Example #10
func BenchmarkMap(b *testing.B) {
	keyType := reflect.TypeOf(int32(2))
	elemType := keyType
	imap := make(map[interface{}]interface{})
	var key, elem int32
	fmt.Printf("N=%d.\n", b.N)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		seed := int32(i)
		key = seed
		elem = seed << 10
		b.StartTimer()
		imap[key] = elem
		_ = imap[key]
		b.StopTimer()
		b.SetBytes(8)
		b.StartTimer()
	}
	ml := len(imap)
	b.StopTimer()
	mapType := fmt.Sprintf("Map<%s, %s>",
		keyType.Kind().String(), elemType.Kind().String())
	b.Logf("The length of %s value is %d.\n", mapType, ml)
	b.StartTimer()
}
Example #11
func BenchmarkExtract(b *testing.B) {
	config, err := ioutil.ReadFile("test/recursor_stats.json")
	if err != nil {
		b.Fatalf("could not read config file: %v", err.Error())
	}

	h := newPowerDNS(config)
	defer h.Close()

	hostURL, _ := url.Parse(h.URL)

	e := NewExporter("12345", "recursor", hostURL)

	var before, after runtime.MemStats
	runtime.GC()
	runtime.ReadMemStats(&before)
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		ch := make(chan prometheus.Metric)
		go func(ch chan prometheus.Metric) {
			for _ = range ch {
			}
		}(ch)

		e.Collect(ch)
		close(ch)
	}

	runtime.GC()
	runtime.ReadMemStats(&after)

	b.Logf("%d bytes used after %d runs", after.Alloc-before.Alloc, b.N)
}
Example #12
func BenchmarkMetalReviewExample(b *testing.B) {
	var n int
	var buf bytes.Buffer

	b.StopTimer()
	doc := LoadDoc("metalreview.html")
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		doc.Find(".slider-row:nth-child(1) .slider-item").Each(func(i int, s *Selection) {
			var band, title string
			var score float64
			var e error

			n++
			// For each item found, get the band, title and score, and print it
			band = s.Find("strong").Text()
			title = s.Find("em").Text()
			if score, e = strconv.ParseFloat(s.Find(".score").Text(), 64); e != nil {
				// Not a valid float, ignore score
				if n <= 4 {
					buf.WriteString(fmt.Sprintf("Review %d: %s - %s.\n", i, band, title))
				}
			} else {
				// Print all, including score
				if n <= 4 {
					buf.WriteString(fmt.Sprintf("Review %d: %s - %s (%2.1f).\n", i, band, title, score))
				}
			}
		})
	}
	b.Log(buf.String())
	b.Logf("MetalReviewExample=%d", n)
}
Example #13
func BenchmarkToJSON2(b *testing.B) {
	var (
		vals = make([]bench3.Bench, b.N)
		res  = []byte{}
		err  error
	)

	for i := 0; i < b.N; i++ {
		vals[i] = bench3.NewBench("demo", 42, json.Marshal)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		res, err = vals[i].ToJSON()
		if err != nil {
			b.Logf("Failed test: %d with error: %q", i, err)
			b.Fail()
		}
	}

	b.StopTimer()

	byteEncoded = res
	runtime.GC()
}
Example #14
func BenchmarkToJSONString2(b *testing.B) {
	var (
		vals = make([]bench2.Bench, b.N)
		res  = ""
		err  error
	)

	for i := 0; i < b.N; i++ {
		vals[i] = bench2.Bench{FieldA: "demo", FieldB: 42}
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		res, err = vals[i].ToJSONString2()
		if err != nil {
			b.Logf("Failed test: %d with error: %q", i, err)
			b.Fail()
		}
	}

	b.StopTimer()

	stringEncoded = res
	runtime.GC()
}
Example #15
// benchmarkThrottlerParallel is the parallel version of benchmarkThrottler.
// Set -cpu to change the number of threads. The QPS should be distributed
// across all threads and the reported benchmark value should be similar
// to the value of benchmarkThrottler.
func benchmarkThrottlerParallel(b *testing.B, qps int64) {
	threadCount := runtime.GOMAXPROCS(0)
	throttler, _ := NewThrottler("test", "queries", threadCount, qps, ReplicationLagModuleDisabled)
	defer throttler.Close()
	threadIDs := make(chan int, threadCount)
	for id := 0; id < threadCount; id++ {
		threadIDs <- id
	}
	close(threadIDs)
	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		threadID := <-threadIDs

		for pb.Next() {
			backedOff := 0
			for {
				backoff := throttler.Throttle(threadID)
				if backoff == NotThrottled {
					break
				}
				backedOff++
				if backedOff > 1 {
					b.Logf("did not wait long enough after backoff. threadID = %v, last backoff = %v", threadID, backoff)
				}
				time.Sleep(backoff)
			}
		}
		throttler.ThreadFinished(threadID)
	})
}
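As the comment above notes, benchmarkThrottlerParallel is meant to be compared against benchmarkThrottler at the same QPS, so it is normally driven by a thin wrapper that fixes the QPS value. A minimal sketch; the wrapper name and the 1000 QPS figure are illustrative assumptions:
func BenchmarkThrottlerParallel1000QPS(b *testing.B) {
	// 1000 is an arbitrary example rate; a real wrapper would mirror the
	// QPS values used by the non-parallel benchmarks.
	benchmarkThrottlerParallel(b, 1000)
}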
Example #16
func BenchmarkCertBundleParsing(b *testing.B) {
	for i := 0; i < b.N; i++ {
		cbuts := []*CertBundle{
			refreshRSACertBundle(),
			refreshRSACertBundleWithChain(),
			refreshRSA8CertBundle(),
			refreshRSA8CertBundleWithChain(),
			refreshECCertBundle(),
			refreshECCertBundleWithChain(),
			refreshEC8CertBundle(),
			refreshEC8CertBundleWithChain(),
		}

		for i, cbut := range cbuts {
			pcbut, err := cbut.ToParsedCertBundle()
			if err != nil {
				b.Logf("Error occurred with bundle %d in test array (index %d).\n", i+1, i)
				b.Errorf("Error converting to parsed cert bundle: %s", err)
				continue
			}

			cbut, err = pcbut.ToCertBundle()
			if err != nil {
				b.Fatalf("Error converting to cert bundle: %s", err)
			}
		}
	}
}
Example #17
// benchmarkMerge measures the overhead of merging profiles read from files.
// They must be the same type of profiles.
func benchmarkMerge(b *testing.B, files []string) {
	const path = "testdata/"

	p := make([]*Profile, len(files))

	for i, source := range files {
		inBytes, err := ioutil.ReadFile(filepath.Join(path, source))
		if err != nil {
			b.Fatal(err)
		}
		if p[i], err = Parse(bytes.NewBuffer(inBytes)); err != nil {
			b.Fatalf("%s: %s", source, err)
		}
	}

	var prof *Profile
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		prof, _ = Merge(p)
	}
	b.StopTimer()

	before := 0
	for _, p := range p {
		p.preEncode()
		buff := marshal(p)
		before += len(buff)
	}
	prof.preEncode()
	buff := marshal(prof)
	after := len(buff)
	b.Logf("Profile size before merge = %v, After merge = %v", before, after)
}
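benchmarkMerge takes the list of profile files as a parameter, so concrete benchmarks only supply file names under testdata/. A minimal sketch of such a wrapper; the benchmark name and file names are hypothetical placeholders:
func BenchmarkMergeCPUProfiles(b *testing.B) {
	// Placeholder file names; both must contain the same type of profile,
	// as benchmarkMerge above requires.
	benchmarkMerge(b, []string{"cpu.prof.1", "cpu.prof.2"})
}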
Example #18
func benchmarkClientServerParallel(b *testing.B, conc int) {
	b.StopTimer()
	ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, r *Request) {
		fmt.Fprintf(rw, "Hello world.\n")
	}))
	defer ts.Close()
	b.StartTimer()

	numProcs := runtime.GOMAXPROCS(-1) * conc
	var wg sync.WaitGroup
	wg.Add(numProcs)
	n := int32(b.N)
	for p := 0; p < numProcs; p++ {
		go func() {
			for atomic.AddInt32(&n, -1) >= 0 {
				res, err := Get(ts.URL)
				if err != nil {
					b.Logf("Get: %v", err)
					continue
				}
				all, err := ioutil.ReadAll(res.Body)
				if err != nil {
					b.Logf("ReadAll: %v", err)
					continue
				}
				body := string(all)
				if body != "Hello world.\n" {
					panic("Got body: " + body)
				}
			}
			wg.Done()
		}()
	}
	wg.Wait()
}
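The conc argument is multiplied by GOMAXPROCS to set the number of client goroutines, so concrete benchmarks are thin wrappers that pin a concurrency level. A minimal sketch; the wrapper names and values are assumptions:
func BenchmarkClientServerParallel4(b *testing.B) {
	benchmarkClientServerParallel(b, 4)
}

func BenchmarkClientServerParallel64(b *testing.B) {
	benchmarkClientServerParallel(b, 64)
}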
Example #19
func BenchmarkRpc(b *testing.B) {
	var d Daemon
	if err := d.init(); err != nil {
		b.Fatal(err)
	}
	go d.serverloop()
	defer d.close()

	if c, err := jsonrpc.Dial("tcp", fmt.Sprintf("127.0.0.1%s", port)); err != nil {
		b.Error(err)
	} else {
		defer c.Close()

		s := time.Now()
		for i := 0; i < b.N; i++ {
			var a content.CompleteAtArgs
			a.Location.Line = 95
			a.Location.Column = 45
			a.Location.File.Name = "../net/testdata/CompleteSharp.cs"

			var cmp1 content.CompletionResult
			if err := c.Call("Net.CompleteAt", &a, &cmp1); err != nil {
				b.Error(err)
			}
		}
		e := time.Since(s).Seconds() * 1000
		b.Logf("%d calls in %f ms = %f ms/call", b.N, e, e/float64(b.N))
	}
}
Example #20
func BenchmarkMessages(b *testing.B) {
	numMessages := 10000

	adapter.count = numMessages

	var err error
	//b.ResetTimer()

	for i := 0; i < numMessages; i++ {
		go func() {
			//emsg := createTestBlock()
			emsg := createTestChaincodeEvent("0xffffffff", "event1")
			if err = producer.Send(emsg); err != nil {
				b.Fail()
				b.Logf("Error sending message %s", err)
			}
		}()
	}

	select {
	case <-adapter.notfy:
	case <-time.After(5 * time.Second):
		b.Fail()
		b.Logf("timed out on message")
	}
}
Example #21
// killReindex starts a reindexing in a new process, and kills that process
// after killTime. It then (naively for now) verifies that the kv store file is
// not corrupted by reinitializing a (possibly incomplete) index (with a corpus)
// from it. It returns true if the indexing completed before the process could
// be killed, false otherwise.
func killReindex(b *testing.B, dbfile string, killTime time.Duration,
	sortedProvider func(dbfile string) (sorted.KeyValue, error)) bool {
	cmd := exec.Command(os.Args[0], "-test.run=TestChildIndexer")
	cmd.Env = append(cmd.Env, "TEST_BE_CHILD=1", "TEST_BE_CHILD_DBFILE="+dbfile)
	var stdout, stderr bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	if err := cmd.Start(); err != nil {
		b.Fatal(err)
	}

	waitc := make(chan error)
	go func() {
		waitc <- cmd.Wait()
	}()
	fullIndex := false
	select {
	case err := <-waitc:
		if err == nil {
			// indexer finished before we killed it
			fullIndex = true
			b.Logf("Finished indexing before being killed at %v", killTime)
			break
		}
		// TODO(mpl): do better
		if err.Error() != "signal: killed" {
			b.Fatalf("unexpected (not killed) error from indexer process: %v %v %v", err, stdout.String(), stderr.String())
		}
	case <-time.After(killTime):
		if err := cmd.Process.Kill(); err != nil {
			b.Fatal(err)
		}
		err := <-waitc
		// TODO(mpl): do better
		if err != nil && err.Error() != "signal: killed" {
			b.Fatalf("unexpected (not killed) error from indexer process: %v %v %v", err, stdout.String(), stderr.String())
		}
	}

	kv, err := sortedProvider(dbfile)
	if err != nil {
		b.Fatal(err)
	}
	idx, err := index.New(kv)
	if err != nil {
		b.Fatal(err)
	}
	bs, err := localdisk.New(filepath.Join(filepath.Dir(dbfile), "bs"))
	if err != nil {
		b.Fatal(err)
	}
	idx.InitBlobSource(bs)
	if _, err := idx.KeepInMemory(); err != nil {
		b.Fatal(err)
	}
	if err := idx.Close(); err != nil {
		b.Fatal(err)
	}
	return fullIndex
}
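killReindex is itself a helper, not a benchmark: it needs the path of a database file whose sibling "bs" directory holds the blob corpus, plus a sorted.KeyValue factory. A minimal sketch of a caller; BenchmarkKillReindex, testDBFile, and newSortedProvider are hypothetical names, not identifiers from the original package:
func BenchmarkKillReindex(b *testing.B) {
	// testDBFile and newSortedProvider are placeholders for a prepared
	// corpus path and a concrete sorted.KeyValue implementation.
	if killReindex(b, testDBFile, 500*time.Millisecond, newSortedProvider) {
		b.Log("child finished a full index before it could be killed")
	}
}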
Example #22
func BenchmarkDelete(t *testing.B) {
	t.Logf("BenchmarkDelete|Delete|Start...")
	t.StopTimer()
	cleanSnapshot("./snapshot/")
	snapshot := NewMessageStore("./snapshot/", 1, 10, 1*time.Second, traverse)
	snapshot.Start()

	for j := 0; j < 20; j++ {
		d := []byte(fmt.Sprintf("%d|hello snapshot", j))
		cmd := NewCommand(-1, fmt.Sprint(j), d, nil)
		<-snapshot.Append(cmd)
	}

	time.Sleep(2 * time.Second)
	t.StartTimer()

	i := 0
	for ; i < t.N; i++ {
		id := int64(rand.Intn(20))
		cmd := NewCommand(id, "", nil, nil)
		snapshot.Delete(cmd)

	}

	t.StopTimer()
	snapshot.Destory()
	cleanSnapshot("./snapshot/")
	t.StartTimer()
	t.Logf("BenchmarkDelete|Delete|END...")
}
Example #23
func reindex(b *testing.B, dbfile string,
	sortedProvider func(dbfile string) (sorted.KeyValue, error)) *index.Index {
	b.Logf("reindexing")
	if err := os.RemoveAll(dbfile); err != nil {
		b.Fatal(err)
	}
	kv, err := sortedProvider(dbfile)
	if err != nil {
		b.Fatal(err)
	}
	bs, err := localdisk.New(filepath.Join(filepath.Dir(dbfile), "bs"))
	if err != nil {
		b.Fatal(err)
	}
	idx, err := index.New(kv)
	if err != nil {
		b.Fatal(err)
	}
	idx.InitBlobSource(bs)

	b.ResetTimer()
	if err := idx.Reindex(); err != nil {
		b.Fatal(err)
	}
	return idx
}
Example #24
func enumerateMeta(b *testing.B, dbfile string,
	sortedProvider func(dbfile string) (sorted.KeyValue, error)) int {
	b.Logf("enumerating meta blobs")
	kv, err := sortedProvider(dbfile)
	if err != nil {
		b.Fatal(err)
	}
	bs, err := localdisk.New(filepath.Join(filepath.Dir(dbfile), "bs"))
	if err != nil {
		b.Fatal(err)
	}
	idx, err := index.New(kv)
	if err != nil {
		b.Fatal(err)
	}
	idx.InitBlobSource(bs)
	defer idx.Close()

	ch := make(chan camtypes.BlobMeta, 100)
	go func() {
		if err := idx.EnumerateBlobMeta(nil, ch); err != nil {
			b.Fatal(err)
		}
	}()
	n := 0
	for range ch {
		n++
	}
	b.Logf("Enumerated %d meta blobs", n)
	return n
}
Example #25
func BenchmarkExtract(b *testing.B) {
	config, err := ioutil.ReadFile("test/haproxy.csv")
	if err != nil {
		b.Fatalf("could not read config file: %v", err.Error())
	}

	h := newHaproxy(config)
	defer h.Close()

	e := NewExporter(h.URL, "", 5*time.Second)

	var before, after runtime.MemStats
	runtime.GC()
	runtime.ReadMemStats(&before)
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		ch := make(chan prometheus.Metric)
		go func(ch chan prometheus.Metric) {
			for _ = range ch {
			}
		}(ch)

		e.Collect(ch)
		close(ch)
	}

	runtime.GC()
	runtime.ReadMemStats(&after)

	b.Logf("%d bytes used after %d runs", after.Alloc-before.Alloc, b.N)
}
Example #26
func BenchmarkARC_Freq(b *testing.B) {
	l, err := NewARC(8192)
	if err != nil {
		b.Fatalf("err: %v", err)
	}

	trace := make([]int64, b.N*2)
	for i := 0; i < b.N*2; i++ {
		if i%2 == 0 {
			trace[i] = rand.Int63() % 16384
		} else {
			trace[i] = rand.Int63() % 32768
		}
	}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		l.Add(trace[i], trace[i])
	}
	var hit, miss int
	for i := 0; i < b.N; i++ {
		_, ok := l.Get(trace[i])
		if ok {
			hit++
		} else {
			miss++
		}
	}
	b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss))
}
Example #27
func BenchmarkDescryptAesCbc(b *testing.B) {
	key := randomBytes(16)
	iv := randomBytes(16)
	encrypted, err := EncryptIV(original, key, iv)
	if err != nil {
		b.Fatal(err)
	}
	counter := 0
	dataLen := len(encrypted)

	aesCipher, err := aes.NewCipher(key)
	if err != nil {
		b.Fatal(err)
	}
	dec := cipher.NewCBCDecrypter(aesCipher, iv)
	t := time.Now()
	decrypted := make([]byte, len(encrypted))
	b.SetBytes(int64(len(encrypted)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		dec.CryptBlocks(decrypted, encrypted)
		counter += dataLen
	}
	b.StopTimer()
	ts := time.Since(t)
	b.Logf("decryption speed: %.2f Mbit/s", float64(counter)*8/ts.Seconds()/1024/1024)
}
Example #28
func BenchmarkFingerprinting(b *testing.B) {
	b.StopTimer()
	fps := []*Fingerprint{
		{
			Hash: 0,
			FirstCharacterOfFirstLabelName: "a",
			LabelMatterLength:              2,
			LastCharacterOfLastLabelValue:  "z",
		},
		{
			Hash: 0,
			FirstCharacterOfFirstLabelName: "a",
			LabelMatterLength:              2,
			LastCharacterOfLastLabelValue:  "z",
		},
	}
	for i := 0; i < 10; i++ {
		fps[0].Less(fps[1])
	}
	b.Logf("N: %v", b.N)
	b.StartTimer()

	var pre runtime.MemStats
	runtime.ReadMemStats(&pre)

	for i := 0; i < b.N; i++ {
		fps[0].Less(fps[1])
	}

	var post runtime.MemStats
	runtime.ReadMemStats(&post)

	b.Logf("allocated bytes: %d", post.TotalAlloc-pre.TotalAlloc)
}
Example #29
func benchmarkBloomCount(b *testing.B, bloom Bloom, r []uint64) {
	setBloom(bloom, r)
	testBloom(bloom, r)
	b.ResetTimer()
	if bloom.Count() == 0 {
		b.Logf("Bad count\n")
	}
}
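benchmarkBloomCount expects an already-built Bloom and the slice of values to insert, so each concrete benchmark supplies its own filter and data. A minimal sketch of a caller; newTestBloom and randomUint64s are hypothetical helpers standing in for whatever constructor and test-data generator the package provides:
func BenchmarkBloomCount10k(b *testing.B) {
	bloom := newTestBloom()   // hypothetical constructor
	r := randomUint64s(10000) // hypothetical random-data helper
	benchmarkBloomCount(b, bloom, r)
}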
Example #30
func BenchmarkStruct(b *testing.B) {
	array := []string{"foo", "bar"}
	var str str1
	for i := 0; i < b.N; i++ {
		str = str1{array[0]}
	}
	b.Logf("%v", str)
}