Example no. 1
func MallocDisablePool(ebs map[int]*ElasticBuf, pool *MemPool) {
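	// Run the collector twice so finalizers queued by the first cycle run and their garbage is swept before the profile snapshot below.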
	runtime.GC()
	runtime.GC()
	pprof()
	for i := 0; i <= 100000; i++ {
		ebs[i] = NewElasticBuf(1, pool)
	}
	ebs = make(map[int]*ElasticBuf)
	runtime.GC()
	runtime.GC()
}
Example no. 2
func TestRuntimeMemStats(t *testing.T) {
	r := NewRegistry()
	RegisterRuntimeMemStats(r)
	CaptureRuntimeMemStatsOnce(r)
	zero := runtimeMetrics.MemStats.PauseNs.Count() // Get a "zero" since GC may have run before these tests.
	runtime.GC()
	CaptureRuntimeMemStatsOnce(r)
	if count := runtimeMetrics.MemStats.PauseNs.Count(); 1 != count-zero {
		t.Fatal(count - zero)
	}
	runtime.GC()
	runtime.GC()
	CaptureRuntimeMemStatsOnce(r)
	if count := runtimeMetrics.MemStats.PauseNs.Count(); 3 != count-zero {
		t.Fatal(count - zero)
	}
	for i := 0; i < 256; i++ {
		runtime.GC()
	}
	CaptureRuntimeMemStatsOnce(r)
	if count := runtimeMetrics.MemStats.PauseNs.Count(); 259 != count-zero {
		t.Fatal(count - zero)
	}
	for i := 0; i < 257; i++ {
		runtime.GC()
	}
	CaptureRuntimeMemStatsOnce(r)
	if count := runtimeMetrics.MemStats.PauseNs.Count(); 515 != count-zero { // We lost one because there were too many GCs between captures.
		t.Fatal(count - zero)
	}
}
Example no. 3
func benchCheck() {
	fn := func(name string, encfn benchFn, decfn benchFn) {
		benchBs = benchBs[0:0]
		buf := bytes.NewBuffer(benchBs)
		var err error
		runtime.GC()
		tnow := time.Now()
		if err = encfn(buf, &benchTs); err != nil {
			logT(nil, "\t%10s: **** Error encoding benchTs: %v", name, err)
		}
		encDur := time.Since(tnow)
		encLen := buf.Len()
		//log("\t%10s: encLen: %v, len: %v, cap: %v\n", name, encLen, len(benchBs), cap(benchBs))
		buf = bytes.NewBuffer(benchBs[0:encLen])
		runtime.GC()
		tnow = time.Now()
		if err = decfn(buf, new(TestStruc)); err != nil {
			logT(nil, "\t%10s: **** Error decoding into new TestStruc: %v", name, err)
		}
		decDur := time.Since(tnow)
		logT(nil, "\t%10s: Encode Size: %5d, Encode Time: %8v, Decode Time: %8v", name, encLen, encDur, decDur)
	}
	logT(nil, "Benchmark One-Pass Unscientific Marshal Sizes:")
	fn("msgpack", fnMsgpackEncodeFn, fnMsgpackDecodeFn)
	fn("gob", fnGobEncodeFn, fnGobDecodeFn)
	fn("bson", fnBsonEncodeFn, fnBsonDecodeFn)
	fn("json", fnJsonEncodeFn, fnJsonDecodeFn)
}
Example no. 4
func f() (r, s *T) {
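	// A collection runs after each allocation, so each value must survive a GC while referenced only by its named result.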
	r = &T{0x30, 0x31, 0x32, 0x33}
	runtime.GC()
	s = &T{0x40, 0x41, 0x42, 0x43}
	runtime.GC()
	return
}
Example no. 5
func BenchmarkWatch(b *testing.B) {
	b.StopTimer()
	s := newStore()
	kvs, _ := generateNRandomKV(b.N, 128)
	b.StartTimer()

	memStats := new(runtime.MemStats)
	runtime.GC()
	runtime.ReadMemStats(memStats)

	for i := 0; i < b.N; i++ {
		w, _ := s.Watch(kvs[i][0], false, false, 0)

		e := newEvent("set", kvs[i][0], uint64(i+1), uint64(i+1))
		s.WatcherHub.notify(e)
		<-w.EventChan
		s.CurrentIndex++
	}

	s.WatcherHub.EventHistory = nil
	afterMemStats := new(runtime.MemStats)
	runtime.GC()
	runtime.ReadMemStats(afterMemStats)
	fmt.Printf("\nBefore Alloc: %v; After Alloc: %v\n",
		memStats.Alloc/1000, afterMemStats.Alloc/1000)
}
Example no. 6
func main() {
	runtime.MemProfileRate = 0

	c := make(chan int)
	dummy := make(chan int)

	// warm up
	go sender(c, 100000)
	receiver(c, dummy, 100000)
	runtime.GC()
	memstats := new(runtime.MemStats)
	runtime.ReadMemStats(memstats)
	alloc := memstats.Alloc

	// second time shouldn't increase footprint by much
	go sender(c, 100000)
	receiver(c, dummy, 100000)
	runtime.GC()
	runtime.ReadMemStats(memstats)

	// Be careful to avoid wraparound.
	if memstats.Alloc > alloc && memstats.Alloc-alloc > 1.1e5 {
		println("BUG: too much memory for 100,000 selects:", memstats.Alloc-alloc)
	}
}
Example no. 7
func sortUrls(urlHits *map[Key]HitCount) (Elems, uint64, string) {
	uniqueUrlsCount := len(*urlHits)
	sortedUrls := make(Elems, 0, uniqueUrlsCount)

	var largestHit uint64
	var largestHitURL string

	if showHumanStatistics && aggregateBy == "url" {
		for key, value := range *urlHits {
			if uint64(value) > largestHit {
				largestHit = uint64(value)
				largestHitURL = string(key)
			}

			sortedUrls = append(sortedUrls, &Elem{key, value})
		}
	} else {
		for key, value := range *urlHits {
			sortedUrls = append(sortedUrls, &Elem{key, value})
		}
	}

	*urlHits = make(map[Key]HitCount)

	runtime.GC()

	sort.Sort(ByReverseCount{sortedUrls})

	runtime.GC()

	return sortedUrls, largestHit, largestHitURL
}
Example no. 8
func main() {
	const N = 10000
	st := new(runtime.MemStats)
	memstats := new(runtime.MemStats)
	runtime.ReadMemStats(st)
	for i := 0; i < N; i++ {
		c := make(chan int, 10)
		_ = c
		if i%100 == 0 {
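			// Periodically collect and yield so the runtime can sweep the channels that are no longer referenced.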
			for j := 0; j < 4; j++ {
				runtime.GC()
				runtime.Gosched()
				runtime.GC()
				runtime.Gosched()
			}
		}
	}

	runtime.ReadMemStats(memstats)
	obj := memstats.HeapObjects - st.HeapObjects
	if obj > N/5 {
		fmt.Println("too many objects left:", obj)
		os.Exit(1)
	}
}
Example no. 9
func doIndex() bool {
	idxSegm, err := gcse.IndexSegments.GenMaxSegment()
	if err != nil {
		log.Printf("GenMaxSegment failed: %v", err)
		return false
	}

	runtime.GC()
	gcse.DumpMemStats()

	log.Printf("Indexing to %v ...", idxSegm)

	fpDocDB := sophie.LocalFsPath(configs.DocsDBPath().S())
	ts, err := gcse.Index(kv.DirInput(fpDocDB), idxSegm.Join("").S())
	if err != nil {
		log.Printf("Indexing failed: %v", err)
		return false
	}

	if !func() bool {
		f, err := idxSegm.Join(gcse.IndexFn).Create()
		if err != nil {
			log.Printf("Create index file failed: %v", err)
			return false
		}
		defer f.Close()

		log.Printf("Saving index to %v ...", idxSegm)
		if err := ts.Save(f); err != nil {
			log.Printf("ts.Save failed: %v", err)
			return false
		}
		return true
	}() {
		return false
	}
	runtime.GC()
	gcse.DumpMemStats()

	storePath := idxSegm.Join(configs.FnStore)
	log.Printf("Saving store snapshot to %v", storePath)
	if err := store.SaveSnapshot(storePath.S()); err != nil {
		log.Printf("SaveSnapshot %v failed: %v", storePath, err)
	}

	if err := idxSegm.Done(); err != nil {
		log.Printf("segm.Done failed: %v", err)
		return false
	}

	log.Printf("Indexing success: %s (%d)", idxSegm, ts.DocCount())
	gcse.AddBiValueAndProcess(bi.Average, "index.doc-count", ts.DocCount())

	ts = nil
	gcse.DumpMemStats()
	runtime.GC()
	gcse.DumpMemStats()

	return true
}
Example no. 10
func threadList(bl []Board, cpu int) []interface{} {
	tlist := make([]interface{}, 0, 400000)
	ch := make(chan MiniThread, cpu*16)
	sync := make(chan bool, cpu)
	go func() {
		for {
			if data := <-ch; data.Sure != "" {
				tlist = append(tlist, data)
			} else {
				break
			}
		}
	}()
	for _, it := range bl {
		sync <- true
		runtime.GC()
		go func(it Board) { // pass the loop variable explicitly so each goroutine gets its own copy
			threadThread(it, ch)
			<-sync
		}(it)
	}
	for cpu > 0 {
		sync <- true
		runtime.GC()
		cpu--
	}
	close(ch)
	close(sync)
	return tlist
}
Example no. 11
func Test_Bind_Function(t *testing.T) {
	template := engine.NewObjectTemplate()

	goFunc1 := func(text string, obj *Object, callback *Function) {
		t.Log("fetch")
		for i := 0; i < 10; i++ {
			t.Log(i)
			callback.Call(engine.NewString(text), obj.Value)
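			// Collect after every callback to exercise object lifetimes across the Go/JavaScript boundary.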
			runtime.GC()
		}
	}

	goFunc2 := func(text1, text2 string) {
		t.Logf("print(%s, %s)", text1, text2)
	}

	template.Bind("fetch", goFunc1)

	template.Bind("print", goFunc2)

	engine.NewContext(template).Scope(func(cs ContextScope) {
		cs.Eval(`
		var testObj = {Name: function() {
			return "test object"
		}};
		fetch("test", testObj, function(text, obj) {
			print(text, obj.Name())
		});`)
	})

	runtime.GC()
}
Example no. 12
func BenchmarkExtract(b *testing.B) {
	config, err := ioutil.ReadFile("test/recursor_stats.json")
	if err != nil {
		b.Fatalf("could not read config file: %v", err.Error())
	}

	h := newPowerDNS(config)
	defer h.Close()

	hostURL, _ := url.Parse(h.URL)

	e := NewExporter("12345", "recursor", hostURL)

	var before, after runtime.MemStats
	runtime.GC()
	runtime.ReadMemStats(&before)
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		ch := make(chan prometheus.Metric)
		go func(ch chan prometheus.Metric) {
			for range ch {
			}
		}(ch)

		e.Collect(ch)
		close(ch)
	}

	runtime.GC()
	runtime.ReadMemStats(&after)

	b.Logf("%d bytes used after %d runs", after.Alloc-before.Alloc, b.N)
}
Example no. 13
func TestPauses(t *testing.T) {
	p := NewPauses(1024)

	// Reset GC count
	p.Update()
	p.Snapshot()

	// Run GC once
	runtime.GC()
	p.Update()
	if count := len(p.Snapshot()); count != 1 {
		t.Fatalf("captured %d gc runs, expect 1", count)
	}

	// Run GC twice
	runtime.GC()
	runtime.GC()
	p.Update()
	if count := len(p.Snapshot()); count != 2 {
		t.Fatalf("captured %d gc runs, expected 2", count)
	}

	// Wraps GC counts
	for i := 0; i < 257; i++ {
		runtime.GC()
	}
	p.Update()
	if count := len(p.Snapshot()); count != 256 {
		t.Fatalf("captured %d gc runs, expected 256", count)
	}
}
Example no. 14
// Cache memory leak for get
func testCacheGetMemoryLeak() {
	pc.OverrideLeaseSeconds(1)
	defer pc.OverrideLeaseSeconds(0)

	var memstats runtime.MemStats
	var initAlloc, midAlloc, finalAlloc uint64
	longValue := strings.Repeat("this sentence is 30 char long\n", 30)

	// Run garbage collection and get memory stats
	runtime.GC()
	runtime.ReadMemStats(&memstats)
	initAlloc = memstats.Alloc

	// Cache a lot of data
	for i := 0; i < 10000; i++ {
		key := fmt.Sprintf("keymemleakget:%d", i)
		pc.Reset()
		forceCacheGet(key, longValue)
		if pc.GetLeaseRequestCount() == 0 {
			LOGE.Println("FAIL: not requesting leases")
			failCount++
			return
		}
		pc.Reset()
		v, err := ls.Get(key)
		if checkError(err, false) {
			return
		}
		if v != longValue {
			LOGE.Println("FAIL: got wrong value")
			failCount++
			return
		}
		if pc.GetRpcCount() > 0 {
			LOGE.Println("FAIL: not caching data")
			failCount++
			return
		}
	}

	runtime.GC()
	runtime.ReadMemStats(&memstats)
	midAlloc = memstats.Alloc

	// Wait for data to expire and someone to cleanup
	time.Sleep(20 * time.Second)

	// Run garbage collection and get memory stats
	runtime.GC()
	runtime.ReadMemStats(&memstats)
	finalAlloc = memstats.Alloc

	if finalAlloc < initAlloc || (finalAlloc-initAlloc) < 5000000 {
		fmt.Fprintln(output, "PASS")
		passCount++
	} else {
		LOGE.Printf("FAIL: not cleaning cache - memory leak - init %d mid %d final %d\n", initAlloc, midAlloc, finalAlloc)
		failCount++
	}
}
Example no. 15
func TestLiveness(t *testing.T) {
	dir := testutil.TempDir()
	defer os.RemoveAll(dir)
	root := nodefs.NewDefaultNode()
	s, _, err := nodefs.MountRoot(dir, root, nil)
	if err != nil {
		t.Fatalf("MountRoot: %v", err)
	}
	go s.Serve()
	if err := s.WaitMount(); err != nil {
		t.Fatal("WaitMount", err)
	}
	defer s.Unmount()

	if _, err := ioutil.ReadDir(dir); err != nil {
		t.Fatalf("ReadDir: %v", err)
	}

	// We previously encountered a situation where a finalizer would close our fd out from under us. Try to force both finalizers to run and object destruction to complete.
	runtime.GC()
	runtime.GC()

	if _, err := ioutil.ReadDir(dir); err != nil {
		t.Fatalf("ReadDir: %v", err)
	}
}
Example no. 16
// use one context and one script in different threads
//
func Test_ThreadSafe4(t *testing.T) {
	fail := false
	script := engine.Compile([]byte("'Hello ' + 'World!'"), nil, nil)
	context := engine.NewContext(nil)

	wg := new(sync.WaitGroup)
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			context.Scope(func(cs ContextScope) {
				rand_sched(200)

				value := script.Run()
				result := value.ToString()
				fail = fail || result != "Hello World!"
				runtime.GC()
				wg.Done()
			})
		}()
	}
	wg.Wait()
	runtime.GC()

	if fail {
		t.FailNow()
	}
}
Example no. 17
func BenchmarkExtract(b *testing.B) {
	config, err := ioutil.ReadFile("test/haproxy.csv")
	if err != nil {
		b.Fatalf("could not read config file: %v", err.Error())
	}

	h := newHaproxy(config)
	defer h.Close()

	e := NewExporter(h.URL, "", 5*time.Second)

	var before, after runtime.MemStats
	runtime.GC()
	runtime.ReadMemStats(&before)
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		ch := make(chan prometheus.Metric)
		go func(ch chan prometheus.Metric) {
			for range ch {
			}
		}(ch)

		e.Collect(ch)
		close(ch)
	}

	runtime.GC()
	runtime.ReadMemStats(&after)

	b.Logf("%d bytes used after %d runs", after.Alloc-before.Alloc, b.N)
}
Example no. 18
func getMemProfileRecords() []runtime.MemProfileRecord {
	// Force the runtime to update the object and byte counts.
	// This can take up to two GC cycles to get a complete
	// snapshot of the current point in time.
	runtime.GC()
	runtime.GC()

	// Find out how many records there are (MemProfile(nil, true)),
	// allocate that many records, and get the data.
	// There's a race—more records might be added between
	// the two calls—so allocate a few extra records for safety
	// and also try again if we're very unlucky.
	// The loop should only execute one iteration in the common case.
	var p []runtime.MemProfileRecord
	n, ok := runtime.MemProfile(nil, true)
	for {
		// Allocate room for a slightly bigger profile,
		// in case a few more entries have been added
		// since the call to MemProfile.
		p = make([]runtime.MemProfileRecord, n+50)
		n, ok = runtime.MemProfile(p, true)
		if ok {
			p = p[0:n]
			break
		}
		// Profile grew; try again.
	}
	return p
}
Example no. 19
func TestMessageTags(t *testing.T) {
	db, err := Open(dbPath, DBReadOnly)
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()

	thread, err := firstThread(db, "id:[email protected]")
	if err != nil {
		t.Fatal(err)
	}
	msgs := thread.Messages()
	msg := &Message{}
	for msgs.Next(msg) {
		if msg.ID() == "*****@*****.**" {
			break
		}
		// invoke the GC to make sure it's running smoothly.
		runtime.GC()
	}

	tags := msg.Tags().slice()
	// invoke the GC to make sure it's running smoothly.
	runtime.GC()
	if want, got := []string{"inbox", "unread"}, tags; !reflect.DeepEqual(want, got) {
		t.Errorf("msg.Tags(): want %v got %v", want, got)
	}
}
Example no. 20
func main() {
	runtime.GOMAXPROCS(4)
	go func() {}()
	go func() {}()
	go func() {}()
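	// NOTE: this example targets a pre-Go 1 runtime, where runtime.MemStats was an exported variable and time.Nanoseconds() existed; it will not build with modern Go.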
	st := &runtime.MemStats
	packages = append(packages, packages...)
	packages = append(packages, packages...)
	n := flag.Int("n", 4, "iterations")
	p := flag.Int("p", len(packages), "# of packages to keep in memory")
	flag.BoolVar(&st.DebugGC, "d", st.DebugGC, "print GC debugging info (pause times)")
	flag.Parse()

	var lastParsed []map[string]*ast.Package
	var t0 int64
	pkgroot := runtime.GOROOT() + "/src/pkg/"
	for pass := 0; pass < 2; pass++ {
		// Once the heap is grown to full size, reset counters.
		// This hides the start-up pauses, which are much smaller
		// than the normal pauses and would otherwise make
		// the average look much better than it actually is.
		st.NumGC = 0
		st.PauseTotalNs = 0
		t0 = time.Nanoseconds()

		for i := 0; i < *n; i++ {
			parsed := make([]map[string]*ast.Package, *p)
			for j := range parsed {
				parsed[j] = parseDir(pkgroot + packages[j%len(packages)])
			}
			if i+1 == *n && *serve != "" {
				lastParsed = parsed
			}
		}
		runtime.GC()
		runtime.GC()
	}
	t1 := time.Nanoseconds()

	fmt.Printf("Alloc=%d/%d Heap=%d Mallocs=%d PauseTime=%.3f/%d = %.3f\n",
		st.Alloc, st.TotalAlloc,
		st.Sys,
		st.Mallocs, float64(st.PauseTotalNs)/1e9,
		st.NumGC, float64(st.PauseTotalNs)/1e9/float64(st.NumGC))

	/*
		fmt.Printf("%10s %10s %10s\n", "size", "#alloc", "#free")
		for _, s := range st.BySize {
			fmt.Printf("%10d %10d %10d\n", s.Size, s.Mallocs, s.Frees)
		}
	*/
	// Standard gotest benchmark output, collected by build dashboard.
	gcstats("BenchmarkParser", *n, t1-t0)

	if *serve != "" {
		log.Fatal(http.ListenAndServe(*serve, nil))
		println(lastParsed)
	}
}
Example no. 21
func main() {
	solveLinearProgram()

	// Force GC to illustrate finalizer running
	runtime.GC()
	runtime.GC()
	runtime.GC()
}
Example no. 22
File: main.go Project: twmb/dash
func bench(quit, dead chan struct{}) {
	// Prime our virtual memory space.
	benchChan(qbench.Cfg{
		Enqueuers: 100,
		Dequeuers: 100,
		Messages:  *messages,
	})

	// Leave GC on for benchmarks, imitating more standard program behavior.
	for _, enqueuers := range []int{100, 10, 1} {
		for _, dequeuers := range []int{100, 10, 1} {
			for _, cpu := range []int{1, 8, 16, 24, 32, 40, 48} {
				select {
				case <-quit:
					fmt.Println("Quitting.")
					close(dead)
					return
				default:
				}
				runtime.GOMAXPROCS(cpu)
				fmt.Printf("Bench on: %dproc, %denq, %ddeq\n", cpu, enqueuers, dequeuers)
				cfg := qbench.Cfg{
					Enqueuers: enqueuers,
					Dequeuers: dequeuers,
					Messages:  *messages,
				}
				fmt.Println("channel... ")
				results := benchChan(cfg)
				processResults("channel", results)
				runtime.GC()
				fmt.Println("mpmcdvq... ")
				results = benchMpMcDVq(cfg)
				processResults("mpmcdvq", results)
				runtime.GC()
				if enqueuers == 1 {
					fmt.Println("spmcdvq... ")
					results = benchSpMcDVq(cfg)
					processResults("spmcdvq", results)
					runtime.GC()
				}
				if dequeuers == 1 {
					fmt.Println("mpscdvq... ")
					results = benchMpScDVq(cfg)
					processResults("mpscdvq", results)
					runtime.GC()
				}
				if enqueuers == 1 && dequeuers == 1 {
					fmt.Println("spscdvq... ")
					results = benchSpScDVq(cfg)
					processResults("spscdvq", results)
					runtime.GC()
				}
				fmt.Println("done.")
			}
		}
	}
	close(dead)
}
Example no. 23
func main() {
	assign()
	i = nil
	runtime.GC()
	// runtime.LockOSThread()
	f(1)
	runtime.GC()
	// runtime.Goexit()
}
Example no. 24
func measureMem(name string, action func(), b *testing.B) {
	var before, after runtime.MemStats
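	// Collect and read stats on both sides of action so the Alloc delta reflects only what action left live.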
	runtime.GC()
	runtime.ReadMemStats(&before)
	action()
	runtime.GC()
	runtime.ReadMemStats(&after)
	b.Logf("%s: %.2fMB", name, float64(after.Alloc-before.Alloc)/float64(1<<20))
}
Example no. 25
// AddSPDY adds SPDY support to srv, and must be called before srv begins serving.
func AddSPDY(srv *http.Server) {
	npnStrings := npn()
	if len(npnStrings) <= 1 {
		return
	}
	if srv.TLSConfig == nil {
		srv.TLSConfig = new(tls.Config)
	}
	if srv.TLSConfig.NextProtos == nil {
		srv.TLSConfig.NextProtos = npnStrings
	} else {
		// Collect compatible alternative protocols.
		others := make([]string, 0, len(srv.TLSConfig.NextProtos))
		for _, other := range srv.TLSConfig.NextProtos {
			if !strings.Contains(other, "spdy/") && !strings.Contains(other, "http/") {
				others = append(others, other)
			}
		}

		// Start with spdy.
		srv.TLSConfig.NextProtos = make([]string, 0, len(others)+len(npnStrings))
		srv.TLSConfig.NextProtos = append(srv.TLSConfig.NextProtos, npnStrings[:len(npnStrings)-1]...)

		// Add the others.
		srv.TLSConfig.NextProtos = append(srv.TLSConfig.NextProtos, others...)
		srv.TLSConfig.NextProtos = append(srv.TLSConfig.NextProtos, "http/1.1")
	}
	if srv.TLSNextProto == nil {
		srv.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler))
	}
	for _, str := range npnStrings {
		switch str {
		case "spdy/2":
			srv.TLSNextProto[str] = func(s *http.Server, tlsConn *tls.Conn, handler http.Handler) {
				conn, err := NewServerConn(tlsConn, s, 2)
				if err != nil {
					log.Println(err)
					return
				}
				conn.Run()
				conn = nil
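				// Drop the reference and collect so per-connection buffers can be reclaimed once the session ends.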
				runtime.GC()
			}
		case "spdy/3":
			srv.TLSNextProto[str] = func(s *http.Server, tlsConn *tls.Conn, handler http.Handler) {
				conn, err := NewServerConn(tlsConn, s, 3)
				if err != nil {
					log.Println(err)
					return
				}
				conn.Run()
				conn = nil
				runtime.GC()
			}
		}
	}
}
Example no. 26
func main() {
	setup()
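	// Alternate collections with short sleeps, giving any finalizers queued by setup a chance to run.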
	runtime.GC()
	runtime.GC()
	time.Sleep(10 * time.Millisecond)
	runtime.GC()
	runtime.GC()
	time.Sleep(10 * time.Millisecond)
}
Example no. 27
func memoryConsumption(name string, fn func()) {
	m := new(runtime.MemStats)
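	// Baseline: collect and read HeapAlloc before fn runs, then again afterwards, and report the difference.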
	runtime.GC()
	runtime.ReadMemStats(m)
	pre := m.HeapAlloc
	fn()
	runtime.GC()
	runtime.ReadMemStats(m)
	post := m.HeapAlloc
	fmt.Printf("The operation %s requires %d Bytes.\n", name, post-pre)
}
Example no. 28
func (hp *HeapProfile) Report(n lattice.Node) error {
	hp.count++
	if hp.count > hp.after && hp.count%hp.every == 0 {
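		// Run the collector several times so the heap profile reflects only objects that are still live.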
		runtime.GC()
		runtime.GC()
		runtime.GC()
		return pprof.WriteHeapProfile(hp.f)
	} else {
		return nil
	}
}
Example no. 29
// Unit tests for the package github.com/mbenkmann/golib/bytes.
func Bytes_test() {
	fmt.Printf("\n==== bytes ===\n\n")

	rand.Seed(45678)
	testBuffer()
	// The following is for testing if the free()s are called. To do this,
	// use the program ltrace(1).
	runtime.GC()
	time.Sleep(1 * time.Second)
	runtime.GC()
}
Example no. 30
func benchmarkModBig(b *testing.B, w, exp uint32) {
	b.StopTimer()
	var n, mod big.Int
	n.Rand(rand.New(rand.NewSource(1)), New(w))
	n.SetBit(&n, int(w), 1)
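	// Collect before restarting the timer so GC work from setup is not charged to the benchmark loop.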
	runtime.GC()
	runtime.GC()
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		mod.Mod(&n, New(exp))
	}
}