Example #1
func MemoryStatistics() string {
	var p []runtime.MemProfileRecord
	n, ok := runtime.MemProfile(nil, false)
	for {
		p = make([]runtime.MemProfileRecord, n+50)
		n, ok = runtime.MemProfile(p, false)
		if ok {
			p = p[0:n]
			break
		}
	}

	var total runtime.MemProfileRecord
	for i := range p {
		r := &p[i]
		total.AllocBytes += r.AllocBytes
		total.AllocObjects += r.AllocObjects
		total.FreeBytes += r.FreeBytes
		total.FreeObjects += r.FreeObjects
	}

	var m runtime.MemStats
	runtime.ReadMemStats(&m)

	return fmt.Sprintf("%d in use objects (%d in use bytes) | Alloc: %d TotalAlloc: %d",
		total.InUseObjects(), total.InUseBytes(), m.Alloc, m.TotalAlloc)
}
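A minimal way to exercise this helper is sketched below; the package main wrapper, the allocation loop, and the GC calls are illustrative assumptions, and MemoryStatistics is the function above (same package).

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Sample every allocation so the toy numbers below are meaningful.
	runtime.MemProfileRate = 1

	// Allocate something so the profile is not empty (illustrative only).
	data := make([][]byte, 0, 1024)
	for i := 0; i < 1024; i++ {
		data = append(data, make([]byte, 1024))
	}

	// Flush the profile counters; see Example #2 for why two GC cycles.
	runtime.GC()
	runtime.GC()

	fmt.Println(MemoryStatistics())
	runtime.KeepAlive(data)
}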
Example #2
func getMemProfileRecords() []runtime.MemProfileRecord {
	// Force the runtime to update the object and byte counts.
	// This can take up to two GC cycles to get a complete
	// snapshot of the current point in time.
	runtime.GC()
	runtime.GC()

	// Find out how many records there are (MemProfile(nil, true)),
	// allocate that many records, and get the data.
	// There's a race—more records might be added between
	// the two calls—so allocate a few extra records for safety
	// and also try again if we're very unlucky.
	// The loop should only execute one iteration in the common case.
	var p []runtime.MemProfileRecord
	n, ok := runtime.MemProfile(nil, true)
	for {
		// Allocate room for a slightly bigger profile,
		// in case a few more entries have been added
		// since the call to MemProfile.
		p = make([]runtime.MemProfileRecord, n+50)
		n, ok = runtime.MemProfile(p, true)
		if ok {
			p = p[0:n]
			break
		}
		// Profile grew; try again.
	}
	return p
}
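The records returned above carry raw program counters. The sketch below shows one way to symbolize them with the standard library; the function name and output format are illustrative, and it assumes getMemProfileRecords from the example above plus the fmt, io and runtime imports.

func printMemProfileRecords(w io.Writer) {
	for _, r := range getMemProfileRecords() {
		fmt.Fprintf(w, "%d objects / %d bytes in use at:\n",
			r.InUseObjects(), r.InUseBytes())
		// Resolve the recorded program counters to function, file and line.
		frames := runtime.CallersFrames(r.Stack())
		for {
			frame, more := frames.Next()
			fmt.Fprintf(w, "\t%s\n\t\t%s:%d\n", frame.Function, frame.File, frame.Line)
			if !more {
				break
			}
		}
	}
}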
Example #3
func mem_in_go(include_zero bool) runtime.MemProfileRecord {
	var p []runtime.MemProfileRecord
	n, ok := runtime.MemProfile(nil, include_zero)
	for {
		// Allocate room for a slightly bigger profile,
		// in case a few more entries have been added
		// since the call to MemProfile.
		p = make([]runtime.MemProfileRecord, n+50)
		n, ok = runtime.MemProfile(p, include_zero)
		if ok {
			p = p[0:n]
			break
		}
		// Profile grew; try again.
	}

	var total runtime.MemProfileRecord
	for i := range p {
		r := &p[i]
		total.AllocBytes += r.AllocBytes
		total.AllocObjects += r.AllocObjects
		total.FreeBytes += r.FreeBytes
		total.FreeObjects += r.FreeObjects
	}
	return total
}
Example #4
func main() {
	runtime.MemProfileRate = 1
	// Allocate 1M 4-byte objects and set a finalizer for every third object.
	// Assuming that the tiny block size is 16, some objects get finalizers
	// set up only for middle bytes. The finalizer resurrects that object.
	// As a result, all allocated memory must stay alive.
	const (
		N             = 1 << 20
		tinyBlockSize = 16 // runtime._TinySize
	)
	hold := make([]*int32, 0, N)
	for i := 0; i < N; i++ {
		x := new(int32)
		if i%3 == 0 {
			runtime.SetFinalizer(x, func(p *int32) {
				hold = append(hold, p)
			})
		}
	}
	// Finalize as much as possible.
	// Note: the sleep only increases the probability of bug detection;
	// it cannot lead to a false failure.
	for i := 0; i < 5; i++ {
		runtime.GC()
		time.Sleep(10 * time.Millisecond)
	}
	// Read memory profile.
	var prof []runtime.MemProfileRecord
	for {
		if n, ok := runtime.MemProfile(prof, false); ok {
			prof = prof[:n]
			break
		} else {
			prof = make([]runtime.MemProfileRecord, n+10)
		}
	}
	// See how much memory in tiny objects is profiled.
	var totalBytes int64
	for _, p := range prof {
		bytes := p.AllocBytes - p.FreeBytes
		nobj := p.AllocObjects - p.FreeObjects
		if nobj == 0 {
			// Every object in this record may already have been freed;
			// skip it to avoid dividing by zero.
			continue
		}
		size := bytes / nobj
		if size == tinyBlockSize {
			totalBytes += bytes
		}
	}
	// 2*tinyBlockSize slack is for any boundary effects.
	if want := N*int64(unsafe.Sizeof(int32(0))) - 2*tinyBlockSize; totalBytes < want {
		println("got", totalBytes, "want >=", want)
		panic("some of the tiny objects are not profiled")
	}
	// Just to keep hold alive.
	if len(hold) != 0 && hold[0] == nil {
		panic("bad")
	}
}
Example #5
func MonitorMemProfile() {
	go func() {
		for {
			time.Sleep(10 * time.Second)
			// MemProfile(nil, false) returns only the number of profile
			// records, not the amount of memory in use.
			mem, _ := runtime.MemProfile(nil, false)
			runtime.GC()
			log.Printf("Mem# %d\n", mem)
		}
	}()
}
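Note that runtime.MemProfile(nil, false) reports only the number of profile records, not the amount of memory in use. If the goal is a periodic memory log, runtime.ReadMemStats is the usual source of byte counts; the variant below is a sketch under that assumption (interval and log format are arbitrary choices).

func MonitorMemStats() {
	go func() {
		for {
			time.Sleep(10 * time.Second)
			var m runtime.MemStats
			runtime.ReadMemStats(&m)
			records, _ := runtime.MemProfile(nil, false)
			log.Printf("HeapInuse=%d HeapObjects=%d profile records=%d",
				m.HeapInuse, m.HeapObjects, records)
		}
	}()
}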
Example #6
func TestGITForLeaks(t *testing.T) {
	gitcmd := envOrDefault("git", "/usr/local/git/bin/git")
	repo := "/Users/petar/popalg.org-git"
	filename := "sparsification-by-spanners"
	for i := 0; i < 10000; i++ {
		_, _, err := GITGetCreateUpdateTime(gitcmd, repo, filename)
		if err != nil {
			t.Errorf("git: %s", err)
		}
		if i%100 == 0 {
			// mem is the number of memory profile records; a steadily
			// growing count serves as a rough leak indicator here.
			mem, _ := runtime.MemProfile(nil, false)
			fmt.Printf("i= %d,    mem= %d             \r", i, mem)
		}
	}
	fmt.Printf("\n")
}
Example #7
// WriteHeapProfile writes a pprof-formatted heap profile to w.
// If a write to w returns an error, WriteHeapProfile returns that error.
// Otherwise, WriteHeapProfile returns nil.
func WriteHeapProfile(w io.Writer) error {
	// Find out how many records there are (MemProfile(nil, false)),
	// allocate that many records, and get the data.
	// There's a race—more records might be added between
	// the two calls—so allocate a few extra records for safety
	// and also try again if we're very unlucky.
	// The loop should only execute one iteration in the common case.
	var p []runtime.MemProfileRecord
	n, ok := runtime.MemProfile(nil, false)
	for {
		// Allocate room for a slightly bigger profile,
		// in case a few more entries have been added
		// since the call to MemProfile.
		p = make([]runtime.MemProfileRecord, n+50)
		n, ok = runtime.MemProfile(p, false)
		if ok {
			p = p[0:n]
			break
		}
		// Profile grew; try again.
	}

	var total runtime.MemProfileRecord
	for i := range p {
		r := &p[i]
		total.AllocBytes += r.AllocBytes
		total.AllocObjects += r.AllocObjects
		total.FreeBytes += r.FreeBytes
		total.FreeObjects += r.FreeObjects
	}

	// Technically the rate is MemProfileRate not 2*MemProfileRate,
	// but early versions of the C++ heap profiler reported 2*MemProfileRate,
	// so that's what pprof has come to expect.
	b := bufio.NewWriter(w)
	fmt.Fprintf(b, "heap profile: %d: %d [%d: %d] @ heap/%d\n",
		total.InUseObjects(), total.InUseBytes(),
		total.AllocObjects, total.AllocBytes,
		2*runtime.MemProfileRate)

	for i := range p {
		r := &p[i]
		fmt.Fprintf(b, "%d: %d [%d: %d] @",
			r.InUseObjects(), r.InUseBytes(),
			r.AllocObjects, r.AllocBytes)
		for _, pc := range r.Stack() {
			fmt.Fprintf(b, " %#x", pc)
		}
		fmt.Fprintf(b, "\n")
	}

	// Print memstats information too.
	// Pprof will ignore, but useful for people.
	s := new(runtime.MemStats)
	runtime.ReadMemStats(s)
	fmt.Fprintf(b, "\n# runtime.MemStats\n")
	fmt.Fprintf(b, "# Alloc = %d\n", s.Alloc)
	fmt.Fprintf(b, "# TotalAlloc = %d\n", s.TotalAlloc)
	fmt.Fprintf(b, "# Sys = %d\n", s.Sys)
	fmt.Fprintf(b, "# Lookups = %d\n", s.Lookups)
	fmt.Fprintf(b, "# Mallocs = %d\n", s.Mallocs)

	fmt.Fprintf(b, "# HeapAlloc = %d\n", s.HeapAlloc)
	fmt.Fprintf(b, "# HeapSys = %d\n", s.HeapSys)
	fmt.Fprintf(b, "# HeapIdle = %d\n", s.HeapIdle)
	fmt.Fprintf(b, "# HeapInuse = %d\n", s.HeapInuse)

	fmt.Fprintf(b, "# Stack = %d / %d\n", s.StackInuse, s.StackSys)
	fmt.Fprintf(b, "# MSpan = %d / %d\n", s.MSpanInuse, s.MSpanSys)
	fmt.Fprintf(b, "# MCache = %d / %d\n", s.MCacheInuse, s.MCacheSys)
	fmt.Fprintf(b, "# BuckHashSys = %d\n", s.BuckHashSys)

	fmt.Fprintf(b, "# NextGC = %d\n", s.NextGC)
	fmt.Fprintf(b, "# PauseNs = %d\n", s.PauseNs)
	fmt.Fprintf(b, "# NumGC = %d\n", s.NumGC)
	fmt.Fprintf(b, "# EnableGC = %v\n", s.EnableGC)
	fmt.Fprintf(b, "# DebugGC = %v\n", s.DebugGC)

	fmt.Fprintf(b, "# BySize = Size * (Active = Mallocs - Frees)\n")
	fmt.Fprintf(b, "# (Excluding large blocks.)\n")
	for _, t := range s.BySize {
		if t.Mallocs > 0 {
			fmt.Fprintf(b, "#   %d * (%d = %d - %d)\n", t.Size, t.Mallocs-t.Frees, t.Mallocs, t.Frees)
		}
	}
	return b.Flush()
}
Example #8
// writeHeap writes the current runtime heap profile to w.
func writeHeap(w io.Writer, debug int) error {
	// Find out how many records there are (MemProfile(nil, true)),
	// allocate that many records, and get the data.
	// There's a race—more records might be added between
	// the two calls—so allocate a few extra records for safety
	// and also try again if we're very unlucky.
	// The loop should only execute one iteration in the common case.
	var p []runtime.MemProfileRecord
	n, ok := runtime.MemProfile(nil, true)
	for {
		p = make([]runtime.MemProfileRecord, n+50)
		n, ok = runtime.MemProfile(p, true)
		if ok {
			p = p[0:n]
			break
		}
	}

	sort.Slice(p, func(i, j int) bool { return p[i].InUseBytes() > p[j].InUseBytes() })

	var total runtime.MemProfileRecord
	for i := range p {
		r := &p[i]
		total.AllocBytes += r.AllocBytes
		total.AllocObjects += r.AllocObjects
		total.FreeBytes += r.FreeBytes
		total.FreeObjects += r.FreeObjects
	}

	prof := &profile.Profile{
		PeriodType: &profile.ValueType{Type: "space", Unit: "bytes"},
		SampleType: []*profile.ValueType{
			{Type: "alloc_objects", Unit: "count"},
			{Type: "alloc_space", Unit: "bytes"},
			{Type: "inuse_objects", Unit: "count"},
			{Type: "inuse_space", Unit: "bytes"},
		},
		Period: int64(runtime.MemProfileRate),
	}

	locs := make(map[uint64]*(profile.Location))
	for i := range p {
		var v1, v2, v3, v4, blocksize int64
		r := &p[i]
		v1, v2 = int64(r.InUseObjects()), int64(r.InUseBytes())
		v3, v4 = int64(r.AllocObjects), int64(r.AllocBytes)
		if (v1 == 0 && v2 != 0) || (v3 == 0 && v4 != 0) {
			return fmt.Errorf("error writing memory profile: object count was 0 but byte count was non-zero (inuse %d bytes, alloc %d bytes)", v2, v4)
		} else {
			if v1 != 0 {
				blocksize = v2 / v1
				v1, v2 = scaleHeapSample(v1, v2, prof.Period)
			}
			if v3 != 0 {
				v3, v4 = scaleHeapSample(v3, v4, prof.Period)
			}
		}
		value := []int64{v1, v2, v3, v4}
		var sloc []*profile.Location
		for _, pc := range r.Stack() {
			addr := uint64(pc)
			addr--
			loc := locs[addr]
			if locs[addr] == nil {
				loc = &(profile.Location{
					Address: addr,
				})
				prof.Location = append(prof.Location, loc)
				locs[addr] = loc
			}
			sloc = append(sloc, loc)
		}
		prof.Sample = append(prof.Sample, &profile.Sample{
			Value:    value,
			Location: sloc,
			NumLabel: map[string][]int64{"bytes": {blocksize}},
		})
	}
	prof.RemapAll()
	protopprof.Symbolize(prof)
	return prof.Write(w)
}
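Example #8 calls a scaleHeapSample helper that is not shown. The Go runtime's pprof encoder uses a function of that name to compensate for heap sampling; the sketch below follows that approach, and the exact formula should be treated as an assumption about that implementation. It needs the math import.

// scaleHeapSample adjusts sampled heap counts toward their estimated true
// values. With one sample taken per rate bytes on average, a bucket whose
// average object size is avg is observed with probability 1-exp(-avg/rate);
// dividing by that probability undoes the sampling bias.
func scaleHeapSample(count, size, rate int64) (int64, int64) {
	if count == 0 || size == 0 {
		return 0, 0
	}
	if rate <= 1 {
		// A rate of 1 means every allocation was recorded; nothing to scale.
		return count, size
	}
	avg := float64(size) / float64(count)
	scale := 1 / (1 - math.Exp(-avg/float64(rate)))
	return int64(float64(count) * scale), int64(float64(size) * scale)
}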
Example #9
// countHeap returns the number of records in the heap profile.
func countHeap() int {
	n, _ := runtime.MemProfile(nil, true)
	return n
}
Example #10
// writeHeap writes the current runtime heap profile to w.
func writeHeap(w io.Writer, debug int) error {
	// Find out how many records there are (MemProfile(nil, true)),
	// allocate that many records, and get the data.
	// There's a race—more records might be added between
	// the two calls—so allocate a few extra records for safety
	// and also try again if we're very unlucky.
	// The loop should only execute one iteration in the common case.
	var p []runtime.MemProfileRecord
	n, ok := runtime.MemProfile(nil, true)
	for {
		// Allocate room for a slightly bigger profile,
		// in case a few more entries have been added
		// since the call to MemProfile.
		p = make([]runtime.MemProfileRecord, n+50)
		n, ok = runtime.MemProfile(p, true)
		if ok {
			p = p[0:n]
			break
		}
		// Profile grew; try again.
	}

	sort.Sort(byInUseBytes(p))

	b := bufio.NewWriter(w)
	var tw *tabwriter.Writer
	w = b
	if debug > 0 {
		tw = tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
		w = tw
	}

	var total runtime.MemProfileRecord
	for i := range p {
		r := &p[i]
		total.AllocBytes += r.AllocBytes
		total.AllocObjects += r.AllocObjects
		total.FreeBytes += r.FreeBytes
		total.FreeObjects += r.FreeObjects
	}

	// Technically the rate is MemProfileRate not 2*MemProfileRate,
	// but early versions of the C++ heap profiler reported 2*MemProfileRate,
	// so that's what pprof has come to expect.
	fmt.Fprintf(w, "heap profile: %d: %d [%d: %d] @ heap/%d\n",
		total.InUseObjects(), total.InUseBytes(),
		total.AllocObjects, total.AllocBytes,
		2*runtime.MemProfileRate)

	for i := range p {
		r := &p[i]
		fmt.Fprintf(w, "%d: %d [%d: %d] @",
			r.InUseObjects(), r.InUseBytes(),
			r.AllocObjects, r.AllocBytes)
		for _, pc := range r.Stack() {
			fmt.Fprintf(w, " %#x", pc)
		}
		fmt.Fprintf(w, "\n")
		if debug > 0 {
			printStackRecord(w, r.Stack(), false)
		}
	}

	// Print memstats information too.
	// Pprof will ignore, but useful for people
	s := new(runtime.MemStats)
	runtime.ReadMemStats(s)
	fmt.Fprintf(w, "\n# runtime.MemStats\n")
	fmt.Fprintf(w, "# Alloc = %d\n", s.Alloc)
	fmt.Fprintf(w, "# TotalAlloc = %d\n", s.TotalAlloc)
	fmt.Fprintf(w, "# Sys = %d\n", s.Sys)
	fmt.Fprintf(w, "# Lookups = %d\n", s.Lookups)
	fmt.Fprintf(w, "# Mallocs = %d\n", s.Mallocs)
	fmt.Fprintf(w, "# Frees = %d\n", s.Frees)

	fmt.Fprintf(w, "# HeapAlloc = %d\n", s.HeapAlloc)
	fmt.Fprintf(w, "# HeapSys = %d\n", s.HeapSys)
	fmt.Fprintf(w, "# HeapIdle = %d\n", s.HeapIdle)
	fmt.Fprintf(w, "# HeapInuse = %d\n", s.HeapInuse)
	fmt.Fprintf(w, "# HeapReleased = %d\n", s.HeapReleased)
	fmt.Fprintf(w, "# HeapObjects = %d\n", s.HeapObjects)

	fmt.Fprintf(w, "# Stack = %d / %d\n", s.StackInuse, s.StackSys)
	fmt.Fprintf(w, "# MSpan = %d / %d\n", s.MSpanInuse, s.MSpanSys)
	fmt.Fprintf(w, "# MCache = %d / %d\n", s.MCacheInuse, s.MCacheSys)
	fmt.Fprintf(w, "# BuckHashSys = %d\n", s.BuckHashSys)

	fmt.Fprintf(w, "# NextGC = %d\n", s.NextGC)
	fmt.Fprintf(w, "# PauseNs = %d\n", s.PauseNs)
	fmt.Fprintf(w, "# NumGC = %d\n", s.NumGC)
	fmt.Fprintf(w, "# EnableGC = %v\n", s.EnableGC)
	fmt.Fprintf(w, "# DebugGC = %v\n", s.DebugGC)

	if tw != nil {
		tw.Flush()
	}
	return b.Flush()
}
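Examples #10, #12 and #13 sort with helpers such as byInUseBytes, byAllocBytes, byAllocObjects and byInUseObjects, which are not shown; they are small sort.Interface wrappers over []runtime.MemProfileRecord. A sketch of one of them follows (the others differ only in the Less method). The printStackRecord helper used for debug output is likewise not shown; it resolves the recorded stack much like the symbolization sketch after Example #2.

// byInUseBytes sorts memory profile records by in-use bytes, largest first.
type byInUseBytes []runtime.MemProfileRecord

func (x byInUseBytes) Len() int           { return len(x) }
func (x byInUseBytes) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
func (x byInUseBytes) Less(i, j int) bool { return x[i].InUseBytes() > x[j].InUseBytes() }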
Example #11
func init() {

	store.DefaultDBManager.AddFunc("select", func(db *store.DB, args []string) string {
		return reply.OKReply
	})

	store.DefaultDBManager.AddFunc("info", func(db *store.DB, args []string) string {
		return reply.BulkReply("# Server\r\nredis_version:2.6.7\r\n")
	})

	store.DefaultDBManager.AddFunc("meminuse", func(db *store.DB, args []string) string {
		return reply.IntReply(int(store.MemInUse()))
	})

	store.DefaultDBManager.AddFunc("objectsinuse", func(db *store.DB, args []string) string {
		// The record count can grow between the sizing call and the copying
		// call, so over-allocate and retry until the profile fits.
		var memProfiles []runtime.MemProfileRecord
		n, ok := runtime.MemProfile(nil, false)
		for {
			memProfiles = make([]runtime.MemProfileRecord, n+50)
			n, ok = runtime.MemProfile(memProfiles, false)
			if ok {
				memProfiles = memProfiles[:n]
				break
			}
		}

		objects := int64(0)
		for _, prof := range memProfiles {
			objects += prof.InUseObjects()
		}

		return reply.IntReply(int(objects))
	})

	store.DefaultDBManager.AddFunc("gc", func(db *store.DB, args []string) string {
		runtime.GC()
		return reply.OKReply
	})

	store.DefaultDBManager.AddFunc("freeosmemory", func(db *store.DB, args []string) string {
		debug.FreeOSMemory()
		return reply.OKReply
	})

	store.DefaultDBManager.AddFunc("save", func(db *store.DB, args []string) string {
		err := db.SaveToDiskSync()
		if err != nil {
			return reply.ErrorReply(err)
		} else {
			return reply.OKReply
		}
	})

	store.DefaultDBManager.AddFunc("bgsave", func(db *store.DB, args []string) string {
		store.DefaultDBManager.SaveToDiskAsync(nil)
		return reply.OKReply
	})

	store.DefaultDBManager.AddFunc("__end_save_mode__", func(db *store.DB, args []string) string {
		db.EndSaveMode()
		return reply.OKReply
	})

	store.DefaultDBManager.AddFunc("dbsize", func(db *store.DB, args []string) string {
		return reply.IntReply(store.DefaultDBManager.DBSize())
	})

	store.DefaultDBManager.AddFunc("lastdump", func(db *store.DB, args []string) string {
		elem, ok, _ := db.StoreGet(args[0], data.Any)
		if ok {
			return reply.IntReply(int(elem.LastDump()))
		} else {
			return reply.NilReply
		}
	})

	store.DefaultDBManager.AddFunc("lastdumpdb", func(db *store.DB, args []string) string {
		return reply.IntReply(int(db.LastDump()))
	})

	store.DefaultDBManager.AddFunc("flush", func(db *store.DB, args []string) string {
		db.Flush()
		return reply.OKReply
	})

	store.DefaultDBManager.AddFunc("keysperdb", func(db *store.DB, args []string) string {
		var w reply.MultiBulkWriter
		dbs := store.DefaultDBManager.GetDBs()
		w.WriteCount(len(dbs))
		for _, db := range dbs {
			w.WriteString(fmt.Sprintf("%d", len(db.Store)))
		}
		return w.String()
	})

}
Example #12
// Based on: https://github.com/golang/go/blob/6b8762104a90c93ebd51149e7a031738832c5cdc/src/runtime/pprof/pprof.go#L387
func Heap(w io.Writer, sortorder string) {
	var p []runtime.MemProfileRecord
	n, ok := runtime.MemProfile(nil, true)
	for {
		// Allocate room for a slightly bigger profile,
		// in case a few more entries have been added
		// since the call to MemProfile.
		p = make([]runtime.MemProfileRecord, n+50)
		n, ok = runtime.MemProfile(p, true)
		if ok {
			p = p[0:n]
			break
		}
		// Profile grew; try again.
	}

	pm := make(map[uintptr]runtime.MemProfileRecord, len(p))

	for _, r := range p {
		// Based on: https://github.com/golang/go/blob/f9ed2f75c43cb8745a1593ec3e4208c46419216a/src/runtime/mprof.go#L150
		var h uintptr
		for _, pc := range r.Stack0 {
			h += pc
			h += h << 10
			h ^= h >> 6
		}
		h += h << 3
		h ^= h >> 11

		if _, ok := pm[h]; ok {
			r.AllocBytes += pm[h].AllocBytes
			r.FreeBytes += pm[h].FreeBytes
			r.AllocObjects += pm[h].AllocObjects
			r.FreeObjects += pm[h].FreeObjects
		}
		pm[h] = r
	}

	p = make([]runtime.MemProfileRecord, 0, len(pm))

	for _, r := range pm {
		p = append(p, r)
	}

	switch sortorder {
	default:
		sort.Sort(byInUseBytes(p))
	case "allocbytes":
		sort.Sort(byAllocBytes(p))
	case "allocobjects":
		sort.Sort(byAllocObjects(p))
	case "inuseobjects":
		sort.Sort(byInUseObjects(p))
	}

	tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)

	var total runtime.MemProfileRecord
	for _, r := range p {
		total.AllocBytes += r.AllocBytes
		total.AllocObjects += r.AllocObjects
		total.FreeBytes += r.FreeBytes
		total.FreeObjects += r.FreeObjects
	}

	// Technically the rate is MemProfileRate not 2*MemProfileRate,
	// but early versions of the C++ heap profiler reported 2*MemProfileRate,
	// so that's what pprof has come to expect.
	fmt.Fprintf(tw, "heap profile: %d: %d [%d: %d] @ heap/%d\n",
		total.InUseObjects(), total.InUseBytes(),
		total.AllocObjects, total.AllocBytes,
		2*runtime.MemProfileRate)

	fmt.Fprintf(tw, "# heap profile: %d: %s [%d: %s] @ heap/%d\n\n",
		total.InUseObjects(), formatSize(total.InUseBytes()),
		total.AllocObjects, formatSize(total.AllocBytes),
		2*runtime.MemProfileRate)

	for _, r := range p {
		fmt.Fprintf(tw, "%d: %d [%d: %d] @",
			r.InUseObjects(), r.InUseBytes(),
			r.AllocObjects, r.AllocBytes)
		for _, pc := range r.Stack() {
			fmt.Fprintf(tw, " %#x", pc)
		}
		fmt.Fprintf(tw, "\n# %d: %s [%d: %s]\n",
			r.InUseObjects(), formatSize(r.InUseBytes()),
			r.AllocObjects, formatSize(r.AllocBytes))
		printStackRecord(tw, r.Stack(), false)
	}

	// Print memstats information too.
	// Pprof will ignore, but useful for people
	s := new(runtime.MemStats)
	runtime.ReadMemStats(s)

	// Sort pauseNs newest first and convert the values to
	// time.Duration so they print nicely.
	pauseNs := make([]time.Duration, 0, len(s.PauseNs))
	var pauseNsLongest time.Duration
	for i := (s.NumGC + 255) % 256; i > 0; i-- {
		d := time.Duration(int64(s.PauseNs[i]))
		if d > pauseNsLongest {
			pauseNsLongest = d
		}
		pauseNs = append(pauseNs, d)
	}
	for i := uint32(255); i > (s.NumGC+255)%256; i-- {
		d := time.Duration(int64(s.PauseNs[i]))
		if d > pauseNsLongest {
			pauseNsLongest = d
		}
		pauseNs = append(pauseNs, d)
	}

	pausePause := make([]time.Duration, 0, len(s.PauseEnd)-1)
	nextPause := time.Time{}
	for i := (s.NumGC + 255) % 256; i > 0; i-- {
		if s.PauseEnd[i] == 0 {
			break
		}
		t := time.Unix(0, int64(s.PauseEnd[i]))
		d := time.Duration(int64(s.PauseNs[i]))
		if !nextPause.IsZero() {
			pausePause = append(pausePause, nextPause.Sub(t))
		}
		nextPause = t.Add(-d)
	}
	for i := uint32(255); i > (s.NumGC+255)%256; i-- {
		if s.PauseEnd[i] == 0 {
			break
		}
		t := time.Unix(0, int64(s.PauseEnd[i]))
		d := time.Duration(int64(s.PauseNs[i]))
		pausePause = append(pausePause, nextPause.Sub(t))
		nextPause = t.Add(-d)
	}

	fmt.Fprintf(tw, "\n# runtime.MemStats\n")
	fmt.Fprintf(tw, "# Alloc = %d (%s)\n", s.Alloc, formatSize(int64(s.Alloc)))
	fmt.Fprintf(tw, "# TotalAlloc = %d (%s)\n", s.TotalAlloc, formatSize(int64(s.TotalAlloc)))
	fmt.Fprintf(tw, "# Sys = %d (%s)\n", s.Sys, formatSize(int64(s.Sys)))
	fmt.Fprintf(tw, "# Lookups = %d\n", s.Lookups)
	fmt.Fprintf(tw, "# Mallocs = %d\n", s.Mallocs)
	fmt.Fprintf(tw, "# Frees = %d\n", s.Frees)

	fmt.Fprintf(tw, "# HeapAlloc = %d (%s)\n", s.HeapAlloc, formatSize(int64(s.HeapAlloc)))
	fmt.Fprintf(tw, "# HeapSys = %d (%s)\n", s.HeapSys, formatSize(int64(s.HeapSys)))
	fmt.Fprintf(tw, "# HeapIdle = %d (%s)\n", s.HeapIdle, formatSize(int64(s.HeapIdle)))
	fmt.Fprintf(tw, "# HeapInuse = %d (%s)\n", s.HeapInuse, formatSize(int64(s.HeapInuse)))
	fmt.Fprintf(tw, "# HeapReleased = %d (%s)\n", s.HeapReleased, formatSize(int64(s.HeapReleased)))
	fmt.Fprintf(tw, "# HeapObjects = %d (%s)\n", s.HeapObjects, formatSize(int64(s.HeapObjects)))

	fmt.Fprintf(tw, "# Stack = %d (%s) / %d (%s)\n", s.StackInuse, formatSize(int64(s.StackInuse)), s.StackSys, formatSize(int64(s.StackSys)))
	fmt.Fprintf(tw, "# MSpan = %d (%s) / %d (%s)\n", s.MSpanInuse, formatSize(int64(s.MSpanInuse)), s.MSpanSys, formatSize(int64(s.MSpanSys)))
	fmt.Fprintf(tw, "# MCache = %d (%s) / %d (%s)\n", s.MCacheInuse, formatSize(int64(s.MCacheInuse)), s.MCacheSys, formatSize(int64(s.MCacheSys)))
	fmt.Fprintf(tw, "# BuckHashSys = %d\n", s.BuckHashSys)

	fmt.Fprintf(tw, "# NextGC = %d\n", s.NextGC)
	fmt.Fprintf(tw, "# PauseNs = %v\n", pauseNs)
	fmt.Fprintf(tw, "# PauseNsLongest = %v\n", pauseNsLongest)
	fmt.Fprintf(tw, "# PausePause = %v\n", pausePause)
	fmt.Fprintf(tw, "# NumGC = %d\n", s.NumGC)
	fmt.Fprintf(tw, "# EnableGC = %v\n", s.EnableGC)
	fmt.Fprintf(tw, "# DebugGC = %v\n", s.DebugGC)

	if tw != nil {
		tw.Flush()
	}
}
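Example #12 also relies on a formatSize helper to render byte counts for humans. One possible implementation is sketched below; the name and signature match the call sites above, while the binary-unit style is an assumption.

// formatSize renders a byte count using binary units (KiB, MiB, ...).
func formatSize(b int64) string {
	const unit = 1024
	if b < unit {
		return fmt.Sprintf("%d B", b)
	}
	div, exp := int64(unit), 0
	for n := b / unit; n >= unit; n /= unit {
		div *= unit
		exp++
	}
	return fmt.Sprintf("%.1f %ciB", float64(b)/float64(div), "KMGTPE"[exp])
}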
Example #13
// writeHeap writes the current runtime heap profile to w.
func writeHeap(w io.Writer, debug int) error {
	// Find out how many records there are (MemProfile(nil, true)),
	// allocate that many records, and get the data.
	// There's a race—more records might be added between
	// the two calls—so allocate a few extra records for safety
	// and also try again if we're very unlucky.
	// The loop should only execute one iteration in the common case.
	var p []runtime.MemProfileRecord
	n, ok := runtime.MemProfile(nil, true)
	for {
		// Allocate room for a slightly bigger profile,
		// in case a few more entries have been added
		// since the call to MemProfile.
		p = make([]runtime.MemProfileRecord, n+50)
		n, ok = runtime.MemProfile(p, true)
		if ok {
			p = p[0:n]
			break
		}
		// Profile grew; try again.
	}

	sort.Sort(byInUseBytes(p))

	b := bufio.NewWriter(w)
	var tw *tabwriter.Writer
	w = b
	if debug > 0 {
		tw = tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
		w = tw
	}

	var total runtime.MemProfileRecord
	for i := range p {
		r := &p[i]
		total.AllocBytes += r.AllocBytes
		total.AllocObjects += r.AllocObjects
		total.FreeBytes += r.FreeBytes
		total.FreeObjects += r.FreeObjects
	}

	// Technically the rate is MemProfileRate not 2*MemProfileRate,
	// but early versions of the C++ heap profiler reported 2*MemProfileRate,
	// so that's what pprof has come to expect.
	fmt.Fprintf(w, "heap profile: %d: %d [%d: %d] @ heap/%d\n",
		total.InUseObjects(), total.InUseBytes(),
		total.AllocObjects, total.AllocBytes,
		2*runtime.MemProfileRate)

	for i := range p {
		r := &p[i]
		fmt.Fprintf(w, "%d: %d [%d: %d] @",
			r.InUseObjects(), r.InUseBytes(),
			r.AllocObjects, r.AllocBytes)
		for _, pc := range r.Stack() {
			fmt.Fprintf(w, " %#x", pc)
		}
		fmt.Fprintf(w, "\n")
		if debug > 0 {
			printStackRecord(w, r.Stack(), false)
		}
	}

	// Print memstats information too.
	// Pprof will ignore, but useful for people
	s := new(runtime.MemStats)
	runtime.ReadMemStats(s)
	fmt.Fprintf(w, "\n# runtime.MemStats\n")
	fmt.Fprintf(w, "# Alloc = %d\n", s.Alloc)
	fmt.Fprintf(w, "# TotalAlloc = %d\n", s.TotalAlloc)
	fmt.Fprintf(w, "# Sys = %d\n", s.Sys)
	fmt.Fprintf(w, "# Lookups = %d\n", s.Lookups)
	fmt.Fprintf(w, "# Mallocs = %d\n", s.Mallocs)
	fmt.Fprintf(w, "# Frees = %d\n", s.Frees)

	fmt.Fprintf(w, "# HeapAlloc = %d\n", s.HeapAlloc)
	fmt.Fprintf(w, "# HeapSys = %d\n", s.HeapSys)
	fmt.Fprintf(w, "# HeapIdle = %d\n", s.HeapIdle)
	fmt.Fprintf(w, "# HeapInuse = %d\n", s.HeapInuse)
	fmt.Fprintf(w, "# HeapReleased = %d\n", s.HeapReleased)
	fmt.Fprintf(w, "# HeapObjects = %d\n", s.HeapObjects)

	fmt.Fprintf(w, "# Stack = %d / %d\n", s.StackInuse, s.StackSys)
	fmt.Fprintf(w, "# MSpan = %d / %d\n", s.MSpanInuse, s.MSpanSys)
	fmt.Fprintf(w, "# MCache = %d / %d\n", s.MCacheInuse, s.MCacheSys)
	fmt.Fprintf(w, "# BuckHashSys = %d\n", s.BuckHashSys)

	fmt.Fprintf(w, "# NextGC = %d\n", s.NextGC)
	fmt.Fprintf(w, "# PauseNs = %d\n", s.PauseNs)
	fmt.Fprintf(w, "# NumGC = %d\n", s.NumGC)
	fmt.Fprintf(w, "# DebugGC = %v\n", s.DebugGC)

	if tw != nil {
		tw.Flush()
	}
	return b.Flush()
}