func mem_in_go(include_zero bool) runtime.MemProfileRecord {
	var p []runtime.MemProfileRecord
	n, ok := runtime.MemProfile(nil, include_zero)
	for {
		// Allocate room for a slightly bigger profile,
		// in case a few more entries have been added
		// since the call to MemProfile.
		p = make([]runtime.MemProfileRecord, n+50)
		n, ok = runtime.MemProfile(p, include_zero)
		if ok {
			p = p[0:n]
			break
		}
		// Profile grew; try again.
	}

	var total runtime.MemProfileRecord
	for i := range p {
		r := &p[i]
		total.AllocBytes += r.AllocBytes
		total.AllocObjects += r.AllocObjects
		total.FreeBytes += r.FreeBytes
		total.FreeObjects += r.FreeObjects
	}
	return total
}
func MemoryStatistics() string {
	var p []runtime.MemProfileRecord
	n, ok := runtime.MemProfile(nil, false)
	for {
		p = make([]runtime.MemProfileRecord, n+50)
		n, ok = runtime.MemProfile(p, false)
		if ok {
			p = p[0:n]
			break
		}
	}

	var total runtime.MemProfileRecord
	for i := range p {
		r := &p[i]
		total.AllocBytes += r.AllocBytes
		total.AllocObjects += r.AllocObjects
		total.FreeBytes += r.FreeBytes
		total.FreeObjects += r.FreeObjects
	}

	var m runtime.MemStats
	runtime.ReadMemStats(&m)
	return fmt.Sprintf("%d in use objects (%d in use bytes) | Alloc: %d TotalAlloc: %d",
		total.InUseObjects(), total.InUseBytes(), m.Alloc, m.TotalAlloc)
}
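// A minimal usage sketch (not part of the original source): the hypothetical
// helper below just prints the summary string returned by MemoryStatistics.
// Because MemProfile data is sampled (controlled by runtime.MemProfileRate),
// the object and byte counts are estimates rather than exact totals.
func printMemoryStatisticsExample() {
	fmt.Println(MemoryStatistics())
}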
// WriteHeapProfile writes a pprof-formatted heap profile to w.
// If a write to w returns an error, WriteHeapProfile returns that error.
// Otherwise, WriteHeapProfile returns nil.
func WriteHeapProfile(w io.Writer) error {
	// Find out how many records there are (MemProfile(nil, false)),
	// allocate that many records, and get the data.
	// There's a race—more records might be added between
	// the two calls—so allocate a few extra records for safety
	// and also try again if we're very unlucky.
	// The loop should only execute one iteration in the common case.
	var p []runtime.MemProfileRecord
	n, ok := runtime.MemProfile(nil, false)
	for {
		// Allocate room for a slightly bigger profile,
		// in case a few more entries have been added
		// since the call to MemProfile.
		p = make([]runtime.MemProfileRecord, n+50)
		n, ok = runtime.MemProfile(p, false)
		if ok {
			p = p[0:n]
			break
		}
		// Profile grew; try again.
	}

	var total runtime.MemProfileRecord
	for i := range p {
		r := &p[i]
		total.AllocBytes += r.AllocBytes
		total.AllocObjects += r.AllocObjects
		total.FreeBytes += r.FreeBytes
		total.FreeObjects += r.FreeObjects
	}

	// Technically the rate is MemProfileRate not 2*MemProfileRate,
	// but early versions of the C++ heap profiler reported 2*MemProfileRate,
	// so that's what pprof has come to expect.
	b := bufio.NewWriter(w)
	fmt.Fprintf(b, "heap profile: %d: %d [%d: %d] @ heap/%d\n",
		total.InUseObjects(), total.InUseBytes(),
		total.AllocObjects, total.AllocBytes,
		2*runtime.MemProfileRate)

	for i := range p {
		r := &p[i]
		fmt.Fprintf(b, "%d: %d [%d: %d] @",
			r.InUseObjects(), r.InUseBytes(), r.AllocObjects, r.AllocBytes)
		for _, pc := range r.Stack() {
			fmt.Fprintf(b, " %#x", pc)
		}
		fmt.Fprintf(b, "\n")
	}

	// Print memstats information too.
	// Pprof will ignore, but useful for people.
	s := new(runtime.MemStats)
	runtime.ReadMemStats(s)
	fmt.Fprintf(b, "\n# runtime.MemStats\n")
	fmt.Fprintf(b, "# Alloc = %d\n", s.Alloc)
	fmt.Fprintf(b, "# TotalAlloc = %d\n", s.TotalAlloc)
	fmt.Fprintf(b, "# Sys = %d\n", s.Sys)
	fmt.Fprintf(b, "# Lookups = %d\n", s.Lookups)
	fmt.Fprintf(b, "# Mallocs = %d\n", s.Mallocs)
	fmt.Fprintf(b, "# HeapAlloc = %d\n", s.HeapAlloc)
	fmt.Fprintf(b, "# HeapSys = %d\n", s.HeapSys)
	fmt.Fprintf(b, "# HeapIdle = %d\n", s.HeapIdle)
	fmt.Fprintf(b, "# HeapInuse = %d\n", s.HeapInuse)
	fmt.Fprintf(b, "# Stack = %d / %d\n", s.StackInuse, s.StackSys)
	fmt.Fprintf(b, "# MSpan = %d / %d\n", s.MSpanInuse, s.MSpanSys)
	fmt.Fprintf(b, "# MCache = %d / %d\n", s.MCacheInuse, s.MCacheSys)
	fmt.Fprintf(b, "# BuckHashSys = %d\n", s.BuckHashSys)
	fmt.Fprintf(b, "# NextGC = %d\n", s.NextGC)
	fmt.Fprintf(b, "# PauseNs = %d\n", s.PauseNs)
	fmt.Fprintf(b, "# NumGC = %d\n", s.NumGC)
	fmt.Fprintf(b, "# EnableGC = %v\n", s.EnableGC)
	fmt.Fprintf(b, "# DebugGC = %v\n", s.DebugGC)
	fmt.Fprintf(b, "# BySize = Size * (Active = Mallocs - Frees)\n")
	fmt.Fprintf(b, "# (Excluding large blocks.)\n")
	for _, t := range s.BySize {
		if t.Mallocs > 0 {
			fmt.Fprintf(b, "# %d * (%d = %d - %d)\n", t.Size, t.Mallocs-t.Frees, t.Mallocs, t.Frees)
		}
	}
	return b.Flush()
}
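// A minimal usage sketch (assumed, not part of the original source): dump the
// profile produced by WriteHeapProfile above to a file. The file name
// "heap.pprof", the helper name, and the "os" import are illustrative choices.
func dumpHeapProfileExample() error {
	f, err := os.Create("heap.pprof")
	if err != nil {
		return err
	}
	defer f.Close()
	// Running the garbage collector first is optional, but it makes the
	// in-use numbers reflect live data rather than garbage awaiting collection.
	runtime.GC()
	return WriteHeapProfile(f)
}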
// writeHeap writes the current runtime heap profile to w.
func writeHeap(w io.Writer, debug int) error {
	// Find out how many records there are (MemProfile(nil, true)),
	// allocate that many records, and get the data.
	// There's a race—more records might be added between
	// the two calls—so allocate a few extra records for safety
	// and also try again if we're very unlucky.
	// The loop should only execute one iteration in the common case.
	var p []runtime.MemProfileRecord
	n, ok := runtime.MemProfile(nil, true)
	for {
		p = make([]runtime.MemProfileRecord, n+50)
		n, ok = runtime.MemProfile(p, true)
		if ok {
			p = p[0:n]
			break
		}
	}

	sort.Slice(p, func(i, j int) bool { return p[i].InUseBytes() > p[j].InUseBytes() })

	var total runtime.MemProfileRecord
	for i := range p {
		r := &p[i]
		total.AllocBytes += r.AllocBytes
		total.AllocObjects += r.AllocObjects
		total.FreeBytes += r.FreeBytes
		total.FreeObjects += r.FreeObjects
	}

	prof := &profile.Profile{
		PeriodType: &profile.ValueType{Type: "space", Unit: "bytes"},
		SampleType: []*profile.ValueType{
			{Type: "alloc_objects", Unit: "count"},
			{Type: "alloc_space", Unit: "bytes"},
			{Type: "inuse_objects", Unit: "count"},
			{Type: "inuse_space", Unit: "bytes"},
		},
		Period: int64(runtime.MemProfileRate),
	}

	locs := make(map[uint64]*profile.Location)
	for i := range p {
		var v1, v2, v3, v4, blocksize int64
		r := &p[i]
		v1, v2 = int64(r.InUseObjects()), int64(r.InUseBytes())
		v3, v4 = int64(r.AllocObjects), int64(r.AllocBytes)
		if (v1 == 0 && v2 != 0) || (v3 == 0 && v4 != 0) {
			return fmt.Errorf("error writing memory profile: inuse object count was 0 but inuse bytes was %d", v2)
		}
		if v1 != 0 {
			blocksize = v2 / v1
			v1, v2 = scaleHeapSample(v1, v2, prof.Period)
		}
		if v3 != 0 {
			v3, v4 = scaleHeapSample(v3, v4, prof.Period)
		}
		value := []int64{v1, v2, v3, v4}

		var sloc []*profile.Location
		for _, pc := range r.Stack() {
			// Subtract one from the return address so the location
			// points inside the calling instruction.
			addr := uint64(pc)
			addr--
			loc := locs[addr]
			if loc == nil {
				loc = &profile.Location{Address: addr}
				prof.Location = append(prof.Location, loc)
				locs[addr] = loc
			}
			sloc = append(sloc, loc)
		}
		prof.Sample = append(prof.Sample, &profile.Sample{
			Value:    value,
			Location: sloc,
			NumLabel: map[string][]int64{"bytes": {blocksize}},
		})
	}
	prof.RemapAll()
	protopprof.Symbolize(prof)
	return prof.Write(w)
}
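// scaleHeapSample is called above but not shown in this excerpt. A sketch of
// what such a helper typically does (this mirrors the unsampling math commonly
// used for Go heap profiles, but treat it as an assumption about the missing
// code; it needs the "math" import): heap samples are taken roughly once per
// rate bytes, so a bucket with average block size avg is scaled by
// 1 / (1 - exp(-avg/rate)) to estimate true counts and bytes.
func scaleHeapSample(count, size, rate int64) (int64, int64) {
	if count == 0 || size == 0 {
		return 0, 0
	}
	if rate <= 1 {
		// A rate of 1 means every allocation was sampled; nothing to scale.
		return count, size
	}
	avg := float64(size) / float64(count)
	scale := 1 / (1 - math.Exp(-avg/float64(rate)))
	return int64(float64(count) * scale), int64(float64(size) * scale)
}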
// writeHeap writes the current runtime heap profile to w.
func writeHeap(w io.Writer, debug int) error {
	// Find out how many records there are (MemProfile(nil, true)),
	// allocate that many records, and get the data.
	// There's a race—more records might be added between
	// the two calls—so allocate a few extra records for safety
	// and also try again if we're very unlucky.
	// The loop should only execute one iteration in the common case.
	var p []runtime.MemProfileRecord
	n, ok := runtime.MemProfile(nil, true)
	for {
		// Allocate room for a slightly bigger profile,
		// in case a few more entries have been added
		// since the call to MemProfile.
		p = make([]runtime.MemProfileRecord, n+50)
		n, ok = runtime.MemProfile(p, true)
		if ok {
			p = p[0:n]
			break
		}
		// Profile grew; try again.
	}

	sort.Sort(byInUseBytes(p))

	b := bufio.NewWriter(w)
	var tw *tabwriter.Writer
	w = b
	if debug > 0 {
		tw = tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
		w = tw
	}

	var total runtime.MemProfileRecord
	for i := range p {
		r := &p[i]
		total.AllocBytes += r.AllocBytes
		total.AllocObjects += r.AllocObjects
		total.FreeBytes += r.FreeBytes
		total.FreeObjects += r.FreeObjects
	}

	// Technically the rate is MemProfileRate not 2*MemProfileRate,
	// but early versions of the C++ heap profiler reported 2*MemProfileRate,
	// so that's what pprof has come to expect.
	fmt.Fprintf(w, "heap profile: %d: %d [%d: %d] @ heap/%d\n",
		total.InUseObjects(), total.InUseBytes(),
		total.AllocObjects, total.AllocBytes,
		2*runtime.MemProfileRate)

	for i := range p {
		r := &p[i]
		fmt.Fprintf(w, "%d: %d [%d: %d] @",
			r.InUseObjects(), r.InUseBytes(), r.AllocObjects, r.AllocBytes)
		for _, pc := range r.Stack() {
			fmt.Fprintf(w, " %#x", pc)
		}
		fmt.Fprintf(w, "\n")
		if debug > 0 {
			printStackRecord(w, r.Stack(), false)
		}
	}

	// Print memstats information too.
	// Pprof will ignore, but useful for people.
	s := new(runtime.MemStats)
	runtime.ReadMemStats(s)
	fmt.Fprintf(w, "\n# runtime.MemStats\n")
	fmt.Fprintf(w, "# Alloc = %d\n", s.Alloc)
	fmt.Fprintf(w, "# TotalAlloc = %d\n", s.TotalAlloc)
	fmt.Fprintf(w, "# Sys = %d\n", s.Sys)
	fmt.Fprintf(w, "# Lookups = %d\n", s.Lookups)
	fmt.Fprintf(w, "# Mallocs = %d\n", s.Mallocs)
	fmt.Fprintf(w, "# Frees = %d\n", s.Frees)
	fmt.Fprintf(w, "# HeapAlloc = %d\n", s.HeapAlloc)
	fmt.Fprintf(w, "# HeapSys = %d\n", s.HeapSys)
	fmt.Fprintf(w, "# HeapIdle = %d\n", s.HeapIdle)
	fmt.Fprintf(w, "# HeapInuse = %d\n", s.HeapInuse)
	fmt.Fprintf(w, "# HeapReleased = %d\n", s.HeapReleased)
	fmt.Fprintf(w, "# HeapObjects = %d\n", s.HeapObjects)
	fmt.Fprintf(w, "# Stack = %d / %d\n", s.StackInuse, s.StackSys)
	fmt.Fprintf(w, "# MSpan = %d / %d\n", s.MSpanInuse, s.MSpanSys)
	fmt.Fprintf(w, "# MCache = %d / %d\n", s.MCacheInuse, s.MCacheSys)
	fmt.Fprintf(w, "# BuckHashSys = %d\n", s.BuckHashSys)
	fmt.Fprintf(w, "# NextGC = %d\n", s.NextGC)
	fmt.Fprintf(w, "# PauseNs = %d\n", s.PauseNs)
	fmt.Fprintf(w, "# NumGC = %d\n", s.NumGC)
	fmt.Fprintf(w, "# EnableGC = %v\n", s.EnableGC)
	fmt.Fprintf(w, "# DebugGC = %v\n", s.DebugGC)

	if tw != nil {
		tw.Flush()
	}
	return b.Flush()
}
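// byInUseBytes is used by the sort calls above and below but is not defined in
// this excerpt. A sketch of the usual helper (assumed, matching how it is
// used): a sort.Interface over []runtime.MemProfileRecord ordered by in-use
// bytes, largest first, which is the same ordering as the sort.Slice call in
// the proto-writing variant above.
type byInUseBytes []runtime.MemProfileRecord

func (x byInUseBytes) Len() int           { return len(x) }
func (x byInUseBytes) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
func (x byInUseBytes) Less(i, j int) bool { return x[i].InUseBytes() > x[j].InUseBytes() }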
// Based on: https://github.com/golang/go/blob/6b8762104a90c93ebd51149e7a031738832c5cdc/src/runtime/pprof/pprof.go#L387
func Heap(w io.Writer, sortorder string) {
	var p []runtime.MemProfileRecord
	n, ok := runtime.MemProfile(nil, true)
	for {
		// Allocate room for a slightly bigger profile,
		// in case a few more entries have been added
		// since the call to MemProfile.
		p = make([]runtime.MemProfileRecord, n+50)
		n, ok = runtime.MemProfile(p, true)
		if ok {
			p = p[0:n]
			break
		}
		// Profile grew; try again.
	}

	// Merge records whose stacks hash to the same bucket.
	pm := make(map[uintptr]runtime.MemProfileRecord, len(p))
	for _, r := range p {
		// Hash based on: https://github.com/golang/go/blob/f9ed2f75c43cb8745a1593ec3e4208c46419216a/src/runtime/mprof.go#L150
		var h uintptr
		for _, pc := range r.Stack0 {
			h += pc
			h += h << 10
			h ^= h >> 6
		}
		h += h << 3
		h ^= h >> 11

		if _, ok := pm[h]; ok {
			r.AllocBytes += pm[h].AllocBytes
			r.FreeBytes += pm[h].FreeBytes
			r.AllocObjects += pm[h].AllocObjects
			r.FreeObjects += pm[h].FreeObjects
		}
		pm[h] = r
	}
	p = make([]runtime.MemProfileRecord, 0, len(pm))
	for _, r := range pm {
		p = append(p, r)
	}

	switch sortorder {
	default:
		sort.Sort(byInUseBytes(p))
	case "allocbytes":
		sort.Sort(byAllocBytes(p))
	case "allocobjects":
		sort.Sort(byAllocObjects(p))
	case "inuseobjects":
		sort.Sort(byInUseObjects(p))
	}

	tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)

	var total runtime.MemProfileRecord
	for _, r := range p {
		total.AllocBytes += r.AllocBytes
		total.AllocObjects += r.AllocObjects
		total.FreeBytes += r.FreeBytes
		total.FreeObjects += r.FreeObjects
	}

	// Technically the rate is MemProfileRate not 2*MemProfileRate,
	// but early versions of the C++ heap profiler reported 2*MemProfileRate,
	// so that's what pprof has come to expect.
	fmt.Fprintf(tw, "heap profile: %d: %d [%d: %d] @ heap/%d\n",
		total.InUseObjects(), total.InUseBytes(),
		total.AllocObjects, total.AllocBytes,
		2*runtime.MemProfileRate)
	fmt.Fprintf(tw, "# heap profile: %d: %s [%d: %s] @ heap/%d\n\n",
		total.InUseObjects(), formatSize(total.InUseBytes()),
		total.AllocObjects, formatSize(total.AllocBytes),
		2*runtime.MemProfileRate)

	for _, r := range p {
		fmt.Fprintf(tw, "%d: %d [%d: %d] @",
			r.InUseObjects(), r.InUseBytes(), r.AllocObjects, r.AllocBytes)
		for _, pc := range r.Stack() {
			fmt.Fprintf(tw, " %#x", pc)
		}
		fmt.Fprintf(tw, "\n# %d: %s [%d: %s]\n",
			r.InUseObjects(), formatSize(r.InUseBytes()),
			r.AllocObjects, formatSize(r.AllocBytes))
		printStackRecord(tw, r.Stack(), false)
	}

	// Print memstats information too.
	// Pprof will ignore, but useful for people.
	s := new(runtime.MemStats)
	runtime.ReadMemStats(s)

	// Order PauseNs newest first and convert the entries to time.Duration
	// so they print in a readable form.
	pauseNs := make([]time.Duration, 0, len(s.PauseNs))
	var pauseNsLongest time.Duration
	for i := (s.NumGC + 255) % 256; i > 0; i-- {
		d := time.Duration(int64(s.PauseNs[i]))
		if d > pauseNsLongest {
			pauseNsLongest = d
		}
		pauseNs = append(pauseNs, d)
	}
	for i := uint32(255); i > (s.NumGC+255)%256; i-- {
		d := time.Duration(int64(s.PauseNs[i]))
		if d > pauseNsLongest {
			pauseNsLongest = d
		}
		pauseNs = append(pauseNs, d)
	}

	// Time between the end of one pause and the start of the next,
	// newest first.
	pausePause := make([]time.Duration, 0, len(s.PauseEnd)-1)
	nextPause := time.Time{}
	for i := (s.NumGC + 255) % 256; i > 0; i-- {
		if s.PauseEnd[i] == 0 {
			break
		}
		t := time.Unix(0, int64(s.PauseEnd[i]))
		d := time.Duration(int64(s.PauseNs[i]))
		if !nextPause.IsZero() {
			pausePause = append(pausePause, nextPause.Sub(t))
		}
		nextPause = t.Add(-d)
	}
	for i := uint32(255); i > (s.NumGC+255)%256; i-- {
		if s.PauseEnd[i] == 0 {
			break
		}
		t := time.Unix(0, int64(s.PauseEnd[i]))
		d := time.Duration(int64(s.PauseNs[i]))
		pausePause = append(pausePause, nextPause.Sub(t))
		nextPause = t.Add(-d)
	}

	fmt.Fprintf(tw, "\n# runtime.MemStats\n")
	fmt.Fprintf(tw, "# Alloc = %d (%s)\n", s.Alloc, formatSize(int64(s.Alloc)))
	fmt.Fprintf(tw, "# TotalAlloc = %d (%s)\n", s.TotalAlloc, formatSize(int64(s.TotalAlloc)))
	fmt.Fprintf(tw, "# Sys = %d (%s)\n", s.Sys, formatSize(int64(s.Sys)))
	fmt.Fprintf(tw, "# Lookups = %d\n", s.Lookups)
	fmt.Fprintf(tw, "# Mallocs = %d\n", s.Mallocs)
	fmt.Fprintf(tw, "# Frees = %d\n", s.Frees)
	fmt.Fprintf(tw, "# HeapAlloc = %d (%s)\n", s.HeapAlloc, formatSize(int64(s.HeapAlloc)))
	fmt.Fprintf(tw, "# HeapSys = %d (%s)\n", s.HeapSys, formatSize(int64(s.HeapSys)))
	fmt.Fprintf(tw, "# HeapIdle = %d (%s)\n", s.HeapIdle, formatSize(int64(s.HeapIdle)))
	fmt.Fprintf(tw, "# HeapInuse = %d (%s)\n", s.HeapInuse, formatSize(int64(s.HeapInuse)))
	fmt.Fprintf(tw, "# HeapReleased = %d (%s)\n", s.HeapReleased, formatSize(int64(s.HeapReleased)))
	fmt.Fprintf(tw, "# HeapObjects = %d (%s)\n", s.HeapObjects, formatSize(int64(s.HeapObjects)))
	fmt.Fprintf(tw, "# Stack = %d (%s) / %d (%s)\n", s.StackInuse, formatSize(int64(s.StackInuse)), s.StackSys, formatSize(int64(s.StackSys)))
	fmt.Fprintf(tw, "# MSpan = %d (%s) / %d (%s)\n", s.MSpanInuse, formatSize(int64(s.MSpanInuse)), s.MSpanSys, formatSize(int64(s.MSpanSys)))
	fmt.Fprintf(tw, "# MCache = %d (%s) / %d (%s)\n", s.MCacheInuse, formatSize(int64(s.MCacheInuse)), s.MCacheSys, formatSize(int64(s.MCacheSys)))
	fmt.Fprintf(tw, "# BuckHashSys = %d\n", s.BuckHashSys)
	fmt.Fprintf(tw, "# NextGC = %d\n", s.NextGC)
	fmt.Fprintf(tw, "# PauseNs = %v\n", pauseNs)
	fmt.Fprintf(tw, "# PauseNsLongest = %v\n", pauseNsLongest)
	fmt.Fprintf(tw, "# PausePause = %v\n", pausePause)
	fmt.Fprintf(tw, "# NumGC = %d\n", s.NumGC)
	fmt.Fprintf(tw, "# EnableGC = %v\n", s.EnableGC)
	fmt.Fprintf(tw, "# DebugGC = %v\n", s.DebugGC)

	tw.Flush()
}
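// formatSize and the byAllocBytes/byAllocObjects/byInUseObjects sort types are
// referenced above but not shown in this excerpt. A sketch of a formatSize
// helper that would satisfy the calls above (assumed; any human-readable byte
// formatter works here):
func formatSize(b int64) string {
	const unit = 1024
	if b < unit {
		return fmt.Sprintf("%d B", b)
	}
	div, exp := int64(unit), 0
	for n := b / unit; n >= unit; n /= unit {
		div *= unit
		exp++
	}
	// KiB, MiB, GiB, ...
	return fmt.Sprintf("%.1f %ciB", float64(b)/float64(div), "KMGTPE"[exp])
}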
// writeHeap writes the current runtime heap profile to w.
func writeHeap(w io.Writer, debug int) error {
	// Find out how many records there are (MemProfile(nil, true)),
	// allocate that many records, and get the data.
	// There's a race—more records might be added between
	// the two calls—so allocate a few extra records for safety
	// and also try again if we're very unlucky.
	// The loop should only execute one iteration in the common case.
	var p []runtime.MemProfileRecord
	n, ok := runtime.MemProfile(nil, true)
	for {
		// Allocate room for a slightly bigger profile,
		// in case a few more entries have been added
		// since the call to MemProfile.
		p = make([]runtime.MemProfileRecord, n+50)
		n, ok = runtime.MemProfile(p, true)
		if ok {
			p = p[0:n]
			break
		}
		// Profile grew; try again.
	}

	sort.Sort(byInUseBytes(p))

	b := bufio.NewWriter(w)
	var tw *tabwriter.Writer
	w = b
	if debug > 0 {
		tw = tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
		w = tw
	}

	var total runtime.MemProfileRecord
	for i := range p {
		r := &p[i]
		total.AllocBytes += r.AllocBytes
		total.AllocObjects += r.AllocObjects
		total.FreeBytes += r.FreeBytes
		total.FreeObjects += r.FreeObjects
	}

	// Technically the rate is MemProfileRate not 2*MemProfileRate,
	// but early versions of the C++ heap profiler reported 2*MemProfileRate,
	// so that's what pprof has come to expect.
	fmt.Fprintf(w, "heap profile: %d: %d [%d: %d] @ heap/%d\n",
		total.InUseObjects(), total.InUseBytes(),
		total.AllocObjects, total.AllocBytes,
		2*runtime.MemProfileRate)

	for i := range p {
		r := &p[i]
		fmt.Fprintf(w, "%d: %d [%d: %d] @",
			r.InUseObjects(), r.InUseBytes(), r.AllocObjects, r.AllocBytes)
		for _, pc := range r.Stack() {
			fmt.Fprintf(w, " %#x", pc)
		}
		fmt.Fprintf(w, "\n")
		if debug > 0 {
			printStackRecord(w, r.Stack(), false)
		}
	}

	// Print memstats information too.
	// Pprof will ignore, but useful for people.
	s := new(runtime.MemStats)
	runtime.ReadMemStats(s)
	fmt.Fprintf(w, "\n# runtime.MemStats\n")
	fmt.Fprintf(w, "# Alloc = %d\n", s.Alloc)
	fmt.Fprintf(w, "# TotalAlloc = %d\n", s.TotalAlloc)
	fmt.Fprintf(w, "# Sys = %d\n", s.Sys)
	fmt.Fprintf(w, "# Lookups = %d\n", s.Lookups)
	fmt.Fprintf(w, "# Mallocs = %d\n", s.Mallocs)
	fmt.Fprintf(w, "# Frees = %d\n", s.Frees)
	fmt.Fprintf(w, "# HeapAlloc = %d\n", s.HeapAlloc)
	fmt.Fprintf(w, "# HeapSys = %d\n", s.HeapSys)
	fmt.Fprintf(w, "# HeapIdle = %d\n", s.HeapIdle)
	fmt.Fprintf(w, "# HeapInuse = %d\n", s.HeapInuse)
	fmt.Fprintf(w, "# HeapReleased = %d\n", s.HeapReleased)
	fmt.Fprintf(w, "# HeapObjects = %d\n", s.HeapObjects)
	fmt.Fprintf(w, "# Stack = %d / %d\n", s.StackInuse, s.StackSys)
	fmt.Fprintf(w, "# MSpan = %d / %d\n", s.MSpanInuse, s.MSpanSys)
	fmt.Fprintf(w, "# MCache = %d / %d\n", s.MCacheInuse, s.MCacheSys)
	fmt.Fprintf(w, "# BuckHashSys = %d\n", s.BuckHashSys)
	fmt.Fprintf(w, "# NextGC = %d\n", s.NextGC)
	fmt.Fprintf(w, "# PauseNs = %d\n", s.PauseNs)
	fmt.Fprintf(w, "# NumGC = %d\n", s.NumGC)
	fmt.Fprintf(w, "# DebugGC = %v\n", s.DebugGC)

	if tw != nil {
		tw.Flush()
	}
	return b.Flush()
}
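// printStackRecord is used by several of the variants above but never defined
// in this excerpt. A simplified sketch (assumed; the full helper in
// runtime/pprof additionally hides runtime-internal frames unless allFrames is
// set) that resolves each program counter to a function, file, and line:
func printStackRecord(w io.Writer, stk []uintptr, allFrames bool) {
	// allFrames is accepted for signature compatibility with the calls above;
	// this sketch simply prints every resolvable frame.
	_ = allFrames
	frames := runtime.CallersFrames(stk)
	for {
		frame, more := frames.Next()
		if frame.Function != "" {
			fmt.Fprintf(w, "#\t%#x\t%s\t%s:%d\n", frame.PC, frame.Function, frame.File, frame.Line)
		} else {
			fmt.Fprintf(w, "#\t%#x\n", frame.PC)
		}
		if !more {
			break
		}
	}
}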