// Handler returns an http.HandlerFunc that returns pprof profiles
// and additional metrics.
// The handler must be accessible through the "/debug/_gom" route
// in order for gom to display the stats from the debugged program.
// See the godoc examples for usage.
func Handler() http.HandlerFunc {
    // TODO(jbd): enable block profile.
    return func(w http.ResponseWriter, r *http.Request) {
        switch r.URL.Query().Get("view") {
        case "profile":
            name := r.URL.Query().Get("name")
            if name == "profile" {
                httppprof.Profile(w, r)
                return
            }
            httppprof.Handler(name).ServeHTTP(w, r)
            return
        case "symbol":
            httppprof.Symbol(w, r)
            return
        }
        n := &stats{
            Goroutine: pprof.Lookup("goroutine").Count(),
            Thread:    pprof.Lookup("threadcreate").Count(),
            Block:     pprof.Lookup("block").Count(),
            Timestamp: time.Now().Unix(),
        }
        err := json.NewEncoder(w).Encode(n)
        if err != nil {
            w.WriteHeader(500)
            fmt.Fprint(w, err)
        }
    }
}
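// A minimal sketch of mounting Handler on the route gom expects; the
// default mux and the localhost:6060 address are assumptions for
// illustration, not part of the snippet above.
func main() {
    http.HandleFunc("/debug/_gom", Handler())
    log.Fatal(http.ListenAndServe("localhost:6060", nil))
}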
func init() {
    ch := make(chan os.Signal, 1)
    signal.Notify(ch, syscall.SIGHUP)
    go func() {
        for s := range ch {
            switch s {
            case syscall.SIGHUP:
                straceFile := "/tmp/costest.stack.trace"
                log.Debugf("received SIGHUP, writing stack trace to %s", straceFile)
                f, err := os.OpenFile(straceFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
                if err != nil {
                    log.Error("create stack trace file error:", err)
                    continue
                }
                fmt.Fprint(f, "\n\nGoroutines\n\n")
                pprof.Lookup("goroutine").WriteTo(f, 2)
                fmt.Fprint(f, "\n\nHeap\n\n")
                pprof.Lookup("heap").WriteTo(f, 1)
                fmt.Fprint(f, "\n\nThreadCreate\n\n")
                pprof.Lookup("threadcreate").WriteTo(f, 1)
                fmt.Fprint(f, "\n\nBlock\n\n")
                pprof.Lookup("block").WriteTo(f, 1)
                f.Close()
            }
        }
    }()
}
// ShouldNotBeRunningGoroutines takes the name of the current module as
// `actual` and returns an empty string if no goroutines other than
// testing goroutines are running in that module.
// If other goroutines are running, it returns the full stack trace.
// It works by parsing the stack traces of all currently running
// goroutines and checking whether any of them are within this module
// and are not testing goroutines.
func ShouldNotBeRunningGoroutines(actual interface{}, _ ...interface{}) string {
    // This function takes an interface{} so that it can be used with
    // GoConvey's `So(...)` function.
    module := actual.(string)
    var b bytes.Buffer
    // Pass 1 as the debug parameter so the output includes function
    // names and line numbers.
    pprof.Lookup("goroutine").WriteTo(&b, 1)
    scanner := bufio.NewScanner(&b)
    // Each line of this stack trace is one frame in one running goroutine.
    for scanner.Scan() {
        t := scanner.Text()
        // Check whether this line shows a goroutine that is running a
        // non-test file in this module.
        runningInModule := strings.Contains(t, module)
        runningTest := strings.Contains(t, "test")
        runningExternal := strings.Contains(t, "Godeps") || strings.Contains(t, "vendor")
        runningOtherFileInModule := runningInModule && !runningTest && !runningExternal
        if runningOtherFileInModule {
            // Another goroutine from this package is running, so output
            // the full stack trace, with debug level 2 to show more
            // information.
            pprof.Lookup("goroutine").WriteTo(&b, 2)
            return "Was running other goroutines: " + t + b.String()
        }
    }
    return ""
}
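// A hypothetical GoConvey usage sketch for ShouldNotBeRunningGoroutines;
// the test name and module path are assumptions for illustration.
func TestNoLeakedGoroutines(t *testing.T) {
    convey.Convey("no extra goroutines run in this module", t, func() {
        convey.So("github.com/example/mymodule", ShouldNotBeRunningGoroutines)
    })
}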
func debug() {
    log.Println("Running debug report...")
    var m runtime.MemStats
    runtime.ReadMemStats(&m)
    log.Println("MEMORY STATS")
    log.Printf("%d,%d,%d,%d\n", m.HeapSys, m.HeapAlloc, m.HeapIdle, m.HeapReleased)
    log.Println("NUM CPU:", runtime.NumCPU())

    // Profiling: check each os.Create error before writing, and close
    // each file exactly once.
    f, err := os.Create("memprofileup.out")
    if err != nil {
        log.Fatal(err)
    }
    fg, err := os.Create("goprof.out")
    if err != nil {
        log.Fatal(err)
    }
    fb, err := os.Create("blockprof.out")
    if err != nil {
        log.Fatal(err)
    }
    pprof.WriteHeapProfile(f)
    pprof.Lookup("goroutine").WriteTo(fg, 0)
    pprof.Lookup("block").WriteTo(fb, 0)
    f.Close()
    fg.Close()
    fb.Close()
    time.Sleep(1 * time.Second)
    panic("Debugging: Dump the stacks:")
}
func DumpOnSignal(signals ...os.Signal) {
    c := make(chan os.Signal, 1)
    signal.Notify(c, signals...)
    for range c {
        log.Printf("dump: goroutine...")
        pprof.Lookup("goroutine").WriteTo(os.Stderr, 1)
        log.Printf("dump: heap...")
        pprof.Lookup("heap").WriteTo(os.Stderr, 1)
    }
}
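// A minimal sketch of wiring DumpOnSignal into a program; SIGUSR1 is an
// assumed choice, any catchable signal works.
func init() {
    go DumpOnSignal(syscall.SIGUSR1)
}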
func handle(conn net.Conn, msg []byte) error {
    switch msg[0] {
    case signal.StackTrace:
        buf := make([]byte, 1<<16)
        n := runtime.Stack(buf, true)
        _, err := conn.Write(buf[:n])
        return err
    case signal.GC:
        runtime.GC()
        _, err := conn.Write([]byte("ok"))
        return err
    case signal.MemStats:
        var s runtime.MemStats
        runtime.ReadMemStats(&s)
        fmt.Fprintf(conn, "alloc: %v bytes\n", s.Alloc)
        fmt.Fprintf(conn, "total-alloc: %v bytes\n", s.TotalAlloc)
        fmt.Fprintf(conn, "sys: %v bytes\n", s.Sys)
        fmt.Fprintf(conn, "lookups: %v\n", s.Lookups)
        fmt.Fprintf(conn, "mallocs: %v\n", s.Mallocs)
        fmt.Fprintf(conn, "frees: %v\n", s.Frees)
        fmt.Fprintf(conn, "heap-alloc: %v bytes\n", s.HeapAlloc)
        fmt.Fprintf(conn, "heap-sys: %v bytes\n", s.HeapSys)
        fmt.Fprintf(conn, "heap-idle: %v bytes\n", s.HeapIdle)
        fmt.Fprintf(conn, "heap-in-use: %v bytes\n", s.HeapInuse)
        fmt.Fprintf(conn, "heap-released: %v bytes\n", s.HeapReleased)
        fmt.Fprintf(conn, "heap-objects: %v\n", s.HeapObjects)
        fmt.Fprintf(conn, "stack-in-use: %v bytes\n", s.StackInuse)
        fmt.Fprintf(conn, "stack-sys: %v bytes\n", s.StackSys)
        fmt.Fprintf(conn, "next-gc: when heap-alloc >= %v bytes\n", s.NextGC)
        fmt.Fprintf(conn, "last-gc: %v ns\n", s.LastGC)
        fmt.Fprintf(conn, "gc-pause: %v ns\n", s.PauseTotalNs)
        fmt.Fprintf(conn, "num-gc: %v\n", s.NumGC)
        fmt.Fprintf(conn, "enable-gc: %v\n", s.EnableGC)
        fmt.Fprintf(conn, "debug-gc: %v\n", s.DebugGC)
    case signal.Version:
        fmt.Fprintf(conn, "%v\n", runtime.Version())
    case signal.HeapProfile:
        pprof.Lookup("heap").WriteTo(conn, 0)
    case signal.CPUProfile:
        // Report the error to the caller instead of swallowing it.
        if err := pprof.StartCPUProfile(conn); err != nil {
            return err
        }
        time.Sleep(30 * time.Second)
        pprof.StopCPUProfile()
    case signal.Vitals:
        fmt.Fprintf(conn, "goroutines: %v\n", runtime.NumGoroutine())
        fmt.Fprintf(conn, "OS threads: %v\n", pprof.Lookup("threadcreate").Count())
        fmt.Fprintf(conn, "GOMAXPROCS: %v\n", runtime.GOMAXPROCS(0))
        fmt.Fprintf(conn, "num CPU: %v\n", runtime.NumCPU())
    }
    return nil
}
func init() {
    http.HandleFunc("/debug/pprofstats", func(w http.ResponseWriter, r *http.Request) {
        n := &stats{
            Goroutine: pprof.Lookup("goroutine").Count(),
            Thread:    pprof.Lookup("threadcreate").Count(),
            Block:     pprof.Lookup("block").Count(),
            Timestamp: time.Now().Unix(),
        }
        err := json.NewEncoder(w).Encode(n)
        if err != nil {
            w.WriteHeader(500)
            fmt.Fprint(w, err)
        }
    })
}
// after runs after all testing.
func after() {
    if *cpuProfile != "" {
        pprof.StopCPUProfile() // flushes profile to disk
    }
    if *traceFile != "" {
        trace.Stop() // flushes trace to disk
    }
    if *memProfile != "" {
        f, err := os.Create(toOutputDir(*memProfile))
        if err != nil {
            fmt.Fprintf(os.Stderr, "testing: %s\n", err)
            os.Exit(2)
        }
        runtime.GC() // materialize all statistics
        if err = pprof.WriteHeapProfile(f); err != nil {
            fmt.Fprintf(os.Stderr, "testing: can't write %s: %s\n", *memProfile, err)
            os.Exit(2)
        }
        f.Close()
    }
    if *blockProfile != "" && *blockProfileRate >= 0 {
        f, err := os.Create(toOutputDir(*blockProfile))
        if err != nil {
            fmt.Fprintf(os.Stderr, "testing: %s\n", err)
            os.Exit(2)
        }
        if err = pprof.Lookup("block").WriteTo(f, 0); err != nil {
            fmt.Fprintf(os.Stderr, "testing: can't write %s: %s\n", *blockProfile, err)
            os.Exit(2)
        }
        f.Close()
    }
    if *mutexProfile != "" && *mutexProfileFraction >= 0 {
        f, err := os.Create(toOutputDir(*mutexProfile))
        if err != nil {
            fmt.Fprintf(os.Stderr, "testing: %s\n", err)
            os.Exit(2)
        }
        // Report the mutex profile path on failure, not the block profile's.
        if err = pprof.Lookup("mutex").WriteTo(f, 0); err != nil {
            fmt.Fprintf(os.Stderr, "testing: can't write %s: %s\n", *mutexProfile, err)
            os.Exit(2)
        }
        f.Close()
    }
    if cover.Mode != "" {
        coverReport()
    }
}
// Stats returns an http.HandlerFunc that reports stats about the number
// of current goroutines, threads, etc.
// The handler must be accessible through the "/debug/pprofstats" route
// in order for gom to display the stats from the debugged program.
func Stats() http.HandlerFunc {
    // TODO(jbd): enable block profile.
    return func(w http.ResponseWriter, r *http.Request) {
        n := &stats{
            Goroutine: pprof.Lookup("goroutine").Count(),
            Thread:    pprof.Lookup("threadcreate").Count(),
            Block:     pprof.Lookup("block").Count(),
            Timestamp: time.Now().Unix(),
        }
        err := json.NewEncoder(w).Encode(n)
        if err != nil {
            w.WriteHeader(500)
            fmt.Fprint(w, err)
        }
    }
}
func heap() {
    f, err := os.Create("profile")
    if err != nil {
        fmt.Printf("%s\n", err)
        return
    }
    defer f.Close()
    //==================================
    var p [1024]*people
    for i := 0; i < len(p); i++ {
        p[i] = &people{}
        p[i].age[1023] = 2
    }
    doP(p[:])
    for i := 0; i < len(p); i++ {
        p[i] = nil
    }
    var p2 [4 * 1024]*people
    for i := 0; i < len(p2); i++ {
        p2[i] = &people{}
        p2[i].age[1023] = 1
    }
    time.Sleep(3 * time.Second)
    //==================================
    profile := pprof.Lookup("heap")
    if profile != nil {
        profile.WriteTo(f, 1)
    }
}
func DumpOnSignal() {
    c := make(chan os.Signal, 1)
    signal.Notify(c, syscall.SIGUSR2)
    for range c {
        pprof.Lookup("goroutine").WriteTo(os.Stderr, 1)
    }
}
func TestStartStop(t *testing.T) {
    assert := assert.New(t)
    startGoroutineNum := runtime.NumGoroutine()
    for i := 0; i < 10; i++ {
        qa.Root(t, func(root string) {
            configFile := TestConfig(root)
            app := carbon.New(configFile)
            assert.NoError(app.ParseConfig())
            assert.NoError(app.Start())
            app.Stop()
        })
    }
    endGoroutineNum := runtime.NumGoroutine()
    // Allow a small delta for GC workers etc.
    if !assert.InDelta(startGoroutineNum, endGoroutineNum, 3) {
        p := pprof.Lookup("goroutine")
        p.WriteTo(os.Stdout, 1)
    }
}
func profileStartup() {
    if *profileHeap != "" {
        f, err := os.Create(*profileHeap)
        if err != nil {
            log.Fatal(err)
        }
        go func() {
            for {
                time.Sleep(time.Second * 5)
                l := <-profileLock
                f.Seek(0, io.SeekStart)
                f.Truncate(0)
                pprof.Lookup("heap").WriteTo(f, 0)
                profileLock <- l
            }
        }()
    }
    if *profileCPU != "" {
        f, err := os.Create(*profileCPU)
        if err != nil {
            log.Fatal(err)
        }
        pprof.StartCPUProfile(f)
    }
}
func main() {
    // Extract the command line arguments
    relayPort, clusterId, rsaKey := parseFlags()

    // Check for CPU profiling
    if *cpuProfile != "" {
        prof, err := os.Create(*cpuProfile)
        if err != nil {
            log.Fatal(err)
        }
        pprof.StartCPUProfile(prof)
        defer pprof.StopCPUProfile()
    }
    // Check for lock contention profiling
    if *blockProfile != "" {
        prof, err := os.Create(*blockProfile)
        if err != nil {
            log.Fatal(err)
        }
        runtime.SetBlockProfileRate(1)
        defer pprof.Lookup("block").WriteTo(prof, 0)
    }
    // Create and boot a new carrier
    log.Printf("main: booting iris overlay...")
    overlay := iris.New(clusterId, rsaKey)
    if peers, err := overlay.Boot(); err != nil {
        log.Fatalf("main: failed to boot iris overlay: %v.", err)
    } else {
        log.Printf("main: iris overlay converged with %v remote connections.", peers)
    }
    // Create and boot a new relay
    log.Printf("main: booting relay service...")
    rel, err := relay.New(relayPort, overlay)
    if err != nil {
        log.Fatalf("main: failed to create relay service: %v.", err)
    }
    if err := rel.Boot(); err != nil {
        log.Fatalf("main: failed to boot relay: %v.", err)
    }
    // Capture termination signals
    quit := make(chan os.Signal, 1)
    signal.Notify(quit, os.Interrupt)

    // Report success
    log.Printf("main: iris successfully booted, listening on port %d.", relayPort)

    // Wait for termination request, clean up and exit
    <-quit
    log.Printf("main: terminating relay service...")
    if err := rel.Terminate(); err != nil {
        log.Printf("main: failed to terminate relay service: %v.", err)
    }
    log.Printf("main: terminating carrier...")
    if err := overlay.Shutdown(); err != nil {
        log.Printf("main: failed to shutdown iris overlay: %v.", err)
    }
    log.Printf("main: iris terminated.")
}
func Setup(r pork.Router) {
    r.RespondWithFunc("/debug/goroutine", func(w pork.ResponseWriter, r *http.Request) {
        p := pprof.Lookup("goroutine")
        w.Header().Set("Content-Type", "text/plain;charset=utf-8")
        p.WriteTo(w, 2)
    })
}
func connect_tcp(conn *net.TCPConn, header []byte, buff []byte) {
    for {
        // Check the header read error before using its contents.
        _, err := io.ReadFull(conn, header)
        if err != nil {
            fmt.Println(err)
            return
        }
        size := uint32(binary.BigEndian.Uint16(header))
        full := buff[:size]
        _, err = io.ReadFull(conn, full)
        if err == nil {
            fullString := string(full)
            fmt.Println(fullString)
            if fullString == "lookup" {
                p := pprof.Lookup("goroutine")
                p.WriteTo(os.Stdout, 2)
            }
            v := len(full)
            buf := make([]byte, 2)
            buf[0] = byte(v >> 8)
            buf[1] = byte(v)
            data := append(buf, full...)
            conn.Write(data)
        } else {
            fmt.Println(err)
            return
        }
    }
}
func dumpOnSignal(signals ...os.Signal) {
    c := make(chan os.Signal, 1)
    signal.Notify(c, signals...)
    for range c {
        pprof.Lookup("goroutine").WriteTo(os.Stderr, 1)
    }
}
func (s *server) signalToggleCpuProfile() {
    if s.profileFile == nil {
        memFile, err := ioutil.TempFile("", common.ProductName+"_Mem_Profile_")
        if goshawk.CheckWarn(err) {
            return
        }
        if goshawk.CheckWarn(pprof.Lookup("heap").WriteTo(memFile, 0)) {
            return
        }
        if !goshawk.CheckWarn(memFile.Close()) {
            log.Println("Memory profile written to", memFile.Name())
        }
        profFile, err := ioutil.TempFile("", common.ProductName+"_CPU_Profile_")
        if goshawk.CheckWarn(err) {
            return
        }
        if goshawk.CheckWarn(pprof.StartCPUProfile(profFile)) {
            return
        }
        s.profileFile = profFile
        log.Println("Profiling started in", profFile.Name())
    } else {
        pprof.StopCPUProfile()
        if !goshawk.CheckWarn(s.profileFile.Close()) {
            log.Println("Profiling stopped in", s.profileFile.Name())
        }
        s.profileFile = nil
    }
}
func runTest(t *testing.T, clientFunc, serverFunc endpointHandler) {
    c1, c2 := net.Pipe()
    serverDone := make(chan error, 1)
    clientDone := make(chan error, 1)
    go runEndpoint(c2, true, serverFunc, serverDone)
    go runEndpoint(c1, false, clientFunc, clientDone)
    timeout := time.After(50 * time.Millisecond)
    for clientDone != nil || serverDone != nil {
        select {
        case err := <-clientDone:
            if err != nil {
                t.Fatalf("Client error: %s", err)
            }
            clientDone = nil
        case err := <-serverDone:
            if err != nil {
                t.Fatalf("Server error: %s", err)
            }
            serverDone = nil
        case <-timeout:
            pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
            t.Fatalf("Timeout!")
        }
    }
}
func goroutineStats() []byte {
    buf := new(bytes.Buffer)
    if err := pprof.Lookup("goroutine").WriteTo(buf, 2); err != nil {
        return nil
    }
    return buf.Bytes()
}
func SpawnLocalPipeBench(b *testing.B, sender BenchMessageSender, receiver BenchMessageReceiver) {
    endClient := make(chan bool)
    endServer := make(chan bool)
    receiver1, sender1 := libchan.Pipe()
    go BenchClient(b, endClient, sender1, sender, b.N)
    go BenchServer(b, endServer, receiver1, receiver, b.N)
    timeout := time.After(time.Duration(b.N+1) * 50 * time.Millisecond)
    for endClient != nil || endServer != nil {
        select {
        case <-endClient:
            if b.Failed() {
                b.Fatal("Client failed")
            }
            endClient = nil
        case <-endServer:
            if b.Failed() {
                b.Fatal("Server failed")
            }
            endServer = nil
        case <-timeout:
            if DumpStackOnTimeout {
                pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
            }
            b.Fatal("Timeout")
        }
    }
}
// debugStackPrinter prints a full goroutine stack trace to the log fd on SIGUSR2.
func debugStackPrinter(out io.Writer) {
    c := make(chan os.Signal, 1)
    signal.Notify(c, syscall.SIGUSR2)
    for range c {
        pprof.Lookup("goroutine").WriteTo(out, 1)
    }
}
// after runs after all testing.
func after() {
    if *cpuProfile != "" {
        pprof.StopCPUProfile() // flushes profile to disk
    }
    if *memProfile != "" {
        f, err := os.Create(toOutputDir(*memProfile))
        if err != nil {
            fmt.Fprintf(os.Stderr, "testing: %s\n", err)
            os.Exit(2)
        }
        if err = pprof.WriteHeapProfile(f); err != nil {
            fmt.Fprintf(os.Stderr, "testing: can't write %s: %s\n", *memProfile, err)
            os.Exit(2)
        }
        f.Close()
    }
    if *blockProfile != "" && *blockProfileRate >= 0 {
        f, err := os.Create(toOutputDir(*blockProfile))
        if err != nil {
            fmt.Fprintf(os.Stderr, "testing: %s\n", err)
            os.Exit(2)
        }
        if err = pprof.Lookup("block").WriteTo(f, 0); err != nil {
            fmt.Fprintf(os.Stderr, "testing: can't write %s: %s\n", *blockProfile, err)
            os.Exit(2)
        }
        f.Close()
    }
    if cover.Mode != "" {
        coverReport()
    }
}
func TestThatThereIsNoLeakingGoRoutine(t *testing.T) {
    logger := loggertesthelper.Logger()
    server := startHTTPServer()
    defer server.Close()
    authorizer := NewLogAccessAuthorizer(server.URL, true)
    authorizer("bearer something", "myAppId", logger)
    time.Sleep(10 * time.Millisecond)

    var buf bytes.Buffer
    goRoutineProfiles := pprof.Lookup("goroutine")
    goRoutineProfiles.WriteTo(&buf, 2)

    match, err := regexp.Match("readLoop", buf.Bytes())
    if err != nil {
        t.Error("Unable to match /readLoop/ regexp against goRoutineProfile")
        goRoutineProfiles.WriteTo(os.Stdout, 2)
    }
    if match {
        t.Error("We are leaking readLoop goroutines.")
    }
    match, err = regexp.Match("writeLoop", buf.Bytes())
    if err != nil {
        t.Error("Unable to match /writeLoop/ regexp against goRoutineProfile")
    }
    if match {
        t.Error("We are leaking writeLoop goroutines.")
        goRoutineProfiles.WriteTo(os.Stdout, 2)
    }
}
func doprofile(fn string) {
    var err error
    var fc, fh, ft *os.File
    // Loop indefinitely, writing a numbered set of profiles per iteration.
    for i := 1; i > 0; i++ {
        fc, err = os.Create(fn + "-cpu-" + strconv.Itoa(i) + ".prof")
        if err != nil {
            log.Fatal(err)
        }
        pprof.StartCPUProfile(fc)
        time.Sleep(300 * time.Second)
        pprof.StopCPUProfile()
        fc.Close()

        fh, err = os.Create(fn + "-heap-" + strconv.Itoa(i) + ".prof")
        if err != nil {
            log.Fatal(err)
        }
        pprof.WriteHeapProfile(fh)
        fh.Close()

        ft, err = os.Create(fn + "-threadcreate-" + strconv.Itoa(i) + ".prof")
        if err != nil {
            log.Fatal(err)
        }
        pprof.Lookup("threadcreate").WriteTo(ft, 0)
        ft.Close()
        log.Println("Created CPU, heap and threadcreate profiles over 300 seconds")
    }
}
// after runs after all testing.
func after() {
    if *cpuProfile != "" {
        pprof.StopCPUProfile() // flushes profile to disk
    }
    if *memProfile != "" {
        f, err := os.Create(*memProfile)
        if err != nil {
            fmt.Fprintf(os.Stderr, "testing: %s\n", err)
            return
        }
        if err = pprof.WriteHeapProfile(f); err != nil {
            fmt.Fprintf(os.Stderr, "testing: can't write %s: %s\n", *memProfile, err)
        }
        f.Close()
    }
    if *blockProfile != "" && *blockProfileRate >= 0 {
        f, err := os.Create(*blockProfile)
        if err != nil {
            fmt.Fprintf(os.Stderr, "testing: %s\n", err)
            return
        }
        if err = pprof.Lookup("block").WriteTo(f, 0); err != nil {
            fmt.Fprintf(os.Stderr, "testing: can't write %s: %s\n", *blockProfile, err)
        }
        f.Close()
    }
}
func dumpGoRoutine(dumpChan chan os.Signal) {
    for range dumpChan {
        goRoutineProfiles := pprof.Lookup("goroutine")
        if goRoutineProfiles != nil {
            goRoutineProfiles.WriteTo(os.Stdout, 2)
        }
    }
}
func serveGoroutineProfile(w http.ResponseWriter, r *http.Request) {
    prof := pprof.Lookup("goroutine")
    if prof == nil {
        http.Error(w, "unknown profile name", 400)
        return
    }
    prof.WriteTo(w, 1)
}
// SaveProfile invokes runtime/pprof.Lookup(name).WriteTo(f, debug) and
// saves the result to file. Valid profile names include:
//
//   goroutine    - stack traces of all current goroutines
//   heap         - a sampling of all heap allocations
//   threadcreate - stack traces that led to the creation of new OS threads
//   block        - stack traces that led to blocking on synchronization primitives
//
func SaveProfile(name, file string, debug int) error {
    f, err := os.Create(file)
    if err != nil {
        return err
    }
    defer f.Close()
    return pprof.Lookup(name).WriteTo(f, debug)
}
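// A minimal usage sketch of SaveProfile; the function name and output
// path below are assumptions for illustration.
func dumpGoroutines() {
    if err := SaveProfile("goroutine", "/tmp/goroutine.prof", 2); err != nil {
        log.Println("SaveProfile:", err)
    }
}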
func main() {
    data := make([]BigStruct, 1000)
    for i := 0; i < len(data); i++ {
        data[i] = BigStruct{}
    }
    p := pprof.Lookup("heap")
    p.WriteTo(os.Stdout, 2)
}