Example 1
// Stats driver waits for samples and publishes results to listeners
func StartDriver() {
	go func() {
		var gcStats debug.GCStats
		var listeners = make(map[chan *Stats]string)
		stats := NewStats()
		statsUpdate := time.Tick(config.StatsUpdatePeriodMs)
		for {
			select {
			// Time to send a stats update
			case <-statsUpdate:
				// Update the GC stats
				debug.ReadGCStats(&gcStats)
				stats.GcCount = gcStats.NumGC
				if len(gcStats.Pause) > 0 {
					pauseTime := float64(gcStats.Pause[0]) / float64(time.Millisecond)
					stats.GcPauseTime = strconv.FormatFloat(pauseTime, 'f', 2, 64)
				}
				// Send the most recent stats to every idle listener
				for k := range listeners {
					// Only send the latest stats if the listener has processed the previous one
					select {
					case k <- stats:
					default:
					}
				}
				// Allocate fresh stats so we don't modify the one just sent by reference
				stats = NewStats()

			// A new stats sample arrived on the sample channel
			case s := <-statSampleChannel:
				{
					switch s.StatType {
					case statFrameRenderTime:
						stats.addFrameRenderTimeSample(s.Src, s.Sample)
					case statFrameSyncJitter:
						stats.addFrameSyncJitterSample(s.Src, s.Sample)
					case statSerialSendTime:
						stats.addSerialSendTimeSample(s.Src, s.Sample)
					case statSerialDroppedFrame:
						stats.addSerialDroppedFrame(s.Src)
					case statFrameRenderDroppedFrame:
						stats.addFrameRenderDroppedFrame()
					default:
						panic("Unknown stat type " + strconv.Itoa(s.StatType))
					}
				}

			// Process new listener requests
			case newListener := <-addListener:
				log.WithField("name", newListener.name).Info("Stats listener added")
				listeners[newListener.src] = newListener.name

			// Process remove listener request
			case listenerToRemove := <-listenerDone:
				log.WithField("name", listeners[listenerToRemove]).Info("Stats listener removed")
				delete(listeners, listenerToRemove)
			}
		}
	}()
}
Example 2
// Get GC summary.
func GCSummary() *GCSummaryInfo {
	gcstats := debug.GCStats{PauseQuantiles: make([]time.Duration, 100)}
	debug.ReadGCStats(&gcstats)

	memStats := runtime.MemStats{}
	runtime.ReadMemStats(&memStats)

	elapsed := time.Now().Sub(startTime)

	summary := &GCSummaryInfo{
		Alloc:     memStats.Alloc,
		Sys:       memStats.Sys,
		AllocRate: uint64(float64(memStats.TotalAlloc) / elapsed.Seconds()),
	}

	if gcstats.NumGC > 0 {
		summary.NumGC = gcstats.NumGC
		summary.LastPause = gcstats.Pause[0]
		summary.PauseAvg = durationAvg(gcstats.Pause)
		summary.Overhead = float64(gcstats.PauseTotal) / float64(elapsed) * 100
		summary.Histogram1 = gcstats.PauseQuantiles[94]
		summary.Histogram2 = gcstats.PauseQuantiles[98]
		summary.Histogram3 = gcstats.PauseQuantiles[99]
	}

	return summary
}
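Note: the durationAvg helper called above is not included in the snippet. A minimal sketch of one possible implementation, assuming it simply averages the recorded pause durations:

func durationAvg(items []time.Duration) time.Duration {
	// Hypothetical helper: average a slice of durations; returns 0 for an empty slice.
	if len(items) == 0 {
		return 0
	}
	var total time.Duration
	for _, d := range items {
		total += d
	}
	return total / time.Duration(len(items))
}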
Example 3
func memProfile() {
	var gcstats *debug.GCStats
	var stats *runtime.MemStats

	stats = &runtime.MemStats{}
	gcstats = &debug.GCStats{}

	for {
		fmt.Println("STATS")

		runtime.ReadMemStats(stats)
		fmt.Printf("EnableGC: %v.\n", stats.EnableGC)
		fmt.Printf("LastGC: %d.\n", stats.LastGC)
		fmt.Printf("Mallocs: %d.\n", stats.Mallocs)
		fmt.Printf("Frees: %d.\n", stats.Frees)
		fmt.Printf("Mallocs - Frees: %d.\n", stats.Mallocs-stats.Frees)

		debug.ReadGCStats(gcstats)
		fmt.Printf("LastGC: %v.\n", gcstats.LastGC)
		fmt.Printf("NumGC: %d.\n", gcstats.NumGC)

		time.Sleep(time.Second * 2)
		fmt.Println("")
		fmt.Println("")
	}
}
Example 4
func show_info(par string) {
	common.Busy_mutex.Lock()
	if common.BusyWith != "" {
		fmt.Println("Chain thread busy with:", common.BusyWith)
	} else {
		fmt.Println("Chain thread is idle")
	}
	common.Busy_mutex.Unlock()

	network.MutexRcv.Lock()
	fmt.Println("Last Header:", network.LastCommitedHeader.BlockHash.String(), "@", network.LastCommitedHeader.Height)
	discarded := len(network.DiscardedBlocks)
	cached := len(network.CachedBlocks)
	b2g_len := len(network.BlocksToGet)
	b2g_idx_len := len(network.IndexToBlocksToGet)
	lb2g := network.LowestIndexToBlocksToGet
	network.MutexRcv.Unlock()

	common.Last.Mutex.Lock()
	fmt.Println("Last Block :", common.Last.Block.BlockHash.String(), "@", common.Last.Block.Height)
	fmt.Printf("Timestamp: %s,  Diff: %.0f,  Got: %s ago,  ToGetFrom: %d\n",
		time.Unix(int64(common.Last.Block.Timestamp()), 0).Format("2006/01/02 15:04:05"),
		btc.GetDifficulty(common.Last.Block.Bits()), time.Now().Sub(common.Last.Time).String(),
		lb2g)
	fmt.Print("Median Time: ", time.Unix(int64(common.Last.Block.GetMedianTimePast()), 0).Format("2006/01/02 15:04:05"), ",   ")
	common.Last.Mutex.Unlock()

	network.Mutex_net.Lock()
	fmt.Printf("NetQueueSize:%d, NetConns:%d, Peers:%d, B2G:%d/%d\n", len(network.NetBlocks),
		len(network.OpenCons), peersdb.PeerDB.Count(), b2g_len, b2g_idx_len)
	network.Mutex_net.Unlock()

	network.TxMutex.Lock()
	fmt.Printf("TransactionsToSend:%d,  TransactionsRejected:%d,  TransactionsPending:%d/%d\n",
		len(network.TransactionsToSend), len(network.TransactionsRejected),
		len(network.TransactionsPending), len(network.NetTxs))
	fmt.Printf("WaitingForInputs:%d,  SpentOutputs:%d,  Hashrate:%s,  AverageFee:%.1f SpB\n",
		len(network.WaitingForInputs), len(network.SpentOutputs), usif.GetNetworkHashRate(), common.GetAverageFee())
	network.TxMutex.Unlock()

	common.PrintStats()

	// Memory used
	al, sy := sys.MemUsed()
	fmt.Println("Heap size:", al>>20, "MB    Sys mem used:", sy>>20, "MB    QDB extra mem:",
		atomic.LoadInt64(&qdb.ExtraMemoryConsumed)>>20, "MB in",
		atomic.LoadInt64(&qdb.ExtraMemoryAllocCnt), "recs")

	var gs debug.GCStats
	debug.ReadGCStats(&gs)
	fmt.Println("Go version:", runtime.Version(), "  LastGC:", time.Now().Sub(gs.LastGC).String(),
		"   NumGC:", gs.NumGC,
		"   PauseTotal:", gs.PauseTotal.String())

	fmt.Println("Gocoin:", gocoin.Version,
		"  Uptime:", time.Now().Sub(common.StartTime).String(),
		"  ECDSA cnt:", btc.EcdsaVerifyCnt,
		"  cach:", cached, "  dis:", discarded)
}
Example 5
// print gc information to io.Writer
func PrintGCSummary(w io.Writer) {
	memStats := &runtime.MemStats{}
	runtime.ReadMemStats(memStats)
	gcstats := &debug.GCStats{PauseQuantiles: make([]time.Duration, 100)}
	debug.ReadGCStats(gcstats)

	printGC(memStats, gcstats, w)
}
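Note: the printGC helper used here (and in Examples 6, 7 and 22) is not part of these snippets, and its signature varies between projects (some variants return a string instead of taking an io.Writer). A minimal sketch of the io.Writer variant, assuming it only formats a few of the collected fields:

// Hypothetical sketch of printGC; the real implementations in the projects
// above may report different fields and use different formatting.
func printGC(memStats *runtime.MemStats, gcstats *debug.GCStats, w io.Writer) {
	fmt.Fprintf(w, "NumGC:%d  PauseTotal:%s  Alloc:%d  Sys:%d\n",
		gcstats.NumGC, gcstats.PauseTotal, memStats.Alloc, memStats.Sys)
	if len(gcstats.Pause) > 0 {
		fmt.Fprintf(w, "LastGC:%s  LastPause:%s\n",
			gcstats.LastGC.Format(time.RFC3339), gcstats.Pause[0])
	}
}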
Example 6
func GCSummary() string {
	memStats := &runtime.MemStats{}
	runtime.ReadMemStats(memStats)
	gcstats := &debug.GCStats{PauseQuantiles: make([]time.Duration, 100)}
	debug.ReadGCStats(gcstats)

	return printGC(memStats, gcstats)
}
Example 7
File: pprof.go Project: dzhcool/eye
// print gc information to the http.ResponseWriter
func PrintGCSummary(w http.ResponseWriter, r *http.Request) {
	memStats := &runtime.MemStats{}
	runtime.ReadMemStats(memStats)
	gcstats := &debug.GCStats{PauseQuantiles: make([]time.Duration, 100)}
	debug.ReadGCStats(gcstats)

	printGC(memStats, gcstats, w)
}
Example 8
func (msg *DebugCommand) HandleServer(server *Server) {
	client := msg.Client()
	if !client.flags[Operator] {
		return
	}

	switch msg.subCommand {
	case "GCSTATS":
		stats := debug.GCStats{
			Pause:          make([]time.Duration, 10),
			PauseQuantiles: make([]time.Duration, 5),
		}
		debug.ReadGCStats(&stats)

		server.Replyf(client, "last GC:     %s", stats.LastGC.Format(time.RFC1123))
		server.Replyf(client, "num GC:      %d", stats.NumGC)
		server.Replyf(client, "pause total: %s", stats.PauseTotal)
		server.Replyf(client, "pause quantiles min%%: %s", stats.PauseQuantiles[0])
		server.Replyf(client, "pause quantiles 25%%:  %s", stats.PauseQuantiles[1])
		server.Replyf(client, "pause quantiles 50%%:  %s", stats.PauseQuantiles[2])
		server.Replyf(client, "pause quantiles 75%%:  %s", stats.PauseQuantiles[3])
		server.Replyf(client, "pause quantiles max%%: %s", stats.PauseQuantiles[4])

	case "NUMGOROUTINE":
		count := runtime.NumGoroutine()
		server.Replyf(client, "num goroutines: %d", count)

	case "PROFILEHEAP":
		profFile := "ergonomadic.mprof"
		file, err := os.Create(profFile)
		if err != nil {
			server.Replyf(client, "error: %s", err)
			break
		}
		defer file.Close()
		pprof.Lookup("heap").WriteTo(file, 0)
		server.Replyf(client, "written to %s", profFile)

	case "STARTCPUPROFILE":
		profFile := "ergonomadic.prof"
		file, err := os.Create(profFile)
		if err != nil {
			server.Replyf(client, "error: %s", err)
			break
		}
		if err := pprof.StartCPUProfile(file); err != nil {
			defer file.Close()
			server.Replyf(client, "error: %s", err)
			break
		}

		server.Replyf(client, "CPU profile writing to %s", profFile)

	case "STOPCPUPROFILE":
		pprof.StopCPUProfile()
		server.Reply(client, "CPU profiling stopped")
	}
}
Example 9
func (w *StatsWorker) work() {
	w.stats["time"] = time.Now()
	w.stats["routes"] = w.collectRouteStats()
	w.stats["other"] = w.collectReporters()
	debug.ReadGCStats(w.gcstats)
	w.rt["gc"] = w.gcstats.NumGC
	w.rt["go"] = int64(runtime.NumGoroutine())
	w.save()
}
Example 10
// capture does a one-time collection of DeferStats
func (c *Client) capture() {

	var mem runtime.MemStats
	var gc debug.GCStats

	mems := ""
	if c.GrabMem {
		runtime.ReadMemStats(&mem)
		mems = strconv.FormatUint(mem.Alloc, 10)
	}

	gcs := ""
	if c.GrabGC {
		debug.ReadGCStats(&gc)
		gcs = strconv.FormatInt(gc.NumGC, 10)
	}

	grs := ""
	if c.GrabGR {
		grs = strconv.Itoa(runtime.NumGoroutine())
	}

	cgos := ""
	if c.GrabCgo {
		cgos = strconv.FormatInt(runtime.NumCgoCall(), 10)
	}

	fds := ""
	if c.GrabFd {
		fds = strconv.Itoa(openFileCnt())
	}

	ds := DeferStats{
		Mem:        mems,
		GoRoutines: grs,
		Cgos:       cgos,
		Fds:        fds,
		HTTPs:      curlist.List(),
		DBs:        Querylist.List(),
		GC:         gcs,
	}

	// FIXME
	// empty our https/dbs
	curlist.Reset()
	Querylist.Reset()

	go func() {
		b, err := json.Marshal(ds)
		if err != nil {
			log.Println(err)
		}

		c.BaseClient.Postit(b, c.statsUrl)
	}()
}
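Note: openFileCnt is not shown in the snippet (it is also used in Example 30). A minimal sketch of one possible Linux-only implementation, assuming counting the entries under /proc/self/fd is acceptable:

// Hypothetical sketch of openFileCnt for Linux; the project's actual helper
// may use a different mechanism or support other platforms.
func openFileCnt() int {
	fds, err := ioutil.ReadDir("/proc/self/fd")
	if err != nil {
		return -1
	}
	return len(fds)
}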
Example 11
// Capture new values for the Go garbage collector statistics exported in
// debug.GCStats.  This is designed to be called in a background goroutine.
// Giving a registry which has not been given to RegisterDebugGCStats will
// panic.
func CaptureDebugGCStatsOnce(r Registry) {
	lastGC := gcStats.LastGC
	debug.ReadGCStats(&gcStats)
	r.Get("debug.GCStats.LastGC").(Gauge).Update(int64(gcStats.LastGC.UnixNano()))
	r.Get("debug.GCStats.NumGC").(Gauge).Update(int64(gcStats.NumGC))
	r.Get("debug.GCStats.PauseTotal").(Gauge).Update(int64(gcStats.PauseTotal))
	if lastGC != gcStats.LastGC && 0 < len(gcStats.Pause) {
		r.Get("debug.GCStats.Pause").(Histogram).Update(int64(gcStats.Pause[0]))
	}
	//r.Get("debug.GCStats.PauseQuantiles").(Histogram).Update(gcStats.PauseQuantiles)
}
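Note: the RegisterDebugGCStats companion mentioned in the comment is not shown. For the r.Get(...) lookups and type assertions above to succeed, it must register metrics under the same names; a minimal sketch in the style of a go-metrics registry (the constructor names below are assumptions, not taken from the snippet):

// Hypothetical sketch: register the gauges and the histogram that
// CaptureDebugGCStatsOnce later looks up by name.
func RegisterDebugGCStats(r Registry) {
	r.Register("debug.GCStats.LastGC", NewGauge())
	r.Register("debug.GCStats.NumGC", NewGauge())
	r.Register("debug.GCStats.PauseTotal", NewGauge())
	r.Register("debug.GCStats.Pause", NewHistogram(NewUniformSample(1028)))
}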
Example 12
// Capture new values for the Go garbage collector statistics exported in
// debug.GCStats.  This is designed to be called in a background goroutine.
// Giving a registry which has not been given to RegisterDebugGCStats will
// panic.
func CaptureDebugGCStatsOnce(r Registry) {
	lastGC := gcStats.LastGC
	t := time.Now()
	debug.ReadGCStats(&gcStats)
	debugMetrics.ReadGCStats.UpdateSince(t)

	debugMetrics.GCStats.LastGC.Update(int64(gcStats.LastGC.UnixNano()))
	debugMetrics.GCStats.NumGC.Update(int64(gcStats.NumGC))
	if lastGC != gcStats.LastGC && 0 < len(gcStats.Pause) {
		debugMetrics.GCStats.Pause.Update(int64(gcStats.Pause[0]))
	}
	//debugMetrics.GCStats.PauseQuantiles.Update(gcStats.PauseQuantiles)
	debugMetrics.GCStats.PauseTotal.Update(int64(gcStats.PauseTotal))
}
Example 13
func show_info(par string) {
	common.Busy_mutex.Lock()
	if common.BusyWith != "" {
		fmt.Println("Chain thread busy with:", common.BusyWith)
	} else {
		fmt.Println("Chain thread is idle")
	}
	common.Busy_mutex.Unlock()

	common.Last.Mutex.Lock()
	fmt.Println("Last Block:", common.Last.Block.BlockHash.String())
	fmt.Printf("Height: %d @ %s,  Diff: %.0f,  Got: %s ago\n",
		common.Last.Block.Height,
		time.Unix(int64(common.Last.Block.Timestamp()), 0).Format("2006/01/02 15:04:05"),
		btc.GetDifficulty(common.Last.Block.Bits()), time.Now().Sub(common.Last.Time).String())
	common.Last.Mutex.Unlock()

	network.Mutex_net.Lock()
	fmt.Printf("BlocksCached: %d,  NetQueueSize: %d,  NetConns: %d,  Peers: %d\n",
		len(network.CachedBlocks), len(network.NetBlocks), len(network.OpenCons), network.PeerDB.Count())
	network.Mutex_net.Unlock()

	network.TxMutex.Lock()
	fmt.Printf("TransactionsToSend:%d,  TransactionsRejected:%d,  TransactionsPending:%d/%d\n",
		len(network.TransactionsToSend), len(network.TransactionsRejected),
		len(network.TransactionsPending), len(network.NetTxs))
	fmt.Printf("WaitingForInputs:%d,  SpentOutputs:%d,  Hashrate:%s\n",
		len(network.WaitingForInputs), len(network.SpentOutputs), usif.GetNetworkHashRate())
	network.TxMutex.Unlock()

	common.PrintStats()

	// Memory used
	var ms runtime.MemStats
	var gs debug.GCStats
	runtime.ReadMemStats(&ms)
	fmt.Println("Go version:", runtime.Version(),
		"   Heap size:", ms.Alloc>>20, "MB",
		"   Sys mem used", ms.Sys>>20, "MB")

	debug.ReadGCStats(&gs)
	fmt.Println("LastGC:", time.Now().Sub(gs.LastGC).String(),
		"   NumGC:", gs.NumGC,
		"   PauseTotal:", gs.PauseTotal.String())

	fmt.Println("Gocoin:", btc.SourcesTag,
		"  Threads:", btc.UseThreads,
		"  Uptime:", time.Now().Sub(common.StartTime).String(),
		"  ECDSA cnt:", btc.EcdsaVerifyCnt)
}
Example 14
//export damain
func damain() {
	var buffer bytes.Buffer
	var gc debug.GCStats

	var lastgc int64

	buffer.Write(make([]byte, 0, 1000000*5))

	debug.ReadGCStats(&gc)
	gcs := strconv.FormatInt(gc.NumGC, 10)
	fmt.Printf("gcs:%v\n", gcs)
	lastgc = gc.LastGC.UnixNano()
	fmt.Printf("lastgc:%v\n", lastgc)
}
Example 15
func (gcr *gcReader) updateMetrics() {
	debug.ReadGCStats(&gcStats)

	gcr.numGC.Mark(gcStats.NumGC - gcr.lastNumGC)
	gcr.lastNumGC = gcStats.NumGC

	gcr.pauseDur.Mark(int64(gcStats.PauseTotal - gcr.lastPauseTotal))
	gcr.lastPauseTotal = gcStats.PauseTotal

	if gcr.lastGC != gcStats.LastGC && 0 < len(gcStats.Pause) {
		gcr.pauseTime.Update(int64(gcStats.Pause[0]))
	}

	gcr.lastGC = gcStats.LastGC
}
Example 16
// Collect returns the current state of all metrics of the collector.
func (c *goCollector) Collect(ch chan<- Metric) {
	c.goroutines.Set(float64(runtime.NumGoroutine()))
	ch <- c.goroutines

	var stats debug.GCStats
	stats.PauseQuantiles = make([]time.Duration, 5)
	debug.ReadGCStats(&stats)

	quantiles := make(map[float64]float64)
	for idx, pq := range stats.PauseQuantiles[1:] {
		quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
	}
	quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
	ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles)
}
Example 17
func (stat *localStatist) Start() {
	//On : %v, Total : %v, TotalLeave : %v, Open : %v, ResData : %vk
	time.Sleep(10 * time.Second)
	for {
		logBuffer := new(bytes.Buffer)
		var gcStatus debug.GCStats
		debug.ReadGCStats(&gcStatus)
		if len(gcStatus.Pause) > 0 {
			fmt.Printf("******GCTime:(%v:%v),GCNum:%v, GCPause:%v,GCPT:%v******\n",
				gcStatus.LastGC.Minute(),
				gcStatus.LastGC.Second(),
				gcStatus.NumGC,
				gcStatus.Pause[0],
				gcStatus.PauseTotal.Seconds())
		}
		time.Sleep(2 * time.Second)
		fmt.Fprintf(logBuffer, "%v %v %v %v %v %v\n",
			atomic.AddInt32(&stat.ResOnCount, 0),
			atomic.AddInt32(&stat.TotalRes, 0),
			atomic.AddInt32(&stat.LeaveTotalCount, 0),
			atomic.AddInt32(&stat.OpenResCount, 0),
			atomic.AddUint64(&stat.RecvDataLen, 0)/M1,
			atomic.AddUint64(&stat.SendDataLen, 0)/M1)

		mylog.GetLocalLogger().Write(logBuffer.String())

		consoleBuffer := new(bytes.Buffer)
		fmt.Fprintf(consoleBuffer, "On:%v,Total:%v,TotalLeave:%v,Open:%v,RecvBand:%.2fmps,SendBand:%.2fmps,Delay:%.2fms\n",
			atomic.AddInt32(&stat.ResOnCount, 0),
			atomic.AddInt32(&stat.TotalRes, 0),
			atomic.AddInt32(&stat.LeaveTotalCount, 0),
			atomic.AddInt32(&stat.OpenResCount, 0),
			(stat.RecvBand.getBandWidth()*8)/M1,
			(stat.SendBand.getBandWidth()*8)/M1,
			(stat.Delay.getDelay()/1000000)/float64(stat.ResOnCount))
		curTime := time.Now()
		curBuffer := new(bytes.Buffer)
		fmt.Fprintf(curBuffer, "%02d:%02d:%02d",
			curTime.Hour(),
			curTime.Minute(),
			curTime.Second())
		fmt.Println(curBuffer.String(), consoleBuffer.String())
		time.Sleep(10 * time.Second)
		debug.FreeOSMemory()
	}
}
Example 18
func captureGCStats(r metrics.Registry, d time.Duration) {
	for {
		var val int64
		var gc debug.GCStats
		debug.ReadGCStats(&gc)

		if len(gc.Pause) > 0 {
			val = gc.Pause[0].Nanoseconds()
		}

		if g := getGauge(r, "local.GCStats.LastGCDuration"); g != nil {
			g.(metrics.Gauge).Update(val)
		}

		time.Sleep(d)
	}
}
Example 19
func TestDebugGCStatsBlocking(t *testing.T) {
	if g := runtime.GOMAXPROCS(0); g < 2 {
		t.Skipf("skipping TestDebugGCStatsBlocking with GOMAXPROCS=%d\n", g)
		return
	}
	ch := make(chan int)
	go testDebugGCStatsBlocking(ch)
	var gcStats debug.GCStats
	t0 := time.Now()
	debug.ReadGCStats(&gcStats)
	t1 := time.Now()
	t.Log("i++ during debug.ReadGCStats:", <-ch)
	go testDebugGCStatsBlocking(ch)
	d := t1.Sub(t0)
	t.Log(d)
	time.Sleep(d)
	t.Log("i++ during time.Sleep:", <-ch)
}
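Note: the testDebugGCStatsBlocking helper is not shown. A minimal sketch of one way to implement it, assuming it counts busy-loop iterations until the test goroutine is ready to receive:

// Hypothetical sketch: increment a counter until the receiver is ready,
// giving a rough measure of how much work ran while debug.ReadGCStats
// (or the subsequent time.Sleep) occupied the main test goroutine.
func testDebugGCStatsBlocking(ch chan int) {
	for i := 0; ; i++ {
		select {
		case ch <- i:
			return
		default:
		}
	}
}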
Example 20
func getRuntimeStats(w http.ResponseWriter, r *http.Request) {
	var stats struct {
		MemStats     runtime.MemStats
		NumGoroutine int
		NumCgoCall   int64
		NumCPU       int
		Version      string
		GCStats      debug.GCStats
	}
	runtime.ReadMemStats(&stats.MemStats)
	stats.NumGoroutine = runtime.NumGoroutine()
	stats.NumCgoCall = runtime.NumCgoCall()
	stats.NumCPU = runtime.NumCPU()
	stats.Version = runtime.Version()
	stats.GCStats.PauseQuantiles = make([]time.Duration, 11)
	debug.ReadGCStats(&stats.GCStats)
	json.NewEncoder(w).Encode(&stats)
}
Example 21
func GetGCStats() string {
	gcs := debug.GCStats{}
	debug.ReadGCStats(&gcs)
	var buf bytes.Buffer
	buf.WriteString(fmt.Sprintf("LastGC: %s\n", gcs.LastGC.UTC().String()))
	buf.WriteString(fmt.Sprintf("NumGC: %d\n", gcs.NumGC))
	buf.WriteString(fmt.Sprintf("PauseTotal: %v\n", gcs.PauseTotal))
	if gcs.Pause != nil {
		pauseStr := ""
		prefix := ""
		for p := range gcs.Pause {
			pauseStr += prefix + gcs.Pause[p].String()
			prefix = ", "
		}
		buf.WriteString(fmt.Sprintf("Pause History: %s\n", pauseStr))
	}
	return buf.String()
}
Example 22
func ShowGCStat() {
	go func() {
		var numGC int64

		interval := time.Duration(100) * time.Millisecond
		gcstats := &debug.GCStats{PauseQuantiles: make([]time.Duration, 100)}
		memStats := &runtime.MemStats{}
		for {
			debug.ReadGCStats(gcstats)
			if gcstats.NumGC > numGC {
				runtime.ReadMemStats(memStats)

				printGC(memStats, gcstats)
				numGC = gcstats.NumGC
			}
			time.Sleep(interval)
		}
	}()
}
Example 23
func (rm *RuntimeMetrics) Report() {
	var mem runtime.MemStats
	runtime.ReadMemStats(&mem)

	if rm.options.Memory {
		// bytes allocated and not yet freed
		rm.reportGauge("alloc", float64(mem.Alloc))

		// total number of allocated objects
		rm.reportGauge("heap_objects", float64(mem.HeapObjects))
	}

	if rm.options.GC {
		rm.reportGauge("pause_total_ns", float64(mem.PauseTotalNs))
		rm.reportGauge("num_gc", float64(mem.NumGC))
		rm.reportGauge("next_gc", float64(mem.NextGC))
		rm.reportGauge("gc_cpu_fraction", mem.GCCPUFraction)
	}

	if rm.options.GCQuantile {
		var gc debug.GCStats
		gc.PauseQuantiles = make([]time.Duration, 3)
		debug.ReadGCStats(&gc)
		rm.reportGauge("gc_pause_quantile_50", float64(gc.PauseQuantiles[1]/1000)/1000.0)
		rm.reportGauge("gc_pause_quantile_max", float64(gc.PauseQuantiles[2]/1000)/1000.0)
	}

	if rm.options.Goroutines {
		rm.reportGauge("num_goroutines", float64(runtime.NumGoroutine()))
	}

	if rm.options.Cgo {
		rm.reportGauge("num_cgo_call", float64(runtime.NumCgoCall()))
	}

	if rm.options.FDs {
		if num, err := getFDUsage(); err == nil {
			rm.reportGauge("num_fds_used", float64(num))
		}
	}
}
Example 24
func MonitorApplicationInstrumentation() {
	log.Info("Starting application monitoring...")
	go func() {
		job := instrument.NewJob("GCActivity")
		job_rl := instrument.NewJob("Load")
		metadata := health.Kvs{"host": HostDetails.Hostname}
		applicationGCStats.PauseQuantiles = make([]time.Duration, 5)

		for {
			debug.ReadGCStats(&applicationGCStats)
			job.GaugeKv("pauses_quantile_min", float64(applicationGCStats.PauseQuantiles[0].Nanoseconds()), metadata)
			job.GaugeKv("pauses_quantile_25", float64(applicationGCStats.PauseQuantiles[1].Nanoseconds()), metadata)
			job.GaugeKv("pauses_quantile_50", float64(applicationGCStats.PauseQuantiles[2].Nanoseconds()), metadata)
			job.GaugeKv("pauses_quantile_75", float64(applicationGCStats.PauseQuantiles[3].Nanoseconds()), metadata)
			job.GaugeKv("pauses_quantile_max", float64(applicationGCStats.PauseQuantiles[4].Nanoseconds()), metadata)

			job_rl.GaugeKv("rps", float64(GlobalRate.Rate()), metadata)
			time.Sleep(5 * time.Second)
		}
	}()
}
Example 25
File: info.go Project: eswdd/bosun
func (i *info) dumpGC(buf *bytes.Buffer) {
	buf.WriteString("# GC\r\n")

	count := 5

	var st debug.GCStats
	st.Pause = make([]time.Duration, count)
	// st.PauseQuantiles = make([]time.Duration, count)
	debug.ReadGCStats(&st)

	h := make([]string, 0, count)

	for i := 0; i < count && i < len(st.Pause); i++ {
		h = append(h, st.Pause[i].String())
	}

	i.dumpPairs(buf, infoPair{"gc_last_time", st.LastGC.Format(gcTimeFormat)},
		infoPair{"gc_num", st.NumGC},
		infoPair{"gc_pause_total", st.PauseTotal.String()},
		infoPair{"gc_pause_history", strings.Join(h, ",")},
	)
}
Example 26
func (s *Server) initializeRuntime() (err error) {
	// install signals.
	// TODO: FIXME: while processing the current signal, others may be dropped.
	signal.Notify(s.sigs)

	// apply the cpu profile.
	if err = s.applyCpuProfile(core.Conf); err != nil {
		return
	}

	// apply the gc percent.
	if err = s.applyGcPercent(core.Conf); err != nil {
		return
	}

	// show gc trace.
	go func() {
		stat := &debug.GCStats{}

		for {
			if core.Conf.Go.GcTrace > 0 {
				pgc := stat.NumGC
				debug.ReadGCStats(stat)
				if len(stat.Pause) > 3 {
					stat.Pause = append([]time.Duration{}, stat.Pause[:3]...)
				}
				if pgc < stat.NumGC {
					core.Trace.Println("gc", stat.NumGC, stat.PauseTotal, stat.Pause, stat.PauseQuantiles)
				}
				time.Sleep(time.Duration(core.Conf.Go.GcTrace) * time.Second)
			} else {
				time.Sleep(3 * time.Second)
			}
		}
	}()

	return
}
Example 27
func (a *App) watchdog() {
	repeat, _ := a.Cfg.GetInt("gop", "watchdog_secs", 30)
	ticker := time.Tick(time.Second * time.Duration(repeat))

	firstLoop := true
	for {
		sysMemBytesLimit, _ := a.Cfg.GetInt64("gop", "sysmem_bytes_limit", 0)
		allocMemBytesLimit, _ := a.Cfg.GetInt64("gop", "allocmem_bytes_limit", 0)
		numFDsLimit, _ := a.Cfg.GetInt64("gop", "numfds_limit", 0)
		numGorosLimit, _ := a.Cfg.GetInt64("gop", "numgoros_limit", 0)

		sysMemBytes, allocMemBytes := getMemInfo()
		numFDs, err := fdsInUse()
		numGoros := int64(runtime.NumGoroutine())
		if err != nil {
			a.Debug("Failed to get number of fds in use: %s", err.Error())
			// Continue without
		}

		appStats := a.GetStats()
		gcStats := debug.GCStats{PauseQuantiles: make([]time.Duration, 3)}
		debug.ReadGCStats(&gcStats)
		gcMin := gcStats.PauseQuantiles[0]
		gcMedian := gcStats.PauseQuantiles[1]
		gcMax := gcStats.PauseQuantiles[2]
		a.Info("TICK: sys=%d,alloc=%d,fds=%d,current_req=%d,total_req=%d,goros=%d,gc=%v/%v/%v",
			sysMemBytes,
			allocMemBytes,
			numFDs,
			appStats.currentReqs,
			appStats.totalReqs,
			numGoros,
			gcMin,
			gcMedian,
			gcMax)
		if firstLoop {
			// Zero some gauges at start, otherwise restarts get lost in the graphs
			// and it looks like app is continuously using memory.
			a.Stats.Gauge("mem.sys", 0)
			a.Stats.Gauge("mem.alloc", 0)
			a.Stats.Gauge("numfds", 0)
			a.Stats.Gauge("numgoro", 0)
		} else {
			a.Stats.Gauge("mem.sys", sysMemBytes)
			a.Stats.Gauge("mem.alloc", allocMemBytes)
			a.Stats.Gauge("numfds", numFDs)
			a.Stats.Gauge("numgoro", numGoros)
		}

		if sysMemBytesLimit > 0 && sysMemBytes >= sysMemBytesLimit {
			a.Errorf("SYS MEM LIMIT REACHED [%d >= %d] - starting graceful restart", sysMemBytes, sysMemBytesLimit)
			a.StartGracefulRestart("Sys Memory limit reached")
		}
		if allocMemBytesLimit > 0 && allocMemBytes >= allocMemBytesLimit {
			a.Errorf("ALLOC MEM LIMIT REACHED [%d >= %d] - starting graceful restart", allocMemBytes, allocMemBytesLimit)
			a.StartGracefulRestart("Alloc Memory limit reached")
		}
		if numFDsLimit > 0 && numFDs >= numFDsLimit {
			a.Errorf("NUM FDS LIMIT REACHED [%d >= %d] - starting graceful restart", numFDs, numFDsLimit)
			a.StartGracefulRestart("Number of fds limit reached")
		}
		if numGorosLimit > 0 && numGoros >= numGorosLimit {
			a.Errorf("NUM GOROS LIMIT REACHED [%d >= %d] - starting graceful restart", numGoros, numGorosLimit)
			a.StartGracefulRestart("Number of goros limit reached")
		}

		restartAfterSecs, _ := a.Cfg.GetFloat32("gop", "restart_after_secs", 0)
		appRunTime := time.Since(appStats.startTime).Seconds()
		if restartAfterSecs > 0 && appRunTime > float64(restartAfterSecs) {
			a.Errorf("TIME LIMIT REACHED [%f >= %f] - starting graceful restart", appRunTime, restartAfterSecs)
			a.StartGracefulRestart("Run time limit reached")
		}
		firstLoop = false
		<-ticker
	}
}
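Note: getMemInfo and fdsInUse are not included in the snippet. A minimal sketch of getMemInfo, assuming it reports the runtime's Sys and Alloc counters as int64:

// Hypothetical sketch of getMemInfo; the project's helper may derive these
// numbers differently (e.g. from the OS rather than the Go runtime).
func getMemInfo() (sysMemBytes, allocMemBytes int64) {
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)
	return int64(ms.Sys), int64(ms.Alloc)
}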
Example 28
func main() {
	buckets := flag.String("buckets", "", "buckets file (file format: new-line separated list of bucket names)")
	config_file := flag.String("config", "", "Transport config file")
	flag.Parse()

	if *config_file == "" {
		log.Fatal("You must specify config file")
	}

	for _, h := range proxy_handlers {
		h.Estimator = estimator.NewEstimator()
	}
	estimator_scan_handlers = proxy_handlers

	var err error

	conf := &config.ProxyConfig{}
	err = conf.Load(*config_file)
	if err != nil {
		log.Fatalf("Could not load config %s: %q", *config_file, err)
	}

	if *buckets == "" && len(conf.Elliptics.BucketList) == 0 {
		log.Fatalf("There is no buckets file and there is no 'bucket-list' option in elliptics config.")
	}

	if len(conf.Proxy.Address) == 0 {
		log.Fatalf("'address' must be specified in proxy config '%s'\n", *config_file)
	}

	if conf.Proxy.RedirectPort == 0 || conf.Proxy.RedirectPort >= 65536 {
		log.Printf("redirect is not allowed because of invalid redirect port %d",
			conf.Proxy.RedirectPort)
	}

	proxy.last_errors = make([]ErrorInfo, last_errors_length, last_errors_length)

	proxy.ell, err = etransport.NewEllipticsTransport(conf)
	if err != nil {
		log.Fatalf("Could not create Elliptics transport: %v", err)
	}

	rand.Seed(time.Now().Unix())

	proxy.bctl, err = bucket.NewBucketCtl(proxy.ell, *buckets, *config_file)
	if err != nil {
		log.Fatalf("Could not create new bucket controller: %v", err)
	}

	go func() {
		debug.SetGCPercent(10000)
		var stats debug.GCStats

		for {
			time.Sleep(5 * time.Second)

			runtime.GC()
			debug.ReadGCStats(&stats)

			log.Printf("gc: start: %s, duration: %s\n", stats.LastGC.String(), stats.Pause[0].String())
		}
	}()

	if len(conf.Proxy.HTTPSAddress) != 0 {
		if len(conf.Proxy.CertFile) == 0 {
			log.Fatalf("If you have specified HTTPS address there MUST be certificate file option")
		}

		if len(conf.Proxy.KeyFile) == 0 {
			log.Fatalf("If you have specified HTTPS address there MUST be key file option")
		}

		// this is needed to allow both HTTPS and HTTP handlers
		go func() {
			server := proxy.getTimeoutServer(proxy.bctl.Conf.Proxy.HTTPSAddress, http.HandlerFunc(generic_handler))
			log.Fatal(server.ListenAndServeTLS(conf.Proxy.CertFile, conf.Proxy.KeyFile))
		}()
	}

	if len(conf.Proxy.Address) != 0 {
		server := proxy.getTimeoutServer(proxy.bctl.Conf.Proxy.Address, http.HandlerFunc(generic_handler))
		log.Fatal(server.ListenAndServe())
	}
}
Example 29
func LastGCTime() string {
	var stats = new(debug.GCStats)
	debug.ReadGCStats(stats)

	return stats.LastGC.String()
}
Example 30
// capture does a one-time collection of DeferStats
func (c *Client) capture() {
	defer func() {
		if rec := recover(); rec != nil {
			err := fmt.Sprintf("%q", rec)
			log.Println(err)
		}
	}()

	var mem runtime.MemStats
	var gc debug.GCStats

	mems := ""
	if c.GrabMem {
		runtime.ReadMemStats(&mem)
		mems = strconv.FormatUint(mem.Alloc, 10)
	}

	gcs := ""
	var lastgc int64
	if c.GrabGC {
		debug.ReadGCStats(&gc)
		gcs = strconv.FormatInt(gc.NumGC, 10)
		lastgc = gc.LastGC.UnixNano()
	}

	grs := ""
	if c.GrabGR {
		grs = strconv.Itoa(runtime.NumGoroutine())
	}

	cgos := ""
	if c.GrabCgo {
		cgos = strconv.FormatInt(runtime.NumCgoCall(), 10)
	}

	fds := ""
	if c.GrabFd {
		fds = strconv.Itoa(openFileCnt())
	}

	ds := DeferStats{
		Mem:        mems,
		GoRoutines: grs,
		Cgos:       cgos,
		Fds:        fds,
		GC:         gcs,
		DBs:        Querylist.List(),
	}

	// reset dbs
	Querylist.Reset()

	if c.GrabHTTP {
		dhs := curlist.List()
		ds.HTTPs = getHTTPPercentiles(dhs)
		ds.Rpms = rpms.List()

		// reset http list && rpm
		curlist.Reset()
		rpms.ResetRPM()
	}

	if c.GrabExpvar {
		expvars, err := c.GetExpvar()
		if err != nil {
			log.Println(err)
		}
		ds.Expvars = expvars
	}

	if lastgc != c.LastGC {
		c.LastGC = lastgc
		ds.LastGC = strconv.FormatInt(c.LastGC, 10)
		ds.LastPause = strconv.FormatInt(gc.Pause[0].Nanoseconds(), 10)
	}

	go func() {
		defer func() {
			if rec := recover(); rec != nil {
				err := fmt.Sprintf("%q", rec)
				log.Println(err)
			}
		}()

		b, err := json.Marshal(ds)
		if err != nil {
			log.Println(err)
		}

		c.BaseClient.Postit(b, c.statsUrl, true)
	}()
}