func TestCounterBatchFunc(t *testing.T) {
	metrics.Reset()

	var a, b uint64
	metrics.Counter("whee").SetBatchFunc(
		"yay",
		func() { a, b = 1, 2 },
		func() uint64 { return a },
	)
	metrics.Counter("woo").SetBatchFunc(
		"yay",
		func() { a, b = 1, 2 },
		func() uint64 { return b },
	)

	counters, _ := metrics.Snapshot()
	if v, want := counters["whee"], uint64(1); v != want {
		t.Errorf("Counter was %v, but expected %v", v, want)
	}

	if v, want := counters["woo"], uint64(2); v != want {
		t.Errorf("Counter was %v, but expected %v", v, want)
	}
}
// New wraps the datastore, providing metrics on the operations. The
// metrics are registered with names starting with prefix and a dot.
//
// If prefix is not unique, New will panic. Call Close to release the
// prefix.
func New(prefix string, ds datastore.Datastore) DatastoreCloser {
	m := &measure{
		backend: ds,

		putNum:     metrics.Counter(prefix + ".Put.num"),
		putErr:     metrics.Counter(prefix + ".Put.err"),
		putLatency: metrics.NewHistogram(prefix+".Put.latency", 0, maxLatency, 3),
		putSize:    metrics.NewHistogram(prefix+".Put.size", 0, maxSize, 3),

		getNum:     metrics.Counter(prefix + ".Get.num"),
		getErr:     metrics.Counter(prefix + ".Get.err"),
		getLatency: metrics.NewHistogram(prefix+".Get.latency", 0, maxLatency, 3),
		getSize:    metrics.NewHistogram(prefix+".Get.size", 0, maxSize, 3),

		hasNum:     metrics.Counter(prefix + ".Has.num"),
		hasErr:     metrics.Counter(prefix + ".Has.err"),
		hasLatency: metrics.NewHistogram(prefix+".Has.latency", 0, maxLatency, 3),

		deleteNum:     metrics.Counter(prefix + ".Delete.num"),
		deleteErr:     metrics.Counter(prefix + ".Delete.err"),
		deleteLatency: metrics.NewHistogram(prefix+".Delete.latency", 0, maxLatency, 3),

		queryNum:     metrics.Counter(prefix + ".Query.num"),
		queryErr:     metrics.Counter(prefix + ".Query.err"),
		queryLatency: metrics.NewHistogram(prefix+".Query.latency", 0, maxLatency, 3),
	}
	return m
}
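// Usage sketch (an illustration, not part of the wrapped package): wrap an
// existing datastore so every operation is counted and timed under a prefix.
// datastore.NewMapDatastore is assumed here as a convenient in-memory
// backend; only New and Close are taken from the code above. New panics if
// the prefix is already registered, so Close is deferred to release it.
func exampleWrapDatastore() {
	base := datastore.NewMapDatastore()
	ds := New("myds", base) // registers myds.Put.num, myds.Get.latency, ...
	defer ds.Close()        // releases the "myds" prefix for reuse

	// Every Put/Get/Has/Delete/Query issued through ds is now reflected in
	// the counters and histograms created above.
}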
func init() {
	msg := &memStatGauges{}

	metrics.Counter("Mem.NumGC").SetBatchFunc(key{}, msg.init, msg.numGC)
	metrics.Counter("Mem.PauseTotalNs").SetBatchFunc(key{}, msg.init, msg.totalPause)

	metrics.Gauge("Mem.LastGC").SetBatchFunc(key{}, msg.init, msg.lastPause)
	metrics.Gauge("Mem.Alloc").SetBatchFunc(key{}, msg.init, msg.alloc)
	metrics.Gauge("Mem.HeapObjects").SetBatchFunc(key{}, msg.init, msg.objects)
}
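// Sketch (an assumption: memStatGauges is not shown above, so this is only a
// plausible shape for it): the batch pattern pairs one init function, run
// once per metrics.Snapshot, with cheap accessors that read the cached copy
// of runtime.MemStats rather than re-collecting stats for every metric.
type memStatGaugesSketch struct {
	stats runtime.MemStats
}

func (m *memStatGaugesSketch) init()              { runtime.ReadMemStats(&m.stats) }
func (m *memStatGaugesSketch) numGC() uint64      { return uint64(m.stats.NumGC) }
func (m *memStatGaugesSketch) totalPause() uint64 { return m.stats.PauseTotalNs }
func (m *memStatGaugesSketch) lastPause() int64   { return int64(m.stats.LastGC) }
func (m *memStatGaugesSketch) alloc() int64       { return int64(m.stats.Alloc) }
func (m *memStatGaugesSketch) objects() int64     { return int64(m.stats.HeapObjects) }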
func TestCounter(t *testing.T) {
	metrics.Reset()

	metrics.Counter("whee").Add()
	metrics.Counter("whee").AddN(10)

	counters, _ := metrics.Snapshot()
	if v, want := counters["whee"], uint64(11); v != want {
		t.Errorf("Counter was %v, but expected %v", v, want)
	}
}
func TestCounterRemove(t *testing.T) {
	metrics.Reset()

	metrics.Counter("whee").Add()
	metrics.Counter("whee").Remove()

	counters, _ := metrics.Snapshot()
	if v, ok := counters["whee"]; ok {
		t.Errorf("Counter was %v, but expected nothing", v)
	}
}
func (e *Endpoint) processMsgs(msgs <-chan amqp.Delivery, cfg config.EndpointConfig) {
	defer e.ch.Close()

	var wg sync.WaitGroup
	defer func() {
		wg.Wait()
		close(e.exitResp)
	}()

	for {
		select {
		case <-e.exit:
			return
		case d, ok := <-msgs:
			if !ok {
				log.Printf("%s: delivery chan closed", cfg.Name)
				close(e.exit)
				return
			}
			wg.Add(1)
			go func(d amqp.Delivery, cfg config.EndpointConfig) {
				defer wg.Done()
				start := time.Now()
				metrics.Counter(epMetricName(e.Config.Name, "deliver-msg")).AddN(1)
				e.Strategy.Deliver(d, cfg)
				stop := time.Now()
				RecordDeliveryTime(cfg.Name, stop.Sub(start))
			}(d, cfg)
		}
	}
}
func (m *EndpointApp) connect() error {
	conn, connErr, err := DialRabbit(m.ap)
	if err != nil {
		return err
	}
	metrics.Counter(metricName("connect")).AddN(1)
	m.conn = conn
	m.connErr = connErr
	return nil
}
func (e *Endpoint) start() error {
	metrics.Counter(epMetricName(e.Config.Name, "start")).AddN(1)
	log.Printf("%s: Starting Endpoint", e.Config.Name)

	msgs, err := e.Strategy.Consume(e.ch, e.Config)
	if err != nil {
		return err
	}

	go e.processMsgs(msgs, e.Config)
	return nil
}
func TestEnforceQuota(t *testing.T) {
	defer metrics.Reset()

	client := int64(0x1234)
	quota := 1
	SetQuota(client, quota)

	for i := 0; i < 5; i++ {
		updateOps()
		for k := 0; k < quota*flushIntervalSecond; k++ {
			metrics.Counter(stats.ClientCounterName(client)).Add()
			if !HasQuota(client) {
				t.Errorf("#%d.%d: unexpectedly out of quota", i, k)
			}
		}
		metrics.Counter(stats.ClientCounterName(client)).Add()
		if HasQuota(client) {
			t.Errorf("#%d: unexpectedly have quota", i)
		}
	}
}
func TestCounterFunc(t *testing.T) {
	metrics.Reset()

	metrics.Counter("whee").SetFunc(func() uint64 {
		return 100
	})

	counters, _ := metrics.Snapshot()
	if v, want := counters["whee"], uint64(100); v != want {
		t.Errorf("Counter was %v, but expected %v", v, want)
	}
}
func BenchmarkCounterAddN(b *testing.B) {
	metrics.Reset()

	b.ReportAllocs()
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			metrics.Counter("test2").AddN(100)
		}
	})
}
func (r *ReportResource) Counter(res http.ResponseWriter, req *http.Request) {
	var ctr api.Counter
	if err := rest.Bind(req, &ctr); err != nil {
		log.Print(err)
		rest.SetBadRequestResponse(res)
		return
	}

	metrics.Counter(ctr.Name).AddN(ctr.N)

	if err := rest.SetOKResponse(res, ctr); err != nil {
		rest.SetInternalServerErrorResponse(res, err)
		return
	}
}
			// Fragment: the tail of a panic-recovering ServeHTTP wrapper.
			// e is the recovered panic value; h.p reports it along with the
			// request and the stack trace collected below, and the client
			// receives a 500 response carrying the random incident id.
			id := rand.Int63()

			var lines []string
			for skip := 1; ; skip++ {
				pc, file, line, ok := runtime.Caller(skip)
				if !ok {
					break
				}

				if file[len(file)-1] == 'c' {
					continue
				}

				f := runtime.FuncForPC(pc)
				s := fmt.Sprintf("%s:%d %s()\n", file, line, f.Name())
				lines = append(lines, s)
			}

			h.p(id, e, lines, r)

			body := fmt.Sprintf(
				"%s\n%016x",
				http.StatusText(http.StatusInternalServerError),
				id,
			)
			http.Error(w, body, http.StatusInternalServerError)
		}
	}()

	h.h.ServeHTTP(w, r)
}

var panics = metrics.Counter("HTTP.Panics")
func Measure(h *metrics.Histogram, d time.Duration) {
	if h != nil && archaius.Conf.Collect {
		h.RecordValue(int64(d))
		metrics.Counter(h.Name()).Add()
	}
}
func (c *CounterType) Add() {
	metrics.Counter(c.disk + "_" + c.op).Add()
	if c.client != 0 {
		metrics.Counter(ClientCounterName(c.client)).Add()
	}
}
// Wrap returns an instrumented http.Handler which records the following
// metrics:
//
//	HTTP.Requests
//	HTTP.Responses
//	HTTP.Latency.{P50,P75,P90,P95,P99,P999}
//
// By tracking incoming requests and outgoing responses, one can monitor not
// only the requests per second, but also the number of requests being
// processed at any given point in time.
func Wrap(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		requests.Add()                  // inc requests
		defer responses.Add()           // inc responses when we're done
		defer recordLatency(time.Now()) // record latency when we're done
		h.ServeHTTP(w, r)
	})
}

var (
	requests  = metrics.Counter("HTTP.Requests")
	responses = metrics.Counter("HTTP.Responses")

	// a five-minute window tracking 1ms-3min
	latency = metrics.NewHistogram("HTTP.Latency", 1, 1000*60*3, 3)
)

func recordLatency(start time.Time) {
	elapsedMS := time.Now().Sub(start).Seconds() * 1000.0
	_ = latency.RecordValue(int64(elapsedMS))
}
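// Usage sketch (an illustration, not from the original sources): serving an
// application mux through Wrap so that HTTP.Requests, HTTP.Responses, and
// HTTP.Latency are recorded for every request. The route and listen address
// are assumptions made for the example; only Wrap comes from the code above.
func exampleServe() {
	mux := http.NewServeMux()
	mux.HandleFunc("/ping", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "pong")
	})

	// Wrap instruments the whole mux; the handlers inside it need no changes.
	log.Fatal(http.ListenAndServe(":8080", Wrap(mux)))
}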
func (b codaBackend) Increment(key string) {
	metrics.Counter(key).Add()
}