func fanout(
	pubc chan Line,
	subc, unsubc chan Subscription,
	histc <-chan HistoryRequest,
	histSize int,
) {
	var (
		hist         = NewHistory(histSize)
		reg          = registry{}
		publishs     = expvar.NewMap("publishs")
		subscribes   = expvar.NewMap("subscribes")
		unsubscribes = expvar.NewMap("unsubscribes")
	)

	expvar.Publish("history", hist)
	expvar.Publish("subscriptions", reg)

	for {
		select {
		case l := <-pubc:
			for _, s := range reg[l.Topic()] {
				s.In() <- l
			}
			hist.Store(l)
			publishs.Add(l.Topic(), 1)

		case s := <-subc:
			_, ok := reg[s.Topic()]
			if !ok {
				reg[s.Topic()] = map[string]Subscription{}
			}
			reg[s.Topic()][s.ID()] = s
			subscribes.Add(s.Topic(), 1)
			logf("fanout: subscribed %s\n", s.ID())

		case s := <-unsubc:
			subs, ok := reg[s.Topic()]
			if !ok {
				continue
			}
			_, ok = subs[s.ID()]
			if !ok {
				continue
			}
			delete(subs, s.ID())
			unsubscribes.Add(s.Topic(), 1)
			logf("fanout: unsubscribed %s\n", s.ID())

		case req := <-histc:
			req.Respond(hist.Get(req.Topic(), req.Size()))
		}
	}
}
// NewExpHandler creates a new ExpHandler, publishes a new expvar.Map to track
// it, sets a default Durations={"min": time.Minute}, sets Log=DefaultLogger,
// and adds name to the exposed "exphttp" map so that stats polling code
// can auto-discover.
func NewExpHandler(name string, h ExpHandlerFunc) *ExpHandler {
	if expHandlers == nil {
		expHandlers = expvar.NewMap("exphttp")
	}
	e := &ExpHandler{
		Name:        name,
		Stats:       expvar.NewMap(name),
		Durations:   map[string]time.Duration{"min": time.Minute},
		HandlerFunc: h,
		Log:         DefaultLogger,
	}
	expHandlers.Add(name, 1)
	return e
}
func NewRPCExt(name string, maxIdleConnections int, clientBuilder ClientBuilder) *RPCExt {
	r := &RPCExt{
		name:                       name,
		clients:                    make([]*rpc.Client, 0, maxIdleConnections),
		maxIdleConnections:         maxIdleConnections,
		clientBuilder:              clientBuilder,
		closed:                     false,
		statRequests:               metrics.NewCounter(),
		statEstablishedConnections: metrics.NewCounter(),
		statLiveConnections:        metrics.NewCounter(),
	}

	m := expvar.NewMap(name + "-rpc")
	m.Set("requests", r.statRequests)
	m.Set("connections.established", r.statEstablishedConnections)
	m.Set("connections.inuse", r.statLiveConnections)
	m.Set("connections.idle", expvar.Func(func() interface{} {
		r.mu.RLock()
		n := len(r.clients)
		r.mu.RUnlock()
		return n
	}))

	return r
}
func init() { m := expvar.NewMap("io") m.Set("w_B", &metrics.HistogramExport{ Histogram: writeBytes, Percentiles: []float64{0.1, 0.2, 0.80, 0.90, 0.99}, PercentileNames: []string{"p10", "p20", "p80", "p90", "p99"}}) m.Set("r_B", &metrics.HistogramExport{ Histogram: readBytes, Percentiles: []float64{0.1, 0.2, 0.80, 0.90, 0.99}, PercentileNames: []string{"p10", "p20", "p80", "p90", "p99"}}) expHistos = expvar.NewMap("cb") cb.ConnPoolCallback = recordConnPoolStat }
func NewRuntimeWare(prefixes []string, trackPageview bool, logInterval ...time.Duration) Middleware {
	expvar.NewString("at_server_start").Set(time.Now().Format("2006-01-02 15:04:05"))
	expvar.NewInt("cpu_count").Set(int64(runtime.NumCPU()))

	ware := &RuntimeWare{
		serverStarted: time.Now(),
		trackPageview: trackPageview,
		ignoredUrls:   prefixes,
		cQps:          ratecounter.NewRateCounter(time.Minute),
		c4xx:          ratecounter.NewRateCounter(5 * time.Minute),
		c5xx:          ratecounter.NewRateCounter(5 * time.Minute),
		lc:            NewLatencyCounter(50),
		hitsTotal:     expvar.NewInt("hits_total"),
		hitsQps:       expvar.NewInt("hits_per_minute"),
		hits4xx:       expvar.NewInt("hits_4xx_per_5min"),
		hits5xx:       expvar.NewInt("hits_5xx_per_5min"),
		hitsServed:    expvar.NewString("latency_recent"),
		hitsLatMax:    expvar.NewString("latency_max"),
		hitsLatMin:    expvar.NewString("latency_min"),
		hitsLat95:     expvar.NewString("latency_p95"),
		hitsLat50:     expvar.NewString("latency_p50"),
		numGoroutine:  expvar.NewInt("goroutine_count"),
	}
	if trackPageview {
		ware.pageviews = expvar.NewMap("hits_pageviews")
	}
	if len(logInterval) > 0 && logInterval[0] > 0 {
		go ware.logSnapshot(logInterval[0])
	}
	return ware
}
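// A hypothetical wiring sketch for NewRuntimeWare above: ignore the /debug and
// /static prefixes, track pageviews, and log a snapshot every five minutes.
// How the returned Middleware is attached to a router depends on the host
// framework, so that part is omitted here.
func exampleRuntimeWare() Middleware {
	return NewRuntimeWare([]string{"/debug", "/static"}, true, 5*time.Minute)
}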
// NewStatistics returns an expvar-based map with the given key. Within that map
// is another map. Within there "name" is the Measurement name, "tags" are the tags,
// and values are placed at the key "values".
func NewStatistics(key, name string, tags map[string]string) *expvar.Map {
	expvarMu.Lock()
	defer expvarMu.Unlock()

	// Add expvar for this service.
	var v expvar.Var
	if v = expvar.Get(key); v == nil {
		v = expvar.NewMap(key)
	}
	m := v.(*expvar.Map)

	// Set the name
	nameVar := &expvar.String{}
	nameVar.Set(name)
	m.Set("name", nameVar)

	// Set the tags
	tagsVar := &expvar.Map{}
	tagsVar.Init()
	for k, v := range tags {
		value := &expvar.String{}
		value.Set(v)
		tagsVar.Set(k, value)
	}
	m.Set("tags", tagsVar)

	// Create and set the values entry used for actual stats.
	statMap := &expvar.Map{}
	statMap.Init()
	m.Set("values", statMap)

	return statMap
}
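// A minimal usage sketch, assuming NewStatistics above is in scope; the key,
// name, and tag values are illustrative only. The returned map is the nested
// "values" map, so these counters appear under <key>.values in /debug/vars.
func exampleStatistics() {
	statMap := NewStatistics("graphite:tcp::2003", "graphite", map[string]string{"proto": "tcp"})
	statMap.Add("pointsReceived", 10)
	statMap.Add("bytesReceived", 512)
}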
func init() {
	registry := expvar.Get("registry")
	if registry == nil {
		registry = expvar.NewMap("registry")
	}

	cache := registry.(*expvar.Map).Get("cache")
	if cache == nil {
		cache = &expvar.Map{}
		cache.(*expvar.Map).Init()
		registry.(*expvar.Map).Set("cache", cache)
	}

	storage := cache.(*expvar.Map).Get("storage")
	if storage == nil {
		storage = &expvar.Map{}
		storage.(*expvar.Map).Init()
		cache.(*expvar.Map).Set("storage", storage)
	}

	storage.(*expvar.Map).Set("blobdescriptor", expvar.Func(func() interface{} {
		// no need for synchronous access: the increments are atomic and
		// during reading, we don't care if the data is up to date. The
		// numbers will always *eventually* be reported correctly.
		return blobStatterCacheMetrics
	}))
}
func init() {
	couchbase.ConnPoolCallback = recordConnPoolStat

	expCb := expvar.NewMap("cb")
	expHistos = &expvar.Map{}
	expHistos.Init()
	expCb.Set("pools", expHistos)
}
func init() {
	rm := expvar.NewMap("retention")
	rm.Set("SecondsSinceScanCompleted", expvar.Func(secondsSinceRetentionScanCompleted))
	rm.Set("DeletesHist", expRetentionDeletesHist)
	rm.Set("DeletesTotal", expRetentionDeletesTotal)
	rm.Set("Period", expRetentionPeriod)
	rm.Set("RetainedHist", expRetainedHist)
	rm.Set("RetainedCurrent", expRetainedCurrent)
}
func TestExport(t *testing.T) {
	features = map[FeatureFlag]bool{
		unused: false,
	}

	m := expvar.NewMap("testing")
	Export(m)
	v := m.Get("unused")
	test.AssertEquals(t, v.String(), "false")
}
func TestVHostCalculation(t *testing.T) {
	tests := []vhostTest{
		vhostTest{
			rawVHost:             "www.howsmyssl.com",
			httpsAddr:            "0:10443",
			expectedRouteHost:    "www.howsmyssl.com",
			expectedRedirectHost: "www.howsmyssl.com",
		},
		vhostTest{
			rawVHost:             "localhost:10443",
			httpsAddr:            "localhost:10443",
			expectedRouteHost:    "localhost",
			expectedRedirectHost: "localhost:10443",
		},
		vhostTest{
			rawVHost:             "example.com:10443",
			httpsAddr:            "localhost:10443",
			expectedRouteHost:    "example.com",
			expectedRedirectHost: "example.com:10443",
		},
		vhostTest{
			rawVHost:             "example.com:443",
			httpsAddr:            "0:443",
			expectedRouteHost:    "example.com",
			expectedRedirectHost: "example.com",
		},
	}

	staticVars := expvar.NewMap("testStatic")
	staticHandler := makeStaticHandler("/static", staticVars)

	for i, vt := range tests {
		routeHost, redirectHost := calculateDomains(vt.rawVHost, vt.httpsAddr)
		if routeHost != vt.expectedRouteHost {
			t.Errorf("#%d vhost %#v, httpsAddr %#v: want routeHost %#v, got %s", i, vt.rawVHost, vt.httpsAddr, vt.expectedRouteHost, routeHost)
		}
		if redirectHost != vt.expectedRedirectHost {
			t.Errorf("#%d vhost %#v, httpsAddr %#v: want redirectHost %#v, got %#v", i, vt.rawVHost, vt.httpsAddr, vt.expectedRedirectHost, redirectHost)
		}

		tm := tlsMux(vt.expectedRouteHost, vt.expectedRedirectHost, staticHandler)
		r, err := http.NewRequest("GET", "https://howsmyssl.com/", nil)
		if err != nil {
			t.Fatalf("borked request")
		}
		w := httptest.NewRecorder()
		tm.ServeHTTP(w, r)

		expectedLocation := "https://" + vt.expectedRedirectHost + "/"
		location := w.Header()["Location"][0]
		if w.Code != http.StatusMovedPermanently {
			t.Errorf("#%d vhost %#v, httpsAddr %#v: want Code %d, got %d", i, vt.rawVHost, vt.httpsAddr, http.StatusMovedPermanently, w.Code)
		}
		if location != expectedLocation {
			t.Errorf("#%d vhost %#v, httpsAddr %#v: want Location %s, got %s", i, vt.rawVHost, vt.httpsAddr, expectedLocation, location)
		}
	}
}
func init() { m := expvar.NewMap("io") m.Set("w_B", &metrics.HistogramExport{writeBytes, []float64{0.1, 0.2, 0.80, 0.90, 0.99}, []string{"p10", "p20", "p80", "p90", "p99"}}) m.Set("r_B", &metrics.HistogramExport{readBytes, []float64{0.1, 0.2, 0.80, 0.90, 0.99}, []string{"p10", "p20", "p80", "p90", "p99"}}) }
func init() {
	DBVersion, _, _ = sqlite3.Version()
	stats = expvar.NewMap("db")
	stats.Add(numExecutions, 0)
	stats.Add(numExecutionErrors, 0)
	stats.Add(numQueries, 0)
	stats.Add(numETx, 0)
	stats.Add(numQTx, 0)
}
func init() { expvar.Publish("now", expvar.Func(func() interface{} { return time.Now().Format("\"2006-01-02 15:04:05\"") })) stats = &Stats{} expvar.Publish("stats", stats) hits = expvar.NewMap("hits").Init() }
/*
PublishExpvarMetrics - Publishes the NumWorkers and NumPendingAsyncJobs to expvars
*/
func (pool *WorkPool) PublishExpvarMetrics(poolName string) {
	ret := expvar.NewMap(poolName)
	asyncJobsFn := func() string {
		return strconv.FormatInt(int64(pool.NumPendingAsyncJobs()), 10)
	}
	numWorkersFn := func() string {
		return strconv.FormatInt(int64(pool.NumWorkers()), 10)
	}
	ret.Set("pendingAsyncJobs", liveVarAccessor(asyncJobsFn))
	ret.Set("numWorkers", liveVarAccessor(numWorkersFn))
}
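// A minimal sketch of the liveVarAccessor helper assumed above (an assumption
// about its definition, not the package's actual source): any type with a
// String() string method satisfies expvar.Var, so wrapping a closure this way
// makes the map re-read the pool's gauges on every /debug/vars scrape.
type liveVarAccessor func() string

func (a liveVarAccessor) String() string {
	return a()
}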
func init() { m := expvar.NewMap("metricsd") m.Set("requests", statRequestCount) m.Set("requests_per_sec", statRequestRate) m.Set("graphite_latency_us", &metrics.HistogramExport{Histogram: statGraphiteLatency, Percentiles: []float64{0.5, 0.9, 0.99, 0.999}, PercentileNames: []string{"p50", "p90", "p99", "p999"}}) m.Set("librato_latency_us", &metrics.HistogramExport{Histogram: statLibratoLatency, Percentiles: []float64{0.5, 0.9, 0.99, 0.999}, PercentileNames: []string{"p50", "p90", "p99", "p999"}}) m.Set("stathat_latency_us", &metrics.HistogramExport{Histogram: statStatHatLatency, Percentiles: []float64{0.5, 0.9, 0.99, 0.999}, PercentileNames: []string{"p50", "p90", "p99", "p999"}}) }
func (s *chatBotStats) GetOrCreate(identifier string) (*expvar.Map, bool) {
	s.RLock()
	chatbotStats, ok := s.m[identifier]
	s.RUnlock()
	if !ok {
		chatbotStats = expvar.NewMap(identifier)
		s.Lock()
		s.m[identifier] = chatbotStats
		s.Unlock()
	}
	return chatbotStats, ok
}
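// A minimal sketch of a double-checked variant, assuming the same chatBotStats
// type as above. expvar.NewMap panics if the identifier has already been
// published, so if two goroutines miss under the read lock concurrently,
// re-checking under the write lock before calling NewMap avoids that panic.
func (s *chatBotStats) getOrCreateChecked(identifier string) (*expvar.Map, bool) {
	s.RLock()
	m, ok := s.m[identifier]
	s.RUnlock()
	if ok {
		return m, true
	}
	s.Lock()
	defer s.Unlock()
	if m, ok := s.m[identifier]; ok {
		return m, true
	}
	m = expvar.NewMap(identifier)
	s.m[identifier] = m
	return m, false
}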
// setExpvar configures the expvar based collection for this service. It must be done within a
// lock so previous registrations for this key can be checked. Re-registering a key will result
// in a panic.
func (s *Service) setExpvar() {
	expvarMu.Lock()
	defer expvarMu.Unlock()

	key := strings.Join([]string{"graphite", s.protocol, s.bindAddress}, ":")

	// Add expvar for this service.
	var m expvar.Var
	if m = expvar.Get(key); m == nil {
		m = expvar.NewMap(key)
	}
	s.statMap = m.(*expvar.Map)
}
func init() {
	couchbase.ConnPoolCallback = recordConnPoolStat
	couchbase.ClientOpCallback = recordCBClientStat

	expCb := expvar.NewMap("cb")
	expPoolHistos = &expvar.Map{}
	expPoolHistos.Init()
	expCb.Set("pools", expPoolHistos)

	expOpsHistos = &expvar.Map{}
	expOpsHistos.Init()
	expCb.Set("ops", expOpsHistos)
}
func init() {
	mcSent := &mcops{}
	mcRecvd := &mcops{}
	tapRecvd := &mcops{}
	memcached.TransmitHook = mcSent.countReq
	memcached.ReceiveHook = mcRecvd.countRes
	memcached.TapRecvHook = tapRecvd.countReq

	mcStats := expvar.NewMap("mc")
	mcStats.Set("xmit", mcSent)
	mcStats.Set("recv", mcRecvd)
	mcStats.Set("tap", tapRecvd)
}
func TestSnapshotExpvarsMap(t *testing.T) {
	test := expvar.NewMap("testMap")
	test.Add("hello", 42)
	map2 := new(expvar.Map).Init()
	map2.Add("test", 5)
	test.Set("map2", map2)

	vals := map[string]int64{}
	snapshotExpvars(vals)

	assert.Equal(t, vals["testMap.hello"], int64(42))
	assert.Equal(t, vals["testMap.map2.test"], int64(5))
}
// New creates a new Kafka producer
func New(topic string, notifsChan chan proto.Message, key sarama.Encoder, dataset string,
	encoder MessageEncoder, kafkaAddresses []string, kafkaConfig *sarama.Config) (Producer, error) {
	if notifsChan == nil {
		notifsChan = make(chan proto.Message)
	}

	if kafkaConfig == nil {
		// Assign (rather than ":=") so the customized config is not shadowed
		// inside this block and is actually passed to NewAsyncProducer below.
		kafkaConfig = sarama.NewConfig()
		hostname, err := os.Hostname()
		if err != nil {
			hostname = ""
		}
		kafkaConfig.ClientID = hostname
		kafkaConfig.Producer.Compression = sarama.CompressionSnappy
		kafkaConfig.Producer.Return.Successes = true
	}

	kafkaProducer, err := sarama.NewAsyncProducer(kafkaAddresses, kafkaConfig)
	if err != nil {
		return nil, err
	}

	// Setup monitoring structures
	histName := "kafkaProducerHistogram"
	statsName := "messagesStats"
	if id := atomic.AddUint32(&counter, 1); id > 1 {
		histName = fmt.Sprintf("%s-%d", histName, id)
		statsName = fmt.Sprintf("%s-%d", statsName, id)
	}
	hist := monitor.NewHistogram(histName, 32, 0.3, 1000, 0)
	statsMap := expvar.NewMap(statsName)

	p := &producer{
		notifsChan:    notifsChan,
		kafkaProducer: kafkaProducer,
		topic:         topic,
		key:           key,
		dataset:       dataset,
		encoder:       encoder,
		done:          make(chan struct{}),
		wg:            sync.WaitGroup{},
		histogram:     hist,
	}
	statsMap.Set("successes", &p.numSuccesses)
	statsMap.Set("failures", &p.numFailures)
	return p, nil
}
func init() {
	pacStatementSplit = regexp.MustCompile(`\s*;\s*`)
	pacItemSplit = regexp.MustCompile(`\s+`)

	pacCallFindProxyForURLResultCount = new(expvar.Map).Init()
	pacCallFindProxyForURLParamHostCount = new(expvar.Map).Init()

	callFindProxyForURLMap := new(expvar.Map).Init()
	callFindProxyForURLMap.Set("resultCount", pacCallFindProxyForURLResultCount)
	callFindProxyForURLMap.Set("urlHostCount", pacCallFindProxyForURLParamHostCount)

	pacExpvarMap := expvar.NewMap("pac")
	pacExpvarMap.Set("callFindProxyForURL", callFindProxyForURLMap)
}
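// Illustrative counter updates against the maps initialized above; the keys
// used here are hypothetical. Because the maps are nested, the values render
// as nested JSON objects under "pac" -> "callFindProxyForURL" in /debug/vars.
func examplePacCounters() {
	pacCallFindProxyForURLResultCount.Add("DIRECT", 1)
	pacCallFindProxyForURLParamHostCount.Add("example.com", 1)
}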
func init() { m := expvar.NewMap("smtp") m.Set("ConnectsTotal", expConnectsTotal) m.Set("ConnectsHist", expConnectsHist) m.Set("ConnectsCurrent", expConnectsCurrent) m.Set("ReceivedTotal", expReceivedTotal) m.Set("ReceivedHist", expReceivedHist) m.Set("ErrorsTotal", expErrorsTotal) m.Set("ErrorsHist", expErrorsHist) m.Set("WarnsTotal", expWarnsTotal) m.Set("WarnsHist", expWarnsHist) t := time.NewTicker(time.Minute) go metricsTicker(t) }
func initTaskMetrics() {
	m := expvar.NewMap("tasks")
	for k := range globalPeriodicJobRecipes {
		taskDurations[k] = metrics.NewBiasedHistogram()
	}
	for k := range localPeriodicJobRecipes {
		taskDurations[k] = metrics.NewBiasedHistogram()
	}
	for k, v := range taskDurations {
		m.Set(k+"_ms", &metrics.HistogramExport{v,
			[]float64{0.5, 0.9, 0.99, 0.999},
			[]string{"p50", "p90", "p99", "p999"}})
	}
}
// DebugServer starts a server to receive debug information. Typical
// usage is to start it in a goroutine, configured with an address
// from the appropriate configuration object:
//
//	go cmd.DebugServer(c.XA.DebugAddr)
func DebugServer(addr string) {
	m := expvar.NewMap("enabled-features")
	features.Export(m)

	if addr == "" {
		log.Fatalf("unable to boot debug server because no address was given for it. Set debugAddr.")
	}
	ln, err := net.Listen("tcp", addr)
	if err != nil {
		log.Fatalf("unable to boot debug server on %#v", addr)
	}
	http.Handle("/metrics", promhttp.Handler())
	err = http.Serve(ln, nil)
	if err != nil {
		log.Fatalf("unable to boot debug server: %v", err)
	}
}
func init() {
	couchbase.ConnPoolCallback = recordConnPoolStat
	couchbase.ClientOpCallback = recordCBClientStat

	expCb := expvar.NewMap("cb")
	expPoolHistos = &expvar.Map{}
	expPoolHistos.Init()
	expCb.Set("pools", expPoolHistos)

	expOpsHistos = &expvar.Map{}
	expOpsHistos.Init()
	expCb.Set("ops", expOpsHistos)

	grTracker = &goroutineTracker{}
	expvar.Publish("goroutine_stats", grTracker)
}
func init() { expvar.NewInt("NumCPUs").Set(int64(runtime.NumCPU())) revision, err := exec.Command("git", "log", "-1", "--pretty=oneline", "HEAD").Output() if err != nil { expvar.NewString("revision").Set(fmt.Sprintf("Could not determine git version: %s", err)) } else { expvar.NewString("revision").Set(strings.TrimSpace(string(revision))) } env := expvar.NewMap("env") for _, val := range os.Environ() { parts := strings.SplitN(val, "=", 2) if len(parts) >= 2 { env.Set(parts[0], exposedString{parts[1]}) } } }
func init() {
	// NOTE(stevvooe): Setup registry metrics structure to report to expvar.
	// Ideally, we do more metrics through logging but we need some nice
	// realtime metrics for queue state for now.

	registry := expvar.Get("registry")
	if registry == nil {
		registry = expvar.NewMap("registry")
	}

	var notifications expvar.Map
	notifications.Init()
	notifications.Set("endpoints", expvar.Func(func() interface{} {
		endpoints.mu.Lock()
		defer endpoints.mu.Unlock()

		var names []interface{}
		for _, v := range endpoints.registered {
			var epjson struct {
				Name string `json:"name"`
				URL  string `json:"url"`
				EndpointConfig

				Metrics EndpointMetrics
			}

			epjson.Name = v.Name()
			epjson.URL = v.URL()
			epjson.EndpointConfig = v.EndpointConfig

			v.ReadMetrics(&epjson.Metrics)

			names = append(names, epjson)
		}

		return names
	}))

	registry.(*expvar.Map).Set("notifications", &notifications)
}
// NewRPCServer creates a new ExpRPCServer wrapping a rpc.Server, publishes a
// new "exprpc" expvar.Map to track it, sets a default IntervalLabel="min" and
// Interval=time.Minute, and sets Log to DefaultLogger.
//
// To register the wrapped RPC endpoint using the same protocol/endpoint as
// the default rpc.HandleHTTP() method, use:
//
//	expServer := exphttp.NewRPCServer(rpc.DefaultServer)
//	http.HandleFunc("/_goRPC_", expServer.HandleHTTP)
func NewRPCServer(srv *rpc.Server) *ExpRPCServer {
	if rpcStats == nil {
		rpcStats = expvar.NewMap("exprpc")
		reqRate = NewRateCounter(time.Minute)
		respRate = NewRateCounter(time.Minute)
		rpcStats.Set("requests.per_min", reqRate)
		rpcStats.Set("responses.per_min", respRate)
	}
	e := &ExpRPCServer{
		srv:           srv,
		IntervalLabel: "min",
		Interval:      time.Minute,
		Log:           DefaultLogger,
		rates:         make(map[string]*RateCounter),
		startTimes:    make(map[uint64]time.Time),
	}
	return e
}