func NewGaugeDiff(name string, r metrics.Registry) *GaugeDiff {
	return &GaugeDiff{
		Delta:    metrics.NewRegisteredGauge(name, r),
		Absolute: metrics.NewRegisteredGauge(name+"-absolute", metrics.NewRegistry()),
		Previous: metrics.NewRegisteredGauge(name+"-previous", metrics.NewRegistry()),
	}
}
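// Usage sketch, not from the original source: only Delta is registered in the
// caller's registry; Absolute and Previous live in throwaway registries, so
// they hold state but are never reported. The metric name is hypothetical.
func exampleNewGaugeDiff() {
	r := metrics.NewRegistry()
	gd := NewGaugeDiff("interface-eth0.if_octets.rx", r)
	gd.Absolute.Update(1024) // raw counter value; Delta would carry the change between samples
}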
func init() {
	Reg1s = IndexRegistry{
		Registry:                 metrics.NewRegistry(),
		PrivateCPURegistry:       metrics.NewRegistry(),
		PrivateInterfaceRegistry: metrics.NewRegistry(),
		PrivateDFRegistry:        metrics.NewRegistry(),
	}
	Reg1s.PrivateCPUAll = /* *Reg1s.RegisterCPU */ *system.NewMetricCPU( /* pcreg := */ metrics.NewRegistry(), "all")
	// pcreg.Register("all", Reg1s.PrivateCPUAll)
	Reg1s.RAM = system.NewMetricRAM(Reg1s.Registry)
	Reg1s.Swap = operating.NewMetricSwap(Reg1s.Registry)
	Reg1s.Load = operating.NewMetricLoad(Reg1s.Registry)
}
func newBot() (b MMJira) {
	b = MMJira{l: zap.NewJSON(zap.DebugLevel), reg: metrics.NewRegistry()}
	data, err := ioutil.ReadFile("config.yaml")
	if err != nil {
		b.l.Panic("not able to read the file", zap.Error(err))
	}
	var config InstanceConfig
	if err = yaml.Unmarshal(data, &config); err != nil {
		b.l.Panic("not able to unmarshal the file", zap.Error(err))
	}
	b.c = &config
	if !b.c.Debug {
		b.l.SetLevel(zap.ErrorLevel)
	}
	mmpost, err := mmcontroller.NewController(b.c.MMicon, b.c.MMuser, b.c.Hooks, b.c.Debug, metrics.NewPrefixedChildRegistry(b.reg, "mmc."))
	if err != nil {
		panic(err)
	}
	b.m = mmpost
	b.l.Debug("outputting config", zap.Object("config", b.c))
	b.r = mux.NewRouter()
	b.r.HandleFunc("/", b.homeHandler)
	b.r.HandleFunc("/hooks/", b.getHandler).Methods("GET")
	b.r.HandleFunc("/hooks/{hookid}", b.postHandler).Methods("POST")
	b.r.Handle("/metrics", exp.ExpHandler(b.reg))
	b.r.HandleFunc("/config/", b.configGetHandler).Methods("GET")
	return b
}
func newGCMetricaDataSource(pollInterval int) goMetricaDataSource {
	r := metrics.NewRegistry()
	metrics.RegisterDebugGCStats(r)
	go metrics.CaptureDebugGCStats(r, time.Duration(pollInterval)*time.Second)
	return goMetricaDataSource{r}
}
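// A minimal sketch, not from the original source, of reading back captured GC
// stats. RegisterDebugGCStats installs the debug.GCStats.* metrics; the
// one-shot capture here stands in for the polling goroutine above.
func exampleGCStats() {
	r := metrics.NewRegistry()
	metrics.RegisterDebugGCStats(r)
	metrics.CaptureDebugGCStatsOnce(r)
	r.Each(func(name string, metric interface{}) {
		if g, ok := metric.(metrics.Gauge); ok {
			fmt.Printf("%s = %d\n", name, g.Value())
		}
	})
}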
func main() {
	if len(os.Args) < 2 {
		fmt.Println("Please provide one or more IP:Port pairs")
		os.Exit(1)
	}
	h := &httpmetrics.Handler{
		Registries: make(map[string]*metrics.Registry),
		Socket:     "/tmp/tcp-monitor.sock",
	}
	for _, addr := range os.Args[1:] {
		r := metrics.NewRegistry()
		a := &Addr{
			Address:  addr,
			Registry: &r,
		}
		addrs = append(addrs, a)
		a.startPing()
		h.Registries[addr] = &r
	}
	if err := h.CreateServer(); err != nil {
		panic(err)
	}
}
func InitDefaultMetrics() {
	Gm = util.NewStreamingMetrics(metrics.NewRegistry())
	metricsAddr := "tcp://127.0.0.1:5450"
	logPrefix := "Go-stream"
	go statsSender(&metricsAddr, &logPrefix)
}
func ExtraNewMetricRAM(r metrics.Registry, extra RAMUpdater) *MetricRAM {
	return &MetricRAM{
		Free:  metrics.NewRegisteredGauge("memory.memory-free", r),
		Total: metrics.NewRegisteredGauge("memory.memory-total", metrics.NewRegistry()),
		Extra: extra,
	}
}
// New will mint a new Instrumentation - getting statsd connection details from
// the config service and then looking out for any changes.
func New() *Instrumentation {
	ch := config.SubscribeChanges()
	addr := loadStatsdAddr()
	inst := &Instrumentation{
		namespace:     "default",
		confHash:      addr,
		statsd:        loadStatsd(addr),
		registry:      metrics.NewRegistry(),
		savedTimers:   make(map[string]metrics.Timer),
		savedCounters: make(map[string]metrics.Counter),
		savedGauges:   make(map[string]metrics.Gauge),
		launched:      time.Now(),
		instRuntime:   false,
	}
	inst.StartRuntime()

	// Launch listener for config changes.
	go func() {
		for range ch {
			inst.mtx.Lock()
			if addr := loadStatsdAddr(); addr != inst.confHash {
				// @todo close old statsd here -- but no way to do this yet
				inst.statsd = loadStatsd(addr)
				inst.confHash = addr // remember the new address so we only reload on an actual change
			}
			inst.mtx.Unlock()
		}
	}()
	return inst
}
// monitorIssueTracker reads the counts for all the types of issues in the Skia
// issue tracker (code.google.com/p/skia) and stuffs the counts into Graphite.
func monitorIssueTracker() {
	c := &http.Client{
		Transport: &http.Transport{
			Dial: dialTimeout,
		},
	}
	if *useMetadata {
		*apikey = metadata.Must(metadata.ProjectGet(metadata.APIKEY))
	}

	// Create a new metrics registry for the issue tracker metrics.
	addr, err := net.ResolveTCPAddr("tcp", *graphiteServer)
	if err != nil {
		glog.Fatalln("Failed to resolve the Graphite server: ", err)
	}
	issueRegistry := metrics.NewRegistry()
	go graphite.Graphite(issueRegistry, common.SAMPLE_PERIOD, "issues", addr)

	// IssueStatus has all the info we need to capture and record a single issue
	// status, i.e. capture the count of all issues with a status of "New".
	type IssueStatus struct {
		Name   string
		Metric metrics.Gauge
		URL    string
	}

	allIssueStatusLabels := []string{
		"New", "Accepted", "Unconfirmed", "Started", "Fixed", "Verified",
		"Invalid", "WontFix", "Done", "Available", "Assigned",
	}

	issueStatus := []*IssueStatus{}
	for _, issueName := range allIssueStatusLabels {
		issueStatus = append(issueStatus, &IssueStatus{
			Name:   issueName,
			Metric: metrics.NewRegisteredGauge(strings.ToLower(issueName), issueRegistry),
			URL:    "https://www.googleapis.com/projecthosting/v2/projects/skia/issues?fields=totalResults&key=" + *apikey + "&status=" + issueName,
		})
	}

	liveness := imetrics.NewLiveness("issue-tracker")
	for range time.Tick(ISSUE_TRACKER_PERIOD) {
		for _, issue := range issueStatus {
			resp, err := c.Get(issue.URL)
			if err != nil {
				glog.Errorf("Failed to retrieve response from %s: %s", issue.URL, err)
				continue
			}
			jsonResp := map[string]int64{}
			dec := json.NewDecoder(resp.Body)
			if err := dec.Decode(&jsonResp); err != nil {
				glog.Warningf("Failed to decode JSON response: %s", err)
				util.Close(resp.Body)
				continue
			}
			issue.Metric.Update(jsonResp["totalResults"])
			glog.Infof("Num Issues: %s - %d", issue.Name, jsonResp["totalResults"])
			util.Close(resp.Body)
		}
		liveness.Update()
	}
}
// NewConfigLocal constructs a new ConfigLocal with default components.
func NewConfigLocal() *ConfigLocal {
	config := &ConfigLocal{}
	config.SetClock(wallClock{})
	config.SetReporter(NewReporterSimple(config.Clock(), 10))
	config.SetConflictRenamer(TimeAndWriterConflictRenamer{config})
	config.SetMDCache(NewMDCacheStandard(5000))
	config.SetKeyCache(NewKeyCacheStandard(5000))
	// Limit the block cache to 10K entries or 512 MB of bytes.
	config.SetBlockCache(NewBlockCacheStandard(config, 10000, 512*1024*1024))
	config.SetCodec(NewCodecMsgpack())
	config.SetMDOps(&MDOpsStandard{config})
	config.SetBlockOps(&BlockOpsStandard{config})
	config.SetKeyOps(&KeyOpsStandard{config})
	config.SetRekeyQueue(NewRekeyQueueStandard(config))

	config.maxFileBytes = maxFileBytesDefault
	config.maxNameBytes = maxNameBytesDefault
	config.maxDirBytes = maxDirBytesDefault

	// Don't bother creating the registry if UseNilMetrics is set.
	if !metrics.UseNilMetrics {
		registry := metrics.NewRegistry()
		config.SetMetricsRegistry(registry)
	}
	return config
}
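// A minimal sketch, not from the original source, of the UseNilMetrics switch
// consulted above: setting it before construction makes the go-metrics
// constructors return no-op metrics, so skipping the registry costs nothing.
func exampleNilMetrics() {
	metrics.UseNilMetrics = true
	c := metrics.NewCounter() // a NilCounter; Inc is a no-op
	c.Inc(1)
	fmt.Println(c.Count()) // prints 0
}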
func (ir *IndexRegistry) GetOrRegisterPrivateDF(fs sigar.FileSystem) operating.MetricDF {
	ir.PrivateMutex.Lock()
	defer ir.PrivateMutex.Unlock()
	if fs.DirName == "/" {
		fs.DevName = "root"
	} else {
		fs.DevName = strings.Replace(strings.TrimPrefix(fs.DevName, "/dev/"), "/", "-", -1)
	}
	if metric := ir.PrivateDFRegistry.Get(fs.DevName); metric != nil {
		return metric.(operating.MetricDF)
	}
	label := func(tail string) string {
		return fmt.Sprintf("df-%s.df_complex-%s", fs.DevName, tail)
	}
	r, unusedr := ir.Registry, metrics.NewRegistry()
	i := operating.MetricDF{
		DF: &operating.DF{
			DevName:     &operating.StandardMetricString{}, // unregistered
			DirName:     &operating.StandardMetricString{}, // unregistered
			Free:        metrics.NewRegisteredGaugeFloat64(label("free"), r),
			Reserved:    metrics.NewRegisteredGaugeFloat64(label("reserved"), r),
			Total:       metrics.NewRegisteredGauge(label("total"), unusedr),
			Used:        metrics.NewRegisteredGaugeFloat64(label("used"), r),
			Avail:       metrics.NewRegisteredGauge(label("avail"), unusedr),
			UsePercent:  metrics.NewRegisteredGaugeFloat64(label("usepercent"), unusedr),
			Inodes:      metrics.NewRegisteredGauge(label("inodes"), unusedr),
			Iused:       metrics.NewRegisteredGauge(label("iused"), unusedr),
			Ifree:       metrics.NewRegisteredGauge(label("ifree"), unusedr),
			IusePercent: metrics.NewRegisteredGaugeFloat64(label("iusepercent"), unusedr),
		},
	}
	// The error is ignored: Register only errs when the value is not derived
	// from (go-)metrics types.
	ir.PrivateDFRegistry.Register(fs.DevName, i)
	return i
}
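// Usage sketch, not from the original source: repeated calls with the same
// filesystem resolve to one shared MetricDF, so concurrent pollers can call
// this once per sample. The device and mount point are hypothetical.
func exampleGetOrRegisterPrivateDF(ir *IndexRegistry) {
	fs := sigar.FileSystem{DevName: "/dev/sda1", DirName: "/home"}
	a := ir.GetOrRegisterPrivateDF(fs)
	b := ir.GetOrRegisterPrivateDF(fs) // returns the metric registered by the first call
	_, _ = a, b
}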
func newMemoryMetricaDataSource(pollInterval int) goMetricaDataSource {
	r := metrics.NewRegistry()
	metrics.RegisterRuntimeMemStats(r)
	metrics.CaptureRuntimeMemStatsOnce(r)
	go metrics.CaptureRuntimeMemStats(r, time.Duration(pollInterval)*time.Second)
	return goMetricaDataSource{r}
}
func New(prefix string) *Metrics {
	return &Metrics{
		Registry: gometrics.NewRegistry(),
		Timers:   Timers{},
		Counters: Counters{},
		Prefix:   prefix,
	}
}
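// A minimal usage sketch, not from the original source: the prefix is assumed
// to be applied by the surrounding Metrics type when reporting, while
// go-metrics stores names unprefixed in the registry. Names are hypothetical.
func exampleNew() {
	m := New("myapp.")
	t := gometrics.GetOrRegisterTimer("request.latency", m.Registry)
	t.Time(func() { time.Sleep(10 * time.Millisecond) })
}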
func newStats() *stats {
	return &stats{
		registry:   metrics.NewRegistry(),
		startTime:  time.Now(),
		timers:     make(map[Endpoint]endpointTimers),
		recordChan: make(chan *endpointResult, 100),
		terminate:  make(chan chan bool),
	}
}
// monitorIssueTracker reads the counts for all the types of issues in the Skia
// issue tracker (bugs.chromium.org/p/skia) and stuffs the counts into Graphite.
func monitorIssueTracker(c *http.Client) {
	// Create a new metrics registry for the issue tracker metrics.
	addr, err := net.ResolveTCPAddr("tcp", *graphiteServer)
	if err != nil {
		glog.Fatalln("Failed to resolve the Graphite server: ", err)
	}
	issueRegistry := metrics.NewRegistry()
	go graphite.Graphite(issueRegistry, common.SAMPLE_PERIOD, "issues", addr)

	// IssueStatus has all the info we need to capture and record a single issue
	// status, i.e. capture the count of all issues with a status of "New".
	type IssueStatus struct {
		Name   string
		Metric metrics.Gauge
		URL    string
	}

	allIssueStatusLabels := []string{
		"New", "Accepted", "Unconfirmed", "Started", "Fixed", "Verified",
		"Invalid", "WontFix", "Done", "Available", "Assigned",
	}

	issueStatus := []*IssueStatus{}
	for _, issueName := range allIssueStatusLabels {
		q := url.Values{}
		q.Set("fields", "totalResults")
		q.Set("status", issueName)
		issueStatus = append(issueStatus, &IssueStatus{
			Name:   issueName,
			Metric: metrics.NewRegisteredGauge(strings.ToLower(issueName), issueRegistry),
			URL:    issues.MONORAIL_BASE_URL + "?" + q.Encode(),
		})
	}

	liveness := imetrics.NewLiveness("issue-tracker")
	for range time.Tick(ISSUE_TRACKER_PERIOD) {
		for _, issue := range issueStatus {
			resp, err := c.Get(issue.URL)
			if err != nil {
				glog.Errorf("Failed to retrieve response from %s: %s", issue.URL, err)
				continue
			}
			jsonResp := map[string]int64{}
			dec := json.NewDecoder(resp.Body)
			if err := dec.Decode(&jsonResp); err != nil {
				glog.Warningf("Failed to decode JSON response: %s", err)
				util.Close(resp.Body)
				continue
			}
			issue.Metric.Update(jsonResp["totalResults"])
			glog.Infof("Num Issues: %s - %d", issue.Name, jsonResp["totalResults"])
			util.Close(resp.Body)
		}
		liveness.Update()
	}
}
func newStandardBucket(name string) standardBucket {
	registry := gometrics.NewRegistry()
	gometrics.RegisterRuntimeMemStats(registry)
	go gometrics.CaptureRuntimeMemStats(registry, RuntimeMemStatsSampleInterval)
	return standardBucket{
		name:     name,
		registry: registry,
		timers:   make(map[string]Timer),
		gauges:   make(map[string]Gauge),
	}
}
func ExtraNewMetricCPU(r metrics.Registry, name string, extra CPUUpdater) *MetricCPU {
	return &MetricCPU{
		CPU: &CPU{
			N:     Field(name),
			User:  NewGaugePercent(name+".user", r),
			Nice:  NewGaugePercent(name+".nice", r),
			Sys:   NewGaugePercent(name+".system", r),
			Idle:  NewGaugePercent(name+".idle", r),
			Total: NewGaugeDiff(name+"-total", metrics.NewRegistry()),
			Extra: extra,
		},
	}
}
// gmStatsDRegistry returns a go-metrics registry that reports to a StatsD server.
func gmStatsDRegistry(prefix, addr string, interval time.Duration) (Registry, error) {
	if addr == "" {
		return nil, errors.New("statsd addr missing")
	}
	a, err := net.ResolveUDPAddr("udp", addr)
	if err != nil {
		return nil, fmt.Errorf("cannot resolve StatsD addr: %s", err)
	}
	r := gm.NewRegistry()
	go statsd.StatsD(r, interval, prefix, a)
	return &gmRegistry{r}, nil
}
// gmGraphiteRegistry returns a go-metrics registry that reports to a Graphite server.
func gmGraphiteRegistry(prefix, addr string, interval time.Duration) (Registry, error) {
	if addr == "" {
		return nil, errors.New("graphite addr missing")
	}
	a, err := net.ResolveTCPAddr("tcp", addr)
	if err != nil {
		return nil, fmt.Errorf("cannot resolve Graphite addr: %s", err)
	}
	r := gm.NewRegistry()
	go graphite.Graphite(r, interval, prefix, a)
	return &gmRegistry{r}, nil
}
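// Usage sketch, not from the original source: the endpoint and flush interval
// are hypothetical. Both constructors only start the background reporter; the
// returned wrapper (gmRegistry, defined elsewhere) is what callers register
// their metrics against.
func exampleGraphiteRegistry() {
	r, err := gmGraphiteRegistry("myapp", "graphite.example.com:2003", 30*time.Second)
	if err != nil {
		log.Fatal(err)
	}
	_ = r // register counters/timers against r from here on
}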
// InitPing starts the ping loop for each host and initializes its metrics.
func InitPing(pi *PingInfo) {
	for _, ip := range pi.Hosts {
		registry := metrics.NewRegistry()
		(*pi.Registries)[ip] = &registry
		// Ping each host listed -- print when complete.
		go func(ip string, registry *metrics.Registry) {
			for {
				pi.connectAndPing(ip, registry)
				time.Sleep(time.Duration(pi.Repeat) * time.Second)
			}
		}(ip, (*pi.Registries)[ip])
	}
}
func TestFilters(t *testing.T) {
	mock := &MockPutMetricsClient{}
	cfg := &config.Config{
		Client: mock,
		Filter: &config.AllFilter{},
	}
	registry := metrics.NewRegistry()
	timer := metrics.GetOrRegisterTimer("timer", registry)
	timer.Update(10 * time.Second)
	emitMetrics(registry, cfg)
	if mock.metricsPut > 0 {
		t.Fatal("Metrics Put")
	}
}
func TestSimpleTransfer(t *testing.T) {
	log.SetFlags(log.Llongfile)
	slog.Init(slog.DEFAULT_STATS_LOG_NAME, slog.DEFAULT_STATS_LOG_LEVEL, slog.DEFAULT_STATS_LOG_PREFIX,
		baseutil.NewStreamingMetrics(metrics.NewRegistry()), slog.DEFAULT_STATS_ADDR)

	datach := make(chan stream.Object, 100)
	c := DefaultClient("127.0.0.1")
	c.SetIn(datach)

	s := DefaultServer()
	rcvch := make(chan stream.Object, 100)
	s.SetOut(rcvch)

	wg := &sync.WaitGroup{}
	defer wg.Wait()

	wg.Add(1)
	go func() {
		defer wg.Done()
		s.Run()
	}()

	wg.Add(1)
	go func() {
		defer wg.Done()
		c.Run()
	}()

	log.Println("Waiting to snd")
	for i := 0; i < 10; i++ {
		datach <- []byte(fmt.Sprintf("test %d", i))
	}

	log.Println("Waiting to rcv")
	for i := 0; i < 10; i++ {
		if res := <-rcvch; string(res.([]byte)) != fmt.Sprintf("test %d", i) {
			t.Fail()
		}
	}

	log.Println("Waiting to exit")
	s.Stop()
	c.Stop()
}
func newConsumerMetrics(consumerName string) *consumerMetrics {
	kafkaMetrics := &consumerMetrics{
		registry: metrics.NewRegistry(),
	}
	kafkaMetrics.fetchersIdleTimer = metrics.NewRegisteredTimer(fmt.Sprintf("FetchersIdleTime-%s", consumerName), kafkaMetrics.registry)
	kafkaMetrics.fetchDurationTimer = metrics.NewRegisteredTimer(fmt.Sprintf("FetchDuration-%s", consumerName), kafkaMetrics.registry)
	kafkaMetrics.numWorkerManagersGauge = metrics.NewRegisteredGauge(fmt.Sprintf("NumWorkerManagers-%s", consumerName), kafkaMetrics.registry)
	kafkaMetrics.activeWorkersCounter = metrics.NewRegisteredCounter(fmt.Sprintf("WMsActiveWorkers-%s", consumerName), kafkaMetrics.registry)
	kafkaMetrics.pendingWMsTasksCounter = metrics.NewRegisteredCounter(fmt.Sprintf("WMsPendingTasks-%s", consumerName), kafkaMetrics.registry)
	kafkaMetrics.wmsBatchDurationTimer = metrics.NewRegisteredTimer(fmt.Sprintf("WMsBatchDuration-%s", consumerName), kafkaMetrics.registry)
	kafkaMetrics.wmsIdleTimer = metrics.NewRegisteredTimer(fmt.Sprintf("WMsIdleTime-%s", consumerName), kafkaMetrics.registry)
	return kafkaMetrics
}
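// A minimal sketch (same package, not from the original source) of feeding
// these metrics from a fetch loop: Timer.Time records the closure's duration,
// and Counter.Inc/Dec track workers entering and leaving.
func exampleConsumerMetrics(fetch func()) {
	m := newConsumerMetrics("my-consumer")
	m.activeWorkersCounter.Inc(1)
	m.fetchDurationTimer.Time(fetch)
	m.activeWorkersCounter.Dec(1)
}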
func main() {
	defer common.LogPanic()
	common.InitWithMetrics("probeserver", graphiteServer)

	client, err := auth.NewDefaultJWTServiceAccountClient("https://www.googleapis.com/auth/userinfo.email")
	if err != nil {
		glog.Fatalf("Failed to create client for talking to the issue tracker: %s", err)
	}
	go monitorIssueTracker(client)

	glog.Infoln("Looking for Graphite server.")
	addr, err := net.ResolveTCPAddr("tcp", *graphiteServer)
	if err != nil {
		glog.Fatalln("Failed to resolve the Graphite server: ", err)
	}
	glog.Infoln("Found Graphite server.")

	liveness := imetrics.NewLiveness("probes")

	// We have two sets of metrics, one for the probes and one for the probe
	// server itself. The server's metrics are handled by common.Init().
	probeRegistry := metrics.NewRegistry()
	go graphite.Graphite(probeRegistry, common.SAMPLE_PERIOD, *prefix, addr)

	// TODO(jcgregorio) Monitor config file and reload if it changes.
	cfg, err := readConfigFiles(*config)
	if err != nil {
		glog.Fatalln("Failed to read config file: ", err)
	}
	glog.Infoln("Successfully read config file.")

	// Register gauges for each probe.
	for name, probe := range cfg {
		probe.failure = metrics.NewRegisteredGauge(name+".failure", probeRegistry)
		probe.latency = metrics.NewRegisteredGauge(name+".latency", probeRegistry)
	}

	// Create a client that uses our dialer with a timeout.
	c := &http.Client{
		Transport: &http.Transport{
			Dial: dialTimeout,
		},
	}
	probeOneRound(cfg, c)
	for range time.Tick(*runEvery) {
		probeOneRound(cfg, c)
		liveness.Update()
	}
}
func TestHistograms(t *testing.T) {
	mock := &MockPutMetricsClient{}
	filter := &config.NoFilter{}
	cfg := &config.Config{
		Client: mock,
		Filter: filter,
	}
	registry := metrics.NewRegistry()
	hist := metrics.GetOrRegisterHistogram("histo", registry, metrics.NewUniformSample(1024))
	hist.Update(1000)
	hist.Update(500)
	emitMetrics(registry, cfg)
	if mock.metricsPut < len(filter.Percentiles("")) {
		t.Fatal("No Metrics Put")
	}
}
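// A side note in sketch form, not from the original tests: go-metrics
// histograms expose the sampled distribution directly, which is what a
// percentile filter like the one above would read.
func exampleHistogramPercentiles() {
	h := metrics.GetOrRegisterHistogram("histo", metrics.NewRegistry(), metrics.NewUniformSample(1024))
	h.Update(500)
	h.Update(1000)
	ps := h.Percentiles([]float64{0.5, 0.95, 0.99})
	fmt.Println(ps) // expected ≈ [750 1000 1000] with go-metrics' interpolation
}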
func NewPointInPolygonMetrics() *WOFPointInPolygonMetrics {
	registry := metrics.NewRegistry()

	cnt_lookups := metrics.NewCounter()
	cnt_unmarshal := metrics.NewCounter()
	cnt_cache_hit := metrics.NewCounter()
	cnt_cache_miss := metrics.NewCounter()
	cnt_cache_set := metrics.NewCounter()

	tm_unmarshal := metrics.NewTimer()
	tm_intersect := metrics.NewTimer()
	tm_inflate := metrics.NewTimer()
	tm_contain := metrics.NewTimer()
	tm_process := metrics.NewTimer()

	registry.Register("pip.reversegeo.lookups", cnt_lookups)
	registry.Register("pip.geojson.unmarshaled", cnt_unmarshal)
	registry.Register("pip.cache.hit", cnt_cache_hit)
	registry.Register("pip.cache.miss", cnt_cache_miss)
	registry.Register("pip.cache.set", cnt_cache_set)
	registry.Register("pip.timer.reversegeo", tm_process)
	registry.Register("pip.timer.unmarshal", tm_unmarshal)
	// registry.Register("time-to-intersect", tm_intersect)
	// registry.Register("time-to-inflate", tm_inflate)
	registry.Register("pip.timer.containment", tm_contain)

	m := WOFPointInPolygonMetrics{
		Registry:        &registry,
		CountLookups:    &cnt_lookups,
		CountUnmarshal:  &cnt_unmarshal,
		CountCacheHit:   &cnt_cache_hit,
		CountCacheMiss:  &cnt_cache_miss,
		CountCacheSet:   &cnt_cache_set,
		TimeToUnmarshal: &tm_unmarshal,
		TimeToIntersect: &tm_intersect,
		TimeToInflate:   &tm_inflate,
		TimeToContain:   &tm_contain,
		TimeToProcess:   &tm_process,
	}

	metrics.RegisterRuntimeMemStats(registry)
	go metrics.CaptureRuntimeMemStats(registry, 10*time.Second)

	return &m
}
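// Usage sketch, not from the original source: because the struct stores
// pointers to the metrics interfaces, callers dereference before recording.
// UpdateSince is the stock go-metrics way to time a code path.
func examplePIPTiming(m *WOFPointInPolygonMetrics) {
	t := time.Now()
	(*m.CountLookups).Inc(1)
	// ... perform the reverse-geocode lookup ...
	(*m.TimeToProcess).UpdateSince(t)
}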
// NewShuttle returns a properly constructed Shuttle with a given config.
func NewShuttle(config Config) *Shuttle {
	b := make(chan Batch, config.BackBuff)
	mr := metrics.NewRegistry()

	return &Shuttle{
		config:           config,
		Batches:          b,
		Drops:            NewCounter(0),
		Lost:             NewCounter(0),
		MetricsRegistry:  mr,
		NewFormatterFunc: config.FormatterFunc,
		readers:          make([]*LogLineReader, 0),
		oWaiter:          new(sync.WaitGroup),
		rWaiter:          new(sync.WaitGroup),
		Logger:           discardLogger,
		ErrLogger:        discardLogger,
	}
}
func TestCloudwatchReporter(t *testing.T) {
	mock := &MockPutMetricsClient{}
	cfg := &config.Config{
		Client: mock,
		Filter: &config.NoFilter{},
	}
	registry := metrics.NewRegistry()
	for i := 0; i < 30; i++ {
		count := metrics.GetOrRegisterCounter(fmt.Sprintf("count-%d", i), registry)
		count.Inc(1)
	}
	emitMetrics(registry, cfg)
	// The emitter batches datums per PutMetricData call, so 30 counters should
	// arrive in at least two requests.
	if mock.metricsPut < 30 || mock.requests < 2 {
		t.Fatal("No Metrics Put")
	}
}
func NewIssConfig() (IssConfig, error) {
	config := IssConfig{}
	err := envdecode.Decode(&config)
	if err != nil {
		return config, err
	}

	if config.PemFile != "" {
		pemFileData, err := ioutil.ReadFile(config.PemFile)
		if err != nil {
			return config, fmt.Errorf("unable to read pemfile: %s", err)
		}
		cp := x509.NewCertPool()
		if ok := cp.AppendCertsFromPEM(pemFileData); !ok {
			return config, fmt.Errorf("error parsing PEM: %s", config.PemFile)
		}
		config.TlsConfig = &tls.Config{RootCAs: cp}
	}

	sp := make([]string, 0, 2)
	if config.LibratoSource != "" {
		sp = append(sp, config.LibratoSource)
	}
	if config.Dyno != "" {
		sp = append(sp, config.Dyno)
	}
	config.LibratoSource = strings.Join(sp, ".")

	config.MetricsRegistry = metrics.NewRegistry()
	return config, nil
}
func NewGaugePercent(name string, r metrics.Registry) *GaugePercent {
	return &GaugePercent{
		Percent:  metrics.NewRegisteredGaugeFloat64(name, r),
		Previous: metrics.NewRegisteredGauge(name+"-previous", metrics.NewRegistry()),
	}
}
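// Usage sketch, not from the original source: Percent is the only gauge
// visible in the caller's registry; Previous lives in a private throwaway
// registry purely as state for the next delta computation. The name is
// hypothetical.
func exampleNewGaugePercent() {
	r := metrics.NewRegistry()
	gp := NewGaugePercent("cpu-0.user", r)
	gp.Percent.Update(12.5) // GaugeFloat64 takes a float64
	gp.Previous.Update(125) // Gauge takes an int64
}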