func (m *MetricsReporter) CaptureRoutingResponse(b *route.Endpoint, res *http.Response, t time.Time, d time.Duration) {
    dropsondeMetrics.BatchIncrementCounter(getResponseCounterName(res))
    dropsondeMetrics.BatchIncrementCounter("responses")

    // Integer division truncates: sub-millisecond remainders are dropped.
    latency := float64(d / time.Millisecond)
    unit := "ms"
    dropsondeMetrics.SendValue("latency", latency, unit)

    componentName, ok := b.Tags["component"]
    if ok && len(componentName) > 0 {
        dropsondeMetrics.SendValue(fmt.Sprintf("latency.%s", componentName), latency, unit)
    }
}
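getResponseCounterName is not shown in this excerpt. A plausible sketch, assuming it buckets responses by status-code class (gorouter emits counters named along the lines of responses.2xx); the real implementation may differ:

// Hypothetical reconstruction of the helper referenced above, not taken from
// the original source: map a response to a responses.<N>xx counter name.
func getResponseCounterName(res *http.Response) string {
    if res == nil || res.StatusCode < 100 || res.StatusCode >= 600 {
        return "responses.unknown"
    }
    return fmt.Sprintf("responses.%dxx", res.StatusCode/100)
}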
func (s *SinkManagerMetrics) Dec(sink sinks.Sink) {
    switch sink.(type) {
    case *dump.DumpSink:
        dumpSinks := atomic.AddInt32(&s.dumpSinks, -1)
        metrics.SendValue("messageRouter.numberOfDumpSinks", float64(dumpSinks), "sinks")
    case *syslog.SyslogSink:
        syslogSinks := atomic.AddInt32(&s.syslogSinks, -1)
        metrics.SendValue("messageRouter.numberOfSyslogSinks", float64(syslogSinks), "sinks")
    case *websocket.WebsocketSink:
        websocketSinks := atomic.AddInt32(&s.websocketSinks, -1)
        metrics.SendValue("messageRouter.numberOfWebsocketSinks", float64(websocketSinks), "sinks")
    }
}
func emitMetric(event observer.AddressChange) {
    if event.New == "" {
        return
    }
    key := getKey(event, "changed")
    metrics.SendValue(key, 1, "count")
}
func (sinkManagerMetrics *SinkManagerMetrics) Dec(sink sinks.Sink) {
    sinkManagerMetrics.lock.Lock()
    defer sinkManagerMetrics.lock.Unlock()

    switch sink.(type) {
    case *dump.DumpSink:
        sinkManagerMetrics.dumpSinks--
        metrics.SendValue("messageRouter.numberOfDumpSinks", float64(sinkManagerMetrics.dumpSinks), "sinks")
    case *syslog.SyslogSink:
        sinkManagerMetrics.syslogSinks--
        metrics.SendValue("messageRouter.numberOfSyslogSinks", float64(sinkManagerMetrics.syslogSinks), "sinks")
    case *websocket.WebsocketSink:
        sinkManagerMetrics.websocketSinks--
        metrics.SendValue("messageRouter.numberOfWebsocketSinks", float64(sinkManagerMetrics.websocketSinks), "sinks")
    }
}
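Both Dec variants above (atomic and mutex-guarded) imply a matching Inc. A minimal sketch of the atomic counterpart, assuming the same fields and metric names; this is inferred from the Dec methods, not copied from the original source:

// Assumed increment side matching the atomic Dec above: bump the gauge
// and report the new value under the same metric name.
func (s *SinkManagerMetrics) Inc(sink sinks.Sink) {
    switch sink.(type) {
    case *dump.DumpSink:
        dumpSinks := atomic.AddInt32(&s.dumpSinks, 1)
        metrics.SendValue("messageRouter.numberOfDumpSinks", float64(dumpSinks), "sinks")
    case *syslog.SyslogSink:
        syslogSinks := atomic.AddInt32(&s.syslogSinks, 1)
        metrics.SendValue("messageRouter.numberOfSyslogSinks", float64(syslogSinks), "sinks")
    case *websocket.WebsocketSink:
        websocketSinks := atomic.AddInt32(&s.websocketSinks, 1)
        metrics.SendValue("messageRouter.numberOfWebsocketSinks", float64(websocketSinks), "sinks")
    }
}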
func (agent *Agent) processInstancesJson() {
    currentTasks, err := agent.readInstancesJson()
    // Note: totalApps is emitted before the error check, so a failed read
    // reports zero apps rather than skipping the metric.
    metrics.SendValue("totalApps", float64(len(currentTasks)), "apps")
    if err != nil {
        return
    }
    agent.knownInstancesChan <- agent.processTasks(currentTasks)
}
func (c *client) EmitMetric(m *metric) error {
    err := dmetrics.SendValue(m.Name, m.Value, m.Unit)
    if err != nil {
        // Include the underlying error so the cause isn't lost.
        return fmt.Errorf("error emitting metric %v: %s", m, err)
    }
    return nil
}
func (u *UptimeMonitor) Start() {
    ticker := time.NewTicker(u.interval)
    for {
        select {
        case <-ticker.C:
            metrics.SendValue("Uptime", float64(time.Now().Unix()-u.started), "seconds")
        case stopped := <-u.doneChan:
            ticker.Stop()
            close(stopped)
            return
        }
    }
}
func (u *UptimeMonitor) Start() {
    ticker := time.NewTicker(u.interval)
    // Note: Add is called inside Start, so callers must ensure Start is
    // running before they Wait on the group.
    u.wg.Add(1)
    defer u.wg.Done()
    for {
        select {
        case <-ticker.C:
            metrics.SendValue("Uptime", float64(time.Now().Unix()-u.started), "seconds")
        case <-u.doneChan:
            ticker.Stop()
            return
        }
    }
}
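The two Start variants above use different shutdown signals: the first hands the monitor a channel to close once it has stopped, the second pairs a plain done channel with a WaitGroup. A minimal caller-side sketch of the first handshake; the Stop method and channel type are assumed from the snippet, not taken from the original source:

// Assumed shutdown handshake for the first variant: pass a channel through
// doneChan and block until Start closes it after stopping the ticker.
func (u *UptimeMonitor) Stop() {
    stopped := make(chan bool)
    u.doneChan <- stopped
    <-stopped // returns once the monitor loop has exited
}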
func (agent *Agent) pollInstancesJson() {
    defer agent.Done()
    watcher, err := fsnotify.NewWatcher()
    if err != nil {
        panic(err)
    }

    // Retry until the directory containing instances.json can be watched.
    for {
        time.Sleep(100 * time.Millisecond)
        err := watcher.Watch(path.Dir(agent.InstancesJsonFilePath))
        if err != nil {
            agent.logger.Warnf("Reading failed, retrying. %s\n", err)
            continue
        }
        break
    }

    agent.logger.Info("Read initial tasks data")
    agent.processInstancesJson()

    for {
        select {
        case ev := <-watcher.Event:
            agent.logger.Debugf("Got Event: %v\n", ev)
            if ev.IsDelete() {
                agent.knownInstancesChan <- resetCache
                metrics.SendValue("totalApps", 0.0, "apps")
            } else {
                agent.processInstancesJson()
            }
        case err := <-watcher.Error:
            agent.logger.Warnf("Received error from file system notification: %s\n", err)
        case <-agent.stopChan:
            return
        }
    }
}
func (l *LinuxFileDescriptor) Start() {
    l.logger.Info("Starting Open File Descriptor Monitor...")
    ticker := time.NewTicker(l.interval)
    l.logger.Infof("Starting FD monitor with pid %d", os.Getpid())
    path := fmt.Sprintf("/proc/%d/fd", os.Getpid())
    for {
        select {
        case <-ticker.C:
            finfos, err := ioutil.ReadDir(path)
            if err != nil {
                l.logger.Errorf("Could not read pid dir %s: %s", path, err)
                break
            }
            // Each open descriptor appears as a symlink in /proc/<pid>/fd.
            metrics.SendValue("LinuxFileDescriptor", float64(symlinks(finfos)), "File")
        case stopped := <-l.done:
            ticker.Stop()
            close(stopped)
            return
        }
    }
}
func (c *MetricsReporter) CaptureRouteStats(totalRoutes int, msSinceLastUpdate uint64) {
    dropsondeMetrics.SendValue("total_routes", float64(totalRoutes), "")
    dropsondeMetrics.SendValue("ms_since_last_registry_update", float64(msSinceLastUpdate), "ms")
}
func (name Duration) Send(duration time.Duration) {
    dropsonde_metrics.SendValue(string(name), float64(duration), "nanos")
}
func (sinkManagerMetrics *SinkManagerMetrics) DecFirehose() {
    sinkManagerMetrics.lock.Lock()
    defer sinkManagerMetrics.lock.Unlock()
    sinkManagerMetrics.firehoseSinks--
    metrics.SendValue("messageRouter.numberOfFirehoseSinks", float64(sinkManagerMetrics.firehoseSinks), "sinks")
}
func sendMetric(metricName string, startTime time.Time) {
    elapsedMillisecond := float64(time.Since(startTime)) / float64(time.Millisecond)
    metrics.SendValue(fmt.Sprintf("dopplerProxy.%sLatency", metricName), elapsedMillisecond, "ms")
}
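sendMetric pairs naturally with defer, so the latency window covers the whole handler. A minimal sketch; the handler name serveFirehose is illustrative, not from the source:

// Illustrative caller (handler name assumed): capture the start time on entry
// and emit dopplerProxy.firehoseLatency on the way out.
func serveFirehose(w http.ResponseWriter, r *http.Request) {
    startTime := time.Now()
    defer sendMetric("firehose", startTime)

    // ... handle the request ...
}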
            defer lock.Unlock()
            receivedEvents[eventId] = true
        }()
    }
}()

httpListener, err := net.Listen("tcp", "localhost:0")
Expect(err).ToNot(HaveOccurred())
defer httpListener.Close()

httpHandler := dropsonde.InstrumentedHandler(FakeHandler{})
go http.Serve(httpListener, httpHandler)

_, err = http.Get("http://" + httpListener.Addr().String())
Expect(err).ToNot(HaveOccurred())

metrics.SendValue("TestMetric", 0, "")
metrics.IncrementCounter("TestIncrementCounter")

expectedEventTypes := []string{
    "HttpStartClient", "HttpStartServer", "HttpStopServer", "HttpStopClient",
    "ValueMetricnumCPUS", "ValueMetricTestMetric", "CounterEventTestIncrementCounter",
}
for _, eventType := range expectedEventTypes {
    Eventually(func() bool {
        lock.RLock()
        defer lock.RUnlock()
        _, ok := receivedEvents[eventType]
        return ok
    }).Should(BeTrue(), fmt.Sprintf("missing %s", eventType))
}

heartbeatUuid := heartbeatRequest.GetIdentifier().String()
Eventually(heartbeatUuidsChan).Should(Receive(Equal(heartbeatUuid)))
func (name Requests) Send(value int) error {
    return metrics.SendValue(string(name), float64(value), "Req")
}
func (c *MetricsReporter) CaptureLookupTime(t time.Duration) {
    unit := "ns"
    dropsondeMetrics.SendValue("route_lookup_time", float64(t.Nanoseconds()), unit)
}
func (name Mebibytes) Send(mebibytes int) error {
    return metrics.SendValue(string(name), float64(mebibytes), "MiB")
}
func (name Duration) Send(duration time.Duration) error {
    return metrics.SendValue(string(name), float64(duration), "nanos")
}
func main() {
    flag.Parse()
    config, logger := parseConfig(*debug, *configFile, *logFilePath)
    dropsonde.Initialize(config.MetronAddress, "syslog_drain_binder")

    workPool, err := workpool.NewWorkPool(config.EtcdMaxConcurrentRequests)
    if err != nil {
        panic(err)
    }
    adapter := etcdstoreadapter.NewETCDStoreAdapter(config.EtcdUrls, workPool)

    updateInterval := time.Duration(config.UpdateIntervalSeconds) * time.Second
    politician := elector.NewElector(config.InstanceName, adapter, updateInterval, logger)

    drainTTL := time.Duration(config.DrainUrlTtlSeconds) * time.Second
    store := etcd_syslog_drain_store.NewEtcdSyslogDrainStore(adapter, drainTTL, logger)

    ticker := time.NewTicker(updateInterval)
    for {
        select {
        case <-cfcomponent.RegisterGoRoutineDumpSignalChannel():
            cfcomponent.DumpGoRoutine()
        case <-ticker.C:
            if politician.IsLeader() {
                err = politician.StayAsLeader()
                if err != nil {
                    logger.Errorf("Error when staying leader: %s", err.Error())
                    politician.Vacate()
                    continue
                }
            } else {
                err = politician.RunForElection()
                if err != nil {
                    logger.Errorf("Error when running for leader: %s", err.Error())
                    politician.Vacate()
                    continue
                }
            }

            logger.Debugf("Polling %s for updates", config.CloudControllerAddress)
            drainUrls, err := Poll(config.CloudControllerAddress, config.BulkApiUsername, config.BulkApiPassword, config.PollingBatchSize, config.SkipCertVerify)
            if err != nil {
                logger.Errorf("Error when polling cloud controller: %s", err.Error())
                politician.Vacate()
                continue
            }

            metrics.IncrementCounter("pollCount")

            var totalDrains int
            for _, drainList := range drainUrls {
                totalDrains += len(drainList)
            }
            metrics.SendValue("totalDrains", float64(totalDrains), "drains")

            logger.Debugf("Updating drain URLs for %d application(s)", len(drainUrls))
            err = store.UpdateDrains(drainUrls)
            if err != nil {
                logger.Errorf("Error when updating ETCD: %s", err.Error())
                politician.Vacate()
                continue
            }
        }
    }
}
func (s *SinkManagerMetrics) DecFirehose() {
    firehoseSinks := atomic.AddInt32(&s.firehoseSinks, -1)
    metrics.SendValue("messageRouter.numberOfFirehoseSinks", float64(firehoseSinks), "sinks")
}
func (name Metric) Send(value int) {
    dropsonde_metrics.SendValue(string(name), float64(value), "Metric")
}
func (name BytesPerSecond) Send(value float64) error {
    return metrics.SendValue(string(name), value, "B/s")
}
func (name RequestsPerSecond) Send(value float64) error {
    return metrics.SendValue(string(name), value, "Req/s")
}
func (name Metric) Send(value int) error {
    return metrics.SendValue(string(name), float64(value), "Metric")
}
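The named types above (Duration, Requests, Mebibytes, BytesPerSecond, RequestsPerSecond, Metric) wrap metrics.SendValue so each metric carries a fixed unit. A minimal usage sketch, assuming the types have a string underlying type; the constant names here are illustrative, not from the source:

// Illustrative usage of the typed senders above (names assumed).
const (
    requestLatency = Duration("requestLatency")
    requestCount   = Requests("requestCount")
)

func recordRequest(start time.Time, count int) {
    // Each Send forwards to metrics.SendValue with the type's fixed unit.
    requestLatency.Send(time.Since(start)) // emitted in "nanos"
    requestCount.Send(count)               // emitted in "Req"
}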
. "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "time" ) var _ = Describe("Metrics", func() { var fakeMetricSender *fake.FakeMetricSender BeforeEach(func() { fakeMetricSender = fake.NewFakeMetricSender() metricBatcher := metricbatcher.New(fakeMetricSender, time.Millisecond) metrics.Initialize(fakeMetricSender, metricBatcher) }) It("delegates SendValue", func() { metrics.SendValue("metric", 42.42, "answers") Expect(fakeMetricSender.GetValue("metric").Value).To(Equal(42.42)) Expect(fakeMetricSender.GetValue("metric").Unit).To(Equal("answers")) }) It("delegates IncrementCounter", func() { metrics.IncrementCounter("count") Expect(fakeMetricSender.GetCounter("count")).To(BeEquivalentTo(1)) metrics.IncrementCounter("count") Expect(fakeMetricSender.GetCounter("count")).To(BeEquivalentTo(2)) })