// determineProperties computes summary statistics for a set of sizes —
// min, max, length, sum, 95th percentile, and mean — and packs them into
// a freshly initialized expvar.Map (ints plus an "avg" float).
//
// The input slice is not modified: sorting happens on a private copy
// (previously sort.Ints reordered the caller's slice as a side effect).
// An empty or nil input yields an empty map.
func determineProperties(sizes []int) *expvar.Map {
	props := new(expvar.Map).Init()
	if len(sizes) == 0 {
		return props
	}

	var sum int
	for _, n := range sizes {
		sum += n
	}

	// Sort a copy so callers never observe a reordered argument.
	sorted := make([]int, len(sizes))
	copy(sorted, sizes)
	sort.Ints(sorted)

	summary := map[string]int{
		"min":    sorted[0],
		"max":    sorted[len(sorted)-1],
		"length": len(sorted),
		"sum":    sum,
		// Truncating index keeps 0 <= idx < len for any non-empty slice.
		"95e": sorted[int(float64(len(sorted))*0.95)],
	}
	mean := float64(sum) / float64(summary["length"])

	// Pack the integer stats into the expvar Map.
	for k, v := range summary {
		n := new(expvar.Int)
		n.Set(int64(v))
		props.Set(k, n)
	}
	avge := new(expvar.Float)
	avge.Set(mean)
	props.Set("avg", avge)
	return props
}
// Export populates a expvar.Map with the state of all // of the features. func Export(m *expvar.Map) { fMu.RLock() defer fMu.RUnlock() for f, v := range features { m.Set(f.String(), boolVar(v)) } }
// HTTP Dynamic Streaming prober. // Parse and probe F4M playlists and report time statistics and errors. func SanjoseProber(ctl *bcast.Group, tasks chan *Task, debugvars *expvar.Map) { for { task := <-tasks result := ExecHTTP(task) task.ReplyTo <- result debugvars.Add("hds-tasks-done", 1) } }
func newListener(nl net.Listener, ns *expvar.Map) *listener { statNS := new(expvar.Map).Init() ns.Set("handshake", statNS) lis := &listener{ Listener: nl, handshakeStats: newHandshakeStats(statNS), } return lis }
// snapshotMap recursively walks expvar Maps and records their integer expvars // in a separate flat map. func snapshotMap(varsMap map[string]int64, path string, mp *expvar.Map) { mp.Do(func(kv expvar.KeyValue) { switch kv.Value.(type) { case *expvar.Int: varsMap[path+"."+kv.Key], _ = strconv.ParseInt(kv.Value.String(), 10, 64) case *expvar.Map: snapshotMap(varsMap, path+"."+kv.Key, kv.Value.(*expvar.Map)) } }) }
func statsPageHandler(w http.ResponseWriter, r *http.Request, bctx *BasePageContext) { ctx := &struct { *BasePageContext Stats []*stat }{ BasePageContext: bctx, Stats: []*stat{}, } var stats *expvar.Map stats = expvar.Get("counters").(*expvar.Map) servStat := expvar.Get("states").(*expvar.Map) errors := expvar.Get("errors").(*expvar.Map) for _, ep := range bctx.Globals.GetEndpoints() { epname := ep.Name all := stats.Get(epname) success := stats.Get(epname + "|pass") unauth := stats.Get(epname + "|401") fail := stats.Get(epname + "|403") status := servStat.Get(epname) statusSSL := servStat.Get(epname + "|ssl") err := errors.Get(epname) errSSL := errors.Get(epname + "|ssl") ctx.Stats = append(ctx.Stats, &stat{epname, fail, success, unauth, all, status, statusSSL, err, errSSL}) } RenderTemplateStd(w, ctx, "stats.tmpl") }
func resetVarMap(varMap *expvar.Map) { // There is no easy way to delete/clear expvar.Map. As such there is a slight // race here. *sigh* keys := []string{} varMap.Do(func(kv expvar.KeyValue) { keys = append(keys, kv.Key) }) for _, key := range keys { varMap.Set(key, new(expvar.Int)) } }
func writeHistogramForDuration(expvarMap *expvar.Map, duration time.Duration, prefix string) { if base.LogEnabled("PerfStats") { var durationMs int if duration < 1*time.Second { durationMs = int(duration/(100*time.Millisecond)) * 100 } else { durationMs = int(duration/(1000*time.Millisecond)) * 1000 } expvarMap.Add(fmt.Sprintf("%s-%06dms", prefix, durationMs), 1) } }
func TestSnapshotExpvarsMap(t *testing.T) { test := expvar.NewMap("testMap") test.Add("hello", 42) map2 := new(expvar.Map).Init() map2.Add("test", 5) test.Set("map2", map2) vals := map[string]int64{} snapshotExpvars(vals) assert.Equal(t, vals["testMap.hello"], int64(42)) assert.Equal(t, vals["testMap.map2.test"], int64(5)) }
func init() { pacStatementSplit = regexp.MustCompile(`\s*;\s*`) pacItemSplit = regexp.MustCompile(`\s+`) pacCallFindProxyForURLResultCount = new(expvar.Map).Init() pacCallFindProxyForURLParamHostCount = new(expvar.Map).Init() callFindProxyForURLMap := new(expvar.Map).Init() callFindProxyForURLMap.Set("resultCount", pacCallFindProxyForURLResultCount) callFindProxyForURLMap.Set("urlHostCount", pacCallFindProxyForURLParamHostCount) pacExpvarMap := expvar.NewMap("pac") pacExpvarMap.Set("callFindProxyForURL", callFindProxyForURLMap) }
func getExpvarAsString(expvar *expvar.Map, name string) string { value := expvar.Get(name) if value != nil { return value.String() } else { return "" } }
func newOriginAllower(blockedDomains []string, hostname string, gclog logClient, ns *expvar.Map) *originAllower { mu := &sync.RWMutex{} topKAllDomains := topk.New(100) topKOfflistDomains := topk.New(100) lifetime := new(expvar.Map).Init() ns.Set("lifetime", lifetime) lifetime.Set("top_all_domains", expvar.Func(func() interface{} { mu.RLock() defer mu.RUnlock() return topKAllDomains.Keys() })) lifetime.Set("top_offlist_domains", expvar.Func(func() interface{} { mu.RLock() defer mu.RUnlock() return topKOfflistDomains.Keys() })) oa := &originAllower{ m: make(map[string]struct{}), ns: ns, hostname: hostname, gclog: gclog, mu: mu, topKAllDomains: topKAllDomains, topKOfflistDomains: topKOfflistDomains, } for _, d := range blockedDomains { oa.m[d] = struct{}{} } return oa }
func init() { // NOTE(stevvooe): Setup registry metrics structure to report to expvar. // Ideally, we do more metrics through logging but we need some nice // realtime metrics for queue state for now. registry := expvar.Get("registry") if registry == nil { registry = expvar.NewMap("registry") } var notifications expvar.Map notifications.Init() notifications.Set("endpoints", expvar.Func(func() interface{} { endpoints.mu.Lock() defer endpoints.mu.Unlock() var names []interface{} for _, v := range endpoints.registered { var epjson struct { Name string `json:"name"` URL string `json:"url"` EndpointConfig Metrics EndpointMetrics } epjson.Name = v.Name() epjson.URL = v.URL() epjson.EndpointConfig = v.EndpointConfig v.ReadMetrics(&epjson.Metrics) names = append(names, epjson) } return names })) registry.(*expvar.Map).Set("notifications", ¬ifications) }
// calculateTargetSummaryStats builds per-tier, per-target, metric-to-host summary stats func calculateTargetSummaryStats(tiers *[]Tier) { for _, tier := range *tiers { totalSizes := []int{} tierStats := new(expvar.Map).Init() // Determine summary stats per target for target, hosts := range tier.Mappings { sizes := []int{} for _, metrics := range hosts { sizes = append(sizes, len(metrics)) totalSizes = append(totalSizes, len(metrics)) } if len(sizes) == 0 { continue } // Build summary props := determineProperties(sizes) tierStats.Set(target, props) } props := determineProperties(totalSizes) tierStats.Set("total", props) distCounts.Set(tier.Name, tierStats) } }
// incrementMetric increments a value in the specified expvar.Map. The key // should be a windows syscall.Errno or a string. Any other types will be // reported under the "other" key. func incrementMetric(v *expvar.Map, key interface{}) { switch t := key.(type) { default: v.Add("other", 1) case string: v.Add(t, 1) case syscall.Errno: v.Add(strconv.Itoa(int(t)), 1) } }
func NewStatusStats(outer *expvar.Map) *statusStats { m := new(expvar.Map).Init() outer.Set("statuses", m) status1xx := &expvar.Int{} status2xx := &expvar.Int{} status3xx := &expvar.Int{} status4xx := &expvar.Int{} status5xx := &expvar.Int{} m.Set("1xx", status1xx) m.Set("2xx", status2xx) m.Set("3xx", status3xx) m.Set("4xx", status4xx) m.Set("5xx", status5xx) return &statusStats{ status1xx, status2xx, status3xx, status4xx, status5xx, } }
func getMetricSetExpvarMap(module, name string) (*expvar.Map, error) { key := fmt.Sprintf("%s-%s", module, name) fetchesLock.Lock() defer fetchesLock.Unlock() expVar := fetches.Get(key) switch m := expVar.(type) { case nil: expMap := new(expvar.Map).Init() fetches.Set(key, expMap) expMap.Add(successesKey, 0) expMap.Add(failuresKey, 0) expMap.Add(eventsKey, 0) return expMap, nil case *expvar.Map: return m, nil default: return nil, fmt.Errorf("unexpected expvar.Var type (%T) found for key '%s'", m, key) } }
// TODO к реализации // Probe HTTP with additional checks for Widevine. // Really now only http-range check supported. func WidevineProber(ctl *bcast.Group, tasks chan *Task, debugvars *expvar.Map) { var result *Result defer func() { if r := recover(); r != nil { fmt.Println("trace dumped in Widevine prober:", r) } }() for { queueCount := debugvars.Get("wv-tasks-queue") queueCount.(*expvar.Int).Set(int64(len(tasks))) task := <-tasks if time.Now().Before(task.TTL) { result = ExecHTTP(task) debugvars.Add("wv-tasks-done", 1) } else { result = TaskExpired(task) debugvars.Add("wv-tasks-expired", 1) } task.ReplyTo <- result } }
// StreamBox is the container that keeps a single stream's properties and
// regularly makes tasks for the appropriate probers. It reacts to
// START_MON/STOP_MON broadcast commands, saves stats every 3 seconds, and
// adds extra sleep for streams that keep failing.
func StreamBox(ctl *bcast.Group, stream Stream, streamType StreamType, taskq chan *Task, debugvars *expvar.Map) {
	var checkCount uint64 // number of checks completed so far
	var addSleepToBrokenStream time.Duration
	var tid int64 = time.Now().Unix() // got increasing offset on each program start
	var min, max int
	var command Command
	var online bool = false
	var stats Stats

	defer func() {
		if r := recover(); r != nil {
			fmt.Printf("Stream %s trace: %s\n", stream.Name, r)
		}
	}()

	// One Task value is reused for every probe; only Tid and TTL change
	// per iteration.
	task := &Task{Stream: stream, ReplyTo: make(chan *Result)}
	// Whether the prober needs the response body depends on stream type.
	switch streamType {
	case HTTP:
		task.ReadBody = false
	case HLS:
		task.ReadBody = true
	case HDS:
		task.ReadBody = true
	case WV:
		task.ReadBody = false
	default:
		task.ReadBody = false
	}
	ctlrcv := ctl.Join() // monitoring control channel
	timer := time.Tick(3 * time.Second)

	for {
		select {
		case recv := <-ctlrcv.In:
			// Monitoring switched on/off by broadcast command.
			command = recv.(Command)
			switch command {
			case START_MON:
				online = true
			case STOP_MON:
				online = false
			}
		case <-timer:
			// Periodic persistence of the accumulated stats.
			SaveStats(stream, stats)
		default:
			if !online {
				time.Sleep(1 * time.Second)
				continue
			}
			// Sleep a random time in [3/4·T, T] plus the broken-stream
			// back-off, so streams do not probe in lock-step.
			max = int(cfg.Params(stream.Group).TimeBetweenTasks)
			min = int(cfg.Params(stream.Group).TimeBetweenTasks / 4. * 3.)
			time.Sleep(time.Duration(rand.Intn(max-min)+min)*time.Second + addSleepToBrokenStream) // randomize streams order
			tid++
			task.Tid = tid
			task.TTL = time.Now().Add(time.Duration(cfg.Params(stream.Group).TaskTTL * time.Second))
			stats.Checks++ // TODO potentially overflow
			taskq <- task
			debugvars.Add("requested-tasks", 1)
			result := <-task.ReplyTo
			if result.ErrType == TTLEXPIRED {
				continue
			} else {
				checkCount++
				if checkCount > 144 {
					fmt.Printf("Repeated %d times %s\n", checkCount, task.Name)
				}
			}
			// Persist sub-results (e.g. per-variant probes) and the main
			// result asynchronously.
			for _, subres := range result.SubResults {
				subres.Pid = result
				go SaveResult(stream, *subres)
			}
			go SaveResult(stream, *result)
			// Recompute the back-off window from the broken-stream config.
			max = int(cfg.Params(stream.Group).CheckBrokenTime)
			min = int(cfg.Params(stream.Group).CheckBrokenTime / 4. * 3.)
			switch {
			// permanent error, not a timeout:
			case result.ErrType > CRITICAL_LEVEL, result.ErrType == TTLEXPIRED:
				addSleepToBrokenStream = time.Duration(rand.Intn(max-min)+min) * time.Second
			// works ok:
			case result.ErrType == SUCCESS:
				addSleepToBrokenStream = 0
			default:
				addSleepToBrokenStream = 0
			}
			if result.ErrType != TTLEXPIRED {
				if result.ErrType >= WARNING_LEVEL {
					go Log(ERROR, stream, *result)
				} else {
					// Successful but slow checks are downgraded to
					// SLOW/VERYSLOW warnings based on elapsed time.
					if result.Elapsed >= cfg.Params(stream.Group).VerySlowWarningTimeout*time.Second {
						result.ErrType = VERYSLOW
						go Log(WARNING, stream, *result)
					} else if result.Elapsed >= cfg.Params(stream.Group).SlowWarningTimeout*time.Second {
						result.ErrType = SLOW
						go Log(WARNING, stream, *result)
					}
				}
			}
		}
	}
}
// TODO: maybe we should use Func() and just keep a stats structure. // But that would require locking and nngh nngh. func setupExpvars() { var m expvar.Map m.Init() m.Set("notls", expvar.Func(notls.Stats)) m.Set("yakkers", expvar.Func(yakkers.Stats)) stats.Set("sizes", &m) stats.Set("dnsbl_hits", expvar.Func(dblcounts.Stats)) stats.Set("sbl_hits", expvar.Func(sblcounts.Stats)) // BUG: must remember to do this for all counters so they have // an initial value. var evts expvar.Map evts.Init() evts.Set("connections", &events.connections) evts.Set("tls_errors", &events.tlserrs) evts.Set("yakkers", &events.yakkers) evts.Set("notls_conns", &events.notlscnt) evts.Set("rules_errors", &events.ruleserr) evts.Set("yakker_adds", &events.yakads) evts.Set("rsetdrops", &events.rsetdrops) evts.Set("abandons", &events.abandons) evts.Set("refuseds", &events.refuseds) stats.Set("events", &evts) var mailevts expvar.Map var goodevts expvar.Map var cmds expvar.Map mailevts.Init() goodevts.Init() cmds.Init() // Maybe these should track refused commands? Not sure. cmds.Set("ehlo", &events.ehlo) cmds.Set("mailfrom", &events.mailfrom) cmds.Set("rcptto", &events.rcptto) cmds.Set("data", &events.data) mailevts.Set("commands", &cmds) // These are counts of *accepted* commands. // TODO: maybe revise how things are counted? Dunno. goodevts.Set("ehlo", &events.ehloAccept) goodevts.Set("mailfrom", &events.mailfromAccept) goodevts.Set("rcptto", &events.rcpttoAccept) goodevts.Set("data", &events.dataAccept) goodevts.Set("messages", &events.messages) mailevts.Set("accepted", &goodevts) mailevts.Set("ehlo_tlson", &events.tlson) mailevts.Set("quits", &events.quits) mailevts.Set("aborts", &events.aborts) mailevts.Set("rsets", &events.rsets) stats.Set("smtpcounts", &mailevts) // constants stats.Add("pid", int64(os.Getpid())) var stime expvar.String stime.Set(time.Now().String()) stats.Set("startTime", &stime) var conntime expvar.String times.Init() // We're going to have connections, so we set this now. 
times.Set("connection", &conntime) stats.Set("last", ×) }
// HTTP Live Streaming support. // Parse and probe M3U8 playlists (multi- and single bitrate) // and report time statistics and errors func CupertinoProber(ctl *bcast.Group, tasks chan *Task, debugvars *expvar.Map) { var result *Result defer func() { if r := recover(); r != nil { fmt.Println("trace dumped in HLS prober:", r) } }() queueCount := debugvars.Get("hls-tasks-queue") queueCount.(*expvar.Int).Set(int64(len(tasks))) for { task := <-tasks if time.Now().Before(task.TTL) { result = ExecHTTP(task) if result.ErrType < ERROR_LEVEL && result.HTTPCode < 400 && result.ContentLength > 0 { playlist, listType, err := m3u8.Decode(result.Body, true) if err != nil { result.ErrType = BADFORMAT } else { switch listType { case m3u8.MASTER: m := playlist.(*m3u8.MasterPlaylist) subresult := make(chan *Result, 24) mainuri, err := url.Parse(task.URI) if err != nil { result.ErrType = UNKERR goto End } for _, variant := range m.Variants { uri, err := url.Parse(variant.URI) if err != nil { subresult <- &Result{Task: &Task{Tid: task.Tid, Stream: Stream{variant.URI, HLS, task.Name, task.Title, task.Group}}, ErrType: BADURI, Started: time.Now()} continue } var suburi string if uri.IsAbs() { // absolute URI suburi = variant.URI } else { // relative URI if variant.URI[0] == '/' { // from the root suburi = fmt.Sprintf("%s://%s%s", mainuri.Scheme, mainuri.Host, variant.URI) } else { // last element splitted := strings.Split(task.URI, "/") splitted[len(splitted)-1] = variant.URI suburi = strings.Join(splitted, "/") } } subtask := &Task{Tid: task.Tid, Stream: Stream{suburi, HLS, task.Name, task.Title, task.Group}, ReadBody: task.ReadBody, TTL: task.TTL} go func(subtask *Task) { subresult <- ExecHTTP(subtask) }(subtask) } taskCount := len(m.Variants) for taskCount > 0 { select { case data := <-subresult: result.SubResults = append(result.SubResults, data) case <-time.After(60 * time.Second): } taskCount-- } case m3u8.MEDIA: p := playlist.(*m3u8.MediaPlaylist) p.Encode().String() default: 
result.ErrType = BADFORMAT } } } debugvars.Add("hls-tasks-done", 1) } else { result = TaskExpired(task) debugvars.Add("hls-tasks-expired", 1) } End: task.ReplyTo <- result debugvars.Add("hls-tasks-done", 1) } }
func newHandshakeStats(ns *expvar.Map) *handshakeStats { s := &handshakeStats{ Successes: &expvar.Int{}, Errs: &expvar.Int{}, ReadTimeouts: &expvar.Int{}, WriteTimeouts: &expvar.Int{}, UnknownTimeouts: &expvar.Int{}, EOFs: &expvar.Int{}, } ns.Set("successes", s.Successes) ns.Set("errors", s.Errs) ns.Set("read_timeouts", s.ReadTimeouts) ns.Set("write_timeouts", s.WriteTimeouts) ns.Set("unknown_timeouts", s.UnknownTimeouts) ns.Set("eofs", s.EOFs) return s }
// Listen connects to the RabbitMQ broker at host:port, binds an anonymous
// exclusive queue to the "logs" fanout exchange, and passes every message
// body to MessageRead. Each delivery also bumps the "Rabbitmq get"
// counter in counts. The function blocks forever; any setup failure
// aborts via failOnError.
func Listen(host string, port int, counts *expvar.Map, MessageRead func([]byte)) {
	// Credentials are redacted in source; the URL shape is amqp://user:pass@host:port/.
	conn, err := amqp.Dial(fmt.Sprintf("amqp://*****:*****@%s:%v/", host, port))
	failOnError(err, "Failed to connect to RabbitMQ")
	defer conn.Close()

	ch, err := conn.Channel()
	failOnError(err, "Failed to open a channel")
	defer ch.Close()

	err = ch.ExchangeDeclare(
		"logs",   // name
		"fanout", // type
		true,     // durable
		false,    // auto-deleted
		false,    // internal
		false,    // no-wait
		nil,      // arguments
	)
	failOnError(err, "Failed to declare an exchange")

	// Anonymous queue: the broker names it, and it is dropped when this
	// consumer disconnects (exclusive + non-durable).
	q, err := ch.QueueDeclare(
		"",    // name
		false, // durable
		false, // delete when unused
		true,  // exclusive
		false, // no-wait
		nil,   // arguments
	)
	failOnError(err, "Failed to declare a queue")

	err = ch.QueueBind(
		q.Name, // queue name
		"",     // routing key (ignored by fanout exchanges)
		"logs", // exchange
		false,
		nil)
	failOnError(err, "Failed to bind a queue")

	msgs, err := ch.Consume(
		q.Name, // queue
		"",     // consumer
		true,   // auto-ack
		false,  // exclusive
		false,  // no-local
		false,  // no-wait
		nil,    // args
	)
	failOnError(err, "Failed to register a consumer")

	forever := make(chan bool)

	// Consume deliveries until the channel closes; count each one.
	go func() {
		for d := range msgs {
			counts.Add("Rabbitmq get", 1)
			MessageRead(d.Body)
		}
	}()

	log.Printf(" [*] Waiting for logs. To exit press CTRL+C")
	<-forever
}
// ExampleExpvarCollector demonstrates re-exporting expvar-published
// metrics as Prometheus metrics via NewExpvarCollector. The "// Output:"
// comment at the end is the example's expected stdout and must stay in
// sync with the code above it.
func ExampleExpvarCollector() {
	expvarCollector := prometheus.NewExpvarCollector(map[string]*prometheus.Desc{
		"memstats": prometheus.NewDesc(
			"expvar_memstats",
			"All numeric memstats as one metric family. Not a good role-model, actually... ;-)",
			[]string{"type"}, nil,
		),
		"lone-int": prometheus.NewDesc(
			"expvar_lone_int",
			"Just an expvar int as an example.",
			nil, nil,
		),
		"http-request-map": prometheus.NewDesc(
			"expvar_http_request_total",
			"How many http requests processed, partitioned by status code and http method.",
			[]string{"code", "method"}, nil,
		),
	})
	prometheus.MustRegister(expvarCollector)

	// The Prometheus part is done here. But to show that this example is
	// doing anything, we have to manually export something via expvar. In
	// real-life use-cases, some library would already have exported via
	// expvar what we want to re-export as Prometheus metrics.
	expvar.NewInt("lone-int").Set(42)
	expvarMap := expvar.NewMap("http-request-map")
	var (
		expvarMap1, expvarMap2                             expvar.Map
		expvarInt11, expvarInt12, expvarInt21, expvarInt22 expvar.Int
	)
	expvarMap1.Init()
	expvarMap2.Init()
	expvarInt11.Set(3)
	expvarInt12.Set(13)
	expvarInt21.Set(11)
	expvarInt22.Set(212)
	expvarMap1.Set("POST", &expvarInt11)
	expvarMap1.Set("GET", &expvarInt12)
	expvarMap2.Set("POST", &expvarInt21)
	expvarMap2.Set("GET", &expvarInt22)
	expvarMap.Set("404", &expvarMap1)
	expvarMap.Set("200", &expvarMap2)
	// Results in the following expvar map:
	// "http-request-count": {"200": {"POST": 11, "GET": 212}, "404": {"POST": 3, "GET": 13}}

	// Let's see what the scrape would yield, but exclude the memstats metrics.
	metricStrings := []string{}
	metric := dto.Metric{}
	metricChan := make(chan prometheus.Metric)
	go func() {
		expvarCollector.Collect(metricChan)
		close(metricChan)
	}()
	for m := range metricChan {
		// Skip the noisy memstats family; keep everything else for output.
		if strings.Index(m.Desc().String(), "expvar_memstats") == -1 {
			metric.Reset()
			m.Write(&metric)
			metricStrings = append(metricStrings, metric.String())
		}
	}
	// Sort so the example output is deterministic.
	sort.Strings(metricStrings)
	for _, s := range metricStrings {
		fmt.Println(strings.TrimRight(s, " "))
	}
	// Output:
	// label:<name:"code" value:"200" > label:<name:"method" value:"GET" > untyped:<value:212 >
	// label:<name:"code" value:"200" > label:<name:"method" value:"POST" > untyped:<value:11 >
	// label:<name:"code" value:"404" > label:<name:"method" value:"GET" > untyped:<value:13 >
	// label:<name:"code" value:"404" > label:<name:"method" value:"POST" > untyped:<value:3 >
	// untyped:<value:42 >
}