func TestPrometheusCounter(t *testing.T) {
	c := prometheus.NewCounter(stdprometheus.CounterOpts{
		Namespace: "test",
		Subsystem: "prometheus_counter",
		Name: "foobar",
		Help: "Lorem ipsum.",
	}, []string{})
	c.Add(1)
	c.Add(2)
	if want, have := strings.Join([]string{
		`# HELP test_prometheus_counter_foobar Lorem ipsum.`,
		`# TYPE test_prometheus_counter_foobar counter`,
		`test_prometheus_counter_foobar 3`,
	}, "\n"), teststat.ScrapePrometheus(t); !strings.Contains(have, want) {
		t.Errorf("metric stanza not found or incorrect\n%s", have)
	}
	c.Add(3)
	c.Add(4)
	if want, have := strings.Join([]string{
		`# HELP test_prometheus_counter_foobar Lorem ipsum.`,
		`# TYPE test_prometheus_counter_foobar counter`,
		`test_prometheus_counter_foobar 10`,
	}, "\n"), teststat.ScrapePrometheus(t); !strings.Contains(have, want) {
		t.Errorf("metric stanza not found or incorrect\n%s", have)
	}
}
func TestMultiWith(t *testing.T) {
	c := metrics.NewMultiCounter(
		"multifoo",
		expvar.NewCounter("foo"),
		prometheus.NewCounter(stdprometheus.CounterOpts{
			Namespace: "test",
			Subsystem: "multi_with",
			Name: "bar",
			Help: "Bar counter.",
		}, []string{"a"}),
	)
	c.Add(1)
	c.With(metrics.Field{Key: "a", Value: "1"}).Add(2)
	c.Add(3)
	if want, have := strings.Join([]string{
		`# HELP test_multi_with_bar Bar counter.`,
		`# TYPE test_multi_with_bar counter`,
		`test_multi_with_bar{a="1"} 2`,
		`test_multi_with_bar{a="unknown"} 4`,
	}, "\n"), scrapePrometheus(t); !strings.Contains(have, want) {
		t.Errorf("Prometheus metric stanza not found or incorrect\n%s", have)
	}
}
func main() {
	var (
		listen = flag.String("listen", ":8080", "HTTP listen address")
		proxy  = flag.String("proxy", "", "Optional comma-separated list of URLs to proxy uppercase requests")
	)
	flag.Parse()

	var logger log.Logger
	logger = log.NewLogfmtLogger(os.Stderr)
	logger = log.NewContext(logger).With("listen", *listen).With("caller", log.DefaultCaller)

	ctx := context.Background()

	fieldKeys := []string{"method", "error"}
	requestCount := kitprometheus.NewCounter(stdprometheus.CounterOpts{
		Namespace: "my_group",
		Subsystem: "string_service",
		Name: "request_count",
		Help: "Number of requests received.",
	}, fieldKeys)
	requestLatency := metrics.NewTimeHistogram(time.Microsecond, kitprometheus.NewSummary(stdprometheus.SummaryOpts{
		Namespace: "my_group",
		Subsystem: "string_service",
		Name: "request_latency_microseconds",
		Help: "Total duration of requests in microseconds.",
	}, fieldKeys))
	countResult := kitprometheus.NewSummary(stdprometheus.SummaryOpts{
		Namespace: "my_group",
		Subsystem: "string_service",
		Name: "count_result",
		Help: "The result of each count method.",
	}, []string{})

	var svc StringService
	svc = stringService{}
	svc = proxyingMiddleware(*proxy, ctx, logger)(svc)
	svc = loggingMiddleware(logger)(svc)
	svc = instrumentingMiddleware(requestCount, requestLatency, countResult)(svc)

	uppercaseHandler := httptransport.NewServer(
		ctx,
		makeUppercaseEndpoint(svc),
		decodeUppercaseRequest,
		encodeResponse,
	)
	countHandler := httptransport.NewServer(
		ctx,
		makeCountEndpoint(svc),
		decodeCountRequest,
		encodeResponse,
	)

	http.Handle("/uppercase", uppercaseHandler)
	http.Handle("/count", countHandler)
	http.Handle("/metrics", stdprometheus.Handler())
	logger.Log("msg", "HTTP", "addr", *listen)
	logger.Log("err", http.ListenAndServe(*listen, nil))
}
func main() {
	ctx := context.Background()
	logger := log.NewLogfmtLogger(os.Stderr)

	fieldKeys := []string{"method", "error"}
	requestCount := kitprometheus.NewCounter(stdprometheus.CounterOpts{
		Namespace: "my_group",
		Subsystem: "string_service",
		Name: "request_count",
		Help: "Number of requests received.",
	}, fieldKeys)
	requestLatency := metrics.NewTimeHistogram(time.Microsecond, kitprometheus.NewSummary(stdprometheus.SummaryOpts{
		Namespace: "my_group",
		Subsystem: "string_service",
		Name: "request_latency_microseconds",
		Help: "Total duration of requests in microseconds.",
	}, fieldKeys))
	countResult := kitprometheus.NewSummary(stdprometheus.SummaryOpts{
		Namespace: "my_group",
		Subsystem: "string_service",
		Name: "count_result",
		Help: "The result of each count method.",
	}, []string{}) // no fields here

	var svc StringService
	svc = stringService{}
	svc = loggingMiddleware{logger, svc}
	svc = instrumentingMiddleware{requestCount, requestLatency, countResult, svc}

	var uppercase endpoint.Endpoint
	uppercase = makeUppercaseEndpoint(svc)

	var count endpoint.Endpoint
	count = makeCountEndpoint(svc)

	uppercaseHandler := httptransport.Server{
		Context: ctx,
		Endpoint: uppercase,
		DecodeRequestFunc: decodeUppercaseRequest,
		EncodeResponseFunc: encodeResponse,
	}
	countHandler := httptransport.Server{
		Context: ctx,
		Endpoint: count,
		DecodeRequestFunc: decodeCountRequest,
		EncodeResponseFunc: encodeResponse,
	}

	http.Handle("/uppercase", uppercaseHandler)
	http.Handle("/count", countHandler)
	http.Handle("/metrics", stdprometheus.Handler())
	stdlog.Fatal(http.ListenAndServe(":8080", nil))
}
func main() {
	ctx := context.Background()
	logger := log.NewLogfmtLogger(os.Stderr)

	// Define metrics to be collected for this service.
	fieldKeys := []string{"method", "error"}
	requestCount := kitprometheus.NewCounter(stdprometheus.CounterOpts{
		Namespace: "my_group",
		Subsystem: "string_service",
		Name: "request_count",
		Help: "Number of requests received.",
	}, fieldKeys)
	requestLatency := metrics.NewTimeHistogram(time.Microsecond, kitprometheus.NewSummary(stdprometheus.SummaryOpts{
		Namespace: "my_group",
		Subsystem: "string_service",
		Name: "request_latency_microseconds",
		Help: "Total duration of requests in microseconds.",
	}, fieldKeys))
	countResult := kitprometheus.NewSummary(stdprometheus.SummaryOpts{
		Namespace: "my_group",
		Subsystem: "string_service",
		Name: "count_result",
		Help: "The result of each count method.",
	}, []string{})

	// Declare the string service.
	var svc StringService
	svc = stringService{}
	svc = loggingMiddleware{logger, svc}
	svc = instrumentingMiddleware{requestCount, requestLatency, countResult, svc}

	// Declare and define an HTTP server that exposes the string service to clients via httptransport.
	uppercaseHandler := httptransport.NewServer(
		ctx,
		makeUppercaseEndpoint(svc),
		decodeUppercaseRequest,
		encodeResponse,
	)
	countHandler := httptransport.NewServer(
		ctx,
		makeCountEndpoint(svc),
		decodeCountRequest,
		encodeResponse,
	)

	// Define the content routes.
	http.Handle("/uppercase", uppercaseHandler)
	http.Handle("/count", countHandler)
	http.Handle("/metrics", stdprometheus.Handler())
	logger.Log("msg", "HTTP", "addr", ":8080")
	logger.Log("err", http.ListenAndServe(":8080", nil))
}
func TestPrometheusLabelBehavior(t *testing.T) {
	c := prometheus.NewCounter(stdprometheus.CounterOpts{
		Namespace: "test",
		Subsystem: "prometheus_label_behavior",
		Name: "foobar",
		Help: "Abc def.",
	}, []string{"used_key", "unused_key"})
	c.With(metrics.Field{Key: "used_key", Value: "declared"}).Add(1)
	c.Add(1)
	if want, have := strings.Join([]string{
		`# HELP test_prometheus_label_behavior_foobar Abc def.`,
		`# TYPE test_prometheus_label_behavior_foobar counter`,
		`test_prometheus_label_behavior_foobar{unused_key="unknown",used_key="declared"} 1`,
		`test_prometheus_label_behavior_foobar{unused_key="unknown",used_key="unknown"} 1`,
	}, "\n"), teststat.ScrapePrometheus(t); !strings.Contains(have, want) {
		t.Errorf("metric stanza not found or incorrect\n%s", have)
	}
}
func main() {
	var (
		listen = flag.String("listen", ":8080", "HTTP listen address")
		target = flag.String("proxy", "https://www.marksandspencer.com", "target URL for reverse proxy")
	)
	flag.Parse()

	var logger log.Logger
	logger = log.NewLogfmtLogger(os.Stderr)
	logger = log.NewContext(logger).With("listen", *listen).With("caller", log.DefaultCaller)

	fieldKeys := []string{"method", "error"}
	requestCount := kitprometheus.NewCounter(stdprometheus.CounterOpts{
		Namespace: "my_group",
		Subsystem: "string_service",
		Name: "request_count",
		Help: "Number of requests received.",
	}, fieldKeys)
	requestLatency := metrics.NewTimeHistogram(time.Microsecond, kitprometheus.NewSummary(stdprometheus.SummaryOpts{
		Namespace: "my_group",
		Subsystem: "string_service",
		Name: "request_latency_microseconds",
		Help: "Total duration of requests in microseconds.",
	}, fieldKeys))

	u, err := url.Parse(*target)
	if err != nil {
		logger.Log("err", err) // log the parse error as a key/value pair
	}

	var svc http.Handler
	svc = newReverseProxy(u)
	svc = loggingMiddleware(logger)(svc)
	svc = instrumentingMiddleware(requestCount, requestLatency)(svc)

	http.Handle("/", svc)
	http.Handle("/_metrics", stdprometheus.Handler())

	logger.Log("msg", "HTTP", "addr", *listen)
	logger.Log("err", http.ListenAndServe(*listen, nil))
}
func Register() {
	ctx := context.Background()

	fieldKeys := []string{"method", "error"}
	requestCount := gkprometheus.NewCounter(prometheus.CounterOpts{
		Namespace: "blueplanet",
		Subsystem: "bp2_service",
		Name: "request_count",
		Help: "Number of requests received.",
	}, fieldKeys)
	requestLatency := gkmetrics.NewTimeHistogram(time.Microsecond, gkprometheus.NewSummary(prometheus.SummaryOpts{
		Namespace: "blueplanet",
		Subsystem: "bp2_service",
		Name: "request_latency_microseconds",
		Help: "Total duration of requests in microseconds.",
	}, fieldKeys))

	var svc StringService
	svc = stringService{}
	svc = instrumentingMiddleware{requestCount, requestLatency, svc}

	uppercaseHandler := gkhttptransport.NewServer(
		ctx,
		makeUppercaseEndpoint(svc),
		decodeUppercaseRequest,
		encodeResponse,
	)
	countHandler := gkhttptransport.NewServer(
		ctx,
		makeCountEndpoint(svc),
		decodeCountRequest,
		encodeResponse,
	)

	http.Handle("/string/uppercase", uppercaseHandler)
	http.Handle("/string/count", countHandler)
	http.Handle("/string/metrics", prometheus.Handler())
}
func TestMultiCounter(t *testing.T) {
	metrics.NewMultiCounter(
		expvar.NewCounter("alpha"),
		prometheus.NewCounter(stdprometheus.CounterOpts{
			Namespace: "test",
			Subsystem: "multi_counter",
			Name: "beta",
			Help: "Beta counter.",
		}, []string{"a"}),
	).With(metrics.Field{Key: "a", Value: "b"}).Add(123)

	if want, have := "123", stdexpvar.Get("alpha").String(); want != have {
		t.Errorf("expvar: want %q, have %q", want, have)
	}
	if want, have := strings.Join([]string{
		`# HELP test_multi_counter_beta Beta counter.`,
		`# TYPE test_multi_counter_beta counter`,
		`test_multi_counter_beta{a="b"} 123`,
	}, "\n"), scrapePrometheus(t); !strings.Contains(have, want) {
		t.Errorf("Prometheus metric stanza not found or incorrect\n%s", have)
	}
}
func makeInstrumentation(namespace, name, helpCounter, helpDuration string) (metrics.Counter, metrics.TimeHistogram) {
	counter := metrics.NewMultiCounter(
		expvar.NewCounter(fmt.Sprintf("requests_%s", name)),
		statsd.NewCounter(ioutil.Discard, fmt.Sprintf("requests_%s_total", name), time.Second),
		prometheus.NewCounter(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: name,
			Name: "requests_total",
			Help: helpCounter,
		}, []string{}),
	)
	duration := metrics.NewTimeHistogram(time.Nanosecond, metrics.NewMultiHistogram(
		expvar.NewHistogram(fmt.Sprintf("duration_%s_nanoseconds_total", name), 0, 1e9, 3, 50, 95, 99),
		statsd.NewHistogram(ioutil.Discard, fmt.Sprintf("duration_%s_nanoseconds_total", name), time.Second),
		prometheus.NewSummary(stdprometheus.SummaryOpts{
			Namespace: namespace,
			Subsystem: name,
			Name: "duration_nanoseconds_total",
			Help: helpDuration,
		}, []string{}),
	))
	return counter, duration
}
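A minimal usage sketch, not taken from any of the projects above: it assumes only the go-kit metrics.Counter.Add and metrics.TimeHistogram.Observe methods used elsewhere in these examples; the record helper and its begin parameter are hypothetical names for illustration.

// record is a hypothetical call site for the pair returned by makeInstrumentation:
// it counts one handled request and observes its elapsed time (the TimeHistogram
// converts the duration to its configured unit, nanoseconds above).
func record(counter metrics.Counter, duration metrics.TimeHistogram, begin time.Time) {
	counter.Add(1)
	duration.Observe(time.Since(begin))
}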
func main() {
	fieldKeys := []string{"method", "error"}
	requestCount := kitprometheus.NewCounter(stdprometheus.CounterOpts{
		Namespace: "user_srv",
		Subsystem: "account_service",
		Name: "request_count",
		Help: "Number of requests received.",
	}, fieldKeys)
	requestLatency := metrics.NewTimeHistogram(time.Microsecond, kitprometheus.NewSummary(stdprometheus.SummaryOpts{
		Namespace: "user_srv",
		Subsystem: "account_service",
		Name: "request_latency",
		Help: "Total duration of requests in microseconds.",
	}, fieldKeys))
	countResult := kitprometheus.NewSummary(stdprometheus.SummaryOpts{
		Namespace: "user_srv",
		Subsystem: "account_service",
		Name: "count_result",
		Help: "The result of each count method.",
	}, []string{})

	var ctx context.Context
	{
		ctx = context.Background()
	}

	var logger log.Logger
	{
		logger = log.NewLogfmtLogger(os.Stderr)
	}

	var db batallion.Database
	{
		db = batallion.Database{
			Host: "localhost",
			Name: "micro",
		}
	}
	db.Connect()

	var svc batallion.AccountService
	{
		svc = batallion.AccountSvc{Collection: db.Handler.C("accounts")}
		svc = batallion.LoggingMiddleware{logger, svc}
		svc = batallion.InstrumentingMiddleware{requestCount, requestLatency, countResult, svc}
	}

	var h http.Handler
	{
		h = batallion.MakeHTTPHandler(ctx, svc, log.NewContext(logger).With("component", "HTTP"))
	}

	errs := make(chan error)
	go func() {
		logger.Log("transport", "HTTP", "addr", "8010")
		errs <- http.ListenAndServe(":8010", h)
	}()

	logger.Log("exit", <-errs)
}
func main() {
	// Flag domain. Note that gRPC transitively registers flags via its import
	// of glog. So, we define a new flag set, to keep those domains distinct.
	fs := flag.NewFlagSet("", flag.ExitOnError)
	var (
		debugAddr = fs.String("debug.addr", ":8000", "Address for HTTP debug/instrumentation server")
		httpAddr = fs.String("http.addr", ":8001", "Address for HTTP (JSON) server")
		netrpcAddr = fs.String("netrpc.addr", ":8003", "Address for net/rpc server")
		proxyHTTPAddr = fs.String("proxy.http.url", "", "if set, proxy requests over HTTP to this addsvc")
		zipkinServiceName = fs.String("zipkin.service.name", "addsvc", "Zipkin service name")
		zipkinCollectorAddr = fs.String("zipkin.collector.addr", "", "Zipkin Scribe collector address (empty will log spans)")
		zipkinCollectorTimeout = fs.Duration("zipkin.collector.timeout", time.Second, "Zipkin collector timeout")
		zipkinCollectorBatchSize = fs.Int("zipkin.collector.batch.size", 100, "Zipkin collector batch size")
		zipkinCollectorBatchInterval = fs.Duration("zipkin.collector.batch.interval", time.Second, "Zipkin collector batch interval")
	)
	flag.Usage = fs.Usage // only show our flags
	fs.Parse(os.Args[1:])

	// `package log` domain
	var logger kitlog.Logger
	logger = kitlog.NewLogfmtLogger(os.Stderr)
	logger = kitlog.NewContext(logger).With("ts", kitlog.DefaultTimestampUTC)
	stdlog.SetOutput(kitlog.NewStdlibAdapter(logger)) // redirect stdlib logging to us
	stdlog.SetFlags(0)                                // flags are handled in our logger

	// `package metrics` domain
	requests := metrics.NewMultiCounter(
		expvar.NewCounter("requests"),
		statsd.NewCounter(ioutil.Discard, "requests_total", time.Second),
		prometheus.NewCounter(stdprometheus.CounterOpts{
			Namespace: "addsvc",
			Subsystem: "add",
			Name: "requests_total",
			Help: "Total number of received requests.",
		}, []string{}),
	)
	duration := metrics.NewTimeHistogram(time.Nanosecond, metrics.NewMultiHistogram(
		expvar.NewHistogram("duration_nanoseconds_total", 0, 1e9, 3, 50, 95, 99),
		statsd.NewHistogram(ioutil.Discard, "duration_nanoseconds_total", time.Second),
		prometheus.NewSummary(stdprometheus.SummaryOpts{
			Namespace: "addsvc",
			Subsystem: "add",
			Name: "duration_nanoseconds_total",
			Help: "Total nanoseconds spent serving requests.",
		}, []string{}),
	))
	_, _ = requests, duration

	// `package tracing` domain
	zipkinHostPort := "localhost:1234" // TODO Zipkin makes overly simple assumptions about services
	var zipkinCollector zipkin.Collector = loggingCollector{logger}
	if *zipkinCollectorAddr != "" {
		var err error
		if zipkinCollector, err = zipkin.NewScribeCollector(
			*zipkinCollectorAddr,
			*zipkinCollectorTimeout,
			zipkin.ScribeBatchSize(*zipkinCollectorBatchSize),
			zipkin.ScribeBatchInterval(*zipkinCollectorBatchInterval),
			zipkin.ScribeLogger(logger),
		); err != nil {
			logger.Log("err", err)
			os.Exit(1)
		}
	}
	zipkinMethodName := "add"
	zipkinSpanFunc := zipkin.MakeNewSpanFunc(zipkinHostPort, *zipkinServiceName, zipkinMethodName)

	// Our business and operational domain
	var a add.Adder = pureAdd{}
	if *proxyHTTPAddr != "" {
		var e endpoint.Endpoint
		e = add.NewAdderAddHTTPClient("GET", *proxyHTTPAddr, zipkin.ToRequest(zipkinSpanFunc))
		e = zipkin.AnnotateClient(zipkinSpanFunc, zipkinCollector)(e)
		a = add.MakeAdderClient(func(method string) endpoint.Endpoint {
			if method != "Add" {
				panic(fmt.Errorf("unknown method %s", method))
			}
			return e
		})
	}
	// This could happen at endpoint level.
	// a = logging(logger)(a)
	// a = instrument(requests, duration)(a)

	// Server domain
	var e endpoint.Endpoint
	e = add.MakeAdderEndpoints(a).Add
	e = zipkin.AnnotateServer(zipkinSpanFunc, zipkinCollector)(e)

	// Mechanical stuff
	rand.Seed(time.Now().UnixNano())
	root := context.Background()
	errc := make(chan error)

	go func() {
		errc <- interrupt()
	}()

	// Transport: HTTP (debug/instrumentation)
	go func() {
		logger.Log("addr", *debugAddr, "transport", "debug")
		errc <- http.ListenAndServe(*debugAddr, nil)
	}()

	// Transport: HTTP (JSON)
	go func() {
		ctx, cancel := context.WithCancel(root)
		defer cancel()

		before := []httptransport.RequestFunc{zipkin.ToContext(zipkinSpanFunc, logger)}
		after := []httptransport.ResponseFunc{}
		handler := add.MakeAdderAddHTTPBinding(ctx, e, before, after)

		logger.Log("addr", *httpAddr, "transport", "HTTP/JSON")
		errc <- http.ListenAndServe(*httpAddr, handler)
	}()

	// Transport: net/rpc
	go func() {
		ctx, cancel := context.WithCancel(root)
		defer cancel()

		s := rpc.NewServer()
		s.RegisterName("Add", add.AdderAddNetrpcBinding{ctx, e})
		s.HandleHTTP(rpc.DefaultRPCPath, rpc.DefaultDebugPath)

		logger.Log("addr", *netrpcAddr, "transport", "net/rpc")
		errc <- http.ListenAndServe(*netrpcAddr, s)
	}()

	logger.Log("fatal", <-errc)
}
func main() {
	// Flag domain. Note that gRPC transitively registers flags via its import
	// of glog. So, we define a new flag set, to keep those domains distinct.
	fs := flag.NewFlagSet("", flag.ExitOnError)
	var (
		debugAddr = fs.String("debug.addr", ":8000", "Address for HTTP debug/instrumentation server")
		httpAddr = fs.String("http.addr", ":8001", "Address for HTTP (JSON) server")
		grpcAddr = fs.String("grpc.addr", ":8002", "Address for gRPC server")
		netrpcAddr = fs.String("netrpc.addr", ":8003", "Address for net/rpc server")
		thriftAddr = fs.String("thrift.addr", ":8004", "Address for Thrift server")
		thriftProtocol = fs.String("thrift.protocol", "binary", "binary, compact, json, simplejson")
		thriftBufferSize = fs.Int("thrift.buffer.size", 0, "0 for unbuffered")
		thriftFramed = fs.Bool("thrift.framed", false, "true to enable framing")
		proxyHTTPAddr = fs.String("proxy.http.url", "", "if set, proxy requests over HTTP to this addsvc")
		zipkinServiceName = fs.String("zipkin.service.name", "addsvc", "Zipkin service name")
		zipkinCollectorAddr = fs.String("zipkin.collector.addr", "", "Zipkin Scribe collector address (empty will log spans)")
		zipkinCollectorTimeout = fs.Duration("zipkin.collector.timeout", time.Second, "Zipkin collector timeout")
		zipkinCollectorBatchSize = fs.Int("zipkin.collector.batch.size", 100, "Zipkin collector batch size")
		zipkinCollectorBatchInterval = fs.Duration("zipkin.collector.batch.interval", time.Second, "Zipkin collector batch interval")
	)
	flag.Usage = fs.Usage // only show our flags
	fs.Parse(os.Args[1:])

	// `package log` domain
	var logger kitlog.Logger
	logger = kitlog.NewLogfmtLogger(os.Stderr)
	logger = kitlog.With(logger, "ts", kitlog.DefaultTimestampUTC)
	stdlog.SetOutput(kitlog.NewStdlibAdapter(logger)) // redirect stdlib logging to us
	stdlog.SetFlags(0)                                // flags are handled in our logger

	// `package metrics` domain
	requests := metrics.NewMultiCounter(
		expvar.NewCounter("requests"),
		statsd.NewCounter(ioutil.Discard, "requests_total", time.Second),
		prometheus.NewCounter(stdprometheus.CounterOpts{
			Namespace: "addsvc",
			Subsystem: "add",
			Name: "requests_total",
			Help: "Total number of received requests.",
		}, []string{}),
	)
	duration := metrics.NewTimeHistogram(time.Nanosecond, metrics.NewMultiHistogram(
		expvar.NewHistogram("duration_nanoseconds_total", 0, 1e9, 3, 50, 95, 99),
		statsd.NewHistogram(ioutil.Discard, "duration_nanoseconds_total", time.Second),
		prometheus.NewSummary(stdprometheus.SummaryOpts{
			Namespace: "addsvc",
			Subsystem: "add",
			Name: "duration_nanoseconds_total",
			Help: "Total nanoseconds spent serving requests.",
		}, []string{}),
	))

	// `package tracing` domain
	zipkinHostPort := "localhost:1234" // TODO Zipkin makes overly simple assumptions about services
	var zipkinCollector zipkin.Collector = loggingCollector{logger}
	if *zipkinCollectorAddr != "" {
		var err error
		if zipkinCollector, err = zipkin.NewScribeCollector(
			*zipkinCollectorAddr,
			*zipkinCollectorTimeout,
			*zipkinCollectorBatchSize,
			*zipkinCollectorBatchInterval,
		); err != nil {
			logger.Log("err", err)
			os.Exit(1)
		}
	}
	zipkinMethodName := "add"
	zipkinSpanFunc := zipkin.MakeNewSpanFunc(zipkinHostPort, *zipkinServiceName, zipkinMethodName)
	zipkin.Log.Swap(logger) // log diagnostic/error details

	// Our business and operational domain
	var a Add = pureAdd
	if *proxyHTTPAddr != "" {
		var e endpoint.Endpoint
		e = httpclient.NewClient("GET", *proxyHTTPAddr, zipkin.ToRequest(zipkinSpanFunc))
		e = zipkin.AnnotateClient(zipkinSpanFunc, zipkinCollector)(e)
		a = proxyAdd(e, logger)
	}
	a = logging(logger)(a)
	a = instrument(requests, duration)(a)

	// Server domain
	var e endpoint.Endpoint
	e = makeEndpoint(a)
	e = zipkin.AnnotateServer(zipkinSpanFunc, zipkinCollector)(e)

	// Mechanical stuff
	rand.Seed(time.Now().UnixNano())
	root := context.Background()
	errc := make(chan error)

	go func() {
		errc <- interrupt()
	}()

	// Transport: HTTP (debug/instrumentation)
	go func() {
		logger.Log("addr", *debugAddr, "transport", "debug")
		errc <- http.ListenAndServe(*debugAddr, nil)
	}()

	// Transport: HTTP (JSON)
	go func() {
		ctx, cancel := context.WithCancel(root)
		defer cancel()

		before := []httptransport.BeforeFunc{zipkin.ToContext(zipkinSpanFunc)}
		after := []httptransport.AfterFunc{}
		handler := makeHTTPBinding(ctx, e, before, after)

		logger.Log("addr", *httpAddr, "transport", "HTTP/JSON")
		errc <- http.ListenAndServe(*httpAddr, handler)
	}()

	// Transport: gRPC
	go func() {
		ln, err := net.Listen("tcp", *grpcAddr)
		if err != nil {
			errc <- err
			return
		}
		s := grpc.NewServer() // uses its own context?
		pb.RegisterAddServer(s, grpcBinding{e})
		logger.Log("addr", *grpcAddr, "transport", "gRPC")
		errc <- s.Serve(ln)
	}()

	// Transport: net/rpc
	go func() {
		ctx, cancel := context.WithCancel(root)
		defer cancel()

		s := rpc.NewServer()
		s.RegisterName("addsvc", NetrpcBinding{ctx, e})
		s.HandleHTTP(rpc.DefaultRPCPath, rpc.DefaultDebugPath)

		logger.Log("addr", *netrpcAddr, "transport", "net/rpc")
		errc <- http.ListenAndServe(*netrpcAddr, s)
	}()

	// Transport: Thrift
	go func() {
		ctx, cancel := context.WithCancel(root)
		defer cancel()

		var protocolFactory thrift.TProtocolFactory
		switch *thriftProtocol {
		case "binary":
			protocolFactory = thrift.NewTBinaryProtocolFactoryDefault()
		case "compact":
			protocolFactory = thrift.NewTCompactProtocolFactory()
		case "json":
			protocolFactory = thrift.NewTJSONProtocolFactory()
		case "simplejson":
			protocolFactory = thrift.NewTSimpleJSONProtocolFactory()
		default:
			errc <- fmt.Errorf("invalid Thrift protocol %q", *thriftProtocol)
			return
		}

		var transportFactory thrift.TTransportFactory
		if *thriftBufferSize > 0 {
			transportFactory = thrift.NewTBufferedTransportFactory(*thriftBufferSize)
		} else {
			transportFactory = thrift.NewTTransportFactory()
		}
		if *thriftFramed {
			transportFactory = thrift.NewTFramedTransportFactory(transportFactory)
		}

		transport, err := thrift.NewTServerSocket(*thriftAddr)
		if err != nil {
			errc <- err
			return
		}

		logger.Log("addr", *thriftAddr, "transport", "Thrift")
		errc <- thrift.NewTSimpleServer4(
			thriftadd.NewAddServiceProcessor(thriftBinding{ctx, e}),
			transport,
			transportFactory,
			protocolFactory,
		).Serve()
	}()

	logger.Log("fatal", <-errc)
}
func (s *LogService) Register(h *hitch.Hitch) error {
	walkthroughPlayed := prometheus.NewCounter(stdprometheus.CounterOpts{
		Namespace: "walkhub",
		Subsystem: "metrics",
		Name: "walkthrough_played",
		Help: "Number of walkthrough plays",
	}, []string{"uuid", "embedorigin"})
	walkthroughVisited := prometheus.NewCounter(stdprometheus.CounterOpts{
		Namespace: "walkhub",
		Subsystem: "metrics",
		Name: "walkthrough_visited",
		Help: "Number of walkthrough visits",
	}, []string{"uuid", "embedorigin"})

	h.Post("/api/log/helpcenteropened", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		l := helpCenterOpenedLog{}
		ab.MustDecode(r, &l)
		db := ab.GetDB(r)
		userid := getLogUserID(r)

		message := fmt.Sprintf("%s has opened the help center on %s", userid, l.URL)
		ab.MaybeFail(r, http.StatusInternalServerError, DBLog(db, "helpcenteropened", message))
	}))

	h.Post("/api/log/walkthroughplayed", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		l := walkthroughPlayedLog{}
		ab.MustDecode(r, &l)
		db := ab.GetDB(r)
		userid := getLogUserID(r)

		wt, err := LoadActualRevision(db, l.UUID)
		ab.MaybeFail(r, http.StatusBadRequest, err)
		if wt == nil {
			ab.Fail(r, http.StatusNotFound, nil)
		}

		message := ""
		embedPart := ""
		if l.EmbedOrigin != "" {
			embedPart = "from the help center on " + l.EmbedOrigin + " "
		}
		wturl := s.BaseURL + "walkthrough/" + wt.UUID
		if l.ErrorMessage == "" {
			message = fmt.Sprintf("%s has played the walkthrough %s<%s|%s>", userid, embedPart, wturl, wt.Name)
		} else {
			message = fmt.Sprintf("%s has failed to play the walkthrough %s<%s|%s> with the error message %s", userid, embedPart, wturl, wt.Name, l.ErrorMessage)
		}

		ab.MaybeFail(r, http.StatusInternalServerError, DBLog(db, "walkthroughplayed", message))

		walkthroughPlayed.
			With(metrics.Field{Key: "uuid", Value: l.UUID}).
			With(metrics.Field{Key: "embedorigin", Value: l.EmbedOrigin}).
			Add(1)
	}))

	h.Post("/api/log/walkthroughpagevisited", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		l := walkthroughPageVisitedLog{}
		ab.MustDecode(r, &l)
		db := ab.GetDB(r)
		userid := getLogUserID(r)

		wt, err := LoadActualRevision(db, l.UUID)
		ab.MaybeFail(r, http.StatusBadRequest, err)
		if wt == nil {
			ab.Fail(r, http.StatusNotFound, nil)
		}

		embedPart := ""
		if l.EmbedOrigin != "" {
			embedPart = "embedded on " + l.EmbedOrigin + " "
		}
		wturl := s.BaseURL + "walkthrough/" + wt.UUID
		message := fmt.Sprintf("%s has visited the walkthrough page %s<%s|%s>", userid, embedPart, wturl, wt.Name)

		ab.MaybeFail(r, http.StatusInternalServerError, DBLog(db, "walkthroughvisited", message))

		walkthroughVisited.
			With(metrics.Field{Key: "uuid", Value: l.UUID}).
			With(metrics.Field{Key: "embedorigin", Value: l.EmbedOrigin}).
			Add(1)
	}))

	return nil
}
func main() {
	var (
		debugAddr = flag.String("debug.addr", ":8080", "Debug and metrics listen address")
		httpAddr = flag.String("http.addr", ":8081", "HTTP listen address")
		grpcAddr = flag.String("grpc.addr", ":8082", "gRPC (HTTP) listen address")
		thriftAddr = flag.String("thrift.addr", ":8083", "Thrift listen address")
		thriftProtocol = flag.String("thrift.protocol", "binary", "binary, compact, json, simplejson")
		thriftBufferSize = flag.Int("thrift.buffer.size", 0, "0 for unbuffered")
		thriftFramed = flag.Bool("thrift.framed", false, "true to enable framing")
		zipkinAddr = flag.String("zipkin.addr", "", "Enable Zipkin tracing via a Kafka server host:port")
		appdashAddr = flag.String("appdash.addr", "", "Enable Appdash tracing via an Appdash server host:port")
		lightstepToken = flag.String("lightstep.token", "", "Enable LightStep tracing via a LightStep access token")
	)
	flag.Parse()

	// Logging domain.
	var logger log.Logger
	{
		logger = log.NewLogfmtLogger(os.Stdout)
		logger = log.NewContext(logger).With("ts", log.DefaultTimestampUTC)
		logger = log.NewContext(logger).With("caller", log.DefaultCaller)
	}
	logger.Log("msg", "hello")
	defer logger.Log("msg", "goodbye")

	// Metrics domain.
	var ints, chars metrics.Counter
	{
		// Business level metrics.
		ints = prometheus.NewCounter(stdprometheus.CounterOpts{
			Namespace: "addsvc",
			Name: "integers_summed",
			Help: "Total count of integers summed via the Sum method.",
		}, []string{})
		chars = prometheus.NewCounter(stdprometheus.CounterOpts{
			Namespace: "addsvc",
			Name: "characters_concatenated",
			Help: "Total count of characters concatenated via the Concat method.",
		}, []string{})
	}
	var duration metrics.TimeHistogram
	{
		// Transport level metrics.
		duration = metrics.NewTimeHistogram(time.Nanosecond, prometheus.NewSummary(stdprometheus.SummaryOpts{
			Namespace: "addsvc",
			Name: "request_duration_ns",
			Help: "Request duration in nanoseconds.",
		}, []string{"method", "success"}))
	}

	// Tracing domain.
	var tracer stdopentracing.Tracer
	{
		if *zipkinAddr != "" {
			logger := log.NewContext(logger).With("tracer", "Zipkin")
			logger.Log("addr", *zipkinAddr)
			collector, err := zipkin.NewKafkaCollector(
				strings.Split(*zipkinAddr, ","),
				zipkin.KafkaLogger(logger),
			)
			if err != nil {
				logger.Log("err", err)
				os.Exit(1)
			}
			tracer, err = zipkin.NewTracer(
				zipkin.NewRecorder(collector, false, "localhost:80", "addsvc"),
			)
			if err != nil {
				logger.Log("err", err)
				os.Exit(1)
			}
		} else if *appdashAddr != "" {
			logger := log.NewContext(logger).With("tracer", "Appdash")
			logger.Log("addr", *appdashAddr)
			tracer = appdashot.NewTracer(appdash.NewRemoteCollector(*appdashAddr))
		} else if *lightstepToken != "" {
			logger := log.NewContext(logger).With("tracer", "LightStep")
			logger.Log() // probably don't want to print out the token :)
			tracer = lightstep.NewTracer(lightstep.Options{
				AccessToken: *lightstepToken,
			})
			defer lightstep.FlushLightStepTracer(tracer)
		} else {
			logger := log.NewContext(logger).With("tracer", "none")
			logger.Log()
			tracer = stdopentracing.GlobalTracer() // no-op
		}
	}

	// Business domain.
	var service addsvc.Service
	{
		service = addsvc.NewBasicService()
		service = addsvc.ServiceLoggingMiddleware(logger)(service)
		service = addsvc.ServiceInstrumentingMiddleware(ints, chars)(service)
	}

	// Endpoint domain.
	var sumEndpoint endpoint.Endpoint
	{
		sumDuration := duration.With(metrics.Field{Key: "method", Value: "Sum"})
		sumLogger := log.NewContext(logger).With("method", "Sum")

		sumEndpoint = addsvc.MakeSumEndpoint(service)
		sumEndpoint = opentracing.TraceServer(tracer, "Sum")(sumEndpoint)
		sumEndpoint = addsvc.EndpointInstrumentingMiddleware(sumDuration)(sumEndpoint)
		sumEndpoint = addsvc.EndpointLoggingMiddleware(sumLogger)(sumEndpoint)
	}
	var concatEndpoint endpoint.Endpoint
	{
		concatDuration := duration.With(metrics.Field{Key: "method", Value: "Concat"})
		concatLogger := log.NewContext(logger).With("method", "Concat")

		concatEndpoint = addsvc.MakeConcatEndpoint(service)
		concatEndpoint = opentracing.TraceServer(tracer, "Concat")(concatEndpoint)
		concatEndpoint = addsvc.EndpointInstrumentingMiddleware(concatDuration)(concatEndpoint)
		concatEndpoint = addsvc.EndpointLoggingMiddleware(concatLogger)(concatEndpoint)
	}
	endpoints := addsvc.Endpoints{
		SumEndpoint: sumEndpoint,
		ConcatEndpoint: concatEndpoint,
	}

	// Mechanical domain.
	errc := make(chan error)
	ctx := context.Background()

	// Interrupt handler.
	go func() {
		c := make(chan os.Signal, 1)
		signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
		errc <- fmt.Errorf("%s", <-c)
	}()

	// Debug listener.
	go func() {
		logger := log.NewContext(logger).With("transport", "debug")

		m := http.NewServeMux()
		m.Handle("/debug/pprof/", http.HandlerFunc(pprof.Index))
		m.Handle("/debug/pprof/cmdline", http.HandlerFunc(pprof.Cmdline))
		m.Handle("/debug/pprof/profile", http.HandlerFunc(pprof.Profile))
		m.Handle("/debug/pprof/symbol", http.HandlerFunc(pprof.Symbol))
		m.Handle("/debug/pprof/trace", http.HandlerFunc(pprof.Trace))
		m.Handle("/metrics", stdprometheus.Handler())

		logger.Log("addr", *debugAddr)
		errc <- http.ListenAndServe(*debugAddr, m)
	}()

	// HTTP transport.
	go func() {
		logger := log.NewContext(logger).With("transport", "HTTP")
		h := addsvc.MakeHTTPHandler(ctx, endpoints, tracer, logger)
		logger.Log("addr", *httpAddr)
		errc <- http.ListenAndServe(*httpAddr, h)
	}()

	// gRPC transport.
	go func() {
		logger := log.NewContext(logger).With("transport", "gRPC")

		ln, err := net.Listen("tcp", *grpcAddr)
		if err != nil {
			errc <- err
			return
		}

		srv := addsvc.MakeGRPCServer(ctx, endpoints, tracer, logger)
		s := grpc.NewServer()
		pb.RegisterAddServer(s, srv)

		logger.Log("addr", *grpcAddr)
		errc <- s.Serve(ln)
	}()

	// Thrift transport.
	go func() {
		logger := log.NewContext(logger).With("transport", "Thrift")

		var protocolFactory thrift.TProtocolFactory
		switch *thriftProtocol {
		case "binary":
			protocolFactory = thrift.NewTBinaryProtocolFactoryDefault()
		case "compact":
			protocolFactory = thrift.NewTCompactProtocolFactory()
		case "json":
			protocolFactory = thrift.NewTJSONProtocolFactory()
		case "simplejson":
			protocolFactory = thrift.NewTSimpleJSONProtocolFactory()
		default:
			errc <- fmt.Errorf("invalid Thrift protocol %q", *thriftProtocol)
			return
		}

		var transportFactory thrift.TTransportFactory
		if *thriftBufferSize > 0 {
			transportFactory = thrift.NewTBufferedTransportFactory(*thriftBufferSize)
		} else {
			transportFactory = thrift.NewTTransportFactory()
		}
		if *thriftFramed {
			transportFactory = thrift.NewTFramedTransportFactory(transportFactory)
		}

		transport, err := thrift.NewTServerSocket(*thriftAddr)
		if err != nil {
			errc <- err
			return
		}

		logger.Log("addr", *thriftAddr)
		errc <- thrift.NewTSimpleServer4(
			thriftadd.NewAddServiceProcessor(addsvc.MakeThriftHandler(ctx, endpoints)),
			transport,
			transportFactory,
			protocolFactory,
		).Serve()
	}()

	// Run!
	logger.Log("exit", <-errc)
}
"gopkg.in/tylerb/graceful.v1" ) var ( defaultImageSize = "640x640" self = "" sortBeforeMerge = false httpBuckets = []float64{199, 299, 399, 400, 403, 404, 411, 412, 499, 500, 501, 502, 503, 504, 599} counts = expvar.NewMap("counters") mReqCount = kitprom.NewCounter(prometheus.CounterOpts{ Namespace: "agostle", Subsystem: "http", Name: "requests", Help: "The HTTP requests accepted", }, []string{"path"}) mReqSize = kitprom.NewSummary(prometheus.SummaryOpts{ Namespace: "agostle", Subsystem: "http", Name: "request_size_bytes", Help: "The request size in bytes", }, []string{"path"}) mRespStatus = kitprom.NewHistogram(prometheus.HistogramOpts{ Namespace: "agostle", Subsystem: "http", Name: "response_status", Help: "The response status code", Buckets: httpBuckets, }, []string{"path"})
func main() {
	var (
		addr = envString("PORT", defaultPort)
		rsurl = envString("ROUTINGSERVICE_URL", defaultRoutingServiceURL)

		httpAddr = flag.String("http.addr", ":"+addr, "HTTP listen address")
		routingServiceURL = flag.String("service.routing", rsurl, "routing service URL")

		ctx = context.Background()
	)
	flag.Parse()

	var logger log.Logger
	logger = log.NewLogfmtLogger(os.Stderr)
	logger = &serializedLogger{Logger: logger}
	logger = log.NewContext(logger).With("ts", log.DefaultTimestampUTC)

	var (
		cargos = repository.NewCargo()
		locations = repository.NewLocation()
		voyages = repository.NewVoyage()
		handlingEvents = repository.NewHandlingEvent()
	)

	// Configure some questionable dependencies.
	var (
		handlingEventFactory = cargo.HandlingEventFactory{
			CargoRepository: cargos,
			VoyageRepository: voyages,
			LocationRepository: locations,
		}
		handlingEventHandler = handling.NewEventHandler(
			inspection.NewService(cargos, handlingEvents, nil),
		)
	)

	// Facilitate testing by adding some cargos.
	storeTestData(cargos)

	fieldKeys := []string{"method"}

	var rs routing.Service
	rs = routing.NewProxyingMiddleware(*routingServiceURL, ctx)(rs)

	var bs booking.Service
	bs = booking.NewService(cargos, locations, handlingEvents, rs)
	bs = booking.NewLoggingService(log.NewContext(logger).With("component", "booking"), bs)
	bs = booking.NewInstrumentingService(
		kitprometheus.NewCounter(stdprometheus.CounterOpts{
			Namespace: "api",
			Subsystem: "booking_service",
			Name: "request_count",
			Help: "Number of requests received.",
		}, fieldKeys),
		metrics.NewTimeHistogram(time.Microsecond, kitprometheus.NewSummary(stdprometheus.SummaryOpts{
			Namespace: "api",
			Subsystem: "booking_service",
			Name: "request_latency_microseconds",
			Help: "Total duration of requests in microseconds.",
		}, fieldKeys)),
		bs)

	var ts tracking.Service
	ts = tracking.NewService(cargos, handlingEvents)
	ts = tracking.NewLoggingService(log.NewContext(logger).With("component", "tracking"), ts)
	ts = tracking.NewInstrumentingService(
		kitprometheus.NewCounter(stdprometheus.CounterOpts{
			Namespace: "api",
			Subsystem: "tracking_service",
			Name: "request_count",
			Help: "Number of requests received.",
		}, fieldKeys),
		metrics.NewTimeHistogram(time.Microsecond, kitprometheus.NewSummary(stdprometheus.SummaryOpts{
			Namespace: "api",
			Subsystem: "tracking_service",
			Name: "request_latency_microseconds",
			Help: "Total duration of requests in microseconds.",
		}, fieldKeys)),
		ts)

	var hs handling.Service
	hs = handling.NewService(handlingEvents, handlingEventFactory, handlingEventHandler)
	hs = handling.NewLoggingService(log.NewContext(logger).With("component", "handling"), hs)
	hs = handling.NewInstrumentingService(
		kitprometheus.NewCounter(stdprometheus.CounterOpts{
			Namespace: "api",
			Subsystem: "handling_service",
			Name: "request_count",
			Help: "Number of requests received.",
		}, fieldKeys),
		metrics.NewTimeHistogram(time.Microsecond, kitprometheus.NewSummary(stdprometheus.SummaryOpts{
			Namespace: "api",
			Subsystem: "handling_service",
			Name: "request_latency_microseconds",
			Help: "Total duration of requests in microseconds.",
		}, fieldKeys)),
		hs)

	httpLogger := log.NewContext(logger).With("component", "http")

	mux := http.NewServeMux()

	mux.Handle("/booking/v1/", booking.MakeHandler(ctx, bs, httpLogger))
	mux.Handle("/tracking/v1/", tracking.MakeHandler(ctx, ts, httpLogger))
	mux.Handle("/handling/v1/", handling.MakeHandler(ctx, hs, httpLogger))

	http.Handle("/", accessControl(mux))
	http.Handle("/metrics", stdprometheus.Handler())

	errs := make(chan error, 2)
	go func() {
		logger.Log("transport", "http", "address", *httpAddr, "msg", "listening")
		errs <- http.ListenAndServe(*httpAddr, nil)
	}()
	go func() {
		c := make(chan os.Signal)
		signal.Notify(c, syscall.SIGINT)
		errs <- fmt.Errorf("%s", <-c)
	}()

	logger.Log("terminated", <-errs)
}
func main() {
	flag.Parse()

	// package log
	var logger log.Logger
	{
		logger = log.NewLogfmtLogger(os.Stderr)
		logger = log.NewContext(logger).With("ts", log.DefaultTimestampUTC).With("caller", log.DefaultCaller)
		stdlog.SetFlags(0)                             // flags are handled by Go kit's logger
		stdlog.SetOutput(log.NewStdlibAdapter(logger)) // redirect anything using stdlib log to us
	}

	// package metrics
	var (
		requestCount metrics.Counter
		requestLatency metrics.TimeHistogram
	)
	{
		fieldKeys := []string{"method", "error"}
		requestCount = kitprometheus.NewCounter(stdprometheus.CounterOpts{
			Namespace: "gmuch",
			Subsystem: "api",
			Name: "request_count",
			Help: "Number of requests received.",
		}, fieldKeys)
		requestLatency = metrics.NewTimeHistogram(time.Nanosecond, metrics.NewMultiHistogram(
			expvar.NewHistogram("request_duration_ns", 0, 5e9, 1, 50, 95, 99),
			kitprometheus.NewSummary(stdprometheus.SummaryOpts{
				Namespace: "gmuch",
				Subsystem: "api",
				Name: "duration_ns",
				Help: "Request duration in nanoseconds.",
			}, fieldKeys),
		))
	}

	// Business domain
	var g server.GmuchService
	{
		g = gmuch.New(*dbPath, logger)
		g = server.LoggingMiddleware(logger)(g)
		g = server.InstrumentingMiddleware(requestCount, requestLatency)(g)
	}

	// Mechanical stuff
	rand.Seed(time.Now().UnixNano())
	root := context.Background()
	errc := make(chan error)

	go func() {
		errc <- interrupt()
	}()

	// Debug/instrumentation
	go func() {
		transportLogger := log.NewContext(logger).With("transport", "debug")
		_ = transportLogger.Log("addr", *debugAddr)
		errc <- http.ListenAndServe(*debugAddr, nil) // DefaultServeMux
	}()

	// Transport: HTTP/JSON
	go func() {
		transportLogger := log.NewContext(logger).With("transport", "HTTP/JSON")
		mux := http.NewServeMux()
		mux.Handle("/query", httptransport.NewServer(
			root,
			shttp.EndpointenizeQuery(g),
			shttp.DecodeQueryRequest,
			shttp.EncodeQueryResponse,
			httptransport.ServerErrorLogger(transportLogger),
		))
		mux.Handle("/thread", httptransport.NewServer(
			root,
			shttp.EndpointenizeThread(g),
			shttp.DecodeThreadRequest,
			shttp.EncodeThreadResponse,
			httptransport.ServerErrorLogger(transportLogger),
		))
		_ = transportLogger.Log("addr", *httpAddr)
		errc <- http.ListenAndServe(*httpAddr, mux)
	}()

	// Transport: gRPC
	go func() {
		transportLogger := log.NewContext(logger).With("transport", "gRPC")
		ln, err := net.Listen("tcp", *grpcAddr)
		if err != nil {
			errc <- err
			return
		}
		s := grpc.NewServer() // uses its own, internal context
		sgrpc.RegisterGmuchServer(s, sgrpc.Binding{g})
		_ = transportLogger.Log("addr", *grpcAddr)
		errc <- s.Serve(ln)
	}()

	_ = logger.Log("fatal", <-errc)
}
	}, []string{})

	FetchLatency = metrics.NewTimeHistogram(time.Microsecond, kitprometheus.NewSummary(stdprometheus.SummaryOpts{
		Namespace: "glia",
		Subsystem: "fetcher",
		Name: "fetch_latency_microseconds",
		Help: "Total duration of fetching in microseconds.",
	}, []string{}))

	SendLatency = metrics.NewTimeHistogram(time.Microsecond, kitprometheus.NewSummary(stdprometheus.SummaryOpts{
		Namespace: "glia",
		Subsystem: "sender",
		Name: "send_latency_microseconds",
		Help: "Total duration of sending in microseconds.",
	}, []string{}))

	FetchErrorCount = kitprometheus.NewCounter(stdprometheus.CounterOpts{
		Namespace: "glia",
		Subsystem: "fetcher",
		Name: "error_count",
		Help: "Total error count of fetching operation",
	}, []string{})

	SendErrorCount = kitprometheus.NewCounter(stdprometheus.CounterOpts{
		Namespace: "glia",
		Subsystem: "sender",
		Name: "error_count",
		Help: "Total error count of sending operation",
	}, []string{})
)
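A hedged sketch, not part of the original source, of how the package-level glia metrics above might be recorded around a single fetch; the observeFetch wrapper and its fetchOnce callback are hypothetical names, and only the Add and Observe calls come from the go-kit metrics types declared above.

// observeFetch is a hypothetical wrapper: it times one fetch with FetchLatency
// and bumps FetchErrorCount when the fetch fails.
func observeFetch(fetchOnce func() error) error {
	defer func(begin time.Time) { FetchLatency.Observe(time.Since(begin)) }(time.Now())
	if err := fetchOnce(); err != nil {
		FetchErrorCount.Add(1)
		return err
	}
	return nil
}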
func main() {
	ctx := context.Background()
	logger := log.NewLogfmtLogger(os.Stderr)

	// Instrumentation
	fieldKeys := []string{"method", "error"}
	requestCount := kitprometheus.NewCounter(stdprometheus.CounterOpts{
		Namespace: "my_group",
		Subsystem: "string_service",
		Name: "request_count",
		Help: "Number of requests received.",
	}, fieldKeys)
	requestLatency := metrics.NewTimeHistogram(time.Microsecond, kitprometheus.NewSummary(stdprometheus.SummaryOpts{
		Namespace: "my_group",
		Subsystem: "string_service",
		Name: "request_latency_microseconds",
		Help: "Total duration of requests in microseconds.",
	}, fieldKeys))
	countResult := kitprometheus.NewSummary(stdprometheus.SummaryOpts{
		Namespace: "my_group",
		Subsystem: "string_service",
		Name: "count_result",
		Help: "The result of each count method.",
	}, fieldKeys)

	var svc StringService
	svc = stringService{}
	svc = appLoggingMiddleware{logger, svc}
	svc = instrumentationMiddleware{requestCount, requestLatency, countResult, svc}

	// Transport Logging
	var uppercase endpoint.Endpoint
	uppercase = makeUppercaseEndpoint(svc)
	uppercase = loggingMiddleware(log.NewContext(logger).With("method", "uppercase"))(uppercase)

	var count endpoint.Endpoint
	count = makeCountEndpoint(svc)
	count = loggingMiddleware(log.NewContext(logger).With("method", "count"))(count)

	uppercaseHandler := httptransport.NewServer(
		ctx,
		// makeUppercaseEndpoint(svc),
		uppercase,
		decodeUppercaseRequest,
		encodeResponse,
	)
	countHandler := httptransport.NewServer(
		ctx,
		count, // makeCountEndpoint(svc)
		decodeCountRequest,
		encodeResponse,
	)

	http.Handle("/uppercase", uppercaseHandler)
	http.Handle("/count", countHandler)
	http.Handle("/metrics", stdprometheus.Handler())
	_ = logger.Log("msg", "HTTP", "addr", ":8080")
	_ = logger.Log("err", http.ListenAndServe(":8080", nil))
}
func logService(ec *ab.EntityController, baseurl string) ab.Service {
	res := ab.EntityResource(ec, &Log{}, ab.EntityResourceConfig{
		DisableList: true,
		DisableGet: true,
		DisablePost: true,
		DisablePut: true,
		DisableDelete: true,
	})

	res.ExtraEndpoints = func(srv *ab.Server) error {
		walkthroughPlayed := prometheus.NewCounter(stdprometheus.CounterOpts{
			Namespace: "walkhub",
			Subsystem: "metrics",
			Name: "walkthrough_played",
			Help: "Number of walkthrough plays",
		}, []string{"uuid", "embedorigin"})
		walkthroughVisited := prometheus.NewCounter(stdprometheus.CounterOpts{
			Namespace: "walkhub",
			Subsystem: "metrics",
			Name: "walkthrough_visited",
			Help: "Number of walkthrough visits",
		}, []string{"uuid", "embedorigin"})

		srv.Post("/api/log/helpcenteropened", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			l := helpCenterOpenedLog{}
			ab.MustDecode(r, &l)
			db := ab.GetDB(r)
			userid := getLogUserID(r, ec)

			message := fmt.Sprintf("%s has opened the help center on %s", userid, l.URL)
			ab.MaybeFail(http.StatusInternalServerError, DBLog(db, ec, "helpcenteropened", message))
		}))

		srv.Post("/api/log/walkthroughplayed", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			l := walkthroughPlayedLog{}
			ab.MustDecode(r, &l)
			db := ab.GetDB(r)
			userid := getLogUserID(r, ec)

			wt, err := LoadActualRevision(db, ec, l.UUID)
			ab.MaybeFail(http.StatusBadRequest, err)
			if wt == nil {
				ab.Fail(http.StatusNotFound, nil)
			}

			message := ""
			embedPart := ""
			if l.EmbedOrigin != "" {
				embedPart = "from the help center on " + l.EmbedOrigin + " "
			}
			wturl := baseurl + "walkthrough/" + wt.UUID
			if l.ErrorMessage == "" {
				message = fmt.Sprintf("%s has played the walkthrough %s<%s|%s>", userid, embedPart, wturl, wt.Name)
			} else {
				message = fmt.Sprintf("%s has failed to play the walkthrough %s<%s|%s> with the error message %s", userid, embedPart, wturl, wt.Name, l.ErrorMessage)
			}

			ab.MaybeFail(http.StatusInternalServerError, DBLog(db, ec, "walkthroughplayed", message))

			walkthroughPlayed.
				With(metrics.Field{Key: "uuid", Value: l.UUID}).
				With(metrics.Field{Key: "embedorigin", Value: l.EmbedOrigin}).
				Add(1)
		}))

		srv.Post("/api/log/walkthroughpagevisited", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			l := walkthroughPageVisitedLog{}
			ab.MustDecode(r, &l)
			db := ab.GetDB(r)
			userid := getLogUserID(r, ec)

			wt, err := LoadActualRevision(db, ec, l.UUID)
			ab.MaybeFail(http.StatusBadRequest, err)
			if wt == nil {
				ab.Fail(http.StatusNotFound, nil)
			}

			embedPart := ""
			if l.EmbedOrigin != "" {
				embedPart = "embedded on " + l.EmbedOrigin + " "
			}
			wturl := baseurl + "walkthrough/" + wt.UUID
			message := fmt.Sprintf("%s has visited the walkthrough page %s<%s|%s>", userid, embedPart, wturl, wt.Name)

			ab.MaybeFail(http.StatusInternalServerError, DBLog(db, ec, "walkthroughvisited", message))

			walkthroughVisited.
				With(metrics.Field{Key: "uuid", Value: l.UUID}).
				With(metrics.Field{Key: "embedorigin", Value: l.EmbedOrigin}).
				Add(1)
		}))

		return nil
	}

	return res
}