func TestWith(t *testing.T) {
	buf := &bytes.Buffer{}
	kvs := []interface{}{"a", 123}
	logger := log.NewJSONLogger(buf)
	logger = log.With(logger, kvs...)
	kvs[1] = 0                          // With should copy its key values
	logger = log.With(logger, "b", "c") // With should stack
	if err := logger.Log("msg", "message"); err != nil {
		t.Fatal(err)
	}
	if want, have := `{"a":123,"b":"c","msg":"message"}`+"\n", buf.String(); want != have {
		t.Errorf("\nwant: %s\nhave: %s", want, have)
	}
}
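// Sketch (not part of the original suite): how application code might layer
// contexts with log.With. The service/request key names are illustrative
// only; note that JSONLogger emits keys in sorted order, which the test
// above also relies on. Assumes "os" is imported.
func Example_withStacking() {
	logger := log.NewJSONLogger(os.Stdout)
	logger = log.With(logger, "service", "addsvc")
	requestLogger := log.With(logger, "request_id", "abc123")
	requestLogger.Log("msg", "handled")
	// Output: {"msg":"handled","request_id":"abc123","service":"addsvc"}
}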
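// The benchmarks below reference a package-level logger named discard, whose
// definition is not part of this excerpt; presumably it is a no-op logger
// equivalent to:
var discard = log.NewNopLogger()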
func BenchmarkValueBindingCaller(b *testing.B) {
	logger := discard
	logger = log.With(logger, "caller", log.DefaultCaller)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		logger.Log("k", "v")
	}
}
func BenchmarkOneWith(b *testing.B) {
	logger := discard
	logger = log.With(logger, "k", "v")
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		logger.Log("k", "v")
	}
}
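// Sketch, mirroring BenchmarkOneWith above: the same measurement with two
// stacked With calls, to show how context depth affects cost. Not part of
// the original excerpt.
func BenchmarkTwoWith(b *testing.B) {
	logger := discard
	for i := 0; i < 2; i++ {
		logger = log.With(logger, "k", "v")
	}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		logger.Log("k", "v")
	}
}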
func TestValueBinding(t *testing.T) {
	var output []interface{}

	logger := log.Logger(log.LoggerFunc(func(keyvals ...interface{}) error {
		output = keyvals
		return nil
	}))

	start := time.Date(2015, time.April, 25, 0, 0, 0, 0, time.UTC)
	now := start
	mocktime := func() time.Time {
		now = now.Add(time.Second)
		return now
	}

	logger = log.With(logger, "ts", log.Timestamp(mocktime), "caller", log.DefaultCaller)

	logger.Log("foo", "bar")
	timestamp, ok := output[1].(time.Time)
	if !ok {
		t.Fatalf("want time.Time, have %T", output[1])
	}
	if want, have := start.Add(time.Second), timestamp; want != have {
		t.Errorf("output[1]: want %v, have %v", want, have)
	}
	if want, have := "value_test.go:28", fmt.Sprint(output[3]); want != have {
		t.Errorf("output[3]: want %s, have %s", want, have)
	}

	// A second attempt to confirm the bindings are truly dynamic.
	logger.Log("foo", "bar")
	timestamp, ok = output[1].(time.Time)
	if !ok {
		t.Fatalf("want time.Time, have %T", output[1])
	}
	if want, have := start.Add(2*time.Second), timestamp; want != have {
		t.Errorf("output[1]: want %v, have %v", want, have)
	}
	if want, have := "value_test.go:41", fmt.Sprint(output[3]); want != have {
		t.Errorf("output[3]: want %s, have %s", want, have)
	}
}
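// Sketch (not from the original suite): any function can serve as a lazily
// evaluated value via log.Valuer, assuming go-kit's Valuer API. The counter
// is illustrative; like the timestamp and caller bindings tested above, it
// is re-evaluated on every Log call. Assumes "os" is imported.
func Example_valuerSketch() {
	count := 0
	counter := log.Valuer(func() interface{} {
		count++
		return count
	})
	logger := log.NewLogfmtLogger(os.Stdout)
	logger = log.With(logger, "count", counter)
	logger.Log("msg", "first")
	logger.Log("msg", "second")
	// Output:
	// count=1 msg=first
	// count=2 msg=second
}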
// Test that With returns a Logger safe for concurrent use. This test
// validates that the stored logging context does not get corrupted when
// multiple clients concurrently log additional keyvals.
//
// This test must be run with go test -cpu 2 (or more) to achieve its goal.
func TestWithConcurrent(t *testing.T) {
	// Create some buckets to count how many events each goroutine logs.
	const goroutines = 8
	counts := [goroutines]int{}

	// This logger extracts a goroutine id from the last value field and
	// increments the referenced bucket.
	logger := log.LoggerFunc(func(kv ...interface{}) error {
		goroutine := kv[len(kv)-1].(int)
		counts[goroutine]++
		return nil
	})

	// With must be careful about handling slices that can grow without
	// copying the underlying array, so give it a challenge.
	l := log.With(logger, make([]interface{}, 0, 2)...)

	// Start logging concurrently. Each goroutine logs its id so the logger
	// can bucket the event counts.
	var wg sync.WaitGroup
	wg.Add(goroutines)
	const n = 10000
	for i := 0; i < goroutines; i++ {
		go func(idx int) {
			defer wg.Done()
			for j := 0; j < n; j++ {
				l.Log("goroutineIdx", idx)
			}
		}(i)
	}
	wg.Wait()

	for bucket, have := range counts {
		if want := n; want != have {
			t.Errorf("bucket %d: want %d, have %d", bucket, want, have) // note Errorf
		}
	}
}
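// Illustration of the hazard the zero-length, capacity-2 slice above is
// probing (not part of the test): appending to a slice with spare capacity
// reuses the backing array, so a With implementation that stored the
// caller's slice and appended to it would let sibling loggers overwrite
// each other's keyvals:
//
//	base := make([]interface{}, 0, 2)
//	l1 := append(base, "a") // writes base[0]
//	l2 := append(base, "b") // also writes base[0]; l1[0] is now "b"
//
// Copying the keyvals, as TestWith verifies, avoids this.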
func main() {
	// Flag domain. Note that gRPC transitively registers flags via its import
	// of glog. So, we define a new flag set, to keep those domains distinct.
	fs := flag.NewFlagSet("", flag.ExitOnError)
	var (
		debugAddr                    = fs.String("debug.addr", ":8000", "Address for HTTP debug/instrumentation server")
		httpAddr                     = fs.String("http.addr", ":8001", "Address for HTTP (JSON) server")
		grpcAddr                     = fs.String("grpc.addr", ":8002", "Address for gRPC server")
		netrpcAddr                   = fs.String("netrpc.addr", ":8003", "Address for net/rpc server")
		thriftAddr                   = fs.String("thrift.addr", ":8004", "Address for Thrift server")
		thriftProtocol               = fs.String("thrift.protocol", "binary", "binary, compact, json, simplejson")
		thriftBufferSize             = fs.Int("thrift.buffer.size", 0, "0 for unbuffered")
		thriftFramed                 = fs.Bool("thrift.framed", false, "true to enable framing")
		proxyHTTPAddr                = fs.String("proxy.http.url", "", "if set, proxy requests over HTTP to this addsvc")
		zipkinServiceName            = fs.String("zipkin.service.name", "addsvc", "Zipkin service name")
		zipkinCollectorAddr          = fs.String("zipkin.collector.addr", "", "Zipkin Scribe collector address (empty will log spans)")
		zipkinCollectorTimeout       = fs.Duration("zipkin.collector.timeout", time.Second, "Zipkin collector timeout")
		zipkinCollectorBatchSize     = fs.Int("zipkin.collector.batch.size", 100, "Zipkin collector batch size")
		zipkinCollectorBatchInterval = fs.Duration("zipkin.collector.batch.interval", time.Second, "Zipkin collector batch interval")
	)
	flag.Usage = fs.Usage // only show our flags
	fs.Parse(os.Args[1:])

	// `package log` domain
	var logger kitlog.Logger
	logger = kitlog.NewLogfmtLogger(os.Stderr)
	logger = kitlog.With(logger, "ts", kitlog.DefaultTimestampUTC)
	stdlog.SetOutput(kitlog.NewStdlibAdapter(logger)) // redirect stdlib logging to us
	stdlog.SetFlags(0)                                // flags are handled in our logger

	// `package metrics` domain
	requests := metrics.NewMultiCounter(
		expvar.NewCounter("requests"),
		statsd.NewCounter(ioutil.Discard, "requests_total", time.Second),
		prometheus.NewCounter(stdprometheus.CounterOpts{
			Namespace: "addsvc",
			Subsystem: "add",
			Name:      "requests_total",
			Help:      "Total number of received requests.",
		}, []string{}),
	)
	duration := metrics.NewTimeHistogram(time.Nanosecond, metrics.NewMultiHistogram(
		expvar.NewHistogram("duration_nanoseconds_total", 0, 1e9, 3, 50, 95, 99),
		statsd.NewHistogram(ioutil.Discard, "duration_nanoseconds_total", time.Second),
		prometheus.NewSummary(stdprometheus.SummaryOpts{
			Namespace: "addsvc",
			Subsystem: "add",
			Name:      "duration_nanoseconds_total",
			Help:      "Total nanoseconds spent serving requests.",
		}, []string{}),
	))

	// `package tracing` domain
	zipkinHostPort := "localhost:1234" // TODO Zipkin makes overly simple assumptions about services
	var zipkinCollector zipkin.Collector = loggingCollector{logger}
	if *zipkinCollectorAddr != "" {
		var err error
		if zipkinCollector, err = zipkin.NewScribeCollector(
			*zipkinCollectorAddr,
			*zipkinCollectorTimeout,
			*zipkinCollectorBatchSize,
			*zipkinCollectorBatchInterval,
		); err != nil {
			logger.Log("err", err)
			os.Exit(1)
		}
	}
	zipkinMethodName := "add"
	zipkinSpanFunc := zipkin.MakeNewSpanFunc(zipkinHostPort, *zipkinServiceName, zipkinMethodName)
	zipkin.Log.Swap(logger) // log diagnostic/error details

	// Our business and operational domain
	var a Add = pureAdd
	if *proxyHTTPAddr != "" {
		var e endpoint.Endpoint
		e = httpclient.NewClient("GET", *proxyHTTPAddr, zipkin.ToRequest(zipkinSpanFunc))
		e = zipkin.AnnotateClient(zipkinSpanFunc, zipkinCollector)(e)
		a = proxyAdd(e, logger)
	}
	a = logging(logger)(a)
	a = instrument(requests, duration)(a)
	// Server domain
	var e endpoint.Endpoint
	e = makeEndpoint(a)
	e = zipkin.AnnotateServer(zipkinSpanFunc, zipkinCollector)(e)

	// Mechanical stuff
	rand.Seed(time.Now().UnixNano())
	root := context.Background()
	errc := make(chan error)

	go func() {
		errc <- interrupt()
	}()

	// Transport: HTTP (debug/instrumentation)
	go func() {
		logger.Log("addr", *debugAddr, "transport", "debug")
		errc <- http.ListenAndServe(*debugAddr, nil)
	}()

	// Transport: HTTP (JSON)
	go func() {
		ctx, cancel := context.WithCancel(root)
		defer cancel()

		before := []httptransport.BeforeFunc{zipkin.ToContext(zipkinSpanFunc)}
		after := []httptransport.AfterFunc{}
		handler := makeHTTPBinding(ctx, e, before, after)

		logger.Log("addr", *httpAddr, "transport", "HTTP/JSON")
		errc <- http.ListenAndServe(*httpAddr, handler)
	}()

	// Transport: gRPC
	go func() {
		ln, err := net.Listen("tcp", *grpcAddr)
		if err != nil {
			errc <- err
			return
		}
		s := grpc.NewServer() // uses its own context?
		pb.RegisterAddServer(s, grpcBinding{e})

		logger.Log("addr", *grpcAddr, "transport", "gRPC")
		errc <- s.Serve(ln)
	}()

	// Transport: net/rpc
	go func() {
		ctx, cancel := context.WithCancel(root)
		defer cancel()

		s := rpc.NewServer()
		s.RegisterName("addsvc", NetrpcBinding{ctx, e})
		s.HandleHTTP(rpc.DefaultRPCPath, rpc.DefaultDebugPath)

		logger.Log("addr", *netrpcAddr, "transport", "net/rpc")
		errc <- http.ListenAndServe(*netrpcAddr, s)
	}()

	// Transport: Thrift
	go func() {
		ctx, cancel := context.WithCancel(root)
		defer cancel()

		var protocolFactory thrift.TProtocolFactory
		switch *thriftProtocol {
		case "binary":
			protocolFactory = thrift.NewTBinaryProtocolFactoryDefault()
		case "compact":
			protocolFactory = thrift.NewTCompactProtocolFactory()
		case "json":
			protocolFactory = thrift.NewTJSONProtocolFactory()
		case "simplejson":
			protocolFactory = thrift.NewTSimpleJSONProtocolFactory()
		default:
			errc <- fmt.Errorf("invalid Thrift protocol %q", *thriftProtocol)
			return
		}

		var transportFactory thrift.TTransportFactory
		if *thriftBufferSize > 0 {
			transportFactory = thrift.NewTBufferedTransportFactory(*thriftBufferSize)
		} else {
			transportFactory = thrift.NewTTransportFactory()
		}
		if *thriftFramed {
			transportFactory = thrift.NewTFramedTransportFactory(transportFactory)
		}

		transport, err := thrift.NewTServerSocket(*thriftAddr)
		if err != nil {
			errc <- err
			return
		}

		logger.Log("addr", *thriftAddr, "transport", "Thrift")
		errc <- thrift.NewTSimpleServer4(
			thriftadd.NewAddServiceProcessor(thriftBinding{ctx, e}),
			transport,
			transportFactory,
			protocolFactory,
		).Serve()
	}()

	logger.Log("fatal", <-errc)
}
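// A minimal sketch of the interrupt helper used in main above, assuming it
// blocks until SIGINT/SIGTERM and surfaces the received signal as an error
// (imports: "fmt", "os", "os/signal", "syscall").
func interrupt() error {
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
	return fmt.Errorf("%s", <-c)
}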
package log_test

import (
	"testing"

	"github.com/go-kit/kit/log"
)

func benchmarkRunner(b *testing.B, logger log.Logger, f func(log.Logger)) {
	logger = log.With(logger, "common_key", "common_value")
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		f(logger)
	}
}

var (
	baseMessage = func(logger log.Logger) { logger.Log("foo_key", "foo_value") }
	withMessage = func(logger log.Logger) { log.With(logger, "a", "b").Log("c", "d") }
)
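// Hypothetical wiring of the helpers above (benchmark names are illustrative;
// assumes "io/ioutil" is imported): a concrete benchmark pairs a real logger
// backend with one of the message funcs.
func BenchmarkLogfmtLoggerSimpleMessage(b *testing.B) {
	benchmarkRunner(b, log.NewLogfmtLogger(ioutil.Discard), baseMessage)
}

func BenchmarkLogfmtLoggerContextualMessage(b *testing.B) {
	benchmarkRunner(b, log.NewLogfmtLogger(ioutil.Discard), withMessage)
}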