// NewPublisherDetailed is the same as NewPublisher, but allows users to provide
// an explicit lookup refresh ticker instead of a TTL, and specify the function
// used to perform lookups instead of using net.LookupSRV.
func NewPublisherDetailed(
	name string,
	refreshTicker *time.Ticker,
	lookupSRV func(service, proto, name string) (cname string, addrs []*net.SRV, err error),
	factory loadbalancer.Factory,
	logger log.Logger,
) *Publisher {
	p := &Publisher{
		name:   name,
		cache:  loadbalancer.NewEndpointCache(factory, logger),
		logger: logger,
		quit:   make(chan struct{}),
	}

	instances, err := p.resolve(lookupSRV)
	if err == nil {
		logger.Log("name", name, "instances", len(instances))
	} else {
		logger.Log("name", name, "err", err)
	}
	p.cache.Replace(instances)

	go p.loop(refreshTicker, lookupSRV)
	return p
}
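// Usage sketch (not part of the original source): constructing the publisher with
// an explicit 30-second refresh ticker and the stock net.LookupSRV resolver. The
// SRV name is a placeholder, and the factory literal follows the two-value
// loadbalancer.Factory signature used elsewhere in these snippets.
func exampleNewPublisherDetailedUsage() {
	logger := log.NewLogfmtLogger(os.Stderr)

	ticker := time.NewTicker(30 * time.Second)
	defer ticker.Stop()

	factory := func(instance string) (endpoint.Endpoint, error) {
		return func(ctx context.Context, request interface{}) (interface{}, error) {
			return nil, nil // placeholder; a real factory would build a client endpoint for `instance`
		}, nil
	}

	p := NewPublisherDetailed("my-service.internal", ticker, net.LookupSRV, factory, logger)
	_ = p
}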
func loggingMiddleware(l log.Logger) endpoint.Middleware {
	return func(next endpoint.Endpoint) endpoint.Endpoint {
		return func(ctx context.Context, request interface{}) (result interface{}, err error) {
			var req, resp string
			defer func(b time.Time) {
				l.Log(
					"path", ctx.Value(pathKey),
					"request", req,
					"result", resp,
					"err", err,
					"request_id", ctx.Value(requestIDKey),
					"elapsed", time.Since(b),
				)
			}(time.Now())
			if r, ok := request.(fmt.Stringer); ok {
				req = r.String()
			}
			result, err = next(ctx, request)
			if r, ok := result.(fmt.Stringer); ok {
				resp = r.String()
			}
			return
		}
	}
}
func main() {
	// `package log` domain
	var logger kitlog.Logger
	logger = kitlog.NewLogfmtLogger(os.Stderr)
	logger = kitlog.NewContext(logger).With("ts", kitlog.DefaultTimestampUTC)
	stdlog.SetOutput(kitlog.NewStdlibAdapter(logger)) // redirect stdlib logging to us
	stdlog.SetFlags(0)                                // flags are handled in our logger

	// read configuration from environment
	c, err := config.LoadConfiguration()
	if err != nil {
		logger.Log("fatal", err.Error())
		return
	}

	// Mechanical stuff
	rand.Seed(time.Now().UnixNano())
	root := context.Background()
	errc := make(chan error)

	go func() {
		errc <- interrupt()
	}()

	// Start bindings
	binding.StartApplicationSQSConsumer(logger, root, errc, c)
	binding.StartHealthCheckHTTPListener(logger, root, errc, c)

	logger.Log("fatal", <-errc)
}
// NewPublisher returns a ZooKeeper publisher. ZooKeeper will start watching the
// given path for changes and update the Publisher endpoints.
func NewPublisher(c Client, path string, f loadbalancer.Factory, logger log.Logger) (*Publisher, error) {
	p := &Publisher{
		client: c,
		path:   path,
		cache:  loadbalancer.NewEndpointCache(f, logger),
		logger: logger,
		quit:   make(chan struct{}),
	}

	err := p.client.CreateParentNodes(p.path)
	if err != nil {
		return nil, err
	}

	// initial node retrieval and cache fill
	instances, eventc, err := p.client.GetEntries(p.path)
	if err != nil {
		logger.Log("path", p.path, "msg", "failed to retrieve entries", "err", err)
		return nil, err
	}
	logger.Log("path", p.path, "instances", len(instances))
	p.cache.Replace(instances)

	// handle incoming path updates
	go p.loop(eventc)

	return p, nil
}
// NewSubscriberDetailed is the same as NewSubscriber, but allows users to
// provide an explicit lookup refresh ticker instead of a TTL, and specify the
// lookup function instead of using net.LookupSRV.
func NewSubscriberDetailed(
	name string,
	refresh *time.Ticker,
	lookup Lookup,
	factory sd.Factory,
	logger log.Logger,
) *Subscriber {
	p := &Subscriber{
		name:   name,
		cache:  cache.New(factory, logger),
		logger: logger,
		quit:   make(chan struct{}),
	}

	instances, err := p.resolve(lookup)
	if err == nil {
		logger.Log("name", name, "instances", len(instances))
	} else {
		logger.Log("name", name, "err", err)
	}
	p.cache.Update(instances)

	go p.loop(refresh, lookup)
	return p
}
func (a *Automator) checkAll(errorLogger log.Logger) {
	insts, err := a.cfg.InstanceDB.All()
	if err != nil {
		errorLogger.Log("err", err)
		return
	}
	for _, inst := range insts {
		for service, conf := range inst.Config.Services {
			if conf.Policy() != flux.PolicyAutomated {
				continue
			}
			_, err := a.cfg.Jobs.PutJob(inst.ID, jobs.Job{
				// Key stops us getting two jobs for the same service
				Key: strings.Join([]string{
					jobs.AutomatedServiceJob,
					string(inst.ID),
					string(service),
				}, "|"),
				Method:   jobs.AutomatedServiceJob,
				Priority: jobs.PriorityBackground,
				Params: jobs.AutomatedServiceJobParams{
					ServiceSpec: flux.ServiceSpec(service),
				},
			})
			if err != nil && err != jobs.ErrJobAlreadyQueued {
				errorLogger.Log("err", errors.Wrapf(err, "queueing automated service job"))
			}
		}
	}
}
// NewPublisher returns a Consul publisher which returns Endpoints for the
// requested service. It only returns instances for which all of the passed
// tags are present.
func NewPublisher(
	client Client,
	factory loadbalancer.Factory,
	logger log.Logger,
	service string,
	tags ...string,
) (*Publisher, error) {
	p := &Publisher{
		cache:   loadbalancer.NewEndpointCache(factory, logger),
		client:  client,
		logger:  logger,
		service: service,
		tags:    tags,
		quitc:   make(chan struct{}),
	}

	instances, index, err := p.getInstances(defaultIndex)
	if err == nil {
		logger.Log("service", service, "tags", strings.Join(tags, ", "), "instances", len(instances))
	} else {
		logger.Log("service", service, "tags", strings.Join(tags, ", "), "err", err)
	}
	p.cache.Replace(instances)

	go p.loop(index)

	return p, nil
}
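// Usage sketch (not part of the original source): building a Consul publisher for
// a tagged service. The Client is assumed to be constructed elsewhere (its
// constructor is not shown in these snippets); the service name and tags are
// placeholders, and the factory literal follows the two-value
// loadbalancer.Factory signature used elsewhere in these snippets.
func exampleConsulPublisherUsage(client Client, logger log.Logger) {
	factory := func(instance string) (endpoint.Endpoint, error) {
		return func(ctx context.Context, request interface{}) (interface{}, error) {
			return nil, nil // placeholder; a real factory would dial `instance`
		}, nil
	}

	p, err := NewPublisher(client, factory, logger, "addsvc", "prod", "v1")
	if err != nil {
		logger.Log("err", err)
		return
	}
	_ = p
}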
// NewSubscriber returns a ZooKeeper subscriber. ZooKeeper will start watching
// the given path for changes and update the Subscriber endpoints.
func NewSubscriber(c Client, path string, factory sd.Factory, logger log.Logger) (*Subscriber, error) {
	s := &Subscriber{
		client: c,
		path:   path,
		cache:  cache.New(factory, logger),
		logger: logger,
		quitc:  make(chan struct{}),
	}

	err := s.client.CreateParentNodes(s.path)
	if err != nil {
		return nil, err
	}

	instances, eventc, err := s.client.GetEntries(s.path)
	if err != nil {
		logger.Log("path", s.path, "msg", "failed to retrieve entries", "err", err)
		return nil, err
	}
	logger.Log("path", s.path, "instances", len(instances))
	s.cache.Update(instances)

	go s.loop(eventc)

	return s, nil
}
// NewCluster returns a usable cluster. Host should be of the form
// "http://hostname:8080".
func NewCluster(config *restclient.Config, kubectl string, logger log.Logger) (*Cluster, error) {
	client, err := k8sclient.New(config)
	if err != nil {
		return nil, err
	}
	extclient, err := k8sclient.NewExtensions(config)
	if err != nil {
		return nil, err
	}

	if kubectl == "" {
		kubectl, err = exec.LookPath("kubectl")
		if err != nil {
			return nil, err
		}
	} else {
		if _, err := os.Stat(kubectl); err != nil {
			return nil, err
		}
	}
	logger.Log("kubectl", kubectl)

	c := &Cluster{
		config:  config,
		client:  extendedClient{client, extclient},
		kubectl: kubectl,
		status:  newStatusMap(),
		actionc: make(chan func()),
		logger:  logger,
	}
	go c.loop()
	return c, nil
}
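// Usage sketch (not part of the original source): constructing a Cluster against a
// local API server and letting kubectl be discovered on $PATH. The Host value is a
// placeholder, and setting only Host on restclient.Config is an assumption about
// that client library's configuration.
func exampleNewClusterUsage() {
	logger := log.NewLogfmtLogger(os.Stderr)

	config := &restclient.Config{Host: "http://localhost:8080"}
	c, err := NewCluster(config, "", logger) // empty kubectl path => exec.LookPath("kubectl")
	if err != nil {
		logger.Log("err", err)
		return
	}
	_ = c
}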
func dial(d Dialer, network, address string, logger log.Logger) net.Conn {
	conn, err := d(network, address)
	if err != nil {
		logger.Log("err", err)
		conn = nil // just to be sure
	}
	return conn
}
func main() {
	var (
		listen = flag.String("listen", ":8080", "HTTP listen address")
		proxy  = flag.String("proxy", "", "Optional comma-separated list of URLs to proxy uppercase requests")
	)
	flag.Parse()

	var logger log.Logger
	logger = log.NewLogfmtLogger(os.Stderr)
	logger = log.NewContext(logger).With("listen", *listen).With("caller", log.DefaultCaller)

	ctx := context.Background()

	fieldKeys := []string{"method", "error"}
	requestCount := kitprometheus.NewCounter(stdprometheus.CounterOpts{
		Namespace: "my_group",
		Subsystem: "string_service",
		Name:      "request_count",
		Help:      "Number of requests received.",
	}, fieldKeys)
	requestLatency := metrics.NewTimeHistogram(time.Microsecond, kitprometheus.NewSummary(stdprometheus.SummaryOpts{
		Namespace: "my_group",
		Subsystem: "string_service",
		Name:      "request_latency_microseconds",
		Help:      "Total duration of requests in microseconds.",
	}, fieldKeys))
	countResult := kitprometheus.NewSummary(stdprometheus.SummaryOpts{
		Namespace: "my_group",
		Subsystem: "string_service",
		Name:      "count_result",
		Help:      "The result of each count method.",
	}, []string{})

	var svc StringService
	svc = stringService{}
	svc = proxyingMiddleware(*proxy, ctx, logger)(svc)
	svc = loggingMiddleware(logger)(svc)
	svc = instrumentingMiddleware(requestCount, requestLatency, countResult)(svc)

	uppercaseHandler := httptransport.NewServer(
		ctx,
		makeUppercaseEndpoint(svc),
		decodeUppercaseRequest,
		encodeResponse,
	)
	countHandler := httptransport.NewServer(
		ctx,
		makeCountEndpoint(svc),
		decodeCountRequest,
		encodeResponse,
	)

	http.Handle("/uppercase", uppercaseHandler)
	http.Handle("/count", countHandler)
	http.Handle("/metrics", stdprometheus.Handler())
	logger.Log("msg", "HTTP", "addr", *listen)
	logger.Log("err", http.ListenAndServe(*listen, nil))
}
func spam(logger log.Logger, count int) error {
	for i := 0; i < count; i++ {
		err := logger.Log("key", i)
		if err != nil {
			return err
		}
	}
	return nil
}
// Transport Logging Middleware
func loggingMiddleware(logger log.Logger) endpoint.Middleware {
	return func(next endpoint.Endpoint) endpoint.Endpoint {
		return func(ctx context.Context, request interface{}) (interface{}, error) {
			logger.Log("msg", "calling endpoint")
			defer logger.Log("msg", "called endpoint")
			return next(ctx, request)
		}
	}
}
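// Usage sketch (not part of the original source): wrapping a trivial endpoint with
// the logging middleware above. The endpoint body is a stand-in for real business
// logic.
func exampleLoggingMiddlewareUsage() {
	logger := log.NewLogfmtLogger(os.Stderr)

	var e endpoint.Endpoint = func(ctx context.Context, request interface{}) (interface{}, error) {
		return "ok", nil // placeholder business logic
	}
	e = loggingMiddleware(logger)(e)

	// Each invocation now emits "calling endpoint" / "called endpoint" log lines.
	_, _ = e(context.Background(), struct{}{})
}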
func logging(logger log.Logger) func(Add) Add {
	return func(next Add) Add {
		return func(ctx context.Context, a, b int64) (v int64) {
			defer func(begin time.Time) {
				logger.Log("a", a, "b", b, "result", v, "took", time.Since(begin))
			}(time.Now())
			v = next(ctx, a, b)
			return
		}
	}
}
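// Usage sketch (not part of the original source): decorating a plain Add
// implementation with the logging wrapper above. Add is assumed to be
// func(context.Context, int64, int64) int64, as implied by the wrapper.
func exampleLoggingAddUsage() {
	logger := log.NewLogfmtLogger(os.Stderr)

	var add Add = func(_ context.Context, a, b int64) int64 { return a + b }
	add = logging(logger)(add)

	add(context.Background(), 2, 3) // logs a, b, result, and elapsed time
}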
// FromGRPCRequest returns a grpc RequestFunc that tries to join with an
// OpenTracing trace found in `req` and starts a new Span called
// `operationName` accordingly. If no trace could be found in `req`, the Span
// will be a trace root. The Span is incorporated in the returned Context and
// can be retrieved with opentracing.SpanFromContext(ctx).
func FromGRPCRequest(tracer opentracing.Tracer, operationName string, logger log.Logger) func(ctx context.Context, md *metadata.MD) context.Context {
	return func(ctx context.Context, md *metadata.MD) context.Context {
		var span opentracing.Span
		wireContext, err := tracer.Extract(opentracing.TextMap, metadataReaderWriter{md})
		if err != nil && err != opentracing.ErrSpanContextNotFound {
			logger.Log("err", err)
		}
		span = tracer.StartSpan(operationName, ext.RPCServerOption(wireContext))
		return opentracing.ContextWithSpan(ctx, span)
	}
}
// ToGRPCRequest returns a grpc RequestFunc that injects an OpenTracing Span
// found in `ctx` into the grpc Metadata. If no such Span can be found, the
// RequestFunc is a noop.
func ToGRPCRequest(tracer opentracing.Tracer, logger log.Logger) func(ctx context.Context, md *metadata.MD) context.Context {
	return func(ctx context.Context, md *metadata.MD) context.Context {
		if span := opentracing.SpanFromContext(ctx); span != nil {
			// There's nothing we can do with an error here.
			if err := tracer.Inject(span.Context(), opentracing.TextMap, metadataReaderWriter{md}); err != nil {
				logger.Log("err", err)
			}
		}
		return ctx
	}
}
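// Usage sketch (not part of the original source): applying the two gRPC request
// funcs directly around a metadata object. The tracer here is the no-op
// opentracing.GlobalTracer() and the operation name is a placeholder; a real
// deployment would plug in a concrete tracer implementation.
func exampleGRPCTracePropagation(ctx context.Context) {
	logger := log.NewLogfmtLogger(os.Stderr)
	tracer := opentracing.GlobalTracer()

	// Client side: inject the current span (if any) into outgoing metadata.
	md := metadata.MD{}
	ctx = ToGRPCRequest(tracer, logger)(ctx, &md)

	// Server side: extract the propagated span context from incoming metadata.
	ctx = FromGRPCRequest(tracer, "my_operation", logger)(ctx, &md)
	_ = ctx
}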
// FromGRPCRequest returns a grpc RequestFunc that tries to join with an
// OpenTracing trace found in `req` and starts a new Span called
// `operationName` accordingly. If no trace could be found in `req`, the Span
// will be a trace root. The Span is incorporated in the returned Context and
// can be retrieved with opentracing.SpanFromContext(ctx).
//
// The logger is used to report errors and may be nil.
func FromGRPCRequest(tracer opentracing.Tracer, operationName string, logger log.Logger) func(ctx context.Context, md *metadata.MD) context.Context {
	return func(ctx context.Context, md *metadata.MD) context.Context {
		span, err := tracer.Join(operationName, opentracing.TextMap, metadataReaderWriter{md})
		if err != nil {
			span = tracer.StartSpan(operationName)
			if err != opentracing.ErrTraceNotFound {
				logger.Log("err", err)
			}
		}
		return opentracing.ContextWithSpan(ctx, span)
	}
}
func makeEndpoints(addrs []*net.SRV, f loadbalancer.Factory, logger log.Logger) []endpoint.Endpoint {
	endpoints := make([]endpoint.Endpoint, 0, len(addrs))
	for _, addr := range addrs {
		endpoint, err := f(addr2instance(addr))
		if err != nil {
			logger.Log("instance", addr2instance(addr), "err", err)
			continue
		}
		endpoints = append(endpoints, endpoint)
	}
	return endpoints
}
// EndpointLoggingMiddleware returns an endpoint middleware that logs the
// duration of each invocation, and the resulting error, if any.
func EndpointLoggingMiddleware(logger log.Logger) endpoint.Middleware {
	return func(next endpoint.Endpoint) endpoint.Endpoint {
		return func(ctx context.Context, request interface{}) (response interface{}, err error) {
			defer func(begin time.Time) {
				logger.Log("error", err, "took", time.Since(begin))
			}(time.Now())
			return next(ctx, request)
		}
	}
}
// NewClient returns a ZooKeeper client with a connection to the server cluster.
// It will return an error if the server cluster cannot be resolved.
func NewClient(servers []string, logger log.Logger, options ...Option) (Client, error) {
	defaultEventHandler := func(event zk.Event) {
		logger.Log("eventtype", event.Type.String(), "server", event.Server, "state", event.State.String(), "err", event.Err)
	}
	config := clientConfig{
		acl:            DefaultACL,
		connectTimeout: DefaultConnectTimeout,
		sessionTimeout: DefaultSessionTimeout,
		eventHandler:   defaultEventHandler,
		logger:         logger,
	}
	for _, option := range options {
		if err := option(&config); err != nil {
			return nil, err
		}
	}

	// dialer overrides the default ZooKeeper library Dialer so we can configure
	// the connectTimeout. The current library has a hardcoded value of 1 second
	// and there are reports of race conditions, due to slow DNS resolvers and
	// other network latency issues.
	dialer := func(network, address string, _ time.Duration) (net.Conn, error) {
		return net.DialTimeout(network, address, config.connectTimeout)
	}
	conn, eventc, err := zk.Connect(servers, config.sessionTimeout, withLogger(logger), zk.WithDialer(dialer))
	if err != nil {
		return nil, err
	}

	if len(config.credentials) > 0 {
		err = conn.AddAuth("digest", config.credentials)
		if err != nil {
			return nil, err
		}
	}

	c := &client{conn, config, true, make(chan struct{})}

	// Start listening for incoming Event payloads and callback the set
	// eventHandler.
	go func() {
		for {
			select {
			case event := <-eventc:
				config.eventHandler(event)
			case <-c.quit:
				return
			}
		}
	}()
	return c, nil
}
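// Usage sketch (not part of the original source): connecting to a ZooKeeper
// ensemble and wiring the resulting client into the path-watching NewSubscriber
// shown earlier. The addresses and path are placeholders, and the factory literal
// assumes the usual sd.Factory shape of (endpoint, closer, error).
func exampleZooKeeperDiscovery() {
	logger := log.NewLogfmtLogger(os.Stderr)

	client, err := NewClient([]string{"zk1.internal:2181", "zk2.internal:2181"}, logger)
	if err != nil {
		logger.Log("err", err)
		return
	}

	factory := func(instance string) (endpoint.Endpoint, io.Closer, error) {
		e := func(ctx context.Context, request interface{}) (interface{}, error) { return nil, nil }
		return e, nil, nil // placeholder; a real factory would build a client for `instance`
	}

	s, err := NewSubscriber(client, "/services/addsvc", factory, logger)
	if err != nil {
		logger.Log("err", err)
		return
	}
	_ = s
}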
// NewPublisher returns a static endpoint Publisher.
func NewPublisher(instances []string, factory loadbalancer.Factory, logger log.Logger) Publisher {
	logger = log.NewContext(logger).With("component", "Fixed Publisher")
	endpoints := []endpoint.Endpoint{}
	for _, instance := range instances {
		e, err := factory(instance)
		if err != nil {
			_ = logger.Log("instance", instance, "err", err)
			continue
		}
		endpoints = append(endpoints, e)
	}
	return Publisher{fixed.NewPublisher(endpoints)}
}
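// Usage sketch (not part of the original source): building a static Publisher from
// a fixed list of instance addresses. The addresses are placeholders, and the
// factory literal follows the two-value signature the function above expects.
func exampleStaticPublisherUsage() {
	logger := log.NewLogfmtLogger(os.Stderr)

	factory := func(instance string) (endpoint.Endpoint, error) {
		return func(ctx context.Context, request interface{}) (interface{}, error) {
			return nil, nil // placeholder; a real factory would dial `instance`
		}, nil
	}

	p := NewPublisher([]string{"10.0.0.1:8080", "10.0.0.2:8080"}, factory, logger)
	_ = p
}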
func fromGRPC(newSpan NewSpanFunc, md metadata.MD, logger log.Logger) *Span {
	traceIDSlc := md[traceIDGRPCKey]
	pos := len(traceIDSlc) - 1
	if pos < 0 {
		return nil
	}
	traceID, err := strconv.ParseInt(traceIDSlc[pos], 16, 64)
	if err != nil {
		logger.Log("msg", "invalid trace id found, ignoring trace", "err", err)
		return nil
	}

	spanIDSlc := md[spanIDGRPCKey]
	pos = len(spanIDSlc) - 1
	if pos < 0 {
		spanIDSlc = make([]string, 1)
		pos = 0
	}
	if spanIDSlc[pos] == "" {
		logger.Log("msg", "trace ID without span ID")   // abnormal
		spanIDSlc[pos] = strconv.FormatInt(newID(), 16) // deal with it (IDs are hex-encoded)
	}
	spanID, err := strconv.ParseInt(spanIDSlc[pos], 16, 64)
	if err != nil {
		logger.Log(spanIDHTTPHeader, spanIDSlc, "err", err) // abnormal
		spanID = newID()                                    // deal with it
	}

	parentSpanIDSlc := md[parentSpanIDGRPCKey]
	pos = len(parentSpanIDSlc) - 1
	if pos < 0 {
		parentSpanIDSlc = make([]string, 1)
		pos = 0
	}
	if parentSpanIDSlc[pos] == "" {
		parentSpanIDSlc[pos] = "0" // normal
	}
	parentSpanID, err := strconv.ParseInt(parentSpanIDSlc[pos], 16, 64)
	if err != nil {
		logger.Log(parentSpanIDHTTPHeader, parentSpanIDSlc, "err", err) // abnormal
		parentSpanID = 0                                                // the only way to deal with it
	}

	span := newSpan(traceID, spanID, parentSpanID)

	var sampledHdr string
	sampledSlc := md[sampledGRPCKey]
	pos = len(sampledSlc) - 1
	if pos >= 0 {
		sampledHdr = sampledSlc[pos]
	}
	switch sampledHdr {
	case "0":
		span.runSampler = false
		span.sampled = false
	case "1":
		span.runSampler = false
		span.sampled = true
	default:
		// we don't know if the upstream trace was sampled. use our sampler
		span.runSampler = true
	}

	return span
}
func fromHTTP(newSpan NewSpanFunc, r *http.Request, logger log.Logger) *Span {
	traceIDStr := r.Header.Get(traceIDHTTPHeader)
	if traceIDStr == "" {
		return newSpan(newID(), newID(), 0) // normal; just make a new one
	}
	traceID, err := strconv.ParseInt(traceIDStr, 16, 64)
	if err != nil {
		logger.Log(traceIDHTTPHeader, traceIDStr, "err", err)
		return newSpan(newID(), newID(), 0)
	}
	spanIDStr := r.Header.Get(spanIDHTTPHeader)
	if spanIDStr == "" {
		logger.Log("msg", "trace ID without span ID") // abnormal
		spanIDStr = strconv.FormatInt(newID(), 16)    // deal with it (IDs are hex-encoded)
	}
	spanID, err := strconv.ParseInt(spanIDStr, 16, 64)
	if err != nil {
		logger.Log(spanIDHTTPHeader, spanIDStr, "err", err) // abnormal
		spanID = newID()                                    // deal with it
	}
	parentSpanIDStr := r.Header.Get(parentSpanIDHTTPHeader)
	if parentSpanIDStr == "" {
		parentSpanIDStr = "0" // normal
	}
	parentSpanID, err := strconv.ParseInt(parentSpanIDStr, 16, 64)
	if err != nil {
		logger.Log(parentSpanIDHTTPHeader, parentSpanIDStr, "err", err) // abnormal
		parentSpanID = 0                                                // the only way to deal with it
	}
	return newSpan(traceID, spanID, parentSpanID)
}
// Test that Context.Log has a consistent function stack depth when binding
// log.Valuers, regardless of how many times Context.With has been called or
// whether Context.Log is called via an interface typed variable or a concrete
// typed variable.
func TestContextStackDepth(t *testing.T) {
	t.Parallel()
	fn := fmt.Sprintf("%n", stack.Caller(0))

	var output []interface{}

	logger := log.Logger(log.LoggerFunc(func(keyvals ...interface{}) error {
		output = keyvals
		return nil
	}))

	stackValuer := log.Valuer(func() interface{} {
		for i, c := range stack.Trace() {
			if fmt.Sprintf("%n", c) == fn {
				return i
			}
		}
		t.Fatal("Test function not found in stack trace.")
		return nil
	})

	concrete := log.NewContext(logger).With("stack", stackValuer)
	var iface log.Logger = concrete

	// Call through interface to get baseline.
	iface.Log("k", "v")
	want := output[1].(int)

	for len(output) < 10 {
		concrete.Log("k", "v")
		if have := output[1]; have != want {
			t.Errorf("%d Withs: have %v, want %v", len(output)/2-1, have, want)
		}

		iface.Log("k", "v")
		if have := output[1]; have != want {
			t.Errorf("%d Withs: have %v, want %v", len(output)/2-1, have, want)
		}

		wrapped := log.NewContext(concrete)
		wrapped.Log("k", "v")
		if have := output[1]; have != want {
			t.Errorf("%d Withs: have %v, want %v", len(output)/2-1, have, want)
		}

		concrete = concrete.With("k", "v")
		iface = concrete
	}
}
// FromHTTPRequest returns an http RequestFunc that tries to join with an
// OpenTracing trace found in `req` and starts a new Span called
// `operationName` accordingly. If no trace could be found in `req`, the Span
// will be a trace root. The Span is incorporated in the returned Context and
// can be retrieved with opentracing.SpanFromContext(ctx).
func FromHTTPRequest(tracer opentracing.Tracer, operationName string, logger log.Logger) kithttp.RequestFunc {
	return func(ctx context.Context, req *http.Request) context.Context {
		// Try to join to a trace propagated in `req`.
		var span opentracing.Span
		wireContext, err := tracer.Extract(
			opentracing.TextMap,
			opentracing.HTTPHeadersCarrier(req.Header),
		)
		if err != nil && err != opentracing.ErrSpanContextNotFound {
			logger.Log("err", err)
		}
		span = tracer.StartSpan(operationName, ext.RPCServerOption(wireContext))
		return opentracing.ContextWithSpan(ctx, span)
	}
}
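// Usage sketch (not part of the original source): the returned RequestFunc can be
// applied by hand inside an HTTP handler to put a server-side span into the
// context. The tracer is the no-op opentracing.GlobalTracer() and the operation
// name is a placeholder.
func exampleFromHTTPRequestUsage(w http.ResponseWriter, req *http.Request) {
	logger := log.NewLogfmtLogger(os.Stderr)
	tracer := opentracing.GlobalTracer()

	ctx := FromHTTPRequest(tracer, "GET /items", logger)(context.Background(), req)
	span := opentracing.SpanFromContext(ctx)
	defer span.Finish()

	// ... handle the request using ctx ...
	w.WriteHeader(http.StatusOK)
}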
func heartbeat(id JobID, h heartbeater, d time.Duration, cancel <-chan struct{}, done chan<- struct{}, logger log.Logger) {
	t := time.NewTicker(d)
	defer t.Stop()
	defer close(done)
	for {
		select {
		case <-t.C:
			if err := h.Heartbeat(id); err != nil {
				logger.Log("heartbeat", err)
			}
		case <-cancel:
			return
		}
	}
}
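// Usage sketch (not part of the original source): running the heartbeat loop for
// the lifetime of a job and waiting for it to wind down. The interval is a
// placeholder; the job ID and heartbeater are supplied by the caller.
func exampleHeartbeatUsage(id JobID, h heartbeater, logger log.Logger) {
	cancel := make(chan struct{})
	done := make(chan struct{})

	go heartbeat(id, h, 5*time.Second, cancel, done, logger)

	// ... do the job's work ...

	close(cancel) // stop heartbeating
	<-done        // wait for the goroutine to exit
}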
// proxyAdd returns an implementation of Add that invokes a remote Add
// service.
func proxyAdd(e endpoint.Endpoint, logger log.Logger) Add {
	return func(ctx context.Context, a, b int64) int64 {
		resp, err := e(ctx, reqrep.AddRequest{A: a, B: b})
		if err != nil {
			logger.Log("err", err)
			return 0
		}
		addResp, ok := resp.(reqrep.AddResponse)
		if !ok {
			logger.Log("err", endpoint.ErrBadCast)
			return 0
		}
		return addResp.V
	}
}
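// Usage sketch (not part of the original source): pointing proxyAdd at an
// in-process endpoint that fakes the remote service, which is also a handy way to
// exercise its error paths in tests.
func exampleProxyAddUsage() {
	logger := log.NewLogfmtLogger(os.Stderr)

	fake := func(ctx context.Context, request interface{}) (interface{}, error) {
		req := request.(reqrep.AddRequest)
		return reqrep.AddResponse{V: req.A + req.B}, nil
	}

	add := proxyAdd(fake, logger)
	add(context.Background(), 1, 2) // returns 3; errors would be logged and yield 0
}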
func listen(l log.Logger) {
	listen := os.Getenv("LISTEN")
	if listen == "" {
		listen = ":8080"
	}

	http.Handle("/metrics", prometheus.Handler())
	http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		http.Error(w, "OK", http.StatusOK)
	})
	http.HandleFunc("/", http.NotFound)

	l.Log("level", "info", "msg", "Listening", "listen", listen)
	if err := http.ListenAndServe(listen, nil); err != nil {
		l.Log("level", "error", "msg", "Failed to listen", "listen", listen, "err", err)
	}
}
func (c *Cluster) doReleaseCommand(logger log.Logger, newDefinition *apiObject, args ...string) error {
	cmd := c.kubectlCommand(args...)
	cmd.Stdin = bytes.NewReader(newDefinition.bytes)
	stderr := &bytes.Buffer{}
	cmd.Stderr = stderr

	logger.Log("cmd", strings.Join(args, " "))
	begin := time.Now()
	err := cmd.Run()
	result := "success"
	if err != nil {
		result = stderr.String()
		err = errors.Wrap(errors.New(result), "running kubectl")
	}
	logger.Log("result", result, "took", time.Since(begin).String())
	return err
}
// ToHTTPRequest returns an http RequestFunc that injects an OpenTracing Span
// found in `ctx` into the http headers. If no such Span can be found, the
// RequestFunc is a noop.
func ToHTTPRequest(tracer opentracing.Tracer, logger log.Logger) kithttp.RequestFunc {
	return func(ctx context.Context, req *http.Request) context.Context {
		// Try to find a Span in the Context.
		if span := opentracing.SpanFromContext(ctx); span != nil {
			// Add standard OpenTracing tags.
			ext.HTTPMethod.Set(span, req.URL.RequestURI())
			host, portString, err := net.SplitHostPort(req.URL.Host)
			if err == nil {
				ext.PeerHostname.Set(span, host)
				if port, err := strconv.Atoi(portString); err == nil { // only tag the port when it parses cleanly
					ext.PeerPort.Set(span, uint16(port))
				}
			} else {
				ext.PeerHostname.Set(span, req.URL.Host)
			}

			// There's nothing we can do with any errors here.
			if err = tracer.Inject(
				span.Context(),
				opentracing.TextMap,
				opentracing.HTTPHeadersCarrier(req.Header),
			); err != nil {
				logger.Log("err", err)
			}
		}
		return ctx
	}
}