// NewClientStream creates a new Stream for the client side. This is called
// by generated code.
func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
	var (
		t   transport.ClientTransport
		err error
	)
	t, err = cc.dopts.picker.Pick(ctx)
	if err != nil {
		return nil, toRPCErr(err)
	}
	// TODO(zhaoq): CallOption is omitted. Add support when it is needed.
	callHdr := &transport.CallHdr{
		Host:   cc.authority,
		Method: method,
		Flush:  desc.ServerStreams && desc.ClientStreams,
	}
	if cc.dopts.cp != nil {
		callHdr.SendCompress = cc.dopts.cp.Type()
	}
	cs := &clientStream{
		desc:    desc,
		codec:   cc.dopts.codec,
		cp:      cc.dopts.cp,
		dc:      cc.dopts.dc,
		tracing: EnableTracing,
	}
	if cc.dopts.cp != nil {
		callHdr.SendCompress = cc.dopts.cp.Type()
		cs.cbuf = new(bytes.Buffer)
	}
	if cs.tracing {
		cs.trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
		cs.trInfo.firstLine.client = true
		if deadline, ok := ctx.Deadline(); ok {
			cs.trInfo.firstLine.deadline = deadline.Sub(time.Now())
		}
		cs.trInfo.tr.LazyLog(&cs.trInfo.firstLine, false)
		ctx = trace.NewContext(ctx, cs.trInfo.tr)
	}
	s, err := t.NewStream(ctx, callHdr)
	if err != nil {
		cs.finish(err)
		return nil, toRPCErr(err)
	}
	cs.t = t
	cs.s = s
	cs.p = &parser{r: s}
	// Listen on ctx.Done() to detect cancellation when there are no pending
	// I/O operations on this stream.
	go func() {
		select {
		case <-t.Error():
			// Incur transport error, simply exit.
		case <-s.Context().Done():
			err := s.Context().Err()
			cs.finish(err)
			cs.closeTransportStream(transport.ContextErr(err))
		}
	}()
	return cs, nil
}
func (bp *Backplane) handleStats(w http.ResponseWriter, req *http.Request) {
	tr := trace.New("backend.internalstats", req.RequestURI)
	defer tr.Finish()

	hostname, err := os.Hostname()
	if err != nil {
		glog.Error("Unable to obtain hostname: ", err)
		tr.LazyPrintf("Unable to obtain hostname: %v", err)
	}

	var data = struct {
		Backends                        []*Backend
		Frontends                       []*Frontend
		Pid                             int
		Hostname                        string
		Uptime                          time.Duration
		LimitAs, LimitFsize, LimitNofile syscall.Rlimit
	}{
		Backends:  bp.Backends,
		Frontends: bp.Frontends,
		Pid:       os.Getpid(),
		Hostname:  hostname,
		Uptime:    time.Since(starttime),
	}
	syscall.Getrlimit(syscall.RLIMIT_AS, &data.LimitAs)
	syscall.Getrlimit(syscall.RLIMIT_FSIZE, &data.LimitFsize)
	syscall.Getrlimit(syscall.RLIMIT_NOFILE, &data.LimitNofile)

	err = StatsTemplate().Execute(w, &data)
	if err != nil {
		glog.Errorf("unable to execute template: %s", err)
		tr.LazyPrintf("unable to execute template: %s", err)
	}
}
// NewSession creates and initializes a new Session object.
// ctx can be nil (in which case the Executor's context will be used).
// remote can be nil.
func NewSession(ctx context.Context, args SessionArgs, e *Executor, remote net.Addr) *Session {
	s := &Session{
		Database: args.Database,
		User:     args.User,
		Location: time.UTC,
	}
	cfg, cache := e.getSystemConfig()
	s.planner = planner{
		leaseMgr:      e.ctx.LeaseManager,
		systemConfig:  cfg,
		databaseCache: cache,
		session:       s,
		execCtx:       &e.ctx,
	}
	s.PreparedStatements = makePreparedStatements(s)
	s.PreparedPortals = makePreparedPortals(s)
	remoteStr := ""
	if remote != nil {
		remoteStr = remote.String()
	}
	s.Trace = trace.New("sql."+args.User, remoteStr)
	s.Trace.SetMaxEvents(100)
	s.context, s.cancel = context.WithCancel(ctx)
	return s
}
// netTraceIntegrator is passed into basictracer as NewSpanEventListener
// and causes all traces to be registered with the net/trace endpoint.
func netTraceIntegrator() func(basictracer.SpanEvent) {
	var tr trace.Trace
	return func(e basictracer.SpanEvent) {
		switch t := e.(type) {
		case basictracer.EventCreate:
			tr = trace.New("tracing", t.OperationName)
			tr.SetMaxEvents(maxLogsPerSpan)
		case basictracer.EventFinish:
			tr.Finish()
		case basictracer.EventTag:
			tr.LazyPrintf("%s:%v", t.Key, t.Value)
		case basictracer.EventLogFields:
			// TODO(radu): when LightStep supports arbitrary fields, we should make
			// the formatting of the message consistent with that. Until then we treat
			// legacy events that just have an "event" key specially.
			if len(t.Fields) == 1 && t.Fields[0].Key() == "event" {
				tr.LazyPrintf("%s", t.Fields[0].Value())
			} else {
				var buf bytes.Buffer
				for i, f := range t.Fields {
					if i > 0 {
						buf.WriteByte(' ')
					}
					fmt.Fprintf(&buf, "%s:%v", f.Key(), f.Value())
				}
				tr.LazyPrintf("%s", buf.String())
			}
		case basictracer.EventLog:
			panic("EventLog is deprecated")
		}
	}
}
func (s *CountersCollectingRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) {
	tr := trace.New(s.TraceFamily, r.RequestURI)
	tr.LazyPrintf("Request: %#v", r)
	defer tr.Finish()

	if s.RateLimiter != nil {
		if !s.RateLimiter.Accepted() {
			tr.LazyPrintf("Rate limited")
			tr.SetError()
			return nil, RateLimited
		}
	}

	s.stats.in()
	resp, err := s.RoundTripper.RoundTrip(r)
	s.stats.out()

	if resp != nil && err == nil {
		respBucket := resp.StatusCode / 100
		if respBucket == 0 {
			respBucket = 2
		}
		if respBucket > 0 && respBucket <= 5 {
			atomic.AddInt64(&s.stats.CountersByResponseCode[respBucket], 1)
		}
	}
	if err != nil {
		tr.LazyPrintf("Error in roundtripper: %s", err)
		tr.SetError()
	}
	tr.LazyPrintf("Response: %v", resp)
	return resp, err
}
func (ntwe *netTraceWrapExtractor) JoinTrace(opName string, carrier interface{}) (opentracing.Span, error) {
	sp, err := ntwe.wrap.JoinTrace(opName, carrier)
	if err != nil {
		return nil, err
	}
	return &netTraceWrapSpan{Span: sp, tr: trace.New(family, opName)}, nil
}
// Common handler for both javascript responses.
func (s *Server) HTTPResponse(w http.ResponseWriter, r *http.Request) {
	tr := trace.New("http", "response")
	defer tr.Finish()

	if !s.ratelimiter.Allowed() {
		tr.LazyPrintf("rate limited")
		tr.SetError()
		http.Error(w, "too many requests", 429)
		return
	}

	key := keyFromRequest(r)
	tr.LazyPrintf("key: %s", key)

	op, ok := s.getOp(key)
	if !ok {
		tr.LazyPrintf("404 error")
		tr.SetError()
		http.NotFound(w, r)
		return
	}

	buf := make([]byte, 4*1024)
	n, err := r.Body.Read(buf)
	if err != nil && err != io.EOF {
		tr.LazyPrintf("400 error reading body: %v", err)
		tr.SetError()
		http.Error(w, "error reading body", http.StatusBadRequest)
		return
	}

	op.reply <- buf[:n]
	w.Write([]byte("success"))
}
// NewClientStream creates a new Stream for the client side. This is called
// by generated code.
func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
	// TODO(zhaoq): CallOption is omitted. Add support when it is needed.
	callHdr := &transport.CallHdr{
		Host:   cc.authority,
		Method: method,
	}
	cs := &clientStream{
		desc:    desc,
		codec:   cc.dopts.codec,
		tracing: EnableTracing,
	}
	if cs.tracing {
		cs.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
		cs.traceInfo.firstLine.client = true
		if deadline, ok := ctx.Deadline(); ok {
			cs.traceInfo.firstLine.deadline = deadline.Sub(time.Now())
		}
		cs.traceInfo.tr.LazyLog(&cs.traceInfo.firstLine, false)
	}
	t, err := cc.wait(ctx)
	if err != nil {
		return nil, toRPCErr(err)
	}
	s, err := t.NewStream(ctx, callHdr)
	if err != nil {
		return nil, toRPCErr(err)
	}
	cs.t = t
	cs.s = s
	cs.p = &parser{s: s}
	return cs, nil
}
// netTraceIntegrator is passed into basictracer as NewSpanEventListener
// and causes all traces to be registered with the net/trace endpoint.
func netTraceIntegrator() func(basictracer.SpanEvent) {
	var tr trace.Trace
	return func(e basictracer.SpanEvent) {
		switch t := e.(type) {
		case basictracer.EventCreate:
			tr = trace.New("tracing", t.OperationName)
			tr.SetMaxEvents(maxLogsPerSpan)
		case basictracer.EventFinish:
			tr.Finish()
		case basictracer.EventTag:
			tr.LazyPrintf("%s:%v", t.Key, t.Value)
		case basictracer.EventLogFields:
			var buf bytes.Buffer
			for i, f := range t.Fields {
				if i > 0 {
					buf.WriteByte(' ')
				}
				fmt.Fprintf(&buf, "%s:%v", f.Key(), f.Value())
			}
			tr.LazyPrintf("%s", buf.String())
		case basictracer.EventLog:
			if t.Payload != nil {
				tr.LazyPrintf("%s (payload %v)", t.Event, t.Payload)
			} else {
				tr.LazyPrintf("%s", t.Event)
			}
		}
	}
}
func (c *cachingResolver) Maintain() {
	go c.back.Maintain()

	for range time.Tick(maintenancePeriod) {
		tr := trace.New("dnstox.Cache", "GC")
		var total, expired int

		c.mu.Lock()
		total = len(c.answer)
		for q, ans := range c.answer {
			newTTL := getTTL(ans) - maintenancePeriod
			if newTTL > 0 {
				// Don't modify in place, create a copy and override.
				// That way, we avoid races with users that have gotten a
				// cached answer and are returning it.
				newans := copyRRSlice(ans)
				setTTL(newans, newTTL)
				c.answer[q] = newans
				continue
			}

			delete(c.answer, q)
			expired++
		}
		c.mu.Unlock()

		tr.LazyPrintf("total: %d expired: %d", total, expired)
		tr.Finish()
	}
}
func (c *v3Conn) parseOptions(data []byte) error {
	defer func() {
		c.session.Trace = trace.New("sql."+c.opts.user, c.conn.RemoteAddr().String())
		c.session.Trace.SetMaxEvents(100)
	}()
	buf := readBuffer{msg: data}
	for {
		key, err := buf.getString()
		if err != nil {
			return util.Errorf("error reading option key: %s", err)
		}
		if len(key) == 0 {
			return nil
		}
		value, err := buf.getString()
		if err != nil {
			return util.Errorf("error reading option value: %s", err)
		}
		switch key {
		case "database":
			c.session.Database = value
		case "user":
			c.opts.user = value
		default:
			if log.V(1) {
				log.Warningf("unrecognized configuration parameter %q", key)
			}
		}
	}
}
func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc) (err error) {
	ss := &serverStream{
		t:       t,
		s:       stream,
		p:       &parser{s: stream},
		codec:   s.opts.codec,
		tracing: EnableTracing,
	}
	if ss.tracing {
		ss.traceInfo.tr = trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method())
		ss.traceInfo.firstLine.client = false
		ss.traceInfo.tr.LazyLog(&ss.traceInfo.firstLine, false)
		defer func() {
			ss.mu.Lock()
			if err != nil && err != io.EOF {
				ss.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
				ss.traceInfo.tr.SetError()
			}
			ss.traceInfo.tr.Finish()
			ss.traceInfo.tr = nil
			ss.mu.Unlock()
		}()
	}
	if appErr := sd.Handler(srv.server, ss); appErr != nil {
		if err, ok := appErr.(rpcError); ok {
			ss.statusCode = err.code
			ss.statusDesc = err.desc
		} else {
			ss.statusCode = convertCode(appErr)
			ss.statusDesc = appErr.Error()
		}
	}
	return t.WriteStatus(ss.s, ss.statusCode, ss.statusDesc)
}
// StartOperation creates a new LogEntry with the current time.
// Should be immediately followed by a deferred call to FinishOperation.
func StartOperation(name string, path string) *LogEntry {
	return &LogEntry{
		name:      name,
		path:      path,
		startTime: time.Now(),
		trace:     trace.New(name, path),
	}
}
// Open implements http.FileSystem.
func (t *traceFS) Open(path string) (http.File, error) {
	tr := trace.New(t.family, path)
	ctx := trace.NewContext(context.Background(), tr)
	f, err := t.FileSystem.Open(ctx, path)
	// TODO: Decide where this should be in general (requests can be on-going).
	tr.Finish()
	return f, err
}
func routeTracing(route Route, handler xhandler.HandlerC) xhandler.HandlerC {
	rs := route.String()
	return xhandler.HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
		tr := trace.New(rs, fmt.Sprintf("%s %s", r.Method, r.URL.Path))
		ctx = trace.NewContext(ctx, tr)
		handler.ServeHTTPC(ctx, w, r)
		tr.Finish()
	})
}
func StartFileOperation(name, path string, args string) *LogEntry {
	name = "File " + name
	return &LogEntry{
		name:      name,
		path:      path,
		args:      args,
		startTime: time.Now(),
		trace:     trace.New(name, args),
	}
}
func (t *Tracer) newTrace(family string, id, name string) *Trace {
	nt := ntrace.New(family, name)
	nt.SetMaxEvents(100)
	return &Trace{
		ID:     id,
		Name:   name,
		family: family,
		tracer: t,
		nTrace: nt,
	}
}
func (t *TraceFS) newTrace(ctx context.Context, name, ft string, args ...interface{}) (trace.Trace, context.Context) {
	argsFmt := fmt.Sprintf(ft, args...)
	v := fmt.Sprintf("%s-%s %s", t.Id, t.MountConfig.FSName, argsFmt)
	if ctx == nil {
		ctx = context.TODO()
	}
	r := trace.New(name, v)
	ctx = trace.NewContext(ctx, r)
	return r, ctx
}
func WrapRequest(req *http.Request, traceFamily string) *Request {
	host, _, err := net.SplitHostPort(req.RemoteAddr)
	if err != nil {
		host = req.RemoteAddr
	}
	prefix := fmt.Sprintf("%s - %s - ", host, traceFamily)
	return &Request{
		Request: req,
		tr:      trace.New(traceFamily, req.URL.Path),
		log:     log.New(os.Stdout, prefix, log.LstdFlags),
	}
}
// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled.
// If tracing is not enabled, it returns nil.
func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) {
	if !EnableTracing {
		return nil
	}
	trInfo = &traceInfo{
		tr: trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()),
	}
	trInfo.firstLine.client = false
	trInfo.firstLine.remoteAddr = st.RemoteAddr()
	stream.TraceContext(trInfo.tr)
	if dl, ok := stream.Context().Deadline(); ok {
		trInfo.firstLine.deadline = dl.Sub(time.Now())
	}
	return trInfo
}
// StaticHandler returns an HTTP handler for the given path and content.
func (s *Server) StaticHandler(path, content string) func(w http.ResponseWriter, r *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		tr := trace.New("http", path)
		defer tr.Finish()

		if !s.ratelimiter.Allowed() {
			tr.LazyPrintf("rate limited")
			tr.SetError()
			http.Error(w, "too many requests", 429)
			return
		}

		w.Header().Set("Content-Type", mime.TypeByExtension(filepath.Ext(path)))
		w.Write([]byte(content))
	}
}
func (s *Server) Query(ctx context.Context, in *pb.RawMsg) (*pb.RawMsg, error) {
	tr := trace.New("grpctodns", "Query")
	defer tr.Finish()

	r := &dns.Msg{}
	err := r.Unpack(in.Data)
	if err != nil {
		return nil, err
	}

	if glog.V(3) {
		tr.LazyPrintf(util.QuestionsToString(r.Question))
	}

	// TODO: we should create our own IDs, in case different users pick the
	// same id and we pass that upstream.
	from_up, err := dns.Exchange(r, s.Upstream)
	if err != nil {
		msg := fmt.Sprintf("dns exchange error: %v", err)
		glog.Info(msg)
		tr.LazyPrintf(msg)
		tr.SetError()
		return nil, err
	}

	if from_up == nil {
		err = fmt.Errorf("no response from upstream")
		tr.LazyPrintf(err.Error())
		tr.SetError()
		return nil, err
	}

	if glog.V(3) {
		util.TraceAnswer(tr, from_up)
	}

	buf, err := from_up.Pack()
	if err != nil {
		glog.Infof("error packing: %v", err)
		tr.LazyPrintf("error packing: %v", err)
		tr.SetError()
		return nil, err
	}

	return &pb.RawMsg{Data: buf}, nil
}
// Serve the key-specific index.
func (s *Server) IndexHandler(w http.ResponseWriter, r *http.Request) {
	tr := trace.New("http", "index")
	defer tr.Finish()

	if !s.ratelimiter.Allowed() {
		tr.LazyPrintf("rate limited")
		tr.SetError()
		http.Error(w, "too many requests", 429)
		return
	}

	key := keyFromRequest(r)
	tr.LazyPrintf("key: %s", key)

	op, ok := s.getOp(key)
	if !ok {
		tr.LazyPrintf("404 error")
		tr.SetError()
		http.NotFound(w, r)
		return
	}

	var err error
	data := struct {
		Message string
		Request string
	}{
		op.msg,
		string(op.prepared),
	}

	switch op.rtype {
	case pb.Prepare_REGISTER:
		err = registerTmpl.Execute(w, data)
	case pb.Prepare_AUTHENTICATE:
		err = authenticateTmpl.Execute(w, data)
	default:
		err = fmt.Errorf("unknown operation type %v", op.rtype)
	}

	if err != nil {
		tr.LazyPrintf("render error: %v", err)
		tr.SetError()
		http.Error(w, "error rendering", http.StatusBadRequest)
		return
	}
}
func (s *Server) serveStreams(st transport.ServerTransport) {
	defer s.removeConn(st)
	defer st.Close()
	var wg sync.WaitGroup
	st.HandleStreams(func(stream *transport.Stream) {
		wg.Add(1)
		go func() {
			defer wg.Done()
			s.handleStream(st, stream, s.traceInfo(st, stream))
		}()
	}, func(ctx context.Context, method string) context.Context {
		if !EnableTracing {
			return ctx
		}
		tr := trace.New("grpc.Recv."+methodFamily(method), method)
		return trace.NewContext(ctx, tr)
	})
	wg.Wait()
}
// NewClientStream creates a new Stream for the client side. This is called
// by generated code.
func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
	var (
		t   transport.ClientTransport
		err error
	)
	t, err = cc.dopts.picker.Pick(ctx)
	if err != nil {
		return nil, toRPCErr(err)
	}
	// TODO(zhaoq): CallOption is omitted. Add support when it is needed.
	callHdr := &transport.CallHdr{
		Host:   cc.authority,
		Method: method,
	}
	cs := &clientStream{
		desc:    desc,
		codec:   cc.dopts.codec,
		tracing: EnableTracing,
	}
	if cs.tracing {
		cs.trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
		cs.trInfo.firstLine.client = true
		if deadline, ok := ctx.Deadline(); ok {
			cs.trInfo.firstLine.deadline = deadline.Sub(time.Now())
		}
		cs.trInfo.tr.LazyLog(&cs.trInfo.firstLine, false)
		ctx = trace.NewContext(ctx, cs.trInfo.tr)
	}
	s, err := t.NewStream(ctx, callHdr)
	if err != nil {
		return nil, toRPCErr(err)
	}
	cs.t = t
	cs.s = s
	cs.p = &parser{s: s}
	// Listen on ctx.Done() to detect cancellation when there are no pending
	// I/O operations on this stream.
	go func() {
		<-s.Context().Done()
		cs.closeTransportStream(transport.ContextErr(s.Context().Err()))
	}()
	return cs, nil
}
// NewSession creates and initializes a new Session object.
// remote can be nil.
func NewSession(args SessionArgs, e *Executor, remote net.Addr) *Session {
	s := Session{Location: time.UTC}
	s.Database = args.Database
	s.User = args.User
	cfg, cache := e.getSystemConfig()
	s.planner = planner{
		leaseMgr:      e.ctx.LeaseManager,
		systemConfig:  cfg,
		databaseCache: cache,
		session:       &s,
		execCtx:       &e.ctx,
	}
	remoteStr := ""
	if remote != nil {
		remoteStr = remote.String()
	}
	s.Trace = trace.New("sql."+args.User, remoteStr)
	s.Trace.SetMaxEvents(100)
	return &s
}
func (b *Backend) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	tr := trace.New("backend."+b.Cf.Name, r.RequestURI)
	tr.LazyPrintf("Request: %#v", r)
	defer tr.Finish()

	glog.V(3).Infof("Backend %s serving %s", b.Cf.Name, r.Host)

	ctx := context.GetRequestContext(r)
	ctx.Log.BackendName = b.Cf.Name
	ctx.Tr.LazyPrintf("using backend %s", b.Cf.Name)
	defer ctx.Tr.LazyPrintf("backend done")

	b.proxy.ServeHTTP(w, r)

	if wr, ok := w.(*stats.StatsCollectingResponseWriter); ok {
		tr.LazyPrintf("Response %d", wr.ResponseCode)
		tr.LazyPrintf("Response headers %v", wr.Header())
		if wr.IsErrorResponse() {
			tr.SetError()
		}
	}
}
// NewSession creates and initializes a new Session object.
// remote can be nil.
func NewSession(args SessionArgs, e *Executor, remote net.Addr) *Session {
	s := Session{}
	s.Database = args.Database
	s.User = args.User
	cfg, cache := e.getSystemConfig()
	s.planner = planner{
		// evalCtx is set in the Executor, for each Prepare or Execute.
		evalCtx:       parser.EvalContext{},
		leaseMgr:      e.ctx.LeaseManager,
		systemConfig:  cfg,
		databaseCache: cache,
		session:       &s,
		execCtx:       &e.ctx,
	}
	remoteStr := ""
	if remote != nil {
		remoteStr = remote.String()
	}
	s.Trace = trace.New("sql."+args.User, remoteStr)
	s.Trace.SetMaxEvents(100)
	return &s
}
// Invoke sends the RPC request on the wire and returns after the response is received.
// Invoke is called by generated code. Users can also call Invoke directly when it
// is really needed in their use cases.
func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (err error) {
	c := defaultCallInfo
	for _, o := range opts {
		if err := o.before(&c); err != nil {
			return toRPCErr(err)
		}
	}
	defer func() {
		for _, o := range opts {
			o.after(&c)
		}
	}()
	if EnableTracing {
		c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
		defer c.traceInfo.tr.Finish()
		c.traceInfo.firstLine.client = true
		if deadline, ok := ctx.Deadline(); ok {
			c.traceInfo.firstLine.deadline = deadline.Sub(time.Now())
		}
		c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false)
		// TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set.
		defer func() {
			if err != nil {
				c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
				c.traceInfo.tr.SetError()
			}
		}()
	}
	topts := &transport.Options{
		Last:  true,
		Delay: false,
	}
	for {
		var (
			err    error
			t      transport.ClientTransport
			stream *transport.Stream
			// Record the put handler from Balancer.Get(...). It is called once the
			// RPC has completed or failed.
			put func()
		)
		// TODO(zhaoq): Need a formal spec of fail-fast.
		callHdr := &transport.CallHdr{
			Host:   cc.authority,
			Method: method,
		}
		if cc.dopts.cp != nil {
			callHdr.SendCompress = cc.dopts.cp.Type()
		}
		gopts := BalancerGetOptions{
			BlockingWait: !c.failFast,
		}
		t, put, err = cc.getTransport(ctx, gopts)
		if err != nil {
			// TODO(zhaoq): Probably revisit the error handling.
			if _, ok := err.(*rpcError); ok {
				return err
			}
			if err == errConnClosing {
				if c.failFast {
					return Errorf(codes.Unavailable, "%v", errConnClosing)
				}
				continue
			}
			// All the other errors are treated as Internal errors.
			return Errorf(codes.Internal, "%v", err)
		}
		if c.traceInfo.tr != nil {
			c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
		}
		stream, err = sendRequest(ctx, cc.dopts.codec, cc.dopts.cp, callHdr, t, args, topts)
		if err != nil {
			if put != nil {
				put()
				put = nil
			}
			if _, ok := err.(transport.ConnectionError); ok {
				if c.failFast {
					return toRPCErr(err)
				}
				continue
			}
			return toRPCErr(err)
		}
		// Receive the response
		err = recvResponse(cc.dopts, t, &c, stream, reply)
		if err != nil {
			if put != nil {
				put()
				put = nil
			}
			if _, ok := err.(transport.ConnectionError); ok {
				if c.failFast {
					return toRPCErr(err)
				}
				continue
			}
			t.CloseStream(stream, err)
			return toRPCErr(err)
		}
		if c.traceInfo.tr != nil {
			c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true)
		}
		t.CloseStream(stream, nil)
		if put != nil {
			put()
			put = nil
		}
		return Errorf(stream.StatusCode(), "%s", stream.StatusDesc())
	}
}
// Open implements store.FileSystem.
func (r *rootTraceFS) Open(ctx context.Context, path string) (http.File, error) {
	tr := trace.New("request", path)
	defer tr.Finish()
	return r.FileSystem.Open(trace.NewContext(ctx, tr), path)
}