// NewServer creates a gRPC server which has no service registered and has not
// started to accept requests yet.
func NewServer(opt ...ServerOption) *Server {
	// 1. Apply the supplied options.
	var opts options
	for _, o := range opt {
		o(&opts)
	}
	// 2. Choose the codec; defaults to protobuf.
	if opts.codec == nil {
		// Set the default codec.
		opts.codec = protoCodec{}
	}
	// 3. Create a Server. By default lis and conns are empty.
	s := &Server{
		lis:   make(map[net.Listener]bool),
		opts:  opts,
		conns: make(map[io.Closer]bool),
		m:     make(map[string]*service),
	}
	// 3.1. If tracing is enabled, record the caller's file and line via runtime.Caller.
	if EnableTracing {
		_, file, line, _ := runtime.Caller(1)
		s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
	}
	return s
}
// NewConn creates a Conn.
func NewConn(cc *ClientConn) (*Conn, error) {
	if cc.target == "" {
		return nil, ErrUnspecTarget
	}
	c := &Conn{
		target:       cc.target,
		dopts:        cc.dopts,
		shutdownChan: make(chan struct{}),
	}
	if EnableTracing {
		c.events = trace.NewEventLog("grpc.ClientConn", c.target)
	}
	if !c.dopts.insecure {
		var ok bool
		for _, cd := range c.dopts.copts.AuthOptions {
			if _, ok := cd.(credentials.TransportAuthenticator); !ok {
				continue
			}
			ok = true
		}
		if !ok {
			return nil, ErrNoTransportSecurity
		}
	} else {
		for _, cd := range c.dopts.copts.AuthOptions {
			if cd.RequireTransportSecurity() {
				return nil, ErrCredentialsMisuse
			}
		}
	}
	colonPos := strings.LastIndex(c.target, ":")
	if colonPos == -1 {
		colonPos = len(c.target)
	}
	c.authority = c.target[:colonPos]
	if c.dopts.codec == nil {
		// Set the default codec.
		c.dopts.codec = protoCodec{}
	}
	c.stateCV = sync.NewCond(&c.mu)
	if c.dopts.block {
		if err := c.resetTransport(false); err != nil {
			c.Close()
			return nil, err
		}
		// Start to monitor the error status of transport.
		go c.transportMonitor()
	} else {
		// Start a goroutine connecting to the server asynchronously.
		go func() {
			if err := c.resetTransport(false); err != nil {
				grpclog.Printf("Failed to dial %s: %v; please retry.", c.target, err)
				c.Close()
				return
			}
			c.transportMonitor()
		}()
	}
	return c, nil
}
// makeBaseQueue returns a new instance of baseQueue with the
// specified shouldQueue function to determine which replicas to queue
// and maxSize to limit the growth of the queue. Note that
// maxSize doesn't prevent new replicas from being added, it just
// limits the total size. Higher priority replicas can still be
// added; their addition simply removes the lowest priority replica.
func makeBaseQueue(name string, impl queueImpl, gossip *gossip.Gossip, maxSize int) baseQueue {
	return baseQueue{
		name:     name,
		impl:     impl,
		gossip:   gossip,
		maxSize:  maxSize,
		incoming: make(chan struct{}, 1),
		Locker:   new(sync.Mutex),
		replicas: map[roachpb.RangeID]*replicaItem{},
		eventLog: queueLog{
			traceLog: trace.NewEventLog("queue", name),
			prefix:   fmt.Sprintf("[%s] ", name),
		},
	}
}
func (s *ProjectsService) SlowSearch(req *projects.ProjectsSearchRequest, srv projects.ProjectsService_SlowSearchServer) error {
	TraceClient(srv.Context())
	l := trace.NewEventLog("ProjectsService", "SlowSearch")
	defer l.Finish()
	found := s.filter(req.SearchTerms)
	l.Printf("found %d projects for terms '%s'", len(found), req.SearchTerms)
	for _, p := range found {
		time.Sleep(10 * time.Second)
		l.Printf("sending project '%s'", p.Name)
		if err := srv.Send(p); err != nil {
			l.Errorf("problem sending project: %v", err)
			return err
		}
	}
	return nil
}
// makeBaseQueue returns a new instance of baseQueue with the
// specified shouldQueue function to determine which replicas to queue
// and maxSize to limit the growth of the queue. Note that
// maxSize doesn't prevent new replicas from being added, it just
// limits the total size. Higher priority replicas can still be
// added; their addition simply removes the lowest priority replica.
func makeBaseQueue(
	name string, impl queueImpl, gossip *gossip.Gossip, cfg queueConfig,
) baseQueue {
	bq := baseQueue{
		name:        name,
		impl:        impl,
		gossip:      gossip,
		queueConfig: cfg,
		incoming:    make(chan struct{}, 1),
		eventLog: queueLog{
			traceLog: trace.NewEventLog("queue", name),
			prefix:   fmt.Sprintf("[%s] ", name),
		},
	}
	bq.mu.Locker = new(sync.Mutex)
	bq.mu.replicas = map[roachpb.RangeID]*replicaItem{}
	return bq
}
// NewServer creates a gRPC server which has no service registered and has not
// started to accept requests yet.
func NewServer(opt ...ServerOption) *Server {
	var opts options
	opts.maxMsgSize = defaultMaxMsgSize
	for _, o := range opt {
		o(&opts)
	}
	if opts.codec == nil {
		// Set the default codec.
		opts.codec = protoCodec{}
	}
	s := &Server{
		lis:   make(map[net.Listener]bool),
		opts:  opts,
		conns: make(map[io.Closer]bool),
		m:     make(map[string]*service),
	}
	s.cv = sync.NewCond(&s.mu)
	s.ctx, s.cancel = context.WithCancel(context.Background())
	if EnableTracing {
		_, file, line, _ := runtime.Caller(1)
		s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
	}
	return s
}
func (cc *ClientConn) newAddrConn(addr Address, skipWait bool) error {
	ac := &addrConn{
		cc:           cc,
		addr:         addr,
		dopts:        cc.dopts,
		shutdownChan: make(chan struct{}),
	}
	if EnableTracing {
		ac.events = trace.NewEventLog("grpc.ClientConn", ac.addr.Addr)
	}
	if !ac.dopts.insecure {
		if ac.dopts.copts.TransportCredentials == nil {
			return errNoTransportSecurity
		}
	} else {
		if ac.dopts.copts.TransportCredentials != nil {
			return errCredentialsConflict
		}
		for _, cd := range ac.dopts.copts.PerRPCCredentials {
			if cd.RequireTransportSecurity() {
				return errTransportCredentialsMissing
			}
		}
	}
	// Insert ac into ac.cc.conns. This needs to be done before any getTransport(...) is called.
	ac.cc.mu.Lock()
	if ac.cc.conns == nil {
		ac.cc.mu.Unlock()
		return ErrClientConnClosing
	}
	stale := ac.cc.conns[ac.addr]
	ac.cc.conns[ac.addr] = ac
	ac.cc.mu.Unlock()
	if stale != nil {
		// There is an addrConn alive on ac.addr already. This could be due to
		// i) stale's Close is undergoing;
		// ii) a buggy Balancer notifies duplicated Addresses.
		stale.tearDown(errConnDrain)
	}
	ac.stateCV = sync.NewCond(&ac.mu)
	// skipWait may overwrite the decision in ac.dopts.block.
	if ac.dopts.block && !skipWait {
		if err := ac.resetTransport(false); err != nil {
			ac.tearDown(err)
			return err
		}
		// Start to monitor the error status of transport.
		go ac.transportMonitor()
	} else {
		// Start a goroutine connecting to the server asynchronously.
		go func() {
			if err := ac.resetTransport(false); err != nil {
				grpclog.Printf("Failed to dial %s: %v; please retry.", ac.addr.Addr, err)
				ac.tearDown(err)
				return
			}
			ac.transportMonitor()
		}()
	}
	return nil
}
// resetAddrConn creates an addrConn for addr and adds it to cc.conns.
// If there is an old addrConn for addr, it will be torn down, using tearDownErr as the reason.
// If tearDownErr is nil, errConnDrain will be used instead.
func (cc *ClientConn) resetAddrConn(addr Address, skipWait bool, tearDownErr error) error {
	ac := &addrConn{
		cc:    cc,
		addr:  addr,
		dopts: cc.dopts,
	}
	ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
	ac.stateCV = sync.NewCond(&ac.mu)
	if EnableTracing {
		ac.events = trace.NewEventLog("grpc.ClientConn", ac.addr.Addr)
	}
	if !ac.dopts.insecure {
		if ac.dopts.copts.TransportCredentials == nil {
			return errNoTransportSecurity
		}
	} else {
		if ac.dopts.copts.TransportCredentials != nil {
			return errCredentialsConflict
		}
		for _, cd := range ac.dopts.copts.PerRPCCredentials {
			if cd.RequireTransportSecurity() {
				return errTransportCredentialsMissing
			}
		}
	}
	// Track ac in cc. This needs to be done before any getTransport(...) is called.
	cc.mu.Lock()
	if cc.conns == nil {
		cc.mu.Unlock()
		return ErrClientConnClosing
	}
	stale := cc.conns[ac.addr]
	cc.conns[ac.addr] = ac
	cc.mu.Unlock()
	if stale != nil {
		// There is an addrConn alive on ac.addr already. This could be due to
		// 1) a buggy Balancer notifies duplicated Addresses;
		// 2) goaway was received, a new ac will replace the old ac.
		// The old ac should be deleted from cc.conns, but the
		// underlying transport should drain rather than close.
		if tearDownErr == nil {
			// tearDownErr is nil if resetAddrConn is called by
			// 1) Dial
			// 2) lbWatcher
			// In both cases, the stale ac should drain, not close.
			stale.tearDown(errConnDrain)
		} else {
			stale.tearDown(tearDownErr)
		}
	}
	// skipWait may overwrite the decision in ac.dopts.block.
	if ac.dopts.block && !skipWait {
		if err := ac.resetTransport(false); err != nil {
			ac.cc.mu.Lock()
			delete(ac.cc.conns, ac.addr)
			ac.cc.mu.Unlock()
			ac.tearDown(err)
			return err
		}
		// Start to monitor the error status of transport.
		go ac.transportMonitor()
	} else {
		// Start a goroutine connecting to the server asynchronously.
		go func() {
			if err := ac.resetTransport(false); err != nil {
				grpclog.Printf("Failed to dial %s: %v; please retry.", ac.addr.Addr, err)
				ac.cc.mu.Lock()
				delete(ac.cc.conns, ac.addr)
				ac.cc.mu.Unlock()
				ac.tearDown(err)
				return
			}
			ac.transportMonitor()
		}()
	}
	return nil
}
func NewEventLog(family, title string) *EventLog {
	return &EventLog{trace.NewEventLog(family, title)}
}
// WithEventLog creates and embeds a trace.EventLog in the context, causing
// future logging and event calls to go to the EventLog. The current context
// must not have an existing open span.
func WithEventLog(ctx context.Context, family, title string) context.Context {
	return withEventLogInternal(ctx, trace.NewEventLog(family, title))
}
// SetEventLog sets up an event log. Annotated contexts log into this event log
// (unless there's an open Span).
func (ac *AmbientContext) SetEventLog(family, title string) {
	ac.eventLog = &ctxEventLog{eventLog: trace.NewEventLog(family, title)}
}
func NewDirEntryCache(ttl time.Duration) DirEntryCache {
	return &realDirEntryCache{
		cache:    cache.New(ttl, CachePurgeInterval),
		eventLog: trace.NewEventLog("DirEntryCache", ""),
	}
}
func init() {
	indexLog = trace.NewEventLog("index", "Logger")
}
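All of these event logs end up on the /debug/events page that golang.org/x/net/trace registers on http.DefaultServeMux; by default trace.AuthRequest only admits requests from localhost. The following is a minimal, self-contained sketch, not taken from any of the code above, showing how a log created with trace.NewEventLog is populated with Printf/Errorf, closed with Finish, and inspected in a browser. The "example.Cache" family, the "main" title, and the ticker loop are illustrative choices.

package main

import (
	"log"
	"net/http"
	"time"

	"golang.org/x/net/trace"
)

func main() {
	// Create an event log for a hypothetical long-lived object. The family
	// and title are free-form strings that group entries on /debug/events.
	events := trace.NewEventLog("example.Cache", "main")
	defer events.Finish() // call Finish when the object is discarded

	go func() {
		for i := 0; ; i++ {
			events.Printf("tick %d", i) // ordinary entry
			if i%5 == 4 {
				// Errorf records the entry and marks it as an error,
				// which surfaces in the error columns of the page.
				events.Errorf("simulated failure at tick %d", i)
			}
			time.Sleep(time.Second)
		}
	}()

	// The trace package's init registered /debug/requests and /debug/events
	// on the default mux, so serving it is enough to browse the log at
	// http://localhost:6060/debug/events.
	log.Fatal(http.ListenAndServe("localhost:6060", nil))
}

The same pattern underlies the snippets above: grpc-go and cockroachdb attach an EventLog to a long-lived object (a Server, an addrConn, a queue, a cache) and write to it over the object's lifetime, calling Finish only when the object is torn down.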