// NewContext creates an rpc Context with the supplied values.
func NewContext(baseCtx *base.Context, clock *hlc.Clock, stopper *stop.Stopper) *Context {
	ctx := &Context{
		Context: baseCtx,
	}
	if clock != nil {
		ctx.localClock = clock
	} else {
		ctx.localClock = hlc.NewClock(hlc.UnixNano)
	}
	ctx.Stopper = stopper
	ctx.RemoteClocks = newRemoteClockMonitor(clock, 10*defaultHeartbeatInterval)
	ctx.HeartbeatInterval = defaultHeartbeatInterval
	ctx.HeartbeatTimeout = 2 * defaultHeartbeatInterval

	stopper.RunWorker(func() {
		// Close all cached connections once the stopper signals quiescence.
		<-stopper.ShouldQuiesce()

		ctx.conns.Lock()
		for key, meta := range ctx.conns.cache {
			ctx.removeConn(key, meta.conn)
		}
		ctx.conns.Unlock()
	})

	return ctx
}
// ListenAndServeGRPC creates a listener and serves the specified grpc Server
// on it, closing the listener when signalled by the stopper.
func ListenAndServeGRPC(stopper *stop.Stopper, server *grpc.Server, addr net.Addr) (net.Listener, error) {
	ln, err := net.Listen(addr.Network(), addr.String())
	if err != nil {
		return ln, err
	}

	stopper.RunWorker(func() {
		// Stop the server (and thereby close the listener) on quiescence.
		<-stopper.ShouldQuiesce()
		server.Stop()
	})

	stopper.RunWorker(func() {
		// Serve returns once the listener is closed; FatalIfUnexpected
		// tolerates errors caused by that shutdown.
		FatalIfUnexpected(server.Serve(ln))
	})

	return ln, nil
}
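// Usage sketch (not from the original source): start a grpc.Server on an
// ephemeral local port and let the stopper drive shutdown. grpc.NewServer and
// net.ResolveTCPAddr are standard APIs; the function name and the choice of
// address are illustrative assumptions.
func serveGRPCSketch(stopper *stop.Stopper) (net.Listener, error) {
	grpcServer := grpc.NewServer()
	// Register services on grpcServer here, before it starts serving.
	addr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:0")
	if err != nil {
		return nil, err
	}
	// The returned listener reports the port that was actually bound.
	return ListenAndServeGRPC(stopper, grpcServer, addr)
}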
// NewContext creates an rpc Context with the supplied values.
func NewContext(baseCtx *base.Context, hlcClock *hlc.Clock, stopper *stop.Stopper) *Context {
	ctx := &Context{
		Context: baseCtx,
	}
	if hlcClock != nil {
		ctx.localClock = hlcClock
	} else {
		ctx.localClock = hlc.NewClock(hlc.UnixNano)
	}
	// breakerClock exposes the local clock to the connection circuit breakers.
	ctx.breakerClock = breakerClock{
		clock: ctx.localClock,
	}
	var cancel context.CancelFunc
	ctx.masterCtx, cancel = context.WithCancel(context.Background())
	ctx.Stopper = stopper
	ctx.RemoteClocks = newRemoteClockMonitor(ctx.localClock, 10*defaultHeartbeatInterval)
	ctx.HeartbeatInterval = defaultHeartbeatInterval
	ctx.HeartbeatTimeout = 2 * defaultHeartbeatInterval
	ctx.conns.cache = make(map[string]*connMeta)

	stopper.RunWorker(func() {
		<-stopper.ShouldQuiesce()

		// Cancel the master context, then tear down all cached connections.
		cancel()
		ctx.conns.Lock()
		for key, meta := range ctx.conns.cache {
			meta.Do(func() {
				// Make sure initialization is not in progress when we're removing the
				// conn. We need to set the error in case we win the race against the
				// real initialization code.
				if meta.err == nil {
					meta.err = &roachpb.NodeUnavailableError{}
				}
			})
			ctx.removeConnLocked(key, meta)
		}
		ctx.conns.Unlock()
	})

	return ctx
}
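// Usage sketch (not from the original source): constructing an rpc Context for
// a test. stop.NewStopper and hlc.NewClock appear above; the Insecure field on
// base.Context and the function name are assumptions for illustration. Passing
// a nil clock would make NewContext fall back to a fresh wall-clock-backed
// hlc.Clock instead.
func newTestRPCContextSketch() (*Context, *stop.Stopper) {
	stopper := stop.NewStopper()
	clock := hlc.NewClock(hlc.UnixNano)
	rpcCtx := NewContext(&base.Context{Insecure: true}, clock, stopper)
	// The caller is responsible for eventually stopping the stopper, which
	// quiesces the worker that tears down cached connections.
	return rpcCtx, stopper
}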
// InitSenderForLocalTestCluster initializes a TxnCoordSender that can be used
// with LocalTestCluster.
func InitSenderForLocalTestCluster(
	nodeDesc *roachpb.NodeDescriptor,
	tracer opentracing.Tracer,
	clock *hlc.Clock,
	latency time.Duration,
	stores client.Sender,
	stopper *stop.Stopper,
	gossip *gossip.Gossip,
) client.Sender {
	retryOpts := base.DefaultRetryOptions()
	retryOpts.Closer = stopper.ShouldQuiesce()
	senderTransportFactory := SenderTransportFactory(tracer, stores)
	distSender := NewDistSender(&DistSenderConfig{
		Clock: clock,
		RangeDescriptorCacheSize: defaultRangeDescriptorCacheSize,
		RangeLookupMaxRanges:     defaultRangeLookupMaxRanges,
		LeaseHolderCacheSize:     defaultLeaseHolderCacheSize,
		RPCRetryOptions:          &retryOpts,
		nodeDescriptor:           nodeDesc,
		TransportFactory: func(
			opts SendOptions,
			rpcContext *rpc.Context,
			replicas ReplicaSlice,
			args roachpb.BatchRequest,
		) (Transport, error) {
			transport, err := senderTransportFactory(opts, rpcContext, replicas, args)
			if err != nil {
				return nil, err
			}
			// Wrap the transport to inject the configured artificial latency.
			return &localTestClusterTransport{transport, latency}, nil
		},
		RangeDescriptorDB: stores.(RangeDescriptorDB), // for descriptor lookup
	}, gossip)

	ctx := tracing.WithTracer(context.Background(), tracer)
	return NewTxnCoordSender(
		ctx, distSender, clock, false /* !linearizable */, stopper, MakeTxnMetrics(),
	)
}
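// Usage sketch (not part of the original source): how a LocalTestCluster-style
// harness might wrap the returned sender in a client.DB. nodeDesc, stores,
// clock, stopper, and g are assumed to be supplied by the harness, and
// client.NewDB is assumed to accept a client.Sender; tracing.NewTracer comes
// from this codebase's util/tracing package.
func makeLocalTestDBSketch(
	nodeDesc *roachpb.NodeDescriptor,
	clock *hlc.Clock,
	stores client.Sender,
	stopper *stop.Stopper,
	g *gossip.Gossip,
) *client.DB {
	sender := InitSenderForLocalTestCluster(
		nodeDesc, tracing.NewTracer(), clock,
		0 /* no artificial latency */, stores, stopper, g,
	)
	return client.NewDB(sender)
}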