// NewContext creates an rpc Context with the supplied values.
func NewContext(
	ambient log.AmbientContext, baseCtx *base.Config, hlcClock *hlc.Clock, stopper *stop.Stopper,
) *Context {
	ctx := &Context{
		Config: baseCtx,
	}
	if hlcClock != nil {
		ctx.localClock = hlcClock
	} else {
		ctx.localClock = hlc.NewClock(hlc.UnixNano)
	}
	ctx.breakerClock = breakerClock{
		clock: ctx.localClock,
	}
	var cancel context.CancelFunc
	ctx.masterCtx, cancel = context.WithCancel(ambient.AnnotateCtx(context.Background()))
	ctx.Stopper = stopper
	ctx.RemoteClocks = newRemoteClockMonitor(
		ctx.masterCtx, ctx.localClock, 10*defaultHeartbeatInterval)
	ctx.HeartbeatInterval = defaultHeartbeatInterval
	ctx.HeartbeatTimeout = 2 * defaultHeartbeatInterval
	ctx.conns.cache = make(map[string]*connMeta)

	stopper.RunWorker(func() {
		<-stopper.ShouldQuiesce()

		cancel()
		ctx.conns.Lock()
		for key, meta := range ctx.conns.cache {
			meta.Do(func() {
				// Make sure initialization is not in progress when we're removing the
				// conn. We need to set the error in case we win the race against the
				// real initialization code.
				if meta.err == nil {
					meta.err = &roachpb.NodeUnavailableError{}
				}
			})
			ctx.removeConnLocked(key, meta)
		}
		ctx.conns.Unlock()
	})
	return ctx
}
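For readability, here is a sketch of the connection-cache types the constructor above touches. The field names are inferred from the accesses in NewContext (meta.Do, meta.err, ctx.conns.cache, ctx.conns.Lock); they are an assumption, not the definitions from the repo.

// Hypothetical shape of the connection cache used above, inferred from the
// accesses in NewContext; the actual definitions may carry more fields.
type connMeta struct {
	sync.Once                 // meta.Do guards one-time initialization of the conn
	conn *grpc.ClientConn     // the established gRPC connection, if any
	err  error                // initialization error, or NodeUnavailableError on shutdown
}

// connCache is a stand-in name for the anonymous struct backing ctx.conns.
type connCache struct {
	syncutil.Mutex
	cache map[string]*connMeta // keyed by remote address
}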
// newRaftScheduler creates a raftScheduler that dispatches Raft processing
// for ranges to numWorkers workers. Its mutex is a TimedMutex so that holds
// longer than defaultReplicaMuWarnThreshold are logged and recorded in the
// store metrics.
func newRaftScheduler(
	ambient log.AmbientContext, metrics *StoreMetrics, processor raftProcessor, numWorkers int,
) *raftScheduler {
	s := &raftScheduler{
		processor:  processor,
		numWorkers: numWorkers,
	}

	muLogger := syncutil.ThresholdLogger(
		ambient.AnnotateCtx(context.Background()),
		defaultReplicaMuWarnThreshold,
		func(ctx context.Context, msg string, args ...interface{}) {
			log.Warningf(ctx, "raftScheduler.mu: "+msg, args...)
		},
		func(t time.Duration) {
			if metrics != nil {
				metrics.MuSchedulerNanos.RecordValue(t.Nanoseconds())
			}
		},
	)
	s.mu.TimedMutex = syncutil.MakeTimedMutex(muLogger)
	s.mu.cond = sync.NewCond(&s.mu.TimedMutex)
	s.mu.state = make(map[roachpb.RangeID]raftScheduleState)

	return s
}
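Similarly, a minimal sketch of the raftScheduler fields that newRaftScheduler populates; the layout only mirrors the accesses above (s.processor, s.numWorkers, s.mu.TimedMutex, s.mu.cond, s.mu.state) and is an assumption about the real struct, which likely carries more state.

// Hypothetical layout of raftScheduler, reconstructed from the accesses in
// newRaftScheduler; the real struct likely has additional fields (e.g. a
// queue of pending range IDs).
type raftScheduler struct {
	processor  raftProcessor
	numWorkers int

	mu struct {
		syncutil.TimedMutex                         // timed so long hold times are logged
		cond  *sync.Cond                            // signals workers when scheduling state changes
		state map[roachpb.RangeID]raftScheduleState // per-range scheduling flags
	}
}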