// RegisterMetricAndTrackRateLimiterUsage registers a metric ownerName_rate_limiter_use in prometheus to track
// how saturated rateLimiter is and starts a goroutine that updates this metric every updatePeriod.
func RegisterMetricAndTrackRateLimiterUsage(ownerName string, rateLimiter flowcontrol.RateLimiter) error {
	err := registerRateLimiterMetric(ownerName)
	if err != nil {
		return err
	}
	// Sample the limiter's saturation periodically and publish it on the gauge registered above.
	go wait.Forever(func() {
		metricsLock.Lock()
		defer metricsLock.Unlock()
		rateLimiterMetrics[ownerName].Set(rateLimiter.Saturation())
	}, updatePeriod)
	return nil
}
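// Sketch (not part of the original file): how a hypothetical controller might wire
// up this function. The owner name "example_controller" and the QPS/burst values are
// illustrative assumptions; only RegisterMetricAndTrackRateLimiterUsage and the
// flowcontrol constructors come from the surrounding code.
func exampleTrackClientRateLimiter() {
	// Create a token-bucket limiter (10 QPS, burst 20) for the controller's client.
	limiter := flowcontrol.NewTokenBucketRateLimiter(10.0, 20)
	// Export its saturation under "example_controller_rate_limiter_use".
	if err := RegisterMetricAndTrackRateLimiterUsage("example_controller", limiter); err != nil {
		// Registration fails if a metric for this owner name already exists.
		panic(err)
	}
	// The controller uses the limiter as usual; the goroutine started above samples
	// limiter.Saturation() every updatePeriod and updates the gauge.
	limiter.Accept()
}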
// SwapLimiter safely swaps the current limiter for this queue with the passed one if their QPS values differ.
func (q *RateLimitedTimedQueue) SwapLimiter(newQPS float32) {
	q.limiterLock.Lock()
	defer q.limiterLock.Unlock()
	if q.limiter.QPS() == newQPS {
		return
	}
	var newLimiter flowcontrol.RateLimiter
	if newQPS <= 0 {
		newLimiter = flowcontrol.NewFakeNeverRateLimiter()
	} else {
		newLimiter = flowcontrol.NewTokenBucketRateLimiter(newQPS, evictionRateLimiterBurst)
	}
	// If we're currently waiting on the limiter, drain the new one as well - this is a good approach when the Burst value is 1.
	// TODO: figure out if we need to support higher Burst values and decide on the drain logic, should we keep:
	// - saturation (percentage of used tokens)
	// - number of used tokens
	// - number of available tokens
	// - something else
	for q.limiter.Saturation() > newLimiter.Saturation() {
		// Check if we're not using a fake limiter.
		previousSaturation := newLimiter.Saturation()
		newLimiter.TryAccept()
		// If saturation didn't change, it's a fake limiter - stop draining.
		if newLimiter.Saturation() == previousSaturation {
			break
		}
	}
	q.limiter.Stop()
	q.limiter = newLimiter
}
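// Sketch (not part of the original file): how a caller might use SwapLimiter to move
// the eviction queue between a normal and a reduced rate. The function and constant
// names below are illustrative assumptions; only SwapLimiter and its newQPS parameter
// come from the code above.
const (
	exampleNormalEvictionRate  float32 = 0.1  // evictions per second under normal conditions (assumed value)
	exampleReducedEvictionRate float32 = 0.01 // evictions per second for a partially disrupted zone (assumed value)
)

func exampleAdjustEvictionRate(q *RateLimitedTimedQueue, zoneDisrupted bool) {
	if zoneDisrupted {
		q.SwapLimiter(exampleReducedEvictionRate)
		return
	}
	// Passing a value <= 0 would instead install the fake "never" limiter and halt evictions entirely.
	q.SwapLimiter(exampleNormalEvictionRate)
}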