Example #1
// redisLimiterCreate creates a rate limiter backed by Redis.
func redisLimiterCreate(cfg limiterConfig) CreateLimiter {
	return func() Allow {
		floodFlags := ttlcache.NewCache(cfg.Interval)
		return func(id string) bool {
			_, hasFlood := floodFlags.Get(id)
			if cfg.FloodThreshold > 0 && hasFlood {
				return false
			}
			now := time.Now().UnixNano()
			key := fmt.Sprintf("%s-%s", "rl", id)
			before := now - cfg.Interval.Nanoseconds()

			total, firstReq, lastReq, err := checkRedis(cfg.RedisPool, key, before, now, cfg.Interval)
			if err != nil {
				// fail open: if Redis cannot be consulted, let the request through
				return true
			}
			tooManyInInterval := total >= cfg.MaxInInterval

			isFlooded := cfg.FloodThreshold > 0 && tooManyInInterval && (total >= (cfg.FloodThreshold * cfg.MaxInInterval))
			if isFlooded {
				floodFlags.Set(id, "xx")
			}
			var lastReqPeriod int64
			if cfg.MinPeriod > 0 && lastReq > 0 {
				lastReqPeriod = now - lastReq
			}

			waitOpenTime := waitOpenTime(now, firstReq, tooManyInInterval, lastReqPeriod, cfg.MinPeriod, cfg.Interval)

			return waitOpenTime <= 0
		}
	}
}
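
A minimal usage sketch for the snippet above, assuming it lives in the same package: the HTTP route and the rate-limited id are hypothetical, and the cfg value (including its RedisPool field) is assumed to be built elsewhere; only the CreateLimiter/Allow call shape comes from the code itself.

// exampleRedisLimiter is a hypothetical helper showing how the Allow closure
// returned above might guard an HTTP handler.
func exampleRedisLimiter(cfg limiterConfig) {
	allow := redisLimiterCreate(cfg)() // CreateLimiter returns an Allow func

	http.HandleFunc("/api", func(w http.ResponseWriter, r *http.Request) {
		// rate-limit by client address; any stable id (user id, API key) works
		if !allow(r.RemoteAddr) {
			http.Error(w, "too many requests", http.StatusTooManyRequests)
			return
		}
		w.Write([]byte("ok"))
	})
}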
Example #2
// SodaMiddleware returns a middleware that intercepts GET requests and, when a
// cached response exists, serves it directly and aborts the handler chain.
func SodaMiddleware() gin.HandlerFunc {
	cache = ttlcache.NewCache(time.Second * 60) // package-level cache; entries expire after 60s
	return func(c *gin.Context) {
		// pass through when the request method is not GET
		if c.Request.Method != "GET" {
			return
		}

		key := keyFromRequest(c.Request)

		value, keyExists := cache.Get(key)
		if keyExists {
			if value == "" {
				// an empty cached value marks a previously failed entry; fall
				// through to the real handler instead of serving it
				return
			}

			var resp response
			err := json.Unmarshal([]byte(value), &resp)
			if err != nil {
				cache.Set(key, "")
				return
			}

			for name, values := range resp.Header {
				c.Header(name, strings.Join(values, ", "))
			}
			c.Data(resp.Status, resp.ContentType, resp.Data)

			c.Abort()
		}
	}
}
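
A usage sketch, assuming this middleware lives in the same package as the cache variable, the response type, and the keyFromRequest helper it references. Note the snippet only covers the read path; storing responses in the cache has to happen elsewhere (not shown). The route and port below are hypothetical.

func exampleSodaServer() {
	r := gin.Default()
	r.Use(SodaMiddleware()) // serve cached GET responses for up to 60 seconds

	r.GET("/ping", func(c *gin.Context) {
		c.JSON(http.StatusOK, gin.H{"message": "pong"})
	})

	r.Run(":8080")
}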
Example #3
// NewURLCache creates a urlCache whose entries expire after ttl, then warms it
// once by calling refresh.
func NewURLCache(ttl time.Duration, billingProviderURL string, authToken string) urlCache {
	u := urlCache{
		cache:              ttlcache.NewCache(ttl),
		billingProviderURL: billingProviderURL,
		authToken:          authToken,
	}

	u.refresh()
	return u
}
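
Only construction is shown here, so the sketch below is minimal; the TTL, URL, and token values are made up, and refresh presumably warms the cache from the billing provider before the first lookup.

func exampleURLCache() {
	// hypothetical values: entries expire after 10 minutes; refresh inside
	// NewURLCache fills the cache before the first lookup
	urls := NewURLCache(10*time.Minute, "https://billing.example.test", "secret-token")
	_ = urls
}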
Example #4
// memoryLimiterCreate creates a limiter backed by in-memory state.
func memoryLimiterCreate(cfg limiterConfig) CreateLimiter {
	return func() Allow {
		floodFlags := ttlcache.NewCache(cfg.Interval)
		timeoutTimers := make(map[string]*time.Timer)
		requestRecords := make(map[string][]int64)
		mutex := &sync.Mutex{}
		return func(id string) bool {
			_, hasFlood := floodFlags.Get(id)
			if cfg.FloodThreshold > 0 && hasFlood {
				return false
			}
			now := time.Now().UnixNano()
			before := now - cfg.Interval.Nanoseconds()

			timer, timerStarted := timeoutTimers[id]
			if timerStarted {
				timer.Stop()
			}

			inIntervalReqs := inIntervalRequest(mutex, requestRecords, id, before)

			tooManyInInterval := len(inIntervalReqs) >= cfg.MaxInInterval

			isFlooded := cfg.FloodThreshold > 0 && tooManyInInterval && (len(inIntervalReqs) >= (cfg.FloodThreshold * cfg.MaxInInterval))
			if isFlooded {
				floodFlags.Set(id, "xx")
			}

			lastReqPeriod := lastRequestPeriod(cfg.MinPeriod, inIntervalReqs, now)

			var firstReq int64
			if len(inIntervalReqs) == 0 {
				firstReq = 0
			} else {
				firstReq = inIntervalReqs[0]
			}

			waitOpenTime := waitOpenTime(now, firstReq, tooManyInInterval, lastReqPeriod, cfg.MinPeriod, cfg.Interval)

			user, ok := requestRecords[id]
			if !ok {
				user = []int64{}
			}
			user = append(user, now)
			requestRecords[id] = user

			timeoutTimers[id] = setTimeout(func() {
				delete(requestRecords, id)
			}, cfg.Interval)

			return waitOpenTime <= 0
		}
	}
}
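
The snippet relies on helpers that are not shown (inIntervalRequest, lastRequestPeriod, waitOpenTime, setTimeout). The call site timeoutTimers[id] = setTimeout(func() { ... }, cfg.Interval) suggests setTimeout is a thin wrapper around time.AfterFunc; a plausible sketch, not the original implementation:

// setTimeout runs f once after d and returns the timer so the caller can
// cancel it with Stop; this mirrors how the memory limiter above resets the
// per-id expiry on every request.
func setTimeout(f func(), d time.Duration) *time.Timer {
	return time.AfterFunc(d, f)
}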
Example #5
// PanicMiddleware wraps a goji handler so panics are recovered and delegated
// to a PanicHandler backed by a 24-hour TTL cache.
func PanicMiddleware(inner goji.Handler) goji.Handler {
	panicHandler := &PanicHandler{
		Cache: ttlcache.NewCache(24 * time.Hour),
	}

	return goji.HandlerFunc(func(ctx context.Context, rw http.ResponseWriter, r *http.Request) {
		defer func() {
			if err := recover(); err != nil {
				panicHandler.ServeHTTPC(err, ctx, rw, r)
			}
		}()

		inner.ServeHTTPC(ctx, rw, r)
	})
}
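
A minimal usage sketch, assuming the context-aware goji handler types already used by the snippet; the inner handler and its simulated panic are hypothetical.

func examplePanicMiddleware() goji.Handler {
	// hypothetical inner handler; its panic is recovered by the middleware and
	// handed to the cached PanicHandler instead of taking the process down
	inner := goji.HandlerFunc(func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
		panic("boom") // simulated failure
	})

	return PanicMiddleware(inner)
}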
Example #6
// Reset will (re)initialize internal caches
func (p *PacSandbox) Reset() {
	p.cache = ttlcache.NewCache(5 * time.Minute)
	p.resultCache = ttlcache.NewCache(30 * time.Second)
}
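
The two TTLs hint at a cache-aside pattern: longer-lived data in cache and short-lived per-URL results in resultCache. A hypothetical sketch of how resultCache might be consulted, assuming the string-valued Get/Set API used in the other examples; proxyFor and evalPAC are made-up names, not part of the snippet.

// proxyFor returns the proxy decision for url, re-evaluating the PAC script
// only when no result is cached from the last 30 seconds.
func (p *PacSandbox) proxyFor(url string) (string, error) {
	if v, ok := p.resultCache.Get(url); ok {
		return v, nil
	}
	result, err := p.evalPAC(url) // assumed expensive PAC evaluation
	if err != nil {
		return "", err
	}
	p.resultCache.Set(url, result)
	return result, nil
}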