Example no. 1
func main() {
	logger := log.NewLogfmtLogger(os.Stdout)

	ctx := context.Background()

	c := &countService{}

	var svc endpoint.Endpoint
	svc = makeAddEndpoint(c)

	// Allow one request every two seconds (bucket capacity of a single token).
	limit := ratelimit.NewBucket(2*time.Second, 1)
	svc = kitratelimit.NewTokenBucketLimiter(limit)(svc)

	requestCount := expvar.NewCounter("request.count")
	svc = metricsMiddleware(requestCount)(svc)
	svc = loggingMiddlware(logger)(svc)

	addHandler := httptransport.NewServer(
		ctx,
		svc,
		decodeAddRequest,
		encodeResponse,
		httptransport.ServerBefore(beforeIDExtractor, beforePATHExtractor),
	)

	http.Handle("/add", addHandler)

	port := os.Getenv("PORT")
	logger.Log("listening on", port)
	if err := http.ListenAndServe(":"+port, nil); err != nil {
		logger.Log("listen.error", err)
	}
}
Example no. 2
func limit(addr *net.UDPAddr) bool {
	key := addr.IP.String()

	lock.Lock()
	defer lock.Unlock()

	bkt, ok := limiter.Get(key)
	if ok {
		bkt := bkt.(*ratelimit.Bucket)
		if bkt.TakeAvailable(1) != 1 {
			// Rate limit exceeded; ignore packet
			if debug {
				log.Println("Rate limit exceeded for", key)
			}
			limited++
			return true
		}
	} else {
		if debug {
			log.Println("New limiter for", key)
		}
		// One packet per ten seconds average rate, burst ten packets
		limiter.Add(key, ratelimit.NewBucket(10*time.Second, 10))
	}

	return false
}
Example no. 3
// NewClient creates a new Vultr API client. Options are optional and can be nil.
func NewClient(apiKey string, options *Options) *Client {
	userAgent := "vultr-go/" + Version
	client := http.DefaultClient
	endpoint, _ := url.Parse(DefaultEndpoint)
	rate := 505 * time.Millisecond
	attempts := 1

	if options != nil {
		if options.HTTPClient != nil {
			client = options.HTTPClient
		}
		if options.UserAgent != "" {
			userAgent = options.UserAgent
		}
		if options.Endpoint != "" {
			endpoint, _ = url.Parse(options.Endpoint)
		}
		if options.RateLimitation != 0 {
			rate = options.RateLimitation
		}
		if options.MaxRetries != 0 {
			attempts = options.MaxRetries + 1
		}
	}

	return &Client{
		UserAgent:   userAgent,
		client:      client,
		Endpoint:    endpoint,
		APIKey:      apiKey,
		MaxAttempts: attempts,
		bucket:      ratelimit.NewBucket(rate, 1),
	}
}
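
Since every Options field shown above is optional, callers can pass nil or override only what they need. A rough usage sketch (the API key and the overridden values below are made up for illustration, not from the original):

// Defaults: one request every 505ms, a single attempt (no retries).
c := NewClient("my-api-key", nil)

// Override only the rate limit and retry count.
c = NewClient("my-api-key", &Options{
	RateLimitation: time.Second,
	MaxRetries:     2,
})
_ = c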
Example no. 4
func newHandler(mc *config, mailPipe chan<- *http.Request) *handler {
	return &handler{
		rlBucket: ratelimit.NewBucket(mc.rateLimitInterval, mc.rateLimitCapacity),
		reqPipe:  mailPipe,
		config:   mc,
	}
}
Example no. 5
// NewRateLimitingPlugin creates a new RateLimitingPlugin
func NewRateLimitingPlugin(fillInterval time.Duration, capacity int64) *RateLimitingPlugin {
	tb := ratelimit.NewBucket(fillInterval, capacity)

	return &RateLimitingPlugin{
		FillInterval: fillInterval,
		Capacity:     capacity,
		bucket:       tb,
	}
}
Example no. 6
// Throttle throttles the method for each incoming request. The throttling
// algorithm is based on the token bucket implementation:
// http://en.wikipedia.org/wiki/Token_bucket. Capacity determines how many
// requests can be served before the limit kicks in. Example: a capacity of 50
// and a fillInterval of two seconds means that initially it can handle 50
// requests, and every two seconds the bucket is refilled with one token until
// it reaches the capacity again. If there is a burst of API calls, all tokens
// will be exhausted and clients need to wait until the bucket is refilled over
// time. For example, to throttle at 30 req/second you need a fillInterval of
// 33.33 milliseconds.
func (m *Method) Throttle(fillInterval time.Duration, capacity int64) *Method {
	// don't do anything if the bucket is initialized already
	if m.bucket != nil {
		return m
	}

	m.bucket = ratelimit.NewBucket(
		fillInterval, // one token is added per interval
		capacity,     // maximum number of tokens (burst size)
	)

	return m
}
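
Following the arithmetic in the doc comment, a minimal call sketch for a 30 req/second throttle (the Method value m and the burst capacity of 50 are assumptions for illustration):

// time.Second/30 ≈ 33.33ms: roughly 30 tokens are added per second,
// with bursts of up to 50 requests allowed.
m = m.Throttle(time.Second/30, 50)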
Example no. 7
// LimitReached returns a bool indicating if the Bucket identified by key ran out of tokens.
func (l *Limiter) LimitReached(key string) bool {
	l.Lock()
	if _, found := l.tokenBuckets[key]; !found {
		l.tokenBuckets[key] = ratelimit.NewBucket(l.TTL, l.Max)
	}

	_, isSoonerThanMaxWait := l.tokenBuckets[key].TakeMaxDuration(1, l.TTL)
	l.Unlock()

	return !isSoonerThanMaxWait
}
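
As a rough illustration of how LimitReached can gate requests per client, a hypothetical HTTP middleware (the handler wiring and the pre-built limiter value are assumptions, not part of the original):

func rateLimited(limiter *Limiter, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// One bucket per client IP; reject when that client's bucket is empty.
		host, _, _ := net.SplitHostPort(r.RemoteAddr)
		if limiter.LimitReached(host) {
			http.Error(w, "Too Many Requests", http.StatusTooManyRequests)
			return
		}
		next.ServeHTTP(w, r)
	})
}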
Example no. 8
func getTestServerThrottled(body string) *httptest.Server {
	// Rate limit: 2 req/s (one token every 500ms), capacity 2
	rateLimiter := ratelimit.NewBucket(500*time.Millisecond, 2)
	return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		code := 200
		if tokens := rateLimiter.TakeAvailable(1); tokens == 0 {
			code = 503
		}

		// Set headers before WriteHeader; headers written afterwards are ignored.
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(code)
		fmt.Fprint(w, body)
	}))
}
Example no. 9
func (s *querysrv) limit(remote net.IP) bool {
	key := remote.String()

	bkt, ok := s.limiter.Get(key)
	if ok {
		bkt := bkt.(*ratelimit.Bucket)
		if bkt.TakeAvailable(1) != 1 {
			// Rate limit exceeded; ignore packet
			return true
		}
	} else {
		// limitAvg packets per ten seconds average rate, burst of limitBurst packets
		s.limiter.Add(key, ratelimit.NewBucket(10*time.Second/time.Duration(limitAvg), int64(limitBurst)))
	}

	return false
}
Example no. 10
func (a *aggregator) loop() error {
	// TODO(fwereade): 2016-03-17 lp:1558657
	timer := time.NewTimer(0)
	timer.Stop()
	var reqs []instanceInfoReq
	// We use a capacity of 1 so that sporadic requests will
	// be serviced immediately without having to wait.
	bucket := ratelimit.NewBucket(gatherTime, 1)
	for {
		select {
		case <-a.tomb.Dying():
			return tomb.ErrDying
		case req := <-a.reqc:
			if len(reqs) == 0 {
				waitTime := bucket.Take(1)
				timer.Reset(waitTime)
			}
			reqs = append(reqs, req)
		case <-timer.C:
			ids := make([]instance.Id, len(reqs))
			for i, req := range reqs {
				ids[i] = req.instId
			}
			insts, err := a.environ.Instances(ids)
			for i, req := range reqs {
				var reply instanceInfoReply
				if err != nil && err != environs.ErrPartialInstances {
					reply.err = err
				} else {
					reply.info, reply.err = a.instInfo(req.instId, insts[i])
				}
				select {
				case <-a.tomb.Dying():
					return tomb.ErrDying
				case req.reply <- reply:
				}
			}
			reqs = nil
		}
	}
}
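
The timer logic above leans on Take returning how long the caller should wait for the requested tokens. A standalone sketch with made-up values (assuming the github.com/juju/ratelimit package these examples appear to use):

package main

import (
	"fmt"
	"time"

	"github.com/juju/ratelimit"
)

func main() {
	// One token per minute, capacity 1; the bucket starts full.
	b := ratelimit.NewBucket(time.Minute, 1)
	fmt.Println(b.Take(1)) // 0s: the first take is served immediately
	fmt.Println(b.Take(1)) // roughly 1m0s: how long to wait before proceeding
}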
Example no. 11
// lock is passed by pointer so all callers share the same mutex; copying a sync.RWMutex is a bug.
func limit(addr string, cache *lru.Cache, lock *sync.RWMutex, rate time.Duration, burst int64) bool {
	host, _, err := net.SplitHostPort(addr)
	if err != nil {
		return false
	}

	lock.RLock()
	bkt, ok := cache.Get(host)
	lock.RUnlock()
	if ok {
		bkt := bkt.(*ratelimit.Bucket)
		if bkt.TakeAvailable(1) != 1 {
			// Rate limit
			return true
		}
	} else {
		lock.Lock()
		cache.Add(host, ratelimit.NewBucket(rate, burst))
		lock.Unlock()
	}
	return false
}
Example no. 12
// NewClient creates a new Vultr API client. Options are optional and can be nil.
func NewClient(apiKey string, options *Options) *Client {
	userAgent := "vultr-go/" + Version
	transport := &http.Transport{
		TLSNextProto: make(map[string]func(string, *tls.Conn) http.RoundTripper),
	}
	// Use a dedicated client instead of mutating http.DefaultClient, so the
	// custom transport does not leak into unrelated code.
	client := &http.Client{Transport: transport}
	endpoint, _ := url.Parse(DefaultEndpoint)
	rate := 505 * time.Millisecond
	attempts := 1

	if options != nil {
		if options.HTTPClient != nil {
			client = options.HTTPClient
		}
		if options.UserAgent != "" {
			userAgent = options.UserAgent
		}
		if options.Endpoint != "" {
			endpoint, _ = url.Parse(options.Endpoint)
		}
		if options.RateLimitation != 0 {
			rate = options.RateLimitation
		}
		if options.MaxRetries != 0 {
			attempts = options.MaxRetries + 1
		}
	}

	return &Client{
		UserAgent:   userAgent,
		client:      client,
		Endpoint:    endpoint,
		APIKey:      apiKey,
		MaxAttempts: attempts,
		bucket:      ratelimit.NewBucket(rate, 1),
	}
}
Example no. 13
func newEntry(limit int) *entry {
	// limit is the allowed requests per second; 1000/limit gives the fill
	// interval in milliseconds (integer division, so limit should stay <= 1000).
	fillIntervalMs := 1000 / limit
	return &entry{
		bucket: ratelimit.NewBucket(time.Duration(fillIntervalMs)*time.Millisecond, int64(limit)),
	}
}
Example no. 14
func main() {
	flag.Parse()

	if *install {
		err := registerSource(*provider, *source)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		return
	}

	if *file == "" {
		fmt.Fprintln(os.Stderr, "-f is required")
		flag.Usage()
		os.Exit(1)
	}

	// Open a handle to the event log.
	log, err := eventlog.Open(*log)
	if err != nil {
		fmt.Fprintln(os.Stderr, "opening eventlog:", err)
		os.Exit(1)
	}

	// Open the file on disk.
	file, err := os.Open(*file)
	if err != nil {
		fmt.Fprintln(os.Stderr, "opening file:", err)
		os.Exit(1)
	}
	defer file.Close()

	eg := &eventgen{
		file: file,
		max:  uint32(*max),
		done: make(chan struct{}),
		log:  log,
	}
	eg.installSignalHandler()

	// Rate limit writing using a token bucket. Draining the bucket's initial
	// tokens means even the very first writes are paced.
	if *rate != 0 {
		eg.tb = ratelimit.NewBucketWithRate(*rate, int64(math.Ceil(*rate)))
		eg.tb.TakeAvailable(eg.tb.Available())
	} else if *interval != 0 {
		eg.tb = ratelimit.NewBucket(*interval, 1)
		eg.tb.TakeAvailable(eg.tb.Available())
	}

	start := time.Now()

	// Start a new worker to read lines from the file.
	wg := &sync.WaitGroup{}
	wg.Add(1)
	go eg.reportEvents(wg)
	wg.Wait()

	elapsed := time.Since(start)
	fmt.Println("elapsed time:", elapsed)
	fmt.Println("event count:", atomic.LoadUint32(&eg.count))
}