func publish(bucket *ratelimit.Bucket) {
	conn, err := amqp.Dial("amqp://*****:*****@localhost:5672/")
	failOnError(err, "Failed to connect to RabbitMQ")
	defer conn.Close()
	ch, err := conn.Channel()
	failOnError(err, "Failed to open a channel")
	defer ch.Close()
	q, err := ch.QueueDeclare(
		"hello", // name
		false,   // durable
		false,   // delete when unused
		false,   // exclusive
		false,   // no-wait
		nil,     // arguments
	)
	failOnError(err, "Failed to declare a queue")
	for {
		bucket.Wait(1)
		body := "hello"
		err = ch.Publish(
			"",     // exchange
			q.Name, // routing key
			false,  // mandatory
			false,  // immediate
			amqp.Publishing{
				ContentType: "text/plain",
				Body:        []byte(body),
			})
		failOnError(err, "Failed to publish a message")
	}
}
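A minimal caller sketch (hypothetical, not part of the original listing), assuming the github.com/juju/ratelimit and time imports along with the publish function above; the fill interval and capacity are illustrative choices:

func main() {
	// Refill one token every 100ms with a burst capacity of 10, then let
	// publish drain it; Wait(1) inside its loop paces the sends to roughly
	// ten messages per second.
	bucket := ratelimit.NewBucket(100*time.Millisecond, 10)
	publish(bucket)
}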
Example #2
// NewTokenBucketThrottler returns an endpoint.Middleware that acts as a
// request throttler based on a token-bucket algorithm. Requests that would
// exceed the maximum request rate are delayed via the parameterized sleep
// function. By default you may pass time.Sleep.
func NewTokenBucketThrottler(tb *ratelimit.Bucket, sleep func(time.Duration)) endpoint.Middleware {
	return func(next endpoint.Endpoint) endpoint.Endpoint {
		return func() error {
			sleep(tb.Take(1))
			return next()
		}
	}
}
Example #3
// NewTokenBucketThrottler returns an endpoint.Middleware that acts as a
// request throttler based on a token-bucket algorithm. Requests that would
// exceed the maximum request rate are delayed via the parameterized sleep
// function. By default you may pass time.Sleep.
func NewTokenBucketThrottler(tb *ratelimit.Bucket, sleep func(time.Duration)) endpoint.Middleware {
	return func(next endpoint.Endpoint) endpoint.Endpoint {
		return func(ctx context.Context, request interface{}) (interface{}, error) {
			sleep(tb.Take(1))
			return next(ctx, request)
		}
	}
}
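A hypothetical wiring sketch for the variant above, assuming the go-kit endpoint package, github.com/juju/ratelimit, and the context and time imports; the inner endpoint and the 100 req/s rate are illustrative:

func throttledEndpoint() endpoint.Endpoint {
	// Dummy endpoint that just returns a canned response.
	var e endpoint.Endpoint = func(ctx context.Context, request interface{}) (interface{}, error) {
		return "ok", nil
	}
	// Refill at 100 tokens per second with a burst of 100; time.Sleep is the
	// delay function, as the doc comment suggests.
	tb := ratelimit.NewBucketWithRate(100, 100)
	return NewTokenBucketThrottler(tb, time.Sleep)(e)
}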
Example #4
// NewTokenBucketLimiter returns an endpoint.Middleware that acts as a rate
// limiter based on a token-bucket algorithm. Requests that would exceed the
// maximum request rate are simply rejected with an error.
func NewTokenBucketLimiter(tb *ratelimit.Bucket) endpoint.Middleware {
	return func(next endpoint.Endpoint) endpoint.Endpoint {
		return func() error {
			if tb.TakeAvailable(1) == 0 {
				return ErrLimited
			}
			return next()
		}
	}
}
Example #5
func limit(b *ratelimit.Bucket, wait bool, errId string) func() error {
	return func() error {
		if wait {
			time.Sleep(b.Take(1))
		} else if b.TakeAvailable(1) == 0 {
			return errors.New(errId, "too many requests", 429)
		}
		return nil
	}
}
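A hypothetical call site for limit, assuming a juju/ratelimit bucket and the errors package used above; the 10 req/s rate and the "api.ratelimit" error id are illustrative:

var apiBucket = ratelimit.NewBucketWithRate(10, 10) // 10 req/s, burst of 10

func handleRequest() error {
	// With wait=false, reject once the bucket is drained instead of sleeping.
	if err := limit(apiBucket, false, "api.ratelimit")(); err != nil {
		return err // surfaces the 429 "too many requests" error
	}
	// ... handle the request ...
	return nil
}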
Example #6
func NewTokenBucketLimiter(tb *ratelimit.Bucket) endpoint.Middleware {
	return func(next endpoint.Endpoint) endpoint.Endpoint {
		return func(ctx context.Context, request interface{}) (interface{}, error) {
			if tb.TakeAvailable(1) == 0 {
				return nil, ErrLimited
			}
			return next(ctx, request)
		}
	}
}
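A hypothetical usage sketch for the limiter above, again assuming go-kit endpoints, github.com/juju/ratelimit, and the context import; the capacity and rate are illustrative:

func limitedEndpoint() endpoint.Endpoint {
	var e endpoint.Endpoint = func(ctx context.Context, request interface{}) (interface{}, error) {
		return "ok", nil
	}
	// Allow bursts of up to 5 requests, refilled at 5 tokens per second;
	// requests beyond that are rejected immediately with ErrLimited.
	return NewTokenBucketLimiter(ratelimit.NewBucketWithRate(5, 5))(e)
}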
Example #7
func getTestServerThrottled(body string) *httptest.Server {
	// Rate limit: 2 req/s (one token every 500ms), burst capacity 2
	rateLimiter := ratelimit.NewBucket(500*time.Millisecond, 2)
	return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		code := 200
		if tokens := rateLimiter.TakeAvailable(1); tokens == 0 {
			code = 503
		}

		// Set headers before WriteHeader; header writes after it are ignored.
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(code)
		fmt.Fprint(w, body)
	}))
}
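A hypothetical test against the throttled server above, assuming the testing and net/http imports and that all three requests land within the 500ms refill interval:

func TestThrottledServer(t *testing.T) {
	srv := getTestServerThrottled(`{"ok":true}`)
	defer srv.Close()

	// The first two requests drain the bucket's capacity of 2; a third
	// immediate request should be rejected with 503.
	for i := 0; i < 3; i++ {
		resp, err := http.Get(srv.URL)
		if err != nil {
			t.Fatal(err)
		}
		resp.Body.Close()
		want := 200
		if i == 2 {
			want = 503
		}
		if resp.StatusCode != want {
			t.Fatalf("request %d: got status %d, want %d", i, resp.StatusCode, want)
		}
	}
}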
Example #8
func makeRateLimitFunc(sessionRateLimit, globalRateLimit *ratelimit.Bucket) func(int64) {
	// This may be a case of super duper premature optimization... We build an
	// optimized function to do the rate limiting here based on what we need
	// to do and then use it in the loop.

	if sessionRateLimit == nil && globalRateLimit == nil {
		// No limiting needed. We could equally well return a func(int64){} and
	// not do a nil check where we use it, but I think the nil check there
		// makes it clear that there will be no limiting if none is
		// configured...
		return nil
	}

	if sessionRateLimit == nil {
		// We only have a global limiter
		return func(bytes int64) {
			globalRateLimit.Wait(bytes)
		}
	}

	if globalRateLimit == nil {
		// We only have a session limiter
		return func(bytes int64) {
			sessionRateLimit.Wait(bytes)
		}
	}

	// We have both. Queue the bytes on both the global and session specific
	// rate limiters. Wait for both in parallel, so that the actual send
	// happens when both conditions are satisfied. In practice this just means
	// wait the longer of the two times.
	return func(bytes int64) {
		t0 := sessionRateLimit.Take(bytes)
		t1 := globalRateLimit.Take(bytes)
		if t0 > t1 {
			time.Sleep(t0)
		} else {
			time.Sleep(t1)
		}
	}
}
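A hypothetical send loop showing how the returned function is meant to be used, including the nil check the comment above refers to:

func sendAll(chunks [][]byte, sessionLimit, globalLimit *ratelimit.Bucket, send func([]byte)) {
	// Build the limiter once, outside the hot loop, then apply it per chunk.
	limit := makeRateLimitFunc(sessionLimit, globalLimit)
	for _, chunk := range chunks {
		if limit != nil { // nil means no limiting was configured
			limit(int64(len(chunk)))
		}
		send(chunk)
	}
}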