Example #1
func (m *Manager) init() {
	m.mu.Lock()
	if m.certCache == nil {
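		// rateLimit: bursts of 20, one token per minute;
		// newHostLimit: bursts of 20, one token per three hours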
		m.rateLimit = rate.NewLimiter(rate.Every(1*time.Minute), 20)
		m.newHostLimit = rate.NewLimiter(rate.Every(3*time.Hour), 20)
		m.certCache = map[string]*cacheEntry{}
		m.certTokens = map[string]*tls.Certificate{}
		m.watchChan = make(chan struct{}, 1)
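		// prime the buffered channel with an initial signal (capacity 1, so this send never blocks)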
		m.watchChan <- struct{}{}
	}
	m.mu.Unlock()
}
Example #2
func (s *stresser) Stress() error {
	// TODO: add backoff option
	conn, err := grpc.Dial(s.Endpoint, grpc.WithInsecure())
	if err != nil {
		return fmt.Errorf("%v (%s)", err, s.Endpoint)
	}
	ctx, cancel := context.WithCancel(context.Background())

	wg := &sync.WaitGroup{}
	wg.Add(s.N)

	s.mu.Lock()
	s.conn = conn
	s.cancel = cancel
	s.wg = wg
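	// bursts of up to s.qps requests, refilling one token per second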
	s.rateLimiter = rate.NewLimiter(rate.Every(time.Second), s.qps)
	s.mu.Unlock()

	kvc := pb.NewKVClient(conn)

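	// launch N stress goroutines that share the gRPC connection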
	for i := 0; i < s.N; i++ {
		go s.run(ctx, kvc)
	}

	plog.Printf("stresser %q is started", s.Endpoint)
	return nil
}
Example #3
// NewBot initializes everything needed for proper operation. It sets the
// appropriate rlog flags, then builds a Nimbus config and passes it to the
// internal nimbus IRC client. The client is embedded in a Bot instance, which
// is returned with its fields initialized.
func NewBot(version string, rconf *Config) *Bot {
	rlog.SetFlags(rlog.Linfo | rlog.Lwarn | rlog.Lerror | rlog.Ldebug)
	rlog.SetLogFlags(0)

	nconf := GetNimbusConfig(rconf)

	bot := &Bot{
		/* Client     */ nimbus.NewClient(rconf.Server.Host, rconf.Server.Port,
			rconf.User.Nick, *nconf),
		/* Version    */ version,
		/* Modules    */ make(map[string]*Module),
		/* Channels   */ make(map[string]*Channel),
		/* ToJoinChs  */ make(map[string]string),
		/* Parser     */ parser.NewParser(rconf.Command.Prefix),
		/* Handler    */ NewHandler(),
		/* Inlim      */ rate.NewLimiter(rate.Limit(3.0/5.0), 3), // 3 events per 5s; plain 3/5 is integer division and yields 0
		/* Outlim     */ rate.NewLimiter(rate.Every(time.Millisecond*750), 1),
		/* Config     */ rconf,
		/* ListenPort */ "0",
		/* Quit Chan  */ make(chan string),
		/* Mutex      */ sync.Mutex{},
	}

	return bot
}
Example #4
// connMonitor monitors the connection and handles retries
func (c *Client) connMonitor() {
	var err error

	defer func() {
		_, err = c.retryConnection(c.ctx.Err())
		c.mu.Lock()
		c.lastConnErr = err
		close(c.newconnc)
		c.mu.Unlock()
	}()

	limiter := rate.NewLimiter(rate.Every(minConnRetryWait), 1)
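	// pace reconnect attempts: at most one per minConnRetryWait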
	for limiter.Wait(c.ctx) == nil {
		select {
		case err = <-c.reconnc:
		case <-c.ctx.Done():
			return
		}
		conn, connErr := c.retryConnection(err)
		c.mu.Lock()
		c.lastConnErr = connErr
		c.conn = conn
		close(c.newconnc)
		c.newconnc = make(chan struct{})
		c.reconnc = make(chan error, 1)
		c.mu.Unlock()
	}
}
Example #5
// LimitReached reports whether the bucket identified by key has run out of tokens.
func (l *Limiter) LimitReached(key string) bool {
	l.Lock()
	defer l.Unlock()
	if _, found := l.tokenBuckets[key]; !found {
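		// lazily create the bucket for key: burst of Max tokens, one new token per TTL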
		l.tokenBuckets[key] = rate.NewLimiter(rate.Every(l.TTL), int(l.Max))
	}

	return !l.tokenBuckets[key].AllowN(time.Now(), 1)
}
Example #6
// NewProgressReader creates a new ProgressReader.
func NewProgressReader(in io.ReadCloser, out Output, size int64, id, action string) *Reader {
	return &Reader{
		in:          in,
		out:         out,
		size:        size,
		id:          id,
		action:      action,
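		// one token per 100ms with burst 1: throttles how often progress is written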
		rateLimiter: rate.NewLimiter(rate.Every(100*time.Millisecond), 1),
	}
}
Example #7
func main() {
	st := time.Now()
	i := 0
	limiter := rate.NewLimiter(rate.Every(time.Second), 100)
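	// the bucket starts full with 100 tokens and refills one token per second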
	ctx, cancel := context.WithTimeout(context.TODO(), 2*time.Second)
	for limiter.Wait(ctx) == nil {
		i++
	}
	cancel()
	fmt.Println(i, "DONE. Took", time.Since(st))
	// 101 DONE. Took 1.00013873s
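	// The 100 burst tokens are consumed immediately and one more arrives after ~1s;
	// the loop ends when Wait reports the next token cannot be obtained before the
	// 2-second deadline.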
}
Example #8
func warningFor(dev protocol.DeviceID, msg string) {
	warningLimitersMut.Lock()
	defer warningLimitersMut.Unlock()
	lim, ok := warningLimiters[dev]
	if !ok {
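		// allow at most one warning per perDeviceWarningIntv for this device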
		lim = rate.NewLimiter(rate.Every(perDeviceWarningIntv), 1)
		warningLimiters[dev] = lim
	}
	if lim.Allow() {
		l.Warnln(msg)
	}
}
Example #9
// Reserve returns how long the crawler should wait before crawling this
// URL.
func (l *Limiter) Reserve(u *url.URL) time.Duration {
	l.mu.Lock()
	defer l.mu.Unlock()

	h := u.Host
	v, ok := l.host[h]
	if !ok {
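		// first time seeing this host: query its crawl delay and burst, then create a limiter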
		d, burst := l.query(h)
		v = &entry{
			limiter: rate.NewLimiter(rate.Every(d), burst),
		}
		l.host[h] = v
	} else if l.updatable && v.count >= l.freq {
		d, _ := l.query(h)
		v.limiter.SetLimit(rate.Every(d))
		v.count = 0
	}
	if l.updatable {
		v.count++
	}
	return v.limiter.Reserve().Delay()
}
Example #10
func (c *containerAdapter) pullImage(ctx context.Context) error {
	rc, err := c.client.ImagePull(ctx, c.container.image(), c.container.imagePullOptions())
	if err != nil {
		return err
	}

	dec := json.NewDecoder(rc)
	dec.UseNumber()
	m := map[string]interface{}{}
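	// allow at most one progress log per second unless the status changes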
	spamLimiter := rate.NewLimiter(rate.Every(1000*time.Millisecond), 1)

	lastStatus := ""
	for {
		if err := dec.Decode(&m); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		l := log.G(ctx)
		// limit pull progress logs unless the status changes
		if spamLimiter.Allow() || lastStatus != m["status"] {
			// if we have progress details, we have everything we need
			if progress, ok := m["progressDetail"].(map[string]interface{}); ok {
				// first, log the image and status
				l = l.WithFields(logrus.Fields{
					"image":  c.container.image(),
					"status": m["status"],
				})
				// then, if we have progress, log the progress
				if progress["current"] != nil && progress["total"] != nil {
					l = l.WithFields(logrus.Fields{
						"current": progress["current"],
						"total":   progress["total"],
					})
				}
			}
			l.Debug("pull in progress")
		}
		// sometimes, we get no useful information at all, and add no fields
		if status, ok := m["status"].(string); ok {
			lastStatus = status
		}
	}
	// if the final stream object contained an error, return it
	if errMsg, ok := m["error"]; ok {
		return errors.Errorf("%v", errMsg)
	}
	return nil
}
Example #11
func main() {
	var (
		num int
		mu  sync.Mutex

		qps = 10
		wg  sync.WaitGroup
		N   = 10000
	)

	wg.Add(N)

	limiter := rate.NewLimiter(rate.Every(time.Second), qps)
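	// one limiter shared by all goroutines: burst of qps tokens, refilling one per second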

	for i := 0; i < N; i++ {
		go func(i int) {
			defer wg.Done()
			for limiter.Wait(context.TODO()) == nil {
				mu.Lock()
				num++
				mu.Unlock()
			}
		}(i)
	}

	time.Sleep(time.Second)
	mu.Lock()
	fmt.Println("num:", num)
	mu.Unlock()

	fmt.Println("burst:", limiter.Burst())

	fmt.Println("blocking...")
	donec := make(chan struct{})
	go func() {
		wg.Wait()
		close(donec)
	}()
	select {
	case <-donec:
		fmt.Println("Done!")
	case <-time.After(time.Second):
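		// the workers never exit (context.TODO() is never canceled), so wg.Wait
		// never returns and this branch fires after one second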
		fmt.Println("Timed out!")
	}
}
Example #12
func main() {
	var (
		num int
		mu  sync.Mutex

		qps = 10
		wg  sync.WaitGroup
		N   = 10000
	)

	wg.Add(N)

	ctx, cancel := context.WithCancel(context.Background())

	limiter := rate.NewLimiter(rate.Every(time.Second), qps)

	for i := 0; i < N; i++ {
		go func() {
			defer wg.Done()

			for {
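				// once cancel() is called, Wait returns context.Canceled and the worker exits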
				if err := limiter.Wait(ctx); err == context.Canceled {
					return
				}

				mu.Lock()
				num++
				mu.Unlock()
			}
		}()
	}

	time.Sleep(time.Second)
	mu.Lock()
	fmt.Println("num:", num)
	mu.Unlock()

	fmt.Println("burst:", limiter.Burst())

	fmt.Println("canceling...")
	cancel()
	wg.Wait()
	fmt.Println("Done!")
}
Example #13
func (s *stresser) Start() {
	ctx, cancel := context.WithCancel(context.Background())

	wg := &sync.WaitGroup{}
	wg.Add(s.N)

	s.mu.Lock()
	s.wg = wg
	s.rateLimiter = rate.NewLimiter(rate.Every(time.Second), s.qps)
	// s.rateLimiter = rate.NewLimiter(rate.Limit(s.qps), s.qps)
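	// rate.Every(time.Second) refills one token per second with a burst of s.qps;
	// the commented-out alternative would refill s.qps tokens per second instead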
	s.cancel = cancel
	s.canceled = false
	s.mu.Unlock()

	for i := 0; i < s.N; i++ {
		go s.run(ctx)
	}

	<-ctx.Done()
	fmt.Println("Start finished with", ctx.Err())
}
Example #14
// limit reports whether the request from addr should be rate limited. The mutex
// is passed by pointer so all callers synchronize on the same lock.
func limit(addr string, cache *lru.Cache, lock *sync.RWMutex, intv time.Duration, burst int) bool {
	host, _, err := net.SplitHostPort(addr)
	if err != nil {
		return false
	}

	lock.RLock()
	bkt, ok := cache.Get(host)
	lock.RUnlock()
	if ok {
		bkt := bkt.(*rate.Limiter)
		if !bkt.Allow() {
			// Rate limit
			return true
		}
	} else {
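		// first request from this host: create its limiter (the request itself is not charged)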
		lock.Lock()
		cache.Add(host, rate.NewLimiter(rate.Every(intv), burst))
		lock.Unlock()
	}
	return false
}
Example #15
func (s *stresser) Stress() error {
	// TODO: add backoff option
	conn, err := grpc.Dial(s.Endpoint, grpc.WithInsecure())
	if err != nil {
		return fmt.Errorf("%v (%s)", err, s.Endpoint)
	}
	ctx, cancel := context.WithCancel(context.Background())

	wg := &sync.WaitGroup{}
	wg.Add(s.N)

	s.mu.Lock()
	s.conn = conn
	s.cancel = cancel
	s.wg = wg
	s.rateLimiter = rate.NewLimiter(rate.Every(time.Second), s.qps)
	s.mu.Unlock()

	kvc := pb.NewKVClient(conn)

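	// weighted mix of operations: 70% puts, 7% each for range, range-prefix, delete, and delete-prefix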
	var stressEntries = []stressEntry{
		{weight: 0.7, f: newStressPut(kvc, s.KeySuffixRange, s.KeySize)},
		{weight: 0.07, f: newStressRange(kvc, s.KeySuffixRange)},
		{weight: 0.07, f: newStressRangePrefix(kvc, s.KeySuffixRange)},
		{weight: 0.07, f: newStressDelete(kvc, s.KeySuffixRange)},
		{weight: 0.07, f: newStressDeletePrefix(kvc, s.KeySuffixRange)},
	}
	s.stressTable = createStressTable(stressEntries)

	for i := 0; i < s.N; i++ {
		go s.run(ctx, kvc)
	}

	plog.Printf("stresser %q is started", s.Endpoint)
	return nil
}
Example #16
func (c *containerAdapter) pullImage(ctx context.Context) error {
	spec := c.container.spec()

	// if the image needs to be pulled, the auth config will be retrieved and updated
	var encodedAuthConfig string
	if spec.PullOptions != nil {
		encodedAuthConfig = spec.PullOptions.RegistryAuth
	}

	authConfig := &types.AuthConfig{}
	if encodedAuthConfig != "" {
		if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuthConfig))).Decode(authConfig); err != nil {
			logrus.Warnf("invalid authconfig: %v", err)
		}
	}

	pr, pw := io.Pipe()
	metaHeaders := map[string][]string{}
	go func() {
		err := c.backend.PullImage(ctx, c.container.image(), "", metaHeaders, authConfig, pw)
		pw.CloseWithError(err)
	}()

	dec := json.NewDecoder(pr)
	dec.UseNumber()
	m := map[string]interface{}{}
	spamLimiter := rate.NewLimiter(rate.Every(time.Second), 1)

	lastStatus := ""
	for {
		if err := dec.Decode(&m); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		l := log.G(ctx)
		// limit pull progress logs unless the status changes
		if spamLimiter.Allow() || lastStatus != m["status"] {
			// if we have progress details, we have everything we need
			if progress, ok := m["progressDetail"].(map[string]interface{}); ok {
				// first, log the image and status
				l = l.WithFields(logrus.Fields{
					"image":  c.container.image(),
					"status": m["status"],
				})
				// then, if we have progress, log the progress
				if progress["current"] != nil && progress["total"] != nil {
					l = l.WithFields(logrus.Fields{
						"current": progress["current"],
						"total":   progress["total"],
					})
				}
			}
			l.Debug("pull in progress")
		}
		// sometimes, we get no useful information at all, and add no fields
		if status, ok := m["status"].(string); ok {
			lastStatus = status
		}
	}

	// if the final stream object contained an error, return it
	if errMsg, ok := m["error"]; ok {
		return fmt.Errorf("%v", errMsg)
	}
	return nil
}
Example #17
func (r *controller) Logs(ctx context.Context, publisher exec.LogPublisher, options api.LogSubscriptionOptions) error {
	if err := r.checkClosed(); err != nil {
		return err
	}

	if err := r.waitReady(ctx); err != nil {
		return errors.Wrap(err, "container not ready for logs")
	}

	rc, err := r.adapter.logs(ctx, options)
	if err != nil {
		return errors.Wrap(err, "failed getting container logs")
	}
	defer rc.Close()

	var (
		// use a rate limiter to keep things under control but also provide some
		// ability to coalesce messages.
		limiter = rate.NewLimiter(rate.Limit(10<<20), 10<<20) // ~10 MiB/s sustained, 10 MiB burst
		msgctx  = api.LogContext{
			NodeID:    r.task.NodeID,
			ServiceID: r.task.ServiceID,
			TaskID:    r.task.ID,
		}
	)

	brd := bufio.NewReader(rc)
	for {
		// so, message header is 8 bytes, treat as uint64, pull stream off MSB
		var header uint64
		if err := binary.Read(brd, binary.BigEndian, &header); err != nil {
			if err == io.EOF {
				return nil
			}

			return errors.Wrap(err, "failed reading log header")
		}

		stream, size := (header>>(7<<3))&0xFF, header & ^(uint64(0xFF)<<(7<<3))

		// limit here to decrease allocation back pressure.
		if err := limiter.WaitN(ctx, int(size)); err != nil {
			return errors.Wrap(err, "failed rate limiter")
		}

		buf := make([]byte, size)
		_, err := io.ReadFull(brd, buf)
		if err != nil {
			return errors.Wrap(err, "failed reading buffer")
		}

		// Timestamp is RFC3339Nano with 1 space after. Lop, parse, publish
		parts := bytes.SplitN(buf, []byte(" "), 2)
		if len(parts) != 2 {
			return fmt.Errorf("invalid timestamp in log message: %v", buf)
		}

		ts, err := time.Parse(time.RFC3339Nano, string(parts[0]))
		if err != nil {
			return errors.Wrap(err, "failed to parse timestamp")
		}

		tsp, err := ptypes.TimestampProto(ts)
		if err != nil {
			return errors.Wrap(err, "failed to convert timestamp")
		}

		if err := publisher.Publish(ctx, api.LogMessage{
			Context:   msgctx,
			Timestamp: tsp,
			Stream:    api.LogStream(stream),

			Data: parts[1],
		}); err != nil {
			return errors.Wrap(err, "failed to publish log message")
		}
	}
}
	"mime/multipart"
	"net/http"
	"strings"
	"testing"
	"time"

	"golang.org/x/net/context"
	"golang.org/x/time/rate"

	"github.com/GoogleCloudPlatform/golang-samples/internal/aeintegrate"
	"github.com/GoogleCloudPlatform/golang-samples/internal/testutil"
)

// env:flex deployments are quite flaky when done in parallel.
// Offset each deployment by some amount of time.
var limit = rate.NewLimiter(rate.Every(15*time.Second), 1)

func TestHelloWorld(t *testing.T) {
	tc := testutil.EndToEndTest(t)
	limit.Wait(context.Background())

	helloworld := &aeintegrate.App{
		Name:      "hw",
		Dir:       tc.Path("appengine_flexible", "helloworld"),
		ProjectID: tc.ProjectID,
	}
	defer helloworld.Cleanup()

	bodyShouldContain(t, helloworld, "/", "Hello world!")
}
Example #19
// SetRateLimit sets the minimum interval between log events and the burst size.
func SetRateLimit(limit time.Duration, burst int) {
	logging.mu.Lock()
	logging.rateLimiter = rate.NewLimiter(rate.Every(limit), burst)
	logging.mu.Unlock()
}