Example No. 1
func NewServer(s *Store, c *conf.Config) (svr *Server, err error) {
	svr = &Server{
		store: s,
		conf:  c,
		rl:    rate.NewLimiter(rate.Limit(c.Limit.Read.Rate), c.Limit.Read.Brust),
		wl:    rate.NewLimiter(rate.Limit(c.Limit.Write.Rate), c.Limit.Write.Brust),
		dl:    rate.NewLimiter(rate.Limit(c.Limit.Delete.Rate), c.Limit.Delete.Brust),
	}
	if svr.statSvr, err = net.Listen("tcp", c.StatListen); err != nil {
		log.Errorf("net.Listen(%s) error(%v)", c.StatListen, err)
		return
	}
	if svr.apiSvr, err = net.Listen("tcp", c.ApiListen); err != nil {
		log.Errorf("net.Listen(%s) error(%v)", c.ApiListen, err)
		return
	}
	if svr.adminSvr, err = net.Listen("tcp", c.AdminListen); err != nil {
		log.Errorf("net.Listen(%s) error(%v)", c.AdminListen, err)
		return
	}
	go svr.startStat()
	go svr.startApi()
	go svr.startAdmin()
	if c.Pprof {
		go StartPprof(c.PprofListen)
	}
	return
}
Example No. 2
// NewBot initializes a number of things for proper operation. It sets appropriate flags
// for rlog and then creates a Nimbus config to pass to the internal nimbus IRC client. This
// client is embedded into an instance of Bot, which is returned with its fields initialized.
func NewBot(version string, rconf *Config) *Bot {
	rlog.SetFlags(rlog.Linfo | rlog.Lwarn | rlog.Lerror | rlog.Ldebug)
	rlog.SetLogFlags(0)

	nconf := GetNimbusConfig(rconf)

	bot := &Bot{
		/* Client     */ nimbus.NewClient(rconf.Server.Host, rconf.Server.Port,
			rconf.User.Nick, *nconf),
		/* Version    */ version,
		/* Modules    */ make(map[string]*Module),
		/* Channels   */ make(map[string]*Channel),
		/* ToJoinChs  */ make(map[string]string),
		/* Parser     */ parser.NewParser(rconf.Command.Prefix),
		/* Handler    */ NewHandler(),
		/* Inlim      */ rate.NewLimiter(rate.Limit(3.0/5.0), 3), // 0.6 events/sec; the original 3/5 truncates to 0 in integer division
		/* Outlim     */ rate.NewLimiter(rate.Every(time.Millisecond*750), 1),
		/* Config     */ rconf,
		/* ListenPort */ "0",
		/* Quit Chan  */ make(chan string),
		/* Mutex      */ sync.Mutex{},
	}

	return bot
}
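The two limiters above also show the two ways golang.org/x/time/rate expresses a rate: rate.Limit is a count of events per second, while rate.Every converts an interval between events into a Limit. A minimal aside with illustrative values only (assumes the time and rate imports used throughout these examples):

// Both limiters below allow roughly 0.6 events per second with a burst of 3.
inA := rate.NewLimiter(rate.Limit(0.6), 3)
inB := rate.NewLimiter(rate.Every(time.Second*5/3), 3)
_, _ = inA, inB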
Example No. 3
func newLimiter(cfg *config.Wrapper) *limiter {
	l := &limiter{
		write: rate.NewLimiter(rate.Inf, limiterBurstSize),
		read:  rate.NewLimiter(rate.Inf, limiterBurstSize),
	}
	cfg.Subscribe(l)
	prev := config.Configuration{Options: config.OptionsConfiguration{MaxRecvKbps: -1, MaxSendKbps: -1}}
	l.CommitConfiguration(prev, cfg.RawCopy())
	return l
}
Example No. 4
func (m *Manager) init() {
	m.mu.Lock()
	if m.certCache == nil {
		m.rateLimit = rate.NewLimiter(rate.Every(1*time.Minute), 20)
		m.newHostLimit = rate.NewLimiter(rate.Every(3*time.Hour), 20)
		m.certCache = map[string]*cacheEntry{}
		m.certTokens = map[string]*tls.Certificate{}
		m.watchChan = make(chan struct{}, 1)
		m.watchChan <- struct{}{}
	}
	m.mu.Unlock()
}
Example No. 5
// NewVaultClient returns a Vault client from the given config. If the client
// can't be created, an error is returned.
func NewVaultClient(c *config.VaultConfig, logger *log.Logger, purgeFn PurgeVaultAccessorFn) (*vaultClient, error) {
	if c == nil {
		return nil, fmt.Errorf("must pass valid VaultConfig")
	}

	if logger == nil {
		return nil, fmt.Errorf("must pass valid logger")
	}

	v := &vaultClient{
		config:   c,
		logger:   logger,
		limiter:  rate.NewLimiter(requestRateLimit, int(requestRateLimit)),
		revoking: make(map[*structs.VaultAccessor]time.Time),
		purgeFn:  purgeFn,
		tomb:     &tomb.Tomb{},
	}

	if v.config.Enabled {
		if err := v.buildClient(); err != nil {
			return nil, err
		}

		// Launch the required goroutines
		v.tomb.Go(wrapNilError(v.establishConnection))
		v.tomb.Go(wrapNilError(v.revokeDaemon))

		v.running = true
	}

	return v, nil
}
Example No. 6
func NewWatchProxy(c *clientv3.Client) pb.WatchServer {
	wp := &watchProxy{
		cw:           c.Watcher,
		ctx:          clientv3.WithRequireLeader(c.Ctx()),
		retryLimiter: rate.NewLimiter(rate.Limit(retryPerSecond), retryPerSecond),
		leaderc:      make(chan struct{}),
	}
	wp.ranges = newWatchRanges(wp)
	go func() {
		// a new stream without any open watchers won't catch
		// a lost leader event, so have a special watch to monitor it
		rev := int64((uint64(1) << 63) - 2)
		for wp.ctx.Err() == nil {
			wch := wp.cw.Watch(wp.ctx, lostLeaderKey, clientv3.WithRev(rev))
			for range wch {
			}
			wp.mu.Lock()
			close(wp.leaderc)
			wp.leaderc = make(chan struct{})
			wp.mu.Unlock()
			wp.retryLimiter.Wait(wp.ctx)
		}
		wp.mu.Lock()
		<-wp.ctx.Done()
		wp.mu.Unlock()
		wp.wg.Wait()
		wp.ranges.stop()
	}()
	return wp
}
Example No. 7
// NewLimitedSampler returns a sampling policy that randomly samples a given
// fraction of requests.  It also enforces a limit on the number of traces per
// second.  It tries to trace every request with a trace header, but will not
// exceed the qps limit to do it.
func NewLimitedSampler(fraction, maxqps float64) (SamplingPolicy, error) {
	if !(fraction >= 0) {
		return nil, fmt.Errorf("invalid fraction %f", fraction)
	}
	if !(maxqps >= 0) {
		return nil, fmt.Errorf("invalid maxqps %f", maxqps)
	}
	// Set a limit on the number of accumulated "tokens", to limit bursts of
	// traced requests.  Use one more than a second's worth of tokens, or 100,
	// whichever is smaller.
	// See https://godoc.org/golang.org/x/time/rate#NewLimiter.
	maxTokens := 100
	if maxqps < 99.0 {
		maxTokens = 1 + int(maxqps)
	}
	var seed int64
	if err := binary.Read(crand.Reader, binary.LittleEndian, &seed); err != nil {
		seed = time.Now().UnixNano()
	}
	s := sampler{
		fraction: fraction,
		Limiter:  rate.NewLimiter(rate.Limit(maxqps), maxTokens),
		Rand:     rand.New(rand.NewSource(seed)),
	}
	return &s, nil
}
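The sampler above gates tracing on two independent conditions: a random fraction and a per-second budget. A self-contained sketch of one way to combine them (shouldTrace and its parameters are illustrative names, not part of this package's API):

// shouldTrace reports whether a request passes both the random fraction
// and the qps budget enforced by the limiter.
func shouldTrace(r *rand.Rand, lim *rate.Limiter, fraction float64) bool {
	if r.Float64() >= fraction {
		return false // not selected by the random fraction
	}
	return lim.Allow() // selected, but only if a token is available
}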
Example No. 8
func Test_Receiver_flushDs(t *testing.T) {
	// So we need to test that this calls queueblocking...
	r := &Receiver{flusherChs: make([]chan *dsFlushRequest, 1), flushLimiter: rate.NewLimiter(10, 10)}
	r.flusherChs[0] = make(chan *dsFlushRequest)
	called := 0
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			if _, ok := <-r.flusherChs[0]; !ok {
				break
			}
			called++
		}
	}()
	ds := rrd.NewDataSource(0, "", 0, 0, time.Time{}, 0)
	rra, _ := rrd.NewRoundRobinArchive(0, 0, "WMEAN", time.Second, 10, 10, 0, time.Time{})
	ds.SetRRAs([]*rrd.RoundRobinArchive{rra})
	ds.ProcessIncomingDataPoint(10, time.Unix(100, 0))
	ds.ProcessIncomingDataPoint(10, time.Unix(101, 0))
	rds := &receiverDs{DataSource: ds}
	r.SetMaxFlushRate(1)
	r.flushDs(rds, false)
	r.flushDs(rds, false)
	close(r.flusherChs[0])
	wg.Wait()
	if called != 1 {
		t.Errorf("flushDs call count not 1: %d", called)
	}
	if ds.PointCount() != 0 {
		t.Errorf("ClearRRAs was not called by flushDs")
	}
}
Example No. 9
func (s *stresser) Stress() error {
	// TODO: add backoff option
	conn, err := grpc.Dial(s.Endpoint, grpc.WithInsecure())
	if err != nil {
		return fmt.Errorf("%v (%s)", err, s.Endpoint)
	}
	ctx, cancel := context.WithCancel(context.Background())

	wg := &sync.WaitGroup{}
	wg.Add(s.N)

	s.mu.Lock()
	s.conn = conn
	s.cancel = cancel
	s.wg = wg
	s.rateLimiter = rate.NewLimiter(rate.Every(time.Second), s.qps)
	s.mu.Unlock()

	kvc := pb.NewKVClient(conn)

	for i := 0; i < s.N; i++ {
		go s.run(ctx, kvc)
	}

	plog.Printf("stresser %q is started", s.Endpoint)
	return nil
}
Example No. 10
// connMonitor monitors the connection and handles retries
func (c *Client) connMonitor() {
	var err error

	defer func() {
		_, err = c.retryConnection(c.ctx.Err())
		c.mu.Lock()
		c.lastConnErr = err
		close(c.newconnc)
		c.mu.Unlock()
	}()

	limiter := rate.NewLimiter(rate.Every(minConnRetryWait), 1)
	for limiter.Wait(c.ctx) == nil {
		select {
		case err = <-c.reconnc:
		case <-c.ctx.Done():
			return
		}
		conn, connErr := c.retryConnection(err)
		c.mu.Lock()
		c.lastConnErr = connErr
		c.conn = conn
		close(c.newconnc)
		c.newconnc = make(chan struct{})
		c.reconnc = make(chan error, 1)
		c.mu.Unlock()
	}
}
Example No. 11
// LimitReached returns a bool indicating if the Bucket identified by key ran out of tokens.
func (l *Limiter) LimitReached(key string) bool {
	l.Lock()
	defer l.Unlock()
	if _, found := l.tokenBuckets[key]; !found {
		l.tokenBuckets[key] = rate.NewLimiter(rate.Every(l.TTL), int(l.Max))
	}

	return !l.tokenBuckets[key].AllowN(time.Now(), 1)
}
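The per-key bucket map used above is a common pattern. A self-contained variant, sketched under the assumption of the usual sync, time, and rate imports (keyedLimiter and its fields are illustrative, not the library's Limiter type):

type keyedLimiter struct {
	mu      sync.Mutex
	buckets map[string]*rate.Limiter
	ttl     time.Duration // time to refill one token
	max     int           // bucket size (burst)
}

// limitReached lazily creates a bucket for key and reports whether it is empty.
func (k *keyedLimiter) limitReached(key string) bool {
	k.mu.Lock()
	defer k.mu.Unlock()
	lim, ok := k.buckets[key]
	if !ok {
		lim = rate.NewLimiter(rate.Every(k.ttl), k.max)
		k.buckets[key] = lim
	}
	return !lim.Allow() // Allow is shorthand for AllowN(time.Now(), 1)
}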
Example No. 12
// NewProgressReader creates a new ProgressReader.
func NewProgressReader(in io.ReadCloser, out Output, size int64, id, action string) *Reader {
	return &Reader{
		in:          in,
		out:         out,
		size:        size,
		id:          id,
		action:      action,
		rateLimiter: rate.NewLimiter(rate.Every(100*time.Millisecond), 1),
	}
}
Example No. 13
func TestRateLimiting(t *testing.T) {
	inputSize := 100
	input := make([]byte, 0, inputSize*2)
	for i := 0; i < inputSize; i++ {
		inputLine := []byte{byte((i % 26) + 65), newLine}
		input = append(input, inputLine...)
	}
	fmt.Printf("input: %d", len(input))

	bridgeCapacity := 6
	reader := bytes.NewReader(input)

	lineLimit := 3
	metReg := metrics.NewRegistry()
	lb := NewLogBridge(reader,
		ioutil.Discard,
		ioutil.Discard,
		logging.DefaultLogger,
		lineLimit,
		1024,
		metReg,
		"log_lines",
		"log_bytes",
		"dropped_lines",
		"time_spent_throttled_ms")
	// We're testing these, so we finely control their parameters
	lb.logLineRateLimit = rate.NewLimiter(rate.Limit(inputSize), inputSize)
	lb.logByteRateLimit = rate.NewLimiter(rate.Limit(1024), 1024)
	lb.LossyCopy(reader, bridgeCapacity)

	loggedLines := lb.logLinesCount.Count()
	droppedLines := lb.droppedLineCount.Count()
	if loggedLines == 0 {
		t.Errorf("Expected some logs to get through.")
	}
	if loggedLines == int64(inputSize) {
		t.Errorf("Expected some lines to get dropped")
	}
	if droppedLines == 0 {
		t.Errorf("Expected dropped lines to be non-zero")
	}
}
Example No. 14
func main() {
	st := time.Now()
	i := 0
	limiter := rate.NewLimiter(rate.Every(time.Second), 100)
	ctx, cancel := context.WithTimeout(context.TODO(), 2*time.Second)
	for limiter.Wait(ctx) == nil {
		i++
	}
	cancel()
	fmt.Println(i, "DONE. Took", time.Since(st))
	// 101 DONE. Took 1.00013873s: the 100-token burst drains immediately,
	// one more token refills after about a second, and the next Wait would
	// run past the 2-second deadline, so the loop stops at 101.
}
Example No. 15
func warningFor(dev protocol.DeviceID, msg string) {
	warningLimitersMut.Lock()
	defer warningLimitersMut.Unlock()
	lim, ok := warningLimiters[dev]
	if !ok {
		lim = rate.NewLimiter(rate.Every(perDeviceWarningIntv), 1)
		warningLimiters[dev] = lim
	}
	if lim.Allow() {
		l.Warnln(msg)
	}
}
Example No. 16
func (cs *Server) serverInit() error {
	nonce, err := genNonce()
	if err != nil {
		return fmt.Errorf("error generating key for hmac: %v")
	}
	cs.keyHMAC = []byte(nonce)
	cs.nonceUsed = make(map[string]bool)
	cs.limiter = rate.NewLimiter(queriesRate, 1)
	cs.whiteList = make(map[string]struct{})
	cs.keyIDSeen = make(map[string]time.Time)
	cs.IPSeen = make(map[string]time.Time)
	return nil
}
Example No. 17
func (c *containerAdapter) pullImage(ctx context.Context) error {
	rc, err := c.client.ImagePull(ctx, c.container.image(), c.container.imagePullOptions())
	if err != nil {
		return err
	}
	defer rc.Close()

	dec := json.NewDecoder(rc)
	dec.UseNumber()
	m := map[string]interface{}{}
	spamLimiter := rate.NewLimiter(rate.Every(1000*time.Millisecond), 1)

	lastStatus := ""
	for {
		if err := dec.Decode(&m); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		l := log.G(ctx)
		// limit pull progress logs unless the status changes
		if spamLimiter.Allow() || lastStatus != m["status"] {
			// if we have progress details, we have everything we need
			if progress, ok := m["progressDetail"].(map[string]interface{}); ok {
				// first, log the image and status
				l = l.WithFields(logrus.Fields{
					"image":  c.container.image(),
					"status": m["status"],
				})
				// then, if we have progress, log the progress
				if progress["current"] != nil && progress["total"] != nil {
					l = l.WithFields(logrus.Fields{
						"current": progress["current"],
						"total":   progress["total"],
					})
				}
			}
			l.Debug("pull in progress")
		}
		// sometimes, we get no useful information at all, and add no fields
		if status, ok := m["status"].(string); ok {
			lastStatus = status
		}
	}
	// if the final stream object contained an error, return it
	if errMsg, ok := m["error"]; ok {
		return errors.Errorf("%v", errMsg)
	}
	return nil
}
Example No. 18
func (s *querysrv) limit(remote net.IP) bool {
	key := remote.String()

	bkt, ok := s.limiter.Get(key)
	if ok {
		bkt := bkt.(*rate.Limiter)
		if !bkt.Allow() {
			// Rate limit exceeded; ignore packet
			return true
		}
	} else {
		// limitAvg is in packets per ten seconds.
		s.limiter.Add(key, rate.NewLimiter(rate.Limit(limitAvg)/10, limitBurst))
	}

	return false
}
Example No. 19
func Main() int {
	flag.Parse()
	if msgsPerSecond <= 0 {
		fmt.Printf("Messages per second cannot be <= 0")
		return 1
	}

	conn, err := amqp.Dial(amqpURLString)
	if err != nil {
		fmt.Printf("dial: %v\n", err)
		return 1
	}
	closed := make(chan *amqp.Error)
	conn.NotifyClose(closed)

	ch, err := conn.Channel()
	if err != nil {
		fmt.Printf("channel: %v\n", err)
		return 1
	}

	stresser := &stress.Stresser{
		Limit: rate.NewLimiter(rate.Limit(msgsPerSecond), msgsPerSecond),
		New: func() stress.Worker {
			return &AMQPPublisherWorker{
				Body:     body,
				Channel:  ch,
				Exchange: exchange,
			}
		},
	}
	go stresser.Start()
	defer stresser.Stop()

	interrupt := make(chan os.Signal, 1)
	signal.Notify(interrupt, os.Kill, os.Interrupt)

	select {
	case <-interrupt:
		return 0
	case err := <-closed:
		fmt.Printf("amqp: ", err)
		return 1
	}

}
Example No. 20
func main() {
	var (
		num int
		mu  sync.Mutex

		qps = 10
		wg  sync.WaitGroup
		N   = 10000
	)

	wg.Add(N)

	limiter := rate.NewLimiter(rate.Every(time.Second), qps)

	for i := 0; i < N; i++ {
		go func(i int) {
			defer wg.Done()
			for limiter.Wait(context.TODO()) == nil {
				mu.Lock()
				num++
				mu.Unlock()
			}
		}(i)
	}

	time.Sleep(time.Second)
	mu.Lock()
	fmt.Println("num:", num)
	mu.Unlock()

	fmt.Println("burst:", limiter.Burst())

	fmt.Println("blocking...")
	donec := make(chan struct{})
	go func() {
		wg.Wait()
		close(donec)
	}()
	select {
	case <-donec:
		fmt.Println("Done!")
	case <-time.After(time.Second):
		fmt.Println("Timed out!")
	}
}
Example No. 21
func (s *stresser) start() {
	ctx, cancel := context.WithCancel(context.Background())

	wg := &sync.WaitGroup{}
	wg.Add(s.N)

	s.mu.Lock()
	s.wg = wg
	s.rateLimiter = rate.NewLimiter(rate.Limit(s.qps), s.qps)
	s.cancel = cancel
	s.mu.Unlock()

	for i := 0; i < s.N; i++ {
		go s.run(ctx)
	}

	<-ctx.Done()
}
Example No. 22
func main() {
	var (
		num int
		mu  sync.Mutex

		qps = 10
		wg  sync.WaitGroup
		N   = 10000
	)

	wg.Add(N)

	ctx, cancel := context.WithCancel(context.Background())

	limiter := rate.NewLimiter(rate.Every(time.Second), qps)

	for i := 0; i < N; i++ {
		go func() {
			defer wg.Done()

			for {
				if err := limiter.Wait(ctx); err == context.Canceled {
					return
				}

				mu.Lock()
				num++
				mu.Unlock()
			}
		}()
	}

	time.Sleep(time.Second)
	mu.Lock()
	fmt.Println("num:", num)
	mu.Unlock()

	fmt.Println("burst:", limiter.Burst())

	fmt.Println("canceling...")
	cancel()
	wg.Wait()
	fmt.Println("Done!")
}
Example No. 23
func TestLowerBound(t *testing.T) {
	worker := &workTracker{}
	stresser := &stress.Stresser{
		New: func() stress.Worker {
			return worker
		},
		Limit: rate.NewLimiter(20, 20),
	}

	go stresser.Start()
	defer stresser.Stop()

	timer := time.NewTimer(time.Second)
	<-timer.C

	if worker.numCalls < 10 {
		t.Errorf("incorrect number of calls, expected at least %d, got %d", 10, worker.numCalls)
	}
}
Example No. 24
func TestAllocatesWorkersFromPool(t *testing.T) {
	called := false
	stresser := &stress.Stresser{
		New: func() stress.Worker {
			called = true
			return emptyWorker{}
		},
		Limit: rate.NewLimiter(10, 10),
	}

	go stresser.Start()
	defer stresser.Stop()

	timer := time.NewTimer(time.Second)
	<-timer.C

	if called == false {
		t.Error("expected true, got false")
	}
}
Example No. 25
func (s *stresser) Start() {
	ctx, cancel := context.WithCancel(context.Background())

	wg := &sync.WaitGroup{}
	wg.Add(s.N)

	s.mu.Lock()
	s.wg = wg
	s.rateLimiter = rate.NewLimiter(rate.Every(time.Second), s.qps)
	// s.rateLimiter = rate.NewLimiter(rate.Limit(s.qps), s.qps)
	s.cancel = cancel
	s.canceled = false
	s.mu.Unlock()

	for i := 0; i < s.N; i++ {
		go s.run(ctx)
	}

	<-ctx.Done()
	fmt.Println("Start finished with", ctx.Err())
}
Example No. 26
func newStressBuilder(s string, sc *stressConfig) stressBuilder {
	switch s {
	case "nop":
		return func(*member) Stresser {
			return &nopStresser{
				start: time.Now(),
				qps:   sc.qps,
			}
		}
	case "default":
		// TODO: Too intensive stressers can panic etcd member with
		// 'out of memory' error. Put rate limits in server side.
		stressN := 100
		l := rate.NewLimiter(rate.Limit(sc.qps), sc.qps)

		return func(m *member) Stresser {
			if sc.v2 {
				return &stresserV2{
					Endpoint:       m.ClientURL,
					keySize:        sc.keySize,
					keySuffixRange: sc.keySuffixRange,
					N:              stressN,
				}
			} else {
				return &stresser{
					Endpoint:       m.grpcAddr(),
					keyLargeSize:   sc.keyLargeSize,
					keySize:        sc.keySize,
					keySuffixRange: sc.keySuffixRange,
					N:              stressN,
					rateLimiter:    l,
				}
			}
		}
	default:
		plog.Panicf("unknown stresser type: %s\n", s)
	}

	return nil // never reach here
}
Example No. 27
func limit(addr string, cache *lru.Cache, lock *sync.RWMutex, intv time.Duration, burst int) bool {
	host, _, err := net.SplitHostPort(addr)
	if err != nil {
		return false
	}

	lock.RLock()
	bkt, ok := cache.Get(host)
	lock.RUnlock()
	if ok {
		bkt := bkt.(*rate.Limiter)
		if !bkt.Allow() {
			// Rate limit
			return true
		}
	} else {
		lock.Lock()
		cache.Add(host, rate.NewLimiter(rate.Every(intv), burst))
		lock.Unlock()
	}
	return false
}
Example No. 28
// Reserve returns how long the crawler should wait before crawling this
// URL.
func (l *Limiter) Reserve(u *url.URL) time.Duration {
	l.mu.Lock()
	defer l.mu.Unlock()

	h := u.Host
	v, ok := l.host[h]
	if !ok {
		d, burst := l.query(h)
		v = &entry{
			limiter: rate.NewLimiter(rate.Every(d), burst),
		}
		l.host[h] = v
	} else if l.updatable && v.count >= l.freq {
		d, _ := l.query(h)
		v.limiter.SetLimit(rate.Every(d))
		v.count = 0
	}
	if l.updatable {
		v.count++
	}
	return v.limiter.Reserve().Delay()
}
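Unlike Allow and Wait, Reserve always returns a reservation together with the delay the caller should honor before acting, which is what lets the crawler above schedule its next fetch. A minimal, self-contained sketch of that pattern (the interval and loop count are illustrative):

lim := rate.NewLimiter(rate.Every(500*time.Millisecond), 1)
for i := 0; i < 3; i++ {
	r := lim.Reserve()
	if !r.OK() {
		break // the request can never be satisfied within the limiter's burst
	}
	time.Sleep(r.Delay()) // wait as long as the reservation suggests
	// ... crawl the URL here ...
}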
Example No. 29
func TestRampUp(t *testing.T) {
	log.Println("Start Ramp Up")
	calls := 0
	stresser := &stress.Stresser{
		New: func() stress.Worker {
			calls++
			return &slowWorker{
				delay: time.Second / 2,
			}
		},
		Limit: rate.NewLimiter(10, 10),
	}

	go stresser.Start()
	defer stresser.Stop()

	timer := time.NewTimer(5 * time.Second)
	<-timer.C

	if calls == 1 {
		t.Errorf("incorrect number of workers created, expected at least more than %d, got %d", 1, calls)
	}
}
Example No. 30
func watchLatencyFunc(cmd *cobra.Command, args []string) {
	key := string(mustRandBytes(watchLKeySize))
	value := string(mustRandBytes(watchLValueSize))

	client := mustCreateConn()
	stream := v3.NewWatcher(client)
	wch := stream.Watch(context.TODO(), key)

	bar = pb.New(watchLTotal)
	bar.Format("Bom !")
	bar.Start()

	limiter := rate.NewLimiter(rate.Limit(watchLPutRate), watchLPutRate)
	r := newReport()
	rc := r.Run()

	for i := 0; i < watchLTotal; i++ {
		// limit key put as per reqRate
		if err := limiter.Wait(context.TODO()); err != nil {
			break
		}
		_, err := client.Put(context.TODO(), key, value)

		if err != nil {
			fmt.Fprintf(os.Stderr, "Failed to Put for watch latency benchmark: %v\n", err)
			os.Exit(1)
		}
		st := time.Now()
		<-wch
		r.Results() <- report.Result{Err: err, Start: st, End: time.Now()}
		bar.Increment()
	}

	close(r.Results())
	bar.Finish()
	fmt.Printf("%s", <-rc)
}