Example #1
// makeLoad runs the main load phase: cfg.c workers throttled to cfg.qps for the
// test duration *d, printing state every samplePeriod.
func makeLoad(cfg *loadConfig) {
	client = fastclient.New(req, *t, *successStatusCode)
	startTime := time.Now()
	ctx, cancel := context.WithCancel(context.Background())
	throttle.SetLimit(cfg.qps)
	client.RunWorkers(cfg.c)
	// Multiplex the test timeout, the progress-bar tick and the sampling tick;
	// on timeout, stop the throttle and cancel the context that load(ctx) runs under.
	go func() {
		stateTick := time.Tick(samplePeriod)
		timeout := time.After(*d)
		bar, progressTicker := acquireProgressBar(*d)
		for {
			select {
			case <-timeout:
				finishProgressBar(bar)
				printSummary("Loading test", startTime)
				throttle.Stop()
				cancel()
				return
			case <-progressTicker:
				bar.Increment()
			case <-stateTick:
				printState()
			}
		}
	}()
	load(ctx)
}
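The core of makeLoad is a single select loop that multiplexes the overall test timeout, the progress-bar tick and the sampling tick, then cancels a context to stop the request loop. A minimal, self-contained sketch of that pattern follows; the durations, the print statements and the simulated request are placeholders, not values from the tool:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	timeout := time.After(3 * time.Second)           // overall test duration, like *d
	sample := time.NewTicker(500 * time.Millisecond) // periodic sampling, like samplePeriod
	defer sample.Stop()

	go func() {
		for {
			select {
			case <-timeout:
				fmt.Println("duration elapsed, stopping")
				cancel() // unblocks the request loop below, like load(ctx) returning
				return
			case <-sample.C:
				fmt.Println("sample: print current state here")
			}
		}
	}()

	// Stand-in for load(ctx): issue work until the context is cancelled.
	for {
		select {
		case <-ctx.Done():
			fmt.Println("load finished")
			return
		default:
			time.Sleep(50 * time.Millisecond) // simulated request
		}
	}
}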
Example #2
// burstThroughput floods the workers with jobs for calibrateDuration to measure
// the raw maximum throughput, then seeds cfg.qps and cfg.c from the result.
func burstThroughput(cfg *loadConfig) {
	client = fastclient.New(req, *t, *successStatusCode)
	startTime := time.Now()
	timeout := time.After(calibrateDuration)
	bar, progressTicker := acquireProgressBar(calibrateDuration)

	client.RunWorkers(*c)
	for {
		select {
		case <-timeout:
			finishProgressBar(bar)
			cfg.qps = float64(client.RequestSum()) / calibrateDuration.Seconds()
			cfg.c = client.Amount()
			// Back off if more than 2% of requests failed; convert to float64 so
			// the error-rate division is not truncated to zero.
			if float64(client.Errors())/float64(client.RequestSum())*100 > 2 {
				cfg.qps /= 2
				cfg.c /= 2
			}
			printSummary("Burst Throughput", startTime)
			return
		case <-progressTicker:
			bar.Increment()
		default:
			// No timer fired: keep the worker job channel saturated.
			client.Jobsch <- struct{}{}
		}
	}
}
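burstThroughput measures raw capacity by pushing jobs into the worker channel as fast as the default case allows until the calibration window closes. A self-contained sketch of the same saturate-then-measure pattern, with an in-memory worker pool standing in for fastclient's HTTP workers:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

func main() {
	jobs := make(chan struct{}, 1000) // plays the role of client.Jobsch
	var handled int64

	// A small worker pool drains the job channel as fast as it can;
	// the real tool would fire an HTTP request per job.
	for i := 0; i < 4; i++ {
		go func() {
			for range jobs {
				atomic.AddInt64(&handled, 1)
			}
		}()
	}

	start := time.Now()
	timeout := time.After(2 * time.Second) // calibration window, like calibrateDuration
	for {
		select {
		case <-timeout:
			qps := float64(atomic.LoadInt64(&handled)) / time.Since(start).Seconds()
			fmt.Printf("burst throughput: %.0f req/s\n", qps)
			return
		default:
			jobs <- struct{}{} // keep the workers saturated
		}
	}
}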
Example #3
// calibrateThroughput runs an adjustment phase of adjustmentDuration: every
// samplePeriod it prints the current state and lets calibrate() retune the
// throttle, then stores the resulting qps and worker count back into cfg.
func calibrateThroughput(cfg *loadConfig) {
	client = fastclient.New(req, *t, *successStatusCode)
	startTime := time.Now()
	ctx, cancel := context.WithCancel(context.Background())

	throttle.SetLimit(cfg.qps)
	client.RunWorkers(cfg.c)
	// Sample and recalibrate every samplePeriod until the adjustment window ends.
	go func() {
		timeout := time.After(adjustmentDuration)
		sampler := time.Tick(samplePeriod)
		bar, progressTicker := acquireProgressBar(adjustmentDuration)
		for {
			select {
			case <-timeout:
				finishProgressBar(bar)
				cfg.qps = throttle.Limit()
				cfg.c = client.Amount()
				printSummary("Adjustment test", t)
				cancel()
				return
			case <-progressTicker:
				bar.Increment()
			case <-sampler:
				printState()
				calibrate()
			}
		}
	}()

	load(ctx)
}
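The adjustment phase pairs the same select loop with a periodic calibrate() step that retunes the throttle. The throttle and calibrate() internals are not shown in these examples, so the sketch below only approximates the idea using golang.org/x/time/rate and an illustrative adjustment rule (halve on a high error rate, otherwise raise by 25%); that rule and the errorRateTooHigh helper are assumptions, not the tool's actual logic:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

// errorRateTooHigh stands in for whatever error-rate check calibrate() performs.
func errorRateTooHigh() bool { return false }

func main() {
	limiter := rate.NewLimiter(rate.Limit(100), 1) // start at 100 req/s
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	sample := time.NewTicker(500 * time.Millisecond)
	defer sample.Stop()

	// Feedback loop: on every sample, back off when errors are high,
	// otherwise ramp the limit up by 25%.
	go func() {
		for range sample.C {
			if errorRateTooHigh() {
				limiter.SetLimit(limiter.Limit() / 2)
			} else {
				limiter.SetLimit(limiter.Limit() * 5 / 4)
			}
			fmt.Printf("current limit: %.0f req/s\n", float64(limiter.Limit()))
		}
	}()

	// Request loop paced by the limiter until the adjustment window ends.
	for limiter.Wait(ctx) == nil {
		// a real worker would fire one request here
	}
	fmt.Printf("calibrated to about %.0f req/s\n", float64(limiter.Limit()))
}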