// function to process the incoming measurements and update the stats
// this is also the default reporter. All other reporters are in reporter.go
func (test *TestStatistics) default_reporter(m Metric) {
    teststep := m.GetTeststep()
    elapsed := time.Duration(m.GetElapsed())
    timestamp := time.Time(m.GetTimestamp())
    err_count := int64(0)
    if len(m.GetError()) > 0 {
        err_count = 1
    }

    test.lock.RLock()
    val, exists := test.stats[teststep]
    test.lock.RUnlock()

    if exists {
        val.avg = (time.Duration(val.count)*val.avg + elapsed) / time.Duration(val.count+1)
        if elapsed > val.max {
            val.max = elapsed
        }
        if elapsed < val.min {
            val.min = elapsed
        }
        val.last = timestamp
        val.count++
        val.error += err_count

        test.lock.Lock()
        test.stats[teststep] = val
        test.lock.Unlock()
    } else {
        // create a new statistic for this teststep
        test.lock.Lock()
        test.stats[teststep] = stats_value{elapsed, elapsed, elapsed, 1, err_count, timestamp}
        test.lock.Unlock()
    }
}
// Run a testcase. The timing parameters (delay, runfor, rampup, pacing) are specified in seconds!
func (test *TestScenario) Run(name string, testcase func(*Meta, Settings), delay float64,
    runfor float64, rampup float64, users int, pacing float64, settings Settings) {
    test.wg.Add(1)
    // the "Scheduler" itself is a goroutine!
    go func(test *TestScenario) {
        // ramp up the users
        defer test.wg.Done()
        time.Sleep(time.Duration(delay * float64(time.Second)))
        userStart := time.Now()

        test.wg.Add(users)
        for i := 0; i < users; i++ {
            // start user
            go func(nbr int) {
                defer test.wg.Done()
                time.Sleep(time.Duration(float64(nbr) * rampup * float64(time.Second)))

                for j := 0; time.Now().Sub(userStart) < time.Duration(runfor*float64(time.Second)); j++ {
                    // next iteration
                    start := time.Now()
                    meta := &Meta{Testcase: name, Iteration: j, User: nbr}
                    if test.status == Stopping {
                        break
                    }
                    testcase(meta, settings)
                    if test.status == Stopping {
                        break
                    }
                    test.paceMaker(time.Duration(pacing*float64(time.Second)), time.Now().Sub(start))
                }
            }(i)
        }
    }(test)
}
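For orientation, here is a minimal sketch of how Run might be invoked from within the same package. The scenario function name, the testcase body, the step name, and the timing values are assumptions made for the sketch; only the Run and GetSettings signatures come from the listings.

// hypothetical scenario wiring, assuming it lives in the same package as TestScenario
func exampleScenario(test *TestScenario) {
    browse := func(meta *Meta, settings Settings) {
        // the work to be measured for this teststep would go here
    }
    // no start delay, run for 60s, ramp the users up over 10s, 5 concurrent users, 2s pacing
    test.Run("01_01_teststep", browse, 0.0, 60.0, 10.0, 5, 2.0, test.GetSettings())
}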
func TestPaceMakerStops(t *testing.T) {
    time.Freeze(time.Now())
    defer time.Unfreeze()

    // create a fake loadmodel for testing
    var fake = NewTest()
    fake.status = Stopping
    fake.config["Scenario"] = "scenario1"

    start := time.Now()
    fake.paceMaker(time.Duration(10*time.Second), time.Duration(0))
    sleep := float64(time.Now().Sub(start)) / float64(time.Millisecond)
    if sleep != 0 {
        t.Errorf("PaceMaker did not stop! It slept: %v\n", sleep)
    }
}
func (test *TestScenario) DoIterations(testcase func(*Meta, Settings),
    iterations int, pacing float64, parallel bool) {
    f := func(test *TestScenario) {
        settings := test.GetSettings()
        defer test.wg.Done()

        for i := 0; i < iterations; i++ {
            start := time.Now()
            meta := &Meta{Iteration: i, User: 0}
            if test.status == Stopping {
                break
            }
            testcase(meta, settings)
            if test.status == Stopping {
                break
            }
            test.paceMaker(time.Duration(pacing*float64(time.Second)), time.Now().Sub(start))
        }
    }

    if parallel {
        test.wg.Add(1)
        go f(test)
    } else {
        test.wg.Wait() // sequential processing: wait for running goroutines to finish
        test.wg.Add(1)
        f(test)
    }
}
// Thinktime takes ThinkTimeFactor and ThinkTimeVariance into account.
// tt is given in seconds. So for example 3.0 equates to 3 seconds; 0.3 to 300ms.
func (test *TestScenario) Thinktime(tt float64) {
    if test.status == Running {
        _, ttf, ttv, _ := test.GetScenarioConfig()
        r := (rand.Float64() * 2.0) - 1.0 // r in [-1.0, 1.0)
        v := tt * ttf * ((r * ttv) + 1.0) * float64(time.Second)
        time.Sleep(time.Duration(v))
    }
}
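A short, hypothetical example of where Thinktime typically sits inside a testcase. Only the Thinktime call and the func(*Meta, Settings) signature come from the listings; the surrounding function and its name are illustrative assumptions.

// hypothetical testcase in the same package, pausing after the measured work
func exampleTestcase(test *TestScenario) func(*Meta, Settings) {
    return func(meta *Meta, settings Settings) {
        // ... issue the request / perform the measured work here ...
        test.Thinktime(0.5) // roughly 500ms, scaled by ThinkTimeFactor and ThinkTimeVariance
    }
}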
func TestPaceMakerVariance(t *testing.T) {
    // create a fake loadmodel for testing
    var fake = NewTest()
    fake.status = Running
    fake.config["Scenario"] = "scenario1"
    fake.config["ThinkTimeFactor"] = 2.0
    fake.config["ThinkTimeVariance"] = 0.1
    fake.config["PacingVariance"] = 0.1

    min, max, avg := 1000.0, 1000.0, 0.0
    time.Freeze(time.Now())
    defer time.Unfreeze()

    for i := 0; i < 1000; i++ {
        start := time.Now()
        fake.paceMaker(time.Duration(1*time.Second), time.Duration(0))
        sleep := float64(time.Now().Sub(start)) / float64(time.Millisecond)
        if sleep < min {
            min = sleep
        }
        if max < sleep {
            max = sleep
        }
        avg += sleep
    }
    avg = avg / 1000

    if min < 900.0 {
        t.Errorf("Minimum pace time %f out of defined range!\n", min)
    }
    if max >= 1100.0 {
        t.Errorf("Maximum pace time %f out of defined range!", max)
    }
    if avg < 990.0 || avg > 1010.0 {
        t.Fatalf("Average pace time %f out of defined range!", avg)
    }
}
// simulating that we read from a network
func NewSlowReader(size int, latency time.Duration) (s *SlowReader) {
    t := 50 * time.Millisecond
    z := 1500

    // find the right increments
    n := int(math.Ceil(float64(latency) / float64(50*time.Millisecond)))
    if (size / n) < z {
        z = size / n
        if latency < t {
            t = latency
        }
    } else {
        //n = size / z
        t = time.Duration(float64(latency) / float64(size) * float64(z))
    }
    s = &SlowReader{size, latency, t, z, 0}
    return s
}
func main() {
    // read argument from CLI
    usage := func() {
        fmt.Println("airbiscuit runs for a given amount of time in seconds.")
        fmt.Println("After it ran for the given amount of time it reports on the received requests.")
        fmt.Println("Sample use:")
        fmt.Println("$ airbiscuit 60")
        fmt.Println("GET count: 111")
        fmt.Println("POST count: 122")
    }
    if len(os.Args) != 2 {
        usage()
        return
    }
    wait, err := strconv.Atoi(os.Args[1])
    if err != nil {
        usage()
        return
    }

    // assemble the server
    s := airbiscuit.NewStats(500 * time.Millisecond)
    r := airbiscuit.Router(s)
    srv := graceful.Server{
        Timeout: 5 * time.Second,
        Server: &http.Server{
            Handler: r,
            Addr:    ":3001",
        },
    }

    // stop server after wait time
    go func() {
        time.Sleep(time.Duration(wait * int(time.Second)))
        srv.Stop(100 * time.Millisecond)
        fmt.Printf("Get count: %d\n", s.G)
        fmt.Printf("Post count: %d\n", s.P)
    }()
    srv.ListenAndServe()
}
// paceMaker is used internally. For testability it is not implemented as an internal function.
// Parameter <pacing> is given in nanoseconds.
func (test *TestScenario) paceMaker(pacing time.Duration, elapsed time.Duration) {
    _, _, _, pv := test.GetScenarioConfig()
    const small = 2 * time.Second

    // calculate the variable pacing
    r := (rand.Float64() * 2.0) - 1.0 // r in [-1.0, 1.0)
    v := float64(pacing) * ((r * pv) + 1.0)
    p := time.Duration(v - float64(elapsed))
    if p < 0 {
        return
    }

    // split the sleep into small intervals so we can break out of it while the test is stopping
    for ; p > small; p = p - small {
        if test.status != Running {
            break
        }
        time.Sleep(small)
    }
    // remaining sleep time
    if test.status == Running {
        time.Sleep(p)
    }
}
func TestIntegrationOfHttpPackage(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping test in short mode.")
    }
    // init
    gg.Testscenario("scenario1", endurance)

    // main part
    err := gg.ReadConfigValidate(airbiscuitLoadmodel, gogrinder.LoadmodelSchema)
    if err != nil {
        t.Fatalf("Error while reading loadmodel config: %s!", err.Error())
    }

    // start the airbiscuit server
    s := &airbiscuit.Stats{Sleep: 50 * time.Millisecond}
    r := airbiscuit.Router(s)
    srv := graceful.Server{
        Timeout: 50 * time.Millisecond,
        Server: &http.Server{
            Handler: r,
            Addr:    ":3001",
        },
    }

    // stop server after wait time
    go func() {
        time.Sleep(time.Duration(1050 * int(time.Millisecond)))
        srv.Stop(80 * time.Millisecond)
        fmt.Printf("Get count: %d\n", s.G)
        fmt.Printf("Post count: %d\n", s.P)
    }()
    go srv.ListenAndServe()

    // run the test
    start := time.Now()
    gg.Exec() // exec the scenario that has been selected in the config file
    execution := time.Now().Sub(start)

    // verify total run time of the endurance scenario
    if execution > 1100*time.Millisecond {
        t.Errorf("Error: execution time of scenario1 not as expected: %v\n", execution)
    }

    results := gg.Results("")

    // check 01_01_teststep (get requests)
    if results[0].Teststep != "01_01_teststep" {
        t.Errorf("Teststep name not as expected: %s!", results[0].Teststep)
    }
    if results[0].Count < 170 {
        t.Errorf("Less than 170 get requests: %v!", results[0].Count)
    }
    if results[0].Avg < 50.0 || results[0].Avg > 62.0 {
        t.Errorf("Average not as expected: %f!", results[0].Avg)
    }
    if results[0].Min < 50.0 || results[0].Min > 62.0 {
        t.Errorf("Minimum not as expected: %f!", results[0].Min)
    }
    if results[0].Max < 50.0 || results[0].Max > 62.0 {
        t.Errorf("Maximum not as expected: %f!", results[0].Max)
    }

    // check 02_01_teststep (post requests)
    if results[1].Teststep != "02_01_teststep" {
        t.Errorf("Teststep name not as expected: %s!", results[1].Teststep)
    }
    if results[1].Count < 170 {
        t.Errorf("Less than 170 post requests: %v!", results[1].Count)
    }
    if results[1].Avg < 50.0 || results[1].Avg > 62.0 {
        t.Errorf("Average not as expected: %f!", results[1].Avg)
    }
    if results[1].Min < 50.0 || results[1].Min > 62.0 {
        t.Errorf("Minimum not as expected: %f!", results[1].Min)
    }
    if results[1].Max < 50.0 || results[1].Max > 62.0 {
        t.Errorf("Maximum not as expected: %f!", results[1].Max)
    }
}