// Run a testcase. The timing parameters (delay, runfor, rampup, pacing) are given in seconds.
func (test *TestScenario) Run(name string, testcase func(*Meta, Settings), delay float64,
    runfor float64, rampup float64, users int, pacing float64, settings Settings) {
    test.wg.Add(1)
    // the "scheduler" itself is a goroutine!
    go func(test *TestScenario) {
        // ramp up the users
        defer test.wg.Done()
        time.Sleep(time.Duration(delay * float64(time.Second)))

        userStart := time.Now()
        test.wg.Add(users)
        for i := 0; i < users; i++ {
            // start user
            go func(nbr int) {
                defer test.wg.Done()
                time.Sleep(time.Duration(float64(nbr) * rampup * float64(time.Second)))

                for j := 0; time.Now().Sub(userStart) < time.Duration(runfor*float64(time.Second)); j++ {
                    // next iteration
                    start := time.Now()
                    meta := &Meta{Testcase: name, Iteration: j, User: nbr}
                    if test.status == Stopping {
                        break
                    }
                    testcase(meta, settings)
                    if test.status == Stopping {
                        break
                    }
                    test.paceMaker(time.Duration(pacing*float64(time.Second)), time.Now().Sub(start))
                }
            }(i)
        }
    }(test)
}
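For orientation, a hedged usage sketch of Run; the scenario name and numbers below are illustrative, not taken from a loadmodel: start tc1 immediately, run it for 30 seconds with 10 users, start one additional user every 0.5 seconds, and pace every iteration to 2 seconds.

// sketch only: illustrative values
// delay 0s, runfor 30s, rampup 0.5s per user, 10 users, pacing 2s
test.Run("01_testcase", tc1, 0.0, 30.0, 0.5, 10, 2.0, settings)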
// define testcases using teststeps
func tc1(m *gogrinder.Meta, s gogrinder.Settings) {
    b := gg.NewBracket("01_01_teststep")
    time.Sleep(20 * time.Millisecond)
    b.End(m)
    thinktime(0.050)

    b = gg.NewBracket("01_02_teststep")
    time.Sleep(30 * time.Millisecond)
    b.End(m)
}
func TestRunSequential(t *testing.T) {
    time.Freeze(time.Now())
    defer time.Unfreeze()

    fake := NewTest()
    fake.config["Scenario"] = "scenario1"
    counter := 0

    // assemble testcase
    tc1 := func(meta *Meta, s Settings) {
        // check meta
        if meta.Iteration != counter {
            t.Errorf("Iteration %d but expected %d!", meta.Iteration, counter)
        }
        if meta.User != 0 {
            t.Error("User meta not as expected!")
        }
        time.Sleep(20)
        counter++
    }

    // run the testcase
    start := time.Now()
    fake.DoIterations(tc1, 20, 0, false)

    if time.Now().Sub(start) != 400 {
        t.Error("Testcase execution time not as expected!")
    }
    if counter != 20 {
        t.Error("Testcase iteration counter not as expected!")
    }
}
// define testcases using teststeps
func tc1(m *Meta, s Settings) {
    b := gg.NewBracket("01_01_teststep")
    time.Sleep(50 * time.Millisecond)
    b.End(m)
    thinktime(0.050)
}
// Thinktime takes ThinkTimeFactor and ThinkTimeVariance into account.
// tt is given in seconds, so for example 3.0 equates to 3s and 0.3 to 300ms.
func (test *TestScenario) Thinktime(tt float64) {
    if test.status == Running {
        _, ttf, ttv, _ := test.GetScenarioConfig()
        r := (rand.Float64() * 2.0) - 1.0 // r in [-1.0, 1.0)
        v := tt * ttf * ((r * ttv) + 1.0) * float64(time.Second)
        time.Sleep(time.Duration(v))
    }
}
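The testcases above call a lowercase thinktime helper; presumably that is just this method bound to the scenario instance. A minimal sketch, assuming gg is the *TestScenario the testcases run against (the constructor name is an assumption):

// sketch only: the constructor name NewTest is assumed here
var gg = gogrinder.NewTest()
var thinktime = gg.Thinktime // so testcases can simply call thinktime(0.050)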
// paceMaker is used internally. For testability it is implemented as a separate
// method rather than being inlined into Run.
// Both pacing and elapsed are time.Durations, i.e. given in nanoseconds.
func (test *TestScenario) paceMaker(pacing time.Duration, elapsed time.Duration) {
    _, _, _, pv := test.GetScenarioConfig()
    const small = 2 * time.Second

    // calculate the variable pacing
    r := (rand.Float64() * 2.0) - 1.0 // r in [-1.0, 1.0)
    v := float64(pacing) * ((r * pv) + 1.0)
    p := time.Duration(v - float64(elapsed))
    if p < 0 {
        return
    }

    // split the sleep into small intervals so a stop request can interrupt it
    for ; p > small; p = p - small {
        if test.status != Running {
            break
        }
        time.Sleep(small)
    }
    // remaining sleep time
    if test.status == Running {
        time.Sleep(p)
    }
}
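To make the pacing arithmetic concrete (numbers are illustrative): with pacing = 10s, a pacing variance pv = 0.1 and a random draw r = 0.5, the varied pacing v is 10s * (0.5*0.1 + 1.0) = 10.5s. If the iteration itself took elapsed = 3s, paceMaker sleeps the remaining 7.5s, in 2-second slices, so that a stop request can interrupt the wait.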
func (s *Stats) post(w http.ResponseWriter, r *http.Request) {
    // curl --data "abcd" http://localhost:3001/post_stuff
    // check the content and return 200 if it is ok
    time.Sleep(s.Sleep)

    data, err := ioutil.ReadAll(r.Body)
    if err != nil {
        http.Error(w, "error: while reading post body.", 500)
        return
    }
    if len(data) < 2000 {
        http.Error(w, "error: not enough data.", 400)
        return
    }

    s.lock.Lock()
    s.P++
    s.lock.Unlock()
}
func main() {
    // read argument from CLI
    usage := func() {
        fmt.Println("airbiscuit runs for a given amount of time in seconds.")
        fmt.Println("after it has run for the given amount of time it reports on the received requests.")
        fmt.Println("sample use:")
        fmt.Println("$ airbiscuit 60")
        fmt.Println("GET count: 111")
        fmt.Println("POST count: 122")
    }
    if len(os.Args) != 2 {
        usage()
        return
    }
    wait, err := strconv.Atoi(os.Args[1])
    if err != nil {
        usage()
        return
    }

    // assemble the server
    s := airbiscuit.NewStats(500 * time.Millisecond)
    r := airbiscuit.Router(s)
    srv := graceful.Server{
        Timeout: 5 * time.Second,
        Server: &http.Server{
            Handler: r,
            Addr:    ":3001",
        },
    }

    // stop server after wait time
    go func() {
        time.Sleep(time.Duration(wait) * time.Second)
        srv.Stop(100 * time.Millisecond)
        fmt.Printf("Get count: %d\n", s.G)
        fmt.Printf("Post count: %d\n", s.P)
    }()
    srv.ListenAndServe()
}
// implement the io.Reader interface
func (s *SlowReader) Read(buf []byte) (n int, err error) {
    // Read small chunks at a time, even if the caller asks for more.
    // 1500 is the MTU for Ethernet, i.e. a likely maximum packet size.
    // The idea is that this reader delivers data in roughly 1500 byte / 50 ms increments.

    // signal EOF once the SlowReader is exhausted
    if s.bytes <= 0 {
        return 0, io.EOF
    }

    toRead := min(len(buf), min(s.z, s.bytes))
    s.bytes -= toRead
    for i := 0; i < toRead; i++ {
        buf[i] = letterBytes[s.last%52]
        s.last++
    }

    nap := quickest(s.latency, s.t)
    s.latency -= nap
    time.Sleep(nap)
    return toRead, nil
}
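The Read method above references a few fields and helpers that are not shown here. A minimal sketch of what they might look like; the names match their usage above, but the exact definitions are assumptions:

// sketch only: field and helper definitions inferred from their usage in Read
type SlowReader struct {
    bytes   int           // total bytes left to serve
    z       int           // chunk size per Read call, e.g. 1500
    latency time.Duration // remaining artificial latency to spread across Reads
    t       time.Duration // maximum nap per Read call, e.g. 50 * time.Millisecond
    last    int           // rotating index into letterBytes
}

const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

// min returns the smaller of two ints
func min(a, b int) int {
    if a < b {
        return a
    }
    return b
}

// quickest returns the shorter of two durations
func quickest(a, b time.Duration) time.Duration {
    if a < b {
        return a
    }
    return b
}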
func TestIntegrationOfHttpPackage(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping test in short mode.")
    }

    // init
    gg.Testscenario("scenario1", endurance)

    // main part
    err := gg.ReadConfigValidate(airbiscuitLoadmodel, gogrinder.LoadmodelSchema)
    if err != nil {
        t.Fatalf("Error while reading loadmodel config: %s!", err.Error())
    }

    // start the airbiscuit server
    s := &airbiscuit.Stats{Sleep: 50 * time.Millisecond}
    r := airbiscuit.Router(s)
    srv := graceful.Server{
        Timeout: 50 * time.Millisecond,
        Server: &http.Server{
            Handler: r,
            Addr:    ":3001",
        },
    }

    // stop server after wait time
    go func() {
        time.Sleep(1050 * time.Millisecond)
        srv.Stop(80 * time.Millisecond)
        fmt.Printf("Get count: %d\n", s.G)
        fmt.Printf("Post count: %d\n", s.P)
    }()
    go srv.ListenAndServe()

    // run the test
    start := time.Now()
    gg.Exec() // exec the scenario that has been selected in the config file
    execution := time.Now().Sub(start)

    // verify total run time of the endurance scenario
    if execution > 1100*time.Millisecond {
        t.Errorf("Error: execution time of scenario1 not as expected: %v\n", execution)
    }

    results := gg.Results("")

    // check 01_01_teststep (get requests)
    if results[0].Teststep != "01_01_teststep" {
        t.Errorf("Teststep name not as expected: %s!", results[0].Teststep)
    }
    if results[0].Count < 170 {
        t.Errorf("Less than 170 get requests: %v!", results[0].Count)
    }
    if results[0].Avg < 50.0 || results[0].Avg > 62.0 {
        t.Errorf("Average not as expected: %f!", results[0].Avg)
    }
    if results[0].Min < 50.0 || results[0].Min > 62.0 {
        t.Errorf("Minimum not as expected: %f!", results[0].Min)
    }
    if results[0].Max < 50.0 || results[0].Max > 62.0 {
        t.Errorf("Maximum not as expected: %f!", results[0].Max)
    }

    // check 02_01_teststep (post requests)
    if results[1].Teststep != "02_01_teststep" {
        t.Errorf("Teststep name not as expected: %s!", results[1].Teststep)
    }
    if results[1].Count < 170 {
        t.Errorf("Less than 170 post requests: %v!", results[1].Count)
    }
    if results[1].Avg < 50.0 || results[1].Avg > 62.0 {
        t.Errorf("Average not as expected: %f!", results[1].Avg)
    }
    if results[1].Min < 50.0 || results[1].Min > 62.0 {
        t.Errorf("Minimum not as expected: %f!", results[1].Min)
    }
    if results[1].Max < 50.0 || results[1].Max > 62.0 {
        t.Errorf("Maximum not as expected: %f!", results[1].Max)
    }
}
// This is the "standard" gogrinder behaviour. If you need a special configuration // or setup then maybe you should start with this code. func GoGrinder(test Scenario) error { var err error filename, noExec, noReport, noFrontend, noPrometheus, jtl, port, logLevel, err := GetCLI() if err != nil { return err } ll, _ := log.ParseLevel(logLevel) log.SetLevel(ll) err = test.ReadConfig(filename) if err != nil { return err } // prepare reporter plugins if jtl { // initialize the jtl reporter fj, err := os.OpenFile("results.jtl", os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0666) if err != nil { log.Error("can not open jtl file: %v", err) // we do not need to stop in this case... } defer fj.Close() test.AddReportPlugin(&JtlReporter{fj}) } else { // initialize the event reporter fe, err := os.OpenFile("event-log.txt", os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0666) if err != nil { log.Error("can not open event log file: %v", err) } defer fe.Close() test.AddReportPlugin(&EventReporter{fe}) } // result reporter exec := func() { err = test.Exec() if !noReport { test.Report(stdout) } } frontend := func() { srv := NewTestServer(test) srv.Addr = fmt.Sprintf(":%d", port) err = srv.ListenAndServe() } // prometheus reporter needs to "wrap" all test executions var srv *graceful.Server if !noPrometheus { srv = NewPrometheusReporterServer() srv.Addr = fmt.Sprintf(":%d", 9110) go srv.ListenAndServe() // if for example the port is in use we continue... } // handle the different run modes // invalid mode of noExec && noFrontend is handled in cli.go if noExec { frontend() } if noFrontend { exec() } if !noExec && !noFrontend { // this is the "normal" case - webserver is blocking go exec() frontend() } // run for another +2 * scrape_interval so we read all metrics in if !noPrometheus { time.Sleep(11 * time.Second) srv.Stop(1 * time.Second) } return err }
func tc3(m *gogrinder.Meta, s gogrinder.Settings) {
    b := gg.NewBracket("03_01_teststep")
    time.Sleep(150 * time.Millisecond)
    b.End(m)
    thinktime(0.150)
}
func (fb testReader) Read(p []byte) (n int, err error) {
    time.Sleep(55 * time.Millisecond)
    sr := strings.NewReader("markfink")
    return sr.Read(p)
}
func tc2(m *Meta, s Settings) {
    b := gg.NewBracket("02_01_teststep")
    time.Sleep(100 * time.Millisecond)
    b.End(m)
    thinktime(0.100)
}
func TestRouteHandlerStatisticsWithQuery(t *testing.T) {
    // test with 3 measurements (two stats)
    srv := TestServer{}
    fake := NewTest()
    srv.test = fake
    done := srv.test.Collect() // this needs a collector to unblock update

    t1 := time.Now().UTC()
    srv.test.Update(&Meta{Teststep: "sth", Elapsed: Elapsed(8 * time.Millisecond),
        Timestamp: Timestamp(t1)})
    time.Sleep(5 * time.Millisecond)
    t2 := t1.Add(2 * time.Millisecond)
    srv.test.Update(&Meta{Teststep: "else", Elapsed: Elapsed(10 * time.Millisecond),
        Timestamp: Timestamp(t1)})
    srv.test.Update(&Meta{Teststep: "else", Elapsed: Elapsed(2 * time.Millisecond),
        Timestamp: Timestamp(t2)})
    t3 := t2.Add(2 * time.Millisecond)
    close(fake.measurements)
    <-done

    { // startTest
        //iso8601 := "2006-01-02T15:04:05.999Z"
        ts2 := t2.Format(ISO8601)
        req, _ := http.NewRequest("GET", "/statistics?since="+ts2, nil)
        rsp := httptest.NewRecorder()
        srv.Router().ServeHTTP(rsp, req)

        if rsp.Code != http.StatusOK {
            t.Fatalf("Status code expected: %s but was: %v", "200", rsp.Code)
        }
        results := rsp.Body.String()
        if results != fmt.Sprintf(`{"results":[{"teststep":"else","avg_ms":6,"min_ms":2,`+
            `"max_ms":10,"count":2,"error":0,"last":"%s"}],"running":false}`, t2.Format(ISO8601)) {
            t.Errorf("Results not as expected: %s!", results)
        }
    }

    { // update but no new data
        ts3 := t3.Format(ISO8601)
        req, _ := http.NewRequest("GET", "/statistics?since="+ts3, nil)
        rsp := httptest.NewRecorder()
        srv.Router().ServeHTTP(rsp, req)

        if rsp.Code != http.StatusOK {
            t.Fatalf("Status code expected: %s but was: %v", "200", rsp.Code)
        }
        results := rsp.Body.String()
        if results != `{"results":[],"running":false}` {
            t.Errorf("Results not as expected: %s!", results)
        }
    }

    { // get all rows
        req, _ := http.NewRequest("GET", "/statistics", nil)
        rsp := httptest.NewRecorder()
        srv.Router().ServeHTTP(rsp, req)

        if rsp.Code != http.StatusOK {
            t.Fatalf("Status code expected: %s but was: %v", "200", rsp.Code)
        }
        results := rsp.Body.String()
        if results != fmt.Sprintf(`{"results":[{"teststep":"else","avg_ms":6,"min_ms":2,"max_ms":10,`+
            `"count":2,"error":0,"last":"%s"},{"teststep":"sth","avg_ms":8,"min_ms":8,"max_ms":8,`+
            `"count":1,"error":0,"last":"%s"}],"running":false}`, t2.Format(ISO8601), t1.Format(ISO8601)) {
            t.Errorf("Results not as expected: %s!", results)
        }
    }
}