// benchmarkPub publishes the contents of msgfile to kateway through the api client in a tight loop.
func benchmarkPub(seq int) {
    cf := api.DefaultConfig(appId, secret)
    cf.Pub.Endpoint = endpoint
    cf.Debug = debug
    client := api.NewClient(cf)

    var opt api.PubOption
    opt.Topic = topic
    opt.Ver = ver
    opt.Async = true

    msg, err := ioutil.ReadFile(msgfile)
    if err != nil {
        panic(err)
    }

    for i := 0; i < limit; i++ {
        err := client.Pub("", msg, opt)
        if err != nil {
            stress.IncCounter("fail", 1)
            log.Println(err)
        } else {
            stress.IncCounter("ok", 1)
        }
    }
}
// pubKafkaLoop sends sz-byte messages directly to a local kafka broker with a sarama sync producer.
func pubKafkaLoop(seq int) {
    cf := sarama.NewConfig()
    cf.Producer.RequiredAcks = sarama.WaitForLocal
    cf.Producer.Partitioner = sarama.NewHashPartitioner
    cf.Producer.Timeout = time.Second
    //cf.Producer.Compression = sarama.CompressionSnappy
    cf.Producer.Retry.Max = 3
    producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cf)
    if err != nil {
        stress.IncCounter("fail", 1)
        log.Println(err)
        return
    }
    defer producer.Close()

    msg := strings.Repeat("X", sz)
    for i := 0; i < loops; i++ {
        _, _, err := producer.SendMessage(&sarama.ProducerMessage{
            Topic: topic,
            Value: sarama.StringEncoder(msg),
        })
        if err == nil {
            stress.IncCounter("ok", 1)
        } else {
            stress.IncCounter("fail", 1)
        }
    }
}
// disqueLoop enqueues delayed jobs into a local disque server, one connection per goroutine.
func disqueLoop(seq int) {
    pool := disque.NewPool(disque.DialFunc(dial), "localhost:7711")
    c, err := pool.Get()
    if err != nil {
        stress.IncCounter("fail", 1)
        log.Println(err)
        return
    }
    defer c.Close()

    for i := 0; i < loops; i++ {
        _, err = c.Add(disque.AddRequest{
            Job: disque.Job{
                Queue: "demo",
                Data:  []byte(fmt.Sprintf("hello world from %d:%d", seq, i)),
            },
            Delay:   time.Minute,
            Timeout: time.Millisecond * 100,
        })
        if err == nil {
            stress.IncCounter("ok", 1)
        } else {
            if !suppressError {
                log.Println(err)
            }
            stress.IncCounter("fail", 1)
        }
    }
}
// pubGatewayLoop publishes sz-byte messages to the gateway's REST Pub endpoint over HTTP.
func pubGatewayLoop(seq int) {
    httpClient := createHttpClient()
    url := fmt.Sprintf("%s/topics/%s/%s?", addr, topic, ver)
    if async {
        url += "async=1"
    }

    for n := 0; n < loops; n++ {
        req, err := http.NewRequest("POST", url, bytes.NewBuffer([]byte(strings.Repeat("X", sz))))
        if err != nil {
            // Failing to even build the request counts as a failure for this goroutine.
            log.Printf("Error occurred. %+v", err)
            stress.IncCounter("fail", 1)
            return
        }
        req.Header.Set("Appid", appid)
        req.Header.Set("Pubkey", pubkey)
        req.Header.Set("Content-Type", "application/x-www-form-urlencoded")

        // use httpClient to send the request
        response, err := httpClient.Do(req)
        if err != nil {
            stress.IncCounter("fail", 1)
            if !suppressError {
                log.Printf("Error sending request to API endpoint. %+v", err)
            }
        } else {
            if response.StatusCode != http.StatusOK {
                stress.IncCounter("fail", 1)
                if !suppressError {
                    log.Printf("Error sending request to API endpoint. %+v", response.Status)
                }
                return
            }

            // Check whether the work was actually done: we have seen
            // inconsistencies even when the gateway returns 200 OK.
            body, err := ioutil.ReadAll(response.Body)
            if err != nil {
                log.Fatalf("Couldn't parse response body. %+v", err)
            }

            // Close the body so the connection can be reused.
            response.Body.Close()

            stress.IncCounter("ok", 1)
            if false { // flip to true to dump response bodies while debugging
                log.Println("Response Body:", string(body))
            }
        }

        if sleep > 0 {
            time.Sleep(sleep)
        }
    }
}
// benchmarkSub consumes messages from kateway via SubX until limit messages have been seen.
func benchmarkSub(seq int) {
    cf := api.DefaultConfig(appId, secret)
    cf.Debug = false
    cf.Sub.Endpoint = endpoint
    client := api.NewClient(cf)

    opt := api.SubOption{
        AppId: subAppid,
        Topic: topic,
        Ver:   ver,
        Batch: batch,
        Wait:  wait,
        Group: group,
    }

    var i int
    err := client.SubX(opt, func(statusCode int, msg []byte, r *api.SubXResult) error {
        if debug {
            if statusCode == 200 && batch > 1 {
                msgs := gateway.DecodeMessageSet(msg)
                for idx, m := range msgs {
                    log.Printf("%d P:%d O:%d V:%s", idx, m.Partition, m.Offset, string(m.Value))
                }
            } else {
                log.Println(statusCode, string(msg))
            }
        }

        if statusCode == 200 {
            stress.IncCounter("ok", int64(batch))
        } else {
            if debug {
                log.Println(string(msg))
            }
            stress.IncCounter("fail", 1)
        }

        i++
        if i > limit {
            return api.ErrSubStop
        }
        if sleep > 0 {
            time.Sleep(sleep)
        }

        return nil
    })
    if err != nil {
        fmt.Println(err)
    }
}
func getHttpLoop(seq int) {
    client := createHttpClient()
    req, _ := http.NewRequest("GET", "http://localhost:9090/", nil)
    for i := 0; i < loops; i++ {
        response, err := client.Do(req)
        if err == nil {
            ioutil.ReadAll(response.Body)
            response.Body.Close() // reuse the connection
            stress.IncCounter("ok", 1)
        } else {
            stress.IncCounter("fail", 1)
            //log.Println(err)
        }
    }
}
func (this *Kateway) benchPub(seq int) {
    cf := api.DefaultConfig(this.benchApp, this.benchSecret)
    cf.Pub.Endpoint = this.benchPubEndpoint
    cli := api.NewClient(cf)
    for i := 0; i < 10000; i++ {
        pubMsg := fmt.Sprintf("gk kateway -bench generated by %s %d.%d", this.benchId, seq, i)
        if err := cli.Pub("", []byte(pubMsg), api.PubOption{
            Topic: this.benchTopic,
            Ver:   this.benchVer,
            Async: this.benchmarkAsync,
        }); err != nil {
            log.Printf("%s/%d/%d %s", this.benchId, seq, i, err)
            stress.IncCounter("fail", 1)
        } else {
            stress.IncCounter("ok", 1)
        }
    }
}
func redisLoop(seq int) {
    conn, err := redis.DialTimeout("tcp", ":6379", 0, 1*time.Second, 1*time.Second)
    if err != nil {
        stress.IncCounter("fail", 1)
        log.Println(err)
        return
    }
    defer conn.Close()

    msg := strings.Repeat("X", sz)
    for i := 0; i < loops; i++ {
        _, err := conn.Do("SET", "key", msg)
        if err == nil {
            stress.IncCounter("ok", 1)
        } else {
            if !suppressError {
                log.Println(err)
            }
            stress.IncCounter("fail", 1)
        }
    }
}
func (this *Produce) benchmarkProducer(seq int) {
    cf := sarama.NewConfig()
    cf.Producer.RequiredAcks = sarama.WaitForLocal
    if this.ackAll {
        cf.Producer.RequiredAcks = sarama.WaitForAll
    }
    p, err := sarama.NewSyncProducer(this.zkcluster.BrokerList(), cf)
    swallow(err)
    defer p.Close()

    msg := []byte(strings.Repeat("X", 1<<10))
    for i := 0; i < 50000; i++ {
        _, _, err = p.SendMessage(&sarama.ProducerMessage{
            Topic: this.topic,
            Value: sarama.ByteEncoder(msg),
        })
        if err != nil {
            stress.IncCounter("fail", 1)
        } else {
            stress.IncCounter("ok", 1)
        }
    }
}
func recvMsg() {
    stress.IncCounter("recv", 1)
}

func sentMsg() {
    stress.IncCounter("sent", 1)
}
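Each of the loop functions above takes a goroutine sequence number seq and reports into the shared "ok"/"fail" counters via stress.IncCounter. As a minimal, hypothetical sketch (the runLoops helper, its concurrency argument c, and the choice of pubKafkaLoop as the workload are assumptions for illustration, not part of the original tool), such loops can be fanned out across goroutines with nothing more than a sync.WaitGroup:

// Hypothetical sketch: fan one of the benchmark loops out over c goroutines,
// each with its own sequence number, and block until all of them return.
// Requires "sync" in the import list.
func runLoops(c int) {
    var wg sync.WaitGroup
    for seq := 0; seq < c; seq++ {
        wg.Add(1)
        go func(seq int) {
            defer wg.Done()
            pubKafkaLoop(seq) // any of the loops above could be swapped in here
        }(seq)
    }
    wg.Wait()
}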