// cmdBenchmarkSet benchmarks acknowledged Kafka produce requests
// using the siesta producer.
func cmdBenchmarkSet() error {
	if len(globalBrokerList) == 0 {
		return errors.NotValidf("broker list")
	}
	if len(globalTopic) == 0 {
		return errors.NotValidf("topic")
	}

	sendString := utils.GenTestMessage(globalMsgLength)

	producerConfig := siesta_producer.NewProducerConfig()
	producerConfig.Linger = time.Millisecond

	connConfig := siesta.NewConnectorConfig()
	brokerList := strings.Split(globalBrokerList, ",")
	producerConfig.BrokerList = brokerList
	connConfig.BrokerList = brokerList
	log.Printf("%v", brokerList)

	connector, err := siesta.NewDefaultConnector(connConfig)
	if err != nil {
		return errors.Trace(err)
	}

	// Periodic metadata refresh, currently disabled.
	// go func() {
	// 	timeout := time.Tick(producerConfig.MetadataExpire / 2)
	// 	for {
	// 		<-timeout
	// 		connector.RefreshMetadata([]string{globalTopic})
	// 	}
	// }()

	producer := siesta_producer.NewKafkaProducer(producerConfig,
		siesta_producer.ByteSerializer,
		siesta_producer.ByteSerializer,
		connector)

	bt := utils.NewBenchmarkTester(globalConcurrentLevel, globalDuration,
		func(bt *utils.BenchmarkTester, index int) error {
			record := &siesta_producer.ProducerRecord{
				Topic: globalTopic,
				Value: []byte(sendString),
			}
			// Send returns a channel; receiving blocks until the broker acknowledges.
			recordMetadata := <-producer.Send(record)
			if recordMetadata.Error == siesta.ErrNoError {
				return nil
			}
			return recordMetadata.Error
		}, nil)
	return errors.Trace(bt.Run())
}
// cmdBenchmarkGet benchmarks Kafka consumption through the go_kafka_client
// high-level consumer coordinated by ZooKeeper.
func cmdBenchmarkGet() error {
	if len(globalZkList) == 0 {
		return errors.NotValidf("zookeeper list")
	}
	if len(globalTopic) == 0 {
		return errors.NotValidf("topic")
	}

	message := make(chan *go_kafka_client.Message)

	consumConfig := go_kafka_client.DefaultConsumerConfig()
	consumConfig.Groupid = "BenchmarkGroup-Test"
	consumConfig.AutoOffsetReset = go_kafka_client.SmallestOffset

	zkConfig := go_kafka_client.NewZookeeperConfig()
	zkConfig.ZookeeperConnect = strings.Split(globalZkList, ",")
	consumConfig.Coordinator = go_kafka_client.NewZookeeperCoordinator(zkConfig)

	// Hand every consumed message to the benchmark loop through the channel.
	consumConfig.Strategy = func(_ *go_kafka_client.Worker, msg *go_kafka_client.Message, id go_kafka_client.TaskId) go_kafka_client.WorkerResult {
		message <- msg
		return go_kafka_client.NewSuccessfulResult(id)
	}
	consumConfig.WorkerFailureCallback = func(_ *go_kafka_client.WorkerManager) go_kafka_client.FailedDecision {
		return go_kafka_client.CommitOffsetAndContinue
	}
	consumConfig.WorkerFailedAttemptCallback = func(_ *go_kafka_client.Task, _ go_kafka_client.WorkerResult) go_kafka_client.FailedDecision {
		return go_kafka_client.CommitOffsetAndContinue
	}

	consumer := go_kafka_client.NewConsumer(consumConfig)
	topicCountMap := map[string]int{globalTopic: 1}
	go consumer.StartStatic(topicCountMap)
	// Give the consumer time to join the group before measuring.
	time.Sleep(2 * time.Second)

	bt := utils.NewBenchmarkTester(globalConcurrentLevel, globalDuration,
		func(bt *utils.BenchmarkTester, index int) error {
			<-message
			return nil
		}, nil)
	return errors.Trace(bt.Run())
}
// cmdBenchmarkSetNoAck benchmarks fire-and-forget Kafka produce requests
// (RequiredAcks = 0) using the siesta producer.
func cmdBenchmarkSetNoAck() error {
	if len(globalBrokerList) == 0 {
		return errors.NotValidf("broker list")
	}
	if len(globalTopic) == 0 {
		return errors.NotValidf("topic")
	}

	sendString := utils.GenTestMessage(globalMsgLength)

	producerConfig := siesta_producer.NewProducerConfig()
	producerConfig.ClientID = "Benchmark"
	producerConfig.RequiredAcks = 0

	connConfig := siesta.NewConnectorConfig()
	brokerList := strings.Split(globalBrokerList, ",")
	producerConfig.BrokerList = brokerList
	connConfig.BrokerList = brokerList
	log.Printf("%v", brokerList)

	connector, err := siesta.NewDefaultConnector(connConfig)
	if err != nil {
		return errors.Trace(err)
	}

	producer := siesta_producer.NewKafkaProducer(producerConfig,
		siesta_producer.ByteSerializer,
		siesta_producer.ByteSerializer,
		connector)

	bt := utils.NewBenchmarkTester(globalConcurrentLevel, globalDuration,
		func(bt *utils.BenchmarkTester, index int) error {
			record := &siesta_producer.ProducerRecord{
				Topic: globalTopic,
				Value: []byte(sendString),
			}
			recordMetadata := <-producer.Send(record)
			if recordMetadata.Error == siesta.ErrNoError {
				return nil
			}
			return recordMetadata.Error
		}, nil)
	return errors.Trace(bt.Run())
}
// benchmarkMCGet benchmarks receive operations through the memcache protocol.
func benchmarkMCGet() error {
	key := fmt.Sprintf("%s.%s", globalQueue, globalBiz)
	log.Printf("Test Key: %s", key)

	mc := memcache.New(globalHost)
	bt := utils.NewBenchmarkTester(globalConcurrentLevel, globalDuration,
		func(bt *utils.BenchmarkTester, index int) error {
			_, err := mc.Get(key)
			return err
		}, nil)
	return errors.Trace(bt.Run())
}
// benchmarkMCSet benchmarks send operations through the memcache protocol.
func benchmarkMCSet() error {
	key := fmt.Sprintf("%s.%s", globalQueue, globalBiz)
	sendString := utils.GenTestMessage(globalMsgLength)
	log.Printf("Test Key: %s, Data: %s", key, sendString)

	mc := memcache.New(globalHost)
	bt := utils.NewBenchmarkTester(globalConcurrentLevel, globalDuration,
		func(bt *utils.BenchmarkTester, index int) error {
			return mc.Set(&memcache.Item{Key: key, Value: []byte(sendString)})
		}, nil)
	return errors.Trace(bt.Run())
}
// benchmarkHttpGet benchmarks message consumption through the HTTP interface.
func benchmarkHttpGet() error {
	url := fmt.Sprintf("http://%s/msg?action=receive&queue=%s&group=%s", globalHost, globalQueue, globalBiz)
	log.Printf("Test URL: %s", url)

	bt := utils.NewBenchmarkTester(globalConcurrentLevel, globalDuration,
		func(bt *utils.BenchmarkTester, index int) error {
			resp, err := http.Get(url)
			if err != nil {
				return err
			}
			// Close the response body so connections are not leaked during the run.
			defer resp.Body.Close()
			if resp.StatusCode != 200 {
				return fmt.Errorf("http code %d", resp.StatusCode)
			}
			return nil
		}, nil)
	return errors.Trace(bt.Run())
}
// benchmarkHttpSet benchmarks message production through the HTTP interface.
func benchmarkHttpSet() error {
	url := fmt.Sprintf("http://%s/msg", globalHost)
	sendString := fmt.Sprintf("action=send&queue=%s&group=%s&msg=%s",
		globalQueue, globalBiz, utils.GenTestMessage(globalMsgLength))
	log.Printf("Test URL: %s, Data: %s", url, sendString)

	bt := utils.NewBenchmarkTester(globalConcurrentLevel, globalDuration,
		func(bt *utils.BenchmarkTester, index int) error {
			body := strings.NewReader(sendString)
			resp, err := http.Post(url, "application/x-www-form-urlencoded", body)
			if err != nil {
				return err
			}
			// Close the response body so connections are not leaked during the run.
			defer resp.Body.Close()
			if resp.StatusCode != 200 {
				return fmt.Errorf("http code %d", resp.StatusCode)
			}
			return nil
		}, nil)
	return errors.Trace(bt.Run())
}
// benchmarkHttpLatency measures end-to-end latency through the HTTP interface:
// even-numbered workers send messages carrying the current timestamp, odd-numbered
// workers receive them and bucket the send-to-receive delay.
func benchmarkHttpLatency() error {
	getURL := fmt.Sprintf("http://%s/msg?action=receive&queue=%s&group=%s", globalHost, globalQueue, globalBiz)
	setURL := fmt.Sprintf("http://%s/msg", globalHost)

	// Latency buckets: <2ms, 2-5ms, 5-10ms, 10-20ms, 20-50ms, >=50ms.
	result := new([6]int64)
	statis := func(s int64) {
		switch {
		case s < 2:
			atomic.AddInt64(&result[0], 1)
		case s >= 2 && s < 5:
			atomic.AddInt64(&result[1], 1)
		case s >= 5 && s < 10:
			atomic.AddInt64(&result[2], 1)
		case s >= 10 && s < 20:
			atomic.AddInt64(&result[3], 1)
		case s >= 20 && s < 50:
			atomic.AddInt64(&result[4], 1)
		default:
			atomic.AddInt64(&result[5], 1)
		}
	}

	// clean prints the latency distribution after the benchmark finishes.
	clean := func(bt *utils.BenchmarkTester) {
		total := int64(0)
		for _, i := range result {
			total += i
		}
		fmt.Printf("Benchmark Latency Result: %d\n", total)
		fmt.Printf("\t%2d%% ops < 2ms\n", result[0]*100/total)
		fmt.Printf("\t%2d%% ops 2-5ms\n", result[1]*100/total)
		fmt.Printf("\t%2d%% ops 5-10ms\n", result[2]*100/total)
		fmt.Printf("\t%2d%% ops 10-20ms\n", result[3]*100/total)
		fmt.Printf("\t%2d%% ops 20-50ms\n", result[4]*100/total)
		fmt.Printf("\t%2d%% ops >50ms\n", result[5]*100/total)
		fmt.Printf("\n\n")
	}

	bt := utils.NewBenchmarkTester(globalConcurrentLevel, globalDuration,
		func(bt *utils.BenchmarkTester, index int) error {
			if index%2 == 0 { // producer: send the current timestamp as the message body
				body := &bytes.Buffer{}
				fmt.Fprintf(body, "action=send&queue=%s&group=%s&msg=%d", globalQueue, globalBiz, time.Now().UnixNano())
				resp, err := http.Post(setURL, "application/x-www-form-urlencoded", body)
				if err != nil {
					return err
				}
				defer resp.Body.Close()
				if resp.StatusCode != 200 {
					return fmt.Errorf("http code %d", resp.StatusCode)
				}
				return nil
			}

			// consumer: receive a message and compute the send-to-receive delay
			resp, err := http.Get(getURL)
			if err != nil {
				return err
			}
			defer resp.Body.Close()
			if resp.StatusCode != 200 {
				return fmt.Errorf("http code %d", resp.StatusCode)
			}
			recvTime := time.Now().UnixNano()
			data, err := ioutil.ReadAll(resp.Body)
			if err != nil {
				return fmt.Errorf("resp read :%s", err)
			}
			msg := &message{}
			if err = json.Unmarshal(data, msg); err != nil {
				return err
			}
			sendTime, err := strconv.ParseInt(msg.Msg, 10, 64)
			if err != nil {
				return err
			}
			diff := recvTime - sendTime
			statis(diff / 1000000) // nanoseconds -> milliseconds
			return nil
		}, clean)
	return errors.Trace(bt.Run())
}