func main() {
	flag.Parse()
	// All seven flags are mandatory; bail out with usage info if any is missing.
	if *zkConnect == "" || *blueTopic == "" || *blueGroup == "" || *bluePattern == "" ||
		*greenTopic == "" || *greenGroup == "" || *greenPattern == "" {
		flag.Usage()
		os.Exit(1)
	}

	blue := kafka.BlueGreenDeployment{*blueTopic, *bluePattern, *blueGroup}
	green := kafka.BlueGreenDeployment{*greenTopic, *greenPattern, *greenGroup}

	zkConfig := kafka.NewZookeeperConfig()
	zkConfig.ZookeeperConnect = []string{*zkConnect}

	zk := kafka.NewZookeeperCoordinator(zkConfig)
	// Fail fast if Zookeeper is unreachable; a deployment request over a
	// dead connection is pointless.
	if err := zk.Connect(); err != nil {
		panic(err)
	}
	zk.RequestBlueGreenDeployment(blue, green)
}
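The seven flags this tool validates are package-level flag.String pointers defined elsewhere. A minimal sketch of such declarations (the command-line flag names and usage strings here are assumptions, not the tool's actual definitions):

var (
	zkConnect    = flag.String("zookeeper", "", "Zookeeper connect string")
	blueTopic    = flag.String("blue.topic", "", "Blue deployment topic")
	bluePattern  = flag.String("blue.pattern", "", "Blue deployment topic pattern")
	blueGroup    = flag.String("blue.group", "", "Blue deployment consumer group")
	greenTopic   = flag.String("green.topic", "", "Green deployment topic")
	greenPattern = flag.String("green.pattern", "", "Green deployment topic pattern")
	greenGroup   = flag.String("green.group", "", "Green deployment consumer group")
)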
func main() {
	parseAndValidateArgs()
	// Shut down cleanly on Ctrl-C.
	ctrlc := make(chan os.Signal, 1)
	signal.Notify(ctrlc, os.Interrupt)

	producerConfig := kafka.DefaultProducerConfig()
	producerConfig.BrokerList = strings.Split(*brokerList, ",")

	zkConfig := kafka.NewZookeeperConfig()
	zkConfig.ZookeeperConnect = strings.Split(*zookeeper, ",")

	coordinator := kafka.NewZookeeperCoordinator(zkConfig)

	config := kafka.DefaultConsumerConfig()
	config.Debug = true
	config.Groupid = "perf-mirror"
	config.AutoOffsetReset = "smallest"
	config.Coordinator = coordinator
	config.WorkerFailedAttemptCallback = FailedAttemptCallback
	config.WorkerFailureCallback = FailedCallback
	if *siesta {
		config.LowLevelClient = kafka.NewSiestaClient(config)
	}

	if *protobuf {
		setupProtoConfig(config)
	} else {
		producerConfig.ValueEncoder = kafka.NewKafkaAvroEncoder(*schemaRegistry)
		setupAvroConfig(config)
	}

	producer = kafka.NewSaramaProducer(producerConfig)
	consumer := kafka.NewConsumer(config)
	go consumer.StartStatic(map[string]int{*consumeTopic: 1})
	<-ctrlc
	fmt.Println("Shutdown triggered, closing consumer")
	<-consumer.Close()
	producer.Close()
}
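Both this tool and the consumer below hand the consumer config a pair of failure callbacks. One plausible implementation, assuming go_kafka_client's FailedDecision constants (verify the names against the library version in use): stop without committing when a worker manager fails outright, but commit the offset and keep going on a single failed attempt.

func FailedCallback(wm *kafka.WorkerManager) kafka.FailedDecision {
	fmt.Println("Worker manager failed, stopping without committing")
	return kafka.DoNotCommitOffsetAndStop
}

func FailedAttemptCallback(task *kafka.Task, result kafka.WorkerResult) kafka.FailedDecision {
	fmt.Println("Worker attempt failed, committing offset and continuing")
	return kafka.CommitOffsetAndContinue
}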
func main() {
	parseAndValidateArgs()
	ctrlc := make(chan os.Signal, 1)
	signal.Notify(ctrlc, os.Interrupt)

	zkConfig := kafka.NewZookeeperConfig()
	zkConfig.ZookeeperConnect = strings.Split(*zookeeper, ",")

	coordinator := kafka.NewZookeeperCoordinator(zkConfig)

	config := kafka.DefaultConsumerConfig()
	config.Groupid = "perf-consumer"
	config.AutoOffsetReset = "smallest"
	config.Coordinator = coordinator
	config.WorkerFailedAttemptCallback = FailedAttemptCallback
	config.WorkerFailureCallback = FailedCallback
	if *siesta {
		config.LowLevelClient = kafka.NewSiestaClient(config)
	}

	if *protobuf {
		setupLogLineProtoConfig(config)
	} else {
		setupAvroConfig(config)
	}

	consumer := kafka.NewConsumer(config)
	go consumer.StartStatic(map[string]int{*topic: 2})

	go func() {
		latencies := make([]metrics.Histogram, 0)
		endToEnd := metrics.NewRegisteredHistogram("Latency-end-to-end", metrics.DefaultRegistry, metrics.NewUniformSample(10000))

		// Print the mean of every histogram once per second.
		go func() {
			for {
				time.Sleep(1 * time.Second)
				for i, meter := range latencies {
					fmt.Printf("Step %d: %f\n", i+1, meter.Mean())
				}
				fmt.Printf("End-to-end: %f\n", endToEnd.Mean())
				fmt.Println()
			}
		}()

		initialized := false
		for timing := range timings {
			if !initialized {
				// Register one histogram per pipeline step on the first sample.
				for i := 1; i < len(timing); i++ {
					latencies = append(latencies, metrics.NewRegisteredHistogram(fmt.Sprintf("Latency-step-%d", i), metrics.DefaultRegistry, metrics.NewUniformSample(10000)))
				}
				initialized = true
			}
			if len(timing)-1 != len(latencies) {
				fmt.Println("Got wrong number of timings, skipping...")
				continue
			}
			// Each adjacent pair of timestamps is the latency of one step.
			for i := 1; i < len(timing); i++ {
				latencies[i-1].Update(int64(timing[i] - timing[i-1]))
			}
			endToEnd.Update(int64(timing[len(timing)-1] - timing[0]))
		}
	}()

	<-ctrlc
	fmt.Println("Shutdown triggered, closing consumer")
	<-consumer.Close()
	close(timings)
}
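The metrics goroutine above drains a package-level timings channel that the worker strategy fills; neither is shown in the listing. A minimal sketch of what they might look like (the channel type, buffer size, and the extractTimings helper are assumptions; the real setupAvroConfig/setupLogLineProtoConfig presumably install a strategy along these lines via config.Strategy):

// Assumed declaration: one slice of per-step timestamps per consumed message.
var timings = make(chan []int64, 10000)

// Sketch of a worker strategy that appends its own receive timestamp and
// hands the sample to the metrics goroutine. extractTimings is a hypothetical
// helper that pulls upstream timestamps out of the decoded payload.
func perfStrategy(_ *kafka.Worker, msg *kafka.Message, id kafka.TaskId) kafka.WorkerResult {
	timing := extractTimings(msg.DecodedValue)
	timing = append(timing, time.Now().UnixNano()/int64(time.Millisecond))
	timings <- timing
	return kafka.NewSuccessfulResult(id)
}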
func resolveConfig() (*kafkaClient.ConsumerConfig, string, int, string, time.Duration) {
	rawConfig, err := kafkaClient.LoadConfiguration("consumers.properties")
	if err != nil {
		panic("Failed to load configuration file")
	}
	logLevel := rawConfig["log_level"]
	setLogLevel(logLevel)

	// Parse errors are deliberately discarded below: a missing or malformed
	// key silently falls back to the type's zero value.
	numConsumers, _ := strconv.Atoi(rawConfig["num_consumers"])
	zkTimeout, _ := time.ParseDuration(rawConfig["zookeeper_timeout"])

	numWorkers, _ := strconv.Atoi(rawConfig["num_workers"])
	maxWorkerRetries, _ := strconv.Atoi(rawConfig["max_worker_retries"])
	workerBackoff, _ := time.ParseDuration(rawConfig["worker_backoff"])
	workerRetryThreshold, _ := strconv.Atoi(rawConfig["worker_retry_threshold"])
	workerConsideredFailedTimeWindow, _ := time.ParseDuration(rawConfig["worker_considered_failed_time_window"])
	workerTaskTimeout, _ := time.ParseDuration(rawConfig["worker_task_timeout"])
	workerManagersStopTimeout, _ := time.ParseDuration(rawConfig["worker_managers_stop_timeout"])

	rebalanceBarrierTimeout, _ := time.ParseDuration(rawConfig["rebalance_barrier_timeout"])
	rebalanceMaxRetries, _ := strconv.Atoi(rawConfig["rebalance_max_retries"])
	rebalanceBackoff, _ := time.ParseDuration(rawConfig["rebalance_backoff"])
	partitionAssignmentStrategy := rawConfig["partition_assignment_strategy"]
	excludeInternalTopics, _ := strconv.ParseBool(rawConfig["exclude_internal_topics"])

	numConsumerFetchers, _ := strconv.Atoi(rawConfig["num_consumer_fetchers"])
	fetchBatchSize, _ := strconv.Atoi(rawConfig["fetch_batch_size"])
	fetchMessageMaxBytes, _ := strconv.Atoi(rawConfig["fetch_message_max_bytes"])
	fetchMinBytes, _ := strconv.Atoi(rawConfig["fetch_min_bytes"])
	fetchBatchTimeout, _ := time.ParseDuration(rawConfig["fetch_batch_timeout"])
	requeueAskNextBackoff, _ := time.ParseDuration(rawConfig["requeue_ask_next_backoff"])
	fetchWaitMaxMs, _ := strconv.Atoi(rawConfig["fetch_wait_max_ms"])
	socketTimeout, _ := time.ParseDuration(rawConfig["socket_timeout"])
	queuedMaxMessages, _ := strconv.Atoi(rawConfig["queued_max_messages"])
	refreshLeaderBackoff, _ := time.ParseDuration(rawConfig["refresh_leader_backoff"])
	fetchMetadataRetries, _ := strconv.Atoi(rawConfig["fetch_metadata_retries"])
	fetchMetadataBackoff, _ := time.ParseDuration(rawConfig["fetch_metadata_backoff"])

	offsetsCommitMaxRetries, _ := strconv.Atoi(rawConfig["offsets_commit_max_retries"])

	flushInterval, _ := time.ParseDuration(rawConfig["flush_interval"])
	deploymentTimeout, _ := time.ParseDuration(rawConfig["deployment_timeout"])

	zkConfig := kafkaClient.NewZookeeperConfig()
	zkConfig.ZookeeperConnect = []string{rawConfig["zookeeper_connect"]}
	zkConfig.ZookeeperTimeout = zkTimeout

	config := kafkaClient.DefaultConsumerConfig()
	config.Groupid = rawConfig["group_id"]
	config.NumWorkers = numWorkers
	config.MaxWorkerRetries = maxWorkerRetries
	config.WorkerBackoff = workerBackoff
	config.WorkerRetryThreshold = int32(workerRetryThreshold)
	config.WorkerThresholdTimeWindow = workerConsideredFailedTimeWindow
	config.WorkerTaskTimeout = workerTaskTimeout
	config.WorkerManagersStopTimeout = workerManagersStopTimeout
	config.BarrierTimeout = rebalanceBarrierTimeout
	config.RebalanceMaxRetries = int32(rebalanceMaxRetries)
	config.RebalanceBackoff = rebalanceBackoff
	config.PartitionAssignmentStrategy = partitionAssignmentStrategy
	config.ExcludeInternalTopics = excludeInternalTopics
	config.NumConsumerFetchers = numConsumerFetchers
	config.FetchBatchSize = fetchBatchSize
	config.FetchMessageMaxBytes = int32(fetchMessageMaxBytes)
	config.FetchMinBytes = int32(fetchMinBytes)
	config.FetchBatchTimeout = fetchBatchTimeout
	config.FetchTopicMetadataRetries = fetchMetadataRetries
	config.FetchTopicMetadataBackoff = fetchMetadataBackoff
	config.RequeueAskNextBackoff = requeueAskNextBackoff
	config.FetchWaitMaxMs = int32(fetchWaitMaxMs)
	config.SocketTimeout = socketTimeout
	config.QueuedMaxMessages = int32(queuedMaxMessages)
	config.RefreshLeaderBackoff = refreshLeaderBackoff
	config.Coordinator = kafkaClient.NewZookeeperCoordinator(zkConfig)
	config.AutoOffsetReset = rawConfig["auto_offset_reset"]
	config.OffsetsCommitMaxRetries = offsetsCommitMaxRetries
	config.DeploymentTimeout = deploymentTimeout
	config.OffsetCommitInterval = 10 * time.Second

	return config, rawConfig["topic"], numConsumers, rawConfig["graphite_connect"], flushInterval
}
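Since every parse error is discarded, consumers.properties should supply every key resolveConfig reads, or the corresponding setting silently ends up as a zero value. An illustrative fragment of such a file (the values are examples, not recommended settings):

# consumers.properties -- illustrative values only
log_level=info
num_consumers=1
zookeeper_connect=localhost:2181
zookeeper_timeout=1s
group_id=perf-group
topic=perf-topic
auto_offset_reset=smallest
graphite_connect=localhost:2003
flush_interval=10s
num_workers=10
max_worker_retries=3
worker_backoff=500ms
worker_retry_threshold=100
worker_considered_failed_time_window=500ms
worker_task_timeout=1m
worker_managers_stop_timeout=1m
# ...plus the remaining rebalance_*, partition/fetch_*, socket/queue,
# offsets_commit_max_retries and deployment_timeout keys read above.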