Example #1
File: consumer.go  Project: going/kafka
// 172.31.22.222:2181,172.31.21.206:2181,172.31.17.130:2181
func NewConsumer(group string, topics, zk []string, strategy kafkalib.WorkerStrategy) (*Consumer, error) {
	consumer := &Consumer{
		topics: topics,
	}

	coordinatorConfig := kafkalib.NewZookeeperConfig()
	coordinatorConfig.ZookeeperConnect = zk
	coordinatorConfig.Root = "/kafka"
	coordinator := kafkalib.NewZookeeperCoordinator(coordinatorConfig)

	consumerConfig := kafkalib.DefaultConsumerConfig()
	consumerConfig.Groupid = group
	consumerConfig.Coordinator = coordinator
	consumerConfig.Strategy = strategy
	consumerConfig.AutoOffsetReset = kafkalib.SmallestOffset
	consumerConfig.OffsetCommitInterval = 1 * time.Minute
	consumerConfig.OffsetsCommitMaxRetries = 5
	consumerConfig.MaxWorkerRetries = 5
	consumerConfig.WorkerFailureCallback = func(wm *kafkalib.WorkerManager) kafkalib.FailedDecision {
		kafkalib.Error(consumer, "Failed to write to the database. Shutting down...")
		return kafkalib.DoNotCommitOffsetAndStop
	}
	consumerConfig.WorkerFailedAttemptCallback = func(task *kafkalib.Task, result kafkalib.WorkerResult) kafkalib.FailedDecision {
		kafkalib.Errorf(consumer, "Failed to write %s to the database after %d retries", task.Id().String(), task.Retries)
		return kafkalib.DoNotCommitOffsetAndContinue
	}
	consumer.config = consumerConfig
	consumer.consumer = kafkalib.NewConsumer(consumerConfig)
	return consumer, nil
}
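For context, a call site for this constructor might look like the sketch below. The group name, topic, and strategy are placeholders; the ZooKeeper hosts are the ones from the comment above the function.

// Hypothetical call site for NewConsumer; names are placeholders.
strategy := func(_ *kafkalib.Worker, _ *kafkalib.Message, id kafkalib.TaskId) kafkalib.WorkerResult {
	// Placeholder strategy: acknowledge every message without doing any work.
	return kafkalib.NewSuccessfulResult(id)
}
consumer, err := NewConsumer(
	"example-group",
	[]string{"events"},
	[]string{"172.31.22.222:2181", "172.31.21.206:2181", "172.31.17.130:2181"},
	strategy,
)
if err != nil {
	panic(err)
}
_ = consumer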
Example #2
func (t *Transport) Connect() error {
	config := sarama.NewConfig()
	config.Producer.Compression = sarama.CompressionSnappy

	client, err := sarama.NewClient(t.Brokers, config)
	if err != nil {
		return err
	}
	t.client = client

	producer, err := sarama.NewAsyncProducerFromClient(t.client)
	if err != nil {
		return err
	}
	t.producer = producer

	// Consumer configuration
	zkConfig := kafkaClient.NewZookeeperConfig()
	zkConfig.ZookeeperConnect = t.ZookeeperHosts

	consumerConfig := kafkaClient.DefaultConsumerConfig()
	consumerConfig.Coordinator = kafkaClient.NewZookeeperCoordinator(zkConfig)
	consumerConfig.RebalanceMaxRetries = 10
	consumerConfig.NumWorkers = 1
	consumerConfig.NumConsumerFetchers = 1
	consumerConfig.AutoOffsetReset = kafkaClient.LargestOffset
	t.consumerConfig = *consumerConfig

	return nil
}
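Once Connect succeeds, the transport holds both a sarama async producer and a ready-to-use consumer config. A minimal send helper might look like the following; SendExample is a hypothetical name, and errors from the async producer surface on its Errors() channel rather than at the call site.

// Hypothetical helper: hand a message to the async producer created in Connect.
func (t *Transport) SendExample(topic string, payload []byte) {
	t.producer.Input() <- &sarama.ProducerMessage{
		Topic: topic,
		Value: sarama.ByteEncoder(payload),
	}
}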
Example #3
func main() {
	parseArgs()

	go_kafka_client.Logger = go_kafka_client.NewDefaultLogger(go_kafka_client.ErrorLevel)
	kafkaProducer = producer.NewKafkaProducer(writeTopic, []string{broker})

	//Coordinator settings
	zookeeperConfig := go_kafka_client.NewZookeeperConfig()
	zookeeperConfig.ZookeeperConnect = []string{zookeeper}

	//Actual consumer settings
	consumerConfig := go_kafka_client.DefaultConsumerConfig()
	consumerConfig.AutoOffsetReset = go_kafka_client.SmallestOffset
	consumerConfig.Coordinator = go_kafka_client.NewZookeeperCoordinator(zookeeperConfig)
	consumerConfig.Groupid = group
	consumerConfig.NumWorkers = 1
	consumerConfig.NumConsumerFetchers = 1
	consumerConfig.KeyDecoder = go_kafka_client.NewKafkaAvroDecoder(schemaRepo)
	consumerConfig.ValueDecoder = consumerConfig.KeyDecoder

	consumerConfig.Strategy = func(worker *go_kafka_client.Worker, message *go_kafka_client.Message, taskId go_kafka_client.TaskId) go_kafka_client.WorkerResult {
		time.Sleep(2 * time.Second)
		record, ok := message.DecodedValue.(*avro.GenericRecord)
		if !ok {
			panic("Not a *GenericError, but expected one")
		}

		fmt.Printf("golang > received %s\n", fmt.Sprintf("{\"counter\": %d, \"name\": \"%s\", \"uuid\": \"%s\"}", record.Get("counter"), record.Get("name"), record.Get("uuid")))
		modify(record)
		encoded, err := encoder.Encode(record)
		if err != nil {
			panic(err)
		}

		if err := kafkaProducer.SendBytesSync(encoded); err != nil {
			panic(err)
		}

		return go_kafka_client.NewSuccessfulResult(taskId)
	}

	consumerConfig.WorkerFailureCallback = func(_ *go_kafka_client.WorkerManager) go_kafka_client.FailedDecision {
		return go_kafka_client.CommitOffsetAndContinue
	}
	consumerConfig.WorkerFailedAttemptCallback = func(_ *go_kafka_client.Task, _ go_kafka_client.WorkerResult) go_kafka_client.FailedDecision {
		return go_kafka_client.CommitOffsetAndContinue
	}

	kafkaConsumer = go_kafka_client.NewConsumer(consumerConfig)

	pingPongLoop()
}
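Note that the strategy above panics on decode and produce errors, while both failure callbacks return CommitOffsetAndContinue, so any message that does reach a callback is committed and skipped rather than retried.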
Example #4
//DefaultConsumerConfig creates a ConsumerConfig with sane defaults. Note that several required entries (such as Strategy and the worker callbacks) are left unset and must be provided by the caller.
func DefaultConsumerConfig() *kafka.ConsumerConfig {
	config := &kafka.ConsumerConfig{}
	config.Groupid = "go-consumer-for-notify-queue-server1"
	config.SocketTimeout = 30 * time.Second
	config.FetchMessageMaxBytes = 1024 * 1024
	config.NumConsumerFetchers = 1
	config.QueuedMaxMessages = 3
	config.RebalanceMaxRetries = 4
	config.FetchMinBytes = 1
	config.FetchWaitMaxMs = 100
	config.RebalanceBackoff = 5 * time.Second
	config.RefreshLeaderBackoff = 200 * time.Millisecond
	config.OffsetsCommitMaxRetries = 5
	config.OffsetCommitInterval = 3 * time.Second

	config.AutoOffsetReset = kafka.LargestOffset
	config.Clientid = "go-client"
	config.ExcludeInternalTopics = true
	config.PartitionAssignmentStrategy = kafka.RangeStrategy // choose between RangeStrategy and RoundRobinStrategy

	config.NumWorkers = 10
	config.MaxWorkerRetries = 3
	config.WorkerRetryThreshold = 100
	config.WorkerThresholdTimeWindow = 1 * time.Minute
	config.WorkerBackoff = 500 * time.Millisecond
	config.WorkerTaskTimeout = 1 * time.Minute
	config.WorkerManagersStopTimeout = 1 * time.Minute

	config.FetchBatchSize = 100
	config.FetchBatchTimeout = 10 * time.Millisecond

	config.FetchMaxRetries = 5
	config.RequeueAskNextBackoff = 5 * time.Second
	config.AskNextChannelSize = 1000
	config.FetchTopicMetadataRetries = 3
	config.FetchTopicMetadataBackoff = 1 * time.Second
	config.FetchRequestBackoff = 10 * time.Millisecond

	zkConfig := kafka.NewZookeeperConfig()
	zkConfig.ZookeeperConnect = Conf.ZookeeperAddr
	config.Coordinator = kafka.NewZookeeperCoordinator(zkConfig)
	config.BlueGreenDeploymentEnabled = true
	config.DeploymentTimeout = 0 * time.Second
	config.BarrierTimeout = 30 * time.Second
	config.LowLevelClient = kafka.NewSaramaClient(config)

	config.KeyDecoder = &kafka.ByteDecoder{}
	config.ValueDecoder = config.KeyDecoder

	return config
}
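Callers are expected to take these defaults and override what they need, most importantly Strategy and the worker callbacks, before constructing a consumer, as the other examples here do. A minimal sketch with a placeholder group id and a no-op strategy:

config := DefaultConsumerConfig()
config.Groupid = "example-group" // placeholder
config.Strategy = func(_ *kafka.Worker, _ *kafka.Message, id kafka.TaskId) kafka.WorkerResult {
	// Placeholder strategy: mark every task successful.
	return kafka.NewSuccessfulResult(id)
}
consumer := kafka.NewConsumer(config)
_ = consumer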
Example #5
func main() {
	flag.Parse()
	if *zkConnect == "" || *blueTopic == "" || *blueGroup == "" || *bluePattern == "" || *greenTopic == "" || *greenGroup == "" || *greenPattern == "" {
		flag.Usage()
		os.Exit(1)
	}
	blue := kafka.BlueGreenDeployment{*blueTopic, *bluePattern, *blueGroup}
	green := kafka.BlueGreenDeployment{*greenTopic, *greenPattern, *greenGroup}

	zkConfig := kafka.NewZookeeperConfig()
	zkConfig.ZookeeperConnect = []string{*zkConnect}

	zk := kafka.NewZookeeperCoordinator(zkConfig)
	if err := zk.Connect(); err != nil {
		panic(err)
	}

	if err := zk.RequestBlueGreenDeployment(blue, green); err != nil {
		panic(err)
	}
}
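As far as these examples show, RequestBlueGreenDeployment publishes the blue and green topic/pattern/group triples through ZooKeeper so that running consumers coordinated over the same ensemble can pick the request up and switch between the two deployments.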
Example #6
File: mirror.go  Project: ruo91/syscol
func main() {
	parseAndValidateArgs()
	ctrlc := make(chan os.Signal, 1)
	signal.Notify(ctrlc, os.Interrupt)

	producerConfig := kafka.DefaultProducerConfig()
	producerConfig.BrokerList = strings.Split(*brokerList, ",")

	zkConfig := kafka.NewZookeeperConfig()
	zkConfig.ZookeeperConnect = strings.Split(*zookeeper, ",")
	coordinator := kafka.NewZookeeperCoordinator(zkConfig)

	config := kafka.DefaultConsumerConfig()
	config.Debug = true
	config.Groupid = "perf-mirror"
	config.AutoOffsetReset = "smallest"
	config.Coordinator = coordinator
	config.WorkerFailedAttemptCallback = FailedAttemptCallback
	config.WorkerFailureCallback = FailedCallback
	if *siesta {
		config.LowLevelClient = kafka.NewSiestaClient(config)
	}

	if protobuf {
		setupProtoConfig(config)
	} else {
		producerConfig.ValueEncoder = kafka.NewKafkaAvroEncoder(*schemaRegistry)
		setupAvroConfig(config)
	}

	producer = kafka.NewSaramaProducer(producerConfig)
	consumer := kafka.NewConsumer(config)

	go consumer.StartStatic(map[string]int{*consumeTopic: 1})

	<-ctrlc
	fmt.Println("Shutdown triggered, closing consumer")
	<-consumer.Close()
	producer.Close()
}
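Close() returns a channel, so the <-consumer.Close() receive blocks until the consumer has fully shut down before the producer is closed.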
Example #7
func createConsumer(group string, workerStrategy go_kafka_client.WorkerStrategy) *go_kafka_client.Consumer {
	//Coordinator settings
	zookeeperConfig := go_kafka_client.NewZookeeperConfig()
	zookeeperConfig.ZookeeperConnect = []string{zookeeper}

	//Actual consumer settings
	consumerConfig := go_kafka_client.DefaultConsumerConfig()
	consumerConfig.Coordinator = go_kafka_client.NewZookeeperCoordinator(zookeeperConfig)
	consumerConfig.Groupid = group
	consumerConfig.NumWorkers = 1
	consumerConfig.NumConsumerFetchers = 1
	consumerConfig.FetchBatchSize = 1
	consumerConfig.FetchBatchTimeout = 1 * time.Second
	consumerConfig.Strategy = workerStrategy
	consumerConfig.AutoOffsetReset = go_kafka_client.SmallestOffset
	consumerConfig.WorkerFailureCallback = func(*go_kafka_client.WorkerManager) go_kafka_client.FailedDecision {
		return go_kafka_client.CommitOffsetAndContinue
	}
	consumerConfig.WorkerFailedAttemptCallback = func(*go_kafka_client.Task, go_kafka_client.WorkerResult) go_kafka_client.FailedDecision {
		return go_kafka_client.CommitOffsetAndContinue
	}

	return go_kafka_client.NewConsumer(consumerConfig)
}
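A caller would then start the returned consumer on whichever topics it should process; a hypothetical sketch with placeholder names:

consumer := createConsumer("example-group", func(_ *go_kafka_client.Worker, _ *go_kafka_client.Message, id go_kafka_client.TaskId) go_kafka_client.WorkerResult {
	// Placeholder strategy: acknowledge every message.
	return go_kafka_client.NewSuccessfulResult(id)
})
go consumer.StartStatic(map[string]int{"example-topic": 1})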
Example #8
func (this *EventFetcher) createConsumer() (*kafka.Consumer, error) {
	fmt.Println(this.config.ZkConnect)
	coordinatorConfig := kafka.NewZookeeperConfig()
	coordinatorConfig.ZookeeperConnect = []string{this.config.ZkConnect}
	coordinator := kafka.NewZookeeperCoordinator(coordinatorConfig)
	consumerConfig := kafka.DefaultConsumerConfig()
	consumerConfig.AutoOffsetReset = kafka.LargestOffset
	consumerConfig.Coordinator = coordinator
	consumerConfig.Groupid = "event-dashboard"
	consumerConfig.ValueDecoder = kafka.NewKafkaAvroDecoder(this.config.SchemaRegistryUrl)
	consumerConfig.WorkerFailureCallback = func(_ *kafka.WorkerManager) kafka.FailedDecision {
		return kafka.CommitOffsetAndContinue
	}
	consumerConfig.WorkerFailedAttemptCallback = func(_ *kafka.Task, _ kafka.WorkerResult) kafka.FailedDecision {
		return kafka.CommitOffsetAndContinue
	}
	consumerConfig.Strategy = func(_ *kafka.Worker, msg *kafka.Message, taskId kafka.TaskId) kafka.WorkerResult {
		if record, ok := msg.DecodedValue.(*avro.GenericRecord); ok {
			this.events <- &Event{
				EventName:     record.Get("eventname").(string),
				Second:        record.Get("second").(int64),
				Framework:     record.Get("framework").(string),
				Latency:       record.Get("latency").(int64),
				ReceivedCount: record.Get("received_count").(int64),
				SentCount:     record.Get("sent_count").(int64),
			}
		} else {
			return kafka.NewProcessingFailedResult(taskId)
		}

		return kafka.NewSuccessfulResult(taskId)
	}

	return kafka.NewSlaveConsumer(consumerConfig)
}
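The strategy here pushes each decoded Avro record onto this.events as an Event and fails the task with NewProcessingFailedResult if the value is not a *avro.GenericRecord; the failure callbacks then commit the offset and move on.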
Example #9
File: events.go  Project: tomzhang/gauntlet
func (this *EventFetcher) createConsumer() *kafka.Consumer {
	coordinatorConfig := kafka.NewZookeeperConfig()
	coordinatorConfig.ZookeeperConnect = []string{this.config.ZkConnect}
	coordinator := kafka.NewZookeeperCoordinator(coordinatorConfig)
	consumerConfig := kafka.DefaultConsumerConfig()
	consumerConfig.AutoOffsetReset = kafka.LargestOffset
	consumerConfig.Coordinator = coordinator
	consumerConfig.Groupid = "event-dashboard"
	consumerConfig.ValueDecoder = kafka.NewKafkaAvroDecoder(this.config.SchemaRegistryUrl)
	consumerConfig.WorkerFailureCallback = func(_ *kafka.WorkerManager) kafka.FailedDecision {
		return kafka.CommitOffsetAndContinue
	}
	consumerConfig.WorkerFailedAttemptCallback = func(_ *kafka.Task, _ kafka.WorkerResult) kafka.FailedDecision {
		return kafka.CommitOffsetAndContinue
	}
	consumerConfig.Strategy = func(_ *kafka.Worker, msg *kafka.Message, taskId kafka.TaskId) kafka.WorkerResult {
		if record, ok := msg.DecodedValue.(*avro.GenericRecord); ok {
			this.events <- &Event{
				Topic:      record.Get("topic").(string),
				ConsumerId: record.Get("consumerid").(string),
				Partition:  record.Get("partition").(string),
				EventName:  record.Get("eventname").(string),
				Second:     record.Get("second").(int64),
				Operation:  record.Get("operation").(string),
				Value:      record.Get("value").(int64),
				Cnt:        record.Get("cnt").(int64),
			}
		} else {
			return kafka.NewProcessingFailedResult(taskId)
		}

		return kafka.NewSuccessfulResult(taskId)
	}

	return kafka.NewConsumer(consumerConfig)
}
Example #10
File: consumers.go  Project: pkoro/go-kafka
func resolveConfig() (*kafkaClient.ConsumerConfig, string, int, string, time.Duration) {
	rawConfig, err := kafkaClient.LoadConfiguration("consumers.properties")
	if err != nil {
		panic("Failed to load configuration file")
	}
	numConsumers, _ := strconv.Atoi(rawConfig["num_consumers"])
	zkTimeout, _ := time.ParseDuration(rawConfig["zookeeper_timeout"])

	numWorkers, _ := strconv.Atoi(rawConfig["num_workers"])
	maxWorkerRetries, _ := strconv.Atoi(rawConfig["max_worker_retries"])
	workerBackoff, _ := time.ParseDuration(rawConfig["worker_backoff"])
	workerRetryThreshold, _ := strconv.Atoi(rawConfig["worker_retry_threshold"])
	workerConsideredFailedTimeWindow, _ := time.ParseDuration(rawConfig["worker_considered_failed_time_window"])
	workerTaskTimeout, _ := time.ParseDuration(rawConfig["worker_task_timeout"])
	workerManagersStopTimeout, _ := time.ParseDuration(rawConfig["worker_managers_stop_timeout"])

	rebalanceBarrierTimeout, _ := time.ParseDuration(rawConfig["rebalance_barrier_timeout"])
	rebalanceMaxRetries, _ := strconv.Atoi(rawConfig["rebalance_max_retries"])
	rebalanceBackoff, _ := time.ParseDuration(rawConfig["rebalance_backoff"])
	partitionAssignmentStrategy := rawConfig["partition_assignment_strategy"]
	excludeInternalTopics, _ := strconv.ParseBool(rawConfig["exclude_internal_topics"])

	numConsumerFetchers, _ := strconv.Atoi(rawConfig["num_consumer_fetchers"])
	fetchBatchSize, _ := strconv.Atoi(rawConfig["fetch_batch_size"])
	fetchMessageMaxBytes, _ := strconv.Atoi(rawConfig["fetch_message_max_bytes"])
	fetchMinBytes, _ := strconv.Atoi(rawConfig["fetch_min_bytes"])
	fetchBatchTimeout, _ := time.ParseDuration(rawConfig["fetch_batch_timeout"])
	requeueAskNextBackoff, _ := time.ParseDuration(rawConfig["requeue_ask_next_backoff"])
	fetchWaitMaxMs, _ := strconv.Atoi(rawConfig["fetch_wait_max_ms"])
	socketTimeout, _ := time.ParseDuration(rawConfig["socket_timeout"])
	queuedMaxMessages, _ := strconv.Atoi(rawConfig["queued_max_messages"])
	refreshLeaderBackoff, _ := time.ParseDuration(rawConfig["refresh_leader_backoff"])
	fetchMetadataRetries, _ := strconv.Atoi(rawConfig["fetch_metadata_retries"])
	fetchMetadataBackoff, _ := time.ParseDuration(rawConfig["fetch_metadata_backoff"])

	offsetsCommitMaxRetries, _ := strconv.Atoi(rawConfig["offsets_commit_max_retries"])

	flushInterval, _ := time.ParseDuration(rawConfig["flush_interval"])
	deploymentTimeout, _ := time.ParseDuration(rawConfig["deployment_timeout"])

	zkConfig := kafkaClient.NewZookeeperConfig()
	zkConfig.ZookeeperConnect = []string{rawConfig["zookeeper_connect"]}
	zkConfig.ZookeeperTimeout = zkTimeout

	config := kafkaClient.DefaultConsumerConfig()
	config.Groupid = rawConfig["group_id"]
	config.NumWorkers = numWorkers
	config.MaxWorkerRetries = maxWorkerRetries
	config.WorkerBackoff = workerBackoff
	config.WorkerRetryThreshold = int32(workerRetryThreshold)
	config.WorkerThresholdTimeWindow = workerConsideredFailedTimeWindow
	config.WorkerTaskTimeout = workerTaskTimeout
	config.WorkerManagersStopTimeout = workerManagersStopTimeout
	config.BarrierTimeout = rebalanceBarrierTimeout
	config.RebalanceMaxRetries = int32(rebalanceMaxRetries)
	config.RebalanceBackoff = rebalanceBackoff
	config.PartitionAssignmentStrategy = partitionAssignmentStrategy
	config.ExcludeInternalTopics = excludeInternalTopics
	config.NumConsumerFetchers = numConsumerFetchers
	config.FetchBatchSize = fetchBatchSize
	config.FetchMessageMaxBytes = int32(fetchMessageMaxBytes)
	config.FetchMinBytes = int32(fetchMinBytes)
	config.FetchBatchTimeout = fetchBatchTimeout
	config.FetchTopicMetadataRetries = fetchMetadataRetries
	config.FetchTopicMetadataBackoff = fetchMetadataBackoff
	config.RequeueAskNextBackoff = requeueAskNextBackoff
	config.FetchWaitMaxMs = int32(fetchWaitMaxMs)
	config.SocketTimeout = socketTimeout
	config.QueuedMaxMessages = int32(queuedMaxMessages)
	config.RefreshLeaderBackoff = refreshLeaderBackoff
	config.Coordinator = kafkaClient.NewZookeeperCoordinator(zkConfig)
	config.OffsetsStorage = rawConfig["offsets_storage"]
	config.AutoOffsetReset = rawConfig["auto_offset_reset"]
	config.OffsetsCommitMaxRetries = offsetsCommitMaxRetries
	config.DeploymentTimeout = deploymentTimeout

	return config, rawConfig["topic"], numConsumers, rawConfig["graphite_connect"], flushInterval
}
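The returned values would then be wired into consumer startup; a hypothetical sketch (the graphite_connect and flush_interval metrics wiring is omitted):

config, topic, numConsumers, _, _ := resolveConfig()
for i := 0; i < numConsumers; i++ {
	consumer := kafkaClient.NewConsumer(config)
	go consumer.StartStatic(map[string]int{topic: 1})
}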
Example #11
func main() {
	parseAndValidateArgs()
	ctrlc := make(chan os.Signal, 1)
	signal.Notify(ctrlc, os.Interrupt)

	zkConfig := kafka.NewZookeeperConfig()
	zkConfig.ZookeeperConnect = strings.Split(*zookeeper, ",")
	coordinator := kafka.NewZookeeperCoordinator(zkConfig)

	config := kafka.DefaultConsumerConfig()
	config.Groupid = "perf-consumer"
	config.AutoOffsetReset = "smallest"
	config.Coordinator = coordinator
	config.WorkerFailedAttemptCallback = FailedAttemptCallback
	config.WorkerFailureCallback = FailedCallback
	if *siesta {
		config.LowLevelClient = kafka.NewSiestaClient(config)
	}

	if protobuf {
		setupLogLineProtoConfig(config)
	} else {
		setupAvroConfig(config)
	}

	consumer := kafka.NewConsumer(config)

	go consumer.StartStatic(map[string]int{*topic: 2})

	go func() {
		latencies := make([]metrics.Histogram, 0)
		endToEnd := metrics.NewRegisteredHistogram("Latency-end-to-end", metrics.DefaultRegistry, metrics.NewUniformSample(10000))
		go func() {
			for {
				time.Sleep(1 * time.Second)
				for i, meter := range latencies {
					fmt.Printf("Step %d: %f\n", i+1, meter.Mean())
				}
				fmt.Printf("End-to-end: %f\n", endToEnd.Mean())
				fmt.Println()
			}
		}()

		initialized := false
		for timing := range timings {
			if !initialized {
				for i := 1; i < len(timing); i++ {
					latencies = append(latencies, metrics.NewRegisteredHistogram(fmt.Sprintf("Latency-step-%d", i), metrics.DefaultRegistry, metrics.NewUniformSample(10000)))
				}
				initialized = true
			}

			if len(timing)-1 != len(latencies) {
				fmt.Println("Got wrong latencies, skipping..")
				continue
			}

			for i := 1; i < len(timing); i++ {
				latencies[i-1].Update(int64(timing[i] - timing[i-1]))
			}
			endToEnd.Update(int64(timing[len(timing)-1] - timing[0]))
		}
	}()

	<-ctrlc
	fmt.Println("Shutdown triggered, closing consumer")
	<-consumer.Close()
	close(timings)
}
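The timings channel consumed above is assumed to be filled elsewhere (by the strategies set up in setupAvroConfig and setupLogLineProtoConfig) with per-hop timestamps; each slice yields one histogram update per step plus the end-to-end latency, and closing the channel on shutdown stops the reporting goroutine.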