Example #1
// Example zk hosts: 172.31.22.222:2181, 172.31.21.206:2181, 172.31.17.130:2181
func NewConsumer(group string, topics, zk []string, strategy kafkalib.WorkerStrategy) (*Consumer, error) {
	consumer := &Consumer{
		topics: topics,
	}

	coordinatorConfig := kafkalib.NewZookeeperConfig()
	coordinatorConfig.ZookeeperConnect = zk
	coordinatorConfig.Root = "/kafka"
	coordinator := kafkalib.NewZookeeperCoordinator(coordinatorConfig)

	consumerConfig := kafkalib.DefaultConsumerConfig()
	consumerConfig.Groupid = group
	consumerConfig.Coordinator = coordinator
	consumerConfig.Strategy = strategy
	consumerConfig.AutoOffsetReset = kafkalib.SmallestOffset
	consumerConfig.OffsetCommitInterval = 1 * time.Minute
	consumerConfig.OffsetsCommitMaxRetries = 5
	consumerConfig.MaxWorkerRetries = 5
	consumerConfig.WorkerFailureCallback = func(wm *kafkalib.WorkerManager) kafkalib.FailedDecision {
		kafkalib.Error(consumer, "Failed to write. Shutting down...")
		return kafkalib.DoNotCommitOffsetAndStop
	}
	consumerConfig.WorkerFailedAttemptCallback = func(task *kafkalib.Task, result kafkalib.WorkerResult) kafkalib.FailedDecision {
		kafkalib.Errorf(consumer, "Failed to write %s to the database after %d retries", task.Id().String(), task.Retries)
		return kafkalib.DoNotCommitOffsetAndContinue
	}
	consumer.config = consumerConfig
	consumer.consumer = kafkalib.NewConsumer(consumerConfig)
	return consumer, nil
}
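
A minimal usage sketch for NewConsumer, assuming the caller lives in the same package (the unexported consumer field is accessed directly). The group, topic, and ZooKeeper host are hypothetical; the WorkerStrategy signature matches the one used in Examples #3 and #7.

func runConsumer() error {
	strategy := func(_ *kafkalib.Worker, msg *kafkalib.Message, id kafkalib.TaskId) kafkalib.WorkerResult {
		// Process msg here; a successful result lets the offset be committed.
		return kafkalib.NewSuccessfulResult(id)
	}

	c, err := NewConsumer("my-group", []string{"events"}, []string{"localhost:2181"}, strategy)
	if err != nil {
		return err
	}

	// StartStatic blocks, so the other examples run it on its own goroutine.
	go c.consumer.StartStatic(map[string]int{"events": 1})
	return nil
}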
Example #2
func startNewConsumer(config kafkaClient.ConsumerConfig, topic string) *kafkaClient.Consumer {
	config.Strategy = GetStrategy(config.Consumerid)
	config.WorkerFailureCallback = FailedCallback
	config.WorkerFailedAttemptCallback = FailedAttemptCallback
	consumer := kafkaClient.NewConsumer(&config)
	topics := map[string]int{topic: config.NumConsumerFetchers}
	go func() {
		consumer.StartStatic(topics)
	}()
	return consumer
}
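
Example #2 references FailedCallback and FailedAttemptCallback without defining them. A plausible sketch, with signatures taken from the inline callbacks in Examples #1 and #6; the decision values here are illustrative:

func FailedCallback(wm *kafkaClient.WorkerManager) kafkaClient.FailedDecision {
	// Stop the worker manager without committing, as Example #1 does.
	return kafkaClient.DoNotCommitOffsetAndStop
}

func FailedAttemptCallback(task *kafkaClient.Task, result kafkaClient.WorkerResult) kafkaClient.FailedDecision {
	// Skip the failed task but keep consuming.
	return kafkaClient.DoNotCommitOffsetAndContinue
}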
Example #3
func main() {
	parseArgs()

	go_kafka_client.Logger = go_kafka_client.NewDefaultLogger(go_kafka_client.ErrorLevel)
	kafkaProducer = producer.NewKafkaProducer(writeTopic, []string{broker})

	//Coordinator settings
	zookeeperConfig := go_kafka_client.NewZookeeperConfig()
	zookeeperConfig.ZookeeperConnect = []string{zookeeper}

	//Actual consumer settings
	consumerConfig := go_kafka_client.DefaultConsumerConfig()
	consumerConfig.AutoOffsetReset = go_kafka_client.SmallestOffset
	consumerConfig.Coordinator = go_kafka_client.NewZookeeperCoordinator(zookeeperConfig)
	consumerConfig.Groupid = group
	consumerConfig.NumWorkers = 1
	consumerConfig.NumConsumerFetchers = 1
	consumerConfig.KeyDecoder = go_kafka_client.NewKafkaAvroDecoder(schemaRepo)
	consumerConfig.ValueDecoder = consumerConfig.KeyDecoder

	consumerConfig.Strategy = func(worker *go_kafka_client.Worker, message *go_kafka_client.Message, taskId go_kafka_client.TaskId) go_kafka_client.WorkerResult {
		time.Sleep(2 * time.Second)
		record, ok := message.DecodedValue.(*avro.GenericRecord)
		if !ok {
			panic("Not a *GenericError, but expected one")
		}

		fmt.Printf("golang > received %s\n", fmt.Sprintf("{\"counter\": %d, \"name\": \"%s\", \"uuid\": \"%s\"}", record.Get("counter"), record.Get("name"), record.Get("uuid")))
		modify(record)
		encoded, err := encoder.Encode(record)
		if err != nil {
			panic(err)
		}

		if err := kafkaProducer.SendBytesSync(encoded); err != nil {
			panic(err)
		}

		return go_kafka_client.NewSuccessfulResult(taskId)
	}

	consumerConfig.WorkerFailureCallback = func(_ *go_kafka_client.WorkerManager) go_kafka_client.FailedDecision {
		return go_kafka_client.CommitOffsetAndContinue
	}
	consumerConfig.WorkerFailedAttemptCallback = func(_ *go_kafka_client.Task, _ go_kafka_client.WorkerResult) go_kafka_client.FailedDecision {
		return go_kafka_client.CommitOffsetAndContinue
	}

	kafkaConsumer = go_kafka_client.NewConsumer(consumerConfig)

	pingPongLoop()
}
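
The modify helper used in the strategy above is not shown. Given the ping-pong flow (receive a record, change it, send it back), a plausible modify is a counter bump; the exact mutation is a guess, and only Get/Set on *avro.GenericRecord come from the go-avro API:

func modify(record *avro.GenericRecord) {
	// Assumes the schema's counter field is a long, as the %d verb above suggests.
	if counter, ok := record.Get("counter").(int64); ok {
		record.Set("counter", counter+1)
	}
}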
Example #4
func startNewConsumer(config kafkaClient.ConsumerConfig, topic string, channel chan *api.Message) *kafkaClient.Consumer {

	// wait explicitly for topic to be created by the producer
	waitForTopicToBeReady(config, topic)

	config.Strategy = handleMessage(config.Consumerid, channel)
	config.WorkerFailureCallback = failedCallback
	config.WorkerFailedAttemptCallback = failedAttemptCallback
	//config.NumConsumerFetchers = 2
	consumer := kafkaClient.NewConsumer(&config)
	topics := map[string]int{topic: config.NumConsumerFetchers}

	go func() {
		consumer.StartStatic(topics)
	}()
	return consumer
}
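
handleMessage above acts as a strategy factory: it binds the consumer id and the output channel into a WorkerStrategy closure. A hypothetical implementation, assuming api.Message carries a Value []byte payload (its real fields are not shown in these examples):

func handleMessage(consumerId string, channel chan *api.Message) kafkaClient.WorkerStrategy {
	return func(_ *kafkaClient.Worker, msg *kafkaClient.Message, taskId kafkaClient.TaskId) kafkaClient.WorkerResult {
		// Forward the raw payload; the Value field on api.Message is hypothetical.
		channel <- &api.Message{Value: msg.Value}
		return kafkaClient.NewSuccessfulResult(taskId)
	}
}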
Example #5
func main() {
	parseAndValidateArgs()
	ctrlc := make(chan os.Signal, 1)
	signal.Notify(ctrlc, os.Interrupt)

	producerConfig := kafka.DefaultProducerConfig()
	producerConfig.BrokerList = strings.Split(*brokerList, ",")

	zkConfig := kafka.NewZookeeperConfig()
	zkConfig.ZookeeperConnect = strings.Split(*zookeeper, ",")
	coordinator := kafka.NewZookeeperCoordinator(zkConfig)

	config := kafka.DefaultConsumerConfig()
	config.Debug = true
	config.Groupid = "perf-mirror"
	config.AutoOffsetReset = kafka.SmallestOffset
	config.Coordinator = coordinator
	config.WorkerFailedAttemptCallback = FailedAttemptCallback
	config.WorkerFailureCallback = FailedCallback
	if *siesta {
		config.LowLevelClient = kafka.NewSiestaClient(config)
	}

	if protobuf {
		setupProtoConfig(config)
	} else {
		producerConfig.ValueEncoder = kafka.NewKafkaAvroEncoder(*schemaRegistry)
		setupAvroConfig(config)
	}

	producer = kafka.NewSaramaProducer(producerConfig)
	consumer := kafka.NewConsumer(config)

	go consumer.StartStatic(map[string]int{*consumeTopic: 1})

	<-ctrlc
	fmt.Println("Shutdown triggered, closing consumer")
	<-consumer.Close()
	producer.Close()
}
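
setupAvroConfig is not shown here. A plausible version, modeled on the decoder wiring in Example #3 and assuming the same *schemaRegistry flag used for the producer's encoder above:

func setupAvroConfig(config *kafka.ConsumerConfig) {
	// Decode both keys and values through the schema registry.
	config.KeyDecoder = kafka.NewKafkaAvroDecoder(*schemaRegistry)
	config.ValueDecoder = config.KeyDecoder
}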
Example #6
func createConsumer(group string, workerStrategy go_kafka_client.WorkerStrategy) *go_kafka_client.Consumer {
	//Coordinator settings
	zookeeperConfig := go_kafka_client.NewZookeeperConfig()
	zookeeperConfig.ZookeeperConnect = []string{zookeeper}

	//Actual consumer settings
	consumerConfig := go_kafka_client.DefaultConsumerConfig()
	consumerConfig.Coordinator = go_kafka_client.NewZookeeperCoordinator(zookeeperConfig)
	consumerConfig.Groupid = group
	consumerConfig.NumWorkers = 1
	consumerConfig.NumConsumerFetchers = 1
	consumerConfig.FetchBatchSize = 1
	consumerConfig.FetchBatchTimeout = 1 * time.Second
	consumerConfig.Strategy = workerStrategy
	consumerConfig.AutoOffsetReset = go_kafka_client.SmallestOffset
	consumerConfig.WorkerFailureCallback = func(*go_kafka_client.WorkerManager) go_kafka_client.FailedDecision {
		return go_kafka_client.CommitOffsetAndContinue
	}
	consumerConfig.WorkerFailedAttemptCallback = func(*go_kafka_client.Task, go_kafka_client.WorkerResult) go_kafka_client.FailedDecision {
		return go_kafka_client.CommitOffsetAndContinue
	}

	return go_kafka_client.NewConsumer(consumerConfig)
}
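
A hypothetical caller for createConsumer above; the group and topic names are made up, and the strategy simply prints each message's raw value:

func startPrinter() *go_kafka_client.Consumer {
	consumer := createConsumer("printer-group", func(_ *go_kafka_client.Worker, msg *go_kafka_client.Message, id go_kafka_client.TaskId) go_kafka_client.WorkerResult {
		fmt.Printf("received: %s\n", string(msg.Value))
		return go_kafka_client.NewSuccessfulResult(id)
	})

	// StartStatic blocks, so run it on its own goroutine as the other examples do.
	go consumer.StartStatic(map[string]int{"my-topic": 1})
	return consumer
}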
Example #7
func (this *EventFetcher) createConsumer() *kafka.Consumer {
	coordinatorConfig := kafka.NewZookeeperConfig()
	coordinatorConfig.ZookeeperConnect = []string{this.config.ZkConnect}
	coordinator := kafka.NewZookeeperCoordinator(coordinatorConfig)
	consumerConfig := kafka.DefaultConsumerConfig()
	consumerConfig.AutoOffsetReset = kafka.LargestOffset
	consumerConfig.Coordinator = coordinator
	consumerConfig.Groupid = "event-dashboard"
	consumerConfig.ValueDecoder = kafka.NewKafkaAvroDecoder(this.config.SchemaRegistryUrl)
	consumerConfig.WorkerFailureCallback = func(_ *kafka.WorkerManager) kafka.FailedDecision {
		return kafka.CommitOffsetAndContinue
	}
	consumerConfig.WorkerFailedAttemptCallback = func(_ *kafka.Task, _ kafka.WorkerResult) kafka.FailedDecision {
		return kafka.CommitOffsetAndContinue
	}
	consumerConfig.Strategy = func(_ *kafka.Worker, msg *kafka.Message, taskId kafka.TaskId) kafka.WorkerResult {
		if record, ok := msg.DecodedValue.(*avro.GenericRecord); ok {
			this.events <- &Event{
				Topic:      record.Get("topic").(string),
				ConsumerId: record.Get("consumerid").(string),
				Partition:  record.Get("partition").(string),
				EventName:  record.Get("eventname").(string),
				Second:     record.Get("second").(int64),
				Operation:  record.Get("operation").(string),
				Value:      record.Get("value").(int64),
				Cnt:        record.Get("cnt").(int64),
			}
		} else {
			return kafka.NewProcessingFailedResult(taskId)
		}

		return kafka.NewSuccessfulResult(taskId)
	}

	return kafka.NewConsumer(consumerConfig)
}
Example #8
func main() {
	parseAndValidateArgs()
	ctrlc := make(chan os.Signal, 1)
	signal.Notify(ctrlc, os.Interrupt)

	zkConfig := kafka.NewZookeeperConfig()
	zkConfig.ZookeeperConnect = strings.Split(*zookeeper, ",")
	coordinator := kafka.NewZookeeperCoordinator(zkConfig)

	config := kafka.DefaultConsumerConfig()
	config.Groupid = "perf-consumer"
	config.AutoOffsetReset = kafka.SmallestOffset
	config.Coordinator = coordinator
	config.WorkerFailedAttemptCallback = FailedAttemptCallback
	config.WorkerFailureCallback = FailedCallback
	if *siesta {
		config.LowLevelClient = kafka.NewSiestaClient(config)
	}

	if protobuf {
		setupLogLineProtoConfig(config)
	} else {
		setupAvroConfig(config)
	}

	consumer := kafka.NewConsumer(config)

	go consumer.StartStatic(map[string]int{*topic: 2})

	go func() {
		latencies := make([]metrics.Histogram, 0)
		endToEnd := metrics.NewRegisteredHistogram("Latency-end-to-end", metrics.DefaultRegistry, metrics.NewUniformSample(10000))
		go func() {
			for {
				time.Sleep(1 * time.Second)
				for i, meter := range latencies {
					fmt.Printf("Step %d: %f\n", i+1, meter.Mean())
				}
				fmt.Printf("End-to-end: %f\n", endToEnd.Mean())
				fmt.Println()
			}
		}()

		initialized := false
		for timing := range timings {
			if !initialized {
				for i := 1; i < len(timing); i++ {
					latencies = append(latencies, metrics.NewRegisteredHistogram(fmt.Sprintf("Latency-step-%d", i), metrics.DefaultRegistry, metrics.NewUniformSample(10000)))
				}
				initialized = true
			}

			if len(timing)-1 != len(latencies) {
				fmt.Println("Got wrong latencies, skipping..")
				continue
			}

			for i := 1; i < len(timing); i++ {
				latencies[i-1].Update(int64(timing[i] - timing[i-1]))
			}
			endToEnd.Update(int64(timing[len(timing)-1] - timing[0]))
		}
	}()

	<-ctrlc
	fmt.Println("Shutdown triggered, closing consumer")
	<-consumer.Close()
	close(timings)
}