Example 1
func main() {
	// connect to kafka cluster
	broker, err := kafka.Dial(kafkaAddrs, kafka.NewBrokerConf("test-client"))
	if err != nil {
		log.Fatalf("cannot connect to kafka cluster: %s", err)
	}
	defer broker.Close()

	go printConsumed(broker)
	produceStdin(broker)
}
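
The helpers printConsumed and produceStdin and the kafkaAddrs variable are not shown. A minimal sketch of what they could look like, assuming the optiopay/kafka-style API used throughout these examples, a single local broker, and fixed topic/partition constants (kafkaAddrs, topic, and partition values are illustrative; bufio, log, os, strings, and the kafka/proto packages are assumed imported):

var kafkaAddrs = []string{"localhost:9092"} // assumed broker address

const (
	topic     = "my-messages" // assumed topic name
	partition = 0
)

// printConsumed reads messages from kafka and prints them out.
func printConsumed(broker kafka.Client) {
	conf := kafka.NewConsumerConf(topic, partition)
	conf.StartOffset = kafka.StartOffsetNewest
	consumer, err := broker.Consumer(conf)
	if err != nil {
		log.Fatalf("cannot create kafka consumer for %s:%d: %s", topic, partition, err)
	}
	for {
		msg, err := consumer.Consume()
		if err != nil {
			if err != kafka.ErrNoData {
				log.Printf("cannot consume %q topic message: %s", topic, err)
			}
			break
		}
		log.Printf("message %d: %s", msg.Offset, msg.Value)
	}
}

// produceStdin reads stdin and sends every non-empty line as a message.
func produceStdin(broker kafka.Client) {
	producer := broker.Producer(kafka.NewProducerConf())
	input := bufio.NewReader(os.Stdin)
	for {
		line, err := input.ReadString('\n')
		if err != nil {
			log.Fatalf("input error: %s", err)
		}
		if line = strings.TrimSpace(line); line == "" {
			continue
		}
		msg := &proto.Message{Value: []byte(line)}
		if _, err := producer.Produce(topic, partition, msg); err != nil {
			log.Fatalf("cannot produce message to %s:%d: %s", topic, partition, err)
		}
	}
}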
Example 2
func TestNewTopic(t *testing.T) {
	IntegrationTest(t)

	topic := "NewTopic"

	cluster := NewKafkaCluster("kafka-docker/", 1)
	if err := cluster.Start(); err != nil {
		t.Fatalf("cannot start kafka cluster: %s", err)
	}
	defer func() {
		_ = cluster.Stop()
	}()

	bconf := kafka.NewBrokerConf("producer-new-topic")
	bconf.Logger = &testLogger{t}
	bconf.AllowTopicCreation = true
	addrs, err := cluster.KafkaAddrs()
	if err != nil {
		t.Fatalf("cannot get kafka address: %s", err)
	}
	broker, err := kafka.Dial(addrs, bconf)
	if err != nil {
		t.Fatalf("cannot connect to cluster (%q): %s", addrs, err)
	}
	defer broker.Close()

	m := proto.Message{
		Value: []byte("Test message"),
	}
	pconf := kafka.NewProducerConf()
	producer := broker.Producer(pconf)

	if _, err := producer.Produce(topic, 0, &m); err != nil {
		t.Fatalf("cannot produce to %q: %s", topic, err)
	}

	consumer, err := broker.Consumer(kafka.NewConsumerConf(topic, 0))
	if err != nil {
		t.Fatalf("cannot create consumer for %q: %s", topic, err)
	}
	if _, err := consumer.Consume(); err != nil {
		t.Errorf("cannot consume message from %q: %s", topic, err)
	}
}
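
This test works because AllowTopicCreation is set on the broker configuration: the first Produce against the unknown topic triggers its creation. The consumer configuration can also be tuned before it is handed to broker.Consumer; a small sketch, assuming the ConsumerConf fields of this client library (field names and defaults may vary between versions):

	cconf := kafka.NewConsumerConf(topic, 0)
	cconf.StartOffset = kafka.StartOffsetOldest // read the topic from the beginning
	cconf.RetryLimit = 4                        // give up Consume after four empty fetches
	consumer, err := broker.Consumer(cconf)
	if err != nil {
		t.Fatalf("cannot create consumer for %q: %s", topic, err)
	}
	if msg, err := consumer.Consume(); err == nil {
		t.Logf("consumed offset %d: %s", msg.Offset, msg.Value)
	}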
Example 3
func (w *writer) ensureInitialized() error {
	return w.retry.Do(func() (err error) {
		w.broker, err = kafka.Dial(w.endpoints, kafka.NewBrokerConf(w.clientId))
		if err != nil {
			return
		}
		var count int32
		count, err = w.broker.PartitionCount(w.topic)
		if err != nil {
			return
		}
		conf := kafka.NewProducerConf()
		conf.RequiredAcks = proto.RequiredAcksLocal
		producer := w.broker.Producer(conf)
		w.producer = newChannelProducer(producer, count)
		return
	})
}
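
newChannelProducer is not part of the client library and is not shown. Since it receives the topic's partition count, it presumably wraps the plain producer in a partition-aware dispatcher; a hypothetical reconstruction using the library's round-robin DistributingProducer:

// hypothetical: spread writes evenly across all partitions of the topic
func newChannelProducer(p kafka.Producer, partitions int32) kafka.DistributingProducer {
	return kafka.NewRoundRobinProducer(p, partitions)
}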
Example 4
func NewKafkaClient(uri *url.URL, topicType string) (KafkaClient, error) {
	opts, err := url.ParseQuery(uri.RawQuery)
	if err != nil {
		return nil, fmt.Errorf("failed to parser url's query string: %s", err)
	}
	glog.V(3).Infof("kafka sink option: %v", opts)

	topic, err := getTopic(opts, topicType)
	if err != nil {
		return nil, err
	}

	var kafkaBrokers []string
	if len(opts["brokers"]) < 1 {
		return nil, fmt.Errorf("There is no broker assigned for connecting kafka")
	}
	kafkaBrokers = append(kafkaBrokers, opts["brokers"]...)
	glog.V(2).Infof("initializing kafka sink with brokers - %v", kafkaBrokers)

	// build the broker configuration
	brokerConf := kafka.NewBrokerConf(brokerClientID)
	brokerConf.DialTimeout = brokerDialTimeout
	brokerConf.DialRetryLimit = brokerDialRetryLimit
	brokerConf.DialRetryWait = brokerDialRetryWait
	brokerConf.LeaderRetryLimit = brokerLeaderRetryLimit
	brokerConf.LeaderRetryWait = brokerLeaderRetryWait
	brokerConf.AllowTopicCreation = brokerAllowTopicCreation

	// set up the kafka producer
	sinkProducer, err := setupProducer(kafkaBrokers, topic, brokerConf)
	if err != nil {
		return nil, fmt.Errorf("Failed to setup Producer: - %v", err)
	}

	return &kafkaSink{
		producer:  sinkProducer,
		dataTopic: topic,
	}, nil
}
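
The broker settings referenced above (brokerClientID, brokerDialTimeout, and the retry limits) are package-level constants that are not shown. Plausible definitions, with assumed values for illustration only (time must be imported):

const (
	brokerClientID           = "kafka-sink"           // assumed client id
	brokerDialTimeout        = 10 * time.Second       // assumed timeout
	brokerDialRetryLimit     = 1                      // assumed retry limits
	brokerDialRetryWait      = 500 * time.Millisecond
	brokerLeaderRetryLimit   = 1
	brokerLeaderRetryWait    = 500 * time.Millisecond
	brokerAllowTopicCreation = true
)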
Example 5
func (self *KafkaInput) Init(pcf *plugins.PluginCommonConfig, conf toml.Primitive) (err error) {
	log.Println("KafkaInput Init.")
	self.common = pcf
	hn, err := os.Hostname()
	if err != nil {
		hn = "kamanclient"
	}
	self.config = &KafkaInputConfig{
		ClientId:      hn,
		Partitions:    0,
		FlushInterval: 1000,
	}
	if err = toml.PrimitiveDecode(conf, self.config); err != nil {
		return fmt.Errorf("Can't unmarshal KafkaInput config: %s", err)
	}
	if len(self.config.Addrs) == 0 {
		return errors.New("addrs must have at least one entry")
	}
	if len(self.config.Topic) == 0 {
		return fmt.Errorf("topic is empty")
	}

	bcf := kafka.NewBrokerConf(self.config.ClientId)
	bcf.AllowTopicCreation = false
	//bcf.Logger = &stdLogger{}

	self.broker, err = kafka.Dial(self.config.Addrs, bcf)
	if err != nil {
		return fmt.Errorf("cannot connect to kafka cluster: %s", err)
	}

	// the broker must stay open for the consumer's lifetime, so it is not
	// closed here; it should be closed when the plugin shuts down
	consumerconf := kafka.NewConsumerConf(self.config.Topic, self.config.Partition)
	self.consumer, err = self.broker.Consumer(consumerconf)
	if err != nil {
		return fmt.Errorf("cannot create kafka consumer for %s:%d: %s", self.config.Topic, self.config.Partition, err)
	}
	return err
}
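
Init only wires the consumer up; reading happens elsewhere in the plugin. A sketch of the kind of read loop a Run method might drive (hypothetical, not part of the original plugin; deliver stands in for whatever hands messages to the pipeline):

func (self *KafkaInput) consumeLoop(deliver func([]byte)) error {
	for {
		msg, err := self.consumer.Consume()
		if err != nil {
			if err == kafka.ErrNoData {
				continue // nothing new yet; the client already waited between fetches
			}
			return fmt.Errorf("cannot consume from %s:%d: %s",
				self.config.Topic, self.config.Partition, err)
		}
		deliver(msg.Value)
	}
}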
Example 6
func NewKafkaSink(uri *url.URL) (core.DataSink, error) {
	opts, err := url.ParseQuery(uri.RawQuery)
	if err != nil {
		return nil, fmt.Errorf("failed to parser url's query string: %s", err)
	}

	topic := dataTopic
	if len(opts["timeseriestopic"]) > 0 {
		topic = opts["timeseriestopic"][0]
	}

	var kafkaBrokers []string
	if len(opts["brokers"]) < 1 {
		return nil, fmt.Errorf("There is no broker assigned for connecting kafka")
	}
	kafkaBrokers = append(kafkaBrokers, opts["brokers"]...)
	glog.V(2).Infof("initializing kafka sink with brokers - %v", kafkaBrokers)

	// build the broker configuration
	brokerConf := kafka.NewBrokerConf(brokerClientID)
	brokerConf.DialTimeout = brokerDialTimeout
	brokerConf.DialRetryLimit = brokerDialRetryLimit
	brokerConf.DialRetryWait = brokerDialRetryWait
	brokerConf.LeaderRetryLimit = brokerLeaderRetryLimit
	brokerConf.LeaderRetryWait = brokerLeaderRetryWait
	brokerConf.AllowTopicCreation = true

	// set up the kafka producer
	sinkProducer, err := setupProducer(kafkaBrokers, brokerConf)
	if err != nil {
		return nil, fmt.Errorf("Failed to setup Producer: - %v", err)
	}

	return &kafkaSink{
		producer:  sinkProducer,
		dataTopic: topic,
	}, nil
}
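
setupProducer is not shown. A minimal sketch of what it could do with the prepared configuration (hypothetical; proto must be imported for the acks constant):

func setupProducer(brokers []string, conf kafka.BrokerConf) (kafka.Producer, error) {
	broker, err := kafka.Dial(brokers, conf)
	if err != nil {
		return nil, fmt.Errorf("cannot connect to kafka cluster: %s", err)
	}
	pconf := kafka.NewProducerConf()
	pconf.RequiredAcks = proto.RequiredAcksLocal // wait for the leader's ack only
	// note: the broker stays open for the producer's lifetime
	return broker.Producer(pconf), nil
}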
Example 7
func NewKafkaSink(uri *url.URL, _ extpoints.HeapsterConf) ([]sink_api.ExternalSink, error) {
	var kafkaSink kafkaSink
	opts, err := url.ParseQuery(uri.RawQuery)
	if err != nil {
		return nil, fmt.Errorf("failed to parser url's query string: %s", err)
	}

	kafkaSink.timeSeriesTopic = timeSeriesTopic
	if len(opts["timeseriestopic"]) > 0 {
		kafkaSink.timeSeriesTopic = opts["timeseriestopic"][0]
	}

	kafkaSink.eventsTopic = eventsTopic
	if len(opts["eventstopic"]) > 0 {
		kafkaSink.eventsTopic = opts["eventstopic"][0]
	}

	if len(opts["brokers"]) < 1 {
		return nil, fmt.Errorf("There is no broker assigned for connecting kafka broker")
	}
	kafkaSink.sinkBrokerHosts = append(kafkaSink.sinkBrokerHosts, opts["brokers"]...)

	glog.V(2).Infof("initializing kafka sink with brokers - %v", kafkaSink.sinkBrokerHosts)
	// connect to kafka cluster
	brokerConf := kafka.NewBrokerConf(brokerClientID)
	brokerConf.DialTimeout = brokerDialTimeout
	brokerConf.DialRetryLimit = brokerDialRetryLimit
	brokerConf.DialRetryWait = brokerDialRetryWait
	brokerConf.LeaderRetryLimit = brokerLeaderRetryLimit
	brokerConf.LeaderRetryWait = brokerLeaderRetryWait
	brokerConf.AllowTopicCreation = true

	// Store broker configuration.
	kafkaSink.brokerConf = brokerConf
	kafkaSink.ci = sinkutil.NewClientInitializer("kafka", kafkaSink.setupClient, kafkaSink.ping, 10*time.Second)
	return []sink_api.ExternalSink{&kafkaSink}, nil
}
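
Here the sink does not dial immediately: the ClientInitializer keeps calling setupClient until it succeeds and then uses ping as a periodic health check. Hypothetical sketches of those two methods, assuming the sink keeps broker and producer fields (the actual code may differ):

func (sink *kafkaSink) setupClient() error {
	broker, err := kafka.Dial(sink.sinkBrokerHosts, sink.brokerConf)
	if err != nil {
		return fmt.Errorf("cannot connect to kafka cluster: %s", err)
	}
	sink.broker = broker                                     // assumed field
	sink.producer = broker.Producer(kafka.NewProducerConf()) // assumed field
	return nil
}

func (sink *kafkaSink) ping() error {
	// cheap liveness probe: fetch metadata for one of the configured topics
	_, err := sink.broker.PartitionCount(sink.timeSeriesTopic)
	return err
}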
Example 8
func TestConsumerBrokenConnection(t *testing.T) {
	IntegrationTest(t)
	const msgPerTopic = 10

	topics := []string{"Topic3", "Topic4"}

	cluster := NewKafkaCluster("kafka-docker/", 4)
	if err := cluster.Start(); err != nil {
		t.Fatalf("cannot start kafka cluster: %s", err)
	}
	defer func() {
		_ = cluster.Stop()
	}()

	bconf := kafka.NewBrokerConf("producer-broken-connection")
	bconf.Logger = &testLogger{t}
	addrs, err := cluster.KafkaAddrs()
	if err != nil {
		t.Fatalf("cannot get kafka address: %s", err)
	}
	broker, err := kafka.Dial(addrs, bconf)
	if err != nil {
		t.Fatalf("cannot connect to cluster (%q): %s", addrs, err)
	}
	defer broker.Close()

	// produce a big message to force a TCP buffer flush
	m := proto.Message{
		Value: []byte(strings.Repeat("consumer broken connection message ", 1000)),
	}
	pconf := kafka.NewProducerConf()
	producer := broker.Producer(pconf)

	// send messages to all topics
	for _, name := range topics {
		for i := 0; i < msgPerTopic; i++ {
			if _, err := producer.Produce(name, 0, &m); err != nil {
				t.Fatalf("cannot produce to %q: %s", name, err)
			}
		}
	}

	// Kill two of the kafka brokers. Consuming is expected to fail while the
	// leaders are gone, so bring the nodes back after a small delay and make
	// sure all previously produced messages can still be read.
	containers, err := cluster.Containers()
	if err != nil {
		t.Fatalf("cannot get containers: %s", err)
	}
	var stopped []*Container
	for _, container := range containers {
		if container.RunningKafka() {
			if err := container.Kill(); err != nil {
				t.Fatalf("cannot kill %q kafka container: %s", container.ID, err)
			}
			stopped = append(stopped, container)
		}
		if len(stopped) == 2 {
			break
		}
	}

	// bring stopped containers back
	errc := make(chan error)
	go func() {
		time.Sleep(500 * time.Millisecond)
		for _, container := range stopped {
			if err := container.Start(); err != nil {
				errc <- err
			}
		}
		close(errc)
	}()

	// make sure data was persisted
	for _, name := range topics {
		consumer, err := broker.Consumer(kafka.NewConsumerConf(name, 0))
		if err != nil {
			t.Errorf("cannot create consumer for %q: %s", name, err)
			continue
		}
		for i := 0; i < msgPerTopic; i++ {
			if _, err := consumer.Consume(); err != nil {
				t.Errorf("cannot consume %d message from %q: %s", i, name, err)
			}
		}
	}

	for err := range errc {
		t.Errorf("cannot start container: %s", err)
	}
}
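
Consuming right after the killed brokers return can still hit transient fetch errors; how long Consume keeps retrying before surfacing them is governed by the consumer's retry settings. A sketch, assuming this client's ConsumerConf error-retry fields (time must be imported):

		cconf := kafka.NewConsumerConf(name, 0)
		cconf.RetryErrLimit = 10                    // keep retrying transient errors...
		cconf.RetryErrWait = 500 * time.Millisecond // ...while brokers are restarting
		consumer, err := broker.Consumer(cconf)
		if err != nil {
			t.Errorf("cannot create consumer for %q: %s", name, err)
		}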
Example 9
func (self *KafkaOutput) Init(pcf *plugins.PluginCommonConfig, conf toml.Primitive) (err error) {
	log.Println("KafkaOutput Init.")
	self.common = pcf
	hn, err := os.Hostname()
	if err != nil {
		hn = "kamanclient"
	}
	self.config = &KafkaOutputConfig{
		ClientId:      hn,
		Distributer:   "None",
		Partitions:    0,
		FlushInterval: 1000,
	}
	if err = toml.PrimitiveDecode(conf, self.config); err != nil {
		return fmt.Errorf("Can't unmarshal KafkaOutput config: %s", err)
	}
	if len(self.config.Addrs) == 0 {
		return errors.New("addrs must have at least one entry")
	}
	if len(self.config.Topic) == 0 {
		return fmt.Errorf("topic is empty")
	}

	bcf := kafka.NewBrokerConf(self.config.ClientId)
	//bcf.AllowTopicCreation = true

	// connect to kafka cluster
	self.broker, err = kafka.Dial(self.config.Addrs, bcf)
	if err != nil {
		return fmt.Errorf("cannot connect to kafka cluster: %s", err)
	}

	// the broker must stay open for the producer's lifetime, so it is not
	// closed here; it should be closed when the plugin shuts down
	pf := kafka.NewProducerConf()
	pf.RequiredAcks = proto.RequiredAcksLocal // acks=1: the partition leader is enough
	self.producer = self.broker.Producer(pf)
	partitions, err := self.broker.PartitionCount(self.config.Topic)
	if err != nil {
		return fmt.Errorf("cannot count to topic partitions: %s", err)
	}
	log.Printf("topic\"%s\" has %d partitions\n", self.config.Topic, partitions)
	if self.config.Partition >= partitions {
		return fmt.Errorf("invalid partition %d: topic has only %d partitions",
			self.config.Partition, partitions)
	}
	if self.config.Partitions == 0 {
		self.config.Partitions = partitions
	}
	switch self.config.Distributer {
	case "Random":
		self.distributingProducer = kafka.NewRandomProducer(self.producer, self.config.Partitions)
	case "RoundRobin":
		self.distributingProducer = kafka.NewRoundRobinProducer(self.producer, self.config.Partitions)
	case "Hash":
		self.distributingProducer = kafka.NewHashProducer(self.producer, self.config.Partitions)
	case "None":
		self.distributingProducer = nil
	default:
		return fmt.Errorf("invalid distributer: %s, must be one of these: \"Random\",\"RoundRobin\",\"Hash\"", self.config.Distributer)
	}
	self.batchChan = make(chan *outBatch)
	self.backChan = make(chan *outBatch, 2) // Never block on the hand-back
	return err
}
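
Run (Example 10 below) shuttles outBatch values between batchChan and backChan, but the type itself is not shown. A plausible minimal definition (the initial capacity is a guess):

// hypothetical reconstruction of the batch type used by batchChan/backChan
type outBatch struct {
	data []*proto.Message
}

func newOutBatch() *outBatch {
	return &outBatch{data: make([]*proto.Message, 0, 128)}
}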
Example 10
func (self *KafkaOutput) Run(runner plugins.OutputRunner) (err error) {
	var (
		timer         *time.Timer
		timerDuration time.Duration
		pack          *plugins.PipelinePack
		//message       *proto.Message
		//outMessages   []*proto.Message
	)
	errChan := make(chan error, 1)

	go self.committer(runner, errChan)

	out := newOutBatch()
	message := &proto.Message{Value: nil}
	ok := true
	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()
	timerDuration = time.Duration(self.config.FlushInterval) * time.Millisecond
	timer = time.NewTimer(timerDuration)
	if self.distributingProducer != nil {
		for ok {
			select {
			case pack = <-runner.InChan():
				pack, err = plugins.PipeDecoder(self.common.Decoder, pack)
				if err != nil {
					log.Printf("PipeDecoder :%s", err)
					pack.Recycle()
					continue
				}
				pack, err = plugins.PipeEncoder(self.common.Encoder, pack)
				if err != nil {
					log.Printf("PipeEncoder :%s", err)
					pack.Recycle()
					continue
				}
				message = &proto.Message{Value: pack.Msg.MsgBytes}
				out.data = append(out.data, message)
				pack.Recycle()
			case <-timer.C:
				self.batchChan <- out
				out = <-self.backChan
				timer.Reset(timerDuration)
			case <-ticker.C:
				if err != nil {
					bcf := kafka.NewBrokerConf(self.config.ClientId)
					//bcf.AllowTopicCreation = true

					// connect to kafka cluster
					self.broker, err = kafka.Dial(self.config.Addrs, bcf)
					if err != nil {
						log.Printf("cannot reconnect to kafka cluster: %s", err)
					}
				}
			}
		}
	} else {
		for {
			pack = <-runner.InChan()
			message = &proto.Message{Value: pack.Msg.MsgBytes}
			if _, err = self.producer.Produce(self.config.Topic, self.config.Partition, message); err != nil {
				log.Printf("cannot produce message to %s:%d: %s", self.config.Topic, self.config.Partition, err)
			}
			pack.Recycle()
		}
	}
	return nil
}
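
The committer goroutine started at the top of Run is not shown. Given the channel protocol (full batches arrive on batchChan, drained ones are handed back on backChan), it presumably looks something like this hypothetical reconstruction; transient produce failures are only logged here, leaving errChan for fatal errors:

func (self *KafkaOutput) committer(runner plugins.OutputRunner, errChan chan error) {
	for b := range self.batchChan {
		if len(b.data) > 0 {
			if _, err := self.distributingProducer.Distribute(self.config.Topic, b.data...); err != nil {
				log.Printf("cannot distribute %d messages to %s: %s",
					len(b.data), self.config.Topic, err)
			}
		}
		b.data = b.data[:0] // reuse the backing array
		self.backChan <- b  // hand the drained batch back to Run
	}
}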
Example 11
// NewMarshaler connects to a cluster (given broker addresses) and prepares to handle marshalling
// requests. Given the way this system works, the marshaler has to process all messages in the
// topic before it's safely able to begin operating. This might take a while.
func NewMarshaler(clientID, groupID string, brokers []string) (*Marshaler, error) {
	// TODO: It might be nice to make the marshaler agnostic of clients and able to support
	// requests from N clients/groups. For now, though, we require instantiating a new
	// marshaler for every client/group.
	brokerConf := kafka.NewBrokerConf("PortalMarshal")
	broker, err := kafka.Dial(brokers, brokerConf)
	if err != nil {
		return nil, err
	}

	// Get offset coordinator so we can look up (and save) committed offsets later.
	coordinatorConf := kafka.NewOffsetCoordinatorConf(groupID)
	coordinator, err := broker.OffsetCoordinator(coordinatorConf)
	if err != nil {
		return nil, err
	}

	ws := &Marshaler{
		quit:       new(int32),
		rsteps:     new(int32),
		instanceID: newInstanceID(),
		clientID:   clientID,
		groupID:    groupID,
		kafka:      broker,
		offsets:    coordinator,
		producer:   broker.Producer(kafka.NewProducerConf()),
		topics:     make(map[string]int),
		groups:     make(map[string]map[string]*topicState),
		jitters:    make(chan time.Duration, 100),
	}

	// Do an initial metadata fetch; this will block for a bit
	err = ws.refreshMetadata()
	if err != nil {
		return nil, fmt.Errorf("Failed to get metadata: %s", err)
	}

	// If there is no marshal topic, then we can't run. The admins must go create the topic
	// before they can use this library. Please see the README.
	ws.partitions = ws.Partitions(MarshalTopic)
	if ws.partitions == 0 {
		return nil, errors.New("Marshalling topic not found. Please see the documentation.")
	}

	// Now we start a goroutine to start consuming each of the partitions in the marshal
	// topic. Note that this doesn't handle increasing the partition count on that topic
	// without stopping all consumers.
	ws.rationalizers.Add(ws.partitions)
	for id := 0; id < ws.partitions; id++ {
		go ws.rationalize(id, ws.kafkaConsumerChannel(id))
	}

	// A jitter calculator, just fills a channel with random numbers so that other
	// people don't have to build their own random generator...
	go func() {
		rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
		for {
			jitter := rnd.Intn(HeartbeatInterval/2) + (HeartbeatInterval / 2)
			ws.jitters <- time.Duration(jitter) * time.Second
		}
	}()

	// Now start the metadata refreshing goroutine
	go func() {
		for atomic.LoadInt32(ws.quit) != 1 {
			time.Sleep(<-ws.jitters)
			log.Infof("Refreshing topic metadata.")
			ws.refreshMetadata()

			// See if the number of partitions in the marshal topic went up. If so, this is a
			// fatal error as it means we lose coordination. In theory a mass die-off of workers
			// is bad, but so is upsharding the coordination topic without shutting down
			// everything. At least this limits the damage horizon?
			if ws.Partitions(MarshalTopic) != ws.partitions {
				log.Fatalf("Marshal topic partition count changed. FATAL!")
			}
		}
	}()

	// Wait for all rationalizers to come alive
	log.Infof("Waiting for all rationalizers to come alive.")
	ws.rationalizers.Wait()
	log.Infof("All rationalizers alive, Marshaler now alive.")

	return ws, nil
}
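
The offset coordinator created near the top is what lets the marshaler read and save committed offsets for the consumer group. For reference, a usage sketch of its two operations (the helper name and the partition value are placeholders, not part of the original):

// hypothetical helper: commit the last processed offset, then read it back
func (m *Marshaler) commitAndRestore(lastOffset int64) error {
	if err := m.offsets.Commit(MarshalTopic, 0, lastOffset); err != nil {
		return err
	}
	// later, e.g. after a restart, resume from the committed offset
	offset, _, err := m.offsets.Offset(MarshalTopic, 0)
	if err != nil {
		return err
	}
	log.Infof("resuming %s:0 from offset %d", MarshalTopic, offset)
	return nil
}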