Example #1
File: scheduler.go Project: elodina/syphon
func NewElodinaTransportScheduler(config ElodinaTransportSchedulerConfig) *ElodinaTransportScheduler {
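	// Copy the consumer settings onto a fresh siesta connector configuration.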
	connectorConfig := siesta.NewConnectorConfig()
	connectorConfig.BrokerList = config.ConsumerConfig.BrokerList
	connectorConfig.ClientID = config.ConsumerConfig.ClientID
	connectorConfig.CommitOffsetBackoff = config.ConsumerConfig.CommitOffsetBackoff
	connectorConfig.CommitOffsetRetries = config.ConsumerConfig.CommitOffsetRetries
	connectorConfig.ConnectTimeout = config.ConsumerConfig.ConnectTimeout
	connectorConfig.ConsumerMetadataBackoff = config.ConsumerConfig.ConsumerMetadataBackoff
	connectorConfig.ConsumerMetadataRetries = config.ConsumerConfig.ConsumerMetadataRetries
	connectorConfig.FetchMaxWaitTime = config.ConsumerConfig.FetchMaxWaitTime
	connectorConfig.FetchMinBytes = config.ConsumerConfig.FetchMinBytes
	connectorConfig.FetchSize = config.ConsumerConfig.FetchSize
	connectorConfig.KeepAlive = config.ConsumerConfig.KeepAlive
	connectorConfig.KeepAliveTimeout = config.ConsumerConfig.KeepAliveTimeout
	connectorConfig.MaxConnections = config.ConsumerConfig.MaxConnections
	connectorConfig.MaxConnectionsPerBroker = config.ConsumerConfig.MaxConnectionsPerBroker
	connectorConfig.MetadataBackoff = config.ConsumerConfig.MetadataBackoff
	connectorConfig.MetadataRetries = config.ConsumerConfig.MetadataRetries
	connectorConfig.ReadTimeout = config.ConsumerConfig.ReadTimeout
	connectorConfig.WriteTimeout = config.ConsumerConfig.WriteTimeout
	kafkaClient, err := siesta.NewDefaultConnector(connectorConfig)
	if err != nil {
		panic(err)
	}

	scheduler := &ElodinaTransportScheduler{
		config:            &config,
		taskIdToTaskState: make(map[string]*ElodinaTransport),
		kafkaClient:       kafkaClient,
	}

	scheduler.TakenTopicPartitions = consumer.NewTopicAndPartitionSet()

	return scheduler
}
Example #2
File: bench.go Project: ruo91/syscol
func testSiesta(brokerList string, topic string, partition int32, seconds int) {
	stop := false

	config := siesta.NewConnectorConfig()
	config.BrokerList = strings.Split(brokerList, ",")

	connector, err := siesta.NewDefaultConnector(config)
	if err != nil {
		panic(err)
	}

	messageChannel := make(chan []*siesta.MessageAndMetadata, 10000)
	count := 0
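	// Count fetched messages off the main loop so accounting does not slow down fetching.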
	go func() {
		for {
			messages := <-messageChannel
			count += len(messages)
		}
	}()

	// warm up
	fmt.Println("warming up")
	for i := 0; i < 5; i++ {
		connector.Fetch(topic, partition, 0)
	}
	fmt.Println("warm up finished, starting")

	go func() {
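		// Signal the fetch loop to stop after the requested duration.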
		time.Sleep(time.Duration(seconds) * time.Second)
		stop = true
	}()

	offset := int64(0)
	for !stop {
		response, err := connector.Fetch(topic, partition, offset)
		if err != nil {
			panic(err)
		}
		messages, err := response.GetMessages()
		if err != nil {
			panic(err)
		}
		messageChannel <- messages
		if len(messages) > 0 {
			offset = messages[len(messages)-1].Offset + 1
		}
	}

	fmt.Printf("%d within %d secnods\n", count, seconds)
	fmt.Printf("%d average\n", count/seconds)
}
Example #3
File: bench.go Project: ruo91/syscol
func main() {
	config := siesta.NewConnectorConfig()
	config.BrokerList = []string{"localhost:9092"}

	connector, err := siesta.NewDefaultConnector(config)
	if err != nil {
		panic(err)
	}

	producerConfig := &siesta.ProducerConfig{
		BatchSize:       10000,
		ClientID:        "siesta",
		MaxRequests:     10,
		SendRoutines:    10,
		ReceiveRoutines: 10,
		ReadTimeout:     5 * time.Second,
		WriteTimeout:    5 * time.Second,
		RequiredAcks:    1,
		AckTimeoutMs:    2000,
		Linger:          1 * time.Second,
	}
	producer := siesta.NewKafkaProducer(producerConfig, siesta.ByteSerializer, siesta.StringSerializer, connector)

	metadataChannel := make(chan interface{}, 10000)
	count := 0
	start := time.Now()

	for i := 0; i < 10; i++ {
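		// Each of the ten goroutines drains the token channel and prints the approximate per-second send count.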
		go func() {
			for {
				<-metadataChannel
				count++

				elapsed := time.Since(start)
				if elapsed.Seconds() >= 1 {
					fmt.Printf("Per Second %d\n", count)
					count = 0
					start = time.Now()
				}
			}
		}()
	}

	for {
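		// Send records in a tight loop and push a token per send so the counting goroutines can report throughput.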
		producer.Send(&siesta.ProducerRecord{Topic: "pr1", Value: "hello world"})
		metadataChannel <- nil
	}
}
Example #4
// This will be called right after connecting to the ConsumerCoordinator so this client can initialize itself,
// e.g. with a bootstrap broker list. May return an error to signal this client is unable to work with the given configuration.
func (this *SiestaClient) Initialize() error {
	bootstrapBrokers, err := BootstrapBrokers(this.config.Coordinator)
	if err != nil {
		return err
	}

	connectorConfig := siesta.NewConnectorConfig()
	connectorConfig.BrokerList = bootstrapBrokers
	connectorConfig.ReadTimeout = this.config.SocketTimeout
	connectorConfig.WriteTimeout = this.config.SocketTimeout
	connectorConfig.ConnectTimeout = this.config.SocketTimeout
	connectorConfig.FetchSize = this.config.FetchMessageMaxBytes
	connectorConfig.ClientID = this.config.Clientid

	this.connector, err = siesta.NewDefaultConnector(connectorConfig)
	if err != nil {
		return err
	}

	return nil
}
Example #5
func (e *Executor) newProducer(valueSerializer func(interface{}) ([]byte, error)) (*siesta.KafkaProducer, error) {
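	// Producer settings are loaded from the properties file referenced by Config.ProducerProperties.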
	producerConfig, err := siesta.ProducerConfigFromFile(Config.ProducerProperties)
	if err != nil {
		return nil, err
	}

	c, err := cfg.LoadNewMap(Config.ProducerProperties)
	if err != nil {
		return nil, err
	}

	connectorConfig := siesta.NewConnectorConfig()
	connectorConfig.BrokerList = strings.Split(c["bootstrap.servers"], ",")

	connector, err := siesta.NewDefaultConnector(connectorConfig)
	if err != nil {
		return nil, err
	}

	return siesta.NewKafkaProducer(producerConfig, siesta.ByteSerializer, valueSerializer, connector), nil
}
Example #6
File: consumer.go Project: elodina/syphon
func NewPartitionConsumer(consumerConfig PartitionConsumerConfig) *PartitionConsumer {
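	// Copy the consumer settings onto a fresh siesta connector configuration.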
	connectorConfig := siesta.NewConnectorConfig()
	connectorConfig.BrokerList = consumerConfig.BrokerList
	connectorConfig.ClientID = consumerConfig.ClientID
	connectorConfig.CommitOffsetBackoff = consumerConfig.CommitOffsetBackoff
	connectorConfig.CommitOffsetRetries = consumerConfig.CommitOffsetRetries
	connectorConfig.ConnectTimeout = consumerConfig.ConnectTimeout
	connectorConfig.ConsumerMetadataBackoff = consumerConfig.ConsumerMetadataBackoff
	connectorConfig.ConsumerMetadataRetries = consumerConfig.ConsumerMetadataRetries
	connectorConfig.FetchMaxWaitTime = consumerConfig.FetchMaxWaitTime
	connectorConfig.FetchMinBytes = consumerConfig.FetchMinBytes
	connectorConfig.FetchSize = consumerConfig.FetchSize
	connectorConfig.KeepAlive = consumerConfig.KeepAlive
	connectorConfig.KeepAliveTimeout = consumerConfig.KeepAliveTimeout
	connectorConfig.MaxConnections = consumerConfig.MaxConnections
	connectorConfig.MaxConnectionsPerBroker = consumerConfig.MaxConnectionsPerBroker
	connectorConfig.MetadataBackoff = consumerConfig.MetadataBackoff
	connectorConfig.MetadataRetries = consumerConfig.MetadataRetries
	connectorConfig.ReadTimeout = consumerConfig.ReadTimeout
	connectorConfig.WriteTimeout = consumerConfig.WriteTimeout
	kafkaClient, err := siesta.NewDefaultConnector(connectorConfig)
	if err != nil {
		panic(err)
	}

	consumer := &PartitionConsumer{
		config:      consumerConfig,
		kafkaClient: kafkaClient,
		fetchers:    make(map[string]map[int32]*FetcherState),
	}

	commitTimer := time.NewTimer(consumerConfig.CommitInterval)
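	// Commit loop: periodically flush fetched offsets and drop fetchers that were marked as removed.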
	go func() {
		for {
			<-commitTimer.C
			for topic, partitions := range consumer.fetchers {
				for partition, fetcherState := range partitions {
					offsetToCommit := fetcherState.GetOffset()
					if offsetToCommit > fetcherState.LastCommitted {
						err := consumer.kafkaClient.CommitOffset(consumer.config.Group, topic, partition, offsetToCommit)
						if err != nil {
							log.Logger.Warn("Failed to commit offset: %s", err.Error())
						}
					}
					if fetcherState.Removed {
						inLock(&consumer.fetchersLock, func() {
							if consumer.fetchers[topic][partition].Removed {
								delete(consumer.fetchers[topic], partition)
							}
						})
					}
				}
			}
			commitTimer.Reset(consumerConfig.CommitInterval)
		}
	}()

	return consumer
}