// mirror_maker.go (forked from elodina/go-kafka)
/* Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

package go_kafka_client

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"github.com/Shopify/sarama"
	"hash/fnv"
	"strings"
)

// MirrorMakerConfig defines configuration options for MirrorMaker.
type MirrorMakerConfig struct {
	// Whitelist of topics to mirror. Exactly one of Whitelist or Blacklist must be set.
	Whitelist string

	// Blacklist of topics to mirror. Exactly one of Whitelist or Blacklist must be set.
	Blacklist string

	// Consumer configurations used to consume from the source cluster.
	ConsumerConfigs []string

	// Embedded producer config.
	ProducerConfig string

	// Number of producer instances.
	NumProducers int

	// Number of consumption streams.
	NumStreams int

	// Flag to preserve partition number. E.g. if a message was read from partition 5 it will be written to partition 5. Note that this can affect performance.
	PreservePartitions bool

	// Flag to preserve message order. E.g. the message sequence 1, 2, 3, 4, 5 will remain 1, 2, 3, 4, 5 in the destination topic. Note that this can affect performance.
	PreserveOrder bool

	// Destination topic prefix. E.g. if a message was read from topic "test" and the prefix is "dc1_", it will be written to topic "dc1_test".
	TopicPrefix string

	// Number of messages that are buffered between the consumer and producer.
	ChannelSize int
}

// NewMirrorMakerConfig creates an empty MirrorMakerConfig.
func NewMirrorMakerConfig() *MirrorMakerConfig {
	return &MirrorMakerConfig{}
}
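
// The function below is an illustrative sketch, not part of the original tool. It shows one way a
// MirrorMaker could be configured, started and stopped; the config file paths and numeric values
// are hypothetical placeholders.
func exampleMirrorMakerUsage() {
	config := NewMirrorMakerConfig()
	config.Whitelist = "^mirror_.*"                           // mirror every topic matching this pattern
	config.ConsumerConfigs = []string{"consumer.properties"}  // hypothetical consumer config file
	config.ProducerConfig = "producer.properties"             // hypothetical producer config file
	config.NumProducers = 2
	config.NumStreams = 4
	config.PreserveOrder = true // route each (topic, partition) to a fixed producer
	config.ChannelSize = 100

	mirrorMaker := NewMirrorMaker(config)
	go mirrorMaker.Start() // Start blocks, so run it in a goroutine

	// ... later, shut everything down gracefully.
	mirrorMaker.Stop()
}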

// MirrorMaker is a tool to mirror a source Kafka cluster into a target (mirror) Kafka cluster.
// It uses a Kafka consumer to consume messages from the source cluster and re-publishes those messages to the target cluster.
type MirrorMaker struct {
	config          *MirrorMakerConfig
	consumers       []*Consumer
	producers       []*sarama.Producer
	messageChannels []chan *Message
}

// NewMirrorMaker creates a new MirrorMaker using the given MirrorMakerConfig.
func NewMirrorMaker(config *MirrorMakerConfig) *MirrorMaker {
	return &MirrorMaker{
		config: config,
	}
}

// Start starts the MirrorMaker. This method is blocking and should probably be run in a separate goroutine.
func (this *MirrorMaker) Start() {
	this.initializeMessageChannels()
	this.startConsumers()
	this.startProducers()
}

// Stop gracefully stops the MirrorMaker.
func (this *MirrorMaker) Stop() {
	consumerCloseChannels := make([]<-chan bool, 0)
	for _, consumer := range this.consumers {
		consumerCloseChannels = append(consumerCloseChannels, consumer.Close())
	}

	for _, ch := range consumerCloseChannels {
		<-ch
	}

	for _, ch := range this.messageChannels {
		close(ch)
	}

	//TODO maybe drain message channels first?
	for _, producer := range this.producers {
		producer.Close()
	}
}

func (this *MirrorMaker) startConsumers() {
	for _, consumerConfigFile := range this.config.ConsumerConfigs {
		config, err := ConsumerConfigFromFile(consumerConfigFile)
		if err != nil {
			panic(err)
		}
		zkConfig, err := ZookeeperConfigFromFile(consumerConfigFile)
		if err != nil {
			panic(err)
		}
		config.NumWorkers = 1
		config.AutoOffsetReset = SmallestOffset
		config.Coordinator = NewZookeeperCoordinator(zkConfig)
		config.WorkerFailureCallback = func(_ *WorkerManager) FailedDecision {
			return CommitOffsetAndContinue
		}
		config.WorkerFailedAttemptCallback = func(_ *Task, _ WorkerResult) FailedDecision {
			return CommitOffsetAndContinue
		}

		if this.config.PreserveOrder {
			numProducers := this.config.NumProducers
			config.Strategy = func(_ *Worker, msg *Message, id TaskId) WorkerResult {
				this.messageChannels[topicPartitionHash(msg)%numProducers] <- msg
				return NewSuccessfulResult(id)
			}
		} else {
			config.Strategy = func(_ *Worker, msg *Message, id TaskId) WorkerResult {
				this.messageChannels[0] <- msg
				return NewSuccessfulResult(id)
			}
		}

		consumer := NewConsumer(config)
		this.consumers = append(this.consumers, consumer)
		if this.config.Whitelist != "" {
			go consumer.StartWildcard(NewWhiteList(this.config.Whitelist), this.config.NumStreams)
		} else if this.config.Blacklist != "" {
			go consumer.StartWildcard(NewBlackList(this.config.Blacklist), this.config.NumStreams)
		} else {
			panic("Consume pattern not specified")
		}
	}
}

func (this *MirrorMaker) initializeMessageChannels() {
	if this.config.PreserveOrder {
		for i := 0; i < this.config.NumProducers; i++ {
			this.messageChannels = append(this.messageChannels, make(chan *Message, this.config.ChannelSize))
		}
	} else {
		this.messageChannels = append(this.messageChannels, make(chan *Message, this.config.ChannelSize))
	}
}

func (this *MirrorMaker) startProducers() {
	for i := 0; i < this.config.NumProducers; i++ {
		conf, err := ProducerConfigFromFile(this.config.ProducerConfig)
		if err != nil {
			panic(err)
		}
		if err = conf.Validate(); err != nil {
			panic(err)
		}

		client, err := sarama.NewClient(conf.Clientid, conf.BrokerList, sarama.NewClientConfig())
		if err != nil {
			panic(err)
		}

		config := sarama.NewProducerConfig()
		config.ChannelBufferSize = conf.SendBufferSize
		switch strings.ToLower(conf.CompressionCodec) {
		case "none":
			config.Compression = sarama.CompressionNone
		case "gzip":
			config.Compression = sarama.CompressionGZIP
		case "snappy":
			config.Compression = sarama.CompressionSnappy
		}
		config.FlushByteCount = conf.FlushByteCount
		config.FlushFrequency = conf.FlushTimeout
		config.FlushMsgCount = conf.BatchSize
		config.MaxMessageBytes = conf.MaxMessageBytes
		config.MaxMessagesPerReq = conf.MaxMessagesPerRequest
		if this.config.PreservePartitions {
			config.Partitioner = NewIntPartitioner
		} else {
			config.Partitioner = sarama.NewRandomPartitioner
		}
		config.RequiredAcks = sarama.RequiredAcks(conf.Acks)
		config.RetryBackoff = conf.RetryBackoff
		config.Timeout = conf.Timeout

		producer, err := sarama.NewProducer(client, config)
		if err != nil {
			panic(err)
		}
		this.producers = append(this.producers, producer)
		if this.config.PreserveOrder {
			go this.produceRoutine(producer, i)
		} else {
			go this.produceRoutine(producer, 0)
		}
	}
}

func (this *MirrorMaker) produceRoutine(producer *sarama.Producer, channelIndex int) {
	for msg := range this.messageChannels[channelIndex] {
		var key sarama.Encoder
		if !this.config.PreservePartitions {
			key = sarama.ByteEncoder(msg.Key)
		} else {
			key = Int32Encoder(msg.Partition)
		}
		producer.Input() <- &sarama.ProducerMessage{Topic: this.config.TopicPrefix + msg.Topic, Key: key, Value: sarama.ByteEncoder(msg.Value)}
	}
}

// topicPartitionHash derives an FNV-1a hash from a message's topic and partition,
// used to pin every (topic, partition) pair to a single message channel.
func topicPartitionHash(msg *Message) int {
	h := fnv.New32a()
	h.Write([]byte(fmt.Sprintf("%s%d", msg.Topic, msg.Partition)))
	return int(h.Sum32())
}

// IntPartitioner is used when we want to preserve partitions.
// This partitioner should be used ONLY with Int32Encoder keys as it assumes their exact byte layout.
type IntPartitioner struct{}

// NewIntPartitioner is a PartitionerConstructor function used by the Sarama library.
func NewIntPartitioner() sarama.Partitioner {
	return new(IntPartitioner)
}

// Partition takes the key and partition count and chooses a partition. IntPartitioner should ONLY receive Int32Encoder keys.
// Passing it an Int32Encoder(2) key means it should assign the incoming message partition = 2 (assuming this partition exists, otherwise there is no guarantee which partition will be picked).
func (this *IntPartitioner) Partition(key sarama.Encoder, numPartitions int32) (int32, error) {
	if key == nil {
		panic("IntPartitioner does not work without keys.")
	}

	b, err := key.Encode()
	if err != nil {
		return -1, err
	}

	// Int32Encoder produces a 4-byte little-endian value, so decode it the same way
	// (the previous uvarint read only agreed with that layout for partitions < 128).
	var partition uint32
	if err := binary.Read(bytes.NewBuffer(b), binary.LittleEndian, &partition); err != nil {
		return -1, err
	}

	return int32(partition) % numPartitions, nil
}

// RequiresConsistency is another Partitioner method used by Sarama. Returning true tells it
// that the same key must always map to the same partition.
func (this *IntPartitioner) RequiresConsistency() bool {
	return true
}

// Int32Encoder takes an int32 and is able to transform it into a []byte.
type Int32Encoder int32

// Encode encodes the current value into a 4-byte little-endian []byte. It should never return an error.
func (this Int32Encoder) Encode() ([]byte, error) {
	buf := make([]byte, 4)
	binary.LittleEndian.PutUint32(buf, uint32(this))
	return buf, nil
}

// Length for Int32Encoder is always 4.
func (this Int32Encoder) Length() int {
	return 4
}
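
// Illustrative sketch, not part of the original file: Int32Encoder and IntPartitioner round-trip a
// partition number, which is how PreservePartitions keeps a mirrored message on its source partition.
func exampleIntPartitionerRoundTrip() {
	key := Int32Encoder(5) // encodes the source partition as 4 little-endian bytes
	partitioner := NewIntPartitioner()
	partition, err := partitioner.Partition(key, 16) // assume the destination topic has 16 partitions
	if err != nil {
		panic(err)
	}
	fmt.Printf("message keyed with Int32Encoder(5) lands on partition %d\n", partition) // prints 5
}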