// Configure initializes this producer with values from a plugin config. func (prod *Socket) Configure(conf core.PluginConfig) error { err := prod.ProducerBase.Configure(conf) if err != nil { return err } prod.SetStopCallback(prod.close) prod.batchMaxCount = conf.GetInt("BatchMaxCount", 8192) prod.batchFlushCount = conf.GetInt("BatchFlushCount", prod.batchMaxCount/2) prod.batchFlushCount = shared.MinI(prod.batchFlushCount, prod.batchMaxCount) prod.batchTimeout = time.Duration(conf.GetInt("BatchTimeoutSec", 5)) * time.Second prod.bufferSizeByte = conf.GetInt("ConnectionBufferSizeKB", 1<<10) << 10 // 1 MB prod.acknowledge = shared.Unescape(conf.GetString("Acknowledge", "")) prod.ackTimeout = time.Duration(conf.GetInt("AckTimeoutMs", 2000)) * time.Millisecond prod.address, prod.protocol = shared.ParseAddress(conf.GetString("Address", ":5880")) if prod.protocol != "unix" { if prod.acknowledge != "" { prod.protocol = "tcp" } else { prod.protocol = "udp" } } prod.batch = core.NewMessageBatch(prod.batchMaxCount) prod.assembly = core.NewWriterAssembly(nil, prod.Drop, prod.GetFormatter()) prod.assembly.SetValidator(prod.validate) prod.assembly.SetErrorHandler(prod.onWriteError) prod.SetCheckFuseCallback(prod.tryConnect) return nil }
// Configure initializes this producer with values from a plugin config. func (prod *Socket) Configure(conf core.PluginConfig) error { err := prod.ProducerBase.Configure(conf) if err != nil { return err } bufferSizeMax := conf.GetInt("BatchSizeMaxKB", 8<<10) << 10 prod.batchSize = conf.GetInt("BatchSizeByte", 8192) prod.batchTimeout = time.Duration(conf.GetInt("BatchTimeoutSec", 5)) * time.Second prod.bufferSizeKB = conf.GetInt("ConnectionBufferSizeKB", 1<<10) // 1 MB prod.acknowledge = shared.Unescape(conf.GetString("Acknowledge", "")) prod.address, prod.protocol = shared.ParseAddress(conf.GetString("Address", ":5880")) if prod.protocol != "unix" { if prod.acknowledge != "" { prod.protocol = "tcp" } else { prod.protocol = "udp" } } prod.batch = core.NewMessageBatch(bufferSizeMax, prod.ProducerBase.GetFormatter()) return nil }
// newFileState creates a fileState holding a message batch of the given
// maximum byte size, a wait group for background writers, and the timeout
// used when flushing pending data.
func newFileState(bufferSizeMax int, timeout time.Duration) *fileState {
	state := fileState{
		batch:        core.NewMessageBatch(bufferSizeMax, nil),
		bgWriter:     &sync.WaitGroup{},
		flushTimeout: timeout,
	}
	return &state
}
// newFileState creates a fileState holding a message batch capped at
// maxMessageCount messages, a wait group for background writers, and a
// writer assembly wired to the given formatter and drop handler. The
// assembly's writer is set later, once the target file is open.
func newFileState(maxMessageCount int, formatter core.Formatter, drop func(core.Message), timeout time.Duration) *fileState {
	state := &fileState{
		batch:        core.NewMessageBatch(maxMessageCount),
		bgWriter:     &sync.WaitGroup{},
		flushTimeout: timeout,
		assembly:     core.NewWriterAssembly(nil, drop, formatter),
	}
	return state
}
// Configure initializes this producer with values from a plugin config. func (prod *Scribe) Configure(conf core.PluginConfig) error { err := prod.ProducerBase.Configure(conf) if err != nil { return err } prod.SetStopCallback(prod.close) host := conf.GetString("Address", "localhost:1463") prod.batchMaxCount = conf.GetInt("BatchMaxCount", 8192) prod.windowSize = prod.batchMaxCount prod.batchFlushCount = conf.GetInt("BatchFlushCount", prod.batchMaxCount/2) prod.batchFlushCount = shared.MinI(prod.batchFlushCount, prod.batchMaxCount) prod.batchTimeout = time.Duration(conf.GetInt("BatchTimeoutSec", 5)) * time.Second prod.batch = core.NewMessageBatch(prod.batchMaxCount) prod.bufferSizeByte = conf.GetInt("ConnectionBufferSizeKB", 1<<10) << 10 // 1 MB prod.category = conf.GetStreamMap("Category", "") // Initialize scribe connection prod.socket, err = thrift.NewTSocket(host) if err != nil { Log.Error.Print("Scribe socket error:", err) return err } prod.transport = thrift.NewTFramedTransport(prod.socket) binProtocol := thrift.NewTBinaryProtocol(prod.transport, false, false) prod.scribe = scribe.NewScribeClientProtocol(prod.transport, binProtocol, binProtocol) prod.lastMetricUpdate = time.Now() prod.counters = make(map[string]*int64) shared.Metric.New(scribeMetricWindowSize) shared.Metric.SetI(scribeMetricWindowSize, prod.windowSize) for _, category := range prod.category { shared.Metric.New(scribeMetricMessages + category) shared.Metric.New(scribeMetricMessagesSec + category) prod.counters[category] = new(int64) } prod.SetCheckFuseCallback(prod.tryOpenConnection) return nil }
func newSpoolFile(prod *Spooling, streamName string, source core.MessageSource) *spoolFile { spool := &spoolFile{ file: nil, batch: core.NewMessageBatch(prod.batchMaxCount), assembly: core.NewWriterAssembly(nil, prod.Drop, prod.GetFormatter()), fileCreated: time.Now(), streamName: streamName, basePath: prod.path + "/" + streamName, prod: prod, source: source, lastMetricUpdate: time.Now(), } shared.Metric.New(spoolingMetricWrite + streamName) shared.Metric.New(spoolingMetricWriteSec + streamName) shared.Metric.New(spoolingMetricRead + streamName) shared.Metric.New(spoolingMetricReadSec + streamName) go spool.read() return spool }
// Configure initializes this producer with values from a plugin config. func (prod *InfluxDB) Configure(conf core.PluginConfig) error { if err := prod.ProducerBase.Configure(conf); err != nil { return err } prod.SetStopCallback(prod.close) version := conf.GetInt("Version", 100) if conf.GetBool("UseVersion08", false) { version = 80 } switch { case version < 90: Log.Debug.Print("Using InfluxDB 0.8.x format") prod.writer = new(influxDBWriter08) case version == 90: Log.Debug.Print("Using InfluxDB 0.9.0 format") prod.writer = new(influxDBWriter09) default: Log.Debug.Print("Using InfluxDB 0.9.1+ format") prod.writer = new(influxDBWriter10) } if err := prod.writer.configure(conf, prod); err != nil { return err } prod.batchMaxCount = conf.GetInt("BatchMaxCount", 8192) prod.batchFlushCount = conf.GetInt("BatchFlushCount", prod.batchMaxCount/2) prod.batchFlushCount = shared.MinI(prod.batchFlushCount, prod.batchMaxCount) prod.batchTimeout = time.Duration(conf.GetInt("BatchTimeoutSec", 5)) * time.Second prod.batch = core.NewMessageBatch(prod.batchMaxCount) prod.assembly = core.NewWriterAssembly(prod.writer, prod.Drop, prod.GetFormatter()) return nil }
// Configure initializes this producer with values from a plugin config.
// It maps the plugin's config keys onto a sarama-style kafka.Config
// (network, metadata, producer and flush settings), registers per-topic
// metrics and installs the fuse callback used to probe broker availability.
func (prod *Kafka) Configure(conf core.PluginConfig) error {
	err := prod.ProducerBase.Configure(conf)
	if err != nil {
		return err
	}
	prod.SetStopCallback(prod.close)

	prod.servers = conf.GetStringArray("Servers", []string{"localhost:9092"})
	prod.topic = conf.GetStreamMap("Topic", "")
	prod.clientID = conf.GetString("ClientId", "gollum")
	prod.lastMetricUpdate = time.Now()

	prod.config = kafka.NewConfig()
	// NOTE(review): "ClientId" is read twice (also into prod.clientID above);
	// consider assigning prod.clientID here to avoid the duplicate lookup.
	prod.config.ClientID = conf.GetString("ClientId", "gollum")
	prod.config.ChannelBufferSize = conf.GetInt("MessageBufferCount", 256)

	// Network timeouts: one configured dial timeout is reused for reads/writes.
	prod.config.Net.MaxOpenRequests = conf.GetInt("MaxOpenRequests", 5)
	prod.config.Net.DialTimeout = time.Duration(conf.GetInt("ServerTimeoutSec", 30)) * time.Second
	prod.config.Net.ReadTimeout = prod.config.Net.DialTimeout
	prod.config.Net.WriteTimeout = prod.config.Net.DialTimeout

	// Leader election retries and metadata refresh cadence.
	prod.config.Metadata.Retry.Max = conf.GetInt("ElectRetries", 3)
	prod.config.Metadata.Retry.Backoff = time.Duration(conf.GetInt("ElectTimeoutMs", 250)) * time.Millisecond
	prod.config.Metadata.RefreshFrequency = time.Duration(conf.GetInt("MetadataRefreshMs", 10000)) * time.Millisecond

	// Config value is given in KB; converted to bytes here.
	prod.config.Producer.MaxMessageBytes = conf.GetInt("BatchSizeMaxKB", 1<<10) << 10
	prod.config.Producer.RequiredAcks = kafka.RequiredAcks(conf.GetInt("RequiredAcks", int(kafka.WaitForLocal)))
	// NOTE(review): config key "TimoutMs" is missing an "e". Renaming it would
	// silently break existing config files, so it is left as-is; schedule the
	// fix (with a backward-compatible fallback read) for a breaking release.
	prod.config.Producer.Timeout = time.Duration(conf.GetInt("TimoutMs", 1500)) * time.Millisecond
	// Successes must be returned too, as this producer consumes both channels.
	prod.config.Producer.Return.Errors = true
	prod.config.Producer.Return.Successes = true

	// Unknown compression strings fall through to "none".
	switch strings.ToLower(conf.GetString("Compression", compressNone)) {
	default:
		fallthrough
	case compressNone:
		prod.config.Producer.Compression = kafka.CompressionNone
	case compressGZIP:
		prod.config.Producer.Compression = kafka.CompressionGZIP
	case compressSnappy:
		prod.config.Producer.Compression = kafka.CompressionSnappy
	}

	// Unknown partitioner strings fall through to hash partitioning, even
	// though "random" is the documented default for the missing-key case.
	switch strings.ToLower(conf.GetString("Partitioner", partRandom)) {
	case partRandom:
		prod.config.Producer.Partitioner = kafka.NewRandomPartitioner
	case partRoundrobin:
		prod.config.Producer.Partitioner = kafka.NewRoundRobinPartitioner
	default:
		fallthrough
	case partHash:
		prod.config.Producer.Partitioner = kafka.NewHashPartitioner
	}

	// Flush triggers: size in bytes, minimum message count, time based, and a
	// hard per-request cap (0 = unlimited).
	prod.config.Producer.Flush.Bytes = conf.GetInt("BatchSizeByte", 8192)
	prod.config.Producer.Flush.Messages = conf.GetInt("BatchMinCount", 1)
	prod.config.Producer.Flush.Frequency = time.Duration(conf.GetInt("BatchTimeoutSec", 3)) * time.Second
	prod.config.Producer.Flush.MaxMessages = conf.GetInt("BatchMaxCount", 0)
	prod.config.Producer.Retry.Max = conf.GetInt("SendRetries", 3)
	prod.config.Producer.Retry.Backoff = time.Duration(conf.GetInt("SendTimeoutMs", 100)) * time.Millisecond

	prod.batch = core.NewMessageBatch(conf.GetInt("Channel", 8192))
	prod.counters = make(map[string]*int64)

	// Register per-topic throughput metrics (absolute count and rate).
	for _, topic := range prod.topic {
		shared.Metric.New(kafkaMetricMessages + topic)
		shared.Metric.New(kafkaMetricMessagesSec + topic)
		prod.counters[topic] = new(int64)
	}
	shared.Metric.New(kafkaMetricMissCount)

	prod.SetCheckFuseCallback(prod.tryOpenConnection)
	return nil
}