Example 1
// Configure initializes this producer with values from a plugin config.
func (prod *Redis) Configure(conf core.PluginConfig) error {
	err := prod.ProducerBase.Configure(conf)
	if err != nil {
		return err
	}

	fieldFormat, err := core.NewPluginWithType(conf.GetString("FieldFormatter", "format.Identifier"), conf)
	if err != nil {
		return err // ### return, plugin load error ###
	}
	prod.fieldFormat = fieldFormat.(core.Formatter)

	prod.password = conf.GetString("Password", "")
	prod.database = int64(conf.GetInt("Database", 0))
	prod.key = conf.GetString("Key", "default")
	prod.fieldFromParsed = conf.GetBool("FieldAfterFormat", false)
	prod.address, prod.protocol = shared.ParseAddress(conf.GetString("Address", ":6379"))

	switch strings.ToLower(conf.GetString("Storage", "hash")) {
	case "hash":
		prod.store = prod.storeHash
	case "list":
		prod.store = prod.storeList
	case "set":
		prod.store = prod.storeSet
	case "sortedset":
		prod.store = prod.storeSortedSet
	default:
		fallthrough
	case "string":
		prod.store = prod.storeString
	}

	return nil
}
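The switch above relies on Go's rule that fallthrough may appear in any clause except the last one, so placing default before case "string" routes unknown storage names to the string handler. A minimal standalone sketch of that pattern (names made up for illustration):

package main

import (
	"fmt"
	"strings"
)

func storeMode(storage string) string {
	switch strings.ToLower(storage) {
	case "hash":
		return "hash"
	default:
		fallthrough // legal because default is not the last clause
	case "string":
		return "string"
	}
}

func main() {
	fmt.Println(storeMode("HASH"))    // hash
	fmt.Println(storeMode("unknown")) // string
}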
Example 2
// Configure initializes this consumer with values from a plugin config.
func (cons *Http) Configure(conf core.PluginConfig) error {
	err := cons.ConsumerBase.Configure(conf)
	if err != nil {
		return err
	}

	cons.address = conf.GetString("Address", ":80")
	cons.readTimeoutSec = time.Duration(conf.GetInt("ReadTimeoutSec", 3)) * time.Second
	cons.withHeaders = conf.GetBool("WithHeaders", true)

	certificateFile := conf.GetString("Certificate", "")
	keyFile := conf.GetString("PrivateKey", "")

	if certificateFile != "" || keyFile != "" {
		if certificateFile == "" || keyFile == "" {
			return fmt.Errorf("There must always be a certificate and a private key or none of both")
		}

		cons.certificate = new(tls.Config)
		cons.certificate.NextProtos = []string{"http/1.1"}

		keypair, err := tls.LoadX509KeyPair(certificateFile, keyFile)
		if err != nil {
			return err
		}

		cons.certificate.Certificates = []tls.Certificate{keypair}
	}

	return nil
}
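LoadX509KeyPair comes from the standard crypto/tls package, so the certificate setup can be sketched in isolation. A minimal, self-contained variant of the logic above (file names are placeholders):

package main

import (
	"crypto/tls"
	"fmt"
	"log"
)

func loadTLS(certFile, keyFile string) (*tls.Config, error) {
	if certFile == "" && keyFile == "" {
		return nil, nil // no TLS requested
	}
	if certFile == "" || keyFile == "" {
		return nil, fmt.Errorf("both a certificate and a private key must be given, or neither")
	}
	keypair, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		return nil, err
	}
	return &tls.Config{
		NextProtos:   []string{"http/1.1"},
		Certificates: []tls.Certificate{keypair},
	}, nil
}

func main() {
	cfg, err := loadTLS("", "")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg == nil) // true: plain HTTP
}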
Example 3
// Configure initializes this producer with values from a plugin config.
func (prod *Socket) Configure(conf core.PluginConfig) error {
	err := prod.ProducerBase.Configure(conf)
	if err != nil {
		return err
	}
	prod.SetStopCallback(prod.close)

	prod.batchMaxCount = conf.GetInt("BatchMaxCount", 8192)
	prod.batchFlushCount = conf.GetInt("BatchFlushCount", prod.batchMaxCount/2)
	prod.batchFlushCount = shared.MinI(prod.batchFlushCount, prod.batchMaxCount)
	prod.batchTimeout = time.Duration(conf.GetInt("BatchTimeoutSec", 5)) * time.Second
	prod.bufferSizeByte = conf.GetInt("ConnectionBufferSizeKB", 1<<10) << 10 // 1 MB

	prod.acknowledge = shared.Unescape(conf.GetString("Acknowledge", ""))
	prod.ackTimeout = time.Duration(conf.GetInt("AckTimeoutMs", 2000)) * time.Millisecond
	prod.address, prod.protocol = shared.ParseAddress(conf.GetString("Address", ":5880"))

	if prod.protocol != "unix" {
		if prod.acknowledge != "" {
			prod.protocol = "tcp"
		} else {
			prod.protocol = "udp"
		}
	}

	prod.batch = core.NewMessageBatch(prod.batchMaxCount)
	prod.assembly = core.NewWriterAssembly(nil, prod.Drop, prod.GetFormatter())
	prod.assembly.SetValidator(prod.validate)
	prod.assembly.SetErrorHandler(prod.onWriteError)

	prod.SetCheckFuseCallback(prod.tryConnect)
	return nil
}
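The batch sizing lines follow a pattern that recurs in the examples below: the flush threshold defaults to half the maximum and is clamped so it can never exceed it, and KB values are converted to bytes by shifting left by 10. A small sketch with plain ints (minI stands in for shared.MinI):

package main

import "fmt"

// minI stands in for shared.MinI.
func minI(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func main() {
	batchMaxCount := 8192
	batchFlushCount := minI(batchMaxCount/2, batchMaxCount)
	bufferSizeByte := (1 << 10) << 10 // 1024 KB expressed in bytes

	fmt.Println(batchFlushCount, bufferSizeByte) // 4096 1048576
}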
Example 4
// Configure initializes this producer with values from a plugin config.
func (prod *Proxy) Configure(conf core.PluginConfig) error {
	err := prod.ProducerBase.Configure(conf)
	if err != nil {
		return err
	}
	prod.SetStopCallback(prod.close)

	prod.bufferSizeKB = conf.GetInt("ConnectionBufferSizeKB", 1<<10) // 1 MB
	prod.address, prod.protocol = shared.ParseAddress(conf.GetString("Address", ":5880"))
	if prod.protocol == "udp" {
		return fmt.Errorf("Proxy does not support UDP")
	}

	prod.timeout = time.Duration(conf.GetInt("TimeoutSec", 1)) * time.Second

	delimiter := shared.Unescape(conf.GetString("Delimiter", "\n"))
	offset := conf.GetInt("Offset", 0)
	flags := shared.BufferedReaderFlagEverything // pass all messages as-is

	partitioner := strings.ToLower(conf.GetString("Partitioner", "delimiter"))
	switch partitioner {
	case "binary_be":
		flags |= shared.BufferedReaderFlagBigEndian
		fallthrough

	case "binary", "binary_le":
		switch conf.GetInt("Size", 4) {
		case 1:
			flags |= shared.BufferedReaderFlagMLE8
		case 2:
			flags |= shared.BufferedReaderFlagMLE16
		case 4:
			flags |= shared.BufferedReaderFlagMLE32
		case 8:
			flags |= shared.BufferedReaderFlagMLE64
		default:
			return fmt.Errorf("Size only supports the value 1,2,4 and 8")
		}

	case "fixed":
		flags |= shared.BufferedReaderFlagMLEFixed
		offset = conf.GetInt("Size", 1)

	case "ascii":
		flags |= shared.BufferedReaderFlagMLE

	case "delimiter":
		// Nothing to add

	default:
		return fmt.Errorf("Unknown partitioner: %s", partitioner)
	}

	prod.reader = shared.NewBufferedReader(prod.bufferSizeKB, flags, offset, delimiter)
	return nil
}
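The partitioner switch composes the reader configuration from bit flags OR-ed together, with binary_be falling through into the binary handler after setting the endianness bit. A self-contained sketch of that flag style (the flag names here are invented, not shared's):

package main

import "fmt"

type readerFlags uint32

const (
	flagBigEndian readerFlags = 1 << iota
	flagMLE8
	flagMLE16
	flagMLE32
	flagMLE64
)

func main() {
	var flags readerFlags
	flags |= flagBigEndian
	flags |= flagMLE32

	fmt.Println(flags&flagBigEndian != 0) // true
	fmt.Println(flags&flagMLE16 != 0)     // false
}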
Example 5
// Configure initializes this consumer with values from a plugin config.
func (cons *Http) Configure(conf core.PluginConfig) error {
	err := cons.ConsumerBase.Configure(conf)
	if err != nil {
		return err
	}

	cons.address = conf.GetString("Address", ":80")
	cons.readTimeoutSec = time.Duration(conf.GetInt("ReadTimeoutSec", 3)) * time.Second
	cons.withHeaders = conf.GetBool("WithHeaders", true)
	return nil
}
Example 6
// Configure initializes this producer with values from a plugin config.
func (prod *Websocket) Configure(conf core.PluginConfig) error {
	err := prod.ProducerBase.Configure(conf)
	if err != nil {
		return err
	}
	prod.SetStopCallback(prod.close)

	prod.address = conf.GetString("Address", ":81")
	prod.path = conf.GetString("Path", "/")
	prod.readTimeoutSec = time.Duration(conf.GetInt("ReadTimeoutSec", 3)) * time.Second

	return nil
}
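The timeout line shows the conversion idiom used throughout these examples: a plain integer from the config is multiplied by a time unit to form a time.Duration. A tiny standalone example:

package main

import (
	"fmt"
	"time"
)

func main() {
	readTimeoutSec := 3  // e.g. the value behind conf.GetInt("ReadTimeoutSec", 3)
	ackTimeoutMs := 2000 // e.g. the value behind conf.GetInt("AckTimeoutMs", 2000)

	readTimeout := time.Duration(readTimeoutSec) * time.Second
	ackTimeout := time.Duration(ackTimeoutMs) * time.Millisecond

	fmt.Println(readTimeout, ackTimeout) // 3s 2s
}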
Example 7
// Configure initializes this producer with values from a plugin config.
func (prod *Spooling) Configure(conf core.PluginConfig) error {
	conf.Override("Formatter", "format.Serialize")
	err := prod.ProducerBase.Configure(conf)
	if err != nil {
		return err
	}
	prod.SetStopCallback(prod.close)

	prod.path = conf.GetString("Path", "/var/run/gollum/spooling")

	prod.maxFileSize = int64(conf.GetInt("MaxFileSizeMB", 512)) << 20
	prod.maxFileAge = time.Duration(conf.GetInt("MaxFileAgeMin", 1)) * time.Minute
	prod.batchMaxCount = conf.GetInt("BatchMaxCount", 100)
	prod.batchTimeout = time.Duration(conf.GetInt("BatchTimeoutSec", 5)) * time.Second
	prod.outfile = make(map[core.MessageStreamID]*spoolFile)
	prod.rotation = fileRotateConfig{
		timeout:  prod.maxFileAge,
		sizeByte: prod.maxFileSize,
		atHour:   -1,
		atMinute: -1,
		enabled:  true,
		compress: false,
	}

	return nil
}
Example 8
// Configure initializes this producer with values from a plugin config.
func (prod *Scribe) Configure(conf core.PluginConfig) error {
	err := prod.ProducerBase.Configure(conf)
	if err != nil {
		return err
	}

	host := conf.GetString("Address", "localhost:1463")
	bufferSizeMax := conf.GetInt("BatchSizeMaxKB", 8<<10) << 10 // 8 MB

	prod.batchSize = conf.GetInt("BatchSizeByte", 8192)
	prod.batchTimeout = time.Duration(conf.GetInt("BatchTimeoutSec", 5)) * time.Second
	prod.batch = createScribeMessageBatch(bufferSizeMax)
	prod.bufferSizeKB = conf.GetInt("ConnectionBufferSizeKB", 1<<10) // 1 MB
	prod.category = conf.GetStreamMap("Category", "")

	// Initialize scribe connection

	prod.socket, err = thrift.NewTSocket(host)
	if err != nil {
		Log.Error.Print("Scribe socket error:", err)
		return err
	}

	prod.transport = thrift.NewTFramedTransport(prod.socket)
	binProtocol := thrift.NewTBinaryProtocol(prod.transport, false, false)
	prod.scribe = scribe.NewScribeClientProtocol(prod.transport, binProtocol, binProtocol)

	return nil
}
Example 9
// Configure initializes this producer with values from a plugin config.
func (prod *Socket) Configure(conf core.PluginConfig) error {
	err := prod.ProducerBase.Configure(conf)
	if err != nil {
		return err
	}

	bufferSizeMax := conf.GetInt("BatchSizeMaxKB", 8<<10) << 10 // 8 MB

	prod.batchSize = conf.GetInt("BatchSizeByte", 8192)
	prod.batchTimeout = time.Duration(conf.GetInt("BatchTimeoutSec", 5)) * time.Second
	prod.bufferSizeKB = conf.GetInt("ConnectionBufferSizeKB", 1<<10) // 1 MB

	prod.acknowledge = shared.Unescape(conf.GetString("Acknowledge", ""))
	prod.address, prod.protocol = shared.ParseAddress(conf.GetString("Address", ":5880"))

	if prod.protocol != "unix" {
		if prod.acknowledge != "" {
			prod.protocol = "tcp"
		} else {
			prod.protocol = "udp"
		}
	}

	prod.batch = core.NewMessageBatch(bufferSizeMax, prod.ProducerBase.GetFormatter())

	return nil
}
Example 10
// Configure initializes this consumer with values from a plugin config.
func (cons *PcapHTTPConsumer) Configure(conf core.PluginConfig) error {
	err := cons.ConsumerBase.Configure(conf)
	if err != nil {
		return err
	}

	cons.netInterface = conf.GetString("Interface", "eth0")
	cons.promiscuous = conf.GetBool("Promiscuous", true)
	cons.filter = conf.GetString("Filter", "dst port 80 and dst host 127.0.0.1")
	cons.sessions = make(pcapSessionMap)
	cons.sessionTimeout = time.Duration(conf.GetInt("TimeoutMs", 3000)) * time.Millisecond
	cons.sessionGuard = new(sync.Mutex)

	return nil
}
Example 11
// Configure initializes this consumer with values from a plugin config.
func (cons *LoopBack) Configure(conf core.PluginConfig) error {
	err := cons.ConsumerBase.Configure(conf)
	if err != nil {
		return err
	}

	routes := conf.GetStreamRoutes("Routes")
	cons.routes = make(map[core.MessageStreamID]map[core.MessageStreamID]core.Stream)

	for sourceID, targetIDs := range routes {
		for _, targetID := range targetIDs {
			cons.addRoute(sourceID, targetID)
		}
	}

	core.EnableRetryQueue(conf.GetInt("Channel", 8192))
	return nil
}
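addRoute (not shown) fills a nested map keyed by source and target stream. A sketch of building such a two-level lookup table, with plain int IDs standing in for core.MessageStreamID:

package main

import "fmt"

type streamID int

func main() {
	// Parsed routes: source stream -> list of target streams.
	routes := map[streamID][]streamID{1: {2, 3}}

	// Nested lookup table: source -> set of targets.
	table := make(map[streamID]map[streamID]bool)
	for src, targets := range routes {
		for _, tgt := range targets {
			if table[src] == nil {
				table[src] = make(map[streamID]bool)
			}
			table[src][tgt] = true
		}
	}

	fmt.Println(len(table[1]), table[1][3]) // 2 true
}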
Example 12
// Configure initializes this consumer with values from a plugin config.
func (cons *Socket) Configure(conf core.PluginConfig) error {
	err := cons.ConsumerBase.Configure(conf)
	if err != nil {
		return err
	}

	cons.acknowledge = shared.Unescape(conf.GetString("Acknowledge", ""))
	cons.address, cons.protocol = shared.ParseAddress(conf.GetString("Address", ":5880"))

	if cons.protocol != "unix" {
		if cons.acknowledge != "" {
			cons.protocol = "tcp"
		} else {
			cons.protocol = "udp"
		}
	}

	cons.delimiter = shared.Unescape(conf.GetString("Delimiter", "\n"))
	cons.offset = conf.GetInt("Offset", 0)
	cons.flags = 0

	partitioner := strings.ToLower(conf.GetString("Partitioner", "delimiter"))
	switch partitioner {
	case "binary_be":
		cons.flags |= shared.BufferedReaderFlagBigEndian
		fallthrough

	case "binary", "binary_le":
		cons.flags |= shared.BufferedReaderFlagEverything
		switch conf.GetInt("Size", 4) {
		case 1:
			cons.flags |= shared.BufferedReaderFlagMLE8
		case 2:
			cons.flags |= shared.BufferedReaderFlagMLE16
		case 4:
			cons.flags |= shared.BufferedReaderFlagMLE32
		case 8:
			cons.flags |= shared.BufferedReaderFlagMLE64
		default:
			return fmt.Errorf("Size only supports the value 1,2,4 and 8")
		}

	case "fixed":
		cons.flags |= shared.BufferedReaderFlagMLEFixed
		cons.offset = conf.GetInt("Size", 1)

	case "ascii":
		cons.flags |= shared.BufferedReaderFlagMLE

	case "delimiter":
		// Nothing to add

	default:
		return fmt.Errorf("Unknown partitioner: %s", partitioner)
	}

	cons.quit = false
	return nil
}
Example 13
// Configure initializes this consumer with values from a plugin config.
func (cons *Profiler) Configure(conf core.PluginConfig) error {
	err := cons.ConsumerBase.Configure(conf)
	if err != nil {
		return err
	}
	numTemplates := conf.GetInt("TemplateCount", 10)

	cons.profileRuns = conf.GetInt("Runs", 10000)
	cons.batches = conf.GetInt("Batches", 10)
	cons.chars = conf.GetString("Characters", profilerDefaultCharacters)
	cons.message = conf.GetString("Message", "%# %256s")
	cons.templates = make([][]byte, numTemplates)
	cons.delay = time.Duration(conf.GetInt("DelayMs", 0)) * time.Millisecond

	return nil
}
Example 14
// Configure initializes this producer with values from a plugin config.
func (prod *Scribe) Configure(conf core.PluginConfig) error {
	err := prod.ProducerBase.Configure(conf)
	if err != nil {
		return err
	}
	prod.SetStopCallback(prod.close)
	host := conf.GetString("Address", "localhost:1463")

	prod.batchMaxCount = conf.GetInt("BatchMaxCount", 8192)
	prod.windowSize = prod.batchMaxCount
	prod.batchFlushCount = conf.GetInt("BatchFlushCount", prod.batchMaxCount/2)
	prod.batchFlushCount = shared.MinI(prod.batchFlushCount, prod.batchMaxCount)
	prod.batchTimeout = time.Duration(conf.GetInt("BatchTimeoutSec", 5)) * time.Second
	prod.batch = core.NewMessageBatch(prod.batchMaxCount)

	prod.bufferSizeByte = conf.GetInt("ConnectionBufferSizeKB", 1<<10) << 10 // 1 MB
	prod.category = conf.GetStreamMap("Category", "")

	// Initialize scribe connection

	prod.socket, err = thrift.NewTSocket(host)
	if err != nil {
		Log.Error.Print("Scribe socket error:", err)
		return err
	}

	prod.transport = thrift.NewTFramedTransport(prod.socket)
	binProtocol := thrift.NewTBinaryProtocol(prod.transport, false, false)
	prod.scribe = scribe.NewScribeClientProtocol(prod.transport, binProtocol, binProtocol)
	prod.lastMetricUpdate = time.Now()
	prod.counters = make(map[string]*int64)

	shared.Metric.New(scribeMetricWindowSize)
	shared.Metric.SetI(scribeMetricWindowSize, prod.windowSize)

	for _, category := range prod.category {
		shared.Metric.New(scribeMetricMessages + category)
		shared.Metric.New(scribeMetricMessagesSec + category)
		prod.counters[category] = new(int64)
	}

	prod.SetCheckFuseCallback(prod.tryOpenConnection)
	return nil
}
Example 15
// Configure initializes this producer with values from a plugin config.
func (prod *InfluxDB) Configure(conf core.PluginConfig) error {
	if err := prod.ProducerBase.Configure(conf); err != nil {
		return err
	}
	prod.SetStopCallback(prod.close)

	version := conf.GetInt("Version", 100)
	if conf.GetBool("UseVersion08", false) {
		version = 80
	}

	switch {
	case version < 90:
		Log.Debug.Print("Using InfluxDB 0.8.x format")
		prod.writer = new(influxDBWriter08)
	case version == 90:
		Log.Debug.Print("Using InfluxDB 0.9.0 format")
		prod.writer = new(influxDBWriter09)
	default:
		Log.Debug.Print("Using InfluxDB 0.9.1+ format")
		prod.writer = new(influxDBWriter10)
	}

	if err := prod.writer.configure(conf, prod); err != nil {
		return err
	}

	prod.batchMaxCount = conf.GetInt("BatchMaxCount", 8192)
	prod.batchFlushCount = conf.GetInt("BatchFlushCount", prod.batchMaxCount/2)
	prod.batchFlushCount = shared.MinI(prod.batchFlushCount, prod.batchMaxCount)
	prod.batchTimeout = time.Duration(conf.GetInt("BatchTimeoutSec", 5)) * time.Second

	prod.batch = core.NewMessageBatch(prod.batchMaxCount)
	prod.assembly = core.NewWriterAssembly(prod.writer, prod.Drop, prod.GetFormatter())
	return nil
}
Example 16
// Configure initializes this consumer with values from a plugin config.
func (cons *Socket) Configure(conf core.PluginConfig) error {
	err := cons.ConsumerBase.Configure(conf)
	if err != nil {
		return err
	}

	flags, err := strconv.ParseInt(conf.GetString("Permissions", "0770"), 8, 32)
	if err != nil {
		return err
	}
	cons.fileFlags = os.FileMode(flags)

	cons.clients = list.New()
	cons.clientLock = new(sync.Mutex)
	cons.acknowledge = shared.Unescape(conf.GetString("Acknowledge", ""))
	cons.address, cons.protocol = shared.ParseAddress(conf.GetString("Address", ":5880"))
	cons.reconnectTime = time.Duration(conf.GetInt("ReconnectAfterSec", 2)) * time.Second
	cons.ackTimeout = time.Duration(conf.GetInt("AckTimoutSec", 2)) * time.Second
	cons.readTimeout = time.Duration(conf.GetInt("ReadTimoutSec", 5)) * time.Second
	cons.clearSocket = conf.GetBool("RemoveOldSocket", true)

	if cons.protocol != "unix" {
		if cons.acknowledge != "" {
			cons.protocol = "tcp"
		} else {
			cons.protocol = "udp"
		}
	}

	cons.delimiter = shared.Unescape(conf.GetString("Delimiter", "\n"))
	cons.offset = conf.GetInt("Offset", 0)
	cons.flags = 0

	partitioner := strings.ToLower(conf.GetString("Partitioner", "delimiter"))
	switch partitioner {
	case "binary_be":
		cons.flags |= shared.BufferedReaderFlagBigEndian
		fallthrough

	case "binary", "binary_le":
		cons.flags |= shared.BufferedReaderFlagEverything
		switch conf.GetInt("Size", 4) {
		case 1:
			cons.flags |= shared.BufferedReaderFlagMLE8
		case 2:
			cons.flags |= shared.BufferedReaderFlagMLE16
		case 4:
			cons.flags |= shared.BufferedReaderFlagMLE32
		case 8:
			cons.flags |= shared.BufferedReaderFlagMLE64
		default:
			return fmt.Errorf("Size only supports the value 1,2,4 and 8")
		}

	case "fixed":
		cons.flags |= shared.BufferedReaderFlagMLEFixed
		cons.offset = conf.GetInt("Size", 1)

	case "ascii":
		cons.flags |= shared.BufferedReaderFlagMLE

	case "delimiter":
		// Nothing to add

	default:
		return fmt.Errorf("Unknown partitioner: %s", partitioner)
	}

	return nil
}
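The permissions value is parsed as an octal string via strconv.ParseInt with base 8 and only then converted to an os.FileMode. In isolation:

package main

import (
	"fmt"
	"os"
	"strconv"
)

func main() {
	flags, err := strconv.ParseInt("0770", 8, 32)
	if err != nil {
		panic(err)
	}
	mode := os.FileMode(flags)
	fmt.Println(mode) // -rwxrwx---
}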
Example 17
// Configure initializes this consumer with values from a plugin config.
func (cons *Kafka) Configure(conf core.PluginConfig) error {
	err := cons.ConsumerBase.Configure(conf)
	if err != nil {
		return err
	}

	if !conf.HasValue("Servers") {
		return core.NewConsumerError("No servers configured for consumer.Kafka")
	}

	cons.servers = conf.GetStringArray("Servers", []string{})
	cons.topic = conf.GetString("Topic", "default")
	cons.offsetFile = conf.GetString("OffsetFile", "")
	cons.persistTimeout = time.Duration(conf.GetInt("PresistTimoutMs", 5000)) * time.Millisecond
	cons.offsets = make(map[int32]int64)
	cons.MaxPartitionID = 0

	cons.config = kafka.NewConfig()
	cons.config.ClientID = conf.GetString("ClientId", "gollum")
	cons.config.ChannelBufferSize = conf.GetInt("MessageBufferCount", 256)

	cons.config.Net.MaxOpenRequests = conf.GetInt("MaxOpenRequests", 5)
	cons.config.Net.DialTimeout = time.Duration(conf.GetInt("ServerTimeoutSec", 30)) * time.Second
	cons.config.Net.ReadTimeout = cons.config.Net.DialTimeout
	cons.config.Net.WriteTimeout = cons.config.Net.DialTimeout

	cons.config.Metadata.Retry.Max = conf.GetInt("ElectRetries", 3)
	cons.config.Metadata.Retry.Backoff = time.Duration(conf.GetInt("ElectTimeoutMs", 250)) * time.Millisecond
	cons.config.Metadata.RefreshFrequency = time.Duration(conf.GetInt("MetadataRefreshMs", 10000)) * time.Millisecond

	cons.config.Consumer.Fetch.Min = int32(conf.GetInt("MinFetchSizeByte", 1))
	cons.config.Consumer.Fetch.Max = int32(conf.GetInt("MaxFetchSizeByte", 0))
	cons.config.Consumer.Fetch.Default = int32(conf.GetInt("MaxFetchSizeByte", 32768))
	cons.config.Consumer.MaxWaitTime = time.Duration(conf.GetInt("FetchTimeoutMs", 250)) * time.Millisecond

	offsetValue := strings.ToLower(conf.GetString("DefaultOffset", kafkaOffsetNewest))
	switch offsetValue {
	case kafkaOffsetNewest:
		cons.defaultOffset = kafka.OffsetNewest

	case kafkaOffsetOldest:
		cons.defaultOffset = kafka.OffsetOldest

	default:
		cons.defaultOffset, _ = strconv.ParseInt(offsetValue, 10, 64)
		fileContents, err := ioutil.ReadFile(cons.offsetFile)
		if err != nil {
			return err
		}

		// Decode the JSON file into the partition -> offset map
		encodedOffsets := make(map[string]int64)
		err = json.Unmarshal(fileContents, &encodedOffsets)
		if err != nil {
			return err
		}

		for k, v := range encodedOffsets {
			id, err := strconv.Atoi(k)
			if err != nil {
				return err
			}
			cons.offsets[int32(id)] = v
		}
	}

	return nil
}
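The default-offset branch restores partition offsets from a JSON file; since JSON object keys are strings, each key is parsed back into an int32 after unmarshaling. The decoding step, reduced to a runnable sketch:

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

func main() {
	fileContents := []byte(`{"0": 1234, "1": 5678}`)

	encodedOffsets := make(map[string]int64)
	if err := json.Unmarshal(fileContents, &encodedOffsets); err != nil {
		panic(err)
	}

	offsets := make(map[int32]int64)
	for k, v := range encodedOffsets {
		id, err := strconv.Atoi(k)
		if err != nil {
			panic(err)
		}
		offsets[int32(id)] = v
	}
	fmt.Println(offsets) // map[0:1234 1:5678]
}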
Example 18
// Configure initializes this producer with values from a plugin config.
func (prod *File) Configure(conf core.PluginConfig) error {
	err := prod.ProducerBase.Configure(conf)
	if err != nil {
		return err
	}

	prod.SetRollCallback(prod.rotateLog)
	prod.SetStopCallback(prod.close)

	prod.filesByStream = make(map[core.MessageStreamID]*fileState)
	prod.files = make(map[string]*fileState)
	prod.batchMaxCount = conf.GetInt("BatchMaxCount", 8192)
	prod.batchFlushCount = conf.GetInt("BatchFlushCount", prod.batchMaxCount/2)
	prod.batchFlushCount = shared.MinI(prod.batchFlushCount, prod.batchMaxCount)
	prod.batchTimeout = time.Duration(conf.GetInt("BatchTimeoutSec", 5)) * time.Second
	prod.overwriteFile = conf.GetBool("FileOverwrite", false)

	flags, err := strconv.ParseInt(conf.GetString("Permissions", "0664"), 8, 32)
	if err != nil {
		return err
	}
	prod.permissions = os.FileMode(flags)

	logFile := conf.GetString("File", "/var/log/gollum.log")
	prod.wildcardPath = strings.IndexByte(logFile, '*') != -1

	prod.fileDir = filepath.Dir(logFile)
	prod.fileExt = filepath.Ext(logFile)
	prod.fileName = filepath.Base(logFile)
	prod.fileName = prod.fileName[:len(prod.fileName)-len(prod.fileExt)]
	prod.timestamp = conf.GetString("RotateTimestamp", "2006-01-02_15")
	prod.flushTimeout = time.Duration(conf.GetInt("FlushTimeoutSec", 5)) * time.Second

	prod.rotate.enabled = conf.GetBool("Rotate", false)
	prod.rotate.timeout = time.Duration(conf.GetInt("RotateTimeoutMin", 1440)) * time.Minute
	prod.rotate.sizeByte = int64(conf.GetInt("RotateSizeMB", 1024)) << 20
	prod.rotate.atHour = -1
	prod.rotate.atMinute = -1
	prod.rotate.compress = conf.GetBool("Compress", false)

	prod.pruneCount = conf.GetInt("RotatePruneCount", 0)
	prod.pruneSize = int64(conf.GetInt("RotatePruneTotalSizeMB", 0)) << 20

	if prod.pruneSize > 0 && prod.rotate.sizeByte > 0 {
		prod.pruneSize -= prod.rotate.sizeByte // both values are in bytes
		if prod.pruneSize <= 0 {
			prod.pruneCount = 1
			prod.pruneSize = 0
		}
	}

	rotateAt := conf.GetString("RotateAt", "")
	if rotateAt != "" {
		parts := strings.Split(rotateAt, ":")
		rotateAtHour, _ := strconv.ParseInt(parts[0], 10, 8)
		rotateAtMin, _ := strconv.ParseInt(parts[1], 10, 8)

		prod.rotate.atHour = int(rotateAtHour)
		prod.rotate.atMinute = int(rotateAtMin)
	}

	return nil
}
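The RotateAt parsing above indexes parts[1] without checking how many fields the split produced, so a value like "12" would panic at runtime. A guarded variant of the same parse (a sketch, not the project's code):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func parseRotateAt(rotateAt string) (hour, minute int, err error) {
	parts := strings.Split(rotateAt, ":")
	if len(parts) != 2 {
		return 0, 0, fmt.Errorf("RotateAt must look like HH:MM, got %q", rotateAt)
	}
	h, err := strconv.ParseInt(parts[0], 10, 8)
	if err != nil {
		return 0, 0, err
	}
	m, err := strconv.ParseInt(parts[1], 10, 8)
	if err != nil {
		return 0, 0, err
	}
	return int(h), int(m), nil
}

func main() {
	fmt.Println(parseRotateAt("03:30")) // 3 30 <nil>
	fmt.Println(parseRotateAt("12"))    // 0 0 error
}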
Example 19
// Configure initializes this producer with values from a plugin config.
func (prod *ElasticSearch) Configure(conf core.PluginConfig) error {
	err := prod.ProducerBase.Configure(conf)
	if err != nil {
		return err
	}

	defaultServer := []string{"localhost"}
	numConnections := conf.GetInt("Connections", 6)
	retrySec := conf.GetInt("RetrySec", 5)

	prod.conn = elastigo.NewConn()
	prod.conn.Hosts = conf.GetStringArray("Servers", defaultServer)
	prod.conn.Domain = conf.GetString("Domain", prod.conn.Hosts[0])
	prod.conn.ClusterDomains = prod.conn.Hosts
	prod.conn.Port = strconv.Itoa(conf.GetInt("Port", 9200))
	prod.conn.Username = conf.GetString("User", "")
	prod.conn.Password = conf.GetString("Password", "")

	prod.indexer = prod.conn.NewBulkIndexerErrors(numConnections, retrySec)
	prod.indexer.BufferDelayMax = time.Duration(conf.GetInt("BatchTimeoutSec", 5)) * time.Second
	prod.indexer.BulkMaxBuffer = conf.GetInt("BatchSizeByte", 32768)
	prod.indexer.BulkMaxDocs = conf.GetInt("BatchMaxCount", 128)

	prod.indexer.Sender = func(buf *bytes.Buffer) error {
		_, err := prod.conn.DoCommand("POST", "/_bulk", nil, buf)
		if err != nil {
			Log.Error.Print("ElasticSearch response error - ", err)
		}
		return err
	}

	prod.index = conf.GetStreamMap("Index", "")
	prod.msgType = conf.GetStreamMap("Type", "log")
	prod.msgTTL = conf.GetString("TTL", "")
	prod.dayBasedIndex = conf.GetBool("DayBasedIndex", false)

	return nil
}
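The bulk indexer's send behavior is injected as a closure assigned to a function-typed field, which keeps the HTTP call and its error logging in one place. A sketch of that callback shape with stand-in types (not elastigo's API):

package main

import (
	"bytes"
	"fmt"
)

// bulkIndexer mimics the shape of the indexer used above: the send behavior
// is injected as a function-typed field.
type bulkIndexer struct {
	Sender func(buf *bytes.Buffer) error
}

func main() {
	idx := &bulkIndexer{}
	idx.Sender = func(buf *bytes.Buffer) error {
		// A real sender would POST buf to /_bulk and log any error.
		fmt.Printf("sending %d bytes\n", buf.Len())
		return nil
	}

	_ = idx.Sender(bytes.NewBufferString(`{"index":{}}`))
}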
Example 20
// Configure initializes this producer with values from a plugin config.
func (prod *ElasticSearch) Configure(conf core.PluginConfig) error {
	err := prod.ProducerBase.Configure(conf)
	if err != nil {
		return err
	}

	prod.SetStopCallback(prod.close)

	defaultServer := []string{"localhost"}
	numConnections := conf.GetInt("Connections", 6)
	retrySec := conf.GetInt("RetrySec", 5)

	prod.conn = elastigo.NewConn()
	prod.conn.Hosts = conf.GetStringArray("Servers", defaultServer)
	prod.conn.Domain = conf.GetString("Domain", prod.conn.Hosts[0])
	prod.conn.ClusterDomains = prod.conn.Hosts
	prod.conn.Port = strconv.Itoa(conf.GetInt("Port", 9200))
	prod.conn.Username = conf.GetString("User", "")
	prod.conn.Password = conf.GetString("Password", "")

	prod.indexer = prod.conn.NewBulkIndexerErrors(numConnections, retrySec)
	prod.indexer.BufferDelayMax = time.Duration(conf.GetInt("BatchTimeoutSec", 5)) * time.Second
	prod.indexer.BulkMaxBuffer = conf.GetInt("BatchSizeByte", 32768)
	prod.indexer.BulkMaxDocs = conf.GetInt("BatchMaxCount", 128)

	prod.indexer.Sender = func(buf *bytes.Buffer) error {
		_, err := prod.conn.DoCommand("POST", "/_bulk", nil, buf)
		if err != nil {
			Log.Error.Print("ElasticSearch response error - ", err)
		}
		return err
	}

	prod.index = conf.GetStreamMap("Index", "")
	prod.msgType = conf.GetStreamMap("Type", "log")
	prod.msgTTL = conf.GetString("TTL", "")
	prod.dayBasedIndex = conf.GetBool("DayBasedIndex", false)

	prod.counters = make(map[string]*int64)
	prod.lastMetricUpdate = time.Now()

	for _, index := range prod.index {
		shared.Metric.New(elasticMetricMessages + index)
		shared.Metric.New(elasticMetricMessagesSec + index)
		prod.counters[index] = new(int64)
	}

	prod.SetCheckFuseCallback(prod.isClusterUp)
	return nil
}
Example 21
// Configure initializes this producer with values from a plugin config.
func (prod *Kafka) Configure(conf core.PluginConfig) error {
	err := prod.ProducerBase.Configure(conf)
	if err != nil {
		return err
	}

	if !conf.HasValue("Servers") {
		return core.NewProducerError("No servers configured for producer.Kafka")
	}

	prod.servers = conf.GetStringArray("Servers", []string{})
	prod.topic = conf.GetStreamMap("Topic", "")
	prod.clientID = conf.GetString("ClientId", "gollum")

	prod.config = kafka.NewConfig()
	prod.config.ClientID = conf.GetString("ClientId", "gollum")
	prod.config.ChannelBufferSize = conf.GetInt("MessageBufferCount", 256)

	prod.config.Net.MaxOpenRequests = conf.GetInt("MaxOpenRequests", 5)
	prod.config.Net.DialTimeout = time.Duration(conf.GetInt("ServerTimeoutSec", 30)) * time.Second
	prod.config.Net.ReadTimeout = prod.config.Net.DialTimeout
	prod.config.Net.WriteTimeout = prod.config.Net.DialTimeout

	prod.config.Metadata.Retry.Max = conf.GetInt("ElectRetries", 3)
	prod.config.Metadata.Retry.Backoff = time.Duration(conf.GetInt("ElectTimeoutMs", 250)) * time.Millisecond
	prod.config.Metadata.RefreshFrequency = time.Duration(conf.GetInt("MetadataRefreshMs", 10000)) * time.Millisecond

	prod.config.Producer.MaxMessageBytes = conf.GetInt("BatchSizeMaxKB", 1<<10) << 10
	prod.config.Producer.RequiredAcks = kafka.RequiredAcks(conf.GetInt("RequiredAcks", int(kafka.WaitForLocal)))
	prod.config.Producer.Timeout = time.Duration(conf.GetInt("TimoutMs", 1500)) * time.Millisecond

	prod.config.Producer.Return.Errors = true
	prod.config.Producer.Return.Successes = false

	switch strings.ToLower(conf.GetString("Compression", compressNone)) {
	default:
		fallthrough
	case compressNone:
		prod.config.Producer.Compression = kafka.CompressionNone
	case compressGZIP:
		prod.config.Producer.Compression = kafka.CompressionGZIP
	case compressSnappy:
		prod.config.Producer.Compression = kafka.CompressionSnappy
	}

	switch strings.ToLower(conf.GetString("Partitioner", partRandom)) {
	case partRandom:
		prod.config.Producer.Partitioner = kafka.NewRandomPartitioner
	case partRoundrobin:
		prod.config.Producer.Partitioner = kafka.NewRoundRobinPartitioner
	default:
		fallthrough
	case partHash:
		prod.config.Producer.Partitioner = kafka.NewHashPartitioner
	}

	prod.config.Producer.Flush.Bytes = conf.GetInt("BatchSizeByte", 8192)
	prod.config.Producer.Flush.Messages = conf.GetInt("BatchMinCount", 1)
	prod.config.Producer.Flush.Frequency = time.Duration(conf.GetInt("BatchTimeoutSec", 3)) * time.Second
	prod.config.Producer.Flush.MaxMessages = conf.GetInt("BatchMaxCount", 0)
	prod.config.Producer.Retry.Max = conf.GetInt("SendRetries", 3)
	prod.config.Producer.Retry.Backoff = time.Duration(conf.GetInt("SendTimeoutMs", 100)) * time.Millisecond
	return nil
}
Example 22
// Configure initializes this producer with values from a plugin config.
func (prod *Kafka) Configure(conf core.PluginConfig) error {
	err := prod.ProducerBase.Configure(conf)
	if err != nil {
		return err
	}
	prod.SetStopCallback(prod.close)

	prod.servers = conf.GetStringArray("Servers", []string{"localhost:9092"})
	prod.topic = conf.GetStreamMap("Topic", "")
	prod.clientID = conf.GetString("ClientId", "gollum")
	prod.lastMetricUpdate = time.Now()

	prod.config = kafka.NewConfig()
	prod.config.ClientID = conf.GetString("ClientId", "gollum")
	prod.config.ChannelBufferSize = conf.GetInt("MessageBufferCount", 256)

	prod.config.Net.MaxOpenRequests = conf.GetInt("MaxOpenRequests", 5)
	prod.config.Net.DialTimeout = time.Duration(conf.GetInt("ServerTimeoutSec", 30)) * time.Second
	prod.config.Net.ReadTimeout = prod.config.Net.DialTimeout
	prod.config.Net.WriteTimeout = prod.config.Net.DialTimeout

	prod.config.Metadata.Retry.Max = conf.GetInt("ElectRetries", 3)
	prod.config.Metadata.Retry.Backoff = time.Duration(conf.GetInt("ElectTimeoutMs", 250)) * time.Millisecond
	prod.config.Metadata.RefreshFrequency = time.Duration(conf.GetInt("MetadataRefreshMs", 10000)) * time.Millisecond

	prod.config.Producer.MaxMessageBytes = conf.GetInt("BatchSizeMaxKB", 1<<10) << 10
	prod.config.Producer.RequiredAcks = kafka.RequiredAcks(conf.GetInt("RequiredAcks", int(kafka.WaitForLocal)))
	prod.config.Producer.Timeout = time.Duration(conf.GetInt("TimoutMs", 1500)) * time.Millisecond

	prod.config.Producer.Return.Errors = true
	prod.config.Producer.Return.Successes = true

	switch strings.ToLower(conf.GetString("Compression", compressNone)) {
	default:
		fallthrough
	case compressNone:
		prod.config.Producer.Compression = kafka.CompressionNone
	case compressGZIP:
		prod.config.Producer.Compression = kafka.CompressionGZIP
	case compressSnappy:
		prod.config.Producer.Compression = kafka.CompressionSnappy
	}

	switch strings.ToLower(conf.GetString("Partitioner", partRandom)) {
	case partRandom:
		prod.config.Producer.Partitioner = kafka.NewRandomPartitioner
	case partRoundrobin:
		prod.config.Producer.Partitioner = kafka.NewRoundRobinPartitioner
	default:
		fallthrough
	case partHash:
		prod.config.Producer.Partitioner = kafka.NewHashPartitioner
	}

	prod.config.Producer.Flush.Bytes = conf.GetInt("BatchSizeByte", 8192)
	prod.config.Producer.Flush.Messages = conf.GetInt("BatchMinCount", 1)
	prod.config.Producer.Flush.Frequency = time.Duration(conf.GetInt("BatchTimeoutSec", 3)) * time.Second
	prod.config.Producer.Flush.MaxMessages = conf.GetInt("BatchMaxCount", 0)
	prod.config.Producer.Retry.Max = conf.GetInt("SendRetries", 3)
	prod.config.Producer.Retry.Backoff = time.Duration(conf.GetInt("SendTimeoutMs", 100)) * time.Millisecond

	prod.batch = core.NewMessageBatch(conf.GetInt("Channel", 8192))
	prod.counters = make(map[string]*int64)

	for _, topic := range prod.topic {
		shared.Metric.New(kafkaMetricMessages + topic)
		shared.Metric.New(kafkaMetricMessagesSec + topic)
		prod.counters[topic] = new(int64)
	}

	shared.Metric.New(kafkaMetricMissCount)
	prod.SetCheckFuseCallback(prod.tryOpenConnection)
	return nil
}
Example 23
// Configure initializes this producer with values from a plugin config.
func (prod *File) Configure(conf core.PluginConfig) error {
	err := prod.ProducerBase.Configure(conf)
	if err != nil {
		return err
	}

	prod.filesByStream = make(map[core.MessageStreamID]*fileState)
	prod.files = make(map[uint32]*fileState)
	prod.bufferSizeMax = conf.GetInt("BatchSizeMaxKB", 8<<10) << 10 // 8 MB

	prod.batchSize = conf.GetInt("BatchSizeByte", 8192)
	prod.batchTimeout = time.Duration(conf.GetInt("BatchTimeoutSec", 5)) * time.Second

	logFile := conf.GetString("File", "/var/prod/gollum.log")
	prod.wildcardPath = strings.IndexByte(logFile, '*') != -1

	prod.fileDir = filepath.Dir(logFile)
	prod.fileExt = filepath.Ext(logFile)
	prod.fileName = filepath.Base(logFile)
	prod.fileName = prod.fileName[:len(prod.fileName)-len(prod.fileExt)]
	prod.timestamp = conf.GetString("RotateTimestamp", "2006-01-02_15")
	prod.flushTimeout = time.Duration(conf.GetInt("FlushTimeoutSec", 5)) * time.Second

	prod.rotate.enabled = conf.GetBool("Rotate", false)
	prod.rotate.timeout = time.Duration(conf.GetInt("RotateTimeoutMin", 1440)) * time.Minute
	prod.rotate.sizeByte = int64(conf.GetInt("RotateSizeMB", 1024)) << 20
	prod.rotate.atHour = -1
	prod.rotate.atMinute = -1
	prod.rotate.compress = conf.GetBool("Compress", false)

	rotateAt := conf.GetString("RotateAt", "")
	if rotateAt != "" {
		parts := strings.Split(rotateAt, ":")
		rotateAtHour, _ := strconv.ParseInt(parts[0], 10, 8)
		rotateAtMin, _ := strconv.ParseInt(parts[1], 10, 8)

		prod.rotate.atHour = int(rotateAtHour)
		prod.rotate.atMinute = int(rotateAtMin)
	}

	return nil
}
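The file name handling splits the configured path into directory, base name and extension so rotation can rebuild file names, and detects wildcard paths by searching for '*'. The same steps in isolation (the path is a placeholder):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	logFile := "/var/log/app-*.log"

	wildcard := strings.IndexByte(logFile, '*') != -1
	dir := filepath.Dir(logFile)
	ext := filepath.Ext(logFile)
	name := filepath.Base(logFile)
	name = name[:len(name)-len(ext)]

	fmt.Println(wildcard, dir, name, ext) // true /var/log app-* .log
}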