Example #1
func (l *UdpInput) Run(output chan common.MapStr) error {
	logp.Info("[UdpInput] Running UDP Input")
	addr := net.UDPAddr{
		Port: l.Port,
		IP:   net.ParseIP("0.0.0.0"),
	}
	server, err := net.ListenUDP("udp", &addr)
	if err != nil {
		logp.Err("couldn't start listening: " + err.Error())
		return err
	}
	// only configure the socket once ListenUDP has succeeded,
	// otherwise server is nil and this would panic
	if err := server.SetReadBuffer(1048576); err != nil {
		logp.Err("couldn't set read buffer: " + err.Error())
	}

	logp.Info("[UdpInput] Listening on port %d", l.Port)

	i := 0
	for {
		i++
		buf := make([]byte, 4096)
		rlen, addr, err := server.ReadFromUDP(buf)
		if err != nil {
			logp.Err("couldn't read from UDP: " + err.Error())
			continue // skip this iteration rather than handling a failed read
		}
		go l.handlePacket(buf, rlen, i, addr, output)
	}
}
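handlePacket is referenced above but not shown. Below is a minimal sketch of a handler consistent with that call, assuming UdpInput has a Type field and that events carry the usual message/type fields as in the tail input example; all names here are illustrative, not the project's actual implementation. The important detail is slicing the buffer to rlen, since only that many bytes of buf were filled by ReadFromUDP:

func (l *UdpInput) handlePacket(buf []byte, rlen int, count int, addr *net.UDPAddr, output chan common.MapStr) {
	text := string(buf[:rlen]) // only the first rlen bytes are valid
	event := common.MapStr{
		"message": text,
		"source":  addr.String(),
		"line":    count,
		"type":    l.Type,
	}
	event.EnsureTimestampField(time.Now)
	event.EnsureCountField()
	output <- event
}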
Example #2
func (l *TailInput) doStuff(output chan common.MapStr) {

	var line uint64 = 0
	var read_timeout = 30 * time.Second

	// open file
	// basic error handling, if we hit an error, log and return
	// this ends the currently running thread without impacting other threads
	f, err := os.Open(l.FileName)
	if err != nil {
		logp.Err("Error opening file " + err.Error())
		return
	}
	l.FileP = f

	// seek to end
	// for offset, we use the actual file offset
	// we initialize it to the end of the file at time of open
	l.offset, err = l.FileP.Seek(0, 2)
	if err != nil {
		logp.Err("Error seeking in file " + err.Error())
		return
	}
	l.LastOpen = time.Now()

	buffer := new(bytes.Buffer)
	reader := bufio.NewReader(l.FileP)

	for {
		if l.CheckReopen() {
			// the file was reopened after a roll; the old bufio.Reader
			// still wraps the stale *os.File, so rebuild it on the new handle
			reader = bufio.NewReader(l.FileP)
		}
		text, bytesread, err := readline(reader, buffer, read_timeout)
		if err != nil && err != io.EOF {
			// EOF errors are expected, since we are tailing the file
			logp.Err("Error reading file " + err.Error())
			return
		}

		if bytesread > 0 {
			l.offset += int64(bytesread)
			line++

			event := common.MapStr{}
			event["filename"] = l.FileName
			event["line"] = line
			event["message"] = text
			event["offset"] = l.offset
			event["type"] = l.Type

			event.EnsureTimestampField(time.Now)
			event.EnsureCountField()

			logp.Debug("tailinput", "InputEvent: %v", event)
			output <- event // ship the new event downstream
		}
	}
}
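readline is used above but not shown. Here is a minimal sketch of a compatible helper, assuming the contract implied by the caller: accumulate partial lines in buffer, return a complete line plus the number of bytes consumed, and surface io.EOF once the file has been quiet for longer than timeout. This is hypothetical, not the project's actual helper, and assumes bufio, bytes, io, strings, and time are imported:

func readline(reader *bufio.Reader, buffer *bytes.Buffer, timeout time.Duration) (string, int, error) {
	start := time.Now()
	for {
		chunk, err := reader.ReadString('\n')
		buffer.WriteString(chunk)
		if err == nil {
			// a full line arrived; return it without the trailing newline
			line := buffer.String()
			buffer.Reset()
			return strings.TrimRight(line, "\r\n"), len(line), nil
		}
		if err != io.EOF {
			return "", 0, err
		}
		// at end of file mid-line: wait for more data, up to timeout
		if time.Since(start) > timeout {
			return "", 0, io.EOF
		}
		time.Sleep(250 * time.Millisecond)
	}
}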
Example #3
// Update the local topology map
func (out *ElasticsearchOutput) UpdateLocalTopologyMap() {

	// get all shippers' IPs from Elasticsearch
	TopologyMapTmp := make(map[string]string)

	res, err := out.Conn.SearchUri(".packetbeat-topology", "server-ip", nil)
	if err != nil {
		logp.Err("Getting topology map fails with: %s", err)
		return
	}

	for _, obj := range res.Hits.Hits {
		var result QueryResult
		if err = json.Unmarshal(obj, &result); err != nil {
			logp.Err("json.Unmarshal fails with: %s", err)
			continue // skip the malformed hit instead of silently aborting the update
		}

		var pub PublishedTopology
		if err = json.Unmarshal(result.Source, &pub); err != nil {
			logp.Err("json.Unmarshal fails with: %s", err)
			continue // don't build mappings from a zero-value topology
		}

		// add a name mapping for every published IP
		ipaddrs := strings.Split(pub.IPs, ",")
		for _, addr := range ipaddrs {
			TopologyMapTmp[addr] = pub.Name
		}
	}

	// update topology map
	out.TopologyMap = TopologyMapTmp

	logp.Debug("output_elasticsearch", "Topology map %s", out.TopologyMap)
}
Example #4
func (l *RedisInput) Run(output chan common.MapStr) error {
	logp.Debug("redisinput", "Running Redis Input")
	var keysScript = redis.NewScript(1, `return redis.call('KEYS', KEYS[1])`)

	go func() {
		redisURL := fmt.Sprintf("redis://%s:%d/%d", l.Host, l.Port, l.DB)
		dialConnectTimeout := redis.DialConnectTimeout(3 * time.Second)
		dialReadTimeout := redis.DialReadTimeout(10 * time.Second)
		var backOffCount = 0
		var backOffDuration time.Duration
		for {
			logp.Debug("redisinput", "Connecting to: %s", redisURL)
			server, err := redis.DialURL(redisURL, dialConnectTimeout, dialReadTimeout)
			if err != nil {
				logp.Err("couldn't connect to redis: " + err.Error())
				return
			}
			logp.Debug("redisinput", "Connected to Redis Server")

			reply, err := keysScript.Do(server, "*")
			if err != nil {
				logp.Err("An error occurred while executing KEYS command: %s\n", err)
				server.Close()
				return
			}

			keys, err := redis.Strings(reply, err)
			if err != nil {
				logp.Err("An error occurred while converting reply to String: %s\n", err)
				server.Close()
				return
			}

			for _, key := range keys {
				logp.Debug("redisinput", "key is %s", key)
				lineCount, err := l.handleConn(server, output, key)
				if err == nil {
					logp.Debug("redisinput", "Read %v events", lineCount)
					backOffCount = 0
				} else {
					backOffCount++
				}
				// linear back-off: no sleep after a success, one extra
				// second per consecutive error
				backOffDuration = time.Duration(backOffCount) * time.Second
				time.Sleep(backOffDuration)
			}
			// close this iteration's connection before dialing again; a
			// defer here would accumulate for the life of the goroutine
			// and leak connections
			server.Close()
		}
		}
	}()
	return nil
}
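The back-off above is linear (count × 1s) and unbounded. A capped exponential variant is a common alternative; a small sketch (hypothetical helper, not part of the original input):

func backoff(attempt int) time.Duration {
	const max = 30 * time.Second
	if attempt > 5 { // 2^5 seconds already exceeds the cap
		return max
	}
	d := time.Duration(1<<uint(attempt)) * time.Second
	if d > max {
		d = max
	}
	return d
}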
Example #5
func (out *KafkaOutput) SendMessagesGoroutine() {

	for {
		select {

		case queueMsg := <-out.sendingQueue:

			if !out.connected {
				logp.Debug("output_kafka", "Droping pkt ...")
				continue
			}
			logp.Debug("output_kafka", "Send event to kafka")

			out.Producer.Input() <- &sarama.ProducerMessage{
				Topic: out.Topic,
				Key:   nil,
				Value: &queueMsg,
			}

		case err := <-out.Producer.Errors():
			logp.Err("Failed to publish event to kafka: %s", err)
			out.connected = false
			out.Close()
			go out.Reconnect()
			return
		}
	}
}
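ProducerMessage.Value must satisfy sarama.Encoder, so &queueMsg only compiles if *KafkaQueueMsg implements that interface. A minimal sketch of what that could look like, assuming the struct carries the pre-marshalled JSON in a msg field as in the KafkaOutput.PublishEvent example below (the method bodies are illustrative):

// Encode and Length together satisfy sarama.Encoder.
func (m *KafkaQueueMsg) Encode() ([]byte, error) {
	return m.msg, nil
}

func (m *KafkaQueueMsg) Length() int {
	return len(m.msg)
}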
Example #6
// Each shipper publishes a list of IPs together with its name to Elasticsearch
func (out *ElasticsearchOutput) PublishIPs(name string, localAddrs []string) error {
	if !out.ttlEnabled {
		logp.Debug("output_elasticsearch", "Not publishing IPs because TTL was not yet confirmed to be enabled")
		return nil
	}

	logp.Debug("output_elasticsearch", "Publish IPs %s with expiration time %d", localAddrs, out.TopologyExpire)
	params := map[string]string{
		"ttl":     fmt.Sprintf("%dms", out.TopologyExpire),
		"refresh": "true",
	}
	_, err := out.Conn.Index(
		".packetbeat-topology", /*index*/
		"server-ip",            /*type*/
		name,                   /* id */
		params,                 /* parameters */
		PublishedTopology{name, strings.Join(localAddrs, ",")} /* body */)

	if err != nil {
		logp.Err("Fail to publish IP addresses: %s", err)
		return err
	}

	out.UpdateLocalTopologyMap()

	return nil
}
Example #7
func (l *RedisInput) averageSortedEvents(sorted_events map[string][]common.MapStr) ([]common.MapStr, error) {
	var output_events []common.MapStr
	var merged_event common.MapStr
	metric_value := 0.0
	for _, events := range sorted_events {
		metric_value = 0.0
		merged_event = common.MapStr{}
		for _, event := range events {
			merged_event.Update(event)
			logp.Debug("groupstuff", "metric value: %v", event["metric_value"])
			metric_value_string, ok := event["metric_value"].(string)
			if !ok {
				logp.Err("metric_value is not a string in: %v", event)
				continue
			}
			// ParseFloat's bitSize must be 32 or 64; the original 65
			// silently behaved as 64
			metric_value_float, err := strconv.ParseFloat(metric_value_string, 64)
			if err != nil {
				logp.Err("Error parsing metric_value: %v", err)
				continue
			}
			metric_value += metric_value_float
		}
		logp.Debug("groupstuff", "the summed values is %v", metric_value)
		logp.Debug("groupstuff", "the length is %v", float64(len(events)))
		metric_value = metric_value / float64(len(events))
		logp.Debug("groupstuff", "the avg value is %v", metric_value)
		merged_event["metric_value"] = metric_value
		output_events = append(output_events, merged_event)
	}
	return output_events, nil
}
Example #8
// Filter expands a JSON-encoded "message" field into the event itself.
func (jsonexpander *JSONExpander) Filter(event common.MapStr) (common.MapStr, error) {
	logp.Debug("jsonexpander", "Attempting to expand: %v", event)

	text_string, ok := event["message"].(*string)
	if !ok {
		return event, errors.New("Message field is not a *string")
	}

	if isJSONString(*text_string) {
		data := []byte(*text_string)
		if err := json.Unmarshal(data, &event); err != nil {
			// logp.Err takes a format string, not a tag like logp.Debug
			logp.Err("Could not expand JSON data: %v", err)
			return event, nil
		}
	} else {
		logp.Debug("jsonexpander", "Message does not appear to be JSON data: %s", *text_string)
	}

	event.EnsureTimestampField(time.Now)

	logp.Debug("jsonexpander", "Final Event: %v", event)
	return event, nil
}
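isJSONString is referenced above but not shown. A minimal sketch, assuming a simple validity probe is all that is needed (json.Valid is available in the standard library from Go 1.9 onward):

func isJSONString(s string) bool {
	// a cheap syntactic check; it does not decode the document
	return json.Valid([]byte(s))
}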
Example #9
func (reader *ReaderType) PrintReaderEvent(event common.MapStr) {
	// use a name that doesn't shadow the json package
	jsonEvent, err := json.MarshalIndent(event, "", "  ")
	if err != nil {
		logp.Err("json.Marshal: %s", err)
	} else {
		logp.Debug("reader", "Reader: %s", string(jsonEvent))
	}
}
Example #10
// On Windows this creates a loop that only finishes when
// a Stop or Shutdown request is received. On non-Windows
// platforms, the function does nothing. The stopCallback
// function is called when the Stop/Shutdown request is
// received.
func ProcessWindowsControlEvents(stopCallback func()) {
	err := svc.Run(os.Args[0], &beatService{})
	if err != nil {
		logp.Err("Error: %v", err)
	} else {
		stopCallback()
	}
}
Example #11
func PrintPublishEvent(event common.MapStr) {
	// use a name that doesn't shadow the json package
	jsonEvent, err := json.MarshalIndent(event, "", "  ")
	if err != nil {
		logp.Err("json.Marshal: %s", err)
	} else {
		logp.Debug("publish", "Publish: %s", string(jsonEvent))
	}
}
Example #12
func (publisher *PublisherType) publishFromQueue() {
	for mapstr := range publisher.Queue {
		err := publisher.publishEvent(mapstr)
		if err != nil {
			logp.Err("Publishing failed: %v", err)
		}
	}
}
Example #13
// Flush the events buffered in bulkChannel to Elasticsearch with a single
// bulk API call. The channel is closed first so the Bulk call can drain it:
// closing a buffered channel does not discard the values already queued.
func (out *ElasticsearchOutput) InsertBulkMessage(bulkChannel chan interface{}) {
	close(bulkChannel)
	go func(channel chan interface{}) {
		_, err := out.Conn.Bulk("", "", nil, channel)
		if err != nil {
			logp.Err("Failed to perform many index operations in a single API call: %s", err)
		}
	}(bulkChannel)
}
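The close-before-drain order works because a closed buffered channel still delivers everything already queued; receive only reports "closed" once the buffer is empty. A tiny self-contained illustration of that property (not part of the original input):

func demoCloseThenDrain() {
	ch := make(chan interface{}, 3)
	ch <- "a"
	ch <- "b"
	close(ch)
	for v := range ch { // still receives "a" and "b", then the loop ends
		fmt.Println(v)
	}
}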
Example #14
func writeHeapProfile(filename string) {
	f, err := os.Create(filename)
	if err != nil {
		logp.Err("Failed creating file %s: %s", filename, err)
		return
	}
	if err := pprof.WriteHeapProfile(f); err != nil {
		logp.Err("Failed writing heap profile to %s: %s", filename, err)
	}
	f.Close()

	logp.Info("Created memory profile file %s.", filename)
}
Example #15
func (out *KafkaOutput) PublishEvent(ts time.Time, event common.MapStr) error {

	json_event, err := json.Marshal(event)
	if err != nil {
		logp.Err("Failed to convert the event to JSON: %s", err)
		return err
	}

	out.sendingQueue <- KafkaQueueMsg{msg: json_event}

	logp.Debug("output_kafka", "Publish event")
	return nil
}
Example #16
func (reader *ReaderType) Run(output chan common.MapStr) error {
	logp.Info("Attempting to start %d inputs", len(reader.Input))

	for _, plugin := range reader.Input {
		err := plugin.Run(output)
		if err != nil {
			logp.Err("Fail to start input plugin %s : %s", plugin.InputType(), err)
			return err
		} else {
			logp.Info("Started input plugin %s", plugin.InputType())
		}
	}
	return nil
}
Example #17
func (l *TcpInput) Run(output chan common.MapStr) error {
	logp.Info("[TcpInput] Running TCP Input")
	server, err := net.Listen("tcp", ":"+strconv.Itoa(l.Port))
	if err != nil {
		logp.Err("couldn't start listening: " + err.Error())
		return err
	}
	logp.Info("[TcpInput] Listening on port %d", l.Port)

	// dispatch the master listen thread
	go func(server net.Listener) {
		for {
			// accept incoming connections
			conn, err := server.Accept()
			if err != nil {
				logp.Err("Error accepting: ", err.Error())
			} else {
				// dispatch individual connection handler threads
				go l.handleConn(conn, output)
			}
		}
	}(server)
	return nil
}
Example #18
// Goroutine that reads the objects from the FiltersQueue,
// executes all filters on them and writes the modified objects
// in the results channel.
func (runner *FilterRunner) Run() error {
outer:
	for event := range runner.FiltersQueue {
		for _, plugin := range runner.order {
			var err error
			event, err = plugin.Filter(event)
			if err != nil {
				logp.Err("Error executing filter %s: %v. Dropping event.", plugin, err)
				// a bare break would still ship the half-filtered event
				// below; continue the outer loop to actually drop it
				continue outer
			}
		}

		runner.results <- event
	}
	return nil
}
Example #19
func (out *FileOutput) PublishEvent(ts time.Time, event common.MapStr) error {

	json_event, err := json.Marshal(event)
	if err != nil {
		logp.Err("Fail to convert the event to JSON: %s", err)
		return err
	}

	return out.rotator.WriteLine(json_event)
}
Example #20
// CheckReopen periodically reopens the file, in case it has been rolled.
// It reports whether a reopen happened so the caller can rebuild any
// bufio.Reader still wrapping the old *os.File (see Example #2).
func (l *TailInput) CheckReopen() bool {
	if time.Since(l.LastOpen) <= l.RollTime {
		return false
	}

	l.FileP.Close()
	var err error
	l.FileP, err = os.Open(l.FileName)
	if err != nil {
		logp.Err("Error opening file " + err.Error())
		return false
	}

	// this time we do not seek to the end,
	// since in the case of a roll we want to capture everything
	l.offset = 0
	l.LastOpen = time.Now()
	return true
}
Example #21
func (reader *ReaderType) Init(inputMap map[string]inputs.MothershipConfig) error {
	logp.Info("reader input config", inputMap)

	var globalConf inputs.MothershipConfig

	for inputId, config := range inputMap {
		// default instance 0
		inputName, instance := inputId, "0"
		if strings.Contains(inputId, "_") {
			// otherwise grok tcp_2 as inputName = tcp, instance = 2
			sv := strings.Split(inputId, "_")
			inputName, instance = sv[0], sv[1]
		}
		logp.Info("input type: %s instance: %s", inputName, instance)
		logp.Debug("reader", "instance config: %v", config)

		// handling for "global" config section
		if inputName == "global" {
			logp.Info("global input configuration read")
			globalConf = config
		}

		plugin := newInputInstance(inputName)
		if plugin != nil && config.Enabled {
			config.Normalize(globalConf)
			err := plugin.Init(config)
			if err != nil {
				logp.Err("Fail to initialize %s plugin as input: %s", inputName, err)
				return err
			} else {
				logp.Info("Initialized %s plugin as input", inputName)
			}
			reader.Input = append(reader.Input, plugin)
		}
	}

	if len(reader.Input) == 0 {
		logp.Info("No inputs are defined. Please define one under the input section.")
		return errors.New("No inputs are defined. Please define one under the input section.")
	}
	logp.Info("%d inputs defined", len(reader.Input))

	return nil
}
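Because Go randomizes map iteration order, the "global" section may be visited after some of the inputs it is supposed to normalize. One way to make this deterministic is to extract the global section in a first pass; a sketch of a hypothetical refactor, not the original code:

func splitGlobal(inputMap map[string]inputs.MothershipConfig) (inputs.MothershipConfig, map[string]inputs.MothershipConfig) {
	var globalConf inputs.MothershipConfig
	rest := make(map[string]inputs.MothershipConfig, len(inputMap))
	for inputId, config := range inputMap {
		if inputId == "global" {
			globalConf = config
			continue // keep the global section out of the per-input map
		}
		rest[inputId] = config
	}
	return globalConf, rest
}

Init could then call splitGlobal first and range only over rest, independent of iteration order.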
Example #22
func (publisher *PublisherType) GetServerName(ip string) string {
	// in case the IP is localhost, return the current shipper name
	islocal, err := common.IsLoopback(ip)
	if err != nil {
		logp.Err("Parsing IP %s fails with: %s", ip, err)
		return ""
	}
	if islocal {
		return publisher.name
	}
	// find the shipper with the desired IP
	if publisher.TopologyOutput != nil {
		return publisher.TopologyOutput.GetNameByIP(ip)
	}
	return ""
}
Example #23
func (out *KafkaOutput) newProducer() (sarama.AsyncProducer, error) {

	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForLocal     // Only wait for the leader to ack
	config.Producer.Compression = sarama.CompressionSnappy // Compress messages
	config.Producer.Flush.Frequency = out.FlushInterval
	config.Producer.Return.Errors = true
	config.Net.DialTimeout = out.Timeout
	config.Net.ReadTimeout = out.Timeout
	config.Net.WriteTimeout = out.Timeout

	producer, err := sarama.NewAsyncProducer(out.BrokerList, config)
	if err != nil {
		logp.Err("Failed to start Sarama producer: %s", err)
		return nil, err
	}

	return producer, nil
}
Example #24
// Separate events by metric_name, average the values for each metric, and emit the averaged metrics
func (l *RedisInput) GroupEvents(events []common.MapStr) ([]common.MapStr, error) {
	sorted_events := map[string][]common.MapStr{}
	for _, event := range events {
		// the comma-ok form covers both a missing key and a non-string
		// value without panicking
		metric_name, ok := event["metric_name"].(string)
		if !ok {
			logp.Err("No metric_name found for: %v", event)
			continue
		}
		// appending to a nil map entry allocates the slice as needed
		sorted_events[metric_name] = append(sorted_events[metric_name], event)
	}
	output_events, err := l.averageSortedEvents(sorted_events)
	return output_events, err
}
Example #25
// Goroutine that sends one or multiple events to Elasticsearch.
// If the flush_interval > 0, then the events are sent in batches. Otherwise, one by one.
func (out *ElasticsearchOutput) SendMessagesGoroutine() {
	flushChannel := make(<-chan time.Time)

	if out.FlushInterval > 0 {
		flushTicker := time.NewTicker(out.FlushInterval)
		flushChannel = flushTicker.C
	}

	bulkChannel := make(chan interface{}, out.BulkMaxSize)

	for {
		select {
		case msg := <-out.sendingQueue:
			index := fmt.Sprintf("%s-%d.%02d.%02d", out.Index, msg.Ts.Year(), msg.Ts.Month(), msg.Ts.Day())
			if out.FlushInterval > 0 {
				// insert the events in batches
				if len(bulkChannel)+2 > out.BulkMaxSize {
					logp.Debug("output_elasticsearch", "Channel size reached. Calling bulk")
					out.InsertBulkMessage(bulkChannel)
					bulkChannel = make(chan interface{}, out.BulkMaxSize)
				}
				bulkChannel <- map[string]interface{}{
					"index": map[string]interface{}{
						"_index": index,
						"_type":  msg.Event["type"].(string),
					},
				}
				bulkChannel <- msg.Event
			} else {
				// insert the events one by one
				_, err := out.Conn.Index(index, msg.Event["type"].(string), "", nil, msg.Event)
				if err != nil {
					logp.Err("Fail to insert a single event: %s", err)
				}
			}
		case <-flushChannel:
			out.InsertBulkMessage(bulkChannel)
			bulkChannel = make(chan interface{}, out.BulkMaxSize)
		}
	}
}
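The index name above interpolates year/month/day by hand; time.Format expresses the same layout with Go's reference date. An equivalent sketch (the helper name is illustrative):

func indexName(base string, ts time.Time) string {
	// "2006.01.02" is Go's reference date spelled in the desired layout,
	// producing e.g. "packetbeat-2015.06.09"
	return base + "-" + ts.Format("2006.01.02")
}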
Example #26
func (m *beatService) Execute(args []string, r <-chan svc.ChangeRequest, changes chan<- svc.Status) (ssec bool, errno uint32) {

	const cmdsAccepted = svc.AcceptStop | svc.AcceptShutdown
	changes <- svc.Status{State: svc.StartPending}
	changes <- svc.Status{State: svc.Running, Accepts: cmdsAccepted}

loop:
	for c := range r {
		switch c.Cmd {
		case svc.Interrogate:
			changes <- c.CurrentStatus
			// Testing deadlock from https://code.google.com/p/winsvc/issues/detail?id=4
			time.Sleep(100 * time.Millisecond)
			changes <- c.CurrentStatus
		case svc.Stop, svc.Shutdown:
			break loop
		default:
			logp.Err("Unexpected control request: $%d. Ignored.", c)
		}
	}
	changes <- svc.Status{State: svc.StopPending}
	return
}
Example #27
func (publisher *PublisherType) PublishTopology(params ...string) error {

	localAddrs := params

	if len(params) == 0 {
		addrs, err := common.LocalIpAddrsAsStrings(false)
		if err != nil {
			logp.Err("Getting local IP addresses fails with: %s", err)
			return err
		}
		localAddrs = addrs
	}

	if publisher.TopologyOutput != nil {
		logp.Debug("publish", "Add topology entry for %s: %s", publisher.name, localAddrs)

		err := publisher.TopologyOutput.PublishIPs(publisher.name, localAddrs)
		if err != nil {
			return err
		}
	}

	return nil
}
Example #28
func (publisher *PublisherType) Init(outputs map[string]outputs.MothershipConfig, shipper ShipperConfig) error {
	var err error
	publisher.IgnoreOutgoing = shipper.Ignore_outgoing

	publisher.disabled = *publishDisabled
	if publisher.disabled {
		logp.Info("Dry run mode. All output types except the file based one are disabled.")
	}

	publisher.GeoLite = common.LoadGeoIPData(shipper.Geoip)

	for outputId, plugin := range EnabledOutputPlugins {
		outputName := outputId.String()
		output, exists := outputs[outputName]
		if exists && output.Enabled && !publisher.disabled {
			err := plugin.Init(output, shipper.Topology_expire)
			if err != nil {
				logp.Err("Fail to initialize %s plugin as output: %s", outputName, err)
				return err
			}
			publisher.Output = append(publisher.Output, plugin)

			if output.Save_topology {
				if publisher.TopologyOutput != nil {
					logp.Err("Multiple outputs defined to store topology. Please add save_topology = true option only for one output.")
					return errors.New("Multiple outputs defined to store topology")
				}
				publisher.TopologyOutput = plugin
				logp.Info("Using %s to store the topology", outputName)
			}
		}
	}

	if !publisher.disabled {
		if len(publisher.Output) == 0 {
			logp.Info("No outputs are defined. Please define one under the shipper->output section.")
			return errors.New("No outputs are defined. Please define one under the shipper->output section.")
		}

		if publisher.TopologyOutput == nil {
			logp.Warn("No output is defined to store the topology. The server fields might not be filled.")
		}
	}

	publisher.name = shipper.Name
	if len(publisher.name) == 0 {
		// use the hostname
		publisher.name, err = os.Hostname()
		if err != nil {
			return err
		}

		logp.Info("No shipper name configured, using hostname '%s'", publisher.name)
	}

	publisher.tags = shipper.Tags

	if !publisher.disabled && publisher.TopologyOutput != nil {
		RefreshTopologyFreq := 10 * time.Second
		if shipper.Refresh_topology_freq != 0 {
			RefreshTopologyFreq = time.Duration(shipper.Refresh_topology_freq) * time.Second
		}
		publisher.RefreshTopologyTimer = time.Tick(RefreshTopologyFreq)
		logp.Info("Topology map refreshed every %s", RefreshTopologyFreq)

		// register shipper and its public IP addresses
		err = publisher.PublishTopology()
		if err != nil {
			logp.Err("Failed to publish topology: %s", err)
			return err
		}

		// update topology periodically
		go publisher.UpdateTopologyPeriodically()
	}

	publisher.Queue = make(chan common.MapStr, 10000)
	go publisher.publishFromQueue()

	return nil
}
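time.Tick keeps its underlying ticker alive for the life of the process, which is fine for a singleton publisher but leaks if Init can run more than once. A NewTicker-based variant that can be stopped, as a sketch (the stop plumbing is hypothetical, not part of the original code):

func startTopologyRefresh(freq time.Duration, refresh func(), stop <-chan struct{}) {
	ticker := time.NewTicker(freq)
	defer ticker.Stop() // releases the ticker, unlike time.Tick
	for {
		select {
		case <-ticker.C:
			refresh()
		case <-stop:
			return
		}
	}
}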
Example #29
func (publisher *PublisherType) publishEvent(event common.MapStr) error {

	// the timestamp is mandatory
	ts, ok := event["timestamp"].(common.Time)
	if !ok {
		return errors.New("Missing 'timestamp' field from event.")
	}

	// the count is mandatory
	err := event.EnsureCountField()
	if err != nil {
		return err
	}

	// the type is mandatory
	_, ok = event["type"].(string)
	if !ok {
		return errors.New("Missing 'type' field from event.")
	}

	var src_server, dst_server string
	src, ok := event["src"].(*common.Endpoint)
	if ok {
		src_server = publisher.GetServerName(src.Ip)
		event["client_ip"] = src.Ip
		event["client_port"] = src.Port
		event["client_proc"] = src.Proc
		event["client_server"] = src_server
		delete(event, "src")
	}
	dst, ok := event["dst"].(*common.Endpoint)
	if ok {
		dst_server = publisher.GetServerName(dst.Ip)
		event["ip"] = dst.Ip
		event["port"] = dst.Port
		event["proc"] = dst.Proc
		event["server"] = dst_server
		delete(event, "dst")
	}

	if publisher.IgnoreOutgoing && dst_server != "" &&
		dst_server != publisher.name {
		// duplicated transaction -> ignore it
		logp.Debug("publish", "Ignore duplicated transaction on %s: %s -> %s", publisher.name, src_server, dst_server)
		return nil
	}

	event["shipper"] = publisher.name
	if len(publisher.tags) > 0 {
		event["tags"] = publisher.tags
	}

	if publisher.GeoLite != nil {
		real_ip, exists := event["real_ip"]
		if exists && len(real_ip.(string)) > 0 {
			loc := publisher.GeoLite.GetLocationByIP(real_ip.(string))
			if loc != nil && loc.Latitude != 0 && loc.Longitude != 0 {
				event["client_location"] = fmt.Sprintf("%f, %f", loc.Latitude, loc.Longitude)
			}
		} else if len(src_server) == 0 && src != nil { // only for external IP addresses
			loc := publisher.GeoLite.GetLocationByIP(src.Ip)
			if loc != nil && loc.Latitude != 0 && loc.Longitude != 0 {
				event["client_location"] = fmt.Sprintf("%f, %f", loc.Latitude, loc.Longitude)
			}
		}
	}

	if logp.IsDebug("publish") {
		PrintPublishEvent(event)
	}

	// add transaction
	has_error := false
	if !publisher.disabled {
		for _, out := range publisher.Output {
			err := out.PublishEvent(time.Time(ts), event)
			if err != nil {
				logp.Err("Failed to publish event on output %s: %v", out, err)
				has_error = true
			}
		}
	}

	if has_error {
		return errors.New("Fail to publish event")
	}
	return nil
}
Example #30
// Initialize Elasticsearch as output
func (out *ElasticsearchOutput) Init(config outputs.MothershipConfig, topology_expire int) error {

	if len(config.Protocol) == 0 {
		config.Protocol = "http"
	}

	var urls []string

	if len(config.Hosts) > 0 {
		// use hosts setting
		for _, host := range config.Hosts {
			url := fmt.Sprintf("%s://%s%s", config.Protocol, host, config.Path)
			urls = append(urls, url)
		}
	} else {
		// use host and port settings
		url := fmt.Sprintf("%s://%s:%d%s", config.Protocol, config.Host, config.Port, config.Path)
		urls = append(urls, url)
	}

	es := NewElasticsearch(urls, config.Username, config.Password)
	out.Conn = es

	if config.Index != "" {
		out.Index = config.Index
	} else {
		out.Index = "packetbeat"
	}

	out.TopologyExpire = 15000
	if topology_expire != 0 {
		out.TopologyExpire = topology_expire /*sec*/ * 1000 // millisec
	}

	out.FlushInterval = 1000 * time.Millisecond
	if config.Flush_interval != nil {
		out.FlushInterval = time.Duration(*config.Flush_interval) * time.Millisecond
	}
	out.BulkMaxSize = 10000
	if config.Bulk_size != nil {
		out.BulkMaxSize = *config.Bulk_size
	}

	if config.Max_retries != nil {
		out.Conn.SetMaxRetries(*config.Max_retries)
	}

	logp.Info("[ElasticsearchOutput] Using Elasticsearch %s", urls)
	logp.Info("[ElasticsearchOutput] Using index pattern [%s-]YYYY.MM.DD", out.Index)
	logp.Info("[ElasticsearchOutput] Topology expires after %ds", out.TopologyExpire/1000)
	if out.FlushInterval > 0 {
		logp.Info("[ElasticsearchOutput] Insert events in batches. Flush interval is %s. Bulk size is %d.", out.FlushInterval, out.BulkMaxSize)
	} else {
		logp.Info("[ElasticsearchOutput] Insert events one by one. This might affect the performance of the shipper.")
	}

	if config.Save_topology {
		err := out.EnableTTL()
		if err != nil {
			logp.Err("Fail to set _ttl mapping: %s", err)
			// keep trying in the background
			go func() {
				for {
					err := out.EnableTTL()
					if err == nil {
						break
					}
					logp.Err("Fail to set _ttl mapping: %s", err)
					time.Sleep(5 * time.Second)
				}
			}()
		}
	}

	out.sendingQueue = make(chan EventMsg, 1000)
	go out.SendMessagesGoroutine()

	return nil
}