// Filter expands a JSON-encoded "message" field into top-level event fields.
func (jsonexpander *JSONExpander) Filter(event common.MapStr) (common.MapStr, error) {
    logp.Debug("jsonexpander", "Attempting to expand: %v", event)

    // Guard the type assertion so a missing or non-string "message"
    // field does not panic the filter.
    var text_string string
    switch msg := event["message"].(type) {
    case string:
        text_string = msg
    case *string:
        text_string = *msg
    default:
        return event, errors.New("event is missing a usable 'message' field")
    }

    if isJSONString(text_string) {
        data := []byte(text_string)
        if err := json.Unmarshal(data, &event); err != nil {
            logp.Err("jsonexpander: could not expand json data: %v", err)
            return event, nil
        }
    } else {
        logp.Debug("jsonexpander", "Message does not appear to be JSON data: %s", text_string)
    }

    now := func() time.Time { return time.Now() }
    event.EnsureTimestampField(now)

    logp.Debug("jsonexpander", "Final Event: %v", event)
    return event, nil
}
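// isJSONString is called above but is not shown in this excerpt. A minimal
// sketch, assuming all it needs to do is report whether the string parses as
// JSON (the name matches the call site; the body is an assumption, not the
// original helper):
func isJSONString(s string) bool {
    var js interface{}
    return json.Unmarshal([]byte(s), &js) == nil
}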
// doStuff tails l.FileName from its current end and ships one event per line.
func (l *TailInput) doStuff(output chan common.MapStr) {
    now := func() time.Time { return time.Now() }

    var line uint64 = 0
    var read_timeout = 30 * time.Second

    // Open the file. Basic error handling: if we hit an error, log and
    // return, which ends this goroutine without impacting the other inputs.
    f, err := os.Open(l.FileName)
    if err != nil {
        logp.Err("Error opening file " + err.Error())
        return
    }
    l.FileP = f

    // Seek to the end of the file. The offset tracks the actual file
    // offset and is initialized to the end of the file at open time.
    l.offset, err = l.FileP.Seek(0, os.SEEK_END)
    if err != nil {
        logp.Err("Error seeking in file " + err.Error())
        return
    }
    l.LastOpen = time.Now()

    buffer := new(bytes.Buffer)
    reader := bufio.NewReader(l.FileP)

    for {
        l.CheckReopen()
        text, bytesread, err := readline(reader, buffer, read_timeout)
        if err != nil && err != io.EOF {
            // EOF errors are expected, since we are tailing the file.
            logp.Err("Error reading file " + err.Error())
            return
        }
        if bytesread > 0 {
            l.offset += int64(bytesread)
            line++

            event := common.MapStr{}
            event["filename"] = l.FileName
            event["line"] = line
            event["message"] = text
            event["offset"] = l.offset
            event["type"] = l.Type
            event.EnsureTimestampField(now)
            event.EnsureCountField()

            logp.Debug("tailinput", "InputEvent: %v", event)
            output <- event // ship the new event downstream
        }
    }
}
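// readline is used above (and the stdin and TCP inputs below call an
// equivalent l.readline method), but its body is not part of this excerpt.
// One possible sketch, under the assumption that it accumulates partial reads
// in the shared buffer until a newline arrives and gives up after eof_timeout
// of repeated EOFs while tailing; the real helper may differ:
func readline(reader *bufio.Reader, buffer *bytes.Buffer, eof_timeout time.Duration) (string, int, error) {
    start := time.Now()
    for {
        segment, err := reader.ReadBytes('\n')
        if len(segment) > 0 {
            buffer.Write(segment)
        }
        if err == io.EOF {
            // No complete line yet: keep waiting for data until the timeout expires.
            if time.Since(start) > eof_timeout {
                return "", 0, err
            }
            time.Sleep(100 * time.Millisecond)
            continue
        }
        if err != nil {
            return "", 0, err
        }
        // A full line (ending in '\n') is buffered; hand it back and reset.
        size := buffer.Len()
        text := strings.TrimRight(buffer.String(), "\r\n")
        buffer.Reset()
        return text, size, nil
    }
}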
// averageSortedEvents merges each group of events into a single event whose
// metric_value is the mean of the group's metric_values.
func (l *RedisInput) averageSortedEvents(sorted_events map[string][]common.MapStr) ([]common.MapStr, error) {
    var output_events []common.MapStr

    for _, events := range sorted_events {
        metric_value := 0.0
        merged_event := common.MapStr{}

        for _, event := range events {
            merged_event.Update(event)
            logp.Debug("groupstuff", "metric value: %v", event["metric_value"])

            metric_value_string, ok := event["metric_value"].(string)
            if !ok {
                logp.Err("metric_value is not a string: %v", event["metric_value"])
                continue
            }
            // bitSize 64 == float64
            metric_value_float, err := strconv.ParseFloat(metric_value_string, 64)
            if err != nil {
                logp.Err("Error parsing metric_value: %v", err)
                continue
            }
            metric_value += metric_value_float
        }

        logp.Debug("groupstuff", "the summed value is %v", metric_value)
        logp.Debug("groupstuff", "the length is %v", float64(len(events)))
        if len(events) > 0 {
            metric_value = metric_value / float64(len(events))
        }
        logp.Debug("groupstuff", "the avg value is %v", metric_value)

        merged_event["metric_value"] = metric_value
        output_events = append(output_events, merged_event)
    }
    return output_events, nil
}
// doStuff queries the local RPM database and ships a single event containing
// the installed package list.
func (l *PackagesInput) doStuff(output chan common.MapStr) {
    now := func() time.Time { return time.Now() }

    // Construct the event and write it to the channel.
    event := common.MapStr{}
    event["message"] = "packages event"
    event["type"] = l.Type
    event.EnsureTimestampField(now)
    event.EnsureCountField()

    cmd := exec.Command("/bin/rpm", "-qa", "--queryformat", "%{NAME}:::%{VERSION}:::%{ARCH}##")
    var out bytes.Buffer
    cmd.Stdout = &out
    if err := cmd.Run(); err != nil {
        logp.Err("Error running rpm: %v", err)
        return
    }

    // Each package is terminated by "##", with fields separated by ":::".
    items := strings.Split(out.String(), "##")
    rpmList := make([]RPMPackage, 0)
    for _, line := range items {
        item := strings.Split(line, ":::")
        if len(item) < 3 {
            continue
        }
        pkg := RPMPackage{
            Name:    item[0],
            Version: item[1],
            Arch:    item[2],
        }
        rpmList = append(rpmList, pkg)
    }

    event["packages"] = rpmList
    output <- event
}
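// RPMPackage is used above but its definition is not part of this excerpt.
// A minimal sketch matching the three fields populated above (the field names
// come from the struct literal; anything beyond that is an assumption):
type RPMPackage struct {
    Name    string
    Version string
    Arch    string
}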
// doStuff reads newline-delimited events from stdin and ships one event per line.
func (l *StdinInput) doStuff(output chan common.MapStr) {
    reader := bufio.NewReader(os.Stdin)
    buffer := new(bytes.Buffer)

    var source string = fmt.Sprintf("%s:%s", os.Getenv("REMOTE_HOST"), os.Getenv("REMOTE_PORT"))
    var ssl_client_dn string = os.Getenv("SSL_CLIENT_DN")
    var offset int64 = 0
    var line uint64 = 0
    var read_timeout = 10 * time.Second

    logp.Debug("stdinput", "Handling New Connection from %s", source)

    now := func() time.Time { return time.Now() }

    for {
        text, bytesread, err := l.readline(reader, buffer, read_timeout)
        if err != nil {
            logp.Info("Unexpected state reading from %v; error: %s\n", ssl_client_dn, err)
            break
        }
        logp.Debug("stdinputlines", "New Line: %s", text)

        line++

        event := common.MapStr{}
        event["ssl_client_dn"] = &ssl_client_dn
        event["source"] = &source
        event["offset"] = offset
        event["line"] = line
        event["message"] = text
        event["type"] = l.Type
        event.EnsureTimestampField(now)
        event.EnsureCountField()

        offset += int64(bytesread)

        logp.Debug("stdinput", "InputEvent: %v", event)
        output <- event // ship the new event downstream
        os.Stdout.Write([]byte("OK"))
    }
    logp.Debug("stdinput", "Closed Connection from %s", source)
}
// handleConn reads newline-delimited events from a TCP client and ships one event per line.
func (l *TcpInput) handleConn(client net.Conn, output chan common.MapStr) {
    reader := bufio.NewReader(client)
    buffer := new(bytes.Buffer)

    var source string = client.RemoteAddr().String()
    var offset int64 = 0
    var line uint64 = 0
    var read_timeout = 10 * time.Second

    logp.Debug("tcpinput", "Handling New Connection from %s", source)

    now := func() time.Time { return time.Now() }

    for {
        text, bytesread, err := l.readline(reader, buffer, read_timeout)
        if err != nil {
            logp.Info("Unexpected state reading from %v; error: %s\n", source, err)
            break
        }
        logp.Debug("tcpinputlines", "New Line: %s", text)

        line++

        event := common.MapStr{}
        event["source"] = &source
        event["offset"] = offset
        event["line"] = line
        event["message"] = text
        event["type"] = l.Type
        event.EnsureTimestampField(now)
        event.EnsureCountField()

        offset += int64(bytesread)

        logp.Debug("tcpinput", "InputEvent: %v", event)
        output <- event // ship the new event downstream
        client.Write([]byte("OK"))
    }
    logp.Debug("tcpinput", "Closed Connection from %s", source)
}
// doStuff emits a single placeholder event.
func (l *NullInput) doStuff(output chan common.MapStr) {
    now := func() time.Time { return time.Now() }

    // Construct the event and write it to the channel.
    event := common.MapStr{}
    text := "null event"
    event["message"] = &text
    event["type"] = l.Type
    event.EnsureTimestampField(now)
    event.EnsureCountField()

    output <- event
}
// handlePacket converts a received UDP datagram into an event and ships it.
func (l *UdpInput) handlePacket(buffer []byte, rlen int, count int, source *net.UDPAddr, output chan common.MapStr) {
    now := func() time.Time { return time.Now() }

    text := string(buffer[0:rlen])
    logp.Debug("udpinputlines", "New Line: %s", text)

    event := common.MapStr{}
    event["source"] = source.String()
    event["offset"] = rlen
    event["line"] = count
    event["message"] = &text
    event["type"] = l.Type
    event.EnsureTimestampField(now)
    event.EnsureCountField()

    logp.Debug("udpinput", "InputEvent: %v", event)
    output <- event // ship the new event downstream
}
// scanProcs walks procfs, collects the command line and details for every
// numeric (PID) entry, and ships a single report event.
func scanProcs(output chan common.MapStr) {
    now := func() time.Time { return time.Now() }

    if !pathExists(procfsdir) {
        return
    }
    ds, err := ioutil.ReadDir(procfsdir)
    if err != nil {
        logp.Err("Error reading %s: %v", procfsdir, err)
        return
    }

    event := common.MapStr{}
    processes := common.MapStr{}
    proc_detail := common.MapStr{}

    // Collect all numeric entries (one per PID).
    for _, d := range ds {
        n := d.Name()
        if isNumeric(n) {
            processes[n] = getCmdline(n)
            proc_detail[n] = getProcDetail(n)
        }
    }

    text := "process report"
    event["message"] = &text
    event["data"] = processes
    event["data_detail"] = proc_detail
    event["type"] = "report"
    event.EnsureTimestampField(now)
    event.EnsureCountField()

    output <- event
}
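// pathExists and isNumeric are used above but are not part of this excerpt.
// Minimal sketches of how they might look (assumptions, not the originals):
func pathExists(path string) bool {
    _, err := os.Stat(path)
    return err == nil
}

func isNumeric(s string) bool {
    _, err := strconv.Atoi(s)
    return err == nil
}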
func (publisher *PublisherType) publishEvent(event common.MapStr) error {
    // The timestamp field is mandatory.
    ts, ok := event["timestamp"].(common.Time)
    if !ok {
        return errors.New("missing 'timestamp' field in event")
    }

    // The count field is mandatory.
    err := event.EnsureCountField()
    if err != nil {
        return err
    }

    // The type field is mandatory.
    _, ok = event["type"].(string)
    if !ok {
        return errors.New("missing 'type' field in event")
    }

    var src_server, dst_server string
    src, ok := event["src"].(*common.Endpoint)
    if ok {
        src_server = publisher.GetServerName(src.Ip)
        event["client_ip"] = src.Ip
        event["client_port"] = src.Port
        event["client_proc"] = src.Proc
        event["client_server"] = src_server
        delete(event, "src")
    }
    dst, ok := event["dst"].(*common.Endpoint)
    if ok {
        dst_server = publisher.GetServerName(dst.Ip)
        event["ip"] = dst.Ip
        event["port"] = dst.Port
        event["proc"] = dst.Proc
        event["server"] = dst_server
        delete(event, "dst")
    }

    if publisher.IgnoreOutgoing && dst_server != "" && dst_server != publisher.name {
        // Duplicated transaction -> ignore it.
        logp.Debug("publish", "Ignore duplicated transaction on %s: %s -> %s", publisher.name, src_server, dst_server)
        return nil
    }

    event["shipper"] = publisher.name
    if len(publisher.tags) > 0 {
        event["tags"] = publisher.tags
    }

    if publisher.GeoLite != nil {
        real_ip, exists := event["real_ip"]
        if exists && len(real_ip.(string)) > 0 {
            loc := publisher.GeoLite.GetLocationByIP(real_ip.(string))
            if loc != nil && loc.Latitude != 0 && loc.Longitude != 0 {
                event["client_location"] = fmt.Sprintf("%f, %f", loc.Latitude, loc.Longitude)
            }
        } else if len(src_server) == 0 && src != nil {
            // Only for external IP addresses.
            loc := publisher.GeoLite.GetLocationByIP(src.Ip)
            if loc != nil && loc.Latitude != 0 && loc.Longitude != 0 {
                event["client_location"] = fmt.Sprintf("%f, %f", loc.Latitude, loc.Longitude)
            }
        }
    }

    if logp.IsDebug("publish") {
        PrintPublishEvent(event)
    }

    // Publish the transaction to every configured output.
    has_error := false
    if !publisher.disabled {
        for i := 0; i < len(publisher.Output); i++ {
            err := publisher.Output[i].PublishEvent(time.Time(ts), event)
            if err != nil {
                logp.Err("Failed to publish event type on output %s: %v", publisher.Output[i], err)
                has_error = true
            }
        }
    }

    if has_error {
        return errors.New("failed to publish event")
    }

    return nil
}
// handleConn reads an event batch from a Redis key, folds the per-metric
// values into a single event, and ships it downstream.
func (l *RedisInput) handleConn(server redis.Conn, output chan common.MapStr, key string) (uint64, error) {
    event_slice, offset, timestamp, tag_string, err := l.readKey(server, key)
    if err != nil {
        logp.Err("Error reading %s: %s\n", key, err)
        return 0, err
    }

    now := func() time.Time { return time.Now() }

    // Tags arrive as a whitespace-separated list of key=value pairs.
    parsed_tags := strings.Fields(tag_string)
    tags := make(map[string]string)
    for _, v := range parsed_tags {
        tag := strings.SplitN(v, "=", 2)
        if len(tag) != 2 {
            continue
        }
        tags[tag[0]] = tag[1]
    }

    t := time.Unix(timestamp, 0)
    data, _ := json.Marshal(t)

    // Check that metric_name and metric_value exist, and collect
    // metric_name -> metric_value pairs from the batch.
    metric_event := common.MapStr{}
    var value string
    for _, event := range event_slice {
        switch v := event["metric_value"].(type) {
        case int, float64, string:
            logp.Debug("redisinput", "metric_value type is %T", v)
            value = fmt.Sprintf("%v", v)
        default:
            logp.Debug("redisinput", "unhandled metric_value type %T", v)
            continue
        }
        name, ok := event["metric_name"].(string)
        if !ok {
            continue
        }
        metric_event[name] = value
    }

    event := common.MapStr{}
    event["name"] = strings.TrimSpace(key)
    event["offset"] = offset
    event["count"] = len(event_slice)
    event["type"] = strings.TrimSpace(l.Type)
    event["tags"] = tags
    event["metrics"] = metric_event
    event["timestamp"] = string(data)
    event.EnsureTimestampField(now)
    event.EnsureCountField()

    logp.Debug("redisinputlines", "event: %v", event)
    if event_slice != nil {
        output <- event // ship the new event downstream
    }
    logp.Debug("redisinput", "Finished reading from %s", key)
    return uint64(len(event_slice)), nil
}
// Run starts UDP and TCP syslog listeners and converts each received
// message into an event on the output channel.
func (l *SyslogInput) Run(output chan common.MapStr) error {
    logp.Debug("sysloginput", "Running Syslog Input")
    logp.Debug("sysloginput", "Listening on %d", l.Port)
    listen := fmt.Sprintf("0.0.0.0:%d", l.Port)

    channel := make(syslog.LogPartsChannel)
    handler := syslog.NewChannelHandler(channel)

    server := syslog.NewServer()
    server.SetFormat(syslog.Automatic)
    server.SetHandler(handler)
    err := server.ListenUDP(listen)
    if err != nil {
        logp.Err("couldn't start ListenUDP: " + err.Error())
        return err
    }
    err = server.ListenTCP(listen)
    if err != nil {
        logp.Err("couldn't start ListenTCP: " + err.Error())
        return err
    }
    err = server.Boot()
    if err != nil {
        logp.Err("couldn't start server.Boot(): " + err.Error())
        return err
    }

    go func(channel syslog.LogPartsChannel, output chan common.MapStr) {
        var line uint64 = 0
        now := func() time.Time { return time.Now() }
        for logParts := range channel {
            logp.Debug("sysloginput", "InputEvent: %v", logParts)
            line++

            event := common.MapStr{}
            event["line"] = line
            event["type"] = l.Type
            for k, v := range logParts {
                event[k] = v
            }
            if client, ok := event["client"].(string); ok {
                event["source"] = client
            }
            if event["message"] != nil {
                message := event["message"].(string)
                event["message"] = &message
            } else if event["content"] != nil {
                message := event["content"].(string)
                event["message"] = &message
            }
            // This syslog parser uses the standard name "tag",
            // which is usually the program that wrote the message.
            // The logstash syslog_pri filter calls this field "program".
            if event["tag"] != nil {
                program := event["tag"].(string)
                event["program"] = &program
            }
            event.EnsureTimestampField(now)
            event.EnsureCountField()

            logp.Debug("sysloginput", "Output Event: %v", event)
            output <- event // ship the new event downstream
        }
    }(channel, output)

    return nil
}
// Print writes the event to standard output.
func (out *StdOutput) Print(event common.MapStr) {
    str := event.String()
    fmt.Println(str)
}