Example No. 1
// Fluentd format [date source jsonmessage] parser
func FluentdFormatter(logstashType string, tags []string) LineTransform {
	return func(d *LineEvent) *Event {
		//2012-11-22 05:07:51 +0000 lio.home.ubuntu.log.collect.log.vm2: {"message":"runtime error: close of closed channel"}
		if lineParts := bytes.SplitN(d.Data, []byte{':', ' '}, 2); len(lineParts) > 1 {
			if len(lineParts[0]) > 26 {
				u.Debugf("%s %s", string(lineParts[0]), string(lineParts[1]))
				bsrc := lineParts[0][26:]
				bdate := lineParts[0][0:25]
				var msg map[string]interface{}
				if err := json.Unmarshal(lineParts[1], &msg); err == nil {
					if t, err := time.Parse("2006-01-02 15:04:05 -0700", string(bdate)); err == nil {
						evt := NewTsEvent(logstashType, string(bsrc), "", t)
						if msgi, ok := msg["message"]; ok {
							if msgS, ok := msgi.(string); ok {
								evt.Message = msgS
								delete(msg, "message")
							}
						}
						evt.Tags = tags
						evt.Fields = msg
						return evt
					} else {
						u.Debugf("%v", err)
						return NewEvent(logstashType, string(bsrc), string(lineParts[1]))
					}

				} else {
					u.Warnf("bad message? %v", err)
				}

			}
		}
		return nil
	}
}
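For reference, FluentdFormatter's split-and-parse steps can be exercised on their own. A minimal, standard-library-only sketch using the sample line from the comment above (an illustration, not code from the project):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"time"
)

func main() {
	raw := []byte(`2012-11-22 05:07:51 +0000 lio.home.ubuntu.log.collect.log.vm2: {"message":"runtime error: close of closed channel"}`)

	// Split the "date source" prefix from the JSON payload on the first ": ".
	parts := bytes.SplitN(raw, []byte{':', ' '}, 2)

	bdate := parts[0][0:25] // "2012-11-22 05:07:51 +0000"
	bsrc := parts[0][26:]   // "lio.home.ubuntu.log.collect.log.vm2"

	t, err := time.Parse("2006-01-02 15:04:05 -0700", string(bdate))
	if err != nil {
		panic(err)
	}

	var msg map[string]interface{}
	if err := json.Unmarshal(parts[1], &msg); err != nil {
		panic(err)
	}
	fmt.Println(t.UTC(), string(bsrc), msg["message"])
}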
Example No. 2
// file format [date source jsonmessage] parser
func FileFormatter(logstashType string, tags []string) LineTransform {
	return func(d *LineEvent) *Event {

		// Don't log out metrics
		if d.IsMetric() {
			return nil
		}
		if len(d.Data) < 10 {
			u.Warn("Invalid line?", string(d.Data))
			return nil
		} else if !d.Ts.IsZero() {
			if d.IsJson() {
				evt := NewTsEvent(logstashType, d.Source, string(d.Data), d.Ts)
				evt.Fields = make(map[string]interface{})
				evt.Fields["codefile"] = d.Prefix
				evt.Fields["host"] = hostName
				evt.Fields["level"] = d.LogLevel
				evt.Fields["WriteErrs"] = d.WriteErrs
				jd := json.RawMessage(d.Data)
				m := make(map[string]interface{})
				if err := json.Unmarshal(d.Data, &m); err == nil {
					evt.Raw = &jd
				}
				return evt
			}
			evt := NewTsEvent(logstashType, d.Source, d.Prefix+" "+string(d.Data), d.Ts)
			evt.Fields = make(map[string]interface{})
			evt.Fields["host"] = hostName
			evt.Fields["codefile"] = d.Prefix
			evt.Fields["level"] = d.LogLevel
			evt.Fields["WriteErrs"] = d.WriteErrs
			return evt

		}
		return nil
	}
}
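Note the json.RawMessage handling above: the original bytes are attached as evt.Raw only when they unmarshal cleanly, so downstream consumers receive the untouched JSON rather than a re-serialized map. A self-contained sketch of that pattern (the struct and field names here are illustrative assumptions):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	data := []byte(`{"user":"bob","latency_ms":42}`)

	// Attach the original bytes as a RawMessage only if they parse as JSON.
	var raw *json.RawMessage
	m := make(map[string]interface{})
	if err := json.Unmarshal(data, &m); err == nil {
		jd := json.RawMessage(data)
		raw = &jd
	}

	// On re-marshal the raw bytes pass through byte-for-byte.
	out, _ := json.Marshal(struct {
		Raw *json.RawMessage `json:"raw,omitempty"`
	}{raw})
	fmt.Println(string(out)) // {"raw":{"user":"bob","latency_ms":42}}
}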
Example No. 3
File: main.go  Project: morya/loges
func main() {
	flag.Parse()
	u.SetupLogging(logLevel)
	u.SetColorIfTerminal() // this doesn't work if reading stdin
	if colorize {
		u.SetColorOutput()
	}

	done := make(chan bool)
	esHostName = cleanEsHost(esHostName)
	// if no tail files were specified, then assume stdin
	if len(flag.Args()) == 0 && source == "tail" {
		source = "stdin"
	}

	u.Debugf("LOGES: filters=%s  es=%s argct=%d source=%v ll=%s  args=%v",
		filters, esHostName, len(flag.Args()), source, logLevel, flag.Args())

	// Set up output first, to ensure it's ready when Source starts
	// TODO: support multiple outputs?
	switch output {
	case "elasticsearch":
		// update the Logstash date for the index occasionally
		go loges.UpdateLogstashIndex()
		// start an elasticsearch bulk worker, for sending to elasticsearch
		go loges.ToElasticSearch(msgChan, "golog", esHostName, ttl, exitIfNoMsgsDur, metricsToEs)
	case "stdout":
		u.Debug("setting output to stdout ", colorize)
		go loges.ToStdout(msgChan, colorize)
	default:
		Usage()
		os.Exit(1)
	}

	// TODO: implement metrics output
	for _, metOut := range strings.Split(metricsOut, ",") {
		switch metOut {
		case "influxdb":
			// todo
		case "graphite":
			u.Infof("Registering Graphite Transform: host=%s prefix=%s", graphiteHost, graphitePrefix)
			loges.TransformRegister(loges.GraphiteTransform(logType, graphiteHost, graphitePrefix, true))
		}
	}

	// now set up the transforms/filters
	for _, filter := range strings.Split(filters, ",") {
		switch filter {
		case "stdfiles":
			loges.TransformRegister(loges.FileFormatter(logType, nil))
		case "fluentd":
			loges.TransformRegister(loges.FluentdFormatter(logType, nil))
		case "kafka":
			// TODO, finish conversion to sarama
			//loges.TransformRegister(kafka.KafkaFormatter)
		}
	}

	for _, sourceInput := range strings.Split(source, ",") {
		u.Warnf("source = %v", sourceInput)
		switch sourceInput {
		case "tail":
			for _, filename := range flag.Args() {
				tailDone := make(chan bool)
				go loges.TailFile(filename, tail.Config{Follow: true, ReOpen: true}, tailDone, msgChan)
			}
		case "http":
			go loges.HttpRun(httpPort, msgChan)
		//case "kafka":
		//	go kafka.RunKafkaConsumer(msgChan, partitionstr, topic, kafkaHost, offset, maxMsgCt, maxSize)
		case "stdin":
			go loges.StdinPruducer(msgChan)
		default:
			u.Error(sourceInput)
			println("no valid input source specified; one is required")
			Usage()
			os.Exit(1)
		}
	}
	u.Warn("startup complete, waiting until done")
	<-done
}
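main relies on package-level flag variables (source, output, filters, esHostName, and others) declared elsewhere in the file. A hedged sketch of how those declarations could look; the flag names follow the usage above, but the defaults and help strings are assumptions, not taken from the project:

package main

import (
	"flag"
	"fmt"
)

// Hypothetical flag declarations matching the names main() reads;
// defaults and descriptions are illustrative.
var (
	source     string
	output     string
	filters    string
	esHostName string
)

func init() {
	flag.StringVar(&source, "source", "tail", "input source: tail, stdin, http")
	flag.StringVar(&output, "output", "stdout", "output sink: elasticsearch, stdout")
	flag.StringVar(&filters, "filters", "stdfiles", "comma-separated transforms to apply")
	flag.StringVar(&esHostName, "eshost", "localhost", "elasticsearch host")
}

func main() {
	flag.Parse()
	fmt.Println(source, output, filters, esHostName)
}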
Example No. 4
// file format [date source jsonmessage] parser
func FileFormatter(logstashType string, tags []string) LineTransform {
	loc := time.UTC
	pos := 0
	posEnd := 0
	logLevel := ""

	return func(d *LineEvent) *Event {
		// 2013/05/26 13:07:47.606937 rw.go:70: [INFO] RW service is up
		// 2013/05/26 13:07:47.607 [DEBG] sink       Building sink for kafka from factory method
		line := string(d.Data)

		// Find first square brackets
		pos = strings.IndexRune(line, '[')
		posEnd = strings.IndexRune(line, ']')
		if pos > 0 && posEnd > 0 && posEnd > pos && len(line) > posEnd {
			logLevel = line[pos+1 : posEnd]
		} else {
			logLevel = "NONE"
		}
		// Don't log out these metrics
		if logLevel == "METRIC" || logLevel == "METR" {
			return nil
		}
		if len(line) < 10 {
			u.Warn(line)
			return nil
		} else {
			parts := strings.SplitN(line, " ", 3)
			if len(parts) > 2 {
				datePart := parts[0] + " " + parts[1]
				// "2006/01/02 15:04:05.000000"
				if len(datePart) > 24 {
					if t, err := time.Parse("2006/01/02 15:04:05.000000", datePart); err == nil {
						evt := NewTsEvent(logstashType, d.Source, parts[2], t.In(loc))
						evt.Fields = make(map[string]interface{})
						evt.Fields["host"] = hostName
						evt.Fields["level"] = logLevel
						evt.Fields["WriteErrs"] = d.WriteErrs
						return evt
					}
				} else {
					if t, err := time.Parse("2006/01/02 15:04:05", datePart); err == nil {
						evt := NewTsEvent(logstashType, d.Source, parts[2], t.In(loc))
						evt.Fields = make(map[string]interface{})
						evt.Fields["host"] = hostName
						evt.Fields["level"] = logLevel
						evt.Fields["WriteErrs"] = d.WriteErrs
						return evt
					}
				}
				evt := NewTsEvent(logstashType, d.Source, line, time.Now())
				evt.Fields = make(map[string]interface{})
				evt.Fields["host"] = hostName
				evt.Fields["level"] = logLevel
				evt.Fields["WriteErrs"] = d.WriteErrs
				return evt
			}
		}

		return nil
	}
}
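The bracket scan and the two date layouts above can be tested in isolation. A small standalone sketch using the sample lines from the comments; note that Go's time.Parse accepts a fractional second in the input even when the layout omits it, which is why the short layout still handles the ".607" line:

package main

import (
	"fmt"
	"strings"
	"time"
)

// extractLevel mirrors the bracket scan in FileFormatter above.
func extractLevel(line string) string {
	pos := strings.IndexRune(line, '[')
	posEnd := strings.IndexRune(line, ']')
	if pos > 0 && posEnd > pos && len(line) > posEnd {
		return line[pos+1 : posEnd]
	}
	return "NONE"
}

func main() {
	lines := []string{
		"2013/05/26 13:07:47.606937 rw.go:70: [INFO] RW service is up",
		"2013/05/26 13:07:47.607 [DEBG] sink       Building sink for kafka from factory method",
	}
	for _, line := range lines {
		parts := strings.SplitN(line, " ", 3)
		datePart := parts[0] + " " + parts[1]
		layout := "2006/01/02 15:04:05" // fractional seconds are still accepted when parsing
		if len(datePart) > 24 {
			layout = "2006/01/02 15:04:05.000000" // exactly six fractional digits
		}
		t, err := time.Parse(layout, datePart)
		fmt.Println(extractLevel(line), t, err)
	}
}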