Example #1
// First keyword was INSERT or REPLACE
func (m *Sqlbridge) parseSqlInsert() (*SqlInsert, error) {

	// insert into mytable (id, str) values (0, "a")
	req := NewSqlInsert()
	req.kw = m.Cur().T
	m.Next() // Consume Insert or Replace

	// INTO
	if m.Cur().T != lex.TokenInto {
		return nil, fmt.Errorf("expected INTO but got: %v", m.Cur())
	}
	m.Next() // Consume INTO

	// table name
	switch m.Cur().T {
	case lex.TokenTable:
		req.Table = m.Cur().V
		m.Next()
	default:
		return nil, fmt.Errorf("expected table name but got : %v", m.Cur().V)
	}

	// list of fields
	cols, err := m.parseFieldList()
	if err != nil {
		u.Error(err)
		return nil, err
	}
	req.Columns = cols

	m.Next() // left paren starts list of values
	switch m.Cur().T {
	case lex.TokenValues:
		m.Next() // Consume Values keyword
	case lex.TokenSelect:
		u.Infof("What is cur?%v", m.Cur())
		sel, err := m.parseSqlSelect()
		if err != nil {
			return nil, err
		}
		req.Select = sel
		return req, nil
	default:
		return nil, fmt.Errorf("expected values but got : %v", m.Cur().V)
	}

	//u.Debugf("found ?  %v", m.Cur())
	colVals, err := m.parseValueList()
	if err != nil {
		u.Error(err)
		return nil, err
	}
	req.Rows = colVals
	// we are good
	return req, nil
}
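The parseSql* functions above all lean on a tiny token-cursor API: Cur() peeks at the current token and Next() consumes it. A minimal sketch of what such helpers could look like, assuming the lexed tokens are buffered in a slice (names and layout here are illustrative, not the real qlbridge internals):

// tokenCursor is a hypothetical stand-in for the cursor inside Sqlbridge.
type tokenCursor struct {
	toks []lex.Token // lexed tokens, each carrying a type T and value V
	pos  int
}

// Cur peeks at the current token without consuming it.
func (c *tokenCursor) Cur() lex.Token {
	if c.pos >= len(c.toks) {
		return lex.Token{} // zero token signals end of input
	}
	return c.toks[c.pos]
}

// Next consumes the current token and advances the cursor.
func (c *tokenCursor) Next() lex.Token {
	t := c.Cur()
	c.pos++
	return t
}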
Example #2
// First keyword was UPDATE
func (m *Sqlbridge) parseSqlUpdate() (*SqlUpdate, error) {

	req := NewSqlUpdate()
	m.Next() // Consume UPDATE token

	//u.Debugf("token:  %v", m.Cur())
	switch m.Cur().T {
	case lex.TokenTable, lex.TokenIdentity:
		req.Table = m.Cur().V
	default:
		return nil, fmt.Errorf("expected table name but got : %v", m.Cur().V)
	}
	m.Next()
	if m.Cur().T != lex.TokenSet {
		return nil, fmt.Errorf("expected SET after table name but got : %v", m.Cur().V)
	}

	// list of name=value pairs
	m.Next()
	cols, err := m.parseUpdateList()
	if err != nil {
		u.Error(err)
		return nil, err
	}
	req.Values = cols

	// WHERE
	req.Where, err = m.parseWhere()
	if err != nil {
		return nil, err
	}

	return req, nil
}
Example #3
File: dep.go Project: araddon/depr
// Check if this folder/path is clean, i.e. whether there are changes
// that are uncommitted
func (d *Dep) Clean() bool {
	if err := d.control.CheckClean(d); err != nil {
		u.Error(err)
		return false
	}
	return true
}
Example #4
func RunKafkaConsumer(msgChan chan *loges.LineEvent, partitionstr, topic, kafkaHost string, offset, maxMsgCt uint64, maxSize uint) {
	var broker *kafka.BrokerConsumer

	u.Infof("Connecting to host=%s topic=%s part=%s", kafkaHost, topic, partitionstr)
	parts := strings.Split(partitionstr, ",")
	if len(parts) > 1 {
		tps := kafka.NewTopicPartitions(topic, partitionstr, offset, uint32(maxSize))
		broker = kafka.NewMultiConsumer(kafkaHost, tps)
	} else {
		partition, _ := strconv.Atoi(partitionstr) // error ignored; a non-numeric partition falls back to 0
		broker = kafka.NewBrokerConsumer(kafkaHost, topic, partition, offset, uint32(maxSize))
	}

	var msgCt int
	done := make(chan bool, 1)
	kafkaMsgs := make(chan *kafka.Message)
	go broker.ConsumeOnChannel(kafkaMsgs, 1000, done)
	for msg := range kafkaMsgs {
		if msg != nil {
			msgCt++
			if uint64(msgCt) > maxMsgCt {
				panic("ending")
			}
			//msg.Print()
			msgChan <- &loges.LineEvent{Data: msg.Payload(), Offset: msg.Offset(), Item: msg}
		} else {
			u.Error("No kafka message?")
			break
		}
	}
}
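A hedged usage sketch for the consumer above; the host, topic, partitions, and limits are made-up values, and the receiving loop mirrors how LineEvent channels are drained elsewhere in these examples:

msgChan := make(chan *loges.LineEvent, 100)
go RunKafkaConsumer(msgChan, "0,1", "logs", "localhost:9092", 0, 1000000, 1048576)
for msg := range msgChan {
	u.Infof("offset=%d len=%d", msg.Offset, len(msg.Data))
}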
Example #5
// First keyword was INSERT
func (m *Sqlbridge) parseSqlInsert() (*SqlInsert, error) {

	// insert into mytable (id, str) values (0, "a")
	req := NewSqlInsert()
	m.Next() // Consume Insert

	// INTO
	//u.Debugf("token:  %v", m.Cur())
	if m.Cur().T != lex.TokenInto {
		return nil, fmt.Errorf("expected INTO but got: %v", m.Cur())
	}

	// table name
	m.Next()
	//u.Debugf("found into?  %v", m.Cur())
	switch m.Cur().T {
	case lex.TokenTable:
		req.Into = m.Cur().V
	default:
		return nil, fmt.Errorf("expected table name but got : %v", m.Cur().V)
	}

	// list of fields
	m.Next()
	if err := m.parseFieldList(req); err != nil {
		u.Error(err)
		return nil, err
	}
	m.Next()
	//u.Debugf("found ?  %v", m.Cur())
	switch m.Cur().T {
	case lex.TokenValues:
		m.Next()
	default:
		return nil, fmt.Errorf("expected values but got : %v", m.Cur().V)
	}
	//u.Debugf("found ?  %v", m.Cur())
	if err := m.parseValueList(req); err != nil {
		u.Error(err)
		return nil, err
	}
	// we are good
	return req, nil
}
Example #6
func (b *BulkIndexor) Update(index string, _type string, id, ttl string, date *time.Time, data interface{}) error {
	//{ "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } }
	by, err := WriteBulkBytes("update", index, _type, id, ttl, date, data)
	if err != nil {
		u.Error(err)
		return err
	}
	b.bulkChannel <- by
	return nil
}
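A sketch of calling Update; the index, type, id, and document below are hypothetical, with an empty ttl and nil date since this example does not show what they expect:

doc := map[string]interface{}{"name": "test", "date": "2014-01-01"}
if err := indexor.Update("myindex", "mytype", "1", "", nil, doc); err != nil {
	u.Error(err)
}

where indexor is an already-running *BulkIndexor.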
Example #7
func TailFile(filename string, config tail.Config, done chan bool, msgChan chan *LineEvent) {
	u.Debug("Watching file ", filename, config)
	t, err := tail.TailFile(filename, config)
	if err != nil {
		u.Error(err)
		return
	}
	//defer func() { done <- true }()
	lineHandler := MakeFileFlattener(filename, msgChan)
	for line := range t.Lines {
		lineHandler(line.Text)
	}
	err = t.Wait()
	if err != nil {
		u.Error(err)
	}
	if err := t.Stop(); err != nil {
		u.Info(err)
	}
}
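Example #15 below shows how this is typically launched; a minimal sketch (the file path is hypothetical):

done := make(chan bool)
msgChan := make(chan *LineEvent, 100)
go TailFile("/var/log/app.log", tail.Config{Follow: true, ReOpen: true}, done, msgChan)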
Example #8
// First keyword was SELECT, so use the SELECT parser rule-set
func (m *Parser) parseSelect(comment string) (*Ast, error) {

	selStmt := SelectStmt{}
	ast := Ast{Comments: comment, Select: &selStmt}
	//u.Infof("Comment:   %v", comment)

	// we have already parsed the SELECT token to get here, so this should be the first column
	m.curToken = m.l.NextToken()
	//u.Debug("FirstToken: ", m.curToken)
	if m.curToken.T != lex.TokenStar {
		if err := m.parseColumns(&selStmt); err != nil {
			u.Error(err)
			return nil, err
		}
		//u.Infof("resturned from cols: %v", len(selStmt.Columns))
	} else {
		// * mark as star?  TODO
		return nil, fmt.Errorf("not implemented")
	}

	// FROM - required
	//u.Debugf("token:  %s", m.curToken)
	if m.curToken.T != lex.TokenFrom {
		return nil, fmt.Errorf("expected FROM")
	}

	// table/metric
	m.curToken = m.l.NextToken()
	//u.Debugf("found from? %s", m.curToken)
	switch m.curToken.T {
	case lex.TokenIdentity, lex.TokenValue:
		selStmt.From = &From{Value: m.curToken.V}
	case lex.TokenRegex:
		selStmt.From = &From{Value: m.curToken.V, Regex: true}
	default:
		//u.Warnf("No From? %v toktype:%v", m.curToken.V, m.curToken.T.String())
		return nil, fmt.Errorf("expected from name")
	}

	// Where is optional
	if err := m.parseWhere(&selStmt); err != nil {
		return nil, err
	}
	// limit is optional
	// if err := m.parseLimit(&selStmt); err != nil {
	// 	return nil, err
	// }

	// we are finished, nice!
	return &ast, nil
}
Example #9
func main() {
	baseDir, err := os.Getwd()
	quitIfErr(err)

	skipDirFlag := flag.String("skip", "trash", "Comma-separated list of directories to skip")
	buildOnlyFlag := flag.Bool("buildOnly", false, "Do \"go build\" instead of \"go test\"")
	shortFlag := flag.Bool("short", false, `Run "go test" with "short" flag`)
	colorFlag := flag.Bool("c", true, "Use colorized log output, colored by severity")
	verboseFlag := flag.Bool("v", false, `Run "go test" with -v, also be more verbose elsewhere`)
	veryVerbose := flag.Bool("vv", false, `Very Verbose, combine stdout AND stderr to display`)
	raceFlag := flag.Bool("race", false, `Run "go test" with "race" flag`)
	flag.Parse()

	gou.SetLogger(log.New(os.Stderr, "", 0), "debug")
	if *colorFlag {
		gou.SetColorIfTerminal()
	}

	skipDirNames := strings.Split(*skipDirFlag, ",")
	skipDirStats := make([]os.FileInfo, 0)
	for _, skipDirName := range skipDirNames {
		if skipDirName == "" {
			continue
		}
		stat, err := os.Stat(skipDirName)
		if skipDirName == "trash" && err != nil {
			continue
		}
		if err != nil {
			gou.Errorf("Couldn't stat directory to skip %s: %s\n", skipDirName, err)
		}
		skipDirStats = append(skipDirStats, stat)
	}

	conf := NewConf(skipDirStats, *buildOnlyFlag, *shortFlag, *raceFlag, *verboseFlag, *veryVerbose)
	failedDirs := RunTestsRecursively(baseDir, baseDir, conf)

	if len(failedDirs) > 0 {
		gou.Error("\nFailed directories:")
		for _, dir := range failedDirs {
			gou.Errorf("  %s", dir)
		}
		os.Exit(1)
	} else {
		gou.Info("\nall tests/builds succeeded")
	}
}
Example #10
File: dep.go Project: araddon/depr
// ensure this path exists
func (d *Dep) createPath() error {
	fi, err := os.Stat(d.AsPath())
	if err != nil && strings.Contains(err.Error(), "no such file or directory") {
		d.exists = false
		u.Debugf("Creating dir %s", d.ParentDir())
		if err := os.MkdirAll(d.ParentDir(), os.ModeDir|0700); err != nil {
			u.Error(err)
			return err
		}
		d.control.Clone(d)
	}
	if fi != nil && fi.IsDir() {
		d.exists = true
	}

	return nil
}
Example #11
// Flush all current documents to ElasticSearch
func (b *BulkIndexor) Flush() {
	b.mu.Lock()
	if b.docCt > 0 {
		b.send(b.buf)
	}
	b.mu.Unlock()
	// wait for in-flight sends to finish, or time out
	select {
	case <-wgChan(b.sendWg):
		// done
		u.Info("Normal Wait Group Shutdown")
	case <-time.After(time.Second * time.Duration(MAX_SHUTDOWN_SECS)):
		// timeout!
		u.Error("Timeout in Shutdown!")
	}
}
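The select above depends on a wgChan helper that is not shown here. A plausible sketch, assuming it simply wraps sync.WaitGroup.Wait in a channel so the wait can participate in a select (the real helper may differ):

// wgChan closes the returned channel once the WaitGroup drains,
// making WaitGroup.Wait usable as a select case.
func wgChan(wg *sync.WaitGroup) <-chan struct{} {
	ch := make(chan struct{})
	go func() {
		wg.Wait()
		close(ch)
	}()
	return ch
}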
Example #12
func (uv *NvMetrics) Value(name string) (interface{}, error) {
	if v := uv.Values.Get(name); len(v) > 0 {
		//u.Debug(name, "---", v)
		if li := strings.LastIndex(name, "."); li > 0 {
			metType := name[li+1:]
			switch metType {
			case "avg", "pct": // Gauge, Timer
				if f, err := strconv.ParseFloat(v, 64); err == nil {
					return f, nil
				} else {
					u.Error(err)
					return nil, err
				}
			case "ct":
				if iv, err := strconv.ParseInt(v, 10, 64); err == nil {
					return iv, nil
				} else {
					if f, err := strconv.ParseFloat(v, 64); err == nil {
						return int64(f), nil
					} else {
						u.Errorf(`Could not parse integer or float for "%v.ct" v=%v`, name, v)
						return nil, err
					}
				}
			case "value":
				if fv, err := strconv.ParseFloat(v, 64); err == nil {
					return int64(fv), nil
				} else {
					if iv, err := strconv.ParseInt(v, 10, 64); err == nil {
						return iv, nil
					} else {
						u.Errorf(`Could not parse integer or float for "%v.value" v=%v`, name, v)
						return nil, err
					}
				}
			}
		}
	}
	return nil, errors.New("not converted")
}
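A sketch of the suffix-based dispatch in Value; the metric line is hypothetical and assumes NewNvMetrics (see Example #16) parses name=value pairs in URL query form, since Values.Get suggests url.Values underneath:

nv, err := NewNvMetrics("requests.ct=42&latency.avg=3.5")
if err == nil {
	ct, _ := nv.Value("requests.ct")  // int64(42), via the "ct" case
	avg, _ := nv.Value("latency.avg") // float64(3.5), via the "avg" case
	u.Infof("ct=%v avg=%v", ct, avg)
}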
Example #13
// First keyword was UPSERT
func (m *Sqlbridge) parseSqlUpsert() (*SqlUpsert, error) {

	var err error
	req := NewSqlUpsert()
	m.Next() // Consume UPSERT token

	if m.Cur().T == lex.TokenInto {
		m.Next() // consume Into
	}

	switch m.Cur().T {
	case lex.TokenTable, lex.TokenIdentity:
		req.Table = m.Cur().V
		m.Next()
	default:
		return nil, fmt.Errorf("expected table name but got : %v", m.Cur().V)
	}

	switch m.Cur().T {
	case lex.TokenSet:
		m.Next() // Consume Set
		// list of name=value pairs
		cols, err := m.parseUpdateList()
		if err != nil {
			u.Error(err)
			return nil, err
		}
		req.Values = cols
	case lex.TokenLeftParenthesis:

		// list of fields
		cols, err := m.parseFieldList()
		if err != nil {
			u.Error(err)
			return nil, err
		}
		req.Columns = cols

		m.Next() // left paren starts list of values
		switch m.Cur().T {
		case lex.TokenValues:
			m.Next() // Consume Values keyword
		default:
			return nil, fmt.Errorf("expected values but got : %v", m.Cur().V)
		}

		//u.Debugf("found ?  %v", m.Cur())
		colVals, err := m.parseValueList()
		if err != nil {
			u.Error(err)
			return nil, err
		}
		req.Rows = colVals
	default:
		return nil, fmt.Errorf("expected SET name=value, or (col1,col2) after table name but got : %v", m.Cur().V)
	}

	// WHERE
	req.Where, err = m.parseWhere()
	if err != nil {
		return nil, err
	}

	return req, nil
}
Example #14
// This formatter reads go-style log files and performs:
//  1.  Squashes multiple lines into one (as needed); tries to squash Go panics into one line
//  2.  Reads the LineType/Level [DEBUG,INFO,METRIC] out into a field
//
// It expects log files in this format:
//   2013-05-25 13:25:32.475 authctx.go:169: [DEBUG] sink       Building sink for kafka from factory method
func MakeFileFlattener(filename string, msgChan chan *LineEvent) func(string) {
	// Buffer used to accumulate lines until a complete log entry can be emitted.
	buf := new(bytes.Buffer)

	startsDate := false
	prevWasDate := false
	pos := 0
	posEnd := 0
	var dataType []byte
	var loglevel string
	var dateStr, prevDateStr string
	var prevLogTs time.Time
	lineCt := 0

	return func(line string) {
		lineCt++
		if len(line) < 8 {
			buf.WriteString(line)
			return
		}

		startsDate = false
		spaceCt := 0

		//    [DATE]                  [SOURCE]              [LEVEL] [MESSAGE]
		// 2014/07/10 11:04:20.653185 filter_fluentd.go:16: [DEBUG] %s
		for i := 0; i < len(line); i++ {
			r := line[i]
			if r == ' ' {
				if spaceCt == 1 {
					dateStr = string(line[:i])
					if dts, err := dateparse.ParseAny(dateStr); err == nil {
						startsDate = true
						defer func() {
							// defer will run after prevDateStr already used to send message
							prevLogTs = dts
							prevDateStr = dateStr
						}()
					}
					break
				}
				spaceCt++
			}
		}

		// Find first square bracket wrapper:   [WARN]
		// 2014/07/10 11:04:20.653185 filter_fluentd.go:16: [DEBUG] %s
		// datestr                                         pos, posEnd
		pos = strings.IndexRune(line, '[')
		posEnd = strings.IndexRune(line, ']')
		if pos > 0 && posEnd > 0 && pos < posEnd && len(line) > pos && len(line) > posEnd {
			loglevel = line[pos+1 : posEnd]
			// If we don't find, it probably wasn't one of [INFO],[WARN] etc so accumulate
			if _, ok := expectedLevels[loglevel]; !ok {
				buf.WriteString(line)
				return
			}
		}

		//u.Debugf("pos=%d datatype=%s num?=%v", pos, dataType, startsDate)
		//u.Infof("starts with date?=%v prev?%v pos=%d lvl=%s short[]%v len=%d buf.len=%d", startsDate, prevWasDate, pos, loglevel, (posEnd-pos) < 8, len(line), buf.Len())
		if pos == -1 && !prevWasDate {
			// accumulate in buffer, probably/possibly a panic?
			buf.WriteString(line)
			buf.WriteString(" \n")
		} else if !startsDate {
			// accumulate in buffer
			buf.WriteString(line)
			buf.WriteString(" \n")
		} else if posEnd-8 > pos {
			// the bracketed [block] is too long to be a real level, so accumulate
			buf.WriteString(line)
			buf.WriteString(" \n")
		} else if pos > 80 {
			// [WARN] should be at beginning of line
			buf.WriteString(line)
			buf.WriteString(" \n")
		} else {

			// Line had [LEVEL] AND startsDate at start so go ahead and log it

			if buf.Len() == 0 {
				// lets buffer it, ensuring we have the completion of this line
				buf.WriteString(line)
				return
			}

			// we already have previous line in buffer
			data, err := ioutil.ReadAll(buf)
			if err == nil {
				pos = bytes.IndexRune(data, '[')
				posEnd = bytes.IndexRune(data, ']')
				preFix := ""
				if posEnd-8 > pos {
					//u.Warnf("level:%s  \n\nline=%s", string(data[pos+1:posEnd]), string(data))
					//buf.WriteString(line)
					return
				} else if pos > 0 && posEnd > 0 && pos < posEnd && len(data) > pos && len(data) > posEnd {
					dataType = data[pos+1 : posEnd]
					if len(data) > len(prevDateStr) {
						preFix = string(data[len(prevDateStr)+1 : posEnd])
						//                            [prefix             |- posEnd
						// 2016/09/14 02:33:01.465711 entity.go:179: [ERROR]
						preFixParts := strings.Split(preFix, ": ")
						if len(preFixParts) > 1 {
							preFix = preFixParts[0]
						}
						data = data[posEnd+1:]

					}

				} else {
					dataType = []byte("NA")
					//u.Warnf("level:%s  \n\nline=%s", string(data[pos+1:posEnd]), string(data))
				}
				// if !bytes.HasPrefix(data, datePrefix) {
				// 	u.Warnf("ct=%d level:%s  \n\nline=%s", lineCt, string(data[pos+1:posEnd]), string(data))
				// }
				le := LineEvent{Data: data, Prefix: preFix, Ts: prevLogTs, LogLevel: string(dataType), Source: filename, WriteErrs: 0}
				//u.Debugf("lineevent: %+v", le)
				msgChan <- &le

			} else {
				u.Error(err)
			}
			// now write this line for next analysis
			buf.WriteString(line)
		}
		prevWasDate = startsDate
	}
}
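The expectedLevels set consulted above is not shown in this example. A plausible definition, assuming it whitelists the bracketed level names these examples mention ([DEBUG,INFO,METRIC], [WARN], [ERROR], and the METR variant from Example #16):

// expectedLevels whitelists the [LEVEL] tags treated as real log levels;
// the exact membership here is an assumption.
var expectedLevels = map[string]bool{
	"DEBUG": true, "INFO": true, "WARN": true,
	"ERROR": true, "METRIC": true, "METR": true,
}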
Example #15
File: main.go Project: morya/loges
func main() {
	flag.Parse()
	u.SetupLogging(logLevel)
	u.SetColorIfTerminal() // this doesn't work if reading stdin
	if colorize {
		u.SetColorOutput()
	}

	done := make(chan bool)
	esHostName = cleanEsHost(esHostName)
	// if we have not specified tail files, then assume stdin
	if len(flag.Args()) == 0 && source == "tail" {
		source = "stdin"
	}

	u.Debugf("LOGES: filters=%s  es=%s argct=:%d source=%v ll=%s  args=%v",
		filters, esHostName, len(flag.Args()), source, logLevel, flag.Args())

	// Setup output first, to ensure it's ready when Source starts
	// TODO:  support multiple outputs?
	switch output {
	case "elasticsearch":
		// update the Logstash date for the index occasionally
		go loges.UpdateLogstashIndex()
		// start an elasticsearch bulk worker, for sending to elasticsearch
		go loges.ToElasticSearch(msgChan, "golog", esHostName, ttl, exitIfNoMsgsDur, metricsToEs)
	case "stdout":
		u.Debug("setting output to stdout ", colorize)
		go loges.ToStdout(msgChan, colorize)
	default:
		Usage()
		os.Exit(1)
	}

	// TODO:  implement metrics out
	for _, metOut := range strings.Split(metricsOut, ",") {
		switch metOut {
		case "influxdb":
			// todo
		case "graphite":
			u.Infof("Registering Graphite Transform: host=%s prefix=%s", graphiteHost, graphitePrefix)
			loges.TransformRegister(loges.GraphiteTransform(logType, graphiteHost, graphitePrefix, true))
		}
	}

	// now set up the transforms/filters
	for _, filter := range strings.Split(filters, ",") {
		switch filter {
		case "stdfiles":
			loges.TransformRegister(loges.FileFormatter(logType, nil))
		case "fluentd":
			loges.TransformRegister(loges.FluentdFormatter(logType, nil))
		case "kafka":
			// TODO, finish conversion to sarama
			//loges.TransformRegister(kafka.KafkaFormatter)
		}
	}

	for _, sourceInput := range strings.Split(source, ",") {
		u.Warnf("source = %v", sourceInput)
		switch sourceInput {
		case "tail":
			for _, filename := range flag.Args() {
				tailDone := make(chan bool)
				go loges.TailFile(filename, tail.Config{Follow: true, ReOpen: true}, tailDone, msgChan)
			}
		case "http":
			go loges.HttpRun(httpPort, msgChan)
		//case "kafka":
		//	go kafka.RunKafkaConsumer(msgChan, partitionstr, topic, kafkaHost, offset, maxMsgCt, maxSize)
		case "stdin":
			go loges.StdinPruducer(msgChan)
		default:
			u.Error(sourceInput)
			println("No input set, required")
			Usage()
			os.Exit(1)
		}
	}
	u.Warn("end of main startup, until done")
	<-done
}
Example #16
func GraphiteTransform(logstashType, addr, prefix string, metricsToEs bool) LineTransform {
	ticker := time.NewTicker(time.Second * 60)
	loc := time.UTC
	var mu sync.Mutex
	buf := &bytes.Buffer{}
	go func() {
		for {
			select {
			case <-ticker.C:
				conn, err := net.Dial("tcp", addr)
				if err != nil {
					u.Errorf("Failed to connect to graphite/carbon: %+v", err)
				} else {
					//u.Infof("Connected graphite to %v", addr)
					mu.Lock()
					//u.Debug(string(buf.Bytes()))
					io.Copy(conn, buf)
					mu.Unlock()
				}
				if conn != nil {
					conn.Close()
				}
				//case <-stopper:
				//	return
			}

		}
	}()

	return func(d *LineEvent) *Event {
		//u.Debugf("ll=%s   %s", d.DataType, string(d.Data))
		if d.DataType == "METRIC" || d.DataType == "METR" {
			line := string(d.Data)
			tsStr := strconv.FormatInt(time.Now().In(loc).Unix(), 10)
			if iMetric := strings.Index(line, d.DataType); iMetric > 0 {
				line = line[iMetric+len(d.DataType)+1:]
				line = strings.Trim(line, " ")
			}
			// 1.  Read nv/pairs
			nv, err := NewNvMetrics(line)
			if err != nil {
				u.Error(err)
				return nil
			}
			host := nv.Get("host")
			if len(host) == 0 {
				host = hostName
			}

			evt := NewTsEvent(logstashType, d.Source, line, time.Now().In(loc))
			evt.Fields = make(map[string]interface{})
			evt.Fields["host"] = hostName
			evt.Fields["level"] = d.DataType
			//u.Debugf("To Graphite! h='%s'  data=%s", host, string(d.Data))
			mu.Lock()
			defer mu.Unlock()
			// 2.  parse the .avg, .ct and switch
			for n := range nv.Values {
				metType, val := nv.MetricTypeVal(n)
				if metVal, err := nv.Value(n); err == nil {
					graphiteName := strings.Replace(n, ".", "_", -1)
					evt.Fields[graphiteName] = metVal
				} else {
					continue
				}
				switch metType {
				case "avg", "pct": // Gauge, Timer
					//n = strings.Replace(n, ".avg", "", -1)
					if _, err = fmt.Fprintf(buf, "%s.%s.%s %s %s\n", prefix, host, n, val, tsStr); err != nil {
						u.Error(err)
						return nil
					}
				case "ct":
					n = strings.Replace(n, ".ct", ".count", -1)
					if _, err = fmt.Fprintf(buf, "%s.%s.%s %s %s\n", prefix, host, n, val, tsStr); err != nil {
						u.Error(err)
						return nil
					}
				case "value":
					n = strings.Replace(n, ".value", ".last", -1)
					if _, err = fmt.Fprintf(buf, "%s.%s.%s %s %s\n", prefix, host, n, val, tsStr); err != nil {
						u.Warnf("Could not convert value:  %v:%v", n, val)
						//return nil
					}
				default:
					// ?
					u.Warnf("could not recognize: %v", line)
				}
			}
			if metricsToEs {
				return evt
			}
		}
		return nil
	}
}
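Example #15 shows how this transform gets wired in; for reference, the registration looks like this, with the log type, carbon address, and prefix being deployment-specific values (the ones below are made up):

loges.TransformRegister(loges.GraphiteTransform("golog", "localhost:2003", "stats.app", true))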
Example #17
// This formatter reads go-style log files and performs:
//  1.  Squashes multiple lines into one (as needed); tries to squash Go panics into one line
//  2.  Reads the LineType/Level [DEBUG,INFO,METRIC] out into a field
//
// It expects log files in this format:
//   2013-05-25 13:25:32.475 authctx.go:169: [DEBUG] sink       Building sink for kafka from factory method
func MakeFileFlattener(filename string, msgChan chan *LineEvent) func(string) {
	// Buffer used to accumulate lines until a complete log entry can be emitted.
	buf := new(bytes.Buffer)

	startsDate := false
	prevWasDate := false
	pos := 0
	posEnd := 0
	var dataType []byte
	var loglevel string
	var dateStr string
	lineCt := 0

	return func(line string) {
		lineCt++
		if len(line) < 8 {
			buf.WriteString(line)
			return
		}

		startsDate = false
		spaceCt := 0

		// 2014/07/10 11:04:20.653185 filter_fluentd.go:16: [DEBUG] %s %s
		for i := 0; i < len(line); i++ {
			r := line[i]
			if r == ' ' {
				if spaceCt == 1 {
					dateStr = string(line[:i])
					if _, err := dateparse.ParseAny(dateStr); err == nil {
						startsDate = true
					}
					break
				}
				spaceCt++
			}
		}

		// Find first square bracket wrapper:   [WARN]
		pos = strings.IndexRune(line, '[')
		posEnd = strings.IndexRune(line, ']')
		if pos > 0 && posEnd > 0 && pos < posEnd && len(line) > pos && len(line) > posEnd {
			loglevel = line[pos+1 : posEnd]
			if _, ok := expectedLevels[loglevel]; !ok {
				buf.WriteString(line)
				return
			}
		}

		//u.Debugf("pos=%d datatype=%s num?=%v", pos, dataType, startsDate)
		//u.Infof("starts with date?=%v prev?%v pos=%d lvl=%s short[]%v len=%d buf.len=%d", startsDate, prevWasDate, pos, loglevel, (posEnd-pos) < 8, len(line), buf.Len())
		if pos == -1 && !prevWasDate {
			// accumulate in buffer, probably/possibly a panic?
			buf.WriteString(line)
			buf.WriteString(" \n")
		} else if !startsDate {
			// accumulate in buffer
			buf.WriteString(line)
			buf.WriteString(" \n")
		} else if posEnd-8 > pos {
			// the bracketed [block] is too long to be a real level, so accumulate
			buf.WriteString(line)
			buf.WriteString(" \n")
		} else if pos > 80 {
			// [WARN] should be at beginning of line
			buf.WriteString(line)
			buf.WriteString(" \n")
		} else {
			// Line had [LEVEL] AND startsDate at start

			if buf.Len() == 0 {
				// lets buffer it, ensuring we have the completion of this line
				buf.WriteString(line)
				return
			}

			// we already have previous line in buffer
			data, err := ioutil.ReadAll(buf)
			if err == nil {
				pos = bytes.IndexRune(data, '[')
				posEnd = bytes.IndexRune(data, ']')
				if posEnd-8 > pos {
					//u.Warnf("level:%s  \n\nline=%s", string(data[pos+1:posEnd]), string(data))
					//buf.WriteString(line)
					return
				} else if pos > 0 && posEnd > 0 && pos < posEnd && len(data) > pos && len(data) > posEnd {
					dataType = data[pos+1 : posEnd]
				} else {
					dataType = []byte("NA")
					//u.Warnf("level:%s  \n\nline=%s", string(data[pos+1:posEnd]), string(data))
				}
				// if !bytes.HasPrefix(data, datePrefix) {
				// 	u.Warnf("ct=%d level:%s  \n\nline=%s", lineCt, string(data[pos+1:posEnd]), string(data))
				// }
				//u.Debugf("dt='%s'  data=%s", string(dataType), string(data[0:20]))
				msgChan <- &LineEvent{Data: data, DataType: string(dataType), Source: filename, WriteErrs: 0}

			} else {
				u.Error(err)
			}
			// now write this line for next analysis
			buf.WriteString(line)
		}
		prevWasDate = startsDate
	}
}