Example #1
func XXXTestBulkErrors(t *testing.T) {
	// Let's set a bad port and expect a connection-refused error.
	c := NewTestConn()
	c.Port = "27845"
	defer func() {
		c.Port = "9200"
	}()
	indexer := c.NewBulkIndexerErrors(10, 1)
	indexer.Start()
	errorCt := 0
	go func() {
		for i := 0; i < 20; i++ {
			date := time.Unix(1257894000, 0)
			data := map[string]interface{}{"name": "smurfs", "age": 22, "date": time.Unix(1257894000, 0)}
			indexer.Index("users", "user", strconv.Itoa(i), "", &date, data, true)
		}
	}()
	var errBuf *ErrorBuffer
	for errBuf = range indexer.ErrorChannel {
		errorCt++
		break
	}
	if errBuf.Buf.Len() > 0 {
		gou.Debug(errBuf.Err)
	}
	assert.T(t, errorCt > 0, fmt.Sprintf("ErrorCt should be > 0 %d", errorCt))
	indexer.Stop()
}
Example #2
// FluentdFormatter returns a LineTransform that parses Fluentd-formatted lines: [date source jsonmessage]
func FluentdFormatter(logstashType string, tags []string) LineTransform {
	return func(d *LineEvent) *Event {
		//2012-11-22 05:07:51 +0000 lio.home.ubuntu.log.collect.log.vm2: {"message":"runtime error: close of closed channel"}
		if lineParts := bytes.SplitN(d.Data, []byte{':', ' '}, 2); len(lineParts) > 1 {
			if len(lineParts[0]) > 26 {
				u.Debug("%s %s\n", string(lineParts[0]), string(lineParts[1]))
				bsrc := lineParts[0][26:]
				bdate := lineParts[0][0:25]
				var msg map[string]interface{}
				if err := json.Unmarshal(lineParts[1], &msg); err == nil {
					if t, err := time.Parse("2006-01-02 15:04:05 -0700", string(bdate)); err == nil {
						evt := NewTsEvent(logstashType, string(bsrc), "", t)
						if msgi, ok := msg["message"]; ok {
							if msgS, ok := msgi.(string); ok {
								evt.Message = msgS
								delete(msg, "message")
							}
						}
						evt.Tags = tags
						evt.Fields = msg
						return evt
					} else {
						u.Debug("%v", err)
						return NewEvent(logstashType, string(bsrc), string(lineParts[1]))
					}

				} else {
					u.Warn("bad message? %v", err)
				}

			}
		}
		return nil
	}
}
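A formatter like this is plugged into the pipeline as a line transform. A minimal sketch, following the wiring shown in the main() of Example #18; the logstash type "golog" is an invented placeholder:

	// Register the Fluentd formatter so incoming lines are parsed into events;
	// nil means no extra tags are attached.
	loges.TransformRegister(loges.FluentdFormatter("golog", nil))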
Example #3
func TestBulkErrors(t *testing.T) {
	// Let's set a bad port and expect a connection-refused error.
	api.Port = "27845"
	defer func() {
		api.Port = "9200"
	}()
	BulkDelaySeconds = 1
	indexer := NewBulkIndexerErrors(10, 1)
	done := make(chan bool)
	indexer.Run(done)

	errorCt := 0
	go func() {
		for i := 0; i < 20; i++ {
			date := time.Unix(1257894000, 0)
			data := map[string]interface{}{"name": "smurfs", "age": 22, "date": time.Unix(1257894000, 0)}
			indexer.Index("users", "user", strconv.Itoa(i), "", &date, data)
		}
	}()
	for errBuf := range indexer.ErrorChannel {
		errorCt++
		gou.Debug(errBuf.Err)
		break
	}
	assert.T(t, errorCt > 0, fmt.Sprintf("ErrorCt should be > 0 %d", errorCt))
	done <- true
}
Example #4
func MakeCustomHandler(msgsOut chan *LineEvent) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {

		qs := r.URL.Query()
		stream := qs.Get("stream")
		if len(stream) < 1 {
			stream = qs.Get(":stream")
			if len(stream) < 1 {
				io.WriteString(w, "Requires a 'stream' qs param ")
				return
			} else {
				qs.Del(":stream")
			}
		}
		var data []byte
		var err error
		if r.Body != nil {
			data, err = ioutil.ReadAll(r.Body)
			defer r.Body.Close()
			if err != nil {
				u.Log(u.ERROR, err.Error())
				io.WriteString(w, "Requires valid monit parse")
				return
			}
		} else {
			data = []byte(qs.Encode())
		}
		u.Debug(stream, string(data))
		//u.Error("Not implemented")
		msgsOut <- &LineEvent{Data: data, Source: stream}
	}
}
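Since MakeCustomHandler returns an http.HandlerFunc, it can be mounted on the standard net/http mux. A minimal sketch; the route, port, and channel size are invented, and the consumer goroutine stands in for the real pipeline:

	func main() {
		msgs := make(chan *LineEvent, 100)
		// consume events so the handler never blocks on send
		go func() {
			for evt := range msgs {
				u.Debug(evt.Source, string(evt.Data))
			}
		}()
		// clients POST their payload to /custom?stream=app1
		http.Handle("/custom", MakeCustomHandler(msgs))
		log.Fatal(http.ListenAndServe(":8080", nil))
	}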
Example #5
func verifyTokenTypes(t *testing.T, sql string, tt []TokenType) {
	l := NewSqlLexer(sql)
	u.Debug(sql)
	for _, tokenType := range tt {
		tok := l.NextToken()
		//u.Infof("%#v  expects:%v", tok, tokenType)
		assert.Equalf(t, tok.T, tokenType, "want='%v' has %v ", tokenType, tok.T)
	}
}
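A hypothetical caller for this helper; the token names are assumptions patterned on the lex.Token* constants used in the parser examples below:

	func TestSelectTokens(t *testing.T) {
		verifyTokenTypes(t, "SELECT name FROM users",
			[]TokenType{TokenSelect, TokenIdentity, TokenFrom, TokenIdentity})
	}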
Example #6
// First keyword was SELECT, so use the SELECT parser rule-set
func (m *FilterQLParser) parseSelect() (*FilterStatement, error) {

	req := NewFilterStatement()
	req.Raw = m.l.RawInput()

	m.Next() // Consume the SELECT
	if m.Cur().T != lex.TokenStar && m.Cur().T != lex.TokenMultiply {
		u.Warnf("token? %v", m.Cur())
		return nil, fmt.Errorf("Must use SELECT * currently %s", req.Raw)
	}
	m.Next() // Consume   *

	// OPTIONAL From clause
	if m.Cur().T == lex.TokenFrom {
		m.Next()
		if m.Cur().T == lex.TokenIdentity || m.Cur().T == lex.TokenTable {
			req.From = m.Cur().V
			m.Next()
		}
	}

	if m.Cur().T != lex.TokenWhere {
		return nil, fmt.Errorf("Must use SELECT * FROM [table] WHERE: %s", req.Raw)
	}
	req.Keyword = m.Cur().T
	m.Next() // Consume WHERE

	// one top level filter which may be nested
	if err := m.parseWhereExpr(req); err != nil {
		u.Debug(err)
		return nil, err
	}

	// LIMIT
	if err := m.parseLimit(req); err != nil {
		return nil, err
	}

	// ALIAS
	if err := m.parseAlias(req); err != nil {
		return nil, err
	}

	if m.Cur().T == lex.TokenEOF || m.Cur().T == lex.TokenEOS || m.Cur().T == lex.TokenRightParenthesis {

		// if err := req.Finalize(); err != nil {
		// 	u.Errorf("Could not finalize: %v", err)
		// 	return nil, err
		// }

		// we are good
		return req, nil
	}

	u.Warnf("Could not complete parsing, return error: %v %v", m.Cur(), m.l.PeekWord())
	return nil, fmt.Errorf("Did not complete parsing input: %v", m.LexTokenPager.Cur().V)
}
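Taken together, the checks above admit statements of this shape; a hypothetical input (identifiers invented):

	// SELECT * is required, FROM is optional, WHERE is required,
	// and LIMIT / ALIAS are optional trailing clauses.
	filterQL := `SELECT * FROM users WHERE age > 20 LIMIT 100 ALIAS adult_users`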
Example #7
// We need to be able to rewrite queries: during joins we have
// to rewrite the query that we are going to send to a single data source
func TestToSql(t *testing.T) {
	for _, sqlStrIn := range sqlStrings {
		u.Debug("parsing next one ", sqlStrIn)
		stmt1 := parseOrPanic(t, sqlStrIn)
		sqlSel1 := stmt1.(*SqlSelect)
		sqlRt := sqlSel1.StringAST()
		u.Warnf("About to parse roundtrip \n%v", sqlRt)
		stmt2 := parseOrPanic(t, sqlRt)
		compareAst(t, stmt1, stmt2)
	}
}
Example #8
// StdinPruducer sends messages read from stdin for consumption
func StdinPruducer(msgChan chan *LineEvent) {
	b := bufio.NewReader(os.Stdin)
	lineHandler := MakeFileFlattener("stdin", msgChan)
	u.Debug("reading from stdin with lines defined by newline")
	for {
		s, e := b.ReadString('\n')
		if e != nil {
			// bail on EOF or any other read error rather than spinning forever
			if e != io.EOF {
				u.Error(e)
			}
			return
		}
		//u.Info(s)
		lineHandler(s)
	}
}
Example #9
// parse the request
func (m *Parser) parse() (*Ast, error) {

	comment := m.initialComment()
	u.Debug(comment)
	// Now, find First Keyword
	switch m.curToken.T {
	case lex.TokenSelect:
		m.initialKeyword = m.curToken
		return m.parseSelect(comment)
	default:
		return nil, fmt.Errorf("Unrecognized query, expected [SELECT] influx ql")
	}
	u.Warnf("Whoops, that didn't work: \n%v \n\t%v", m.curToken, m.qryText)
	return nil, fmt.Errorf("Unkwown error on request")
}
Example #10
func TestBulkUpdate(t *testing.T) {
	InitTests(true)
	api.Port = "9200"
	indexer := NewBulkIndexer(3)
	indexer.BulkSendor = func(buf *bytes.Buffer) error {
		messageSets += 1
		totalBytesSent += buf.Len()
		buffers = append(buffers, buf)
		gou.Debug(string(buf.Bytes()))
		return BulkSend(buf)
	}
	done := make(chan bool)
	indexer.Run(done)

	date := time.Unix(1257894000, 0)
	user := map[string]interface{}{
		"name": "smurfs", "age": 22, "date": time.Unix(1257894000, 0), "count": 1,
	}

	// Let's make sure the data is in the index ...
	_, err := Index(true, "users", "user", "5", user)

	// script and params
	data := map[string]interface{}{
		"script": "ctx._source.count += 2",
	}
	err = indexer.Update("users", "user", "5", "", &date, data)
	// So here's the deal. Flushing does seem to work, you just have to give the
	// channel a moment to receive the message ...
	//	<- time.After(time.Millisecond * 20)
	//	indexer.Flush()
	done <- true

	WaitFor(func() bool {
		return len(buffers) > 0
	}, 5)

	assert.T(t, BulkErrorCt == 0 && err == nil, fmt.Sprintf("Should not have any errors  %v", err))

	response, err := Get(true, "users", "user", "5")
	assert.T(t, err == nil, fmt.Sprintf("Should not have any errors  %v", err))
	newCount := response.Source.(map[string]interface{})["count"]
	assert.T(t, newCount.(float64) == 3,
		fmt.Sprintf("Should have update count: %#v ... %#v", response.Source.(map[string]interface{})["count"], response))
}
Example #11
// Clone does the initial creation (clone) of this repo
func (s *Git) Clone(d *Dep) error {
	if !d.exists {
		// new, initial clone?
		// git@github.com:lytics/cache.git
		parts := strings.Split(d.Src, "/")
		// 0: github.com  1:lytics   2:cache
		if len(parts) < 3 {
			return fmt.Errorf("Invalid src?  %s", d.Src)
		}
		gitPath := fmt.Sprintf("git@%s:%s/%s.git", parts[0], parts[1], parts[2])
		u.Warnf("cloning src? %s", gitPath)
		cmdgit := exec.Command("git", "clone", gitPath)
		cmdgit.Dir = d.ParentDir()
		out, err := cmdgit.Output()
		u.Debug(string(out), err)
		return err
	}
	return nil
}
Example #12
// checkClean reports whether any dependency failed the clean check.
func (d Dependencies) checkClean(allowNonClean bool) bool {
	var wg sync.WaitGroup
	var mu sync.Mutex // guards hasErrors across the checker goroutines
	hasErrors := false
	for _, dep := range d {
		wg.Add(1)
		go func(depIn *Dep) {
			defer wg.Done()
			depIn.createPath()
			// generally we are going to force clean on all directories unless overridden
			if !allowNonClean {
				if !depIn.Clean() {
					u.Debug(depIn)
					mu.Lock()
					hasErrors = true
					mu.Unlock()
				}
			}
		}(dep)
	}
	wg.Wait()
	return hasErrors
}
Example #13
func TailFile(filename string, config tail.Config, done chan bool, msgChan chan *LineEvent) {
	u.Debug("Watching file ", filename, config)
	t, err := tail.TailFile(filename, config)
	if err != nil {
		u.Error(err)
		return
	}
	//defer func() { done <- true }()
	lineHandler := MakeFileFlattener(filename, msgChan)
	for line := range t.Lines {
		lineHandler(line.Text)
	}
	err = t.Wait()
	if err != nil {
		u.Error(err)
	}
	if err := t.Stop(); err != nil {
		u.Info(err)
	}
}
Example #14
func TestBulkIndexerBasic(t *testing.T) {
	InitTests(true)
	indexer := NewBulkIndexer(3)
	indexer.BulkSendor = func(buf *bytes.Buffer) error {
		messageSets += 1
		totalBytesSent += buf.Len()
		buffers = append(buffers, buf)
		gou.Debug(string(buf.Bytes()))
		return BulkSend(buf)
	}
	done := make(chan bool)
	indexer.Run(done)

	date := time.Unix(1257894000, 0)
	data := map[string]interface{}{"name": "smurfs", "age": 22, "date": time.Unix(1257894000, 0)}
	err := indexer.Index("users", "user", "1", "", &date, data)

	WaitFor(func() bool {
		return len(buffers) > 0
	}, 5)
	// part of request is url, so let's factor that in
	//totalBytesSent = totalBytesSent - len(*eshost)
	assert.T(t, len(buffers) == 1, fmt.Sprintf("Should have sent one operation but was %d", len(buffers)))
	assert.T(t, BulkErrorCt == 0 && err == nil, fmt.Sprintf("Should not have any errors  %v", err))
	assert.T(t, totalBytesSent == 145, fmt.Sprintf("Should have sent 145 bytes but was %v", totalBytesSent))

	err = indexer.Index("users", "user", "2", "", nil, data)
	<-time.After(time.Millisecond * 10) // we need to wait for doc to hit send channel
	// this will test to ensure that Flush actually catches a doc
	indexer.Flush()
	totalBytesSent = totalBytesSent - len(*eshost)
	assert.T(t, err == nil, fmt.Sprintf("Should have nil error  =%v", err))
	assert.T(t, len(buffers) == 2, fmt.Sprintf("Should have another buffer ct=%d", len(buffers)))

	assert.T(t, BulkErrorCt == 0, fmt.Sprintf("Should not have any errors %d", BulkErrorCt))
	assert.T(t, CloseInt(totalBytesSent, 257), fmt.Sprintf("Should have sent 257 bytes but was %v", totalBytesSent))

	done <- true
}
Example #15
// First keyword was SELECT, so use the SELECT parser rule-set
func (m *Sqlbridge) parseSqlSelect() (*SqlSelect, error) {

	req := NewSqlSelect()
	req.Raw = m.l.RawInput()
	m.Next() // Consume Select?

	// Optional DISTINCT keyword always immediately after SELECT KW
	if m.Cur().T == lex.TokenDistinct {
		m.Next()
		req.Distinct = true
	}

	// columns
	//if m.Cur().T != lex.TokenStar {
	if err := parseColumns(m, m.funcs, m.buildVm, req); err != nil {
		u.Debug(err)
		return nil, err
	}
	// } else if err := m.parseSelectStar(req); err != nil {
	// 	u.Debug(err)
	// 	return nil, err
	// }

	//u.Debugf("cur? %v", m.Cur())
	// select @@myvar limit 1
	if m.Cur().T == lex.TokenLimit {
		if err := m.parseLimit(req); err != nil {
			return req, nil
		}
		if m.isEnd() {
			return req, nil
		}
	}

	// SPECIAL END CASE for simple selects
	// SELECT last_insert_id();
	if m.Cur().T == lex.TokenEOS || m.Cur().T == lex.TokenEOF {
		// valid end
		return req, nil
	}

	// INTO
	if errreq := m.parseInto(req); errreq != nil {
		return nil, errreq
	}

	// FROM
	if errreq := m.parseSources(req); errreq != nil {
		return nil, errreq
	}

	// WHERE
	if errreq := m.parseWhereSelect(req); errreq != nil {
		return nil, errreq
	}

	// GROUP BY
	//u.Debugf("GroupBy?  : %v", m.Cur())
	if errreq := m.parseGroupBy(req); errreq != nil {
		return nil, errreq
	}

	// HAVING
	//u.Debugf("Having?  : %v", m.Cur())
	if errreq := m.parseHaving(req); errreq != nil {
		return nil, errreq
	}

	// ORDER BY
	//u.Debugf("OrderBy?  : %v", m.Cur())
	if errreq := m.parseOrderBy(req); errreq != nil {
		return nil, errreq
	}

	// LIMIT
	if err := m.parseLimit(req); err != nil {
		return nil, err
	}

	// WITH
	with, err := ParseWith(m.SqlTokenPager)
	if err != nil {
		return nil, err
	}
	req.With = with

	// ALIAS
	if err := m.parseAlias(req); err != nil {
		return nil, err
	}

	if m.Cur().T == lex.TokenEOF || m.Cur().T == lex.TokenEOS || m.Cur().T == lex.TokenRightParenthesis {

		if err := req.Finalize(); err != nil {
			u.Errorf("Could not finalize: %v", err)
			return nil, err
		}

		// we are good
		return req, nil
	}

	u.Warnf("Could not complete parsing, return error: %v %v", m.Cur(), m.l.PeekWord())
	return nil, fmt.Errorf("Did not complete parsing input: %v", m.LexTokenPager.Cur().V)
}
Example #16
func TestSearchFacetOne(t *testing.T) {
	/*
		A faceted search for what "type" of events there are
		- since we are not specifying an elasticsearch type it searches all types

		{
		    "terms" : {
		      "_type" : "terms",
		      "missing" : 0,
		      "total" : 7561,
		      "other" : 0,
		      "terms" : [ {
		        "term" : "pushevent",
		        "count" : 4185
		      }, {
		        "term" : "createevent",
		        "count" : 786
		      }.....]
		    }
		 }

	*/
	qry := Search("github").Pretty().Facet(
		Facet().Fields("type").Size("25"),
	).Query(
		Query().All(),
	).Size("1")
	out, err := qry.Result()
	//log.Println(string(out.Facets))
	u.Debug(out)
	u.Assert(out != nil && err == nil, t, "Should have output")
	if out == nil {
		t.Fail()
		return
	}
	h := u.NewJsonHelper(out.Facets)
	u.Assert(h.Int("type.total") == 8084, t, "Should have 8084 results %v", h.Int("type.total"))
	u.Assert(len(h.List("type.terms")) == 16, t, "Should have 16 event types, %v", len(h.List("type.terms")))

	// Now, let's try changing size to 10
	qry.FacetVal.Size("10")
	out, err = qry.Result()
	h = u.NewJsonHelper(out.Facets)

	// still same doc count
	u.Assert(h.Int("type.total") == 8084, t, "Should have 8084 results %v", h.Int("type.total"))
	// make sure size worked
	u.Assert(len(h.List("type.terms")) == 10, t, "Should have 10 event types, %v", len(h.List("type.terms")))

	// now, let's add a type (out of the 16)
	out, _ = Search("github").Type("IssueCommentEvent").Pretty().Facet(
		Facet().Fields("type").Size("25"),
	).Query(
		Query().All(),
	).Result()
	h = u.NewJsonHelper(out.Facets)
	//log.Println(string(out.Facets))
	// doc count is now limited to this one type
	u.Assert(h.Int("type.total") == 685, t, "Should have 685 results %v", h.Int("type.total"))
	// we should only have one facettype because we limited to one type
	u.Assert(len(h.List("type.terms")) == 1, t, "Should have 1 event types, %v", len(h.List("type.terms")))

	// now, add a second type (chained)
	out, _ = Search("github").Type("IssueCommentEvent").Type("PushEvent").Pretty().Facet(
		Facet().Fields("type").Size("25"),
	).Query(
		Query().All(),
	).Result()
	h = u.NewJsonHelper(out.Facets)
	//log.Println(string(out.Facets))
	// doc count now covers both types
	u.Assert(h.Int("type.total") == 4941, t, "Should have 4941 results %v", h.Int("type.total"))
	// make sure we now have 2 types
	u.Assert(len(h.List("type.terms")) == 2, t, "Should have 2 event types, %v", len(h.List("type.terms")))

	// and instead of faceting on type, facet on actor (userid)
	out, _ = Search("github").Type("IssueCommentEvent,PushEvent").Pretty().Facet(
		Facet().Fields("actor").Size("500"),
	).Query(
		Query().All(),
	).Result()
	h = u.NewJsonHelper(out.Facets)
	// total for the actor facet across both types
	u.Assert(h.Int("actor.total") == 5158, t, "Should have 5158 results %v", h.Int("actor.total"))
	// make sure size worked
	u.Assert(len(h.List("actor.terms")) == 500, t, "Should have 500 users, %v", len(h.List("actor.terms")))

}
Example #17
// First keyword was SELECT, so use the SELECT parser rule-set
func (m *Sqlbridge) parseSqlSelect() (*SqlSelect, error) {

	req := NewSqlSelect()
	req.Raw = m.l.RawInput()
	m.Next() // Consume Select?

	// columns
	if m.Cur().T != lex.TokenStar {
		if err := m.parseColumns(req); err != nil {
			u.Debug(err)
			return nil, err
		}
	} else if err := m.parseSelectStar(req); err != nil {
		u.Debug(err)
		return nil, err
	}

	//u.Debugf("cur? %v", m.Cur())
	// select @@myvar limit 1
	if m.Cur().T == lex.TokenLimit {
		if err := m.parseLimit(req); err != nil {
			return req, nil
		}
		if m.isEnd() {
			return req, nil
		}
	}

	// SPECIAL END CASE for simple selects
	// SELECT last_insert_id();
	if m.Cur().T == lex.TokenEOS || m.Cur().T == lex.TokenEOF {
		// valid end
		return req, nil
	}

	// INTO
	//u.Debugf("token:  %v", m.Cur())
	if errreq := m.parseInto(req); errreq != nil {
		return nil, errreq
	}

	// FROM
	//u.Debugf("token:  %v", m.Cur())
	if errreq := m.parseTableReference(req); errreq != nil {
		return nil, errreq
	}

	// WHERE
	//u.Debugf("where? %v", m.Cur())
	if errreq := m.parseWhereSelect(req); errreq != nil {
		return nil, errreq
	}

	// GROUP BY
	//u.Debugf("GroupBy?  : %v", m.Cur())
	if errreq := m.parseGroupBy(req); errreq != nil {
		return nil, errreq
	}

	// HAVING
	//u.Debugf("Having?  : %v", m.Cur())
	if errreq := m.parseHaving(req); errreq != nil {
		return nil, errreq
	}

	// ORDER BY
	//u.Debugf("OrderBy?  : %v", m.Cur())
	if errreq := m.parseOrderBy(req); errreq != nil {
		return nil, errreq
	}

	// LIMIT
	if err := m.parseLimit(req); err != nil {
		return nil, err
	}

	// WITH
	if err := m.parseWith(req); err != nil {
		return nil, err
	}

	// ALIAS
	if err := m.parseAlias(req); err != nil {
		return nil, err
	}

	if m.Cur().T == lex.TokenEOF || m.Cur().T == lex.TokenEOS || m.Cur().T == lex.TokenRightParenthesis {

		if err := req.Finalize(); err != nil {
			u.Errorf("Could not finalize: %v", err)
			return nil, err
		}

		// we are good
		return req, nil
	}

	u.Warnf("Could not complete parsing, return error: %v %v", m.Cur(), m.l.PeekWord())
	return nil, fmt.Errorf("Did not complete parsing input: %v", m.LexTokenPager.Cur().V)
}
Example #18
func main() {
	flag.Parse()
	u.SetupLogging(logLevel)
	u.SetColorIfTerminal() // this doesn't work if reading stdin
	if colorize {
		u.SetColorOutput()
	}

	done := make(chan bool)
	esHostName = cleanEsHost(esHostName)
	// if we have not specified tail files, then assume stdin
	if len(flag.Args()) == 0 && source == "tail" {
		source = "stdin"
	}

	u.Debugf("LOGES: filters=%s  es=%s argct=:%d source=%v ll=%s  args=%v",
		filters, esHostName, len(flag.Args()), source, logLevel, flag.Args())

	// Setup output first, to ensure it's ready when Source starts
	// TODO:  support multiple outputs?
	switch output {
	case "elasticsearch":
		// update the Logstash date for the index occasionally
		go loges.UpdateLogstashIndex()
		// start an elasticsearch bulk worker, for sending to elasticsearch
		go loges.ToElasticSearch(msgChan, "golog", esHostName, ttl, exitIfNoMsgsDur, metricsToEs)
	case "stdout":
		u.Debug("setting output to stdout ", colorize)
		go loges.ToStdout(msgChan, colorize)
	default:
		Usage()
		os.Exit(1)
	}

	// TODO:  implement metrics out
	for _, metOut := range strings.Split(metricsOut, ",") {
		switch metOut {
		case "influxdb":
			// todo
		case "graphite":
			u.Infof("Registering Graphite Transform: host=%s prefix=%s", graphiteHost, graphitePrefix)
			loges.TransformRegister(loges.GraphiteTransform(logType, graphiteHost, graphitePrefix, true))
		}
	}

	// now set up the transforms/filters
	for _, filter := range strings.Split(filters, ",") {
		switch filter {
		case "stdfiles":
			loges.TransformRegister(loges.FileFormatter(logType, nil))
		case "fluentd":
			loges.TransformRegister(loges.FluentdFormatter(logType, nil))
		case "kafka":
			// TODO, finish conversion to sarama
			//loges.TransformRegister(kafka.KafkaFormatter)
		}
	}

	for _, sourceInput := range strings.Split(source, ",") {
		u.Warnf("source = %v", sourceInput)
		switch sourceInput {
		case "tail":
			for _, filename := range flag.Args() {
				tailDone := make(chan bool)
				go loges.TailFile(filename, tail.Config{Follow: true, ReOpen: true}, tailDone, msgChan)
			}
		case "http":
			go loges.HttpRun(httpPort, msgChan)
		//case "kafka":
		//	go kafka.RunKafkaConsumer(msgChan, partitionstr, topic, kafkaHost, offset, maxMsgCt, maxSize)
		case "stdin":
			go loges.StdinPruducer(msgChan)
		default:
			u.Error(sourceInput)
			println("No input set, required")
			Usage()
			os.Exit(1)
		}
	}
	u.Warn("end of main startup, until done")
	<-done
}
Example #19
func TestSqlDbConnFailure(t *testing.T) {
	// WHERE clause on a joined column (o.price)
	sqlText := `
		SELECT 
			u.user_id, o.item_id, u.reg_date, u.email, o.price, o.order_date
		FROM users AS u 
		INNER JOIN orders AS o 
			ON u.user_id = o.user_id
		WHERE o.price > 10;
	`
	db, err := sql.Open("qlbridge", "mockcsv")
	assert.Tf(t, err == nil, "no error: %v", err)
	assert.Tf(t, db != nil, "has conn: %v", db)

	defer func() {
		if err := db.Close(); err != nil {
			t.Fatalf("Should not error on close: %v", err)
		}
	}()

	rows, err := db.Query(sqlText)
	assert.Tf(t, err == nil, "no error: %v", err)
	assert.Tf(t, rows != nil, "has results: %v", rows)

	cols, err := rows.Columns()
	assert.Tf(t, err == nil, "no error: %v", err)
	assert.Tf(t, len(cols) == 6, "6 cols: %v", cols)
	userOrders := make([]userorder, 0)
	for rows.Next() {
		var uo userorder
		err = rows.Scan(&uo.UserId, &uo.ItemId, &uo.RegDate, &uo.Email, &uo.Price, &uo.OrderDate)
		assert.Tf(t, err == nil, "no error: %v", err)
		//u.Debugf("userorder=%+v", uo)
		userOrders = append(userOrders, uo)
	}
	assert.Tf(t, rows.Err() == nil, "no error: %v", rows.Err())
	assert.Tf(t, len(userOrders) == 2, "want 2 userOrders row: %+v", userOrders)

	uo1 := userOrders[0]
	assert.Tf(t, uo1.Email == "*****@*****.**", "%#v", uo1)
	assert.Tf(t, uo1.Price == 22.5, "? %#v", uo1)

	rows.Close()
	u.Debug("end 1\n\n\n")
	//return

	// Re-run the same query, was failing for some reason?
	sqlText = `
		SELECT 
			u.user_id, o.item_id, u.reg_date, u.email, o.price, o.order_date
		FROM users AS u 
		INNER JOIN orders AS o 
			ON u.user_id = o.user_id
		WHERE o.price > 10;
	`

	rows2, err := db.Query(sqlText)
	assert.Tf(t, err == nil, "no error: %v", err)
	assert.Tf(t, rows2 != nil, "has results: %v", rows2)

	cols, err = rows2.Columns()
	assert.Tf(t, err == nil, "no error: %v", err)
	assert.Tf(t, len(cols) == 6, "6 cols: %v", cols)
	userOrders = make([]userorder, 0)
	for rows2.Next() {
		var uo userorder
		err = rows2.Scan(&uo.UserId, &uo.ItemId, &uo.RegDate, &uo.Email, &uo.Price, &uo.OrderDate)
		assert.Tf(t, err == nil, "no error: %v", err)
		//u.Debugf("userorder=%+v", uo)
		userOrders = append(userOrders, uo)
	}
	assert.Tf(t, rows2.Err() == nil, "no error: %v", rows2.Err())
	assert.Tf(t, len(userOrders) == 2, "want 2 userOrders row: %+v", userOrders)

	uo1 = userOrders[0]
	assert.Tf(t, uo1.Email == "*****@*****.**", "%#v", uo1)
	assert.Tf(t, uo1.Price == 22.5, "? %#v", uo1)
	rows2.Close()
}
Example #20
func RunTestsRecursively(rootDir, dirName string, conf *Conf) []string {

	if strings.Contains(dirName, "trash") {
		return nil
	}

	// Standard go tools skip files/dirs prefixed with _
	if strings.HasPrefix(path.Base(dirName), "_") {
		return nil
	}
	// Skip this directory if the user requested that we skip it
	stat, err := os.Stat(dirName)
	quitIfErr(err)
	for _, skipDir := range conf.skipDirs {
		if os.SameFile(stat, skipDir) {
			gou.Debugf("skipping directory %s as requested", dirName)
			return []string{}
		}
	}
	// Skip this directory if the user entered a .alltestignore file
	_, err = os.Stat(path.Join(dirName, ".alltestignore"))
	if err == nil {
		// If err == nil that means we found a file, thus should bail
		gou.Debugf("skipping directory %s as requested due to ignore file", dirName)
		return []string{}
	}

	infos, err := ioutil.ReadDir(dirName)
	quitIfErr(err)

	failures := []string{}

	anyTestsInDir := false
	anyGoSrcsInDir := false
	for _, info := range infos {
		if info.IsDir() {
			// Recursively run the tests in each subdirectory
			subDirName := path.Join(dirName, info.Name())
			failedSubDirs := RunTestsRecursively(rootDir, subDirName, conf)
			failures = append(failures, failedSubDirs...)
		} else if isTestFile(info) {
			anyTestsInDir = true
		} else if isGoFile(info) {
			anyGoSrcsInDir = true
		}
	}

	goRunOpts := []string{"test"}

	// Run "go test" in this directory if it has any tests
	if anyTestsInDir && !conf.buildOnly {
		if conf.short {
			goRunOpts = append(goRunOpts, "-short")
		}
		if conf.race {
			goRunOpts = append(goRunOpts, "-race")
		}
		if conf.veryVerbose {
			goRunOpts = append(goRunOpts, "-v")
		}
	} else if anyGoSrcsInDir {
		goRunOpts = []string{"build"}
	} else {
		return failures
	}
	err = os.Chdir(dirName)
	quitIfErr(err)

	var bytes []byte
	if conf.veryVerbose {
		bytes, err = exec.Command("go", goRunOpts...).CombinedOutput() // combined means stderr & stdout
	} else {
		bytes, err = exec.Command("go", goRunOpts...).Output()
	}

	if len(bytes) > 0 && bytes[len(bytes)-1] == '\n' {
		// let's get rid of the trailing newline
		bytes = bytes[0 : len(bytes)-1]
	}

	thisDirPath := strings.Replace(dirName, rootDir, "", -1)
	if err != nil {
		if len(bytes) > 0 {
			gou.Errorf("%s", bytes)
		}
		gou.Errorf(`Failed in directory: "%s"`, thisDirPath)
		failures = append(failures, thisDirPath)
	} else {
		if conf.verbose && len(bytes) > 0 {
			gou.Debug(string(bytes))
			//gou.Infof(`Success in directory: "%s"`, thisDirPath)
		}

	}
	return failures
}
Example #21
func TransformRegister(txform LineTransform) {
	u.Debug("setting foramtter")
	transforms = append(transforms, txform)
}
Example #22
// First keyword was SELECT, so use the SELECT parser rule-set
func (m *filterQLParser) parseSelect() (*FilterSelect, error) {

	req := &FilterSelect{FilterStatement: &FilterStatement{}}
	m.fs = req.FilterStatement
	req.Raw = m.l.RawInput()
	req.Description = m.comment
	m.Next() // Consume Select

	if err := parseColumns(m, nil, m.buildVm, req); err != nil {
		u.Debug(err)
		return nil, err
	}

	// OPTIONAL From clause
	if m.Cur().T == lex.TokenFrom {
		m.Next()
		if m.Cur().T == lex.TokenIdentity || m.Cur().T == lex.TokenTable {
			req.From = m.Next().V
		} else {
			return nil, fmt.Errorf("Expected FROM <identity> got %v", m.Cur())
		}
	}

	switch t := m.Next().T; t {
	case lex.TokenWhere:
		// one top level filter which may be nested
		if err := m.parseWhereExpr(req); err != nil {
			u.Debug(err)
			return nil, err
		}
	case lex.TokenFilter:
		//u.Warnf("starting filter %s", req.Raw)
		// one top level filter which may be nested
		filter, err := m.parseFirstFilters()
		if err != nil {
			u.Warnf("Could not parse filters %q err=%v", req.Raw, err)
			return nil, err
		}
		req.Filter = filter
	default:
		return nil, fmt.Errorf("expected SELECT * FROM <table> { <WHERE> | <FILTER> } but got %v instead of WHERE/FILTER", t)
	}

	m.discardNewLines()

	// LIMIT
	if limit, err := m.parseLimit(); err != nil {
		return nil, err
	} else {
		req.Limit = limit
	}

	// WITH
	with, err := ParseWith(m)
	if err != nil {
		return nil, err
	}
	req.With = with

	// ALIAS
	if alias, err := m.parseAlias(); err != nil {
		return nil, err
	} else {
		req.Alias = alias
	}

	if m.Cur().T == lex.TokenEOF || m.Cur().T == lex.TokenEOS || m.Cur().T == lex.TokenRightParenthesis {
		return req, nil
	}

	u.Warnf("Could not complete parsing, return error: %v %v", m.Cur(), m.l.PeekWord())
	return nil, fmt.Errorf("Did not complete parsing input: %v", m.LexTokenPager.Cur().V)
}
Example #23
func (m *FilterQLParser) parseFilters() (*Filters, error) {

	var fe *FilterExpr
	var filters *Filters

	switch m.Cur().T {
	case lex.TokenLogicAnd, lex.TokenAnd, lex.TokenOr, lex.TokenLogicOr:
		// fine, we have nested parent expression (AND | OR)
		filters = NewFilters(m.Cur())
		m.Next()
	default:
		//return nil, fmt.Errorf("Expected ( AND | OR ) but got %v", m.Cur())
		filters = NewFilters(lex.Token{T: lex.TokenLogicAnd})
	}

	for {

		u.Debug(m.Cur())
		switch m.Cur().T {
		case lex.TokenAnd, lex.TokenOr, lex.TokenLogicAnd, lex.TokenLogicOr:
			innerf, err := m.parseFilters()
			if err != nil {
				return nil, err
			}
			fe = NewFilterExpr()
			fe.Filter = innerf
			filters.Filters = append(filters.Filters, fe)

		case lex.TokenInclude:
			// embed/include a named filter
			m.Next()
			if m.Cur().T != lex.TokenIdentity {
				return nil, fmt.Errorf("Expected identity for Include but got %v", m.Cur())
			}
			fe = NewFilterExpr()
			fe.Include = m.Cur().V
			m.Next()
			filters.Filters = append(filters.Filters, fe)
			continue

		case lex.TokenLeftParenthesis:
			m.Next()
			continue
		case lex.TokenUdfExpr:
			// we have a udf/functional expression filter
			fe = NewFilterExpr()
			filters.Filters = append(filters.Filters, fe)
			tree := NewTree(m.FilterTokenPager)
			if err := m.parseNode(tree); err != nil {
				u.Errorf("could not parse: %v", err)
				return nil, err
			}
			fe.Expr = tree.Root

		case lex.TokenNegate, lex.TokenIdentity, lex.TokenLike, lex.TokenExists, lex.TokenBetween,
			lex.TokenIN, lex.TokenValue:

			fe = NewFilterExpr()
			filters.Filters = append(filters.Filters, fe)
			tree := NewTree(m.FilterTokenPager)
			if err := m.parseNode(tree); err != nil {
				u.Errorf("could not parse: %v", err)
				return nil, err
			}
			fe.Expr = tree.Root
		}
		//u.Debugf("after filter start?:   %v  ", m.Cur())

		// since we can loop inside switch statement
		switch m.Cur().T {
		case lex.TokenLimit, lex.TokenEOS, lex.TokenEOF:
			return filters, nil
		case lex.TokenCommentSingleLine, lex.TokenCommentStart, lex.TokenCommentSlashes, lex.TokenComment,
			lex.TokenCommentEnd:
			// should we save this into filter?
		case lex.TokenRightParenthesis:
			// end of this filter expression
			m.Next()
			return filters, nil
		case lex.TokenComma:
			// keep looping, looking for more expressions
		default:
			return nil, fmt.Errorf("expected column but got: %v", m.Cur().String())
		}
		m.Next()
	}
	//u.Debugf("filters: %d", len(filters.Filters))
	return filters, nil
}
Example #24
func main() {
	gou.SetLogger(log.New(os.Stderr, "", log.Ltime|log.Lshortfile), "debug")
	gou.Debug("hello")
}