Ejemplo n.º 1
0
// AddGeoIP enriches an EVE event with GeoIP information, preferring the
// source address and falling back to the destination address. RFC 1918
// (private) addresses are skipped. No-op if no GeoIP database is loaded.
func (f *GeoipFilter) AddGeoIP(event RawEveEvent) {

	if f.db == nil {
		return
	}

	srcip, ok := event["src_ip"].(string)
	if ok && !IsRFC1918(srcip) {
		gip, err := f.db.LookupString(srcip)
		if err != nil {
			log.Debug("Failed to lookup geoip for %s", srcip)
		} else if gip.ContinentCode != "" {
			// Need at least a continent code. Only touch the result on a
			// successful lookup; the original dereferenced gip even after
			// an error, risking a nil/zero-value access.
			event["geoip"] = gip
		}
	}

	// Fall back to the destination address when the source lookup did not
	// yield a usable result.
	if event["geoip"] == nil {
		destip, ok := event["dest_ip"].(string)
		if ok && !IsRFC1918(destip) {
			gip, err := f.db.LookupString(destip)
			if err != nil {
				log.Debug("Failed to lookup geoip for %s", destip)
			} else if gip.ContinentCode != "" {
				// Need at least a continent code.
				event["geoip"] = gip
			}
		}
	}

}
Ejemplo n.º 2
0
// Migrate brings the database schema up to date by applying each embedded
// SQL migration script (sqlite/V<n>.sql) in sequence, starting at the
// version after the one recorded in the schema table. Each script runs in
// its own transaction together with the version bump.
func (m *Migrator) Migrate() error {

	var currentVersion int
	nextVersion := 0

	rows, err := m.db.Query("select max(version) from schema")
	if err == nil {
		if rows.Next() {
			if err := rows.Scan(&currentVersion); err != nil {
				rows.Close()
				return err
			}
			nextVersion = currentVersion + 1
		}
		// Release the result set before running migration statements,
		// otherwise the connection stays pinned.
		rows.Close()
		log.Debug("Current database schema version: %d", currentVersion)
	} else {
		// Query failed: assume there is no schema table yet and start
		// from version 0.
		log.Debug("Initializing database.")
	}

	for {

		// Stop when there is no script for the next version.
		script, err := resources.AssetString(fmt.Sprintf("sqlite/V%d.sql", nextVersion))
		if err != nil {
			break
		}

		log.Info("Updating database to version %d.", nextVersion)

		tx, err := m.db.Begin()
		if err != nil {
			return err
		}

		if _, err := tx.Exec(script); err != nil {
			// Roll back so the transaction is not left dangling.
			tx.Rollback()
			return err
		}

		if err := m.setVersion(tx, nextVersion); err != nil {
			tx.Rollback()
			return err
		}

		if err := tx.Commit(); err != nil {
			return err
		}

		nextVersion++
	}

	return nil
}
Ejemplo n.º 3
0
// ArchiveAlertGroup marks all not-yet-archived alerts matching the given
// query parameters (signature ID, source/destination IP and an optional
// maximum timestamp) as archived.
func (s *DataStore) ArchiveAlertGroup(p core.AlertGroupQueryParams) error {

	sql := `UPDATE events SET archived = 1 WHERE`

	builder := SqlBuilder{}

	// Only touch events that are not archived yet.
	builder.WhereEquals("archived", 0)

	builder.WhereEquals(
		"json_extract(events.source, '$.alert.signature_id')",
		p.SignatureID)

	builder.WhereEquals(
		"json_extract(events.source, '$.src_ip')",
		p.SrcIP)

	builder.WhereEquals(
		"json_extract(events.source, '$.dest_ip')",
		p.DstIP)

	if p.MaxTimestamp != "" {
		ts, err := eveTs2SqliteTs(p.MaxTimestamp)
		if err != nil {
			return err
		}
		builder.WhereLte("timestamp", ts)
	}

	// Splice the generated (parameterized) WHERE clause into the statement.
	sql = strings.Replace(sql, "WHERE", builder.BuildWhere(), 1)

	start := time.Now()
	r, err := s.db.DB.Exec(sql, builder.args...)
	if err != nil {
		return err
	}
	rows, _ := r.RowsAffected()
	log.Debug("Archived %d alerts.", rows)
	// time.Since is the idiomatic form of time.Now().Sub(start).
	duration := time.Since(start).Seconds()
	log.Debug("Archive query time: %v", duration)

	return nil
}
Ejemplo n.º 4
0
// GetTemplate fetches the named index template from Elastic Search and
// decodes the response into a JsonMap.
func (es *ElasticSearch) GetTemplate(name string) (JsonMap, error) {
	log.Debug("Fetching template [%s]", name)

	response, err := es.HttpClient.Get(fmt.Sprintf("_template/%s", name))
	if err != nil {
		return nil, err
	}

	template := JsonMap{}
	err = es.Decode(response, &template)
	if err != nil {
		return nil, err
	}
	return template, nil
}
Ejemplo n.º 5
0
// GetKeywordType is a crude way of determining if the template is using
// Logstash 5 keyword type, or Logstash 2 "raw" type.
//
// Returns "keyword" or "raw" on success, or "" (with a nil error) when the
// template could not be fetched or parsed — callers treat "" as "resolve
// later", which is why no error is propagated.
func (es *ElasticSearch) GetKeywordType(index string) (string, error) {
	// Default to the configured event base index.
	if index == "" {
		index = es.EventBaseIndex
	}
	template, err := es.GetTemplate(index)
	if err != nil {
		// Non-fatal: keyword resolution is retried later.
		log.Warning("Failed to get template from Elastic Search, keyword resolution delayed.")
		return "", nil
	}

	version := template.GetMap(index).Get("version")
	log.Debug("Found template version %v", version)

	// Walk down to the dynamic templates of the "_default_" mapping.
	// NOTE(review): this relies on GetMap tolerating missing keys along the
	// chain (presumably returning nil) — confirm against the JsonMap helper.
	dynamicTemplates := template.GetMap(index).
		GetMap("mappings").
		GetMap("_default_").
		GetMapList("dynamic_templates")
	if dynamicTemplates == nil {
		log.Warning("Failed to parse template, keyword resolution delayed.")
		log.Warning("Template: %s", util.ToJson(template))
		return "", nil
	}
	// A "keyword" sub-field type indicates the Logstash 5 style; the
	// presence of a "raw" sub-field indicates the Logstash 2 style.
	for _, entry := range dynamicTemplates {
		if entry["string_fields"] != nil {
			mappingType := entry.GetMap("string_fields").
				GetMap("mapping").
				GetMap("fields").
				GetMap("keyword").
				Get("type")
			if mappingType == "keyword" {
				return "keyword", nil
			}

			if entry.GetMap("string_fields").GetMap("mapping").GetMap("fields").GetMap("raw") != nil {
				return "raw", nil
			}
		}
	}
	// Neither style was recognized; defer resolution as above.
	log.Warning("Failed to parse template, keyword resolution delayed.")
	log.Warning("Template: %s", util.ToJson(template))
	return "", nil
}
Ejemplo n.º 6
0
// RemoveTagsFromAlertGroup removes the given tags from all alerts matching
// the provided parameters. It scrolls over matching documents in pages and
// bulk-updates each page, retrying a limited number of times on partial
// failures (e.g. rejections due to thread-pool limits).
func (s *EventService) RemoveTagsFromAlertGroup(p core.AlertGroupQueryParams, tags []string) error {

	filter := []interface{}{
		ExistsQuery("event_type"),
		KeywordTermQuery("event_type", "alert", s.es.keyword),
		RangeQuery{
			Field: "timestamp",
			Gte:   p.MinTimestamp,
			Lte:   p.MaxTimestamp,
		},
		KeywordTermQuery("src_ip", p.SrcIP, s.es.keyword),
		KeywordTermQuery("dest_ip", p.DstIP, s.es.keyword),
		TermQuery("alert.signature_id", p.SignatureID),
	}

	// Only match documents that currently carry all of the tags.
	for _, tag := range tags {
		filter = append(filter, TermQuery("tags", tag))
	}

	query := m{
		"query": m{
			"bool": m{
				"filter": filter,
			},
		},
		// Only the tags field is needed to compute the update.
		"_source": "tags",
		"sort": l{
			"_doc",
		},
		"size": 10000,
	}

	log.Println(util.ToJson(query))

	searchResponse, err := s.es.SearchScroll(query, "1m")
	if err != nil {
		log.Error("Failed to initialize scroll: %v", err)
		return err
	}

	scrollID := searchResponse.ScrollId

	for {

		log.Debug("Search response total: %d; hits: %d",
			searchResponse.Hits.Total, len(searchResponse.Hits.Hits))

		if len(searchResponse.Hits.Hits) == 0 {
			break
		}

		// We do this in a retry loop as some documents may fail to be
		// updated. Most likely rejected due to max thread count or
		// something.
		maxRetries := 5
		retries := 0
		for {
			retry, err := bulkUpdateTags(s.es, searchResponse.Hits.Hits,
				nil, tags)
			if err != nil {
				// Fixed: previous message referred to BulkAddTags.
				log.Error("bulkUpdateTags failed: %v", err)
				return err
			}
			if !retry {
				break
			}
			retries++
			if retries > maxRetries {
				// Fixed wording: this path removes tags, it does not archive.
				log.Warning("Errors occurred removing tags from events, not all events may have been updated.")
				break
			}
		}

		// Get next set of events to update.
		searchResponse, err = s.es.Scroll(scrollID, "1m")
		if err != nil {
			log.Error("Failed to fetch from scroll: %v", err)
			return err
		}

	}

	response, err := s.es.DeleteScroll(scrollID)
	if err != nil {
		log.Error("Failed to delete scroll id: %v", err)
	}
	// Drain and close the body so the HTTP connection can be reused; guard
	// against a nil response when DeleteScroll failed (the original would
	// dereference response.Body unconditionally).
	if response != nil {
		io.Copy(ioutil.Discard, response.Body)
		response.Body.Close()
	}

	s.es.Refresh()

	return nil
}
Ejemplo n.º 7
0
// GET registers a handler for HTTP GET requests on the given path.
func (r *Router) GET(path string, handler http.Handler) {
	log.Debug("Adding GET route: %s", path)
	route := r.router.Handle(path, handler)
	route.Methods("GET")
}
Ejemplo n.º 8
0
// EventQuery returns events matching the given options as an Elastic
// Search-like envelope: {"data": [{"_id": ..., "_source": ...}, ...]}.
// The query string is parsed shell-style; "field=value" terms become
// json_extract equality filters and bare words become full-text terms.
func (s *DataStore) EventQuery(options core.EventQueryOptions) (interface{}, error) {

	// Default page size, overridable via options.
	size := int64(500)

	if options.Size > 0 {
		size = options.Size
	}

	sql := `select events.id, events.timestamp, events.source`

	sqlBuilder := SqlBuilder{}

	sqlBuilder.From("events")

	if options.EventType != "" {
		sqlBuilder.WhereEquals("json_extract(events.source, '$.event_type')", options.EventType)
	}

	// Full-text search terms collected from bare words in the query string.
	fts := []string{}

	if options.QueryString != "" {

		words, _ := shellwords.Parse(options.QueryString)

		for _, word := range words {

			log.Debug("Word: %s", word)

			parts := strings.SplitN(word, "=", 2)

			if len(parts) == 2 {

				field := parts[0]
				valuestr := parts[1]
				var arg interface{}

				// Bind integers as integers so json_extract comparisons
				// match numeric JSON values.
				valueint, err := strconv.ParseInt(valuestr, 0, 64)
				if err == nil {
					arg = valueint
				} else {
					arg = valuestr
				}

				sqlBuilder.WhereEquals(
					fmt.Sprintf(" json_extract(events.source, '$.%s')", field),
					arg)
			} else {
				fts = append(fts, fmt.Sprintf("\"%s\"", parts[0]))
			}

		}
	}

	if options.MaxTs != "" {
		maxTs, err := time.Parse("2006-01-02T15:04:05.999999", options.MaxTs)
		if err != nil {
			return nil, fmt.Errorf("Bad timestamp: %s", options.MaxTs)
		}
		sqlBuilder.WhereLte("datetime(events.timestamp)", maxTs)
	}

	if options.MinTs != "" {
		minTs, err := time.Parse("2006-01-02T15:04:05.999999", options.MinTs)
		if err != nil {
			return nil, fmt.Errorf("Bad timestamp: %s", options.MinTs)
		}
		sqlBuilder.WhereGte("datetime(events.timestamp)", minTs)
	}

	if len(fts) > 0 {
		sqlBuilder.From("events_fts")
		sqlBuilder.Where("events.id == events_fts.id")
		// SECURITY(review): the MATCH expression is built from user input
		// by string interpolation; a query-string value containing a quote
		// can break out of it. Should be bound as a parameter instead.
		sqlBuilder.Where(fmt.Sprintf("events_fts MATCH '%s'", strings.Join(fts, " AND ")))
	}

	sql += sqlBuilder.BuildFrom()

	if sqlBuilder.HasWhere() {
		sql += sqlBuilder.BuildWhere()
	}

	sql += " ORDER BY timestamp DESC"
	sql += fmt.Sprintf(" LIMIT %d", size)

	log.Println(sql)

	rows, err := s.db.Query(sql, sqlBuilder.args...)
	if err != nil {
		return nil, err
	}
	// Ensure the result set is released even on early error returns.
	defer rows.Close()

	events := []interface{}{}

	for rows.Next() {
		var rawSource []byte
		var id uuid.UUID
		var timestamp string
		err = rows.Scan(&id, &timestamp, &rawSource)
		if err != nil {
			return nil, err
		}

		source := map[string]interface{}{}

		// UseNumber avoids converting large integers to float64.
		decoder := json.NewDecoder(bytes.NewReader(rawSource))
		decoder.UseNumber()
		err = decoder.Decode(&source)
		if err != nil {
			return nil, err
		}

		source["@timestamp"] = timestamp

		events = append(events, map[string]interface{}{
			"_id":     id.String(),
			"_source": source,
		})
	}
	// Surface any iteration error that terminated the loop early.
	if err := rows.Err(); err != nil {
		return nil, err
	}

	return map[string]interface{}{
		"data": events,
	}, nil
}
Ejemplo n.º 9
0
// Reopen closes and re-opens the underlying file, e.g. after rotation.
func (er *EveReader) Reopen() error {
	log.Debug("Reopening %s", er.path)
	// Best-effort close; any real problem surfaces from OpenFile below.
	_ = er.file.Close()
	return er.OpenFile()
}
Ejemplo n.º 10
0
// configure parses command line arguments and an optional configuration
// file into a Config. Command line flags take precedence over values from
// the configuration file; exactly zero or one positional input filename is
// accepted.
func configure(args []string) Config {
	flagset = flag.NewFlagSet("import", flag.ExitOnError)
	flagset.Usage = usage

	configFilename := flagset.StringP("config", "c", "", "Configuration file")
	verbose := flagset.BoolP("verbose", "v", false, "Verbose output")
	elasticSearchUri := flagset.StringP("elasticsearch", "e", "", "Elastic Search URL")
	username := flagset.StringP("username", "u", "", "Username")
	password := flagset.StringP("password", "p", "", "Password")
	noCheckCertificate := flagset.BoolP("no-check-certificate", "k", false, "Disable certificate check")
	index := flagset.String("index", DEFAULT_INDEX, "Elastic Search index prefix")
	oneshot := flagset.Bool("oneshot", false, "One shot mode (exit on EOF)")
	stdout := flagset.Bool("stdout", false, "Print events to stdout")
	end := flagset.Bool("end", false, "Start at end of file")
	batchSize := flagset.Uint64("batch-size", 1000, "Batch import size")
	useBookmark := flagset.Bool("bookmark", false, "Bookmark location")
	bookmarkPath := flagset.String("bookmark-path", "", "Path to bookmark file")
	noGeoIp := flagset.Bool("no-geoip", false, "Disable GeoIP lookups")
	geoIpDatabase := flagset.String("geoip-database", "", "Path to GeoIP (v2) database file")

	flagset.Parse(args[1:])

	if *verbose {
		log.Info("Setting log level to debug")
		log.SetLevel(log.DEBUG)
	}

	// Defaults applied before the configuration file is loaded.
	configWrapper := ConfigWrapper{
		Config: Config{
			Index:     DEFAULT_INDEX,
			BatchSize: 1000,
		},
	}

	if *configFilename != "" {
		log.Debug("Loading configuration file %s", *configFilename)
		err := config.LoadConfigTo(*configFilename, &configWrapper)
		if err != nil {
			log.Fatal(err)
		}
	}
	conf := configWrapper.Config

	// Flags explicitly given on the command line override file values.
	flagset.Visit(func(flag *flag.Flag) {
		log.Debug("Found command line argument %s -> %s", flag.Name,
			flag.Value.String())
		switch flag.Name {
		case "elasticsearch":
			conf.Url = *elasticSearchUri
		case "username":
			conf.Username = *username
		case "password":
			conf.Password = *password
		case "no-check-certificate":
			conf.DisableCertificateCheck = *noCheckCertificate
		case "index":
			conf.Index = *index
		case "oneshot":
			conf.oneshot = *oneshot
		case "stdout":
			conf.stdout = *stdout
		case "end":
			conf.End = *end
		case "batch-size":
			conf.BatchSize = *batchSize
		case "bookmark":
			conf.Bookmark = *useBookmark
		case "bookmark-path":
			conf.BookmarkPath = *bookmarkPath
		case "no-geoip":
			conf.DisableGeoIp = *noGeoIp
		case "geoip-database":
			conf.GeoIpDatabase = *geoIpDatabase
		case "verbose":
			conf.Verbose = *verbose
		case "config":
			// Already handled above.
		default:
			// Fixed typo: "Unhandle" -> "Unhandled".
			log.Notice("Unhandled configuration flag %s", flag.Name)
		}
	})

	if len(flagset.Args()) == 1 {
		conf.InputFilename = flagset.Args()[0]
	} else if len(flagset.Args()) > 1 {
		log.Fatal("Multiple input filenames not allowed")
	}

	return conf
}
Ejemplo n.º 11
0
// Main is the entry point of the EVE import command. It validates the
// configuration, connects to Elastic Search, ensures the index template
// exists, then tails the input file indexing events in batches, optionally
// bookmarking progress and logging throughput once a minute.
func Main(args []string) {

	conf := configure(args)

	if conf.BatchSize < 1 {
		log.Fatal("Batch size must be greater than 0")
	}

	if conf.Url == "" {
		log.Error("error: --elasticsearch is a required parameter")
		usage()
		os.Exit(1)
	}

	if conf.InputFilename == "" {
		log.Fatal("error: no input file provided")
	}

	// Default the bookmark file to sit next to the input file.
	if conf.Bookmark && conf.BookmarkPath == "" {
		conf.BookmarkPath = fmt.Sprintf("%s.bookmark", conf.InputFilename)
		log.Info("Using bookmark file %s", conf.BookmarkPath)
	}

	es := elasticsearch.New(conf.Url)
	es.DisableCertCheck(conf.DisableCertificateCheck)
	if conf.Username != "" || conf.Password != "" {
		if err := es.SetUsernamePassword(conf.Username,
			conf.Password); err != nil {
			log.Fatal("Failed to set username and password: %v", err)
		}
	}
	response, err := es.Ping()
	if err != nil {
		log.Fatal("error: failed to ping Elastic Search:", err)
	}
	log.Info("Connected to Elastic Search v%s (cluster:%s; name: %s)",
		response.Version.Number, response.ClusterName, response.Name)
	majorVersion := response.MajorVersion()

	// Check if the template exists. A failed check is logged (previously
	// the error was silently discarded) and falls through to creation.
	templateExists, err := es.CheckTemplate(conf.Index)
	if err != nil {
		log.Warning("Failed to check for template: %v", err)
	}
	if !templateExists {
		log.Info("Template %s does not exist, creating...", conf.Index)
		err = es.LoadTemplate(conf.Index, majorVersion)
		if err != nil {
			log.Fatal("Failed to create template:", err)
		}
	} else {
		log.Info("Template %s exists, will not create.", conf.Index)
	}

	var geoipFilter *eve.GeoipFilter
	tagsFilter := &eve.TagsFilter{}

	// GeoIP is best-effort: a missing database only disables enrichment.
	if !conf.DisableGeoIp {
		geoipdb, err := geoip.NewGeoIpDb(conf.GeoIpDatabase)
		if err != nil {
			log.Notice("Failed to load GeoIP database: %v", err)
		} else {
			log.Info("Using GeoIP database %s, %s", geoipdb.Type(), geoipdb.BuildDate())
			geoipFilter = eve.NewGeoipFilter(geoipdb)
		}
	}

	indexer := elasticsearch.NewIndexer(es, conf.DisableCertificateCheck)
	indexer.IndexPrefix = conf.Index

	reader, err := evereader.New(conf.InputFilename)
	if err != nil {
		log.Fatal(err)
	}

	// Initialize bookmarking...
	var bookmarker *evereader.Bookmarker = nil
	if conf.Bookmark {
		bookmarker = &evereader.Bookmarker{
			Filename: conf.BookmarkPath,
			Reader:   reader,
		}
		err := bookmarker.Init(conf.End)
		if err != nil {
			log.Fatal(err)
		}
	} else if conf.End {
		log.Info("Jumping to end of file.")
		err := reader.SkipToEnd()
		if err != nil {
			log.Fatal(err)
		}
	}

	count := uint64(0)
	lastStatTs := time.Now()
	lastStatCount := uint64(0)
	startTime := time.Now()

	// Number of EOFs in last stat interval.
	eofs := uint64(0)

	// The indexer runs its connection loop in the background.
	go func() {
		err := indexer.Run()
		if err != nil {
			log.Fatal("Elastic Search indexer connection unexpectedly closed:", err)
		} else {
			log.Debug("Indexer exited without issue.")
		}
	}()

	for {
		eof := false
		event, err := reader.Next()
		if err != nil {
			if err == io.EOF {
				eof = true
				eofs++
			} else if _, ok := err.(evereader.MalformedEventError); ok {
				log.Error("Failed to read event but will continue: %v", err)
			} else {
				log.Fatalf("Unrecoverable error reading event: %v", err)
			}
		}

		if event != nil {

			if geoipFilter != nil {
				geoipFilter.AddGeoIP(event)
			}

			tagsFilter.Filter(event)

			if conf.stdout {
				asJson, err := json.Marshal(event)
				if err != nil {
					log.Error("Failed to print event as json: %v", err)
				} else {
					fmt.Println(string(asJson))
				}
			}

			indexer.IndexRawEvent(event)
			count++
		}

		// Flush on EOF or whenever a full batch has accumulated. The
		// bookmark is captured before the flush and only written after a
		// successful flush so progress is never over-reported.
		if eof || (count > 0 && count%conf.BatchSize == 0) {
			var bookmark *evereader.Bookmark = nil

			if conf.Bookmark {
				bookmark = bookmarker.GetBookmark()
			}

			response, err := indexer.FlushConnection()
			if err != nil {
				log.Fatal(err)
			}
			if response != nil {
				log.Debug("Indexed %d events {errors=%v}", len(response.Items),
					response.Errors)
			}

			if conf.Bookmark {
				bookmarker.WriteBookmark(bookmark)
			}
		}

		// Log throughput statistics roughly once a minute (on second 0).
		now := time.Now()
		if now.Sub(lastStatTs).Seconds() > 1 && now.Second() == 0 {

			// Calculate the lag in bytes, that is the number of bytes behind
			// the end of file we are.
			lag, err := GetLag(reader)
			if err != nil {
				log.Error("Failed to calculate lag: %v", err)
			}

			log.Info("Total: %d; Last minute: %d; Avg: %.2f/s, EOFs: %d; Lag (bytes): %d",
				count,
				count-lastStatCount,
				float64(count-lastStatCount)/(now.Sub(lastStatTs).Seconds()),
				eofs,
				lag)
			lastStatTs = now
			lastStatCount = count
			eofs = 0
		}

		if eof {
			if conf.oneshot {
				break
			} else {
				// Tail mode: wait for more data.
				time.Sleep(1 * time.Second)
			}
		}
	}

	totalTime := time.Since(startTime)

	if conf.oneshot {
		log.Info("Indexed %d events: time=%.2fs; avg=%d/s", count, totalTime.Seconds(),
			uint64(float64(count)/totalTime.Seconds()))
	}
}