func (es *ElasticSearch) SetEventIndex(index string) { if strings.HasSuffix(index, "*") { baseIndex := strings.TrimSuffix(strings.TrimSuffix(index, "*"), "-") es.EventSearchIndex = index es.EventBaseIndex = baseIndex } else { es.EventBaseIndex = index es.EventSearchIndex = fmt.Sprintf("%s-*", index) } log.Info("Event base index: %s", es.EventBaseIndex) log.Info("Event search index: %s", es.EventSearchIndex) }
func NewService() (*Service, error) { args := fmt.Sprintf( "dbname=%s user=%s password=%s port=%s sslmode=%s", PGDATABASE, PGUSER, PGPASS, PGPORT, "disable") db, err := sql.Open("postgres", args) if err != nil { log.Fatal(err) } var pgVersion string err = db.QueryRow("select version()").Scan(&pgVersion) if err != nil { return nil, err } log.Info("Connected to PostgreSQL version %s.", pgVersion) return &Service{ db: db, }, nil }
func (es *ElasticSearch) InitKeyword() error { keyword, err := es.GetKeywordType(es.EventBaseIndex) if err != nil { return err } es.keyword = keyword log.Info("Elastic Search keyword initialized to \"%s\"", es.keyword) return nil }
func (m *Migrator) Migrate() error { var currentVersion int nextVersion := 0 rows, err := m.db.Query("select max(version) from schema") if err == nil { if rows.Next() { if err := rows.Scan(¤tVersion); err != nil { return err } nextVersion = currentVersion + 1 } log.Debug("Current database schema version: %d", currentVersion) } else { log.Debug("Initializing database.") } for { script, err := resources.AssetString(fmt.Sprintf("sqlite/V%d.sql", nextVersion)) if err != nil { break } log.Info("Updating database to version %d.", nextVersion) tx, err := m.db.Begin() if err != nil { return err } _, err = tx.Exec(script) if err != nil { return err } err = m.setVersion(tx, nextVersion) if err != nil { return err } err = tx.Commit() if err != nil { return err } nextVersion++ } return nil }
func ReadPipe(pipe io.ReadCloser, doLog bool, logPrefix string) error { reader := bufio.NewReader(pipe) for { line, err := reader.ReadBytes('\n') if err != nil && err == io.EOF { break } else if err != nil { return err } log.Info("%s: %s", logPrefix, strings.TrimSpace(string(line))) } return nil }
func (b *Bookmarker) Init(end bool) error { bookmark, err := b.ReadBookmark() if err == nil && b.BookmarkIsValid(bookmark) { err = b.Reader.SkipTo(bookmark.Offset) if err != nil { log.Error("Failed to skip to line %d, will skip to end of file: %s", err) b.Reader.SkipToEnd() } } else { log.Info("Failed to read bookmark: %s", err) if end { log.Info("Will start reading at end of file.") b.Reader.SkipToEnd() } else { log.Info("Will start reading at beginning of file.") } } // Test write. bookmark = b.GetBookmark() return b.WriteBookmark(bookmark) }
func Start(directory string) (*exec.Cmd, error) { // Get the absolute path if the data directory. path, err := filepath.Abs(directory) if err != nil { return nil, err } log.Info("Using postgres data directory %s", path) command := exec.Command("postgres", "-D", path, "-c", "log_destination=stderr", "-c", "logging_collector=off", "-k", path) stdout, err := command.StdoutPipe() if err != nil { log.Error("Failed to open postgres stdout, will not be logged.") stdout = nil } stderr, err := command.StderrPipe() if err != nil { log.Error("Failed to open postgres stderr, will not be logged.") stderr = nil } err = command.Start() if err != nil { log.Error("Failed to start postgres: %v", err) return nil, err } if stdout != nil { go func() { if err := ReadPipe(stdout, true, "postgres stdout"); err != nil { log.Error("Failed to read postgres stdout: %v", err) } }() } if stderr != nil { go func() { if err := ReadPipe(stderr, true, "postgres stderr"); err != nil { log.Error("Failed to read postgres stderr: %v", err) } }() } return command, nil }
func (i *BulkEveIndexer) CheckForRedirect() { httpClient := http.Client{ CheckRedirect: func(request *http.Request, via []*http.Request) error { if request.Response != nil { location, err := request.Response.Location() if err == nil { log.Info("Redirection to %s detected, updating Elastic Search base URL.", location.String()) i.baseUrl = location.String() } } return nil }, } httpClient.Head(i.es.baseUrl) }
func main() { // Look for sub-commands, then fall back to server. if len(os.Args) > 1 && os.Args[1][0] != '-' { switch os.Args[1] { case "version": VersionMain() return case "esimport": esimport.Main(os.Args[1:]) return case "evereader": evereader.Main(os.Args[1:]) return case "server": server.Main(os.Args[2:]) return case "pgimport": pgimport.Main(os.Args[2:]) return case "sqliteimport": sqliteimport.Main(os.Args[2:]) return default: log.Fatalf("Unknown command: %s", os.Args[1]) } } else if len(os.Args) > 1 { switch os.Args[1] { case "-h": Usage() os.Exit(0) } } log.Info("No command provided, defaulting to server.") server.Main(os.Args[1:]) }
func (s FileServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { var path string if r.URL.String() == "/" { path = "index.html" } else { path = strings.TrimPrefix(r.URL.String(), "/") } // Remove any query string parameters... parts := strings.SplitN(path, "?", 2) path = parts[0] log.Info("Static file request for %s.", path) asset := fmt.Sprintf("public/%s", path) bytes, err := Asset(asset) if err != nil { log.Error("Public file not found: %s", path) w.WriteHeader(http.StatusNotFound) } else { w.Write(bytes) } }
func Main(args []string) { var err error log.Info("This is EveBox Server version %v (rev: %v)", core.BuildVersion, core.BuildRev) flagset := flag.NewFlagSet("server", flag.ExitOnError) flagset.StringVarP(&opts.ElasticSearchUri, "elasticsearch", "e", "", "Elastic Search URI (default: http://localhost:9200") flagset.StringVarP(&opts.ElasticSearchIndex, "index", "i", "", "Elastic Search Index (default: logstash)") flagset.StringVarP(&opts.Port, "port", "p", "5636", "Port to bind to") flagset.StringVarP(&opts.Host, "host", "", "0.0.0.0", "Host to bind to") flagset.StringVarP(&opts.DevServerUri, "dev", "", "", "Frontend development server URI") flagset.BoolVarP(&opts.Version, "version", "", false, "Show version") flagset.StringVarP(&opts.Config, "config", "c", "", "Configuration filename") flagset.BoolVarP(&opts.NoCheckCertificate, "no-check-certificate", "k", false, "Disable certificate check for Elastic Search") flagset.BoolVarP(&opts.Sqlite, "sqlite", "", false, "Use SQLite for the event store") flagset.Parse(args[0:]) if opts.Version { VersionMain() return } log.SetLevel(log.DEBUG) // If no configuration was provided, see if evebox.yaml exists // in the current directory. 
if opts.Config == "" { _, err = os.Stat("./evebox.yaml") if err == nil { opts.Config = "./evebox.yaml" } } if opts.Config != "" { log.Printf("Loading configuration file %s.\n", opts.Config) conf, err = config.LoadConfig(opts.Config) if err != nil { log.Fatal(err) } } conf.ElasticSearchIndex = getElasticSearchIndex() log.Info("Using ElasticSearch Index %s.", conf.ElasticSearchIndex) appContext := server.AppContext{ Config: conf, } elasticSearch := elasticsearch.New(getElasticSearchUrl()) elasticSearch.SetEventIndex(conf.ElasticSearchIndex) elasticSearch.InitKeyword() pingResponse, err := elasticSearch.Ping() if err != nil { log.Error("Failed to ping Elastic Search: %v", err) } else { log.Info("Connected to Elastic Search (version: %s)", pingResponse.Version.Number) } appContext.ElasticSearch = elasticSearch appContext.EventService = elasticsearch.NewEventService(elasticSearch) appContext.AlertQueryService = elasticsearch.NewAlertQueryService(elasticSearch) appContext.EventQueryService = elasticsearch.NewEventQueryService(elasticSearch) appContext.ReportService = elasticsearch.NewReportService(elasticSearch) appContext.Vars.DevWebAppServerUrl = opts.DevServerUri var datastoreType string = "elasticsearch" if opts.Sqlite { datastoreType = "sqlite" } if datastoreType == "elasticsearch" { appContext.DataStore, err = elasticsearch.NewDataStore(elasticSearch) if err != nil { log.Fatal(err) } } else if datastoreType == "sqlite" { appContext.DataStore, err = sqlite.NewDataStore() if err != nil { log.Fatal(err) } } httpServer := server.NewServer(appContext) err = httpServer.Start(opts.Host + ":" + opts.Port) if err != nil { log.Fatal(err) } }
// Main is the entry point for the esimport command: it tails a Suricata
// EVE log file and bulk-indexes the events into Elastic Search,
// optionally running them through GeoIP and tag filters and persisting
// a bookmark so imports resume where they left off.
func Main(args []string) {

	conf := configure(args)

	// Validate required options up front.
	if conf.BatchSize < 1 {
		log.Fatal("Batch size must be greater than 0")
	}
	if conf.Url == "" {
		log.Error("error: --elasticsearch is a required parameter")
		usage()
		os.Exit(1)
	}
	if conf.InputFilename == "" {
		log.Fatal("error: no input file provided")
	}

	// Default the bookmark path to "<input>.bookmark" when bookmarking
	// is enabled but no explicit path was given.
	if conf.Bookmark && conf.BookmarkPath == "" {
		conf.BookmarkPath = fmt.Sprintf("%s.bookmark", conf.InputFilename)
		log.Info("Using bookmark file %s", conf.BookmarkPath)
	}

	es := elasticsearch.New(conf.Url)
	es.DisableCertCheck(conf.DisableCertificateCheck)
	if conf.Username != "" || conf.Password != "" {
		if err := es.SetUsernamePassword(conf.Username, conf.Password); err != nil {
			log.Fatal("Failed to set username and password: %v", err)
		}
	}

	// Verify connectivity and capture the server version.
	response, err := es.Ping()
	if err != nil {
		log.Fatal("error: failed to ping Elastic Search:", err)
	}
	log.Info("Connected to Elastic Search v%s (cluster:%s; name: %s)", response.Version.Number, response.ClusterName, response.Name)
	majorVersion := response.MajorVersion()

	// Check if the template exists, loading it when missing.
	// NOTE(review): the error from CheckTemplate is not inspected; a
	// failed check is treated the same as a missing template — confirm
	// that is intended.
	templateExists, err := es.CheckTemplate(conf.Index)
	if !templateExists {
		log.Info("Template %s does not exist, creating...", conf.Index)
		err = es.LoadTemplate(conf.Index, majorVersion)
		if err != nil {
			log.Fatal("Failed to create template:", err)
		}
	} else {
		log.Info("Template %s exists, will not create.", conf.Index)
	}

	// Optional event filters applied before indexing.
	var geoipFilter *eve.GeoipFilter
	tagsFilter := &eve.TagsFilter{}

	if !conf.DisableGeoIp {
		geoipdb, err := geoip.NewGeoIpDb(conf.GeoIpDatabase)
		if err != nil {
			// GeoIP is best-effort: continue without it on failure.
			log.Notice("Failed to load GeoIP database: %v", err)
		} else {
			log.Info("Using GeoIP database %s, %s", geoipdb.Type(), geoipdb.BuildDate())
			geoipFilter = eve.NewGeoipFilter(geoipdb)
		}
	}

	indexer := elasticsearch.NewIndexer(es, conf.DisableCertificateCheck)
	indexer.IndexPrefix = conf.Index

	reader, err := evereader.New(conf.InputFilename)
	if err != nil {
		log.Fatal(err)
	}

	// Initialize bookmarking...
	var bookmarker *evereader.Bookmarker = nil
	if conf.Bookmark {
		bookmarker = &evereader.Bookmarker{
			Filename: conf.BookmarkPath,
			Reader:   reader,
		}
		err := bookmarker.Init(conf.End)
		if err != nil {
			log.Fatal(err)
		}
	} else if conf.End {
		log.Info("Jumping to end of file.")
		err := reader.SkipToEnd()
		if err != nil {
			log.Fatal(err)
		}
	}

	// Import statistics.
	count := uint64(0)
	lastStatTs := time.Now()
	lastStatCount := uint64(0)
	startTime := time.Now()

	// Number of EOFs in last stat interval.
	eofs := uint64(0)

	// Run the bulk indexer connection in the background; a connection
	// failure is fatal for the whole import.
	go func() {
		err := indexer.Run()
		if err != nil {
			log.Fatal("Elastic Search indexer connection unexpectedly closed:", err)
		} else {
			log.Debug("Indexer exited without issue.")
		}
	}()

	for {
		eof := false
		event, err := reader.Next()
		if err != nil {
			if err == io.EOF {
				eof = true
				eofs++
			} else if _, ok := err.(evereader.MalformedEventError); ok {
				// Malformed events are skipped, not fatal.
				log.Error("Failed to read event but will continue: %v", err)
			} else {
				log.Fatalf("Unrecoverable error reading event: %v", err)
			}
		}

		if event != nil {
			// Apply filters before queueing the event for indexing.
			if geoipFilter != nil {
				geoipFilter.AddGeoIP(event)
			}
			tagsFilter.Filter(event)
			if conf.stdout {
				asJson, err := json.Marshal(event)
				if err != nil {
					log.Error("Failed to print event as json: %v", err)
				} else {
					fmt.Println(string(asJson))
				}
			}
			indexer.IndexRawEvent(event)
			count++
		}

		// Flush at EOF or whenever a full batch has been queued. The
		// bookmark is captured before the flush and only persisted
		// after the flush succeeds.
		if eof || (count > 0 && count%conf.BatchSize == 0) {
			var bookmark *evereader.Bookmark = nil
			if conf.Bookmark {
				bookmark = bookmarker.GetBookmark()
			}
			response, err := indexer.FlushConnection()
			if err != nil {
				log.Fatal(err)
			}
			if response != nil {
				log.Debug("Indexed %d events {errors=%v}", len(response.Items), response.Errors)
			}
			if conf.Bookmark {
				bookmarker.WriteBookmark(bookmark)
			}
		}

		// Emit periodic stats roughly on the minute boundary.
		now := time.Now()
		if now.Sub(lastStatTs).Seconds() > 1 && now.Second() == 0 {
			// Calculate the lag in bytes, that is the number of bytes behind
			// the end of file we are.
			lag, err := GetLag(reader)
			if err != nil {
				log.Error("Failed to calculate lag: %v", err)
			}
			log.Info("Total: %d; Last minute: %d; Avg: %.2f/s, EOFs: %d; Lag (bytes): %d", count, count-lastStatCount, float64(count-lastStatCount)/(now.Sub(lastStatTs).Seconds()), eofs, lag)
			lastStatTs = now
			lastStatCount = count
			eofs = 0
		}

		if eof {
			if conf.oneshot {
				break
			} else {
				// Wait for more data to be appended to the file.
				time.Sleep(1 * time.Second)
			}
		}
	}

	totalTime := time.Since(startTime)
	if conf.oneshot {
		log.Info("Indexed %d events: time=%.2fs; avg=%d/s", count, totalTime.Seconds(), uint64(float64(count)/totalTime.Seconds()))
	}
}
func Main(args []string) { var end bool var oneshot bool var useBookmark bool var bookmarkPath string var verbose bool flagset := flag.NewFlagSet("evereader", flag.ExitOnError) flagset.BoolVar(&end, "end", false, "Start at end of file") flagset.BoolVar(&oneshot, "oneshot", false, "One shot mode (exit on EOF)") flagset.BoolVar(&useBookmark, "bookmark", false, "Bookmark location") flagset.StringVar(&bookmarkPath, "bookmark-path", "", "Path to bookmark file") flagset.BoolVarP(&verbose, "verbose", "v", false, "Verbose output") flagset.Parse(args[1:]) if verbose { log.SetLevel(log.DEBUG) } if len(flagset.Args()) == 0 { log.Fatal("No input files provided.") } else if len(flagset.Args()) > 1 { log.Fatal("Only one input file allowed.") } inputFilename := flagset.Args()[0] // If useBookmark but no path, set a default. if useBookmark && bookmarkPath == "" { bookmarkPath = fmt.Sprintf("%s.bookmark", inputFilename) } encoder := json.NewEncoder(os.Stdout) reader, err := evereader.New(flagset.Args()[0]) if err != nil { log.Fatal(err) } // Initialize bookmark. var bookmarker *evereader.Bookmarker = nil if useBookmark { bookmarker = &evereader.Bookmarker{ Filename: bookmarkPath, Reader: reader, } err := bookmarker.Init(end) if err != nil { log.Fatal(err) } } else if end { log.Info("Jumping to end of file.") err := reader.SkipToEnd() if err != nil { log.Fatal(err) } } for { eof := false event, err := reader.Next() if err != nil { if err == io.EOF { if oneshot { break } eof = true } else { log.Fatal(err) } } if eof { time.Sleep(1 * time.Second) } else { encoder.Encode(event) if useBookmark { bookmark := bookmarker.GetBookmark() bookmarker.WriteBookmark(bookmark) } } } }
// Main is the entry point for the sqliteimport command: it tails an EVE
// log file and indexes the events into a SQLite database, with optional
// bookmarking so imports resume where they left off.
func Main(args []string) {
	var end bool
	var oneshot bool
	var useBookmark bool
	var bookmarkPath string
	var verbose bool
	var dbfile string

	flagset := flag.NewFlagSet("sqliteimport", flag.ExitOnError)
	flagset.StringVarP(&dbfile, "database", "D", "", "Database filename")
	flagset.BoolVar(&end, "end", false, "Start at end of file")
	flagset.BoolVar(&oneshot, "oneshot", false, "One shot mode (exit on EOF)")
	flagset.BoolVar(&useBookmark, "bookmark", false, "Bookmark location")
	flagset.StringVar(&bookmarkPath, "bookmark-path", "", "Path to bookmark file")
	flagset.BoolVarP(&verbose, "verbose", "v", false, "Verbose output")
	flagset.Parse(args)

	if verbose {
		log.SetLevel(log.DEBUG)
	}

	// Exactly one input file and a database filename are required.
	if dbfile == "" {
		log.Fatal("Database filename must be provided.")
	}
	if len(flagset.Args()) == 0 {
		log.Fatal("No input files provided.")
	} else if len(flagset.Args()) > 1 {
		log.Fatal("Only one input file allowed.")
	}
	inputFilename := flagset.Args()[0]

	// If useBookmark but no path, set a default.
	if useBookmark && bookmarkPath == "" {
		bookmarkPath = fmt.Sprintf("%s.bookmark", inputFilename)
	}

	reader, err := evereader.New(flagset.Args()[0])
	if err != nil {
		log.Fatal(err)
	}

	// Initialize bookmark.
	var bookmarker *evereader.Bookmarker = nil
	if useBookmark {
		bookmarker = &evereader.Bookmarker{
			Filename: bookmarkPath,
			Reader:   reader,
		}
		err := bookmarker.Init(end)
		if err != nil {
			log.Fatal(err)
		}
	} else if end {
		log.Info("Jumping to end of file.")
		err := reader.SkipToEnd()
		if err != nil {
			log.Fatal(err)
		}
	}

	// Open the database and bring its schema up to date before indexing.
	db, err := sqlite.NewSqliteService(dbfile)
	if err != nil {
		log.Fatal(err)
	}
	if err := db.Migrate(); err != nil {
		log.Fatal(err)
	}
	indexer, err := sqlite.NewSqliteIndexer(db)
	if err != nil {
		log.Fatal(err)
	}

	// Import statistics.
	count := uint64(0)
	lastStatTs := time.Now()
	lastStatCount := uint64(0)

	// Number of EOFs in last stat interval.
	eofs := uint64(0)

	tagsFilter := eve.TagsFilter{}

	for {
		eof := false
		event, err := reader.Next()
		if err != nil {
			if err == io.EOF {
				eof = true
				eofs++
			} else {
				log.Fatal(err)
			}
		}
		if event != nil {
			tagsFilter.Filter(event)
			indexer.IndexRawEve(event)
			count++
			// Persist the read position after each indexed event.
			if useBookmark {
				bookmark := bookmarker.GetBookmark()
				bookmarker.WriteBookmark(bookmark)
			}
		}

		// Periodic stats, roughly once per second, with a flush.
		now := time.Now()
		if now.Sub(lastStatTs).Seconds() > 1 {
			log.Info("Total: %d; Last interval: %d; Avg: %.2f/s, EOFs: %d", count, count-lastStatCount, float64(count-lastStatCount)/(now.Sub(lastStatTs).Seconds()), eofs)
			lastStatTs = now
			lastStatCount = count
			eofs = 0
			indexer.Flush()
		}

		if eof {
			if oneshot {
				break
			} else {
				// Flush pending events, then wait for more input.
				indexer.Flush()
				time.Sleep(100 * time.Millisecond)
			}
		}
	}

	// Final stats and flush before exiting (oneshot mode).
	now := time.Now()
	log.Info("Total: %d; Last interval: %d; Avg: %.2f/s, EOFs: %d", count, count-lastStatCount, float64(count-lastStatCount)/(now.Sub(lastStatTs).Seconds()), eofs)
	indexer.Flush()
}
func configure(args []string) Config { flagset = flag.NewFlagSet("import", flag.ExitOnError) flagset.Usage = usage configFilename := flagset.StringP("config", "c", "", "Configuration file") verbose := flagset.BoolP("verbose", "v", false, "Verbose output") elasticSearchUri := flagset.StringP("elasticsearch", "e", "", "Elastic Search URL") username := flagset.StringP("username", "u", "", "Username") password := flagset.StringP("password", "p", "", "Password") noCheckCertificate := flagset.BoolP("no-check-certificate", "k", false, "Disable certificate check") index := flagset.String("index", DEFAULT_INDEX, "Elastic Search index prefix") oneshot := flagset.Bool("oneshot", false, "One shot mode (exit on EOF)") stdout := flagset.Bool("stdout", false, "Print events to stdout") end := flagset.Bool("end", false, "Start at end of file") batchSize := flagset.Uint64("batch-size", 1000, "Batch import size") useBookmark := flagset.Bool("bookmark", false, "Bookmark location") bookmarkPath := flagset.String("bookmark-path", "", "Path to bookmark file") noGeoIp := flagset.Bool("no-geoip", false, "Disable GeoIP lookups") geoIpDatabase := flagset.String("geoip-database", "", "Path to GeoIP (v2) database file") flagset.Parse(args[1:]) if *verbose { log.Info("Setting log level to debug") log.SetLevel(log.DEBUG) } configWrapper := ConfigWrapper{ Config: Config{ Index: DEFAULT_INDEX, BatchSize: 1000, }, } if *configFilename != "" { log.Debug("Loading configuration file %s", *configFilename) err := config.LoadConfigTo(*configFilename, &configWrapper) if err != nil { log.Fatal(err) } } conf := configWrapper.Config flagset.Visit(func(flag *flag.Flag) { log.Debug("Found command line argument %s -> %s", flag.Name, flag.Value.String()) switch flag.Name { case "elasticsearch": conf.Url = *elasticSearchUri case "username": conf.Username = *username case "password": conf.Password = *password case "no-check-certificate": conf.DisableCertificateCheck = *noCheckCertificate case "index": conf.Index = 
*index case "oneshot": conf.oneshot = *oneshot case "stdout": conf.stdout = *stdout case "end": conf.End = *end case "batch-size": conf.BatchSize = *batchSize case "bookmark": conf.Bookmark = *useBookmark case "bookmark-path": conf.BookmarkPath = *bookmarkPath case "no-geoip": conf.DisableGeoIp = *noGeoIp case "geoip-database": conf.GeoIpDatabase = *geoIpDatabase case "verbose": conf.Verbose = *verbose case "config": default: log.Notice("Unhandle configuration flag %s", flag.Name) } }) if len(flagset.Args()) == 1 { conf.InputFilename = flagset.Args()[0] } else if len(flagset.Args()) > 1 { log.Fatal("Multiple input filenames not allowed") } return conf }
func (h ApiWrapper) ServeHTTP(w http.ResponseWriter, r *http.Request) { response := h.handler.ServeHTTP(h.appContext, r) if response != nil { switch response := response.(type) { case error: log.Error("%+v", response) var message string switch cause := errors.Cause(response).(type) { case *elasticsearch.DatastoreError: message = cause.Message } if message == "" { message = response.Error() } w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusBadRequest) encoder := json.NewEncoder(w) encoder.Encode(HttpStatusResponseBody{ StatusCode: http.StatusBadRequest, Message: message, }) case HttpResponse: statusCode := http.StatusOK contentType := "application/json" // Set status code if provided. if response.statusCode != 0 { statusCode = response.statusCode } // Set content type if provided. if response.contentType != "" { contentType = response.contentType } // Merge in provided headers. if response.headers != nil { for key, val := range response.headers { log.Info("Setting %s -> %s", key, val) w.Header().Set(key, val) } } w.Header().Set("Content-Type", contentType) w.WriteHeader(statusCode) if response.body != nil { switch body := response.body.(type) { case []byte: w.Write(body) default: encoder := json.NewEncoder(w) encoder.Encode(response.body) } } default: w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) encoder := json.NewEncoder(w) encoder.Encode(response) } } }
// bulkUpdateTags will add and/or remove tags from a set of documents using
// the Elastic Search bulk API.
//
// For each document the current tags are read from its _source, tags in
// rmTags are removed, tags in addTags are added (without duplicates), and
// a partial "update" action is queued. Returns (retry, err) where retry
// is true when the bulk response reported item-level errors that may
// succeed on a retry.
func bulkUpdateTags(es *ElasticSearch, documents []map[string]interface{}, addTags []string, rmTags []string) (bool, error) {

	bulk := make([]string, 0)

	for _, item := range documents {
		doc := JsonMap(item)
		currentTags := doc.GetMap("_source").GetAsStrings("tags")

		// Rebuild the tag list: keep current tags not being removed...
		tags := make([]string, 0)
		for _, tag := range currentTags {
			if rmTags == nil || !StringSliceContains(rmTags, tag) {
				tags = append(tags, tag)
			}
		}
		// ...then add the new tags, skipping duplicates.
		for _, tag := range addTags {
			if !StringSliceContains(tags, tag) {
				tags = append(tags, tag)
			}
		}

		// NOTE(review): these assertions panic if _id/_type/_index are
		// missing — assumed always present in documents returned by
		// Elastic Search; confirm against callers.
		id := doc.Get("_id").(string)
		docType := doc.Get("_type").(string)
		index := doc.Get("_index").(string)

		// Bulk format: an action line followed by the partial doc line.
		command := m{
			"update": m{
				"_id":    id,
				"_type":  docType,
				"_index": index,
			},
		}
		bulk = append(bulk, util.ToJson(command))

		partial := m{
			"doc": m{
				"tags": tags,
			},
		}
		bulk = append(bulk, util.ToJson(partial))
	}

	// Needs to finish with a new line.
	bulk = append(bulk, "")
	bulkString := strings.Join(bulk, "\n")

	response, err := es.HttpClient.PostString("_bulk", "application/json", bulkString)
	if err != nil {
		log.Error("Failed to update event tags: %v", err)
		return false, err
	}

	retry := false

	if response.StatusCode != http.StatusOK {
		return retry, NewElasticSearchError(response)
	} else {
		// Decode failures are logged but not returned: the request
		// itself succeeded.
		bulkResponse := BulkResponse{}
		if err := es.Decode(response, &bulkResponse); err != nil {
			log.Error("Failed to decode bulk response: %v", err)
		} else {
			log.Info("Tags updated on %d events; errors=%v", len(bulkResponse.Items), bulkResponse.Errors)
			if bulkResponse.Errors {
				// Item-level errors: flag for retry and log each one.
				retry = true
				for _, item := range bulkResponse.Items {
					logBulkUpdateError(item)
				}
			}
		}
	}

	return retry, nil
}