func Start(directory string) (*exec.Cmd, error) { // Get the absolute path if the data directory. path, err := filepath.Abs(directory) if err != nil { return nil, err } log.Info("Using postgres data directory %s", path) command := exec.Command("postgres", "-D", path, "-c", "log_destination=stderr", "-c", "logging_collector=off", "-k", path) stdout, err := command.StdoutPipe() if err != nil { log.Error("Failed to open postgres stdout, will not be logged.") stdout = nil } stderr, err := command.StderrPipe() if err != nil { log.Error("Failed to open postgres stderr, will not be logged.") stderr = nil } err = command.Start() if err != nil { log.Error("Failed to start postgres: %v", err) return nil, err } if stdout != nil { go func() { if err := ReadPipe(stdout, true, "postgres stdout"); err != nil { log.Error("Failed to read postgres stdout: %v", err) } }() } if stderr != nil { go func() { if err := ReadPipe(stderr, true, "postgres stderr"); err != nil { log.Error("Failed to read postgres stderr: %v", err) } }() } return command, nil }
// Refresh refreshes all indices logging any error but not returning and // discarding the response so the caller doesn't have to. func (es *ElasticSearch) Refresh() { response, err := es.HttpClient.PostString("_refresh", "application/json", "{}") if err != nil { log.Error("Failed to refresh Elastic Search: %v", err) return } io.Copy(ioutil.Discard, response.Body) }
func (i *Indexer) AddEvent(event eve.RawEveEvent) error { uuid := uuid.NewV1() timestamp, err := event.GetTimestamp() if err != nil { log.Error("Failed to get timestamp from event: %v", err) } encoded, err := json.Marshal(&event) if err != nil { log.Error("Failed to encode event.") } _, err = i.stmt.Exec(uuid, timestamp, string(encoded)) if err != nil { log.Fatal(err) } return nil }
func Init(directory string) error { command := exec.Command("initdb", "-D", directory, "--encoding=UTF8") stdout, err := command.StdoutPipe() if err != nil { log.Error("Failed to open initdb stdout, will not be logged.") stdout = nil } stderr, err := command.StderrPipe() if err != nil { log.Error("Failed to open initdb stderr, will not be logged.") stderr = nil } err = command.Start() if err != nil { log.Error("Failed to start initdb: %v", err) return err } if stdout != nil { go func() { if err := ReadPipe(stdout, true, "initdb stdout"); err != nil { log.Error("Failed to read from stdout: %v", err) } }() } if stderr != nil { go func() { if err := ReadPipe(stderr, true, "initdb stderr"); err != nil { log.Error("Failed to read from stderr: %v", err) } }() } return command.Wait() }
func GetEventByIdHandler(appContext AppContext, r *http.Request) interface{} { eventId := mux.Vars(r)["id"] event, err := appContext.EventService.GetEventById(eventId) if err != nil { log.Error("%v", err) return err } if event == nil { return HttpNotFoundResponse(fmt.Sprintf("No event with ID %s", eventId)) } return event }
func ArchiveEventHandler(appContext AppContext, r *http.Request) interface{} { eventId := mux.Vars(r)["id"] err := appContext.EventService.AddTagsToEvent(eventId, []string{"archived", "evebox.archived"}) if err != nil { log.Error("%v", err) return err } return HttpOkResponse() }
func DeEscalateEventHandler(appContext AppContext, r *http.Request) interface{} { eventId := mux.Vars(r)["id"] err := appContext.EventService.RemoveTagsFromEvent(eventId, []string{"escalated", "evebox.escalated"}) if err != nil { log.Error("%v", err) return err } return HttpOkResponse() }
func ArchiveHandler(appContext AppContext, r *http.Request) interface{} { var request AlertGroupQueryParameters if err := DecodeRequestBody(r, &request); err != nil { return err } err := appContext.DataStore.ArchiveAlertGroup(request.ToCoreAlertGroupQueryParams()) if err != nil { log.Error("%v", err) return err } return HttpOkResponse() }
func AlertGroupRemoveTags(appContext AppContext, r *http.Request) interface{} { var request AlertGroupRemoveTagsRequest if err := DecodeRequestBody(r, &request); err != nil { return err } err := appContext.EventService.RemoveTagsFromAlertGroup( request.AlertGroup.ToCoreAlertGroupQueryParams(), request.Tags) if err != nil { log.Error("%v", err) return err } return HttpOkResponse() }
func (b *Bookmarker) Init(end bool) error { bookmark, err := b.ReadBookmark() if err == nil && b.BookmarkIsValid(bookmark) { err = b.Reader.SkipTo(bookmark.Offset) if err != nil { log.Error("Failed to skip to line %d, will skip to end of file: %s", err) b.Reader.SkipToEnd() } } else { log.Info("Failed to read bookmark: %s", err) if end { log.Info("Will start reading at end of file.") b.Reader.SkipToEnd() } else { log.Info("Will start reading at beginning of file.") } } // Test write. bookmark = b.GetBookmark() return b.WriteBookmark(bookmark) }
func (s *EventQueryService) EventQuery(options core.EventQueryOptions) (interface{}, error) { query := NewEventQuery() query.MustNot(TermQuery("event_type", "stats")) query.SortBy("@timestamp", "desc") if options.Size > 0 { query.Size = options.Size } else { query.Size = 500 } if options.QueryString != "" { query.AddFilter(QueryString(options.QueryString)) } if options.MinTs != "" { query.AddFilter(RangeGte("timestamp", options.MinTs)) } if options.MaxTs != "" { query.AddFilter(RangeLte("timestamp", options.MaxTs)) } if options.EventType != "" { query.AddFilter(TermQuery("event_type", options.EventType)) } response, err := s.es.Search(query) if err != nil { log.Error("%v", err) } hits := response.Hits.Hits return map[string]interface{}{ "data": hits, }, nil }
// BookmarkIsValid reports whether the bookmark still applies to the
// file currently being read: the path must match and the file must not
// have shrunk (truncation) since the bookmark was written.
func (b *Bookmarker) BookmarkIsValid(bookmark *Bookmark) bool {
	if bookmark.Path != b.Reader.path {
		return false
	}
	fileInfo, err := b.Reader.GetFileInfo()
	if err == nil {
		// If the current file size is less than the bookmark file
		// size it was likely truncated, invalidate.
		if fileInfo.Size() < bookmark.Size {
			return false
		}
		// NOTE(review): an inode mismatch (file rotated or replaced)
		// is only logged here, not treated as invalidating — confirm
		// this is intentional.
		if !SameSys(bookmark.Sys, GetSys(fileInfo)) {
			log.Error("Inodes don't match")
		}
	}
	// A stat failure is treated as "still valid".
	return true
}
func (s FileServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { var path string if r.URL.String() == "/" { path = "index.html" } else { path = strings.TrimPrefix(r.URL.String(), "/") } // Remove any query string parameters... parts := strings.SplitN(path, "?", 2) path = parts[0] log.Info("Static file request for %s.", path) asset := fmt.Sprintf("public/%s", path) bytes, err := Asset(asset) if err != nil { log.Error("Public file not found: %s", path) w.WriteHeader(http.StatusNotFound) } else { w.Write(bytes) } }
func Main(args []string) { conf := configure(args) if conf.BatchSize < 1 { log.Fatal("Batch size must be greater than 0") } if conf.Url == "" { log.Error("error: --elasticsearch is a required parameter") usage() os.Exit(1) } if conf.InputFilename == "" { log.Fatal("error: no input file provided") } if conf.Bookmark && conf.BookmarkPath == "" { conf.BookmarkPath = fmt.Sprintf("%s.bookmark", conf.InputFilename) log.Info("Using bookmark file %s", conf.BookmarkPath) } es := elasticsearch.New(conf.Url) es.DisableCertCheck(conf.DisableCertificateCheck) if conf.Username != "" || conf.Password != "" { if err := es.SetUsernamePassword(conf.Username, conf.Password); err != nil { log.Fatal("Failed to set username and password: %v", err) } } response, err := es.Ping() if err != nil { log.Fatal("error: failed to ping Elastic Search:", err) } log.Info("Connected to Elastic Search v%s (cluster:%s; name: %s)", response.Version.Number, response.ClusterName, response.Name) majorVersion := response.MajorVersion() // Check if the template exists. templateExists, err := es.CheckTemplate(conf.Index) if !templateExists { log.Info("Template %s does not exist, creating...", conf.Index) err = es.LoadTemplate(conf.Index, majorVersion) if err != nil { log.Fatal("Failed to create template:", err) } } else { log.Info("Template %s exists, will not create.", conf.Index) } var geoipFilter *eve.GeoipFilter tagsFilter := &eve.TagsFilter{} if !conf.DisableGeoIp { geoipdb, err := geoip.NewGeoIpDb(conf.GeoIpDatabase) if err != nil { log.Notice("Failed to load GeoIP database: %v", err) } else { log.Info("Using GeoIP database %s, %s", geoipdb.Type(), geoipdb.BuildDate()) geoipFilter = eve.NewGeoipFilter(geoipdb) } } indexer := elasticsearch.NewIndexer(es, conf.DisableCertificateCheck) indexer.IndexPrefix = conf.Index reader, err := evereader.New(conf.InputFilename) if err != nil { log.Fatal(err) } // Initialize bookmarking... 
var bookmarker *evereader.Bookmarker = nil if conf.Bookmark { bookmarker = &evereader.Bookmarker{ Filename: conf.BookmarkPath, Reader: reader, } err := bookmarker.Init(conf.End) if err != nil { log.Fatal(err) } } else if conf.End { log.Info("Jumping to end of file.") err := reader.SkipToEnd() if err != nil { log.Fatal(err) } } count := uint64(0) lastStatTs := time.Now() lastStatCount := uint64(0) startTime := time.Now() // Number of EOFs in last stat interval. eofs := uint64(0) go func() { err := indexer.Run() if err != nil { log.Fatal("Elastic Search indexer connection unexpectedly closed:", err) } else { log.Debug("Indexer exited without issue.") } }() for { eof := false event, err := reader.Next() if err != nil { if err == io.EOF { eof = true eofs++ } else if _, ok := err.(evereader.MalformedEventError); ok { log.Error("Failed to read event but will continue: %v", err) } else { log.Fatalf("Unrecoverable error reading event: %v", err) } } if event != nil { if geoipFilter != nil { geoipFilter.AddGeoIP(event) } tagsFilter.Filter(event) if conf.stdout { asJson, err := json.Marshal(event) if err != nil { log.Error("Failed to print event as json: %v", err) } else { fmt.Println(string(asJson)) } } indexer.IndexRawEvent(event) count++ } if eof || (count > 0 && count%conf.BatchSize == 0) { var bookmark *evereader.Bookmark = nil if conf.Bookmark { bookmark = bookmarker.GetBookmark() } response, err := indexer.FlushConnection() if err != nil { log.Fatal(err) } if response != nil { log.Debug("Indexed %d events {errors=%v}", len(response.Items), response.Errors) } if conf.Bookmark { bookmarker.WriteBookmark(bookmark) } } now := time.Now() if now.Sub(lastStatTs).Seconds() > 1 && now.Second() == 0 { // Calculate the lag in bytes, that is the number of bytes behind // the end of file we are. 
lag, err := GetLag(reader) if err != nil { log.Error("Failed to calculate lag: %v", err) } log.Info("Total: %d; Last minute: %d; Avg: %.2f/s, EOFs: %d; Lag (bytes): %d", count, count-lastStatCount, float64(count-lastStatCount)/(now.Sub(lastStatTs).Seconds()), eofs, lag) lastStatTs = now lastStatCount = count eofs = 0 } if eof { if conf.oneshot { break } else { time.Sleep(1 * time.Second) } } } totalTime := time.Since(startTime) if conf.oneshot { log.Info("Indexed %d events: time=%.2fs; avg=%d/s", count, totalTime.Seconds(), uint64(float64(count)/totalTime.Seconds())) } }
// RemoveTagsFromAlertGroup removes the given tags from all alerts matching // the provided parameters. func (s *EventService) RemoveTagsFromAlertGroup(p core.AlertGroupQueryParams, tags []string) error { filter := []interface{}{ ExistsQuery("event_type"), KeywordTermQuery("event_type", "alert", s.es.keyword), RangeQuery{ Field: "timestamp", Gte: p.MinTimestamp, Lte: p.MaxTimestamp, }, KeywordTermQuery("src_ip", p.SrcIP, s.es.keyword), KeywordTermQuery("dest_ip", p.DstIP, s.es.keyword), TermQuery("alert.signature_id", p.SignatureID), } for _, tag := range tags { filter = append(filter, TermQuery("tags", tag)) } query := m{ "query": m{ "bool": m{ "filter": filter, }, }, "_source": "tags", "sort": l{ "_doc", }, "size": 10000, } log.Println(util.ToJson(query)) searchResponse, err := s.es.SearchScroll(query, "1m") if err != nil { log.Error("Failed to initialize scroll: %v", err) return err } scrollID := searchResponse.ScrollId for { log.Debug("Search response total: %d; hits: %d", searchResponse.Hits.Total, len(searchResponse.Hits.Hits)) if len(searchResponse.Hits.Hits) == 0 { break } // We do this in a retry loop as some documents may fail to be // updated. Most likely rejected due to max thread count or // something. maxRetries := 5 retries := 0 for { retry, err := bulkUpdateTags(s.es, searchResponse.Hits.Hits, nil, tags) if err != nil { log.Error("BulkAddTags failed: %v", err) return err } if !retry { break } retries++ if retries > maxRetries { log.Warning("Errors occurred archive events, not all events may have been archived.") break } } // Get next set of events to archive. searchResponse, err = s.es.Scroll(scrollID, "1m") if err != nil { log.Error("Failed to fetch from scroll: %v", err) return err } } response, err := s.es.DeleteScroll(scrollID) if err != nil { log.Error("Failed to delete scroll id: %v", err) } io.Copy(ioutil.Discard, response.Body) s.es.Refresh() return nil }
func Main(args []string) { var err error log.Info("This is EveBox Server version %v (rev: %v)", core.BuildVersion, core.BuildRev) flagset := flag.NewFlagSet("server", flag.ExitOnError) flagset.StringVarP(&opts.ElasticSearchUri, "elasticsearch", "e", "", "Elastic Search URI (default: http://localhost:9200") flagset.StringVarP(&opts.ElasticSearchIndex, "index", "i", "", "Elastic Search Index (default: logstash)") flagset.StringVarP(&opts.Port, "port", "p", "5636", "Port to bind to") flagset.StringVarP(&opts.Host, "host", "", "0.0.0.0", "Host to bind to") flagset.StringVarP(&opts.DevServerUri, "dev", "", "", "Frontend development server URI") flagset.BoolVarP(&opts.Version, "version", "", false, "Show version") flagset.StringVarP(&opts.Config, "config", "c", "", "Configuration filename") flagset.BoolVarP(&opts.NoCheckCertificate, "no-check-certificate", "k", false, "Disable certificate check for Elastic Search") flagset.BoolVarP(&opts.Sqlite, "sqlite", "", false, "Use SQLite for the event store") flagset.Parse(args[0:]) if opts.Version { VersionMain() return } log.SetLevel(log.DEBUG) // If no configuration was provided, see if evebox.yaml exists // in the current directory. 
if opts.Config == "" { _, err = os.Stat("./evebox.yaml") if err == nil { opts.Config = "./evebox.yaml" } } if opts.Config != "" { log.Printf("Loading configuration file %s.\n", opts.Config) conf, err = config.LoadConfig(opts.Config) if err != nil { log.Fatal(err) } } conf.ElasticSearchIndex = getElasticSearchIndex() log.Info("Using ElasticSearch Index %s.", conf.ElasticSearchIndex) appContext := server.AppContext{ Config: conf, } elasticSearch := elasticsearch.New(getElasticSearchUrl()) elasticSearch.SetEventIndex(conf.ElasticSearchIndex) elasticSearch.InitKeyword() pingResponse, err := elasticSearch.Ping() if err != nil { log.Error("Failed to ping Elastic Search: %v", err) } else { log.Info("Connected to Elastic Search (version: %s)", pingResponse.Version.Number) } appContext.ElasticSearch = elasticSearch appContext.EventService = elasticsearch.NewEventService(elasticSearch) appContext.AlertQueryService = elasticsearch.NewAlertQueryService(elasticSearch) appContext.EventQueryService = elasticsearch.NewEventQueryService(elasticSearch) appContext.ReportService = elasticsearch.NewReportService(elasticSearch) appContext.Vars.DevWebAppServerUrl = opts.DevServerUri var datastoreType string = "elasticsearch" if opts.Sqlite { datastoreType = "sqlite" } if datastoreType == "elasticsearch" { appContext.DataStore, err = elasticsearch.NewDataStore(elasticSearch) if err != nil { log.Fatal(err) } } else if datastoreType == "sqlite" { appContext.DataStore, err = sqlite.NewDataStore() if err != nil { log.Fatal(err) } } httpServer := server.NewServer(appContext) err = httpServer.Start(opts.Host + ":" + opts.Port) if err != nil { log.Fatal(err) } }
func Stop(command *exec.Cmd) { err := command.Process.Signal(syscall.SIGTERM) if err != nil { log.Error("Failed to stop postgres: %v", err) } }
// bulkUpdateTags will add and/or remove tags from a set of documents using
// the Elastic Search bulk API.
//
// For each document the current tags are read from _source, any rmTags
// are filtered out and addTags merged in (without duplicates), then an
// "update" action plus a partial doc is appended to the bulk body.
//
// Returns (retry, err): retry is true when the bulk response reported
// per-item errors, meaning the caller should attempt the update again.
func bulkUpdateTags(es *ElasticSearch, documents []map[string]interface{},
	addTags []string, rmTags []string) (bool, error) {

	bulk := make([]string, 0)

	for _, item := range documents {
		doc := JsonMap(item)
		currentTags := doc.GetMap("_source").GetAsStrings("tags")
		tags := make([]string, 0)

		// Keep existing tags that are not being removed.
		for _, tag := range currentTags {
			if rmTags == nil || !StringSliceContains(rmTags, tag) {
				tags = append(tags, tag)
			}
		}

		// Merge in the new tags, skipping ones already present.
		for _, tag := range addTags {
			if !StringSliceContains(tags, tag) {
				tags = append(tags, tag)
			}
		}

		// NOTE(review): these assertions panic if a hit is missing any
		// of these metadata fields — presumably Elastic Search always
		// supplies them; confirm.
		id := doc.Get("_id").(string)
		docType := doc.Get("_type").(string)
		index := doc.Get("_index").(string)

		// Bulk action line identifying the document to update...
		command := m{
			"update": m{
				"_id":    id,
				"_type":  docType,
				"_index": index,
			},
		}
		bulk = append(bulk, util.ToJson(command))

		// ...followed by the partial document carrying the new tags.
		partial := m{
			"doc": m{
				"tags": tags,
			},
		}
		bulk = append(bulk, util.ToJson(partial))
	}

	// Needs to finish with a new line.
	bulk = append(bulk, "")
	bulkString := strings.Join(bulk, "\n")

	response, err := es.HttpClient.PostString("_bulk", "application/json", bulkString)
	if err != nil {
		log.Error("Failed to update event tags: %v", err)
		return false, err
	}

	retry := false
	if response.StatusCode != http.StatusOK {
		return retry, NewElasticSearchError(response)
	} else {
		bulkResponse := BulkResponse{}
		if err := es.Decode(response, &bulkResponse); err != nil {
			log.Error("Failed to decode bulk response: %v", err)
		} else {
			log.Info("Tags updated on %d events; errors=%v",
				len(bulkResponse.Items), bulkResponse.Errors)
			if bulkResponse.Errors {
				// Per-item failures: log each one and signal the
				// caller to retry the page.
				retry = true
				for _, item := range bulkResponse.Items {
					logBulkUpdateError(item)
				}
			}
		}
	}

	return retry, nil
}
// ServeHTTP adapts the wrapped API handler's return value into an HTTP
// response: errors become a JSON 400 with a message body, HttpResponse
// values are written with their own status/headers/body, and any other
// non-nil value is JSON-encoded with a 200 status. A nil return writes
// nothing.
func (h ApiWrapper) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	response := h.handler.ServeHTTP(h.appContext, r)
	if response != nil {
		switch response := response.(type) {
		case error:
			log.Error("%+v", response)

			// Prefer the datastore's own message when the error chain
			// contains one; otherwise fall back to Error().
			var message string
			switch cause := errors.Cause(response).(type) {
			case *elasticsearch.DatastoreError:
				message = cause.Message
			}
			if message == "" {
				message = response.Error()
			}

			w.Header().Set("Content-Type", "application/json")
			w.WriteHeader(http.StatusBadRequest)
			encoder := json.NewEncoder(w)
			encoder.Encode(HttpStatusResponseBody{
				StatusCode: http.StatusBadRequest,
				Message:    message,
			})
		case HttpResponse:
			// Defaults applied when the response does not specify them.
			statusCode := http.StatusOK
			contentType := "application/json"

			// Set status code if provided.
			if response.statusCode != 0 {
				statusCode = response.statusCode
			}

			// Set content type if provided.
			if response.contentType != "" {
				contentType = response.contentType
			}

			// Merge in provided headers.
			if response.headers != nil {
				for key, val := range response.headers {
					log.Info("Setting %s -> %s", key, val)
					w.Header().Set(key, val)
				}
			}

			w.Header().Set("Content-Type", contentType)
			w.WriteHeader(statusCode)

			if response.body != nil {
				// Raw bytes pass through untouched; anything else is
				// JSON-encoded.
				switch body := response.body.(type) {
				case []byte:
					w.Write(body)
				default:
					encoder := json.NewEncoder(w)
					encoder.Encode(response.body)
				}
			}
		default:
			w.Header().Set("Content-Type", "application/json")
			w.WriteHeader(http.StatusOK)
			encoder := json.NewEncoder(w)
			encoder.Encode(response)
		}
	}
}