// logBulkUpdateError logs the error detail of a single bulk update item, if
// the item carries one.
func logBulkUpdateError(item map[string]interface{}) {
	update, ok := item["update"].(map[string]interface{})
	if !ok || update == nil {
		return
	}
	// Use a named variable instead of shadowing the builtin "error" type.
	bulkError := update["error"]
	if bulkError == nil {
		return
	}
	log.Notice("Elastic Search bulk update error: %s", util.ToJson(bulkError))
}
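// Illustrative sketch, not part of the original source: logBulkUpdateError is
// not called in the code shown here. One plausible wiring point is after a
// bulk flush, walking each item of the bulk response when errors were
// reported. The []map[string]interface{} element type is an assumption about
// how the bulk response items are represented.
func logBulkUpdateErrors(items []map[string]interface{}) {
	for _, item := range items {
		logBulkUpdateError(item)
	}
}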
func StaticHandlerFactory(appContext AppContext) http.Handler {
	if appContext.Vars.DevWebAppServerUrl != "" {
		log.Notice("Proxying static files to %v.", appContext.Vars.DevWebAppServerUrl)
		devServerProxyUrl, err := url.Parse(appContext.Vars.DevWebAppServerUrl)
		if err != nil {
			log.Fatal(err)
		}
		return httputil.NewSingleHostReverseProxy(devServerProxyUrl)
	}
	return resources.FileServer{}
}
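// Usage sketch (an assumption, not from the original source): the returned
// handler can be mounted on a standard library mux, for example:
//
//	mux := http.NewServeMux()
//	mux.Handle("/", StaticHandlerFactory(appContext))
//	log.Fatal(http.ListenAndServe(":8080", mux))
//
// where appContext is the application's AppContext value and the listen
// address is illustrative only.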
// configure parses the command line arguments and the optional configuration
// file, returning the resolved Config. Flags given on the command line take
// precedence over values loaded from the configuration file.
func configure(args []string) Config {
	flagset = flag.NewFlagSet("import", flag.ExitOnError)
	flagset.Usage = usage

	configFilename := flagset.StringP("config", "c", "", "Configuration file")
	verbose := flagset.BoolP("verbose", "v", false, "Verbose output")
	elasticSearchUri := flagset.StringP("elasticsearch", "e", "", "Elastic Search URL")
	username := flagset.StringP("username", "u", "", "Username")
	password := flagset.StringP("password", "p", "", "Password")
	noCheckCertificate := flagset.BoolP("no-check-certificate", "k", false, "Disable certificate check")
	index := flagset.String("index", DEFAULT_INDEX, "Elastic Search index prefix")
	oneshot := flagset.Bool("oneshot", false, "One shot mode (exit on EOF)")
	stdout := flagset.Bool("stdout", false, "Print events to stdout")
	end := flagset.Bool("end", false, "Start at end of file")
	batchSize := flagset.Uint64("batch-size", 1000, "Batch import size")
	useBookmark := flagset.Bool("bookmark", false, "Bookmark location")
	bookmarkPath := flagset.String("bookmark-path", "", "Path to bookmark file")
	noGeoIp := flagset.Bool("no-geoip", false, "Disable GeoIP lookups")
	geoIpDatabase := flagset.String("geoip-database", "", "Path to GeoIP (v2) database file")

	flagset.Parse(args[1:])

	if *verbose {
		log.Info("Setting log level to debug")
		log.SetLevel(log.DEBUG)
	}

	configWrapper := ConfigWrapper{
		Config: Config{
			Index:     DEFAULT_INDEX,
			BatchSize: 1000,
		},
	}

	if *configFilename != "" {
		log.Debug("Loading configuration file %s", *configFilename)
		err := config.LoadConfigTo(*configFilename, &configWrapper)
		if err != nil {
			log.Fatal(err)
		}
	}

	conf := configWrapper.Config

	// Apply only the flags that were explicitly set on the command line,
	// overriding any values from the configuration file.
	flagset.Visit(func(flag *flag.Flag) {
		log.Debug("Found command line argument %s -> %s", flag.Name, flag.Value.String())
		switch flag.Name {
		case "elasticsearch":
			conf.Url = *elasticSearchUri
		case "username":
			conf.Username = *username
		case "password":
			conf.Password = *password
		case "no-check-certificate":
			conf.DisableCertificateCheck = *noCheckCertificate
		case "index":
			conf.Index = *index
		case "oneshot":
			conf.oneshot = *oneshot
		case "stdout":
			conf.stdout = *stdout
		case "end":
			conf.End = *end
		case "batch-size":
			conf.BatchSize = *batchSize
		case "bookmark":
			conf.Bookmark = *useBookmark
		case "bookmark-path":
			conf.BookmarkPath = *bookmarkPath
		case "no-geoip":
			conf.DisableGeoIp = *noGeoIp
		case "geoip-database":
			conf.GeoIpDatabase = *geoIpDatabase
		case "verbose":
			conf.Verbose = *verbose
		case "config":
			// Already handled above.
		default:
			log.Notice("Unhandled configuration flag %s", flag.Name)
		}
	})

	if len(flagset.Args()) == 1 {
		conf.InputFilename = flagset.Args()[0]
	} else if len(flagset.Args()) > 1 {
		log.Fatal("Multiple input filenames not allowed")
	}

	return conf
}
// Main is the entry point for the import command.
func Main(args []string) {
	conf := configure(args)

	if conf.BatchSize < 1 {
		log.Fatal("Batch size must be greater than 0")
	}

	if conf.Url == "" {
		log.Error("error: --elasticsearch is a required parameter")
		usage()
		os.Exit(1)
	}

	if conf.InputFilename == "" {
		log.Fatal("error: no input file provided")
	}

	if conf.Bookmark && conf.BookmarkPath == "" {
		conf.BookmarkPath = fmt.Sprintf("%s.bookmark", conf.InputFilename)
		log.Info("Using bookmark file %s", conf.BookmarkPath)
	}

	es := elasticsearch.New(conf.Url)
	es.DisableCertCheck(conf.DisableCertificateCheck)
	if conf.Username != "" || conf.Password != "" {
		if err := es.SetUsernamePassword(conf.Username, conf.Password); err != nil {
			log.Fatal("Failed to set username and password: %v", err)
		}
	}

	response, err := es.Ping()
	if err != nil {
		log.Fatal("error: failed to ping Elastic Search:", err)
	}
	log.Info("Connected to Elastic Search v%s (cluster:%s; name: %s)",
		response.Version.Number, response.ClusterName, response.Name)
	majorVersion := response.MajorVersion()

	// Check if the template exists.
	templateExists, err := es.CheckTemplate(conf.Index)
	if err != nil {
		log.Error("Failed to check for template: %v", err)
	}
	if !templateExists {
		log.Info("Template %s does not exist, creating...", conf.Index)
		err = es.LoadTemplate(conf.Index, majorVersion)
		if err != nil {
			log.Fatal("Failed to create template:", err)
		}
	} else {
		log.Info("Template %s exists, will not create.", conf.Index)
	}

	var geoipFilter *eve.GeoipFilter
	tagsFilter := &eve.TagsFilter{}

	if !conf.DisableGeoIp {
		geoipdb, err := geoip.NewGeoIpDb(conf.GeoIpDatabase)
		if err != nil {
			log.Notice("Failed to load GeoIP database: %v", err)
		} else {
			log.Info("Using GeoIP database %s, %s", geoipdb.Type(), geoipdb.BuildDate())
			geoipFilter = eve.NewGeoipFilter(geoipdb)
		}
	}

	indexer := elasticsearch.NewIndexer(es, conf.DisableCertificateCheck)
	indexer.IndexPrefix = conf.Index

	reader, err := evereader.New(conf.InputFilename)
	if err != nil {
		log.Fatal(err)
	}

	// Initialize bookmarking...
	var bookmarker *evereader.Bookmarker = nil
	if conf.Bookmark {
		bookmarker = &evereader.Bookmarker{
			Filename: conf.BookmarkPath,
			Reader:   reader,
		}
		err := bookmarker.Init(conf.End)
		if err != nil {
			log.Fatal(err)
		}
	} else if conf.End {
		log.Info("Jumping to end of file.")
		err := reader.SkipToEnd()
		if err != nil {
			log.Fatal(err)
		}
	}

	count := uint64(0)
	lastStatTs := time.Now()
	lastStatCount := uint64(0)
	startTime := time.Now()

	// Number of EOFs in last stat interval.
	eofs := uint64(0)

	go func() {
		err := indexer.Run()
		if err != nil {
			log.Fatal("Elastic Search indexer connection unexpectedly closed:", err)
		} else {
			log.Debug("Indexer exited without issue.")
		}
	}()

	for {
		eof := false
		event, err := reader.Next()
		if err != nil {
			if err == io.EOF {
				eof = true
				eofs++
			} else if _, ok := err.(evereader.MalformedEventError); ok {
				log.Error("Failed to read event but will continue: %v", err)
			} else {
				log.Fatalf("Unrecoverable error reading event: %v", err)
			}
		}

		if event != nil {
			if geoipFilter != nil {
				geoipFilter.AddGeoIP(event)
			}
			tagsFilter.Filter(event)
			if conf.stdout {
				asJson, err := json.Marshal(event)
				if err != nil {
					log.Error("Failed to print event as json: %v", err)
				} else {
					fmt.Println(string(asJson))
				}
			}
			indexer.IndexRawEvent(event)
			count++
		}

		// Flush on EOF, or whenever a full batch has been queued.
		if eof || (count > 0 && count%conf.BatchSize == 0) {
			var bookmark *evereader.Bookmark = nil

			if conf.Bookmark {
				bookmark = bookmarker.GetBookmark()
			}

			response, err := indexer.FlushConnection()
			if err != nil {
				log.Fatal(err)
			}
			if response != nil {
				log.Debug("Indexed %d events {errors=%v}", len(response.Items),
					response.Errors)
			}

			if conf.Bookmark {
				bookmarker.WriteBookmark(bookmark)
			}
		}

		now := time.Now()
		if now.Sub(lastStatTs).Seconds() > 1 && now.Second() == 0 {
			// Calculate the lag in bytes, that is the number of bytes behind
			// the end of file we are.
			lag, err := GetLag(reader)
			if err != nil {
				log.Error("Failed to calculate lag: %v", err)
			}

			log.Info("Total: %d; Last minute: %d; Avg: %.2f/s, EOFs: %d; Lag (bytes): %d",
				count,
				count-lastStatCount,
				float64(count-lastStatCount)/(now.Sub(lastStatTs).Seconds()),
				eofs,
				lag)

			lastStatTs = now
			lastStatCount = count
			eofs = 0
		}

		if eof {
			if conf.oneshot {
				break
			} else {
				time.Sleep(1 * time.Second)
			}
		}
	}

	totalTime := time.Since(startTime)
	if conf.oneshot {
		log.Info("Indexed %d events: time=%.2fs; avg=%d/s", count, totalTime.Seconds(),
			uint64(float64(count)/totalTime.Seconds()))
	}
}