// Find performs a path find with the provided query. func (f *Finder) Find(q paths.Query) (result []paths.Path, err error) { log.WithField("source_assets", q.SourceAssets). WithField("destination_asset", q.DestinationAsset). WithField("destination_amount", q.DestinationAmount). Info("Starting pathfind") if len(q.SourceAssets) == 0 { err = errors.New("No source assets") return } s := &search{ Query: q, Finder: f, } s.Init() s.Run() result, err = s.Results, s.Err log.WithField("found", len(s.Results)). WithField("err", s.Err). Info("Finished pathfind") return }
// Ticker triggers the system to update itself with any new data available. func (sys *System) Tick(ctx context.Context) { sys.Init(ctx) log.Debugln(ctx, "ticking txsub system") for _, hash := range sys.Pending.Pending(ctx) { r := sys.Results.ResultByHash(ctx, hash) if r.Err == nil { log.WithField(ctx, "hash", hash).Debug("finishing open submission") sys.Pending.Finish(ctx, r) continue } _, ok := r.Err.(*FailedTransactionError) if ok { log.WithField(ctx, "hash", hash).Debug("finishing open submission") sys.Pending.Finish(ctx, r) continue } if r.Err != ErrNoResults { log.WithStack(ctx, r.Err).Error(r.Err) } } stillOpen, err := sys.Pending.Clean(ctx, sys.SubmissionTimeout) if err != nil { log.WithStack(ctx, err).Error(err) } sys.Metrics.OpenSubmissionsGauge.Update(int64(stillOpen)) }
// GetRaw runs the provided postgres query and args against this sqlquery's db. func (q SqlQuery) GetRaw(ctx context.Context, query string, args []interface{}, dest interface{}) error { db := sqlx.NewDb(q.DB, "postgres") log.WithField(ctx, "sql", query).Info("query sql") log.WithField(ctx, "args", args).Debug("query args") return db.Get(dest, query, args...) }
// SelectRaw runs the provided postgres query and args against this sqlquery's db. func (q SqlQuery) SelectRaw(ctx context.Context, query string, args []interface{}, dest interface{}) error { db := sqlx.NewDb(q.DB, "postgres") log.WithField(ctx, "sql", query).Info("query sql") log.WithField(ctx, "args", args).Debug("query args") err := db.Select(dest, query, args...) if err != nil { err = errors.Wrap(err, 1) } return err }
// initSentry initialized the default sentry client with the configured DSN func initSentry(app *App) { if app.config.SentryDSN == "" { return } log.WithField("dsn", app.config.SentryDSN).Info("Initializing sentry") err := raven.SetDSN(app.config.SentryDSN) if err != nil { panic(err) } }
// Run initializes the provided application, but running every Initializer func (is *initializerSet) Run(app *App) { init := *is alreadyRun := make(map[string]bool) for { ranInitializer := false for _, i := range init { runnable := true // if we've already been run, skip if _, ok := alreadyRun[i.Name]; ok { runnable = false } // if any of our dependencies haven't been run, skip for _, d := range i.Deps { if _, ok := alreadyRun[d]; !ok { runnable = false break } } if !runnable { continue } log.WithField("init_name", i.Name).Debug("running initializer") i.Fn(app) alreadyRun[i.Name] = true ranInitializer = true } // If, after a full loop through the initializers we ran nothing // we are done if !ranInitializer { break } } // if we didn't get to run all initializers, we have a cycle if len(alreadyRun) != len(init) { log.Panic("initializer cycle detected") } }
// run is the workhorse of the stream pump system. It facilitates the triggering
// of open streams by closing a new channel every time the input pump sends.
func run() {
	for {
		select {
		case at, more := <-pump:
			log.WithField(ctx, "time", at).Debug("sse pump")

			// Rotate the tick channel: install a fresh channel before
			// closing the previous one so that listeners who re-subscribe
			// immediately wait on the next cycle, not the one being fired.
			prev := nextTick
			nextTick = make(chan struct{})
			// trigger all listeners by closing the nextTick channel
			close(prev)

			// A closed pump delivers a final zero value with more == false;
			// listeners were already released above, so exit the loop here.
			if !more {
				return
			}
		case <-ctx.Done():
			// NOTE(review): pump, nextTick and ctx appear to be
			// package-level state shared with the subscribers — confirm
			// against the rest of the file. Clearing pump marks the
			// system as shut down.
			pump = nil
			return
		}
	}
}
// LoggerMiddleware is the middleware that logs http requests and resposnes // to the logging subsytem of horizon. func LoggerMiddleware(c *web.C, h http.Handler) http.Handler { fn := func(w http.ResponseWriter, r *http.Request) { ctx := gctx.FromC(*c) mw := mutil.WrapWriter(w) logger := log.WithField("req", middleware.GetReqID(*c)) ctx = log.Set(ctx, logger) gctx.Set(c, ctx) logStartOfRequest(ctx, r) then := time.Now() h.ServeHTTP(mw, r) duration := time.Now().Sub(then) logEndOfRequest(ctx, duration, mw) } return http.HandlerFunc(fn) }
// ReingestOutdated finds old ledgers and reimports them.
func (i *System) ReingestOutdated() (n int, err error) {
	q := history.Q{Repo: i.HorizonDB}

	// NOTE: this loop will never terminate if some bug were cause a ledger
	// reingestion to silently fail.
	for {
		// Fetch the next batch of ledgers whose ingestion version is
		// older than CurrentVersion; an empty batch means we are done.
		outdated := []int32{}
		err = q.OldestOutdatedLedgers(&outdated, CurrentVersion)
		if err != nil {
			return
		}

		if len(outdated) == 0 {
			return
		}

		log.
			WithField("lowest_sequence", outdated[0]).
			WithField("batch_size", len(outdated)).
			Info("reingest: outdated")

		// start/end track the current contiguous run of sequences;
		// flush reingests that run and accumulates the ingested count.
		var start, end int32
		flush := func() error {
			ingested, ferr := i.ReingestRange(start, end)

			if ferr != nil {
				return ferr
			}

			n += ingested
			return nil
		}

		for idx := range outdated {
			seq := outdated[idx]

			// start == 0 marks "no run open yet" — assumes 0 is never a
			// valid outdated ledger sequence; TODO confirm.
			if start == 0 {
				start = seq
				end = seq
				continue
			}

			// Extend the current run while sequences stay contiguous.
			if seq == end+1 {
				end = seq
				continue
			}

			// Gap found: flush the completed run, then open a new one.
			err = flush()
			if err != nil {
				return
			}

			start = seq
			end = seq
		}

		// Flush the final run of the batch.
		err = flush()
		if err != nil {
			return
		}
	}
}
log.Fatal(err) } passphrase := viper.GetString("network-passphrase") if passphrase == "" { log.Fatal("network-passphrase is blank: reingestion requires manually setting passphrase") } i := ingest.New(passphrase, cdb, hdb) logStatus := func(stage string) { count := i.Metrics.IngestLedgerTimer.Count() rate := i.Metrics.IngestLedgerTimer.RateMean() loadMean := time.Duration(i.Metrics.LoadLedgerTimer.Mean()) ingestMean := time.Duration(i.Metrics.IngestLedgerTimer.Mean()) clearMean := time.Duration(i.Metrics.IngestLedgerTimer.Mean()) hlog. WithField("count", count). WithField("rate", rate). WithField("means", fmt.Sprintf("load: %s clear: %s ingest: %s", loadMean, clearMean, ingestMean)). Infof("reingest: %s", stage) } done := make(chan error, 1) // run ingestion in separate goroutine go func() { _, err := reingest(i, args) done <- err logStatus("complete") }() // output metrics