// concatDomains writes the facts of each supplied domain to w, one domain at
// a time, and returns the total number of facts written.
func concatDomains(engine storage.Engine, w origins.Writer, domains []string, since, asof time.Time) int {
	var (
		err      error
		n, count int
		log      *view.Log
	)

	// Output facts for each domain in the order they are supplied.
	for _, d := range domains {
		log, err = view.OpenLog(engine, d, "commit")

		if err != nil {
			logrus.Fatal(err)
		}

		v := log.View(since, asof)

		n, err = origins.Copy(v, w)

		if err != nil {
			logrus.Fatal(err)
		}

		count += n
	}

	return count
}
// mergeDomains merges the facts of all supplied domains into a single stream,
// writes it to w, and returns the number of facts written.
func mergeDomains(engine storage.Engine, w origins.Writer, domains []string, since, asof time.Time) int {
	var (
		err   error
		count int
		log   *view.Log
	)

	iters := make([]origins.Iterator, len(domains))

	// Merge and output facts across domains.
	for i, d := range domains {
		log, err = view.OpenLog(engine, d, "commit")

		if err != nil {
			logrus.Fatal(err)
		}

		iters[i] = log.View(since, asof)
	}

	if count, err = origins.Copy(view.Merge(iters...), w); err != nil {
		logrus.Fatal(err)
	}

	return count
}
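// The sketch below is hypothetical and not part of the original command: it
// shows how a caller might dispatch between the two helpers above based on a
// single flag. The function name and the merge parameter are assumptions for
// illustration only.
func dumpDomains(engine storage.Engine, w origins.Writer, domains []string, since, asof time.Time, merge bool) int {
	if merge {
		// Interleave facts from every domain into one merged stream.
		return mergeDomains(engine, w, domains, since, asof)
	}

	// Emit each domain's facts sequentially, in the order supplied.
	return concatDomains(engine, w, domains, since, asof)
}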
// transactFile reads facts from r in the given format, applying decompression
// first if requested, and writes them to the transaction.
func transactFile(tx *transactor.Transaction, r io.Reader, format, compression string) {
	var err error

	// Apply decompression.
	if compression != "" {
		logrus.Debugf("transact: applying %s decompression", compression)

		if r, err = origins.Decompressor(r, compression); err != nil {
			logrus.Fatalf("transact: %s", err)
		}
	}

	// Wrap in a reader to handle carriage returns before passing
	// it into the format reader.
	r = origins.NewUniversalReader(r)

	var iter origins.Iterator

	switch format {
	case "csv":
		iter = origins.NewCSVReader(r)
	default:
		logrus.Fatalf("transact: unsupported file format: %s", format)
	}

	if _, err = origins.Copy(iter, tx); err != nil {
		logrus.Fatal("transact:", err)
	}
}
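// The helper below is a hypothetical sketch of how transactFile might be
// driven from a file path. The function name, the extension-based format
// detection, and the "gzip" compression identifier are assumptions for
// illustration; the real command may resolve these from CLI flags instead.
func transactPath(engine storage.Engine, path string) {
	f, err := os.Open(path)

	if err != nil {
		logrus.Fatal("transact:", err)
	}

	defer f.Close()

	// Assume gzip compression for a .gz suffix and strip it before
	// inspecting the format extension.
	var compression string

	name := path

	if strings.HasSuffix(name, ".gz") {
		compression = "gzip"
		name = strings.TrimSuffix(name, ".gz")
	}

	format := strings.TrimPrefix(filepath.Ext(name), ".")

	tx, err := transactor.New(engine, transactor.Options{})

	if err != nil {
		logrus.Fatal("transact:", err)
	}

	transactFile(tx, f, format, compression)

	tx.Commit()
}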
// randMultidomainStorage initializes an in-memory store and generates m
// transactions, each with n randomly generated facts that belong to one of
// the specified domains.
func randMultidomainStorage(domains []string, n, m int) storage.Engine {
	engine, _ := origins.Init("memory", nil)

	for i := 0; i < m; i++ {
		tx, _ := transactor.New(engine, transactor.Options{
			AllowDuplicates: true,
		})

		gen := testutil.NewMultidomainGenerator(domains, tx.ID, n)

		origins.Copy(gen, tx)
		tx.Commit()
	}

	return engine
}
// randStorageWRepeats initializes an in-memory store and generates m
// transactions, each with n facts randomly generated from the same dictionary
// of possible E, A, V values. Varying the size of the dictionary relative to
// the size of the store makes it possible to guarantee repeating facts.
func randStorageWRepeats(domain string, n, m, eLen, aLen, vLen int) storage.Engine {
	engine, _ := origins.Init("memory", nil)

	dictionary := testutil.NewEAVDictionary(eLen, aLen, vLen)

	for i := 0; i < m; i++ {
		tx, _ := transactor.New(engine, transactor.Options{
			AllowDuplicates: true,
		})

		gen := testutil.NewDictionaryBasedGenerator(dictionary, domain, tx.ID, n)

		origins.Copy(gen, tx)
		tx.Commit()
	}

	return engine
}
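// The benchmark below is a hypothetical sketch of how the generators above
// might be exercised. The benchmark name, domain, and sizing parameters are
// assumptions for illustration; each iteration replays the generated history
// into a fresh in-memory store.
func BenchmarkReplayWithRepeats(b *testing.B) {
	domain := "test.repeats"

	// 10 transactions of 1000 facts each, drawn from a small dictionary of
	// 50 entities, 10 attributes, and 20 values so facts repeat.
	engine := randStorageWRepeats(domain, 1000, 10, 50, 10, 20)

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		log, err := view.OpenLog(engine, domain, "commit")

		if err != nil {
			b.Fatal(err)
		}

		// Replay the full history into a separate in-memory store.
		dest, _ := origins.Init("memory", nil)

		tx, _ := transactor.New(dest, transactor.Options{
			AllowDuplicates: true,
		})

		if _, err = origins.Copy(log.View(time.Time{}, time.Now()), tx); err != nil {
			b.Fatal(err)
		}

		tx.Commit()
	}
}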
// setup initializes an in-memory store and transacts the facts from the
// bundled origins.csv test asset.
func setup() storage.Engine {
	engine, _ := origins.Init("memory", nil)

	data, _ := testutil.Asset("assets/origins.csv")

	iter := origins.NewCSVReader(bytes.NewBuffer(data))

	tx, _ := transactor.New(engine, transactor.Options{})

	// Write the facts.
	if _, err := origins.Copy(iter, tx); err != nil {
		panic(err)
	}

	tx.Commit()

	return engine
}
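// The benchmark below is a hypothetical sketch built on the same CSV asset
// used by setup; the benchmark name is an assumption for illustration. It
// measures decoding and transacting the asset into a fresh in-memory store.
func BenchmarkTransactCSV(b *testing.B) {
	data, _ := testutil.Asset("assets/origins.csv")

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		engine, _ := origins.Init("memory", nil)

		tx, _ := transactor.New(engine, transactor.Options{})

		iter := origins.NewCSVReader(bytes.NewBuffer(data))

		if _, err := origins.Copy(iter, tx); err != nil {
			b.Fatal(err)
		}

		tx.Commit()
	}
}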
				f.Operation = operation

				if f.Time.IsZero() {
					f.Time = t
				}
			},
		}

		// Start the command. Stderr is already mapped, so only the exit
		// code needs to be handled here.
		if err := pcmd.Start(); err != nil {
			os.Exit(1)
		}

		origins.Copy(iterator, writer)

		// Wait until the command exits.
		if err := pcmd.Wait(); err != nil {
			os.Exit(1)
		}
	},
}

// modWriter modifies the fact before writing it.
type modWriter struct {
	writer   origins.Writer
	modifier func(*origins.Fact)
}

func (w *modWriter) Write(f *origins.Fact) error {