func main() {
	flag.Parse()

	// Verify that the required --graphstore and --out flags were given.
	if gs == nil {
		flagutil.UsageError("missing required --graphstore flag")
	} else if *tablePath == "" {
		flagutil.UsageError("missing required --out flag")
	}

	db, err := leveldb.Open(*tablePath, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	ctx := context.Background()
	if err := profile.Start(ctx); err != nil {
		log.Fatal(err)
	}
	defer profile.Stop()

	// Run the serving pipeline over the GraphStore, writing into db.
	if err := pipeline.Run(ctx, gs, db, &pipeline.Options{
		MaxEdgePageSize: *maxEdgePageSize,
	}); err != nil {
		log.Fatal("FATAL ERROR: ", err)
	}
}
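// The flag variables used above are declared elsewhere in the file. A minimal
// sketch of what those declarations might look like; the names match the code
// above, but the defaults and help strings here are illustrative assumptions:
var (
	gs              graphstore.Service // typically bound by a --graphstore flag, e.g. via gsutil.Flag(&gs, "graphstore", ...)
	tablePath       = flag.String("out", "", "Directory path of the output serving table")
	maxEdgePageSize = flag.Int("max_edge_page_size", 512, "Maximum number of edges to keep in a single page")
)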
func main() {
	flag.Parse()

	// Exactly one input source must be given: a GraphStore or an entries file.
	if gs == nil && *entriesFile == "" {
		flagutil.UsageError("missing --graphstore or --entries")
	} else if gs != nil && *entriesFile != "" {
		flagutil.UsageError("--graphstore and --entries are mutually exclusive")
	} else if *tablePath == "" {
		flagutil.UsageError("missing required --out flag")
	}

	db, err := leveldb.Open(*tablePath, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	ctx := context.Background()
	if err := profile.Start(ctx); err != nil {
		log.Fatal(err)
	}
	defer profile.Stop()

	// Read entries either by scanning the given GraphStore or by streaming
	// them from the given --entries file.
	var rd stream.EntryReader
	if gs != nil {
		rd = func(f func(e *spb.Entry) error) error {
			defer gs.Close(ctx)
			return gs.Scan(ctx, &spb.ScanRequest{}, f)
		}
	} else {
		f, err := vfs.Open(ctx, *entriesFile)
		if err != nil {
			log.Fatalf("Error opening %q: %v", *entriesFile, err)
		}
		defer f.Close()
		rd = stream.NewReader(f)
	}

	if err := pipeline.Run(ctx, rd, db, &pipeline.Options{
		Verbose:        *verbose,
		MaxPageSize:    *maxPageSize,
		CompressShards: *compressShards,
		MaxShardSize:   *maxShardSize,
		IOBufferSize:   int(shardIOBufferSize.Bytes()),
	}); err != nil {
		log.Fatal("FATAL ERROR: ", err)
	}
}
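// Example invocations of this main, assuming the binary is built as
// write_tables (paths are placeholders). The two input modes are mutually
// exclusive, per the flag checks above:
//
//   write_tables --graphstore /path/to/graphstore --out /path/to/serving_table
//   write_tables --entries /path/to/graph.entries --out /path/to/serving_table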
func main() {
	log.SetPrefix("write_entries: ")
	flag.Parse()

	if *numWorkers < 1 {
		flagutil.UsageErrorf("Invalid number of --workers %d (must be ≥ 1)", *numWorkers)
	} else if *batchSize < 1 {
		flagutil.UsageErrorf("Invalid --batch_size %d (must be ≥ 1)", *batchSize)
	} else if gs == nil {
		flagutil.UsageError("Missing --graphstore")
	}

	ctx := context.Background()
	defer gsutil.LogClose(ctx, gs)
	gsutil.EnsureGracefulExit(gs)

	if err := profile.Start(ctx); err != nil {
		log.Fatal(err)
	}
	defer profile.Stop()

	// Batch the stdin entry stream into write requests; the resulting channel
	// is drained concurrently by all workers.
	writes := graphstore.BatchWrites(stream.ReadEntries(os.Stdin), *batchSize)

	var (
		wg         sync.WaitGroup
		numEntries uint64
	)
	wg.Add(*numWorkers)
	for i := 0; i < *numWorkers; i++ {
		go func() {
			defer wg.Done()
			num, err := writeEntries(ctx, gs, writes)
			if err != nil {
				log.Fatal(err)
			}
			atomic.AddUint64(&numEntries, num)
		}()
	}
	wg.Wait()

	log.Printf("Wrote %d entries", numEntries)
}
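// writeEntries is referenced above but not defined in this excerpt. A minimal
// sketch of a compatible implementation, assuming graphstore.BatchWrites
// yields a channel of *spb.WriteRequest batches that gs.Write accepts
// directly; the real helper may differ:
func writeEntries(ctx context.Context, gs graphstore.Service, writes <-chan *spb.WriteRequest) (uint64, error) {
	var numEntries uint64
	for req := range writes {
		// Each request carries a batch of entry updates; write the whole
		// batch, then add its size to this worker's running total.
		if err := gs.Write(ctx, req); err != nil {
			return numEntries, err
		}
		numEntries += uint64(len(req.Update))
	}
	return numEntries, nil
}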