func unpackIndex(ctx context.Context, pack *indexpack.Archive, dir string) error { fetcher := pack.Fetcher(ctx) return pack.ReadUnits(ctx, formatKey, func(u interface{}) error { unit, ok := u.(*apb.CompilationUnit) if !ok { return fmt.Errorf("%T is not a CompilationUnit", u) } idx, err := kindex.FromUnit(unit, fetcher) if err != nil { return fmt.Errorf("error creating kindex: %v", err) } path := kindexPath(dir, idx) if !*quiet { log.Println("Writing compilation unit to", path) } f, err := vfs.Create(ctx, path) if err != nil { return fmt.Errorf("error creating output file: %v", err) } if _, err := idx.WriteTo(f); err != nil { f.Close() // try to close file before returning return fmt.Errorf("error writing output file: %v", err) } return f.Close() }) }
func main() { log.SetPrefix("write_entries: ") flag.Parse() if *numWorkers < 1 { flagutil.UsageErrorf("Invalid number of --workers %d (must be ≥ 1)", *numWorkers) } else if *batchSize < 1 { flagutil.UsageErrorf("Invalid --batch_size %d (must be ≥ 1)", *batchSize) } else if gs == nil { flagutil.UsageError("Missing --graphstore") } defer gsutil.LogClose(context.Background(), gs) gsutil.EnsureGracefulExit(gs) if *profCPU != "" { f, err := vfs.Create(context.Background(), *profCPU) if err != nil { log.Fatal(err) } defer f.Close() pprof.StartCPUProfile(f) defer pprof.StopCPUProfile() } writes := graphstore.BatchWrites(stream.ReadEntries(os.Stdin), *batchSize) var ( wg sync.WaitGroup numEntries uint64 ) wg.Add(*numWorkers) for i := 0; i < *numWorkers; i++ { go func() { defer wg.Done() num, err := writeEntries(context.Background(), gs, writes) if err != nil { log.Fatal(err) } atomic.AddUint64(&numEntries, num) }() } wg.Wait() log.Printf("Wrote %d entries", numEntries) }
// Start begins profiling the program, writing data to the file given with the // --cpu_profile flag. If --cpu_profile was not given, nothing happens. Start // must not be called again until Stop is called. The profile data in the // --cpu_profile flag is overwritten on each call to Start. func Start(ctx context.Context) error { mu.Lock() defer mu.Unlock() if *profCPU != "" { if file != nil { return errors.New("profiling already started") } f, err := vfs.Create(ctx, *profCPU) if err != nil { return fmt.Errorf("error creating profile file %q: %v", *profCPU) } file = f pprof.StartCPUProfile(f) } return nil }
// main converts a stream of entries into triples, printed one per line.
//
// Input comes either from a GraphStore scan (when --graphstore is set) or
// from a delimited entry stream read from a file argument or stdin.  Output
// goes to a file argument or stdout.  Reverse edges are skipped unless
// --keep_reverse_edges is set.
func main() {
	flag.Parse()

	// With a GraphStore as input, at most one (output) argument is allowed;
	// otherwise up to two arguments (input, output) are accepted.
	if len(flag.Args()) > 2 || (gs != nil && len(flag.Args()) > 1) {
		fmt.Fprintf(os.Stderr, "ERROR: too many arguments %v\n", flag.Args())
		flag.Usage()
		os.Exit(1)
	}

	if gs != nil {
		defer gsutil.LogClose(context.Background(), gs)
	}

	// Select the input: the first file argument when no GraphStore is given,
	// falling back to stdin.
	var in io.ReadCloser = os.Stdin
	if gs == nil && len(flag.Args()) > 0 {
		file, err := vfs.Open(context.Background(), flag.Arg(0))
		if err != nil {
			log.Fatalf("Failed to open input file %q: %v", flag.Arg(0), err)
		}
		defer file.Close()
		in = file
	}

	// Select the output: with a GraphStore the first argument names the
	// output; otherwise the second does.  Defaults to stdout.
	outIdx := 1
	if gs != nil {
		outIdx = 0
	}
	var out io.WriteCloser = os.Stdout
	if len(flag.Args()) > outIdx {
		file, err := vfs.Create(context.Background(), flag.Arg(outIdx))
		if err != nil {
			log.Fatalf("Failed to create output file %q: %v", flag.Arg(outIdx), err)
		}
		defer file.Close()
		out = file
	}

	var (
		entries      <-chan *spb.Entry
		reverseEdges int // count of reverse edges skipped
		triples      int // count of triples written
	)
	if gs == nil {
		entries = stream.ReadEntries(in)
	} else {
		// Pump a full GraphStore scan into the entries channel; the goroutine
		// closes the channel when the scan finishes.
		ch := make(chan *spb.Entry)
		entries = ch
		go func() {
			defer close(ch)
			if err := gs.Scan(context.Background(), &spb.ScanRequest{}, func(e *spb.Entry) error {
				ch <- e
				return nil
			}); err != nil {
				// NOTE(review): log.Fatalf exits immediately, skipping the
				// deferred file closes in main — confirm this is acceptable.
				log.Fatalf("Error scanning graphstore: %v", err)
			}
		}()
	}

	for entry := range entries {
		// Reverse edges are dropped by default (presumably because they are
		// derivable from their forward counterparts — see schema package).
		if schema.EdgeDirection(entry.EdgeKind) == schema.Reverse && !*keepReverseEdges {
			reverseEdges++
			continue
		}
		t, err := toTriple(entry)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Fprintln(out, t)
		triples++
	}

	if !*quiet {
		if !*keepReverseEdges {
			log.Printf("Skipped %d reverse edges", reverseEdges)
		}
		log.Printf("Wrote %d triples", triples)
	}
}
// main reads entries out of a GraphStore and writes them as a delimited
// stream to stdout (or, with --count, prints only the number of entries).
//
// Three modes are supported:
//   - unsharded: read by ticket arguments, or scan the whole store;
//   - single shard: stream one shard (--shards with --shard_index) to stdout;
//   - all shards to files: write every shard concurrently to numbered files.
func main() {
	flag.Parse()
	// Flag validation; UsageError is expected to terminate the process.
	if gs == nil {
		flagutil.UsageError("missing --graphstore")
	} else if *shardsToFiles != "" && *shards <= 0 {
		flagutil.UsageError("--sharded_file and --shards must be given together")
	} else if *shards > 0 && len(flag.Args()) > 0 {
		flagutil.UsageError("--shards and giving tickets for reads are mutually exclusive")
	}

	ctx := context.Background()
	wr := delimited.NewWriter(os.Stdout)
	var total int64

	// Unsharded path: read by ticket or scan everything, counting or
	// streaming each entry as it arrives.
	if *shards <= 0 {
		entryFunc := func(entry *spb.Entry) error {
			if *count {
				total++
				return nil
			}
			return wr.PutProto(entry)
		}
		if len(flag.Args()) > 0 {
			// Ticket reads do not support the scan-only filter flags.
			if *targetTicket != "" || *factPrefix != "" {
				log.Fatal("--target and --fact_prefix are unsupported when given tickets")
			}
			if err := readEntries(ctx, gs, entryFunc, *edgeKind, flag.Args()); err != nil {
				log.Fatal(err)
			}
		} else {
			if err := scanEntries(ctx, gs, entryFunc, *edgeKind, *targetTicket, *factPrefix); err != nil {
				log.Fatal(err)
			}
		}
		if *count {
			fmt.Println(total)
		}
		return
	}

	// Sharded paths require the store to implement graphstore.Sharded.
	sgs, ok := gs.(graphstore.Sharded)
	if !ok {
		log.Fatalf("Sharding unsupported for given GraphStore type: %T", gs)
	} else if *shardIndex >= *shards {
		log.Fatalf("Invalid shard index for %d shards: %d", *shards, *shardIndex)
	}

	if *count {
		// Count only the requested shard.
		cnt, err := sgs.Count(ctx, &spb.CountRequest{Index: *shardIndex, Shards: *shards})
		if err != nil {
			log.Fatalf("ERROR: %v", err)
		}
		fmt.Println(cnt)
		return
	} else if *shardsToFiles != "" {
		// Write every shard concurrently, one goroutine per shard, to files
		// named "<prefix>-NNNNN-of-NNNNN".
		var wg sync.WaitGroup
		wg.Add(int(*shards))
		for i := int64(0); i < *shards; i++ {
			go func(i int64) { // i passed explicitly to avoid loop-variable capture
				defer wg.Done()
				path := fmt.Sprintf("%s-%.5d-of-%.5d", *shardsToFiles, i, *shards)
				f, err := vfs.Create(ctx, path)
				if err != nil {
					log.Fatalf("Failed to create file %q: %v", path, err)
				}
				defer f.Close()
				// Each goroutine gets its own writer over its own file.
				wr := delimited.NewWriter(f)
				if err := sgs.Shard(ctx, &spb.ShardRequest{
					Index:  i,
					Shards: *shards,
				}, func(entry *spb.Entry) error {
					return wr.PutProto(entry)
				}); err != nil {
					// NOTE(review): log.Fatalf exits the whole process and
					// skips the deferred f.Close — confirm this is intended.
					log.Fatalf("GraphStore shard scan error: %v", err)
				}
			}(i)
		}
		wg.Wait()
		return
	}

	// Default sharded path: stream the single requested shard to stdout.
	if err := sgs.Shard(ctx, &spb.ShardRequest{
		Index:  *shardIndex,
		Shards: *shards,
	}, func(entry *spb.Entry) error {
		return wr.PutProto(entry)
	}); err != nil {
		log.Fatalf("GraphStore shard scan error: %v", err)
	}
}