func main() { flag.Parse() if gs == nil { flagutil.UsageError("missing required --graphstore flag") } else if *tablePath == "" { flagutil.UsageError("missing required --out flag") } db, err := leveldb.Open(*tablePath, nil) if err != nil { log.Fatal(err) } defer db.Close() ctx := context.Background() if err := profile.Start(ctx); err != nil { log.Fatal(err) } defer profile.Stop() if err := pipeline.Run(ctx, gs, db, &pipeline.Options{ MaxEdgePageSize: *maxEdgePageSize, }); err != nil { log.Fatal("FATAL ERROR: ", err) } }
func main() { flag.Parse() if *servingTable == "" { flagutil.UsageError("missing --serving_table") } else if *outPath == "" { flagutil.UsageError("missing --out") } ctx := context.Background() db, err := leveldb.Open(*servingTable, nil) if err != nil { log.Fatalf("Error opening db at %q: %v", *servingTable, err) } defer db.Close() tbl := &table.KVProto{db} esr := &esrchsrv.Table{&table.KVInverted{db}} ix := index.Create(*outPath) for _, ticket := range filesInTable(ctx, esr) { var fd srvpb.FileDecorations if err := tbl.Lookup(ctx, xsrv.DecorationsKey(ticket), &fd); err != nil { log.Fatalf("Error looking up decoration for %q: %v", ticket, err) } ix.Add(ticket, bytes.NewReader(fd.SourceText)) } ix.Flush() }
func main() { flag.Parse() if gs == nil && *entriesFile == "" { flagutil.UsageError("missing --graphstore or --entries") } else if gs != nil && *entriesFile != "" { flagutil.UsageError("--graphstore and --entries are mutually exclusive") } else if *tablePath == "" { flagutil.UsageError("missing required --out flag") } db, err := leveldb.Open(*tablePath, nil) if err != nil { log.Fatal(err) } defer db.Close() ctx := context.Background() if err := profile.Start(ctx); err != nil { log.Fatal(err) } defer profile.Stop() var rd stream.EntryReader if gs != nil { rd = func(f func(e *spb.Entry) error) error { defer gs.Close(ctx) return gs.Scan(ctx, &spb.ScanRequest{}, f) } } else { f, err := vfs.Open(ctx, *entriesFile) if err != nil { log.Fatalf("Error opening %q: %v", *entriesFile, err) } defer f.Close() rd = stream.NewReader(f) } if err := pipeline.Run(ctx, rd, db, &pipeline.Options{ Verbose: *verbose, MaxPageSize: *maxPageSize, CompressShards: *compressShards, MaxShardSize: *maxShardSize, IOBufferSize: int(shardIOBufferSize.Bytes()), }); err != nil { log.Fatal("FATAL ERROR: ", err) } }
// main launches an analyzer subprocess, feeds it the compilations named on
// the command line over GRPC, writes each analysis output to stdout as a
// delimited stream, and finally interrupts the analyzer.
func main() {
	flag.Parse()
	// done is sent a value when the analyzer should exit
	done := make(chan struct{}, 1)
	defer func() { done <- struct{}{} }()

	analyzerBin, analyzerArgs, compilations := parseAnalyzerCommand()
	if len(compilations) == 0 {
		flagutil.UsageError("Missing kindex-file paths")
	}

	// The analyzer inherits our stdout/stderr for its own logging.
	cmd := exec.Command(analyzerBin, analyzerArgs...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	var proc *os.Process
	if err := process.StartAsync(cmd, &process.Callbacks{
		OnStart: func(p *os.Process) {
			log.Printf("Starting analyzer subprocess: %s", strings.Join(cmd.Args, " "))
			// Capture the process handle so it can be signaled below.
			// NOTE(review): assumes OnStart fires before StartAsync returns;
			// otherwise proc could still be nil at the Signal call — confirm.
			proc = p
		},
		OnExit: func(state *os.ProcessState, err error) {
			// A value on done marks an expected exit; any other exit is fatal.
			// NOTE(review): the deferred send on done runs only when main
			// returns (after proc.Signal), so a fast analyzer exit may race
			// this check and report a spurious failure — confirm.
			select {
			case <-done:
			default:
				log.Fatalf("Analyzer subprocess exited unexpectedly (state:%v; error:%v)", state, err)
			}
		},
	}); err != nil {
		log.Fatalf("Error starting analyzer: %v", err)
	}

	// Dial the analyzer's GRPC service on the configured local port.
	addr := fmt.Sprintf("localhost:%d", *analyzerPort)
	conn, err := grpc.Dial(addr, grpc.WithInsecure())
	if err != nil {
		log.Fatalf("Error dialing analyzer %q: %v", addr, err)
	}
	defer conn.Close()

	// Queue the kindex files and expose their file data over a local service.
	queue := local.NewKIndexQueue(compilations)
	fdsAddr := launchFileDataService(queue)

	wr := delimited.NewWriter(os.Stdout)
	driver := &driver.Driver{
		Analyzer: &remote.Analyzer{aspb.NewCompilationAnalyzerClient(conn)},
		// Each analysis output value is written to stdout as a delimited record.
		Output:          func(_ context.Context, out *apb.AnalysisOutput) error { return wr.Put(out.Value) },
		FileDataService: fdsAddr,
		Compilations:    queue,
	}
	if err := driver.Run(context.Background()); err != nil {
		log.Fatal(err)
	}

	// All compilations processed; ask the analyzer to shut down.
	if err := proc.Signal(os.Interrupt); err != nil {
		log.Fatalf("Failed to send interrupt to analyzer: %v", err)
	}
}
func main() { flag.Parse() if len(flag.Args()) == 0 { flagutil.UsageError("missing kindex-file path") } else if len(flag.Args()) > 1 { flagutil.UsageErrorf("unknown arguments: %v", flag.Args()[1:]) } path := flag.Arg(0) idx, err := kindex.Open(context.Background(), path) if err != nil { log.Fatalf("Error reading %q: %v", path, err) } en := json.NewEncoder(os.Stdout) if *printFiles { if err := en.Encode(idx); err != nil { log.Fatalf("Error encoding JSON: %v", err) } } else { if err := en.Encode(idx.Proto); err != nil { log.Fatalf("Error encoding JSON compilation: %v", err) } } }
func main() { flag.Parse() if gs == nil { flagutil.UsageError("missing required --graphstore flag") } else if *tablePath == "" { flagutil.UsageError("missing required --out flag") } db, err := leveldb.Open(*tablePath, nil) if err != nil { log.Fatal(err) } defer db.Close() if err := pipeline.Run(context.Background(), gs, db); err != nil { log.Fatal(err) } }
func main() { flag.Parse() if len(flag.Args()) == 0 { flagutil.UsageError("not given any files") } xs := xrefs.WebClient(*remoteAPI) for _, file := range flag.Args() { ticket := (&kytheuri.URI{Corpus: *corpus, Path: file}).String() decor, err := xs.Decorations(ctx, &xpb.DecorationsRequest{ Location: &xpb.Location{Ticket: ticket}, SourceText: true, References: true, }) if err != nil { log.Fatalf("Failed to get decorations for file %q", file) } nodes := xrefs.NodesMap(decor.Nodes) emitted := stringset.New() for _, r := range decor.Reference { if r.Kind != schema.DefinesBindingEdge || emitted.Contains(r.TargetTicket) { continue } ident := string(nodes[r.TargetTicket][identifierFact]) if ident == "" { continue } offset, err := strconv.Atoi(string(nodes[r.SourceTicket][schema.AnchorStartFact])) if err != nil { log.Printf("Invalid start offset for anchor %q", r.SourceTicket) continue } fields, err := getTagFields(xs, r.TargetTicket) if err != nil { log.Printf("Failed to get tagfields for %q: %v", r.TargetTicket, err) } fmt.Printf("%s\t%s\t%d;\"\t%s\n", ident, file, offsetLine(decor.SourceText, offset), strings.Join(fields, "\t")) emitted.Add(r.TargetTicket) } } }
func main() { log.SetPrefix("write_entries: ") flag.Parse() if *numWorkers < 1 { flagutil.UsageErrorf("Invalid number of --workers %d (must be ≥ 1)", *numWorkers) } else if *batchSize < 1 { flagutil.UsageErrorf("Invalid --batch_size %d (must be ≥ 1)", *batchSize) } else if gs == nil { flagutil.UsageError("Missing --graphstore") } defer gsutil.LogClose(context.Background(), gs) gsutil.EnsureGracefulExit(gs) if *profCPU != "" { f, err := vfs.Create(context.Background(), *profCPU) if err != nil { log.Fatal(err) } defer f.Close() pprof.StartCPUProfile(f) defer pprof.StopCPUProfile() } writes := graphstore.BatchWrites(stream.ReadEntries(os.Stdin), *batchSize) var ( wg sync.WaitGroup numEntries uint64 ) wg.Add(*numWorkers) for i := 0; i < *numWorkers; i++ { go func() { defer wg.Done() num, err := writeEntries(context.Background(), gs, writes) if err != nil { log.Fatal(err) } atomic.AddUint64(&numEntries, num) }() } wg.Wait() log.Printf("Wrote %d entries", numEntries) }
// main locates a file (by --path and/or --signature) via the search
// service, fetches its decorations, and prints a JSON reference object for
// every anchor that spans --offset.
func main() {
	flag.Parse()
	if *offset < 0 {
		flagutil.UsageError("non-negative --offset required")
	} else if *signature == "" && *path == "" {
		flagutil.UsageError("must provide at least --path or --signature")
	}

	// Select HTTP or GRPC clients based on the remote API address scheme.
	if strings.HasPrefix(*remoteAPI, "http://") || strings.HasPrefix(*remoteAPI, "https://") {
		xs = xrefs.WebClient(*remoteAPI)
		idx = search.WebClient(*remoteAPI)
	} else {
		// NOTE(review): grpc.Dial is called without any transport-credentials
		// option (cf. grpc.WithInsecure used elsewhere in this file) —
		// confirm the grpc version in use permits an option-less Dial.
		conn, err := grpc.Dial(*remoteAPI)
		if err != nil {
			log.Fatalf("Error connecting to remote API %q: %v", *remoteAPI, err)
		}
		defer conn.Close()
		xs = xrefs.GRPC(xpb.NewXRefServiceClient(conn))
		idx = search.GRPC(spb.NewSearchServiceClient(conn))
	}

	// If the path exists locally, translate it to a repo-relative path
	// rooted at the enclosing Kythe root (unless --ignore_local_repo).
	relPath := *path
	if !*ignoreLocalRepo {
		if _, err := os.Stat(relPath); err == nil {
			absPath, err := filepath.Abs(relPath)
			if err != nil {
				log.Fatal(err)
			}
			kytheRoot := findKytheRoot(filepath.Dir(absPath))
			if kytheRoot != "" {
				relPath, err = filepath.Rel(filepath.Join(kytheRoot, *root), absPath)
				if err != nil {
					log.Fatal(err)
				}
			}
		}
	}

	// Resolve the (possibly partial) VName to exactly one file ticket.
	partialFile := &spb.VName{
		Signature: *signature,
		Corpus:    *corpus,
		Root:      *root,
		Path:      relPath,
		Language:  *language,
	}
	reply, err := idx.Search(ctx, &spb.SearchRequest{
		Partial: partialFile,
		Fact:    fileFacts,
	})
	if err != nil {
		log.Fatalf("Error locating file {%v}: %v", partialFile, err)
	}
	if len(reply.Ticket) == 0 {
		log.Fatalf("Could not locate file {%v}", partialFile)
	} else if len(reply.Ticket) > 1 {
		log.Fatalf("Ambiguous file {%v}; multiple results: %v", partialFile, reply.Ticket)
	}
	fileTicket := reply.Ticket[0]

	decor, err := xs.Decorations(ctx, &xpb.DecorationsRequest{
		// TODO(schroederc): limit Location to a SPAN around *offset
		Location:    &xpb.Location{Ticket: fileTicket},
		References:  true,
		SourceText:  true,
		DirtyBuffer: readDirtyBuffer(ctx),
	})
	if err != nil {
		log.Fatal(err)
	}

	nodes := xrefs.NodesMap(decor.Node)
	en := json.NewEncoder(os.Stdout)
	for _, ref := range decor.Reference {
		start, end := parseAnchorSpan(nodes[ref.SourceTicket])
		// Emit only references whose anchor spans the requested offset.
		if start <= *offset && *offset < end {
			var r reference
			r.Span.Start = start
			r.Span.End = end
			r.Span.Text = string(decor.SourceText[start:end])
			r.Kind = strings.TrimPrefix(ref.Kind, schema.EdgePrefix)
			r.Node.Ticket = ref.TargetTicket
			node := nodes[ref.TargetTicket]
			r.Node.Kind = string(node[schema.NodeKindFact])
			r.Node.Subkind = string(node[schema.SubkindFact])
			// Augment the reference with names and (optionally) definitions;
			// edge lookup failures are warnings, not fatal.
			if eReply, err := xs.Edges(ctx, &xpb.EdgesRequest{
				Ticket: []string{ref.TargetTicket},
				Kind:   []string{schema.NamedEdge, definedAtEdge},
			}); err != nil {
				log.Printf("WARNING: error getting edges for %q: %v", ref.TargetTicket, err)
			} else {
				edges := xrefs.EdgesMap(eReply.EdgeSet)[ref.TargetTicket]
				for _, name := range edges[schema.NamedEdge] {
					if uri, err := kytheuri.Parse(name); err != nil {
						log.Printf("WARNING: named node ticket (%q) could not be parsed: %v", name, err)
					} else {
						r.Node.Names = append(r.Node.Names, uri.Signature)
					}
				}
				if !*skipDefinitions {
					for _, defAnchor := range edges[definedAtEdge] {
						def, err := completeDefinition(defAnchor)
						if err != nil {
							log.Printf("WARNING: failed to complete definition for %q: %v", defAnchor, err)
						} else {
							r.Node.Definitions = append(r.Node.Definitions, def)
						}
					}
				}
			}
			if err := en.Encode(r); err != nil {
				log.Fatal(err)
			}
		}
	}
}
// main serves the xrefs, filetree, and (optionally) search APIs over GRPC
// and/or HTTP, backed either by a pre-built --serving_table or directly by
// a GraphStore.
func main() {
	flag.Parse()
	if *servingTable == "" && gs == nil {
		flagutil.UsageError("missing either --serving_table or --graphstore")
	} else if *httpListeningAddr == "" && *grpcListeningAddr == "" {
		flagutil.UsageError("missing either --listen or --grpc_listen argument")
	} else if *servingTable != "" && gs != nil {
		flagutil.UsageError("--serving_table and --graphstore are mutually exclusive")
	}
	var (
		xs xrefs.Service
		ft filetree.Service
		sr search.Service
	)
	ctx := context.Background()
	if *servingTable != "" {
		// Preferred path: serve from a pre-built LevelDB serving table.
		db, err := leveldb.Open(*servingTable, nil)
		if err != nil {
			log.Fatalf("Error opening db at %q: %v", *servingTable, err)
		}
		defer db.Close()
		tbl := table.ProtoBatchParallel{&table.KVProto{db}}
		xs = xsrv.NewCombinedTable(tbl)
		ft = &ftsrv.Table{tbl}
		sr = &srchsrv.Table{&table.KVInverted{db}}
	} else {
		// Fallback: derive each service from the GraphStore, using the
		// store directly when it already implements the service interface.
		log.Println("WARNING: serving directly from a GraphStore can be slow; you may want to use a --serving_table")
		if f, ok := gs.(filetree.Service); ok {
			log.Printf("Using %T directly as filetree service", gs)
			ft = f
		} else {
			m := filetree.NewMap()
			if err := m.Populate(ctx, gs); err != nil {
				log.Fatalf("Error populating file tree from GraphStore: %v", err)
			}
			ft = m
		}
		if x, ok := gs.(xrefs.Service); ok {
			log.Printf("Using %T directly as xrefs service", gs)
			xs = x
		} else {
			if err := xstore.EnsureReverseEdges(ctx, gs); err != nil {
				log.Fatalf("Error ensuring reverse edges in GraphStore: %v", err)
			}
			xs = xstore.NewGraphStoreService(gs)
		}
		if s, ok := gs.(search.Service); ok {
			log.Printf("Using %T directly as search service", gs)
			sr = s
		}
	}
	// sr remains nil when the GraphStore offers no search implementation.
	if sr == nil {
		log.Println("Search API not supported")
	}
	if *grpcListeningAddr != "" {
		srv := grpc.NewServer()
		xpb.RegisterXRefServiceServer(srv, xs)
		ftpb.RegisterFileTreeServiceServer(srv, ft)
		if sr != nil {
			spb.RegisterSearchServiceServer(srv, sr)
		}
		go startGRPC(srv)
	}
	if *httpListeningAddr != "" {
		xrefs.RegisterHTTPHandlers(ctx, xs, http.DefaultServeMux)
		filetree.RegisterHTTPHandlers(ctx, ft, http.DefaultServeMux)
		if sr != nil {
			search.RegisterHTTPHandlers(ctx, sr, http.DefaultServeMux)
		}
		go startHTTP()
	}
	select {} // block forever
}
// main locates a file (by --path and/or --signature), fetches its
// decorations (optionally against a dirty buffer), and prints a JSON
// reference object for every anchor spanning the requested point
// (--offset, or --line/--column normalized against the source text).
func main() {
	flag.Parse()
	if flag.NArg() > 0 {
		flagutil.UsageErrorf("unknown non-flag argument(s): %v", flag.Args())
	} else if *offset < 0 && (*lineNumber < 0 || *columnOffset < 0) {
		flagutil.UsageError("non-negative --offset (or --line and --column) required")
	} else if *signature == "" && *path == "" {
		flagutil.UsageError("must provide at least --path or --signature")
	}
	defer (*apiFlag).Close()
	// The same API handle backs both the xrefs and search services.
	xs, idx = *apiFlag, *apiFlag

	// If the path exists locally, translate it to a repo-relative path and
	// default the dirty buffer to the local file's contents.
	relPath := *path
	if *localRepoRoot != "NONE" {
		if _, err := os.Stat(relPath); err == nil {
			absPath, err := filepath.Abs(relPath)
			if err != nil {
				log.Fatal(err)
			}
			if *dirtyBuffer == "" {
				*dirtyBuffer = absPath
			}
			kytheRoot := *localRepoRoot
			if kytheRoot == "" {
				kytheRoot = findKytheRoot(filepath.Dir(absPath))
			}
			if kytheRoot != "" {
				relPath, err = filepath.Rel(filepath.Join(kytheRoot, *root), absPath)
				if err != nil {
					log.Fatal(err)
				}
			}
		}
	}

	// Resolve the (possibly partial) VName to exactly one file ticket.
	partialFile := &spb.VName{
		Signature: *signature,
		Corpus:    *corpus,
		Root:      *root,
		Path:      relPath,
		Language:  *language,
	}
	reply, err := idx.Search(ctx, &spb.SearchRequest{
		Partial: partialFile,
		Fact:    fileFacts,
	})
	if err != nil {
		log.Fatalf("Error locating file {%v}: %v", partialFile, err)
	}
	if len(reply.Ticket) == 0 {
		log.Fatalf("Could not locate file {%v}", partialFile)
	} else if len(reply.Ticket) > 1 {
		log.Fatalf("Ambiguous file {%v}; multiple results: %v", partialFile, reply.Ticket)
	}
	fileTicket := reply.Ticket[0]

	text := readDirtyBuffer(ctx)
	decor, err := xs.Decorations(ctx, &xpb.DecorationsRequest{
		// TODO(schroederc): limit Location to a SPAN around *offset
		Location:    &xpb.Location{Ticket: fileTicket},
		References:  true,
		SourceText:  true,
		DirtyBuffer: text,
		Filter: []string{
			schema.NodeKindFact,
			schema.SubkindFact,
			schema.AnchorLocFilter, // TODO(schroederc): remove once backwards-compatibility fix below is removed
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// Without a dirty buffer, span text comes from the served source text.
	if text == nil {
		text = decor.SourceText
	}
	nodes := xrefs.NodesMap(decor.Node)
	// Normalize point within source text
	point := normalizedPoint(text)
	en := json.NewEncoder(os.Stdout)
	for _, ref := range decor.Reference {
		var start, end int
		if ref.AnchorStart == nil || ref.AnchorEnd == nil {
			// TODO(schroederc): remove this backwards-compatibility fix
			start, end = parseAnchorSpan(nodes[ref.SourceTicket])
		} else {
			start, end = int(ref.AnchorStart.ByteOffset), int(ref.AnchorEnd.ByteOffset)
		}
		// Emit only references whose anchor spans the requested point.
		if start <= point && point < end {
			var r reference
			r.Span.Start = start
			r.Span.End = end
			r.Span.Text = string(text[start:end])
			r.Kind = strings.TrimPrefix(ref.Kind, schema.EdgePrefix)
			r.Node.Ticket = ref.TargetTicket
			node := nodes[ref.TargetTicket]
			r.Node.Kind = string(node[schema.NodeKindFact])
			r.Node.Subkind = string(node[schema.SubkindFact])
			// Augment with names, type, and (optionally) definitions;
			// edge lookup failures are warnings, not fatal.
			if eReply, err := xrefs.AllEdges(ctx, xs, &xpb.EdgesRequest{
				Ticket: []string{ref.TargetTicket},
				Kind:   []string{schema.NamedEdge, schema.TypedEdge, definedAtEdge, definedBindingAtEdge},
			}); err != nil {
				log.Printf("WARNING: error getting edges for %q: %v", ref.TargetTicket, err)
			} else {
				edges := xrefs.EdgesMap(eReply.EdgeSet)[ref.TargetTicket]
				for _, name := range edges[schema.NamedEdge] {
					if uri, err := kytheuri.Parse(name); err != nil {
						log.Printf("WARNING: named node ticket (%q) could not be parsed: %v", name, err)
					} else {
						r.Node.Names = append(r.Node.Names, uri.Signature)
					}
				}
				if typed := edges[schema.TypedEdge]; len(typed) > 0 {
					r.Node.Typed = typed[0]
				}
				if !*skipDefinitions {
					// Prefer defined/at edges; fall back to defines/binding.
					defs := edges[definedAtEdge]
					if len(defs) == 0 {
						defs = edges[definedBindingAtEdge]
					}
					for _, defAnchor := range defs {
						def, err := completeDefinition(defAnchor)
						if err != nil {
							log.Printf("WARNING: failed to complete definition for %q: %v", defAnchor, err)
						} else {
							r.Node.Definitions = append(r.Node.Definitions, def)
						}
					}
				}
			}
			if err := en.Encode(r); err != nil {
				log.Fatal(err)
			}
		}
	}
}
// main builds a file ticket directly from --corpus/--root/--path, asks the
// server for decorations around the requested point (--offset or
// --line/--column, optionally against a dirty buffer), and prints a JSON
// reference object for each returned anchor.
func main() {
	flag.Parse()
	if flag.NArg() > 0 {
		flagutil.UsageErrorf("unknown non-flag argument(s): %v", flag.Args())
	} else if *offset < 0 && (*lineNumber < 0 || *columnOffset < 0) {
		flagutil.UsageError("non-negative --offset (or --line and --column) required")
	} else if *path == "" {
		flagutil.UsageError("must provide --path")
	}
	defer (*apiFlag).Close()
	xs = *apiFlag

	// If the path exists locally, translate it to a repo-relative path and
	// default the dirty buffer to the local file's contents.
	relPath := *path
	if *localRepoRoot != "NONE" {
		if _, err := os.Stat(relPath); err == nil {
			absPath, err := filepath.Abs(relPath)
			if err != nil {
				log.Fatal(err)
			}
			if *dirtyBuffer == "" {
				*dirtyBuffer = absPath
			}
			kytheRoot := *localRepoRoot
			if kytheRoot == "" {
				kytheRoot = findKytheRoot(filepath.Dir(absPath))
			}
			if kytheRoot != "" {
				relPath, err = filepath.Rel(filepath.Join(kytheRoot, *root), absPath)
				if err != nil {
					log.Fatal(err)
				}
			}
		}
	}

	// Unlike earlier versions, the ticket is constructed locally rather
	// than resolved through a search service.
	fileTicket := (&kytheuri.URI{Corpus: *corpus, Root: *root, Path: relPath}).String()
	point := &xpb.Location_Point{
		ByteOffset:   int32(*offset),
		LineNumber:   int32(*lineNumber),
		ColumnOffset: int32(*columnOffset),
	}
	dirtyBuffer := readDirtyBuffer(ctx)
	// Request only decorations AROUND the point span, with node kind and
	// subkind facts for the referenced nodes.
	decor, err := xs.Decorations(ctx, &xpb.DecorationsRequest{
		Location: &xpb.Location{
			Ticket: fileTicket,
			Kind:   xpb.Location_SPAN,
			Start:  point,
			End:    point,
		},
		SpanKind:    xpb.DecorationsRequest_AROUND_SPAN,
		References:  true,
		SourceText:  true,
		DirtyBuffer: dirtyBuffer,
		Filter: []string{
			facts.NodeKind,
			facts.Subkind,
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	nodes := xrefs.NodesMap(decor.Nodes)
	en := json.NewEncoder(os.Stdout)
	for _, ref := range decor.Reference {
		start, end := int(ref.AnchorStart.ByteOffset), int(ref.AnchorEnd.ByteOffset)
		var r reference
		r.Span.Start = start
		r.Span.End = end
		// Span text is only available when a dirty buffer was supplied.
		if len(dirtyBuffer) > 0 {
			r.Span.Text = string(dirtyBuffer[start:end])
		}
		// TODO(schroederc): add option to get anchor text from DecorationsReply
		r.Kind = strings.TrimPrefix(ref.Kind, edges.Prefix)
		r.Node.Ticket = ref.TargetTicket
		node := nodes[ref.TargetTicket]
		r.Node.Kind = string(node[facts.NodeKind])
		r.Node.Subkind = string(node[facts.Subkind])
		// TODO(schroederc): use CrossReferences method
		// Augment with names, type, and (optionally) definitions; edge
		// lookup failures are warnings, not fatal.
		if eReply, err := xrefs.AllEdges(ctx, xs, &gpb.EdgesRequest{
			Ticket: []string{ref.TargetTicket},
			Kind:   []string{edges.Named, edges.Typed, definedAtEdge, definedBindingAtEdge},
		}); err != nil {
			log.Printf("WARNING: error getting edges for %q: %v", ref.TargetTicket, err)
		} else {
			// EdgesMap here yields sets keyed by target ticket.
			matching := xrefs.EdgesMap(eReply.EdgeSets)[ref.TargetTicket]
			for name := range matching[edges.Named] {
				if uri, err := kytheuri.Parse(name); err != nil {
					log.Printf("WARNING: named node ticket (%q) could not be parsed: %v", name, err)
				} else {
					r.Node.Names = append(r.Node.Names, uri.Signature)
				}
			}
			// Take an arbitrary single element of the typed-edge set.
			for typed := range matching[edges.Typed] {
				r.Node.Typed = typed
				break
			}
			if !*skipDefinitions {
				// Prefer defined/at edges; fall back to defines/binding.
				defs := matching[definedAtEdge]
				if len(defs) == 0 {
					defs = matching[definedBindingAtEdge]
				}
				for defAnchor := range defs {
					def, err := completeDefinition(defAnchor)
					if err != nil {
						log.Printf("WARNING: failed to complete definition for %q: %v", defAnchor, err)
					} else {
						r.Node.Definitions = append(r.Node.Definitions, def)
					}
				}
			}
		}
		if err := en.Encode(r); err != nil {
			log.Fatal(err)
		}
	}
}
func main() { flag.Parse() if len(flag.Args()) == 0 { flagutil.UsageError("not given any files") } xs := xrefs.WebClient(*remoteAPI) idx := search.WebClient(*remoteAPI) for _, file := range flag.Args() { results, err := idx.Search(ctx, &spb.SearchRequest{ Partial: &spb.VName{Path: file}, Fact: []*spb.SearchRequest_Fact{{ Name: schema.NodeKindFact, Value: []byte(schema.FileKind), }}, }) if err != nil { log.Fatalf("Error searching for ticket of file %q", file) } else if len(results.Ticket) == 0 { log.Printf("Could not find ticket for file %q", file) continue } else if len(results.Ticket) != 1 { log.Printf("Multiple tickets found for file %q; choosing first from %v", file, results.Ticket) } ticket := results.Ticket[0] decor, err := xs.Decorations(ctx, &xpb.DecorationsRequest{ Location: &xpb.Location{Ticket: ticket}, SourceText: true, References: true, }) if err != nil { log.Fatalf("Failed to get decorations for file %q", file) } nodes := xrefs.NodesMap(decor.Node) emitted := stringset.New() for _, r := range decor.Reference { if r.Kind != schema.DefinesBindingEdge || emitted.Contains(r.TargetTicket) { continue } ident := string(nodes[r.TargetTicket][identifierFact]) if ident == "" { continue } offset, err := strconv.Atoi(string(nodes[r.SourceTicket][schema.AnchorStartFact])) if err != nil { log.Printf("Invalid start offset for anchor %q", r.SourceTicket) continue } fields, err := getTagFields(xs, r.TargetTicket) if err != nil { log.Printf("Failed to get tagfields for %q: %v", r.TargetTicket, err) } fmt.Printf("%s\t%s\t%d;\"\t%s\n", ident, file, offsetLine(decor.SourceText, offset), strings.Join(fields, "\t")) emitted.Add(r.TargetTicket) } } }
// main serves the xrefs, filetree, and (optionally) search APIs over GRPC,
// HTTP, and/or TLS, backed either by a pre-built --serving_table or
// directly by a GraphStore; static assets may be served from
// --public_resources.
func main() {
	flag.Parse()
	if *servingTable == "" && gs == nil {
		flagutil.UsageError("missing either --serving_table or --graphstore")
	} else if *httpListeningAddr == "" && *grpcListeningAddr == "" && *tlsListeningAddr == "" {
		flagutil.UsageError("missing either --listen, --tls_listen, or --grpc_listen argument")
	} else if *servingTable != "" && gs != nil {
		flagutil.UsageError("--serving_table and --graphstore are mutually exclusive")
	} else if *tlsListeningAddr != "" && (*tlsCertFile == "" || *tlsKeyFile == "") {
		flagutil.UsageError("--tls_cert_file and --tls_key_file are required if given --tls_listen")
	} else if flag.NArg() > 0 {
		flagutil.UsageErrorf("unknown non-flag arguments given: %v", flag.Args())
	}
	var (
		xs xrefs.Service
		ft filetree.Service
		sr search.Service
	)
	ctx := context.Background()
	if *servingTable != "" {
		// Preferred path: serve from a pre-built LevelDB serving table.
		db, err := leveldb.Open(*servingTable, nil)
		if err != nil {
			log.Fatalf("Error opening db at %q: %v", *servingTable, err)
		}
		defer db.Close()
		tbl := table.ProtoBatchParallel{&table.KVProto{db}}
		xs = xsrv.NewCombinedTable(tbl)
		ft = &ftsrv.Table{tbl}
		sr = &srchsrv.Table{&table.KVInverted{db}}
	} else {
		// Fallback: derive each service from the GraphStore, using the
		// store directly when it already implements the service interface.
		log.Println("WARNING: serving directly from a GraphStore can be slow; you may want to use a --serving_table")
		if f, ok := gs.(filetree.Service); ok {
			log.Printf("Using %T directly as filetree service", gs)
			ft = f
		} else {
			m := filetree.NewMap()
			if err := m.Populate(ctx, gs); err != nil {
				log.Fatalf("Error populating file tree from GraphStore: %v", err)
			}
			ft = m
		}
		if x, ok := gs.(xrefs.Service); ok {
			log.Printf("Using %T directly as xrefs service", gs)
			xs = x
		} else {
			if err := xstore.EnsureReverseEdges(ctx, gs); err != nil {
				log.Fatalf("Error ensuring reverse edges in GraphStore: %v", err)
			}
			xs = xstore.NewGraphStoreService(gs)
		}
		if s, ok := gs.(search.Service); ok {
			log.Printf("Using %T directly as search service", gs)
			sr = s
		}
	}
	// sr remains nil when the GraphStore offers no search implementation.
	if sr == nil {
		log.Println("Search API not supported")
	}
	if *grpcListeningAddr != "" {
		srv := grpc.NewServer()
		xpb.RegisterXRefServiceServer(srv, xs)
		ftpb.RegisterFileTreeServiceServer(srv, ft)
		if sr != nil {
			spb.RegisterSearchServiceServer(srv, sr)
		}
		go startGRPC(srv)
	}
	if *httpListeningAddr != "" || *tlsListeningAddr != "" {
		// Wrap the API mux so every response can carry the configured CORS
		// Access-Control-Allow-Origin header.
		apiMux := http.NewServeMux()
		http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
			if *httpAllowOrigin != "" {
				w.Header().Set("Access-Control-Allow-Origin", *httpAllowOrigin)
			}
			apiMux.ServeHTTP(w, r)
		})
		xrefs.RegisterHTTPHandlers(ctx, xs, apiMux)
		filetree.RegisterHTTPHandlers(ctx, ft, apiMux)
		if sr != nil {
			search.RegisterHTTPHandlers(ctx, sr, apiMux)
		}
		if *publicResources != "" {
			// Serve static files for any path not claimed by the API.
			log.Println("Serving public resources at", *publicResources)
			if s, err := os.Stat(*publicResources); err != nil {
				log.Fatalf("ERROR: could not get FileInfo for %q: %v", *publicResources, err)
			} else if !s.IsDir() {
				log.Fatalf("ERROR: %q is not a directory", *publicResources)
			}
			apiMux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
				http.ServeFile(w, r, filepath.Join(*publicResources, filepath.Clean(r.URL.Path)))
			})
		}
	}
	if *httpListeningAddr != "" {
		go startHTTP()
	}
	if *tlsListeningAddr != "" {
		go startTLS()
	}
	select {} // block forever
}
// main reads entries back out of a GraphStore, either as a plain scan/read
// (optionally filtered by --edge_kind, --target, --fact_prefix, or a list
// of ticket arguments), as a count (--count), or shard-by-shard — writing
// delimited entries to stdout or to per-shard files (--sharded_file).
func main() {
	flag.Parse()
	if gs == nil {
		flagutil.UsageError("missing --graphstore")
	} else if *shardsToFiles != "" && *shards <= 0 {
		flagutil.UsageError("--sharded_file and --shards must be given together")
	} else if *shards > 0 && len(flag.Args()) > 0 {
		flagutil.UsageError("--shards and giving tickets for reads are mutually exclusive")
	}
	ctx := context.Background()
	wr := delimited.NewWriter(os.Stdout)
	var total int64
	if *shards <= 0 {
		// Non-sharded mode: either count entries or stream them to stdout.
		entryFunc := func(entry *spb.Entry) error {
			if *count {
				total++
				return nil
			}
			return wr.PutProto(entry)
		}
		if len(flag.Args()) > 0 {
			// Ticket arguments select per-source reads instead of a scan.
			if *targetTicket != "" || *factPrefix != "" {
				log.Fatal("--target and --fact_prefix are unsupported when given tickets")
			}
			if err := readEntries(ctx, gs, entryFunc, *edgeKind, flag.Args()); err != nil {
				log.Fatal(err)
			}
		} else {
			if err := scanEntries(ctx, gs, entryFunc, *edgeKind, *targetTicket, *factPrefix); err != nil {
				log.Fatal(err)
			}
		}
		if *count {
			fmt.Println(total)
		}
		return
	}
	// Sharded modes require the GraphStore to implement Sharded.
	sgs, ok := gs.(graphstore.Sharded)
	if !ok {
		log.Fatalf("Sharding unsupported for given GraphStore type: %T", gs)
	} else if *shardIndex >= *shards {
		log.Fatalf("Invalid shard index for %d shards: %d", *shards, *shardIndex)
	}
	if *count {
		// Count only the selected shard.
		cnt, err := sgs.Count(ctx, &spb.CountRequest{Index: *shardIndex, Shards: *shards})
		if err != nil {
			log.Fatalf("ERROR: %v", err)
		}
		fmt.Println(cnt)
		return
	} else if *shardsToFiles != "" {
		// Dump every shard concurrently, one output file per shard.
		var wg sync.WaitGroup
		wg.Add(int(*shards))
		for i := int64(0); i < *shards; i++ {
			go func(i int64) {
				defer wg.Done()
				path := fmt.Sprintf("%s-%.5d-of-%.5d", *shardsToFiles, i, *shards)
				f, err := vfs.Create(ctx, path)
				if err != nil {
					log.Fatalf("Failed to create file %q: %v", path, err)
				}
				defer f.Close()
				wr := delimited.NewWriter(f)
				if err := sgs.Shard(ctx, &spb.ShardRequest{
					Index:  i,
					Shards: *shards,
				}, func(entry *spb.Entry) error {
					return wr.PutProto(entry)
				}); err != nil {
					log.Fatalf("GraphStore shard scan error: %v", err)
				}
			}(i)
		}
		wg.Wait()
		return
	}
	// Default sharded mode: stream the single selected shard to stdout.
	if err := sgs.Shard(ctx, &spb.ShardRequest{
		Index:  *shardIndex,
		Shards: *shards,
	}, func(entry *spb.Entry) error {
		return wr.PutProto(entry)
	}); err != nil {
		log.Fatalf("GraphStore shard scan error: %v", err)
	}
}
func main() { flag.Parse() if flag.NArg() == 0 { flagutil.UsageError("Missing path to LevelDB") } var protoValueType reflect.Type if *protoValue != "" { protoValueType = proto.MessageType(*protoValue) if protoValueType == nil { flagutil.UsageErrorf("could not understand protocol buffer type: %q", *protoValue) } } var en *json.Encoder if *emitJSON { en = json.NewEncoder(os.Stdout) } for _, path := range flag.Args() { func() { db, err := leveldb.Open(path, nil) if err != nil { log.Fatalf("Error opening %q: %v", path, err) } defer db.Close() it, err := db.ScanPrefix([]byte(*keyPrefix), nil) if err != nil { log.Fatalf("Error creating iterator for %q: v", path, err) } defer it.Close() for { key, val, err := it.Next() if err == io.EOF { break } else if err != nil { log.Fatalf("Error during scan of %q: %v", path, err) } var k, v interface{} if protoValueType == nil { if *stringKey { k = strconv.Quote(string(key)) } else { k = base64.StdEncoding.EncodeToString(key) } if *stringValue { v = strconv.Quote(string(val)) } else { v = base64.StdEncoding.EncodeToString(val) } } else { p := reflect.New(protoValueType.Elem()).Interface().(proto.Message) if err := proto.Unmarshal(val, p); err != nil { log.Fatalf("Error unmarshaling value to %q: %v", *protoValue, err) } k, v = string(key), p } if en == nil { fmt.Println(strings.NewReplacer( "@key@", fmt.Sprintf("%s", k), "@value@", fmt.Sprintf("%s", v), ).Replace(*lineFormat)) } else { en.Encode(keyValue{k, v}) } } }() } }