func (tbl *testTable) Construct(t *testing.T) *Table { p := make(testProtoTable) var tickets stringset.Set for _, n := range tbl.Nodes { tickets.Add(n.Ticket) } for _, es := range tbl.EdgeSets { tickets.Discard(es.Source.Ticket) testutil.FatalOnErrT(t, "Error writing edge set: %v", p.Put(ctx, EdgeSetKey(mustFix(t, es.Source.Ticket)), es)) } // Fill in EdgeSets for zero-degree nodes for ticket := range tickets { es := &srvpb.PagedEdgeSet{ Source: getNode(ticket), } testutil.FatalOnErrT(t, "Error writing edge set: %v", p.Put(ctx, EdgeSetKey(mustFix(t, es.Source.Ticket)), es)) } for _, ep := range tbl.EdgePages { testutil.FatalOnErrT(t, "Error writing edge page: %v", p.Put(ctx, EdgePageKey(ep.PageKey), ep)) } for _, d := range tbl.Decorations { testutil.FatalOnErrT(t, "Error writing file decorations: %v", p.Put(ctx, DecorationsKey(mustFix(t, d.File.Ticket)), d)) } for _, cr := range tbl.RefSets { testutil.FatalOnErrT(t, "Error writing cross-references: %v", p.Put(ctx, CrossReferencesKey(mustFix(t, cr.SourceTicket)), cr)) } for _, crp := range tbl.RefPages { testutil.FatalOnErrT(t, "Error writing cross-references: %v", p.Put(ctx, CrossReferencesPageKey(crp.PageKey), crp)) } return NewCombinedTable(table.ProtoBatchParallel{p}) }
// Decorations implements part of the xrefs.Interface. func (d *DB) Decorations(ctx context.Context, req *xpb.DecorationsRequest) (*xpb.DecorationsReply, error) { if d.selectText == nil { var err error d.selectText, err = d.Prepare("SELECT text, text_encoding FROM Nodes WHERE ticket = $1;") if err != nil { return nil, fmt.Errorf("error preparing selectText statement: %v", err) } } // TODO(schroederc): dirty buffers // TODO(schroederc): span locations fileTicket, err := kytheuri.Fix(req.Location.Ticket) if err != nil { return nil, fmt.Errorf("invalid location ticket: %v", err) } req.Location.Ticket = fileTicket decor := &xpb.DecorationsReply{Location: req.Location} r := d.selectText.QueryRow(fileTicket) var text []byte var textEncoding sql.NullString if err := r.Scan(&text, &textEncoding); err != nil { return nil, err } norm := xrefs.NewNormalizer(text) if req.SourceText { decor.SourceText = text decor.Encoding = textEncoding.String } if req.References { var err error decor.Reference, err = d.scanReferences(fileTicket, norm) if err != nil { return nil, err } if len(req.Filter) > 0 && len(decor.Reference) > 0 { var nodeTickets stringset.Set for _, r := range decor.Reference { nodeTickets.Add(r.TargetTicket) } nodes, err := d.Nodes(ctx, &gpb.NodesRequest{ Ticket: nodeTickets.Elements(), Filter: req.Filter, }) if err != nil { return nil, fmt.Errorf("error filtering nodes:%v", err) } decor.Nodes = nodes.Nodes } } return decor, nil }
func main() { flag.Parse() if len(flag.Args()) == 0 { flagutil.UsageError("not given any files") } xs := xrefs.WebClient(*remoteAPI) for _, file := range flag.Args() { ticket := (&kytheuri.URI{Corpus: *corpus, Path: file}).String() decor, err := xs.Decorations(ctx, &xpb.DecorationsRequest{ Location: &xpb.Location{Ticket: ticket}, SourceText: true, References: true, }) if err != nil { log.Fatalf("Failed to get decorations for file %q", file) } nmap := xrefs.NodesMap(decor.Nodes) var emitted stringset.Set for _, r := range decor.Reference { if r.Kind != edges.DefinesBinding || emitted.Contains(r.TargetTicket) { continue } ident := string(nmap[r.TargetTicket][identifierFact]) if ident == "" { continue } offset, err := strconv.Atoi(string(nmap[r.SourceTicket][facts.AnchorStart])) if err != nil { log.Printf("Invalid start offset for anchor %q", r.SourceTicket) continue } fields, err := getTagFields(xs, r.TargetTicket) if err != nil { log.Printf("Failed to get tagfields for %q: %v", r.TargetTicket, err) } fmt.Printf("%s\t%s\t%d;\"\t%s\n", ident, file, offsetLine(decor.SourceText, offset), strings.Join(fields, "\t")) emitted.Add(r.TargetTicket) } } }
func displayTargets(edges map[string]*gpb.EdgeSet) error { var targets stringset.Set for _, es := range edges { for _, g := range es.Groups { for _, e := range g.Edge { targets.Add(e.TargetTicket) } } } if *displayJSON { return json.NewEncoder(out).Encode(targets.Elements()) } for target := range targets { if _, err := fmt.Fprintln(out, target); err != nil { return err } } return nil }
// CrossReferences implements part of the xrefs Service interface. func (g *GraphStoreService) CrossReferences(ctx context.Context, req *xpb.CrossReferencesRequest) (*xpb.CrossReferencesReply, error) { // TODO(zarko): Callgraph integration. if len(req.Ticket) == 0 { return nil, errors.New("no cross-references requested") } requestedPageSize := int(req.PageSize) if requestedPageSize == 0 { requestedPageSize = defaultXRefPageSize } eReply, err := g.Edges(ctx, &gpb.EdgesRequest{ Ticket: req.Ticket, PageSize: int32(requestedPageSize), PageToken: req.PageToken, }) if err != nil { return nil, fmt.Errorf("error getting edges for cross-references: %v", err) } reply := &xpb.CrossReferencesReply{ CrossReferences: make(map[string]*xpb.CrossReferencesReply_CrossReferenceSet), NextPageToken: eReply.NextPageToken, } var allRelatedNodes stringset.Set if len(req.Filter) > 0 { reply.Nodes = make(map[string]*cpb.NodeInfo) } // Cache parent files across all anchors files := make(map[string]*fileNode) var totalXRefs int for { for source, es := range eReply.EdgeSets { xr, ok := reply.CrossReferences[source] if !ok { xr = &xpb.CrossReferencesReply_CrossReferenceSet{Ticket: source} } var count int for kind, grp := range es.Groups { switch { // TODO(schroeder): handle declarations case xrefs.IsDefKind(req.DefinitionKind, kind, false): anchors, err := completeAnchors(ctx, g, req.AnchorText, files, kind, edgeTickets(grp.Edge)) if err != nil { return nil, fmt.Errorf("error resolving definition anchors: %v", err) } count += len(anchors) xr.Definition = append(xr.Definition, anchors...) case xrefs.IsRefKind(req.ReferenceKind, kind): anchors, err := completeAnchors(ctx, g, req.AnchorText, files, kind, edgeTickets(grp.Edge)) if err != nil { return nil, fmt.Errorf("error resolving reference anchors: %v", err) } count += len(anchors) xr.Reference = append(xr.Reference, anchors...) 
case xrefs.IsDocKind(req.DocumentationKind, kind): anchors, err := completeAnchors(ctx, g, req.AnchorText, files, kind, edgeTickets(grp.Edge)) if err != nil { return nil, fmt.Errorf("error resolving documentation anchors: %v", err) } count += len(anchors) xr.Documentation = append(xr.Documentation, anchors...) case !allRelatedNodes.Empty() && !edges.IsAnchorEdge(kind): count += len(grp.Edge) for _, edge := range grp.Edge { xr.RelatedNode = append(xr.RelatedNode, &xpb.CrossReferencesReply_RelatedNode{ Ticket: edge.TargetTicket, RelationKind: kind, Ordinal: edge.Ordinal, }) allRelatedNodes.Add(edge.TargetTicket) } } } if count > 0 { reply.CrossReferences[xr.Ticket] = xr totalXRefs += count } } if reply.NextPageToken == "" || totalXRefs > 0 { break } // We need to return at least 1 xref, if there are any log.Println("Extra CrossReferences Edges call: ", reply.NextPageToken) eReply, err = g.Edges(ctx, &gpb.EdgesRequest{ Ticket: req.Ticket, PageSize: int32(requestedPageSize), PageToken: reply.NextPageToken, }) if err != nil { return nil, fmt.Errorf("error getting edges for cross-references: %v", err) } reply.NextPageToken = eReply.NextPageToken } if !allRelatedNodes.Empty() { nReply, err := g.Nodes(ctx, &gpb.NodesRequest{ Ticket: allRelatedNodes.Elements(), Filter: req.Filter, }) if err != nil { return nil, fmt.Errorf("error retrieving related nodes: %v", err) } for ticket, n := range nReply.Nodes { reply.Nodes[ticket] = n } } return reply, nil }
// Decorations implements part of the Service interface.
//
// It resolves the requested file's text, then (when req.References is set)
// walks the file's child anchors and their forward edges to build the
// reference decorations. Dirty buffers are not supported by this
// implementation.
func (g *GraphStoreService) Decorations(ctx context.Context, req *xpb.DecorationsRequest) (*xpb.DecorationsReply, error) {
	if len(req.DirtyBuffer) > 0 {
		return nil, errors.New("UNIMPLEMENTED: dirty buffers")
	} else if req.GetLocation() == nil {
		// TODO(schroederc): allow empty location when given dirty buffer
		return nil, errors.New("missing location")
	}

	fileVName, err := kytheuri.ToVName(req.Location.Ticket)
	if err != nil {
		return nil, fmt.Errorf("invalid file ticket %q: %v", req.Location.Ticket, err)
	}

	// Fetch the file's contents and normalize the requested location against it.
	text, encoding, err := getSourceText(ctx, g.gs, fileVName)
	if err != nil {
		return nil, fmt.Errorf("failed to retrieve file text: %v", err)
	}
	norm := xrefs.NewNormalizer(text)

	loc, err := norm.Location(req.GetLocation())
	if err != nil {
		return nil, err
	}

	reply := &xpb.DecorationsReply{
		Location: loc,
		Nodes:    make(map[string]*cpb.NodeInfo),
	}

	// Handle DecorationsRequest.SourceText switch
	if req.SourceText {
		if loc.Kind == xpb.Location_FILE {
			reply.SourceText = text
		} else {
			// Only return the requested sub-span of the file's text.
			reply.SourceText = text[loc.Start.ByteOffset:loc.End.ByteOffset]
		}
		reply.Encoding = encoding
	}

	// Handle DecorationsRequest.References switch
	if req.References {
		// Traverse the following chain of edges:
		//   file --%/kythe/edge/childof-> []anchor --forwardEdgeKind-> []target
		//
		// Add []anchor and []target nodes to reply.Nodes
		// Add all {anchor, forwardEdgeKind, target} tuples to reply.Reference

		patterns := xrefs.ConvertFilters(req.Filter)

		// All reverse-childof edges from the file lead to its anchors.
		children, err := getEdges(ctx, g.gs, fileVName, func(e *spb.Entry) bool {
			return e.EdgeKind == revChildOfEdgeKind
		})
		if err != nil {
			return nil, fmt.Errorf("failed to retrieve file children: %v", err)
		}

		var targetSet stringset.Set
		for _, edge := range children {
			anchor := edge.Target
			ticket := kytheuri.ToString(anchor)
			// NOTE(review): one Nodes RPC per child anchor — presumably
			// acceptable for this implementation; a batched lookup would
			// reduce round trips. Confirm before changing.
			anchorNodeReply, err := g.Nodes(ctx, &gpb.NodesRequest{
				Ticket: []string{ticket},
			})
			if err != nil {
				return nil, fmt.Errorf("failure getting reference source node: %v", err)
			} else if len(anchorNodeReply.Nodes) != 1 {
				return nil, fmt.Errorf("found %d nodes for {%+v}", len(anchorNodeReply.Nodes), anchor)
			}

			node, ok := xrefs.NodesMap(anchorNodeReply.Nodes)[ticket]
			if !ok {
				return nil, fmt.Errorf("failed to find info for node %q", ticket)
			} else if string(node[facts.NodeKind]) != nodes.Anchor {
				// Skip child if it isn't an anchor node
				continue
			}

			// Parse the anchor's [start, end) byte offsets; malformed anchors
			// are logged and skipped rather than failing the whole request.
			anchorStart, err := strconv.Atoi(string(node[facts.AnchorStart]))
			if err != nil {
				log.Printf("Invalid anchor start offset %q for node %q: %v", node[facts.AnchorStart], ticket, err)
				continue
			}
			anchorEnd, err := strconv.Atoi(string(node[facts.AnchorEnd]))
			if err != nil {
				log.Printf("Invalid anchor end offset %q for node %q: %v", node[facts.AnchorEnd], ticket, err)
				continue
			}

			if loc.Kind == xpb.Location_SPAN {
				// Check if anchor fits within/around requested source text window
				if !xrefs.InSpanBounds(req.SpanKind, int32(anchorStart), int32(anchorEnd), loc.Start.ByteOffset, loc.End.ByteOffset) {
					continue
				} else if anchorStart > anchorEnd {
					log.Printf("Invalid anchor offset span %d:%d", anchorStart, anchorEnd)
					continue
				}
			}

			// Collect the anchor's forward edges (excluding childof), each of
			// which becomes one reference decoration.
			targets, err := getEdges(ctx, g.gs, anchor, func(e *spb.Entry) bool {
				return edges.IsForward(e.EdgeKind) && e.EdgeKind != edges.ChildOf
			})
			if err != nil {
				return nil, fmt.Errorf("failed to retrieve targets of anchor %v: %v", anchor, err)
			}
			if len(targets) == 0 {
				log.Printf("Anchor missing forward edges: {%+v}", anchor)
				continue
			}

			// Include the anchor node itself when it passes the fact filters.
			if node := filterNode(patterns, anchorNodeReply.Nodes[ticket]); node != nil {
				reply.Nodes[ticket] = node
			}
			for _, edge := range targets {
				targetTicket := kytheuri.ToString(edge.Target)
				targetSet.Add(targetTicket)
				reply.Reference = append(reply.Reference, &xpb.DecorationsReply_Reference{
					SourceTicket: ticket,
					Kind:         edge.Kind,
					TargetTicket: targetTicket,
					AnchorStart:  norm.ByteOffset(int32(anchorStart)),
					AnchorEnd:    norm.ByteOffset(int32(anchorEnd)),
				})
			}
		}
		// Return references in file-span order.
		sort.Sort(bySpan(reply.Reference))

		// Only request Nodes when there are fact filters given.
		if len(req.Filter) > 0 {
			// Ensure returned nodes are not duplicated.
			for ticket := range reply.Nodes {
				targetSet.Discard(ticket)
			}

			// Batch request all Reference target nodes
			nodesReply, err := g.Nodes(ctx, &gpb.NodesRequest{
				Ticket: targetSet.Elements(),
				Filter: req.Filter,
			})
			if err != nil {
				return nil, fmt.Errorf("failure getting reference target nodes: %v", err)
			}
			for ticket, node := range nodesReply.Nodes {
				reply.Nodes[ticket] = node
			}
		}
	}

	return reply, nil
}
// Edges implements part of the Service interface. func (g *GraphStoreService) Edges(ctx context.Context, req *gpb.EdgesRequest) (*gpb.EdgesReply, error) { if len(req.Ticket) == 0 { return nil, errors.New("no tickets specified") } else if req.PageToken != "" { return nil, errors.New("UNIMPLEMENTED: page_token") } patterns := xrefs.ConvertFilters(req.Filter) allowedKinds := stringset.New(req.Kind...) var targetSet stringset.Set reply := &gpb.EdgesReply{ EdgeSets: make(map[string]*gpb.EdgeSet), Nodes: make(map[string]*cpb.NodeInfo), } for _, ticket := range req.Ticket { vname, err := kytheuri.ToVName(ticket) if err != nil { return nil, fmt.Errorf("invalid ticket %q: %v", ticket, err) } var ( // EdgeKind -> TargetTicket -> OrdinalSet filteredEdges = make(map[string]map[string]map[int32]struct{}) filteredFacts = make(map[string][]byte) ) if err := g.gs.Read(ctx, &spb.ReadRequest{ Source: vname, EdgeKind: "*", }, func(entry *spb.Entry) error { edgeKind := entry.EdgeKind if edgeKind == "" { // node fact if len(patterns) > 0 && xrefs.MatchesAny(entry.FactName, patterns) { filteredFacts[entry.FactName] = entry.FactValue } } else { // edge edgeKind, ordinal, _ := edges.ParseOrdinal(edgeKind) if len(req.Kind) == 0 || allowedKinds.Contains(edgeKind) { targets, ok := filteredEdges[edgeKind] if !ok { targets = make(map[string]map[int32]struct{}) filteredEdges[edgeKind] = targets } ticket := kytheuri.ToString(entry.Target) ordSet, ok := targets[ticket] if !ok { ordSet = make(map[int32]struct{}) targets[ticket] = ordSet } ordSet[int32(ordinal)] = struct{}{} } } return nil }); err != nil { return nil, fmt.Errorf("failed to retrieve entries for ticket %q", ticket) } // Only add a EdgeSet if there are targets for the requested edge kinds. 
if len(filteredEdges) > 0 { groups := make(map[string]*gpb.EdgeSet_Group) for edgeKind, targets := range filteredEdges { g := &gpb.EdgeSet_Group{} for target, ordinals := range targets { for ordinal := range ordinals { g.Edge = append(g.Edge, &gpb.EdgeSet_Group_Edge{ TargetTicket: target, Ordinal: ordinal, }) } targetSet.Add(target) } groups[edgeKind] = g } reply.EdgeSets[ticket] = &gpb.EdgeSet{ Groups: groups, } // In addition, only add a NodeInfo if the filters have resulting facts. if len(filteredFacts) > 0 { reply.Nodes[ticket] = &cpb.NodeInfo{ Facts: filteredFacts, } } } } // Only request Nodes when there are fact filters given. if len(req.Filter) > 0 { // Eliminate redundant work by removing already requested nodes from targetSet for ticket := range reply.Nodes { targetSet.Discard(ticket) } // Batch request all leftover target nodes nodesReply, err := g.Nodes(ctx, &gpb.NodesRequest{ Ticket: targetSet.Elements(), Filter: req.Filter, }) if err != nil { return nil, fmt.Errorf("failure getting target nodes: %v", err) } for ticket, node := range nodesReply.Nodes { reply.Nodes[ticket] = node } } return reply, nil }
// CrossReferences implements part of the xrefs.Service interface. func (t *Table) CrossReferences(ctx context.Context, req *xpb.CrossReferencesRequest) (*xpb.CrossReferencesReply, error) { tickets, err := xrefs.FixTickets(req.Ticket) if err != nil { return nil, err } stats := refStats{ max: int(req.PageSize), } if stats.max < 0 { return nil, fmt.Errorf("invalid page_size: %d", req.PageSize) } else if stats.max == 0 { stats.max = defaultPageSize } else if stats.max > maxPageSize { stats.max = maxPageSize } var edgesPageToken string if req.PageToken != "" { rec, err := base64.StdEncoding.DecodeString(req.PageToken) if err != nil { return nil, fmt.Errorf("invalid page_token: %q", req.PageToken) } var t ipb.PageToken if err := proto.Unmarshal(rec, &t); err != nil || t.Index < 0 { return nil, fmt.Errorf("invalid page_token: %q", req.PageToken) } stats.skip = int(t.Index) edgesPageToken = t.SecondaryToken } pageToken := stats.skip reply := &xpb.CrossReferencesReply{ CrossReferences: make(map[string]*xpb.CrossReferencesReply_CrossReferenceSet, len(req.Ticket)), Nodes: make(map[string]*cpb.NodeInfo, len(req.Ticket)), Total: &xpb.CrossReferencesReply_Total{}, } var nextToken *ipb.PageToken wantMoreCrossRefs := edgesPageToken == "" && (req.DefinitionKind != xpb.CrossReferencesRequest_NO_DEFINITIONS || req.DeclarationKind != xpb.CrossReferencesRequest_NO_DECLARATIONS || req.ReferenceKind != xpb.CrossReferencesRequest_NO_REFERENCES || req.DocumentationKind != xpb.CrossReferencesRequest_NO_DOCUMENTATION || req.CallerKind != xpb.CrossReferencesRequest_NO_CALLERS) for _, ticket := range tickets { // TODO(schroederc): retrieve PagedCrossReferences in parallel cr, err := t.crossReferences(ctx, ticket) if err == table.ErrNoSuchKey { log.Println("Missing CrossReferences:", ticket) continue } else if err != nil { return nil, fmt.Errorf("error looking up cross-references for ticket %q: %v", ticket, err) } crs := &xpb.CrossReferencesReply_CrossReferenceSet{ Ticket: ticket, } if 
req.ExperimentalSignatures { crs.DisplayName, err = xrefs.SlowSignature(ctx, t, ticket) if err != nil { log.Printf("WARNING: error looking up signature for ticket %q: %v", ticket, err) } } for _, grp := range cr.Group { switch { case xrefs.IsDefKind(req.DefinitionKind, grp.Kind, cr.Incomplete): reply.Total.Definitions += int64(len(grp.Anchor)) if wantMoreCrossRefs { stats.addAnchors(&crs.Definition, grp.Anchor, req.AnchorText) } case xrefs.IsDeclKind(req.DeclarationKind, grp.Kind, cr.Incomplete): reply.Total.Declarations += int64(len(grp.Anchor)) if wantMoreCrossRefs { stats.addAnchors(&crs.Declaration, grp.Anchor, req.AnchorText) } case xrefs.IsDocKind(req.DocumentationKind, grp.Kind): reply.Total.Documentation += int64(len(grp.Anchor)) if wantMoreCrossRefs { stats.addAnchors(&crs.Documentation, grp.Anchor, req.AnchorText) } case xrefs.IsRefKind(req.ReferenceKind, grp.Kind): reply.Total.References += int64(len(grp.Anchor)) if wantMoreCrossRefs { stats.addAnchors(&crs.Reference, grp.Anchor, req.AnchorText) } } } if wantMoreCrossRefs && req.CallerKind != xpb.CrossReferencesRequest_NO_CALLERS { anchors, err := xrefs.SlowCallersForCrossReferences(ctx, t, req.CallerKind == xpb.CrossReferencesRequest_OVERRIDE_CALLERS, req.ExperimentalSignatures, ticket) if err != nil { return nil, fmt.Errorf("error in SlowCallersForCrossReferences: %v", err) } reply.Total.Callers += int64(len(anchors)) stats.addRelatedAnchors(&crs.Caller, anchors, req.AnchorText) } if wantMoreCrossRefs && req.DeclarationKind != xpb.CrossReferencesRequest_NO_DECLARATIONS { decls, err := xrefs.SlowDeclarationsForCrossReferences(ctx, t, ticket) if err != nil { return nil, fmt.Errorf("error in SlowDeclarations: %v", err) } for _, decl := range decls { if decl == ticket { continue // Added in the original loop above. 
} cr, err := t.crossReferences(ctx, decl) if err == table.ErrNoSuchKey { log.Println("Missing CrossReferences:", decl) continue } else if err != nil { return nil, fmt.Errorf("error looking up cross-references for ticket %q: %v", ticket, err) } for _, grp := range cr.Group { if xrefs.IsDeclKind(req.DeclarationKind, grp.Kind, cr.Incomplete) { reply.Total.Declarations += int64(len(grp.Anchor)) stats.addAnchors(&crs.Declaration, grp.Anchor, req.AnchorText) } } } } for _, idx := range cr.PageIndex { switch { case xrefs.IsDefKind(req.DefinitionKind, idx.Kind, cr.Incomplete): reply.Total.Definitions += int64(idx.Count) if wantMoreCrossRefs && !stats.skipPage(idx) { p, err := t.crossReferencesPage(ctx, idx.PageKey) if err != nil { return nil, fmt.Errorf("internal error: error retrieving cross-references page: %v", idx.PageKey) } stats.addAnchors(&crs.Definition, p.Group.Anchor, req.AnchorText) } case xrefs.IsDeclKind(req.DeclarationKind, idx.Kind, cr.Incomplete): reply.Total.Declarations += int64(idx.Count) if wantMoreCrossRefs && !stats.skipPage(idx) { p, err := t.crossReferencesPage(ctx, idx.PageKey) if err != nil { return nil, fmt.Errorf("internal error: error retrieving cross-references page: %v", idx.PageKey) } stats.addAnchors(&crs.Declaration, p.Group.Anchor, req.AnchorText) } case xrefs.IsDocKind(req.DocumentationKind, idx.Kind): reply.Total.Documentation += int64(idx.Count) if wantMoreCrossRefs && !stats.skipPage(idx) { p, err := t.crossReferencesPage(ctx, idx.PageKey) if err != nil { return nil, fmt.Errorf("internal error: error retrieving cross-references page: %v", idx.PageKey) } stats.addAnchors(&crs.Documentation, p.Group.Anchor, req.AnchorText) } case xrefs.IsRefKind(req.ReferenceKind, idx.Kind): reply.Total.References += int64(idx.Count) if wantMoreCrossRefs && !stats.skipPage(idx) { p, err := t.crossReferencesPage(ctx, idx.PageKey) if err != nil { return nil, fmt.Errorf("internal error: error retrieving cross-references page: %v", idx.PageKey) } 
stats.addAnchors(&crs.Reference, p.Group.Anchor, req.AnchorText) } } } if len(crs.Declaration) > 0 || len(crs.Definition) > 0 || len(crs.Reference) > 0 || len(crs.Documentation) > 0 || len(crs.Caller) > 0 { reply.CrossReferences[crs.Ticket] = crs } } if pageToken+stats.total != sumTotalCrossRefs(reply.Total) && stats.total != 0 { nextToken = &ipb.PageToken{Index: int32(pageToken + stats.total)} } if len(req.Filter) > 0 { er, err := t.edges(ctx, edgesRequest{ Tickets: tickets, Filters: req.Filter, Kinds: func(kind string) bool { return !edges.IsAnchorEdge(kind) }, PageToken: edgesPageToken, TotalOnly: (stats.max <= stats.total), PageSize: stats.max - stats.total, }) if err != nil { return nil, fmt.Errorf("error getting related nodes: %v", err) } reply.Total.RelatedNodesByRelation = er.TotalEdgesByKind for ticket, es := range er.EdgeSets { var nodes stringset.Set crs, ok := reply.CrossReferences[ticket] if !ok { crs = &xpb.CrossReferencesReply_CrossReferenceSet{ Ticket: ticket, } } for kind, g := range es.Groups { for _, edge := range g.Edge { nodes.Add(edge.TargetTicket) crs.RelatedNode = append(crs.RelatedNode, &xpb.CrossReferencesReply_RelatedNode{ RelationKind: kind, Ticket: edge.TargetTicket, Ordinal: edge.Ordinal, }) } } if len(nodes) > 0 { for ticket, n := range er.Nodes { if nodes.Contains(ticket) { reply.Nodes[ticket] = n } } } if !ok && len(crs.RelatedNode) > 0 { reply.CrossReferences[ticket] = crs } } if er.NextPageToken != "" { nextToken = &ipb.PageToken{SecondaryToken: er.NextPageToken} } } if nextToken != nil { rec, err := proto.Marshal(nextToken) if err != nil { return nil, fmt.Errorf("internal error: error marshalling page token: %v", err) } reply.NextPageToken = base64.StdEncoding.EncodeToString(rec) } if req.NodeDefinitions { nodeTickets := make([]string, 0, len(reply.Nodes)) for ticket := range reply.Nodes { nodeTickets = append(nodeTickets, ticket) } // TODO(schroederc): cache this in the serving data defs, err := xrefs.SlowDefinitions(ctx, t, 
nodeTickets) if err != nil { return nil, fmt.Errorf("error retrieving node definitions: %v", err) } reply.DefinitionLocations = make(map[string]*xpb.Anchor, len(defs)) for ticket, def := range defs { node, ok := reply.Nodes[ticket] if !ok { panic(fmt.Sprintf("extra definition returned for unknown node %q: %v", ticket, def)) } node.Definition = def.Ticket if _, ok := reply.DefinitionLocations[def.Ticket]; !ok { reply.DefinitionLocations[def.Ticket] = def } } } return reply, nil }
// Decorations implements part of the xrefs Service interface.
//
// It serves the precomputed file decorations for the requested location,
// optionally patching anchor spans against a dirty buffer, constraining the
// result to a span, and resolving override/definition information.
func (t *Table) Decorations(ctx context.Context, req *xpb.DecorationsRequest) (*xpb.DecorationsReply, error) {
	if req.GetLocation() == nil || req.GetLocation().Ticket == "" {
		return nil, errors.New("missing location")
	}

	ticket, err := kytheuri.Fix(req.GetLocation().Ticket)
	if err != nil {
		return nil, fmt.Errorf("invalid ticket %q: %v", req.GetLocation().Ticket, err)
	}

	decor, err := t.fileDecorations(ctx, ticket)
	if err == table.ErrNoSuchKey {
		return nil, xrefs.ErrDecorationsNotFound
	} else if err != nil {
		return nil, fmt.Errorf("lookup error for file decorations %q: %v", ticket, err)
	}

	// Normalize offsets against the dirty buffer when one is supplied;
	// otherwise against the stored file text.
	text := decor.File.Text
	if len(req.DirtyBuffer) > 0 {
		text = req.DirtyBuffer
	}
	norm := xrefs.NewNormalizer(text)

	loc, err := norm.Location(req.GetLocation())
	if err != nil {
		return nil, err
	}

	reply := &xpb.DecorationsReply{Location: loc}

	if req.SourceText {
		reply.Encoding = decor.File.Encoding
		if loc.Kind == xpb.Location_FILE {
			reply.SourceText = text
		} else {
			// Only the requested sub-span of the (possibly dirty) text.
			reply.SourceText = text[loc.Start.ByteOffset:loc.End.ByteOffset]
		}
	}

	if req.References {
		patterns := xrefs.ConvertFilters(req.Filter)

		// The patcher maps stored anchor offsets onto the dirty buffer.
		// NOTE(review): patcher is left nil when no dirty buffer is given and
		// Patch is still called below — presumably Patch is nil-receiver-safe
		// and returns the offsets unchanged; confirm in xrefs.Patcher.
		var patcher *xrefs.Patcher
		if len(req.DirtyBuffer) > 0 {
			patcher = xrefs.NewPatcher(decor.File.Text, req.DirtyBuffer)
		}

		// The span with which to constrain the set of returned anchor references.
		var startBoundary, endBoundary int32
		spanKind := req.SpanKind
		if loc.Kind == xpb.Location_FILE {
			startBoundary = 0
			endBoundary = int32(len(text))
			spanKind = xpb.DecorationsRequest_WITHIN_SPAN
		} else {
			startBoundary = loc.Start.ByteOffset
			endBoundary = loc.End.ByteOffset
		}

		reply.Reference = make([]*xpb.DecorationsReply_Reference, 0, len(decor.Decoration))
		reply.Nodes = make(map[string]*cpb.NodeInfo)

		// Tracks targets whose NodeInfo has already been copied to the reply.
		var seenTarget stringset.Set

		// Reference.TargetTicket -> NodeInfo (superset of reply.Nodes)
		var nodes map[string]*cpb.NodeInfo
		if len(patterns) > 0 {
			nodes = make(map[string]*cpb.NodeInfo)
			for _, n := range decor.Target {
				nodes[n.Ticket] = nodeToInfo(patterns, n)
			}
		}

		// Reference.TargetTicket -> []Reference set
		var refs map[string][]*xpb.DecorationsReply_Reference
		// ExpandedAnchor.Ticket -> ExpandedAnchor
		var defs map[string]*srvpb.ExpandedAnchor
		if req.TargetDefinitions {
			refs = make(map[string][]*xpb.DecorationsReply_Reference)

			reply.DefinitionLocations = make(map[string]*xpb.Anchor)

			// Index the definitions precomputed in the serving data.
			defs = make(map[string]*srvpb.ExpandedAnchor)
			for _, def := range decor.TargetDefinitions {
				defs[def.Ticket] = def
			}
		}

		// Targets of defines/binding references, for override lookup below.
		var bindings []string

		for _, d := range decor.Decoration {
			start, end, exists := patcher.Patch(d.Anchor.StartOffset, d.Anchor.EndOffset)
			// Filter non-existent anchor. Anchors can no longer exist if we were
			// given a dirty buffer and the anchor was inside a changed region.
			if exists {
				if xrefs.InSpanBounds(spanKind, start, end, startBoundary, endBoundary) {
					d.Anchor.StartOffset = start
					d.Anchor.EndOffset = end

					r := decorationToReference(norm, d)
					if req.TargetDefinitions {
						if def, ok := defs[d.TargetDefinition]; ok {
							// Definition already known; emit its location.
							reply.DefinitionLocations[d.TargetDefinition] = a2a(def, false).Anchor
						} else {
							// Remember the reference so its definition can be
							// resolved in the fallback pass below.
							refs[r.TargetTicket] = append(refs[r.TargetTicket], r)
						}
					} else {
						r.TargetDefinition = ""
					}

					if req.ExtendsOverrides && r.Kind == edges.DefinesBinding {
						bindings = append(bindings, r.TargetTicket)
					}

					reply.Reference = append(reply.Reference, r)

					if !seenTarget.Contains(r.TargetTicket) && nodes != nil {
						reply.Nodes[r.TargetTicket] = nodes[r.TargetTicket]
						seenTarget.Add(r.TargetTicket)
					}
				}
			}
		}

		// Resolve overrides for all defines/binding targets.
		var extendsOverrides map[string][]*xpb.DecorationsReply_Override
		var extendsOverridesTargets stringset.Set
		if len(bindings) != 0 {
			extendsOverrides, err = xrefs.SlowOverrides(ctx, t, bindings)
			if err != nil {
				return nil, fmt.Errorf("lookup error for overrides tickets: %v", err)
			}
			if len(extendsOverrides) != 0 {
				reply.ExtendsOverrides = make(map[string]*xpb.DecorationsReply_Overrides, len(extendsOverrides))
				for ticket, eos := range extendsOverrides {
					// Note: extendsOverrides goes out of scope after this loop, so downstream code won't accidentally
					// mutate reply.ExtendsOverrides via aliasing.
					pb := &xpb.DecorationsReply_Overrides{Override: eos}
					for _, eo := range eos {
						extendsOverridesTargets.Add(eo.Ticket)
					}
					reply.ExtendsOverrides[ticket] = pb
				}
			}
		}

		if len(extendsOverridesTargets) != 0 && len(patterns) > 0 {
			// Add missing NodeInfo.
			request := &gpb.NodesRequest{Filter: req.Filter}
			for ticket := range extendsOverridesTargets {
				if _, ok := reply.Nodes[ticket]; !ok {
					request.Ticket = append(request.Ticket, ticket)
				}
			}
			if len(request.Ticket) > 0 {
				nodes, err := t.Nodes(ctx, request)
				if err != nil {
					return nil, fmt.Errorf("lookup error for overrides nodes: %v", err)
				}
				for ticket, node := range nodes.Nodes {
					reply.Nodes[ticket] = node
				}
			}
		}

		// Only compute target definitions if the serving data doesn't contain any
		// TODO(schroederc): remove this once serving data is always populated
		if req.TargetDefinitions && len(defs) == 0 {
			targetTickets := make([]string, 0, len(refs))
			for ticket := range refs {
				targetTickets = append(targetTickets, ticket)
			}
			for ticket := range extendsOverridesTargets {
				targetTickets = append(targetTickets, ticket)
			}

			defs, err := xrefs.SlowDefinitions(ctx, t, targetTickets)
			if err != nil {
				return nil, fmt.Errorf("error retrieving target definitions: %v", err)
			}

			reply.DefinitionLocations = make(map[string]*xpb.Anchor, len(defs))
			for tgt, def := range defs {
				if extendsOverridesTargets.Contains(tgt) {
					if _, ok := reply.DefinitionLocations[tgt]; !ok {
						reply.DefinitionLocations[tgt] = def
					}
				}
				if refsTgt, ok := refs[tgt]; ok {
					for _, ref := range refsTgt {
						// Skip self-referential definitions.
						if def.Ticket != ref.SourceTicket {
							ref.TargetDefinition = def.Ticket
							if _, ok := reply.DefinitionLocations[def.Ticket]; !ok {
								reply.DefinitionLocations[def.Ticket] = def
							}
						}
					}
				}
			}
		}
	}

	return reply, nil
}
// edges retrieves the paged edge sets for req.Tickets, filtering them by
// req.Kinds and paging the results according to req.PageSize/req.PageToken.
// When req.TotalOnly is set, no edges are returned — only the per-kind
// totals are counted.
func (t *Table) edges(ctx context.Context, req edgesRequest) (*gpb.EdgesReply, error) {
	stats := filterStats{
		max: int(req.PageSize),
	}
	if req.TotalOnly {
		// max == 0 with TotalOnly short-circuits all edge collection below.
		stats.max = 0
	} else if stats.max < 0 {
		return nil, fmt.Errorf("invalid page_size: %d", req.PageSize)
	} else if stats.max == 0 {
		stats.max = defaultPageSize
	} else if stats.max > maxPageSize {
		stats.max = maxPageSize
	}

	// Decode the continuation token into the number of edges to skip.
	if req.PageToken != "" {
		rec, err := base64.StdEncoding.DecodeString(req.PageToken)
		if err != nil {
			return nil, fmt.Errorf("invalid page_token: %q", req.PageToken)
		}
		var t ipb.PageToken
		if err := proto.Unmarshal(rec, &t); err != nil || t.Index < 0 {
			return nil, fmt.Errorf("invalid page_token: %q", req.PageToken)
		}
		stats.skip = int(t.Index)
	}
	pageToken := stats.skip

	// Tickets already added to reply.Nodes (avoids duplicate conversions).
	var nodeTickets stringset.Set

	rs, err := t.pagedEdgeSets(ctx, req.Tickets)
	if err != nil {
		return nil, err
	}
	defer func() {
		// drain channel in case of errors or early return
		for _ = range rs {
		}
	}()

	patterns := xrefs.ConvertFilters(req.Filters)

	reply := &gpb.EdgesReply{
		EdgeSets:         make(map[string]*gpb.EdgeSet),
		Nodes:            make(map[string]*cpb.NodeInfo),
		TotalEdgesByKind: make(map[string]int64),
	}
	for r := range rs {
		if r.Err == table.ErrNoSuchKey {
			continue
		} else if r.Err != nil {
			return nil, r.Err
		}
		pes := r.PagedEdgeSet
		// Totals are counted for every edge set, even when the page is full.
		countEdgeKinds(pes, req.Kinds, reply.TotalEdgesByKind)

		// Don't scan the EdgeSet_Groups if we're already at the specified page_size.
		if stats.total == stats.max {
			continue
		}

		// First, collect edges from the groups stored inline in the edge set.
		groups := make(map[string]*gpb.EdgeSet_Group)
		for _, grp := range pes.Group {
			if req.Kinds == nil || req.Kinds(grp.Kind) {
				ng, ns := stats.filter(grp)
				if ng != nil {
					for _, n := range ns {
						if len(patterns) > 0 && !nodeTickets.Contains(n.Ticket) {
							nodeTickets.Add(n.Ticket)
							reply.Nodes[n.Ticket] = nodeToInfo(patterns, n)
						}
					}
					groups[grp.Kind] = ng
					if stats.total == stats.max {
						break
					}
				}
			}
		}

		// TODO(schroederc): ensure that pes.EdgeSet.Groups and pes.PageIndexes of
		// the same kind are grouped together in the EdgesReply

		// Then, if the page still has room, pull in edges from secondary
		// edge pages referenced by the page index.
		if stats.total != stats.max {
			for _, idx := range pes.PageIndex {
				if req.Kinds == nil || req.Kinds(idx.EdgeKind) {
					if stats.skipPage(idx) {
						log.Printf("Skipping EdgePage: %s", idx.PageKey)
						continue
					}

					log.Printf("Retrieving EdgePage: %s", idx.PageKey)
					ep, err := t.edgePage(ctx, idx.PageKey)
					if err == table.ErrNoSuchKey {
						return nil, fmt.Errorf("internal error: missing edge page: %q", idx.PageKey)
					} else if err != nil {
						return nil, fmt.Errorf("edge page lookup error (page key: %q): %v", idx.PageKey, err)
					}

					ng, ns := stats.filter(ep.EdgesGroup)
					if ng != nil {
						for _, n := range ns {
							if len(patterns) > 0 && !nodeTickets.Contains(n.Ticket) {
								nodeTickets.Add(n.Ticket)
								reply.Nodes[n.Ticket] = nodeToInfo(patterns, n)
							}
						}
						groups[ep.EdgesGroup.Kind] = ng
						if stats.total == stats.max {
							break
						}
					}
				}
			}
		}

		if len(groups) > 0 {
			reply.EdgeSets[pes.Source.Ticket] = &gpb.EdgeSet{Groups: groups}

			// Also return NodeInfo for the source node when filters are given.
			if len(patterns) > 0 && !nodeTickets.Contains(pes.Source.Ticket) {
				nodeTickets.Add(pes.Source.Ticket)
				reply.Nodes[pes.Source.Ticket] = nodeToInfo(patterns, pes.Source)
			}
		}
	}
	totalEdgesPossible := int(sumEdgeKinds(reply.TotalEdgesByKind))

	// Internal consistency checks: a violation here indicates a bug in the
	// paging bookkeeping above, not bad input.
	if stats.total > stats.max {
		log.Panicf("totalEdges greater than maxEdges: %d > %d", stats.total, stats.max)
	} else if pageToken+stats.total > totalEdgesPossible && pageToken <= totalEdgesPossible {
		log.Panicf("pageToken+totalEdges greater than totalEdgesPossible: %d+%d > %d", pageToken, stats.total, totalEdgesPossible)
	}

	// Emit a continuation token if edges remain beyond this page.
	if pageToken+stats.total != totalEdgesPossible && stats.total != 0 {
		rec, err := proto.Marshal(&ipb.PageToken{Index: int32(pageToken + stats.total)})
		if err != nil {
			return nil, fmt.Errorf("internal error: error marshalling page token: %v", err)
		}
		reply.NextPageToken = base64.StdEncoding.EncodeToString(rec)
	}

	return reply, nil
}
// CrossReferences implements part of the xrefs.Interface. func (d *DB) CrossReferences(ctx context.Context, req *xpb.CrossReferencesRequest) (*xpb.CrossReferencesReply, error) { // TODO(zarko): Callgraph integration. tickets, err := xrefs.FixTickets(req.Ticket) if err != nil { return nil, err } pageSize := int(req.PageSize) if pageSize <= 0 { pageSize = defaultPageSize } else if pageSize > maxPageSize { pageSize = maxPageSize } var pageOffset int var edgesToken string if req.PageToken != "" { rec, err := base64.StdEncoding.DecodeString(req.PageToken) if err != nil { return nil, fmt.Errorf("invalid page_token: %q", req.PageToken) } var t ipb.PageToken if err := proto.Unmarshal(rec, &t); err != nil || t.Index < 0 { return nil, fmt.Errorf("invalid page_token: %q", req.PageToken) } pageOffset = int(t.Index) edgesToken = t.SecondaryToken } reply := &xpb.CrossReferencesReply{ CrossReferences: make(map[string]*xpb.CrossReferencesReply_CrossReferenceSet), Nodes: make(map[string]*cpb.NodeInfo), } setQ, ticketArgs := sqlSetQuery(1, tickets) var count int if edgesToken == "" { args := append(ticketArgs, pageSize+1, pageOffset) // +1 to check for next page rs, err := d.Query(fmt.Sprintf("SELECT ticket, kind, proto FROM CrossReferences WHERE ticket IN %s ORDER BY ticket LIMIT $%d OFFSET $%d;", setQ, len(tickets)+1, len(tickets)+2), args...) 
if err != nil { return nil, err } defer closeRows(rs) var xrs *xpb.CrossReferencesReply_CrossReferenceSet for rs.Next() { count++ if count > pageSize { continue } var ticket, kind string var rec []byte if err := rs.Scan(&ticket, &kind, &rec); err != nil { return nil, err } if xrs != nil && xrs.Ticket != ticket { if len(xrs.Definition) > 0 || len(xrs.Documentation) > 0 || len(xrs.Reference) > 0 || len(xrs.RelatedNode) > 0 { reply.CrossReferences[xrs.Ticket] = xrs } xrs = nil } if xrs == nil { xrs = &xpb.CrossReferencesReply_CrossReferenceSet{Ticket: ticket} } switch { // TODO(schroederc): handle declarations case xrefs.IsDefKind(req.DefinitionKind, kind, false): xrs.Definition, err = addRelatedAnchor(xrs.Definition, rec, req.AnchorText) if err != nil { return nil, err } case xrefs.IsDocKind(req.DocumentationKind, kind): xrs.Documentation, err = addRelatedAnchor(xrs.Documentation, rec, req.AnchorText) if err != nil { return nil, err } case xrefs.IsRefKind(req.ReferenceKind, kind): xrs.Reference, err = addRelatedAnchor(xrs.Reference, rec, req.AnchorText) if err != nil { return nil, err } } } if xrs != nil && (len(xrs.Definition) > 0 || len(xrs.Documentation) > 0 || len(xrs.Reference) > 0 || len(xrs.RelatedNode) > 0) { reply.CrossReferences[xrs.Ticket] = xrs } if count > pageSize { rec, err := proto.Marshal(&ipb.PageToken{Index: int32(pageOffset + pageSize)}) if err != nil { return nil, fmt.Errorf("internal error: error marshalling page token: %v", err) } reply.NextPageToken = base64.StdEncoding.EncodeToString(rec) } } if len(req.Filter) > 0 && count <= pageSize { // TODO(schroederc): consolidate w/ LevelDB implementation er, err := d.edges(ctx, &gpb.EdgesRequest{ Ticket: tickets, Filter: req.Filter, PageSize: int32(pageSize - count), PageToken: edgesToken, }, func(kind string) bool { return !edges.IsAnchorEdge(kind) }) if err != nil { return nil, fmt.Errorf("error getting related nodes: %v", err) } for ticket, es := range er.EdgeSets { var nodes stringset.Set crs, ok 
:= reply.CrossReferences[ticket] if !ok { crs = &xpb.CrossReferencesReply_CrossReferenceSet{ Ticket: ticket, } } for kind, g := range es.Groups { if !edges.IsAnchorEdge(kind) { for _, edge := range g.Edge { nodes.Add(edge.TargetTicket) crs.RelatedNode = append(crs.RelatedNode, &xpb.CrossReferencesReply_RelatedNode{ RelationKind: kind, Ticket: edge.TargetTicket, Ordinal: edge.Ordinal, }) } } } if len(nodes) > 0 { for ticket, n := range er.Nodes { if nodes.Contains(ticket) { reply.Nodes[ticket] = n } } } if !ok && len(crs.RelatedNode) > 0 { reply.CrossReferences[ticket] = crs } } if er.NextPageToken != "" { rec, err := proto.Marshal(&ipb.PageToken{SecondaryToken: er.NextPageToken}) if err != nil { return nil, fmt.Errorf("internal error: error marshalling page token: %v", err) } reply.NextPageToken = base64.StdEncoding.EncodeToString(rec) } } return reply, nil }
func (d *DB) edges(ctx context.Context, req *gpb.EdgesRequest, edgeFilter func(kind string) bool) (*gpb.EdgesReply, error) { tickets, err := xrefs.FixTickets(req.Ticket) if err != nil { return nil, err } pageSize := int(req.PageSize) if pageSize <= 0 { pageSize = defaultPageSize } else if pageSize > maxPageSize { pageSize = maxPageSize } var pageOffset int if req.PageToken != "" { rec, err := base64.StdEncoding.DecodeString(req.PageToken) if err != nil { return nil, fmt.Errorf("invalid page_token: %q", req.PageToken) } var t ipb.PageToken if err := proto.Unmarshal(rec, &t); err != nil || t.Index < 0 { return nil, fmt.Errorf("invalid page_token: %q", req.PageToken) } pageOffset = int(t.Index) } // Select only the edges from the given source tickets setQ, args := sqlSetQuery(1, tickets) query := fmt.Sprintf(` SELECT * FROM AllEdges WHERE source IN %s`, setQ) // Filter by edges kinds, if given if len(req.Kind) > 0 { kSetQ, kArgs := sqlSetQuery(1+len(args), req.Kind) query += fmt.Sprintf(` AND kind IN %s`, kSetQ) args = append(args, kArgs...) } // Scan edge sets/groups in order; necessary for CrossReferences query += " ORDER BY source, kind, ordinal" // Seek to the requested page offset (req.PageToken.Index). We don't use // LIMIT here because we don't yet know how many edges will be filtered by // edgeFilter. query += fmt.Sprintf(" OFFSET $%d", len(args)+1) args = append(args, pageOffset) rs, err := d.Query(query, args...) 
if err != nil { return nil, fmt.Errorf("error querying for edges: %v", err) } defer closeRows(rs) var scanned int // edges := map { source -> kind -> target -> ordinal set } edges := make(map[string]map[string]map[string]map[int32]struct{}, len(tickets)) for count := 0; count < pageSize && rs.Next(); scanned++ { var source, kind, target string var ordinal int if err := rs.Scan(&source, &kind, &target, &ordinal); err != nil { return nil, fmt.Errorf("edges scan error: %v", err) } if edgeFilter != nil && !edgeFilter(kind) { continue } count++ groups, ok := edges[source] if !ok { groups = make(map[string]map[string]map[int32]struct{}) edges[source] = groups } targets, ok := groups[kind] if !ok { targets = make(map[string]map[int32]struct{}) groups[kind] = targets } ordinals, ok := targets[target] if !ok { ordinals = make(map[int32]struct{}) targets[target] = ordinals } ordinals[int32(ordinal)] = struct{}{} } reply := &gpb.EdgesReply{EdgeSets: make(map[string]*gpb.EdgeSet, len(edges))} var nodeTickets stringset.Set for src, groups := range edges { gs := make(map[string]*gpb.EdgeSet_Group, len(groups)) nodeTickets.Add(src) for kind, targets := range groups { edges := make([]*gpb.EdgeSet_Group_Edge, 0, len(targets)) for ticket, ordinals := range targets { for ordinal := range ordinals { edges = append(edges, &gpb.EdgeSet_Group_Edge{ TargetTicket: ticket, Ordinal: ordinal, }) } nodeTickets.Add(ticket) } sort.Sort(xrefs.ByOrdinal(edges)) gs[kind] = &gpb.EdgeSet_Group{ Edge: edges, } } reply.EdgeSets[src] = &gpb.EdgeSet{ Groups: gs, } } // If there is another row, there is a NextPageToken. 
if rs.Next() { rec, err := proto.Marshal(&ipb.PageToken{Index: int32(pageOffset + scanned)}) if err != nil { return nil, fmt.Errorf("internal error: error marshalling page token: %v", err) } reply.NextPageToken = base64.StdEncoding.EncodeToString(rec) } // TODO(schroederc): faster node lookups if len(req.Filter) > 0 && !nodeTickets.Empty() { nodes, err := d.Nodes(ctx, &gpb.NodesRequest{ Ticket: nodeTickets.Elements(), Filter: req.Filter, }) if err != nil { return nil, fmt.Errorf("error filtering nodes:%v", err) } reply.Nodes = nodes.Nodes } return reply, nil }