// Decorations implements part of the xrefs.Interface.
func (d *DB) Decorations(ctx context.Context, req *xpb.DecorationsRequest) (*xpb.DecorationsReply, error) {
	if d.selectText == nil {
		var err error
		d.selectText, err = d.Prepare("SELECT text, text_encoding FROM Nodes WHERE ticket = $1;")
		if err != nil {
			return nil, fmt.Errorf("error preparing selectText statement: %v", err)
		}
	}

	// TODO(schroederc): dirty buffers
	// TODO(schroederc): span locations

	fileTicket, err := kytheuri.Fix(req.Location.Ticket)
	if err != nil {
		return nil, fmt.Errorf("invalid location ticket: %v", err)
	}
	req.Location.Ticket = fileTicket
	decor := &xpb.DecorationsReply{Location: req.Location}

	r := d.selectText.QueryRow(fileTicket)
	var text []byte
	var textEncoding sql.NullString
	if err := r.Scan(&text, &textEncoding); err != nil {
		return nil, err
	}
	norm := xrefs.NewNormalizer(text)

	if req.SourceText {
		decor.SourceText = text
		decor.Encoding = textEncoding.String
	}

	if req.References {
		var err error
		decor.Reference, err = d.scanReferences(fileTicket, norm)
		if err != nil {
			return nil, err
		}

		if len(req.Filter) > 0 && len(decor.Reference) > 0 {
			var nodeTickets stringset.Set
			for _, r := range decor.Reference {
				nodeTickets.Add(r.TargetTicket)
			}
			nodes, err := d.Nodes(ctx, &gpb.NodesRequest{
				Ticket: nodeTickets.Elements(),
				Filter: req.Filter,
			})
			if err != nil {
				return nil, fmt.Errorf("error filtering nodes: %v", err)
			}
			decor.Nodes = nodes.Nodes
		}
	}

	return decor, nil
}
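// exampleDecorations is a minimal sketch, not part of this package's API,
// showing how a caller might request the source text and references for a
// file from the SQL-backed service above.  The file ticket and the
// "/kythe/node/kind" fact filter are illustrative assumptions, not values
// required by this package.
func exampleDecorations(ctx context.Context, db *DB) (*xpb.DecorationsReply, error) {
	return db.Decorations(ctx, &xpb.DecorationsRequest{
		Location:   &xpb.Location{Ticket: "kythe://corpus?path=some/file.go"}, // assumed ticket
		SourceText: true,                                                      // return the file contents
		References: true,                                                      // return anchor/target references
		Filter:     []string{"/kythe/node/kind"},                              // facts to return per referenced node
	})
}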
func displayTargets(edges map[string]*gpb.EdgeSet) error {
	var targets stringset.Set
	for _, es := range edges {
		for _, g := range es.Groups {
			for _, e := range g.Edge {
				targets.Add(e.TargetTicket)
			}
		}
	}

	if *displayJSON {
		return json.NewEncoder(out).Encode(targets.Elements())
	}

	for target := range targets {
		if _, err := fmt.Fprintln(out, target); err != nil {
			return err
		}
	}

	return nil
}
// CrossReferences implements part of the xrefs Service interface.
func (g *GraphStoreService) CrossReferences(ctx context.Context, req *xpb.CrossReferencesRequest) (*xpb.CrossReferencesReply, error) {
	// TODO(zarko): Callgraph integration.
	if len(req.Ticket) == 0 {
		return nil, errors.New("no cross-references requested")
	}

	requestedPageSize := int(req.PageSize)
	if requestedPageSize == 0 {
		requestedPageSize = defaultXRefPageSize
	}

	eReply, err := g.Edges(ctx, &gpb.EdgesRequest{
		Ticket:    req.Ticket,
		PageSize:  int32(requestedPageSize),
		PageToken: req.PageToken,
	})
	if err != nil {
		return nil, fmt.Errorf("error getting edges for cross-references: %v", err)
	}

	reply := &xpb.CrossReferencesReply{
		CrossReferences: make(map[string]*xpb.CrossReferencesReply_CrossReferenceSet),
		NextPageToken:   eReply.NextPageToken,
	}
	var allRelatedNodes stringset.Set
	if len(req.Filter) > 0 {
		reply.Nodes = make(map[string]*cpb.NodeInfo)
	}

	// Cache parent files across all anchors
	files := make(map[string]*fileNode)

	var totalXRefs int
	for {
		for source, es := range eReply.EdgeSets {
			xr, ok := reply.CrossReferences[source]
			if !ok {
				xr = &xpb.CrossReferencesReply_CrossReferenceSet{Ticket: source}
			}

			var count int
			for kind, grp := range es.Groups {
				switch {
				// TODO(schroeder): handle declarations
				case xrefs.IsDefKind(req.DefinitionKind, kind, false):
					anchors, err := completeAnchors(ctx, g, req.AnchorText, files, kind, edgeTickets(grp.Edge))
					if err != nil {
						return nil, fmt.Errorf("error resolving definition anchors: %v", err)
					}
					count += len(anchors)
					xr.Definition = append(xr.Definition, anchors...)
				case xrefs.IsRefKind(req.ReferenceKind, kind):
					anchors, err := completeAnchors(ctx, g, req.AnchorText, files, kind, edgeTickets(grp.Edge))
					if err != nil {
						return nil, fmt.Errorf("error resolving reference anchors: %v", err)
					}
					count += len(anchors)
					xr.Reference = append(xr.Reference, anchors...)
				case xrefs.IsDocKind(req.DocumentationKind, kind):
					anchors, err := completeAnchors(ctx, g, req.AnchorText, files, kind, edgeTickets(grp.Edge))
					if err != nil {
						return nil, fmt.Errorf("error resolving documentation anchors: %v", err)
					}
					count += len(anchors)
					xr.Documentation = append(xr.Documentation, anchors...)
				// Only collect related nodes when fact filters were requested
				// (reply.Nodes is only allocated in that case).
				case len(req.Filter) > 0 && !edges.IsAnchorEdge(kind):
					count += len(grp.Edge)
					for _, edge := range grp.Edge {
						xr.RelatedNode = append(xr.RelatedNode, &xpb.CrossReferencesReply_RelatedNode{
							Ticket:       edge.TargetTicket,
							RelationKind: kind,
							Ordinal:      edge.Ordinal,
						})
						allRelatedNodes.Add(edge.TargetTicket)
					}
				}
			}

			if count > 0 {
				reply.CrossReferences[xr.Ticket] = xr
				totalXRefs += count
			}
		}

		if reply.NextPageToken == "" || totalXRefs > 0 {
			break
		}

		// We need to return at least 1 xref, if there are any
		log.Println("Extra CrossReferences Edges call: ", reply.NextPageToken)
		eReply, err = g.Edges(ctx, &gpb.EdgesRequest{
			Ticket:    req.Ticket,
			PageSize:  int32(requestedPageSize),
			PageToken: reply.NextPageToken,
		})
		if err != nil {
			return nil, fmt.Errorf("error getting edges for cross-references: %v", err)
		}
		reply.NextPageToken = eReply.NextPageToken
	}

	if !allRelatedNodes.Empty() {
		nReply, err := g.Nodes(ctx, &gpb.NodesRequest{
			Ticket: allRelatedNodes.Elements(),
			Filter: req.Filter,
		})
		if err != nil {
			return nil, fmt.Errorf("error retrieving related nodes: %v", err)
		}
		for ticket, n := range nReply.Nodes {
			reply.Nodes[ticket] = n
		}
	}

	return reply, nil
}
// Decorations implements part of the Service interface.
func (g *GraphStoreService) Decorations(ctx context.Context, req *xpb.DecorationsRequest) (*xpb.DecorationsReply, error) {
	if len(req.DirtyBuffer) > 0 {
		return nil, errors.New("UNIMPLEMENTED: dirty buffers")
	} else if req.GetLocation() == nil {
		// TODO(schroederc): allow empty location when given dirty buffer
		return nil, errors.New("missing location")
	}

	fileVName, err := kytheuri.ToVName(req.Location.Ticket)
	if err != nil {
		return nil, fmt.Errorf("invalid file ticket %q: %v", req.Location.Ticket, err)
	}

	text, encoding, err := getSourceText(ctx, g.gs, fileVName)
	if err != nil {
		return nil, fmt.Errorf("failed to retrieve file text: %v", err)
	}
	norm := xrefs.NewNormalizer(text)

	loc, err := norm.Location(req.GetLocation())
	if err != nil {
		return nil, err
	}

	reply := &xpb.DecorationsReply{
		Location: loc,
		Nodes:    make(map[string]*cpb.NodeInfo),
	}

	// Handle DecorationsRequest.SourceText switch
	if req.SourceText {
		if loc.Kind == xpb.Location_FILE {
			reply.SourceText = text
		} else {
			reply.SourceText = text[loc.Start.ByteOffset:loc.End.ByteOffset]
		}
		reply.Encoding = encoding
	}

	// Handle DecorationsRequest.References switch
	if req.References {
		// Traverse the following chain of edges:
		//   file --%/kythe/edge/childof-> []anchor --forwardEdgeKind-> []target
		//
		// Add []anchor and []target nodes to reply.Nodes
		// Add all {anchor, forwardEdgeKind, target} tuples to reply.Reference

		patterns := xrefs.ConvertFilters(req.Filter)

		children, err := getEdges(ctx, g.gs, fileVName, func(e *spb.Entry) bool {
			return e.EdgeKind == revChildOfEdgeKind
		})
		if err != nil {
			return nil, fmt.Errorf("failed to retrieve file children: %v", err)
		}

		var targetSet stringset.Set
		for _, edge := range children {
			anchor := edge.Target
			ticket := kytheuri.ToString(anchor)
			anchorNodeReply, err := g.Nodes(ctx, &gpb.NodesRequest{
				Ticket: []string{ticket},
			})
			if err != nil {
				return nil, fmt.Errorf("failure getting reference source node: %v", err)
			} else if len(anchorNodeReply.Nodes) != 1 {
				return nil, fmt.Errorf("found %d nodes for {%+v}", len(anchorNodeReply.Nodes), anchor)
			}

			node, ok := xrefs.NodesMap(anchorNodeReply.Nodes)[ticket]
			if !ok {
				return nil, fmt.Errorf("failed to find info for node %q", ticket)
			} else if string(node[facts.NodeKind]) != nodes.Anchor {
				// Skip child if it isn't an anchor node
				continue
			}

			anchorStart, err := strconv.Atoi(string(node[facts.AnchorStart]))
			if err != nil {
				log.Printf("Invalid anchor start offset %q for node %q: %v", node[facts.AnchorStart], ticket, err)
				continue
			}
			anchorEnd, err := strconv.Atoi(string(node[facts.AnchorEnd]))
			if err != nil {
				log.Printf("Invalid anchor end offset %q for node %q: %v", node[facts.AnchorEnd], ticket, err)
				continue
			}

			if loc.Kind == xpb.Location_SPAN {
				// Check if anchor fits within/around requested source text window
				if !xrefs.InSpanBounds(req.SpanKind, int32(anchorStart), int32(anchorEnd), loc.Start.ByteOffset, loc.End.ByteOffset) {
					continue
				} else if anchorStart > anchorEnd {
					log.Printf("Invalid anchor offset span %d:%d", anchorStart, anchorEnd)
					continue
				}
			}

			targets, err := getEdges(ctx, g.gs, anchor, func(e *spb.Entry) bool {
				return edges.IsForward(e.EdgeKind) && e.EdgeKind != edges.ChildOf
			})
			if err != nil {
				return nil, fmt.Errorf("failed to retrieve targets of anchor %v: %v", anchor, err)
			}
			if len(targets) == 0 {
				log.Printf("Anchor missing forward edges: {%+v}", anchor)
				continue
			}

			if node := filterNode(patterns, anchorNodeReply.Nodes[ticket]); node != nil {
				reply.Nodes[ticket] = node
			}
			for _, edge := range targets {
				targetTicket := kytheuri.ToString(edge.Target)
				targetSet.Add(targetTicket)
				reply.Reference = append(reply.Reference, &xpb.DecorationsReply_Reference{
					SourceTicket: ticket,
					Kind:         edge.Kind,
					TargetTicket: targetTicket,
					AnchorStart:  norm.ByteOffset(int32(anchorStart)),
					AnchorEnd:    norm.ByteOffset(int32(anchorEnd)),
				})
			}
		}
		sort.Sort(bySpan(reply.Reference))

		// Only request Nodes when there are fact filters given.
		if len(req.Filter) > 0 {
			// Ensure returned nodes are not duplicated.
			for ticket := range reply.Nodes {
				targetSet.Discard(ticket)
			}

			// Batch request all Reference target nodes
			nodesReply, err := g.Nodes(ctx, &gpb.NodesRequest{
				Ticket: targetSet.Elements(),
				Filter: req.Filter,
			})
			if err != nil {
				return nil, fmt.Errorf("failure getting reference target nodes: %v", err)
			}
			for ticket, node := range nodesReply.Nodes {
				reply.Nodes[ticket] = node
			}
		}
	}

	return reply, nil
}
// Edges implements part of the Service interface.
func (g *GraphStoreService) Edges(ctx context.Context, req *gpb.EdgesRequest) (*gpb.EdgesReply, error) {
	if len(req.Ticket) == 0 {
		return nil, errors.New("no tickets specified")
	} else if req.PageToken != "" {
		return nil, errors.New("UNIMPLEMENTED: page_token")
	}

	patterns := xrefs.ConvertFilters(req.Filter)
	allowedKinds := stringset.New(req.Kind...)
	var targetSet stringset.Set
	reply := &gpb.EdgesReply{
		EdgeSets: make(map[string]*gpb.EdgeSet),
		Nodes:    make(map[string]*cpb.NodeInfo),
	}

	for _, ticket := range req.Ticket {
		vname, err := kytheuri.ToVName(ticket)
		if err != nil {
			return nil, fmt.Errorf("invalid ticket %q: %v", ticket, err)
		}

		var (
			// EdgeKind -> TargetTicket -> OrdinalSet
			filteredEdges = make(map[string]map[string]map[int32]struct{})
			filteredFacts = make(map[string][]byte)
		)

		if err := g.gs.Read(ctx, &spb.ReadRequest{
			Source:   vname,
			EdgeKind: "*",
		}, func(entry *spb.Entry) error {
			edgeKind := entry.EdgeKind
			if edgeKind == "" {
				// node fact
				if len(patterns) > 0 && xrefs.MatchesAny(entry.FactName, patterns) {
					filteredFacts[entry.FactName] = entry.FactValue
				}
			} else {
				// edge
				edgeKind, ordinal, _ := edges.ParseOrdinal(edgeKind)
				if len(req.Kind) == 0 || allowedKinds.Contains(edgeKind) {
					targets, ok := filteredEdges[edgeKind]
					if !ok {
						targets = make(map[string]map[int32]struct{})
						filteredEdges[edgeKind] = targets
					}
					ticket := kytheuri.ToString(entry.Target)
					ordSet, ok := targets[ticket]
					if !ok {
						ordSet = make(map[int32]struct{})
						targets[ticket] = ordSet
					}
					ordSet[int32(ordinal)] = struct{}{}
				}
			}
			return nil
		}); err != nil {
			return nil, fmt.Errorf("failed to retrieve entries for ticket %q", ticket)
		}

		// Only add an EdgeSet if there are targets for the requested edge kinds.
		if len(filteredEdges) > 0 {
			groups := make(map[string]*gpb.EdgeSet_Group)
			for edgeKind, targets := range filteredEdges {
				g := &gpb.EdgeSet_Group{}
				for target, ordinals := range targets {
					for ordinal := range ordinals {
						g.Edge = append(g.Edge, &gpb.EdgeSet_Group_Edge{
							TargetTicket: target,
							Ordinal:      ordinal,
						})
					}
					targetSet.Add(target)
				}
				groups[edgeKind] = g
			}
			reply.EdgeSets[ticket] = &gpb.EdgeSet{
				Groups: groups,
			}

			// In addition, only add a NodeInfo if the filters have resulting facts.
			if len(filteredFacts) > 0 {
				reply.Nodes[ticket] = &cpb.NodeInfo{
					Facts: filteredFacts,
				}
			}
		}
	}

	// Only request Nodes when there are fact filters given.
	if len(req.Filter) > 0 {
		// Eliminate redundant work by removing already requested nodes from targetSet
		for ticket := range reply.Nodes {
			targetSet.Discard(ticket)
		}

		// Batch request all leftover target nodes
		nodesReply, err := g.Nodes(ctx, &gpb.NodesRequest{
			Ticket: targetSet.Elements(),
			Filter: req.Filter,
		})
		if err != nil {
			return nil, fmt.Errorf("failure getting target nodes: %v", err)
		}
		for ticket, node := range nodesReply.Nodes {
			reply.Nodes[ticket] = node
		}
	}

	return reply, nil
}
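// exampleEdges is a minimal sketch, not part of the service itself, of how a
// caller might fetch only childof edges for a node along with the node-kind
// fact of each endpoint.  The ticket is an illustrative assumption; the edge
// kind and fact filter are standard Kythe schema names.
func exampleEdges(ctx context.Context, g *GraphStoreService) error {
	reply, err := g.Edges(ctx, &gpb.EdgesRequest{
		Ticket: []string{"kythe://corpus?lang=go#someNode"}, // assumed ticket
		Kind:   []string{"/kythe/edge/childof"},             // only request childof edges
		Filter: []string{"/kythe/node/kind"},                // facts to return per endpoint
	})
	if err != nil {
		return err
	}
	for src, es := range reply.EdgeSets {
		for kind, grp := range es.Groups {
			log.Printf("%s -%s-> %d target(s)", src, kind, len(grp.Edge))
		}
	}
	return nil
}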
func (d *DB) edges(ctx context.Context, req *gpb.EdgesRequest, edgeFilter func(kind string) bool) (*gpb.EdgesReply, error) {
	tickets, err := xrefs.FixTickets(req.Ticket)
	if err != nil {
		return nil, err
	}

	pageSize := int(req.PageSize)
	if pageSize <= 0 {
		pageSize = defaultPageSize
	} else if pageSize > maxPageSize {
		pageSize = maxPageSize
	}

	var pageOffset int
	if req.PageToken != "" {
		rec, err := base64.StdEncoding.DecodeString(req.PageToken)
		if err != nil {
			return nil, fmt.Errorf("invalid page_token: %q", req.PageToken)
		}
		var t ipb.PageToken
		if err := proto.Unmarshal(rec, &t); err != nil || t.Index < 0 {
			return nil, fmt.Errorf("invalid page_token: %q", req.PageToken)
		}
		pageOffset = int(t.Index)
	}

	// Select only the edges from the given source tickets
	setQ, args := sqlSetQuery(1, tickets)
	query := fmt.Sprintf(`
SELECT * FROM AllEdges
WHERE source IN %s`, setQ)

	// Filter by edges kinds, if given
	if len(req.Kind) > 0 {
		kSetQ, kArgs := sqlSetQuery(1+len(args), req.Kind)
		query += fmt.Sprintf(`
AND kind IN %s`, kSetQ)
		args = append(args, kArgs...)
	}

	// Scan edge sets/groups in order; necessary for CrossReferences
	query += " ORDER BY source, kind, ordinal"

	// Seek to the requested page offset (req.PageToken.Index).  We don't use
	// LIMIT here because we don't yet know how many edges will be filtered by
	// edgeFilter.
	query += fmt.Sprintf(" OFFSET $%d", len(args)+1)
	args = append(args, pageOffset)

	rs, err := d.Query(query, args...)
	if err != nil {
		return nil, fmt.Errorf("error querying for edges: %v", err)
	}
	defer closeRows(rs)

	var scanned int
	// edges := map { source -> kind -> target -> ordinal set }
	edges := make(map[string]map[string]map[string]map[int32]struct{}, len(tickets))
	for count := 0; count < pageSize && rs.Next(); scanned++ {
		var source, kind, target string
		var ordinal int
		if err := rs.Scan(&source, &kind, &target, &ordinal); err != nil {
			return nil, fmt.Errorf("edges scan error: %v", err)
		}

		if edgeFilter != nil && !edgeFilter(kind) {
			continue
		}
		count++

		groups, ok := edges[source]
		if !ok {
			groups = make(map[string]map[string]map[int32]struct{})
			edges[source] = groups
		}
		targets, ok := groups[kind]
		if !ok {
			targets = make(map[string]map[int32]struct{})
			groups[kind] = targets
		}
		ordinals, ok := targets[target]
		if !ok {
			ordinals = make(map[int32]struct{})
			targets[target] = ordinals
		}
		ordinals[int32(ordinal)] = struct{}{}
	}

	reply := &gpb.EdgesReply{EdgeSets: make(map[string]*gpb.EdgeSet, len(edges))}
	var nodeTickets stringset.Set
	for src, groups := range edges {
		gs := make(map[string]*gpb.EdgeSet_Group, len(groups))
		nodeTickets.Add(src)
		for kind, targets := range groups {
			edges := make([]*gpb.EdgeSet_Group_Edge, 0, len(targets))
			for ticket, ordinals := range targets {
				for ordinal := range ordinals {
					edges = append(edges, &gpb.EdgeSet_Group_Edge{
						TargetTicket: ticket,
						Ordinal:      ordinal,
					})
				}
				nodeTickets.Add(ticket)
			}
			sort.Sort(xrefs.ByOrdinal(edges))
			gs[kind] = &gpb.EdgeSet_Group{
				Edge: edges,
			}
		}
		reply.EdgeSets[src] = &gpb.EdgeSet{
			Groups: gs,
		}
	}

	// If there is another row, there is a NextPageToken.
	if rs.Next() {
		rec, err := proto.Marshal(&ipb.PageToken{Index: int32(pageOffset + scanned)})
		if err != nil {
			return nil, fmt.Errorf("internal error: error marshalling page token: %v", err)
		}
		reply.NextPageToken = base64.StdEncoding.EncodeToString(rec)
	}

	// TODO(schroederc): faster node lookups
	if len(req.Filter) > 0 && !nodeTickets.Empty() {
		nodes, err := d.Nodes(ctx, &gpb.NodesRequest{
			Ticket: nodeTickets.Elements(),
			Filter: req.Filter,
		})
		if err != nil {
			return nil, fmt.Errorf("error filtering nodes: %v", err)
		}
		reply.Nodes = nodes.Nodes
	}

	return reply, nil
}
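// exampleEdgePaging is a minimal sketch, not part of this package's API, of
// how a caller in this package could page through all edges for a ticket
// using the PageToken/NextPageToken protocol implemented above.  It calls the
// unexported edges method directly with a nil edge filter; the ticket is an
// illustrative assumption.
func exampleEdgePaging(ctx context.Context, db *DB) (int, error) {
	req := &gpb.EdgesRequest{Ticket: []string{"kythe://corpus?lang=go#someNode"}} // assumed ticket
	var total int
	for {
		reply, err := db.edges(ctx, req, nil)
		if err != nil {
			return 0, err
		}
		// Count every edge on this page.
		for _, es := range reply.EdgeSets {
			for _, grp := range es.Groups {
				total += len(grp.Edge)
			}
		}
		if reply.NextPageToken == "" {
			return total, nil // no more pages
		}
		req.PageToken = reply.NextPageToken // continue from where this page ended
	}
}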