func (tbl *testTable) Construct(t *testing.T) xrefs.Service { p := make(testProtoTable) tickets := stringset.New() for _, n := range tbl.Nodes { tickets.Add(n.Ticket) } for _, es := range tbl.EdgeSets { tickets.Remove(es.Source.Ticket) testutil.FatalOnErrT(t, "Error writing edge set: %v", p.Put(ctx, EdgeSetKey(mustFix(t, es.Source.Ticket)), es)) } // Fill in EdgeSets for zero-degree nodes for ticket := range tickets { es := &srvpb.PagedEdgeSet{ Source: getNode(ticket), } testutil.FatalOnErrT(t, "Error writing edge set: %v", p.Put(ctx, EdgeSetKey(mustFix(t, es.Source.Ticket)), es)) } for _, ep := range tbl.EdgePages { testutil.FatalOnErrT(t, "Error writing edge page: %v", p.Put(ctx, EdgePageKey(ep.PageKey), ep)) } for _, d := range tbl.Decorations { testutil.FatalOnErrT(t, "Error writing file decorations: %v", p.Put(ctx, DecorationsKey(mustFix(t, d.File.Ticket)), d)) } for _, cr := range tbl.RefSets { testutil.FatalOnErrT(t, "Error writing cross-references: %v", p.Put(ctx, CrossReferencesKey(mustFix(t, cr.SourceTicket)), cr)) } for _, crp := range tbl.RefPages { testutil.FatalOnErrT(t, "Error writing cross-references: %v", p.Put(ctx, CrossReferencesPageKey(crp.PageKey), crp)) } return NewCombinedTable(table.ProtoBatchParallel{p}) }
func edgeSet(kinds []string, pes *srvpb.PagedEdgeSet, pages []*srvpb.EdgePage) *xpb.EdgeSet { set := stringset.New(kinds...) es := &xpb.EdgeSet{ SourceTicket: pes.EdgeSet.SourceTicket, } for _, g := range pes.EdgeSet.Group { if set.Contains(g.Kind) || len(set) == 0 { es.Group = append(es.Group, &xpb.EdgeSet_Group{ Kind: g.Kind, TargetTicket: g.TargetTicket, }) } } for _, ep := range pages { g := ep.EdgesGroup if set.Contains(g.Kind) || len(set) == 0 { es.Group = append(es.Group, &xpb.EdgeSet_Group{ Kind: g.Kind, TargetTicket: g.TargetTicket, }) } } return es }
// SourceFromEntries returns a new Source from the given a set of entries with // the same source VName. func SourceFromEntries(entries []*spb.Entry) *Source { if len(entries) == 0 { return nil } src := &Source{ Ticket: kytheuri.ToString(entries[0].Source), Facts: make(map[string][]byte), Edges: make(map[string][]string), } edgeTargets := make(map[string]stringset.Set) for _, e := range entries { if graphstore.IsEdge(e) { tgts, ok := edgeTargets[e.EdgeKind] if !ok { tgts = stringset.New() edgeTargets[e.EdgeKind] = tgts } tgts.Add(kytheuri.ToString(e.Target)) } else { src.Facts[e.FactName] = e.FactValue } } for kind, targets := range edgeTargets { src.Edges[kind] = targets.Slice() sort.Strings(src.Edges[kind]) } return src }
// Decorations implements part of the xrefs.Interface. func (d *DB) Decorations(ctx context.Context, req *xpb.DecorationsRequest) (*xpb.DecorationsReply, error) { if d.selectText == nil { var err error d.selectText, err = d.Prepare("SELECT text, text_encoding FROM Nodes WHERE ticket = $1;") if err != nil { return nil, fmt.Errorf("error preparing selectText statement: %v", err) } } // TODO(schroederc): dirty buffers // TODO(schroederc): span locations fileTicket, err := kytheuri.Fix(req.Location.Ticket) if err != nil { return nil, fmt.Errorf("invalid location ticket: %v", err) } req.Location.Ticket = fileTicket decor := &xpb.DecorationsReply{Location: req.Location} r := d.selectText.QueryRow(fileTicket) var text []byte var textEncoding sql.NullString if err := r.Scan(&text, &textEncoding); err != nil { return nil, err } norm := xrefs.NewNormalizer(text) if req.SourceText { decor.SourceText = text decor.Encoding = textEncoding.String } if req.References { var err error decor.Reference, err = d.scanReferences(fileTicket, norm) if err != nil { return nil, err } if len(req.Filter) > 0 && len(decor.Reference) > 0 { nodeTickets := stringset.New() for _, r := range decor.Reference { nodeTickets.Add(r.TargetTicket) } nodes, err := d.Nodes(ctx, &xpb.NodesRequest{ Ticket: nodeTickets.Slice(), Filter: req.Filter, }) if err != nil { return nil, fmt.Errorf("error filtering nodes:%v", err) } decor.Nodes = nodes.Nodes } } return decor, nil }
func main() { flag.Parse() if len(flag.Args()) == 0 { flagutil.UsageError("not given any files") } xs := xrefs.WebClient(*remoteAPI) for _, file := range flag.Args() { ticket := (&kytheuri.URI{Corpus: *corpus, Path: file}).String() decor, err := xs.Decorations(ctx, &xpb.DecorationsRequest{ Location: &xpb.Location{Ticket: ticket}, SourceText: true, References: true, }) if err != nil { log.Fatalf("Failed to get decorations for file %q", file) } nodes := xrefs.NodesMap(decor.Nodes) emitted := stringset.New() for _, r := range decor.Reference { if r.Kind != schema.DefinesBindingEdge || emitted.Contains(r.TargetTicket) { continue } ident := string(nodes[r.TargetTicket][identifierFact]) if ident == "" { continue } offset, err := strconv.Atoi(string(nodes[r.SourceTicket][schema.AnchorStartFact])) if err != nil { log.Printf("Invalid start offset for anchor %q", r.SourceTicket) continue } fields, err := getTagFields(xs, r.TargetTicket) if err != nil { log.Printf("Failed to get tagfields for %q: %v", r.TargetTicket, err) } fmt.Printf("%s\t%s\t%d;\"\t%s\n", ident, file, offsetLine(decor.SourceText, offset), strings.Join(fields, "\t")) emitted.Add(r.TargetTicket) } } }
// Edges implements part of the xrefs Service interface. func (t *tableImpl) Edges(ctx context.Context, req *xpb.EdgesRequest) (*xpb.EdgesReply, error) { tickets, err := xrefs.FixTickets(req.Ticket) if err != nil { return nil, err } allowedKinds := stringset.New(req.Kind...) return t.edges(ctx, edgesRequest{ Tickets: tickets, Filters: req.Filter, Kinds: func(kind string) bool { return len(allowedKinds) == 0 || allowedKinds.Contains(kind) }, PageSize: int(req.PageSize), PageToken: req.PageToken, }) }
func displayTargets(edges []*xpb.EdgeSet) error { targets := stringset.New() for _, es := range edges { for _, g := range es.Group { targets.Add(g.TargetTicket...) } } if *displayJSON { return json.NewEncoder(out).Encode(targets.Slice()) } for target := range targets { if _, err := fmt.Fprintln(out, target); err != nil { return err } } return nil }
func writeEdges(ctx context.Context, t table.Proto, edges <-chan *spb.Entry, maxEdgePageSize int) error { defer drainEntries(edges) // ensure channel is drained on errors temp, err := tempTable("edge.groups") if err != nil { return fmt.Errorf("failed to create temporary table: %v", err) } edgeGroups := &table.KVProto{temp} defer func() { if err := edgeGroups.Close(ctx); err != nil { log.Println("Error closing edge groups table: %v", err) } }() log.Println("Writing temporary edges table") var ( src *spb.VName kind string targets stringset.Set ) for e := range edges { if src != nil && (!compare.VNamesEqual(e.Source, src) || kind != e.EdgeKind) { if err := writeWithReverses(ctx, edgeGroups, kytheuri.ToString(src), kind, targets.Slice()); err != nil { return err } src = nil } if src == nil { src = e.Source kind = e.EdgeKind targets = stringset.New() } targets.Add(kytheuri.ToString(e.Target)) } if src != nil { if err := writeWithReverses(ctx, edgeGroups, kytheuri.ToString(src), kind, targets.Slice()); err != nil { return err } } return writeEdgePages(ctx, t, edgeGroups, maxEdgePageSize) }
func edgeSet(kinds []string, pes *srvpb.PagedEdgeSet, pages []*srvpb.EdgePage) *xpb.EdgeSet { set := stringset.New(kinds...) es := &xpb.EdgeSet{Groups: make(map[string]*xpb.EdgeSet_Group, len(pes.Group))} for _, g := range pes.Group { if set.Contains(g.Kind) || len(set) == 0 { es.Groups[g.Kind] = &xpb.EdgeSet_Group{ Edge: e2e(g.Edge), } } } for _, ep := range pages { g := ep.EdgesGroup if set.Contains(g.Kind) || len(set) == 0 { es.Groups[g.Kind] = &xpb.EdgeSet_Group{ Edge: e2e(g.Edge), } } } return es }
// Edges implements part of the xrefs.Service interface. func (db *DB) Edges(req *xpb.EdgesRequest) (*xpb.EdgesReply, error) { if req.PageSize != 0 || req.PageToken != "" { return nil, errors.New("edge pages unimplemented for SQL DB") } reply := &xpb.EdgesReply{} allowedKinds := stringset.New(req.Kind...) nodeTickets := stringset.New() for _, t := range req.Ticket { uri, err := kytheuri.Parse(t) if err != nil { return nil, err } edges := make(map[string]stringset.Set) var ( target kytheuri.URI kind string ) rows, err := db.edgesStmt.Query(uri.Signature, uri.Corpus, uri.Root, uri.Path, uri.Language) if err != nil { rows.Close() return nil, fmt.Errorf("forward edges query error: %v", err) } for rows.Next() { if err := rows.Scan(&target.Signature, &target.Corpus, &target.Root, &target.Path, &target.Language, &kind); err != nil { rows.Close() return nil, fmt.Errorf("forward edges scan error: %v", err) } else if len(allowedKinds) != 0 && !allowedKinds.Contains(kind) { continue } targets, ok := edges[kind] if !ok { targets = stringset.New() edges[kind] = targets } ticket := target.String() targets.Add(ticket) nodeTickets.Add(ticket) } if err := rows.Close(); err != nil { return nil, err } rows, err = db.revEdgesStmt.Query(uri.Signature, uri.Corpus, uri.Root, uri.Path, uri.Language) if err != nil { rows.Close() return nil, fmt.Errorf("reverse edges query error: %v", err) } for rows.Next() { if err := rows.Scan(&target.Signature, &target.Corpus, &target.Root, &target.Path, &target.Language, &kind); err != nil { rows.Close() return nil, fmt.Errorf("reverse edges scan error: %v", err) } kind = schema.MirrorEdge(kind) if len(allowedKinds) != 0 && !allowedKinds.Contains(kind) { continue } targets, ok := edges[kind] if !ok { targets = stringset.New() edges[kind] = targets } ticket := target.String() targets.Add(ticket) nodeTickets.Add(ticket) } if err := rows.Close(); err != nil { return nil, err } var g []*xpb.EdgeSet_Group for kind, targets := range edges { g = append(g, 
&xpb.EdgeSet_Group{ Kind: kind, TargetTicket: targets.Slice(), }) } if len(g) != 0 { reply.EdgeSet = append(reply.EdgeSet, &xpb.EdgeSet{ SourceTicket: t, Group: g, }) } } nodesReply, err := db.Nodes(&xpb.NodesRequest{ Ticket: nodeTickets.Slice(), Filter: req.Filter, }) if err != nil { return nil, fmt.Errorf("nodes request error: %v", err) } reply.Node = nodesReply.Node return reply, nil }
// CrossReferences implements part of the xrefs Service interface.
//
// Cross-references are derived entirely from the Edges service: each edge
// group is classified as a definition, reference, documentation, or related
// node per the request's kind filters, and anchor tickets are resolved to
// full anchors via completeAnchors.  If a page of edges produces no
// cross-references, the next page is fetched until one does (or pages run
// out), so a non-empty reply is returned whenever any xrefs exist.
func (g *GraphStoreService) CrossReferences(ctx context.Context, req *xpb.CrossReferencesRequest) (*xpb.CrossReferencesReply, error) {
	if len(req.Ticket) == 0 {
		return nil, errors.New("no cross-references requested")
	}

	requestedPageSize := int(req.PageSize)
	if requestedPageSize == 0 {
		requestedPageSize = defaultXRefPageSize
	}

	eReply, err := g.Edges(ctx, &xpb.EdgesRequest{
		Ticket:    req.Ticket,
		PageSize:  int32(requestedPageSize),
		PageToken: req.PageToken,
	})
	if err != nil {
		return nil, fmt.Errorf("error getting edges for cross-references: %v", err)
	}

	reply := &xpb.CrossReferencesReply{
		CrossReferences: make(map[string]*xpb.CrossReferencesReply_CrossReferenceSet),
		NextPageToken:   eReply.NextPageToken,
	}
	// allRelatedNodes is non-nil only when fact filters were given; it doubles
	// as the "collect related nodes" flag in the switch below.
	var allRelatedNodes stringset.Set
	if len(req.Filter) > 0 {
		reply.Nodes = make(map[string]*xpb.NodeInfo)
		allRelatedNodes = stringset.New()
	}

	// Cache parent files across all anchors
	files := make(map[string]*fileNode)

	var totalXRefs int
	for {
		for source, es := range eReply.EdgeSets {
			xr, ok := reply.CrossReferences[source]
			if !ok {
				xr = &xpb.CrossReferencesReply_CrossReferenceSet{Ticket: source}
			}

			// count tracks how many xrefs this source contributed; the set is
			// only added to the reply when it is non-zero.
			var count int
			for kind, grp := range es.Groups {
				switch {
				// TODO(schroeder): handle declarations
				case xrefs.IsDefKind(req.DefinitionKind, kind, false):
					anchors, err := completeAnchors(ctx, g, req.AnchorText, files, kind, edgeTickets(grp.Edge))
					if err != nil {
						return nil, fmt.Errorf("error resolving definition anchors: %v", err)
					}
					count += len(anchors)
					xr.Definition = append(xr.Definition, anchors...)
				case xrefs.IsRefKind(req.ReferenceKind, kind):
					anchors, err := completeAnchors(ctx, g, req.AnchorText, files, kind, edgeTickets(grp.Edge))
					if err != nil {
						return nil, fmt.Errorf("error resolving reference anchors: %v", err)
					}
					count += len(anchors)
					xr.Reference = append(xr.Reference, anchors...)
				case xrefs.IsDocKind(req.DocumentationKind, kind):
					anchors, err := completeAnchors(ctx, g, req.AnchorText, files, kind, edgeTickets(grp.Edge))
					if err != nil {
						return nil, fmt.Errorf("error resolving documentation anchors: %v", err)
					}
					count += len(anchors)
					xr.Documentation = append(xr.Documentation, anchors...)
				case allRelatedNodes != nil && !schema.IsAnchorEdge(kind):
					// Any non-anchor edge kind counts as a related node when
					// fact filters were requested.
					count += len(grp.Edge)
					for _, edge := range grp.Edge {
						xr.RelatedNode = append(xr.RelatedNode, &xpb.CrossReferencesReply_RelatedNode{
							Ticket:       edge.TargetTicket,
							RelationKind: kind,
							Ordinal:      edge.Ordinal,
						})
						allRelatedNodes.Add(edge.TargetTicket)
					}
				}
			}

			if count > 0 {
				reply.CrossReferences[xr.Ticket] = xr
				totalXRefs += count
			}
		}

		if reply.NextPageToken == "" || totalXRefs > 0 {
			break
		}

		// We need to return at least 1 xref, if there are any
		log.Println("Extra CrossReferences Edges call: ", reply.NextPageToken)
		eReply, err = g.Edges(ctx, &xpb.EdgesRequest{
			Ticket:    req.Ticket,
			PageSize:  int32(requestedPageSize),
			PageToken: reply.NextPageToken,
		})
		if err != nil {
			return nil, fmt.Errorf("error getting edges for cross-references: %v", err)
		}
		reply.NextPageToken = eReply.NextPageToken
	}

	// Batch-fetch the filtered facts for every related node collected above.
	if len(allRelatedNodes) > 0 {
		nReply, err := g.Nodes(ctx, &xpb.NodesRequest{
			Ticket: allRelatedNodes.Slice(),
			Filter: req.Filter,
		})
		if err != nil {
			return nil, fmt.Errorf("error retrieving related nodes: %v", err)
		}
		for ticket, n := range nReply.Nodes {
			reply.Nodes[ticket] = n
		}
	}

	return reply, nil
}
// Edges implements part of the Service interface. func (g *GraphStoreService) Edges(ctx context.Context, req *xpb.EdgesRequest) (*xpb.EdgesReply, error) { if len(req.Ticket) == 0 { return nil, errors.New("no tickets specified") } else if req.PageToken != "" { return nil, errors.New("UNIMPLEMENTED: page_token") } patterns := xrefs.ConvertFilters(req.Filter) allowedKinds := stringset.New(req.Kind...) targetSet := stringset.New() reply := new(xpb.EdgesReply) for _, ticket := range req.Ticket { vname, err := kytheuri.ToVName(ticket) if err != nil { return nil, fmt.Errorf("invalid ticket %q: %v", ticket, err) } var ( // EdgeKind -> StringSet<TargetTicket> filteredEdges = make(map[string]stringset.Set) filteredFacts []*xpb.Fact ) if err := g.gs.Read(ctx, &spb.ReadRequest{ Source: vname, EdgeKind: "*", }, func(entry *spb.Entry) error { edgeKind := entry.EdgeKind if edgeKind == "" { // node fact if len(patterns) > 0 && xrefs.MatchesAny(entry.FactName, patterns) { filteredFacts = append(filteredFacts, entryToFact(entry)) } } else { // edge if len(req.Kind) == 0 || allowedKinds.Contains(edgeKind) { targets := filteredEdges[edgeKind] if targets == nil { targets = stringset.New() filteredEdges[edgeKind] = targets } targets.Add(kytheuri.ToString(entry.Target)) } } return nil }); err != nil { return nil, fmt.Errorf("failed to retrieve entries for ticket %q", ticket) } // Only add a EdgeSet if there are targets for the requested edge kinds. if len(filteredEdges) > 0 { var groups []*xpb.EdgeSet_Group for edgeKind, targets := range filteredEdges { g := &xpb.EdgeSet_Group{Kind: edgeKind} for target := range targets { g.TargetTicket = append(g.TargetTicket, target) targetSet.Add(target) } groups = append(groups, g) } reply.EdgeSet = append(reply.EdgeSet, &xpb.EdgeSet{ SourceTicket: ticket, Group: groups, }) // In addition, only add a NodeInfo if the filters have resulting facts. 
if len(filteredFacts) > 0 { reply.Node = append(reply.Node, &xpb.NodeInfo{ Ticket: ticket, Fact: filteredFacts, }) } } } // Only request Nodes when there are fact filters given. if len(req.Filter) > 0 { // Ensure reply.Node is a unique set by removing already requested nodes from targetSet for _, n := range reply.Node { targetSet.Remove(n.Ticket) } // Batch request all leftover target nodes nodesReply, err := g.Nodes(ctx, &xpb.NodesRequest{ Ticket: targetSet.Slice(), Filter: req.Filter, }) if err != nil { return nil, fmt.Errorf("failure getting target nodes: %v", err) } reply.Node = append(reply.Node, nodesReply.Node...) } return reply, nil }
// CrossReferences implements part of the xrefs.Service interface.
//
// The reply is assembled from precomputed PagedCrossReferences serving data:
// in-line groups are classified by kind (definitions, declarations,
// documentation, references), overflow pages are fetched on demand, caller
// anchors are computed via the Slow* helpers, and related nodes come from a
// secondary edges request when fact filters are present.  Paging state is a
// base64-encoded ipb.PageToken carrying an anchor index plus a secondary
// token for the edges sub-request.
func (t *tableImpl) CrossReferences(ctx context.Context, req *xpb.CrossReferencesRequest) (*xpb.CrossReferencesReply, error) {
	tickets, err := xrefs.FixTickets(req.Ticket)
	if err != nil {
		return nil, err
	}

	// Clamp the requested page size into (0, maxPageSize].
	stats := refStats{
		max: int(req.PageSize),
	}
	if stats.max < 0 {
		return nil, fmt.Errorf("invalid page_size: %d", req.PageSize)
	} else if stats.max == 0 {
		stats.max = defaultPageSize
	} else if stats.max > maxPageSize {
		stats.max = maxPageSize
	}

	// Decode the page token: Index is how many anchors to skip;
	// SecondaryToken continues the related-nodes edges request.
	var edgesPageToken string
	if req.PageToken != "" {
		rec, err := base64.StdEncoding.DecodeString(req.PageToken)
		if err != nil {
			return nil, fmt.Errorf("invalid page_token: %q", req.PageToken)
		}
		var t ipb.PageToken
		if err := proto.Unmarshal(rec, &t); err != nil || t.Index < 0 {
			return nil, fmt.Errorf("invalid page_token: %q", req.PageToken)
		}
		stats.skip = int(t.Index)
		edgesPageToken = t.SecondaryToken
	}
	pageToken := stats.skip

	reply := &xpb.CrossReferencesReply{
		CrossReferences: make(map[string]*xpb.CrossReferencesReply_CrossReferenceSet, len(req.Ticket)),
		Nodes:           make(map[string]*xpb.NodeInfo, len(req.Ticket)),

		Total: &xpb.CrossReferencesReply_Total{},
	}
	var nextToken *ipb.PageToken

	// Anchors are only collected while still on the first (non-secondary)
	// page and at least one xref category was requested; totals are always
	// accumulated regardless.
	wantMoreCrossRefs := edgesPageToken == "" &&
		(req.DefinitionKind != xpb.CrossReferencesRequest_NO_DEFINITIONS ||
			req.DeclarationKind != xpb.CrossReferencesRequest_NO_DECLARATIONS ||
			req.ReferenceKind != xpb.CrossReferencesRequest_NO_REFERENCES ||
			req.DocumentationKind != xpb.CrossReferencesRequest_NO_DOCUMENTATION ||
			req.CallerKind != xpb.CrossReferencesRequest_NO_CALLERS)

	for _, ticket := range tickets {
		// TODO(schroederc): retrieve PagedCrossReferences in parallel
		cr, err := t.crossReferences(ctx, ticket)
		if err == table.ErrNoSuchKey {
			log.Println("Missing CrossReferences:", ticket)
			continue
		} else if err != nil {
			return nil, fmt.Errorf("error looking up cross-references for ticket %q: %v", ticket, err)
		}

		crs := &xpb.CrossReferencesReply_CrossReferenceSet{
			Ticket: ticket,
		}
		if req.ExperimentalSignatures {
			// Signature failure is non-fatal: log and continue without it.
			crs.DisplayName, err = xrefs.SlowSignature(ctx, t, ticket)
			if err != nil {
				log.Printf("WARNING: error looking up signature for ticket %q: %v", ticket, err)
			}
		}

		// Classify the in-line (non-paged) anchor groups.
		for _, grp := range cr.Group {
			switch {
			case xrefs.IsDefKind(req.DefinitionKind, grp.Kind, cr.Incomplete):
				reply.Total.Definitions += int64(len(grp.Anchor))
				if wantMoreCrossRefs {
					stats.addAnchors(&crs.Definition, grp.Anchor, req.AnchorText)
				}
			case xrefs.IsDeclKind(req.DeclarationKind, grp.Kind, cr.Incomplete):
				reply.Total.Declarations += int64(len(grp.Anchor))
				if wantMoreCrossRefs {
					stats.addAnchors(&crs.Declaration, grp.Anchor, req.AnchorText)
				}
			case xrefs.IsDocKind(req.DocumentationKind, grp.Kind):
				reply.Total.Documentation += int64(len(grp.Anchor))
				if wantMoreCrossRefs {
					stats.addAnchors(&crs.Documentation, grp.Anchor, req.AnchorText)
				}
			case xrefs.IsRefKind(req.ReferenceKind, grp.Kind):
				reply.Total.References += int64(len(grp.Anchor))
				if wantMoreCrossRefs {
					stats.addAnchors(&crs.Reference, grp.Anchor, req.AnchorText)
				}
			}
		}

		// Callers are computed on the fly (not precomputed in serving data).
		if wantMoreCrossRefs && req.CallerKind != xpb.CrossReferencesRequest_NO_CALLERS {
			anchors, err := xrefs.SlowCallersForCrossReferences(ctx, t, req.CallerKind == xpb.CrossReferencesRequest_OVERRIDE_CALLERS, req.ExperimentalSignatures, ticket)
			if err != nil {
				return nil, fmt.Errorf("error in SlowCallersForCrossReferences: %v", err)
			}
			reply.Total.Callers += int64(len(anchors))
			stats.addRelatedAnchors(&crs.Caller, anchors, req.AnchorText)
		}

		// Classify the overflow pages; each page is fetched only when its
		// anchors fall inside the requested window (skipPage handles the
		// skip/count bookkeeping).
		for _, idx := range cr.PageIndex {
			switch {
			case xrefs.IsDefKind(req.DefinitionKind, idx.Kind, cr.Incomplete):
				reply.Total.Definitions += int64(idx.Count)
				if wantMoreCrossRefs && !stats.skipPage(idx) {
					p, err := t.crossReferencesPage(ctx, idx.PageKey)
					if err != nil {
						return nil, fmt.Errorf("internal error: error retrieving cross-references page: %v", idx.PageKey)
					}
					stats.addAnchors(&crs.Definition, p.Group.Anchor, req.AnchorText)
				}
			case xrefs.IsDeclKind(req.DeclarationKind, idx.Kind, cr.Incomplete):
				reply.Total.Declarations += int64(idx.Count)
				if wantMoreCrossRefs && !stats.skipPage(idx) {
					p, err := t.crossReferencesPage(ctx, idx.PageKey)
					if err != nil {
						return nil, fmt.Errorf("internal error: error retrieving cross-references page: %v", idx.PageKey)
					}
					stats.addAnchors(&crs.Declaration, p.Group.Anchor, req.AnchorText)
				}
			case xrefs.IsDocKind(req.DocumentationKind, idx.Kind):
				reply.Total.Documentation += int64(idx.Count)
				if wantMoreCrossRefs && !stats.skipPage(idx) {
					p, err := t.crossReferencesPage(ctx, idx.PageKey)
					if err != nil {
						return nil, fmt.Errorf("internal error: error retrieving cross-references page: %v", idx.PageKey)
					}
					stats.addAnchors(&crs.Documentation, p.Group.Anchor, req.AnchorText)
				}
			case xrefs.IsRefKind(req.ReferenceKind, idx.Kind):
				reply.Total.References += int64(idx.Count)
				if wantMoreCrossRefs && !stats.skipPage(idx) {
					p, err := t.crossReferencesPage(ctx, idx.PageKey)
					if err != nil {
						return nil, fmt.Errorf("internal error: error retrieving cross-references page: %v", idx.PageKey)
					}
					stats.addAnchors(&crs.Reference, p.Group.Anchor, req.AnchorText)
				}
			}
		}

		if len(crs.Declaration) > 0 || len(crs.Definition) > 0 || len(crs.Reference) > 0 || len(crs.Documentation) > 0 || len(crs.Caller) > 0 {
			reply.CrossReferences[crs.Ticket] = crs
		}
	}

	// More anchors remain beyond this page: encode a continuation index.
	if pageToken+stats.total != sumTotalCrossRefs(reply.Total) && stats.total != 0 {
		nextToken = &ipb.PageToken{Index: int32(pageToken + stats.total)}
	}

	// Related nodes come from non-anchor edges; only fetched when fact
	// filters were given.
	if len(req.Filter) > 0 {
		er, err := t.edges(ctx, edgesRequest{
			Tickets:   tickets,
			Filters:   req.Filter,
			Kinds:     func(kind string) bool { return !schema.IsAnchorEdge(kind) },
			PageToken: edgesPageToken,
			TotalOnly: (stats.max <= stats.total),
			PageSize:  stats.max - stats.total,
		})
		if err != nil {
			return nil, fmt.Errorf("error getting related nodes: %v", err)
		}
		reply.Total.RelatedNodesByRelation = er.TotalEdgesByKind
		for ticket, es := range er.EdgeSets {
			nodes := stringset.New()
			crs, ok := reply.CrossReferences[ticket]
			if !ok {
				crs = &xpb.CrossReferencesReply_CrossReferenceSet{
					Ticket: ticket,
				}
			}
			for kind, g := range es.Groups {
				for _, edge := range g.Edge {
					nodes.Add(edge.TargetTicket)
					crs.RelatedNode = append(crs.RelatedNode, &xpb.CrossReferencesReply_RelatedNode{
						RelationKind: kind,
						Ticket:       edge.TargetTicket,
						Ordinal:      edge.Ordinal,
					})
				}
			}
			if len(nodes) > 0 {
				// Copy only the node facts actually referenced by this set.
				for ticket, n := range er.Nodes {
					if nodes.Contains(ticket) {
						reply.Nodes[ticket] = n
					}
				}
			}
			if !ok && len(crs.RelatedNode) > 0 {
				reply.CrossReferences[ticket] = crs
			}
		}
		if er.NextPageToken != "" {
			nextToken = &ipb.PageToken{SecondaryToken: er.NextPageToken}
		}
	}

	if nextToken != nil {
		rec, err := proto.Marshal(nextToken)
		if err != nil {
			return nil, fmt.Errorf("internal error: error marshalling page token: %v", err)
		}
		reply.NextPageToken = base64.StdEncoding.EncodeToString(rec)
	}

	if req.NodeDefinitions {
		nodeTickets := make([]string, 0, len(reply.Nodes))
		for ticket := range reply.Nodes {
			nodeTickets = append(nodeTickets, ticket)
		}

		// TODO(schroederc): cache this in the serving data
		defs, err := xrefs.SlowDefinitions(t, ctx, nodeTickets)
		if err != nil {
			return nil, fmt.Errorf("error retrieving node definitions: %v", err)
		}

		reply.DefinitionLocations = make(map[string]*xpb.Anchor, len(defs))
		for ticket, def := range defs {
			node, ok := reply.Nodes[ticket]
			if !ok {
				panic(fmt.Sprintf("extra definition returned for unknown node %q: %v", ticket, def))
			}
			node.Definition = def.Ticket
			if _, ok := reply.DefinitionLocations[def.Ticket]; !ok {
				reply.DefinitionLocations[def.Ticket] = def
			}
		}
	}

	return reply, nil
}
// edges is the shared implementation behind Edges and the related-nodes
// portion of CrossReferences.  It streams each requested ticket's
// PagedEdgeSet, filters groups by req.Kinds, pulls overflow EdgePages as
// needed, and paginates by a decoded ipb.PageToken index.  When req.TotalOnly
// is set, only TotalEdgesByKind is populated (max is forced to 0 so no edges
// are returned).
func (t *tableImpl) edges(ctx context.Context, req edgesRequest) (*xpb.EdgesReply, error) {
	// Clamp the page size into (0, maxPageSize]; TotalOnly disables paging.
	stats := filterStats{
		max: int(req.PageSize),
	}
	if req.TotalOnly {
		stats.max = 0
	} else if stats.max < 0 {
		return nil, fmt.Errorf("invalid page_size: %d", req.PageSize)
	} else if stats.max == 0 {
		stats.max = defaultPageSize
	} else if stats.max > maxPageSize {
		stats.max = maxPageSize
	}

	// Decode the page token; Index is the number of edges to skip.
	if req.PageToken != "" {
		rec, err := base64.StdEncoding.DecodeString(req.PageToken)
		if err != nil {
			return nil, fmt.Errorf("invalid page_token: %q", req.PageToken)
		}
		var t ipb.PageToken
		if err := proto.Unmarshal(rec, &t); err != nil || t.Index < 0 {
			return nil, fmt.Errorf("invalid page_token: %q", req.PageToken)
		}
		stats.skip = int(t.Index)
	}
	pageToken := stats.skip

	// nodeTickets tracks which nodes already have a NodeInfo in the reply.
	nodeTickets := stringset.New()

	rs, err := t.pagedEdgeSets(ctx, req.Tickets)
	if err != nil {
		return nil, err
	}
	defer func() {
		// drain channel in case of errors or early return
		for _ = range rs {
		}
	}()

	patterns := xrefs.ConvertFilters(req.Filters)

	reply := &xpb.EdgesReply{
		EdgeSets: make(map[string]*xpb.EdgeSet),
		Nodes:    make(map[string]*xpb.NodeInfo),

		TotalEdgesByKind: make(map[string]int64),
	}
	for r := range rs {
		// Tickets with no serving data are silently skipped.
		if r.Err == table.ErrNoSuchKey {
			continue
		} else if r.Err != nil {
			return nil, r.Err
		}
		pes := r.PagedEdgeSet
		// Totals are counted even for edges outside the current page.
		countEdgeKinds(pes, req.Kinds, reply.TotalEdgesByKind)

		// Don't scan the EdgeSet_Groups if we're already at the specified page_size.
		if stats.total == stats.max {
			continue
		}

		// In-line groups first; stats.filter applies the skip/max window.
		groups := make(map[string]*xpb.EdgeSet_Group)
		for _, grp := range pes.Group {
			if req.Kinds == nil || req.Kinds(grp.Kind) {
				ng, ns := stats.filter(grp)
				if ng != nil {
					for _, n := range ns {
						if len(patterns) > 0 && !nodeTickets.Contains(n.Ticket) {
							nodeTickets.Add(n.Ticket)
							reply.Nodes[n.Ticket] = nodeToInfo(patterns, n)
						}
					}
					groups[grp.Kind] = ng
					if stats.total == stats.max {
						break
					}
				}
			}
		}

		// TODO(schroederc): ensure that pes.EdgeSet.Groups and pes.PageIndexes of
		// the same kind are grouped together in the EdgesReply

		// Then overflow pages, fetched only when not skipped entirely.
		if stats.total != stats.max {
			for _, idx := range pes.PageIndex {
				if req.Kinds == nil || req.Kinds(idx.EdgeKind) {
					if stats.skipPage(idx) {
						log.Printf("Skipping EdgePage: %s", idx.PageKey)
						continue
					}

					log.Printf("Retrieving EdgePage: %s", idx.PageKey)
					ep, err := t.edgePage(ctx, idx.PageKey)
					if err == table.ErrNoSuchKey {
						return nil, fmt.Errorf("internal error: missing edge page: %q", idx.PageKey)
					} else if err != nil {
						return nil, fmt.Errorf("edge page lookup error (page key: %q): %v", idx.PageKey, err)
					}

					ng, ns := stats.filter(ep.EdgesGroup)
					if ng != nil {
						for _, n := range ns {
							if len(patterns) > 0 && !nodeTickets.Contains(n.Ticket) {
								nodeTickets.Add(n.Ticket)
								reply.Nodes[n.Ticket] = nodeToInfo(patterns, n)
							}
						}
						groups[ep.EdgesGroup.Kind] = ng
						if stats.total == stats.max {
							break
						}
					}
				}
			}
		}

		if len(groups) > 0 {
			reply.EdgeSets[pes.Source.Ticket] = &xpb.EdgeSet{Groups: groups}

			// The source node's own facts are included when filters are given.
			if len(patterns) > 0 && !nodeTickets.Contains(pes.Source.Ticket) {
				nodeTickets.Add(pes.Source.Ticket)
				reply.Nodes[pes.Source.Ticket] = nodeToInfo(patterns, pes.Source)
			}
		}
	}

	// Internal invariants: the returned window must fit within both the page
	// size and the kind-filtered totals; violations indicate a server bug.
	totalEdgesPossible := int(sumEdgeKinds(reply.TotalEdgesByKind))
	if stats.total > stats.max {
		log.Panicf("totalEdges greater than maxEdges: %d > %d", stats.total, stats.max)
	} else if pageToken+stats.total > totalEdgesPossible && pageToken <= totalEdgesPossible {
		log.Panicf("pageToken+totalEdges greater than totalEdgesPossible: %d+%d > %d", pageToken, stats.total, totalEdgesPossible)
	}

	// Emit a continuation token only when edges remain beyond this page.
	if pageToken+stats.total != totalEdgesPossible && stats.total != 0 {
		rec, err := proto.Marshal(&ipb.PageToken{Index: int32(pageToken + stats.total)})
		if err != nil {
			return nil, fmt.Errorf("internal error: error marshalling page token: %v", err)
		}
		reply.NextPageToken = base64.StdEncoding.EncodeToString(rec)
	}

	return reply, nil
}
// Decorations implements part of the xrefs.Service interface. func (db *DB) Decorations(req *xpb.DecorationsRequest) (*xpb.DecorationsReply, error) { if req.GetLocation() == nil { return nil, errors.New("missing location") } else if req.Location.Kind != xpb.Location_FILE { return nil, fmt.Errorf("%s location kind unimplemented", req.Location.Kind) } else if len(req.DirtyBuffer) != 0 { return nil, errors.New("dirty buffers unimplemented") } fileTicket := req.Location.Ticket edgesReply, err := db.Edges(&xpb.EdgesRequest{ Ticket: []string{fileTicket}, Kind: []string{revChildOfEdge}, Filter: []string{schema.NodeKindFact, schema.TextFact, schema.TextEncodingFact}, }) if err != nil { return nil, err } reply := &xpb.DecorationsReply{ Location: &xpb.Location{ Ticket: fileTicket, }, } nodes := xrefs.NodesMap(edgesReply.Node) if req.SourceText { if nodes[fileTicket] == nil { nodesReply, err := db.Nodes(&xpb.NodesRequest{Ticket: []string{fileTicket}}) if err != nil { return nil, err } nodes = xrefs.NodesMap(nodesReply.Node) } reply.SourceText = nodes[fileTicket][schema.TextFact] reply.Encoding = string(nodes[fileTicket][schema.TextEncodingFact]) } nodeTickets := stringset.New() if req.References { // Traverse the following chain of edges: // file --%/kythe/edge/childof-> []anchor --forwardEdgeKind-> []target edges := xrefs.EdgesMap(edgesReply.EdgeSet) for _, anchor := range edges[fileTicket][revChildOfEdge] { if string(nodes[anchor][schema.NodeKindFact]) != schema.AnchorKind { continue } uri, err := kytheuri.Parse(anchor) if err != nil { return nil, fmt.Errorf("invalid anchor ticket found %q: %v", anchor, err) } rows, err := db.anchorEdgesStmt.Query(schema.ChildOfEdge, uri.Signature, uri.Corpus, uri.Root, uri.Path, uri.Language) if err != nil { return nil, fmt.Errorf("anchor %q edge query error: %v", anchor, err) } for rows.Next() { var target kytheuri.URI var kind string if err := rows.Scan(&target.Signature, &target.Corpus, &target.Root, &target.Path, &target.Language, 
&kind); err != nil { return nil, fmt.Errorf("anchor %q edge scan error: %v", anchor, err) } ticket := target.String() reply.Reference = append(reply.Reference, &xpb.DecorationsReply_Reference{ SourceTicket: anchor, Kind: kind, TargetTicket: ticket, }) nodeTickets.Add(anchor, ticket) } } } nodesReply, err := db.Nodes(&xpb.NodesRequest{Ticket: nodeTickets.Slice()}) if err != nil { return nil, err } reply.Node = nodesReply.Node return reply, nil }
// Decorations implements part of the xrefs Service interface.
//
// Precomputed file decorations are looked up by (fixed) file ticket.  When a
// dirty buffer is supplied, anchor offsets are patched against it and anchors
// inside changed regions are dropped.  References are restricted to the
// requested location's span (the whole file for FILE locations).
func (t *tableImpl) Decorations(ctx context.Context, req *xpb.DecorationsRequest) (*xpb.DecorationsReply, error) {
	if req.GetLocation() == nil || req.GetLocation().Ticket == "" {
		return nil, errors.New("missing location")
	}

	ticket, err := kytheuri.Fix(req.GetLocation().Ticket)
	if err != nil {
		return nil, fmt.Errorf("invalid ticket %q: %v", req.GetLocation().Ticket, err)
	}

	decor, err := t.fileDecorations(ctx, ticket)
	if err == table.ErrNoSuchKey {
		return nil, fmt.Errorf("decorations not found for file %q", ticket)
	} else if err != nil {
		return nil, fmt.Errorf("lookup error for file decorations %q: %v", ticket, err)
	}

	// All offset math below is against the dirty buffer when one is given.
	text := decor.SourceText
	if len(req.DirtyBuffer) > 0 {
		text = req.DirtyBuffer
	}
	norm := xrefs.NewNormalizer(text)

	loc, err := norm.Location(req.GetLocation())
	if err != nil {
		return nil, err
	}

	reply := &xpb.DecorationsReply{Location: loc}

	if req.SourceText {
		reply.Encoding = decor.Encoding
		if loc.Kind == xpb.Location_FILE {
			reply.SourceText = text
		} else {
			reply.SourceText = text[loc.Start.ByteOffset:loc.End.ByteOffset]
		}
	}

	if req.References {
		patterns := xrefs.ConvertFilters(req.Filter)
		// nodeTickets tracks which target nodes already have a NodeInfo.
		nodeTickets := stringset.New()

		// patcher stays nil without a dirty buffer; Patch on a nil *Patcher
		// presumably returns the offsets unchanged with exists=true — TODO
		// confirm against xrefs.Patcher.
		var patcher *xrefs.Patcher
		var offsetMapping map[string]span // Map from anchor ticket to patched span
		if len(req.DirtyBuffer) > 0 {
			patcher = xrefs.NewPatcher(decor.SourceText, req.DirtyBuffer)
			offsetMapping = make(map[string]span)
		}

		// The span with which to constrain the set of returned anchor references.
		var startBoundary, endBoundary int32
		if loc.Kind == xpb.Location_FILE {
			startBoundary = 0
			endBoundary = int32(len(text))
		} else {
			startBoundary = loc.Start.ByteOffset
			endBoundary = loc.End.ByteOffset
		}

		reply.Reference = make([]*xpb.DecorationsReply_Reference, 0, len(decor.Decoration))
		for _, d := range decor.Decoration {
			start, end, exists := patcher.Patch(d.Anchor.StartOffset, d.Anchor.EndOffset)
			// Filter non-existent anchor.  Anchors can no longer exist if we were
			// given a dirty buffer and the anchor was inside a changed region.
			if exists {
				if start >= startBoundary && end <= endBoundary {
					if offsetMapping != nil {
						// Save the patched span to update the corresponding facts of the
						// anchor node in reply.Node.
						offsetMapping[d.Anchor.Ticket] = span{start, end}
					}
					reply.Reference = append(reply.Reference, decorationToReference(norm, d))

					// Target node facts are attached once per node, only when
					// fact filters were requested.
					if len(patterns) > 0 && !nodeTickets.Contains(d.Target.Ticket) {
						nodeTickets.Add(d.Target.Ticket)
						reply.Node = append(reply.Node, nodeToInfo(patterns, d.Target))
					}
				}
			}
		}
	}

	return reply, nil
}
func main() { flag.Parse() if len(flag.Args()) == 0 { flagutil.UsageError("not given any files") } xs := xrefs.WebClient(*remoteAPI) idx := search.WebClient(*remoteAPI) for _, file := range flag.Args() { results, err := idx.Search(ctx, &spb.SearchRequest{ Partial: &spb.VName{Path: file}, Fact: []*spb.SearchRequest_Fact{{ Name: schema.NodeKindFact, Value: []byte(schema.FileKind), }}, }) if err != nil { log.Fatalf("Error searching for ticket of file %q", file) } else if len(results.Ticket) == 0 { log.Printf("Could not find ticket for file %q", file) continue } else if len(results.Ticket) != 1 { log.Printf("Multiple tickets found for file %q; choosing first from %v", file, results.Ticket) } ticket := results.Ticket[0] decor, err := xs.Decorations(ctx, &xpb.DecorationsRequest{ Location: &xpb.Location{Ticket: ticket}, SourceText: true, References: true, }) if err != nil { log.Fatalf("Failed to get decorations for file %q", file) } nodes := xrefs.NodesMap(decor.Node) emitted := stringset.New() for _, r := range decor.Reference { if r.Kind != schema.DefinesBindingEdge || emitted.Contains(r.TargetTicket) { continue } ident := string(nodes[r.TargetTicket][identifierFact]) if ident == "" { continue } offset, err := strconv.Atoi(string(nodes[r.SourceTicket][schema.AnchorStartFact])) if err != nil { log.Printf("Invalid start offset for anchor %q", r.SourceTicket) continue } fields, err := getTagFields(xs, r.TargetTicket) if err != nil { log.Printf("Failed to get tagfields for %q: %v", r.TargetTicket, err) } fmt.Printf("%s\t%s\t%d;\"\t%s\n", ident, file, offsetLine(decor.SourceText, offset), strings.Join(fields, "\t")) emitted.Add(r.TargetTicket) } } }
// CrossReferences implements part of the xrefs.Interface.
//
// The first page(s) of results are read from the CrossReferences SQL table
// (ordered by ticket so rows for the same ticket are contiguous); once those
// are exhausted, related (non-anchor) nodes are appended via the edges query.
// The page token encodes both a row offset (Index) and a secondary edges
// token for the second phase.
func (d *DB) CrossReferences(ctx context.Context, req *xpb.CrossReferencesRequest) (*xpb.CrossReferencesReply, error) {
	tickets, err := xrefs.FixTickets(req.Ticket)
	if err != nil {
		return nil, err
	}

	// Clamp requested page size to [1, maxPageSize].
	pageSize := int(req.PageSize)
	if pageSize <= 0 {
		pageSize = defaultPageSize
	} else if pageSize > maxPageSize {
		pageSize = maxPageSize
	}

	var pageOffset int
	var edgesToken string
	if req.PageToken != "" {
		rec, err := base64.StdEncoding.DecodeString(req.PageToken)
		if err != nil {
			return nil, fmt.Errorf("invalid page_token: %q", req.PageToken)
		}
		var t ipb.PageToken
		if err := proto.Unmarshal(rec, &t); err != nil || t.Index < 0 {
			return nil, fmt.Errorf("invalid page_token: %q", req.PageToken)
		}
		pageOffset = int(t.Index)
		edgesToken = t.SecondaryToken
	}

	reply := &xpb.CrossReferencesReply{
		CrossReferences: make(map[string]*xpb.CrossReferencesReply_CrossReferenceSet),
		Nodes:           make(map[string]*xpb.NodeInfo),
	}

	setQ, ticketArgs := sqlSetQuery(1, tickets)

	var count int
	// A non-empty edgesToken means the anchor rows were already fully
	// paged through on a previous request; skip straight to related nodes.
	if edgesToken == "" {
		args := append(ticketArgs, pageSize+1, pageOffset) // +1 to check for next page
		rs, err := d.Query(fmt.Sprintf("SELECT ticket, kind, proto FROM CrossReferences WHERE ticket IN %s ORDER BY ticket LIMIT $%d OFFSET $%d;", setQ, len(tickets)+1, len(tickets)+2), args...)
		if err != nil {
			return nil, err
		}
		defer closeRows(rs)

		// xrs accumulates the set for the current ticket; rows are ordered by
		// ticket, so a ticket change flushes the finished set into the reply.
		var xrs *xpb.CrossReferencesReply_CrossReferenceSet
		for rs.Next() {
			count++
			if count > pageSize {
				// Keep draining rows only to detect the next page; don't scan.
				continue
			}

			var ticket, kind string
			var rec []byte
			if err := rs.Scan(&ticket, &kind, &rec); err != nil {
				return nil, err
			}

			if xrs != nil && xrs.Ticket != ticket {
				// Flush the previous ticket's set if it gathered anything.
				if len(xrs.Definition) > 0 || len(xrs.Documentation) > 0 || len(xrs.Reference) > 0 || len(xrs.RelatedNode) > 0 {
					reply.CrossReferences[xrs.Ticket] = xrs
				}
				xrs = nil
			}
			if xrs == nil {
				xrs = &xpb.CrossReferencesReply_CrossReferenceSet{Ticket: ticket}
			}

			// Categorize the anchor by its edge kind against the request's
			// def/doc/ref kind filters; unmatched kinds are dropped.
			switch {
			// TODO(schroederc): handle declarations
			case xrefs.IsDefKind(req.DefinitionKind, kind, false):
				xrs.Definition, err = addRelatedAnchor(xrs.Definition, rec, req.AnchorText)
				if err != nil {
					return nil, err
				}
			case xrefs.IsDocKind(req.DocumentationKind, kind):
				xrs.Documentation, err = addRelatedAnchor(xrs.Documentation, rec, req.AnchorText)
				if err != nil {
					return nil, err
				}
			case xrefs.IsRefKind(req.ReferenceKind, kind):
				xrs.Reference, err = addRelatedAnchor(xrs.Reference, rec, req.AnchorText)
				if err != nil {
					return nil, err
				}
			}
		}
		// Flush the final in-progress set.
		if xrs != nil && (len(xrs.Definition) > 0 || len(xrs.Documentation) > 0 || len(xrs.Reference) > 0 || len(xrs.RelatedNode) > 0) {
			reply.CrossReferences[xrs.Ticket] = xrs
		}

		if count > pageSize {
			// The extra row proves another page of anchor rows exists.
			rec, err := proto.Marshal(&ipb.PageToken{Index: int32(pageOffset + pageSize)})
			if err != nil {
				return nil, fmt.Errorf("internal error: error marshalling page token: %v", err)
			}
			reply.NextPageToken = base64.StdEncoding.EncodeToString(rec)
		}
	}

	// Phase two: related (non-anchor) nodes, only when fact filters were
	// requested and the current page still has room.
	if len(req.Filter) > 0 && count <= pageSize {
		// TODO(schroederc): consolidate w/ LevelDB implementation
		er, err := d.edges(ctx, &xpb.EdgesRequest{
			Ticket:    tickets,
			Filter:    req.Filter,
			PageSize:  int32(pageSize - count),
			PageToken: edgesToken,
		}, func(kind string) bool { return !schema.IsAnchorEdge(kind) })
		if err != nil {
			return nil, fmt.Errorf("error getting related nodes: %v", err)
		}
		for ticket, es := range er.EdgeSets {
			nodes := stringset.New()
			crs, ok := reply.CrossReferences[ticket]
			if !ok {
				crs = &xpb.CrossReferencesReply_CrossReferenceSet{
					Ticket: ticket,
				}
			}
			for kind, g := range es.Groups {
				if !schema.IsAnchorEdge(kind) {
					for _, edge := range g.Edge {
						nodes.Add(edge.TargetTicket)
						crs.RelatedNode = append(crs.RelatedNode, &xpb.CrossReferencesReply_RelatedNode{
							RelationKind: kind,
							Ticket:       edge.TargetTicket,
							Ordinal:      edge.Ordinal,
						})
					}
				}
			}
			// Copy node facts only for nodes actually referenced above.
			if len(nodes) > 0 {
				for ticket, n := range er.Nodes {
					if nodes.Contains(ticket) {
						reply.Nodes[ticket] = n
					}
				}
			}
			if !ok && len(crs.RelatedNode) > 0 {
				reply.CrossReferences[ticket] = crs
			}
		}

		// A continuation of the edges query is carried as a secondary token.
		if er.NextPageToken != "" {
			rec, err := proto.Marshal(&ipb.PageToken{SecondaryToken: er.NextPageToken})
			if err != nil {
				return nil, fmt.Errorf("internal error: error marshalling page token: %v", err)
			}
			reply.NextPageToken = base64.StdEncoding.EncodeToString(rec)
		}
	}

	return reply, nil
}
// CrossReferences returns the cross-references for the given tickets using the
// given NodesEdgesService.
//
// Experimental: cross-references are synthesized from Edges pages.  The outer
// loop keeps requesting further edge pages until at least one cross-reference
// is produced or the pages run out, so a non-empty reply is returned whenever
// any xrefs exist.
func CrossReferences(ctx context.Context, xs NodesEdgesService, req *xpb.CrossReferencesRequest) (*xpb.CrossReferencesReply, error) {
	log.Println("WARNING: using experimental CrossReferences API")
	if len(req.Ticket) == 0 {
		return nil, errors.New("no cross-references requested")
	}

	requestedPageSize := int(req.PageSize)
	if requestedPageSize == 0 {
		requestedPageSize = defaultXRefPageSize
	}

	eReply, err := xs.Edges(ctx, &xpb.EdgesRequest{
		Ticket:    req.Ticket,
		PageSize:  int32(requestedPageSize),
		PageToken: req.PageToken,
	})
	if err != nil {
		return nil, fmt.Errorf("error getting edges for cross-references: %v", err)
	}

	reply := &xpb.CrossReferencesReply{
		CrossReferences: make(map[string]*xpb.CrossReferencesReply_CrossReferenceSet),
		NextPageToken:   eReply.NextPageToken,
	}
	// allRelatedNodes stays nil unless fact filters were given; a nil set also
	// disables the related-node switch case below.
	var allRelatedNodes stringset.Set
	if len(req.Filter) > 0 {
		reply.Nodes = make(map[string]*xpb.NodeInfo)
		allRelatedNodes = stringset.New()
	}

	// Cache parent files across all anchors
	files := make(map[string]*fileNode)

	var totalXRefs int
	for {
		for _, es := range eReply.EdgeSet {
			xr, ok := reply.CrossReferences[es.SourceTicket]
			if !ok {
				xr = &xpb.CrossReferencesReply_CrossReferenceSet{Ticket: es.SourceTicket}
			}

			// count tracks how many xrefs/related nodes this edge set yields;
			// the set is only installed in the reply when count > 0.
			var count int
			for _, g := range es.Group {
				switch {
				case isDefKind(req.DefinitionKind, g.Kind):
					anchors, err := completeAnchors(ctx, xs, req.AnchorText, files, g.Kind, g.TargetTicket)
					if err != nil {
						return nil, fmt.Errorf("error resolving definition anchors: %v", err)
					}
					count += len(anchors)
					xr.Definition = append(xr.Definition, anchors...)
				case isRefKind(req.ReferenceKind, g.Kind):
					anchors, err := completeAnchors(ctx, xs, req.AnchorText, files, g.Kind, g.TargetTicket)
					if err != nil {
						return nil, fmt.Errorf("error resolving reference anchors: %v", err)
					}
					count += len(anchors)
					xr.Reference = append(xr.Reference, anchors...)
				case isDocKind(req.DocumentationKind, g.Kind):
					anchors, err := completeAnchors(ctx, xs, req.AnchorText, files, g.Kind, g.TargetTicket)
					if err != nil {
						return nil, fmt.Errorf("error resolving documentation anchors: %v", err)
					}
					count += len(anchors)
					xr.Documentation = append(xr.Documentation, anchors...)
				case allRelatedNodes != nil && !schema.IsAnchorEdge(g.Kind):
					count += len(g.TargetTicket)
					for _, target := range g.TargetTicket {
						xr.RelatedNode = append(xr.RelatedNode, &xpb.CrossReferencesReply_RelatedNode{
							Ticket:       target,
							RelationKind: g.Kind,
						})
					}
					allRelatedNodes.Add(g.TargetTicket...)
				}
			}

			if count > 0 {
				reply.CrossReferences[xr.Ticket] = xr
				totalXRefs += count
			}
		}

		// Stop once something was produced or there are no further pages.
		if reply.NextPageToken == "" || totalXRefs > 0 {
			break
		}

		// We need to return at least 1 xref, if there are any
		log.Println("Extra CrossReferences Edges call: ", reply.NextPageToken)
		eReply, err = xs.Edges(ctx, &xpb.EdgesRequest{
			Ticket:    req.Ticket,
			PageSize:  int32(requestedPageSize),
			PageToken: reply.NextPageToken,
		})
		if err != nil {
			return nil, fmt.Errorf("error getting edges for cross-references: %v", err)
		}
		reply.NextPageToken = eReply.NextPageToken
	}

	// Batch-fetch facts for every related node gathered above.
	if len(allRelatedNodes) > 0 {
		nReply, err := xs.Nodes(ctx, &xpb.NodesRequest{
			Ticket: allRelatedNodes.Slice(),
			Filter: req.Filter,
		})
		if err != nil {
			return nil, fmt.Errorf("error retrieving related nodes: %v", err)
		}
		for _, n := range nReply.Node {
			reply.Nodes[n.Ticket] = n
		}
	}

	return reply, nil
}
// edges returns a page of edges from the AllEdges SQL table for the requested
// source tickets, optionally restricted by req.Kind and by the supplied
// edgeFilter predicate (rows whose kind fails the predicate are skipped
// without counting against the page size).  The page token's Index encodes
// the number of rows previously scanned (not returned), so OFFSET paging
// stays correct even when edgeFilter drops rows.
func (d *DB) edges(ctx context.Context, req *xpb.EdgesRequest, edgeFilter func(kind string) bool) (*xpb.EdgesReply, error) {
	tickets, err := xrefs.FixTickets(req.Ticket)
	if err != nil {
		return nil, err
	}

	// Clamp requested page size to [1, maxPageSize].
	pageSize := int(req.PageSize)
	if pageSize <= 0 {
		pageSize = defaultPageSize
	} else if pageSize > maxPageSize {
		pageSize = maxPageSize
	}

	var pageOffset int
	if req.PageToken != "" {
		rec, err := base64.StdEncoding.DecodeString(req.PageToken)
		if err != nil {
			return nil, fmt.Errorf("invalid page_token: %q", req.PageToken)
		}
		var t ipb.PageToken
		if err := proto.Unmarshal(rec, &t); err != nil || t.Index < 0 {
			return nil, fmt.Errorf("invalid page_token: %q", req.PageToken)
		}
		pageOffset = int(t.Index)
	}

	// Select only the edges from the given source tickets
	setQ, args := sqlSetQuery(1, tickets)
	query := fmt.Sprintf(`
SELECT * FROM AllEdges WHERE source IN %s`, setQ)

	// Filter by edges kinds, if given
	if len(req.Kind) > 0 {
		kSetQ, kArgs := sqlSetQuery(1+len(args), req.Kind)
		query += fmt.Sprintf(` AND kind IN %s`, kSetQ)
		args = append(args, kArgs...)
	}

	// Scan edge sets/groups in order; necessary for CrossReferences
	query += " ORDER BY source, kind, ordinal"

	// Seek to the requested page offset (req.PageToken.Index).  We don't use
	// LIMIT here because we don't yet know how many edges will be filtered by
	// edgeFilter.
	query += fmt.Sprintf(" OFFSET $%d", len(args)+1)
	args = append(args, pageOffset)

	rs, err := d.Query(query, args...)
	if err != nil {
		return nil, fmt.Errorf("error querying for edges: %v", err)
	}
	defer closeRows(rs)

	// scanned counts every row consumed (filtered or not); count tracks only
	// rows accepted into the page.
	var scanned int
	// edges := map { source -> kind -> target -> ordinal set }
	edges := make(map[string]map[string]map[string]map[int32]struct{}, len(tickets))
	for count := 0; count < pageSize && rs.Next(); scanned++ {
		var source, kind, target string
		var ordinal int
		if err := rs.Scan(&source, &kind, &target, &ordinal); err != nil {
			return nil, fmt.Errorf("edges scan error: %v", err)
		}

		if edgeFilter != nil && !edgeFilter(kind) {
			continue
		}
		count++

		// Lazily build the nested source -> kind -> target -> ordinals maps.
		groups, ok := edges[source]
		if !ok {
			groups = make(map[string]map[string]map[int32]struct{})
			edges[source] = groups
		}
		targets, ok := groups[kind]
		if !ok {
			targets = make(map[string]map[int32]struct{})
			groups[kind] = targets
		}
		ordinals, ok := targets[target]
		if !ok {
			ordinals = make(map[int32]struct{})
			targets[target] = ordinals
		}
		ordinals[int32(ordinal)] = struct{}{}
	}

	// Convert the nested maps into the EdgesReply proto shape.
	reply := &xpb.EdgesReply{EdgeSets: make(map[string]*xpb.EdgeSet, len(edges))}
	nodeTickets := stringset.New()
	for src, groups := range edges {
		gs := make(map[string]*xpb.EdgeSet_Group, len(groups))
		nodeTickets.Add(src)
		for kind, targets := range groups {
			edges := make([]*xpb.EdgeSet_Group_Edge, 0, len(targets))
			for ticket, ordinals := range targets {
				for ordinal := range ordinals {
					edges = append(edges, &xpb.EdgeSet_Group_Edge{
						TargetTicket: ticket,
						Ordinal:      ordinal,
					})
				}
				nodeTickets.Add(ticket)
			}
			sort.Sort(xrefs.ByOrdinal(edges))
			gs[kind] = &xpb.EdgeSet_Group{
				Edge: edges,
			}
		}
		reply.EdgeSets[src] = &xpb.EdgeSet{
			Groups: gs,
		}
	}

	// If there is another row, there is a NextPageToken.
	if rs.Next() {
		rec, err := proto.Marshal(&ipb.PageToken{Index: int32(pageOffset + scanned)})
		if err != nil {
			return nil, fmt.Errorf("internal error: error marshalling page token: %v", err)
		}
		reply.NextPageToken = base64.StdEncoding.EncodeToString(rec)
	}

	// TODO(schroederc): faster node lookups
	if len(req.Filter) > 0 && len(nodeTickets) > 0 {
		nodes, err := d.Nodes(ctx, &xpb.NodesRequest{
			Ticket: nodeTickets.Slice(),
			Filter: req.Filter,
		})
		if err != nil {
			return nil, fmt.Errorf("error filtering nodes:%v", err)
		}
		reply.Nodes = nodes.Nodes
	}

	return reply, nil
}
// Edges implements part of the xrefs Service interface.
//
// For each requested ticket, the stored PagedEdgeSet is read and its groups
// (in-line and paged) are passed through filterStats, which enforces the
// skip/max paging window.  The page token's Index records the number of edges
// already returned across previous pages.
func (t *tableImpl) Edges(ctx context.Context, req *xpb.EdgesRequest) (*xpb.EdgesReply, error) {
	if len(req.Ticket) == 0 {
		return nil, errors.New("no tickets specified")
	}

	// Normalize the page size into [1, maxPageSize]; negatives are an error.
	stats := filterStats{
		max: int(req.PageSize),
	}
	if stats.max < 0 {
		return nil, fmt.Errorf("invalid page_size: %d", req.PageSize)
	} else if stats.max == 0 {
		stats.max = defaultPageSize
	} else if stats.max > maxPageSize {
		stats.max = maxPageSize
	}

	if req.PageToken != "" {
		rec, err := base64.StdEncoding.DecodeString(req.PageToken)
		if err != nil {
			return nil, fmt.Errorf("invalid page_token: %q", req.PageToken)
		}
		var t srvpb.PageToken
		if err := proto.Unmarshal(rec, &t); err != nil || t.Index < 0 {
			return nil, fmt.Errorf("invalid page_token: %q", req.PageToken)
		}
		stats.skip = int(t.Index)
	}
	pageToken := stats.skip

	var totalEdgesPossible int

	allowedKinds := stringset.New(req.Kind...)
	// All source/target tickets seen; used for the optional Nodes lookup.
	nodeTickets := stringset.New()

	reply := &xpb.EdgesReply{}
	for _, rawTicket := range req.Ticket {
		ticket, err := kytheuri.Fix(rawTicket)
		if err != nil {
			return nil, fmt.Errorf("invalid ticket %q: %v", rawTicket, err)
		}

		pes, err := t.pagedEdgeSet(ctx, ticket)
		if err == table.ErrNoSuchKey {
			// Unknown tickets are silently skipped (zero-degree nodes).
			continue
		} else if err != nil {
			return nil, fmt.Errorf("lookup error for node edges %q: %v", ticket, err)
		}
		totalEdgesPossible += int(pes.TotalEdges)

		// In-line edge groups first…
		var groups []*xpb.EdgeSet_Group
		for _, grp := range pes.EdgeSet.Group {
			if len(allowedKinds) == 0 || allowedKinds.Contains(grp.Kind) {
				ng := stats.filter(grp)
				if ng != nil {
					nodeTickets.Add(ng.TargetTicket...)
					groups = append(groups, ng)
					if stats.total == stats.max {
						break
					}
				}
			}
		}

		// …then paged edge groups, each fetched on demand by page key.
		for _, idx := range pes.PageIndex {
			if len(allowedKinds) == 0 || allowedKinds.Contains(idx.EdgeKind) {
				ep, err := t.edgePage(ctx, idx.PageKey)
				if err == table.ErrNoSuchKey {
					return nil, fmt.Errorf("missing edge page: %q", idx.PageKey)
				} else if err != nil {
					return nil, fmt.Errorf("lookup error for node edges %q: %v", ticket, err)
				}
				ng := stats.filter(ep.EdgesGroup)
				if ng != nil {
					nodeTickets.Add(ng.TargetTicket...)
					groups = append(groups, ng)
					if stats.total == stats.max {
						break
					}
				}
			}
		}

		if len(groups) > 0 {
			nodeTickets.Add(pes.EdgeSet.SourceTicket)
			reply.EdgeSet = append(reply.EdgeSet, &xpb.EdgeSet{
				SourceTicket: pes.EdgeSet.SourceTicket,
				Group:        groups,
			})
		}
	}
	// Internal consistency checks on the paging arithmetic.
	if stats.total > stats.max {
		log.Panicf("totalEdges greater than maxEdges: %d > %d", stats.total, stats.max)
	} else if pageToken+stats.total > totalEdgesPossible && pageToken <= totalEdgesPossible {
		log.Panicf("pageToken+totalEdges greater than totalEdgesPossible: %d+%d > %d", pageToken, stats.total, totalEdgesPossible)
	}

	// Only request Nodes when there are fact filters given.
	if len(req.Filter) > 0 {
		nReply, err := t.Nodes(ctx, &xpb.NodesRequest{
			Ticket: nodeTickets.Slice(),
			Filter: req.Filter,
		})
		if err != nil {
			return nil, fmt.Errorf("error getting nodes: %v", err)
		}
		reply.Node = nReply.Node
	}

	// Emit a continuation token unless this page reached the final edge.
	if pageToken+stats.total != totalEdgesPossible && stats.total != 0 {
		// TODO: take into account an empty last page (due to kind filters)
		rec, err := proto.Marshal(&srvpb.PageToken{Index: int32(pageToken + stats.total)})
		if err != nil {
			return nil, fmt.Errorf("error marshalling page token: %v", err)
		}
		reply.NextPageToken = base64.StdEncoding.EncodeToString(rec)
	}

	return reply, nil
}
// Decorations implements part of the xrefs Service interface.
//
// Like the other Decorations variants, this looks up precomputed file
// decorations and filters anchors to the requested span.  This version also
// rewrites anchor start/end facts in the returned nodes to match a dirty
// buffer, using the offsetMapping gathered while patching anchors.
func (t *tableImpl) Decorations(ctx context.Context, req *xpb.DecorationsRequest) (*xpb.DecorationsReply, error) {
	if req.GetLocation() == nil || req.GetLocation().Ticket == "" {
		return nil, errors.New("missing location")
	}

	ticket, err := kytheuri.Fix(req.GetLocation().Ticket)
	if err != nil {
		return nil, fmt.Errorf("invalid ticket %q: %v", req.GetLocation().Ticket, err)
	}

	decor, err := t.fileDecorations(ctx, ticket)
	if err == table.ErrNoSuchKey {
		return nil, fmt.Errorf("decorations not found for file %q", ticket)
	} else if err != nil {
		return nil, fmt.Errorf("lookup error for file decorations %q: %v", ticket, err)
	}

	// All offsets below are relative to the dirty buffer when one is given.
	text := decor.SourceText
	if len(req.DirtyBuffer) > 0 {
		text = req.DirtyBuffer
	}
	norm := xrefs.NewNormalizer(text)

	loc, err := norm.Location(req.GetLocation())
	if err != nil {
		return nil, err
	}

	reply := &xpb.DecorationsReply{Location: loc}

	if req.SourceText {
		reply.Encoding = decor.Encoding
		if loc.Kind == xpb.Location_FILE {
			reply.SourceText = text
		} else {
			reply.SourceText = text[loc.Start.ByteOffset:loc.End.ByteOffset]
		}
	}

	if req.References {
		// Set of node tickets for which to retrieve facts.  These are the nodes
		// used in the returned references (both anchor sources and node targets).
		nodeTickets := stringset.New()

		var patcher *xrefs.Patcher
		var offsetMapping map[string]span // Map from anchor ticket to patched span
		if len(req.DirtyBuffer) > 0 {
			patcher = xrefs.NewPatcher(decor.SourceText, req.DirtyBuffer)
			offsetMapping = make(map[string]span)
		}

		// The span with which to constrain the set of returned anchor references.
		var startBoundary, endBoundary int32
		if loc.Kind == xpb.Location_FILE {
			startBoundary = 0
			endBoundary = int32(len(text))
		} else {
			startBoundary = loc.Start.ByteOffset
			endBoundary = loc.End.ByteOffset
		}

		reply.Reference = make([]*xpb.DecorationsReply_Reference, 0, len(decor.Decoration))
		for _, d := range decor.Decoration {
			start, end, exists := patcher.Patch(d.Anchor.StartOffset, d.Anchor.EndOffset)
			// Filter non-existent anchor.  Anchors can no longer exist if we were
			// given a dirty buffer and the anchor was inside a changed region.
			if exists {
				if start >= startBoundary && end <= endBoundary {
					if offsetMapping != nil {
						// Save the patched span to update the corresponding facts of the
						// anchor node in reply.Node.
						offsetMapping[d.Anchor.Ticket] = span{start, end}
					}
					reply.Reference = append(reply.Reference, decorationToReference(norm, d))
					nodeTickets.Add(d.Anchor.Ticket)
					nodeTickets.Add(d.TargetTicket)
				}
			}
		}

		// Only request Nodes when there are fact filters given.
		if len(req.Filter) > 0 {
			// Retrieve facts for all nodes referenced in the file decorations.
			nodesReply, err := t.Nodes(ctx, &xpb.NodesRequest{
				Ticket: nodeTickets.Slice(),
				Filter: req.Filter,
			})
			if err != nil {
				return nil, fmt.Errorf("error getting nodes: %v", err)
			}
			reply.Node = nodesReply.Node
		}

		// Patch anchor node facts in reply to match dirty buffer
		if len(offsetMapping) > 0 {
			for _, n := range reply.Node {
				if span, ok := offsetMapping[n.Ticket]; ok {
					for _, f := range n.Fact {
						switch f.Name {
						case schema.AnchorStartFact:
							f.Value = []byte(strconv.Itoa(int(span.start)))
						case schema.AnchorEndFact:
							f.Value = []byte(strconv.Itoa(int(span.end)))
						}
					}
				}
			}
		}
	}

	return reply, nil
}
// CrossReferences implements part of the xrefs.Service interface. func (t *tableImpl) CrossReferences(ctx context.Context, req *xpb.CrossReferencesRequest) (*xpb.CrossReferencesReply, error) { tickets, err := fixTickets(req.Ticket) if err != nil { return nil, err } stats := refStats{ max: int(req.PageSize), } if stats.max < 0 { return nil, fmt.Errorf("invalid page_size: %d", req.PageSize) } else if stats.max == 0 { stats.max = defaultPageSize } else if stats.max > maxPageSize { stats.max = maxPageSize } var edgesPageToken string if req.PageToken != "" { rec, err := base64.StdEncoding.DecodeString(req.PageToken) if err != nil { return nil, fmt.Errorf("invalid page_token: %q", req.PageToken) } var t srvpb.PageToken if err := proto.Unmarshal(rec, &t); err != nil || t.Index < 0 { return nil, fmt.Errorf("invalid page_token: %q", req.PageToken) } stats.skip = int(t.Index) edgesPageToken = t.SecondaryToken } pageToken := stats.skip var totalRefsPossible int reply := &xpb.CrossReferencesReply{ CrossReferences: make(map[string]*xpb.CrossReferencesReply_CrossReferenceSet, len(req.Ticket)), Nodes: make(map[string]*xpb.NodeInfo, len(req.Ticket)), } var nextToken *srvpb.PageToken if edgesPageToken == "" && (req.DefinitionKind != xpb.CrossReferencesRequest_NO_DEFINITIONS || req.ReferenceKind != xpb.CrossReferencesRequest_NO_REFERENCES || req.DocumentationKind != xpb.CrossReferencesRequest_NO_DOCUMENTATION) { for _, ticket := range tickets { // TODO(schroederc): retrieve PagedCrossReferences in parallel cr, err := t.crossReferences(ctx, ticket) if err == table.ErrNoSuchKey { log.Println("Missing CrossReferences:", ticket) continue } else if err != nil { return nil, fmt.Errorf("error looking up cross-references for ticket %q: %v", ticket, err) } crs := &xpb.CrossReferencesReply_CrossReferenceSet{ Ticket: ticket, } for _, grp := range cr.Group { if xrefs.IsDefKind(req.DefinitionKind, grp.Kind) { totalRefsPossible += len(grp.Anchor) if stats.addAnchors(&crs.Definition, grp.Anchor, 
req.AnchorText) { break } } else if xrefs.IsDocKind(req.DocumentationKind, grp.Kind) { totalRefsPossible += len(grp.Anchor) if stats.addAnchors(&crs.Documentation, grp.Anchor, req.AnchorText) { break } } else if xrefs.IsRefKind(req.ReferenceKind, grp.Kind) { totalRefsPossible += len(grp.Anchor) if stats.addAnchors(&crs.Reference, grp.Anchor, req.AnchorText) { break } } } if stats.total < stats.max { for _, idx := range cr.PageIndex { // TODO(schroederc): skip entire read if s.skip >= idx.Count p, err := t.crossReferencesPage(ctx, idx.PageKey) if err != nil { return nil, fmt.Errorf("internal error: error retrieving cross-references page: %v", idx.PageKey) } if xrefs.IsDefKind(req.DefinitionKind, p.Group.Kind) { totalRefsPossible += len(p.Group.Anchor) if stats.addAnchors(&crs.Definition, p.Group.Anchor, req.AnchorText) { break } } else if xrefs.IsDocKind(req.DocumentationKind, p.Group.Kind) { totalRefsPossible += len(p.Group.Anchor) if stats.addAnchors(&crs.Documentation, p.Group.Anchor, req.AnchorText) { break } } else { totalRefsPossible += len(p.Group.Anchor) if stats.addAnchors(&crs.Reference, p.Group.Anchor, req.AnchorText) { break } } } } if len(crs.Definition) > 0 || len(crs.Reference) > 0 || len(crs.Documentation) > 0 { reply.CrossReferences[crs.Ticket] = crs } } if pageToken+stats.total != totalRefsPossible && stats.total != 0 { nextToken = &srvpb.PageToken{Index: int32(pageToken + stats.total)} } } if len(req.Filter) > 0 && stats.total < stats.max { er, err := t.edges(ctx, edgesRequest{ Tickets: tickets, Filters: req.Filter, Kinds: func(kind string) bool { return !schema.IsAnchorEdge(kind) }, PageToken: edgesPageToken, PageSize: stats.max - stats.total, }) if err != nil { return nil, fmt.Errorf("error getting related nodes: %v", err) } for _, es := range er.EdgeSet { ticket := es.SourceTicket nodes := stringset.New() crs, ok := reply.CrossReferences[ticket] if !ok { crs = &xpb.CrossReferencesReply_CrossReferenceSet{ Ticket: ticket, } } for _, g := range 
es.Group { if !schema.IsAnchorEdge(g.Kind) { nodes.Add(g.TargetTicket...) for _, t := range g.TargetTicket { crs.RelatedNode = append(crs.RelatedNode, &xpb.CrossReferencesReply_RelatedNode{ RelationKind: g.Kind, Ticket: t, }) } } } if len(nodes) > 0 { for _, n := range er.Node { if nodes.Contains(n.Ticket) { reply.Nodes[n.Ticket] = n } } } if !ok && len(crs.RelatedNode) > 0 { reply.CrossReferences[ticket] = crs } } if er.NextPageToken != "" { nextToken = &srvpb.PageToken{SecondaryToken: er.NextPageToken} } } if nextToken != nil { rec, err := proto.Marshal(nextToken) if err != nil { return nil, fmt.Errorf("internal error: error marshalling page token: %v", err) } reply.NextPageToken = base64.StdEncoding.EncodeToString(rec) } return reply, nil }
// Edges implements part of the xrefs Service interface.
//
// This variant streams PagedEdgeSets for all requested tickets over a channel
// (t.pagedEdgeSets) and applies the filterStats paging window across in-line
// and paged edge groups.  The channel is drained in a deferred func so an
// early return does not leak the producer goroutine.
func (t *tableImpl) Edges(ctx context.Context, req *xpb.EdgesRequest) (*xpb.EdgesReply, error) {
	tickets, err := fixTickets(req.Ticket)
	if err != nil {
		return nil, err
	}

	// Normalize the page size into [1, maxPageSize]; negatives are an error.
	stats := filterStats{
		max: int(req.PageSize),
	}
	if stats.max < 0 {
		return nil, fmt.Errorf("invalid page_size: %d", req.PageSize)
	} else if stats.max == 0 {
		stats.max = defaultPageSize
	} else if stats.max > maxPageSize {
		stats.max = maxPageSize
	}

	if req.PageToken != "" {
		rec, err := base64.StdEncoding.DecodeString(req.PageToken)
		if err != nil {
			return nil, fmt.Errorf("invalid page_token: %q", req.PageToken)
		}
		var t srvpb.PageToken
		if err := proto.Unmarshal(rec, &t); err != nil || t.Index < 0 {
			return nil, fmt.Errorf("invalid page_token: %q", req.PageToken)
		}
		stats.skip = int(t.Index)
	}
	pageToken := stats.skip

	var totalEdgesPossible int

	allowedKinds := stringset.New(req.Kind...)
	// All source/target tickets seen; used for the optional Nodes lookup.
	nodeTickets := stringset.New()

	rs, err := t.pagedEdgeSets(ctx, tickets)
	if err != nil {
		return nil, err
	}
	defer func() {
		// drain channel in case of errors or early return
		for _ = range rs {
		}
	}()

	reply := &xpb.EdgesReply{}
	for r := range rs {
		if r.Err == table.ErrNoSuchKey {
			// Unknown tickets are silently skipped.
			continue
		} else if r.Err != nil {
			return nil, r.Err
		}
		pes := r.PagedEdgeSet

		totalEdgesPossible += int(pes.TotalEdges)

		// In-line edge groups first…
		var groups []*xpb.EdgeSet_Group
		for _, grp := range pes.EdgeSet.Group {
			if len(allowedKinds) == 0 || allowedKinds.Contains(grp.Kind) {
				ng := stats.filter(grp)
				if ng != nil {
					nodeTickets.Add(ng.TargetTicket...)
					groups = append(groups, ng)
					if stats.total == stats.max {
						break
					}
				}
			}
		}

		// TODO(schroederc): ensure that pes.EdgeSet.Groups and pes.PageIndexes of
		// the same kind are grouped together in the EdgesReply

		// …then paged edge groups, skipping pages wholly before the window.
		if stats.total != stats.max {
			for _, idx := range pes.PageIndex {
				if len(allowedKinds) == 0 || allowedKinds.Contains(idx.EdgeKind) {
					if stats.skipPage(idx) {
						log.Printf("Skipping EdgePage: %s", idx.PageKey)
						continue
					}
					log.Printf("Retrieving EdgePage: %s", idx.PageKey)
					ep, err := t.edgePage(ctx, idx.PageKey)
					if err == table.ErrNoSuchKey {
						return nil, fmt.Errorf("missing edge page: %q", idx.PageKey)
					} else if err != nil {
						return nil, fmt.Errorf("edge page lookup error (page key: %q): %v", idx.PageKey, err)
					}
					ng := stats.filter(ep.EdgesGroup)
					if ng != nil {
						nodeTickets.Add(ng.TargetTicket...)
						groups = append(groups, ng)
						if stats.total == stats.max {
							break
						}
					}
				}
			}
		}

		if len(groups) > 0 {
			nodeTickets.Add(pes.EdgeSet.SourceTicket)
			reply.EdgeSet = append(reply.EdgeSet, &xpb.EdgeSet{
				SourceTicket: pes.EdgeSet.SourceTicket,
				Group:        groups,
			})
		}

		// Stop reading further edge sets once the page is full.
		if stats.total == stats.max {
			break
		}
	}
	// Internal consistency checks on the paging arithmetic.
	if stats.total > stats.max {
		log.Panicf("totalEdges greater than maxEdges: %d > %d", stats.total, stats.max)
	} else if pageToken+stats.total > totalEdgesPossible && pageToken <= totalEdgesPossible {
		log.Panicf("pageToken+totalEdges greater than totalEdgesPossible: %d+%d > %d", pageToken, stats.total, totalEdgesPossible)
	}

	// Only request Nodes when there are fact filters given.
	if len(req.Filter) > 0 && len(nodeTickets) > 0 {
		nReply, err := t.Nodes(ctx, &xpb.NodesRequest{
			Ticket: nodeTickets.Slice(),
			Filter: req.Filter,
		})
		if err != nil {
			return nil, fmt.Errorf("error getting nodes: %v", err)
		}
		reply.Node = nReply.Node
	}

	// Emit a continuation token unless this page reached the final edge.
	if pageToken+stats.total != totalEdgesPossible && stats.total != 0 {
		// TODO: take into account an empty last page (due to kind filters)
		rec, err := proto.Marshal(&srvpb.PageToken{Index: int32(pageToken + stats.total)})
		if err != nil {
			return nil, fmt.Errorf("error marshalling page token: %v", err)
		}
		reply.NextPageToken = base64.StdEncoding.EncodeToString(rec)
	}

	return reply, nil
}
// Edges implements part of the Service interface. func (g *GraphStoreService) Edges(ctx context.Context, req *xpb.EdgesRequest) (*xpb.EdgesReply, error) { if len(req.Ticket) == 0 { return nil, errors.New("no tickets specified") } else if req.PageToken != "" { return nil, errors.New("UNIMPLEMENTED: page_token") } patterns := xrefs.ConvertFilters(req.Filter) allowedKinds := stringset.New(req.Kind...) targetSet := stringset.New() reply := &xpb.EdgesReply{ EdgeSets: make(map[string]*xpb.EdgeSet), Nodes: make(map[string]*xpb.NodeInfo), } for _, ticket := range req.Ticket { vname, err := kytheuri.ToVName(ticket) if err != nil { return nil, fmt.Errorf("invalid ticket %q: %v", ticket, err) } var ( // EdgeKind -> TargetTicket -> OrdinalSet filteredEdges = make(map[string]map[string]map[int32]struct{}) filteredFacts = make(map[string][]byte) ) if err := g.gs.Read(ctx, &spb.ReadRequest{ Source: vname, EdgeKind: "*", }, func(entry *spb.Entry) error { edgeKind := entry.EdgeKind if edgeKind == "" { // node fact if len(patterns) > 0 && xrefs.MatchesAny(entry.FactName, patterns) { filteredFacts[entry.FactName] = entry.FactValue } } else { // edge edgeKind, ordinal, _ := schema.ParseOrdinal(edgeKind) if len(req.Kind) == 0 || allowedKinds.Contains(edgeKind) { targets, ok := filteredEdges[edgeKind] if !ok { targets = make(map[string]map[int32]struct{}) filteredEdges[edgeKind] = targets } ticket := kytheuri.ToString(entry.Target) ordSet, ok := targets[ticket] if !ok { ordSet = make(map[int32]struct{}) targets[ticket] = ordSet } ordSet[int32(ordinal)] = struct{}{} } } return nil }); err != nil { return nil, fmt.Errorf("failed to retrieve entries for ticket %q", ticket) } // Only add a EdgeSet if there are targets for the requested edge kinds. 
if len(filteredEdges) > 0 { groups := make(map[string]*xpb.EdgeSet_Group) for edgeKind, targets := range filteredEdges { g := &xpb.EdgeSet_Group{} for target, ordinals := range targets { for ordinal := range ordinals { g.Edge = append(g.Edge, &xpb.EdgeSet_Group_Edge{ TargetTicket: target, Ordinal: ordinal, }) } targetSet.Add(target) } groups[edgeKind] = g } reply.EdgeSets[ticket] = &xpb.EdgeSet{ Groups: groups, } // In addition, only add a NodeInfo if the filters have resulting facts. if len(filteredFacts) > 0 { reply.Nodes[ticket] = &xpb.NodeInfo{ Facts: filteredFacts, } } } } // Only request Nodes when there are fact filters given. if len(req.Filter) > 0 { // Eliminate redundant work by removing already requested nodes from targetSet for ticket := range reply.Nodes { targetSet.Remove(ticket) } // Batch request all leftover target nodes nodesReply, err := g.Nodes(ctx, &xpb.NodesRequest{ Ticket: targetSet.Slice(), Filter: req.Filter, }) if err != nil { return nil, fmt.Errorf("failure getting target nodes: %v", err) } for ticket, node := range nodesReply.Nodes { reply.Nodes[ticket] = node } } return reply, nil }
// Decorations implements part of the xrefs Service interface.
//
// It looks up the precomputed srvpb.FileDecorations for the requested file
// ticket and converts them into a DecorationsReply.  When req.DirtyBuffer is
// set, anchor offsets are patched from the stored file text onto the dirty
// text, and anchors that fall inside changed regions are dropped.
func (t *tableImpl) Decorations(ctx context.Context, req *xpb.DecorationsRequest) (*xpb.DecorationsReply, error) {
	// A location with a non-empty ticket is required to identify the file.
	if req.GetLocation() == nil || req.GetLocation().Ticket == "" {
		return nil, errors.New("missing location")
	}

	// Canonicalize the ticket before using it as a lookup key.
	ticket, err := kytheuri.Fix(req.GetLocation().Ticket)
	if err != nil {
		return nil, fmt.Errorf("invalid ticket %q: %v", req.GetLocation().Ticket, err)
	}

	decor, err := t.fileDecorations(ctx, ticket)
	if err == table.ErrNoSuchKey {
		// Map the storage-layer miss onto the service-level sentinel error.
		return nil, xrefs.ErrDecorationsNotFound
	} else if err != nil {
		return nil, fmt.Errorf("lookup error for file decorations %q: %v", ticket, err)
	}

	// All offsets in the reply are normalized against the text actually being
	// displayed: the dirty buffer when given, otherwise the stored file text.
	text := decor.File.Text
	if len(req.DirtyBuffer) > 0 {
		text = req.DirtyBuffer
	}
	norm := xrefs.NewNormalizer(text)

	loc, err := norm.Location(req.GetLocation())
	if err != nil {
		return nil, err
	}

	reply := &xpb.DecorationsReply{Location: loc}

	if req.SourceText {
		reply.Encoding = decor.File.Encoding
		if loc.Kind == xpb.Location_FILE {
			reply.SourceText = text
		} else {
			// SPAN request: return only the requested slice of the text.
			reply.SourceText = text[loc.Start.ByteOffset:loc.End.ByteOffset]
		}
	}

	if req.References {
		patterns := xrefs.ConvertFilters(req.Filter)

		// Only build a patcher when a dirty buffer was supplied; a nil
		// *xrefs.Patcher is passed through Patch below, which presumably
		// returns offsets unchanged — NOTE(review): confirm nil-receiver
		// handling in xrefs.Patcher.Patch.
		var patcher *xrefs.Patcher
		if len(req.DirtyBuffer) > 0 {
			patcher = xrefs.NewPatcher(decor.File.Text, req.DirtyBuffer)
		}

		// The span with which to constrain the set of returned anchor references.
		var startBoundary, endBoundary int32
		spanKind := req.SpanKind
		if loc.Kind == xpb.Location_FILE {
			// Whole-file request: span covers everything, so force WITHIN_SPAN.
			startBoundary = 0
			endBoundary = int32(len(text))
			spanKind = xpb.DecorationsRequest_WITHIN_SPAN
		} else {
			startBoundary = loc.Start.ByteOffset
			endBoundary = loc.End.ByteOffset
		}

		reply.Reference = make([]*xpb.DecorationsReply_Reference, 0, len(decor.Decoration))
		reply.Nodes = make(map[string]*xpb.NodeInfo)

		// Tracks which target tickets already have a reply.Nodes entry.
		seenTarget := stringset.New()

		// Reference.TargetTicket -> NodeInfo (superset of reply.Nodes).
		// Only populated when fact filters were requested.
		var nodes map[string]*xpb.NodeInfo
		if len(patterns) > 0 {
			nodes = make(map[string]*xpb.NodeInfo)
			for _, n := range decor.Target {
				nodes[n.Ticket] = nodeToInfo(patterns, n)
			}
		}

		// Reference.TargetTicket -> []Reference set
		var refs map[string][]*xpb.DecorationsReply_Reference
		// ExpandedAnchor.Ticket -> ExpandedAnchor
		var defs map[string]*srvpb.ExpandedAnchor
		if req.TargetDefinitions {
			refs = make(map[string][]*xpb.DecorationsReply_Reference)

			reply.DefinitionLocations = make(map[string]*xpb.Anchor)

			// Index the precomputed definition anchors by ticket.
			defs = make(map[string]*srvpb.ExpandedAnchor)
			for _, def := range decor.TargetDefinitions {
				defs[def.Ticket] = def
			}
		}

		for _, d := range decor.Decoration {
			start, end, exists := patcher.Patch(d.Anchor.StartOffset, d.Anchor.EndOffset)
			// Filter non-existent anchor.  Anchors can no longer exist if we were
			// given a dirty buffer and the anchor was inside a changed region.
			if exists {
				if xrefs.InSpanBounds(spanKind, start, end, startBoundary, endBoundary) {
					// Rewrite the anchor to its (possibly patched) offsets
					// before converting it to a reply Reference.
					d.Anchor.StartOffset = start
					d.Anchor.EndOffset = end

					r := decorationToReference(norm, d)
					if req.TargetDefinitions {
						if def, ok := defs[d.TargetDefinition]; ok {
							// Serving data already has the definition anchor.
							reply.DefinitionLocations[d.TargetDefinition] = a2a(def, false).Anchor
						} else {
							// Remember the reference so a definition can be
							// resolved by the slow path below.
							refs[r.TargetTicket] = append(refs[r.TargetTicket], r)
						}
					} else {
						// Definitions weren't requested; drop the field.
						r.TargetDefinition = ""
					}
					reply.Reference = append(reply.Reference, r)

					// NOTE(review): nodes[r.TargetTicket] may be nil when the
					// filters matched no facts; this inserts a nil NodeInfo
					// entry into reply.Nodes — confirm callers tolerate that.
					if !seenTarget.Contains(r.TargetTicket) && nodes != nil {
						reply.Nodes[r.TargetTicket] = nodes[r.TargetTicket]
						seenTarget.Add(r.TargetTicket)
					}
				}
			}
		}

		// Only compute target definitions if the serving data doesn't contain any
		// TODO(schroederc): remove this once serving data is always populated
		if req.TargetDefinitions && len(defs) == 0 {
			targetTickets := make([]string, 0, len(refs))
			for ticket := range refs {
				targetTickets = append(targetTickets, ticket)
			}

			// Shadows the outer defs/err on purpose; both are local to this
			// fallback path.
			defs, err := xrefs.SlowDefinitions(t, ctx, targetTickets)
			if err != nil {
				return nil, fmt.Errorf("error retrieving target definitions: %v", err)
			}

			reply.DefinitionLocations = make(map[string]*xpb.Anchor, len(defs))
			for tgt, def := range defs {
				for _, ref := range refs[tgt] {
					// Skip self-references: an anchor is not its own definition.
					if def.Ticket != ref.SourceTicket {
						ref.TargetDefinition = def.Ticket
						if _, ok := reply.DefinitionLocations[def.Ticket]; !ok {
							reply.DefinitionLocations[def.Ticket] = def
						}
					}
				}
			}
		}
	}

	return reply, nil
}
// Decorations implements part of the Service interface.
//
// Unlike the table-backed implementation, this computes file decorations
// on the fly by walking the GraphStore: it fetches the file's anchors via
// reverse childof edges, reads each anchor's offsets from its facts, and
// then fetches each anchor's forward edges to its targets.
//
// NOTE(review): this issues one Nodes RPC per anchor (an N+1 pattern) plus
// one getEdges call per anchor; acceptable for an on-demand fallback but
// expensive for large files — confirm this path is not used at scale.
func (g *GraphStoreService) Decorations(ctx context.Context, req *xpb.DecorationsRequest) (*xpb.DecorationsReply, error) {
	if len(req.DirtyBuffer) > 0 {
		return nil, errors.New("UNIMPLEMENTED: dirty buffers")
	} else if req.GetLocation() == nil {
		// TODO(schroederc): allow empty location when given dirty buffer
		return nil, errors.New("missing location")
	}

	fileVName, err := kytheuri.ToVName(req.Location.Ticket)
	if err != nil {
		return nil, fmt.Errorf("invalid file ticket %q: %v", req.Location.Ticket, err)
	}

	text, encoding, err := getSourceText(ctx, g.gs, fileVName)
	if err != nil {
		return nil, fmt.Errorf("failed to retrieve file text: %v", err)
	}

	norm := xrefs.NewNormalizer(text)
	loc, err := norm.Location(req.GetLocation())
	if err != nil {
		return nil, err
	}

	reply := &xpb.DecorationsReply{
		Location: loc,
		Nodes:    make(map[string]*xpb.NodeInfo),
	}

	// Handle DecorationsRequest.SourceText switch
	if req.SourceText {
		if loc.Kind == xpb.Location_FILE {
			reply.SourceText = text
		} else {
			reply.SourceText = text[loc.Start.ByteOffset:loc.End.ByteOffset]
		}
		reply.Encoding = encoding
	}

	// Handle DecorationsRequest.References switch
	if req.References {
		// Traverse the following chain of edges:
		//   file --%/kythe/edge/childof-> []anchor --forwardEdgeKind-> []target
		//
		// Add []anchor and []target nodes to reply.Nodes
		// Add all {anchor, forwardEdgeKind, target} tuples to reply.Reference

		patterns := xrefs.ConvertFilters(req.Filter)

		// All children of the file reached by reverse childof edges; anchors
		// are filtered out of this set below by node kind.
		children, err := getEdges(ctx, g.gs, fileVName, func(e *spb.Entry) bool {
			return e.EdgeKind == revChildOfEdgeKind
		})
		if err != nil {
			return nil, fmt.Errorf("failed to retrieve file children: %v", err)
		}

		targetSet := stringset.New()
		for _, edge := range children {
			anchor := edge.Target
			ticket := kytheuri.ToString(anchor)
			anchorNodeReply, err := g.Nodes(ctx, &xpb.NodesRequest{
				Ticket: []string{ticket},
			})
			if err != nil {
				return nil, fmt.Errorf("failure getting reference source node: %v", err)
			} else if len(anchorNodeReply.Nodes) != 1 {
				return nil, fmt.Errorf("found %d nodes for {%+v}", len(anchorNodeReply.Nodes), anchor)
			}

			node, ok := xrefs.NodesMap(anchorNodeReply.Nodes)[ticket]
			if !ok {
				return nil, fmt.Errorf("failed to find info for node %q", ticket)
			} else if string(node[schema.NodeKindFact]) != schema.AnchorKind {
				// Skip child if it isn't an anchor node
				continue
			}

			// Anchor offsets are stored as decimal-string facts; malformed
			// offsets are logged and the anchor skipped rather than failing
			// the whole request.
			anchorStart, err := strconv.Atoi(string(node[schema.AnchorStartFact]))
			if err != nil {
				log.Printf("Invalid anchor start offset %q for node %q: %v", node[schema.AnchorStartFact], ticket, err)
				continue
			}
			anchorEnd, err := strconv.Atoi(string(node[schema.AnchorEndFact]))
			if err != nil {
				log.Printf("Invalid anchor end offset %q for node %q: %v", node[schema.AnchorEndFact], ticket, err)
				continue
			}

			if loc.Kind == xpb.Location_SPAN {
				// Check if anchor fits within/around requested source text window
				if !xrefs.InSpanBounds(req.SpanKind, int32(anchorStart), int32(anchorEnd), loc.Start.ByteOffset, loc.End.ByteOffset) {
					continue
				} else if anchorStart > anchorEnd {
					log.Printf("Invalid anchor offset span %d:%d", anchorStart, anchorEnd)
					continue
				}
			}

			// Forward, non-childof edges lead from the anchor to the nodes
			// it decorates.
			targets, err := getEdges(ctx, g.gs, anchor, func(e *spb.Entry) bool {
				return schema.EdgeDirection(e.EdgeKind) == schema.Forward && e.EdgeKind != schema.ChildOfEdge
			})
			if err != nil {
				return nil, fmt.Errorf("failed to retrieve targets of anchor %v: %v", anchor, err)
			}
			if len(targets) == 0 {
				log.Printf("Anchor missing forward edges: {%+v}", anchor)
				continue
			}

			// Include the anchor node itself only if the filters match facts.
			if node := filterNode(patterns, anchorNodeReply.Nodes[ticket]); node != nil {
				reply.Nodes[ticket] = node
			}
			for _, edge := range targets {
				targetTicket := kytheuri.ToString(edge.Target)
				targetSet.Add(targetTicket)
				reply.Reference = append(reply.Reference, &xpb.DecorationsReply_Reference{
					SourceTicket: ticket,
					Kind:         edge.Kind,
					TargetTicket: targetTicket,
					AnchorStart:  norm.ByteOffset(int32(anchorStart)),
					AnchorEnd:    norm.ByteOffset(int32(anchorEnd)),
				})
			}
		}
		// Deterministic output order: references sorted by anchor span.
		sort.Sort(bySpan(reply.Reference))

		// Only request Nodes when there are fact filters given.
		if len(req.Filter) > 0 {
			// Ensure returned nodes are not duplicated.
			for ticket := range reply.Nodes {
				targetSet.Remove(ticket)
			}

			// Batch request all Reference target nodes
			nodesReply, err := g.Nodes(ctx, &xpb.NodesRequest{
				Ticket: targetSet.Slice(),
				Filter: req.Filter,
			})
			if err != nil {
				return nil, fmt.Errorf("failure getting reference target nodes: %v", err)
			}
			for ticket, node := range nodesReply.Nodes {
				reply.Nodes[ticket] = node
			}
		}
	}

	return reply, nil
}
func displayEdgeGraph(reply *xpb.EdgesReply) error { nodes := xrefs.NodesMap(reply.Nodes) edges := make(map[string]map[string]stringset.Set) for source, es := range reply.EdgeSets { for gKind, g := range es.Groups { for _, edge := range g.Edge { tgt := edge.TargetTicket src, kind := source, gKind if schema.EdgeDirection(kind) == schema.Reverse { src, kind, tgt = tgt, schema.MirrorEdge(kind), src } groups, ok := edges[src] if !ok { groups = make(map[string]stringset.Set) edges[src] = groups } targets, ok := groups[kind] if !ok { targets = stringset.New() groups[kind] = targets } targets.Add(tgt) } } } if _, err := fmt.Println("digraph kythe {"); err != nil { return err } for ticket, node := range nodes { if _, err := fmt.Printf(` %q [label=<<table><tr><td colspan="2">%s</td></tr>`, ticket, html.EscapeString(ticket)); err != nil { return err } var facts []string for fact := range node { facts = append(facts, fact) } sort.Strings(facts) for _, fact := range facts { if _, err := fmt.Printf("<tr><td>%s</td><td>%s</td></tr>", html.EscapeString(fact), html.EscapeString(string(node[fact]))); err != nil { return err } } if _, err := fmt.Println("</table>> shape=plaintext];"); err != nil { return err } } if _, err := fmt.Println(); err != nil { return err } for src, groups := range edges { for kind, targets := range groups { for tgt := range targets { if _, err := fmt.Printf("\t%q -> %q [label=%q];\n", src, tgt, kind); err != nil { return err } } } } if _, err := fmt.Println("}"); err != nil { return err } return nil }
// Decorations implements part of the xrefs Service interface. func (t *Table) Decorations(ctx context.Context, req *xpb.DecorationsRequest) (*xpb.DecorationsReply, error) { if len(req.DirtyBuffer) > 0 { log.Println("TODO: implement DecorationsRequest.DirtyBuffer") return nil, errors.New("dirty buffers unimplemented") } else if req.GetLocation() == nil { // TODO(schroederc): allow empty location when given dirty buffer return nil, errors.New("missing location") } var decor srvpb.FileDecorations ticket := req.GetLocation().Ticket if err := t.Lookup(DecorationsKey(ticket), &decor); err == table.ErrNoSuchKey { return nil, fmt.Errorf("decorations not found for file %q", ticket) } else if err != nil { return nil, fmt.Errorf("lookup error for file decorations %q: %v", ticket, err) } loc, err := xrefs.NewNormalizer(decor.SourceText).Location(req.GetLocation()) if err != nil { return nil, err } reply := &xpb.DecorationsReply{Location: loc} if req.SourceText { reply.Encoding = decor.Encoding if loc.Kind == xpb.Location_FILE { reply.SourceText = decor.SourceText } else { reply.SourceText = decor.SourceText[loc.Start.ByteOffset:loc.End.ByteOffset] } } if req.References { nodeTickets := stringset.New() if loc.Kind == xpb.Location_FILE { reply.Reference = make([]*xpb.DecorationsReply_Reference, len(decor.Decoration)) for i, d := range decor.Decoration { reply.Reference[i] = decorationToReference(d) nodeTickets.Add(d.Anchor.Ticket) nodeTickets.Add(d.TargetTicket) } } else { for _, d := range decor.Decoration { if d.Anchor.StartOffset >= loc.Start.ByteOffset && d.Anchor.EndOffset <= loc.End.ByteOffset { reply.Reference = append(reply.Reference, decorationToReference(d)) nodeTickets.Add(d.Anchor.Ticket) nodeTickets.Add(d.TargetTicket) } } } nodesReply, err := t.Nodes(ctx, &xpb.NodesRequest{Ticket: nodeTickets.Slice()}) if err != nil { return nil, fmt.Errorf("error getting nodes: %v", err) } reply.Node = nodesReply.Node } return reply, nil }