// Nodes implements part of the Service interface.
func (g *GraphStoreService) Nodes(ctx context.Context, req *xpb.NodesRequest) (*xpb.NodesReply, error) {
	patterns := xrefs.ConvertFilters(req.Filter)

	var names []*spb.VName
	for _, ticket := range req.Ticket {
		name, err := kytheuri.ToVName(ticket)
		if err != nil {
			return nil, err
		}
		names = append(names, name)
	}

	nodes := make(map[string]*xpb.NodeInfo)
	for i, vname := range names {
		ticket := req.Ticket[i]
		info := &xpb.NodeInfo{Facts: make(map[string][]byte)}
		if err := g.gs.Read(ctx, &spb.ReadRequest{Source: vname}, func(entry *spb.Entry) error {
			if len(patterns) == 0 || xrefs.MatchesAny(entry.FactName, patterns) {
				info.Facts[entry.FactName] = entry.FactValue
			}
			return nil
		}); err != nil {
			return nil, err
		}
		if len(info.Facts) > 0 {
			nodes[ticket] = info
		}
	}
	return &xpb.NodesReply{Nodes: nodes}, nil
}

// Nodes implements part of the xrefs Service interface.
func (t *tableImpl) Nodes(ctx context.Context, req *xpb.NodesRequest) (*xpb.NodesReply, error) {
	reply := &xpb.NodesReply{}
	patterns := xrefs.ConvertFilters(req.Filter)

	for _, rawTicket := range req.Ticket {
		ticket, err := kytheuri.Fix(rawTicket)
		if err != nil {
			return nil, fmt.Errorf("invalid ticket %q: %v", rawTicket, err)
		}

		n, err := t.node(ctx, ticket)
		if err == table.ErrNoSuchKey {
			continue
		} else if err != nil {
			return nil, fmt.Errorf("lookup error for node %q: %v", ticket, err)
		}

		ni := &xpb.NodeInfo{Ticket: n.Ticket}
		for _, fact := range n.Fact {
			if len(patterns) == 0 || xrefs.MatchesAny(fact.Name, patterns) {
				ni.Fact = append(ni.Fact, &xpb.Fact{Name: fact.Name, Value: fact.Value})
			}
		}
		if len(ni.Fact) > 0 {
			reply.Node = append(reply.Node, ni)
		}
	}
	return reply, nil
}

// Nodes implements part of the Service interface.
func (g *GraphStoreService) Nodes(ctx context.Context, req *xpb.NodesRequest) (*xpb.NodesReply, error) {
	patterns := xrefs.ConvertFilters(req.Filter)

	var names []*spb.VName
	for _, ticket := range req.Ticket {
		name, err := kytheuri.ToVName(ticket)
		if err != nil {
			return nil, err
		}
		names = append(names, name)
	}

	var nodes []*xpb.NodeInfo
	for i, vname := range names {
		info := &xpb.NodeInfo{Ticket: req.Ticket[i]}
		if err := g.gs.Read(ctx, &spb.ReadRequest{Source: vname}, func(entry *spb.Entry) error {
			if len(patterns) == 0 || xrefs.MatchesAny(entry.FactName, patterns) {
				info.Fact = append(info.Fact, entryToFact(entry))
			}
			return nil
		}); err != nil {
			return nil, err
		}
		if len(info.Fact) > 0 {
			nodes = append(nodes, info)
		}
	}
	return &xpb.NodesReply{Node: nodes}, nil
}

// Nodes implements part of the xrefs Service interface.
func (t *tableImpl) Nodes(ctx context.Context, req *xpb.NodesRequest) (*xpb.NodesReply, error) {
	tickets, err := fixTickets(req.Ticket)
	if err != nil {
		return nil, err
	}

	rs, err := t.pagedEdgeSets(ctx, tickets)
	if err != nil {
		return nil, err
	}
	defer func() {
		// drain channel in case of errors
		for _ = range rs {
		}
	}()

	reply := &xpb.NodesReply{}
	patterns := xrefs.ConvertFilters(req.Filter)

	for r := range rs {
		if r.Err == table.ErrNoSuchKey {
			continue
		} else if r.Err != nil {
			return nil, r.Err
		}

		node := r.PagedEdgeSet.EdgeSet.Source
		ni := &xpb.NodeInfo{Ticket: node.Ticket}
		for _, f := range node.Fact {
			if len(patterns) == 0 || xrefs.MatchesAny(f.Name, patterns) {
				ni.Fact = append(ni.Fact, &xpb.Fact{Name: f.Name, Value: f.Value})
			}
		}
		if len(ni.Fact) > 0 {
			sort.Sort(xrefs.ByName(ni.Fact))
			reply.Node = append(reply.Node, ni)
		}
	}
	return reply, nil
}

// Nodes implements part of the xrefs.Service interface.
func (db *DB) Nodes(req *xpb.NodesRequest) (*xpb.NodesReply, error) {
	reply := &xpb.NodesReply{}
	patterns := xrefs.ConvertFilters(req.Filter)

	for _, t := range req.Ticket {
		uri, err := kytheuri.Parse(t)
		if err != nil {
			return nil, fmt.Errorf("invalid ticket %q: %v", t, err)
		}

		rows, err := db.nodeFactsStmt.Query(uri.Signature, uri.Corpus, uri.Root, uri.Language, uri.Path)
		if err != nil {
			return nil, fmt.Errorf("sql select error for node %q: %v", t, err)
		}

		var facts []*xpb.Fact
		for rows.Next() {
			var fact xpb.Fact
			err = rows.Scan(&fact.Name, &fact.Value)
			if err != nil {
				rows.Close()
				return nil, fmt.Errorf("sql scan error for node %q: %v", t, err)
			}
			if len(patterns) == 0 || xrefs.MatchesAny(fact.Name, patterns) {
				facts = append(facts, &fact)
			}
		}
		if err := rows.Close(); err != nil {
			return nil, fmt.Errorf("error closing rows for node %q: %v", t, err)
		}

		if len(facts) != 0 {
			reply.Node = append(reply.Node, &xpb.NodeInfo{
				Ticket: t,
				Fact:   facts,
			})
		}
	}
	return reply, nil
}

// Nodes implements part of the xrefs Service interface.
func (t *tableImpl) Nodes(ctx context.Context, req *xpb.NodesRequest) (*xpb.NodesReply, error) {
	tickets, err := xrefs.FixTickets(req.Ticket)
	if err != nil {
		return nil, err
	}

	rs, err := t.pagedEdgeSets(ctx, tickets)
	if err != nil {
		return nil, err
	}
	defer func() {
		// drain channel in case of errors
		for _ = range rs {
		}
	}()

	reply := &xpb.NodesReply{Nodes: make(map[string]*xpb.NodeInfo, len(req.Ticket))}
	patterns := xrefs.ConvertFilters(req.Filter)

	for r := range rs {
		if r.Err == table.ErrNoSuchKey {
			continue
		} else if r.Err != nil {
			return nil, r.Err
		}

		node := r.PagedEdgeSet.Source
		ni := &xpb.NodeInfo{Facts: make(map[string][]byte, len(node.Fact))}
		for _, f := range node.Fact {
			if len(patterns) == 0 || xrefs.MatchesAny(f.Name, patterns) {
				ni.Facts[f.Name] = f.Value
			}
		}
		if len(ni.Facts) > 0 {
			reply.Nodes[node.Ticket] = ni
		}
	}
	return reply, nil
}

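// The sketch below is not part of the implementations in this file; it is a
// minimal usage example (assuming a value `xs` satisfying the xrefs Service
// interface implemented above, and the same imports as the surrounding code)
// of requesting only the node-kind fact for a set of tickets. The function and
// variable names are illustrative; "/kythe/node/kind" is the standard Kythe
// fact name for a node's kind.
func exampleNodeKinds(ctx context.Context, xs xrefs.Service, tickets []string) (map[string]string, error) {
	reply, err := xs.Nodes(ctx, &xpb.NodesRequest{
		Ticket: tickets,
		Filter: []string{"/kythe/node/kind"},
	})
	if err != nil {
		return nil, err
	}
	// reply.Nodes is keyed by node ticket; each NodeInfo carries only the
	// facts that matched the request's filters.
	kinds := make(map[string]string, len(reply.Nodes))
	for ticket, info := range reply.Nodes {
		kinds[ticket] = string(info.Facts["/kythe/node/kind"])
	}
	return kinds, nil
}
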
// Decorations implements part of the xrefs Service interface.
func (t *tableImpl) Decorations(ctx context.Context, req *xpb.DecorationsRequest) (*xpb.DecorationsReply, error) {
	if req.GetLocation() == nil || req.GetLocation().Ticket == "" {
		return nil, errors.New("missing location")
	}

	ticket, err := kytheuri.Fix(req.GetLocation().Ticket)
	if err != nil {
		return nil, fmt.Errorf("invalid ticket %q: %v", req.GetLocation().Ticket, err)
	}

	decor, err := t.fileDecorations(ctx, ticket)
	if err == table.ErrNoSuchKey {
		return nil, xrefs.ErrDecorationsNotFound
	} else if err != nil {
		return nil, fmt.Errorf("lookup error for file decorations %q: %v", ticket, err)
	}

	text := decor.File.Text
	if len(req.DirtyBuffer) > 0 {
		text = req.DirtyBuffer
	}
	norm := xrefs.NewNormalizer(text)

	loc, err := norm.Location(req.GetLocation())
	if err != nil {
		return nil, err
	}

	reply := &xpb.DecorationsReply{Location: loc}

	if req.SourceText {
		reply.Encoding = decor.File.Encoding
		if loc.Kind == xpb.Location_FILE {
			reply.SourceText = text
		} else {
			reply.SourceText = text[loc.Start.ByteOffset:loc.End.ByteOffset]
		}
	}

	if req.References {
		patterns := xrefs.ConvertFilters(req.Filter)

		var patcher *xrefs.Patcher
		if len(req.DirtyBuffer) > 0 {
			patcher = xrefs.NewPatcher(decor.File.Text, req.DirtyBuffer)
		}

		// The span with which to constrain the set of returned anchor references.
		var startBoundary, endBoundary int32
		spanKind := req.SpanKind
		if loc.Kind == xpb.Location_FILE {
			startBoundary = 0
			endBoundary = int32(len(text))
			spanKind = xpb.DecorationsRequest_WITHIN_SPAN
		} else {
			startBoundary = loc.Start.ByteOffset
			endBoundary = loc.End.ByteOffset
		}

		reply.Reference = make([]*xpb.DecorationsReply_Reference, 0, len(decor.Decoration))
		reply.Nodes = make(map[string]*xpb.NodeInfo)

		seenTarget := stringset.New()

		// Reference.TargetTicket -> NodeInfo (superset of reply.Nodes)
		var nodes map[string]*xpb.NodeInfo
		if len(patterns) > 0 {
			nodes = make(map[string]*xpb.NodeInfo)
			for _, n := range decor.Target {
				nodes[n.Ticket] = nodeToInfo(patterns, n)
			}
		}

		// Reference.TargetTicket -> []Reference set
		var refs map[string][]*xpb.DecorationsReply_Reference
		// ExpandedAnchor.Ticket -> ExpandedAnchor
		var defs map[string]*srvpb.ExpandedAnchor
		if req.TargetDefinitions {
			refs = make(map[string][]*xpb.DecorationsReply_Reference)

			reply.DefinitionLocations = make(map[string]*xpb.Anchor)

			defs = make(map[string]*srvpb.ExpandedAnchor)
			for _, def := range decor.TargetDefinitions {
				defs[def.Ticket] = def
			}
		}

		for _, d := range decor.Decoration {
			start, end, exists := patcher.Patch(d.Anchor.StartOffset, d.Anchor.EndOffset)
			// Filter non-existent anchor.  Anchors can no longer exist if we were
			// given a dirty buffer and the anchor was inside a changed region.
			if exists {
				if xrefs.InSpanBounds(spanKind, start, end, startBoundary, endBoundary) {
					d.Anchor.StartOffset = start
					d.Anchor.EndOffset = end

					r := decorationToReference(norm, d)
					if req.TargetDefinitions {
						if def, ok := defs[d.TargetDefinition]; ok {
							reply.DefinitionLocations[d.TargetDefinition] = a2a(def, false).Anchor
						} else {
							refs[r.TargetTicket] = append(refs[r.TargetTicket], r)
						}
					} else {
						r.TargetDefinition = ""
					}
					reply.Reference = append(reply.Reference, r)

					if !seenTarget.Contains(r.TargetTicket) && nodes != nil {
						reply.Nodes[r.TargetTicket] = nodes[r.TargetTicket]
						seenTarget.Add(r.TargetTicket)
					}
				}
			}
		}

		// Only compute target definitions if the serving data doesn't contain any
		// TODO(schroederc): remove this once serving data is always populated
		if req.TargetDefinitions && len(defs) == 0 {
			targetTickets := make([]string, 0, len(refs))
			for ticket := range refs {
				targetTickets = append(targetTickets, ticket)
			}

			defs, err := xrefs.SlowDefinitions(t, ctx, targetTickets)
			if err != nil {
				return nil, fmt.Errorf("error retrieving target definitions: %v", err)
			}

			reply.DefinitionLocations = make(map[string]*xpb.Anchor, len(defs))
			for tgt, def := range defs {
				for _, ref := range refs[tgt] {
					if def.Ticket != ref.SourceTicket {
						ref.TargetDefinition = def.Ticket
						if _, ok := reply.DefinitionLocations[def.Ticket]; !ok {
							reply.DefinitionLocations[def.Ticket] = def
						}
					}
				}
			}
		}
	}

	return reply, nil
}

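// A minimal, illustrative sketch (not part of the implementation above) of
// requesting decorations for an entire file: source text plus references, with
// a fact filter so the reply also carries node kinds for reference targets.
// `xs` is assumed to be a value implementing the xrefs Service interface shown
// here, and `fileTicket` a Kythe file ticket; both names are hypothetical.
func exampleFileDecorations(ctx context.Context, xs xrefs.Service, fileTicket string) (*xpb.DecorationsReply, error) {
	return xs.Decorations(ctx, &xpb.DecorationsRequest{
		// Leaving Location.Kind unset selects the whole file (Location_FILE).
		Location:   &xpb.Location{Ticket: fileTicket},
		SourceText: true,
		References: true,
		Filter:     []string{"/kythe/node/kind"},
	})
}
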
// edges handles a single paged edge lookup over req.Tickets, honoring the page
// size/token, the kind and fact filters, and the TotalOnly switch carried in
// the edgesRequest.
func (t *tableImpl) edges(ctx context.Context, req edgesRequest) (*xpb.EdgesReply, error) {
	stats := filterStats{
		max: int(req.PageSize),
	}
	if req.TotalOnly {
		stats.max = 0
	} else if stats.max < 0 {
		return nil, fmt.Errorf("invalid page_size: %d", req.PageSize)
	} else if stats.max == 0 {
		stats.max = defaultPageSize
	} else if stats.max > maxPageSize {
		stats.max = maxPageSize
	}

	if req.PageToken != "" {
		rec, err := base64.StdEncoding.DecodeString(req.PageToken)
		if err != nil {
			return nil, fmt.Errorf("invalid page_token: %q", req.PageToken)
		}
		var t ipb.PageToken
		if err := proto.Unmarshal(rec, &t); err != nil || t.Index < 0 {
			return nil, fmt.Errorf("invalid page_token: %q", req.PageToken)
		}
		stats.skip = int(t.Index)
	}
	pageToken := stats.skip

	nodeTickets := stringset.New()

	rs, err := t.pagedEdgeSets(ctx, req.Tickets)
	if err != nil {
		return nil, err
	}
	defer func() {
		// drain channel in case of errors or early return
		for _ = range rs {
		}
	}()

	patterns := xrefs.ConvertFilters(req.Filters)

	reply := &xpb.EdgesReply{
		EdgeSets: make(map[string]*xpb.EdgeSet),
		Nodes:    make(map[string]*xpb.NodeInfo),

		TotalEdgesByKind: make(map[string]int64),
	}
	for r := range rs {
		if r.Err == table.ErrNoSuchKey {
			continue
		} else if r.Err != nil {
			return nil, r.Err
		}

		pes := r.PagedEdgeSet
		countEdgeKinds(pes, req.Kinds, reply.TotalEdgesByKind)

		// Don't scan the EdgeSet_Groups if we're already at the specified page_size.
		if stats.total == stats.max {
			continue
		}

		groups := make(map[string]*xpb.EdgeSet_Group)
		for _, grp := range pes.Group {
			if req.Kinds == nil || req.Kinds(grp.Kind) {
				ng, ns := stats.filter(grp)
				if ng != nil {
					for _, n := range ns {
						if len(patterns) > 0 && !nodeTickets.Contains(n.Ticket) {
							nodeTickets.Add(n.Ticket)
							reply.Nodes[n.Ticket] = nodeToInfo(patterns, n)
						}
					}
					groups[grp.Kind] = ng
					if stats.total == stats.max {
						break
					}
				}
			}
		}

		// TODO(schroederc): ensure that pes.EdgeSet.Groups and pes.PageIndexes of
		// the same kind are grouped together in the EdgesReply

		if stats.total != stats.max {
			for _, idx := range pes.PageIndex {
				if req.Kinds == nil || req.Kinds(idx.EdgeKind) {
					if stats.skipPage(idx) {
						log.Printf("Skipping EdgePage: %s", idx.PageKey)
						continue
					}

					log.Printf("Retrieving EdgePage: %s", idx.PageKey)
					ep, err := t.edgePage(ctx, idx.PageKey)
					if err == table.ErrNoSuchKey {
						return nil, fmt.Errorf("internal error: missing edge page: %q", idx.PageKey)
					} else if err != nil {
						return nil, fmt.Errorf("edge page lookup error (page key: %q): %v", idx.PageKey, err)
					}

					ng, ns := stats.filter(ep.EdgesGroup)
					if ng != nil {
						for _, n := range ns {
							if len(patterns) > 0 && !nodeTickets.Contains(n.Ticket) {
								nodeTickets.Add(n.Ticket)
								reply.Nodes[n.Ticket] = nodeToInfo(patterns, n)
							}
						}
						groups[ep.EdgesGroup.Kind] = ng
						if stats.total == stats.max {
							break
						}
					}
				}
			}
		}

		if len(groups) > 0 {
			reply.EdgeSets[pes.Source.Ticket] = &xpb.EdgeSet{Groups: groups}

			if len(patterns) > 0 && !nodeTickets.Contains(pes.Source.Ticket) {
				nodeTickets.Add(pes.Source.Ticket)
				reply.Nodes[pes.Source.Ticket] = nodeToInfo(patterns, pes.Source)
			}
		}
	}

	totalEdgesPossible := int(sumEdgeKinds(reply.TotalEdgesByKind))
	if stats.total > stats.max {
		log.Panicf("totalEdges greater than maxEdges: %d > %d", stats.total, stats.max)
	} else if pageToken+stats.total > totalEdgesPossible && pageToken <= totalEdgesPossible {
		log.Panicf("pageToken+totalEdges greater than totalEdgesPossible: %d+%d > %d", pageToken, stats.total, totalEdgesPossible)
	}

	if pageToken+stats.total != totalEdgesPossible && stats.total != 0 {
		rec, err := proto.Marshal(&ipb.PageToken{Index: int32(pageToken + stats.total)})
		if err != nil {
			return nil, fmt.Errorf("internal error: error marshalling page token: %v", err)
		}
		reply.NextPageToken = base64.StdEncoding.EncodeToString(rec)
	}

	return reply, nil
}

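// Illustrative only (assuming an xrefs Service value `xs` as implemented
// above, and the same imports as the surrounding code): a client pages through
// a node's edges by threading NextPageToken back into successive requests. The
// helper name is hypothetical; it tallies the returned edges by kind using the
// map-shaped EdgesReply produced above.
func exampleCountEdges(ctx context.Context, xs xrefs.Service, ticket string) (map[string]int, error) {
	counts := make(map[string]int)
	var pageToken string
	for {
		reply, err := xs.Edges(ctx, &xpb.EdgesRequest{
			Ticket:    []string{ticket},
			PageToken: pageToken,
		})
		if err != nil {
			return nil, err
		}
		for _, es := range reply.EdgeSets {
			for kind, grp := range es.Groups {
				counts[kind] += len(grp.Edge)
			}
		}
		// An empty NextPageToken marks the final page.
		if reply.NextPageToken == "" {
			return counts, nil
		}
		pageToken = reply.NextPageToken
	}
}
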
// Decorations implements part of the Service interface.
func (g *GraphStoreService) Decorations(ctx context.Context, req *xpb.DecorationsRequest) (*xpb.DecorationsReply, error) {
	if len(req.DirtyBuffer) > 0 {
		return nil, errors.New("UNIMPLEMENTED: dirty buffers")
	} else if req.GetLocation() == nil {
		// TODO(schroederc): allow empty location when given dirty buffer
		return nil, errors.New("missing location")
	}

	fileVName, err := kytheuri.ToVName(req.Location.Ticket)
	if err != nil {
		return nil, fmt.Errorf("invalid file ticket %q: %v", req.Location.Ticket, err)
	}

	text, encoding, err := getSourceText(ctx, g.gs, fileVName)
	if err != nil {
		return nil, fmt.Errorf("failed to retrieve file text: %v", err)
	}
	norm := xrefs.NewNormalizer(text)

	loc, err := norm.Location(req.GetLocation())
	if err != nil {
		return nil, err
	}

	reply := &xpb.DecorationsReply{
		Location: loc,
		Nodes:    make(map[string]*xpb.NodeInfo),
	}

	// Handle DecorationsRequest.SourceText switch
	if req.SourceText {
		if loc.Kind == xpb.Location_FILE {
			reply.SourceText = text
		} else {
			reply.SourceText = text[loc.Start.ByteOffset:loc.End.ByteOffset]
		}
		reply.Encoding = encoding
	}

	// Handle DecorationsRequest.References switch
	if req.References {
		// Traverse the following chain of edges:
		//   file --%/kythe/edge/childof-> []anchor --forwardEdgeKind-> []target
		//
		// Add []anchor and []target nodes to reply.Nodes
		// Add all {anchor, forwardEdgeKind, target} tuples to reply.Reference

		patterns := xrefs.ConvertFilters(req.Filter)

		children, err := getEdges(ctx, g.gs, fileVName, func(e *spb.Entry) bool {
			return e.EdgeKind == revChildOfEdgeKind
		})
		if err != nil {
			return nil, fmt.Errorf("failed to retrieve file children: %v", err)
		}

		targetSet := stringset.New()
		for _, edge := range children {
			anchor := edge.Target
			ticket := kytheuri.ToString(anchor)
			anchorNodeReply, err := g.Nodes(ctx, &xpb.NodesRequest{
				Ticket: []string{ticket},
			})
			if err != nil {
				return nil, fmt.Errorf("failure getting reference source node: %v", err)
			} else if len(anchorNodeReply.Nodes) != 1 {
				return nil, fmt.Errorf("found %d nodes for {%+v}", len(anchorNodeReply.Nodes), anchor)
			}

			node, ok := xrefs.NodesMap(anchorNodeReply.Nodes)[ticket]
			if !ok {
				return nil, fmt.Errorf("failed to find info for node %q", ticket)
			} else if string(node[schema.NodeKindFact]) != schema.AnchorKind {
				// Skip child if it isn't an anchor node
				continue
			}

			anchorStart, err := strconv.Atoi(string(node[schema.AnchorStartFact]))
			if err != nil {
				log.Printf("Invalid anchor start offset %q for node %q: %v", node[schema.AnchorStartFact], ticket, err)
				continue
			}
			anchorEnd, err := strconv.Atoi(string(node[schema.AnchorEndFact]))
			if err != nil {
				log.Printf("Invalid anchor end offset %q for node %q: %v", node[schema.AnchorEndFact], ticket, err)
				continue
			}

			if loc.Kind == xpb.Location_SPAN {
				// Check if anchor fits within/around requested source text window
				if !xrefs.InSpanBounds(req.SpanKind, int32(anchorStart), int32(anchorEnd), loc.Start.ByteOffset, loc.End.ByteOffset) {
					continue
				} else if anchorStart > anchorEnd {
					log.Printf("Invalid anchor offset span %d:%d", anchorStart, anchorEnd)
					continue
				}
			}

			targets, err := getEdges(ctx, g.gs, anchor, func(e *spb.Entry) bool {
				return schema.EdgeDirection(e.EdgeKind) == schema.Forward && e.EdgeKind != schema.ChildOfEdge
			})
			if err != nil {
				return nil, fmt.Errorf("failed to retrieve targets of anchor %v: %v", anchor, err)
			}
			if len(targets) == 0 {
				log.Printf("Anchor missing forward edges: {%+v}", anchor)
				continue
			}

			if node := filterNode(patterns, anchorNodeReply.Nodes[ticket]); node != nil {
				reply.Nodes[ticket] = node
			}
			for _, edge := range targets {
				targetTicket := kytheuri.ToString(edge.Target)
				targetSet.Add(targetTicket)
				reply.Reference = append(reply.Reference, &xpb.DecorationsReply_Reference{
					SourceTicket: ticket,
					Kind:         edge.Kind,
					TargetTicket: targetTicket,
					AnchorStart:  norm.ByteOffset(int32(anchorStart)),
					AnchorEnd:    norm.ByteOffset(int32(anchorEnd)),
				})
			}
		}
		sort.Sort(bySpan(reply.Reference))

		// Only request Nodes when there are fact filters given.
		if len(req.Filter) > 0 {
			// Ensure returned nodes are not duplicated.
			for ticket := range reply.Nodes {
				targetSet.Remove(ticket)
			}

			// Batch request all Reference target nodes
			nodesReply, err := g.Nodes(ctx, &xpb.NodesRequest{
				Ticket: targetSet.Slice(),
				Filter: req.Filter,
			})
			if err != nil {
				return nil, fmt.Errorf("failure getting reference target nodes: %v", err)
			}
			for ticket, node := range nodesReply.Nodes {
				reply.Nodes[ticket] = node
			}
		}
	}

	return reply, nil
}

// Edges implements part of the Service interface.
func (g *GraphStoreService) Edges(ctx context.Context, req *xpb.EdgesRequest) (*xpb.EdgesReply, error) {
	if len(req.Ticket) == 0 {
		return nil, errors.New("no tickets specified")
	} else if req.PageToken != "" {
		return nil, errors.New("UNIMPLEMENTED: page_token")
	}

	patterns := xrefs.ConvertFilters(req.Filter)
	allowedKinds := stringset.New(req.Kind...)
	targetSet := stringset.New()
	reply := &xpb.EdgesReply{
		EdgeSets: make(map[string]*xpb.EdgeSet),
		Nodes:    make(map[string]*xpb.NodeInfo),
	}

	for _, ticket := range req.Ticket {
		vname, err := kytheuri.ToVName(ticket)
		if err != nil {
			return nil, fmt.Errorf("invalid ticket %q: %v", ticket, err)
		}

		var (
			// EdgeKind -> TargetTicket -> OrdinalSet
			filteredEdges = make(map[string]map[string]map[int32]struct{})
			filteredFacts = make(map[string][]byte)
		)

		if err := g.gs.Read(ctx, &spb.ReadRequest{
			Source:   vname,
			EdgeKind: "*",
		}, func(entry *spb.Entry) error {
			edgeKind := entry.EdgeKind
			if edgeKind == "" {
				// node fact
				if len(patterns) > 0 && xrefs.MatchesAny(entry.FactName, patterns) {
					filteredFacts[entry.FactName] = entry.FactValue
				}
			} else {
				// edge
				edgeKind, ordinal, _ := schema.ParseOrdinal(edgeKind)
				if len(req.Kind) == 0 || allowedKinds.Contains(edgeKind) {
					targets, ok := filteredEdges[edgeKind]
					if !ok {
						targets = make(map[string]map[int32]struct{})
						filteredEdges[edgeKind] = targets
					}
					ticket := kytheuri.ToString(entry.Target)
					ordSet, ok := targets[ticket]
					if !ok {
						ordSet = make(map[int32]struct{})
						targets[ticket] = ordSet
					}
					ordSet[int32(ordinal)] = struct{}{}
				}
			}
			return nil
		}); err != nil {
			return nil, fmt.Errorf("failed to retrieve entries for ticket %q", ticket)
		}

		// Only add an EdgeSet if there are targets for the requested edge kinds.
		if len(filteredEdges) > 0 {
			groups := make(map[string]*xpb.EdgeSet_Group)
			for edgeKind, targets := range filteredEdges {
				g := &xpb.EdgeSet_Group{}
				for target, ordinals := range targets {
					for ordinal := range ordinals {
						g.Edge = append(g.Edge, &xpb.EdgeSet_Group_Edge{
							TargetTicket: target,
							Ordinal:      ordinal,
						})
					}
					targetSet.Add(target)
				}
				groups[edgeKind] = g
			}
			reply.EdgeSets[ticket] = &xpb.EdgeSet{
				Groups: groups,
			}

			// In addition, only add a NodeInfo if the filters have resulting facts.
			if len(filteredFacts) > 0 {
				reply.Nodes[ticket] = &xpb.NodeInfo{
					Facts: filteredFacts,
				}
			}
		}
	}

	// Only request Nodes when there are fact filters given.
	if len(req.Filter) > 0 {
		// Eliminate redundant work by removing already requested nodes from targetSet
		for ticket := range reply.Nodes {
			targetSet.Remove(ticket)
		}

		// Batch request all leftover target nodes
		nodesReply, err := g.Nodes(ctx, &xpb.NodesRequest{
			Ticket: targetSet.Slice(),
			Filter: req.Filter,
		})
		if err != nil {
			return nil, fmt.Errorf("failure getting target nodes: %v", err)
		}
		for ticket, node := range nodesReply.Nodes {
			reply.Nodes[ticket] = node
		}
	}

	return reply, nil
}

// Decorations implements part of the xrefs Service interface.
func (t *tableImpl) Decorations(ctx context.Context, req *xpb.DecorationsRequest) (*xpb.DecorationsReply, error) {
	if req.GetLocation() == nil || req.GetLocation().Ticket == "" {
		return nil, errors.New("missing location")
	}

	ticket, err := kytheuri.Fix(req.GetLocation().Ticket)
	if err != nil {
		return nil, fmt.Errorf("invalid ticket %q: %v", req.GetLocation().Ticket, err)
	}

	decor, err := t.fileDecorations(ctx, ticket)
	if err == table.ErrNoSuchKey {
		return nil, fmt.Errorf("decorations not found for file %q", ticket)
	} else if err != nil {
		return nil, fmt.Errorf("lookup error for file decorations %q: %v", ticket, err)
	}

	text := decor.SourceText
	if len(req.DirtyBuffer) > 0 {
		text = req.DirtyBuffer
	}
	norm := xrefs.NewNormalizer(text)

	loc, err := norm.Location(req.GetLocation())
	if err != nil {
		return nil, err
	}

	reply := &xpb.DecorationsReply{Location: loc}

	if req.SourceText {
		reply.Encoding = decor.Encoding
		if loc.Kind == xpb.Location_FILE {
			reply.SourceText = text
		} else {
			reply.SourceText = text[loc.Start.ByteOffset:loc.End.ByteOffset]
		}
	}

	if req.References {
		patterns := xrefs.ConvertFilters(req.Filter)
		nodeTickets := stringset.New()

		var patcher *xrefs.Patcher
		var offsetMapping map[string]span // Map from anchor ticket to patched span
		if len(req.DirtyBuffer) > 0 {
			patcher = xrefs.NewPatcher(decor.SourceText, req.DirtyBuffer)
			offsetMapping = make(map[string]span)
		}

		// The span with which to constrain the set of returned anchor references.
		var startBoundary, endBoundary int32
		if loc.Kind == xpb.Location_FILE {
			startBoundary = 0
			endBoundary = int32(len(text))
		} else {
			startBoundary = loc.Start.ByteOffset
			endBoundary = loc.End.ByteOffset
		}

		reply.Reference = make([]*xpb.DecorationsReply_Reference, 0, len(decor.Decoration))
		for _, d := range decor.Decoration {
			start, end, exists := patcher.Patch(d.Anchor.StartOffset, d.Anchor.EndOffset)
			// Filter non-existent anchor.  Anchors can no longer exist if we were
			// given a dirty buffer and the anchor was inside a changed region.
			if exists {
				if start >= startBoundary && end <= endBoundary {
					if offsetMapping != nil {
						// Save the patched span to update the corresponding facts of the
						// anchor node in reply.Node.
						offsetMapping[d.Anchor.Ticket] = span{start, end}
					}
					reply.Reference = append(reply.Reference, decorationToReference(norm, d))

					if len(patterns) > 0 && !nodeTickets.Contains(d.Target.Ticket) {
						nodeTickets.Add(d.Target.Ticket)
						reply.Node = append(reply.Node, nodeToInfo(patterns, d.Target))
					}
				}
			}
		}
	}

	return reply, nil
}

// Edges implements part of the xrefs Service interface.
func (t *tableImpl) Edges(ctx context.Context, req *xpb.EdgesRequest) (*xpb.EdgesReply, error) {
	tickets, err := fixTickets(req.Ticket)
	if err != nil {
		return nil, err
	}

	stats := filterStats{
		max: int(req.PageSize),
	}
	if stats.max < 0 {
		return nil, fmt.Errorf("invalid page_size: %d", req.PageSize)
	} else if stats.max == 0 {
		stats.max = defaultPageSize
	} else if stats.max > maxPageSize {
		stats.max = maxPageSize
	}

	if req.PageToken != "" {
		rec, err := base64.StdEncoding.DecodeString(req.PageToken)
		if err != nil {
			return nil, fmt.Errorf("invalid page_token: %q", req.PageToken)
		}
		var t srvpb.PageToken
		if err := proto.Unmarshal(rec, &t); err != nil || t.Index < 0 {
			return nil, fmt.Errorf("invalid page_token: %q", req.PageToken)
		}
		stats.skip = int(t.Index)
	}
	pageToken := stats.skip

	var totalEdgesPossible int

	allowedKinds := stringset.New(req.Kind...)
	nodeTickets := stringset.New()

	rs, err := t.pagedEdgeSets(ctx, tickets)
	if err != nil {
		return nil, err
	}
	defer func() {
		// drain channel in case of errors or early return
		for _ = range rs {
		}
	}()

	patterns := xrefs.ConvertFilters(req.Filter)

	reply := &xpb.EdgesReply{}
	for r := range rs {
		if r.Err == table.ErrNoSuchKey {
			continue
		} else if r.Err != nil {
			return nil, r.Err
		}

		pes := r.PagedEdgeSet
		totalEdgesPossible += int(pes.TotalEdges)

		var groups []*xpb.EdgeSet_Group
		for _, grp := range pes.EdgeSet.Group {
			if len(allowedKinds) == 0 || allowedKinds.Contains(grp.Kind) {
				ng, ns := stats.filter(grp)
				if ng != nil {
					for _, n := range ns {
						if len(patterns) > 0 && !nodeTickets.Contains(n.Ticket) {
							nodeTickets.Add(n.Ticket)
							reply.Node = append(reply.Node, nodeToInfo(patterns, n))
						}
					}
					groups = append(groups, ng)
					if stats.total == stats.max {
						break
					}
				}
			}
		}

		// TODO(schroederc): ensure that pes.EdgeSet.Groups and pes.PageIndexes of
		// the same kind are grouped together in the EdgesReply

		if stats.total != stats.max {
			for _, idx := range pes.PageIndex {
				if len(allowedKinds) == 0 || allowedKinds.Contains(idx.EdgeKind) {
					if stats.skipPage(idx) {
						log.Printf("Skipping EdgePage: %s", idx.PageKey)
						continue
					}

					log.Printf("Retrieving EdgePage: %s", idx.PageKey)
					ep, err := t.edgePage(ctx, idx.PageKey)
					if err == table.ErrNoSuchKey {
						return nil, fmt.Errorf("missing edge page: %q", idx.PageKey)
					} else if err != nil {
						return nil, fmt.Errorf("edge page lookup error (page key: %q): %v", idx.PageKey, err)
					}

					ng, ns := stats.filter(ep.EdgesGroup)
					if ng != nil {
						for _, n := range ns {
							if len(patterns) > 0 && !nodeTickets.Contains(n.Ticket) {
								nodeTickets.Add(n.Ticket)
								reply.Node = append(reply.Node, nodeToInfo(patterns, n))
							}
						}
						groups = append(groups, ng)
						if stats.total == stats.max {
							break
						}
					}
				}
			}
		}

		if len(groups) > 0 {
			reply.EdgeSet = append(reply.EdgeSet, &xpb.EdgeSet{
				SourceTicket: pes.EdgeSet.Source.Ticket,
				Group:        groups,
			})

			if len(patterns) > 0 && !nodeTickets.Contains(pes.EdgeSet.Source.Ticket) {
				nodeTickets.Add(pes.EdgeSet.Source.Ticket)
				reply.Node = append(reply.Node, nodeToInfo(patterns, pes.EdgeSet.Source))
			}
		}

		if stats.total == stats.max {
			break
		}
	}

	if stats.total > stats.max {
		log.Panicf("totalEdges greater than maxEdges: %d > %d", stats.total, stats.max)
	} else if pageToken+stats.total > totalEdgesPossible && pageToken <= totalEdgesPossible {
		log.Panicf("pageToken+totalEdges greater than totalEdgesPossible: %d+%d > %d", pageToken, stats.total, totalEdgesPossible)
	}

	if pageToken+stats.total != totalEdgesPossible && stats.total != 0 {
		// TODO: take into account an empty last page (due to kind filters)
		rec, err := proto.Marshal(&srvpb.PageToken{Index: int32(pageToken + stats.total)})
		if err != nil {
			return nil, fmt.Errorf("error marshalling page token: %v", err)
		}
		reply.NextPageToken = base64.StdEncoding.EncodeToString(rec)
	}

	return reply, nil
}

// Nodes implements part of the xrefs.Interface.
func (d *DB) Nodes(ctx context.Context, req *xpb.NodesRequest) (*xpb.NodesReply, error) {
	tickets, err := xrefs.FixTickets(req.Ticket)
	if err != nil {
		return nil, err
	}

	setQ, args := sqlSetQuery(1, tickets)
	rs, err := d.Query(fmt.Sprintf("SELECT * FROM Nodes WHERE ticket IN %s;", setQ), args...)
	if err != nil {
		return nil, fmt.Errorf("error querying for nodes: %v", err)
	}
	defer closeRows(rs)

	reply := xpb.NodesReply{Nodes: make(map[string]*xpb.NodeInfo, len(tickets))}
	for rs.Next() {
		var ticket, nodeKind string
		var subkind, textEncoding sql.NullString
		var text, otherFacts []byte
		var startOffset, endOffset, snippetStart, snippetEnd sql.NullInt64
		var otherFactsNum int
		if err := rs.Scan(&ticket, &nodeKind, &subkind, &text, &textEncoding, &startOffset, &endOffset, &snippetStart, &snippetEnd, &otherFactsNum, &otherFacts); err != nil {
			return nil, fmt.Errorf("error scanning nodes: %v", err)
		}

		n := new(xpb.NodeInfo)
		if otherFactsNum > 0 {
			if err := proto.Unmarshal(otherFacts, n); err != nil {
				return nil, fmt.Errorf("unexpected node internal format: %v", err)
			}
		}
		if n.Facts == nil {
			n.Facts = make(map[string][]byte)
		}

		// Facts stored in dedicated columns are keyed by their standard Kythe fact names.
		if nodeKind != "" {
			n.Facts["/kythe/node/kind"] = []byte(nodeKind)
		}
		if subkind.Valid {
			n.Facts["/kythe/subkind"] = []byte(subkind.String)
		}
		if text != nil { // TODO(schroederc): NULL text
			n.Facts["/kythe/text"] = text
		}
		if textEncoding.Valid {
			n.Facts["/kythe/text/encoding"] = []byte(textEncoding.String)
		}
		if startOffset.Valid {
			n.Facts["/kythe/loc/start"] = []byte(strconv.FormatInt(startOffset.Int64, 10))
		}
		if endOffset.Valid {
			n.Facts["/kythe/loc/end"] = []byte(strconv.FormatInt(endOffset.Int64, 10))
		}
		if snippetStart.Valid {
			n.Facts["/kythe/snippet/start"] = []byte(strconv.FormatInt(snippetStart.Int64, 10))
		}
		if snippetEnd.Valid {
			n.Facts["/kythe/snippet/end"] = []byte(strconv.FormatInt(snippetEnd.Int64, 10))
		}

		if len(req.Filter) > 0 {
			patterns := xrefs.ConvertFilters(req.Filter)
			matched := make(map[string][]byte, len(n.Facts))
			for name, value := range n.Facts {
				if xrefs.MatchesAny(name, patterns) {
					matched[name] = value
				}
			}
			n.Facts = matched
		}

		if len(n.Facts) > 0 {
			reply.Nodes[ticket] = n
		}
	}
	return &reply, nil
}

// Edges implements part of the Service interface.
func (g *GraphStoreService) Edges(ctx context.Context, req *xpb.EdgesRequest) (*xpb.EdgesReply, error) {
	if len(req.Ticket) == 0 {
		return nil, errors.New("no tickets specified")
	} else if req.PageToken != "" {
		return nil, errors.New("UNIMPLEMENTED: page_token")
	}

	patterns := xrefs.ConvertFilters(req.Filter)
	allowedKinds := stringset.New(req.Kind...)
	targetSet := stringset.New()
	reply := new(xpb.EdgesReply)

	for _, ticket := range req.Ticket {
		vname, err := kytheuri.ToVName(ticket)
		if err != nil {
			return nil, fmt.Errorf("invalid ticket %q: %v", ticket, err)
		}

		var (
			// EdgeKind -> StringSet<TargetTicket>
			filteredEdges = make(map[string]stringset.Set)
			filteredFacts []*xpb.Fact
		)

		if err := g.gs.Read(ctx, &spb.ReadRequest{
			Source:   vname,
			EdgeKind: "*",
		}, func(entry *spb.Entry) error {
			edgeKind := entry.EdgeKind
			if edgeKind == "" {
				// node fact
				if len(patterns) > 0 && xrefs.MatchesAny(entry.FactName, patterns) {
					filteredFacts = append(filteredFacts, entryToFact(entry))
				}
			} else {
				// edge
				if len(req.Kind) == 0 || allowedKinds.Contains(edgeKind) {
					targets := filteredEdges[edgeKind]
					if targets == nil {
						targets = stringset.New()
						filteredEdges[edgeKind] = targets
					}
					targets.Add(kytheuri.ToString(entry.Target))
				}
			}
			return nil
		}); err != nil {
			return nil, fmt.Errorf("failed to retrieve entries for ticket %q", ticket)
		}

		// Only add an EdgeSet if there are targets for the requested edge kinds.
		if len(filteredEdges) > 0 {
			var groups []*xpb.EdgeSet_Group
			for edgeKind, targets := range filteredEdges {
				g := &xpb.EdgeSet_Group{Kind: edgeKind}
				for target := range targets {
					g.TargetTicket = append(g.TargetTicket, target)
					targetSet.Add(target)
				}
				groups = append(groups, g)
			}
			reply.EdgeSet = append(reply.EdgeSet, &xpb.EdgeSet{
				SourceTicket: ticket,
				Group:        groups,
			})

			// In addition, only add a NodeInfo if the filters have resulting facts.
			if len(filteredFacts) > 0 {
				reply.Node = append(reply.Node, &xpb.NodeInfo{
					Ticket: ticket,
					Fact:   filteredFacts,
				})
			}
		}
	}

	// Only request Nodes when there are fact filters given.
	if len(req.Filter) > 0 {
		// Ensure reply.Node is a unique set by removing already requested nodes from targetSet
		for _, n := range reply.Node {
			targetSet.Remove(n.Ticket)
		}

		// Batch request all leftover target nodes
		nodesReply, err := g.Nodes(ctx, &xpb.NodesRequest{
			Ticket: targetSet.Slice(),
			Filter: req.Filter,
		})
		if err != nil {
			return nil, fmt.Errorf("failure getting target nodes: %v", err)
		}
		reply.Node = append(reply.Node, nodesReply.Node...)
	}

	return reply, nil
}
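
// Illustrative only: with the older slice-based reply shape used directly
// above (EdgeSet/Group/TargetTicket), a caller might collect the target
// tickets of a single edge kind from an EdgesReply. The helper name and the
// `reply`/`kind` parameters are hypothetical caller-side names.
func exampleTargetsOfKind(reply *xpb.EdgesReply, kind string) []string {
	var targets []string
	for _, es := range reply.EdgeSet {
		for _, grp := range es.Group {
			if grp.Kind == kind {
				targets = append(targets, grp.TargetTicket...)
			}
		}
	}
	return targets
}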