Example #1
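// Opens the kindex file named by the single positional argument and writes it
// to stdout as JSON: the whole index (including file contents) when *printFiles
// is set, otherwise only its compilation unit proto.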
func main() {
	flag.Parse()
	if len(flag.Args()) == 0 {
		flagutil.UsageError("missing kindex-file path")
	} else if len(flag.Args()) > 1 {
		flagutil.UsageErrorf("unknown arguments: %v", flag.Args()[1:])
	}

	path := flag.Arg(0)
	idx, err := kindex.Open(context.Background(), path)
	if err != nil {
		log.Fatalf("Error reading %q: %v", path, err)
	}

	en := json.NewEncoder(os.Stdout)
	if *printFiles {
		if err := en.Encode(idx); err != nil {
			log.Fatalf("Error encoding JSON: %v", err)
		}
	} else {
		if err := en.Encode(idx.Proto); err != nil {
			log.Fatalf("Error encoding JSON compilation: %v", err)
		}
	}
}
Example #2
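// dedup_stream: copies delimited records from stdin to stdout, skipping any
// record whose SHA-384 digest has already been seen, and logs how many
// duplicates were skipped.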
func main() {
	flag.Parse()
	if len(flag.Args()) != 0 {
		flagutil.UsageErrorf("unknown arguments: %v", flag.Args())
	}

	written := make(map[[sha512.Size384]byte]struct{})

	var skipped uint64
	rd := delimited.NewReader(os.Stdin)
	wr := delimited.NewWriter(os.Stdout)
	for {
		rec, err := rd.Next()
		if err == io.EOF {
			break
		} else if err != nil {
			log.Fatal(err)
		}

		hash := sha512.Sum384(rec)
		if _, ok := written[hash]; ok {
			skipped++
			continue
		}
		if err := wr.Put(rec); err != nil {
			log.Fatal(err)
		}
		written[hash] = struct{}{}
	}
	log.Printf("dedup_stream: skipped %d records", skipped)
}
Example #3
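// write_entries: reads an entry stream from stdin and writes it to the
// --graphstore using --workers concurrent writers and batches of --batch_size,
// optionally recording a CPU profile to the file named by *profCPU.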
func main() {
	log.SetPrefix("write_entries: ")

	flag.Parse()
	if *numWorkers < 1 {
		flagutil.UsageErrorf("Invalid number of --workers %d (must be ≥ 1)", *numWorkers)
	} else if *batchSize < 1 {
		flagutil.UsageErrorf("Invalid --batch_size %d (must be ≥ 1)", *batchSize)
	} else if gs == nil {
		flagutil.UsageError("Missing --graphstore")
	}

	defer gsutil.LogClose(context.Background(), gs)
	gsutil.EnsureGracefulExit(gs)

	if *profCPU != "" {
		f, err := vfs.Create(context.Background(), *profCPU)
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()
		if err := pprof.StartCPUProfile(f); err != nil {
			log.Fatal(err)
		}
		defer pprof.StopCPUProfile()
	}

	writes := graphstore.BatchWrites(stream.ReadEntries(os.Stdin), *batchSize)

	var (
		wg         sync.WaitGroup
		numEntries uint64
	)
	wg.Add(*numWorkers)
	for i := 0; i < *numWorkers; i++ {
		go func() {
			defer wg.Done()
			num, err := writeEntries(context.Background(), gs, writes)
			if err != nil {
				log.Fatal(err)
			}

			atomic.AddUint64(&numEntries, num)
		}()
	}
	wg.Wait()

	log.Printf("Wrote %d entries", numEntries)
}
Example #4
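// Converts an entry stream read from stdin (delimited protos, or JSON with
// *readJSON): optionally sorts it, then emits a count, JSON entry sets grouped
// by (source, edge kind, target), JSON entries, or delimited protos.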
func main() {
	flag.Parse()
	if len(flag.Args()) > 0 {
		flagutil.UsageErrorf("unknown arguments: %v", flag.Args())
	}

	in := os.Stdin
	var entries <-chan *spb.Entry
	if *readJSON {
		entries = stream.ReadJSONEntries(in)
	} else {
		entries = stream.ReadEntries(in)
	}
	if *sortStream || *entrySets {
		entries = sortEntries(entries)
	}

	encoder := json.NewEncoder(os.Stdout)
	wr := delimited.NewWriter(os.Stdout)

	var set entrySet
	entryCount := 0
	for entry := range entries {
		if *countOnly {
			entryCount++
		} else if *entrySets {
			if !compare.VNamesEqual(set.Source, entry.Source) || !compare.VNamesEqual(set.Target, entry.Target) || set.EdgeKind != entry.EdgeKind {
				if len(set.Properties) != 0 {
					failOnErr(encoder.Encode(set))
				}
				set.Source = entry.Source
				set.EdgeKind = entry.EdgeKind
				set.Target = entry.Target
				set.Properties = make(map[string]string)
			}
			set.Properties[entry.FactName] = string(entry.FactValue)
		} else if *writeJSON {
			failOnErr(encoder.Encode(entry))
		} else {
			rec, err := proto.Marshal(entry)
			failOnErr(err)
			failOnErr(wr.Put(rec))
		}
	}
	if len(set.Properties) != 0 {
		failOnErr(encoder.Encode(set))
	}
	if *countOnly {
		fmt.Println(entryCount)
	}
}
Example #5
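// dedup_stream (UniqReader variant): copies delimited records from stdin to
// stdout, deduplicating them within the configured cache size, and logs the
// number of skipped records.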
func main() {
	flag.Parse()
	if flag.NArg() != 0 {
		flagutil.UsageErrorf("unknown arguments: %v", flag.Args())
	}

	rd, err := delimited.NewUniqReader(delimited.NewReader(os.Stdin), int(cacheSize.Bytes()))
	if err != nil {
		log.Fatalf("Error creating UniqReader: %v", err)
	}
	wr := delimited.NewWriter(os.Stdout)
	if err := delimited.Copy(wr, rd); err != nil {
		log.Fatal(err)
	}
	log.Printf("dedup_stream: skipped %d records", rd.Skipped())
}
Example #6
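// The same entry-stream conversions as Example #4, restructured around a
// stream.EntryReader callback and with an extra *uniqEntries mode that
// deduplicates the (sorted) stream before output.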
func main() {
	flag.Parse()
	if len(flag.Args()) > 0 {
		flagutil.UsageErrorf("unknown arguments: %v", flag.Args())
	}

	in := bufio.NewReaderSize(os.Stdin, 2*4096)
	out := bufio.NewWriter(os.Stdout)

	var rd stream.EntryReader
	if *readJSON {
		rd = stream.NewJSONReader(in)
	} else {
		rd = stream.NewReader(in)
	}

	if *sortStream || *entrySets || *uniqEntries {
		var err error
		rd, err = sortEntries(rd)
		failOnErr(err)
	}

	if *uniqEntries {
		rd = dedupEntries(rd)
	}

	switch {
	case *countOnly:
		var count int
		failOnErr(rd(func(_ *spb.Entry) error {
			count++
			return nil
		}))
		fmt.Println(count)
	case *entrySets:
		encoder := json.NewEncoder(out)
		var set entrySet
		failOnErr(rd(func(entry *spb.Entry) error {
			if !compare.VNamesEqual(set.Source, entry.Source) || !compare.VNamesEqual(set.Target, entry.Target) || set.EdgeKind != entry.EdgeKind {
				if len(set.Properties) != 0 {
					if err := encoder.Encode(set); err != nil {
						return err
					}
				}
				set.Source = entry.Source
				set.EdgeKind = entry.EdgeKind
				set.Target = entry.Target
				set.Properties = make(map[string]string)
			}
			set.Properties[entry.FactName] = string(entry.FactValue)
			return nil
		}))
		if len(set.Properties) != 0 {
			failOnErr(encoder.Encode(set))
		}
	case *writeJSON:
		encoder := json.NewEncoder(out)
		failOnErr(rd(func(entry *spb.Entry) error {
			return encoder.Encode(entry)
		}))
	default:
		wr := delimited.NewWriter(out)
		failOnErr(rd(func(entry *spb.Entry) error {
			rec, err := proto.Marshal(entry)
			if err != nil {
				return err
			}
			return wr.Put(rec)
		}))
	}
	failOnErr(out.Flush())
}
Example #7
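// Resolves --path against the local repository root (when one is found), looks
// the file up via the search service, requests its decorations, and prints as
// JSON every reference whose anchor spans the requested point, including the
// target node's names, type, and definition sites.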
func main() {
	flag.Parse()
	if flag.NArg() > 0 {
		flagutil.UsageErrorf("unknown non-flag argument(s): %v", flag.Args())
	} else if *offset < 0 && (*lineNumber < 0 || *columnOffset < 0) {
		flagutil.UsageError("non-negative --offset (or --line and --column) required")
	} else if *signature == "" && *path == "" {
		flagutil.UsageError("must provide at least --path or --signature")
	}

	defer (*apiFlag).Close()
	xs, idx = *apiFlag, *apiFlag

	relPath := *path
	if *localRepoRoot != "NONE" {
		if _, err := os.Stat(relPath); err == nil {
			absPath, err := filepath.Abs(relPath)
			if err != nil {
				log.Fatal(err)
			}
			if *dirtyBuffer == "" {
				*dirtyBuffer = absPath
			}

			kytheRoot := *localRepoRoot
			if kytheRoot == "" {
				kytheRoot = findKytheRoot(filepath.Dir(absPath))
			}
			if kytheRoot != "" {
				relPath, err = filepath.Rel(filepath.Join(kytheRoot, *root), absPath)
				if err != nil {
					log.Fatal(err)
				}
			}
		}
	}
	partialFile := &spb.VName{
		Signature: *signature,
		Corpus:    *corpus,
		Root:      *root,
		Path:      relPath,
		Language:  *language,
	}
	reply, err := idx.Search(ctx, &spb.SearchRequest{
		Partial: partialFile,
		Fact:    fileFacts,
	})
	if err != nil {
		log.Fatalf("Error locating file {%v}: %v", partialFile, err)
	}
	if len(reply.Ticket) == 0 {
		log.Fatalf("Could not locate file {%v}", partialFile)
	} else if len(reply.Ticket) > 1 {
		log.Fatalf("Ambiguous file {%v}; multiple results: %v", partialFile, reply.Ticket)
	}

	fileTicket := reply.Ticket[0]
	text := readDirtyBuffer(ctx)
	decor, err := xs.Decorations(ctx, &xpb.DecorationsRequest{
		// TODO(schroederc): limit Location to a SPAN around *offset
		Location:    &xpb.Location{Ticket: fileTicket},
		References:  true,
		SourceText:  true,
		DirtyBuffer: text,
		Filter: []string{
			schema.NodeKindFact,
			schema.SubkindFact,
			schema.AnchorLocFilter, // TODO(schroederc): remove once backwards-compatibility fix below is removed
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	if text == nil {
		text = decor.SourceText
	}
	nodes := xrefs.NodesMap(decor.Node)

	// Normalize point within source text
	point := normalizedPoint(text)

	en := json.NewEncoder(os.Stdout)
	for _, ref := range decor.Reference {
		var start, end int
		if ref.AnchorStart == nil || ref.AnchorEnd == nil {
			// TODO(schroederc): remove this backwards-compatibility fix
			start, end = parseAnchorSpan(nodes[ref.SourceTicket])
		} else {
			start, end = int(ref.AnchorStart.ByteOffset), int(ref.AnchorEnd.ByteOffset)
		}

		if start <= point && point < end {
			var r reference
			r.Span.Start = start
			r.Span.End = end
			r.Span.Text = string(text[start:end])
			r.Kind = strings.TrimPrefix(ref.Kind, schema.EdgePrefix)
			r.Node.Ticket = ref.TargetTicket

			node := nodes[ref.TargetTicket]
			r.Node.Kind = string(node[schema.NodeKindFact])
			r.Node.Subkind = string(node[schema.SubkindFact])

			if eReply, err := xrefs.AllEdges(ctx, xs, &xpb.EdgesRequest{
				Ticket: []string{ref.TargetTicket},
				Kind:   []string{schema.NamedEdge, schema.TypedEdge, definedAtEdge, definedBindingAtEdge},
			}); err != nil {
				log.Printf("WARNING: error getting edges for %q: %v", ref.TargetTicket, err)
			} else {
				edges := xrefs.EdgesMap(eReply.EdgeSet)[ref.TargetTicket]
				for _, name := range edges[schema.NamedEdge] {
					if uri, err := kytheuri.Parse(name); err != nil {
						log.Printf("WARNING: named node ticket (%q) could not be parsed: %v", name, err)
					} else {
						r.Node.Names = append(r.Node.Names, uri.Signature)
					}
				}

				if typed := edges[schema.TypedEdge]; len(typed) > 0 {
					r.Node.Typed = typed[0]
				}

				if !*skipDefinitions {
					defs := edges[definedAtEdge]
					if len(defs) == 0 {
						defs = edges[definedBindingAtEdge]
					}
					for _, defAnchor := range defs {
						def, err := completeDefinition(defAnchor)
						if err != nil {
							log.Printf("WARNING: failed to complete definition for %q: %v", defAnchor, err)
						} else {
							r.Node.Definitions = append(r.Node.Definitions, def)
						}
					}
				}
			}

			if err := en.Encode(r); err != nil {
				log.Fatal(err)
			}
		}
	}
}
Example #8
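// A later revision of the previous example: builds the file ticket directly as
// a Kythe URI and asks the xrefs service for decorations around the requested
// span, then prints each reference with its node facts, names, type, and
// definition sites.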
func main() {
	flag.Parse()
	if flag.NArg() > 0 {
		flagutil.UsageErrorf("unknown non-flag argument(s): %v", flag.Args())
	} else if *offset < 0 && (*lineNumber < 0 || *columnOffset < 0) {
		flagutil.UsageError("non-negative --offset (or --line and --column) required")
	} else if *path == "" {
		flagutil.UsageError("must provide --path")
	}

	defer (*apiFlag).Close()
	xs = *apiFlag

	relPath := *path
	if *localRepoRoot != "NONE" {
		if _, err := os.Stat(relPath); err == nil {
			absPath, err := filepath.Abs(relPath)
			if err != nil {
				log.Fatal(err)
			}
			if *dirtyBuffer == "" {
				*dirtyBuffer = absPath
			}

			kytheRoot := *localRepoRoot
			if kytheRoot == "" {
				kytheRoot = findKytheRoot(filepath.Dir(absPath))
			}
			if kytheRoot != "" {
				relPath, err = filepath.Rel(filepath.Join(kytheRoot, *root), absPath)
				if err != nil {
					log.Fatal(err)
				}
			}
		}
	}

	fileTicket := (&kytheuri.URI{Corpus: *corpus, Root: *root, Path: relPath}).String()
	point := &xpb.Location_Point{
		ByteOffset:   int32(*offset),
		LineNumber:   int32(*lineNumber),
		ColumnOffset: int32(*columnOffset),
	}
	dirtyBuffer := readDirtyBuffer(ctx)
	decor, err := xs.Decorations(ctx, &xpb.DecorationsRequest{
		Location: &xpb.Location{
			Ticket: fileTicket,
			Kind:   xpb.Location_SPAN,
			Start:  point,
			End:    point,
		},
		SpanKind:    xpb.DecorationsRequest_AROUND_SPAN,
		References:  true,
		SourceText:  true,
		DirtyBuffer: dirtyBuffer,
		Filter: []string{
			facts.NodeKind,
			facts.Subkind,
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	nodes := xrefs.NodesMap(decor.Nodes)

	en := json.NewEncoder(os.Stdout)
	for _, ref := range decor.Reference {
		start, end := int(ref.AnchorStart.ByteOffset), int(ref.AnchorEnd.ByteOffset)

		var r reference
		r.Span.Start = start
		r.Span.End = end
		if len(dirtyBuffer) > 0 {
			r.Span.Text = string(dirtyBuffer[start:end])
		} // TODO(schroederc): add option to get anchor text from DecorationsReply
		r.Kind = strings.TrimPrefix(ref.Kind, edges.Prefix)
		r.Node.Ticket = ref.TargetTicket

		node := nodes[ref.TargetTicket]
		r.Node.Kind = string(node[facts.NodeKind])
		r.Node.Subkind = string(node[facts.Subkind])

		// TODO(schroederc): use CrossReferences method
		if eReply, err := xrefs.AllEdges(ctx, xs, &gpb.EdgesRequest{
			Ticket: []string{ref.TargetTicket},
			Kind:   []string{edges.Named, edges.Typed, definedAtEdge, definedBindingAtEdge},
		}); err != nil {
			log.Printf("WARNING: error getting edges for %q: %v", ref.TargetTicket, err)
		} else {
			matching := xrefs.EdgesMap(eReply.EdgeSets)[ref.TargetTicket]
			for name := range matching[edges.Named] {
				if uri, err := kytheuri.Parse(name); err != nil {
					log.Printf("WARNING: named node ticket (%q) could not be parsed: %v", name, err)
				} else {
					r.Node.Names = append(r.Node.Names, uri.Signature)
				}
			}

			for typed := range matching[edges.Typed] {
				r.Node.Typed = typed
				break
			}

			if !*skipDefinitions {
				defs := matching[definedAtEdge]
				if len(defs) == 0 {
					defs = matching[definedBindingAtEdge]
				}
				for defAnchor := range defs {
					def, err := completeDefinition(defAnchor)
					if err != nil {
						log.Printf("WARNING: failed to complete definition for %q: %v", defAnchor, err)
					} else {
						r.Node.Definitions = append(r.Node.Definitions, def)
					}
				}
			}
		}

		if err := en.Encode(r); err != nil {
			log.Fatal(err)
		}
	}
}
Example #9
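// Serves the xrefs, filetree, and (if available) search APIs over gRPC, HTTP,
// and/or TLS, backed either by a --serving_table LevelDB or directly by a
// --graphstore, and optionally serves static files from the *publicResources
// directory.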
func main() {
	flag.Parse()
	if *servingTable == "" && gs == nil {
		flagutil.UsageError("missing either --serving_table or --graphstore")
	} else if *httpListeningAddr == "" && *grpcListeningAddr == "" && *tlsListeningAddr == "" {
		flagutil.UsageError("missing either --listen, --tls_listen, or --grpc_listen argument")
	} else if *servingTable != "" && gs != nil {
		flagutil.UsageError("--serving_table and --graphstore are mutually exclusive")
	} else if *tlsListeningAddr != "" && (*tlsCertFile == "" || *tlsKeyFile == "") {
		flagutil.UsageError("--tls_cert_file and --tls_key_file are required if given --tls_listen")
	} else if flag.NArg() > 0 {
		flagutil.UsageErrorf("unknown non-flag arguments given: %v", flag.Args())
	}

	var (
		xs xrefs.Service
		ft filetree.Service
		sr search.Service
	)

	ctx := context.Background()
	if *servingTable != "" {
		db, err := leveldb.Open(*servingTable, nil)
		if err != nil {
			log.Fatalf("Error opening db at %q: %v", *servingTable, err)
		}
		defer db.Close()
		tbl := table.ProtoBatchParallel{&table.KVProto{db}}
		xs = xsrv.NewCombinedTable(tbl)
		ft = &ftsrv.Table{tbl}
		sr = &srchsrv.Table{&table.KVInverted{db}}
	} else {
		log.Println("WARNING: serving directly from a GraphStore can be slow; you may want to use a --serving_table")
		if f, ok := gs.(filetree.Service); ok {
			log.Printf("Using %T directly as filetree service", gs)
			ft = f
		} else {
			m := filetree.NewMap()
			if err := m.Populate(ctx, gs); err != nil {
				log.Fatalf("Error populating file tree from GraphStore: %v", err)
			}
			ft = m
		}

		if x, ok := gs.(xrefs.Service); ok {
			log.Printf("Using %T directly as xrefs service", gs)
			xs = x
		} else {
			if err := xstore.EnsureReverseEdges(ctx, gs); err != nil {
				log.Fatalf("Error ensuring reverse edges in GraphStore: %v", err)
			}
			xs = xstore.NewGraphStoreService(gs)
		}

		if s, ok := gs.(search.Service); ok {
			log.Printf("Using %T directly as search service", gs)
			sr = s
		}
	}

	if sr == nil {
		log.Println("Search API not supported")
	}

	if *grpcListeningAddr != "" {
		srv := grpc.NewServer()
		xpb.RegisterXRefServiceServer(srv, xs)
		ftpb.RegisterFileTreeServiceServer(srv, ft)
		if sr != nil {
			spb.RegisterSearchServiceServer(srv, sr)
		}
		go startGRPC(srv)
	}

	if *httpListeningAddr != "" || *tlsListeningAddr != "" {
		apiMux := http.NewServeMux()
		http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
			if *httpAllowOrigin != "" {
				w.Header().Set("Access-Control-Allow-Origin", *httpAllowOrigin)
			}
			apiMux.ServeHTTP(w, r)
		})

		xrefs.RegisterHTTPHandlers(ctx, xs, apiMux)
		filetree.RegisterHTTPHandlers(ctx, ft, apiMux)
		if sr != nil {
			search.RegisterHTTPHandlers(ctx, sr, apiMux)
		}
		if *publicResources != "" {
			log.Println("Serving public resources at", *publicResources)
			if s, err := os.Stat(*publicResources); err != nil {
				log.Fatalf("ERROR: could not get FileInfo for %q: %v", *publicResources, err)
			} else if !s.IsDir() {
				log.Fatalf("ERROR: %q is not a directory", *publicResources)
			}
			apiMux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
				http.ServeFile(w, r, filepath.Join(*publicResources, filepath.Clean(r.URL.Path)))
			})
		}
	}
	if *httpListeningAddr != "" {
		go startHTTP()
	}
	if *tlsListeningAddr != "" {
		go startTLS()
	}

	select {} // block forever
}
Example #10
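// Scans one or more LevelDBs for keys matching *keyPrefix and prints each
// key/value pair, either through *lineFormat (with @key@ and @value@
// substituted) or as JSON, decoding values as the *protoValue message type
// when one is given.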
func main() {
	flag.Parse()

	if flag.NArg() == 0 {
		flagutil.UsageError("Missing path to LevelDB")
	}

	var protoValueType reflect.Type
	if *protoValue != "" {
		protoValueType = proto.MessageType(*protoValue)
		if protoValueType == nil {
			flagutil.UsageErrorf("could not understand protocol buffer type: %q", *protoValue)
		}
	}

	var en *json.Encoder
	if *emitJSON {
		en = json.NewEncoder(os.Stdout)
	}

	for _, path := range flag.Args() {
		func() {
			db, err := leveldb.Open(path, nil)
			if err != nil {
				log.Fatalf("Error opening %q: %v", path, err)
			}
			defer db.Close()

			it, err := db.ScanPrefix([]byte(*keyPrefix), nil)
			if err != nil {
				log.Fatalf("Error creating iterator for %q: v", path, err)
			}
			defer it.Close()

			for {
				key, val, err := it.Next()
				if err == io.EOF {
					break
				} else if err != nil {
					log.Fatalf("Error during scan of %q: %v", path, err)
				}

				var k, v interface{}

				if protoValueType == nil {
					if *stringKey {
						k = strconv.Quote(string(key))
					} else {
						k = base64.StdEncoding.EncodeToString(key)
					}
					if *stringValue {
						v = strconv.Quote(string(val))
					} else {
						v = base64.StdEncoding.EncodeToString(val)
					}
				} else {
					p := reflect.New(protoValueType.Elem()).Interface().(proto.Message)
					if err := proto.Unmarshal(val, p); err != nil {
						log.Fatalf("Error unmarshaling value to %q: %v", *protoValue, err)
					}

					k, v = string(key), p
				}

				if en == nil {
					fmt.Println(strings.NewReplacer(
						"@key@", fmt.Sprintf("%s", k),
						"@value@", fmt.Sprintf("%s", v),
					).Replace(*lineFormat))
				} else {
					if err := en.Encode(keyValue{k, v}); err != nil {
						log.Fatalf("Error encoding JSON: %v", err)
					}
				}
			}
		}()
	}
}