Example #1
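This example is the main function of a Cloud Bigtable load-test client: it loads the .cbtrc configuration, dials data and admin clients, creates a scratch table, then runs concurrent reads and writes against it for a fixed duration and reports latency statistics, optionally writing them to a CSV file.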
func main() {
	var err error
	config, err = cbtrc.Load()
	if err != nil {
		log.Fatal(err)
	}
	config.RegisterFlags()

	flag.Parse()
	if err := config.CheckFlags(); err != nil {
		log.Fatal(err)
	}
	if config.Creds != "" {
		os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", config.Creds)
	}
	if flag.NArg() != 0 {
		flag.Usage()
		os.Exit(1)
	}

	var options []option.ClientOption
	if *poolSize > 1 {
		options = append(options, option.WithGRPCConnectionPool(*poolSize))
	}

	var csvFile *os.File
	if *csvOutput != "" {
		csvFile, err = os.Create(*csvOutput)
		if err != nil {
			log.Fatalf("creating csv output file: %v", err)
		}
		defer csvFile.Close()
		log.Printf("Writing statistics to %q ...", *csvOutput)
	}

	log.Printf("Dialing connections...")
	client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance, options...)
	if err != nil {
		log.Fatalf("Making bigtable.Client: %v", err)
	}
	defer client.Close()
	adminClient, err = bigtable.NewAdminClient(context.Background(), config.Project, config.Instance)
	if err != nil {
		log.Fatalf("Making bigtable.AdminClient: %v", err)
	}
	defer adminClient.Close()

	// Create a scratch table.
	log.Printf("Setting up scratch table...")
	if err := adminClient.CreateTable(context.Background(), *scratchTable); err != nil {
		log.Fatalf("Making scratch table %q: %v", *scratchTable, err)
	}
	if err := adminClient.CreateColumnFamily(context.Background(), *scratchTable, "f"); err != nil {
		log.Fatalf("Making scratch table column family: %v", err)
	}
	// Upon a successful run, delete the table. Don't bother checking for errors.
	defer adminClient.DeleteTable(context.Background(), *scratchTable)

	log.Printf("Starting load test... (run for %v)", *runFor)
	tbl := client.Open(*scratchTable)
	sem := make(chan int, *reqCount) // limit the number of requests happening at once
	var reads, writes stats
	stopTime := time.Now().Add(*runFor)
	var wg sync.WaitGroup
	for time.Now().Before(stopTime) {
		sem <- 1
		wg.Add(1)
		go func() {
			defer wg.Done()
			defer func() { <-sem }()

			ok := true
			opStart := time.Now()
			var stats *stats
			defer func() {
				stats.Record(ok, time.Since(opStart))
			}()

			row := fmt.Sprintf("row%d", rand.Intn(100)) // operate on 1 of 100 rows

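			// Split operations roughly 50/50: cases 0-4 write, the default branch (5-9) reads.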
			switch rand.Intn(10) {
			default:
				// read
				stats = &reads
				_, err := tbl.ReadRow(context.Background(), row, bigtable.RowFilter(bigtable.LatestNFilter(1)))
				if err != nil {
					log.Printf("Error doing read: %v", err)
					ok = false
				}
			case 0, 1, 2, 3, 4:
				// write
				stats = &writes
				mut := bigtable.NewMutation()
				mut.Set("f", "col", bigtable.Now(), bytes.Repeat([]byte("0"), 1<<10)) // 1 KB write
				if err := tbl.Apply(context.Background(), row, mut); err != nil {
					log.Printf("Error doing mutation: %v", err)
					ok = false
				}
			}
		}()
	}
	wg.Wait()

	readsAgg := stat.NewAggregate("reads", reads.ds, reads.tries-reads.ok)
	writesAgg := stat.NewAggregate("writes", writes.ds, writes.tries-writes.ok)
	log.Printf("Reads (%d ok / %d tries):\n%v", reads.ok, reads.tries, readsAgg)
	log.Printf("Writes (%d ok / %d tries):\n%v", writes.ok, writes.tries, writesAgg)

	if csvFile != nil {
		stat.WriteCSV([]*stat.Aggregate{readsAgg, writesAgg}, csvFile)
	}
}
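
The listing above relies on package-level flags, clients, and a stats type that are not shown. Below is a minimal, hypothetical sketch of what main appears to assume; the names, types, and defaults are inferred only from how they are used, so the real declarations may differ.

var (
	runFor       = flag.Duration("run_for", 5*time.Second, "how long to run the load test for")
	scratchTable = flag.String("scratch_table", "loadtest-scratch", "name of the scratch table; must not already exist")
	csvOutput    = flag.String("csv_output", "", "if set, path to write latency statistics as CSV")
	poolSize     = flag.Int("pool_size", 1, "size of the gRPC connection pool")
	reqCount     = flag.Int("req_count", 100, "maximum number of concurrent requests")

	config      *cbtrc.Config // assuming cbtrc.Load returns a *cbtrc.Config
	client      *bigtable.Client
	adminClient *bigtable.AdminClient
)

// stats accumulates results for one kind of operation (reads or writes).
// Record is called from many goroutines, so it guards its fields with a mutex;
// main only reads the fields after wg.Wait(), when all writers have finished.
type stats struct {
	mu        sync.Mutex
	tries, ok int
	ds        []time.Duration
}

func (s *stats) Record(ok bool, d time.Duration) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.tries++
	if ok {
		s.ok++
	}
	s.ds = append(s.ds, d)
}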
Example #2
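This example is an HTTP handler for a simple document search backed by Cloud Bigtable: each word has an index row whose columns name the documents containing it. The handler reads the index rows for the query words, keeps only the documents that matched every word, then fetches those documents and renders titles and snippets.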
// handleSearch responds to search queries, returning links and snippets for matching documents.
func handleSearch(w http.ResponseWriter, r *http.Request, table *bigtable.Table) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	query := r.FormValue("q")
	// Split the query into words.
	words := tokenize(query)
	if len(words) == 0 {
		http.Error(w, "Empty query.", http.StatusBadRequest)
		return
	}

	// readRows reads from many rows concurrently.
	readRows := func(rows []string) ([]bigtable.Row, error) {
		results := make([]bigtable.Row, len(rows))
		errors := make([]error, len(rows))
		var wg sync.WaitGroup
		for i, row := range rows {
			wg.Add(1)
			go func(i int, row string) {
				defer wg.Done()
				results[i], errors[i] = table.ReadRow(ctx, row, bigtable.RowFilter(bigtable.LatestNFilter(1)))
			}(i, row)
		}
		wg.Wait()
		for _, err := range errors {
			if err != nil {
				return nil, err
			}
		}
		return results, nil
	}

	// For each query word, get the list of documents containing it.
	results, err := readRows(words)
	if err != nil {
		http.Error(w, "Error reading index: "+err.Error(), http.StatusInternalServerError)
		return
	}

	// Count how many of the query words each result contained.
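	// Each index row has one column per document that contains the word, so
	// the column count per document equals the number of query words it matched.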
	hits := make(map[string]int)
	for _, row := range results {
		for _, item := range row[indexColumnFamily] {
			hits[item.Column]++
		}
	}

	// Build a slice of all the documents that matched every query word.
	var matches []string
	for doc, count := range hits {
		if count == len(words) {
			matches = append(matches, doc[len(indexColumnFamily+":"):])
		}
	}

	// Fetch the content of those documents from the Bigtable.
	content, err := readRows(matches)
	if err != nil {
		http.Error(w, "Error reading results: "+err.Error(), http.StatusInternalServerError)
		return
	}

	type result struct{ Title, Snippet string }
	data := struct {
		Query   string
		Results []result
	}{query, nil}

	// Output links and snippets.
	for i, doc := range matches {
		var text string
		c := content[i][contentColumnFamily]
		if len(c) > 0 {
			text = string(c[0].Value)
		}
		if len(text) > 100 {
			text = text[:100] + "..."
		}
		data.Results = append(data.Results, result{doc, text})
	}
	var buf bytes.Buffer
	if err := searchTemplate.ExecuteTemplate(&buf, "", data); err != nil {
		http.Error(w, "Error executing HTML template: "+err.Error(), http.StatusInternalServerError)
		return
	}
	io.Copy(w, &buf)
}
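
The handler assumes a few identifiers defined elsewhere: indexColumnFamily and contentColumnFamily name the column families of the index and content rows, searchTemplate renders the Query/Results data, and tokenize turns the query into index row keys. A hypothetical sketch of those pieces, under the stated assumptions (the concrete family names and normalization rules may differ):

// Hypothetical column-family names; the real values may differ.
const (
	indexColumnFamily   = "i" // one column per document containing the word
	contentColumnFamily = "c" // the document body
)

// tokenize splits a query into lowercase, deduplicated words to use as index
// row keys. Deduplication matters because each document's hit count is compared
// against len(words). Uses the standard "strings" and "unicode" packages.
func tokenize(s string) []string {
	seen := make(map[string]bool)
	var words []string
	for _, w := range strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsLetter(r) }) {
		w = strings.ToLower(w)
		if !seen[w] {
			seen[w] = true
			words = append(words, w)
		}
	}
	return words
}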