// doRead implements the "cbt read <table>" command: it parses key=value
// arguments into a row range and read options, then prints every matching row.
func doRead(ctx context.Context, args ...string) {
	if len(args) < 1 {
		log.Fatalf("usage: cbt read <table> [args ...]")
	}
	tbl := getClient().Open(args[0])

	parsed := make(map[string]string)
	for _, arg := range args[1:] {
		i := strings.Index(arg, "=")
		if i < 0 {
			log.Fatalf("Bad arg %q", arg)
		}
		key, val := arg[:i], arg[i+1:]
		switch key {
		default:
			log.Fatalf("Unknown arg key %q", key)
		case "limit":
			// Be nicer; we used to support this, but renamed it to "end".
			log.Fatalf("Unknown arg key %q; did you mean %q?", key, "end")
		case "start", "end", "prefix", "count":
			parsed[key] = val
		}
	}
	if (parsed["start"] != "" || parsed["end"] != "") && parsed["prefix"] != "" {
		log.Fatal(`"start"/"end" may not be mixed with "prefix"`)
	}

	var rr bigtable.RowRange
	if start, end := parsed["start"], parsed["end"]; end != "" {
		rr = bigtable.NewRange(start, end)
	} else if start != "" {
		rr = bigtable.InfiniteRange(start)
	}
	if prefix := parsed["prefix"]; prefix != "" {
		rr = bigtable.PrefixRange(prefix)
	}

	var opts []bigtable.ReadOption
	if count := parsed["count"]; count != "" {
		n, err := strconv.ParseInt(count, 0, 64)
		if err != nil {
			log.Fatalf("Bad count %q: %v", count, err)
		}
		opts = append(opts, bigtable.LimitRows(n))
	}

	// TODO(dsymonds): Support filters.
	err := tbl.ReadRows(ctx, rr, func(r bigtable.Row) bool {
		printRow(r)
		return true
	}, opts...)
	if err != nil {
		log.Fatalf("Reading rows: %v", err)
	}
}
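// The TODO above notes that filters are not yet supported. A minimal sketch of
// one way to wire them in, assuming a hypothetical "regex=<pattern>" arg: add
// "regex" to the accepted keys in the switch, then turn the pattern into a
// ReadOption with a helper like the one below. bigtable.RowFilter and
// bigtable.RowKeyFilter are existing cloud.google.com/go/bigtable APIs; the
// arg name and this helper are illustrative assumptions, not part of cbt.
func rowKeyFilterOption(pattern string) bigtable.ReadOption {
	// RowKeyFilter matches rows whose keys satisfy the given RE2 pattern.
	return bigtable.RowFilter(bigtable.RowKeyFilter(pattern))
}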
// main runs the scan load test: it dials a Bigtable client for the configured
// project/instance, keeps up to *numScans scans in flight against random row
// keys until *runFor has elapsed, then reports latency and throughput stats.
func main() {
	flag.Usage = func() {
		fmt.Printf("Usage: scantest [options] <table_name>\n\n")
		flag.PrintDefaults()
	}

	var err error
	config, err = cbtconfig.Load()
	if err != nil {
		log.Fatal(err)
	}
	config.RegisterFlags()

	flag.Parse()
	if err := config.CheckFlags(cbtconfig.ProjectAndInstanceRequired); err != nil {
		log.Fatal(err)
	}
	if config.Creds != "" {
		os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", config.Creds)
	}
	if flag.NArg() != 1 {
		flag.Usage()
		os.Exit(1)
	}
	table := flag.Arg(0)

	log.Printf("Dialing connections...")
	client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance)
	if err != nil {
		log.Fatalf("Making bigtable.Client: %v", err)
	}
	defer client.Close()

	log.Printf("Starting scan test... (run for %v)", *runFor)
	tbl := client.Open(table)
	sem := make(chan int, *numScans) // limit the number of requests happening at once
	var scans stats

	stopTime := time.Now().Add(*runFor)
	var wg sync.WaitGroup
	for time.Now().Before(stopTime) {
		sem <- 1
		wg.Add(1)
		go func() {
			defer wg.Done()
			defer func() { <-sem }()

			ok := true
			opStart := time.Now()
			defer func() {
				scans.Record(ok, time.Since(opStart))
			}()

			// Start at a random row key.
			key := fmt.Sprintf("user%d", rand.Int63())
			limit := bigtable.LimitRows(int64(*rowLimit))
			noop := func(bigtable.Row) bool { return true }
			if err := tbl.ReadRows(context.Background(), bigtable.NewRange(key, ""), noop, limit); err != nil {
				log.Printf("Error during scan: %v", err)
				ok = false
			}
		}()
	}
	wg.Wait()

	agg := stat.NewAggregate("scans", scans.ds, scans.tries-scans.ok)
	log.Printf("Scans (%d ok / %d tries):\nscan times:\n%v\nthroughput (rows/second):\n%v",
		scans.ok, scans.tries, agg, throughputString(agg))
}
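// The stats accumulator, the *runFor/*numScans/*rowLimit flags, and the shared
// config/client variables referenced in main are declared elsewhere in this
// file and are not shown in this excerpt. Below is a minimal sketch of a stats
// type consistent with how it is used above (concurrent Record calls from the
// scan goroutines, then ds/tries/ok read after wg.Wait); the name, exact field
// layout, and method body are assumptions, not the file's real definition.
type statsSketch struct {
	mu        sync.Mutex
	tries, ok int
	ds        []time.Duration // per-scan latencies, later fed to stat.NewAggregate
}

// Record notes one completed scan attempt and its duration. It must be safe
// for concurrent use, since each scan goroutine calls it from a deferred func.
func (s *statsSketch) Record(ok bool, d time.Duration) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.tries++
	if ok {
		s.ok++
	}
	s.ds = append(s.ds, d)
}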