func doRead(ctx context.Context, args ...string) {
	if len(args) < 1 {
		log.Fatalf("usage: cbt read <table> [args ...]")
	}
	tbl := getClient().Open(args[0])

	parsed := make(map[string]string)
	for _, arg := range args[1:] {
		i := strings.Index(arg, "=")
		if i < 0 {
			log.Fatalf("Bad arg %q", arg)
		}
		key, val := arg[:i], arg[i+1:]
		switch key {
		default:
			log.Fatalf("Unknown arg key %q", key)
		case "limit":
			// Be nicer; we used to support this, but renamed it to "end".
			log.Fatalf("Unknown arg key %q; did you mean %q?", key, "end")
		case "start", "end", "prefix", "count":
			parsed[key] = val
		}
	}
	if (parsed["start"] != "" || parsed["end"] != "") && parsed["prefix"] != "" {
		log.Fatal(`"start"/"end" may not be mixed with "prefix"`)
	}

	var rr bigtable.RowRange
	if start, end := parsed["start"], parsed["end"]; end != "" {
		rr = bigtable.NewRange(start, end)
	} else if start != "" {
		rr = bigtable.InfiniteRange(start)
	}
	if prefix := parsed["prefix"]; prefix != "" {
		rr = bigtable.PrefixRange(prefix)
	}

	var opts []bigtable.ReadOption
	if count := parsed["count"]; count != "" {
		n, err := strconv.ParseInt(count, 0, 64)
		if err != nil {
			log.Fatalf("Bad count %q: %v", count, err)
		}
		opts = append(opts, bigtable.LimitRows(n))
	}

	// TODO(dsymonds): Support filters.

	err := tbl.ReadRows(ctx, rr, func(r bigtable.Row) bool {
		printRow(r)
		return true
	}, opts...)
	if err != nil {
		log.Fatalf("Reading rows: %v", err)
	}
}
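// A minimal sketch of one way the "Support filters" TODO in doRead could be
// approached: a hypothetical column-qualifier regex (the "columnRegex"
// parameter and this helper are illustrative, not an existing cbt flag) is
// turned into a ReadOption via bigtable.RowFilter and bigtable.ColumnFilter,
// both of which exist in the cloud.google.com/go/bigtable package. The
// resulting option would be appended to opts alongside bigtable.LimitRows.
func columnFilterOption(columnRegex string) bigtable.ReadOption {
	// ColumnFilter matches cells whose column qualifier matches the RE2 pattern.
	return bigtable.RowFilter(bigtable.ColumnFilter(columnRegex))
}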
// copyTable copies data from one table to another.
func copyTable(src, dst string, client *bigtable.Client, adminClient *bigtable.AdminClient) error {
	if src == "" || src == dst {
		return nil
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	// Open the source and destination tables.
	srcTable := client.Open(src)
	dstTable := client.Open(dst)

	var (
		writeErr error          // Set if any write fails.
		mu       sync.Mutex     // Protects writeErr.
		wg       sync.WaitGroup // Used to wait for all writes to finish.
	)
	copyRowToTable := func(row bigtable.Row) bool {
		mu.Lock()
		failed := writeErr != nil
		mu.Unlock()
		if failed {
			return false
		}
		mut := bigtable.NewMutation()
		for family, items := range row {
			for _, item := range items {
				// Get the column name, excluding the column family name and ':' character.
				columnWithoutFamily := item.Column[len(family)+1:]
				mut.Set(family, columnWithoutFamily, bigtable.Now(), item.Value)
			}
		}
		wg.Add(1)
		go func() {
			// TODO: should use a semaphore to limit the number of concurrent writes.
			if err := dstTable.Apply(ctx, row.Key(), mut); err != nil {
				mu.Lock()
				writeErr = err
				mu.Unlock()
			}
			wg.Done()
		}()
		return true
	}

	// Create a filter that only accepts the column families we're interested in.
	filter := bigtable.FamilyFilter(indexColumnFamily + "|" + contentColumnFamily)
	// Read every row from srcTable, and call copyRowToTable to copy it to our table.
	err := srcTable.ReadRows(ctx, bigtable.InfiniteRange(""), copyRowToTable, bigtable.RowFilter(filter))
	wg.Wait()
	if err != nil {
		return err
	}
	return writeErr
}
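// A minimal sketch of the semaphore approach mentioned in copyTable's TODO:
// a buffered channel caps the number of in-flight Apply calls. The limit of
// 100, the helper name, and its exact wiring are assumptions for illustration,
// not part of the original code.
var copySem = make(chan struct{}, 100)

func applyLimited(ctx context.Context, tbl *bigtable.Table, key string, mut *bigtable.Mutation, wg *sync.WaitGroup, mu *sync.Mutex, writeErr *error) {
	wg.Add(1)
	copySem <- struct{}{} // Acquire a slot; blocks while 100 writes are in flight.
	go func() {
		defer wg.Done()
		defer func() { <-copySem }() // Release the slot.
		if err := tbl.Apply(ctx, key, mut); err != nil {
			mu.Lock()
			if *writeErr == nil {
				*writeErr = err
			}
			mu.Unlock()
		}
	}()
}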
func doCount(ctx context.Context, args ...string) {
	if len(args) != 1 {
		log.Fatal("usage: cbt count <table>")
	}
	tbl := getClient().Open(args[0])

	n := 0
	err := tbl.ReadRows(ctx, bigtable.InfiniteRange(""), func(_ bigtable.Row) bool {
		n++
		return true
	}, bigtable.RowFilter(bigtable.StripValueFilter()))
	if err != nil {
		log.Fatalf("Reading rows: %v", err)
	}
	fmt.Println(n)
}
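// A sketch of a slightly cheaper variant of the read used by doCount: chaining
// CellsPerRowLimitFilter(1) with StripValueFilter returns at most one empty
// cell per row, so essentially only row keys travel over the wire. The filters
// and ChainFilters exist in the cloud.google.com/go/bigtable package; the
// countRows helper itself is illustrative and not part of cbt.
func countRows(ctx context.Context, tbl *bigtable.Table) (int, error) {
	n := 0
	filter := bigtable.ChainFilters(bigtable.CellsPerRowLimitFilter(1), bigtable.StripValueFilter())
	err := tbl.ReadRows(ctx, bigtable.InfiniteRange(""), func(_ bigtable.Row) bool {
		n++
		return true
	}, bigtable.RowFilter(filter))
	return n, err
}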