// PutRange puts key-value pairs. Note that it could be more efficient to use the
// Batcher interface so you don't have to create and keep a slice of KeyValue.
// Some databases like leveldb will copy on batch put anyway.
func (db *BigTable) PutRange(ctx storage.Context, TKeyValue []storage.TKeyValue) error {
	if db == nil {
		return fmt.Errorf("Can't call PutRange() on nil BigTable")
	}
	if ctx == nil {
		return fmt.Errorf("Received nil context in PutRange()")
	}

	for _, tkeyvalue := range TKeyValue {
		unvKey, verKey, err := ctx.SplitKey(tkeyvalue.K)
		if err != nil {
			dvid.Errorf("Error in PutRange(): %v\n", err)
			continue
		}

		mut := api.NewMutation()
		mut.Set(familyName, encodeKey(verKey), 0, tkeyvalue.V)
		if err := tbl.Apply(db.ctx, encodeKey(unvKey), mut); err != nil {
			dvid.Errorf("Failed to Put value in PutRange(): %v\n", err)
		}
	}

	return nil
}
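// The following is a minimal sketch, not part of the original code, showing how
// the per-row Apply calls in PutRange could be batched with ApplyBulk from
// cloud.google.com/go/bigtable (imported in this package as "api"). It assumes
// the same tbl, db.ctx, familyName and encodeKey helpers used by PutRange; the
// helper name putRangeBulk is hypothetical.
func (db *BigTable) putRangeBulk(ctx storage.Context, kvs []storage.TKeyValue) error {
	rowKeys := make([]string, 0, len(kvs))
	muts := make([]*api.Mutation, 0, len(kvs))
	for _, kv := range kvs {
		unvKey, verKey, err := ctx.SplitKey(kv.K)
		if err != nil {
			return err
		}
		mut := api.NewMutation()
		mut.Set(familyName, encodeKey(verKey), 0, kv.V)
		rowKeys = append(rowKeys, encodeKey(unvKey))
		muts = append(muts, mut)
	}
	// ApplyBulk reports per-row failures separately from the overall RPC error.
	rowErrs, err := tbl.ApplyBulk(db.ctx, rowKeys, muts)
	if err != nil {
		return err
	}
	for _, rowErr := range rowErrs {
		if rowErr != nil {
			return rowErr
		}
	}
	return nil
}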
func doSet(ctx context.Context, args ...string) {
	if len(args) < 3 {
		log.Fatalf("usage: cbt set <table> <row> family:[column]=val[@ts] ...")
	}
	tbl := getClient().Open(args[0])
	row := args[1]
	mut := bigtable.NewMutation()
	for _, arg := range args[2:] {
		m := setArg.FindStringSubmatch(arg)
		if m == nil {
			log.Fatalf("Bad set arg %q", arg)
		}
		val := m[3]
		ts := bigtable.Now()
		if i := strings.LastIndex(val, "@"); i >= 0 {
			// Try parsing a timestamp.
			n, err := strconv.ParseInt(val[i+1:], 0, 64)
			if err == nil {
				val = val[:i]
				ts = bigtable.Timestamp(n)
			}
		}
		mut.Set(m[1], m[2], ts, []byte(val))
	}
	if err := tbl.Apply(ctx, row, mut); err != nil {
		log.Fatalf("Applying mutation: %v", err)
	}
}
func ExampleNewServer() {
	srv, err := bttest.NewServer("127.0.0.1:0")
	if err != nil {
		log.Fatalln(err)
	}

	ctx := context.Background()

	conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
	if err != nil {
		log.Fatalln(err)
	}

	proj, instance := "proj", "instance"
	adminClient, err := bigtable.NewAdminClient(ctx, proj, instance, option.WithGRPCConn(conn))
	if err != nil {
		log.Fatalln(err)
	}

	if err = adminClient.CreateTable(ctx, "example"); err != nil {
		log.Fatalln(err)
	}
	if err = adminClient.CreateColumnFamily(ctx, "example", "links"); err != nil {
		log.Fatalln(err)
	}

	client, err := bigtable.NewClient(ctx, proj, instance, option.WithGRPCConn(conn))
	if err != nil {
		log.Fatalln(err)
	}
	tbl := client.Open("example")

	mut := bigtable.NewMutation()
	mut.Set("links", "golang.org", bigtable.Now(), []byte("Gophers!"))
	if err = tbl.Apply(ctx, "com.google.cloud", mut); err != nil {
		log.Fatalln(err)
	}

	if row, err := tbl.ReadRow(ctx, "com.google.cloud"); err != nil {
		log.Fatalln(err)
	} else {
		for _, column := range row["links"] {
			fmt.Println(column.Column)
			fmt.Println(string(column.Value))
		}
	}

	// Output:
	// links:golang.org
	// Gophers!
}
func doDeleteRow(ctx context.Context, args ...string) {
	if len(args) != 2 {
		log.Fatal("usage: cbt deleterow <table> <row>")
	}
	tbl := getClient().Open(args[0])
	mut := bigtable.NewMutation()
	mut.DeleteRow()
	if err := tbl.Apply(ctx, args[1], mut); err != nil {
		log.Fatalf("Deleting row: %v", err)
	}
}
// copyTable copies data from one table to another.
func copyTable(src, dst string, client *bigtable.Client, adminClient *bigtable.AdminClient) error {
	if src == "" || src == dst {
		return nil
	}
	ctx, _ := context.WithTimeout(context.Background(), time.Minute)

	// Open the source and destination tables.
	srcTable := client.Open(src)
	dstTable := client.Open(dst)

	var (
		writeErr error          // Set if any write fails.
		mu       sync.Mutex     // Protects writeErr
		wg       sync.WaitGroup // Used to wait for all writes to finish.
	)
	copyRowToTable := func(row bigtable.Row) bool {
		mu.Lock()
		failed := writeErr != nil
		mu.Unlock()
		if failed {
			return false
		}
		mut := bigtable.NewMutation()
		for family, items := range row {
			for _, item := range items {
				// Get the column name, excluding the column family name and ':' character.
				columnWithoutFamily := item.Column[len(family)+1:]
				mut.Set(family, columnWithoutFamily, bigtable.Now(), item.Value)
			}
		}
		wg.Add(1)
		go func() {
			// TODO: should use a semaphore to limit the number of concurrent writes.
			if err := dstTable.Apply(ctx, row.Key(), mut); err != nil {
				mu.Lock()
				writeErr = err
				mu.Unlock()
			}
			wg.Done()
		}()
		return true
	}

	// Create a filter that only accepts the column families we're interested in.
	filter := bigtable.FamilyFilter(indexColumnFamily + "|" + contentColumnFamily)
	// Read every row from srcTable, and call copyRowToTable to copy it to our table.
	err := srcTable.ReadRows(ctx, bigtable.InfiniteRange(""), copyRowToTable, bigtable.RowFilter(filter))
	wg.Wait()
	if err != nil {
		return err
	}
	return writeErr
}
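// A minimal sketch, not in the original source, of the buffered-channel
// semaphore that the TODO in copyRowToTable (and in handleAddDoc below) refers
// to: acquiring a slot before each goroutine's Apply call bounds the number of
// concurrent writes. maxConcurrentWrites and applyWithLimit are hypothetical names.
const maxConcurrentWrites = 64

var writeSem = make(chan struct{}, maxConcurrentWrites)

func applyWithLimit(ctx context.Context, tbl *bigtable.Table, key string, mut *bigtable.Mutation) error {
	writeSem <- struct{}{}        // acquire a slot; blocks while the limit is reached
	defer func() { <-writeSem }() // release the slot when the write finishes
	return tbl.Apply(ctx, key, mut)
}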
// Delete deletes a key-value pair so that a subsequent Get on the key returns nil.
func (db *BigTable) Delete(ctx storage.Context, tkey storage.TKey) error {
	if db == nil {
		return fmt.Errorf("Can't call Delete() on nil BigTable")
	}
	if ctx == nil {
		return fmt.Errorf("Received nil context in Delete()")
	}

	unvKey, verKey, err := ctx.SplitKey(tkey)
	if err != nil {
		return fmt.Errorf("Error in Delete(): %v\n", err)
	}

	r, err := tbl.ReadRow(db.ctx, encodeKey(unvKey), api.RowFilter(api.StripValueFilter()))
	if err != nil {
		return err
	}
	// A missing row returns a zero-length map and a nil error.
	if len(r) == 0 {
		return fmt.Errorf("Error in Delete(): This unvKey doesn't exist")
	}
	if len(r[familyName]) == 0 {
		return fmt.Errorf("Error in Delete(): This row is empty")
	}
	if _, err = getValue(r, verKey); err != nil {
		return fmt.Errorf("Error in Delete(): The version to be deleted doesn't exist")
	}

	mut := api.NewMutation()
	if len(r[familyName]) == 1 {
		// There is only one version left, and it is the one we are trying to delete:
		// remove the whole row.
		mut.DeleteRow()
	} else {
		mut.DeleteCellsInColumn(familyName, encodeKey(verKey))
	}

	if err = tbl.Apply(db.ctx, encodeKey(unvKey), mut); err != nil {
		return fmt.Errorf("Error in Delete(): %v\n", err)
	}

	return nil
}
// RawPut is a low-level function that puts a key-value pair using full keys.
// This can be used in conjunction with RawRangeQuery.
func (db *BigTable) RawPut(fullKey storage.Key, value []byte) error {
	if db == nil {
		return fmt.Errorf("Can't call RawPut() on nil BigTable")
	}

	unvKey, verKey, err := storage.SplitKey(fullKey)
	if err != nil {
		return fmt.Errorf("Error in RawPut(): %v\n", err)
	}

	mut := api.NewMutation()
	mut.Set(familyName, encodeKey(verKey), 0, value)

	if err = tbl.Apply(db.ctx, encodeKey(unvKey), mut); err != nil {
		return fmt.Errorf("Error in RawPut(): %v\n", err)
	}

	return nil
}
// DeleteRange removes all key-value pairs with keys in the given range,
// for all versions.
func (db *BigTable) DeleteRange(ctx storage.Context, TkBeg, TkEnd storage.TKey) error {
	if db == nil {
		return fmt.Errorf("Can't call DeleteRange() on nil BigTable")
	}
	if ctx == nil {
		return fmt.Errorf("Received nil context in DeleteRange()")
	}

	unvKeyBeg, _, err := ctx.SplitKey(TkBeg)
	if err != nil {
		return fmt.Errorf("Error in DeleteRange(): %v\n", err)
	}
	unvKeyEnd, _, err := ctx.SplitKey(TkEnd)
	if err != nil {
		return fmt.Errorf("Error in DeleteRange(): %v\n", err)
	}

	rr := api.NewRange(encodeKey(unvKeyBeg), encodeKey(unvKeyEnd))
	err = tbl.ReadRows(db.ctx, rr, func(r api.Row) bool {
		unvKeyRow, err := decodeKey(r.Key())
		if err != nil {
			dvid.Errorf("Error in DeleteRange(): %v\n", err)
			return false
		}

		mut := api.NewMutation()
		mut.DeleteRow()
		if err := tbl.Apply(db.ctx, encodeKey(unvKeyRow), mut); err != nil {
			dvid.Errorf("Failed to delete row in DeleteRange(): %v\n", err)
		}

		return true // keep going
	}, api.RowFilter(api.StripValueFilter()))

	return err
}
// Put writes a value with the given key in a possibly versioned context.
func (db *BigTable) Put(ctx storage.Context, tkey storage.TKey, value []byte) error {
	if db == nil {
		return fmt.Errorf("Can't call Put() on nil BigTable")
	}
	if ctx == nil {
		return fmt.Errorf("Received nil context in Put()")
	}

	unvKey, verKey, err := ctx.SplitKey(tkey)
	if err != nil {
		return fmt.Errorf("Error in Put(): %v\n", err)
	}

	mut := api.NewMutation()
	// dvid.Infof("Putting value %s\n", string(value))
	mut.Set(familyName, encodeKey(verKey), 0, value)

	if err = tbl.Apply(db.ctx, encodeKey(unvKey), mut); err != nil {
		return fmt.Errorf("Error in Put(): %v\n", err)
	}

	return nil
}
func main() {
	var err error
	config, err = cbtrc.Load()
	if err != nil {
		log.Fatal(err)
	}
	config.RegisterFlags()
	flag.Parse()
	if err := config.CheckFlags(); err != nil {
		log.Fatal(err)
	}
	if config.Creds != "" {
		os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", config.Creds)
	}
	if flag.NArg() != 0 {
		flag.Usage()
		os.Exit(1)
	}

	var options []option.ClientOption
	if *poolSize > 1 {
		options = append(options, option.WithGRPCConnectionPool(*poolSize))
	}

	var csvFile *os.File
	if *csvOutput != "" {
		csvFile, err = os.Create(*csvOutput)
		if err != nil {
			log.Fatalf("creating csv output file: %v", err)
		}
		defer csvFile.Close()
		log.Printf("Writing statistics to %q ...", *csvOutput)
	}

	log.Printf("Dialing connections...")
	client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance, options...)
	if err != nil {
		log.Fatalf("Making bigtable.Client: %v", err)
	}
	defer client.Close()
	adminClient, err = bigtable.NewAdminClient(context.Background(), config.Project, config.Instance)
	if err != nil {
		log.Fatalf("Making bigtable.AdminClient: %v", err)
	}
	defer adminClient.Close()

	// Create a scratch table.
	log.Printf("Setting up scratch table...")
	if err := adminClient.CreateTable(context.Background(), *scratchTable); err != nil {
		log.Fatalf("Making scratch table %q: %v", *scratchTable, err)
	}
	if err := adminClient.CreateColumnFamily(context.Background(), *scratchTable, "f"); err != nil {
		log.Fatalf("Making scratch table column family: %v", err)
	}
	// Upon a successful run, delete the table. Don't bother checking for errors.
	defer adminClient.DeleteTable(context.Background(), *scratchTable)

	log.Printf("Starting load test... (run for %v)", *runFor)
	tbl := client.Open(*scratchTable)
	sem := make(chan int, *reqCount) // limit the number of requests happening at once
	var reads, writes stats
	stopTime := time.Now().Add(*runFor)
	var wg sync.WaitGroup
	for time.Now().Before(stopTime) {
		sem <- 1
		wg.Add(1)
		go func() {
			defer wg.Done()
			defer func() { <-sem }()

			ok := true
			opStart := time.Now()
			var stats *stats
			defer func() {
				stats.Record(ok, time.Since(opStart))
			}()

			row := fmt.Sprintf("row%d", rand.Intn(100)) // operate on 1 of 100 rows

			switch rand.Intn(10) {
			default:
				// read
				stats = &reads
				_, err := tbl.ReadRow(context.Background(), row, bigtable.RowFilter(bigtable.LatestNFilter(1)))
				if err != nil {
					log.Printf("Error doing read: %v", err)
					ok = false
				}
			case 0, 1, 2, 3, 4:
				// write
				stats = &writes
				mut := bigtable.NewMutation()
				mut.Set("f", "col", bigtable.Now(), bytes.Repeat([]byte("0"), 1<<10)) // 1 KB write
				if err := tbl.Apply(context.Background(), row, mut); err != nil {
					log.Printf("Error doing mutation: %v", err)
					ok = false
				}
			}
		}()
	}
	wg.Wait()

	readsAgg := stat.NewAggregate("reads", reads.ds, reads.tries-reads.ok)
	writesAgg := stat.NewAggregate("writes", writes.ds, writes.tries-writes.ok)
	log.Printf("Reads (%d ok / %d tries):\n%v", reads.ok, reads.tries, readsAgg)
	log.Printf("Writes (%d ok / %d tries):\n%v", writes.ok, writes.tries, writesAgg)

	if csvFile != nil {
		stat.WriteCSV([]*stat.Aggregate{readsAgg, writesAgg}, csvFile)
	}
}
// DeleteAll removes all key-value pairs for the context. If allVersions is true,
// then all versions of the data instance are deleted.
func (db *BigTable) DeleteAll(ctx storage.Context, allVersions bool) error {
	if db == nil {
		return fmt.Errorf("Can't call DeleteAll() on nil BigTable")
	}
	if ctx == nil {
		return fmt.Errorf("Received nil context in DeleteAll()")
	}

	// The row range corresponds to all keys for this data instance.
	min, max := ctx.KeyRange()
	rr := api.NewRange(encodeKey(min), encodeKey(max))

	err := tbl.ReadRows(db.ctx, rr, func(r api.Row) bool {
		unvKeyRow, err := decodeKey(r.Key())
		if err != nil {
			dvid.Errorf("Error in DeleteAll(): %v\n", err)
			return false
		}

		if allVersions {
			mut := api.NewMutation()
			mut.DeleteRow()
			if err := tbl.Apply(db.ctx, encodeKey(unvKeyRow), mut); err != nil {
				dvid.Errorf("Failed to delete row in DeleteAll(): %v\n", err)
			}
		} else {
			emptyTkey := make([]byte, 0)
			_, versionToDelete, err := ctx.SplitKey(emptyTkey)
			if err != nil {
				dvid.Errorf("Error in DeleteAll(): %v\n", err)
				return false
			}
			for _, readItem := range r[familyName] {
				verKey, err := decodeKey(readItem.Column)
				if err != nil {
					dvid.Errorf("Error in DeleteAll(): %v\n", err)
					return false
				}
				if bytes.Equal(verKey, versionToDelete) {
					mut := api.NewMutation()
					mut.DeleteCellsInColumn(familyName, encodeKey(verKey))
					if err := tbl.Apply(db.ctx, encodeKey(unvKeyRow), mut); err != nil {
						dvid.Errorf("Failed to DeleteCellsInColumn in DeleteAll(): %v\n", err)
					}
					// Once the version to delete has been found, there is no need to keep searching.
					return true
				}
			}
		}

		return true // keep going
	}, api.RowFilter(api.StripValueFilter()))

	return err
}
func main() {
	project := flag.String("project", "", "The Google Cloud Platform project ID. Required.")
	instance := flag.String("instance", "", "The Google Cloud Bigtable instance ID. Required.")
	flag.Parse()

	for _, f := range []string{"project", "instance"} {
		if flag.Lookup(f).Value.String() == "" {
			log.Fatalf("The %s flag is required.", f)
		}
	}

	ctx := context.Background()

	// Set up admin client, tables, and column families.
	// NewAdminClient uses Application Default Credentials to authenticate.
	adminClient, err := bigtable.NewAdminClient(ctx, *project, *instance)
	if err != nil {
		log.Fatalf("Could not create admin client: %v", err)
	}

	tables, err := adminClient.Tables(ctx)
	if err != nil {
		log.Fatalf("Could not fetch table list: %v", err)
	}

	if !sliceContains(tables, tableName) {
		log.Printf("Creating table %s", tableName)
		if err := adminClient.CreateTable(ctx, tableName); err != nil {
			log.Fatalf("Could not create table %s: %v", tableName, err)
		}
	}

	tblInfo, err := adminClient.TableInfo(ctx, tableName)
	if err != nil {
		log.Fatalf("Could not read info for table %s: %v", tableName, err)
	}

	if !sliceContains(tblInfo.Families, columnFamilyName) {
		if err := adminClient.CreateColumnFamily(ctx, tableName, columnFamilyName); err != nil {
			log.Fatalf("Could not create column family %s: %v", columnFamilyName, err)
		}
	}

	// Set up Bigtable data operations client.
	// NewClient uses Application Default Credentials to authenticate.
	client, err := bigtable.NewClient(ctx, *project, *instance)
	if err != nil {
		log.Fatalf("Could not create data operations client: %v", err)
	}

	tbl := client.Open(tableName)
	muts := make([]*bigtable.Mutation, len(greetings))
	rowKeys := make([]string, len(greetings))

	log.Printf("Writing greeting rows to table")
	for i, greeting := range greetings {
		muts[i] = bigtable.NewMutation()
		muts[i].Set(columnFamilyName, columnName, bigtable.Now(), []byte(greeting))

		// Each row has a unique row key.
		//
		// Note: This example uses sequential numeric IDs for simplicity, but
		// this can result in poor performance in a production application.
		// Since rows are stored in sorted order by key, sequential keys can
		// result in poor distribution of operations across nodes.
		//
		// For more information about how to design a Bigtable schema for the
		// best performance, see the documentation:
		//
		//     https://cloud.google.com/bigtable/docs/schema-design
		rowKeys[i] = fmt.Sprintf("%s%d", columnName, i)
	}

	rowErrs, err := tbl.ApplyBulk(ctx, rowKeys, muts)
	if err != nil {
		log.Fatalf("Could not apply bulk row mutation: %v", err)
	}
	if rowErrs != nil {
		for _, rowErr := range rowErrs {
			log.Printf("Error writing row: %v", rowErr)
		}
		log.Fatalf("Could not write some rows")
	}

	log.Printf("Getting a single greeting by row key:")
	row, err := tbl.ReadRow(ctx, rowKeys[0], bigtable.RowFilter(bigtable.ColumnFilter(columnName)))
	if err != nil {
		log.Fatalf("Could not read row with key %s: %v", rowKeys[0], err)
	}
	log.Printf("\t%s = %s\n", rowKeys[0], string(row[columnFamilyName][0].Value))

	log.Printf("Reading all greeting rows:")
	err = tbl.ReadRows(ctx, bigtable.PrefixRange(columnName), func(row bigtable.Row) bool {
		item := row[columnFamilyName][0]
		log.Printf("\t%s = %s\n", item.Row, string(item.Value))
		return true
	}, bigtable.RowFilter(bigtable.ColumnFilter(columnName)))
	if err != nil {
		log.Fatalf("Could not read all greeting rows: %v", err)
	}

	if err = client.Close(); err != nil {
		log.Fatalf("Could not close data operations client: %v", err)
	}

	log.Printf("Deleting the table")
	if err = adminClient.DeleteTable(ctx, tableName); err != nil {
		log.Fatalf("Could not delete table %s: %v", tableName, err)
	}

	if err = adminClient.Close(); err != nil {
		log.Fatalf("Could not close admin client: %v", err)
	}
}
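// A minimal sketch, not part of the sample, of one way to avoid the purely
// sequential row keys flagged in the comment above: prefix each key with a
// short hash of the ID so writes spread across nodes. Note the trade-off that
// hashed prefixes break ordered range scans over the natural key order, so the
// schema-design docs linked above should guide the real choice. rowKeyFor is a
// hypothetical helper and assumes "crypto/sha256" and "fmt" are imported.
func rowKeyFor(prefix string, i int) string {
	h := sha256.Sum256([]byte(fmt.Sprintf("%d", i)))
	return fmt.Sprintf("%s-%x-%d", prefix, h[:2], i)
}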
// handleAddDoc adds a document to the index.
func handleAddDoc(w http.ResponseWriter, r *http.Request, table *bigtable.Table) {
	if r.Method != "POST" {
		http.Error(w, "POST requests only", http.StatusMethodNotAllowed)
		return
	}

	ctx, _ := context.WithTimeout(context.Background(), time.Minute)

	name := r.FormValue("name")
	if len(name) == 0 {
		http.Error(w, "Empty document name!", http.StatusBadRequest)
		return
	}
	content := r.FormValue("content")
	if len(content) == 0 {
		http.Error(w, "Empty document content!", http.StatusBadRequest)
		return
	}

	var (
		writeErr error          // Set if any write fails.
		mu       sync.Mutex     // Protects writeErr
		wg       sync.WaitGroup // Used to wait for all writes to finish.
	)

	// writeOneColumn writes one column in one row and records any error in writeErr;
	// the callers signal wg when each write has finished.
	writeOneColumn := func(row, family, column, value string, ts bigtable.Timestamp) {
		mut := bigtable.NewMutation()
		mut.Set(family, column, ts, []byte(value))
		err := table.Apply(ctx, row, mut)
		if err != nil {
			mu.Lock()
			writeErr = err
			mu.Unlock()
		}
	}

	// Start a write to store the document content.
	wg.Add(1)
	go func() {
		writeOneColumn(name, contentColumnFamily, "", content, bigtable.Now())
		wg.Done()
	}()

	// Start writes to store the document name in the index for each word in the document.
	words := tokenize(content)
	for _, word := range words {
		var (
			row    = word
			family = indexColumnFamily
			column = name
			value  = ""
			ts     = bigtable.Now()
		)
		wg.Add(1)
		go func() {
			// TODO: should use a semaphore to limit the number of concurrent writes.
			writeOneColumn(row, family, column, value, ts)
			wg.Done()
		}()
	}
	wg.Wait()
	if writeErr != nil {
		http.Error(w, "Error writing to Bigtable: "+writeErr.Error(), http.StatusInternalServerError)
		return
	}

	var buf bytes.Buffer
	if err := addTemplate.ExecuteTemplate(&buf, "", struct{ Title string }{name}); err != nil {
		http.Error(w, "Error executing HTML template: "+err.Error(), http.StatusInternalServerError)
		return
	}
	io.Copy(w, &buf)
}