func setupFakeServer(opt ...grpc.ServerOption) (tbl *Table, cleanup func(), err error) {
	srv, err := bttest.NewServer("127.0.0.1:0", opt...)
	if err != nil {
		return nil, nil, err
	}
	conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
	if err != nil {
		return nil, nil, err
	}
	client, err := NewClient(context.Background(), "client", "instance", option.WithGRPCConn(conn))
	if err != nil {
		return nil, nil, err
	}
	adminClient, err := NewAdminClient(context.Background(), "client", "instance", option.WithGRPCConn(conn))
	if err != nil {
		return nil, nil, err
	}
	if err := adminClient.CreateTable(context.Background(), "table"); err != nil {
		return nil, nil, err
	}
	if err := adminClient.CreateColumnFamily(context.Background(), "table", "cf"); err != nil {
		return nil, nil, err
	}
	t := client.Open("table")
	cleanupFunc := func() {
		adminClient.Close()
		client.Close()
		srv.Close()
	}
	return t, cleanupFunc, nil
}
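// A short usage sketch for the helper above (the test name TestWithFakeServer
// is hypothetical): open the in-memory table, then tear down the clients and
// server through the returned cleanup function.
func TestWithFakeServer(t *testing.T) {
	tbl, cleanup, err := setupFakeServer()
	if err != nil {
		t.Fatalf("setupFakeServer: %v", err)
	}
	defer cleanup()
	_ = tbl // use tbl for reads and writes against the fake server
}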
// NewClient returns a new logging client associated with the provided project ID.
//
// By default NewClient uses AdminScope. To use a different scope, call
// NewClient using a WithScopes option (see https://godoc.org/google.golang.org/api/option#WithScopes).
func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
	// Check for '/' in project ID to reserve the ability to support various owning resources,
	// in the form "{Collection}/{Name}", for instance "organizations/my-org".
	if strings.ContainsRune(projectID, '/') {
		return nil, errors.New("logging: project ID contains '/'")
	}
	opts = append([]option.ClientOption{
		option.WithEndpoint(internal.ProdAddr),
		option.WithScopes(logging.AdminScope),
	}, opts...)
	lc, err := vkit.NewClient(ctx, opts...)
	if err != nil {
		return nil, err
	}
	// TODO(jba): pass along any client options that should be provided to all clients.
	sc, err := vkit.NewConfigClient(ctx, option.WithGRPCConn(lc.Connection()))
	if err != nil {
		return nil, err
	}
	mc, err := vkit.NewMetricsClient(ctx, option.WithGRPCConn(lc.Connection()))
	if err != nil {
		return nil, err
	}
	lc.SetGoogleClientInfo("logging", internal.Version)
	sc.SetGoogleClientInfo("logging", internal.Version)
	mc.SetGoogleClientInfo("logging", internal.Version)
	client := &Client{
		lClient:   lc,
		sClient:   sc,
		mClient:   mc,
		projectID: projectID,
	}
	return client, nil
}
func ExampleNewServer() {
	srv, err := bttest.NewServer("127.0.0.1:0")
	if err != nil {
		log.Fatalln(err)
	}
	ctx := context.Background()
	conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
	if err != nil {
		log.Fatalln(err)
	}
	proj, instance := "proj", "instance"
	adminClient, err := bigtable.NewAdminClient(ctx, proj, instance, option.WithGRPCConn(conn))
	if err != nil {
		log.Fatalln(err)
	}
	if err = adminClient.CreateTable(ctx, "example"); err != nil {
		log.Fatalln(err)
	}
	if err = adminClient.CreateColumnFamily(ctx, "example", "links"); err != nil {
		log.Fatalln(err)
	}
	client, err := bigtable.NewClient(ctx, proj, instance, option.WithGRPCConn(conn))
	if err != nil {
		log.Fatalln(err)
	}
	tbl := client.Open("example")
	mut := bigtable.NewMutation()
	mut.Set("links", "golang.org", bigtable.Now(), []byte("Gophers!"))
	if err = tbl.Apply(ctx, "com.google.cloud", mut); err != nil {
		log.Fatalln(err)
	}
	if row, err := tbl.ReadRow(ctx, "com.google.cloud"); err != nil {
		log.Fatalln(err)
	} else {
		for _, column := range row["links"] {
			fmt.Println(column.Column)
			fmt.Println(string(column.Value))
		}
	}
	// Output:
	// links:golang.org
	// Gophers!
}
// NewClient returns a new logging client associated with the provided project ID.
//
// By default NewClient uses WriteScope. To use a different scope, call
// NewClient using a WithScopes option.
func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
	// Check for '/' in project ID to reserve the ability to support various owning resources,
	// in the form "{Collection}/{Name}", for instance "organizations/my-org".
	if strings.ContainsRune(projectID, '/') {
		return nil, errors.New("logging: project ID contains '/'")
	}
	opts = append([]option.ClientOption{option.WithEndpoint(prodAddr), option.WithScopes(WriteScope)}, opts...)
	lc, err := vkit.NewClient(ctx, opts...)
	if err != nil {
		return nil, err
	}
	// TODO(jba): pass along any client options that should be provided to all clients.
	sc, err := vkit.NewConfigClient(ctx, option.WithGRPCConn(lc.Connection()))
	if err != nil {
		return nil, err
	}
	mc, err := vkit.NewMetricsClient(ctx, option.WithGRPCConn(lc.Connection()))
	if err != nil {
		return nil, err
	}
	lc.SetGoogleClientInfo("logging", version)
	sc.SetGoogleClientInfo("logging", version)
	mc.SetGoogleClientInfo("logging", version)
	client := &Client{
		lClient:   lc,
		sClient:   sc,
		mClient:   mc,
		projectID: projectID,
		errc:      make(chan error, defaultErrorCapacity), // create a small buffer for errors
		donec:     make(chan struct{}),
		OnError:   func(e error) { log.Printf("logging client: %v", e) },
	}
	// Call the user's function synchronously, to make life easier for them.
	go func() {
		for err := range client.errc {
			// This reference to OnError is memory-safe if the user sets OnError before
			// calling any client methods. The reference happens before the first read from
			// client.errc, which happens before the first write to client.errc, which
			// happens before any call, which happens before the user sets OnError.
			if fn := client.OnError; fn != nil {
				fn(err)
			} else {
				log.Printf("logging (project ID %q): %v", projectID, err)
			}
		}
	}()
	return client, nil
}
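// A minimal sketch of the scope override described in the doc comment above.
// The function name is hypothetical, the project ID is a placeholder, and
// ReadScope stands in for whichever scope the caller actually needs.
func newReadOnlyClient(ctx context.Context) (*Client, error) {
	// WithScopes replaces the default WriteScope prepended by NewClient.
	return NewClient(ctx, "some-project", option.WithScopes(ReadScope))
}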
// NewClient creates a new PubSub client.
func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
	var o []option.ClientOption
	// Environment variables for gcloud emulator:
	// https://cloud.google.com/sdk/gcloud/reference/beta/emulators/pubsub/
	if addr := os.Getenv("PUBSUB_EMULATOR_HOST"); addr != "" {
		conn, err := grpc.Dial(addr, grpc.WithInsecure())
		if err != nil {
			return nil, fmt.Errorf("grpc.Dial: %v", err)
		}
		o = []option.ClientOption{option.WithGRPCConn(conn)}
	} else {
		o = []option.ClientOption{option.WithUserAgent(userAgent)}
	}
	o = append(o, opts...)
	s, err := newPubSubService(ctx, o)
	if err != nil {
		return nil, fmt.Errorf("constructing pubsub client: %v", err)
	}
	c := &Client{
		projectID: projectID,
		s:         s,
	}
	return c, nil
}
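// A minimal sketch of the emulator path handled above: point the client at a
// locally running Pub/Sub emulator through the environment variable, then
// construct the client as usual. The function name, emulator address, and
// project ID are placeholders.
func newEmulatorClient(ctx context.Context) (*Client, error) {
	os.Setenv("PUBSUB_EMULATOR_HOST", "localhost:8085")
	// NewClient sees the variable and dials the emulator without credentials.
	return NewClient(ctx, "some-project")
}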
// NewClient builds a new connected data client for this environment
func (e *EmulatedEnv) NewClient() (*Client, error) {
	timeout := 20 * time.Second
	ctx, _ := context.WithTimeout(context.Background(), timeout)
	conn, err := grpc.Dial(e.server.Addr, grpc.WithInsecure())
	if err != nil {
		return nil, err
	}
	return NewClient(ctx, e.config.Project, e.config.Instance, option.WithGRPCConn(conn))
}
func newPubSubService(ctx context.Context, opts []option.ClientOption) (*apiService, error) {
	pubc, err := vkit.NewPublisherClient(ctx, opts...)
	if err != nil {
		return nil, err
	}
	subc, err := vkit.NewSubscriberClient(ctx, option.WithGRPCConn(pubc.Connection()))
	if err != nil {
		_ = pubc.Close() // ignore error
		return nil, err
	}
	pubc.SetGoogleClientInfo("pubsub", version)
	subc.SetGoogleClientInfo("pubsub", version)
	return &apiService{pubc: pubc, subc: subc}, nil
}
func TestMain(m *testing.M) {
	flag.Parse() // needed for testing.Short()
	ctx := context.Background()
	testProjectID = testutil.ProjID()
	var newClient func(ctx context.Context, projectID string) *Client
	if testProjectID == "" || testing.Short() {
		integrationTest = false
		if testProjectID != "" {
			log.Print("Integration tests skipped in short mode (using fake instead)")
		}
		testProjectID = "PROJECT_ID"
		addr, err := ltesting.NewServer()
		if err != nil {
			log.Fatalf("creating fake server: %v", err)
		}
		newClient = func(ctx context.Context, projectID string) *Client {
			conn, err := grpc.Dial(addr, grpc.WithInsecure())
			if err != nil {
				log.Fatalf("dialing %q: %v", addr, err)
			}
			c, err := NewClient(ctx, projectID, option.WithGRPCConn(conn))
			if err != nil {
				log.Fatalf("creating client for fake at %q: %v", addr, err)
			}
			return c
		}
	} else {
		integrationTest = true
		ts := testutil.TokenSource(ctx, logging.AdminScope)
		if ts == nil {
			log.Fatal("The project key must be set. See CONTRIBUTING.md for details")
		}
		log.Printf("running integration tests with project %s", testProjectID)
		newClient = func(ctx context.Context, projectID string) *Client {
			c, err := NewClient(ctx, projectID, option.WithTokenSource(ts))
			if err != nil {
				log.Fatalf("creating prod client: %v", err)
			}
			return c
		}
	}
	client = newClient(ctx, testProjectID)
	initMetrics(ctx)
	cleanup := initSinks(ctx)
	exit := m.Run()
	cleanup()
	client.Close()
	os.Exit(exit)
}
func TestAdminIntegration(t *testing.T) {
	srv, err := bttest.NewServer("127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	defer srv.Close()
	t.Logf("bttest.Server running on %s", srv.Addr)

	ctx, _ := context.WithTimeout(context.Background(), 2*time.Second)
	conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
	if err != nil {
		t.Fatalf("grpc.Dial: %v", err)
	}
	adminClient, err := NewAdminClient(ctx, "proj", "instance", option.WithGRPCConn(conn))
	if err != nil {
		t.Fatalf("NewAdminClient: %v", err)
	}
	defer adminClient.Close()

	list := func() []string {
		tbls, err := adminClient.Tables(ctx)
		if err != nil {
			t.Fatalf("Fetching list of tables: %v", err)
		}
		sort.Strings(tbls)
		return tbls
	}
	if err := adminClient.CreateTable(ctx, "mytable"); err != nil {
		t.Fatalf("Creating table: %v", err)
	}
	if err := adminClient.CreateTable(ctx, "myothertable"); err != nil {
		t.Fatalf("Creating table: %v", err)
	}
	if got, want := list(), []string{"myothertable", "mytable"}; !reflect.DeepEqual(got, want) {
		t.Errorf("adminClient.Tables returned %#v, want %#v", got, want)
	}
	if err := adminClient.DeleteTable(ctx, "myothertable"); err != nil {
		t.Fatalf("Deleting table: %v", err)
	}
	if got, want := list(), []string{"mytable"}; !reflect.DeepEqual(got, want) {
		t.Errorf("adminClient.Tables returned %#v, want %#v", got, want)
	}
}
// NewClient creates a new Client for a given dataset.
// If the project ID is empty, it is derived from the DATASTORE_PROJECT_ID environment variable.
// If the DATASTORE_EMULATOR_HOST environment variable is set, client will use its value
// to connect to a locally-running datastore emulator.
func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
	var o []option.ClientOption
	// Environment variables for gcd emulator:
	// https://cloud.google.com/datastore/docs/tools/datastore-emulator
	// If the emulator is available, dial it directly (and don't pass any credentials).
	if addr := os.Getenv("DATASTORE_EMULATOR_HOST"); addr != "" {
		conn, err := grpc.Dial(addr, grpc.WithInsecure())
		if err != nil {
			return nil, fmt.Errorf("grpc.Dial: %v", err)
		}
		o = []option.ClientOption{option.WithGRPCConn(conn)}
	} else {
		o = []option.ClientOption{
			option.WithEndpoint(prodAddr),
			option.WithScopes(ScopeDatastore),
			option.WithUserAgent(userAgent),
		}
	}
	// Warn if we see the legacy emulator environment variables.
	if os.Getenv("DATASTORE_HOST") != "" && os.Getenv("DATASTORE_EMULATOR_HOST") == "" {
		log.Print("WARNING: legacy environment variable DATASTORE_HOST is ignored. Use DATASTORE_EMULATOR_HOST instead.")
	}
	if os.Getenv("DATASTORE_DATASET") != "" && os.Getenv("DATASTORE_PROJECT_ID") == "" {
		log.Print("WARNING: legacy environment variable DATASTORE_DATASET is ignored. Use DATASTORE_PROJECT_ID instead.")
	}
	if projectID == "" {
		projectID = os.Getenv("DATASTORE_PROJECT_ID")
	}
	if projectID == "" {
		return nil, errors.New("datastore: missing project/dataset id")
	}
	o = append(o, opts...)
	conn, err := transport.DialGRPC(ctx, o...)
	if err != nil {
		return nil, fmt.Errorf("dialing: %v", err)
	}
	return &Client{
		conn:    conn,
		client:  newDatastoreClient(conn, projectID),
		dataset: projectID,
	}, nil
}
// DefaultClientOptions returns the default client options to use for the
// client's gRPC connection.
func DefaultClientOptions(endpoint, scope, userAgent string) ([]option.ClientOption, error) {
	var o []option.ClientOption
	// Check the environment variables for the bigtable emulator.
	// Dial it directly and don't pass any credentials.
	if addr := os.Getenv("BIGTABLE_EMULATOR_HOST"); addr != "" {
		conn, err := grpc.Dial(addr, grpc.WithInsecure())
		if err != nil {
			return nil, fmt.Errorf("emulator grpc.Dial: %v", err)
		}
		o = []option.ClientOption{option.WithGRPCConn(conn)}
	} else {
		o = []option.ClientOption{
			option.WithEndpoint(endpoint),
			option.WithScopes(scope),
			option.WithUserAgent(userAgent),
		}
	}
	return o, nil
}
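// A hedged sketch of how a caller might consume DefaultClientOptions when
// dialing. The function name, endpoint, scope, and user agent strings are
// placeholders, and transport.DialGRPC is assumed to be the dialing helper in
// use (as in the datastore NewClient above).
func dialWithDefaults(ctx context.Context, userOpts ...option.ClientOption) (*grpc.ClientConn, error) {
	o, err := DefaultClientOptions("bigtable.googleapis.com:443",
		"https://www.googleapis.com/auth/bigtable.data", "my-user-agent")
	if err != nil {
		return nil, err
	}
	o = append(o, userOpts...) // caller-supplied options are appended so they can override the defaults
	return transport.DialGRPC(ctx, o...)
}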
func TestMain(m *testing.M) {
	flag.Parse()
	serv := grpc.NewServer()
	languagepb.RegisterLanguageServiceServer(serv, &mockLanguage)
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		log.Fatal(err)
	}
	go serv.Serve(lis)
	conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	clientOpt = option.WithGRPCConn(conn)
	os.Exit(m.Run())
}
func TestMain(m *testing.M) {
	flag.Parse()
	serv := grpc.NewServer()
	clouddebuggerpb.RegisterDebugger2Server(serv, &mockDebugger2)
	clouddebuggerpb.RegisterController2Server(serv, &mockController2)
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		log.Fatal(err)
	}
	go serv.Serve(lis)
	conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	clientOpt = option.WithGRPCConn(conn)
	os.Exit(m.Run())
}
func TestMain(m *testing.M) {
	flag.Parse()
	serv := grpc.NewServer()
	pubsubpb.RegisterPublisherServer(serv, &mockPublisher)
	iampb.RegisterIAMPolicyServer(serv, &mockIamPolicy)
	pubsubpb.RegisterSubscriberServer(serv, &mockSubscriber)
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		log.Fatal(err)
	}
	go serv.Serve(lis)
	conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	clientOpt = option.WithGRPCConn(conn)
	os.Exit(m.Run())
}
func NewClient(bt *BigTable) (client *api.Client, err error) {
	if bt.testing {
		// Use distinct error names in this block so the named return value
		// err is not shadowed, which would otherwise swallow the error from
		// api.NewClient below.
		testSrv, srvErr := bttest.NewServer()
		// TODO: close the testSrv if necessary.
		if srvErr != nil {
			return nil, fmt.Errorf("Unable to create bigTable local test server. %v", srvErr)
		}
		testConn, dialErr := grpc.Dial(testSrv.Addr, grpc.WithInsecure())
		if dialErr != nil {
			return nil, fmt.Errorf("Unable to create bigTable local test server. %v", dialErr)
		}
		// Connects to a local bigtable with no security; the project, zone,
		// and cluster values are ignored.
		client, err = api.NewClient(bt.ctx, bt.project, bt.zone, bt.cluster, option.WithGRPCConn(testConn))
	} else {
		// Uses Application Default Credentials to authenticate into Google's Cloud.
		client, err = api.NewClient(bt.ctx, bt.project, bt.zone, bt.cluster)
	}
	if err != nil {
		return nil, fmt.Errorf("Unable to create a table client. %v", err)
	}
	return
}
func TestMain(m *testing.M) {
	flag.Parse()
	serv := grpc.NewServer()
	clouderrorreportingpb.RegisterErrorGroupServiceServer(serv, &mockErrorGroup)
	clouderrorreportingpb.RegisterErrorStatsServiceServer(serv, &mockErrorStats)
	clouderrorreportingpb.RegisterReportErrorsServiceServer(serv, &mockReportErrors)
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		log.Fatal(err)
	}
	go serv.Serve(lis)
	conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	clientOpt = option.WithGRPCConn(conn)
	os.Exit(m.Run())
}
func TestMain(m *testing.M) {
	flag.Parse() // needed for testing.Short()
	ctx := context.Background()
	testProjectID = testutil.ProjID()
	errorc = make(chan error, 100)
	if testProjectID == "" || testing.Short() {
		integrationTest = false
		if testProjectID != "" {
			log.Print("Integration tests skipped in short mode (using fake instead)")
		}
		testProjectID = "PROJECT_ID"
		clean = func(e *logging.Entry) {
			// Remove the insert ID for consistency with the integration test.
			e.InsertID = ""
		}
		addr, err := ltesting.NewServer()
		if err != nil {
			log.Fatalf("creating fake server: %v", err)
		}
		logging.SetNow(testNow)
		newClients = func(ctx context.Context, projectID string) (*logging.Client, *logadmin.Client) {
			conn, err := grpc.Dial(addr, grpc.WithInsecure())
			if err != nil {
				log.Fatalf("dialing %q: %v", addr, err)
			}
			c, err := logging.NewClient(ctx, projectID, option.WithGRPCConn(conn))
			if err != nil {
				log.Fatalf("creating client for fake at %q: %v", addr, err)
			}
			ac, err := logadmin.NewClient(ctx, projectID, option.WithGRPCConn(conn))
			if err != nil {
				log.Fatalf("creating client for fake at %q: %v", addr, err)
			}
			return c, ac
		}
	} else {
		integrationTest = true
		clean = func(e *logging.Entry) {
			// We cannot compare timestamps, so set them to the test time.
			// Also, remove the insert ID added by the service.
			e.Timestamp = testNow().UTC()
			e.InsertID = ""
		}
		ts := testutil.TokenSource(ctx, logging.AdminScope)
		if ts == nil {
			log.Fatal("The project key must be set. See CONTRIBUTING.md for details")
		}
		log.Printf("running integration tests with project %s", testProjectID)
		newClients = func(ctx context.Context, projectID string) (*logging.Client, *logadmin.Client) {
			c, err := logging.NewClient(ctx, projectID, option.WithTokenSource(ts))
			if err != nil {
				log.Fatalf("creating prod client: %v", err)
			}
			ac, err := logadmin.NewClient(ctx, projectID, option.WithTokenSource(ts))
			if err != nil {
				log.Fatalf("creating prod client: %v", err)
			}
			return c, ac
		}
	}
	client, aclient = newClients(ctx, testProjectID)
	client.OnError = func(e error) { errorc <- e }
	initLogs(ctx)
	testFilter = fmt.Sprintf(`logName = "projects/%s/logs/%s"`, testProjectID,
		strings.Replace(testLogID, "/", "%2F", -1))
	exit := m.Run()
	client.Close()
	os.Exit(exit)
}
func TestClientIntegration(t *testing.T) {
	start := time.Now()
	lastCheckpoint := start
	checkpoint := func(s string) {
		n := time.Now()
		t.Logf("[%s] %v since start, %v since last checkpoint", s, n.Sub(start), n.Sub(lastCheckpoint))
		lastCheckpoint = n
	}

	proj, instance, table := "proj", "instance", "mytable"
	var clientOpts []option.ClientOption
	timeout := 20 * time.Second
	if *useProd == "" {
		srv, err := bttest.NewServer("127.0.0.1:0")
		if err != nil {
			t.Fatal(err)
		}
		defer srv.Close()
		t.Logf("bttest.Server running on %s", srv.Addr)
		conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
		if err != nil {
			t.Fatalf("grpc.Dial: %v", err)
		}
		clientOpts = []option.ClientOption{option.WithGRPCConn(conn)}
	} else {
		t.Logf("Running test against production")
		a := strings.SplitN(*useProd, ",", 3)
		proj, instance, table = a[0], a[1], a[2]
		timeout = 5 * time.Minute
	}

	ctx, _ := context.WithTimeout(context.Background(), timeout)
	client, err := NewClient(ctx, proj, instance, clientOpts...)
	if err != nil {
		t.Fatalf("NewClient: %v", err)
	}
	defer client.Close()
	checkpoint("dialed Client")

	adminClient, err := NewAdminClient(ctx, proj, instance, clientOpts...)
	if err != nil {
		t.Fatalf("NewAdminClient: %v", err)
	}
	defer adminClient.Close()
	checkpoint("dialed AdminClient")

	// Delete the table at the end of the test.
	// Do this even before creating the table so that if this is running
	// against production and CreateTable fails there's a chance of cleaning it up.
	defer adminClient.DeleteTable(ctx, table)

	if err := adminClient.CreateTable(ctx, table); err != nil {
		t.Fatalf("Creating table: %v", err)
	}
	checkpoint("created table")
	if err := adminClient.CreateColumnFamily(ctx, table, "follows"); err != nil {
		t.Fatalf("Creating column family: %v", err)
	}
	checkpoint(`created "follows" column family`)

	tbl := client.Open(table)

	// Insert some data.
	initialData := map[string][]string{
		"wmckinley":   {"tjefferson"},
		"gwashington": {"jadams"},
		"tjefferson":  {"gwashington", "jadams"}, // wmckinley set conditionally below
		"jadams":      {"gwashington", "tjefferson"},
	}
	for row, ss := range initialData {
		mut := NewMutation()
		for _, name := range ss {
			mut.Set("follows", name, 0, []byte("1"))
		}
		if err := tbl.Apply(ctx, row, mut); err != nil {
			t.Errorf("Mutating row %q: %v", row, err)
		}
	}
	checkpoint("inserted initial data")

	// Do a conditional mutation with a complex filter.
	mutTrue := NewMutation()
	mutTrue.Set("follows", "wmckinley", 0, []byte("1"))
	filter := ChainFilters(ColumnFilter("gwash[iz].*"), ValueFilter("."))
	mut := NewCondMutation(filter, mutTrue, nil)
	if err := tbl.Apply(ctx, "tjefferson", mut); err != nil {
		t.Errorf("Conditionally mutating row: %v", err)
	}
	// Do a second condition mutation with a filter that does not match,
	// and thus no changes should be made.
	mutTrue = NewMutation()
	mutTrue.DeleteRow()
	filter = ColumnFilter("snoop.dogg")
	mut = NewCondMutation(filter, mutTrue, nil)
	if err := tbl.Apply(ctx, "tjefferson", mut); err != nil {
		t.Errorf("Conditionally mutating row: %v", err)
	}
	checkpoint("did two conditional mutations")

	// Fetch a row.
	row, err := tbl.ReadRow(ctx, "jadams")
	if err != nil {
		t.Fatalf("Reading a row: %v", err)
	}
	wantRow := Row{
		"follows": []ReadItem{
			{Row: "jadams", Column: "follows:gwashington", Value: []byte("1")},
			{Row: "jadams", Column: "follows:tjefferson", Value: []byte("1")},
		},
	}
	for _, ris := range row {
		sort.Sort(byColumn(ris))
	}
	if !reflect.DeepEqual(row, wantRow) {
		t.Errorf("Read row mismatch.\n got %#v\nwant %#v", row, wantRow)
	}
	checkpoint("tested ReadRow")

	// Do a bunch of reads with filters.
	readTests := []struct {
		desc   string
		rr     RowRange
		filter Filter // may be nil
		// We do the read, grab all the cells, turn them into "<row>-<col>-<val>",
		// sort that list, and join with a comma.
		want string
	}{
		{
			desc: "read all, unfiltered",
			rr:   RowRange{},
			want: "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1,tjefferson-gwashington-1,tjefferson-jadams-1,tjefferson-wmckinley-1,wmckinley-tjefferson-1",
		},
		{
			desc: "read with InfiniteRange, unfiltered",
			rr:   InfiniteRange("tjefferson"),
			want: "tjefferson-gwashington-1,tjefferson-jadams-1,tjefferson-wmckinley-1,wmckinley-tjefferson-1",
		},
		{
			desc: "read with NewRange, unfiltered",
			rr:   NewRange("gargamel", "hubbard"),
			want: "gwashington-jadams-1",
		},
		{
			desc: "read with PrefixRange, unfiltered",
			rr:   PrefixRange("jad"),
			want: "jadams-gwashington-1,jadams-tjefferson-1",
		},
		{
			desc: "read with SingleRow, unfiltered",
			rr:   SingleRow("wmckinley"),
			want: "wmckinley-tjefferson-1",
		},
		{
			desc:   "read all, with ColumnFilter",
			rr:     RowRange{},
			filter: ColumnFilter(".*j.*"), // matches "jadams" and "tjefferson"
			want:   "gwashington-jadams-1,jadams-tjefferson-1,tjefferson-jadams-1,wmckinley-tjefferson-1",
		},
	}
	for _, tc := range readTests {
		var opts []ReadOption
		if tc.filter != nil {
			opts = append(opts, RowFilter(tc.filter))
		}
		var elt []string
		err := tbl.ReadRows(context.Background(), tc.rr, func(r Row) bool {
			for _, ris := range r {
				for _, ri := range ris {
					elt = append(elt, formatReadItem(ri))
				}
			}
			return true
		}, opts...)
		if err != nil {
			t.Errorf("%s: %v", tc.desc, err)
			continue
		}
		sort.Strings(elt)
		if got := strings.Join(elt, ","); got != tc.want {
			t.Errorf("%s: wrong reads.\n got %q\nwant %q", tc.desc, got, tc.want)
		}
	}

	// Read a RowList
	var elt []string
	keys := RowList{"wmckinley", "gwashington", "jadams"}
	want := "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1,wmckinley-tjefferson-1"
	err = tbl.ReadRows(ctx, keys, func(r Row) bool {
		for _, ris := range r {
			for _, ri := range ris {
				elt = append(elt, formatReadItem(ri))
			}
		}
		return true
	})
	if err != nil {
		t.Errorf("read RowList: %v", err)
	}
	sort.Strings(elt)
	if got := strings.Join(elt, ","); got != want {
		t.Errorf("bulk read: wrong reads.\n got %q\nwant %q", got, want)
	}
	checkpoint("tested ReadRows in a few ways")

	// Do a scan and stop part way through.
	// Verify that the ReadRows callback doesn't keep running.
	stopped := false
	err = tbl.ReadRows(ctx, InfiniteRange(""), func(r Row) bool {
		if r.Key() < "h" {
			return true
		}
		if !stopped {
			stopped = true
			return false
		}
		t.Errorf("ReadRows kept scanning to row %q after being told to stop", r.Key())
		return false
	})
	if err != nil {
		t.Errorf("Partial ReadRows: %v", err)
	}
	checkpoint("did partial ReadRows test")

	// Delete a row and check it goes away.
	mut = NewMutation()
	mut.DeleteRow()
	if err := tbl.Apply(ctx, "wmckinley", mut); err != nil {
		t.Errorf("Apply DeleteRow: %v", err)
	}
	row, err = tbl.ReadRow(ctx, "wmckinley")
	if err != nil {
		t.Fatalf("Reading a row after DeleteRow: %v", err)
	}
	if len(row) != 0 {
		t.Fatalf("Read non-zero row after DeleteRow: %v", row)
	}
	checkpoint("exercised DeleteRow")

	// Check ReadModifyWrite.
if err := adminClient.CreateColumnFamily(ctx, table, "counter"); err != nil { t.Fatalf("Creating column family: %v", err) } appendRMW := func(b []byte) *ReadModifyWrite { rmw := NewReadModifyWrite() rmw.AppendValue("counter", "likes", b) return rmw } incRMW := func(n int64) *ReadModifyWrite { rmw := NewReadModifyWrite() rmw.Increment("counter", "likes", n) return rmw } rmwSeq := []struct { desc string rmw *ReadModifyWrite want []byte }{ { desc: "append #1", rmw: appendRMW([]byte{0, 0, 0}), want: []byte{0, 0, 0}, }, { desc: "append #2", rmw: appendRMW([]byte{0, 0, 0, 0, 17}), // the remaining 40 bits to make a big-endian 17 want: []byte{0, 0, 0, 0, 0, 0, 0, 17}, }, { desc: "increment", rmw: incRMW(8), want: []byte{0, 0, 0, 0, 0, 0, 0, 25}, }, } for _, step := range rmwSeq { row, err := tbl.ApplyReadModifyWrite(ctx, "gwashington", step.rmw) if err != nil { t.Fatalf("ApplyReadModifyWrite %+v: %v", step.rmw, err) } clearTimestamps(row) wantRow := Row{"counter": []ReadItem{{Row: "gwashington", Column: "counter:likes", Value: step.want}}} if !reflect.DeepEqual(row, wantRow) { t.Fatalf("After %s,\n got %v\nwant %v", step.desc, row, wantRow) } } checkpoint("tested ReadModifyWrite") // Test arbitrary timestamps more thoroughly. if err := adminClient.CreateColumnFamily(ctx, table, "ts"); err != nil { t.Fatalf("Creating column family: %v", err) } const numVersions = 4 mut = NewMutation() for i := 0; i < numVersions; i++ { // Timestamps are used in thousands because the server // only permits that granularity. mut.Set("ts", "col", Timestamp(i*1000), []byte(fmt.Sprintf("val-%d", i))) } if err := tbl.Apply(ctx, "testrow", mut); err != nil { t.Fatalf("Mutating row: %v", err) } r, err := tbl.ReadRow(ctx, "testrow") if err != nil { t.Fatalf("Reading row: %v", err) } wantRow = Row{"ts": []ReadItem{ // These should be returned in descending timestamp order. {Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")}, {Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")}, {Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")}, {Row: "testrow", Column: "ts:col", Timestamp: 0, Value: []byte("val-0")}, }} if !reflect.DeepEqual(r, wantRow) { t.Errorf("Cell with multiple versions,\n got %v\nwant %v", r, wantRow) } // Do the same read, but filter to the latest two versions. r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(2))) if err != nil { t.Fatalf("Reading row: %v", err) } wantRow = Row{"ts": []ReadItem{ {Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")}, {Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")}, }} if !reflect.DeepEqual(r, wantRow) { t.Errorf("Cell with multiple versions and LatestNFilter(2),\n got %v\nwant %v", r, wantRow) } // Delete the cell with timestamp 2000 and repeat the last read, // checking that we get ts 3000 and ts 1000. 
	mut = NewMutation()
	mut.DeleteTimestampRange("ts", "col", 2000, 3000) // half-open interval
	if err := tbl.Apply(ctx, "testrow", mut); err != nil {
		t.Fatalf("Mutating row: %v", err)
	}
	r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(2)))
	if err != nil {
		t.Fatalf("Reading row: %v", err)
	}
	wantRow = Row{"ts": []ReadItem{
		{Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")},
		{Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")},
	}}
	if !reflect.DeepEqual(r, wantRow) {
		t.Errorf("Cell with multiple versions and LatestNFilter(2), after deleting timestamp 2000,\n got %v\nwant %v", r, wantRow)
	}
	checkpoint("tested multiple versions in a cell")

	// Do highly concurrent reads/writes.
	// TODO(dsymonds): Raise this to 1000 when https://github.com/grpc/grpc-go/issues/205 is resolved.
	const maxConcurrency = 100
	var wg sync.WaitGroup
	for i := 0; i < maxConcurrency; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			switch r := rand.Intn(100); { // r ∈ [0,100)
			case 0 <= r && r < 30:
				// Do a read.
				_, err := tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(1)))
				if err != nil {
					t.Errorf("Concurrent read: %v", err)
				}
			case 30 <= r && r < 100:
				// Do a write.
				mut := NewMutation()
				mut.Set("ts", "col", 0, []byte("data"))
				if err := tbl.Apply(ctx, "testrow", mut); err != nil {
					t.Errorf("Concurrent write: %v", err)
				}
			}
		}()
	}
	wg.Wait()
	checkpoint("tested high concurrency")

	// Large reads, writes and scans.
	bigBytes := make([]byte, 3<<20) // 3 MB is large, but less than current gRPC max of 4 MB.
	nonsense := []byte("lorem ipsum dolor sit amet, ")
	fill(bigBytes, nonsense)
	mut = NewMutation()
	mut.Set("ts", "col", 0, bigBytes)
	if err := tbl.Apply(ctx, "bigrow", mut); err != nil {
		t.Errorf("Big write: %v", err)
	}
	r, err = tbl.ReadRow(ctx, "bigrow")
	if err != nil {
		t.Errorf("Big read: %v", err)
	}
	wantRow = Row{"ts": []ReadItem{
		{Row: "bigrow", Column: "ts:col", Value: bigBytes},
	}}
	if !reflect.DeepEqual(r, wantRow) {
		t.Errorf("Big read returned incorrect bytes: %v", r)
	}
	// Now write 1000 rows, each with 82 KB values, then scan them all.
	medBytes := make([]byte, 82<<10)
	fill(medBytes, nonsense)
	sem := make(chan int, 50) // do up to 50 mutations at a time.
	for i := 0; i < 1000; i++ {
		mut := NewMutation()
		mut.Set("ts", "big-scan", 0, medBytes)
		row := fmt.Sprintf("row-%d", i)
		wg.Add(1)
		go func() {
			defer wg.Done()
			defer func() { <-sem }()
			sem <- 1
			if err := tbl.Apply(ctx, row, mut); err != nil {
				t.Errorf("Preparing large scan: %v", err)
			}
		}()
	}
	wg.Wait()
	n := 0
	err = tbl.ReadRows(ctx, PrefixRange("row-"), func(r Row) bool {
		for _, ris := range r {
			for _, ri := range ris {
				n += len(ri.Value)
			}
		}
		return true
	}, RowFilter(ColumnFilter("big-scan")))
	if err != nil {
		t.Errorf("Doing large scan: %v", err)
	}
	if want := 1000 * len(medBytes); n != want {
		t.Errorf("Large scan returned %d bytes, want %d", n, want)
	}
	// Scan a subset of the 1000 rows that we just created, using a LimitRows ReadOption.
	rc := 0
	wantRc := 3
	err = tbl.ReadRows(ctx, PrefixRange("row-"), func(r Row) bool {
		rc++
		return true
	}, LimitRows(int64(wantRc)))
	if rc != wantRc {
		t.Errorf("Scan with row limit returned %d rows, want %d", rc, wantRc)
	}
	checkpoint("tested big read/write/scan")

	// Test bulk mutations
	if err := adminClient.CreateColumnFamily(ctx, table, "bulk"); err != nil {
		t.Fatalf("Creating column family: %v", err)
	}
	bulkData := map[string][]string{
		"red sox":  {"2004", "2007", "2013"},
		"patriots": {"2001", "2003", "2004", "2014"},
		"celtics":  {"1981", "1984", "1986", "2008"},
	}
	var rowKeys []string
	var muts []*Mutation
	for row, ss := range bulkData {
		mut := NewMutation()
		for _, name := range ss {
			mut.Set("bulk", name, 0, []byte("1"))
		}
		rowKeys = append(rowKeys, row)
		muts = append(muts, mut)
	}
	status, err := tbl.ApplyBulk(ctx, rowKeys, muts)
	if err != nil {
		t.Fatalf("Bulk mutating rows %q: %v", rowKeys, err)
	}
	if status != nil {
		// Report the per-row statuses; err is nil at this point.
		t.Errorf("non-nil errors: %v", status)
	}
	checkpoint("inserted bulk data")

	// Read each row back
	for rowKey, ss := range bulkData {
		row, err := tbl.ReadRow(ctx, rowKey)
		if err != nil {
			t.Fatalf("Reading a bulk row: %v", err)
		}
		for _, ris := range row {
			sort.Sort(byColumn(ris))
		}
		var wantItems []ReadItem
		for _, val := range ss {
			wantItems = append(wantItems, ReadItem{Row: rowKey, Column: "bulk:" + val, Value: []byte("1")})
		}
		wantRow := Row{"bulk": wantItems}
		if !reflect.DeepEqual(row, wantRow) {
			t.Errorf("Read row mismatch.\n got %#v\nwant %#v", row, wantRow)
		}
	}
	checkpoint("tested reading from bulk insert")

	// Test bulk write errors.
	// Note: Setting timestamps as ServerTime makes sure the mutations are not retried on error.
	badMut := NewMutation()
	badMut.Set("badfamily", "col", ServerTime, nil)
	badMut2 := NewMutation()
	badMut2.Set("badfamily2", "goodcol", ServerTime, []byte("1"))
	status, err = tbl.ApplyBulk(ctx, []string{"badrow", "badrow2"}, []*Mutation{badMut, badMut2})
	if err != nil {
		t.Fatalf("Bulk mutating rows %q: %v", rowKeys, err)
	}
	if status == nil {
		t.Errorf("No errors for bad bulk mutation")
	} else if status[0] == nil || status[1] == nil {
		t.Errorf("No error for bad bulk mutation")
	}
}
// makeRequests makes some requests.
// req is an incoming request used to construct the trace. traceClient is the
// client used to upload the trace. rt is the trace client's http client's
// transport. This is used to retrieve the trace uploaded by the client, if
// any. If expectTrace is true, we expect a trace will be uploaded. If
// synchronous is true, the call to Finish is expected not to return before the
// client has uploaded any traces.
func makeRequests(t *testing.T, req *http.Request, traceClient *Client, rt *fakeRoundTripper, synchronous bool, expectTrace bool) *http.Request {
	span := traceClient.SpanFromRequest(req)
	ctx := NewContext(context.Background(), span)

	// An HTTP request.
	{
		req2, err := http.NewRequest("GET", "http://example.com/bar", nil)
		if err != nil {
			t.Fatal(err)
		}
		resp := &http.Response{StatusCode: 200}
		s := span.NewRemoteChild(req2)
		s.Finish(WithResponse(resp))
	}

	// An autogenerated API call.
	{
		rt := &fakeRoundTripper{reqc: make(chan *http.Request, 1)}
		hc := &http.Client{Transport: rt}
		computeClient, err := compute.New(hc)
		if err != nil {
			t.Fatal(err)
		}
		_, err = computeClient.Zones.List(testProjectID).Context(ctx).Do()
		if err != nil {
			t.Fatal(err)
		}
	}

	// A cloud library call that uses the autogenerated API.
	{
		rt := &fakeRoundTripper{reqc: make(chan *http.Request, 1)}
		hc := &http.Client{Transport: rt}
		storageClient, err := storage.NewClient(context.Background(), option.WithHTTPClient(hc))
		if err != nil {
			t.Fatal(err)
		}
		var objAttrsList []*storage.ObjectAttrs
		it := storageClient.Bucket("testbucket").Objects(ctx, nil)
		for {
			objAttrs, err := it.Next()
			if err != nil && err != iterator.Done {
				t.Fatal(err)
			}
			if err == iterator.Done {
				break
			}
			objAttrsList = append(objAttrsList, objAttrs)
		}
	}

	// A cloud library call that uses grpc internally.
	for _, fail := range []bool{false, true} {
		srv, err := testutil.NewServer()
		if err != nil {
			t.Fatalf("creating test datastore server: %v", err)
		}
		dspb.RegisterDatastoreServer(srv.Gsrv, &fakeDatastoreServer{fail: fail})
		srv.Start()
		conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure(), EnableGRPCTracingDialOption)
		if err != nil {
			t.Fatalf("connecting to test datastore server: %v", err)
		}
		datastoreClient, err := datastore.NewClient(ctx, testProjectID, option.WithGRPCConn(conn))
		if err != nil {
			t.Fatalf("creating datastore client: %v", err)
		}
		k := datastore.NameKey("Entity", "stringID", nil)
		e := new(datastore.Entity)
		datastoreClient.Get(ctx, k, e)
	}

	done := make(chan struct{})
	go func() {
		if synchronous {
			err := span.FinishWait()
			if err != nil {
				t.Errorf("Unexpected error from span.FinishWait: %v", err)
			}
		} else {
			span.Finish()
		}
		done <- struct{}{}
	}()
	if !expectTrace {
		<-done
		select {
		case <-rt.reqc:
			t.Errorf("Got a trace, expected none.")
		case <-time.After(5 * time.Millisecond):
		}
		return nil
	} else if !synchronous {
		<-done
		return <-rt.reqc
	} else {
		select {
		case <-done:
			t.Errorf("Synchronous Finish didn't wait for trace upload.")
			return <-rt.reqc
		case <-time.After(5 * time.Millisecond):
			r := <-rt.reqc
			<-done
			return r
		}
	}
}
// WithBaseGRPC returns a ClientOption that specifies the gRPC client
// connection to use as the basis of communications. This option may only be
// used with services that support gRPC as their communication transport.
func WithBaseGRPC(conn *grpc.ClientConn) ClientOption {
	return wrapOpt{option.WithGRPCConn(conn)}
}
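// A minimal usage sketch for WithBaseGRPC (the function name and address are
// placeholders): dial the gRPC connection yourself, then wrap it as a
// ClientOption for a constructor that supports gRPC transport.
func optionFromConn() (ClientOption, error) {
	conn, err := grpc.Dial("some-service.example.com:443", grpc.WithInsecure())
	if err != nil {
		return nil, err
	}
	// Pass the returned option to the service's client constructor.
	return WithBaseGRPC(conn), nil
}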