func TestBucketMethods(t *testing.T) {
	ctx := context.Background()
	client, bucket := testConfig(ctx, t)
	defer client.Close()

	projectID := testutil.ProjID()
	newBucket := bucket + "-new"

	// Test Create and Delete.
	if err := client.Bucket(newBucket).Create(ctx, projectID, nil); err != nil {
		t.Errorf("Bucket(%v).Create(%v, %v) failed: %v", newBucket, projectID, nil, err)
	}
	if err := client.Bucket(newBucket).Delete(ctx); err != nil {
		t.Errorf("Bucket(%v).Delete failed: %v", newBucket, err)
	}

	// Test Create and Delete with attributes.
	attrs := BucketAttrs{
		DefaultObjectACL: []ACLRule{{Entity: "domain-google.com", Role: RoleReader}},
	}
	if err := client.Bucket(newBucket).Create(ctx, projectID, &attrs); err != nil {
		t.Errorf("Bucket(%v).Create(%v, %v) failed: %v", newBucket, projectID, attrs, err)
	}
	if err := client.Bucket(newBucket).Delete(ctx); err != nil {
		t.Errorf("Bucket(%v).Delete failed: %v", newBucket, err)
	}
}
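// testConfig is referenced above but not shown here. A minimal sketch of
// what it might look like, assuming it wraps config (defined below) and
// skips the test when no credentials are configured; the real helper may
// differ.
func testConfig(ctx context.Context, t *testing.T) (*Client, string) {
	client, bucket := config(ctx)
	if client == nil {
		t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
	}
	return client, bucket
}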
func TestIntegrationPingBadProject(t *testing.T) {
	if testing.Short() {
		t.Skip("Integration tests skipped in short mode")
	}
	ctx := context.Background()
	ts := testutil.TokenSource(ctx, Scope)
	if ts == nil {
		t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
	}
	for _, projID := range []string{
		testutil.ProjID() + "-BAD", // nonexistent project
		"amazing-height-519",       // exists, but wrong creds
	} {
		c, err := NewClient(ctx, projID, "logging-integration-test", option.WithTokenSource(ts))
		if err != nil {
			t.Fatalf("project %s: error creating client: %v", projID, err)
		}
		if err := c.Ping(); err == nil {
			t.Errorf("project %s: want error pinging logging api, got nil", projID)
		}
		// Ping twice, just to make sure the deduping doesn't mess with the result.
		if err := c.Ping(); err == nil {
			t.Errorf("project %s: want error pinging logging api, got nil", projID)
		}
	}
}
func newClient(ctx context.Context, t *testing.T) *Client {
	ts := testutil.TokenSource(ctx, ScopeDatastore)
	if ts == nil {
		t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
	}
	client, err := NewClient(ctx, testutil.ProjID(), option.WithTokenSource(ts))
	if err != nil {
		t.Fatalf("NewClient: %v", err)
	}
	return client
}
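// A typical caller, sketched for illustration (TestBasics is a hypothetical
// test name): each integration test creates its own client via the helper
// above and closes it when done.
func TestBasics(t *testing.T) {
	ctx := context.Background()
	client := newClient(ctx, t)
	defer client.Close()
	// ... exercise the datastore API with client ...
}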
func TestMain(m *testing.M) {
	flag.Parse() // needed for testing.Short()
	ctx := context.Background()
	testProjectID = testutil.ProjID()
	var newClient func(ctx context.Context, projectID string) *Client
	if testProjectID == "" || testing.Short() {
		integrationTest = false
		if testProjectID != "" {
			log.Print("Integration tests skipped in short mode (using fake instead)")
		}
		testProjectID = "PROJECT_ID"
		addr, err := ltesting.NewServer()
		if err != nil {
			log.Fatalf("creating fake server: %v", err)
		}
		newClient = func(ctx context.Context, projectID string) *Client {
			conn, err := grpc.Dial(addr, grpc.WithInsecure())
			if err != nil {
				log.Fatalf("dialing %q: %v", addr, err)
			}
			c, err := NewClient(ctx, projectID, option.WithGRPCConn(conn))
			if err != nil {
				log.Fatalf("creating client for fake at %q: %v", addr, err)
			}
			return c
		}
	} else {
		integrationTest = true
		ts := testutil.TokenSource(ctx, logging.AdminScope)
		if ts == nil {
			log.Fatal("The project key must be set. See CONTRIBUTING.md for details")
		}
		log.Printf("running integration tests with project %s", testProjectID)
		newClient = func(ctx context.Context, projectID string) *Client {
			c, err := NewClient(ctx, projectID, option.WithTokenSource(ts))
			if err != nil {
				log.Fatalf("creating prod client: %v", err)
			}
			return c
		}
	}
	client = newClient(ctx, testProjectID)
	initMetrics(ctx)
	cleanup := initSinks(ctx)
	exit := m.Run()
	cleanup()
	client.Close()
	os.Exit(exit)
}
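// The package-level state assumed by TestMain above, sketched for context;
// the actual declarations in the test package may carry more fields.
var (
	client          *Client // shared by every test in the package
	testProjectID   string
	integrationTest bool // true when running against the real service
)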
// config is like testConfig, but it doesn't need a *testing.T.
func config(ctx context.Context) (*Client, string) {
	ts := testutil.TokenSource(ctx, ScopeFullControl)
	if ts == nil {
		return nil, ""
	}
	p := testutil.ProjID()
	if p == "" {
		log.Fatal("The project ID must be set. See CONTRIBUTING.md for details")
	}
	client, err := NewClient(ctx, option.WithTokenSource(ts))
	if err != nil {
		log.Fatalf("NewClient: %v", err)
	}
	return client, p + suffix
}
// If integration tests will be run, create a unique bucket for them.
func initIntegrationTest() bool {
	flag.Parse() // needed for testing.Short()
	ctx := context.Background()
	if testing.Short() {
		return false
	}
	client, bucket := config(ctx)
	if client == nil {
		return false
	}
	defer client.Close()
	if err := client.Bucket(bucket).Create(ctx, testutil.ProjID(), nil); err != nil {
		log.Fatalf("creating bucket %q: %v", bucket, err)
	}
	return true
}
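// How the pieces fit together: config above appends suffix to the project
// ID, and cleanup below scans for names starting with testPrefix. A
// plausible shape for those declarations and the TestMain wiring, assuming
// the suffix is derived from the clock so each run gets a unique bucket;
// the real values and wiring may differ.
const testPrefix = "-go-test"

var suffix = fmt.Sprintf("%s-%d", testPrefix, time.Now().Unix())

func TestMain(m *testing.M) {
	integrationTest = initIntegrationTest()
	exit := m.Run()
	if err := cleanup(); err != nil {
		log.Printf("cleanup: %v", err) // stale buckets are retried on the next run
	}
	os.Exit(exit)
}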
func TestIntegration(t *testing.T) {
	if testing.Short() {
		t.Skip("Integration tests skipped in short mode")
	}
	ctx := context.Background()
	ts := testutil.TokenSource(ctx, Scope)
	if ts == nil {
		t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
	}
	projID := testutil.ProjID()
	c, err := NewClient(ctx, projID, "logging-integration-test", option.WithTokenSource(ts))
	if err != nil {
		t.Fatalf("error creating client: %v", err)
	}
	if err := c.Ping(); err != nil {
		t.Fatalf("error pinging logging api: %v", err)
	}
	// Ping twice, to verify that deduping doesn't change the result.
	if err := c.Ping(); err != nil {
		t.Fatalf("error pinging logging api: %v", err)
	}
	if err := c.LogSync(Entry{Payload: customJSONObject{}}); err != nil {
		t.Fatalf("error writing log: %v", err)
	}
	if err := c.Log(Entry{Payload: customJSONObject{}}); err != nil {
		t.Fatalf("error writing log: %v", err)
	}
	if _, err := c.Writer(Default).Write([]byte("test log with io.Writer")); err != nil {
		t.Fatalf("error writing log using io.Writer: %v", err)
	}
	c.Logger(Default).Println("test log with log.Logger")
	if err := c.Flush(); err != nil {
		t.Fatalf("error flushing logs: %v", err)
	}
}
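// customJSONObject is used as a structured payload above but not shown; a
// minimal sketch, assuming all the test needs is a type that marshals to a
// fixed JSON object (the real test type may differ).
type customJSONObject struct{}

func (customJSONObject) MarshalJSON() ([]byte, error) {
	return []byte(`{"custom":"json"}`), nil
}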
// If integration tests will be run, create a dataset for them.
func initIntegrationTest() {
	flag.Parse() // needed for testing.Short()
	if testing.Short() {
		return
	}
	ctx := context.Background()
	ts := testutil.TokenSource(ctx, Scope)
	if ts == nil {
		log.Println("Integration tests skipped. See CONTRIBUTING.md for details")
		return
	}
	projID := testutil.ProjID()
	var err error
	client, err = NewClient(ctx, projID, option.WithTokenSource(ts))
	if err != nil {
		log.Fatalf("NewClient: %v", err)
	}
	dataset = client.Dataset("bigquery_integration_test")
	if err := dataset.Create(ctx); err != nil && !hasStatusCode(err, http.StatusConflict) { // AlreadyExists is 409
		log.Fatalf("creating dataset: %v", err)
	}
}
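// hasStatusCode is used above and in TestIntegration below; a minimal
// sketch, assuming errors from the BigQuery service surface as
// *googleapi.Error (google.golang.org/api/googleapi):
func hasStatusCode(err error, code int) bool {
	if e, ok := err.(*googleapi.Error); ok && e.Code == code {
		return true
	}
	return false
}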
// cleanup deletes the bucket used for testing, as well as old
// testing buckets that weren't cleaned previously.
func cleanup() error {
	if testing.Short() {
		return nil // Don't clean up in short mode.
	}
	ctx := context.Background()
	client, bucket := config(ctx)
	if client == nil {
		return nil // Don't clean up if we're not configured correctly.
	}
	defer client.Close()
	if err := killBucket(ctx, client, bucket); err != nil {
		return err
	}

	// Delete buckets whose name begins with our test prefix, and which were
	// created a while ago. (Unfortunately GCS doesn't provide last-modified
	// time, which would be a better way to check for staleness.)
	const expireAge = 24 * time.Hour
	projectID := testutil.ProjID()
	it := client.Buckets(ctx, projectID)
	it.Prefix = projectID + testPrefix
	for {
		bktAttrs, err := it.Next()
		if err == Done {
			break
		}
		if err != nil {
			return err
		}
		if time.Since(bktAttrs.Created) > expireAge {
			log.Printf("deleting bucket %q, which is more than %s old", bktAttrs.Name, expireAge)
			if err := killBucket(ctx, client, bktAttrs.Name); err != nil {
				return err
			}
		}
	}
	return nil
}
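// killBucket is referenced above but not shown; a sketch of one way to
// implement it, assuming a bucket must be emptied of objects before it can
// be deleted. The object iterator follows the same Done sentinel used in
// cleanup above.
func killBucket(ctx context.Context, client *Client, bucket string) error {
	b := client.Bucket(bucket)
	it := b.Objects(ctx, nil)
	for {
		objAttrs, err := it.Next()
		if err == Done {
			break
		}
		if err != nil {
			return err
		}
		if err := b.Object(objAttrs.Name).Delete(ctx); err != nil {
			return err
		}
	}
	return b.Delete(ctx)
}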
func TestMain(m *testing.M) {
	flag.Parse() // needed for testing.Short()
	ctx := context.Background()
	testProjectID = testutil.ProjID()
	errorc = make(chan error, 100)
	if testProjectID == "" || testing.Short() {
		integrationTest = false
		if testProjectID != "" {
			log.Print("Integration tests skipped in short mode (using fake instead)")
		}
		testProjectID = "PROJECT_ID"
		clean = func(e *logging.Entry) {
			// Remove the insert ID for consistency with the integration test.
			e.InsertID = ""
		}
		addr, err := ltesting.NewServer()
		if err != nil {
			log.Fatalf("creating fake server: %v", err)
		}
		logging.SetNow(testNow)
		newClients = func(ctx context.Context, projectID string) (*logging.Client, *logadmin.Client) {
			conn, err := grpc.Dial(addr, grpc.WithInsecure())
			if err != nil {
				log.Fatalf("dialing %q: %v", addr, err)
			}
			c, err := logging.NewClient(ctx, projectID, option.WithGRPCConn(conn))
			if err != nil {
				log.Fatalf("creating client for fake at %q: %v", addr, err)
			}
			ac, err := logadmin.NewClient(ctx, projectID, option.WithGRPCConn(conn))
			if err != nil {
				log.Fatalf("creating client for fake at %q: %v", addr, err)
			}
			return c, ac
		}
	} else {
		integrationTest = true
		clean = func(e *logging.Entry) {
			// We cannot compare timestamps, so set them to the test time.
			// Also, remove the insert ID added by the service.
			e.Timestamp = testNow().UTC()
			e.InsertID = ""
		}
		ts := testutil.TokenSource(ctx, logging.AdminScope)
		if ts == nil {
			log.Fatal("The project key must be set. See CONTRIBUTING.md for details")
		}
		log.Printf("running integration tests with project %s", testProjectID)
		newClients = func(ctx context.Context, projectID string) (*logging.Client, *logadmin.Client) {
			c, err := logging.NewClient(ctx, projectID, option.WithTokenSource(ts))
			if err != nil {
				log.Fatalf("creating prod client: %v", err)
			}
			ac, err := logadmin.NewClient(ctx, projectID, option.WithTokenSource(ts))
			if err != nil {
				log.Fatalf("creating prod client: %v", err)
			}
			return c, ac
		}
	}
	client, aclient = newClients(ctx, testProjectID)
	client.OnError = func(e error) { errorc <- e }
	initLogs(ctx)
	testFilter = fmt.Sprintf(`logName = "projects/%s/logs/%s"`,
		testProjectID, strings.Replace(testLogID, "/", "%2F", -1))
	exit := m.Run()
	client.Close()
	os.Exit(exit)
}
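// testNow is the fixed clock installed on the fake via logging.SetNow above,
// and the instant that clean rewrites timestamps to; a minimal sketch,
// assuming any fixed time works as long as every entry agrees (the real
// test may pin a different instant).
var testNow = func() time.Time {
	return time.Unix(1000, 0)
}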
func TestAll(t *testing.T) {
	if testing.Short() {
		t.Skip("Integration tests skipped in short mode")
	}
	ctx := context.Background()
	ts := testutil.TokenSource(ctx, ScopePubSub, ScopeCloudPlatform)
	if ts == nil {
		t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
	}
	now := time.Now()
	topicName := fmt.Sprintf("topic-%d", now.Unix())
	subName := fmt.Sprintf("subscription-%d", now.Unix())

	client, err := NewClient(ctx, testutil.ProjID(), option.WithTokenSource(ts))
	if err != nil {
		t.Fatalf("Creating client error: %v", err)
	}
	defer client.Close()

	var topic *Topic
	if topic, err = client.CreateTopic(ctx, topicName); err != nil {
		t.Errorf("CreateTopic error: %v", err)
	}
	var sub *Subscription
	if sub, err = client.CreateSubscription(ctx, subName, topic, 0, nil); err != nil {
		t.Errorf("CreateSub error: %v", err)
	}

	exists, err := topic.Exists(ctx)
	if err != nil {
		t.Fatalf("TopicExists error: %v", err)
	}
	if !exists {
		t.Errorf("topic %s should exist, but it doesn't", topic)
	}
	exists, err = sub.Exists(ctx)
	if err != nil {
		t.Fatalf("SubExists error: %v", err)
	}
	if !exists {
		t.Errorf("subscription %s should exist, but it doesn't", subName)
	}

	msgs := []*Message{}
	for i := 0; i < 10; i++ {
		text := fmt.Sprintf("a message with an index %d", i)
		msgs = append(msgs, &Message{
			Data:       []byte(text),
			Attributes: map[string]string{"foo": "bar"},
		})
	}

	ids, err := topic.Publish(ctx, msgs...)
	if err != nil {
		t.Fatalf("Publish (1) error: %v", err)
	}
	if len(ids) != len(msgs) {
		t.Errorf("unexpected number of message IDs received; got %d, want %d", len(ids), len(msgs))
	}
	want := make(map[string]*messageData)
	for i, m := range msgs {
		md := extractMessageData(m)
		md.ID = ids[i]
		want[md.ID] = md
	}

	// Use a timeout to ensure that Pull does not block indefinitely if there
	// are unexpectedly few messages available.
	timeoutCtx, cancel := context.WithTimeout(ctx, time.Minute)
	defer cancel()
	it, err := sub.Pull(timeoutCtx)
	if err != nil {
		t.Fatalf("error constructing iterator: %v", err)
	}
	defer it.Stop()
	got := make(map[string]*messageData)
	for i := 0; i < len(want); i++ {
		m, err := it.Next()
		if err != nil {
			t.Fatalf("error getting next message: %v", err)
		}
		md := extractMessageData(m)
		got[md.ID] = md
		m.Done(true)
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("messages: got: %v ; want: %v", got, want)
	}

	// base64 test
	data := "=@~"
	_, err = topic.Publish(ctx, &Message{Data: []byte(data)})
	if err != nil {
		t.Fatalf("Publish error: %v", err)
	}
	m, err := it.Next()
	if err != nil {
		t.Fatalf("Pull error: %v", err)
	}
	if string(m.Data) != data {
		t.Errorf("unexpected message received; got %s, want %s", string(m.Data), data)
	}
	m.Done(true)

	if msg, ok := testIAM(ctx, topic.IAM(), "pubsub.topics.get"); !ok {
		t.Errorf("topic IAM: %s", msg)
	}
	if msg, ok := testIAM(ctx, sub.IAM(), "pubsub.subscriptions.get"); !ok {
		t.Errorf("sub IAM: %s", msg)
	}

	err = sub.Delete(ctx)
	if err != nil {
		t.Errorf("DeleteSub error: %v", err)
	}
	err = topic.Delete(ctx)
	if err != nil {
		t.Errorf("DeleteTopic error: %v", err)
	}
}
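// messageData and extractMessageData are referenced above but not shown; a
// plausible sketch, assuming the test compares only the fields a publisher
// controls (ID is filled in afterwards from the publish results):
type messageData struct {
	ID         string
	Data       []byte
	Attributes map[string]string
}

func extractMessageData(m *Message) *messageData {
	return &messageData{
		ID:         m.ID,
		Data:       m.Data,
		Attributes: m.Attributes,
	}
}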
func TestIntegration(t *testing.T) {
	if testing.Short() {
		t.Skip("Integration tests skipped in short mode")
	}
	ctx := context.Background()
	ts := testutil.TokenSource(ctx, Scope)
	if ts == nil {
		t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
	}
	projID := testutil.ProjID()
	c, err := NewClient(ctx, projID, option.WithTokenSource(ts))
	if err != nil {
		t.Fatal(err)
	}
	ds := c.Dataset("bigquery_integration_test")
	if err := ds.Create(ctx); err != nil && !hasStatusCode(err, http.StatusConflict) { // AlreadyExists is 409
		t.Fatal(err)
	}
	schema := Schema([]*FieldSchema{
		{Name: "name", Type: StringFieldType},
		{Name: "num", Type: IntegerFieldType},
	})
	table := ds.Table("t1")
	// Delete the table in case it already exists. (Ignore errors.)
	table.Delete(ctx)
	// Create the table.
	err = table.Create(ctx, schema, TableExpiration(time.Now().Add(5*time.Minute)))
	if err != nil {
		t.Fatal(err)
	}
	// Check table metadata.
	md, err := table.Metadata(ctx)
	if err != nil {
		t.Fatal(err)
	}
	// TODO(jba): check md more thoroughly.
	if got, want := md.ID, fmt.Sprintf("%s:%s.%s", projID, ds.id, table.TableID); got != want {
		t.Errorf("metadata.ID: got %q, want %q", got, want)
	}
	if got, want := md.Type, RegularTable; got != want {
		t.Errorf("metadata.Type: got %v, want %v", got, want)
	}
	// List tables in the dataset.
	tables, err := ds.ListTables(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if got, want := len(tables), 1; got != want {
		t.Fatalf("ListTables: got %d, want %d", got, want)
	}
	want := *table
	if got := tables[0]; !reflect.DeepEqual(got, &want) {
		t.Errorf("ListTables: got %v, want %v", got, &want)
	}
	// Iterate over tables in the dataset.
	it := ds.Tables(ctx)
	tables = nil
	for {
		tbl, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			t.Fatal(err)
		}
		tables = append(tables, tbl)
	}
	if got, want := tables, []*Table{table}; !reflect.DeepEqual(got, want) {
		t.Errorf("Tables: got %v, want %v", got, want)
	}
	// Populate the table.
	upl := table.NewUploader()
	var rows []*ValuesSaver
	for i, name := range []string{"a", "b", "c"} {
		rows = append(rows, &ValuesSaver{
			Schema:   schema,
			InsertID: name,
			Row:      []Value{name, i},
		})
	}
	if err := upl.Put(ctx, rows); err != nil {
		t.Fatal(err)
	}
	checkRead := func(src ReadSource) {
		it, err := c.Read(ctx, src)
		if err != nil {
			t.Fatal(err)
		}
		for i := 0; it.Next(ctx); i++ {
			var vals ValueList
			if err := it.Get(&vals); err != nil {
				t.Fatal(err)
			}
			if got, want := vals, rows[i].Row; !reflect.DeepEqual([]Value(got), want) {
				t.Errorf("got %v, want %v", got, want)
			}
		}
	}
	// Read the table.
	checkRead(table)
	// Query the table.
	q := &Query{
		Q:                "select name, num from t1",
		DefaultProjectID: projID,
		DefaultDatasetID: ds.id,
	}
	checkRead(q)
	// Query the long way.
	dest := &Table{}
	job1, err := c.Copy(ctx, dest, q, WriteTruncate)
	if err != nil {
		t.Fatal(err)
	}
	job2, err := c.JobFromID(ctx, job1.ID())
	if err != nil {
		t.Fatal(err)
	}
	// TODO(jba): poll status until job is done.
	_, err = job2.Status(ctx)
	if err != nil {
		t.Fatal(err)
	}
	checkRead(job2)
	// TODO(jba): patch the table
}
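// The TODO above calls for polling until the copy job finishes. One way to
// write that, sketched under the assumption that Job.Status and
// JobStatus.Done/Err behave as in this package; the polling interval is
// arbitrary.
func waitForJob(ctx context.Context, t *testing.T, job *Job) {
	for {
		status, err := job.Status(ctx)
		if err != nil {
			t.Fatal(err)
		}
		if status.Done() {
			if status.Err() != nil {
				t.Fatalf("job failed: %v", status.Err())
			}
			return
		}
		time.Sleep(time.Second)
	}
}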
// TestEndToEnd pumps many messages into a topic and tests that they are
// all delivered to each subscription for the topic. It also tests that
// messages are not unexpectedly redelivered.
func TestEndToEnd(t *testing.T) {
	if testing.Short() {
		t.Skip("Integration tests skipped in short mode")
	}
	ctx := context.Background()
	ts := testutil.TokenSource(ctx, ScopePubSub, ScopeCloudPlatform)
	if ts == nil {
		t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
	}
	now := time.Now()
	topicName := fmt.Sprintf("endtoend-%d", now.Unix())
	subPrefix := fmt.Sprintf("endtoend-%d", now.Unix())

	client, err := NewClient(ctx, testutil.ProjID(), option.WithTokenSource(ts))
	if err != nil {
		t.Fatalf("Creating client error: %v", err)
	}

	var topic *Topic
	if topic, err = client.CreateTopic(ctx, topicName); err != nil {
		t.Fatalf("CreateTopic error: %v", err)
	}
	defer topic.Delete(ctx)

	// Three subscriptions to the same topic.
	var subA, subB, subC *Subscription
	if subA, err = client.CreateSubscription(ctx, subPrefix+"-a", topic, ackDeadline, nil); err != nil {
		t.Fatalf("CreateSub error: %v", err)
	}
	defer subA.Delete(ctx)
	if subB, err = client.CreateSubscription(ctx, subPrefix+"-b", topic, ackDeadline, nil); err != nil {
		t.Fatalf("CreateSub error: %v", err)
	}
	defer subB.Delete(ctx)
	if subC, err = client.CreateSubscription(ctx, subPrefix+"-c", topic, ackDeadline, nil); err != nil {
		t.Fatalf("CreateSub error: %v", err)
	}
	defer subC.Delete(ctx)

	expectedCounts := make(map[string]int)
	for _, id := range publish(t, ctx, topic) {
		expectedCounts[id] = 1
	}

	// recv provides an indication that messages are still arriving.
	recv := make(chan struct{})

	// Keep track of the number of times each message (by message id) was
	// seen from each subscription.
	mcA := &messageCounter{counts: make(map[string]int), recv: recv}
	mcB := &messageCounter{counts: make(map[string]int), recv: recv}
	mcC := &messageCounter{counts: make(map[string]int), recv: recv}

	stopC := make(chan struct{})

	// We have three subscriptions to our topic.
	// Each subscription will get a copy of each published message.
	//
	// subA has just one iterator, while subB has two. The subB iterators
	// will each process roughly half of the messages for subB. All of
	// these iterators live until all messages have been consumed. subC is
	// processed by a series of short-lived iterators.
	var wg sync.WaitGroup

	con := &consumer{
		concurrencyPerIterator: 1,
		iteratorsInFlight:      1,
		lifetimes:              immortal,
	}
	con.consume(t, ctx, subA, mcA, &wg, stopC)

	con = &consumer{
		concurrencyPerIterator: 1,
		iteratorsInFlight:      2,
		lifetimes:              immortal,
	}
	con.consume(t, ctx, subB, mcB, &wg, stopC)

	con = &consumer{
		concurrencyPerIterator: 1,
		iteratorsInFlight:      2,
		lifetimes: &explicitLifetimes{
			lifetimes: []time.Duration{ackDeadline, ackDeadline, ackDeadline / 2, ackDeadline / 2},
		},
	}
	con.consume(t, ctx, subC, mcC, &wg, stopC)

	go func() {
		timeoutC := time.After(timeout)
		// Every time this ticker ticks, we will check if we have received
		// any messages since the last time it ticked. We check less
		// frequently than the ack deadline, so that we can detect if
		// messages are redelivered after having their ack deadline extended.
		checkQuiescence := time.NewTicker(ackDeadline * 3)
		defer checkQuiescence.Stop()
		var received bool
		for {
			select {
			case <-recv:
				received = true
			case <-checkQuiescence.C:
				if received {
					received = false
				} else {
					close(stopC)
					return
				}
			case <-timeoutC:
				t.Errorf("timed out")
				close(stopC)
				return
			}
		}
	}()
	wg.Wait()

	for _, mc := range []*messageCounter{mcA, mcB, mcC} {
		if got, want := mc.counts, expectedCounts; !reflect.DeepEqual(got, want) {
			t.Errorf("message counts: %v\n", diff(got, want))
		}
	}
}
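// messageCounter is referenced above but not shown; a minimal sketch,
// assuming it needs mutex-guarded per-ID counts plus a notification on recv
// so the quiescence checker can tell that messages are still arriving. The
// real type (and the consumer that calls it) may carry more state.
type messageCounter struct {
	mu     sync.Mutex
	counts map[string]int
	recv   chan struct{}
}

func (mc *messageCounter) process(m *Message) {
	mc.mu.Lock()
	mc.counts[m.ID]++
	mc.mu.Unlock()
	mc.recv <- struct{}{}
}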