Example 1
func Example_serviceAccount() {
	// Warning: The better way to use service accounts is to set GOOGLE_APPLICATION_CREDENTIALS
	// and use the Application Default Credentials.
	ctx := context.Background()
	// Use a JSON key file associated with a Google service account to
	// authenticate and authorize.
	// Go to https://console.developers.google.com/permissions/serviceaccounts to create
	// and download a service account key for your project.
	//
	// Note: The example uses the datastore client, but the same steps apply to
	// the other client libraries underneath this package.
	key, err := ioutil.ReadFile("/path/to/service-account-key.json")
	if err != nil {
		// TODO: handle error.
	}
	cfg, err := google.JWTConfigFromJSON(key, datastore.ScopeDatastore)
	if err != nil {
		// TODO: handle error.
	}
	client, err := datastore.NewClient(
		ctx, "project-id", option.WithTokenSource(cfg.TokenSource(ctx)))
	if err != nil {
		// TODO: handle error.
	}
	// Use the client.
	_ = client
}
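The warning at the top of Example 1 points to Application Default Credentials as the preferred path. A minimal sketch of that alternative, assuming GOOGLE_APPLICATION_CREDENTIALS points at the service account key file (the project ID is a placeholder):

func Example_applicationDefaultCredentials() {
	ctx := context.Background()
	// With no explicit token source, the client falls back to Application
	// Default Credentials, which read the key file named by
	// GOOGLE_APPLICATION_CREDENTIALS (or use the GCE/GAE metadata service).
	client, err := datastore.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: handle error.
	}
	// Use the client.
	_ = client
}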
Example 2
func (s *shard) createOutputFile(c context.Context) (io.WriteCloser, error) {
	c, _ = context.WithTimeout(c, 10*time.Minute)
	// In development we can't use the App Engine default credentials, so
	// we need to create our own OAuth token source to access storage.

	// TODO: maybe give job a chance to generate this - it could also
	// create the writer (?). The only reason we're doing it is to prevent
	// duplication and also handle the file rollup operations.
	var client *cstorage.Client
	if appengine.IsDevAppServer() {
		jsonKey, err := ioutil.ReadFile("service-account.json")
		if err != nil {
			return nil, err
		}
		conf, err := google.JWTConfigFromJSON(jsonKey, cstorage.ScopeReadWrite)
		if err != nil {
			return nil, err
		}
		client, err = cstorage.NewClient(c, option.WithTokenSource(conf.TokenSource(c)))
		if err != nil {
			return nil, err
		}
	} else {
		var err error
		client, err = cstorage.NewClient(c)
		if err != nil {
			return nil, err
		}
	}

	o := client.Bucket(s.job.Bucket).Object(s.sliceFilename(s.Sequence)).NewWriter(c)

	// TODO: wrap writer to count bytes and continue slice if we get close to 10Mb limit (?)
	return o, nil
}
Example 3
func TestIntegrationPingBadProject(t *testing.T) {
	if testing.Short() {
		t.Skip("Integration tests skipped in short mode")
	}

	ctx := context.Background()
	ts := testutil.TokenSource(ctx, Scope)
	if ts == nil {
		t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
	}

	for _, projID := range []string{
		testutil.ProjID() + "-BAD", // nonexistent project
		"amazing-height-519",       // exists, but wrong creds
	} {
		c, err := NewClient(ctx, projID, "logging-integration-test", option.WithTokenSource(ts))
		if err != nil {
			t.Fatalf("project %s: error creating client: %v", projID, err)
		}
		if err := c.Ping(); err == nil {
			t.Errorf("project %s: want error pinging logging api, got nil", projID)
		}
		// Ping twice, just to make sure the deduping doesn't mess with the result.
		if err := c.Ping(); err == nil {
			t.Errorf("project %s: want error pinging logging api, got nil", projID)
		}
	}
}
Example 4
func NewGCS(name string, info map[string]string) (Backend, error) {
	b := &gcsBackend{
		name:       name,
		bucketName: info["bucket"],
	}
	keyJSON := []byte(info["key"])

	if b.bucketName == "" {
		return nil, fmt.Errorf("blobstore: missing Google Cloud Storage bucket param for %s", name)
	}
	if len(keyJSON) == 0 {
		return nil, fmt.Errorf("blobstore: missing Google Cloud Storage key JSON param for %s", name)
	}

	jwtToken, err := google.JWTConfigFromJSON(keyJSON, "https://www.googleapis.com/auth/devstorage.read_write")
	if err != nil {
		return nil, fmt.Errorf("blobstore: error loading Google Cloud Storage JSON key: %s", err)
	}
	tokenSource := jwtToken.TokenSource(context.Background())

	// Test getting an OAuth token so we can disambiguate an issue with the
	// token and an issue with the bucket permissions below.
	if _, err := tokenSource.Token(); err != nil {
		return nil, fmt.Errorf("blobstore: error getting Google Cloud Storage OAuth token: %s", err)
	}

	pemBlock, _ := pem.Decode(jwtToken.PrivateKey)
	if pemBlock == nil {
		return nil, fmt.Errorf("blobstore: error decoding Google Cloud Storage private key PEM block")
	}
	privateKey, err := x509.ParsePKCS8PrivateKey(pemBlock.Bytes)
	if err != nil {
		return nil, fmt.Errorf("blobstore: error decoding Google Cloud Storage private key: %s", err)
	}
	rsaPrivateKey, ok := privateKey.(*rsa.PrivateKey)
	if !ok {
		return nil, fmt.Errorf("blobstore: unexpected Google Cloud Storage key type: %T", privateKey)
	}
	b.signOpts = func() *storage.SignedURLOptions {
		return &storage.SignedURLOptions{
			GoogleAccessID: jwtToken.Email,
			SignBytes: func(b []byte) ([]byte, error) {
				digest := sha256.Sum256(b)
				return rsa.SignPKCS1v15(rand.Reader, rsaPrivateKey, crypto.SHA256, digest[:])
			},
			Method:  "GET",
			Expires: time.Now().Add(10 * time.Minute),
		}
	}

	client, err := storage.NewClient(context.Background(), option.WithTokenSource(tokenSource))
	if err != nil {
		return nil, fmt.Errorf("blobstore: error creating Google Cloud Storage client: %s", err)
	}
	b.bucket = client.Bucket(b.bucketName)

	_, err = b.bucket.Attrs(context.Background())
	if err != nil {
		return nil, fmt.Errorf("blobstore: error checking Google Cloud Storage bucket %q existence, ensure that it exists and Owner access for %s is included the bucket ACL: %q", b.bucketName, jwtToken.Email, err)
	}
	return b, nil
}
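The signOpts closure built in Example 4 is consumed elsewhere in the backend; a hypothetical caller might look like the sketch below (the signedURL method name is an assumption, not part of the original), handing the options to storage.SignedURL to mint a short-lived download URL:

func (b *gcsBackend) signedURL(name string) (string, error) {
	// signOpts returns fresh options on each call, so Expires is always
	// relative to the time of signing.
	return storage.SignedURL(b.bucketName, name, b.signOpts())
}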
Example 5
func upload(srcPath string) {
	if !*flagUpload {
		return
	}
	destName := strings.Replace(filepath.Base(srcPath), "camlistore", "camlistore-"+releaseDate.Format(fileDateFormat), 1)
	versionedTarball := "monthly/" + destName

	log.Printf("Uploading %s/%s ...", bucket, versionedTarball)

	ts, err := tokenSource(bucket)
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	stoClient, err := storage.NewClient(ctx, option.WithTokenSource(ts), option.WithHTTPClient(oauth2.NewClient(ctx, ts)))
	if err != nil {
		log.Fatal(err)
	}
	w := stoClient.Bucket(bucket).Object(versionedTarball).NewWriter(ctx)
	w.ACL = publicACL(project)
	w.CacheControl = "no-cache" // TODO: remove for non-tip releases? set expirations?
	contentType := "application/x-gtar"
	if strings.HasSuffix(versionedTarball, ".zip") {
		contentType = "application/zip"
	}
	w.ContentType = contentType
	csw := sha256.New()
	mw := io.MultiWriter(w, csw)

	src, err := os.Open(srcPath)
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	if _, err := io.Copy(mw, src); err != nil {
		log.Fatalf("io.Copy: %v", err)
	}
	if err := w.Close(); err != nil {
		log.Fatalf("closing GCS storage writer: %v", err)
	}
	log.Printf("Uploaded monthly tarball to %s", versionedTarball)

	// And upload the corresponding checksum
	checkSumFile := versionedTarball + ".sha256"
	sum := fmt.Sprintf("%x", csw.Sum(nil))
	w = stoClient.Bucket(bucket).Object(checkSumFile).NewWriter(ctx)
	w.ACL = publicACL(project)
	w.CacheControl = "no-cache" // TODO: remove for non-tip releases? set expirations?
	w.ContentType = "text/plain"
	if _, err := io.Copy(w, strings.NewReader(sum)); err != nil {
		log.Fatalf("error uploading checksum %v: %v", checkSumFile, err)
	}
	if err := w.Close(); err != nil {
		log.Fatalf("closing GCS storage writer: %v", err)
	}
	log.Printf("Uploaded monthly tarball checksum to %s", checkSumFile)
}
Example 6
func getCredentialOpts(opts []option.ClientOption) []option.ClientOption {
	if ts := config.TokenSource; ts != nil {
		opts = append(opts, option.WithTokenSource(ts))
	}
	if tlsCreds := config.TLSCreds; tlsCreds != nil {
		opts = append(opts, option.WithGRPCDialOption(grpc.WithTransportCredentials(tlsCreds)))
	}
	return opts
}
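A minimal usage sketch for Example 6, assuming a datastore client and project ID purely for illustration (neither is part of the original package): credential options derived from the package-level config are appended to the caller's options before the client is constructed.

func newDatastoreClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*datastore.Client, error) {
	// getCredentialOpts adds a token source and/or TLS credentials when
	// the package config provides them.
	return datastore.NewClient(ctx, projectID, getCredentialOpts(opts)...)
}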
Example 7
func newClient(ctx context.Context, t *testing.T) *Client {
	ts := testutil.TokenSource(ctx, ScopeDatastore)
	if ts == nil {
		t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
	}
	client, err := NewClient(ctx, testutil.ProjID(), option.WithTokenSource(ts))
	if err != nil {
		t.Fatalf("NewClient: %v", err)
	}
	return client
}
Example 8
// uploadReleaseTarball uploads the generated tarball of binaries in
// camlistore-release/VERSION/camlistoreVERSION-REV-CONTENTS.EXT. It then makes a copy in
// the same bucket and path, as camlistoreVERSION-CONTENTS.EXT.
func uploadReleaseTarball() {
	proj := "camlistore-website"
	bucket := "camlistore-release"
	tarball := *flagVersion + "/" + filepath.Base(releaseTarball)
	versionedTarball := strings.Replace(tarball, "camlistore"+*flagVersion, "camlistore"+*flagVersion+"-"+rev(), 1)

	log.Printf("Uploading %s/%s ...", bucket, versionedTarball)

	ts, err := tokenSource(bucket)
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	stoClient, err := storage.NewClient(ctx, option.WithTokenSource(ts), option.WithHTTPClient(oauth2.NewClient(ctx, ts)))
	if err != nil {
		log.Fatal(err)
	}
	w := stoClient.Bucket(bucket).Object(versionedTarball).NewWriter(ctx)
	w.ACL = publicACL(proj)
	w.CacheControl = "no-cache" // TODO: remove for non-tip releases? set expirations?
	contentType := "application/x-gtar"
	if *buildOS == "windows" {
		contentType = "application/zip"
	}
	w.ContentType = contentType

	src, err := os.Open(releaseTarball)
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	if _, err := io.Copy(w, src); err != nil {
		log.Fatalf("io.Copy: %v", err)
	}
	if err := w.Close(); err != nil {
		log.Fatalf("closing GCS storage writer: %v", err)
	}
	log.Printf("Uploaded tarball to %s", versionedTarball)
	if !isWIP() {
		log.Printf("Copying tarball to %s/%s ...", bucket, tarball)
		dest := stoClient.Bucket(bucket).Object(tarball)
		if _, err := stoClient.Bucket(bucket).Object(versionedTarball).CopyTo(
			ctx,
			dest,
			&storage.ObjectAttrs{
				ACL:         publicACL(proj),
				ContentType: contentType,
			}); err != nil {
			log.Fatalf("Error uploading %v: %v", tarball, err)
		}
		log.Printf("Uploaded tarball to %s", tarball)
	}
}
Example 9
func authOption() option.ClientOption {
	ts := testutil.TokenSource(context.Background(), Scope)
	if ts != nil {
		log.Println("authenticating via OAuth2")
		return option.WithTokenSource(ts)
	}
	apiKey := os.Getenv("GCLOUD_TESTS_API_KEY")
	if apiKey != "" {
		log.Println("authenticating with API key")
		return option.WithAPIKey(apiKey)
	}
	return nil
}
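A sketch of how authOption's nil result might be handled by a test helper (the helper name and skip message are assumptions): callers are expected to skip integration tests when neither a token source nor an API key is available.

func testClientOption(t *testing.T) option.ClientOption {
	opt := authOption()
	if opt == nil {
		t.Skip("Integration tests skipped. No token source or API key available")
	}
	return opt
}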
Example 10
func integrationTestClient(ctx context.Context, t *testing.T) *Client {
	if testing.Short() {
		t.Skip("Integration tests skipped in short mode")
	}
	ts := testutil.TokenSource(ctx, Scope)
	if ts == nil {
		t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
	}
	client, err := NewClient(ctx, option.WithTokenSource(ts))
	if err != nil {
		t.Fatal(err)
	}
	return client
}
Example 11
func TestMain(m *testing.M) {
	flag.Parse() // needed for testing.Short()
	ctx := context.Background()
	testProjectID = testutil.ProjID()
	var newClient func(ctx context.Context, projectID string) *Client
	if testProjectID == "" || testing.Short() {
		integrationTest = false
		if testProjectID != "" {
			log.Print("Integration tests skipped in short mode (using fake instead)")
		}
		testProjectID = "PROJECT_ID"
		addr, err := ltesting.NewServer()
		if err != nil {
			log.Fatalf("creating fake server: %v", err)
		}
		newClient = func(ctx context.Context, projectID string) *Client {
			conn, err := grpc.Dial(addr, grpc.WithInsecure())
			if err != nil {
				log.Fatalf("dialing %q: %v", addr, err)
			}
			c, err := NewClient(ctx, projectID, option.WithGRPCConn(conn))
			if err != nil {
				log.Fatalf("creating client for fake at %q: %v", addr, err)
			}
			return c
		}
	} else {
		integrationTest = true
		ts := testutil.TokenSource(ctx, logging.AdminScope)
		if ts == nil {
			log.Fatal("The project key must be set. See CONTRIBUTING.md for details")
		}
		log.Printf("running integration tests with project %s", testProjectID)
		newClient = func(ctx context.Context, projectID string) *Client {
			c, err := NewClient(ctx, projectID, option.WithTokenSource(ts))
			if err != nil {
				log.Fatalf("creating prod client: %v", err)
			}
			return c
		}
	}
	client = newClient(ctx, testProjectID)
	initMetrics(ctx)
	cleanup := initSinks(ctx)
	exit := m.Run()
	cleanup()
	client.Close()
	os.Exit(exit)
}
Example 12
// config is like testConfig, but it doesn't need a *testing.T.
func config(ctx context.Context) (*Client, string) {
	ts := testutil.TokenSource(ctx, ScopeFullControl)
	if ts == nil {
		return nil, ""
	}
	p := testutil.ProjID()
	if p == "" {
		log.Fatal("The project ID must be set. See CONTRIBUTING.md for details")
	}
	client, err := NewClient(ctx, option.WithTokenSource(ts))
	if err != nil {
		log.Fatalf("NewClient: %v", err)
	}
	return client, p + suffix
}
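The comment in Example 12 mentions a testConfig counterpart; a sketch of what it might look like (an assumption about its shape, not the original definition) wraps config and skips the test when no credentials are available:

func testConfig(ctx context.Context, t *testing.T) (*Client, string) {
	client, bucket := config(ctx)
	if client == nil {
		t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
	}
	return client, bucket
}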
Example 13
func TestIntegration(t *testing.T) {
	if testing.Short() {
		t.Skip("Integration tests skipped in short mode")
	}

	ctx := context.Background()
	ts := testutil.TokenSource(ctx, Scope)
	if ts == nil {
		t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
	}

	projID := testutil.ProjID()

	c, err := NewClient(ctx, projID, "logging-integration-test", option.WithTokenSource(ts))
	if err != nil {
		t.Fatalf("error creating client: %v", err)
	}

	if err := c.Ping(); err != nil {
		t.Fatalf("error pinging logging api: %v", err)
	}
	// Ping twice, to verify that deduping doesn't change the result.
	if err := c.Ping(); err != nil {
		t.Fatalf("error pinging logging api: %v", err)
	}

	if err := c.LogSync(Entry{Payload: customJSONObject{}}); err != nil {
		t.Fatalf("error writing log: %v", err)
	}

	if err := c.Log(Entry{Payload: customJSONObject{}}); err != nil {
		t.Fatalf("error writing log: %v", err)
	}

	if _, err := c.Writer(Default).Write([]byte("test log with io.Writer")); err != nil {
		t.Fatalf("error writing log using io.Writer: %v", err)
	}

	c.Logger(Default).Println("test log with log.Logger")

	if err := c.Flush(); err != nil {
		t.Fatalf("error flushing logs: %v", err)
	}
}
Example 14
func newLogTest(t *testing.T) *logTest {
	handlerc := make(chan http.Handler, 1)
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		select {
		case h := <-handlerc:
			h.ServeHTTP(w, r)
		default:
			slurp, _ := ioutil.ReadAll(r.Body)
			t.Errorf("Unexpected HTTP request received: %s", slurp)
			w.WriteHeader(500)
			io.WriteString(w, "unexpected HTTP request")
		}
	}))
	c, err := NewClient(context.Background(), "PROJ-ID", "LOG-NAME",
		option.WithEndpoint(ts.URL),
		option.WithTokenSource(dummyTokenSource{}), // prevent DefaultTokenSource
	)
	if err != nil {
		t.Fatal(err)
	}
	var clock struct {
		sync.Mutex
		now time.Time
	}
	c.timeNow = func() time.Time {
		clock.Lock()
		defer clock.Unlock()
		if clock.now.IsZero() {
			clock.now = time.Unix(0, 0)
		}
		clock.now = clock.now.Add(1 * time.Second)
		return clock.now
	}
	return &logTest{
		t:        t,
		ts:       ts,
		c:        c,
		handlerc: handlerc,
	}
}
Example 15
// Called just before TestMain calls m.Run.
// Returns a cleanup function to be called after the tests finish.
func initSinks(ctx context.Context) func() {
	// Create a unique GCS bucket so concurrent tests don't interfere with each other.
	testBucketPrefix := testProjectID + "-log-sink"
	testBucket := ltesting.UniqueID(testBucketPrefix)
	testSinkDestination = "storage.googleapis.com/" + testBucket
	var storageClient *storage.Client
	if integrationTest {
		// Create a unique bucket as a sink destination, and grant the cloud logging
		// account Owner rights on it.
		ts := testutil.TokenSource(ctx, storage.ScopeFullControl)
		var err error
		storageClient, err = storage.NewClient(ctx, option.WithTokenSource(ts))
		if err != nil {
			log.Fatalf("new storage client: %v", err)
		}
		bucket := storageClient.Bucket(testBucket)
		if err := bucket.Create(ctx, testProjectID, nil); err != nil {
			log.Fatalf("creating storage bucket %q: %v", testBucket, err)
		}
		if err := bucket.ACL().Set(ctx, "*****@*****.**", storage.RoleOwner); err != nil {
			log.Fatalf("setting owner role: %v", err)
		}
	}
	// Clean up from aborted tests.
	for _, sID := range ltesting.ExpiredUniqueIDs(sinkIDs(ctx), testSinkIDPrefix) {
		client.DeleteSink(ctx, sID) // ignore error
	}
	if integrationTest {
		for _, bn := range ltesting.ExpiredUniqueIDs(bucketNames(ctx, storageClient), testBucketPrefix) {
			storageClient.Bucket(bn).Delete(ctx) // ignore error
		}
		return func() {
			if err := storageClient.Bucket(testBucket).Delete(ctx); err != nil {
				log.Printf("deleting %q: %v", testBucket, err)
			}
			storageClient.Close()
		}
	}
	return func() {}
}
Example 16
// If integration tests will be run, create a unique bucket for them.
func initIntegrationTest() {
	flag.Parse() // needed for testing.Short()
	if testing.Short() {
		return
	}
	ctx := context.Background()
	ts := testutil.TokenSource(ctx, Scope)
	if ts == nil {
		log.Println("Integration tests skipped. See CONTRIBUTING.md for details")
		return
	}
	projID := testutil.ProjID()
	var err error
	client, err = NewClient(ctx, projID, option.WithTokenSource(ts))
	if err != nil {
		log.Fatalf("NewClient: %v", err)
	}
	dataset = client.Dataset("bigquery_integration_test")
	if err := dataset.Create(ctx); err != nil && !hasStatusCode(err, http.StatusConflict) { // AlreadyExists is 409
		log.Fatalf("creating dataset: %v", err)
	}
}
Example 17
// Check that a user-supplied grpc.WithDialer option overrides the App Engine dialer hook.
func TestGRPCHook(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	expected := false

	appengineDialerHook = (func(ctx context.Context) grpc.DialOption {
		return grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
			t.Error("did not expect a call to notExpected dialer, got one")
			cancel()
			return nil, errors.New("not expected")
		})
	})
	defer func() {
		appengineDialerHook = nil
	}()

	expectedDialer := grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
		expected = true
		cancel()
		return nil, errors.New("expected")
	})

	conn, err := DialGRPC(ctx,
		option.WithTokenSource(oauth2.StaticTokenSource(nil)), // No creds.
		option.WithGRPCDialOption(expectedDialer),
		option.WithEndpoint("example.google.com:443"))
	if err != nil {
		t.Errorf("DialGRPC: error %v, want nil", err)
	}

	// gRPC doesn't connect before the first call.
	grpc.Invoke(ctx, "foo", nil, nil, conn)
	conn.Close()

	if !expected {
		t.Error("expected a call to expected dialer, didn't get one")
	}
}
Example 18
func TestPing(t *testing.T) {
	// Ping twice, in case the service's InsertID logic messes with the error code.
	ctx := context.Background()
	// The global client should be valid.
	if err := client.Ping(ctx); err != nil {
		t.Errorf("project %s: got %v, expected nil", testProjectID, err)
	}
	if err := client.Ping(ctx); err != nil {
		t.Errorf("project %s, #2: got %v, expected nil", testProjectID, err)
	}
	// nonexistent project
	c, _ := newClients(ctx, testProjectID+"-BAD")
	if err := c.Ping(ctx); err == nil {
		t.Errorf("nonexistent project: want error pinging logging api, got nil")
	}
	if err := c.Ping(ctx); err == nil {
		t.Errorf("nonexistent project, #2: want error pinging logging api, got nil")
	}

	// Bad creds. We cannot test this with the fake, since it doesn't do auth.
	if integrationTest {
		c, err := logging.NewClient(ctx, testProjectID, option.WithTokenSource(badTokenSource{}))
		if err != nil {
			t.Fatal(err)
		}
		if err := c.Ping(ctx); err == nil {
			t.Errorf("bad creds: want error pinging logging api, got nil")
		}
		if err := c.Ping(ctx); err == nil {
			t.Errorf("bad creds, #2: want error pinging logging api, got nil")
		}
		if err := c.Close(); err != nil {
			t.Fatalf("error closing client: %v", err)
		}
	}
}
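badTokenSource is referenced in Example 18 but not defined in this listing. A minimal sketch of what such a type might look like (an assumption, not the original definition): it satisfies oauth2.TokenSource and returns a token the logging service will reject.

type badTokenSource struct{}

func (badTokenSource) Token() (*oauth2.Token, error) {
	// A syntactically valid but unusable access token.
	return &oauth2.Token{AccessToken: "not-a-real-token"}, nil
}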
Example 19
// uploadDockerImage makes a tar.gz snapshot of the camlistored docker image,
// and uploads it at camlistore-release/docker/camlistored-REV.tar.gz. It then
// makes a copy in the same bucket and path as camlistored.tar.gz.
func uploadDockerImage() {
	proj := "camlistore-website"
	bucket := "camlistore-release"
	versionedTarball := "docker/camlistored-" + rev() + ".tar.gz"
	tarball := "docker/camlistored.tar.gz"
	versionFile := "docker/VERSION"

	log.Printf("Uploading %s/%s ...", bucket, versionedTarball)

	ts, err := tokenSource(bucket)
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	stoClient, err := storage.NewClient(ctx, option.WithTokenSource(ts), option.WithHTTPClient(oauth2.NewClient(ctx, ts)))
	if err != nil {
		log.Fatal(err)
	}
	w := stoClient.Bucket(bucket).Object(versionedTarball).NewWriter(ctx)
	w.ACL = publicACL(proj)
	w.CacheControl = "no-cache" // TODO: remove for non-tip releases? set expirations?
	w.ContentType = "application/x-gtar"

	dockerSave := exec.Command("docker", "save", serverImage)
	dockerSave.Stderr = os.Stderr
	tar, err := dockerSave.StdoutPipe()
	if err != nil {
		log.Fatal(err)
	}
	targz, pw := io.Pipe()
	go func() {
		zw := gzip.NewWriter(pw)
		n, err := io.Copy(zw, tar)
		if err != nil {
			log.Fatalf("Error copying to gzip writer: after %d bytes, %v", n, err)
		}
		if err := zw.Close(); err != nil {
			log.Fatalf("gzip.Close: %v", err)
		}
		pw.CloseWithError(err)
	}()
	if err := dockerSave.Start(); err != nil {
		log.Fatalf("Error starting docker save %v: %v", serverImage, err)
	}
	if _, err := io.Copy(w, targz); err != nil {
		log.Fatalf("io.Copy: %v", err)
	}
	if err := w.Close(); err != nil {
		log.Fatalf("closing GCS storage writer: %v", err)
	}
	if err := dockerSave.Wait(); err != nil {
		log.Fatalf("Error waiting for docker save %v: %v", serverImage, err)
	}
	log.Printf("Uploaded tarball to %s", versionedTarball)
	if isWIP() {
		return
	}
	log.Printf("Copying tarball to %s/%s ...", bucket, tarball)
	dest := stoClient.Bucket(bucket).Object(tarball)
	if _, err := stoClient.Bucket(bucket).Object(versionedTarball).CopyTo(
		ctx,
		dest,
		&storage.ObjectAttrs{
			ACL:          publicACL(proj),
			CacheControl: "no-cache",
			ContentType:  "application/x-gtar",
		}); err != nil {
		log.Fatalf("Error uploading %v: %v", tarball, err)
	}
	log.Printf("Uploaded tarball to %s", tarball)

	log.Printf("Updating %s/%s file...", bucket, versionFile)
	w = stoClient.Bucket(bucket).Object(versionFile).NewWriter(ctx)
	w.ACL = publicACL(proj)
	w.CacheControl = "no-cache"
	w.ContentType = "text/plain"
	if _, err := io.Copy(w, strings.NewReader(rev())); err != nil {
		log.Fatalf("io.Copy: %v", err)
	}
	if err := w.Close(); err != nil {
		log.Fatalf("closing GCS storage writer: %v", err)
	}
}
Example 20
func TestAll(t *testing.T) {
	if testing.Short() {
		t.Skip("Integration tests skipped in short mode")
	}
	ctx := context.Background()
	ts := testutil.TokenSource(ctx, ScopePubSub, ScopeCloudPlatform)
	if ts == nil {
		t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
	}

	now := time.Now()
	topicName := fmt.Sprintf("topic-%d", now.Unix())
	subName := fmt.Sprintf("subscription-%d", now.Unix())

	client, err := NewClient(ctx, testutil.ProjID(), option.WithTokenSource(ts))
	if err != nil {
		t.Fatalf("Creating client error: %v", err)
	}
	defer client.Close()

	var topic *Topic
	if topic, err = client.CreateTopic(ctx, topicName); err != nil {
		t.Errorf("CreateTopic error: %v", err)
	}

	var sub *Subscription
	if sub, err = client.CreateSubscription(ctx, subName, topic, 0, nil); err != nil {
		t.Errorf("CreateSub error: %v", err)
	}

	exists, err := topic.Exists(ctx)
	if err != nil {
		t.Fatalf("TopicExists error: %v", err)
	}
	if !exists {
		t.Errorf("topic %s should exist, but it doesn't", topic)
	}

	exists, err = sub.Exists(ctx)
	if err != nil {
		t.Fatalf("SubExists error: %v", err)
	}
	if !exists {
		t.Errorf("subscription %s should exist, but it doesn't", subName)
	}

	msgs := []*Message{}
	for i := 0; i < 10; i++ {
		text := fmt.Sprintf("a message with an index %d", i)
		attrs := make(map[string]string)
		attrs["foo"] = "bar"
		msgs = append(msgs, &Message{
			Data:       []byte(text),
			Attributes: attrs,
		})
	}

	ids, err := topic.Publish(ctx, msgs...)
	if err != nil {
		t.Fatalf("Publish (1) error: %v", err)
	}

	if len(ids) != len(msgs) {
		t.Errorf("unexpected number of message IDs received; %d, want %d", len(ids), len(msgs))
	}

	want := make(map[string]*messageData)
	for i, m := range msgs {
		md := extractMessageData(m)
		md.ID = ids[i]
		want[md.ID] = md
	}

	// Use a timeout to ensure that Pull does not block indefinitely if there are unexpectedly few messages available.
	timeoutCtx, _ := context.WithTimeout(ctx, time.Minute)
	it, err := sub.Pull(timeoutCtx)
	if err != nil {
		t.Fatalf("error constructing iterator: %v", err)
	}
	defer it.Stop()
	got := make(map[string]*messageData)
	for i := 0; i < len(want); i++ {
		m, err := it.Next()
		if err != nil {
			t.Fatalf("error getting next message: %v", err)
		}
		md := extractMessageData(m)
		got[md.ID] = md
		m.Done(true)
	}

	if !reflect.DeepEqual(got, want) {
		t.Errorf("messages: got: %v ; want: %v", got, want)
	}

	// base64 test
	data := "=@~"
	_, err = topic.Publish(ctx, &Message{Data: []byte(data)})
	if err != nil {
		t.Fatalf("Publish error: %v", err)
	}

	m, err := it.Next()
	if err != nil {
		t.Fatalf("Pull error: %v", err)
	}

	if string(m.Data) != data {
		t.Errorf("unexpected message received; %s, want %s", string(m.Data), data)
	}
	m.Done(true)

	if msg, ok := testIAM(ctx, topic.IAM(), "pubsub.topics.get"); !ok {
		t.Errorf("topic IAM: %s", msg)
	}
	if msg, ok := testIAM(ctx, sub.IAM(), "pubsub.subscriptions.get"); !ok {
		t.Errorf("sub IAM: %s", msg)
	}

	err = sub.Delete(ctx)
	if err != nil {
		t.Errorf("DeleteSub error: %v", err)
	}

	err = topic.Delete(ctx)
	if err != nil {
		t.Errorf("DeleteTopic error: %v", err)
	}
}
Example 21
// listDownloads lists all the files found in the monthly repo, and from them,
// builds the data that we'll feed to the template to generate the monthly
// downloads camweb page.
func listDownloads() (*ReleaseData, error) {
	ts, err := tokenSource(bucket)
	if err != nil {
		return nil, err
	}
	ctx := context.Background()
	stoClient, err := storage.NewClient(ctx, option.WithTokenSource(ts), option.WithHTTPClient(oauth2.NewClient(ctx, ts)))
	if err != nil {
		return nil, err
	}
	objList, err := stoClient.Bucket(bucket).List(ctx, &storage.Query{Prefix: "monthly/"})
	if err != nil {
		return nil, err
	}

	platformBySuffix := map[string]string{
		"src.zip":       "Source",
		"linux.tar.gz":  "Linux",
		"darwin.tar.gz": "Darwin",
		"windows.zip":   "Windows",
	}
	getPlatform := func(name string) string {
		for suffix, platform := range platformBySuffix {
			if strings.HasSuffix(name, suffix) {
				return platform
			}
		}
		return ""
	}
	getChecksum := func(name string) (string, error) {
		r, err := stoClient.Bucket(bucket).Object(name).NewReader(ctx)
		if err != nil {
			return "", err
		}
		var buf bytes.Buffer
		if _, err := io.Copy(&buf, r); err != nil {
			return "", err
		}
		return buf.String(), nil
	}
	var date time.Time
	checkDate := func(objDate time.Time) error {
		if date.IsZero() {
			date = objDate
			return nil
		}
		d := date.Sub(objDate)
		if d < 0 {
			d = -d
		}
		if d < 24*time.Hour {
			return nil
		}
		return fmt.Errorf("objects in monthly have not been uploaded or updated the same day")
	}

	var (
		downloadData []DownloadData
		nameToSum    = make(map[string]string)
	)
	fileDate := releaseDate.Format(fileDateFormat)
	for _, attrs := range objList.Results {
		if !strings.Contains(attrs.Name, fileDate) {
			continue
		}
		if err := checkDate(attrs.Updated); err != nil {
			return nil, err
		}
		if !strings.HasSuffix(attrs.Name, ".sha256") {
			continue
		}
		sum, err := getChecksum(attrs.Name)
		if err != nil {
			return nil, err
		}
		nameToSum[strings.TrimSuffix(attrs.Name, ".sha256")] = sum
	}
	for _, attrs := range objList.Results {
		if !strings.Contains(attrs.Name, fileDate) {
			continue
		}
		if strings.HasSuffix(attrs.Name, ".sha256") {
			continue
		}
		sum, ok := nameToSum[attrs.Name]
		if !ok {
			return nil, fmt.Errorf("%v has no checksum file!", attrs.Name)
		}
		downloadData = append(downloadData, DownloadData{
			Filename: filepath.Base(attrs.Name),
			Platform: getPlatform(attrs.Name),
			Checksum: sum,
		})
	}

	return &ReleaseData{
		Date:         releaseDate.Format(titleDateFormat),
		Download:     downloadData,
		CamliVersion: rev(),
		GoVersion:    goVersion,
	}, nil
}
Example 22
// WithTokenSource returns a ClientOption that specifies an OAuth2 token
// source to be used as the basis for authentication.
func WithTokenSource(s oauth2.TokenSource) ClientOption {
	return wrapOpt{option.WithTokenSource(s)}
}
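A short usage sketch for Example 22, assuming a static token purely for illustration: the wrapper turns any oauth2.TokenSource into a ClientOption for the surrounding package's client constructor.

func ExampleWithTokenSource() {
	// A static token source stands in for real credentials in this sketch.
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "ya29.placeholder"})
	opt := WithTokenSource(ts)
	_ = opt // pass opt to the package's client constructor
}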
Example 23
func TestIntegration(t *testing.T) {
	if testing.Short() {
		t.Skip("Integration tests skipped in short mode")
	}

	ctx := context.Background()
	ts := testutil.TokenSource(ctx, Scope)
	if ts == nil {
		t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
	}

	projID := testutil.ProjID()
	c, err := NewClient(ctx, projID, option.WithTokenSource(ts))
	if err != nil {
		t.Fatal(err)
	}
	ds := c.Dataset("bigquery_integration_test")
	if err := ds.Create(ctx); err != nil && !hasStatusCode(err, http.StatusConflict) { // AlreadyExists is 409
		t.Fatal(err)
	}
	schema := Schema([]*FieldSchema{
		{Name: "name", Type: StringFieldType},
		{Name: "num", Type: IntegerFieldType},
	})
	table := ds.Table("t1")
	// Delete the table in case it already exists. (Ignore errors.)
	table.Delete(ctx)
	// Create the table.
	err = table.Create(ctx, schema, TableExpiration(time.Now().Add(5*time.Minute)))
	if err != nil {
		t.Fatal(err)
	}
	// Check table metadata.
	md, err := table.Metadata(ctx)
	if err != nil {
		t.Fatal(err)
	}
	// TODO(jba): check md more thoroughly.
	if got, want := md.ID, fmt.Sprintf("%s:%s.%s", projID, ds.id, table.TableID); got != want {
		t.Errorf("metadata.ID: got %q, want %q", got, want)
	}
	if got, want := md.Type, RegularTable; got != want {
		t.Errorf("metadata.Type: got %v, want %v", got, want)
	}

	// List tables in the dataset.
	tables, err := ds.ListTables(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if got, want := len(tables), 1; got != want {
		t.Fatalf("ListTables: got %d, want %d", got, want)
	}
	want := *table
	if got := tables[0]; !reflect.DeepEqual(got, &want) {
		t.Errorf("ListTables: got %v, want %v", got, &want)
	}

	// Iterate over tables in the dataset.
	it := ds.Tables(ctx)
	tables = nil
	for {
		tbl, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			t.Fatal(err)
		}
		tables = append(tables, tbl)
	}
	if got, want := tables, []*Table{table}; !reflect.DeepEqual(got, want) {
		t.Errorf("Tables: got %v, want %v", got, want)
	}

	// Populate the table.
	upl := table.NewUploader()
	var rows []*ValuesSaver
	for i, name := range []string{"a", "b", "c"} {
		rows = append(rows, &ValuesSaver{
			Schema:   schema,
			InsertID: name,
			Row:      []Value{name, i},
		})
	}
	if err := upl.Put(ctx, rows); err != nil {
		t.Fatal(err)
	}

	checkRead := func(src ReadSource) {
		it, err := c.Read(ctx, src)
		if err != nil {
			t.Fatal(err)
		}
		for i := 0; it.Next(ctx); i++ {
			var vals ValueList
			if err := it.Get(&vals); err != nil {
				t.Fatal(err)
			}
			if got, want := vals, rows[i].Row; !reflect.DeepEqual([]Value(got), want) {
				t.Errorf("got %v, want %v", got, want)
			}
		}
	}
	// Read the table.
	checkRead(table)

	// Query the table.
	q := &Query{
		Q:                "select name, num from t1",
		DefaultProjectID: projID,
		DefaultDatasetID: ds.id,
	}
	checkRead(q)

	// Query the long way.
	dest := &Table{}
	job1, err := c.Copy(ctx, dest, q, WriteTruncate)
	if err != nil {
		t.Fatal(err)
	}
	job2, err := c.JobFromID(ctx, job1.ID())
	if err != nil {
		t.Fatal(err)
	}
	// TODO(jba): poll status until job is done
	_, err = job2.Status(ctx)
	if err != nil {
		t.Fatal(err)
	}

	checkRead(job2)

	// TODO(jba): patch the table
}
Example 24
func main() {
	flag.Parse()
	t := time.Now()
	expvar.NewInt("start_time_epoch_secs").Set(t.Unix())
	expvar.NewString("start_time_timestamp").Set(t.Format(time.RFC3339))
	expvar.Publish("uptime_secs", expvar.Func(func() interface{} {
		return int64(time.Since(t) / time.Second)
	}))
	expvar.Publish("uptime_dur", expvar.Func(func() interface{} {
		return time.Since(t).String()
	}))

	routeHost, redirectHost := calculateDomains(*rawVHost, *httpsAddr)

	apiVars.Set("requests", apiRequests)
	staticVars.Set("requests", staticRequests)
	webVars.Set("requests", webRequests)

	tlsConf := makeTLSConfig(*certPath, *keyPath)

	tlsListener, err := tls.Listen("tcp", *httpsAddr, tlsConf)
	if err != nil {
		log.Fatalf("unable to listen for the HTTPS server on %s: %s", *httpsAddr, err)
	}
	plaintextListener, err := net.Listen("tcp", *httpAddr)
	if err != nil {
		log.Fatalf("unable to listen for the HTTP server on %s: %s", *httpAddr, err)
	}
	ns := expvar.NewMap("tls")
	l := newListener(tlsListener, ns)

	if *acmeURL != "" {
		if !strings.HasPrefix(*acmeURL, "/") &&
			!strings.HasPrefix(*acmeURL, "https://") &&
			!strings.HasPrefix(*acmeURL, "http://") {
			fmt.Fprintf(os.Stderr, "acmeRedirect must start with 'http://', 'https://', or '/' but does not: %#v\n", *acmeURL)
			os.Exit(1)
		}
	}

	blockedOrigins := []string{}
	if *originsFile != "" {
		jc := loadOriginsConfig(*originsFile)
		blockedOrigins = jc.BlockedOrigins
	}

	hostname, err := os.Hostname()
	if err != nil {
		log.Fatalf("unable to get hostname of local machine: %s", err)
	}

	var gclog logClient
	if *googAcctConf != "" {
		googConf := loadGoogleServiceAccount(*googAcctConf)
		tokSrc := googConf.conf.TokenSource(context.Background())
		client, err := logging.NewClient(context.Background(), googConf.ProjectID, option.WithTokenSource(tokSrc))
		if err != nil {
			log.Fatalf("unable to make Google Cloud Logging client: %s", err)
		}
		client.OnError = func(err error) {
			log.Printf("goog logging error: %s", err)
		}
		gclog = client.Logger(*allowLogName)
	} else {
		gclog = nullLogClient{}
	}
	oa := newOriginAllower(blockedOrigins, hostname, gclog, expvar.NewMap("origins"))

	staticHandler := http.NotFoundHandler()
	webHandleFunc := http.NotFound
	if !*headless {
		index = loadIndex()
		staticHandler = makeStaticHandler(*staticDir, staticVars)
		webHandleFunc = handleWeb
	}

	m := tlsMux(
		routeHost,
		redirectHost,
		*acmeURL,
		staticHandler,
		webHandleFunc,
		oa,
	)

	go func() {
		err := http.ListenAndServe(*adminAddr, nil)
		if err != nil {
			log.Fatalf("unable to open admin server: %s", err)
		}
	}()

	httpsSrv := &http.Server{
		Handler:      m,
		ReadTimeout:  10 * time.Second,
		WriteTimeout: 15 * time.Second,
	}

	httpSrv := &http.Server{
		Handler:      plaintextMux(redirectHost),
		ReadTimeout:  10 * time.Second,
		WriteTimeout: 15 * time.Second,
	}

	log.Printf("Booting HTTPS on %s and HTTP on %s", *httpsAddr, *httpAddr)
	go func() {
		err := httpsSrv.Serve(l)
		if err != nil {
			log.Fatalf("https server error: %s", err)
		}
	}()
	err = httpSrv.Serve(plaintextListener)
	if err != nil {
		log.Fatalf("http server error: %s", err)
	}
}
Example 25
func newFromConfig(_ blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) {
	var (
		auth      = config.RequiredObject("auth")
		bucket    = config.RequiredString("bucket")
		cacheSize = config.OptionalInt64("cacheSize", 32<<20)

		clientID     = auth.RequiredString("client_id") // or "auto" for service accounts
		clientSecret = auth.OptionalString("client_secret", "")
		refreshToken = auth.OptionalString("refresh_token", "")
	)

	if err := config.Validate(); err != nil {
		return nil, err
	}
	if err := auth.Validate(); err != nil {
		return nil, err
	}

	var dirPrefix string
	if parts := strings.SplitN(bucket, "/", 2); len(parts) > 1 {
		dirPrefix = parts[1]
		bucket = parts[0]
	}
	if dirPrefix != "" && !strings.HasSuffix(dirPrefix, "/") {
		dirPrefix += "/"
	}
	gs := &Storage{
		bucket:    bucket,
		dirPrefix: dirPrefix,
	}

	var (
		ctx = context.Background()
		ts  oauth2.TokenSource
		cl  *storage.Client
		err error
	)
	if clientID == "auto" {
		if !metadata.OnGCE() {
			return nil, errors.New(`Cannot use "auto" client_id when not running on GCE`)
		}
		ts, err = google.DefaultTokenSource(ctx, storage.ScopeReadWrite)
		if err != nil {
			return nil, err
		}
		cl, err = storage.NewClient(ctx)
		if err != nil {
			return nil, err
		}
	} else {
		if clientSecret == "" {
			return nil, errors.New("missing required parameter 'client_secret'")
		}
		if refreshToken == "" {
			return nil, errors.New("missing required parameter 'refresh_token'")
		}
		ts = oauthutil.NewRefreshTokenSource(&oauth2.Config{
			Scopes:       []string{storage.ScopeReadWrite},
			Endpoint:     google.Endpoint,
			ClientID:     clientID,
			ClientSecret: clientSecret,
			RedirectURL:  oauthutil.TitleBarRedirectURL,
		}, refreshToken)
		cl, err = storage.NewClient(ctx, option.WithTokenSource(ts))
		if err != nil {
			return nil, err
		}
	}

	gs.baseHTTPClient = oauth2.NewClient(ctx, ts)
	gs.client = cl

	if cacheSize != 0 {
		gs.cache = memory.NewCache(cacheSize)
	}

	ba, err := gs.client.Bucket(gs.bucket).Attrs(ctx)
	if err != nil {
		return nil, fmt.Errorf("error statting bucket %q: %v", gs.bucket, err)
	}
	hash := sha1.New()
	fmt.Fprintf(hash, "%v%v", ba.Created, ba.MetaGeneration)
	gs.genRandom = fmt.Sprintf("%x", hash.Sum(nil))
	gs.genTime = ba.Created

	return gs, nil
}
Example 26
func TestMain(m *testing.M) {
	flag.Parse() // needed for testing.Short()
	ctx := context.Background()
	testProjectID = testutil.ProjID()
	errorc = make(chan error, 100)
	if testProjectID == "" || testing.Short() {
		integrationTest = false
		if testProjectID != "" {
			log.Print("Integration tests skipped in short mode (using fake instead)")
		}
		testProjectID = "PROJECT_ID"
		clean = func(e *logging.Entry) {
			// Remove the insert ID for consistency with the integration test.
			e.InsertID = ""
		}

		addr, err := ltesting.NewServer()
		if err != nil {
			log.Fatalf("creating fake server: %v", err)
		}
		logging.SetNow(testNow)

		newClients = func(ctx context.Context, projectID string) (*logging.Client, *logadmin.Client) {
			conn, err := grpc.Dial(addr, grpc.WithInsecure())
			if err != nil {
				log.Fatalf("dialing %q: %v", addr, err)
			}
			c, err := logging.NewClient(ctx, projectID, option.WithGRPCConn(conn))
			if err != nil {
				log.Fatalf("creating client for fake at %q: %v", addr, err)
			}
			ac, err := logadmin.NewClient(ctx, projectID, option.WithGRPCConn(conn))
			if err != nil {
				log.Fatalf("creating client for fake at %q: %v", addr, err)
			}
			return c, ac
		}

	} else {
		integrationTest = true
		clean = func(e *logging.Entry) {
			// We cannot compare timestamps, so set them to the test time.
			// Also, remove the insert ID added by the service.
			e.Timestamp = testNow().UTC()
			e.InsertID = ""
		}
		ts := testutil.TokenSource(ctx, logging.AdminScope)
		if ts == nil {
			log.Fatal("The project key must be set. See CONTRIBUTING.md for details")
		}
		log.Printf("running integration tests with project %s", testProjectID)
		newClients = func(ctx context.Context, projectID string) (*logging.Client, *logadmin.Client) {
			c, err := logging.NewClient(ctx, projectID, option.WithTokenSource(ts))
			if err != nil {
				log.Fatalf("creating prod client: %v", err)
			}
			ac, err := logadmin.NewClient(ctx, projectID, option.WithTokenSource(ts))
			if err != nil {
				log.Fatalf("creating prod client: %v", err)
			}
			return c, ac
		}

	}
	client, aclient = newClients(ctx, testProjectID)
	client.OnError = func(e error) { errorc <- e }
	initLogs(ctx)
	testFilter = fmt.Sprintf(`logName = "projects/%s/logs/%s"`, testProjectID,
		strings.Replace(testLogID, "/", "%2F", -1))
	exit := m.Run()
	client.Close()
	os.Exit(exit)
}
Example 27
// TestEndToEnd pumps many messages into a topic and tests that they are all delivered to each subscription for the topic.
// It also tests that messages are not unexpectedly redelivered.
func TestEndToEnd(t *testing.T) {
	if testing.Short() {
		t.Skip("Integration tests skipped in short mode")
	}
	ctx := context.Background()
	ts := testutil.TokenSource(ctx, ScopePubSub, ScopeCloudPlatform)
	if ts == nil {
		t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
	}

	now := time.Now()
	topicName := fmt.Sprintf("endtoend-%d", now.Unix())
	subPrefix := fmt.Sprintf("endtoend-%d", now.Unix())

	client, err := NewClient(ctx, testutil.ProjID(), option.WithTokenSource(ts))
	if err != nil {
		t.Fatalf("Creating client error: %v", err)
	}

	var topic *Topic
	if topic, err = client.CreateTopic(ctx, topicName); err != nil {
		t.Fatalf("CreateTopic error: %v", err)
	}
	defer topic.Delete(ctx)

	// Three subscriptions to the same topic.
	var subA, subB, subC *Subscription
	if subA, err = client.CreateSubscription(ctx, subPrefix+"-a", topic, ackDeadline, nil); err != nil {
		t.Fatalf("CreateSub error: %v", err)
	}
	defer subA.Delete(ctx)

	if subB, err = client.CreateSubscription(ctx, subPrefix+"-b", topic, ackDeadline, nil); err != nil {
		t.Fatalf("CreateSub error: %v", err)
	}
	defer subB.Delete(ctx)

	if subC, err = client.CreateSubscription(ctx, subPrefix+"-c", topic, ackDeadline, nil); err != nil {
		t.Fatalf("CreateSub error: %v", err)
	}
	defer subC.Delete(ctx)

	expectedCounts := make(map[string]int)
	for _, id := range publish(t, ctx, topic) {
		expectedCounts[id] = 1
	}

	// recv provides an indication that messages are still arriving.
	recv := make(chan struct{})

	// Keep track of the number of times each message (by message id) was
	// seen from each subscription.
	mcA := &messageCounter{counts: make(map[string]int), recv: recv}
	mcB := &messageCounter{counts: make(map[string]int), recv: recv}
	mcC := &messageCounter{counts: make(map[string]int), recv: recv}

	stopC := make(chan struct{})

	// We have three subscriptions to our topic.
	// Each subscription will get a copy of each published message.
	//
	// subA has just one iterator, while subB has two. The subB iterators
	// will each process roughly half of the messages for subB. All of
	// these iterators live until all messages have been consumed.  subC is
	// processed by a series of short-lived iterators.

	var wg sync.WaitGroup

	con := &consumer{
		concurrencyPerIterator: 1,
		iteratorsInFlight:      2,
		lifetimes:              immortal,
	}
	con.consume(t, ctx, subA, mcA, &wg, stopC)

	con = &consumer{
		concurrencyPerIterator: 1,
		iteratorsInFlight:      2,
		lifetimes:              immortal,
	}
	con.consume(t, ctx, subB, mcB, &wg, stopC)

	con = &consumer{
		concurrencyPerIterator: 1,
		iteratorsInFlight:      2,
		lifetimes: &explicitLifetimes{
			lifetimes: []time.Duration{ackDeadline, ackDeadline, ackDeadline / 2, ackDeadline / 2},
		},
	}
	con.consume(t, ctx, subC, mcC, &wg, stopC)

	go func() {
		timeoutC := time.After(timeout)
		// Every time this ticker ticks, we will check if we have received any
		// messages since the last time it ticked.  We check less frequently
		// than the ack deadline, so that we can detect if messages are
		// redelivered after having their ack deadline extended.
		checkQuiescence := time.NewTicker(ackDeadline * 3)
		defer checkQuiescence.Stop()

		var received bool
		for {
			select {
			case <-recv:
				received = true
			case <-checkQuiescence.C:
				if received {
					received = false
				} else {
					close(stopC)
					return
				}
			case <-timeoutC:
				t.Errorf("timed out")
				close(stopC)
				return
			}
		}
	}()
	wg.Wait()

	for _, mc := range []*messageCounter{mcA, mcB, mcC} {
		if got, want := mc.counts, expectedCounts; !reflect.DeepEqual(got, want) {
			t.Errorf("message counts: %v\n", diff(got, want))
		}
	}
}