Example #1
// getInstalledTLS returns the TLS certificate and key stored on Google Cloud Storage for the
// instance defined in d.Conf.
//
// If either the TLS certificate or the key doesn't exist, the error is os.ErrNotExist.
func (d *Deployer) getInstalledTLS() (certPEM, keyPEM []byte, err error) {
	ctx := context.Background()
	stoClient, err := cloudstorage.NewClient(ctx, cloud.WithBaseHTTP(d.Client))
	if err != nil {
		return nil, nil, fmt.Errorf("error creating Cloud Storage client to fetch TLS cert & key from new instance: %v", err)
	}
	getFile := func(name string) ([]byte, error) {
		sr, err := stoClient.Bucket(d.Conf.bucketBase()).Object(path.Join(configDir, name)).NewReader(ctx)
		if err == cloudstorage.ErrObjectNotExist {
			return nil, os.ErrNotExist
		}
		if err != nil {
			return nil, err
		}
		defer sr.Close()
		return ioutil.ReadAll(sr)
	}
	var grp syncutil.Group
	grp.Go(func() (err error) {
		certPEM, err = getFile(certFilename())
		return
	})
	grp.Go(func() (err error) {
		keyPEM, err = getFile(keyFilename())
		return
	})
	err = grp.Err()
	return
}
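A caller-side sketch of the os.ErrNotExist contract documented above. The function name, the log call, and the placement in the same package as Deployer are assumptions, not part of the original example.
// describeInstalledTLS is a hypothetical caller of getInstalledTLS that relies
// only on the documented os.ErrNotExist contract.
func describeInstalledTLS(d *Deployer) error {
	certPEM, keyPEM, err := d.getInstalledTLS()
	if err == os.ErrNotExist {
		// Either the certificate or the key object is missing from the
		// bucket; a real caller might generate a self-signed keypair here.
		return fmt.Errorf("no TLS keypair installed yet")
	}
	if err != nil {
		return err
	}
	log.Printf("installed TLS: %d cert bytes, %d key bytes", len(certPEM), len(keyPEM))
	return nil
}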
Example #2
func newTestClient(rt http.RoundTripper) *trace.Client {
	t, err := trace.NewClient(context.Background(), testProjectID, cloud.WithBaseHTTP(&http.Client{Transport: rt}))
	if err != nil {
		panic(err)
	}
	return t
}
Example #3
func TestCopyObjectMissingFields(t *testing.T) {
	var tests = []struct {
		srcBucket, srcName, destBucket, destName string
		errMsg                                   string
	}{
		{
			"mybucket", "", "mybucket", "destname",
			"srcName and destName must be non-empty",
		},
		{
			"mybucket", "srcname", "mybucket", "",
			"srcName and destName must be non-empty",
		},
		{
			"", "srcfile", "mybucket", "destname",
			"srcBucket and destBucket must both be non-empty",
		},
		{
			"mybucket", "srcfile", "", "destname",
			"srcBucket and destBucket must both be non-empty",
		},
	}
	ctx := context.Background()
	client, err := NewClient(ctx, cloud.WithBaseHTTP(&http.Client{Transport: &fakeTransport{}}))
	if err != nil {
		panic(err)
	}
	for i, test := range tests {
		_, err := client.CopyObject(ctx, test.srcBucket, test.srcName, test.destBucket, test.destName, nil)
		if !strings.Contains(err.Error(), test.errMsg) {
			t.Errorf("CopyObject test #%v: err = %v, want %v", i, err, test.errMsg)
		}
	}
}
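Several of the tests in this listing construct clients over a fakeTransport that is never shown. A minimal sketch of what such a helper might look like in this package follows; it is an assumption, and note that Example #20 uses a different fakeTransport that wraps a per-request handler function instead.
// fakeTransport is a hypothetical stand-in for the test helper used above: an
// http.RoundTripper that rejects every request, so client calls fail
// deterministically without any network access.
type fakeTransport struct{}

func (fakeTransport) RoundTrip(*http.Request) (*http.Response, error) {
	return nil, errors.New("fakeTransport: network access not allowed in tests")
}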
Example #4
// NewClient creates a new PubSub client.
func NewClient(ctx context.Context, projectID string, opts ...cloud.ClientOption) (*Client, error) {
	var o []cloud.ClientOption
	// Environment variables for gcloud emulator:
	// https://cloud.google.com/sdk/gcloud/reference/beta/emulators/pubsub/
	if addr := os.Getenv("PUBSUB_EMULATOR_HOST"); addr != "" {
		o = []cloud.ClientOption{
			cloud.WithEndpoint("http://" + addr + "/"),
			cloud.WithBaseHTTP(http.DefaultClient),
		}
	} else {
		o = []cloud.ClientOption{
			cloud.WithEndpoint(prodAddr),
			cloud.WithScopes(raw.PubsubScope, raw.CloudPlatformScope),
			cloud.WithUserAgent(userAgent),
		}
	}
	o = append(o, opts...)
	httpClient, endpoint, err := transport.NewHTTPClient(ctx, o...)
	if err != nil {
		return nil, fmt.Errorf("dialing: %v", err)
	}

	s, err := newPubSubService(httpClient, endpoint)
	if err != nil {
		return nil, fmt.Errorf("constructing pubsub client: %v", err)
	}

	c := &Client{
		projectID: projectID,
		s:         s,
	}

	return c, nil
}
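A usage sketch for the emulator branch above, written as if it lived in the same package as NewClient. The emulator address and project ID are placeholders.
// newEmulatorClient points NewClient at a locally running Pub/Sub emulator
// instead of prodAddr by setting the environment variable it checks.
func newEmulatorClient() (*Client, error) {
	os.Setenv("PUBSUB_EMULATOR_HOST", "localhost:8085") // placeholder address
	return NewClient(context.Background(), "my-project") // placeholder project
}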
Example #5
func upload(srcPath string) {
	if !*flagUpload {
		return
	}
	destName := strings.Replace(filepath.Base(srcPath), "camlistore", "camlistore-"+rev(), 1)
	versionedTarball := "monthly/" + destName

	log.Printf("Uploading %s/%s ...", bucket, versionedTarball)

	ts, err := tokenSource(bucket)
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	stoClient, err := storage.NewClient(ctx, cloud.WithTokenSource(ts), cloud.WithBaseHTTP(oauth2.NewClient(ctx, ts)))
	if err != nil {
		log.Fatal(err)
	}
	w := stoClient.Bucket(bucket).Object(versionedTarball).NewWriter(ctx)
	w.ACL = publicACL(project)
	w.CacheControl = "no-cache" // TODO: remove for non-tip releases? set expirations?
	contentType := "application/x-gtar"
	if strings.HasSuffix(versionedTarball, ".zip") {
		contentType = "application/zip"
	}
	w.ContentType = contentType
	csw := sha256.New()
	mw := io.MultiWriter(w, csw)

	src, err := os.Open(srcPath)
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	if _, err := io.Copy(mw, src); err != nil {
		log.Fatalf("io.Copy: %v", err)
	}
	if err := w.Close(); err != nil {
		log.Fatalf("closing GCS storage writer: %v", err)
	}
	log.Printf("Uploaded monthly tarball to %s", versionedTarball)

	// And upload the corresponding checksum
	checkSumFile := versionedTarball + ".sha256"
	sum := fmt.Sprintf("%x", csw.Sum(nil))
	w = stoClient.Bucket(bucket).Object(checkSumFile).NewWriter(ctx)
	w.ACL = publicACL(project)
	w.CacheControl = "no-cache" // TODO: remove for non-tip releases? set expirations?
	w.ContentType = "text/plain"
	if _, err := io.Copy(w, strings.NewReader(sum)); err != nil {
		log.Fatalf("error uploading checksum %v: %v", checkSumFile, err)
	}
	if err := w.Close(); err != nil {
		log.Fatalf("closing GCS storage writer: %v", err)
	}
	log.Printf("Uploaded monthly tarball checksum to %s", checkSumFile)
}
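publicACL is used above but not defined in this listing. A plausible definition, inferred from the ACL rules spelled out explicitly in Example #10 and offered only as an assumption, is:
// publicACL returns ACL rules that keep the object owned by the project
// owners and readable by everyone; the real helper may differ.
func publicACL(proj string) []storage.ACLRule {
	return []storage.ACLRule{
		{Entity: storage.ACLEntity("project-owners-" + proj), Role: storage.RoleOwner},
		{Entity: storage.AllUsers, Role: storage.RoleReader},
	}
}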
Example #6
// uploadReleaseTarball uploads the generated tarball of binaries to
// camlistore-release/VERSION/camlistoreVERSION-REV-CONTENTS.EXT. It then makes a copy in
// the same bucket and path, as camlistoreVERSION-CONTENTS.EXT.
func uploadReleaseTarball() {
	proj := "camlistore-website"
	bucket := "camlistore-release"
	tarball := *flagVersion + "/" + filepath.Base(releaseTarball)
	versionedTarball := strings.Replace(tarball, "camlistore"+*flagVersion, "camlistore"+*flagVersion+"-"+rev(), 1)

	log.Printf("Uploading %s/%s ...", bucket, versionedTarball)

	ts, err := tokenSource(bucket)
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	stoClient, err := storage.NewClient(ctx, cloud.WithTokenSource(ts), cloud.WithBaseHTTP(oauth2.NewClient(ctx, ts)))
	if err != nil {
		log.Fatal(err)
	}
	w := stoClient.Bucket(bucket).Object(versionedTarball).NewWriter(ctx)
	w.ACL = publicACL(proj)
	w.CacheControl = "no-cache" // TODO: remove for non-tip releases? set expirations?
	contentType := "application/x-gtar"
	if *buildOS == "windows" {
		contentType = "application/zip"
	}
	w.ContentType = contentType

	src, err := os.Open(releaseTarball)
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	if _, err := io.Copy(w, src); err != nil {
		log.Fatalf("io.Copy: %v", err)
	}
	if err := w.Close(); err != nil {
		log.Fatalf("closing GCS storage writer: %v", err)
	}
	log.Printf("Uploaded tarball to %s", versionedTarball)
	if !isWIP() {
		log.Printf("Copying tarball to %s/%s ...", bucket, tarball)
		dest := stoClient.Bucket(bucket).Object(tarball)
		if _, err := stoClient.Bucket(bucket).Object(versionedTarball).CopyTo(
			ctx,
			dest,
			&storage.ObjectAttrs{
				ACL:         publicACL(proj),
				ContentType: contentType,
			}); err != nil {
			log.Fatalf("Error uploading %v: %v", tarball, err)
		}
		log.Printf("Uploaded tarball to %s", tarball)
	}
}
Example #7
func setupOAuth() error {
	client, err := auth.NewDefaultJWTServiceAccountClient(auth.SCOPE_READ_WRITE)
	if err != nil {
		return fmt.Errorf("Problem setting up client OAuth: %v", err)
	}

	if storageClient, err = storage.NewClient(context.Background(), cloud.WithBaseHTTP(client)); err != nil {
		return fmt.Errorf("Problem authenticating: %v", err)
	}
	return nil
}
Example #8
func TestErrorOnObjectsInsertCall(t *testing.T) {
	ctx := context.Background()
	hc := &http.Client{Transport: &fakeTransport{}}
	client, err := NewClient(ctx, cloud.WithBaseHTTP(hc))
	if err != nil {
		t.Errorf("error when creating client: %v", err)
	}
	wc := client.Bucket("bucketname").Object("filename1").NewWriter(ctx)
	wc.ContentType = "text/plain"
	if _, err := wc.Write([]byte("hello world")); err == nil {
		t.Errorf("expected error on write, got nil")
	}
	if err := wc.Close(); err == nil {
		t.Errorf("expected error on close, got nil")
	}
}
Example #9
// deleteFolderContent removes all content in the given GS bucket/folderName.
func deleteFolderContent(t *testing.T, bucket, folderName string, client *http.Client) {
	ctx := context.Background()
	cStorage, err := storage.NewClient(ctx, cloud.WithBaseHTTP(client))
	assert.Nil(t, err)

	bucketHdl := cStorage.Bucket(bucket)
	objList, err := bucketHdl.List(ctx, &storage.Query{Prefix: folderName, MaxResults: 1000})
	assert.Nil(t, err)

	for _, obj := range objList.Results {
		fmt.Printf("%s/%s\n\n", obj.Bucket, obj.Name)
		objHdl := bucketHdl.Object(obj.Name)
		assert.Nil(t, objHdl.Delete(ctx))
		fmt.Printf("Deleted object %s / %s\n\n", obj.Bucket, obj.Name)
	}
}
Example #10
// uploadBinary uploads the currently-running Linux binary.
// It crashes if it fails.
func (cl *cloudLaunch) uploadBinary() {
	ctx := context.Background()
	if cl.BinaryBucket == "" {
		log.Fatal("cloudlaunch: Config.BinaryBucket is empty")
	}
	stoClient, err := storage.NewClient(ctx, cloud.WithBaseHTTP(cl.oauthClient))
	if err != nil {
		log.Fatal(err)
	}
	w := stoClient.Bucket(cl.BinaryBucket).Object(cl.binaryObject()).NewWriter(ctx)
	w.ACL = []storage.ACLRule{
		// If you don't give the owners access, the web UI seems to
		// have a bug and doesn't have access to see that it's public, so
		// won't render the "Shared Publicly" link. So we do that, even
		// though it's dumb and unnecessary otherwise:
		{
			Entity: storage.ACLEntity("project-owners-" + cl.GCEProjectID),
			Role:   storage.RoleOwner,
		},
		// Public, so our systemd unit can get it easily:
		{
			Entity: storage.AllUsers,
			Role:   storage.RoleReader,
		},
	}
	w.CacheControl = "no-cache"
	selfPath := getSelfPath()
	log.Printf("Uploading %q to %v", selfPath, cl.binaryURL())
	f, err := os.Open(selfPath)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	n, err := io.Copy(w, f)
	if err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("Uploaded %d bytes", n)
}
Example #11
func gcsCommonClient(client *http.Client, csctx *CloudStoreContext) (Store, error) {
	project := csctx.Project
	bucket := csctx.Bucket
	prefix := fmt.Sprintf("%s:(project=%s bucket=%s)", csctx.LogggingContext, project, bucket)
	l := LogConstructor(prefix)

	gcs, err := storage.NewClient(context.Background(), cloud.WithBaseHTTP(client))
	if err != nil {
		l.Errorf("%v error creating Google cloud storage client. project:%s gs://%s/ err:%v ",
			csctx.LogggingContext, project, bucket, err)
		return nil, err
	}
	store, err := NewGCSStore(gcs, bucket, csctx.TmpDir, maxResults, l)
	if err != nil {
		l.Errorf("error creating the store. err=%v ", err)
		return nil, err
	}
	return store, nil
}
Example #12
// CreateGooglePubSubClient returns a client for communicating with the Google Pub/Sub service.
func CreateGooglePubSubClient() (*pubsub.Client, error) {
	envValue := os.Getenv(NDGooglePubSubURL)
	if envValue != "" {
		googlePubSubURL = envValue
	}

	envValue = os.Getenv(NDGoogleProjectID)
	if envValue != "" {
		googleProjectID = envValue
	}

	ctx := cloud.NewContext(googleProjectID, http.DefaultClient)
	o := []cloud.ClientOption{
		cloud.WithBaseHTTP(http.DefaultClient),
		cloud.WithEndpoint(googlePubSubURL),
	}
	client, err := pubsub.NewClient(ctx, googleProjectID, o...)
	if err != nil {
		return nil, err
	}
	return client, nil
}
Example #13
func TestErrorOnObjectsInsertCall(t *testing.T) {
	ctx := context.Background()
	hc := &http.Client{Transport: &fakeTransport{}}
	client, err := NewClient(ctx, cloud.WithBaseHTTP(hc))
	if err != nil {
		t.Errorf("error when creating client: %v", err)
	}
	wc := client.Bucket("bucketname").Object("filename1").NewWriter(ctx)
	wc.ContentType = "text/plain"

	// We can't check that the Write fails, since it depends on the write to the
	// underlying fakeTransport failing, which is racy.
	wc.Write([]byte("hello world"))

	// Close must always return an error though since it waits for the transport to
	// have closed.
	if err := wc.Close(); err == nil {
		t.Errorf("expected error on close, got nil")
	}
}
Example #14
func setupOAuth() error {
	var useRedirectURL = fmt.Sprintf("http://localhost%s/oauth2callback/", *port)
	if !*local {
		useRedirectURL = *redirectURL
	}
	if err := login.InitFromMetadataOrJSON(useRedirectURL, login.DEFAULT_SCOPE, *authWhiteList); err != nil {
		return fmt.Errorf("Problem setting up server OAuth: %s", err)
	}

	client, err := auth.NewDefaultJWTServiceAccountClient(auth.SCOPE_READ_ONLY)
	if err != nil {
		return fmt.Errorf("Problem setting up client OAuth: %s", err)
	}

	storageClient, err = storage.NewClient(context.Background(), cloud.WithBaseHTTP(client))
	if err != nil {
		return fmt.Errorf("Problem authenticating: %s", err)
	}
	return nil
}
Example #15
// newPDFProcessor implements the ingestion.Constructor signature.
func newPDFProcessor(vcs vcsinfo.VCS, config *sharedconfig.IngesterConfig, client *http.Client) (ingestion.Processor, error) {
	// Parse the parameters right into the pdfProcessor instance.
	ret := &pdfProcessor{}
	err := combineErrors(
		ensureNonEmptyKey(config.ExtraParams, CONFIG_INPUT_IMAGES_BUCKET, &ret.inImagesBucket),
		ensureNonEmptyKey(config.ExtraParams, CONFIG_INPUT_IMAGES_DIR, &ret.inImagesDir),
		ensureNonEmptyKey(config.ExtraParams, CONFIG_OUTPUT_JSON_BUCKET, &ret.outJsonBucket),
		ensureNonEmptyKey(config.ExtraParams, CONFIG_OUTPUT_JSON_DIR, &ret.outJsonDir),
		ensureNonEmptyKey(config.ExtraParams, CONFIG_OUTPUT_IMAGES_BUCKET, &ret.outImagesBucket),
		ensureNonEmptyKey(config.ExtraParams, CONFIG_OUTPUT_IMAGES_DIR, &ret.outImagesDir))
	if err != nil {
		return nil, err
	}

	// Make sure there is a cachedir.
	ret.pdfCacheDir = config.ExtraParams[CONFIG_PDF_CACHEDIR]
	if ret.pdfCacheDir == "" {
		if ret.pdfCacheDir, err = ioutil.TempDir("", "pdfcache-dir"); err != nil {
			return nil, err
		}
	}

	// Create the storage service client.
	storageClient, err := storage.NewClient(context.Background(), cloud.WithBaseHTTP(client))
	if err != nil {
		return nil, fmt.Errorf("Failed to create a Google Storage API client: %s", err)
	}

	// Find rasterizers that have been enabled by the pdf package.
	rasterizers := pdf.GetEnabledRasterizers()
	if len(rasterizers) == 0 {
		return nil, fmt.Errorf("No rasterizers enabled.")
	}

	ret.client = client
	ret.storageClient = storageClient
	ret.rasterizers = rasterizers
	return ret, nil
}
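combineErrors and ensureNonEmptyKey come from the surrounding package and are not shown. Hedged sketches of what they might look like, assuming nothing beyond how they are called above:
// ensureNonEmptyKey copies params[key] into *dst and errors if the value is
// missing or empty. Hypothetical; the real helper may differ.
func ensureNonEmptyKey(params map[string]string, key string, dst *string) error {
	v := params[key]
	if v == "" {
		return fmt.Errorf("missing or empty config value for %q", key)
	}
	*dst = v
	return nil
}

// combineErrors folds a list of errors into one, returning nil if none are
// set. Hypothetical; the real helper may differ.
func combineErrors(errs ...error) error {
	var msgs []string
	for _, err := range errs {
		if err != nil {
			msgs = append(msgs, err.Error())
		}
	}
	if len(msgs) == 0 {
		return nil
	}
	return errors.New(strings.Join(msgs, "; "))
}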
Example #16
// CreateGoogleDatastoreClient creates a client for communicating with the Google Datastore service.
func CreateGoogleDatastoreClient() (*datastore.Client, error) {
	envValue := os.Getenv(NDGoogleDatastoreURL)
	if envValue != "" {
		googleDatastoreURL = envValue
	}

	envValue = os.Getenv(NDGoogleProjectID)
	if envValue != "" {
		googleProjectID = envValue
	}

	ctx := cloud.NewContext(googleProjectID, http.DefaultClient)
	o := []cloud.ClientOption{
		cloud.WithBaseHTTP(http.DefaultClient),
		cloud.WithEndpoint(googleDatastoreURL),
	}
	client, err := datastore.NewClient(ctx, googleProjectID, o...)
	if err != nil {
		return nil, err
	}

	return client, nil
}
Example #17
func TestCopyToMissingFields(t *testing.T) {
	var tests = []struct {
		srcBucket, srcName, destBucket, destName string
		errMsg                                   string
	}{
		{
			"mybucket", "", "mybucket", "destname",
			"the source and destination object names must both be non-empty",
		},
		{
			"mybucket", "srcname", "mybucket", "",
			"the source and destination object names must both be non-empty",
		},
		{
			"", "srcfile", "mybucket", "destname",
			"the source and destination bucket names must both be non-empty",
		},
		{
			"mybucket", "srcfile", "", "destname",
			"the source and destination bucket names must both be non-empty",
		},
	}
	ctx := context.Background()
	client, err := NewClient(ctx, cloud.WithBaseHTTP(&http.Client{Transport: &fakeTransport{}}))
	if err != nil {
		panic(err)
	}
	for i, test := range tests {
		src := client.Bucket(test.srcBucket).Object(test.srcName)
		dst := client.Bucket(test.destBucket).Object(test.destName)
		_, err := src.CopyTo(ctx, dst, nil)
		if !strings.Contains(err.Error(), test.errMsg) {
			t.Errorf("CopyTo test #%v:\ngot err  %q\nwant err %q", i, err, test.errMsg)
		}
	}
}
Example #18
func TestObjects(t *testing.T) {
	ctx := context.Background()
	client, err := NewClient(ctx, cloud.WithTokenSource(testutil.TokenSource(ctx, ScopeFullControl)))
	if err != nil {
		t.Errorf("Could not create client: %v", err)
	}
	defer client.Close()

	bkt := client.Bucket(bucket)

	// Cleanup.
	cleanup(t, "obj")

	const defaultType = "text/plain"

	// Test Writer.
	for _, obj := range objects {
		t.Logf("Writing %v", obj)
		wc := bkt.Object(obj).NewWriter(ctx)
		wc.ContentType = defaultType
		c := randomContents()
		if _, err := wc.Write(c); err != nil {
			t.Errorf("Write for %v failed with %v", obj, err)
		}
		if err := wc.Close(); err != nil {
			t.Errorf("Close for %v failed with %v", obj, err)
		}
		contents[obj] = c
	}

	// Test Reader.
	for _, obj := range objects {
		t.Logf("Creating a reader to read %v", obj)
		rc, err := bkt.Object(obj).NewReader(ctx)
		if err != nil {
			t.Errorf("Can't create a reader for %v, errored with %v", obj, err)
		}
		slurp, err := ioutil.ReadAll(rc)
		if err != nil {
			t.Errorf("Can't ReadAll object %v, errored with %v", obj, err)
		}
		if got, want := slurp, contents[obj]; !bytes.Equal(got, want) {
			t.Errorf("Contents (%q) = %q; want %q", obj, got, want)
		}
		if got, want := rc.Size(), len(contents[obj]); got != int64(want) {
			t.Errorf("Size (%q) = %d; want %d", obj, got, want)
		}
		if got, want := rc.ContentType(), "text/plain"; got != want {
			t.Errorf("ContentType (%q) = %q; want %q", obj, got, want)
		}
		rc.Close()

		// Test SignedURL
		opts := &SignedURLOptions{
			GoogleAccessID: "xxx@clientid",
			PrivateKey:     dummyKey("rsa"),
			Method:         "GET",
			MD5:            []byte("202cb962ac59075b964b07152d234b70"),
			Expires:        time.Date(2020, time.October, 2, 10, 0, 0, 0, time.UTC),
			ContentType:    "application/json",
			Headers:        []string{"x-header1", "x-header2"},
		}
		u, err := SignedURL(bucket, obj, opts)
		if err != nil {
			t.Fatalf("SignedURL(%q, %q) errored with %v", bucket, obj, err)
		}
		res, err := client.hc.Get(u)
		if err != nil {
			t.Fatalf("Can't get URL %q: %v", u, err)
		}
		slurp, err = ioutil.ReadAll(res.Body)
		if err != nil {
			t.Fatalf("Can't ReadAll signed object %v, errored with %v", obj, err)
		}
		if got, want := slurp, contents[obj]; !bytes.Equal(got, want) {
			t.Errorf("Contents (%v) = %q; want %q", obj, got, want)
		}
		res.Body.Close()
	}

	// Test NotFound.
	_, err = bkt.Object("obj-not-exists").NewReader(ctx)
	if err != ErrObjectNotExist {
		t.Errorf("Object should not exist, err found to be %v", err)
	}

	name := objects[0]

	// Test StatObject.
	o, err := bkt.Object(name).Attrs(ctx)
	if err != nil {
		t.Error(err)
	}
	if got, want := o.Name, name; got != want {
		t.Errorf("Name (%v) = %q; want %q", name, got, want)
	}
	if got, want := o.ContentType, defaultType; got != want {
		t.Errorf("ContentType (%v) = %q; want %q", name, got, want)
	}

	// Test object copy.
	copy, err := client.CopyObject(ctx, bucket, name, bucket, copyObj, nil)
	if err != nil {
		t.Errorf("CopyObject failed with %v", err)
	}
	if copy.Name != copyObj {
		t.Errorf("Copy object's name = %q; want %q", copy.Name, copyObj)
	}
	if copy.Bucket != bucket {
		t.Errorf("Copy object's bucket = %q; want %q", copy.Bucket, bucket)
	}

	// Test UpdateAttrs.
	updated, err := bkt.Object(name).Update(ctx, ObjectAttrs{
		ContentType: "text/html",
		ACL:         []ACLRule{{Entity: "domain-google.com", Role: RoleReader}},
	})
	if err != nil {
		t.Errorf("UpdateAttrs failed with %v", err)
	}
	if want := "text/html"; updated.ContentType != want {
		t.Errorf("updated.ContentType == %q; want %q", updated.ContentType, want)
	}

	// Test checksums.
	checksumCases := []struct {
		name     string
		contents [][]byte
		size     int64
		md5      string
		crc32c   uint32
	}{
		{
			name:     "checksum-object",
			contents: [][]byte{[]byte("hello"), []byte("world")},
			size:     10,
			md5:      "fc5e038d38a57032085441e7fe7010b0",
			crc32c:   1456190592,
		},
		{
			name:     "zero-object",
			contents: [][]byte{},
			size:     0,
			md5:      "d41d8cd98f00b204e9800998ecf8427e",
			crc32c:   0,
		},
	}
	for _, c := range checksumCases {
		wc := bkt.Object(c.name).NewWriter(ctx)
		for _, data := range c.contents {
			if _, err := wc.Write(data); err != nil {
				t.Errorf("Write(%q) failed with %q", data, err)
			}
		}
		if err = wc.Close(); err != nil {
			t.Errorf("%q: close failed with %q", c.name, err)
		}
		obj := wc.Attrs()
		if got, want := obj.Size, c.size; got != want {
			t.Errorf("Object (%q) Size = %v; want %v", c.name, got, want)
		}
		if got, want := fmt.Sprintf("%x", obj.MD5), c.md5; got != want {
			t.Errorf("Object (%q) MD5 = %q; want %q", c.name, got, want)
		}
		if got, want := obj.CRC32C, c.crc32c; got != want {
			t.Errorf("Object (%q) CRC32C = %v; want %v", c.name, got, want)
		}
	}

	// Test public ACL.
	publicObj := objects[0]
	if err = bkt.Object(publicObj).ACL().Set(ctx, AllUsers, RoleReader); err != nil {
		t.Errorf("PutACLEntry failed with %v", err)
	}
	publicClient, err := NewClient(ctx, cloud.WithBaseHTTP(http.DefaultClient))
	if err != nil {
		t.Error(err)
	}
	rc, err := publicClient.Bucket(bucket).Object(publicObj).NewReader(ctx)
	if err != nil {
		t.Error(err)
	}
	slurp, err := ioutil.ReadAll(rc)
	if err != nil {
		t.Errorf("ReadAll failed with %v", err)
	}
	if !bytes.Equal(slurp, contents[publicObj]) {
		t.Errorf("Public object's content: got %q, want %q", slurp, contents[publicObj])
	}
	rc.Close()

	// Test writer error handling.
	wc := publicClient.Bucket(bucket).Object(publicObj).NewWriter(ctx)
	if _, err := wc.Write([]byte("hello")); err != nil {
		t.Errorf("Write unexpectedly failed with %v", err)
	}
	if err = wc.Close(); err == nil {
		t.Error("Close expected an error, found none")
	}

	// Delete the copy object.
	// The rest of the objects will be deleted during the initial cleanup.
	// This test exists so we can still cover deletion even when there are no
	// other objects in the bucket to clean.
	if err := bkt.Object(copyObj).Delete(ctx); err != nil {
		t.Errorf("Deletion of %v failed with %v", copyObj, err)
	}
	_, err = bkt.Object(copyObj).Attrs(ctx)
	if err != ErrObjectNotExist {
		t.Errorf("Copy is expected to be deleted, stat errored with %v", err)
	}
}
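randomContents, dummyKey, cleanup, and the package-level objects, contents, bucket, and copyObj values are test fixtures that this example does not show. A plausible randomContents, assumed rather than taken from the package (and using math/rand), is simply:
// randomContents returns a small blob of pseudo-random bytes so that each
// test object gets distinct content. Hypothetical fixture.
func randomContents() []byte {
	b := make([]byte, 16)
	rand.Read(b)
	return b
}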
Example #19
// uploadDockerImage makes a tar.gz snapshot of the camlistored docker image,
// and uploads it to camlistore-release/docker/camlistored-REV.tar.gz. It then
// makes a copy in the same bucket and path as camlistored.tar.gz.
func uploadDockerImage() {
	proj := "camlistore-website"
	bucket := "camlistore-release"
	versionedTarball := "docker/camlistored-" + rev() + ".tar.gz"
	tarball := "docker/camlistored.tar.gz"

	log.Printf("Uploading %s/%s ...", bucket, versionedTarball)

	ts, err := tokenSource(bucket)
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	stoClient, err := storage.NewClient(ctx, cloud.WithTokenSource(ts), cloud.WithBaseHTTP(oauth2.NewClient(ctx, ts)))
	if err != nil {
		log.Fatal(err)
	}
	w := stoClient.Bucket(bucket).Object(versionedTarball).NewWriter(ctx)
	w.ACL = publicACL(proj)
	w.CacheControl = "no-cache" // TODO: remove for non-tip releases? set expirations?
	w.ContentType = "application/x-gtar"

	dockerSave := exec.Command("docker", "save", serverImage)
	dockerSave.Stderr = os.Stderr
	tar, err := dockerSave.StdoutPipe()
	if err != nil {
		log.Fatal(err)
	}
	targz, pw := io.Pipe()
	go func() {
		zw := gzip.NewWriter(pw)
		n, err := io.Copy(zw, tar)
		if err != nil {
			log.Fatalf("Error copying to gzip writer: after %d bytes, %v", n, err)
		}
		if err := zw.Close(); err != nil {
			log.Fatalf("gzip.Close: %v", err)
		}
		pw.CloseWithError(err)
	}()
	if err := dockerSave.Start(); err != nil {
		log.Fatalf("Error starting docker save %v: %v", serverImage, err)
	}
	if _, err := io.Copy(w, targz); err != nil {
		log.Fatalf("io.Copy: %v", err)
	}
	if err := w.Close(); err != nil {
		log.Fatalf("closing GCS storage writer: %v", err)
	}
	if err := dockerSave.Wait(); err != nil {
		log.Fatalf("Error waiting for docker save %v: %v", serverImage, err)
	}
	log.Printf("Uploaded tarball to %s", versionedTarball)
	if !isWIP() {
		log.Printf("Copying tarball to %s/%s ...", bucket, tarball)
		if _, err := stoClient.CopyObject(ctx,
			bucket, versionedTarball,
			bucket, tarball,
			&storage.ObjectAttrs{
				ACL:          publicACL(proj),
				CacheControl: "no-cache",
				ContentType:  "application/x-gtar",
			}); err != nil {
			log.Fatalf("Error uploading %v: %v", tarball, err)
		}
		log.Printf("Uploaded tarball to %s", tarball)
	}
}
Example #20
func TestRun(t *testing.T) {
	wdir, err := ioutil.TempDir("", "drone-gcs-test")
	if err != nil {
		t.Fatal(err)
	}
	updir := filepath.Join(wdir, "upload")
	subdir := filepath.Join(updir, "sub")
	mkdirs(t, subdir)
	writeFile(t, updir, "file.txt", []byte("text"))
	writeFile(t, updir, "file.js", []byte("javascript"))
	writeFile(t, subdir, "file.css", []byte("sub style"))
	writeFile(t, subdir, "file.bin", []byte("rubbish"))

	files := map[string]*struct {
		ctype string
		body  []byte
		gzip  bool
	}{
		"file.txt":     {"text/plain", []byte("text"), false},
		"file.js":      {"application/javascript", []byte("javascript"), true},
		"sub/file.css": {"text/css", []byte("sub style"), false},
	}

	workspace.Path = wdir
	build.Branch = "patch-1"
	build.Event = plugin.EventPull
	build.Ref = "refs/pull/123/merge"
	vargs.Source = "upload"
	vargs.BucketSuffix = "-suffix"
	vargs.Ignore = "sub/*.bin"
	vargs.Gzip = []string{"js"}
	vargs.CacheControl = "public,max-age=10"
	vargs.Metadata = map[string]string{"x-foo": "bar"}
	acls := []storage.ACLRule{{Entity: "allUsers", Role: "READER"}}
	vargs.ACL = []string{fmt.Sprintf("%s:%s", acls[0].Entity, acls[0].Role)}

	var seenMu sync.Mutex // guards seen
	seen := make(map[string]struct{}, len(files))

	okTransport := &fakeTransport{func(r *http.Request) (*http.Response, error) {
		return &http.Response{
			Body:       ioutil.NopCloser(strings.NewReader("")),
			Proto:      "HTTP/1.0",
			ProtoMajor: 1,
			ProtoMinor: 0,
			StatusCode: http.StatusOK,
		}, nil
	}}
	rt := &fakeTransport{func(r *http.Request) (resp *http.Response, e error) {
		resp = &http.Response{
			Body:       ioutil.NopCloser(strings.NewReader(`{"name": "fake"}`)),
			Proto:      "HTTP/1.0",
			ProtoMajor: 1,
			ProtoMinor: 0,
			StatusCode: http.StatusOK,
		}
		// skip bucket.Attr() requests
		if r.URL.Path == "/storage/v1/b/pr-123-suffix" && r.Method == "GET" {
			return resp, nil
		}
		up := "/upload/storage/v1/b/pr-123-suffix/o"
		if r.URL.Path != up {
			t.Errorf("r.URL.Path = %q; want %q", r.URL.Path, up)
		}
		_, mp, err := mime.ParseMediaType(r.Header.Get("content-type"))
		if err != nil {
			t.Errorf("ParseMediaType: %v", err)
			return
		}
		mr := multipart.NewReader(r.Body, mp["boundary"])

		// metadata
		p, err := mr.NextPart()
		if err != nil {
			t.Errorf("meta NextPart: %v", err)
			return
		}
		var attrs storage.ObjectAttrs
		if err := json.NewDecoder(p).Decode(&attrs); err != nil {
			t.Errorf("meta json: %v", err)
			return
		}
		seenMu.Lock()
		seen[attrs.Name] = struct{}{}
		seenMu.Unlock()
		obj := files[attrs.Name]
		if obj == nil {
			t.Errorf("unexpected obj: %+v", attrs)
			return
		}

		if attrs.Bucket != "pr-123-suffix" {
			t.Errorf("attrs.Bucket = %q; want pr-123-suffix", attrs.Bucket)
		}
		if attrs.CacheControl != vargs.CacheControl {
			t.Errorf("attrs.CacheControl = %q; want %q", attrs.CacheControl, vargs.CacheControl)
		}
		if obj.gzip && attrs.ContentEncoding != "gzip" {
			t.Errorf("attrs.ContentEncoding = %q; want gzip", attrs.ContentEncoding)
		}
		if !strings.HasPrefix(attrs.ContentType, obj.ctype) {
			t.Errorf("attrs.ContentType = %q; want %q", attrs.ContentType, obj.ctype)
		}
		if !reflect.DeepEqual(attrs.ACL, acls) {
			t.Errorf("attrs.ACL = %v; want %v", attrs.ACL, acls)
		}
		if !reflect.DeepEqual(attrs.Metadata, vargs.Metadata) {
			t.Errorf("attrs.Metadata = %+v; want %+v", attrs.Metadata, vargs.Metadata)
		}

		// media
		p, err = mr.NextPart()
		if err != nil {
			t.Errorf("media NextPart: %v", err)
			return
		}
		b, _ := ioutil.ReadAll(p)
		if attrs.ContentEncoding == "gzip" {
			b = gunzip(t, b)
		}
		if bytes.Compare(b, obj.body) != 0 {
			t.Errorf("media b = %q; want %q", b, obj.body)
		}
		return
	}}

	client.http = &http.Client{Transport: okTransport}
	client.ghub = github.NewClient(&http.Client{Transport: okTransport})
	hc := &http.Client{Transport: rt}
	if client.gcs, err = storage.NewClient(context.Background(), cloud.WithBaseHTTP(hc)); err != nil {
		t.Fatal(err)
	}

	run()
	for k := range files {
		if _, ok := seen[k]; !ok {
			t.Errorf("%s didn't get uploaded", k)
		}
	}
}
Example #21
// listDownloads lists all the files found in the monthly repo, and from them,
// builds the data that we'll feed to the template to generate the monthly
// downloads camweb page.
func listDownloads() (*ReleaseData, error) {
	ts, err := tokenSource(bucket)
	if err != nil {
		return nil, err
	}
	ctx := context.Background()
	stoClient, err := storage.NewClient(ctx, cloud.WithTokenSource(ts), cloud.WithBaseHTTP(oauth2.NewClient(ctx, ts)))
	if err != nil {
		return nil, err
	}
	objList, err := stoClient.Bucket(bucket).List(ctx, &storage.Query{Prefix: "monthly/"})
	if err != nil {
		return nil, err
	}

	platformBySuffix := map[string]string{
		"src.zip":       "Source",
		"linux.tar.gz":  "Linux",
		"darwin.tar.gz": "Darwin",
		"windows.zip":   "Windows",
	}
	getPlatform := func(name string) string {
		for suffix, platform := range platformBySuffix {
			if strings.HasSuffix(name, suffix) {
				return platform
			}
		}
		return ""
	}
	getChecksum := func(name string) (string, error) {
		r, err := stoClient.Bucket(bucket).Object(name).NewReader(ctx)
		if err != nil {
			return "", err
		}
		var buf bytes.Buffer
		if _, err := io.Copy(&buf, r); err != nil {
			return "", err
		}
		return buf.String(), nil
	}
	var date time.Time
	checkDate := func(objDate time.Time) error {
		if date.IsZero() {
			date = objDate
			return nil
		}
		d := date.Sub(objDate)
		if d < 0 {
			d = -d
		}
		if d < 24*time.Hour {
			return nil
		}
		return fmt.Errorf("objects in monthly have not been uploaded or updated the same day")
	}

	var (
		downloadData []DownloadData
		nameToSum    = make(map[string]string)
	)
	for _, attrs := range objList.Results {
		if !strings.Contains(attrs.Name, rev()) {
			continue
		}
		if err := checkDate(attrs.Updated); err != nil {
			return nil, err
		}
		if !strings.HasSuffix(attrs.Name, ".sha256") {
			continue
		}
		sum, err := getChecksum(attrs.Name)
		if err != nil {
			return nil, err
		}
		nameToSum[strings.TrimSuffix(attrs.Name, ".sha256")] = sum
	}
	for _, attrs := range objList.Results {
		if !strings.Contains(attrs.Name, rev()) {
			continue
		}
		if strings.HasSuffix(attrs.Name, ".sha256") {
			continue
		}
		sum, ok := nameToSum[attrs.Name]
		if !ok {
			return nil, fmt.Errorf("%v has no checksum file!", attrs.Name)
		}
		downloadData = append(downloadData, DownloadData{
			Filename: filepath.Base(attrs.Name),
			Platform: getPlatform(attrs.Name),
			Checksum: sum,
		})
	}

	return &ReleaseData{
		Date:     date.Format("2006-01-02"),
		Download: downloadData,
	}, nil
}
Example #22
func TestCondition(t *testing.T) {
	gotReq := make(chan *http.Request, 1)
	ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		io.Copy(ioutil.Discard, r.Body)
		gotReq <- r
		if r.Method == "POST" {
			w.WriteHeader(200)
		} else {
			w.WriteHeader(500)
		}
	}))
	defer ts.Close()

	tlsConf := &tls.Config{InsecureSkipVerify: true}
	tr := &http.Transport{
		TLSClientConfig: tlsConf,
		DialTLS: func(netw, addr string) (net.Conn, error) {
			return tls.Dial("tcp", ts.Listener.Addr().String(), tlsConf)
		},
	}
	defer tr.CloseIdleConnections()
	hc := &http.Client{Transport: tr}

	ctx := context.Background()
	c, err := NewClient(ctx, cloud.WithBaseHTTP(hc))
	if err != nil {
		t.Fatal(err)
	}

	obj := c.Bucket("buck").Object("obj")
	dst := c.Bucket("dstbuck").Object("dst")
	tests := []struct {
		fn   func()
		want string
	}{
		{
			func() { obj.WithConditions(Generation(1234)).NewReader(ctx) },
			"GET /buck/obj?generation=1234",
		},
		{
			func() { obj.WithConditions(IfGenerationMatch(1234)).NewReader(ctx) },
			"GET /buck/obj?ifGenerationMatch=1234",
		},
		{
			func() { obj.WithConditions(IfGenerationNotMatch(1234)).NewReader(ctx) },
			"GET /buck/obj?ifGenerationNotMatch=1234",
		},
		{
			func() { obj.WithConditions(IfMetaGenerationMatch(1234)).NewReader(ctx) },
			"GET /buck/obj?ifMetagenerationMatch=1234",
		},
		{
			func() { obj.WithConditions(IfMetaGenerationNotMatch(1234)).NewReader(ctx) },
			"GET /buck/obj?ifMetagenerationNotMatch=1234",
		},
		{
			func() { obj.WithConditions(IfMetaGenerationNotMatch(1234)).Attrs(ctx) },
			"GET https://www.googleapis.com/storage/v1/b/buck/o/obj?alt=json&ifMetagenerationNotMatch=1234&projection=full",
		},
		{
			func() { obj.WithConditions(IfMetaGenerationMatch(1234)).Update(ctx, ObjectAttrs{}) },
			"PATCH https://www.googleapis.com/storage/v1/b/buck/o/obj?alt=json&ifMetagenerationMatch=1234&projection=full",
		},
		{
			func() { obj.WithConditions(Generation(1234)).Delete(ctx) },
			"DELETE https://www.googleapis.com/storage/v1/b/buck/o/obj?alt=json&generation=1234",
		},
		{
			func() {
				w := obj.WithConditions(IfGenerationMatch(1234)).NewWriter(ctx)
				w.ContentType = "text/plain"
				w.Close()
			},
			"POST https://www.googleapis.com/upload/storage/v1/b/buck/o?alt=json&ifGenerationMatch=1234&projection=full&uploadType=multipart",
		},
		{
			func() {
				obj.WithConditions(IfGenerationMatch(1234)).CopyTo(ctx, dst.WithConditions(IfMetaGenerationMatch(5678)), nil)
			},
			"POST https://www.googleapis.com/storage/v1/b/buck/o/obj/copyTo/b/dstbuck/o/dst?alt=json&ifMetagenerationMatch=5678&ifSourceGenerationMatch=1234&projection=full",
		},
	}

	for i, tt := range tests {
		tt.fn()
		select {
		case r := <-gotReq:
			got := r.Method + " " + r.RequestURI
			if got != tt.want {
				t.Errorf("%d. RequestURI = %q; want %q", i, got, tt.want)
			}
		case <-time.After(5 * time.Second):
			t.Fatalf("%d. timeout", i)
		}
	}

	// Test an error, too:
	err = obj.WithConditions(Generation(1234)).NewWriter(ctx).Close()
	if err == nil || !strings.Contains(err.Error(), "NewWriter: condition Generation not supported") {
		t.Errorf("want error about unsupported condition; got %v", err)
	}
}
Example #23
func TestRetryUpload(t *testing.T) {
	wdir, err := ioutil.TempDir("", "drone-gcs-test")
	if err != nil {
		t.Fatal(err)
	}
	writeFile(t, wdir, "file", []byte("test"))
	vargs.Source = wdir
	sleep = func(time.Duration) {}
	backoff := []int{0, 2, 4, 8, 16, 32, 64} // seconds

	tests := []struct {
		nret, nerr int
		ok         bool
	}{
		{0, 0, true},
		{3, 0, true},
		{1, 2, false},
	}
	for i, test := range tests {
		var n int

		sleep = func(d time.Duration) {
			a := time.Duration(backoff[n]) * time.Second
			b := time.Duration(backoff[n]+1) * time.Second
			if d < a || d > b {
				t.Errorf("%d/%d: d = %v; want between %v and %v", i, n, d, a, b)
			}
		}

		rt := &fakeTransport{func(r *http.Request) (*http.Response, error) {
			_, mp, _ := mime.ParseMediaType(r.Header.Get("content-type"))
			mr := multipart.NewReader(r.Body, mp["boundary"])
			mr.NextPart() // skip metadata
			p, _ := mr.NextPart()
			b, _ := ioutil.ReadAll(p)
			// verify the body is always sent with correct content
			if v := string(b); v != "test" {
				t.Errorf("%d/%d: b = %q; want 'test'", i, n, b)
			}

			res := &http.Response{
				Body:       ioutil.NopCloser(strings.NewReader(`{"name": "fake"}`)),
				Proto:      "HTTP/1.0",
				ProtoMajor: 1,
				ProtoMinor: 0,
				StatusCode: http.StatusServiceUnavailable,
			}
			if n >= test.nerr {
				res.StatusCode = http.StatusOK
			}
			n++
			return res, nil
		}}
		hc := &http.Client{Transport: rt}
		client, _ := storage.NewClient(context.Background(), cloud.WithBaseHTTP(hc))
		bucket = client.Bucket("bucket")

		err := retryUpload("file", filepath.Join(wdir, "file"), test.nret)
		switch {
		case test.ok && err != nil:
			t.Errorf("%d: %v", i, err)
		case !test.ok && err == nil:
			t.Errorf("%d: want error", i)
		}
	}
}
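The retryUpload under test is not shown in this listing. A sketch consistent with what the test asserts (exponential backoff through the package-level sleep hook, and re-reading the file on every attempt) might look like this; every detail below is an assumption.
// retryUpload uploads the file to the package-level bucket handle, retrying up
// to nret additional times with exponential backoff via the sleep hook.
// Hypothetical sketch.
func retryUpload(object, path string, nret int) error {
	var err error
	for attempt := 0; attempt <= nret; attempt++ {
		if attempt > 0 {
			// Roughly 2, 4, 8, ... seconds, matching the test's backoff table.
			sleep(time.Duration(1<<uint(attempt)) * time.Second)
		}
		if err = uploadOnce(object, path); err == nil {
			return nil
		}
	}
	return err
}

// uploadOnce re-opens and streams the file, so the full body is sent again on
// every retry (which the test verifies).
func uploadOnce(object, path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	w := bucket.Object(object).NewWriter(context.Background())
	if _, err := io.Copy(w, f); err != nil {
		w.Close()
		return err
	}
	return w.Close()
}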
Example #24
func TestObjects(t *testing.T) {
	ctx := context.Background()
	client, bucket := testConfig(ctx, t)
	defer client.Close()

	bkt := client.Bucket(bucket)

	const defaultType = "text/plain"

	// Populate object names and make a map for their contents.
	objects := []string{
		"obj1" + suffix,
		"obj2" + suffix,
		"obj/with/slashes" + suffix,
	}
	contents := make(map[string][]byte)

	// Test Writer.
	for _, obj := range objects {
		t.Logf("Writing %q", obj)
		wc := bkt.Object(obj).NewWriter(ctx)
		wc.ContentType = defaultType
		c := randomContents()
		if _, err := wc.Write(c); err != nil {
			t.Errorf("Write for %v failed with %v", obj, err)
		}
		if err := wc.Close(); err != nil {
			t.Errorf("Close for %v failed with %v", obj, err)
		}
		contents[obj] = c
	}

	// Test Reader.
	for _, obj := range objects {
		t.Logf("Creating a reader to read %v", obj)
		rc, err := bkt.Object(obj).NewReader(ctx)
		if err != nil {
			t.Errorf("Can't create a reader for %v, errored with %v", obj, err)
		}
		slurp, err := ioutil.ReadAll(rc)
		if err != nil {
			t.Errorf("Can't ReadAll object %v, errored with %v", obj, err)
		}
		if got, want := slurp, contents[obj]; !bytes.Equal(got, want) {
			t.Errorf("Contents (%q) = %q; want %q", obj, got, want)
		}
		if got, want := rc.Size(), len(contents[obj]); got != int64(want) {
			t.Errorf("Size (%q) = %d; want %d", obj, got, want)
		}
		if got, want := rc.ContentType(), "text/plain"; got != want {
			t.Errorf("ContentType (%q) = %q; want %q", obj, got, want)
		}
		rc.Close()

		// Test SignedURL
		opts := &SignedURLOptions{
			GoogleAccessID: "xxx@clientid",
			PrivateKey:     dummyKey("rsa"),
			Method:         "GET",
			MD5:            []byte("202cb962ac59075b964b07152d234b70"),
			Expires:        time.Date(2020, time.October, 2, 10, 0, 0, 0, time.UTC),
			ContentType:    "application/json",
			Headers:        []string{"x-header1", "x-header2"},
		}
		u, err := SignedURL(bucket, obj, opts)
		if err != nil {
			t.Fatalf("SignedURL(%q, %q) errored with %v", bucket, obj, err)
		}
		res, err := client.hc.Get(u)
		if err != nil {
			t.Fatalf("Can't get URL %q: %v", u, err)
		}
		slurp, err = ioutil.ReadAll(res.Body)
		if err != nil {
			t.Fatalf("Can't ReadAll signed object %v, errored with %v", obj, err)
		}
		if got, want := slurp, contents[obj]; !bytes.Equal(got, want) {
			t.Errorf("Contents (%v) = %q; want %q", obj, got, want)
		}
		res.Body.Close()
	}

	obj := objects[0]
	objlen := int64(len(contents[obj]))
	// Test Range Reader.
	for i, r := range []struct {
		offset, length, want int64
	}{
		{0, objlen, objlen},
		{0, objlen / 2, objlen / 2},
		{objlen / 2, objlen, objlen / 2},
		{0, 0, 0},
		{objlen / 2, 0, 0},
		{objlen / 2, -1, objlen / 2},
		{0, objlen * 2, objlen},
	} {
		t.Logf("%d: bkt.Object(%v).NewRangeReader(ctx, %d, %d)", i, obj, r.offset, r.length)
		rc, err := bkt.Object(obj).NewRangeReader(ctx, r.offset, r.length)
		if err != nil {
			t.Errorf("%d: Can't create a range reader for %v, errored with %v", i, obj, err)
			continue
		}
		if rc.Size() != objlen {
			t.Errorf("%d: Reader has a content-size of %d, want %d", i, rc.Size(), objlen)
		}
		if rc.Remain() != r.want {
			t.Errorf("%d: Reader's available bytes reported as %d, want %d", i, rc.Remain(), r.want)
		}
		slurp, err := ioutil.ReadAll(rc)
		if err != nil {
			t.Errorf("%d:Can't ReadAll object %v, errored with %v", i, obj, err)
			continue
		}
		if len(slurp) != int(r.want) {
			t.Errorf("%d:RangeReader (%d, %d): Read %d bytes, wanted %d bytes", i, r.offset, r.length, len(slurp), r.want)
			continue
		}
		if got, want := slurp, contents[obj][r.offset:r.offset+r.want]; !bytes.Equal(got, want) {
			t.Errorf("RangeReader (%d, %d) = %q; want %q", r.offset, r.length, got, want)
		}
		rc.Close()
	}

	// Test NotFound.
	_, err := bkt.Object("obj-not-exists").NewReader(ctx)
	if err != ErrObjectNotExist {
		t.Errorf("Object should not exist, err found to be %v", err)
	}

	objName := objects[0]

	// Test StatObject.
	o, err := bkt.Object(objName).Attrs(ctx)
	if err != nil {
		t.Error(err)
	}
	if got, want := o.Name, objName; got != want {
		t.Errorf("Name (%v) = %q; want %q", objName, got, want)
	}
	if got, want := o.ContentType, defaultType; got != want {
		t.Errorf("ContentType (%v) = %q; want %q", objName, got, want)
	}
	created := o.Created
	// Check that the object is newer than its containing bucket.
	bAttrs, err := bkt.Attrs(ctx)
	if err != nil {
		t.Error(err)
	}
	if o.Created.Before(bAttrs.Created) {
		t.Errorf("Object %v is older than its containing bucket, %v", o, bAttrs)
	}

	// Test object copy.
	copyName := "copy-" + objName
	copyObj, err := bkt.Object(objName).CopyTo(ctx, bkt.Object(copyName), nil)
	if err != nil {
		t.Errorf("CopyTo failed with %v", err)
	}
	if copyObj.Name != copyName {
		t.Errorf("Copy object's name = %q; want %q", copyObj.Name, copyName)
	}
	if copyObj.Bucket != bucket {
		t.Errorf("Copy object's bucket = %q; want %q", copyObj.Bucket, bucket)
	}

	// Test UpdateAttrs.
	updated, err := bkt.Object(objName).Update(ctx, ObjectAttrs{
		ContentType: "text/html",
		ACL:         []ACLRule{{Entity: "domain-google.com", Role: RoleReader}},
	})
	if err != nil {
		t.Errorf("UpdateAttrs failed with %v", err)
	}
	if want := "text/html"; updated.ContentType != want {
		t.Errorf("updated.ContentType == %q; want %q", updated.ContentType, want)
	}
	if want := created; updated.Created != want {
		t.Errorf("updated.Created == %q; want %q", updated.Created, want)
	}
	if !updated.Created.Before(updated.Updated) {
		t.Errorf("updated.Updated should be newer than update.Created")
	}

	// Test checksums.
	checksumCases := []struct {
		name     string
		contents [][]byte
		size     int64
		md5      string
		crc32c   uint32
	}{
		{
			name:     "checksum-object",
			contents: [][]byte{[]byte("hello"), []byte("world")},
			size:     10,
			md5:      "fc5e038d38a57032085441e7fe7010b0",
			crc32c:   1456190592,
		},
		{
			name:     "zero-object",
			contents: [][]byte{},
			size:     0,
			md5:      "d41d8cd98f00b204e9800998ecf8427e",
			crc32c:   0,
		},
	}
	for _, c := range checksumCases {
		wc := bkt.Object(c.name).NewWriter(ctx)
		for _, data := range c.contents {
			if _, err := wc.Write(data); err != nil {
				t.Errorf("Write(%q) failed with %q", data, err)
			}
		}
		if err = wc.Close(); err != nil {
			t.Errorf("%q: close failed with %q", c.name, err)
		}
		obj := wc.Attrs()
		if got, want := obj.Size, c.size; got != want {
			t.Errorf("Object (%q) Size = %v; want %v", c.name, got, want)
		}
		if got, want := fmt.Sprintf("%x", obj.MD5), c.md5; got != want {
			t.Errorf("Object (%q) MD5 = %q; want %q", c.name, got, want)
		}
		if got, want := obj.CRC32C, c.crc32c; got != want {
			t.Errorf("Object (%q) CRC32C = %v; want %v", c.name, got, want)
		}
	}

	// Test public ACL.
	publicObj := objects[0]
	if err = bkt.Object(publicObj).ACL().Set(ctx, AllUsers, RoleReader); err != nil {
		t.Errorf("PutACLEntry failed with %v", err)
	}
	publicClient, err := NewClient(ctx, cloud.WithBaseHTTP(http.DefaultClient))
	if err != nil {
		t.Fatal(err)
	}
	rc, err := publicClient.Bucket(bucket).Object(publicObj).NewReader(ctx)
	if err != nil {
		t.Error(err)
	}
	slurp, err := ioutil.ReadAll(rc)
	if err != nil {
		t.Errorf("ReadAll failed with %v", err)
	}
	if !bytes.Equal(slurp, contents[publicObj]) {
		t.Errorf("Public object's content: got %q, want %q", slurp, contents[publicObj])
	}
	rc.Close()

	// Test writer error handling.
	wc := publicClient.Bucket(bucket).Object(publicObj).NewWriter(ctx)
	if _, err := wc.Write([]byte("hello")); err != nil {
		t.Errorf("Write unexpectedly failed with %v", err)
	}
	if err = wc.Close(); err == nil {
		t.Error("Close expected an error, found none")
	}

	// Test deleting the copy object.
	if err := bkt.Object(copyName).Delete(ctx); err != nil {
		t.Errorf("Deletion of %v failed with %v", copyName, err)
	}
	_, err = bkt.Object(copyName).Attrs(ctx)
	if err != ErrObjectNotExist {
		t.Errorf("Copy is expected to be deleted, stat errored with %v", err)
	}
}
Example #25
func newTestClient(rt http.RoundTripper) *errors.Client {
	t, err := errors.NewClient(context.Background(), testProjectID, "myservice", "v1.000", cloud.WithBaseHTTP(&http.Client{Transport: rt}))
	if err != nil {
		panic(err)
	}
	t.RepanicDefault = false
	return t
}