Example #1
func (s *shard) createOutputFile(c context.Context) (io.WriteCloser, error) {
	c, _ = context.WithTimeout(c, 10*time.Minute) // cancel dropped: the returned writer outlives this call
	// For development we can't use the App Engine default credentials, so
	// we need to create our own OAuth token source to access storage.

	// TODO: maybe give the job a chance to generate this - it could also
	// create the writer (?). The only reason we're doing it here is to prevent
	// duplication and to handle the file rollup operations.
	var client *cstorage.Client
	if appengine.IsDevAppServer() {
		jsonKey, err := ioutil.ReadFile("service-account.json")
		if err != nil {
			return nil, err
		}
		conf, err := google.JWTConfigFromJSON(jsonKey, cstorage.ScopeReadWrite)
		if err != nil {
			return nil, err
		}
		client, err = cstorage.NewClient(c, option.WithTokenSource(conf.TokenSource(c)))
		if err != nil {
			return nil, err
		}
	} else {
		var err error
		client, err = cstorage.NewClient(c)
		if err != nil {
			return nil, err
		}
	}

	o := client.Bucket(s.job.Bucket).Object(s.sliceFilename(s.Sequence)).NewWriter(c)

	// TODO: wrap writer to count bytes and continue slice if we get close to 10Mb limit (?)
	return o, nil
}
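The TODO above suggests wrapping the returned writer so bytes can be counted and a new slice started before the size limit. A minimal sketch of such a wrapper, assuming only the standard io package (the type name is illustrative, not part of the original project):

// countingWriteCloser is a hypothetical wrapper that tracks how many bytes
// have been written, so the caller can roll over to a new slice file near the limit.
type countingWriteCloser struct {
	io.WriteCloser
	n int64
}

func (c *countingWriteCloser) Write(p []byte) (int, error) {
	n, err := c.WriteCloser.Write(p)
	c.n += int64(n)
	return n, err
}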
Example #2
File: gcsbench.go Project: rsc/tmp
func main() {
	log.SetPrefix("gcsbench: ")
	log.SetFlags(0)

	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "usage: gcsbench -b bucketName\n")
		os.Exit(2)
	}
	flag.Parse()
	if *bucketName == "" || len(flag.Args()) > 0 {
		flag.Usage()
	}

	http.DefaultTransport = newLogger(http.DefaultTransport)

	start := time.Now()
	var (
		mu    sync.Mutex
		total time.Duration
	)
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatalf("storage.NewClient: %v", err)
	}
	sema := make(chan bool, *parallel)
	for i := 0; i < *count; i++ {
		sema <- true
		name := fmt.Sprintf("gcsbench/tmp.%d", i)
		go func() {
			start := time.Now()
			client := client
			if *newClient {
				var err error
				client, err = storage.NewClient(ctx)
				if err != nil {
					log.Fatalf("storage.NewClient: %v", err)
				}
			}
			obj := client.Bucket(*bucketName).Object(name)
			w := obj.NewWriter(ctx)
			if _, err := w.Write(make([]byte, *size)); err != nil {
				log.Fatalf("writing file: %v", err)
			}
			if err := w.Close(); err != nil {
				log.Fatalf("writing file: %v", err)
			}
			mu.Lock()
			total += time.Since(start)
			mu.Unlock()
			<-sema
		}()
	}
	for i := 0; i < *parallel; i++ {
		sema <- true
	}

	fmt.Printf("avg %.3fs per write\n", (total / time.Duration(*count)).Seconds())
	elapsed := time.Since(start)
	fmt.Printf("total %.3fs %.3f MB/s\n", elapsed.Seconds(), float64(*count)*float64(*size)/1e6/elapsed.Seconds())
}
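newLogger is defined elsewhere in gcsbench.go and is not shown here; a plausible sketch of a logging http.RoundTripper wrapper (hypothetical, not the project's actual implementation) looks like this:

// loggingTransport is a hypothetical http.RoundTripper that logs each request
// and its latency before delegating to the wrapped transport.
type loggingTransport struct {
	rt http.RoundTripper
}

func newLogger(rt http.RoundTripper) http.RoundTripper {
	return &loggingTransport{rt: rt}
}

func (t *loggingTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	start := time.Now()
	resp, err := t.rt.RoundTrip(req)
	log.Printf("%s %s %v err=%v", req.Method, req.URL, time.Since(start), err)
	return resp, err
}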
Example #3
func Setup(t *testing.T) *storage.Client {
	if testProject == "" || testBucket == "" {
		t.Skip("TESTPROJECT, and TESTBUCKET EnvVars must be set to perform integration test")
	}

	gcsctx := &cloudstorage.CloudStoreContext{
		LogggingContext: "testing-config",
		TokenSource:     cloudstorage.GCEDefaultOAuthToken,
		Project:         testProject,
		Bucket:          testBucket,
	}

	// Create http client with Google context auth
	googleClient, err := cloudstorage.NewGoogleClient(gcsctx)
	if err != nil {
		t.Fatalf("failed to create Google client: %v", err)
	}

	gsc, err := storage.NewClient(context.Background(), option.WithHTTPClient(googleClient.Client()))
	if err != nil {
		t.Fatalf("error creating Google Cloud Storage client. project:%s gs://%s/ err:%v",
			gcsctx.Project, gcsctx.Bucket, err)
	}
	if gsc == nil {
		t.Fatalf("storage client returned is nil")
	}
	return gsc
}
Example #4
func ExampleObjectHandle_Delete() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		// TODO: handle error.
	}
	// To delete multiple objects in a bucket, list them with an
	// ObjectIterator, then Delete them.

	// If you are using this package on the App Engine Flex runtime,
	// you can init a bucket client with your app's default bucket name.
	// See http://godoc.org/google.golang.org/appengine/file#DefaultBucketName.
	bucket := client.Bucket("my-bucket")
	it := bucket.Objects(ctx, nil)
	for {
		objAttrs, err := it.Next()
		if err == storage.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		if err := bucket.Object(objAttrs.Name).Delete(ctx); err != nil {
			// TODO: Handle error.
		}
	}
	fmt.Println("deleted all object items in the bucket specified.")
}
Example #5
func ExampleComposer_Run() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		// TODO: handle error.
	}
	bkt := client.Bucket("bucketname")
	src1 := bkt.Object("o1")
	src2 := bkt.Object("o2")
	dst := bkt.Object("o3")
	// Compose and modify metadata.
	c := dst.ComposerFrom(src1, src2)
	c.ContentType = "text/plain"
	attrs, err := c.Run(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(attrs)
	// Just compose.
	attrs, err = dst.ComposerFrom(src1, src2).Run(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(attrs)
}
Example #6
func ExampleCopier_Run() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		// TODO: handle error.
	}
	src := client.Bucket("bucketname").Object("file1")
	dst := client.Bucket("another-bucketname").Object("file2")

	// Copy content and modify metadata.
	copier := dst.CopierFrom(src)
	copier.ContentType = "text/plain"
	attrs, err := copier.Run(ctx)
	if err != nil {
		// TODO: Handle error, possibly resuming with copier.RewriteToken.
	}
	fmt.Println(attrs)

	// Just copy content.
	attrs, err = dst.CopierFrom(src).Run(ctx)
	if err != nil {
		// TODO: Handle error. No way to resume.
	}
	fmt.Println(attrs)
}
Example #7
// getInstalledTLS returns the TLS certificate and key stored on Google Cloud Storage for the
// instance defined in d.Conf.
//
// If either part of the TLS keypair doesn't exist, the error is os.ErrNotExist.
func (d *Deployer) getInstalledTLS() (certPEM, keyPEM []byte, err error) {
	ctx := context.Background()
	stoClient, err := cloudstorage.NewClient(ctx, option.WithHTTPClient(d.Client))
	if err != nil {
		return nil, nil, fmt.Errorf("error creating Cloud Storage client to fetch TLS cert & key from new instance: %v", err)
	}
	getFile := func(name string) ([]byte, error) {
		sr, err := stoClient.Bucket(d.Conf.bucketBase()).Object(path.Join(configDir, name)).NewReader(ctx)
		if err == cloudstorage.ErrObjectNotExist {
			return nil, os.ErrNotExist
		}
		if err != nil {
			return nil, err
		}
		defer sr.Close()
		return ioutil.ReadAll(sr)
	}
	var grp syncutil.Group
	grp.Go(func() (err error) {
		certPEM, err = getFile(certFilename())
		return
	})
	grp.Go(func() (err error) {
		keyPEM, err = getFile(keyFilename())
		return
	})
	err = grp.Err()
	return
}
Example #8
func deployerCredsFromGCS() (*gce.Config, error) {
	ctx := context.Background()
	sc, err := storage.NewClient(ctx)
	if err != nil {
		return nil, err
	}
	slurp := func(key string) ([]byte, error) {
		const bucket = "camlistore-website-resource"
		rc, err := sc.Bucket(bucket).Object(key).NewReader(ctx)
		if err != nil {
			return nil, fmt.Errorf("Error fetching GCS object %q in bucket %q: %v", key, bucket, err)
		}
		defer rc.Close()
		return ioutil.ReadAll(rc)
	}
	var cfg gce.Config
	data, err := slurp("launcher-config.json")
	if err != nil {
		return nil, err
	}
	if err := json.Unmarshal(data, &cfg); err != nil {
		return nil, fmt.Errorf("Could not JSON decode camli GCE launcher config: %v", err)
	}
	return &cfg, nil
}
Example #9
func NewGCS(name string, info map[string]string) (Backend, error) {
	b := &gcsBackend{
		name:       name,
		bucketName: info["bucket"],
	}
	keyJSON := []byte(info["key"])

	if b.bucketName == "" {
		return nil, fmt.Errorf("blobstore: missing Google Cloud Storage bucket param for %s", name)
	}
	if len(keyJSON) == 0 {
		return nil, fmt.Errorf("blobstore: missing Google Cloud Storage key JSON param for %s", name)
	}

	jwtToken, err := google.JWTConfigFromJSON(keyJSON, "https://www.googleapis.com/auth/devstorage.read_write")
	if err != nil {
		return nil, fmt.Errorf("blobstore: error loading Google Cloud Storage JSON key: %s", err)
	}
	tokenSource := jwtToken.TokenSource(context.Background())

	// Test getting an OAuth token so we can disambiguate an issue with the
	// token and an issue with the bucket permissions below.
	if _, err := tokenSource.Token(); err != nil {
		return nil, fmt.Errorf("blobstore: error getting Google Cloud Storage OAuth token: %s", err)
	}

	pemBlock, _ := pem.Decode(jwtToken.PrivateKey)
	if pemBlock == nil {
		return nil, fmt.Errorf("blobstore: error decoding Google Cloud Storage private key: no PEM block found")
	}
	privateKey, err := x509.ParsePKCS8PrivateKey(pemBlock.Bytes)
	if err != nil {
		return nil, fmt.Errorf("blobstore: error decoding Google Cloud Storage private key: %s", err)
	}
	rsaPrivateKey, ok := privateKey.(*rsa.PrivateKey)
	if !ok {
		return nil, fmt.Errorf("blobstore: unexpected Google Cloud Storage key type: %T", privateKey)
	}
	b.signOpts = func() *storage.SignedURLOptions {
		return &storage.SignedURLOptions{
			GoogleAccessID: jwtToken.Email,
			SignBytes: func(b []byte) ([]byte, error) {
				digest := sha256.Sum256(b)
				return rsa.SignPKCS1v15(rand.Reader, rsaPrivateKey, crypto.SHA256, digest[:])
			},
			Method:  "GET",
			Expires: time.Now().Add(10 * time.Minute),
		}
	}

	client, err := storage.NewClient(context.Background(), option.WithTokenSource(tokenSource))
	if err != nil {
		return nil, fmt.Errorf("blobstore: error creating Google Cloud Storage client: %s", err)
	}
	b.bucket = client.Bucket(b.bucketName)

	_, err = b.bucket.Attrs(context.Background())
	if err != nil {
		return nil, fmt.Errorf("blobstore: error checking Google Cloud Storage bucket %q existence, ensure that it exists and Owner access for %s is included the bucket ACL: %q", b.bucketName, jwtToken.Email, err)
	}
	return b, nil
}
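The signOpts closure is only constructed here; presumably the backend later passes it to storage.SignedURL when minting download links. A hedged usage sketch (the object name is illustrative):

// Mint a time-limited GET URL for an object using the options built above.
url, err := storage.SignedURL(b.bucketName, "some-object", b.signOpts())
if err != nil {
	// handle error
}
fmt.Println(url)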
Example #10
func init() {
	client, err := storage.NewClient(context.Background())
	if err != nil {
		log.Fatalf("Unable to get storage client: %v", err)
	}
	bucket = client.Bucket(bucketName)
}
Example #11
func upload(srcPath string) {
	if !*flagUpload {
		return
	}
	destName := strings.Replace(filepath.Base(srcPath), "camlistore", "camlistore-"+releaseDate.Format(fileDateFormat), 1)
	versionedTarball := "monthly/" + destName

	log.Printf("Uploading %s/%s ...", bucket, versionedTarball)

	ts, err := tokenSource(bucket)
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	stoClient, err := storage.NewClient(ctx, option.WithTokenSource(ts), option.WithHTTPClient(oauth2.NewClient(ctx, ts)))
	if err != nil {
		log.Fatal(err)
	}
	w := stoClient.Bucket(bucket).Object(versionedTarball).NewWriter(ctx)
	w.ACL = publicACL(project)
	w.CacheControl = "no-cache" // TODO: remove for non-tip releases? set expirations?
	contentType := "application/x-gtar"
	if strings.HasSuffix(versionedTarball, ".zip") {
		contentType = "application/zip"
	}
	w.ContentType = contentType
	csw := sha256.New()
	mw := io.MultiWriter(w, csw)

	src, err := os.Open(srcPath)
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	if _, err := io.Copy(mw, src); err != nil {
		log.Fatalf("io.Copy: %v", err)
	}
	if err := w.Close(); err != nil {
		log.Fatalf("closing GCS storage writer: %v", err)
	}
	log.Printf("Uploaded monthly tarball to %s", versionedTarball)

	// And upload the corresponding checksum
	checkSumFile := versionedTarball + ".sha256"
	sum := fmt.Sprintf("%x", csw.Sum(nil))
	w = stoClient.Bucket(bucket).Object(checkSumFile).NewWriter(ctx)
	w.ACL = publicACL(project)
	w.CacheControl = "no-cache" // TODO: remove for non-tip releases? set expirations?
	w.ContentType = "text/plain"
	if _, err := io.Copy(w, strings.NewReader(sum)); err != nil {
		log.Fatalf("error uploading checksum %v: %v", checkSumFile, err)
	}
	if err := w.Close(); err != nil {
		log.Fatalf("closing GCS storage writer: %v", err)
	}
	log.Printf("Uploaded monthly tarball checksum to %s", checkSumFile)
}
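tokenSource and publicACL are helpers defined elsewhere in the release tooling. As a hypothetical reconstruction (entity and role values are assumptions, not the project's actual code), publicACL might look like:

// publicACL is a hypothetical sketch: keep project owners as owners and
// make the object readable by everyone.
func publicACL(proj string) []storage.ACLRule {
	return []storage.ACLRule{
		{Entity: storage.ACLEntity("project-owners-" + proj), Role: storage.RoleOwner},
		{Entity: storage.AllUsers, Role: storage.RoleReader},
	}
}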
Example #12
func ExampleBucketHandle_Objects() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		// TODO: handle error.
	}
	it := client.Bucket("my-bucket").Objects(ctx, nil)
	_ = it // TODO: iterate using Next or NextPage.
}
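The iteration the TODO refers to usually looks like the sketch below; current releases of the client signal the end of iteration with iterator.Done from google.golang.org/api/iterator (older releases used storage.Done, as in Example #4):

for {
	objAttrs, err := it.Next()
	if err == iterator.Done {
		break
	}
	if err != nil {
		// TODO: handle error.
	}
	fmt.Println(objAttrs.Name)
}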
Example #13
func ExampleObjectHandle_NewWriter() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		// TODO: handle error.
	}
	wc := client.Bucket("bucketname").Object("filename1").NewWriter(ctx)
	_ = wc // TODO: Use the Writer.
}
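A minimal way to act on the TODO: write the data, then check the error from Close, which is where most upload failures are reported:

if _, err := wc.Write([]byte("hello world")); err != nil {
	// TODO: handle error.
}
if err := wc.Close(); err != nil {
	// TODO: handle error.
}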
Example #14
func setup(t *testing.T) *storage.Client {
	ctx := context.Background()

	client, err := storage.NewClient(ctx)
	if err != nil {
		t.Fatalf("failed to create client: %v", err)
	}
	return client
}
Example #15
func ExampleClient_Buckets() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		// TODO: handle error.
	}
	it := client.Bucket("my-bucket")
	_ = it // TODO: iterate using Next or iterator.Pager.
}
Example #16
File: gcs.go Project: naunga/vault
// newGCSBackend constructs a Google Cloud Storage backend using a pre-existing
// bucket. Credentials can be provided to the backend, sourced
// from environment variables or a service account file
func newGCSBackend(conf map[string]string, logger log.Logger) (Backend, error) {

	bucketName := os.Getenv("GOOGLE_STORAGE_BUCKET")

	if bucketName == "" {
		bucketName = conf["bucket"]
		if bucketName == "" {
			return nil, fmt.Errorf("env var GOOGLE_STORAGE_BUCKET or configuration parameter 'bucket' must be set")
		}
	}

	// path to service account JSON file
	credentialsFile := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")
	if credentialsFile == "" {
		credentialsFile = conf["credentials_file"]
		if credentialsFile == "" {
			return nil, fmt.Errorf("env var GOOGLE_APPLICATION_CREDENTIALS or configuration parameter 'credentials_file' must be set")
		}
	}

	client, err := storage.NewClient(
		context.Background(),
		option.WithServiceAccountFile(credentialsFile),
	)

	if err != nil {
		return nil, fmt.Errorf("error establishing storage client: '%v'", err)
	}

	// check client connectivity by getting bucket attributes
	_, err = client.Bucket(bucketName).Attrs(context.Background())
	if err != nil {
		return nil, fmt.Errorf("unable to access bucket '%s': '%v'", bucketName, err)
	}

	maxParStr, ok := conf["max_parallel"]
	var maxParInt int
	if ok {
		maxParInt, err = strconv.Atoi(maxParStr)
		if err != nil {
			return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
		}
		if logger.IsDebug() {
			logger.Debug("physical/gcs: max_parallel set", "max_parallel", maxParInt)
		}
	}

	g := GCSBackend{
		bucketName: bucketName,
		client:     client,
		permitPool: NewPermitPool(maxParInt),
		logger:     logger,
	}

	return &g, nil
}
Example #17
func ExampleBucketHandle_Create() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		// TODO: handle error.
	}
	if err := client.Bucket("my-bucket").Create(ctx, "my-project", nil); err != nil {
		// TODO: handle error.
	}
}
Example #18
func ExampleBucketHandle_Delete() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		// TODO: handle error.
	}
	if err := client.Bucket("my-bucket").Delete(ctx); err != nil {
		// TODO: handle error.
	}
}
Example #19
// uploadReleaseTarball uploads the generated tarball of binaries in
// camlistore-release/VERSION/camlistoreVERSION-REV-CONTENTS.EXT. It then makes a copy in
// the same bucket and path, as camlistoreVERSION-CONTENTS.EXT.
func uploadReleaseTarball() {
	proj := "camlistore-website"
	bucket := "camlistore-release"
	tarball := *flagVersion + "/" + filepath.Base(releaseTarball)
	versionedTarball := strings.Replace(tarball, "camlistore"+*flagVersion, "camlistore"+*flagVersion+"-"+rev(), 1)

	log.Printf("Uploading %s/%s ...", bucket, versionedTarball)

	ts, err := tokenSource(bucket)
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	stoClient, err := storage.NewClient(ctx, option.WithTokenSource(ts), option.WithHTTPClient(oauth2.NewClient(ctx, ts)))
	if err != nil {
		log.Fatal(err)
	}
	w := stoClient.Bucket(bucket).Object(versionedTarball).NewWriter(ctx)
	w.ACL = publicACL(proj)
	w.CacheControl = "no-cache" // TODO: remove for non-tip releases? set expirations?
	contentType := "application/x-gtar"
	if *buildOS == "windows" {
		contentType = "application/zip"
	}
	w.ContentType = contentType

	src, err := os.Open(releaseTarball)
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	if _, err := io.Copy(w, src); err != nil {
		log.Fatalf("io.Copy: %v", err)
	}
	if err := w.Close(); err != nil {
		log.Fatalf("closing GCS storage writer: %v", err)
	}
	log.Printf("Uploaded tarball to %s", versionedTarball)
	if !isWIP() {
		log.Printf("Copying tarball to %s/%s ...", bucket, tarball)
		dest := stoClient.Bucket(bucket).Object(tarball)
		if _, err := stoClient.Bucket(bucket).Object(versionedTarball).CopyTo(
			ctx,
			dest,
			&storage.ObjectAttrs{
				ACL:         publicACL(proj),
				ContentType: contentType,
			}); err != nil {
			log.Fatalf("Error uploading %v: %v", tarball, err)
		}
		log.Printf("Uploaded tarball to %s", tarball)
	}
}
Example #20
func ExampleACLHandle_Delete() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		// TODO: handle error.
	}
	// No longer grant access to the bucket to everyone on the Internet.
	if err := client.Bucket("my-bucket").ACL().Delete(ctx, storage.AllUsers); err != nil {
		// TODO: handle error.
	}
}
Example #21
func ExampleBucketHandle_Attrs() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		// TODO: handle error.
	}
	attrs, err := client.Bucket("my-bucket").Attrs(ctx)
	if err != nil {
		// TODO: handle error.
	}
	fmt.Println(attrs)
}
Example #22
func main() {
	projectID := os.Getenv("GOOGLE_CLOUD_PROJECT")
	if projectID == "" {
		fmt.Fprintf(os.Stderr, "GOOGLE_CLOUD_PROJECT environment variable must be set.\n")
		os.Exit(1)
	}
	var o string
	flag.StringVar(&o, "o", "", "source object; in the format of <bucket:object>")
	flag.Parse()

	names := strings.Split(o, ":")
	if len(names) < 2 {
		usage("missing -o flag")
	}
	bucket, object := names[0], names[1]

	if flag.NArg() < 1 {
		usage("missing subcommand")
	}

	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}

	switch flag.Arg(0) {
	case "write":
		if err := write(client, bucket, object); err != nil {
			log.Fatalf("Cannot write object: %v", err)
		}
	case "read":
		data, err := read(client, bucket, object)
		if err != nil {
			log.Fatalf("Cannot read object: %v", err)
		}
		fmt.Printf("Object contents: %s\n", data)
	case "metadata":
		attrs, err := attrs(client, bucket, object)
		if err != nil {
			log.Fatalf("Cannot get object metadata: %v", err)
		}
		fmt.Printf("Object metadata: %v\n", attrs)
	case "makepublic":
		if err := makePublic(client, bucket, object); err != nil {
			log.Fatalf("Cannot to make object public: %v", err)
		}
	case "delete":
		if err := delete(client, bucket, object); err != nil {
			log.Fatalf("Cannot to delete object: %v", err)
		}
	}
}
Example #23
func ExampleACLHandle_Set() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		// TODO: handle error.
	}
	// Let any authenticated user read my-bucket/my-object.
	obj := client.Bucket("my-bucket").Object("my-object")
	if err := obj.ACL().Set(ctx, storage.AllAuthenticatedUsers, storage.RoleReader); err != nil {
		// TODO: handle error.
	}
}
Example #24
func TestImportExport(t *testing.T) {
	tc := testutil.SystemTest(t)
	ctx := context.Background()

	client, err := bigquery.NewClient(ctx, tc.ProjectID)
	if err != nil {
		t.Fatal(err)
	}
	storageClient, err := storage.NewClient(ctx)
	if err != nil {
		t.Fatal(err)
	}

	datasetID := fmt.Sprintf("golang_example_dataset_importexport_%d", time.Now().Unix())
	tableID := fmt.Sprintf("golang_example_dataset_importexport_%d", time.Now().Unix())
	if err := createDataset(client, datasetID); err != nil {
		t.Errorf("failed to create dataset: %v", err)
	}
	schema := bigquery.Schema{
		&bigquery.FieldSchema{Name: "Year", Type: bigquery.IntegerFieldType},
		&bigquery.FieldSchema{Name: "City", Type: bigquery.StringFieldType},
	}
	if err := client.Dataset(datasetID).Table(tableID).Create(ctx, schema); err != nil {
		t.Errorf("failed to create dataset: %v", err)
	}
	defer deleteDataset(t, ctx, datasetID)

	if err := importFromFile(client, datasetID, tableID, "testdata/olympics.csv"); err != nil {
		t.Fatalf("failed to import from file: %v", err)
	}

	bucket := fmt.Sprintf("golang-example-bigquery-importexport-bucket-%d", time.Now().Unix())
	const object = "values.csv"

	if err := storageClient.Bucket(bucket).Create(ctx, tc.ProjectID, nil); err != nil {
		t.Fatalf("cannot create bucket: %v", err)
	}

	gcsURI := fmt.Sprintf("gs://%s/%s", bucket, object)
	if err := exportToGCS(client, datasetID, tableID, gcsURI); err != nil {
		t.Errorf("failed to export to %v: %v", gcsURI, err)
	}

	// Cleanup the bucket and object.
	if err := storageClient.Bucket(bucket).Object(object).Delete(ctx); err != nil {
		t.Errorf("failed to cleanup the GCS object: %v", err)
	}
	time.Sleep(time.Second) // Give it a second, due to eventual consistency.
	if err := storageClient.Bucket(bucket).Delete(ctx); err != nil {
		t.Errorf("failed to cleanup the GCS bucket: %v", err)
	}
}
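importFromFile and exportToGCS are helpers defined elsewhere in the samples. As a hedged sketch under that assumption, exportToGCS could be built on the BigQuery extract API roughly like this (error handling simplified):

// Hypothetical sketch: extract a table to a CSV object in Cloud Storage.
func exportToGCS(client *bigquery.Client, datasetID, tableID, gcsURI string) error {
	ctx := context.Background()
	gcsRef := bigquery.NewGCSReference(gcsURI)
	extractor := client.Dataset(datasetID).Table(tableID).ExtractorTo(gcsRef)
	job, err := extractor.Run(ctx)
	if err != nil {
		return err
	}
	status, err := job.Wait(ctx)
	if err != nil {
		return err
	}
	return status.Err()
}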
Example #25
func ExampleACLHandle_List() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		// TODO: handle error.
	}
	// List the default object ACLs for my-bucket.
	aclRules, err := client.Bucket("my-bucket").DefaultObjectACL().List(ctx)
	if err != nil {
		// TODO: handle error.
	}
	fmt.Println(aclRules)
}
Example #26
func ExampleNewClient() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		// TODO: handle error.
	}
	// Use the client.

	// Close the client when finished.
	if err := client.Close(); err != nil {
		// TODO: handle error.
	}
}
Example #27
func TestWriteRead(t *testing.T) {
	if !metadata.OnGCE() {
		t.Skipf("Not testing on GCE")
	}
	if *flagBucket == "" {
		t.Skipf("No bucket specified")
	}
	ctx := context.Background()
	cl, err := storage.NewClient(ctx)
	if err != nil {
		t.Fatalf("storage.NewClient: %v", err)
	}
	it := cl.Bucket(*flagBucket).Objects(ctx, nil)
	if _, err := it.Next(); err != iterator.Done {
		if err == nil {
			t.Fatalf("Bucket %v is not empty, aborting test.", *flagBucket)
		}
		t.Fatalf("unexpected bucket iteration error: %v", err)
	}
	filename := "camli-gcs_test.txt"
	defer func() {
		if err := cl.Bucket(*flagBucket).Object(filename).Delete(ctx); err != nil {
			t.Fatalf("error while cleaning up: %v", err)
		}
	}()

	// Write to camli-gcs_test.txt
	gcsPath := "/gcs/" + *flagBucket + "/" + filename
	f, err := wkfs.Create(gcsPath)
	if err != nil {
		t.Fatalf("error creating %v: %v", gcsPath, err)
	}
	data := "Hello World"
	if _, err := io.Copy(f, strings.NewReader(data)); err != nil {
		t.Fatalf("error writing to %v: %v", gcsPath, err)
	}
	if err := f.Close(); err != nil {
		t.Fatalf("error closing %v: %v", gcsPath, err)
	}

	// Read back from camli-gcs_test.txt
	g, err := wkfs.Open(gcsPath)
	if err != nil {
		t.Fatalf("error opening %v: %v", gcsPath, err)
	}
	defer g.Close()
	var buf bytes.Buffer
	if _, err := io.Copy(&buf, g); err != nil {
		t.Fatalf("error reading %v: %v", gcsPath, err)
	}
	if buf.String() != data {
		t.Fatalf("error with %v contents: got %v, wanted %v", gcsPath, buf.String(), data)
	}
}
Example #28
func main() {
	ctx := context.Background()

	var err error
	storageClient, err = storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}

	http.HandleFunc("/", formHandler)
	http.HandleFunc("/upload", uploadHandler)

	appengine.Main()
}
Example #29
func ExampleObjectHandle_CopierFrom_rotateEncryptionKeys() {
	// To rotate the encryption key on an object, copy it onto itself.
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		// TODO: handle error.
	}
	obj := client.Bucket("bucketname").Object("obj")
	// Assume obj is encrypted with key1, and we want to change to key2.
	_, err = obj.Key(key2).CopierFrom(obj.Key(key1)).Run(ctx)
	if err != nil {
		// TODO: handle error.
	}
}
Example #30
func ExampleObjectHandle_CopyTo() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		// TODO: handle error.
	}
	src := client.Bucket("bucketname").Object("file1")
	dst := client.Bucket("another-bucketname").Object("file2")

	o, err := src.CopyTo(ctx, dst, nil)
	if err != nil {
		// TODO: handle error.
	}
	fmt.Println("copied file:", o)
}