func ExampleBucketHandle_List() {
	ctx := context.Background()
	var client *storage.Client // See Example (Auth)

	// If you are using this package on the App Engine Flex runtime,
	// you can init a bucket client with your app's default bucket name.
	// See http://godoc.org/google.golang.org/appengine/file#DefaultBucketName.
	var query *storage.Query
	for {
		objects, err := client.Bucket("bucketname").List(ctx, query)
		if err != nil {
			log.Fatal(err)
		}
		for _, obj := range objects.Results {
			log.Printf("object name: %s, size: %v", obj.Name, obj.Size)
		}
		// If there are more results, objects.Next will be non-nil.
		if objects.Next == nil {
			break
		}
		query = objects.Next
	}
	log.Println("paginated through all object items in the bucket you specified.")
}
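// listDefaultBucket is a hypothetical companion to the example above: it
// resolves the App Engine default bucket name referenced in the comment
// instead of hard-coding one. The function name is illustrative, and it
// assumes the google.golang.org/appengine/file package is available.
func listDefaultBucket(ctx context.Context, client *storage.Client) error {
	bucketName, err := file.DefaultBucketName(ctx)
	if err != nil {
		return err
	}
	// A nil query lists every object in the bucket.
	objects, err := client.Bucket(bucketName).List(ctx, nil)
	if err != nil {
		return err
	}
	for _, obj := range objects.Results {
		log.Printf("object name: %s, size: %v", obj.Name, obj.Size)
	}
	return nil
}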
func (s *shard) createOutputFile(c context.Context) (io.WriteCloser, error) {
	// The cancel func is deliberately discarded: the returned writer keeps
	// using this context after we return, so it must not be cancelled here.
	// The timeout alone bounds the write and releases resources.
	c, _ = context.WithTimeout(c, 10*time.Minute)

	// For development we can't use the appengine default credentials, so
	// instead we need to create our own OAuth token source to access storage.
	//
	// TODO: maybe give job a chance to generate this - it could also
	// create the writer (?). The only reason we're doing it is to prevent
	// duplication and also handle the file rollup operations.
	var client *cstorage.Client
	if appengine.IsDevAppServer() {
		jsonKey, err := ioutil.ReadFile("service-account.json")
		if err != nil {
			return nil, err
		}
		conf, err := google.JWTConfigFromJSON(jsonKey, cstorage.ScopeReadWrite)
		if err != nil {
			return nil, err
		}
		client, err = cstorage.NewClient(c, option.WithTokenSource(conf.TokenSource(c)))
		if err != nil {
			return nil, err
		}
	} else {
		var err error
		client, err = cstorage.NewClient(c)
		if err != nil {
			return nil, err
		}
	}

	o := client.Bucket(s.job.Bucket).Object(s.sliceFilename(s.Sequence)).NewWriter(c)
	// TODO: wrap writer to count bytes and continue slice if we get close to 10Mb limit (?)
	return o, nil
}
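// countingWriter is a hypothetical sketch of the TODO above: a wrapper that
// tracks bytes written so the caller can roll over to a new slice file before
// approaching a size limit. The type and field names are illustrative, not
// part of the original code.
type countingWriter struct {
	w io.WriteCloser
	n int64 // bytes written so far
}

func (cw *countingWriter) Write(p []byte) (int, error) {
	n, err := cw.w.Write(p)
	cw.n += int64(n)
	return n, err
}

func (cw *countingWriter) Close() error { return cw.w.Close() }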
func addObjectACL(client *storage.Client, bucket, object string) error {
	ctx := context.Background()
	acl := client.Bucket(bucket).Object(object).ACL()
	if err := acl.Set(ctx, storage.AllAuthenticatedUsers, storage.RoleReader); err != nil {
		return err
	}
	return nil
}
func deleteDefaultBucketACL(client *storage.Client, bucket string) error {
	ctx := context.Background()
	acl := client.Bucket(bucket).DefaultObjectACL()
	if err := acl.Delete(ctx, storage.AllAuthenticatedUsers); err != nil {
		return err
	}
	return nil
}
func delete(client *storage.Client, bucketName string) error {
	ctx := context.Background()
	// [START delete_bucket]
	if err := client.Bucket(bucketName).Delete(ctx); err != nil {
		return err
	}
	// [END delete_bucket]
	return nil
}
func create(client *storage.Client, projectID, bucketName string) error {
	ctx := context.Background()
	// [START create_bucket]
	if err := client.Bucket(bucketName).Create(ctx, projectID, nil); err != nil {
		return err
	}
	// [END create_bucket]
	return nil
}
func delete(client *storage.Client, bucket, object string) error {
	ctx := context.Background()
	// [START delete_file]
	o := client.Bucket(bucket).Object(object)
	if err := o.Delete(ctx); err != nil {
		return err
	}
	// [END delete_file]
	return nil
}
func makePublic(client *storage.Client, bucket, object string) error {
	ctx := context.Background()
	// [START public]
	acl := client.Bucket(bucket).Object(object).ACL()
	if err := acl.Set(ctx, storage.AllUsers, storage.RoleReader); err != nil {
		return err
	}
	// [END public]
	return nil
}
func attrs(client *storage.Client, bucket, object string) (*storage.ObjectAttrs, error) {
	ctx := context.Background()
	// [START get_metadata]
	o := client.Bucket(bucket).Object(object)
	attrs, err := o.Attrs(ctx)
	if err != nil {
		return nil, err
	}
	return attrs, nil
	// [END get_metadata]
}
func objectACL(client *storage.Client, bucket, object string) error {
	ctx := context.Background()
	rules, err := client.Bucket(bucket).Object(object).ACL().List(ctx)
	if err != nil {
		return err
	}
	for _, rule := range rules {
		fmt.Printf("ACL rule: %v\n", rule)
	}
	return nil
}
func copyToBucket(client *storage.Client, dstBucket, srcBucket, srcObject string) error {
	ctx := context.Background()
	// [START copy_file]
	dstObject := srcObject + "-copy"
	src := client.Bucket(srcBucket).Object(srcObject)
	dst := client.Bucket(dstBucket).Object(dstObject)
	if _, err := dst.CopierFrom(src).Run(ctx); err != nil {
		return err
	}
	// [END copy_file]
	return nil
}
func createWithAttrs(client *storage.Client, projectID, bucketName string) error {
	ctx := context.Background()
	// [START create_bucket_with_storageclass_and_location]
	bucket := client.Bucket(bucketName)
	if err := bucket.Create(ctx, projectID, &storage.BucketAttrs{
		StorageClass: "COLDLINE",
		Location:     "asia",
	}); err != nil {
		return err
	}
	// [END create_bucket_with_storageclass_and_location]
	return nil
}
func objectACLFiltered(client *storage.Client, bucket, object string, entity storage.ACLEntity) error {
	ctx := context.Background()
	rules, err := client.Bucket(bucket).Object(object).ACL().List(ctx)
	if err != nil {
		return err
	}
	for _, r := range rules {
		if r.Entity == entity {
			fmt.Printf("ACL rule role: %v\n", r.Role)
		}
	}
	return nil
}
// GetObject gets a single object's bytes based on the bucket and name parameters.
func GetObject(gc *storage.Client, bucket, name string) (*bytes.Buffer, error) {
	rc, err := gc.Bucket(bucket).Object(name).NewReader(context.Background())
	if err != nil {
		if err == storage.ErrObjectNotExist {
			return nil, cloudstorage.ObjectNotFound
		}
		return nil, err
	}
	defer rc.Close()

	by, err := ioutil.ReadAll(rc)
	if err != nil {
		return nil, err
	}
	return bytes.NewBuffer(by), nil
}
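// readOrDefault is a hypothetical caller of GetObject above, showing the
// cloudstorage.ObjectNotFound sentinel in use; the function name and the
// empty-buffer fallback are illustrative assumptions, not part of the
// original code.
func readOrDefault(gc *storage.Client, bucket, name string) (*bytes.Buffer, error) {
	buf, err := GetObject(gc, bucket, name)
	if err == cloudstorage.ObjectNotFound {
		// Treat a missing object as empty rather than an error.
		return &bytes.Buffer{}, nil
	}
	return buf, err
}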
func writeEncryptedObject(client *storage.Client, bucket, object string, secretKey []byte) error {
	ctx := context.Background()
	// [START storage_upload_encrypted_file]
	obj := client.Bucket(bucket).Object(object)
	// Encrypt the object's contents.
	wc := obj.Key(secretKey).NewWriter(ctx)
	if _, err := wc.Write([]byte("top secret")); err != nil {
		return err
	}
	if err := wc.Close(); err != nil {
		return err
	}
	// [END storage_upload_encrypted_file]
	return nil
}
func read(client *storage.Client, bucket, object string) ([]byte, error) {
	ctx := context.Background()
	// [START download_file]
	rc, err := client.Bucket(bucket).Object(object).NewReader(ctx)
	if err != nil {
		return nil, err
	}
	defer rc.Close()

	data, err := ioutil.ReadAll(rc)
	if err != nil {
		return nil, err
	}
	return data, nil
	// [END download_file]
}
func rotateEncryptionKey(client *storage.Client, bucket, object string, key, newKey []byte) error {
	ctx := context.Background()
	// [START storage_rotate_encryption_key]
	client, err := storage.NewClient(ctx)
	if err != nil {
		return err
	}
	obj := client.Bucket(bucket).Object(object)
	// obj is encrypted with key; re-encrypt it with newKey.
	_, err = obj.Key(newKey).CopierFrom(obj.Key(key)).Run(ctx)
	if err != nil {
		return err
	}
	// [END storage_rotate_encryption_key]
	return nil
}
func move(client *storage.Client, bucket, object string) error {
	ctx := context.Background()
	// [START move_file]
	dstName := object + "-rename"
	src := client.Bucket(bucket).Object(object)
	dst := client.Bucket(bucket).Object(dstName)
	if _, err := dst.CopierFrom(src).Run(ctx); err != nil {
		return err
	}
	if err := src.Delete(ctx); err != nil {
		return err
	}
	// [END move_file]
	return nil
}
func list(client *storage.Client, bucket string) error {
	ctx := context.Background()
	// [START storage_list_files]
	it := client.Bucket(bucket).Objects(ctx, nil)
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			return err
		}
		fmt.Println(attrs.Name)
	}
	// [END storage_list_files]
	return nil
}
func list(client *storage.Client, projectID string) ([]string, error) {
	ctx := context.Background()
	// [START list_buckets]
	var buckets []string
	it := client.Buckets(ctx, projectID)
	for {
		battrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			return nil, err
		}
		buckets = append(buckets, battrs.Name)
	}
	// [END list_buckets]
	return buckets, nil
}
func readEncryptedObject(client *storage.Client, bucket, object string, secretKey []byte) ([]byte, error) {
	ctx := context.Background()
	// [START storage_download_encrypted_file]
	obj := client.Bucket(bucket).Object(object)
	rc, err := obj.Key(secretKey).NewReader(ctx)
	if err != nil {
		return nil, err
	}
	defer rc.Close()

	data, err := ioutil.ReadAll(rc)
	if err != nil {
		return nil, err
	}
	// [END storage_download_encrypted_file]
	return data, nil
}
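// exampleEncryptedRoundTrip is a minimal usage sketch (not from the original
// source) tying the two encrypted-object helpers together. Customer-supplied
// encryption keys for Cloud Storage must be 32 bytes (AES-256); crypto/rand
// generates one here. The bucket and object names are illustrative.
func exampleEncryptedRoundTrip(client *storage.Client) error {
	key := make([]byte, 32)
	if _, err := rand.Read(key); err != nil {
		return err
	}
	if err := writeEncryptedObject(client, "my-bucket", "secret-object", key); err != nil {
		return err
	}
	data, err := readEncryptedObject(client, "my-bucket", "secret-object", key)
	if err != nil {
		return err
	}
	fmt.Printf("%s\n", data)
	return nil
}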
// bucketNames collects the names of all buckets for the test project.
func bucketNames(ctx context.Context, client *storage.Client) []string {
	var names []string
	it := client.Buckets(ctx, testProjectID)
loop:
	for {
		b, err := it.Next()
		switch err {
		case nil:
			names = append(names, b.Name)
		case iterator.Done:
			break loop
		default:
			log.Printf("listing buckets: %v", err)
			break loop
		}
	}
	return names
}
func write(client *storage.Client, bucket, object string) error {
	ctx := context.Background()
	// [START upload_file]
	f, err := os.Open("notes.txt")
	if err != nil {
		return err
	}
	defer f.Close()

	wc := client.Bucket(bucket).Object(object).NewWriter(ctx)
	if _, err = io.Copy(wc, f); err != nil {
		return err
	}
	if err := wc.Close(); err != nil {
		return err
	}
	// [END upload_file]
	return nil
}
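// writeWithContentType is a hypothetical variant of write above: the
// storage.Writer embeds ObjectAttrs, so metadata such as ContentType can be
// set before the first Write. The function name is illustrative, not part of
// the original sample.
func writeWithContentType(client *storage.Client, bucket, object string) error {
	ctx := context.Background()
	f, err := os.Open("notes.txt")
	if err != nil {
		return err
	}
	defer f.Close()

	wc := client.Bucket(bucket).Object(object).NewWriter(ctx)
	wc.ContentType = "text/plain" // metadata must be set before data is written
	if _, err := io.Copy(wc, f); err != nil {
		return err
	}
	return wc.Close()
}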
func listByPrefix(client *storage.Client, bucket, prefix, delim string) error {
	ctx := context.Background()
	// [START storage_list_files_with_prefix]
	// Prefixes and delimiters can be used to emulate directory listings.
	// Prefixes can be used to filter objects starting with prefix.
	// The delimiter argument can be used to restrict the results to only the
	// objects in the given "directory". Without the delimiter, the entire tree
	// under the prefix is returned.
	//
	// For example, given these blobs:
	//   /a/1.txt
	//   /a/b/2.txt
	//
	// If you just specify prefix="/a", you'll get back:
	//   /a/1.txt
	//   /a/b/2.txt
	//
	// However, if you specify prefix="/a" and delim="/", you'll get back:
	//   /a/1.txt
	it := client.Bucket(bucket).Objects(ctx, &storage.Query{
		Prefix:    prefix,
		Delimiter: delim,
	})
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			return err
		}
		fmt.Println(attrs.Name)
	}
	// [END storage_list_files_with_prefix]
	return nil
}
// initSinks is called just before TestMain calls m.Run.
// It returns a cleanup function to be called after the tests finish.
func initSinks(ctx context.Context) func() {
	// Create a unique GCS bucket so concurrent tests don't interfere with each other.
	testBucketPrefix := testProjectID + "-log-sink"
	testBucket := ltesting.UniqueID(testBucketPrefix)
	testSinkDestination = "storage.googleapis.com/" + testBucket

	var storageClient *storage.Client
	if integrationTest {
		// Create a unique bucket as a sink destination, and grant the cloud
		// logging account the owner role.
		ts := testutil.TokenSource(ctx, storage.ScopeFullControl)
		var err error
		storageClient, err = storage.NewClient(ctx, option.WithTokenSource(ts))
		if err != nil {
			log.Fatalf("new storage client: %v", err)
		}
		bucket := storageClient.Bucket(testBucket)
		if err := bucket.Create(ctx, testProjectID, nil); err != nil {
			log.Fatalf("creating storage bucket %q: %v", testBucket, err)
		}
		if err := bucket.ACL().Set(ctx, "*****@*****.**", storage.RoleOwner); err != nil {
			log.Fatalf("setting owner role: %v", err)
		}
	}

	// Clean up from aborted tests.
	for _, sID := range ltesting.ExpiredUniqueIDs(sinkIDs(ctx), testSinkIDPrefix) {
		client.DeleteSink(ctx, sID) // ignore error
	}
	if integrationTest {
		for _, bn := range ltesting.ExpiredUniqueIDs(bucketNames(ctx, storageClient), testBucketPrefix) {
			storageClient.Bucket(bn).Delete(ctx) // ignore error
		}
		return func() {
			if err := storageClient.Bucket(testBucket).Delete(ctx); err != nil {
				log.Printf("deleting %q: %v", testBucket, err)
			}
			storageClient.Close()
		}
	}
	return func() {}
}