func ExampleListObjects() {
	ctx := context.Background()
	var client *storage.Client // See Example (Auth)
	var query *storage.Query
	for {
		// If you are using this package on App Engine Managed VMs runtime,
		// you can init a bucket client with your app's default bucket name.
		// See http://godoc.org/google.golang.org/appengine/file#DefaultBucketName.
		objects, err := client.Bucket("bucketname").List(ctx, query)
		if err != nil {
			log.Fatal(err)
		}
		for _, obj := range objects.Results {
			log.Printf("object name: %s, size: %v", obj.Name, obj.Size)
		}
		// If there are more results, objects.Next will be non-nil.
		if objects.Next == nil {
			break
		}
		query = objects.Next
	}
	log.Println("paginated through all object items in the bucket you specified.")
}
func ExampleDeleteObject() {
	ctx := context.Background()
	var client *storage.Client // See Example (Auth)

	// To delete multiple objects in a bucket, first List then Delete them.
	// If you are using this package on App Engine Managed VMs runtime,
	// you can init a bucket client with your app's default bucket name.
	// See http://godoc.org/google.golang.org/appengine/file#DefaultBucketName.
	bucket := client.Bucket("bucketname")

	var query *storage.Query // Set up query as desired.
	for {
		objects, err := bucket.List(ctx, query)
		if err != nil {
			log.Fatal(err)
		}
		for _, obj := range objects.Results {
			log.Printf("deleting object name: %q, size: %v", obj.Name, obj.Size)
			if err := bucket.Object(obj.Name).Delete(ctx); err != nil {
				log.Fatalf("unable to delete %q: %v", obj.Name, err)
			}
		}
		// If there are more results, objects.Next will be non-nil.
		if objects.Next == nil {
			break
		}
		query = objects.Next
	}
	log.Println("deleted all object items in the bucket specified.")
}
// FileContentsFromGS returns the contents of a file in the given bucket or an error.
func FileContentsFromGS(s *storage.Client, bucketName, fileName string) ([]byte, error) {
	response, err := s.Bucket(bucketName).Object(fileName).NewReader(context.Background())
	if err != nil {
		return nil, err
	}
	defer util.Close(response)
	return ioutil.ReadAll(response)
}
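// A minimal usage sketch for FileContentsFromGS, not taken from the original source:
// the bucket name "my-bucket" and object name "configs/app.json" are hypothetical,
// and an already-authenticated *storage.Client is assumed.
func readConfigFromGS(client *storage.Client) {
	contents, err := FileContentsFromGS(client, "my-bucket", "configs/app.json")
	if err != nil {
		log.Fatalf("unable to read gs://my-bucket/configs/app.json: %v", err)
	}
	log.Printf("read %d bytes", len(contents))
}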
func ExampleCopyObject() {
	ctx := context.Background()
	var client *storage.Client // See Example (Auth)
	o, err := client.CopyObject(ctx, "bucketname", "file1", "another-bucketname", "file2", nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("copied file:", o)
}
func printFolders(ctx context.Context, client *storage.Client, res http.ResponseWriter, delimiter string) {
	fmt.Fprintf(res, "Delimiter [%s]\n", delimiter)
	query := &storage.Query{
		Delimiter: delimiter,
	}
	objs, err := client.Bucket(BUCKET_NAME).List(ctx, query)
	logError(err)
	for _, subfolder := range objs.Prefixes {
		fmt.Fprintf(res, "Folder: %s\n", subfolder)
	}
}
func ExampleObjectHandle_CopyTo() {
	ctx := context.Background()
	var client *storage.Client // See Example (Auth)
	src := client.Bucket("bucketname").Object("file1")
	dst := client.Bucket("another-bucketname").Object("file2")
	o, err := src.CopyTo(ctx, dst, nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("copied file:", o)
}
// run is the actual entry point called from main.
// It expects vargs and workspace to be initialized.
func run(client *storage.Client) {
	// extract bucket name from the target path
	p := strings.SplitN(vargs.Target, "/", 2)
	bname := p[0]
	if len(p) == 1 {
		vargs.Target = ""
	} else {
		vargs.Target = p[1]
	}
	bucket = client.Bucket(strings.Trim(bname, "/"))

	// create a list of files to upload
	vargs.Source = filepath.Join(workspace.Path, vargs.Source)
	src, err := walkFiles()
	if err != nil {
		fatalf("local files: %v", err)
	}

	// result contains the upload result of a single file
	type result struct {
		name string
		err  error
	}

	// upload each file in its own goroutine, maxConcurrent at a time
	buf := make(chan struct{}, maxConcurrent)
	res := make(chan *result, len(src))
	for _, f := range src {
		buf <- struct{}{} // alloc one slot
		go func(f string) {
			rel, err := filepath.Rel(vargs.Source, f)
			if err != nil {
				res <- &result{f, err}
				return
			}
			err = uploadFile(path.Join(vargs.Target, rel), f)
			res <- &result{rel, err}
			<-buf // free up
		}(f)
	}

	// wait for all files to be uploaded or stop at first error
	for range src {
		r := <-res
		if r.err != nil {
			fatalf("%s: %v", r.name, r.err)
		}
		printf(r.name)
	}
}
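// A minimal standalone sketch of the bounded-concurrency pattern used in run above
// (a buffered channel acting as a semaphore). This is illustrative and not part of
// the original source; items, maxConcurrent, and doWork are hypothetical stand-ins
// for the file list, the concurrency limit, and uploadFile.
func boundedConcurrencySketch(items []string, maxConcurrent int, doWork func(string) error) {
	sem := make(chan struct{}, maxConcurrent) // each slot represents one in-flight worker
	res := make(chan error, len(items))       // buffered so workers never block on send
	for _, it := range items {
		sem <- struct{}{} // blocks until a slot is free
		go func(it string) {
			defer func() { <-sem }() // free the slot when the worker finishes
			res <- doWork(it)
		}(it)
	}
	// wait for all workers, stopping at the first error (mirrors run above)
	for range items {
		if err := <-res; err != nil {
			log.Fatal(err)
		}
	}
}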
func getPhotoNames(ctx context.Context, client *storage.Client) []string {
	query := &storage.Query{
		Delimiter: "/",
		Prefix:    "photos/",
	}
	objs, err := client.Bucket(BUCKET_NAME).List(ctx, query)
	logError(err)
	var names []string
	for _, result := range objs.Results {
		names = append(names, result.Name)
	}
	return names
}
func ExampleNewWriter() {
	ctx := context.Background()
	var client *storage.Client // See Example (Auth)
	wc := client.Bucket("bucketname").Object("filename1").NewWriter(ctx)
	wc.ContentType = "text/plain"
	wc.ACL = []storage.ACLRule{{storage.AllUsers, storage.RoleReader}}
	if _, err := wc.Write([]byte("hello world")); err != nil {
		log.Fatal(err)
	}
	if err := wc.Close(); err != nil {
		log.Fatal(err)
	}
	log.Println("updated object:", wc.Attrs())
}
// store writes a dummy file in a loop, using the folder postfix string passed in.
func store(ctx context.Context, client *storage.Client, folderPostfix string) {
	// Read the file from disk.
	reader, err := os.Open(FILE_NAME)
	logError(err)
	defer reader.Close()

	// Loop to create a few folders.
	for i := 0; i < 3; i++ {
		// Rewind the reader so each iteration copies the full file.
		if _, err := reader.Seek(0, 0); err != nil {
			logError(err)
		}
		// Prefix the object path with the loop index i.
		writer := client.Bucket(BUCKET_NAME).Object(strconv.Itoa(i) + folderPostfix + FILE_NAME).NewWriter(ctx)
		writer.ACL = []storage.ACLRule{{storage.AllUsers, storage.RoleReader}}
		if _, err := io.Copy(writer, reader); err != nil {
			logError(err)
		}
		if err := writer.Close(); err != nil {
			logError(err)
		}
	}
}
func ExampleNewReader() {
	ctx := context.Background()
	var client *storage.Client // See Example (Auth)
	rc, err := client.Bucket("bucketname").Object("filename1").NewReader(ctx)
	if err != nil {
		log.Fatal(err)
	}
	slurp, err := ioutil.ReadAll(rc)
	rc.Close()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("file contents:", slurp)
}
// versionHelper fetches the version files with the given prefix from GCS and parses them.
func versionHelper(storageClient *storage.Client, prefix string) (string, error) {
	if storageClient == nil {
		return "", fmt.Errorf("Storage service cannot be nil!")
	}
	q := &storage.Query{Prefix: prefix}
	contents, err := storageClient.Bucket(config.GS.Bucket).List(context.Background(), q)
	if err != nil {
		return "", err
	}
	for _, r := range contents.Results {
		if r.Name != prefix {
			return strings.SplitAfter(r.Name, prefix)[1], nil
		}
	}
	return "", fmt.Errorf("Could not find specified version in %q", prefix)
}
// DeleteAllFilesInDir deletes all the files in the given Google Storage folder.
func DeleteAllFilesInDir(s *storage.Client, bucket, folder string) error {
	var deleteError bool
	del := func(item *storage.ObjectAttrs) {
		if err := s.Bucket(bucket).Object(item.Name).Delete(context.Background()); err != nil {
			glog.Errorf("Problem deleting gs://%s/%s: %s", bucket, item.Name, err)
			deleteError = true
		}
	}
	if err := AllFilesInDir(s, bucket, folder, del); err != nil {
		return err
	}
	if deleteError {
		return fmt.Errorf("There were one or more problems when deleting files in folder %q", folder)
	}
	return nil
}
// AllFilesInDir synchronously iterates through all the files in a given Google Storage folder.
// The callback function is called on each item in the order it appears in the bucket.
// It returns an error if the bucket or folder cannot be accessed.
func AllFilesInDir(s *storage.Client, bucket, folder string, callback func(item *storage.ObjectAttrs)) error {
	total := 0
	q := &storage.Query{Prefix: folder, Versions: false}
	for q != nil {
		list, err := s.Bucket(bucket).List(context.Background(), q)
		if err != nil {
			return fmt.Errorf("Problem reading from Google Storage: %v", err)
		}
		total += len(list.Results)
		glog.Infof("Loading %d more files from gs://%s/%s Total: %d", len(list.Results), bucket, folder, total)
		for _, item := range list.Results {
			callback(item)
		}
		q = list.Next
	}
	return nil
}
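// An illustrative caller for AllFilesInDir, not taken from the original source:
// it counts the objects under a folder. The bucket name "my-bucket" and folder
// "logs/" are hypothetical, and an already-authenticated *storage.Client is assumed.
func countFilesInDir(client *storage.Client) (int, error) {
	count := 0
	countItem := func(item *storage.ObjectAttrs) {
		count++
		glog.Infof("found gs://my-bucket/%s (%d bytes)", item.Name, item.Size)
	}
	if err := AllFilesInDir(client, "my-bucket", "logs/", countItem); err != nil {
		return 0, err
	}
	return count, nil
}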
func getImages(ctx context.Context, client *storage.Client) []string {
	query := &storage.Query{
		Delimiter: "/",
		Prefix:    "photos/",
	}
	objs, err := client.Bucket(gcsBucket).List(ctx, query)
	if err != nil {
		log.Errorf(ctx, "%v", err)
	}
	var outString []string
	for _, obj := range objs.Results {
		outString = append(outString, obj.Name)
	}
	return outString
}
func storeImage(rw http.ResponseWriter, req *http.Request) {
	// Appengine
	var c appengine.Context
	// Google Cloud Storage authentication
	var cc gcscontext.Context
	// Google Cloud Storage bucket name
	var bucketName string = ""
	// Google Cloud Storage client
	var client *storage.Client
	// Google Cloud Storage bucket
	var bucketHandle *storage.BucketHandle
	// User uploaded image file name
	var fileName string = uuid.New()
	// Thumbnail file name for the user uploaded image
	var fileNameThumbnail string = uuid.New()
	// User uploaded image file type
	var contentType string = ""
	// User uploaded image file raw data
	var b []byte
	// Google Cloud Storage file writer
	var wc *storage.Writer = nil
	// Error
	var err error = nil
	// HTTP status code for the response; http.StatusCreated on success
	var r int = http.StatusCreated

	// Set response in the end
	defer func() {
		// Return status. WriteHeader() must be called before call to Write
		if r == http.StatusCreated {
			// Changing the header after a call to WriteHeader (or Write) has no effect.
			// rw.Header().Set("Location", req.URL.String()+"/"+cKey.Encode())
			rw.Header().Set("Location", "http://"+bucketName+".storage.googleapis.com/"+fileName)
			rw.Header().Set("X-Thumbnail", "http://"+bucketName+".storage.googleapis.com/"+fileNameThumbnail)
			rw.WriteHeader(r)
		} else {
			http.Error(rw, http.StatusText(r), r)
		}
	}()

	// To log information in Google APP Engine console
	c = appengine.NewContext(req)

	// Get data from body
	b, err = ioutil.ReadAll(req.Body)
	if err != nil {
		c.Errorf("%s in reading body", err)
		r = http.StatusInternalServerError
		return
	}
	c.Infof("Body length %d bytes, read %d bytes", req.ContentLength, len(b))

	// Determine filename extension from content type
	contentType = req.Header["Content-Type"][0]
	switch contentType {
	case "image/jpeg":
		fileName += ".jpg"
		fileNameThumbnail += ".jpg"
	default:
		c.Errorf("Unknown or unsupported content type '%s'. Valid: image/jpeg", contentType)
		r = http.StatusBadRequest
		return
	}
	c.Infof("Content type %s is received, %s is detected.", contentType, http.DetectContentType(b))

	// Prepare Google Cloud Storage authentication
	cc = gcsappengine.NewContext(req)
	if client, err = storage.NewClient(cc); err != nil {
		c.Errorf("%s in initializing a GCS client", err)
		r = http.StatusInternalServerError
		return
	}
	defer client.Close()

	// Get default bucket
	if bucketName, err = gcsfile.DefaultBucketName(cc); err != nil {
		c.Errorf("%s in getting default GCS bucket name", err)
		r = http.StatusInternalServerError
		return
	}
	bucketHandle = client.Bucket(bucketName)
	c.Infof("APP Engine Version: %s", gcsappengine.VersionID(cc))
	c.Infof("Using bucket name: %s", bucketName)

	// Change default object ACLs
	if err = bucketHandle.DefaultObjectACL().Set(cc, storage.AllUsers, storage.RoleReader); err != nil {
		c.Errorf("%v in saving default object ACL rule for bucket %q", err, bucketName)
		r = http.StatusInternalServerError
		return
	}

	// Store rotated image in Google Cloud Storage
	var in *bytes.Reader = bytes.NewReader(b)
	var x *exif.Exif = nil
	var orientation *tiff.Tag = nil
	var beforeImage image.Image
	var afterImage *image.NRGBA = nil

	// Read EXIF
	if _, err = in.Seek(0, 0); err != nil {
		c.Errorf("%s in moving the reader offset to the beginning in order to read EXIF", err)
		r = http.StatusInternalServerError
		return
	}
	if x, err = exif.Decode(in); err != nil {
		c.Errorf("%s in decoding JPEG image", err)
		r = http.StatusInternalServerError
		return
	}

	// Get Orientation
	if orientation, err = x.Get(exif.Orientation); err != nil {
		c.Warningf("%s in getting orientation from EXIF", err)
		r = http.StatusInternalServerError
		return
	}
	c.Debugf("Orientation %s", orientation.String())

	// Open image
	if _, err = in.Seek(0, 0); err != nil {
		c.Errorf("%s in moving the reader offset to the beginning in order to decode the image", err)
		r = http.StatusInternalServerError
		return
	}
	if beforeImage, err = imaging.Decode(in); err != nil {
		c.Errorf("%s in opening image", err)
		r = http.StatusInternalServerError
		return
	}

	switch orientation.String() {
	case "1":
		afterImage = beforeImage.(*image.NRGBA)
	case "2":
		afterImage = imaging.FlipH(beforeImage)
	case "3":
		afterImage = imaging.Rotate180(beforeImage)
	case "4":
		afterImage = imaging.FlipV(beforeImage)
	case "5":
		afterImage = imaging.Transverse(beforeImage)
	case "6":
		afterImage = imaging.Rotate270(beforeImage)
	case "7":
		afterImage = imaging.Transpose(beforeImage)
	case "8":
		afterImage = imaging.Rotate90(beforeImage)
	}

	// Save rotated image
	wc = bucketHandle.Object(fileName).NewWriter(cc)
	wc.ContentType = contentType
	if err = imaging.Encode(wc, afterImage, imaging.JPEG); err != nil {
		c.Errorf("%s in saving rotated image", err)
		r = http.StatusInternalServerError
		return
	}
	if err = wc.Close(); err != nil {
		c.Errorf("CreateFile: unable to close bucket %q, file %q: %v", bucketName, fileName, err)
		r = http.StatusInternalServerError
		return
	}
	wc = nil

	// Make thumbnail
	if afterImage.Rect.Dx() > afterImage.Rect.Dy() {
		afterImage = imaging.Resize(afterImage, 1920, 0, imaging.Lanczos)
	} else {
		afterImage = imaging.Resize(afterImage, 0, 1920, imaging.Lanczos)
	}

	// Save thumbnail
	wc = bucketHandle.Object(fileNameThumbnail).NewWriter(cc)
	wc.ContentType = contentType
	if err = imaging.Encode(wc, afterImage, imaging.JPEG); err != nil {
		c.Errorf("%s in saving image thumbnail", err)
		r = http.StatusInternalServerError
		return
	}
	if err = wc.Close(); err != nil {
		c.Errorf("CreateFileThumbnail: unable to close bucket %q, file %q: %v", bucketName, fileNameThumbnail, err)
		r = http.StatusInternalServerError
		return
	}

	c.Infof("/%v/%v, /%v/%v created", bucketName, fileName, bucketName, fileNameThumbnail)
}