// Delete recursively deletes all objects stored at "path" and its subpaths. func (d *driver) Delete(context ctx.Context, path string) error { prefix := d.pathToDirKey(path) gcsContext := d.context(context) keys, err := d.listAll(gcsContext, prefix) if err != nil { return err } if len(keys) > 0 { sort.Sort(sort.Reverse(sort.StringSlice(keys))) for _, key := range keys { if err := storage.DeleteObject(gcsContext, d.bucket, key); err != nil { return err } } return nil } err = storage.DeleteObject(gcsContext, d.bucket, d.pathToKey(path)) if err != nil { if status := err.(*googleapi.Error); status != nil { if status.Code == http.StatusNotFound { return storagedriver.PathNotFoundError{Path: path} } } } return err }
// Move moves an object stored at sourcePath to destPath, removing the // original object. func (d *driver) Move(context ctx.Context, sourcePath string, destPath string) error { prefix := d.pathToDirKey(sourcePath) gcsContext := d.context(context) keys, err := d.listAll(gcsContext, prefix) if err != nil { return err } if len(keys) > 0 { destPrefix := d.pathToDirKey(destPath) copies := make([]string, 0, len(keys)) sort.Strings(keys) var err error for _, key := range keys { dest := destPrefix + key[len(prefix):] _, err = storage.CopyObject(gcsContext, d.bucket, key, d.bucket, dest, nil) if err == nil { copies = append(copies, dest) } else { break } } // if an error occurred, attempt to cleanup the copies made if err != nil { for i := len(copies) - 1; i >= 0; i-- { _ = storage.DeleteObject(gcsContext, d.bucket, copies[i]) } return err } // delete originals for i := len(keys) - 1; i >= 0; i-- { err2 := storage.DeleteObject(gcsContext, d.bucket, keys[i]) if err2 != nil { err = err2 } } return err } _, err = storage.CopyObject(gcsContext, d.bucket, d.pathToKey(sourcePath), d.bucket, d.pathToKey(destPath), nil) if err != nil { if status := err.(*googleapi.Error); status != nil { if status.Code == http.StatusNotFound { return storagedriver.PathNotFoundError{Path: sourcePath} } } return err } return storage.DeleteObject(gcsContext, d.bucket, d.pathToKey(sourcePath)) }
// RemoveBackup implements BackupStorage. func (bs *GCSBackupStorage) RemoveBackup(dir, name string) error { authCtx, err := bs.authContext() if err != nil { return err } // Find all objects with the right prefix. query := &storage.Query{ Prefix: objName(dir, name, "" /* include trailing slash */), } // Loop in case results are returned in multiple batches. for query != nil { objs, err := storage.ListObjects(authCtx, *bucket, query) if err != nil { return err } // Delete all the found objects. for _, obj := range objs.Results { if err := storage.DeleteObject(authCtx, *bucket, obj.Name); err != nil { return fmt.Errorf("unable to delete %q from bucket %q: %v", obj.Name, *bucket, err) } } query = objs.Next } return nil }
func ExampleDeleteObject() { // To delete multiple objects in a bucket, first ListObjects then delete them. ctx := Example_auth() // If you are using this package on App Engine Managed VMs runtime, // you can init a bucket client with your app's default bucket name. // See http://godoc.org/google.golang.org/appengine/file#DefaultBucketName. const bucket = "bucketname" var query *storage.Query // Set up query as desired. for { objects, err := storage.ListObjects(ctx, bucket, query) if err != nil { log.Fatal(err) } for _, obj := range objects.Results { log.Printf("deleting object name: %q, size: %v", obj.Name, obj.Size) if err := storage.DeleteObject(ctx, bucket, obj.Name); err != nil { log.Fatalf("unable to delete %q: %v", obj.Name, err) } } // if there are more results, objects.Next will be non-nil. query = objects.Next if query == nil { break } } log.Println("deleted all object items in the bucket you specified.") }
func (a *AppengineStore) Delete(name string) error { err := storage.DeleteObject(a.Ctx, a.Cfg.Bucket, name) if err == storage.ErrObjectNotExist || err == storage.ErrBucketNotExist { return files.ErrNotFound } return err }
func (bs *GCSBlobStore) RemoveBlob(blobpath string) error { ctx := bs.newAuthedContext(context.TODO()) if err := storage.DeleteObject(ctx, bs.bucketName, blobpath); err != nil { return err } return nil }
// deleteFiles deletes all the temporary files from a bucket created by this demo. func (d *demo) deleteFiles() { io.WriteString(d.w, "\nDeleting files...\n") for _, v := range d.cleanUp { fmt.Fprintf(d.w, "Deleting file %v\n", v) if err := storage.DeleteObject(d.ctx, bucket, v); err != nil { d.errorf("deleteFiles: unable to delete bucket %q, file %q: %v", bucket, v, err) return } } }
// Rename implements part of the VFS interface. func (s FS) Rename(ctx context.Context, oldPath, newPath string) error { if _, err := storage.CopyObject(ctx, s.Bucket, oldPath, s.Bucket, newPath, &storage.ObjectAttrs{ ContentType: "application/octet-stream", }); err != nil { return fmt.Errorf("error copying file during rename: %v", err) } if err := storage.DeleteObject(ctx, s.Bucket, oldPath); err != nil { return fmt.Errorf("error deleting old file during rename: %v", err) } return nil }
func HandleUpload(r *http.Request) (storageName string, other url.Values, err error) { blobs, other, err := blobstore.ParseUpload(r) if err != nil { return "", nil, err } // Delete any uploads other than the one we actually want. // Stops users from wasting our storage for no reason. var deleteList []string for k, fileList := range blobs { for i, file := range fileList { if k != "file" || i != 0 { deleteList = append(deleteList, file.ObjectName) } } } if len(deleteList) > 0 { c := appengine.NewContext(r) ctx, err := getGcsContext(c) if err != nil { return "", nil, err } for _, junk := range deleteList { // If one of our delete ops fails, still try the rest, // but set err aside, preserving it, so we can return // after. if newErr := storage.DeleteObject(ctx, gcsBucket, junk); err != nil { err = newErr } } } if err != nil { return "", nil, err } if len(blobs["file"]) == 0 { return "", nil, errors.New("No file uploaded.") } return blobs["file"][0].ObjectName, other, nil }
// Delete removes an object by name from the bucket being used. If the object does not // exist and there is nothing to delete, Delete returns with no error. // TODO Delete thumbnail and web view copies too! func Delete(filename string, r *http.Request) error { c := appengine.NewContext(r) bucket, err := file.DefaultBucketName(c) if err != nil { log.Errorf(c, "Failed to get default bucket: %v", err) return err } ctx, err := auth(r) if err != nil { log.Errorf(c, "Failed to get context: %v", err) return err } log.Infof(c, "Attempting to delete file %v from bucket %v.", filename, bucket) // StatObject is used here to check existence before calling DeleteObject. // If the object does not exist, DeleteObject returns an error that is NOT // ErrObjectNotExist, so it seemed more reliable to check with StatObject first... _, err = storage.StatObject(ctx, bucket, filename) if err == storage.ErrObjectNotExist { log.Warningf(c, "Object does not exist, nothing to delete.") return nil } err = storage.DeleteObject(ctx, bucket, filename) if err != nil { log.Errorf(c, "Failed to delete file.") log.Infof(c, "Attempting to remove public access to file...") aclErr := storage.DeleteACLRule(ctx, bucket, filename, storage.AllUsers) if aclErr != nil { log.Errorf(c, "Failed to remove public file access!") } else { log.Infof(c, "File access removed.") } return err } return nil }
func storageDeleteObject(context context.Context, bucket string, name string) error { return retry(5, func() error { return storage.DeleteObject(context, bucket, name) }) }
// WriteStream stores the contents of the provided io.ReadCloser at a
// location designated by the given path.
// May be used to resume writing a stream by providing a nonzero offset.
// The offset must be no larger than the CurrentSize for this path.
func (d *driver) WriteStream(context ctx.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) {
	if offset < 0 {
		return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset}
	}
	if offset == 0 {
		// No existing data to append to: write the whole object fresh.
		return d.writeCompletely(context, path, 0, reader)
	}

	service, err := storageapi.New(d.client)
	if err != nil {
		return 0, err
	}
	objService := storageapi.NewObjectsService(service)

	// Fetch current object metadata (size, generation, component count),
	// retrying transient failures up to 5 times.
	var obj *storageapi.Object
	err = retry(5, func() error {
		o, err := objService.Get(d.bucket, d.pathToKey(path)).Do()
		obj = o
		return err
	})
	// obj, err := retry(5, objService.Get(d.bucket, d.pathToKey(path)).Do)
	if err != nil {
		return 0, err
	}

	// cannot append more chunks, so redo from scratch
	// (GCS compose limits an object to 1024 components; see the GCS
	// compose documentation — NOTE(review): confirm the exact limit.)
	if obj.ComponentCount >= 1023 {
		return d.writeCompletely(context, path, offset, reader)
	}

	// skip from reader: discard the bytes the object already holds past
	// the requested offset, so the new part starts at the object's end.
	objSize := int64(obj.Size)
	nn, err := skip(reader, objSize-offset)
	if err != nil {
		return nn, err
	}

	// Size <= offset: write the remaining stream to a temporary "part"
	// object that will be composed onto the existing object below.
	partName := fmt.Sprintf("%v#part-%d#", d.pathToKey(path), obj.ComponentCount)
	gcsContext := d.context(context)
	wc := storage.NewWriter(gcsContext, d.bucket, partName)
	wc.ContentType = "application/octet-stream"

	if objSize < offset {
		// The object is shorter than the requested offset: pad the gap
		// with zeros before appending the stream data.
		err = writeZeros(wc, offset-objSize)
		if err != nil {
			wc.CloseWithError(err)
			return nn, err
		}
	}
	n, err := io.Copy(wc, reader)
	if err != nil {
		wc.CloseWithError(err)
		return nn, err
	}
	err = wc.Close()
	if err != nil {
		return nn, err
	}
	// wc was closed successfully, so the temporary part exists, schedule it for deletion at the end
	// of the function (best-effort: the delete error is intentionally ignored)
	defer storage.DeleteObject(gcsContext, d.bucket, partName)

	// Compose the existing object and the new part into the destination,
	// pinning both source generations so a concurrent write fails the
	// compose rather than silently mixing data.
	req := &storageapi.ComposeRequest{
		Destination: &storageapi.Object{Bucket: obj.Bucket, Name: obj.Name, ContentType: obj.ContentType},
		SourceObjects: []*storageapi.ComposeRequestSourceObjects{
			{
				Name:       obj.Name,
				Generation: obj.Generation,
			}, {
				Name:       partName,
				Generation: wc.Object().Generation,
			}},
	}
	err = retry(5, func() error { _, err := objService.Compose(d.bucket, obj.Name, req).Do(); return err })
	if err == nil {
		// Only count the copied bytes on success; skipped bytes (nn) are
		// always reported.
		nn = nn + n
	}
	return nn, err
}
// Remove implements part of the VFS interface. func (s FS) Remove(ctx context.Context, path string) error { return storage.DeleteObject(ctx, s.Bucket, path) }