func listBucket(ctx context.Context, bucketName, folder string) ([]string, []string, error) { cloudContext, err := getCloudContext(ctx) if err != nil { return nil, nil, err } var files, folders []string query := &storage.Query{ Delimiter: "/", Prefix: folder, } // objs is *storage.Objects objs, err := storage.ListObjects(cloudContext, bucketName, query) if err != nil { return nil, nil, err } for _, subfolder := range objs.Prefixes { folders = append(folders, subfolder[len(folder):]) } for _, obj := range objs.Results { files = append(files, obj.Name) } return files, folders, nil }
func handleList(res http.ResponseWriter, req *http.Request) { ctx := appengine.NewContext(req) cctx := getCloudContext(ctx) query := &storage.Query{ Prefix: prefix, } objs, err := storage.ListObjects(cctx, bucket, query) if err != nil { http.Error(res, err.Error(), 500) return } t, err := template.New("").Parse(`<li><a href="/get?f={{.}}">{{.}}</a></li>`) if err != nil { http.Error(res, err.Error(), 500) return } res.Header().Set("Content-Type", "text/html") io.WriteString(res, `<form action="put" method="POST" enctype="multipart/form-data"><input name="f" type="file"><input type="submit"></form> <ul>`) for _, obj := range objs.Results { err := t.Execute(res, obj.Name[len(prefix):]) if err != nil { http.Error(res, err.Error(), 500) return } } io.WriteString(res, `</ul>`) }
// listAll recursively lists all names of objects stored at "prefix" and its subpaths.
//
// It pages through every result batch (via objects.Next) and skips
// objects that carry a non-zero deletion timestamp.
func (d *driver) listAll(context context.Context, prefix string) ([]string, error) {
	list := make([]string, 0, 64)
	query := &storage.Query{}
	query.Prefix = prefix
	query.Versions = false
	for {
		objects, err := storage.ListObjects(d.context(context), d.bucket, query)
		if err != nil {
			return nil, err
		}
		for _, obj := range objects.Results {
			// GCS does not guarantee strong consistency between DELETE and
			// LIST operations. Check that the object is not deleted, i.e.
			// filter out any objects with a non-zero time-deleted.
			if obj.Deleted.IsZero() {
				list = append(list, obj.Name)
			}
		}
		// A nil Next means there are no further result batches.
		query = objects.Next
		if query == nil {
			break
		}
	}
	return list, nil
}
// RemoveBackup implements BackupStorage. func (bs *GCSBackupStorage) RemoveBackup(dir, name string) error { authCtx, err := bs.authContext() if err != nil { return err } // Find all objects with the right prefix. query := &storage.Query{ Prefix: objName(dir, name, "" /* include trailing slash */), } // Loop in case results are returned in multiple batches. for query != nil { objs, err := storage.ListObjects(authCtx, *bucket, query) if err != nil { return err } // Delete all the found objects. for _, obj := range objs.Results { if err := storage.DeleteObject(authCtx, *bucket, obj.Name); err != nil { return fmt.Errorf("unable to delete %q from bucket %q: %v", obj.Name, *bucket, err) } } query = objs.Next } return nil }
func browse(res http.ResponseWriter, req *http.Request) { ctx := appengine.NewContext(req) session := getSession(ctx, req) // if no bucket has been chosen if session.Bucket == "" { http.Redirect(res, req, "/", 302) return } folder := strings.SplitN(req.URL.Path, "/", 3)[2] cctx, err := getCloudContext(ctx) if err != nil { http.Error(res, err.Error(), 500) return } objects, err := storage.ListObjects(cctx, session.Bucket, nil) if err != nil { http.Error(res, err.Error(), 401) return } model := &browseModel{ Bucket: session.Bucket, Folder: folder, Objects: objects.Results, } err = tpls.ExecuteTemplate(res, "browse.html", model) if err != nil { http.Error(res, err.Error(), 500) } }
func ExampleListObjects() { ctx := Example_auth() var query *storage.Query for { // If you are using this package on App Engine Managed VMs runtime, // you can init a bucket client with your app's default bucket name. // See http://godoc.org/google.golang.org/appengine/file#DefaultBucketName. objects, err := storage.ListObjects(ctx, "bucketname", query) if err != nil { log.Fatal(err) } for _, obj := range objects.Results { log.Printf("object name: %s, size: %v", obj.Name, obj.Size) } // if there are more results, objects.Next // will be non-nil. query = objects.Next if query == nil { break } } log.Println("paginated through all object items in the bucket you specified.") }
func ExampleDeleteObject() { // To delete multiple objects in a bucket, first ListObjects then delete them. ctx := Example_auth() // If you are using this package on App Engine Managed VMs runtime, // you can init a bucket client with your app's default bucket name. // See http://godoc.org/google.golang.org/appengine/file#DefaultBucketName. const bucket = "bucketname" var query *storage.Query // Set up query as desired. for { objects, err := storage.ListObjects(ctx, bucket, query) if err != nil { log.Fatal(err) } for _, obj := range objects.Results { log.Printf("deleting object name: %q, size: %v", obj.Name, obj.Size) if err := storage.DeleteObject(ctx, bucket, obj.Name); err != nil { log.Fatalf("unable to delete %q: %v", obj.Name, err) } } // if there are more results, objects.Next will be non-nil. query = objects.Next if query == nil { break } } log.Println("deleted all object items in the bucket you specified.") }
// List returns a list of the objects that are direct descendants of the
// given path.
//
// Files are object names (filtered for deleted objects and internal '#'
// upload-part markers); directories are the delimiter-collapsed prefixes.
// All result batches are consumed via objects.Next.
func (d *driver) List(context ctx.Context, path string) ([]string, error) {
	var query *storage.Query
	query = &storage.Query{}
	query.Delimiter = "/"
	query.Prefix = d.pathToDirKey(path)
	list := make([]string, 0, 64)
	for {
		objects, err := storage.ListObjects(d.context(context), d.bucket, query)
		if err != nil {
			return nil, err
		}
		for _, object := range objects.Results {
			// GCS does not guarantee strong consistency between DELETE and
			// LIST operations. Check that the object is not deleted, i.e.
			// filter out any objects with a non-zero time-deleted.
			if object.Deleted.IsZero() {
				name := object.Name
				// Ignore objects with names that end with '#' (these are uploaded parts)
				if name[len(name)-1] != '#' {
					name = d.keyToPath(name)
					list = append(list, name)
				}
			}
		}
		// Each collapsed prefix is a direct child "directory" of path.
		for _, subpath := range objects.Prefixes {
			subpath = d.keyToPath(subpath)
			list = append(list, subpath)
		}
		// A nil Next means there are no further result batches.
		query = objects.Next
		if query == nil {
			break
		}
	}
	return list, nil
}
func storageListObjects(context context.Context, bucket string, q *storage.Query) (*storage.Objects, error) { var objs *storage.Objects err := retry(5, func() error { var err error objs, err = storage.ListObjects(context, bucket, q) return err }) return objs, err }
func handleList(res http.ResponseWriter, req *http.Request) { ctx := appengine.NewContext(req) cctx := getCloudContext(ctx) var query *storage.Query objs, err := storage.ListObjects(cctx, bucketName, query) if err != nil { http.Error(res, err.Error(), 500) return } for _, obj := range objs.Results { fmt.Fprintln(res, obj.Name) } }
// ListBackups implements BackupStorage. func (bs *GCSBackupStorage) ListBackups(dir string) ([]backupstorage.BackupHandle, error) { authCtx, err := bs.authContext() if err != nil { return nil, err } // List prefixes that begin with dir (i.e. list subdirs). var subdirs []string searchPrefix := objName(dir, "" /* include trailing slash */) query := &storage.Query{ Delimiter: "/", Prefix: searchPrefix, } // Loop in case results are returned in multiple batches. for query != nil { objs, err := storage.ListObjects(authCtx, *bucket, query) if err != nil { return nil, err } // Each returned prefix is a subdir. // Strip parent dir from full path. for _, prefix := range objs.Prefixes { subdir := strings.TrimPrefix(prefix, searchPrefix) subdir = strings.TrimSuffix(subdir, "/") subdirs = append(subdirs, subdir) } query = objs.Next } // Backups must be returned in order, oldest first. sort.Strings(subdirs) result := make([]backupstorage.BackupHandle, 0, len(subdirs)) for _, subdir := range subdirs { result = append(result, &GCSBackupHandle{ authCtx: authCtx, bs: bs, dir: dir, name: subdir, readOnly: true, }) } return result, nil }
func handleList(res http.ResponseWriter, req *http.Request) { cctx, err := getCloudContext(req) if err != nil { http.Error(res, "ERROR GETTING CCTX: "+err.Error(), 500) return } var query *storage.Query objs, err := storage.ListObjects(cctx, gcsBucket, query) if err != nil { http.Error(res, err.Error(), 500) return } for _, obj := range objs.Results { fmt.Fprintln(res, obj.Name) } }
func handleList(res http.ResponseWriter, req *http.Request) { ctx := appengine.NewContext(req) cctx := getCloudContext(ctx) var query *storage.Query objs, err := storage.ListObjects(cctx, bucketName, query) if err != nil { http.Error(res, err.Error(), 500) return } var fileNames []string for _, obj := range objs.Results { fileNames = append(fileNames, obj.Name) } templates.ExecuteTemplate(res, "index", fileNames) }
// listBucket lists the contents of a bucket in Google Cloud Storage. func (d *demo) listBucket() { io.WriteString(d.w, "\nListbucket result:\n") query := &storage.Query{Prefix: "foo"} for query != nil { objs, err := storage.ListObjects(d.ctx, bucket, query) if err != nil { d.errorf("listBucket: unable to list bucket %q: %v", bucket, err) return } query = objs.Next for _, obj := range objs.Results { d.dumpStats(obj) } } }
func listFiles(cctx context.Context, bucket, path string) ([]string, []string, error) { q := &storage.Query{ Delimiter: delimiter, Prefix: path, } objs, err := storage.ListObjects(cctx, bucket, q) if err != nil { return nil, nil, err } subfolders := []string{} for _, v := range objs.Prefixes { subfolders = append(subfolders, strings.TrimPrefix(v, path)) } files := []string{} for _, v := range objs.Results { files = append(files, v.Name) } return files, subfolders, nil }
func (bs *GCSBlobStore) ListBlobs() ([]string, error) { ctx := bs.newAuthedContext(context.TODO()) ret := make([]string, 0) q := &storage.Query{} for q != nil { res, err := storage.ListObjects(ctx, bs.bucketName, q) if err != nil { return nil, err } for _, o := range res.Results { blobpath := o.Name ret = append(ret, blobpath) } q = res.Next } return ret, nil }
func ExposeMultiURL(ctx context.Context, bucket, name string) ([]string, error) { // TODO(victorbalan): Change this to session expiry time expiry := time.Now().Add(time.Hour * 2) query := &storage.Query{Prefix: name} objects, err := storage.ListObjects(CloudContext(ctx), bucket, query) if err != nil { return nil, err } var urls []string for _, obj := range objects.Results { u, err := Expose(bucket, obj.Name, expiry) if err != nil { return nil, err } urls = append(urls, u) } return urls, nil }
func (d *demo) listDir(name, indent string) { query := &storage.Query{Prefix: name, Delimiter: "/"} for query != nil { objs, err := storage.ListObjects(d.ctx, bucket, query) if err != nil { d.errorf("listBucketDirMode: unable to list bucket %q: %v", bucket, err) return } query = objs.Next for _, obj := range objs.Results { fmt.Fprint(d.w, indent) d.dumpStats(obj) } for _, dir := range objs.Prefixes { fmt.Fprintf(d.w, "%v(directory: /%v/%v)\n", indent, bucket, dir) d.listDir(dir, indent+" ") } } }
// Glob implements part of the VFS interface. func (s FS) Glob(ctx context.Context, glob string) ([]string, error) { q := &storage.Query{ Prefix: globLiteralPrefix(glob), } var paths []string for q != nil { objs, err := storage.ListObjects(ctx, s.Bucket, q) if err != nil { return nil, err } for _, o := range objs.Results { if matched, err := filepath.Match(glob, o.Name); err != nil { return nil, err } else if matched { paths = append(paths, o.Name) } } q = objs.Next } return paths, nil }
// Stat retrieves the FileInfo for the given path, including the current
// size in bytes and the creation time.
//
// The path is first tried as a regular file; if that fails it is treated
// as a folder, which exists iff at least one object lives under its
// directory key.
func (d *driver) Stat(context ctx.Context, path string) (storagedriver.FileInfo, error) {
	var fi storagedriver.FileInfoFields
	//try to get as file
	gcsContext := d.context(context)
	obj, err := storage.StatObject(gcsContext, d.bucket, d.pathToKey(path))
	if err == nil {
		fi = storagedriver.FileInfoFields{
			Path:    path,
			Size:    obj.Size,
			ModTime: obj.Updated,
			IsDir:   false,
		}
		return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil
	}
	//try to get as folder
	dirpath := d.pathToDirKey(path)
	var query *storage.Query
	query = &storage.Query{}
	query.Prefix = dirpath
	// One result is enough to prove the folder exists.
	query.MaxResults = 1
	objects, err := storage.ListObjects(gcsContext, d.bucket, query)
	if err != nil {
		return nil, err
	}
	if len(objects.Results) < 1 {
		return nil, storagedriver.PathNotFoundError{Path: path}
	}
	fi = storagedriver.FileInfoFields{
		Path:  path,
		IsDir: true,
	}
	// If a placeholder object exists with the exact directory key, reuse
	// its size and modification time for the folder's info.
	obj = objects.Results[0]
	if obj.Name == dirpath {
		fi.Size = obj.Size
		fi.ModTime = obj.Updated
	}
	return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil
}
func listHandler(c *gin.Context) { gaeContext := appengine.NewContext(c.Request) fhc := &http.Client{ Transport: &CloudStorageTransport{&oauth2.Transport{ Source: google.AppEngineTokenSource(gaeContext, storage.ScopeFullControl), Base: &urlfetch.Transport{Context: gaeContext}, }}, } bucketName := "balde_de_bits" cloudContext := cloud.NewContext(appengine.AppID(gaeContext), fhc) objects, _ := storage.ListObjects(cloudContext, bucketName, nil) usersMap := mapFilesToDictionary(objects) usersStruct := mapDictionaryToObjects(usersMap) c.JSON(200, gin.H{ "users": usersStruct, }) }
func (conn GoogleCloudStorageConnection) listObjects(prefix string, delimiter string) ([]Object, error) { log.Debugf("GoogleCloudStorageConnection listObjects. prefix: %s, delimeter: %s", prefix, delimiter) objects := make([]Object, 0) query := &storage.Query{ Prefix: prefix, Delimiter: delimiter, } for { gcsObjects, err := storage.ListObjects(conn.Context, conn.BucketName, query) if err != nil { return objects, err } if delimiter == "/" { // folders for _, prefix := range gcsObjects.Prefixes { name := strings.TrimSuffix(prefix, delimiter) object := GoogleCloudStorageObject{ Name: name, } objects = append(objects, object) } } else { // regular files for _, gcsObject := range gcsObjects.Results { object := GoogleCloudStorageObject{ Name: gcsObject.Name, } objects = append(objects, object) } } time.Sleep(100 * time.Millisecond) query = gcsObjects.Next if query == nil { break } } log.Debugf("GoogleCloudStorageConnection listObjects returns: %s", objects) return objects, nil }