// Read the contents of the latest generation of the object with the supplied // name. func ReadObject( ctx context.Context, bucket gcs.Bucket, name string) (contents []byte, err error) { // Call the bucket. req := &gcs.ReadObjectRequest{ Name: name, } rc, err := bucket.NewReader(ctx, req) if err != nil { return } // Don't forget to close. defer func() { closeErr := rc.Close() if closeErr != nil && err == nil { err = fmt.Errorf("Close: %v", closeErr) } }() // Read the contents. contents, err = ioutil.ReadAll(rc) if err != nil { err = fmt.Errorf("ReadAll: %v", err) return } return }
func readOnce( ctx context.Context, o *gcs.Object, bucket gcs.Bucket) (r result, err error) { // Is the object large enough? if o.Size < *fSize { err = fmt.Errorf( "Object of size %d not large enough for read size %d", o.Size, *fSize) return } // Set up an appropriate request. req := &gcs.ReadObjectRequest{ Name: o.Name, Generation: o.Generation, Range: &gcs.ByteRange{}, } req.Range.Start = uint64(rand.Int63n(int64(o.Size - *fSize))) req.Range.Limit = req.Range.Start + *fSize // Create the reader. start := time.Now() rc, err := bucket.NewReader(ctx, req) if err != nil { err = fmt.Errorf("NewReader: %v", err) return } defer func() { closeErr := rc.Close() if err == nil && closeErr != nil { err = fmt.Errorf("Close: %v", closeErr) } }() // Measure the time to first byte. _, err = rc.Read([]byte{0}) if err != nil { err = fmt.Errorf("Read: %v", err) return } r.FirstByteLatency = time.Since(start) // And the time to read everything. n, err := io.Copy(ioutil.Discard, rc) if err != nil { err = fmt.Errorf("Copy: %v", err) return } r.FullBodyDuration = time.Since(start) r.BytesRead = int(n + 1) return }
// Repeatedly call bucket.ListObjects until there is nothing further to list, // returning all objects and collapsed runs encountered. // // May modify *req. func ListAll( ctx context.Context, bucket gcs.Bucket, req *gcs.ListObjectsRequest) ( objects []*gcs.Object, runs []string, err error) { for { // Grab one set of results. var listing *gcs.Listing if listing, err = bucket.ListObjects(ctx, req); err != nil { return } // Accumulate the results. objects = append(objects, listing.Objects...) runs = append(runs, listing.CollapsedRuns...) // Are we done? if listing.ContinuationToken == "" { break } req.ContinuationToken = listing.ContinuationToken } return }
// Create an object with the supplied contents in the given bucket with the // given name. func CreateObject( ctx context.Context, bucket gcs.Bucket, name string, contents string) (*gcs.Object, error) { req := &gcs.CreateObjectRequest{ Name: name, Contents: strings.NewReader(contents), } return bucket.CreateObject(ctx, req) }
// Delete all objects from the supplied bucket. Results are undefined if the
// bucket is being concurrently updated.
//
// Implemented as a three-stage pipeline running within a syncutil bundle:
// one goroutine lists objects, one extracts names, and a fixed pool of
// workers issues the deletes. Each stage closes its output channel when
// done so downstream stages terminate cleanly; bundle.Join waits for all
// of them and returns the first error (cancelling the shared context).
func DeleteAllObjects(
	ctx context.Context,
	bucket gcs.Bucket) error {
	bundle := syncutil.NewBundle(ctx)

	// Stage 1: list all of the objects in the bucket (empty prefix matches
	// everything). The small buffer decouples listing from consumption.
	objects := make(chan *gcs.Object, 100)
	bundle.Add(func(ctx context.Context) error {
		defer close(objects)
		return ListPrefix(ctx, bucket, "", objects)
	})

	// Stage 2: strip everything but the name. The large buffer lets listing
	// race ahead of the (slower) delete workers.
	objectNames := make(chan string, 10e3)
	bundle.Add(func(ctx context.Context) (err error) {
		defer close(objectNames)
		for o := range objects {
			// Forward the name, bailing out promptly on cancellation.
			select {
			case <-ctx.Done():
				err = ctx.Err()
				return

			case objectNames <- o.Name:
			}
		}

		return
	})

	// Stage 3: delete the objects in parallel. Each worker drains the name
	// channel until it is closed or a delete fails.
	const parallelism = 64
	for i := 0; i < parallelism; i++ {
		bundle.Add(func(ctx context.Context) error {
			for objectName := range objectNames {
				err := bucket.DeleteObject(
					ctx,
					&gcs.DeleteObjectRequest{
						Name: objectName,
					})

				if err != nil {
					return err
				}
			}

			return nil
		})
	}

	// Wait for the whole pipeline, propagating the first error.
	return bundle.Join()
}
func getObject( ctx context.Context, bucket gcs.Bucket) (o *gcs.Object, err error) { if *fObject == "" { err = errors.New("You must set --object.") return } o, err = bucket.StatObject( ctx, &gcs.StatObjectRequest{Name: *fObject}) return }
func setSymlinkTarget( ctx context.Context, bucket gcs.Bucket, objName string, target string) (err error) { _, err = bucket.UpdateObject( ctx, &gcs.UpdateObjectRequest{ Name: objName, Metadata: map[string]*string{ inode.SymlinkMetadataKey: &target, }, }) return }
// List the supplied object name prefix to find out whether it is non-empty. func objectNamePrefixNonEmpty( ctx context.Context, bucket gcs.Bucket, prefix string) (nonEmpty bool, err error) { req := &gcs.ListObjectsRequest{ Prefix: prefix, MaxResults: 1, } listing, err := bucket.ListObjects(ctx, req) if err != nil { err = fmt.Errorf("ListObjects: %v", err) return } nonEmpty = len(listing.Objects) != 0 return }
// List objects in the supplied bucket whose name starts with the given prefix. // Write them into the supplied channel in an undefined order. func ListPrefix( ctx context.Context, bucket gcs.Bucket, prefix string, objects chan<- *gcs.Object) (err error) { req := &gcs.ListObjectsRequest{ Prefix: prefix, } // List until we run out. for { // Fetch the next batch. var listing *gcs.Listing listing, err = bucket.ListObjects(ctx, req) if err != nil { err = fmt.Errorf("ListObjects: %v", err) return } // Pass on each object. for _, o := range listing.Objects { select { case objects <- o: // Cancelled? case <-ctx.Done(): err = ctx.Err() return } } // Are we done? if listing.ContinuationToken == "" { break } req.ContinuationToken = listing.ContinuationToken } return }
// Stat the object with the given name, returning (nil, nil) if the object // doesn't exist rather than failing. func statObjectMayNotExist( ctx context.Context, bucket gcs.Bucket, name string) (o *gcs.Object, err error) { // Call the bucket. req := &gcs.StatObjectRequest{ Name: name, } o, err = bucket.StatObject(ctx, req) // Suppress "not found" errors. if _, ok := err.(*gcs.NotFoundError); ok { err = nil } // Annotate others. if err != nil { err = fmt.Errorf("StatObject: %v", err) return } return }
// Repeatedly create objects of random size (content-addressed by SHA-1)
// in the supplied bucket, writing a record for each to the out channel.
// Runs perStageParallelism workers until the context is cancelled or an
// error occurs; the aggregate creation rate is throttled to createRateHz.
func createObjects(
	ctx context.Context,
	bucket gcs.Bucket,
	out chan<- record) (err error) {
	// Shared rate limiter across all workers.
	// NOTE(review): time.Tick's underlying ticker is never stopped; fine if
	// this function runs for the life of the process — confirm.
	throttle := time.Tick(time.Second / createRateHz)

	// Set up a worker function.
	worker := func(ctx context.Context) (err error) {
		// Each worker gets its own PRNG; rand.Source is not goroutine-safe.
		randSrc := rand.New(rand.NewSource(makeSeed()))
		for {
			// Choose a random size (every once in awhile making sure we see the
			// max), and generate content of that size.
			const maxSize = 1 << 24
			var size int
			if randSrc.Int31n(100) == 0 {
				size = maxSize
			} else {
				size = int(randSrc.Int31n(maxSize + 1))
			}

			content := randBytes(randSrc, size)

			// Compute hashes and checksums.
			sha1 := sha1.Sum(content)
			crc32c := *gcsutil.CRC32C(content)

			// Wait for permission to proceed.
			select {
			case <-ctx.Done():
				err = ctx.Err()
				return

			case <-throttle:
			}

			// Create an object named after its SHA-1, embedding the expected
			// hashes in metadata so later verification can cross-check, and
			// supplying the CRC so the server validates the upload.
			req := &gcs.CreateObjectRequest{
				Name:     fmt.Sprintf("%s%x", objectNamePrefix, sha1),
				Contents: bytes.NewReader(content),
				CRC32C:   &crc32c,
				Metadata: map[string]string{
					"expected_sha1":   fmt.Sprintf("%x", sha1),
					"expected_crc32c": fmt.Sprintf("%#08x", crc32c),
				},
			}

			var o *gcs.Object
			o, err = bucket.CreateObject(ctx, req)
			if err != nil {
				err = fmt.Errorf("CreateObject(%q): %v", req.Name, err)
				return
			}

			log.Printf("Created object %q.", req.Name)

			// Check the object: the server's view must match what we sent.
			if o.Name != req.Name {
				err = fmt.Errorf(
					"Name mismatch: %q vs. %q",
					o.Name,
					req.Name)

				return
			}

			if o.CRC32C != crc32c {
				err = fmt.Errorf(
					"Object %q CRC mismatch: %#08x vs. %#08x",
					o.Name,
					o.CRC32C,
					crc32c)

				return
			}

			// Write out the record, again respecting cancellation.
			r := record{
				sha1:         sha1,
				creationTime: time.Now(),
			}

			select {
			case <-ctx.Done():
				err = ctx.Err()
				return

			case out <- r:
			}
		}
	}

	// Run a bunch of workers.
	b := syncutil.NewBundle(ctx)
	for i := 0; i < perStageParallelism; i++ {
		b.Add(worker)
	}

	err = b.Join()
	return
}
// Delete all objects under tmpObjectPrefix that have not been updated within
// the staleness threshold, returning the number deleted.
//
// Implemented as a three-stage pipeline (list, filter stale, delete) within
// a syncutil bundle; each stage closes its output channel when done.
func garbageCollectOnce(
	ctx context.Context,
	tmpObjectPrefix string,
	bucket gcs.Bucket) (objectsDeleted uint64, err error) {
	// Objects younger than this are presumed still in use and are skipped.
	const stalenessThreshold = 30 * time.Minute
	b := syncutil.NewBundle(ctx)

	// List all objects with the temporary prefix.
	objects := make(chan *gcs.Object, 100)
	b.Add(func(ctx context.Context) (err error) {
		defer close(objects)
		err = gcsutil.ListPrefix(ctx, bucket, tmpObjectPrefix, objects)
		if err != nil {
			err = fmt.Errorf("ListPrefix: %v", err)
			return
		}

		return
	})

	// Filter to the names of objects that are stale. The cutoff is computed
	// once, before the pipeline starts, so all objects are judged against
	// the same instant.
	now := time.Now()
	staleNames := make(chan string, 100)
	b.Add(func(ctx context.Context) (err error) {
		defer close(staleNames)
		for o := range objects {
			// Recently-updated objects are not garbage.
			if now.Sub(o.Updated) < stalenessThreshold {
				continue
			}

			select {
			case <-ctx.Done():
				err = ctx.Err()
				return

			case staleNames <- o.Name:
			}
		}

		return
	})

	// Delete those objects. The counter is updated atomically because this
	// goroutine may still be running when Join observes an error elsewhere.
	b.Add(func(ctx context.Context) (err error) {
		for name := range staleNames {
			err = bucket.DeleteObject(
				ctx,
				&gcs.DeleteObjectRequest{
					Name: name,
				})

			if err != nil {
				err = fmt.Errorf("DeleteObject(%q): %v", name, err)
				return
			}

			atomic.AddUint64(&objectsDeleted, 1)
		}

		return
	})

	err = b.Join()
	return
}