func (t *ConnTest) BucketsAreSegregatedByName() {
	const objName = "baz"
	var contents []byte
	var err error

	b0, err := t.conn.OpenBucket(t.ctx, "foo")
	AssertEq(nil, err)

	b1, err := t.conn.OpenBucket(t.ctx, "bar")
	AssertEq(nil, err)

	// Add an object with the same name but different contents to each of two
	// buckets.
	_, err = gcsutil.CreateObject(t.ctx, b0, objName, []byte("taco"))
	AssertEq(nil, err)

	_, err = gcsutil.CreateObject(t.ctx, b1, objName, []byte("burrito"))
	AssertEq(nil, err)

	// Each should have stored it independently.
	contents, err = gcsutil.ReadObject(t.ctx, b0, objName)
	AssertEq(nil, err)
	ExpectEq("taco", string(contents))

	contents, err = gcsutil.ReadObject(t.ctx, b1, objName)
	AssertEq(nil, err)
	ExpectEq("burrito", string(contents))
}
func (t *PrefixBucketTest) CopyObject() {
	var err error
	suffix := "taco"
	name := t.prefix + suffix
	contents := "foobar"

	// Create an object through the back door.
	_, err = gcsutil.CreateObject(t.ctx, t.wrapped, name, []byte(contents))
	AssertEq(nil, err)

	// Copy it to a new name.
	newSuffix := "burrito"
	o, err := t.bucket.CopyObject(
		t.ctx,
		&gcs.CopyObjectRequest{
			SrcName: suffix,
			DstName: newSuffix,
		})

	AssertEq(nil, err)
	ExpectEq(newSuffix, o.Name)

	// Read it through the back door.
	actual, err := gcsutil.ReadObject(t.ctx, t.wrapped, t.prefix+newSuffix)
	AssertEq(nil, err)
	ExpectEq(contents, string(actual))
}
func (t *ConnTest) BucketContentsAreStable() {
	const bucketName = "foo"
	const objName = "bar"
	var err error

	// Open the bucket.
	bucket, err := t.conn.OpenBucket(t.ctx, bucketName)
	AssertEq(nil, err)

	// Add an object to the bucket.
	_, err = gcsutil.CreateObject(
		t.ctx,
		bucket,
		objName,
		[]byte("taco"))

	AssertEq(nil, err)

	// Read the object back. It should still be there.
	contents, err := gcsutil.ReadObject(
		t.ctx,
		bucket,
		objName)

	AssertEq(nil, err)
	ExpectEq("taco", string(contents))
}
func (t *MountTest) BasicUsage() {
	var err error
	const fileName = "foo"

	// Grab a bucket.
	bucket, err := t.conn.OpenBucket(t.ctx, "some_bucket")
	AssertEq(nil, err)

	// Mount that bucket.
	mfs, err := t.mount(bucket.Name(), t.dir)
	AssertEq(nil, err)

	// Create a file.
	err = ioutil.WriteFile(path.Join(t.dir, fileName), []byte("taco"), 0400)
	AssertEq(nil, err)

	// Read the object from the bucket.
	contents, err := gcsutil.ReadObject(t.ctx, bucket, fileName)
	AssertEq(nil, err)
	ExpectEq("taco", string(contents))

	// Read the file.
	contents, err = ioutil.ReadFile(path.Join(t.dir, fileName))
	AssertEq(nil, err)
	ExpectEq("taco", string(contents))

	// Unmount and join.
	err = t.unmount()
	AssertEq(nil, err)

	err = mfs.Join(t.ctx)
	AssertEq(nil, err)
}
func (t *IntegrationTest) TruncateThenSync() {
	// Create.
	o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", []byte("taco"))
	AssertEq(nil, err)

	t.create(o)

	// Truncate.
	err = t.mc.Truncate(t.ctx, 2)
	AssertEq(nil, err)

	// Sync should save out the new generation.
	rl, newObj, err := t.sync(o)
	AssertEq(nil, err)

	ExpectNe(o.Generation, newObj.Generation)
	ExpectEq(t.objectGeneration("foo"), newObj.Generation)

	contents, err := gcsutil.ReadObject(t.ctx, t.bucket, "foo")
	AssertEq(nil, err)
	ExpectEq("ta", string(contents))

	// Read via the lease.
	_, err = rl.Seek(0, 0)
	AssertEq(nil, err)

	contents, err = ioutil.ReadAll(rl)
	AssertEq(nil, err)
	ExpectEq("ta", string(contents))
}
func (t *IntegrationTest) TruncateThenSync() {
	// Create.
	o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", []byte("taco"))
	AssertEq(nil, err)

	t.create(o)

	// Truncate.
	t.clock.AdvanceTime(time.Second)
	truncateTime := t.clock.Now()
	err = t.tf.Truncate(2)
	t.clock.AdvanceTime(time.Second)
	AssertEq(nil, err)

	// Sync should save out the new generation.
	newObj, err := t.sync(o)
	AssertEq(nil, err)

	ExpectNe(o.Generation, newObj.Generation)
	ExpectEq(t.objectGeneration("foo"), newObj.Generation)
	ExpectEq(
		truncateTime.UTC().Format(time.RFC3339Nano),
		newObj.Metadata["gcsfuse_mtime"])

	contents, err := gcsutil.ReadObject(t.ctx, t.bucket, "foo")
	AssertEq(nil, err)
	ExpectEq("ta", string(contents))
}
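// Note: several of the integration tests here call an objectGeneration helper
// that is not shown in this section. A minimal sketch, assuming it simply
// stats the named object in t.bucket and fails the test on error (the real
// helper may differ):
func (t *IntegrationTest) objectGeneration(name string) int64 {
	// Stat the object and return its current generation number.
	o, err := t.bucket.StatObject(t.ctx, &gcs.StatObjectRequest{Name: name})
	AssertEq(nil, err)
	return o.Generation
}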
func (t *IntegrationTest) WithinLeaserLimit() {
	AssertLt(len("taco"), fileLeaserLimitBytes)

	// Create.
	o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", []byte("taco"))
	AssertEq(nil, err)

	t.create(o)

	// Extend to be up against the leaser limit, then write out to GCS, which
	// should downgrade to a read lease.
	err = t.mc.Truncate(t.ctx, fileLeaserLimitBytes)
	AssertEq(nil, err)

	rl, _, err := t.sync(o)
	AssertEq(nil, err)

	// The backing object should be present and contain the correct contents.
	contents, err := gcsutil.ReadObject(t.ctx, t.bucket, o.Name)
	AssertEq(nil, err)
	ExpectEq(fileLeaserLimitBytes, len(contents))

	// Delete the backing object.
	err = t.bucket.DeleteObject(t.ctx, &gcs.DeleteObjectRequest{Name: o.Name})
	AssertEq(nil, err)

	// We should still be able to read the contents, because the read lease
	// should still be valid.
	buf := make([]byte, 4)
	n, err := rl.ReadAt(buf, 0)

	AssertEq(nil, err)
	ExpectEq("taco", string(buf[0:n]))
}
func (t *IntegrationTest) LargerThanLeaserLimit() {
	AssertLt(len("taco"), fileLeaserLimitBytes)

	// Create.
	o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", []byte("taco"))
	AssertEq(nil, err)

	t.create(o)

	// Extend to be past the leaser limit, then write out to GCS, which should
	// downgrade to a read lease.
	err = t.mc.Truncate(t.ctx, fileLeaserLimitBytes+1)
	AssertEq(nil, err)

	rl, _, err := t.sync(o)
	AssertEq(nil, err)

	// The backing object should be present and contain the correct contents.
	contents, err := gcsutil.ReadObject(t.ctx, t.bucket, o.Name)
	AssertEq(nil, err)
	ExpectEq(fileLeaserLimitBytes+1, len(contents))

	// Delete the backing object.
	err = t.bucket.DeleteObject(t.ctx, &gcs.DeleteObjectRequest{Name: o.Name})
	AssertEq(nil, err)

	// The contents should be lost, because the leaser should have revoked the
	// read lease.
	_, err = rl.ReadAt(make([]byte, len(contents)), 0)
	ExpectThat(err, Error(HasSubstr("revoked")))
}
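// The tests above (and MultipleInteractions below) reference package-level
// constants whose definitions are not shown in this section. The exact values
// don't matter; anything satisfying the tests' assumptions would do:
// fileLeaserLimitBytes must exceed len("taco"), and chunkSize must be a
// positive size smaller than the limit. A purely hypothetical example:
const (
	chunkSize            = 1 << 12 // illustrative value only
	fileLeaserLimitBytes = 1 << 20 // illustrative value only
)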
func (t *DirTest) CloneToChildFile_DestinationExists() {
	const srcName = "blah/baz"
	dstName := path.Join(dirInodeName, "qux")

	var o *gcs.Object
	var err error

	// Create the source.
	src, err := gcsutil.CreateObject(t.ctx, t.bucket, srcName, []byte("taco"))
	AssertEq(nil, err)

	// And a destination object that will be overwritten.
	_, err = gcsutil.CreateObject(t.ctx, t.bucket, dstName, []byte(""))
	AssertEq(nil, err)

	// Call the inode.
	o, err = t.in.CloneToChildFile(t.ctx, path.Base(dstName), src)
	AssertEq(nil, err)
	AssertNe(nil, o)

	ExpectEq(dstName, o.Name)
	ExpectFalse(inode.IsSymlink(o))
	ExpectEq(len("taco"), o.Size)

	// Check resulting contents.
	contents, err := gcsutil.ReadObject(t.ctx, t.bucket, dstName)
	AssertEq(nil, err)
	ExpectEq("taco", string(contents))
}
func (t *ReadOnlyTest) DeleteFile() {
	// Create an object in the bucket.
	_, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", []byte("taco"))
	AssertEq(nil, err)

	// Attempt to delete it via the file system.
	err = os.Remove(path.Join(t.Dir, "foo"))
	ExpectThat(err, Error(HasSubstr("read-only")))

	// The bucket should not have been modified.
	contents, err := gcsutil.ReadObject(t.ctx, t.bucket, "foo")
	AssertEq(nil, err)
	ExpectEq("taco", string(contents))
}
func (t *FileTest) AppendThenSync() {
	var attrs fuseops.InodeAttributes
	var err error

	AssertEq("taco", t.initialContents)

	// Append some data.
	t.clock.AdvanceTime(time.Second)
	writeTime := t.clock.Now()

	err = t.in.Write(t.ctx, []byte("burrito"), int64(len("taco")))
	AssertEq(nil, err)

	t.clock.AdvanceTime(time.Second)

	// Sync.
	err = t.in.Sync(t.ctx)
	AssertEq(nil, err)

	// The generation should have advanced.
	ExpectLt(t.backingObj.Generation, t.in.SourceGeneration().Object)

	// Stat the current object in the bucket.
	statReq := &gcs.StatObjectRequest{Name: t.in.Name()}
	o, err := t.bucket.StatObject(t.ctx, statReq)

	AssertEq(nil, err)
	ExpectEq(t.in.SourceGeneration().Object, o.Generation)
	ExpectEq(t.in.SourceGeneration().Metadata, o.MetaGeneration)
	ExpectEq(len("tacoburrito"), o.Size)
	ExpectEq(
		writeTime.UTC().Format(time.RFC3339Nano),
		o.Metadata["gcsfuse_mtime"])

	// Read the object's contents.
	contents, err := gcsutil.ReadObject(t.ctx, t.bucket, t.in.Name())

	AssertEq(nil, err)
	ExpectEq("tacoburrito", string(contents))

	// Check attributes.
	attrs, err = t.in.Attributes(t.ctx)
	AssertEq(nil, err)

	ExpectEq(len("tacoburrito"), attrs.Size)
	ExpectThat(attrs.Mtime, timeutil.TimeEq(writeTime.UTC()))
}
func (t *IntegrationTest) AppendThenSync() {
	// Create.
	o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", []byte("taco"))
	AssertEq(nil, err)

	t.create(o)

	// Append some data.
	n, err := t.mc.WriteAt(t.ctx, []byte("burrito"), 4)
	AssertEq(nil, err)
	ExpectEq(len("burrito"), n)

	// Sync should save out the new generation.
	rl, newObj, err := t.sync(o)
	AssertEq(nil, err)

	ExpectNe(o.Generation, newObj.Generation)
	ExpectEq(t.objectGeneration("foo"), newObj.Generation)

	// Read via the bucket.
	contents, err := gcsutil.ReadObject(t.ctx, t.bucket, "foo")
	AssertEq(nil, err)
	ExpectEq("tacoburrito", string(contents))

	// Read via the lease.
	_, err = rl.Seek(0, 0)
	AssertEq(nil, err)

	contents, err = ioutil.ReadAll(rl)
	AssertEq(nil, err)
	ExpectEq("tacoburrito", string(contents))

	// There should be no junk left over in the bucket besides the object of
	// interest.
	objects, runs, err := gcsutil.ListAll(
		t.ctx,
		t.bucket,
		&gcs.ListObjectsRequest{})

	AssertEq(nil, err)
	AssertEq(1, len(objects))
	AssertEq(0, len(runs))

	ExpectEq("foo", objects[0].Name)
}
func (t *DirTest) DeleteChildDir_Exists() {
	const name = "qux"
	objName := path.Join(dirInodeName, name) + "/"

	var err error

	// Create a backing object.
	_, err = gcsutil.CreateObject(t.ctx, t.bucket, objName, []byte("taco"))
	AssertEq(nil, err)

	// Call the inode.
	err = t.in.DeleteChildDir(t.ctx, name)
	AssertEq(nil, err)

	// Check the bucket.
	_, err = gcsutil.ReadObject(t.ctx, t.bucket, objName)
	ExpectThat(err, HasSameTypeAs(&gcs.NotFoundError{}))
}
func (t *DirTest) DeleteChildFile_ParticularGenerationAndMetaGeneration() {
	const name = "qux"
	objName := path.Join(dirInodeName, name)

	var err error

	// Create a backing object.
	o, err := gcsutil.CreateObject(t.ctx, t.bucket, objName, []byte("taco"))
	AssertEq(nil, err)

	// Call the inode.
	err = t.in.DeleteChildFile(t.ctx, name, o.Generation, &o.MetaGeneration)
	AssertEq(nil, err)

	// Check the bucket.
	_, err = gcsutil.ReadObject(t.ctx, t.bucket, objName)
	ExpectThat(err, HasSameTypeAs(&gcs.NotFoundError{}))
}
func (t *IntegrationTest) AppendThenSync() {
	// Create.
	o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", []byte("taco"))
	AssertEq(nil, err)

	t.create(o)

	// Append some data.
	t.clock.AdvanceTime(time.Second)
	writeTime := t.clock.Now()

	n, err := t.tf.WriteAt([]byte("burrito"), 4)
	t.clock.AdvanceTime(time.Second)
	AssertEq(nil, err)
	ExpectEq(len("burrito"), n)

	// Sync should save out the new generation.
	newObj, err := t.sync(o)
	AssertEq(nil, err)

	ExpectNe(o.Generation, newObj.Generation)
	ExpectEq(t.objectGeneration("foo"), newObj.Generation)
	ExpectEq(
		writeTime.UTC().Format(time.RFC3339Nano),
		newObj.Metadata["gcsfuse_mtime"])

	// Read via the bucket.
	contents, err := gcsutil.ReadObject(t.ctx, t.bucket, "foo")
	AssertEq(nil, err)
	ExpectEq("tacoburrito", string(contents))

	// There should be no junk left over in the bucket besides the object of
	// interest.
	objects, runs, err := gcsutil.ListAll(
		t.ctx,
		t.bucket,
		&gcs.ListObjectsRequest{})

	AssertEq(nil, err)
	AssertEq(1, len(objects))
	AssertEq(0, len(runs))

	ExpectEq("foo", objects[0].Name)
}
func (t *IntegrationTest) BackingObjectHasBeenOverwritten_AfterReading() {
	// Create.
	o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", []byte("taco"))
	AssertEq(nil, err)

	t.create(o)

	// Fault in the contents.
	_, err = t.mc.ReadAt(t.ctx, []byte{}, 0)
	AssertEq(nil, err)

	// Overwrite the backing object.
	_, err = gcsutil.CreateObject(t.ctx, t.bucket, "foo", []byte("burrito"))
	AssertEq(nil, err)

	// Reading and modifications should still work.
	_, err = t.mc.ReadAt(t.ctx, []byte{}, 0)
	AssertEq(nil, err)

	_, err = t.mc.WriteAt(t.ctx, []byte("a"), 0)
	AssertEq(nil, err)

	truncateTime := t.clock.Now()
	err = t.mc.Truncate(t.ctx, 3)
	AssertEq(nil, err)
	t.clock.AdvanceTime(time.Second)

	// Stat should see the current state.
	sr, err := t.mc.Stat(t.ctx)
	AssertEq(nil, err)
	ExpectEq(3, sr.Size)
	ExpectEq(0, sr.DirtyThreshold)
	ExpectThat(sr.Mtime, Pointee(timeutil.TimeEq(truncateTime)))

	// Sync should fail with a precondition error.
	_, _, err = t.sync(o)
	ExpectThat(err, HasSameTypeAs(&gcs.PreconditionError{}))

	// The newer version should still be present.
	contents, err := gcsutil.ReadObject(t.ctx, t.bucket, o.Name)
	AssertEq(nil, err)
	ExpectEq("burrito", string(contents))
}
func (t *IntegrationTest) BackingObjectHasBeenDeleted_AfterReading() {
	// Create.
	o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", []byte("taco"))
	AssertEq(nil, err)

	t.create(o)

	// Fault in the contents.
	_, err = t.mc.ReadAt(t.ctx, []byte{}, 0)
	AssertEq(nil, err)

	// Delete the backing object.
	err = t.bucket.DeleteObject(t.ctx, &gcs.DeleteObjectRequest{Name: o.Name})
	AssertEq(nil, err)

	// Reading and modifications should still work.
	_, err = t.mc.ReadAt(t.ctx, []byte{}, 0)
	AssertEq(nil, err)

	_, err = t.mc.WriteAt(t.ctx, []byte("a"), 0)
	AssertEq(nil, err)

	truncateTime := t.clock.Now()
	err = t.mc.Truncate(t.ctx, 1)
	AssertEq(nil, err)
	t.clock.AdvanceTime(time.Second)

	// Stat should see the current state.
	sr, err := t.mc.Stat(t.ctx)
	AssertEq(nil, err)
	ExpectEq(1, sr.Size)
	ExpectEq(0, sr.DirtyThreshold)
	ExpectThat(sr.Mtime, Pointee(timeutil.TimeEq(truncateTime)))

	// Sync should fail with a precondition error.
	_, _, err = t.sync(o)
	ExpectThat(err, HasSameTypeAs(&gcs.PreconditionError{}))

	// Nothing should have been created.
	_, err = gcsutil.ReadObject(t.ctx, t.bucket, o.Name)
	ExpectThat(err, HasSameTypeAs(&gcs.NotFoundError{}))
}
func (t *DirTest) DeleteChildFile_WrongGeneration() {
	const name = "qux"
	objName := path.Join(dirInodeName, name)

	var err error

	// Create a backing object.
	o, err := gcsutil.CreateObject(t.ctx, t.bucket, objName, []byte("taco"))
	AssertEq(nil, err)

	// Call the inode with the wrong generation. No error should be returned.
	err = t.in.DeleteChildFile(t.ctx, name, o.Generation+1, &o.MetaGeneration)
	AssertEq(nil, err)

	// The original generation should still be there.
	contents, err := gcsutil.ReadObject(t.ctx, t.bucket, objName)
	AssertEq(nil, err)
	ExpectEq("taco", string(contents))
}
func (t *FileTest) WriteThenSync() {
	var attrs fuseops.InodeAttributes
	var err error

	AssertEq("taco", t.initialContents)

	// Overwrite a byte.
	err = t.in.Write(t.ctx, []byte("p"), 0)
	AssertEq(nil, err)

	t.clock.AdvanceTime(time.Second)

	// Sync.
	err = t.in.Sync(t.ctx)
	AssertEq(nil, err)

	// The generation should have advanced.
	ExpectLt(t.backingObj.Generation, t.in.SourceGeneration().Object)

	// Stat the current object in the bucket.
	statReq := &gcs.StatObjectRequest{Name: t.in.Name()}
	o, err := t.bucket.StatObject(t.ctx, statReq)

	AssertEq(nil, err)
	ExpectEq(t.in.SourceGeneration().Object, o.Generation)
	ExpectEq(len("paco"), o.Size)

	// Read the object's contents.
	contents, err := gcsutil.ReadObject(t.ctx, t.bucket, t.in.Name())

	AssertEq(nil, err)
	ExpectEq("paco", string(contents))

	// Check attributes.
	attrs, err = t.in.Attributes(t.ctx)
	AssertEq(nil, err)

	ExpectEq(len("paco"), attrs.Size)
	ExpectThat(attrs.Mtime, timeutil.TimeEq(o.Updated))
}
func (t *PrefixBucketTest) ComposeObjects() {
	var err error
	suffix0 := "taco"
	contents0 := "foo"

	suffix1 := "burrito"
	contents1 := "bar"

	// Create two objects through the back door.
	err = gcsutil.CreateObjects(
		t.ctx,
		t.wrapped,
		map[string][]byte{
			t.prefix + suffix0: []byte(contents0),
			t.prefix + suffix1: []byte(contents1),
		})

	AssertEq(nil, err)

	// Compose them.
	newSuffix := "enchilada"
	o, err := t.bucket.ComposeObjects(
		t.ctx,
		&gcs.ComposeObjectsRequest{
			DstName: newSuffix,
			Sources: []gcs.ComposeSource{
				{Name: suffix0},
				{Name: suffix1},
			},
		})

	AssertEq(nil, err)
	ExpectEq(newSuffix, o.Name)

	// Read it through the back door.
	actual, err := gcsutil.ReadObject(t.ctx, t.wrapped, t.prefix+newSuffix)
	AssertEq(nil, err)
	ExpectEq(contents0+contents1, string(actual))
}
func (t *PrefixBucketTest) CreateObject() {
	var err error
	suffix := "taco"
	contents := "foobar"

	// Create the object.
	o, err := t.bucket.CreateObject(
		t.ctx,
		&gcs.CreateObjectRequest{
			Name:            suffix,
			ContentLanguage: "en-GB",
			Contents:        strings.NewReader(contents),
		})

	AssertEq(nil, err)
	ExpectEq(suffix, o.Name)
	ExpectEq("en-GB", o.ContentLanguage)

	// Read it through the back door.
	actual, err := gcsutil.ReadObject(t.ctx, t.wrapped, t.prefix+suffix)
	AssertEq(nil, err)
	ExpectEq(contents, string(actual))
}
func (t *IntegrationTest) MultipleInteractions() {
	// We will run through the script below for multiple interesting object
	// sizes.
	sizes := []int{
		0,
		1,
		chunkSize - 1,
		chunkSize,
		chunkSize + 1,
		3*chunkSize - 1,
		3 * chunkSize,
		3*chunkSize + 1,
		fileLeaserLimitBytes - 1,
		fileLeaserLimitBytes,
		fileLeaserLimitBytes + 1,
		((fileLeaserLimitBytes / chunkSize) - 1) * chunkSize,
		(fileLeaserLimitBytes / chunkSize) * chunkSize,
		((fileLeaserLimitBytes / chunkSize) + 1) * chunkSize,
	}

	// Generate random contents for the maximum size.
	var maxSize int
	for _, size := range sizes {
		if size > maxSize {
			maxSize = size
		}
	}

	randData := randBytes(maxSize)

	// Transition the mutable object in and out of the dirty state. Make sure
	// everything stays consistent.
	for i, size := range sizes {
		desc := fmt.Sprintf("test case %d (size %d)", i, size)
		name := fmt.Sprintf("obj_%d", i)
		buf := make([]byte, size)

		// Create the backing object with random initial contents.
		expectedContents := make([]byte, size)
		copy(expectedContents, randData)

		o, err := gcsutil.CreateObject(
			t.ctx,
			t.bucket,
			name,
			expectedContents)

		AssertEq(nil, err)

		// Create a mutable object around it.
		t.create(o)

		// Read the contents of the mutable object.
		_, err = t.mc.ReadAt(t.ctx, buf, 0)

		AssertThat(err, AnyOf(nil, io.EOF))
		if !bytes.Equal(buf, expectedContents) {
			AddFailure("Contents mismatch for %s", desc)
			AbortTest()
		}

		// Modify some bytes.
		if size > 0 {
			expectedContents[0] = 17
			expectedContents[size/2] = 19
			expectedContents[size-1] = 23

			_, err = t.mc.WriteAt(t.ctx, []byte{17}, 0)
			AssertEq(nil, err)

			_, err = t.mc.WriteAt(t.ctx, []byte{19}, int64(size/2))
			AssertEq(nil, err)

			_, err = t.mc.WriteAt(t.ctx, []byte{23}, int64(size-1))
			AssertEq(nil, err)
		}

		// Compare contents again.
		_, err = t.mc.ReadAt(t.ctx, buf, 0)

		AssertThat(err, AnyOf(nil, io.EOF))
		if !bytes.Equal(buf, expectedContents) {
			AddFailure("Contents mismatch for %s", desc)
			AbortTest()
		}

		// Sync and recreate if necessary.
		_, newObj, err := t.sync(o)
		AssertEq(nil, err)

		if newObj != nil {
			t.create(newObj)
		}

		// Check the new backing object's contents.
		objContents, err := gcsutil.ReadObject(t.ctx, t.bucket, name)

		AssertEq(nil, err)
		if !bytes.Equal(objContents, expectedContents) {
			AddFailure("Contents mismatch for %s", desc)
			AbortTest()
		}

		// Compare contents again.
		_, err = t.mc.ReadAt(t.ctx, buf, 0)

		AssertThat(err, AnyOf(nil, io.EOF))
		if !bytes.Equal(buf, expectedContents) {
			AddFailure("Contents mismatch for %s", desc)
			AbortTest()
		}

		// Dirty again.
		if size > 0 {
			expectedContents[0] = 29

			_, err = t.mc.WriteAt(t.ctx, []byte{29}, 0)
			AssertEq(nil, err)
		}

		// Compare contents again.
		_, err = t.mc.ReadAt(t.ctx, buf, 0)

		AssertThat(err, AnyOf(nil, io.EOF))
		if !bytes.Equal(buf, expectedContents) {
			AddFailure("Contents mismatch for %s", desc)
			AbortTest()
		}
	}
}
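// randBytes is used by MultipleInteractions above but is not defined in this
// section. A minimal sketch, assuming only that it returns n pseudo-random
// bytes (test-quality randomness is sufficient here):
func randBytes(n int) []byte {
	b := make([]byte, n)
	for i := range b {
		b[i] = byte(rand.Int()) // math/rand; low byte of a pseudo-random int
	}
	return b
}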
func verifyObjects(
	ctx context.Context,
	bucket gcs.Bucket,
	verifyAfter time.Duration,
	in <-chan record,
	out chan<- record) (err error) {
	// Set up a worker function.
	worker := func(ctx context.Context) (err error) {
		for r := range in {
			name := fmt.Sprintf("%s%x", objectNamePrefix, r.sha1)

			// Wait until it is time.
			wakeTime := r.creationTime.Add(verifyAfter)

			select {
			case <-ctx.Done():
				err = ctx.Err()
				return

			case <-time.After(wakeTime.Sub(time.Now())):
			}

			// Attempt to read the object.
			var contents []byte
			contents, err = gcsutil.ReadObject(ctx, bucket, name)
			if err != nil {
				err = fmt.Errorf("ReadObject(%q): %v", name, err)
				return
			}

			// Check the contents.
			actual := sha1.Sum(contents)
			if actual != r.sha1 {
				err = fmt.Errorf(
					"SHA1 mismatch for %q: %x vs. %x",
					name,
					actual,
					r.sha1)
				return
			}

			log.Printf("Verified object %q.", name)

			// Pass on the record if we've been asked to.
			if out != nil {
				select {
				case <-ctx.Done():
					err = ctx.Err()
					return

				case out <- r:
				}
			}
		}

		return
	}

	// Run a bunch of workers.
	b := syncutil.NewBundle(ctx)
	for i := 0; i < perStageParallelism; i++ {
		b.Add(worker)
	}

	err = b.Join()
	return
}
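// The record type consumed by verifyObjects is not defined in this section.
// Judging from the fields referenced above (r.sha1 compares directly against
// the [20]byte result of sha1.Sum, and r.creationTime supports Add), it
// presumably resembles the following; the exact declaration is an assumption:
type record struct {
	sha1         [sha1.Size]byte // SHA-1 of the object's contents
	creationTime time.Time       // when the object was written
}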