func TestS3(t *testing.T) { if *bucket == "" || *key == "" || *secret == "" { t.Skip("Skipping test because at least one of -s3_key, -s3_secret, or -s3_bucket flags has not been provided.") } if !strings.HasPrefix(*bucket, "camlistore-") || !strings.HasSuffix(*bucket, "-test") { t.Fatalf("bogus bucket name %q; must begin with 'camlistore-' and end in '-test'", *bucket) } storagetest.Test(t, func(t *testing.T) (sto blobserver.Storage, cleanup func()) { sto, err := newFromConfig(nil, jsonconfig.Obj{ "aws_access_key": *key, "aws_secret_access_key": *secret, "bucket": *bucket, }) if err != nil { t.Fatalf("newFromConfig error: %v", err) } if !testing.Short() { log.Printf("Warning: this test does many serial operations. Without the go test -short flag, this test will be very slow.") } clearBucket := func() { var all []blob.Ref blobserver.EnumerateAll(context.New(), sto, func(sb blob.SizedRef) error { t.Logf("Deleting: %v", sb.Ref) all = append(all, sb.Ref) return nil }) if err := sto.RemoveBlobs(all); err != nil { t.Fatalf("Error removing blobs during cleanup: %v", err) } } clearBucket() return sto, clearBucket }) }
func TestReplica(t *testing.T) { storagetest.Test(t, func(t *testing.T) (sto blobserver.Storage, cleanup func()) { sto = newReplica(t, map[string]interface{}{ "backends": []interface{}{"/good-1/", "/good-2/"}, }) return sto, func() {} }) }
func TestStorage(t *testing.T) { storagetest.Test(t, func(t *testing.T) (sto blobserver.Storage, cleanup func()) { s := &storage{ small: new(test.Fetcher), large: new(test.Fetcher), meta: sorted.NewMemoryKeyValue(), log: test.NewLogger(t, "blobpacked: "), } s.init() return s, func() {} }) }
func TestStorageNoSmallSubfetch(t *testing.T) { storagetest.Test(t, func(t *testing.T) (sto blobserver.Storage, cleanup func()) { s := &storage{ // We need to hide SubFetcher, to test *storage's SubFetch, as it delegates // to the underlying SubFetcher, if small implements that interface. small: hideSubFetcher(new(test.Fetcher)), large: new(test.Fetcher), meta: sorted.NewMemoryKeyValue(), log: test.NewLogger(t, "blobpacked: "), } s.init() return s, func() {} }) }
func TestS3(t *testing.T) { cfgFile := os.Getenv("CAMLI_S3_TEST_CONFIG_JSON") if cfgFile == "" { t.Skip("Skipping manual test. To enable, set the environment variable CAMLI_S3_TEST_CONFIG_JSON to the path of a JSON configuration for the s3 storage type.") } conf, err := jsonconfig.ReadFile(cfgFile) if err != nil { t.Fatalf("Error reading s3 configuration file %s: %v", cfgFile, err) } storagetest.Test(t, func(t *testing.T) (sto blobserver.Storage, cleanup func()) { sto, err := newFromConfig(nil, conf) if err != nil { t.Fatalf("newFromConfig error: %v", err) } return sto, func() {} }) }
func TestStorageTest(t *testing.T) { storagetest.Test(t, func(t *testing.T) (_ blobserver.Storage, cleanup func()) { ld := test.NewLoader() s1, _ := ld.GetStorage("/good-schema/") s2, _ := ld.GetStorage("/good-other/") ld.SetStorage("/replica-all/", replica.NewForTest([]blobserver.Storage{s1, s2})) sto := newCond(t, ld, map[string]interface{}{ "write": map[string]interface{}{ "if": "isSchema", "then": "/good-schema/", "else": "/good-other/", }, "read": "/replica-all/", "remove": "/replica-all/", }) return sto, func() {} }) }
// TestMongoStorage tests against a real MongoDB instance, using a Docker container. // Currently using https://index.docker.io/u/robinvdvleuten/mongo/ func TestMongoStorage(t *testing.T) { // SetupMongoContainer may skip or fatal the test if docker isn't found or something goes wrong when setting up the container. // Thus, no error is returned containerID, ip := dockertest.SetupMongoContainer(t) defer containerID.KillRemove(t) sto, err := newMongoStorage(config{ server: ip, database: "camlitest", collection: "blobs", }) if err != nil { t.Fatalf("mongo.NewMongoStorage = %v", err) } storagetest.Test(t, func(t *testing.T) (blobserver.Storage, func()) { return sto, func() {} }) }
func TestStorageTest(t *testing.T) { storagetest.Test(t, func(t *testing.T) (sto blobserver.Storage, cleanup func()) { ld := test.NewLoader() return newNamespace(t, ld), func() {} }) }
// TestShardBasic runs the storagetest suite against a shard storage.
func TestShardBasic(t *testing.T) {
	storagetest.Test(t, func(t *testing.T) (sto blobserver.Storage, cleanup func()) {
		// Fix: return a no-op cleanup instead of nil, matching every other
		// storagetest factory in this package; a caller that invokes
		// cleanup() unconditionally would otherwise panic on a nil func.
		return newTestStorage(t).sto, func() {}
	})
}
func TestDiskpacked(t *testing.T) { storagetest.Test(t, newTempDiskpacked) }
func TestDiskpackedAltIndex(t *testing.T) { storagetest.Test(t, newTempDiskpackedMemory) }
func TestLocaldisk(t *testing.T) { storagetest.Test(t, func(t *testing.T) (blobserver.Storage, func()) { ds := NewStorage(t) return ds, func() { cleanUp(ds) } }) }
// TestMemoryStorage tests against an in-memory blobserver. func TestMemoryStorage(t *testing.T) { storagetest.Test(t, func(t *testing.T) (blobserver.Storage, func()) { return &memory.Storage{}, func() {} }) }
func testStorage(t *testing.T, bucketDir string) { if *bucket == "" || *key == "" || *secret == "" { t.Skip("Skipping test because at least one of -s3_key, -s3_secret, or -s3_bucket flags has not been provided.") } if !strings.HasPrefix(*bucket, "camlistore-") || !strings.HasSuffix(*bucket, "-test") { t.Fatalf("bogus bucket name %q; must begin with 'camlistore-' and end in '-test'", *bucket) } bucketWithDir := path.Join(*bucket, bucketDir) storagetest.Test(t, func(t *testing.T) (sto blobserver.Storage, cleanup func()) { sto, err := newFromConfig(nil, jsonconfig.Obj{ "aws_access_key": *key, "aws_secret_access_key": *secret, "bucket": bucketWithDir, }) if err != nil { t.Fatalf("newFromConfig error: %v", err) } if !testing.Short() { log.Printf("Warning: this test does many serial operations. Without the go test -short flag, this test will be very slow.") } if bucketWithDir != *bucket { // Adding "a", and "c" objects in the bucket to make sure objects out of the // "directory" are not touched and have no influence. for _, key := range []string{"a", "c"} { var buf bytes.Buffer md5h := md5.New() size, err := io.Copy(io.MultiWriter(&buf, md5h), strings.NewReader(key)) if err != nil { t.Fatalf("could not insert object %s in bucket %v: %v", key, sto.(*s3Storage).bucket, err) } if err := sto.(*s3Storage).s3Client.PutObject( key, sto.(*s3Storage).bucket, md5h, size, &buf); err != nil { t.Fatalf("could not insert object %s in bucket %v: %v", key, sto.(*s3Storage).bucket, err) } } } clearBucket := func(beforeTests bool) func() { return func() { var all []blob.Ref blobserver.EnumerateAll(context.TODO(), sto, func(sb blob.SizedRef) error { t.Logf("Deleting: %v", sb.Ref) all = append(all, sb.Ref) return nil }) if err := sto.RemoveBlobs(all); err != nil { t.Fatalf("Error removing blobs during cleanup: %v", err) } if beforeTests { return } if bucketWithDir != *bucket { // checking that "a" and "c" at the root were left untouched. 
for _, key := range []string{"a", "c"} { if _, _, err := sto.(*s3Storage).s3Client.Get(sto.(*s3Storage).bucket, key); err != nil { t.Fatalf("could not find object %s after tests: %v", key, err) } if err := sto.(*s3Storage).s3Client.Delete(sto.(*s3Storage).bucket, key); err != nil { t.Fatalf("could not remove object %s after tests: %v", key, err) } } } } } clearBucket(true)() return sto, clearBucket(false) }) }
func TestFetcher(t *testing.T) { storagetest.Test(t, func(t *testing.T) (sto blobserver.Storage, cleanup func()) { return new(test.Fetcher), func() {} }) }