func ScanPutMatch(t btesting.T, bucketId typing.BucketId, settings *ScanSettings, scanEntries []ScanEntry) {
	// First put some entries
	for _, entry := range scanEntries {
		t.Request(btesting.Request{
			Input: btesting.Object{
				"Operation": "Put",
				"Data": btesting.Object{
					"BucketId": writer.BucketId(bucketId),
					"Key":      writer.Key(entry.Key),
					"Value":    "", // Currently an empty value
				},
			},
			Expecting: btesting.Object{
				"Code": eval.RetcodeOk(),
			},
		})
		if t.Failed() {
			return
		}
	}
	// ... then see if we can find them
	ScanMatch(t, bucketId, settings, scanEntries)
}
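// A hedged usage sketch of ScanPutMatch (not part of the original suite):
// the key strings and settings below are illustrative only, and the exact
// field types of ScanSettings and ScanEntry are assumptions (see the struct
// sketch after ScanMatch below).
func scanPutMatchUsageSketch(t btesting.T, bucketId typing.BucketId) {
	ScanPutMatch(t, bucketId, &ScanSettings{
		FromKey:          "",    // scan from the very beginning (illustrative)
		Limit:            10,    // more than the number of entries we put
		ExpectToHaveMore: false, // everything should fit into one scan page
	}, []ScanEntry{
		{Key: "alpha", Match: true}, // expected to be found by the scan
		{Key: "beta", Match: true},  // expected to be found by the scan
	})
}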
// 1. Creates a bucket (storage)
// 2. Sees what metadata can be found for that bucket.
func testScanMetadata(t btesting.T) {
	// The store bucket is the simplest one; it currently has only a single metadata entry:
	// 'const.system.type_id'

	// First create a bucket
	var bucketId typing.BucketId
	operations.CreateBucket(t, typing.TypeId_Store, &bucketId)
	if t.Failed() {
		return
	}

	var keyAsBinary []byte
	t.Request(btesting.Request{
		Input: btesting.Object{
			"Operation": "Scan",
			"Data": btesting.Object{
				"BucketId": fmt.Sprintf(":meta-id:%v", bucketId.ToBase32String()),
				"FromKey":  "",
				"Limit":    1,
			},
		},
		Expecting: btesting.Object{
			"Code": eval.RetcodeOk(),
			"Data": btesting.Object{
				"HasMore": false,
				"Results": btesting.Array{
					btesting.Object{
						"Key": btesting.Array{eval.IsAnyBinaryB32(&keyAsBinary)},
					},
				},
			},
		},
	})
	if t.Failed() {
		return
	}

	keyAsString := string(keyAsBinary)
	if keyAsString != "const.system.type_id" {
		t.Errorf("Expecting to have the metadata const.system.type_id but have %v", keyAsString)
		return
	}
}
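// The ":meta-id:" prefix above is how the test addresses a bucket's metadata
// rather than its data. A small convenience sketch capturing that pattern
// (the helper itself is not part of the original suite):
func metaBucketId(bucketId typing.BucketId) string {
	// Base32-encode the bucket id and prefix it so the scan targets metadata
	return fmt.Sprintf(":meta-id:%v", bucketId.ToBase32String())
}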
// Uploads something and then downloads it again
func testUploadDownload(t btesting.T, length int) {
	// First create a blob bucket
	var bucketId typing.BucketId
	operations.CreateBucket(t, typing.TypeId_BlobStore, &bucketId)
	if t.Failed() {
		return
	}

	// Create a buffer and fill it with random data
	buf := make([]byte, length)
	if _, err := io.ReadFull(rand.Reader, buf); err != nil {
		t.Errorf("Error generating random data: %v", err)
		return
	}
	reader := bytes.NewReader(buf)
	hash := Upload(t, bucketId, reader)
	if t.Failed() {
		return
	}

	// Now download
	writer := &bytes.Buffer{}
	resultingLength := Download(t, bucketId, hash, writer)
	if t.Failed() {
		return
	}

	// See if the reported length is the same as the upload length
	if resultingLength != uint64(length) {
		t.Errorf("Got different lengths, expecting %v but have %v", length, resultingLength)
		return
	}
	// See if the downloaded buffer has the same length as the upload
	if len(writer.Bytes()) != length {
		t.Errorf("Got different lengths, expecting %v but have %v in the buffer", length, len(writer.Bytes()))
		return
	}
	// Now compare the two buffers
	if !bytes.Equal(buf, writer.Bytes()) {
		t.Errorf("Downloaded different data than the data just uploaded")
		return
	}
}
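// Round trips like the one above are most interesting around the chunk
// boundary used by Upload. A hedged driver sketch; the concrete lengths are
// illustrative, not taken from the original suite:
func testUploadDownloadAroundChunkBoundary(t btesting.T) {
	for _, length := range []int{1, chunkSize - 1, chunkSize, chunkSize + 1, 3*chunkSize + 7} {
		testUploadDownload(t, length)
		if t.Failed() {
			return
		}
	}
}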
func Upload(t btesting.T, bucketId typing.BucketId, reader io.Reader) (hash []byte) {
	demoId := []byte("demoId")
	for {
		buf := make([]byte, chunkSize)
		numRead, err := reader.Read(buf)
		if err != nil && err != io.EOF {
			t.Errorf("Error reading: %v", err)
			return
		}
		if numRead > 0 {
			buf = buf[:numRead]
			// ##PUT: keys = incubation/[ID]/append, values = [data]
			operations.Put(t, bucketId, typing.Key{[]byte("incubation"), demoId, []byte("append")}, typing.Value{buf})
			if t.Failed() {
				return
			}
		}
		if numRead != chunkSize {
			// End
			break
		}
	}

	// Get the hash
	// ##GET: keys = incubation/[ID]/sum, values = [CBOR(hash), CBOR(length)]
	value := operations.Get(t, bucketId, typing.Key{[]byte("incubation"), demoId, []byte("sum")}, true)
	if t.Failed() {
		return
	}
	if len(value) != 2 {
		t.Errorf("Expecting 2 value entries (cbor(hash) and cbor(length))")
		return
	}
	err := encoding.Cbor().Decode(value[0], &hash)
	if err != nil {
		t.Errorf("Error getting the hash: %v", err)
		return
	}
	var length uint64
	err = encoding.Cbor().Decode(value[1], &length)
	if err != nil {
		t.Errorf("Error getting the length: %v", err)
		return
	}

	// Ok, commit
	// ##PUT: keys = incubation/[ID]/finish, values = []
	operations.Put(t, bucketId, typing.Key{[]byte("incubation"), demoId, []byte("finish")}, typing.Value{})
	if t.Failed() {
		return
	}
	return
}
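// chunkSize is referenced by Upload but not defined in this excerpt; its
// real value lives elsewhere in the suite. A plausible placeholder
// (assumption, not from the source):
const chunkSize = 8 * 1024 // 8 KiB per PUT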
// One of the interesting features of buran is that you can connect two buckets.
// In this case we use the 'put forwarding' mechanism to connect two buckets.
func testPutForwardingWithTwoStorages(t btesting.T) {
	var storageOneBucketId typing.BucketId
	var storageTwoBucketId typing.BucketId

	// Create two buckets
	// Bucket "one"
	CreateBucket(t, typing.TypeId_Store, &storageOneBucketId)
	if t.Failed() {
		return
	}
	// Bucket "two"
	CreateBucket(t, typing.TypeId_Store, &storageTwoBucketId)
	if t.Failed() {
		return
	}

	// Now configure bucket "one" to forward the puts to bucket "two"
	SetPutReceiver(t, storageOneBucketId, storageTwoBucketId)
	if t.Failed() {
		return
	}

	// Now put something into storage "one"
	Put(t, storageOneBucketId, typing.KeyFromStringPanic("daKey"),
		typing.ValueFromInterfacePanic([][]byte{[]byte("daValue")}))
	if t.Failed() {
		return
	}

	// So, nothing new here: of course the value we just put is found in bucket one
	valueFromBucketOne := Get(t, storageOneBucketId, typing.KeyFromStringPanic("daKey"), true)
	if t.Failed() {
		return
	}
	if len(valueFromBucketOne) != 1 {
		t.Errorf("Expect one value")
		return
	}
	if !bytes.Equal(valueFromBucketOne[0], []byte("daValue")) {
		t.Errorf("Value in bucket one is wrong")
		return
	}

	// Now the interesting part: The same value can also be found in bucket two, since
	// the system forwarded the PUT operation to bucket two.
	valueFromBucketTwo := Get(t, storageTwoBucketId, typing.KeyFromStringPanic("daKey"), true)
	if t.Failed() {
		return
	}
	if len(valueFromBucketTwo) != 1 {
		t.Errorf("Expect one value in bucket two too")
		return
	}
	if !bytes.Equal(valueFromBucketTwo[0], []byte("daValue")) {
		t.Errorf("Value in bucket two is wrong")
		return
	}
}
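// SetPutReceiver is used above but not shown in this excerpt. The directory
// test below wires two buckets together at creation time through
// CreateBucketWithMetadata and the "const.forwarder.blob" metadata key; a
// put-forwarding link could plausibly be established the same way. This is
// a sketch only: the metadata key name "const.forwarder.put" is an
// assumption, not confirmed by this excerpt.
func createForwardingStoreSketch(t btesting.T, receiver typing.BucketId, createdBucketId *typing.BucketId) {
	// Bucket ids are CBOR-encoded as bytes for metadata values (the same
	// pattern as the blob forwarder below)
	receiverCbor, err := encoding.Cbor().Encode([]byte(receiver))
	if err != nil {
		t.Errorf("error cbor encoding the receiver bucket id: %v", err)
		return
	}
	CreateBucketWithMetadata(t, typing.TypeId_Store, map[string][]byte{
		"const.forwarder.put": receiverCbor, // assumption: analogous to "const.forwarder.blob"
	}, createdBucketId)
}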
func uploadDirectoryAndGet(t btesting.T) {
	// First create the blob bucket (that's where the directories are stored, and the data too)
	var blobBucketId typing.BucketId
	operations.CreateBucket(t, typing.TypeId_BlobStore, &blobBucketId)
	if t.Failed() {
		return
	}

	// Now create the directory bucket and connect it to the blob bucket
	var bucketId typing.BucketId
	blobBucketIdCBor, err := encoding.Cbor().Encode([]byte(blobBucketId))
	if err != nil {
		t.Errorf("error cbor encoding: %v", err)
		return
	}
	operations.CreateBucketWithMetadata(t, typing.TypeId_Directory, map[string][]byte{
		"const.forwarder.blob": blobBucketIdCBor,
	}, &bucketId)
	if t.Failed() {
		return
	}

	// Both buckets now exist and are connected
	dir, err := createDemoDirectory()
	if err != nil {
		t.Errorf("Error creating directory: %v", err)
		return
	}

	// Now upload the directory to the blob storage
	dirReader := bytes.NewReader(dir)
	hash := blob.Upload(t, blobBucketId, dirReader)
	if t.Failed() {
		return
	}

	// The directory is now in the blob storage - the directory bucket can now index it
	cborHash, err := encoding.Cbor().Encode(hash)
	if err != nil {
		t.Errorf("error cbor encoding the hash: %v", err)
		return
	}
	operations.Put(t, bucketId, typing.Key{[]byte("index")}, typing.Value{cborHash})
	if t.Failed() {
		return
	}

	// Get some files
	operations.Get(t, bucketId, typing.Key{hash, []byte("file_1.txt")}, true)
	if t.Failed() {
		return
	}
	operations.Get(t, bucketId, typing.Key{hash, []byte("another_file.jpeg")}, true)
	if t.Failed() {
		return
	}

	// Now some files that do not exist
	operations.Get(t, bucketId, typing.Key{hash, []byte("<UNKNOWN_FILE>.exe")}, false)
	if t.Failed() {
		return
	}
	operations.Get(t, bucketId, typing.Key{hash, []byte("no_is_not_in_directory.com")}, false)
	if t.Failed() {
		return
	}
}
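// The four Get calls above all follow one pattern; a table-driven sketch of
// the same checks (behaviorally equivalent, just a stylistic alternative,
// not part of the original suite):
func checkDirectoryEntriesSketch(t btesting.T, bucketId typing.BucketId, hash []byte) {
	checks := []struct {
		file   string
		exists bool
	}{
		{"file_1.txt", true},
		{"another_file.jpeg", true},
		{"<UNKNOWN_FILE>.exe", false},
		{"no_is_not_in_directory.com", false},
	}
	for _, c := range checks {
		// The final argument tells Get whether the key is expected to exist
		operations.Get(t, bucketId, typing.Key{hash, []byte(c.file)}, c.exists)
		if t.Failed() {
			return
		}
	}
}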
func ScanMatch(t btesting.T, bucketId typing.BucketId, settings *ScanSettings, scanEntries []ScanEntry) {
	var resultsSlice []interface{}
	t.Request(btesting.Request{
		Input: btesting.Object{
			"Operation": "Scan",
			"Data": btesting.Object{
				"BucketId":      writer.BucketId(bucketId),
				"FromKey":       settings.FromKey,
				"FromExclusive": settings.FromExclusive,
				"ToKey":         settings.ToKeyOptional,
				"ToExclusive":   settings.ToExclusive,
				"Reverse":       settings.Reverse,
				"Limit":         settings.Limit,
				"Skip":          settings.Skip,
			},
		},
		Expecting: btesting.Object{
			"Code": eval.RetcodeOk(),
			"Data": btesting.Object{
				"HasMore": settings.ExpectToHaveMore,
				"Results": eval.IsAnySlice(&resultsSlice),
			},
		},
	})
	if t.Failed() {
		return
	}

	// Test if all values can be found
	var matched bool
	var collectedErrorMsgs []string
	for _, entry := range scanEntries {
		collectedErrorMsgs = nil
		matched = false
		for _, actual := range resultsSlice {
			actualObj, ok := actual.(map[string]interface{})
			if !ok {
				t.Errorf("Got something that's not an object in the results, it's %T.", actual)
				return
			}
			keyInterface := actualObj["Key"]
			t.Evaluator().Evaluate(entry.Key, keyInterface)
			if !t.Failed() {
				// Ok, we have a match
				matched = true
			} else {
				collectedErrorMsgs = append(collectedErrorMsgs, t.FailedMessage())
				// Clear the failure and try the next result
				t.ClearFailure()
			}
		}
		if matched != entry.Match {
			// Error
			t.Errorf("Entry with key %v expected to match? %v, actually matched? %v. "+
				"All results: %v. Collected errors: %v",
				entry.Key, entry.Match, matched, resultsSlice, collectedErrorMsgs)
			return
		}
	}
}
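// ScanSettings and ScanEntry are not defined in this excerpt. A minimal
// sketch reconstructed from the fields the helpers above actually use; the
// field types are assumptions based on how the values are passed around:
type ScanSettings struct {
	FromKey          interface{} // key the scan starts from
	FromExclusive    bool        // if true, FromKey itself is excluded
	ToKeyOptional    interface{} // optional end key; zero value scans to the end
	ToExclusive      bool        // if true, the end key is excluded
	Reverse          bool        // scan in descending key order
	Limit            int         // maximum number of results per scan
	Skip             int         // number of leading results to skip
	ExpectToHaveMore bool        // expected value of the response's "HasMore" flag
}

type ScanEntry struct {
	Key   interface{} // key (or key matcher) to look for in the scan results
	Match bool        // whether the key is expected to appear
}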