func Get(t btesting.T, bucketId typing.BucketId, key typing.Key, expectFound bool) (value typing.Value) { var okCode interface{} if expectFound { okCode = eval.Retcode(0x0000) } else { okCode = eval.Retcode(0x0001) } t.Request(btesting.Request{ Input: btesting.Object{ "Operation": "Get", "Data": btesting.Object{ "BucketId": writer.BucketId(bucketId), "Key": writer.Key(key), }, }, Expecting: btesting.Object{ "Code": okCode, "Data": eval.Optional(btesting.Object{ "Value": eval.IsAnyValue(&value), }), }, }) return }
func ScanPutMatch(t btesting.T, bucketId typing.BucketId, settings *ScanSettings, scanEntries []ScanEntry) { // First put some entries for _, entry := range scanEntries { t.Request(btesting.Request{ Input: btesting.Object{ "Operation": "Put", "Data": btesting.Object{ "BucketId": writer.BucketId(bucketId), "Key": writer.Key(entry.Key), "Value": "", // Currently empty value }, }, Expecting: btesting.Object{ "Code": eval.RetcodeOk(), }, }) if t.Failed() { return } } // ... then see if we can find them ScanMatch(t, bucketId, settings, scanEntries) }
func SetPutReceiver(t btesting.T, bucketId typing.BucketId, receivers ...typing.BucketId) { // Now configure bucket "one" to forward the puts to bucket "two" putReceivers := make([][]byte, len(receivers)) for index, receiver := range receivers { putReceivers[index] = []byte(receiver) } newPutReceiversEncoded, err := encoding.Cbor().Encode(putReceivers) if err != nil { t.Errorf("%v", err) return } Put(t, bucketId.ToMetadataBucketId(), typing.KeyFromStringPanic("system.put_receivers"), typing.ValueFromInterfacePanic([][]byte{newPutReceiversEncoded})) }
// CreateBucket creates a new bucket of the given type. The id of the newly
// created bucket is written to outBucketId.
func CreateBucket(t btesting.T, bucketTypeId typing.BucketTypeId, outBucketId *typing.BucketId) {
	request := btesting.Request{
		Input: btesting.Object{
			"Operation": "CreateBucket",
			"Data": btesting.Object{
				"TypeId": writer.BucketType(bucketTypeId),
			},
		},
		Expecting: btesting.Object{
			"Code": eval.RetcodeOk(),
			"Data": btesting.Object{
				// Capture the generated bucket id for the caller.
				"Id": eval.IsAnyBucketId(outBucketId),
			},
		},
	}
	t.Request(request)
}
// Put stores the given value under key in the given bucket and expects the
// operation to succeed.
func Put(t btesting.T, bucketId typing.BucketId, key typing.Key, value typing.Value) {
	request := btesting.Request{
		Input: btesting.Object{
			"Operation": "Put",
			"Data": btesting.Object{
				"BucketId": writer.BucketId(bucketId),
				"Key":      writer.Key(key),
				"Value":    writer.Value(value),
			},
		},
		Expecting: btesting.Object{
			"Code": eval.RetcodeOk(),
		},
	}
	t.Request(request)
}
// CreateBucketWithMetadata creates a new bucket of the given type with the
// supplied initial metadata. The id of the newly created bucket is written to
// outBucketId.
func CreateBucketWithMetadata(t btesting.T, bucketTypeId typing.BucketTypeId, metadata map[string][]byte, outBucketId *typing.BucketId) {
	request := btesting.Request{
		Input: btesting.Object{
			"Operation": "CreateBucket",
			"Data": btesting.Object{
				"TypeId":   writer.BucketType(bucketTypeId),
				"Metadata": writer.Metadata(metadata),
			},
		},
		Expecting: btesting.Object{
			"Code": eval.RetcodeOk(),
			"Data": btesting.Object{
				// Capture the generated bucket id for the caller.
				"Id": eval.IsAnyBucketId(outBucketId),
			},
		},
	}
	t.Request(request)
}
// 1. Creates a bucket (storage) // 2. Sees what metadata can be found for hat bucket. func testScanMetadata(t btesting.T) { // The haser bucket is the simplest one, it currently only has one single metadata: // 'const.system.type_id' // First create a bucket var bucketId typing.BucketId operations.CreateBucket(t, typing.TypeId_Store, &bucketId) if t.Failed() { return } var keyAsBinary []byte t.Request(btesting.Request{ Input: btesting.Object{ "Operation": "Scan", "Data": btesting.Object{ "BucketId": fmt.Sprintf(":meta-id:%v", bucketId.ToBase32String()), "FromKey": "", "Limit": 1, }, }, Expecting: btesting.Object{ "Code": eval.RetcodeOk(), "Data": btesting.Object{ "HasMore": false, "Results": btesting.Array{ btesting.Object{ "Key": btesting.Array{eval.IsAnyBinaryB32(&keyAsBinary)}, }, }, }, }, }) if t.Failed() { return } keyAsString := string(keyAsBinary) if keyAsString != "const.system.type_id" { t.Errorf("Expecting to have the metadata const.system.type_id but have %v", keyAsString) return } }
func Download(t btesting.T, bucketId typing.BucketId, hash []byte, writer io.Writer) (entireLength uint64) { var skip int = 0 var limit int = readerBlockSize var entireLengthProcessed bool for { var err error skipEncoded := encoding.UIntToUVarInt(uint64(skip)) limitEncoded := encoding.UIntToUVarInt(uint64(limit)) //hash/[HASH]/content/VUI(skip_optional)/VUI(limit_optional) key := typing.Key{[]byte("hash"), hash, []byte("content"), skipEncoded, limitEncoded} value := operations.Get(t, bucketId, key, true) // value = [data, CBOR(entire_length)] if len(value) != 2 { t.Errorf("Got invalid get from bucket / expecting 2 elements in value. Have %v", len(value)) return } data := value[0] // Set entire length if !entireLengthProcessed { entireLengthEncoded := value[1] err = encoding.Cbor().Decode(entireLengthEncoded, &entireLength) entireLengthProcessed = true if err != nil { t.Errorf("Error decoding entire length %v", err) return } } _, err = writer.Write(data) if err != nil { t.Errorf("Unable to write to writer: %v", err) return } skip += readerBlockSize // Next one? End if we got less than requested or would exceed entire length if uint64(len(data)) < readerBlockSize || uint64(skip) >= entireLength { // No, end here return } } return }
// Uploads something and then downloads func testUploadDownload(t btesting.T, length int) { // First create a blon bucket var bucketId typing.BucketId operations.CreateBucket(t, typing.TypeId_BlobStore, &bucketId) if t.Failed() { return } // Create a buffer and fill that with random data buf := make([]byte, length) rand.Reader.Read(buf) reader := bytes.NewReader(buf) hash := Upload(t, bucketId, reader) if t.Failed() { return } // Now download writer := &bytes.Buffer{} resultingLength := Download(t, bucketId, hash, writer) if t.Failed() { return } // See if the length is the same as the upload length if resultingLength != uint64(length) { t.Errorf("Got different lengths, expecting %v but have %v", length, resultingLength) return } // See if the length is the same as the upload length if len(writer.Bytes()) != length { t.Errorf("Got different lengths, expecting %v but have %v in the buffer", length, len(writer.Bytes())) return } // Now compare the two buffers if bytes.Compare(buf, writer.Bytes()) != 0 { t.Errorf("Downloaded different data than the data just uploaded") return } }
func Upload(t btesting.T, bucketId typing.BucketId, reader io.Reader) (hash []byte) { demoId := []byte("demoId") for { buf := make([]byte, chunkSize) numRead, err := reader.Read(buf) if err != nil && err != io.EOF { t.Errorf("Error reading: %v", err) return } if numRead > 0 { buf = buf[:numRead] //##PUT: keys = incubation/[ID]/append, values = [data] operations.Put(t, bucketId, typing.Key{[]byte("incubation"), demoId, []byte("append")}, typing.Value{buf}) if t.Failed() { return } } if numRead != chunkSize { // End break } } // Get the hash // ##GET: keys = incubation/[ID]/sum, values = [CBOR(hash), CBOR(length)] value := operations.Get(t, bucketId, typing.Key{[]byte("incubation"), demoId, []byte("sum")}, true) if t.Failed() { return } if len(value) != 2 { t.Errorf("Expecting 2 value entries (cbor(hash) and cbor(length))") return } err := encoding.Cbor().Decode(value[0], &hash) if err != nil { t.Errorf("Error getting the hash: %v", err) return } var length uint64 err = encoding.Cbor().Decode(value[1], &length) if err != nil { t.Errorf("Error getting the length: %v", err) return } // Ok, commit //##PUT: keys = incubation/[ID]/finish, values = [] operations.Put(t, bucketId, typing.Key{[]byte("incubation"), demoId, []byte("finish")}, typing.Value{}) if t.Failed() { return } return }
// One of the interesting features of buran is that you can connect two buckets. // In this case we use the 'put forwarding' mechanism to connect two buckets. func testPutForwardingWithTwoStorages(t btesting.T) { var storageOneBucketId typing.BucketId var storageTwoBucketId typing.BucketId // Create two buckets // Bucket "one" CreateBucket(t, typing.TypeId_Store, &storageOneBucketId) if t.Failed() { return } // Bucket "two" CreateBucket(t, typing.TypeId_Store, &storageTwoBucketId) if t.Failed() { return } // Now configure bucket "one" to forward the puts to bucket "two" SetPutReceiver(t, storageOneBucketId, storageTwoBucketId) if t.Failed() { return } // Now put something to storage "one" Put(t, storageOneBucketId, typing.KeyFromStringPanic("daKey"), typing.ValueFromInterfacePanic([][]byte{[]byte("daValue")})) if t.Failed() { return } // So, noting new here, of course the puted value is found in bucket one valueFromBucketOne := Get(t, storageOneBucketId, typing.KeyFromStringPanic("daKey"), true) if t.Failed() { return } if len(valueFromBucketOne) != 1 { t.Errorf("Expect one value") return } if bytes.Compare(valueFromBucketOne[0], []byte("daValue")) != 0 { t.Errorf("Value in bucket one is wrong") return } // Now the interesting part: The same value can also be found in bucket two, since // the system forwarded the PUT operation to bucket two. valueFromBucketTwo := Get(t, storageOneBucketId, typing.KeyFromStringPanic("daKey"), true) if t.Failed() { return } if len(valueFromBucketTwo) != 1 { t.Errorf("Expect one value in bucket two too") return } if bytes.Compare(valueFromBucketTwo[0], []byte("daValue")) != 0 { t.Errorf("Value in bucket two is wrong") return } }
func uploadDirectoryAndGet(t btesting.T) { // First create the blob bucket (that's where the directories are stored, and the data too) var blobBucketId typing.BucketId operations.CreateBucket(t, typing.TypeId_BlobStore, &blobBucketId) if t.Failed() { return } // Now create the directory bucket and connect that to the blob bucket var bucketId typing.BucketId var blobBucketIdCBor []byte blobBucketIdCBor, err := encoding.Cbor().Encode([]byte(blobBucketId)) if err != nil { t.Errorf("error cbor encoding: %v", err) return } operations.CreateBucketWithMetadata(t, typing.TypeId_Directory, map[string][]byte{ "const.forwarder.blob": blobBucketIdCBor, }, &bucketId) if t.Failed() { return } // Both buckets now exist and are connected dir, err := createDemoDirectory() if err != nil { t.Errorf("Error creating directory: %v\n", err) } // Now upload the directory to the blob storage dirReader := bytes.NewReader(dir) hash := blob.Upload(t, blobBucketId, dirReader) if t.Failed() { return } // The directory is now in the blob storage - the directory bucket can now index it cborHash, err := encoding.Cbor().Encode(hash) operations.Put(t, bucketId, typing.Key{[]byte("index")}, typing.Value{cborHash}) if t.Failed() { return } // Get some files operations.Get(t, bucketId, typing.Key{hash, []byte("file_1.txt")}, true) if t.Failed() { return } operations.Get(t, bucketId, typing.Key{hash, []byte("another_file.jpeg")}, true) if t.Failed() { return } // Now some files that do not exist operations.Get(t, bucketId, typing.Key{hash, []byte("<UNKNOWN_FILE>.exe")}, false) if t.Failed() { return } operations.Get(t, bucketId, typing.Key{hash, []byte("no_is_not_in_directory.com")}, false) if t.Failed() { return } }
// ScanMatch performs a Scan on the given bucket with the supplied settings and
// then checks, for every entry in scanEntries, whether its key matches one of
// the scan results. entry.Match states whether a match is expected; the test
// fails when the actual match state differs.
func ScanMatch(t btesting.T, bucketId typing.BucketId, settings *ScanSettings, scanEntries []ScanEntry) {
	// Captures the raw result rows of the scan response.
	var resultsSlice []interface{}
	t.Request(btesting.Request{
		Input: btesting.Object{
			"Operation": "Scan",
			"Data": btesting.Object{
				"BucketId":      writer.BucketId(bucketId),
				"FromKey":       settings.FromKey,
				"FromExclusive": settings.FromExclusive,
				"ToKey":         settings.ToKeyOptional,
				"ToExclusive":   settings.ToExclusive,
				"Reverse":       settings.Reverse,
				"Limit":         settings.Limit,
				"Skip":          settings.Skip,
			},
		},
		Expecting: btesting.Object{
			"Code": eval.RetcodeOk(),
			"Data": btesting.Object{
				"HasMore": settings.ExpectToHaveMore,
				"Results": eval.IsAnySlice(&resultsSlice),
			},
		},
	})
	if t.Failed() {
		return
	}
	// Test if all values can be found
	var matched bool
	var collectedErrorMsgs []string
	for _, entry := range scanEntries {
		// Reset per-entry state: error messages are only reported for the
		// entry whose match expectation is violated.
		collectedErrorMsgs = nil
		matched = false
		for _, actual := range resultsSlice {
			actualObj, ok := actual.(map[string]interface{})
			if !ok {
				t.Errorf("Got something that's no a object in the results, it's %T.", actual)
				return
			}
			keyInterface := actualObj["Key"]
			// Evaluate marks the test failed on mismatch; a non-failed state
			// therefore means this result row matches the entry's key.
			t.Evaluator().Evaluate(entry.Key, keyInterface)
			if !t.Failed() {
				// Ok, we have a match
				matched = true
			} else {
				// Remember why this row did not match, then reset the failure
				// so the remaining rows can still be evaluated.
				collectedErrorMsgs = append(collectedErrorMsgs, t.FailedMessage())
				// Clear failure and try next
				t.ClearFailure()
			}
		}
		if matched != entry.Match {
			// Error: actual match state differs from the expectation.
			t.Errorf("Entry with key %v expected to match? %v, actually matched? %v. "+
				"All results: %v. Collected errors: %v", entry.Key, entry.Match, matched, resultsSlice, collectedErrorMsgs)
			return
		}
	}
}