// TestBasicRW tests basic read-write func TestBasicRW(t *testing.T, db statedb.VersionedDB) { db.Open() defer db.Close() val, err := db.GetState("ns", "key1") testutil.AssertNoError(t, err, "") testutil.AssertNil(t, val) batch := statedb.NewUpdateBatch() vv1 := statedb.VersionedValue{Value: []byte("value1"), Version: version.NewHeight(1, 1)} vv2 := statedb.VersionedValue{Value: []byte("value2"), Version: version.NewHeight(1, 2)} vv3 := statedb.VersionedValue{Value: []byte("value3"), Version: version.NewHeight(1, 3)} vv4 := statedb.VersionedValue{Value: []byte{}, Version: version.NewHeight(1, 4)} batch.Put("ns1", "key1", vv1.Value, vv1.Version) batch.Put("ns1", "key2", vv2.Value, vv2.Version) batch.Put("ns2", "key3", vv3.Value, vv3.Version) batch.Put("ns2", "key4", vv4.Value, vv4.Version) savePoint := version.NewHeight(2, 5) db.ApplyUpdates(batch, savePoint) vv, _ := db.GetState("ns1", "key1") testutil.AssertEquals(t, vv, &vv1) vv, _ = db.GetState("ns2", "key4") testutil.AssertEquals(t, vv, &vv4) sp, err := db.GetLatestSavePoint() testutil.AssertNoError(t, err, "") testutil.AssertEquals(t, sp, savePoint) }
func TestTxRWSetMarshalUnmarshal(t *testing.T) { txRW := &TxReadWriteSet{} nsRW1 := &NsReadWriteSet{"ns1", []*KVRead{&KVRead{"key1", version.NewHeight(1, 1)}}, []*KVWrite{&KVWrite{"key2", false, []byte("value2")}}} nsRW2 := &NsReadWriteSet{"ns2", []*KVRead{&KVRead{"key3", version.NewHeight(1, 2)}}, []*KVWrite{&KVWrite{"key4", true, nil}}} nsRW3 := &NsReadWriteSet{"ns3", []*KVRead{&KVRead{"key5", version.NewHeight(1, 3)}}, []*KVWrite{&KVWrite{"key6", false, []byte("value6")}, &KVWrite{"key7", false, []byte("value7")}}} txRW.NsRWs = append(txRW.NsRWs, nsRW1, nsRW2, nsRW3) b, err := txRW.Marshal() testutil.AssertNoError(t, err, "Error while marshalling changeset") deserializedRWSet := &TxReadWriteSet{} err = deserializedRWSet.Unmarshal(b) testutil.AssertNoError(t, err, "Error while unmarshalling changeset") t.Logf("Unmarshalled changeset = %#+v", deserializedRWSet.NsRWs[0].Writes[0].IsDelete) testutil.AssertEquals(t, deserializedRWSet, txRW) }
// TestIterator tests the iterator func TestIterator(t *testing.T, db statedb.VersionedDB) { db.Open() defer db.Close() batch := statedb.NewUpdateBatch() batch.Put("ns1", "key1", []byte("value1"), version.NewHeight(1, 1)) batch.Put("ns1", "key2", []byte("value2"), version.NewHeight(1, 2)) batch.Put("ns1", "key3", []byte("value3"), version.NewHeight(1, 3)) batch.Put("ns1", "key4", []byte("value4"), version.NewHeight(1, 4)) batch.Put("ns2", "key5", []byte("value5"), version.NewHeight(1, 5)) batch.Put("ns2", "key6", []byte("value6"), version.NewHeight(1, 6)) batch.Put("ns3", "key7", []byte("value7"), version.NewHeight(1, 7)) savePoint := version.NewHeight(2, 5) db.ApplyUpdates(batch, savePoint) itr1, _ := db.GetStateRangeScanIterator("ns1", "key1", "") testItr(t, itr1, []string{"key1", "key2", "key3", "key4"}) itr2, _ := db.GetStateRangeScanIterator("ns1", "key2", "key3") testItr(t, itr2, []string{"key2"}) itr3, _ := db.GetStateRangeScanIterator("ns1", "", "") testItr(t, itr3, []string{"key1", "key2", "key3", "key4"}) itr4, _ := db.GetStateRangeScanIterator("ns2", "", "") testItr(t, itr4, []string{"key5", "key6"}) }
// TestDeletes tests deteles func TestDeletes(t *testing.T, db statedb.VersionedDB) { db.Open() defer db.Close() batch := statedb.NewUpdateBatch() vv1 := statedb.VersionedValue{Value: []byte("value1"), Version: version.NewHeight(1, 1)} vv2 := statedb.VersionedValue{Value: []byte("value2"), Version: version.NewHeight(1, 2)} vv3 := statedb.VersionedValue{Value: []byte("value1"), Version: version.NewHeight(1, 3)} vv4 := statedb.VersionedValue{Value: []byte("value2"), Version: version.NewHeight(1, 4)} batch.Put("ns", "key1", vv1.Value, vv1.Version) batch.Put("ns", "key2", vv2.Value, vv2.Version) batch.Put("ns", "key3", vv2.Value, vv3.Version) batch.Put("ns", "key4", vv2.Value, vv4.Version) batch.Delete("ns", "key3", version.NewHeight(1, 5)) savePoint := version.NewHeight(1, 5) err := db.ApplyUpdates(batch, savePoint) testutil.AssertNoError(t, err, "") vv, _ := db.GetState("ns", "key2") testutil.AssertEquals(t, vv, &vv2) vv, err = db.GetState("ns", "key3") testutil.AssertNoError(t, err, "") testutil.AssertNil(t, vv) batch = statedb.NewUpdateBatch() batch.Delete("ns", "key2", version.NewHeight(1, 6)) err = db.ApplyUpdates(batch, savePoint) testutil.AssertNoError(t, err, "") vv, err = db.GetState("ns", "key2") testutil.AssertNoError(t, err, "") testutil.AssertNil(t, vv) }
// ValidateAndPrepareBatch implements method in Validator interface.
// It walks every transaction envelope in the block, MVCC-validates its
// read-write set against the DB plus the updates accumulated so far (unless
// doMVCCValidation is false, in which case every tx is accepted), collects the
// write sets of valid transactions into a single UpdateBatch, and records
// invalid transactions in the block's TRANSACTIONS_FILTER metadata.
func (v *Validator) ValidateAndPrepareBatch(block *common.Block, doMVCCValidation bool) (*statedb.UpdateBatch, error) {
	logger.Debugf("New block arrived for validation:%#v, doMVCCValidation=%t", block, doMVCCValidation)
	var valid bool
	updates := statedb.NewUpdateBatch()
	logger.Debugf("Validating a block with [%d] transactions", len(block.Data.Data))
	// Bit array of transactions already flagged invalid upstream; set bits are skipped.
	txsFilter := util.NewFilterBitArrayFromBytes(block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER])
	for txIndex, envBytes := range block.Data.Data {
		if txsFilter.IsSet(uint(txIndex)) {
			// Skipping invalid transaction
			logger.Debug("Skipping transaction marked as invalid, txIndex=", txIndex)
			continue
		}
		// extract actions from the envelope message
		respPayload, err := putils.GetActionFromEnvelope(envBytes)
		if err != nil {
			return nil, err
		}
		// preparation for extracting RWSet from transaction
		txRWSet := &rwset.TxReadWriteSet{}
		// Get the Result from the Action
		// and then Unmarshal it into a TxReadWriteSet using custom unmarshalling
		if err = txRWSet.Unmarshal(respPayload.Results); err != nil {
			return nil, err
		}
		// trace the first 2000 characters of RWSet only, in case it is huge
		if logger.IsEnabledFor(logging.DEBUG) {
			txRWSetString := txRWSet.String()
			if len(txRWSetString) < 2000 {
				logger.Debugf("validating txRWSet:[%s]", txRWSetString)
			} else {
				logger.Debugf("validating txRWSet:[%s...]", txRWSetString[0:2000])
			}
		}
		// Either accept unconditionally, or validate against committed state
		// plus the updates staged so far in this block.
		if !doMVCCValidation {
			valid = true
		} else if valid, err = v.validateTx(txRWSet, updates); err != nil {
			return nil, err
		}
		if valid {
			// Tx numbers within a block are 1-based in the committed height.
			committingTxHeight := version.NewHeight(block.Header.Number, uint64(txIndex+1))
			addWriteSetToBatch(txRWSet, committingTxHeight, updates)
		} else {
			// Set the bit corresponding to the invalid transaction
			// (a set bit marks the tx invalid; the original comment said "Unset").
			txsFilter.Set(uint(txIndex))
		}
	}
	// Persist the (possibly extended) invalid-transaction filter back into the block.
	block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER] = txsFilter.ToBytes()
	return updates, nil
}
// testTxSimulatorWithExistingData runs three successive simulations against
// the same tx manager: tx1 seeds four keys, tx2 updates one key and deletes
// another, tx3 verifies the final state. It also checks the versions recorded
// in the underlying versioned DB after the commits.
func testTxSimulatorWithExistingData(t *testing.T, env testEnv) {
	txMgr := env.getTxMgr()
	txMgrHelper := newTxMgrTestHelper(t, txMgr)
	// simulate tx1
	s1, _ := txMgr.NewTxSimulator()
	s1.SetState("ns1", "key1", []byte("value1"))
	s1.SetState("ns1", "key2", []byte("value2"))
	s1.SetState("ns2", "key3", []byte("value3"))
	s1.SetState("ns2", "key4", []byte("value4"))
	s1.Done()
	// validate and commit RWset
	txRWSet1, _ := s1.GetTxSimulationResults()
	txMgrHelper.validateAndCommitRWSet(txRWSet1)
	// simulate tx2 that make changes to existing data
	s2, _ := txMgr.NewTxSimulator()
	// Reads during simulation see the state committed by tx1.
	value, _ := s2.GetState("ns1", "key1")
	testutil.AssertEquals(t, value, []byte("value1"))
	s2.SetState("ns1", "key1", []byte("value1_1"))
	s2.DeleteState("ns2", "key3")
	// Read-your-own-writes: the simulator returns the value set in this tx.
	value, _ = s2.GetState("ns1", "key1")
	testutil.AssertEquals(t, value, []byte("value1_1"))
	s2.Done()
	// validate and commit RWset for tx2
	txRWSet2, _ := s2.GetTxSimulationResults()
	txMgrHelper.validateAndCommitRWSet(txRWSet2)
	// simulate tx3
	s3, _ := txMgr.NewTxSimulator()
	value, _ = s3.GetState("ns1", "key1")
	testutil.AssertEquals(t, value, []byte("value1_1"))
	// key3 was deleted by tx2, so it must now read as nil.
	value, _ = s3.GetState("ns2", "key3")
	testutil.AssertEquals(t, value, nil)
	s3.Done()
	// verify the versions of keys in persistence
	// key1 was rewritten by the second committed block -> height (2,1);
	// key2 is untouched since the first commit -> height (1,1).
	// NOTE(review): assumes validateAndCommitRWSet commits each RWSet as its
	// own block with 1-based tx numbering — confirm against the helper.
	vv, _ := env.getVDB().GetState("ns1", "key1")
	testutil.AssertEquals(t, vv.Version, version.NewHeight(2, 1))
	vv, _ = env.getVDB().GetState("ns1", "key2")
	testutil.AssertEquals(t, vv.Version, version.NewHeight(1, 1))
}
func TestValidator(t *testing.T) { testDBEnv := stateleveldb.NewTestVDBEnv(t, testDBPath) defer testDBEnv.Cleanup() db := testDBEnv.DB //populate db with initial data batch := statedb.NewUpdateBatch() batch.Put("ns1", "key1", []byte("value1"), version.NewHeight(1, 1)) batch.Put("ns1", "key2", []byte("value2"), version.NewHeight(1, 2)) batch.Put("ns1", "key3", []byte("value3"), version.NewHeight(1, 3)) batch.Put("ns1", "key4", []byte("value4"), version.NewHeight(1, 4)) batch.Put("ns1", "key5", []byte("value5"), version.NewHeight(1, 5)) db.ApplyUpdates(batch, version.NewHeight(1, 5)) validator := NewValidator(db) //rwset1 should be valid rwset1 := rwset.NewRWSet() rwset1.AddToReadSet("ns1", "key1", version.NewHeight(1, 1)) rwset1.AddToReadSet("ns2", "key2", nil) checkValidation(t, validator, []*rwset.RWSet{rwset1}, []int{}) //rwset2 should not be valid rwset2 := rwset.NewRWSet() rwset2.AddToReadSet("ns1", "key1", version.NewHeight(1, 2)) checkValidation(t, validator, []*rwset.RWSet{rwset2}, []int{0}) //rwset3 should not be valid rwset3 := rwset.NewRWSet() rwset3.AddToReadSet("ns1", "key1", nil) checkValidation(t, validator, []*rwset.RWSet{rwset3}, []int{0}) // rwset4 and rwset5 within same block - rwset4 should be valid and makes rwset5 as invalid rwset4 := rwset.NewRWSet() rwset4.AddToReadSet("ns1", "key1", version.NewHeight(1, 1)) rwset4.AddToWriteSet("ns1", "key1", []byte("value1_new")) rwset5 := rwset.NewRWSet() rwset5.AddToReadSet("ns1", "key1", version.NewHeight(1, 1)) checkValidation(t, validator, []*rwset.RWSet{rwset4, rwset5}, []int{1}) }
func (scanner *queryScanner) next() (*queryRecord, error) { scanner.cursor++ if scanner.cursor >= len(scanner.results) { return nil, nil } selectedValue := scanner.results[scanner.cursor] namespace, key := splitCompositeKey([]byte(selectedValue.ID)) //TODO - change hardcoded version when version support is available in CouchDB return &queryRecord{namespace, key, version.NewHeight(1, 1), selectedValue.Value}, nil }
func (scanner *kvScanner) next() (*committedKV, error) { scanner.cursor++ if scanner.cursor >= len(scanner.results) { return nil, nil } selectedValue := scanner.results[scanner.cursor] _, key := splitCompositeKey([]byte(selectedValue.ID)) //TODO - change hardcoded version when version support is available in CouchDB return &committedKV{key, version.NewHeight(1, 1), selectedValue.Value}, nil }
// Commit implements method in interface `txmgmt.TxMgr` func (txmgr *LockBasedTxMgr) Commit() error { logger.Debugf("Commiting block") txmgr.commitRWLock.Lock() defer txmgr.commitRWLock.Unlock() logger.Debugf("Write lock aquired for commiting block") if txmgr.batch == nil { panic("validateAndPrepare() method should have been called before calling commit()") } defer func() { txmgr.batch = nil }() if err := txmgr.db.ApplyUpdates(txmgr.batch, version.NewHeight(txmgr.currentBlock.Header.Number, uint64(len(txmgr.currentBlock.Data.Data)))); err != nil { return err } logger.Debugf("Block committed") return nil }
// getCommittedValueAndVersion reads the committed document bytes for (ns, key)
// from CouchDB. The version is currently a hardcoded placeholder (1,1) and the
// error return is always nil — see the TODOs below.
func (txmgr *CouchDBTxMgr) getCommittedValueAndVersion(ns string, key string) ([]byte, *version.Height, error) {
	compositeKey := constructCompositeKey(ns, key)
	// NOTE(review): the ReadDoc error is deliberately dropped per the TODO;
	// propagating it may change the missing-key path — confirm ReadDoc's
	// contract for a 404 before adding handling.
	docBytes, _, _ := txmgr.couchDB.ReadDoc(string(compositeKey)) // TODO add error handling
	// trace the first 200 bytes of value only, in case it is huge
	if docBytes != nil && logger.IsEnabledFor(logging.DEBUG) {
		if len(docBytes) < 200 {
			logger.Debugf("===COUCHDB=== getCommittedValueAndVersion() Read docBytes %s", docBytes)
		} else {
			logger.Debugf("===COUCHDB=== getCommittedValueAndVersion() Read docBytes %s...", docBytes[0:200])
		}
	}
	ver := version.NewHeight(1, 1) //TODO - version hardcoded to 1 is a temporary value for the prototype
	return docBytes, ver, nil
}
//QueryDocuments method provides function for processing a query func (dbclient *CouchDBConnectionDef) QueryDocuments(query string, limit, skip int) (*[]QueryResult, error) { logger.Debugf("===COUCHDB=== Entering QueryDocuments() query=%s", query) var results []QueryResult queryURL, err := url.Parse(dbclient.URL) if err != nil { logger.Errorf("===COUCHDB=== URL parse error: %s", err.Error()) return nil, err } queryURL.Path = dbclient.Database + "/_find" queryParms := queryURL.Query() queryParms.Set("limit", strconv.Itoa(limit)) queryParms.Add("skip", strconv.Itoa(skip)) queryURL.RawQuery = queryParms.Encode() //Set up a buffer for the data to be pushed to couchdb data := new(bytes.Buffer) data.ReadFrom(bytes.NewReader([]byte(query))) resp, _, err := dbclient.handleRequest(http.MethodPost, queryURL.String(), data, "", "") if err != nil { return nil, err } defer resp.Body.Close() if logger.IsEnabledFor(logging.DEBUG) { dump, err2 := httputil.DumpResponse(resp, true) if err2 != nil { log.Fatal(err2) } logger.Debugf("%s", dump) } //handle as JSON document jsonResponseRaw, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } var jsonResponse = &QueryResponse{} err2 := json.Unmarshal(jsonResponseRaw, &jsonResponse) if err2 != nil { return nil, err2 } for _, row := range jsonResponse.Docs { var jsonDoc = &DocID{} err3 := json.Unmarshal(row, &jsonDoc) if err3 != nil { return nil, err3 } logger.Debugf("===COUCHDB=== Adding row to resultset: %s", row) //TODO Replace the temporary NewHeight version when available var addDocument = &QueryResult{jsonDoc.ID, version.NewHeight(1, 1), row} results = append(results, *addDocument) } logger.Debugf("===COUCHDB=== Exiting QueryDocuments()") return &results, nil }
//ReadDocRange method provides function to a range of documents based on the start and end keys //startKey and endKey can also be empty strings. If startKey and endKey are empty, all documents are returned //TODO This function provides a limit option to specify the max number of entries. This will //need to be added to configuration options. Skip will not be used by Fabric since a consistent //result set is required func (dbclient *CouchDBConnectionDef) ReadDocRange(startKey, endKey string, limit, skip int) (*[]QueryResult, error) { logger.Debugf("===COUCHDB=== Entering ReadDocRange() startKey=%s, endKey=%s", startKey, endKey) var results []QueryResult rangeURL, err := url.Parse(dbclient.URL) if err != nil { logger.Errorf("===COUCHDB=== URL parse error: %s", err.Error()) return nil, err } rangeURL.Path = dbclient.Database + "/_all_docs" queryParms := rangeURL.Query() queryParms.Set("limit", strconv.Itoa(limit)) queryParms.Add("skip", strconv.Itoa(skip)) queryParms.Add("include_docs", "true") //Append the startKey if provided if startKey != "" { queryParms.Add("startkey", strings.Replace(strconv.QuoteToGraphic(startKey), "\\x00", "\\u0000", 1)) } //Append the endKey if provided if endKey != "" { queryParms.Add("endkey", strings.Replace(strconv.QuoteToGraphic(endKey), "\\x00", "\\u0000", 1)) } rangeURL.RawQuery = queryParms.Encode() resp, _, err := dbclient.handleRequest(http.MethodGet, rangeURL.String(), nil, "", "") if err != nil { return nil, err } defer resp.Body.Close() if logger.IsEnabledFor(logging.DEBUG) { dump, err2 := httputil.DumpResponse(resp, true) if err2 != nil { log.Fatal(err2) } logger.Debugf("%s", dump) } //handle as JSON document jsonResponseRaw, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } var jsonResponse = &RangeQueryResponse{} err2 := json.Unmarshal(jsonResponseRaw, &jsonResponse) if err2 != nil { return nil, err2 } logger.Debugf("Total Rows: %d", jsonResponse.TotalRows) for _, row := range jsonResponse.Rows { var jsonDoc 
= &Doc{} err3 := json.Unmarshal(row.Doc, &jsonDoc) if err3 != nil { return nil, err3 } if jsonDoc.Attachments != nil { logger.Debugf("===COUCHDB=== Adding binary docment for id: %s", jsonDoc.ID) binaryDocument, _, err := dbclient.ReadDoc(jsonDoc.ID) if err != nil { return nil, err } var addDocument = &QueryResult{jsonDoc.ID, version.NewHeight(1, 1), binaryDocument} results = append(results, *addDocument) } else { logger.Debugf("===COUCHDB=== Adding json docment for id: %s", jsonDoc.ID) var addDocument = &QueryResult{jsonDoc.ID, version.NewHeight(1, 1), row.Doc} results = append(results, *addDocument) } } logger.Debugf("===COUCHDB=== Exiting ReadDocRange()") return &results, nil }
// ValidateAndPrepare implements method in interface `txmgmt.TxMgr` func (txmgr *CouchDBTxMgr) ValidateAndPrepare(block *common.Block, doMVCCValidation bool) error { if doMVCCValidation == true { logger.Debugf("===COUCHDB=== Entering CouchDBTxMgr.ValidateAndPrepare()") logger.Debugf("Validating a block with [%d] transactions", len(block.Data.Data)) } else { logger.Debugf("New block arrived for write set computation:%#v", block) logger.Debugf("Computing write set for a block with [%d] transactions", len(block.Data.Data)) } var valid bool txmgr.updateSet = newUpdateSet() txmgr.blockNum = block.Header.Number txsFilter := util.NewFilterBitArrayFromBytes(block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER]) for txIndex, envBytes := range block.Data.Data { if txsFilter.IsSet(uint(txIndex)) { // Skiping invalid transaction logger.Debug("Skipping transaction marked as invalid, txIndex=", txIndex) continue } // extract actions from the envelope message respPayload, err := putils.GetActionFromEnvelope(envBytes) if err != nil { return err } //preparation for extracting RWSet from transaction txRWSet := &rwset.TxReadWriteSet{} // Get the Result from the Action // and then Unmarshal it into a TxReadWriteSet using custom unmarshalling if err = txRWSet.Unmarshal(respPayload.Results); err != nil { return err } // trace the first 2000 characters of RWSet only, in case it is huge if logger.IsEnabledFor(logging.DEBUG) { txRWSetString := txRWSet.String() operation := "validating" if doMVCCValidation == false { operation = "computing write set from" } if len(txRWSetString) < 2000 { logger.Debugf(operation+" txRWSet:[%s]", txRWSetString) } else { logger.Debugf(operation+" txRWSet:[%s...]", txRWSetString[0:2000]) } } if doMVCCValidation == true { if valid, err = txmgr.validateTx(txRWSet); err != nil { return err } } else { valid = true } if valid { if err := txmgr.addWriteSetToBatch(txRWSet, version.NewHeight(block.Header.Number, uint64(txIndex+1))); err != nil { return 
err } } else { // Unset bit in byte array corresponded to the invalid transaction txsFilter.Set(uint(txIndex)) } } block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER] = txsFilter.ToBytes() logger.Debugf("===COUCHDB=== Exiting CouchDBTxMgr.ValidateAndPrepare()") return nil }
func TestDatabaseQuery(t *testing.T) { //call a helper method to load the core.yaml testutil.SetupCoreYAMLConfig("./../../../../../peer") //Only run the tests if CouchDB is explitily enabled in the code, //otherwise CouchDB may not be installed and all the tests would fail //TODO replace this with external config property rather than config within the code if ledgerconfig.IsCouchDBEnabled() == true { env := newTestEnv(t) //env.Cleanup() //cleanup at the beginning to ensure the database doesn't exist already //defer env.Cleanup() //and cleanup at the end txMgr := NewCouchDBTxMgr(env.conf, env.couchDBAddress, //couchDB Address env.couchDatabaseName, //couchDB db name env.couchUsername, //enter couchDB id env.couchPassword) //enter couchDB pw type Asset struct { ID string `json:"_id"` Rev string `json:"_rev"` AssetName string `json:"asset_name"` Color string `json:"color"` Size string `json:"size"` Owner string `json:"owner"` } s1, _ := txMgr.NewTxSimulator() s1.SetState("ns1", "key1", []byte("value1")) s1.SetState("ns1", "key2", []byte("value2")) s1.SetState("ns1", "key3", []byte("value3")) s1.SetState("ns1", "key4", []byte("value4")) s1.SetState("ns1", "key5", []byte("value5")) s1.SetState("ns1", "key6", []byte("value6")) s1.SetState("ns1", "key7", []byte("value7")) s1.SetState("ns1", "key8", []byte("value8")) s1.SetState("ns1", "key9", []byte(`{"asset_name":"marble1","color":"red","size":"25","owner":"jerry"}`)) s1.SetState("ns1", "key10", []byte(`{"asset_name":"marble2","color":"blue","size":"10","owner":"bob"}`)) s1.SetState("ns1", "key11", []byte(`{"asset_name":"marble3","color":"blue","size":"35","owner":"jerry"}`)) s1.SetState("ns1", "key12", []byte(`{"asset_name":"marble4","color":"green","size":"15","owner":"bob"}`)) s1.SetState("ns1", "key13", []byte(`{"asset_name":"marble5","color":"red","size":"35","owner":"jerry"}`)) s1.SetState("ns1", "key14", []byte(`{"asset_name":"marble6","color":"blue","size":"25","owner":"bob"}`)) s1.Done() // validate and commit 
RWset txRWSet := s1.(*CouchDBTxSimulator).getTxReadWriteSet() isValid, err := txMgr.validateTx(txRWSet) testutil.AssertNoError(t, err, fmt.Sprintf("Error in validateTx(): %s", err)) testutil.AssertSame(t, isValid, true) txMgr.addWriteSetToBatch(txRWSet, version.NewHeight(1, 1)) err = txMgr.Commit() testutil.AssertNoError(t, err, fmt.Sprintf("Error while calling commit(): %s", err)) queryExecuter, _ := txMgr.NewQueryExecutor() queryString := "{\"selector\":{\"owner\": {\"$eq\": \"bob\"}},\"limit\": 10,\"skip\": 0}" itr, _ := queryExecuter.ExecuteQuery(queryString) counter := 0 for { queryRecord, _ := itr.Next() if queryRecord == nil { break } //Unmarshal the document to Asset structure assetResp := &Asset{} json.Unmarshal(queryRecord.(*ledger.QueryRecord).Record, &assetResp) //Verify the owner retrieved matches testutil.AssertEquals(t, assetResp.Owner, "bob") counter++ } //Ensure the query returns 3 documents testutil.AssertEquals(t, counter, 3) txMgr.Shutdown() } }
func TestEncodeDecodeValueAndVersion(t *testing.T) { testValueAndVersionEncodeing(t, []byte("value1"), version.NewHeight(1, 2)) testValueAndVersionEncodeing(t, []byte{}, version.NewHeight(50, 50)) }