func (sc *systemChain) authorize(configEnvelope *cb.ConfigurationEnvelope) cb.Status {
	creationConfigItem := &cb.ConfigurationItem{}
	err := proto.Unmarshal(configEnvelope.Items[0].ConfigurationItem, creationConfigItem)
	if err != nil {
		logger.Debugf("Failing to validate chain creation because of unmarshaling error: %s", err)
		return cb.Status_BAD_REQUEST
	}

	if creationConfigItem.Key != utils.CreationPolicyKey {
		logger.Debugf("Failing to validate chain creation because first configuration item was not the CreationPolicy")
		return cb.Status_BAD_REQUEST
	}

	creationPolicy := &ab.CreationPolicy{}
	err = proto.Unmarshal(creationConfigItem.Value, creationPolicy)
	if err != nil {
		logger.Debugf("Failing to validate chain creation because first configuration item could not unmarshal to a CreationPolicy: %s", err)
		return cb.Status_BAD_REQUEST
	}

	ok := false
	for _, chainCreatorPolicy := range sc.support.SharedConfig().ChainCreators() {
		if chainCreatorPolicy == creationPolicy.Policy {
			ok = true
			break
		}
	}

	if !ok {
		logger.Debugf("Failed to validate chain creation because chain creation policy is not authorized for chain creation")
		return cb.Status_FORBIDDEN
	}

	policy, ok := sc.support.PolicyManager().GetPolicy(creationPolicy.Policy)
	if !ok {
		logger.Debugf("Failed to get policy for chain creation despite it being listed as an authorized policy")
		return cb.Status_INTERNAL_SERVER_ERROR
	}

	// XXX actually do policy signature validation
	_ = policy

	var remainingBytes []byte
	for i, item := range configEnvelope.Items {
		if i == 0 {
			// Do not include the creation policy
			continue
		}
		remainingBytes = append(remainingBytes, item.ConfigurationItem...)
	}

	configHash := util.ComputeCryptoHash(remainingBytes)

	if !bytes.Equal(configHash, creationPolicy.Digest) {
		logger.Debugf("Validly signed chain creation did not contain correct digest for remaining configuration %x vs. %x", configHash, creationPolicy.Digest)
		return cb.Status_BAD_REQUEST
	}

	return cb.Status_SUCCESS
}
// TestHashContentChange changes a random byte in a content and checks for hash change
func TestHashContentChange(t *testing.T) {
	b := []byte("firstcontent")
	hash := util.ComputeCryptoHash(b)

	b2 := []byte("To be, or not to be- that is the question: Whether 'tis nobler in the mind to suffer The slings and arrows of outrageous fortune Or to take arms against a sea of troubles, And by opposing end them. To die- to sleep- No more; and by a sleep to say we end The heartache, and the thousand natural shocks That flesh is heir to. 'Tis a consummation Devoutly to be wish'd.")

	h1 := computeHash(b2, hash)

	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	randIndex := (int(r.Uint32())) % len(b2)
	randByte := byte((int(r.Uint32())) % 128)

	//make sure the two bytes are different
	for {
		if randByte != b2[randIndex] {
			break
		}
		randByte = byte((int(r.Uint32())) % 128)
	}

	//change a random byte
	b2[randIndex] = randByte

	//this is the core hash func under test
	h2 := computeHash(b2, hash)

	//the two hashes should be different
	if bytes.Compare(h1, h2) == 0 {
		t.Fail()
		t.Logf("Hash expected to be different but is same")
	}
}
func (trieNode *trieNode) computeCryptoHash() []byte {
	stateTrieLogger.Debug("Enter computeCryptoHash() for trieNode [%s]", trieNode)
	var cryptoHashContent []byte
	if trieNode.containsValue() {
		stateTrieLogger.Debug("Adding value to hash computation for trieNode [%s]", trieNode)
		key := trieNode.trieKey.getEncodedBytes()
		cryptoHashContent = append(cryptoHashContent, proto.EncodeVarint(uint64(len(key)))...)
		cryptoHashContent = append(cryptoHashContent, key...)
		cryptoHashContent = append(cryptoHashContent, trieNode.value...)
	}

	sortedChildrenIndexes := trieNode.getSortedChildrenIndex()
	for _, index := range sortedChildrenIndexes {
		childCryptoHash := trieNode.childrenCryptoHashes[index]
		stateTrieLogger.Debug("Adding hash [%#v] for child number [%d] to hash computation for trieNode [%s]", childCryptoHash, index, trieNode)
		cryptoHashContent = append(cryptoHashContent, childCryptoHash...)
	}

	if cryptoHashContent == nil {
		// node has no associated value and no associated children.
		stateTrieLogger.Debug("Returning nil as hash for trieNode = [%s]. Also, marking this key for deletion.", trieNode)
		trieNode.markedForDeletion = true
		return nil
	}

	if !trieNode.containsValue() && trieNode.getNumChildren() == 1 {
		// node has no associated value and has a single child. Propagate the child hash up
		stateTrieLogger.Debug("Returning hash as of a single child for trieKey = [%s]", trieNode.trieKey)
		return cryptoHashContent
	}

	stateTrieLogger.Debug("Recomputing hash for trieKey = [%s]", trieNode)
	return util.ComputeCryptoHash(cryptoHashContent)
}
func TestGoodProposal(t *testing.T) {
	newChainID := "NewChainID"

	mcc := newMockChainCreator()
	mcc.ms.msc.chainCreators = []string{provisional.AcceptAllPolicyKey}
	mcc.ms.mpm.mp = &mockPolicy{}

	chainCreateTx := &cb.ConfigurationItem{
		Header: &cb.ChainHeader{
			ChainID: newChainID,
			Type:    int32(cb.HeaderType_CONFIGURATION_ITEM),
		},
		Key:  utils.CreationPolicyKey,
		Type: cb.ConfigurationItem_Orderer,
		Value: utils.MarshalOrPanic(&ab.CreationPolicy{
			Policy: provisional.AcceptAllPolicyKey,
			Digest: coreutil.ComputeCryptoHash([]byte{}),
		}),
	}
	ingressTx := makeConfigTxWithItems(newChainID, chainCreateTx)
	status := mcc.sysChain.proposeChain(ingressTx)
	if status != cb.Status_SUCCESS {
		t.Fatalf("Should have successfully proposed chain")
	}

	expected := 1
	if len(mcc.ms.queue) != expected {
		t.Fatalf("Expected %d creation txs in the chain, but found %d", expected, len(mcc.ms.queue))
	}

	wrapped := mcc.ms.queue[0]
	payload := utils.UnmarshalPayloadOrPanic(wrapped.Payload)
	if payload.Header.ChainHeader.Type != int32(cb.HeaderType_ORDERER_TRANSACTION) {
		t.Fatalf("Wrapped transaction should be of type ORDERER_TRANSACTION")
	}
	envelope := utils.UnmarshalEnvelopeOrPanic(payload.Data)
	if !reflect.DeepEqual(envelope, ingressTx) {
		t.Fatalf("Received different configtx than ingressed into the system")
	}

	sysFilter := newSystemChainFilter(mcc)
	action, committer := sysFilter.Apply(wrapped)

	if action != filter.Accept {
		t.Fatalf("Should have accepted the transaction, as it was already validated")
	}

	if !committer.Isolated() {
		t.Fatalf("Chain creation transactions should be isolated on commit")
	}

	committer.Commit()
	if len(mcc.newChains) != 1 {
		t.Fatalf("Proposal should only have created 1 new chain")
	}

	if !reflect.DeepEqual(mcc.newChains[0], ingressTx) {
		t.Fatalf("New chain should have been created with ingressTx")
	}
}
func TestHashSameRemoteRepo(t *testing.T) {
	b := []byte("firstcontent")
	hash := util.ComputeCryptoHash(b)

	srcPath1, err := getCodeFromHTTP("https://github.com/xspeedcruiser/javachaincodemvn")
	if err != nil {
		t.Logf("Error getting code from remote repo %s", err)
		t.Fail()
	}
	srcPath2, err := getCodeFromHTTP("https://github.com/xspeedcruiser/javachaincodemvn")
	if err != nil {
		t.Logf("Error getting code from remote repo %s", err)
		t.Fail()
	}
	defer func() {
		os.RemoveAll(srcPath1)
	}()
	defer func() {
		os.RemoveAll(srcPath2)
	}()

	hash1, err := hashFilesInDir(srcPath1, srcPath1, hash, nil)
	if err != nil {
		t.Logf("Error getting code %s", err)
		t.Fail()
	}
	hash2, err := hashFilesInDir(srcPath2, srcPath2, hash, nil)
	if err != nil {
		t.Logf("Error getting code %s", err)
		t.Fail()
	}
	if bytes.Compare(hash1, hash2) != 0 {
		t.Logf("Hash should be same across multiple downloads")
		t.Fail()
	}
}
func (b *BlockData) Hash() []byte {
	data, err := proto.Marshal(b) // XXX this is wrong, protobuf is not the right mechanism to serialize for a hash, AND, it is not a MerkleTree hash
	if err != nil {
		panic("This should never fail and is generally irrecoverable")
	}

	return util.ComputeCryptoHash(data)
}
func (c *bucketHashCalculator) computeCryptoHash() []byte {
	if c.currentChaincodeID != "" {
		c.appendCurrentChaincodeData()
		c.currentChaincodeID = ""
		c.dataNodes = nil
	}
	logger.Debug("Hashable content for bucket [%s]: length=%d, contentInStringForm=[%s]", c.bucketKey, len(c.hashingData), string(c.hashingData))
	if util.IsNil(c.hashingData) {
		return nil
	}
	return openchainUtil.ComputeCryptoHash(c.hashingData)
}
func expectedCryptoHashForTest(key *trieKey, value []byte, childrenHashes ...[]byte) []byte {
	expectedHash := []byte{}
	if key != nil {
		keyBytes := key.getEncodedBytes()
		expectedHash = append(expectedHash, proto.EncodeVarint(uint64(len(keyBytes)))...)
		expectedHash = append(expectedHash, keyBytes...)
		expectedHash = append(expectedHash, value...)
	}
	for _, b := range childrenHashes {
		expectedHash = append(expectedHash, b...)
	}
	return util.ComputeCryptoHash(expectedHash)
}
//GetName returns canonical chaincode name based on chain name
func (ccid *CCID) GetName() string {
	if ccid.ChaincodeSpec == nil {
		panic("nil chaincode spec")
	}

	//this better be chainless system chaincode!
	if ccid.ChainID != "" {
		hash := util.ComputeCryptoHash([]byte(ccid.ChainID))
		hexstr := hex.EncodeToString(hash[:])
		return ccid.ChaincodeSpec.ChaincodeID.Name + "-" + hexstr
	}

	return ccid.ChaincodeSpec.ChaincodeID.Name
}
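// Editor's illustrative sketch, not part of the original source: a minimal test of the
// naming rule above, assuming it lives in the same package as CCID and that the
// pb.ChaincodeSpec/pb.ChaincodeID types are available exactly as GetName uses them.
// The chaincode name "mycc" and chain ID "testchain" are hypothetical.
func TestCCIDNameIncludesChainHash(t *testing.T) {
	spec := &pb.ChaincodeSpec{ChaincodeID: &pb.ChaincodeID{Name: "mycc"}}

	// Chainless system chaincode: the bare chaincode name is returned.
	if got := (&CCID{ChaincodeSpec: spec}).GetName(); got != "mycc" {
		t.Fatalf("expected bare name for chainless chaincode, got %s", got)
	}

	// With a chain ID, the name is suffixed with the hex-encoded hash of the chain ID.
	want := "mycc-" + hex.EncodeToString(util.ComputeCryptoHash([]byte("testchain")))
	if got := (&CCID{ChaincodeSpec: spec, ChainID: "testchain"}).GetName(); got != want {
		t.Fatalf("expected %s, got %s", want, got)
	}
}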
func hash(msg interface{}) string {
	var raw []byte
	switch converted := msg.(type) {
	case *Request:
		raw, _ = proto.Marshal(converted)
	case *RequestBatch:
		raw, _ = proto.Marshal(converted)
	default:
		logger.Error("Asked to hash non-supported message type, ignoring")
		return ""
	}
	return base64.StdEncoding.EncodeToString(util.ComputeCryptoHash(raw))
}
//core hash computation factored out for testing
func computeHash(contents []byte, hash []byte) []byte {
	newSlice := make([]byte, len(hash)+len(contents))

	//copy the contents
	copy(newSlice[0:len(contents)], contents[:])

	//add the previous hash
	copy(newSlice[len(contents):], hash[:])

	//compute new hash
	hash = util.ComputeCryptoHash(newSlice)

	return hash
}
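// Editor's illustrative sketch, not part of the original source: computeHash chains a
// previous hash with new content, so feeding the same lines in the same order from the
// same seed hash is deterministic. Assumes the same package as computeHash; the test
// name and inputs are hypothetical.
func TestComputeHashIsDeterministic(t *testing.T) {
	seed := util.ComputeCryptoHash([]byte("firstcontent"))
	lines := [][]byte{[]byte("line one"), []byte("line two")}

	h1, h2 := seed, seed
	for _, l := range lines {
		h1 = computeHash(l, h1)
		h2 = computeHash(l, h2)
	}

	if !bytes.Equal(h1, h2) {
		t.Fatalf("identical inputs should produce identical chained hashes")
	}
}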
// TestHashOrderChange changes the order of hash computation over a list of lines and checks for hash change
func TestHashOrderChange(t *testing.T) {
	b := []byte("firstcontent")
	hash := util.ComputeCryptoHash(b)

	b2 := [][]byte{
		[]byte("To be, or not to be- that is the question:"),
		[]byte("Whether 'tis nobler in the mind to suffer"),
		[]byte("The slings and arrows of outrageous fortune"),
		[]byte("Or to take arms against a sea of troubles,"),
		[]byte("And by opposing end them."),
		[]byte("To die- to sleep- No more; and by a sleep to say we end"),
		[]byte("The heartache, and the thousand natural shocks"),
		[]byte("That flesh is heir to."),
		[]byte("'Tis a consummation Devoutly to be wish'd."),
	}

	h1 := hash
	for _, l := range b2 {
		h1 = computeHash(l, h1)
	}

	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	randIndex1 := (int(r.Uint32())) % len(b2)
	randIndex2 := (int(r.Uint32())) % len(b2)

	//make sure the two indices are different
	for {
		if randIndex2 != randIndex1 {
			break
		}
		randIndex2 = (int(r.Uint32())) % len(b2)
	}

	//switch two arbitrary lines
	tmp := b2[randIndex2]
	b2[randIndex2] = b2[randIndex1]
	b2[randIndex1] = tmp

	//chain the hashes again over the reordered lines
	h2 := hash
	for _, l := range b2 {
		h2 = computeHash(l, h2)
	}

	//hash should be different
	if bytes.Compare(h1, h2) == 0 {
		t.Fail()
		t.Logf("Hash expected to be different but is same")
	}
}
func BenchmarkCryptoHash(b *testing.B) {
	flags := flag.NewFlagSet("testParams", flag.ExitOnError)
	numBytesPointer := flags.Int("NumBytes", -1, "Number of Bytes")
	flags.Parse(testParams)
	numBytes := *numBytesPointer
	if numBytes == -1 {
		b.Fatal("Missing value for parameter NumBytes")
	}
	randomBytes := testutil.ConstructRandomBytes(b, numBytes)
	//b.Logf("byte size=%d, b.N=%d", len(randomBytes), b.N)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		util.ComputeCryptoHash(randomBytes)
	}
}
func TestHashOverLocalDir(t *testing.T) {
	b := []byte("firstcontent")
	hash := util.ComputeCryptoHash(b)

	hash, err := hashFilesInDir(".", "../golang/hashtestfiles", hash, nil)
	if err != nil {
		t.Fail()
		t.Logf("error : %s", err)
	}

	expectedHash := "7b3b2193bed2bd7c19300aa5d6d7f6bb4d61602e4978a78bc08028379cb5cf0ed877bd9db3e990230e8bf6c974edd765f3027f061fd8657d30fc858a676a6f4a"

	computedHash := hex.EncodeToString(hash[:])

	if expectedHash != computedHash {
		t.Fail()
		t.Logf("Hash expected to be unchanged")
	}
}
// GetHash returns the hash of this block.
func (block *Block) GetHash() ([]byte, error) {

	// copy the block and remove the non-hash data
	blockBytes, err := block.Bytes()
	if err != nil {
		return nil, fmt.Errorf("Could not calculate hash of block: %s", err)
	}
	blockCopy, err := UnmarshallBlock(blockBytes)
	if err != nil {
		return nil, fmt.Errorf("Could not calculate hash of block: %s", err)
	}
	blockCopy.NonHashData = nil

	// Hash the block
	data, err := proto.Marshal(blockCopy)
	if err != nil {
		return nil, fmt.Errorf("Could not calculate hash of block: %s", err)
	}
	hash := util.ComputeCryptoHash(data)
	return hash, nil
}
// TestHashOverFiles computes hash over a directory and ensures it matches precomputed, hardcoded, hash
func TestHashOverFiles(t *testing.T) {
	b := []byte("firstcontent")
	hash := util.ComputeCryptoHash(b)

	hash, err := hashFilesInDir(".", "hashtestfiles", hash, nil)
	if err != nil {
		t.Fail()
		t.Logf("error : %s", err)
	}

	//as long as no files under "hashtestfiles" are changed, hash should always compute to the following
	expectedHash := "a4fe18bebf3d7e1c030c042903bdda9019b33829d03d9b95ab1edc8957be70dee6d786ab27b207210d29b5d9f88456ff753b8da5c244458cdcca6eb3c28a17ce"

	computedHash := hex.EncodeToString(hash[:])

	if expectedHash != computedHash {
		t.Fail()
		t.Logf("Hash expected to be unchanged")
	}
}
//generateHashcode gets hashcode of the code under path. If path is a HTTP(s) url
//it downloads the code first to compute the hash.
//NOTE: for dev mode, user builds and runs chaincode manually. The name provided
//by the user is equivalent to the path. This method will treat the name
//as codebytes and compute the hash from it. ie, user cannot run the chaincode
//with the same (name, ctor, args)
func generateHashcode(spec *pb.ChaincodeSpec, path string) (string, error) {
	ctor := spec.CtorMsg
	if ctor == nil || ctor.Function == "" {
		return "", fmt.Errorf("Cannot generate hashcode from empty ctor")
	}

	hash := util.GenerateHashFromSignature(spec.ChaincodeID.Path, ctor.Function, ctor.Args)

	buf, err := ioutil.ReadFile(path)
	if err != nil {
		return "", fmt.Errorf("Error reading file: %s", err)
	}

	newSlice := make([]byte, len(hash)+len(buf))
	copy(newSlice[len(buf):], hash[:])
	//hash = md5.Sum(newSlice)
	hash = util.ComputeCryptoHash(newSlice)

	return hex.EncodeToString(hash[:]), nil
}
// TestHashLenChange changes a random length of a content and checks for hash change
func TestHashLenChange(t *testing.T) {
	b := []byte("firstcontent")
	hash := util.ComputeCryptoHash(b)

	b2 := []byte("To be, or not to be-")

	h1 := computeHash(b2, hash)

	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	randIndex := (int(r.Uint32())) % len(b2)

	b2 = b2[0:randIndex]

	h2 := computeHash(b2, hash)

	//hash should be different
	if bytes.Compare(h1, h2) == 0 {
		t.Fail()
		t.Logf("Hash expected to be different but is same")
	}
}
func (bucketNode *bucketNode) computeCryptoHash() []byte {
	cryptoHashContent := []byte{}
	numChildren := 0
	for i, childCryptoHash := range bucketNode.childrenCryptoHash {
		if childCryptoHash != nil {
			numChildren++
			logger.Debugf("Appending crypto-hash for child bucket = [%s]", bucketNode.bucketKey.getChildKey(i))
			cryptoHashContent = append(cryptoHashContent, childCryptoHash...)
		}
	}
	if numChildren == 0 {
		logger.Debugf("Returning <nil> crypto-hash of bucket = [%s] - because it has no children", bucketNode.bucketKey)
		bucketNode.markedForDeletion = true
		return nil
	}
	if numChildren == 1 {
		logger.Debugf("Propagating crypto-hash of single child node for bucket = [%s]", bucketNode.bucketKey)
		return cryptoHashContent
	}
	logger.Debugf("Computing crypto-hash for bucket [%s] by merging [%d] children", bucketNode.bucketKey, numChildren)
	return openchainUtil.ComputeCryptoHash(cryptoHashContent)
}
//hashFilesInDir computes h=hash(h,file bytes) for each file in a directory
//Directory entries are traversed recursively. In the end a single
//hash value is returned for the entire directory structure
func hashFilesInDir(rootDir string, dir string, hash []byte, tw *tar.Writer) ([]byte, error) {
	currentDir := filepath.Join(rootDir, dir)
	logger.Debug("hashFiles %s", currentDir)
	//ReadDir returns sorted list of files in dir
	fis, err := ioutil.ReadDir(currentDir)
	if err != nil {
		return hash, fmt.Errorf("ReadDir failed %s\n", err)
	}
	for _, fi := range fis {
		name := filepath.Join(dir, fi.Name())
		if fi.IsDir() {
			var err error
			hash, err = hashFilesInDir(rootDir, name, hash, tw)
			if err != nil {
				return hash, err
			}
			continue
		}
		fqp := filepath.Join(rootDir, name)
		buf, err := ioutil.ReadFile(fqp)
		if err != nil {
			fmt.Printf("Error reading %s\n", err)
			return hash, err
		}

		newSlice := make([]byte, len(hash)+len(buf))
		copy(newSlice[len(buf):], hash[:])
		//hash = md5.Sum(newSlice)
		hash = util.ComputeCryptoHash(newSlice)

		if tw != nil {
			is := bytes.NewReader(buf)
			if err = cutil.WriteStreamToPackage(is, fqp, filepath.Join("src", name), tw); err != nil {
				return hash, fmt.Errorf("Error adding file to tar %s", err)
			}
		}
	}
	return hash, nil
}
func TestProposalWithMissingPolicy(t *testing.T) {
	newChainID := "NewChainID"

	mcc := newMockChainCreator()
	mcc.ms.msc.chainCreators = []string{provisional.AcceptAllPolicyKey}

	chainCreateTx := &cb.ConfigurationItem{
		Key:  utils.CreationPolicyKey,
		Type: cb.ConfigurationItem_Orderer,
		Value: utils.MarshalOrPanic(&ab.CreationPolicy{
			Policy: provisional.AcceptAllPolicyKey,
			Digest: coreutil.ComputeCryptoHash([]byte{}),
		}),
	}
	ingressTx := makeConfigTxWithItems(newChainID, chainCreateTx)

	status := mcc.sysChain.proposeChain(ingressTx)

	if status == cb.Status_SUCCESS {
		t.Fatalf("Should not have validated the transaction with missing policy")
	}
}
// ComputeCryptoHash computes crypto-hash for the data held
// returns nil if no data is present
func (stateDelta *StateDelta) ComputeCryptoHash() []byte {
	if stateDelta.IsEmpty() {
		return nil
	}
	var buffer bytes.Buffer
	sortedChaincodeIds := stateDelta.GetUpdatedChaincodeIds(true)
	for _, chaincodeID := range sortedChaincodeIds {
		buffer.WriteString(chaincodeID)
		chaincodeStateDelta := stateDelta.ChaincodeStateDeltas[chaincodeID]
		sortedKeys := chaincodeStateDelta.getSortedKeys()
		for _, key := range sortedKeys {
			buffer.WriteString(key)
			updatedValue := chaincodeStateDelta.get(key)
			if !updatedValue.IsDeleted() {
				buffer.Write(updatedValue.Value)
			}
		}
	}
	hashingContent := buffer.Bytes()
	logger.Debugf("computing hash on %#v", hashingContent)
	return util.ComputeCryptoHash(hashingContent)
}
//hashFilesInDir computes h=hash(h,file bytes) for each file in a directory
//Directory entries are traversed recursively. In the end a single
//hash value is returned for the entire directory structure
func hashFilesInDir(cutoff string, dir string, hash []byte, tw *tar.Writer) ([]byte, error) {
	//ReadDir returns sorted list of files in dir
	fis, err := ioutil.ReadDir(dir)
	if err != nil {
		return hash, fmt.Errorf("ReadDir failed %s\n", err)
	}
	for _, fi := range fis {
		name := fmt.Sprintf("%s/%s", dir, fi.Name())
		if fi.IsDir() {
			var err error
			hash, err = hashFilesInDir(cutoff, name, hash, tw)
			if err != nil {
				return hash, err
			}
			continue
		}
		buf, err := ioutil.ReadFile(name)
		if err != nil {
			fmt.Printf("Error reading %s\n", err)
			return hash, err
		}

		newSlice := make([]byte, len(hash)+len(buf))
		copy(newSlice[len(buf):], hash[:])
		//hash = md5.Sum(newSlice)
		hash = util.ComputeCryptoHash(newSlice)

		if tw != nil {
			is := bytes.NewReader(buf)
			if err = cutil.WriteStreamToPackage(is, name, name[len(cutoff):], tw); err != nil {
				return hash, fmt.Errorf("Error adding file to tar %s", err)
			}
		}
	}
	return hash, nil
}
func TestHashDiffRemoteRepo(t *testing.T) {
	b := []byte("firstcontent")
	hash := util.ComputeCryptoHash(b)

	// TODO Change reference to an official test repo once established
	// Same repo being used in behave tests
	srcPath1, err := getCodeFromHTTP("https://github.com/xspeedcruiser/javachaincodemvn")
	if err != nil {
		t.Logf("Error getting code from remote repo %s", err)
		t.Fail()
	}
	srcPath2, err := getCodeFromHTTP("https://github.com/xspeedcruiser/javachaincode")
	if err != nil {
		t.Logf("Error getting code from remote repo %s", err)
		t.Fail()
	}
	defer func() {
		os.RemoveAll(srcPath1)
	}()
	defer func() {
		os.RemoveAll(srcPath2)
	}()

	hash1, err := hashFilesInDir(srcPath1, srcPath1, hash, nil)
	if err != nil {
		t.Logf("Error getting code %s", err)
		t.Fail()
	}
	hash2, err := hashFilesInDir(srcPath2, srcPath2, hash, nil)
	if err != nil {
		t.Logf("Error getting code %s", err)
		t.Fail()
	}
	if bytes.Compare(hash1, hash2) == 0 {
		t.Logf("Hash should be different for 2 different remote repos")
		t.Fail()
	}
}
// ChainCreationConfiguration creates a new chain creation configuration envelope from
// the supplied creationPolicy, new chainID, and a template configuration envelope
// The template configuration envelope will have the correct chainID set on all items,
// and the first item will be a CreationPolicy which is ready for the signatures as
// required by the policy
func ChainCreationConfiguration(creationPolicy, newChainID string, template *cb.ConfigurationEnvelope) *cb.ConfigurationEnvelope {
	newConfigItems := make([]*cb.SignedConfigurationItem, len(template.Items))
	var hashBytes []byte

	for i, item := range template.Items {
		configItem := UnmarshalConfigurationItemOrPanic(item.ConfigurationItem)
		configItem.Header.ChainID = newChainID
		newConfigItems[i] = &cb.SignedConfigurationItem{
			ConfigurationItem: MarshalOrPanic(configItem),
		}
		hashBytes = append(hashBytes, newConfigItems[i].ConfigurationItem...)
	}

	digest := cu.ComputeCryptoHash(hashBytes)

	authorizeItem := &cb.SignedConfigurationItem{
		ConfigurationItem: MarshalOrPanic(&cb.ConfigurationItem{
			Header: &cb.ChainHeader{
				ChainID: newChainID,
				Type:    int32(cb.HeaderType_CONFIGURATION_ITEM),
			},
			Type: cb.ConfigurationItem_Orderer,
			Key:  CreationPolicyKey,
			Value: MarshalOrPanic(&ab.CreationPolicy{
				Policy: creationPolicy,
				Digest: digest,
			}),
		}),
	}

	authorizedConfig := append([]*cb.SignedConfigurationItem{authorizeItem}, newConfigItems...)

	return &cb.ConfigurationEnvelope{
		Items: authorizedConfig,
	}
}
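// Editor's illustrative sketch, not part of the original source: the digest embedded in
// the CreationPolicy above is the hash of the concatenated marshaled configuration items
// (excluding the CreationPolicy item itself), which is what authorize() recomputes over
// configEnvelope.Items[1:]. The helper name below is hypothetical and exists only to
// make that relationship explicit.
func creationPolicyDigest(items []*cb.SignedConfigurationItem) []byte {
	var concatenated []byte
	for _, item := range items { // items excluding the CreationPolicy item itself
		concatenated = append(concatenated, item.ConfigurationItem...)
	}
	return cu.ComputeCryptoHash(concatenated)
}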
func hashReq(req *Request) string {
	raw, _ := proto.Marshal(req)
	return base64.StdEncoding.EncodeToString(util.ComputeCryptoHash(raw))
}
	Expect(ledgerPtr.SetState("chaincode2", "key2", []byte("value2"))).To(BeNil())
	ledgerPtr.TxFinished("txUuid2", true)
})
It("creates, but does not populate and finishes a transaction", func() {
	ledgerPtr.TxBegin("txUuid3")
	ledgerPtr.TxFinished("txUuid3", true)
})
It("creates, populates and finishes a transaction", func() {
	ledgerPtr.TxBegin("txUuid4")
	Expect(ledgerPtr.SetState("chaincode4", "key4", []byte("value4"))).To(BeNil())
	ledgerPtr.TxFinished("txUuid4", false)
})
It("should retrieve the delta state hash array containing expected values", func() {
	_, txDeltaHashes, err := ledgerPtr.GetTempStateHashWithTxDeltaStateHashes()
	Expect(err).To(BeNil())
	Expect(util.ComputeCryptoHash(appendAll([]byte("chaincode1key1value1")))).To(Equal(txDeltaHashes["txUuid1"]))
	Expect(util.ComputeCryptoHash(appendAll([]byte("chaincode2key2value2")))).To(Equal(txDeltaHashes["txUuid2"]))
	Expect(txDeltaHashes["txUuid3"]).To(BeNil())
	_, ok := txDeltaHashes["txUuid4"]
	Expect(ok).To(Equal(false))
})
It("should commit the batch", func() {
	Expect(ledgerPtr.CommitTxBatch(1, []*protos.Transaction{}, nil, []byte("proof"))).To(BeNil())
})
It("creates, populates and finishes a transaction", func() {
	Expect(ledgerPtr.BeginTxBatch(2)).To(BeNil())
	ledgerPtr.TxBegin("txUuid1")
	Expect(ledgerPtr.SetState("chaincode1", "key1", []byte("value1"))).To(BeNil())
	ledgerPtr.TxFinished("txUuid1", true)
})
It("should retrieve a delta state hash array of length 1", func() {
func ComputeCryptoHash(content ...[]byte) []byte {
	return util.ComputeCryptoHash(AppendAll(content...))
}
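// Editor's illustrative sketch, not part of the original source: the variadic wrapper
// above should be equivalent to hashing the concatenation of its arguments, assuming
// AppendAll simply concatenates the byte slices it is given and that this test lives
// in the same package as the wrapper. Inputs are hypothetical.
func TestComputeCryptoHashConcatenates(t *testing.T) {
	a, b := []byte("foo"), []byte("bar")

	combined := util.ComputeCryptoHash([]byte("foobar"))
	if !bytes.Equal(ComputeCryptoHash(a, b), combined) {
		t.Fatalf("variadic hash should equal hash of concatenated input")
	}
}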
func (op *obcSieve) processExecute() {
	if op.currentReq != "" {
		return
	}

	primary := op.pbft.primary(op.epoch)
	exec := op.queuedExec[primary]
	delete(op.queuedExec, primary)

	if exec == nil {
		return
	}

	if !(exec.View == op.epoch && op.pbft.primary(op.epoch) == exec.ReplicaId && op.pbft.activeView) {
		logger.Debug("Invalid execute from %d", exec.ReplicaId)
		return
	}

	if exec.BlockNumber != op.blockNumber+1 {
		logger.Debug("Block number in execute wrong: expected %d, got %d", op.blockNumber, exec.BlockNumber)
		return
	}

	op.currentReq = base64.StdEncoding.EncodeToString(util.ComputeCryptoHash(exec.Request))

	logger.Debug("Sieve replica %d received exec from %d, epoch=%d, blockNo=%d from request=%s",
		op.id, exec.ReplicaId, exec.View, exec.BlockNumber, op.currentReq)

	// With the execution decoupled from the ordering, this sanity check is challenging and introduces a race
	/*
		blockchainSize, _ := op.stack.GetBlockchainSize()
		blockchainSize--
		if op.blockNumber != blockchainSize {
			logger.Critical("Sieve replica %d block number and ledger blockchain size diverged: blockNo=%d, blockchainSize=%d", op.id, op.blockNumber, blockchainSize)
			return
		}
	*/

	op.blockNumber = exec.BlockNumber

	tx := &pb.Transaction{}
	proto.Unmarshal(exec.Request, tx)

	op.stack.BeginTxBatch(op.currentReq)
	results, err := op.stack.ExecTxs(op.currentReq, []*pb.Transaction{tx})
	_ = results // XXX what to do?
	_ = err     // XXX what to do?

	logger.Debug("Sieve replica %d results=%x err=%v using lastPbftExec of %d", op.id, results, err, op.lastExecPbftSeqNo)
	meta, _ := proto.Marshal(&Metadata{op.lastExecPbftSeqNo})
	op.currentResult, err = op.stack.PreviewCommitTxBatch(op.currentReq, meta)
	if err != nil {
		logger.Error("could not preview next block: %s", err)
		op.rollback()
		return
	}

	logger.Debug("Sieve replica %d executed blockNo=%d, request=%s", op.id, op.blockNumber, op.currentReq)

	verify := &Verify{
		View:          op.epoch,
		BlockNumber:   op.blockNumber,
		RequestDigest: op.currentReq,
		ResultDigest:  op.currentResult,
		ReplicaId:     op.id,
	}
	op.pbft.sign(verify)

	logger.Debug("Sieve replica %d sending verify blockNo=%d with result %x", op.id, verify.BlockNumber, op.currentResult)

	op.recvVerify(verify)
	op.broadcastMsg(&SieveMessage{&SieveMessage_Verify{verify}})

	// To prevent races, have the main pbft thread start this timer, as it will need to stop it
	op.pbft.inject(func() {
		op.pbft.startTimer(op.pbft.requestTimeout, fmt.Sprintf("new request %s", op.currentReq))
	})
}