// Next - see interface 'statemgmt.RangeScanIterator' for details func (itr *RangeScanIterator) Next() bool { if itr.done { return false } for ; itr.dbItr.Valid(); itr.dbItr.Next() { // making a copy of key-value bytes because, underlying key bytes are reused by itr. // no need to free slices as iterator frees memory when closed. trieKeyBytes := statemgmt.Copy(itr.dbItr.Key().Data()) trieNodeBytes := statemgmt.Copy(itr.dbItr.Value().Data()) value := unmarshalTrieNodeValue(trieNodeBytes) if util.IsNil(value) { continue } // found an actual key currentCompositeKey := trieKeyEncoderImpl.decodeTrieKeyBytes(statemgmt.Copy(trieKeyBytes)) currentChaincodeID, currentKey := statemgmt.DecodeCompositeKey(currentCompositeKey) if currentChaincodeID == itr.chaincodeID && (itr.endKey == "" || currentKey <= itr.endKey) { itr.currentKey = currentKey itr.currentValue = value itr.dbItr.Next() return true } // retrieved all the keys in the given range break } itr.done = true return false }
// Next - see interface 'statemgmt.RangeScanIterator' for details func (itr *RangeScanIterator) Next() bool { if itr.done { return false } for ; itr.dbItr.Valid(); itr.dbItr.Next() { trieKeyBytes := itr.dbItr.Key().Data() trieNodeBytes := itr.dbItr.Value().Data() value := unmarshalTrieNodeValue(trieNodeBytes) if util.IsNil(value) { continue } // found an actual key currentCompositeKey := trieKeyEncoderImpl.decodeTrieKeyBytes(statemgmt.Copy(trieKeyBytes)) currentChaincodeID, currentKey := statemgmt.DecodeCompositeKey(currentCompositeKey) if currentChaincodeID == itr.chaincodeID && (itr.endKey == "" || currentKey <= itr.endKey) { itr.currentKey = currentKey itr.currentValue = value itr.dbItr.Next() return true } // retrieved all the keys in the given range break } itr.done = true return false }
func (bucketNode *bucketNode) mergeBucketNode(anotherBucketNode *bucketNode) { if !bucketNode.bucketKey.equals(anotherBucketNode.bucketKey) { panic(fmt.Errorf("Nodes with different keys can not be merged. BaseKey=[%#v], MergeKey=[%#v]", bucketNode.bucketKey, anotherBucketNode.bucketKey)) } for i, childCryptoHash := range anotherBucketNode.childrenCryptoHash { if !bucketNode.childrenUpdated[i] && util.IsNil(bucketNode.childrenCryptoHash[i]) { bucketNode.childrenCryptoHash[i] = childCryptoHash } } }
func fetchBucketNodeFromDB(bucketKey *bucketKey) (*bucketNode, error) { openchainDB := db.GetDBHandle() nodeBytes, err := openchainDB.GetFromStateCF(bucketKey.getEncodedBytes()) if err != nil { return nil, err } if util.IsNil(nodeBytes) { return nil, nil } return unmarshalBucketNode(bucketKey, nodeBytes), nil }
func (c *bucketHashCalculator) computeCryptoHash() []byte { if c.currentChaincodeID != "" { c.appendCurrentChaincodeData() c.currentChaincodeID = "" c.dataNodes = nil } logger.Debug("Hashable content for bucket [%s]: length=%d, contentInStringForm=[%s]", c.bucketKey, len(c.hashingData), string(c.hashingData)) if util.IsNil(c.hashingData) { return nil } return openchainUtil.ComputeCryptoHash(c.hashingData) }
func unmarshalBucketNode(bucketKey *bucketKey, serializedBytes []byte) *bucketNode { bucketNode := newBucketNode(bucketKey) buffer := proto.NewBuffer(serializedBytes) for i := 0; i < conf.getMaxGroupingAtEachLevel(); i++ { childCryptoHash, err := buffer.DecodeRawBytes(false) if err != nil { panic(fmt.Errorf("this error should not occur: %s", err)) } if !util.IsNil(childCryptoHash) { bucketNode.childrenCryptoHash[i] = childCryptoHash } } return bucketNode }
func (stateImpl *StateImpl) addDataNodeChangesForPersistence(writeBatch *gorocksdb.WriteBatch) { openchainDB := db.GetDBHandle() affectedBuckets := stateImpl.dataNodesDelta.getAffectedBuckets() for _, affectedBucket := range affectedBuckets { dataNodes := stateImpl.dataNodesDelta.getSortedDataNodesFor(affectedBucket) for _, dataNode := range dataNodes { if util.IsNil(dataNode.value) { writeBatch.DeleteCF(openchainDB.StateCF, dataNode.dataKey.getEncodedBytes()) } else { writeBatch.PutCF(openchainDB.StateCF, dataNode.dataKey.getEncodedBytes(), dataNode.value) } } } }
func computeDataNodesCryptoHash(bucketKey *bucketKey, updatedNodes dataNodes, existingNodes dataNodes) []byte { logger.Debug("Computing crypto-hash for bucket [%s]. numUpdatedNodes=[%d], numExistingNodes=[%d]", bucketKey, len(updatedNodes), len(existingNodes)) hashableContent := []byte{} i := 0 j := 0 for i < len(updatedNodes) && j < len(existingNodes) { updatedNode := updatedNodes[i] existingNode := existingNodes[j] c := bytes.Compare(updatedNode.dataKey.compositeKey, existingNode.dataKey.compositeKey) var nextNode *dataNode switch c { case -1: nextNode = updatedNode i++ case 0: nextNode = updatedNode i++ j++ case 1: nextNode = existingNode j++ } hashableContent = addNodeData(hashableContent, nextNode) } var remainingNodes dataNodes if i < len(updatedNodes) { remainingNodes = updatedNodes[i:] } else if j < len(existingNodes) { remainingNodes = existingNodes[j:] } for _, remainingNode := range remainingNodes { hashableContent = addNodeData(hashableContent, remainingNode) } logger.Debug("Hashable content for bucket [%s] = [%s]", bucketKey, string(hashableContent)) if util.IsNil(hashableContent) { return nil } return openchainUtil.ComputeCryptoHash(hashableContent) }
// isDelete reports whether this data node represents a deletion marker:
// a nil value means the key is to be removed rather than written.
func (dataNode *dataNode) isDelete() bool { return util.IsNil(dataNode.value) }