Esempio n. 1
0
// populateDB creates a fresh test DB and fills it with totalKeys entries.
// Keys are keyPrefix+index; each value pads the entry to roughly kvSize bytes.
func populateDB(tb testing.TB, kvSize int, totalKeys int, keyPrefix string) {
	dbWrapper := db.NewTestDBWrapper()
	dbWrapper.CreateFreshDB(tb)
	batch := gorocksdb.NewWriteBatch()
	for i := 0; i < totalKeys; i++ {
		key := []byte(keyPrefix + strconv.Itoa(i))
		value := testutil.ConstructRandomBytes(tb, kvSize-len(key))
		batch.Put(key, value)
		// Flush every 1000 keys to keep batch memory bounded.
		// (The original i%1000 == 0 flushed at i==0 and silently dropped
		// any keys after the last multiple of 1000.)
		if (i+1)%1000 == 0 {
			dbWrapper.WriteToDB(tb, batch)
			batch.Destroy()
			batch = gorocksdb.NewWriteBatch()
		}
	}
	// Persist the final partial batch (no-op if it is empty).
	dbWrapper.WriteToDB(tb, batch)
	batch.Destroy()
	dbWrapper.CloseDB(tb)
}
Esempio n. 2
0
// _list_doMerge merges one or more list operands for key into RocksDB.
// value may be a single []byte operand or a batch of [][]byte operands;
// opCode/start/end describe the list operation being merged.
func (rh *RocksDBHandler) _list_doMerge(key []byte, value interface{}, opCode string, start, end int) error {
	var values [][]byte
	switch v := value.(type) {
	case []byte:
		values = [][]byte{v}
	case [][]byte:
		values = v
	}
	if len(values) == 0 {
		return ErrWrongArgumentsCount
	}

	options := rocks.NewDefaultWriteOptions()
	defer options.Destroy()
	batch := rocks.NewWriteBatch()
	defer batch.Destroy()
	// Record the redis type of the key alongside the merged data.
	batch.Put(rh.getTypeKey(key), []byte(kRedisList))
	for _, dValue := range values {
		data, err := encode(ListOperand{opCode, start, end, dValue})
		if err != nil {
			return err
		}
		batch.Merge(key, data)
	}
	return rh.db.Write(options, batch)
}
Esempio n. 3
0
// CommitTxBatch - gets invoked when the current transaction-batch needs to be committed
// This function returns successfully iff the transactions details and state changes (that
// may have happened during execution of this transaction-batch) have been committed to permanent storage
func (ledger *Ledger) CommitTxBatch(id interface{}, transactions []*protos.Transaction, proof []byte) error {
	err := ledger.checkValidIDCommitORRollback(id)
	if err != nil {
		return err
	}

	success := true
	// The deferred calls must be closures: a plain `defer f(success)`
	// evaluates `success` at defer-registration time (always true), so the
	// later `success = false` assignments would never be observed.
	defer func() { ledger.resetForNextTxGroup(success) }()
	defer func() { ledger.blockchain.blockPersistenceStatus(success) }()

	stateHash, err := ledger.state.GetHash()
	if err != nil {
		success = false
		return err
	}

	writeBatch := gorocksdb.NewWriteBatch()
	defer writeBatch.Destroy()
	block := protos.NewBlock(transactions)
	newBlockNumber, err := ledger.blockchain.addPersistenceChangesForNewBlock(context.TODO(), block, stateHash, writeBatch)
	if err != nil {
		success = false
		return err
	}
	ledger.state.AddChangesForPersistence(newBlockNumber, writeBatch)
	opt := gorocksdb.NewDefaultWriteOptions()
	defer opt.Destroy()
	if dbErr := db.GetDBHandle().DB.Write(opt, writeBatch); dbErr != nil {
		success = false
		return dbErr
	}
	producer.Send(producer.CreateBlockEvent(block))
	return nil
}
Esempio n. 4
0
// RedisDel deletes one or more keys (value entry plus its type entry) and
// returns how many existed and were removed. Keys that fail to load or fail
// to delete are skipped silently.
func (rh *RocksDBHandler) RedisDel(key []byte, keys ...[]byte) (int, error) {
	if rh.db == nil {
		return 0, ErrRocksIsDead
	}
	// len() of a nil slice is 0, so the extra nil check was redundant.
	if len(key) == 0 {
		return 0, ErrWrongArgumentsCount
	}

	keyData := append([][]byte{key}, keys...)
	count := 0
	readOptions := rocks.NewDefaultReadOptions()
	writeOptions := rocks.NewDefaultWriteOptions()
	defer readOptions.Destroy()
	defer writeOptions.Destroy()

	for _, dKey := range keyData {
		// Only count keys that actually exist.
		_, err := rh.loadRedisObject(readOptions, dKey)
		if err == nil {
			batch := rocks.NewWriteBatch()
			batch.Delete(rh.getTypeKey(dKey))
			batch.Delete(dKey)
			if err := rh.db.Write(writeOptions, batch); err == nil {
				count++
			}
			// Destroy explicitly (not defer) so per-iteration batches are
			// freed promptly instead of at function return.
			batch.Destroy()
		}
	}
	return count, nil
}
Esempio n. 5
0
// persistAndClearInMemoryChanges flushes the state changes for blockNumber
// to the test DB and then discards the in-memory working set.
func (testWrapper *stateTestWrapper) persistAndClearInMemoryChanges(blockNumber uint64) {
	batch := gorocksdb.NewWriteBatch()
	defer batch.Destroy()
	testWrapper.state.AddChangesForPersistence(blockNumber, batch)
	testDBWrapper.WriteToDB(testWrapper.t, batch)
	testWrapper.state.ClearInMemoryChanges(true)
}
Esempio n. 6
0
// TestDBStatsOversizedKV verifies that scan() counts both total and
// oversized key-value pairs in the blockchain column family.
func TestDBStatsOversizedKV(t *testing.T) {
	dbTestWrapper := db.NewTestDBWrapper()
	dbTestWrapper.CleanDB(t)
	defer dbTestWrapper.CloseDB(t)
	defer deleteTestDBDir()

	openchainDB := db.GetDBHandle()
	writeBatch := gorocksdb.NewWriteBatch()
	// Two normal values and two oversized ones.
	entries := []struct {
		key   string
		value []byte
	}{
		{"key1", []byte("value1")},
		{"key2", generateOversizedValue(0)},
		{"key3", generateOversizedValue(100)},
		{"key4", []byte("value4")},
	}
	for _, e := range entries {
		writeBatch.PutCF(openchainDB.BlockchainCF, []byte(e.key), e.value)
	}
	dbTestWrapper.WriteToDB(t, writeBatch)

	totalKVs, numOverSizedKVs := scan(openchainDB, "blockchainCF", openchainDB.BlockchainCF, testDetailPrinter)

	if totalKVs != 4 {
		t.Fatalf("totalKVs is not correct. Expected [%d], found [%d]", 4, totalKVs)
	}

	if numOverSizedKVs != 2 {
		t.Fatalf("numOverSizedKVs is not correct. Expected [%d], found [%d]", 2, numOverSizedKVs)
	}

	if numOversizedKeyValues != 2 {
		t.Fatalf("numOversizedKeyValues is not correct. Expected [%d], found [%d]", 2, numOversizedKeyValues)
	}
}
Esempio n. 7
0
// pop removes and returns one element: the head for left == true (LPop),
// the tail otherwise (RPop). Returns (nil, nil) when the list is empty.
func (l *ListElement) pop(left bool) ([]byte, error) {
	l.mu.Lock()
	defer l.mu.Unlock()

	x, y := l.leftIndex(), l.rightIndex()
	size := y - x + 1
	if size == 0 {
		return nil, nil
	}
	if size < 0 {
		// Corrupt index bounds; fail before touching the DB (the original
		// performed a pointless RawGet before reaching this case).
		return nil, errors.New("size less than 0")
	}

	var idxkey []byte
	if left {
		idxkey = l.indexKey(x)
	} else {
		idxkey = l.indexKey(y)
	}

	val, err := l.db.RawGet(idxkey)
	if err != nil {
		return nil, err
	}

	if size == 1 {
		// Last element: remove both the element and the list's raw key
		// atomically so an empty list leaves no residue.
		batch := gorocksdb.NewWriteBatch()
		defer batch.Destroy()
		batch.Delete(l.rawKey())
		batch.Delete(idxkey)
		return val, l.db.WriteBatch(batch)
	}
	return val, l.db.RawDelete(idxkey)
}
Esempio n. 8
0
// NewBatch returns a fresh KVBatch backed by a new RocksDB write batch.
func (w *Writer) NewBatch() store.KVBatch {
	return &Batch{
		w:     w,
		batch: gorocksdb.NewWriteBatch(),
	}
}
Esempio n. 9
0
// Add inserts score/member pairs, replacing an existing member's score.
// It returns the number of members newly added (updates are not counted).
// http://redis.io/commands/zadd#return-value
func (s *SortedSetElement) Add(scoreMembers ...[]byte) (int, error) {
	s.mu.Lock()
	defer s.mu.Unlock()

	count := len(scoreMembers)
	if count < 2 || count%2 != 0 {
		return 0, errors.New("invalid score/member pairs")
	}

	batch := gorocksdb.NewWriteBatch()
	defer batch.Destroy()

	added := 0
	for i := 0; i < count; i += 2 {
		score, member := scoreMembers[i], scoreMembers[i+1]
		skey, mkey := s.scoreKey(score, member), s.memberKey(member)

		// remove old score key
		oldscore, err := s.db.RawGet(mkey)
		if err != nil {
			return 0, err
		} else if oldscore != nil {
			// Delete the key built from the OLD score. The original deleted
			// skey (the new score key, re-Put just below), which was a net
			// no-op and left the stale old-score entry in the DB.
			batch.Delete(s.scoreKey(oldscore, member))
		} else {
			added++
		}

		// put new value
		batch.Put(mkey, score)
		batch.Put(skey, nil)
	}

	return added, s.db.WriteBatch(batch)
}
Esempio n. 10
0
// Remove deletes the given fields from the hash. If the removal covers every
// stored field, the hash's raw key is dropped as well.
func (h *HashElement) Remove(fields ...[]byte) error {
	h.mu.Lock()
	defer h.mu.Unlock()

	batch := gorocksdb.NewWriteBatch()
	defer batch.Destroy()

	toRemove := make(map[string]bool, len(fields))
	for _, f := range fields {
		toRemove[string(f)] = true
		batch.Delete(h.fieldKey(f))
	}

	// Scan stored fields; if any survives this removal, keep the raw key.
	removesAll := true
	h.db.PrefixEnumerate(h.fieldPrefix(), IterForward, func(i int, key, value []byte, quit *bool) {
		if !toRemove[string(h.fieldInKey(key))] { // wouldn't delete raw key
			removesAll = false
			*quit = true
		}
	})
	if removesAll {
		batch.Delete(h.rawKey())
	}

	return h.db.WriteBatch(batch)
}
Esempio n. 11
0
// persistRawBlock stores a pre-built block at blockNumber and updates the
// chain height / previous-hash bookkeeping when the block extends the chain.
func (blockchain *blockchain) persistRawBlock(block *protos.Block, blockNumber uint64) error {
	blockBytes, blockBytesErr := block.Bytes()
	if blockBytesErr != nil {
		return blockBytesErr
	}
	// Fetch the handle once instead of three separate GetDBHandle() calls.
	dbHandle := db.GetDBHandle()
	writeBatch := gorocksdb.NewWriteBatch()
	defer writeBatch.Destroy()
	writeBatch.PutCF(dbHandle.BlockchainCF, encodeBlockNumberDBKey(blockNumber), blockBytes)

	blockHash, err := block.GetHash()
	if err != nil {
		return err
	}

	// Need to check as we suport out of order blocks in cases such as block/state synchronization. This is
	// really blockchain height, not size.
	if blockchain.getSize() < blockNumber+1 {
		sizeBytes := encodeUint64(blockNumber + 1)
		writeBatch.PutCF(dbHandle.BlockchainCF, blockCountKey, sizeBytes)
		blockchain.size = blockNumber + 1
		blockchain.previousBlockHash = blockHash
	}

	if blockchain.indexer.isSynchronous() {
		blockchain.indexer.createIndexesSync(block, blockNumber, blockHash, writeBatch)
	}

	opt := gorocksdb.NewDefaultWriteOptions()
	defer opt.Destroy()
	return dbHandle.DB.Write(opt, writeBatch)
}
Esempio n. 12
0
// PersistChangesAndResetInMemoryChanges writes the pending trie changes to
// the test DB and clears the trie's working set.
func (stateTrieTestWrapper *stateTrieTestWrapper) PersistChangesAndResetInMemoryChanges() {
	batch := gorocksdb.NewWriteBatch()
	defer batch.Destroy()
	stateTrieTestWrapper.AddChangesForPersistence(batch)
	testDBWrapper.WriteToDB(stateTrieTestWrapper.t, batch)
	stateTrieTestWrapper.stateTrie.ClearWorkingSet(true)
}
Esempio n. 13
0
// addNewBlock persists a new block with the given state hash to the test DB,
// marks persistence successful, and returns the new block number.
func (testWrapper *blockchainTestWrapper) addNewBlock(block *protos.Block, stateHash []byte) uint64 {
	writeBatch := gorocksdb.NewWriteBatch()
	// The batch is a C-allocated resource; without Destroy it leaks
	// (sibling test wrappers in this file already do this).
	defer writeBatch.Destroy()
	newBlockNumber, err := testWrapper.blockchain.addPersistenceChangesForNewBlock(context.TODO(), block, stateHash, writeBatch)
	testutil.AssertNoError(testWrapper.t, err, "Error while adding a new block")
	testDBWrapper.WriteToDB(testWrapper.t, writeBatch)
	testWrapper.blockchain.blockPersistenceStatus(true)
	return newBlockNumber
}
Esempio n. 14
0
// CommitStateDelta commits the changes from state.ApplyStateDelta to the
// DB.
func (state *State) CommitStateDelta() error {
	if state.updateStateImpl {
		state.stateImpl.PrepareWorkingSet(state.stateDelta)
		state.updateStateImpl = false
	}
	writeBatch := gorocksdb.NewWriteBatch()
	// Both the batch and the write options wrap C allocations and must be
	// destroyed to avoid leaks.
	defer writeBatch.Destroy()
	state.stateImpl.AddChangesForPersistence(writeBatch)
	opt := gorocksdb.NewDefaultWriteOptions()
	defer opt.Destroy()
	return db.GetDBHandle().DB.Write(opt, writeBatch)
}
Esempio n. 15
0
// Enqueue appends data to the queue and returns the id it was stored under.
func (q *Queue) Enqueue(data []byte) (uint64, error) {
	id := atomic.AddUint64(&q.tail, 1)
	batch := rocks.NewWriteBatch()
	defer batch.Destroy()
	// Bump the persisted tail counter and store the payload in one batch.
	batch.MergeCF(q.cfHandle, q.metaKey("tail"), oneBinary)
	batch.PutCF(q.cfHandle, q.key(id), data)
	err := q.store.Write(q.store.wo, batch)

	log.Debugf("[Queue] Enqueued data id=%d, err=%v", id, err)
	return id, err
}
// createIndexesInternal adds entries into db for creating indexes on various
// attributes of the block, and records the highest block indexed so far.
func (indexer *blockchainIndexerAsync) createIndexesInternal(block *protos.Block, blockNumber uint64, blockHash []byte) error {
	openchainDB := db.GetDBHandle()
	writeBatch := gorocksdb.NewWriteBatch()
	// Batch and write options wrap C allocations; destroy them to avoid leaks.
	defer writeBatch.Destroy()
	addIndexDataForPersistence(block, blockNumber, blockHash, writeBatch)
	writeBatch.PutCF(openchainDB.IndexesCF, lastIndexedBlockKey, encodeBlockNumber(blockNumber))
	opt := gorocksdb.NewDefaultWriteOptions()
	defer opt.Destroy()
	if err := openchainDB.DB.Write(opt, writeBatch); err != nil {
		return err
	}
	indexer.indexerState.blockIndexed(blockNumber)
	return nil
}
Esempio n. 17
0
// blockPersistenceStatus finalizes the last processed block. On success it
// advances the in-memory chain size and previous-block hash, and hands the
// block to the indexer when indexing is asynchronous. The lastProcessedBlock
// reference is cleared in every case.
func (blockchain *blockchain) blockPersistenceStatus(success bool) {
	if success {
		blockchain.size++
		blockchain.previousBlockHash = blockchain.lastProcessedBlock.blockHash
		if !blockchain.indexer.isSynchronous() {
			// NOTE(review): this batch is created, passed to createIndexes,
			// and destroyed without ever being written to the DB here —
			// presumably the async indexer ignores it and persists on its
			// own; confirm against the indexer implementation.
			writeBatch := gorocksdb.NewWriteBatch()
			defer writeBatch.Destroy()
			blockchain.indexer.createIndexes(blockchain.lastProcessedBlock.block,
				blockchain.lastProcessedBlock.blockNumber, blockchain.lastProcessedBlock.blockHash, writeBatch)
		}
	}
	blockchain.lastProcessedBlock = nil
}
Esempio n. 18
0
// _string_doMerge merges a string operand (opCode applied to value) for key,
// recording the key's redis type alongside.
func (rh *RocksDBHandler) _string_doMerge(key, value []byte, opCode string) error {
	options := rocks.NewDefaultWriteOptions()
	defer options.Destroy()
	batch := rocks.NewWriteBatch()
	defer batch.Destroy()
	batch.Put(rh.getTypeKey(key), []byte(kRedisString))
	// Early return on error keeps the happy path unindented
	// (replaces the if/else anti-idiom).
	data, err := encode(StringOperand{opCode, value})
	if err != nil {
		return err
	}
	batch.Merge(key, data)
	return rh.db.Write(options, batch)
}
Esempio n. 19
0
// Dequeue removes and returns the first queued item at or after the optional
// startId (default 1), returning EmptyQueue when nothing is available.
// When useTailing is set, a long-lived tailing iterator is reused across
// calls; otherwise a fresh iterator is created and closed per call.
func (q *Queue) Dequeue(startId ...uint64) (uint64, []byte, error) {
	store := q.store

	// Ids start at 1; clamp anything lower.
	var seekId uint64 = 1
	if len(startId) > 0 {
		seekId = startId[0]
	}
	if seekId < 1 {
		seekId = 1
	}

	var it *rocks.Iterator
	if q.useTailing {
		it = q.tailIterator
		if !it.Valid() {
			// FIXME?(mijia): When Dequeue happens faster than enqueue, the tail iterator would be exhausted
			// so we have seek it again.
			it.Seek(q.key(seekId))
		}
	} else {
		it = store.NewIteratorCF(store.ro, q.cfHandle)
		defer it.Close()
		it.Seek(q.key(seekId))
	}

	if !it.Valid() {
		return 0, nil, EmptyQueue
	}

	// Atomically delete the entry and bump the persisted head counter.
	wb := rocks.NewWriteBatch()
	defer wb.Destroy()
	key := makeSlice(it.Key())
	value := makeSlice(it.Value())
	wb.DeleteCF(q.cfHandle, key)
	wb.MergeCF(q.cfHandle, q.metaKey("head"), oneBinary)
	err := store.Write(store.wo, wb)
	if err == nil {
		atomic.AddUint64(&q.head, 1)
		// Advance the shared tailing iterator only after a successful write.
		if q.useTailing {
			it.Next()
		}
	}

	id := q.id(key)
	log.Debugf("[Queue] Dequeued data id=%d, err=%v", id, err)
	return id, value, err
}
Esempio n. 20
0
// performBasicReadWrite writes one key to each column family in a single
// batch and verifies every value reads back intact.
func performBasicReadWrite(t *testing.T) {
	openchainDB := GetDBHandle()
	opt := gorocksdb.NewDefaultWriteOptions()
	defer opt.Destroy()
	writeBatch := gorocksdb.NewWriteBatch()
	defer writeBatch.Destroy()
	writeBatch.PutCF(openchainDB.BlockchainCF, []byte("dummyKey"), []byte("dummyValue"))
	writeBatch.PutCF(openchainDB.StateCF, []byte("dummyKey1"), []byte("dummyValue1"))
	writeBatch.PutCF(openchainDB.StateDeltaCF, []byte("dummyKey2"), []byte("dummyValue2"))
	writeBatch.PutCF(openchainDB.IndexesCF, []byte("dummyKey3"), []byte("dummyValue3"))
	err := openchainDB.DB.Write(opt, writeBatch)
	if err != nil {
		t.Fatalf("Error while writing to db: %s", err)
	}

	// Table-driven read-back replaces four copy-pasted verify stanzas.
	checks := []struct {
		get      func([]byte) ([]byte, error)
		key      string
		expected string
	}{
		{openchainDB.GetFromBlockchainCF, "dummyKey", "dummyValue"},
		{openchainDB.GetFromStateCF, "dummyKey1", "dummyValue1"},
		{openchainDB.GetFromStateDeltaCF, "dummyKey2", "dummyValue2"},
		{openchainDB.GetFromIndexesCF, "dummyKey3", "dummyValue3"},
	}
	for _, c := range checks {
		value, err := c.get([]byte(c.key))
		if err != nil {
			t.Fatalf("read error = [%s]", err)
		}
		if !bytes.Equal(value, []byte(c.expected)) {
			t.Fatalf("read error. Bytes not equal. Expected [%s], found [%s]", c.expected, value)
		}
	}
}
Esempio n. 21
0
// LPush prepends vals to the left end of the list, creating the list's raw
// key first when the list is currently empty.
func (l *ListElement) LPush(vals ...[]byte) error {
	l.mu.Lock()
	defer l.mu.Unlock()

	wb := gorocksdb.NewWriteBatch()
	defer wb.Destroy()

	left, right := l.leftIndex(), l.rightIndex()
	if left == 0 && right == -1 {
		// Empty list: materialize the raw key.
		wb.Put(l.rawKey(), nil)
	}

	for i := range vals {
		wb.Put(l.indexKey(left-int64(i)-1), vals[i])
	}
	return l.db.WriteBatch(wb)
}
Esempio n. 22
0
// drop deletes every field of the hash plus its raw key, then detaches this
// element from the DB so later calls fail fast.
func (h *HashElement) drop() error {
	h.mu.Lock()
	defer h.mu.Unlock()

	wb := gorocksdb.NewWriteBatch()
	defer wb.Destroy()

	// Queue every field key for deletion, then the raw key itself.
	h.db.PrefixEnumerate(h.fieldPrefix(), IterForward, func(i int, key, value []byte, quit *bool) {
		wb.Delete(copyBytes(key))
	})
	wb.Delete(h.rawKey())

	err := h.db.WriteBatch(wb)
	if err == nil {
		h.db = nil // make sure not invoked by others
	}
	return err
}
Esempio n. 23
0
// drop deletes every element of the list plus its raw key, then detaches
// this element from the DB so later calls fail fast.
func (l *ListElement) drop() error {
	l.mu.Lock()
	defer l.mu.Unlock()

	wb := gorocksdb.NewWriteBatch()
	defer wb.Destroy()

	// Queue every element key for deletion, then the raw key itself.
	l.db.PrefixEnumerate(l.keyPrefix(), IterForward, func(i int, key, value []byte, quit *bool) {
		wb.Delete(copyBytes(key))
	})
	wb.Delete(l.rawKey())

	err := l.db.WriteBatch(wb)
	if err == nil {
		l.db = nil
	}
	return err
}
Esempio n. 24
0
// multiSet stores field/value pairs in a single batch and ensures the
// hash's raw key exists. fieldVals must be a non-empty even-length list.
func (h *HashElement) multiSet(fieldVals ...[]byte) error {
	h.mu.Lock()
	defer h.mu.Unlock()

	n := len(fieldVals)
	if n == 0 || n%2 != 0 {
		return errors.New("invalid field value pairs")
	}

	wb := gorocksdb.NewWriteBatch()
	defer wb.Destroy()
	for i := 0; i+1 < n; i += 2 {
		wb.Put(h.fieldKey(fieldVals[i]), fieldVals[i+1])
	}
	wb.Put(h.rawKey(), nil)

	return h.db.WriteBatch(wb)
}
Esempio n. 25
0
// _hash_doMerge merges field/value operand pairs for a hash key, recording
// the key's redis type alongside. values must be a non-empty even-length list.
func (rh *RocksDBHandler) _hash_doMerge(key []byte, values [][]byte, opCode string) error {
	// len() of a nil slice is 0, so the separate nil check was redundant.
	if len(values) == 0 || len(values)%2 != 0 {
		return ErrWrongArgumentsCount
	}
	options := rocks.NewDefaultWriteOptions()
	defer options.Destroy()
	batch := rocks.NewWriteBatch()
	defer batch.Destroy()
	batch.Put(rh.getTypeKey(key), []byte(kRedisHash))
	for i := 0; i < len(values); i += 2 {
		data, err := encode(HashOperand{opCode, string(values[i]), values[i+1]})
		if err != nil {
			return err
		}
		batch.Merge(key, data)
	}
	return rh.db.Write(options, batch)
}
Esempio n. 26
0
// _set_doMerge merges one operand per member for a set key, recording the
// key's redis type alongside. values must be non-empty.
func (rh *RocksDBHandler) _set_doMerge(key []byte, values [][]byte, opCode string) error {
	// len() of a nil slice is 0, so the separate nil check was redundant.
	if len(values) == 0 {
		return ErrWrongArgumentsCount
	}
	options := rocks.NewDefaultWriteOptions()
	defer options.Destroy()
	batch := rocks.NewWriteBatch()
	defer batch.Destroy()
	batch.Put(rh.getTypeKey(key), []byte(kRedisSet))
	for _, value := range values {
		data, err := encode(SetOperand{opCode, value})
		if err != nil {
			return err
		}
		batch.Merge(key, data)
	}
	return rh.db.Write(options, batch)
}
Esempio n. 27
0
// performBasicReadWrite writes a single key to the blockchain column family
// and verifies it reads back intact.
func performBasicReadWrite(t *testing.T) {
	openchainDB := GetDBHandle()
	opt := gorocksdb.NewDefaultWriteOptions()
	// opt and writeBatch wrap C allocations; the sibling variant of this
	// helper Destroys both, this one leaked them.
	defer opt.Destroy()
	writeBatch := gorocksdb.NewWriteBatch()
	defer writeBatch.Destroy()
	writeBatch.PutCF(openchainDB.BlockchainCF, []byte("dummyKey"), []byte("dummyValue"))
	err := openchainDB.DB.Write(opt, writeBatch)
	if err != nil {
		// Include the underlying error instead of discarding it.
		t.Fatalf("Error while writing to db: %s", err)
	}
	value, err := openchainDB.GetFromBlockchainCF([]byte("dummyKey"))
	if err != nil {
		t.Fatalf("read error = [%s]", err)
	}
	if !bytes.Equal(value, []byte("dummyValue")) {
		t.Fatalf("read error. Bytes not equal. Expected [%s], found [%s]", "dummyValue", value)
	}
}
Esempio n. 28
0
// CommitTxBatch - gets invoked when the current transaction-batch needs to be committed
// This function returns successfully iff the transactions details and state changes (that
// may have happened during execution of this transaction-batch) have been committed to permanent storage
func (ledger *Ledger) CommitTxBatch(id interface{}, transactions []*protos.Transaction, transactionResults []*protos.TransactionResult, metadata []byte) error {
	err := ledger.checkValidIDCommitORRollback(id)
	if err != nil {
		return err
	}

	// fail records an aborted batch (reset the tx group, mark persistence
	// failed) and passes the error through — de-duplicates the three
	// identical failure paths the original repeated inline.
	fail := func(e error) error {
		ledger.resetForNextTxGroup(false)
		ledger.blockchain.blockPersistenceStatus(false)
		return e
	}

	stateHash, err := ledger.state.GetHash()
	if err != nil {
		return fail(err)
	}

	writeBatch := gorocksdb.NewWriteBatch()
	defer writeBatch.Destroy()
	block := protos.NewBlock(transactions, metadata)
	block.NonHashData = &protos.NonHashData{}
	newBlockNumber, err := ledger.blockchain.addPersistenceChangesForNewBlock(context.TODO(), block, stateHash, writeBatch)
	if err != nil {
		return fail(err)
	}
	ledger.state.AddChangesForPersistence(newBlockNumber, writeBatch)
	opt := gorocksdb.NewDefaultWriteOptions()
	defer opt.Destroy()
	if dbErr := db.GetDBHandle().DB.Write(opt, writeBatch); dbErr != nil {
		return fail(dbErr)
	}

	ledger.resetForNextTxGroup(true)
	ledger.blockchain.blockPersistenceStatus(true)

	sendProducerBlockEvent(block)
	if len(transactionResults) != 0 {
		ledgerLogger.Debug("There were some erroneous transactions. We need to send a 'TX rejected' message here.")
	}
	return nil
}
Esempio n. 29
0
// saveRedisObject persists a redis object atomically: one entry recording
// its type and one entry with the encoded payload.
func (rh *RocksDBHandler) saveRedisObject(options *rocks.WriteOptions, key []byte, value interface{}, objType string) error {
	data, err := encode(RedisObject{
		Type: objType,
		Data: value,
	})
	if err != nil {
		return err
	}

	wb := rocks.NewWriteBatch()
	defer wb.Destroy()
	wb.Put(rh.getTypeKey(key), []byte(objType))
	wb.Put(key, data)
	if err = rh.db.Write(options, wb); err != nil {
		log.Printf("[saveRedisObject] Error when PUT > RocksDB, %s", err)
	}
	return err
}
Esempio n. 30
0
// persistChangesAndResetInMemoryChanges writes the pending state-impl
// changes to the test DB and clears the implementation's working set.
func (testWrapper *stateImplTestWrapper) persistChangesAndResetInMemoryChanges() {
	writeBatch := gorocksdb.NewWriteBatch()
	// The batch wraps a C allocation; Destroy it like the sibling wrappers
	// in this file do, instead of leaking it.
	defer writeBatch.Destroy()
	testWrapper.addChangesForPersistence(writeBatch)
	testDBWrapper.WriteToDB(testWrapper.t, writeBatch)
	testWrapper.stateImpl.ClearWorkingSet(true)
}