Code Example #1
File: dbtst.go  Project: chrjen/btcd
package main

// Import paths are assumed from the API in use; the original file's import
// block is not shown in this listing.
import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

// dataset is declared elsewhere in dbtst.go; each datum carries a key and a
// string value.

func main() {
	ro := &opt.ReadOptions{}
	wo := &opt.WriteOptions{}
	opts := &opt.Options{}

	ldb, err := leveldb.OpenFile("dbfile", opts)
	if err != nil {
		fmt.Printf("db open failed %v\n", err)
		return
	}

	// Stage every write in a single batch so the whole set is applied
	// atomically.
	batch := new(leveldb.Batch)
	for _, datum := range dataset {
		key := fmt.Sprintf("%v", datum.key)
		batch.Put([]byte(key), []byte(datum.value))
	}
	if err = ldb.Write(batch, wo); err != nil {
		fmt.Printf("db write failed %v\n", err)
		return
	}

	// Read each key back and verify the stored value matches the original.
	for _, datum := range dataset {
		key := fmt.Sprintf("%v", datum.key)
		data, err := ldb.Get([]byte(key), ro)
		if err != nil {
			fmt.Printf("db read failed %v\n", err)
		}

		if string(data) != datum.value {
			fmt.Printf("mismatched data from db key %v val %v db %v\n", key, datum.value, string(data))
		}
	}
	fmt.Printf("completed\n")
	ldb.Close()
}
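The dataset referenced above is declared elsewhere in dbtst.go and is not part of this excerpt. A minimal hypothetical stand-in that would let the example compile could look like the following (type name, key type, and values are assumptions, not taken from the original file):

// Hypothetical stand-in for the dataset declared elsewhere in dbtst.go.
// Only the shape matters here: a key formatted via %v and a string value.
type datum struct {
	key   int
	value string
}

var dataset = []datum{
	{key: 1, value: "one"},
	{key: 2, value: "two"},
	{key: 3, value: "three"},
}

With something like this in place, the program opens (or creates) dbfile, writes every pair in one atomic batch, and then reads each key back to verify the stored value.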
Code Example #2
File: dbcache.go  Project: chrjen/btcd
// commitTx atomically adds all of the pending keys to add and remove into the
// database cache.  When adding the pending keys would cause the size of the
// cache to exceed the max cache size, or the time since the last flush exceeds
// the configured flush interval, the cache will be flushed to the underlying
// persistent database.
//
// This is an atomic operation with respect to the cache in that either all of
// the pending keys to add and remove in the transaction will be applied or none
// of them will.
//
// The database cache itself might be flushed to the underlying persistent
// database even if the transaction fails to apply, but it will only be the
// state of the cache without the transaction applied.
//
// This function MUST be called during a database write transaction which in
// turn implies the database write lock will be held.
func (c *dbCache) commitTx(tx *transaction) error {
	// Flush the cache and write directly to the database if a flush is
	// needed.
	if c.needsFlush(tx) {
		if err := c.flush(); err != nil {
			return err
		}

		// Perform all leveldb update operations using a batch for
		// atomicity.
		batch := new(leveldb.Batch)
		tx.pendingKeys.ForEach(func(k, v []byte) bool {
			batch.Put(k, v)
			return true
		})
		tx.pendingKeys = nil
		tx.pendingRemove.ForEach(func(k, v []byte) bool {
			batch.Delete(k)
			return true
		})
		tx.pendingRemove = nil
		if err := c.ldb.Write(batch, nil); err != nil {
			return convertErr("failed to commit transaction", err)
		}

		return nil
	}

	// At this point a database flush is not needed, so atomically commit
	// the transaction to the cache.

	// Create a slice of transaction log entries large enough to house all
	// of the updates and add it to the list of logged transactions to
	// replay on flush.
	numEntries := tx.pendingKeys.Len() + tx.pendingRemove.Len()
	txLogEntries := make([]txLogEntry, numEntries)
	c.txLog = append(c.txLog, txLogEntries)

	// Since the cached keys to be added and removed use an immutable treap,
	// a snapshot is simply obtaining the root of the tree under the lock
	// which is used to atomically swap the root.
	c.cacheLock.RLock()
	newCachedKeys := c.cachedKeys
	newCachedRemove := c.cachedRemove
	c.cacheLock.RUnlock()

	// Apply every key to add in the database transaction to the cache.
	// Also create a transaction log entry for each one at the same time so
	// the database transaction can be replayed during flush.
	logEntryNum := 0
	tx.pendingKeys.ForEach(func(k, v []byte) bool {
		newCachedRemove = newCachedRemove.Delete(k)
		newCachedKeys = newCachedKeys.Put(k, v)

		logEntry := &txLogEntries[logEntryNum]
		logEntry.entryType = entryTypeUpdate
		logEntry.key = k
		logEntry.value = v
		logEntryNum++
		return true
	})
	tx.pendingKeys = nil

	// Apply every key to remove in the database transaction to the cache.
	// Also create a transaction log entry for each one at the same time so
	// the database transaction can be replayed during flush.
	tx.pendingRemove.ForEach(func(k, v []byte) bool {
		newCachedKeys = newCachedKeys.Delete(k)
		newCachedRemove = newCachedRemove.Put(k, nil)

		logEntry := &txLogEntries[logEntryNum]
		logEntry.entryType = entryTypeRemove
		logEntry.key = k
		logEntryNum++
		return true
	})
	tx.pendingRemove = nil

	// Atomically replace the immutable treaps which hold the cached keys to
	// add and delete.
	c.cacheLock.Lock()
	c.cachedKeys = newCachedKeys
	c.cachedRemove = newCachedRemove
	c.cacheLock.Unlock()
	return nil
}
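The snapshot-then-swap pattern that commitTx relies on can be shown in isolation. The sketch below is a simplified stand-in rather than btcd's implementation: it copies a plain map instead of sharing the root of an immutable treap (which is what makes the snapshot O(1) above), and, like commitTx, it assumes only one writer runs at a time because the database write lock is held. All identifiers are illustrative.

package main

import (
	"fmt"
	"sync"
)

// snapshotCache illustrates the read-snapshot / atomic-swap idea used by
// dbCache.commitTx, with a map copy standing in for an immutable treap.
type snapshotCache struct {
	mtx  sync.RWMutex
	data map[string]string
}

// snapshot grabs the current state under the read lock. Readers can keep
// using this snapshot while a writer prepares the next state.
func (c *snapshotCache) snapshot() map[string]string {
	c.mtx.RLock()
	cur := c.data
	c.mtx.RUnlock()
	return cur
}

// commit builds a new state from the snapshot plus the pending puts and
// deletes, then atomically swaps it in under the write lock. Either all of
// the changes become visible to readers or none of them do.
func (c *snapshotCache) commit(put map[string]string, del []string) {
	cur := c.snapshot()

	next := make(map[string]string, len(cur)+len(put))
	for k, v := range cur {
		next[k] = v
	}
	for k, v := range put {
		next[k] = v
	}
	for _, k := range del {
		delete(next, k)
	}

	c.mtx.Lock()
	c.data = next
	c.mtx.Unlock()
}

func main() {
	c := &snapshotCache{data: map[string]string{"a": "1"}}
	c.commit(map[string]string{"b": "2"}, []string{"a"})
	fmt.Println(c.snapshot()) // map[b:2]
}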
Code Example #3
File: dbcache.go  Project: chrjen/btcd
// flush flushes the database cache to persistent storage.  This involves syncing
// the block store and replaying all transactions that have been applied to the
// cache to the underlying database.
//
// This function MUST be called with the database write lock held.
func (c *dbCache) flush() error {
	c.lastFlush = time.Now()

	// Sync the current write file associated with the block store.  This is
	// necessary before writing the metadata to prevent the case where the
	// metadata contains information about a block which actually hasn't
	// been written yet in unexpected shutdown scenarios.
	if err := c.store.syncBlocks(); err != nil {
		return err
	}

	// Nothing to do if there are no transactions to flush.
	if len(c.txLog) == 0 {
		return nil
	}

	// Perform all leveldb updates using batches for atomicity.
	batchLen := 0
	batchTxns := 0
	batch := new(leveldb.Batch)
	for logTxNum, txLogEntries := range c.txLog {
		// Replay the transaction from the log into the current batch.
		for _, logEntry := range txLogEntries {
			switch logEntry.entryType {
			case entryTypeUpdate:
				batch.Put(logEntry.key, logEntry.value)
			case entryTypeRemove:
				batch.Delete(logEntry.key)
			}
		}
		batchTxns++

		// Write and reset the current batch when the number of items in
		// it exceeds the batch threshold or this is the last
		// transaction in the log.
		batchLen += len(txLogEntries)
		if batchLen > batchThreshold || logTxNum == len(c.txLog)-1 {
			if err := c.ldb.Write(batch, nil); err != nil {
				return convertErr("failed to write batch", err)
			}
			batch.Reset()
			batchLen = 0

			// Clear the transactions that were written from the
			// log so the memory can be reclaimed.
			for i := logTxNum - (batchTxns - 1); i <= logTxNum; i++ {
				c.txLog[i] = nil
			}
			batchTxns = 0
		}
	}
	// Truncate the transaction log now that every entry has been replayed.
	c.txLog = c.txLog[:0]

	// Clear the cache since it has been flushed.
	c.cacheLock.Lock()
	c.cachedKeys = treap.NewImmutable()
	c.cachedRemove = treap.NewImmutable()
	c.cacheLock.Unlock()

	return nil
}
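The write-and-reset batching that flush performs against leveldb can likewise be reduced to a small standalone helper. The sketch below is illustrative only: kv, batchThreshold, and writeInBatches are hypothetical names, and the only goleveldb calls used are Batch.Put, Batch.Reset, and DB.Write, matching the pattern above.

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb"
)

// kv is a simple key/value pair used by the illustration below.
type kv struct {
	key, value []byte
}

// batchThreshold is a hypothetical limit on how many entries accumulate in a
// batch before it is written and reset, mirroring the pattern in dbCache.flush.
const batchThreshold = 8000

// writeInBatches applies the entries to the database through one reusable
// leveldb.Batch, writing and resetting it whenever the threshold is exceeded
// or the final entry has been added.
func writeInBatches(ldb *leveldb.DB, entries []kv) error {
	batch := new(leveldb.Batch)
	batchLen := 0
	for i, e := range entries {
		batch.Put(e.key, e.value)
		batchLen++

		if batchLen > batchThreshold || i == len(entries)-1 {
			if err := ldb.Write(batch, nil); err != nil {
				return err
			}
			batch.Reset()
			batchLen = 0
		}
	}
	return nil
}

func main() {
	ldb, err := leveldb.OpenFile("batchdb", nil)
	if err != nil {
		fmt.Printf("db open failed %v\n", err)
		return
	}
	defer ldb.Close()

	entries := []kv{
		{key: []byte("k1"), value: []byte("v1")},
		{key: []byte("k2"), value: []byte("v2")},
	}
	if err := writeInBatches(ldb, entries); err != nil {
		fmt.Printf("batch write failed %v\n", err)
	}
}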