Example no. 1
// Write applies the given batch to the DB. The batch records will be applied
// sequentially. Write may be used concurrently; when used concurrently and the
// batch is small enough, Write will try to merge the batches. Set the
// NoWriteMerge option to true to disable write merging.
//
// It is safe to modify the contents of the arguments after Write returns but
// not before. Write will not modify the contents of the batch.
func (db *DB) Write(batch *Batch, wo *opt.WriteOptions) error {
	if err := db.ok(); err != nil || batch == nil || batch.Len() == 0 {
		return err
	}

	// If the batch size is larger than the write buffer, it may be justified to
	// write it using a transaction instead. With a transaction the batch will be
	// written into tables directly, skipping the journaling.
	if batch.internalLen > db.s.o.GetWriteBuffer() && !db.s.o.GetDisableLargeBatchTransaction() {
		tr, err := db.OpenTransaction()
		if err != nil {
			return err
		}
		if err := tr.Write(batch, wo); err != nil {
			tr.Discard()
			return err
		}
		return tr.Commit()
	}

	merge := !wo.GetNoWriteMerge() && !db.s.o.GetNoWriteMerge()
	sync := wo.GetSync() && !db.s.o.GetNoSync()

	// Acquire write lock.
	if merge {
		select {
		case db.writeMergeC <- writeMerge{sync: sync, batch: batch}:
			if <-db.writeMergedC {
				// Write is merged.
				return <-db.writeAckC
			}
			// Write was not merged; the write lock has been handed to us. Continue.
		case db.writeLockC <- struct{}{}:
			// Write lock acquired.
		case err := <-db.compPerErrC:
			// Compaction error.
			return err
		case <-db.closeC:
			// Closed
			return ErrClosed
		}
	} else {
		select {
		case db.writeLockC <- struct{}{}:
			// Write lock acquired.
		case err := <-db.compPerErrC:
			// Compaction error.
			return err
		case <-db.closeC:
			// Closed
			return ErrClosed
		}
	}

	return db.writeLocked(batch, nil, merge, sync)
}
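
For orientation, here is a caller-side usage sketch (not part of the library source; it assumes the standard goleveldb import paths and an illustrative database path). The Sync and NoWriteMerge fields of opt.WriteOptions map onto the sync and merge flags computed at the top of Write above.

package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
	// Open (or create) a database; "example.db" is just an illustrative path.
	db, err := leveldb.OpenFile("example.db", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Build a batch; Write applies its records sequentially.
	batch := new(leveldb.Batch)
	batch.Put([]byte("alpha"), []byte("1"))
	batch.Put([]byte("beta"), []byte("2"))
	batch.Delete([]byte("gamma"))

	// Sync forces the journal to be synced to stable storage; NoWriteMerge
	// opts this call out of the write-merge fast path shown above.
	wo := &opt.WriteOptions{Sync: true, NoWriteMerge: false}
	if err := db.Write(batch, wo); err != nil {
		log.Fatal(err)
	}
}
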
Example no. 2
func (db *DB) putRec(kt keyType, key, value []byte, wo *opt.WriteOptions) error {
	if err := db.ok(); err != nil {
		return err
	}

	merge := !wo.GetNoWriteMerge() && !db.s.o.GetNoWriteMerge()
	sync := wo.GetSync() && !db.s.o.GetNoSync()

	// Acquire write lock.
	if merge {
		select {
		case db.writeMergeC <- writeMerge{sync: sync, keyType: kt, key: key, value: value}:
			if <-db.writeMergedC {
				// Write is merged.
				return <-db.writeAckC
			}
			// Write was not merged; the write lock has been handed to us. Continue.
		case db.writeLockC <- struct{}{}:
			// Write lock acquired.
		case err := <-db.compPerErrC:
			// Compaction error.
			return err
		case <-db.closeC:
			// Closed
			return ErrClosed
		}
	} else {
		select {
		case db.writeLockC <- struct{}{}:
			// Write lock acquired.
		case err := <-db.compPerErrC:
			// Compaction error.
			return err
		case <-db.closeC:
			// Closed
			return ErrClosed
		}
	}

	batch := db.batchPool.Get().(*Batch)
	batch.Reset()
	batch.appendRec(kt, key, value)
	return db.writeLocked(batch, batch, merge, sync)
}
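
putRec is the shared single-record write path; in goleveldb the exported Put and Delete methods are thin wrappers around it. A reconstructed sketch of those wrappers follows (keyTypeVal and keyTypeDel are the internal record kinds; treat the exact bodies as illustrative rather than a verbatim copy of the source).

// Put sets the value for the given key. It overwrites any previous value
// for that key; a nil value is allowed.
func (db *DB) Put(key, value []byte, wo *opt.WriteOptions) error {
	return db.putRec(keyTypeVal, key, value, wo)
}

// Delete deletes the value for the given key. Deleting a non-existent key
// is not considered an error.
func (db *DB) Delete(key []byte, wo *opt.WriteOptions) error {
	return db.putRec(keyTypeDel, key, nil, wo)
}
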
Example no. 3
// Write applies the given batch to the DB. The batch will be applied
// sequentially.
//
// It is safe to modify the contents of the arguments after Write returns.
func (db *DB) Write(b *Batch, wo *opt.WriteOptions) (err error) {
	err = db.ok()
	if err != nil || b == nil || b.Len() == 0 {
		return
	}

	b.init(wo.GetSync() && !db.s.o.GetNoSync())

	if b.size() > db.s.o.GetWriteBuffer() && !db.s.o.GetDisableLargeBatchTransaction() {
		// Write using a transaction instead.
		tr, err1 := db.OpenTransaction()
		if err1 != nil {
			return err1
		}
		if err1 := tr.Write(b, wo); err1 != nil {
			tr.Discard()
			return err1
		}
		return tr.Commit()
	}

	// The write happens synchronously.
	select {
	case db.writeC <- b:
		if <-db.writeMergedC {
			return <-db.writeAckC
		}
		// Continue; the write lock was already acquired by a previous writer
		// and handed over to us.
	case db.writeLockC <- struct{}{}:
	case err = <-db.compPerErrC:
		return
	case <-db.closeC:
		return ErrClosed
	}

	merged := 0
	danglingMerge := false
	defer func() {
		for i := 0; i < merged; i++ {
			db.writeAckC <- err
		}
		if danglingMerge {
			// Only one dangling merge at most, so this is safe.
			db.writeMergedC <- false
		} else {
			<-db.writeLockC
		}
	}()

	mdb, mdbFree, err := db.flush(b.size())
	if err != nil {
		return
	}
	defer mdb.decref()

	// Calculate maximum size of the batch.
	m := 1 << 20
	if x := b.size(); x <= 128<<10 {
		m = x + (128 << 10)
	}
	m = minInt(m, mdbFree)

	// Merge with other batch.
drain:
	for b.size() < m && !b.sync {
		select {
		case nb := <-db.writeC:
			if b.size()+nb.size() <= m {
				b.append(nb)
				db.writeMergedC <- true
				merged++
			} else {
				danglingMerge = true
				break drain
			}
		default:
			break drain
		}
	}

	// Set the batch's first seq number relative to the last seq.
	b.seq = db.seq + 1

	// Write the journal concurrently if the batch is large enough.
	if b.size() >= (128 << 10) {
		// Push the write batch to the journal writer
		select {
		case db.journalC <- b:
			// Write into memdb
			if berr := b.memReplay(mdb.DB); berr != nil {
				panic(berr)
			}
		case err = <-db.compPerErrC:
			return
		case <-db.closeC:
			err = ErrClosed
			return
		}
		// Wait for journal writer
		select {
		case err = <-db.journalAckC:
			if err != nil {
				// Revert memdb if error detected
				if berr := b.revertMemReplay(mdb.DB); berr != nil {
					panic(berr)
				}
				return
			}
		case <-db.closeC:
			err = ErrClosed
			return
		}
	} else {
		err = db.writeJournal(b)
		if err != nil {
			return
		}
		if berr := b.memReplay(mdb.DB); berr != nil {
			panic(berr)
		}
	}

	// Set last seq number.
	db.addSeq(uint64(b.Len()))

	if b.size() >= mdbFree {
		db.rotateMem(0, false)
	}
	return
}
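
The writeC/writeMergedC/writeAckC handshake in the example above is easiest to see from the follower's side. The standalone toy below (the channel names only mirror the ones in the code; none of this is library API) models it: a follower offers its batch on the write channel; a reply of true on the merged channel means the leader absorbed the batch and the follower only waits for the shared error on the ack channel, while false means the write lock has been handed over and the follower must complete the write itself.

package main

import "fmt"

type toyBatch struct{ size int }

func main() {
	writeC := make(chan *toyBatch)
	writeMergedC := make(chan bool)
	writeAckC := make(chan error)

	// Leader: accepts one follower's batch, merges it, then fans out the result.
	go func() {
		nb := <-writeC       // the follower offers its batch
		_ = nb               // ...journal write and memdb replay would happen here...
		writeMergedC <- true // true: the leader absorbed the batch
		writeAckC <- nil     // shared outcome delivered to the merged follower
	}()

	// Follower: same shape as the "case db.writeC <- b" branch in Write above.
	b := &toyBatch{size: 42}
	writeC <- b
	if <-writeMergedC {
		// Merged: the leader writes on our behalf; just collect its error.
		fmt.Println("merged, ack:", <-writeAckC)
		return
	}
	// Not merged: the write lock was handed to us; continue as the new leader.
	fmt.Println("lock handed over, continuing as leader")
}
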
Example no. 4
// Write applies the given batch to the DB. The batch will be applied
// sequentially.
//
// It is safe to modify the contents of the arguments after Write returns.
func (d *DB) Write(b *Batch, wo *opt.WriteOptions) (err error) {
	err = d.ok()
	if err != nil || b == nil || b.len() == 0 {
		return
	}

	b.init(wo.GetSync())

	// The write happens synchronously.
	select {
	case <-d.closeCh:
		return ErrClosed
	case d.writeCh <- b:
		return <-d.writeAckCh
	case d.writeLockCh <- struct{}{}:
	}

	merged := 0
	defer func() {
		<-d.writeLockCh
		for i := 0; i < merged; i++ {
			d.writeAckCh <- err
		}
	}()

	mem, err := d.flush()
	if err != nil {
		return
	}

	// Calculate maximum size of the batch.
	m := 1 << 20
	if x := b.size(); x <= 128<<10 {
		m = x + (128 << 10)
	}

	// Merge with other batch.
drain:
	for b.size() <= m && !b.sync {
		select {
		case nb := <-d.writeCh:
			b.append(nb)
			merged++
		default:
			break drain
		}
	}

	// Set the batch's first seq number relative to the last seq.
	b.seq = d.seq + 1

	// Write the journal concurrently if the batch is large enough.
	if b.size() >= (128 << 10) {
		// Push the write batch to the journal writer
		select {
		case <-d.closeCh:
			err = ErrClosed
			return
		case d.journalCh <- b:
			// Write into memdb
			b.memReplay(mem)
		}
		// Wait for journal writer
		select {
		case <-d.closeCh:
			err = ErrClosed
			return
		case err = <-d.journalAckCh:
			if err != nil {
				// Revert memdb if error detected
				b.revertMemReplay(mem)
				return
			}
		}
	} else {
		err = d.doWriteJournal(b)
		if err != nil {
			return
		}
		b.memReplay(mem)
	}

	// Set last seq number.
	d.addSeq(uint64(b.len()))
	return
}
Example no. 5
// Write applies the specified batch to the database.
func (d *DB) Write(b *Batch, wo *opt.WriteOptions) (err error) {
	err = d.wok()
	if err != nil || b == nil || b.len() == 0 {
		return
	}

	b.init(wo.HasFlag(opt.WFSync))

	select {
	case d.wqueue <- b:
		return <-d.wack
	case d.wlock <- struct{}{}:
	}

	merged := 0
	defer func() {
		<-d.wlock
		for i := 0; i < merged; i++ {
			d.wack <- err
		}
	}()

	mem, err := d.flush()
	if err != nil {
		return
	}

	// calculate maximum size of the batch
	m := 1 << 20
	if x := b.size(); x <= 128<<10 {
		m = x + (128 << 10)
	}

	// merge with other batch
drain:
	for b.size() <= m && !b.sync {
		select {
		case nb := <-d.wqueue:
			b.append(nb)
			merged++
		default:
			break drain
		}
	}

	// set the batch's first seq number relative to the last seq
	b.seq = d.seq + 1

	// write the log concurrently if the batch is large enough
	if b.size() >= (128 << 10) {
		d.lch <- b
		b.memReplay(mem)
		err = <-d.lack
		if err != nil {
			b.revertMemReplay(mem)
			return
		}
	} else {
		err = d.doWriteLog(b)
		if err != nil {
			return
		}
		b.memReplay(mem)
	}

	// set last seq number
	d.addSeq(uint64(b.len()))

	return
}
Example no. 6
// Write applies the given batch to the DB. The batch will be applied
// sequentially.
//
// It is safe to modify the contents of the arguments after Write returns.
func (db *DB) Write(b *Batch, wo *opt.WriteOptions) (err error) {
	err = db.ok()
	if err != nil || b == nil || b.Len() == 0 {
		return
	}

	b.init(wo.GetSync())

	// The write happens synchronously.
	select {
	case db.writeC <- b:
		if <-db.writeMergedC {
			return <-db.writeAckC
		}
	case db.writeLockC <- struct{}{}:
	case err = <-db.compPerErrC:
		return
	case <-db.closeC:
		return ErrClosed
	}

	merged := 0
	danglingMerge := false
	defer func() {
		if danglingMerge {
			db.writeMergedC <- false
		} else {
			<-db.writeLockC
		}
		for i := 0; i < merged; i++ {
			db.writeAckC <- err
		}
	}()

	mem, memFree, err := db.flush(b.size())
	if err != nil {
		return
	}
	defer mem.decref()

	// Calculate maximum size of the batch.
	m := 1 << 20
	if x := b.size(); x <= 128<<10 {
		m = x + (128 << 10)
	}
	m = minInt(m, memFree)

	// Merge with other batch.
drain:
	for b.size() < m && !b.sync {
		select {
		case nb := <-db.writeC:
			if b.size()+nb.size() <= m {
				b.append(nb)
				db.writeMergedC <- true
				merged++
			} else {
				danglingMerge = true
				break drain
			}
		default:
			break drain
		}
	}

	// Set the batch's first seq number relative to the last seq.
	b.seq = db.seq + 1

	// Write the journal concurrently if the batch is large enough.
	if b.size() >= (128 << 10) {
		// Push the write batch to the journal writer
		select {
		case db.journalC <- b:
			// Write into memdb
			if berr := b.memReplay(mem.mdb); berr != nil {
				panic(berr)
			}
		case err = <-db.compPerErrC:
			return
		case <-db.closeC:
			err = ErrClosed
			return
		}
		// Wait for journal writer
		select {
		case err = <-db.journalAckC:
			if err != nil {
				// Revert memdb if error detected
				if berr := b.revertMemReplay(mem.mdb); berr != nil {
					panic(berr)
				}
				return
			}
		case <-db.closeC:
			err = ErrClosed
			return
		}
	} else {
		err = db.writeJournal(b)
		if err != nil {
			return
		}
		if berr := b.memReplay(mem.mdb); berr != nil {
			panic(berr)
		}
	}

	// Set last seq number.
	db.addSeq(uint64(b.Len()))

	if b.size() >= memFree {
		db.rotateMem(0)
	}
	return
}
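
A quick worked example of the merge cap m used in examples 3 and 6: a batch of at most 128 KiB may grow by another 128 KiB through merging, a larger batch is capped at 1 MiB, and in either case the cap never exceeds the free space left in the current memdb. The helper below is purely illustrative (mergeCap is not a function in the library).

package main

import "fmt"

// mergeCap reproduces the cap computation: min(1 MiB or size+128 KiB, memFree).
func mergeCap(batchSize, memFree int) int {
	m := 1 << 20 // 1 MiB ceiling for large batches
	if batchSize <= 128<<10 {
		// A small batch may grow by at most another 128 KiB through merging.
		m = batchSize + (128 << 10)
	}
	if memFree < m {
		// Never merge past what the current memdb can still absorb.
		m = memFree
	}
	return m
}

func main() {
	fmt.Println(mergeCap(4<<10, 4<<20))   // small batch: 4 KiB + 128 KiB = 135168
	fmt.Println(mergeCap(512<<10, 4<<20)) // large batch: capped at 1 MiB = 1048576
	fmt.Println(mergeCap(4<<10, 64<<10))  // nearly full memdb: capped at 65536
}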