func (f *Firestorm) storeVersion(writer store.KVWriter) error {
	vr := NewVersionRow(Version)
	wb := writer.NewBatch()
	wb.Set(vr.Key(), vr.Value())
	err := writer.ExecuteBatch(wb)
	return err
}
func (udc *UpsideDownCouch) batchRows(writer store.KVWriter, addRows []UpsideDownCouchRow, updateRows []UpsideDownCouchRow, deleteRows []UpsideDownCouchRow) (err error) {
	// prepare batch
	wb := writer.NewBatch()

	// add
	for _, row := range addRows {
		tfr, ok := row.(*TermFrequencyRow)
		if ok {
			// need to increment counter
			dictionaryKey := tfr.DictionaryRowKey()
			wb.Merge(dictionaryKey, dictionaryTermIncr)
		}
		wb.Set(row.Key(), row.Value())
	}

	// update
	for _, row := range updateRows {
		wb.Set(row.Key(), row.Value())
	}

	// delete
	for _, row := range deleteRows {
		tfr, ok := row.(*TermFrequencyRow)
		if ok {
			// need to decrement counter
			dictionaryKey := tfr.DictionaryRowKey()
			wb.Merge(dictionaryKey, dictionaryTermDecr)
		}
		wb.Delete(row.Key())
	}

	// write out the batch
	return wb.Execute()
}
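// Sketch: the wb.Merge calls above hand the KV store a delta operand instead of
// doing a read-modify-write per dictionary row; the store folds the deltas into
// the existing counter at commit time. The code below only illustrates that
// technique. It is an assumption that dictionaryTermIncr/dictionaryTermDecr are
// fixed-width little-endian two's-complement deltas; sketchTermIncr,
// sketchTermDecr, encodeDelta, and foldDeltas are hypothetical names, not the
// library's API.
package main

import (
	"encoding/binary"
	"fmt"
)

var sketchTermIncr = encodeDelta(1)  // +1, as 8-byte little-endian two's complement
var sketchTermDecr = encodeDelta(-1) // -1, same encoding

func encodeDelta(d int64) []byte {
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, uint64(d))
	return buf
}

// foldDeltas mimics what a store-side merge operator would do: start from the
// existing counter (zero if the row is absent) and apply each queued operand.
// Adding the unsigned two's-complement encoding of -1 wraps around, which is
// exactly a decrement modulo 2^64.
func foldDeltas(existing uint64, operands [][]byte) uint64 {
	count := existing
	for _, op := range operands {
		count += uint64(int64(binary.LittleEndian.Uint64(op)))
	}
	return count
}

func main() {
	// two term additions and one deletion net out to a count of 1
	ops := [][]byte{sketchTermIncr, sketchTermIncr, sketchTermDecr}
	fmt.Println(foldDeltas(0, ops)) // prints 1
}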
func (f *Firestorm) DeleteInternal(key []byte) (err error) {
	internalRow := NewInternalRow(key, nil)
	var writer store.KVWriter
	writer, err = f.store.Writer()
	if err != nil {
		return
	}
	defer func() {
		if cerr := writer.Close(); err == nil && cerr != nil {
			err = cerr
		}
	}()
	wb := writer.NewBatch()
	wb.Delete(internalRow.Key())
	return writer.ExecuteBatch(wb)
}
func (udc *SmolderingCouch) DeleteInternal(key []byte) (err error) {
	internalRow := NewInternalRow(key, nil)
	udc.writeMutex.Lock()
	defer udc.writeMutex.Unlock()
	var writer store.KVWriter
	writer, err = udc.store.Writer()
	if err != nil {
		return
	}
	defer func() {
		if cerr := writer.Close(); err == nil && cerr != nil {
			err = cerr
		}
	}()
	batch := writer.NewBatch()
	batch.Delete(internalRow.Key())
	return writer.ExecuteBatch(batch)
}
func (udc *UpsideDownCouch) SetInternal(key, val []byte) (err error) {
	internalRow := NewInternalRow(key, val)
	udc.writeMutex.Lock()
	defer udc.writeMutex.Unlock()
	var writer store.KVWriter
	writer, err = udc.store.Writer()
	if err != nil {
		return
	}
	defer func() {
		if cerr := writer.Close(); err == nil && cerr != nil {
			err = cerr
		}
	}()
	batch := writer.NewBatch()
	batch.Set(internalRow.Key(), internalRow.Value())
	return writer.ExecuteBatch(batch)
}
func (udc *UpsideDownCouch) batchRows(writer store.KVWriter, addRows []UpsideDownCouchRow, updateRows []UpsideDownCouchRow, deleteRows []UpsideDownCouchRow) (err error) {
	// prepare batch
	wb := writer.NewBatch()

	// add
	for _, row := range addRows {
		tfr, ok := row.(*TermFrequencyRow)
		if ok {
			// need to increment counter
			summaryKey := tfr.SummaryKey()
			wb.Merge(summaryKey, newTermSummaryIncr())
		}
		wb.Set(row.Key(), row.Value())
	}

	// update
	for _, row := range updateRows {
		wb.Set(row.Key(), row.Value())
	}

	// delete
	for _, row := range deleteRows {
		tfr, ok := row.(*TermFrequencyRow)
		if ok {
			// need to decrement counter
			summaryKey := tfr.SummaryKey()
			wb.Merge(summaryKey, newTermSummaryDecr())
		}
		wb.Delete(row.Key())
	}

	// write out the batch
	err = wb.Execute()
	if err != nil {
		return
	}
	return
}
func (udc *UpsideDownCouch) batchRows(writer store.KVWriter, addRowsAll [][]UpsideDownCouchRow, updateRowsAll [][]UpsideDownCouchRow, deleteRowsAll [][]UpsideDownCouchRow) (err error) {
	// prepare batch
	wb := writer.NewBatch()
	defer func() {
		_ = wb.Close()
	}()

	// buffer to work with
	rowBuf := GetRowBuffer()

	dictionaryDeltas := make(map[string]int64)

	// add
	for _, addRows := range addRowsAll {
		for _, row := range addRows {
			tfr, ok := row.(*TermFrequencyRow)
			if ok {
				// need to increment counter
				if tfr.DictionaryRowKeySize() > len(rowBuf) {
					rowBuf = make([]byte, tfr.DictionaryRowKeySize())
				}
				dictKeySize, err := tfr.DictionaryRowKeyTo(rowBuf)
				if err != nil {
					return err
				}
				dictionaryDeltas[string(rowBuf[:dictKeySize])] += 1
			}
			if row.KeySize()+row.ValueSize() > len(rowBuf) {
				rowBuf = make([]byte, row.KeySize()+row.ValueSize())
			}
			keySize, err := row.KeyTo(rowBuf)
			if err != nil {
				return err
			}
			valSize, err := row.ValueTo(rowBuf[keySize:])
			if err != nil {
				return err
			}
			wb.Set(rowBuf[:keySize], rowBuf[keySize:keySize+valSize])
		}
	}

	// update
	for _, updateRows := range updateRowsAll {
		for _, row := range updateRows {
			if row.KeySize()+row.ValueSize() > len(rowBuf) {
				rowBuf = make([]byte, row.KeySize()+row.ValueSize())
			}
			keySize, err := row.KeyTo(rowBuf)
			if err != nil {
				return err
			}
			valSize, err := row.ValueTo(rowBuf[keySize:])
			if err != nil {
				return err
			}
			wb.Set(rowBuf[:keySize], rowBuf[keySize:keySize+valSize])
		}
	}

	// delete
	for _, deleteRows := range deleteRowsAll {
		for _, row := range deleteRows {
			tfr, ok := row.(*TermFrequencyRow)
			if ok {
				// need to decrement counter
				if tfr.DictionaryRowKeySize() > len(rowBuf) {
					rowBuf = make([]byte, tfr.DictionaryRowKeySize())
				}
				dictKeySize, err := tfr.DictionaryRowKeyTo(rowBuf)
				if err != nil {
					return err
				}
				dictionaryDeltas[string(rowBuf[:dictKeySize])] -= 1
			}
			if row.KeySize()+row.ValueSize() > len(rowBuf) {
				rowBuf = make([]byte, row.KeySize()+row.ValueSize())
			}
			keySize, err := row.KeyTo(rowBuf)
			if err != nil {
				return err
			}
			wb.Delete(rowBuf[:keySize])
		}
	}

	// merge the accumulated dictionary deltas, one operand per dictionary row
	if 8 > len(rowBuf) {
		rowBuf = make([]byte, 8)
	}
	for dictRowKey, delta := range dictionaryDeltas {
		binary.LittleEndian.PutUint64(rowBuf, uint64(delta))
		wb.Merge([]byte(dictRowKey), rowBuf[0:8])
	}

	PutRowBuffer(rowBuf)

	// write out the batch
	return writer.ExecuteBatch(wb)
}
func (udc *UpsideDownCouch) batchRows(writer store.KVWriter, addRows []UpsideDownCouchRow, updateRows []UpsideDownCouchRow, deleteRows []UpsideDownCouchRow) (err error) {
	// prepare batch
	wb := writer.NewBatch()

	// buffer to work with
	rowBuf := GetRowBuffer()

	// add
	for _, row := range addRows {
		tfr, ok := row.(*TermFrequencyRow)
		if ok {
			// need to increment counter
			if tfr.DictionaryRowKeySize() > len(rowBuf) {
				rowBuf = make([]byte, tfr.DictionaryRowKeySize())
			}
			dictKeySize, err := tfr.DictionaryRowKeyTo(rowBuf)
			if err != nil {
				return err
			}
			wb.Merge(rowBuf[:dictKeySize], dictionaryTermIncr)
		}
		if row.KeySize()+row.ValueSize() > len(rowBuf) {
			rowBuf = make([]byte, row.KeySize()+row.ValueSize())
		}
		keySize, err := row.KeyTo(rowBuf)
		if err != nil {
			return err
		}
		valSize, err := row.ValueTo(rowBuf[keySize:])
		if err != nil {
			return err
		}
		wb.Set(rowBuf[:keySize], rowBuf[keySize:keySize+valSize])
	}

	// update
	for _, row := range updateRows {
		if row.KeySize()+row.ValueSize() > len(rowBuf) {
			rowBuf = make([]byte, row.KeySize()+row.ValueSize())
		}
		keySize, err := row.KeyTo(rowBuf)
		if err != nil {
			return err
		}
		valSize, err := row.ValueTo(rowBuf[keySize:])
		if err != nil {
			return err
		}
		wb.Set(rowBuf[:keySize], rowBuf[keySize:keySize+valSize])
	}

	// delete
	for _, row := range deleteRows {
		tfr, ok := row.(*TermFrequencyRow)
		if ok {
			// need to decrement counter
			if tfr.DictionaryRowKeySize() > len(rowBuf) {
				rowBuf = make([]byte, tfr.DictionaryRowKeySize())
			}
			dictKeySize, err := tfr.DictionaryRowKeyTo(rowBuf)
			if err != nil {
				return err
			}
			wb.Merge(rowBuf[:dictKeySize], dictionaryTermDecr)
		}
		if row.KeySize()+row.ValueSize() > len(rowBuf) {
			rowBuf = make([]byte, row.KeySize()+row.ValueSize())
		}
		keySize, err := row.KeyTo(rowBuf)
		if err != nil {
			return err
		}
		wb.Delete(rowBuf[:keySize])
	}

	PutRowBuffer(rowBuf)

	// write out the batch
	return writer.ExecuteBatch(wb)
}
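// Sketch: GetRowBuffer/PutRowBuffer above suggest a pool of reusable byte
// slices so batchRows can serialize rows without allocating per row. The real
// helpers are not shown in this listing; the sync.Pool-backed version below is
// an assumption, and defaultRowBufferSize is a hypothetical constant.
package main

import "sync"

const defaultRowBufferSize = 4 * 1024 // hypothetical starting capacity

var rowBufferPool = sync.Pool{
	New: func() interface{} {
		return make([]byte, defaultRowBufferSize)
	},
}

// GetRowBuffer hands out a scratch buffer; callers may replace it with a
// larger one (as batchRows does when a row outgrows it) before returning it.
func GetRowBuffer() []byte {
	return rowBufferPool.Get().([]byte)
}

// PutRowBuffer returns a buffer (possibly grown by the caller) for reuse.
func PutRowBuffer(buf []byte) {
	rowBufferPool.Put(buf)
}

func main() {
	buf := GetRowBuffer()
	if len(buf) < 16 {
		buf = make([]byte, 16) // grow, just as batchRows does for oversized rows
	}
	copy(buf, []byte("example row key"))
	PutRowBuffer(buf)
}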