func (self *LevelDbDatastore) WriteSeriesData(database string, series *protocol.Series) error {
    wb := levigo.NewWriteBatch()
    defer wb.Close()
    for fieldIndex, field := range series.Fields {
        temp := field
        id, _, err := self.getIdForDbSeriesColumn(&database, series.Name, &temp)
        if err != nil {
            return err
        }
        for _, point := range series.Points {
            timestampBuffer := bytes.NewBuffer(make([]byte, 0, 8))
            sequenceNumberBuffer := bytes.NewBuffer(make([]byte, 0, 8))
            binary.Write(timestampBuffer, binary.BigEndian, self.convertTimestampToUint(point.GetTimestampInMicroseconds()))
            binary.Write(sequenceNumberBuffer, binary.BigEndian, uint64(*point.SequenceNumber))
            pointKey := append(append(id, timestampBuffer.Bytes()...), sequenceNumberBuffer.Bytes()...)
            // TODO: we should remove the column value if timestamp and sequence number
            // were provided
            if point.Values[fieldIndex] == nil {
                continue
            }
            data, err2 := proto.Marshal(point.Values[fieldIndex])
            if err2 != nil {
                return err2
            }
            wb.Put(pointKey, data)
        }
    }
    return self.db.Write(self.writeOptions, wb)
}
func newLevelDBBatch(store *LevelDBStore) *LevelDBBatch {
    rv := LevelDBBatch{
        store: store,
        batch: levigo.NewWriteBatch(),
    }
    return &rv
}
// Delete a span from the shard. Note that leveldb may retain the data until
// compaction(s) remove it.
func (shd *shard) DeleteSpan(span *common.Span) error {
    batch := levigo.NewWriteBatch()
    defer batch.Close()
    primaryKey := append([]byte{SPAN_ID_INDEX_PREFIX}, span.Id.Val()...)
    batch.Delete(primaryKey)
    for parentIdx := range span.Parents {
        key := append(append([]byte{PARENT_ID_INDEX_PREFIX},
            span.Parents[parentIdx].Val()...), span.Id.Val()...)
        batch.Delete(key)
    }
    beginTimeKey := append(append([]byte{BEGIN_TIME_INDEX_PREFIX},
        u64toSlice(s2u64(span.Begin))...), span.Id.Val()...)
    batch.Delete(beginTimeKey)
    endTimeKey := append(append([]byte{END_TIME_INDEX_PREFIX},
        u64toSlice(s2u64(span.End))...), span.Id.Val()...)
    batch.Delete(endTimeKey)
    durationKey := append(append([]byte{DURATION_INDEX_PREFIX},
        u64toSlice(s2u64(span.Duration()))...), span.Id.Val()...)
    batch.Delete(durationKey)
    return shd.ldb.Write(shd.store.writeOpts, batch)
}
func (shd *shard) writeSpan(ispan *IncomingSpan) error {
    batch := levigo.NewWriteBatch()
    defer batch.Close()
    span := ispan.Span
    primaryKey := append([]byte{SPAN_ID_INDEX_PREFIX}, span.Id.Val()...)
    batch.Put(primaryKey, ispan.SpanDataBytes)

    // Add this to the parent index.
    for parentIdx := range span.Parents {
        key := append(append([]byte{PARENT_ID_INDEX_PREFIX},
            span.Parents[parentIdx].Val()...), span.Id.Val()...)
        batch.Put(key, EMPTY_BYTE_BUF)
    }

    // Add to the other secondary indices.
    beginTimeKey := append(append([]byte{BEGIN_TIME_INDEX_PREFIX},
        u64toSlice(s2u64(span.Begin))...), span.Id.Val()...)
    batch.Put(beginTimeKey, EMPTY_BYTE_BUF)
    endTimeKey := append(append([]byte{END_TIME_INDEX_PREFIX},
        u64toSlice(s2u64(span.End))...), span.Id.Val()...)
    batch.Put(endTimeKey, EMPTY_BYTE_BUF)
    durationKey := append(append([]byte{DURATION_INDEX_PREFIX},
        u64toSlice(s2u64(span.Duration()))...), span.Id.Val()...)
    batch.Put(durationKey, EMPTY_BYTE_BUF)

    err := shd.ldb.Write(shd.store.writeOpts, batch)
    if err != nil {
        shd.store.lg.Errorf("Error writing span %s to leveldb at %s: %s\n",
            span.String(), shd.path, err.Error())
        return err
    }
    return nil
}
func (this *DB) NewWriteBatch() *WriteBatch {
    batch := levigo.NewWriteBatch()
    var hook *Hook
    if len(this.pre) != 0 || len(this.post) != 0 {
        hook = &Hook{db: this, batch: batch}
    }
    return &WriteBatch{db: this, batch: batch, hook: hook}
}
func (w *Writer) NewBatch() store.KVBatch {
    rv := Batch{
        w:     w,
        merge: store.NewEmulatedMerge(w.store.mo),
        batch: levigo.NewWriteBatch(),
    }
    return &rv
}
// StartBatch starts a new batch write, reusing the existing batch if one
// has already been allocated.
func (lvdb *LVDB) StartBatch() {
    if lvdb._writeBatch == nil {
        lvdb._writeBatch = levigo.NewWriteBatch()
    } else {
        lvdb._writeBatch.Clear()
    }
    lvdb.isBatch = true
}
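All of these snippets drive the same underlying levigo lifecycle: create a batch, queue Put/Delete calls, apply them atomically with DB.Write, and Close the batch to free the underlying C object. A minimal, self-contained sketch of that lifecycle; the database path and keys are illustrative:

package main

import "github.com/jmhodges/levigo"

func main() {
    opts := levigo.NewOptions()
    opts.SetCreateIfMissing(true)
    defer opts.Close()

    // The path is illustrative.
    db, err := levigo.Open("/tmp/levigo-example", opts)
    if err != nil {
        panic(err)
    }
    defer db.Close()

    // Queue several mutations; none are visible until Write applies
    // the whole batch atomically.
    wb := levigo.NewWriteBatch()
    defer wb.Close()
    wb.Put([]byte("k1"), []byte("v1"))
    wb.Put([]byte("k2"), []byte("v2"))
    wb.Delete([]byte("stale-key"))

    wo := levigo.NewWriteOptions()
    defer wo.Close()
    if err := db.Write(wo, wb); err != nil {
        panic(err)
    }
}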
func (self *LevelDbShard) Write(database string, series []*protocol.Series) error {
    wb := levigo.NewWriteBatch()
    defer wb.Close()
    for _, s := range series {
        if len(s.Points) == 0 {
            return errors.New("Unable to write no data. Series was nil or had no points.")
        }
        count := 0
        for fieldIndex, field := range s.Fields {
            temp := field
            id, err := self.createIdForDbSeriesColumn(&database, s.Name, &temp)
            if err != nil {
                return err
            }
            keyBuffer := bytes.NewBuffer(make([]byte, 0, 24))
            dataBuffer := proto.NewBuffer(nil)
            for _, point := range s.Points {
                keyBuffer.Reset()
                dataBuffer.Reset()
                keyBuffer.Write(id)
                timestamp := self.convertTimestampToUint(point.GetTimestampInMicroseconds())
                // pass the uint64 by reference so binary.Write() doesn't create a new buffer
                // see the source code for intDataSize() in binary.go
                binary.Write(keyBuffer, binary.BigEndian, &timestamp)
                binary.Write(keyBuffer, binary.BigEndian, point.SequenceNumber)
                pointKey := keyBuffer.Bytes()
                if point.Values[fieldIndex].GetIsNull() {
                    wb.Delete(pointKey)
                    goto check
                }
                err = dataBuffer.Marshal(point.Values[fieldIndex])
                if err != nil {
                    return err
                }
                wb.Put(pointKey, dataBuffer.Bytes())
            check:
                count++
                if count >= self.writeBatchSize {
                    err = self.db.Write(self.writeOptions, wb)
                    if err != nil {
                        return err
                    }
                    count = 0
                    wb.Clear()
                }
            }
        }
    }
    return self.db.Write(self.writeOptions, wb)
}
func (self *LevelDbShard) deleteRangeOfSeriesCommon(database, series string, startTimeBytes, endTimeBytes []byte) error {
    columns := self.getColumnNamesForSeries(database, series)
    fields, err := self.getFieldsForSeries(database, series, columns)
    if err != nil {
        // because a db is distributed across the cluster, it's possible we don't
        // have the series indexed here. ignore
        switch err := err.(type) {
        case FieldLookupError:
            return nil
        default:
            return err
        }
    }
    ro := levigo.NewReadOptions()
    defer ro.Close()
    ro.SetFillCache(false)
    for _, field := range fields {
        it := self.db.NewIterator(ro)
        defer it.Close()
        wb := levigo.NewWriteBatch()
        defer wb.Close()
        startKey := append(field.Id, startTimeBytes...)
        it.Seek(startKey)
        if it.Valid() {
            if !bytes.Equal(it.Key()[:8], field.Id) {
                it.Next()
                if it.Valid() {
                    startKey = it.Key()
                }
            }
        }
        count := 0
        for ; it.Valid(); it.Next() {
            k := it.Key()
            if len(k) < 16 || !bytes.Equal(k[:8], field.Id) || bytes.Compare(k[8:16], endTimeBytes) == 1 {
                break
            }
            wb.Delete(k)
            count++
            if count >= SIXTY_FOUR_KILOBYTES {
                err = self.db.Write(self.writeOptions, wb)
                if err != nil {
                    return err
                }
                count = 0
                wb.Clear()
            }
        }
        err = self.db.Write(self.writeOptions, wb)
        if err != nil {
            return err
        }
    }
    return nil
}
func (db LevelDB) BatchPut(writes []Write) error {
    wb := levigo.NewWriteBatch()
    defer wb.Close()
    for _, w := range writes {
        if w.Value == nil {
            // A nil value marks the key for deletion.
            wb.Delete(w.Key)
            continue
        }
        wb.Put(w.Key, w.Value)
    }
    return db.db.Write(db.wopts, wb)
}
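A hedged caller-side sketch of BatchPut's nil-as-delete convention; the keys and values are made up, and db is assumed to be an opened LevelDB value from the same package:

// Mix inserts and a delete in one atomic write.
writes := []Write{
    {Key: []byte("user:1"), Value: []byte("ada")},
    {Key: []byte("user:2"), Value: []byte("bob")},
    {Key: []byte("user:stale"), Value: nil}, // nil Value => Delete
}
if err := db.BatchPut(writes); err != nil {
    // the whole batch failed; nothing was applied
    return err
}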
func (self *LevelDbDatastore) deleteRangeOfSeriesCommon(database, series string, startTimeBytes, endTimeBytes []byte) error {
    columns := self.getColumnNamesForSeries(database, series)
    fields, err := self.getFieldsForSeries(database, series, columns)
    if err != nil {
        // because a db is distributed across the cluster, it's possible we don't
        // have the series indexed here. ignore
        switch err := err.(type) {
        case FieldLookupError:
            return nil
        default:
            return err
        }
    }
    ro := levigo.NewReadOptions()
    defer ro.Close()
    ro.SetFillCache(false)
    rangesToCompact := make([]*levigo.Range, 0)
    for _, field := range fields {
        it := self.db.NewIterator(ro)
        defer it.Close()
        wb := levigo.NewWriteBatch()
        defer wb.Close()
        startKey := append(field.Id, startTimeBytes...)
        endKey := startKey
        it.Seek(startKey)
        if it.Valid() {
            if !bytes.Equal(it.Key()[:8], field.Id) {
                it.Next()
                if it.Valid() {
                    startKey = it.Key()
                }
            }
        }
        for ; it.Valid(); it.Next() {
            k := it.Key()
            if len(k) < 16 || !bytes.Equal(k[:8], field.Id) || bytes.Compare(k[8:16], endTimeBytes) == 1 {
                break
            }
            wb.Delete(k)
            endKey = k
        }
        err = self.db.Write(self.writeOptions, wb)
        if err != nil {
            return err
        }
        rangesToCompact = append(rangesToCompact, &levigo.Range{Start: startKey, Limit: endKey})
    }
    for _, r := range rangesToCompact {
        self.db.CompactRange(*r)
    }
    return nil
}
func (context *levelDBContext) Write() error {
    wb := levigo.NewWriteBatch()
    defer wb.Close()
    for _, entry := range context.batch {
        if entry.Delete {
            wb.Delete(entry.Key)
        } else {
            wb.Put(entry.Key, entry.Val)
        }
    }
    return context.ldbStore.db.Write(defaultWriteOptions, wb)
}
func (connection *LevelDbConnection) Puts(options *proto.DbWriteOptions, keys [][]byte, values [][]byte) error {
    wo := levigo.NewWriteOptions()
    defer wo.Close()
    if options != nil {
        wo.SetSync(options.Sync)
    }
    batch := levigo.NewWriteBatch()
    defer batch.Close()
    for i, key := range keys {
        batch.Put(key, values[i])
    }
    return connection.db.Write(wo, batch)
}
func (p *InsertedWaysCache) DeleteMembers(members []element.Member) error {
    batch := levigo.NewWriteBatch()
    defer batch.Close()
    for _, m := range members {
        if m.Type != element.WAY {
            continue
        }
        keyBuf := idToKeyBuf(m.Id)
        batch.Delete(keyBuf)
    }
    return p.db.Write(p.wo, batch)
}
func (p *WaysCache) PutWays(ways []element.Way) error {
    batch := levigo.NewWriteBatch()
    defer batch.Close()
    for _, way := range ways {
        keyBuf := idToKeyBuf(way.Id)
        data, err := binary.MarshalWay(&way)
        if err != nil {
            return err
        }
        batch.Put(keyBuf, data)
    }
    return p.db.Write(p.wo, batch)
}
// DiscardFrom discards all entries from
// log index onwards (inclusive)
// TODO: Propagate error to higher layers
func (l *levelDbLogStore) DiscardFrom(index int64) {
    writeOpts := levigo.NewWriteOptions()
    writeOpts.SetSync(true)
    defer writeOpts.Close()
    writeBatch := levigo.NewWriteBatch()
    defer writeBatch.Close()
    for i := index; i <= l.TailIndex(); i++ {
        writeBatch.Delete(int64ToBytes(i))
    }
    err := l.localDb.Write(writeOpts, writeBatch)
    if err != nil {
        panic("Error in DiscardFrom: " + err.Error())
    }
    l.nextIndex.Set(index)
}
func (self *LevelDbShard) DropDatabase(database string) error {
    wb := levigo.NewWriteBatch()
    defer wb.Close()
    seriesNames := self.getSeriesForDatabase(database)
    for _, name := range seriesNames {
        if err := self.dropSeries(database, name); err != nil {
            log.Error("DropDatabase: ", err)
        }
    }
    return self.db.Write(self.writeOptions, wb)
}
func GenerateWriteBatchForIndexes(added, deleted [][2]string, key string, indexDb *levigo.DB) (*levigo.WriteBatch, error) {
    wb := levigo.NewWriteBatch()
    bkey := []byte(key)
    for _, index := range added {
        if err := AddIndex(index, bkey, indexDb, wb); err != nil {
            wb.Close()
            return nil, err
        }
    }
    for _, index := range deleted {
        if err := RemoveIndex(index, bkey, indexDb, wb); err != nil {
            wb.Close()
            return nil, err
        }
    }
    // The caller takes ownership of the batch and must Close it.
    return wb, nil
}
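GenerateWriteBatchForIndexes hands the batch back without writing or closing it, so both duties fall to the caller. A hedged sketch of that calling sequence; the index pairs and key are illustrative:

// Caller owns the returned batch: write it, then Close it.
added := [][2]string{{"color", "red"}}
deleted := [][2]string{{"color", "blue"}}
wb, err := GenerateWriteBatchForIndexes(added, deleted, "item:42", indexDb)
if err != nil {
    return err
}
defer wb.Close()

wo := levigo.NewWriteOptions()
defer wo.Close()
return indexDb.Write(wo, wb)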
func (self *LevelDbShard) DropDatabase(database string) error {
    wb := levigo.NewWriteBatch()
    defer wb.Close()
    seriesNames := self.getSeriesForDatabase(database)
    for _, name := range seriesNames {
        if err := self.dropSeries(database, name); err != nil {
            log.Error("DropDatabase: ", err)
        }
        seriesKey := append(DATABASE_SERIES_INDEX_PREFIX, []byte(database+"~"+name)...)
        wb.Delete(seriesKey)
    }
    return self.db.Write(self.writeOptions, wb)
}
func (index *bunchRefCache) writeRefs(idRefs idRefBunches) error {
    batch := levigo.NewWriteBatch()
    defer batch.Close()

    wg := sync.WaitGroup{}
    putc := make(chan writeBunchItem)
    loadc := make(chan loadBunchItem)

    for i := 0; i < runtime.NumCPU(); i++ {
        wg.Add(1)
        go func() {
            for item := range loadc {
                keyBuf := idToKeyBuf(item.bunchId)
                putc <- writeBunchItem{
                    keyBuf,
                    index.loadMergeMarshal(keyBuf, item.bunch.idRefs),
                }
            }
            wg.Done()
        }()
    }

    go func() {
        for bunchId, bunch := range idRefs {
            loadc <- loadBunchItem{bunchId, bunch}
        }
        close(loadc)
        wg.Wait()
        close(putc)
    }()

    for item := range putc {
        batch.Put(item.bunchIdBuf, item.data)
        bytePool.release(item.data)
    }

    go func() {
        for k := range idRefs {
            delete(idRefs, k)
        }
        select {
        case idRefBunchesPool <- idRefs:
        }
    }()

    return index.db.Write(index.wo, batch)
}
func (p *RelationsCache) PutRelations(rels []element.Relation) error {
    batch := levigo.NewWriteBatch()
    defer batch.Close()
    for _, rel := range rels {
        if len(rel.Tags) == 0 {
            continue
        }
        keyBuf := idToKeyBuf(rel.Id)
        data, err := binary.MarshalRelation(&rel)
        if err != nil {
            return err
        }
        batch.Put(keyBuf, data)
    }
    return p.db.Write(p.wo, batch)
}
func (self *LevelDbDatastore) deleteRangeOfSeries(database, series string, startTimeBytes, endTimeBytes []byte) error {
    columns := self.getColumnNamesForSeries(database, series)
    fields, err := self.getFieldsForSeries(database, series, columns)
    if err != nil {
        return err
    }
    ro := levigo.NewReadOptions()
    defer ro.Close()
    ro.SetFillCache(false)
    rangesToCompact := make([]*levigo.Range, 0)
    for _, field := range fields {
        it := self.db.NewIterator(ro)
        defer it.Close()
        wb := levigo.NewWriteBatch()
        defer wb.Close()
        startKey := append(field.Id, startTimeBytes...)
        endKey := startKey
        it.Seek(startKey)
        if it.Valid() {
            if !bytes.Equal(it.Key()[:8], field.Id) {
                it.Next()
                if it.Valid() {
                    startKey = it.Key()
                }
            }
        }
        for ; it.Valid(); it.Next() {
            k := it.Key()
            if len(k) < 16 || !bytes.Equal(k[:8], field.Id) || bytes.Compare(k[8:16], endTimeBytes) == 1 {
                break
            }
            wb.Delete(k)
            endKey = k
        }
        err = self.db.Write(self.writeOptions, wb)
        if err != nil {
            return err
        }
        rangesToCompact = append(rangesToCompact, &levigo.Range{Start: startKey, Limit: endKey})
    }
    for _, r := range rangesToCompact {
        self.db.CompactRange(*r)
    }
    return nil
}
func (self *LevelDbShard) getNextIdForColumn(db, series, column *string) (ret []byte, err error) {
    id := self.lastIdUsed + 1
    self.lastIdUsed += 1
    idBytes := make([]byte, 8)
    binary.PutUvarint(idBytes, id)
    wb := levigo.NewWriteBatch()
    defer wb.Close()
    wb.Put(NEXT_ID_KEY, idBytes)
    databaseSeriesIndexKey := append(DATABASE_SERIES_INDEX_PREFIX, []byte(*db+"~"+*series)...)
    wb.Put(databaseSeriesIndexKey, []byte{})
    seriesColumnIndexKey := append(SERIES_COLUMN_INDEX_PREFIX, []byte(*db+"~"+*series+"~"+*column)...)
    wb.Put(seriesColumnIndexKey, idBytes)
    if err = self.db.Write(self.writeOptions, wb); err != nil {
        return nil, err
    }
    return idBytes, nil
}
func (this *Hook) ensureBatch() {
    if this.batch != nil {
        return
    }
    this.batch = levigo.NewWriteBatch()
    if this.key != nil {
        newkey := append(this.db.prefix, this.key...)
        if this.value == nil {
            this.batch.Delete(newkey)
        } else {
            this.batch.Put(newkey, this.value)
        }
        this.kv = []KeyValue{{newkey, this.value}}
        this.key = nil
        this.value = nil
    }
}
func (self *LevelDbShard) dropSeries(database, series string) error {
    startTimeBytes := []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
    endTimeBytes := []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}
    wb := levigo.NewWriteBatch()
    defer wb.Close()
    for _, name := range self.getColumnNamesForSeries(database, series) {
        if err := self.deleteRangeOfSeriesCommon(database, series, startTimeBytes, endTimeBytes); err != nil {
            return err
        }
        indexKey := append(SERIES_COLUMN_INDEX_PREFIX, []byte(database+"~"+series+"~"+name)...)
        wb.Delete(indexKey)
    }
    // remove the column indices for this time series
    return self.db.Write(self.writeOptions, wb)
}
func (p *NodesCache) PutNodes(nodes []element.Node) (int, error) {
    batch := levigo.NewWriteBatch()
    defer batch.Close()
    var n int
    for _, node := range nodes {
        if len(node.Tags) == 0 {
            continue
        }
        keyBuf := idToKeyBuf(node.Id)
        data, err := binary.MarshalNode(&node)
        if err != nil {
            return 0, err
        }
        batch.Put(keyBuf, data)
        n++
    }
    return n, p.db.Write(p.wo, batch)
}
func (shd *shard) writeSpan(span *common.Span) error {
    batch := levigo.NewWriteBatch()
    defer batch.Close()

    // Add SpanData to batch.
    spanDataBuf := new(bytes.Buffer)
    spanDataEnc := gob.NewEncoder(spanDataBuf)
    err := spanDataEnc.Encode(span.SpanData)
    if err != nil {
        return err
    }
    primaryKey := append([]byte{SPAN_ID_INDEX_PREFIX}, span.Id.Val()...)
    batch.Put(primaryKey, spanDataBuf.Bytes())

    // Add this to the parent index.
    for parentIdx := range span.Parents {
        key := append(append([]byte{PARENT_ID_INDEX_PREFIX},
            span.Parents[parentIdx].Val()...), span.Id.Val()...)
        batch.Put(key, EMPTY_BYTE_BUF)
    }

    // Add to the other secondary indices.
    beginTimeKey := append(append([]byte{BEGIN_TIME_INDEX_PREFIX},
        u64toSlice(s2u64(span.Begin))...), span.Id.Val()...)
    batch.Put(beginTimeKey, EMPTY_BYTE_BUF)
    endTimeKey := append(append([]byte{END_TIME_INDEX_PREFIX},
        u64toSlice(s2u64(span.End))...), span.Id.Val()...)
    batch.Put(endTimeKey, EMPTY_BYTE_BUF)
    durationKey := append(append([]byte{DURATION_INDEX_PREFIX},
        u64toSlice(s2u64(span.Duration()))...), span.Id.Val()...)
    batch.Put(durationKey, EMPTY_BYTE_BUF)

    err = shd.ldb.Write(shd.store.writeOpts, batch)
    if err != nil {
        return err
    }
    shd.store.stats.IncrementWrittenSpans()
    if shd.store.WrittenSpans != nil {
        shd.store.WrittenSpans <- span
    }
    return nil
}
func (self *LevelDbDatastore) DropDatabase(database string) error {
    wb := levigo.NewWriteBatch()
    defer wb.Close()
    err := self.GetSeriesForDatabase(database, func(name string) error {
        if err := self.DropSeries(database, name); err != nil {
            return err
        }
        seriesKey := append(DATABASE_SERIES_INDEX_PREFIX, []byte(database+"~"+name)...)
        wb.Delete(seriesKey)
        return nil
    })
    if err != nil {
        return err
    }
    return self.db.Write(self.writeOptions, wb)
}
func (db LevelDB) Del(start, finish []byte) error {
    wb := levigo.NewWriteBatch()
    defer wb.Close()
    itr := db.Iterator()
    defer itr.Close()
    for itr.Seek(start); itr.Valid(); itr.Next() {
        k := itr.Key()
        if bytes.Compare(k, finish) > 0 {
            break
        }
        wb.Delete(k)
    }
    if err := itr.Error(); err != nil {
        return err
    }
    return db.db.Write(db.wopts, wb)
}
// DeleteServiceData deletes all service data such as service metadata, items and payloads.
func (ds *DataStorage) DeleteServiceData(serviceId string) {
    counter := 0
    iter := ds.IterServiceItems(serviceId)
    wb := levigo.NewWriteBatch()
    defer wb.Close()
    for iter.Valid() {
        wb.Delete(iter.Key)
        counter++
        if counter >= 1000 {
            // Flush periodically so the batch doesn't grow unbounded.
            ds.db.Write(defaultWriteOptions, wb)
            wb.Clear()
            counter = 0
        }
        iter.Next()
    }
    wb.Delete([]byte(serviceDescriptionPrefix + serviceId))
    wb.Delete([]byte(serviceConfigPrefix + serviceId))
    ds.db.Write(defaultWriteOptions, wb)
}
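The deletion-heavy snippets above (Del, deleteRangeOfSeriesCommon, DeleteServiceData) all reduce to one pattern: scan with an iterator, queue deletes into a batch, and flush in fixed-size chunks so the batch never grows unbounded. A self-contained sketch of that pattern against a plain levigo handle; the package name, function name, and chunking threshold are illustrative:

package kvutil

import (
    "bytes"

    "github.com/jmhodges/levigo"
)

// deleteRange removes every key in [start, limit), flushing the batch
// every chunkSize deletions to bound memory use.
func deleteRange(db *levigo.DB, wo *levigo.WriteOptions, ro *levigo.ReadOptions, start, limit []byte, chunkSize int) error {
    it := db.NewIterator(ro)
    defer it.Close()

    wb := levigo.NewWriteBatch()
    defer wb.Close()

    count := 0
    for it.Seek(start); it.Valid(); it.Next() {
        if bytes.Compare(it.Key(), limit) >= 0 {
            break
        }
        wb.Delete(it.Key())
        count++
        if count >= chunkSize {
            if err := db.Write(wo, wb); err != nil {
                return err
            }
            wb.Clear()
            count = 0
        }
    }
    if err := it.GetError(); err != nil {
        return err
    }
    // Apply whatever remains in the final partial batch.
    return db.Write(wo, wb)
}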