// NewReader creates a new initialized block reader.
func NewReader(buf []byte, cmp comparer.BasicComparer) (b *Reader, err error) {
	if len(buf) < 8 {
		err = errors.ErrCorrupt("block too short")
		return
	}

	// Decode restart array length.
	restartLen := binary.LittleEndian.Uint32(buf[len(buf)-4:])
	// Calculate restart array start offset; reject it if it falls outside
	// the buffer or overlaps the trailing length field.
	restartStart := len(buf) - int(restartLen)*4 - 4
	if restartStart < 0 || restartStart >= len(buf)-4 {
		err = errors.ErrCorrupt("bad restart offset in block")
		return
	}

	b = &Reader{
		cmp:          cmp,
		buf:          buf,
		rbuf:         buf[restartStart : len(buf)-4],
		restartLen:   int(restartLen),
		restartStart: restartStart,
	}
	return
}
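// Illustration only: a minimal standalone sketch (not part of the package)
// of the block layout NewReader decodes. A block is entry data, followed by
// restartLen little-endian uint32 restart offsets, followed by restartLen
// itself in the last 4 bytes. The values below are hypothetical.
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	block := make([]byte, 10) // pretend 10 bytes of entry data
	for _, off := range []uint32{0, 5} {
		block = binary.LittleEndian.AppendUint32(block, off) // restart offsets
	}
	block = binary.LittleEndian.AppendUint32(block, 2) // restart count

	// The same trailer decoding NewReader performs.
	restartLen := binary.LittleEndian.Uint32(block[len(block)-4:])
	restartStart := len(block) - int(restartLen)*4 - 4
	fmt.Println(restartLen, restartStart) // 2 10
}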
// readAll reads the entire referenced block.
func (p *bInfo) readAll(r io.ReaderAt, checksum bool) (b []byte, err error) {
	// The block payload is followed by a 1-byte compression type and a
	// 4-byte CRC, hence the extra 5 bytes.
	raw := make([]byte, p.size+5)
	_, err = readFullAt(r, raw, int64(p.offset))
	if err != nil {
		return
	}

	crcb := raw[len(raw)-4:]
	raw = raw[:len(raw)-4]

	if checksum {
		sum := binary.LittleEndian.Uint32(crcb)
		sum = hash.UnmaskCRC32(sum)
		crc := hash.NewCRC32C()
		crc.Write(raw)
		if crc.Sum32() != sum {
			err = errors.ErrCorrupt("block checksum mismatch")
			return
		}
	}

	compression := raw[len(raw)-1]
	b = raw[:len(raw)-1]
	switch compression {
	case kNoCompression:
	case kSnappyCompression:
		return snappy.Decode(nil, b)
	default:
		err = errors.ErrCorrupt("bad block type")
	}
	return
}
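// Illustration only: a standalone sketch of the 5-byte envelope readAll
// strips: block payload, a 1-byte compression type, then a 4-byte masked
// CRC32C over payload+type. The rotate-and-add masking shown is LevelDB's
// scheme and is assumed to be what hash.UnmaskCRC32 reverses.
package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

const maskDelta = 0xa282ead8 // LevelDB's CRC mask constant

func maskCRC(crc uint32) uint32 { return (crc>>15 | crc<<17) + maskDelta }

func unmaskCRC(m uint32) uint32 {
	r := m - maskDelta
	return r>>17 | r<<15
}

func main() {
	table := crc32.MakeTable(crc32.Castagnoli)
	payload := append([]byte("block data"), 0) // data plus kNoCompression byte

	// Writer side: append the masked checksum.
	buf := binary.LittleEndian.AppendUint32(payload, maskCRC(crc32.Checksum(payload, table)))

	// Reader side, as in readAll with checksum enabled.
	sum := unmaskCRC(binary.LittleEndian.Uint32(buf[len(buf)-4:]))
	fmt.Println(sum == crc32.Checksum(buf[:len(buf)-4], table)) // true
}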
func (i *Iterator) getRestartRange(idx int) (r *restartRange, err error) {
	var start, end int
	start, err = i.getRestartOffset(idx)
	if err != nil {
		return
	}
	if start >= i.b.restartStart {
		goto corrupt
	}

	if idx+1 < i.b.restartLen {
		end, err = i.getRestartOffset(idx + 1)
		if err != nil {
			return
		}
		if end >= i.b.restartStart {
			goto corrupt
		}
	} else {
		end = i.b.restartStart
	}

	if start < end {
		r = &restartRange{raw: i.b.buf[start:end]}
		r.buf = bytes.NewBuffer(r.raw)
		return
	}

corrupt:
	return nil, errors.ErrCorrupt("bad restart range in block")
}
func (p *bInfo) decodeFrom(b []byte) (int, error) {
	var n, m int
	p.offset, n = binary.Uvarint(b)
	if n > 0 {
		p.size, m = binary.Uvarint(b[n:])
	}
	if n <= 0 || m <= 0 {
		return 0, errors.ErrCorrupt("bad block handle")
	}
	return n + m, nil
}
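// Illustration only: a standalone sketch of the varint block handle that
// decodeFrom parses, i.e. two Uvarints encoding offset then size. The
// numbers are hypothetical.
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	handle := binary.AppendUvarint(nil, 8421) // offset
	handle = binary.AppendUvarint(handle, 300) // size

	offset, n := binary.Uvarint(handle)
	size, m := binary.Uvarint(handle[n:])
	if n <= 0 || m <= 0 {
		panic("bad block handle")
	}
	fmt.Println(offset, size, n+m) // 8421 300 4
}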
func (r *restartRange) next() (err error) {
	if r.cached && len(r.cache) > r.pos {
		r.kv = r.cache[r.pos]
		r.pos++
		return
	}

	if r.buf.Len() == 0 {
		return io.EOF
	}

	var nkey []byte

	// Read the entry header: shared key length, non-shared key length
	// and value length.
	var shared, nonShared, valueLen uint64
	shared, err = binary.ReadUvarint(r.buf)
	if err != nil || shared > uint64(len(r.kv.key)) {
		goto corrupt
	}
	nonShared, err = binary.ReadUvarint(r.buf)
	if err != nil {
		goto corrupt
	}
	valueLen, err = binary.ReadUvarint(r.buf)
	if err != nil {
		goto corrupt
	}
	if nonShared+valueLen > uint64(r.buf.Len()) {
		goto corrupt
	}

	if r.cached && r.pos > 0 {
		r.cache = append(r.cache, r.kv)
	}

	// Read the entry content; the key is reconstructed from the shared
	// prefix of the previous key plus the non-shared suffix.
	nkey = r.buf.Next(int(nonShared))
	if shared == 0 {
		r.kv.key = nkey
	} else {
		pkey := r.kv.key[:shared]
		key := make([]byte, shared+nonShared)
		copy(key, pkey)
		copy(key[shared:], nkey)
		r.kv.key = key
	}
	r.kv.value = r.buf.Next(int(valueLen))
	r.pos++
	return

corrupt:
	return errors.ErrCorrupt("bad entry in block")
}
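// Illustration only: a standalone sketch of the shared-prefix entry
// encoding that next decodes. Each entry stores three varints (shared,
// nonShared, valueLen), then the non-shared key suffix and the value.
// Keys and values here are hypothetical.
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func appendEntry(dst []byte, prev, key, value []byte) []byte {
	shared := 0
	for shared < len(prev) && shared < len(key) && prev[shared] == key[shared] {
		shared++
	}
	dst = binary.AppendUvarint(dst, uint64(shared))
	dst = binary.AppendUvarint(dst, uint64(len(key)-shared))
	dst = binary.AppendUvarint(dst, uint64(len(value)))
	dst = append(dst, key[shared:]...)
	return append(dst, value...)
}

func main() {
	var raw []byte
	raw = appendEntry(raw, nil, []byte("apple"), []byte("1"))
	raw = appendEntry(raw, []byte("apple"), []byte("apricot"), []byte("2"))

	// Decode the headers the same way next does.
	buf := bytes.NewBuffer(raw)
	for buf.Len() > 0 {
		shared, _ := binary.ReadUvarint(buf)
		nonShared, _ := binary.ReadUvarint(buf)
		valueLen, _ := binary.ReadUvarint(buf)
		fmt.Println(shared, string(buf.Next(int(nonShared))), string(buf.Next(int(valueLen))))
	}
	// Output:
	// 0 apple 1
	// 2 ricot 2
}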
// NewFilterReader creates a new initialized filter block reader.
func NewFilterReader(buf []byte, filter filter.Filter) (b *FilterReader, err error) {
	// 4 bytes for the offsets start and 1 byte for baseLg.
	if len(buf) < 5 {
		err = errors.ErrCorrupt("filter block too short")
		return
	}

	offsetsStart := binary.LittleEndian.Uint32(buf[len(buf)-5:])
	if offsetsStart > uint32(len(buf))-5 {
		err = errors.ErrCorrupt("bad restart offset in filter block")
		return
	}

	b = &FilterReader{
		filter:       filter,
		buf:          buf,
		baseLg:       uint(buf[len(buf)-1]),
		offsetsStart: offsetsStart,
		length:       (uint(len(buf)) - 5 - uint(offsetsStart)) / 4,
		ob:           buf[offsetsStart : len(buf)-1],
	}
	return
}
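// Illustration only: a standalone sketch of how a data-block offset maps to
// a filter index via baseLg, as the FilterReader built above would use it.
// With the conventional baseLg of 11, every 2 KiB of table data gets one
// filter; the offsets below are hypothetical.
package main

import "fmt"

func main() {
	const baseLg = 11 // taken from the filter block's last byte
	for _, blockOffset := range []uint{0, 2047, 2048, 10000} {
		fmt.Println(blockOffset, "-> filter", blockOffset>>baseLg)
	}
}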
func (b *Batch) decodeRec(f func(i int, t vType, key, value []byte)) error {
	off := kBatchHdrLen
	for i := 0; i < b.rLen; i++ {
		if off >= len(b.buf) {
			return errors.ErrCorrupt("invalid batch record length")
		}

		t := vType(b.buf[off])
		if t > tVal {
			return errors.ErrCorrupt("invalid batch record type in batch")
		}
		off++

		x, n := binary.Uvarint(b.buf[off:])
		off += n
		if n <= 0 || off+int(x) > len(b.buf) {
			return errBatchBadRecord
		}
		key := b.buf[off : off+int(x)]
		off += int(x)

		var value []byte
		if t == tVal {
			x, n := binary.Uvarint(b.buf[off:])
			off += n
			if n <= 0 || off+int(x) > len(b.buf) {
				return errBatchBadRecord
			}
			value = b.buf[off : off+int(x)]
			off += int(x)
		}

		f(i, t, key, value)
	}
	return nil
}
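// Illustration only: a standalone sketch of the batch wire format decodeRec
// walks: an 8-byte sequence number, a 4-byte record count, then per record a
// type byte, a varint-prefixed key and, for value-type records, a
// varint-prefixed value. The type byte value 1 mirrors LevelDB's value type;
// the payload is hypothetical.
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	buf := make([]byte, 12)
	binary.LittleEndian.PutUint64(buf[0:], 100) // seq
	binary.LittleEndian.PutUint32(buf[8:], 1)   // record count (rLen)

	buf = append(buf, 1) // type byte: value record
	buf = binary.AppendUvarint(buf, 3)
	buf = append(buf, "foo"...)
	buf = binary.AppendUvarint(buf, 3)
	buf = append(buf, "bar"...)

	// Decode the single record the same way decodeRec does.
	off := 12
	t := buf[off]
	off++
	klen, n := binary.Uvarint(buf[off:])
	off += n
	key := buf[off : off+int(klen)]
	off += int(klen)
	vlen, n := binary.Uvarint(buf[off:])
	off += n
	fmt.Println(t, string(key), string(buf[off:off+int(vlen)])) // 1 foo bar
}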
func (d *FileDesc) GetMainManifest() (f File, err error) {
	pth := path.Join(d.path, "CURRENT")
	rw, err := os.OpenFile(pth, os.O_RDONLY, 0)
	if err != nil {
		err = err.(*os.PathError).Err
		return
	}
	defer rw.Close()

	buf := new(bytes.Buffer)
	_, err = buf.ReadFrom(rw)
	if err != nil {
		return
	}

	b := buf.Bytes()
	p := &file{desc: d}
	if len(b) < 1 || b[len(b)-1] != '\n' || !p.parse(string(b[:len(b)-1])) {
		return nil, errors.ErrCorrupt("invalid CURRENT file")
	}
	return p, nil
}
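// Illustration only: a standalone sketch of the CURRENT file convention
// GetMainManifest relies on: a single line naming the live manifest file,
// terminated by '\n'. The manifest name is hypothetical; the real code
// hands the trimmed name to file.parse.
package main

import (
	"bytes"
	"fmt"
)

func main() {
	b := []byte("MANIFEST-000001\n")
	if len(b) < 1 || b[len(b)-1] != '\n' {
		panic("invalid CURRENT file")
	}
	name := string(bytes.TrimSuffix(b, []byte("\n")))
	fmt.Println(name)
}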
func (i *Iterator) getRestartKey(idx int) (key []byte, err error) {
	offset, err := i.getRestartOffset(idx)
	if err != nil {
		return
	}

	buf := i.b.buf[offset:]
	shared, n := binary.Uvarint(buf) // shared key length
	buf = buf[n:]
	nonShared, n := binary.Uvarint(buf) // non-shared key length
	buf = buf[n:]
	valueLen, n := binary.Uvarint(buf) // value length
	buf = buf[n:]

	// An entry at a restart point must not share a prefix with the
	// previous key.
	if shared > 0 || nonShared+valueLen > uint64(len(buf)) {
		err = errors.ErrCorrupt("bad entry in block")
		return
	}
	key = buf[:nonShared]
	return
}
// Recover a database session; needs external synchronization.
func (s *session) recover() (err error) {
	file, err := s.stor.GetManifest()
	if err != nil {
		return
	}

	r, err := newJournalReader(file, true, s.journalDropFunc("manifest", file.Num()))
	if err != nil {
		return
	}
	defer r.close()

	cmp := s.cmp.cmp.Name()
	staging := s.version_NB().newStaging()
	srec := new(sessionRecord)

	for r.journal.Next() {
		rec := new(sessionRecord)
		err = rec.decode(r.journal.Record())
		if err != nil {
			continue
		}

		if rec.hasComparer && rec.comparer != cmp {
			return errors.ErrInvalid("invalid comparer, " +
				"want '" + cmp + "', got '" + rec.comparer + "'")
		}

		// Save compaction pointers.
		for _, rp := range rec.compactPointers {
			s.stCPtrs[rp.level] = iKey(rp.key)
		}

		// Commit record to version staging.
		staging.commit(rec)

		if rec.hasJournalNum {
			srec.setJournalNum(rec.journalNum)
		}
		if rec.hasPrevJournalNum {
			srec.setPrevJournalNum(rec.prevJournalNum)
		}
		if rec.hasNextNum {
			srec.setNextNum(rec.nextNum)
		}
		if rec.hasSeq {
			srec.setSeq(rec.seq)
		}
	}

	// Check for error in the journal reader.
	err = r.journal.Error()
	if err != nil {
		return
	}

	switch false {
	case srec.hasNextNum:
		err = errors.ErrCorrupt("manifest missing next file number")
	case srec.hasJournalNum:
		err = errors.ErrCorrupt("manifest missing journal file number")
	case srec.hasSeq:
		err = errors.ErrCorrupt("manifest missing seq number")
	}
	if err != nil {
		return
	}

	s.manifest = &journalWriter{file: file}
	s.setVersion(staging.finish())
	s.setFileNum(srec.nextNum)
	s.recordCommited(srec)
	return
}
// Copyright (c) 2012, Suryandaru Triandana <*****@*****.**>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package leveldb

import (
	"github.com/syndtr/goleveldb/leveldb/comparer"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/iterator"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

var errIKeyCorrupt = errors.ErrCorrupt("internal key corrupted")

// newRawIterator returns merged iterators of the current version, the
// current frozen memdb and the current memdb.
func (d *DB) newRawIterator(ro *opt.ReadOptions) iterator.Iterator {
	s := d.s
	mem := d.getMem()
	v := s.version()

	ti := v.getIterators(ro)
	ii := make([]iterator.Iterator, 0, len(ti)+2)
	ii = append(ii, mem.cur.NewIterator())
	if mem.froze != nil {
		ii = append(ii, mem.froze.NewIterator())
	}
	// Table iterators come after the memdb iterators.
	ii = append(ii, ti...)

	return iterator.NewMergedIterator(ii, s.cmp)
}
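// Illustration only: a standalone sketch of what the merged iterator built
// above does conceptually, i.e. repeatedly yield the smallest head among
// several already-sorted sources (here plain string slices standing in for
// memdb and table iterators).
package main

import "fmt"

func main() {
	srcs := [][]string{{"a", "d", "f"}, {"b", "c", "g"}}
	pos := make([]int, len(srcs))
	for {
		// Pick the source whose current head is smallest.
		min := -1
		for i, s := range srcs {
			if pos[i] < len(s) && (min < 0 || s[pos[i]] < srcs[min][pos[min]]) {
				min = i
			}
		}
		if min < 0 {
			break // all sources exhausted
		}
		fmt.Print(srcs[min][pos[min]], " ") // a b c d f g
		pos[min]++
	}
}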
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LEVELDBCPP_LICENSE file. See the LEVELDBCPP_AUTHORS file
// for names of contributors.

package db

import (
	"encoding/binary"

	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/memdb"
)

var (
	errBatchTooShort  = errors.ErrCorrupt("batch is too short")
	errBatchBadRecord = errors.ErrCorrupt("bad record in batch")
)

// Batch header: 8-byte sequence number followed by a 4-byte record count.
const kBatchHdrLen = 8 + 4

type batchReplay interface {
	put(key, value []byte, seq uint64)
	delete(key []byte, seq uint64)
}

// Batch represents a write batch.
type Batch struct {
	buf  []byte
	rLen int
	seq  uint64
}
func (v *version) get(key iKey, ro *opt.ReadOptions) (value []byte, cstate bool, err error) {
	s := v.s
	icmp := s.cmp
	ucmp := icmp.cmp
	ukey := key.ukey()

	var tset *tSet
	tseek := true

	// We can search level-by-level since entries never hop across
	// levels. Therefore we are guaranteed that if we find data
	// in a smaller level, later levels are irrelevant.
	for level, ts := range v.tables {
		if len(ts) == 0 {
			continue
		}

		if level == 0 {
			// Level-0 files may overlap each other. Find all files that
			// overlap the user key and process them in order from newest
			// to oldest.
			var tmp tFiles
			for _, t := range ts {
				if ucmp.Compare(ukey, t.min.ukey()) >= 0 &&
					ucmp.Compare(ukey, t.max.ukey()) <= 0 {
					tmp = append(tmp, t)
				}
			}
			if len(tmp) == 0 {
				continue
			}
			tmp.sort(tFileSorterNewest(nil))
			ts = tmp
		} else {
			// Levels > 0 are sorted and non-overlapping; binary search
			// for the single file that may contain the key.
			i := ts.search(key, icmp)
			if i >= len(ts) || ucmp.Compare(ukey, ts[i].min.ukey()) < 0 {
				continue
			}
			ts = ts[i : i+1]
		}

		for _, t := range ts {
			if tseek {
				if tset == nil {
					tset = &tSet{level, t}
				} else if tset.table.incrSeek() <= 0 {
					cstate = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
					tseek = false
				}
			}

			var _rkey, rval []byte
			_rkey, rval, err = s.tops.get(t, key, ro)
			if err == errors.ErrNotFound {
				continue
			} else if err != nil {
				return
			}

			rkey := iKey(_rkey)
			if _, kt, ok := rkey.parseNum(); ok {
				if ucmp.Compare(ukey, rkey.ukey()) == 0 {
					switch kt {
					case tVal:
						value = rval
					case tDel:
						err = errors.ErrNotFound
					default:
						panic("not reached")
					}
					return
				}
			} else {
				err = errors.ErrCorrupt("internal key corrupted")
				return
			}
		}
	}

	err = errors.ErrNotFound
	return
}
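// Illustration only: a standalone sketch of the non-overlapping level search
// in get above. For levels > 0, a binary search finds the first file whose
// max key is >= the target; a lower-bound check on the file's min key then
// decides whether the file can contain it. The key ranges are hypothetical.
package main

import (
	"fmt"
	"sort"
)

type tFile struct{ min, max string }

func main() {
	ts := []tFile{{"a", "f"}, {"g", "m"}, {"n", "z"}}
	key := "k"
	i := sort.Search(len(ts), func(i int) bool { return ts[i].max >= key })
	if i < len(ts) && key >= ts[i].min {
		fmt.Println("candidate file", i) // candidate file 1
	}
}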