func truno(t *testing.T, o *opt.Options, f func(h *dbHarness)) { for i := 0; i < 4; i++ { func() { switch i { case 0: case 1: if o == nil { o = &opt.Options{Filter: _bloom_filter} } else { old := o o = &opt.Options{} *o = *old o.Filter = _bloom_filter } case 2: if o == nil { o = &opt.Options{Compression: opt.NoCompression} } else { old := o o = &opt.Options{} *o = *old o.Compression = opt.NoCompression } } h := newDbHarnessWopt(t, o) defer h.close() switch i { case 3: h.reopenDB() } f(h) }() } }
func (s *session) setOptions(o *opt.Options) { s.o = &opt.Options{} if o != nil { *s.o = *o } // Alternative filters. if filters := o.GetAltFilters(); len(filters) > 0 { s.o.AltFilters = make([]filter.Filter, len(filters)) for i, filter := range filters { s.o.AltFilters[i] = &iFilter{filter} } } // Block cache. switch o.GetBlockCache() { case nil: s.o.BlockCache = cache.NewLRUCache(opt.DefaultBlockCacheSize) case opt.NoCache: s.o.BlockCache = nil } // Comparer. s.icmp = &iComparer{o.GetComparer()} s.o.Comparer = s.icmp // Filter. if filter := o.GetFilter(); filter != nil { s.o.Filter = &iFilter{filter} } }
// NewWriter creates a new initialized table writer for the file. // // Table writer is not goroutine-safe. func NewWriter(f io.Writer, o *opt.Options) *Writer { w := &Writer{ writer: f, cmp: o.GetComparer(), filter: o.GetFilter(), compression: o.GetCompression(), blockSize: o.GetBlockSize(), comparerScratch: make([]byte, 0), } // data block w.dataBlock.restartInterval = o.GetBlockRestartInterval() // The first 20-bytes are used for encoding block handle. w.dataBlock.scratch = w.scratch[20:] // index block w.indexBlock.restartInterval = 1 w.indexBlock.scratch = w.scratch[20:] // filter block if w.filter != nil { w.filterBlock.generator = w.filter.NewGenerator() w.filterBlock.flush(0) } return w }
// recoverTable scans every table file in storage and rebuilds a session
// record from their contents: readable entries are counted, tables with
// corruption are rewritten from their good entries, and each surviving
// table is registered at level 0. Finally a fresh manifest is created and
// the record committed. Returns the first fatal error encountered.
func recoverTable(s *session, o *opt.Options) error {
	// Get all tables and sort it by file number.
	tableFiles_, err := s.getFiles(storage.TypeTable)
	if err != nil {
		return err
	}
	tableFiles := files(tableFiles_)
	tableFiles.sort()

	// mSeq tracks the largest sequence number seen across all tables;
	// good/corrupted are whole-run entry counters used only for logging.
	var mSeq uint64
	var good, corrupted int
	rec := new(sessionRecord)
	// Pool sized for one block plus trailer, shared by all table readers.
	bpool := util.NewBufferPool(o.GetBlockSize() + 5)

	// buildTable writes the valid entries produced by iter into a fresh
	// temp table file. On any error the temp file is removed and tmp is
	// reset to nil by the deferred cleanup (which inspects the named
	// result err — do not rename it).
	buildTable := func(iter iterator.Iterator) (tmp storage.File, size int64, err error) {
		tmp = s.newTemp()
		writer, err := tmp.Create()
		if err != nil {
			return
		}
		defer func() {
			writer.Close()
			if err != nil {
				tmp.Remove()
				tmp = nil
			}
		}()
		// Copy entries.
		tw := table.NewWriter(writer, o)
		for iter.Next() {
			key := iter.Key()
			// Only keys that parse as valid internal keys are copied;
			// everything else is silently dropped.
			if validIkey(key) {
				err = tw.Append(key, iter.Value())
				if err != nil {
					return
				}
			}
		}
		err = iter.Error()
		if err != nil {
			return
		}
		err = tw.Close()
		if err != nil {
			return
		}
		err = writer.Sync()
		if err != nil {
			return
		}
		size = int64(tw.BytesLen())
		return
	}

	// recoverTable processes a single table file: count good/corrupted
	// entries, rebuild the file if any corruption was seen, and add the
	// result to rec. Block-level read errors are counted via the error
	// callback rather than aborting the scan.
	recoverTable := func(file storage.File) error {
		s.logf("table@recovery recovering @%d", file.Num())
		reader, err := file.Open()
		if err != nil {
			return err
		}
		defer reader.Close()
		// Get file size.
		size, err := reader.Seek(0, 2)
		if err != nil {
			return err
		}
		var tSeq uint64
		var tgood, tcorrupted, blockerr int
		// imin/imax become the smallest/largest internal key, in scan
		// order (first key seen / last key seen).
		var imin, imax []byte
		tr := table.NewReader(reader, size, nil, bpool, o)
		iter := tr.NewIterator(nil, nil)
		iter.(iterator.ErrorCallbackSetter).SetErrorCallback(func(err error) {
			s.logf("table@recovery found error @%d %q", file.Num(), err)
			blockerr++
		})
		// Scan the table.
		for iter.Next() {
			key := iter.Key()
			_, seq, _, ok := parseIkey(key)
			if !ok {
				tcorrupted++
				continue
			}
			tgood++
			if seq > tSeq {
				tSeq = seq
			}
			if imin == nil {
				imin = append([]byte{}, key...)
			}
			imax = append(imax[:0], key...)
		}
		if err := iter.Error(); err != nil {
			iter.Release()
			return err
		}
		iter.Release()
		if tgood > 0 {
			if tcorrupted > 0 || blockerr > 0 {
				// Rebuild the table.
				s.logf("table@recovery rebuilding @%d", file.Num())
				iter := tr.NewIterator(nil, nil)
				tmp, newSize, err := buildTable(iter)
				iter.Release()
				if err != nil {
					return err
				}
				// Close before Replace so the file can be swapped on
				// platforms that forbid replacing an open file; the
				// deferred Close above then becomes a harmless no-op.
				reader.Close()
				if err := file.Replace(tmp); err != nil {
					return err
				}
				size = newSize
			}
			if tSeq > mSeq {
				mSeq = tSeq
			}
			// Add table to level 0.
			rec.addTable(0, file.Num(), uint64(size), imin, imax)
			s.logf("table@recovery recovered @%d N·%d C·%d B·%d S·%d Q·%d", file.Num(), tgood, tcorrupted, blockerr, size, tSeq)
		} else {
			s.logf("table@recovery unrecoverable @%d C·%d B·%d S·%d", file.Num(), tcorrupted, blockerr, size)
		}
		good += tgood
		corrupted += tcorrupted
		return nil
	}

	// Recover all tables.
	if len(tableFiles) > 0 {
		s.logf("table@recovery F·%d", len(tableFiles))
		// Mark file number as used.
		s.markFileNum(tableFiles[len(tableFiles)-1].Num())
		for _, file := range tableFiles {
			if err := recoverTable(file); err != nil {
				return err
			}
		}
		s.logf("table@recovery recovered F·%d N·%d C·%d Q·%d", len(tableFiles), good, corrupted, mSeq)
	}
	// Set sequence number.
	rec.setSeq(mSeq + 1)
	// Create new manifest.
	if err := s.create(); err != nil {
		return err
	}
	// Commit.
	return s.commit(rec)
}
// NewReader creates a new initialized table reader for the file. // The cache and bpool is optional and can be nil. // // The returned table reader instance is goroutine-safe. func NewReader(f io.ReaderAt, size int64, cache cache.Namespace, bpool *util.BufferPool, o *opt.Options) *Reader { if bpool == nil { bpool = util.NewBufferPool(o.GetBlockSize() + blockTrailerLen) } r := &Reader{ reader: f, cache: cache, bpool: bpool, cmp: o.GetComparer(), checksum: o.GetStrict(opt.StrictBlockChecksum), strictIter: o.GetStrict(opt.StrictIterator), } if f == nil { r.err = errors.New("leveldb/table: Reader: nil file") return r } if size < footerLen { r.err = errors.New("leveldb/table: Reader: invalid table (file size is too small)") return r } var footer [footerLen]byte if _, err := r.reader.ReadAt(footer[:], size-footerLen); err != nil && err != io.EOF { r.err = fmt.Errorf("leveldb/table: Reader: invalid table (could not read footer): %v", err) } if string(footer[footerLen-len(magic):footerLen]) != magic { r.err = errors.New("leveldb/table: Reader: invalid table (bad magic number)") return r } // Decode the metaindex block handle. metaBH, n := decodeBlockHandle(footer[:]) if n == 0 { r.err = errors.New("leveldb/table: Reader: invalid table (bad metaindex block handle)") return r } // Decode the index block handle. indexBH, n := decodeBlockHandle(footer[n:]) if n == 0 { r.err = errors.New("leveldb/table: Reader: invalid table (bad index block handle)") return r } // Read index block. r.indexBlock, r.err = r.readBlock(indexBH, true) if r.err != nil { return r } // Read metaindex block. metaBlock, err := r.readBlock(metaBH, true) if err != nil { r.err = err return r } // Set data end. 
r.dataEnd = int64(metaBH.offset) metaIter := metaBlock.newIterator(nil, false, nil) for metaIter.Next() { key := string(metaIter.Key()) if !strings.HasPrefix(key, "filter.") { continue } fn := key[7:] var filter filter.Filter if f0 := o.GetFilter(); f0 != nil && f0.Name() == fn { filter = f0 } else { for _, f0 := range o.GetAltFilters() { if f0.Name() == fn { filter = f0 break } } } if filter != nil { filterBH, n := decodeBlockHandle(metaIter.Value()) if n == 0 { continue } // Update data end. r.dataEnd = int64(filterBH.offset) filterBlock, err := r.readFilterBlock(filterBH, filter) if err != nil { continue } r.filterBlock = filterBlock break } } metaIter.Release() return r }