func (fdb *fdbSlice) Statistics() (StorageStatistics, error) {
	var sts StorageStatistics

	sz, err := common.FileSize(fdb.currfile)
	if err != nil {
		return sts, err
	}

	// Compute approximate fragmentation percentage.
	// Since we keep multiple index snapshots even after compaction, it is
	// not trivial to compute fragmentation as the ratio of data size to
	// disk size. Hence we approximate it by adding the overhead data size
	// caused by the extra snapshots.
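	// For example, a 100MB file holding 60MB of live data plus 10MB of
	// extra snapshot overhead yields DataSize = 70MB, from which a caller
	// can derive fragmentation as (100 - 70) / 100 = 30%.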
	extraSnapDataSize := platform.LoadInt64(&fdb.extraSnapDataSize)

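	// statFdLock guards statFd, which Compact() swaps for a handle on the
	// newly compacted file.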
	fdb.statFdLock.Lock()
	sts.DataSize = int64(fdb.statFd.EstimateSpaceUsed()) + extraSnapDataSize
	fdb.statFdLock.Unlock()

	sts.DiskSize = sz
	sts.ExtraSnapDataSize = extraSnapDataSize

	sts.GetBytes = platform.LoadInt64(&fdb.get_bytes)
	sts.InsertBytes = platform.LoadInt64(&fdb.insert_bytes)
	sts.DeleteBytes = platform.LoadInt64(&fdb.delete_bytes)

	return sts, nil
}
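
// Compact runs ForestDB compaction on the slice's file up to the oldest
// index snapshot still in use, reclaiming space from stale blocks while
// preserving every retained snapshot.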
func (fdb *fdbSlice) Compact() error {
	fdb.IncrRef()
	defer fdb.DecrRef()

	// Get the oldest snapshot up to which compaction can be done.
	infos, err := fdb.getSnapshotsMeta()
	if err != nil {
		return err
	}

	sic := NewSnapshotInfoContainer(infos)

	osnap := sic.GetOldest()
	if osnap == nil {
		logging.Infof("ForestDBSlice::Compact No Snapshot Found. Skipped Compaction. "+
			"Slice Id %v, IndexInstId %v, IndexDefnId %v", fdb.id, fdb.idxInstId, fdb.idxDefnId)
		return nil
	}

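	// MainSeq is the "main" kvstore seqnum recorded in the oldest index
	// snapshot; compaction must retain all mutations at or above it.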
	mainSeq := osnap.(*fdbSnapshotInfo).MainSeq

	// Find a db snapshot at or below the oldest index snapshot.
	snap, err := fdb.compactFd.GetAllSnapMarkers()
	if err != nil {
		return err
	}
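	// The marker list is allocated by the ForestDB C layer and must be
	// freed explicitly once a compaction point has been chosen.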
	defer snap.FreeSnapMarkers()

	var snapMarker *forestdb.SnapMarker
	var compactSeqNum forestdb.SeqNum
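	// Scan commit markers for the first snapshot whose "main" kvstore
	// seqnum does not exceed mainSeq. ForestDB is expected to report
	// markers newest-first, so the first match is the latest safe point.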
snaploop:
	for _, s := range snap.SnapInfoList() {

		cm := s.GetKvsCommitMarkers()
		for _, c := range cm {
			// If the seqnum of the "main" kvstore is less than or equal to
			// the oldest snapshot's seqnum, it is safe to compact up to
			// that snapshot.
			if c.GetKvStoreName() == "main" && c.GetSeqNum() <= mainSeq {
				snapMarker = s.GetSnapMarker()
				compactSeqNum = c.GetSeqNum()
				break snaploop
			}
		}
	}

	if snapMarker == nil {
		logging.Infof("ForestDBSlice::Compact No Valid SnapMarker Found. Skipped Compaction. "+
			"Slice Id %v, IndexInstId %v, IndexDefnId %v", fdb.id, fdb.idxInstId, fdb.idxDefnId)
		return nil
	}

	logging.Infof("ForestDBSlice::Compact Compacting up to SeqNum %v. "+
		"Slice Id %v, IndexInstId %v, IndexDefnId %v", compactSeqNum, fdb.id,
		fdb.idxInstId, fdb.idxDefnId)

	newpath := newFdbFile(fdb.path, true)
	// Remove any file left over from a crash during the last compaction
	// attempt; the error is ignored since the file may not exist.
	os.Remove(newpath)
	err = fdb.compactFd.CompactUpto(newpath, snapMarker)
	if err != nil {
		return err
	}

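	// The compacted data now lives in newpath; drop the pre-compaction file.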
	if _, e := os.Stat(fdb.currfile); e == nil {
		if err = os.Remove(fdb.currfile); err != nil {
			return err
		}
	}

	fdb.currfile = newpath

	diskSz, err := common.FileSize(fdb.currfile)
	if err != nil {
		return err
	}
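
	// Reopen the read-only stats handle against the compacted file so that
	// Statistics() reports sizes for the new file.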
	config := forestdb.DefaultConfig()
	config.SetOpenFlags(forestdb.OPEN_FLAG_RDONLY)
	fdb.statFd.Close()
	if fdb.statFd, err = forestdb.Open(fdb.currfile, config); err != nil {
		return err
	}

	dataSz := int64(fdb.statFd.EstimateSpaceUsed())
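	// Right after compaction, disk size minus estimated live data size
	// approximates the overhead of the retained snapshots; cache it for
	// the fragmentation estimate in Statistics().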
	var extraSnapDataSize int64
	if diskSz > dataSz {
		extraSnapDataSize = diskSz - dataSz
	}

	platform.StoreInt64(&fdb.extraSnapDataSize, extraSnapDataSize)
	return nil
}