Example 1
// init recovery super block from index or super block.
//
// It first replays the index file to rebuild the in-memory needle cache
// and find the end offset of indexed data, then scans the super block
// from that offset to re-index anything the index missed, and finally
// flushes the index.
func (v *Volume) init() (err error) {
	var offset uint32
	// recovery from index
	if err = v.Indexer.Recovery(func(ix *index.Index) (err1 error) {
		// BUGFIX: validate the entry BEFORE installing it in the cache;
		// previously a corrupt record was cached and offset advanced
		// before the size check fired.
		if ix.Size > int32(v.conf.NeedleMaxSize) || ix.Size < 0 {
			err1 = errors.ErrIndexSize
			return
		}
		v.needles[ix.Key] = needle.NewCache(ix.Offset, ix.Size)
		// next needle must start right after this one
		offset = ix.Offset + needle.NeedleOffset(int64(ix.Size))
		return
	}); err != nil {
		return
	}
	// recovery from super block, continuing at the last indexed offset
	if err = v.Block.Recovery(offset, func(n *needle.Needle, so, eo uint32) (err1 error) {
		if n.TotalSize > int32(v.conf.NeedleMaxSize) || n.TotalSize < 0 {
			err1 = errors.ErrNeedleSize
			return
		}
		if n.Flag == needle.FlagOK {
			// re-append the missing entry to the index
			if err1 = v.Indexer.Write(n.Key, so, n.TotalSize); err1 != nil {
				return
			}
		} else {
			// deleted needle: cache it with the tombstone offset
			so = needle.CacheDelOffset
		}
		v.needles[n.Key] = needle.NewCache(so, n.TotalSize)
		return
	}); err != nil {
		// BUGFIX: previously this error was silently overwritten by the
		// unconditional Flush below.
		return
	}
	// flush index
	err = v.Indexer.Flush()
	return
}
Example 2 — file: volume.go, project: 1d7500/bfs
// init recovery super block from index or super block.
//
// Stage 1 replays the index file, validating each entry (size bound,
// monotonically non-decreasing offsets, not past the block end) and
// rebuilding the in-memory needle cache. Stage 2 scans the super block
// from the last indexed offset to re-index entries the index missed.
// Stage 3 verifies block size/offset consistency, then flushes the index.
func (v *Volume) init() (err error) {
	var (
		size               int64
		lastOffset, offset uint32
	)
	// recovery from index
	if err = v.Indexer.Recovery(func(ix *index.Index) error {
		if ix.Size > int32(v.conf.NeedleMaxSize) || ix.Size < 0 {
			log.Error("recovery index: %s error(%v)", ix, errors.ErrIndexSize)
			return errors.ErrIndexSize
		}
		// must no less than last offset
		if ix.Offset < lastOffset {
			log.Error("recovery index: %s lastoffset: %d error(%v)", ix, lastOffset, errors.ErrIndexOffset)
			return errors.ErrIndexOffset
		}
		// WARN if index's offset more than the block, discard it.
		if size = int64(ix.Size) + needle.BlockOffset(ix.Offset); size > v.Block.Size {
			log.Error("recovery index: %s EOF", ix)
			return errors.ErrIndexEOF
		}
		v.needles[ix.Key] = needle.NewCache(ix.Offset, ix.Size)
		// next entry must start right after this one
		offset = ix.Offset + needle.NeedleOffset(int64(ix.Size))
		lastOffset = ix.Offset
		return nil
	}); err != nil && err != errors.ErrIndexEOF {
		// ErrIndexEOF only stops the replay early; the block scan below
		// re-reads everything past `offset`, so it is not fatal here.
		return
	}
	// recovery from super block
	if err = v.Block.Recovery(offset, func(n *needle.Needle, so, eo uint32) (err1 error) {
		if n.TotalSize > int32(v.conf.NeedleMaxSize) || n.TotalSize < 0 {
			log.Error("recovery needle: %s error(%v)", n, errors.ErrNeedleSize)
			return errors.ErrNeedleSize
		}
		if n.Flag == needle.FlagOK {
			// re-append the missing entry to the index
			if err1 = v.Indexer.Write(n.Key, so, n.TotalSize); err1 != nil {
				return
			}
		} else {
			// deleted needle: cache it with the tombstone offset
			so = needle.CacheDelOffset
		}
		v.needles[n.Key] = needle.NewCache(so, n.TotalSize)
		return
	}); err != nil {
		return
	}
	// recheck offset, keep size and offset consistency
	if v.Block.Size != needle.BlockOffset(v.Block.Offset) {
		log.Error("block: %s size: %d, offset: %d (%d size) not consistency", v.Block.File, v.Block.Size, v.Block.Offset, needle.BlockOffset(v.Block.Offset))
		return errors.ErrSuperBlockOffset
	}
	// flush index
	err = v.Indexer.Flush()
	return
}
Example 3
// Del logical delete a needle: flip the disk needle flag (asynchronously)
// and point the in-memory needle cache at the tombstone offset.
func (v *Volume) Del(key int64) (err error) {
	var (
		exists    bool
		cached    int64
		needleLen int32
		oldOffset uint32
	)
	v.lock.Lock()
	cached, exists = v.needles[key]
	switch {
	case !exists:
		err = errors.ErrNeedleNotExist
	default:
		oldOffset, needleLen = needle.Cache(cached)
		if oldOffset == needle.CacheDelOffset {
			// already tombstoned
			err = errors.ErrNeedleDeleted
		} else {
			v.needles[key] = needle.NewCache(needle.CacheDelOffset, needleLen)
			// when in compact, must save all del operations.
			if v.Compact {
				v.compactKeys = append(v.compactKeys, key)
			}
		}
	}
	v.lock.Unlock()
	// flip the on-disk flag outside the lock
	if err == nil {
		err = v.asyncDel(oldOffset)
	}
	return
}
Example 4
// Add add a needles, if key exists append to super block, then update
// needle cache offset to new offset (the old copy is deleted async).
func (v *Volume) Add(n *needle.Needle) (err error) {
	var (
		exist     bool
		oldCache  int64
		newOffset uint32
		oldOffset uint32
		start     = time.Now().UnixNano()
	)
	v.lock.Lock()
	newOffset = v.Block.Offset
	err = v.Block.Write(n.Buffer[:n.TotalSize])
	if err == nil {
		err = v.Indexer.Add(n.Key, newOffset, n.TotalSize)
	}
	if err == nil {
		// cache update only after both block and index succeeded
		oldCache, exist = v.needles[n.Key]
		v.needles[n.Key] = needle.NewCache(newOffset, n.TotalSize)
		v.addCheck(n.Key, n.Cookie)
	}
	v.lock.Unlock()
	if err != nil {
		return
	}
	if log.V(1) {
		log.Infof("add needle, offset: %d, size: %d", newOffset, n.TotalSize)
		log.Info(n)
	}
	// key overwritten: schedule deletion of the previous copy
	if exist {
		oldOffset, _ = needle.Cache(oldCache)
		err = v.asyncDel(oldOffset)
		log.Warningf("same key: %d, old offset: %d, new offset: %d", n.Key, oldOffset, newOffset)
	}
	atomic.AddUint64(&v.Stats.TotalWriteProcessed, 1)
	atomic.AddUint64(&v.Stats.TotalWriteBytes, uint64(n.TotalSize))
	atomic.AddUint64(&v.Stats.TotalWriteDelay, uint64(time.Now().UnixNano()-start))
	return
}
Example 5
// Get get a needle by key and cookie.
//
// On entry n.Key/n.Cookie identify the record; on success n.Buffer is
// filled from the super block and parsed in place. Returns
// ErrNeedleNotExist, ErrNeedleDeleted, ErrNeedleKey or ErrNeedleCookie
// on the corresponding mismatch.
func (v *Volume) Get(n *needle.Needle) (err error) {
	var (
		ok     bool
		nc     int64
		size   int32
		offset uint32
		// save the requested identity: n.Parse below refills n from the
		// on-disk bytes, and we compare against these afterwards.
		key    = n.Key
		cookie = n.Cookie
		now    = time.Now().UnixNano()
	)
	// WARN pread syscall is atomic, so use rlock
	v.lock.RLock()
	if nc, ok = v.needles[n.Key]; ok {
		offset, size = needle.Cache(nc)
		if offset != needle.CacheDelOffset {
			err = v.Block.Get(offset, n.Buffer[:size])
		} else {
			err = errors.ErrNeedleDeleted
		}
	} else {
		err = errors.ErrNeedleNotExist
	}
	v.lock.RUnlock()
	if err != nil {
		return
	}
	if log.V(1) {
		log.Infof("get needle key: %d, cookie: %d, offset: %d, size: %d", n.Key, n.Cookie, offset, size)
	}
	if err = n.Parse(); err != nil {
		return
	}
	if log.V(1) {
		log.Infof("%v\n", n)
	}
	// verify the on-disk record matches what the caller asked for
	if n.Key != key {
		err = errors.ErrNeedleKey
		return
	}
	if n.Cookie != cookie {
		err = errors.ErrNeedleCookie
		return
	}
	// needles map may be out-dated, recheck
	if n.Flag == needle.FlagDel {
		// disk says deleted: refresh the stale cache entry under the
		// write lock, then report deleted.
		v.lock.Lock()
		v.needles[key] = needle.NewCache(needle.CacheDelOffset, size)
		v.lock.Unlock()
		err = errors.ErrNeedleDeleted
	} else {
		atomic.AddUint64(&v.Stats.TotalGetProcessed, 1)
		atomic.AddUint64(&v.Stats.TotalReadBytes, uint64(size))
		atomic.AddUint64(&v.Stats.TotalGetDelay, uint64(time.Now().UnixNano()-now))
	}
	return
}
Example 6
// Write write needles, if key exists append to super block, then update
// needle cache offset to new offset.
//
// buf must be the concatenation of the needles' encoded bytes (checked
// when v.Debug is set). Overwritten copies are deleted asynchronously.
func (v *Volume) Write(ns []needle.Needle, buf []byte) (err error) {
	var (
		now             = time.Now().UnixNano()
		i               int
		ok              bool
		nc              int64
		ncs             []int64
		oldKeys         []int64
		offset, ooffset uint32
		ts              int32
		n               *needle.Needle
	)
	// BUGFIX: nothing to write; this also guards the stats update at the
	// bottom, which dereferenced a nil n when ns was empty.
	if len(ns) == 0 {
		return
	}
	if v.Debug {
		// sanity check: buffer length must equal the sum of needle sizes
		for i = 0; i < len(ns); i++ {
			n = &ns[i]
			ts += n.TotalSize
		}
		if int(ts) != len(buf) {
			err = errors.ErrNeedleSize
			return
		}
	}
	v.lock.Lock()
	offset = v.Block.Offset
	if err = v.Block.Write(buf); err == nil {
		for i = 0; i < len(ns); i++ {
			n = &ns[i]
			if err = v.Indexer.Add(n.Key, offset, n.TotalSize); err != nil {
				break
			}
			// remember overwritten caches (with their keys) for the
			// async delete pass below
			if nc, ok = v.needles[n.Key]; ok {
				ncs = append(ncs, nc)
				oldKeys = append(oldKeys, n.Key)
			}
			v.needles[n.Key] = needle.NewCache(offset, n.TotalSize)
			v.addCheck(n.Key, n.Cookie)
			offset += n.IncrOffset
			if log.V(1) {
				log.Infof("add needle, offset: %d, size: %d", offset, n.TotalSize)
			}
		}
	}
	v.lock.Unlock()
	if err != nil {
		return
	}
	for i, nc = range ncs {
		ooffset, _ = needle.Cache(nc)
		// BUGFIX: keep the first asyncDel failure; the original assigned
		// err on every iteration, so a later success silently dropped an
		// earlier error.
		if err1 := v.asyncDel(ooffset); err1 != nil && err == nil {
			err = err1
		}
		// BUGFIX: log the key that was actually overwritten; the original
		// logged n.Key, i.e. the last needle of the batch.
		log.Warningf("same key: %d, old offset: %d, new offset: %d", oldKeys[i], ooffset, offset)
	}
	atomic.AddUint64(&v.Stats.TotalWriteProcessed, 1)
	atomic.AddUint64(&v.Stats.TotalWriteBytes, uint64(n.TotalSize))
	atomic.AddUint64(&v.Stats.TotalWriteDelay, uint64(time.Now().UnixNano()-now))
	return
}
Example 7
// Write write needles, if key exists append to super block, then update
// needle cache offset to new offset.
//
// Overwritten copies are deleted asynchronously after the lock is
// released.
func (v *Volume) Write(ns *needle.Needles) (err error) {
	var (
		i               int
		ok              bool
		nc              int64
		ncs             []int64
		oldKeys         []int64
		offset, ooffset uint32
		n               *needle.Needle
		now             = time.Now().UnixNano()
	)
	// BUGFIX: nothing to write; this also guards the stats update at the
	// bottom, which dereferenced a nil n when the batch was empty.
	if ns.Num == 0 {
		return
	}
	v.lock.Lock()
	offset = v.Block.Offset
	if err = v.Block.Write(ns.Buffer()); err == nil {
		for i = 0; i < ns.Num; i++ {
			n = ns.Needle(i)
			if err = v.Indexer.Add(n.Key, offset, n.TotalSize); err != nil {
				break
			}
			// remember overwritten caches (with their keys) for the
			// async delete pass below
			if nc, ok = v.needles[n.Key]; ok {
				ncs = append(ncs, nc)
				oldKeys = append(oldKeys, n.Key)
			}
			v.needles[n.Key] = needle.NewCache(offset, n.TotalSize)
			v.addCheck(n.Key, n.Cookie)
			offset += n.IncrOffset
			if log.V(1) {
				log.Infof("add needle, offset: %d, size: %d", offset, n.TotalSize)
				log.Info(n)
			}
		}
	}
	v.lock.Unlock()
	if err != nil {
		return
	}
	for i, nc = range ncs {
		ooffset, _ = needle.Cache(nc)
		// BUGFIX: keep the first asyncDel failure; the original assigned
		// err on every iteration, so a later success silently dropped an
		// earlier error.
		if err1 := v.asyncDel(ooffset); err1 != nil && err == nil {
			err = err1
		}
		// BUGFIX: log the key that was actually overwritten; the original
		// logged n.Key, i.e. the last needle of the batch.
		log.Warningf("same key: %d, old offset: %d, new offset: %d", oldKeys[i], ooffset, offset)
	}
	atomic.AddUint64(&v.Stats.TotalWriteProcessed, 1)
	atomic.AddUint64(&v.Stats.TotalWriteBytes, uint64(n.TotalSize))
	atomic.AddUint64(&v.Stats.TotalWriteDelay, uint64(time.Now().UnixNano()-now))
	return
}
Example 8
// TestSuperBlock exercises the super block life cycle: create, reopen,
// add/write/get/del needles, recovery from arbitrary offsets, repair,
// and compaction.
func TestSuperBlock(t *testing.T) {
	var (
		b                  *SuperBlock
		offset, v2, v3, v4 uint32
		buf                []byte
		err                error
		n                  = &needle.Needle{}
		needles            = make(map[int64]int64)
		data               = []byte("test")
		file               = "../test/test.block"
		ifile              = "../test/test.idx"
		//indexer *Indexer
	)
	os.Remove(file)
	os.Remove(ifile)
	defer os.Remove(file)
	defer os.Remove(ifile)
	// test new block file
	if b, err = NewSuperBlock(file, Options{
		BufferSize:    4 * 1024 * 1024,
		SyncAtWrite:   1024,
		Syncfilerange: true,
	}); err != nil {
		t.Errorf("NewSuperBlock(\"%s\") error(%v)", file, err)
		t.FailNow()
	}
	b.Close()
	// test parse block file
	if b, err = NewSuperBlock(file, Options{
		BufferSize:    4 * 1024 * 1024,
		SyncAtWrite:   1024,
		Syncfilerange: true,
	}); err != nil {
		t.Errorf("NewSuperBlock(\"%s\") error(%v)", file, err)
		t.FailNow()
	}
	b.Close()
	// test open
	if err = b.Open(); err != nil {
		t.Errorf("Open() error(%v)", err)
		t.FailNow()
	}
	defer b.Close()
	// test add
	n.Parse(1, 1, data)
	if err = b.Add(n); err != nil {
		t.Errorf("Add() error(%v)", err)
		t.FailNow()
	}
	if err = compareTestOffset(b, n, needle.NeedleOffset(int64(headerSize))); err != nil {
		t.Errorf("compareTestOffset() error(%v)", err)
		t.FailNow()
	}
	offset = b.Offset
	v2 = b.Offset
	// test get
	buf = make([]byte, 40)
	if err = b.Get(1, buf); err != nil {
		t.Errorf("Get() error(%v)", err)
		t.FailNow()
	}
	if err = compareTestNeedle(t, 1, 1, needle.FlagOK, n, data, buf); err != nil {
		t.Errorf("compareTestNeedle() error(%v)", err)
		t.FailNow()
	}
	// test add
	n.Parse(2, 2, data)
	if err = b.Add(n); err != nil {
		t.Errorf("Add() error(%v)", err)
		t.FailNow()
	}
	if err = compareTestOffset(b, n, offset); err != nil {
		t.Errorf("compareTestOffset() error(%v)", err)
		t.FailNow()
	}
	offset = b.Offset
	v3 = b.Offset
	if err = b.Get(6, buf); err != nil {
		t.Errorf("Get() error(%v)", err)
		t.FailNow()
	}
	if err = compareTestNeedle(t, 2, 2, needle.FlagOK, n, data, buf); err != nil {
		// BUGFIX: include the actual error in the failure message
		t.Errorf("compareTestNeedle(2) error(%v)", err)
		t.FailNow()
	}
	// test write
	n.Parse(3, 3, data)
	if err = b.Write(n); err != nil {
		// BUGFIX: message said Add() for a Write() call
		t.Errorf("Write() error(%v)", err)
		t.FailNow()
	}
	offset = b.Offset
	v4 = b.Offset
	// test write
	n.Parse(4, 4, data)
	if err = b.Write(n); err != nil {
		t.Errorf("Write() error(%v)", err)
		t.FailNow()
	}
	if err = b.Flush(); err != nil {
		t.Errorf("Flush() error(%v)", err)
		t.FailNow()
	}
	if err = compareTestOffset(b, n, offset); err != nil {
		t.Errorf("compareTestOffset() error(%v)", err)
		t.FailNow()
	}
	if err = b.Get(11, buf); err != nil {
		t.Errorf("Get() error(%v)", err)
		t.FailNow()
	}
	if err = compareTestNeedle(t, 3, 3, needle.FlagOK, n, data, buf); err != nil {
		t.Errorf("compareTestNeedle(3) error(%v)", err)
		t.FailNow()
	}
	if err = b.Get(16, buf); err != nil {
		t.Errorf("Get() error(%v)", err)
		t.FailNow()
	}
	if err = compareTestNeedle(t, 4, 4, needle.FlagOK, n, data, buf); err != nil {
		t.Errorf("compareTestNeedle(4) error(%v)", err)
		t.FailNow()
	}
	// test del, del first needles
	if err = b.Del(1); err != nil {
		t.Errorf("Del() error(%v)", err)
		t.FailNow()
	}
	// test get
	if err = b.Get(1, buf); err != nil {
		t.Errorf("Get() error(%v)", err)
		t.FailNow()
	}
	if err = compareTestNeedle(t, 1, 1, needle.FlagDel, n, data, buf); err != nil {
		t.FailNow()
	}
	if err = b.Get(11, buf); err != nil {
		t.Errorf("Get() error(%v)", err)
		t.FailNow()
	}
	if err = compareTestNeedle(t, 3, 3, needle.FlagOK, n, data, buf); err != nil {
		t.FailNow()
	}
	if err = b.Get(16, buf); err != nil {
		t.Errorf("b.Get() error(%v)", err)
		t.FailNow()
	}
	if err = compareTestNeedle(t, 4, 4, needle.FlagOK, n, data, buf); err != nil {
		t.FailNow()
	}
	// test recovery
	offset = b.Offset
	if err = b.Recovery(0, func(rn *needle.Needle, so, eo uint32) (err1 error) {
		if rn.Flag != needle.FlagOK {
			so = needle.CacheDelOffset
		}
		needles[rn.Key] = needle.NewCache(so, rn.TotalSize)
		return
	}); err != nil {
		t.Errorf("Recovery() error(%v)", err)
		t.FailNow()
	}
	if b.Offset != offset {
		err = fmt.Errorf("b.Offset not match %d", b.Offset)
		t.Error(err)
		t.FailNow()
	}
	// BUGFIX: the checks below used `&&`, which only failed when BOTH the
	// offset and the size mismatched; `||` fails on either mismatch.
	if o, s := needle.Cache(needles[1]); o != needle.CacheDelOffset || s != 40 {
		t.Error("needle.Cache() not match")
		t.FailNow()
	}
	if o, s := needle.Cache(needles[2]); o != v2 || s != 40 {
		t.Error("needle.Cache() not match")
		t.FailNow()
	}
	if o, s := needle.Cache(needles[3]); o != v3 || s != 40 {
		t.Error("needle.Cache() not match")
		t.FailNow()
	}
	if o, s := needle.Cache(needles[4]); o != v4 || s != 40 {
		t.Error("needle.Cache() not match")
		t.FailNow()
	}
	needles = make(map[int64]int64)
	if err = b.Recovery(v2, func(rn *needle.Needle, so, eo uint32) (err1 error) {
		if rn.Flag != needle.FlagOK {
			so = needle.CacheDelOffset
		}
		needles[rn.Key] = needle.NewCache(so, rn.TotalSize)
		return
	}); err != nil {
		t.Errorf("b.Recovery() error(%v)", err)
		t.FailNow()
	}
	// skip first needle, so key:1 must not exist (zero-value cache)
	if o, s := needle.Cache(needles[1]); o != 0 || s != 0 {
		t.Error("needle.Value(1) not match")
		t.FailNow()
	}
	if o, s := needle.Cache(needles[2]); o != v2 || s != 40 {
		t.Error("needle.Value(2) not match")
		t.FailNow()
	}
	if o, s := needle.Cache(needles[3]); o != v3 || s != 40 {
		t.Error("needle.Value(3) not match")
		t.FailNow()
	}
	if o, s := needle.Cache(needles[4]); o != v4 || s != 40 {
		t.Error("needle.Value(4) not match")
		t.FailNow()
	}
	// test repair
	n.Parse(3, 3, data)
	n.Fill(buf)
	if err = b.Repair(v3, buf); err != nil {
		t.Errorf("b.Repair(3) error(%v)", err)
		t.FailNow()
	}
	if err = b.Get(v3, buf); err != nil {
		t.Errorf("b.Get() error(%v)", err)
		t.FailNow()
	}
	if err = compareTestNeedle(t, 3, 3, needle.FlagOK, n, data, buf); err != nil {
		t.Errorf("compareTestNeedle(3) error(%v)", err)
		t.FailNow()
	}
	// test compact
	if err = b.Compact(0, func(rn *needle.Needle, so, eo uint32) (err1 error) {
		if rn.Flag != needle.FlagOK {
			return
		}
		needles[rn.Key] = needle.NewCache(so, rn.TotalSize)
		return
	}); err != nil {
		// BUGFIX: message said Compress() for a Compact() call
		t.Errorf("b.Compact() error(%v)", err)
		t.FailNow()
	}
	// NOTE(review): the `&&` conditions below are kept as written because
	// compaction may legitimately relocate needles, making o != v2/v3/v4;
	// switching to `||` (as done for the recovery checks above) should
	// only happen after confirming Compact reports original offsets.
	// skip first needle, so key:1 must not exist
	if o, s := needle.Cache(needles[1]); o != 0 && s != 0 {
		t.Error("needle.Value(1) not match")
		t.FailNow()
	}
	if o, s := needle.Cache(needles[2]); o != v2 && s != 40 {
		t.Error("needle.Value(2) not match")
		t.FailNow()
	}
	if o, s := needle.Cache(needles[3]); o != v3 && s != 40 {
		t.Error("needle.Value(3) not match")
		t.FailNow()
	}
	if o, s := needle.Cache(needles[4]); o != v4 && s != 40 {
		t.Error("needle.Value(4) not match")
		t.FailNow()
	}
}
Example 9
// TestIndex exercises the indexer life cycle: create, closed-state
// behavior, open, add, recovery, direct write, and flush.
func TestIndex(t *testing.T) {
	var (
		i       *Indexer
		err     error
		noffset uint32
		file    = "../test/test.idx"
		needles = make(map[int64]int64)
	)
	os.Remove(file)
	defer os.Remove(file)
	if i, err = NewIndexer(file, Options{
		BufferSize:    4 * 1024 * 1024,
		MergeAtTime:   10 * time.Second,
		MergeAtWrite:  5,
		RingBuffer:    10,
		SyncAtWrite:   10,
		Syncfilerange: true,
		NeedleMaxSize: 4 * 1024 * 1024,
	}); err != nil {
		t.Errorf("NewIndexer() error(%v)", err)
		t.FailNow()
	}
	i.Close()
	// test closed: Add on a closed indexer must report ErrIndexClosed
	if err = i.Add(1, 1, 8); err != errors.ErrIndexClosed {
		t.Errorf("Add() error(%v)", err)
		t.FailNow()
	}
	// test open
	if err = i.Open(); err != nil {
		t.Errorf("Open() error(%v)", err)
		t.FailNow()
	}
	defer i.Close()
	// test add
	if err = i.Add(1, 1, 8); err != nil {
		t.Errorf("Add() error(%v)", err)
		t.FailNow()
	}
	if err = i.Add(2, 2, 8); err != nil {
		t.Errorf("Add() error(%v)", err)
		t.FailNow()
	}
	if err = i.Add(5, 3, 8); err != nil {
		t.Errorf("Add() error(%v)", err)
		t.FailNow()
	}
	if err = i.Add(6, 4, 8); err != nil {
		t.Errorf("Add() error(%v)", err)
		t.FailNow()
	}
	i.Signal()
	time.Sleep(1 * time.Second)
	i.Flush()
	// test recovery
	if err = i.Recovery(func(ix *Index) error {
		needles[ix.Key] = needle.NewCache(ix.Offset, ix.Size)
		noffset = ix.Offset + needle.NeedleOffset(int64(ix.Size))
		return nil
	}); err != nil {
		t.Errorf("Recovery() error(%v)", err)
		t.FailNow()
	}
	// add 4 index, start with 5
	if noffset != 5 {
		t.Errorf("noffset: %d not match", noffset)
		t.FailNow()
	}
	// BUGFIX: these checks used `&&`, which only failed when BOTH the
	// offset and the size mismatched; `||` fails on either mismatch.
	if o, s := needle.Cache(needles[1]); o != 1 || s != 8 {
		t.Error("needle cache not match")
		t.FailNow()
	}
	if o, s := needle.Cache(needles[2]); o != 2 || s != 8 {
		t.Error("needle cache not match")
		t.FailNow()
	}
	if o, s := needle.Cache(needles[5]); o != 3 || s != 8 {
		t.Error("needle cache not match")
		t.FailNow()
	}
	if o, s := needle.Cache(needles[6]); o != 4 || s != 8 {
		t.Error("needle cache not match")
		t.FailNow()
	}
	// test write
	if err = i.Write(10, 5, 8); err != nil {
		// BUGFIX: t.Error does not interpret format verbs; use t.Errorf
		t.Errorf("Write() error(%v)", err)
		t.FailNow()
	}
	if err = i.Flush(); err != nil {
		t.Errorf("Flush() error(%v)", err)
		t.FailNow()
	}
	// test recovery
	noffset = 0
	if err = i.Recovery(func(ix *Index) error {
		needles[ix.Key] = needle.NewCache(ix.Offset, ix.Size)
		noffset = ix.Offset + needle.NeedleOffset(int64(ix.Size))
		return nil
	}); err != nil {
		t.Errorf("Recovery() error(%v)", err)
		t.FailNow()
	}
	// add 5 index, start with 6
	if noffset != 6 {
		t.Errorf("noffset: %d not match", noffset)
		t.FailNow()
	}
	if o, s := needle.Cache(needles[10]); o != 5 || s != 8 {
		t.Error("needle.Cache(10) not match")
		t.FailNow()
	}
}