Example 1
// Delete logically deletes a needle: it updates the needle flag on disk and
// points the in-memory needle cache at the deleted sentinel (CacheDelOffset).
func (v *Volume) Delete(key int64) (err error) {
	var (
		ok     bool
		nc     int64
		size   int32
		offset uint32
	)
	v.lock.Lock()
	if nc, ok = v.needles[key]; ok {
		if offset, size = needle.Cache(nc); offset != needle.CacheDelOffset {
			v.needles[key] = needle.NewCache(needle.CacheDelOffset, size)
			// while a compaction is running, record every delete so it can be replayed afterwards.
			if v.Compact {
				v.compactKeys = append(v.compactKeys, key)
			}
		} else {
			err = errors.ErrNeedleDeleted
		}
	} else {
		err = errors.ErrNeedleNotExist
	}
	v.lock.Unlock()
	if err == nil {
		err = v.del(offset)
	}
	return
}
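All of these examples are methods on a Volume. For orientation, here is a hedged reconstruction of the fields they touch; only what the code in these examples implies is certain, and the concrete shapes of SuperBlock, Indexer and Stats are assumptions.

// Hedged sketch: Volume fields used by Delete/Write/Writes/get in these
// examples. Field names come from the code above and below; exact types
// beyond what that code implies are assumptions.
type Volume struct {
	lock        sync.RWMutex
	needles     map[int64]int64 // key -> packed needle cache (offset, size)
	Block       *SuperBlock     // append-only data file
	Indexer     *Indexer        // key/offset/size index records
	Stats       *Stats          // uint64 counters updated via sync/atomic
	Compact     bool            // true while a compaction is running
	compactKeys []int64         // deletes recorded during compaction
}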
Example 2
// Write appends a needle to the super block and the index, then points the
// needle cache at the new offset. If the key already exists, the old copy is
// marked deleted.
func (v *Volume) Write(n *needle.Needle) (err error) {
	var (
		ok      bool
		nc      int64
		offset  uint32
		ooffset uint32
		now     = time.Now().UnixNano()
	)
	v.lock.Lock()
	offset = v.Block.Offset
	if err = v.Block.Write(n); err == nil {
		if err = v.Indexer.Add(n.Key, offset, n.TotalSize); err == nil {
			nc, ok = v.needles[n.Key]
			v.needles[n.Key] = needle.NewCache(offset, n.TotalSize)
		}
	}
	v.lock.Unlock()
	if err != nil {
		return
	}
	if log.V(1) {
		log.Infof("add needle, offset: %d, size: %d", offset, n.TotalSize)
		log.Info(n)
	}
	if ok {
		ooffset, _ = needle.Cache(nc)
		err = v.del(ooffset)
		log.Warningf("same key: %d, old offset: %d, new offset: %d", n.Key, ooffset, offset)
	}
	atomic.AddUint64(&v.Stats.TotalWriteProcessed, 1)
	atomic.AddUint64(&v.Stats.TotalWriteBytes, uint64(n.TotalSize))
	atomic.AddUint64(&v.Stats.TotalWriteDelay, uint64(time.Now().UnixNano()-now))
	return
}
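needle.NewCache and needle.Cache are used throughout these examples as an encode/decode pair for the in-memory map. A minimal sketch of one way such packing could work, assuming the offset sits in the high 32 bits and the size in the low 32 bits; the real helpers may differ.

// Sketch only: pack an (offset, size) pair into a single int64 so the
// in-memory needle map stays one machine word per entry.
func newCache(offset uint32, size int32) int64 {
	return int64(offset)<<32 | int64(uint32(size))
}

// Sketch only: the inverse of newCache.
func cacheOf(nc int64) (offset uint32, size int32) {
	offset = uint32(uint64(nc) >> 32)
	size = int32(uint32(nc))
	return
}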
Example 3
// get reads a needle by key: it looks up the cached offset under a read lock,
// reads the needle from the super block, then validates its key, size and flag.
func (v *Volume) get(n *needle.Needle) (err error) {
	var (
		ok     bool
		nc     int64
		offset uint32
		size   int32
		key    = n.Key
		now    = time.Now().UnixNano()
	)
	// the pread syscall is atomic and does not move the shared file offset, so a read lock is enough
	v.lock.RLock()
	if nc, ok = v.needles[key]; ok {
		if offset, size = needle.Cache(nc); offset != needle.CacheDelOffset {
			n.TotalSize = size
			err = v.Block.ReadAt(offset, n)
		} else {
			err = errors.ErrNeedleDeleted
		}
	} else {
		err = errors.ErrNeedleNotExist
	}
	v.lock.RUnlock()
	if err != nil {
		return
	}
	if err = n.Parse(); err != nil {
		return
	}
	if n.Key != key {
		return errors.ErrNeedleKey
	}
	if n.TotalSize != size {
		return errors.ErrNeedleSize
	}
	if log.V(1) {
		log.Infof("get needle key: %d, cookie: %d, offset: %d, size: %d", n.Key, n.Cookie, offset, size)
		log.Infof("%v\n", n)
	}
	// the needles map may be stale; recheck the flag read from disk
	if n.Flag == needle.FlagDel {
		v.lock.Lock()
		v.needles[key] = needle.NewCache(needle.CacheDelOffset, size)
		v.lock.Unlock()
		err = errors.ErrNeedleDeleted
	} else {
		atomic.AddUint64(&v.Stats.TotalGetProcessed, 1)
		atomic.AddUint64(&v.Stats.TotalReadBytes, uint64(size))
		atomic.AddUint64(&v.Stats.TotalGetDelay, uint64(time.Now().UnixNano()-now))
	}
	return
}
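The read lock in get relies on the pread property noted above: a positional read does not move the shared file offset, so concurrent readers need no exclusive lock. A minimal sketch of a ReadAt built on os.File.ReadAt (pread on Unix); the 8-byte offset unit is an assumption, though it is consistent with the offsets seen in the tests below.

// Sketch only: positional read that leaves the shared file offset untouched,
// which is why Volume.get can hold just an RLock around Block.ReadAt.
func blockReadAt(f *os.File, offset uint32, buf []byte) error {
	_, err := f.ReadAt(buf, int64(offset)*8) // 8-byte offset units assumed
	return err
}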
Example 4
// Writes appends a batch of needles to the super block and the index, then
// points each needle cache at its new offset. If a key already exists, the
// old copy is marked deleted.
func (v *Volume) Writes(ns *needle.Needles) (err error) {
	var (
		i       int
		ok      bool
		nc      int64
		ncs     []int64
		offset  uint32
		ooffset uint32
		n       *needle.Needle
		now     = time.Now().UnixNano()
	)
	v.lock.Lock()
	offset = v.Block.Offset
	if err = v.Block.Writes(ns); err == nil {
		for i = 0; i < ns.Num; i++ {
			n = ns.Needle(i)
			if err = v.Indexer.Add(n.Key, offset, n.TotalSize); err != nil {
				break
			}
			if nc, ok = v.needles[n.Key]; ok {
				ncs = append(ncs, nc)
			}
			v.needles[n.Key] = needle.NewCache(offset, n.TotalSize)
			if log.V(1) {
				log.Infof("add needle, offset: %d, size: %d", offset, n.TotalSize)
				log.Info(n)
			}
			offset += n.IncrOffset
		}
	}
	v.lock.Unlock()
	if err != nil {
		return
	}
	// mark the old copies of duplicate keys as deleted; the needle cache
	// already points at the new offsets.
	for _, nc = range ncs {
		ooffset, _ = needle.Cache(nc)
		err = v.del(ooffset)
		log.Warningf("duplicate key, old offset: %d marked deleted", ooffset)
	}
	atomic.AddUint64(&v.Stats.TotalWriteProcessed, uint64(ns.Num))
	atomic.AddUint64(&v.Stats.TotalWriteBytes, uint64(ns.TotalSize))
	atomic.AddUint64(&v.Stats.TotalWriteDelay, uint64(time.Now().UnixNano()-now))
	return
}
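Note that offsets in these examples are counted in fixed-size units rather than bytes: Writes advances offset by n.IncrOffset per needle, and the index test below maps a byte size back to units with needle.NeedleOffset. A hedged sketch of that conversion, assuming 8-byte alignment, which matches the numbers in the tests (size 8 -> 1 unit, size 40 -> 5 units):

// Sketch only: convert a padded needle size in bytes into block-offset units.
const paddingSize = 8 // assumed alignment

func needleOffset(size int64) uint32 {
	return uint32(size / paddingSize)
}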
Example 5
func TestIndex(t *testing.T) {
	var (
		i       *Indexer
		err     error
		noffset uint32
		file    = "../test/test.idx"
		needles = make(map[int64]int64)
	)
	os.Remove(file)
	defer os.Remove(file)
	if i, err = NewIndexer(file, testConf); err != nil {
		t.Errorf("NewIndexer() error(%v)", err)
		t.FailNow()
	}
	i.Close()
	// test closed
	if err = i.Add(1, 1, 8); err != errors.ErrIndexClosed {
		t.Errorf("Add() error(%v)", err)
		t.FailNow()
	}
	// test open
	if err = i.Open(); err != nil {
		t.Errorf("Open() error(%v)", err)
		t.FailNow()
	}
	defer i.Close()
	// test add
	if err = i.Add(1, 1, 8); err != nil {
		t.Errorf("Add() error(%v)", err)
		t.FailNow()
	}
	if err = i.Add(2, 2, 8); err != nil {
		t.Errorf("Add() error(%v)", err)
		t.FailNow()
	}
	if err = i.Add(5, 3, 8); err != nil {
		t.Errorf("Add() error(%v)", err)
		t.FailNow()
	}
	if err = i.Add(6, 4, 8); err != nil {
		t.Errorf("Add() error(%v)", err)
		t.FailNow()
	}
	i.Signal()
	time.Sleep(1 * time.Second)
	i.Flush()
	// test recovery
	if err = i.Recovery(func(ix *Index) error {
		needles[ix.Key] = needle.NewCache(ix.Offset, ix.Size)
		noffset = ix.Offset + needle.NeedleOffset(int64(ix.Size))
		return nil
	}); err != nil {
		t.Errorf("Recovery() error(%v)", err)
		t.FailNow()
	}
	// 4 indexes were added, so the next offset should be 5
	if noffset != 5 {
		t.Errorf("noffset: %d not match", noffset)
		t.FailNow()
	}
	if o, s := needle.Cache(needles[1]); o != 1 || s != 8 {
		t.Error("needle cache not match")
		t.FailNow()
	}
	if o, s := needle.Cache(needles[2]); o != 2 || s != 8 {
		t.Error("needle cache not match")
		t.FailNow()
	}
	if o, s := needle.Cache(needles[5]); o != 3 || s != 8 {
		t.Error("needle cache not match")
		t.FailNow()
	}
	if o, s := needle.Cache(needles[6]); o != 4 || s != 8 {
		t.Error("needle cache not match")
		t.FailNow()
	}
	// test write
	if err = i.Write(10, 5, 8); err != nil {
		t.Error("Write() error(%v)", err)
		t.FailNow()
	}
	if err = i.Flush(); err != nil {
		t.Error("Flush() error(%v)", err)
		t.FailNow()
	}
	// test recovery
	noffset = 0
	if err = i.Recovery(func(ix *Index) error {
		needles[ix.Key] = needle.NewCache(ix.Offset, ix.Size)
		noffset = ix.Offset + needle.NeedleOffset(int64(ix.Size))
		return nil
	}); err != nil {
		t.Errorf("Recovery() error(%v)", err)
		t.FailNow()
	}
	// 5 indexes were added, so the next offset should be 6
	if noffset != 6 {
		t.Errorf("noffset: %d not match", noffset)
		t.FailNow()
	}
	if o, s := needle.Cache(needles[10]); o != 5 || s != 8 {
		t.Error("needle.Cache(10) not match")
		t.FailNow()
	}
}
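The Recovery callback exercised by this test is the same shape a volume could use to rebuild its in-memory needle map on startup. A hedged sketch follows; the helper itself is hypothetical, while the calls inside it mirror the test above.

// Sketch only: rebuild the key -> needle cache map and the next write offset
// from the index file, exactly as the Recovery callback in the test does.
func rebuildFromIndex(i *Indexer) (needles map[int64]int64, nextOffset uint32, err error) {
	needles = make(map[int64]int64)
	err = i.Recovery(func(ix *Index) error {
		needles[ix.Key] = needle.NewCache(ix.Offset, ix.Size)
		nextOffset = ix.Offset + needle.NeedleOffset(int64(ix.Size))
		return nil
	})
	return
}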
Example 6
func TestSuperBlock(t *testing.T) {
	var (
		b                  *SuperBlock
		offset, v2, v3, v4 uint32
		err                error
		buf                = &bytes.Buffer{}
		needles            = make(map[int64]int64)
		data               = []byte("test")
		n                  = needle.NewBufferNeedle(4)
		file               = "../test/test.block"
		ifile              = "../test/test.idx"
		//indexer *Indexer
	)
	os.Remove(file)
	os.Remove(ifile)
	defer os.Remove(file)
	defer os.Remove(ifile)
	// test new block file
	if b, err = NewSuperBlock(file, testConf); err != nil {
		t.Errorf("NewSuperBlock(\"%s\") error(%v)", file, err)
		t.FailNow()
	}
	b.Close()
	// test parse block file
	if b, err = NewSuperBlock(file, testConf); err != nil {
		t.Errorf("NewSuperBlock(\"%s\") error(%v)", file, err)
		t.FailNow()
	}
	b.Close()
	// test open
	if err = b.Open(); err != nil {
		t.Errorf("Open() error(%v)", err)
		t.FailNow()
	}
	defer b.Close()
	// test write
	if _, err = buf.Write(data); err != nil {
		t.Errorf("buf.Write() error(%v)", err)
		t.FailNow()
	}
	if err = n.WriteFrom(1, 1, 4, buf); err != nil {
		t.Errorf("n.Write() error(%v)", err)
		t.FailNow()
	}
	if err = b.Write(n); err != nil {
		t.Errorf("b.Write() error(%v)", err)
		t.FailNow()
	}
	if err = compareTestOffset(b, n, needle.NeedleOffset(int64(_headerSize))); err != nil {
		t.Errorf("compareTestOffset() error(%v)", err)
		t.FailNow()
	}
	offset = b.Offset
	v2 = b.Offset
	// test get
	if err = b.ReadAt(1, n); err != nil {
		t.Errorf("b.ReadAt() error(%v)", err)
		t.FailNow()
	}
	if err = compareTestNeedle(t, 1, 1, needle.FlagOK, n, data); err != nil {
		t.Errorf("compareTestNeedle() error(%v)", err)
		t.FailNow()
	}
	// test write
	if _, err = buf.Write(data); err != nil {
		t.Errorf("buf.Write() error(%v)", err)
		t.FailNow()
	}
	if err = n.WriteFrom(2, 2, 4, buf); err != nil {
		t.Errorf("n.Write() error(%v)", err)
		t.FailNow()
	}
	if err = b.Write(n); err != nil {
		t.Errorf("b.Write() error(%v)", err)
		t.FailNow()
	}
	if err = compareTestOffset(b, n, offset); err != nil {
		t.Errorf("compareTestOffset() error(%v)", err)
		t.FailNow()
	}
	offset = b.Offset
	v3 = b.Offset
	if err = b.ReadAt(6, n); err != nil {
		t.Errorf("b.ReadAt() error(%v)", err)
		t.FailNow()
	}
	if err = compareTestNeedle(t, 2, 2, needle.FlagOK, n, data); err != nil {
		t.Error("compareTestNeedle(2)")
		t.FailNow()
	}
	// test write
	if _, err = buf.Write(data); err != nil {
		t.Errorf("buf.Write() error(%v)", err)
		t.FailNow()
	}
	if err = n.WriteFrom(3, 3, 4, buf); err != nil {
		t.Errorf("n.Write() error(%v)", err)
		t.FailNow()
	}
	if err = b.Write(n); err != nil {
		t.Errorf("b.Write() error(%v)", err)
		t.FailNow()
	}
	offset = b.Offset
	v4 = b.Offset
	// test write
	if _, err = buf.Write(data); err != nil {
		t.Errorf("buf.Write() error(%v)", err)
		t.FailNow()
	}
	if err = n.WriteFrom(4, 4, 4, buf); err != nil {
		t.Errorf("n.Write() error(%v)", err)
		t.FailNow()
	}
	if err = b.Write(n); err != nil {
		t.Errorf("b.Write() error(%v)", err)
		t.FailNow()
	}
	if err = b.flush(true); err != nil {
		t.Errorf("Flush() error(%v)", err)
		t.FailNow()
	}
	if err = compareTestOffset(b, n, offset); err != nil {
		t.Errorf("compareTestOffset() error(%v)", err)
		t.FailNow()
	}
	if err = b.ReadAt(11, n); err != nil {
		t.Errorf("Get() error(%v)", err)
		t.FailNow()
	}
	if err = compareTestNeedle(t, 3, 3, needle.FlagOK, n, data); err != nil {
		t.Error("compareTestNeedle(3)")
		t.FailNow()
	}
	if err = b.ReadAt(16, n); err != nil {
		t.Errorf("Get() error(%v)", err)
		t.FailNow()
	}
	if err = compareTestNeedle(t, 4, 4, needle.FlagOK, n, data); err != nil {
		t.Error("compareTestNeedle(r)")
		t.FailNow()
	}
	// test delete: delete the first needle
	if err = b.Delete(1); err != nil {
		t.Errorf("Del() error(%v)", err)
		t.FailNow()
	}
	// test get
	if err = b.ReadAt(1, n); err != nil {
		t.Errorf("Get() error(%v)", err)
		t.FailNow()
	}
	if err = compareTestNeedle(t, 1, 1, needle.FlagDel, n, data); err != nil {
		t.FailNow()
	}
	if err = b.ReadAt(11, n); err != nil {
		t.Errorf("Get() error(%v)", err)
		t.FailNow()
	}
	if err = compareTestNeedle(t, 3, 3, needle.FlagOK, n, data); err != nil {
		t.FailNow()
	}
	if err = b.ReadAt(16, n); err != nil {
		t.Errorf("b.Get() error(%v)", err)
		t.FailNow()
	}
	if err = compareTestNeedle(t, 4, 4, needle.FlagOK, n, data); err != nil {
		t.FailNow()
	}
	// test recovery
	offset = b.Offset
	if err = b.Recovery(0, func(rn *needle.Needle, so, eo uint32) (err1 error) {
		if rn.Flag != needle.FlagOK {
			so = needle.CacheDelOffset
		}
		needles[rn.Key] = needle.NewCache(so, rn.TotalSize)
		return
	}); err != nil {
		t.Errorf("Recovery() error(%v)", err)
		t.FailNow()
	}
	if b.Offset != offset {
		err = fmt.Errorf("b.Offset not match %d", b.Offset)
		t.Error(err)
		t.FailNow()
	}
	if o, s := needle.Cache(needles[1]); o != needle.CacheDelOffset || s != 40 {
		t.Error("needle.Cache() not match")
		t.FailNow()
	}
	if o, s := needle.Cache(needles[2]); o != v2 || s != 40 {
		t.Error("needle.Cache() not match")
		t.FailNow()
	}
	if o, s := needle.Cache(needles[3]); o != v3 || s != 40 {
		t.Error("needle.Cache() not match")
		t.FailNow()
	}
	if o, s := needle.Cache(needles[4]); o != v4 || s != 40 {
		t.Error("needle.Cache() not match")
		t.FailNow()
	}
	needles = make(map[int64]int64)
	if err = b.Recovery(v2, func(rn *needle.Needle, so, eo uint32) (err1 error) {
		if rn.Flag != needle.FlagOK {
			so = needle.CacheDelOffset
		}
		needles[rn.Key] = needle.NewCache(so, rn.TotalSize)
		return
	}); err != nil {
		t.Errorf("b.Recovery() error(%v)", err)
		t.FailNow()
	}
	// the first needle was skipped, so key 1 must not be present
	if o, s := needle.Cache(needles[1]); o != 0 || s != 0 {
		t.Error("needle.Cache(1) should be empty")
		t.FailNow()
	}
	if o, s := needle.Cache(needles[2]); o != v2 || s != 40 {
		t.Error("needle.Cache(2) not match")
		t.FailNow()
	}
	if o, s := needle.Cache(needles[3]); o != v3 || s != 40 {
		t.Error("needle.Cache(3) not match")
		t.FailNow()
	}
	if o, s := needle.Cache(needles[4]); o != v4 || s != 40 {
		t.Error("needle.Cache(4) not match")
		t.FailNow()
	}
	// test repair
	if _, err = buf.Write(data); err != nil {
		t.Errorf("buf.Write() error(%v)", err)
		t.FailNow()
	}
	if err = n.WriteFrom(3, 3, 4, buf); err != nil {
		t.Errorf("n.Write() error(%v)", err)
		t.FailNow()
	}
	if err = b.WriteAt(v3, n); err != nil {
		t.Errorf("b.Repair(3) error(%v)", err)
		t.FailNow()
	}
	if err = b.ReadAt(v3, n); err != nil {
		t.Errorf("b.Get() error(%v)", err)
		t.FailNow()
	}
	if err = compareTestNeedle(t, 3, 3, needle.FlagOK, n, data); err != nil {
		t.Error("compareTestNeedle(3)")
		t.FailNow()
	}
	// test compact
	if err = b.Compact(0, func(rn *needle.Needle, so, eo uint32) (err1 error) {
		if rn.Flag != needle.FlagOK {
			return
		}
		needles[rn.Key] = needle.NewCache(so, rn.TotalSize)
		return
	}); err != nil {
		t.Errorf("b.Compress() error(%v)", err)
		t.FailNow()
	}
	// the first needle was skipped, so key 1 must not be present
	if o, s := needle.Cache(needles[1]); o != 0 || s != 0 {
		t.Error("needle.Cache(1) should be empty")
		t.FailNow()
	}
	if o, s := needle.Cache(needles[2]); o != v2 || s != 40 {
		t.Error("needle.Cache(2) not match")
		t.FailNow()
	}
	if o, s := needle.Cache(needles[3]); o != v3 || s != 40 {
		t.Error("needle.Cache(3) not match")
		t.FailNow()
	}
	if o, s := needle.Cache(needles[4]); o != v4 || s != 40 {
		t.Error("needle.Cache(4) not match")
		t.FailNow()
	}
}
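compareTestNeedle and compareTestOffset are helpers that are not shown in these examples. A plausible sketch of the former, assuming Needle exposes Key, Cookie, Flag and Data fields with the types used here (the Cookie and Flag types in particular are guesses):

// Sketch only: check that a needle read back from the block matches what was
// written. Field names follow the examples above; types are assumptions.
func compareTestNeedle(t *testing.T, key int64, cookie int64, flag byte, n *needle.Needle, data []byte) error {
	if n.Key != key || n.Cookie != cookie || n.Flag != flag || !bytes.Equal(n.Data, data) {
		return fmt.Errorf("needle %+v does not match key=%d cookie=%d flag=%d data=%q",
			n, key, cookie, flag, data)
	}
	return nil
}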