Example #1
func newTripleStore(path string, options graph.Options) (graph.TripleStore, error) {
	var qs TripleStore
	var err error
	qs.path = path
	cache_size := DefaultCacheSize
	if val, ok := options.IntKey("cache_size_mb"); ok {
		cache_size = val
	}
	qs.dbOpts = &opt.Options{
		BlockCache: cache.NewLRUCache(cache_size * opt.MiB),
	}
	qs.dbOpts.ErrorIfMissing = true

	write_buffer_mb := DefaultWriteBufferSize
	if val, ok := options.IntKey("write_buffer_mb"); ok {
		write_buffer_mb = val
	}
	qs.dbOpts.WriteBuffer = write_buffer_mb * opt.MiB
	qs.writeopts = &opt.WriteOptions{
		Sync: false,
	}
	qs.readopts = &opt.ReadOptions{}
	db, err := leveldb.OpenFile(qs.path, qs.dbOpts)
	if err != nil {
		glog.Errorln("Error, couldn't open! ", err)
		return nil, err
	}
	qs.db = db
	glog.Infoln(qs.GetStats())
	err = qs.getMetadata()
	if err != nil {
		return nil, err
	}
	return &qs, nil
}
Example #2
func newTripleStore(path string, options graph.Options) (graph.TripleStore, error) {
	var ts TripleStore
	ts.path = path
	cache_size := DefaultCacheSize
	if val, ok := options.IntKey("cache_size_mb"); ok {
		cache_size = val
	}
	ts.dbOpts = &opt.Options{
		BlockCache: cache.NewLRUCache(cache_size * opt.MiB),
	}
	ts.dbOpts.ErrorIfMissing = true

	write_buffer_mb := DefaultWriteBufferSize
	if val, ok := options.IntKey("write_buffer_mb"); ok {
		write_buffer_mb = val
	}
	ts.dbOpts.WriteBuffer = write_buffer_mb * opt.MiB
	ts.hasher = sha1.New()
	ts.writeopts = &opt.WriteOptions{
		Sync: false,
	}
	ts.readopts = &opt.ReadOptions{}
	db, err := leveldb.OpenFile(ts.path, ts.dbOpts)
	if err != nil {
		panic("Error, couldn't open! " + err.Error())
	}
	ts.db = db
	glog.Infoln(ts.GetStats())
	ts.getSize()
	return &ts, nil
}
Example #3
func (s *session) setOptions(o *opt.Options) {
	s.o = &opt.Options{}
	if o != nil {
		*s.o = *o
	}
	// Alternative filters.
	if filters := o.GetAltFilters(); len(filters) > 0 {
		s.o.AltFilters = make([]filter.Filter, len(filters))
		for i, filter := range filters {
			s.o.AltFilters[i] = &iFilter{filter}
		}
	}
	// Block cache.
	switch o.GetBlockCache() {
	case nil:
		s.o.BlockCache = cache.NewLRUCache(opt.DefaultBlockCacheSize)
	case opt.NoCache:
		s.o.BlockCache = nil
	}
	// Comparer.
	s.cmp = &iComparer{o.GetComparer()}
	s.o.Comparer = s.cmp
	// Filter.
	if filter := o.GetFilter(); filter != nil {
		s.o.Filter = &iFilter{filter}
	}
}
Example #4
func (s *session) setOptions(o *opt.Options) {
	no := dupOptions(o)
	// Alternative filters.
	if filters := o.GetAltFilters(); len(filters) > 0 {
		no.AltFilters = make([]filter.Filter, len(filters))
		for i, filter := range filters {
			no.AltFilters[i] = &iFilter{filter}
		}
	}
	// Block cache.
	switch o.GetBlockCache() {
	case nil:
		no.BlockCache = cache.NewLRUCache(o.GetBlockCacheSize())
	case opt.NoCache:
		no.BlockCache = nil
	}
	// Comparer.
	s.icmp = &iComparer{o.GetComparer()}
	no.Comparer = s.icmp
	// Filter.
	if filter := o.GetFilter(); filter != nil {
		no.Filter = &iFilter{filter}
	}

	s.o = &cachedOptions{Options: no}
	s.o.cache()
}
Example #5
func newDbCorruptHarness(t *testing.T) *dbCorruptHarness {
	h := new(dbCorruptHarness)
	h.init(t, &opt.Options{
		Flag:       opt.OFCreateIfMissing,
		BlockCache: cache.NewLRUCache(100),
	})
	return h
}
Example #6
func newDbCorruptHarness(t *testing.T) *dbCorruptHarness {
	h := new(dbCorruptHarness)
	h.init(t, &opt.Options{
		BlockCache: cache.NewLRUCache(100),
		Strict:     opt.StrictJournalChecksum,
	})
	return h
}
Example #7
func (o *iOptions) sanitize() {
	if p := o.GetBlockCache(); p == nil {
		o.Options.SetBlockCache(cache.NewLRUCache(opt.DefaultBlockCacheSize))
	}

	if p := o.GetFilter(); p != nil {
		o.Options.SetFilter(&iFilter{p})
	}
}
Example #8
// Creates a new, initialized table ops instance.
func newTableOps(s *session, cacheCap int) *tOps {
	c := cache.NewLRUCache(cacheCap)
	return &tOps{
		s:       s,
		cache:   c,
		cacheNS: c.GetNamespace(0),
		bpool:   util.NewBufferPool(s.o.GetBlockSize() + 5),
	}
}
Example #9
func newDbCorruptHarness(t *testing.T) *dbCorruptHarness {
	return newDbCorruptHarnessWopt(t, &opt.Options{
		BlockCache: cache.NewLRUCache(100),
		Strict:     opt.StrictJournalChecksum,
	})
}
Example #10
func (o *Options) GetBlockCache() cache.Cache {
	// Check for a nil receiver before touching its mutex.
	if o == nil {
		return nil
	}
	o.mu.Lock()
	defer o.mu.Unlock()
	if o.BlockCache == nil {
		o.BlockCache = cache.NewLRUCache(8 << 20)
	}
	return o.BlockCache
}
Example #11
func (o *iOptions) sanitize() {
	if p := o.GetBlockCache(); p == nil {
		o.Options.SetBlockCache(cache.NewLRUCache(opt.DefaultBlockCacheSize))
	}

	for _, p := range o.GetAltFilters() {
		o.InsertAltFilter(p)
	}

	if p := o.GetFilter(); p != nil {
		o.SetFilter(p)
	}
}
Example #12
File: db.go Project: parkghost/nodb
func newOptions(cfg *config.LevelDBConfig) *opt.Options {
	opts := &opt.Options{}
	opts.ErrorIfMissing = false

	cfg.Adjust()

	opts.BlockCache = cache.NewLRUCache(cfg.CacheSize)

	// We must use a bloom filter.
	opts.Filter = filter.NewBloomFilter(defaultFilterBits)

	if !cfg.Compression {
		opts.Compression = opt.NoCompression
	} else {
		opts.Compression = opt.SnappyCompression
	}

	opts.BlockSize = cfg.BlockSize
	opts.WriteBuffer = cfg.WriteBufferSize

	return opts
}
Example #13
func TestCorruptDB_MissingManifest(t *testing.T) {
	rnd := rand.New(rand.NewSource(0x0badda7a))
	h := newDbCorruptHarnessWopt(t, &opt.Options{
		BlockCache:  cache.NewLRUCache(100),
		Strict:      opt.StrictJournalChecksum,
		WriteBuffer: 1000 * 60,
	})

	h.build(1000)
	h.compactMem()
	h.buildShuffled(1000, rnd)
	h.compactMem()
	h.deleteRand(500, 1000, rnd)
	h.compactMem()
	h.buildShuffled(1000, rnd)
	h.compactMem()
	h.deleteRand(500, 1000, rnd)
	h.compactMem()
	h.buildShuffled(1000, rnd)
	h.compactMem()
	h.closeDB()

	h.stor.SetIgnoreOpenErr(storage.TypeManifest)
	h.removeAll(storage.TypeManifest)
	h.openAssert(false)
	h.stor.SetIgnoreOpenErr(0)

	h.recover()
	h.check(1000, 1000)
	h.build(1000)
	h.compactMem()
	h.compactRange("", "")
	h.closeDB()

	h.recover()
	h.check(1000, 1000)

	h.close()
}
Example #14
func newTableOps(s *session, cacheCap int) *tOps {
	c := cache.NewLRUCache(cacheCap)
	ns := c.GetNamespace(0)
	return &tOps{s, c, ns}
}
Example #15
func NewLevelDataStore(conf map[string]string) (DataStore, error) {
	encoder := LEVEL_ENCODER_MSGPAK
	path := ""
	writeBuffer := 16777216
	readCache := 16777216
	if v, ok := conf["debug_leveldb_encoder"]; ok && v != "" {
		switch v {
		case LEVEL_ENCODER_GOB, LEVEL_ENCODER_MSGPAK:
			encoder = v
		}
	}
	if v, ok := conf["leveldb_path"]; ok && v != "" {
		path = v
	}
	if v, ok := conf["leveldb_cache"]; ok && v != "" {
		if n, err := strconv.Atoi(v); err == nil {
			if n < 8388608 {
				n = 8388608
			}
			readCache = n
		}
	}
	if v, ok := conf["leveldb_writebuffer"]; ok && v != "" {
		if n, err := strconv.Atoi(v); err == nil {
			if n < 4194304 {
				n = 4194304
			}
			writeBuffer = n
		}
	}
	o := &opt.Options{
		Filter:      filter.NewBloomFilter(10),
		WriteBuffer: writeBuffer,
		BlockCache:  cache.NewLRUCache(readCache),
	}
	lds := new(LevelDataStore)
	lds.dbPath = path
	if lds.dbPath == "" {
		if p, err := ioutil.TempDir("", "tblvl"); err == nil {
			lds.dbPath = p
			lds.isTemp = true
		} else {
			return nil, err
		}
	}
	switch encoder {
	case LEVEL_ENCODER_MSGPAK:
		lds.encoder = msgpackEncoder{}
	default:
		encoder = LEVEL_ENCODER_GOB
		lds.encoder = gobEncoder{}
	}
	if db, err := leveldb.OpenFile(lds.dbPath, o); err == nil {
		if v, e := db.Get([]byte("_VERSION"), nil); e == nil {
			if v[0] != LEVELDB_VERSION {
				return nil, fmt.Errorf("Invalid LevelDB Version (%d)", v)
			}
		} else if e != util.ErrNotFound {
			return nil, e
		}

		if v, e := db.Get([]byte("_ENC"), nil); e == nil {
			if string(v) != encoder {
				return nil, fmt.Errorf("Unexpected LevelDB encoding (Expected '%s', Got '%s')", encoder, v)
			}
		} else if e != util.ErrNotFound {
			return nil, e
		}

		db.Put([]byte("_VERSION"), []byte{LEVELDB_VERSION}, nil)
		db.Put([]byte("_ENC"), []byte(encoder), nil)
		lds.db = db
		return lds, nil
	} else {
		return nil, err
	}
}
Example #16
func newDbCorruptHarness(t *testing.T) *dbCorruptHarness {
	return newDbCorruptHarnessWopt(t, &opt.Options{
		BlockCache: cache.NewLRUCache(100),
		Strict:     opt.StrictJournalChecksum,
	})
}
Example #17
func TestDb_Concurrent(t *testing.T) {
	const n, secs, maxkey = 4, 2, 1000

	runtime.GOMAXPROCS(n)
	runAllOpts(t, func(h *dbHarness) {
		var wg sync.WaitGroup
		var stop uint32
		var cnt [n]uint32

		for i := 0; i < n; i++ {
			wg.Add(1)
			go func(i int) {
				var put, get, found uint
				defer func() {
					t.Logf("goroutine %d stopped after %d ops, put=%d get=%d found=%d missing=%d",
						i, cnt[i], put, get, found, get-found)
					wg.Done()
				}()

				rnd := rand.New(rand.NewSource(int64(1000 + i)))
				for atomic.LoadUint32(&stop) == 0 {
					x := cnt[i]

					k := rnd.Intn(maxkey)
					kstr := fmt.Sprintf("%016d", k)

					if (rnd.Int() % 2) > 0 {
						put++
						h.put(kstr, fmt.Sprintf("%d.%d.%-1000d", k, i, x))
					} else {
						get++
						v, err := h.db.Get([]byte(kstr), h.ro)
						if err == nil {
							found++
							rk, ri, rx := 0, -1, uint32(0)
							fmt.Sscanf(string(v), "%d.%d.%d", &rk, &ri, &rx)
							if rk != k {
								t.Errorf("invalid key want=%d got=%d", k, rk)
							}
							if ri < 0 || ri >= n {
								t.Error("invalid goroutine number: ", ri)
							} else {
								tx := atomic.LoadUint32(&(cnt[ri]))
								if rx > tx {
									t.Errorf("invalid seq number, %d > %d ", rx, tx)
								}
							}
						} else if err != errors.ErrNotFound {
							t.Error("Get: got error: ", err)
							return
						}
					}
					atomic.AddUint32(&cnt[i], 1)
				}
			}(i)
		}

		for i := 0; i < secs; i++ {
			h.oo.SetBlockCache(cache.NewLRUCache(rand.Int() % (opt.DefaultBlockCacheSize * 2)))
			time.Sleep(time.Second)
		}
		atomic.StoreUint32(&stop, 1)
		wg.Wait()
	})

	runtime.GOMAXPROCS(1)
}
Example #18
func (ulevel) NewLRUCache(capacity int) level.UnderlyingCache {
	return che{
		Cache: C.NewLRUCache(capacity),
	}
}