func main() { flag.Parse() if *showVersion { fmt.Printf("gocountme: v%s\n", VERSION) return } if *defaultSize <= 0 { fmt.Printf("--default-size must be greater than 0\n") return } if _, err := os.Stat(*dblocation); err != nil { if os.IsNotExist(err) { fmt.Println("Database location does not exist:", *dblocation) return } } log.Println("Opening levelDB") Default_KMinValues_Size = *defaultSize opts := levigo.NewOptions() opts.SetCache(levigo.NewLRUCache(*leveldbLRUCache)) opts.SetCreateIfMissing(true) db, err := levigo.Open(*dblocation, opts) defer db.Close() if err != nil { log.Panicln(err) } RequestChan = make(chan RequestCommand, *nWorkers) workerWaitGroup := sync.WaitGroup{} log.Printf("Starting %d workers", *nWorkers) for i := 0; i < *nWorkers; i++ { go func(id int) { workerWaitGroup.Add(1) levelDBWorker(db, RequestChan) workerWaitGroup.Done() }(i) } http.HandleFunc("/get", GetHandler) http.HandleFunc("/delete", DeleteHandler) http.HandleFunc("/cardinality", CardinalityHandler) http.HandleFunc("/jaccard", JaccardHandler) http.HandleFunc("/correlation", CorrelationMatrixHandler) http.HandleFunc("/add", AddHandler) http.HandleFunc("/addhash", AddHashHandler) http.HandleFunc("/query", QueryHandler) http.HandleFunc("/exit", ExitHandler) log.Printf("Starting gocountme HTTP server on %s", *httpAddress) go func() { log.Fatal(http.ListenAndServe(*httpAddress, nil)) }() workerWaitGroup.Wait() }
func main() { dbname := "leveldb" opts := levigo.NewOptions() opts.SetCache(levigo.NewLRUCache(3 << 20)) opts.SetCreateIfMissing(true) _ = levigo.DestroyDatabase(dbname, opts) db, _ := levigo.Open(dbname, opts) wo := levigo.NewWriteOptions() ro := levigo.NewReadOptions() start := time.Now() for i := 0; i < 10e4; i++ { db.Put(wo, []byte(fmt.Sprintf("a%v", i)), []byte(strconv.Itoa(i))) } for i := 0; i < 10e4; i++ { db.Get(ro, []byte(fmt.Sprintf("a%v", i))) } for i := 0; i < 10e4; i++ { db.Delete(wo, []byte(fmt.Sprintf("a%v", i))) } duration := time.Since(start) log.Printf("Elapsed: %v.", duration) }
func New(directory string) (*Level, error) { options := levigo.NewOptions() options.SetCreateIfMissing(true) options.SetCompression(levigo.SnappyCompression) db, err := levigo.Open(directory, options) return &Level{db: db}, err }
func TestLevigo(t *testing.T) { path := "/tmp/levigo_test_10101" os.RemoveAll(path) opts := levigo.NewOptions() filter := levigo.NewBloomFilter(10) opts.SetFilterPolicy(filter) opts.SetCache(levigo.NewLRUCache(1024 << 20)) // 1G opts.SetCreateIfMissing(true) if ldb, err := levigo.Open(path, opts); err == nil { key := []byte("test-test hwl0dsfds") val := []byte("value") if err = ldb.Put(levigo.NewWriteOptions(), key, val); err != nil { t.Fail() } else { ro := levigo.NewReadOptions() if data, err := ldb.Get(ro, key); err == nil && reflect.DeepEqual(data, val) { ro.SetFillCache(false) it := ldb.NewIterator(ro) it.Seek([]byte{0}) for ; it.Valid(); it.Next() { log.Printf("%s => %s", it.Key(), it.Value()) } } else { t.Fail() } } } else { t.Fail() } }
func NewLevelDB(path string, config interface{}) (Engine, error) { c, ok := config.(*LevelDbConfiguration) if !ok { return nil, fmt.Errorf("Config is of type %T instead of %T", config, LevelDbConfiguration{}) } // if it wasn't set, set it to 100 if c.MaxOpenFiles == 0 { c.MaxOpenFiles = 100 } // if it wasn't set, set it to 200 MB if c.LruCacheSize == 0 { c.LruCacheSize = 200 * 1024 * 1024 } // initialize the global cache if cache == nil { cacheLock.Lock() if cache == nil { cache = levigo.NewLRUCache(int(c.LruCacheSize)) } cacheLock.Unlock() } opts := levigo.NewOptions() opts.SetCache(cache) opts.SetCreateIfMissing(true) opts.SetMaxOpenFiles(c.MaxOpenFiles) db, err := levigo.Open(path, opts) wopts := levigo.NewWriteOptions() ropts := levigo.NewReadOptions() return LevelDB{db, opts, wopts, ropts, path}, err }
// Open returns a keyvalue DB backed by a LevelDB database at the given // filepath. If opts==nil, the DefaultOptions are used. func Open(path string, opts *Options) (keyvalue.DB, error) { if opts == nil { opts = DefaultOptions } options := levigo.NewOptions() defer options.Close() cache := levigo.NewLRUCache(opts.CacheCapacity) options.SetCache(cache) options.SetCreateIfMissing(!opts.MustExist) if opts.WriteBufferSize > 0 { options.SetWriteBufferSize(opts.WriteBufferSize) } db, err := levigo.Open(path, options) if err != nil { return nil, fmt.Errorf("could not open LevelDB at %q: %v", path, err) } largeReadOpts := levigo.NewReadOptions() largeReadOpts.SetFillCache(opts.CacheLargeReads) return &levelDB{ db: db, cache: cache, readOpts: levigo.NewReadOptions(), largeReadOpts: largeReadOpts, writeOpts: levigo.NewWriteOptions(), }, nil }
// NewTree opens (or creates) the LevelDB database dbname, loads or creates
// the root node, and starts the id-generator goroutine feeding n.Newid.
func NewTree(dbname string) *Tree {
	debug("New tree")
	n := new(Tree)
	n.closed = false
	opts := levigo.NewOptions()
	opts.SetCache(levigo.NewLRUCache(3 << 30)) // 3 GB LRU block cache
	opts.SetCreateIfMissing(true)
	d, err := levigo.Open(dbname, opts)
	if err != nil {
		// NOTE(review): Error does not appear to abort here, so n.DB may be
		// nil after a failed open — confirm Error's semantics.
		Error(err)
	}
	n.DB = d
	// Load the root node (id 0); create and persist it when missing.
	if n.Root, err = n.Get(nil, 0); err != nil {
		debug("could not get", err)
		n.Root = &Node{Parent: 0}
		if err := n.Put(nil, n.Root); err != nil {
			Error("could not set", err)
		}
	}
	// Id generator: hands out monotonically increasing ids over n.Newid.
	n.Newid = make(chan int)
	go func() {
		id := n.Root.Parent + 1
		for ; ; id++ {
			// NOTE(review): the send happens before the closed check, so this
			// goroutine can block forever once consumers stop receiving; and
			// n.closed is read here without synchronization (data race).
			// A quit channel selected against the send would fix both.
			n.Newid <- id
			if n.closed {
				break
			}
		}
		log.Println("exiting Newid goroutine")
	}()
	return n
}
func NewLeveldbCache(dbname string, cacheM int) (*LeveldbCache, error) { opts := levigo.NewOptions() filter := levigo.NewBloomFilter(10) cache := levigo.NewLRUCache(1024 * 1024 * cacheM) opts.SetFilterPolicy(filter) opts.SetCache(cache) opts.SetCreateIfMissing(true) opts.SetWriteBufferSize(8 * 1024 * 104) // 8M opts.SetCompression(levigo.SnappyCompression) if ldb, err := levigo.Open(dbname, opts); err == nil { so := levigo.NewReadOptions() so.SetFillCache(false) return &LeveldbCache{ db: ldb, fp: filter, cache: cache, Ro: levigo.NewReadOptions(), Wo: levigo.NewWriteOptions(), So: so, }, nil } else { return nil, err } }
func NewLevelDBPersistence(storageRoot string, cacheCapacity, bitsPerBloomFilterEncoded int) (p *LevelDBPersistence, err error) { options := levigo.NewOptions() options.SetCreateIfMissing(true) options.SetParanoidChecks(true) cache := levigo.NewLRUCache(cacheCapacity) options.SetCache(cache) filterPolicy := levigo.NewBloomFilter(bitsPerBloomFilterEncoded) options.SetFilterPolicy(filterPolicy) storage, err := levigo.Open(storageRoot, options) if err != nil { return } readOptions := levigo.NewReadOptions() writeOptions := levigo.NewWriteOptions() writeOptions.SetSync(true) p = &LevelDBPersistence{ cache: cache, filterPolicy: filterPolicy, options: options, readOptions: readOptions, storage: storage, writeOptions: writeOptions, } return }
func NewStore(path string, sync bool) *Store { opts := levigo.NewOptions() opts.SetCreateIfMissing(true) db, err := levigo.Open(path, opts) if err != nil { panic(fmt.Sprintf("queued.Store: Unable to open db: %v", err)) } id := 0 it := db.NewIterator(levigo.NewReadOptions()) defer it.Close() it.SeekToLast() if it.Valid() { id, err = strconv.Atoi(string(it.Key())) if err != nil { panic(fmt.Sprintf("queued: Error loading db: %v", err)) } } store := &Store{ id: id, path: path, sync: sync, db: db, } return store }
func NewLevelDbDatastore(dbDir string) (Datastore, error) { opts := levigo.NewOptions() opts.SetCache(levigo.NewLRUCache(ONE_GIGABYTE)) opts.SetCreateIfMissing(true) opts.SetBlockSize(TWO_FIFTY_SIX_KILOBYTES) filter := levigo.NewBloomFilter(BLOOM_FILTER_BITS_PER_KEY) opts.SetFilterPolicy(filter) db, err := levigo.Open(dbDir, opts) if err != nil { return nil, err } ro := levigo.NewReadOptions() lastIdBytes, err2 := db.Get(ro, NEXT_ID_KEY) if err2 != nil { return nil, err2 } lastId := uint64(0) if lastIdBytes != nil { lastId, err2 = binary.ReadUvarint(bytes.NewBuffer(lastIdBytes)) if err2 != nil { return nil, err2 } } wo := levigo.NewWriteOptions() return &LevelDbDatastore{db: db, lastIdUsed: lastId, readOptions: ro, writeOptions: wo}, nil }
func NewLevelDBStore(basePath string) LogeStore { var opts = levigo.NewOptions() opts.SetCreateIfMissing(true) db, err := levigo.Open(basePath, opts) if err != nil { panic(fmt.Sprintf("Can't open DB at %s: %v", basePath, err)) } var store = &levelDBStore{ basePath: basePath, db: db, types: spack.NewTypeSet(), writeQueue: make(chan *levelDBContext), flushed: false, } store.types.LastTag = ldb_START_TAG store.loadTypeMetadata() go store.writer() return store }
func (db *DB) initOptions(cfg *Config) *levigo.Options { opts := levigo.NewOptions() opts.SetCreateIfMissing(true) if cfg.CacheSize > 0 { db.cache = levigo.NewLRUCache(cfg.CacheSize) opts.SetCache(db.cache) } //we must use bloomfilter db.filter = levigo.NewBloomFilter(defaultFilterBits) opts.SetFilterPolicy(db.filter) if !cfg.Compression { opts.SetCompression(levigo.NoCompression) } if cfg.BlockSize > 0 { opts.SetBlockSize(cfg.BlockSize) } if cfg.WriteBufferSize > 0 { opts.SetWriteBufferSize(cfg.WriteBufferSize) } return opts }
func InitDB() (*levigo.DB, error) { opts := levigo.NewOptions() opts.SetCache(levigo.NewLRUCache(1024 * 1024)) opts.SetCreateIfMissing(true) homedir := os.Getenv("HOME") dbdir := homedir + "/.overseer/db" os.MkdirAll(dbdir, 0700) db, err := levigo.Open(dbdir, opts) if err != nil { fmt.Println("Failed to open database", err) return nil, err } procs, err := ListProcs(db) for p, status := range procs { log.Printf("Proc ID: %d, status: %d", p, status) if isProcAlive(p) == false { setProcStatus(db, p, PROC_STOPPED) } else { setProcStatus(db, p, PROC_ALIVE) } } return db, nil }
func (db *DB) Destroy() { db.Close() opts := levigo.NewOptions() defer opts.Close() levigo.DestroyDatabase(db.cfg.Path, opts) }
func (c *cache) open(path string) error { opts := levigo.NewOptions() opts.SetCreateIfMissing(true) if c.options.CacheSizeM > 0 { c.cache = levigo.NewLRUCache(c.options.CacheSizeM * 1024 * 1024) opts.SetCache(c.cache) } if c.options.MaxOpenFiles > 0 { opts.SetMaxOpenFiles(c.options.MaxOpenFiles) } if c.options.BlockRestartInterval > 0 { opts.SetBlockRestartInterval(c.options.BlockRestartInterval) } if c.options.WriteBufferSizeM > 0 { opts.SetWriteBufferSize(c.options.WriteBufferSizeM * 1024 * 1024) } if c.options.BlockSizeK > 0 { opts.SetBlockSize(c.options.BlockSizeK * 1024) } db, err := levigo.Open(path, opts) if err != nil { return err } c.db = db c.wo = levigo.NewWriteOptions() c.ro = levigo.NewReadOptions() return nil }
func (buckets *Database) DestroyBucket(name string) error { if _, ok := buckets.DBMap[name]; ok { delete(buckets.DBMap, name) return levigo.DestroyDatabase(name, levigo.NewOptions()) } return nil }
func initDB() (*levigo.DB, error) { opts := levigo.NewOptions() opts.SetCache(levigo.NewLRUCache(3 << 30)) opts.SetCreateIfMissing(true) return levigo.Open(settings.dir+"/db", opts) }
func NewLevelDbShardDatastore(config *configuration.Configuration) (*LevelDbShardDatastore, error) { baseDbDir := filepath.Join(config.DataDir, SHARD_DATABASE_DIR) err := os.MkdirAll(baseDbDir, 0744) if err != nil { return nil, err } opts := levigo.NewOptions() opts.SetCache(levigo.NewLRUCache(config.LevelDbLruCacheSize)) opts.SetCreateIfMissing(true) opts.SetBlockSize(64 * ONE_KILOBYTE) filter := levigo.NewBloomFilter(SHARD_BLOOM_FILTER_BITS_PER_KEY) opts.SetFilterPolicy(filter) opts.SetMaxOpenFiles(config.LevelDbMaxOpenFiles) return &LevelDbShardDatastore{ baseDbDir: baseDbDir, config: config, shards: make(map[uint32]*LevelDbShard), levelDbOptions: opts, maxOpenShards: config.LevelDbMaxOpenShards, lastAccess: make(map[uint32]int64), shardRefCounts: make(map[uint32]int), shardsToClose: make(map[uint32]bool), pointBatchSize: config.LevelDbPointBatchSize, }, nil }
// Will panic if there is a problem with the database. // Should only be called on server initialization. func NewDatabase(databaseLocation string) *Database { buckets := new(Database) buckets.DBMap = make(map[string]*levigo.DB) buckets.BaseLocation = databaseLocation os.MkdirAll(databaseLocation, 0755) files, err := ioutil.ReadDir(databaseLocation) if err != nil { panic(err) } for _, file := range files { if !file.IsDir() || strings.HasPrefix(file.Name(), "_") { continue } opts := levigo.NewOptions() opts.SetCreateIfMissing(true) opts.SetCache(levigo.NewLRUCache(4194304)) buckets.DBMap[file.Name()], err = levigo.Open(path.Join(databaseLocation, file.Name()), opts) if err != nil { panic(err) } } return buckets }
func (engine *LevelDbEngine) Init(config *proto.DBConfigs) error { if config == nil { return proto.ErrNoEngineConfig } if config.LevelDbConfigs == nil { config.LevelDbConfigs = DefaultLevelDbConf } options := levigo.NewOptions() // options.SetCreateIfMissing(config.CreateIfMissing) options.SetCreateIfMissing(true) options.SetParanoidChecks(config.LevelDbConfigs.ParanoidCheck) if config.LevelDbConfigs.LRUCacheSize > 0 { options.SetCache(levigo.NewLRUCache(config.LevelDbConfigs.LRUCacheSize)) } if config.LevelDbConfigs.BloomFilterLength > 0 { options.SetFilterPolicy(levigo.NewBloomFilter(config.LevelDbConfigs.BloomFilterLength)) } engine.config = config engine.dbOptions = options db, err := levigo.Open(engine.config.DataPath, engine.dbOptions) if err != nil { return err } engine.db = db return nil }
func New(mo store.MergeOperator, config map[string]interface{}) (store.KVStore, error) { path, ok := config["path"].(string) if !ok { return nil, fmt.Errorf("must specify path") } if path == "" { return nil, os.ErrInvalid } rv := Store{ path: path, opts: levigo.NewOptions(), mo: mo, } _, err := applyConfig(rv.opts, config) if err != nil { return nil, err } rv.db, err = levigo.Open(rv.path, rv.opts) if err != nil { return nil, err } return &rv, nil }
// TestTrigger exercises the Trigger task runner: puts into the "input"
// sublevel whose keys match "Doc_" must invoke the task callback, which
// indexes id -> number, even when two triggering puts for the same taskKey
// arrive while an earlier (sleeping) invocation is still running.
func TestTrigger(t *testing.T) {
	opts := levigo.NewOptions()
	levigo.DestroyDatabase("test.ldb", opts)
	// opts.SetCache(levigo.NewLRUCache(3<<30))
	opts.SetCreateIfMissing(true)
	db, err := levigo.Open("test.ldb", opts)
	if err != nil {
		t.Fatal(err)
	}
	ro := levigo.NewReadOptions()
	wo := levigo.NewWriteOptions()
	sub1 := sublevel.Sublevel(db, "input")
	index := sublevel.Sublevel(db, "index")
	job := sublevel.Sublevel(db, "job")
	// First callback selects which keys trigger the task (only "Doc_*");
	// second callback is the task body.
	task := Trigger(sub1, job, func(key, value []byte) []byte {
		if strings.HasPrefix(string(key), "Doc_") {
			return key
		}
		return nil
	}, func(key, value []byte) bool {
		doc := make(map[string]string)
		err := json.Unmarshal(value, &doc)
		if err != nil {
			t.Fatal(err)
		}
		index.Put(wo, []byte(doc["id"]), []byte(doc["number"]))
		// Make sure that the next task invocation comes in concurrently to this one
		time.Sleep(300 * time.Millisecond)
		return true
	})
	sub1.Put(wo, []byte("foobar"), []byte("do-not-process"))
	// Two put operations which will both trigger the task for the same taskKey.
	sub1.Put(wo, []byte("Doc_1"), []byte("{\"id\":\"01234\", \"number\": \"42\"}"))
	sub1.Put(wo, []byte("Doc_1"), []byte("{\"id\":\"01234\", \"number\": \"43\"}"))
	// The raw value must reflect the latest put immediately.
	val, err := sub1.Get(ro, []byte("Doc_1"))
	if err != nil || string(val) != "{\"id\":\"01234\", \"number\": \"43\"}" {
		t.Fatal(err, string(val))
	}
	// NOTE(review): assumes both 300ms task invocations complete within
	// 800ms — timing-sensitive on slow or loaded machines.
	time.Sleep(800 * time.Millisecond)
	val, err = index.Get(ro, []byte("01234"))
	if err != nil || string(val) != "43" {
		t.Fatal(err, string(val))
	}
	task.Close()
	ro.Close()
	wo.Close()
	db.Close()
}
func NewLevelDbDatastore(dbDir string) (Datastore, error) { mainDbDir := filepath.Join(dbDir, DATABASE_DIR) requestLogDir := filepath.Join(dbDir, REQUEST_LOG_BASE_DIR) err := os.MkdirAll(mainDbDir, 0744) if err != nil { return nil, err } previousLog, err := NewRequestLogDb(getRequestLogDirForDate(requestLogDir, time.Now().Add(-time.Hour*24))) if err != nil { return nil, err } currentLog, err := NewRequestLogDb(getRequestLogDirForDate(requestLogDir, time.Now())) if err != nil { return nil, err } opts := levigo.NewOptions() opts.SetCache(levigo.NewLRUCache(ONE_GIGABYTE)) opts.SetCreateIfMissing(true) opts.SetBlockSize(TWO_FIFTY_SIX_KILOBYTES) filter := levigo.NewBloomFilter(BLOOM_FILTER_BITS_PER_KEY) opts.SetFilterPolicy(filter) db, err := levigo.Open(dbDir, opts) if err != nil { return nil, err } ro := levigo.NewReadOptions() lastIdBytes, err2 := db.Get(ro, NEXT_ID_KEY) if err2 != nil { return nil, err2 } lastId := uint64(0) if lastIdBytes != nil { lastId, err2 = binary.ReadUvarint(bytes.NewBuffer(lastIdBytes)) if err2 != nil { return nil, err2 } } wo := levigo.NewWriteOptions() leveldbStore := &LevelDbDatastore{ db: db, lastIdUsed: lastId, readOptions: ro, writeOptions: wo, requestLogDir: requestLogDir, currentRequestLog: currentLog, previousRequestLog: previousLog} go leveldbStore.periodicallyRotateRequestLog() return leveldbStore, nil }
func (conn *DbConnection) open(path string) error { opts := levigo.NewOptions() opts.SetCache(levigo.NewLRUCache(3 << 10)) opts.SetCreateIfMissing(true) var err error conn.db, err = levigo.Open(path, opts) conn.ro = levigo.NewReadOptions() conn.wo = levigo.NewWriteOptions() return err }
/* CreateDB creates a db at provided dbpath. */ func CreateDB(dbpath string) *levigo.DB { opts := levigo.NewOptions() opts.SetCache(levigo.NewLRUCache(1 << 10)) opts.SetCreateIfMissing(true) db, err := levigo.Open(dbpath, opts) if err != nil { errMsg := fmt.Sprintf("DB %s Creation failed. %q", dbpath, err) golerror.Boohoo(errMsg, true) } return db }
// formats a new filesystem in the given data dir func Format(dataDir string, rootUid, rootGid uint32) error { // wipe out previous err := os.RemoveAll(dataDir) if err != nil { return err } // create err = os.Mkdir(dataDir, 0755) if err != nil { return fmt.Errorf("issue creating namenode home dir: %s\n", err.Error()) } err = os.Mkdir(dataDir+"/"+dir_inodb, 0755) if err != nil { return fmt.Errorf("issue creating inodb parent dir: %s\n", err.Error()) } err = os.Mkdir(dataDir+"/"+dir_counters, 0755) if err != nil { return fmt.Errorf("issue creating counters parent dir: %s\n", err.Error()) } opts := levigo.NewOptions() defer opts.Close() opts.SetCreateIfMissing(true) // create inodb db, err := levigo.Open(dataDir+"/"+dir_inodb, opts) if err != nil { return err } // add root node ino := maggiefs.NewInode(1, maggiefs.FTYPE_DIR, 0755, rootUid, rootGid) binSize := ino.BinSize() inoBytes := make([]byte, binSize) ino.ToBytes(inoBytes) rootNodeId := make([]byte, 8) binary.LittleEndian.PutUint64(rootNodeId, 1) db.Put(WriteOpts, rootNodeId, inoBytes) db.Close() db, err = levigo.Open(dataDir+"/"+dir_counters, opts) if err != nil { return err } // put 1 for inode counter so other nodes are higher key := []byte(COUNTER_INODE) val := make([]byte, 8) binary.LittleEndian.PutUint64(val, 1) err = db.Put(WriteOpts, key, val) if err != nil { return err } db.Close() return nil }
func (buckets *Database) GetBucket(name string) (*levigo.DB, error) { if db, ok := buckets.DBMap[name]; ok { return db, nil } opts := levigo.NewOptions() opts.SetCreateIfMissing(true) opts.SetCache(levigo.NewLRUCache(4194304)) var err error buckets.DBMap[name], err = levigo.Open(path.Join(buckets.BaseLocation, name), opts) return buckets.DBMap[name], err }
func storageInit(path string) { opts := levigo.NewOptions() //opts.SetCache(levigo.NewLRUCache(32*1024*1024)) // 32mb in-mem cache //opts.SetBlockSize(4096) // 4096 is the default block size //opts.SetCompression(levigo.NoCompression) // default is levigo.SnappyCompression opts.SetCreateIfMissing(true) db, err := levigo.Open(path, opts) if err != nil { panic("error opening DB at " + path + ": " + err.Error()) } storeDB = db }
// Create creates a new Log at location func Create(location string) (LogStore, error) { opts := levigo.NewOptions() opts.SetCache(levigo.NewLRUCache(CACHE_SIZE)) opts.SetCreateIfMissing(true) db, err := levigo.Open(location, opts) if err != nil { return nil, err } opts.Close() logStore := &levelDbLogStore{localDb: db, nextIndex: &utils.AtomicI64{Value: 1}, firstIndex: &utils.AtomicI64{Value: 0}} return LogStore(logStore), err }