// formats a new filesystem in the given data dir func Format(dataDir string, rootUid, rootGid uint32) error { // wipe out previous err := os.RemoveAll(dataDir) if err != nil { return err } // create err = os.Mkdir(dataDir, 0755) if err != nil { return fmt.Errorf("issue creating namenode home dir: %s\n", err.Error()) } err = os.Mkdir(dataDir+"/"+dir_inodb, 0755) if err != nil { return fmt.Errorf("issue creating inodb parent dir: %s\n", err.Error()) } err = os.Mkdir(dataDir+"/"+dir_counters, 0755) if err != nil { return fmt.Errorf("issue creating counters parent dir: %s\n", err.Error()) } opts := levigo.NewOptions() defer opts.Close() opts.SetCreateIfMissing(true) // create inodb db, err := levigo.Open(dataDir+"/"+dir_inodb, opts) if err != nil { return err } // add root node ino := maggiefs.NewInode(1, maggiefs.FTYPE_DIR, 0755, rootUid, rootGid) binSize := ino.BinSize() inoBytes := make([]byte, binSize) ino.ToBytes(inoBytes) rootNodeId := make([]byte, 8) binary.LittleEndian.PutUint64(rootNodeId, 1) db.Put(WriteOpts, rootNodeId, inoBytes) db.Close() db, err = levigo.Open(dataDir+"/"+dir_counters, opts) if err != nil { return err } // put 1 for inode counter so other nodes are higher key := []byte(COUNTER_INODE) val := make([]byte, 8) binary.LittleEndian.PutUint64(val, 1) err = db.Put(WriteOpts, key, val) if err != nil { return err } db.Close() return nil }
func New(directory string) (*Level, error) { options := levigo.NewOptions() options.SetCreateIfMissing(true) options.SetCompression(levigo.SnappyCompression) db, err := levigo.Open(directory, options) return &Level{db: db}, err }
func main() { flag.Parse() if *showVersion { fmt.Printf("gocountme: v%s\n", VERSION) return } if *defaultSize <= 0 { fmt.Printf("--default-size must be greater than 0\n") return } if _, err := os.Stat(*dblocation); err != nil { if os.IsNotExist(err) { fmt.Println("Database location does not exist:", *dblocation) return } } log.Println("Opening levelDB") Default_KMinValues_Size = *defaultSize opts := levigo.NewOptions() opts.SetCache(levigo.NewLRUCache(*leveldbLRUCache)) opts.SetCreateIfMissing(true) db, err := levigo.Open(*dblocation, opts) defer db.Close() if err != nil { log.Panicln(err) } RequestChan = make(chan RequestCommand, *nWorkers) workerWaitGroup := sync.WaitGroup{} log.Printf("Starting %d workers", *nWorkers) for i := 0; i < *nWorkers; i++ { go func(id int) { workerWaitGroup.Add(1) levelDBWorker(db, RequestChan) workerWaitGroup.Done() }(i) } http.HandleFunc("/get", GetHandler) http.HandleFunc("/delete", DeleteHandler) http.HandleFunc("/cardinality", CardinalityHandler) http.HandleFunc("/jaccard", JaccardHandler) http.HandleFunc("/correlation", CorrelationMatrixHandler) http.HandleFunc("/add", AddHandler) http.HandleFunc("/addhash", AddHashHandler) http.HandleFunc("/query", QueryHandler) http.HandleFunc("/exit", ExitHandler) log.Printf("Starting gocountme HTTP server on %s", *httpAddress) go func() { log.Fatal(http.ListenAndServe(*httpAddress, nil)) }() workerWaitGroup.Wait() }
func main() { dbname := "leveldb" opts := levigo.NewOptions() opts.SetCache(levigo.NewLRUCache(3 << 20)) opts.SetCreateIfMissing(true) _ = levigo.DestroyDatabase(dbname, opts) db, _ := levigo.Open(dbname, opts) wo := levigo.NewWriteOptions() ro := levigo.NewReadOptions() start := time.Now() for i := 0; i < 10e4; i++ { db.Put(wo, []byte(fmt.Sprintf("a%v", i)), []byte(strconv.Itoa(i))) } for i := 0; i < 10e4; i++ { db.Get(ro, []byte(fmt.Sprintf("a%v", i))) } for i := 0; i < 10e4; i++ { db.Delete(wo, []byte(fmt.Sprintf("a%v", i))) } duration := time.Since(start) log.Printf("Elapsed: %v.", duration) }
func TestLevigo(t *testing.T) { path := "/tmp/levigo_test_10101" os.RemoveAll(path) opts := levigo.NewOptions() filter := levigo.NewBloomFilter(10) opts.SetFilterPolicy(filter) opts.SetCache(levigo.NewLRUCache(1024 << 20)) // 1G opts.SetCreateIfMissing(true) if ldb, err := levigo.Open(path, opts); err == nil { key := []byte("test-test hwl0dsfds") val := []byte("value") if err = ldb.Put(levigo.NewWriteOptions(), key, val); err != nil { t.Fail() } else { ro := levigo.NewReadOptions() if data, err := ldb.Get(ro, key); err == nil && reflect.DeepEqual(data, val) { ro.SetFillCache(false) it := ldb.NewIterator(ro) it.Seek([]byte{0}) for ; it.Valid(); it.Next() { log.Printf("%s => %s", it.Key(), it.Value()) } } else { t.Fail() } } } else { t.Fail() } }
func InitDB() (*levigo.DB, error) { opts := levigo.NewOptions() opts.SetCache(levigo.NewLRUCache(1024 * 1024)) opts.SetCreateIfMissing(true) homedir := os.Getenv("HOME") dbdir := homedir + "/.overseer/db" os.MkdirAll(dbdir, 0700) db, err := levigo.Open(dbdir, opts) if err != nil { fmt.Println("Failed to open database", err) return nil, err } procs, err := ListProcs(db) for p, status := range procs { log.Printf("Proc ID: %d, status: %d", p, status) if isProcAlive(p) == false { setProcStatus(db, p, PROC_STOPPED) } else { setProcStatus(db, p, PROC_ALIVE) } } return db, nil }
func NewLevelDB(path string, config interface{}) (Engine, error) { c, ok := config.(*LevelDbConfiguration) if !ok { return nil, fmt.Errorf("Config is of type %T instead of %T", config, LevelDbConfiguration{}) } // if it wasn't set, set it to 100 if c.MaxOpenFiles == 0 { c.MaxOpenFiles = 100 } // if it wasn't set, set it to 200 MB if c.LruCacheSize == 0 { c.LruCacheSize = 200 * 1024 * 1024 } // initialize the global cache if cache == nil { cacheLock.Lock() if cache == nil { cache = levigo.NewLRUCache(int(c.LruCacheSize)) } cacheLock.Unlock() } opts := levigo.NewOptions() opts.SetCache(cache) opts.SetCreateIfMissing(true) opts.SetMaxOpenFiles(c.MaxOpenFiles) db, err := levigo.Open(path, opts) wopts := levigo.NewWriteOptions() ropts := levigo.NewReadOptions() return LevelDB{db, opts, wopts, ropts, path}, err }
// Open returns a keyvalue DB backed by a LevelDB database at the given // filepath. If opts==nil, the DefaultOptions are used. func Open(path string, opts *Options) (keyvalue.DB, error) { if opts == nil { opts = DefaultOptions } options := levigo.NewOptions() defer options.Close() cache := levigo.NewLRUCache(opts.CacheCapacity) options.SetCache(cache) options.SetCreateIfMissing(!opts.MustExist) if opts.WriteBufferSize > 0 { options.SetWriteBufferSize(opts.WriteBufferSize) } db, err := levigo.Open(path, options) if err != nil { return nil, fmt.Errorf("could not open LevelDB at %q: %v", path, err) } largeReadOpts := levigo.NewReadOptions() largeReadOpts.SetFillCache(opts.CacheLargeReads) return &levelDB{ db: db, cache: cache, readOpts: levigo.NewReadOptions(), largeReadOpts: largeReadOpts, writeOpts: levigo.NewWriteOptions(), }, nil }
func NewStore(path string, sync bool) *Store { opts := levigo.NewOptions() opts.SetCreateIfMissing(true) db, err := levigo.Open(path, opts) if err != nil { panic(fmt.Sprintf("queued.Store: Unable to open db: %v", err)) } id := 0 it := db.NewIterator(levigo.NewReadOptions()) defer it.Close() it.SeekToLast() if it.Valid() { id, err = strconv.Atoi(string(it.Key())) if err != nil { panic(fmt.Sprintf("queued: Error loading db: %v", err)) } } store := &Store{ id: id, path: path, sync: sync, db: db, } return store }
func NewLevelDBStore(basePath string) LogeStore { var opts = levigo.NewOptions() opts.SetCreateIfMissing(true) db, err := levigo.Open(basePath, opts) if err != nil { panic(fmt.Sprintf("Can't open DB at %s: %v", basePath, err)) } var store = &levelDBStore{ basePath: basePath, db: db, types: spack.NewTypeSet(), writeQueue: make(chan *levelDBContext), flushed: false, } store.types.LastTag = ldb_START_TAG store.loadTypeMetadata() go store.writer() return store }
func NewLeveldbCache(dbname string, cacheM int) (*LeveldbCache, error) { opts := levigo.NewOptions() filter := levigo.NewBloomFilter(10) cache := levigo.NewLRUCache(1024 * 1024 * cacheM) opts.SetFilterPolicy(filter) opts.SetCache(cache) opts.SetCreateIfMissing(true) opts.SetWriteBufferSize(8 * 1024 * 104) // 8M opts.SetCompression(levigo.SnappyCompression) if ldb, err := levigo.Open(dbname, opts); err == nil { so := levigo.NewReadOptions() so.SetFillCache(false) return &LeveldbCache{ db: ldb, fp: filter, cache: cache, Ro: levigo.NewReadOptions(), Wo: levigo.NewWriteOptions(), So: so, }, nil } else { return nil, err } }
func NewLevelDbDatastore(dbDir string) (Datastore, error) { opts := levigo.NewOptions() opts.SetCache(levigo.NewLRUCache(ONE_GIGABYTE)) opts.SetCreateIfMissing(true) opts.SetBlockSize(TWO_FIFTY_SIX_KILOBYTES) filter := levigo.NewBloomFilter(BLOOM_FILTER_BITS_PER_KEY) opts.SetFilterPolicy(filter) db, err := levigo.Open(dbDir, opts) if err != nil { return nil, err } ro := levigo.NewReadOptions() lastIdBytes, err2 := db.Get(ro, NEXT_ID_KEY) if err2 != nil { return nil, err2 } lastId := uint64(0) if lastIdBytes != nil { lastId, err2 = binary.ReadUvarint(bytes.NewBuffer(lastIdBytes)) if err2 != nil { return nil, err2 } } wo := levigo.NewWriteOptions() return &LevelDbDatastore{db: db, lastIdUsed: lastId, readOptions: ro, writeOptions: wo}, nil }
// NewTree opens (creating if missing) the LevelDB behind dbname and
// builds a Tree anchored at node id 0, creating and persisting a fresh
// root node if none is stored yet. It also starts a goroutine that
// dispenses monotonically increasing node ids over n.Newid.
func NewTree(dbname string) *Tree {
	debug("New tree")
	n := new(Tree)
	n.closed = false
	opts := levigo.NewOptions()
	opts.SetCache(levigo.NewLRUCache(3 << 30)) // 3 GiB block cache
	opts.SetCreateIfMissing(true)
	d, err := levigo.Open(dbname, opts)
	if err != nil {
		Error(err)
	}
	n.DB = d
	// load the root node (id 0); if that fails, assume a fresh DB and
	// persist a new root
	if n.Root, err = n.Get(nil, 0); err != nil {
		debug("could not get", err)
		n.Root = &Node{Parent: 0}
		if err := n.Put(nil, n.Root); err != nil {
			Error("could not set", err)
		}
	}
	// id dispenser: starts just past the root's Parent and hands out one
	// id per receive. NOTE(review): the closed flag is checked only after
	// a send, so one extra id is handed out before the goroutine exits --
	// confirm callers tolerate that.
	n.Newid = make(chan int)
	go func() {
		id := n.Root.Parent + 1
		for ; ; id++ {
			n.Newid <- id
			if n.closed {
				break
			}
		}
		log.Println("exiting Newid goroutine")
	}()
	return n
}
func (c *cache) open(path string) error { opts := levigo.NewOptions() opts.SetCreateIfMissing(true) if c.options.CacheSizeM > 0 { c.cache = levigo.NewLRUCache(c.options.CacheSizeM * 1024 * 1024) opts.SetCache(c.cache) } if c.options.MaxOpenFiles > 0 { opts.SetMaxOpenFiles(c.options.MaxOpenFiles) } if c.options.BlockRestartInterval > 0 { opts.SetBlockRestartInterval(c.options.BlockRestartInterval) } if c.options.WriteBufferSizeM > 0 { opts.SetWriteBufferSize(c.options.WriteBufferSizeM * 1024 * 1024) } if c.options.BlockSizeK > 0 { opts.SetBlockSize(c.options.BlockSizeK * 1024) } db, err := levigo.Open(path, opts) if err != nil { return err } c.db = db c.wo = levigo.NewWriteOptions() c.ro = levigo.NewReadOptions() return nil }
// GetOrCreateShard returns the local shard DB for id, opening (or
// creating) its on-disk LevelDB on first use. Every call refreshes the
// shard's last-access timestamp and bumps its ref count, which may
// close the oldest open shard via
// incrementShardRefCountAndCloseOldestIfNeeded. The whole operation
// runs under shardsLock.
func (self *LevelDbShardDatastore) GetOrCreateShard(id uint32) (cluster.LocalShardDb, error) {
	now := time.Now().Unix()
	self.shardsLock.Lock()
	defer self.shardsLock.Unlock()
	db := self.shards[id]
	self.lastAccess[id] = now

	// fast path: the shard is already open
	if db != nil {
		self.incrementShardRefCountAndCloseOldestIfNeeded(id)
		return db, nil
	}

	dbDir := self.shardDir(id)
	log.Info("DATASTORE: opening or creating shard %s", dbDir)
	ldb, err := levigo.Open(dbDir, self.levelDbOptions)
	if err != nil {
		return nil, err
	}
	db, err = NewLevelDbShard(ldb, self.pointBatchSize)
	if err != nil {
		return nil, err
	}
	self.shards[id] = db
	self.incrementShardRefCountAndCloseOldestIfNeeded(id)
	return db, nil
}
func initDB() (*levigo.DB, error) { opts := levigo.NewOptions() opts.SetCache(levigo.NewLRUCache(3 << 30)) opts.SetCreateIfMissing(true) return levigo.Open(settings.dir+"/db", opts) }
// Will panic if there is a problem with the database. // Should only be called on server initialization. func NewDatabase(databaseLocation string) *Database { buckets := new(Database) buckets.DBMap = make(map[string]*levigo.DB) buckets.BaseLocation = databaseLocation os.MkdirAll(databaseLocation, 0755) files, err := ioutil.ReadDir(databaseLocation) if err != nil { panic(err) } for _, file := range files { if !file.IsDir() || strings.HasPrefix(file.Name(), "_") { continue } opts := levigo.NewOptions() opts.SetCreateIfMissing(true) opts.SetCache(levigo.NewLRUCache(4194304)) buckets.DBMap[file.Name()], err = levigo.Open(path.Join(databaseLocation, file.Name()), opts) if err != nil { panic(err) } } return buckets }
func (engine *LevelDbEngine) Init(config *proto.DBConfigs) error { if config == nil { return proto.ErrNoEngineConfig } if config.LevelDbConfigs == nil { config.LevelDbConfigs = DefaultLevelDbConf } options := levigo.NewOptions() // options.SetCreateIfMissing(config.CreateIfMissing) options.SetCreateIfMissing(true) options.SetParanoidChecks(config.LevelDbConfigs.ParanoidCheck) if config.LevelDbConfigs.LRUCacheSize > 0 { options.SetCache(levigo.NewLRUCache(config.LevelDbConfigs.LRUCacheSize)) } if config.LevelDbConfigs.BloomFilterLength > 0 { options.SetFilterPolicy(levigo.NewBloomFilter(config.LevelDbConfigs.BloomFilterLength)) } engine.config = config engine.dbOptions = options db, err := levigo.Open(engine.config.DataPath, engine.dbOptions) if err != nil { return err } engine.db = db return nil }
func New(mo store.MergeOperator, config map[string]interface{}) (store.KVStore, error) { path, ok := config["path"].(string) if !ok { return nil, fmt.Errorf("must specify path") } if path == "" { return nil, os.ErrInvalid } rv := Store{ path: path, opts: levigo.NewOptions(), mo: mo, } _, err := applyConfig(rv.opts, config) if err != nil { return nil, err } rv.db, err = levigo.Open(rv.path, rv.opts) if err != nil { return nil, err } return &rv, nil }
func NewLevelDBPersistence(storageRoot string, cacheCapacity, bitsPerBloomFilterEncoded int) (p *LevelDBPersistence, err error) { options := levigo.NewOptions() options.SetCreateIfMissing(true) options.SetParanoidChecks(true) cache := levigo.NewLRUCache(cacheCapacity) options.SetCache(cache) filterPolicy := levigo.NewBloomFilter(bitsPerBloomFilterEncoded) options.SetFilterPolicy(filterPolicy) storage, err := levigo.Open(storageRoot, options) if err != nil { return } readOptions := levigo.NewReadOptions() writeOptions := levigo.NewWriteOptions() writeOptions.SetSync(true) p = &LevelDBPersistence{ cache: cache, filterPolicy: filterPolicy, options: options, readOptions: readOptions, storage: storage, writeOptions: writeOptions, } return }
func loadVolume(volRoot string) (*volume, error) { id, err := getVolId(volRoot) if err != nil { return nil, err } dnInfoFile, err := os.Open(volRoot + "/DNINFO") defer dnInfoFile.Close() if err != nil { return nil, err } d := json.NewDecoder(dnInfoFile) dnInfo := maggiefs.DataNodeInfo{} d.Decode(dnInfo) db, err := levigo.Open(volRoot+"/meta", openOpts) if err != nil { db.Close() return nil, err } rootFile, err := os.Open(volRoot) if err != nil { return nil, err } return &volume{id, volRoot, rootFile, maggiefs.VolumeInfo{id, dnInfo}, db}, nil }
func (ldbs *Store) Open() error { var err error ldbs.db, err = levigo.Open(ldbs.path, ldbs.opts) if err != nil { return err } return nil }
// Load brings the datastore's shards up: optionally clears stored data,
// ensures every shard directory exists, loads and verifies shard infos,
// and — when the shards have never been initialized — creates each
// LevelDB instance under a freshly generated random DaemonId and writes
// its ShardInfo.
func (dld *DataStoreLoader) Load() error {
	var err error
	// If data.store.clear was set, clear existing data.
	if dld.ClearStored {
		err = dld.clearStored()
		if err != nil {
			return err
		}
	}
	// Make sure the shard directories exist in all cases, with a mkdir -p
	for i := range dld.shards {
		err := os.MkdirAll(dld.shards[i].path, 0777)
		if err != nil {
			return errors.New(fmt.Sprintf("Failed to MkdirAll(%s): %s",
				dld.shards[i].path, err.Error()))
		}
	}
	// Get information about each shard, and verify them.
	dld.LoadShards()
	err = dld.VerifyShardInfos()
	if err != nil {
		return err
	}
	// VerifyShardInfos guarantees the shards are uniform: either all are
	// already open (shard 0's ldb is non-nil) or none are.
	if dld.shards[0].ldb != nil {
		dld.lg.Infof("Loaded %d leveldb instances with "+
			"DaemonId of 0x%016x\n", len(dld.shards), dld.shards[0].info.DaemonId)
	} else {
		// Create leveldb instances if needed.
		rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
		daemonId := uint64(rnd.Int63())
		dld.lg.Infof("Initializing %d leveldb instances with a new "+
			"DaemonId of 0x%016x\n", len(dld.shards), daemonId)
		dld.openOpts.SetCreateIfMissing(true)
		for i := range dld.shards {
			shd := dld.shards[i]
			shd.ldb, err = levigo.Open(shd.path, shd.dld.openOpts)
			if err != nil {
				return errors.New(fmt.Sprintf("levigo.Open(%s) failed to "+
					"create the shard: %s", shd.path, err.Error()))
			}
			// stamp the new shard with its identity and layout version
			info := &ShardInfo{
				LayoutVersion: CURRENT_LAYOUT_VERSION,
				DaemonId:      daemonId,
				TotalShards:   uint32(len(dld.shards)),
				ShardIndex:    uint32(i),
			}
			err = shd.writeShardInfo(info)
			if err != nil {
				return errors.New(fmt.Sprintf("levigo.Open(%s) failed to "+
					"write shard info: %s", shd.path, err.Error()))
			}
			dld.lg.Infof("Shard %s initialized with ShardInfo %s \n",
				shd.path, asJson(info))
		}
	}
	return nil
}
// TestTrigger exercises the Trigger task runner: puts into the "input"
// sublevel whose keys carry the "Doc_" prefix must (asynchronously)
// get indexed into the "index" sublevel, while other keys are ignored.
// Timing: each task invocation sleeps 300ms so two invocations for the
// same task key overlap; the 800ms sleep lets both finish.
func TestTrigger(t *testing.T) {
	opts := levigo.NewOptions()
	levigo.DestroyDatabase("test.ldb", opts)
	// opts.SetCache(levigo.NewLRUCache(3<<30))
	opts.SetCreateIfMissing(true)
	db, err := levigo.Open("test.ldb", opts)
	if err != nil {
		t.Fatal(err)
	}
	ro := levigo.NewReadOptions()
	wo := levigo.NewWriteOptions()
	sub1 := sublevel.Sublevel(db, "input")
	index := sublevel.Sublevel(db, "index")
	job := sublevel.Sublevel(db, "job")
	// key mapper: only "Doc_"-prefixed keys yield a task key
	task := Trigger(sub1, job, func(key, value []byte) []byte {
		if strings.HasPrefix(string(key), "Doc_") {
			return key
		}
		return nil
	}, func(key, value []byte) bool {
		// task body: index the doc by id -> number
		doc := make(map[string]string)
		err := json.Unmarshal(value, &doc)
		if err != nil {
			t.Fatal(err)
		}
		index.Put(wo, []byte(doc["id"]), []byte(doc["number"]))
		// Make sure that the next task invocation comes in concurrently to this one
		time.Sleep(300 * time.Millisecond)
		return true
	})
	sub1.Put(wo, []byte("foobar"), []byte("do-not-process"))
	// Two put operations which will both trigger the task for the same taskKey.
	sub1.Put(wo, []byte("Doc_1"), []byte("{\"id\":\"01234\", \"number\": \"42\"}"))
	sub1.Put(wo, []byte("Doc_1"), []byte("{\"id\":\"01234\", \"number\": \"43\"}"))
	// the raw value must reflect the second put immediately
	val, err := sub1.Get(ro, []byte("Doc_1"))
	if err != nil || string(val) != "{\"id\":\"01234\", \"number\": \"43\"}" {
		t.Fatal(err, string(val))
	}
	time.Sleep(800 * time.Millisecond)
	// by now the task must have indexed the latest number
	val, err = index.Get(ro, []byte("01234"))
	if err != nil || string(val) != "43" {
		t.Fatal(err, string(val))
	}
	task.Close()
	ro.Close()
	wo.Close()
	db.Close()
}
func NewLevelDbDatastore(dbDir string) (Datastore, error) { mainDbDir := filepath.Join(dbDir, DATABASE_DIR) requestLogDir := filepath.Join(dbDir, REQUEST_LOG_BASE_DIR) err := os.MkdirAll(mainDbDir, 0744) if err != nil { return nil, err } previousLog, err := NewRequestLogDb(getRequestLogDirForDate(requestLogDir, time.Now().Add(-time.Hour*24))) if err != nil { return nil, err } currentLog, err := NewRequestLogDb(getRequestLogDirForDate(requestLogDir, time.Now())) if err != nil { return nil, err } opts := levigo.NewOptions() opts.SetCache(levigo.NewLRUCache(ONE_GIGABYTE)) opts.SetCreateIfMissing(true) opts.SetBlockSize(TWO_FIFTY_SIX_KILOBYTES) filter := levigo.NewBloomFilter(BLOOM_FILTER_BITS_PER_KEY) opts.SetFilterPolicy(filter) db, err := levigo.Open(dbDir, opts) if err != nil { return nil, err } ro := levigo.NewReadOptions() lastIdBytes, err2 := db.Get(ro, NEXT_ID_KEY) if err2 != nil { return nil, err2 } lastId := uint64(0) if lastIdBytes != nil { lastId, err2 = binary.ReadUvarint(bytes.NewBuffer(lastIdBytes)) if err2 != nil { return nil, err2 } } wo := levigo.NewWriteOptions() leveldbStore := &LevelDbDatastore{ db: db, lastIdUsed: lastId, readOptions: ro, writeOptions: wo, requestLogDir: requestLogDir, currentRequestLog: currentLog, previousRequestLog: previousLog} go leveldbStore.periodicallyRotateRequestLog() return leveldbStore, nil }
func (conn *DbConnection) open(path string) error { opts := levigo.NewOptions() opts.SetCache(levigo.NewLRUCache(3 << 10)) opts.SetCreateIfMissing(true) var err error conn.db, err = levigo.Open(path, opts) conn.ro = levigo.NewReadOptions() conn.wo = levigo.NewWriteOptions() return err }
// initializes a namedata func NewNameData(dataDir string) (*NameData, error) { opts := OpenOpts // todo configure caching // todo investigate turning off compression inodb, err := levigo.Open(dataDir+"/"+dir_inodb, opts) if err != nil { return nil, err } ret := &NameData{} ret.inodb = inodb ret.inodeStripeLock = make(map[uint64]*sync.Mutex) for i := uint64(0); i < STRIPE_SIZE; i++ { ret.inodeStripeLock[i] = &sync.Mutex{} } ret.counterLock = &sync.Mutex{} ret.counterdb, err = levigo.Open(dataDir+"/"+dir_counters, opts) return ret, err }
func (buckets *Database) GetBucket(name string) (*levigo.DB, error) { if db, ok := buckets.DBMap[name]; ok { return db, nil } opts := levigo.NewOptions() opts.SetCreateIfMissing(true) opts.SetCache(levigo.NewLRUCache(4194304)) var err error buckets.DBMap[name], err = levigo.Open(path.Join(buckets.BaseLocation, name), opts) return buckets.DBMap[name], err }
/* CreateDB creates a db at provided dbpath. */ func CreateDB(dbpath string) *levigo.DB { opts := levigo.NewOptions() opts.SetCache(levigo.NewLRUCache(1 << 10)) opts.SetCreateIfMissing(true) db, err := levigo.Open(dbpath, opts) if err != nil { errMsg := fmt.Sprintf("DB %s Creation failed. %q", dbpath, err) golerror.Boohoo(errMsg, true) } return db }
func storageInit(path string) { opts := levigo.NewOptions() //opts.SetCache(levigo.NewLRUCache(32*1024*1024)) // 32mb in-mem cache //opts.SetBlockSize(4096) // 4096 is the default block size //opts.SetCompression(levigo.NoCompression) // default is levigo.SnappyCompression opts.SetCreateIfMissing(true) db, err := levigo.Open(path, opts) if err != nil { panic("error opening DB at " + path + ": " + err.Error()) } storeDB = db }