func saveItem(db *leveldb.DB, it *gohn.Item) { pbmsg, err := proto.Marshal(it) check(err) key := []byte(strconv.Itoa(int(it.GetId()))) err = db.Put(key, pbmsg, nil) check(err) }
func ldbAvailability(db *leveldb.DB, folder, file []byte) []protocol.DeviceID { k := globalKey(folder, file) bs, err := db.Get(k, nil) if err == leveldb.ErrNotFound { return nil } if err != nil { panic(err) } var vl versionList err = vl.UnmarshalXDR(bs) if err != nil { panic(err) } var devices []protocol.DeviceID for _, v := range vl.versions { if !v.version.Equal(vl.versions[0].version) { break } n := protocol.DeviceIDFromBytes(v.device) devices = append(devices, n) } return devices }
func ldbListFolders(db *leveldb.DB) []string { runtime.GC() start := []byte{keyTypeGlobal} limit := []byte{keyTypeGlobal + 1} snap, err := db.GetSnapshot() if err != nil { panic(err) } defer snap.Release() dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil) defer dbi.Release() folderExists := make(map[string]bool) for dbi.Next() { folder := string(globalKeyFolder(dbi.Key())) if !folderExists[folder] { folderExists[folder] = true } } folders := make([]string, 0, len(folderExists)) for k := range folderExists { folders = append(folders, k) } sort.Strings(folders) return folders }
// flush a leveldb batch to database and reset batch to 0 func cacheFlush(db *leveldb.DB, batch *leveldb.Batch) { err := db.Write(batch, nil) if err != nil { log.Fatal(err) } batch.Reset() }
func cacheLookup(db *leveldb.DB, way *osmpbf.Way) ([]map[string]string, error) { var container []map[string]string for _, each := range way.NodeIDs { stringid := strconv.FormatInt(each, 10) data, err := db.Get([]byte(stringid), nil) if err != nil { log.Println("denormalize failed for way:", way.ID, "node not found:", stringid) return container, err } s := string(data) spl := strings.Split(s, ":") latlon := make(map[string]string) lat, lon := spl[0], spl[1] latlon["lat"] = lat latlon["lon"] = lon container = append(container, latlon) } return container, nil // fmt.Println(way.NodeIDs) // fmt.Println(container) // os.Exit(1) }
// write to leveldb immediately func cacheStore(db *leveldb.DB, node *osmpbf.Node) { id, val := formatLevelDB(node) err := db.Put([]byte(id), []byte(val), nil) if err != nil { log.Fatal(err) } }
func read_metadata(filename string, metadatadb *leveldb.DB) (version int, numbytes int, exptime time.Time, exp int, err error) { data, err2 := metadatadb.Get([]byte(filename), nil) // log.Println(data, filename, err2) err = err2 if err != nil { version = 0 numbytes = 0 exptime = time.Now() exp = 0 } else { line := string(data) fields := strings.Fields(line) version, _ = strconv.Atoi(fields[0]) numbytes, _ = strconv.Atoi(fields[1]) if len(fields) == 7 { exptime_str := fields[2] + " " + fields[3] + " " + fields[4] + " " + fields[5] layout := "2006-01-02 15:04:05 -0700 MST" exptime, _ = time.Parse(layout, exptime_str) exp, _ = strconv.Atoi(fields[6]) } else { log.Println("why here?", string(data), string(filename)) } } return }
//从levelDB中载入列表判断是否近期执行 func WalkTasks(base, span int64, db *leveldb.DB) { var task Task var err error var ok bool timer.DropMap = nil timer.DropMap = make(map[string]bool) s := time.Now() iter := db.NewIterator(nil, nil) parseMap := make(map[string][]string, 1024) for iter.Next() { key := string(iter.Key()) val := string(iter.Value()) if _, ok = parseMap[val]; ok == false { parseMap[val] = make([]string, 0) } parseMap[val] = append(parseMap[val], key) } iter.Release() inWalking = true for val, keys := range parseMap { task, err = parseTask(val) if err == nil { task.Base = base ret := task.Dispatch(span, true) for _, key := range keys { task.Key = key timer.Add(ret, key) } } } inWalking = false parseMap = nil e := time.Now() logPrintln(e.Sub(s)) }
func clearItems(db *leveldb.DB) error { snap, err := db.GetSnapshot() if err != nil { return err } defer snap.Release() // Iterate over k2 it := snap.NewIterator(util.BytesPrefix([]byte{1}), nil) defer it.Release() batch := new(leveldb.Batch) for it.Next() { k1 := it.Key() k2 := it.Value() // k2 should exist _, err := snap.Get(k2, nil) if err != nil { return err } // Delete the k1 => k2 mapping first batch.Delete(k1) // Then the k2 => data mapping batch.Delete(k2) } if testing.Verbose() { log.Printf("batch write (clear) %p", batch) } return db.Write(batch, nil) }
func ldbWithAllFolderTruncated(db *leveldb.DB, folder []byte, fn func(device []byte, f protocol.FileInfoTruncated) bool) { runtime.GC() start := deviceKey(folder, nil, nil) // before all folder/device files limit := deviceKey(folder, protocol.LocalDeviceID[:], []byte{0xff, 0xff, 0xff, 0xff}) // after all folder/device files snap, err := db.GetSnapshot() if err != nil { panic(err) } if debugDB { l.Debugf("created snapshot %p", snap) } defer func() { if debugDB { l.Debugf("close snapshot %p", snap) } snap.Release() }() dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil) defer dbi.Release() for dbi.Next() { device := deviceKeyDevice(dbi.Key()) var f protocol.FileInfoTruncated err := f.UnmarshalXDR(dbi.Value()) if err != nil { panic(err) } if cont := fn(device, f); !cont { return } } }
func ldbWithAllRepoTruncated(db *leveldb.DB, repo []byte, fn func(node []byte, f protocol.FileInfoTruncated) bool) { defer runtime.GC() start := nodeKey(repo, nil, nil) // before all repo/node files limit := nodeKey(repo, protocol.LocalNodeID[:], []byte{0xff, 0xff, 0xff, 0xff}) // after all repo/node files snap, err := db.GetSnapshot() if err != nil { panic(err) } defer snap.Release() dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil) defer dbi.Release() for dbi.Next() { node := nodeKeyNode(dbi.Key()) var f protocol.FileInfoTruncated err := f.UnmarshalXDR(dbi.Value()) if err != nil { panic(err) } if cont := fn(node, f); !cont { return } } }
func ldbListRepos(db *leveldb.DB) []string { defer runtime.GC() start := []byte{keyTypeGlobal} limit := []byte{keyTypeGlobal + 1} snap, err := db.GetSnapshot() if err != nil { panic(err) } defer snap.Release() dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil) defer dbi.Release() repoExists := make(map[string]bool) for dbi.Next() { repo := string(globalKeyRepo(dbi.Key())) if !repoExists[repo] { repoExists[repo] = true } } repos := make([]string, 0, len(repoExists)) for k := range repoExists { repos = append(repos, k) } sort.Strings(repos) return repos }
//Function: getFile //Checkes whether file exists or not //Parameters: // fileName - file name // fileDB - database connection //Return: // true - file found // false - file not found // file itself func getFile(fileName string, fileDB *leveldb.DB) (bool, []byte, iterator.Iterator) { var flag int = 0 var val []byte iter := fileDB.NewIterator(nil, nil) //doOperation([]byte(fileNm), nil, "delete", fileDB, wo) for iter.Next() { key := iter.Key() if string(key) == fileName { val = iter.Value() flag = 1 break } } if flag == 1 { return true, val, iter } else { return false, nil, iter } }
func clearItems(db *leveldb.DB) error { snap, err := db.GetSnapshot() if err != nil { return err } defer snap.Release() // Iterate from the start of k2 space to the end it := snap.NewIterator(&util.Range{Start: []byte{2}, Limit: []byte{2, 0xff, 0xff, 0xff, 0xff}}, nil) defer it.Release() batch := &leveldb.Batch{} for it.Next() { k2 := it.Key() k1 := it.Value() // k1 should exist _, err := snap.Get(k1, nil) if err != nil { return err } // Delete the k2 => k1 mapping first batch.Delete(k2) // Then the k1 => key mapping batch.Delete(k1) } return db.Write(batch, nil) }
/* /Read the DATABASE /Read the Key and Value by Key */ func dbRead(dbObj *leveldb.DB, readDBKey []byte) []byte { data, readError := dbObj.Get([]byte(readDBKey), nil) if readError != nil { fmt.Println("DB Read ERROR") } return data }
func ldbAvailability(db *leveldb.DB, repo, file []byte) []protocol.NodeID { k := globalKey(repo, file) bs, err := db.Get(k, nil) if err == leveldb.ErrNotFound { return nil } if err != nil { panic(err) } var vl versionList err = vl.UnmarshalXDR(bs) if err != nil { panic(err) } var nodes []protocol.NodeID for _, v := range vl.versions { if v.version != vl.versions[0].version { break } var n protocol.NodeID copy(n[:], v.node) nodes = append(nodes, n) } return nodes }
func ldbListFolders(db *leveldb.DB) []string { runtime.GC() snap, err := db.GetSnapshot() if err != nil { panic(err) } l.Debugf("created snapshot %p", snap) defer func() { l.Debugf("close snapshot %p", snap) snap.Release() }() dbi := snap.NewIterator(util.BytesPrefix([]byte{KeyTypeGlobal}), nil) defer dbi.Release() folderExists := make(map[string]bool) for dbi.Next() { folder := string(globalKeyFolder(dbi.Key())) if !folderExists[folder] { folderExists[folder] = true } } folders := make([]string, 0, len(folderExists)) for k := range folderExists { folders = append(folders, k) } sort.Strings(folders) return folders }
// Command read func read(conn net.Conn, input_bytes []byte, datadb *leveldb.DB, metadatadb *leveldb.DB) { input_string := string(input_bytes) inputs := strings.Fields(input_string) filename := inputs[1] mutex.RLock() version, numbytes, exptime, exp, err1 := read_metadata(filename, metadatadb) if err1 == nil { if time.Now().After(exptime) { mutex.RUnlock() conn.Write([]byte("ERR_FILE_NOT_FOUND\r\n")) // content has expired } else { data, err2 := datadb.Get([]byte(filename), nil) mutex.RUnlock() if err2 != nil { log.Println("error in conversion: ", err1) } version_str := strconv.Itoa(version) numbytes_str := strconv.Itoa(numbytes) exp_str := "0" if exp != 0 { exp_str = strconv.Itoa(int(exptime.Sub(time.Now()).Seconds())) } response := append([]byte("CONTENTS "+version_str+" "+numbytes_str+" "+exp_str+" \r\n"), data...) // log.Println(response) conn.Write(response) // log.Println(response) } } else { mutex.RUnlock() conn.Write([]byte("ERR_FILE_NOT_FOUND\r\n")) } }
func ldbWithHave(db *leveldb.DB, folder, device []byte, truncate bool, fn Iterator) { start := deviceKey(folder, device, nil) // before all folder/device files limit := deviceKey(folder, device, []byte{0xff, 0xff, 0xff, 0xff}) // after all folder/device files snap, err := db.GetSnapshot() if err != nil { panic(err) } if debugDB { l.Debugf("created snapshot %p", snap) } defer func() { if debugDB { l.Debugf("close snapshot %p", snap) } snap.Release() }() dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil) defer dbi.Release() for dbi.Next() { f, err := unmarshalTrunc(dbi.Value(), truncate) if err != nil { panic(err) } if cont := fn(f); !cont { return } } }
func BitIndexes(bdb *leveldb.DB, bmd5s []string) map[uint]struct{} { wg := new(sync.WaitGroup) res := make(map[uint]struct{}) bitChan := make(chan uint, 1000) go func() { for bit := range bitChan { if _, ok := res[bit]; !ok { res[bit] = struct{}{} } } }() for _, bmd5 := range bmd5s { wg.Add(1) go func(bmd5 string) { var fpmap Fpmap data, _ := bdb.Get([]byte("m-"+bmd5), nil) if err := json.Unmarshal(data, &fpmap); err != nil { Error.Println("Error decoding fpmap object", bmd5) } for _, index := range fpmap { bitChan <- index.bindex } }(bmd5) } wg.Wait() close(bitChan) return res }
// ldbWithGlobal iterates over the global version list of the given
// folder and calls fn with the (optionally truncated) FileInfo of the
// newest version of each file, stopping early when fn returns false.
// A global entry whose backing device record is missing is treated as
// database corruption and panics after dumping diagnostics.
func ldbWithGlobal(db *leveldb.DB, folder []byte, truncate bool, fn fileIterator) {
	runtime.GC()

	start := globalKey(folder, nil)
	limit := globalKey(folder, []byte{0xff, 0xff, 0xff, 0xff})
	snap, err := db.GetSnapshot()
	if err != nil {
		panic(err)
	}
	if debugDB {
		l.Debugf("created snapshot %p", snap)
	}
	defer func() {
		if debugDB {
			l.Debugf("close snapshot %p", snap)
		}
		snap.Release()
	}()
	dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
	defer dbi.Release()

	for dbi.Next() {
		var vl versionList
		err := vl.UnmarshalXDR(dbi.Value())
		if err != nil {
			panic(err)
		}
		// An empty version list should never be stored.
		if len(vl.versions) == 0 {
			l.Debugln(dbi.Key())
			panic("no versions?")
		}
		// versions[0] is the entry whose device record we fetch; the
		// full FileInfo lives under the per-device key.
		name := globalKeyName(dbi.Key())
		fk := deviceKey(folder, vl.versions[0].device, name)
		if debugDB {
			l.Debugf("snap.Get %p %x", snap, fk)
		}
		bs, err := snap.Get(fk, nil)
		if err != nil {
			// Dump everything relevant before panicking: a missing
			// device record for a listed version indicates corruption.
			l.Debugf("folder: %q (%x)", folder, folder)
			l.Debugf("key: %q (%x)", dbi.Key(), dbi.Key())
			l.Debugf("vl: %v", vl)
			l.Debugf("vl.versions[0].device: %x", vl.versions[0].device)
			l.Debugf("name: %q (%x)", name, name)
			l.Debugf("fk: %q", fk)
			l.Debugf("fk: %x %x %x", fk[1:1+64], fk[1+64:1+64+32], fk[1+64+32:])
			panic(err)
		}
		f, err := unmarshalTrunc(bs, truncate)
		if err != nil {
			panic(err)
		}
		if cont := fn(f); !cont {
			return
		}
	}
}
func dbEmpty(db *leveldb.DB) bool { iter := db.NewIterator(nil, nil) defer iter.Release() if iter.Next() { return false } return true }
func get(db *leveldb.DB, id protocol.DeviceID) []address { var addrs addressList val, err := db.Get(id[:], nil) if err == nil { addrs.UnmarshalXDR(val) } return addrs.addresses }
func ldbCheckGlobals(db *leveldb.DB, folder []byte) { defer runtime.GC() snap, err := db.GetSnapshot() if err != nil { panic(err) } l.Debugf("created snapshot %p", snap) defer func() { l.Debugf("close snapshot %p", snap) snap.Release() }() start := globalKey(folder, nil) limit := globalKey(folder, []byte{0xff, 0xff, 0xff, 0xff}) dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil) defer dbi.Release() batch := new(leveldb.Batch) l.Debugf("new batch %p", batch) var fk []byte for dbi.Next() { gk := dbi.Key() var vl versionList err := vl.UnmarshalXDR(dbi.Value()) if err != nil { panic(err) } // Check the global version list for consistency. An issue in previous // versions of goleveldb could result in reordered writes so that // there are global entries pointing to no longer existing files. Here // we find those and clear them out. name := globalKeyName(gk) var newVL versionList for _, version := range vl.versions { fk = deviceKeyInto(fk[:cap(fk)], folder, version.device, name) l.Debugf("snap.Get %p %x", snap, fk) _, err := snap.Get(fk, nil) if err == leveldb.ErrNotFound { continue } if err != nil { panic(err) } newVL.versions = append(newVL.versions, version) } if len(newVL.versions) != len(vl.versions) { l.Infof("db repair: rewriting global version list for %x %x", gk[1:1+64], gk[1+64:]) batch.Put(dbi.Key(), newVL.MustMarshalXDR()) } } l.Debugf("db check completed for %q", folder) db.Write(batch, nil) }
func getVersion(db *leveldb.DB) (int64, error) { data, err := db.Get([]byte(versionKey), nil) if err == leveldb.ErrNotFound { return nilDataVersion, nil } else if len(data) != 8 { return 0, fmt.Errorf("version value format is unknown") } return int64(order.Uint64(data)), nil }
/* /Put the DATABASE /Write the Key and Value */ func dbPut(dbObj *leveldb.DB, putData string, putDBKey []byte) int { var result int = 0 putError := dbObj.Put([]byte(putDBKey), []byte(putData), nil) if putError != nil { result = 1 } return result }
func setVersion(db *leveldb.DB, version int64, wo *opt.WriteOptions) error { buf := make([]byte, 8) order.PutUint64(buf, uint64(version)) err := db.Put([]byte(versionKey), buf, wo) if err != nil { clog.Errorf("Couldn't write version!") return err } return nil }
/* /Read the DATABASE /Read the All Key and Value by Key */ func dbAllRead(dbObj *leveldb.DB, readDBKey []byte) { //create Iterator iter := dbObj.NewIterator(nil, nil) //Read to the end of the Key for iter.Seek(readDBKey); iter.Valid(); iter.Next() { fmt.Println(string(iter.Value())) } }
func saveVideos(db *leveldb.DB, videos Videos) { for _, video := range videos { data, _ := db.Get([]byte(video.Url), nil) if len(data) == 0 { encoded, _ := json.Marshal(video) _ = db.Put([]byte(video.Url), []byte(encoded), nil) } } }
func levelDbWrite(db *leveldb.DB, key uint64, offset uint32, size uint32) error { bytes := make([]byte, 16) util.Uint64toBytes(bytes[0:8], key) util.Uint32toBytes(bytes[8:12], offset) util.Uint32toBytes(bytes[12:16], size) if err := db.Put(bytes[0:8], bytes[8:16], nil); err != nil { return fmt.Errorf("failed to write leveldb: %v", err) } return nil }