//Function: getFile
//Checks whether a file exists in the database or not
//Parameters:
//  fileName - file name
//  fileDB   - database connection
//Return:
//  true  - file found
//  false - file not found
//  the file contents, plus the iterator (the caller must Release it;
//  the value slice is only valid until then)
func getFile(fileName string, fileDB *leveldb.DB) (bool, []byte, iterator.Iterator) {
	found := false
	var val []byte
	iter := fileDB.NewIterator(nil, nil)
	//doOperation([]byte(fileNm), nil, "delete", fileDB, wo)
	for iter.Next() {
		key := iter.Key()
		if string(key) == fileName {
			val = iter.Value()
			found = true
			break
		}
	}
	if found {
		return true, val, iter
	}
	return false, nil, iter
}
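Because getFile hands the live iterator back to the caller, the caller is responsible for releasing it once it is done with the value slice. A hypothetical call site (printFile is not in the source, just an illustration):

// Hypothetical caller of getFile: uses the value, then releases the
// iterator that getFile returned, since the value slice is only valid
// while the iterator is alive.
func printFile(name string, fileDB *leveldb.DB) {
	found, data, iter := getFile(name, fileDB)
	if found {
		fmt.Printf("%s: %d bytes\n", name, len(data))
	}
	iter.Release()
}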
// WalkTasks loads the task list from levelDB and decides whether each task
// was executed recently.
func WalkTasks(base, span int64, db *leveldb.DB) {
	var task Task
	var err error
	var ok bool
	timer.DropMap = make(map[string]bool)
	s := time.Now()
	iter := db.NewIterator(nil, nil)
	parseMap := make(map[string][]string, 1024)
	for iter.Next() {
		key := string(iter.Key())
		val := string(iter.Value())
		if _, ok = parseMap[val]; !ok {
			parseMap[val] = make([]string, 0)
		}
		parseMap[val] = append(parseMap[val], key)
	}
	iter.Release()
	inWalking = true
	for val, keys := range parseMap {
		task, err = parseTask(val)
		if err == nil {
			task.Base = base
			ret := task.Dispatch(span, true)
			for _, key := range keys {
				task.Key = key
				timer.Add(ret, key)
			}
		}
	}
	inWalking = false
	e := time.Now()
	logPrintln(e.Sub(s))
}
func dbEmpty(db *leveldb.DB) bool {
	iter := db.NewIterator(nil, nil)
	defer iter.Release()
	if iter.Next() {
		return false
	}
	return true
}
// dbAllRead reads the database: starting at readDBKey, it prints every
// value from that key to the end.
func dbAllRead(dbObj *leveldb.DB, readDBKey []byte) {
	// Create an iterator and walk from readDBKey to the last key.
	iter := dbObj.NewIterator(nil, nil)
	defer iter.Release()
	for iter.Seek(readDBKey); iter.Valid(); iter.Next() {
		fmt.Println(string(iter.Value()))
	}
}
func dumpsize(ldb *leveldb.DB) {
	h := &ElementHeap{}
	heap.Init(h)

	it := ldb.NewIterator(nil, nil)
	defer it.Release()
	var dev protocol.DeviceID
	var ele SizedElement
	for it.Next() {
		key := it.Key()
		switch key[0] {
		case db.KeyTypeDevice:
			folder := nulString(key[1 : 1+64])
			devBytes := key[1+64 : 1+64+32]
			name := nulString(key[1+64+32:])
			copy(dev[:], devBytes)
			ele.key = fmt.Sprintf("DEVICE:%s:%s:%s", dev, folder, name)
		case db.KeyTypeGlobal:
			folder := nulString(key[1 : 1+64])
			name := nulString(key[1+64:])
			ele.key = fmt.Sprintf("GLOBAL:%s:%s", folder, name)
		case db.KeyTypeBlock:
			folder := nulString(key[1 : 1+64])
			hash := key[1+64 : 1+64+32]
			name := nulString(key[1+64+32:])
			ele.key = fmt.Sprintf("BLOCK:%s:%x:%s", folder, hash, name)
		case db.KeyTypeDeviceStatistic:
			ele.key = fmt.Sprintf("DEVICESTATS:%s", key[1:])
		case db.KeyTypeFolderStatistic:
			ele.key = fmt.Sprintf("FOLDERSTATS:%s", key[1:])
		case db.KeyTypeVirtualMtime:
			ele.key = fmt.Sprintf("MTIME:%s", key[1:])
		default:
			ele.key = fmt.Sprintf("UNKNOWN:%x", key)
		}
		ele.size = len(it.Value())
		heap.Push(h, ele)
	}

	for h.Len() > 0 {
		ele = heap.Pop(h).(SizedElement)
		fmt.Println(ele.key, ele.size)
	}
}
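dumpsize relies on an ElementHeap of SizedElement values and a nulString helper defined elsewhere in the tool. A minimal sketch of plausible definitions (the largest-first ordering is an assumption, not taken from the source):

// Assumed definitions for the helpers dumpsize uses. SizedElement pairs a
// printable key with its value's size; ElementHeap implements
// heap.Interface ordered by size (largest first, assumed); nulString trims
// NUL padding from fixed-width key fields.
type SizedElement struct {
	key  string
	size int
}

type ElementHeap []SizedElement

func (h ElementHeap) Len() int            { return len(h) }
func (h ElementHeap) Less(i, j int) bool  { return h[i].size > h[j].size }
func (h ElementHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *ElementHeap) Push(x interface{}) { *h = append(*h, x.(SizedElement)) }
func (h *ElementHeap) Pop() interface{} {
	old := *h
	n := len(old)
	x := old[n-1]
	*h = old[:n-1]
	return x
}

func nulString(bs []byte) string {
	for i, b := range bs {
		if b == 0 {
			return string(bs[:i])
		}
	}
	return string(bs)
}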
func fetchAllZip(db *leveldb.DB) {
	iter := db.NewIterator(nil, nil)
	for iter.Next() {
		// Remember that the contents of the returned slices should not be
		// modified, and are only valid until the next call to Next.
		key := iter.Key()
		value := iter.Value()
		fmt.Printf("\t%v: %v\n", string(key), string(value))
	}
	iter.Release()
	if err := iter.Error(); err != nil {
		log.Fatal("Cannot fetch all data: ", err)
	}
}
// convertKeyFormat converts from the v0.12 to the v0.13 database format, to
// avoid having to do a rescan. The change is in the key format for folder
// labels, so we basically just iterate over the database rewriting keys as
// necessary.
func convertKeyFormat(from, to *leveldb.DB) error {
	l.Infoln("Converting database key format")
	blocks, files, globals, unchanged := 0, 0, 0, 0

	dbi := newDBInstance(to)
	i := from.NewIterator(nil, nil)
	defer i.Release()
	for i.Next() {
		key := i.Key()
		switch key[0] {
		case KeyTypeBlock:
			folder, file := oldFromBlockKey(key)
			folderIdx := dbi.folderIdx.ID([]byte(folder))
			hash := key[1+64:]
			newKey := blockKeyInto(nil, hash, folderIdx, file)
			if err := to.Put(newKey, i.Value(), nil); err != nil {
				return err
			}
			blocks++
		case KeyTypeDevice:
			newKey := dbi.deviceKey(oldDeviceKeyFolder(key), oldDeviceKeyDevice(key), oldDeviceKeyName(key))
			if err := to.Put(newKey, i.Value(), nil); err != nil {
				return err
			}
			files++
		case KeyTypeGlobal:
			newKey := dbi.globalKey(oldGlobalKeyFolder(key), oldGlobalKeyName(key))
			if err := to.Put(newKey, i.Value(), nil); err != nil {
				return err
			}
			globals++
		case KeyTypeVirtualMtime:
			// Cannot be converted; we drop it instead :(
		default:
			if err := to.Put(key, i.Value(), nil); err != nil {
				return err
			}
			unchanged++
		}
	}

	l.Infof("Converted %d blocks, %d files, %d globals (%d unchanged).", blocks, files, globals, unchanged)
	return nil
}
func dump(ldb *leveldb.DB) {
	it := ldb.NewIterator(nil, nil)
	defer it.Release()
	var dev protocol.DeviceID
	for it.Next() {
		key := it.Key()
		switch key[0] {
		case db.KeyTypeDevice:
			folder := nulString(key[1 : 1+64])
			devBytes := key[1+64 : 1+64+32]
			name := nulString(key[1+64+32:])
			copy(dev[:], devBytes)
			fmt.Printf("[device] F:%q N:%q D:%v\n", folder, name, dev)

			var f protocol.FileInfo
			err := f.UnmarshalXDR(it.Value())
			if err != nil {
				log.Fatal(err)
			}
			fmt.Printf("  N:%q\n  F:%#o\n  M:%d\n  V:%v\n  S:%d\n  B:%d\n", f.Name, f.Flags, f.Modified, f.Version, f.Size(), len(f.Blocks))
		case db.KeyTypeGlobal:
			folder := nulString(key[1 : 1+64])
			name := nulString(key[1+64:])
			var flv db.VersionList
			flv.UnmarshalXDR(it.Value())
			fmt.Printf("[global] F:%q N:%q V: %s\n", folder, name, flv)
		case db.KeyTypeBlock:
			folder := nulString(key[1 : 1+64])
			hash := key[1+64 : 1+64+32]
			name := nulString(key[1+64+32:])
			fmt.Printf("[block] F:%q H:%x N:%q I:%d\n", folder, hash, name, binary.BigEndian.Uint32(it.Value()))
		case db.KeyTypeDeviceStatistic:
			fmt.Printf("[dstat]\n  %x\n  %x\n", it.Key(), it.Value())
		case db.KeyTypeFolderStatistic:
			fmt.Printf("[fstat]\n  %x\n  %x\n", it.Key(), it.Value())
		case db.KeyTypeVirtualMtime:
			fmt.Printf("[mtime]\n  %x\n  %x\n", it.Key(), it.Value())
		default:
			fmt.Printf("[???]\n  %x\n  %x\n", it.Key(), it.Value())
		}
	}
}
func getVideosDb(db *leveldb.DB) Videos {
	iter := db.NewIterator(nil, nil)
	videos := make(Videos, 0)
	for iter.Next() {
		var video Video
		if err := json.Unmarshal(iter.Value(), &video); err != nil {
			log.Println("skipping undecodable record:", err)
			continue
		}
		videos = append(videos, video)
	}
	iter.Release()
	if err := iter.Error(); err != nil {
		log.Println("iterator error:", err)
	}
	return videos
}
func fetchStore(store *leveldb.DB, cols []*model.ColumnInfo) ([]*plan.Row, error) {
	var rows []*plan.Row
	iter := store.NewIterator(nil, nil)
	defer iter.Release()
	for iter.Next() {
		value := iter.Value()
		record, err := decodeValue(value, cols)
		if err != nil {
			return nil, errors.Trace(err)
		}
		rows = append(rows, &plan.Row{Data: record})
	}
	if err := iter.Error(); err != nil {
		return nil, errors.Trace(err)
	}
	return rows, nil
}
func clean(statsLog io.Writer, db *leveldb.DB) {
	for {
		now := next(cacheLimitSeconds)
		nowSecs := now.Unix()

		var kept, deleted int64

		iter := db.NewIterator(nil, nil)
		for iter.Next() {
			var addrs addressList
			addrs.UnmarshalXDR(iter.Value())

			// Remove expired addresses.
			newAddrs := addrs.addresses
			for i := 0; i < len(newAddrs); i++ {
				if nowSecs-newAddrs[i].seen > cacheLimitSeconds {
					newAddrs[i] = newAddrs[len(newAddrs)-1]
					newAddrs = newAddrs[:len(newAddrs)-1]
					// Re-examine index i, which now holds the swapped-in
					// element; without this, every element moved into the
					// vacated slot would be skipped.
					i--
				}
			}

			// Delete empty records.
			if len(newAddrs) == 0 {
				db.Delete(iter.Key(), nil)
				deleted++
				continue
			}

			// Update changed records.
			if len(newAddrs) != len(addrs.addresses) {
				addrs.addresses = newAddrs
				db.Put(iter.Key(), addrs.MarshalXDR(), nil)
			}

			kept++
		}
		iter.Release()

		fmt.Fprintf(statsLog, "%d Kept:%d Deleted:%d Took:%0.04fs\n", nowSecs, kept, deleted, time.Since(now).Seconds())
	}
}
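clean also assumes a next helper that blocks until the next interval boundary and returns the wakeup time. A plausible sketch, assumed rather than taken from the source:

// Assumed helper: sleep until the next multiple of intv seconds and return
// the wakeup time, so each cleaning pass starts on an interval boundary.
func next(intv int64) time.Time {
	d := time.Duration(intv) * time.Second
	t0 := time.Now()
	t1 := t0.Truncate(d).Add(d)
	time.Sleep(t1.Sub(t0))
	return t1
}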
// Helper function to initialize a streamIdGenerator.
func initStreamIdGenerator(db *leveldb.DB) (*streamIdGenerator, error) {
	gen := newStreamIdGenerator()

	searchKey := eventStoreKey{
		streamPrefix,
		nil,
		nil,
	}
	ro := &opt.ReadOptions{}
	it := db.NewIterator(nil, ro)
	defer it.Release()
	for it.Seek(searchKey.toBytes()); it.Valid(); it.Next() {
		key, err := newEventStoreKey(it.Key())
		if err != nil {
			log.Println("A key could not be deserialized:")
			log.Println(string(it.Key()))
			return nil, err
		}
		if !bytes.Equal(key.groupKey, streamPrefix) {
			// We have reached the end of the stream listing.
			break
		}
		stream := key.key
		latestId := loadByteCounter(it.Value())
		nextId := latestId.NewIncrementedCounter()
		if err = gen.Register(stream, nextId); err != nil {
			return nil, err
		}
	}
	return gen, nil
}
func buildGetAllHandler(modelName string, db *leveldb.DB) func(w http.ResponseWriter, r *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		keys := []string{}
		prefix := modelName + ":"
		iter := db.NewIterator(util.BytesPrefix([]byte(prefix)), nil)
		for iter.Next() {
			keys = append(keys, strings.TrimPrefix(string(iter.Key()), prefix))
		}
		iter.Release()
		if err := iter.Error(); err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "application/json")
		bs, err := json.Marshal(keys)
		if err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			w.Write([]byte(fmt.Sprintf("Error in db: %v", err.Error())))
			return
		}
		w.Write(bs)
	}
}
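A hypothetical way to mount the handler with net/http (the "videos" model name, the route, and the listen address are all assumptions):

// Hypothetical wiring of buildGetAllHandler into an HTTP server; every
// name here ("videos", the route, the port) is an assumption for the sake
// of the example.
func serveKeys(db *leveldb.DB) error {
	http.HandleFunc("/videos/keys", buildGetAllHandler("videos", db))
	return http.ListenAndServe(":8080", nil)
}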
func upgrade1To2(db *leveldb.DB) error {
	fmt.Println("Upgrading v1 to v2...")

	type v1IndexEntry struct {
		Subject   string  `json:"subject"`
		Predicate string  `json:"predicate"`
		Object    string  `json:"object"`
		Label     string  `json:"label,omitempty"`
		History   []int64 `json:"History"`
	}

	type v1ValueData struct {
		Name string `json:"Name"`
		Size int64  `json:"Size"`
	}

	var (
		spoPref = []byte{spo[0].Prefix(), spo[1].Prefix()}
		ospPref = []byte{osp[0].Prefix(), osp[1].Prefix()}
		posPref = []byte{pos[0].Prefix(), pos[1].Prefix()}
		cpsPref = []byte{cps[0].Prefix(), cps[1].Prefix()}
	)

	{
		fmt.Println("Upgrading bucket z")
		it := db.NewIterator(&util.Range{Start: []byte{'z'}, Limit: []byte{'z' + 1}}, nil)
		for it.Next() {
			k, v := it.Key(), it.Value()
			var val v1ValueData
			if err := json.Unmarshal(v, &val); err != nil {
				return err
			}
			node := proto.NodeData{
				Size:  val.Size,
				Value: proto.MakeValue(quad.Raw(val.Name)),
			}
			nv, err := node.Marshal()
			if err != nil {
				return err
			}
			if err = db.Put(k, nv, nil); err != nil {
				return err
			}
		}
		it.Release()
	}

	for _, pref := range [4][]byte{spoPref, ospPref, posPref, cpsPref} {
		fmt.Println("Upgrading bucket", string(pref))
		end := []byte{pref[0], pref[1] + 1}
		it := db.NewIterator(&util.Range{Start: pref, Limit: end}, nil)
		for it.Next() {
			k, v := it.Key(), it.Value()
			var entry v1IndexEntry
			if err := json.Unmarshal(v, &entry); err != nil {
				return err
			}
			var h proto.HistoryEntry
			h.History = make([]uint64, len(entry.History))
			for i, id := range entry.History {
				h.History[i] = uint64(id)
			}
			nv, err := h.Marshal()
			if err != nil {
				return err
			}
			if err = db.Put(k, nv, nil); err != nil {
				return err
			}
		}
		it.Release()
	}

	{
		fmt.Println("Upgrading bucket d")
		it := db.NewIterator(&util.Range{Start: []byte{'d'}, Limit: []byte{'d' + 1}}, nil)
		for it.Next() {
			k, v := it.Key(), it.Value()
			id, err := strconv.ParseInt(string(k[1:]), 16, 64)
			if err != nil {
				return err
			}
			nk := createDeltaKeyFor(id)
			var val graph.Delta
			if err := json.Unmarshal(v, &val); err != nil {
				return err
			}
			p := deltaToProto(val)
			nv, err := p.Marshal()
			if err != nil {
				return err
			}
			b := &leveldb.Batch{}
			b.Put(nk, nv)
			b.Delete(k)
			if err = db.Write(b, nil); err != nil {
				return err
			}
		}
		it.Release()
	}

	return nil
}
func MetaCheck(benDB, malDB *leveldb.DB, bmd5, mmd5 string) bool {
	beninfo, bok := benMetaMap[bmd5]
	malinfo, mok := malMetaMap[mmd5]
	if bok && mok {
		if beninfo.Certmd5 == malinfo.Certmd5 {
			Info.Println("Metainfo: cert filtered", bmd5, mmd5)
			return false
		} else if beninfo.Dexsize < malinfo.Dexsize*2 && beninfo.Dexsize*2 > malinfo.Dexsize {
			Info.Println("Metainfo: dexsize", bmd5, mmd5)
			return true
		} else if beninfo.Pkgname != "" && malinfo.Pkgname != "" {
			if levenshtein(beninfo.Pkgname, malinfo.Pkgname) < 4 {
				Info.Println("Metainfo: pkgname", bmd5, mmd5)
				return true
			}
		} else if beninfo.Dirs != nil && malinfo.Dirs != nil {
			sim := MapCompare(beninfo.Dirs, malinfo.Dirs)
			if sim > 0.5 && sim < 2 {
				Info.Println("Metainfo: dirs", bmd5, mmd5)
				return true
			}
		} else if beninfo.Files != nil && malinfo.Files != nil {
			sim := MapCompare(beninfo.Files, malinfo.Files)
			if sim > 0.5 && sim < 2 {
				Info.Println("Metainfo: files", bmd5, mmd5)
				return true
			}
		}
	}

	var bnumFunc, mnumFunc float64
	var bkey, mkey, bvalue, mvalue []byte
	var btopPkgs, mtopPkgs []string

	beniter := benDB.NewIterator(util.BytesPrefix([]byte("p-"+bmd5)), nil)
	defer beniter.Release()
	for beniter.Next() {
		bkey = beniter.Key()
		bnumFunc, _ = strconv.ParseFloat(strings.Split(string(bkey), "-")[1], 64)
		bvalue = beniter.Value()
		if err := json.Unmarshal(bvalue, &btopPkgs); err != nil {
			Error.Println("Unmarshal error", bmd5, err)
		}
	}

	maliter := malDB.NewIterator(util.BytesPrefix([]byte("h-"+mmd5)), nil)
	defer maliter.Release()
	for maliter.Next() {
		mkey = maliter.Key()
		mnumFunc, _ = strconv.ParseFloat(strings.Split(string(mkey), "-")[1], 64)
		mvalue = maliter.Value()
		if err := json.Unmarshal(mvalue, &mtopPkgs); err != nil {
			Error.Println("Unmarshal error", mmd5, err)
		}
	}

	if bnumFunc/mnumFunc < 2 && bnumFunc/mnumFunc > 0.5 {
		Info.Println("Metainfo: numfunc", bmd5, mmd5)
		return true
	}
	sim := SliceCompare(btopPkgs, mtopPkgs)
	if sim > 0.5 && sim < 2 {
		Info.Println("Metainfo: toppkgs", bmd5, mmd5)
		return true
	}
	return false
}
func main() {
	rootCmd := &cobra.Command{
		Short: "leveldb-tools",
	}
	var db *leveldb.DB
	openDB := func(cmd *cobra.Command, args []string) {
		if len(args) != 1 {
			rootCmd.Usage()
			os.Exit(1)
		}
		var err error
		opts := &opt.Options{ErrorIfMissing: cmd.Use == "dump", Strict: opt.StrictAll}
		if db, err = leveldb.OpenFile(args[0], opts); err != nil {
			log.Fatal(err)
		}
	}

	dumpCmd := &cobra.Command{
		Use:   "dump",
		Short: "dump database",
		Run: func(cmd *cobra.Command, args []string) {
			defer db.Close()
			defer os.Stdout.Close()
			w := bufio.NewWriter(os.Stdout)
			defer w.Flush()
			iter := db.NewIterator(nil, &opt.ReadOptions{Strict: opt.StrictAll, DontFillCache: true})
			defer iter.Release()
			sep := sep[:]
			for iter.Next() {
				k, v := iter.Key(), iter.Value()
				w.WriteString(fmt.Sprintf("+%d,%d:", len(k), len(v)))
				w.Write(k)
				w.Write(sep)
				w.Write(v)
				if err := w.WriteByte('\n'); err != nil {
					log.Fatal(err)
				}
			}
			if err := iter.Error(); err != nil {
				log.Fatal(err)
			}
		},
		PersistentPreRun: openDB,
	}
	rootCmd.AddCommand(dumpCmd)

	loadCmd := &cobra.Command{
		Use:   "load",
		Short: "load database",
		Run: func(cmd *cobra.Command, args []string) {
			defer func() {
				if err := db.Close(); err != nil {
					log.Fatal(err)
				}
			}()
			r := bufio.NewReader(os.Stdin)
			var lk, lv int
			var k, v []byte
			sepLen := len(sep)
			sep := sep[:]
			for {
				if _, err := fmt.Fscanf(r, "+%d,%d:", &lk, &lv); err != nil {
					if err == io.EOF || err == io.ErrUnexpectedEOF {
						break
					}
					log.Fatal(err)
				}
				if cap(k) < lk {
					k = make([]byte, lk*2)
				}
				if cap(v) < lv+sepLen+1 {
					v = make([]byte, lv*2+2)
				}
				n, err := io.ReadFull(r, k[:lk])
				if err != nil {
					log.Fatal(err)
				}
				k = k[:n]
				if n, err = io.ReadFull(r, v[:lv+sepLen+1]); err != nil {
					log.Fatal(err)
				}
				if !bytes.Equal(sep, v[:sepLen]) {
					log.Fatalf("awaited %q, got %q", sep, v)
				}
				v = v[:n]
				if v[n-1] != '\n' {
					log.Fatalf("should end with EOL, got %q", v)
				}
				// Strip the separator prefix and the trailing newline.
				v = v[sepLen : n-1]
				if err := db.Put(k, v, nil); err != nil {
					log.Fatal(err)
				}
			}
		},
		PersistentPreRun: openDB,
	}
	rootCmd.AddCommand(loadCmd)

	rootCmd.Execute()
}