// write to leveldb immediately func cacheStore(db *leveldb.DB, node *osmpbf.Node) { id, val := formatLevelDB(node) err := db.Put([]byte(id), []byte(val), nil) if err != nil { log.Fatal(err) } }
func saveItem(db *leveldb.DB, it *gohn.Item) { pbmsg, err := proto.Marshal(it) check(err) key := []byte(strconv.Itoa(int(it.GetId()))) err = db.Put(key, pbmsg, nil) check(err) }
/* /Put the DATABASE /Write the Key and Value */ func dbPut(dbObj *leveldb.DB, putData string, putDBKey []byte) int { var result int = 0 putError := dbObj.Put([]byte(putDBKey), []byte(putData), nil) if putError != nil { result = 1 } return result }
func setVersion(db *leveldb.DB, version int64, wo *opt.WriteOptions) error { buf := make([]byte, 8) order.PutUint64(buf, uint64(version)) err := db.Put([]byte(versionKey), buf, wo) if err != nil { clog.Errorf("Couldn't write version!") return err } return nil }
func levelDbWrite(db *leveldb.DB, key uint64, offset uint32, size uint32) error { bytes := make([]byte, 16) util.Uint64toBytes(bytes[0:8], key) util.Uint32toBytes(bytes[8:12], offset) util.Uint32toBytes(bytes[12:16], size) if err := db.Put(bytes[0:8], bytes[8:16], nil); err != nil { return fmt.Errorf("failed to write leveldb: %v", err) } return nil }
func saveVideos(db *leveldb.DB, videos Videos) { for _, video := range videos { data, _ := db.Get([]byte(video.Url), nil) if len(data) == 0 { encoded, _ := json.Marshal(video) _ = db.Put([]byte(video.Url), []byte(encoded), nil) } } }
// fSaveTerm persists the node's current term i to the "currentTerm"
// LevelDB store, keyed by this server's ID.
//
// NOTE(review): the for-loop busy-retries OpenFile with no backoff and
// the Put error is silently ignored — both worth confirming with the
// original author.
func (rn *RaftNode) fSaveTerm(i int) {
	var currentTermDB *leveldb.DB
	var err error
	currentTermDB, err = leveldb.OpenFile(PATH+"/currentTerm", nil)
	for err != nil {
		// Retry opening until it succeeds (e.g. while another holder of
		// the LevelDB lock releases it).
		currentTermDB, err = leveldb.OpenFile(PATH+"/currentTerm", nil)
	}
	// fmt.Println("sad", currentTermDB, reflect.TypeOf(currentTermDB), err, reflect.TypeOf(err))
	defer currentTermDB.Close()
	currentTermDB.Put([]byte(strconv.Itoa(rn.sm.ServerID)), []byte(strconv.Itoa(i)), nil)
}
func (rn *RaftNode) fSaveVotedFor(i int) { var votedForDB *leveldb.DB var err error votedForDB, err = leveldb.OpenFile(PATH+"/currentTerm", nil) for err != nil { votedForDB, err = leveldb.OpenFile(PATH+"/currentTerm", nil) } defer votedForDB.Close() votedForDB.Put([]byte(strconv.Itoa(rn.sm.ServerID)), []byte(strconv.Itoa(i)), nil) }
//Write entry to leveldb func (e *LogItem) writeToDB(db *leveldb.DB) error { var network bytes.Buffer enc := gob.NewEncoder(&network) err := enc.Encode(e) if err != nil { panic("gob error: " + err.Error()) } buf := make([]byte, 8) binary.LittleEndian.PutUint64(buf, e.Index) err = db.Put(buf, []byte(network.String()), nil) return err }
func InjectLevelDB(jsEngine *JSEngine, db *leveldb.DB) { jsEngine.Run("var db = {};") dbValue, _ := jsEngine.Get("db") dbObj := dbValue.Object() dbObj.Set("put", func(call otto.FunctionCall) otto.Value { key, err := call.Argument(0).ToString() if err != nil { log.Println("Error:", err.Error()) return otto.FalseValue() } value, err := call.Argument(1).ToString() if err != nil { log.Println("Error:", err.Error()) return otto.FalseValue() } err = db.Put([]byte(key), []byte(value), nil) if err != nil { log.Println("Error:", err.Error()) return otto.FalseValue() } return otto.TrueValue() }) dbObj.Set("get", func(call otto.FunctionCall) otto.Value { key, err := call.Argument(0).ToString() if err != nil { log.Println("Error:", err.Error()) return otto.FalseValue() } data, err := db.Get([]byte(key), nil) if err != nil { log.Println("Error:", err.Error()) return otto.FalseValue() } v, _ := otto.ToValue(string(data)) return v }) dbObj.Set("remove", func(call otto.FunctionCall) otto.Value { key, err := call.Argument(0).ToString() if err != nil { log.Println("Error:", err.Error()) return otto.FalseValue() } err = db.Delete([]byte(key), nil) if err != nil { log.Println("Error:", err.Error()) return otto.FalseValue() } return otto.TrueValue() }) }
func doOperation(fileNm []byte, finalByt []byte, cmdTyp string, fileDB *leveldb.DB, wo *opt.WriteOptions) { var err error mutex.Lock() switch cmdTyp { case "write": err = fileDB.Put(fileNm, finalByt, nil) case "delete": err = fileDB.Delete([]byte(fileNm), wo) } mutex.Unlock() checkError(err) }
// doWrite stores cmd.Content under cmd.Filename and updates the
// in-memory metadata (version, size, optional expiry timer) under
// dirMutex. It returns an "O" (ok) reply carrying the new version.
func doWrite(cmd *utils.Cmd, db *leveldb.DB) *utils.Cmd {
	dirMutex.Lock()
	defer dirMutex.Unlock()
	fi := dir[cmd.Filename]
	if fi != nil {
		// Existing file: cancel any pending expiry deletion before rewriting.
		if fi.Timer != nil {
			fi.Timer.Stop()
			fi.Timer = nil
		}
	} else {
		fi = &Metadata{}
	}
	if cmd.Exptime != 0 {
		dur := time.Duration(cmd.Exptime) * time.Second
		fi.Exptime = time.Now().Add(dur)
		// create timer for deletion
		// NOTE(review): ver captures gversion *before* the increment
		// below — confirm doDelete expects the pre-write version.
		timerFunc := func(name string, ver int64) func() {
			return func() {
				doDelete(&utils.Cmd{Type: "delete", Filename: name, Version: ver}, db)
			}
		}(cmd.Filename, gversion)
		fi.Timer = time.AfterFunc(dur, timerFunc)
	}
	err := db.Put([]byte(cmd.Filename), cmd.Content, nil)
	if err != nil {
		log.Fatal(err)
	}
	gversion += 1
	fi.Version = gversion
	fi.Numbytes = cmd.Numbytes
	dir[cmd.Filename] = fi
	return &utils.Cmd{Type: "O", Version: gversion}
}
// applyLog runs forever, waking every 500ms to apply committed-but-
// unapplied log entries to the LevelDB store. Only "set <key> <value>"
// entries are handled; lastApplied advances only on a successful Put.
//
// NOTE(review): the loop bound `i < sc.commitIndex` never applies the
// entry at commitIndex itself — confirm whether commitIndex is meant to
// be exclusive here.
func applyLog(sc *ServerConfig, store *leveldb.DB) {
	timer := time.NewTimer(500 * time.Millisecond)
	var er error
	for {
		<-timer.C
		dbg4.Println(sc.basicCluster.Mypid, "logAplied", sc.commitIndex, sc.lastApplied, sc.votedFor)
		for i := sc.lastApplied + 1; i < sc.commitIndex; i++ {
			logMsg := sc.log[i].Msg.(string)
			splits := strings.Split(logMsg, " ")
			//dbg5.Println(splits)
			if splits[0] == "set" {
				er = store.Put([]byte(splits[1]), []byte(splits[2]), nil)
				//dbg5.Println("storing", er)
				if er == nil {
					sc.lastApplied += 1
				}
			}
		}
		// Re-arm for the next round (a time.Ticker would avoid
		// allocating a fresh timer each iteration).
		timer = time.NewTimer(500 * time.Millisecond)
	}
}
func update(db *leveldb.DB, id protocol.DeviceID, addrs []address) { var newAddrs addressList val, err := db.Get(id[:], nil) if err == nil { newAddrs.UnmarshalXDR(val) } nextAddr: for _, newAddr := range addrs { for i, exAddr := range newAddrs.addresses { if bytes.Compare(newAddr.ip, exAddr.ip) == 0 { newAddrs.addresses[i] = newAddr continue nextAddr } } newAddrs.addresses = append(newAddrs.addresses, newAddr) } db.Put(id[:], newAddrs.MarshalXDR(), nil) }
func clean(statsLog io.Writer, db *leveldb.DB) { for { now := next(cacheLimitSeconds) nowSecs := now.Unix() var kept, deleted int64 iter := db.NewIterator(nil, nil) for iter.Next() { var addrs addressList addrs.UnmarshalXDR(iter.Value()) // Remove expired addresses newAddrs := addrs.addresses for i := 0; i < len(newAddrs); i++ { if nowSecs-newAddrs[i].seen > cacheLimitSeconds { newAddrs[i] = newAddrs[len(newAddrs)-1] newAddrs = newAddrs[:len(newAddrs)-1] } } // Delete empty records if len(newAddrs) == 0 { db.Delete(iter.Key(), nil) deleted++ continue } // Update changed records if len(newAddrs) != len(addrs.addresses) { addrs.addresses = newAddrs db.Put(iter.Key(), addrs.MarshalXDR(), nil) } kept++ } iter.Release() fmt.Fprintf(statsLog, "%d Kept:%d Deleted:%d Took:%0.04fs\n", nowSecs, kept, deleted, time.Since(now).Seconds()) } }
// convertKeyFormat converts from the v0.12 to the v0.13 database format, to
// avoid having to do rescan. The change is in the key format for folder
// labels, so we basically just iterate over the database rewriting keys as
// necessary.
func convertKeyFormat(from, to *leveldb.DB) error {
	l.Infoln("Converting database key format")
	blocks, files, globals, unchanged := 0, 0, 0, 0
	dbi := newDBInstance(to)
	i := from.NewIterator(nil, nil)
	for i.Next() {
		key := i.Key()
		// The first key byte identifies the record type.
		switch key[0] {
		case KeyTypeBlock:
			folder, file := oldFromBlockKey(key)
			folderIdx := dbi.folderIdx.ID([]byte(folder))
			// NOTE(review): the 1+64 offset implies the old layout is
			// a type byte followed by a 64-byte folder field before the
			// hash — confirm against the v0.12 key format.
			hash := key[1+64:]
			newKey := blockKeyInto(nil, hash, folderIdx, file)
			if err := to.Put(newKey, i.Value(), nil); err != nil {
				return err
			}
			blocks++
		case KeyTypeDevice:
			newKey := dbi.deviceKey(oldDeviceKeyFolder(key), oldDeviceKeyDevice(key), oldDeviceKeyName(key))
			if err := to.Put(newKey, i.Value(), nil); err != nil {
				return err
			}
			files++
		case KeyTypeGlobal:
			newKey := dbi.globalKey(oldGlobalKeyFolder(key), oldGlobalKeyName(key))
			if err := to.Put(newKey, i.Value(), nil); err != nil {
				return err
			}
			globals++
		case KeyTypeVirtualMtime:
			// Cannot be converted, we drop it instead :(
		default:
			// Unknown record types are copied through untouched.
			if err := to.Put(key, i.Value(), nil); err != nil {
				return err
			}
			unchanged++
		}
	}
	l.Infof("Converted %d blocks, %d files, %d globals (%d unchanged).", blocks, files, globals, unchanged)
	return nil
}
func Worker(mapChan chan *Map, db *leveldb.DB, wg *sync.WaitGroup) { // Decreasing internal counter for wait-group as soon as goroutine finishes var fphash *[]byte var bucket float64 var indexSplit []string var index string defer wg.Done() for input := range mapChan { indexSplit = strings.Split(input.index, "-") // FNV hash range [1, 2^32], divide into 2^4 buckets, each bucket has 2^28 unique values bucket = GetBucket(input.fphash) index = fmt.Sprintf("%.0f-%v-%v", bucket, indexSplit[2], indexSplit[3]) //fmt.Println(index) //resChan <- &Result{index, md5, fphash, fpmap} //mapbyte, _ := json.Marshal(*fpmap) // pkgbyte, _ := json.Marshal(*topPkgs) db.Put([]byte("h-"+index), *fphash, nil) // db.Put([]byte("m-"+md5), mapbyte, nil) // db.Put([]byte("p-"+md5+strconv.Itoa(numFunc)), pkgbyte, nil) } }
// Command write func write(conn net.Conn, input_bytes []byte, datadb *leveldb.DB, metadatadb *leveldb.DB, bytes_in_first_line int) { input_string := string(input_bytes) inputs := strings.Fields(input_string) filename := inputs[1] numbytes := inputs[2] var exptime string var exp string if len(inputs) == 4 { exptime = "2018-02-01 03:04:05 +0530 IST" exp = "0" } else { exp = inputs[3] delay, _ := strconv.Atoi(exp) exptime = time.Now().Add(time.Duration(delay) * time.Second).String() } mutex.Lock() prev_version_int, _, _, _, err := read_metadata(filename, metadatadb) new_version := "" if err == nil { if err != nil { log.Println("error in conversion: ", err) } saved_metadata := strconv.Itoa(prev_version_int+1) + " " + numbytes + " " + exptime + " " + exp err = metadatadb.Put([]byte(filename), []byte(saved_metadata), nil) if err != nil { log.Println("failed to add to database: ", err) } new_version = strconv.Itoa(prev_version_int + 1) } else { saved_metadata := strconv.Itoa(1) + " " + numbytes + " " + exptime + " " + exp err = metadatadb.Put([]byte(filename), []byte(saved_metadata), nil) if err != nil { log.Println("failed to add to database: ", err) } new_version = "1" } err = datadb.Put([]byte(filename), []byte(input_bytes[bytes_in_first_line:]), nil) mutex.Unlock() if err != nil { log.Println("failed to add to database: ", err) } response := "OK " + string(new_version) + "\r\n" // log.Println(response) some_int, err := conn.Write([]byte(response)) if err != nil { log.Println("failed to reply back: ", err, some_int) } }
// Worker pulls dex-file md5 names from md5Chan, runs dexdump on each,
// fingerprints the output, and stores the hash, map, and package data
// in LevelDB. Each dexdump run is killed after a 180s timeout. Signals
// wg on return.
func Worker(indir string, md5Chan chan string, db *leveldb.DB, wg *sync.WaitGroup) {
	// Decreasing internal counter for wait-group as soon as goroutine finishes
	var out io.ReadCloser
	var topPkgs *map[string]struct{}
	var fphash *[]byte
	var fpmap *Fpmap
	var numFunc int
	var inpath, command, index string
	var err error
	var nbit uint
	var nlogbit float64
	done := make(chan error, 1)
	defer wg.Done()
	for md5 := range md5Chan {
		inpath = fmt.Sprintf("%s", path.Join(indir, md5+".dex"))
		command = fmt.Sprintf("%s -d -l plain %s", path.Join(libpath, "dexdump"), inpath)
		cmd := exec.Command("sh", "-c", command)
		out, err = cmd.StdoutPipe()
		if err != nil {
			Error.Println("Error reading cmd", md5, err)
		}
		if err = cmd.Start(); err != nil {
			Error.Println("Error starting Cmd", err, md5)
		}
		// Fingerprint the dexdump output while the command runs.
		numFunc, topPkgs, fphash, fpmap = GenerateHash(GetCode(out))
		go func() { done <- cmd.Wait() }()
		select {
		case <-time.After(time.Second * 180):
			// Timed out: kill the process, then drain the Wait result so
			// the goroutine above does not leak.
			if err = cmd.Process.Kill(); err != nil {
				Error.Println("Failed to kill", err)
			}
			<-done
			Error.Println("Error exec time out ", md5)
		case err = <-done:
			if err != nil {
				Error.Println("Error waiting for Cmd", err, md5)
			}
		}
		nbit = bitcount(*fphash)
		if nbit > 0 {
			// Index key encodes a log2 bucket, the bit count, and the md5.
			nlogbit = math.Log2(float64(nbit)) + 1
			index = fmt.Sprintf("%.0f-%v-%v", nlogbit, nbit, md5)
			//fmt.Println(index)
			//resChan <- &Result{index, md5, fphash, fpmap}
			mapbyte, _ := json.Marshal(*fpmap)
			pkgbyte, _ := json.Marshal(*topPkgs)
			db.Put([]byte("h-"+index), *fphash, nil)
			db.Put([]byte("m-"+md5), mapbyte, nil)
			db.Put([]byte("p-"+md5+strconv.Itoa(numFunc)), pkgbyte, nil)
		} else {
			// Record empty fingerprints under a sentinel key.
			db.Put([]byte("h-Inf-0-"+md5), nil, nil)
			Info.Println("Empty fphash:", md5)
		}
	}
}
// handleConnection serves one client connection, looping over
// \r\n-framed commands (read / write / cas / delete) against three
// LevelDB stores: file contents (files), version numbers (db), and
// expiry timestamps (expiry). Protocol replies are written on conn.
func handleConnection(conn net.Conn, db *leveldb.DB, files *leveldb.DB, expiry *leveldb.DB, mutex *sync.RWMutex) {
	defer conn.Close() // make sure to close the connection even if we panic.
	// Make a buffer to hold incoming data with size bufferSize
	var bufferSize int64 = 1024
	buf := make([]byte, bufferSize)
	var fileName string
	var sLarge string = ""
	// Initial command string and number of bytes read with the command
	var bufferRead int = 0
	// Iterate over different commands, takes residual string as input
OUTER:
	for {
		// Check residual string from previous command
		buf = []byte(sLarge)
		bufferRead = len([]byte(sLarge))
		// Read the incoming connection into the buffer
		// Warning : We might not get the complete message
		for {
			if strings.Contains(sLarge, "\r\n") || len(sLarge) > 100000 {
				break
			}
			buf = make([]byte, bufferSize)
			n, _ := conn.Read(buf)
			sLarge += string(buf[:n])
			// Read till we get the complete command
			// NOTE(review): bufferRead is overwritten (not accumulated)
			// by each read — confirm downstream write() expects only the
			// size of the last read.
			bufferRead = n
		}
		//fmt.Println(sLarge, len(sLarge))
		s := strings.Split(sLarge, "\r\n")
		// Isolate main command from input
		commands := strings.Split(s[0], " ")
		// Remove extra input apart from basic command string
		var remainder int64 = int64(len([]byte(sLarge)) - len([]byte(s[0]+"\r\n")))
		sLarge = sLarge[len(s[0]+"\r\n"):]
		if len(commands) < 2 {
			conn.Write([]byte("ERR_CMD_ERR\r\n"))
			continue OUTER
		} else {
			fileName = commands[1]
		}
		if commands[0] == "read" {
			mutex.RLock()
			file, err := files.Get([]byte(fileName), nil)
			if err != nil {
				conn.Write([]byte("ERR_FILE_NOT_FOUND\r\n"))
				mutex.RUnlock()
				continue OUTER
			}
			//Check expiry
			// diff stays 0 when no expiry record exists; a stored -1
			// means "never expires".
			var diff int64 = 0
			expbytes, err := expiry.Get([]byte(fileName), nil)
			if err == nil {
				exp, _ := strconv.ParseInt(string(expbytes), 10, 64)
				if exp != -1 {
					diff = exp - time.Now().Unix()
				}
			}
			if diff < 0 {
				// Already expired: report as missing.
				conn.Write([]byte("ERR_FILE_NOT_FOUND\r\n"))
				mutex.RUnlock()
				continue OUTER
			}
			// Check version
			var version int64 = 0
			data, err := db.Get([]byte(fileName), nil)
			if err == nil {
				version, err = strconv.ParseInt(string(data), 10, 64)
			}
			mutex.RUnlock()
			conn.Write([]byte("CONTENTS " + strconv.FormatInt(version, 10) + " " + strconv.Itoa(len(file)-2) + " " + strconv.FormatInt(diff, 10) + "\r\n"))
			// Stream the contents byte by byte, excluding the trailing \r\n.
			for i := 0; i < len(file)-2; i++ {
				_, err = conn.Write(file[i : i+1])
			}
			_, err = conn.Write([]byte("\r\n"))
			if err != nil && err != io.EOF {
				conn.Write([]byte("ERR_INTERNAL\r\n"))
			}
		} else if commands[0] == "delete" {
			err := files.Delete([]byte(fileName), nil) // Remove entry from database
			//err := os.Remove(fileName)
			if err != nil {
				conn.Write([]byte("ERR_FILE_NOT_FOUND\r\n"))
			} else {
				db.Delete([]byte(fileName), nil)     // Remove entry from database
				expiry.Delete([]byte(fileName), nil) // Remove entry from database
				conn.Write([]byte("OK\r\n"))
			}
		} else if commands[0] == "write" {
			sLarge = ""
			if len(commands) < 3 {
				conn.Write([]byte("ERR_CMD_ERR\r\n"))
				continue OUTER
			}
			// NUmber of bytes to be written to the file
			fileSize, err := strconv.ParseInt(commands[2], 10, 64)
			if err != nil {
				conn.Write([]byte("ERR_CMD_ERR\r\n"))
				continue OUTER
			}
			// Account for the trailing \r\n after the content.
			fileSize = fileSize + int64(len([]byte("\r\n")))
			file := write(conn, fileSize, remainder, bufferRead, buf, &sLarge)
			mutex.Lock()
			err = files.Put([]byte(fileName), file, nil)
			// Get file version
			var version int64 = 0
			data, err := db.Get([]byte(fileName), nil)
			if err == nil {
				version, err = strconv.ParseInt(string(data), 10, 64)
			}
			err = db.Put([]byte(fileName), []byte(strconv.FormatInt(version+1, 10)), nil)
			conn.Write([]byte("OK " + strconv.FormatInt(version+1, 10) + "\r\n"))
			// Write expiry time
			var exp int64 = 0
			if len(commands) > 3 {
				exp, _ = strconv.ParseInt(commands[3], 10, 64)
			}
			if exp == 0 {
				// -1 marks "never expires".
				err = expiry.Put([]byte(fileName), []byte(strconv.FormatInt(-1, 10)), nil)
			} else {
				err = expiry.Put([]byte(fileName), []byte(strconv.FormatInt(time.Now().Unix()+exp, 10)), nil)
			}
			mutex.Unlock()
		} else if commands[0] == "cas" {
			sLarge = ""
			if len(commands) < 4 {
				conn.Write([]byte("ERR_CMD_ERR\r\n"))
				continue OUTER
			}
			// Number of bytes to be written to file
			fileSize, err := strconv.ParseInt(commands[3], 10, 64)
			if err != nil {
				conn.Write([]byte("ERR_CMD_ERR\r\n"))
				continue OUTER
			}
			fileSize = fileSize + int64(len([]byte("\r\n")))
			file := write(conn, fileSize, remainder, bufferRead, buf, &sLarge)
			mutex.Lock()
			// Get file version
			var version int64 = 0
			data, err := db.Get([]byte(fileName), nil)
			if err == nil {
				version, err = strconv.ParseInt(string(data), 10, 64)
			}
			if err != nil {
				conn.Write([]byte("ERR_INTERNAL\r\n"))
				mutex.Unlock()
				return
			}
			fileVersion, err := strconv.ParseInt(commands[2], 10, 64)
			// Check for version match
			if version != fileVersion {
				conn.Write([]byte("ERR_VERSION " + strconv.FormatInt(version, 10) + "\r\n"))
				mutex.Unlock()
				continue OUTER
			}
			err = files.Put([]byte(fileName), file, nil)
			err = db.Put([]byte(fileName), []byte(strconv.FormatInt(version, 10)), nil)
			conn.Write([]byte("OK " + strconv.FormatInt(version, 10) + "\r\n"))
			// Write expiry time
			var exp int64 = 0
			if len(commands) > 4 {
				exp, _ = strconv.ParseInt(commands[4], 10, 64)
			}
			if exp == 0 {
				err = expiry.Put([]byte(fileName), []byte(strconv.FormatInt(-1, 10)), nil)
			} else {
				err = expiry.Put([]byte(fileName), []byte(strconv.FormatInt(time.Now().Unix()+exp, 10)), nil)
			}
			mutex.Unlock()
		} else {
			conn.Write([]byte("ERR_CMD_ERR\r\n"))
			continue OUTER
		}
	}
}
// upgrade1To2 migrates the store's on-disk format from v1 (JSON-encoded
// records) to v2 (protobuf-encoded records), bucket by bucket:
//   - 'z' bucket: node values, re-encoded as proto.NodeData
//   - spo/osp/pos/cps index buckets: history lists, re-encoded as
//     proto.HistoryEntry
//   - 'd' bucket: deltas, re-keyed and re-encoded atomically via a batch
func upgrade1To2(db *leveldb.DB) error {
	fmt.Println("Upgrading v1 to v2...")
	// v1 on-disk JSON shapes, declared locally since they only exist
	// for this migration.
	type v1IndexEntry struct {
		Subject   string  `json:"subject"`
		Predicate string  `json:"predicate"`
		Object    string  `json:"object"`
		Label     string  `json:"label,omitempty"`
		History   []int64 `json:"History"`
	}
	type v1ValueData struct {
		Name string `json:"Name"`
		Size int64  `json:"Size"`
	}
	var (
		spoPref = []byte{spo[0].Prefix(), spo[1].Prefix()}
		ospPref = []byte{osp[0].Prefix(), osp[1].Prefix()}
		posPref = []byte{pos[0].Prefix(), pos[1].Prefix()}
		cpsPref = []byte{cps[0].Prefix(), cps[1].Prefix()}
	)
	{
		fmt.Println("Upgrading bucket z")
		it := db.NewIterator(&util.Range{Start: []byte{'z'}, Limit: []byte{'z' + 1}}, nil)
		for it.Next() {
			k, v := it.Key(), it.Value()
			var val v1ValueData
			if err := json.Unmarshal(v, &val); err != nil {
				return err
			}
			node := proto.NodeData{
				Size:  val.Size,
				Value: proto.MakeValue(quad.Raw(val.Name)),
			}
			nv, err := node.Marshal()
			if err != nil {
				return err
			}
			// Overwrite the record in place under the same key.
			if err = db.Put(k, nv, nil); err != nil {
				return err
			}
		}
		it.Release()
	}
	for _, pref := range [4][]byte{spoPref, ospPref, posPref, cpsPref} {
		fmt.Println("Upgrading bucket", string(pref))
		end := []byte{pref[0], pref[1] + 1}
		it := db.NewIterator(&util.Range{Start: pref, Limit: end}, nil)
		for it.Next() {
			k, v := it.Key(), it.Value()
			var entry v1IndexEntry
			if err := json.Unmarshal(v, &entry); err != nil {
				return err
			}
			var h proto.HistoryEntry
			h.History = make([]uint64, len(entry.History))
			for i, id := range entry.History {
				h.History[i] = uint64(id)
			}
			nv, err := h.Marshal()
			if err != nil {
				return err
			}
			if err = db.Put(k, nv, nil); err != nil {
				return err
			}
		}
		it.Release()
	}
	{
		fmt.Println("Upgrading bucket d")
		it := db.NewIterator(&util.Range{Start: []byte{'d'}, Limit: []byte{'d' + 1}}, nil)
		for it.Next() {
			k, v := it.Key(), it.Value()
			// Old delta keys carry the id as hex after the 'd' prefix.
			id, err := strconv.ParseInt(string(k[1:]), 16, 64)
			if err != nil {
				return err
			}
			nk := createDeltaKeyFor(id)
			var val graph.Delta
			if err := json.Unmarshal(v, &val); err != nil {
				return err
			}
			p := deltaToProto(val)
			nv, err := p.Marshal()
			if err != nil {
				return err
			}
			// Re-key atomically: write under the new key and delete the
			// old key in a single batch.
			b := &leveldb.Batch{}
			b.Put(nk, nv)
			b.Delete(k)
			if err = db.Write(b, nil); err != nil {
				return err
			}
		}
		it.Release()
	}
	return nil
}
func writeToDb(db *leveldb.DB, k string, v string) { log.Println(k + " - " + v) db.Put([]byte(k), []byte(v), nil) }
func main() { rootCmd := &cobra.Command{ Short: "leveldb-tools", } var db *leveldb.DB openDB := func(cmd *cobra.Command, args []string) { if len(args) != 1 { rootCmd.Usage() os.Exit(1) } var err error opts := &opt.Options{ErrorIfMissing: cmd.Use == "dump", Strict: opt.StrictAll} if db, err = leveldb.OpenFile(args[0], opts); err != nil { log.Fatal(err) } } dumpCmd := &cobra.Command{ Use: "dump", Short: "dump database", Run: func(cmd *cobra.Command, args []string) { defer db.Close() defer os.Stdout.Close() w := bufio.NewWriter(os.Stdout) defer w.Flush() iter := db.NewIterator(nil, &opt.ReadOptions{Strict: opt.StrictAll, DontFillCache: true}) defer iter.Release() sep := sep[:] for iter.Next() { k, v := iter.Key(), iter.Value() w.WriteString(fmt.Sprintf("+%d,%d:", len(k), len(v))) w.Write(k) w.Write(sep) w.Write(v) if err := w.WriteByte('\n'); err != nil { log.Fatal(err) } } if err := iter.Error(); err != nil { log.Fatal(err) } }, PersistentPreRun: openDB, } rootCmd.AddCommand(dumpCmd) loadCmd := &cobra.Command{ Use: "load", Short: "load database", Run: func(cmd *cobra.Command, args []string) { defer func() { if err := db.Close(); err != nil { log.Fatal(err) } }() r := bufio.NewReader(os.Stdin) var lk, lv int var k, v []byte sepLen := len(sep) sep := sep[:] for { if _, err := fmt.Fscanf(r, "+%d,%d:", &lk, &lv); err != nil { if err == io.EOF || err == io.ErrUnexpectedEOF { break } log.Fatal(err) } if cap(k) < lk { k = make([]byte, lk*2) } if cap(v) < lv+sepLen+1 { v = make([]byte, lv*2+2) } n, err := io.ReadFull(r, k[:lk]) if err != nil { log.Fatal(err) } k = k[:n] if n, err = io.ReadFull(r, v[:lv+sepLen+1]); err != nil { log.Fatal(err) } if !bytes.Equal(sep, v[:sepLen]) { log.Fatal("awaited %q, got %q", sep, v) } v = v[:n] if v[n-1] != '\n' { log.Fatal("should end with EOL, got %q", v) } v = v[2 : n-1] if err := db.Put(k, v, nil); err != nil { log.Fatal(err) } } }, PersistentPreRun: openDB, } rootCmd.AddCommand(loadCmd) rootCmd.Execute() }