func selectDB(c redigo.Conn, db uint32) {
	s, err := redigo.String(c.Do("select", db))
	if err != nil {
		log.PanicError(err, "select command error")
	}
	if s != "OK" {
		log.Panicf("select command response = '%s', should be 'OK'", s)
	}
}
// openSyncConn dials the master, optionally authenticates, then issues a raw
// SYNC command. It returns the connection together with a channel that will
// carry the size of the upcoming RDB payload; a bare '\n' received before the
// bulk header is the master's keep-alive while the RDB is being prepared and
// is reported as 0.
func openSyncConn(target string, authCode string) (net.Conn, chan int64) {
	c := openNetConn(target)
	// send auth to master
	if len(authCode) > 0 {
		cmd := fmt.Sprintf("*2\r\n$4\r\nauth\r\n$%d\r\n%s\r\n", len(authCode), authCode)
		if _, err := ioutils.WriteFull(c, []byte(cmd)); err != nil {
			log.PanicError(err, "write auth command failed")
		}
		resp := make([]byte, 5)
		if _, err := io.ReadFull(c, resp); err != nil {
			log.PanicError(err, "read auth response failed")
		}
		if string(resp) != "+OK\r\n" {
			log.Panic("auth failed")
		}
	}
	if _, err := ioutils.WriteFull(c, []byte("*1\r\n$4\r\nsync\r\n")); err != nil {
		log.PanicError(err, "write sync command failed")
	}
	size := make(chan int64)
	go func() {
		var rsp string
		for {
			b := []byte{0}
			if _, err := c.Read(b); err != nil {
				log.PanicErrorf(err, "read sync response = '%s'", rsp)
			}
			if len(rsp) == 0 && b[0] == '\n' {
				size <- 0
				continue
			}
			rsp += string(b)
			if strings.HasSuffix(rsp, "\r\n") {
				break
			}
		}
		// expect a RESP bulk-string header of the form "$<length>\r\n"
		if rsp[0] != '$' {
			log.Panicf("invalid sync response, rsp = '%s'", rsp)
		}
		n, err := strconv.Atoi(rsp[1 : len(rsp)-2])
		if err != nil || n <= 0 {
			log.PanicErrorf(err, "invalid sync response = '%s', n = %d", rsp, n)
		}
		size <- int64(n)
	}()
	return c, size
}
// RestoreCommand replays the incremental commands that follow the RDB payload
// onto the slave, skipping PING and any commands addressed to a filtered-out
// database, and logs per-second forward/bypass counters.
func (cmd *cmdRestore) RestoreCommand(reader *bufio.Reader, slave string, nsize int64) {
	var forward, nbypass atomic2.Int64
	c := openNetConn(slave)
	defer c.Close()
	writer := bufio.NewWriterSize(c, WriterBufferSize)
	defer flushWriter(writer)
	discard := bufio.NewReaderSize(c, ReaderBufferSize)
	go func() {
		var bypass bool = false
		for {
			resp := redis.MustDecode(reader)
			if cmd, args, err := redis.ParseArgs(resp); err != nil {
				log.PanicError(err, "parse command arguments failed")
			} else if cmd != "ping" {
				if cmd == "select" {
					if len(args) != 1 {
						log.Panicf("select command len(args) = %d", len(args))
					}
					s := string(args[0])
					n, err := parseInt(s, MinDB, MaxDB)
					if err != nil {
						log.PanicErrorf(err, "parse db = %s failed", s)
					}
					bypass = !acceptDB(uint32(n))
				}
				if bypass {
					nbypass.Incr()
					continue
				}
			}
			redis.MustEncode(writer, resp)
			flushWriter(writer)
			forward.Incr()
			redis.MustDecode(discard)
		}
	}()
	for {
		lastForward := forward.Get()
		lastByPass := nbypass.Get()
		time.Sleep(time.Second)
		log.Infof("restore: +forward=%-6d +bypass=%-6d\n",
			forward.Get()-lastForward, nbypass.Get()-lastByPass)
	}
}
// iocopy reads at most max bytes (bounded by len(p)) from r, writes all of
// them to w, and returns the number of bytes copied.
func iocopy(r io.Reader, w io.Writer, p []byte, max int) int {
	if max <= 0 || len(p) == 0 {
		log.Panicf("invalid max = %d, len(p) = %d", max, len(p))
	}
	if len(p) > max {
		p = p[:max]
	}
	if n, err := r.Read(p); err != nil {
		log.PanicError(err, "read error")
	} else {
		p = p[:n]
	}
	if _, err := ioutils.WriteFull(w, p); err != nil {
		log.PanicError(err, "write full error")
	}
	return len(p)
}
// restoreRdbEntry writes a single RDB entry to the target via SLOTSRESTORE,
// converting the absolute expire timestamp (adjusted by --faketime) into a
// TTL in milliseconds; already-expired keys are given a 1ms TTL.
func restoreRdbEntry(c redigo.Conn, e *rdb.BinEntry) {
	var ttlms uint64
	if e.ExpireAt != 0 {
		now := uint64(time.Now().Add(args.shift).UnixNano())
		now /= uint64(time.Millisecond)
		if now >= e.ExpireAt {
			ttlms = 1
		} else {
			ttlms = e.ExpireAt - now
		}
	}
	s, err := redigo.String(c.Do("slotsrestore", e.Key, ttlms, e.Value))
	if err != nil {
		log.PanicError(err, "restore command error")
	}
	if s != "OK" {
		log.Panicf("restore command response = '%s', should be 'OK'", s)
	}
}
// SyncCommand forwards the replication stream to the target, draining the
// target's replies in the background, flushing the buffered writer once per
// second, and logging per-second forward/bypass/read/recv counters.
func (cmd *cmdSync) SyncCommand(reader *bufio.Reader, slave string) {
	var forward, nbypass atomic2.Int64
	c := openNetConn(slave)
	defer c.Close()
	writer := bufio.NewWriterSize(c, WriterBufferSize)
	defer flushWriter(writer)
	// discard and count whatever the target sends back
	go func() {
		p := make([]byte, ReaderBufferSize)
		for {
			cnt := iocopy(c, ioutil.Discard, p, len(p))
			cmd.nrecv.Add(int64(cnt))
		}
	}()
	var mu sync.Mutex
	// flush the buffered writer periodically
	go func() {
		for {
			time.Sleep(time.Second)
			mu.Lock()
			flushWriter(writer)
			mu.Unlock()
		}
	}()
	go func() {
		var bypass bool = false
		for {
			resp := redis.MustDecode(reader)
			if cmd, args, err := redis.ParseArgs(resp); err != nil {
				log.PanicError(err, "parse command arguments failed")
			} else if cmd != "ping" {
				if cmd == "select" {
					if len(args) != 1 {
						log.Panicf("select command len(args) = %d", len(args))
					}
					s := string(args[0])
					n, err := parseInt(s, MinDB, MaxDB)
					if err != nil {
						log.PanicErrorf(err, "parse db = %s failed", s)
					}
					bypass = !acceptDB(uint32(n))
				}
				if bypass {
					nbypass.Incr()
					continue
				}
			}
			mu.Lock()
			redis.MustEncode(writer, resp)
			mu.Unlock()
			forward.Incr()
		}
	}()
	for {
		lastForward := forward.Get()
		lastByPass := nbypass.Get()
		lastRead := cmd.nread.Get()
		lastRecv := cmd.nrecv.Get()
		time.Sleep(time.Second)
		log.Infof("sync: +forward=%-6d +bypass=%-6d +read=%-9d +recv=%-9d\n",
			forward.Get()-lastForward, nbypass.Get()-lastByPass,
			cmd.nread.Get()-lastRead, cmd.nrecv.Get()-lastRecv)
	}
}
// decoderMain decodes dumped entries into one JSON document per element and
// pushes the rendered text to opipe. Keys, fields and members are emitted
// both as printable text and as base64 of the raw bytes.
func (cmd *cmdDecode) decoderMain(ipipe <-chan *rdb.BinEntry, opipe chan<- string) {
	toText := func(p []byte) string {
		var b bytes.Buffer
		for _, c := range p {
			switch {
			case c >= '#' && c <= '~':
				b.WriteByte(c)
			default:
				b.WriteByte('.')
			}
		}
		return b.String()
	}
	toBase64 := func(p []byte) string {
		return base64.StdEncoding.EncodeToString(p)
	}
	toJson := func(o interface{}) string {
		b, err := json.Marshal(o)
		if err != nil {
			log.PanicError(err, "encode to json failed")
		}
		return string(b)
	}
	for e := range ipipe {
		o, err := rdb.DecodeDump(e.Value)
		if err != nil {
			log.PanicError(err, "decode failed")
		}
		var b bytes.Buffer
		switch obj := o.(type) {
		default:
			log.Panicf("unknown object %v", o)
		case rdb.String:
			o := &struct {
				DB       uint32 `json:"db"`
				Type     string `json:"type"`
				ExpireAt uint64 `json:"expireat"`
				Key      string `json:"key"`
				Key64    string `json:"key64"`
				Value64  string `json:"value64"`
			}{
				e.DB, "string", e.ExpireAt,
				toText(e.Key), toBase64(e.Key), toBase64(obj),
			}
			fmt.Fprintf(&b, "%s\n", toJson(o))
		case rdb.List:
			for i, ele := range obj {
				o := &struct {
					DB       uint32 `json:"db"`
					Type     string `json:"type"`
					ExpireAt uint64 `json:"expireat"`
					Key      string `json:"key"`
					Key64    string `json:"key64"`
					Index    int    `json:"index"`
					Value64  string `json:"value64"`
				}{
					e.DB, "list", e.ExpireAt,
					toText(e.Key), toBase64(e.Key), i, toBase64(ele),
				}
				fmt.Fprintf(&b, "%s\n", toJson(o))
			}
		case rdb.Hash:
			for _, ele := range obj {
				o := &struct {
					DB       uint32 `json:"db"`
					Type     string `json:"type"`
					ExpireAt uint64 `json:"expireat"`
					Key      string `json:"key"`
					Key64    string `json:"key64"`
					Field    string `json:"field"`
					Field64  string `json:"field64"`
					Value64  string `json:"value64"`
				}{
					e.DB, "hash", e.ExpireAt,
					toText(e.Key), toBase64(e.Key),
					toText(ele.Field), toBase64(ele.Field), toBase64(ele.Value),
				}
				fmt.Fprintf(&b, "%s\n", toJson(o))
			}
		case rdb.Set:
			for _, mem := range obj {
				o := &struct {
					DB       uint32 `json:"db"`
					Type     string `json:"type"`
					ExpireAt uint64 `json:"expireat"`
					Key      string `json:"key"`
					Key64    string `json:"key64"`
					Member   string `json:"member"`
					Member64 string `json:"member64"`
				}{
					e.DB, "set", e.ExpireAt,
					toText(e.Key), toBase64(e.Key), toText(mem), toBase64(mem),
				}
				fmt.Fprintf(&b, "%s\n", toJson(o))
			}
		case rdb.ZSet:
			for _, ele := range obj {
				o := &struct {
					DB       uint32  `json:"db"`
					Type     string  `json:"type"`
					ExpireAt uint64  `json:"expireat"`
					Key      string  `json:"key"`
					Key64    string  `json:"key64"`
					Member   string  `json:"member"`
					Member64 string  `json:"member64"`
					Score    float64 `json:"score"`
				}{
					e.DB, "zset", e.ExpireAt,
					toText(e.Key), toBase64(e.Key),
					toText(ele.Member), toBase64(ele.Member), ele.Score,
				}
				fmt.Fprintf(&b, "%s\n", toJson(o))
			}
		}
		cmd.nobjs.Incr()
		opipe <- b.String()
	}
}
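// Illustrative output (not taken from a real run): decoding a plain string
// entry with key "foo" and value "bar" in db 0, with no expire set, would
// produce a line shaped like
//
//	{"db":0,"type":"string","expireat":0,"key":"foo","key64":"Zm9v","value64":"YmFy"}
//
// where key64/value64 are the base64 encodings of the raw key and value bytes.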
func main() {
	usage := `
Usage:
	redis-port decode   [--ncpu=N] [--parallel=M] [--input=INPUT] [--output=OUTPUT]
	redis-port restore  [--ncpu=N] [--parallel=M] [--input=INPUT] --target=TARGET [--extra] [--faketime=FAKETIME] [--filterdb=DB]
	redis-port dump     [--ncpu=N] [--parallel=M] --from=MASTER [--output=OUTPUT] [--extra] [--password=PASSWORD]
	redis-port sync     [--ncpu=N] [--parallel=M] --from=MASTER --target=TARGET [--sockfile=FILE [--filesize=SIZE]] [--filterdb=DB] [--password=PASSWORD]

Options:
	-P PASSWORD, --password=PASSWORD  Set master's auth code.
	-n N, --ncpu=N                    Set runtime.GOMAXPROCS to N.
	-p M, --parallel=M                Set the number of parallel routines to M.
	-i INPUT, --input=INPUT           Set input file, default is stdin ('/dev/stdin').
	-o OUTPUT, --output=OUTPUT        Set output file, default is stdout ('/dev/stdout').
	-f MASTER, --from=MASTER          Set host:port of master redis.
	-t TARGET, --target=TARGET        Set host:port of slave redis.
	--faketime=FAKETIME               Set current system time to adjust key's expire time.
	--sockfile=FILE                   Use FILE as socket buffer, default is disabled.
	--filesize=SIZE                   Set FILE size, default value is 1gb.
	-e, --extra                       Set true to send/receive following redis commands, default is false.
	--filterdb=DB                     Filter db = DB, default is *.
`
	d, err := docopt.Parse(usage, nil, true, "", false)
	if err != nil {
		log.PanicError(err, "parse arguments failed")
	}

	if s, ok := d["--ncpu"].(string); ok && s != "" {
		n, err := parseInt(s, 1, 1024)
		if err != nil {
			log.PanicErrorf(err, "parse --ncpu failed")
		}
		runtime.GOMAXPROCS(n)
	}
	ncpu := runtime.GOMAXPROCS(0)

	if s, ok := d["--parallel"].(string); ok && s != "" {
		n, err := parseInt(s, 1, 1024)
		if err != nil {
			log.PanicErrorf(err, "parse --parallel failed")
		}
		args.parallel = n
	}
	if ncpu > args.parallel {
		args.parallel = ncpu
	}
	if args.parallel == 0 {
		args.parallel = 4
	}

	args.input, _ = d["--input"].(string)
	args.output, _ = d["--output"].(string)
	args.target, _ = d["--target"].(string)
	args.from, _ = d["--from"].(string)
	args.extra, _ = d["--extra"].(bool)
	args.sockfile, _ = d["--sockfile"].(string)
	args.auth, _ = d["--password"].(string)

	// --faketime accepts a signed duration ("+8h"), "@<unix-millis>", or an
	// absolute timestamp "2006-01-02 15:04:05"; the result is stored as a
	// shift relative to the real clock.
	if s, ok := d["--faketime"].(string); ok && s != "" {
		switch s[0] {
		case '-', '+':
			d, err := time.ParseDuration(strings.ToLower(s))
			if err != nil {
				log.PanicError(err, "parse --faketime failed")
			}
			args.shift = d
		case '@':
			n, err := strconv.ParseInt(s[1:], 10, 64)
			if err != nil {
				log.PanicError(err, "parse --faketime failed")
			}
			args.shift = time.Duration(n*int64(time.Millisecond) - time.Now().UnixNano())
		default:
			t, err := time.Parse("2006-01-02 15:04:05", s)
			if err != nil {
				log.PanicError(err, "parse --faketime failed")
			}
			args.shift = time.Duration(t.UnixNano() - time.Now().UnixNano())
		}
	}

	if s, ok := d["--filterdb"].(string); ok && s != "" && s != "*" {
		n, err := parseInt(s, MinDB, MaxDB)
		if err != nil {
			log.PanicError(err, "parse --filterdb failed")
		}
		u := uint32(n)
		acceptDB = func(db uint32) bool {
			return db == u
		}
	}

	if s, ok := d["--filesize"].(string); ok && s != "" {
		if len(args.sockfile) == 0 {
			log.Panic("please specify --sockfile first")
		}
		n, err := bytesize.Parse(s)
		if err != nil {
			log.PanicError(err, "parse --filesize failed")
		}
		if n <= 0 {
			log.Panicf("parse --filesize = %d, invalid number", n)
		}
		args.filesize = n
	} else {
		args.filesize = bytesize.GB
	}

	log.Infof("set ncpu = %d, parallel = %d\n", ncpu, args.parallel)

	switch {
	case d["decode"].(bool):
		new(cmdDecode).Main()
	case d["restore"].(bool):
		new(cmdRestore).Main()
	case d["dump"].(bool):
		new(cmdDump).Main()
	case d["sync"].(bool):
		new(cmdSync).Main()
	}
}
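// Example invocations (illustrative only; the addresses and file paths are
// placeholders, the flags are the ones documented in the usage text above):
//
//	redis-port dump    --from=127.0.0.1:6379 --output=./dump.rdb --password=PASSWORD
//	redis-port restore --input=./dump.rdb --target=127.0.0.1:6380 --faketime=+8h
//	redis-port sync    --from=127.0.0.1:6379 --target=127.0.0.1:6380 --filterdb=0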