func (cmd *cmdDump) Main() { from, output := args.from, args.output if len(from) == 0 { log.Panic("invalid argument: from") } if len(output) == 0 { output = "/dev/stdout" } log.Infof("dump from '%s' to '%s'\n", from, output) var dumpto io.WriteCloser if output != "/dev/stdout" { dumpto = openWriteFile(output) defer dumpto.Close() } else { dumpto = os.Stdout } master, nsize := cmd.SendCmd(from) defer master.Close() log.Infof("rdb file = %d\n", nsize) reader := bufio.NewReaderSize(master, ReaderBufferSize) writer := bufio.NewWriterSize(ioutils.NewCountWriter(dumpto, &cmd.ndump), WriterBufferSize) cmd.DumpRDBFile(reader, writer, nsize) if !args.extra { return } cmd.DumpCommand(reader, writer) }
// RestoreRDBFile replays the RDB payload read from reader into the target
// redis instance using args.parallel concurrent loader connections, and
// prints a progress line roughly once per second until the load finishes.
// nsize is the expected payload size in bytes (0 when unknown, e.g. stdin),
// used only for percentage reporting.
func (cmd *cmdRestore) RestoreRDBFile(reader *bufio.Reader, target string, nsize int64) {
	// Decode RDB entries into a channel; the buffer lets decoding run ahead
	// of the loader goroutines.
	pipe := newRDBLoader(reader, args.parallel*32)
	wait := make(chan struct{})
	go func() {
		defer close(wait)
		// group receives one token per worker so this goroutine knows when
		// every loader has drained the pipe.
		group := make(chan int, args.parallel)
		for i := 0; i < cap(group); i++ {
			go func() {
				defer func() {
					group <- 0
				}()
				c := openRedisConn(target)
				defer c.Close()
				// Track the db currently selected on this connection so we
				// only issue SELECT when an entry targets a different db.
				var lastdb uint32 = 0
				for e := range pipe {
					// Skip entries filtered out by --filterdb.
					if !acceptDB(e.DB) {
						continue
					}
					if e.DB != lastdb {
						lastdb = e.DB
						selectDB(c, lastdb)
					}
					restoreRdbEntry(c, e)
					cmd.nobjs.Incr()
				}
			}()
		}
		for i := 0; i < cap(group); i++ {
			<-group
		}
	}()
	// Progress loop: wake once per second (or as soon as all workers finish)
	// and report bytes read plus objects restored.
	for done := false; !done; {
		select {
		case <-wait:
			done = true
		case <-time.After(time.Second):
		}
		n, o := cmd.nread.Get(), cmd.nobjs.Get()
		if nsize != 0 {
			p := 100 * n / nsize
			log.Infof("total = %d - %12d [%3d%%] objs=%d\n", nsize, n, p, o)
		} else {
			// Unknown total size: no percentage available.
			log.Infof("total = %12d objs=%d\n", n, o)
		}
	}
	log.Info("restore: rdb done")
}
func (cmd *cmdRestore) Main() { input, target := args.input, args.target if len(target) == 0 { log.Panic("invalid argument: target") } if len(input) == 0 { input = "/dev/stdin" } log.Infof("restore from '%s' to '%s'\n", input, target) var readin io.ReadCloser var nsize int64 if input != "/dev/stdin" { readin, nsize = openReadFile(input) defer readin.Close() } else { readin, nsize = os.Stdin, 0 } reader := bufio.NewReaderSize(ioutils.NewCountReader(readin, &cmd.nread), ReaderBufferSize) cmd.RestoreRDBFile(reader, target, nsize) if !args.extra { return } if nsize != 0 && nsize == cmd.nread.Get() { return } cmd.RestoreCommand(reader, target, nsize) }
func (cmd *cmdDump) DumpRDBFile(reader *bufio.Reader, writer *bufio.Writer, nsize int64) { var nread atomic2.Int64 wait := make(chan struct{}) go func() { defer close(wait) p := make([]byte, WriterBufferSize) for nsize != nread.Get() { cnt := iocopy(reader, writer, p, int(nsize-nread.Get())) nread.Add(int64(cnt)) } flushWriter(writer) }() for done := false; !done; { select { case <-wait: done = true case <-time.After(time.Second): } n := nread.Get() p := 100 * n / nsize log.Infof("total = %d - %12d [%3d%%]\n", nsize, n, p) } log.Info("dump: rdb done") }
// Main drives the sync command: it requests a full sync from the master
// (--from), loads the RDB snapshot into the target, then forwards the
// replication command stream forever. When --sockfile is given, the
// master's stream is spooled through a file-backed pipe so a slow target
// cannot stall the master connection.
func (cmd *cmdSync) Main() {
	from, target := args.from, args.target
	if len(from) == 0 {
		log.Panic("invalid argument: from")
	}
	if len(target) == 0 {
		log.Panic("invalid argument: target")
	}
	log.Infof("sync from '%s' to '%s'\n", from, target)
	// Optional disk-backed buffer file between master and target.
	var sockfile *os.File
	if len(args.sockfile) != 0 {
		f, err := pipe.OpenFile(args.sockfile, false)
		if err != nil {
			log.PanicError(err, "open sockbuff file failed")
		}
		sockfile = f
	}
	master, nsize := cmd.SendCmd(from)
	defer master.Close()
	log.Infof("rdb file = %d\n", nsize)
	var input io.Reader
	if sockfile != nil {
		// Copy everything from the master into the file-backed pipe in the
		// background; the pipe's read side becomes our input stream.
		// args.filesize bounds the on-disk buffer.
		r, w := pipe.PipeFile(ReaderBufferSize, int(args.filesize), sockfile)
		defer r.Close()
		go func() {
			defer w.Close()
			p := make([]byte, ReaderBufferSize)
			for {
				iocopy(master, w, p, len(p))
			}
		}()
		input = r
	} else {
		// No buffering requested: read the master connection directly.
		input = master
	}
	// Count every byte read so SyncRDBFile/SyncCommand can report progress.
	reader := bufio.NewReaderSize(ioutils.NewCountReader(input, &cmd.nread), ReaderBufferSize)
	cmd.SyncRDBFile(reader, target, nsize)
	cmd.SyncCommand(reader, target)
}
func (cmd *cmdDump) DumpCommand(reader *bufio.Reader, writer *bufio.Writer) { go func() { p := make([]byte, ReaderBufferSize) for { iocopy(reader, writer, p, len(p)) flushWriter(writer) } }() for { time.Sleep(time.Second) log.Infof("dump: size = %d\n", cmd.ndump.Get()) } }
func (cmd *cmdRestore) RestoreCommand(reader *bufio.Reader, slave string, nsize int64) { var forward, nbypass atomic2.Int64 c := openNetConn(slave) defer c.Close() writer := bufio.NewWriterSize(c, WriterBufferSize) defer flushWriter(writer) discard := bufio.NewReaderSize(c, ReaderBufferSize) go func() { var bypass bool = false for { resp := redis.MustDecode(reader) if cmd, args, err := redis.ParseArgs(resp); err != nil { log.PanicError(err, "parse command arguments failed") } else if cmd != "ping" { if cmd == "select" { if len(args) != 1 { log.Panicf("select command len(args) = %d", len(args)) } s := string(args[0]) n, err := parseInt(s, MinDB, MaxDB) if err != nil { log.PanicErrorf(err, "parse db = %s failed", s) } bypass = !acceptDB(uint32(n)) } if bypass { nbypass.Incr() continue } } redis.MustEncode(writer, resp) flushWriter(writer) forward.Incr() redis.MustDecode(discard) } }() for { lastForward := forward.Get() lastByPass := nbypass.Get() time.Sleep(time.Second) log.Infof("restore: +forward=%-6d +bypass=%-6d\n", forward.Get()-lastForward, nbypass.Get()-lastByPass) } }
func (cmd *cmdSync) SyncCommand(reader *bufio.Reader, slave string) { var forward, nbypass atomic2.Int64 c := openNetConn(slave) defer c.Close() writer := bufio.NewWriterSize(c, WriterBufferSize) defer flushWriter(writer) go func() { p := make([]byte, ReaderBufferSize) for { cnt := iocopy(c, ioutil.Discard, p, len(p)) cmd.nrecv.Add(int64(cnt)) } }() var mu sync.Mutex go func() { for { time.Sleep(time.Second) mu.Lock() flushWriter(writer) mu.Unlock() } }() go func() { var bypass bool = false for { resp := redis.MustDecode(reader) if cmd, args, err := redis.ParseArgs(resp); err != nil { log.PanicError(err, "parse command arguments failed") } else if cmd != "ping" { if cmd == "select" { if len(args) != 1 { log.Panicf("select command len(args) = %d", len(args)) } s := string(args[0]) n, err := parseInt(s, MinDB, MaxDB) if err != nil { log.PanicErrorf(err, "parse db = %s failed", s) } bypass = !acceptDB(uint32(n)) } if bypass { nbypass.Incr() continue } } mu.Lock() redis.MustEncode(writer, resp) mu.Unlock() forward.Incr() } }() for { lastForward := forward.Get() lastByPass := nbypass.Get() lastRead := cmd.nread.Get() lastRecv := cmd.nrecv.Get() time.Sleep(time.Second) log.Infof("sync: +forward=%-6d +bypass=%-6d +read=%-9d +recv=%-9d\n", forward.Get()-lastForward, nbypass.Get()-lastByPass, cmd.nread.Get()-lastRead, cmd.nrecv.Get()-lastRecv) } }
// Main drives the decode command: it reads an RDB payload from the input
// file (stdin by default), decodes each entry to a textual representation
// with args.parallel worker goroutines, and writes the result to the output
// file (stdout by default), logging progress once per second.
func (cmd *cmdDecode) Main() {
	input, output := args.input, args.output
	if len(input) == 0 {
		input = "/dev/stdin"
	}
	if len(output) == 0 {
		output = "/dev/stdout"
	}
	log.Infof("decode from '%s' to '%s'\n", input, output)
	// Source: a regular file also reports its size for percentage logging;
	// stdin has unknown size (nsize == 0).
	var readin io.ReadCloser
	var nsize int64
	if input != "/dev/stdin" {
		readin, nsize = openReadFile(input)
		defer readin.Close()
	} else {
		readin, nsize = os.Stdin, 0
	}
	// Sink: open a real file when not writing to stdout.
	var saveto io.WriteCloser
	if output != "/dev/stdout" {
		saveto = openWriteFile(output)
		defer saveto.Close()
	} else {
		saveto = os.Stdout
	}
	// Count bytes in/out so the progress loop can report both.
	reader := bufio.NewReaderSize(ioutils.NewCountReader(readin, &cmd.nread), ReaderBufferSize)
	writer := bufio.NewWriterSize(ioutils.NewCountWriter(saveto, &cmd.nsave), WriterBufferSize)
	// Stage 1: RDB entries flow in through ipipe, decoded strings out
	// through opipe.
	ipipe := newRDBLoader(reader, args.parallel*32)
	opipe := make(chan string, cap(ipipe))
	go func() {
		defer close(opipe)
		// One token per decoder worker; close opipe when all are done.
		group := make(chan int, args.parallel)
		for i := 0; i < cap(group); i++ {
			go func() {
				defer func() {
					group <- 0
				}()
				cmd.decoderMain(ipipe, opipe)
			}()
		}
		for i := 0; i < cap(group); i++ {
			<-group
		}
	}()
	// Stage 2: single writer goroutine serializes decoded strings to output.
	wait := make(chan struct{})
	go func() {
		defer close(wait)
		for s := range opipe {
			if _, err := writer.WriteString(s); err != nil {
				log.PanicError(err, "write string failed")
			}
			flushWriter(writer)
		}
	}()
	// Progress loop: wake once per second (or when the writer finishes).
	for done := false; !done; {
		select {
		case <-wait:
			done = true
		case <-time.After(time.Second):
		}
		n, w, o := cmd.nread.Get(), cmd.nsave.Get(), cmd.nobjs.Get()
		if nsize != 0 {
			p := 100 * n / nsize
			log.Infof("total = %d - %12d [%3d%%] write=%-12d objs=%d\n", nsize, n, p, w, o)
		} else {
			log.Infof("total = %12d write=%-12d objs=%d\n", n, w, o)
		}
	}
	log.Info("done")
}
func main() { usage := ` Usage: redis-port decode [--ncpu=N] [--parallel=M] [--input=INPUT] [--output=OUTPUT] redis-port restore [--ncpu=N] [--parallel=M] [--input=INPUT] --target=TARGET [--extra] [--faketime=FAKETIME] [--filterdb=DB] redis-port dump [--ncpu=N] [--parallel=M] --from=MASTER [--output=OUTPUT] [--extra] [--password=PASSWORD] redis-port sync [--ncpu=N] [--parallel=M] --from=MASTER --target=TARGET [--sockfile=FILE [--filesize=SIZE]] [--filterdb=DB] [--password=PASSWORD] Options: -P PASSWORD, --password Set master's auth code. -n N, --ncpu=N Set runtime.GOMAXPROCS to N. -p M, --parallel=M Set the number of parallel routines to M. -i INPUT, --input=INPUT Set input file, default is stdin ('/dev/stdin'). -o OUTPUT, --output=OUTPUT Set output file, default is stdout ('/dev/stdout'). -f MASTER, --from=MASTER Set host:port of master redis. -t TARGET, --target=TARGET Set host:port of slave redis. --faketime=FAKETIME Set current system time to adjust key's expire time. --sockfile=FILE Use FILE to as socket buffer, default is disabled. --filesize=SIZE Set FILE size, default value is 1gb. -e, --extra Set ture to send/receive following redis commands, default is false. --filterdb=DB Filter db = DB, default is *. 
` d, err := docopt.Parse(usage, nil, true, "", false) if err != nil { log.PanicError(err, "parse arguments failed") } if s, ok := d["--ncpu"].(string); ok && s != "" { n, err := parseInt(s, 1, 1024) if err != nil { log.PanicErrorf(err, "parse --ncpu failed") } runtime.GOMAXPROCS(n) } ncpu := runtime.GOMAXPROCS(0) if s, ok := d["--parallel"].(string); ok && s != "" { n, err := parseInt(s, 1, 1024) if err != nil { log.PanicErrorf(err, "parse --parallel failed") } args.parallel = n } if ncpu > args.parallel { args.parallel = ncpu } if args.parallel == 0 { args.parallel = 4 } args.input, _ = d["--input"].(string) args.output, _ = d["--output"].(string) args.target, _ = d["--target"].(string) args.from, _ = d["--from"].(string) args.extra, _ = d["--extra"].(bool) args.sockfile, _ = d["--sockfile"].(string) args.auth, _ = d["--password"].(string) if s, ok := d["--faketime"].(string); ok && s != "" { switch s[0] { case '-', '+': d, err := time.ParseDuration(strings.ToLower(s)) if err != nil { log.PanicError(err, "parse --faketime failed") } args.shift = d case '@': n, err := strconv.ParseInt(s[1:], 10, 64) if err != nil { log.PanicError(err, "parse --faketime failed") } args.shift = time.Duration(n*int64(time.Millisecond) - time.Now().UnixNano()) default: t, err := time.Parse("2006-01-02 15:04:05", s) if err != nil { log.PanicError(err, "parse --faketime failed") } args.shift = time.Duration(t.UnixNano() - time.Now().UnixNano()) } } if s, ok := d["--filterdb"].(string); ok && s != "" && s != "*" { n, err := parseInt(s, MinDB, MaxDB) if err != nil { log.PanicError(err, "parse --filterdb failed") } u := uint32(n) acceptDB = func(db uint32) bool { return db == u } } if s, ok := d["--filesize"].(string); ok && s != "" { if len(args.sockfile) == 0 { log.Panic("please specify --sockfile first") } n, err := bytesize.Parse(s) if err != nil { log.PanicError(err, "parse --filesize failed") } if n <= 0 { log.Panicf("parse --filesize = %d, invalid number", n) } args.filesize = n } 
else { args.filesize = bytesize.GB } log.Infof("set ncpu = %d, parallel = %d\n", ncpu, args.parallel) switch { case d["decode"].(bool): new(cmdDecode).Main() case d["restore"].(bool): new(cmdRestore).Main() case d["dump"].(bool): new(cmdDump).Main() case d["sync"].(bool): new(cmdSync).Main() } }