// daemonSyncMaster serializes master-switch requests for the handler.
// It owns at most one active replication connection at a time:
// a conn received on h.master replaces (closes) the previous one, a nil
// conn means "slave of no one", and closing h.signal stops the daemon.
func (h *Handler) daemonSyncMaster() {
	var last *conn
	// wait is signaled exactly once by each sync goroutine when it exits.
	wait := make(chan int, 0)
	for exit := false; !exit; {
		var c *conn
		select {
		case <-wait:
			// Current sync goroutine finished on its own.
			last = nil
		case <-h.signal:
			exit = true
		case c = <-h.master:
			// New master requested (c may be nil for "no one").
		}
		if last != nil {
			// Tear down the previous sync and wait for its goroutine.
			last.Close()
			<-wait
		}
		last = c
		if c != nil {
			go func() {
				defer func() {
					wait <- 0
				}()
				defer c.Close()
				err := h.doSyncTo(c)
				log.InfoErrorf(err, "stop sync: %s", c.summ)
			}()
			h.syncto = c.nc.RemoteAddr().String()
			log.Infof("sync to %s", h.syncto)
		} else {
			h.syncto = ""
			log.Infof("sync to no one")
		}
	}
}
func (b *Rpdb) Reset() error { if err := b.acquire(); err != nil { return err } defer b.release() log.Infof("rpdb is reseting...") for i := b.splist.Len(); i != 0; i-- { v := b.splist.Remove(b.splist.Front()).(*RpdbSnapshot) v.Close() } for i := b.itlist.Len(); i != 0; i-- { v := b.itlist.Remove(b.itlist.Front()).(*rpdbIterator) v.Close() } if err := b.db.Clear(); err != nil { b.db.Close() b.db = nil log.ErrorErrorf(err, "rpdb reset failed") return err } else { b.serial++ log.Infof("rpdb is reset") return nil } }
func (cmd *cmdDump) Main() { from, output := args.from, args.output if len(from) == 0 { log.Panic("invalid argument: from") } if len(output) == 0 { output = "/dev/stdout" } log.Infof("dump from '%s' to '%s'\n", from, output) var dumpto io.WriteCloser if output != "/dev/stdout" { dumpto = openWriteFile(output) defer dumpto.Close() } else { dumpto = os.Stdout } master, nsize := cmd.SendCmd(from, args.passwd) defer master.Close() log.Infof("rdb file = %d\n", nsize) reader := bufio.NewReaderSize(master, ReaderBufferSize) writer := bufio.NewWriterSize(dumpto, WriterBufferSize) cmd.DumpRDBFile(reader, writer, nsize) if !args.extra { return } cmd.DumpCommand(reader, writer, nsize) }
// SendPSyncCmd performs a PSYNC full resync against master and returns a
// pipe reader carrying the RDB payload followed by the replication
// stream, plus the RDB size in bytes. A background goroutine keeps the
// stream alive, reconnecting and issuing "psync continue" at the last
// known offset whenever the link breaks.
func (cmd *cmdSync) SendPSyncCmd(master, passwd string) (pipe.Reader, int64) {
	c := openNetConn(master, passwd)
	br := bufio.NewReaderSize(c, ReaderBufferSize)
	bw := bufio.NewWriterSize(c, WriterBufferSize)
	runid, offset, wait := sendPSyncFullsync(br, bw)
	log.Infof("psync runid = %s offset = %d, fullsync", runid, offset)
	// Wait for the master to report a non-zero RDB size; '+' is logged
	// when wait yields 0, '-' on each one-second timeout.
	var nsize int64
	for nsize == 0 {
		select {
		case nsize = <-wait:
			if nsize == 0 {
				log.Info("+")
			}
		case <-time.After(time.Second):
			log.Info("-")
		}
	}
	piper, pipew := pipe.NewSize(ReaderBufferSize)
	go func() {
		defer pipew.Close()
		p := make([]byte, 8192)
		// First forward exactly nsize bytes of RDB payload.
		for rdbsize := int(nsize); rdbsize != 0; {
			rdbsize -= iocopy(br, pipew, p, rdbsize)
		}
		// Then forward the replication stream forever, reconnecting
		// when PSyncPipeCopy returns.
		for {
			n, err := cmd.PSyncPipeCopy(c, br, bw, offset, pipew)
			if err != nil {
				log.PanicErrorf(err, "psync runid = %s, offset = %d, pipe is broken", runid, offset)
			}
			offset += n
			// Retry once per second until the master accepts a new connection.
			for {
				time.Sleep(time.Second)
				c = openNetConnSoft(master, passwd)
				if c != nil {
					log.Infof("psync reopen connection, offset = %d", offset)
					break
				} else {
					log.Infof("psync reopen connection, failed")
				}
			}
			authPassword(c, passwd)
			br = bufio.NewReaderSize(c, ReaderBufferSize)
			bw = bufio.NewWriterSize(c, WriterBufferSize)
			sendPSyncContinue(br, bw, runid, offset)
		}
	}()
	return piper, nsize
}
func (b *Rpdb) CompactAll() error { if err := b.acquire(); err != nil { return err } defer b.release() log.Infof("rpdb is compacting all...") if err := b.compact([]byte{MetaCode}, []byte{MetaCode + 1}); err != nil { return err } if err := b.compact([]byte{DataCode}, []byte{DataCode + 1}); err != nil { return err } log.Infof("rpdb is compacted") return nil }
func Serve(config *Config, bl *rpdb.Rpdb) error { h := &Handler{ config: config, master: make(chan *conn, 0), signal: make(chan int, 0), } defer func() { close(h.signal) }() l, err := net.Listen("tcp", config.Listen) if err != nil { return errors.Trace(err) } defer l.Close() if h.htable, err = redis.NewHandlerTable(h); err != nil { return err } else { go h.daemonSyncMaster() } log.Infof("open listen address '%s' and start service", l.Addr()) for { if nc, err := l.Accept(); err != nil { return errors.Trace(err) } else { h.counters.clientsAccepted.Add(1) go func() { h.counters.clients.Add(1) defer h.counters.clients.Sub(1) c := newConn(nc, bl, h.config.ConnTimeout) defer c.Close() log.Infof("new connection: %s", c.summ) if err := c.serve(h); err != nil { if errors.Equal(err, io.EOF) { log.Infof("connection lost: %s [io.EOF]", c.summ) } else { log.InfoErrorf(err, "connection lost: %s", c.summ) } } else { log.Infof("connection exit: %s", c.summ) } }() } } }
// SLAVEOF host port
// SlaveOf handles the SLAVEOF command: it dials the requested master
// (or detaches when the target is "no one"), verifies the link with a
// ping, and hands the connection to the sync daemon via h.master.
func (h *Handler) SlaveOf(arg0 interface{}, args [][]byte) (redis.Resp, error) {
	if len(args) != 2 {
		return toRespErrorf("len(args) = %d, expect = 2", len(args))
	}
	s, err := session(arg0, args)
	if err != nil {
		return toRespError(err)
	}
	addr := fmt.Sprintf("%s:%s", string(args[0]), string(args[1]))
	log.Infof("set slave of %s", addr)
	var c *conn
	// "no:one" is SLAVEOF NO ONE joined by ':'; c stays nil to detach.
	if strings.ToLower(addr) != "no:one" {
		if nc, err := net.DialTimeout("tcp", addr, time.Second); err != nil {
			return toRespError(errors.Trace(err))
		} else {
			c = newConn(nc, s.Rpdb(), 0)
			if err := c.ping(); err != nil {
				c.Close()
				return toRespError(err)
			}
		}
	}
	select {
	case <-h.signal:
		// Service shutting down; don't leak the freshly dialed conn.
		if c != nil {
			c.Close()
		}
		return toRespErrorf("sync master has been closed")
	case h.master <- c:
		return redis.NewString("OK"), nil
	}
}
func init() { poolmap.m = make(map[string]*list.List) go func() { for { time.Sleep(time.Second) poolmap.Lock() for addr, pool := range poolmap.m { for i := pool.Len(); i != 0; i-- { c := pool.Remove(pool.Front()).(*conn) if time.Now().Before(c.last.Add(time.Second * 5)) { pool.PushBack(c) } else { c.sock.Close() log.Infof("close connection %s : %s", addr, c.summ) } } if pool.Len() != 0 { continue } delete(poolmap.m, addr) } poolmap.Unlock() } }() }
func (cmd *cmdRestore) Main() { input, target := args.input, args.target if len(target) == 0 { log.Panic("invalid argument: target") } if len(input) == 0 { input = "/dev/stdin" } log.Infof("restore from '%s' to '%s'\n", input, target) var readin io.ReadCloser var nsize int64 if input != "/dev/stdin" { readin, nsize = openReadFile(input) defer readin.Close() } else { readin, nsize = os.Stdin, 0 } reader := bufio.NewReaderSize(readin, ReaderBufferSize) cmd.RestoreRDBFile(reader, target, args.auth, nsize) if !args.extra { return } if nsize != 0 && nsize == cmd.rbytes.Get() { return } cmd.RestoreCommand(reader, target, args.auth) }
func (cmd *cmdDump) DumpRDBFile(reader *bufio.Reader, writer *bufio.Writer, nsize int64) { var nread atomic2.Int64 wait := make(chan struct{}) go func() { defer close(wait) p := make([]byte, WriterBufferSize) for nsize != nread.Get() { nstep := int(nsize - nread.Get()) ncopy := int64(iocopy(reader, writer, p, nstep)) nread.Add(ncopy) flushWriter(writer) } }() for done := false; !done; { select { case <-wait: done = true case <-time.After(time.Second): } n := nread.Get() p := 100 * n / nsize log.Infof("total = %d - %12d [%3d%%]\n", nsize, n, p) } log.Info("dump: rdb done") }
// Main drives the sync command: it receives the RDB payload plus the
// replication stream from --from (via PSYNC or plain SYNC), optionally
// spools it through a file-backed pipe, and replays it into --target.
func (cmd *cmdSync) Main() {
	from, target := args.from, args.target
	if len(from) == 0 {
		log.Panic("invalid argument: from")
	}
	if len(target) == 0 {
		log.Panic("invalid argument: target")
	}
	log.Infof("sync from '%s' to '%s'\n", from, target)
	// Optional on-disk buffer between master and target.
	var sockfile *os.File
	if len(args.sockfile) != 0 {
		sockfile = openReadWriteFile(args.sockfile)
		defer sockfile.Close()
	}
	var input io.ReadCloser
	var nsize int64
	if args.psync {
		input, nsize = cmd.SendPSyncCmd(from, args.passwd)
	} else {
		input, nsize = cmd.SendSyncCmd(from, args.passwd)
	}
	defer input.Close()
	log.Infof("rdb file = %d\n", nsize)
	if sockfile != nil {
		// Route the stream through a file-backed pipe so a slow target
		// does not stall the master link.
		r, w := pipe.NewFilePipe(int(args.filesize), sockfile)
		defer r.Close()
		go func(r io.Reader) {
			defer w.Close()
			p := make([]byte, ReaderBufferSize)
			for {
				iocopy(r, w, p, len(p))
			}
		}(input)
		input = r
	}
	reader := bufio.NewReaderSize(input, ReaderBufferSize)
	cmd.SyncRDBFile(reader, target, args.auth, nsize)
	cmd.SyncCommand(reader, target, args.auth)
}
func (s *RpdbSnapshot) Close() { if err := s.acquire(); err != nil { return } defer s.release() log.Infof("snapshot is closing ...") if s.cursor.it != nil { s.cursor.it.Close() s.cursor.it = nil } for i := s.readers.Len(); i != 0; i-- { r := s.readers.Remove(s.readers.Front()).(*snapshotReader) r.cleanup() } s.sp.Close() s.sp = nil log.Infof("snapshot is closed") }
func (b *Rpdb) NewSnapshot() (*RpdbSnapshot, error) { if err := b.acquire(); err != nil { return nil, err } defer b.release() sp := &RpdbSnapshot{sp: b.db.NewSnapshot()} b.splist.PushBack(sp) log.Infof("rpdb create new snapshot, address = %p", sp) return sp, nil }
func (b *Rpdb) Close() { if err := b.acquire(); err != nil { return } defer b.release() log.Infof("rpdb is closing ...") for i := b.splist.Len(); i != 0; i-- { v := b.splist.Remove(b.splist.Front()).(*RpdbSnapshot) v.Close() } for i := b.itlist.Len(); i != 0; i-- { v := b.itlist.Remove(b.itlist.Front()).(*rpdbIterator) v.Close() } if b.db != nil { b.db.Close() b.db = nil } log.Infof("rpdb is closed") }
func (b *Rpdb) ReleaseSnapshot(sp *RpdbSnapshot) { if err := b.acquire(); err != nil { return } defer b.release() log.Infof("rpdb release snapshot, address = %p", sp) for i := b.splist.Len(); i != 0; i-- { v := b.splist.Remove(b.splist.Front()).(*RpdbSnapshot) if v != sp { b.splist.PushBack(v) } } sp.Close() }
func (cmd *cmdDump) DumpCommand(reader *bufio.Reader, writer *bufio.Writer, nsize int64) { var nread atomic2.Int64 go func() { p := make([]byte, ReaderBufferSize) for { ncopy := int64(iocopy(reader, writer, p, len(p))) nread.Add(ncopy) flushWriter(writer) } }() for { time.Sleep(time.Second) log.Infof("dump: total = %d\n", nsize+nread.Get()) } }
func getSockConn(addr string, timeout time.Duration) (*conn, error) { poolmap.Lock() if pool := poolmap.m[addr]; pool != nil && pool.Len() != 0 { c := pool.Remove(pool.Front()).(*conn) poolmap.Unlock() return c, nil } poolmap.Unlock() sock, err := net.DialTimeout("tcp", addr, timeout) if err != nil { return nil, errors.Trace(err) } c := &conn{ summ: fmt.Sprintf("<local> %s -- %s <remote>", sock.LocalAddr(), sock.RemoteAddr()), sock: sock, last: time.Now(), r: bufio.NewReader(sock), w: bufio.NewWriter(sock), } log.Infof("create connection %s : %s", addr, c.summ) return c, nil }
// doSyncTo consumes the replication stream from master connection c: it
// spools the raw socket bytes through a (possibly file-backed) pipe,
// performs the presync handshake, resets the local rpdb, loads the RDB
// payload, and finally serves the ongoing command stream.
func (h *Handler) doSyncTo(c *conn) error {
	defer func() {
		// Clear sync counters whenever the sync ends, however it ends.
		h.counters.syncTotalBytes.Set(0)
		h.counters.syncCacheBytes.Set(0)
	}()
	filePath := h.config.SyncFilePath
	fileSize := h.config.SyncFileSize
	buffSize := h.config.SyncBuffSize
	// Best effort: if the spool file can't be opened, fall back to a
	// memory-only pipe (file stays nil).
	var file *os.File
	if filePath != "" {
		f, err := pipe.OpenFile(filePath, false)
		if err != nil {
			log.ErrorErrorf(err, "open pipe file '%s' failed", filePath)
		} else {
			file = f
		}
	}
	pr, pw := pipe.PipeFile(buffSize, fileSize, file)
	defer pr.Close()
	wg := &sync.WaitGroup{}
	defer wg.Wait()
	wg.Add(1)
	// Pump goroutine: copy bytes from the master socket into the pipe,
	// refreshing a one-minute read deadline before every read.
	go func(r io.Reader) {
		defer wg.Done()
		defer pw.Close()
		p := make([]byte, 8192)
		for {
			deadline := time.Now().Add(time.Minute)
			if err := c.nc.SetReadDeadline(deadline); err != nil {
				pr.CloseWithError(errors.Trace(err))
				return
			}
			n, err := r.Read(p)
			if err != nil {
				pr.CloseWithError(err)
				return
			}
			h.counters.syncTotalBytes.Add(int64(n))
			s := p[:n]
			// Write the chunk fully; pw.Write may accept it piecewise.
			for len(s) != 0 {
				n, err := pw.Write(s)
				if err != nil {
					pr.CloseWithError(err)
					return
				}
				s = s[n:]
			}
		}
	}(c.r)
	wg.Add(1)
	// Sampler goroutine: publish the pipe's buffered byte count every
	// 200ms until the pipe is closed.
	go func() {
		defer wg.Done()
		for {
			time.Sleep(time.Millisecond * 200)
			n, err := pr.Buffered()
			if err != nil {
				return
			}
			h.counters.syncCacheBytes.Set(int64(n))
		}
	}()
	// From here on, read the master stream through the pipe instead of
	// the raw socket.
	c.r = bufio.NewReader(pr)
	size, err := c.presync()
	if err != nil {
		return err
	}
	log.Infof("sync rdb file size = %d bytes\n", size)
	// Replies to the master are discarded during sync.
	c.w = bufio.NewWriter(ioutil.Discard)
	if err := c.Rpdb().Reset(); err != nil {
		return err
	}
	if err := h.doSyncRDB(c, size); err != nil {
		return err
	}
	log.Infof("sync rdb done")
	return c.serve(h)
}
func main() { usage := ` Usage: redis-port decode [--ncpu=N] [--parallel=M] [--input=INPUT] [--output=OUTPUT] redis-port restore [--ncpu=N] [--parallel=M] [--input=INPUT] --target=TARGET [--extra] [--faketime=FAKETIME] [--filterdb=DB] redis-port dump [--ncpu=N] [--parallel=M] --from=MASTER [--output=OUTPUT] [--password=PASSWORD] [--extra] redis-port sync [--ncpu=N] [--parallel=M] --from=MASTER --target=TARGET [--password=PASSWORD] [--sockfile=FILE [--filesize=SIZE]] [--filterdb=DB] [--psync] Options: -n N, --ncpu=N Set runtime.GOMAXPROCS to N. -p M, --parallel=M Set the number of parallel routines to M. -i INPUT, --input=INPUT Set input file, default is stdin ('/dev/stdin'). -o OUTPUT, --output=OUTPUT Set output file, default is stdout ('/dev/stdout'). -f MASTER, --from=MASTER Set host:port of master redis. -t TARGET, --target=TARGET Set host:port of slave redis. -P PASSWORD, --password=PASSWORD Set redis auth password. --faketime=FAKETIME Set current system time to adjust key's expire time. --sockfile=FILE Use FILE to as socket buffer, default is disabled. --filesize=SIZE Set FILE size, default value is 1gb. -e, --extra Set ture to send/receive following redis commands, default is false. --filterdb=DB Filter db = DB, default is *. --psync Use PSYNC command. 
` d, err := docopt.Parse(usage, nil, true, "", false) if err != nil { log.PanicError(err, "parse arguments failed") } if s, ok := d["--ncpu"].(string); ok && s != "" { n, err := parseInt(s, 1, 1024) if err != nil { log.PanicErrorf(err, "parse --ncpu failed") } runtime.GOMAXPROCS(n) } ncpu := runtime.GOMAXPROCS(0) if s, ok := d["--parallel"].(string); ok && s != "" { n, err := parseInt(s, 1, 1024) if err != nil { log.PanicErrorf(err, "parse --parallel failed") } args.parallel = n } if ncpu > args.parallel { args.parallel = ncpu } if args.parallel == 0 { args.parallel = 4 } args.input, _ = d["--input"].(string) args.output, _ = d["--output"].(string) args.from, _ = d["--from"].(string) args.target, _ = d["--target"].(string) args.passwd, _ = d["--password"].(string) args.extra, _ = d["--extra"].(bool) args.psync, _ = d["--psync"].(bool) args.sockfile, _ = d["--sockfile"].(string) if s, ok := d["--faketime"].(string); ok && s != "" { switch s[0] { case '-', '+': d, err := time.ParseDuration(strings.ToLower(s)) if err != nil { log.PanicError(err, "parse --faketime failed") } args.shift = d case '@': n, err := strconv.ParseInt(s[1:], 10, 64) if err != nil { log.PanicError(err, "parse --faketime failed") } args.shift = time.Duration(n*int64(time.Millisecond) - time.Now().UnixNano()) default: t, err := time.Parse("2006-01-02 15:04:05", s) if err != nil { log.PanicError(err, "parse --faketime failed") } args.shift = time.Duration(t.UnixNano() - time.Now().UnixNano()) } } if s, ok := d["--filterdb"].(string); ok && s != "" && s != "*" { n, err := parseInt(s, MinDB, MaxDB) if err != nil { log.PanicError(err, "parse --filterdb failed") } u := uint32(n) acceptDB = func(db uint32) bool { return db == u } } if s, ok := d["--filesize"].(string); ok && s != "" { if len(args.sockfile) == 0 { log.Panic("please specify --sockfile first") } n, err := bytesize.Parse(s) if err != nil { log.PanicError(err, "parse --filesize failed") } if n <= 0 { log.Panicf("parse --filesize = %d, 
invalid number", n) } args.filesize = n } else { args.filesize = bytesize.GB } log.Infof("set ncpu = %d, parallel = %d\n", ncpu, args.parallel) switch { case d["decode"].(bool): new(cmdDecode).Main() case d["restore"].(bool): new(cmdRestore).Main() case d["dump"].(bool): new(cmdDump).Main() case d["sync"].(bool): new(cmdSync).Main() } }
// Main drives the decode command: it reads an RDB dump from --input
// (stdin by default), decodes entries with args.parallel workers, and
// writes the decoded strings to --output (stdout by default), logging
// progress once per second.
func (cmd *cmdDecode) Main() {
	input, output := args.input, args.output
	if len(input) == 0 {
		input = "/dev/stdin"
	}
	if len(output) == 0 {
		output = "/dev/stdout"
	}
	log.Infof("decode from '%s' to '%s'\n", input, output)
	var readin io.ReadCloser
	var nsize int64 // 0 means unknown total size (stdin)
	if input != "/dev/stdin" {
		readin, nsize = openReadFile(input)
		defer readin.Close()
	} else {
		readin, nsize = os.Stdin, 0
	}
	var saveto io.WriteCloser
	if output != "/dev/stdout" {
		saveto = openWriteFile(output)
		defer saveto.Close()
	} else {
		saveto = os.Stdout
	}
	reader := bufio.NewReaderSize(readin, ReaderBufferSize)
	writer := bufio.NewWriterSize(saveto, WriterBufferSize)
	ipipe := newRDBLoader(reader, &cmd.rbytes, args.parallel*32)
	opipe := make(chan string, cap(ipipe))
	// Fan-out: run args.parallel decoder workers over ipipe and close
	// opipe once all of them have finished.
	go func() {
		defer close(opipe)
		group := make(chan int, args.parallel)
		for i := 0; i < cap(group); i++ {
			go func() {
				defer func() {
					group <- 0
				}()
				cmd.decoderMain(ipipe, opipe)
			}()
		}
		for i := 0; i < cap(group); i++ {
			<-group
		}
	}()
	// Fan-in: drain opipe into the output, flushing after each string.
	wait := make(chan struct{})
	go func() {
		defer close(wait)
		for s := range opipe {
			cmd.wbytes.Add(int64(len(s)))
			if _, err := writer.WriteString(s); err != nil {
				log.PanicError(err, "write string failed")
			}
			flushWriter(writer)
		}
	}()
	// Progress loop: report stats every second until the writer is done.
	for done := false; !done; {
		select {
		case <-wait:
			done = true
		case <-time.After(time.Second):
		}
		stat := cmd.Stat()
		var b bytes.Buffer
		fmt.Fprintf(&b, "decode: ")
		if nsize != 0 {
			fmt.Fprintf(&b, "total = %d - %12d [%3d%%]", nsize, stat.rbytes, 100*stat.rbytes/nsize)
		} else {
			fmt.Fprintf(&b, "total = %12d", stat.rbytes)
		}
		fmt.Fprintf(&b, " write=%-12d", stat.wbytes)
		fmt.Fprintf(&b, " entry=%-12d", stat.nentry)
		log.Info(b.String())
	}
	log.Info("decode: done")
}
func main() { usage := ` Usage: rpdb [--config=CONF] [--create|--repair] [--ncpu=N] Options: -n N, --ncpu=N set runtime.GOMAXPROCS to N -c CONF, --config=CONF specify the config file --create create if not exists --repair repair database ` d, err := docopt.Parse(usage, nil, true, "", false) if err != nil { log.PanicErrorf(err, "parse arguments failed") } if s, ok := d["--ncpu"].(string); ok && len(s) != 0 { if n, err := strconv.ParseInt(s, 10, 64); err != nil { log.PanicErrorf(err, "parse --ncpu failed") } else if n <= 0 || n > 64 { log.Panicf("parse --ncpu = %d, only accept [1,64]", n) } else { runtime.GOMAXPROCS(int(n)) } } args.config, _ = d["--config"].(string) args.create, _ = d["--create"].(bool) args.repair, _ = d["--repair"].(bool) conf := &Config{ DBType: "rocksdb", DBPath: "testdb-rocksdb", LevelDB: leveldb.NewDefaultConfig(), RocksDB: rocksdb.NewDefaultConfig(), Service: service.NewDefaultConfig(), } if args.config != "" { if err := conf.LoadFromFile(args.config); err != nil { log.PanicErrorf(err, "load config failed") } } log.Infof("load config\n%s\n\n", conf) var db store.Database switch t := strings.ToLower(conf.DBType); t { default: log.Panicf("unknown db type = '%s'", conf.DBType) case "leveldb": db, err = leveldb.Open(conf.DBPath, conf.LevelDB, args.create, args.repair) case "rocksdb": db, err = rocksdb.Open(conf.DBPath, conf.RocksDB, args.create, args.repair) case "boltdb": db, err = boltdb.Open(conf.DBPath, conf.BoltDB, args.create, args.repair) } if err != nil { log.PanicErrorf(err, "open database failed") } bl := rpdb.New(db) defer bl.Close() if args.repair { return } c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt) go func() { for _ = range c { log.Infof("interrupt and shutdown") bl.Close() os.Exit(0) } }() if err := service.Serve(conf.Service, bl); err != nil { log.ErrorErrorf(err, "service failed") } }