Example #1
func (cmd *cmdRestore) Main() {
	input, target := args.input, args.target
	if len(target) == 0 {
		log.Panic("invalid argument: target")
	}
	if len(input) == 0 {
		input = "/dev/stdin"
	}

	log.Infof("restore from '%s' to '%s'\n", input, target)

	var readin io.ReadCloser
	var nsize int64
	if input != "/dev/stdin" {
		readin, nsize = openReadFile(input)
		defer readin.Close()
	} else {
		readin, nsize = os.Stdin, 0
	}

	reader := bufio.NewReaderSize(ioutils.NewCountReader(readin, &cmd.nread), ReaderBufferSize)

	cmd.RestoreRDBFile(reader, target, nsize)

	// Commands appended after the RDB payload are only replayed when extra
	// restore is requested and the input has not already been fully consumed.
	if !args.extra {
		return
	}

	if nsize != 0 && nsize == cmd.nread.Get() {
		return
	}

	cmd.RestoreCommand(reader, target, nsize)
}
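
Every example on this page wraps its input stream in ioutils.NewCountReader so the caller can track how many bytes have been consumed, either through a shared atomic2.Int64 (cmd.nread above) or through the reader's own Count() method (Example #5 below). The following is only a minimal sketch of such a counting reader, written with the standard library's sync/atomic instead of the project's atomic2 type purely for illustration; it is not the actual ioutils implementation.

package ioutils

import (
	"io"
	"sync/atomic"
)

// countReader wraps an io.Reader and adds every byte it reads to a counter.
type countReader struct {
	r io.Reader
	n *int64 // shared counter; safe to read concurrently via Count()
}

// NewCountReader returns a reader that counts bytes as they pass through.
// A nil counter is replaced with an internal one, matching how Example #5
// passes nil and later calls Count().
func NewCountReader(r io.Reader, n *int64) *countReader {
	if n == nil {
		n = new(int64)
	}
	return &countReader{r: r, n: n}
}

// Count reports the total number of bytes read so far.
func (c *countReader) Count() int64 { return atomic.LoadInt64(c.n) }

func (c *countReader) Read(p []byte) (int, error) {
	n, err := c.r.Read(p)
	atomic.AddInt64(c.n, int64(n))
	return n, err
}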
Example #2
func (cmd *cmdSync) Main() {
	from, target := args.from, args.target
	if len(from) == 0 {
		log.Panic("invalid argument: from")
	}
	if len(target) == 0 {
		log.Panic("invalid argument: target")
	}

	log.Infof("sync from '%s' to '%s'\n", from, target)

	var sockfile *os.File
	if len(args.sockfile) != 0 {
		f, err := pipe.OpenFile(args.sockfile, false)
		if err != nil {
			log.PanicError(err, "open sockbuff file failed")
		}
		sockfile = f
	}

	master, nsize := cmd.SendCmd(from)
	defer master.Close()

	log.Infof("rdb file = %d\n", nsize)

	var input io.Reader
	if sockfile != nil {
		r, w := pipe.PipeFile(ReaderBufferSize, int(args.filesize), sockfile)
		defer r.Close()
		// Continuously copy the replication stream from the master into the
		// sockfile-backed pipe; the reader end feeds the sync below.
		go func() {
			defer w.Close()
			p := make([]byte, ReaderBufferSize)
			for {
				iocopy(master, w, p, len(p))
			}
		}()
		input = r
	} else {
		input = master
	}

	reader := bufio.NewReaderSize(ioutils.NewCountReader(input, &cmd.nread), ReaderBufferSize)

	cmd.SyncRDBFile(reader, target, nsize)
	cmd.SyncCommand(reader, target)
}
Example #3
func (s *testRedisRdbSuite) TestEncodeRdb(c *gocheck.C) {
	objs := make([]struct {
		db       uint32
		expireat uint64
		key      []byte
		obj      interface{}
		typ      string
	}, 128)
	var b bytes.Buffer
	enc := NewEncoder(&b)
	c.Assert(enc.EncodeHeader(), gocheck.IsNil)

	for i := 0; i < len(objs); i++ {
		db := uint32(i + 32)
		expireat := uint64(i)
		key := []byte(strconv.Itoa(i))
		var obj interface{}
		var typ string
		switch i % 5 {
		case 0:
			sss := strconv.Itoa(i)
			obj = sss
			typ = "string"
			c.Assert(enc.EncodeObject(db, key, expireat, s.toString(sss)), gocheck.IsNil)
		case 1:
			list := []string{}
			for j := 0; j < 32; j++ {
				list = append(list, fmt.Sprintf("l%d_%d", i, rand.Int()))
			}
			obj = list
			typ = "list"
			c.Assert(enc.EncodeObject(db, key, expireat, s.toList(list...)), gocheck.IsNil)
		case 2:
			hash := make(map[string]string)
			for j := 0; j < 32; j++ {
				hash[strconv.Itoa(j)] = fmt.Sprintf("h%d_%d", i, rand.Int())
			}
			obj = hash
			typ = "hash"
			c.Assert(enc.EncodeObject(db, key, expireat, s.toHash(hash)), gocheck.IsNil)
		case 3:
			zset := make(map[string]float64)
			for j := 0; j < 32; j++ {
				zset[strconv.Itoa(j)] = rand.Float64()
			}
			obj = zset
			typ = "zset"
			c.Assert(enc.EncodeObject(db, key, expireat, s.toZSet(zset)), gocheck.IsNil)
		case 4:
			set := []string{}
			for j := 0; j < 32; j++ {
				set = append(set, fmt.Sprintf("s%d_%d", i, rand.Int()))
			}
			obj = set
			typ = "set"
			c.Assert(enc.EncodeObject(db, key, expireat, s.toSet(set...)), gocheck.IsNil)
		}

		objs[i].db = db
		objs[i].expireat = expireat
		objs[i].key = key
		objs[i].obj = obj
		objs[i].typ = typ
	}

	c.Assert(enc.EncodeFooter(), gocheck.IsNil)

	rdb := b.Bytes()
	var cc atomic2.Int64
	l := NewLoader(ioutils.NewCountReader(bytes.NewReader(rdb), &cc))
	c.Assert(l.Header(), gocheck.IsNil)

	var i int = 0
	for {
		e, err := l.NextBinEntry()
		c.Assert(err, gocheck.IsNil)
		if e == nil {
			break
		}

		c.Assert(objs[i].db, gocheck.Equals, e.DB)
		c.Assert(objs[i].expireat, gocheck.Equals, e.ExpireAt)
		c.Assert(objs[i].key, gocheck.DeepEquals, e.Key)

		o, err := DecodeDump(e.Value)
		c.Assert(err, gocheck.IsNil)

		switch objs[i].typ {
		case "string":
			s.checkString(c, o, objs[i].obj.(string))
		case "list":
			s.checkList(c, o, objs[i].obj.([]string))
		case "hash":
			s.checkHash(c, o, objs[i].obj.(map[string]string))
		case "zset":
			s.checkZSet(c, o, objs[i].obj.(map[string]float64))
		case "set":
			s.checkSet(c, o, objs[i].obj.([]string))
		}
		i++
	}

	c.Assert(i, gocheck.Equals, len(objs))
	c.Assert(l.Footer(), gocheck.IsNil)
	c.Assert(cc.Get(), gocheck.DeepEquals, int64(len(rdb)))
}
Example #4
func (cmd *cmdDecode) Main() {
	input, output := args.input, args.output
	if len(input) == 0 {
		input = "/dev/stdin"
	}
	if len(output) == 0 {
		output = "/dev/stdout"
	}

	log.Infof("decode from '%s' to '%s'\n", input, output)

	var readin io.ReadCloser
	var nsize int64
	if input != "/dev/stdin" {
		readin, nsize = openReadFile(input)
		defer readin.Close()
	} else {
		readin, nsize = os.Stdin, 0
	}

	var saveto io.WriteCloser
	if output != "/dev/stdout" {
		saveto = openWriteFile(output)
		defer saveto.Close()
	} else {
		saveto = os.Stdout
	}

	reader := bufio.NewReaderSize(ioutils.NewCountReader(readin, &cmd.nread), ReaderBufferSize)
	writer := bufio.NewWriterSize(ioutils.NewCountWriter(saveto, &cmd.nsave), WriterBufferSize)

	ipipe := newRDBLoader(reader, args.parallel*32)
	opipe := make(chan string, cap(ipipe))

	// Fan out args.parallel decoder goroutines; each one signals completion
	// on group, and opipe is closed once all of them have finished.
	go func() {
		defer close(opipe)
		group := make(chan int, args.parallel)
		for i := 0; i < cap(group); i++ {
			go func() {
				defer func() {
					group <- 0
				}()
				cmd.decoderMain(ipipe, opipe)
			}()
		}
		for i := 0; i < cap(group); i++ {
			<-group
		}
	}()

	wait := make(chan struct{})
	// Drain decoded output into the writer, flushing after every write, and
	// close wait once opipe has been exhausted.
	go func() {
		defer close(wait)
		for s := range opipe {
			if _, err := writer.WriteString(s); err != nil {
				log.PanicError(err, "write string failed")
			}
			flushWriter(writer)
		}
	}()

	// Report progress roughly once per second until the writer goroutine is done.
	for done := false; !done; {
		select {
		case <-wait:
			done = true
		case <-time.After(time.Second):
		}
		n, w, o := cmd.nread.Get(), cmd.nsave.Get(), cmd.nobjs.Get()
		if nsize != 0 {
			p := 100 * n / nsize
			log.Infof("total = %d - %12d [%3d%%]  write=%-12d objs=%d\n", nsize, n, p, w, o)
		} else {
			log.Infof("total = %12d  write=%-12d objs=%d\n", n, w, o)
		}
	}
	log.Info("done")
}
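
The group channel above is a hand-rolled barrier: each decoder goroutine sends one token when it returns, and opipe is closed only after all tokens have been collected. The same fan-out/fan-in shape can be written with sync.WaitGroup; the sketch below is illustrative only, with the decode parameter standing in for cmd.decoderMain, placeholder channel element types, and an arbitrary package name.

package decodeutil

import "sync"

// fanOut starts n decoder goroutines that drain in and write to out, then
// closes out once every worker has returned.
func fanOut(n int, in <-chan []byte, out chan<- string,
	decode func(<-chan []byte, chan<- string)) {
	go func() {
		var wg sync.WaitGroup
		defer close(out)
		for i := 0; i < n; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				decode(in, out)
			}()
		}
		wg.Wait()
	}()
}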
Example #5
File: sync.go Project: vebin/reborn
func (h *Handler) doSyncRDB(c *conn, size int64) error {
	defer h.counters.syncRdbRemains.Set(0)
	h.counters.syncRdbRemains.Set(size)

	r := ioutils.NewCountReader(c.r, nil)
	l := rdb.NewLoader(r)
	if err := l.Header(); err != nil {
		return err
	}

	ncpu := runtime.GOMAXPROCS(0)
	errs := make(chan error, ncpu)

	var lock sync.Mutex
	var flag atomic2.Int64
	// Entries are decoded one at a time under the mutex; flag marks EOF or a
	// decode error so the remaining workers stop asking for more.
	loadNextEntry := func() (*rdb.BinEntry, error) {
		lock.Lock()
		defer lock.Unlock()
		if flag.Get() != 0 {
			return nil, nil
		}
		entry, err := l.NextBinEntry()
		if err != nil || entry == nil {
			flag.Set(1)
			return nil, err
		}
		return entry, nil
	}

	// Start one restore worker per CPU; every worker sends exactly one value
	// (possibly nil) on errs when it exits.
	for i := 0; i < ncpu; i++ {
		go func() {
			defer flag.Set(1)
			for {
				entry, err := loadNextEntry()
				if err != nil || entry == nil {
					errs <- err
					return
				}
				db, key, value := entry.DB, entry.Key, entry.Value
				ttlms := int64(0)
				if entry.ExpireAt != 0 {
					// A key whose expire time is already in the past is still
					// restored, but with a 1ms TTL so it expires immediately.
					if v, ok := store.ExpireAtToTTLms(int64(entry.ExpireAt)); ok && v > 0 {
						ttlms = v
					} else {
						ttlms = 1
					}
				}
				if err := c.Store().SlotsRestore(db, [][]byte{key, store.FormatInt(ttlms), value}); err != nil {
					errs <- err
					return
				}
			}
		}()
	}

	for {
		select {
		case <-time.After(time.Second):
			h.counters.syncRdbRemains.Set(size - r.Count())
		case err := <-errs:
			// Collect one result from every worker, keep the first non-nil
			// error, and verify the RDB footer only if all of them succeeded.
			for i := 1; i < cap(errs); i++ {
				e := <-errs
				if err == nil && e != nil {
					err = e
				}
			}
			if err != nil {
				return err
			}
			return l.Footer()
		}
	}
}
Example #6
File: sync.go Project: vebin/reborn
func (h *Handler) startSyncFromMaster(c *conn, size int64) error {
	defer func() {
		h.counters.syncTotalBytes.Set(0)
		h.counters.syncCacheBytes.Set(0)
	}()

	pr, pw := h.openSyncPipe()
	defer pr.Close()

	wg := &sync.WaitGroup{}
	defer wg.Wait()

	wg.Add(1)
	// Forward the raw replication stream from the master connection into the
	// sync pipe, re-arming the read deadline before every read.
	go func(r io.Reader) {
		defer wg.Done()
		defer pw.Close()
		p := make([]byte, 8192)
		for {
			deadline := time.Now().Add(time.Minute)
			if err := c.nc.SetReadDeadline(deadline); err != nil {
				pr.CloseWithError(errors.Trace(err))
				return
			}
			n, err := r.Read(p)
			if err != nil {
				pr.CloseWithError(errors.Trace(err))
				return
			}

			h.counters.syncTotalBytes.Add(int64(n))
			s := p[:n]
			for len(s) != 0 {
				n, err := pw.Write(s)
				if err != nil {
					pr.CloseWithError(errors.Trace(err))
					return
				}
				s = s[n:]
			}
		}
	}(c.r)

	wg.Add(1)
	// Periodically publish how many bytes are still buffered inside the pipe.
	go func() {
		defer wg.Done()
		for {
			time.Sleep(time.Millisecond * 200)
			n, err := pr.Buffered()
			if err != nil {
				return
			}
			h.counters.syncCacheBytes.Set(int64(n))
		}
	}()

	var counter atomic2.Int64
	c.r = bufio.NewReader(ioutils.NewCountReader(pr, &counter))

	if size > 0 {
		// we need full sync first
		if err := c.Store().Reset(); err != nil {
			return errors.Trace(err)
		}

		h.masterConnState.Set(masterConnSync)
		log.Infof("sync rdb file size = %d bytes\n", size)
		if err := h.doSyncRDB(c, size); err != nil {
			return errors.Trace(err)
		}
		log.Infof("sync rdb done")
	}

	h.masterConnState.Set(masterConnConnected)
	return h.doSyncFromMater(c, &counter)
}
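
The forwarding goroutine above re-arms the connection's read deadline before every Read, so a master that stops sending is detected within about a minute instead of blocking forever. Below is a stripped-down sketch of that deadline-refreshing copy loop; copyWithDeadline, its parameters, and the package name are hypothetical, and the byte counter and pipe plumbing from the real code are omitted.

package syncutil

import (
	"io"
	"net"
	"time"
)

// copyWithDeadline copies from conn to dst, setting a fresh read deadline
// before every Read so a silent peer eventually surfaces as an error.
func copyWithDeadline(dst io.Writer, conn net.Conn, timeout time.Duration) error {
	buf := make([]byte, 8192)
	for {
		if err := conn.SetReadDeadline(time.Now().Add(timeout)); err != nil {
			return err
		}
		n, err := conn.Read(buf)
		if n > 0 {
			if _, werr := dst.Write(buf[:n]); werr != nil {
				return werr
			}
		}
		if err != nil {
			return err
		}
	}
}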
Example #7
func TestEncodeRdb(t *testing.T) {
	objs := make([]struct {
		db       uint32
		expireat uint64
		key      []byte
		obj      interface{}
		typ      string
	}, 128)
	var b bytes.Buffer
	enc := NewEncoder(&b)
	assert.ErrorIsNil(t, enc.EncodeHeader())
	for i := 0; i < len(objs); i++ {
		db := uint32(i + 32)
		expireat := uint64(i)
		key := []byte(strconv.Itoa(i))
		var obj interface{}
		var typ string
		switch i % 5 {
		case 0:
			s := strconv.Itoa(i)
			obj = s
			typ = "string"
			assert.ErrorIsNil(t, enc.EncodeObject(db, key, expireat, toString(s)))
		case 1:
			list := []string{}
			for j := 0; j < 32; j++ {
				list = append(list, fmt.Sprintf("l%d_%d", i, rand.Int()))
			}
			obj = list
			typ = "list"
			assert.ErrorIsNil(t, enc.EncodeObject(db, key, expireat, toList(list...)))
		case 2:
			hash := make(map[string]string)
			for j := 0; j < 32; j++ {
				hash[strconv.Itoa(j)] = fmt.Sprintf("h%d_%d", i, rand.Int())
			}
			obj = hash
			typ = "hash"
			assert.ErrorIsNil(t, enc.EncodeObject(db, key, expireat, toHash(hash)))
		case 3:
			zset := make(map[string]float64)
			for j := 0; j < 32; j++ {
				zset[strconv.Itoa(j)] = rand.Float64()
			}
			obj = zset
			typ = "zset"
			assert.ErrorIsNil(t, enc.EncodeObject(db, key, expireat, toZSet(zset)))
		case 4:
			set := []string{}
			for j := 0; j < 32; j++ {
				set = append(set, fmt.Sprintf("s%d_%d", i, rand.Int()))
			}
			obj = set
			typ = "set"
			assert.ErrorIsNil(t, enc.EncodeObject(db, key, expireat, toSet(set...)))
		}
		objs[i].db = db
		objs[i].expireat = expireat
		objs[i].key = key
		objs[i].obj = obj
		objs[i].typ = typ
	}
	assert.ErrorIsNil(t, enc.EncodeFooter())
	rdb := b.Bytes()
	var c atomic2.Int64
	l := NewLoader(ioutils.NewCountReader(bytes.NewReader(rdb), &c))
	assert.ErrorIsNil(t, l.Header())
	var i int = 0
	for {
		e, err := l.NextBinEntry()
		assert.ErrorIsNil(t, err)
		if e == nil {
			break
		}
		assert.Must(t, objs[i].db == e.DB)
		assert.Must(t, objs[i].expireat == e.ExpireAt)
		assert.Must(t, bytes.Equal(objs[i].key, e.Key))
		o, err := DecodeDump(e.Value)
		assert.ErrorIsNil(t, err)
		switch objs[i].typ {
		case "string":
			checkString(t, o, objs[i].obj.(string))
		case "list":
			checkList(t, o, objs[i].obj.([]string))
		case "hash":
			checkHash(t, o, objs[i].obj.(map[string]string))
		case "zset":
			checkZSet(t, o, objs[i].obj.(map[string]float64))
		case "set":
			checkSet(t, o, objs[i].obj.([]string))
		}
		i++
	}
	assert.Must(t, i == len(objs))
	assert.ErrorIsNil(t, l.Footer())
	assert.Must(t, c.Get() == int64(len(rdb)))
}