Example #1
// ParseFrom reads from the bufio.Reader and parses a needle.
// Used when scanning a block via bufio.Reader.
func (n *Needle) ParseFrom(rd *bufio.Reader) (err error) {
	var (
		dataOffset   int32
		footerOffset int32
		endOffset    int32
		data         []byte
	)
	// header
	if data, err = rd.Peek(_headerSize); err != nil {
		return
	}
	if err = n.parseHeader(data); err != nil {
		return
	}
	dataOffset = _headerSize
	footerOffset = dataOffset + n.Size
	endOffset = footerOffset + n.FooterSize
	// no discard yet; peek the whole needle buffer
	if data, err = rd.Peek(int(n.TotalSize)); err != nil {
		return
	}
	if err = n.parseData(data[dataOffset:footerOffset]); err != nil {
		return
	}
	// footer
	if err = n.parseFooter(data[footerOffset:endOffset]); err != nil {
		return
	}
	n.buffer = data
	_, err = rd.Discard(int(n.TotalSize))
	return
}
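
The example above follows the pattern used throughout this collection: Peek exactly the bytes needed, parse them in place from the buffered data, and only then Discard them to advance the reader. A minimal, self-contained sketch of that Peek-parse-Discard pattern, using a hypothetical 2-byte length-prefixed frame rather than the needle format:

package main

import (
	"bufio"
	"bytes"
	"encoding/binary"
	"fmt"
)

// readFrame reads one hypothetical frame: a 2-byte big-endian length
// followed by that many payload bytes.
func readFrame(rd *bufio.Reader) ([]byte, error) {
	// Peek the header without consuming it.
	hdr, err := rd.Peek(2)
	if err != nil {
		return nil, err
	}
	n := int(binary.BigEndian.Uint16(hdr))
	// Peek the whole frame so it can be parsed straight from the buffer.
	buf, err := rd.Peek(2 + n)
	if err != nil {
		return nil, err
	}
	// Copy the payload out before Discard invalidates the peeked slice.
	payload := append([]byte(nil), buf[2:]...)
	// Advance past the frame; Discard errors if fewer bytes are available.
	if _, err := rd.Discard(2 + n); err != nil {
		return nil, err
	}
	return payload, nil
}

func main() {
	frame := append([]byte{0x00, 0x05}, "hello"...)
	rd := bufio.NewReader(bytes.NewReader(frame))
	payload, err := readFrame(rd)
	fmt.Printf("%q %v\n", payload, err) // "hello" <nil>
}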
Example #2
func (u *udpgwPacket) read(r *bufio.Reader) error {
	bs, err := r.Peek(2)
	if nil != err {
		return err
	}
	u.length = binary.LittleEndian.Uint16(bs)
	//binary.Read(r, binary.BigEndian, &u.length)
	r.Discard(2)
	//log.Printf("###First %d  %d %d %p", u.length, binary.BigEndian.Uint16(bs), len(bs), r)
	_, err = r.Peek(int(u.length))
	if nil != err {
		//log.Printf("### %v", err)
		return err
	}
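	// Note: the Peek above guarantees u.length bytes are buffered, so the
	// r.Read calls below are satisfied entirely from the buffered data.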
	bodylen := u.length
	binary.Read(r, binary.BigEndian, &u.flags)
	binary.Read(r, binary.BigEndian, &u.conid)
	bodylen -= 3
	if bodylen > 0 {
		if (u.flags & flagIPv6) != 0 {
			u.addr.ip = make(net.IP, 16)

		} else {
			u.addr.ip = make(net.IP, 4)
		}
		r.Read(u.addr.ip)
		bodylen -= uint16(len(u.addr.ip))
		binary.Read(r, binary.BigEndian, &u.addr.port)
		bodylen -= 2
		u.content = make([]byte, int(bodylen))
		r.Read(u.content)
	}
	return nil
}
Example #3
func (h Handler) Set(cmd common.SetRequest, src *bufio.Reader) error {
	// TODO: should there be a unique flags value for regular data?
	// Write command header
	if err := binprot.WriteSetCmd(h.rw.Writer, cmd.Key, cmd.Flags, cmd.Exptime, uint32(len(cmd.Data))); err != nil {
		return err
	}

	// Write value
	h.rw.Write(cmd.Data)

	if err := h.rw.Flush(); err != nil {
		return err
	}

	// Read server's response
	resHeader, err := readResponseHeader(h.rw.Reader)
	if err != nil {
		// Discard request body
		if _, ioerr := src.Discard(len(cmd.Data)); ioerr != nil {
			return ioerr
		}

		// Discard response body
		if _, ioerr := h.rw.Discard(int(resHeader.TotalBodyLength)); ioerr != nil {
			return ioerr
		}

		return err
	}

	return nil
}
Example #4
File: wim.go Project: vmware/vic
func (img *Image) readNextStream(r *bufio.Reader) (*Stream, error) {
	lengthBuf, err := r.Peek(8)
	if err != nil {
		return nil, &ParseError{Oper: "stream length check", Err: err}
	}

	left := int(binary.LittleEndian.Uint64(lengthBuf))
	if left < streamentrySize {
		return nil, &ParseError{Oper: "stream entry", Err: errors.New("size too short")}
	}

	var sentry streamentry
	err = binary.Read(r, binary.LittleEndian, &sentry)
	if err != nil {
		return nil, &ParseError{Oper: "stream entry", Err: err}
	}

	left -= streamentrySize

	if left < int(sentry.NameLength) {
		return nil, &ParseError{Oper: "stream entry", Err: errors.New("size too short for name")}
	}

	names := make([]uint16, sentry.NameLength/2)
	err = binary.Read(r, binary.LittleEndian, names)
	if err != nil {
		return nil, &ParseError{Oper: "file name", Err: err}
	}

	left -= int(sentry.NameLength)
	name := string(utf16.Decode(names))

	var offset resourceDescriptor
	if sentry.Hash != (SHA1Hash{}) {
		var ok bool
		offset, ok = img.wim.fileData[sentry.Hash]
		if !ok {
			return nil, &ParseError{Oper: "stream entry", Path: name, Err: fmt.Errorf("could not find file data matching hash %v", sentry.Hash)}
		}
	}

	s := &Stream{
		StreamHeader: StreamHeader{
			Hash: sentry.Hash,
			Size: offset.OriginalSize,
			Name: name,
		},
		wim:    img.wim,
		offset: offset,
	}

	_, err = r.Discard(left)
	if err != nil {
		return nil, err
	}

	return s, nil
}
Example #5
// Recovery recovers the needles map from the super block.
func (b *SuperBlock) Recovery(offset uint32, fn func(*Needle, uint32) error) (
	err error) {
	var (
		n    = &Needle{}
		rd   *bufio.Reader
		data []byte
	)
	log.Infof("block: %s recovery from offset: %d", b.File, offset)
	if offset == 0 {
		offset = NeedleOffset(superBlockHeaderOffset)
	}
	b.Offset = offset
	if _, err = b.r.Seek(blockOffset(b.Offset), os.SEEK_SET); err != nil {
		log.Errorf("block: %s Seek() error(%v)", b.File)
		return
	}
	rd = bufio.NewReaderSize(b.r, NeedleMaxSize)
	for {
		if data, err = rd.Peek(NeedleHeaderSize); err != nil {
			break
		}
		if err = n.ParseHeader(data); err != nil {
			break
		}
		if _, err = rd.Discard(NeedleHeaderSize); err != nil {
			break
		}
		if data, err = rd.Peek(n.DataSize); err != nil {
			break
		}
		if err = n.ParseData(data); err != nil {
			break
		}
		if _, err = rd.Discard(n.DataSize); err != nil {
			break
		}
		if log.V(1) {
			log.Infof("block add offset: %d, size: %d to needles cache",
				b.Offset, n.TotalSize)
			log.Info(n.String())
		}
		if err = fn(n, b.Offset); err != nil {
			break
		}
		b.Offset += NeedleOffset(int64(n.TotalSize))
	}
	if err == io.EOF {
		// reset b.w offset, discard leftover space which can't be parsed into a needle
		if _, err = b.w.Seek(blockOffset(b.Offset), os.SEEK_SET); err != nil {
			log.Errorf("block: %s Seek() error(%v)", b.File, err)
		} else {
			log.Infof("block: %s:%d*8 recovery [ok]", b.File, b.Offset)
			return
		}
	}
	log.Infof("block: %s recovery [failed]", b.File)
	return
}
Example #6
func getLocalIntoBuf(rw *bufio.Reader, metaData metadata, tokenBuf, dataBuf []byte, chunkNum, totalDataLength int) (opcodeNoop bool, err error) {
	resHeader, err := binprot.ReadResponseHeader(rw)
	if err != nil {
		return false, err
	}

	// it feels a bit dirty knowing about batch gets here, but it's the most logical place to put
	// a check for an opcode that signals the end of a batch get or GAT. This code is a bit too big
	// to copy-paste in multiple places.
	if resHeader.Opcode == binprot.OpcodeNoop {
		return true, nil
	}

	err = binprot.DecodeError(resHeader)
	if err != nil {
		// Discard the message body on error
		if _, ioerr := rw.Discard(int(resHeader.TotalBodyLength)); ioerr != nil {
			return false, ioerr
		}
		return false, err
	}

	serverFlags := make([]byte, 4)
	binary.Read(rw, binary.BigEndian, &serverFlags)

	// Read in token if requested
	if tokenBuf != nil {
		if _, err := io.ReadFull(rw, tokenBuf); err != nil {
			return false, err
		}
	}

	// indices for slicing, end exclusive
	start, end := chunkSliceIndices(int(metaData.ChunkSize), chunkNum, int(metaData.Length))
	// read data directly into buf
	chunkBuf := dataBuf[start:end]

	// Read in value
	if _, err := io.ReadFull(rw, chunkBuf); err != nil {
		return false, err
	}

	// consume padding at end of chunk if needed
	if len(chunkBuf) < totalDataLength {
		if _, ioerr := rw.Discard(totalDataLength - len(chunkBuf)); ioerr != nil {
			return false, ioerr
		}
	}

	return false, nil
}
Example #7
// Recovery recovers the needles map from the super block.
func (b *SuperBlock) Recovery(needles map[int64]NeedleCache, indexer *Indexer, offset int64) (err error) {
	var (
		rd      *bufio.Reader
		data    []byte
		size    int32
		noffset uint32
		n       = &Needle{}
	)
	log.Printf("start super block recovery, offset: %d\n", offset)
	if _, err = b.r.Seek(offset, os.SEEK_SET); err != nil {
		return
	}
	rd = bufio.NewReaderSize(b.r, NeedleMaxSize)
	for {
		// header
		if data, err = rd.Peek(NeedleHeaderSize); err != nil {
			break
		}
		if err = ParseNeedleHeader(data, n); err != nil {
			break
		}
		if _, err = rd.Discard(NeedleHeaderSize); err != nil {
			break
		}
		// data
		if data, err = rd.Peek(n.DataSize); err != nil {
			break
		}
		if err = ParseNeedleData(data, n); err != nil {
			break
		}
		if _, err = rd.Discard(n.DataSize); err != nil {
			break
		}
		log.Print(n.String())
		size = int32(NeedleHeaderSize + n.DataSize)
		noffset += NeedleOffset(int(size))
		needles[n.Key] = NewNeedleCache(size, noffset)
		indexer.Add(n.Key, noffset, size)
	}
	if err == io.EOF {
		err = nil
	}
	// reset b.w offset, discard leftover space which can't be parsed into a needle
	if _, err = b.w.Seek(BlockOffset(noffset), os.SEEK_SET); err != nil {
		return
	}
	return
}
Example #8
// Recovery recovers the needle cache metadata in memory; the index file will stop
// at the correct parse offset.
func (i *Indexer) Recovery(fn func(*Index) error) (noffset uint32, err error) {
	var (
		rd     *bufio.Reader
		data   []byte
		offset int64
		ix     = &Index{}
	)
	log.Infof("index: %s recovery", i.File)
	if offset, err = i.f.Seek(0, os.SEEK_SET); err != nil {
		log.Errorf("index: %s Seek() error(%v)", i.File, err)
		return
	}
	rd = bufio.NewReaderSize(i.f, NeedleMaxSize)
	for {
		if data, err = rd.Peek(indexSize); err != nil {
			break
		}
		ix.parse(data)
		if ix.Size > NeedleMaxSize || ix.Size < 1 {
			log.Errorf("index parse size: %d error", ix.Size)
			err = ErrIndexSize
			break
		}
		if _, err = rd.Discard(indexSize); err != nil {
			break
		}
		if log.V(1) {
			log.Info(ix.String())
		}
		offset += int64(indexSize)
		if err = fn(ix); err != nil {
			break
		}
		// save this for recovering the super block
		noffset = ix.Offset + NeedleOffset(int64(ix.Size))
	}
	if err == io.EOF {
		// reset i.f offset, discard leftover space which can't be parsed into an index entry
		if _, err = i.f.Seek(offset, os.SEEK_SET); err != nil {
			log.Errorf("index: %s Seek() error(%v)", i.File, err)
		} else {
			log.Infof("index: %s recovery [ok]", i.File)
			return
		}
	}
	log.Errorf("index: %s recovery [failed], error(%v)", i.File, err)
	return
}
Example #9
func consumeResponse(r *bufio.Reader) error {
	res, err := readRes(r)
	if err != nil {
		return err
	}

	apperr := statusToError(res.Status)

	// read body in regardless of the error in the header
	r.Discard(int(res.BodyLen))

	if apperr != nil && srsErr(apperr) {
		return apperr
	}

	return err
}
Example #10
func handleSet(msg string, reader *bufio.Reader) string {
	m := setFormat.FindStringSubmatch(msg)
	if len(m) < 5 {
		return fmt.Sprintf("CLIENT_ERROR couln't extract values from %v\r\n", msg)
	}
	key, _, _, sizeStr := m[1], m[2], m[3], m[4]
	size, _ := strconv.ParseInt(sizeStr, 10, 16)
	buf := make([]byte, size)
	// Read the full value, then consume the trailing \r\n.
	if _, err := io.ReadFull(reader, buf); err != nil {
		log.Printf("Error reading buffer: %v\n", err)
	}
	reader.Discard(2)
	lock.Lock()
	kvs[key] = string(buf)
	lock.Unlock()
	return "STORED\r\n"
}
Example #11
File: resp.go Project: rod6/rodis
func parseBulkString(reader *bufio.Reader) (RESPType, BulkString, error) {
	i, err := readInt(reader)
	if err != nil {
		return BulkStringType, BulkString(nil), err
	}

	if i == -1 {
		return BulkStringType, BulkString(nil), nil
	}

	b, err := reader.Peek(int(i))
	if err != nil {
		return BulkStringType, BulkString(nil), err
	}
	reader.Discard(int(i) + 2) // +2 for \r\n

	return BulkStringType, BulkString(b), nil
}
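
The +2 passed to Discard accounts for the \r\n that terminates a RESP bulk string after its payload. A small, self-contained illustration of that framing (readInt and the RESP types used above belong to the rodis package and are not reproduced here):

package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	// "$5\r\nhello\r\n" is the RESP encoding of the bulk string "hello":
	// a length line, the payload, then a terminating CRLF.
	rd := bufio.NewReader(strings.NewReader("$5\r\nhello\r\n+OK\r\n"))

	// Consume the "$5\r\n" length line (readInt's job in the example above).
	if _, err := rd.ReadString('\n'); err != nil {
		panic(err)
	}

	body, _ := rd.Peek(5)
	fmt.Printf("%q\n", body) // "hello"

	// Discard the payload plus the trailing \r\n, leaving the reader
	// positioned at the next reply.
	rd.Discard(5 + 2)
	next, _ := rd.Peek(5)
	fmt.Printf("%q\n", next) // "+OK\r\n"
}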
Example #12
func (i *Indexer) Recovery(needles map[int64]NeedleCache) (noffset uint32, err error) {
	var (
		rd     *bufio.Reader
		data   []byte
		offset int64
		ix     = &Index{}
	)
	if offset, err = i.f.Seek(0, os.SEEK_SET); err != nil {
		return
	}
	rd = bufio.NewReaderSize(i.f, i.bufSize)
	for {
		// parse data
		if data, err = rd.Peek(indexSize); err != nil {
			break
		}
		ix.Key = BigEndian.Int64(data)
		ix.Offset = BigEndian.Uint32(data[indexOffsetOffset:])
		ix.Size = BigEndian.Int32(data[indexSizeOffset:])
		// check
		if ix.Offset%NeedlePaddingSize != 0 {
			err = ErrIndexOffset
			break
		}
		if ix.Size > NeedleMaxSize {
			err = ErrIndexSize
			break
		}
		if _, err = rd.Discard(indexSize); err != nil {
			break
		}
		log.Print(ix.String())
		offset += int64(indexSize)
		needles[ix.Key] = NewNeedleCache(ix.Size, ix.Offset)
		// save this for recovering the super block
		noffset = ix.Offset + NeedleOffset(int(ix.Size))
	}
	// reset i.f offset, discard leftover space which can't be parsed into an index entry
	log.Printf("seek offset: %d\n", offset)
	if _, err = i.f.Seek(offset, os.SEEK_SET); err != nil {
		return
	}
	return
}
Example #13
func consumeBatchResponse(r *bufio.Reader) error {
	opcode := uint8(Get)
	var apperr error

	for opcode != Noop {
		res, err := readRes(r)
		if err != nil {
			return err
		}

		opcode = res.Opcode
		apperr = statusToError(res.Status)

		// read body in regardless of the error in the header
		r.Discard(int(res.BodyLen))
	}

	return apperr
}
Example #14
File: index.go Project: MrXiaoZ/bfs
// Recovery recovers the needle cache metadata in memory; the index file will stop
// at the correct parse offset.
func (i *Indexer) Recovery(needles map[int64]NeedleCache) (noffset uint32, err error) {
	var (
		rd     *bufio.Reader
		data   []byte
		offset int64
		ix     = &Index{}
	)
	log.Infof("index: %s recovery", i.file)
	if offset, err = i.f.Seek(0, os.SEEK_SET); err != nil {
		log.Errorf("index: %s Seek() error(%v)", i.file, err)
		return
	}
	rd = bufio.NewReaderSize(i.f, NeedleMaxSize)
	for {
		// parse data
		if data, err = rd.Peek(indexSize); err != nil {
			break
		}
		ix.parse(data)
		// check
		if ix.Size > NeedleMaxSize || ix.Size < 1 {
			log.Errorf("index parse size: %d > %d or %d < 1", ix.Size, NeedleMaxSize, ix.Size)
			break
		}
		if _, err = rd.Discard(indexSize); err != nil {
			break
		}
		log.V(1).Info(ix.String())
		offset += int64(indexSize)
		needles[ix.Key] = NewNeedleCache(ix.Offset, ix.Size)
		// save this for recovering the super block
		noffset = ix.Offset + NeedleOffset(int64(ix.Size))
	}
	if err != io.EOF {
		return
	}
	// reset i.f offset, discard leftover space which can't be parsed into an index entry
	if _, err = i.f.Seek(offset, os.SEEK_SET); err != nil {
		log.Errorf("index: %s Seek() error(%v)", i.file, err)
	}
	log.Infof("index: %s recovery [ok]", i.file)
	return
}
Example #15
// When the error is ErrUnexpectedRESPEOF, the data is incomplete and does not yet form a whole bulk string.
// The number of bytes still missing is returned as the second value; in all other cases it is 0.
func parseBulkString(line []byte, br *bufio.Reader) (RESP_BULK_STRING, int, error) {
	neededDataLen := 0
	n, err := parseLen(line[0:])
	if n < 0 || err != nil {
		return "", neededDataLen, err
	}
	brSize := br.Buffered()
	//n+2: bulkString + "\r\n"
	if n+2 > brSize {
		neededDataLen = n + 2 - brSize
		return "", neededDataLen, ErrUnexpectedRESPEOF
	}
	p, _ := br.Peek(n)
	br.Discard(n)
	if line, err := readLine(br); err != nil {
		return "", neededDataLen, err
	} else if len(line) != 0 {
		return "", neededDataLen, errors.New("bad bulk string format")
	}
	return RESP_BULK_STRING(p), neededDataLen, nil
}
Example #16
// ParseFrom parses a needle from the bufio.Reader.
func (n *Needle) ParseFrom(rd *bufio.Reader) (err error) {
	var data []byte
	// header
	if data, err = rd.Peek(HeaderSize); err != nil {
		return
	}
	if err = n.ParseHeader(data); err != nil {
		return
	}
	if _, err = rd.Discard(HeaderSize); err != nil {
		return
	}
	// data
	if data, err = rd.Peek(int(n.Size)); err != nil {
		return
	}
	if err = n.ParseData(data); err != nil {
		return
	}
	if _, err = rd.Discard(int(n.Size)); err != nil {
		return
	}
	// footer
	if data, err = rd.Peek(int(n.FooterSize)); err != nil {
		return
	}
	if err = n.ParseFooter(data); err != nil {
		return
	}
	_, err = rd.Discard(int(n.FooterSize))
	return
}
Example #17
// Dump parses the super block file and prints each needle for debugging.
// DEBUG ONLY!
func (b *SuperBlock) Dump() (err error) {
	var (
		rd   *bufio.Reader
		data []byte
		n    = &Needle{}
	)
	if _, err = b.r.Seek(0, os.SEEK_SET); err != nil {
		return
	}
	rd = bufio.NewReaderSize(b.r, NeedleMaxSize)
	for {
		// header
		if data, err = rd.Peek(NeedleHeaderSize); err != nil {
			break
		}
		if err = ParseNeedleHeader(data, n); err != nil {
			break
		}
		if _, err = rd.Discard(NeedleHeaderSize); err != nil {
			break
		}
		// data
		if data, err = rd.Peek(n.DataSize); err != nil {
			break
		}
		if err = ParseNeedleData(data, n); err != nil {
			break
		}
		if _, err = rd.Discard(n.DataSize); err != nil {
			break
		}
		log.Print(n.String())
	}
	if err == io.EOF {
		err = nil
	}
	return
}
Example #18
File: wim.go Project: vmware/vic
func (img *Image) readNextEntry(r *bufio.Reader) (*File, error) {
	lengthBuf, err := r.Peek(8)
	if err != nil {
		return nil, &ParseError{Oper: "directory length check", Err: err}
	}

	left := int(binary.LittleEndian.Uint64(lengthBuf))
	if left == 0 {
		return nil, io.EOF
	}

	if left < direntrySize {
		return nil, &ParseError{Oper: "directory entry", Err: errors.New("size too short")}
	}

	var dentry direntry
	err = binary.Read(r, binary.LittleEndian, &dentry)
	if err != nil {
		return nil, &ParseError{Oper: "directory entry", Err: err}
	}

	left -= direntrySize

	namesLen := int(dentry.FileNameLength + 2 + dentry.ShortNameLength)
	if left < namesLen {
		return nil, &ParseError{Oper: "directory entry", Err: errors.New("size too short for names")}
	}

	names := make([]uint16, namesLen/2)
	err = binary.Read(r, binary.LittleEndian, names)
	if err != nil {
		return nil, &ParseError{Oper: "file name", Err: err}
	}

	left -= namesLen

	var name, shortName string
	if dentry.FileNameLength > 0 {
		name = string(utf16.Decode(names[:dentry.FileNameLength/2]))
	}

	if dentry.ShortNameLength > 0 {
		shortName = string(utf16.Decode(names[dentry.FileNameLength/2+1:]))
	}

	var offset resourceDescriptor
	zerohash := SHA1Hash{}
	if dentry.Hash != zerohash {
		var ok bool
		offset, ok = img.wim.fileData[dentry.Hash]
		if !ok {
			return nil, &ParseError{Oper: "directory entry", Path: name, Err: fmt.Errorf("could not find file data matching hash %#v", dentry)}
		}
	}

	f := &File{
		FileHeader: FileHeader{
			Attributes:     dentry.Attributes,
			CreationTime:   dentry.CreationTime,
			LastAccessTime: dentry.LastAccessTime,
			LastWriteTime:  dentry.LastWriteTime,
			Hash:           dentry.Hash,
			Size:           offset.OriginalSize,
			Name:           name,
			ShortName:      shortName,
		},

		offset:       offset,
		img:          img,
		subdirOffset: dentry.SubdirOffset,
	}

	isDir := false

	if dentry.Attributes&FILE_ATTRIBUTE_REPARSE_POINT == 0 {
		f.LinkID = dentry.ReparseHardLink
		if dentry.Attributes&FILE_ATTRIBUTE_DIRECTORY != 0 {
			isDir = true
		}
	} else {
		f.ReparseTag = uint32(dentry.ReparseHardLink)
		f.ReparseReserved = uint32(dentry.ReparseHardLink >> 32)
	}

	if isDir && f.subdirOffset == 0 {
		return nil, &ParseError{Oper: "directory entry", Path: name, Err: errors.New("no subdirectory data for directory")}
	} else if !isDir && f.subdirOffset != 0 {
		return nil, &ParseError{Oper: "directory entry", Path: name, Err: errors.New("unexpected subdirectory data for non-directory")}
	}

	if dentry.SecurityID != 0xffffffff {
		f.SecurityDescriptor = img.sds[dentry.SecurityID]
	}

	_, err = r.Discard(left)
	if err != nil {
		return nil, err
	}

	if dentry.StreamCount > 0 {
		var streams []*Stream
		for i := uint16(0); i < dentry.StreamCount; i++ {
			s, err := img.readNextStream(r)
			if err != nil {
				return nil, err
			}
			// The first unnamed stream should be treated as the file stream.
			if i == 0 && s.Name == "" {
				f.Hash = s.Hash
				f.Size = s.Size
				f.offset = s.offset
			} else if s.Name != "" {
				streams = append(streams, s)
			}
		}
		f.Streams = streams
	}

	if dentry.Attributes&FILE_ATTRIBUTE_REPARSE_POINT != 0 && f.Size == 0 {
		return nil, &ParseError{Oper: "directory entry", Path: name, Err: errors.New("reparse point is missing reparse stream")}
	}

	return f, nil
}
Example #19
func getLocalIntoBuf(rw *bufio.Reader, metaData metadata, tokenBuf, dataBuf []byte, chunkNum, totalDataLength int) (opcodeNoop bool, err error) {
	resHeader, err := binprot.ReadResponseHeader(rw)
	if err != nil {
		return false, err
	}
	defer binprot.PutResponseHeader(resHeader)

	// it feels a bit dirty knowing about batch gets here, but it's the most logical place to put
	// a check for an opcode that signals the end of a batch get or GAT. This code is a bit too big
	// to copy-paste in multiple places.
	if resHeader.Opcode == binprot.OpcodeNoop {
		return true, nil
	}

	err = binprot.DecodeError(resHeader)
	if err != nil {
		// read in the message "Not found" after a miss
		n, ioerr := rw.Discard(int(resHeader.TotalBodyLength))
		metrics.IncCounterBy(common.MetricBytesReadLocal, uint64(n))
		if ioerr != nil {
			return false, ioerr
		}
		return false, err
	}

	// we currently do nothing with the flags
	//buf := make([]byte, 4)
	//n, err := io.ReadAtLeast(rw, buf, 4)
	//metrics.IncCounterBy(common.MetricBytesReadLocal, uint64(n))
	//if err != nil {
	//	return emptyMeta, err
	//}
	//serverFlags := binary.BigEndian.Uint32(buf)

	// instead of reading and parsing flags, just discard
	rw.Discard(4)
	metrics.IncCounterBy(common.MetricBytesReadLocal, 4)

	// Read in token if requested
	if tokenBuf != nil {
		n, err := io.ReadAtLeast(rw, tokenBuf, tokenSize)
		metrics.IncCounterBy(common.MetricBytesReadLocal, uint64(n))
		if err != nil {
			return false, err
		}
	}

	// indices for slicing, end exclusive
	start, end := chunkSliceIndices(int(metaData.ChunkSize), chunkNum, int(metaData.Length))
	// read data directly into buf
	chunkBuf := dataBuf[start:end]

	// Read in value
	n, err := io.ReadAtLeast(rw, chunkBuf, len(chunkBuf))
	metrics.IncCounterBy(common.MetricBytesReadLocal, uint64(n))
	if err != nil {
		return false, err
	}

	// consume padding at end of chunk if needed
	if len(chunkBuf) < totalDataLength {
		n, ioerr := rw.Discard(totalDataLength - len(chunkBuf))
		metrics.IncCounterBy(common.MetricBytesReadLocal, uint64(n))
		if ioerr != nil {
			return false, ioerr
		}
	}

	return false, nil
}
Example #20
// Compress compresses the orig block, copying it to the dst block on disk.
func (b *SuperBlock) Compress(offset int64, v *Volume) (noffset int64, err error) {
	var (
		data []byte
		r    *os.File
		rd   *bufio.Reader
		n    = &Needle{}
	)
	log.Infof("block: %s compress", b.file)
	if r, err = os.OpenFile(b.file, os.O_RDONLY, 0664); err != nil {
		log.Errorf("os.OpenFile(\"%s\", os.O_RDONLY, 0664) error(%v)", b.file, err)
		return
	}
	if offset == 0 {
		offset = superBlockHeaderOffset
	}
	if _, err = r.Seek(offset, os.SEEK_SET); err != nil {
		log.Errorf("block: %s Seek() error(%v)", b.file, err)
		return
	}
	rd = bufio.NewReaderSize(r, NeedleMaxSize)
	for {
		// header
		if data, err = rd.Peek(NeedleHeaderSize); err != nil {
			break
		}
		if err = n.ParseHeader(data); err != nil {
			break
		}
		if _, err = rd.Discard(NeedleHeaderSize); err != nil {
			break
		}
		// data
		if data, err = rd.Peek(n.DataSize); err != nil {
			break
		}
		if err = n.ParseData(data); err != nil {
			break
		}
		if _, err = rd.Discard(n.DataSize); err != nil {
			break
		}
		offset += int64(NeedleHeaderSize + n.DataSize)
		log.V(1).Info(n.String())
		// skip deleted needles
		if n.Flag == NeedleStatusDel {
			continue
		}
		// multi append
		if err = v.Write(n.Key, n.Cookie, n.Data); err != nil {
			break
		}
	}
	if err != io.EOF {
		return
	}
	if err = v.Flush(); err != nil {
		return
	}
	if err = r.Close(); err != nil {
		return
	}
	noffset = offset
	return
}
Example #21
// Recovery recovers the needles map from the super block.
func (b *SuperBlock) Recovery(needles map[int64]NeedleCache, indexer *Indexer, offset int64) (err error) {
	var (
		size    int32
		data    []byte
		rd      *bufio.Reader
		n       = &Needle{}
		nc      NeedleCache
		noffset uint32
	)
	log.Infof("block: %s recovery from offset: %d", b.file, offset)
	if offset == 0 {
		offset = superBlockHeaderOffset
	}
	noffset = NeedleOffset(offset)
	if _, err = b.r.Seek(offset, os.SEEK_SET); err != nil {
		log.Errorf("block: %s Seek() error(%v)", b.file)
		return
	}
	rd = bufio.NewReaderSize(b.r, NeedleMaxSize)
	for {
		// header
		if data, err = rd.Peek(NeedleHeaderSize); err != nil {
			break
		}
		if err = n.ParseHeader(data); err != nil {
			break
		}
		if _, err = rd.Discard(NeedleHeaderSize); err != nil {
			break
		}
		// data
		if data, err = rd.Peek(n.DataSize); err != nil {
			break
		}
		if err = n.ParseData(data); err != nil {
			break
		}
		if _, err = rd.Discard(n.DataSize); err != nil {
			break
		}
		size = int32(NeedleHeaderSize + n.DataSize)
		if n.Flag == NeedleStatusOK {
			if err = indexer.Add(n.Key, noffset, size); err != nil {
				break
			}
			nc = NewNeedleCache(noffset, size)
		} else {
			nc = NewNeedleCache(NeedleCacheDelOffset, size)
		}
		needles[n.Key] = nc
		log.V(1).Infof("block add offset: %d, size: %d to needles cache", noffset, size)
		log.V(1).Info(n.String())
		noffset += NeedleOffset(int64(size))
	}
	if err == io.EOF {
		err = nil
	}
	// reset b.w offset, discard leftover space which can't be parsed into a needle
	if _, err = b.w.Seek(BlockOffset(noffset), os.SEEK_SET); err != nil {
		log.Errorf("block: %s Seek() error(%v)", b.file, err)
	}
	return
}
Example #22
func discard(r *bufio.Reader, i int) (int, error) {
	return r.Discard(i)
}
Example #23
func discard(r *bufio.Reader, n int) {
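	// The discarded byte count and error are deliberately dropped here;
	// see the mustDiscard example further below for a checked variant.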
	r.Discard(n)
}
Example #24
func (h Handler) Set(cmd common.SetRequest, src *bufio.Reader) error {
	// For writing chunks, the specialized chunked reader is appropriate.
	// For unchunked data, a limited reader is needed, since the text protocol
	// includes a trailing \r\n and there is no EOF to rely on with a long-lived
	// connection.
	limChunkReader := newChunkLimitedReader(bytes.NewBuffer(cmd.Data), int64(chunkSize), int64(len(cmd.Data)))
	numChunks := int(math.Ceil(float64(len(cmd.Data)) / float64(chunkSize)))
	token := <-tokens

	metaKey := metaKey(cmd.Key)
	metaData := metadata{
		Length:    uint32(len(cmd.Data)),
		OrigFlags: cmd.Flags,
		NumChunks: uint32(numChunks),
		ChunkSize: chunkSize,
		Token:     token,
	}

	metaDataBuf := new(bytes.Buffer)
	binary.Write(metaDataBuf, binary.BigEndian, metaData)

	// Write metadata key
	// TODO: should there be a unique flags value for chunked data?
	if err := binprot.WriteSetCmd(h.rw.Writer, metaKey, cmd.Flags, cmd.Exptime, metadataSize); err != nil {
		return err
	}
	// Write value
	if _, err := io.Copy(h.rw.Writer, metaDataBuf); err != nil {
		return err
	}
	if err := h.rw.Flush(); err != nil {
		return err
	}

	// Read server's response
	resHeader, err := readResponseHeader(h.rw.Reader)
	if err != nil {
		// Discard request body
		if _, ioerr := src.Discard(len(cmd.Data)); ioerr != nil {
			return ioerr
		}

		// Discard response body
		if _, ioerr := h.rw.Discard(int(resHeader.TotalBodyLength)); ioerr != nil {
			return ioerr
		}

		return err
	}

	// Write all the data chunks
	// TODO: Clean up if a data chunk write fails
	// Failure can mean the write failing at the I/O level
	// or at the memcached level, e.g. response == ERROR
	chunkNum := 0
	for limChunkReader.More() {
		// Build this chunk's key
		key := chunkKey(cmd.Key, chunkNum)

		// Write the key
		if err := binprot.WriteSetCmd(h.rw.Writer, key, cmd.Flags, cmd.Exptime, fullDataSize); err != nil {
			return err
		}
		// Write token
		if _, err := h.rw.Write(token[:]); err != nil {
			return err
		}
		// Write value
		if _, err := io.Copy(h.rw.Writer, limChunkReader); err != nil {
			return err
		}
		// There's some additional overhead here calling Flush() because it causes a write() syscall
		// The set case is already a slow path and is async from the client perspective for our use
		// case so this is not a problem.
		if err := h.rw.Flush(); err != nil {
			return err
		}

		// Read server's response
		resHeader, err = readResponseHeader(h.rw.Reader)
		if err != nil {
			// Reset the ReadWriter to prevent sending garbage to memcached
			// otherwise we get disconnected
			h.reset()

			// Discard request body
			// This is more complicated code but more straightforward than attempting to get at
			// the underlying reader and discard directly, since we don't exactly know how many
			// bytes were sent already
			for limChunkReader.More() {
				if _, ioerr := io.Copy(ioutil.Discard, limChunkReader); ioerr != nil {
					return ioerr
				}

				limChunkReader.NextChunk()
			}

			// Discard response body
			if _, ioerr := h.rw.Discard(int(resHeader.TotalBodyLength)); ioerr != nil {
				return ioerr
			}

			return err
		}

		// Reset for next iteration
		limChunkReader.NextChunk()
		chunkNum++
	}

	return nil
}
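
The chunk loop above depends on the project's chunk-limited reader (newChunkLimitedReader), which is not shown in this listing. As a rough sketch of the same idea, and only under that assumption, a payload can be split into fixed-size chunks with io.LimitReader; the names below are illustrative:

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

// writeChunks copies payload to dst in chunks of at most chunkSize bytes,
// one chunk per iteration, loosely mirroring the More()/NextChunk() loop above.
func writeChunks(dst io.Writer, payload []byte, chunkSize int64) error {
	src := bytes.NewReader(payload)
	for chunkNum := 0; ; chunkNum++ {
		n, err := io.Copy(dst, io.LimitReader(src, chunkSize))
		if err != nil {
			return err
		}
		if n == 0 {
			return nil // payload exhausted
		}
		fmt.Fprintf(os.Stderr, "chunk %d: %d bytes\n", chunkNum, n)
	}
}

func main() {
	if err := writeChunks(os.Stdout, []byte("0123456789abcdef"), 5); err != nil {
		panic(err)
	}
}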
Example #25
func HandleSet(cmd common.SetRequest, src *bufio.Reader, rw *bufio.ReadWriter) error {
	// For writing chunks, the specialized chunked reader is appropriate.
	// For unchunked data, a limited reader is needed, since the text protocol
	// includes a trailing \r\n and there is no EOF to rely on with a long-lived
	// connection.
	limChunkReader := stream.NewChunkLimitedReader(src, int64(CHUNK_SIZE), int64(cmd.Length))
	numChunks := int(math.Ceil(float64(cmd.Length) / float64(CHUNK_SIZE)))
	token := <-tokens

	metaKey := metaKey(cmd.Key)
	metaData := common.Metadata{
		Length:    cmd.Length,
		OrigFlags: cmd.Flags,
		NumChunks: uint32(numChunks),
		ChunkSize: CHUNK_SIZE,
		Token:     token,
	}

	metaDataBuf := new(bytes.Buffer)
	binary.Write(metaDataBuf, binary.BigEndian, metaData)

	// Write metadata key
	// TODO: should there be a unique flags value for chunked data?
	localCmd := binprot.SetCmd(metaKey, cmd.Flags, cmd.Exptime, common.METADATA_SIZE)
	if err := setLocal(rw.Writer, localCmd, metaDataBuf); err != nil {
		return err
	}

	// Read server's response
	resHeader, err := readResponseHeader(rw.Reader)
	if err != nil {
		// Discard request body
		if _, ioerr := src.Discard(int(cmd.Length)); ioerr != nil {
			return ioerr
		}

		// Discard response body
		if _, ioerr := rw.Discard(int(resHeader.TotalBodyLength)); ioerr != nil {
			return ioerr
		}

		return err
	}

	// Write all the data chunks
	// TODO: Clean up if a data chunk write fails
	// Failure can mean the write failing at the I/O level
	// or at the memcached level, e.g. response == ERROR
	chunkNum := 0
	for limChunkReader.More() {
		// Build this chunk's key
		key := chunkKey(cmd.Key, chunkNum)

		// Write the key
		localCmd = binprot.SetCmd(key, cmd.Flags, cmd.Exptime, FULL_DATA_SIZE)
		if err = setLocalWithToken(rw.Writer, localCmd, token, limChunkReader); err != nil {
			return err
		}

		// Read server's response
		resHeader, err = readResponseHeader(rw.Reader)
		if err != nil {
			// Discard request body
			for limChunkReader.More() {
				if _, ioerr := io.Copy(ioutil.Discard, limChunkReader); ioerr != nil {
					return ioerr
				}

				limChunkReader.NextChunk()
			}

			// Discard response body
			if _, ioerr := rw.Discard(int(resHeader.TotalBodyLength)); ioerr != nil {
				return ioerr
			}

			return err
		}

		// Reset for next iteration
		limChunkReader.NextChunk()
		chunkNum++
	}

	return nil
}
Example #26
func discardBytes(buf *bufio.Reader, n int) {
	// the Discard method was introduced in Go 1.5
	buf.Discard(n)
}
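
Since Discard only exists as of Go 1.5 (as the comment notes), a fallback on older toolchains can copy into ioutil.Discard instead. A minimal sketch of such a fallback; discardBytesCompat is an illustrative name, not part of the original code:

package main

import (
	"bufio"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// discardBytesCompat skips n bytes without relying on bufio.Reader.Discard.
func discardBytesCompat(buf *bufio.Reader, n int) error {
	_, err := io.CopyN(ioutil.Discard, buf, int64(n))
	return err
}

func main() {
	r := bufio.NewReader(strings.NewReader("skip-this:keep"))
	if err := discardBytesCompat(r, len("skip-this:")); err != nil {
		panic(err)
	}
	rest, _ := r.Peek(4)
	fmt.Printf("%q\n", rest) // "keep"
}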
Example #27
// Compact compacts the orig block, copying it to the dst block on disk.
func (b *SuperBlock) Compact(offset int64, fn func(*Needle) error) (
	noffset int64, err error) {
	if b.LastErr != nil {
		err = b.LastErr
		return
	}
	var (
		r    *os.File
		rd   *bufio.Reader
		data []byte
		n    = &Needle{}
	)
	log.Infof("block: %s compact", b.File)
	if r, err = os.OpenFile(b.File, os.O_RDONLY, 0664); err != nil {
		log.Errorf("os.OpenFile(\"%s\") error(%v)", b.File, err)
		return
	}
	if offset == 0 {
		offset = superBlockHeaderOffset
	}
	if _, err = r.Seek(offset, os.SEEK_SET); err != nil {
		log.Errorf("block: %s Seek() error(%v)", b.File, err)
		return
	}
	rd = bufio.NewReaderSize(r, NeedleMaxSize)
	for {
		if data, err = rd.Peek(NeedleHeaderSize); err != nil {
			break
		}
		if err = n.ParseHeader(data); err != nil {
			break
		}
		if _, err = rd.Discard(NeedleHeaderSize); err != nil {
			break
		}
		if data, err = rd.Peek(n.DataSize); err != nil {
			break
		}
		if err = n.ParseData(data); err != nil {
			break
		}
		if _, err = rd.Discard(n.DataSize); err != nil {
			break
		}
		offset += int64(n.TotalSize)
		if log.V(1) {
			log.Info(n.String())
		}
		// skip deleted needles
		if n.Flag == NeedleStatusDel {
			continue
		}
		if err = fn(n); err != nil {
			break
		}
	}
	if err != io.EOF {
		return
	}
	if err = r.Close(); err != nil {
		return
	}
	noffset = offset
	return
}
Example #28
func mustDiscard(r *bufio.Reader, n int) {
	if _, err := r.Discard(n); err != nil {
		panic(fmt.Sprintf("bufio.Reader.Discard(%d) failed: %s", n, err))
	}
}
Example #29
// readReply reads one complete reply from the reader.
func readReply(io *bufio.Reader) (reply interface{}, err error) {
	if io == nil {
		panic(ErrNotConnected)
	}

	b := readToCRLF(io)
	switch v := string(b[1:]); b[0] {
	case ok_byte:
		switch {
		case v == okReply:
			reply = okReply
		case v == pongReply:
			reply = pongReply
		default:
			reply = v
		}

	case err_byte:
		err = Error(v)

	case num_byte:
		i, err := strconv.Atoi(v)
		checkError(err)
		reply = i

	case size_byte:
		var size int
		size, err = strconv.Atoi(v)
		checkError(err)

		if size < 0 {
			reply = nil
			break
		}

		s, err := io.Peek(size)
		checkError(err)

		l, err := io.Discard(size)
		checkError(err)

		readToCRLF(io)

		reply = string(s[0:l])

	case array_byte:
		var size int
		size, err = strconv.Atoi(v)
		checkError(err)

		r := make([]interface{}, size)
		for i := 0; i < size; i++ {
			r[i], err = readReply(io)
			if err != nil {
				return nil, err
			}
		}
		reply = r

	default:
		panic(ErrUnexpectedReplyType)
	}

	return
}