Example #1
func (s *buffer) Get(r io.Reader, n int) ([]byte, error) {
	// compact the data we already have to the front of the buffer
	if s.begin > defaultBufferSize {
		copy(s.buf[:], s.buf[s.begin:s.end])
		s.end -= s.begin
		s.begin = 0
	}

	toget := len(s.buf) - s.end
	if toget > defaultBufferSize {
		toget = defaultBufferSize
	}

	// only get up to the buffer size
	if n > defaultBufferSize {
		n = defaultBufferSize
	}

	for s.end-s.begin < n {
		got, err := r.Read(s.buf[s.end : s.end+toget])
		s.end += got
		if err != nil {
			return nil, err
		}
	}

	return s.buf[s.begin : s.begin+n], nil
}
Example #2
// ReadFrom implements io.ReaderFrom.
func (b *Writer) ReadFrom(r io.Reader) (n int64, err error) {
	if b.Buffered() == 0 {
		if w, ok := b.wr.(io.ReaderFrom); ok {
			return w.ReadFrom(r)
		}
	}
	var m int
	for {
		m, err = r.Read(b.buf[b.n:])
		if m == 0 {
			break
		}
		b.n += m
		n += int64(m)
		if b.Available() == 0 {
			if err1 := b.Flush(); err1 != nil {
				return n, err1
			}
		}
		if err != nil {
			break
		}
	}
	if err == io.EOF {
		err = nil
	}
	return n, err
}
Example #3
func messageCopy(ws *websocket.Conn, r io.Reader, base64Encode, ping bool, timeout time.Duration) error {
	buf := make([]byte, 2048)
	if ping {
		resetTimeout(ws, timeout)
		if err := websocket.Message.Send(ws, []byte{}); err != nil {
			return err
		}
	}
	for {
		resetTimeout(ws, timeout)
		n, err := r.Read(buf)
		if err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}
		if n > 0 {
			if base64Encode {
				if err := websocket.Message.Send(ws, base64.StdEncoding.EncodeToString(buf[:n])); err != nil {
					return err
				}
			} else {
				if err := websocket.Message.Send(ws, buf[:n]); err != nil {
					return err
				}
			}
		}
	}
}
Example #4
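// pipe copies data from r to w in chunks of up to cmdline.bufsize bytes.
// When cmdline.maxPostDelay is set, delay(cmdline.minPostDelay, cmdline.maxPostDelay)
// is called before every chunk after the first. Any error other than io.EOF
// is logged when the function returns.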
func pipe(r io.Reader, w io.Writer) {
	var err error
	defer func() {
		if err != nil && err != io.EOF {
			log.Print(err)
		}
	}()

	firstChunk := true
	buf := make([]byte, cmdline.bufsize)
	for {
		var n int
		n, err = r.Read(buf)
		if n <= 0 {
			return
		}

		if firstChunk {
			firstChunk = false
		} else if cmdline.maxPostDelay > 0 {
			delay(cmdline.minPostDelay, cmdline.maxPostDelay)
		}

		_, err = w.Write(buf[:n])
		if err != nil {
			return
		}
	}
}
Example #5
func (this *packetClientLoginEncryptRequestCodec17) Decode(reader io.Reader, util []byte) (decode packet.Packet, err error) {
	packetClientLoginEncryptRequest := new(PacketClientLoginEncryptRequest)
	packetClientLoginEncryptRequest.ServerId, err = packet.ReadString(reader, util)
	if err != nil {
		return
	}
	publicKeyLength, err := packet.ReadUint16(reader, util)
	if err != nil {
		return
	}
	packetClientLoginEncryptRequest.PublicKey = make([]byte, publicKeyLength)
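	// Note: a single Read may return fewer than publicKeyLength bytes;
	// io.ReadFull would guarantee the key (and the verify token below) is
	// read completely.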
	_, err = reader.Read(packetClientLoginEncryptRequest.PublicKey)
	if err != nil {
		return
	}
	verifyTokenLength, err := packet.ReadUint16(reader, util)
	if err != nil {
		return
	}
	packetClientLoginEncryptRequest.VerifyToken = make([]byte, verifyTokenLength)
	_, err = reader.Read(packetClientLoginEncryptRequest.VerifyToken)
	if err != nil {
		return
	}
	decode = packetClientLoginEncryptRequest
	return
}
Example #6
// ReadFrom implements io.ReaderFrom.
func (b *Writer) ReadFrom(r io.Reader) (n int64, err error) {
	if b.Buffered() == 0 {
		if w, ok := b.wr.(io.ReaderFrom); ok {
			return w.ReadFrom(r)
		}
	}
	var m int
	for {
		if b.Available() == 0 {
			if err1 := b.Flush(); err1 != nil {
				return n, err1
			}
		}
		m, err = r.Read(b.buf[b.n:])
		if m == 0 {
			break
		}
		b.n += m
		n += int64(m)
		if err != nil {
			break
		}
	}
	if err == io.EOF {
		// If we filled the buffer exactly, flush pre-emptively.
		if b.Available() == 0 {
			err = b.Flush()
		} else {
			err = nil
		}
	}
	return n, err
}
Example #7
// ReadFrom reads data from r until error or io.EOF and appends it to the buffer.
// The number of bytes read is returned, along with any error other than io.EOF.
func (b *Buffer) ReadFrom(r io.Reader) (int64, error) {
	err := b.err
	if err != nil && err != ErrNoMoreBytes {
		return 0, b.ioErr()
	}
	if b.fixed {
		return 0, ErrOperationNotAllowed
	}

	var buf [4096]byte
	var total int64
	for {
		n, err := r.Read(buf[:])
		if err != nil {
			if err == io.EOF {
				break
			}
			return total, err
		}
		_, err = b.Write(buf[:n])
		if err != nil {
			return total, err
		}
		total += int64(n)
	}

	return total, nil
}
Example #8
// ReadFrom reads data from r until EOF and appends it to the buffer, growing
// the buffer as needed. The return value n is the number of bytes read. Any
// error except io.EOF encountered during the read is also returned. If the
// buffer becomes too large, ReadFrom will panic with ErrTooLarge.
func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) {
	b.lastRead = opInvalid
	// If buffer is empty, reset to recover space.
	if b.off >= len(b.buf) {
		b.Truncate(0)
	}
	for {
		if free := cap(b.buf) - len(b.buf); free < MinRead {
			// not enough space at end
			newBuf := b.buf
			if b.off+free < MinRead {
				// not enough space using beginning of buffer;
				// double buffer capacity
				newBuf = makeSlice(2*cap(b.buf) + MinRead)
			}
			copy(newBuf, b.buf[b.off:])
			b.buf = newBuf[:len(b.buf)-b.off]
			b.off = 0
		}
		m, e := r.Read(b.buf[len(b.buf):cap(b.buf)])
		b.buf = b.buf[0 : len(b.buf)+m]
		n += int64(m)
		if e == io.EOF {
			break
		}
		if e != nil {
			return n, e
		}
	}
	return n, nil // err is EOF, so return nil explicitly
}
Example #9
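// TtyLiner reads conn one byte at a time, splits the stream into lines on
// '\r' or '\n' (treating "\r\n" as a single line break), and sends each line
// on output. When a read fails or returns no data, output is closed and the
// function returns.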
func TtyLiner(conn io.Reader, output chan string) {
	buf := make([]byte, 1)
	line := []byte{}
	cr := false
	emit := false
	for {

		nr, err := conn.Read(buf)
		if err != nil || nr < 1 {
			glog.V(1).Info("Input byte chan closed, close the output string chan")
			close(output)
			return
		}
		switch buf[0] {
		case '\n':
			emit = !cr
			cr = false
		case '\r':
			emit = true
			cr = true
		default:
			cr = false
			line = append(line, buf[0])
		}
		if emit {
			output <- string(line)
			line = []byte{}
			emit = false
		}
	}
}
Example #10
// Compute the MD5 digest of a data block (consisting of buf1 + buf2 +
// all bytes readable from rdr). If all data is read successfully,
// return DiskHashError or CollisionError depending on whether it
// matches expectMD5. If an error occurs while reading, return that
// error.
//
// "content has expected MD5" is called a collision because this
// function is used in cases where we have another block in hand with
// the given MD5 but different content.
func collisionOrCorrupt(expectMD5 string, buf1, buf2 []byte, rdr io.Reader) error {
	outcome := make(chan error)
	data := make(chan []byte, 1)
	go func() {
		h := md5.New()
		for b := range data {
			h.Write(b)
		}
		if fmt.Sprintf("%x", h.Sum(nil)) == expectMD5 {
			outcome <- CollisionError
		} else {
			outcome <- DiskHashError
		}
	}()
	data <- buf1
	if buf2 != nil {
		data <- buf2
	}
	var err error
	for rdr != nil && err == nil {
		buf := make([]byte, 1<<18)
		var n int
		n, err = rdr.Read(buf)
		data <- buf[:n]
	}
	close(data)
	if rdr != nil && err != io.EOF {
		<-outcome
		return err
	}
	return <-outcome
}
Example #11
func compareReaderWithBuf(rdr io.Reader, expect []byte, hash string) error {
	bufLen := 1 << 20
	if bufLen > len(expect) && len(expect) > 0 {
		// No need for bufLen to be longer than
		// expect, except that len(buf)==0 would
		// prevent us from handling empty readers the
		// same way as non-empty readers: reading 0
		// bytes at a time never reaches EOF.
		bufLen = len(expect)
	}
	buf := make([]byte, bufLen)
	cmp := expect

	// Loop invariants: all data read so far matched what
	// we expected, and the first N bytes of cmp are
	// expected to equal the next N bytes read from
	// rdr.
	for {
		n, err := rdr.Read(buf)
		if n > len(cmp) || bytes.Compare(cmp[:n], buf[:n]) != 0 {
			return collisionOrCorrupt(hash, expect[:len(expect)-len(cmp)], buf[:n], rdr)
		}
		cmp = cmp[n:]
		if err == io.EOF {
			if len(cmp) != 0 {
				return collisionOrCorrupt(hash, expect[:len(expect)-len(cmp)], nil, nil)
			}
			return nil
		} else if err != nil {
			return err
		}
	}
}
Example #12
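// DecodeUInt29 decodes a variable-length unsigned integer of up to 29 bits:
// each of the first three bytes contributes 7 bits, with the high bit acting
// as a continuation flag; a fourth byte, if present, contributes all 8 bits.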
func DecodeUInt29(r io.Reader) (uint32, error) {
	var n uint32 = 0
	i := 0
	b := make([]byte, 1)
	for {
		_, err := r.Read(b)
		if err != nil {
			return 0, err
		}
		if i != 3 {
			n |= uint32(b[0] & 0x7F)
			if b[0]&0x80 != 0 {
				if i != 2 {
					n <<= 7
				} else {
					n <<= 8
				}
			} else {
				break
			}
		} else {
			n |= uint32(b[0])
			break
		}
		i++
	}
	return n, nil
}
Example #13
// Like `io.Copy` except it only ever does one allocation of the 32K buffer.
func (tsd *Digest) copyWithBuf(dst io.Writer, src io.Reader) (written int64, err error) {
	if tsd.copyBuf == nil {
		tsd.copyBuf = make([]byte, 32*1024)
	}

	for {
		nr, er := src.Read(tsd.copyBuf)
		if nr > 0 {
			nw, ew := dst.Write(tsd.copyBuf[0:nr])
			if nw > 0 {
				written += int64(nw)
			}
			if ew != nil {
				err = ew
				break
			}
			if nr != nw {
				err = io.ErrShortWrite
				break
			}
		}
		if er == io.EOF {
			break
		}
		if er != nil {
			err = er
			break
		}
	}
	return
}
Example #14
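// copyWithExit copies data from r to w, switching the local terminal (os.Stdin)
// to raw mode after the first successful read. If a chunk starts with
// StatusCodePrefix, the trailing status code is parsed and sent on ch;
// on io.EOF, 1 is sent on ch instead.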
func copyWithExit(w io.Writer, r io.Reader, ch chan int) {
	buf := make([]byte, 1024)
	isTerminalRaw := false

	for {
		n, err := r.Read(buf)

		if err == io.EOF {
			ch <- 1
			return
		}

		if err != nil {
			break
		}

		if !isTerminalRaw {
			terminal.MakeRaw(int(os.Stdin.Fd()))
			isTerminalRaw = true
		}

		if s := string(buf[0:n]); strings.HasPrefix(s, StatusCodePrefix) {
			code, _ := strconv.Atoi(strings.TrimSpace(s[37:]))
			ch <- code
			return
		}

		_, err = w.Write(buf[0:n])

		if err != nil {
			break
		}
	}
}
Example #15
// readTrace does wire-format parsing and verification.
// It does not care about specific event types and argument meaning.
func readTrace(r io.Reader) ([]rawEvent, error) {
	// Read and validate trace header.
	var buf [16]byte
	off, err := r.Read(buf[:])
	if off != 16 || err != nil {
		return nil, fmt.Errorf("failed to read header: read %v, err %v", off, err)
	}
	if bytes.Compare(buf[:], []byte("go 1.5 trace\x00\x00\x00\x00")) != 0 {
		return nil, fmt.Errorf("not a trace file")
	}

	// Read events.
	var events []rawEvent
	for {
		// Read event type and number of arguments (1 byte).
		off0 := off
		n, err := r.Read(buf[:1])
		if err == io.EOF {
			break
		}
		if err != nil || n != 1 {
			return nil, fmt.Errorf("failed to read trace at offset 0x%x: n=%v err=%v", off0, n, err)
		}
		off += n
		typ := buf[0] << 2 >> 2
		narg := buf[0] >> 6
		ev := rawEvent{typ: typ, off: off0}
		if narg < 3 {
			for i := 0; i < int(narg)+2; i++ { // sequence number and time stamp are present but not counted in narg
				var v uint64
				v, off, err = readVal(r, off)
				if err != nil {
					return nil, err
				}
				ev.args = append(ev.args, v)
			}
		} else {
			// If narg == 3, the first value is length of the event in bytes.
			var v uint64
			v, off, err = readVal(r, off)
			if err != nil {
				return nil, err
			}
			evLen := v
			off1 := off
			for evLen > uint64(off-off1) {
				v, off, err = readVal(r, off)
				if err != nil {
					return nil, err
				}
				ev.args = append(ev.args, v)
			}
			if evLen != uint64(off-off1) {
				return nil, fmt.Errorf("event has wrong length at offset 0x%x: want %v, got %v", off0, evLen, off-off1)
			}
		}
		events = append(events, ev)
	}
	return events, nil
}
Example #16
func writeBodyChunked(w *bufio.Writer, r io.Reader) error {
	vbuf := copyBufPool.Get()
	buf := vbuf.([]byte)

	var err error
	var n int
	for {
		n, err = r.Read(buf)
		if n == 0 {
			if err == nil {
				panic("BUG: io.Reader returned 0, nil")
			}
			if err == io.EOF {
				if err = writeChunk(w, buf[:0]); err != nil {
					break
				}
				err = nil
			}
			break
		}
		if err = writeChunk(w, buf[:n]); err != nil {
			break
		}
	}

	copyBufPool.Put(vbuf)
	return err
}
Example #17
// NewChunkStreamReader creates a ChunkStream which wraps an io.Reader.
// The size of the chunks will vary depending on the io.Reader.
//
// After calling this, you should not read from the reader manually, even if you close the
// ChunkStream.
func NewChunkStreamReader(r io.Reader) ChunkStream {
	rawInput := make(chan []byte)
	cancel := make(chan struct{})
	go func() {
		defer close(rawInput)
		for {
			buffer := make([]byte, ReaderMaxChunkSize)
			count, err := r.Read(buffer)
			if count > 0 {
				// NOTE: if someone is reading our output channel but we are cancelled, there is
				// probability 1/2^n that outputs will be sent rather than reading the cancelChan.
				// To address this, we first check for cancelChan before doing the second select{}.
				select {
				case <-cancel:
					return
				default:
				}

				select {
				case rawInput <- buffer[:count]:
				case <-cancel:
					return
				}
			}
			if err != nil {
				return
			}
		}
	}()
	return ChunkStream{rawInput, cancel}
}
Example #18
func main() {
	flag.Parse()

	// Create a parser based on given format
	parser := gonx.NewParser(format)

	// Read given file or from STDIN
	var file io.Reader
	if logFile == "dummy" {
		file = strings.NewReader(`89.234.89.123 [08/Nov/2013:13:39:18 +0000] "GET /t/100x100/foo/bar.jpeg HTTP/1.1" 200 1027 2430 0.014 "100x100" 10 1`)
	} else if logFile == "-" {
		file = os.Stdin
	} else {
		// Assign to the outer `file` rather than shadowing it with :=, so the
		// opened file is actually used below.
		f, err := os.Open(logFile)
		if err != nil {
			panic(err)
		}
		defer f.Close()
		file = f
	}

	// Make a chain of reducers to get some stats from log file
	reducer := gonx.NewChain(
		&gonx.Avg{[]string{"request_time", "read_time", "gen_time"}},
		&gonx.Sum{[]string{"body_bytes_sent"}},
		&gonx.Count{})
	output := gonx.MapReduce(file, parser, reducer)
	for res := range output {
		// Process the record... e.g.
		fmt.Printf("Parsed entry: %+v\n", res)
	}
}
Example #19
func (m *MyCodec) Decode(r io.Reader) ([]byte, error) {
	buf := make([]byte, 8)
	if _, err := r.Read(buf); nil != err {
		return nil, err
	}
	return buf, nil
}
Example #20
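// decodeArray reads a uint32 element count followed by that many fixed-width
// unsigned elements (1, 2, or 4 bytes each, depending on the slice's element
// kind, in byteOrder) and returns them as a new slice of type t.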
func decodeArray(r io.Reader, t reflect.Type) (reflect.Value, error) {
	var sz uint32
	if err := binary.Read(r, byteOrder, &sz); err != nil {
		return nullValue, err
	}

	ksz := int(kindSize(t.Elem().Kind()))

	data := make([]byte, int(sz)*ksz)
	_, err := r.Read(data)
	if err != nil {
		return nullValue, err
	}
	slice := reflect.MakeSlice(t, int(sz), int(sz))
	for i := 0; i < int(sz); i++ {
		from := data[i*ksz:]
		var val uint64
		switch ksz {
		case 1:
			val = uint64(from[0])
		case 2:
			val = uint64(byteOrder.Uint16(from[0:]))
		case 4:
			val = uint64(byteOrder.Uint32(from[0:]))
		default:
			panic("unimp")
		}

		slice.Index(i).SetUint(val)
	}
	return slice, nil
}
Example #21
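// read decodes a ClickWindow packet from rr: window ID (1 byte), slot
// (int16, big-endian), button (1 byte), action number (int16, big-endian),
// mode (1 byte), and finally the clicked item.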
func (c *ClickWindow) read(rr io.Reader) (err error) {
	var tmp [2]byte
	if _, err = rr.Read(tmp[:1]); err != nil {
		return
	}
	c.ID = (byte(tmp[0]) << 0)
	if _, err = rr.Read(tmp[:2]); err != nil {
		return
	}
	c.Slot = int16((uint16(tmp[1]) << 0) | (uint16(tmp[0]) << 8))
	if _, err = rr.Read(tmp[:1]); err != nil {
		return
	}
	c.Button = (byte(tmp[0]) << 0)
	if _, err = rr.Read(tmp[:2]); err != nil {
		return
	}
	c.ActionNumber = int16((uint16(tmp[1]) << 0) | (uint16(tmp[0]) << 8))
	if _, err = rr.Read(tmp[:1]); err != nil {
		return
	}
	c.Mode = (byte(tmp[0]) << 0)
	if err = c.ClickedItem.Deserialize(rr); err != nil {
		return
	}
	return
}
Example #22
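// decodeStr reads a one-byte length, then that many 16-bit code units
// (2 bytes each, in byteOrder), converts each unit to a rune, and returns the
// result as a UTF-8 string, dropping a single trailing NUL if present.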
func decodeStr(r io.Reader) (string, error) {
	var szSlice [1]byte
	_, err := r.Read(szSlice[:])
	if err != nil {
		return "", err
	}
	sz := int(szSlice[0])
	if sz == 0 {
		return "", nil
	}
	utfStr := make([]byte, 4*sz)
	data := make([]byte, 2*sz)
	n, err := r.Read(data)
	if err != nil {
		return "", err
	}
	if n < len(data) {
		return "", fmt.Errorf("underflow")
	}
	w := 0
	for i := 0; i < int(2*sz); i += 2 {
		cp := byteOrder.Uint16(data[i:])
		w += utf8.EncodeRune(utfStr[w:], rune(cp))
	}
	if utfStr[w-1] == 0 {
		w--
	}
	s := string(utfStr[:w])
	return s, nil
}
Example #23
// copyNRandomly copies n bytes from src to dst. It uses a variable, randomly
// chosen buffer size to exercise more code paths.
func copyNRandomly(title string, dst io.Writer, src io.Reader, n int) (int, error) {
	var (
		buf       = make([]byte, 32*1024)
		written   int
		remaining = n
	)
	for remaining > 0 {
		l := rand.Intn(1 << 15)
		if remaining < l {
			l = remaining
		}
		nr, er := src.Read(buf[:l])
		nw, ew := dst.Write(buf[:nr])
		remaining -= nw
		written += nw
		if ew != nil {
			return written, ew
		}
		if nr != nw {
			return written, io.ErrShortWrite
		}
		if er != nil && er != io.EOF {
			return written, er
		}
	}
	return written, nil
}
Example #24
// checkUpload checks to see if an error occurred after the file was
// completely uploaded.
//
// If it was then it waits for a while to see if the file really
// exists and is the right size and returns an updated info.
//
// If the file wasn't found or was the wrong size then it returns the
// original error.
//
// This is a workaround for Amazon sometimes returning
//
//  * 408 REQUEST_TIMEOUT
//  * 504 GATEWAY_TIMEOUT
//  * 500 Internal server error
//
// At the end of large uploads.  The speculation is that the timeout
// is waiting for the sha1 hashing to complete and the file may well
// be properly uploaded.
func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error, uploadTime time.Duration) (fixedError bool, info *acd.File, err error) {
	// Return if no error - all is well
	if inErr == nil {
		return false, inInfo, inErr
	}
	// If not one of the errors we can fix return
	// if resp == nil || resp.StatusCode != 408 && resp.StatusCode != 500 && resp.StatusCode != 504 {
	// 	return false, inInfo, inErr
	// }

	// The HTTP status
	httpStatus := "HTTP status UNKNOWN"
	if resp != nil {
		httpStatus = resp.Status
	}

	// check to see if we read to the end
	buf := make([]byte, 1)
	n, err := in.Read(buf)
	if !(n == 0 && err == io.EOF) {
		fs.Debug(src, "Upload error detected but didn't finish upload: %v (%q)", inErr, httpStatus)
		return false, inInfo, inErr
	}

	// Don't wait for uploads - assume they will appear later
	if *uploadWaitPerGB <= 0 {
		fs.Debug(src, "Upload error detected but waiting disabled: %v (%q)", inErr, httpStatus)
		return false, inInfo, inErr
	}

	// Time we should wait for the upload
	uploadWaitPerByte := float64(*uploadWaitPerGB) / 1024 / 1024 / 1024
	timeToWait := time.Duration(uploadWaitPerByte * float64(src.Size()))

	const sleepTime = 5 * time.Second                        // sleep between tries
	retries := int((timeToWait + sleepTime - 1) / sleepTime) // number of retries, rounded up

	fs.Debug(src, "Error detected after finished upload - waiting to see if object was uploaded correctly: %v (%q)", inErr, httpStatus)
	remote := src.Remote()
	for i := 1; i <= retries; i++ {
		o, err := f.NewObject(remote)
		if err == fs.ErrorObjectNotFound {
			fs.Debug(src, "Object not found - waiting (%d/%d)", i, retries)
		} else if err != nil {
			fs.Debug(src, "Object returned error - waiting (%d/%d): %v", i, retries, err)
		} else {
			if src.Size() == o.Size() {
				fs.Debug(src, "Object found with correct size %d after waiting (%d/%d) - %v - returning with no error", src.Size(), i, retries, sleepTime*time.Duration(i-1))
				info = &acd.File{
					Node: o.(*Object).info,
				}
				return true, info, nil
			}
			fs.Debug(src, "Object found but wrong size %d vs %d - waiting (%d/%d)", src.Size(), o.Size(), i, retries)
		}
		time.Sleep(sleepTime)
	}
	fs.Debug(src, "Giving up waiting for object - returning original error: %v (%q)", inErr, httpStatus)
	return false, inInfo, inErr
}
Example #25
func (c *Copier) Copy(dst io.Writer, src io.Reader) (wr int64, err error) {
	buf := c.getBuffer()
	defer c.putBuffer(buf)

	for {
		nr, er := src.Read(buf)
		if nr > 0 {
			nw, ew := dst.Write(buf[0:nr])
			if nw > 0 {
				wr += int64(nw)
			}
			if ew != nil {
				err = ew
				break
			}
			if nr != nw {
				err = io.ErrShortWrite
				break
			}
		}
		if er == io.EOF {
			break
		}
		if er != nil {
			err = er
			break
		}
	}
	return wr, err
}
Example #26
// putBlockBlob uploads the given stream into a block blob by splitting the
// data stream into chunks and uploading them as blocks. It commits the block
// list at the end. This is a helper method built on top of the PutBlock and
// PutBlockList methods, with sequential block ID counting logic.
func putBlockBlob(b storage.BlobStorageClient, container, name string, blob io.Reader, chunkSize int) error {
	if chunkSize <= 0 || chunkSize > storage.MaxBlobBlockSize {
		chunkSize = storage.MaxBlobBlockSize
	}

	chunk := make([]byte, chunkSize)
	n, err := blob.Read(chunk)
	if err != nil && err != io.EOF {
		return err
	}

	blockList := []storage.Block{}

	for blockNum := 0; ; blockNum++ {
		id := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%011d", blockNum)))
		data := chunk[:n]
		err = b.PutBlock(container, name, id, data)
		if err != nil {
			return err
		}

		blockList = append(blockList, storage.Block{id, storage.BlockStatusLatest})

		// Read next block
		n, err = blob.Read(chunk)
		if err != nil && err != io.EOF {
			return err
		}
		if err == io.EOF {
			break
		}
	}

	return b.PutBlockList(container, name, blockList)
}
Example #27
// ReadFrom reads data from r until EOF and appends it to the buffer.
// The return value n is the number of bytes read.
// Any error except io.EOF encountered during the read
// is also returned.
func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) {
	b.lastRead = opInvalid
	// If buffer is empty, reset to recover space.
	if b.off >= len(b.buf) {
		b.Truncate(0)
	}
	for {
		if cap(b.buf)-len(b.buf) < MinRead {
			var newBuf []byte
			// can we get space without allocation?
			if b.off+cap(b.buf)-len(b.buf) >= MinRead {
				// reuse beginning of buffer
				newBuf = b.buf[0 : len(b.buf)-b.off]
			} else {
				// not enough space at end; put space on end
				newBuf = make([]byte, len(b.buf)-b.off, 2*(cap(b.buf)-b.off)+MinRead)
			}
			copy(newBuf, b.buf[b.off:])
			b.buf = newBuf
			b.off = 0
		}
		m, e := r.Read(b.buf[len(b.buf):cap(b.buf)])
		b.buf = b.buf[0 : len(b.buf)+m]
		n += int64(m)
		if e == io.EOF {
			break
		}
		if e != nil {
			return n, e
		}
	}
	return n, nil // err is EOF, so return nil explicitly
}
Example #28
// copyWithBuffer is taken from stdlib io.Copy implementation
// https://github.com/golang/go/blob/go1.5.1/src/io/io.go#L367
func copyWithBuffer(dst io.Writer, src io.Reader, buf []byte) (written int64, err error) {
	for {
		nr, er := src.Read(buf)
		if nr > 0 {
			nw, ew := dst.Write(buf[0:nr])
			if nw > 0 {
				written += int64(nw)
			}
			if ew != nil {
				err = ew
				break
			}
			if nr != nw {
				err = io.ErrShortWrite
				break
			}
		}
		if er == io.EOF {
			break
		}
		if er != nil {
			err = er
			break
		}
	}
	return written, err
}
Example #29
// EncodeMessage encodes a message read from R and writes the encoded form to W,
// terminating it with TerminalByte.
func EncodeMessage(R io.Reader, W io.Writer) error {
	BS := make([]byte, BufferSize)

	for {
		n, err := R.Read(BS)

		if err == io.EOF {
			MBS, _ := EncodeBytes(BS[:n])

			if len(MBS) > 0 {
				W.Write(MBS)
			}

			W.Write(
				[]byte{
					TerminalByte,
				},
			)

			return err
		}

		if err != nil {
			return err
		}

		MBS, _ := EncodeBytes(BS[:n])

		W.Write(MBS)
	}
}
Example #30
// Verify returns nil or an error describing the mismatch between the block
// list and actual reader contents
func Verify(r io.Reader, blocksize int, blocks []protocol.BlockInfo) error {
	hf := sha256.New()
	for i, block := range blocks {
		lr := &io.LimitedReader{R: r, N: int64(blocksize)}
		_, err := io.Copy(hf, lr)
		if err != nil {
			return err
		}

		hash := hf.Sum(nil)
		hf.Reset()

		if !bytes.Equal(hash, block.Hash) {
			return fmt.Errorf("hash mismatch %x != %x for block %d", hash, block.Hash, i)
		}
	}

	// We should have reached the end now
	bs := make([]byte, 1)
	n, err := r.Read(bs)
	if n != 0 || err != io.EOF {
		return fmt.Errorf("file continues past end of blocks")
	}

	return nil
}