Example #1
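// TestConcurrent verifies that 1000 readers attached to the same buffer each
// receive an identical copy of the data written to it in chunks before Close.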
func TestConcurrent(t *testing.T) {
	var grp sync.WaitGroup
	buf := New()

	var rs []io.ReadCloser
	for i := 0; i < 1000; i++ {
		rs = append(rs, buf.NextReader())
	}

	testData := bytes.NewBuffer(nil)
	io.CopyN(testData, rand.Reader, 32*1024*10)

	for _, r := range rs {
		grp.Add(1)
		go func(r io.ReadCloser) {
			defer grp.Done()
			defer r.Close()
			data, err := ioutil.ReadAll(r)
			if err != nil {
				t.Error(err)
			}
			if !bytes.Equal(testData.Bytes(), data) {
				t.Error("unexpected result...", testData.Len(), len(data))
			}
		}(r)
	}

	r := bytes.NewReader(testData.Bytes())
	for r.Len() > 0 {
		io.CopyN(buf, r, 32*1024*2)
		<-time.After(100 * time.Millisecond)
	}
	buf.Close()
	grp.Wait()
}
Example #2
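// TestMixReader checks that a reader built from nil sources fails, and that a
// reader over two one-byte sources yields exactly two bytes before reporting
// end of data, whether drained in one copy or two.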
func TestMixReader(t *testing.T) {
	r := NewMixReader(nil, nil)
	if _, err := r.Read(make([]byte, 1)); err == nil {
		t.Error("nil source should failed.")
	}

	r = NewMixReader(bytes.NewBuffer([]byte{0x00}), bytes.NewReader([]byte{0x00}))
	if _, err := io.CopyN(NewBytesWriter(make([]byte, 2)), r, 2); err != nil {
		t.Error("should not be nil")
	}
	if _, err := r.Read(make([]byte, 1)); err == nil {
		t.Error("should dry")
	}

	r = NewMixReader(bytes.NewBuffer([]byte{0x00}), bytes.NewReader([]byte{0x00}))
	if _, err := io.CopyN(NewBytesWriter(make([]byte, 1)), r, 1); err != nil {
		t.Error("should not be nil")
	}
	if _, err := io.CopyN(NewBytesWriter(make([]byte, 1)), r, 1); err != nil {
		t.Error("should not be nil")
	}
	if _, err := r.Read(make([]byte, 1)); err == nil {
		t.Error("should dry")
	}
}
Example #3
func ExampleReadTwoOpenedUncompressedFiles() {
	var fs http.FileSystem = assets

	f0, err := fs.Open("/not-worth-compressing-file.txt")
	if err != nil {
		panic(err)
	}
	defer f0.Close()
	_ = f0.(notWorthGzipCompressing)
	f1, err := fs.Open("/not-worth-compressing-file.txt")
	if err != nil {
		panic(err)
	}
	defer f1.Close()
	_ = f1.(notWorthGzipCompressing)

	_, err = io.CopyN(os.Stdout, f0, 9)
	if err != nil {
		panic(err)
	}
	_, err = io.CopyN(os.Stdout, f1, 9)
	if err != nil {
		panic(err)
	}

	// Output:
	// Its normaIts norma
}
Example #4
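// ExampleBytes interleaves writes to the buffer with reads through
// io.NewSectionReader and draining io.Copy calls; the combined result is
// checked against the Output comment below.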
func ExampleBytes() {
	buf := newWriter(make([]byte, 0, 10))
	io.Copy(os.Stdout, buf)
	io.Copy(os.Stdout, io.NewSectionReader(*&buf, 0, 100))

	io.WriteString(buf, "Hello ")
	r := io.NewSectionReader(*&buf, 0, int64(buf.Len()))
	io.CopyN(os.Stdout, r, 5)
	io.CopyN(os.Stdout, buf, 5)
	io.WriteString(buf, "World")
	r = io.NewSectionReader(*&buf, 0, int64(buf.Len()))
	io.CopyN(os.Stdout, r, 6)

	io.WriteString(buf, "abcdefg")
	io.Copy(os.Stdout, buf)
	io.Copy(os.Stdout, buf)

	io.WriteString(buf, "Hello World")
	r = io.NewSectionReader(*&buf, 0, int64(buf.Len()))
	io.CopyN(os.Stdout, r, 5)
	io.CopyN(os.Stdout, buf, 4)

	io.WriteString(buf, "abcdefg")
	io.Copy(os.Stdout, buf)
	io.Copy(os.Stdout, buf)
	// Output:
	// HelloHello World WorldabcdefgHelloHello Worldabcdefg
}
Example #5
// decryptRemoteState is used to help decrypt the remote state
func (m *Memberlist) decryptRemoteState(bufConn io.Reader) ([]byte, error) {
	// Read in enough to determine message length
	cipherText := bytes.NewBuffer(nil)
	cipherText.WriteByte(byte(encryptMsg))
	_, err := io.CopyN(cipherText, bufConn, 4)
	if err != nil {
		return nil, err
	}

	// Ensure we aren't asked to download too much. This is to guard against
	// an attack vector where a huge amount of state is sent
	moreBytes := binary.BigEndian.Uint32(cipherText.Bytes()[1:5])
	if moreBytes > maxPushStateBytes {
		return nil, fmt.Errorf("Remote node state is larger than limit (%d)", moreBytes)
	}

	// Read in the rest of the payload
	_, err = io.CopyN(cipherText, bufConn, int64(moreBytes))
	if err != nil {
		return nil, err
	}

	// Decrypt the cipherText
	dataBytes := cipherText.Bytes()[:5]
	cipherBytes := cipherText.Bytes()[5:]

	// Decrypt the payload
	keys := m.config.Keyring.GetKeys()
	return decryptPayload(keys, cipherBytes, dataBytes)
}
Example #6
func ExampleReadTwoOpenedCompressedFiles() {
	var fs http.FileSystem = assets

	f0, err := fs.Open("/sample-file.txt")
	if err != nil {
		panic(err)
	}
	defer f0.Close()
	_ = f0.(gzipByter)
	f1, err := fs.Open("/sample-file.txt")
	if err != nil {
		panic(err)
	}
	defer f1.Close()
	_ = f1.(gzipByter)

	_, err = io.CopyN(os.Stdout, f0, 9)
	if err != nil {
		panic(err)
	}
	_, err = io.CopyN(os.Stdout, f1, 9)
	if err != nil {
		panic(err)
	}

	// Output:
	// This fileThis file
}
Example #7
// Copy writes the entire database to a writer.
// A reader transaction is maintained during the copy so it is safe to continue
// using the database while a copy is in progress.
// Copy will write exactly tx.Size() bytes into the writer.
func (tx *Tx) Copy(w io.Writer) error {
	// Open reader on the database.
	f, err := os.Open(tx.db.path)
	if err != nil {
		_ = tx.Rollback()
		return err
	}

	// Copy the meta pages.
	tx.db.metalock.Lock()
	_, err = io.CopyN(w, f, int64(tx.db.pageSize*2))
	tx.db.metalock.Unlock()
	if err != nil {
		_ = tx.Rollback()
		_ = f.Close()
		return fmt.Errorf("meta copy: %s", err)
	}

	// Copy data pages.
	if _, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)); err != nil {
		_ = tx.Rollback()
		_ = f.Close()
		return err
	}

	return f.Close()
}
Example #8
// Write out the changes to the file in place.
func writeNew(f *os.File, v string, offset, length int64) error {
	_, err := f.Seek(0, 0) // reset reader
	if err != nil {
		return err
	}
	// Read from old file, write changes out to buffer.
	var buf bytes.Buffer
	_, err = io.CopyN(&buf, f, offset) // write up to change
	if err != nil {
		return err
	}
	_, err = io.CopyN(ioutil.Discard, f, length) // scrap the old
	if err != nil {
		return err
	}
	_, err = io.WriteString(&buf, v) // write the new
	if err != nil {
		return err
	}
	_, err = io.Copy(&buf, f) // write the rest
	if err != nil {
		return err
	}

	stat, err := f.Stat()
	if err != nil {
		return err
	}

	return ioutil.WriteFile(f.Name(), buf.Bytes(), stat.Mode())
}
Example #9
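// roundtrip writes the encoded request size and body to conn, then reads the
// 4-byte big-endian response size followed by that many response bytes into a
// buffer, applying the context deadline to the whole exchange.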
func roundtrip(ctx *ClientContext, conn net.Conn, requestSize, requestBytes []byte) ([]byte, error) {
	deadline, ok := ctx.Deadline()
	if !ok {
		deadline = time.Time{}
	}

	var err error
	if err = conn.SetDeadline(deadline); err != nil {
		return nil, makeClientErr(err.Error())
	}
	if _, err = conn.Write(requestSize); err != nil {
		return nil, makeClientErrf("Failed to write 4 bytes for request size: %s", err)
	}
	if _, err = conn.Write(requestBytes); err != nil {
		return nil, makeClientErrf("Failed to write %d bytes for request: %s", len(requestBytes), err)
	}
	buf := bytes.NewBuffer(make([]byte, 0, 4))
	if _, err = io.CopyN(buf, conn, 4); err != nil {
		return nil, makeClientErrf(
			"Failed to read 4 bytes for response size from '%s': %s",
			conn.RemoteAddr().String(), err)
	}
	responseSize := binary.BigEndian.Uint32(buf.Bytes())
	if _, err = io.CopyN(buf, conn, int64(responseSize)); err != nil {
		return nil, makeClientErrf(
			"Failed to read %d bytes for response from '%s': %s",
			responseSize, conn.RemoteAddr().String(), err)
	}
	return buf.Bytes(), nil
}
Example #10
// WriteTo writes the entire database to a writer.
// If err == nil then exactly tx.Size() bytes will be written into the writer.
func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
	// Attempt to open reader with WriteFlag
	f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0)
	if err != nil {
		return 0, err
	}
	defer f.Close()

	// Copy the meta pages.
	tx.db.metalock.Lock()
	n, err = io.CopyN(w, f, int64(tx.db.pageSize*2))
	tx.db.metalock.Unlock()
	if err != nil {
		return n, fmt.Errorf("meta copy: %s", err)
	}

	// Copy data pages.
	wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2))
	n += wn
	if err != nil {
		return n, err
	}

	return n, f.Close()
}
Example #11
// WriteTo writes the entire database to a writer.
// If err == nil then exactly tx.Size() bytes will be written into the writer.
func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
	// Attempt to open reader directly.
	var f *os.File
	if f, err = os.OpenFile(tx.db.path, os.O_RDONLY|odirect, 0); err != nil {
		// Fallback to a regular open if that doesn't work.
		if f, err = os.OpenFile(tx.db.path, os.O_RDONLY, 0); err != nil {
			return 0, err
		}
	}

	// Copy the meta pages.
	tx.db.metalock.Lock()
	n, err = io.CopyN(w, f, int64(tx.db.pageSize*2))
	tx.db.metalock.Unlock()
	if err != nil {
		_ = f.Close()
		return n, fmt.Errorf("meta copy: %s", err)
	}

	// Copy data pages.
	wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2))
	n += wn
	if err != nil {
		_ = f.Close()
		return n, err
	}

	return n, f.Close()
}
Example #12
// ReadRecord reads a whole record into memory (up to 32 KB), otherwise the record is discarded.
func ReadRecord(r io.Reader) (*bytes.Buffer, error) {

	var buf bytes.Buffer

	for {
		size, last, err := ReadRecordMarker(r)

		if err != nil {
			return nil, err
		}

		if size < 1 {
			return nil, errors.New("A TCP record must be at least one byte in size")
		}

		if size >= maxRecordSize {
			io.CopyN(ioutil.Discard, r, int64(size))

			return nil, fmt.Errorf("Discarded record exceeding maximum size of %v bytes", maxRecordSize)
		}

		if n, err := io.CopyN(&buf, r, int64(size)); err != nil {
			return nil, fmt.Errorf("Unable to read entire record. Read %v, expected %v", n, size)
		}

		if last {
			break
		}
	}

	return &buf, nil
}
Example #13
// readBlockHeader reads the block header.
func readBlockHeader(r io.Reader) (h *blockHeader, n int, err error) {
	var buf bytes.Buffer
	buf.Grow(20)

	// block header size
	z, err := io.CopyN(&buf, r, 1)
	n = int(z)
	if err != nil {
		return nil, n, err
	}
	s := buf.Bytes()[0]
	if s == 0 {
		return nil, n, errIndexIndicator
	}

	// read complete header
	headerLen := (int(s) + 1) * 4
	buf.Grow(headerLen - 1)
	z, err = io.CopyN(&buf, r, int64(headerLen-1))
	n += int(z)
	if err != nil {
		return nil, n, err
	}

	// unmarshal block header
	h = new(blockHeader)
	if err = h.UnmarshalBinary(buf.Bytes()); err != nil {
		return nil, n, err
	}

	return h, n, nil
}
Example #14
// Reads transfer-encoding: chunked payloads from the connection reader.
func (c *Connection) readChunkedData() error {
	var err error
	var line []byte
	var size uint64
	var start time.Time

	start = time.Now()
	writer := &nonEmptyWriter{os.Stdout}

	var buffer *bytes.Buffer
	var decompressor *gzip.Reader
	var zipReader *bufio.Reader
	var data []byte

	if c.conf.GZip {
		buffer = bytes.NewBufferString("")
	}

	for err == nil {
		line, _, err = c.reader.ReadLine()
		if err != nil {
			return err
		}
		size, err = decodeHexString(line)
		if err != nil {
			str := fmt.Sprintf("Expected hex, got %v", string(line))
			return errors.New(str)
		}
		if !c.conf.GZip {
			_, err = io.CopyN(writer, c.reader, int64(size))
		} else {
			_, err = io.CopyN(buffer, c.reader, int64(size))
			if err != nil {
				return err
			}
			if decompressor == nil {
				decompressor, err = gzip.NewReader(buffer)
				if err != nil {
					return err
				}
				defer decompressor.Close()
				zipReader = bufio.NewReader(decompressor)
			}
			data = make([]byte, 512, 512)
			_, err = zipReader.Read(data)
			if err != nil {
				return err
			}
			strBuffer := bytes.NewBuffer(data)
			io.CopyN(writer, strBuffer, int64(len(data)))
		}
		if c.conf.TTL > 0 {
			if time.Now().Sub(start).Nanoseconds() > c.conf.TTL {
				return nil
			}
		}
	}
	return err
}
Example #15
File: v1.go Project: nawawi/drive
// NewDecryptReader creates an io.ReadCloser wrapping an io.Reader.
// It has to read the entire io.Reader to disk using a temp file so that it can
// hash the contents to verify that it is safe to decrypt.
// If the file is authenticated, the DecryptReader will be returned and
// the resulting bytes will be the plaintext.
func NewDecryptReader(r io.Reader, pass []byte) (d io.ReadCloser, err error) {
	mac := make([]byte, hmacSize)
	aesKey, hmacKey, iv, header, err := decodeHeader(r, pass)
	h := hmac.New(hashFunc, hmacKey)
	h.Write(header)
	if err != nil {
		return nil, err
	}
	dst, err := tmpfile.New(&tmpfile.Context{
		Dir:    os.TempDir(),
		Suffix: "drive-encrypted-",
	})
	if err != nil {
		return nil, err
	}
	// If there is an error, try to delete the temp file.
	defer func() {
		if err != nil {
			dst.Done()
		}
	}()
	b, err := aes.NewCipher(aesKey)
	if err != nil {
		return nil, err
	}
	d = &decryptReader{
		tmpFile: dst,
		sReader: &cipher.StreamReader{R: dst, S: cipher.NewCTR(b, iv)},
	}
	w := io.MultiWriter(h, dst)
	buf := bufio.NewReaderSize(r, _16KB)
	for {
		b, err := buf.Peek(_16KB)
		if err != nil && err != io.EOF {
			return nil, err
		}
		if err == io.EOF {
			left := buf.Buffered()
			if left < hmacSize {
				return nil, DecryptErr
			}
			copy(mac, b[left-hmacSize:left])
			_, err = io.CopyN(w, buf, int64(left-hmacSize))
			if err != nil {
				return nil, err
			}
			break
		}
		_, err = io.CopyN(w, buf, _16KB-hmacSize)
		if err != nil {
			return nil, err
		}
	}
	if !hmac.Equal(mac, h.Sum(nil)) {
		return nil, DecryptErr
	}
	dst.Seek(0, 0)
	return d, nil
}
Example #16
// decodeBlock decodes one block with each call. Returns the length of the
// written bytes and an error if there was one.
func (nf *NwaFile) decodeBlock() (int64, error) {
	// Uncompressed wave data stream
	if nf.complevel == -1 {
		if nf.curblock == -1 {
			// If it's the first block we have to write the wave header
			written, _ := io.Copy(&nf.outdata, makeWavHeader(nf.datasize, nf.Channels, nf.Bps, nf.Freq))
			nf.curblock++
			return written, nil
		}
		if nf.curblock <= nf.blocks {
			nf.curblock++
			ret, err := io.CopyN(&nf.outdata, nf.reader, (int64)(nf.blocksize*(nf.Bps/8)))
			if err != nil && err != io.EOF {
				return -1, err
			}
			return ret, nil
		}
		return -1, errors.New("This shouldn't happen! Please report me")
	}

	// Compressed (NWA) wave data stream
	if nf.offsets == nil {
		return -1, errors.New("Offsets weren't set. Aborting")
	}
	if nf.blocks == nf.curblock {
		// We are finished
		return 0, nil
	}
	if nf.curblock == -1 {
		// If it's the first block we have to write the wave header
		written, _ := io.Copy(&nf.outdata, makeWavHeader(nf.datasize, nf.Channels, nf.Bps, nf.Freq))
		nf.curblock++
		return written, nil
	}

	// Calculate the size of the decoded block
	var curblocksize, curcompsize int
	if nf.curblock != nf.blocks-1 {
		curblocksize = nf.blocksize * (nf.Bps / 8)
		curcompsize = nf.offsets[nf.curblock+1] - nf.offsets[nf.curblock]
		if curblocksize >= nf.blocksize*(nf.Bps/8)*2 {
			return -1, errors.New("Current block exceeds the excepted count.")
		} // Fatal error
	} else {
		curblocksize = nf.restsize * (nf.Bps / 8)
		curcompsize = nf.blocksize * (nf.Bps / 8) * 2
	}

	// Read in the block data
	nf.tmpdata.Reset()
	io.CopyN(&nf.tmpdata, nf.reader, (int64)(curcompsize))

	// Decode the compressed block
	nf.decode(curblocksize)

	nf.curblock++
	return (int64)(curblocksize), nil
}
Example #17
// ApplyPatchFull is like ApplyPatch but takes a failFast flag that controls whether a failed block copy aborts the patch
func (ctx *Context) ApplyPatchFull(output io.Writer, pool Pool, ops chan Operation, failFast bool) error {
	blockSize := int64(ctx.blockSize)
	pos := int64(0)

	for op := range ops {
		switch op.Type {
		case OpBlockRange:
			fileSize := pool.GetSize(op.FileIndex)
			fixedSize := (op.BlockSpan - 1) * blockSize
			lastIndex := op.BlockIndex + (op.BlockSpan - 1)
			lastSize := blockSize
			if blockSize*(lastIndex+1) > fileSize {
				lastSize = fileSize % blockSize
			}
			opSize := (fixedSize + lastSize)

			target, err := pool.GetReadSeeker(op.FileIndex)
			if err != nil {
				if failFast {
					return errors.Wrap(err, 1)
				}
				io.CopyN(output, &devNullReader{}, opSize)
				pos += opSize
				continue
			}

			_, err = target.Seek(blockSize*op.BlockIndex, os.SEEK_SET)
			if err != nil {
				if failFast {
					return errors.Wrap(err, 1)
				}
				io.CopyN(output, &devNullReader{}, opSize)
				pos += opSize
				continue
			}

			copied, err := io.CopyN(output, target, opSize)
			if err != nil {
				if failFast {
					return errors.Wrap(fmt.Errorf("While copying %d bytes: %s", blockSize*op.BlockSpan, err.Error()), 1)
				}

				remaining := opSize - copied
				io.CopyN(output, &devNullReader{}, remaining)
				pos += opSize
				continue
			}
		case OpData:
			_, err := output.Write(op.Data)
			if err != nil {
				return errors.Wrap(err, 1)
			}
		}
	}

	return nil
}
Example #18
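// ParseReader reads binlog events from r until EOF: each iteration reads the
// fixed-size event header, then the remainder of the event, parses it, and
// hands the decoded event to onEvent.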
func (p *BinlogParser) ParseReader(r io.Reader, onEvent OnEventFunc) error {
	p.tables = make(map[uint64]*TableMapEvent)
	p.format = nil

	var err error
	var n int64

	for {
		var buf bytes.Buffer

		if n, err = io.CopyN(&buf, r, EventHeaderSize); err != nil {
			if n == 0 {
				return nil
			}
			return err
		}

		data := buf.Bytes()
		var h *EventHeader
		h, err = p.parseHeader(data)
		if err != nil {
			return err
		}

		if h.EventSize <= uint32(EventHeaderSize) {
			return fmt.Errorf("invalid event header, event size is %d, too small", h.EventSize)

		}

		if _, err = io.CopyN(&buf, r, int64(h.EventSize)-int64(EventHeaderSize)); err != nil {
			return err
		}

		data = buf.Bytes()
		rawData := data

		data = data[EventHeaderSize:]
		eventLen := int(h.EventSize) - EventHeaderSize

		if len(data) != eventLen {
			return fmt.Errorf("invalid data size %d in event %s, less event length %d", len(data), h.EventType, eventLen)
		}

		var e Event
		e, err = p.parseEvent(h, data)
		if err != nil {
			break
		}

		if err = onEvent(&BinlogEvent{rawData, h, e}); err != nil {
			return err
		}
	}

	return nil
}
Example #19
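// Write serializes each argument to w: a byte is written as-is, an int as a
// big-endian uint64, strings, byte slices, and maps as length-prefixed values,
// and a SocketDefinition as address, port, a flags byte, and its optional
// HTTP/TLS fields.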
func Write(w io.Writer, args ...interface{}) error {
	var err error
	for _, arg := range args {
		switch t := arg.(type) {
		case byte:
			data := []byte{t}
			_, err = w.Write(data)
		case int:
			data := make([]byte, 8)
			binary.BigEndian.PutUint64(data, uint64(t))
			_, err = w.Write(data)
		case string:
			err = Write(w, len(t))
			if err == nil {
				_, err = io.CopyN(w, strings.NewReader(t), int64(len(t)))
			}
		case []byte:
			err = Write(w, len(t))
			if err == nil {
				_, err = io.CopyN(w, bytes.NewReader(t), int64(len(t)))
			}
		case map[string]string:
			err = Write(w, len(t))
			if err == nil {
				for k, v := range t {
					err = Write(w, k, v)
					if err != nil {
						return err
					}
				}
			}
		case SocketDefinition:
			var flags byte = 0
			if t.HTTP != nil {
				flags |= 1 << 0
			}
			if t.TLS != nil {
				flags |= 1 << 1
			}
			err = Write(w, t.Address, t.Port, flags)
			if err == nil {
				if t.HTTP != nil {
					err = Write(w, t.HTTP.DomainSuffix, t.HTTP.PathPrefix)
				}
			}
			if err == nil {
				if t.TLS != nil {
					err = Write(w, t.TLS.Cert, t.TLS.Key)
				}
			}
		default:
			err = fmt.Errorf("don't know how to write %T", arg)
		}
	}
	return err
}
Example #20
// GetPartialObject - GET object from cache buffer range
func (donut API) GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error) {
	donut.lock.Lock()
	defer donut.lock.Unlock()

	errParams := map[string]string{
		"bucket": bucket,
		"object": object,
		"start":  strconv.FormatInt(start, 10),
		"length": strconv.FormatInt(length, 10),
	}

	if !IsValidBucket(bucket) {
		return 0, iodine.New(BucketNameInvalid{Bucket: bucket}, errParams)
	}
	if !IsValidObjectName(object) {
		return 0, iodine.New(ObjectNameInvalid{Object: object}, errParams)
	}
	if start < 0 {
		return 0, iodine.New(InvalidRange{
			Start:  start,
			Length: length,
		}, errParams)
	}
	objectKey := bucket + "/" + object
	data, ok := donut.objects.Get(objectKey)
	if !ok {
		if len(donut.config.NodeDiskMap) > 0 {
			reader, _, err := donut.getObject(bucket, object)
			if err != nil {
				return 0, iodine.New(err, nil)
			}
			if _, err := io.CopyN(ioutil.Discard, reader, start); err != nil {
				return 0, iodine.New(err, nil)
			}
			pw := NewProxyWriter(w)
			written, err := io.CopyN(pw, reader, length) // copy through the proxy so the written bytes are recorded for caching
			if err != nil {
				return 0, iodine.New(err, nil)
			}
			ok := donut.objects.Append(objectKey, pw.writtenBytes)
			pw.writtenBytes = nil
			go debug.FreeOSMemory()
			if !ok {
				return 0, iodine.New(InternalError{}, nil)
			}
			return written, nil
		}
		return 0, iodine.New(ObjectNotFound{Object: object}, nil)
	}
	written, err := io.CopyN(w, bytes.NewBuffer(data[start:]), length)
	if err != nil {
		return 0, iodine.New(err, nil)
	}
	return written, nil
}
Example #21
// ReadFromStream reads a non-packed serialized stream from r. buf is used to
// buffer the read contents, can be nil, and is provided so that the buffer
// can be reused between messages. The returned segment is the first segment
// read, which contains the root pointer.
func ReadFromStream(r io.Reader, buf *bytes.Buffer) (*Segment, error) {
	if buf == nil {
		buf = new(bytes.Buffer)
	} else {
		buf.Reset()
	}

	if _, err := io.CopyN(buf, r, 4); err != nil {
		return nil, err
	}

	if little32(buf.Bytes()[:]) >= uint32(MaxSegmentNumber) {
		return nil, ErrTooMuchData
	}

	segnum := int(little32(buf.Bytes()[:]) + 1)
	hdrsz := 8*(segnum/2) + 4

	if _, err := io.CopyN(buf, r, int64(hdrsz)); err != nil {
		return nil, err
	}

	total := 0
	for i := 0; i < segnum; i++ {
		sz := little32(buf.Bytes()[4*i+4:])
		if uint64(total)+uint64(sz)*8 > uint64(MaxTotalSize) {
			return nil, ErrTooMuchData
		}
		total += int(sz) * 8
	}

	if _, err := io.CopyN(buf, r, int64(total)); err != nil {
		return nil, err
	}

	hdrv := buf.Bytes()[4 : hdrsz+4]
	datav := buf.Bytes()[hdrsz+4:]

	if segnum == 1 {
		sz := int(little32(hdrv)) * 8
		return NewBuffer(datav[:sz]), nil
	}

	m := &multiBuffer{make([]*Segment, segnum)}
	for i := 0; i < segnum; i++ {
		sz := int(little32(hdrv[4*i:])) * 8
		m.segments[i] = &Segment{m, datav[:sz], uint32(i)}
		datav = datav[sz:]
	}

	return m.segments[0], nil
}
Example #22
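// TestIndex skips forward through a gzip stream at random strides, recording
// each offset and a SHA-1 of the following 64 bytes, then seeks back to the
// recorded offsets in random order and verifies the checksums still match.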
func TestIndex(t *testing.T) {
	f, err := os.Open("testdata/divina2.txt.gz")
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()

	gz, err := NewReader(f)
	if err != nil {
		t.Fatal(err)
	}

	type offsets struct {
		Off Offset
		Sum string
	}
	var pos []offsets

	seed := time.Now().UnixNano()
	t.Log("using seed:", seed)
	rand.Seed(seed)
	for {
		skip := rand.Int63n(10000) + 1
		_, err := io.CopyN(ioutil.Discard, gz, skip)
		if err == io.EOF {
			break
		}

		off := gz.Offset()
		hash := sha1.New()
		io.CopyN(hash, gz, 64)
		sum := hash.Sum([]byte{})

		pos = append(pos, offsets{Off: off, Sum: hex.EncodeToString(sum)})
	}

	if !gz.IsProbablyMultiGzip() {
		t.Error("file is not detected as multigzip")
	}

	perm := rand.Perm(len(pos))
	for _, idx := range perm {
		p := pos[idx]
		gz.Seek(p.Off)
		hash := sha1.New()
		io.CopyN(hash, gz, 64)
		sum := hash.Sum([]byte{})
		if hex.EncodeToString(sum) != p.Sum {
			t.Error("invalid checksum", p, hex.EncodeToString(sum))
		}
	}
}
Example #23
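// LoadDump restores a dump stream: a MasterInfo header followed by
// length-prefixed key/value records, each of which is written back to the
// store (and to the binlog when one is configured) until EOF.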
func (l *Ledis) LoadDump(r io.Reader) (*MasterInfo, error) {
	l.Lock()
	defer l.Unlock()

	info := new(MasterInfo)

	rb := bufio.NewReaderSize(r, 4096)

	err := info.ReadFrom(rb)
	if err != nil {
		return nil, err
	}

	var keyLen uint16
	var valueLen uint32

	var keyBuf bytes.Buffer
	var valueBuf bytes.Buffer
	for {
		if err = binary.Read(rb, binary.BigEndian, &keyLen); err != nil && err != io.EOF {
			return nil, err
		} else if err == io.EOF {
			break
		}

		if _, err = io.CopyN(&keyBuf, rb, int64(keyLen)); err != nil {
			return nil, err
		}

		if err = binary.Read(rb, binary.BigEndian, &valueLen); err != nil {
			return nil, err
		}

		if _, err = io.CopyN(&valueBuf, rb, int64(valueLen)); err != nil {
			return nil, err
		}

		if err = l.ldb.Put(keyBuf.Bytes(), valueBuf.Bytes()); err != nil {
			return nil, err
		}

		if l.binlog != nil {
			err = l.binlog.Log(encodeBinLogPut(keyBuf.Bytes(), valueBuf.Bytes()))
		}

		keyBuf.Reset()
		valueBuf.Reset()
	}

	return info, nil
}
Example #24
// skipUnread skips any unread bytes in the existing file entry, as well as any alignment padding.
func (tr *Reader) skipUnread() {
	nr := tr.numBytes() + tr.pad // number of bytes to skip
	tr.curr, tr.pad = nil, 0
	if tr.RawAccounting {
		_, tr.err = io.CopyN(tr.rawBytes, tr.r, nr)
		return
	}
	if sr, ok := tr.r.(io.Seeker); ok {
		if _, err := sr.Seek(nr, os.SEEK_CUR); err == nil {
			return
		}
	}
	_, tr.err = io.CopyN(ioutil.Discard, tr.r, nr)
}
Example #25
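// Decode reads a single length-prefixed BSON document from d.in and
// unmarshals it into i; the 4-byte length (which in BSON includes itself) is
// captured via a TeeReader so the whole document can be re-read.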
func (d *Decoder) Decode(i interface{}) error {
	// no longer use bufio.Reader.Peek because we do want to avoid buffering (to enable capturing of parts of bson)
	// read 4 bytes into buffer and save them in saved (so we can use them for reading later)
	buf, saved := &bytes.Buffer{}, &bytes.Buffer{}
	if _, err := io.CopyN(buf, io.TeeReader(d.in, saved), 4); err != nil {
		return err
	}
	toRead := int64(decodeint32(buf.Bytes()))

	bsonBuf := &bytes.Buffer{}
	if _, err := io.CopyN(bsonBuf, io.MultiReader(saved, d.in), toRead); err != nil {
		return err
	}
	return bson.Unmarshal(bsonBuf.Bytes(), i)
}
Example #26
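// Write posts buf to conn.url in a background goroutine: a 200 response body
// is delivered on conn.ready, 202/204 responses are drained and discarded,
// and any other failure is encoded as an error response on conn.ready (unless
// the request was a notification).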
func (conn *httpClientConn) Write(buf []byte) (int, error) {
	b := make([]byte, len(buf))
	copy(b, buf)
	go func() {
		req, err := http.NewRequest("POST", conn.url, bytes.NewReader(b))
		if err == nil {
			req.Header.Add("Content-Type", contentType)
			req.Header.Add("Accept", contentType)
			var resp *http.Response
			resp, err = conn.doer.Do(req)
			const maxBodySlurpSize = 32 * 1024
			if err != nil {
			} else if resp.Header.Get("Content-Type") != contentType {
				err = fmt.Errorf("bad HTTP Content-Type: %s", resp.Header.Get("Content-Type"))
			} else if resp.StatusCode == http.StatusOK {
				conn.ready <- resp.Body
				return
			} else if resp.StatusCode == http.StatusNoContent || resp.StatusCode == http.StatusAccepted {
				// Read the body if small so underlying TCP connection will be re-used.
				// No need to check for errors: if it fails, Transport won't reuse it anyway.
				if resp.ContentLength == -1 || resp.ContentLength <= maxBodySlurpSize {
					io.CopyN(ioutil.Discard, resp.Body, maxBodySlurpSize)
				}
				resp.Body.Close()
				return
			} else {
				err = fmt.Errorf("bad HTTP Status: %s", resp.Status)
			}
			if resp != nil {
				// Read the body if small so underlying TCP connection will be re-used.
				// No need to check for errors: if it fails, Transport won't reuse it anyway.
				if resp.ContentLength == -1 || resp.ContentLength <= maxBodySlurpSize {
					io.CopyN(ioutil.Discard, resp.Body, maxBodySlurpSize)
				}
				resp.Body.Close()
			}
		}
		var res clientResponse
		if json.Unmarshal(b, &res) == nil && res.ID == nil {
			return // ignore error from Notification
		}
		res.Error = NewError(errInternal.Code, err.Error())
		buf := &bytes.Buffer{}
		json.NewEncoder(buf).Encode(res)
		conn.ready <- ioutil.NopCloser(buf)
	}()
	return len(buf), nil
}
Example #27
// GetPartialObject retrieves an object range and writes it to a writer
func (d donutDriver) GetPartialObject(w io.Writer, bucketName, objectName string, start, length int64) (int64, error) {
	d.lock.RLock()
	defer d.lock.RUnlock()
	if d.donut == nil {
		return 0, iodine.New(drivers.InternalError{}, nil)
	}
	errParams := map[string]string{
		"bucketName": bucketName,
		"objectName": objectName,
		"start":      strconv.FormatInt(start, 10),
		"length":     strconv.FormatInt(length, 10),
	}
	if !drivers.IsValidBucket(bucketName) || strings.Contains(bucketName, ".") {
		return 0, iodine.New(drivers.BucketNameInvalid{Bucket: bucketName}, errParams)
	}
	if !drivers.IsValidObjectName(objectName) || strings.TrimSpace(objectName) == "" {
		return 0, iodine.New(drivers.ObjectNameInvalid{Object: objectName}, errParams)
	}
	if start < 0 {
		return 0, iodine.New(drivers.InvalidRange{
			Start:  start,
			Length: length,
		}, errParams)
	}
	reader, size, err := d.donut.GetObject(bucketName, objectName)
	if err != nil {
		return 0, iodine.New(drivers.ObjectNotFound{
			Bucket: bucketName,
			Object: objectName,
		}, nil)
	}
	defer reader.Close()
	if start > size || (start+length-1) > size {
		return 0, iodine.New(drivers.InvalidRange{
			Start:  start,
			Length: length,
		}, errParams)
	}
	_, err = io.CopyN(ioutil.Discard, reader, start)
	if err != nil {
		return 0, iodine.New(err, errParams)
	}
	n, err := io.CopyN(w, reader, length)
	if err != nil {
		return 0, iodine.New(err, errParams)
	}
	return n, nil
}
Example #28
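// doWrites returns a benchmark body that, on each iteration, creates a file
// under the mount point and writes size bytes into it from a counting reader.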
func doWrites(size int64) func(b *testing.B, mnt string) {
	return func(b *testing.B, mnt string) {
		counter := &bazfstestutil.CountReader{}
		p := path.Join(mnt, "testcontent")

		b.ResetTimer()
		b.SetBytes(size)

		for i := 0; i < b.N; i++ {
			f, err := os.Create(p)
			if err != nil {
				b.Fatalf("create: %v", err)
			}
			defer f.Close()

			_, err = io.CopyN(f, counter, size)
			if err != nil {
				b.Fatalf("write: %v", err)
			}

			err = f.Close()
			if err != nil {
				b.Fatalf("close: %v", err)
			}
		}
	}
}
Example #29
// SendFile sends the given file as response
func SendFile(w http.ResponseWriter, filename, contentType string) error {
	fh, err := os.Open(filename)
	if err != nil {
		return err
	}
	defer fh.Close()
	fi, err := fh.Stat()
	if err != nil {
		return err
	}
	size := fi.Size()
	if _, err = fh.Seek(0, 0); err != nil {
		err = fmt.Errorf("error seeking in %v: %s", fh, err)
		http.Error(w, err.Error(), 500)
		return err
	}
	if contentType != "" {
		w.Header().Add("Content-Type", contentType)
	}
	w.Header().Add("Content-Length", fmt.Sprintf("%d", size))
	w.WriteHeader(200)
	Log.Info("SendFile", "filename", filename, "length", size, "header", w.Header())
	fh.Seek(0, 0)
	if _, err = io.CopyN(w, fh, size); err != nil {
		err = fmt.Errorf("error sending file %q: %s", filename, err)
		Log.Error("SendFile", "filename", filename, "error", err)
	}
	return err
}
Example #30
// Split an input stream into the number of shards given to the encoder.
//
// The data will be split into equally sized shards.
// If the data size isn't divisible by the number of shards,
// the last shard will contain extra zeros.
//
// You must supply the total size of your input.
// 'ErrShortData' will be returned if it is unable to retrieve the number of bytes
// indicated.
func (r rsStream) Split(data io.Reader, dst []io.Writer, size int64) error {
	if size < int64(r.r.DataShards) {
		return ErrShortData
	}

	if len(dst) != r.r.DataShards {
		return ErrInvShardNum
	}

	for i := range dst {
		if dst[i] == nil {
			return StreamWriteError{Err: ErrShardNoData, Stream: i}
		}
	}

	// Calculate number of bytes per shard.
	perShard := (size + int64(r.r.DataShards) - 1) / int64(r.r.DataShards)

	// Pad data to r.Shards*perShard.
	padding := make([]byte, (int64(r.r.Shards)*perShard)-size)
	data = io.MultiReader(data, bytes.NewBuffer(padding))

	// Split into equal-length shards and copy.
	for i := range dst {
		n, err := io.CopyN(dst[i], data, perShard)
		if err != io.EOF && err != nil {
			return err
		}
		if n != perShard {
			return ErrShortData
		}
	}

	return nil
}