Esempio n. 1
0
// getPack locates a packfile in our own repository, opens it together with
// its .idx companion, and returns a PackReader over the pair. Any failure
// aborts the test.
func getPack(t *testing.T) *PackReader {
	// Take a random packfile in our own repository.
	packs, err := filepath.Glob("../.git/objects/pack/pack-*.pack")
	if err != nil || len(packs) == 0 {
		// Give the matches their own verb; the original format string had
		// more arguments than verbs, which go vet flags.
		t.Fatalf("globbing failed: %v (matches: %q)", err, packs)
	}
	pname := packs[0]
	pack, err := os.Open(pname)
	if err != nil {
		t.Fatalf("could not open %s: %s", pname, err)
	}
	// The index lives next to the pack: same name with ".idx" in place of ".pack".
	idx, err := os.Open(pname[:len(pname)-5] + ".idx")
	if err != nil {
		t.Fatal(err)
	}
	packstat, err := pack.Stat()
	if err != nil {
		t.Fatal("stat pack", err)
	}
	idxstat, err := idx.Stat()
	if err != nil {
		t.Fatal("stat idx", err)
	}
	t.Logf("opening pack %s (%d bytes)", pname, packstat.Size())
	// Section readers over the whole files give PackReader random access.
	pk, err := NewPackReader(
		io.NewSectionReader(pack, 0, packstat.Size()),
		io.NewSectionReader(idx, 0, idxstat.Size()))
	if err != nil {
		t.Fatal(err)
	}
	return pk
}
Esempio n. 2
0
// Open returns a ReadCloser that provides access to the File's contents.
func (f *File) Open() (rc io.ReadCloser, err os.Error) {
	off := int64(f.headerOffset)
	size := int64(f.CompressedSize)
	// Lazily locate the start of the file body: parse the local file header
	// once and remember where it ended.
	if f.bodyOffset == 0 {
		r := io.NewSectionReader(f.zipr, off, f.zipsize-off)
		if err = readFileHeader(f, r); err != nil {
			return
		}
		// The reader's position after the header parse is the first body byte.
		if f.bodyOffset, err = r.Seek(0, os.SEEK_CUR); err != nil {
			return
		}
		// Re-read the size: presumably readFileHeader filled in
		// f.CompressedSize when it started out zero — TODO confirm.
		if size == 0 {
			size = int64(f.CompressedSize)
		}
	}
	if f.hasDataDescriptor() && size == 0 {
		// permit SectionReader to see the rest of the file
		size = f.zipsize - (off + f.bodyOffset)
	}
	r := io.NewSectionReader(f.zipr, off+f.bodyOffset, size)
	switch f.Method {
	case Store: // (no compression)
		rc = ioutil.NopCloser(r)
	case Deflate:
		rc = flate.NewReader(r)
	default:
		err = UnsupportedMethod
	}
	// Wrap the body so the CRC32 is verified as the caller consumes it.
	if rc != nil {
		rc = &checksumReader{rc, crc32.NewIEEE(), f, r}
	}
	return
}
Esempio n. 3
0
// TestNewRequestContentLength verifies that NewRequest derives ContentLength
// from the reader types it knows how to measure, and leaves it 0 for readers
// whose length it cannot (or deliberately does not) detect.
func TestNewRequestContentLength(t *testing.T) {
	// readByte advances r by one byte so the reader is no longer at its
	// start when NewRequest inspects it.
	readByte := func(r io.Reader) io.Reader {
		var b [1]byte
		r.Read(b[:])
		return r
	}
	tests := []struct {
		r    io.Reader
		want int64
	}{
		{bytes.NewReader([]byte("123")), 3},
		{bytes.NewBuffer([]byte("1234")), 4},
		{strings.NewReader("12345"), 5},
		{strings.NewReader(""), 0},
		{NoBody, 0},

		// Not detected. During Go 1.8 we tried to make these set to -1, but
		// due to Issue 18117, we keep these returning 0, even though they're
		// unknown.
		{struct{ io.Reader }{strings.NewReader("xyz")}, 0},
		{io.NewSectionReader(strings.NewReader("x"), 0, 6), 0},
		{readByte(io.NewSectionReader(strings.NewReader("xy"), 0, 6)), 0},
	}
	for i, tt := range tests {
		req, err := NewRequest("POST", "http://localhost/", tt.r)
		if err != nil {
			t.Fatal(err)
		}
		if req.ContentLength != tt.want {
			t.Errorf("test[%d]: ContentLength(%T) = %d; want %d", i, tt.r, req.ContentLength, tt.want)
		}
	}
}
Esempio n. 4
0
// TestNewRequestContentLength verifies that NewRequest derives ContentLength
// from the reader types it special-cases, and leaves it 0 for readers it
// cannot measure.
func TestNewRequestContentLength(t *testing.T) {
	// readByte advances r by one byte so the reader is no longer at its
	// start when NewRequest inspects it.
	readByte := func(r io.Reader) io.Reader {
		var b [1]byte
		r.Read(b[:])
		return r
	}
	tests := []struct {
		r    io.Reader
		want int64
	}{
		{bytes.NewReader([]byte("123")), 3},
		{bytes.NewBuffer([]byte("1234")), 4},
		{strings.NewReader("12345"), 5},
		// Not detected:
		{struct{ io.Reader }{strings.NewReader("xyz")}, 0},
		{io.NewSectionReader(strings.NewReader("x"), 0, 6), 0},
		{readByte(io.NewSectionReader(strings.NewReader("xy"), 0, 6)), 0},
	}
	for i, tt := range tests {
		req, err := NewRequest("POST", "http://localhost/", tt.r)
		if err != nil {
			t.Fatal(err)
		}
		if req.ContentLength != tt.want {
			// Include the case index so a failure points at one table entry.
			t.Errorf("test[%d]: ContentLength(%T) = %d; want %d", i, tt.r, req.ContentLength, tt.want)
		}
	}
}
Esempio n. 5
0
// parseSFNT reads the SFNT table directory at headerOffset in r and returns
// a map from table tag to Table. The offset-keyed cache in table lets tables
// shared across calls (e.g. fonts in a collection) be reused.
func parseSFNT(r io.ReaderAt, headerOffset int64, table map[int64]Table) (SFNT, error) {
	// Read the fixed-size header at headerOffset.
	header := new(SfntHeader)
	headerSize := int64(binary.Size(header))
	sr := io.NewSectionReader(r, headerOffset, headerSize)
	if err := binary.Read(sr, binary.BigEndian, header); err != nil {
		return nil, err
	}
	// The offset table (one entry per table) immediately follows the header.
	numTables := header.NumTables
	offsetTable := make([]OffsetEntry, numTables)
	sr = io.NewSectionReader(r, headerOffset+headerSize, int64(binary.Size(offsetTable)))
	if err := binary.Read(sr, binary.BigEndian, offsetTable); err != nil {
		return nil, err
	}
	tableMap := make(SFNT)
	for _, entry := range offsetTable {
		tag := entry.Tag.String()
		offset := int64(entry.Offset)
		size := int64(entry.Length)
		// Reuse a previously constructed table at the same file offset,
		// otherwise wrap the raw bytes and remember them in the cache.
		if v, ok := table[offset]; ok {
			tableMap[tag] = v
		} else {
			v = &DefaultTable{entry.Tag, io.NewSectionReader(r, offset, size)}
			table[offset] = v
			tableMap[tag] = v
		}
	}
	// Let each registered parser replace raw tables with parsed forms.
	for _, p := range DefaultParser {
		for i, v := range tableMap {
			tableMap[i] = p.Parse(tableMap, v)
		}
	}
	return tableMap, nil
}
Esempio n. 6
0
// Open returns a ReadCloser that provides access to the File's contents.
// Multiple files may be read concurrently.
func (f *File) Open() (rc io.ReadCloser, err error) {
	bodyOffset, err := f.findBodyOffset()
	if err != nil {
		return
	}
	// Refuse unknown compression methods before touching the body.
	dcomp := f.zip.decompressor(f.Method)
	if dcomp == nil {
		err = ErrAlgorithm
		return
	}
	compressedSize := int64(f.CompressedSize64)
	body := io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset, compressedSize)
	// An optional data descriptor (CRC and sizes) sits right after the body.
	var desr io.Reader
	if f.hasDataDescriptor() {
		descOff := f.headerOffset + bodyOffset + compressedSize
		desr = io.NewSectionReader(f.zipr, descOff, dataDescriptorLen)
	}
	// Wrap the decompressed stream so the CRC32 is checked on the way out.
	rc = &checksumReader{
		rc:   dcomp(body),
		hash: crc32.NewIEEE(),
		f:    f,
		desr: desr,
	}
	return
}
Esempio n. 7
0
// ExampleBytes interleaves writes into the buffer returned by newWriter with
// reads through the buffer itself and through section readers over it.
func ExampleBytes() {
	buf := newWriter(make([]byte, 0, 10))
	// Copying from an empty buffer produces no output.
	io.Copy(os.Stdout, buf)
	// NOTE: *&buf was a no-op dereference-of-address; plain buf is identical.
	io.Copy(os.Stdout, io.NewSectionReader(buf, 0, 100))

	io.WriteString(buf, "Hello ")
	// A section reader views the buffer without consuming it, so the same
	// bytes can then be consumed from buf directly.
	r := io.NewSectionReader(buf, 0, int64(buf.Len()))
	io.CopyN(os.Stdout, r, 5)
	io.CopyN(os.Stdout, buf, 5)
	io.WriteString(buf, "World")
	r = io.NewSectionReader(buf, 0, int64(buf.Len()))
	io.CopyN(os.Stdout, r, 6)

	io.WriteString(buf, "abcdefg")
	io.Copy(os.Stdout, buf)
	io.Copy(os.Stdout, buf)

	io.WriteString(buf, "Hello World")
	r = io.NewSectionReader(buf, 0, int64(buf.Len()))
	io.CopyN(os.Stdout, r, 5)
	io.CopyN(os.Stdout, buf, 4)

	io.WriteString(buf, "abcdefg")
	io.Copy(os.Stdout, buf)
	io.Copy(os.Stdout, buf)
	//Output:
	// HelloHello World WorldabcdefgHelloHello Worldabcdefg
}
Esempio n. 8
0
// Open returns a ReadCloser that provides access to the File's contents.
// Multiple files may be read concurrently.
func (f *File) Open() (rc io.ReadCloser, err error) {
	// Skip past the local file header to the first byte of the body.
	bodyOffset, err := f.findBodyOffset()
	if err != nil {
		return
	}
	size := int64(f.CompressedSize64)
	r := io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset, size)
	switch f.Method {
	case Store: // (no compression)
		rc = ioutil.NopCloser(r)
	case Deflate:
		// NOTE(review): the ZIP Deflate method stores a raw DEFLATE stream;
		// zlib.NewReader expects a zlib header and will fail on archives from
		// standard zip tools (archive/zip uses flate.NewReader here) —
		// confirm this choice is intentional for this format.
		rc, err = zlib.NewReader(r)
		if err != nil {
			return
		}
	default:
		err = ErrAlgorithm
		return
	}
	// An optional data descriptor (CRC and sizes) follows the body.
	var desr io.Reader
	if f.hasDataDescriptor() {
		desr = io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset+size, dataDescriptorLen)
	}
	// Verify the CRC32 as the caller consumes the body.
	rc = &checksumReader{rc, crc32.NewIEEE(), f, desr, nil}
	return
}
Esempio n. 9
0
// ExamplePackReader_Extract opens a pack/idx pair from the repository and
// extracts a single object by its hash.
func ExamplePackReader_Extract() {
	base := ".git/objects/pack/pack-bb4afc76654154e3a9f198723ba89873ecb14293"
	fpack, err := os.Open(base + ".pack")
	if err != nil {
		log.Fatal(err)
	}
	fidx, err := os.Open(base + ".idx")
	if err != nil {
		log.Fatal(err)
	}
	defer fpack.Close()
	defer fidx.Close()

	// Seeking to the end yields each file's size for the section readers.
	packSize, errPack := fpack.Seek(0, os.SEEK_END)
	idxSize, errIdx := fidx.Seek(0, os.SEEK_END)
	if errPack != nil || errIdx != nil {
		log.Fatal(errPack, errIdx)
	}
	pk, err := NewPackReader(
		io.NewSectionReader(fpack, 0, packSize),
		io.NewSectionReader(fidx, 0, idxSize))
	if err != nil {
		log.Fatal(err)
	}
	// Look up one known object by its SHA-1 hex digest.
	var hash Hash
	hex.Decode(hash[:], []byte("2e16bbf779131a90346eab585e9e5c4736d3aeac"))
	obj, err := pk.Extract(hash)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%+v", obj)
}
Esempio n. 10
0
// Decode reads a TIFF image from r and returns it as an image.Image.
// The type of Image returned depends on the contents of the TIFF.
func Decode(r io.Reader) (img image.Image, err os.Error) {
	d, err := newDecoder(r)
	if err != nil {
		return
	}

	// Check if we have the right number of strips, offsets and counts.
	rps := int(d.firstVal(tRowsPerStrip))
	numStrips := (d.config.Height + rps - 1) / rps
	if rps == 0 || len(d.features[tStripOffsets]) < numStrips || len(d.features[tStripByteCounts]) < numStrips {
		return nil, FormatError("inconsistent header")
	}

	// Allocate the destination image matching the TIFF's color mode.
	switch d.mode {
	case mGray, mGrayInvert:
		img = image.NewGray(d.config.Width, d.config.Height)
	case mPaletted:
		img = image.NewPaletted(d.config.Width, d.config.Height, d.palette)
	case mNRGBA:
		img = image.NewNRGBA(d.config.Width, d.config.Height)
	case mRGB, mRGBA:
		img = image.NewRGBA(d.config.Width, d.config.Height)
	}

	var p []byte
	for i := 0; i < numStrips; i++ {
		ymin := i * rps
		// The last strip may be shorter.
		if i == numStrips-1 && d.config.Height%rps != 0 {
			rps = d.config.Height % rps
		}
		offset := int64(d.features[tStripOffsets][i])
		n := int64(d.features[tStripByteCounts][i])
		switch d.firstVal(tCompression) {
		case cNone:
			// TODO(bsiegert): Avoid copy if r is a tiff.buffer.
			// The slice needs length n, not just capacity n: ReadAt fills
			// exactly len(p) bytes, so a zero-length slice read nothing.
			p = make([]byte, n)
			_, err = d.r.ReadAt(p, offset)
		case cLZW:
			r := lzw.NewReader(io.NewSectionReader(d.r, offset, n), lzw.MSB, 8)
			p, err = ioutil.ReadAll(r)
			r.Close()
		case cDeflate, cDeflateOld:
			// Assign with = rather than := so failures reach the error
			// check below instead of a case-scoped shadowed err.
			var zr io.ReadCloser
			zr, err = zlib.NewReader(io.NewSectionReader(d.r, offset, n))
			if err != nil {
				return nil, err
			}
			p, err = ioutil.ReadAll(zr)
			zr.Close()
		default:
			err = UnsupportedError("compression")
		}
		if err != nil {
			return
		}
		// Stop at the first strip that fails to decode rather than
		// noticing the error one iteration late.
		if err = d.decode(img, p, ymin, ymin+rps); err != nil {
			return
		}
	}
	return
}
Esempio n. 11
0
// TestFile parses the backup file named by the path flag (skipping when it
// is unset), dumps its section table, and walks every message and MMS folder.
func TestFile(t *testing.T) {
	if *path == "" {
		t.Logf("skipping since no input file specified")
		return
	}
	r, err := OpenFile(*path)
	if err != nil {
		t.Fatal(err)
	}
	info, err := r.Info()
	if err != nil {
		t.Error(err)
	}
	// Log the header without its section list, then each section on its own line.
	sects := info.Sections
	info.Sections = nil
	t.Logf("%+v", info)
	for _, sec := range sects {
		t.Logf("%s: %+v", secNames[sec.Type], sec)
	}

	// Test messages.
	for _, sec := range sects {
		if sec.Type == SecMessages {
			for id, off := range sec.Folders {
				// Parse from the folder's offset to the end of the section;
				// the +16 presumably accounts for a section header — TODO confirm.
				r := io.NewSectionReader(r.File, off, sec.Offset+16+sec.Length-off)
				title, msgs, err := parseMessageFolder(r)
				if err != nil {
					t.Error(err)
				}
				t.Logf("Folder %d %q", id, title)
				t.Logf("%d messages", len(msgs))
				if len(msgs) > 0 {
					t.Logf("First message: %s", msgs[0])
					t.Logf("Last message: %s", msgs[len(msgs)-1])
				}
			}
		}
		if sec.Type == SecMMS {
			for id, off := range sec.Folders {
				r := io.NewSectionReader(r.File, off, sec.Offset+16+sec.Length-off)
				title, msgs, err := parseMMSFolder(r)
				if err != nil {
					t.Error(err)
				}
				t.Logf("Folder %d %q", id, title)
				t.Logf("%d messages", len(msgs))
				if len(msgs) > 0 {
					// MMS bodies can be long: log only the head and tail.
					first, last := msgs[0], msgs[len(msgs)-1]
					t.Logf("First message: %q...%q", first[:40], first[len(first)-40:])
					t.Logf("Last message: %q...%q", last[:40], last[len(last)-40:])
				}
			}
		}
	}
}
Esempio n. 12
0
// Read fills both chunk buffers from their respective files at the current
// offsets. EOF on either file is tolerated; the byte counts land in s.read.
func (s *subset) Read() (err error) {
	for side := 0; side < 2; side++ {
		sr := io.NewSectionReader(s.fh[side], s.offsets[side], chunkSize)
		if s.read[side], err = sr.Read(s.b[side]); err != nil && err != io.EOF {
			return err
		}
	}
	return
}
Esempio n. 13
0
// readIdxRecord reads the 16-byte index record for the 1-based record number
// p from f and returns its (pos, length) pair.
func readIdxRecord(f *os.File, p int64) (pos int64, length int64, err error) {
	sr := io.NewSectionReader(f, (p-1)*16, 16)
	if err = binary.Read(sr, binary.LittleEndian, &pos); err != nil {
		return
	}
	err = binary.Read(sr, binary.LittleEndian, &length)
	return
}

// Part returns a single pos and length into the data file for the record or
// record range named by part ("N" or "N-M", 1-based). It is used for
// non-subset indices where the records are contiguous in the data file.
func (i *Idx) Part(part string, idxFilePath string, idxLength int64) (pos int64, length int64, err error) {
	f, err := os.Open(idxFilePath)
	if err != nil {
		err = errors.New(e.IndexNoFile)
		return
	}
	defer f.Close()

	if strings.Contains(part, "-") {
		startend := strings.Split(part, "-")
		start, startEr := strconv.ParseInt(startend[0], 10, 64)
		end, endEr := strconv.ParseInt(startend[1], 10, 64)
		// Reject unparsable bounds, out-of-range records, and inverted
		// ranges (start > end would otherwise yield a negative length).
		if startEr != nil || endEr != nil || start <= 0 || start > idxLength || end < start || end > idxLength {
			err = errors.New(e.InvalidIndexRange)
			return
		}

		// The span runs from the start record's offset to the end of the
		// end record. Read errors were previously ignored silently.
		srecPos, _, rerr := readIdxRecord(f, start)
		if rerr != nil {
			return 0, 0, rerr
		}
		erecPos, erecLen, rerr := readIdxRecord(f, end)
		if rerr != nil {
			return 0, 0, rerr
		}
		pos = srecPos
		length = (erecPos - srecPos) + erecLen
	} else {
		p, er := strconv.ParseInt(part, 10, 64)
		if er != nil || p <= 0 || p > idxLength {
			err = errors.New(e.IndexOutBounds)
			return
		}
		pos, length, err = readIdxRecord(f, p)
	}
	return
}
Esempio n. 14
0
// main demonstrates io.SectionReader: sequential Read, random-access ReadAt,
// the three Seek whence modes, and Size.
func main() {
	reader, err := os.Open("test.txt")
	if err != nil {
		// Bail out early: every demo below needs the file, and calling
		// methods on a nil *os.File would panic.
		fmt.Println("could not open test.txt:", err)
		return
	}
	defer reader.Close()
	// A section covering bytes [5, 15) of the file.
	sectionReader := io.NewSectionReader(reader, 5, 10)
	fmt.Println(reflect.TypeOf(sectionReader))
	var n, total int
	p := make([]byte, 15)
	//Read
	for {
		n, err = sectionReader.Read(p)
		if err == io.EOF {
			fmt.Println("Find EOF so end total", total)
			break
		}
		total = total + n
		fmt.Println("Read value:", string(p[0:n]))
		fmt.Println("Read count:", n)
	}
	//ReadAt: offsets are relative to the section start, not the file.
	p = make([]byte, 15)
	n, _ = sectionReader.ReadAt(p, 4)
	fmt.Println("Read value:", string(p[0:n]))
	fmt.Println("Read count:", n)
	//Seek
	sectionReader1 := io.NewSectionReader(reader, 2, 20)
	sectionReader1.Seek(2, 0) // whence 0: offset relative to section start
	p = make([]byte, 10)
	n, _ = sectionReader1.Read(p)
	fmt.Println("First read value:", string(p[0:n]))
	fmt.Println("First read count:", n)
	a, _ := sectionReader.Seek(2, 1) // whence 1: relative to current offset
	fmt.Println("off - base is", a)
	n, _ = sectionReader.Read(p)
	fmt.Println("Second Read value:", string(p[0:n]))
	fmt.Println("Second Read count:", n)
	sectionReader.Seek(8, 2) // whence 2: relative to section end
	n, _ = sectionReader.Read(p)
	fmt.Println("Third read value:", string(p[0:n]))
	fmt.Println("Third read count:", n)

	//size: Size reports the section length, independent of reads.
	sectionReader2 := io.NewSectionReader(reader, 5, 20)
	fmt.Println("Can read count:", sectionReader2.Size())
	p = make([]byte, 10)
	n, _ = sectionReader2.Read(p)
	fmt.Println("Read count:", n)
	fmt.Println("Can read count:", sectionReader2.Size())
}
Esempio n. 15
0
File: file.go Progetto: wycharry/go
// Open returns a new ReadSeeker reading the ELF section.
// Even if the section is stored compressed in the ELF file,
// the ReadSeeker reads uncompressed data.
func (s *Section) Open() io.ReadSeeker {
	switch {
	case s.Flags&SHF_COMPRESSED == 0:
		// Uncompressed: expose the raw section bytes directly.
		return io.NewSectionReader(s.sr, 0, 1<<63-1)
	case s.compressionType == COMPRESS_ZLIB:
		// Decompress lazily; reset re-creates the zlib stream so the
		// returned ReadSeeker can rewind.
		reset := func() (io.Reader, error) {
			compressed := io.NewSectionReader(s.sr, s.compressionOffset, int64(s.FileSize)-s.compressionOffset)
			return zlib.NewReader(compressed)
		}
		return &readSeekerFromReader{reset: reset, size: int64(s.Size)}
	default:
		err := &FormatError{int64(s.Offset), "unknown compression type", s.compressionType}
		return errorReader{err}
	}
}
Esempio n. 16
0
// GetFile streams the requested byte range of a file to the client, relaying
// the request to a remote API server when the path's shard is not local.
func (a *combinedAPIServer) GetFile(getFileRequest *pfs.GetFileRequest, apiGetFileServer pfs.Api_GetFileServer) (retErr error) {
	shard, clientConn, err := a.getShardAndClientConnIfNecessary(getFileRequest.Path, true)
	if err != nil {
		return err
	}
	// A non-nil clientConn means another server owns this shard: proxy the
	// request there and relay its response stream back to our caller.
	if clientConn != nil {
		apiGetFileClient, err := pfs.NewApiClient(clientConn).GetFile(context.Background(), getFileRequest)
		if err != nil {
			return err
		}
		return protoutil.RelayFromStreamingBytesClient(apiGetFileClient, apiGetFileServer)
	}
	file, err := a.driver.GetFile(getFileRequest.Path, shard)
	if err != nil {
		return err
	}
	// Close the file when done; surface the Close error only when nothing
	// else failed first.
	defer func() {
		if err := file.Close(); err != nil && retErr == nil {
			retErr = err
		}
	}()
	// Stream only the requested window of the file.
	return protoutil.WriteToStreamingBytesServer(
		io.NewSectionReader(file, getFileRequest.OffsetBytes, getFileRequest.SizeBytes),
		apiGetFileServer,
	)
}
Esempio n. 17
0
// ResumableMedia specifies the media to upload in chunks and can be cancelled with ctx.
// At most one of Media and ResumableMedia may be set.
// mediaType identifies the MIME media type of the upload, such as "image/png".
// If mediaType is "", it will be auto-detected.
func (c *MailInsertCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *MailInsertCall {
	c.protocol_ = "resumable"
	c.mediaType_ = mediaType
	// Wrap the ReaderAt so the upload machinery gets a bounded reader.
	c.resumable_ = io.NewSectionReader(r, 0, size)
	c.ctx_ = ctx
	return c
}
Esempio n. 18
0
// nextReader returns a seekable reader representing the next packet of data.
// This operation increases the shared u.readerPos counter, but note that it
// does not need to be wrapped in a mutex because nextReader is only called
// from the main thread.
func (u *uploader) nextReader() (io.ReadSeeker, error) {
	switch r := u.in.Body.(type) {
	case io.ReaderAt:
		var err error

		n := u.opts.PartSize
		if u.totalSize >= 0 {
			bytesLeft := u.totalSize - u.readerPos

			if bytesLeft == 0 {
				// Nothing left: return a zero-length reader along with EOF.
				err = io.EOF
				n = bytesLeft
			} else if bytesLeft <= u.opts.PartSize {
				// Short final part: signal the caller this is the last one.
				err = io.ErrUnexpectedEOF
				n = bytesLeft
			}
		}

		// ReaderAt bodies can be windowed in place without copying.
		buf := io.NewSectionReader(r, u.readerPos, n)
		u.readerPos += n

		return buf, err

	default:
		// Non-seekable bodies must be copied out one part at a time.
		packet := make([]byte, u.opts.PartSize)
		n, err := io.ReadFull(u.in.Body, packet)
		u.readerPos += int64(n)

		return bytes.NewReader(packet[0:n]), err
	}
}
Esempio n. 19
0
// Fetch returns a ReadCloser over the blob br stored inside one of the pack
// files, along with the blob's size.
func (s *storage) Fetch(br blob.Ref) (io.ReadCloser, uint32, error) {
	meta, err := s.meta(br)
	if err != nil {
		return nil, 0, err
	}

	if meta.file >= len(s.fds) {
		return nil, 0, fmt.Errorf("diskpacked: attempt to fetch blob from out of range pack file %d > %d", meta.file, len(s.fds))
	}
	rac := s.fds[meta.file]
	// Restrict reads to the blob's span inside the pack file.
	var rs io.ReadSeeker = io.NewSectionReader(rac, meta.offset, int64(meta.size))
	fn := rac.Name()
	// Ensure entry is in map.
	readVar.Add(fn, 0)
	// Layer stats wrappers so both per-file and per-root read counters are
	// updated as the caller reads.
	if v, ok := readVar.Get(fn).(*expvar.Int); ok {
		rs = types.NewStatsReadSeeker(v, rs)
	}
	readTotVar.Add(s.root, 0)
	if v, ok := readTotVar.Get(s.root).(*expvar.Int); ok {
		rs = types.NewStatsReadSeeker(v, rs)
	}
	// The pack file descriptor stays open for reuse, so Close is a no-op.
	rsc := struct {
		io.ReadSeeker
		io.Closer
	}{
		rs,
		types.NopCloser,
	}
	return rsc, meta.size, nil
}
Esempio n. 20
0
// Reader returns the data contained in the stream v.
// If v.Kind() != Stream, Reader returns a ReadCloser that
// responds to all reads with a ``stream not present'' error.
func (v Value) Reader() io.ReadCloser {
	x, ok := v.data.(stream)
	if !ok {
		return &errorReadCloser{fmt.Errorf("stream not present")}
	}
	var rd io.Reader
	// The raw stream bytes start at x.offset and run for /Length bytes.
	rd = io.NewSectionReader(v.r.f, x.offset, v.Key("Length").Int64())
	// Encrypted documents decrypt the stream before any filters apply.
	if v.r.key != nil {
		rd = decryptStream(v.r.key, v.r.useAES, x.ptr, rd)
	}
	// Apply the /Filter chain (a single name or an array of names), pairing
	// each with the matching entry of /DecodeParms.
	filter := v.Key("Filter")
	param := v.Key("DecodeParms")
	switch filter.Kind() {
	default:
		panic(fmt.Errorf("unsupported filter %v", filter))
	case Null:
		// ok
	case Name:
		rd = applyFilter(rd, filter.Name(), param)
	case Array:
		for i := 0; i < filter.Len(); i++ {
			rd = applyFilter(rd, filter.Index(i).Name(), param.Index(i))
		}
	}

	return ioutil.NopCloser(rd)
}
Esempio n. 21
0
File: upload.go Progetto: ncw/rclone
// nextReader returns a seekable reader representing the next packet of data.
// This operation increases the shared u.readerPos counter, but note that it
// does not need to be wrapped in a mutex because nextReader is only called
// from the main thread.
func (u *uploader) nextReader() (io.ReadSeeker, int, error) {
	// Bodies that support both random access and seeking can be windowed
	// in place without copying.
	type readerAtSeeker interface {
		io.ReaderAt
		io.ReadSeeker
	}
	switch r := u.in.Body.(type) {
	case readerAtSeeker:
		var err error

		n := u.ctx.PartSize
		if u.totalSize >= 0 {
			bytesLeft := u.totalSize - u.readerPos

			// Final (possibly exactly PartSize-long) part: shrink the
			// window and signal EOF to the caller.
			if bytesLeft <= u.ctx.PartSize {
				err = io.EOF
				n = bytesLeft
			}
		}

		reader := io.NewSectionReader(r, u.readerPos, n)
		u.readerPos += n

		return reader, int(n), err

	default:
		// Non-seekable bodies must be buffered one part at a time.
		part := make([]byte, u.ctx.PartSize)
		n, err := readFillBuf(r, part)
		u.readerPos += int64(n)

		return bytes.NewReader(part[0:n]), n, err
	}
}
Esempio n. 22
0
func readCompressed(r *io.SectionReader, offset int64, s []byte) (int, error) {
	zr, err := zlib.NewReader(io.NewSectionReader(r, offset, r.Size()-offset))
	if err != nil {
		return 0, err
	}
	return io.ReadFull(zr, s)
}
Esempio n. 23
0
// TestByteView runs the same battery of checks against views backed by a
// []byte and by a string, for empty, 1-byte and 2-byte contents.
func TestByteView(t *testing.T) {
	for _, s := range []string{"", "x", "yy"} {
		for _, v := range []ByteView{of([]byte(s)), of(s)} {
			name := fmt.Sprintf("string %q, view %+v", s, v)
			if v.Len() != len(s) {
				t.Errorf("%s: Len = %d; want %d", name, v.Len(), len(s))
			}
			if v.String() != s {
				t.Errorf("%s: String = %q; want %q", name, v.String(), s)
			}
			// Copy into a destination large enough for any test string...
			var longDest [3]byte
			if n := v.Copy(longDest[:]); n != len(s) {
				t.Errorf("%s: long Copy = %d; want %d", name, n, len(s))
			}
			// ...and into one that may be shorter, where Copy must truncate.
			var shortDest [1]byte
			if n := v.Copy(shortDest[:]); n != min(len(s), 1) {
				t.Errorf("%s: short Copy = %d; want %d", name, n, min(len(s), 1))
			}
			if got, err := ioutil.ReadAll(v.Reader()); err != nil || string(got) != s {
				t.Errorf("%s: Reader = %q, %v; want %q", name, got, err, s)
			}
			// The view also serves as an io.ReaderAt.
			if got, err := ioutil.ReadAll(io.NewSectionReader(v, 0, int64(len(s)))); err != nil || string(got) != s {
				t.Errorf("%s: SectionReader of ReaderAt = %q, %v; want %q", name, got, err, s)
			}
		}
	}
}
Esempio n. 24
0
// Open opens and returns the FileHeader's associated File.
func (fh *FileHeader) Open() (multipart.File, error) {
	b := fh.content
	if b == nil {
		// Larger uploads are spooled to disk; open the temp file.
		return os.Open(fh.tmpfile)
	}
	// Small uploads are held in memory; wrap them in a read-only view.
	sr := io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b)))
	return sectionReadCloser{sr}, nil
}
Esempio n. 25
0
// BodyReader returns an io.ReadCloser that reads the HTTP request or response
// body. If mv.skipBody was set the reader will immediately return io.EOF.
//
// If the Decode option is passed the body will be unchunked if
// Transfer-Encoding is set to "chunked", and will decode the following
// Content-Encodings: gzip, deflate.
func (mv *MessageView) BodyReader(opts ...Option) (io.ReadCloser, error) {
	conf := &config{}
	for _, o := range opts {
		o(conf)
	}

	// The body occupies [bodyoffset, traileroffset) of the raw message.
	var r io.Reader = io.NewSectionReader(bytes.NewReader(mv.message),
		mv.bodyoffset, mv.traileroffset-mv.bodyoffset)

	if !conf.decode {
		// Raw bytes requested: return the section as-is.
		return ioutil.NopCloser(r), nil
	}

	if mv.chunked {
		r = httputil.NewChunkedReader(r)
	}

	switch mv.compress {
	case "gzip":
		gr, err := gzip.NewReader(r)
		if err != nil {
			return nil, err
		}
		return gr, nil
	case "deflate":
		return flate.NewReader(r), nil
	default:
		return ioutil.NopCloser(r), nil
	}
}
Esempio n. 26
0
// Fetch returns a ReadCloser over the in-memory blob ref along with its size.
// A successful fetch also promotes the blob in the LRU and updates the fetch
// counters.
func (s *Storage) Fetch(ref blob.Ref) (file io.ReadCloser, size uint32, err error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	if s.lru != nil {
		s.lru.Get(ref.String()) // force to head
	}
	if s.m == nil {
		err = os.ErrNotExist
		return
	}
	b, ok := s.m[ref]
	if !ok {
		err = os.ErrNotExist
		return
	}
	size = uint32(len(b))
	// Counters are atomic so they can be read without holding s.mu.
	atomic.AddInt64(&s.blobsFetched, 1)
	atomic.AddInt64(&s.bytesFetched, int64(len(b)))

	// The blob lives entirely in memory, so the Closer is a no-op.
	return struct {
		*io.SectionReader
		io.Closer
	}{
		io.NewSectionReader(bytes.NewReader(b), 0, int64(size)),
		types.NopCloser,
	}, size, nil
}
Esempio n. 27
0
// FileTime returns the best guess of the file's creation time (or modtime).
// If the file doesn't have its own metadata indication the creation time (such as in EXIF),
// FileTime uses the modification time from the file system.
// If there was a valid EXIF but an error while trying to get a date from it,
// it tries the other methods.
// NOTE(review): the original comment claimed the EXIF error is logged, but no
// logging happens in this function — confirm against callers.
func FileTime(f io.ReaderAt) (time.Time, error) {
	var ct time.Time
	// defaultTime falls back to the filesystem modtime when f is an
	// *os.File; otherwise no time can be determined.
	defaultTime := func() (time.Time, error) {
		if osf, ok := f.(*os.File); ok {
			fi, err := osf.Stat()
			if err != nil {
				return ct, fmt.Errorf("Failed to find a modtime: lstat: %v", err)
			}
			return fi.ModTime(), nil
		}
		return ct, errors.New("All methods failed to find a creation time or modtime.")
	}

	// Only the head of the file is needed to parse EXIF metadata.
	size, ok := findSize(f)
	if !ok {
		size = 256 << 10 // enough to get the EXIF
	}
	r := io.NewSectionReader(f, 0, size)
	ex, err := exif.Decode(r)
	if err != nil {
		return defaultTime()
	}
	ct, err = ex.DateTime()
	if err != nil {
		return defaultTime()
	}
	return ct, nil
}
Esempio n. 28
0
// ReaderAtToReader adapts a ReaderAt to be used as a Reader.
// If ra implements googleapi.ContentTyper, then the returned reader
// will also implement googleapi.ContentTyper, delegating to ra.
func ReaderAtToReader(ra io.ReaderAt, size int64) io.Reader {
	section := io.NewSectionReader(ra, 0, size)
	typer, ok := ra.(googleapi.ContentTyper)
	if !ok {
		return section
	}
	// Preserve the content-type capability of the underlying reader.
	return readerTyper{section, typer}
}
Esempio n. 29
0
// TestNewChunkAlignedReaderAt checks a chunk-aligned wrapper over a 9-byte
// source split into 4-byte chunks: correct data for every in-bounds read
// size and offset, and io.EOF when reading at the end.
func TestNewChunkAlignedReaderAt(t *testing.T) {
	s := "0123" + "4567" + "8"
	sr := io.NewSectionReader(strings.NewReader(s), 0, int64(len(s)))
	r, err := NewChunkAlignedReaderAt(sr, 4)
	if err != nil {
		t.Fatalf("NewChunkAlignedReaderAt: %v", err)
	}

	if r.Size() != int64(len(s)) {
		t.Fatalf("Size() is %d, want %d", r.Size(), len(s))
	}

	// Exercise every (read size, offset) pair that stays within bounds.
	buf := make([]byte, 9)
	for i := 1; i < len(buf); i++ {
		for off := 0; off < len(s); off++ {
			if (i + off) > len(s) {
				continue
			}
			readP := buf[:i]
			_, err = r.ReadAt(readP, int64(off))
			if err != nil {
				t.Fatalf("r.ReadAt: %v", err)
			}
			// Plain string comparison; reflect.DeepEqual was overkill for
			// comparing two strings.
			if want := s[off : off+len(readP)]; want != string(readP) {
				t.Errorf("Want %s, got %s", want, readP)
			}
		}
	}

	// Reading exactly at the end must report EOF.
	_, err = r.ReadAt(buf, int64(len(s)))
	if err != io.EOF {
		t.Fatalf("Want %v, got %v", io.EOF, err)
	}
}
Esempio n. 30
0
// isZipWithOffsetFileHeaderLocation reports whether name contains a valid zip
// archive starting at some offset past the start of the file (e.g. a
// self-extracting archive with a stub prepended).
func (zipper ApplicationZipper) isZipWithOffsetFileHeaderLocation(name string) bool {
	loc, err := zipper.zipFileHeaderLocation(name)
	if err != nil {
		return false
	}
	if loc < 0 {
		// No embedded zip header was found.
		return false
	}

	f, err := os.Open(name)
	if err != nil {
		return false
	}
	defer f.Close()

	fi, err := f.Stat()
	if err != nil {
		return false
	}

	// The archive spans [loc, EOF), so its size is fi.Size()-loc. Passing
	// the full file size made zip.NewReader look for the end-of-central-
	// directory record beyond the end of the section.
	size := fi.Size() - loc
	readerAt := io.NewSectionReader(f, loc, size)
	_, err = zip.NewReader(readerAt, size)
	return err == nil
}