Example #1
func GetRawTileFromMetatile(r io.ReadSeeker, coord gopnik.TileCoord) ([]byte, error) {
	ml, err := decodeMetatileHeader(r)
	if err != nil {
		return nil, err
	}

	size := int32(math.Sqrt(float64(ml.Count)))
	index := (int32(coord.X)-ml.X)*size + (int32(coord.Y) - ml.Y)
	if index < 0 || index >= ml.Count {
		return nil, fmt.Errorf("Invalid index %v/%v", index, ml.Count)
	}
	entry := ml.Index[index]
	if entry.Size > MAXENTRYSIZE {
		return nil, fmt.Errorf("entry size > MAXENTRYSIZE (size: %v)", entry.Size)
	}
	if _, err := r.Seek(int64(entry.Offset), 0); err != nil {
		return nil, err
	}
	buf := make([]byte, entry.Size)
	l, err := r.Read(buf)
	if err != nil {
		return nil, err
	}
	if int32(l) != entry.Size {
		return nil, fmt.Errorf("Invalid tile seze: %v != %v", l, entry.Size)
	}
	return buf, nil
}
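
A minimal, self-contained sketch (standard library only) of the same seek-then-read-exact pattern; io.ReadFull guards against short reads from a single Read call. The readAt helper and the sample data are illustrative, not part of the gopnik package above.

package main

import (
	"bytes"
	"fmt"
	"io"
)

// readAt reads exactly size bytes starting at off from an io.ReadSeeker.
func readAt(r io.ReadSeeker, off int64, size int) ([]byte, error) {
	if _, err := r.Seek(off, io.SeekStart); err != nil {
		return nil, err
	}
	buf := make([]byte, size)
	if _, err := io.ReadFull(r, buf); err != nil {
		return nil, fmt.Errorf("short read at offset %d: %v", off, err)
	}
	return buf, nil
}

func main() {
	r := bytes.NewReader([]byte("headerPAYLOADtrailer"))
	b, err := readAt(r, 6, 7)
	fmt.Println(string(b), err) // PAYLOAD <nil>
}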
Example #2
func NewFS(r io.ReadSeeker) *FS {
	bpb32 := &BPB32{}
	err := binary.Read(r, binary.LittleEndian, bpb32)
	if err != nil {
		// TODO error handling
	}

	fs := &FS{bpb32, UnknownType, r}
	t := fs.DetermineType()
	switch t {
	case FAT32:
		fs.Type = FAT32
		return fs
	case FAT12, FAT16:
		// reread the BPB, this time for the correct fs type
		bpb16 := &BPB16{}
		r.Seek(0, 0)
		err := binary.Read(r, binary.LittleEndian, bpb16)
		if err != nil {
			// TODO error handling
		}
		bpb32 = &BPB32{bpb16.BPBBase, BPB32Base{0, 0, 0, 0, 0, 0, [12]byte{}}, bpb16.BPB16Base}
		fs = &FS{bpb32, t, r}
	}

	return fs
}
Example #3
func (t *TarInfo) Load(file io.ReadSeeker) {
	var reader *tar.Reader
	file.Seek(0, 0)
	gzipReader, err := gzip.NewReader(file)
	if err != nil {
		// likely not a gzip compressed file
		file.Seek(0, 0)
		reader = tar.NewReader(file)
	} else {
		reader = tar.NewReader(gzipReader)
	}
	for {
		header, err := reader.Next()
		if err == io.EOF {
			// end of tar file
			break
		} else if err != nil {
			// an error occurred
			logger.Debug("[TarInfoLoad] Error when reading tar stream tarsum. Disabling TarSum, TarFilesInfo. Error: %s", err.Error())
			t.Error = TarError(err.Error())
			return
		}
		t.TarSum.Append(header, reader)
		t.TarFilesInfo.Append(header)
	}
}
Example #4
func ReadLine(file io.ReadSeeker) (string, error) {
	localBuffer := make([]byte, 0)
	offset, err := file.Seek(0, 1)
	if err != nil {
		return "", err
	}
	if lastOffset != offset {
		nextIndex = bufferSize
	}
	for {
		newChar, err := getNext(file)
		if err != nil {
			return "", err
		}
		localBuffer = append(localBuffer, newChar)
		if newChar == '\n' {
			offset += int64(len(localBuffer))
			lastOffset = offset
			_, err = file.Seek(offset, 0)
			if err != nil {
				return "", err
			}
			return string(localBuffer), nil
		}
	}
}
Example #5
func (p *putter) retryRequest(method, urlStr string, body io.ReadSeeker, h http.Header) (resp *http.Response, err error) {
	for i := 0; i < p.c.NTry; i++ {
		var req *http.Request
		req, err = http.NewRequest(method, urlStr, body)
		if err != nil {
			return
		}
		for k := range h {
			for _, v := range h[k] {
				req.Header.Add(k, v)
			}
		}

		p.b.Sign(req)
		resp, err = p.c.Client.Do(req)
		if err == nil {
			return
		}
		logger.debugPrintln(err)
		if body != nil {
			if _, err = body.Seek(0, 0); err != nil {
				return
			}
		}
	}
	return
}
Example #6
func (table *hheaTable) init(file io.ReadSeeker, entry *tableDirEntry) (err error) {
	if _, err = file.Seek(int64(entry.offset), os.SEEK_SET); err != nil {
		return
	}
	// No advantage to using a buffered reader here.
	if err = table.version.Read(file); err != nil {
		return
	}
	err = readValues(file,
		&table.ascent,
		&table.descent,
		&table.lineGap,
		&table.advanceWidthMax,
		&table.minLeftSideBearing,
		&table.minRightSideBearing,
		&table.xMaxExtent,
		&table.caretSlopeRise,
		&table.caretSlopeRun,
		&table.caretOffset,
		&table.reserved1,
		&table.reserved2,
		&table.reserved3,
		&table.reserved4,
		&table.metricDataFormat,
		&table.numOfLongHorMetrics,
	)
	return
}
Example #7
func ChooseCompressAlgo(path string, rs io.ReadSeeker) (AlgorithmType, error) {
	buf := make([]byte, Threshold)

	bytesRead, err := rs.Read(buf)
	if err != nil {
		return AlgoNone, err
	}

	if _, errSeek := rs.Seek(0, os.SEEK_SET); errSeek != nil {
		return AlgoNone, errSeek
	}

	mime := guessMime(path, buf)
	compressAble := isCompressable(mime)

	if !compressAble || int64(bytesRead) != Threshold {
		return AlgoNone, nil
	}

	if strings.HasPrefix(mime, "text/") {
		return AlgoLZ4, nil
	} else {
		return AlgoSnappy, nil
	}
}
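
A minimal, self-contained sketch (standard library only) of the same sniff-then-rewind idea, using http.DetectContentType in place of the project's guessMime; the sniffType helper is illustrative.

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

// sniffType reads at most 512 bytes (all DetectContentType looks at),
// rewinds the reader, and returns the guessed MIME type.
func sniffType(rs io.ReadSeeker) (string, error) {
	buf := make([]byte, 512)
	n, err := rs.Read(buf)
	if err != nil && err != io.EOF {
		return "", err
	}
	if _, err := rs.Seek(0, io.SeekStart); err != nil {
		return "", err
	}
	return http.DetectContentType(buf[:n]), nil
}

func main() {
	r := bytes.NewReader([]byte("<html><body>hello</body></html>"))
	mime, err := sniffType(r)
	fmt.Println(mime, err) // text/html; charset=utf-8 <nil>
}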
Example #8
// Skips both the ID3V2 tags and optional VBR headers
func getFirstRealFrameOffset(src io.ReadSeeker) (int64, error) {
	var hdr FrameHeader
	var xing XingHeader

	off, err := getFirstFrameOffset(src)
	if err != nil {
		return 0, err
	}

	_, err = src.Seek(off, 0)
	if err != nil {
		return 0, err
	}

	bs := make([]byte, 8192)

	_, err = io.ReadAtLeast(src, bs, 4)
	if err != nil {
		return 0, err
	}

	err = hdr.Parse(bs)
	if err != nil {
		return 0, err
	}

	if xing.Parse(bs[:int(hdr.Size)]) {
		return off + hdr.Size, nil
	}

	return off, nil
}
Example #9
func checkSignature(ks *Keystore, prefix string, signed, signature io.ReadSeeker) (*openpgp.Entity, error) {
	acidentifier, err := types.NewACIdentifier(prefix)
	if err != nil {
		return nil, err
	}
	keyring, err := ks.loadKeyring(acidentifier.String())
	if err != nil {
		return nil, fmt.Errorf("keystore: error loading keyring %v", err)
	}
	entities, err := openpgp.CheckArmoredDetachedSignature(keyring, signed, signature)
	if err == io.EOF {
		// When the signature is binary instead of armored, the error is io.EOF.
		// Let's try with binary signatures as well
		if _, err := signed.Seek(0, 0); err != nil {
			return nil, fmt.Errorf("error seeking ACI file: %v", err)
		}
		if _, err := signature.Seek(0, 0); err != nil {
			return nil, fmt.Errorf("error seeking signature file: %v", err)
		}
		entities, err = openpgp.CheckDetachedSignature(keyring, signed, signature)
	}
	if err == io.EOF {
		// otherwise, the client failure is just "EOF", which is not helpful
		return nil, fmt.Errorf("keystore: no valid signatures found in signature file")
	}
	return entities, err
}
Example #10
func (table *headTable) init(file io.ReadSeeker, entry *tableDirEntry) (err error) {
	if _, err = file.Seek(int64(entry.offset), os.SEEK_SET); err != nil {
		return
	}
	// No advantage to using a buffered reader here.
	if err = table.version.Read(file); err != nil {
		return
	}
	if err = table.fontRevision.Read(file); err != nil {
		return
	}
	err = readValues(file,
		&table.checkSumAdjustment,
		&table.magicNumber,
		&table.flags,
		&table.unitsPerEm,
		&table.created,
		&table.modified,
		&table.xMin,
		&table.yMin,
		&table.xMax,
		&table.yMax,
		&table.macStyle,
		&table.lowestRecPPEM,
		&table.fontDirectionHint,
		&table.indexToLocFormat,
		&table.glyphDataFormat,
	)
	return
}
Example #11
func NewMp4Media(reader io.ReadSeeker) (fd *Mp4Media, err error) {
	fd = &Mp4Media{}
	h := next_box_header(reader)
	ftyp := next_box_body(reader, h).to_ftyp()
	fd.brand = string(ftyp.major_brand[:])
	log.Println(h.box_type(), ftyp.major_brand)

L:
	for {
		h = next_box_header(reader)
		if h.size == 0 {
			break
		}
		switch string(h.typ[:]) {
		default:
			reader.Seek(int64(h.body_size), 1)
		case "moov":
			//			mo, _ := reader.Seek(0, 1)
			//			fd.moov_offset = mo - (h.size - h.body_size)
			fd.moov_body_length = h.body_size
			fd.from_moov(next_box_body(reader, h))
			break L
		case "mdat":
			//			mo, _ := reader.Seek(0, 1)
			//			fd.mdat_offset = mo - (h.size - h.body_size)
			fd.mdat_body_length = h.body_size
			reader.Seek(int64(h.body_size), 1)
		}
	}
	return
}
Example #12
// dumpChunk extracts a compressed chunk from the given reader and
// dumps its NBT tag contents.
func dumpChunk(w io.Writer, r io.ReadSeeker, offset int64) error {
	address := offset*sectorSize + 4
	_, err := r.Seek(address, 0)
	if err != nil {
		return err
	}

	var scheme [1]byte
	_, err = io.ReadFull(r, scheme[:])
	if err != nil {
		return err
	}

	var rr io.ReadCloser
	switch scheme[0] {
	case 1:
		rr, err = gzip.NewReader(r)
	case 2:
		rr, err = zlib.NewReader(r)
	default:
		return fmt.Errorf("chunk(%d); invalid compression scheme: %d", offset, scheme[0])
	}

	if err != nil {
		return err
	}

	err = dump(w, rr)
	rr.Close()
	return err
}
Example #13
func compile(source io.ReadSeeker, targetname string) os.Error {
	O, err := getArchSym()
	if err != nil {
		return err
	}
	gc := O + "g"
	gl := O + "l"
	tempobj := path.Join(os.TempDir(), targetname+"."+O)

	_, err = source.Seek(0, 0)
	if err != nil {
		return err
	}
	bufsource := bufio.NewReader(source)
	var insource io.Reader = bufsource
	if line, err := bufsource.ReadString('\n'); err != nil && err != os.EOF ||
		len(line) < 2 || line[:2] != "#!" {
		_, err := source.Seek(0, 0)
		if err != nil {
			return err
		}
		insource = source
	}

	err = run(gc, []string{gc, "-o", tempobj, "/dev/stdin"}, insource)
	if err != nil {
		return err
	}
	err = run(gl, []string{gl, "-o", path.Join(storedir, targetname),
		tempobj},
		nil)
	return err
}
Example #14
func (cOff chunkOffTs) readPreChunk(r io.ReadSeeker) (*preChunk, error) {
	pc := preChunk{ts: cOff.ts}

	if _, err := r.Seek(cOff.offset, 0); err != nil {
		return nil, err
	}

	lr := io.LimitReader(r, cOff.size)

	var length uint32
	if err := binary.Read(lr, binary.BigEndian, &length); err != nil {
		return nil, err
	}
	lr = io.LimitReader(lr, int64(length))

	compType, err := kagus.ReadByte(lr)
	if err != nil {
		return nil, err
	}
	pc.compression = compType

	buf := new(bytes.Buffer)
	if _, err := io.Copy(buf, lr); err != nil {
		return nil, err
	}
	pc.data = buf.Bytes()

	return &pc, err

}
Example #15
func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader {
	reader := &offsetReader{}
	buf.Seek(offset, 0)

	reader.buf = buf
	return reader
}
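
When the underlying value also implements io.ReaderAt (as *os.File and *bytes.Reader do), the standard library's io.NewSectionReader can often stand in for a hand-rolled offsetReader; a minimal illustrative sketch:

package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	src := bytes.NewReader([]byte("0123456789"))
	// A reader that starts at offset 4 and exposes 3 bytes.
	section := io.NewSectionReader(src, 4, 3)
	b, err := io.ReadAll(section)
	fmt.Println(string(b), err) // 456 <nil>
}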
Example #16
func (f *nameFetcher) validate(app *discovery.App, aciFile, ascFile io.ReadSeeker) error {
	v, err := newValidator(aciFile)
	if err != nil {
		return err
	}

	if err := v.ValidateName(app.Name.String()); err != nil {
		return err
	}

	if err := v.ValidateLabels(app.Labels); err != nil {
		return err
	}

	entity, err := v.ValidateWithSignature(f.Ks, ascFile)
	if err != nil {
		return err
	}

	if _, err := aciFile.Seek(0, 0); err != nil {
		return errwrap.Wrap(errors.New("error seeking ACI file"), err)
	}

	printIdentities(entity)
	return nil
}
Example #17
File: gpt.go Project: rekby/gpt
// ReadTable reads a GPT partition table.
// The reader must be positioned at the first byte of the GPT header
// (usually the start of the second sector on disk).
func ReadTable(reader io.ReadSeeker, SectorSize uint64) (table Table, err error) {
	table.SectorSize = SectorSize
	table.Header, err = readHeader(reader, SectorSize)
	if err != nil {
		return
	}
	if seekDest, ok := mul(int64(SectorSize), int64(table.Header.PartitionsTableStartLBA)); ok {
		reader.Seek(seekDest, 0)
	} else {
		err = fmt.Errorf("Seek overflow when read partition tables")
		return
	}
	for i := uint32(0); i < table.Header.PartitionsArrLen; i++ {
		var p Partition
		p, err = readPartition(reader, table.Header.PartitionEntrySize)
		if err != nil {
			return
		}
		table.Partitions = append(table.Partitions, p)
	}

	if table.Header.PartitionsCRC != table.calcPartitionsCRC() {
		err = fmt.Errorf("Bad partitions crc")
		return
	}
	return
}
Example #18
func blobDetails(contents io.ReadSeeker) (bref blob.Ref, size uint32, err error) {
	s1 := sha1.New()
	if _, err = contents.Seek(0, 0); err != nil {
		return
	}
	defer func() {
		if _, seekErr := contents.Seek(0, 0); seekErr != nil {
			if err == nil {
				err = seekErr
			} else {
				err = fmt.Errorf("%s, cannot seek back: %v", err, seekErr)
			}
		}
	}()
	sz, err := io.CopyN(s1, contents, constants.MaxBlobSize+1)
	if err == nil || err == io.EOF {
		bref, err = blob.RefFromHash(s1), nil
	} else {
		err = fmt.Errorf("error reading contents: %v", err)
		return
	}
	if sz > constants.MaxBlobSize {
		err = fmt.Errorf("blob size cannot be bigger than %d", constants.MaxBlobSize)
	}
	size = uint32(sz)
	return
}
Example #19
func readStringTable(fh *FileHeader, r io.ReadSeeker) (StringTable, error) {
	// COFF string table is located right after COFF symbol table.
	if fh.PointerToSymbolTable <= 0 {
		return nil, nil
	}
	offset := fh.PointerToSymbolTable + COFFSymbolSize*fh.NumberOfSymbols
	_, err := r.Seek(int64(offset), seekStart)
	if err != nil {
		return nil, fmt.Errorf("fail to seek to string table: %v", err)
	}
	var l uint32
	err = binary.Read(r, binary.LittleEndian, &l)
	if err != nil {
		return nil, fmt.Errorf("fail to read string table length: %v", err)
	}
	// string table length includes itself
	if l <= 4 {
		return nil, nil
	}
	l -= 4
	buf := make([]byte, l)
	_, err = io.ReadFull(r, buf)
	if err != nil {
		return nil, fmt.Errorf("fail to read string table: %v", err)
	}
	return StringTable(buf), nil
}
Example #20
// Parses a new tag
func ParseTag(readSeeker io.ReadSeeker) *Tag {
	header := ParseHeader(readSeeker)

	if header == nil {
		return nil
	}

	t := NewTag(header.version)
	t.Header = header

	var frame Framer
	size := int(t.size)
	for size > 0 {
		frame = t.frameConstructor(readSeeker)

		if frame == nil {
			break
		}

		id := frame.Id()
		t.frames[id] = append(t.frames[id], frame)
		frame.setOwner(t)

		size -= t.frameHeaderSize + int(frame.Size())
	}

	t.padding = uint(size)
	if _, err := readSeeker.Seek(int64(HeaderSize+t.Size()), os.SEEK_SET); err != nil {
		return nil
	}

	return t
}
Example #21
func (g *getter) retryRequest(method, urlStr string, body io.ReadSeeker) (resp *http.Response, err error) {
	for i := 0; i < g.c.NTry; i++ {
		time.Sleep(time.Duration(math.Exp2(float64(i))) * 100 * time.Millisecond) // exponential back-off
		var req *http.Request
		req, err = http.NewRequest(method, urlStr, body)
		if err != nil {
			logger.debugPrintf("NewRequest error on attempt %d: retrying url: %s, error: %s", i, urlStr, err)
			return
		}
		g.b.Sign(req)
		resp, err = g.c.Client.Do(req)

		// This is a completely successful request. We check for non error, non nil respond and OK status code.
		// return without retrying.
		if err == nil && resp != nil && resp.StatusCode == 200 {
			return
		}

		logger.debugPrintf("Client error on attempt %d: retrying url: %s, error: %s", i, urlStr, err)

		if body != nil {
			if _, err = body.Seek(0, 0); err != nil {
				logger.debugPrintf("retryRequest body ERROR", errgo.Mask(err))
				return
			}
		}
	}
	return
}
Example #22
func (gateway Gateway) NewRequest(method, path, accessToken string, body io.ReadSeeker) (req *Request, apiErr error) {
	if body != nil {
		body.Seek(0, 0)
	}

	request, err := http.NewRequest(method, path, body)
	if err != nil {
		apiErr = errors.NewWithError("Error building request", err)
		return
	}

	if accessToken != "" {
		request.Header.Set("Authorization", accessToken)
	}

	request.Header.Set("accept", "application/json")
	request.Header.Set("content-type", "application/json")
	request.Header.Set("User-Agent", "go-cli "+cf.Version+" / "+runtime.GOOS)

	if body != nil {
		switch v := body.(type) {
		case *os.File:
			fileStats, err := v.Stat()
			if err != nil {
				break
			}
			request.ContentLength = fileStats.Size()
		}
	}

	req = &Request{HttpReq: request, SeekableBody: body}
	return
}
Example #23
func (r *multiReadSeeker) Read(b []byte) (int, error) {
	if r.pos == nil {
		r.pos = &pos{0, 0}
	}

	bCap := int64(cap(b))
	buf := bytes.NewBuffer(nil)
	var rdr io.ReadSeeker

	for _, rdr = range r.readers[r.pos.idx:] {
		readBytes, err := io.CopyN(buf, rdr, bCap)
		if err != nil && err != io.EOF {
			return -1, err
		}
		bCap -= readBytes

		if bCap == 0 {
			break
		}
	}

	rdrPos, err := rdr.Seek(0, os.SEEK_CUR)
	if err != nil {
		return -1, err
	}
	r.pos = &pos{r.posIdx[rdr], rdrPos}
	return buf.Read(b)
}
Example #24
func decodeSample(r io.ReadSeeker) (Sample, error) {
	format, length, err := uint32(0), uint32(0), error(nil)

	err = binary.Read(r, binary.BigEndian, &format)
	if err != nil {
		return nil, err
	}

	err = binary.Read(r, binary.BigEndian, &length)
	if err != nil {
		return nil, err
	}

	switch format {
	case TypeCounterSample:
		return decodeCounterSample(r)

	case TypeFlowSample:
		return decodeFlowSample(r)

	default:
		_, err = r.Seek(int64(length), 1)
		if err != nil {
			return nil, err
		}

		return nil, ErrUnknownSampleType
	}
}
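
The default branch above skips an unrecognised sample by seeking forward over its declared length. A minimal, self-contained sketch (standard library only) of that skip-by-relative-seek pattern on length-prefixed records; the record layout here is illustrative, not the sample wire format decoded above.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// walk decodes (type, length) headers and skips each record body with a
// relative seek, the way unknown samples are skipped above.
func walk(r io.ReadSeeker) error {
	for {
		var typ, length uint32
		if err := binary.Read(r, binary.BigEndian, &typ); err != nil {
			if err == io.EOF {
				return nil // clean end of stream
			}
			return err
		}
		if err := binary.Read(r, binary.BigEndian, &length); err != nil {
			return err
		}
		if _, err := r.Seek(int64(length), io.SeekCurrent); err != nil {
			return err
		}
		fmt.Printf("skipped record type=%d length=%d\n", typ, length)
	}
}

func main() {
	var buf bytes.Buffer
	binary.Write(&buf, binary.BigEndian, uint32(7)) // type
	binary.Write(&buf, binary.BigEndian, uint32(3)) // length
	buf.Write([]byte{1, 2, 3})                      // body to skip
	fmt.Println(walk(bytes.NewReader(buf.Bytes())))
}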
Example #25
func maybeDecompress(rs io.ReadSeeker) (io.Reader, error) {
	// TODO(jonboulle): this is a bit redundant with detectValType
	typ, err := aci.DetectFileType(rs)
	if err != nil {
		return nil, err
	}
	if _, err := rs.Seek(0, 0); err != nil {
		return nil, err
	}
	var r io.Reader
	switch typ {
	case aci.TypeGzip:
		r, err = gzip.NewReader(rs)
		if err != nil {
			return nil, fmt.Errorf("error reading gzip: %v", err)
		}
	case aci.TypeBzip2:
		r = bzip2.NewReader(rs)
	case aci.TypeXz:
		r = aci.XzReader(rs)
	case aci.TypeTar:
		r = rs
	case aci.TypeUnknown:
		return nil, errors.New("unknown filetype")
	default:
		// should never happen
		panic(fmt.Sprintf("bad type returned from DetectFileType: %v", typ))
	}
	return r, nil
}
Example #26
File: sum.go Project: andynu/rfk
// SumID3v2 constructs a checksum of MP3 audio file data (assumed to have ID3v2 tags) provided by the
// io.ReadSeeker which is metadata invariant.
func SumID3v2(r io.ReadSeeker) (string, error) {
	header, err := readID3v2Header(r)
	if err != nil {
		return "", fmt.Errorf("error reading ID3v2 header: %v", err)
	}

	_, err = r.Seek(int64(header.Size), os.SEEK_CUR)
	if err != nil {
		return "", fmt.Errorf("error seeking to end of ID3V2 header: %v", err)
	}

	n, err := sizeToEndOffset(r, 128)
	if err != nil {
		return "", fmt.Errorf("error determining read size to ID3v1 header: %v", err)
	}

	// TODO: remove this check?????
	if n < 0 {
		return "", fmt.Errorf("file size must be greater than 128 bytes for MP3: %v bytes", n)
	}

	h := sha1.New()
	_, err = io.CopyN(h, r, n)
	if err != nil {
		return "", fmt.Errorf("error reading %v bytes: %v", n, err)
	}
	return hashSum(h), nil
}
Example #27
// NewUnpacker returns a pointer to Unpacker which can be used to read
// individual Blobs from a pack.
func NewUnpacker(k *crypto.Key, rd io.ReadSeeker) (*Unpacker, error) {
	var err error
	ls := binary.Size(uint32(0))

	// reset to the end to read header length
	_, err = rd.Seek(-int64(ls), 2)
	if err != nil {
		return nil, fmt.Errorf("seeking to read header length failed: %v", err)
	}

	var length uint32
	err = binary.Read(rd, binary.LittleEndian, &length)
	if err != nil {
		return nil, fmt.Errorf("reading header length failed: %v", err)
	}

	// reset to the beginning of the header
	_, err = rd.Seek(-int64(ls)-int64(length), 2)
	if err != nil {
		return nil, fmt.Errorf("seeking to read header length failed: %v", err)
	}

	// read header
	hrd, err := crypto.DecryptFrom(k, io.LimitReader(rd, int64(length)))
	if err != nil {
		return nil, err
	}

	var entries []Blob

	pos := uint(0)
	for {
		e := headerEntry{}
		err = binary.Read(hrd, binary.LittleEndian, &e)
		if err == io.EOF {
			break
		}

		if err != nil {
			return nil, err
		}

		entries = append(entries, Blob{
			Type:   e.Type,
			Length: uint(e.Length),
			ID:     e.ID,
			Offset: pos,
		})

		pos += uint(e.Length)
	}

	p := &Unpacker{
		rd:      rd,
		k:       k,
		Entries: entries,
	}

	return p, nil
}
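
A minimal, self-contained sketch (standard library only) of reading a length stored at the very end of a stream by seeking relative to the end, as the header length is read above; the trailer layout and readTrailer helper are illustrative, not the pack format read above.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// readTrailer reads a uint32 length from the last four bytes, then seeks
// back to the start of the trailer that length describes and returns it.
func readTrailer(r io.ReadSeeker) ([]byte, error) {
	if _, err := r.Seek(-4, io.SeekEnd); err != nil {
		return nil, err
	}
	var length uint32
	if err := binary.Read(r, binary.LittleEndian, &length); err != nil {
		return nil, err
	}
	if _, err := r.Seek(-4-int64(length), io.SeekEnd); err != nil {
		return nil, err
	}
	return io.ReadAll(io.LimitReader(r, int64(length)))
}

func main() {
	var buf bytes.Buffer
	buf.WriteString("....payload")                                  // leading data + trailer body
	binary.Write(&buf, binary.LittleEndian, uint32(len("payload"))) // trailing length
	b, err := readTrailer(bytes.NewReader(buf.Bytes()))
	fmt.Printf("%q %v\n", b, err) // "payload" <nil>
}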
Example #28
File: sum.go Project: andynu/rfk
// Sum creates a checksum of the audio file data provided by the io.ReadSeeker which is metadata
// (ID3, MP4) invariant.
func Sum(r io.ReadSeeker) (string, error) {
	b, err := readBytes(r, 11)
	if err != nil {
		return "", err
	}

	_, err = r.Seek(-11, os.SEEK_CUR)
	if err != nil {
		return "", fmt.Errorf("could not seek back to original position: %v", err)
	}

	switch {
	case string(b[0:4]) == "fLaC":
		return SumFLAC(r)

	case string(b[4:11]) == "ftypM4A":
		return SumAtoms(r)

	case string(b[0:3]) == "ID3":
		return SumID3v2(r)
	}

	h, err := SumID3v1(r)
	if err != nil {
		if err == ErrNotID3v1 {
			return SumAll(r)
		}
		return "", err
	}
	return h, nil
}
Example #29
// computeHash - Calculates MD5 and SHA256 for an input read Seeker.
func (c Client) computeHash(reader io.ReadSeeker) (md5Sum, sha256Sum []byte, size int64, err error) {
	// MD5 and SHA256 hashers.
	var hashMD5, hashSHA256 hash.Hash
	hashMD5 = md5.New()
	hashWriter := io.MultiWriter(hashMD5)
	if c.signature.isV4() {
		hashSHA256 = sha256.New()
		hashWriter = io.MultiWriter(hashMD5, hashSHA256)
	}

	size, err = io.Copy(hashWriter, reader)
	if err != nil {
		return nil, nil, 0, err
	}

	// Seek back reader to the beginning location.
	if _, err := reader.Seek(0, 0); err != nil {
		return nil, nil, 0, err
	}

	// Finalize MD5 and SHA256 sums.
	md5Sum = hashMD5.Sum(nil)
	if c.signature.isV4() {
		sha256Sum = hashSHA256.Sum(nil)
	}
	return md5Sum, sha256Sum, size, nil
}
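
A minimal, self-contained sketch (standard library only) of the same single-pass, multi-hash-then-rewind pattern via io.MultiWriter; hashBoth is an illustrative helper, not part of the client code above.

package main

import (
	"crypto/md5"
	"crypto/sha256"
	"fmt"
	"io"
	"strings"
)

// hashBoth streams the reader once through both hashers, then rewinds so
// the caller can reuse the reader, mirroring computeHash above.
func hashBoth(rs io.ReadSeeker) (md5Sum, sha256Sum []byte, size int64, err error) {
	h1, h2 := md5.New(), sha256.New()
	if size, err = io.Copy(io.MultiWriter(h1, h2), rs); err != nil {
		return nil, nil, 0, err
	}
	if _, err = rs.Seek(0, io.SeekStart); err != nil {
		return nil, nil, 0, err
	}
	return h1.Sum(nil), h2.Sum(nil), size, nil
}

func main() {
	m, s, n, err := hashBoth(strings.NewReader("hello"))
	fmt.Printf("md5=%x\nsha256=%x\nsize=%d err=%v\n", m, s, n, err)
}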
Example #30
// Returns new HTTP request object.
func newTestStreamingRequest(method, urlStr string, dataLength, chunkSize int64, body io.ReadSeeker) (*http.Request, error) {
	if method == "" {
		method = "POST"
	}

	req, err := http.NewRequest(method, urlStr, nil)
	if err != nil {
		return nil, err
	}

	if body == nil {
		// This is added to avoid a panic during ioutil.ReadAll(req.Body).
		// The stack trace can be found here: https://github.com/minio/minio/pull/2074 .
		// This is very similar to https://github.com/golang/go/issues/7527.
		req.Body = ioutil.NopCloser(bytes.NewReader([]byte("")))
	}

	contentLength := calculateStreamContentLength(dataLength, chunkSize)

	req.Header.Set("x-amz-content-sha256", "STREAMING-AWS4-HMAC-SHA256-PAYLOAD")
	req.Header.Set("content-encoding", "aws-chunked")
	req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLength, 10))
	req.Header.Set("content-length", strconv.FormatInt(contentLength, 10))

	if body != nil {
		// Seek back to beginning.
		body.Seek(0, 0)

		// Add body.
		req.Body = ioutil.NopCloser(body)
	}
	req.ContentLength = contentLength

	return req, nil
}