Example #1
func Decode(data []byte) (m *Ins, err error) {
	if len(data) < OffsetMsgData {
		return nil, ErrInvalidHeader.Format("short length")
	}

	m = &Ins{
		underlay: data,
	}
	if err := ReadHeader(data[:SizeMsgHeader], &m.Length); err != nil {
		return nil, logex.Trace(err)
	}

	// body
	m.MsgId = binary.LittleEndian.Uint64(data[OffsetMsgId:])
	m.Crc = binary.LittleEndian.Uint32(data[OffsetMsgCrc:])

	h := crc32.NewIEEE()
	h.Write(data[OffsetMsgCrcCheck:])
	if m.Crc != h.Sum32() {
		logex.Info(data)
		return nil, ErrChecksumNotMatch.Trace(m.Crc, h.Sum32())
	}

	m.Version = uint8(data[OffsetMsgVer])
	switch m.Version {
	case 1:
		m.Data = data[OffsetMsgData:]
	default:
		return nil, ErrInvalidMessage.Format("unsupported version")
	}

	return m, nil
}
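
The Decode above trusts the payload only after recomputing the IEEE CRC-32 over the trailing bytes and comparing it against the stored value. A minimal, self-contained sketch of the same verify-then-slice pattern, using a hypothetical layout (a 4-byte little-endian CRC followed by the covered payload) rather than this project's real offsets:

import (
	"encoding/binary"
	"errors"
	"hash/crc32"
)

var errChecksum = errors.New("checksum mismatch")

// verifyFrame checks the stored CRC-32 before handing back the payload.
// Layout (hypothetical): frame[0:4] little-endian CRC, frame[4:] payload.
func verifyFrame(frame []byte) ([]byte, error) {
	if len(frame) < 4 {
		return nil, errors.New("short frame")
	}
	stored := binary.LittleEndian.Uint32(frame[:4])
	h := crc32.NewIEEE()
	h.Write(frame[4:]) // the checksum covers everything after the CRC field
	if stored != h.Sum32() {
		return nil, errChecksum
	}
	return frame[4:], nil
}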
Example #2
// Reset discards the Reader z's state and makes it equivalent to the
// result of its original state from NewReader, but reading from r instead.
// This permits reusing a Reader rather than allocating a new one.
func (z *Reader) Reset(r io.Reader) error {
	z.killReadAhead()
	z.r = makeReader(r)
	z.digest = crc32.NewIEEE()
	z.size = 0
	z.err = nil
	z.multistream = true

	// Account for uninitialized values
	if z.blocks <= 0 {
		z.blocks = defaultBlocks
	}
	if z.blockSize <= 512 {
		z.blockSize = defaultBlockSize
	}

	if z.blockPool == nil {
		z.blockPool = make(chan []byte, z.blocks)
		for i := 0; i < z.blocks; i++ {
			z.blockPool <- make([]byte, z.blockSize)
		}
	}

	return z.readHeader(true)
}
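
A short usage sketch of the reuse pattern this enables, written against the standard compress/gzip package, whose Reset has the same shape (the snippet above appears to come from a parallel gzip implementation, so the exact package is an assumption):

import (
	"bytes"
	"compress/gzip"
	"io"
)

// decompressAll reuses one gzip Reader across many independent streams
// instead of allocating a new Reader per stream.
func decompressAll(streams [][]byte) ([][]byte, error) {
	if len(streams) == 0 {
		return nil, nil
	}
	zr, err := gzip.NewReader(bytes.NewReader(streams[0]))
	if err != nil {
		return nil, err
	}
	defer zr.Close()
	out := make([][]byte, 0, len(streams))
	for i, s := range streams {
		if i > 0 {
			// Reset points the existing Reader at the next stream.
			if err := zr.Reset(bytes.NewReader(s)); err != nil {
				return nil, err
			}
		}
		plain, err := io.ReadAll(zr)
		if err != nil {
			return nil, err
		}
		out = append(out, plain)
	}
	return out, nil
}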
Example #3
func (z *Writer) init(w io.Writer, level int) {
	z.wg.Wait()
	digest := z.digest
	if digest != nil {
		digest.Reset()
	} else {
		digest = crc32.NewIEEE()
	}
	z.Header = Header{OS: 255}
	z.w = w
	z.level = level
	z.digest = digest
	z.pushedErr = make(chan struct{}, 0)
	z.results = make(chan result, z.blocks)
	z.err = nil
	z.closed = false
	z.Comment = ""
	z.Extra = nil
	z.ModTime = time.Time{}
	z.wroteHeader = false
	z.currentBuffer = nil
	z.buf = [10]byte{}
	z.prevTail = nil
	z.size = 0
	if z.dictFlatePool.New == nil {
		z.dictFlatePool.New = func() interface{} {
			f, _ := flate.NewWriterDict(w, level, nil)
			return f
		}
	}
}
Example #4
// Open returns a ReadCloser that provides access to the File's contents.
// Multiple files may be read concurrently.
func (f *File) Open() (rc io.ReadCloser, err error) {
	bodyOffset, err := f.findBodyOffset()
	if err != nil {
		return
	}
	size := int64(f.CompressedSize64)
	r := io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset, size)
	dcomp := f.zip.decompressor(f.Method)
	if dcomp == nil {
		err = ErrAlgorithm
		return
	}
	rc = dcomp(r)
	var desr io.Reader
	if f.hasDataDescriptor() {
		desr = io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset+size, dataDescriptorLen)
	}
	rc = &checksumReader{
		rc:   rc,
		hash: crc32.NewIEEE(),
		f:    f,
		desr: desr,
	}
	return
}
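
The checksumReader wrapped around the decompressor verifies the entry's stored CRC-32 as the body is consumed, so ordinary reads surface corruption as an error. A small usage sketch with the standard archive/zip package:

import (
	"archive/zip"
	"fmt"
	"io"
)

// dumpSizes prints the uncompressed size of every entry in an archive.
// The CRC-32 recorded for each entry is checked while its body is read.
func dumpSizes(path string) error {
	zr, err := zip.OpenReader(path)
	if err != nil {
		return err
	}
	defer zr.Close()
	for _, f := range zr.File {
		rc, err := f.Open()
		if err != nil {
			return err
		}
		n, err := io.Copy(io.Discard, rc)
		rc.Close()
		if err != nil {
			return err
		}
		fmt.Printf("%s: %d bytes\n", f.Name, n)
	}
	return nil
}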
Example #5
func (z *Writer) init(w io.Writer, level int) {
	digest := z.digest
	if digest != nil {
		digest.Reset()
	} else {
		digest = crc32.NewIEEE()
	}

	*z = Writer{
		Header: Header{
			OS: 255, // unknown
		},
		w:         w,
		level:     level,
		digest:    digest,
		pushedErr: make(chan error, 1),
		results:   make(chan result, z.blocks),
		blockSize: z.blockSize,
		blocks:    z.blocks,
	}
	z.dictFlatePool = &sync.Pool{
		New: func() interface{} {
			f, _ := flate.NewWriterDict(w, level, nil)
			return f
		},
	}
	z.dstPool = &sync.Pool{New: func() interface{} { return make([]byte, 0, z.blockSize) }}
}
Example #6
// Reset discards the Reader z's state and makes it equivalent to the
// result of its original state from NewReader, but reading from r instead.
// This permits reusing a Reader rather than allocating a new one.
func (z *Reader) Reset(r io.Reader) error {
	z.r = makeReader(r)
	z.digest = crc32.NewIEEE()
	z.size = 0
	z.err = nil
	z.multistream = true
	return z.readHeader(true)
}
Example #7
// NewReader creates a new Reader reading the given reader.
// If r does not also implement io.ByteReader,
// the decompressor may read more data than necessary from r.
// It is the caller's responsibility to call Close on the Reader when done.
func NewReader(r io.Reader) (*Reader, error) {
	z := new(Reader)
	z.r = makeReader(r)
	z.multistream = true
	z.digest = crc32.NewIEEE()
	if err := z.readHeader(true); err != nil {
		return nil, err
	}
	return z, nil
}
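
A minimal usage sketch with the standard compress/gzip package; reading to end of stream is what verifies the CRC-32 and length stored in the gzip trailer:

import (
	"bytes"
	"compress/gzip"
	"io"
)

// gunzip decompresses a gzip buffer. A trailer mismatch is reported as
// gzip.ErrChecksum from the read.
func gunzip(compressed []byte) ([]byte, error) {
	zr, err := gzip.NewReader(bytes.NewReader(compressed))
	if err != nil {
		return nil, err
	}
	defer zr.Close()
	return io.ReadAll(zr)
}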
Example #8
// Support the io.WriteTo interface for io.Copy and friends.
func (z *Reader) WriteTo(w io.Writer) (int64, error) {
	total := int64(0)
	crcWriter := crc32.NewIEEE()
	for {
		if z.err != nil {
			if z.err == io.EOF {
				return total, nil
			}
			return total, z.err
		}

		// We write both to output and digest.
		mw := io.MultiWriter(w, crcWriter)
		n, err := z.decompressor.(io.WriterTo).WriteTo(mw)
		total += n
		z.size += uint32(n)
		if err != nil {
			z.err = err
			return total, z.err
		}

		// Finished file; check checksum + size.
		if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil {
			if err == io.EOF {
				err = io.ErrUnexpectedEOF
			}
			z.err = err
			return total, err
		}
		z.digest = crcWriter.Sum32()
		digest := le.Uint32(z.buf[:4])
		size := le.Uint32(z.buf[4:8])
		if digest != z.digest || size != z.size {
			z.err = ErrChecksum
			return total, z.err
		}
		z.digest, z.size = 0, 0

		// File is ok; check if there is another.
		if !z.multistream {
			return total, nil
		}
		crcWriter.Reset()
		z.err = nil // Remove io.EOF

		if _, z.err = z.readHeader(); z.err != nil {
			if z.err == io.EOF {
				return total, nil
			}
			return total, z.err
		}
	}
}
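
Because the Reader implements io.WriterTo, io.Copy uses this method and streams decompressed data straight to the destination while the MultiWriter feeds every byte through the CRC-32 digest. A brief sketch of driving it through io.Copy:

import (
	"io"
	"os"
)

// streamOut decompresses directly into a file. io.Copy detects that the
// source implements io.WriterTo and calls WriteTo, avoiding its own
// intermediate buffer.
func streamOut(zr io.Reader, path string) (int64, error) {
	f, err := os.Create(path)
	if err != nil {
		return 0, err
	}
	defer f.Close()
	return io.Copy(f, zr)
}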
Example #9
// NewReaderN creates a new Reader reading the given reader.
// The implementation buffers input and may read more data than necessary from r.
// It is the caller's responsibility to call Close on the Reader when done.
//
// With this you can control the approximate size of your blocks,
// as well as how many blocks you want to have prefetched.
//
// The default values are blockSize = 250000 and blocks = 16,
// meaning up to 16 blocks of at most 250000 bytes each will be
// prefetched.
func NewReaderN(r io.Reader, blockSize, blocks int) (*Reader, error) {
	z := new(Reader)
	z.blocks = blocks
	z.blockSize = blockSize
	z.r = makeReader(r)
	z.digest = crc32.NewIEEE()
	z.multistream = true
	if err := z.readHeader(true); err != nil {
		return nil, err
	}
	return z, nil
}
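
A usage sketch tuning the prefetch parameters; it assumes the snippet is the klauspost/pgzip package, so the import path below is an assumption:

import (
	"io"
	"os"

	gzip "github.com/klauspost/pgzip" // assumed import path for the package above
)

// readBig decompresses a large file with 32 prefetched blocks of about
// 1 MB each, trading roughly 32 MB of buffers for more read-ahead.
func readBig(path string, dst io.Writer) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	zr, err := gzip.NewReaderN(f, 1<<20, 32)
	if err != nil {
		return err
	}
	defer zr.Close()
	_, err = io.Copy(dst, zr)
	return err
}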
Example #10
// NewReader creates a new Reader reading the given reader.
// The implementation buffers input and may read more data than necessary from r.
// It is the caller's responsibility to call Close on the Reader when done.
func NewReader(r io.Reader) (*Reader, error) {
	z := new(Reader)
	z.blocks = defaultBlocks
	z.blockSize = defaultBlockSize
	z.r = makeReader(r)
	z.digest = crc32.NewIEEE()
	z.multistream = true
	z.blockPool = make(chan []byte, z.blocks)
	for i := 0; i < z.blocks; i++ {
		z.blockPool <- make([]byte, z.blockSize)
	}
	if err := z.readHeader(true); err != nil {
		return nil, err
	}
	return z, nil
}
Example #11
// CreateHeader adds a file to the zip file using the provided FileHeader
// for the file metadata.
// It returns a Writer to which the file contents should be written.
//
// The file's contents must be written to the io.Writer before the next
// call to Create, CreateHeader, or Close. The provided FileHeader fh
// must not be modified after a call to CreateHeader.
func (w *Writer) CreateHeader(fh *FileHeader) (io.Writer, error) {
	if w.last != nil && !w.last.closed {
		if err := w.last.close(); err != nil {
			return nil, err
		}
	}
	if len(w.dir) > 0 && w.dir[len(w.dir)-1].FileHeader == fh {
		// See https://golang.org/issue/11144 confusion.
		return nil, errors.New("archive/zip: invalid duplicate FileHeader")
	}

	fh.Flags |= 0x8 // we will write a data descriptor

	fh.CreatorVersion = fh.CreatorVersion&0xff00 | zipVersion20 // preserve compatibility byte
	fh.ReaderVersion = zipVersion20

	fw := &fileWriter{
		zipw:      w.cw,
		compCount: &countWriter{w: w.cw},
		crc32:     crc32.NewIEEE(),
	}
	comp := w.compressor(fh.Method)
	if comp == nil {
		return nil, ErrAlgorithm
	}
	var err error
	fw.comp, err = comp(fw.compCount)
	if err != nil {
		return nil, err
	}
	fw.rawCount = &countWriter{w: fw.comp}

	h := &header{
		FileHeader: fh,
		offset:     uint64(w.cw.count),
	}
	w.dir = append(w.dir, h)
	fw.header = h

	if err := writeHeader(w.cw, fh); err != nil {
		return nil, err
	}

	w.last = fw
	return fw, nil
}
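
A usage sketch with the standard archive/zip package; the crc32.NewIEEE digest created above accumulates each entry's checksum as its contents are written, and the value is recorded when the entry is finished (on the next CreateHeader or on Close):

import (
	"archive/zip"
	"bytes"
)

// buildZip writes the given files into an in-memory archive.
func buildZip(files map[string][]byte) ([]byte, error) {
	buf := new(bytes.Buffer)
	zw := zip.NewWriter(buf)
	for name, data := range files {
		fw, err := zw.CreateHeader(&zip.FileHeader{
			Name:   name,
			Method: zip.Deflate,
		})
		if err != nil {
			return nil, err
		}
		if _, err := fw.Write(data); err != nil {
			return nil, err
		}
	}
	if err := zw.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}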
Example #12
func hashesForReader(in io.Reader) (*Hashes, error) {
	hSha1 := sha1.New()
	hMd5 := md5.New()
	hCrc := crc32.NewIEEE()

	w := io.MultiWriter(hSha1, hMd5, hCrc)

	_, err := io.Copy(w, in)
	if err != nil {
		return nil, err
	}

	res := new(Hashes)
	res.Crc = hCrc.Sum(nil)
	res.Md5 = hMd5.Sum(nil)
	res.Sha1 = hSha1.Sum(nil)

	return res, nil
}
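
A short usage sketch hashing a file in one pass, assuming hashesForReader and its Hashes type (with []byte fields) are in scope from the snippet above. Note that crc32's Sum appends the checksum in big-endian byte order:

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
	"os"
)

// hashFile computes CRC-32, MD5, and SHA-1 of a file in a single read.
func hashFile(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	h, err := hashesForReader(f)
	if err != nil {
		return err
	}
	// Sum(nil) on a crc32 digest yields the 4 checksum bytes big-endian.
	fmt.Printf("crc32: %08x\n", binary.BigEndian.Uint32(h.Crc))
	fmt.Printf("md5:   %s\n", hex.EncodeToString(h.Md5))
	fmt.Printf("sha1:  %s\n", hex.EncodeToString(h.Sha1))
	return nil
}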
Example #13
// CreateHeader adds a file to the zip file using the provided FileHeader
// for the file metadata.
// It returns a Writer to which the file contents should be written.
// The file's contents must be written to the io.Writer before the next
// call to Create, CreateHeader, or Close.
func (w *Writer) CreateHeader(fh *FileHeader) (io.Writer, error) {
	if w.last != nil && !w.last.closed {
		if err := w.last.close(); err != nil {
			return nil, err
		}
	}

	fh.Flags |= 0x8 // we will write a data descriptor

	fh.CreatorVersion = fh.CreatorVersion&0xff00 | zipVersion20 // preserve compatibility byte
	fh.ReaderVersion = zipVersion20

	fw := &fileWriter{
		zipw:      w.cw,
		compCount: &countWriter{w: w.cw},
		crc32:     crc32.NewIEEE(),
	}
	comp := compressor(fh.Method)
	if comp == nil {
		return nil, ErrAlgorithm
	}
	var err error
	fw.comp, err = comp(fw.compCount)
	if err != nil {
		return nil, err
	}
	fw.rawCount = &countWriter{w: fw.comp}

	h := &header{
		FileHeader: fh,
		offset:     uint64(w.cw.count),
	}
	w.dir = append(w.dir, h)
	fw.header = h

	if err := writeHeader(w.cw, fh); err != nil {
		return nil, err
	}

	w.last = fw
	return fw, nil
}
Example #14
func NewByData(data *Data) *Ins {
	underlay := data.underlay
	underlay[OffsetMsgVer] = byte(1)
	h := crc32.NewIEEE()
	h.Write(underlay[OffsetMsgVer:])

	m := &Ins{
		Length:   uint32(len(underlay) - OffsetMsgBody),
		Crc:      h.Sum32(),
		Version:  1,
		Data:     data.Bytes(),
		underlay: underlay,
	}

	copy(underlay, MagicBytes)
	binary.LittleEndian.PutUint32(underlay[OffsetMsgLength:], m.Length)
	binary.LittleEndian.PutUint64(underlay[OffsetMsgId:], 0)
	binary.LittleEndian.PutUint32(underlay[OffsetMsgCrc:], m.Crc)

	return m
}
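
NewByData is the encode-side counterpart of Example #1: it computes the CRC over everything from the version byte onward and stores the result in the header. A hedged sketch of the same seal-then-store step, again with a hypothetical 4-byte-CRC-then-payload layout instead of this project's real offsets:

import (
	"encoding/binary"
	"hash/crc32"
)

// sealFrame prepends a little-endian CRC-32 of the payload, mirroring
// the verifyFrame sketch shown after Example #1 (hypothetical layout).
func sealFrame(payload []byte) []byte {
	frame := make([]byte, 4+len(payload))
	copy(frame[4:], payload)
	h := crc32.NewIEEE()
	h.Write(frame[4:])
	binary.LittleEndian.PutUint32(frame[:4], h.Sum32())
	return frame
}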
Example #15
func (z *Writer) init(w io.Writer, level int) {
	digest := z.digest
	if digest != nil {
		digest.Reset()
	} else {
		digest = crc32.NewIEEE()
	}
	compressor := z.compressor
	if compressor != nil {
		compressor.Reset(w)
	}
	*z = Writer{
		Header: Header{
			OS: 255, // unknown
		},
		w:          w,
		level:      level,
		digest:     digest,
		compressor: compressor,
	}
}
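
This appears to be the standard compress/gzip writer initializer, reached via NewWriter, NewWriterLevel, and Reset. A usage sketch with that package; Close flushes the final block and writes the CRC-32 and size trailer:

import (
	"bytes"
	"compress/gzip"
)

// gzipBytes compresses data at the given level. Close must be called so
// the trailer (CRC-32 and uncompressed size) is written.
func gzipBytes(data []byte, level int) ([]byte, error) {
	buf := new(bytes.Buffer)
	zw, err := gzip.NewWriterLevel(buf, level)
	if err != nil {
		return nil, err
	}
	if _, err := zw.Write(data); err != nil {
		return nil, err
	}
	if err := zw.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}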
Example #16
func (z *Writer) init(w io.Writer, level int) {
	digest := z.digest
	if digest != nil {
		digest.Reset()
	} else {
		digest = crc32.NewIEEE()
	}

	*z = Writer{
		Header: Header{
			OS: 255, // unknown
		},
		w:         w,
		level:     level,
		digest:    digest,
		pushedErr: make(chan error, 1),
		results:   make(chan result, z.blocks),
		blockSize: z.blockSize,
		blocks:    z.blocks,
	}
}
Example #17
// NewReaderN creates a new Reader reading the given reader.
// The implementation buffers input and may read more data than necessary from r.
// It is the caller's responsibility to call Close on the Reader when done.
//
// With this you can control the approximate size of your blocks,
// as well as how many blocks you want to have prefetched.
//
// The default values are blockSize = 250000 and blocks = 16,
// meaning up to 16 blocks of at most 250000 bytes each will be
// prefetched.
func NewReaderN(r io.Reader, blockSize, blocks int) (*Reader, error) {
	z := new(Reader)
	z.blocks = blocks
	z.blockSize = blockSize
	z.r = makeReader(r)
	z.digest = crc32.NewIEEE()
	z.multistream = true

	// Account for too small values
	if z.blocks <= 0 {
		z.blocks = defaultBlocks
	}
	if z.blockSize <= 512 {
		z.blockSize = defaultBlockSize
	}
	z.blockPool = make(chan []byte, z.blocks)
	for i := 0; i < z.blocks; i++ {
		z.blockPool <- make([]byte, z.blockSize)
	}
	if err := z.readHeader(true); err != nil {
		return nil, err
	}
	return z, nil
}
Example #18
func (hh *Hashes) forReader(in io.Reader) error {
	br := bufio.NewReader(in)

	hSha1 := sha1.New()
	hMd5 := md5.New()
	hCrc := crc32.NewIEEE()

	w := io.MultiWriter(hSha1, hMd5, hCrc)
	cw := &countWriter{
		w: w,
	}

	_, err := io.Copy(cw, br)
	if err != nil {
		return err
	}

	hh.Crc = hCrc.Sum(hh.Crc[0:0])
	hh.Md5 = hMd5.Sum(hh.Md5[0:0])
	hh.Sha1 = hSha1.Sum(hh.Sha1[0:0])
	hh.Size = cw.count

	return nil
}
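
countWriter is not defined in the snippet; a minimal sketch of what such a counting writer might look like (the real definition may differ):

import "io"

// countWriter forwards writes to w and tracks the total bytes written,
// which forReader above records as the Size of the hashed input.
type countWriter struct {
	w     io.Writer
	count int64
}

func (c *countWriter) Write(p []byte) (int, error) {
	n, err := c.w.Write(p)
	c.count += int64(n)
	return n, err
}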