Example #1
// parseVarLength parses a variable length value from a ReadSeeker.
// It returns the [up to] 32-bit value and an error.
func parseVarLength(reader io.ReadSeeker) (uint32, error) {

	// Single byte buffer to read byte by byte.
	var buffer []byte = make([]byte, 1)

	// The number of bytes returned.
	// Should always be 1 unless we reach the EOF
	var num int = 1

	// Result value
	var result uint32 = 0x00

	// Each byte carries 7 bits of the value; a set high bit means another byte follows.
	var first = true
	for (first || (buffer[0]&0x80 == 0x80)) && (num > 0) {
		result = result << 7

		num, _ = reader.Read(buffer)
		result |= (uint32(buffer[0]) & 0x7f)
		first = false
	}

	if num == 0 && !first {
		return result, UnexpectedEndOfFile
	}

	return result, nil
}
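The continuation-bit loop above is easiest to follow on a concrete input. A minimal smoke test, assuming parseVarLength is in the same package (bytes.NewReader satisfies io.ReadSeeker); the helper name below is hypothetical:

// checkVarLength feeds the decoder the two-byte sequence 0x81 0x48,
// whose payloads are 0x01 and 0x48, so it decodes to (0x01<<7)|0x48 = 200.
func checkVarLength() {
	r := bytes.NewReader([]byte{0x81, 0x48})
	v, err := parseVarLength(r)
	fmt.Println(v, err) // 200 <nil>
}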
Example #2
func NewReader(r io.ReadSeeker, length int64) (*Reader, error) {
	footer := make([]byte, FOOTER_SIZE)

	if _, err := r.Seek(length-int64(FOOTER_SIZE), 0); err != nil {
		return nil, err
	}
	_, err := r.Read(footer)
	if err != nil {
		return nil, err
	}

	if string(footer[FOOTER_SIZE-len(MAGIC):FOOTER_SIZE]) != MAGIC {
		return nil, errors.New("invalid sst format")
	}

	_, n := decodeBlockHandle(footer[:])
	indexBlockHandle, n := decodeBlockHandle(footer[n:])

	reader := &Reader{
		reader: r,
		length: length,
	}

	reader.index, err = reader.readBlock(indexBlockHandle)

	return reader, err
}
Example #3
func GetRawTileFromMetatile(r io.ReadSeeker, coord gopnik.TileCoord) ([]byte, error) {
	ml, err := decodeMetatileHeader(r)
	if err != nil {
		return nil, err
	}

	size := int32(math.Sqrt(float64(ml.Count)))
	index := (int32(coord.X)-ml.X)*size + (int32(coord.Y) - ml.Y)
	if index >= ml.Count {
		return nil, fmt.Errorf("Invalid index %v/%v", index, ml.Count)
	}
	entry := ml.Index[index]
	if entry.Size > MAXENTRYSIZE {
		return nil, fmt.Errorf("entry size > MAXENTRYSIZE (size: %v)", entry.Size)
	}
	r.Seek(int64(entry.Offset), 0)
	buf := make([]byte, entry.Size)
	l, err := r.Read(buf)
	if err != nil {
		return nil, err
	}
	if int32(l) != entry.Size {
		return nil, fmt.Errorf("Invalid tile seze: %v != %v", l, entry.Size)
	}
	return buf, nil
}
Example #4
// parseChunkHeader parses a chunk header from a ReadSeeker.
// It returns the ChunkHeader struct as a value and an error.
func parseChunkHeader(reader io.ReadSeeker) (ChunkHeader, error) {
	// fmt.Println("Parse Chunk Header")

	var chunk ChunkHeader

	var chunkTypeBuffer []byte = make([]byte, 4)
	num, err := reader.Read(chunkTypeBuffer)

	// fmt.Println("Buffer type", chunkTypeBuffer, "num", num)

	// If we couldn't read 4 bytes, that's a problem.
	if num != 4 {
		return chunk, UnexpectedEndOfFile
	}

	if err != nil {
		return chunk, err
	}

	chunk.Length, err = parseUint32(reader)
	chunk.ChunkType = string(chunkTypeBuffer)

	// parseUint32 might return an error.
	if err != nil {
		return chunk, err
	}

	return chunk, nil
}
Example #5
func parseText(reader io.ReadSeeker) (string, error) {
	length, err := parseVarLength(reader)

	if err != nil {
		return "", err
	}

	var buffer []byte = make([]byte, length)

	num, err := reader.Read(buffer)

	// If we couldn't read the entire expected-length buffer, that's a problem.
	if num != int(length) {
		return "", UnexpectedEndOfFile
	}

	// If there was some other problem, that's also a problem.
	if err != nil {
		return "", err
	}

	// TODO: Data should be ASCII but might go up to 0xFF.
	// What will Go do? Try and decode UTF-8?
	return string(buffer), nil
}
Example #6
func (w *WorkBook) get_string(buf io.ReadSeeker, size uint16) (res string, err error) {
	if w.Is5ver {
		var bts = make([]byte, size)
		_, err = buf.Read(bts)
		res = string(bts)
	} else {
		var richtext_num uint16
		var phonetic_size uint32
		var flag byte
		err = binary.Read(buf, binary.LittleEndian, &flag)
		if flag&0x8 != 0 {
			err = binary.Read(buf, binary.LittleEndian, &richtext_num)
		}
		if flag&0x4 != 0 {
			err = binary.Read(buf, binary.LittleEndian, &phonetic_size)
		}
		if flag&0x1 != 0 {
			var bts = make([]uint16, size)
			var i = uint16(0)
			for ; i < size && err == nil; i++ {
				err = binary.Read(buf, binary.LittleEndian, &bts[i])
			}
			runes := utf16.Decode(bts[:i])
			res = string(runes)
			if i < size {
				w.continue_utf16 = size - i
			}
		} else {
			var bts = make([]byte, size)
			var n int
			n, err = buf.Read(bts)
			if uint16(n) < size {
				w.continue_utf16 = size - uint16(n)
				err = io.EOF
			}

			var bts1 = make([]uint16, size)
			for k, v := range bts[:n] {
				bts1[k] = uint16(v)
			}
			runes := utf16.Decode(bts1)
			res = string(runes)
		}
		if flag&0x8 != 0 {
			var bts []byte
			if w.Is5ver {
				bts = make([]byte, 2*richtext_num)
			} else {
				bts = make([]byte, 4*richtext_num)
			}
			err = binary.Read(buf, binary.LittleEndian, bts)
		}
		if flag&0x4 != 0 {
			var bts []byte
			bts = make([]byte, phonetic_size)
			err = binary.Read(buf, binary.LittleEndian, bts)
		}
	}
	return
}
Example #7
// parsePitchWheelValue parses a 14-bit signed value, which becomes a signed int16.
// The least significant 7 bits are stored in the first byte, the 7 most significant bits are stored in the second.
// Return the signed value relative to the centre, and the absolute value.
// This is tested in midi_lexer_test.go TestPitchWheel
func parsePitchWheelValue(reader io.ReadSeeker) (relative int16, absolute uint16, err error) {

	var buffer []byte = make([]byte, 2)
	num, err := reader.Read(buffer)

	// If we couldn't read 2 bytes, that's a problem.
	if num != 2 {
		return 0, 0, UnexpectedEndOfFile
	}

	// If there was some other problem, that's also a problem.
	if err != nil {
		return 0, 0, err
	}

	var val uint16 = 0

	val = uint16((buffer[1])&0x7f) << 7
	val |= uint16(buffer[0]) & 0x7f
	// fmt.Println(val)

	// log.Println()
	// Turn into a signed value relative to the centre.
	relative = int16(val) - 0x2000

	return relative, val, nil
}
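A worked check of the 14-bit split described in the comment, assuming parsePitchWheelValue is in the same package; the helper name is hypothetical:

// checkPitchWheel feeds the centre position 0x00 0x40 (LSB first):
// the absolute value reassembles to 0x40<<7 = 0x2000 and the relative
// value is therefore 0.
func checkPitchWheel() {
	r := bytes.NewReader([]byte{0x00, 0x40})
	rel, abs, err := parsePitchWheelValue(r)
	fmt.Println(rel, abs, err) // 0 8192 <nil>
}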
Example #8
func OpenTlk(r io.ReadSeeker) (*TLK, error) {
	tlk := &TLK{r: r, codepage: "latin1"}
	tlkLen, err := r.Seek(0, os.SEEK_END)
	if err != nil {
		return nil, err
	}
	r.Seek(0, os.SEEK_SET)
	err = binary.Read(r, binary.LittleEndian, &tlk.header)
	if err != nil {
		return nil, err
	}

	tlk.entries = make([]tlkEntry, tlk.header.StringCount)
	err = binary.Read(r, binary.LittleEndian, &tlk.entries)
	if err != nil {
		return nil, err
	}
	tlkPos, err := r.Seek(0, os.SEEK_CUR)
	if err != nil {
		return nil, err
	}
	tlk.stringBuf = make([]byte, tlkLen-tlkPos)
	size, err := r.Read(tlk.stringBuf)
	if err != nil {
		return nil, err
	}
	if size != len(tlk.stringBuf) {
		return nil, io.ErrUnexpectedEOF
	}

	return tlk, nil
}
Example #9
// tail returns the start of the last nth line.
// * If n < 0, return the beginning of the file.
// * If n >= 0, return the beginning of last nth line.
// Notice that if the last line is incomplete (no end-of-line), it will not be counted
// as one line.
func tail(f io.ReadSeeker, n int64) (int64, error) {
	if n < 0 {
		return 0, nil
	}
	size, err := f.Seek(0, os.SEEK_END)
	if err != nil {
		return 0, err
	}
	var left, cnt int64
	buf := make([]byte, blockSize)
	for right := size; right > 0 && cnt <= n; right -= blockSize {
		left = right - blockSize
		if left < 0 {
			left = 0
			buf = make([]byte, right)
		}
		if _, err := f.Seek(left, os.SEEK_SET); err != nil {
			return 0, err
		}
		if _, err := f.Read(buf); err != nil {
			return 0, err
		}
		cnt += int64(bytes.Count(buf, eol))
	}
	for ; cnt > n; cnt-- {
		idx := bytes.Index(buf, eol) + 1
		buf = buf[idx:]
		left += int64(idx)
	}
	return left, nil
}
Example #10
//Reads fieldinfo from DBF header, starting at pos 32.
//Reads fields until it finds the Header record terminator (0x0D).
func readHeaderFields(r io.ReadSeeker) ([]FieldHeader, error) {
	fields := []FieldHeader{}

	offset := int64(32)
	b := make([]byte, 1)
	for {
		//Check if we are at 0x0D by reading one byte ahead
		if _, err := r.Seek(offset, 0); err != nil {
			return nil, err
		}
		if _, err := r.Read(b); err != nil {
			return nil, err
		}
		if b[0] == 0x0D {
			break
		}
		//Position back one byte and read the field
		if _, err := r.Seek(-1, 1); err != nil {
			return nil, err
		}
		field := FieldHeader{}
		err := binary.Read(r, binary.LittleEndian, &field)
		if err != nil {
			return nil, err
		}
		fields = append(fields, field)

		offset += 32
	}
	return fields, nil
}
Example #11
func ChooseCompressAlgo(path string, rs io.ReadSeeker) (AlgorithmType, error) {
	buf := make([]byte, Threshold)

	bytesRead, err := rs.Read(buf)
	if err != nil {
		return AlgoNone, err
	}

	if _, errSeek := rs.Seek(0, os.SEEK_SET); errSeek != nil {
		return AlgoNone, errSeek
	}

	mime := guessMime(path, buf)
	compressAble := isCompressable(mime)

	if !compressAble || int64(bytesRead) != Threshold {
		return AlgoNone, nil
	}

	if strings.HasPrefix(mime, "text/") {
		return AlgoLZ4, nil
	} else {
		return AlgoSnappy, nil
	}
}
Example #12
// ReadLine reads a single line of at most 10240 bytes into bf.
func ReadLine(r io.ReadSeeker, bf []byte) (n int, e error) {
	newline := make([]byte, 10240)
	n, e = r.Read(newline)
	if e != nil && e != io.EOF {
		return 0, e
	}

	lineTag := bytes.IndexByte(newline[:n], '\n')
	if lineTag == -1 {
		if e != io.EOF {
			return 0, StringError(fmt.Sprintf("line too long than %d bytes", n))
		}

		if n > len(bf) {
			return 0, StringError(fmt.Sprintf("too small bytes to hold line"))
		}
		copy(bf, newline[:n])
		return n, io.EOF
	}

	if lineTag > len(bf) {
		return 0, StringError(fmt.Sprintf("too small bytes to hold line"))
	}

	back := lineTag - n + 1
	_, err := r.Seek(int64(back), 1)
	if err != nil {
		log.Fatal("Read error")
	}

	copy(bf, newline[:lineTag])
	return lineTag, e
}
Example #13
func readAt(r io.ReadSeeker, c int64, p []byte) (int64, error) {
	if _, err := r.Seek(c, 0); err != nil {
		return 0, err
	}
	n, err := r.Read(p)
	return int64(n), err
}
Example #14
// Returns new HTTP request object signed with streaming signature v4.
func newTestStreamingSignedRequest(method, urlStr string, contentLength, chunkSize int64, body io.ReadSeeker, accessKey, secretKey string) (*http.Request, error) {
	req, err := newTestStreamingRequest(method, urlStr, contentLength, chunkSize, body)
	if err != nil {
		return nil, err
	}

	signature, err := signStreamingRequest(req, accessKey, secretKey)
	if err != nil {
		return nil, err
	}

	regionStr := serverConfig.GetRegion()

	var stream []byte
	var buffer []byte
	body.Seek(0, 0)
	for {
		buffer = make([]byte, chunkSize)
		n, err := body.Read(buffer)
		if err != nil && err != io.EOF {
			return nil, err
		}

		currTime := time.Now().UTC()
		// Get scope.
		scope := strings.Join([]string{
			currTime.Format(yyyymmdd),
			regionStr,
			"s3",
			"aws4_request",
		}, "/")

		stringToSign := "AWS4-HMAC-SHA256-PAYLOAD" + "\n"
		stringToSign = stringToSign + currTime.Format(iso8601Format) + "\n"
		stringToSign = stringToSign + scope + "\n"
		stringToSign = stringToSign + signature + "\n"
		stringToSign = stringToSign + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + "\n" // hex(sum256(""))
		stringToSign = stringToSign + hex.EncodeToString(sum256(buffer[:n]))

		date := sumHMAC([]byte("AWS4"+secretKey), []byte(currTime.Format(yyyymmdd)))
		region := sumHMAC(date, []byte(regionStr))
		service := sumHMAC(region, []byte("s3"))
		signingKey := sumHMAC(service, []byte("aws4_request"))

		signature = hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))

		stream = append(stream, []byte(fmt.Sprintf("%x", n)+";chunk-signature="+signature+"\r\n")...)
		stream = append(stream, buffer[:n]...)
		stream = append(stream, []byte("\r\n")...)

		if n <= 0 {
			break
		}

	}

	req.Body = ioutil.NopCloser(bytes.NewReader(stream))
	return req, nil
}
Example #15
// cmd:     1 byte
// length:  variable: 1, 2, 4, or 8 bytes, depending on cmd
// payload: variable, its size = length
// 2015-08-10: TODO: data compression
func (d *delta) flushMiss(ms matchStat, src io.ReadSeeker) (err error) {
	var (
		//n   int
		ml  int64 // miss length
		cmd uint8
		hdr []byte
		buf []byte
		tmp []byte
	)

	bytes := int64Length(uint64(ms.length))
	switch bytes {
	case 8:
		cmd = RS_OP_LITERAL_N8
	case 4:
		cmd = RS_OP_LITERAL_N4
	case 2:
		cmd = RS_OP_LITERAL_N2
	case 1:
		cmd = RS_OP_LITERAL_N1
	}

	// Write the miss block header.
	hdr = append(hdr, byte(cmd))
	hdr = append(hdr, vhtonll(uint64(ms.length), int8(bytes))...)
	if _, err = d.outer.Write(hdr); err != nil {
		return
	}
	if _, err = src.Seek(ms.pos, 0); err != nil {
		err = errors.New("Seek failed: " + err.Error())
		return
	}

	ml = ms.length
	buf = make([]byte, 4096)
	for err == nil && ml > 0 {
		if ml >= 4096 {
			tmp = buf[0:4096]
			ml -= 4096
		} else {
			tmp = buf[0:ml]
			ml = 0
		}
		// An error should not occur here; io.ReadFull guarantees a full buffer.
		_, err = io.ReadFull(src, tmp)
		if err != nil {
			panic(fmt.Sprintf("pipe: read failed: expect %d, error: %s", len(tmp), err.Error()))
		}
		if _, err = d.outer.Write(tmp); err != nil {
			return
		}
	}

	if d.debug {
		fmt.Printf("   flush miss [where=%d len=%d], hdr len: %d miss len: %d\n",
			ms.pos, ms.length, len(hdr), ms.length)
	}
	return
}
Example #16
// Clone makes cloned copies of ReadSeeker. Reads from individual
// readSeekers are staggered to avoid duplicate reads from the source
// reader automatically.
func Clone(rs io.ReadSeeker, copies int) []io.ReadSeeker {
	readers := make([]readSeek, copies)

	// Initialize each of the clones.
	for i := 0; i < copies; i++ {
		readers[i] = readSeek{
			ignore:  false,
			mu:      &sync.Mutex{},
			readCh:  make(chan struct{}),
			readyCh: make(chan struct{}),
		}
	}

	// IO demux copier routine.
	go func() {
		for {
			bytesReq := make([]int, copies)
			for i := 0; i < copies; i++ {
				if readers[i].ignore {
					// Previously failed. Continue to ignore.
					continue
				}

				// Wait for the next read request, or give up after 32 secs.
				select {
				case <-readers[i].readCh: // bytes requested.
					bytesReq[i] = len(readers[i].buf)
				case <-time.After(32 * time.Second): // Timeout.
					readers[i].ignore = true
				}
			}

			// Find the next smallest data block to read.
			bytes2Read := smallestInt(bytesReq)
			buf := make([]byte, bytes2Read)
			n, e := rs.Read(buf)

			// Copy the data to the clones and notify them.
			for i := 0; i < copies; i++ {
				// Copy the data to clones.
				readers[i].buf = buf
				readers[i].n = n
				readers[i].e = e

				// Notify ready.
				readers[i].readyCh <- struct{}{}
			}
		}
	}()

	// Return the clones to the caller; *readSeek is assumed to implement
	// io.ReadSeeker via its Read and Seek methods.
	clones := make([]io.ReadSeeker, copies)
	for i := range readers {
		clones[i] = &readers[i]
	}
	return clones
}
Example #17
// OpenROM creates a pkm.Version that reads a GameBoy Advance ROM file. If the
// contents are identified as an unsupported version, then a nil value is
// returned.
func OpenROM(rom io.ReadSeeker) pkm.Version {
	var gc pkm.GameCode
	rom.Seek(addrGameCode.ROM(), 0)
	rom.Read(gc[:])
	if v, ok := versionLookup[gc]; ok {
		v.ROM = rom
		return &v
	}
	return nil
}
Example #18
func utf16Seekable(in io.ReadSeeker, endianness endianness) (Encoding, error) {
	// remember file offset in case we have to back off
	offset, err := in.Seek(0, os.SEEK_CUR)
	if err != nil {
		return nil, err
	}

	// goto beginning of file
	keepOffset := offset == 0
	if _, err = in.Seek(0, os.SEEK_SET); err != nil {
		return nil, err
	}

	// read Byte Order Marker (BOM)
	var buf [2]byte
	n, err := in.Read(buf[:])
	if err != nil {
		in.Seek(offset, os.SEEK_SET)
		return nil, err
	}
	if n < 2 {
		in.Seek(offset, os.SEEK_SET)
		return nil, transform.ErrShortSrc
	}

	// determine endianness from BOM
	inEndiannes := unknownEndianess
	switch {
	case buf[0] == 0xfe && buf[1] == 0xff:
		inEndiannes = bigEndian
	case buf[0] == 0xff && buf[1] == 0xfe:
		inEndiannes = littleEndian
	}

	// restore offset if BOM is missing or this function was not
	// called with read pointer at beginning of file
	if !keepOffset || inEndiannes == unknownEndianess {
		if _, err = in.Seek(offset, os.SEEK_SET); err != nil {
			return nil, err
		}
	}

	// choose encoding based on BOM
	if encoding, ok := utf16Map[inEndiannes]; ok {
		return encoding, nil
	}

	// fall back to configured endianness
	if encoding, ok := utf16Map[endianness]; ok {
		return encoding, nil
	}

	// no encoding for configured endianness found => fail
	return nil, unicode.ErrMissingBOM
}
Example #19
func loadAndCompare(t testing.TB, rd io.ReadSeeker, size int, offset int64, expected []byte) {
	var (
		pos int64
		err error
	)

	if offset >= 0 {
		pos, err = rd.Seek(offset, 0)
	} else {
		pos, err = rd.Seek(offset, 2)
	}
	if err != nil {
		t.Errorf("Seek(%d, 0) returned error: %v", offset, err)
		return
	}

	if offset >= 0 && pos != offset {
		t.Errorf("pos after seek is wrong, want %d, got %d", offset, pos)
	} else if offset < 0 && pos != int64(size)+offset {
		t.Errorf("pos after relative seek is wrong, want %d, got %d", int64(size)+offset, pos)
	}

	buf := make([]byte, len(expected))
	n, err := rd.Read(buf)

	// if we requested data beyond the end of the file, ignore
	// ErrUnexpectedEOF error
	if offset > 0 && len(buf) > size && err == io.ErrUnexpectedEOF {
		err = nil
		buf = buf[:size]
	}

	if offset < 0 && len(buf) > abs(int(offset)) && err == io.ErrUnexpectedEOF {
		err = nil
		buf = buf[:abs(int(offset))]
	}

	if n != len(buf) {
		t.Errorf("Load(%d, %d): wrong length returned, want %d, got %d",
			len(buf), offset, len(buf), n)
		return
	}

	if err != nil {
		t.Errorf("Load(%d, %d): unexpected error: %v", len(buf), offset, err)
		return
	}

	buf = buf[:n]
	if !bytes.Equal(buf, expected) {
		t.Errorf("Load(%d, %d) returned wrong bytes", len(buf), offset)
		return
	}
}
Example #20
func read4pop(reader io.ReadSeeker) ([]byte, error) {
	var data = make([]byte, 4)
	if currpos, err := reader.Seek(0, 1); err != nil {
		return nil, err
	} else if _, err := reader.Read(data); err != nil {
		return nil, err
	} else if _, err = reader.Seek(currpos, 0); err != nil {
		return nil, err
	}
	return data, nil
}
Example #21
func getFanoutValue(index int64, in io.ReadSeeker) (int, error) {
	if index < 0 {
		return 0, nil
	}
	_, err := in.Seek(8+index*4, os.SEEK_SET)
	if err != nil {
		return 0, err
	}
	indexBuff := make([]byte, 4)
	_, err = in.Read(indexBuff)
	if err != nil {
		return 0, err
	}
	return int(binary.BigEndian.Uint32(indexBuff)), nil
}
Example #22
func readChunks(reader io.ReadSeeker) []pngChunk {
	chunks := []pngChunk{}

	reader.Seek(chunkStartOffset, os.SEEK_SET)

	readChunk := func() (*pngChunk, error) {
		var chunk pngChunk
		chunk.Offset, _ = reader.Seek(0, os.SEEK_CUR)

		binary.Read(reader, binary.BigEndian, &chunk.Length)

		chunk.Data = make([]byte, chunk.Length)

		err := binary.Read(reader, binary.BigEndian, &chunk.Type)
		if err != nil {
			goto read_error
		}

		if read, err := reader.Read(chunk.Data); read == 0 || err != nil {
			goto read_error
		}

		err = binary.Read(reader, binary.BigEndian, &chunk.CRC)
		if err != nil {
			goto read_error
		}

		return &chunk, nil

	read_error:
		return nil, fmt.Errorf("Read error")
	}

	chunk, err := readChunk()
	if err != nil {
		return chunks
	}

	chunks = append(chunks, *chunk)

	// Read subsequent chunks until the end chunk is found.
	for string(chunks[len(chunks)-1].Type[:]) != endChunk {

		chunk, err := readChunk()
		if err != nil {
			break
		}

		chunks = append(chunks, *chunk)
	}

	return chunks
}
Example #23
func getNext(file io.ReadSeeker) (byte, error) {
	if nextIndex < bufferSize {
		retVal := readBuffer[nextIndex]
		nextIndex++
		return retVal, nil
	}
	_, err := file.Read(readBuffer)
	if err != nil {
		return byte(0), err
	}
	nextIndex = 1
	return readBuffer[0], nil
}
Example #24
func GetTotalCount(in io.ReadSeeker) (uint, error) {
	_, err := in.Seek(0x404, os.SEEK_SET)
	if err != nil {
		return 0, err
	}
	buff := make([]byte, 4)
	_, err = in.Read(buff)
	if err != nil {
		return 0, err
	}
	count := binary.BigEndian.Uint32(buff)
	return uint(count), nil
}
Example #25
func readHashAt(indexAt int, in io.ReadSeeker) ([]byte, error) {
	hashOffset := 0x408 + int64(indexAt)*0x14
	_, err := in.Seek(hashOffset, os.SEEK_SET)
	if err != nil {
		return nil, err
	}
	hashBuff := make([]byte, 20)
	_, err = in.Read(hashBuff)
	if err != nil {
		return nil, err
	}
	return hashBuff, nil
}
Example #26
func readCRCAt(indexAt int, count uint, in io.ReadSeeker) ([]byte, error) {
	crcOffset := 0x408 + int64(count)*0x14 + int64(indexAt)*4
	_, err := in.Seek(crcOffset, os.SEEK_SET)
	if err != nil {
		return nil, err
	}
	crcBuff := make([]byte, 4)
	_, err = in.Read(crcBuff)
	if err != nil {
		return nil, err
	}
	return crcBuff, nil
}
Example #27
// To find the creation date in the "mvhd" atom, take bytes 5-8 as a
// big-endian uint32: the number of seconds since 1904-01-01 00:00:00 UTC.
func getMp4CreationTimeFromMvhdAtom(f io.ReadSeeker) (time.Time, error) {
	//now we have it! bytes  5-8
	f.Seek(4, 1)
	buff := make([]byte, 4, 4)
	_, err := f.Read(buff)
	if err != nil {
		err = fmt.Errorf("read error: %s", err)
		return time.Time{}, err
	}
	createdUint := binary.BigEndian.Uint32(buff)
	//and add our seconds
	return mp4DateEpoch.Add(time.Second * time.Duration(createdUint)), nil
}
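mp4DateEpoch is not shown in this example; a minimal sketch of how it might be declared, assuming the standard QuickTime/MP4 epoch of 1904-01-01 00:00:00 UTC:

// Hypothetical declaration of the epoch used above: "mvhd" timestamps
// count seconds from midnight, January 1, 1904 (UTC).
var mp4DateEpoch = time.Date(1904, time.January, 1, 0, 0, 0, 0, time.UTC)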
Example #28
func (w *WorkBook) get_string(buf io.ReadSeeker, size uint16) string {
	var res string
	if w.Is5ver {
		var bts = make([]byte, size)
		buf.Read(bts)
		return string(bts)
	} else {
		var richtext_num uint16
		var phonetic_size uint32
		var flag byte
		binary.Read(buf, binary.LittleEndian, &flag)
		if flag&0x8 != 0 {
			binary.Read(buf, binary.LittleEndian, &richtext_num)
		}
		if flag&0x4 != 0 {
			binary.Read(buf, binary.LittleEndian, &phonetic_size)
		}
		if flag&0x1 != 0 {
			var bts = make([]uint16, size)
			var err error
			var i = uint16(0)
			for ; i < size && err == nil; i++ {
				err = binary.Read(buf, binary.LittleEndian, &bts[i])
			}
			runes := utf16.Decode(bts[:i])
			res = string(runes)
			if i < size {
				w.continue_utf16 = size - i + 1
			}
		} else {
			var bts = make([]byte, size)
			binary.Read(buf, binary.LittleEndian, &bts)
			res = string(bts)
		}
		if flag&0x8 != 0 {
			var bts []byte
			if w.Is5ver {
				bts = make([]byte, 2*richtext_num)
			} else {
				bts = make([]byte, 4*richtext_num)
			}
			binary.Read(buf, binary.LittleEndian, bts)
		}
		if flag&0x4 != 0 {
			var bts []byte
			bts = make([]byte, phonetic_size)
			binary.Read(buf, binary.LittleEndian, bts)
		}
	}
	return res
}
Example #29
// ReadEntryDataStream returns the uncompressed data for the entry at r's
// current offset, along with the raw compressed bytes that produced it.
func (p packfile) ReadEntryDataStream(r io.ReadSeeker) (uncompressed []byte, compressed []byte) {
	b := new(bytes.Buffer)
	bookmark, _ := r.Seek(0, 1)
	zr, err := zlib.NewReader(r)
	if err != nil {
		panic(err)
	}
	defer zr.Close()
	io.Copy(b, zr)

	// Go's zlib implementation is greedy, so we need some hacks to
	// get r back to the right place in the file.
	// We use a modified version of compress/zlib which exposes the
	// digest. Before reading, we take a bookmark of the address
	// that we're starting at, then after reading we go back there.
	// Starting from there, look through the reader until we find the
	// compressed object's zlib digest.
	// This is stupid, but necessary because git's packfile format
	// is *very* stupid.
	digest := zr.Digest.Sum32()
	r.Seek(bookmark, 0)
	address := make([]byte, 4)
	var i int64
	var finalAddress int64
	for {
		n, err := r.Read(address)
		// This probably means we reached the end of the io.Reader.
		// It might be the last read, so break out of the loop instead
		// of getting caught in an infinite loop.
		if n < 4 || err != nil {
			break
		}
		var intAddress uint32 = (uint32(address[3]) | uint32(address[2])<<8 | uint32(address[1])<<16 | uint32(address[0])<<24)
		if intAddress == digest {
			finalAddress = bookmark + i + 4
			break
		}
		// Advance a byte
		i += 1
		r.Seek(bookmark+i, 0)

	}
	r.Seek(bookmark, 0)
	compressed = make([]byte, finalAddress-bookmark)
	r.Read(compressed)
	r.Seek(finalAddress, 0)
	return b.Bytes(), compressed

}
Example #30
func validMapHeader(rom io.ReadSeeker, p ptr) bool {
	maph := make([]byte, structMapHeader.Size())
	rom.Seek(p.ROM(), 0)
	rom.Read(maph)
	if p := decPtr(maph[0:4]); !p.ValidROM() {
		return false
	}
	if p := decPtr(maph[4:8]); !p.ValidROM() {
		return false
	}
	if p := decPtr(maph[8:12]); !p.ValidROM() {
		return false
	}
	return true
}