func readDirectoryEnd(r io.ReaderAt, size int64) (dir *directoryEnd, err error) {
	// look for directoryEndSignature in the last 1k, then in the last 65k
	var b []byte
	for i, bLen := range []int64{1024, 65 * 1024} {
		if bLen > size {
			bLen = size
		}
		b = make([]byte, int(bLen))
		if _, err := r.ReadAt(b, size-bLen); err != nil && err != io.EOF {
			return nil, err
		}
		if p := findSignatureInBlock(b); p >= 0 {
			b = b[p:]
			break
		}
		if i == 1 || bLen == size {
			return nil, FormatError
		}
	}

	// read header into struct
	c := binary.LittleEndian
	d := new(directoryEnd)
	d.diskNbr = c.Uint16(b[4:6])
	d.dirDiskNbr = c.Uint16(b[6:8])
	d.dirRecordsThisDisk = c.Uint16(b[8:10])
	d.directoryRecords = c.Uint16(b[10:12])
	d.directorySize = c.Uint32(b[12:16])
	d.directoryOffset = c.Uint32(b[16:20])
	d.commentLen = c.Uint16(b[20:22])
	d.comment = string(b[22 : 22+int(d.commentLen)])
	return d, nil
}
// IsBZip2 checks to see if the received reader's contents are in bzip2 format
// by checking the magic numbers.
func IsBZip2(r io.ReaderAt) (bool, error) {
	h := make([]byte, 3)
	// Read the first 3 bytes
	_, err := r.ReadAt(h, 0)
	if err != nil {
		return false, err
	}
	var hb [3]byte
	// check for bzip2
	hbuf := bytes.NewReader(h)
	err = binary.Read(hbuf, binary.LittleEndian, &hb)
	if err != nil {
		return false, fmt.Errorf("error while checking if input matched bzip2's magic number: %s", err)
	}
	var cb [3]byte
	cbuf := bytes.NewBuffer(magicnumBZip2)
	err = binary.Read(cbuf, binary.BigEndian, &cb)
	if err != nil {
		return false, fmt.Errorf("error while converting bzip2 magic number for comparison: %s", err)
	}
	if hb == cb {
		return true, nil
	}
	return false, nil
}
// IsGZip checks to see if the received reader's contents are in gzip format
// by checking the magic numbers.
func IsGZip(r io.ReaderAt) (bool, error) {
	h := make([]byte, 2)
	// Read the first 2 bytes
	_, err := r.ReadAt(h, 0)
	if err != nil {
		return false, err
	}
	var h16 uint16
	// check for gzip
	hbuf := bytes.NewReader(h)
	err = binary.Read(hbuf, binary.BigEndian, &h16)
	if err != nil {
		return false, fmt.Errorf("error while checking if input matched gzip's magic number: %s", err)
	}
	var c16 uint16
	cbuf := bytes.NewBuffer(magicnumGZip)
	err = binary.Read(cbuf, binary.BigEndian, &c16)
	if err != nil {
		return false, fmt.Errorf("error while converting gzip magic number for comparison: %s", err)
	}
	if h16 == c16 {
		return true, nil
	}
	return false, nil
}
// readHeader reads the header at the end of rd. size is the length of the
// whole data accessible in rd.
func readHeader(rd io.ReaderAt, size int64) ([]byte, error) {
	hl, err := readHeaderLength(rd, size)
	if err != nil {
		return nil, err
	}

	if int64(hl) > size-int64(binary.Size(hl)) {
		return nil, errors.New("header is larger than file")
	}
	if int64(hl) > maxHeaderSize {
		return nil, errors.New("header is larger than maxHeaderSize")
	}

	buf := make([]byte, int(hl))
	n, err := rd.ReadAt(buf, size-int64(hl)-int64(binary.Size(hl)))
	if err != nil {
		return nil, errors.Wrap(err, "ReadAt")
	}
	if n != len(buf) {
		return nil, errors.New("not enough bytes read")
	}

	return buf, nil
}
func readDirectoryEnd(r io.ReaderAt, size int64) (dir *directoryEnd, err error) {
	// look for directoryEndSignature in the last 1k, then in the last 65k
	var b []byte
	for i, bLen := range []int64{1024, 65 * 1024} {
		if bLen > size {
			bLen = size
		}
		b = make([]byte, int(bLen))
		if _, err := r.ReadAt(b, size-bLen); err != nil && err != io.EOF {
			return nil, err
		}
		if p := findSignatureInBlock(b); p >= 0 {
			b = b[p:]
			break
		}
		if i == 1 || bLen == size {
			return nil, FormatError
		}
	}

	// read header into struct
	defer recoverError(&err)
	br := bytes.NewBuffer(b[4:]) // skip over signature
	d := new(directoryEnd)
	read(br, &d.diskNbr)
	read(br, &d.dirDiskNbr)
	read(br, &d.dirRecordsThisDisk)
	read(br, &d.directoryRecords)
	read(br, &d.directorySize)
	read(br, &d.directoryOffset)
	read(br, &d.commentLen)
	d.comment = string(readByteSlice(br, d.commentLen))
	return d, nil
}
func Patch(delta []byte, baseFile io.ReaderAt) (newFile io.Reader, checksum []byte, err error) {
	ops, checksum, err := readDelta(delta)
	if err != nil {
		log.Println("Couldn't patch:", err)
		return
	}
	var wr bytes.Buffer
	for _, op := range ops {
		switch op.code {
		case ADD:
			b := make([]byte, op.length)
			// Check the read error rather than ignoring it; io.ReaderAt may
			// return io.EOF for a read that ends exactly at EOF, which is fine.
			if _, rerr := baseFile.ReadAt(b, int64(op.index)); rerr != nil && rerr != io.EOF {
				err = rerr
				return
			}
			wr.Write(b)
		case DATA:
			wr.Write(op.data)
		}
	}
	newFile = &wr
	return
}
func readFooter(r io.ReaderAt, size uint64) (mi, ii *bInfo, err error) {
	if size < uint64(footerSize) {
		err = errors.ErrInvalid("file is too short to be an sstable")
		return
	}
	buf := make([]byte, footerSize)
	n, err := r.ReadAt(buf, int64(size)-footerSize)
	if err != nil {
		return
	}
	if !bytes.Equal(buf[handlesSize:], magicBytes) {
		err = errors.ErrInvalid("not an sstable (bad magic number)")
		return
	}
	mi = new(bInfo)
	n, err = mi.decodeFrom(buf)
	if err != nil {
		return
	}
	ii = new(bInfo)
	n, err = ii.decodeFrom(buf[n:])
	if err != nil {
		return
	}
	return
}
// Determine the size of a ReaderAt using a binary search. Given that file
// offsets are no larger than int64, there is an upper limit of 64 iterations
// before the EOF is found.
func ReaderAtSize(rd io.ReaderAt) (pos int64, err error) {
	defer errs.Recover(&err)

	// Function to check if the given position is at EOF
	buf := make([]byte, 2)
	checkEOF := func(pos int64) int {
		if pos > 0 {
			cnt, err := rd.ReadAt(buf[:2], pos-1)
			errs.Panic(errs.Ignore(err, io.EOF))
			return 1 - cnt // RetVal[Cnt] = {0: +1, 1: 0, 2: -1}
		}
		// Special case where position is zero
		cnt, err := rd.ReadAt(buf[:1], pos)
		errs.Panic(errs.Ignore(err, io.EOF))
		return 0 - cnt // RetVal[Cnt] = {0: 0, 1: -1}
	}

	// Obtain the size via binary search O(log n) => 64 iterations
	posMin, posMax := int64(0), int64(math.MaxInt64)
	for posMax >= posMin {
		// Overflow-safe midpoint: posMin+posMax may exceed MaxInt64.
		pos = posMin + (posMax-posMin)/2
		switch checkEOF(pos) {
		case -1: // Below EOF
			posMin = pos + 1
		case 0: // At EOF
			return pos, nil
		case +1: // Above EOF
			posMax = pos - 1
		}
	}
	panic(errs.New("EOF is in a transient state"))
}
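// A minimal usage sketch for ReaderAtSize above, using only the standard
// library: any io.ReaderAt works, including an in-memory bytes.Reader, so
// the size of a reader can be found without Seek. readerAtSizeExample is a
// hypothetical helper, not part of the original API.
func readerAtSizeExample() {
	r := bytes.NewReader([]byte("hello, world"))
	size, err := ReaderAtSize(r)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(size) // prints 12
}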
func Hash(r io.ReaderAt, size int64) (string, error) {
	var hash uint64
	if size < chunkSize*2 {
		return "", errors.New("File is too small")
	}

	// Read head and tail blocks.
	buf := make([]byte, chunkSize*2)
	if _, err := r.ReadAt(buf[:chunkSize], 0); err != nil {
		return "", err
	}
	if _, err := r.ReadAt(buf[chunkSize:], size-chunkSize); err != nil {
		return "", err
	}

	// Convert to uint64, and sum.
	nums := make([]uint64, (chunkSize*2)/8)
	reader := bytes.NewReader(buf)
	if err := binary.Read(reader, binary.LittleEndian, &nums); err != nil {
		return "", err
	}
	for _, num := range nums {
		hash += num
	}
	return fmt.Sprintf("%016x", hash+uint64(size)), nil
}
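// A usage sketch for Hash above: os.File satisfies io.ReaderAt, so a file
// can be hashed directly from disk by sampling its head and tail blocks.
// hashFile is a hypothetical helper, not part of the original API.
func hashFile(path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()
	fi, err := f.Stat()
	if err != nil {
		return "", err
	}
	return Hash(f, fi.Size())
}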
// compXmlStringAt -- Return the string stored in StringTable format at
// offset strOff. This offset points to the 16 bit string length, which
// is followed by that number of 16 bit (Unicode) chars.
func compXmlStringAt(arr io.ReaderAt, meta stringsMeta, strOff uint32) string {
	if strOff == 0xffffffff {
		return ""
	}
	length := make([]byte, 2)
	off := meta.StringDataOffset + meta.DataOffset[strOff]
	arr.ReadAt(length, int64(off))
	// Widen each byte before shifting; length[1]<<8 on a raw byte is always zero.
	strLen := int(length[1])<<8 + int(length[0])
	chars := make([]byte, int64(strLen))
	ii := 0
	for i := 0; i < strLen; i++ {
		c := make([]byte, 1)
		arr.ReadAt(c, int64(int(off)+2+ii))
		if c[0] == 0 {
			i--
		} else {
			chars[i] = c[0]
		}
		ii++
	}
	return string(chars)
} // end of compXmlStringAt
func decrypt_part_of_file(input_file, key_file io.ReaderAt, output_filename string, chunk_len, chunk_num, from int64) {
	data := make([]byte, chunk_len)
	key := make([]byte, chunk_len)
	xored_data := make([]byte, chunk_len)
	output_file, err := os.Create(output_filename)
	check(err)
	defer output_file.Close()
	for i := int64(0); i < chunk_num; i++ {
		n, err := input_file.ReadAt(data, from+i*chunk_len)
		if err != nil && err != io.EOF {
			panic(err)
		}
		_, err = key_file.ReadAt(key[:n], from+i*chunk_len)
		if err != nil && err != io.EOF {
			panic(err)
		}
		xor(data[:n], key[:n], xored_data[:n])
		_, err = output_file.WriteAt(xored_data[:n], from+i*chunk_len)
		check(err)
	}
}
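// A usage sketch for decrypt_part_of_file, with hypothetical file names:
// XOR-decrypt sixteen 64 KiB chunks of input.bin against key.bin, starting
// at offset 0 and writing the result to output.bin. The check helper is
// assumed from the surrounding file.
func decryptExample() {
	in, err := os.Open("input.bin")
	check(err)
	defer in.Close()
	key, err := os.Open("key.bin")
	check(err)
	defer key.Close()
	decrypt_part_of_file(in, key, "output.bin", 64*1024, 16, 0)
}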
func (cr ChunkedReader) ReadAt(p []byte, off int64) (n int, err error) {
	n = 0
	for len(p) > 0 {
		var startOff int64
		var r io.ReaderAt
		startOff, r, err = cr(off)
		if err != nil {
			return
		}
		var m int
		m, err = r.ReadAt(p, off-startOff)
		n += m
		off += int64(m)
		p = p[m:]
		if err == nil {
			if len(p) > 0 {
				panic("ReaderAt returned a non-full read without errors")
			}
			return
		}
		if err == io.EOF {
			err = nil
		}
		if err != nil {
			return
		}
	}
	return
}
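// A sketch showing how ChunkedReader above can stitch several in-memory
// chunks into one logical io.ReaderAt. It assumes the declaration
// type ChunkedReader func(off int64) (startOff int64, r io.ReaderAt, err error),
// which matches how cr is invoked inside ReadAt; the example data is made up.
func chunkedReaderExample() {
	chunks := []struct {
		start int64
		data  []byte
	}{
		{0, []byte("hello, ")},
		{7, []byte("world")},
	}
	cr := ChunkedReader(func(off int64) (int64, io.ReaderAt, error) {
		for _, c := range chunks {
			if off >= c.start && off < c.start+int64(len(c.data)) {
				return c.start, bytes.NewReader(c.data), nil
			}
		}
		return 0, nil, io.EOF
	})
	buf := make([]byte, 12)
	if _, err := cr.ReadAt(buf, 0); err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(buf)) // prints "hello, world"
}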
// IsTar checks to see if the received reader's contents are in the tar format
// by checking the magic numbers. This evaluates using both tar1 and tar2 magic
// numbers.
func IsTar(r io.ReaderAt) (bool, error) {
	h := make([]byte, 8)
	// Read the first 8 bytes at offset 257
	_, err := r.ReadAt(h, 257)
	if err != nil {
		return false, err
	}
	var h64 uint64
	// check for tar
	hbuf := bytes.NewReader(h)
	err = binary.Read(hbuf, binary.BigEndian, &h64)
	if err != nil {
		return false, fmt.Errorf("error while checking if input matched tar's magic number: %s", err)
	}
	var c64 uint64
	cbuf := bytes.NewBuffer(magicnumTar1)
	err = binary.Read(cbuf, binary.BigEndian, &c64)
	if err != nil {
		return false, fmt.Errorf("error while converting the tar magic number for comparison: %s", err)
	}
	if h64 == c64 {
		return true, nil
	}
	cbuf = bytes.NewBuffer(magicnumTar2)
	err = binary.Read(cbuf, binary.BigEndian, &c64)
	if err != nil {
		return false, fmt.Errorf("error while converting the empty tar magic number for comparison: %s", err)
	}
	if h64 == c64 {
		return true, nil
	}
	return false, nil
}
// IsLZ4 checks to see if the received reader's contents are in LZ4 format by
// checking the magic numbers.
func IsLZ4(r io.ReaderAt) (bool, error) {
	h := make([]byte, 4)
	// Read the first 4 bytes
	_, err := r.ReadAt(h, 0)
	if err != nil {
		return false, err
	}
	var h32 uint32
	// check for lz4
	hbuf := bytes.NewReader(h)
	err = binary.Read(hbuf, binary.LittleEndian, &h32)
	if err != nil {
		return false, fmt.Errorf("error while checking if input matched LZ4's magic number: %s", err)
	}
	var c32 uint32
	cbuf := bytes.NewBuffer(magicnumLZ4)
	err = binary.Read(cbuf, binary.BigEndian, &c32)
	if err != nil {
		return false, fmt.Errorf("error while converting LZ4 magic number for comparison: %s", err)
	}
	if h32 == c32 {
		return true, nil
	}
	return false, nil
}
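// The four Is* checks above share one pattern: read the first few bytes and
// compare them against a known magic number. A hedged alternative sketch that
// skips the binary.Read round-trip and compares the raw header bytes
// directly; isMagic is a hypothetical helper, not part of the original API.
func isMagic(r io.ReaderAt, magic []byte, offset int64) (bool, error) {
	h := make([]byte, len(magic))
	if _, err := r.ReadAt(h, offset); err != nil {
		return false, err
	}
	return bytes.Equal(h, magic), nil
}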
func readNumRecs(r io.ReaderAt) (int64, error) {
	var buf [4]byte
	_, err := r.ReadAt(buf[:], _NumRecsOffset)
	if err != nil {
		return 0, err
	}
	return int64(buf[0])<<24 + int64(buf[1])<<16 + int64(buf[2])<<8 + int64(buf[3]), nil
}
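// An equivalent sketch using encoding/binary instead of manual shifts; the
// result is the same big-endian 32-bit decode. _NumRecsOffset is assumed
// from the surrounding file, and readNumRecsBinary is a hypothetical name.
func readNumRecsBinary(r io.ReaderAt) (int64, error) {
	var buf [4]byte
	if _, err := r.ReadAt(buf[:], _NumRecsOffset); err != nil {
		return 0, err
	}
	return int64(binary.BigEndian.Uint32(buf[:])), nil
}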
func readAtWithoutBuffer(r io.ReaderAt, n int, off int64) (int, string, error) {
	buf := make([]byte, n)
	n, err := r.ReadAt(buf, off)
	buf = buf[:n]
	str := string(buf)
	return n, str, err
}
func readAtWithBuffer(r io.ReaderAt, n int, off int64) (int, string, error) {
	extendAndSliceBuffer(n)
	n, err := r.ReadAt(buffer, off)
	buffer = buffer[:n]
	str := string(buffer)
	return n, str, err
}
func readRandom(
	r io.ReaderAt,
	fileSize int64,
	readSize int,
	desiredDuration time.Duration) (err error) {
	// Make sure the logic below for choosing offsets works.
	if fileSize < int64(readSize) {
		err = fmt.Errorf(
			"File size of %d bytes not large enough for reads of %d bytes",
			fileSize,
			readSize)
		return
	}

	buf := make([]byte, readSize)
	start := time.Now()

	var readCount int64
	var bytesRead int64
	for time.Since(start) < desiredDuration {
		// Choose a random offset at which to read.
		off := rand.Int63n(fileSize - int64(readSize))

		// Read, ignoring io.EOF which io.ReaderAt is allowed to return for reads
		// that abut the end of the file.
		var n int
		n, err = r.ReadAt(buf, off)

		switch {
		case err == io.EOF && n == readSize:
			err = nil

		case err != nil:
			err = fmt.Errorf("ReadAt: %v", err)
			return
		}

		readCount++
		bytesRead += int64(n)
	}

	d := time.Since(start)

	// Report.
	seconds := float64(d) / float64(time.Second)
	readsPerSec := float64(readCount) / seconds

	fmt.Printf(
		"Read %d times (%s) in %v (%.1f Hz)\n",
		readCount,
		format.Bytes(float64(bytesRead)),
		d,
		readsPerSec)
	fmt.Println()
	return
}
// repair attempts to repair a file by uploading missing pieces to more hosts.
func (f *file) repair(r io.ReaderAt, pieceMap repairMap, hosts []uploader) error {
	// For each chunk with missing pieces, re-encode the chunk and upload each
	// missing piece.
	var wg sync.WaitGroup
	for chunkIndex, missingPieces := range pieceMap {
		// can only upload to hosts that aren't already storing this chunk
		// TODO: what if we're renewing?
		// curHosts := f.chunkHosts(chunkIndex)
		// var newHosts []uploader
		// outer:
		// for _, h := range hosts {
		// 	for _, ip := range curHosts {
		// 		if ip == h.addr() {
		// 			continue outer
		// 		}
		// 	}
		// 	newHosts = append(newHosts, h)
		// }
		newHosts := hosts
		// don't bother encoding if there aren't any hosts to upload to;
		// skipping also avoids indexing into an empty newHosts below
		if len(newHosts) == 0 {
			continue
		}

		// read chunk data and encode
		chunk := make([]byte, f.chunkSize())
		_, err := r.ReadAt(chunk, int64(chunkIndex*f.chunkSize()))
		if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
			return err
		}
		pieces, err := f.erasureCode.Encode(chunk)
		if err != nil {
			return err
		}

		// upload pieces, split evenly among hosts
		wg.Add(len(missingPieces))
		for j, pieceIndex := range missingPieces {
			host := newHosts[j%len(newHosts)]
			up := uploadPiece{pieces[pieceIndex], chunkIndex, pieceIndex}
			go func(host uploader, up uploadPiece) {
				err := host.addPiece(up)
				if err == nil {
					// update contract
					f.mu.Lock()
					contract := host.fileContract()
					f.contracts[contract.ID] = contract
					f.mu.Unlock()
				}
				wg.Done()
			}(host, up)
		}
		wg.Wait()
	}

	return nil
}
// repair attempts to repair a file by uploading missing pieces to more hosts.
func (f *file) repair(r io.ReaderAt, pieceMap map[uint64][]uint64, hosts []uploader) error {
	// For each chunk with missing pieces, re-encode the chunk and upload each
	// missing piece.
	var wg sync.WaitGroup
	for chunkIndex, missingPieces := range pieceMap {
		// can only upload to hosts that aren't already storing this chunk
		curHosts := f.chunkHosts(chunkIndex)
		var newHosts []uploader
	outer:
		for _, h := range hosts {
			for _, ip := range curHosts {
				if ip == h.addr() {
					continue outer
				}
			}
			newHosts = append(newHosts, h)
		}
		// don't bother encoding if there aren't any hosts to upload to
		if len(newHosts) == 0 {
			continue
		}

		// read chunk data and encode
		chunk := make([]byte, f.chunkSize())
		_, err := r.ReadAt(chunk, int64(chunkIndex*f.chunkSize()))
		if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
			return err
		}
		pieces, err := f.erasureCode.Encode(chunk)
		if err != nil {
			return err
		}

		// upload pieces, split evenly among hosts
		wg.Add(len(missingPieces))
		for j, pieceIndex := range missingPieces {
			host := newHosts[j%len(newHosts)]
			up := uploadPiece{pieces[pieceIndex], chunkIndex, pieceIndex}
			go func(host uploader, up uploadPiece) {
				err := host.addPiece(up)
				if err == nil {
					atomic.AddUint64(&f.bytesUploaded, uint64(len(up.data)))
				}
				wg.Done()
			}(host, up)
		}
		wg.Wait()
		atomic.AddUint64(&f.chunksUploaded, 1)

		// update contracts
		for _, h := range hosts {
			contract := h.fileContract()
			f.contracts[contract.IP] = contract
		}
	}

	return nil
}
// Request returns the specified data segment by reading it from local disk.
// Implements the protocol.Model interface.
func (m *Model) Request(deviceID protocol.DeviceID, folder, name string, offset int64, size int) ([]byte, error) {
	// Verify that the requested file exists in the local model.
	m.fmut.RLock()
	r, ok := m.folderFiles[folder]
	m.fmut.RUnlock()

	if !ok {
		l.Warnf("Request from %s for file %s in nonexistent folder %q", deviceID, name, folder)
		return nil, ErrNoSuchFile
	}

	lf := r.Get(protocol.LocalDeviceID, name)
	if lf.IsInvalid() || lf.IsDeleted() {
		if debug {
			l.Debugf("%v REQ(in): %s: %q / %q o=%d s=%d; invalid: %v", m, deviceID, folder, name, offset, size, lf)
		}
		return nil, ErrInvalid
	}

	if offset > lf.Size() {
		if debug {
			l.Debugf("%v REQ(in; nonexistent): %s: %q o=%d s=%d", m, deviceID, name, offset, size)
		}
		return nil, ErrNoSuchFile
	}

	if debug && deviceID != protocol.LocalDeviceID {
		l.Debugf("%v REQ(in): %s: %q / %q o=%d s=%d", m, deviceID, folder, name, offset, size)
	}
	m.fmut.RLock()
	fn := filepath.Join(m.folderCfgs[folder].Path, name)
	m.fmut.RUnlock()

	var reader io.ReaderAt
	var err error
	if lf.IsSymlink() {
		target, _, err := symlinks.Read(fn)
		if err != nil {
			return nil, err
		}
		reader = strings.NewReader(target)
	} else {
		reader, err = os.Open(fn) // XXX: Inefficient, should cache fd?
		if err != nil {
			return nil, err
		}
		defer reader.(*os.File).Close()
	}

	buf := make([]byte, size)
	_, err = reader.ReadAt(buf, offset)
	if err != nil {
		return nil, err
	}

	return buf, nil
}
func read_index(offset int64, fh io.ReaderAt) []byte { // io.ReaderAt?? as a pointer??
	line := make([]byte, LINE_LEN)
	// ReadAt needs the destination buffer as well as the offset.
	n, err := fh.ReadAt(line, offset) // We may be reading too much, no?
	if err != nil && err != io.EOF {
		fmt.Fprintf(os.Stderr, "An error occurred while trying to read from file: %s\n", err)
		os.Exit(1)
	}
	return line[:n]
}
// ReadData reads the data from the location specified in the info/entry.
func (info *blockListInfo) ReadData(reader io.ReaderAt) (*[]byte, error) {
	if info.Entry.Free > 0 {
		return nil, errors.New("filemanager: ReadData: Info is free, unable to read")
	}
	data := make([]byte, info.Entry.Size)
	_, err := reader.ReadAt(data, info.Entry.Start)
	return &data, err
}
func (o *Store) ItemValRead(c *Collection, i *Item, r io.ReaderAt, offset int64, valLength uint32) error {
	if o.callbacks.ItemValRead != nil {
		return o.callbacks.ItemValRead(c, i, r, offset, valLength)
	}
	i.Val = make([]byte, valLength)
	_, err := r.ReadAt(i.Val, offset)
	return err
}
func readDirectoryEnd(r io.ReaderAt, size int64) (dir *directoryEnd, err error) {
	// look for directoryEndSignature in the last 1k, then in the last 65k
	var buf []byte
	var directoryEndOffset int64
	for i, bLen := range []int64{1024, 65 * 1024} {
		if bLen > size {
			bLen = size
		}
		buf = make([]byte, int(bLen))
		if _, err := r.ReadAt(buf, size-bLen); err != nil && err != io.EOF {
			return nil, err
		}
		if p := findSignatureInBlock(buf); p >= 0 {
			buf = buf[p:]
			directoryEndOffset = size - bLen + int64(p)
			break
		}
		if i == 1 || bLen == size {
			return nil, ErrFormat
		}
	}

	// read header into struct
	b := readBuf(buf[4:]) // skip signature
	d := &directoryEnd{
		diskNbr:            uint32(b.uint16()),
		dirDiskNbr:         uint32(b.uint16()),
		dirRecordsThisDisk: uint64(b.uint16()),
		directoryRecords:   uint64(b.uint16()),
		directorySize:      uint64(b.uint32()),
		directoryOffset:    uint64(b.uint32()),
		commentLen:         b.uint16(),
	}
	l := int(d.commentLen)
	if l > len(b) {
		return nil, errors.New("zip: invalid comment length")
	}
	d.comment = string(b[:l])

	// These values mean that the file can be a zip64 file
	if d.directoryRecords == 0xffff || d.directorySize == 0xffff || d.directoryOffset == 0xffffffff {
		p, err := findDirectory64End(r, directoryEndOffset)
		if err == nil && p >= 0 {
			err = readDirectory64End(r, p, d)
		}
		if err != nil {
			return nil, err
		}
	}

	// Make sure directoryOffset points to somewhere in our file.
	if o := int64(d.directoryOffset); o < 0 || o >= size {
		return nil, ErrFormat
	}

	return d, nil
}
func readTuple(r io.ReaderAt, offset uint32) (uint32, uint32, error) {
	tuple := make([]byte, 8)
	_, err := r.ReadAt(tuple, int64(offset))
	if err != nil {
		return 0, 0, err
	}
	first := binary.LittleEndian.Uint32(tuple[:4])
	second := binary.LittleEndian.Uint32(tuple[4:])
	return first, second, nil
}
func readFarPointerAt(r io.ReaderAt, off int64, n int) int64 {
	var b [3]byte
	off += int64(len(b)) * int64(n)
	_, err := r.ReadAt(b[:], off)
	if err != nil {
		// BUG: shouldn't panic
		panic(err)
	}
	bank := int64(b[0])
	return bank<<14 + int64(b[2])&0x3F<<8 + int64(b[1])
}
func readNearPointerAt(r io.ReaderAt, off int64, n int) int64 {
	var b [2]byte
	off += int64(len(b)) * int64(n)
	_, err := r.ReadAt(b[:], off)
	if err != nil {
		// BUG: shouldn't panic
		panic(err)
	}
	p := off&^0x3FFF + int64(b[1])&0x3F<<8 + int64(b[0])
	return p
}
func Decode24(r io.ReaderAt) (*Id3v24Tag, error) {
	headerBytes := make([]byte, 10)
	if _, err := r.ReadAt(headerBytes, 0); err != nil {
		return nil, err
	}
	header, err := parseId3v24Header(headerBytes)
	if err != nil {
		return nil, err
	}

	br := bufio.NewReader(io.NewSectionReader(r, 10, int64(header.Size)))

	var extendedHeader Id3v24ExtendedHeader
	if header.Flags.ExtendedHeader {
		var err error
		if extendedHeader, err = parseId3v24ExtendedHeader(br); err != nil {
			return nil, err
		}
	}

	result := &Id3v24Tag{
		Header:         header,
		ExtendedHeader: extendedHeader,
		Frames:         make(map[string][]*Id3v24Frame),
	}

	var totalSize uint32
	totalSize += extendedHeader.Size

	for totalSize < header.Size {
		hasFrame, err := hasId3v24Frame(br)
		if err != nil {
			return nil, err
		}
		if !hasFrame {
			break
		}
		frame, err := parseId3v24Frame(br)
		if err != nil {
			return nil, err
		}
		// 10 bytes for the frame header, and the body.
		totalSize += 10 + frame.Header.Size
		result.Frames[frame.Header.Id] = append(result.Frames[frame.Header.Id], frame)
	}

	return result, nil
}
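// A usage sketch for Decode24 above, with a hypothetical path: os.File
// implements io.ReaderAt, so an ID3v2.4 tag can be decoded straight from an
// open file. printFrameCounts is a hypothetical helper.
func printFrameCounts(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	tag, err := Decode24(f)
	if err != nil {
		return err
	}
	for id, frames := range tag.Frames {
		fmt.Printf("%s: %d frame(s)\n", id, len(frames))
	}
	return nil
}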
func ripPalette(r io.ReaderAt, off int64, n int) color.Palette {
	var palette [4]byte
	_, err := r.ReadAt(palette[:], off+int64(n)*int64(len(palette)))
	if err != nil {
		return nil
	}
	return color.Palette{
		color.White,
		RGB15(int16(palette[0]) + int16(palette[1])<<8),
		RGB15(int16(palette[2]) + int16(palette[3])<<8),
		color.Black,
	}
}