func readFromFile(file *os.File, offset, size int) ([]byte, error) {
	res := make([]byte, size)
	if _, err := file.ReadAt(res, int64(offset)); err != nil {
		return nil, err
	}
	return res, nil
}
// Read reads the data described by i. The caller must supply a desBuf
// large enough to hold the result.
func (this *BigFile) Read(i BigFileIndex, desBuf []byte) error {
	if i.FileNo >= this.bigfileStat.FileCnt {
		return log.Error("BigFile.Read FileNo[%d] Error", i.FileNo)
	}
	if i.Length > uint32(len(desBuf)) {
		return log.Error("BigFile.Read BigFileIndex.Length[%d] > len(desBuf)[%d]", i.Length, uint32(len(desBuf)))
	}
	var f *os.File
	if i.FileNo == this.bigfileStat.FileCnt-1 {
		f = this.readwriteFile
	} else {
		f = this.readOnlyFile[i.FileNo]
	}
	n, err := f.ReadAt(desBuf[:i.Length], int64(i.Offset))
	if err == io.EOF {
		if uint32(n) == i.Length { // read ended exactly at end of file
			return nil
		}
	}
	if uint32(n) != i.Length {
		return log.Error("Read Length Error offset[%d] destBuf len[%d],ReadAt len[%d]", i.Offset, i.Length, n)
	}
	if err != nil {
		return log.Error("ReadAt file", err.Error())
	}
	return nil
}
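// Read above touches three fields of BigFileIndex. This is a minimal sketch
// of a compatible definition, inferred from that usage; the real field types
// and any extra fields may differ.
type BigFileIndex struct {
	FileNo uint32 // which data file the record lives in
	Offset uint32 // byte offset within that file
	Length uint32 // record length in bytes
}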
func ReadHashNode(f *os.File, offset int64, dir string) (*HashNode, error) {
	b := make([]byte, hashNodeSize)
	l, err := f.ReadAt(b, offset)
	if err != nil {
		return nil, err
	}
	if l != hashNodeSize {
		return nil, fmt.Errorf("read %d bytes, want %d", l, hashNodeSize)
	}
	r := bytes.NewReader(b)
	h := &HashNode{Offset: offset}
	binary.Read(r, binary.LittleEndian, &h.Hash)
	if h.Hash == 0 {
		return h, nil
	}
	h.Namespace = nullTerminatedString(b[4 : 128+4])
	namespaceDetailsReader := bytes.NewReader(b[128+4:])
	nd, err := ReadNamespaceDetails(namespaceDetailsReader)
	if err != nil {
		return nil, err
	}
	nd.NamespaceBase = strings.Split(h.Namespace, ".")[0]
	h.NamespaceDetails = nd
	h.NamespaceDetails.Dir = dir
	return h, nil
}
// fetchByFetchlist fetches the messages in the supplied fetchList and sends them to the channel.
func (p *MessagePartition) fetchByFetchlist(fetchList []fetchEntry, messageC chan MessageAndId) error {
	var fileId uint64
	var file *os.File
	var err error
	for _, f := range fetchList {
		// ensure that we read from the correct file
		if file == nil || fileId != f.fileId {
			file, err = p.checkoutMessagefile(f.fileId)
			if err != nil {
				return err
			}
			defer p.releaseMessagefile(f.fileId, file)
			fileId = f.fileId
		}
		msg := make([]byte, f.size)
		_, err = file.ReadAt(msg, f.offset)
		if err != nil {
			return err
		}
		messageC <- MessageAndId{f.messageId, msg}
	}
	return nil
}
// The Go build ID is stored at the beginning of the Mach-O __text segment.
// The caller has already opened filename, to get f, and read a few kB out, in data.
// Sadly, that's not guaranteed to hold the note, because there is an arbitrary amount
// of other junk placed in the file ahead of the main text.
func readMachoGoBuildID(filename string, f *os.File, data []byte) (buildid string, err error) {
	// If the data we want has already been read, don't worry about Mach-O parsing.
	// This is both an optimization and a hedge against the Mach-O parsing failing
	// in the future due to, for example, the name of the __text section changing.
	if b, err := readRawGoBuildID(filename, data); b != "" && err == nil {
		return b, err
	}

	mf, err := macho.NewFile(f)
	if err != nil {
		return "", &os.PathError{Path: filename, Op: "parse", Err: err}
	}

	sect := mf.Section("__text")
	if sect == nil {
		// Every binary has a __text section. Something is wrong.
		return "", &os.PathError{Path: filename, Op: "parse", Err: fmt.Errorf("cannot find __text section")}
	}

	// It should be in the first few bytes, but read a lot just in case,
	// especially given our past problems on OS X with the build ID moving.
	// There shouldn't be much difference between reading 4kB and 32kB:
	// the hard part is getting to the data, not transferring it.
	n := sect.Size
	if n > uint64(BuildIDReadSize) {
		n = uint64(BuildIDReadSize)
	}
	buf := make([]byte, n)
	if _, err := f.ReadAt(buf, int64(sect.Offset)); err != nil {
		return "", err
	}
	return readRawGoBuildID(filename, buf)
}
func Offset(bin *os.File) (int64, error) {
	fi, err := bin.Stat()
	if err != nil {
		return 0, err
	}
	// Layout at the end of the file: <data> <tag> <8-byte little-endian data size>.
	trailerSize := int64(len(tag)) + 8
	if fi.Size() < trailerSize {
		return 0, InvalidFileErr
	}
	buf := make([]byte, len(tag))
	// ReadAt guarantees a non-nil error whenever n < len(buf), so checking n is enough.
	if n, _ := bin.ReadAt(buf, fi.Size()-int64(len(buf))-8); n < len(buf) {
		return 0, InvalidFileErr
	}
	for i, tagByte := range tag {
		if buf[i] != tagByte {
			return 0, InvalidFileErr
		}
	}
	buf = make([]byte, 8)
	if n, _ := bin.ReadAt(buf, fi.Size()-int64(len(buf))); n < len(buf) {
		return 0, InvalidFileErr
	}
	dataSize := int64(binary.LittleEndian.Uint64(buf))
	offset := fi.Size() - dataSize - trailerSize
	return offset, nil
}
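// For context, Offset above expects a trailer of the form <data><tag><size>
// at the end of the binary. This is a minimal writer-side sketch under that
// assumption; the real tool that appends the payload may differ, and
// appendWithTrailer is an illustrative name.
func appendWithTrailer(bin *os.File, data []byte, tag []byte) error {
	// Append the payload at the current end of the file.
	if _, err := bin.Seek(0, io.SeekEnd); err != nil {
		return err
	}
	if _, err := bin.Write(data); err != nil {
		return err
	}
	// Append the tag, then the payload size as 8 little-endian bytes,
	// which is exactly what Offset reads back.
	if _, err := bin.Write(tag); err != nil {
		return err
	}
	size := make([]byte, 8)
	binary.LittleEndian.PutUint64(size, uint64(len(data)))
	_, err := bin.Write(size)
	return err
}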
// verifyChecksum computes the SHA-256 hash of a file and compares it
// to a checksum. If the comparison fails, it returns an error.
func verifyChecksum(fd *os.File, checksum string) (err error) {
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("verifyChecksum() -> %v", e)
		}
	}()
	h := sha256.New()
	buf := make([]byte, 4096)
	var offset int64
	for {
		block, err := fd.ReadAt(buf, offset)
		if err != nil && err != io.EOF {
			panic(err)
		}
		if block == 0 {
			break
		}
		h.Write(buf[:block])
		offset += int64(block)
	}
	hexhash := fmt.Sprintf("%x", h.Sum(nil))
	if hexhash != checksum {
		return fmt.Errorf("checksum validation failed: got '%s', expected '%s'", hexhash, checksum)
	}
	return
}
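// A short usage sketch for verifyChecksum: compute the expected digest ahead
// of time (e.g. with sha256sum), then verify the open file against it. The
// file path and digest here are placeholders.
func exampleVerify() error {
	fd, err := os.Open("/tmp/payload.bin")
	if err != nil {
		return err
	}
	defer fd.Close()
	// Hex-encoded SHA-256 of the expected contents (placeholder: the digest
	// of the empty string).
	const want = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
	return verifyChecksum(fd, want)
}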
func resourceArmStorageBlobPageSplit(file *os.File) (int64, []resourceArmStorageBlobPage, error) {
	const (
		minPageSize int64 = 4 * 1024
		maxPageSize int64 = 4 * 1024 * 1024
	)

	info, err := file.Stat()
	if err != nil {
		return int64(0), nil, fmt.Errorf("Could not stat file %q: %s", file.Name(), err)
	}

	blobSize := info.Size()
	if info.Size()%minPageSize != 0 {
		blobSize = info.Size() + (minPageSize - (info.Size() % minPageSize))
	}

	emptyPage := make([]byte, minPageSize)

	type byteRange struct {
		offset int64
		length int64
	}

	var nonEmptyRanges []byteRange
	var currentRange byteRange
	for i := int64(0); i < blobSize; i += minPageSize {
		pageBuf := make([]byte, minPageSize)
		_, err = file.ReadAt(pageBuf, i)
		if err != nil && err != io.EOF {
			return int64(0), nil, fmt.Errorf("Could not read chunk at %d: %s", i, err)
		}

		if bytes.Equal(pageBuf, emptyPage) {
			if currentRange.length != 0 {
				nonEmptyRanges = append(nonEmptyRanges, currentRange)
			}
			currentRange = byteRange{
				offset: i + minPageSize,
			}
		} else {
			currentRange.length += minPageSize
			if currentRange.length == maxPageSize || (currentRange.offset+currentRange.length == blobSize) {
				nonEmptyRanges = append(nonEmptyRanges, currentRange)
				currentRange = byteRange{
					offset: i + minPageSize,
				}
			}
		}
	}

	var pages []resourceArmStorageBlobPage
	for _, nonEmptyRange := range nonEmptyRanges {
		pages = append(pages, resourceArmStorageBlobPage{
			offset:  nonEmptyRange.offset,
			section: io.NewSectionReader(file, nonEmptyRange.offset, nonEmptyRange.length),
		})
	}

	return info.Size(), pages, nil
}
// WalkIndexFile walks through the index file, calling fn with each key,
// offset, size tuple, and stops with the first error returned by fn.
func WalkIndexFile(r *os.File, fn func(key uint64, offset, size uint32) error) error {
	var readerOffset int64
	bytes := make([]byte, 16*RowsToRead)
	count, e := r.ReadAt(bytes, readerOffset)
	glog.V(3).Infoln("file", r.Name(), "readerOffset", readerOffset, "count", count, "e", e)
	readerOffset += int64(count)
	var (
		key          uint64
		offset, size uint32
		i            int
	)
	for count > 0 && e == nil || e == io.EOF {
		for i = 0; i+16 <= count; i += 16 {
			key, offset, size = idxFileEntry(bytes[i : i+16])
			if e = fn(key, offset, size); e != nil {
				return e
			}
		}
		if e == io.EOF {
			return nil
		}
		count, e = r.ReadAt(bytes, readerOffset)
		glog.V(3).Infoln("file", r.Name(), "readerOffset", readerOffset, "count", count, "e", e)
		readerOffset += int64(count)
	}
	return e
}
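// Each 16-byte index row decoded above packs a key, an offset, and a size.
// A minimal sketch of a compatible idxFileEntry, assuming big-endian fields;
// the real decoder in the package defines the authoritative byte order.
func idxFileEntry(bytes []byte) (key uint64, offset uint32, size uint32) {
	key = binary.BigEndian.Uint64(bytes[:8])
	offset = binary.BigEndian.Uint32(bytes[8:12])
	size = binary.BigEndian.Uint32(bytes[12:16])
	return
}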
func (me data) ReadAt(p []byte, off int64) (n int, err error) {
	for _, fi := range me.info.UpvertedFiles() {
		if off >= fi.Length {
			off -= fi.Length
			continue
		}
		n1 := len(p)
		if int64(n1) > fi.Length-off {
			n1 = int(fi.Length - off)
		}
		var f *os.File
		f, err = os.Open(me.fileInfoName(fi))
		if err != nil {
			return
		}
		n1, err = f.ReadAt(p[:n1], off)
		f.Close()
		if err != nil {
			return
		}
		n += n1
		off = 0
		p = p[n1:]
		if len(p) == 0 {
			break
		}
	}
	return
}
// ###### Implementation
func openExisting(file *os.File, filename string, config common.Config) (common.WriteAheadLog, error) {
	// Create a buffer for the 8-byte file header. The first 3 bytes are the
	// signature `LOG`, followed by an 8-bit version and the boolean flags.
	// Then read the file header into the buffer.
	buf := make([]byte, 8)
	_, err := file.ReadAt(buf, 0)

	// If there was an error reading the file header, close the file and
	// return a nil log and the read error.
	if err != nil {
		file.Close()
		return nil, err
	}

	// If the header was read successfully, verify the file signature matches
	// the expected "LOG" signature. If the first 3 bytes do not match `LOG`,
	// return a `nil` log and an `ErrInvalidFileSignature`.
	if !bytes.Equal(buf[0:3], LogFileSignature) {
		return nil, ErrInvalidFileSignature
	}

	// Read the boolean flags from the file header and overwrite the config
	// flags with the ones from the file.
	flags, err := xbinary.LittleEndian.Uint32(buf, 4)
	if err != nil {
		return nil, err
	}
	config.Flags = flags

	// The config version is updated to reflect the actual version of the file.
	// Then return the proper log parser based on the file version.
	config.Version = uint8(buf[3])
	return selectVersion(file, filename, config)
}
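// For reference, a minimal sketch of writing the header that openExisting
// parses: 3 signature bytes, 1 version byte, then 4 little-endian flag bytes.
// It uses encoding/binary rather than the package's xbinary helper, and
// writeHeader is an illustrative name based on the reader above.
func writeHeader(file *os.File, version uint8, flags uint32) error {
	buf := make([]byte, 8)
	copy(buf[0:3], LogFileSignature) // the `LOG` signature
	buf[3] = version                 // 8-bit file version
	binary.LittleEndian.PutUint32(buf[4:8], flags)
	_, err := file.WriteAt(buf, 0)
	return err
}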
func getMinMaxValuesWithIndexFromFile(file *os.File, startIndex int, lastIndex int, segmentByteSize int) ([]int64, []int64) {
	numberOfSegments := lastIndex - startIndex
	data := make([]byte, segmentByteSize*numberOfSegments)
	mins := make([]int64, numberOfSegments)
	maxs := make([]int64, numberOfSegments)
	n, err := file.ReadAt(data, int64(startIndex*segmentByteSize))
	if err != nil {
		if err == io.EOF {
			return nil, nil
		}
		log.Fatal(err)
	}
	var start int
	var last int
	for i := 0; i < numberOfSegments; i++ {
		start = i * segmentByteSize
		last = (i + 1) * segmentByteSize
		if last > n {
			last = n
		}
		min, max := getMinMaxValue(data[start:last], last-start)
		mins[i] = min
		maxs[i] = max
	}
	return mins, maxs
}
// readBlock reads random 4 KiB blocks of the file forever, counting completed
// reads in doneBlocks; read errors are deliberately ignored.
func readBlock(file *os.File, stat os.FileInfo, doneBlocks *int64, threadID int) {
	block := make([]byte, 4096)
	numBlocks := stat.Size()/4096 + 1
	for {
		blockID := rand.Int63() % numBlocks
		file.ReadAt(block, blockID*4096)
		atomic.AddInt64(doneBlocks, 1)
	}
}
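// A usage sketch for readBlock: run one reader goroutine per thread and
// sample the shared counter once per second to estimate random-read
// throughput. The file path, thread count, and function name are placeholders.
func benchRandomReads() error {
	file, err := os.Open("/tmp/testfile")
	if err != nil {
		return err
	}
	defer file.Close()
	stat, err := file.Stat()
	if err != nil {
		return err
	}
	var doneBlocks int64
	for i := 0; i < 4; i++ {
		go readBlock(file, stat, &doneBlocks, i)
	}
	for {
		before := atomic.LoadInt64(&doneBlocks)
		time.Sleep(time.Second)
		fmt.Println("blocks/s:", atomic.LoadInt64(&doneBlocks)-before)
	}
}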
// ReadAt reads into an unaligned data buffer via direct I/O.
// Use AllocateAligned to avoid the extra data buffer copy.
func ReadAt(file *os.File, data []byte, offset int64) (int, error) {
	if alignmentShift(data) == 0 {
		return file.ReadAt(data, offset)
	}
	buf := AllocateAligned(len(data))
	n, err := file.ReadAt(buf, offset)
	copy(data, buf)
	return n, err
}
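// The two helpers used above belong to the same direct-I/O package. A minimal
// sketch of compatible implementations, assuming a 4096-byte alignment
// requirement (AlignSize); the real package may choose a different size.
const AlignSize = 4096

// alignmentShift returns how far the slice's backing array is from the
// required alignment; 0 means the buffer is already aligned.
func alignmentShift(block []byte) int {
	if len(block) == 0 {
		return 0
	}
	return int(uintptr(unsafe.Pointer(&block[0])) & uintptr(AlignSize-1))
}

// AllocateAligned over-allocates and slices forward so the first byte of the
// result sits on an AlignSize boundary.
func AllocateAligned(size int) []byte {
	buf := make([]byte, size+AlignSize)
	if shift := alignmentShift(buf); shift != 0 {
		buf = buf[AlignSize-shift:]
	}
	return buf[:size]
}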
func copyPacket(dst *os.File, src *os.File, offset int64, size int) (err error) {
	buf := make([]byte, size)
	_, err = src.ReadAt(buf, offset)
	if err != nil && err != io.EOF {
		return err
	}
	dst.Write(buf)
	return err
}
func readIndexEntryAtOffset(indexFile *os.File, offset int64) (bytes []byte, err error) {
	if offset < 0 {
		err = fmt.Errorf("offset %d for index file is invalid", offset)
		return
	}
	bytes = make([]byte, NeedleIndexSize)
	_, err = indexFile.ReadAt(bytes, offset)
	return
}
func readIndexEntry(file *os.File, indexPosition int64) (msgOffset uint64, msgSize uint32, err error) {
	msgOffsetBuff := make([]byte, INDEX_ENTRY_SIZE)
	if _, err := file.ReadAt(msgOffsetBuff, indexPosition); err != nil {
		return 0, 0, err
	}
	msgOffset = binary.LittleEndian.Uint64(msgOffsetBuff)
	msgSize = binary.LittleEndian.Uint32(msgOffsetBuff[8:])
	return msgOffset, msgSize, nil
}
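// A short usage sketch: index entries are fixed-size, so the record for
// message n starts at n*INDEX_ENTRY_SIZE. readNthIndexEntry is an
// illustrative helper, not part of the original code.
func readNthIndexEntry(file *os.File, n int64) (msgOffset uint64, msgSize uint32, err error) {
	return readIndexEntry(file, n*INDEX_ENTRY_SIZE)
}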
func ReadPlainARFile(f *os.File) ARFile {
	ar_file := make(map[string]ARFileHeaderContents)
	per_file_header_size := 60
	hbuf := make([]byte, per_file_header_size)
	// Assume magic number header is already read.
	offset := int64(len(AR_MAGIC))
	special_long_filename_file := make([]byte, 0)
	// Go through the AR, reading more and more file-headers + file-bodies.
	for {
		n, err := f.ReadAt(hbuf, offset)
		if err == io.EOF && n == 0 {
			break
		}
		if err != nil {
			panic(fmt.Sprintf("Failed to read AR sub-file header: %s (read %d bytes)", err.Error(), n))
		}
		// Okay, hbuf now has the header contents.
		offset += int64(n)
		fsize, err := strconv.Atoi(strings.TrimSpace(string(hbuf[48:58])))
		if err != nil {
			panic("Failed to parse AR file size: " + err.Error())
		}
		filename := translateFilename(string(hbuf[0:16]), special_long_filename_file)
		new_header := ARFileHeader{
			Filename:  filename,
			Timestamp: strings.TrimSpace(string(hbuf[16:28])),
			OwnerID:   strings.TrimSpace(string(hbuf[28:34])),
			GroupID:   strings.TrimSpace(string(hbuf[34:40])),
			FileMode:  strings.TrimSpace(string(hbuf[40:48])),
			FileSize:  uint32(fsize)}
		body_buf := make([]byte, fsize)
		_, err2 := f.ReadAt(body_buf, offset)
		if err2 != nil {
			panic("Failed to read AR sub-file contents: " + err2.Error())
		}
		if filename == "/" {
			// Skipping the special GNU symbol-table file.
			// (not adding it to the ar_file map)
		} else if filename == "//" {
			// This is the long-filename file.
			// (not adding it to the ar_file map)
			special_long_filename_file = append(special_long_filename_file, body_buf...)
		} else {
			// Normal file, index it!
			ar_file[filename] = ARFileHeaderContents{new_header, body_buf}
		}
		offset += int64(fsize)
		// Data section should be aligned to 2 bytes.
		if offset%2 != 0 {
			offset += 1
		}
	}
	return ar_file
}
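// For reference, a minimal sketch of what translateFilename has to handle in
// the GNU ar format: short names are padded and end with '/', and names of
// the form "/123" are byte offsets into the long-filename file ("//") read
// above. This sketch is an assumption based on the format, not the original
// helper.
func translateFilename(raw string, longNames []byte) string {
	name := strings.TrimSpace(raw)
	if name == "/" || name == "//" {
		return name // special GNU entries, handled by the caller
	}
	if strings.HasPrefix(name, "/") {
		// "/123": the real name starts at byte 123 of the long-filename file
		// and runs up to the next '/' terminator.
		start, err := strconv.Atoi(name[1:])
		if err != nil || start >= len(longNames) {
			return name
		}
		rest := string(longNames[start:])
		if i := strings.IndexByte(rest, '/'); i >= 0 {
			return rest[:i]
		}
		return rest
	}
	return strings.TrimSuffix(name, "/")
}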
func DoRead(file *os.File, offset int64, buf *[]byte, done chan int64) {
	bytes, err := file.ReadAt(*buf, offset)
	if err != nil && err != io.EOF && bytes <= 0 {
		panic(fmt.Sprintf("error reading at %d: %s", offset, err.Error()))
	}
	done <- offset
}
func readVHDHeader(f *os.File) (header VHDHeader) {
	buff := make([]byte, 512)
	_, err := f.ReadAt(buff, 0)
	check(err)
	binary.Read(bytes.NewBuffer(buff), binary.BigEndian, &header)
	return header
}
func (diskio *DiskIO) readBlock(file *os.File, block BlockInfo, offset int64) []byte {
	blockData := make([]byte, block.length)
	n, err := file.ReadAt(blockData, offset)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("DiskIO : readBlock: Read block %x:%x[%x]\n", block.pieceIndex, block.begin, n)
	return blockData
}
// readField reads a specified number of bytes from the raw file at the given
// offset. It returns the bytes read, or an error on a short read.
func readField(offset int64, bytesToRead uint32, f *os.File) (bytes []byte, err error) {
	cache := make([]byte, bytesToRead)
	bytesRead, err := f.ReadAt(cache, offset)
	if bytesRead != int(bytesToRead) {
		err = fmt.Errorf("read %d bytes; expected %d", bytesRead, bytesToRead)
	}
	return cache, err
}
// addPCFileTable is like addPCTable, but it renumbers the file names referred to by the table
// to use the global numbering maintained in the files map. It adds new files to the
// map as necessary.
func addPCFileTable(p *Prog, b *SymBuffer, f *os.File, loc goobj.Data, sym *Sym, files map[string]int) int {
	if loc.Size == 0 {
		return 0
	}
	off := b.Size()

	src := make([]byte, loc.Size)
	_, err := f.ReadAt(src, loc.Offset)
	if err != nil {
		p.errorf("%v", err)
		return 0
	}

	filenum := make([]int, len(sym.Func.File))
	for i, name := range sym.Func.File {
		num := files[name]
		if num == 0 {
			num = len(files) + 1
			files[name] = num
		}
		filenum[i] = num
	}

	var dst []byte
	newval := int32(-1)
	var it PCIter
	for it.Init(p, src); !it.Done; it.Next() {
		// value delta
		oldval := it.Value
		val := oldval
		if oldval != -1 {
			if oldval < 0 || int(oldval) >= len(filenum) {
				p.errorf("%s: corrupt pc-file table", sym)
				break
			}
			val = int32(filenum[oldval])
		}
		dv := val - newval
		newval = val
		uv := uint32(dv<<1) ^ uint32(dv>>31)
		dst = appendVarint(dst, uv)

		// pc delta
		dst = appendVarint(dst, it.NextPC-it.PC)
	}
	if it.Corrupt {
		p.errorf("%s: corrupt pc-file table", sym)
	}

	// terminating value delta
	dst = appendVarint(dst, 0)

	b.SetSize(off + len(dst))
	copy(b.data[off:], dst)
	return off
}
func GetStruct(file *os.File, hd interface{}, length int, offset int64) (rd int, err error) {
	b := make([]byte, length)
	rd, err = file.ReadAt(b, offset)
	if err != nil {
		return
	}
	buf := bytes.NewBuffer(b)
	err = binary.Read(buf, binary.BigEndian, hd)
	return
}
// decode reads the first sector and checks the 0x55AA boot signature at
// bytes 0x1FE-0x1FF. A nil *Mbr with a nil error means the sector is not a
// valid MBR.
func decode(file *os.File) (*Mbr, error) {
	var mbr Mbr
	if _, err := file.ReadAt(mbr.raw[:], 0); err != nil {
		return nil, err
	}
	if mbr.raw[0x1FE] == 0x55 && mbr.raw[0x1FF] == 0xAA {
		return &mbr, nil
	}
	return nil, nil
}
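// decode above reads into mbr.raw, which implies a fixed 512-byte sector
// buffer. A minimal sketch of a compatible type definition; the real struct
// likely adds parsed fields such as the partition table.
type Mbr struct {
	raw [512]byte // one disk sector: bootstrap code, partition table, 0x55AA signature
}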
func readVHDFooter(f *os.File) (header VHDHeader) {
	info, err := f.Stat()
	check(err)
	buff := make([]byte, 512)
	_, err = f.ReadAt(buff, info.Size()-512)
	check(err)
	binary.Read(bytes.NewBuffer(buff), binary.BigEndian, &header)
	return header
}
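// A usage sketch tying the two VHD readers together: in a dynamic VHD the
// first 512 bytes are a copy of the 512-byte footer at the end of the file,
// so the two decoded structs should match. verifyVHDFooterCopy is an
// illustrative name; check, VHDHeader, readVHDHeader, and readVHDFooter come
// from the snippets above.
func verifyVHDFooterCopy(f *os.File) bool {
	head := readVHDHeader(f)
	foot := readVHDFooter(f)
	return reflect.DeepEqual(head, foot)
}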
func fileReadOffset(f *os.File, offset int64, length int) (b []byte, err error) {
	bs := make([]byte, length)
	if _, err = f.ReadAt(bs, offset); err != nil {
		return b, err
	}
	return bs, nil
}
// addPCTable appends the PC-data table stored in the file f at the location loc
// to the symbol buffer b. It returns the offset of the beginning of the table
// in the buffer.
func addPCTable(p *Prog, b *SymBuffer, f *os.File, loc goobj.Data) int {
	if loc.Size == 0 {
		return 0
	}
	off := b.Size()
	b.SetSize(off + int(loc.Size))
	_, err := f.ReadAt(b.data[off:off+int(loc.Size)], loc.Offset)
	if err != nil {
		p.errorf("%v", err)
	}
	return off
}
func sendDataPkt(file *os.File, blockSize int, blockIndex int, packetCh chan *Packet, blockType BlockType) {
	bytes := make([]byte, blockSize)
	numBytes, _ := file.ReadAt(bytes, int64(blockIndex*blockSize))
	// If we are at the end of the file, chances are the bytes left will
	// be less than blockSize, so adjust.
	if numBytes < blockSize {
		bytes = bytes[0:numBytes]
	}
	block := Block{Number: blockIndex, Data: bytes, Type: blockType}
	outPkt := &Packet{Type: DATA, Payload: block}
	packetCh <- outPkt
}
func readFileAt(fi *os.File, offset int, str string) error {
	d := make([]byte, len(str))
	if _, err := fi.ReadAt(d, int64(offset)); err != nil {
		return err
	}
	if string(d) != str {
		return fmt.Errorf("expected %q at offset %d, got %q", str, offset, d)
	}
	return nil
}