func (wc *FileWriteCache) PReadThrough(offset int64, p []byte, r blobstore.PReader) error {
	nr := int64(len(p))
	remo := offset
	remp := p

	for _, patch := range wc.ps {
		if nr <= 0 {
			return nil
		}

		if remo > patch.Right() {
			continue
		}

		if remo < patch.Left() {
			fallbackLen := util.Int64Min(nr, patch.Left()-remo)

			if err := r.PRead(remo, remp[:fallbackLen]); err != nil {
				return err
			}

			remp = remp[fallbackLen:]
			nr -= fallbackLen
			remo += fallbackLen
		}
		if nr <= 0 {
			return nil
		}

		applyOffset := remo - patch.Offset
		applyLen := util.Int64Min(int64(len(patch.P))-applyOffset, nr)
		copy(remp[:applyLen], patch.P[applyOffset:])
		remp = remp[applyLen:]
		nr -= applyLen
		remo += applyLen
	}

	if err := r.PRead(remo, remp); err != nil {
		return err
	}
	return nil
}
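// A minimal sketch of the patch shape the loop above assumes: each element of
// wc.ps covers the byte range [Left(), Right()). This is an illustration only;
// the repository's actual patch type may carry more fields and helpers.
type sketchPatch struct {
	Offset int64  // position of the cached write within the file
	P      []byte // bytes held by the write cache for that range
}

func (p sketchPatch) Left() int64  { return p.Offset }
func (p sketchPatch) Right() int64 { return p.Offset + int64(len(p.P)) }

// With that reading, PReadThrough alternates between falling back to r.PRead
// for the gap before each patch and copying the patch bytes themselves, then
// issues one final fallback read for whatever remains past the last patch.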
func (be *CachedBlobEntry) PRead(p []byte, offset int64) error {
	// FIXME: maybe we should allow stale reads w/o lock
	be.mu.Lock()
	defer be.mu.Unlock()

	be.lastUsed = time.Now()

	requiredlen := util.Int64Min(offset+int64(len(p)), be.bloblen)
	for be.validlen < requiredlen {
		logger.Infof(mylog, "Waiting for cache to be fulfilled: reqlen: %d, validlen: %d", requiredlen, be.validlen)
		be.validlenExtended.Wait()
	}

	return be.cachebh.PRead(p, offset)
}
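// PRead above parks on be.validlenExtended (presumably a sync.Cond tied to
// be.mu) until enough of the blob has been cached. A hedged sketch of the
// producer side that this wait depends on; the repository's actual fetch loop
// may look different:
func (be *CachedBlobEntry) extendValidLenSketch(newValidLen int64) {
	be.mu.Lock()
	defer be.mu.Unlock()
	if newValidLen > be.validlen {
		be.validlen = newValidLen
		// Wake every reader blocked in PRead so it can recheck requiredlen.
		be.validlenExtended.Broadcast()
	}
}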
func (cfio *ChunkedFileIO) ReadAt(p []byte, offset int64) (int, error) {
	remo := offset
	remp := p

	if offset < 0 {
		return 0, fmt.Errorf("negative offset %d given", offset)
	}

	cs, err := cfio.caio.Read()
	if err != nil {
		return 0, fmt.Errorf("Failed to read cs array: %v", err)
	}
	if !fl.IsReadAllowed(cfio.bs.Flags()) {
		return 0, EPERM
	}

	// fmt.Printf("cs: %v\n", cs)
	for i := 0; i < len(cs) && len(remp) > 0; i++ {
		c := cs[i]
		if c.Left() > remo+int64(len(remp)) {
			break
		}
		if c.Right() <= remo {
			continue
		}

		coff := remo - c.Left()
		if coff < 0 {
			// Fill gap with zero
			n := util.Int64Min(int64(len(remp)), -coff)
			for j := int64(0); j < n; j++ {
				remp[j] = 0
			}
			remo += n
			remp = remp[n:] // advance the destination past the zero-filled gap
			coff = 0

			if len(remp) == 0 {
				return int(remo - offset), nil
			}
		}

		bh, err := cfio.bs.Open(c.BlobPath, fl.O_RDONLY)
		if err != nil {
			return int(remo - offset), fmt.Errorf("Failed to open path \"%s\" for reading: %v", c.BlobPath, err)
		}
		defer func() {
			if err := bh.Close(); err != nil {
				logger.Criticalf(mylog, "blobhandle Close failed: %v", err)
			}
		}()
		cio := cfio.newChunkIO(bh, cfio.c, c.Offset)
		defer func() {
			if err := cio.Close(); err != nil {
				logger.Criticalf(mylog, "cio Close failed: %v", err)
			}
		}()

		// Bound the chunk read by the remaining buffer, not the full p.
		n := util.Int64Min(int64(len(remp)), c.Length-coff)
		if err := cio.PRead(remp[:n], coff); err != nil {
			return int(remo - offset), err
		}

		remo += n
		remp = remp[n:]
		if len(remp) == 0 {
			return int(remo - offset), nil
		}
	}
	// logger.Debugf(mylog, "cs: %+v", cs)

	return int(remo - offset), nil
}
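// Worked example (hypothetical numbers) of the gap handling in ReadAt above:
// reading 100 bytes at offset 50 against a single chunk covering [80, 200).
// coff starts negative, so the first 30 destination bytes are zero-filled and
// the remaining 70 bytes come from chunk-internal offset 0.
func readAtGapExampleSketch() {
	var (
		offset  int64 = 50
		rempLen int64 = 100
		left    int64 = 80  // c.Left()
		length  int64 = 120 // c.Length
	)
	coff := offset - left                // -30: the read starts before the chunk
	gap := util.Int64Min(rempLen, -coff) // 30 bytes of zero fill
	n := util.Int64Min(rempLen-gap, length)
	fmt.Printf("zero-filled gap: %d bytes, chunk read: %d bytes\n", gap, n) // 30 and 70
}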
func (wc *FileWriteCache) ReadAtThrough(p []byte, offset int64, r ReadAter) (int, error) {
	nr := 0
	remo := offset
	remp := p

	for _, patch := range wc.ps {
		if len(remp) == 0 {
			return nr, nil
		}

		if patch.IsSentinel() {
			break
		}

		if remo > patch.Right() {
			continue
		}

		if remo < patch.Left() {
			fallbackLen64 := util.Int64Min(int64(len(remp)), patch.Left()-remo)
			if fallbackLen64 > math.MaxInt32 {
				panic("Logic error: fallbackLen should always be in int32 range")
			}
			fallbackLen := int(fallbackLen64)

			n, err := r.ReadAt(remp[:fallbackLen], remo)
			logger.Debugf(wclog, "BeforePatch: ReadAt issued offset %d, len %d bytes, read %d bytes", remo, fallbackLen, n)
			if err != nil {
				return nr + n, err
			}
			if n < fallbackLen {
				zerofill(remp[n:fallbackLen])
			}

			nr += fallbackLen
			remp = remp[fallbackLen:]
			remo += int64(fallbackLen)
		}

		if len(remp) == 0 {
			return nr, nil
		}

		applyOffset64 := remo - patch.Offset
		if applyOffset64 > math.MaxInt32 {
			panic("Logic error: applyOffset should always be in int32 range")
		}
		applyOffset := int(applyOffset64)
		applyLen := util.IntMin(len(patch.P)-applyOffset, len(remp))
		copy(remp[:applyLen], patch.P[applyOffset:])

		nr += applyLen
		remp = remp[applyLen:]
		remo += int64(applyLen)
	}

	n, err := r.ReadAt(remp, remo)
	logger.Debugf(wclog, "Last: ReadAt read %d bytes", n)
	if err != nil {
		return nr, err
	}
	nr += n

	return nr, nil
}
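// ReadAtThrough above leans on two small pieces that are not shown in this
// excerpt. The following is a sketch of the shapes it assumes; the
// repository's own definitions may differ.

// ReadAter is the io.ReaderAt-style dependency the fallback reads go to.
type ReadAter interface {
	ReadAt(p []byte, offset int64) (int, error)
}

// zerofill clears the tail of a short fallback read so the caller still sees
// fully initialized bytes for the requested range.
func zerofill(p []byte) {
	for i := range p {
		p[i] = 0
	}
}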