Example 1
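// ReadAt reads into b from the paged in-memory file starting at offset off.
// Pages that were never written are served from a single shared zero-filled
// page. Reading at or beyond f.size returns io.EOF together with the bytes
// actually copied.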
func (f *mem) ReadAt(b []byte, off int64) (n int, err error) {
	avail := f.size - off
	pi := off >> f.pgBits
	po := int(off) & f.pgMask
	rem := len(b)
	if int64(rem) >= avail {
		rem = int(avail)
		err = io.EOF
	}
	var zeroPage *[]byte
	for rem != 0 && avail > 0 {
		pg := f.m[pi]
		if pg == nil {
			if zeroPage == nil {
				zeroPage = buffer.CGet(f.pgSize)
				defer buffer.Put(zeroPage)
			}
			pg = zeroPage
		}
		nc := copy(b[:mathutil.Min(rem, f.pgSize)], (*pg)[po:])
		pi++
		po = 0
		rem -= nc
		n += nc
		b = b[nc:]
	}
	return n, err
}
Example 2
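// WriteAt writes b to the bit filer starting at offset off. Pages touched for
// the first time are allocated on demand and, when a parent filer exists,
// pre-filled with the parent's data before being overwritten; every written
// page is marked dirty and f.size is grown as needed.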
func (f *bitFiler) WriteAt(b []byte, off int64) (n int, err error) {
	off0 := off
	pgI := off >> bfBits
	pgO := int(off & bfMask)
	n = len(b)
	rem := n
	var nc int
	for rem != 0 {
		pg := f.m[pgI]
		if pg == nil {
			pg = &bitPage{}
			pg.pdata = buffer.CGet(bfSize)
			pg.data = *pg.pdata
			if f.parent != nil {
				_, err = f.parent.ReadAt(pg.data, off&^bfMask)
				if err != nil && !fileutil.IsEOF(err) {
					return
				}

				err = nil
			}
			f.m[pgI] = pg
		}
		nc = copy(pg.data[pgO:], b)
		pgI++
		pg.dirty = true
		pgO = 0
		rem -= nc
		b = b[nc:]
		off += int64(nc)
	}
	f.size = mathutil.MaxInt64(f.size, off0+int64(n))
	return
}
Example 3
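// ReadAt reads into b from the bit filer starting at offset off. Pages not yet
// cached are faulted in from the parent filer when one is present. io.EOF is
// returned when the request reaches or passes f.size.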
func (f *bitFiler) ReadAt(b []byte, off int64) (n int, err error) {
	avail := f.size - off
	pgI := off >> bfBits
	pgO := int(off & bfMask)
	rem := len(b)
	if int64(rem) >= avail {
		rem = int(avail)
		err = io.EOF
	}
	for rem != 0 && avail > 0 {
		pg := f.m[pgI]
		if pg == nil {
			pg = &bitPage{}
			pg.pdata = buffer.CGet(bfSize)
			pg.data = *pg.pdata
			if f.parent != nil {
				_, err = f.parent.ReadAt(pg.data, off&^bfMask)
				if err != nil && !fileutil.IsEOF(err) {
					return
				}

				err = nil
			}
			f.m[pgI] = pg
		}
		nc := copy(b[:mathutil.Min(rem, bfSize)], pg.data[pgO:])
		pgI++
		pgO = 0
		rem -= nc
		n += nc
		b = b[nc:]
		off += int64(nc)
	}
	return
}
Example 4
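// WriteAt writes b to the paged in-memory file starting at offset off,
// allocating zero-filled pages on demand and growing f.size as needed.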
func (f *mem) WriteAt(b []byte, off int64) (n int, err error) {
	pi := off >> f.pgBits
	po := int(off) & f.pgMask
	n = len(b)
	rem := n
	var nc int
	for rem != 0 {
		pg := f.m[pi]
		if pg == nil {
			pg = buffer.CGet(f.pgSize)
			f.m[pi] = pg
		}
		nc = copy((*pg)[po:], b)
		pi++
		po = 0
		rem -= nc
		b = b[nc:]
	}
	f.size = mathutil.MaxInt64(f.size, off+int64(n))
	return n, nil
}
Example 5
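// PunchHole zeroes the pages fully covered by the range [off, off+size) by
// replacing each of them with a fresh zero-filled page marked dirty. Pages
// only partially covered by the range are left untouched.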
func (f *bitFiler) PunchHole(off, size int64) (err error) {
	first := off >> bfBits
	if off&bfMask != 0 {
		first++
	}
	off += size - 1
	last := off >> bfBits
	if off&bfMask != 0 {
		last--
	}
	if limit := f.size >> bfBits; last > limit {
		last = limit
	}
	for pgI := first; pgI <= last; pgI++ {
		pg := &bitPage{}
		pg.pdata = buffer.CGet(bfSize)
		pg.data = *pg.pdata
		pg.dirty = true
		f.m[pgI] = pg
	}
	return
}
Example 6
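// realloc resizes the block referenced by handle so that it holds b. It
// shrinks or replaces the block in place when the new size permits, tries to
// expand into a free right neighbour, and otherwise relocates the content to a
// newly allocated block, leaving a relocation record at the original handle.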
func (a *Allocator) realloc(handle int64, b []byte) (err error) {
	var dlen, needAtoms0 int

	pb8 := buffer.Get(8)
	defer buffer.Put(pb8)
	b8 := *pb8
	pdst := buffer.Get(zappy.MaxEncodedLen(len(b)))
	defer buffer.Put(pdst)
	dst := *pdst
	b, needAtoms0, cc, err := a.makeUsedBlock(dst, b)
	if err != nil {
		return
	}

	needAtoms := int64(needAtoms0)
	off := h2off(handle)
	if err = a.read(b8[:], off); err != nil {
		return
	}

	switch tag := b8[0]; tag {
	default:
		dlen = int(b8[0])
	case tagUsedLong:
		dlen = m2n(int(b8[1])<<8 | int(b8[2]))
	case tagUsedRelocated:
		if err = a.free(b2h(b8[1:]), handle, false); err != nil {
			return err
		}

		dlen = 0
	case tagFreeShort, tagFreeLong:
		return &ErrINVAL{"Allocator.Realloc: invalid handle", handle}
	}

	atoms := int64(n2atoms(dlen))
retry:
	switch {
	case needAtoms < atoms:
		// in place shrink
		if err = a.writeUsedBlock(handle, cc, b); err != nil {
			return
		}

		fh, fa := handle+needAtoms, atoms-needAtoms
		sz, err := a.f.Size()
		if err != nil {
			return err
		}

		if h2off(fh)+16*fa == sz {
			return a.f.Truncate(h2off(fh))
		}

		return a.free2(fh, fa)
	case needAtoms == atoms:
		// in place replace
		return a.writeUsedBlock(handle, cc, b)
	}

	// case needAtoms > atoms:
	// in place extend or relocate
	var sz int64
	if sz, err = a.f.Size(); err != nil {
		return
	}

	off = h2off(handle)
	switch {
	case off+atoms*16 == sz:
		// relocating tail block - shortcut
		return a.writeUsedBlock(handle, cc, b)
	default:
		if off+atoms*16 < sz {
			// handle is not a tail block, check right neighbour
			rh := handle + atoms
			rtag, ratoms, p, n, e := a.nfo(rh)
			if e != nil {
				return e
			}

			if rtag == tagFreeShort || rtag == tagFreeLong {
				// Right neighbour is a free block
				if needAtoms <= atoms+ratoms {
					// can expand in place
					if err = a.unlink(rh, ratoms, p, n); err != nil {
						return
					}

					atoms += ratoms
					goto retry

				}
			}
		}
	}

	if atoms > 1 {
		if err = a.realloc(handle, nil); err != nil {
			return
		}
	}

	var newH int64
	if newH, err = a.alloc(b, cc); err != nil {
		return err
	}

	prb := buffer.CGet(16)
	defer buffer.Put(prb)
	rb := *prb
	rb[0] = tagUsedRelocated
	h2b(rb[1:], newH)
	if err = a.writeAt(rb[:], h2off(handle)); err != nil {
		return
	}

	return a.writeUsedBlock(newH, cc, b)
}