Example #1
func (f *flt) setHead(h, atoms int64, fi Filer) (err error) {
	switch {
	case atoms < 1:
		panic(atoms)
	case atoms >= maxFLTRq:
		pb := buffer.Get(7)
		defer buffer.Put(pb)
		b := *pb
		if _, err = fi.WriteAt(h2b(b[:], h), 8*13+1); err != nil {
			return
		}

		f[13].head = h
		return
	default:
		lg := mathutil.Log2Uint16(uint16(atoms))
		g := f[lg:]
		for i := range f {
			if atoms < g[i+1].minSize {
				pb := buffer.Get(7)
				defer buffer.Put(pb)
				b := *pb
				if _, err = fi.WriteAt(h2b(b[:], h), 8*int64(i+lg)+1); err != nil {
					return
				}

				g[i].head = h
				return
			}
		}
		panic("internal error")
	}
}
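All of the examples on this page share one pattern: borrow a *[]byte from the buffer pool with Get, dereference it to obtain a working slice, and hand it back with Put (usually via defer) when done. A minimal sketch of just that pattern, assuming only the Get/Put signatures used above (the helper name is illustrative):

func withScratch(n int, f func(b []byte)) {
	pb := buffer.Get(n)  // borrow a *[]byte of length n, possibly recycled
	defer buffer.Put(pb) // always return the buffer to the pool
	f(*pb)               // use the slice; do not retain it after Put runs
}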
Example #2
// cBstr returns a Go byte slice from a C char*. The caller is expected to
// recycle the returned buffer when it is no longer needed.
//
//	p := cBstr(s)
//	b := *p
//	... use b, finally
//	buffer.Put(p)
func cBstr(s *int8) *[]byte {
	pb := buffer.Get(16)
	b := (*pb)[:0]
	// The loop advances s itself, one byte at a time, until the terminating NUL.
	for p := (*uintptr)(unsafe.Pointer(&s)); *s != 0; (*p)++ {
		if len(b) == cap(b) {
			// Grow via the pool: borrow a larger buffer, copy, recycle the old one.
			npb := buffer.Get(2 * len(b))
			copy(*npb, b)
			buffer.Put(pb)
			pb = npb
			b = (*pb)[:len(b)]
		}
		b = append(b, byte(*s))
	}
	*pb = b // expose only the bytes actually appended
	return pb
}
Example #3
func writeTo(f Interface, w io.Writer) (n int64, err error) {
	p := buffer.Get(copyBufSize)
	b := *p
	defer buffer.Put(p)

	var off int64
	var werr error
	for {
		rn, rerr := f.ReadAt(b, off)
		if rn != 0 {
			_, werr = w.Write(b[:rn])
			n += int64(rn)
			off += int64(rn)
		}
		if rerr != nil {
			if !fileutil.IsEOF(rerr) {
				err = rerr
			}
			break
		}

		if werr != nil {
			err = werr
			break
		}
	}
	return n, err
}
Example #4
func readFrom(f Interface, r io.Reader) (n int64, err error) {
	f.Truncate(0)
	p := buffer.Get(copyBufSize)
	b := *p
	defer buffer.Put(p)

	var off int64
	var werr error
	for {
		rn, rerr := r.Read(b)
		if rn != 0 {
			_, werr = f.WriteAt(b[:rn], off)
			n += int64(rn)
			off += int64(rn)
		}
		if rerr != nil {
			if !fileutil.IsEOF(rerr) {
				err = rerr
			}
			break
		}

		if werr != nil {
			err = werr
			break
		}
	}
	return n, err
}
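Examples #3 and #4 are the two directions of the same chunked copy loop: read a block, write it at the current offset, advance, and stop on EOF or error. A minimal standalone version of the read side against the standard io interfaces (the function name, parameters and exact error ordering are illustrative only, assuming the io package is imported):

func copyToWriterAt(w io.WriterAt, r io.Reader, bufSize int) (n int64, err error) {
	b := make([]byte, bufSize)
	var off int64
	for {
		rn, rerr := r.Read(b)
		if rn > 0 {
			// Write whatever was read before looking at the read error.
			if _, werr := w.WriteAt(b[:rn], off); werr != nil {
				return n, werr
			}
			n += int64(rn)
			off += int64(rn)
		}
		if rerr == io.EOF {
			return n, nil
		}
		if rerr != nil {
			return n, rerr
		}
	}
}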
Example #5
func (a *Allocator) write(off int64, b ...[]byte) (err error) {
	rq := 0
	for _, part := range b {
		rq += len(part)
	}
	pbuf := buffer.Get(rq)
	defer buffer.Put(pbuf)
	buf := *pbuf
	buf = buf[:0]
	for _, part := range b {
		buf = append(buf, part...)
	}
	return a.writeAt(buf, off)
}
Example #6
// Alloc allocates storage space for b and returns the handle of the new block
// with content set to b, or an error, if any. The returned handle is valid only
// while the block is in use - until the block is deallocated. No two valid
// handles share the same value within the same Filer, but any value of a
// handle not referring to any used block may become valid at any time as a
// result of Alloc.
//
// Invoking Alloc on an empty Allocator is guaranteed to return a handle with
// value 1. The intended use of the content of handle 1 is as a root "directory"
// of the other data held by the Allocator.
//
// Passing handles that were not obtained from Alloc, or that are no longer
// valid, to any other Allocator method can result in an irreparably corrupted
// database.
func (a *Allocator) Alloc(b []byte) (handle int64, err error) {
	pbuf := buffer.Get(zappy.MaxEncodedLen(len(b)))
	defer buffer.Put(pbuf)
	buf := *pbuf
	buf, _, cc, err := a.makeUsedBlock(buf, b)
	if err != nil {
		return
	}

	if handle, err = a.alloc(buf, cc); err == nil {
		a.cadd(b, handle)
	}
	return
}
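A hypothetical round trip built only on the Alloc and Get methods shown on this page; the helper name and the assumption of a freshly created, empty Allocator are illustrative:

// storeRoot writes root as the very first block of an empty Allocator and
// verifies it can be read back. Per the doc comment above, the first Alloc on
// an empty Allocator yields handle 1, the conventional root "directory" slot.
func storeRoot(a *Allocator, root []byte) (int64, error) {
	h, err := a.Alloc(root)
	if err != nil {
		return 0, err
	}

	if _, err = a.Get(nil, h); err != nil { // a nil buf is valid
		return 0, err
	}
	return h, nil
}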
Example #7
func (f *flt) load(fi Filer, off int64) (err error) {
	pb := buffer.Get(fltSz)
	defer buffer.Put(pb)
	b := *pb
	if _, err = fi.ReadAt(b[:], off); err != nil {
		return
	}

	for i := range *f {
		off := 8*i + 1
		f[i].head = b2h(b[off:])
	}
	return
}
Example #8
// Set h.next = n
func (a *Allocator) next(h, n int64) (err error) {
	pb := buffer.Get(7)
	defer buffer.Put(pb)
	b := *pb
	off := h2off(h)
	if err = a.read(b[:1], off); err != nil {
		return
	}

	switch tag := b[0]; tag {
	default:
		return &ErrILSEQ{Type: ErrExpFreeTag, Off: off, Arg: int64(tag)}
	case tagFreeShort:
		off += 8
	case tagFreeLong:
		off += 15
	}
	return a.writeAt(h2b(b[:7], n), off)
}
Example #9
// nfo returns h's tag. If h refers to a free block, the (s)ize (in atoms),
// (p)rev and (n)ext fields are returned as well. If it refers to a used block,
// only the (s)ize is returned (again in atoms). If it refers to a used
// relocated block, (n)ext is set to the relocation target handle.
func (a *Allocator) nfo(h int64) (tag byte, s, p, n int64, err error) {
	off := h2off(h)
	rq := int64(22)
	sz, err := a.f.Size()
	if err != nil {
		return
	}

	if off+rq >= sz {
		if rq = sz - off; rq < 15 {
			err = io.ErrUnexpectedEOF
			return
		}
	}

	pbuf := buffer.Get(22)
	defer buffer.Put(pbuf)
	buf := *pbuf
	if err = a.read(buf[:rq], off); err != nil {
		return
	}

	switch tag = buf[0]; tag {
	default:
		s = int64(n2atoms(int(tag)))
	case tagUsedLong:
		s = int64(n2atoms(m2n(int(buf[1])<<8 | int(buf[2]))))
	case tagFreeLong:
		if rq < 22 {
			err = io.ErrUnexpectedEOF
			return
		}

		s, p, n = b2h(buf[1:]), b2h(buf[8:]), b2h(buf[15:])
	case tagUsedRelocated:
		s, n = 1, b2h(buf[1:])
	case tagFreeShort:
		s, p, n = 1, b2h(buf[1:]), b2h(buf[8:])
	}
	return
}
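A hypothetical helper showing how a caller might interpret the returned tag; only nfo and the tag constants come from the code above:

// isFree reports whether the block at handle h is currently a free block.
func (a *Allocator) isFree(h int64) (bool, error) {
	tag, _, _, _, err := a.nfo(h)
	if err != nil {
		return false, err
	}
	return tag == tagFreeShort || tag == tagFreeLong, nil
}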
Example #10
func (a *Allocator) writeUsedBlock(h int64, cc byte, b []byte) (err error) {
	n := len(b)
	rq := n2atoms(n) << 4
	pbuf := buffer.Get(rq)
	defer buffer.Put(pbuf)
	buf := *pbuf
	if n <= maxShort {
		buf[0] = byte(n)
		copy(buf[1:], b)
	} else {
		m := n2m(n)
		buf[0], buf[1], buf[2] = tagUsedLong, byte(m>>8), byte(m)
		copy(buf[3:], b)
	}
	if p := n2padding(n); p != 0 {
		copy(buf[rq-1-p:], zeros[:])
	}
	buf[rq-1] = cc
	return a.writeAt(buf, h2off(h))
}
Example #11
// leftNfo returns nfo for h's left neighbor if h > 1 and the left neighbor is
// a free block. Otherwise all zero values are returned instead.
func (a *Allocator) leftNfo(h int64) (tag byte, s, p, n int64, err error) {
	if !(h > 1) {
		return
	}

	pbuf := buffer.Get(8)
	defer buffer.Put(pbuf)
	buf := *pbuf
	off := h2off(h)
	if err = a.read(buf[:], off-8); err != nil {
		return
	}

	switch tag := buf[7]; tag {
	case tagFreeShort:
		return a.nfo(h - 1)
	case tagFreeLong:
		return a.nfo(h - b2h(buf[:]))
	}
	return
}
Example #12
// Make the filer image @h a free block.
func (a *Allocator) makeFree(h, atoms, prev, next int64) (err error) {
	pbuf := buffer.Get(22)
	defer buffer.Put(pbuf)
	buf := *pbuf
	switch {
	case atoms == 1:
		buf[0], buf[15] = tagFreeShort, tagFreeShort
		h2b(buf[1:], prev)
		h2b(buf[8:], next)
		if err = a.write(h2off(h), buf[:16]); err != nil {
			return
		}
	default:

		buf[0] = tagFreeLong
		h2b(buf[1:], atoms)
		h2b(buf[8:], prev)
		h2b(buf[15:], next)
		if err = a.write(h2off(h), buf[:22]); err != nil {
			return
		}

		h2b(buf[:], atoms)
		buf[7] = tagFreeLong
		if err = a.write(h2off(h+atoms)-8, buf[:8]); err != nil {
			return
		}
	}
	if prev != 0 {
		if err = a.next(prev, h); err != nil {
			return
		}
	}

	if next != 0 {
		err = a.prev(next, h)
	}
	return
}
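For reference, the offsets written above imply the following free-block layouts (a reading of this code, not an authoritative format description):

// Short free block (atoms == 1, 16 bytes):
//	[0]	tagFreeShort
//	[1:8]	prev handle
//	[8:15]	next handle
//	[15]	tagFreeShort
//
// Long free block (atoms >= 2):
//	[0]	tagFreeLong
//	[1:8]	size in atoms
//	[8:15]	prev handle
//	[15:22]	next handle
//	...
//	last 8 bytes of the block: size in atoms followed by tagFreeLong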
Example #13
func (a *Allocator) realloc(handle int64, b []byte) (err error) {
	var dlen, needAtoms0 int

	pb8 := buffer.Get(8)
	defer buffer.Put(pb8)
	b8 := *pb8
	pdst := buffer.Get(zappy.MaxEncodedLen(len(b)))
	defer buffer.Put(pdst)
	dst := *pdst
	b, needAtoms0, cc, err := a.makeUsedBlock(dst, b)
	if err != nil {
		return
	}

	needAtoms := int64(needAtoms0)
	off := h2off(handle)
	if err = a.read(b8[:], off); err != nil {
		return
	}

	switch tag := b8[0]; tag {
	default:
		dlen = int(b8[0])
	case tagUsedLong:
		dlen = m2n(int(b8[1])<<8 | int(b8[2]))
	case tagUsedRelocated:
		if err = a.free(b2h(b8[1:]), handle, false); err != nil {
			return err
		}

		dlen = 0
	case tagFreeShort, tagFreeLong:
		return &ErrINVAL{"Allocator.Realloc: invalid handle", handle}
	}

	atoms := int64(n2atoms(dlen))
retry:
	switch {
	case needAtoms < atoms:
		// in place shrink
		if err = a.writeUsedBlock(handle, cc, b); err != nil {
			return
		}

		fh, fa := handle+needAtoms, atoms-needAtoms
		sz, err := a.f.Size()
		if err != nil {
			return err
		}

		if h2off(fh)+16*fa == sz {
			return a.f.Truncate(h2off(fh))
		}

		return a.free2(fh, fa)
	case needAtoms == atoms:
		// in place replace
		return a.writeUsedBlock(handle, cc, b)
	}

	// case needAtoms > atoms:
	// in place extend or relocate
	var sz int64
	if sz, err = a.f.Size(); err != nil {
		return
	}

	off = h2off(handle)
	switch {
	case off+atoms*16 == sz:
		// relocating tail block - shortcut
		return a.writeUsedBlock(handle, cc, b)
	default:
		if off+atoms*16 < sz {
			// handle is not a tail block, check right neighbour
			rh := handle + atoms
			rtag, ratoms, p, n, e := a.nfo(rh)
			if e != nil {
				return e
			}

			if rtag == tagFreeShort || rtag == tagFreeLong {
				// Right neighbour is a free block
				if needAtoms <= atoms+ratoms {
					// can expand in place
					if err = a.unlink(rh, ratoms, p, n); err != nil {
						return
					}

					atoms += ratoms
					goto retry

				}
			}
		}
	}

	if atoms > 1 {
		if err = a.realloc(handle, nil); err != nil {
			return
		}
	}

	var newH int64
	if newH, err = a.alloc(b, cc); err != nil {
		return err
	}

	prb := buffer.CGet(16)
	defer buffer.Put(prb)
	rb := *prb
	rb[0] = tagUsedRelocated
	h2b(rb[1:], newH)
	if err = a.writeAt(rb[:], h2off(handle)); err != nil {
		return
	}

	return a.writeUsedBlock(newH, cc, b)
}
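The branches above amount to the following decision tree (a summary of the code, not additional behavior), where needAtoms is the size required by the new content and atoms is the size currently occupied:

// needAtoms <  atoms: rewrite in place, then free the tail atoms
//                     (or truncate the file if they were its last atoms)
// needAtoms == atoms: rewrite in place
// needAtoms >  atoms:
//	block is the file's tail block     -> rewrite in place, growing the file
//	right neighbour is free and large
//	enough to cover the difference     -> unlink it, absorb its atoms, retry
//	otherwise                          -> shrink the old block to one atom,
//	                                      alloc a new block elsewhere and leave
//	                                      a tagUsedRelocated stub pointing at
//	                                      the new handle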
Example #14
// Get returns the data content of a block referred to by handle or an error if
// any.  The returned slice may be a sub-slice of buf if buf was large enough
// to hold the entire content.  Otherwise, a newly allocated slice will be
// returned.  It is valid to pass a nil buf.
//
// If the content was stored using compression then it is transparently
// returned decompressed.
//
// Handle must have been obtained from Alloc and must still be valid; otherwise
// invalid data may be returned without the error being detected.
//
// Get is safe for concurrent access by multiple goroutines iff no other
// goroutine mutates the DB.
func (a *Allocator) Get(buf []byte, handle int64) (b []byte, err error) {
	buf = buf[:cap(buf)]
	a.mu.Lock() // X1+
	if n, ok := a.m[handle]; ok {
		a.lru.moveToFront(n)
		b = need(len(n.b), buf)
		copy(b, n.b)
		a.expHit++
		a.hit++
		a.mu.Unlock() // X1-
		return
	}

	a.expMiss++
	a.miss++
	if a.miss > 10 && len(a.m) < 500 {
		if 100*a.hit/a.miss < 95 {
			a.cacheSz++
		}
		a.hit, a.miss = 0, 0
	}
	a.mu.Unlock() // X1-

	defer func(h int64) {
		if err == nil {
			a.mu.Lock() // X2+
			a.cadd(b, h)
			a.mu.Unlock() // X2-
		}
	}(handle)

	pfirst := buffer.Get(16)
	defer buffer.Put(pfirst)
	first := *pfirst
	relocated := false
	relocSrc := handle
reloc:
	if handle <= 0 || handle > maxHandle {
		return nil, &ErrINVAL{"Allocator.Get: handle out of limits", handle}
	}

	off := h2off(handle)
	if err = a.read(first, off); err != nil {
		return
	}

	switch tag := first[0]; tag {
	default:
		dlen := int(tag)
		atoms := n2atoms(dlen)
		switch atoms {
		case 1:
			switch tag := first[15]; tag {
			default:
				return nil, &ErrILSEQ{Type: ErrTailTag, Off: off, Arg: int64(tag)}
			case tagNotCompressed:
				b = need(dlen, buf)
				copy(b, first[1:])
				return
			case tagCompressed:
				return zappy.Decode(buf, first[1:dlen+1])
			}
		default:
			pcc := buffer.Get(1)
			defer buffer.Put(pcc)
			cc := *pcc
			dlen := int(tag)
			atoms := n2atoms(dlen)
			tailOff := off + 16*int64(atoms) - 1
			if err = a.read(cc, tailOff); err != nil {
				return
			}

			switch tag := cc[0]; tag {
			default:
				return nil, &ErrILSEQ{Type: ErrTailTag, Off: off, Arg: int64(tag)}
			case tagNotCompressed:
				b = need(dlen, buf)
				off += 1
				if err = a.read(b, off); err != nil {
					b = buf[:0]
				}
				return
			case tagCompressed:
				pzbuf := buffer.Get(dlen)
				defer buffer.Put(pzbuf)
				zbuf := *pzbuf
				off += 1
				if err = a.read(zbuf, off); err != nil {
					return buf[:0], err
				}

				return zappy.Decode(buf, zbuf)
			}
		}
	case 0:
		return buf[:0], nil
	case tagUsedLong:
		pcc := buffer.Get(1)
		defer buffer.Put(pcc)
		cc := *pcc
		dlen := m2n(int(first[1])<<8 | int(first[2]))
		atoms := n2atoms(dlen)
		tailOff := off + 16*int64(atoms) - 1
		if err = a.read(cc, tailOff); err != nil {
			return
		}

		switch tag := cc[0]; tag {
		default:
			return nil, &ErrILSEQ{Type: ErrTailTag, Off: off, Arg: int64(tag)}
		case tagNotCompressed:
			b = need(dlen, buf)
			off += 3
			if err = a.read(b, off); err != nil {
				b = buf[:0]
			}
			return
		case tagCompressed:
			pzbuf := buffer.Get(dlen)
			defer buffer.Put(pzbuf)
			zbuf := *pzbuf
			off += 3
			if err = a.read(zbuf, off); err != nil {
				return buf[:0], err
			}

			return zappy.Decode(buf, zbuf)
		}
	case tagFreeShort, tagFreeLong:
		return nil, &ErrILSEQ{Type: ErrExpUsedTag, Off: off, Arg: int64(tag)}
	case tagUsedRelocated:
		if relocated {
			return nil, &ErrILSEQ{Type: ErrUnexpReloc, Off: off, Arg: relocSrc}
		}

		handle = b2h(first[1:])
		relocated = true
		goto reloc
	}
}
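A hypothetical caller illustrating the buf contract described in the doc comment; only Get itself comes from the package, the rest is illustrative:

// readMany reuses one scratch buffer across many reads.
func readMany(a *Allocator, handles []int64) error {
	buf := make([]byte, 4096)
	for _, h := range handles {
		// b is a sub-slice of buf when the content fits,
		// otherwise a freshly allocated slice.
		b, err := a.Get(buf, h)
		if err != nil {
			return err
		}
		_ = b // when b aliases buf, its contents are only valid until buf is reused
	}
	return nil
}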