Example #1
// saveChunk writes the chunk identified by key to the CAS, first
// recursively saving any Private children and rewriting their pointer
// slots, and returns the (possibly new) key the chunk is stored under.
func (blob *Blob) saveChunk(key cas.Key, level uint8) (cas.Key, error) {
	if !key.IsPrivate() {
		// already saved
		return key, nil
	}

	chunk, err := blob.stash.Get(key, blob.m.Type, level)
	if err != nil {
		return key, err
	}

	if level > 0 {
		for off := uint32(0); off+cas.KeySize <= uint32(len(chunk.Buf)); off += cas.KeySize {
			cur := cas.NewKeyPrivate(chunk.Buf[off : off+cas.KeySize])
			if cur.IsReserved() {
				return key, fmt.Errorf("invalid stored key: key @%d in %v is %v", off, key, chunk.Buf[off:off+cas.KeySize])
			}
			// recurses at most `level` deep
			saved, err := blob.saveChunk(cur, level-1)
			if err != nil {
				return key, err
			}
			copy(chunk.Buf[off:off+cas.KeySize], saved.Bytes())
		}
	}

	chunk.Buf = trim(chunk.Buf)
	return blob.stash.Save(key)
}
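trim is used here but not defined in these examples. Judging by the zero trimming mentioned in Example #4, it presumably strips trailing zero bytes so chunks are saved in canonical trimmed form; a minimal sketch:

func trim(buf []byte) []byte {
	// drop trailing zeroes; readers tolerate the short buffer and
	// treat the missing tail as zero bytes (see safeSlice, Example #3)
	end := len(buf)
	for end > 0 && buf[end-1] == 0 {
		end--
	}
	return buf[:end]
}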
Example #2
func TestKeyNewPrivateNum(t *testing.T) {
	k := cas.NewKeyPrivateNum(31337)
	buf := k.Bytes()
	k2 := cas.NewKey(buf)
	if g, e := k2, cas.Invalid; g != e {
		t.Errorf("expected NewKey to give Invalid: %v", g)
	}
	k3 := cas.NewKeyPrivate(buf)
	if g, e := k3, k; g != e {
		t.Errorf("expected NewKeyPrivate to give original key: %v", g)
	}
	priv, ok := k3.Private()
	if !ok {
		t.Fatalf("expected Private to work: %v %v", priv, ok)
	}
	if g, e := priv, uint64(31337); g != e {
		t.Errorf("expected Private to match original: %v", g)
	}
	if g, e := k.IsSpecial(), true; g != e {
		t.Errorf("not Special: %v != %v", g, e)
	}
	if g, e := k.IsPrivate(), true; g != e {
		t.Errorf("not Private: %v != %v", g, e)
	}
	if g, e := k.IsReserved(), false; g != e {
		t.Errorf("bad Reserved: %v != %v", g, e)
	}
}
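A hypothetical illustration, not the bazil API: numbered private keys make cheap in-memory handles, so a stash can name chunks that have not been written to the CAS yet, which is exactly the IsPrivate condition saveChunk branches on in Example #1.

type memStash struct {
	next uint64
	bufs map[cas.Key][]byte
}

// add registers an unsaved chunk under a fresh private key; only
// saving it to the CAS would replace this with a real content hash.
func (s *memStash) add(buf []byte) cas.Key {
	s.next++
	k := cas.NewKeyPrivateNum(s.next)
	s.bufs[k] = buf
	return k
}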
Example #3
// shrink decreases depth down to level, always selecting only the
// leftmost subtree and dropping all Private chunks in the rest.
func (blob *Blob) shrink(level uint8) error {
	for blob.depth > level {
		chunk, err := blob.stash.Get(blob.m.Root, blob.m.Type, blob.depth)
		if err != nil {
			return err
		}

		if blob.m.Root.IsPrivate() {
			// blob.depth must be >0 if we're here, so it's always a
			// pointer chunk; iterate all non-first keys and drop
			// Private chunks
			err = blob.discardAfter(chunk, 1, blob.depth)
			if err != nil {
				return err
			}
		}

		// now all non-left top-level private nodes have been dropped
		keybuf := safeSlice(chunk.Buf, 0, cas.KeySize)
		key := cas.NewKeyPrivate(keybuf)
		blob.m.Root = key
		blob.depth--
	}
	return nil
}
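safeSlice (used above to read the new root key) is not shown in these examples. A plausible sketch, assuming it acts like buf[low:high] but tolerates a zero-trimmed buffer by reading missing bytes as zeroes:

func safeSlice(buf []byte, low, high int) []byte {
	// always return high-low bytes; anything past len(buf) was cut
	// off by zero trimming and reads back as zero
	out := make([]byte, high-low)
	if low < len(buf) {
		end := high
		if end > len(buf) {
			end = len(buf)
		}
		copy(out, buf[low:end])
	}
	return out
}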
Example #4
// lookup fetches the data chunk for given global byte offset.
//
// The returned Chunk remains zero trimmed.
//
// It may be a Private or a normal chunk. For writable Chunks, call
// lookupForWrite instead.
func (blob *Blob) lookup(off uint64) (*chunks.Chunk, error) {
	gidx := uint32(off / uint64(blob.m.ChunkSize))
	lidxs := localChunkIndexes(blob.m.Fanout, gidx)
	level := blob.depth

	// walk down from the root
	var ptrKey = blob.m.Root
	for ; level > 0; level-- {
		// follow pointer chunks
		var idx uint32
		if int(level)-1 < len(lidxs) {
			idx = lidxs[level-1]
		}

		chunk, err := blob.stash.Get(ptrKey, blob.m.Type, level)
		if err != nil {
			return nil, err
		}

		keyoff := int64(idx) * cas.KeySize
		// zero trimming may have cut the key off, even in the middle
		// TODO ugly int conversion
		keybuf := safeSlice(chunk.Buf, int(keyoff), int(keyoff+cas.KeySize))
		ptrKey = cas.NewKeyPrivate(keybuf)
	}

	chunk, err := blob.stash.Get(ptrKey, blob.m.Type, 0)
	return chunk, err
}
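localChunkIndexes appears throughout these examples but is never shown. A plausible sketch, assuming it splits a global chunk index into one pointer-slot index per level, least significant first, so lidxs[level-1] is the slot to follow at that level and an empty result means depth 0 (the root is the data chunk itself):

func localChunkIndexes(fanout uint32, gidx uint32) []uint32 {
	// gidx == 0 yields nil, matching the idx fallback to 0 in lookup
	var lidxs []uint32
	for gidx > 0 {
		lidxs = append(lidxs, gidx%fanout)
		gidx /= fanout
	}
	return lidxs
}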
Example #5
func TestKeyInvalidPrivate(t *testing.T) {
	buf := make([]byte, cas.KeySize)
	buf[len(buf)-1] = 0x42
	k := cas.NewKeyPrivate(buf)
	if g, e := k, cas.Invalid; g != e {
		t.Errorf("not Invalid: %q != %q", g, e)
	}
}
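A properly formed private key round-trips through NewKeyPrivate (Example #2); here the buffer carries a stray 0x42 in its last byte, which presumably lands in the reserved special-key encoding without being a valid private key, so NewKeyPrivate rejects it as cas.Invalid rather than returning a usable key.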
Example #6
// discardAfter drops all Private chunks reachable from chunk at local
// indexes >= lidx, zeroing each processed key slot. chunk must itself
// be a Private chunk.
func (blob *Blob) discardAfter(chunk *chunks.Chunk, lidx uint32, level uint8) error {
	if level == 0 {
		return nil
	}
	for ; lidx < blob.m.Fanout; lidx++ {
		keyoff := lidx * cas.KeySize
		keybuf := chunk.Buf[keyoff : keyoff+cas.KeySize]
		key := cas.NewKeyPrivate(keybuf)
		if key.IsPrivate() {
			// there can't be any Private chunks if the key wasn't Private
			chunk, err := blob.stash.Get(key, blob.m.Type, level-1)
			if err != nil {
				return err
			}
			err = blob.discardAfter(chunk, 0, level-1)
			if err != nil {
				return err
			}
			blob.stash.Drop(key)
		}
		copy(chunk.Buf[keyoff:keyoff+cas.KeySize], cas.Empty.Bytes())
	}
	return nil
}
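Note that the loop zeroes every processed key slot with cas.Empty.Bytes(), not just the Private ones: a dropped chunk can no longer be reached through this pointer chunk, and an all-zero tail presumably lets the zero trimming seen in Examples #1 and #4 store the chunk in shorter form.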
Example #7
// Truncate adjusts the size of the blob. If the new size is less than
// the old size, data past that point is lost. If the new size is
// greater than the old size, the new part is full of zeroes.
func (blob *Blob) Truncate(size uint64) error {
	switch {
	case size == 0:
		// special case shrink to nothing
		blob.m.Root = cas.Empty
		blob.m.Size = 0
		blob.stash.Clear()

	case size < blob.m.Size:
		// shrink

		// I really am starting to hate the idea of file offsets being
		// int64s, but can't fight all the windmills at once.
		if size > math.MaxInt64 {
			return errors.New("cannot discard past 63-bit file size")
		}

		// we know size>0 from above
		off := size - 1
		gidx := uint32(off / uint64(blob.m.ChunkSize))
		lidxs := localChunkIndexes(blob.m.Fanout, gidx)
		err := blob.shrink(uint8(len(lidxs)))
		if err != nil {
			return err
		}

		// we don't need to always cow here (if everything is
		// perfectly aligned / already zero), but it's a rare enough
		// case that let's not care for now
		//
		// TODO this makes a tight loop on Open and Save wasteful

		{
			// TODO clone all the way down to be able to trim leaf chunk,
			// abusing lookupForWrite for now

			// we know size > 0 from above
			_, err := blob.lookupForWrite(size - 1)
			if err != nil {
				return err
			}
		}

		// now zero-fill on the right; guaranteed cow by the above kludge
		key := blob.m.Root
		if debugTruncate {
			if !key.IsPrivate() {
				panic(fmt.Errorf("Truncate root is not private: %v", key))
			}
		}
		for level := blob.depth; level > 0; level-- {
			chunk, err := blob.stash.Get(key, blob.m.Type, level)
			if err != nil {
				return err
			}
			err = blob.discardAfter(chunk, lidxs[level-1]+1, level)
			if err != nil {
				return err
			}
			keyoff := int64(lidxs[level-1]) * cas.KeySize
			keybuf := chunk.Buf[keyoff : keyoff+cas.KeySize]
			key = cas.NewKeyPrivate(keybuf)
			if debugTruncate {
				if !key.IsPrivate() {
					panic(fmt.Errorf("Truncate key at level %d not private: %v", level, key))
				}
			}
		}

		// and finally the leaf chunk
		chunk, err := blob.stash.Get(key, blob.m.Type, 0)
		if err != nil {
			return err
		}
		{
			// TODO is there anything to clear here; beware modulo wraparound

			// size is also the offset of the next byte
			loff := uint32(size % uint64(blob.m.ChunkSize))
			zeroSlice(chunk.Buf[loff:])
		}

		// TODO what's the right time to adjust size, wrt errors
		blob.m.Size = size

		// TODO unit tests that check we don't leak chunks?

	case size > blob.m.Size:
		// grow
		off := size - 1
		gidx := uint32(off / uint64(blob.m.ChunkSize))
		lidxs := localChunkIndexes(blob.m.Fanout, gidx)
		err := blob.grow(uint8(len(lidxs)))
		if err != nil {
			return err
		}
		blob.m.Size = size
	}
	return nil
}
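zeroSlice is used above but not shown (as is grow, the counterpart of shrink from Example #3). zeroSlice is presumably a plain in-place zero fill; a minimal sketch:

func zeroSlice(buf []byte) {
	// overwrite the tail in place; Truncate only calls this on a
	// chunk the lookupForWrite kludge has already made Private
	for i := range buf {
		buf[i] = 0
	}
}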
Example #8
// lookupForWrite fetches the data chunk for the given offset and
// ensures it is Private and reinflated, and thus writable.
func (blob *Blob) lookupForWrite(off uint64) (*chunks.Chunk, error) {
	gidx := uint32(off / uint64(blob.m.ChunkSize))
	lidxs := localChunkIndexes(blob.m.Fanout, gidx)

	err := blob.grow(uint8(len(lidxs)))
	if err != nil {
		return nil, err
	}

	level := blob.depth

	var parentChunk *chunks.Chunk
	{
		// clone root if necessary
		var k cas.Key
		var err error
		size := blob.chunkSizeForLevel(level)
		k, parentChunk, err = blob.stash.Clone(blob.m.Root, blob.m.Type, level, size)
		if err != nil {
			return nil, err
		}
		blob.m.Root = k
	}

	// walk down from the root
	var ptrKey = blob.m.Root
	for ; level > 0; level-- {
		// follow pointer chunks
		var idx uint32
		if int(level)-1 < len(lidxs) {
			idx = lidxs[level-1]
		}

		keyoff := int64(idx) * cas.KeySize
		{
			k := cas.NewKeyPrivate(parentChunk.Buf[keyoff : keyoff+cas.KeySize])
			if k.IsReserved() {
				return nil, fmt.Errorf("invalid stored key: key @%d in %v is %v", keyoff, ptrKey, parentChunk.Buf[keyoff:keyoff+cas.KeySize])
			}
			ptrKey = k
		}

		// clone it (nop if already cloned)
		size := blob.chunkSizeForLevel(level - 1)
		ptrKey, child, err := blob.stash.Clone(ptrKey, blob.m.Type, level-1, size)
		if err != nil {
			return nil, err
		}

		if debugLookup {
			if uint64(len(child.Buf)) != uint64(size) {
				panic(fmt.Errorf("lookupForWrite clone for level %d made weird size %d != %d, key %v", level-1, len(child.Buf), size, ptrKey))
			}
		}

		// update the key in parent
		n := copy(parentChunk.Buf[keyoff:keyoff+cas.KeySize], ptrKey.Bytes())
		if debugLookup {
			if n != cas.KeySize {
				panic(fmt.Errorf("lookupForWrite copied only %d of the key", n))
			}
		}
		parentChunk = child
	}

	if debugLookup {
		if parentChunk.Level != 0 {
			panic(fmt.Errorf("lookupForWrite got a non-leaf: %v", parentChunk.Level))
		}
		if uint64(len(parentChunk.Buf)) != uint64(blob.m.ChunkSize) {
			panic(fmt.Errorf("lookupForWrite got short leaf: %v", len(parentChunk.Buf)))
		}
	}

	return parentChunk, nil
}
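chunkSizeForLevel is used above but not defined in these examples. A plausible sketch, consistent with the debug checks here (a leaf is ChunkSize bytes; pointer chunks hold Fanout keys of cas.KeySize bytes each):

func (blob *Blob) chunkSizeForLevel(level uint8) uint32 {
	// level 0 is a data chunk; every higher level is a pointer
	// chunk packed with Fanout keys
	if level == 0 {
		return blob.m.ChunkSize
	}
	return blob.m.Fanout * cas.KeySize
}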