Example #1
// GetBlocks returns a slice of bytes corresponding to all the blocks along a span in X
func (d *Data) GetBlocks(v dvid.VersionID, start dvid.ChunkPoint3d, span int) ([]byte, error) {
	store, err := storage.MutableStore()
	if err != nil {
		return nil, fmt.Errorf("Data type imageblk had error initializing store: %v\n", err)
	}

	indexBeg := dvid.IndexZYX(start)
	end := start
	end[0] += int32(span - 1)
	indexEnd := dvid.IndexZYX(end)
	begTKey := NewTKey(&indexBeg)
	endTKey := NewTKey(&indexEnd)

	ctx := datastore.NewVersionedCtx(d, v)

	iv := dvid.InstanceVersion{d.DataName(), v}
	mapping := labels.MergeCache.LabelMap(iv)

	keyvalues, err := store.GetRange(ctx, begTKey, endTKey)
	if err != nil {
		return nil, err
	}

	var buf bytes.Buffer

	// Save the # of keyvalues actually obtained.
	numkv := len(keyvalues)
	binary.Write(&buf, binary.LittleEndian, int32(numkv))

	// Write each block's label data, remapping labels if a merge mapping exists.
	uncompress := true
	for _, kv := range keyvalues {
		block, _, err := dvid.DeserializeData(kv.V, uncompress)
		if err != nil {
			return nil, fmt.Errorf("Unable to deserialize block, %s (%v): %v", ctx, kv.K, err)
		}
		if mapping != nil {
			n := len(block) / 8
			for i := 0; i < n; i++ {
				orig := binary.LittleEndian.Uint64(block[i*8 : i*8+8])
				mapped, found := mapping.FinalLabel(orig)
				if !found {
					mapped = orig
				}
				binary.LittleEndian.PutUint64(block[i*8:i*8+8], mapped)
			}
		}

		_, err = buf.Write(block)
		if err != nil {
			return nil, err
		}
	}

	return buf.Bytes(), nil
}
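
A minimal sketch of how a caller might unpack the buffer returned by GetBlocks above: the first four bytes hold a little-endian int32 count of the key-values found, followed by the uncompressed blocks concatenated in increasing X. The data instance d, version v, start coordinate, and span of 4 are assumptions for illustration, not part of the original example.

func readSpan(d *Data, v dvid.VersionID, start dvid.ChunkPoint3d) error {
	data, err := d.GetBlocks(v, start, 4) // hypothetical span of 4 blocks along X
	if err != nil {
		return err
	}
	// First 4 bytes: number of key-values actually found in the span.
	numkv := int32(binary.LittleEndian.Uint32(data[:4]))
	// Remainder: the deserialized blocks, concatenated in increasing X.
	blocks := data[4:]
	dvid.Infof("got %d blocks totaling %d bytes\n", numkv, len(blocks))
	return nil
}
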
Example #2
// Loads blocks with old data if they exist.
func (d *Data) loadOldBlocks(v dvid.VersionID, vox *Voxels, blocks storage.TKeyValues) error {
	store, err := storage.MutableStore()
	if err != nil {
		return fmt.Errorf("Data type imageblk had error initializing store: %v\n", err)
	}

	ctx := datastore.NewVersionedCtx(d, v)

	// Create a map of old blocks indexed by the index
	oldBlocks := map[dvid.IZYXString]([]byte){}

	// Iterate through index space for this data using ZYX ordering.
	blockSize := d.BlockSize()
	blockNum := 0
	for it, err := vox.IndexIterator(blockSize); err == nil && it.Valid(); it.NextSpan() {
		indexBeg, indexEnd, err := it.IndexSpan()
		if err != nil {
			return err
		}
		begTKey := NewTKey(indexBeg)
		endTKey := NewTKey(indexEnd)

		// Get previous data.
		keyvalues, err := store.GetRange(ctx, begTKey, endTKey)
		if err != nil {
			return err
		}
		for _, kv := range keyvalues {
			indexZYX, err := DecodeTKey(kv.K)
			if err != nil {
				return err
			}
			block, _, err := dvid.DeserializeData(kv.V, true)
			if err != nil {
				return fmt.Errorf("Unable to deserialize block, %s: %v", ctx, err)
			}
			oldBlocks[indexZYX.ToIZYXString()] = block
		}

		// Load previous data into blocks
		ptBeg := indexBeg.Duplicate().(dvid.ChunkIndexer)
		ptEnd := indexEnd.Duplicate().(dvid.ChunkIndexer)
		begX := ptBeg.Value(0)
		endX := ptEnd.Value(0)
		c := dvid.ChunkPoint3d{begX, ptBeg.Value(1), ptBeg.Value(2)}
		for x := begX; x <= endX; x++ {
			c[0] = x
			curIndex := dvid.IndexZYX(c)
			curTKey := NewTKey(&curIndex)
			blocks[blockNum].K = curTKey
			block, ok := oldBlocks[curIndex.ToIZYXString()]
			if ok {
				copy(blocks[blockNum].V, block)
			}
			blockNum++
		}
	}
	return nil
}
Example #3
// GetVoxels copies voxels from the storage engine to Voxels, a requested subvolume or 2d image.
func (d *Data) GetVoxels(v dvid.VersionID, vox *Voxels, r *ROI) error {
	timedLog := dvid.NewTimeLog()
	defer timedLog.Infof("GetVoxels %s", vox)

	store, err := storage.MutableStore()
	if err != nil {
		return fmt.Errorf("Data type imageblk had error initializing store: %v\n", err)
	}

	// Only do one request at a time, although each request can start many goroutines.
	server.SpawnGoroutineMutex.Lock()
	defer server.SpawnGoroutineMutex.Unlock()

	ctx := datastore.NewVersionedCtx(d, v)

	wg := new(sync.WaitGroup)
	for it, err := vox.IndexIterator(d.BlockSize()); err == nil && it.Valid(); it.NextSpan() {
		indexBeg, indexEnd, err := it.IndexSpan()
		if err != nil {
			return err
		}
		begTKey := NewTKey(indexBeg)
		endTKey := NewTKey(indexEnd)

		// Get set of blocks in ROI if ROI provided
		var chunkOp *storage.ChunkOp
		if r != nil && r.Iter != nil {
			ptBeg := indexBeg.Duplicate().(dvid.ChunkIndexer)
			ptEnd := indexEnd.Duplicate().(dvid.ChunkIndexer)
			begX := ptBeg.Value(0)
			endX := ptEnd.Value(0)

			blocksInROI := make(map[string]bool, (endX - begX + 1))
			c := dvid.ChunkPoint3d{begX, ptBeg.Value(1), ptBeg.Value(2)}
			for x := begX; x <= endX; x++ {
				c[0] = x
				curIndex := dvid.IndexZYX(c)
				if r.Iter.InsideFast(curIndex) {
					indexString := string(curIndex.Bytes())
					blocksInROI[indexString] = true
				}
			}
			chunkOp = &storage.ChunkOp{&getOperation{vox, blocksInROI, r.attenuation}, wg}
		} else {
			chunkOp = &storage.ChunkOp{&getOperation{vox, nil, 0}, wg}
		}

		// Send the entire range of key-value pairs to chunk processor
		err = store.ProcessRange(ctx, begTKey, endTKey, chunkOp, storage.ChunkFunc(d.ReadChunk))
		if err != nil {
			return fmt.Errorf("Unable to GET data %s: %v", ctx, err)
		}
	}
	if err != nil {
		return err
	}
	wg.Wait()
	return nil
}
Example #4
// NewTKey returns an imagetile-specific key component based on the components of a tile request.
func NewTKey(tile dvid.ChunkPoint3d, plane dvid.DataShape, scale Scaling) storage.TKey {
	buf := bytes.NewBuffer(plane.Bytes())
	buf.WriteByte(byte(scale))
	buf.WriteByte(byte(3))
	idx := dvid.IndexZYX(tile)
	buf.Write(idx.Bytes())
	return buf.Bytes()
}
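
A hypothetical call to the imagetile NewTKey above. The tile coordinate and scale level are made up, and dvid.XY is assumed to be one of the dvid package's 2d DataShape values.

func exampleTileKey() storage.TKey {
	// Tile at column 3, row 1, slice 0, scale level 2, in the XY plane (all hypothetical).
	tile := dvid.ChunkPoint3d{3, 1, 0}
	return NewTKey(tile, dvid.XY, Scaling(2))
}
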
Example #5
// Writes an XY image into the blocks that intersect it.  This function assumes the
// blocks have been allocated and if necessary, filled with old data.
func (d *Data) writeXYImage(v dvid.VersionID, vox *Voxels, b storage.TKeyValues) (extentChanged bool, err error) {

	// Setup concurrency in image -> block transfers.
	var wg sync.WaitGroup
	defer func() {
		wg.Wait()
	}()

	// Iterate through index space for this data using ZYX ordering.
	blockSize := d.BlockSize()
	var startingBlock int32

	for it, err := vox.IndexIterator(blockSize); err == nil && it.Valid(); it.NextSpan() {
		indexBeg, indexEnd, err := it.IndexSpan()
		if err != nil {
			return extentChanged, err
		}

		ptBeg := indexBeg.Duplicate().(dvid.ChunkIndexer)
		ptEnd := indexEnd.Duplicate().(dvid.ChunkIndexer)

		// Track point extents
		if d.Extents().AdjustIndices(ptBeg, ptEnd) {
			extentChanged = true
		}

		// Do image -> block transfers in concurrent goroutines.
		begX := ptBeg.Value(0)
		endX := ptEnd.Value(0)

		<-server.HandlerToken
		wg.Add(1)
		go func(blockNum int32) {
			c := dvid.ChunkPoint3d{begX, ptBeg.Value(1), ptBeg.Value(2)}
			for x := begX; x <= endX; x++ {
				c[0] = x
				curIndex := dvid.IndexZYX(c)
				b[blockNum].K = NewTKey(&curIndex)

				// Write this slice data into the block.
				vox.WriteBlock(&(b[blockNum]), blockSize)
				blockNum++
			}
			server.HandlerToken <- 1
			wg.Done()
		}(startingBlock)

		startingBlock += (endX - begX + 1)
	}
	return
}
Example #6
func (d *Data) GetArbitraryImage(ctx storage.Context, tlStr, trStr, blStr, resStr string) (*dvid.Image, error) {
	// Setup the image buffer
	arb, err := d.NewArbSliceFromStrings(tlStr, trStr, blStr, resStr, "_")
	if err != nil {
		return nil, err
	}

	// Iterate across arbitrary image using res increments, retrieving trilinear interpolation
	// at each point.
	cache := NewValueCache(100)
	keyF := func(pt dvid.Point3d) []byte {
		chunkPt := pt.Chunk(d.BlockSize()).(dvid.ChunkPoint3d)
		idx := dvid.IndexZYX(chunkPt)
		return NewTKey(&idx)
	}

	// TODO: Add concurrency.
	leftPt := arb.topLeft
	var i int32
	var wg sync.WaitGroup
	for y := int32(0); y < arb.size[1]; y++ {
		<-server.HandlerToken
		wg.Add(1)
		go func(curPt dvid.Vector3d, dstI int32) {
			defer func() {
				server.HandlerToken <- 1
				wg.Done()
			}()
			for x := int32(0); x < arb.size[0]; x++ {
				value, err := d.computeValue(curPt, ctx, KeyFunc(keyF), cache)
				if err != nil {
					dvid.Errorf("Error in concurrent arbitrary image calc: %v", err)
					return
				}
				copy(arb.data[dstI:dstI+arb.bytesPerVoxel], value)

				curPt.Increment(arb.incrX)
				dstI += arb.bytesPerVoxel
			}
		}(leftPt, i)
		leftPt.Increment(arb.incrY)
		i += arb.size[0] * arb.bytesPerVoxel
	}
	wg.Wait()

	return dvid.ImageFromData(arb.size[0], arb.size[1], arb.data, d.Properties.Values, d.Properties.Interpolable)
}
Example #7
// Construct all tiles for an image with offset and send to out function.  extractTiles assumes
// the image and offset are in the XY plane.
func (d *Data) extractTiles(v *imageblk.Voxels, offset dvid.Point, scaling Scaling, outF outFunc) error {
	if d.Levels == nil || scaling < 0 || scaling >= Scaling(len(d.Levels)) {
		return fmt.Errorf("Bad scaling level specified: %d", scaling)
	}
	levelSpec, found := d.Levels[scaling]
	if !found {
		return fmt.Errorf("No scaling specs available for scaling level %d", scaling)
	}
	srcW := v.Size().Value(0)
	srcH := v.Size().Value(1)

	tileW, tileH, err := v.DataShape().GetSize2D(levelSpec.TileSize)
	if err != nil {
		return err
	}

	// Split image into tiles and store into datastore.
	src, err := v.GetImage2d()
	if err != nil {
		return err
	}
	var x0, y0, x1, y1 int32
	y1 = tileH
	for y0 = 0; y0 < srcH; y0 += tileH {
		x1 = tileW
		for x0 = 0; x0 < srcW; x0 += tileW {
			tileRect := image.Rect(int(x0), int(y0), int(x1), int(y1))
			tile, err := src.SubImage(tileRect)
			if err != nil {
				return err
			}
			tileCoord, err := v.DataShape().PlaneToChunkPoint3d(x0, y0, offset, levelSpec.TileSize)
			if err != nil {
				return err
			}
			// fmt.Printf("Tile Coord: %s > %s\n", tileCoord, tileRect)
			tileIndex := NewIndexTile(dvid.IndexZYX(tileCoord), v.DataShape(), Scaling(scaling))
			if err = outF(tileIndex, tile); err != nil {
				return err
			}
			x1 += tileW
		}
		y1 += tileH
	}
	return nil
}
Example #8
// GetLabelBytesAtPoint returns the 8-byte slice corresponding to a 64-bit label at a point.
func (d *Data) GetLabelBytesAtPoint(v dvid.VersionID, pt dvid.Point) ([]byte, error) {
	store, err := storage.MutableStore()
	if err != nil {
		return nil, err
	}

	// Compute the block key that contains the given point.
	coord, ok := pt.(dvid.Chunkable)
	if !ok {
		return nil, fmt.Errorf("Can't determine block of point %s", pt)
	}
	blockSize := d.BlockSize()
	blockCoord := coord.Chunk(blockSize).(dvid.ChunkPoint3d)
	index := dvid.IndexZYX(blockCoord)

	// Retrieve the block of labels
	ctx := datastore.NewVersionedCtx(d, v)
	serialization, err := store.Get(ctx, NewTKey(&index))
	if err != nil {
		return nil, fmt.Errorf("Error getting '%s' block for index %s\n", d.DataName(), blockCoord)
	}
	if serialization == nil {
		return zeroLabelBytes, nil
	}
	labelData, _, err := dvid.DeserializeData(serialization, true)
	if err != nil {
		return nil, fmt.Errorf("Unable to deserialize block %s in '%s': %v\n", blockCoord, d.DataName(), err)
	}

	// Retrieve the particular label within the block.
	ptInBlock := coord.PointInChunk(blockSize)
	nx := int64(blockSize.Value(0))
	nxy := nx * int64(blockSize.Value(1))
	i := (int64(ptInBlock.Value(0)) + int64(ptInBlock.Value(1))*nx + int64(ptInBlock.Value(2))*nxy) * 8
	return labelData[i : i+8], nil
}
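
Since the slice returned above holds a single 64-bit label, a caller can decode it directly. A small sketch, assuming dvid.Point3d satisfies the dvid.Point interface as in the other examples on this page:

func labelAtPoint(d *Data, v dvid.VersionID, pt dvid.Point3d) (uint64, error) {
	b, err := d.GetLabelBytesAtPoint(v, pt)
	if err != nil {
		return 0, err
	}
	// Labels are stored little-endian, 8 bytes per voxel.
	return binary.LittleEndian.Uint64(b), nil
}
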
Example #9
// GetImage returns an image given a 2d orthogonal image description.  Since imagetile tiles
// have precomputed XY, XZ, and YZ orientations, reconstruction of the desired image should
// be much faster than computing the image from voxel blocks.
func (d *Data) GetImage(ctx storage.Context, src *imageblk.Data, geom dvid.Geometry, isotropic bool) (*dvid.Image, error) {
	// Iterate through tiles that intersect our geometry.
	if d.Levels == nil || len(d.Levels) == 0 {
		return nil, fmt.Errorf("%s has no specification for tiles at highest resolution",
			d.DataName())
	}
	levelSpec := d.Levels[0]
	minSlice, err := dvid.Isotropy2D(src.VoxelSize, geom, isotropic)
	if err != nil {
		return nil, err
	}

	// Create an image of appropriate size and type using source's ExtData creation.
	dstW := minSlice.Size().Value(0)
	dstH := minSlice.Size().Value(1)
	dst, err := src.BlankImage(dstW, dstH)
	if err != nil {
		return nil, err
	}

	// Read each tile that intersects the geometry and store into final image.
	slice := minSlice.DataShape()
	tileW, tileH, err := slice.GetSize2D(levelSpec.TileSize)
	if err != nil {
		return nil, err
	}
	tileSize := dvid.Point2d{tileW, tileH}
	minPtX, minPtY, err := slice.GetSize2D(minSlice.StartPoint())
	if err != nil {
		return nil, err
	}

	wg := new(sync.WaitGroup)
	topLeftGlobal := dvid.Point2d{minPtX, minPtY}
	tilePt := topLeftGlobal.Chunk(tileSize)
	bottomRightGlobal := tilePt.MaxPoint(tileSize).(dvid.Point2d)
	y0 := int32(0)
	y1 := bottomRightGlobal[1] - minPtY + 1
	for y0 < dstH {
		x0 := int32(0)
		x1 := bottomRightGlobal[0] - minPtX + 1
		for x0 < dstW {
			wg.Add(1)
			go func(x0, y0, x1, y1 int32) {
				defer wg.Done()

				// Get this tile from datastore
				tileCoord, err := slice.PlaneToChunkPoint3d(x0, y0, minSlice.StartPoint(), levelSpec.TileSize)
				if err != nil {
					return
				}
				goImg, err := d.GetTile(ctx, slice, 0, dvid.IndexZYX(tileCoord))
				if err != nil || goImg == nil {
					return
				}

				// Get tile space coordinate for top left.
				curStart := dvid.Point2d{x0 + minPtX, y0 + minPtY}
				p := curStart.PointInChunk(tileSize)
				ptInTile := image.Point{int(p.Value(0)), int(p.Value(1))}

				// Paste the pertinent rectangle from this tile into our destination.
				r := image.Rect(int(x0), int(y0), int(x1), int(y1))
				draw.Draw(dst.GetDrawable(), r, goImg, ptInTile, draw.Src)
			}(x0, y0, x1, y1)
			x0 = x1
			x1 += tileW
		}
		y0 = y1
		y1 += tileH
	}
	wg.Wait()

	if isotropic {
		dstW := int(geom.Size().Value(0))
		dstH := int(geom.Size().Value(1))
		dst, err = dst.ScaleImage(dstW, dstH)
		if err != nil {
			return nil, err
		}
	}
	return dst, nil
}
Example #10
// GetBlocks returns a slice of bytes corresponding to all the blocks along a span in X
func (d *Data) GetBlocks(v dvid.VersionID, start dvid.ChunkPoint3d, span int32) ([]byte, error) {
	timedLog := dvid.NewTimeLog()
	defer timedLog.Infof("GetBlocks %s, span %d", start, span)

	store, err := storage.MutableStore()
	if err != nil {
		return nil, fmt.Errorf("Data type imageblk had error initializing store: %v\n", err)
	}

	indexBeg := dvid.IndexZYX(start)
	sx, sy, sz := indexBeg.Unpack()

	end := start
	end[0] += int32(span - 1)
	indexEnd := dvid.IndexZYX(end)
	keyBeg := NewTKey(&indexBeg)
	keyEnd := NewTKey(&indexEnd)

	// Allocate one uncompressed-sized slice with background values.
	blockBytes := int32(d.BlockSize().Prod()) * d.Values.BytesPerElement()
	numBytes := blockBytes * span

	buf := make([]byte, numBytes, numBytes)
	if d.Background != 0 {
		for i := range buf {
			buf[i] = byte(d.Background)
		}
	}

	// Concurrently copy the retrieved blocks into this byte slice.
	ctx := datastore.NewVersionedCtx(d, v)

	var wg sync.WaitGroup
	err = store.ProcessRange(ctx, keyBeg, keyEnd, &storage.ChunkOp{}, func(c *storage.Chunk) error {
		if c == nil || c.TKeyValue == nil {
			return nil
		}
		kv := c.TKeyValue
		if kv.V == nil {
			return nil
		}

		// Determine which block this is.
		indexZYX, err := DecodeTKey(kv.K)
		if err != nil {
			return err
		}
		x, y, z := indexZYX.Unpack()
		if z != sz || y != sy || x < sx || x >= sx+int32(span) {
			return fmt.Errorf("Received key-value for %s, not supposed to be within span range %s, length %d", *indexZYX, start, span)
		}
		n := x - sx
		i := n * blockBytes
		j := i + blockBytes

		// Spawn goroutine to transfer data
		wg.Add(1)
		go xferBlock(buf[i:j], c, &wg)
		return nil
	})
	if err != nil {
		return nil, err
	}
	wg.Wait()
	return buf, nil
}
Example #11
func (d *Data) DeleteBlocks(ctx *datastore.VersionedCtx, start dvid.ChunkPoint3d, span int) error {
	store, err := storage.MutableStore()
	if err != nil {
		return fmt.Errorf("Data type labelblk had error initializing store: %v\n", err)
	}
	batcher, ok := store.(storage.KeyValueBatcher)
	if !ok {
		return fmt.Errorf("Data type labelblk requires batch-enabled store, which %q is not\n", store)
	}

	indexBeg := dvid.IndexZYX(start)
	end := start
	end[0] += int32(span - 1)
	indexEnd := dvid.IndexZYX(end)
	begTKey := NewTKey(&indexBeg)
	endTKey := NewTKey(&indexEnd)

	iv := dvid.InstanceVersion{d.DataName(), ctx.VersionID()}
	mapping := labels.MergeCache.LabelMap(iv)

	kvs, err := store.GetRange(ctx, begTKey, endTKey)
	if err != nil {
		return err
	}

	batch := batcher.NewBatch(ctx)
	uncompress := true
	for _, kv := range kvs {
		izyx, err := DecodeTKey(kv.K)
		if err != nil {
			return err
		}

		// Delete the labelblk (really tombstones it)
		batch.Delete(kv.K)

		// Send data to delete associated labelvol for labels in this block
		block, _, err := dvid.DeserializeData(kv.V, uncompress)
		if err != nil {
			return fmt.Errorf("Unable to deserialize block, %s (%v): %v", ctx, kv.K, err)
		}
		if mapping != nil {
			n := len(block) / 8
			for i := 0; i < n; i++ {
				orig := binary.LittleEndian.Uint64(block[i*8 : i*8+8])
				mapped, found := mapping.FinalLabel(orig)
				if !found {
					mapped = orig
				}
				binary.LittleEndian.PutUint64(block[i*8:i*8+8], mapped)
			}
		}

		// Notify any subscribers that we've deleted this block.
		evt := datastore.SyncEvent{d.DataName(), labels.DeleteBlockEvent}
		msg := datastore.SyncMessage{ctx.VersionID(), labels.DeleteBlock{izyx, block}}
		if err := datastore.NotifySubscribers(evt, msg); err != nil {
			return err
		}

	}
	return batch.Commit()
}
Example #12
// PutBlocks stores blocks of data in a span along X
func (d *Data) PutBlocks(v dvid.VersionID, start dvid.ChunkPoint3d, span int, data io.ReadCloser) error {
	store, err := storage.BigDataStore()
	if err != nil {
		return fmt.Errorf("Data type imageblk had error initializing store: %v\n", err)
	}
	batcher, ok := store.(storage.KeyValueBatcher)
	if !ok {
		return fmt.Errorf("Data type imageblk requires batch-enabled store, which %q is not\n", store)
	}

	ctx := datastore.NewVersionedCtx(d, v)
	batch := batcher.NewBatch(ctx)

	// Read blocks from the stream until we can output a batch put.
	const BatchSize = 1000
	var readBlocks int
	numBlockBytes := d.BlockSize().Prod()
	chunkPt := start
	buf := make([]byte, numBlockBytes)
	for {
		// Read a block's worth of data
		readBytes := int64(0)
		for {
			n, err := data.Read(buf[readBytes:])
			readBytes += int64(n)
			if readBytes == numBlockBytes {
				break
			}
			if err == io.EOF {
				return fmt.Errorf("Block data ceased before all block data read")
			}
			if err != nil {
				return fmt.Errorf("Error reading blocks: %v\n", err)
			}
		}

		if readBytes != numBlockBytes {
			return fmt.Errorf("Expected %d bytes in block read, got %d instead!  Aborting.", numBlockBytes, readBytes)
		}

		serialization, err := dvid.SerializeData(buf, d.Compression(), d.Checksum())
		if err != nil {
			return err
		}
		zyx := dvid.IndexZYX(chunkPt)
		batch.Put(NewTKey(&zyx), serialization)

		// Notify any subscribers that you've changed a block.
		evt := datastore.SyncEvent{d.DataName(), ChangeBlockEvent}
		msg := datastore.SyncMessage{v, Block{&zyx, buf}}
		if err := datastore.NotifySubscribers(evt, msg); err != nil {
			return err
		}

		// Advance to next block
		chunkPt[0]++
		readBlocks++
		finish := (readBlocks == span)
		if finish || readBlocks%BatchSize == 0 {
			if err := batch.Commit(); err != nil {
				return fmt.Errorf("Error on batch commit, block %d: %v\n", readBlocks, err)
			}
			batch = batcher.NewBatch(ctx)
		}
		if finish {
			break
		}
	}
	return nil
}
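
PutBlocks above reads exactly span blocks' worth of raw voxel bytes from the stream. A hedged sketch of feeding it from memory; bytes.NewReader and ioutil.NopCloser come from the standard library, and the two-block zero-filled payload is hypothetical.

func putTwoBlocks(d *Data, v dvid.VersionID, start dvid.ChunkPoint3d) error {
	// Two blocks' worth of zero-valued voxel data (hypothetical payload).
	numBlockBytes := d.BlockSize().Prod()
	payload := make([]byte, 2*numBlockBytes)
	rc := ioutil.NopCloser(bytes.NewReader(payload))
	return d.PutBlocks(v, start, 2, rc)
}
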
Example #13
// PutVoxels persists voxels from a subvolume into the storage engine.
// The subvolume must be aligned to blocks of the data instance.
//
// This requirement simplifies the coding quite a bit.  Earlier versions
// of DVID allowed 2d writes, which required reading blocks, writing subsets of data
// into those blocks, and then writing the result, all within a transaction.  That
// approach is difficult to scale because it requires GETs within transactions and more
// complicated coordination when moving to distributed front-end DVIDs.
func (d *Data) PutVoxels(v dvid.VersionID, vox *Voxels, roi *ROI) error {

	// Make sure vox is block-aligned
	if !dvid.BlockAligned(vox, d.BlockSize()) {
		return fmt.Errorf("cannot store voxels in non-block aligned geometry %s -> %s", vox.StartPoint(), vox.EndPoint())
	}

	wg := new(sync.WaitGroup)

	// Only do one request at a time, although each request can start many goroutines.
	server.SpawnGoroutineMutex.Lock()
	defer server.SpawnGoroutineMutex.Unlock()

	// Keep track of changing extents and mark repo as dirty if changed.
	var extentChanged bool
	defer func() {
		if extentChanged {
			err := datastore.SaveDataByVersion(v, d)
			if err != nil {
				dvid.Infof("Error in trying to save repo on change: %v\n", err)
			}
		}
	}()

	// Track point extents
	extents := d.Extents()
	if extents.AdjustPoints(vox.StartPoint(), vox.EndPoint()) {
		extentChanged = true
	}

	// Iterate through index space for this data.
	for it, err := vox.IndexIterator(d.BlockSize()); err == nil && it.Valid(); it.NextSpan() {
		i0, i1, err := it.IndexSpan()
		if err != nil {
			return err
		}
		ptBeg := i0.Duplicate().(dvid.ChunkIndexer)
		ptEnd := i1.Duplicate().(dvid.ChunkIndexer)

		begX := ptBeg.Value(0)
		endX := ptEnd.Value(0)

		if extents.AdjustIndices(ptBeg, ptEnd) {
			extentChanged = true
		}

		wg.Add(int(endX-begX) + 1)
		c := dvid.ChunkPoint3d{begX, ptBeg.Value(1), ptBeg.Value(2)}
		for x := begX; x <= endX; x++ {
			c[0] = x
			curIndex := dvid.IndexZYX(c)

			// Don't PUT if this index is outside a specified ROI
			if roi != nil && roi.Iter != nil && !roi.Iter.InsideFast(curIndex) {
				wg.Done()
				continue
			}

			kv := &storage.TKeyValue{K: NewTKey(&curIndex)}
			putOp := &putOperation{vox, curIndex, v}
			op := &storage.ChunkOp{putOp, wg}
			d.PutChunk(&storage.Chunk{op, kv})
		}
	}

	wg.Wait()
	return nil
}
Example #14
// PutBlocks stores blocks of data in a span along X
func (d *Data) PutBlocks(v dvid.VersionID, mutID uint64, start dvid.ChunkPoint3d, span int, data io.ReadCloser, mutate bool) error {
	batcher, err := d.GetKeyValueBatcher()
	if err != nil {
		return err
	}

	ctx := datastore.NewVersionedCtx(d, v)
	batch := batcher.NewBatch(ctx)

	// Read blocks from the stream until we can output a batch put.
	const BatchSize = 1000
	var readBlocks int
	numBlockBytes := d.BlockSize().Prod()
	chunkPt := start
	buf := make([]byte, numBlockBytes)
	for {
		// Read a block's worth of data
		readBytes := int64(0)
		for {
			n, err := data.Read(buf[readBytes:])
			readBytes += int64(n)
			if readBytes == numBlockBytes {
				break
			}
			if err == io.EOF {
				return fmt.Errorf("Block data ceased before all block data read")
			}
			if err != nil {
				return fmt.Errorf("Error reading blocks: %v\n", err)
			}
		}

		if readBytes != numBlockBytes {
			return fmt.Errorf("Expected %d bytes in block read, got %d instead!  Aborting.", numBlockBytes, readBytes)
		}

		serialization, err := dvid.SerializeData(buf, d.Compression(), d.Checksum())
		if err != nil {
			return err
		}
		zyx := dvid.IndexZYX(chunkPt)
		tk := NewTKey(&zyx)

		// If we are mutating, get the previous block of data.
		var oldBlock []byte
		if mutate {
			oldBlock, err = d.loadOldBlock(v, tk)
			if err != nil {
				return fmt.Errorf("Unable to load previous block in %q, key %v: %v\n", d.DataName(), tk, err)
			}
		}

		// Write the new block
		batch.Put(tk, serialization)

		// Notify any subscribers that you've changed a block.
		var event string
		var delta interface{}
		if mutate {
			event = MutateBlockEvent
			delta = MutatedBlock{&zyx, oldBlock, buf, mutID}
		} else {
			event = IngestBlockEvent
			delta = Block{&zyx, buf, mutID}
		}
		evt := datastore.SyncEvent{d.DataUUID(), event}
		msg := datastore.SyncMessage{event, v, delta}
		if err := datastore.NotifySubscribers(evt, msg); err != nil {
			return err
		}

		// Advance to next block
		chunkPt[0]++
		readBlocks++
		finish := (readBlocks == span)
		if finish || readBlocks%BatchSize == 0 {
			if err := batch.Commit(); err != nil {
				return fmt.Errorf("Error on batch commit, block %d: %v\n", readBlocks, err)
			}
			batch = batcher.NewBatch(ctx)
		}
		if finish {
			break
		}
	}
	return nil
}
Example #15
// PutVoxels persists voxels from a subvolume into the storage engine.
// The subvolume must be aligned to blocks of the data instance, which simplifies
// the routine if the PUT is a mutation (signals MutateBlockEvent) instead of ingestion.
func (d *Data) PutVoxels(v dvid.VersionID, mutID uint64, vox *Voxels, roiname dvid.InstanceName, mutate bool) error {
	r, err := GetROI(v, roiname, vox)
	if err != nil {
		return err
	}

	// Make sure vox is block-aligned
	if !dvid.BlockAligned(vox, d.BlockSize()) {
		return fmt.Errorf("cannot store voxels in non-block aligned geometry %s -> %s", vox.StartPoint(), vox.EndPoint())
	}

	wg := new(sync.WaitGroup)

	// Only do one request at a time, although each request can start many goroutines.
	server.SpawnGoroutineMutex.Lock()
	defer server.SpawnGoroutineMutex.Unlock()

	// Keep track of changing extents and mark repo as dirty if changed.
	var extentChanged bool
	defer func() {
		if extentChanged {
			err := datastore.SaveDataByVersion(v, d)
			if err != nil {
				dvid.Infof("Error in trying to save repo on change: %v\n", err)
			}
		}
	}()

	// Track point extents
	extents := d.Extents()
	if extents.AdjustPoints(vox.StartPoint(), vox.EndPoint()) {
		extentChanged = true
	}

	// extract buffer interface if it exists
	var putbuffer storage.RequestBuffer
	store, err := d.GetOrderedKeyValueDB()
	if err != nil {
		return fmt.Errorf("Data type imageblk had error initializing store: %v\n", err)
	}
	if req, ok := store.(storage.KeyValueRequester); ok {
		ctx := datastore.NewVersionedCtx(d, v)
		putbuffer = req.NewBuffer(ctx)
	}

	// Iterate through index space for this data.
	for it, err := vox.IndexIterator(d.BlockSize()); err == nil && it.Valid(); it.NextSpan() {
		i0, i1, err := it.IndexSpan()
		if err != nil {
			return err
		}
		ptBeg := i0.Duplicate().(dvid.ChunkIndexer)
		ptEnd := i1.Duplicate().(dvid.ChunkIndexer)

		begX := ptBeg.Value(0)
		endX := ptEnd.Value(0)

		if extents.AdjustIndices(ptBeg, ptEnd) {
			extentChanged = true
		}

		wg.Add(int(endX-begX) + 1)
		c := dvid.ChunkPoint3d{begX, ptBeg.Value(1), ptBeg.Value(2)}
		for x := begX; x <= endX; x++ {
			c[0] = x
			curIndex := dvid.IndexZYX(c)

			// Don't PUT if this index is outside a specified ROI
			if r != nil && r.Iter != nil && !r.Iter.InsideFast(curIndex) {
				wg.Done()
				continue
			}

			kv := &storage.TKeyValue{K: NewTKey(&curIndex)}
			putOp := &putOperation{vox, curIndex, v, mutate, mutID}
			op := &storage.ChunkOp{putOp, wg}
			d.PutChunk(&storage.Chunk{op, kv}, putbuffer)
		}
	}

	wg.Wait()

	// if a bufferable op, flush
	if putbuffer != nil {
		putbuffer.Flush()
	}

	return nil
}
Example #16
func NewBlockTKey(pt dvid.ChunkPoint3d) storage.TKey {
	idx := dvid.IndexZYX(pt)
	return storage.NewTKey(keyBlock, idx.Bytes())
}
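
A sketch of pairing NewBlockTKey above with the point-to-chunk conversion used in examples #6 and #8; blockSize is assumed to be the dvid.Point returned by the data instance's BlockSize().

func blockKeyForVoxel(pt dvid.Point3d, blockSize dvid.Point) storage.TKey {
	// Find the block (chunk) containing this voxel, then build its key.
	blockCoord := pt.Chunk(blockSize).(dvid.ChunkPoint3d)
	return NewBlockTKey(blockCoord)
}
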
Example #17
// Index returns a channel-specific Index
func (c *Channel) Index(p dvid.ChunkPoint) dvid.Index {
	return &dvid.IndexCZYX{c.channelNum, dvid.IndexZYX(p.(dvid.ChunkPoint3d))}
}
Example #18
// GetLabels copies labels from the storage engine to Labels, a requested subvolume or 2d image.
func (d *Data) GetLabels(v dvid.VersionID, vox *Labels, r *imageblk.ROI) error {
	store, err := d.GetOrderedKeyValueDB()
	if err != nil {
		return fmt.Errorf("Data type imageblk had error initializing store: %v\n", err)
	}

	// Only do one request at a time, although each request can start many goroutines.
	server.SpawnGoroutineMutex.Lock()
	defer server.SpawnGoroutineMutex.Unlock()

	ctx := datastore.NewVersionedCtx(d, v)

	iv := dvid.InstanceVersion{d.DataUUID(), v}
	mapping := labels.LabelMap(iv)

	wg := new(sync.WaitGroup)

	okv := store.(storage.BufferableOps)
	// extract buffer interface
	req, hasbuffer := okv.(storage.KeyValueRequester)
	if hasbuffer {
		okv = req.NewBuffer(ctx)
	}

	for it, err := vox.IndexIterator(d.BlockSize()); err == nil && it.Valid(); it.NextSpan() {
		indexBeg, indexEnd, err := it.IndexSpan()
		if err != nil {
			return err
		}
		begTKey := NewTKey(indexBeg)
		endTKey := NewTKey(indexEnd)

		// Get set of blocks in ROI if ROI provided
		var chunkOp *storage.ChunkOp
		if r != nil && r.Iter != nil {
			ptBeg := indexBeg.Duplicate().(dvid.ChunkIndexer)
			ptEnd := indexEnd.Duplicate().(dvid.ChunkIndexer)
			begX := ptBeg.Value(0)
			endX := ptEnd.Value(0)

			blocksInROI := make(map[string]bool, (endX - begX + 1))
			c := dvid.ChunkPoint3d{begX, ptBeg.Value(1), ptBeg.Value(2)}
			for x := begX; x <= endX; x++ {
				c[0] = x
				curIndex := dvid.IndexZYX(c)
				if r.Iter.InsideFast(curIndex) {
					indexString := string(curIndex.Bytes())
					blocksInROI[indexString] = true
				}
			}
			chunkOp = &storage.ChunkOp{&getOperation{vox, blocksInROI, mapping}, wg}
		} else {
			chunkOp = &storage.ChunkOp{&getOperation{vox, nil, mapping}, wg}
		}

		// Send the entire range of key-value pairs to chunk processor
		err = okv.ProcessRange(ctx, begTKey, endTKey, chunkOp, storage.ChunkFunc(d.ReadChunk))
		if err != nil {
			return fmt.Errorf("Unable to GET data %s: %v", ctx, err)
		}
	}

	if hasbuffer {
		// submit the entire buffer to the DB
		err = okv.(storage.RequestBuffer).Flush()

		if err != nil {
			return fmt.Errorf("Unable to GET data %s: %v", ctx, err)

		}
	}

	if err != nil {
		return err
	}
	wg.Wait()
	return nil
}