Example #1
// Deletes an ROI.
func (d *Data) Delete(ctx storage.VersionedCtx) error {
	smalldata, err := storage.SmallDataStore()
	if err != nil {
		return err
	}

	// We only want one PUT at a time on a given version of this data to prevent interleaved PUTs.
	putMutex := ctx.Mutex()
	putMutex.Lock()
	defer putMutex.Unlock()

	d.MinZ = math.MaxInt32
	d.MaxZ = math.MinInt32
	if err := datastore.SaveDataByVersion(ctx.VersionID(), d); err != nil {
		return fmt.Errorf("Error in trying to save repo on roi extent change: %v\n", err)
	}

	// Delete all spans for this ROI for just this version.
	if err := smalldata.DeleteAll(ctx, false); err != nil {
		return err
	}
	return nil
}
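
The MinZ/MaxZ reset above uses sentinel values (math.MaxInt32 / math.MinInt32) so that any span written afterwards establishes fresh extents through simple comparisons, as PutSpans does in Example #4. A minimal, standalone sketch of that sentinel-initialized min/max pattern, with no DVID dependencies and a hypothetical extentTracker type:

package main

import (
	"fmt"
	"math"
)

// extentTracker records the inclusive Z range covered so far.
// Resetting to the sentinels below means "no data yet", so the first
// observed value always wins both comparisons.
type extentTracker struct {
	MinZ, MaxZ int32
}

func (e *extentTracker) reset() {
	e.MinZ = math.MaxInt32
	e.MaxZ = math.MinInt32
}

func (e *extentTracker) observe(z int32) {
	if z < e.MinZ {
		e.MinZ = z
	}
	if z > e.MaxZ {
		e.MaxZ = z
	}
}

func main() {
	var ext extentTracker
	ext.reset()
	for _, z := range []int32{12, 3, 7} {
		ext.observe(z)
	}
	fmt.Println(ext.MinZ, ext.MaxZ) // prints 3 12
}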
Example #2
// LoadImages bulk loads images, using different techniques depending on whether the input
// is a multidimensional file like HDF5 or a sequence of PNG/JPG/TIF images.
func (d *Data) LoadImages(v dvid.VersionID, offset dvid.Point, filenames []string) error {
	if len(filenames) == 0 {
		return nil
	}
	timedLog := dvid.NewTimeLog()

	// We only want one PUT at a time on a given version of this data to prevent interleaved
	// chunk PUTs that could potentially overwrite slice modifications.
	ctx := storage.NewDataContext(d, v)
	loadMutex := ctx.Mutex()
	loadMutex.Lock()

	// Handle cleanup, since multiple goroutines may still be writing data.
	load := &bulkLoadInfo{filenames: filenames, versionID: v, offset: offset}
	defer func() {
		loadMutex.Unlock()

		if load.extentChanged.Value() {
			err := datastore.SaveDataByVersion(v, d)
			if err != nil {
				dvid.Errorf("Error in trying to save repo for voxel extent change: %v\n", err)
			}
		}
	}()

	// Use different loading techniques if we have a potentially multidimensional HDF5 file
	// or many 2d images.
	var err error
	if dvid.Filename(filenames[0]).HasExtensionPrefix("hdf", "h5") {
		err = d.loadHDF(load)
	} else {
		err = d.loadXYImages(load)
	}

	if err != nil {
		timedLog.Infof("RPC load of %d files had error: %v\n", err)
	} else {
		timedLog.Infof("RPC load of %d files completed.\n", len(filenames))
	}
	return err
}
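
The branching decision above keys off the first filename's extension. A minimal, standalone sketch of that extension-based dispatch using only the standard library; hasExtensionPrefix, the commented-out loadHDF, and loadXYImages are hypothetical stand-ins, not the DVID implementations:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// hasExtensionPrefix reports whether the file's extension starts with any
// of the given prefixes, so "vol.hdf5" matches the prefix "hdf".
func hasExtensionPrefix(filename string, prefixes ...string) bool {
	ext := strings.ToLower(strings.TrimPrefix(filepath.Ext(filename), "."))
	for _, p := range prefixes {
		if strings.HasPrefix(ext, p) {
			return true
		}
	}
	return false
}

func load(filenames []string) error {
	if len(filenames) == 0 {
		return nil
	}
	if hasExtensionPrefix(filenames[0], "hdf", "h5") {
		fmt.Println("multidimensional load:", filenames[0])
		// loadHDF(filenames) would go here.
		return nil
	}
	fmt.Println("2d image sequence load:", len(filenames), "files")
	// loadXYImages(filenames) would go here.
	return nil
}

func main() {
	_ = load([]string{"stack.hdf5"})
	_ = load([]string{"z0000.png", "z0001.png"})
}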
Example #3
// PutVoxels persists voxels from a subvolume into the storage engine.
// The subvolume must be aligned to blocks of the data instance.
//
// This requirement simplifies the coding quite a bit.  Earlier versions
// of DVID allowed 2d writes, which required reading blocks, writing subsets of data
// into those blocks, and then writing the result, all within a transaction.  That
// approach is difficult to scale because it requires GETs within transactions and more
// complicated coordination when moving to distributed front-end DVIDs.
func (d *Data) PutVoxels(v dvid.VersionID, vox *Voxels, roi *ROI) error {

	// Make sure vox is block-aligned
	if !dvid.BlockAligned(vox, d.BlockSize()) {
		return fmt.Errorf("cannot store voxels in non-block aligned geometry %s -> %s", vox.StartPoint(), vox.EndPoint())
	}

	wg := new(sync.WaitGroup)

	// Only do one request at a time, although each request can start many goroutines.
	server.SpawnGoroutineMutex.Lock()
	defer server.SpawnGoroutineMutex.Unlock()

	// Keep track of changing extents and mark repo as dirty if changed.
	var extentChanged bool
	defer func() {
		if extentChanged {
			err := datastore.SaveDataByVersion(v, d)
			if err != nil {
				dvid.Infof("Error in trying to save repo on change: %v\n", err)
			}
		}
	}()

	// Track point extents
	extents := d.Extents()
	if extents.AdjustPoints(vox.StartPoint(), vox.EndPoint()) {
		extentChanged = true
	}

	// Iterate through index space for this data.
	for it, err := vox.IndexIterator(d.BlockSize()); err == nil && it.Valid(); it.NextSpan() {
		i0, i1, err := it.IndexSpan()
		if err != nil {
			return err
		}
		ptBeg := i0.Duplicate().(dvid.ChunkIndexer)
		ptEnd := i1.Duplicate().(dvid.ChunkIndexer)

		begX := ptBeg.Value(0)
		endX := ptEnd.Value(0)

		if extents.AdjustIndices(ptBeg, ptEnd) {
			extentChanged = true
		}

		wg.Add(int(endX-begX) + 1)
		c := dvid.ChunkPoint3d{begX, ptBeg.Value(1), ptBeg.Value(2)}
		for x := begX; x <= endX; x++ {
			c[0] = x
			curIndex := dvid.IndexZYX(c)

			// Don't PUT if this index is outside a specified ROI
			if roi != nil && roi.Iter != nil && !roi.Iter.InsideFast(curIndex) {
				wg.Done()
				continue
			}

			kv := &storage.TKeyValue{K: NewTKey(&curIndex)}
			putOp := &putOperation{vox, curIndex, v}
			op := &storage.ChunkOp{putOp, wg}
			d.PutChunk(&storage.Chunk{op, kv})
		}
	}

	wg.Wait()
	return nil
}
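
One subtlety above is the WaitGroup accounting: wg.Add covers every chunk in the X run up front, so a chunk skipped by the ROI filter must still call wg.Done to keep the count balanced. A minimal, standalone sketch of that pattern, with skip and process as hypothetical stand-ins for the ROI test and PutChunk:

package main

import (
	"fmt"
	"sync"
)

func skip(x int) bool { return x%3 == 0 } // stand-in for the ROI test

func process(x int, wg *sync.WaitGroup) {
	defer wg.Done()
	fmt.Println("put chunk", x)
}

func main() {
	wg := new(sync.WaitGroup)
	begX, endX := 0, 9

	// Add the full count up front, mirroring wg.Add(int(endX-begX) + 1) above.
	wg.Add(endX - begX + 1)
	for x := begX; x <= endX; x++ {
		if skip(x) {
			// A skipped chunk still owes the WaitGroup a Done,
			// otherwise wg.Wait() would block forever.
			wg.Done()
			continue
		}
		go process(x, wg)
	}
	wg.Wait()
}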
Example #4
// PutSpans saves a slice of spans representing an ROI into the datastore.
// If the init parameter is true, all previous spans of this ROI are deleted before
// writing these spans.
func (d *Data) PutSpans(versionID dvid.VersionID, spans []dvid.Span, init bool) error {
	ctx := datastore.NewVersionedCtx(d, versionID)

	db, err := storage.SmallDataStore()
	if err != nil {
		return err
	}

	// Delete the old key/values
	if init {
		if err := d.Delete(ctx); err != nil {
			return err
		}
	}

	// Make sure our small data store can do batching.
	batcher, ok := db.(storage.KeyValueBatcher)
	if !ok {
		return fmt.Errorf("Unable to store ROI: small data store can't do batching!")
	}

	// We only want one PUT at a time on a given version of this data to prevent interleaved PUTs.
	putMutex := ctx.Mutex()
	putMutex.Lock()

	// Save new extents after we're finished.
	defer func() {
		err := datastore.SaveDataByVersion(ctx.VersionID(), d)
		if err != nil {
			dvid.Errorf("Error in trying to save repo on roi extent change: %v\n", err)
		}
		putMutex.Unlock()
	}()

	// Put the new key/values
	const BATCH_SIZE = 10000
	batch := batcher.NewBatch(ctx)
	for i, span := range spans {
		if span[0] < d.MinZ {
			d.MinZ = span[0]
		}
		if span[0] > d.MaxZ {
			d.MaxZ = span[0]
		}
		if span[3] < span[2] {
			return fmt.Errorf("Got weird span %v.  span[3] (X1) < span[2] (X0)", span)
		}
		index := indexRLE{
			start: dvid.IndexZYX{span[2], span[1], span[0]},
			span:  uint32(span[3] - span[2] + 1),
		}
		tk := storage.NewTKey(keyROI, index.Bytes())
		batch.Put(tk, dvid.EmptyValue())
		if (i+1)%BATCH_SIZE == 0 {
			if err := batch.Commit(); err != nil {
				return fmt.Errorf("Error on batch PUT at span %d: %v\n", i, err)
			}
			batch = batcher.NewBatch(ctx)
		}
	}
	if len(spans)%BATCH_SIZE != 0 {
		if err := batch.Commit(); err != nil {
			return fmt.Errorf("Error on last batch PUT: %v\n", err)
		}
	}
	return nil
}
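
The batching loop above commits every BATCH_SIZE puts and then flushes the remainder only when the total isn't an exact multiple of the batch size; in that case the last in-loop commit already covered everything. A minimal, standalone sketch of the same control flow against a toy batch type; batch and commit here are hypothetical, not the storage.KeyValueBatcher API:

package main

import "fmt"

const batchSize = 4

type batch []int

func (b batch) commit() {
	fmt.Println("committing", len(b), "items")
}

func main() {
	items := make([]int, 10) // 10 items -> commits of 4, 4, then a remainder of 2

	b := batch{}
	for i := range items {
		b = append(b, i)
		if (i+1)%batchSize == 0 {
			b.commit()
			b = batch{} // start a fresh batch, as batcher.NewBatch(ctx) does above
		}
	}
	// Flush the remainder; if len(items) were a multiple of batchSize,
	// the loop's last commit already handled everything.
	if len(items)%batchSize != 0 {
		b.commit()
	}
}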
Example #5
// PutVoxels persists voxels from a subvolume into the storage engine.
// The subvolume must be aligned to blocks of the data instance, which simplifies
// the routine if the PUT is a mutation (signals MutateBlockEvent) instead of an ingestion.
func (d *Data) PutVoxels(v dvid.VersionID, mutID uint64, vox *Voxels, roiname dvid.InstanceName, mutate bool) error {
	r, err := GetROI(v, roiname, vox)
	if err != nil {
		return err
	}

	// Make sure vox is block-aligned
	if !dvid.BlockAligned(vox, d.BlockSize()) {
		return fmt.Errorf("cannot store voxels in non-block aligned geometry %s -> %s", vox.StartPoint(), vox.EndPoint())
	}

	wg := new(sync.WaitGroup)

	// Only do one request at a time, although each request can start many goroutines.
	server.SpawnGoroutineMutex.Lock()
	defer server.SpawnGoroutineMutex.Unlock()

	// Keep track of changing extents and mark repo as dirty if changed.
	var extentChanged bool
	defer func() {
		if extentChanged {
			err := datastore.SaveDataByVersion(v, d)
			if err != nil {
				dvid.Infof("Error in trying to save repo on change: %v\n", err)
			}
		}
	}()

	// Track point extents
	extents := d.Extents()
	if extents.AdjustPoints(vox.StartPoint(), vox.EndPoint()) {
		extentChanged = true
	}

	// extract buffer interface if it exists
	var putbuffer storage.RequestBuffer
	store, err := d.GetOrderedKeyValueDB()
	if err != nil {
		return fmt.Errorf("Data type imageblk had error initializing store: %v\n", err)
	}
	if req, ok := store.(storage.KeyValueRequester); ok {
		ctx := datastore.NewVersionedCtx(d, v)
		putbuffer = req.NewBuffer(ctx)
	}

	// Iterate through index space for this data.
	for it, err := vox.IndexIterator(d.BlockSize()); err == nil && it.Valid(); it.NextSpan() {
		i0, i1, err := it.IndexSpan()
		if err != nil {
			return err
		}
		ptBeg := i0.Duplicate().(dvid.ChunkIndexer)
		ptEnd := i1.Duplicate().(dvid.ChunkIndexer)

		begX := ptBeg.Value(0)
		endX := ptEnd.Value(0)

		if extents.AdjustIndices(ptBeg, ptEnd) {
			extentChanged = true
		}

		wg.Add(int(endX-begX) + 1)
		c := dvid.ChunkPoint3d{begX, ptBeg.Value(1), ptBeg.Value(2)}
		for x := begX; x <= endX; x++ {
			c[0] = x
			curIndex := dvid.IndexZYX(c)

			// Don't PUT if this index is outside a specified ROI
			if r != nil && r.Iter != nil && !r.Iter.InsideFast(curIndex) {
				wg.Done()
				continue
			}

			kv := &storage.TKeyValue{K: NewTKey(&curIndex)}
			putOp := &putOperation{vox, curIndex, v, mutate, mutID}
			op := &storage.ChunkOp{putOp, wg}
			d.PutChunk(&storage.Chunk{op, kv}, putbuffer)
		}
	}

	wg.Wait()

	// if a bufferable op, flush
	if putbuffer != nil {
		putbuffer.Flush()
	}

	return nil
}
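
The buffer handling above is an optional-capability pattern: the store is probed with a type assertion, the extra interface is used only if the assertion succeeds, and the nil check before Flush leaves the non-buffering path untouched. A minimal, standalone sketch of that pattern with hypothetical Store, Flusher, plainStore, and bufferedStore types, not the DVID storage interfaces:

package main

import "fmt"

// Store is the capability every backend has.
type Store interface {
	Put(key, value string)
}

// Flusher is an optional capability some backends also provide.
type Flusher interface {
	Flush()
}

type plainStore struct{}

func (plainStore) Put(k, v string) { fmt.Println("direct put", k) }

type bufferedStore struct{ pending []string }

func (b *bufferedStore) Put(k, v string) { b.pending = append(b.pending, k) }
func (b *bufferedStore) Flush()          { fmt.Println("flushing", len(b.pending), "puts") }

func write(store Store, keys []string) {
	// Probe for the optional capability, mirroring the
	// store.(storage.KeyValueRequester) assertion above.
	var flusher Flusher
	if f, ok := store.(Flusher); ok {
		flusher = f
	}

	for _, k := range keys {
		store.Put(k, "value")
	}

	// Only flush if the backend turned out to be bufferable.
	if flusher != nil {
		flusher.Flush()
	}
}

func main() {
	write(plainStore{}, []string{"a", "b"})
	write(&bufferedStore{}, []string{"a", "b"})
}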