Example #1
// instanceSelector retrieves the data instance given its complete string name and
// forwards the request to that instance's HTTP handler.
func instanceSelector(c *web.C, h http.Handler) http.Handler {
	fn := func(w http.ResponseWriter, r *http.Request) {
		var err error
		dataname := dvid.InstanceName(c.URLParams["dataname"])
		uuid, ok := c.Env["uuid"].(dvid.UUID)
		if !ok {
			msg := fmt.Sprintf("Bad format for UUID %q\n", c.Env["uuid"])
			BadRequest(w, r, msg)
			return
		}
		data, err := datastore.GetDataByUUID(uuid, dataname)
		if err != nil {
			BadRequest(w, r, err)
			return
		}
		v, err := datastore.VersionFromUUID(uuid)
		if err != nil {
			BadRequest(w, r, err)
			return
		}
		ctx := datastore.NewVersionedCtx(data, v)

		// Handle DVID-wide query string commands like non-interactive call designations
		queryValues := r.URL.Query()

		// Tally the request as interactive unless the query string explicitly opts out.
		interactive := queryValues.Get("interactive")
		if interactive == "" || (interactive != "false" && interactive != "0") {
			GotInteractiveRequest()
		}

		// TODO: setup routing for data instances as well.
		data.ServeHTTP(uuid, ctx, w, r)
	}
	return http.HandlerFunc(fn)
}
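
A minimal sketch of how a middleware with this signature gets mounted, assuming the zenazn/goji mux that the *web.C parameter implies; the stand-in middleware, route pattern, and port below are illustrative, not DVID's actual routing:

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/zenazn/goji/web"
)

// echoParams stands in for instanceSelector: it shows how a Goji middleware
// reads URL parameters from web.C before delegating to the wrapped handler.
func echoParams(c *web.C, h http.Handler) http.Handler {
	fn := func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "uuid=%s dataname=%s\n", c.URLParams["uuid"], c.URLParams["dataname"])
		h.ServeHTTP(w, r)
	}
	return http.HandlerFunc(fn)
}

func main() {
	mux := web.New()
	mux.Use(echoParams)
	mux.Get("/api/node/:uuid/:dataname/:keyword", http.NotFoundHandler())
	log.Fatal(http.ListenAndServe(":8000", mux))
}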
Example #2
// GetLabelRLEs returns the RLEs for a given label, where the key of the
// returned map is the block index in string format.
func (d *Data) GetLabelRLEs(v dvid.VersionID, label uint64) (dvid.BlockRLEs, error) {
	store, err := storage.SmallDataStore()
	if err != nil {
		return nil, fmt.Errorf("Data type labelvol had error initializing store: %v\n", err)
	}

	// Get the start/end indices for this body's KeyLabelSpatialMap (b + s) keys.
	begIndex := NewTKey(label, dvid.MinIndexZYX.ToIZYXString())
	endIndex := NewTKey(label, dvid.MaxIndexZYX.ToIZYXString())

	// Process all the b+s keys and their values, which contain RLE runs for that label.
	labelRLEs := dvid.BlockRLEs{}
	var f storage.ChunkFunc = func(chunk *storage.Chunk) error {
		// Get the block index where the fromLabel is present
		_, blockStr, err := DecodeTKey(chunk.K)
		if err != nil {
			return fmt.Errorf("Can't recover block index with chunk key %v: %v\n", chunk.K, err)
		}

		var blockRLEs dvid.RLEs
		if err := blockRLEs.UnmarshalBinary(chunk.V); err != nil {
			return fmt.Errorf("Unable to unmarshal RLE for label in block %v", chunk.K)
		}
		labelRLEs[blockStr] = blockRLEs
		return nil
	}
	ctx := datastore.NewVersionedCtx(d, v)
	err = store.ProcessRange(ctx, begIndex, endIndex, &storage.ChunkOp{}, f)
	if err != nil {
		return nil, err
	}
	dvid.Debugf("Found %d blocks with label %d\n", len(labelRLEs), label)
	return labelRLEs, nil
}
Example #3
func NewIterator(roiName dvid.InstanceName, versionID dvid.VersionID, b dvid.Bounder) (*Iterator, error) {
	dataservice, err := datastore.GetDataByVersion(versionID, roiName)
	if err != nil {
		return nil, fmt.Errorf("Can't get ROI with name %q: %v", roiName, err)
	}
	data, ok := dataservice.(*Data)
	if !ok {
		return nil, fmt.Errorf("Data name %q was not of roi data type\n", roiName)
	}

	// Convert voxel extents to block Z extents
	minPt := b.StartPoint().(dvid.Chunkable)
	maxPt := b.EndPoint().(dvid.Chunkable)

	minBlockCoord := minPt.Chunk(data.BlockSize)
	maxBlockCoord := maxPt.Chunk(data.BlockSize)

	minIndex := minIndexByBlockZ(minBlockCoord.Value(2))
	maxIndex := maxIndexByBlockZ(maxBlockCoord.Value(2))

	ctx := datastore.NewVersionedCtx(data, versionID)
	it := new(Iterator)
	it.spans, err = getSpans(ctx, minIndex, maxIndex)
	return it, err
}
Example #4
// Processes each change as we get it.
// TODO -- accumulate larger # of changes before committing to prevent
// excessive compaction time?  This assumes LSM storage engine, which
// might not always hold in future, so stick with incremental update
// until proven to be a bottleneck.
func (d *Data) handleBlockEvent(in <-chan datastore.SyncMessage, done <-chan struct{}) {
	store, err := storage.SmallDataStore()
	if err != nil {
		dvid.Errorf("Data type labelvol had error initializing store: %v\n", err)
		return
	}
	batcher, ok := store.(storage.KeyValueBatcher)
	if !ok {
		dvid.Errorf("Data type labelvol requires batch-enabled store, which %q is not\n", store)
		return
	}

	for msg := range in {
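		// Note: shutdown is only noticed when a message arrives, since the
		// done channel is polled in the select below rather than awaited.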
		select {
		case <-done:
			return
		default:
			ctx := datastore.NewVersionedCtx(d, msg.Version)
			switch delta := msg.Delta.(type) {
			case imageblk.Block:
				d.ingestBlock(ctx, delta, batcher)
			case labels.DeleteBlock:
				d.deleteBlock(ctx, delta, batcher)
			default:
				dvid.Criticalf("Cannot sync labelvol from block event.  Got unexpected delta: %v\n", msg)
			}
		}
	}
}
Example #5
// GetSize returns the size in voxels of the given label.
func (d *Data) GetSize(v dvid.VersionID, label uint64) (uint64, error) {
	store, err := storage.SmallDataStore()
	if err != nil {
		return 0, fmt.Errorf("Data type imagesz had error initializing store: %v\n", err)
	}

	// Get the start/end keys for the label.
	firstKey := NewLabelSizeTKey(label, 0)
	lastKey := NewLabelSizeTKey(label, math.MaxUint64)

	// Grab all keys for this range in one sequential read.
	ctx := datastore.NewVersionedCtx(d, v)
	keys, err := store.KeysInRange(ctx, firstKey, lastKey)
	if err != nil {
		return 0, err
	}

	if len(keys) == 0 {
		return 0, fmt.Errorf("found no size for label %d", label)
	}
	if len(keys) > 1 {
		return 0, fmt.Errorf("found %d sizes for label %d!", len(keys), label)
	}
	_, size, err := DecodeLabelSizeTKey(keys[0])
	return size, err
}
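
The [0, math.MaxUint64] bounds above turn a point lookup into a one-key range scan, which works because the size is encoded big-endian at the tail of the key so lexicographic order matches numeric order. A sketch of such a layout (assumed for illustration; DVID's actual TKey encoding adds type-specific prefixes):

package main

import (
	"encoding/binary"
	"fmt"
)

// labelSizeKey sketches a (label, size) key that sorts first by label and
// then numerically by size, enabling the firstKey/lastKey scan shown above.
func labelSizeKey(label, size uint64) []byte {
	k := make([]byte, 16)
	binary.BigEndian.PutUint64(k[:8], label) // primary order: label
	binary.BigEndian.PutUint64(k[8:], size)  // secondary order: size
	return k
}

func main() {
	first := labelSizeKey(7, 0)
	last := labelSizeKey(7, ^uint64(0)) // math.MaxUint64
	fmt.Printf("scan from % x\nscan to   % x\n", first, last)
}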
Example #6
// Returns a function that stores a tile in the data instance's configured
// encoding: LZ4-compressed serialization, PNG, or JPEG.
func (d *Data) putTileFunc(versionID dvid.VersionID) (outFunc, error) {
	db, err := d.GetKeyValueDB()
	if err != nil {
		return nil, fmt.Errorf("Cannot open imagetile store: %v\n", err)
	}
	ctx := datastore.NewVersionedCtx(d, versionID)

	return func(req TileReq, tile *dvid.Image) error {
		var err error
		var data []byte

		switch d.Encoding {
		case LZ4:
			compression, cerr := dvid.NewCompression(dvid.LZ4, dvid.DefaultCompression)
			if cerr != nil {
				return cerr
			}
			// Plain assignment: a := here would shadow err within this case
			// and silently drop any Serialize error at the check below.
			data, err = tile.Serialize(compression, d.Checksum())
		case PNG:
			data, err = tile.GetPNG()
		case JPG:
			data, err = tile.GetJPEG(d.Quality)
		}
		if err != nil {
			return err
		}
		return db.Put(ctx, NewTKeyByTileReq(req), data)
	}, nil
}
Example #7
// InitVersion initializes max label tracking for a new version if it has a parent
func (d *Data) InitVersion(uuid dvid.UUID, v dvid.VersionID) error {
	// Get the parent max label
	parents, err := datastore.GetParentsByVersion(v)
	if err != nil {
		return err
	}
	if len(parents) < 1 {
		return fmt.Errorf("InitVersion(%s, %d) called on node with no parents, which shouldn't be possible for branch", uuid, v)
	}
	var maxMax uint64
	for _, parent := range parents {
		maxLabel, ok := d.MaxLabel[parent]
		if !ok {
			return fmt.Errorf("parent of uuid %s had no max label", uuid)
		}
		if maxLabel > maxMax {
			maxMax = maxLabel
		}
	}
	d.MaxLabel[v] = maxMax

	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, maxMax)
	ctx := datastore.NewVersionedCtx(d, v)
	store, err := storage.SmallDataStore()
	if err != nil {
		return fmt.Errorf("data type labelvol had error initializing store: %v\n", err)
	}
	return store.Put(ctx, maxLabelTKey, buf)
}
Example #8
// Returns a function that stores a tile in the data instance's configured
// encoding: LZ4-compressed serialization, PNG, or JPEG.
func (d *Data) putTileFunc(versionID dvid.VersionID) (outFunc, error) {
	bigdata, err := storage.BigDataStore()
	if err != nil {
		return nil, fmt.Errorf("Cannot open big data store: %v\n", err)
	}
	ctx := datastore.NewVersionedCtx(d, versionID)

	return func(index *IndexTile, tile *dvid.Image) error {
		var err error
		var data []byte

		switch d.Encoding {
		case LZ4:
			compression, cerr := dvid.NewCompression(dvid.LZ4, dvid.DefaultCompression)
			if cerr != nil {
				return cerr
			}
			// Plain assignment: a := here would shadow err within this case
			// and silently drop any Serialize error at the check below.
			data, err = tile.Serialize(compression, d.Checksum())
		case PNG:
			data, err = tile.GetPNG()
		case JPG:
			data, err = tile.GetJPEG(d.Quality)
		}
		if err != nil {
			return err
		}
		return bigdata.Put(ctx, index.Bytes(), data)
	}, nil
}
Example #9
// writeLabelVol writes the label volume, using the given sorted block list if provided.
func (d *Data) writeLabelVol(v dvid.VersionID, label uint64, brles dvid.BlockRLEs, sortblks []dvid.IZYXString) error {
	store, err := d.GetOrderedKeyValueDB()
	if err != nil {
		return fmt.Errorf("Data type labelvol had error initializing store: %v\n", err)
	}
	batcher, ok := store.(storage.KeyValueBatcher)
	if !ok {
		return fmt.Errorf("Data type labelvol requires batch-enabled store, which %q is not\n", store)
	}

	ctx := datastore.NewVersionedCtx(d, v)
	batch := batcher.NewBatch(ctx)
	if sortblks != nil {
		for _, izyxStr := range sortblks {
			serialization, err := brles[izyxStr].MarshalBinary()
			if err != nil {
				return fmt.Errorf("Error serializing RLEs for label %d: %v\n", label, err)
			}
			batch.Put(NewTKey(label, izyxStr), serialization)
		}
	} else {
		for izyxStr, rles := range brles {
			serialization, err := rles.MarshalBinary()
			if err != nil {
				return fmt.Errorf("Error serializing RLEs for label %d: %v\n", label, err)
			}
			batch.Put(NewTKey(label, izyxStr), serialization)
		}
	}
	if err := batch.Commit(); err != nil {
		return fmt.Errorf("Error on updating RLEs for label %d: %v\n", label, err)
	}
	return nil
}
Example #10
// put handles a PUT command-line request.
func (d *Data) put(cmd datastore.Request, reply *datastore.Response) error {
	if len(cmd.Command) < 5 {
		return fmt.Errorf("The key name must be specified after 'put'")
	}
	if len(cmd.Input) == 0 {
		return fmt.Errorf("No data was passed into standard input")
	}
	var uuidStr, dataName, cmdStr, keyStr string
	cmd.CommandArgs(1, &uuidStr, &dataName, &cmdStr, &keyStr)

	_, versionID, err := datastore.MatchingUUID(uuidStr)
	if err != nil {
		return err
	}

	// Store data
	if !d.Versioned() {
		// Map everything to root version.
		versionID, err = datastore.GetRepoRootVersion(versionID)
		if err != nil {
			return err
		}
	}
	ctx := datastore.NewVersionedCtx(d, versionID)
	if err = d.PutData(ctx, keyStr, cmd.Input); err != nil {
		return fmt.Errorf("Error on put to key %q for keyvalue %q: %v\n", keyStr, d.DataName(), err)
	}

	reply.Output = []byte(fmt.Sprintf("Put %d bytes into key %q for keyvalue %q, uuid %s\n",
		len(cmd.Input), keyStr, d.DataName(), uuidStr))
	return nil
}
Example #11
// Processes each labelblk change as we get it.
func (d *Data) processEvents() {
	batcher, err := d.GetKeyValueBatcher()
	if err != nil {
		dvid.Errorf("handleBlockEvent %v\n", err)
		return
	}
	var stop bool
	var wg *sync.WaitGroup
	for {
		select {
		case wg = <-d.syncDone:
			queued := len(d.syncCh)
			if queued > 0 {
				dvid.Infof("Received shutdown signal for %q sync events (%d in queue)\n", d.DataName(), queued)
				stop = true
			} else {
				dvid.Infof("Shutting down sync event handler for instance %q...\n", d.DataName())
				wg.Done()
				return
			}
		case msg := <-d.syncCh:
			ctx := datastore.NewVersionedCtx(d, msg.Version)
			d.handleSyncMessage(ctx, msg, batcher)

			if stop && len(d.syncCh) == 0 {
				dvid.Infof("Shutting down sync even handler for instance %q after draining sync events.\n", d.DataName())
				wg.Done()
				return
			}
		}
	}
}
Example #12
func TestLabelblkDirectAPI(t *testing.T) {
	tests.UseStore()
	defer tests.CloseStore()

	uuid, versionID := initTestRepo()
	labels := newDataInstance(uuid, t, "mylabels")
	labelsCtx := datastore.NewVersionedCtx(labels, versionID)

	// Create a fake block-aligned label volume
	offset := dvid.Point3d{32, 0, 64}
	size := dvid.Point3d{96, 64, 160}
	subvol := dvid.NewSubvolume(offset, size)
	data := makeVolume(offset, size)

	// Store it into datastore at root
	v, err := labels.NewVoxels(subvol, data)
	if err != nil {
		t.Fatalf("Unable to make new labels Voxels: %v\n", err)
	}
	if err = labels.PutVoxels(versionID, v, nil); err != nil {
		t.Errorf("Unable to put labels for %s: %v\n", labelsCtx, err)
	}
	if v.NumVoxels() != int64(len(data))/8 {
		t.Errorf("# voxels (%d) after PutVoxels != # original voxels (%d)\n",
			v.NumVoxels(), int64(len(data))/8)
	}

	// Read the stored image
	v2, err := labels.NewVoxels(subvol, nil)
	if err != nil {
		t.Errorf("Unable to make new labels ExtHandler: %v\n", err)
	}
	if err = labels.GetVoxels(versionID, v2, nil); err != nil {
		t.Errorf("Unable to get voxels for %s: %v\n", labelsCtx, err)
	}

	// Make sure the retrieved image matches the original
	if v.Stride() != v2.Stride() {
		t.Errorf("Stride in retrieved subvol incorrect\n")
	}
	if v.Interpolable() != v2.Interpolable() {
		t.Errorf("Interpolable bool in retrieved subvol incorrect\n")
	}
	if !reflect.DeepEqual(v.Size(), v2.Size()) {
		t.Errorf("Size in retrieved subvol incorrect: %s vs expected %s\n",
			v2.Size(), v.Size())
	}
	if v.NumVoxels() != v2.NumVoxels() {
		t.Errorf("# voxels in retrieved is different: %d vs expected %d\n",
			v2.NumVoxels(), v.NumVoxels())
	}
	byteData := v2.Data()

	for i := int64(0); i < v2.NumVoxels()*8; i++ {
		if byteData[i] != data[i] {
			t.Logf("Size of data: %d bytes from GET, %d bytes in PUT\n", len(data), len(data))
			t.Fatalf("GET subvol (%d) != PUT subvol (%d) @ uint64 #%d", byteData[i], data[i], i)
		}
	}
}
Example #13
func (d *Data) syncSplit(name dvid.InstanceName, in <-chan datastore.SyncMessage, done <-chan struct{}) {
	// Start N goroutines to process blocks.  No transactional support is needed
	// for the GET-PUT combo if each spatial coordinate (block) is only handled
	// serially by a single goroutine.
	const numprocs = 32
	const splitBufSize = 10
	var pch [numprocs]chan splitOp
	for i := 0; i < numprocs; i++ {
		pch[i] = make(chan splitOp, splitBufSize)
		go d.splitBlock(pch[i])
	}

	for msg := range in {
		select {
		case <-done:
			for i := 0; i < numprocs; i++ {
				close(pch[i])
			}
			return
		default:
			switch delta := msg.Delta.(type) {
			case labels.DeltaSplit:
				ctx := datastore.NewVersionedCtx(d, msg.Version)
				n := delta.OldLabel % numprocs
				pch[n] <- splitOp{delta, *ctx}
			case labels.DeltaSplitStart:
				// Mark the old label as being under transition
				iv := dvid.InstanceVersion{name, msg.Version}
				splitCache.Incr(iv, delta.OldLabel)
			default:
				dvid.Criticalf("bad delta in split event: %v\n", delta)
				continue
			}
		}
	}
}
Example #14
// NewLabel returns a new label for the given version.
func (d *Data) NewLabel(v dvid.VersionID) (uint64, error) {
	d.ml_mu.Lock()
	defer d.ml_mu.Unlock()

	// Make sure we aren't trying to increment a label on a locked node.
	locked, err := datastore.LockedVersion(v)
	if err != nil {
		return 0, err
	}
	if locked {
		return 0, fmt.Errorf("can't ask for new label in a locked version id %d", v)
	}

	// Increment and store.
	d.MaxRepoLabel++
	d.MaxLabel[v] = d.MaxRepoLabel

	store, err := storage.SmallDataStore()
	if err != nil {
		return 0, fmt.Errorf("can't initializing small data store: %v\n", err)
	}
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, d.MaxRepoLabel)
	ctx := datastore.NewVersionedCtx(d, v)
	if err := store.Put(ctx, maxLabelTKey, buf); err != nil {
		return 0, err
	}

	ctx2 := storage.NewDataContext(d, 0)
	if err := store.Put(ctx2, maxRepoLabelTKey, buf); err != nil {
		return 0, err
	}

	return d.MaxRepoLabel, nil
}
Example #15
// GetVoxels copies voxels from the storage engine to Voxels, a requested subvolume or 2d image.
func (d *Data) GetVoxels(v dvid.VersionID, vox *Voxels, r *ROI) error {
	timedLog := dvid.NewTimeLog()
	defer timedLog.Infof("GetVoxels %s", vox)

	store, err := storage.MutableStore()
	if err != nil {
		return fmt.Errorf("Data type imageblk had error initializing store: %v\n", err)
	}

	// Only do one request at a time, although each request can start many goroutines.
	server.SpawnGoroutineMutex.Lock()
	defer server.SpawnGoroutineMutex.Unlock()

	ctx := datastore.NewVersionedCtx(d, v)

	wg := new(sync.WaitGroup)
	it, err := vox.IndexIterator(d.BlockSize())
	if err != nil {
		return err
	}
	for ; it.Valid(); it.NextSpan() {
		indexBeg, indexEnd, err := it.IndexSpan()
		if err != nil {
			return err
		}
		begTKey := NewTKey(indexBeg)
		endTKey := NewTKey(indexEnd)

		// Get set of blocks in ROI if ROI provided
		var chunkOp *storage.ChunkOp
		if r != nil && r.Iter != nil {
			ptBeg := indexBeg.Duplicate().(dvid.ChunkIndexer)
			ptEnd := indexEnd.Duplicate().(dvid.ChunkIndexer)
			begX := ptBeg.Value(0)
			endX := ptEnd.Value(0)

			blocksInROI := make(map[string]bool, (endX - begX + 1))
			c := dvid.ChunkPoint3d{begX, ptBeg.Value(1), ptBeg.Value(2)}
			for x := begX; x <= endX; x++ {
				c[0] = x
				curIndex := dvid.IndexZYX(c)
				if r.Iter.InsideFast(curIndex) {
					indexString := string(curIndex.Bytes())
					blocksInROI[indexString] = true
				}
			}
			chunkOp = &storage.ChunkOp{&getOperation{vox, blocksInROI, r.attenuation}, wg}
		} else {
			chunkOp = &storage.ChunkOp{&getOperation{vox, nil, 0}, wg}
		}

		// Send the entire range of key-value pairs to chunk processor
		err = store.ProcessRange(ctx, begTKey, endTKey, chunkOp, storage.ChunkFunc(d.ReadChunk))
		if err != nil {
			return fmt.Errorf("Unable to GET data %s: %v", ctx, err)
		}
	}
	wg.Wait()
	return nil
}
Example #16
// Loads blocks with old data if they exist.
func (d *Data) loadOldBlocks(v dvid.VersionID, vox *Voxels, blocks storage.TKeyValues) error {
	store, err := storage.MutableStore()
	if err != nil {
		return fmt.Errorf("Data type imageblk had error initializing store: %v\n", err)
	}

	ctx := datastore.NewVersionedCtx(d, v)

	// Create a map of old blocks indexed by the index
	oldBlocks := map[dvid.IZYXString][]byte{}

	// Iterate through index space for this data using ZYX ordering.
	blockSize := d.BlockSize()
	blockNum := 0
	it, err := vox.IndexIterator(blockSize)
	if err != nil {
		return err
	}
	for ; it.Valid(); it.NextSpan() {
		indexBeg, indexEnd, err := it.IndexSpan()
		if err != nil {
			return err
		}
		begTKey := NewTKey(indexBeg)
		endTKey := NewTKey(indexEnd)

		// Get previous data.
		keyvalues, err := store.GetRange(ctx, begTKey, endTKey)
		if err != nil {
			return err
		}
		for _, kv := range keyvalues {
			indexZYX, err := DecodeTKey(kv.K)
			if err != nil {
				return err
			}
			block, _, err := dvid.DeserializeData(kv.V, true)
			if err != nil {
				return fmt.Errorf("Unable to deserialize block, %s: %v", ctx, err)
			}
			oldBlocks[indexZYX.ToIZYXString()] = block
		}

		// Load previous data into blocks
		ptBeg := indexBeg.Duplicate().(dvid.ChunkIndexer)
		ptEnd := indexEnd.Duplicate().(dvid.ChunkIndexer)
		begX := ptBeg.Value(0)
		endX := ptEnd.Value(0)
		c := dvid.ChunkPoint3d{begX, ptBeg.Value(1), ptBeg.Value(2)}
		for x := begX; x <= endX; x++ {
			c[0] = x
			curIndex := dvid.IndexZYX(c)
			curTKey := NewTKey(&curIndex)
			blocks[blockNum].K = curTKey
			block, ok := oldBlocks[curIndex.ToIZYXString()]
			if ok {
				copy(blocks[blockNum].V, block)
			}
			blockNum++
		}
	}
	return nil
}
Example #17
// TODO -- Clean up all the writing and simplify now that we have block-aligned writes.
// writeBlocks ingests blocks of voxel data asynchronously using batch writes.
func (d *Data) writeBlocks(v dvid.VersionID, b storage.TKeyValues, wg1, wg2 *sync.WaitGroup) error {
	batcher, err := d.GetKeyValueBatcher()
	if err != nil {
		return err
	}

	preCompress, postCompress := 0, 0

	ctx := datastore.NewVersionedCtx(d, v)
	evt := datastore.SyncEvent{d.DataUUID(), IngestBlockEvent}

	<-server.HandlerToken
	go func() {
		defer func() {
			wg1.Done()
			wg2.Done()
			dvid.Debugf("Wrote voxel blocks.  Before %s: %d bytes.  After: %d bytes\n", d.Compression(), preCompress, postCompress)
			server.HandlerToken <- 1
		}()

		mutID := d.NewMutationID()
		batch := batcher.NewBatch(ctx)
		for i, block := range b {
			serialization, err := dvid.SerializeData(block.V, d.Compression(), d.Checksum())
			if err != nil {
				dvid.Errorf("Unable to serialize block: %v\n", err)
				return
			}
			preCompress += len(block.V)
			postCompress += len(serialization)
			batch.Put(block.K, serialization)

			indexZYX, err := DecodeTKey(block.K)
			if err != nil {
				dvid.Errorf("Unable to recover index from block key: %v\n", block.K)
				return
			}
			msg := datastore.SyncMessage{IngestBlockEvent, v, Block{indexZYX, block.V, mutID}}
			if err := datastore.NotifySubscribers(evt, msg); err != nil {
				dvid.Errorf("Unable to notify subscribers of ChangeBlockEvent in %s\n", d.DataName())
				return
			}

			// Check if we should commit
			if i%KVWriteSize == KVWriteSize-1 {
				if err := batch.Commit(); err != nil {
					dvid.Errorf("Error on trying to write batch: %v\n", err)
					return
				}
				batch = batcher.NewBatch(ctx)
			}
		}
		if err := batch.Commit(); err != nil {
			dvid.Errorf("Error on trying to write batch: %v\n", err)
			return
		}
	}()
	return nil
}
Example #18
func (d *Data) mergeLabels(batcher storage.KeyValueBatcher, v dvid.VersionID, op labels.MergeOp) error {
	d.Lock()
	defer d.Unlock()

	d.StartUpdate()
	defer d.StopUpdate()
	ctx := datastore.NewVersionedCtx(d, v)
	batch := batcher.NewBatch(ctx)

	// Get the target label
	targetTk := NewLabelTKey(op.Target)
	targetElems, err := getElements(ctx, targetTk)
	if err != nil {
		return fmt.Errorf("get annotations for instance %q, target %d, in syncMerge: %v\n", d.DataName(), op.Target, err)
	}

	// Iterate through each merged label, read old elements, delete that k/v, then add it to the current target elements.
	var delta DeltaModifyElements
	elemsAdded := 0
	for label := range op.Merged {
		tk := NewLabelTKey(label)
		elems, err := getElements(ctx, tk)
		if err != nil {
			return fmt.Errorf("unable to get annotation elements for instance %q, label %d in syncMerge: %v\n", d.DataName(), label, err)
		}
		if len(elems) == 0 {
			continue
		}
		batch.Delete(tk)
		elemsAdded += len(elems)
		targetElems = append(targetElems, elems...)

		// for labelsz.  TODO, only do this computation if really subscribed.
		for _, elem := range elems {
			delta.Add = append(delta.Add, ElementPos{Label: op.Target, Kind: elem.Kind, Pos: elem.Pos})
			delta.Del = append(delta.Del, ElementPos{Label: label, Kind: elem.Kind, Pos: elem.Pos})
		}
	}
	if elemsAdded > 0 {
		val, err := json.Marshal(targetElems)
		if err != nil {
			return fmt.Errorf("couldn't serialize annotation elements in instance %q: %v\n", d.DataName(), err)
		}
		batch.Put(targetTk, val)
		if err := batch.Commit(); err != nil {
			return fmt.Errorf("unable to commit merge for instance %q: %v\n", d.DataName(), err)
		}
	}

	// Notify any subscribers of label annotation changes.
	evt := datastore.SyncEvent{Data: d.DataUUID(), Event: ModifyElementsEvent}
	msg := datastore.SyncMessage{Event: ModifyElementsEvent, Version: ctx.VersionID(), Delta: delta}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		dvid.Criticalf("unable to notify subscribers of event %s: %v\n", evt, err)
	}
	return nil
}
Example #19
// GetBlocks returns a slice of bytes corresponding to all the blocks along a span in X
func (d *Data) GetBlocks(v dvid.VersionID, start dvid.ChunkPoint3d, span int) ([]byte, error) {
	store, err := storage.MutableStore()
	if err != nil {
		return nil, fmt.Errorf("Data type imageblk had error initializing store: %v\n", err)
	}

	indexBeg := dvid.IndexZYX(start)
	end := start
	end[0] += int32(span - 1)
	indexEnd := dvid.IndexZYX(end)
	begTKey := NewTKey(&indexBeg)
	endTKey := NewTKey(&indexEnd)

	ctx := datastore.NewVersionedCtx(d, v)

	iv := dvid.InstanceVersion{d.DataName(), v}
	mapping := labels.MergeCache.LabelMap(iv)

	keyvalues, err := store.GetRange(ctx, begTKey, endTKey)
	if err != nil {
		return nil, err
	}

	var buf bytes.Buffer

	// Save the # of keyvalues actually obtained.
	numkv := len(keyvalues)
	binary.Write(&buf, binary.LittleEndian, int32(numkv))

	// Deserialize each block, remap labels if a merge is in progress, and
	// append the raw block data to the buffer.
	uncompress := true
	for _, kv := range keyvalues {
		block, _, err := dvid.DeserializeData(kv.V, uncompress)
		if err != nil {
			return nil, fmt.Errorf("Unable to deserialize block, %s (%v): %v", ctx, kv.K, err)
		}
		if mapping != nil {
			n := len(block) / 8
			for i := 0; i < n; i++ {
				orig := binary.LittleEndian.Uint64(block[i*8 : i*8+8])
				mapped, found := mapping.FinalLabel(orig)
				if !found {
					mapped = orig
				}
				binary.LittleEndian.PutUint64(block[i*8:i*8+8], mapped)
			}
		}

		_, err = buf.Write(block)
		if err != nil {
			return nil, err
		}
	}

	return buf.Bytes(), nil
}
Example #20
func TestROIPartition(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	// Create the ROI dataservice.
	uuid, versionID := initTestRepo()

	config := dvid.NewConfig()
	dataservice, err := datastore.NewData(uuid, roitype, "roi", config)
	if err != nil {
		t.Errorf("Error creating new roi instance: %v\n", err)
	}
	data, ok := dataservice.(*Data)
	if !ok {
		t.Errorf("Returned new data instance is not roi.Data\n")
	}

	// PUT an ROI
	roiRequest := fmt.Sprintf("%snode/%s/%s/roi", server.WebAPIPath, uuid, data.DataName())
	req, err := http.NewRequest("POST", roiRequest, getSpansJSON(testSpans))
	if err != nil {
		t.Errorf("Unsuccessful POST request (%s): %v\n", roiRequest, err)
	}
	ctx := datastore.NewVersionedCtx(data, versionID)
	w := httptest.NewRecorder()
	data.ServeHTTP(uuid, ctx, w, req)
	if w.Code != http.StatusOK {
		t.Errorf("Bad server response roi POST, status %s, for roi %q\n", w.Code, data.DataName())
	}

	// Request the standard subvolume partitioning
	partitionReq := fmt.Sprintf("%snode/%s/%s/partition?batchsize=5&optimized=true", server.WebAPIPath, uuid,
		data.DataName())
	req, err = http.NewRequest("GET", partitionReq, nil)
	if err != nil {
		t.Errorf("Unsuccessful GET request (%s): %v\n", partitionReq, err)
	}
	w = httptest.NewRecorder()
	data.ServeHTTP(uuid, ctx, w, req)
	if w.Code != http.StatusOK {
		t.Errorf("Bad server response roi GET, status %s, for roi %q\n", w.Code, data.DataName())
	}
	var subvolJSON, expectedJSON interface{}
	response := w.Body.Bytes()
	if err := json.Unmarshal(response, &subvolJSON); err != nil {
		t.Errorf("Can't unmarshal JSON: %s\n", w.Body.Bytes())
	}
	if err := json.Unmarshal([]byte(expectedPartition), &expectedJSON); err != nil {
		t.Fatalf("Can't unmarshal expected partition JSON: %v\n", err)
	}
	if !reflect.DeepEqual(subvolJSON, expectedJSON) {
		t.Errorf("Error doing optimized subvolume partitioning.  Got bad result:\n%s\n",
			string(response))
	}
}
Example #21
// Processes each change as we get it.
// TODO -- accumulate larger # of changes before committing to prevent
// excessive compaction time?  This assumes LSM storage engine, which
// might not always hold in future, so stick with incremental update
// until proven to be a bottleneck.
func (d *Data) handleBlockEvent() {
	store, err := d.GetOrderedKeyValueDB()
	if err != nil {
		dvid.Errorf("Data type labelvol had error initializing store: %v\n", err)
		return
	}
	batcher, ok := store.(storage.KeyValueBatcher)
	if !ok {
		dvid.Errorf("Data type labelvol requires batch-enabled store, which %q is not\n", store)
		return
	}
	var stop bool
	var wg *sync.WaitGroup
	for {
		select {
		case wg = <-d.syncDone:
			queued := len(d.syncCh)
			if queued > 0 {
				dvid.Infof("Received shutdown signal for %q sync events (%d in queue)\n", d.DataName(), queued)
				stop = true
			} else {
				dvid.Infof("Shutting down sync event handler for instance %q...\n", d.DataName())
				wg.Done()
				return
			}
		case msg := <-d.syncCh:
			d.StartUpdate()
			ctx := datastore.NewVersionedCtx(d, msg.Version)
			switch delta := msg.Delta.(type) {
			case imageblk.Block:
				d.ingestBlock(ctx, delta, batcher)
			case imageblk.MutatedBlock:
				d.mutateBlock(ctx, delta, batcher)
			case labels.DeleteBlock:
				d.deleteBlock(ctx, delta, batcher)
			default:
				dvid.Criticalf("Cannot sync labelvol from block event.  Got unexpected delta: %v\n", msg)
			}
			d.StopUpdate()

			if stop && len(d.syncCh) == 0 {
				dvid.Infof("Shutting down sync even handler for instance %q after draining sync events.\n", d.DataName())
				wg.Done()
				return
			}
		}
	}
}
Example #22
func (d *Data) syncMerge(name dvid.InstanceName, in <-chan datastore.SyncMessage, done <-chan struct{}) {
	// Start N goroutines to process blocks.  No transactional support is needed
	// for the GET-PUT combo if each spatial coordinate (block) is only handled
	// serially by a single goroutine.
	const numprocs = 32
	const mergeBufSize = 100
	var pch [numprocs]chan mergeOp
	for i := 0; i < numprocs; i++ {
		pch[i] = make(chan mergeOp, mergeBufSize)
		go d.mergeBlock(pch[i])
	}

	// Process incoming merge messages
	for msg := range in {
		select {
		case <-done:
			for i := 0; i < numprocs; i++ {
				close(pch[i])
			}
			return
		default:
			iv := dvid.InstanceVersion{name, msg.Version}
			switch delta := msg.Delta.(type) {
			case labels.DeltaMerge:
				ctx := datastore.NewVersionedCtx(d, msg.Version)
				wg := new(sync.WaitGroup)
				for izyxStr := range delta.Blocks {
					n := hashStr(izyxStr, numprocs)
					wg.Add(1)
					pch[n] <- mergeOp{delta.MergeOp, ctx, izyxStr, wg}
				}
				// When we've processed all the delta blocks, we can remove this merge op
				// from the merge cache since all labels will have completed.
				go func(wg *sync.WaitGroup) {
					wg.Wait()
					labels.MergeCache.Remove(iv, delta.MergeOp)
				}(wg)

			case labels.DeltaMergeStart:
				// Add this merge into the cached blockRLEs
				labels.MergeCache.Add(iv, delta.MergeOp)

			default:
				dvid.Criticalf("bad delta in merge event: %v\n", delta)
				continue
			}
		}
	}
}
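
hashStr is not shown in these examples; a minimal stand-in consistent with its use here would map a block-coordinate string to a stable shard in [0, numprocs) so a given block always reaches the same goroutine (the FNV-1a choice below is an assumption, not necessarily DVID's):

package main

import (
	"fmt"
	"hash/fnv"
)

// shard hashes a block-coordinate string to a stable index in [0, n) so the
// same block is always processed by the same goroutine, keeping its GET-PUT
// sequences from interleaving.
func shard(izyx string, n int) int {
	h := fnv.New32a()
	h.Write([]byte(izyx)) // Write on a hash.Hash32 never returns an error
	return int(h.Sum32()) % n
}

func main() {
	fmt.Println(shard("1,2,3", 32))
}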
Example #23
// loadOldBlock loads a block of data from storage.
func (d *Data) loadOldBlock(v dvid.VersionID, k storage.TKey) ([]byte, error) {
	store, err := d.GetOrderedKeyValueDB()
	if err != nil {
		return nil, fmt.Errorf("Data type imageblk had error initializing store: %v\n", err)
	}

	ctx := datastore.NewVersionedCtx(d, v)
	serialization, err := store.Get(ctx, k)
	if err != nil {
		return nil, err
	}
	data, _, err := dvid.DeserializeData(serialization, true)
	if err != nil {
		return nil, fmt.Errorf("Unable to deserialize block, %s: %v", ctx, err)
	}
	return data, nil
}
Example #24
// Handles a stream of block operations for a unique shard of block coordinates.
// Since the same block coordinate always gets mapped to the same goroutine we can
// do a GET/PUT without worrying about interleaving PUT from other goroutines, as
// long as there is only one DVID server.
func (d *Data) processBlock(ch <-chan procMsg) {
	for msg := range ch {
		ctx := datastore.NewVersionedCtx(d, msg.v)
		switch op := msg.op.(type) {
		case mergeOp:
			d.mergeBlock(ctx, op)

		case splitOp:
			d.splitBlock(ctx, op)

		case deltaBlock:
			d.downsizeAdd(msg.v, op)

		default:
			dvid.Criticalf("Received unknown processing msg in processBlock: %v\n", msg)
		}
	}
}
Example #25
// If annotation elements are added or deleted, adjust the label counts.
func (d *Data) processEvents() {
	batcher, err := d.GetKeyValueBatcher()
	if err != nil {
		dvid.Errorf("Exiting sync goroutine for labelsz %q after annotation modifications: %v\n", d.DataName(), err)
		return
	}
	var stop bool
	var wg *sync.WaitGroup
	for {
		select {
		case wg = <-d.syncDone:
			queued := len(d.syncCh)
			if queued > 0 {
				dvid.Infof("Received shutdown signal for %q sync events (%d in queue)\n", d.DataName(), queued)
				stop = true
			} else {
				dvid.Infof("Shutting down sync event handler for instance %q...\n", d.DataName())
				wg.Done()
				return
			}
		case msg := <-d.syncCh:
			d.StartUpdate()
			ctx := datastore.NewVersionedCtx(d, msg.Version)
			switch delta := msg.Delta.(type) {
			case annotation.DeltaModifyElements:
				d.modifyElements(ctx, delta, batcher)
			default:
				dvid.Criticalf("Cannot sync annotations from modify element.  Got unexpected delta: %v\n", msg)
			}
			d.StopUpdate()

			if stop && len(d.syncCh) == 0 {
				dvid.Infof("Shutting down sync even handler for instance %q after draining sync events.\n", d.DataName())
				wg.Done()
				return
			}
		}
	}
}
Example #26
func (d *Data) adjustMaxLabels(store storage.SmallDataStorer, root dvid.VersionID) error {
	buf := make([]byte, 8)

	parentMax, ok := d.MaxLabel[root]
	if !ok {
		return fmt.Errorf("can't adjust version id %d since none exists in metadata", root)
	}
	childIDs, err := datastore.GetChildrenByVersion(root)
	if err != nil {
		return err
	}
	for _, childID := range childIDs {
		var save bool
		childMax, ok := d.MaxLabel[childID]
		if !ok {
			// set to parent max
			d.MaxLabel[childID] = parentMax
			save = true
		} else if childMax < parentMax {
			d.MaxLabel[childID] = parentMax + childMax + 1
			save = true
		}

		// save the key-value
		if save {
			binary.LittleEndian.PutUint64(buf, d.MaxLabel[childID])
			ctx := datastore.NewVersionedCtx(d, childID)
			if err := store.Put(ctx, maxLabelTKey, buf); err != nil {
				return err
			}
		}

		// recurse for depth-first
		if err := d.adjustMaxLabels(store, childID); err != nil {
			return err
		}
	}
	return nil
}
Example #27
func TestKeyvalueRoundTrip(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid, versionID := initTestRepo()

	// Add data
	config := dvid.NewConfig()
	dataservice, err := datastore.NewData(uuid, kvtype, "roundtripper", config)
	if err != nil {
		t.Errorf("Error creating new keyvalue instance: %v\n", err)
	}
	kvdata, ok := dataservice.(*Data)
	if !ok {
		t.Errorf("Returned new data instance is not keyvalue.Data\n")
	}

	ctx := datastore.NewVersionedCtx(dataservice, versionID)

	keyStr := "testkey.-{}03`~| %@\x01"
	value := []byte("I like Japan and this is some unicode: \u65e5\u672c\u8a9e")

	if err = kvdata.PutData(ctx, keyStr, value); err != nil {
		t.Errorf("Could not put keyvalue data: %v\n", err)
	}

	retrieved, found, err := kvdata.GetData(ctx, keyStr)
	if err != nil {
		t.Fatalf("Could not get keyvalue data: %v\n", err)
	}
	if !found {
		t.Fatalf("Could not find put keyvalue\n")
	}
	if !bytes.Equal(value, retrieved) {
		t.Errorf("keyvalue retrieved %q != put %q\n", string(retrieved), string(value))
	}
}
Example #28
// GetSizeRange returns a JSON list of mapped labels that have volumes within the given range.
// If maxSize is 0, all mapped labels with volume >= minSize are returned.
func (d *Data) GetSizeRange(v dvid.VersionID, minSize, maxSize uint64) (string, error) {
	store, err := storage.SmallDataStore()
	if err != nil {
		return "{}", fmt.Errorf("Data type imagesz had error initializing store: %v\n", err)
	}

	// Get the start/end keys for the size range.
	firstKey := NewSizeLabelTKey(minSize, 0)
	var upperBound uint64
	if maxSize != 0 {
		upperBound = maxSize
	} else {
		upperBound = math.MaxUint64
	}
	lastKey := NewSizeLabelTKey(upperBound, math.MaxUint64)

	// Grab all keys for this range in one sequential read.
	ctx := datastore.NewVersionedCtx(d, v)
	keys, err := store.KeysInRange(ctx, firstKey, lastKey)
	if err != nil {
		return "{}", err
	}

	// Convert them to a JSON compatible structure.
	labels := make([]uint64, len(keys))
	for i, key := range keys {
		labels[i], _, err = DecodeSizeLabelTKey(key)
		if err != nil {
			return "{}", err
		}
	}
	m, err := json.Marshal(labels)
	if err != nil {
		return "{}", nil
	}
	return string(m), nil
}
Example #29
// GetLabelBytesAtPoint returns the 8 byte slice corresponding to a 64-bit label at a point.
func (d *Data) GetLabelBytesAtPoint(v dvid.VersionID, pt dvid.Point) ([]byte, error) {
	store, err := storage.MutableStore()
	if err != nil {
		return nil, err
	}

	// Compute the block key that contains the given point.
	coord, ok := pt.(dvid.Chunkable)
	if !ok {
		return nil, fmt.Errorf("Can't determine block of point %s", pt)
	}
	blockSize := d.BlockSize()
	blockCoord := coord.Chunk(blockSize).(dvid.ChunkPoint3d)
	index := dvid.IndexZYX(blockCoord)

	// Retrieve the block of labels
	ctx := datastore.NewVersionedCtx(d, v)
	serialization, err := store.Get(ctx, NewTKey(&index))
	if err != nil {
		return nil, fmt.Errorf("Error getting '%s' block for index %s\n", d.DataName(), blockCoord)
	}
	if serialization == nil {
		return zeroLabelBytes, nil
	}
	labelData, _, err := dvid.DeserializeData(serialization, true)
	if err != nil {
		return nil, fmt.Errorf("Unable to deserialize block %s in '%s': %v\n", blockCoord, d.DataName(), err)
	}

	// Retrieve the particular label within the block.
	ptInBlock := coord.PointInChunk(blockSize)
	nx := int64(blockSize.Value(0))
	nxy := nx * int64(blockSize.Value(1))
	i := (int64(ptInBlock.Value(0)) + int64(ptInBlock.Value(1))*nx + int64(ptInBlock.Value(2))*nxy) * 8
	return labelData[i : i+8], nil
}
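
A worked example of the offset arithmetic above, assuming a 32×32×32 block: the label for the in-block point (1, 2, 3) starts at byte (1 + 2·32 + 3·1024) · 8 = 25096.

package main

import "fmt"

func main() {
	// Byte offset of a voxel's uint64 label within a label block, mirroring
	// GetLabelBytesAtPoint (block size assumed to be 32x32x32).
	nx := int64(32)
	nxy := nx * 32
	x, y, z := int64(1), int64(2), int64(3) // point relative to block origin
	i := (x + y*nx + z*nxy) * 8
	fmt.Println(i) // 25096: the label occupies labelData[i : i+8]
}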
Example #30
func (d *Data) handleSizeEvent(in <-chan datastore.SyncMessage, done <-chan struct{}) {
	store, err := storage.SmallDataStore()
	if err != nil {
		dvid.Errorf("Data type labelvol had error initializing store: %v\n", err)
		return
	}
	batcher, ok := store.(storage.KeyValueBatcher)
	if !ok {
		dvid.Errorf("Data type labelvol requires batch-enabled store, which %q is not\n", store)
		return
	}

	for msg := range in {
		ctx := datastore.NewVersionedCtx(d, msg.Version)

		select {
		case <-done:
			return
		default:
			switch delta := msg.Delta.(type) {
			case labels.DeltaNewSize:
				batch := batcher.NewBatch(ctx)
				newKey := NewSizeLabelTKey(delta.Size, delta.Label)
				batch.Put(newKey, dvid.EmptyValue())
				newKey = NewLabelSizeTKey(delta.Label, delta.Size)
				batch.Put(newKey, dvid.EmptyValue())
				if err := batch.Commit(); err != nil {
					dvid.Errorf("Error on updating label sizes on %s: %v\n", ctx, err)
				}

			case labels.DeltaDeleteSize:
				if delta.OldKnown {
					batch := batcher.NewBatch(ctx)
					oldKey := NewSizeLabelTKey(delta.OldSize, delta.Label)
					batch.Delete(oldKey)
					oldKey = NewLabelSizeTKey(delta.Label, delta.OldSize)
					batch.Delete(oldKey)
					if err := batch.Commit(); err != nil {
						dvid.Errorf("Error on updating label sizes on %s: %v\n", ctx, err)
					}
				} else {
					// TODO -- Make transactional or force sequentially to label-specific goroutine
					begKey := NewLabelSizeTKey(delta.Label, 0)
					endKey := NewLabelSizeTKey(delta.Label, math.MaxUint64)
					keys, err := store.KeysInRange(ctx, begKey, endKey)
					if err != nil {
						dvid.Errorf("Unable to get size keys for label %d: %v\n", delta.Label, err)
						continue
					}
					if len(keys) != 1 {
						dvid.Errorf("Cannot modify size of label %d when no prior size recorded!", delta.Label)
						continue
					}

					// Modify label size
					label, oldSize, err := DecodeLabelSizeTKey(keys[0])
					if err != nil {
						dvid.Errorf("Unable to decode label size key for label %d: %v\n", delta.Label, err)
						continue
					}
					if label != delta.Label {
						dvid.Errorf("Requested size of label %d and got key for label %d!\n", delta.Label, label)
						continue
					}
					batch := batcher.NewBatch(ctx)
					oldKey := NewSizeLabelTKey(oldSize, delta.Label)
					batch.Delete(oldKey)
					oldKey = NewLabelSizeTKey(delta.Label, oldSize)
					batch.Delete(oldKey)
					if err := batch.Commit(); err != nil {
						dvid.Errorf("Error on updating label sizes on %s: %v\n", ctx, err)
					}
				}

			case labels.DeltaModSize:
				// TODO -- Make transactional or force sequentially to label-specific goroutine
				// Get old label size
				begKey := NewLabelSizeTKey(delta.Label, 0)
				endKey := NewLabelSizeTKey(delta.Label, math.MaxUint64)
				keys, err := store.KeysInRange(ctx, begKey, endKey)
				if err != nil {
					dvid.Errorf("Unable to get size keys for label %d: %v\n", delta.Label, err)
					continue
				}
				if len(keys) != 1 {
					dvid.Errorf("Cannot modify size of label %d when no prior size recorded!", delta.Label)
					continue
				}

				// Modify label size
				label, oldSize, err := DecodeLabelSizeTKey(keys[0])
				if err != nil {
					dvid.Errorf("Unable to decode label size key for label %d: %v\n", delta.Label, err)
					continue
				}
				if label != delta.Label {
					dvid.Errorf("Requested size of label %d and got key for label %d!\n", delta.Label, label)
					continue
				}
				// Assumes 63 bits is sufficient for any label
				size := int64(oldSize)
				size += delta.SizeChange
				newSize := uint64(size)

				// Delete old label size keys
				batch := batcher.NewBatch(ctx)
				oldKey := NewSizeLabelTKey(oldSize, label)
				batch.Delete(oldKey)
				oldKey = NewLabelSizeTKey(label, oldSize)
				batch.Delete(oldKey)

				// Add new one
				newKey := NewSizeLabelTKey(newSize, label)
				batch.Put(newKey, dvid.EmptyValue())
				newKey = NewLabelSizeTKey(label, newSize)
				batch.Put(newKey, dvid.EmptyValue())

				if err := batch.Commit(); err != nil {
					dvid.Errorf("Error on updating label sizes on %s: %v\n", ctx, err)
				}

			case labels.DeltaReplaceSize:
				batch := batcher.NewBatch(ctx)
				oldKey := NewSizeLabelTKey(delta.OldSize, delta.Label)
				batch.Delete(oldKey)
				oldKey = NewLabelSizeTKey(delta.Label, delta.OldSize)
				batch.Delete(oldKey)
				newKey := NewSizeLabelTKey(delta.NewSize, delta.Label)
				batch.Put(newKey, dvid.EmptyValue())
				newKey = NewLabelSizeTKey(delta.Label, delta.NewSize)
				batch.Put(newKey, dvid.EmptyValue())
				if err := batch.Commit(); err != nil {
					dvid.Errorf("Error on updating label sizes on %s: %v\n", ctx, err)
				}

			default:
				dvid.Criticalf("Cannot sync labelsz using unexpected delta: %v", msg)
			}
		}
	}
}
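
Taken together with Examples #5 and #28, this shows a dual index: every (label, size) fact is stored under two key orders, NewLabelSizeTKey for lookups by label and NewSizeLabelTKey for scans by size range, so every update must delete and put keys in pairs as above. A sketch of the idea with an in-memory stand-in for the batch (byte layouts assumed, not DVID's exact TKey encoding):

package main

import (
	"encoding/binary"
	"fmt"
)

// sizeLabelKey and labelSizeKey mirror NewSizeLabelTKey and NewLabelSizeTKey;
// a leading type byte stands in for the TKey class separating the two indexes.
func sizeLabelKey(size, label uint64) []byte { return pack('s', size, label) }
func labelSizeKey(label, size uint64) []byte { return pack('l', label, size) }

func pack(prefix byte, hi, lo uint64) []byte {
	k := make([]byte, 17)
	k[0] = prefix
	binary.BigEndian.PutUint64(k[1:9], hi)
	binary.BigEndian.PutUint64(k[9:], lo)
	return k
}

// replaceSize mirrors the DeltaReplaceSize branch above: both orderings of
// the (label, size) fact are deleted and re-put together.
func replaceSize(kv map[string][]byte, label, oldSize, newSize uint64) {
	delete(kv, string(sizeLabelKey(oldSize, label)))
	delete(kv, string(labelSizeKey(label, oldSize)))
	kv[string(sizeLabelKey(newSize, label))] = nil
	kv[string(labelSizeKey(label, newSize))] = nil
}

func main() {
	kv := map[string][]byte{}
	replaceSize(kv, 9, 1000, 1234)
	fmt.Println(len(kv)) // 2: one entry per key order
}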