Example #1
File: roi.go  Project: jwohlwend/dvid
// Deletes an ROI.
func (d *Data) Delete(ctx storage.VersionedCtx) error {
	smalldata, err := storage.SmallDataStore()
	if err != nil {
		return err
	}

	// We only want one PUT on a given version for given data to prevent interleaved PUTs.
	putMutex := ctx.Mutex()
	putMutex.Lock()
	defer putMutex.Unlock()

	d.MinZ = math.MaxInt32
	d.MaxZ = math.MinInt32
	if err := datastore.SaveDataByVersion(ctx.VersionID(), d); err != nil {
		return fmt.Errorf("Error in trying to save repo on roi extent change: %v\n", err)
	}

	// Delete all spans for this ROI for just this version.
	if err := smalldata.DeleteAll(ctx, false); err != nil {
		return err
	}
	return nil
}
Example #2
File: labelvol.go  Project: jwohlwend/dvid
// Returns RLEs for a given label where the key of the returned map is the block index
// in string format.
func (d *Data) GetLabelRLEs(v dvid.VersionID, label uint64) (dvid.BlockRLEs, error) {
	store, err := storage.SmallDataStore()
	if err != nil {
		return nil, fmt.Errorf("Data type labelvol had error initializing store: %v\n", err)
	}

	// Get the start/end indices for this body's KeyLabelSpatialMap (b + s) keys.
	begIndex := NewTKey(label, dvid.MinIndexZYX.ToIZYXString())
	endIndex := NewTKey(label, dvid.MaxIndexZYX.ToIZYXString())

	// Process all the b+s keys and their values, which contain RLE runs for that label.
	labelRLEs := dvid.BlockRLEs{}
	var f storage.ChunkFunc = func(chunk *storage.Chunk) error {
		// Get the block index where the label is present
		_, blockStr, err := DecodeTKey(chunk.K)
		if err != nil {
			return fmt.Errorf("Can't recover block index with chunk key %v: %v\n", chunk.K, err)
		}

		var blockRLEs dvid.RLEs
		if err := blockRLEs.UnmarshalBinary(chunk.V); err != nil {
			return fmt.Errorf("Unable to unmarshal RLE for label in block %v", chunk.K)
		}
		labelRLEs[blockStr] = blockRLEs
		return nil
	}
	ctx := datastore.NewVersionedCtx(d, v)
	err = store.ProcessRange(ctx, begIndex, endIndex, &storage.ChunkOp{}, f)
	if err != nil {
		return nil, err
	}
	dvid.Debugf("Found %d blocks with label %d\n", len(labelRLEs), label)
	return labelRLEs, nil
}
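
The returned dvid.BlockRLEs can be reduced to summary statistics. A minimal usage sketch, assuming the NumVoxels method that appears in example #14:

// Sketch: total voxel count for a label, built on GetLabelRLEs above.
func labelVoxelCount(d *Data, v dvid.VersionID, label uint64) (uint64, error) {
	rles, err := d.GetLabelRLEs(v, label)
	if err != nil {
		return 0, err
	}
	return rles.NumVoxels(), nil
}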
Example #3
File: labelsz.go  Project: jwohlwend/dvid
// GetSize returns the size in voxels of the given label.
func (d *Data) GetSize(v dvid.VersionID, label uint64) (uint64, error) {
	store, err := storage.SmallDataStore()
	if err != nil {
		return 0, fmt.Errorf("Data type imagesz had error initializing store: %v\n", err)
	}

	// Get the start/end keys for the label.
	firstKey := NewLabelSizeTKey(label, 0)
	lastKey := NewLabelSizeTKey(label, math.MaxUint64)

	// Grab all keys for this range in one sequential read.
	ctx := datastore.NewVersionedCtx(d, v)
	keys, err := store.KeysInRange(ctx, firstKey, lastKey)
	if err != nil {
		return 0, err
	}

	if len(keys) == 0 {
		return 0, fmt.Errorf("found no size for label %d", label)
	}
	if len(keys) > 1 {
		return 0, fmt.Errorf("found %d sizes for label %d!", len(keys), label)
	}
	_, size, err := DecodeLabelSizeTKey(keys[0])
	return size, err
}
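
A usage sketch for GetSize; v and label are assumed to be in scope:

// Sketch: query and report a label's size.
size, err := d.GetSize(v, label)
if err != nil {
	dvid.Errorf("no size found for label %d: %v\n", label, err)
} else {
	dvid.Debugf("label %d spans %d voxels\n", label, size)
}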
Example #4
// writeLabelVol writes a label volume, in sorted block order if sortblks is provided.
func (d *Data) writeLabelVol(v dvid.VersionID, label uint64, brles dvid.BlockRLEs, sortblks []dvid.IZYXString) error {
	store, err := storage.SmallDataStore()
	if err != nil {
		return fmt.Errorf("Data type labelvol had error initializing store: %v\n", err)
	}
	batcher, ok := store.(storage.KeyValueBatcher)
	if !ok {
		return fmt.Errorf("Data type labelvol requires batch-enabled store, which %q is not\n", store)
	}

	ctx := datastore.NewVersionedCtx(d, v)
	batch := batcher.NewBatch(ctx)
	if sortblks != nil {
		for _, izyxStr := range sortblks {
			serialization, err := brles[izyxStr].MarshalBinary()
			if err != nil {
				return fmt.Errorf("Error serializing RLEs for label %d: %v\n", label, err)
			}
			batch.Put(NewTKey(label, izyxStr), serialization)
		}
	} else {
		for izyxStr, rles := range brles {
			serialization, err := rles.MarshalBinary()
			if err != nil {
				return fmt.Errorf("Error serializing RLEs for label %d: %v\n", label, err)
			}
			batch.Put(NewTKey(label, izyxStr), serialization)
		}
	}
	if err := batch.Commit(); err != nil {
		return fmt.Errorf("Error on updating RLEs for label %d: %v\n", label, err)
	}
	return nil
}
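
A usage sketch showing the sorted-order path, assuming the SortedKeys method used in example #15; brles, v, and label are assumed to be in scope:

// Sketch: write RLEs in deterministic block order.
sortblks := brles.SortedKeys()
if err := d.writeLabelVol(v, label, brles, sortblks); err != nil {
	dvid.Errorf("sorted write of label %d failed: %v\n", label, err)
}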
Example #5
File: roi.go  Project: jwohlwend/dvid
// Returns all (z, y, x0, x1) Spans in sorted order: z, then y, then x0.
func getSpans(ctx storage.VersionedCtx, minIndex, maxIndex indexRLE) ([]dvid.Span, error) {
	db, err := storage.SmallDataStore()
	if err != nil {
		return nil, err
	}
	spans := []dvid.Span{}

	var f storage.ChunkFunc = func(chunk *storage.Chunk) error {
		ibytes, err := chunk.K.ClassBytes(keyROI)
		if err != nil {
			return err
		}
		index := new(indexRLE)
		if err = index.IndexFromBytes(ibytes); err != nil {
			return fmt.Errorf("Unable to get indexRLE out of []byte encoding: %v\n", err)
		}
		z := index.start.Value(2)
		y := index.start.Value(1)
		x0 := index.start.Value(0)
		x1 := x0 + int32(index.span) - 1
		spans = append(spans, dvid.Span{z, y, x0, x1})
		return nil
	}
	mintk := storage.NewTKey(keyROI, minIndex.Bytes())
	maxtk := storage.NewTKey(keyROI, maxIndex.Bytes())
	err = db.ProcessRange(ctx, mintk, maxtk, &storage.ChunkOp{}, f)
	return spans, err
}
Example #6
File: labelvol.go  Project: jwohlwend/dvid
// InitVersion initializes max label tracking for a new version if it has a parent
func (d *Data) InitVersion(uuid dvid.UUID, v dvid.VersionID) error {
	// Get the parent max label
	parents, err := datastore.GetParentsByVersion(v)
	if err != nil {
		return err
	}
	if len(parents) < 1 {
		return fmt.Errorf("InitVersion(%s, %d) called on node with no parents, which shouldn't be possible for branch", uuid, v)
	}
	var maxMax uint64
	for _, parent := range parents {
		maxLabel, ok := d.MaxLabel[parent]
		if !ok {
			return fmt.Errorf("parent of uuid %s had no max label", uuid)
		}
		if maxLabel > maxMax {
			maxMax = maxLabel
		}
	}
	d.MaxLabel[v] = maxMax

	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, maxMax)
	ctx := datastore.NewVersionedCtx(d, v)
	store, err := storage.SmallDataStore()
	if err != nil {
		return fmt.Errorf("data type labelvol had error initializing store: %v\n", err)
	}
	return store.Put(ctx, maxLabelTKey, buf)
}
Example #7
File: labelvol.go  Project: jwohlwend/dvid
// NewLabel returns a new label for the given version.
func (d *Data) NewLabel(v dvid.VersionID) (uint64, error) {
	d.ml_mu.Lock()
	defer d.ml_mu.Unlock()

	// Make sure we aren't trying to increment a label on a locked node.
	locked, err := datastore.LockedVersion(v)
	if err != nil {
		return 0, err
	}
	if locked {
		return 0, fmt.Errorf("can't ask for new label in a locked version id %d", v)
	}

	// Increment and store.
	d.MaxRepoLabel++
	d.MaxLabel[v] = d.MaxRepoLabel

	store, err := storage.SmallDataStore()
	if err != nil {
		return 0, fmt.Errorf("can't initializing small data store: %v\n", err)
	}
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, d.MaxRepoLabel)
	ctx := datastore.NewVersionedCtx(d, v)
	if err := store.Put(ctx, maxLabelTKey, buf); err != nil {
		return 0, err
	}

	ctx2 := storage.NewDataContext(d, 0)
	if err := store.Put(ctx2, maxRepoLabelTKey, buf); err != nil {
		return 0, err
	}

	return d.MaxRepoLabel, nil
}
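
The persisted value is a fixed 8-byte little-endian integer, so reading it back mirrors the write above. A minimal sketch, assuming store.Get behaves as in example #20:

// Sketch: read back the per-version max label written above.
func getMaxLabel(d *Data, v dvid.VersionID) (uint64, error) {
	store, err := storage.SmallDataStore()
	if err != nil {
		return 0, err
	}
	ctx := datastore.NewVersionedCtx(d, v)
	val, err := store.Get(ctx, maxLabelTKey)
	if err != nil {
		return 0, err
	}
	if len(val) != 8 {
		return 0, fmt.Errorf("expected 8-byte max label, got %d bytes", len(val))
	}
	return binary.LittleEndian.Uint64(val), nil
}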
Example #8
File: sync.go  Project: jwohlwend/dvid
// Processes each change as we get it.
// TODO -- accumulate larger # of changes before committing to prevent
// excessive compaction time?  This assumes LSM storage engine, which
// might not always hold in future, so stick with incremental update
// until proven to be a bottleneck.
func (d *Data) handleBlockEvent(in <-chan datastore.SyncMessage, done <-chan struct{}) {
	store, err := storage.SmallDataStore()
	if err != nil {
		dvid.Errorf("Data type labelvol had error initializing store: %v\n", err)
		return
	}
	batcher, ok := store.(storage.KeyValueBatcher)
	if !ok {
		dvid.Errorf("Data type labelvol requires batch-enabled store, which %q is not\n", store)
		return
	}

	for msg := range in {
		select {
		case <-done:
			return
		default:
			ctx := datastore.NewVersionedCtx(d, msg.Version)
			switch delta := msg.Delta.(type) {
			case imageblk.Block:
				d.ingestBlock(ctx, delta, batcher)
			case labels.DeleteBlock:
				d.deleteBlock(ctx, delta, batcher)
			default:
				dvid.Criticalf("Cannot sync labelvol from block event.  Got unexpected delta: %v\n", msg)
			}
		}
	}
}
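
A sketch of how such a handler is presumably wired up; the channel names and buffer size are illustrative:

// Sketch: start the block-event handler and later signal shutdown.
in := make(chan datastore.SyncMessage, 100)
done := make(chan struct{})
go d.handleBlockEvent(in, done)
// ... deliver sync messages: in <- msg
// Closing done makes the handler return before processing further messages.
close(done)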
Example #9
File: labelvol.go  Project: jwohlwend/dvid
// GetSparseCoarseVol returns an encoded sparse volume given a label.  The encoding has the
// following format where integers are little endian:
// 		byte     Set to 0
// 		uint8    Number of dimensions
// 		uint8    Dimension of run (typically 0 = X)
// 		byte     Reserved (to be used later)
// 		uint32    # Blocks [TODO.  0 for now]
// 		uint32    # Spans
// 		Repeating unit of:
//     		int32   Block coordinate of run start (dimension 0)
//     		int32   Block coordinate of run start (dimension 1)
//     		int32   Block coordinate of run start (dimension 2)
//     		int32   Length of run
//
func GetSparseCoarseVol(ctx storage.Context, label uint64) ([]byte, error) {
	store, err := storage.SmallDataStore()
	if err != nil {
		return nil, fmt.Errorf("Data type labelvol had error initializing store: %v\n", err)
	}

	// Create the sparse volume header
	buf := new(bytes.Buffer)
	buf.WriteByte(dvid.EncodingBinary)
	binary.Write(buf, binary.LittleEndian, uint8(3))  // # of dimensions
	binary.Write(buf, binary.LittleEndian, byte(0))   // dimension of run (X = 0)
	buf.WriteByte(byte(0))                            // reserved for later
	binary.Write(buf, binary.LittleEndian, uint32(0)) // Placeholder for # blocks
	encoding := buf.Bytes()

	// Get the start/end indices for this body's KeyLabelSpatialMap (b + s) keys.
	begTKey := NewTKey(label, dvid.MinIndexZYX.ToIZYXString())
	endTKey := NewTKey(label, dvid.MaxIndexZYX.ToIZYXString())

	// Process all the b+s keys and their values, which contain RLE runs for that label.
	var numBlocks uint32
	var span *dvid.Span
	var spans dvid.Spans
	keys, err := store.KeysInRange(ctx, begTKey, endTKey)
	if err != nil {
		return nil, fmt.Errorf("Cannot get keys for coarse sparse volume: %v", err)
	}
	for _, tk := range keys {
		numBlocks++
		_, blockStr, err := DecodeTKey(tk)
		if err != nil {
			return nil, fmt.Errorf("Error retrieving RLE runs for label %d: %v", label, err)
		}
		indexZYX, err := blockStr.IndexZYX()
		if err != nil {
			return nil, fmt.Errorf("Error decoding block coordinate (%v) for sparse volume: %v\n", blockStr, err)
		}
		x, y, z := indexZYX.Unpack()
		if span == nil {
			span = &dvid.Span{z, y, x, x}
		} else if !span.Extends(x, y, z) {
			spans = append(spans, *span)
			span = &dvid.Span{z, y, x, x}
		}
	}
	if span != nil {
		spans = append(spans, *span)
	}
	spansBytes, err := spans.MarshalBinary()
	if err != nil {
		return nil, err
	}
	encoding = append(encoding, spansBytes...)
	dvid.Debugf("[%s] coarse subvol for label %d: found %d blocks\n", ctx, label, numBlocks)
	return encoding, nil
}
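
The fixed 8-byte header can be decoded independently of the span data that follows it. A sketch matching the layout documented above:

// Sketch: decode the fixed header of the coarse sparse-volume encoding.
func decodeCoarseHeader(encoding []byte) (ndims, runDim uint8, numBlocks uint32, err error) {
	if len(encoding) < 8 {
		return 0, 0, 0, fmt.Errorf("encoding too short: %d bytes", len(encoding))
	}
	ndims = encoding[1]                                   // number of dimensions
	runDim = encoding[2]                                  // dimension of run (0 = X)
	numBlocks = binary.LittleEndian.Uint32(encoding[4:8]) // # blocks (0 for now)
	return ndims, runDim, numBlocks, nil
}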
Example #10
File: labelvol.go  Project: jwohlwend/dvid
// LoadMutable loads mutable properties of label volumes like the maximum labels
// for each version.  Note that we load these max labels from key-value pairs
// rather than data instance properties persistence, because in the case of a crash,
// the actually stored repo data structure may be out-of-date compared to the guaranteed
// up-to-date key-value pairs for max labels.
func (d *Data) LoadMutable(root dvid.VersionID, storedVersion, expectedVersion uint64) (bool, error) {
	ctx := storage.NewDataContext(d, 0)
	store, err := storage.SmallDataStore()
	if err != nil {
		return false, fmt.Errorf("Data type labelvol had error initializing store: %v\n", err)
	}

	wg := new(sync.WaitGroup)
	wg.Add(1)
	ch := make(chan *storage.KeyValue)

	// Start appropriate migration function if any.
	var saveRequired bool

	switch storedVersion {
	case 0:
		// Need to update all max labels and set repo-level max label.
		saveRequired = true
		go d.migrateMaxLabels(root, wg, ch)
	default:
		// Load in each version max label without migration.
		go d.loadMaxLabels(wg, ch)

		// Load in the repo-wide max label.
		data, err := store.Get(ctx, maxRepoLabelTKey)
		if err != nil {
			return false, err
		}
		d.MaxRepoLabel = binary.LittleEndian.Uint64(data)
	}

	// Send the max label data per version
	minKey, err := ctx.MinVersionKey(maxLabelTKey)
	if err != nil {
		return false, err
	}
	maxKey, err := ctx.MaxVersionKey(maxLabelTKey)
	if err != nil {
		return false, err
	}
	keysOnly := false
	if err = store.SendRange(minKey, maxKey, keysOnly, ch); err != nil {
		return false, err
	}
	wg.Wait()

	dvid.Infof("Loaded max label values for labelvol %q with repo-wide max %d\n", d.DataName(), d.MaxRepoLabel)
	return saveRequired, nil
}
Example #11
File: labelvol.go  Project: jwohlwend/dvid
func (d *Data) migrateMaxLabels(root dvid.VersionID, wg *sync.WaitGroup, ch chan *storage.KeyValue) {
	ctx := storage.NewDataContext(d, 0)
	store, err := storage.SmallDataStore()
	if err != nil {
		dvid.Errorf("Can't initialize small data store: %v\n", err)
		wg.Done()
		return
	}

	var maxRepoLabel uint64
	d.MaxLabel = make(map[dvid.VersionID]uint64)
	for {
		kv := <-ch
		if kv == nil {
			break
		}
		v, err := ctx.VersionFromKey(kv.K)
		if err != nil {
			dvid.Errorf("Can't decode key when loading mutable data for %s", d.DataName())
			continue
		}
		if len(kv.V) != 8 {
			dvid.Errorf("Got bad value.  Expected 64-bit label, got %v", kv.V)
			continue
		}
		label := binary.LittleEndian.Uint64(kv.V)
		d.MaxLabel[v] = label
		if label > maxRepoLabel {
			maxRepoLabel = label
		}
	}

	// Adjust the MaxLabel data to make sure we correct for any case of child max < parent max.
	d.adjustMaxLabels(store, root)

	// Set the repo-wide max label.
	d.MaxRepoLabel = maxRepoLabel

	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, maxRepoLabel)
	if err := store.Put(ctx, maxRepoLabelTKey, buf); err != nil {
		dvid.Errorf("Can't persist repo-wide max label: %v\n", err)
	}

	wg.Done()
}
Example #12
File: labelsz.go  Project: jwohlwend/dvid
// GetSizeRange returns a JSON list of mapped labels that have volumes within the given range.
// If maxSize is 0, all mapped labels are returned >= minSize.
func (d *Data) GetSizeRange(v dvid.VersionID, minSize, maxSize uint64) (string, error) {
	store, err := storage.SmallDataStore()
	if err != nil {
		return "{}", fmt.Errorf("Data type imagesz had error initializing store: %v\n", err)
	}

	// Get the start/end keys for the size range.
	firstKey := NewSizeLabelTKey(minSize, 0)
	var upperBound uint64
	if maxSize != 0 {
		upperBound = maxSize
	} else {
		upperBound = math.MaxUint64
	}
	lastKey := NewSizeLabelTKey(upperBound, math.MaxUint64)

	// Grab all keys for this range in one sequential read.
	ctx := datastore.NewVersionedCtx(d, v)
	keys, err := store.KeysInRange(ctx, firstKey, lastKey)
	if err != nil {
		return "{}", err
	}

	// Convert them to a JSON compatible structure.
	labels := make([]uint64, len(keys))
	for i, key := range keys {
		labels[i], _, err = DecodeSizeLabelTKey(key)
		if err != nil {
			return "{}", err
		}
	}
	m, err := json.Marshal(labels)
	if err != nil {
		return "{}", nil
	}
	return string(m), nil
}
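
A usage sketch with illustrative thresholds; per the doc comment, a maxSize of 0 removes the upper bound:

// Sketch: list all labels of at least 1000 voxels (v assumed in scope).
sizesJSON, err := d.GetSizeRange(v, 1000, 0)
if err != nil {
	dvid.Errorf("size range query failed: %v\n", err)
} else {
	dvid.Debugf("labels >= 1000 voxels: %s\n", sizesJSON)
}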
Example #13
File: labelvol.go  Project: jwohlwend/dvid
// Given a stored label, make sure our max label tracking is updated.
func (d *Data) casMaxLabel(batch storage.Batch, v dvid.VersionID, label uint64) {
	d.ml_mu.Lock()
	defer d.ml_mu.Unlock()

	save := false
	maxLabel, found := d.MaxLabel[v]
	if !found {
		dvid.Errorf("Bad max label of version %d -- none found!\n", v)
		maxLabel = 0
	}
	if maxLabel < label {
		maxLabel = label
		save = true
	}
	if save {
		buf := make([]byte, 8)
		binary.LittleEndian.PutUint64(buf, maxLabel)
		batch.Put(maxLabelTKey, buf)
		d.MaxLabel[v] = maxLabel

		if d.MaxRepoLabel < maxLabel {
			d.MaxRepoLabel = maxLabel
			ctx := storage.NewDataContext(d, 0)
			store, err := storage.SmallDataStore()
			if err != nil {
				dvid.Errorf("Data type labelvol had error initializing store: %v\n", err)
			} else {
				store.Put(ctx, maxRepoLabelTKey, buf)
			}
		}
	}
	if err := batch.Commit(); err != nil {
		dvid.Errorf("batch put: %v\n", err)
		return
	}
}
Example #14
// MergeLabels handles merging of any number of labels throughout the various label data
// structures.  It assumes that the merges aren't cascading, e.g., there is no attempt
// to merge label 3 into 4 and also 4 into 5.  The caller should have flattened the merges.
// TODO: Provide some indication that subset of labels are under evolution, returning
//   an "unavailable" status or 203 for non-authoritative response.  This might not be
//   feasible for clustered DVID front-ends due to coordination issues.
//
// EVENTS
//
// labels.MergeStartEvent occurs at very start of merge and transmits labels.DeltaMergeStart struct.
//
// labels.MergeBlockEvent occurs for every block of a merged label and transmits labels.DeltaMerge struct.
//
// labels.MergeEndEvent occurs at end of merge and transmits labels.DeltaMergeEnd struct.
//
func (d *Data) MergeLabels(v dvid.VersionID, m labels.MergeOp) error {
	store, err := storage.SmallDataStore()
	if err != nil {
		return fmt.Errorf("Data type labelvol had error initializing store: %v\n", err)
	}
	batcher, ok := store.(storage.KeyValueBatcher)
	if !ok {
		return fmt.Errorf("Data type labelvol requires batch-enabled store, which %q is not\n", store)
	}

	dvid.Debugf("Merging %s into label %d ...\n", m.Merged, m.Target)

	// Signal that we are starting a merge.
	evt := datastore.SyncEvent{d.DataName(), labels.MergeStartEvent}
	msg := datastore.SyncMessage{v, labels.DeltaMergeStart{m}}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		return err
	}

	// Mark these labels as dirty until done.
	iv := dvid.InstanceVersion{d.DataName(), v}
	dirtyLabels.AddMerge(iv, m)
	defer dirtyLabels.RemoveMerge(iv, m)

	// All blocks that have changed during this merge.  Key = string of block index
	blocksChanged := make(map[dvid.IZYXString]struct{})

	// Get the block-level RLEs for the toLabel
	toLabel := m.Target
	toLabelRLEs, err := d.GetLabelRLEs(v, toLabel)
	if err != nil {
		return fmt.Errorf("Can't get block-level RLEs for label %d: %v", toLabel, err)
	}
	toLabelSize := toLabelRLEs.NumVoxels()

	// Iterate through all labels to be merged.
	var addedVoxels uint64
	for fromLabel := range m.Merged {
		dvid.Debugf("Merging label %d to label %d...\n", fromLabel, toLabel)

		fromLabelRLEs, err := d.GetLabelRLEs(v, fromLabel)
		if err != nil {
			return fmt.Errorf("Can't get block-level RLEs for label %d: %v", fromLabel, err)
		}
		fromLabelSize := fromLabelRLEs.NumVoxels()
		if fromLabelSize == 0 || len(fromLabelRLEs) == 0 {
			dvid.Debugf("Label %d is empty.  Skipping.\n", fromLabel)
			continue
		}
		addedVoxels += fromLabelSize

		// Notify linked labelsz instances
		delta := labels.DeltaDeleteSize{
			Label:    fromLabel,
			OldSize:  fromLabelSize,
			OldKnown: true,
		}
		evt := datastore.SyncEvent{d.DataName(), labels.ChangeSizeEvent}
		msg := datastore.SyncMessage{v, delta}
		if err := datastore.NotifySubscribers(evt, msg); err != nil {
			return err
		}

		// Append or insert RLE runs from fromLabel blocks into toLabel blocks.
		for blockStr, fromRLEs := range fromLabelRLEs {
			// Mark the fromLabel blocks as modified
			blocksChanged[blockStr] = struct{}{}

			// Get the toLabel RLEs for this block and add the fromLabel RLEs
			toRLEs, found := toLabelRLEs[blockStr]
			if found {
				toRLEs.Add(fromRLEs)
			} else {
				toRLEs = fromRLEs
			}
			toLabelRLEs[blockStr] = toRLEs
		}

		// Delete all fromLabel RLEs since they are all integrated into toLabel RLEs
		minTKey := NewTKey(fromLabel, dvid.MinIndexZYX.ToIZYXString())
		maxTKey := NewTKey(fromLabel, dvid.MaxIndexZYX.ToIZYXString())
		ctx := datastore.NewVersionedCtx(d, v)
		if err := store.DeleteRange(ctx, minTKey, maxTKey); err != nil {
			return fmt.Errorf("Can't delete label %d RLEs: %v", fromLabel, err)
		}
	}

	if len(blocksChanged) == 0 {
		dvid.Debugf("No changes needed when merging %s into %d.  Aborting.\n", m.Merged, m.Target)
		return nil
	}

	// Publish block-level merge
	evt = datastore.SyncEvent{d.DataName(), labels.MergeBlockEvent}
	msg = datastore.SyncMessage{v, labels.DeltaMerge{m, blocksChanged}}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		return err
	}

	// Update datastore with all toLabel RLEs that were changed
	ctx := datastore.NewVersionedCtx(d, v)
	batch := batcher.NewBatch(ctx)
	for blockStr := range blocksChanged {
		tk := NewTKey(toLabel, blockStr)
		serialization, err := toLabelRLEs[blockStr].MarshalBinary()
		if err != nil {
			return fmt.Errorf("Error serializing RLEs for label %d: %v\n", toLabel, err)
		}
		batch.Put(tk, serialization)
	}
	if err := batch.Commit(); err != nil {
		dvid.Errorf("Error on updating RLEs for label %d: %v\n", toLabel, err)
	}
	delta := labels.DeltaReplaceSize{
		Label:   toLabel,
		OldSize: toLabelSize,
		NewSize: toLabelSize + addedVoxels,
	}
	evt = datastore.SyncEvent{d.DataName(), labels.ChangeSizeEvent}
	msg = datastore.SyncMessage{v, delta}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		return err
	}

	evt = datastore.SyncEvent{d.DataName(), labels.MergeEndEvent}
	msg = datastore.SyncMessage{v, labels.DeltaMergeEnd{m}}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		return err
	}

	return nil
}
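
A usage sketch for a flattened merge. It assumes labels.MergeOp exposes the Target and Merged fields used above, with Merged a set keyed by label:

// Sketch: merge labels 4 and 5 into label 3 (field and set types assumed).
op := labels.MergeOp{
	Target: 3,
	Merged: labels.Set{4: struct{}{}, 5: struct{}{}},
}
if err := d.MergeLabels(v, op); err != nil {
	dvid.Errorf("merge into label %d failed: %v\n", op.Target, err)
}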
Example #15
// SplitLabels splits a portion of a label's voxels into a new label, which is returned.
// The input is a binary sparse volume and should preferably be the smaller portion of a labeled region.
// In other words, the caller should choose to submit for relabeling the smaller portion of any split.
//
// EVENTS
//
// labels.SplitStartEvent occurs at very start of split and transmits labels.DeltaSplitStart struct.
//
// labels.SplitBlockEvent occurs for every block of a split label and transmits labels.DeltaSplit struct.
//
// labels.SplitEndEvent occurs at end of split and transmits labels.DeltaSplitEnd struct.
//
func (d *Data) SplitLabels(v dvid.VersionID, fromLabel uint64, r io.ReadCloser) (toLabel uint64, err error) {
	store, err := storage.SmallDataStore()
	if err != nil {
		err = fmt.Errorf("Data type labelvol had error initializing store: %v\n", err)
		return
	}
	batcher, ok := store.(storage.KeyValueBatcher)
	if !ok {
		err = fmt.Errorf("Data type labelvol requires batch-enabled store, which %q is not\n", store)
		return
	}

	// Mark these labels as dirty until done.
	iv := dvid.InstanceVersion{d.DataName(), v}
	dirtyLabels.Incr(iv, fromLabel)
	defer dirtyLabels.Decr(iv, fromLabel)

	// Create a new label id for this version that will persist to store
	toLabel, err = d.NewLabel(v)
	if err != nil {
		return
	}
	dvid.Debugf("Splitting subset of label %d into label %d ...\n", fromLabel, toLabel)

	// Signal that we are starting a split.
	evt := datastore.SyncEvent{d.DataName(), labels.SplitStartEvent}
	msg := datastore.SyncMessage{v, labels.DeltaSplitStart{fromLabel, toLabel}}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		return 0, err
	}

	// Read the sparse volume from reader.
	var split dvid.RLEs
	split, err = dvid.ReadRLEs(r)
	if err != nil {
		return
	}
	toLabelSize, _ := split.Stats()

	// Partition the split spans into blocks.
	var splitmap dvid.BlockRLEs
	splitmap, err = split.Partition(d.BlockSize)
	if err != nil {
		return
	}

	// Get a sorted list of blocks that cover split.
	splitblks := splitmap.SortedKeys()

	// Publish split event
	evt = datastore.SyncEvent{d.DataName(), labels.SplitLabelEvent}
	msg = datastore.SyncMessage{v, labels.DeltaSplit{fromLabel, toLabel, splitmap, splitblks}}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		return 0, err
	}

	// Write the split sparse vol.
	if err = d.writeLabelVol(v, toLabel, splitmap, splitblks); err != nil {
		return
	}

	// Iterate through the split blocks, read the original block.  If the RLEs
	// are identical, just delete the original.  If not, modify the original.
	// TODO: Modifications should be transactional since it's GET-PUT, therefore use
	// hash on block coord to direct it to block-specific goroutine; we serialize
	// requests to handle concurrency.
	ctx := datastore.NewVersionedCtx(d, v)
	batch := batcher.NewBatch(ctx)

	for _, splitblk := range splitblks {

		// Get original block
		tk := NewTKey(fromLabel, splitblk)
		val, err := store.Get(ctx, tk)
		if err != nil {
			return toLabel, err
		}

		if val == nil {
			return toLabel, fmt.Errorf("Split RLEs at block %s are not part of original label %d", splitblk.Print(), fromLabel)
		}
		var rles dvid.RLEs
		if err := rles.UnmarshalBinary(val); err != nil {
			return toLabel, fmt.Errorf("Unable to unmarshal RLE for original labels in block %s", splitblk.Print())
		}

		// Compare and process based on modifications required.
		remain, err := rles.Split(splitmap[splitblk])
		if err != nil {
			return toLabel, err
		}
		if len(remain) == 0 {
			batch.Delete(tk)
		} else {
			rleBytes, err := remain.MarshalBinary()
			if err != nil {
				return toLabel, fmt.Errorf("can't serialize remain RLEs for split of %d: %v\n", fromLabel, err)
			}
			batch.Put(tk, rleBytes)
		}
	}

	if err := batch.Commit(); err != nil {
		dvid.Errorf("Batch PUT during split of %q label %d: %v\n", d.DataName(), fromLabel, err)
	}

	// Publish change in label sizes.
	delta := labels.DeltaNewSize{
		Label: toLabel,
		Size:  toLabelSize,
	}
	evt = datastore.SyncEvent{d.DataName(), labels.ChangeSizeEvent}
	msg = datastore.SyncMessage{v, delta}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		return 0, err
	}

	delta2 := labels.DeltaModSize{
		Label:      fromLabel,
		SizeChange: int64(-toLabelSize),
	}
	evt = datastore.SyncEvent{d.DataName(), labels.ChangeSizeEvent}
	msg = datastore.SyncMessage{v, delta2}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		return 0, err
	}

	// Publish split end
	evt = datastore.SyncEvent{d.DataName(), labels.SplitEndEvent}
	msg = datastore.SyncMessage{v, labels.DeltaSplitEnd{fromLabel, toLabel}}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		return 0, err
	}

	return toLabel, nil
}
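
A usage sketch; body is any io.ReadCloser carrying RLEs in the format consumed by dvid.ReadRLEs above, e.g. an HTTP request body:

// Sketch: split part of fromLabel into a newly created label.
toLabel, err := d.SplitLabels(v, fromLabel, body)
if err != nil {
	dvid.Errorf("split of label %d failed: %v\n", fromLabel, err)
} else {
	dvid.Debugf("split of label %d created new label %d\n", fromLabel, toLabel)
}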
Example #16
File: roi.go  Project: jwohlwend/dvid
// Partition returns JSON of differently sized subvolumes that attempt to distribute
// the number of active blocks per subvolume.
func (d *Data) Partition(ctx storage.Context, batchsize int32) ([]byte, error) {
	// Partition Z as perfectly as we can.
	dz := d.MaxZ - d.MinZ + 1
	zleft := dz % batchsize

	// Adjust Z range
	layerBegZ := d.MinZ
	layerEndZ := layerBegZ + batchsize - 1

	// Iterate through blocks in ascending Z, calculating active extents and subvolume coverage.
	// Keep track of current layer = batchsize of blocks in Z.
	var subvolumes subvolumesT
	subvolumes.Subvolumes = []subvolumeT{}
	subvolumes.ROI.MinChunk[2] = d.MinZ
	subvolumes.ROI.MaxChunk[2] = d.MaxZ

	layer := d.newLayer(layerBegZ, layerEndZ)

	db, err := storage.SmallDataStore()
	if err != nil {
		return nil, err
	}
	merge := true
	var f storage.ChunkFunc = func(chunk *storage.Chunk) error {
		ibytes, err := chunk.K.ClassBytes(keyROI)
		if err != nil {
			return err
		}
		index := new(indexRLE)
		if err = index.IndexFromBytes(ibytes); err != nil {
			return fmt.Errorf("Unable to get indexRLE out of []byte encoding: %v\n", err)
		}

		// If we are in new layer, process last one.
		z := index.start.Value(2)
		if z > layerEndZ {
			// Process last layer
			dvid.Debugf("Computing subvolumes in layer with Z %d -> %d (dz %d)\n",
				layer.minZ, layer.maxZ, layer.maxZ-layer.minZ+1)
			d.addSubvolumes(layer, &subvolumes, batchsize, merge)

			// Init variables for next layer
			layerBegZ = layerEndZ + 1
			layerEndZ += batchsize
			if zleft > 0 {
				layerEndZ++
				zleft--
			}
			layer = d.newLayer(layerBegZ, layerEndZ)
		}

		// Check this block against current layer extents
		layer.extend(index)
		return nil
	}
	mintk := storage.MinTKey(keyROI)
	maxtk := storage.MaxTKey(keyROI)
	err = db.ProcessRange(ctx, mintk, maxtk, &storage.ChunkOp{}, f)
	if err != nil {
		return nil, err
	}

	// Process last incomplete layer if there is one.
	if len(layer.activeBlocks) > 0 {
		dvid.Debugf("Computing subvolumes for final layer Z %d -> %d (dz %d)\n",
			layer.minZ, layer.maxZ, layer.maxZ-layer.minZ+1)
		d.addSubvolumes(layer, &subvolumes, batchsize, merge)
	}
	subvolumes.NumSubvolumes = int32(len(subvolumes.Subvolumes))

	// Encode as JSON
	jsonBytes, err := json.MarshalIndent(subvolumes, "", "    ")
	if err != nil {
		return nil, err
	}
	return jsonBytes, err
}
Example #17
File: labelvol.go  Project: jwohlwend/dvid
// GetSparseVol returns an encoded sparse volume given a label.  The encoding has the
// following format where integers are little endian:
//    byte     Payload descriptor:
//               Bit 0 (LSB) - 8-bit grayscale
//               Bit 1 - 16-bit grayscale
//               Bit 2 - 16-bit normal
//               ...
//    uint8    Number of dimensions
//    uint8    Dimension of run (typically 0 = X)
//    byte     Reserved (to be used later)
//    uint32    # Voxels
//    uint32    # Spans
//    Repeating unit of:
//        int32   Coordinate of run start (dimension 0)
//        int32   Coordinate of run start (dimension 1)
//        int32   Coordinate of run start (dimension 2)
//        int32   Length of run
//        bytes   Optional payload dependent on first byte descriptor
//
func GetSparseVol(ctx storage.Context, label uint64, bounds Bounds) ([]byte, error) {
	store, err := storage.SmallDataStore()
	if err != nil {
		return nil, fmt.Errorf("Data type labelvol had error initializing store: %v\n", err)
	}

	// Create the sparse volume header
	buf := new(bytes.Buffer)
	buf.WriteByte(dvid.EncodingBinary)
	binary.Write(buf, binary.LittleEndian, uint8(3))  // # of dimensions
	binary.Write(buf, binary.LittleEndian, byte(0))   // dimension of run (X = 0)
	buf.WriteByte(byte(0))                            // reserved for later
	binary.Write(buf, binary.LittleEndian, uint32(0)) // Placeholder for # voxels
	binary.Write(buf, binary.LittleEndian, uint32(0)) // Placeholder for # spans

	// Get the start/end indices for this body's KeyLabelSpatialMap (b + s) keys.
	minZYX := dvid.MinIndexZYX
	maxZYX := dvid.MaxIndexZYX
	blockBounds := bounds.BlockBounds
	if blockBounds == nil {
		blockBounds = new(dvid.Bounds)
	}
	if minZ, ok := blockBounds.MinZ(); ok {
		minZYX[2] = minZ
	}
	if maxZ, ok := blockBounds.MaxZ(); ok {
		maxZYX[2] = maxZ
	}
	begTKey := NewTKey(label, minZYX.ToIZYXString())
	endTKey := NewTKey(label, maxZYX.ToIZYXString())

	// Process all the b+s keys and their values, which contain RLE runs for that label.
	// TODO -- Make processing asynchronous so can overlap with range disk read now that
	//   there could be more processing due to bounding calcs.
	var numRuns, numBlocks uint32
	encoding := buf.Bytes()

	var f storage.ChunkFunc = func(chunk *storage.Chunk) error {
		// Make sure this block is within the optional bounding.
		if blockBounds.BoundedX() || blockBounds.BoundedY() {
			_, blockStr, err := DecodeTKey(chunk.K)
			if err != nil {
				return fmt.Errorf("Error decoding sparse volume key (%v): %v\n", chunk.K, err)
			}
			indexZYX, err := blockStr.IndexZYX()
			if err != nil {
				return fmt.Errorf("Error decoding block coordinate (%v) for sparse volume: %v\n", blockStr, err)
			}
			blockX, blockY, _ := indexZYX.Unpack()
			if blockBounds.OutsideX(blockX) || blockBounds.OutsideY(blockY) {
				return nil
			}
		}

		// Adjust RLEs within block if we are bounded.
		var rles []byte
		var err error
		if bounds.Exact && bounds.VoxelBounds.IsSet() {
			rles, err = boundRLEs(chunk.V, bounds.VoxelBounds)
			if err != nil {
				return fmt.Errorf("Error in adjusting RLEs to bounds: %v\n", err)
			}
		} else {
			rles = chunk.V
		}

		numRuns += uint32(len(rles) / 16)
		numBlocks++
		if int64(len(encoding))+int64(len(rles)) > server.MaxDataRequest {
			return fmt.Errorf("Sparse volume read aborted because length exceeds %d bytes", server.MaxDataRequest)
		}
		encoding = append(encoding, rles...)
		return nil
	}

	if err := store.ProcessRange(ctx, begTKey, endTKey, &storage.ChunkOp{}, f); err != nil {
		return nil, err
	}
	binary.LittleEndian.PutUint32(encoding[8:12], numRuns)

	dvid.Debugf("[%s] label %d: found %d blocks, %d runs\n", ctx, label, numBlocks, numRuns)
	return encoding, nil
}
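
Each run in the encoding is 16 bytes (four little-endian int32s), which the numRuns arithmetic above relies on. A decoding sketch for the documented format, assuming a payload descriptor of 0 (no per-run payload):

// Sketch: iterate the runs of the sparse-volume encoding above.
func forEachRun(encoding []byte, f func(x, y, z, length int32)) error {
	if len(encoding) < 12 {
		return fmt.Errorf("encoding too short: %d bytes", len(encoding))
	}
	numSpans := binary.LittleEndian.Uint32(encoding[8:12])
	runs := encoding[12:]
	if len(runs) < int(numSpans)*16 {
		return fmt.Errorf("expected %d runs, got only %d bytes", numSpans, len(runs))
	}
	for i := 0; i < int(numSpans); i++ {
		run := runs[i*16 : (i+1)*16]
		x := int32(binary.LittleEndian.Uint32(run[0:4]))  // run start, dim 0
		y := int32(binary.LittleEndian.Uint32(run[4:8]))  // run start, dim 1
		z := int32(binary.LittleEndian.Uint32(run[8:12])) // run start, dim 2
		length := int32(binary.LittleEndian.Uint32(run[12:16]))
		f(x, y, z, length)
	}
	return nil
}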
Example #18
File: roi.go  Project: jwohlwend/dvid
// PutSpans saves a slice of spans representing an ROI into the datastore.
// If the init parameter is true, all previous spans of this ROI are deleted before
// writing these spans.
func (d *Data) PutSpans(versionID dvid.VersionID, spans []dvid.Span, init bool) error {
	ctx := datastore.NewVersionedCtx(d, versionID)

	db, err := storage.SmallDataStore()
	if err != nil {
		return err
	}

	// Delete the old key/values
	if init {
		if err := d.Delete(ctx); err != nil {
			return err
		}
	}

	// Make sure our small data store can do batching.
	batcher, ok := db.(storage.KeyValueBatcher)
	if !ok {
		return fmt.Errorf("Unable to store ROI: small data store can't do batching!")
	}

	// We only want one PUT on a given version for given data to prevent interleaved PUTs.
	putMutex := ctx.Mutex()
	putMutex.Lock()

	// Save new extents after finished.
	defer func() {
		err := datastore.SaveDataByVersion(ctx.VersionID(), d)
		if err != nil {
			dvid.Errorf("Error in trying to save repo on roi extent change: %v\n", err)
		}
		putMutex.Unlock()
	}()

	// Put the new key/values
	const BATCH_SIZE = 10000
	batch := batcher.NewBatch(ctx)
	for i, span := range spans {
		if span[0] < d.MinZ {
			d.MinZ = span[0]
		}
		if span[0] > d.MaxZ {
			d.MaxZ = span[0]
		}
		if span[3] < span[2] {
			return fmt.Errorf("Got weird span %v.  span[3] (X1) < span[2] (X0)", span)
		}
		index := indexRLE{
			start: dvid.IndexZYX{span[2], span[1], span[0]},
			span:  uint32(span[3] - span[2] + 1),
		}
		tk := storage.NewTKey(keyROI, index.Bytes())
		batch.Put(tk, dvid.EmptyValue())
		if (i+1)%BATCH_SIZE == 0 {
			if err := batch.Commit(); err != nil {
				return fmt.Errorf("Error on batch PUT at span %d: %v\n", i, err)
			}
			batch = batcher.NewBatch(ctx)
		}
	}
	if len(spans)%BATCH_SIZE != 0 {
		if err := batch.Commit(); err != nil {
			return fmt.Errorf("Error on last batch PUT: %v\n", err)
		}
	}
	return nil
}
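
A usage sketch; each dvid.Span is ordered (z, y, x0, x1) as noted in example #5:

// Sketch: store a small two-span ROI, clearing any previous spans first.
spans := []dvid.Span{
	{10, 20, 3, 8}, // z=10, y=20, x0=3, x1=8
	{10, 21, 3, 8},
}
if err := d.PutSpans(versionID, spans, true); err != nil {
	dvid.Errorf("unable to store ROI spans: %v\n", err)
}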
Example #19
File: sync.go  Project: jwohlwend/dvid
func (d *Data) handleSizeEvent(in <-chan datastore.SyncMessage, done <-chan struct{}) {
	store, err := storage.SmallDataStore()
	if err != nil {
		dvid.Errorf("Data type labelvol had error initializing store: %v\n", err)
		return
	}
	batcher, ok := store.(storage.KeyValueBatcher)
	if !ok {
		dvid.Errorf("Data type labelvol requires batch-enabled store, which %q is not\n", store)
		return
	}

	for msg := range in {
		ctx := datastore.NewVersionedCtx(d, msg.Version)

		select {
		case <-done:
			return
		default:
			switch delta := msg.Delta.(type) {
			case labels.DeltaNewSize:
				batch := batcher.NewBatch(ctx)
				newKey := NewSizeLabelTKey(delta.Size, delta.Label)
				batch.Put(newKey, dvid.EmptyValue())
				newKey = NewLabelSizeTKey(delta.Label, delta.Size)
				batch.Put(newKey, dvid.EmptyValue())
				if err := batch.Commit(); err != nil {
					dvid.Errorf("Error on updating label sizes on %s: %v\n", ctx, err)
				}

			case labels.DeltaDeleteSize:
				if delta.OldKnown {
					batch := batcher.NewBatch(ctx)
					oldKey := NewSizeLabelTKey(delta.OldSize, delta.Label)
					batch.Delete(oldKey)
					oldKey = NewLabelSizeTKey(delta.Label, delta.OldSize)
					batch.Delete(oldKey)
					if err := batch.Commit(); err != nil {
						dvid.Errorf("Error on updating label sizes on %s: %v\n", ctx, err)
					}
				} else {
					// TODO -- Make transactional or force sequentially to label-specific goroutine
					begKey := NewLabelSizeTKey(delta.Label, 0)
					endKey := NewLabelSizeTKey(delta.Label, math.MaxUint64)
					keys, err := store.KeysInRange(ctx, begKey, endKey)
					if err != nil {
						dvid.Errorf("Unable to get size keys for label %d: %v\n", delta.Label, err)
						continue
					}
					if len(keys) != 1 {
						dvid.Errorf("Cannot modify size of label %d when no prior size recorded!", delta.Label)
						continue
					}

					// Modify label size
					label, oldSize, err := DecodeLabelSizeTKey(keys[0])
					if label != delta.Label {
						dvid.Errorf("Requested size of label %d and got key for label %d!\n", delta.Label, label)
						continue
					}
					batch := batcher.NewBatch(ctx)
					oldKey := NewSizeLabelTKey(oldSize, delta.Label)
					batch.Delete(oldKey)
					oldKey = NewLabelSizeTKey(delta.Label, oldSize)
					batch.Delete(oldKey)
					if err := batch.Commit(); err != nil {
						dvid.Errorf("Error on updating label sizes on %s: %v\n", ctx, err)
					}
				}

			case labels.DeltaModSize:
				// TODO -- Make transactional or force sequentially to label-specific goroutine
				// Get old label size
				begKey := NewLabelSizeTKey(delta.Label, 0)
				endKey := NewLabelSizeTKey(delta.Label, math.MaxUint64)
				keys, err := store.KeysInRange(ctx, begKey, endKey)
				if err != nil {
					dvid.Errorf("Unable to get size keys for label %d: %v\n", delta.Label, err)
					continue
				}
				if len(keys) != 1 {
					dvid.Errorf("Cannot modify size of label %d when no prior size recorded!", delta.Label)
					continue
				}

				// Modify label size
				label, oldSize, err := DecodeLabelSizeTKey(keys[0])
				if label != delta.Label {
					dvid.Errorf("Requested size of label %d and got key for label %d!\n", delta.Label, label)
					continue
				}
				// Assumes 63 bits is sufficient for any label
				size := int64(oldSize)
				size += delta.SizeChange
				newSize := uint64(size)

				// Delete old label size keys
				batch := batcher.NewBatch(ctx)
				oldKey := NewSizeLabelTKey(oldSize, label)
				batch.Delete(oldKey)
				oldKey = NewLabelSizeTKey(label, oldSize)
				batch.Delete(oldKey)

				// Add new one
				newKey := NewSizeLabelTKey(newSize, label)
				batch.Put(newKey, dvid.EmptyValue())
				newKey = NewLabelSizeTKey(label, newSize)
				batch.Put(newKey, dvid.EmptyValue())

				if err := batch.Commit(); err != nil {
					dvid.Errorf("Error on updating label sizes on %s: %v\n", ctx, err)
				}

			case labels.DeltaReplaceSize:
				batch := batcher.NewBatch(ctx)
				oldKey := NewSizeLabelTKey(delta.OldSize, delta.Label)
				batch.Delete(oldKey)
				oldKey = NewLabelSizeTKey(delta.Label, delta.OldSize)
				batch.Delete(oldKey)
				newKey := NewSizeLabelTKey(delta.NewSize, delta.Label)
				batch.Put(newKey, dvid.EmptyValue())
				newKey = NewLabelSizeTKey(delta.Label, delta.NewSize)
				batch.Put(newKey, dvid.EmptyValue())
				if err := batch.Commit(); err != nil {
					dvid.Errorf("Error on updating label sizes on %s: %v\n", ctx, err)
				}

			default:
				dvid.Criticalf("Cannot sync labelsz using unexpected delta: %v", msg)
			}
		}
	}
}
Example #20
File: labelvol.go  Project: jwohlwend/dvid
// PutSparseVol stores an encoded sparse volume that stays within a given forward label.
// Note that this encoded sparse volume is added to any existing sparse volume for
// a body rather than replace it.  To carve out a part of an existing sparse volume,
// use the SplitLabels().
//
// This function handles modification/deletion of all denormalized data touched by this
// sparse label volume.
//
// EVENTS
//
//
func (d *Data) PutSparseVol(v dvid.VersionID, label uint64, r io.Reader) error {
	store, err := storage.SmallDataStore()
	if err != nil {
		return fmt.Errorf("Data type labelvol had error initializing store: %v\n", err)
	}
	batcher, ok := store.(storage.KeyValueBatcher)
	if !ok {
		return fmt.Errorf("Data type labelvol requires batch-enabled store, which %q is not\n", store)
	}

	// Mark the label as dirty until done.
	iv := dvid.InstanceVersion{d.DataName(), v}
	dirtyLabels.Incr(iv, label)
	defer dirtyLabels.Decr(iv, label)

	// TODO -- Signal that we are starting a label modification.

	// Read the sparse volume from reader.
	header := make([]byte, 8)
	if _, err = io.ReadFull(r, header); err != nil {
		return err
	}
	if header[0] != dvid.EncodingBinary {
		return fmt.Errorf("sparse vol for split has unknown encoding format: %v", header[0])
	}
	var numSpans uint32
	if err = binary.Read(r, binary.LittleEndian, &numSpans); err != nil {
		return err
	}
	var mods dvid.RLEs
	if err = mods.UnmarshalBinaryReader(r, numSpans); err != nil {
		return err
	}

	// Partition the mods spans into blocks.
	var modmap dvid.BlockRLEs
	modmap, err = mods.Partition(d.BlockSize)
	if err != nil {
		return err
	}

	// Publish sparsevol mod event
	evt := datastore.SyncEvent{d.DataName(), labels.SparsevolModEvent}
	msg := datastore.SyncMessage{v, labels.DeltaSparsevol{label, modmap}}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		return err
	}

	// Get a sorted list of blocks that cover mods.
	modblks := modmap.SortedKeys()

	ctx := datastore.NewVersionedCtx(d, v)
	batch := batcher.NewBatch(ctx)
	var voxelsAdded int64

	for _, modblk := range modblks {

		// Get original block
		tk := NewTKey(label, modblk)
		val, err := store.Get(ctx, tk)
		if err != nil {
			return err
		}

		// If there's no original block, just write the mod block.
		if len(val) == 0 {
			numVoxels, _ := modmap[modblk].Stats()
			voxelsAdded += int64(numVoxels)
			rleBytes, err := modmap[modblk].MarshalBinary()
			if err != nil {
				return fmt.Errorf("can't serialize modified RLEs for %d: %v\n", label, err)
			}
			batch.Put(tk, rleBytes)
			continue
		}

		// if there's an original, integrate and write merged RLE.
		var rles dvid.RLEs
		if err := rles.UnmarshalBinary(val); err != nil {
			return fmt.Errorf("Unable to unmarshal RLE for original labels in block %s", modblk.Print())
		}
		voxelsAdded += rles.Add(modmap[modblk])
		rleBytes, err := rles.MarshalBinary()
		if err != nil {
			return fmt.Errorf("can't serialize modified RLEs of %d: %v\n", label, err)
		}
		batch.Put(tk, rleBytes)
	}

	if err := batch.Commit(); err != nil {
		return fmt.Errorf("Batch commit during mod of %s label %d: %v\n", d.DataName(), label, err)
	}

	// Publish change in label sizes.
	delta := labels.DeltaModSize{
		Label:      label,
		SizeChange: voxelsAdded,
	}
	evt = datastore.SyncEvent{d.DataName(), labels.ChangeSizeEvent}
	msg = datastore.SyncMessage{v, delta}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		return err
	}

	// TODO -- Publish label mod end

	return nil
}
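
A sketch of building a payload in the exact shape PutSparseVol reads above: an 8-byte header, a uint32 span count, then the serialized runs. It assumes dvid.RLEs is a slice of runs and that MarshalBinary emits bare 16-byte runs, as the len/16 arithmetic in example #17 implies:

// Sketch: encode RLEs into the stream format consumed by PutSparseVol.
func encodeSparseVolPayload(rles dvid.RLEs) ([]byte, error) {
	buf := new(bytes.Buffer)
	buf.WriteByte(dvid.EncodingBinary)
	binary.Write(buf, binary.LittleEndian, uint8(3))           // # of dimensions
	binary.Write(buf, binary.LittleEndian, byte(0))            // dimension of run (X = 0)
	buf.WriteByte(byte(0))                                     // reserved
	binary.Write(buf, binary.LittleEndian, uint32(0))          // # voxels (unused on read)
	binary.Write(buf, binary.LittleEndian, uint32(len(rles))) // # spans
	serialization, err := rles.MarshalBinary()
	if err != nil {
		return nil, err
	}
	buf.Write(serialization)
	return buf.Bytes(), nil
}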