Example #1
// GetLabelRLEs returns the RLEs for a given label, where the key of the
// returned map is the block index in string format.
func (d *Data) GetLabelRLEs(v dvid.VersionID, label uint64) (dvid.BlockRLEs, error) {
	store, err := storage.MutableStore()
	if err != nil {
		return nil, fmt.Errorf("Data type labelvol had error initializing store: %v\n", err)
	}

	// Get the start/end indices for this body's KeyLabelSpatialMap (b + s) keys.
	begIndex := NewTKey(label, dvid.MinIndexZYX.ToIZYXString())
	endIndex := NewTKey(label, dvid.MaxIndexZYX.ToIZYXString())

	// Process all the b+s keys and their values, which contain RLE runs for that label.
	labelRLEs := dvid.BlockRLEs{}
	var f storage.ChunkFunc = func(chunk *storage.Chunk) error {
		// Get the block index where the fromLabel is present
		_, blockStr, err := DecodeTKey(chunk.K)
		if err != nil {
			return fmt.Errorf("Can't recover block index with chunk key %v: %v\n", chunk.K, err)
		}

		var blockRLEs dvid.RLEs
		if err := blockRLEs.UnmarshalBinary(chunk.V); err != nil {
			return fmt.Errorf("Unable to unmarshal RLE for label in block %v", chunk.K)
		}
		labelRLEs[blockStr] = blockRLEs
		return nil
	}
	ctx := datastore.NewVersionedCtx(d, v)
	err = store.ProcessRange(ctx, begIndex, endIndex, &storage.ChunkOp{}, f)
	if err != nil {
		return nil, err
	}
	dvid.Infof("Found %d blocks with label %d\n", len(labelRLEs), label)
	return labelRLEs, nil
}
Example #2
// NewLabel returns a new label for the given version.
func (d *Data) NewLabel(v dvid.VersionID) (uint64, error) {
	d.ml_mu.Lock()
	defer d.ml_mu.Unlock()

	// Make sure we aren't trying to increment a label on a locked node.
	locked, err := datastore.LockedVersion(v)
	if err != nil {
		return 0, err
	}
	if locked {
		return 0, fmt.Errorf("can't ask for new label in a locked version id %d", v)
	}

	// Increment and store.
	d.MaxRepoLabel++
	d.MaxLabel[v] = d.MaxRepoLabel

	store, err := storage.MutableStore()
	if err != nil {
		return 0, fmt.Errorf("can't initializing small data store: %v\n", err)
	}
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, d.MaxRepoLabel)
	ctx := datastore.NewVersionedCtx(d, v)
	if err := store.Put(ctx, maxLabelTKey, buf); err != nil {
		return 0, err
	}

	ctx2 := storage.NewDataContext(d, 0)
	if err := store.Put(ctx2, maxRepoLabelTKey, buf); err != nil {
		return 0, err
	}

	return d.MaxRepoLabel, nil
}
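
NewLabel persists the counter as an 8-byte little-endian value under both a version-scoped key and a repo-wide key. A minimal sketch of just that round trip, with the keys and store elided:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	var maxRepoLabel uint64 = 105

	// Serialize exactly as NewLabel does before the Put.
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, maxRepoLabel)

	// Deserialize as loadMaxLabels does on startup.
	fmt.Println(binary.LittleEndian.Uint64(buf)) // 105
}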
Example #3
// Delete removes an ROI at the given version.
func (d *Data) Delete(ctx storage.VersionedCtx) error {
	db, err := storage.MutableStore()
	if err != nil {
		return err
	}

	// We only want one PUT on given version for given data to prevent interleaved PUTs.
	putMutex := ctx.Mutex()
	putMutex.Lock()
	defer putMutex.Unlock()

	d.MinZ = math.MaxInt32
	d.MaxZ = math.MinInt32
	if err := datastore.SaveDataByVersion(ctx.VersionID(), d); err != nil {
		return fmt.Errorf("Error in trying to save repo on roi extent change: %v\n", err)
	}

	// Delete all spans for this ROI for just this version.
	return db.DeleteAll(ctx, false)
}
Example #4
// writeLabelVol writes the label volume, using sorted block order if sorted blocks are provided.
func (d *Data) writeLabelVol(v dvid.VersionID, label uint64, brles dvid.BlockRLEs, sortblks []dvid.IZYXString) error {
	store, err := storage.MutableStore()
	if err != nil {
		return fmt.Errorf("Data type labelvol had error initializing store: %v\n", err)
	}
	batcher, ok := store.(storage.KeyValueBatcher)
	if !ok {
		return fmt.Errorf("Data type labelvol requires batch-enabled store, which %q is not\n", store)
	}

	ctx := datastore.NewVersionedCtx(d, v)
	batch := batcher.NewBatch(ctx)
	if sortblks != nil {
		for _, izyxStr := range sortblks {
			serialization, err := brles[izyxStr].MarshalBinary()
			if err != nil {
				return fmt.Errorf("Error serializing RLEs for label %d: %v\n", label, err)
			}
			batch.Put(NewTKey(label, izyxStr), serialization)
		}
	} else {
		for izyxStr, rles := range brles {
			serialization, err := rles.MarshalBinary()
			if err != nil {
				return fmt.Errorf("Error serializing RLEs for label %d: %v\n", label, err)
			}
			batch.Put(NewTKey(label, izyxStr), serialization)
		}
	}
	if err := batch.Commit(); err != nil {
		return fmt.Errorf("Error on updating RLEs for label %d: %v\n", label, err)
	}
	return nil
}
Example #5
// GetSize returns the size in voxels of the given label.
func (d *Data) GetSize(v dvid.VersionID, label uint64) (uint64, error) {
	store, err := storage.MutableStore()
	if err != nil {
		return 0, fmt.Errorf("Data type imagesz had error initializing store: %v\n", err)
	}

	// Get the start/end keys for the label.
	firstKey := NewLabelSizeTKey(label, 0)
	lastKey := NewLabelSizeTKey(label, math.MaxUint64)

	// Grab all keys for this range in one sequential read.
	ctx := datastore.NewVersionedCtx(d, v)
	keys, err := store.KeysInRange(ctx, firstKey, lastKey)
	if err != nil {
		return 0, err
	}

	if len(keys) == 0 {
		return 0, fmt.Errorf("found no size for label %d", label)
	}
	if len(keys) > 1 {
		return 0, fmt.Errorf("found %d sizes for label %d!", len(keys), label)
	}
	_, size, err := DecodeLabelSizeTKey(keys[0])
	return size, err
}
Example #6
// handleBlockEvent processes each block change as it arrives.
// TODO -- accumulate larger # of changes before committing to prevent
// excessive compaction time?  This assumes LSM storage engine, which
// might not always hold in future, so stick with incremental update
// until proven to be a bottleneck.
func (d *Data) handleBlockEvent(in <-chan datastore.SyncMessage, done <-chan struct{}) {
	store, err := storage.MutableStore()
	if err != nil {
		dvid.Errorf("Data type labelvol had error initializing store: %v\n", err)
		return
	}
	batcher, ok := store.(storage.KeyValueBatcher)
	if !ok {
		dvid.Errorf("Data type labelvol requires batch-enabled store, which %q is not\n", store)
		return
	}

	for msg := range in {
		select {
		case <-done:
			return
		default:
			ctx := datastore.NewVersionedCtx(d, msg.Version)
			switch delta := msg.Delta.(type) {
			case imageblk.Block:
				d.ingestBlock(ctx, delta, batcher)
			case labels.DeleteBlock:
				d.deleteBlock(ctx, delta, batcher)
			default:
				dvid.Criticalf("Cannot sync labelvol from block event.  Got unexpected delta: %v\n", msg)
			}
		}
	}
}
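
Note that the select above consults done only when a message arrives, so shutdown is only as prompt as the next event. A stripped-down sketch of the same drain-with-cancellation pattern, with a placeholder type standing in for datastore.SyncMessage:

package main

import "fmt"

type syncMessage struct{ version int }

func handleEvents(in <-chan syncMessage, done <-chan struct{}) {
	for msg := range in {
		select {
		case <-done:
			return // stop processing; later messages are dropped
		default:
			fmt.Println("processing version", msg.version)
		}
	}
}

func main() {
	in := make(chan syncMessage, 2)
	done := make(chan struct{})
	in <- syncMessage{1}
	in <- syncMessage{2}
	close(in)
	handleEvents(in, done)
}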
Example #7
func (d *Data) GetKeysInRange(ctx storage.Context, keyBeg, keyEnd string) ([]string, error) {
	db, err := storage.MutableStore()
	if err != nil {
		return nil, err
	}

	// Compute first and last key for range
	first, err := NewTKey(keyBeg)
	if err != nil {
		return nil, err
	}
	last, err := NewTKey(keyEnd)
	if err != nil {
		return nil, err
	}
	keys, err := db.KeysInRange(ctx, first, last)
	if err != nil {
		return nil, err
	}
	keyList := []string{}
	for _, key := range keys {
		keyStr, err := DecodeTKey(key)
		if err != nil {
			return nil, err
		}
		keyList = append(keyList, keyStr)
	}
	return keyList, nil
}
Example #8
// getSpans returns all (z, y, x0, x1) Spans in sorted order: z, then y, then x0.
func getSpans(ctx storage.VersionedCtx, minIndex, maxIndex indexRLE) ([]dvid.Span, error) {
	db, err := storage.MutableStore()
	if err != nil {
		return nil, err
	}
	spans := []dvid.Span{}

	var f storage.ChunkFunc = func(chunk *storage.Chunk) error {
		ibytes, err := chunk.K.ClassBytes(keyROI)
		if err != nil {
			return err
		}
		index := new(indexRLE)
		if err = index.IndexFromBytes(ibytes); err != nil {
			return fmt.Errorf("Unable to get indexRLE out of []byte encoding: %v\n", err)
		}
		z := index.start.Value(2)
		y := index.start.Value(1)
		x0 := index.start.Value(0)
		x1 := x0 + int32(index.span) - 1
		spans = append(spans, dvid.Span{z, y, x0, x1})
		return nil
	}
	mintk := storage.NewTKey(keyROI, minIndex.Bytes())
	maxtk := storage.NewTKey(keyROI, maxIndex.Bytes())
	err = db.ProcessRange(ctx, mintk, maxtk, &storage.ChunkOp{}, f)
	return spans, err
}
Example #9
// GetVoxels copies voxels from the storage engine to Voxels, a requested subvolume or 2d image.
func (d *Data) GetVoxels(v dvid.VersionID, vox *Voxels, r *ROI) error {
	timedLog := dvid.NewTimeLog()
	defer timedLog.Infof("GetVoxels %s", vox)

	store, err := storage.MutableStore()
	if err != nil {
		return fmt.Errorf("Data type imageblk had error initializing store: %v\n", err)
	}

	// Only do one request at a time, although each request can start many goroutines.
	server.SpawnGoroutineMutex.Lock()
	defer server.SpawnGoroutineMutex.Unlock()

	ctx := datastore.NewVersionedCtx(d, v)

	wg := new(sync.WaitGroup)
	it, err := vox.IndexIterator(d.BlockSize())
	if err != nil {
		return err
	}
	for ; it.Valid(); it.NextSpan() {
		indexBeg, indexEnd, err := it.IndexSpan()
		if err != nil {
			return err
		}
		begTKey := NewTKey(indexBeg)
		endTKey := NewTKey(indexEnd)

		// Get set of blocks in ROI if ROI provided
		var chunkOp *storage.ChunkOp
		if r != nil && r.Iter != nil {
			ptBeg := indexBeg.Duplicate().(dvid.ChunkIndexer)
			ptEnd := indexEnd.Duplicate().(dvid.ChunkIndexer)
			begX := ptBeg.Value(0)
			endX := ptEnd.Value(0)

			blocksInROI := make(map[string]bool, (endX - begX + 1))
			c := dvid.ChunkPoint3d{begX, ptBeg.Value(1), ptBeg.Value(2)}
			for x := begX; x <= endX; x++ {
				c[0] = x
				curIndex := dvid.IndexZYX(c)
				if r.Iter.InsideFast(curIndex) {
					indexString := string(curIndex.Bytes())
					blocksInROI[indexString] = true
				}
			}
			chunkOp = &storage.ChunkOp{&getOperation{vox, blocksInROI, r.attenuation}, wg}
		} else {
			chunkOp = &storage.ChunkOp{&getOperation{vox, nil, 0}, wg}
		}

		// Send the entire range of key-value pairs to chunk processor
		err = store.ProcessRange(ctx, begTKey, endTKey, chunkOp, storage.ChunkFunc(d.ReadChunk))
		if err != nil {
			return fmt.Errorf("Unable to GET data %s: %v", ctx, err)
		}
	}
	wg.Wait()
	return nil
}
Example #10
// loadOldBlocks loads blocks with old data if they exist.
func (d *Data) loadOldBlocks(v dvid.VersionID, vox *Voxels, blocks storage.TKeyValues) error {
	store, err := storage.MutableStore()
	if err != nil {
		return fmt.Errorf("Data type imageblk had error initializing store: %v\n", err)
	}

	ctx := datastore.NewVersionedCtx(d, v)

	// Create a map of old blocks indexed by the index
	oldBlocks := map[dvid.IZYXString]([]byte){}

	// Iterate through index space for this data using ZYX ordering.
	blockSize := d.BlockSize()
	blockNum := 0
	it, err := vox.IndexIterator(blockSize)
	if err != nil {
		return err
	}
	for ; it.Valid(); it.NextSpan() {
		indexBeg, indexEnd, err := it.IndexSpan()
		if err != nil {
			return err
		}
		begTKey := NewTKey(indexBeg)
		endTKey := NewTKey(indexEnd)

		// Get previous data.
		keyvalues, err := store.GetRange(ctx, begTKey, endTKey)
		if err != nil {
			return err
		}
		for _, kv := range keyvalues {
			indexZYX, err := DecodeTKey(kv.K)
			if err != nil {
				return err
			}
			block, _, err := dvid.DeserializeData(kv.V, true)
			if err != nil {
				return fmt.Errorf("Unable to deserialize block, %s: %v", ctx, err)
			}
			oldBlocks[indexZYX.ToIZYXString()] = block
		}

		// Load previous data into blocks
		ptBeg := indexBeg.Duplicate().(dvid.ChunkIndexer)
		ptEnd := indexEnd.Duplicate().(dvid.ChunkIndexer)
		begX := ptBeg.Value(0)
		endX := ptEnd.Value(0)
		c := dvid.ChunkPoint3d{begX, ptBeg.Value(1), ptBeg.Value(2)}
		for x := begX; x <= endX; x++ {
			c[0] = x
			curIndex := dvid.IndexZYX(c)
			curTKey := NewTKey(&curIndex)
			blocks[blockNum].K = curTKey
			block, ok := oldBlocks[curIndex.ToIZYXString()]
			if ok {
				copy(blocks[blockNum].V, block)
			}
			blockNum++
		}
	}
	return nil
}
Example #11
// GetSparseCoarseVol returns an encoded sparse volume given a label.  The encoding has the
// following format where integers are little endian:
// 		byte     Set to 0
// 		uint8    Number of dimensions
// 		uint8    Dimension of run (typically 0 = X)
// 		byte     Reserved (to be used later)
// 		uint32    # Blocks [TODO.  0 for now]
// 		uint32    # Spans
// 		Repeating unit of:
//     		int32   Block coordinate of run start (dimension 0)
//     		int32   Block coordinate of run start (dimension 1)
//     		int32   Block coordinate of run start (dimension 2)
//     		int32   Length of run
//
func GetSparseCoarseVol(ctx storage.Context, label uint64) ([]byte, error) {
	store, err := storage.MutableStore()
	if err != nil {
		return nil, fmt.Errorf("Data type labelvol had error initializing store: %v\n", err)
	}

	// Create the sparse volume header
	buf := new(bytes.Buffer)
	buf.WriteByte(dvid.EncodingBinary)
	binary.Write(buf, binary.LittleEndian, uint8(3))  // # of dimensions
	binary.Write(buf, binary.LittleEndian, byte(0))   // dimension of run (X = 0)
	buf.WriteByte(byte(0))                            // reserved for later
	binary.Write(buf, binary.LittleEndian, uint32(0)) // Placeholder for # blocks
	encoding := buf.Bytes()

	// Get the start/end indices for this body's KeyLabelSpatialMap (b + s) keys.
	begTKey := NewTKey(label, dvid.MinIndexZYX.ToIZYXString())
	endTKey := NewTKey(label, dvid.MaxIndexZYX.ToIZYXString())

	// Process all the b+s keys and their values, which contain RLE runs for that label.
	var numBlocks uint32
	var span *dvid.Span
	var spans dvid.Spans
	keys, err := store.KeysInRange(ctx, begTKey, endTKey)
	if err != nil {
		return nil, fmt.Errorf("Cannot get keys for coarse sparse volume: %v", err)
	}
	for _, tk := range keys {
		numBlocks++
		_, blockStr, err := DecodeTKey(tk)
		if err != nil {
			return nil, fmt.Errorf("Error retrieving RLE runs for label %d: %v", label, err)
		}
		indexZYX, err := blockStr.IndexZYX()
		if err != nil {
			return nil, fmt.Errorf("Error decoding block coordinate (%v) for sparse volume: %v\n", blockStr, err)
		}
		x, y, z := indexZYX.Unpack()
		if span == nil {
			span = &dvid.Span{z, y, x, x}
		} else if !span.Extends(x, y, z) {
			spans = append(spans, *span)
			span = &dvid.Span{z, y, x, x}
		}
	}
	if span != nil {
		spans = append(spans, *span)
	}
	spansBytes, err := spans.MarshalBinary()
	if err != nil {
		return nil, err
	}
	encoding = append(encoding, spansBytes...)
	dvid.Debugf("[%s] coarse subvol for label %d: found %d blocks\n", ctx, label, numBlocks)
	return encoding, nil
}
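
The wire format in the doc comment can be parsed with the standard library alone. A sketch of a decoder, assuming the span payload following the header is the documented uint32 span count plus one repeating 4 × int32 unit per span:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

type span struct{ X, Y, Z, Length int32 }

// decodeSparseCoarseVol parses the documented header, then the spans.
func decodeSparseCoarseVol(enc []byte) ([]span, error) {
	r := bytes.NewReader(enc)
	var header struct {
		Encoding  byte
		NumDims   uint8
		RunDim    uint8
		Reserved  byte
		NumBlocks uint32
		NumSpans  uint32
	}
	if err := binary.Read(r, binary.LittleEndian, &header); err != nil {
		return nil, err
	}
	spans := make([]span, header.NumSpans)
	if err := binary.Read(r, binary.LittleEndian, spans); err != nil {
		return nil, err
	}
	return spans, nil
}

func main() {
	buf := new(bytes.Buffer)
	buf.Write([]byte{0, 3, 0, 0})                     // encoding, # dims, run dim, reserved
	binary.Write(buf, binary.LittleEndian, uint32(0)) // # blocks (placeholder)
	binary.Write(buf, binary.LittleEndian, uint32(1)) // # spans
	binary.Write(buf, binary.LittleEndian, span{10, 20, 30, 4})
	fmt.Println(decodeSparseCoarseVol(buf.Bytes())) // [{10 20 30 4}] <nil>
}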
Example #12
// GetBlocks returns a slice of bytes corresponding to all the blocks along a span in X
func (d *Data) GetBlocks(v dvid.VersionID, start dvid.ChunkPoint3d, span int) ([]byte, error) {
	store, err := storage.MutableStore()
	if err != nil {
		return nil, fmt.Errorf("Data type imageblk had error initializing store: %v\n", err)
	}

	indexBeg := dvid.IndexZYX(start)
	end := start
	end[0] += int32(span - 1)
	indexEnd := dvid.IndexZYX(end)
	begTKey := NewTKey(&indexBeg)
	endTKey := NewTKey(&indexEnd)

	ctx := datastore.NewVersionedCtx(d, v)

	iv := dvid.InstanceVersion{d.DataName(), v}
	mapping := labels.MergeCache.LabelMap(iv)

	keyvalues, err := store.GetRange(ctx, begTKey, endTKey)
	if err != nil {
		return nil, err
	}

	var buf bytes.Buffer

	// Save the # of keyvalues actually obtained.
	numkv := len(keyvalues)
	binary.Write(&buf, binary.LittleEndian, int32(numkv))

	// Write the block indices in XYZ little-endian format + the size of each block
	uncompress := true
	for _, kv := range keyvalues {
		block, _, err := dvid.DeserializeData(kv.V, uncompress)
		if err != nil {
			return nil, fmt.Errorf("Unable to deserialize block, %s (%v): %v", ctx, kv.K, err)
		}
		if mapping != nil {
			n := len(block) / 8
			for i := 0; i < n; i++ {
				orig := binary.LittleEndian.Uint64(block[i*8 : i*8+8])
				mapped, found := mapping.FinalLabel(orig)
				if !found {
					mapped = orig
				}
				binary.LittleEndian.PutUint64(block[i*8:i*8+8], mapped)
			}
		}

		_, err = buf.Write(block)
		if err != nil {
			return nil, err
		}
	}

	return buf.Bytes(), nil
}
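
The remapping loop treats a block as a flat array of little-endian uint64 labels. The same transform in isolation, with a plain map standing in for the merge cache's FinalLabel lookup:

package main

import (
	"encoding/binary"
	"fmt"
)

// relabel rewrites each 64-bit label in block through mapping, in place.
func relabel(block []byte, mapping map[uint64]uint64) {
	for i := 0; i+8 <= len(block); i += 8 {
		orig := binary.LittleEndian.Uint64(block[i : i+8])
		if mapped, found := mapping[orig]; found {
			binary.LittleEndian.PutUint64(block[i:i+8], mapped)
		}
	}
}

func main() {
	block := make([]byte, 16)
	binary.LittleEndian.PutUint64(block[0:8], 7)
	binary.LittleEndian.PutUint64(block[8:16], 9)
	relabel(block, map[uint64]uint64{7: 42})
	fmt.Println(binary.LittleEndian.Uint64(block[0:8]),
		binary.LittleEndian.Uint64(block[8:16])) // 42 9
}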
Example #13
// Goroutine that handles relabeling of blocks during a merge operation.
// Since the same block coordinate always gets mapped to the same goroutine, we handle
// concurrency by serializing GET/PUT for a particular block coordinate.
func (d *Data) mergeBlock(in <-chan mergeOp) {
	store, err := storage.MutableStore()
	if err != nil {
		dvid.Errorf("Data type labelblk had error initializing store: %v\n", err)
		return
	}
	blockBytes := int(d.BlockSize().Prod() * 8)

	for op := range in {
		tk := NewTKeyByCoord(op.izyx)
		data, err := store.Get(op.ctx, tk)
		if err != nil {
			dvid.Errorf("Error on GET of labelblk with coord string %q\n", op.izyx)
			op.wg.Done()
			continue
		}
		if data == nil {
			dvid.Errorf("nil label block where merge was done!\n")
			op.wg.Done()
			continue
		}

		blockData, _, err := dvid.DeserializeData(data, true)
		if err != nil {
			dvid.Criticalf("unable to deserialize label block in '%s': %v\n", d.DataName(), err)
			op.wg.Done()
			continue
		}
		if len(blockData) != blockBytes {
			dvid.Criticalf("After labelblk deserialization got back %d bytes, expected %d bytes\n", len(blockData), blockBytes)
			op.wg.Done()
			continue
		}

		// Iterate through this block of labels and relabel if label in merge.
		for i := 0; i < blockBytes; i += 8 {
			label := binary.LittleEndian.Uint64(blockData[i : i+8])
			if _, merged := op.Merged[label]; merged {
				binary.LittleEndian.PutUint64(blockData[i:i+8], op.Target)
			}
		}

		// Store this block.
		serialization, err := dvid.SerializeData(blockData, d.Compression(), d.Checksum())
		if err != nil {
			dvid.Criticalf("Unable to serialize block in %q: %v\n", d.DataName(), err)
			op.wg.Done()
			continue
		}
		if err := store.Put(op.ctx, tk, serialization); err != nil {
			dvid.Errorf("Error in putting key %v: %v\n", tk, err)
		}
		op.wg.Done()
	}
}
Example #14
// DeleteData deletes a key-value pair
func (d *Data) DeleteData(ctx storage.Context, keyStr string) error {
	db, err := storage.MutableStore()
	if err != nil {
		return err
	}
	tk, err := NewTKey(keyStr)
	if err != nil {
		return err
	}
	return db.Delete(ctx, tk)
}
Example #15
func (d *Data) loadMaxLabels(wg *sync.WaitGroup, ch chan *storage.KeyValue) {
	ctx := storage.NewDataContext(d, 0)
	var repoMax uint64
	d.MaxLabel = make(map[dvid.VersionID]uint64)
	for {
		kv := <-ch
		if kv == nil {
			break
		}
		v, err := ctx.VersionFromKey(kv.K)
		if err != nil {
			dvid.Errorf("Can't decode key when loading mutable data for %s", d.DataName())
			continue
		}
		if len(kv.V) != 8 {
			dvid.Errorf("Got bad value.  Expected 64-bit label, got %v", kv.V)
			continue
		}
		label := binary.LittleEndian.Uint64(kv.V)
		d.MaxLabel[v] = label
		if label > repoMax {
			repoMax = label
		}
	}

	// Load in the repo-wide max label.
	store, err := storage.MutableStore()
	if err != nil {
		dvid.Errorf("Data type labelvol had error initializing store: %v\n", err)
		return
	}
	data, err := store.Get(ctx, maxRepoLabelTKey)
	if err != nil {
		dvid.Errorf("Error getting repo-wide max label: %v\n", err)
		return
	}
	if data == nil || len(data) != 8 {
		dvid.Errorf("Could not load repo-wide max label for instance %q.  Only got %d bytes, not 64-bit label.\n", d.DataName(), len(data))
		dvid.Errorf("Using max label across versions: %d\n", repoMax)
		d.MaxRepoLabel = repoMax
	} else {
		d.MaxRepoLabel = binary.LittleEndian.Uint64(data)
		if d.MaxRepoLabel < repoMax {
			dvid.Errorf("Saved repo-wide max for instance %q was %d, changed to largest version max %d\n", d.DataName(), d.MaxRepoLabel, repoMax)
			d.MaxRepoLabel = repoMax
		}
	}
	wg.Done()
}
Example #16
// PutData puts a key-value pair at a given version.
func (d *Data) PutData(ctx storage.Context, keyStr string, value []byte) error {
	db, err := storage.MutableStore()
	if err != nil {
		return err
	}
	serialization, err := dvid.SerializeData(value, d.Compression(), d.Checksum())
	if err != nil {
		return fmt.Errorf("Unable to serialize data: %v\n", err)
	}
	tk, err := NewTKey(keyStr)
	if err != nil {
		return err
	}
	return db.Put(ctx, tk, serialization)
}
Example #17
func (d *Data) migrateMaxLabels(root dvid.VersionID, wg *sync.WaitGroup, ch chan *storage.KeyValue) {
	ctx := storage.NewDataContext(d, 0)
	store, err := storage.MutableStore()
	if err != nil {
		dvid.Errorf("Can't initializing small data store: %v\n", err)
	}

	var maxRepoLabel uint64
	d.MaxLabel = make(map[dvid.VersionID]uint64)
	for {
		kv := <-ch
		if kv == nil {
			break
		}
		v, err := ctx.VersionFromKey(kv.K)
		if err != nil {
			dvid.Errorf("Can't decode key when loading mutable data for %s", d.DataName())
			continue
		}
		if len(kv.V) != 8 {
			dvid.Errorf("Got bad value.  Expected 64-bit label, got %v", kv.V)
			continue
		}
		label := binary.LittleEndian.Uint64(kv.V)
		d.MaxLabel[v] = label
		if label > maxRepoLabel {
			maxRepoLabel = label
		}
	}

	// Adjust the MaxLabel data to make sure we correct for any case of child max < parent max.
	d.adjustMaxLabels(store, root)

	// Set the repo-wide max label.
	d.MaxRepoLabel = maxRepoLabel

	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, maxRepoLabel)
	if err := store.Put(ctx, maxRepoLabelTKey, buf); err != nil {
		dvid.Errorf("Error saving repo-wide max label: %v\n", err)
	}

	wg.Done()
}
Example #18
// LoadMutable loads mutable properties of label volumes like the maximum labels
// for each version.  Note that we load these max labels from key-value pairs
// rather than data instance properties persistence, because in the case of a crash,
// the actually stored repo data structure may be out-of-date compared to the guaranteed
// up-to-date key-value pairs for max labels.
func (d *Data) LoadMutable(root dvid.VersionID, storedVersion, expectedVersion uint64) (bool, error) {
	ctx := storage.NewDataContext(d, 0)
	store, err := storage.MutableStore()
	if err != nil {
		return false, fmt.Errorf("Data type labelvol had error initializing store: %v\n", err)
	}

	wg := new(sync.WaitGroup)
	wg.Add(1)
	ch := make(chan *storage.KeyValue)

	// Start appropriate migration function if any.
	var saveRequired bool

	switch storedVersion {
	case 0:
		// Need to update all max labels and set repo-level max label.
		saveRequired = true
		dvid.Infof("Migrating old version of labelvol %q to new version\n", d.DataName())
		go d.migrateMaxLabels(root, wg, ch)
	default:
		// Load in each version max label without migration.
		go d.loadMaxLabels(wg, ch)
	}

	// Send the max label data per version
	minKey, err := ctx.MinVersionKey(maxLabelTKey)
	if err != nil {
		return false, err
	}
	maxKey, err := ctx.MaxVersionKey(maxLabelTKey)
	if err != nil {
		return false, err
	}
	keysOnly := false
	if err = store.RawRangeQuery(minKey, maxKey, keysOnly, ch); err != nil {
		return false, err
	}
	wg.Wait()

	dvid.Infof("Loaded max label values for labelvol %q with repo-wide max %d\n", d.DataName(), d.MaxRepoLabel)
	return saveRequired, nil
}
Example #19
// GetSurface returns a gzipped byte array with # voxels and float32 arrays for vertices and
// normals.
func GetSurface(ctx storage.Context, label uint64) ([]byte, bool, error) {
	store, err := storage.MutableStore()
	if err != nil {
		return nil, false, fmt.Errorf("Cannot get datastore that handles big data: %v\n", err)
	}

	// Retrieve the precomputed surface or determine that it's not available.
	data, err := store.Get(ctx, NewLabelSurfaceIndex(label))
	if err != nil {
		return nil, false, fmt.Errorf("Error in retrieving surface for label %d: %v", label, err)
	}
	if data == nil {
		return []byte{}, false, nil
	}
	uncompress := false
	surfaceBytes, _, err := dvid.DeserializeData(data, uncompress)
	if err != nil {
		return nil, false, fmt.Errorf("Unable to deserialize surface for label %d: %v\n", label, err)
	}
	return surfaceBytes, true, nil
}
Example #20
// GetData gets a value using a key
func (d *Data) GetData(ctx storage.Context, keyStr string) ([]byte, bool, error) {
	db, err := storage.MutableStore()
	if err != nil {
		return nil, false, err
	}
	tk, err := NewTKey(keyStr)
	if err != nil {
		return nil, false, err
	}
	data, err := db.Get(ctx, tk)
	if err != nil {
		return nil, false, fmt.Errorf("Error in retrieving key '%s': %v", keyStr, err)
	}
	if data == nil {
		return nil, false, nil
	}
	uncompress := true
	value, _, err := dvid.DeserializeData(data, uncompress)
	if err != nil {
		return nil, false, fmt.Errorf("Unable to deserialize data for key '%s': %v\n", keyStr, err)
	}
	return value, true, nil
}
Example #21
// GetSizeRange returns a JSON list of mapped labels that have volumes within the given range.
// If maxSize is 0, all mapped labels with volume >= minSize are returned.
func (d *Data) GetSizeRange(v dvid.VersionID, minSize, maxSize uint64) (string, error) {
	store, err := storage.MutableStore()
	if err != nil {
		return "{}", fmt.Errorf("Data type imagesz had error initializing store: %v\n", err)
	}

	// Get the start/end keys for the size range.
	firstKey := NewSizeLabelTKey(minSize, 0)
	var upperBound uint64
	if maxSize != 0 {
		upperBound = maxSize
	} else {
		upperBound = math.MaxUint64
	}
	lastKey := NewSizeLabelTKey(upperBound, math.MaxUint64)

	// Grab all keys for this range in one sequential read.
	ctx := datastore.NewVersionedCtx(d, v)
	keys, err := store.KeysInRange(ctx, firstKey, lastKey)
	if err != nil {
		return "{}", err
	}

	// Convert them to a JSON compatible structure.
	labels := make([]uint64, len(keys))
	for i, key := range keys {
		labels[i], _, err = DecodeSizeLabelTKey(key)
		if err != nil {
			return "{}", err
		}
	}
	m, err := json.Marshal(labels)
	if err != nil {
		return "{}", nil
	}
	return string(m), nil
}
Example #22
// Given a stored label, make sure our max label tracking is updated.
func (d *Data) casMaxLabel(batch storage.Batch, v dvid.VersionID, label uint64) {
	d.ml_mu.Lock()
	defer d.ml_mu.Unlock()

	save := false
	maxLabel, found := d.MaxLabel[v]
	if !found {
		dvid.Errorf("Bad max label of version %d -- none found!\n", v)
		maxLabel = 0
	}
	if maxLabel < label {
		maxLabel = label
		save = true
	}
	if save {
		buf := make([]byte, 8)
		binary.LittleEndian.PutUint64(buf, maxLabel)
		batch.Put(maxLabelTKey, buf)
		d.MaxLabel[v] = maxLabel

		if d.MaxRepoLabel < maxLabel {
			d.MaxRepoLabel = maxLabel
			ctx := storage.NewDataContext(d, 0)
			store, err := storage.MutableStore()
			if err != nil {
				dvid.Errorf("Data type labelvol had error initializing store: %v\n", err)
			} else {
				if err := store.Put(ctx, maxRepoLabelTKey, buf); err != nil {
					dvid.Errorf("Error saving repo-wide max label: %v\n", err)
				}
			}
		}
	}
	if err := batch.Commit(); err != nil {
		dvid.Errorf("batch put: %v\n", err)
		return
	}
}
Example #23
// GetLabelBytesAtPoint returns the 8 byte slice corresponding to a 64-bit label at a point.
func (d *Data) GetLabelBytesAtPoint(v dvid.VersionID, pt dvid.Point) ([]byte, error) {
	store, err := storage.MutableStore()
	if err != nil {
		return nil, err
	}

	// Compute the block key that contains the given point.
	coord, ok := pt.(dvid.Chunkable)
	if !ok {
		return nil, fmt.Errorf("Can't determine block of point %s", pt)
	}
	blockSize := d.BlockSize()
	blockCoord := coord.Chunk(blockSize).(dvid.ChunkPoint3d)
	index := dvid.IndexZYX(blockCoord)

	// Retrieve the block of labels
	ctx := datastore.NewVersionedCtx(d, v)
	serialization, err := store.Get(ctx, NewTKey(&index))
	if err != nil {
		return nil, fmt.Errorf("Error getting '%s' block for index %s\n", d.DataName(), blockCoord)
	}
	if serialization == nil {
		return zeroLabelBytes, nil
	}
	labelData, _, err := dvid.DeserializeData(serialization, true)
	if err != nil {
		return nil, fmt.Errorf("Unable to deserialize block %s in '%s': %v\n", blockCoord, d.DataName(), err)
	}

	// Retrieve the particular label within the block.
	ptInBlock := coord.PointInChunk(blockSize)
	nx := int64(blockSize.Value(0))
	nxy := nx * int64(blockSize.Value(1))
	i := (int64(ptInBlock.Value(0)) + int64(ptInBlock.Value(1))*nx + int64(ptInBlock.Value(2))*nxy) * 8
	return labelData[i : i+8], nil
}
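
The final offset computation is plain row-major indexing over a ZYX block with 8 bytes per voxel. The same math in a self-contained sketch, with block dimensions passed as plain integers:

package main

import (
	"encoding/binary"
	"fmt"
)

// labelAtPoint returns the uint64 label at (x, y, z) inside a block of
// dimensions nx × ny × nz, where labelData holds 8 bytes per voxel.
func labelAtPoint(labelData []byte, x, y, z, nx, ny int64) uint64 {
	nxy := nx * ny
	i := (x + y*nx + z*nxy) * 8
	return binary.LittleEndian.Uint64(labelData[i : i+8])
}

func main() {
	nx, ny, nz := int64(2), int64(2), int64(2)
	data := make([]byte, nx*ny*nz*8)
	// Put label 99 at voxel (1, 0, 1): offset (1 + 0*2 + 1*4) * 8 = 40.
	binary.LittleEndian.PutUint64(data[40:48], 99)
	fmt.Println(labelAtPoint(data, 1, 0, 1, nx, ny)) // 99
}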
Example #24
func (d *Data) DeleteBlocks(ctx *datastore.VersionedCtx, start dvid.ChunkPoint3d, span int) error {
	store, err := storage.MutableStore()
	if err != nil {
		return fmt.Errorf("Data type labelblk had error initializing store: %v\n", err)
	}
	batcher, ok := store.(storage.KeyValueBatcher)
	if !ok {
		return fmt.Errorf("Data type labelblk requires batch-enabled store, which %q is not\n", store)
	}

	indexBeg := dvid.IndexZYX(start)
	end := start
	end[0] += int32(span - 1)
	indexEnd := dvid.IndexZYX(end)
	begTKey := NewTKey(&indexBeg)
	endTKey := NewTKey(&indexEnd)

	iv := dvid.InstanceVersion{d.DataName(), ctx.VersionID()}
	mapping := labels.MergeCache.LabelMap(iv)

	kvs, err := store.GetRange(ctx, begTKey, endTKey)
	if err != nil {
		return err
	}

	batch := batcher.NewBatch(ctx)
	uncompress := true
	for _, kv := range kvs {
		izyx, err := DecodeTKey(kv.K)
		if err != nil {
			return err
		}

		// Delete the labelblk (really tombstones it)
		batch.Delete(kv.K)

		// Send data to delete associated labelvol for labels in this block
		block, _, err := dvid.DeserializeData(kv.V, uncompress)
		if err != nil {
			return fmt.Errorf("Unable to deserialize block, %s (%v): %v", ctx, kv.K, err)
		}
		if mapping != nil {
			n := len(block) / 8
			for i := 0; i < n; i++ {
				orig := binary.LittleEndian.Uint64(block[i*8 : i*8+8])
				mapped, found := mapping.FinalLabel(orig)
				if !found {
					mapped = orig
				}
				binary.LittleEndian.PutUint64(block[i*8:i*8+8], mapped)
			}
		}

		// Notify any subscribers that we've deleted this block.
		evt := datastore.SyncEvent{d.DataName(), labels.DeleteBlockEvent}
		msg := datastore.SyncMessage{ctx.VersionID(), labels.DeleteBlock{izyx, block}}
		if err := datastore.NotifySubscribers(evt, msg); err != nil {
			return err
		}

	}
	return batch.Commit()
}
Example #25
// Goroutine that handles splits across a lot of blocks for one label.
func (d *Data) splitBlock(in <-chan splitOp) {
	store, err := storage.MutableStore()
	if err != nil {
		dvid.Errorf("Data type labelblk had error initializing store: %v\n", err)
		return
	}
	batcher, ok := store.(storage.KeyValueBatcher)
	if !ok {
		dvid.Errorf("Data type labelblk requires batch-enabled store, which %q is not\n", store)
		return
	}
	blockBytes := int(d.BlockSize().Prod() * 8)

	for op := range in {
		// Iterate through all the modified blocks, inserting the new label using the RLEs for that block.
		timedLog := dvid.NewTimeLog()
		splitCache.Incr(op.ctx.InstanceVersion(), op.OldLabel)
		batch := batcher.NewBatch(&op.ctx)
		for _, zyxStr := range op.SortedBlocks {
			// Read the block.
			tk := NewTKeyByCoord(zyxStr)
			data, err := store.Get(&op.ctx, tk)
			if err != nil {
				dvid.Errorf("Error on GET of labelblk with coord string %v\n", []byte(zyxStr))
				continue
			}
			if data == nil {
				dvid.Errorf("nil label block where split was done, coord %v\n", []byte(zyxStr))
				continue
			}
			bdata, _, err := dvid.DeserializeData(data, true)
			if err != nil {
				dvid.Criticalf("unable to deserialize label block in '%s' key %v: %v\n", d.DataName(), []byte(zyxStr), err)
				continue
			}
			if len(bdata) != blockBytes {
				dvid.Criticalf("splitBlock: coord %v got back %d bytes, expected %d bytes\n", []byte(zyxStr), len(bdata), blockBytes)
				continue
			}

			// Modify the block using either voxel-level changes or coarser block-level mods.
			if op.Split != nil {
				rles, found := op.Split[zyxStr]
				if !found {
					dvid.Errorf("split block %s not present in block RLEs\n", zyxStr.Print())
					continue
				}
				if err := d.storeRLEs(bdata, op.NewLabel, zyxStr, rles); err != nil {
					dvid.Errorf("can't store label %d RLEs into block %s: %v\n", op.NewLabel, zyxStr.Print(), err)
					continue
				}
			} else {
				// We are doing coarse split and will replace all
				if err := d.replaceLabel(bdata, op.OldLabel, op.NewLabel); err != nil {
					dvid.Errorf("can't replace label %d with %d in block %s: %v\n", op.OldLabel, op.NewLabel, zyxStr.Print(), err)
					continue
				}
			}

			// Write the modified block.
			serialization, err := dvid.SerializeData(bdata, d.Compression(), d.Checksum())
			if err != nil {
				dvid.Criticalf("Unable to serialize block %s in %q: %v\n", zyxStr.Print(), d.DataName(), err)
				continue
			}
			batch.Put(tk, serialization)
		}
		if err := batch.Commit(); err != nil {
			dvid.Errorf("Batch PUT during %q block split of %d: %v\n", d.DataName(), op.OldLabel, err)
		}
		splitCache.Decr(op.ctx.InstanceVersion(), op.OldLabel)
		timedLog.Debugf("labelblk sync complete for split of %d -> %d", op.OldLabel, op.NewLabel)
	}
}
Example #26
func (d *Data) foregroundROI(v dvid.VersionID, dest *roi.Data, background dvid.PointNd) {
	dest.Ready = false

	store, err := storage.MutableStore()
	if err != nil {
		dvid.Criticalf("Data type imageblk had error initializing store: %v\n", err)
		return
	}

	timedLog := dvid.NewTimeLog()
	timedLog.Infof("Starting foreground ROI %q for %s", dest.DataName(), d.DataName())

	// Iterate through all voxel blocks, loading and then checking blocks
	// for any foreground voxels.
	ctx := datastore.NewVersionedCtx(d, v)

	backgroundBytes := make([]byte, len(background))
	for i, b := range background {
		backgroundBytes[i] = byte(b)
	}

	const BATCH_SIZE = 1000
	var numBatches int
	var span *dvid.Span
	spans := []dvid.Span{}

	var f storage.ChunkFunc = func(chunk *storage.Chunk) error {
		if chunk == nil || chunk.V == nil {
			return nil
		}
		data, _, err := dvid.DeserializeData(chunk.V, true)
		if err != nil {
			return fmt.Errorf("Error decoding block: %v\n", err)
		}
		numVoxels := d.BlockSize().Prod()
		var foreground bool
		for i := int64(0); i < numVoxels; i++ {
			isBackground := false
			for _, b := range backgroundBytes {
				if data[i] == b {
					isBackground = true
					break
				}
			}
			if !isBackground {
				foreground = true
				break
			}
		}
		if foreground {
			indexZYX, err := DecodeTKey(chunk.K)
			if err != nil {
				return fmt.Errorf("Error decoding voxel block key: %v\n", err)
			}
			x, y, z := indexZYX.Unpack()
			if span == nil {
				span = &dvid.Span{z, y, x, x}
			} else if !span.Extends(x, y, z) {
				spans = append(spans, *span)
				if len(spans) >= BATCH_SIZE {
					init := (numBatches == 0)
					numBatches++
					go func(spans []dvid.Span) {
						if err := dest.PutSpans(v, spans, init); err != nil {
							dvid.Errorf("Error in storing ROI: %v\n", err)
						} else {
							timedLog.Debugf("-- Wrote batch %d of spans for foreground ROI %q", numBatches, dest.DataName())
						}
					}(spans)
					spans = []dvid.Span{}
				}
				span = &dvid.Span{z, y, x, x}
			}
		}
		server.BlockOnInteractiveRequests("voxels [compute foreground ROI]")
		return nil
	}

	minTKey := storage.MinTKey(keyImageBlock)
	maxTKey := storage.MaxTKey(keyImageBlock)

	err = store.ProcessRange(ctx, minTKey, maxTKey, &storage.ChunkOp{}, f)
	if err != nil {
		dvid.Errorf("Error in processing chunks in ROI: %v\n", err)
		return
	}
	if span != nil {
		spans = append(spans, *span)
	}

	// Save new ROI
	if len(spans) > 0 {
		if err := dest.PutSpans(v, spans, numBatches == 0); err != nil {
			dvid.Errorf("Error in storing ROI: %v\n", err)
			return
		}
	}
	timedLog.Infof("Created foreground ROI %q for %s", dest.DataName(), d.DataName())
	dest.Ready = true
}
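
Both this function and GetSparseCoarseVol grow a (z, y, x0, x1) run while blocks stay contiguous in x. A sketch of that accumulation over coordinates sorted by (z, y, x), assuming Extends means same z and y with x adjacent to the run's end:

package main

import "fmt"

type span struct{ Z, Y, X0, X1 int32 }

// extends reports whether (x, y, z) continues the run, growing it if so.
func (s *span) extends(x, y, z int32) bool {
	if s.Z != z || s.Y != y || s.X1+1 != x {
		return false
	}
	s.X1 = x
	return true
}

// accumulate turns block coordinates sorted by (z, y, x) into runs.
func accumulate(coords [][3]int32) []span {
	var spans []span
	var cur *span
	for _, c := range coords {
		x, y, z := c[0], c[1], c[2]
		if cur == nil {
			cur = &span{z, y, x, x}
		} else if !cur.extends(x, y, z) {
			spans = append(spans, *cur)
			cur = &span{z, y, x, x}
		}
	}
	if cur != nil {
		spans = append(spans, *cur)
	}
	return spans
}

func main() {
	coords := [][3]int32{{1, 0, 0}, {2, 0, 0}, {4, 0, 0}}
	fmt.Println(accumulate(coords)) // [{0 0 1 2} {0 0 4 4}]
}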
Example #27
// GetBlocks returns a slice of bytes corresponding to all the blocks along a span in X
func (d *Data) GetBlocks(v dvid.VersionID, start dvid.ChunkPoint3d, span int32) ([]byte, error) {
	timedLog := dvid.NewTimeLog()
	defer timedLog.Infof("GetBlocks %s, span %d", start, span)

	store, err := storage.MutableStore()
	if err != nil {
		return nil, fmt.Errorf("Data type imageblk had error initializing store: %v\n", err)
	}

	indexBeg := dvid.IndexZYX(start)
	sx, sy, sz := indexBeg.Unpack()

	end := start
	end[0] += int32(span - 1)
	indexEnd := dvid.IndexZYX(end)
	keyBeg := NewTKey(&indexBeg)
	keyEnd := NewTKey(&indexEnd)

	// Allocate one uncompressed-sized slice with background values.
	blockBytes := int32(d.BlockSize().Prod()) * d.Values.BytesPerElement()
	numBytes := blockBytes * span

	buf := make([]byte, numBytes)
	if d.Background != 0 {
		for i := range buf {
			buf[i] = byte(d.Background)
		}
	}

	// Write the blocks that we can get concurrently on this byte slice.
	ctx := datastore.NewVersionedCtx(d, v)

	var wg sync.WaitGroup
	err = store.ProcessRange(ctx, keyBeg, keyEnd, &storage.ChunkOp{}, func(c *storage.Chunk) error {
		if c == nil || c.TKeyValue == nil {
			return nil
		}
		kv := c.TKeyValue
		if kv.V == nil {
			return nil
		}

		// Determine which block this is.
		indexZYX, err := DecodeTKey(kv.K)
		if err != nil {
			return err
		}
		x, y, z := indexZYX.Unpack()
		if z != sz || y != sy || x < sx || x >= sx+int32(span) {
			return fmt.Errorf("Received key-value for %s, not supposed to be within span range %s, length %d", *indexZYX, start, span)
		}
		n := x - sx
		i := n * blockBytes
		j := i + blockBytes

		// Spawn goroutine to transfer data
		wg.Add(1)
		go xferBlock(buf[i:j], c, &wg)
		return nil
	})
	if err != nil {
		return nil, err
	}
	wg.Wait()
	return buf, nil
}
Example #28
// PutSpans saves a slice of spans representing an ROI into the datastore.
// If the init parameter is true, all previous spans of this ROI are deleted before
// writing these spans.
func (d *Data) PutSpans(versionID dvid.VersionID, spans []dvid.Span, init bool) error {
	ctx := datastore.NewVersionedCtx(d, versionID)

	db, err := storage.MutableStore()
	if err != nil {
		return err
	}

	// Delete the old key/values
	if init {
		if err := d.Delete(ctx); err != nil {
			return err
		}
	}

	// Make sure our small data store can do batching.
	batcher, ok := db.(storage.KeyValueBatcher)
	if !ok {
		return fmt.Errorf("Unable to store ROI: small data store can't do batching!")
	}

	// We only want one PUT on given version for given data to prevent interleaved PUTs.
	putMutex := ctx.Mutex()
	putMutex.Lock()

	// Save new extents after finished.
	defer func() {
		err := datastore.SaveDataByVersion(ctx.VersionID(), d)
		if err != nil {
			dvid.Errorf("Error in trying to save repo on roi extent change: %v\n", err)
		}
		putMutex.Unlock()
	}()

	// Put the new key/values
	const BATCH_SIZE = 10000
	batch := batcher.NewBatch(ctx)
	for i, span := range spans {
		if span[0] < d.MinZ {
			d.MinZ = span[0]
		}
		if span[0] > d.MaxZ {
			d.MaxZ = span[0]
		}
		if span[3] < span[2] {
			return fmt.Errorf("Got weird span %v.  span[3] (X1) < span[2] (X0)", span)
		}
		index := indexRLE{
			start: dvid.IndexZYX{span[2], span[1], span[0]},
			span:  uint32(span[3] - span[2] + 1),
		}
		tk := storage.NewTKey(keyROI, index.Bytes())
		batch.Put(tk, dvid.EmptyValue())
		if (i+1)%BATCH_SIZE == 0 {
			if err := batch.Commit(); err != nil {
				return fmt.Errorf("Error on batch PUT at span %d: %v\n", i, err)
			}
			batch = batcher.NewBatch(ctx)
		}
	}
	if len(spans)%BATCH_SIZE != 0 {
		if err := batch.Commit(); err != nil {
			return fmt.Errorf("Error on last batch PUT: %v\n", err)
		}
	}
	return nil
}
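
The commit pattern above flushes every BATCH_SIZE puts and then flushes the remainder, skipping the final commit only when the count landed exactly on a batch boundary. The same control flow against a stand-in batch type:

package main

import "fmt"

type batch struct{ n int }

func (b *batch) put(k, v string) { b.n++ }
func (b *batch) commit() error {
	fmt.Println("committed", b.n, "puts")
	return nil
}

func putAll(items []string) error {
	const batchSize = 3
	b := &batch{}
	for i, item := range items {
		b.put(item, "")
		if (i+1)%batchSize == 0 {
			if err := b.commit(); err != nil {
				return err
			}
			b = &batch{}
		}
	}
	// Flush the remainder; an exact multiple was already committed in-loop.
	if len(items)%batchSize != 0 {
		return b.commit()
	}
	return nil
}

func main() {
	_ = putAll([]string{"a", "b", "c", "d"}) // commits 3 puts, then 1
}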
Example #29
func (d *Data) handleSizeEvent(in <-chan datastore.SyncMessage, done <-chan struct{}) {
	store, err := storage.MutableStore()
	if err != nil {
		dvid.Errorf("Data type labelvol had error initializing store: %v\n", err)
		return
	}
	batcher, ok := store.(storage.KeyValueBatcher)
	if !ok {
		dvid.Errorf("Data type labelvol requires batch-enabled store, which %q is not\n", store)
		return
	}

	for msg := range in {
		ctx := datastore.NewVersionedCtx(d, msg.Version)

		select {
		case <-done:
			return
		default:
			switch delta := msg.Delta.(type) {
			case labels.DeltaNewSize:
				batch := batcher.NewBatch(ctx)
				newKey := NewSizeLabelTKey(delta.Size, delta.Label)
				batch.Put(newKey, dvid.EmptyValue())
				newKey = NewLabelSizeTKey(delta.Label, delta.Size)
				batch.Put(newKey, dvid.EmptyValue())
				if err := batch.Commit(); err != nil {
					dvid.Errorf("Error on updating label sizes on %s: %v\n", ctx, err)
				}

			case labels.DeltaDeleteSize:
				if delta.OldKnown {
					batch := batcher.NewBatch(ctx)
					oldKey := NewSizeLabelTKey(delta.OldSize, delta.Label)
					batch.Delete(oldKey)
					oldKey = NewLabelSizeTKey(delta.Label, delta.OldSize)
					batch.Delete(oldKey)
					if err := batch.Commit(); err != nil {
						dvid.Errorf("Error on updating label sizes on %s: %v\n", ctx, err)
					}
				} else {
					// TODO -- Make transactional or force sequentially to label-specific goroutine
					begKey := NewLabelSizeTKey(delta.Label, 0)
					endKey := NewLabelSizeTKey(delta.Label, math.MaxUint64)
					keys, err := store.KeysInRange(ctx, begKey, endKey)
					if err != nil {
						dvid.Errorf("Unable to get size keys for label %d: %v\n", delta.Label, err)
						continue
					}
					if len(keys) != 1 {
						dvid.Errorf("Cannot modify size of label %d when no prior size recorded!", delta.Label)
						continue
					}

					// Modify label size
					label, oldSize, err := DecodeLabelSizeTKey(keys[0])
					if err != nil {
						dvid.Errorf("Error decoding label size key: %v\n", err)
						continue
					}
					if label != delta.Label {
						dvid.Errorf("Requested size of label %d and got key for label %d!\n", delta.Label, label)
						continue
					}
					batch := batcher.NewBatch(ctx)
					oldKey := NewSizeLabelTKey(oldSize, delta.Label)
					batch.Delete(oldKey)
					oldKey = NewLabelSizeTKey(delta.Label, oldSize)
					batch.Delete(oldKey)
					if err := batch.Commit(); err != nil {
						dvid.Errorf("Error on updating label sizes on %s: %v\n", ctx, err)
					}
				}

			case labels.DeltaModSize:
				// TODO -- Make transactional or force sequentially to label-specific goroutine
				// Get old label size
				begKey := NewLabelSizeTKey(delta.Label, 0)
				endKey := NewLabelSizeTKey(delta.Label, math.MaxUint64)
				keys, err := store.KeysInRange(ctx, begKey, endKey)
				if err != nil {
					dvid.Errorf("Unable to get size keys for label %d: %v\n", delta.Label, err)
					continue
				}
				if len(keys) != 1 {
					dvid.Errorf("Cannot modify size of label %d when no prior size recorded!", delta.Label)
					continue
				}

				// Modify label size
				label, oldSize, err := DecodeLabelSizeTKey(keys[0])
				if err != nil {
					dvid.Errorf("Error decoding label size key: %v\n", err)
					continue
				}
				if label != delta.Label {
					dvid.Errorf("Requested size of label %d and got key for label %d!\n", delta.Label, label)
					continue
				}
				// Assumes 63 bits is sufficient for any label
				size := int64(oldSize)
				size += delta.SizeChange
				newSize := uint64(size)

				// Delete old label size keys
				batch := batcher.NewBatch(ctx)
				oldKey := NewSizeLabelTKey(oldSize, label)
				batch.Delete(oldKey)
				oldKey = NewLabelSizeTKey(label, oldSize)
				batch.Delete(oldKey)

				// Add new one
				newKey := NewSizeLabelTKey(newSize, label)
				batch.Put(newKey, dvid.EmptyValue())
				newKey = NewLabelSizeTKey(label, newSize)
				batch.Put(newKey, dvid.EmptyValue())

				if err := batch.Commit(); err != nil {
					dvid.Errorf("Error on updating label sizes on %s: %v\n", ctx, err)
				}

			case labels.DeltaReplaceSize:
				batch := batcher.NewBatch(ctx)
				oldKey := NewSizeLabelTKey(delta.OldSize, delta.Label)
				batch.Delete(oldKey)
				oldKey = NewLabelSizeTKey(delta.Label, delta.OldSize)
				batch.Delete(oldKey)
				newKey := NewSizeLabelTKey(delta.NewSize, delta.Label)
				batch.Put(newKey, dvid.EmptyValue())
				newKey = NewLabelSizeTKey(delta.Label, delta.NewSize)
				batch.Put(newKey, dvid.EmptyValue())
				if err := batch.Commit(); err != nil {
					dvid.Errorf("Error on updating label sizes on %s: %v\n", ctx, err)
				}

			default:
				dvid.Criticalf("Cannot sync labelsz using unexpected delta: %v", msg)
			}
		}
	}
}
Example #30
// Partition returns JSON of differently sized subvolumes that attempt to distribute
// the number of active blocks per subvolume.
func (d *Data) Partition(ctx storage.Context, batchsize int32) ([]byte, error) {
	// Partition Z as perfectly as we can.
	dz := d.MaxZ - d.MinZ + 1
	zleft := dz % batchsize

	// Adjust Z range
	layerBegZ := d.MinZ
	layerEndZ := layerBegZ + batchsize - 1

	// Iterate through blocks in ascending Z, calculating active extents and subvolume coverage.
	// Keep track of current layer = batchsize of blocks in Z.
	var subvolumes subvolumesT
	subvolumes.Subvolumes = []subvolumeT{}
	subvolumes.ROI.MinChunk[2] = d.MinZ
	subvolumes.ROI.MaxChunk[2] = d.MaxZ

	layer := d.newLayer(layerBegZ, layerEndZ)

	db, err := storage.MutableStore()
	if err != nil {
		return nil, err
	}
	merge := true
	var f storage.ChunkFunc = func(chunk *storage.Chunk) error {
		ibytes, err := chunk.K.ClassBytes(keyROI)
		if err != nil {
			return err
		}
		index := new(indexRLE)
		if err = index.IndexFromBytes(ibytes); err != nil {
			return fmt.Errorf("Unable to get indexRLE out of []byte encoding: %v\n", err)
		}

		// If we are in new layer, process last one.
		z := index.start.Value(2)
		if z > layerEndZ {
			// Process last layer
			dvid.Debugf("Computing subvolumes in layer with Z %d -> %d (dz %d)\n",
				layer.minZ, layer.maxZ, layer.maxZ-layer.minZ+1)
			d.addSubvolumes(layer, &subvolumes, batchsize, merge)

			// Init variables for next layer
			layerBegZ = layerEndZ + 1
			layerEndZ += batchsize
			if zleft > 0 {
				layerEndZ++
				zleft--
			}
			layer = d.newLayer(layerBegZ, layerEndZ)
		}

		// Check this block against current layer extents
		layer.extend(index)
		return nil
	}
	mintk := storage.MinTKey(keyROI)
	maxtk := storage.MaxTKey(keyROI)
	err = db.ProcessRange(ctx, mintk, maxtk, &storage.ChunkOp{}, f)
	if err != nil {
		return nil, err
	}

	// Process last incomplete layer if there is one.
	if len(layer.activeBlocks) > 0 {
		dvid.Debugf("Computing subvolumes for final layer Z %d -> %d (dz %d)\n",
			layer.minZ, layer.maxZ, layer.maxZ-layer.minZ+1)
		d.addSubvolumes(layer, &subvolumes, batchsize, merge)
	}
	subvolumes.NumSubvolumes = int32(len(subvolumes.Subvolumes))

	// Encode as JSON
	jsonBytes, err := json.MarshalIndent(subvolumes, "", "    ")
	if err != nil {
		return nil, err
	}
	return jsonBytes, nil
}
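
The remainder handling above spreads dz % batchsize extra Z slices across layers, one per layer. A sketch of just the boundary computation; unlike the code above, which widens layers after the first, this version absorbs the remainder starting with the first layer:

package main

import "fmt"

// layers splits [minZ, maxZ] into batchsize-thick layers, widening the
// first dz%batchsize layers by one slice to absorb the remainder.
func layers(minZ, maxZ, batchsize int32) [][2]int32 {
	dz := maxZ - minZ + 1
	zleft := dz % batchsize
	var out [][2]int32
	for begZ := minZ; begZ <= maxZ; {
		endZ := begZ + batchsize - 1
		if zleft > 0 {
			endZ++
			zleft--
		}
		if endZ > maxZ {
			endZ = maxZ
		}
		out = append(out, [2]int32{begZ, endZ})
		begZ = endZ + 1
	}
	return out
}

func main() {
	fmt.Println(layers(0, 9, 3)) // [[0 3] [4 6] [7 9]]
}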