Example 1
// GetKeys returns all keys stored for this data instance, decoded into strings.
func (d *Data) GetKeys(ctx storage.Context) ([]string, error) {
	db, err := storage.MutableStore()
	if err != nil {
		return nil, err
	}

	// Compute first and last key for range
	first := storage.MinTKey(keyStandard)
	last := storage.MaxTKey(keyStandard)
	keys, err := db.KeysInRange(ctx, first, last)
	if err != nil {
		return nil, err
	}
	keyList := []string{}
	for _, key := range keys {
		keyStr, err := DecodeTKey(key)
		if err != nil {
			return nil, err
		}
		keyList = append(keyList, keyStr)
	}
	return keyList, nil
}
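As a usage note, here is a minimal sketch of calling GetKeys from Example 1 and returning the result as JSON. The helper name listKeysJSON and the JSON encoding are illustrative assumptions, and package/import lines (e.g. encoding/json) are omitted just as in the examples on this page.

// Hypothetical helper: return the keys stored for d as a JSON array.
// Assumes d (*Data) and ctx (storage.Context) are set up as in Example 1.
func listKeysJSON(d *Data, ctx storage.Context) ([]byte, error) {
	keys, err := d.GetKeys(ctx)
	if err != nil {
		return nil, err
	}
	return json.Marshal(keys)
}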
Example 2
// deleteAllVersions removes every key-value pair for the data instance behind
// ctx, across all of its versions, committing deletions in batches.
func (db *LevelDB) deleteAllVersions(ctx storage.Context) error {
	dvid.StartCgo()

	var err error
	var minKey, maxKey storage.Key

	vctx, versioned := ctx.(storage.VersionedCtx)
	if versioned {
		// Don't have to worry about tombstones.  Delete all keys from all versions for this instance id.
		minTKey := storage.MinTKey(storage.TKeyMinClass)
		maxTKey := storage.MaxTKey(storage.TKeyMaxClass)
		minKey, err = vctx.MinVersionKey(minTKey)
		if err != nil {
			return err
		}
		maxKey, err = vctx.MaxVersionKey(maxTKey)
		if err != nil {
			return err
		}
	} else {
		minKey, maxKey = ctx.KeyRange()
	}

	const BATCH_SIZE = 10000
	batch := db.NewBatch(ctx).(*goBatch)

	ro := levigo.NewReadOptions()
	it := db.ldb.NewIterator(ro)
	defer func() {
		it.Close()
		dvid.StopCgo()
	}()

	numKV := 0
	it.Seek(minKey)
	for {
		if err := it.GetError(); err != nil {
			return fmt.Errorf("Error iterating during DeleteAll for %s: %v", ctx, err)
		}
		if it.Valid() {
			itKey := it.Key()
			storage.StoreKeyBytesRead <- len(itKey)
			// Did we pass the final key?
			if bytes.Compare(itKey, maxKey) > 0 {
				break
			}

			batch.WriteBatch.Delete(itKey)
			if (numKV+1)%BATCH_SIZE == 0 {
				if err := batch.Commit(); err != nil {
					dvid.Criticalf("Error on batch commit of DeleteAll at key-value pair %d: %v\n", numKV, err)
					return fmt.Errorf("Error on batch commit of DeleteAll at key-value pair %d: %v", numKV, err)
				}
				batch = db.NewBatch(ctx).(*goBatch)
				dvid.Debugf("Deleted %d key-value pairs in ongoing DELETE ALL for %s.\n", numKV+1, ctx)
			}
			numKV++
			it.Next()
		} else {
			break
		}
	}
	if numKV%BATCH_SIZE != 0 {
		if err := batch.Commit(); err != nil {
			dvid.Criticalf("Error on last batch commit of DeleteAll: %v\n", err)
			return fmt.Errorf("Error on last batch commit of DeleteAll: %v", err)
		}
	}
	dvid.Debugf("Deleted %d key-value pairs via DELETE ALL for %s.\n", numKV, ctx)
	return nil
}
Example 3
// deleteSingleVersion removes the key-value pairs belonging to the single
// version identified by vctx, committing deletions in batches.
func (db *LevelDB) deleteSingleVersion(vctx storage.VersionedCtx) error {
	dvid.StartCgo()

	minTKey := storage.MinTKey(storage.TKeyMinClass)
	maxTKey := storage.MaxTKey(storage.TKeyMaxClass)
	minKey, err := vctx.MinVersionKey(minTKey)
	if err != nil {
		return err
	}
	maxKey, err := vctx.MaxVersionKey(maxTKey)
	if err != nil {
		return err
	}

	const BATCH_SIZE = 10000
	batch := db.NewBatch(vctx).(*goBatch)

	ro := levigo.NewReadOptions()
	it := db.ldb.NewIterator(ro)
	defer func() {
		it.Close()
		dvid.StopCgo()
	}()

	numKV := 0
	it.Seek(minKey)
	deleteVersion := vctx.VersionID()
	for {
		if err := it.GetError(); err != nil {
			return fmt.Errorf("Error iterating during DeleteAll for %s: %v", vctx, err)
		}
		if it.Valid() {
			itKey := it.Key()
			storage.StoreKeyBytesRead <- len(itKey)
			// Did we pass the final key?
			if bytes.Compare(itKey, maxKey) > 0 {
				break
			}
			_, v, _, err := storage.DataKeyToLocalIDs(itKey)
			if err != nil {
				return fmt.Errorf("Error on DELETE ALL for version %d: %v", vctx.VersionID(), err)
			}
			if v == deleteVersion {
				batch.WriteBatch.Delete(itKey)
				if (numKV+1)%BATCH_SIZE == 0 {
					if err := batch.Commit(); err != nil {
						dvid.Criticalf("Error on batch commit of DeleteAll at key-value pair %d: %v\n", numKV, err)
						return fmt.Errorf("Error on batch commit of DeleteAll at key-value pair %d: %v", numKV, err)
					}
					batch = db.NewBatch(vctx).(*goBatch)
					dvid.Debugf("Deleted %d key-value pairs in ongoing DELETE ALL for %s.\n", numKV+1, vctx)
				}
				numKV++
			}

			it.Next()
		} else {
			break
		}
	}
	if numKV%BATCH_SIZE != 0 {
		if err := batch.Commit(); err != nil {
			dvid.Criticalf("Error on last batch commit of DeleteAll: %v\n", err)
			return fmt.Errorf("Error on last batch commit of DeleteAll: %v", err)
		}
	}
	dvid.Debugf("Deleted %d key-value pairs via DELETE ALL for %s.\n", numKV, vctx)
	return nil
}
Example 4
// Partition returns JSON of differently sized subvolumes that attempt to distribute
// the number of active blocks per subvolume.
func (d *Data) Partition(ctx storage.Context, batchsize int32) ([]byte, error) {
	// Partition Z as perfectly as we can.
	dz := d.MaxZ - d.MinZ + 1
	zleft := dz % batchsize

	// Adjust Z range
	layerBegZ := d.MinZ
	layerEndZ := layerBegZ + batchsize - 1

	// Iterate through blocks in ascending Z, calculating active extents and subvolume coverage.
	// Keep track of current layer = batchsize of blocks in Z.
	var subvolumes subvolumesT
	subvolumes.Subvolumes = []subvolumeT{}
	subvolumes.ROI.MinChunk[2] = d.MinZ
	subvolumes.ROI.MaxChunk[2] = d.MaxZ

	layer := d.newLayer(layerBegZ, layerEndZ)

	db, err := storage.SmallDataStore()
	if err != nil {
		return nil, err
	}
	merge := true
	var f storage.ChunkFunc = func(chunk *storage.Chunk) error {
		ibytes, err := chunk.K.ClassBytes(keyROI)
		if err != nil {
			return err
		}
		index := new(indexRLE)
		if err = index.IndexFromBytes(ibytes); err != nil {
			return fmt.Errorf("Unable to get indexRLE out of []byte encoding: %v\n", err)
		}

		// If we are in new layer, process last one.
		z := index.start.Value(2)
		if z > layerEndZ {
			// Process last layer
			dvid.Debugf("Computing subvolumes in layer with Z %d -> %d (dz %d)\n",
				layer.minZ, layer.maxZ, layer.maxZ-layer.minZ+1)
			d.addSubvolumes(layer, &subvolumes, batchsize, merge)

			// Init variables for next layer
			layerBegZ = layerEndZ + 1
			layerEndZ += batchsize
			if zleft > 0 {
				layerEndZ++
				zleft--
			}
			layer = d.newLayer(layerBegZ, layerEndZ)
		}

		// Check this block against current layer extents
		layer.extend(index)
		return nil
	}
	mintk := storage.MinTKey(keyROI)
	maxtk := storage.MaxTKey(keyROI)
	err = db.ProcessRange(ctx, mintk, maxtk, &storage.ChunkOp{}, f)
	if err != nil {
		return nil, err
	}

	// Process last incomplete layer if there is one.
	if len(layer.activeBlocks) > 0 {
		dvid.Debugf("Computing subvolumes for final layer Z %d -> %d (dz %d)\n",
			layer.minZ, layer.maxZ, layer.maxZ-layer.minZ+1)
		d.addSubvolumes(layer, &subvolumes, batchsize, merge)
	}
	subvolumes.NumSubvolumes = int32(len(subvolumes.Subvolumes))

	// Encode as JSON
	jsonBytes, err := json.MarshalIndent(subvolumes, "", "    ")
	if err != nil {
		return nil, err
	}
	return jsonBytes, err
}
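A minimal sketch of invoking Partition from Example 4; the batch size of 8 blocks in Z, the helper name printPartition, and the printing are illustrative assumptions.

// Hypothetical caller: partition the ROI into layers of 8 blocks in Z and
// print the resulting JSON. Assumes d (*Data) and ctx (storage.Context)
// are available as in the example above.
func printPartition(d *Data, ctx storage.Context) error {
	jsonBytes, err := d.Partition(ctx, 8)
	if err != nil {
		return err
	}
	fmt.Printf("ROI partition: %s\n", string(jsonBytes))
	return nil
}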
Example 5
// foregroundROI scans every voxel block of d and builds an ROI in dest from
// the spans of blocks that contain at least one non-background voxel.
func (d *Data) foregroundROI(v dvid.VersionID, dest *roi.Data, background dvid.PointNd) {
	dest.Ready = false

	store, err := storage.MutableStore()
	if err != nil {
		dvid.Criticalf("Data type imageblk had error initializing store: %v\n", err)
		return
	}

	timedLog := dvid.NewTimeLog()
	timedLog.Infof("Starting foreground ROI %q for %s", dest.DataName(), d.DataName())

	// Iterate through all voxel blocks, loading and then checking blocks
	// for any foreground voxels.
	ctx := datastore.NewVersionedCtx(d, v)

	backgroundBytes := make([]byte, len(background))
	for i, b := range background {
		backgroundBytes[i] = byte(b)
	}

	const BATCH_SIZE = 1000
	var numBatches int
	var span *dvid.Span
	spans := []dvid.Span{}

	var f storage.ChunkFunc = func(chunk *storage.Chunk) error {
		if chunk == nil || chunk.V == nil {
			return nil
		}
		data, _, err := dvid.DeserializeData(chunk.V, true)
		if err != nil {
			return fmt.Errorf("Error decoding block: %v\n", err)
		}
		numVoxels := d.BlockSize().Prod()
		var foreground bool
		for i := int64(0); i < numVoxels; i++ {
			isBackground := false
			for _, b := range backgroundBytes {
				if data[i] == b {
					isBackground = true
					break
				}
			}
			if !isBackground {
				foreground = true
				break
			}
		}
		if foreground {
			indexZYX, err := DecodeTKey(chunk.K)
			if err != nil {
				return fmt.Errorf("Error decoding voxel block key: %v\n", err)
			}
			x, y, z := indexZYX.Unpack()
			if span == nil {
				span = &dvid.Span{z, y, x, x}
			} else if !span.Extends(x, y, z) {
				spans = append(spans, *span)
				if len(spans) >= BATCH_SIZE {
					init := (numBatches == 0)
					numBatches++
					go func(spans []dvid.Span) {
						if err := dest.PutSpans(v, spans, init); err != nil {
							dvid.Errorf("Error in storing ROI: %v\n", err)
						} else {
							timedLog.Debugf("-- Wrote batch %d of spans for foreground ROI %q", numBatches, dest.DataName())
						}
					}(spans)
					spans = []dvid.Span{}
				}
				span = &dvid.Span{z, y, x, x}
			}
		}
		server.BlockOnInteractiveRequests("voxels [compute foreground ROI]")
		return nil
	}

	minTKey := storage.MinTKey(keyImageBlock)
	maxTKey := storage.MaxTKey(keyImageBlock)

	err = store.ProcessRange(ctx, minTKey, maxTKey, &storage.ChunkOp{}, f)
	if err != nil {
		dvid.Errorf("Error in processing chunks in ROI: %v\n", err)
		return
	}
	if span != nil {
		spans = append(spans, *span)
	}

	// Save new ROI
	if len(spans) > 0 {
		if err := dest.PutSpans(v, spans, numBatches == 0); err != nil {
			dvid.Errorf("Error in storing ROI: %v\n", err)
			return
		}
	}
	timedLog.Infof("Created foreground ROI %q for %s", dest.DataName(), d.DataName())
	dest.Ready = true
}
Example 6
// DeleteAll deletes all key-value pairs associated with a context (data instance and version).
func (db *LevelDB) DeleteAll(ctx storage.Context, allVersions bool) error {
	if ctx == nil {
		return fmt.Errorf("Received nil context in Delete()")
	}
	dvid.StartCgo()

	// Don't have to worry about tombstones.  Delete all keys from all versions for this instance id.
	minTKey := storage.MinTKey(storage.TKeyMinClass)
	maxTKey := storage.MaxTKey(storage.TKeyMaxClass)
	vctx, ok := ctx.(storage.VersionedCtx)
	if !ok {
		return fmt.Errorf("Non-versioned context passed to DELETE ALL VERSIONS in basholeveldb driver: %v", ctx)
	}
	minKey, err := vctx.MinVersionKey(minTKey)
	if err != nil {
		return err
	}
	maxKey, err := vctx.MaxVersionKey(maxTKey)
	if err != nil {
		return err
	}

	const BATCH_SIZE = 10000
	batch := db.NewBatch(ctx).(*goBatch)

	ro := levigo.NewReadOptions()
	it := db.ldb.NewIterator(ro)
	defer func() {
		it.Close()
		dvid.StopCgo()
	}()

	numKV := 0
	it.Seek(minKey)
	deleteVersion := ctx.VersionID()
	for {
		if it.Valid() {
			itKey := it.Key()
			storage.StoreKeyBytesRead <- len(itKey)
			// Did we pass the final key?
			if bytes.Compare(itKey, maxKey) > 0 {
				break
			}
			if !allVersions {
				_, v, _, err := storage.DataKeyToLocalIDs(itKey)
				if err != nil {
					return fmt.Errorf("Error on DELETE ALL for version %d: %v", ctx.VersionID(), err)
				}
				if v != deleteVersion {
					it.Next()
					continue
				}
			}
			batch.WriteBatch.Delete(itKey)
			if (numKV+1)%BATCH_SIZE == 0 {
				if err := batch.Commit(); err != nil {
					batch.Close()
					return fmt.Errorf("Error on DELETE ALL at key-value pair %d: %v", numKV, err)
				}
				batch = db.NewBatch(ctx).(*goBatch)
			}
			numKV++

			it.Next()
		} else {
			break
		}
	}
	if numKV%BATCH_SIZE != 0 {
		if err := batch.Commit(); err != nil {
			batch.Close()
			return fmt.Errorf("Error on last batch DELETE: %v\n", err)
		}
	}
	dvid.Debugf("Deleted %d key-value pairs via DELETE ALL for %s.\n", numKV, ctx)
	return nil
}
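Finally, a minimal sketch showing the two modes of DeleteAll from Example 6, deleting a single version versus deleting across all versions; the wrapper name wipeInstance is an illustrative assumption.

// Hypothetical wrapper: delete key-value pairs for one version, then for
// every version of the instance. Assumes db (*LevelDB) and a versioned
// context vctx are already available.
func wipeInstance(db *LevelDB, vctx storage.VersionedCtx) error {
	// Remove key-value pairs only for the version in vctx.
	if err := db.DeleteAll(vctx, false); err != nil {
		return err
	}
	// Remove key-value pairs across all versions of the instance.
	return db.DeleteAll(vctx, true)
}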