Example #1
func (d *Data) deleteBlock(ctx *datastore.VersionedCtx, block labels.DeleteBlock, batcher storage.KeyValueBatcher) {
	batch := batcher.NewBatch(ctx)

	// Iterate through this block of labels and collect the set of non-zero labels.
	blockBytes := len(block.Data)
	if blockBytes != int(d.BlockSize.Prod())*8 {
		dvid.Criticalf("Deserialized label block %d bytes, not uint64 size times %d block elements\n",
			blockBytes, d.BlockSize.Prod())
		return
	}
	labelSet := make(map[uint64]struct{})
	for i := 0; i < blockBytes; i += 8 {
		label := binary.LittleEndian.Uint64(block.Data[i : i+8])
		if label != 0 {
			labelSet[label] = struct{}{}
		}
	}

	// Go through all non-zero labels and delete the corresponding labelvol k/v pair.
	zyx := block.Index.ToIZYXString()
	for label := range labelSet {
		tk := NewTKey(label, zyx)
		batch.Delete(tk)
	}

	if err := batch.Commit(); err != nil {
		dvid.Criticalf("Bad sync in labelvol.  Couldn't commit block %s\n", zyx.Print())
	}
}
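
For concreteness, a minimal sketch of the size invariant the guard above enforces, assuming a hypothetical 32x32x32 block size (the real dimensions come from d.BlockSize):

package main

import "fmt"

func main() {
	// Hypothetical 32x32x32 block: the product of the block dimensions times
	// 8 bytes per uint64 label gives the expected serialized length.
	const nx, ny, nz = 32, 32, 32
	expected := nx * ny * nz * 8
	fmt.Println(expected) // 262144 bytes per label block
}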
Example #2
// Middleware that recovers from panics, sends email if a notification email
// has been provided, and logs the issue.
func recoverHandler(c *web.C, h http.Handler) http.Handler {
	fn := func(w http.ResponseWriter, r *http.Request) {
		reqID := middleware.GetReqID(*c)

		defer func() {
			if err := recover(); err != nil {
				buf := make([]byte, 1<<16)
				size := runtime.Stack(buf, false)
				stackTrace := string(buf[0:size])
				message := fmt.Sprintf("Panic detected on request %s:\n%+v\nIP: %v, URL: %s\nStack trace:\n%s\n",
					reqID, err, r.RemoteAddr, r.URL.Path, stackTrace)
				dvid.Criticalf("%s\n", message)

				if err := SendNotification(message, nil); err != nil {
					dvid.Criticalf("Couldn't send email notifcation: %v\n", err)
				}

				http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
			}
		}()

		h.ServeHTTP(w, r)
	}
	return http.HandlerFunc(fn)
}
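
A sketch of how this middleware might be mounted on a Goji v1 mux, assuming recoverHandler above is in scope; middleware.RequestID is what populates the ID that GetReqID reads, and the route is illustrative:

package main

import (
	"net/http"

	"github.com/zenazn/goji/web"
	"github.com/zenazn/goji/web/middleware"
)

func main() {
	mux := web.New()
	mux.Use(middleware.RequestID) // populates the ID that GetReqID reads
	mux.Use(recoverHandler)       // the middleware defined above
	mux.Get("/boom", func(w http.ResponseWriter, r *http.Request) {
		panic("simulated failure") // recovered, logged, and returned as a 500
	})
	http.ListenAndServe(":8000", mux)
}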
Example #3
// Goroutine that handles splits across a lot of blocks for one label.
func (d *Data) splitBlock(ctx *datastore.VersionedCtx, op splitOp) {
	defer d.MutDone(op.mutID)

	store, err := d.GetOrderedKeyValueDB()
	if err != nil {
		dvid.Errorf("Data type labelblk had error initializing store: %v\n", err)
		return
	}

	// Read the block.
	tk := NewTKeyByCoord(op.block)
	data, err := store.Get(ctx, tk)
	if err != nil {
		dvid.Errorf("Error on GET of labelblk with coord string %v\n", []byte(op.block))
		return
	}
	if data == nil {
		dvid.Errorf("nil label block where split was done, coord %v\n", []byte(op.block))
		return
	}
	blockData, _, err := dvid.DeserializeData(data, true)
	if err != nil {
		dvid.Criticalf("unable to deserialize label block in '%s' key %v: %v\n", d.DataName(), []byte(op.block), err)
		return
	}
	blockBytes := int(d.BlockSize().Prod() * 8)
	if len(blockData) != blockBytes {
		dvid.Criticalf("splitBlock: coord %v got back %d bytes, expected %d bytes\n", []byte(op.block), len(blockData), blockBytes)
		return
	}

	// Modify the block using either voxel-level changes or coarser block-level mods.
	if op.rles != nil {
		if err := d.storeRLEs(blockData, op.newLabel, op.block, op.rles); err != nil {
			dvid.Errorf("can't store label %d RLEs into block %s: %v\n", op.newLabel, op.block, err)
			return
		}
	} else {
		// We are doing a coarse split and will replace all voxels of the old label.
		if err := d.replaceLabel(blockData, op.oldLabel, op.newLabel); err != nil {
			dvid.Errorf("can't replace label %d with %d in block %s: %v\n", op.oldLabel, op.newLabel, op.block, err)
			return
		}
	}

	// Write the modified block.
	serialization, err := dvid.SerializeData(blockData, d.Compression(), d.Checksum())
	if err != nil {
		dvid.Criticalf("Unable to serialize block %s in %q: %v\n", op.block, d.DataName(), err)
		return
	}
	if err := store.Put(ctx, tk, serialization); err != nil {
		dvid.Errorf("Error in putting key %v: %v\n", tk, err)
	}

	// Notify any downstream downres instance.
	d.publishBlockChange(ctx.VersionID(), op.mutID, op.block, blockData)
}
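
The read-modify-write cycle above relies on a serialize/deserialize round trip; a small sketch of just that pairing, using the same calls with the instance's configured compression and checksum (the helper name is ours):

// roundTrip sketches the invariant splitBlock depends on: bytes serialized
// with the instance's settings deserialize back to the raw label data.
func roundTrip(d *Data, raw []byte) ([]byte, error) {
	serialization, err := dvid.SerializeData(raw, d.Compression(), d.Checksum())
	if err != nil {
		return nil, err
	}
	deserialized, _, err := dvid.DeserializeData(serialization, true)
	return deserialized, err
}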
Example #4
// DeleteRange removes all key-value pairs with keys in the given range.
func (db *LevelDB) DeleteRange(ctx storage.Context, kStart, kEnd storage.TKey) error {
	if ctx == nil {
		return fmt.Errorf("Received nil context in DeleteRange()")
	}

	// For leveldb, we just iterate over keys in range and delete each one using batch.
	const BATCH_SIZE = 10000
	batch := db.NewBatch(ctx).(*goBatch)

	ch := make(chan errorableKV)

	// Run the keys-only range query in a goroutine.
	go func() {
		if !ctx.Versioned() {
			db.unversionedRange(ctx, kStart, kEnd, ch, true)
		} else {
			db.versionedRange(ctx.(storage.VersionedCtx), kStart, kEnd, ch, true)
		}
	}()

	// Consume the key-value pairs.
	numKV := 0
	for {
		result := <-ch
		if result.error != nil {
			return result.error
		}
		if result.KeyValue == nil {
			break
		}

		// The key coming down the channel is the full key, not just a type-specific key, so there's no need to construct it from the context.
		// If versioned, write a tombstone using current version id since we don't want to delete locked ancestors.
		// If unversioned, just delete.
		tk, err := ctx.TKeyFromKey(result.KeyValue.K)
		if err != nil {
			return err
		}
		batch.Delete(tk)

		if (numKV+1)%BATCH_SIZE == 0 {
			if err := batch.Commit(); err != nil {
				dvid.Criticalf("Error on batch commit of DeleteRange at key-value pair %d: %v\n", numKV, err)
				return fmt.Errorf("Error on batch commit of DeleteRange at key-value pair %d: %v\n", numKV, err)
			}
			batch = db.NewBatch(ctx).(*goBatch)
		}
		numKV++
	}
	if numKV%BATCH_SIZE != 0 {
		if err := batch.Commit(); err != nil {
			dvid.Criticalf("Error on last batch commit of DeleteRange: %v\n", err)
			return fmt.Errorf("Error on last batch commit of DeleteRange: %v\n", err)
		}
	}
	dvid.Debugf("Deleted %d key-value pairs via delete range for %s.\n", numKV, ctx)
	return nil
}
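
The range query above streams results over a channel and marks completion with a nil key-value; a self-contained sketch of that sentinel protocol, with the errorableKV shape inferred from how the loop consumes it:

package main

import "fmt"

// KeyValue and errorableKV mirror the assumed channel payload: an embedded
// key-value pointer plus an embedded error, with nil KeyValue as the sentinel.
type KeyValue struct{ K, V []byte }

type errorableKV struct {
	*KeyValue
	error
}

func main() {
	ch := make(chan errorableKV)
	go func() {
		ch <- errorableKV{&KeyValue{K: []byte("key-a")}, nil}
		ch <- errorableKV{&KeyValue{K: []byte("key-b")}, nil}
		ch <- errorableKV{nil, nil} // sentinel: range query finished
	}()
	for {
		result := <-ch
		if result.error != nil {
			fmt.Println("range query failed:", result.error)
			return
		}
		if result.KeyValue == nil {
			break
		}
		fmt.Printf("would delete key %s\n", result.KeyValue.K)
	}
	fmt.Println("done")
}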
Example #5
// Goroutine that handles relabeling of blocks during a merge operation.
// Since the same block coordinate always gets mapped to the same goroutine, we handle
// concurrency by serializing GET/PUT for a particular block coordinate.
func (d *Data) mergeBlock(in <-chan mergeOp) {
	store, err := storage.MutableStore()
	if err != nil {
		dvid.Errorf("Data type labelblk had error initializing store: %v\n", err)
		return
	}
	blockBytes := int(d.BlockSize().Prod() * 8)

	for op := range in {
		tk := NewTKeyByCoord(op.izyx)
		data, err := store.Get(op.ctx, tk)
		if err != nil {
			dvid.Errorf("Error on GET of labelblk with coord string %q\n", op.izyx)
			op.wg.Done()
			continue
		}
		if data == nil {
			dvid.Errorf("nil label block where merge was done!\n")
			op.wg.Done()
			continue
		}

		blockData, _, err := dvid.DeserializeData(data, true)
		if err != nil {
			dvid.Criticalf("unable to deserialize label block in '%s': %v\n", d.DataName(), err)
			op.wg.Done()
			continue
		}
		if len(blockData) != blockBytes {
			dvid.Criticalf("After labelblk deserialization got back %d bytes, expected %d bytes\n", len(blockData), blockBytes)
			op.wg.Done()
			continue
		}

		// Iterate through this block of labels and relabel if label in merge.
		for i := 0; i < blockBytes; i += 8 {
			label := binary.LittleEndian.Uint64(blockData[i : i+8])
			if _, merged := op.Merged[label]; merged {
				binary.LittleEndian.PutUint64(blockData[i:i+8], op.Target)
			}
		}

		// Store this block.
		serialization, err := dvid.SerializeData(blockData, d.Compression(), d.Checksum())
		if err != nil {
			dvid.Criticalf("Unable to serialize block in %q: %v\n", d.DataName(), err)
			op.wg.Done()
			continue
		}
		if err := store.Put(op.ctx, tk, serialization); err != nil {
			dvid.Errorf("Error in putting key %v: %v\n", tk, err)
		}
		op.wg.Done()
	}
}
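
The relabel loop above is self-contained enough to read as a helper; a sketch of it extracted for clarity (DVID inlines it, and the merged-set type here is assumed):

// relabel rewrites, in place, every little-endian uint64 label found in the
// merged set to the target label.
func relabel(blockData []byte, merged map[uint64]struct{}, target uint64) {
	for i := 0; i+8 <= len(blockData); i += 8 {
		label := binary.LittleEndian.Uint64(blockData[i : i+8])
		if _, ok := merged[label]; ok {
			binary.LittleEndian.PutUint64(blockData[i:i+8], target)
		}
	}
}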
Example #6
// mergeBlock handles relabeling of a block during a merge operation.
func (d *Data) mergeBlock(ctx *datastore.VersionedCtx, op mergeOp) {
	defer d.MutDone(op.mutID)

	store, err := d.GetKeyValueDB()
	if err != nil {
		dvid.Errorf("Data type labelblk had error initializing store: %v\n", err)
		return
	}

	tk := NewTKeyByCoord(op.block)
	data, err := store.Get(ctx, tk)
	if err != nil {
		dvid.Errorf("Error on GET of labelblk with coord string %q\n", op.block)
		return
	}
	if data == nil {
		dvid.Errorf("nil label block where merge was done!\n")
		return
	}

	blockData, _, err := dvid.DeserializeData(data, true)
	if err != nil {
		dvid.Criticalf("unable to deserialize label block in '%s': %v\n", d.DataName(), err)
		return
	}
	blockBytes := int(d.BlockSize().Prod() * 8)
	if len(blockData) != blockBytes {
		dvid.Criticalf("After labelblk deserialization got back %d bytes, expected %d bytes\n", len(blockData), blockBytes)
		return
	}

	// Iterate through this block of labels and relabel if label in merge.
	for i := 0; i < blockBytes; i += 8 {
		label := binary.LittleEndian.Uint64(blockData[i : i+8])
		if _, merged := op.Merged[label]; merged {
			binary.LittleEndian.PutUint64(blockData[i:i+8], op.Target)
		}
	}

	// Store this block.
	serialization, err := dvid.SerializeData(blockData, d.Compression(), d.Checksum())
	if err != nil {
		dvid.Criticalf("Unable to serialize block in %q: %v\n", d.DataName(), err)
		return
	}
	if err := store.Put(ctx, tk, serialization); err != nil {
		dvid.Errorf("Error in putting key %v: %v\n", tk, err)
	}

	// Notify any downstream downres instance.
	d.publishBlockChange(ctx.VersionID(), op.mutID, op.block, blockData)
}
Example #7
// Put writes a value with given key.
func (db *LevelDB) Put(ctx storage.Context, tk storage.TKey, v []byte) error {
	if ctx == nil {
		return fmt.Errorf("Received nil context in Put()")
	}
	wo := db.options.WriteOptions

	var err error
	key := ctx.ConstructKey(tk)
	if !ctx.Versioned() {
		dvid.StartCgo()
		err = db.ldb.Put(wo, key, v)
		dvid.StopCgo()
	} else {
		vctx, ok := ctx.(storage.VersionedCtx)
		if !ok {
			return fmt.Errorf("Non-versioned context that says it's versioned received in Put(): %v", ctx)
		}
		tombstoneKey := vctx.TombstoneKey(tk)
		batch := db.NewBatch(vctx).(*goBatch)
		batch.WriteBatch.Delete(tombstoneKey)
		batch.WriteBatch.Put(key, v)
		if err = batch.Commit(); err != nil {
			dvid.Criticalf("Error on batch commit of Put: %v\n", err)
			err = fmt.Errorf("Error on batch commit of Put: %v", err)
		}
	}

	storage.StoreKeyBytesWritten <- len(key)
	storage.StoreValueBytesWritten <- len(v)
	return err
}
Example #8
// NewBatch returns an implementation that allows batch writes
func (db *BigTable) NewBatch(ctx storage.Context) storage.Batch {
	if ctx == nil {
		dvid.Criticalf("Received nil context in NewBatch()")
		return nil
	}
	return &goBatch{db, ctx, []storage.TKeyValue{}}
}
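
A sketch of the intended call pattern for such a batch, queueing mutations and committing once; the helper and its arguments are hypothetical:

// putAndDelete queues one write and one delete in a single batch commit;
// names and arguments here are illustrative only.
func putAndDelete(batcher storage.KeyValueBatcher, ctx storage.Context,
	putKey, delKey storage.TKey, value []byte) error {
	batch := batcher.NewBatch(ctx)
	if batch == nil {
		return fmt.Errorf("could not create batch from context")
	}
	batch.Put(putKey, value)
	batch.Delete(delKey)
	return batch.Commit()
}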
Example #9
func (d *Data) publishDownresCommit(v dvid.VersionID, mutID uint64) {
	evt := datastore.SyncEvent{Data: d.DataUUID(), Event: DownsizeCommitEvent}
	msg := datastore.SyncMessage{Event: DownsizeCommitEvent, Version: v, Delta: mutID}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		dvid.Criticalf("unable to notify subscribers of event %s: %v\n", evt, err)
	}
}
Example #10
// Processes each change as we get it.
// TODO -- accumulate larger # of changes before committing to prevent
// excessive compaction time?  This assumes LSM storage engine, which
// might not always hold in future, so stick with incremental update
// until proven to be a bottleneck.
func (d *Data) handleBlockEvent(in <-chan datastore.SyncMessage, done <-chan struct{}) {
	store, err := storage.SmallDataStore()
	if err != nil {
		dvid.Errorf("Data type labelvol had error initializing store: %v\n", err)
		return
	}
	batcher, ok := store.(storage.KeyValueBatcher)
	if !ok {
		dvid.Errorf("Data type labelvol requires batch-enabled store, which %q is not\n", store)
		return
	}

	for msg := range in {
		select {
		case <-done:
			return
		default:
			ctx := datastore.NewVersionedCtx(d, msg.Version)
			switch delta := msg.Delta.(type) {
			case imageblk.Block:
				d.ingestBlock(ctx, delta, batcher)
			case labels.DeleteBlock:
				d.deleteBlock(ctx, delta, batcher)
			default:
				dvid.Criticalf("Cannot sync labelvol from block event.  Got unexpected delta: %v\n", msg)
			}
		}
	}
}
Example #11
func (d *Data) syncSplit(name dvid.InstanceName, in <-chan datastore.SyncMessage, done <-chan struct{}) {
	// Start N goroutines to process blocks.  We don't need transactional support for
	// the GET-PUT combo if each spatial coordinate (block) is only handled serially by one goroutine.
	const numprocs = 32
	const splitBufSize = 10
	var pch [numprocs]chan splitOp
	for i := 0; i < numprocs; i++ {
		pch[i] = make(chan splitOp, splitBufSize)
		go d.splitBlock(pch[i])
	}

	for msg := range in {
		select {
		case <-done:
			for i := 0; i < numprocs; i++ {
				close(pch[i])
			}
			return
		default:
			switch delta := msg.Delta.(type) {
			case labels.DeltaSplit:
				ctx := datastore.NewVersionedCtx(d, msg.Version)
				n := delta.OldLabel % numprocs
				pch[n] <- splitOp{delta, *ctx}
			case labels.DeltaSplitStart:
				// Mark the old label as being under transition.
				iv := dvid.InstanceVersion{name, msg.Version}
				splitCache.Incr(iv, delta.OldLabel)
			default:
				dvid.Criticalf("bad delta in split event: %v\n", delta)
				continue
			}
		}
	}
}
Example #12
// Serve starts HTTP and RPC servers.
func Serve() {
	// Use defaults if not set via TOML config file.
	if tc.Server.Host == "" {
		tc.Server.Host = DefaultHost
	}
	if tc.Server.HTTPAddress == "" {
		tc.Server.HTTPAddress = DefaultWebAddress
	}
	if tc.Server.RPCAddress == "" {
		tc.Server.RPCAddress = DefaultRPCAddress
	}

	dvid.Infof("------------------\n")
	dvid.Infof("DVID code version: %s\n", gitVersion)
	dvid.Infof("Serving HTTP on %s (host alias %q)\n", tc.Server.HTTPAddress, tc.Server.Host)
	dvid.Infof("Serving command-line use via RPC %s\n", tc.Server.RPCAddress)
	dvid.Infof("Using web client files from %s\n", tc.Server.WebClient)
	dvid.Infof("Using %d of %d logical CPUs for DVID.\n", dvid.NumCPU, runtime.NumCPU())

	// Launch the web server
	go serveHTTP()

	// Launch the RPC server
	go func() {
		if err := rpc.StartServer(tc.Server.RPCAddress); err != nil {
			dvid.Criticalf("Could not start RPC server: %v\n", err)
		}
	}()

	<-shutdownCh
}
Example #13
// Delete removes a value with given key.
func (db *LevelDB) Delete(ctx storage.Context, tk storage.TKey) error {
	if db == nil {
		return fmt.Errorf("Can't call Delete on nil LevelDB")
	}
	if ctx == nil {
		return fmt.Errorf("Received nil context in Delete()")
	}
	wo := db.options.WriteOptions

	var err error
	key := ctx.ConstructKey(tk)
	if !ctx.Versioned() {
		dvid.StartCgo()
		err = db.ldb.Delete(wo, key)
		dvid.StopCgo()
	} else {
		vctx, ok := ctx.(storage.VersionedCtx)
		if !ok {
			return fmt.Errorf("Non-versioned context that says it's versioned received in Delete(): %v", ctx)
		}
		tombstoneKey := vctx.TombstoneKey(tk)
		batch := db.NewBatch(vctx).(*goBatch)
		batch.WriteBatch.Delete(key)
		batch.WriteBatch.Put(tombstoneKey, dvid.EmptyValue())
		if err = batch.Commit(); err != nil {
			dvid.Criticalf("Error on batch commit of Delete: %v\n", err)
			err = fmt.Errorf("Error on batch commit of Delete: %v", err)
		}
	}

	return err
}
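
Put (Example #7) and Delete above are mirror images: Put clears any tombstone at the current version and writes the value, while Delete writes a tombstone and removes the value, so locked ancestor versions keep their data. A toy model of that visibility rule, illustrative only and not DVID's API:

package main

import "fmt"

// entry models one versioned write: either a value or a tombstone.
type entry struct {
	version int
	deleted bool
	value   string
}

// visible returns the value seen at a version: the newest entry at or below
// that version wins, and a tombstone hides the key.
func visible(history []entry, version int) (string, bool) {
	best := -1
	var val string
	var dead bool
	for _, e := range history {
		if e.version <= version && e.version > best {
			best, val, dead = e.version, e.value, e.deleted
		}
	}
	return val, best >= 0 && !dead
}

func main() {
	history := []entry{
		{version: 1, value: "a"},    // Put at version 1
		{version: 3, deleted: true}, // Delete (tombstone) at version 3
	}
	fmt.Println(visible(history, 2)) // "a" true: ancestor still sees the value
	fmt.Println(visible(history, 4)) // "" false: the tombstone hides it
}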
Example #14
// Handle upstream mods on a labelblk we are downresing.
func (d *Data) downsizeAdd(v dvid.VersionID, delta deltaBlock) {
	defer d.MutDone(delta.mutID)

	lobuf, err := d.getLoresCache(v, delta.block)
	if err != nil {
		dvid.Criticalf("unable to initialize block cache for labelblk %q: %v\n", d.DataName(), err)
		return
	}

	// Offsets from corner of 2x2x2 voxel neighborhood to neighbor in highres block.
	blockSize := d.BlockSize()
	bx := blockSize.Value(0) * 8
	bxy := blockSize.Value(1) * bx

	var off [8]int32
	off[0] = 0
	off[1] = 8
	off[2] = bx
	off[3] = bx + 8
	off[4] = bxy
	off[5] = bxy + off[1]
	off[6] = bxy + off[2]
	off[7] = bxy + off[3]

	var lo int32 // lores byte offset
	for z := int32(0); z < blockSize.Value(2); z += 2 {
		for y := int32(0); y < blockSize.Value(1); y += 2 {
			hi := z*bxy + y*bx // hires byte offset to 2^3 neighborhood corner
			for x := int32(0); x < blockSize.Value(0); x += 2 {
				counts := make(map[uint64]int)
				for n := 0; n < 8; n++ {
					i := hi + off[n]
					label := binary.LittleEndian.Uint64(delta.data[i : i+8])
					counts[label]++
				}

				// get best label and if there's a tie use smaller label
				var most int
				var best uint64
				for label, count := range counts {
					if count > most {
						best = label
						most = count
					} else if count == most && label < best {
						best = label
					}
				}

				// store into downres cache
				//dvid.Infof("Data %q: best %d for (%d,%d,%d)\n", d.DataName(), best, x/2, y/2, z/2)
				binary.LittleEndian.PutUint64(lobuf[lo:lo+8], best)

				// Move to next corner of 8 block voxels
				lo += 8
				hi += 16 // 2 * label byte size
			}
		}
	}
}
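
To make the offset table above concrete, a standalone sketch computing the eight neighbor offsets, assuming a hypothetical 32x32x32 block (the real dimensions come from d.BlockSize()):

package main

import "fmt"

func main() {
	// Assumed 32x32 slice dimensions.
	const nx, ny int32 = 32, 32
	bx := nx * 8   // bytes per row of uint64 labels
	bxy := ny * bx // bytes per z-slice

	// Byte offsets from a 2x2x2 neighborhood's corner to its eight voxels.
	off := [8]int32{0, 8, bx, bx + 8, bxy, bxy + 8, bxy + bx, bxy + bx + 8}
	fmt.Println(off) // [0 8 256 264 8192 8200 8448 8456]
}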
Example #15
func (u *Updater) StopUpdate() {
	u.Lock()
	u.updates--
	if u.updates < 0 {
		dvid.Criticalf("StopUpdate() called more than StartUpdate().  Issue with data instance code.\n")
	}
	u.Unlock()
}
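
This counter assumes strict pairing; a sketch of the discipline a caller might follow so early returns can't unbalance it (the method body and validate helper are hypothetical):

// doMutation sketches the pairing rule: one StopUpdate for every StartUpdate,
// guaranteed by defer even on early error returns.
func (d *Data) doMutation() error {
	d.StartUpdate()
	defer d.StopUpdate()

	if err := d.validate(); err != nil { // hypothetical helper
		return err // StopUpdate still runs via defer
	}
	// ... perform the mutation ...
	return nil
}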
Example #16
func (batch *goBatch) Put(tk storage.TKey, v []byte) {
	if batch == nil || batch.ctx == nil {
		dvid.Criticalf("Received nil batch or nil batch context in batch.Put()\n")
		return
	}
	key := batch.ctx.ConstructKey(tk)
	batch.kvs = append(batch.kvs, storage.KeyValue{key, v})
}
Example #17
func hashStr(s dvid.IZYXString, n int) int {
	hash := fnv.New32()
	_, err := hash.Write([]byte(s))
	if err != nil {
		dvid.Criticalf("Could not write to fnv hash in labelblk.hashStr()")
		return 0
	}
	return int(hash.Sum32()) % n
}
Example #18
func (d *Data) mergeLabels(batcher storage.KeyValueBatcher, v dvid.VersionID, op labels.MergeOp) error {
	d.Lock()
	defer d.Unlock()

	d.StartUpdate()
	defer d.StopUpdate()
	ctx := datastore.NewVersionedCtx(d, v)
	batch := batcher.NewBatch(ctx)

	// Get the target label
	targetTk := NewLabelTKey(op.Target)
	targetElems, err := getElements(ctx, targetTk)
	if err != nil {
		return fmt.Errorf("get annotations for instance %q, target %d, in syncMerge: %v\n", d.DataName(), op.Target, err)
	}

	// Iterate through each merged label, read old elements, delete that k/v, then add it to the current target elements.
	var delta DeltaModifyElements
	elemsAdded := 0
	for label := range op.Merged {
		tk := NewLabelTKey(label)
		elems, err := getElements(ctx, tk)
		if err != nil {
			return fmt.Errorf("unable to get annotation elements for instance %q, label %d in syncMerge: %v\n", d.DataName(), label, err)
		}
		if len(elems) == 0 {
			continue
		}
		batch.Delete(tk)
		elemsAdded += len(elems)
		targetElems = append(targetElems, elems...)

		// for labelsz.  TODO, only do this computation if really subscribed.
		for _, elem := range elems {
			delta.Add = append(delta.Add, ElementPos{Label: op.Target, Kind: elem.Kind, Pos: elem.Pos})
			delta.Del = append(delta.Del, ElementPos{Label: label, Kind: elem.Kind, Pos: elem.Pos})
		}
	}
	if elemsAdded > 0 {
		val, err := json.Marshal(targetElems)
		if err != nil {
			return fmt.Errorf("couldn't serialize annotation elements in instance %q: %v\n", d.DataName(), err)
		}
		batch.Put(targetTk, val)
		if err := batch.Commit(); err != nil {
			return fmt.Errorf("unable to commit merge for instance %q: %v\n", d.DataName(), err)
		}
	}

	// Notify any subscribers of label annotation changes.
	evt := datastore.SyncEvent{Data: d.DataUUID(), Event: ModifyElementsEvent}
	msg := datastore.SyncMessage{Event: ModifyElementsEvent, Version: ctx.VersionID(), Delta: delta}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		dvid.Criticalf("unable to notify subscribers of event %s: %v\n", evt, err)
	}
	return nil
}
Example #19
// NewBatch returns an implementation that allows batch writes
func (db *LevelDB) NewBatch(ctx storage.Context) storage.Batch {
	if db == nil {
		dvid.Criticalf("Can't call NewBatch on nil LevelDB\n")
		return nil
	}
	if ctx == nil {
		dvid.Criticalf("Received nil context in NewBatch()")
		return nil
	}
	dvid.StartCgo()
	defer dvid.StopCgo()

	var vctx storage.VersionedCtx
	var ok bool
	vctx, ok = ctx.(storage.VersionedCtx)
	if !ok {
		vctx = nil
	}
	return &goBatch{ctx, vctx, levigo.NewWriteBatch(), db.options.WriteOptions, db.ldb}
}
Example #20
// Notify any downstream downres instance of block change.
func (d *Data) publishBlockChange(v dvid.VersionID, mutID uint64, block dvid.IZYXString, blockData []byte) {
	evt := datastore.SyncEvent{Data: d.DataUUID(), Event: DownsizeBlockEvent}
	delta := deltaBlock{
		mutID: mutID,
		block: block,
		data:  blockData,
	}
	msg := datastore.SyncMessage{Event: DownsizeBlockEvent, Version: v, Delta: delta}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		dvid.Criticalf("unable to notify subscribers of event %s: %v\n", evt, err)
	}
}
Example #21
func (batch *goBatch) Put(tkey storage.TKey, value []byte) {
	if batch == nil || batch.ctx == nil {
		dvid.Criticalf("Received nil batch or nil batch context in batch.Put()\n")
		return
	}

	batch.db.Put(batch.ctx, tkey, value)

	batch.kvs = append(batch.kvs, storage.TKeyValue{tkey, value})
	storage.StoreKeyBytesWritten <- len(tkey)
	storage.StoreValueBytesWritten <- len(value)
}
Example #22
// NewBatch returns an implementation that allows batch writes
func (db *KVAutobus) NewBatch(ctx storage.Context) storage.Batch {
	if ctx == nil {
		dvid.Criticalf("Received nil context in NewBatch()")
		return nil
	}
	var vctx storage.VersionedCtx
	var ok bool
	vctx, ok = ctx.(storage.VersionedCtx)
	if !ok {
		vctx = nil
	}
	return &goBatch{db, ctx, vctx, []storage.KeyValue{}}
}
Example #23
func (d *Data) handleEvent(msg datastore.SyncMessage) {
	switch delta := msg.Delta.(type) {
	case labels.DeltaMergeStart:
		// Add this merge into the cached blockRLEs
		iv := dvid.InstanceVersion{d.DataUUID(), msg.Version}
		d.StartUpdate()
		labels.MergeStart(iv, delta.MergeOp)
		d.StopUpdate()

	case labels.DeltaMerge:
		d.processMerge(msg.Version, delta)

	case labels.DeltaSplit:
		d.processSplit(msg.Version, delta)

	case deltaBlock: // received downres processing from upstream
		// NOTE: we need to register a wait here since the delta arrives via channel and can lag the commit event.
		if d.MutAdd(delta.mutID) {
			d.StartUpdate()
		}
		n := delta.block.Hash(numBlockHandlers)
		d.procCh[n] <- procMsg{op: delta, v: msg.Version}

	case imageblk.Block:
		if d.MutAdd(delta.MutID) {
			d.StartUpdate()
		}
		n := delta.Index.Hash(numBlockHandlers)
		block := delta.Index.ToIZYXString()
		d.procCh[n] <- procMsg{op: deltaBlock{delta.MutID, block, delta.Data}, v: msg.Version}

	case imageblk.MutatedBlock:
		if d.MutAdd(delta.MutID) {
			d.StartUpdate()
		}
		n := delta.Index.Hash(numBlockHandlers)
		block := delta.Index.ToIZYXString()
		d.procCh[n] <- procMsg{op: deltaBlock{delta.MutID, block, delta.Data}, v: msg.Version}

	case labels.DeleteBlock:
		if d.MutAdd(delta.MutID) {
			d.StartUpdate()
		}
		n := delta.Index.Hash(numBlockHandlers)
		block := delta.Index.ToIZYXString()
		d.procCh[n] <- procMsg{op: deltaBlock{delta.MutID, block, nil}, v: msg.Version}

	default:
		dvid.Criticalf("Received unknown delta in labelblk.processEvents(): %v\n", msg)
	}
}
Example #24
// PutRange puts type key-value pairs that have been sorted in sequential key order.
// Current implementation in levigo driver simply does a batch write.
func (db *LevelDB) PutRange(ctx storage.Context, kvs []storage.TKeyValue) error {
	if ctx == nil {
		return fmt.Errorf("Received nil context in PutRange()")
	}
	batch := db.NewBatch(ctx).(*goBatch)
	for _, kv := range kvs {
		batch.Put(kv.K, kv.V)
	}
	if err := batch.Commit(); err != nil {
		dvid.Criticalf("Error on batch commit of PutRange: %v\n", err)
		return err
	}
	return nil
}
Example #25
func (batch *goBatch) Delete(tk storage.TKey) {
	if batch == nil || batch.ctx == nil {
		dvid.Criticalf("Received nil batch or nil batch context in batch.Delete()\n")
		return
	}
	dvid.StartCgo()
	defer dvid.StopCgo()

	key := batch.ctx.ConstructKey(tk)
	if batch.vctx != nil {
		tombstone := batch.vctx.TombstoneKey(tk) // This will now have current version
		batch.WriteBatch.Put(tombstone, dvid.EmptyValue())
	}
	batch.WriteBatch.Delete(key)
}
Example #26
// IsTombstone returns true if the given key is a tombstone key.
func (k Key) IsTombstone() bool {
	sz := len(k)
	if sz == 0 {
		return false
	}
	switch k[sz-1] {
	case MarkData:
		return false
	case MarkTombstone:
		return true
	default:
		dvid.Criticalf("Illegal key checked for tombstone marker: %v\n", k)
	}
	return false
}
Example #27
func (avail DataAvail) String() string {
	switch avail {
	case DataDelta:
		return "Delta"
	case DataComplete:
		return "Complete Copy"
	case DataRoot:
		return "Unversioned"
	case DataDeleted:
		return "Deleted"
	default:
		dvid.Criticalf("Unknown DataAvail (%v) with String()\n", avail)
		return "Unknown"
	}
}
Example #28
func (d *Data) syncMerge(name dvid.InstanceName, in <-chan datastore.SyncMessage, done <-chan struct{}) {
	// Start N goroutines to process blocks.  We don't need transactional support for
	// the GET-PUT combo if each spatial coordinate (block) is only handled serially by one goroutine.
	const numprocs = 32
	const mergeBufSize = 100
	var pch [numprocs]chan mergeOp
	for i := 0; i < numprocs; i++ {
		pch[i] = make(chan mergeOp, mergeBufSize)
		go d.mergeBlock(pch[i])
	}

	// Process incoming merge messages
	for msg := range in {
		select {
		case <-done:
			for i := 0; i < numprocs; i++ {
				close(pch[i])
			}
			return
		default:
			iv := dvid.InstanceVersion{name, msg.Version}
			switch delta := msg.Delta.(type) {
			case labels.DeltaMerge:
				ctx := datastore.NewVersionedCtx(d, msg.Version)
				wg := new(sync.WaitGroup)
				for izyxStr := range delta.Blocks {
					n := hashStr(izyxStr, numprocs)
					wg.Add(1)
					pch[n] <- mergeOp{delta.MergeOp, ctx, izyxStr, wg}
				}
				// When we've processed all the delta blocks, we can remove this merge op
				// from the merge cache since all labels will have completed.
				go func(wg *sync.WaitGroup) {
					wg.Wait()
					labels.MergeCache.Remove(iv, delta.MergeOp)
				}(wg)

			case labels.DeltaMergeStart:
				// Add this merge into the cached blockRLEs
				labels.MergeCache.Add(iv, delta.MergeOp)

			default:
				dvid.Criticalf("bad delta in merge event: %v\n", delta)
				continue
			}
		}
	}
}
Example #29
// Processes each change as we get it.
// TODO -- accumulate larger # of changes before committing to prevent
// excessive compaction time?  This assumes LSM storage engine, which
// might not always hold in future, so stick with incremental update
// until proven to be a bottleneck.
func (d *Data) handleBlockEvent() {
	store, err := d.GetOrderedKeyValueDB()
	if err != nil {
		dvid.Errorf("Data type labelvol had error initializing store: %v\n", err)
		return
	}
	batcher, ok := store.(storage.KeyValueBatcher)
	if !ok {
		dvid.Errorf("Data type labelvol requires batch-enabled store, which %q is not\n", store)
		return
	}
	var stop bool
	var wg *sync.WaitGroup
	for {
		select {
		case wg = <-d.syncDone:
			queued := len(d.syncCh)
			if queued > 0 {
				dvid.Infof("Received shutdown signal for %q sync events (%d in queue)\n", d.DataName(), queued)
				stop = true
			} else {
				dvid.Infof("Shutting down sync event handler for instance %q...\n", d.DataName())
				wg.Done()
				return
			}
		case msg := <-d.syncCh:
			d.StartUpdate()
			ctx := datastore.NewVersionedCtx(d, msg.Version)
			switch delta := msg.Delta.(type) {
			case imageblk.Block:
				d.ingestBlock(ctx, delta, batcher)
			case imageblk.MutatedBlock:
				d.mutateBlock(ctx, delta, batcher)
			case labels.DeleteBlock:
				d.deleteBlock(ctx, delta, batcher)
			default:
				dvid.Criticalf("Cannot sync labelvol from block event.  Got unexpected delta: %v\n", msg)
			}
			d.StopUpdate()

			if stop && len(d.syncCh) == 0 {
				dvid.Infof("Shutting down sync even handler for instance %q after draining sync events.\n", d.DataName())
				wg.Done()
				return
			}
		}
	}
}
Example #30
func (batch *goBatch) Put(tk storage.TKey, v []byte) {
	if batch == nil || batch.ctx == nil {
		dvid.Criticalf("Received nil batch or nil batch context in batch.Put()\n")
		return
	}
	dvid.StartCgo()
	defer dvid.StopCgo()

	key := batch.ctx.ConstructKey(tk)
	if batch.vctx != nil {
		tombstone := batch.vctx.TombstoneKey(tk) // This will now have current version
		batch.WriteBatch.Delete(tombstone)
	}
	storage.StoreKeyBytesWritten <- len(key)
	storage.StoreValueBytesWritten <- len(v)
	batch.WriteBatch.Put(key, v)
}