Example #1
// Handler for web client and other static content
func mainHandler(w http.ResponseWriter, r *http.Request) {
	path := r.URL.Path
	if config == nil {
		log.Fatalf("mainHandler() called when server was not configured!\n")
	}

	// Serve from embedded files in the executable if no web client directory was specified
	if config.WebClient() == "" {
		if len(path) > 0 && path[0:1] == "/" {
			path = path[1:]
		}
		dvid.Debugf("[%s] Serving from embedded files: %s\n", r.Method, path)

		resource := nrsc.Get(path)
		if resource == nil {
			http.NotFound(w, r)
			return
		}
		rsrc, err := resource.Open()
		if err != nil {
			BadRequest(w, r, err)
			return
		}
		data, err := ioutil.ReadAll(rsrc)
		if err != nil {
			BadRequest(w, r, err)
			return
		}
		dvid.SendHTTP(w, r, path, data)
	} else {
		filename := filepath.Join(config.WebClient(), path)
		dvid.Debugf("[%s] Serving from webclient directory: %s\n", r.Method, filename)
		http.ServeFile(w, r, filename)
	}
}
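The handler above depends on DVID's config and the embedded nrsc resources, so it isn't runnable on its own. Below is a minimal, self-contained sketch of the same branching pattern (embedded content vs. a configured directory) using only the standard library; the names webClientDir, embedded, staticHandler and the port are illustrative placeholders, not DVID's.

package main

import (
	"log"
	"net/http"
	"strings"
)

// webClientDir empty means "serve the embedded content"; otherwise serve files from it.
var webClientDir = ""

// Stand-in for resources compiled into the executable.
var embedded = map[string][]byte{
	"index.html": []byte("<html><body>embedded client</body></html>"),
}

func staticHandler(w http.ResponseWriter, r *http.Request) {
	path := strings.TrimPrefix(r.URL.Path, "/")
	if webClientDir == "" {
		data, ok := embedded[path]
		if !ok {
			http.NotFound(w, r)
			return
		}
		w.Write(data)
		return
	}
	http.ServeFile(w, r, webClientDir+"/"+path)
}

func main() {
	http.HandleFunc("/", staticHandler)
	log.Fatal(http.ListenAndServe(":8000", nil))
}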
Example #2
func (db *KVAutobus) RawPut(key storage.Key, value []byte) error {
	b64key := encodeKey(key)
	url := fmt.Sprintf("%s/kvautobus/api/value/%s/", db.host, b64key)
	bin := Binary(value)

	dvid.Debugf("Begin RawPut on key %s (%d bytes)\n", hex.EncodeToString(key), len(bin))

	// Create pipe from encoding to posting
	pr, pw := io.Pipe()
	w := msgp.NewWriter(pw)
	go func() {
		dvid.Debugf("Starting msgpack encoding...\n")
		if err := bin.EncodeMsg(w); err != nil {
			pw.CloseWithError(err)
			return
		}
		if err := w.Flush(); err != nil {
			pw.CloseWithError(err)
			return
		}
		pw.Close()
		dvid.Debugf("Done msgpack encoding.\n")
	}()

	dvid.Debugf("Beginning POST to kvautobus: %s\n", url)
	resp, err := http.Post(url, "application/x-msgpack", pr)
	dvid.Debugf("Done POST with err %v\n", err)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusConflict {
		return fmt.Errorf("Can't POST to an already stored key.  KVAutobus returned status %d (%s)", resp.StatusCode, url)
	}
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("Bad status code returned (%d) from put request: %s", resp.StatusCode, url)
	}
	return nil
}
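The interesting part of RawPut is the io.Pipe: the msgpack encoder writes into one end of the pipe while http.Post streams from the other, so the value is never fully buffered into an intermediate slice. A self-contained sketch of that pattern using only the standard library (JSON stands in for msgpack; streamPost, the URL, and the payload are placeholders):

package main

import (
	"encoding/json"
	"io"
	"log"
	"net/http"
)

func streamPost(url string, payload interface{}) error {
	pr, pw := io.Pipe()
	go func() {
		// CloseWithError propagates an encoding failure to the reading side;
		// with a nil error it behaves like Close.
		pw.CloseWithError(json.NewEncoder(pw).Encode(payload))
	}()
	resp, err := http.Post(url, "application/json", pr)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	io.Copy(io.Discard, resp.Body) // drain so the connection can be reused
	return nil
}

func main() {
	if err := streamPost("http://localhost:8000/ingest", map[string]int{"n": 1}); err != nil {
		log.Fatal(err)
	}
}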
Example #3
func (s *Session) Close() error {
	dvid.Debugf("session %d close: waiting for any jobs to complete...\n", s.id)
	s.Wait()
	dvid.Debugf("sending session end to remote...\n")
	_, err := s.dc.Call(sendEndSession, s.id)
	dvid.Debugf("stopping client...\n")
	s.c.Stop()
	return err
}
Example #4
// NewFilter returns a Filter for use with a push of key-value pairs.
func (d *Data) NewFilter(fs storage.FilterSpec) (storage.Filter, error) {
	roiIterator, _, found, err := roi.NewIteratorBySpec(fs, d)
	if err != nil {
		dvid.Debugf("No filter found that was parsable: %s\n", fs)
		return nil, err
	}
	if !found || roiIterator == nil {
		dvid.Debugf("No ROI found so using generic data push for data %q.\n", d.DataName())
		return nil, nil
	}
	return &Filter{d, fs, roiIterator}, nil
}
Example #5
// getDeltaAll compares a remote Repo with the local one, determining the set of versions that
// need to be sent from the remote to bring the local DVID up-to-date.
func getDeltaAll(remote *repoT, uuid dvid.UUID) (map[dvid.VersionID]struct{}, error) {
	// Determine all version ids of remote DAG nodes that aren't in the local DAG.
	// Since VersionID can differ among DVID servers, we need to compare using UUIDs
	// then convert to VersionID.
	delta := make(map[dvid.VersionID]struct{})
	for _, rnode := range remote.dag.nodes {
		lv, found := manager.uuidToVersion[rnode.uuid]
		if found {
			dvid.Debugf("Both remote and local have uuid %s... skipping\n", rnode.uuid)
		} else {
			dvid.Debugf("Found version %s in remote not in local: sending local version id %d\n", rnode.uuid, lv)
			delta[lv] = struct{}{}
		}
	}
	return delta, nil
}
Example #6
// MergeLabels handles merging of any number of labels throughout the various label data
// structures.  It assumes that the merges aren't cascading, e.g., there is no attempt
// to merge label 3 into 4 and also 4 into 5.  The caller should have flattened the merges.
// TODO: Provide some indication that a subset of labels is under evolution, returning
//   an "unavailable" status or 203 for a non-authoritative response.  This might not be
//   feasible for clustered DVID front-ends due to coordination issues.
//
// EVENTS
//
// labels.MergeStartEvent occurs at very start of merge and transmits labels.DeltaMergeStart struct.
//
// labels.MergeBlockEvent occurs for every block of a merged label and transmits labels.DeltaMerge struct.
//
// labels.MergeEndEvent occurs at end of merge and transmits labels.DeltaMergeEnd struct.
//
func (d *Data) MergeLabels(v dvid.VersionID, m labels.MergeOp) error {
	dvid.Debugf("Merging %s into label %d ...\n", m.Merged, m.Target)

	// Signal that we are starting a merge.
	evt := datastore.SyncEvent{d.DataUUID(), labels.MergeStartEvent}
	msg := datastore.SyncMessage{labels.MergeStartEvent, v, labels.DeltaMergeStart{m}}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		return err
	}

	// Asynchronously perform merge and handle any concurrent requests using the cache map until
	// labelvol and labelblk are updated and consistent.
	// Mark these labels as dirty until done.
	if err := labels.MergeStart(d.getMergeIV(v), m); err != nil {
		return err
	}
	go func() {
		d.asyncMergeLabels(v, m)

		// Remove the dirty labels and the updating flag when done.
		labels.MergeStop(d.getMergeIV(v), m)
	}()

	return nil
}
Example #7
// Amount of data to build up in memory (backed by an unsorted log
// on disk) before converting to a sorted on-disk file.
//
// Larger values increase performance, especially during bulk loads.
// Up to two write buffers may be held in memory at the same time,
// so you may wish to adjust this parameter to control memory usage.
// Also, a larger write buffer will result in a longer recovery time
// the next time the database is opened.
func (opts *leveldbOptions) SetWriteBufferSize(nBytes int) {
	if nBytes != opts.writeBufferSize {
		dvid.Debugf("Write buffer set to %d bytes.\n", nBytes)
		opts.Options.SetWriteBufferSize(nBytes)
		opts.writeBufferSize = nBytes
	}
}
Example #8
// Approximate size of user data packed per block.  Note that the
// block size specified here corresponds to uncompressed data.  The
// actual size of the unit read from disk may be smaller if
// compression is enabled.  This parameter can be changed dynamically.
func (opts *leveldbOptions) SetBlockSize(nBytes int) {
	if nBytes != opts.blockSize {
		dvid.Debugf("Block size set to %d bytes.\n", nBytes)
		opts.Options.SetBlockSize(nBytes)
		opts.blockSize = nBytes
	}
}
Example #9
// NewFilter returns a Filter for use with a push of key-value pairs.
func (d *Data) NewFilter(fs storage.FilterSpec) (storage.Filter, error) {
	filter := &Filter{Data: d, fs: fs}

	// if there's no filter, just use base Data send.
	roidata, roiV, roiFound, err := roi.DataByFilter(fs)
	if err != nil {
		return nil, fmt.Errorf("No filter found that was parsable (%s): %v\n", fs, err)
	}
	filter.roi = roidata
	tilespec, tilespecFound := fs.GetFilterSpec("tile")

	if (!roiFound || roidata == nil) && !tilespecFound {
		dvid.Debugf("No ROI or tile filter found for imagetile push, so using generic data push.\n")
		return nil, nil
	}
	if tilespecFound {
		filter.planes = strings.Split(tilespec, ",")
	}

	// Get the spans once from the datastore, but only if an ROI was actually found.
	if roidata != nil {
		filter.spans, err = roidata.GetSpans(roiV)
		if err != nil {
			return nil, err
		}
	}

	return filter, nil
}
Example #10
// GetSparseCoarseVol returns an encoded sparse volume given a label.  The encoding has the
// following format where integers are little endian:
// 		byte     Set to 0
// 		uint8    Number of dimensions
// 		uint8    Dimension of run (typically 0 = X)
// 		byte     Reserved (to be used later)
// 		uint32    # Blocks [TODO.  0 for now]
// 		uint32    # Spans
// 		Repeating unit of:
//     		int32   Block coordinate of run start (dimension 0)
//     		int32   Block coordinate of run start (dimension 1)
//     		int32   Block coordinate of run start (dimension 2)
//     		int32   Length of run
//
func GetSparseCoarseVol(ctx storage.Context, label uint64) ([]byte, error) {
	store, err := storage.SmallDataStore()
	if err != nil {
		return nil, fmt.Errorf("Data type labelvol had error initializing store: %v\n", err)
	}

	// Create the sparse volume header
	buf := new(bytes.Buffer)
	buf.WriteByte(dvid.EncodingBinary)
	binary.Write(buf, binary.LittleEndian, uint8(3))  // # of dimensions
	binary.Write(buf, binary.LittleEndian, byte(0))   // dimension of run (X = 0)
	buf.WriteByte(byte(0))                            // reserved for later
	binary.Write(buf, binary.LittleEndian, uint32(0)) // Placeholder for # blocks
	encoding := buf.Bytes()

	// Get the start/end indices for this body's KeyLabelSpatialMap (b + s) keys.
	begTKey := NewTKey(label, dvid.MinIndexZYX.ToIZYXString())
	endTKey := NewTKey(label, dvid.MaxIndexZYX.ToIZYXString())

	// Process all the b+s keys and their values, which contain RLE runs for that label.
	var numBlocks uint32
	var span *dvid.Span
	var spans dvid.Spans
	keys, err := store.KeysInRange(ctx, begTKey, endTKey)
	if err != nil {
		return nil, fmt.Errorf("Cannot get keys for coarse sparse volume: %v", err)
	}
	for _, tk := range keys {
		numBlocks++
		_, blockStr, err := DecodeTKey(tk)
		if err != nil {
			return nil, fmt.Errorf("Error retrieving RLE runs for label %d: %v", label, err)
		}
		indexZYX, err := blockStr.IndexZYX()
		if err != nil {
			return nil, fmt.Errorf("Error decoding block coordinate (%v) for sparse volume: %v\n", blockStr, err)
		}
		x, y, z := indexZYX.Unpack()
		if span == nil {
			span = &dvid.Span{z, y, x, x}
		} else if !span.Extends(x, y, z) {
			spans = append(spans, *span)
			span = &dvid.Span{z, y, x, x}
		}
	}
	if err != nil {
		return nil, err
	}
	if span != nil {
		spans = append(spans, *span)
	}
	spansBytes, err := spans.MarshalBinary()
	if err != nil {
		return nil, err
	}
	encoding = append(encoding, spansBytes...)
	dvid.Debugf("[%s] coarse subvol for label %d: found %d blocks\n", ctx, label, numBlocks)
	return encoding, nil
}
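A consumer of this encoding can read it back with encoding/binary. The sketch below assumes the payload follows the documented layout exactly: the fixed header fields through the span count, then four little-endian int32s per span. The package, type, and field names are illustrative, not DVID's.

package sparsevol

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// span holds the run start in dimensions 0, 1, 2 plus the run length,
// per the format comment above.
type span struct {
	X, Y, Z, Length int32
}

func decodeCoarseVol(data []byte) ([]span, error) {
	r := bytes.NewReader(data)
	var header struct {
		Encoding  byte
		NumDims   uint8
		RunDim    uint8
		Reserved  byte
		NumBlocks uint32
		NumSpans  uint32
	}
	if err := binary.Read(r, binary.LittleEndian, &header); err != nil {
		return nil, fmt.Errorf("bad sparse volume header: %v", err)
	}
	spans := make([]span, header.NumSpans)
	if err := binary.Read(r, binary.LittleEndian, spans); err != nil {
		return nil, fmt.Errorf("bad sparse volume spans: %v", err)
	}
	return spans, nil
}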
Example #11
// TODO -- Clean up all the writing and simplify now that we have block-aligned writes.
// writeBlocks ingests blocks of voxel data asynchronously using batch writes.
func (d *Data) writeBlocks(v dvid.VersionID, b storage.TKeyValues, wg1, wg2 *sync.WaitGroup) error {
	batcher, err := d.GetKeyValueBatcher()
	if err != nil {
		return err
	}

	preCompress, postCompress := 0, 0

	ctx := datastore.NewVersionedCtx(d, v)
	evt := datastore.SyncEvent{d.DataUUID(), IngestBlockEvent}

	<-server.HandlerToken
	go func() {
		defer func() {
			wg1.Done()
			wg2.Done()
			dvid.Debugf("Wrote voxel blocks.  Before %s: %d bytes.  After: %d bytes\n", d.Compression(), preCompress, postCompress)
			server.HandlerToken <- 1
		}()

		mutID := d.NewMutationID()
		batch := batcher.NewBatch(ctx)
		for i, block := range b {
			serialization, err := dvid.SerializeData(block.V, d.Compression(), d.Checksum())
			preCompress += len(block.V)
			postCompress += len(serialization)
			if err != nil {
				dvid.Errorf("Unable to serialize block: %v\n", err)
				return
			}
			batch.Put(block.K, serialization)

			indexZYX, err := DecodeTKey(block.K)
			if err != nil {
				dvid.Errorf("Unable to recover index from block key: %v\n", block.K)
				return
			}
			msg := datastore.SyncMessage{IngestBlockEvent, v, Block{indexZYX, block.V, mutID}}
			if err := datastore.NotifySubscribers(evt, msg); err != nil {
				dvid.Errorf("Unable to notify subscribers of ChangeBlockEvent in %s\n", d.DataName())
				return
			}

			// Check if we should commit
			if i%KVWriteSize == KVWriteSize-1 {
				if err := batch.Commit(); err != nil {
					dvid.Errorf("Error on trying to write batch: %v\n", err)
					return
				}
				batch = batcher.NewBatch(ctx)
			}
		}
		if err := batch.Commit(); err != nil {
			dvid.Errorf("Error on trying to write batch: %v\n", err)
			return
		}
	}()
	return nil
}
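The commit cadence above is a recurring idiom in these examples (DeleteRange below uses it too): every KVWriteSize-th Put triggers a Commit via the `i%KVWriteSize == KVWriteSize-1` test, and a final Commit flushes whatever remains. A toy, self-contained illustration with a stand-in batch type:

package main

import "fmt"

type batch struct{ puts int }

func (b *batch) Put(k, v string) { b.puts++ }

func (b *batch) Commit() error {
	fmt.Printf("committed %d puts\n", b.puts)
	b.puts = 0
	return nil
}

func main() {
	const batchSize = 3
	items := []string{"a", "b", "c", "d", "e", "f", "g"}
	b := &batch{}
	for i, k := range items {
		b.Put(k, "value")
		if i%batchSize == batchSize-1 {
			if err := b.Commit(); err != nil {
				panic(err)
			}
		}
	}
	// Flush the remainder (may be an empty batch, as in writeBlocks above).
	if err := b.Commit(); err != nil {
		panic(err)
	}
}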
Example #12
// RegisterEngine registers an Engine for DVID use.
func RegisterEngine(e Engine) {
	dvid.Debugf("Engine %q registered with DVID server.\n", e)
	if availEngines == nil {
		availEngines = map[string]Engine{e.GetName(): e}
	} else {
		availEngines[e.GetName()] = e
	}
}
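RegisterEngine lazily allocates the registry map on first registration. A self-contained sketch of the same pattern with a simplified stand-in Engine interface (the real DVID interface has more methods than GetName):

package registry

// Engine is a simplified stand-in for the storage engine interface.
type Engine interface {
	GetName() string
}

var availEngines map[string]Engine

// RegisterEngine lazily creates the registry on the first registration.
func RegisterEngine(e Engine) {
	if availEngines == nil {
		availEngines = make(map[string]Engine)
	}
	availEngines[e.GetName()] = e
}

// A hypothetical engine package would typically register itself at load time:
//
//	func init() { RegisterEngine(myEngine{}) }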
Example #13
// DeleteRange removes all key-value pairs with keys in the given range.
func (db *LevelDB) DeleteRange(ctx storage.Context, kStart, kEnd storage.TKey) error {
	if ctx == nil {
		return fmt.Errorf("Received nil context in DeleteRange()")
	}

	// For leveldb, we just iterate over keys in range and delete each one using batch.
	const BATCH_SIZE = 10000
	batch := db.NewBatch(ctx).(*goBatch)

	ch := make(chan errorableKV)

	// Run the keys-only range query in a goroutine.
	go func() {
		if ctx == nil || !ctx.Versioned() {
			db.unversionedRange(ctx, kStart, kEnd, ch, true)
		} else {
			db.versionedRange(ctx.(storage.VersionedCtx), kStart, kEnd, ch, true)
		}
	}()

	// Consume the key-value pairs.
	numKV := 0
	for {
		result := <-ch
		if result.KeyValue == nil {
			break
		}
		if result.error != nil {
			return result.error
		}

		// The key coming down the channel is the full key, not just an index, so there's no need to construct the key from the context.
		// If versioned, write a tombstone using current version id since we don't want to delete locked ancestors.
		// If unversioned, just delete.
		tk, err := ctx.TKeyFromKey(result.KeyValue.K)
		if err != nil {
			return err
		}
		batch.Delete(tk)

		if (numKV+1)%BATCH_SIZE == 0 {
			if err := batch.Commit(); err != nil {
				batch.Close()
				return fmt.Errorf("Error on batch DELETE at key-value pair %d: %v\n", numKV, err)
			}
			batch = db.NewBatch(ctx).(*goBatch)
		}
		numKV++
	}
	if numKV%BATCH_SIZE != 0 {
		if err := batch.Commit(); err != nil {
			batch.Close()
			return fmt.Errorf("Error on last batch DELETE: %v\n", err)
		}
	}
	dvid.Debugf("Deleted %d key-value pairs via delete range for %s.\n", numKV, ctx)
	return nil
}
Example #14
// DoRPC acts as a switchboard for RPC commands.
func (d *Data) DoRPC(req datastore.Request, reply *datastore.Response) error {
	switch req.TypeCommand() {
	case "load":
		if len(req.Command) < 5 {
			return fmt.Errorf("Poorly formatted load command.  See command-line help.")
		}
		// Parse the request
		var uuidStr, dataName, cmdStr, offsetStr string
		filenames, err := req.FilenameArgs(1, &uuidStr, &dataName, &cmdStr, &offsetStr)
		if err != nil {
			return err
		}
		if len(filenames) == 0 {
			return fmt.Errorf("Need to include at least one file to add: %s", req)
		}

		// Get offset
		offset, err := dvid.StringToPoint(offsetStr, ",")
		if err != nil {
			return fmt.Errorf("Illegal offset specification: %s: %v", offsetStr, err)
		}

		// Get list of files to add
		var addedFiles string
		if len(filenames) == 1 {
			addedFiles = filenames[0]
		} else {
			addedFiles = fmt.Sprintf("filenames: %s [%d more]", filenames[0], len(filenames)-1)
		}
		dvid.Debugf(addedFiles + "\n")

		uuid, versionID, err := datastore.MatchingUUID(uuidStr)
		if err != nil {
			return err
		}
		if err = datastore.AddToNodeLog(uuid, []string{req.Command.String()}); err != nil {
			return err
		}
		if err = d.LoadImages(versionID, offset, filenames); err != nil {
			return err
		}
		if err := datastore.SaveDataByUUID(uuid, d); err != nil {
			return err
		}
		return nil

	case "composite":
		if len(req.Command) < 6 {
			return fmt.Errorf("Poorly formatted composite command.  See command-line help.")
		}
		return d.CreateComposite(req, reply)

	default:
		return fmt.Errorf("Unknown command.  Data type '%s' [%s] does not support '%s' command.",
			d.DataName(), d.TypeName(), req.TypeCommand())
	}
	return nil
}
Example #15
func (p *pusher) Close() error {
	gb := float64(p.received) / 1000000000
	dvid.Debugf("Closing push of uuid %s: received %.1f GBytes in %s\n", p.repo.uuid, gb, time.Since(p.startTime))

	// Add this repo to current DVID server
	if err := manager.addRepo(p.repo); err != nil {
		return err
	}
	return nil
}
Example #16
// SetLRUCacheSize sets the size of the LRU cache that caches frequently used
// uncompressed blocks.
func (opts *leveldbOptions) SetLRUCacheSize(nBytes int) {
	if nBytes != opts.nLRUCacheBytes {
		if opts.cache != nil {
			opts.cache.Close()
		}
		dvid.Debugf("LRU cache size set to %d bytes.\n", nBytes)
		opts.cache = levigo.NewLRUCache(nBytes)
		opts.nLRUCacheBytes = nBytes
		opts.Options.SetCache(opts.cache)
	}
}
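These three setters (write buffer, block size, LRU cache) wrap levigo options and only touch the underlying handle when the value actually changes. As a point of reference, here is a minimal sketch of setting the same knobs directly on levigo when opening a database, assuming the github.com/jmhodges/levigo bindings; the path and sizes are placeholders.

package main

import (
	"log"

	"github.com/jmhodges/levigo"
)

func main() {
	opts := levigo.NewOptions()
	opts.SetCreateIfMissing(true)
	opts.SetWriteBufferSize(64 * 1024 * 1024) // memtable size before flush to a sorted file
	opts.SetBlockSize(32 * 1024)              // uncompressed block size packed per read unit
	cache := levigo.NewLRUCache(512 * 1024 * 1024)
	opts.SetCache(cache) // LRU cache for frequently read, uncompressed blocks

	db, err := levigo.Open("/tmp/example-db", opts)
	if err != nil {
		log.Fatal(err)
	}
	defer cache.Close()
	defer db.Close() // runs before cache.Close
}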
Example #17
// MergeStop marks the end of a merge operation.
func MergeStop(iv dvid.InstanceVersion, op MergeOp) {
	// Adjust the dirty counts on the involved labels.
	labelsMerging.RemoveMerge(iv, op)

	// Remove the merge from the mapping.
	mc.Remove(iv, op)

	// If the instance version's dirty cache is empty, we can delete the merge cache.
	if labelsMerging.Empty(iv) {
		dvid.Debugf("Merge cache now empty for %s\n", iv)
		mc.DeleteMap(iv)
	}
}
Example #18
func init() {
	shutdownCh = make(chan struct{})

	// Set the GC closer to old Go 1.4 setting
	old := debug.SetGCPercent(defaultGCPercent)
	dvid.Debugf("DVID server GC target percentage changed from %d to %d\n", old, defaultGCPercent)

	// Initialize the number of handler tokens available.
	for i := 0; i < MaxChunkHandlers; i++ {
		HandlerToken <- 1
	}

	// Monitor the handler token load, resetting every second.
	loadCheckTimer := time.Tick(10 * time.Millisecond)
	ticks := 0
	go func() {
		for {
			<-loadCheckTimer
			ticks = (ticks + 1) % 100
			if ticks == 0 {
				ActiveHandlers = curActiveHandlers
				curActiveHandlers = 0
			}
			numHandlers := MaxChunkHandlers - len(HandlerToken)
			if numHandlers > curActiveHandlers {
				curActiveHandlers = numHandlers
			}
		}
	}()

	// Monitor the # of interactive requests over last 2 minutes.
	go func() {
		tick := time.Tick(5 * time.Second)
		for {
			select {
			case <-interactiveOpsCh:
				interactiveOps[0]++
			case <-tick:
				newCount := InteractiveOpsPer2Min - interactiveOps[23] + interactiveOps[0]
				InteractiveOpsPer2Min = newCount
				copy(interactiveOps[1:], interactiveOps[:23])
				interactiveOps[0] = 0
			}
		}
	}()
}
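HandlerToken is a counting semaphore built from a buffered channel: init fills it with MaxChunkHandlers tokens, and workers elsewhere in these examples (writeBlocks) receive a token before starting and send it back when done. A self-contained sketch of the idiom with illustrative names:

package main

import (
	"fmt"
	"sync"
)

const maxHandlers = 4

var handlerToken = make(chan int, maxHandlers)

func init() {
	for i := 0; i < maxHandlers; i++ {
		handlerToken <- 1
	}
}

func main() {
	var wg sync.WaitGroup
	for job := 0; job < 10; job++ {
		<-handlerToken // blocks while maxHandlers jobs are already running
		wg.Add(1)
		go func(job int) {
			defer func() {
				handlerToken <- 1 // return the token
				wg.Done()
			}()
			fmt.Println("running job", job)
		}(job)
	}
	wg.Wait()
}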
Example #19
func (p *pusher) startData(d *DataTxInit) error {
	p.stats = new(txStats)
	p.stats.lastTime = time.Now()
	p.stats.lastBytes = 0

	p.dname = d.DataName

	// Get the store associated with this data instance.
	store, err := storage.GetAssignedStore(d.DataName, p.uuid, d.TypeName)
	if err != nil {
		return err
	}
	var ok bool
	p.store, ok = store.(storage.KeyValueDB)
	if !ok {
		return fmt.Errorf("backend store %q for data type %q of tx data %q is not KeyValueDB-compatable", p.store, d.TypeName, d.DataName)
	}
	dvid.Debugf("Push (session %d) starting transfer of data %q...\n", p.sessionID, d.DataName)
	return nil
}
Example #20
// record stats on size of values
func (t *txStats) addKV(k, v []byte) {
	t.numKV++

	vBytes := len(v)
	kBytes := len(k)
	curBytes := uint64(kBytes + vBytes)
	t.lastBytes += curBytes
	t.totalBytes += curBytes

	switch {
	case vBytes == 0:
		t.numV0++
	case vBytes < 10:
		t.numV1++
	case vBytes < 100:
		t.numV10++
	case vBytes < 1000:
		t.numV100++
	case vBytes < 10000:
		t.numV1k++
	case vBytes < 100000:
		t.numV10k++
	case vBytes < 1000000:
		t.numV100k++
	case vBytes < 10000000:
		t.numV1m++
	default:
		t.numV10m++
	}

	// Print progress?
	if elapsed := time.Since(t.lastTime); elapsed > time.Minute {
		mb := float64(t.lastBytes) / 1000000
		sec := elapsed.Seconds()
		throughput := mb / sec
		dvid.Debugf("Transfer throughput: %5.2f MB/s (%s in %4.1f seconds).  Total %s\n", throughput, humanize.Bytes(t.lastBytes), sec, humanize.Bytes(t.totalBytes))

		t.lastTime = time.Now()
		t.lastBytes = 0
	}
}
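The throughput report in addKV is time-gated: bytes are accumulated on every call, but a line is printed at most once per minute, after which the window counters reset. A self-contained sketch of that pattern (a one-second interval is used here so the example prints quickly):

package main

import (
	"fmt"
	"time"
)

type progress struct {
	lastTime   time.Time
	lastBytes  uint64
	totalBytes uint64
}

func (p *progress) add(n uint64) {
	p.lastBytes += n
	p.totalBytes += n
	if elapsed := time.Since(p.lastTime); elapsed > time.Second {
		fmt.Printf("%.2f MB/s (total %d bytes)\n",
			float64(p.lastBytes)/1e6/elapsed.Seconds(), p.totalBytes)
		p.lastTime = time.Now()
		p.lastBytes = 0
	}
}

func main() {
	p := &progress{lastTime: time.Now()}
	for i := 0; i < 500; i++ {
		p.add(1 << 20)
		time.Sleep(5 * time.Millisecond)
	}
}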
Example #21
// NewFilter returns a Filter for use with a push of key-value pairs.
func (d *Data) NewFilter(fs storage.FilterSpec) (storage.Filter, error) {
	filter := &Filter{Data: d, fs: fs}

	// Get associated labelblk.  If none, we can't use roi filter so just do standard data send.
	lblk, err := d.GetSyncedLabelblk()
	if err != nil {
		dvid.Infof("Unable to get synced labelblk for labelvol %q.  Unable to do any ROI-based filtering.\n", d.DataName())
		return nil, nil
	}

	roiIterator, _, found, err := roi.NewIteratorBySpec(fs, lblk)
	if err != nil {
		return nil, err
	}
	if !found || roiIterator == nil {
		dvid.Debugf("No ROI found so using generic data push for data %q.\n", d.DataName())
		return nil, nil
	}
	filter.it = roiIterator

	return filter, nil
}
Example #22
// SplitCoarseLabels splits a portion of a label's voxels into a given split label or, if the given split
// label is 0, a new label, which is returned.  The input is a binary sparse volume defined by block
// coordinates and should be the smaller portion of a labeled region-to-be-split.
//
// EVENTS
//
// labels.SplitStartEvent occurs at very start of split and transmits labels.DeltaSplitStart struct.
//
// labels.SplitBlockEvent occurs for every block of a split label and transmits labels.DeltaSplit struct.
//
// labels.SplitEndEvent occurs at end of split and transmits labels.DeltaSplitEnd struct.
//
func (d *Data) SplitCoarseLabels(v dvid.VersionID, fromLabel, splitLabel uint64, r io.ReadCloser) (toLabel uint64, err error) {
	store, err := d.GetOrderedKeyValueDB()
	if err != nil {
		err = fmt.Errorf("Data type labelvol had error initializing store: %v\n", err)
		return
	}
	batcher, ok := store.(storage.KeyValueBatcher)
	if !ok {
		err = fmt.Errorf("Data type labelvol requires batch-enabled store, which %q is not\n", store)
		return
	}

	// Create a new label id for this version that will persist to store
	if splitLabel != 0 {
		toLabel = splitLabel
		dvid.Debugf("Splitting coarse subset of label %d into given label %d ...\n", fromLabel, splitLabel)
	} else {
		toLabel, err = d.NewLabel(v)
		if err != nil {
			return
		}
		dvid.Debugf("Splitting coarse subset of label %d into new label %d ...\n", fromLabel, toLabel)
	}

	evt := datastore.SyncEvent{d.DataUUID(), labels.SplitStartEvent}
	splitOpStart := labels.DeltaSplitStart{fromLabel, toLabel}
	splitOpEnd := labels.DeltaSplitEnd{fromLabel, toLabel}

	// Make sure we can split given current merges in progress
	if err := labels.SplitStart(d.getMergeIV(v), splitOpStart); err != nil {
		return toLabel, err
	}
	defer labels.SplitStop(d.getMergeIV(v), splitOpEnd)

	// Signal that we are starting a split.
	msg := datastore.SyncMessage{labels.SplitStartEvent, v, splitOpStart}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		return 0, err
	}

	// Read the sparse volume from reader.
	var splits dvid.RLEs
	splits, err = dvid.ReadRLEs(r)
	if err != nil {
		return
	}
	numBlocks, _ := splits.Stats()

	// Order the split blocks
	splitblks := make(dvid.IZYXSlice, numBlocks)
	n := 0
	for _, rle := range splits {
		p := rle.StartPt()
		run := rle.Length()
		for i := int32(0); i < run; i++ {
			izyx := dvid.IndexZYX{p[0] + i, p[1], p[2]}
			splitblks[n] = izyx.ToIZYXString()
			n++
		}
	}
	sort.Sort(splitblks)

	// Iterate through the split blocks, read the original block and change labels.
	// TODO: Modifications should be transactional since it's GET-PUT, therefore use
	// hash on block coord to direct it to block-specific goroutine; we serialize
	// requests to handle concurrency.
	ctx := datastore.NewVersionedCtx(d, v)
	batch := batcher.NewBatch(ctx)

	var toLabelSize uint64
	for _, splitblk := range splitblks {
		// Get original block
		tk := NewTKey(fromLabel, splitblk)
		val, err := store.Get(ctx, tk)
		if err != nil {
			return toLabel, err
		}
		if val == nil {
			return toLabel, fmt.Errorf("Split block %s is not part of original label %d", splitblk, fromLabel)
		}
		var rles dvid.RLEs
		if err := rles.UnmarshalBinary(val); err != nil {
			return toLabel, fmt.Errorf("Unable to unmarshal RLE for original labels in block %s", splitblk)
		}
		numVoxels, _ := rles.Stats()
		toLabelSize += numVoxels

		// Delete the old block and save the sparse volume but under a new label.
		batch.Delete(tk)
		tk2 := NewTKey(toLabel, splitblk)
		batch.Put(tk2, val)
	}

	if err := batch.Commit(); err != nil {
		dvid.Errorf("Batch PUT during split of %q label %d: %v\n", d.DataName(), fromLabel, err)
	}

	// Publish split event
	evt = datastore.SyncEvent{d.DataUUID(), labels.SplitLabelEvent}
	msg = datastore.SyncMessage{labels.SplitLabelEvent, v, labels.DeltaSplit{fromLabel, toLabel, nil, splitblks}}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		return 0, err
	}

	// Publish change in label sizes.
	delta := labels.DeltaNewSize{
		Label: toLabel,
		Size:  toLabelSize,
	}
	evt = datastore.SyncEvent{d.DataUUID(), labels.ChangeSizeEvent}
	msg = datastore.SyncMessage{labels.ChangeSizeEvent, v, delta}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		return 0, err
	}

	delta2 := labels.DeltaModSize{
		Label:      fromLabel,
		SizeChange: int64(-toLabelSize),
	}
	evt = datastore.SyncEvent{d.DataUUID(), labels.ChangeSizeEvent}
	msg = datastore.SyncMessage{labels.ChangeSizeEvent, v, delta2}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		return 0, err
	}

	// Publish split end
	evt = datastore.SyncEvent{d.DataUUID(), labels.SplitEndEvent}
	msg = datastore.SyncMessage{labels.SplitEndEvent, v, splitOpEnd}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		return 0, err
	}
	dvid.Infof("Split %d voxels from label %d to label %d\n", toLabelSize, fromLabel, toLabel)

	return toLabel, nil
}
Example #23
func (d *Data) asyncMergeLabels(v dvid.VersionID, m labels.MergeOp) {
	// Get storage objects
	store, err := d.GetOrderedKeyValueDB()
	if err != nil {
		dvid.Errorf("Data type labelvol had error initializing store: %v\n", err)
		return
	}
	batcher, ok := store.(storage.KeyValueBatcher)
	if !ok {
		dvid.Errorf("Data type labelvol requires batch-enabled store, which %q is not\n", store)
		return
	}

	// All blocks that have changed during this merge.  Key = string of block index
	blocksChanged := make(map[dvid.IZYXString]struct{})

	// Get the block-level RLEs for the toLabel
	toLabel := m.Target
	toLabelRLEs, err := d.GetLabelRLEs(v, toLabel)
	if err != nil {
		dvid.Criticalf("Can't get block-level RLEs for label %d: %v", toLabel, err)
		return
	}
	toLabelSize := toLabelRLEs.NumVoxels()

	// Iterate through all labels to be merged.
	var addedVoxels uint64
	for fromLabel := range m.Merged {
		dvid.Debugf("Merging label %d to label %d...\n", fromLabel, toLabel)

		fromLabelRLEs, err := d.GetLabelRLEs(v, fromLabel)
		if err != nil {
			dvid.Errorf("Can't get block-level RLEs for label %d: %v", fromLabel, err)
			return
		}
		fromLabelSize := fromLabelRLEs.NumVoxels()
		if fromLabelSize == 0 || len(fromLabelRLEs) == 0 {
			dvid.Debugf("Label %d is empty.  Skipping.\n", fromLabel)
			continue
		}
		addedVoxels += fromLabelSize

		// Notify linked labelsz instances
		delta := labels.DeltaDeleteSize{
			Label:    fromLabel,
			OldSize:  fromLabelSize,
			OldKnown: true,
		}
		evt := datastore.SyncEvent{d.DataUUID(), labels.ChangeSizeEvent}
		msg := datastore.SyncMessage{labels.ChangeSizeEvent, v, delta}
		if err := datastore.NotifySubscribers(evt, msg); err != nil {
			dvid.Criticalf("can't notify subscribers for event %v: %v\n", evt, err)
		}

		// Append or insert RLE runs from fromLabel blocks into toLabel blocks.
		for blockStr, fromRLEs := range fromLabelRLEs {
			// Mark the fromLabel blocks as modified
			blocksChanged[blockStr] = struct{}{}

			// Get the toLabel RLEs for this block and add the fromLabel RLEs
			toRLEs, found := toLabelRLEs[blockStr]
			if found {
				toRLEs.Add(fromRLEs)
			} else {
				toRLEs = fromRLEs
			}
			toLabelRLEs[blockStr] = toRLEs
		}

		// Delete all fromLabel RLEs since they are all integrated into toLabel RLEs
		minTKey := NewTKey(fromLabel, dvid.MinIndexZYX.ToIZYXString())
		maxTKey := NewTKey(fromLabel, dvid.MaxIndexZYX.ToIZYXString())
		ctx := datastore.NewVersionedCtx(d, v)
		if err := store.DeleteRange(ctx, minTKey, maxTKey); err != nil {
			dvid.Criticalf("Can't delete label %d RLEs: %v", fromLabel, err)
		}
	}

	if len(blocksChanged) == 0 {
		dvid.Debugf("No changes needed when merging %s into %d.  Aborting.\n", m.Merged, m.Target)
		return
	}

	// Publish block-level merge
	evt := datastore.SyncEvent{d.DataUUID(), labels.MergeBlockEvent}
	msg := datastore.SyncMessage{labels.MergeBlockEvent, v, labels.DeltaMerge{m, blocksChanged}}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		dvid.Errorf("can't notify subscribers for event %v: %v\n", evt, err)
	}

	// Update datastore with all toLabel RLEs that were changed
	ctx := datastore.NewVersionedCtx(d, v)
	batch := batcher.NewBatch(ctx)
	for blockStr := range blocksChanged {
		tk := NewTKey(toLabel, blockStr)
		serialization, err := toLabelRLEs[blockStr].MarshalBinary()
		if err != nil {
			dvid.Errorf("Error serializing RLEs for label %d: %v\n", toLabel, err)
		}
		batch.Put(tk, serialization)
	}
	if err := batch.Commit(); err != nil {
		dvid.Errorf("Error on updating RLEs for label %d: %v\n", toLabel, err)
	}
	delta := labels.DeltaReplaceSize{
		Label:   toLabel,
		OldSize: toLabelSize,
		NewSize: toLabelSize + addedVoxels,
	}
	evt = datastore.SyncEvent{d.DataUUID(), labels.ChangeSizeEvent}
	msg = datastore.SyncMessage{labels.ChangeSizeEvent, v, delta}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		dvid.Errorf("can't notify subscribers for event %v: %v\n", evt, err)
	}

	evt = datastore.SyncEvent{d.DataUUID(), labels.MergeEndEvent}
	msg = datastore.SyncMessage{labels.MergeEndEvent, v, labels.DeltaMergeEnd{m}}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		dvid.Errorf("can't notify subscribers for event %v: %v\n", evt, err)
	}
}
Example #24
// Partition returns JSON of differently sized subvolumes that attempt to distribute
// the number of active blocks per subvolume.
func (d *Data) Partition(ctx storage.Context, batchsize int32) ([]byte, error) {
	// Partition Z as perfectly as we can.
	dz := d.MaxZ - d.MinZ + 1
	zleft := dz % batchsize

	// Adjust Z range
	layerBegZ := d.MinZ
	layerEndZ := layerBegZ + batchsize - 1

	// Iterate through blocks in ascending Z, calculating active extents and subvolume coverage.
	// Keep track of current layer = batchsize of blocks in Z.
	var subvolumes subvolumesT
	subvolumes.Subvolumes = []subvolumeT{}
	subvolumes.ROI.MinChunk[2] = d.MinZ
	subvolumes.ROI.MaxChunk[2] = d.MaxZ

	layer := d.newLayer(layerBegZ, layerEndZ)

	db, err := storage.SmallDataStore()
	if err != nil {
		return nil, err
	}
	merge := true
	var f storage.ChunkFunc = func(chunk *storage.Chunk) error {
		ibytes, err := chunk.K.ClassBytes(keyROI)
		if err != nil {
			return err
		}
		index := new(indexRLE)
		if err = index.IndexFromBytes(ibytes); err != nil {
			return fmt.Errorf("Unable to get indexRLE out of []byte encoding: %v\n", err)
		}

		// If we are in new layer, process last one.
		z := index.start.Value(2)
		if z > layerEndZ {
			// Process last layer
			dvid.Debugf("Computing subvolumes in layer with Z %d -> %d (dz %d)\n",
				layer.minZ, layer.maxZ, layer.maxZ-layer.minZ+1)
			d.addSubvolumes(layer, &subvolumes, batchsize, merge)

			// Init variables for next layer
			layerBegZ = layerEndZ + 1
			layerEndZ += batchsize
			if zleft > 0 {
				layerEndZ++
				zleft--
			}
			layer = d.newLayer(layerBegZ, layerEndZ)
		}

		// Check this block against current layer extents
		layer.extend(index)
		return nil
	}
	mintk := storage.MinTKey(keyROI)
	maxtk := storage.MaxTKey(keyROI)
	err = db.ProcessRange(ctx, mintk, maxtk, &storage.ChunkOp{}, f)
	if err != nil {
		return nil, err
	}

	// Process last incomplete layer if there is one.
	if len(layer.activeBlocks) > 0 {
		dvid.Debugf("Computing subvolumes for final layer Z %d -> %d (dz %d)\n",
			layer.minZ, layer.maxZ, layer.maxZ-layer.minZ+1)
		d.addSubvolumes(layer, &subvolumes, batchsize, merge)
	}
	subvolumes.NumSubvolumes = int32(len(subvolumes.Subvolumes))

	// Encode as JSON
	jsonBytes, err := json.MarshalIndent(subvolumes, "", "    ")
	if err != nil {
		return nil, err
	}
	return jsonBytes, err
}
Example #25
// SplitLabels splits a portion of a label's voxels into a given split label or, if the given split
// label is 0, a new label, which is returned.  The input is a binary sparse volume and should
// preferably be the smaller portion of a labeled region.  In other words, the caller should chose
// to submit for relabeling the smaller portion of any split.  It is assumed that the given split
// voxels are within the fromLabel set of voxels and will generate unspecified behavior if this is
// not the case.
//
// EVENTS
//
// labels.SplitStartEvent occurs at very start of split and transmits labels.DeltaSplitStart struct.
//
// labels.SplitBlockEvent occurs for every block of a split label and transmits labels.DeltaSplit struct.
//
// labels.SplitEndEvent occurs at end of split and transmits labels.DeltaSplitEnd struct.
//
func (d *Data) SplitLabels(v dvid.VersionID, fromLabel, splitLabel uint64, r io.ReadCloser) (toLabel uint64, err error) {
	store, err := d.GetOrderedKeyValueDB()
	if err != nil {
		err = fmt.Errorf("Data type labelvol had error initializing store: %v\n", err)
		return
	}
	batcher, ok := store.(storage.KeyValueBatcher)
	if !ok {
		err = fmt.Errorf("Data type labelvol requires batch-enabled store, which %q is not\n", store)
		return
	}

	// Create a new label id for this version that will persist to store
	if splitLabel != 0 {
		toLabel = splitLabel
		dvid.Debugf("Splitting subset of label %d into given label %d ...\n", fromLabel, splitLabel)
	} else {
		toLabel, err = d.NewLabel(v)
		if err != nil {
			return
		}
		dvid.Debugf("Splitting subset of label %d into new label %d ...\n", fromLabel, toLabel)
	}

	evt := datastore.SyncEvent{d.DataUUID(), labels.SplitStartEvent}
	splitOpStart := labels.DeltaSplitStart{fromLabel, toLabel}
	splitOpEnd := labels.DeltaSplitEnd{fromLabel, toLabel}

	// Make sure we can split given current merges in progress
	if err := labels.SplitStart(d.getMergeIV(v), splitOpStart); err != nil {
		return toLabel, err
	}
	defer labels.SplitStop(d.getMergeIV(v), splitOpEnd)

	// Signal that we are starting a split.
	msg := datastore.SyncMessage{labels.SplitStartEvent, v, splitOpStart}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		return 0, err
	}

	// Read the sparse volume from reader.
	var split dvid.RLEs
	split, err = dvid.ReadRLEs(r)
	if err != nil {
		return
	}
	toLabelSize, _ := split.Stats()

	// Partition the split spans into blocks.
	var splitmap dvid.BlockRLEs
	splitmap, err = split.Partition(d.BlockSize)
	if err != nil {
		return
	}

	// Get a sorted list of blocks that cover split.
	splitblks := splitmap.SortedKeys()

	// Iterate through the split blocks, read the original block.  If the RLEs
	// are identical, just delete the original.  If not, modify the original.
	// TODO: Modifications should be transactional since it's GET-PUT, therefore use
	// hash on block coord to direct it to blockLabel, splitLabel-specific goroutine; we serialize
	// requests to handle concurrency.
	ctx := datastore.NewVersionedCtx(d, v)
	batch := batcher.NewBatch(ctx)

	for _, splitblk := range splitblks {

		// Get original block
		tk := NewTKey(fromLabel, splitblk)
		val, err := store.Get(ctx, tk)
		if err != nil {
			return toLabel, err
		}

		if val == nil {
			return toLabel, fmt.Errorf("Split RLEs at block %s are not part of original label %d", splitblk, fromLabel)
		}
		var rles dvid.RLEs
		if err := rles.UnmarshalBinary(val); err != nil {
			return toLabel, fmt.Errorf("Unable to unmarshal RLE for original labels in block %s", splitblk)
		}

		// Compare and process based on modifications required.
		remain, err := rles.Split(splitmap[splitblk])
		if err != nil {
			return toLabel, err
		}
		if len(remain) == 0 {
			batch.Delete(tk)
		} else {
			rleBytes, err := remain.MarshalBinary()
			if err != nil {
				return toLabel, fmt.Errorf("can't serialize remain RLEs for split of %d: %v\n", fromLabel, err)
			}
			batch.Put(tk, rleBytes)
		}
	}

	if err = batch.Commit(); err != nil {
		err = fmt.Errorf("Batch PUT during split of %q label %d: %v\n", d.DataName(), fromLabel, err)
		return
	}

	// Publish split event
	evt = datastore.SyncEvent{d.DataUUID(), labels.SplitLabelEvent}
	msg = datastore.SyncMessage{labels.SplitLabelEvent, v, labels.DeltaSplit{fromLabel, toLabel, splitmap, splitblks}}
	if err = datastore.NotifySubscribers(evt, msg); err != nil {
		return
	}

	// Write the split sparse vol.
	if err = d.writeLabelVol(v, toLabel, splitmap, splitblks); err != nil {
		return
	}

	// Publish change in label sizes.
	delta := labels.DeltaNewSize{
		Label: toLabel,
		Size:  toLabelSize,
	}
	evt = datastore.SyncEvent{d.DataUUID(), labels.ChangeSizeEvent}
	msg = datastore.SyncMessage{labels.ChangeSizeEvent, v, delta}
	if err = datastore.NotifySubscribers(evt, msg); err != nil {
		return
	}

	delta2 := labels.DeltaModSize{
		Label:      fromLabel,
		SizeChange: int64(-toLabelSize),
	}
	evt = datastore.SyncEvent{d.DataUUID(), labels.ChangeSizeEvent}
	msg = datastore.SyncMessage{labels.ChangeSizeEvent, v, delta2}
	if err = datastore.NotifySubscribers(evt, msg); err != nil {
		return
	}

	// Publish split end
	evt = datastore.SyncEvent{d.DataUUID(), labels.SplitEndEvent}
	msg = datastore.SyncMessage{labels.SplitEndEvent, v, splitOpEnd}
	if err = datastore.NotifySubscribers(evt, msg); err != nil {
		return
	}

	return toLabel, nil
}
Example #26
// TODO -- Clean up all the writing and simplify now that we have block-aligned writes.
// writeBlocks persists blocks of voxel data asynchronously using batch writes.
func (d *Data) writeBlocks(v dvid.VersionID, b storage.TKeyValues, wg1, wg2 *sync.WaitGroup) error {
	store, err := storage.BigDataStore()
	if err != nil {
		return fmt.Errorf("Data type imageblk had error initializing store: %v\n", err)
	}
	batcher, ok := store.(storage.KeyValueBatcher)
	if !ok {
		return fmt.Errorf("Data type imageblk requires batch-enabled store, which %q is not\n", store)
	}

	preCompress, postCompress := 0, 0

	ctx := datastore.NewVersionedCtx(d, v)
	evt := datastore.SyncEvent{d.DataName(), ChangeBlockEvent}

	<-server.HandlerToken
	go func() {
		defer func() {
			wg1.Done()
			wg2.Done()
			dvid.Debugf("Wrote voxel blocks.  Before %s: %d bytes.  After: %d bytes\n", d.Compression(), preCompress, postCompress)
			server.HandlerToken <- 1
		}()

		batch := batcher.NewBatch(ctx)
		for i, block := range b {
			serialization, err := dvid.SerializeData(block.V, d.Compression(), d.Checksum())
			preCompress += len(block.V)
			postCompress += len(serialization)
			if err != nil {
				dvid.Errorf("Unable to serialize block: %v\n", err)
				return
			}
			batch.Put(block.K, serialization)

			indexZYX, err := DecodeTKey(block.K)
			if err != nil {
				dvid.Errorf("Unable to recover index from block key: %v\n", block.K)
				return
			}
			msg := datastore.SyncMessage{v, Block{indexZYX, block.V}}
			if err := datastore.NotifySubscribers(evt, msg); err != nil {
				dvid.Errorf("Unable to notify subscribers of ChangeBlockEvent in %s\n", d.DataName())
				return
			}

			// Check if we should commit
			if i%KVWriteSize == KVWriteSize-1 {
				if err := batch.Commit(); err != nil {
					dvid.Errorf("Error on trying to write batch: %v\n", err)
					return
				}
				batch = batcher.NewBatch(ctx)
			}
		}
		if err := batch.Commit(); err != nil {
			dvid.Errorf("Error on trying to write batch: %v\n", err)
			return
		}
	}()
	return nil
}
Example #27
// loadXYImages optimizes bulk loading of XY images by reading all slices for a block layer
// before processing, trading memory for speed.
func (d *Data) loadXYImages(load *bulkLoadInfo) error {
	// Load first slice, get dimensions, allocate blocks for whole slice.
	// Note: We don't need to lock the block slices because goroutines do NOT
	// access the same elements of a slice.
	const numLayers = 2
	var numBlocks int
	var blocks [numLayers]storage.TKeyValues
	var layerTransferred, layerWritten [numLayers]sync.WaitGroup
	var waitForWrites sync.WaitGroup

	curBlocks := 0
	blockSize := d.BlockSize()
	blockBytes := blockSize.Prod() * int64(d.Values.BytesPerElement())

	// Iterate through XY slices batched into the Z length of blocks.
	fileNum := 1
	for _, filename := range load.filenames {
		server.BlockOnInteractiveRequests("imageblk.loadXYImages")

		timedLog := dvid.NewTimeLog()

		zInBlock := load.offset.Value(2) % blockSize.Value(2)
		firstSlice := fileNum == 1
		lastSlice := fileNum == len(load.filenames)
		firstSliceInBlock := firstSlice || zInBlock == 0
		lastSliceInBlock := lastSlice || zInBlock == blockSize.Value(2)-1
		lastBlocks := fileNum+int(blockSize.Value(2)) > len(load.filenames)

		// Load images synchronously
		vox, err := d.loadXYImage(filename, load.offset)
		if err != nil {
			return err
		}

		// Allocate blocks and/or load old block data if first/last XY blocks.
		// Note: Slices are only zeroed out on first and last slice with assumption
		// that ExtData is packed in XY footprint (values cover full extent).
		// If that is NOT the case, we need to zero out blocks for each block layer.
		if fileNum == 1 || (lastBlocks && firstSliceInBlock) {
			numBlocks = dvid.GetNumBlocks(vox, blockSize)
			if fileNum == 1 {
				for layer := 0; layer < numLayers; layer++ {
					blocks[layer] = make(storage.TKeyValues, numBlocks, numBlocks)
					for b := 0; b < numBlocks; b++ {
						blocks[layer][b].V = d.BackgroundBlock()
					}
				}
				var bufSize uint64 = uint64(blockBytes) * uint64(numBlocks) * uint64(numLayers) / 1000000
				dvid.Debugf("Allocated %d MB for buffers.\n", bufSize)
			} else {
				blocks[curBlocks] = make(storage.TKeyValues, numBlocks, numBlocks)
				for b := 0; b < numBlocks; b++ {
					blocks[curBlocks][b].V = d.BackgroundBlock()
				}
			}
			err = d.loadOldBlocks(load.versionID, vox, blocks[curBlocks])
			if err != nil {
				return err
			}
		}

		// Transfer data between external<->internal blocks asynchronously
		layerTransferred[curBlocks].Add(1)
		go func(vox *Voxels, curBlocks int) {
			// Track point extents
			if d.Extents().AdjustPoints(vox.StartPoint(), vox.EndPoint()) {
				load.extentChanged.SetTrue()
			}

			// Process an XY image (slice).
			changed, err := d.writeXYImage(load.versionID, vox, blocks[curBlocks])
			if err != nil {
				dvid.Infof("Error writing XY image: %v\n", err)
			}
			if changed {
				load.extentChanged.SetTrue()
			}
			layerTransferred[curBlocks].Done()
		}(vox, curBlocks)

		// If this is the end of a block (or filenames), wait until all goroutines complete,
		// then asynchronously write blocks.
		if lastSliceInBlock {
			waitForWrites.Add(1)
			layerWritten[curBlocks].Add(1)
			go func(curBlocks int) {
				layerTransferred[curBlocks].Wait()
				dvid.Debugf("Writing block buffer %d using %s and %s...\n",
					curBlocks, d.Compression(), d.Checksum())
				err := d.writeBlocks(load.versionID, blocks[curBlocks], &layerWritten[curBlocks], &waitForWrites)
				if err != nil {
					dvid.Errorf("Error in async write of voxel blocks: %v", err)
				}
			}(curBlocks)
			// We can't move to buffer X until all blocks from buffer X have already been written.
			curBlocks = (curBlocks + 1) % numLayers
			dvid.Debugf("Waiting for layer %d to be written before reusing layer %d blocks\n",
				curBlocks, curBlocks)
			layerWritten[curBlocks].Wait()
			dvid.Debugf("Using layer %d...\n", curBlocks)
		}

		fileNum++
		load.offset = load.offset.Add(dvid.Point3d{0, 0, 1})
		timedLog.Infof("Loaded %s slice %s", d.DataName(), vox)
	}
	waitForWrites.Wait()
	return nil
}
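loadXYImages ping-pongs between two block buffers so that slice reading and block writing overlap: layerTransferred gates the asynchronous write on completed transfers, and layerWritten blocks reuse of a buffer until its previous write has finished. A stripped-down, self-contained sketch of that two-buffer pipeline (the "write" here is just a print):

package main

import (
	"fmt"
	"sync"
)

const numLayers = 2

func main() {
	var written [numLayers]sync.WaitGroup
	var allWrites sync.WaitGroup

	cur := 0
	for batch := 0; batch < 6; batch++ {
		// ... fill buffer `cur` with data here ...

		written[cur].Add(1)
		allWrites.Add(1)
		go func(buf, batch int) {
			defer written[buf].Done()
			defer allWrites.Done()
			fmt.Printf("writing batch %d from buffer %d\n", batch, buf)
		}(cur, batch)

		// Switch buffers, but only after the other buffer's previous
		// write has completed.
		cur = (cur + 1) % numLayers
		written[cur].Wait()
	}
	allWrites.Wait()
}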
Example #28
func (db *LevelDB) deleteSingleVersion(vctx storage.VersionedCtx) error {
	dvid.StartCgo()

	minTKey := storage.MinTKey(storage.TKeyMinClass)
	maxTKey := storage.MaxTKey(storage.TKeyMaxClass)
	minKey, err := vctx.MinVersionKey(minTKey)
	if err != nil {
		return err
	}
	maxKey, err := vctx.MaxVersionKey(maxTKey)
	if err != nil {
		return err
	}

	const BATCH_SIZE = 10000
	batch := db.NewBatch(vctx).(*goBatch)

	ro := levigo.NewReadOptions()
	it := db.ldb.NewIterator(ro)
	defer func() {
		it.Close()
		dvid.StopCgo()
	}()

	numKV := 0
	it.Seek(minKey)
	deleteVersion := vctx.VersionID()
	for {
		if err := it.GetError(); err != nil {
			return fmt.Errorf("Error iterating during DeleteAll for %s: %v", vctx, err)
		}
		if it.Valid() {
			itKey := it.Key()
			storage.StoreKeyBytesRead <- len(itKey)
			// Did we pass the final key?
			if bytes.Compare(itKey, maxKey) > 0 {
				break
			}
			_, v, _, err := storage.DataKeyToLocalIDs(itKey)
			if err != nil {
				return fmt.Errorf("Error on DELETE ALL for version %d: %v", vctx.VersionID(), err)
			}
			if v == deleteVersion {
				batch.WriteBatch.Delete(itKey)
				if (numKV+1)%BATCH_SIZE == 0 {
					if err := batch.Commit(); err != nil {
						dvid.Criticalf("Error on batch commit of DeleteAll at key-value pair %d: %v\n", numKV, err)
						return fmt.Errorf("Error on batch commit of DeleteAll at key-value pair %d: %v", numKV, err)
					}
					batch = db.NewBatch(vctx).(*goBatch)
					dvid.Debugf("Deleted %d key-value pairs in ongoing DELETE ALL for %s.\n", numKV+1, vctx)
				}
				numKV++
			}

			it.Next()
		} else {
			break
		}
	}
	if numKV%BATCH_SIZE != 0 {
		if err := batch.Commit(); err != nil {
			dvid.Criticalf("Error on last batch commit of DeleteAll: %v\n", err)
			return fmt.Errorf("Error on last batch commit of DeleteAll: %v", err)
		}
	}
	dvid.Debugf("Deleted %d key-value pairs via DELETE ALL for %s.\n", numKV, vctx)
	return nil
}
Example #29
func (db *LevelDB) deleteAllVersions(ctx storage.Context) error {
	dvid.StartCgo()

	var err error
	var minKey, maxKey storage.Key

	vctx, versioned := ctx.(storage.VersionedCtx)
	if versioned {
		// Don't have to worry about tombstones.  Delete all keys from all versions for this instance id.
		minTKey := storage.MinTKey(storage.TKeyMinClass)
		maxTKey := storage.MaxTKey(storage.TKeyMaxClass)
		minKey, err = vctx.MinVersionKey(minTKey)
		if err != nil {
			return err
		}
		maxKey, err = vctx.MaxVersionKey(maxTKey)
		if err != nil {
			return err
		}
	} else {
		minKey, maxKey = ctx.KeyRange()
	}

	const BATCH_SIZE = 10000
	batch := db.NewBatch(ctx).(*goBatch)

	ro := levigo.NewReadOptions()
	it := db.ldb.NewIterator(ro)
	defer func() {
		it.Close()
		dvid.StopCgo()
	}()

	numKV := 0
	it.Seek(minKey)
	for {
		if err := it.GetError(); err != nil {
			return fmt.Errorf("Error iterating during DeleteAll for %s: %v", ctx, err)
		}
		if it.Valid() {
			itKey := it.Key()
			storage.StoreKeyBytesRead <- len(itKey)
			// Did we pass the final key?
			if bytes.Compare(itKey, maxKey) > 0 {
				break
			}

			batch.WriteBatch.Delete(itKey)
			if (numKV+1)%BATCH_SIZE == 0 {
				if err := batch.Commit(); err != nil {
					dvid.Criticalf("Error on batch commit of DeleteAll at key-value pair %d: %v\n", numKV, err)
					return fmt.Errorf("Error on batch commit of DeleteAll at key-value pair %d: %v", numKV, err)
				}
				batch = db.NewBatch(ctx).(*goBatch)
				dvid.Debugf("Deleted %d key-value pairs in ongoing DELETE ALL for %s.\n", numKV+1, ctx)
			}
			numKV++
			it.Next()
		} else {
			break
		}
	}
	if numKV%BATCH_SIZE != 0 {
		if err := batch.Commit(); err != nil {
			dvid.Criticalf("Error on last batch commit of DeleteAll: %v\n", err)
			return fmt.Errorf("Error on last batch commit of DeleteAll: %v", err)
		}
	}
	dvid.Debugf("Deleted %d key-value pairs via DELETE ALL for %s.\n", numKV, ctx)
	return nil
}
Example #30
func (d *Data) ConstructTiles(uuidStr string, tileSpec TileSpec, request datastore.Request) error {
	config := request.Settings()
	uuid, versionID, err := datastore.MatchingUUID(uuidStr)
	if err != nil {
		return err
	}
	if err = datastore.AddToNodeLog(uuid, []string{request.Command.String()}); err != nil {
		return err
	}

	source, err := datastore.GetDataByUUID(uuid, d.Source)
	if err != nil {
		return err
	}
	src, ok := source.(*imageblk.Data)
	if !ok {
		return fmt.Errorf("Cannot construct imagetile for non-voxels data: %s", d.Source)
	}

	// Save the current tile specification
	d.Levels = tileSpec
	if err := datastore.SaveDataByUUID(uuid, d); err != nil {
		return err
	}

	// Get size of tile at lowest resolution.
	lastLevel := Scaling(len(tileSpec) - 1)
	loresSpec, found := tileSpec[lastLevel]
	if !found {
		return fmt.Errorf("Illegal tile spec.  Should have levels 0 to absent %d.", lastLevel)
	}
	var loresSize [3]float64
	for i := 0; i < 3; i++ {
		loresSize[i] = float64(loresSpec.Resolution[i]) * float64(DefaultTileSize[i])
	}
	loresMag := dvid.Point3d{1, 1, 1}
	for i := Scaling(0); i < lastLevel; i++ {
		levelMag := tileSpec[i].levelMag
		loresMag[0] *= levelMag[0]
		loresMag[1] *= levelMag[1]
		loresMag[2] *= levelMag[2]
	}

	// Get min and max points in terms of distance.
	var minPtDist, maxPtDist [3]float64
	for i := uint8(0); i < 3; i++ {
		minPtDist[i] = float64(src.MinPoint.Value(i)) * float64(src.VoxelSize[i])
		maxPtDist[i] = float64(src.MaxPoint.Value(i)) * float64(src.VoxelSize[i])
	}

	// Adjust min and max points for the tileable surface at lowest resolution.
	var minTiledPt, maxTiledPt dvid.Point3d
	for i := 0; i < 3; i++ {
		minInt, _ := math.Modf(minPtDist[i] / loresSize[i])
		maxInt, _ := math.Modf(maxPtDist[i] / loresSize[i])
		minTileCoord := int32(minInt)
		maxTileCoord := int32(maxInt)
		minTiledPt[i] = minTileCoord * DefaultTileSize[i] * loresMag[i]
		maxTiledPt[i] = (maxTileCoord+1)*DefaultTileSize[i]*loresMag[i] - 1
	}
	sizeVolume := maxTiledPt.Sub(minTiledPt).AddScalar(1)

	// Setup swappable ExtData buffers (the stitched slices) so we can be generating tiles
	// at same time we are reading and stitching them.
	var bufferLock [2]sync.Mutex
	var sliceBuffers [2]*imageblk.Voxels
	var bufferNum int

	// Get the planes we should tile.
	planes, err := config.GetShapes("planes", ";")
	if err != nil {
		return err
	}
	if planes == nil {
		// If no planes are specified, construct imagetile for 3 orthogonal planes.
		planes = []dvid.DataShape{dvid.XY, dvid.XZ, dvid.YZ}
	}

	outF, err := d.putTileFunc(versionID)
	if err != nil {
		return err
	}

	// sort the tile spec keys to iterate from highest to lowest resolution
	var sortedKeys []int
	for scaling := range tileSpec {
		sortedKeys = append(sortedKeys, int(scaling))
	}
	sort.Ints(sortedKeys)

	for _, plane := range planes {
		timedLog := dvid.NewTimeLog()
		offset := minTiledPt.Duplicate()

		switch {

		case plane.Equals(dvid.XY):
			width, height, err := plane.GetSize2D(sizeVolume)
			if err != nil {
				return err
			}
			dvid.Debugf("Tiling XY image %d x %d pixels\n", width, height)
			for z := src.MinPoint.Value(2); z <= src.MaxPoint.Value(2); z++ {
				server.BlockOnInteractiveRequests("imagetile.ConstructTiles [xy]")

				sliceLog := dvid.NewTimeLog()
				offset = offset.Modify(map[uint8]int32{2: z})
				slice, err := dvid.NewOrthogSlice(dvid.XY, offset, dvid.Point2d{width, height})
				if err != nil {
					return err
				}
				bufferLock[bufferNum].Lock()
				sliceBuffers[bufferNum], err = src.NewVoxels(slice, nil)
				if err != nil {
					return err
				}
				if err = src.GetVoxels(versionID, sliceBuffers[bufferNum], nil); err != nil {
					return err
				}
				// Iterate through the different scales, extracting tiles at each resolution.
				go func(bufferNum int, offset dvid.Point) {
					defer bufferLock[bufferNum].Unlock()
					timedLog := dvid.NewTimeLog()
					for _, key := range sortedKeys {
						scaling := Scaling(key)
						levelSpec := tileSpec[scaling]
						if err != nil {
							dvid.Errorf("Error in tiling: %v\n", err)
							return
						}
						if err := d.extractTiles(sliceBuffers[bufferNum], offset, scaling, outF); err != nil {
							dvid.Errorf("Error in tiling: %v\n", err)
							return
						}
						if int(scaling) < len(tileSpec)-1 {
							if err := sliceBuffers[bufferNum].DownRes(levelSpec.levelMag); err != nil {
								dvid.Errorf("Error in tiling: %v\n", err)
								return
							}
						}
					}
					timedLog.Debugf("Tiled XY Tile using buffer %d", bufferNum)
				}(bufferNum, offset)

				sliceLog.Infof("Read XY Tile @ Z = %d, now tiling...", z)
				bufferNum = (bufferNum + 1) % 2
			}
			timedLog.Infof("Total time to generate XY Tiles")

		case plane.Equals(dvid.XZ):
			width, height, err := plane.GetSize2D(sizeVolume)
			if err != nil {
				return err
			}
			dvid.Debugf("Tiling XZ image %d x %d pixels\n", width, height)
			for y := src.MinPoint.Value(1); y <= src.MaxPoint.Value(1); y++ {
				server.BlockOnInteractiveRequests("imagetile.ConstructTiles [xz]")

				sliceLog := dvid.NewTimeLog()
				offset = offset.Modify(map[uint8]int32{1: y})
				slice, err := dvid.NewOrthogSlice(dvid.XZ, offset, dvid.Point2d{width, height})
				if err != nil {
					return err
				}
				bufferLock[bufferNum].Lock()
				sliceBuffers[bufferNum], err = src.NewVoxels(slice, nil)
				if err != nil {
					return err
				}
				if err = src.GetVoxels(versionID, sliceBuffers[bufferNum], nil); err != nil {
					return err
				}
				// Iterate through the different scales, extracting tiles at each resolution.
				go func(bufferNum int, offset dvid.Point) {
					defer bufferLock[bufferNum].Unlock()
					timedLog := dvid.NewTimeLog()
					for _, key := range sortedKeys {
						scaling := Scaling(key)
						levelSpec := tileSpec[scaling]
						if err != nil {
							dvid.Errorf("Error in tiling: %v\n", err)
							return
						}
						if err := d.extractTiles(sliceBuffers[bufferNum], offset, scaling, outF); err != nil {
							dvid.Errorf("Error in tiling: %v\n", err)
							return
						}
						if int(scaling) < len(tileSpec)-1 {
							if err := sliceBuffers[bufferNum].DownRes(levelSpec.levelMag); err != nil {
								dvid.Errorf("Error in tiling: %v\n", err)
								return
							}
						}
					}
					timedLog.Debugf("Tiled XZ Tile using buffer %d", bufferNum)
				}(bufferNum, offset)

				sliceLog.Infof("Read XZ Tile @ Y = %d, now tiling...", y)
				bufferNum = (bufferNum + 1) % 2
			}
			timedLog.Infof("Total time to generate XZ Tiles")

		case plane.Equals(dvid.YZ):
			width, height, err := plane.GetSize2D(sizeVolume)
			if err != nil {
				return err
			}
			dvid.Debugf("Tiling YZ image %d x %d pixels\n", width, height)
			for x := src.MinPoint.Value(0); x <= src.MaxPoint.Value(0); x++ {
				server.BlockOnInteractiveRequests("imagetile.ConstructTiles [yz]")

				sliceLog := dvid.NewTimeLog()
				offset = offset.Modify(map[uint8]int32{0: x})
				slice, err := dvid.NewOrthogSlice(dvid.YZ, offset, dvid.Point2d{width, height})
				if err != nil {
					return err
				}
				bufferLock[bufferNum].Lock()
				sliceBuffers[bufferNum], err = src.NewVoxels(slice, nil)
				if err != nil {
					return err
				}
				if err = src.GetVoxels(versionID, sliceBuffers[bufferNum], nil); err != nil {
					return err
				}
				// Iterate through the different scales, extracting tiles at each resolution.
				go func(bufferNum int, offset dvid.Point) {
					defer bufferLock[bufferNum].Unlock()
					timedLog := dvid.NewTimeLog()
					for _, key := range sortedKeys {
						scaling := Scaling(key)
						levelSpec := tileSpec[scaling]
						outF, err := d.putTileFunc(versionID)
						if err != nil {
							dvid.Errorf("Error in tiling: %v\n", err)
							return
						}
						if err := d.extractTiles(sliceBuffers[bufferNum], offset, scaling, outF); err != nil {
							dvid.Errorf("Error in tiling: %v\n", err)
							return
						}
						if int(scaling) < len(tileSpec)-1 {
							if err := sliceBuffers[bufferNum].DownRes(levelSpec.levelMag); err != nil {
								dvid.Errorf("Error in tiling: %v\n", err)
								return
							}
						}
					}
					timedLog.Debugf("Tiled YZ Tile using buffer %d", bufferNum)
				}(bufferNum, offset)

				sliceLog.Debugf("Read YZ Tile @ X = %d, now tiling...", x)
				bufferNum = (bufferNum + 1) % 2
			}
			timedLog.Infof("Total time to generate YZ Tiles")

		default:
			dvid.Infof("Skipping request to tile '%s'.  Unsupported.", plane)
		}
	}
	return nil
}