Example 1
func getValue(r api.Row, verKey []byte) ([]byte, error) {

	for _, readItem := range r[familyName] {

		// readItem.Column contains the family name as a prefix of the key,
		// so we have to trim that prefix and decode the remainder back to bytes.
		// Comparing by encoding verKey instead would not work.
		itemVer, err := decodeKey(readItem.Column)
		if err != nil {
			return nil, fmt.Errorf("Error in getValue(): %s\n", err)
		}

		if bytes.Equal(itemVer, verKey) {
			return readItem.Value, nil
		}
	}

	// Debug: the requested version wasn't found, so log what this row actually contains.
	dvid.Errorf("Row available %s", r[familyName])
	for _, readItem := range r[familyName] {

		itemVer, _ := decodeKey(readItem.Column)
		dvid.Errorf("Versions available %s", itemVer)

	}

	return nil, fmt.Errorf("Failed to find version %s in Row", verKey)
}
Example 2
// MigrateInstance migrates a data instance locally from an old storage
// engine to the current configured storage.  After completion of the copy,
// the data instance in the old storage is deleted.
func MigrateInstance(uuid dvid.UUID, source dvid.InstanceName, oldStore dvid.Store, c dvid.Config) error {
	if manager == nil {
		return ErrManagerNotInitialized
	}

	// Determine whether the copy should flatten all versions (transmit=flatten).
	transmit, _, err := c.GetString("transmit")
	if err != nil {
		return err
	}
	var flatten bool
	if transmit == "flatten" {
		flatten = true
	}

	// Get the source data instance.
	d, err := manager.getDataByUUIDName(uuid, source)
	if err != nil {
		return err
	}

	// Get the current store for this data instance.
	storer, ok := d.(storage.Accessor)
	if !ok {
		return fmt.Errorf("unable to migrate data %q: unable to access backing store", d.DataName())
	}
	curKV, err := storer.GetOrderedKeyValueDB()
	if err != nil {
		return fmt.Errorf("unable to get backing store for data %q: %v\n", source, err)
	}

	// Get the old store.
	oldKV, ok := oldStore.(storage.OrderedKeyValueDB)
	if !ok {
		return fmt.Errorf("unable to migrate data %q from store %s which isn't ordered kv store", source, storer)
	}

	// Abort if the two stores are the same.
	if curKV == oldKV {
		return fmt.Errorf("old store for data %q seems same as current store", source)
	}

	// Migrate data asynchronously.
	go func() {
		if err := copyData(oldKV, curKV, d, nil, uuid, nil, flatten); err != nil {
			dvid.Errorf("error in migration of data %q: %v\n", source, err)
			return
		}
		// delete data off old store.
		dvid.Infof("Starting delete of instance %q from old storage %q\n", d.DataName(), oldKV)
		ctx := storage.NewDataContext(d, 0)
		if err := oldKV.DeleteAll(ctx, true); err != nil {
			dvid.Errorf("deleting instance %q from %q after copy to %q: %v\n", d.DataName(), oldKV, curKV, err)
			return
		}
	}()

	dvid.Infof("Migrating data %q from store %q to store %q ...\n", d.DataName(), oldKV, curKV)
	return nil
}
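For context, here is a minimal call-site sketch for MigrateInstance. It assumes dvid.NewConfig() and Config.Set() are available for building the configuration, and "grayscale" is a hypothetical instance name; treat it as an illustration of the transmit=flatten option rather than exact API.

func migrateExample(uuid dvid.UUID, oldStore dvid.Store) error {
	// Hypothetical call site: migrate instance "grayscale" from oldStore to the
	// currently assigned store, flattening all versions during the copy.
	c := dvid.NewConfig()        // assumed constructor for dvid.Config
	c.Set("transmit", "flatten") // matches the c.GetString("transmit") check above
	return datastore.MigrateInstance(uuid, "grayscale", oldStore, c)
}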
Example 3
// PutRange puts key-value pairs.  Note that it could be more efficient to use the
// Batcher interface so you don't have to create and keep a slice of TKeyValue.
// Some databases, like leveldb, will copy on batch put anyway.
func (db *BigTable) PutRange(ctx storage.Context, tkvs []storage.TKeyValue) error {
	if db == nil {
		return fmt.Errorf("Can't call PutRange() on nil BigTable")
	}
	if ctx == nil {
		return fmt.Errorf("Received nil context in PutRange()")
	}

	for _, tkeyvalue := range tkvs {

		unvKey, verKey, err := ctx.SplitKey(tkeyvalue.K)
		if err != nil {
			dvid.Errorf("Error in PutRange(): %v\n", err)
			continue
		}

		mut := api.NewMutation()

		mut.Set(familyName, encodeKey(verKey), 0, tkeyvalue.V)
		err = tbl.Apply(db.ctx, encodeKey(unvKey), mut)
		if err != nil {
			dvid.Errorf("Failed to Put value in PutRange()")
		}
	}

	return nil
}
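For reference, the Batcher alternative mentioned in the doc comment looks roughly like the NewBatch/Put/Commit sequence used in the writeBlocks example later in this listing. This is only a sketch of that pattern, assuming a store that implements storage.KeyValueBatcher; it is not this driver's implementation.

func putRangeBatched(batcher storage.KeyValueBatcher, ctx storage.Context, tkvs []storage.TKeyValue) error {
	// Stage all puts in one batch, then commit once instead of one Apply per key.
	batch := batcher.NewBatch(ctx)
	for _, tkv := range tkvs {
		batch.Put(tkv.K, tkv.V)
	}
	return batch.Commit()
}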
Example 4
// Processes each change as we get it.
// TODO -- accumulate larger # of changes before committing to prevent
// excessive compaction time?  This assumes LSM storage engine, which
// might not always hold in future, so stick with incremental update
// until proven to be a bottleneck.
func (d *Data) handleBlockEvent(in <-chan datastore.SyncMessage, done <-chan struct{}) {
	store, err := storage.SmallDataStore()
	if err != nil {
		dvid.Errorf("Data type labelvol had error initializing store: %v\n", err)
		return
	}
	batcher, ok := store.(storage.KeyValueBatcher)
	if !ok {
		dvid.Errorf("Data type labelvol requires batch-enabled store, which %q is not\n", store)
		return
	}

	for msg := range in {
		select {
		case <-done:
			return
		default:
			ctx := datastore.NewVersionedCtx(d, msg.Version)
			switch delta := msg.Delta.(type) {
			case imageblk.Block:
				d.ingestBlock(ctx, delta, batcher)
			case labels.DeleteBlock:
				d.deleteBlock(ctx, delta, batcher)
			default:
				dvid.Criticalf("Cannot sync labelvol from block event.  Got unexpected delta: %v\n", msg)
			}
		}
	}
}
Example 5
// MergeStart handles label map caches during an active merge operation.  Note that if there are
// multiple synced label instances, the InstanceVersion will always be the labelblk instance.
// Multiple merges into a single label are allowed, but chained merges are not.  For example,
// you can merge labels 1, 2, and 3 into 4, then later merge 6 into 4.  However, you cannot
// concurrently merge label 4 into some other label because there can be a race condition between
// 3 -> 4 and 4 -> X.
func MergeStart(iv dvid.InstanceVersion, op MergeOp) error {
	dvid.Infof("MergeStart starting for iv %v with op %v.  mergeCache: %v\n", iv, op, mc.m)

	// Don't allow a merge to start in the middle of a concurrent merge/split.
	if labelsSplitting.IsDirty(iv, op.Target) { // we might be able to relax this one.
		return fmt.Errorf("can't merge into label %d while it has an ongoing split", op.Target)
	}
	if mc.MergingToOther(iv, op.Target) {
		dvid.Errorf("can't merge label %d while it is currently merging into another label", op.Target)
		return fmt.Errorf("can't merge label %d while it is currently merging into another label", op.Target)
	}
	for merged := range op.Merged {
		if labelsSplitting.IsDirty(iv, merged) {
			return fmt.Errorf("can't merge label %d while it has an ongoing split", merged)
		}
		if labelsMerging.IsDirty(iv, merged) {
			dvid.Errorf("can't merge label %d while it is currently involved in a merge", merged)
			return fmt.Errorf("can't merge label %d while it is currently involved in a merge", merged)
		}
	}

	// Add the merge to the mapping.
	if err := mc.Add(iv, op); err != nil {
		return err
	}

	// Adjust the dirty counts on the involved labels.
	labelsMerging.AddMerge(iv, op)

	return nil
}
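Below is a hedged sketch of the allowed and disallowed sequences the doc comment describes. The MergeOp field names (Target, Merged) and the map-style Set type are inferred from their use in the function body above and may not match the real declarations exactly.

func mergeSequenceExample(iv dvid.InstanceVersion) {
	// Merge labels 1, 2, and 3 into 4 -- allowed.
	op1 := MergeOp{Target: 4, Merged: Set{1: struct{}{}, 2: struct{}{}, 3: struct{}{}}}
	if err := MergeStart(iv, op1); err != nil {
		dvid.Errorf("unexpected rejection: %v\n", err)
	}

	// Later also merge 6 into 4 -- allowed, since multiple merges into one target are fine.
	op2 := MergeOp{Target: 4, Merged: Set{6: struct{}{}}}
	if err := MergeStart(iv, op2); err != nil {
		dvid.Errorf("unexpected rejection: %v\n", err)
	}

	// Chained merge: while 1/2/3 -> 4 is still in progress, merging 4 into another
	// label is rejected because label 4 is reported as busy by the dirty caches.
	op3 := MergeOp{Target: 99, Merged: Set{4: struct{}{}}}
	if err := MergeStart(iv, op3); err != nil {
		dvid.Infof("expected rejection: %v\n", err)
	}
}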
Example 6
// TODO -- Clean up all the writing and simplify now that we have block-aligned writes.
// writeBlocks ingests blocks of voxel data asynchronously using batch writes.
func (d *Data) writeBlocks(v dvid.VersionID, b storage.TKeyValues, wg1, wg2 *sync.WaitGroup) error {
	batcher, err := d.GetKeyValueBatcher()
	if err != nil {
		return err
	}

	preCompress, postCompress := 0, 0

	ctx := datastore.NewVersionedCtx(d, v)
	evt := datastore.SyncEvent{d.DataUUID(), IngestBlockEvent}

	<-server.HandlerToken
	go func() {
		defer func() {
			wg1.Done()
			wg2.Done()
			dvid.Debugf("Wrote voxel blocks.  Before %s: %d bytes.  After: %d bytes\n", d.Compression(), preCompress, postCompress)
			server.HandlerToken <- 1
		}()

		mutID := d.NewMutationID()
		batch := batcher.NewBatch(ctx)
		for i, block := range b {
			serialization, err := dvid.SerializeData(block.V, d.Compression(), d.Checksum())
			if err != nil {
				dvid.Errorf("Unable to serialize block: %v\n", err)
				return
			}
			preCompress += len(block.V)
			postCompress += len(serialization)
			batch.Put(block.K, serialization)

			indexZYX, err := DecodeTKey(block.K)
			if err != nil {
				dvid.Errorf("Unable to recover index from block key: %v\n", block.K)
				return
			}
			msg := datastore.SyncMessage{IngestBlockEvent, v, Block{indexZYX, block.V, mutID}}
			if err := datastore.NotifySubscribers(evt, msg); err != nil {
				dvid.Errorf("Unable to notify subscribers of ChangeBlockEvent in %s\n", d.DataName())
				return
			}

			// Check if we should commit
			if i%KVWriteSize == KVWriteSize-1 {
				if err := batch.Commit(); err != nil {
					dvid.Errorf("Error on trying to write batch: %v\n", err)
					return
				}
				batch = batcher.NewBatch(ctx)
			}
		}
		if err := batch.Commit(); err != nil {
			dvid.Errorf("Error on trying to write batch: %v\n", err)
			return
		}
	}()
	return nil
}
Example 7
// Goroutine that handles relabeling of blocks during a merge operation.
// Since the same block coordinate always gets mapped to the same goroutine, we handle
// concurrency by serializing GET/PUT for a particular block coordinate.
func (d *Data) mergeBlock(in <-chan mergeOp) {
	store, err := storage.MutableStore()
	if err != nil {
		dvid.Errorf("Data type labelblk had error initializing store: %v\n", err)
		return
	}
	blockBytes := int(d.BlockSize().Prod() * 8)

	for op := range in {
		tk := NewTKeyByCoord(op.izyx)
		data, err := store.Get(op.ctx, tk)
		if err != nil {
			dvid.Errorf("Error on GET of labelblk with coord string %q\n", op.izyx)
			op.wg.Done()
			continue
		}
		if data == nil {
			dvid.Errorf("nil label block where merge was done!\n")
			op.wg.Done()
			continue
		}

		blockData, _, err := dvid.DeserializeData(data, true)
		if err != nil {
			dvid.Criticalf("unable to deserialize label block in '%s': %v\n", d.DataName(), err)
			op.wg.Done()
			continue
		}
		if len(blockData) != blockBytes {
			dvid.Criticalf("After labelblk deserialization got back %d bytes, expected %d bytes\n", len(blockData), blockBytes)
			op.wg.Done()
			continue
		}

		// Iterate through this block of labels and relabel if label in merge.
		for i := 0; i < blockBytes; i += 8 {
			label := binary.LittleEndian.Uint64(blockData[i : i+8])
			if _, merged := op.Merged[label]; merged {
				binary.LittleEndian.PutUint64(blockData[i:i+8], op.Target)
			}
		}

		// Store this block.
		serialization, err := dvid.SerializeData(blockData, d.Compression(), d.Checksum())
		if err != nil {
			dvid.Criticalf("Unable to serialize block in %q: %v\n", d.DataName(), err)
			op.wg.Done()
			continue
		}
		if err := store.Put(op.ctx, tk, serialization); err != nil {
			dvid.Errorf("Error in putting key %v: %v\n", tk, err)
		}
		op.wg.Done()
	}
}
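The doc comment depends on a dispatcher that always routes the same block coordinate to the same goroutine. Here is a self-contained sketch of that routing idea (not DVID's actual dispatcher): hash the coordinate string with hash/fnv to pick a worker channel, so GET/PUT for one block is serialized without explicit locking. The blockOp type and worker count are illustrative.

const numMergeWorkers = 32

type blockOp struct {
	izyx string // block coordinate key, e.g. an IZYXString
	// ... merge payload ...
}

// startMergeWorkers launches a fixed pool of workers, each draining its own channel.
func startMergeWorkers(handle func(blockOp)) []chan blockOp {
	chans := make([]chan blockOp, numMergeWorkers)
	for i := range chans {
		chans[i] = make(chan blockOp, 100)
		go func(in <-chan blockOp) {
			for op := range in {
				handle(op)
			}
		}(chans[i])
	}
	return chans
}

// dispatchMergeOp sends a given coordinate to a deterministic channel, so ops on the
// same block are always handled by the same goroutine, in order.
func dispatchMergeOp(chans []chan blockOp, op blockOp) {
	h := fnv.New32a()
	h.Write([]byte(op.izyx))
	chans[h.Sum32()%uint32(len(chans))] <- op
}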
Example 8
// GetRange returns a range of values spanning (TkBeg, TkEnd) keys.
func (db *BigTable) GetRange(ctx storage.Context, TkBeg, TkEnd storage.TKey) ([]*storage.TKeyValue, error) {
	if db == nil {
		return nil, fmt.Errorf("Can't call GetRange() on nil BigTable")
	}
	if ctx == nil {
		return nil, fmt.Errorf("Received nil context in GetRange()")
	}

	unvKeyBeg, _, err := ctx.SplitKey(TkBeg)
	if err != nil {
		dvid.Errorf("Error in GetRange(): %v\n", err)
		return nil, err
	}
	unvKeyEnd, _, err := ctx.SplitKey(TkEnd)
	if err != nil {
		dvid.Errorf("Error in GetRange(): %v\n", err)
		return nil, err
	}

	tKeyValues := make([]*storage.TKeyValue, 0)

	rr := api.NewRange(encodeKey(unvKeyBeg), encodeKey(unvKeyEnd))
	err = tbl.ReadRows(db.ctx, rr, func(r api.Row) bool {

		unvKeyRow, err := decodeKey(r.Key())
		if err != nil {
			dvid.Errorf("Error in GetRange() decodeKey(r.Key()): %v\n", err)
			return false
		}

		// dvid.Infof("GetRange() with row key= %v", r.Key())
		for _, readItem := range r[familyName] {

			verKey, err := decodeKey(readItem.Column)
			if err != nil {
				dvid.Errorf("Error in GetRange() decodeKey(readItem.Column): %v\n", err)
				return false
			}

			fullKey := storage.MergeKey(unvKeyRow, verKey)
			// dvid.Infof("colum key= %v , timestamp = %v", verKey, readItem.Timestamp)
			tkey, err := storage.TKeyFromKey(fullKey)
			if err != nil {
				dvid.Errorf("Error in GetRange() storage.TKeyFromKey(fullKey): %v\n", err)
				return false
			}

			kv := storage.TKeyValue{tkey, readItem.Value}
			tKeyValues = append(tKeyValues, &kv)
		}

		return true // keep going
	})

	return tKeyValues, err
}
Example 9
// Run runs a queue of post-processing commands, calling functions previously registered
// through RegisterPostProcessing().  If a command has not been registered, it is
// skipped and noted in the log.
func (q PostProcQueue) Run() {
	for _, command := range q {
		callback, found := registeredOps.postproc[command.name]
		if !found {
			dvid.Errorf("Skipping unregistered post-processing command %q\n", command.name)
			continue
		}
		if err := callback(command.data); err != nil {
			dvid.Errorf("Error in post-proc command %q: %v\n", command.data, err)
		}
	}
}
Example 10
// mergeBlock handles relabeling of blocks during a merge operation.
func (d *Data) mergeBlock(ctx *datastore.VersionedCtx, op mergeOp) {
	defer d.MutDone(op.mutID)

	store, err := d.GetKeyValueDB()
	if err != nil {
		dvid.Errorf("Data type labelblk had error initializing store: %v\n", err)
		return
	}

	tk := NewTKeyByCoord(op.block)
	data, err := store.Get(ctx, tk)
	if err != nil {
		dvid.Errorf("Error on GET of labelblk with coord string %q\n", op.block)
		return
	}
	if data == nil {
		dvid.Errorf("nil label block where merge was done!\n")
		return
	}

	blockData, _, err := dvid.DeserializeData(data, true)
	if err != nil {
		dvid.Criticalf("unable to deserialize label block in '%s': %v\n", d.DataName(), err)
		return
	}
	blockBytes := int(d.BlockSize().Prod() * 8)
	if len(blockData) != blockBytes {
		dvid.Criticalf("After labelblk deserialization got back %d bytes, expected %d bytes\n", len(blockData), blockBytes)
		return
	}

	// Iterate through this block of labels and relabel if label in merge.
	for i := 0; i < blockBytes; i += 8 {
		label := binary.LittleEndian.Uint64(blockData[i : i+8])
		if _, merged := op.Merged[label]; merged {
			binary.LittleEndian.PutUint64(blockData[i:i+8], op.Target)
		}
	}

	// Store this block.
	serialization, err := dvid.SerializeData(blockData, d.Compression(), d.Checksum())
	if err != nil {
		dvid.Criticalf("Unable to serialize block in %q: %v\n", d.DataName(), err)
		return
	}
	if err := store.Put(ctx, tk, serialization); err != nil {
		dvid.Errorf("Error in putting key %v: %v\n", tk, err)
	}

	// Notify any downstream downres instance.
	d.publishBlockChange(ctx.VersionID(), op.mutID, op.block, blockData)
}
Example 11
func init() {
	// Set a default Host name that is understandable from the user's perspective.
	// Assumes Linux or macOS.  Based on a stackoverflow suggestion.
	cmd := exec.Command("/bin/hostname", "-f")
	var out bytes.Buffer
	cmd.Stdout = &out
	if err := cmd.Run(); err != nil {
		dvid.Errorf("Unable to get default Host name via /bin/hostname: %v\n", err)
		dvid.Errorf("Using 'localhost' as default Host name.\n")
		return
	}
	DefaultHost = strings.TrimSpace(out.String()) // trim the trailing newline from hostname output
}
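As the comment notes, shelling out to /bin/hostname assumes Linux or macOS. A portable alternative sketch (not what this code does) uses the standard library's os.Hostname, with the caveat that it returns the short host name rather than the fully qualified name that hostname -f provides.

func defaultHost() string {
	host, err := os.Hostname() // portable, but typically not the FQDN
	if err != nil {
		dvid.Errorf("Unable to get default Host name: %v\n", err)
		return "localhost"
	}
	return host
}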
Example 12
// SendKeysInRange sends a range of full keys down a key channel.
func (db *BigTable) SendKeysInRange(ctx storage.Context, TkBeg, TkEnd storage.TKey, ch storage.KeyChan) error {
	if db == nil {
		return fmt.Errorf("Can't call SendKeysInRange() on nil BigTable")
	}
	if ctx == nil {
		return fmt.Errorf("Received nil context in SendKeysInRange()")
	}

	unvKeyBeg, _, err := ctx.SplitKey(TkBeg)
	if err != nil {
		dvid.Errorf("Error in SendKeysInRange(): %v\n", err)
		return err
	}

	unvKeyEnd, _, err := ctx.SplitKey(TkEnd)
	if err != nil {
		dvid.Errorf("Error in SendKeysInRange(): %v\n", err)
		return err
	}

	rr := api.NewRange(encodeKey(unvKeyBeg), encodeKey(unvKeyEnd))

	err = tbl.ReadRows(db.ctx, rr, func(r api.Row) bool {

		unvKeyRow, err := decodeKey(r.Key())
		if err != nil {
			dvid.Errorf("Error in SendKeysInRange(): %v\n", err)
			return false
		}

		// We need the versioned key so we can merge it with the unversioned key
		// and send the full key through the channel.
		for _, readItem := range r[familyName] {

			verKey, err := decodeKey(readItem.Column)
			if err != nil {
				dvid.Errorf("Error in SendKeysInRange(): %v\n", err)
				return false
			}

			fullKey := storage.MergeKey(unvKeyRow, verKey)

			ch <- fullKey
		}

		return true // keep going
	}, api.RowFilter(api.StripValueFilter()))

	return err
}
Example 13
// Processes each change as we get it.
// TODO -- accumulate larger # of changes before committing to prevent
// excessive compaction time?  This assumes LSM storage engine, which
// might not always hold in future, so stick with incremental update
// until proven to be a bottleneck.
func (d *Data) handleBlockEvent() {
	store, err := d.GetOrderedKeyValueDB()
	if err != nil {
		dvid.Errorf("Data type labelvol had error initializing store: %v\n", err)
		return
	}
	batcher, ok := store.(storage.KeyValueBatcher)
	if !ok {
		dvid.Errorf("Data type labelvol requires batch-enabled store, which %q is not\n", store)
		return
	}
	var stop bool
	var wg *sync.WaitGroup
	for {
		select {
		case wg = <-d.syncDone:
			queued := len(d.syncCh)
			if queued > 0 {
				dvid.Infof("Received shutdown signal for %q sync events (%d in queue)\n", d.DataName(), queued)
				stop = true
			} else {
				dvid.Infof("Shutting down sync event handler for instance %q...\n", d.DataName())
				wg.Done()
				return
			}
		case msg := <-d.syncCh:
			d.StartUpdate()
			ctx := datastore.NewVersionedCtx(d, msg.Version)
			switch delta := msg.Delta.(type) {
			case imageblk.Block:
				d.ingestBlock(ctx, delta, batcher)
			case imageblk.MutatedBlock:
				d.mutateBlock(ctx, delta, batcher)
			case labels.DeleteBlock:
				d.deleteBlock(ctx, delta, batcher)
			default:
				dvid.Criticalf("Cannot sync labelvol from block event.  Got unexpected delta: %v\n", msg)
			}
			d.StopUpdate()

			if stop && len(d.syncCh) == 0 {
				dvid.Infof("Shutting down sync even handler for instance %q after draining sync events.\n", d.DataName())
				wg.Done()
				return
			}
		}
	}
}
Example 14
// Get returns a value given a key.
func (db *BigTable) Get(ctx storage.Context, tk storage.TKey) ([]byte, error) {
	if db == nil {
		return nil, fmt.Errorf("Can't call Get() on nil BigTable")
	}
	if ctx == nil {
		return nil, fmt.Errorf("Received nil context in Get()")
	}

	unvKey, verKey, err := ctx.SplitKey(tk)
	if err != nil {
		dvid.Errorf("Error in Get(): %v\n", err)
		return nil, err
	}

	r, err := tbl.ReadRow(db.ctx, encodeKey(unvKey))
	if err != nil {
		return nil, err
	}

	// A missing row returns a zero-length map and a nil error.
	if len(r) == 0 {
		return nil, nil
	}

	value, err := getValue(r, verKey)
	if err != nil {
		return nil, err
	}

	return value, nil
}
Example 15
// GetAssignedStore returns the store assigned based on (instance name, root uuid) or type.
// In some cases, this store may include a caching wrapper if the data instance has been
// configured to use groupcache.
func GetAssignedStore(dataname dvid.InstanceName, root dvid.UUID, typename dvid.TypeString) (dvid.Store, error) {
	if !manager.setup {
		return nil, fmt.Errorf("Storage manager not initialized before requesting store for %s/%s", dataname, root)
	}
	dataid := dvid.GetDataSpecifier(dataname, root)
	store, found := manager.instanceStore[dataid]
	var err error
	if !found {
		store, err = assignedStoreByType(typename)
		if err != nil {
			return nil, fmt.Errorf("Cannot get assigned store for data %q, type %q", dataname, typename)
		}
	}

	// See if this is using caching and if so, establish a wrapper around it.
	if _, supported := manager.gcache.supported[dataid]; supported {
		store, err = wrapGroupcache(store, manager.gcache.cache)
		if err != nil {
			dvid.Errorf("Unable to wrap groupcache around store %s for data instance %q (uuid %s): %v\n", store, dataname, root, err)
		} else {
			dvid.Infof("Returning groupcache-wrapped store %s for data instance %q @ %s\n", store, dataname, root)
		}
	}
	return store, nil
}
Example 16
// getKVRange calls the KVAutobus keyvalue_range API.
func (db *KVAutobus) getKVRange(kStart, kEnd storage.Key) (KVs, error) {
	b64key1 := encodeKey(kStart)
	b64key2 := encodeKey(kEnd)
	url := fmt.Sprintf("%s/kvautobus/api/keyvalue_range/%s/%s/", db.host, b64key1, b64key2)

	timedLog := dvid.NewTimeLog()
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusNotFound {
		return nil, nil // Handle no keys found.
	}

	r := msgp.NewReader(bufio.NewReader(resp.Body))
	var mkvs KVs
	if err := mkvs.DecodeMsg(r); err != nil {
		dvid.Errorf("Couldn't decode getKVRange return\n")
		return nil, err
	}

	timedLog.Infof("PROXY keyvalue_range to %s returned %d (%d kv pairs)\n", db.host, resp.StatusCode, len(mkvs))
	return mkvs, nil
}
Example 17
func repoDeleteHandler(c web.C, w http.ResponseWriter, r *http.Request) {
	uuid := c.Env["uuid"].(dvid.UUID)
	queryValues := r.URL.Query()
	imsure := queryValues.Get("imsure")
	if imsure != "true" {
		BadRequest(w, r, "Cannot delete instance unless query string 'imsure=true' is present!")
		return
	}

	dataname, ok := c.URLParams["dataname"]
	if !ok {
		BadRequest(w, r, "Error in retrieving data instance name from URL parameters")
		return
	}

	// Do the deletion asynchronously since they can take a very long time.
	go func() {
		if err := datastore.DeleteDataByUUID(uuid, dvid.InstanceName(dataname)); err != nil {
			dvid.Errorf("Error in deleting data instance %q: %v", dataname, err)
		}
	}()

	// Just respond that deletion was successfully started
	w.Header().Set("Content-Type", "application/json")
	fmt.Fprintf(w, `{"result": "Started deletion of data instance %q from repo with root %s"}`,
		dataname, uuid)
}
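A hedged client-side sketch of invoking this handler. It assumes the handler is mounted at DELETE /api/repo/{uuid}/{dataname}, which is inferred from the uuid and dataname URL parameters read above; only standard library calls are used.

func deleteInstance(server, uuid, dataname string) error {
	// The handler above refuses to start deletion unless imsure=true is present.
	url := fmt.Sprintf("%s/api/repo/%s/%s?imsure=true", server, uuid, dataname)
	req, err := http.NewRequest(http.MethodDelete, url, nil)
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("delete request returned status %d", resp.StatusCode)
	}
	return nil
}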
Example 18
// moveElementInTags moves all references to the given element point within the given tags.
// This is a private method and assumes outer locking.
func (d *Data) moveElementInTags(ctx *datastore.VersionedCtx, batch storage.Batch, from, to dvid.Point3d, tags []Tag) error {
	for _, tag := range tags {
		// Get the elements in tag.
		tk, err := NewTagTKey(tag)
		if err != nil {
			return err
		}
		elems, err := getElementsNR(ctx, tk)
		if err != nil {
			return err
		}

		// Move element in tag.
		if moved, _ := elems.move(from, to, false); moved == nil {
			dvid.Errorf("Unable to find moved element %s in tag %q", from, tag)
			continue
		}

		// Save the tag.
		if err := putBatchElements(batch, tk, elems); err != nil {
			return err
		}
	}
	return nil
}
Example 19
func (m *repoManager) deleteData(r *repoT, name dvid.InstanceName) error {
	r.Lock()
	defer r.Unlock()

	data, found := r.data[name]
	if !found {
		return ErrInvalidDataName
	}

	// Delete entries in the sync graph if this data needs to be synced with another data instance.
	_, syncable := data.(Syncer)
	if syncable {
		r.deleteSyncGraph(name)
	}

	// Remove this data instance from the repository and persist.
	tm := time.Now()
	r.updated = tm
	msg := fmt.Sprintf("Delete data instance '%s' of type '%s'", name, data.TypeName())
	message := fmt.Sprintf("%s  %s", tm.Format(time.RFC3339), msg)
	r.log = append(r.log, message)
	r.dag.deleteDataInstance(name)
	delete(r.data, name)

	// For all data tiers of storage, remove data key-value pairs that would be associated with this instance id.
	go func() {
		if err := storage.DeleteDataInstance(data); err != nil {
			dvid.Errorf("Error trying to do async data instance deletion: %v\n", err)
		}
	}()

	return r.save()
}
Example 20
func (d *Data) GetType() TypeService {
	typeservice, err := TypeServiceByURL(d.typeurl)
	if err != nil {
		dvid.Errorf("Data %q: %v\n", d.name, err)
	}
	return typeservice
}
Example 21
func xferBlock(buf []byte, chunk *storage.Chunk, wg *sync.WaitGroup) {
	defer wg.Done()

	kv := chunk.TKeyValue
	uncompress := true
	block, _, err := dvid.DeserializeData(kv.V, uncompress)
	if err != nil {
		dvid.Errorf("Unable to deserialize block (%v): %v", kv.K, err)
		return
	}
	if len(block) != len(buf) {
		dvid.Errorf("Deserialized block length (%d) != allocated block length (%d)", len(block), len(buf))
		return
	}
	copy(buf, block)
}
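A hedged sketch of driving xferBlock concurrently. The chunks slice and blockBytes size are hypothetical inputs; the point is the one-goroutine-per-chunk pattern sharing a single WaitGroup.

func xferAllBlocks(chunks []*storage.Chunk, blockBytes int) [][]byte {
	bufs := make([][]byte, len(chunks))
	var wg sync.WaitGroup
	for i, chunk := range chunks {
		bufs[i] = make([]byte, blockBytes) // destination for the uncompressed block
		wg.Add(1)
		go xferBlock(bufs[i], chunk, &wg)
	}
	wg.Wait() // every block has been copied into its buffer, or an error was logged
	return bufs
}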
Example 22
// Processes each labelblk change as we get it.
func (d *Data) processEvents() {
	batcher, err := d.GetKeyValueBatcher()
	if err != nil {
		dvid.Errorf("handleBlockEvent %v\n", err)
		return
	}
	var stop bool
	var wg *sync.WaitGroup
	for {
		select {
		case wg = <-d.syncDone:
			queued := len(d.syncCh)
			if queued > 0 {
				dvid.Infof("Received shutdown signal for %q sync events (%d in queue)\n", d.DataName(), queued)
				stop = true
			} else {
				dvid.Infof("Shutting down sync event handler for instance %q...\n", d.DataName())
				wg.Done()
				return
			}
		case msg := <-d.syncCh:
			ctx := datastore.NewVersionedCtx(d, msg.Version)
			d.handleSyncMessage(ctx, msg, batcher)

			if stop && len(d.syncCh) == 0 {
				dvid.Infof("Shutting down sync even handler for instance %q after draining sync events.\n", d.DataName())
				wg.Done()
				return
			}
		}
	}
}
Example 23
func init() {
	ver, err := semver.Make("0.2.0")
	if err != nil {
		dvid.Errorf("Unable to make semver in kvautobus: %v\n", err)
	}
	e := Engine{"kvautobus", "Janelia KVAutobus", ver}
	storage.RegisterEngine(e)
}
Example 24
func (d *dirtyCache) decr(iv dvid.InstanceVersion, label uint64) {
	cnts, found := d.dirty[iv]
	if !found || cnts == nil {
		dvid.Errorf("decremented non-existant count for label %d, version %v\n", label, iv)
		return
	}
	cnts.Decr(label)
}
Example 25
func init() {
	ver, err := semver.Make("0.9.0")
	if err != nil {
		dvid.Errorf("Unable to make semver in basholeveldb: %v\n", err)
	}
	e := Engine{"basholeveldb", "Basho-tuned LevelDB", ver}
	storage.RegisterEngine(e)
}
Example 26
func init() {
	ver, err := semver.Make("0.1.0")
	if err != nil {
		dvid.Errorf("Unable to make semver in bigtable: %v\n", err)
	}
	e := Engine{"bigtable", "Google's Cloud BigTable", ver}
	storage.RegisterEngine(e)
}
Example 27
// Remove removes a merge operation from the given InstanceVersion's cache,
// allowing us to return no mapping if it's already been processed.
func (mc *mergeCache) Remove(iv dvid.InstanceVersion, op MergeOp) {
	mc.Lock()
	defer mc.Unlock()

	if mc.m == nil {
		dvid.Errorf("mergeCache.Remove() called with iv %s, op %v there are no mappings for any iv.\n", iv, op)
		return
	}
	mapping, found := mc.m[iv]
	if !found {
		dvid.Errorf("mergeCache.Remove() called with iv %s, op %v and there is no mapping for that iv.\n", iv, op)
		return
	}
	for merged := range op.Merged {
		mapping.delete(merged)
	}
}
Example 28
func (i *indexRLE) Bytes() []byte {
	buf := new(bytes.Buffer)
	_, err := buf.Write(i.start.Bytes())
	if err != nil {
		dvid.Errorf("Error in roi.go, indexRLE.Bytes(): %v\n", err)
	}
	binary.Write(buf, binary.BigEndian, i.span)
	return buf.Bytes()
}
Example 29
func (m *repoManager) deleteRepo(uuid dvid.UUID) error {
	m.Lock()
	defer m.Unlock()

	m.idMutex.RLock()
	defer m.idMutex.RUnlock()

	r, found := m.repos[uuid]
	if !found {
		return ErrInvalidUUID
	}

	r.Lock()

	// Start deletion of all data instances.
	for _, data := range r.data {
		go func(data dvid.Data) {
			if err := storage.DeleteDataInstance(data); err != nil {
				dvid.Errorf("Error trying to do async data instance %q deletion: %v\n", data.DataName(), err)
			}
		}(data)
	}

	// Delete the repo off the datastore.
	if err := r.delete(); err != nil {
		return fmt.Errorf("Unable to delete repo from datastore: %v", err)
	}
	r.Unlock()

	// Delete all UUIDs in this repo from metadata
	delete(m.repoToUUID, r.id)
	for v := range r.dag.nodes {
		u, found := m.versionToUUID[v]
		if !found {
			dvid.Errorf("Found version id %d with no corresponding UUID on delete of repo %s!\n", v, uuid)
			continue
		}
		delete(m.repos, u)
		delete(m.uuidToVersion, u)
		delete(m.versionToUUID, v)
	}
	return nil
}
Example 30
func (db *BigTable) Close() {
	if db == nil {
		dvid.Errorf("Can't call Close() on nil BigTable")
		return
	}

	if client != nil {
		client.Close()
	}
}