Example #1
// StartInstancePush initiates a data instance push.  After some number of SendKV
// calls, EndInstancePush must be called.
func (p *PushSession) StartInstancePush(d dvid.Data) error {
	dmsg := DataTxInit{
		Session:    p.s.ID(),
		DataName:   d.DataName(),
		TypeName:   d.TypeName(),
		InstanceID: d.InstanceID(),
	}
	if _, err := p.s.Call()(StartDataMsg, dmsg); err != nil {
		return fmt.Errorf("couldn't send data instance %q start: %v\n", d.DataName(), err)
	}
	return nil
}
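For context, here is a minimal sketch of the full push lifecycle around StartInstancePush, assuming a *PushSession p and a dvid.Data instance d are already in hand; the kvs slice is a hypothetical stand-in for whatever key-values the caller wants to transmit, and SendKV and EndInstancePush are the companion calls used in the PushData example further below.

func pushInstance(p *PushSession, d dvid.Data, kvs []*storage.KeyValue) error {
	// Announce the start of this instance's data transfer.
	if err := p.StartInstancePush(d); err != nil {
		return err
	}
	// Send each key-value pair; kvs is assumed to be prepared by the caller.
	for _, kv := range kvs {
		if err := p.SendKV(kv); err != nil {
			return fmt.Errorf("send of key-value for data %q failed: %v", d.DataName(), err)
		}
	}
	// Signal that no more key-values follow for this instance.
	return p.EndInstancePush()
}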
Example #2
func getGraphDB(d dvid.Data) (db storage.GraphDB, err error) {
	store, err := d.BackendStore()
	if err != nil {
		return nil, err
	}
	if store == nil {
		return nil, ErrInvalidStore
	}
	var ok bool
	db, ok = store.(storage.GraphDB)
	if !ok {
		return nil, fmt.Errorf("Store assigned to data %q (%s) is not a graph db", d.DataName(), store)
	}
	return
}
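The same BackendStore-plus-type-assertion pattern generalizes to other storage interfaces. As a hypothetical illustration (this helper is not part of the excerpted codebase), the identical shape could be used for ordered key-value stores, assuming the storage.OrderedKeyValueDB interface seen in the other examples:

func getOrderedKeyValueDB(d dvid.Data) (db storage.OrderedKeyValueDB, err error) {
	store, err := d.BackendStore()
	if err != nil {
		return nil, err
	}
	if store == nil {
		return nil, ErrInvalidStore
	}
	var ok bool
	db, ok = store.(storage.OrderedKeyValueDB)
	if !ok {
		return nil, fmt.Errorf("Store assigned to data %q (%s) is not an ordered key-value db", d.DataName(), store)
	}
	return
}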
Example #3
// DeleteDataInstance removes a data instance across all versions and tiers of storage.
func DeleteDataInstance(data dvid.Data) error {
	if !manager.setup {
		return fmt.Errorf("Can't delete data instance %q before storage manager is initialized", data.DataName())
	}

	// Determine all database tiers that are distinct.
	dbs := []OrderedKeyValueDB{manager.mutable}
	if manager.mutable != manager.immutable {
		dbs = append(dbs, manager.immutable)
	}

	// For each storage tier, remove all key-values with the given instance id.
	dvid.Infof("Starting delete of instance %d: name %q, type %s\n", data.InstanceID(), data.DataName(), data.TypeName())
	ctx := NewDataContext(data, 0)
	for _, db := range dbs {
		if err := db.DeleteAll(ctx, true); err != nil {
			return err
		}
	}
	return nil
}
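A usage sketch combining this with GetDataByUUIDName from the sync example below: look up the instance under a version UUID, then remove its key-values everywhere. The deleteByName wrapper is hypothetical, and package qualifiers are omitted; in the actual codebase the lookup and the delete may live in different packages.

func deleteByName(uuid dvid.UUID, name dvid.InstanceName) error {
	// Resolve the data instance under the given version UUID.
	data, err := GetDataByUUIDName(uuid, name)
	if err != nil {
		return err
	}
	// Remove its key-values from every distinct storage tier.
	return DeleteDataInstance(data)
}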
Example #4
// SetSyncByJSON takes a JSON object whose "sync" key holds a comma-separated list of data
// instance names, creates the sync graph, and sets the data instance's syncs.  If replace is
// false (default), the new syncs are appended to the current syncs.
func SetSyncByJSON(d dvid.Data, uuid dvid.UUID, replace bool, in io.ReadCloser) error {
	if manager == nil {
		return ErrManagerNotInitialized
	}
	jsonData := make(map[string]string)
	decoder := json.NewDecoder(in)
	if err := decoder.Decode(&jsonData); err != nil && err != io.EOF {
		return fmt.Errorf("Malformed JSON request in sync request: %v", err)
	}
	syncedCSV, ok := jsonData["sync"]
	if !ok {
		return fmt.Errorf("Could not find 'sync' value in POSTed JSON to sync request.")
	}

	syncedNames := strings.Split(syncedCSV, ",")
	if len(syncedNames) == 0 || (len(syncedNames) == 1 && syncedNames[0] == "") {
		syncedNames = []string{}
	}

	if len(syncedNames) == 0 && !replace {
		dvid.Infof("Ignored attempt to append no syncs to instance %q.\n", d.DataName())
		return nil
	}

	// Make sure all synced names currently exist under this UUID, then transform to data UUIDs.
	syncs := make(dvid.UUIDSet)
	for _, name := range syncedNames {
		data, err := GetDataByUUIDName(uuid, dvid.InstanceName(name))
		if err != nil {
			return err
		}
		syncs[data.DataUUID()] = struct{}{}
	}

	if err := SetSyncData(d, syncs, replace); err != nil {
		return err
	}
	return nil
}
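A usage sketch from a hypothetical HTTP handler, assuming d and uuid were already resolved from the request path and the request body carries JSON like {"sync": "instanceA,instanceB"}. The replace flag parsed from the query string is an assumption for illustration, not part of the excerpt.

func handleSyncRequest(w http.ResponseWriter, r *http.Request, d dvid.Data, uuid dvid.UUID) {
	// Interpret ?replace=true as a request to replace rather than append syncs.
	replace := r.URL.Query().Get("replace") == "true"
	if err := SetSyncByJSON(d, uuid, replace, r.Body); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	fmt.Fprintf(w, "Set syncs for data instance %q\n", d.DataName())
}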
Example #5
// DeleteDataInstance removes a data instance.
func DeleteDataInstance(data dvid.Data) error {
	if !manager.setup {
		return fmt.Errorf("Can't delete data instance %q before storage manager is initialized", data.DataName())
	}

	// Get the store for the data instance.
	store, err := data.BackendStore()
	if err != nil {
		return err
	}
	db, ok := store.(OrderedKeyValueDB)
	if !ok {
		return fmt.Errorf("store assigned to data %q is not an ordered kv db with ability to delete all", data.DataName())
	}

	dvid.Infof("Starting delete of instance %d: name %q, type %s\n", data.InstanceID(), data.DataName(), data.TypeName())
	ctx := NewDataContext(data, 0)
	if err := db.DeleteAll(ctx, true); err != nil {
		return err
	}
	return nil
}
Example #6
// copyData copies all key-value pairs pertinent to the source data instance d1 into the
// destination data instance d2.  If d2 is nil, the destination is d1 itself, which is useful
// for migrating data to a new store.  Each datatype can implement filters that restrict the
// transmitted key-value pairs based on the given FilterSpec.
func copyData(oldKV, newKV storage.OrderedKeyValueDB, d1, d2 dvid.Data, uuid dvid.UUID, f storage.Filter, flatten bool) error {
	// Get data context for this UUID.
	v, err := VersionFromUUID(uuid)
	if err != nil {
		return err
	}
	srcCtx := NewVersionedCtx(d1, v)
	var dstCtx *VersionedCtx
	if d2 == nil {
		d2 = d1
		dstCtx = srcCtx
	} else {
		dstCtx = NewVersionedCtx(d2, v)
	}

	// Copy this instance's key-value pairs
	var wg sync.WaitGroup
	wg.Add(1)

	stats := new(txStats)
	stats.lastTime = time.Now()

	var kvTotal, kvSent int
	var bytesTotal, bytesSent uint64
	keysOnly := false
	if flatten {
		// Start goroutine to receive flattened key-value pairs and store them.
		ch := make(chan *storage.TKeyValue, 1000)
		go func() {
			for {
				tkv := <-ch
				if tkv == nil {
					wg.Done()
					dvid.Infof("Copied %d %q key-value pairs (%s, out of %d kv pairs, %s) [flattened]\n",
						kvSent, d1.DataName(), humanize.Bytes(bytesSent), kvTotal, humanize.Bytes(bytesTotal))
					stats.printStats()
					return
				}
				kvTotal++
				curBytes := uint64(len(tkv.V) + len(tkv.K))
				bytesTotal += curBytes
				if f != nil {
					skip, err := f.Check(tkv)
					if err != nil {
						dvid.Errorf("problem applying filter on data %q: %v\n", d1.DataName(), err)
						continue
					}
					if skip {
						continue
					}
				}
				kvSent++
				bytesSent += curBytes
				if err := newKV.Put(dstCtx, tkv.K, tkv.V); err != nil {
					dvid.Errorf("can't put k/v pair to destination instance %q: %v\n", d2.DataName(), err)
				}
				stats.addKV(tkv.K, tkv.V)
			}
		}()

		begKey, endKey := srcCtx.TKeyRange()
		err := oldKV.ProcessRange(srcCtx, begKey, endKey, &storage.ChunkOp{}, func(c *storage.Chunk) error {
			if c == nil {
				return fmt.Errorf("received nil chunk in flatten push for data %s", d1.DataName())
			}
			ch <- c.TKeyValue
			return nil
		})
		ch <- nil
		if err != nil {
			return fmt.Errorf("error in flatten push for data %q: %v", d1.DataName(), err)
		}
	} else {
		// Start goroutine to receive all key-value pairs and store them.
		ch := make(chan *storage.KeyValue, 1000)
		go func() {
			for {
				kv := <-ch
				if kv == nil {
					wg.Done()
					dvid.Infof("Sent %d %q key-value pairs (%s, out of %d kv pairs, %s)\n",
						kvSent, d1.DataName(), humanize.Bytes(bytesSent), kvTotal, humanize.Bytes(bytesTotal))
					stats.printStats()
					return
				}
				tkey, err := storage.TKeyFromKey(kv.K)
				if err != nil {
					dvid.Errorf("couldn't get %q TKey from Key %v: %v\n", d1.DataName(), kv.K, err)
					continue
				}

				kvTotal++
				curBytes := uint64(len(kv.V) + len(kv.K))
				bytesTotal += curBytes
				if f != nil {
					skip, err := f.Check(&storage.TKeyValue{K: tkey, V: kv.V})
					if err != nil {
						dvid.Errorf("problem applying filter on data %q: %v\n", d1.DataName(), err)
						continue
					}
					if skip {
						continue
					}
				}
				kvSent++
				bytesSent += curBytes
				if dstCtx != nil {
					err := dstCtx.UpdateInstance(kv.K)
					if err != nil {
						dvid.Errorf("can't update raw key to new data instance %q: %v\n", d2.DataName(), err)
					}
				}
				if err := newKV.RawPut(kv.K, kv.V); err != nil {
					dvid.Errorf("can't put k/v pair to destination instance %q: %v\n", d2.DataName(), err)
				}
				stats.addKV(kv.K, kv.V)
			}
		}()

		begKey, endKey := srcCtx.KeyRange()
		if err = oldKV.RawRangeQuery(begKey, endKey, keysOnly, ch, nil); err != nil {
			return fmt.Errorf("push voxels %q range query: %v", d1.DataName(), err)
		}
	}
	wg.Wait()
	return nil
}
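A sketch of how copyData might be invoked for a store migration, assuming oldStore and newStore implement storage.OrderedKeyValueDB and no datatype filter is needed. Passing nil for d2 keeps the destination instance identical to the source, and flatten = true restricts the copy to the single version identified by uuid; the migrateInstance wrapper is hypothetical.

func migrateInstance(oldStore, newStore storage.OrderedKeyValueDB, d dvid.Data, uuid dvid.UUID) error {
	// nil destination data: copy d's key-values into the same instance on the new store.
	// nil filter: transmit every key-value pair.
	// flatten = true: only the version given by uuid is copied.
	return copyData(oldStore, newStore, d, nil, uuid, nil, true)
}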
Example #7
// GetSyncSubs implements the datastore.Syncer interface.  It returns a list of subscriptions
// to the synced data instance that will notify the receiver.
func (d *Data) GetSyncSubs(synced dvid.Data) (subs datastore.SyncSubs, err error) {
	if d.syncCh == nil {
		if err = d.InitDataHandlers(); err != nil {
			err = fmt.Errorf("unable to initialize handlers for data %q: %v\n", d.DataName(), err)
			return
		}
	}

	// Our syncing depends on the datatype we are syncing.
	switch synced.TypeName() {
	case "labelblk":
		subs = datastore.SyncSubs{
			{
				Event:  datastore.SyncEvent{synced.DataUUID(), labels.IngestBlockEvent},
				Notify: d.DataUUID(),
				Ch:     d.syncCh,
			},
			{
				Event:  datastore.SyncEvent{synced.DataUUID(), labels.MutateBlockEvent},
				Notify: d.DataUUID(),
				Ch:     d.syncCh,
			},
			{
				Event:  datastore.SyncEvent{synced.DataUUID(), labels.DeleteBlockEvent},
				Notify: d.DataUUID(),
				Ch:     d.syncCh,
			},
		}
	case "labelvol":
		subs = datastore.SyncSubs{
			datastore.SyncSub{
				Event:  datastore.SyncEvent{synced.DataUUID(), labels.MergeBlockEvent},
				Notify: d.DataUUID(),
				Ch:     d.syncCh,
			},
			datastore.SyncSub{
				Event:  datastore.SyncEvent{synced.DataUUID(), labels.SplitLabelEvent},
				Notify: d.DataUUID(),
				Ch:     d.syncCh,
			},
		}
	default:
		err = fmt.Errorf("Unable to sync %s with %s since datatype %q is not supported.", d.DataName(), synced.DataName(), synced.TypeName())
	}
	return
}
Example #8
// GetSyncSubs implements the datastore.Syncer interface
func (d *Data) GetSyncSubs(synced dvid.Data) (datastore.SyncSubs, error) {
	if d.syncCh == nil {
		if err := d.InitDataHandlers(); err != nil {
			return nil, fmt.Errorf("unable to initialize handlers for data %q: %v\n", d.DataName(), err)
		}
	}

	var evts []string
	switch synced.TypeName() {
	case "labelblk": // For down-res support
		evts = []string{
			DownsizeBlockEvent, DownsizeCommitEvent,
			labels.IngestBlockEvent, labels.MutateBlockEvent, labels.DeleteBlockEvent,
		}
	case "labelvol":
		evts = []string{labels.MergeBlockEvent, labels.SplitLabelEvent}
	default:
		return nil, fmt.Errorf("Unable to sync %s with %s since datatype %q is not supported.", d.DataName(), synced.DataName(), synced.TypeName())
	}

	subs := make(datastore.SyncSubs, len(evts))
	for i, evt := range evts {
		subs[i] = datastore.SyncSub{
			Event:  datastore.SyncEvent{synced.DataUUID(), evt},
			Notify: d.DataUUID(),
			Ch:     d.syncCh,
		}
	}
	return subs, nil
}
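A small sketch of inspecting the returned subscriptions, assuming synced is another dvid.Data instance already fetched by the caller; the field accesses match the SyncSub construction shown above, and the logSyncSubs wrapper is hypothetical.

func logSyncSubs(d *Data, synced dvid.Data) error {
	subs, err := d.GetSyncSubs(synced)
	if err != nil {
		return err
	}
	// Each subscription routes events from the synced instance to this instance's channel.
	for _, sub := range subs {
		dvid.Infof("Subscribed %s to event %v, notifying %s\n", d.DataName(), sub.Event, sub.Notify)
	}
	return nil
}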
Example #9
func getKeyValueBatcher(d dvid.Data) (db storage.KeyValueBatcher, err error) {
	store, err := d.BackendStore()
	if err != nil {
		return nil, err
	}
	if store == nil {
		return nil, ErrInvalidStore
	}
	var ok bool
	db, ok = store.(storage.KeyValueBatcher)
	if !ok {
		return nil, fmt.Errorf("Store assigned to data %q (%s) is not able to batch key-value ops", d.DataName(), store)
	}
	return
}
Example #10
// DeleteDataInstance removes a data instance across all versions and tiers of storage.
func DeleteDataInstance(data dvid.Data) error {
	if !manager.setup {
		return fmt.Errorf("Can't delete data instance %q before storage manager is initialized", data.DataName())
	}

	// Determine all database tiers that are distinct.
	dbs := []OrderedKeyValueDB{manager.smalldata}
	if manager.smalldata != manager.bigdata {
		dbs = append(dbs, manager.bigdata)
	}

	// For each storage tier, remove all key-values with the given instance id.
	ctx := NewDataContext(data, 0)
	for _, db := range dbs {
		if err := db.DeleteAll(ctx, true); err != nil {
			return err
		}
	}
	return nil
}
Example #11
// BadAPIRequest writes a standard error message to http.ResponseWriter for a badly formatted API call.
func BadAPIRequest(w http.ResponseWriter, r *http.Request, d dvid.Data) {
	helpURL := path.Join("api", "help", string(d.TypeName()))
	msg := fmt.Sprintf("Bad API call (%s) for data %q.  See API help at http://%s/%s", r.URL.Path, d.DataName(), GetConfig().Host(), helpURL)
	http.Error(w, msg, http.StatusBadRequest)
	dvid.Errorf("Bad API call (%s) for data %q\n", r.URL.Path, d.DataName())
}
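A sketch of a typical call site: an HTTP handler dispatching on a hypothetical action segment parsed from the request path, with the fallback arm reporting the malformed call via BadAPIRequest. The serveAction function and the "info" action are illustrative assumptions, and a package qualifier may be needed on BadAPIRequest depending on the calling package.

func serveAction(w http.ResponseWriter, r *http.Request, d dvid.Data, action string) {
	switch action {
	case "info":
		fmt.Fprintf(w, "Data instance %q of type %s\n", d.DataName(), d.TypeName())
	default:
		// Unknown endpoint: return a 400 with a pointer to the datatype's API help.
		BadAPIRequest(w, r, d)
	}
}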
Example #12
// PushData transfers all key-value pairs pertinent to the given data instance.
// Each datatype can implement filters that can restrict the transmitted key-value pairs
// based on the given FilterSpec.  Note that because of the generality of this function,
// a particular datatype implementation could be much more efficient when implementing
// filtering.  For example, the imageblk datatype could scan its key-values using the ROI
// to generate keys (since imageblk keys will likely be a vast superset of ROI spans),
// while this generic routine will scan every key-value pair for a data instance and
// query the ROI to see if this key is ok to send.
func PushData(d dvid.Data, p *PushSession) error {
	// We should be able to get the backing store (only ordered kv for now)
	storer, ok := d.(storage.Accessor)
	if !ok {
		return fmt.Errorf("unable to push data %q: unable to access backing store", d.DataName())
	}
	store, err := storer.GetOrderedKeyValueDB()
	if err != nil {
		return fmt.Errorf("unable to get backing store for data %q: %v\n", d.DataName(), err)
	}

	// See if this data instance implements a Send filter.
	var filter storage.Filter
	filterer, ok := d.(storage.Filterer)
	if ok {
		var err error
		filter, err = filterer.NewFilter(p.Filter)
		if err != nil {
			return err
		}
	}

	// Pick any version: a flatten transmit has only one version, and an all-versions or
	// branch transmit looks at all versions anyway.
	if len(p.Versions) == 0 {
		return fmt.Errorf("need at least one version to send")
	}
	var v dvid.VersionID
	for v = range p.Versions {
		break
	}
	ctx := NewVersionedCtx(d, v)

	// Send the initial data instance start message
	if err := p.StartInstancePush(d); err != nil {
		return err
	}

	// Send this instance's key-value pairs
	var wg sync.WaitGroup
	wg.Add(1)

	var kvTotal, kvSent int
	var bytesTotal, bytesSent uint64
	keysOnly := false
	if p.t == rpc.TransmitFlatten {
		// Start goroutine to receive flattened key-value pairs and transmit to remote.
		ch := make(chan *storage.TKeyValue, 1000)
		go func() {
			for {
				tkv := <-ch
				if tkv == nil {
					if err := p.EndInstancePush(); err != nil {
						dvid.Errorf("Bad data %q termination: %v\n", d.DataName(), err)
					}
					wg.Done()
					dvid.Infof("Sent %d %q key-value pairs (%s, out of %d kv pairs, %s) [flattened]\n",
						kvSent, d.DataName(), humanize.Bytes(bytesSent), kvTotal, humanize.Bytes(bytesTotal))
					return
				}
				kvTotal++
				curBytes := uint64(len(tkv.V) + len(tkv.K))
				bytesTotal += curBytes
				if filter != nil {
					skip, err := filter.Check(tkv)
					if err != nil {
						dvid.Errorf("problem applying filter on data %q: %v\n", d.DataName(), err)
						continue
					}
					if skip {
						continue
					}
				}
				kvSent++
				bytesSent += curBytes
				kv := storage.KeyValue{
					K: ctx.ConstructKey(tkv.K),
					V: tkv.V,
				}
				if err := p.SendKV(&kv); err != nil {
					dvid.Errorf("Bad data %q send KV: %v", d.DataName(), err)
				}
			}
		}()

		begKey, endKey := ctx.TKeyRange()
		err := store.ProcessRange(ctx, begKey, endKey, &storage.ChunkOp{}, func(c *storage.Chunk) error {
			if c == nil {
				return fmt.Errorf("received nil chunk in flatten push for data %s", d.DataName())
			}
			ch <- c.TKeyValue
			return nil
		})
		ch <- nil
		if err != nil {
			return fmt.Errorf("error in flatten push for data %q: %v", d.DataName(), err)
		}
	} else {
		// Start goroutine to receive all key-value pairs and transmit to remote.
		ch := make(chan *storage.KeyValue, 1000)
		go func() {
			for {
				kv := <-ch
				if kv == nil {
					if err := p.EndInstancePush(); err != nil {
						dvid.Errorf("Bad data %q termination: %v\n", d.DataName(), err)
					}
					wg.Done()
					dvid.Infof("Sent %d %q key-value pairs (%s, out of %d kv pairs, %s)\n",
						kvSent, d.DataName(), humanize.Bytes(bytesSent), kvTotal, humanize.Bytes(bytesTotal))
					return
				}
				if !ctx.ValidKV(kv, p.Versions) {
					continue
				}
				kvTotal++
				curBytes := uint64(len(kv.V) + len(kv.K))
				bytesTotal += curBytes
				if filter != nil {
					tkey, err := storage.TKeyFromKey(kv.K)
					if err != nil {
						dvid.Errorf("couldn't get %q TKey from Key %v: %v\n", d.DataName(), kv.K, err)
						continue
					}
					skip, err := filter.Check(&storage.TKeyValue{K: tkey, V: kv.V})
					if err != nil {
						dvid.Errorf("problem applying filter on data %q: %v\n", d.DataName(), err)
						continue
					}
					if skip {
						continue
					}
				}
				kvSent++
				bytesSent += curBytes
				if err := p.SendKV(kv); err != nil {
					dvid.Errorf("Bad data %q send KV: %v", d.DataName(), err)
				}
			}
		}()

		begKey, endKey := ctx.KeyRange()
		if err = store.RawRangeQuery(begKey, endKey, keysOnly, ch, nil); err != nil {
			return fmt.Errorf("push voxels %q range query: %v", d.DataName(), err)
		}
	}
	wg.Wait()
	return nil
}
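A minimal caller sketch, assuming a *PushSession p has already been negotiated with the remote (its construction is outside these excerpts) and d is the instance to transmit; PushData itself drives the start/send/end sequence shown in the earlier examples, and the pushInstanceData wrapper is hypothetical.

func pushInstanceData(d dvid.Data, p *PushSession) error {
	if err := PushData(d, p); err != nil {
		return fmt.Errorf("push of data instance %q failed: %v", d.DataName(), err)
	}
	dvid.Infof("Completed push of data instance %q\n", d.DataName())
	return nil
}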