func deleteConflict(data DataService, extnode *extensionNode, k storage.Key) error { store, err := getOrderedKeyValueDB(data) if err != nil { return err } // Create new node if necessary if extnode.newUUID == dvid.NilUUID { childUUID, err := manager.newVersion(extnode.oldUUID, "Version for deleting conflicts before merge", nil) if err != nil { return err } extnode.newUUID = childUUID childV, err := manager.versionFromUUID(childUUID) if err != nil { return err } extnode.newV = childV } // Perform the deletion. tk, err := storage.TKeyFromKey(k) if err != nil { return err } ctx := NewVersionedCtx(data, extnode.newV) return store.Delete(ctx, tk) }
// GetRange returns a range of values spanning (kStart, kEnd) keys. These key-value // pairs will be sorted in ascending key order. If the keys are versioned, all key-value // pairs for the particular version will be returned. func (db *KVAutobus) GetRange(ctx storage.Context, kStart, kEnd storage.TKey) ([]*storage.TKeyValue, error) { if ctx == nil { return nil, fmt.Errorf("Received nil context in GetRange()") } ch := make(chan errorableKV) // Run the range query on a potentially versioned key in a goroutine. go func() { if ctx == nil || !ctx.Versioned() { db.unversionedRange(ctx, kStart, kEnd, ch, false) } else { db.versionedRange(ctx.(storage.VersionedCtx), kStart, kEnd, ch, false) } }() // Consume the key-value pairs. values := []*storage.TKeyValue{} for { result := <-ch if result.KeyValue == nil { return values, nil } if result.error != nil { return nil, result.error } tk, err := storage.TKeyFromKey(result.KeyValue.K) if err != nil { return nil, err } tkv := storage.TKeyValue{tk, result.KeyValue.V} values = append(values, &tkv) } }
// KeysInRange returns a range of type-specific key components spanning (TkBeg, TkEnd). func (db *BigTable) KeysInRange(ctx storage.Context, TkBeg, TkEnd storage.TKey) ([]storage.TKey, error) { if db == nil { return nil, fmt.Errorf("Can't call KeysInRange() on nil BigTable") } if ctx == nil { return nil, fmt.Errorf("Received nil context in KeysInRange()") } tKeys := make([]storage.TKey, 0) unvKeyBeg, _, err := ctx.SplitKey(TkBeg) if err != nil { dvid.Errorf("Error in KeysInRange(): %v\n", err) } unvKeyEnd, _, err := ctx.SplitKey(TkEnd) if err != nil { dvid.Errorf("Error in KeysInRange(): %v\n", err) } rr := api.NewRange(encodeKey(unvKeyBeg), encodeKey(unvKeyEnd)) err = tbl.ReadRows(db.ctx, rr, func(r api.Row) bool { if len(r[familyName]) == 0 { dvid.Errorf("Error in KeysInRange(): row has no columns") return false } unvKeyRow, err := decodeKey(r.Key()) if err != nil { dvid.Errorf("Error in KeysInRange(): %v\n", err) return false } verKeyRow, err := decodeKey(r[familyName][0].Column) if err != nil { dvid.Errorf("Error in KeysInRange(): %v\n", err) return false } fullKey := storage.MergeKey(unvKeyRow, verKeyRow) tkey, err := storage.TKeyFromKey(fullKey) if err != nil { dvid.Errorf("Error in KeysInRange(): %v\n", err) return false } tKeys = append(tKeys, tkey) return true // keep going }, api.RowFilter(api.StripValueFilter())) return tKeys, err }
// versionedRange sends a range of key-value pairs for a particular version down a channel. func (db *KVAutobus) versionedRange(vctx storage.VersionedCtx, kStart, kEnd storage.TKey, ch chan errorableKV, keysOnly bool) { minKey, err := vctx.MinVersionKey(kStart) if err != nil { ch <- errorableKV{nil, err} return } maxKey, err := vctx.MaxVersionKey(kEnd) if err != nil { ch <- errorableKV{nil, err} return } maxVersionKey, err := vctx.MaxVersionKey(kStart) if err != nil { ch <- errorableKV{nil, err} return } kvs, err := db.getRange(vctx, minKey, maxKey) if err != nil { ch <- errorableKV{nil, err} return } versions := []*storage.KeyValue{} for _, kv := range kvs { // Did we pass all versions for last key read? if bytes.Compare(kv.K, maxVersionKey) > 0 { indexBytes, err := storage.TKeyFromKey(kv.K) if err != nil { ch <- errorableKV{nil, err} return } maxVersionKey, err = vctx.MaxVersionKey(indexBytes) if err != nil { ch <- errorableKV{nil, err} return } // log.Printf("->maxVersionKey %v (transmitting %d values)\n", maxVersionKey, len(values)) sendKV(vctx, versions, ch) versions = []*storage.KeyValue{} } // Did we pass the final key? if bytes.Compare(kv.K, maxKey) > 0 { if len(versions) > 0 { sendKV(vctx, versions, ch) } break } // log.Printf("Appending value with key %v\n", itKey) versions = append(versions, kv) } if len(versions) >= 0 { sendKV(vctx, versions, ch) } ch <- errorableKV{nil, nil} }
// GetRange returns a range of values spanning (TkBeg, kEnd) keys. func (db *BigTable) GetRange(ctx storage.Context, TkBeg, TkEnd storage.TKey) ([]*storage.TKeyValue, error) { if db == nil { return nil, fmt.Errorf("Can't call GetRange() on nil BigTable") } if ctx == nil { return nil, fmt.Errorf("Received nil context in GetRange()") } unvKeyBeg, _, err := ctx.SplitKey(TkBeg) if err != nil { dvid.Errorf("Error in GetRange(): %v\n", err) } unvKeyEnd, _, err := ctx.SplitKey(TkEnd) if err != nil { dvid.Errorf("Error in GetRange(): %v\n", err) } tKeyValues := make([]*storage.TKeyValue, 0) rr := api.NewRange(encodeKey(unvKeyBeg), encodeKey(unvKeyEnd)) err = tbl.ReadRows(db.ctx, rr, func(r api.Row) bool { unvKeyRow, err := decodeKey(r.Key()) if err != nil { dvid.Errorf("Error in GetRange() decodeKey(r.Key()): %v\n", err) return false } // dvid.Infof("GetRange() with row key= %v", r.Key()) for _, readItem := range r[familyName] { verKey, err := decodeKey(readItem.Column) if err != nil { dvid.Errorf("Error in GetRange() decodeKey(readItem.Column): %v\n", err) return false } fullKey := storage.MergeKey(unvKeyRow, verKey) // dvid.Infof("colum key= %v , timestamp = %v", verKey, readItem.Timestamp) tkey, err := storage.TKeyFromKey(fullKey) if err != nil { dvid.Errorf("Error in GetRange() storage.TKeyFromKey(fullKey): %v\n", err) return false } kv := storage.TKeyValue{tkey, readItem.Value} tKeyValues = append(tKeyValues, &kv) } return true // keep going }) return tKeyValues, err }
// ProcessRange sends a range of key-value pairs to chunk handlers. If the keys are versioned, // only key-value pairs for kStart's version will be transmitted. If f returns an error, the // function is immediately terminated and returns an error. func (db *LevelDB) ProcessRange(ctx storage.Context, kStart, kEnd storage.TKey, op *storage.ChunkOp, f storage.ChunkFunc) error { if db == nil { return fmt.Errorf("Can't call ProcessRange on nil LevelDB") } if ctx == nil { return fmt.Errorf("Received nil context in ProcessRange()") } ch := make(chan errorableKV) done := make(chan struct{}) defer close(done) // Run the range query on a potentially versioned key in a goroutine. go func() { if ctx == nil || !ctx.Versioned() { db.unversionedRange(ctx, kStart, kEnd, ch, done, false) } else { db.versionedRange(ctx.(storage.VersionedCtx), kStart, kEnd, ch, done, false) } }() // Consume the key-value pairs. for { result := <-ch if result.KeyValue == nil { return nil } if result.error != nil { return result.error } if op != nil && op.Wg != nil { op.Wg.Add(1) } tk, err := storage.TKeyFromKey(result.KeyValue.K) if err != nil { return err } tkv := storage.TKeyValue{tk, result.KeyValue.V} chunk := &storage.Chunk{op, &tkv} if err := f(chunk); err != nil { return err } } }
func TestTileKey(t *testing.T) { datastore.OpenTest() defer datastore.CloseTest() uuid, _ := initTestRepo() server.CreateTestInstance(t, uuid, "imagetile", "tiles", dvid.Config{}) keyURL := fmt.Sprintf("%snode/%s/tiles/tilekey/xy/0/1_2_3", server.WebAPIPath, uuid) respStr := server.TestHTTP(t, "GET", keyURL, nil) keyResp := struct { Key string `json:"key"` }{} if err := json.Unmarshal(respStr, &keyResp); err != nil { t.Fatalf("Couldn't parse JSON response to tilekey request (%v):\n%s\n", err, keyResp) } kb := make([]byte, hex.DecodedLen(len(keyResp.Key))) _, err := hex.Decode(kb, []byte(keyResp.Key)) if err != nil { t.Fatalf("Couldn't parse return hex key: %s", keyResp.Key) } // Decipher TKey portion to make sure it's correct. key := storage.Key(kb) tk, err := storage.TKeyFromKey(key) if err != nil { t.Fatalf("Couldn't get TKey from returned key (%v): %x", err, kb) } tile, plane, scale, err := DecodeTKey(tk) if err != nil { t.Fatalf("Bad decode of TKey (%v): %x", err, tk) } expectTile := dvid.ChunkPoint3d{1, 2, 3} if tile != expectTile { t.Errorf("Expected tile %v, got %v\n", expectTile, tile) } if !plane.Equals(dvid.XY) { t.Errorf("Expected plane to be XY, got %v\n", plane) } if scale != 0 { t.Errorf("Expected scale to be 0, got %d\n", scale) } }
// KeysInRange returns a range of present keys spanning (kStart, kEnd). Values // associated with the keys are not read. If the keys are versioned, only keys // in the ancestor path of the current context's version will be returned. func (db *LevelDB) KeysInRange(ctx storage.Context, kStart, kEnd storage.TKey) ([]storage.TKey, error) { if db == nil { return nil, fmt.Errorf("Can't call KeysInRange on nil LevelDB") } if ctx == nil { return nil, fmt.Errorf("Received nil context in KeysInRange()") } ch := make(chan errorableKV) done := make(chan struct{}) defer close(done) // Run the range query on a potentially versioned key in a goroutine. go func() { if !ctx.Versioned() { db.unversionedRange(ctx, kStart, kEnd, ch, done, true) } else { db.versionedRange(ctx.(storage.VersionedCtx), kStart, kEnd, ch, done, true) } }() // Consume the keys. values := []storage.TKey{} for { result := <-ch if result.KeyValue == nil { return values, nil } if result.error != nil { return nil, result.error } tk, err := storage.TKeyFromKey(result.KeyValue.K) if err != nil { return nil, err } values = append(values, tk) } }
// copyData copies all key-value pairs pertinent to the given data instance d2. If d2 is nil,
// the destination data instance is d1, useful for migration of data to a new store.
// Each datatype can implement filters that can restrict the transmitted key-value pairs
// based on the given FilterSpec.
//
// A single storing goroutine consumes pairs from a buffered channel while this
// function drives the range query; a nil value on the channel is the end-of-data
// sentinel that triggers the final log/stats and wg.Done().
func copyData(oldKV, newKV storage.OrderedKeyValueDB, d1, d2 dvid.Data, uuid dvid.UUID, f storage.Filter, flatten bool) error {
	// Get data context for this UUID.
	v, err := VersionFromUUID(uuid)
	if err != nil {
		return err
	}
	srcCtx := NewVersionedCtx(d1, v)
	var dstCtx *VersionedCtx
	if d2 == nil {
		// Same-instance migration: source and destination share one context.
		d2 = d1
		dstCtx = srcCtx
	} else {
		dstCtx = NewVersionedCtx(d2, v)
	}

	// Send this instance's key-value pairs
	var wg sync.WaitGroup
	wg.Add(1)

	stats := new(txStats)
	stats.lastTime = time.Now()

	// Counters are read by the goroutine only after the nil sentinel is sent,
	// so the channel ordering serializes access to them.
	var kvTotal, kvSent int
	var bytesTotal, bytesSent uint64
	keysOnly := false
	if flatten {
		// Start goroutine to receive flattened key-value pairs and store them.
		ch := make(chan *storage.TKeyValue, 1000)
		go func() {
			for {
				tkv := <-ch
				if tkv == nil {
					// End-of-data sentinel: report totals and release the waiter.
					wg.Done()
					dvid.Infof("Copied %d %q key-value pairs (%s, out of %d kv pairs, %s) [flattened]\n",
						kvSent, d1.DataName(), humanize.Bytes(bytesSent), kvTotal, humanize.Bytes(bytesTotal))
					stats.printStats()
					return
				}
				kvTotal++
				curBytes := uint64(len(tkv.V) + len(tkv.K))
				bytesTotal += curBytes
				if f != nil {
					skip, err := f.Check(tkv)
					if err != nil {
						dvid.Errorf("problem applying filter on data %q: %v\n", d1.DataName(), err)
						continue
					}
					if skip {
						continue
					}
				}
				kvSent++
				bytesSent += curBytes
				// Versioned Put: the destination context re-keys the flattened pair.
				if err := newKV.Put(dstCtx, tkv.K, tkv.V); err != nil {
					dvid.Errorf("can't put k/v pair to destination instance %q: %v\n", d2.DataName(), err)
				}
				stats.addKV(tkv.K, tkv.V)
			}
		}()

		begKey, endKey := srcCtx.TKeyRange()
		err := oldKV.ProcessRange(srcCtx, begKey, endKey, &storage.ChunkOp{}, func(c *storage.Chunk) error {
			if c == nil {
				return fmt.Errorf("received nil chunk in flatten push for data %s", d1.DataName())
			}
			ch <- c.TKeyValue
			return nil
		})
		// Terminate the storing goroutine even when the range query errored.
		ch <- nil
		if err != nil {
			return fmt.Errorf("error in flatten push for data %q: %v", d1.DataName(), err)
		}
	} else {
		// Start goroutine to receive all key-value pairs and store them.
		ch := make(chan *storage.KeyValue, 1000)
		go func() {
			for {
				kv := <-ch
				if kv == nil {
					// End-of-data sentinel: report totals and release the waiter.
					wg.Done()
					dvid.Infof("Sent %d %q key-value pairs (%s, out of %d kv pairs, %s)\n",
						kvSent, d1.DataName(), humanize.Bytes(bytesSent), kvTotal, humanize.Bytes(bytesTotal))
					stats.printStats()
					return
				}
				tkey, err := storage.TKeyFromKey(kv.K)
				if err != nil {
					dvid.Errorf("couldn't get %q TKey from Key %v: %v\n", d1.DataName(), kv.K, err)
					continue
				}
				kvTotal++
				curBytes := uint64(len(kv.V) + len(kv.K))
				bytesTotal += curBytes
				if f != nil {
					skip, err := f.Check(&storage.TKeyValue{K: tkey, V: kv.V})
					if err != nil {
						dvid.Errorf("problem applying filter on data %q: %v\n", d1.DataName(), err)
						continue
					}
					if skip {
						continue
					}
				}
				kvSent++
				bytesSent += curBytes
				if dstCtx != nil {
					// Rewrite the raw key in place so it addresses the destination instance.
					err := dstCtx.UpdateInstance(kv.K)
					if err != nil {
						dvid.Errorf("can't update raw key to new data instance %q: %v\n", d2.DataName(), err)
					}
				}
				// Raw Put: the (possibly rewritten) full key is stored as-is.
				if err := newKV.RawPut(kv.K, kv.V); err != nil {
					dvid.Errorf("can't put k/v pair to destination instance %q: %v\n", d2.DataName(), err)
				}
				stats.addKV(kv.K, kv.V)
			}
		}()

		begKey, endKey := srcCtx.KeyRange()
		// RawRangeQuery sends the nil terminator itself on completion —
		// NOTE(review): presumed from the lack of an explicit `ch <- nil` here; verify against its contract.
		if err = oldKV.RawRangeQuery(begKey, endKey, keysOnly, ch, nil); err != nil {
			return fmt.Errorf("push voxels %q range query: %v", d1.DataName(), err)
		}
	}
	wg.Wait()
	return nil
}
// versionedRange sends a range of key-value pairs for a particular version down a channel. func (db *LevelDB) versionedRange(vctx storage.VersionedCtx, begTKey, endTKey storage.TKey, ch chan errorableKV, done <-chan struct{}, keysOnly bool) { dvid.StartCgo() ro := levigo.NewReadOptions() it := db.ldb.NewIterator(ro) defer func() { it.Close() dvid.StopCgo() }() minKey, err := vctx.MinVersionKey(begTKey) if err != nil { ch <- errorableKV{nil, err} return } maxKey, err := vctx.MaxVersionKey(endTKey) if err != nil { ch <- errorableKV{nil, err} return } values := []*storage.KeyValue{} maxVersionKey, err := vctx.MaxVersionKey(begTKey) if err != nil { ch <- errorableKV{nil, err} return } // log.Printf(" minKey %v\n", minKey) // log.Printf(" maxKey %v\n", maxKey) // log.Printf(" maxVersionKey %v\n", maxVersionKey) it.Seek(minKey) var itValue []byte for { select { case <-done: // only happens if we don't care about rest of data. ch <- errorableKV{nil, nil} return default: } if it.Valid() { if !keysOnly { itValue = it.Value() storage.StoreValueBytesRead <- len(itValue) } itKey := it.Key() // log.Printf(" +++valid key %v\n", itKey) storage.StoreKeyBytesRead <- len(itKey) // Did we pass all versions for last key read? if bytes.Compare(itKey, maxVersionKey) > 0 { indexBytes, err := storage.TKeyFromKey(itKey) if err != nil { ch <- errorableKV{nil, err} return } maxVersionKey, err = vctx.MaxVersionKey(indexBytes) if err != nil { ch <- errorableKV{nil, err} return } // log.Printf("->maxVersionKey %v (transmitting %d values)\n", maxVersionKey, len(values)) sendKV(vctx, values, ch) values = []*storage.KeyValue{} } // Did we pass the final key? 
if bytes.Compare(itKey, maxKey) > 0 { if len(values) > 0 { sendKV(vctx, values, ch) } ch <- errorableKV{nil, nil} return } // log.Printf("Appending value with key %v\n", itKey) values = append(values, &storage.KeyValue{K: itKey, V: itValue}) it.Next() } else { if err = it.GetError(); err != nil { ch <- errorableKV{nil, err} } else { sendKV(vctx, values, ch) ch <- errorableKV{nil, nil} } return } } }
// DeleteRange removes all key-value pairs with keys in the given range. func (db *LevelDB) DeleteRange(ctx storage.Context, kStart, kEnd storage.TKey) error { if db == nil { return fmt.Errorf("Can't call DeleteRange on nil LevelDB") } if ctx == nil { return fmt.Errorf("Received nil context in DeleteRange()") } // For leveldb, we just iterate over keys in range and delete each one using batch. const BATCH_SIZE = 10000 batch := db.NewBatch(ctx).(*goBatch) ch := make(chan errorableKV) done := make(chan struct{}) defer close(done) // Run the keys-only range query in a goroutine. go func() { if ctx == nil || !ctx.Versioned() { db.unversionedRange(ctx, kStart, kEnd, ch, done, true) } else { db.versionedRange(ctx.(storage.VersionedCtx), kStart, kEnd, ch, done, true) } }() // Consume the key-value pairs. numKV := 0 for { dvid.Infof("Waiting for result...\n") result := <-ch dvid.Infof("Got result: %v\n", result) if result.KeyValue == nil { break } if result.error != nil { return result.error } // The key coming down channel is not index but full key, so no need to construct key using context. // If versioned, write a tombstone using current version id since we don't want to delete locked ancestors. // If unversioned, just delete. tk, err := storage.TKeyFromKey(result.KeyValue.K) if err != nil { return err } batch.Delete(tk) if (numKV+1)%BATCH_SIZE == 0 { if err := batch.Commit(); err != nil { dvid.Criticalf("Error on batch commit of DeleteRange at key-value pair %d: %v\n", numKV, err) return fmt.Errorf("Error on batch commit of DeleteRange at key-value pair %d: %v\n", numKV, err) } batch = db.NewBatch(ctx).(*goBatch) } numKV++ } if numKV%BATCH_SIZE != 0 { if err := batch.Commit(); err != nil { dvid.Criticalf("Error on last batch commit of DeleteRange: %v\n", err) return fmt.Errorf("Error on last batch commit of DeleteRange: %v\n", err) } } dvid.Debugf("Deleted %d key-value pairs via delete range for %s.\n", numKV, ctx) return nil }
// DeleteConflicts removes all conflicted kv pairs for the given data instance using the priority
// established by parents. As a side effect, newParents are modified by new children of parents.
//
// The full key space of the instance is streamed (keys only) into a goroutine that
// groups consecutive keys sharing one TKey, finds which versions' values conflict,
// and deletes the losers via deleteConflict (which lazily creates a child version
// per parent to hold the deletions).
func DeleteConflicts(uuid dvid.UUID, data DataService, oldParents, newParents []dvid.UUID) error {
	if manager == nil {
		return ErrManagerNotInitialized
	}

	// Convert UUIDs to versions + bool for whether it's a child suitable for add deletions.
	parents := make(map[dvid.VersionID]*extensionNode, len(oldParents))
	parentsV := make([]dvid.VersionID, len(oldParents))
	for i, oldUUID := range oldParents {
		oldV, err := manager.versionFromUUID(oldUUID)
		if err != nil {
			return err
		}
		parentsV[i] = oldV
		if newParents[i] != dvid.NilUUID {
			// Caller already supplied a child version to receive deletions.
			newV, err := manager.versionFromUUID(newParents[i])
			if err != nil {
				return err
			}
			parents[oldV] = &extensionNode{oldUUID, newParents[i], newV}
		} else {
			// No child yet; deleteConflict will create one on demand.
			parents[oldV] = &extensionNode{oldUUID, dvid.NilUUID, 0}
		}
	}

	// Process stream of incoming kv pair for this data instance.
	baseCtx := NewVersionedCtx(data, 0)
	ch := make(chan *storage.KeyValue, 1000)
	wg := new(sync.WaitGroup)
	wg.Add(1)
	go func() {
		defer wg.Done()
		var err error
		var curV dvid.VersionID
		var curTK, batchTK storage.TKey
		kvv := kvVersions{} // all versions seen so far for the current TKey
		for {
			kv := <-ch
			if kv == nil {
				// End of stream: a nil curTK forces the final batch to flush below
				// (unless the stream was empty, in which case both TKeys are nil).
				curTK = nil
			} else {
				curV, err = baseCtx.VersionFromKey(kv.K)
				if err != nil {
					dvid.Errorf("Can't decode key when deleting conflicts for %s", data.DataName())
					continue
				}
				// If we have a different TKey, then process the batch of versions.
				curTK, err = storage.TKeyFromKey(kv.K)
				if err != nil {
					dvid.Errorf("Error in processing kv pairs in DeleteConflicts: %v\n", err)
					continue
				}
				if batchTK == nil {
					batchTK = curTK
				}
			}
			if !bytes.Equal(curTK, batchTK) {
				// Get conflicts.
				toDelete, err := kvv.FindConflicts(parentsV)
				if err != nil {
					dvid.Errorf("Error finding conflicts: %v\n", err)
					continue
				}
				// Create new node if necessary to apply deletions, and if so, store new node.
				for v, k := range toDelete {
					if err := deleteConflict(data, parents[v], k); err != nil {
						dvid.Errorf("Unable to delete conflict: %v\n", err)
						continue
					}
				}
				// Delete the stash of kv pairs
				kvv = kvVersions{}
				batchTK = curTK
			}
			if kv == nil {
				return
			}
			kvv[curV] = kvvNode{kv: kv}
		}
	}()

	// Iterate through all k/v for this data instance.
	store, err := getOrderedKeyValueDB(data)
	if err != nil {
		return err
	}
	minKey, maxKey := baseCtx.KeyRange()
	keysOnly := true
	if err := store.RawRangeQuery(minKey, maxKey, keysOnly, ch, nil); err != nil {
		return err
	}
	wg.Wait()

	// Return the new parents which were needed for deletions.
	//newParents = make([]dvid.UUID, len(oldParents))
	for i, oldV := range parentsV {
		if parents[oldV].newUUID == dvid.NilUUID {
			// No deletions were applied under this parent; keep the original UUID.
			newParents[i] = parents[oldV].oldUUID
		} else {
			newParents[i] = parents[oldV].newUUID
		}
	}
	return nil
}
// ProcessRange sends a range of type key-value pairs to type-specific chunk handlers, // allowing chunk processing to be concurrent with key-value sequential reads. // Since the chunks are typically sent during sequential read iteration, the // receiving function can be organized as a pool of chunk handling goroutines. // See datatype/imageblk.ProcessChunk() for an example. func (db *BigTable) ProcessRange(ctx storage.Context, TkBeg, TkEnd storage.TKey, op *storage.ChunkOp, f storage.ChunkFunc) error { if db == nil { return fmt.Errorf("Can't call ProcessRange() on nil BigTable") } if ctx == nil { return fmt.Errorf("Received nil context in ProcessRange()") } unvKeyBeg, verKey, err := ctx.SplitKey(TkBeg) if err != nil { dvid.Errorf("Error in ProcessRange(): %v\n", err) } unvKeyEnd, _, err := ctx.SplitKey(TkEnd) if err != nil { dvid.Errorf("Error in ProcessRange(): %v\n", err) } rr := api.NewRange(encodeKey(unvKeyBeg), encodeKey(unvKeyEnd)) err = tbl.ReadRows(db.ctx, rr, func(r api.Row) bool { if len(r[familyName]) == 0 { dvid.Errorf("Error in KeysInRange(): row has no columns") return false } unvKeyRow, err := decodeKey(r.Key()) if err != nil { dvid.Errorf("Error in ProcessRange(): %v\n", err) return false } verKeyRow, err := decodeKey(r[familyName][0].Column) if err != nil { dvid.Errorf("Error in ProcessRange(): %v\n", err) return false } fullKey := storage.MergeKey(unvKeyRow, verKeyRow) tkey, err := storage.TKeyFromKey(fullKey) if err != nil { dvid.Errorf("Error in ProcessRange(): %v\n", err) return false } if op.Wg != nil { op.Wg.Add(1) } value, err := getValue(r, verKey) if err != nil { dvid.Errorf("Error in ProcessRange(): %v\n", err) return false } tkv := storage.TKeyValue{tkey, value} chunk := &storage.Chunk{op, &tkv} if err := f(chunk); err != nil { dvid.Errorf("Error in ProcessRange(): %v\n", err) return false } return true // keep going }) return err }
// PushData transfers all key-value pairs pertinent to the given data instance.
// Each datatype can implement filters that can restrict the transmitted key-value pairs
// based on the given FilterSpec. Note that because of the generality of this function,
// a particular datatype implementation could be much more efficient when implementing
// filtering. For example, the imageblk datatype could scan its key-values using the ROI
// to generate keys (since imageblk keys will likely be a vast superset of ROI spans),
// while this generic routine will scan every key-value pair for a data instance and
// query the ROI to see if this key is ok to send.
//
// A single sending goroutine consumes pairs from a buffered channel while this
// function drives the range query; a nil value on the channel is the end-of-data
// sentinel that sends the instance-termination message and releases wg.
func PushData(d dvid.Data, p *PushSession) error {
	// We should be able to get the backing store (only ordered kv for now)
	storer, ok := d.(storage.Accessor)
	if !ok {
		return fmt.Errorf("unable to push data %q: unable to access backing store", d.DataName())
	}
	store, err := storer.GetOrderedKeyValueDB()
	if err != nil {
		return fmt.Errorf("unable to get backing store for data %q: %v\n", d.DataName(), err)
	}

	// See if this data instance implements a Send filter.
	var filter storage.Filter
	filterer, ok := d.(storage.Filterer)
	if ok {
		var err error
		filter, err = filterer.NewFilter(p.Filter)
		if err != nil {
			return err
		}
	}

	// pick any version because flatten transmit will only have one version, and all or branch transmit will
	// be looking at all versions anyway.
	if len(p.Versions) == 0 {
		return fmt.Errorf("need at least one version to send")
	}
	var v dvid.VersionID
	for v = range p.Versions {
		break
	}
	ctx := NewVersionedCtx(d, v)

	// Send the initial data instance start message
	if err := p.StartInstancePush(d); err != nil {
		return err
	}

	// Send this instance's key-value pairs
	var wg sync.WaitGroup
	wg.Add(1)

	// Counters are read by the goroutine only after the nil sentinel is sent,
	// so the channel ordering serializes access to them.
	var kvTotal, kvSent int
	var bytesTotal, bytesSent uint64
	keysOnly := false
	if p.t == rpc.TransmitFlatten {
		// Start goroutine to receive flattened key-value pairs and transmit to remote.
		ch := make(chan *storage.TKeyValue, 1000)
		go func() {
			for {
				tkv := <-ch
				if tkv == nil {
					// End-of-data sentinel: close out the instance push and release the waiter.
					if err := p.EndInstancePush(); err != nil {
						dvid.Errorf("Bad data %q termination: %v\n", d.DataName(), err)
					}
					wg.Done()
					dvid.Infof("Sent %d %q key-value pairs (%s, out of %d kv pairs, %s) [flattened]\n",
						kvSent, d.DataName(), humanize.Bytes(bytesSent), kvTotal, humanize.Bytes(bytesTotal))
					return
				}
				kvTotal++
				curBytes := uint64(len(tkv.V) + len(tkv.K))
				bytesTotal += curBytes
				if filter != nil {
					skip, err := filter.Check(tkv)
					if err != nil {
						dvid.Errorf("problem applying filter on data %q: %v\n", d.DataName(), err)
						continue
					}
					if skip {
						continue
					}
				}
				kvSent++
				bytesSent += curBytes
				// Rebuild a full key under the chosen version's context before sending.
				kv := storage.KeyValue{
					K: ctx.ConstructKey(tkv.K),
					V: tkv.V,
				}
				if err := p.SendKV(&kv); err != nil {
					dvid.Errorf("Bad data %q send KV: %v", d.DataName(), err)
				}
			}
		}()

		begKey, endKey := ctx.TKeyRange()
		err := store.ProcessRange(ctx, begKey, endKey, &storage.ChunkOp{}, func(c *storage.Chunk) error {
			if c == nil {
				return fmt.Errorf("received nil chunk in flatten push for data %s", d.DataName())
			}
			ch <- c.TKeyValue
			return nil
		})
		// Terminate the sending goroutine even when the range query errored.
		ch <- nil
		if err != nil {
			return fmt.Errorf("error in flatten push for data %q: %v", d.DataName(), err)
		}
	} else {
		// Start goroutine to receive all key-value pairs and transmit to remote.
		ch := make(chan *storage.KeyValue, 1000)
		go func() {
			for {
				kv := <-ch
				if kv == nil {
					// End-of-data sentinel: close out the instance push and release the waiter.
					if err := p.EndInstancePush(); err != nil {
						dvid.Errorf("Bad data %q termination: %v\n", d.DataName(), err)
					}
					wg.Done()
					dvid.Infof("Sent %d %q key-value pairs (%s, out of %d kv pairs, %s)\n",
						kvSent, d.DataName(), humanize.Bytes(bytesSent), kvTotal, humanize.Bytes(bytesTotal))
					return
				}
				// Drop pairs outside the requested version set.
				if !ctx.ValidKV(kv, p.Versions) {
					continue
				}
				kvTotal++
				curBytes := uint64(len(kv.V) + len(kv.K))
				bytesTotal += curBytes
				if filter != nil {
					tkey, err := storage.TKeyFromKey(kv.K)
					if err != nil {
						dvid.Errorf("couldn't get %q TKey from Key %v: %v\n", d.DataName(), kv.K, err)
						continue
					}
					skip, err := filter.Check(&storage.TKeyValue{K: tkey, V: kv.V})
					if err != nil {
						dvid.Errorf("problem applying filter on data %q: %v\n", d.DataName(), err)
						continue
					}
					if skip {
						continue
					}
				}
				kvSent++
				bytesSent += curBytes
				if err := p.SendKV(kv); err != nil {
					dvid.Errorf("Bad data %q send KV: %v", d.DataName(), err)
				}
			}
		}()

		begKey, endKey := ctx.KeyRange()
		// RawRangeQuery sends the nil terminator itself on completion —
		// NOTE(review): presumed from the lack of an explicit `ch <- nil` here; verify against its contract.
		if err = store.RawRangeQuery(begKey, endKey, keysOnly, ch, nil); err != nil {
			return fmt.Errorf("push voxels %q range query: %v", d.DataName(), err)
		}
	}
	wg.Wait()
	return nil
}