// Note that this does not delete any removed labels in the block since we only get the CURRENT block // and not PAST blocks. To allow mutation of label blocks, not just ingestion, we need another function. func (d *Data) ingestBlock(ctx *datastore.VersionedCtx, block imageblk.Block, batcher storage.KeyValueBatcher) { // Iterate through this block of labels and create RLEs for each label. blockBytes := len(block.Data) if blockBytes != int(d.BlockSize.Prod())*8 { dvid.Criticalf("Deserialized label block %d bytes, not uint64 size times %d block elements\n", blockBytes, d.BlockSize.Prod()) return } labelRLEs := make(map[uint64]dvid.RLEs, 10) firstPt := block.Index.MinPoint(d.BlockSize) lastPt := block.Index.MaxPoint(d.BlockSize) var curStart dvid.Point3d var voxelLabel, curLabel, maxLabel uint64 var z, y, x, curRun int32 start := 0 for z = firstPt.Value(2); z <= lastPt.Value(2); z++ { for y = firstPt.Value(1); y <= lastPt.Value(1); y++ { for x = firstPt.Value(0); x <= lastPt.Value(0); x++ { voxelLabel = binary.LittleEndian.Uint64(block.Data[start : start+8]) if maxLabel < voxelLabel { maxLabel = voxelLabel } start += 8 // If we hit background or have switched label, save old run and start new one. if voxelLabel == 0 || voxelLabel != curLabel { // Save old run if curRun > 0 { labelRLEs[curLabel] = append(labelRLEs[curLabel], dvid.NewRLE(curStart, curRun)) } // Start new one if not zero label. if voxelLabel != 0 { curStart = dvid.Point3d{x, y, z} curRun = 1 } else { curRun = 0 } curLabel = voxelLabel } else { curRun++ } } // Force break of any runs when we finish x scan. if curRun > 0 { labelRLEs[curLabel] = append(labelRLEs[curLabel], dvid.NewRLE(curStart, curRun)) curLabel = 0 curRun = 0 } } } // Store the RLEs for each label in this block. 
if maxLabel > 0 { batch := batcher.NewBatch(ctx) blockStr := block.Index.ToIZYXString() for label, rles := range labelRLEs { tk := NewTKey(label, blockStr) rleBytes, err := rles.MarshalBinary() if err != nil { dvid.Errorf("Bad encoding labelvol keys for label %d: %v\n", label, err) return } batch.Put(tk, rleBytes) } // compare-and-set MaxLabel and batch commit d.casMaxLabel(batch, ctx.VersionID(), maxLabel) } }
func (d *Data) mutateBlock(ctx *datastore.VersionedCtx, block imageblk.MutatedBlock, batcher storage.KeyValueBatcher) { // Iterate through previous and current labels, detecting set of previous labels and RLEs for current labels. blockBytes := len(block.Data) if blockBytes != int(d.BlockSize.Prod())*8 { dvid.Criticalf("Deserialized label block %d bytes, not uint64 size times %d block elements\n", blockBytes, d.BlockSize.Prod()) return } labelRLEs := make(map[uint64]dvid.RLEs, 10) labelDiff := make(map[uint64]bool, 10) firstPt := block.Index.MinPoint(d.BlockSize) lastPt := block.Index.MaxPoint(d.BlockSize) var curStart dvid.Point3d var voxelLabel, curLabel, maxLabel uint64 var z, y, x, curRun int32 start := 0 for z = firstPt.Value(2); z <= lastPt.Value(2); z++ { for y = firstPt.Value(1); y <= lastPt.Value(1); y++ { for x = firstPt.Value(0); x <= lastPt.Value(0); x++ { var pastLabel uint64 if block.Prev == nil || len(block.Prev) == 0 { pastLabel = 0 } else { pastLabel = binary.LittleEndian.Uint64(block.Prev[start : start+8]) } voxelLabel = binary.LittleEndian.Uint64(block.Data[start : start+8]) if maxLabel < voxelLabel { maxLabel = voxelLabel } if pastLabel != 0 { if pastLabel != voxelLabel { labelDiff[pastLabel] = true } else { _, found := labelDiff[pastLabel] if !found { labelDiff[pastLabel] = false } } } start += 8 // If we hit background or have switched label, save old run and start new one. if voxelLabel == 0 || voxelLabel != curLabel { // Save old run if curRun > 0 { labelRLEs[curLabel] = append(labelRLEs[curLabel], dvid.NewRLE(curStart, curRun)) } // Start new one if not zero label. if voxelLabel != 0 { curStart = dvid.Point3d{x, y, z} curRun = 1 } else { curRun = 0 } curLabel = voxelLabel } else { curRun++ } } // Force break of any runs when we finish x scan. 
if curRun > 0 { labelRLEs[curLabel] = append(labelRLEs[curLabel], dvid.NewRLE(curStart, curRun)) curLabel = 0 curRun = 0 } } } // If a previous label has no change with current label RLE, then delete the label RLE since no changes // are necessary. Else if previous label is not present in current label RLEs, delete labelvol. var deletes []storage.TKey blockStr := block.Index.ToIZYXString() for label, diff := range labelDiff { _, found := labelRLEs[label] if diff && !found { // mark previous label's RLEs for deletion tk := NewTKey(label, blockStr) deletes = append(deletes, tk) } else if !diff && found { // delete current label's RLEs because there's no difference with past RLE delete(labelRLEs, label) } } if len(deletes) > 0 { batch := batcher.NewBatch(ctx) for _, tk := range deletes { batch.Delete(tk) } if err := batch.Commit(); err != nil { dvid.Errorf("batch commit on deleting previous labels' labelvols: %v\n", err) } } // Store the RLEs for each label in this block that are new or modified. if len(labelRLEs) > 0 { batch := batcher.NewBatch(ctx) for label, rles := range labelRLEs { tk := NewTKey(label, blockStr) rleBytes, err := rles.MarshalBinary() if err != nil { dvid.Errorf("Bad encoding labelvol keys for label %d: %v\n", label, err) return } batch.Put(tk, rleBytes) } // compare-and-set MaxLabel and batch commit d.casMaxLabel(batch, ctx.VersionID(), maxLabel) } }
// TestLabels exercises annotation<->label synchronization end-to-end: it syncs
// an annotation instance to labelblk/labelvol instances, posts synapses, and
// verifies the per-label annotation queries stay correct through labelblk
// modification, a labelvol merge, sparsevol and coarse splits, a data rename,
// and an element deletion.
func TestLabels(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	// Create testbed volume and data instances
	uuid, _ := initTestRepo()
	var config dvid.Config
	server.CreateTestInstance(t, uuid, "labelblk", "labels", config)
	server.CreateTestInstance(t, uuid, "labelvol", "bodies", config)

	// Establish syncs
	server.CreateTestSync(t, uuid, "labels", "bodies")
	server.CreateTestSync(t, uuid, "bodies", "labels")

	// Populate the labels, which should automatically populate the labelvol
	_ = createLabelTestVolume(t, uuid, "labels")

	if err := datastore.BlockOnUpdating(uuid, "labels"); err != nil {
		t.Fatalf("Error blocking on sync of labels: %v\n", err)
	}

	// Add annotations syncing with "labels" instance checking for deduplication.
	// The sync spec deliberately repeats "labels,bodies" three times to verify
	// duplicate sync targets are collapsed to just two.
	server.CreateTestInstance(t, uuid, "annotation", "mysynapses", config)
	server.CreateTestSync(t, uuid, "mysynapses", "labels,bodies,labels,bodies,labels,bodies")
	dataservice, err := datastore.GetDataByUUIDName(uuid, "mysynapses")
	if err != nil {
		t.Fatal(err)
	}
	data, ok := dataservice.(*Data)
	if !ok {
		t.Fatalf("Can't convert dataservice %v into datastore.Data\n", dataservice)
	}
	if len(data.SyncedData()) != 2 {
		// NOTE(review): the "]" in this message looks like a typo for ")" —
		// left untouched here since it is a runtime string.
		t.Fatalf("Expected 2 syncs (uuids for labels and bodies], got %v\n", data.SyncedData())
	}

	// PUT first batch of synapses
	testJSON, err := json.Marshal(testData)
	if err != nil {
		t.Fatal(err)
	}
	url1 := fmt.Sprintf("%snode/%s/mysynapses/elements", server.WebAPIPath, uuid)
	server.TestHTTP(t, "POST", url1, strings.NewReader(string(testJSON)))

	// Test if labels were properly denormalized.  For the POST we have synchronized label
	// denormalization.  If this were to become asynchronous, we'd want to block on updating
	// like the labelblk<->labelvol sync.
	testResponseLabel(t, expectedLabel1, "%snode/%s/mysynapses/label/1?relationships=true", server.WebAPIPath, uuid)
	testResponseLabel(t, expectedLabel2, "%snode/%s/mysynapses/label/2?relationships=true", server.WebAPIPath, uuid)
	testResponseLabel(t, expectedLabel3, "%snode/%s/mysynapses/label/3?relationships=true", server.WebAPIPath, uuid)
	testResponseLabel(t, expectedLabel3NoRel, "%snode/%s/mysynapses/label/3", server.WebAPIPath, uuid)
	testResponseLabel(t, expectedLabel4, "%snode/%s/mysynapses/label/4?relationships=true", server.WebAPIPath, uuid)

	// Make change to labelblk and make sure our label synapses have been adjusted (case A)
	_ = modifyLabelTestVolume(t, uuid, "labels")

	if err := datastore.BlockOnUpdating(uuid, "mysynapses"); err != nil {
		t.Fatalf("Error blocking on sync of labels->annotations: %v\n", err)
	}

	testResponseLabel(t, expectedLabel1, "%snode/%s/mysynapses/label/1?relationships=true", server.WebAPIPath, uuid)
	testResponseLabel(t, expectedLabel2a, "%snode/%s/mysynapses/label/2?relationships=true", server.WebAPIPath, uuid)
	testResponseLabel(t, expectedLabel3a, "%snode/%s/mysynapses/label/3?relationships=true", server.WebAPIPath, uuid)
	testResponseLabel(t, expectedLabel4, "%snode/%s/mysynapses/label/4?relationships=true", server.WebAPIPath, uuid)
	testResponseLabel(t, expectedLabel4NoRel, "%snode/%s/mysynapses/label/4", server.WebAPIPath, uuid)

	// Make change to labelvol and make sure our label synapses have been adjusted (case B).
	// Merge 3a into 2a.
	testMerge := mergeJSON(`[2, 3]`)
	testMerge.send(t, uuid, "bodies")

	if err := datastore.BlockOnUpdating(uuid, "labels"); err != nil {
		t.Fatalf("Error blocking on sync of labels: %v\n", err)
	}
	if err := datastore.BlockOnUpdating(uuid, "mysynapses"); err != nil {
		t.Fatalf("Error blocking on sync of synapses: %v\n", err)
	}

	// After the merge, label 3 should be empty and its annotations folded into label 2.
	testResponseLabel(t, expectedLabel1, "%snode/%s/mysynapses/label/1?relationships=true", server.WebAPIPath, uuid)
	testResponseLabel(t, expectedLabel2b, "%snode/%s/mysynapses/label/2?relationships=true", server.WebAPIPath, uuid)
	testResponseLabel(t, nil, "%snode/%s/mysynapses/label/3?relationships=true", server.WebAPIPath, uuid)
	testResponseLabel(t, expectedLabel4, "%snode/%s/mysynapses/label/4?relationships=true", server.WebAPIPath, uuid)

	// Now split label 2b off and check if annotations also split

	// Create the sparsevol encoding for split area
	numspans := len(bodysplit.voxelSpans)
	rles := make(dvid.RLEs, numspans, numspans)
	for i, span := range bodysplit.voxelSpans {
		// Spans are stored (z, y, x0, x1); convert to an RLE starting at (x0, y, z).
		start := dvid.Point3d{span[2], span[1], span[0]}
		length := span[3] - span[2] + 1
		rles[i] = dvid.NewRLE(start, length)
	}
	buf := getBytesRLE(t, rles)

	// Submit the split sparsevol
	reqStr := fmt.Sprintf("%snode/%s/%s/split/%d?splitlabel=7", server.WebAPIPath, uuid, "bodies", 2)
	r := server.TestHTTP(t, "POST", reqStr, buf)
	jsonVal := make(map[string]uint64)
	if err := json.Unmarshal(r, &jsonVal); err != nil {
		t.Errorf("Unable to get new label from split. Instead got: %v\n", jsonVal)
	}

	// Verify that the annotations are correct.
	if err := datastore.BlockOnUpdating(uuid, "mysynapses"); err != nil {
		t.Fatalf("Error blocking on sync of split->annotations: %v\n", err)
	}
	testResponseLabel(t, expectedLabel2c, "%snode/%s/mysynapses/label/2?relationships=true", server.WebAPIPath, uuid)
	url2 := fmt.Sprintf("%snode/%s/mysynapses/label/7?relationships=true", server.WebAPIPath, uuid)
	testResponseLabel(t, expectedLabel7, url2)

	// Change the name of the annotations.
	// Renaming onto an existing data name must fail...
	if err = datastore.RenameData(uuid, "mysynapses", "bodies", "foobar"); err == nil {
		t.Fatalf("Should have been prevented from renaming data 'mysynapses' to existing data 'bodies'!\n")
	}
	// ...while renaming to a fresh name must succeed.
	if err = datastore.RenameData(uuid, "mysynapses", "renamedData", "foobar"); err != nil {
		t.Fatalf("Error renaming annotations: %v\n", err)
	}

	// Make sure the old name is no longer there and the new one is.
	server.TestBadHTTP(t, "GET", url2, nil)
	testResponseLabel(t, expectedLabel2c, "%snode/%s/renamedData/label/2?relationships=true", server.WebAPIPath, uuid)

	// Try a coarse split.

	// Create the encoding for split area in block coordinates.
	rles = dvid.RLEs{
		dvid.NewRLE(dvid.Point3d{3, 1, 3}, 1),
	}
	buf = getBytesRLE(t, rles)

	// Submit the coarse split
	reqStr = fmt.Sprintf("%snode/%s/%s/split-coarse/2?splitlabel=8", server.WebAPIPath, uuid, "bodies")
	r = server.TestHTTP(t, "POST", reqStr, buf)
	jsonVal = make(map[string]uint64)
	if err := json.Unmarshal(r, &jsonVal); err != nil {
		t.Errorf("Unable to get new label from split. Instead got: %v\n", jsonVal)
	}

	// Verify that the annotations are correct.
	if err := datastore.BlockOnUpdating(uuid, "renamedData"); err != nil {
		t.Fatalf("Error blocking on sync of split->annotations: %v\n", err)
	}
	// The coarse split moved everything previously under label 2 to label 8.
	testResponseLabel(t, expectedLabel2c, "%snode/%s/renamedData/label/8?relationships=true", server.WebAPIPath, uuid)
	testResponseLabel(t, nil, "%snode/%s/renamedData/label/2?relationships=true", server.WebAPIPath, uuid)

	// Delete a labeled annotation and make sure it's not in label
	delurl := fmt.Sprintf("%snode/%s/%s/element/20_30_40", server.WebAPIPath, uuid, "renamedData")
	server.TestHTTP(t, "DELETE", delurl, nil)
	testResponseLabel(t, afterDeleteOn7, "%snode/%s/%s/label/7?relationships=true", server.WebAPIPath, uuid, "renamedData")
}
// TestLabels exercises the labelsz data type end-to-end: it posts a lattice of
// PostSyn/PreSyn annotations into a synced labelblk/labelvol testbed, then
// verifies the /top, /count, and /threshold rankings — both with and without
// an ROI restriction — across annotation moves, deletions, a merge, a
// sparsevol split, and a coarse split.
// NOTE(review): this appears to come from a different package's test file than
// the other TestLabels in this concatenation.
func TestLabels(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	// Create testbed volume and data instances
	uuid, _ := datastore.NewTestRepo()
	var config dvid.Config
	server.CreateTestInstance(t, uuid, "labelblk", "labels", config)
	server.CreateTestInstance(t, uuid, "labelvol", "bodies", config)

	// Establish syncs
	server.CreateTestSync(t, uuid, "labels", "bodies")
	server.CreateTestSync(t, uuid, "bodies", "labels")

	// Populate the labels, which should automatically populate the labelvol
	_ = createLabelTestVolume(t, uuid, "labels")

	if err := datastore.BlockOnUpdating(uuid, "labels"); err != nil {
		t.Fatalf("Error blocking on sync of labels: %v\n", err)
	}

	// Add annotations syncing with "labels" instance.
	server.CreateTestInstance(t, uuid, "annotation", "mysynapses", config)
	server.CreateTestSync(t, uuid, "mysynapses", "labels,bodies")

	// Create a ROI that will be used for our labelsz.
	server.CreateTestInstance(t, uuid, "roi", "myroi", config)
	roiRequest := fmt.Sprintf("%snode/%s/myroi/roi", server.WebAPIPath, uuid)
	server.TestHTTP(t, "POST", roiRequest, getROIReader())

	// Create labelsz instances synced to the above annotations.
	// "noroi" counts everything; "withroi" is restricted to the ROI above.
	server.CreateTestInstance(t, uuid, "labelsz", "noroi", config)
	server.CreateTestSync(t, uuid, "noroi", "mysynapses")
	config.Set("ROI", fmt.Sprintf("myroi,%s", uuid))
	server.CreateTestInstance(t, uuid, "labelsz", "withroi", config)
	server.CreateTestSync(t, uuid, "withroi", "mysynapses")

	// PUT first batch of synapses.
	var synapses annotation.Elements
	var x, y, z int32
	// This should put 31x31x31 (29,791) PostSyn in volume with fewer in label 200 than 300.
	// There will be 15 along each dimension from 0 -> 63, then 16 from 64 -> 127.
	// Label 100 will have 15 x 31 x 31 = 14415
	// Label 200 will have 16 x 31 x 15 = 7440
	// Label 300 will have 16 x 31 x 16 = 7936
	for z = 4; z < 128; z += 4 {
		for y = 4; y < 128; y += 4 {
			for x = 4; x < 128; x += 4 {
				e := annotation.Element{
					annotation.ElementNR{
						Pos:  dvid.Point3d{x, y, z},
						Kind: annotation.PostSyn,
					},
					[]annotation.Relationship{},
				}
				synapses = append(synapses, e)
			}
		}
	}
	// This should put 32x32x32 (32,768) PreSyn in volume split 1/2, 1/4, 1/4
	for z = 2; z < 128; z += 4 {
		for y = 2; y < 128; y += 4 {
			for x = 2; x < 128; x += 4 {
				e := annotation.Element{
					annotation.ElementNR{
						Pos:  dvid.Point3d{x, y, z},
						Kind: annotation.PreSyn,
					},
					[]annotation.Relationship{},
				}
				synapses = append(synapses, e)
			}
		}
	}
	testJSON, err := json.Marshal(synapses)
	if err != nil {
		t.Fatal(err)
	}
	url := fmt.Sprintf("%snode/%s/mysynapses/elements", server.WebAPIPath, uuid)
	server.TestHTTP(t, "POST", url, strings.NewReader(string(testJSON)))

	// Check if we have correct sequencing for no ROI labelsz.
	if err := datastore.BlockOnUpdating(uuid, "noroi"); err != nil {
		t.Fatalf("Error blocking on sync of noroi labelsz: %v\n", err)
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/3/PreSyn", server.WebAPIPath, uuid)
	data := server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":16384},{"Label":200,"Size":8192},{"Label":300,"Size":8192}]` {
		t.Errorf("Got back incorrect PreSyn noroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/count/100/PreSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `{"Label":100,"PreSyn":16384}` {
		t.Errorf("Got back incorrect PreSyn noroi count for label 100:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/count/200/PreSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `{"Label":200,"PreSyn":8192}` {
		t.Errorf("Got back incorrect PreSyn noroi count for label 200:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/3/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":14415},{"Label":300,"Size":7936},{"Label":200,"Size":7440}]` {
		t.Errorf("Got back incorrect PostSyn noroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/3/AllSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":30799},{"Label":300,"Size":16128},{"Label":200,"Size":15632}]` {
		t.Errorf("Got back incorrect AllSync noroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/threshold/15633/AllSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":30799},{"Label":300,"Size":16128}]` {
		t.Errorf("Got back incorrect AllSyn noroi threshold:\n%v\n", string(data))
	}
	// Pagination of threshold results via offset/n query parameters.
	url = fmt.Sprintf("%snode/%s/noroi/threshold/1000/AllSyn?offset=1&n=2", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":300,"Size":16128},{"Label":200,"Size":15632}]` {
		t.Errorf("Got back incorrect AllSyn noroi threshold:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/threshold/1000/AllSyn?offset=8&n=2", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[]` {
		t.Errorf("Got back incorrect AllSyn noroi threshold:\n%v\n", string(data))
	}

	// Check if we have correct sequencing for ROI labelsz.
	// ROI constitutes the inner eight 32^3 blocks.
	// There are 16 PostSyn in each ROI dimension.
	// There are also 16 PreSyn in each ROI dimension.
	if err := datastore.BlockOnUpdating(uuid, "withroi"); err != nil {
		t.Fatalf("Error blocking on sync of withroi labelsz: %v\n", err)
	}
	url = fmt.Sprintf("%snode/%s/withroi/top/0/AllSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[]` {
		t.Errorf("Incorrectly handled top n=0 case, expected [] got: %v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/withroi/top/3/PreSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":2048},{"Label":200,"Size":1024},{"Label":300,"Size":1024}]` {
		t.Errorf("Got back incorrect PreSyn withroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/withroi/top/3/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":2048},{"Label":200,"Size":1024},{"Label":300,"Size":1024}]` {
		t.Errorf("Got back incorrect PostSyn withroi ranking:\n%v\n", string(data))
	}

	// Check fewer and larger N requests.
	url = fmt.Sprintf("%snode/%s/noroi/top/2/PreSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":16384},{"Label":200,"Size":8192}]` {
		t.Errorf("Got back incorrect N=2 ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/4/PreSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":16384},{"Label":200,"Size":8192},{"Label":300,"Size":8192}]` {
		t.Errorf("Got back incorrect N=4 ranking:\n%v\n", string(data))
	}

	// Test annotation move of a PostSyn from label 100->300 and also label 200->300
	url = fmt.Sprintf("%snode/%s/mysynapses/move/32_32_32/75_21_69", server.WebAPIPath, uuid)
	server.TestHTTP(t, "POST", url, nil)
	url = fmt.Sprintf("%snode/%s/mysynapses/move/68_20_20/77_21_69", server.WebAPIPath, uuid)
	server.TestHTTP(t, "POST", url, nil)
	if err := datastore.BlockOnUpdating(uuid, "noroi"); err != nil {
		t.Fatalf("Error blocking on sync of noroi labelsz: %v\n", err)
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/3/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":14414},{"Label":300,"Size":7938},{"Label":200,"Size":7439}]` {
		t.Errorf("Got back incorrect PostSyn noroi ranking after move from label 100->300:\n%v\n", string(data))
	}

	// First move took synapse out of ROI so there should be one less for label 100.
	if err := datastore.BlockOnUpdating(uuid, "withroi"); err != nil {
		t.Fatalf("Error blocking on sync of labelsz: %v\n", err)
	}
	url = fmt.Sprintf("%snode/%s/withroi/top/5/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":2047},{"Label":200,"Size":1024},{"Label":300,"Size":1024}]` {
		t.Errorf("Got back incorrect post-move PostSyn withroi ranking:\n%v\n", string(data))
	}

	// Test annotation deletion of moved PostSyn from label 300
	url = fmt.Sprintf("%snode/%s/mysynapses/element/75_21_69", server.WebAPIPath, uuid)
	server.TestHTTP(t, "DELETE", url, nil)
	url = fmt.Sprintf("%snode/%s/mysynapses/element/77_21_69", server.WebAPIPath, uuid)
	server.TestHTTP(t, "DELETE", url, nil)
	if err := datastore.BlockOnUpdating(uuid, "noroi"); err != nil {
		t.Fatalf("Error blocking on sync of noroi labelsz: %v\n", err)
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/3/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":14414},{"Label":300,"Size":7936},{"Label":200,"Size":7439}]` {
		t.Errorf("Got back incorrect PostSyn noroi ranking after deletions from label 300:\n%v\n", string(data))
	}

	// Check sync on merge.
	if err := datastore.BlockOnUpdating(uuid, "bodies"); err != nil {
		t.Fatalf("Error blocking on sync of bodies: %v\n", err)
	}
	testMerge := mergeJSON(`[200, 300]`)
	testMerge.send(t, uuid, "bodies")
	if err := datastore.BlockOnUpdating(uuid, "labels"); err != nil {
		t.Fatalf("Error blocking on sync of labels: %v\n", err)
	}
	time.Sleep(1 * time.Second)
	if err := datastore.BlockOnUpdating(uuid, "mysynapses"); err != nil {
		t.Fatalf("Error blocking on sync of synapses: %v\n", err)
	}
	if err := datastore.BlockOnUpdating(uuid, "noroi"); err != nil {
		t.Fatalf("Error blocking on sync of labelsz: %v\n", err)
	}
	if err := datastore.BlockOnUpdating(uuid, "withroi"); err != nil {
		t.Fatalf("Error blocking on sync of labelsz: %v\n", err)
	}

	// Label 300 merged into 200, so only labels 100 and 200 should remain.
	url = fmt.Sprintf("%snode/%s/withroi/top/5/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":200,"Size":2048},{"Label":100,"Size":2047}]` {
		t.Errorf("Got back incorrect post-merge PostSyn withroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/withroi/count/100/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `{"Label":100,"PostSyn":2047}` {
		t.Errorf("Got back incorrect post-merge PostSyn withroi count of label 100:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/3/PreSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":16384},{"Label":200,"Size":16384}]` {
		t.Errorf("Got back incorrect post-merge PreSyn noroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/3/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":200,"Size":15375},{"Label":100,"Size":14414}]` {
		t.Errorf("Got back incorrect post-merge PostSyn noroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/3/AllSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":200,"Size":31759},{"Label":100,"Size":30798}]` {
		t.Errorf("Got back incorrect post-merge AllSyn noroi ranking:\n%v\n", string(data))
	}

	// Check threshold endpoint
	url = fmt.Sprintf("%snode/%s/withroi/threshold/2048/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":200,"Size":2048}]` {
		t.Errorf("Got back incorrect post-merge PostSyn withroi threshold:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/threshold/16384/PreSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":16384},{"Label":200,"Size":16384}]` {
		t.Errorf("Got back incorrect post-merge PreSyn noroi threshold:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/threshold/15000/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":200,"Size":15375}]` {
		t.Errorf("Got back incorrect post-merge PostSyn noroi threshold:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/threshold/0/PostSyn?offset=1&n=1", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":14414}]` {
		t.Errorf("Got back incorrect post-merge PostSyn noroi threshold with offset/n:\n%v\n", string(data))
	}

	// Create the sparsevol encoding for split area with label 100 -> 150.
	// Split has offset (0, 0, 0) with size (19, 19, 19).
	// PreSyn in split = 5 x 5 x 5 = 125
	// PostSyn in split = 4 x 4 x 4 = 64
	var rles dvid.RLEs
	for z := int32(0); z < 19; z++ {
		for y := int32(0); y < 19; y++ {
			start := dvid.Point3d{0, y, z}
			rles = append(rles, dvid.NewRLE(start, 19))
		}
	}
	buf := getBytesRLE(t, rles)

	// Submit the split sparsevol
	url = fmt.Sprintf("%snode/%s/%s/split/%d?splitlabel=150", server.WebAPIPath, uuid, "bodies", 100)
	data = server.TestHTTP(t, "POST", url, buf)
	jsonVal := make(map[string]uint64)
	if err := json.Unmarshal(data, &jsonVal); err != nil {
		t.Errorf("Unable to get new label from split. Instead got: %v\n", jsonVal)
	}

	// Check sync on split.
	if err := datastore.BlockOnUpdating(uuid, "labels"); err != nil {
		t.Fatalf("Error blocking on sync of labels: %v\n", err)
	}
	time.Sleep(1 * time.Second)
	if err := datastore.BlockOnUpdating(uuid, "mysynapses"); err != nil {
		t.Fatalf("Error blocking on sync of synapses: %v\n", err)
	}
	if err := datastore.BlockOnUpdating(uuid, "noroi"); err != nil {
		t.Fatalf("Error blocking on sync of labelsz: %v\n", err)
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/3/PreSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":200,"Size":16384},{"Label":100,"Size":16259},{"Label":150,"Size":125}]` {
		t.Errorf("Got back incorrect post-split PreSyn noroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/3/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":200,"Size":15375},{"Label":100,"Size":14350},{"Label":150,"Size":64}]` {
		t.Errorf("Got back incorrect post-split PostSyn noroi ranking:\n%v\n", string(data))
	}

	// Create the encoding for coarse split area in block coordinates from label 200.
	// Split has offset (64, 96, 96) with size (64, 32, 32).
	// PreSyn in split = 16 x 8 x 8 = 1024
	// PostSyn in split = 16 x 8 x 8 = 1024
	rles = dvid.RLEs{
		dvid.NewRLE(dvid.Point3d{2, 3, 3}, 2),
	}
	buf = getBytesRLE(t, rles)

	// Submit the coarse split of 200 -> 250
	url = fmt.Sprintf("%snode/%s/%s/split-coarse/200?splitlabel=250", server.WebAPIPath, uuid, "bodies")
	data = server.TestHTTP(t, "POST", url, buf)
	jsonVal = make(map[string]uint64)
	if err := json.Unmarshal(data, &jsonVal); err != nil {
		t.Errorf("Unable to get new label from split. Instead got: %v\n", jsonVal)
	}

	// Check sync on split.
	if err := datastore.BlockOnUpdating(uuid, "labels"); err != nil {
		t.Fatalf("Error blocking on sync of labels: %v\n", err)
	}
	time.Sleep(1 * time.Second)
	if err := datastore.BlockOnUpdating(uuid, "mysynapses"); err != nil {
		t.Fatalf("Error blocking on sync of synapses: %v\n", err)
	}
	if err := datastore.BlockOnUpdating(uuid, "noroi"); err != nil {
		t.Fatalf("Error blocking on sync of labelsz: %v\n", err)
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/5/PreSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":16259},{"Label":200,"Size":15360},{"Label":250,"Size":1024},{"Label":150,"Size":125}]` {
		t.Errorf("Got back incorrect post-coarsesplit PreSyn noroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/5/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":200,"Size":14351},{"Label":100,"Size":14350},{"Label":250,"Size":1024},{"Label":150,"Size":64}]` {
		t.Errorf("Got back incorrect post-coarsesplit PostSyn noroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/5/AllSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":30609},{"Label":200,"Size":29711},{"Label":250,"Size":2048},{"Label":150,"Size":189}]` {
		t.Errorf("Got back incorrect post-coarsesplit AllSyn noroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/count/200/AllSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `{"Label":200,"AllSyn":29711}` {
		t.Errorf("Got back incorrect post-coarsesplit AllSyn noroi count of label 200:\n%v\n", string(data))
	}

	// Check the ROI-restricted labelsz instance which should only be affected by merge.
	url = fmt.Sprintf("%snode/%s/withroi/top/5/PreSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":2048},{"Label":200,"Size":2048}]` {
		t.Errorf("Got back incorrect post-coarsesplit PreSyn withroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/withroi/top/5/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":200,"Size":2048},{"Label":100,"Size":2047}]` {
		t.Errorf("Got back incorrect post-coarsesplit PostSyn withroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/withroi/top/5/AllSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":200,"Size":4096},{"Label":100,"Size":4095}]` {
		t.Errorf("Got back incorrect post-coarsesplit AllSyn withroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/withroi/count/200/AllSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `{"Label":200,"AllSyn":4096}` {
		t.Errorf("Got back incorrect post-coarsesplit AllSyn withroi count of label 200:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/withroi/count/100/AllSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `{"Label":100,"AllSyn":4095}` {
		t.Errorf("Got back incorrect post-coarsesplit AllSyn withroi count of label 100:\n%v\n", string(data))
	}
}