func TestFloatInstanceCreation(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid, _ := datastore.NewTestRepo()

	// Create new voxels instance with optional parameters
	name := "weights"
	metadata := fmt.Sprintf(`{ "typename": "float32blk", "dataname": %q, "blocksize": "64,43,28", "VoxelSize": "13.1, 14.2, 15.3", "VoxelUnits": "picometers,nanometers,microns" }`, name)
	apiStr := fmt.Sprintf("%srepo/%s/instance", server.WebAPIPath, uuid)
	server.TestHTTP(t, "POST", apiStr, bytes.NewBufferString(metadata))

	// Get metadata and make sure optional settings have been set.
	apiStr = fmt.Sprintf("%snode/%s/%s/info", server.WebAPIPath, uuid, name)
	result := server.TestHTTP(t, "GET", apiStr, nil)
	var parsed = struct {
		Base struct {
			TypeName, Name string
		}
		Extended struct {
			BlockSize  dvid.Point3d
			VoxelSize  dvid.NdFloat32
			VoxelUnits dvid.NdString
		}
	}{}
	if err := json.Unmarshal(result, &parsed); err != nil {
		t.Fatalf("Error parsing JSON response of new instance metadata: %v\n", err)
	}
	if parsed.Base.Name != name {
		t.Errorf("Parsed new instance has unexpected name: %s != %s (expected)\n", parsed.Base.Name, name)
	}
	if parsed.Base.TypeName != "float32blk" {
		t.Errorf("Parsed new instance has unexpected type name: %s != float32blk (expected)\n", parsed.Base.TypeName)
	}
	if !parsed.Extended.BlockSize.Equals(dvid.Point3d{64, 43, 28}) {
		t.Errorf("Bad block size in new float32blk instance: %s\n", parsed.Extended.BlockSize)
	}
	if !parsed.Extended.VoxelSize.Equals(dvid.NdFloat32{13.1, 14.2, 15.3}) {
		t.Errorf("Bad voxel size in new float32blk instance: %s\n", parsed.Extended.VoxelSize)
	}
	if parsed.Extended.VoxelUnits[0] != "picometers" {
		t.Errorf("Got %q for X voxel units, not picometers\n", parsed.Extended.VoxelUnits[0])
	}
	if parsed.Extended.VoxelUnits[1] != "nanometers" {
		t.Errorf("Got %q for Y voxel units, not nanometers\n", parsed.Extended.VoxelUnits[1])
	}
	if parsed.Extended.VoxelUnits[2] != "microns" {
		t.Errorf("Got %q for Z voxel units, not microns\n", parsed.Extended.VoxelUnits[2])
	}
}
// Sets package-level testRepo and TestVersionID
func initTestRepo() (dvid.UUID, dvid.VersionID) {
	if dtype == nil {
		var err error
		dtype, err = datastore.TypeServiceByName(TypeName)
		if err != nil {
			log.Fatalf("Can't get labelgraph type: %v\n", err)
		}
	}
	return datastore.NewTestRepo()
}
func TestLog(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid, _ := datastore.NewTestRepo()

	// Post a log
	payload := bytes.NewBufferString(`{"log": ["line1", "line2", "some more stuff in a line"]}`)
	apiStr := fmt.Sprintf("%snode/%s/log", WebAPIPath, uuid)
	TestHTTP(t, "POST", apiStr, payload)

	// Verify it was saved.
	r := TestHTTP(t, "GET", apiStr, nil)
	jsonResp := make(map[string][]string)
	if err := json.Unmarshal(r, &jsonResp); err != nil {
		t.Fatalf("Unable to unmarshal log response: %s\n", string(r))
	}
	if len(jsonResp) != 1 {
		t.Errorf("Bad log return: %s\n", string(r))
	}
	data, ok := jsonResp["log"]
	if !ok {
		t.Fatalf("No 'log' data returned: %s\n", string(r))
	}
	if len(data) != 3 {
		t.Fatalf("Got wrong # of lines in log: %v\n", data)
	}
	testLog(t, data[0], "line1")
	testLog(t, data[1], "line2")
	testLog(t, data[2], "some more stuff in a line")

	// Add some more to log
	payload = bytes.NewBufferString(`{"log": ["line4", "line5"]}`)
	apiStr = fmt.Sprintf("%snode/%s/log", WebAPIPath, uuid)
	TestHTTP(t, "POST", apiStr, payload)

	// Verify it was appended.
	r = TestHTTP(t, "GET", apiStr, nil)
	jsonResp = make(map[string][]string)
	if err := json.Unmarshal(r, &jsonResp); err != nil {
		t.Fatalf("Unable to unmarshal log response: %s\n", string(r))
	}
	if len(jsonResp) != 1 {
		t.Errorf("Bad log return: %s\n", string(r))
	}
	data, ok = jsonResp["log"]
	if !ok {
		t.Fatalf("No 'log' data returned: %s\n", string(r))
	}
	if len(data) != 5 {
		t.Errorf("Got wrong # of lines in log: %v\n", data)
	}
	testLog(t, data[3], "line4")
	testLog(t, data[4], "line5")
}
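// testLogSketch is a hypothetical stand-in for the testLog helper used by
// TestLog above, which is defined elsewhere in this package. This is only a
// minimal sketch under the assumption that the server stores each log entry
// with a timestamp prefix, so a suffix check against the posted message is
// enough; the repository's actual helper may differ. Requires "strings".
func testLogSketch(t *testing.T, line, expected string) {
	if !strings.HasSuffix(line, expected) {
		t.Errorf("Expected log line ending in %q, got %q\n", expected, line)
	}
}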
// Sets package-level testRepo and TestVersionID
func initTestRepo() (dvid.UUID, dvid.VersionID) {
	testMu.Lock()
	defer testMu.Unlock()
	if roitype == nil {
		var err error
		roitype, err = datastore.TypeServiceByName(TypeName)
		if err != nil {
			log.Fatalf("Can't get ROI type: %v\n", err)
		}
	}
	return datastore.NewTestRepo()
}
// Sets package-level testRepo and TestVersionID
func initTestRepo() (dvid.UUID, dvid.VersionID) {
	testMu.Lock()
	defer testMu.Unlock()
	if labelsT == nil {
		var err error
		labelsT, err = datastore.TypeServiceByName("labelblk")
		if err != nil {
			log.Fatalf("Can't get labelblk type: %s\n", err)
		}
	}
	return datastore.NewTestRepo()
}
func TestReloadMetadata(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid, _ := datastore.NewTestRepo()

	// Add data instances
	var config dvid.Config
	server.CreateTestInstance(t, uuid, "keyvalue", "foo", config)
	server.CreateTestInstance(t, uuid, "labelblk", "labels", config)
	server.CreateTestInstance(t, uuid, "roi", "someroi", config)

	// Reload the metadata
	apiStr := fmt.Sprintf("%sserver/reload-metadata", server.WebAPIPath)
	server.TestHTTP(t, "POST", apiStr, nil)

	// Make sure repo UUID still there
	jsonStr, err := datastore.MarshalJSON()
	if err != nil {
		t.Fatalf("can't get repos JSON: %v\n", err)
	}
	var jsonResp map[string]map[string]interface{}
	if err := json.Unmarshal(jsonStr, &jsonResp); err != nil {
		t.Fatalf("Unable to unmarshal repos info response: %s\n", jsonStr)
	}
	if len(jsonResp) != 1 {
		t.Errorf("reloaded repos had more than one repo: %v\n", jsonResp)
	}
	for k := range jsonResp {
		if dvid.UUID(k) != uuid {
			t.Fatalf("Expected uuid %s, got %s. Full JSON:\n%v\n", uuid, k, jsonResp)
		}
	}

	// Make sure the data instances are still there.
	_, err = datastore.GetDataByUUIDName(uuid, "foo")
	if err != nil {
		t.Errorf("Couldn't get keyvalue data instance after reload\n")
	}
	_, err = datastore.GetDataByUUIDName(uuid, "labels")
	if err != nil {
		t.Errorf("Couldn't get labelblk data instance after reload\n")
	}
	_, err = datastore.GetDataByUUIDName(uuid, "someroi")
	if err != nil {
		t.Errorf("Couldn't get roi data instance after reload\n")
	}
}
// Sets package-level testRepo and TestVersionID
func initTestRepo() (dvid.UUID, dvid.VersionID) {
	testMu.Lock()
	defer testMu.Unlock()
	if mstype == nil {
		var err error
		mstype, err = datastore.TypeServiceByName(TypeName)
		if err != nil {
			log.Fatalf("Can't get imagetile type: %s\n", err)
		}
		grayscaleT, err = datastore.TypeServiceByName("uint8blk")
		if err != nil {
			log.Fatalf("Can't get grayscale type: %s\n", err)
		}
	}
	return datastore.NewTestRepo()
}
// Sets package-level testRepo and TestVersionID
func initTestRepo() (dvid.UUID, dvid.VersionID) {
	testMu.Lock()
	defer testMu.Unlock()
	if grayscaleT == nil {
		var err error
		grayscaleT, err = datastore.TypeServiceByName("uint8blk")
		if err != nil {
			log.Fatalf("Can't get uint8blk type: %s\n", err)
		}
		rgbaT, err = datastore.TypeServiceByName("rgba8blk")
		if err != nil {
			log.Fatalf("Can't get rgba8blk type: %s\n", err)
		}
		roiT, err = datastore.TypeServiceByName("roi")
		if err != nil {
			log.Fatalf("Can't get ROI type: %s\n", err)
		}
	}
	return datastore.NewTestRepo()
}
func TestCommitBranchMergeDelete(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid, _ := datastore.NewTestRepo()

	// Shouldn't be able to create branch on open node.
	branchReq := fmt.Sprintf("%snode/%s/branch", WebAPIPath, uuid)
	TestBadHTTP(t, "POST", branchReq, nil)

	// Check commit status
	checkReq := fmt.Sprintf("%snode/%s/commit", WebAPIPath, uuid)
	retVal := TestHTTP(t, "GET", checkReq, nil)
	if string(retVal) != `{"Locked":false}` {
		t.Errorf("Expected unlocked commit status, got: %s\n", string(retVal))
	}

	// Commit it.
	payload := bytes.NewBufferString(`{"note": "This is my test commit", "log": ["line1", "line2", "some more stuff in a line"]}`)
	apiStr := fmt.Sprintf("%snode/%s/commit", WebAPIPath, uuid)
	TestHTTP(t, "POST", apiStr, payload)

	// Check commit status
	checkReq = fmt.Sprintf("%snode/%s/commit", WebAPIPath, uuid)
	retVal = TestHTTP(t, "GET", checkReq, nil)
	if string(retVal) != `{"Locked":true}` {
		t.Errorf("Expected locked commit status, got: %s\n", string(retVal))
	}

	// Make sure committed nodes can only be read.
	// We shouldn't be able to write to log.
	payload = bytes.NewBufferString(`{"log": ["line1", "line2", "some more stuff in a line"]}`)
	apiStr = fmt.Sprintf("%snode/%s/log", WebAPIPath, uuid)
	TestBadHTTP(t, "POST", apiStr, payload)

	// Should be able to create branch now that we've committed parent.
	respData := TestHTTP(t, "POST", branchReq, nil)
	resp := struct {
		Child string `json:"child"`
	}{}
	if err := json.Unmarshal(respData, &resp); err != nil {
		t.Errorf("Expected 'child' JSON response. Got %s\n", string(respData))
	}
	parent1 := dvid.UUID(resp.Child)

	// Create a sibling.
	respData = TestHTTP(t, "POST", branchReq, nil)
	if err := json.Unmarshal(respData, &resp); err != nil {
		t.Errorf("Expected 'child' JSON response. Got %s\n", string(respData))
	}
	parent2 := dvid.UUID(resp.Child)

	// Commit both parents
	payload = bytes.NewBufferString(`{"note": "This is first parent"}`)
	apiStr = fmt.Sprintf("%snode/%s/commit", WebAPIPath, parent1)
	TestHTTP(t, "POST", apiStr, payload)
	payload = bytes.NewBufferString(`{"note": "This is second parent"}`)
	apiStr = fmt.Sprintf("%snode/%s/commit", WebAPIPath, parent2)
	TestHTTP(t, "POST", apiStr, payload)

	// Merge the two disjoint branches.
	mergeJSON := fmt.Sprintf(`{"mergeType": "conflict-free", "note": "This is my merged node", "parents": [%q, %q]}`, parent1[:7], parent2)
	payload = bytes.NewBufferString(mergeJSON)
	apiStr = fmt.Sprintf("%srepo/%s/merge", WebAPIPath, parent1)
	TestHTTP(t, "POST", apiStr, payload)
}
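// commitAndBranch is a hypothetical helper (not part of the repository) that
// sketches the commit-then-branch sequence exercised by the test above, using
// only calls that already appear in it: commit the given node, then POST to
// its /branch endpoint and return the child UUID from the {"child": ...}
// response. Shown purely for illustration of the API flow.
func commitAndBranch(t *testing.T, uuid dvid.UUID, note string) dvid.UUID {
	payload := bytes.NewBufferString(fmt.Sprintf(`{"note": %q}`, note))
	apiStr := fmt.Sprintf("%snode/%s/commit", WebAPIPath, uuid)
	TestHTTP(t, "POST", apiStr, payload)

	branchReq := fmt.Sprintf("%snode/%s/branch", WebAPIPath, uuid)
	respData := TestHTTP(t, "POST", branchReq, nil)
	resp := struct {
		Child string `json:"child"`
	}{}
	if err := json.Unmarshal(respData, &resp); err != nil {
		t.Fatalf("Expected 'child' JSON response. Got %s\n", string(respData))
	}
	return dvid.UUID(resp.Child)
}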
func TestLabels(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid, _ := datastore.NewTestRepo()
	if len(uuid) < 5 {
		t.Fatalf("Bad root UUID for new repo: %s\n", uuid)
	}

	// Create a labelblk instance
	server.CreateTestInstance(t, uuid, "labelblk", "labels", dvid.Config{})

	vol := labelVol{
		startLabel: 2,
		size:       dvid.Point3d{5, 5, 5}, // in blocks
		blockSize:  dvid.Point3d{32, 32, 32},
		offset:     dvid.Point3d{32, 64, 96},
		name:       "labels",
	}
	vol.postLabelVolume(t, uuid, "", "", 0)

	// Test the blocks API
	vol.testBlocks(t, uuid, "", "")
	vol.testBlocks(t, uuid, "uncompressed", "")

	// Test the "label" endpoint.
	apiStr := fmt.Sprintf("%snode/%s/%s/label/100_64_96", server.WebAPIPath, uuid, "labels")
	jsonResp := server.TestHTTP(t, "GET", apiStr, nil)
	var r labelResp
	if err := json.Unmarshal(jsonResp, &r); err != nil {
		t.Errorf("Unable to parse 'label' endpoint response: %s\n", jsonResp)
	}
	if r.Label != vol.label(100, 64, 96) {
		t.Errorf("Expected label %d @ (100, 64, 96) got label %d\n", vol.label(100, 64, 96), r.Label)
	}

	apiStr = fmt.Sprintf("%snode/%s/%s/label/10000_64000_9600121", server.WebAPIPath, uuid, "labels")
	jsonResp = server.TestHTTP(t, "GET", apiStr, nil)
	if err := json.Unmarshal(jsonResp, &r); err != nil {
		t.Errorf("Unable to parse 'label' endpoint response: %s\n", jsonResp)
	}
	if r.Label != 0 {
		t.Errorf("Expected label 0 at random huge point, got label %d\n", r.Label)
	}

	// Test the "labels" endpoint.
	apiStr = fmt.Sprintf("%snode/%s/%s/labels", server.WebAPIPath, uuid, "labels")
	payload := `[[100,64,96],[78,93,156],[104,65,97]]`
	jsonResp = server.TestHTTP(t, "GET", apiStr, bytes.NewBufferString(payload))
	var labels [3]uint64
	if err := json.Unmarshal(jsonResp, &labels); err != nil {
		t.Errorf("Unable to parse 'labels' endpoint response: %s\n", jsonResp)
	}
	if labels[0] != vol.label(100, 64, 96) {
		t.Errorf("Expected label %d @ (100, 64, 96) got label %d\n", vol.label(100, 64, 96), labels[0])
	}
	if labels[1] != vol.label(78, 93, 156) {
		t.Errorf("Expected label %d @ (78, 93, 156) got label %d\n", vol.label(78, 93, 156), labels[1])
	}
	if labels[2] != vol.label(104, 65, 97) {
		t.Errorf("Expected label %d @ (104, 65, 97) got label %d\n", vol.label(104, 65, 97), labels[2])
	}

	// Repost the label volume 3 more times with increasing starting values.
	vol.postLabelVolume(t, uuid, "", "", 2100)
	vol.postLabelVolume(t, uuid, "", "", 8176)
	vol.postLabelVolume(t, uuid, "", "", 16623)

	vol.testSlices(t, uuid)

	// Try to post last volume concurrently 3x and then check result.
	wg := new(sync.WaitGroup)
	wg.Add(3)
	go func() {
		vol.postLabelVolume(t, uuid, "", "", 16623)
		wg.Done()
	}()
	go func() {
		vol.postLabelVolume(t, uuid, "", "", 16623)
		wg.Done()
	}()
	go func() {
		vol.postLabelVolume(t, uuid, "", "", 16623)
		wg.Done()
	}()
	wg.Wait()
	vol.testGetLabelVolume(t, uuid, "", "")

	// Try concurrent write of disjoint subvolumes.
	vol2 := labelVol{
		size:      dvid.Point3d{5, 5, 5}, // in blocks
		blockSize: dvid.Point3d{32, 32, 32},
		offset:    dvid.Point3d{192, 64, 96},
		name:      "labels",
	}
	vol3 := labelVol{
		size:      dvid.Point3d{5, 5, 5}, // in blocks
		blockSize: dvid.Point3d{32, 32, 32},
		offset:    dvid.Point3d{192, 224, 96},
		name:      "labels",
	}
	vol4 := labelVol{
		size:      dvid.Point3d{5, 5, 5}, // in blocks
		blockSize: dvid.Point3d{32, 32, 32},
		offset:    dvid.Point3d{32, 224, 96},
		name:      "labels",
	}

	wg.Add(3)
	go func() {
		vol2.postLabelVolume(t, uuid, "lz4", "", 4000)
		wg.Done()
	}()
	go func() {
		vol3.postLabelVolume(t, uuid, "lz4", "", 8000)
		wg.Done()
	}()
	go func() {
		vol4.postLabelVolume(t, uuid, "lz4", "", 1200)
		wg.Done()
	}()
	wg.Wait()

	vol.testGetLabelVolume(t, uuid, "", "")
	vol2.testGetLabelVolume(t, uuid, "", "")
	vol3.testGetLabelVolume(t, uuid, "", "")
	vol4.testGetLabelVolume(t, uuid, "", "")

	// Verify various GET 3d volume with compressions and no ROI.
	vol.testGetLabelVolume(t, uuid, "", "")
	vol.testGetLabelVolume(t, uuid, "lz4", "")
	vol.testGetLabelVolume(t, uuid, "gzip", "")

	// Create a new ROI instance.
	roiName := "myroi"
	server.CreateTestInstance(t, uuid, "roi", roiName, dvid.Config{})

	// Add ROI data
	apiStr = fmt.Sprintf("%snode/%s/%s/roi", server.WebAPIPath, uuid, roiName)
	server.TestHTTP(t, "POST", apiStr, bytes.NewBufferString(labelsJSON()))

	// Post updated labels without ROI and make sure it returns those values.
	var labelNoROI uint64 = 20000
	vol.postLabelVolume(t, uuid, "", "", labelNoROI)
	returned := vol.testGetLabelVolume(t, uuid, "", "")
	startLabel := binary.LittleEndian.Uint64(returned[0:8])
	if startLabel != labelNoROI+1 {
		t.Errorf("Expected first voxel to be label %d and got %d instead\n", labelNoROI+1, startLabel)
	}

	// TODO - Use the ROI to retrieve a 2d xy image.

	// TODO - Make sure we aren't getting labels back in non-ROI points.

	// Post again but now with ROI
	var labelWithROI uint64 = 40000
	vol.postLabelVolume(t, uuid, "", roiName, labelWithROI)

	// Verify ROI masking of POST where anything outside ROI is old labels.
	returned = vol.getLabelVolume(t, uuid, "", "")
	var newlabel uint64 = labelWithROI
	var oldlabel uint64 = labelNoROI
	nx := vol.size[0] * vol.blockSize[0]
	ny := vol.size[1] * vol.blockSize[1]
	nz := vol.size[2] * vol.blockSize[2]
	var x, y, z, v int32
	for z = 0; z < nz; z++ {
		voxz := z + vol.offset[2]
		blockz := voxz / DefaultBlockSize
		for y = 0; y < ny; y++ {
			voxy := y + vol.offset[1]
			blocky := voxy / DefaultBlockSize
			for x = 0; x < nx; x++ {
				voxx := x + vol.offset[0]
				blockx := voxx / DefaultBlockSize
				oldlabel++
				newlabel++
				got := binary.LittleEndian.Uint64(returned[v : v+8])
				if inroi(blockx, blocky, blockz) {
					if got != newlabel {
						t.Fatalf("Expected %d in ROI, got %d\n", newlabel, got)
					}
				} else {
					if got != oldlabel {
						t.Fatalf("Expected %d outside ROI, got %d\n", oldlabel, got)
					}
				}
				v += 8
			}
		}
	}

	// Verify that a ROI-enabled GET has zeros everywhere outside ROI.
	returned = vol.getLabelVolume(t, uuid, "", roiName)
	newlabel = labelWithROI
	x, y, z, v = 0, 0, 0, 0
	for z = 0; z < nz; z++ {
		voxz := z + vol.offset[2]
		blockz := voxz / DefaultBlockSize
		for y = 0; y < ny; y++ {
			voxy := y + vol.offset[1]
			blocky := voxy / DefaultBlockSize
			for x = 0; x < nx; x++ {
				voxx := x + vol.offset[0]
				blockx := voxx / DefaultBlockSize
				oldlabel++
				newlabel++
				got := binary.LittleEndian.Uint64(returned[v : v+8])
				if inroi(blockx, blocky, blockz) {
					if got != newlabel {
						t.Fatalf("Expected %d in ROI, got %d\n", newlabel, got)
					}
				} else {
					if got != 0 {
						t.Fatalf("Expected zero outside ROI, got %d\n", got)
					}
				}
				v += 8
			}
		}
	}
}
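// labelRespSketch mirrors the response shape assumed by the "label" endpoint
// checks in TestLabels above. The test's actual labelResp type is defined
// elsewhere in this package; this sketch only records the one field the test
// reads from the {"Label": <uint64>} response and is shown for reference.
type labelRespSketch struct {
	Label uint64
}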
func TestSyncs(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid, _ := datastore.NewTestRepo()

	var config dvid.Config
	server.CreateTestInstance(t, uuid, "labelblk", "labels", config)
	server.CreateTestInstance(t, uuid, "labelvol", "bodies", config)
	server.CreateTestInstance(t, uuid, "annotation", "synapses", config)
	server.CreateTestSync(t, uuid, "synapses", "labels,bodies")

	labels, err := labelblk.GetByUUIDName(uuid, "labels")
	if err != nil {
		t.Fatalf("Can't obtain data instance via GetByUUIDName: %v\n", err)
	}
	bodies, err := labelvol.GetByUUIDName(uuid, "bodies")
	if err != nil {
		t.Fatalf("Can't obtain data instance via GetByUUIDName: %v\n", err)
	}
	synapses, err := annotation.GetByUUIDName(uuid, "synapses")
	if err != nil {
		t.Fatalf("Couldn't get synapses data instance: %v\n", err)
	}

	syncs := synapses.SyncedData()
	if len(syncs) != 2 {
		t.Errorf("Expected 2 syncs, got %d syncs instead.\n", len(syncs))
	}
	_, found := syncs[labels.DataUUID()]
	if !found {
		t.Errorf("Expected labels UUID (%s) got: %v\n", labels.DataUUID(), syncs)
	}
	_, found = syncs[bodies.DataUUID()]
	if !found {
		t.Errorf("Expected bodies UUID (%s) got: %v\n", bodies.DataUUID(), syncs)
	}

	server.CreateTestInstance(t, uuid, "labelvol", "bodies2", config)
	bodies2, err := labelvol.GetByUUIDName(uuid, "bodies2")
	if err != nil {
		t.Fatalf("Can't obtain data instance via GetByUUIDName: %v\n", err)
	}
	server.CreateTestSync(t, uuid, "synapses", "bodies2")

	syncs = synapses.SyncedData()
	if len(syncs) != 3 {
		t.Errorf("Expected 3 syncs, got %d syncs instead.\n", len(syncs))
	}
	_, found = syncs[labels.DataUUID()]
	if !found {
		t.Errorf("Expected labels UUID (%s) got: %v\n", labels.DataUUID(), syncs)
	}
	_, found = syncs[bodies.DataUUID()]
	if !found {
		t.Errorf("Expected bodies UUID (%s) got: %v\n", bodies.DataUUID(), syncs)
	}
	_, found = syncs[bodies2.DataUUID()]
	if !found {
		t.Errorf("Expected bodies2 UUID (%s) got: %v\n", bodies2.DataUUID(), syncs)
	}

	server.CreateTestInstance(t, uuid, "labelvol", "bodies3", config)
	server.CreateTestReplaceSync(t, uuid, "synapses", "bodies3")

	syncs = synapses.SyncedData()
	if len(syncs) != 1 {
		t.Errorf("Expected 1 sync, got %d syncs instead.\n", len(syncs))
	}
	bodies3, err := labelvol.GetByUUIDName(uuid, "bodies3")
	if err != nil {
		t.Fatalf("Can't obtain data instance via GetByUUIDName: %v\n", err)
	}
	_, found = syncs[bodies3.DataUUID()]
	if !found {
		t.Errorf("Expected bodies3 UUID (%s) got: %v\n", bodies3.DataUUID(), syncs)
	}

	server.CreateTestReplaceSync(t, uuid, "synapses", "")
	syncs = synapses.SyncedData()
	if len(syncs) != 0 {
		t.Errorf("Expected 0 sync, got instead %v\n", syncs)
	}
}
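// createTestSyncSketch is a hypothetical illustration of what the
// server.CreateTestSync helper used above is assumed to do: POST a JSON body
// with a comma-separated list of instance names to a data instance's /sync
// endpoint. The real helper lives in the server package and may differ in
// detail (e.g. CreateTestReplaceSync presumably adds a replace option).
func createTestSyncSketch(t *testing.T, uuid dvid.UUID, name, syncNames string) {
	payload := fmt.Sprintf(`{"sync": %q}`, syncNames)
	apiStr := fmt.Sprintf("%snode/%s/%s/sync", server.WebAPIPath, uuid, name)
	server.TestHTTP(t, "POST", apiStr, bytes.NewBufferString(payload))
}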
func TestLabels(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	// Create testbed volume and data instances
	uuid, _ := datastore.NewTestRepo()
	var config dvid.Config
	server.CreateTestInstance(t, uuid, "labelblk", "labels", config)
	server.CreateTestInstance(t, uuid, "labelvol", "bodies", config)

	// Establish syncs
	server.CreateTestSync(t, uuid, "labels", "bodies")
	server.CreateTestSync(t, uuid, "bodies", "labels")

	// Populate the labels, which should automatically populate the labelvol
	_ = createLabelTestVolume(t, uuid, "labels")
	if err := datastore.BlockOnUpdating(uuid, "labels"); err != nil {
		t.Fatalf("Error blocking on sync of labels: %v\n", err)
	}

	// Add annotations syncing with "labels" instance.
	server.CreateTestInstance(t, uuid, "annotation", "mysynapses", config)
	server.CreateTestSync(t, uuid, "mysynapses", "labels,bodies")

	// Create a ROI that will be used for our labelsz.
	server.CreateTestInstance(t, uuid, "roi", "myroi", config)
	roiRequest := fmt.Sprintf("%snode/%s/myroi/roi", server.WebAPIPath, uuid)
	server.TestHTTP(t, "POST", roiRequest, getROIReader())

	// Create labelsz instances synced to the above annotations.
	server.CreateTestInstance(t, uuid, "labelsz", "noroi", config)
	server.CreateTestSync(t, uuid, "noroi", "mysynapses")
	config.Set("ROI", fmt.Sprintf("myroi,%s", uuid))
	server.CreateTestInstance(t, uuid, "labelsz", "withroi", config)
	server.CreateTestSync(t, uuid, "withroi", "mysynapses")

	// PUT first batch of synapses.
	var synapses annotation.Elements
	var x, y, z int32
	// This should put 31x31x31 (29,791) PostSyn in volume with fewer in label 200 than 300.
	// There will be 15 along each dimension from 0 -> 63, then 16 from 64 -> 127.
	// Label 100 will have 15 x 31 x 31 = 14415
	// Label 200 will have 16 x 31 x 15 = 7440
	// Label 300 will have 16 x 31 x 16 = 7936
	for z = 4; z < 128; z += 4 {
		for y = 4; y < 128; y += 4 {
			for x = 4; x < 128; x += 4 {
				e := annotation.Element{
					annotation.ElementNR{
						Pos:  dvid.Point3d{x, y, z},
						Kind: annotation.PostSyn,
					},
					[]annotation.Relationship{},
				}
				synapses = append(synapses, e)
			}
		}
	}
	// This should put 32x32x32 (32,768) PreSyn in volume split 1/2, 1/4, 1/4
	for z = 2; z < 128; z += 4 {
		for y = 2; y < 128; y += 4 {
			for x = 2; x < 128; x += 4 {
				e := annotation.Element{
					annotation.ElementNR{
						Pos:  dvid.Point3d{x, y, z},
						Kind: annotation.PreSyn,
					},
					[]annotation.Relationship{},
				}
				synapses = append(synapses, e)
			}
		}
	}
	testJSON, err := json.Marshal(synapses)
	if err != nil {
		t.Fatal(err)
	}
	url := fmt.Sprintf("%snode/%s/mysynapses/elements", server.WebAPIPath, uuid)
	server.TestHTTP(t, "POST", url, strings.NewReader(string(testJSON)))

	// Check if we have correct sequencing for no ROI labelsz.
	if err := datastore.BlockOnUpdating(uuid, "noroi"); err != nil {
		t.Fatalf("Error blocking on sync of noroi labelsz: %v\n", err)
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/3/PreSyn", server.WebAPIPath, uuid)
	data := server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":16384},{"Label":200,"Size":8192},{"Label":300,"Size":8192}]` {
		t.Errorf("Got back incorrect PreSyn noroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/count/100/PreSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `{"Label":100,"PreSyn":16384}` {
		t.Errorf("Got back incorrect PreSyn noroi count for label 100:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/count/200/PreSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `{"Label":200,"PreSyn":8192}` {
		t.Errorf("Got back incorrect PreSyn noroi count for label 200:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/3/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":14415},{"Label":300,"Size":7936},{"Label":200,"Size":7440}]` {
		t.Errorf("Got back incorrect PostSyn noroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/3/AllSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":30799},{"Label":300,"Size":16128},{"Label":200,"Size":15632}]` {
		t.Errorf("Got back incorrect AllSyn noroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/threshold/15633/AllSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":30799},{"Label":300,"Size":16128}]` {
		t.Errorf("Got back incorrect AllSyn noroi threshold:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/threshold/1000/AllSyn?offset=1&n=2", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":300,"Size":16128},{"Label":200,"Size":15632}]` {
		t.Errorf("Got back incorrect AllSyn noroi threshold:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/threshold/1000/AllSyn?offset=8&n=2", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[]` {
		t.Errorf("Got back incorrect AllSyn noroi threshold:\n%v\n", string(data))
	}

	// Check if we have correct sequencing for ROI labelsz.
	// ROI constitutes the inner eight 32^3 blocks.
	// There are 16 PostSyn in each ROI dimension.
	// There are also 16 PreSyn in each ROI dimension.
	if err := datastore.BlockOnUpdating(uuid, "withroi"); err != nil {
		t.Fatalf("Error blocking on sync of withroi labelsz: %v\n", err)
	}
	url = fmt.Sprintf("%snode/%s/withroi/top/0/AllSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[]` {
		t.Errorf("Incorrectly handled top n=0 case, expected [] got: %v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/withroi/top/3/PreSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":2048},{"Label":200,"Size":1024},{"Label":300,"Size":1024}]` {
		t.Errorf("Got back incorrect PreSyn withroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/withroi/top/3/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":2048},{"Label":200,"Size":1024},{"Label":300,"Size":1024}]` {
		t.Errorf("Got back incorrect PostSyn withroi ranking:\n%v\n", string(data))
	}

	// Check fewer and larger N requests.
	url = fmt.Sprintf("%snode/%s/noroi/top/2/PreSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":16384},{"Label":200,"Size":8192}]` {
		t.Errorf("Got back incorrect N=2 ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/4/PreSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":16384},{"Label":200,"Size":8192},{"Label":300,"Size":8192}]` {
		t.Errorf("Got back incorrect N=4 ranking:\n%v\n", string(data))
	}

	// Test annotation move of a PostSyn from label 100->300 and also label 200->300
	url = fmt.Sprintf("%snode/%s/mysynapses/move/32_32_32/75_21_69", server.WebAPIPath, uuid)
	server.TestHTTP(t, "POST", url, nil)
	url = fmt.Sprintf("%snode/%s/mysynapses/move/68_20_20/77_21_69", server.WebAPIPath, uuid)
	server.TestHTTP(t, "POST", url, nil)

	if err := datastore.BlockOnUpdating(uuid, "noroi"); err != nil {
		t.Fatalf("Error blocking on sync of noroi labelsz: %v\n", err)
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/3/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":14414},{"Label":300,"Size":7938},{"Label":200,"Size":7439}]` {
		t.Errorf("Got back incorrect PostSyn noroi ranking after move from label 100->300:\n%v\n", string(data))
	}

	// First move took synapse out of ROI so there should be one less for label 100.
	if err := datastore.BlockOnUpdating(uuid, "withroi"); err != nil {
		t.Fatalf("Error blocking on sync of labelsz: %v\n", err)
	}
	url = fmt.Sprintf("%snode/%s/withroi/top/5/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":2047},{"Label":200,"Size":1024},{"Label":300,"Size":1024}]` {
		t.Errorf("Got back incorrect post-move PostSyn withroi ranking:\n%v\n", string(data))
	}

	// Test annotation deletion of moved PostSyn from label 300
	url = fmt.Sprintf("%snode/%s/mysynapses/element/75_21_69", server.WebAPIPath, uuid)
	server.TestHTTP(t, "DELETE", url, nil)
	url = fmt.Sprintf("%snode/%s/mysynapses/element/77_21_69", server.WebAPIPath, uuid)
	server.TestHTTP(t, "DELETE", url, nil)

	if err := datastore.BlockOnUpdating(uuid, "noroi"); err != nil {
		t.Fatalf("Error blocking on sync of noroi labelsz: %v\n", err)
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/3/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":14414},{"Label":300,"Size":7936},{"Label":200,"Size":7439}]` {
		t.Errorf("Got back incorrect PostSyn noroi ranking after deletions from label 300:\n%v\n", string(data))
	}

	// Check sync on merge.
	if err := datastore.BlockOnUpdating(uuid, "bodies"); err != nil {
		t.Fatalf("Error blocking on sync of bodies: %v\n", err)
	}
	testMerge := mergeJSON(`[200, 300]`)
	testMerge.send(t, uuid, "bodies")

	if err := datastore.BlockOnUpdating(uuid, "labels"); err != nil {
		t.Fatalf("Error blocking on sync of labels: %v\n", err)
	}
	time.Sleep(1 * time.Second)
	if err := datastore.BlockOnUpdating(uuid, "mysynapses"); err != nil {
		t.Fatalf("Error blocking on sync of synapses: %v\n", err)
	}
	if err := datastore.BlockOnUpdating(uuid, "noroi"); err != nil {
		t.Fatalf("Error blocking on sync of labelsz: %v\n", err)
	}
	if err := datastore.BlockOnUpdating(uuid, "withroi"); err != nil {
		t.Fatalf("Error blocking on sync of labelsz: %v\n", err)
	}

	url = fmt.Sprintf("%snode/%s/withroi/top/5/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":200,"Size":2048},{"Label":100,"Size":2047}]` {
		t.Errorf("Got back incorrect post-merge PostSyn withroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/withroi/count/100/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `{"Label":100,"PostSyn":2047}` {
		t.Errorf("Got back incorrect post-merge PostSyn withroi count of label 100:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/3/PreSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":16384},{"Label":200,"Size":16384}]` {
		t.Errorf("Got back incorrect post-merge PreSyn noroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/3/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":200,"Size":15375},{"Label":100,"Size":14414}]` {
		t.Errorf("Got back incorrect post-merge PostSyn noroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/3/AllSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":200,"Size":31759},{"Label":100,"Size":30798}]` {
		t.Errorf("Got back incorrect post-merge AllSyn noroi ranking:\n%v\n", string(data))
	}

	// Check threshold endpoint
	url = fmt.Sprintf("%snode/%s/withroi/threshold/2048/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":200,"Size":2048}]` {
		t.Errorf("Got back incorrect post-merge PostSyn withroi threshold:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/threshold/16384/PreSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":16384},{"Label":200,"Size":16384}]` {
		t.Errorf("Got back incorrect post-merge PreSyn noroi threshold:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/threshold/15000/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":200,"Size":15375}]` {
		t.Errorf("Got back incorrect post-merge PostSyn noroi threshold:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/threshold/0/PostSyn?offset=1&n=1", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":14414}]` {
		t.Errorf("Got back incorrect post-merge PostSyn noroi threshold with offset/n:\n%v\n", string(data))
	}

	// Create the sparsevol encoding for split area with label 100 -> 150.
	// Split has offset (0, 0, 0) with size (19, 19, 19).
	// PreSyn in split = 5 x 5 x 5 = 125
	// PostSyn in split = 4 x 4 x 4 = 64
	var rles dvid.RLEs
	for z := int32(0); z < 19; z++ {
		for y := int32(0); y < 19; y++ {
			start := dvid.Point3d{0, y, z}
			rles = append(rles, dvid.NewRLE(start, 19))
		}
	}
	buf := getBytesRLE(t, rles)

	// Submit the split sparsevol
	url = fmt.Sprintf("%snode/%s/%s/split/%d?splitlabel=150", server.WebAPIPath, uuid, "bodies", 100)
	data = server.TestHTTP(t, "POST", url, buf)
	jsonVal := make(map[string]uint64)
	if err := json.Unmarshal(data, &jsonVal); err != nil {
		t.Errorf("Unable to get new label from split. Instead got: %v\n", jsonVal)
	}

	// Check sync on split.
	if err := datastore.BlockOnUpdating(uuid, "labels"); err != nil {
		t.Fatalf("Error blocking on sync of labels: %v\n", err)
	}
	time.Sleep(1 * time.Second)
	if err := datastore.BlockOnUpdating(uuid, "mysynapses"); err != nil {
		t.Fatalf("Error blocking on sync of synapses: %v\n", err)
	}
	if err := datastore.BlockOnUpdating(uuid, "noroi"); err != nil {
		t.Fatalf("Error blocking on sync of labelsz: %v\n", err)
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/3/PreSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":200,"Size":16384},{"Label":100,"Size":16259},{"Label":150,"Size":125}]` {
		t.Errorf("Got back incorrect post-split PreSyn noroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/3/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":200,"Size":15375},{"Label":100,"Size":14350},{"Label":150,"Size":64}]` {
		t.Errorf("Got back incorrect post-split PostSyn noroi ranking:\n%v\n", string(data))
	}

	// Create the encoding for coarse split area in block coordinates from label 200.
	// Split has offset (64, 96, 96) with size (64, 32, 32).
	// PreSyn in split = 16 x 8 x 8 = 1024
	// PostSyn in split = 16 x 8 x 8 = 1024
	rles = dvid.RLEs{
		dvid.NewRLE(dvid.Point3d{2, 3, 3}, 2),
	}
	buf = getBytesRLE(t, rles)

	// Submit the coarse split of 200 -> 250
	url = fmt.Sprintf("%snode/%s/%s/split-coarse/200?splitlabel=250", server.WebAPIPath, uuid, "bodies")
	data = server.TestHTTP(t, "POST", url, buf)
	jsonVal = make(map[string]uint64)
	if err := json.Unmarshal(data, &jsonVal); err != nil {
		t.Errorf("Unable to get new label from split. Instead got: %v\n", jsonVal)
	}

	// Check sync on split.
	if err := datastore.BlockOnUpdating(uuid, "labels"); err != nil {
		t.Fatalf("Error blocking on sync of labels: %v\n", err)
	}
	time.Sleep(1 * time.Second)
	if err := datastore.BlockOnUpdating(uuid, "mysynapses"); err != nil {
		t.Fatalf("Error blocking on sync of synapses: %v\n", err)
	}
	if err := datastore.BlockOnUpdating(uuid, "noroi"); err != nil {
		t.Fatalf("Error blocking on sync of labelsz: %v\n", err)
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/5/PreSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":16259},{"Label":200,"Size":15360},{"Label":250,"Size":1024},{"Label":150,"Size":125}]` {
		t.Errorf("Got back incorrect post-coarsesplit PreSyn noroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/5/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":200,"Size":14351},{"Label":100,"Size":14350},{"Label":250,"Size":1024},{"Label":150,"Size":64}]` {
		t.Errorf("Got back incorrect post-coarsesplit PostSyn noroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/5/AllSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":30609},{"Label":200,"Size":29711},{"Label":250,"Size":2048},{"Label":150,"Size":189}]` {
		t.Errorf("Got back incorrect post-coarsesplit AllSyn noroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/count/200/AllSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `{"Label":200,"AllSyn":29711}` {
		t.Errorf("Got back incorrect post-coarsesplit AllSyn noroi count of label 200:\n%v\n", string(data))
	}

	// Check the ROI-restricted labelsz instance which should only be affected by merge.
	url = fmt.Sprintf("%snode/%s/withroi/top/5/PreSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":2048},{"Label":200,"Size":2048}]` {
		t.Errorf("Got back incorrect post-coarsesplit PreSyn withroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/withroi/top/5/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":200,"Size":2048},{"Label":100,"Size":2047}]` {
		t.Errorf("Got back incorrect post-coarsesplit PostSyn withroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/withroi/top/5/AllSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":200,"Size":4096},{"Label":100,"Size":4095}]` {
		t.Errorf("Got back incorrect post-coarsesplit AllSyn withroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/withroi/count/200/AllSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `{"Label":200,"AllSyn":4096}` {
		t.Errorf("Got back incorrect post-coarsesplit AllSyn withroi count of label 200:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/withroi/count/100/AllSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `{"Label":100,"AllSyn":4095}` {
		t.Errorf("Got back incorrect post-coarsesplit AllSyn withroi count of label 100:\n%v\n", string(data))
	}
}