func TestReloadMetadata(t *testing.T) { datastore.OpenTest() defer datastore.CloseTest() uuid, _ := datastore.NewTestRepo() // Add data instances var config dvid.Config server.CreateTestInstance(t, uuid, "keyvalue", "foo", config) server.CreateTestInstance(t, uuid, "labelblk", "labels", config) server.CreateTestInstance(t, uuid, "roi", "someroi", config) // Reload the metadata apiStr := fmt.Sprintf("%sserver/reload-metadata", server.WebAPIPath) server.TestHTTP(t, "POST", apiStr, nil) // Make sure repo UUID still there jsonStr, err := datastore.MarshalJSON() if err != nil { t.Fatalf("can't get repos JSON: %v\n", err) } var jsonResp map[string](map[string]interface{}) if err := json.Unmarshal(jsonStr, &jsonResp); err != nil { t.Fatalf("Unable to unmarshal repos info response: %s\n", jsonStr) } if len(jsonResp) != 1 { t.Errorf("reloaded repos had more than one repo: %v\n", jsonResp) } for k := range jsonResp { if dvid.UUID(k) != uuid { t.Fatalf("Expected uuid %s, got %s. Full JSON:\n%v\n", uuid, k, jsonResp) } } // Make sure the data instances are still there. _, err = datastore.GetDataByUUIDName(uuid, "foo") if err != nil { t.Errorf("Couldn't get keyvalue data instance after reload\n") } _, err = datastore.GetDataByUUIDName(uuid, "labels") if err != nil { t.Errorf("Couldn't get labelblk data instance after reload\n") } _, err = datastore.GetDataByUUIDName(uuid, "someroi") if err != nil { t.Errorf("Couldn't get roi data instance after reload\n") } }
// Each voxel in volume has sequential labels in X, Y, then Z order. // volSize = size of volume in blocks // blockSize = size of a block in voxels func (vol labelVol) postLabelVolume(t *testing.T, labelsName string, uuid dvid.UUID) { server.CreateTestInstance(t, uuid, "labelblk", labelsName, dvid.Config{}) offset := vol.offset nx := vol.size[0] * vol.blockSize[0] ny := vol.size[1] * vol.blockSize[1] nz := vol.size[2] * vol.blockSize[2] buf := make([]byte, nx*ny*nz*8) var label uint64 var x, y, z, v int32 for z = 0; z < nz; z++ { for y = 0; y < ny; y++ { for x = 0; x < nx; x++ { label++ binary.LittleEndian.PutUint64(buf[v:v+8], label) v += 8 } } } apiStr := fmt.Sprintf("%snode/%s/%s/raw/0_1_2/%d_%d_%d/%d_%d_%d", server.WebAPIPath, uuid, labelsName, nx, ny, nz, offset[0], offset[1], offset[2]) server.TestHTTP(t, "POST", apiStr, bytes.NewBuffer(buf)) }
func TestCommitAndBranch(t *testing.T) { tests.UseStore() defer tests.CloseStore() apiStr := fmt.Sprintf("%srepos", server.WebAPIPath) r := server.TestHTTP(t, "POST", apiStr, nil) var jsonResp map[string]interface{} if err := json.Unmarshal(r, &jsonResp); err != nil { t.Fatalf("Unable to unmarshal repo creation response: %s\n", string(r)) } v, ok := jsonResp["root"] if !ok { t.Fatalf("No 'root' metadata returned: %s\n", string(r)) } uuidStr, ok := v.(string) if !ok { t.Fatalf("Couldn't cast returned 'root' data (%v) into string.\n", v) } uuid := dvid.UUID(uuidStr) // Shouldn't be able to create branch on open node. branchReq := fmt.Sprintf("%snode/%s/branch", server.WebAPIPath, uuid) server.TestBadHTTP(t, "POST", branchReq, nil) // Add a keyvalue instance. server.CreateTestInstance(t, uuid, "keyvalue", "mykv", dvid.Config{}) // Commit it. payload := bytes.NewBufferString(`{"note": "This is my test commit", "log": ["line1", "line2", "some more stuff in a line"]}`) apiStr = fmt.Sprintf("%snode/%s/commit", server.WebAPIPath, uuid) server.TestHTTP(t, "POST", apiStr, payload) // Make sure committed nodes can only be read. // We shouldn't be able to write to keyvalue.. keyReq := fmt.Sprintf("%snode/%s/mykv/key/foo", server.WebAPIPath, uuid) server.TestBadHTTP(t, "POST", keyReq, bytes.NewBufferString("some data")) // Should be able to create branch now that we've committed parent. respData := server.TestHTTP(t, "POST", branchReq, nil) resp := struct { Child dvid.UUID `json:"child"` }{} if err := json.Unmarshal(respData, &resp); err != nil { t.Errorf("Expected 'child' JSON response. Got %s\n", string(respData)) } // We should be able to write to that keyvalue now in the child. keyReq = fmt.Sprintf("%snode/%s/mykv/key/foo", server.WebAPIPath, resp.Child) server.TestHTTP(t, "POST", keyReq, bytes.NewBufferString("some data")) }
func TestDeleteInstance(t *testing.T) { tests.UseStore() defer tests.CloseStore() apiStr := fmt.Sprintf("%srepos", server.WebAPIPath) r := server.TestHTTP(t, "POST", apiStr, nil) var jsonResp map[string]interface{} if err := json.Unmarshal(r, &jsonResp); err != nil { t.Fatalf("Unable to unmarshal repo creation response: %s\n", string(r)) } v, ok := jsonResp["root"] if !ok { t.Fatalf("No 'root' metadata returned: %s\n", string(r)) } uuidStr, ok := v.(string) if !ok { t.Fatalf("Couldn't cast returned 'root' data (%v) into string.\n", v) } uuid := dvid.UUID(uuidStr) // Add a data instance. var config dvid.Config server.CreateTestInstance(t, uuid, "keyvalue", "foo", config) // Make sure it exists. _, err := datastore.GetDataByUUID(uuid, "foo") if err != nil { t.Errorf("Couldn't create data instance 'foo'\n") } // Shouldn't be able to delete instance without "imsure" delReq := fmt.Sprintf("%srepo/%s/%s", server.WebAPIPath, uuid, "foo") server.TestBadHTTP(t, "DELETE", delReq, nil) delReq = fmt.Sprintf("%srepo/%s/%s?imsure=true", server.WebAPIPath, uuid, "foo") server.TestHTTP(t, "DELETE", delReq, nil) // Make sure it no longer exists. _, err = datastore.GetDataByUUID(uuid, "foo") if err == nil { t.Errorf("Shouldn't be able to access a deleted data instance 'foo'\n") } }
func TestTileKey(t *testing.T) { datastore.OpenTest() defer datastore.CloseTest() uuid, _ := initTestRepo() server.CreateTestInstance(t, uuid, "imagetile", "tiles", dvid.Config{}) keyURL := fmt.Sprintf("%snode/%s/tiles/tilekey/xy/0/1_2_3", server.WebAPIPath, uuid) respStr := server.TestHTTP(t, "GET", keyURL, nil) keyResp := struct { Key string `json:"key"` }{} if err := json.Unmarshal(respStr, &keyResp); err != nil { t.Fatalf("Couldn't parse JSON response to tilekey request (%v):\n%s\n", err, keyResp) } kb := make([]byte, hex.DecodedLen(len(keyResp.Key))) _, err := hex.Decode(kb, []byte(keyResp.Key)) if err != nil { t.Fatalf("Couldn't parse return hex key: %s", keyResp.Key) } // Decipher TKey portion to make sure it's correct. key := storage.Key(kb) tk, err := storage.TKeyFromKey(key) if err != nil { t.Fatalf("Couldn't get TKey from returned key (%v): %x", err, kb) } tile, plane, scale, err := DecodeTKey(tk) if err != nil { t.Fatalf("Bad decode of TKey (%v): %x", err, tk) } expectTile := dvid.ChunkPoint3d{1, 2, 3} if tile != expectTile { t.Errorf("Expected tile %v, got %v\n", expectTile, tile) } if !plane.Equals(dvid.XY) { t.Errorf("Expected plane to be XY, got %v\n", plane) } if scale != 0 { t.Errorf("Expected scale to be 0, got %d\n", scale) } }
func TestSetMetadata(t *testing.T) { datastore.OpenTest() defer datastore.CloseTest() uuid, _ := initTestRepo() server.CreateTestInstance(t, uuid, "imagetile", "tiles", dvid.Config{}) // Store Metadata url := fmt.Sprintf("%snode/%s/tiles/metadata", server.WebAPIPath, uuid) server.TestHTTP(t, "POST", url, bytes.NewBufferString(testMetadata)) // Check instance really has it set. var metadata metadataJSON respStr := server.TestHTTP(t, "GET", url, nil) if err := json.Unmarshal(respStr, &metadata); err != nil { t.Fatalf("Couldn't parse JSON response to metadata request (%v):\n%s\n", err, respStr) } expectMin := dvid.Point3d{0, 0, 0} expectMax := dvid.Point3d{5, 5, 4} if !expectMin.Equals(metadata.MinTileCoord) { t.Errorf("Expected min tile coord %s, got %s\n", expectMin, metadata.MinTileCoord) } if !expectMax.Equals(metadata.MaxTileCoord) { t.Errorf("Expected max tile coord %s, got %s\n", expectMax, metadata.MaxTileCoord) } tileSpec, err := parseTileSpec(metadata.Levels) if err != nil { t.Errorf("Error parsing returned tile level spec:\n%v\n", metadata.Levels) } if len(tileSpec) != 4 { t.Errorf("Bad tile spec load: only %d elements != 4\n", len(tileSpec)) } if tileSpec[2].Resolution.GetMax() != 40.0 { t.Errorf("Bad tile spec at level 2: %v\n", tileSpec[2]) } if tileSpec[3].TileSize.Value(2) != 512 { t.Errorf("Bad tile spec at level 3: %v\n", tileSpec[3]) } }
// TestFloatDirectCalls ingests a float32 volume through the package-level
// voxels API (no HTTP) and verifies the read-back matches what was stored.
func TestFloatDirectCalls(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid, versionID := initTestRepo()
	server.CreateTestInstance(t, uuid, "float32blk", "floatimg", dvid.Config{})
	dataservice, err := datastore.GetDataByUUIDName(uuid, "floatimg")
	if err != nil {
		t.Fatal(err)
	}
	floatimg, ok := dataservice.(*Data)
	if !ok {
		t.Fatalf("Can't convert dataservice %v into imageblk.Data\n", dataservice)
	}
	// Context is only used in failure messages below.
	ctx := datastore.NewVersionedCtx(floatimg, versionID)

	// Create a block-aligned float32 test volume.
	offset := dvid.Point3d{512, 32, 1024}
	size := dvid.Point3d{128, 96, 64}
	subvol := dvid.NewSubvolume(offset, size)
	testvol := createFloatTestVolume(t, uuid, "floatimg", offset, size)
	// Keep a pristine copy of the generated bytes to compare against after
	// the ingest/get round trip.
	origData := make([]byte, len(testvol.data))
	copy(origData, testvol.data)

	// Store it into datastore at root
	v, err := floatimg.NewVoxels(subvol, testvol.data)
	if err != nil {
		t.Fatalf("Unable to make new floatimg voxels: %v\n", err)
	}
	if err = floatimg.IngestVoxels(versionID, 1, v, ""); err != nil {
		t.Errorf("Unable to put voxels for %s: %v\n", ctx, err)
	}

	// Read the stored image
	v2, err := floatimg.NewVoxels(subvol, nil)
	if err != nil {
		t.Errorf("Unable to make new grayscale ExtHandler: %v\n", err)
	}
	if err = floatimg.GetVoxels(versionID, v2, ""); err != nil {
		t.Errorf("Unable to get voxels for %s: %v\n", ctx, err)
	}

	// Make sure the retrieved image matches the original: geometry and
	// metadata first, then raw bytes.
	if v.Stride() != v2.Stride() {
		t.Errorf("Stride in retrieved subvol incorrect\n")
	}
	if v.Interpolable() != v2.Interpolable() {
		t.Errorf("Interpolable bool in retrieved subvol incorrect\n")
	}
	if !reflect.DeepEqual(v.Size(), v2.Size()) {
		t.Errorf("Size in retrieved subvol incorrect: %s vs expected %s\n", v2.Size(), v.Size())
	}
	if v.NumVoxels() != v2.NumVoxels() {
		t.Errorf("# voxels in retrieved is different: %d vs expected %d\n", v2.NumVoxels(), v.NumVoxels())
	}
	if len(v.Data()) != len(v2.Data()) {
		t.Errorf("Expected %d bytes in retrieved data, got %d bytes\n", len(v.Data()), len(v2.Data()))
	}
	received := v2.Data()
	//dvid.PrintNonZero("original value", origData)
	//dvid.PrintNonZero("returned value", data)
	// NOTE(review): this loop compares only the first NumVoxels bytes, not
	// NumVoxels * bytes-per-voxel — possibly an intentional spot check, but
	// worth confirming it covers enough of the float32 payload.
	for i := int64(0); i < v2.NumVoxels(); i++ {
		if received[i] != origData[i] {
			t.Logf("Data returned != data stored for voxel %d\n", i)
			t.Logf("Size of data: %d bytes from GET, %d bytes in PUT\n", len(received), len(origData))
			t.Fatalf("GET subvol (%d) != PUT subvol (%d) @ index %d", received[i], origData[i], i)
		}
	}
}
// TestLabels exercises the labelblk volume API: sequential reposts with
// increasing label values, concurrent posts of identical and disjoint
// subvolumes, GET with various compressions, and ROI-masked POST/GET.
func TestLabels(t *testing.T) {
	tests.UseStore()
	defer tests.CloseStore()

	uuid := dvid.UUID(server.NewTestRepo(t))
	if len(uuid) < 5 {
		t.Fatalf("Bad root UUID for new repo: %s\n", uuid)
	}

	// Create a labelblk instance
	server.CreateTestInstance(t, uuid, "labelblk", "labels", dvid.Config{})

	vol := labelVol{
		size:      dvid.Point3d{5, 5, 5}, // in blocks
		blockSize: dvid.Point3d{32, 32, 32},
		offset:    dvid.Point3d{32, 64, 96},
		name:      "labels",
	}
	vol.postLabelVolume(t, uuid, "", "", 0)

	// Repost the label volume 3 more times with increasing starting values.
	vol.postLabelVolume(t, uuid, "", "", 2100)
	vol.postLabelVolume(t, uuid, "", "", 8176)
	vol.postLabelVolume(t, uuid, "", "", 16623)

	vol.testSlices(t, uuid)

	// Try to post last volume concurrently 3x and then check result.
	// All three posts write identical data, so the final state is the same
	// regardless of interleaving.
	wg := new(sync.WaitGroup)
	wg.Add(3)
	go func() {
		vol.postLabelVolume(t, uuid, "", "", 16623)
		wg.Done()
	}()
	go func() {
		vol.postLabelVolume(t, uuid, "", "", 16623)
		wg.Done()
	}()
	go func() {
		vol.postLabelVolume(t, uuid, "", "", 16623)
		wg.Done()
	}()
	wg.Wait()
	vol.testGetLabelVolume(t, uuid, "", "")

	// Try concurrent write of disjoint subvolumes.
	vol2 := labelVol{
		size:      dvid.Point3d{5, 5, 5}, // in blocks
		blockSize: dvid.Point3d{32, 32, 32},
		offset:    dvid.Point3d{192, 64, 96},
		name:      "labels",
	}
	vol3 := labelVol{
		size:      dvid.Point3d{5, 5, 5}, // in blocks
		blockSize: dvid.Point3d{32, 32, 32},
		offset:    dvid.Point3d{192, 224, 96},
		name:      "labels",
	}
	vol4 := labelVol{
		size:      dvid.Point3d{5, 5, 5}, // in blocks
		blockSize: dvid.Point3d{32, 32, 32},
		offset:    dvid.Point3d{32, 224, 96},
		name:      "labels",
	}
	wg.Add(3)
	go func() {
		vol2.postLabelVolume(t, uuid, "lz4", "", 4000)
		wg.Done()
	}()
	go func() {
		vol3.postLabelVolume(t, uuid, "lz4", "", 8000)
		wg.Done()
	}()
	go func() {
		vol4.postLabelVolume(t, uuid, "lz4", "", 1200)
		wg.Done()
	}()
	wg.Wait()
	vol.testGetLabelVolume(t, uuid, "", "")
	vol2.testGetLabelVolume(t, uuid, "", "")
	vol3.testGetLabelVolume(t, uuid, "", "")
	vol4.testGetLabelVolume(t, uuid, "", "")

	// Verify various GET 3d volume with compressions and no ROI.
	vol.testGetLabelVolume(t, uuid, "", "")
	vol.testGetLabelVolume(t, uuid, "lz4", "")
	vol.testGetLabelVolume(t, uuid, "gzip", "")

	// Create a new ROI instance.
	roiName := "myroi"
	server.CreateTestInstance(t, uuid, "roi", roiName, dvid.Config{})

	// Add ROI data
	apiStr := fmt.Sprintf("%snode/%s/%s/roi", server.WebAPIPath, uuid, roiName)
	server.TestHTTP(t, "POST", apiStr, bytes.NewBufferString(labelsJSON()))

	// Post updated labels without ROI and make sure it returns those values.
	var labelNoROI uint64 = 20000
	vol.postLabelVolume(t, uuid, "", "", labelNoROI)
	returned := vol.testGetLabelVolume(t, uuid, "", "")
	startLabel := binary.LittleEndian.Uint64(returned[0:8])
	if startLabel != labelNoROI+1 {
		t.Errorf("Expected first voxel to be label %d and got %d instead\n", labelNoROI+1, startLabel)
	}

	// TODO - Use the ROI to retrieve a 2d xy image.

	// TODO - Make sure we aren't getting labels back in non-ROI points.

	// Post again but now with ROI
	var labelWithROI uint64 = 40000
	vol.postLabelVolume(t, uuid, "", roiName, labelWithROI)

	// Verify ROI masking of POST where anything outside ROI is old labels.
	// Both label counters advance in lockstep with the voxel index; which
	// one the voxel should match depends on block membership in the ROI.
	returned = vol.getLabelVolume(t, uuid, "", "")
	var newlabel uint64 = labelWithROI
	var oldlabel uint64 = labelNoROI
	nx := vol.size[0] * vol.blockSize[0]
	ny := vol.size[1] * vol.blockSize[1]
	nz := vol.size[2] * vol.blockSize[2]
	var x, y, z, v int32
	for z = 0; z < nz; z++ {
		voxz := z + vol.offset[2]
		blockz := voxz / DefaultBlockSize
		for y = 0; y < ny; y++ {
			voxy := y + vol.offset[1]
			blocky := voxy / DefaultBlockSize
			for x = 0; x < nx; x++ {
				voxx := x + vol.offset[0]
				blockx := voxx / DefaultBlockSize
				oldlabel++
				newlabel++
				got := binary.LittleEndian.Uint64(returned[v : v+8])
				if inroi(blockx, blocky, blockz) {
					if got != newlabel {
						t.Fatalf("Expected %d in ROI, got %d\n", newlabel, got)
					}
				} else {
					if got != oldlabel {
						t.Fatalf("Expected %d outside ROI, got %d\n", oldlabel, got)
					}
				}
				v += 8
			}
		}
	}

	// Verify that a ROI-enabled GET has zeros everywhere outside ROI.
	returned = vol.getLabelVolume(t, uuid, "", roiName)
	newlabel = labelWithROI
	x, y, z, v = 0, 0, 0, 0
	for z = 0; z < nz; z++ {
		voxz := z + vol.offset[2]
		blockz := voxz / DefaultBlockSize
		for y = 0; y < ny; y++ {
			voxy := y + vol.offset[1]
			blocky := voxy / DefaultBlockSize
			for x = 0; x < nx; x++ {
				voxx := x + vol.offset[0]
				blockx := voxx / DefaultBlockSize
				oldlabel++ // kept in lockstep though unused in this pass
				newlabel++
				got := binary.LittleEndian.Uint64(returned[v : v+8])
				if inroi(blockx, blocky, blockz) {
					if got != newlabel {
						t.Fatalf("Expected %d in ROI, got %d\n", newlabel, got)
					}
				} else {
					if got != 0 {
						t.Fatalf("Expected zero outside ROI, got %d\n", got)
					}
				}
				v += 8
			}
		}
	}
}
func TestLabels(t *testing.T) { datastore.OpenTest() defer datastore.CloseTest() // Create testbed volume and data instances uuid, _ := initTestRepo() var config dvid.Config server.CreateTestInstance(t, uuid, "labelblk", "labels", config) server.CreateTestInstance(t, uuid, "labelvol", "bodies", config) // Establish syncs server.CreateTestSync(t, uuid, "labels", "bodies") server.CreateTestSync(t, uuid, "bodies", "labels") // Populate the labels, which should automatically populate the labelvol _ = createLabelTestVolume(t, uuid, "labels") if err := datastore.BlockOnUpdating(uuid, "labels"); err != nil { t.Fatalf("Error blocking on sync of labels: %v\n", err) } // Add annotations syncing with "labels" instance checking for deduplication. server.CreateTestInstance(t, uuid, "annotation", "mysynapses", config) server.CreateTestSync(t, uuid, "mysynapses", "labels,bodies,labels,bodies,labels,bodies") dataservice, err := datastore.GetDataByUUIDName(uuid, "mysynapses") if err != nil { t.Fatal(err) } data, ok := dataservice.(*Data) if !ok { t.Fatalf("Can't convert dataservice %v into datastore.Data\n", dataservice) } if len(data.SyncedData()) != 2 { t.Fatalf("Expected 2 syncs (uuids for labels and bodies], got %v\n", data.SyncedData()) } // PUT first batch of synapses testJSON, err := json.Marshal(testData) if err != nil { t.Fatal(err) } url1 := fmt.Sprintf("%snode/%s/mysynapses/elements", server.WebAPIPath, uuid) server.TestHTTP(t, "POST", url1, strings.NewReader(string(testJSON))) // Test if labels were properly denormalized. For the POST we have synchronized label denormalization. // If this were to become asynchronous, we'd want to block on updating like the labelblk<->labelvol sync. 
testResponseLabel(t, expectedLabel1, "%snode/%s/mysynapses/label/1?relationships=true", server.WebAPIPath, uuid) testResponseLabel(t, expectedLabel2, "%snode/%s/mysynapses/label/2?relationships=true", server.WebAPIPath, uuid) testResponseLabel(t, expectedLabel3, "%snode/%s/mysynapses/label/3?relationships=true", server.WebAPIPath, uuid) testResponseLabel(t, expectedLabel3NoRel, "%snode/%s/mysynapses/label/3", server.WebAPIPath, uuid) testResponseLabel(t, expectedLabel4, "%snode/%s/mysynapses/label/4?relationships=true", server.WebAPIPath, uuid) // Make change to labelblk and make sure our label synapses have been adjusted (case A) _ = modifyLabelTestVolume(t, uuid, "labels") if err := datastore.BlockOnUpdating(uuid, "mysynapses"); err != nil { t.Fatalf("Error blocking on sync of labels->annotations: %v\n", err) } testResponseLabel(t, expectedLabel1, "%snode/%s/mysynapses/label/1?relationships=true", server.WebAPIPath, uuid) testResponseLabel(t, expectedLabel2a, "%snode/%s/mysynapses/label/2?relationships=true", server.WebAPIPath, uuid) testResponseLabel(t, expectedLabel3a, "%snode/%s/mysynapses/label/3?relationships=true", server.WebAPIPath, uuid) testResponseLabel(t, expectedLabel4, "%snode/%s/mysynapses/label/4?relationships=true", server.WebAPIPath, uuid) testResponseLabel(t, expectedLabel4NoRel, "%snode/%s/mysynapses/label/4", server.WebAPIPath, uuid) // Make change to labelvol and make sure our label synapses have been adjusted (case B). // Merge 3a into 2a. 
testMerge := mergeJSON(`[2, 3]`) testMerge.send(t, uuid, "bodies") if err := datastore.BlockOnUpdating(uuid, "labels"); err != nil { t.Fatalf("Error blocking on sync of labels: %v\n", err) } if err := datastore.BlockOnUpdating(uuid, "mysynapses"); err != nil { t.Fatalf("Error blocking on sync of synapses: %v\n", err) } testResponseLabel(t, expectedLabel1, "%snode/%s/mysynapses/label/1?relationships=true", server.WebAPIPath, uuid) testResponseLabel(t, expectedLabel2b, "%snode/%s/mysynapses/label/2?relationships=true", server.WebAPIPath, uuid) testResponseLabel(t, nil, "%snode/%s/mysynapses/label/3?relationships=true", server.WebAPIPath, uuid) testResponseLabel(t, expectedLabel4, "%snode/%s/mysynapses/label/4?relationships=true", server.WebAPIPath, uuid) // Now split label 2b off and check if annotations also split // Create the sparsevol encoding for split area numspans := len(bodysplit.voxelSpans) rles := make(dvid.RLEs, numspans, numspans) for i, span := range bodysplit.voxelSpans { start := dvid.Point3d{span[2], span[1], span[0]} length := span[3] - span[2] + 1 rles[i] = dvid.NewRLE(start, length) } buf := getBytesRLE(t, rles) // Submit the split sparsevol reqStr := fmt.Sprintf("%snode/%s/%s/split/%d?splitlabel=7", server.WebAPIPath, uuid, "bodies", 2) r := server.TestHTTP(t, "POST", reqStr, buf) jsonVal := make(map[string]uint64) if err := json.Unmarshal(r, &jsonVal); err != nil { t.Errorf("Unable to get new label from split. Instead got: %v\n", jsonVal) } // Verify that the annotations are correct. if err := datastore.BlockOnUpdating(uuid, "mysynapses"); err != nil { t.Fatalf("Error blocking on sync of split->annotations: %v\n", err) } testResponseLabel(t, expectedLabel2c, "%snode/%s/mysynapses/label/2?relationships=true", server.WebAPIPath, uuid) url2 := fmt.Sprintf("%snode/%s/mysynapses/label/7?relationships=true", server.WebAPIPath, uuid) testResponseLabel(t, expectedLabel7, url2) // Change the name of the annotations. 
if err = datastore.RenameData(uuid, "mysynapses", "bodies", "foobar"); err == nil { t.Fatalf("Should have been prevented from renaming data 'mysynapses' to existing data 'bodies'!\n") } if err = datastore.RenameData(uuid, "mysynapses", "renamedData", "foobar"); err != nil { t.Fatalf("Error renaming annotations: %v\n", err) } // Make sure the old name is no longer there and the new one is. server.TestBadHTTP(t, "GET", url2, nil) testResponseLabel(t, expectedLabel2c, "%snode/%s/renamedData/label/2?relationships=true", server.WebAPIPath, uuid) // Try a coarse split. // Create the encoding for split area in block coordinates. rles = dvid.RLEs{ dvid.NewRLE(dvid.Point3d{3, 1, 3}, 1), } buf = getBytesRLE(t, rles) // Submit the coarse split reqStr = fmt.Sprintf("%snode/%s/%s/split-coarse/2?splitlabel=8", server.WebAPIPath, uuid, "bodies") r = server.TestHTTP(t, "POST", reqStr, buf) jsonVal = make(map[string]uint64) if err := json.Unmarshal(r, &jsonVal); err != nil { t.Errorf("Unable to get new label from split. Instead got: %v\n", jsonVal) } // Verify that the annotations are correct. if err := datastore.BlockOnUpdating(uuid, "renamedData"); err != nil { t.Fatalf("Error blocking on sync of split->annotations: %v\n", err) } testResponseLabel(t, expectedLabel2c, "%snode/%s/renamedData/label/8?relationships=true", server.WebAPIPath, uuid) testResponseLabel(t, nil, "%snode/%s/renamedData/label/2?relationships=true", server.WebAPIPath, uuid) // Delete a labeled annotation and make sure it's not in label delurl := fmt.Sprintf("%snode/%s/%s/element/20_30_40", server.WebAPIPath, uuid, "renamedData") server.TestHTTP(t, "DELETE", delurl, nil) testResponseLabel(t, afterDeleteOn7, "%snode/%s/%s/label/7?relationships=true", server.WebAPIPath, uuid, "renamedData") }
func TestFilter(t *testing.T) { datastore.OpenTest() defer datastore.CloseTest() // Create testbed volume and data instances uuid, _ := initTestRepo() var config dvid.Config server.CreateTestInstance(t, uuid, "labelblk", "labels", config) d, err := datastore.NewData(uuid, labelvolT, "bodies", config) if err != nil { t.Fatalf("Unable to create labelvol instance: %s\n", err) } server.CreateTestSync(t, uuid, "labels", "bodies") server.CreateTestSync(t, uuid, "bodies", "labels") // Populate the labels, which should automatically populate the labelvol _ = createLabelTestVolume(t, uuid, "labels") if err := datastore.BlockOnUpdating(uuid, "bodies"); err != nil { t.Fatalf("Error blocking on sync of labels -> bodies: %v\n", err) } // Create a ROI that will be used for filter test. server.CreateTestInstance(t, uuid, "roi", "myroi", config) roiRequest := fmt.Sprintf("%snode/%s/myroi/roi", server.WebAPIPath, uuid) server.TestHTTP(t, "POST", roiRequest, getROIReader()) // Create the filter spec fs := storage.FilterSpec(fmt.Sprintf("roi:myroi,%s", uuid)) var filter storage.Filter filterer, ok := d.(storage.Filterer) if !ok { t.Fatalf("labelvol instance does not implement storage.Filterer\n") } filter, err = filterer.NewFilter(fs) if err != nil { t.Fatalf("Can't create filter from spec %q: %v\n", fs, err) } if filter == nil { t.Fatalf("No filter could be created from spec %q\n", fs) } // Test the filter. 
tkv := storage.TKeyValue{K: NewTKey(23, dvid.ChunkPoint3d{0, 0, 0}.ToIZYXString())} skip, err := filter.Check(&tkv) if !skip { t.Errorf("Expected filter check 1 to skip, instead filter.Check() returned not skip") } tkv = storage.TKeyValue{K: NewTKey(23, dvid.ChunkPoint3d{1, 1, 1}.ToIZYXString())} skip, err = filter.Check(&tkv) if skip { t.Errorf("Expected filter check 2 to not skip!") } tkv = storage.TKeyValue{K: NewTKey(23, dvid.ChunkPoint3d{2, 1, 2}.ToIZYXString())} skip, err = filter.Check(&tkv) if skip { t.Errorf("Expected filter check 2 to not skip!") } tkv = storage.TKeyValue{K: NewTKey(23, dvid.ChunkPoint3d{3, 1, 1}.ToIZYXString())} skip, err = filter.Check(&tkv) if !skip { t.Errorf("Expected filter check 3 to skip!") } }
func TestLabels(t *testing.T) { datastore.OpenTest() defer datastore.CloseTest() uuid, _ := datastore.NewTestRepo() if len(uuid) < 5 { t.Fatalf("Bad root UUID for new repo: %s\n", uuid) } // Create a labelblk instance server.CreateTestInstance(t, uuid, "labelblk", "labels", dvid.Config{}) vol := labelVol{ startLabel: 2, size: dvid.Point3d{5, 5, 5}, // in blocks blockSize: dvid.Point3d{32, 32, 32}, offset: dvid.Point3d{32, 64, 96}, name: "labels", } vol.postLabelVolume(t, uuid, "", "", 0) // Test the blocks API vol.testBlocks(t, uuid, "", "") vol.testBlocks(t, uuid, "uncompressed", "") // Test the "label" endpoint. apiStr := fmt.Sprintf("%snode/%s/%s/label/100_64_96", server.WebAPIPath, uuid, "labels") jsonResp := server.TestHTTP(t, "GET", apiStr, nil) var r labelResp if err := json.Unmarshal(jsonResp, &r); err != nil { t.Errorf("Unable to parse 'label' endpoint response: %s\n", jsonResp) } if r.Label != 69 { t.Errorf("Expected label %d @ (100, 64, 96) got label %d\n", vol.label(100, 64, 96), r.Label) } apiStr = fmt.Sprintf("%snode/%s/%s/label/10000_64000_9600121", server.WebAPIPath, uuid, "labels") jsonResp = server.TestHTTP(t, "GET", apiStr, nil) if err := json.Unmarshal(jsonResp, &r); err != nil { t.Errorf("Unable to parse 'label' endpoint response: %s\n", jsonResp) } if r.Label != 0 { t.Errorf("Expected label 0 at random huge point, got label %d\n", r.Label) } // Test the "labels" endpoint. 
apiStr = fmt.Sprintf("%snode/%s/%s/labels", server.WebAPIPath, uuid, "labels") payload := `[[100,64,96],[78,93,156],[104,65,97]]` jsonResp = server.TestHTTP(t, "GET", apiStr, bytes.NewBufferString(payload)) var labels [3]uint64 if err := json.Unmarshal(jsonResp, &labels); err != nil { t.Errorf("Unable to parse 'labels' endpoint response: %s\n", jsonResp) } if labels[0] != vol.label(100, 64, 96) { t.Errorf("Expected label %d @ (100, 64, 96) got label %d\n", vol.label(100, 64, 96), labels[0]) } if labels[1] != vol.label(78, 93, 156) { t.Errorf("Expected label %d @ (78, 93, 156) got label %d\n", vol.label(78, 93, 156), labels[1]) } if labels[2] != vol.label(104, 65, 97) { t.Errorf("Expected label %d @ (104, 65, 97) got label %d\n", vol.label(104, 65, 97), labels[2]) } // Repost the label volume 3 more times with increasing starting values. vol.postLabelVolume(t, uuid, "", "", 2100) vol.postLabelVolume(t, uuid, "", "", 8176) vol.postLabelVolume(t, uuid, "", "", 16623) vol.testSlices(t, uuid) // Try to post last volume concurrently 3x and then check result. wg := new(sync.WaitGroup) wg.Add(3) go func() { vol.postLabelVolume(t, uuid, "", "", 16623) wg.Done() }() go func() { vol.postLabelVolume(t, uuid, "", "", 16623) wg.Done() }() go func() { vol.postLabelVolume(t, uuid, "", "", 16623) wg.Done() }() wg.Wait() vol.testGetLabelVolume(t, uuid, "", "") // Try concurrent write of disjoint subvolumes. 
vol2 := labelVol{ size: dvid.Point3d{5, 5, 5}, // in blocks blockSize: dvid.Point3d{32, 32, 32}, offset: dvid.Point3d{192, 64, 96}, name: "labels", } vol3 := labelVol{ size: dvid.Point3d{5, 5, 5}, // in blocks blockSize: dvid.Point3d{32, 32, 32}, offset: dvid.Point3d{192, 224, 96}, name: "labels", } vol4 := labelVol{ size: dvid.Point3d{5, 5, 5}, // in blocks blockSize: dvid.Point3d{32, 32, 32}, offset: dvid.Point3d{32, 224, 96}, name: "labels", } wg.Add(3) go func() { vol2.postLabelVolume(t, uuid, "lz4", "", 4000) wg.Done() }() go func() { vol3.postLabelVolume(t, uuid, "lz4", "", 8000) wg.Done() }() go func() { vol4.postLabelVolume(t, uuid, "lz4", "", 1200) wg.Done() }() wg.Wait() vol.testGetLabelVolume(t, uuid, "", "") vol2.testGetLabelVolume(t, uuid, "", "") vol3.testGetLabelVolume(t, uuid, "", "") vol4.testGetLabelVolume(t, uuid, "", "") // Verify various GET 3d volume with compressions and no ROI. vol.testGetLabelVolume(t, uuid, "", "") vol.testGetLabelVolume(t, uuid, "lz4", "") vol.testGetLabelVolume(t, uuid, "gzip", "") // Create a new ROI instance. roiName := "myroi" server.CreateTestInstance(t, uuid, "roi", roiName, dvid.Config{}) // Add ROI data apiStr = fmt.Sprintf("%snode/%s/%s/roi", server.WebAPIPath, uuid, roiName) server.TestHTTP(t, "POST", apiStr, bytes.NewBufferString(labelsJSON())) // Post updated labels without ROI and make sure it returns those values. var labelNoROI uint64 = 20000 vol.postLabelVolume(t, uuid, "", "", labelNoROI) returned := vol.testGetLabelVolume(t, uuid, "", "") startLabel := binary.LittleEndian.Uint64(returned[0:8]) if startLabel != labelNoROI+1 { t.Errorf("Expected first voxel to be label %d and got %d instead\n", labelNoROI+1, startLabel) } // TODO - Use the ROI to retrieve a 2d xy image. // TODO - Make sure we aren't getting labels back in non-ROI points. 
// Post again but now with ROI var labelWithROI uint64 = 40000 vol.postLabelVolume(t, uuid, "", roiName, labelWithROI) // Verify ROI masking of POST where anything outside ROI is old labels. returned = vol.getLabelVolume(t, uuid, "", "") var newlabel uint64 = labelWithROI var oldlabel uint64 = labelNoROI nx := vol.size[0] * vol.blockSize[0] ny := vol.size[1] * vol.blockSize[1] nz := vol.size[2] * vol.blockSize[2] var x, y, z, v int32 for z = 0; z < nz; z++ { voxz := z + vol.offset[2] blockz := voxz / DefaultBlockSize for y = 0; y < ny; y++ { voxy := y + vol.offset[1] blocky := voxy / DefaultBlockSize for x = 0; x < nx; x++ { voxx := x + vol.offset[0] blockx := voxx / DefaultBlockSize oldlabel++ newlabel++ got := binary.LittleEndian.Uint64(returned[v : v+8]) if inroi(blockx, blocky, blockz) { if got != newlabel { t.Fatalf("Expected %d in ROI, got %d\n", newlabel, got) } } else { if got != oldlabel { t.Fatalf("Expected %d outside ROI, got %d\n", oldlabel, got) } } v += 8 } } } // Verify that a ROI-enabled GET has zeros everywhere outside ROI. returned = vol.getLabelVolume(t, uuid, "", roiName) newlabel = labelWithROI x, y, z, v = 0, 0, 0, 0 for z = 0; z < nz; z++ { voxz := z + vol.offset[2] blockz := voxz / DefaultBlockSize for y = 0; y < ny; y++ { voxy := y + vol.offset[1] blocky := voxy / DefaultBlockSize for x = 0; x < nx; x++ { voxx := x + vol.offset[0] blockx := voxx / DefaultBlockSize oldlabel++ newlabel++ got := binary.LittleEndian.Uint64(returned[v : v+8]) if inroi(blockx, blocky, blockz) { if got != newlabel { t.Fatalf("Expected %d in ROI, got %d\n", newlabel, got) } } else { if got != 0 { t.Fatalf("Expected zero outside ROI, got %d\n", got) } } v += 8 } } } }
func TestMultiscale(t *testing.T) { datastore.OpenTest() defer datastore.CloseTest() // Create testbed volume and data instances uuid, _ := initTestRepo() var config dvid.Config server.CreateTestInstance(t, uuid, "labelblk", "labels", config) // Add multiscale server.CreateTestInstance(t, uuid, "labelblk", "labels_1", config) // 64 x 64 x 64 server.CreateTestSync(t, uuid, "labels_1", "labels") server.CreateTestInstance(t, uuid, "labelblk", "labels_2", config) // 32 x 32 x 32 server.CreateTestSync(t, uuid, "labels_2", "labels_1") // Create an easily interpreted label volume with a couple of labels. volume := newTestVolume(128, 128, 128) volume.addSubvol(dvid.Point3d{40, 40, 40}, dvid.Point3d{40, 40, 40}, 1) volume.addSubvol(dvid.Point3d{40, 40, 80}, dvid.Point3d{40, 40, 40}, 2) volume.addSubvol(dvid.Point3d{80, 40, 40}, dvid.Point3d{40, 40, 40}, 13) volume.addSubvol(dvid.Point3d{40, 80, 40}, dvid.Point3d{40, 40, 40}, 209) volume.addSubvol(dvid.Point3d{80, 80, 40}, dvid.Point3d{40, 40, 40}, 311) volume.put(t, uuid, "labels") // Verify initial ingest for hi-res if err := datastore.BlockOnUpdating(uuid, "labels"); err != nil { t.Fatalf("Error blocking on update for labels: %v\n", err) } hires := newTestVolume(128, 128, 128) hires.get(t, uuid, "labels") hires.verifyLabel(t, 1, 45, 45, 45) hires.verifyLabel(t, 2, 50, 50, 100) hires.verifyLabel(t, 13, 100, 60, 60) hires.verifyLabel(t, 209, 55, 100, 55) hires.verifyLabel(t, 311, 81, 81, 41) // Check the first downres: 64^3 if err := datastore.BlockOnUpdating(uuid, "labels_1"); err != nil { t.Fatalf("Error blocking on update for labels_1: %v\n", err) } downres1 := newTestVolume(64, 64, 64) downres1.get(t, uuid, "labels_1") downres1.verifyLabel(t, 1, 30, 30, 30) downres1.verifyLabel(t, 2, 21, 21, 45) downres1.verifyLabel(t, 13, 45, 21, 36) downres1.verifyLabel(t, 209, 21, 50, 35) downres1.verifyLabel(t, 311, 45, 55, 35) expected1 := newTestVolume(64, 64, 64) expected1.addSubvol(dvid.Point3d{20, 20, 20}, dvid.Point3d{20, 20, 
20}, 1) expected1.addSubvol(dvid.Point3d{20, 20, 40}, dvid.Point3d{20, 20, 20}, 2) expected1.addSubvol(dvid.Point3d{40, 20, 20}, dvid.Point3d{20, 20, 20}, 13) expected1.addSubvol(dvid.Point3d{20, 40, 20}, dvid.Point3d{20, 20, 20}, 209) expected1.addSubvol(dvid.Point3d{40, 40, 20}, dvid.Point3d{20, 20, 20}, 311) if err := downres1.equals(expected1); err != nil { t.Errorf("1st downres 'labels_1' isn't what is expected: %v\n", err) } // Check the second downres to voxel: 32^3 if err := datastore.BlockOnUpdating(uuid, "labels_2"); err != nil { t.Fatalf("Error blocking on update for labels_2: %v\n", err) } expected2 := newTestVolume(32, 32, 32) expected2.addSubvol(dvid.Point3d{10, 10, 10}, dvid.Point3d{10, 10, 10}, 1) expected2.addSubvol(dvid.Point3d{10, 10, 20}, dvid.Point3d{10, 10, 10}, 2) expected2.addSubvol(dvid.Point3d{20, 10, 10}, dvid.Point3d{10, 10, 10}, 13) expected2.addSubvol(dvid.Point3d{10, 20, 10}, dvid.Point3d{10, 10, 10}, 209) expected2.addSubvol(dvid.Point3d{20, 20, 10}, dvid.Point3d{10, 10, 10}, 311) downres2 := newTestVolume(32, 32, 32) downres2.get(t, uuid, "labels_2") if err := downres2.equals(expected2); err != nil { t.Errorf("2nd downres 'labels_2' isn't what is expected: %v\n", err) } }
// TestLabelsReload checks the annotation "reload" endpoint: annotations are
// ingested BEFORE any sync exists, then a sync to labels/bodies is added and
// a reload is requested, which must rebuild the per-label denormalizations.
// It then verifies the synced annotations stay correct after a labelblk
// modification (case A) and a labelvol merge (case B).
func TestLabelsReload(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	// Create testbed volume and data instances
	uuid, _ := initTestRepo()
	var config dvid.Config
	server.CreateTestInstance(t, uuid, "labelblk", "labels", config)
	server.CreateTestInstance(t, uuid, "labelvol", "bodies", config)

	// Establish syncs
	server.CreateTestSync(t, uuid, "labels", "bodies")
	server.CreateTestSync(t, uuid, "bodies", "labels")

	// Populate the labels, which should automatically populate the labelvol
	_ = createLabelTestVolume(t, uuid, "labels")

	if err := datastore.BlockOnUpdating(uuid, "labels"); err != nil {
		t.Fatalf("Error blocking on sync of labels: %v\n", err)
	}

	// Add annotations without syncing.
	server.CreateTestInstance(t, uuid, "annotation", "mysynapses", config)

	// PUT first batch of synapses
	testJSON, err := json.Marshal(testData)
	if err != nil {
		t.Fatal(err)
	}
	url1 := fmt.Sprintf("%snode/%s/mysynapses/elements", server.WebAPIPath, uuid)
	server.TestHTTP(t, "POST", url1, strings.NewReader(string(testJSON)))

	// Add the sync
	server.CreateTestSync(t, uuid, "mysynapses", "labels,bodies")

	// Do a reload asynchronously
	reloadURL := fmt.Sprintf("%snode/%s/mysynapses/reload", server.WebAPIPath, uuid)
	server.TestHTTP(t, "POST", reloadURL, nil)

	// Wait until done.
	if err := datastore.BlockOnUpdating(uuid, "mysynapses"); err != nil {
		t.Fatalf("Error blocking on sync of annotations: %v\n", err)
	}

	// Test if labels were properly denormalized.  For the POST we have synchronized label denormalization.
	testResponseLabel(t, expectedLabel1, "%snode/%s/mysynapses/label/1?relationships=true", server.WebAPIPath, uuid)
	testResponseLabel(t, expectedLabel2, "%snode/%s/mysynapses/label/2?relationships=true", server.WebAPIPath, uuid)
	testResponseLabel(t, expectedLabel3, "%snode/%s/mysynapses/label/3?relationships=true", server.WebAPIPath, uuid)
	testResponseLabel(t, expectedLabel3NoRel, "%snode/%s/mysynapses/label/3", server.WebAPIPath, uuid)
	testResponseLabel(t, expectedLabel4, "%snode/%s/mysynapses/label/4?relationships=true", server.WebAPIPath, uuid)

	// Make change to labelblk and make sure our label synapses have been adjusted (case A)
	_ = modifyLabelTestVolume(t, uuid, "labels")

	if err := datastore.BlockOnUpdating(uuid, "mysynapses"); err != nil {
		t.Fatalf("Error blocking on sync of labels->annotations: %v\n", err)
	}

	testResponseLabel(t, expectedLabel1, "%snode/%s/mysynapses/label/1?relationships=true", server.WebAPIPath, uuid)
	testResponseLabel(t, expectedLabel2a, "%snode/%s/mysynapses/label/2?relationships=true", server.WebAPIPath, uuid)
	testResponseLabel(t, expectedLabel3a, "%snode/%s/mysynapses/label/3?relationships=true", server.WebAPIPath, uuid)
	testResponseLabel(t, expectedLabel4, "%snode/%s/mysynapses/label/4?relationships=true", server.WebAPIPath, uuid)
	testResponseLabel(t, expectedLabel4NoRel, "%snode/%s/mysynapses/label/4", server.WebAPIPath, uuid)

	// Make change to labelvol and make sure our label synapses have been adjusted (case B).
	// Merge 3a into 2a.
	testMerge := mergeJSON(`[2, 3]`)
	testMerge.send(t, uuid, "bodies")

	// Block on all three synced instances so the merge has fully propagated.
	if err := datastore.BlockOnUpdating(uuid, "labels"); err != nil {
		t.Fatalf("Error blocking on sync of labels: %v\n", err)
	}
	if err := datastore.BlockOnUpdating(uuid, "mysynapses"); err != nil {
		t.Fatalf("Error blocking on sync of synapses: %v\n", err)
	}
	if err := datastore.BlockOnUpdating(uuid, "bodies"); err != nil {
		t.Fatalf("Error blocking on sync of bodies: %v\n", err)
	}

	// Label 3 was merged into 2, so label 3 should now return nothing.
	testResponseLabel(t, expectedLabel1, "%snode/%s/mysynapses/label/1?relationships=true", server.WebAPIPath, uuid)
	testResponseLabel(t, expectedLabel2b, "%snode/%s/mysynapses/label/2?relationships=true", server.WebAPIPath, uuid)
	testResponseLabel(t, nil, "%snode/%s/mysynapses/label/3?relationships=true", server.WebAPIPath, uuid)
	testResponseLabel(t, expectedLabel4, "%snode/%s/mysynapses/label/4?relationships=true", server.WebAPIPath, uuid)
}
func TestSyncs(t *testing.T) { datastore.OpenTest() defer datastore.CloseTest() uuid, _ := datastore.NewTestRepo() var config dvid.Config server.CreateTestInstance(t, uuid, "labelblk", "labels", config) server.CreateTestInstance(t, uuid, "labelvol", "bodies", config) server.CreateTestInstance(t, uuid, "annotation", "synapses", config) server.CreateTestSync(t, uuid, "synapses", "labels,bodies") labels, err := labelblk.GetByUUIDName(uuid, "labels") if err != nil { t.Fatalf("Can't obtain data instance via GetByUUIDName: %v\n", err) } bodies, err := labelvol.GetByUUIDName(uuid, "bodies") if err != nil { t.Fatalf("Can't obtain data instance via GetByUUIDName: %v\n", err) } synapses, err := annotation.GetByUUIDName(uuid, "synapses") if err != nil { t.Fatalf("Couldn't get synapses data instance: %v\n", err) } syncs := synapses.SyncedData() if len(syncs) != 2 { t.Errorf("Expected 2 syncs, got %d syncs instead.\n", len(syncs)) } _, found := syncs[labels.DataUUID()] if !found { t.Errorf("Expected labels UUID (%d) got: %v\n", labels.DataUUID(), syncs) } _, found = syncs[bodies.DataUUID()] if !found { t.Errorf("Expected bodies UUID (%d) got: %v\n", bodies.DataUUID(), syncs) } server.CreateTestInstance(t, uuid, "labelvol", "bodies2", config) bodies2, err := labelvol.GetByUUIDName(uuid, "bodies2") if err != nil { t.Fatalf("Can't obtain data instance via GetByUUIDName: %v\n", err) } server.CreateTestSync(t, uuid, "synapses", "bodies2") syncs = synapses.SyncedData() if len(syncs) != 3 { t.Errorf("Expected 3 syncs, got %d syncs instead.\n", len(syncs)) } _, found = syncs[labels.DataUUID()] if !found { t.Errorf("Expected labels UUID (%d) got: %v\n", labels.DataUUID(), syncs) } _, found = syncs[bodies.DataUUID()] if !found { t.Errorf("Expected bodies UUID (%d) got: %v\n", bodies.DataUUID(), syncs) } _, found = syncs[bodies2.DataUUID()] if !found { t.Errorf("Expected bodies2 UUID (%d) got: %v\n", bodies2.DataUUID(), syncs) } server.CreateTestInstance(t, uuid, "labelvol", "bodies3", 
config) server.CreateTestReplaceSync(t, uuid, "synapses", "bodies3") syncs = synapses.SyncedData() if len(syncs) != 1 { t.Errorf("Expected 1 sync, got %d syncs instead.\n", len(syncs)) } bodies3, err := labelvol.GetByUUIDName(uuid, "bodies3") if err != nil { t.Fatalf("Can't obtain data instance via GetByUUIDName: %v\n", err) } _, found = syncs[bodies3.DataUUID()] if !found { t.Errorf("Expected bodies3 UUID (%d) got: %v\n", bodies3.DataUUID(), syncs) } server.CreateTestReplaceSync(t, uuid, "synapses", "") syncs = synapses.SyncedData() if len(syncs) != 0 { t.Errorf("Expected 0 sync, got instead %v\n", syncs) } }
func TestCommitAndBranch(t *testing.T) { datastore.OpenTest() defer datastore.CloseTest() apiStr := fmt.Sprintf("%srepos", server.WebAPIPath) r := server.TestHTTP(t, "POST", apiStr, nil) var jsonResp map[string]interface{} if err := json.Unmarshal(r, &jsonResp); err != nil { t.Fatalf("Unable to unmarshal repo creation response: %s\n", string(r)) } v, ok := jsonResp["root"] if !ok { t.Fatalf("No 'root' metadata returned: %s\n", string(r)) } uuidStr, ok := v.(string) if !ok { t.Fatalf("Couldn't cast returned 'root' data (%v) into string.\n", v) } uuid := dvid.UUID(uuidStr) // Shouldn't be able to create branch on open node. branchReq := fmt.Sprintf("%snode/%s/branch", server.WebAPIPath, uuid) server.TestBadHTTP(t, "POST", branchReq, nil) // Add a keyvalue instance. server.CreateTestInstance(t, uuid, "keyvalue", "mykv", dvid.Config{}) // Commit it. payload := bytes.NewBufferString(`{"note": "This is my test commit", "log": ["line1", "line2", "some more stuff in a line"]}`) apiStr = fmt.Sprintf("%snode/%s/commit", server.WebAPIPath, uuid) server.TestHTTP(t, "POST", apiStr, payload) // Make sure committed nodes can only be read. // We shouldn't be able to write to keyvalue. keyReq := fmt.Sprintf("%snode/%s/mykv/key/foo", server.WebAPIPath, uuid) server.TestBadHTTP(t, "POST", keyReq, bytes.NewBufferString("some data")) // Create a ROI with an immutable POST request via ptquery. Should be able to still POST to it. server.CreateTestInstance(t, uuid, "roi", "myroi", dvid.Config{}) apiStr = fmt.Sprintf("%snode/%s/myroi/ptquery", server.WebAPIPath, uuid) queryJSON := "[[10, 10, 10], [20, 20, 20], [30, 30, 30], [40, 40, 40], [50, 50, 50]]" server.TestHTTP(t, "POST", apiStr, bytes.NewReader([]byte(queryJSON))) // we have no ROI so just testing HTTP. // Should be able to create branch now that we've committed parent. 
respData := server.TestHTTP(t, "POST", branchReq, nil) resp := struct { Child dvid.UUID `json:"child"` }{} if err := json.Unmarshal(respData, &resp); err != nil { t.Errorf("Expected 'child' JSON response. Got %s\n", string(respData)) } // We should be able to write to that keyvalue now in the child. keyReq = fmt.Sprintf("%snode/%s/mykv/key/foo", server.WebAPIPath, resp.Child) server.TestHTTP(t, "POST", keyReq, bytes.NewBufferString("some data")) // We should also be able to write to the repo-wide log. logReq := fmt.Sprintf("%srepo/%s/log", server.WebAPIPath, uuid) server.TestHTTP(t, "POST", logReq, bytes.NewBufferString(`{"log": ["a log mesage"]}`)) }
// TestLabels is an end-to-end test of labelsz top/count/threshold endpoints.
// It ingests a labeled test volume with synced labelvol + annotation
// instances, creates two labelsz instances -- "noroi" (whole volume) and
// "withroi" (restricted to an ROI) -- and verifies labelsz results after
// each mutation: annotation move, annotation delete, body merge, fine
// (sparsevol) split, and coarse (block-level) split.
func TestLabels(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	// Create testbed volume and data instances
	uuid, _ := datastore.NewTestRepo()
	var config dvid.Config
	server.CreateTestInstance(t, uuid, "labelblk", "labels", config)
	server.CreateTestInstance(t, uuid, "labelvol", "bodies", config)

	// Establish syncs
	server.CreateTestSync(t, uuid, "labels", "bodies")
	server.CreateTestSync(t, uuid, "bodies", "labels")

	// Populate the labels, which should automatically populate the labelvol
	_ = createLabelTestVolume(t, uuid, "labels")

	if err := datastore.BlockOnUpdating(uuid, "labels"); err != nil {
		t.Fatalf("Error blocking on sync of labels: %v\n", err)
	}

	// Add annotations syncing with "labels" instance.
	server.CreateTestInstance(t, uuid, "annotation", "mysynapses", config)
	server.CreateTestSync(t, uuid, "mysynapses", "labels,bodies")

	// Create a ROI that will be used for our labelsz.
	server.CreateTestInstance(t, uuid, "roi", "myroi", config)
	roiRequest := fmt.Sprintf("%snode/%s/myroi/roi", server.WebAPIPath, uuid)
	server.TestHTTP(t, "POST", roiRequest, getROIReader())

	// Create labelsz instances synced to the above annotations.
	server.CreateTestInstance(t, uuid, "labelsz", "noroi", config)
	server.CreateTestSync(t, uuid, "noroi", "mysynapses")
	// The "withroi" instance is restricted to the ROI via its config.
	config.Set("ROI", fmt.Sprintf("myroi,%s", uuid))
	server.CreateTestInstance(t, uuid, "labelsz", "withroi", config)
	server.CreateTestSync(t, uuid, "withroi", "mysynapses")

	// PUT first batch of synapses.
	var synapses annotation.Elements
	var x, y, z int32
	// This should put 31x31x31 (29,791) PostSyn in volume with fewer in label 200 than 300.
	// There will be 15 along each dimension from 0 -> 63, then 16 from 64 -> 127.
	// Label 100 will have 15 x 31 x 31 = 14415
	// Label 200 will have 16 x 31 x 15 = 7440
	// Label 300 will have 16 x 31 x 16 = 7936
	for z = 4; z < 128; z += 4 {
		for y = 4; y < 128; y += 4 {
			for x = 4; x < 128; x += 4 {
				e := annotation.Element{
					annotation.ElementNR{
						Pos:  dvid.Point3d{x, y, z},
						Kind: annotation.PostSyn,
					},
					[]annotation.Relationship{},
				}
				synapses = append(synapses, e)
			}
		}
	}
	// This should put 32x32x32 (32,768) PreSyn in volume split 1/2, 1/4, 1/4
	for z = 2; z < 128; z += 4 {
		for y = 2; y < 128; y += 4 {
			for x = 2; x < 128; x += 4 {
				e := annotation.Element{
					annotation.ElementNR{
						Pos:  dvid.Point3d{x, y, z},
						Kind: annotation.PreSyn,
					},
					[]annotation.Relationship{},
				}
				synapses = append(synapses, e)
			}
		}
	}
	testJSON, err := json.Marshal(synapses)
	if err != nil {
		t.Fatal(err)
	}
	url := fmt.Sprintf("%snode/%s/mysynapses/elements", server.WebAPIPath, uuid)
	server.TestHTTP(t, "POST", url, strings.NewReader(string(testJSON)))

	// Check if we have correct sequencing for no ROI labelsz.
	if err := datastore.BlockOnUpdating(uuid, "noroi"); err != nil {
		t.Fatalf("Error blocking on sync of noroi labelsz: %v\n", err)
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/3/PreSyn", server.WebAPIPath, uuid)
	data := server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":16384},{"Label":200,"Size":8192},{"Label":300,"Size":8192}]` {
		t.Errorf("Got back incorrect PreSyn noroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/count/100/PreSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `{"Label":100,"PreSyn":16384}` {
		t.Errorf("Got back incorrect PreSyn noroi count for label 100:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/count/200/PreSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `{"Label":200,"PreSyn":8192}` {
		t.Errorf("Got back incorrect PreSyn noroi count for label 200:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/3/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":14415},{"Label":300,"Size":7936},{"Label":200,"Size":7440}]` {
		t.Errorf("Got back incorrect PostSyn noroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/3/AllSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":30799},{"Label":300,"Size":16128},{"Label":200,"Size":15632}]` {
		t.Errorf("Got back incorrect AllSync noroi ranking:\n%v\n", string(data))
	}
	// Threshold queries return only labels at/above the given size,
	// optionally paged with offset/n query strings.
	url = fmt.Sprintf("%snode/%s/noroi/threshold/15633/AllSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":30799},{"Label":300,"Size":16128}]` {
		t.Errorf("Got back incorrect AllSyn noroi threshold:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/threshold/1000/AllSyn?offset=1&n=2", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":300,"Size":16128},{"Label":200,"Size":15632}]` {
		t.Errorf("Got back incorrect AllSyn noroi threshold:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/threshold/1000/AllSyn?offset=8&n=2", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[]` {
		t.Errorf("Got back incorrect AllSyn noroi threshold:\n%v\n", string(data))
	}

	// Check if we have correct sequencing for ROI labelsz.
	// ROI constitutes the inner eight 32^3 blocks.
	// There are 16 PostSyn in each ROI dimension.
	// There are also 16 PreSyn in each ROI dimension.
	if err := datastore.BlockOnUpdating(uuid, "withroi"); err != nil {
		t.Fatalf("Error blocking on sync of withroi labelsz: %v\n", err)
	}
	url = fmt.Sprintf("%snode/%s/withroi/top/0/AllSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[]` {
		t.Errorf("Incorrectly handled top n=0 case, expected [] got: %v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/withroi/top/3/PreSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":2048},{"Label":200,"Size":1024},{"Label":300,"Size":1024}]` {
		t.Errorf("Got back incorrect PreSyn withroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/withroi/top/3/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":2048},{"Label":200,"Size":1024},{"Label":300,"Size":1024}]` {
		t.Errorf("Got back incorrect PostSyn withroi ranking:\n%v\n", string(data))
	}

	// Check fewer and larger N requests.
	url = fmt.Sprintf("%snode/%s/noroi/top/2/PreSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":16384},{"Label":200,"Size":8192}]` {
		t.Errorf("Got back incorrect N=2 ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/4/PreSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":16384},{"Label":200,"Size":8192},{"Label":300,"Size":8192}]` {
		t.Errorf("Got back incorrect N=4 ranking:\n%v\n", string(data))
	}

	// Test annotation move of a PostSyn from label 100->300 and also label 200->300
	url = fmt.Sprintf("%snode/%s/mysynapses/move/32_32_32/75_21_69", server.WebAPIPath, uuid)
	server.TestHTTP(t, "POST", url, nil)
	url = fmt.Sprintf("%snode/%s/mysynapses/move/68_20_20/77_21_69", server.WebAPIPath, uuid)
	server.TestHTTP(t, "POST", url, nil)

	if err := datastore.BlockOnUpdating(uuid, "noroi"); err != nil {
		t.Fatalf("Error blocking on sync of noroi labelsz: %v\n", err)
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/3/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":14414},{"Label":300,"Size":7938},{"Label":200,"Size":7439}]` {
		t.Errorf("Got back incorrect PostSyn noroi ranking after move from label 100->300:\n%v\n", string(data))
	}

	// First move took synapse out of ROI so there should be one less for label 100.
	if err := datastore.BlockOnUpdating(uuid, "withroi"); err != nil {
		t.Fatalf("Error blocking on sync of labelsz: %v\n", err)
	}
	url = fmt.Sprintf("%snode/%s/withroi/top/5/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":2047},{"Label":200,"Size":1024},{"Label":300,"Size":1024}]` {
		t.Errorf("Got back incorrect post-move PostSyn withroi ranking:\n%v\n", string(data))
	}

	// Test annotation deletion of moved PostSyn from label 300
	url = fmt.Sprintf("%snode/%s/mysynapses/element/75_21_69", server.WebAPIPath, uuid)
	server.TestHTTP(t, "DELETE", url, nil)
	url = fmt.Sprintf("%snode/%s/mysynapses/element/77_21_69", server.WebAPIPath, uuid)
	server.TestHTTP(t, "DELETE", url, nil)

	if err := datastore.BlockOnUpdating(uuid, "noroi"); err != nil {
		t.Fatalf("Error blocking on sync of noroi labelsz: %v\n", err)
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/3/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":14414},{"Label":300,"Size":7936},{"Label":200,"Size":7439}]` {
		t.Errorf("Got back incorrect PostSyn noroi ranking after deletions from label 300:\n%v\n", string(data))
	}

	// Check sync on merge.
	if err := datastore.BlockOnUpdating(uuid, "bodies"); err != nil {
		t.Fatalf("Error blocking on sync of bodies: %v\n", err)
	}
	testMerge := mergeJSON(`[200, 300]`)
	testMerge.send(t, uuid, "bodies")

	// Block on the whole sync chain; the extra sleep allows asynchronous
	// denormalization to start before we block on it.
	if err := datastore.BlockOnUpdating(uuid, "labels"); err != nil {
		t.Fatalf("Error blocking on sync of labels: %v\n", err)
	}
	time.Sleep(1 * time.Second)
	if err := datastore.BlockOnUpdating(uuid, "mysynapses"); err != nil {
		t.Fatalf("Error blocking on sync of synapses: %v\n", err)
	}
	if err := datastore.BlockOnUpdating(uuid, "noroi"); err != nil {
		t.Fatalf("Error blocking on sync of labelsz: %v\n", err)
	}
	if err := datastore.BlockOnUpdating(uuid, "withroi"); err != nil {
		t.Fatalf("Error blocking on sync of labelsz: %v\n", err)
	}

	url = fmt.Sprintf("%snode/%s/withroi/top/5/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":200,"Size":2048},{"Label":100,"Size":2047}]` {
		t.Errorf("Got back incorrect post-merge PostSyn withroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/withroi/count/100/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `{"Label":100,"PostSyn":2047}` {
		t.Errorf("Got back incorrect post-merge PostSyn withroi count of label 100:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/3/PreSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":16384},{"Label":200,"Size":16384}]` {
		t.Errorf("Got back incorrect post-merge PreSyn noroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/3/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":200,"Size":15375},{"Label":100,"Size":14414}]` {
		t.Errorf("Got back incorrect post-merge PostSyn noroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/3/AllSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":200,"Size":31759},{"Label":100,"Size":30798}]` {
		t.Errorf("Got back incorrect post-merge AllSyn noroi ranking:\n%v\n", string(data))
	}

	// Check threshold endpoint
	url = fmt.Sprintf("%snode/%s/withroi/threshold/2048/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":200,"Size":2048}]` {
		t.Errorf("Got back incorrect post-merge PostSyn withroi threshold:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/threshold/16384/PreSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":16384},{"Label":200,"Size":16384}]` {
		t.Errorf("Got back incorrect post-merge PreSyn noroi threshold:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/threshold/15000/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":200,"Size":15375}]` {
		t.Errorf("Got back incorrect post-merge PostSyn noroi threshold:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/threshold/0/PostSyn?offset=1&n=1", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":14414}]` {
		t.Errorf("Got back incorrect post-merge PostSyn noroi threshold with offset/n:\n%v\n", string(data))
	}

	// Create the sparsevol encoding for split area with label 100 -> 150.
	// Split has offset (0, 0, 0) with size (19, 19, 19).
	// PreSyn in split = 5 x 5 x 5 = 125
	// PostSyn in split = 4 x 4 x 4 = 64
	var rles dvid.RLEs
	for z := int32(0); z < 19; z++ {
		for y := int32(0); y < 19; y++ {
			start := dvid.Point3d{0, y, z}
			rles = append(rles, dvid.NewRLE(start, 19))
		}
	}
	buf := getBytesRLE(t, rles)

	// Submit the split sparsevol
	url = fmt.Sprintf("%snode/%s/%s/split/%d?splitlabel=150", server.WebAPIPath, uuid, "bodies", 100)
	data = server.TestHTTP(t, "POST", url, buf)
	jsonVal := make(map[string]uint64)
	if err := json.Unmarshal(data, &jsonVal); err != nil {
		t.Errorf("Unable to get new label from split. Instead got: %v\n", jsonVal)
	}

	// Check sync on split.
	if err := datastore.BlockOnUpdating(uuid, "labels"); err != nil {
		t.Fatalf("Error blocking on sync of labels: %v\n", err)
	}
	time.Sleep(1 * time.Second)
	if err := datastore.BlockOnUpdating(uuid, "mysynapses"); err != nil {
		t.Fatalf("Error blocking on sync of synapses: %v\n", err)
	}
	if err := datastore.BlockOnUpdating(uuid, "noroi"); err != nil {
		t.Fatalf("Error blocking on sync of labelsz: %v\n", err)
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/3/PreSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":200,"Size":16384},{"Label":100,"Size":16259},{"Label":150,"Size":125}]` {
		t.Errorf("Got back incorrect post-split PreSyn noroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/3/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":200,"Size":15375},{"Label":100,"Size":14350},{"Label":150,"Size":64}]` {
		t.Errorf("Got back incorrect post-split PostSyn noroi ranking:\n%v\n", string(data))
	}

	// Create the encoding for coarse split area in block coordinates from label 200.
	// Split has offset (64, 96, 96) with size (64, 32, 32).
	// PreSyn in split = 16 x 8 x 8 = 1024
	// PostSyn in split = 16 x 8 x 8 = 1024
	rles = dvid.RLEs{
		dvid.NewRLE(dvid.Point3d{2, 3, 3}, 2),
	}
	buf = getBytesRLE(t, rles)

	// Submit the coarse split of 200 -> 250
	url = fmt.Sprintf("%snode/%s/%s/split-coarse/200?splitlabel=250", server.WebAPIPath, uuid, "bodies")
	data = server.TestHTTP(t, "POST", url, buf)
	jsonVal = make(map[string]uint64)
	if err := json.Unmarshal(data, &jsonVal); err != nil {
		t.Errorf("Unable to get new label from split. Instead got: %v\n", jsonVal)
	}

	// Check sync on split.
	if err := datastore.BlockOnUpdating(uuid, "labels"); err != nil {
		t.Fatalf("Error blocking on sync of labels: %v\n", err)
	}
	time.Sleep(1 * time.Second)
	if err := datastore.BlockOnUpdating(uuid, "mysynapses"); err != nil {
		t.Fatalf("Error blocking on sync of synapses: %v\n", err)
	}
	if err := datastore.BlockOnUpdating(uuid, "noroi"); err != nil {
		t.Fatalf("Error blocking on sync of labelsz: %v\n", err)
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/5/PreSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":16259},{"Label":200,"Size":15360},{"Label":250,"Size":1024},{"Label":150,"Size":125}]` {
		t.Errorf("Got back incorrect post-coarsesplit PreSyn noroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/5/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":200,"Size":14351},{"Label":100,"Size":14350},{"Label":250,"Size":1024},{"Label":150,"Size":64}]` {
		t.Errorf("Got back incorrect post-coarsesplit PostSyn noroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/top/5/AllSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":30609},{"Label":200,"Size":29711},{"Label":250,"Size":2048},{"Label":150,"Size":189}]` {
		t.Errorf("Got back incorrect post-coarsesplit AllSyn noroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/noroi/count/200/AllSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `{"Label":200,"AllSyn":29711}` {
		t.Errorf("Got back incorrect post-coarsesplit AllSyn noroi count of label 200:\n%v\n", string(data))
	}

	// Check the ROI-restricted labelsz instance which should only be affected by merge.
	url = fmt.Sprintf("%snode/%s/withroi/top/5/PreSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":100,"Size":2048},{"Label":200,"Size":2048}]` {
		t.Errorf("Got back incorrect post-coarsesplit PreSyn withroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/withroi/top/5/PostSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":200,"Size":2048},{"Label":100,"Size":2047}]` {
		t.Errorf("Got back incorrect post-coarsesplit PostSyn withroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/withroi/top/5/AllSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `[{"Label":200,"Size":4096},{"Label":100,"Size":4095}]` {
		t.Errorf("Got back incorrect post-coarsesplit AllSyn withroi ranking:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/withroi/count/200/AllSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `{"Label":200,"AllSyn":4096}` {
		t.Errorf("Got back incorrect post-coarsesplit AllSyn withroi count of label 200:\n%v\n", string(data))
	}
	url = fmt.Sprintf("%snode/%s/withroi/count/100/AllSyn", server.WebAPIPath, uuid)
	data = server.TestHTTP(t, "GET", url, nil)
	if string(data) != `{"Label":100,"AllSyn":4095}` {
		t.Errorf("Got back incorrect post-coarsesplit AllSyn withroi count of label 100:\n%v\n", string(data))
	}
}