// Make sure new keyvalue data have different IDs.
func TestNewKeyvalueDifferent(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid, _ := initTestRepo()

	// Add data
	config := dvid.NewConfig()
	dataservice1, err := datastore.NewData(uuid, kvtype, "instance1", config)
	if err != nil {
		t.Errorf("Error creating new keyvalue instance: %v\n", err)
	}
	kv1, ok := dataservice1.(*Data)
	if !ok {
		t.Errorf("Returned new data instance 1 is not keyvalue.Data\n")
	}
	if kv1.DataName() != "instance1" {
		t.Errorf("New keyvalue data instance name set incorrectly: %q != %q\n", kv1.DataName(), "instance1")
	}

	dataservice2, err := datastore.NewData(uuid, kvtype, "instance2", config)
	if err != nil {
		t.Errorf("Error creating new keyvalue instance: %v\n", err)
	}
	kv2, ok := dataservice2.(*Data)
	if !ok {
		t.Errorf("Returned new data instance 2 is not keyvalue.Data\n")
	}

	if kv1.InstanceID() == kv2.InstanceID() {
		t.Errorf("Instance IDs should be different: %d == %d\n", kv1.InstanceID(), kv2.InstanceID())
	}
}
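// TestLabelblkDirectAPI ingests a block-aligned label volume through the Go voxels API
// and verifies the retrieved subvolume matches what was stored.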
func TestLabelblkDirectAPI(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid, versionID := initTestRepo()
	labels := newDataInstance(uuid, t, "mylabels")
	labelsCtx := datastore.NewVersionedCtx(labels, versionID)

	// Create a fake block-aligned label volume
	offset := dvid.Point3d{32, 0, 64}
	size := dvid.Point3d{96, 64, 160}
	subvol := dvid.NewSubvolume(offset, size)
	data := makeVolume(offset, size)

	// Store it into datastore at root
	v, err := labels.NewVoxels(subvol, data)
	if err != nil {
		t.Fatalf("Unable to make new labels Voxels: %v\n", err)
	}
	if err = labels.IngestVoxels(versionID, 1, v, ""); err != nil {
		t.Errorf("Unable to put labels for %s: %v\n", labelsCtx, err)
	}
	if v.NumVoxels() != int64(len(data))/8 {
		t.Errorf("# voxels (%d) after PutVoxels != # original voxels (%d)\n", v.NumVoxels(), int64(len(data))/8)
	}

	// Read the stored image
	v2, err := labels.NewVoxels(subvol, nil)
	if err != nil {
		t.Errorf("Unable to make new labels ExtHandler: %v\n", err)
	}
	if err = labels.GetVoxels(versionID, v2, ""); err != nil {
		t.Errorf("Unable to get voxels for %s: %v\n", labelsCtx, err)
	}

	// Make sure the retrieved image matches the original
	if v.Stride() != v2.Stride() {
		t.Errorf("Stride in retrieved subvol incorrect\n")
	}
	if v.Interpolable() != v2.Interpolable() {
		t.Errorf("Interpolable bool in retrieved subvol incorrect\n")
	}
	if !reflect.DeepEqual(v.Size(), v2.Size()) {
		t.Errorf("Size in retrieved subvol incorrect: %s vs expected %s\n", v2.Size(), v.Size())
	}
	if v.NumVoxels() != v2.NumVoxels() {
		t.Errorf("# voxels in retrieved is different: %d vs expected %d\n", v2.NumVoxels(), v.NumVoxels())
	}
	byteData := v2.Data()
	for i := int64(0); i < v2.NumVoxels()*8; i++ {
		if byteData[i] != data[i] {
			t.Logf("Size of data: %d bytes from GET, %d bytes in PUT\n", len(byteData), len(data))
			t.Fatalf("GET subvol (%d) != PUT subvol (%d) @ uint64 #%d", byteData[i], data[i], i)
		}
	}
}
// Make sure new labelgraph data have different IDs.
func TestNewLabelgraphDifferent(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid, _ := initTestRepo()

	// Add data
	config := dvid.NewConfig()
	dataservice1, err := datastore.NewData(uuid, dtype, "lg1", config)
	if err != nil {
		t.Errorf("Error creating new labelgraph instance 1: %v\n", err)
	}
	data1, ok := dataservice1.(*Data)
	if !ok {
		t.Errorf("Returned new data instance 1 is not labelgraph.Data\n")
	}
	dataservice2, err := datastore.NewData(uuid, dtype, "lg2", config)
	if err != nil {
		t.Errorf("Error creating new labelgraph instance 2: %v\n", err)
	}
	data2, ok := dataservice2.(*Data)
	if !ok {
		t.Errorf("Returned new data instance 2 is not labelgraph.Data\n")
	}
	if data1.InstanceID() == data2.InstanceID() {
		t.Errorf("Instance IDs should be different: %d == %d\n", data1.InstanceID(), data2.InstanceID())
	}
}
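// TestROICreateAndSerialize checks that new ROI instances get distinct IDs and that an
// ROI survives a Gob encode/decode round trip.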
func TestROICreateAndSerialize(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid, _ := initTestRepo()

	// Add data
	config := dvid.NewConfig()
	dataservice1, err := datastore.NewData(uuid, roitype, "myroi", config)
	if err != nil {
		t.Errorf("Error creating new roi instance: %v\n", err)
	}
	roi1, ok := dataservice1.(*Data)
	if !ok {
		t.Errorf("Returned new data instance 1 is not roi.Data\n")
	}
	if roi1.DataName() != "myroi" {
		t.Errorf("New roi data instance name set incorrectly: %q != %q\n", roi1.DataName(), "myroi")
	}

	config.Set("BlockSize", "15,16,17")
	dataservice2, err := datastore.NewData(uuid, roitype, "myroi2", config)
	if err != nil {
		t.Errorf("Error creating new roi instance: %v\n", err)
	}
	roi2, ok := dataservice2.(*Data)
	if !ok {
		t.Errorf("Returned new data instance 2 is not roi.Data\n")
	}
	if roi1.InstanceID() == roi2.InstanceID() {
		t.Errorf("Instance IDs should be different: %d == %d\n", roi1.InstanceID(), roi2.InstanceID())
	}

	// Test persistence of storage.
	roi2.MinZ = 13
	roi2.MaxZ = 3098
	gobBytes, err := roi2.GobEncode()
	if err != nil {
		t.Fatalf("Could not Gob encode roi: %v\n", err)
	}
	var received Data
	if err = received.GobDecode(gobBytes); err != nil {
		t.Fatalf("Could not decode Gob-encoded roi: %v\n", err)
	}
	if !roi2.Data.Equals(received.Data) {
		t.Errorf("ROI base Data has bad roundtrip:\nOriginal:\n%v\nReceived:\n%v\n", *(roi2.Data), *(received.Data))
	}
	if !reflect.DeepEqual(roi2.Properties, received.Properties) {
		t.Errorf("ROI extended properties has bad roundtrip:\nOriginal:\n%v\nReceived:\n%v\n", roi2.Properties, received.Properties)
	}
}
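// TestKeyvalueRequests runs the shared keyvalue HTTP request tests against a fresh instance.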
func TestKeyvalueRequests(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid, versionID := initTestRepo()

	testRequest(t, uuid, versionID, "mykeyvalue")
}
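// TestFloatInstanceCreation creates a float32blk instance over HTTP and verifies the optional
// block size, voxel size, and voxel units in the returned metadata.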
func TestFloatInstanceCreation(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid, _ := datastore.NewTestRepo()

	// Create new voxels instance with optional parameters
	name := "weights"
	metadata := fmt.Sprintf(`{ "typename": "float32blk", "dataname": %q, "blocksize": "64,43,28", "VoxelSize": "13.1, 14.2, 15.3", "VoxelUnits": "picometers,nanometers,microns" }`, name)
	apiStr := fmt.Sprintf("%srepo/%s/instance", server.WebAPIPath, uuid)
	server.TestHTTP(t, "POST", apiStr, bytes.NewBufferString(metadata))

	// Get metadata and make sure optional settings have been set.
	apiStr = fmt.Sprintf("%snode/%s/%s/info", server.WebAPIPath, uuid, name)
	result := server.TestHTTP(t, "GET", apiStr, nil)
	var parsed = struct {
		Base struct {
			TypeName, Name string
		}
		Extended struct {
			BlockSize  dvid.Point3d
			VoxelSize  dvid.NdFloat32
			VoxelUnits dvid.NdString
		}
	}{}
	if err := json.Unmarshal(result, &parsed); err != nil {
		t.Fatalf("Error parsing JSON response of new instance metadata: %v\n", err)
	}
	if parsed.Base.Name != name {
		t.Errorf("Parsed new instance has unexpected name: %s != %s (expected)\n", parsed.Base.Name, name)
	}
	if parsed.Base.TypeName != "float32blk" {
		t.Errorf("Parsed new instance has unexpected type name: %s != float32blk (expected)\n", parsed.Base.TypeName)
	}
	if !parsed.Extended.BlockSize.Equals(dvid.Point3d{64, 43, 28}) {
		t.Errorf("Bad block size in new float32blk instance: %s\n", parsed.Extended.BlockSize)
	}
	if !parsed.Extended.VoxelSize.Equals(dvid.NdFloat32{13.1, 14.2, 15.3}) {
		t.Errorf("Bad voxel size in new float32blk instance: %s\n", parsed.Extended.VoxelSize)
	}
	if parsed.Extended.VoxelUnits[0] != "picometers" {
		t.Errorf("Got %q for X voxel units, not picometers\n", parsed.Extended.VoxelUnits[0])
	}
	if parsed.Extended.VoxelUnits[1] != "nanometers" {
		t.Errorf("Got %q for Y voxel units, not nanometers\n", parsed.Extended.VoxelUnits[1])
	}
	if parsed.Extended.VoxelUnits[2] != "microns" {
		t.Errorf("Got %q for Z voxel units, not microns\n", parsed.Extended.VoxelUnits[2])
	}
}
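// TestRequests exercises the synapse elements API: posting elements, bounded GETs,
// tag queries, moves, and deletes.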
func TestRequests(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid, _ := initTestRepo()

	config := dvid.NewConfig()
	dataservice, err := datastore.NewData(uuid, syntype, "mysynapses", config)
	if err != nil {
		t.Fatalf("Error creating new data instance: %v\n", err)
	}
	data, ok := dataservice.(*Data)
	if !ok {
		t.Fatalf("Returned new data instance is not synapse.Data\n")
	}

	// PUT first batch of synapses
	testJSON, err := json.Marshal(testData)
	if err != nil {
		t.Fatal(err)
	}
	url1 := fmt.Sprintf("%snode/%s/%s/elements", server.WebAPIPath, uuid, data.DataName())
	server.TestHTTP(t, "POST", url1, strings.NewReader(string(testJSON)))

	// GET synapses back within superset bounding box and make sure all data is there.
	testResponse(t, testData, "%snode/%s/%s/elements/1000_1000_1000/0_0_0", server.WebAPIPath, uuid, data.DataName())

	// Test subset GET
	testResponse(t, expected3, "%snode/%s/%s/elements/5_5_5/126_60_97", server.WebAPIPath, uuid, data.DataName())

	// Test Tag 1
	tag := Tag("Synapse2")
	synapse2 := getTag(tag, testData)
	testResponse(t, synapse2, "%snode/%s/%s/tag/%s?relationships=true", server.WebAPIPath, uuid, data.DataName(), tag)

	// Test Tag 2
	tag2 := Tag("Zlt90")
	zlt90 := getTag(tag2, testData)
	testResponse(t, zlt90, "%snode/%s/%s/tag/%s?relationships=true", server.WebAPIPath, uuid, data.DataName(), tag2)

	// Test move
	url5 := fmt.Sprintf("%snode/%s/%s/move/127_63_99/127_64_100", server.WebAPIPath, uuid, data.DataName())
	server.TestHTTP(t, "POST", url5, nil)
	testResponse(t, afterMove, "%snode/%s/%s/elements/1000_1000_1000/0_0_0", server.WebAPIPath, uuid, data.DataName())

	// --- check tag
	synapse2 = getTag(tag, afterMove)
	testResponse(t, synapse2, "%snode/%s/%s/tag/%s?relationships=true", server.WebAPIPath, uuid, data.DataName(), tag)

	// Test delete
	url6 := fmt.Sprintf("%snode/%s/%s/element/127_64_100", server.WebAPIPath, uuid, data.DataName())
	server.TestHTTP(t, "DELETE", url6, nil)
	testResponse(t, afterDelete, "%snode/%s/%s/elements/1000_1000_1000/0_0_0", server.WebAPIPath, uuid, data.DataName())

	// --- check tag
	synapse2 = getTag(tag, afterDelete)
	testResponse(t, synapse2, "%snode/%s/%s/tag/%s?relationships=true", server.WebAPIPath, uuid, data.DataName(), tag)
}
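// TestLog posts node log entries and verifies they are stored and appended in order.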
func TestLog(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid := createRepo(t)

	// Post a log
	payload := bytes.NewBufferString(`{"log": ["line1", "line2", "some more stuff in a line"]}`)
	apiStr := fmt.Sprintf("%snode/%s/log", WebAPIPath, uuid)
	TestHTTP(t, "POST", apiStr, payload)

	// Verify it was saved.
	r := TestHTTP(t, "GET", apiStr, nil)
	jsonResp := make(map[string][]string)
	if err := json.Unmarshal(r, &jsonResp); err != nil {
		t.Fatalf("Unable to unmarshal log response: %s\n", string(r))
	}
	if len(jsonResp) != 1 {
		t.Errorf("Bad log return: %s\n", string(r))
	}
	data, ok := jsonResp["log"]
	if !ok {
		t.Fatalf("No 'log' data returned: %s\n", string(r))
	}
	if len(data) != 3 {
		t.Fatalf("Got wrong # of lines in log: %v\n", data)
	}
	testLog(t, data[0], "line1")
	testLog(t, data[1], "line2")
	testLog(t, data[2], "some more stuff in a line")

	// Add some more to log
	payload = bytes.NewBufferString(`{"log": ["line4", "line5"]}`)
	apiStr = fmt.Sprintf("%snode/%s/log", WebAPIPath, uuid)
	TestHTTP(t, "POST", apiStr, payload)

	// Verify it was appended.
	r = TestHTTP(t, "GET", apiStr, nil)
	jsonResp = make(map[string][]string)
	if err := json.Unmarshal(r, &jsonResp); err != nil {
		t.Fatalf("Unable to unmarshal log response: %s\n", string(r))
	}
	if len(jsonResp) != 1 {
		t.Errorf("Bad log return: %s\n", string(r))
	}
	data, ok = jsonResp["log"]
	if !ok {
		t.Fatalf("No 'log' data returned: %s\n", string(r))
	}
	if len(data) != 5 {
		t.Errorf("Got wrong # of lines in log: %v\n", data)
	}
	testLog(t, data[3], "line4")
	testLog(t, data[4], "line5")
}
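// TestBasic verifies that a new multichan16 instance can be created.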
func TestBasic(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid, _ := initTestRepo()

	config := dvid.NewConfig()
	_, err := datastore.NewData(uuid, dtype, "instance1", config)
	if err != nil {
		t.Errorf("Error creating new multichan16 instance: %v\n", err)
	}
}
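// TestROIPartition posts an ROI and checks the optimized subvolume partitioning against the
// expected result.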
func TestROIPartition(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	// Create the ROI dataservice.
	uuid, versionID := initTestRepo()

	config := dvid.NewConfig()
	dataservice, err := datastore.NewData(uuid, roitype, "roi", config)
	if err != nil {
		t.Errorf("Error creating new roi instance: %v\n", err)
	}
	data, ok := dataservice.(*Data)
	if !ok {
		t.Errorf("Returned new data instance is not roi.Data\n")
	}

	// PUT an ROI
	roiRequest := fmt.Sprintf("%snode/%s/%s/roi", server.WebAPIPath, uuid, data.DataName())
	req, err := http.NewRequest("POST", roiRequest, getSpansJSON(testSpans))
	if err != nil {
		t.Errorf("Unsuccessful POST request (%s): %v\n", roiRequest, err)
	}
	ctx := datastore.NewVersionedCtx(data, versionID)
	w := httptest.NewRecorder()
	data.ServeHTTP(uuid, ctx, w, req)
	if w.Code != http.StatusOK {
		t.Errorf("Bad server response roi POST, status %d, for roi %q\n", w.Code, data.DataName())
	}

	// Request the standard subvolume partitioning
	partitionReq := fmt.Sprintf("%snode/%s/%s/partition?batchsize=5&optimized=true", server.WebAPIPath, uuid, data.DataName())
	req, err = http.NewRequest("GET", partitionReq, nil)
	if err != nil {
		t.Errorf("Unsuccessful GET request (%s): %v\n", partitionReq, err)
	}
	w = httptest.NewRecorder()
	data.ServeHTTP(uuid, ctx, w, req)
	if w.Code != http.StatusOK {
		t.Errorf("Bad server response roi GET, status %d, for roi %q\n", w.Code, data.DataName())
	}
	var subvolJSON, expectedJSON interface{}
	response := w.Body.Bytes()
	if err := json.Unmarshal(response, &subvolJSON); err != nil {
		t.Errorf("Can't unmarshal JSON: %s\n", w.Body.Bytes())
	}
	json.Unmarshal([]byte(expectedPartition), &expectedJSON)
	if !reflect.DeepEqual(subvolJSON, expectedJSON) {
		t.Errorf("Error doing optimized subvolume partitioning. Got bad result:\n%s\n", string(response))
	}
}
// Test added after error in getting two paths to the same ancestor k/v after merge.
func TestDiamondGetOnMerge(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid, _ := initTestRepo()

	config := dvid.NewConfig()
	dataservice, err := datastore.NewData(uuid, kvtype, "mergetest", config)
	if err != nil {
		t.Fatalf("Error creating new keyvalue instance: %v\n", err)
	}
	data, ok := dataservice.(*Data)
	if !ok {
		t.Fatalf("Returned new data instance is not keyvalue.Data\n")
	}

	// PUT a value
	key1 := "mykey"
	value1 := "some stuff"
	key1req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid, data.DataName(), key1)
	server.TestHTTP(t, "POST", key1req, strings.NewReader(value1))

	if err = datastore.Commit(uuid, "my commit msg", []string{"stuff one", "stuff two"}); err != nil {
		t.Errorf("Unable to lock root node %s: %v\n", uuid, err)
	}
	uuid2, err := datastore.NewVersion(uuid, "first child", nil)
	if err != nil {
		t.Fatalf("Unable to create 1st child off root %s: %v\n", uuid, err)
	}
	if err = datastore.Commit(uuid2, "first child", nil); err != nil {
		t.Errorf("Unable to commit node %s: %v\n", uuid2, err)
	}
	uuid3, err := datastore.NewVersion(uuid, "second child", nil)
	if err != nil {
		t.Fatalf("Unable to create 2nd child off root %s: %v\n", uuid, err)
	}
	if err = datastore.Commit(uuid3, "second child", nil); err != nil {
		t.Errorf("Unable to commit node %s: %v\n", uuid3, err)
	}
	child, err := datastore.Merge([]dvid.UUID{uuid2, uuid3}, "merging stuff", datastore.MergeConflictFree)
	if err != nil {
		t.Errorf("Error doing merge: %v\n", err)
	}

	// We should be able to see just the original uuid value of the k/v
	childreq := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, child, data.DataName(), key1)
	returnValue := server.TestHTTP(t, "GET", childreq, nil)
	if string(returnValue) != value1 {
		t.Errorf("Error on merged child, key %q: expected %q, got %q\n", key1, value1, string(returnValue))
	}
}
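// TestBlockAPI round-trips raw grayscale blocks and verifies that unwritten block spans are
// returned as zeros.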
func TestBlockAPI(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid, _ := initTestRepo()
	grayscale := makeGrayscale(uuid, t, "grayscale")

	// construct random blocks of data.
	numBlockBytes := int32(grayscale.BlockSize().Prod())
	testBlocks := 1001
	var blockData []byte
	for i := 0; i < testBlocks; i++ {
		blockData = append(blockData, dvid.RandomBytes(numBlockBytes)...)
	}

	// set start of span
	x := rand.Int31()
	y := rand.Int31()
	z := rand.Int31()

	// Test uncompressed roundtrip

	// send the blocks
	blockReq := fmt.Sprintf("%snode/%s/grayscale/blocks/%d_%d_%d/%d", server.WebAPIPath, uuid, x, y, z, testBlocks)
	server.TestHTTP(t, "POST", blockReq, bytes.NewBuffer(blockData))

	// read same span of blocks
	returnedData := server.TestHTTP(t, "GET", blockReq, nil)

	// make sure blocks are same
	totBytes := testBlocks * int(numBlockBytes)
	if len(returnedData) != totBytes {
		t.Errorf("Returned %d bytes, expected %d bytes", len(returnedData), totBytes)
	}
	if !reflect.DeepEqual(returnedData, blockData) {
		t.Errorf("Returned block data != original block data\n")
	}

	// We should get blank blocks at different z
	z += 1
	blockReq = fmt.Sprintf("%snode/%s/grayscale/blocks/%d_%d_%d/%d", server.WebAPIPath, uuid, x, y, z, testBlocks)
	returnedData = server.TestHTTP(t, "GET", blockReq, nil)
	if len(returnedData) != totBytes {
		t.Errorf("Returned %d bytes, expected %d bytes", len(returnedData), totBytes)
	}
	for i, b := range returnedData {
		if b != 0 {
			t.Fatalf("Expected 0 at returned byte %d, got %d instead.\n", i, b)
		}
	}
}
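// TestCommitAndBranch verifies that branching requires a committed parent and that committed
// nodes reject writes.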
func TestCommitAndBranch(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	apiStr := fmt.Sprintf("%srepos", server.WebAPIPath)
	r := server.TestHTTP(t, "POST", apiStr, nil)
	var jsonResp map[string]interface{}
	if err := json.Unmarshal(r, &jsonResp); err != nil {
		t.Fatalf("Unable to unmarshal repo creation response: %s\n", string(r))
	}
	v, ok := jsonResp["root"]
	if !ok {
		t.Fatalf("No 'root' metadata returned: %s\n", string(r))
	}
	uuidStr, ok := v.(string)
	if !ok {
		t.Fatalf("Couldn't cast returned 'root' data (%v) into string.\n", v)
	}
	uuid := dvid.UUID(uuidStr)

	// Shouldn't be able to create branch on open node.
	branchReq := fmt.Sprintf("%snode/%s/branch", server.WebAPIPath, uuid)
	server.TestBadHTTP(t, "POST", branchReq, nil)

	// Add a keyvalue instance.
	server.CreateTestInstance(t, uuid, "keyvalue", "mykv", dvid.Config{})

	// Commit it.
	payload := bytes.NewBufferString(`{"note": "This is my test commit", "log": ["line1", "line2", "some more stuff in a line"]}`)
	apiStr = fmt.Sprintf("%snode/%s/commit", server.WebAPIPath, uuid)
	server.TestHTTP(t, "POST", apiStr, payload)

	// Make sure committed nodes can only be read.
	// We shouldn't be able to write to the keyvalue instance.
	keyReq := fmt.Sprintf("%snode/%s/mykv/key/foo", server.WebAPIPath, uuid)
	server.TestBadHTTP(t, "POST", keyReq, bytes.NewBufferString("some data"))

	// Should be able to create branch now that we've committed parent.
	respData := server.TestHTTP(t, "POST", branchReq, nil)
	resp := struct {
		Child dvid.UUID `json:"child"`
	}{}
	if err := json.Unmarshal(respData, &resp); err != nil {
		t.Errorf("Expected 'child' JSON response. Got %s\n", string(respData))
	}

	// We should be able to write to that keyvalue now in the child.
	keyReq = fmt.Sprintf("%snode/%s/mykv/key/foo", server.WebAPIPath, resp.Child)
	server.TestHTTP(t, "POST", keyReq, bytes.NewBufferString("some data"))
}
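// TestDeleteInstance verifies that instance deletion requires the "imsure" query string and
// fails for instances that don't exist.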
func TestDeleteInstance(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid := createRepo(t)

	// Shouldn't be able to delete instance without "imsure"
	delReq := fmt.Sprintf("%srepo/%s/%s", WebAPIPath, uuid, "absent-name")
	TestBadHTTP(t, "DELETE", delReq, nil)

	// Shouldn't be able to delete an instance that doesn't exist.
	delReq = fmt.Sprintf("%srepo/%s/%s?imsure=true", WebAPIPath, uuid, "absent-name")
	TestBadHTTP(t, "DELETE", delReq, nil)
}
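// TestReloadMetadata reloads server metadata and confirms the repo and its data instances are
// still present.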
func TestReloadMetadata(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid, _ := datastore.NewTestRepo()

	// Add data instances
	var config dvid.Config
	server.CreateTestInstance(t, uuid, "keyvalue", "foo", config)
	server.CreateTestInstance(t, uuid, "labelblk", "labels", config)
	server.CreateTestInstance(t, uuid, "roi", "someroi", config)

	// Reload the metadata
	apiStr := fmt.Sprintf("%sserver/reload-metadata", server.WebAPIPath)
	server.TestHTTP(t, "POST", apiStr, nil)

	// Make sure repo UUID still there
	jsonStr, err := datastore.MarshalJSON()
	if err != nil {
		t.Fatalf("can't get repos JSON: %v\n", err)
	}
	var jsonResp map[string](map[string]interface{})
	if err := json.Unmarshal(jsonStr, &jsonResp); err != nil {
		t.Fatalf("Unable to unmarshal repos info response: %s\n", jsonStr)
	}
	if len(jsonResp) != 1 {
		t.Errorf("reloaded repos had more than one repo: %v\n", jsonResp)
	}
	for k := range jsonResp {
		if dvid.UUID(k) != uuid {
			t.Fatalf("Expected uuid %s, got %s. Full JSON:\n%v\n", uuid, k, jsonResp)
		}
	}

	// Make sure the data instances are still there.
	_, err = datastore.GetDataByUUIDName(uuid, "foo")
	if err != nil {
		t.Errorf("Couldn't get keyvalue data instance after reload\n")
	}
	_, err = datastore.GetDataByUUIDName(uuid, "labels")
	if err != nil {
		t.Errorf("Couldn't get labelblk data instance after reload\n")
	}
	_, err = datastore.GetDataByUUIDName(uuid, "someroi")
	if err != nil {
		t.Errorf("Couldn't get roi data instance after reload\n")
	}
}
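// TestLabelsSyncing posts a label volume into a new labelblk instance; syncing across labelblk,
// labelvol, and labelsz is still TODO.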
func TestLabelsSyncing(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid := dvid.UUID(server.NewTestRepo(t))
	if len(uuid) < 5 {
		t.Fatalf("Bad root UUID for new repo: %s\n", uuid)
	}

	// Create a labelblk instance
	vol := labelVol{
		size:      dvid.Point3d{5, 5, 5}, // in blocks
		blockSize: dvid.Point3d{32, 32, 32},
		offset:    dvid.Point3d{32, 64, 96},
	}
	vol.postLabelVolume(t, "labels", uuid)

	// TODO -- Test syncing across labelblk, labelvol, labelsz.
}
// check subgraph endpoint
func TestLabelgraphPostAndDelete(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	// Create the labelgraph dataservice.
	uuid, _ := initTestRepo()

	config := dvid.NewConfig()
	dataservice, err := datastore.NewData(uuid, dtype, "lg", config)
	if err != nil {
		t.Errorf("Error creating new labelgraph instance: %v\n", err)
	}
	data, ok := dataservice.(*Data)
	if !ok {
		t.Errorf("Returned new data instance is not labelgraph.Data\n")
	}

	// PUT a labelgraph
	subgraphRequest := fmt.Sprintf("%snode/%s/%s/subgraph", server.WebAPIPath, uuid, data.DataName())
	server.TestHTTP(t, "POST", subgraphRequest, getGraphJSON())

	// Get back the labelgraph
	returnedData := server.TestHTTP(t, "GET", subgraphRequest, nil)
	retgraph, err := loadGraphJSON(returnedData)
	if err != nil {
		t.Errorf("Error on getting back JSON from subgraph GET: %v\n", err)
	}

	// Make sure the two are the same.
	if !reflect.DeepEqual(retgraph, getTestGraph()) {
		t.Errorf("Bad PUT/GET labelgraph roundtrip\nOriginal:\n%s\nReturned:\n%s\n", getTestGraph(), retgraph)
	}

	// Delete the labelgraph
	_ = server.TestHTTP(t, "DELETE", subgraphRequest, nil)

	// Subgraph should now be empty
	returnedData = server.TestHTTP(t, "GET", subgraphRequest, nil)
	expectedResp := "{\"Transactions\":[],\"Vertices\":[],\"Edges\":[]}"
	if string(returnedData) != expectedResp {
		t.Errorf("Bad subgraph after labelgraph delete. Should be %s got: %s\n", expectedResp, string(returnedData))
	}
}
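// TestDeleteInstance creates a keyvalue instance, then verifies deletion requires the "imsure"
// query string and actually removes the instance.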
func TestDeleteInstance(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	apiStr := fmt.Sprintf("%srepos", server.WebAPIPath)
	r := server.TestHTTP(t, "POST", apiStr, nil)
	var jsonResp map[string]interface{}
	if err := json.Unmarshal(r, &jsonResp); err != nil {
		t.Fatalf("Unable to unmarshal repo creation response: %s\n", string(r))
	}
	v, ok := jsonResp["root"]
	if !ok {
		t.Fatalf("No 'root' metadata returned: %s\n", string(r))
	}
	uuidStr, ok := v.(string)
	if !ok {
		t.Fatalf("Couldn't cast returned 'root' data (%v) into string.\n", v)
	}
	uuid := dvid.UUID(uuidStr)

	// Add a data instance.
	var config dvid.Config
	server.CreateTestInstance(t, uuid, "keyvalue", "foo", config)

	// Make sure it exists.
	_, err := datastore.GetDataByUUID(uuid, "foo")
	if err != nil {
		t.Errorf("Couldn't create data instance 'foo'\n")
	}

	// Shouldn't be able to delete instance without "imsure"
	delReq := fmt.Sprintf("%srepo/%s/%s", server.WebAPIPath, uuid, "foo")
	server.TestBadHTTP(t, "DELETE", delReq, nil)

	delReq = fmt.Sprintf("%srepo/%s/%s?imsure=true", server.WebAPIPath, uuid, "foo")
	server.TestHTTP(t, "DELETE", delReq, nil)

	// Make sure it no longer exists.
	_, err = datastore.GetDataByUUID(uuid, "foo")
	if err == nil {
		t.Errorf("Shouldn't be able to access a deleted data instance 'foo'\n")
	}
}
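// TestROIPostAndDelete round-trips an ROI over HTTP and verifies it is empty after deletion.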
func TestROIPostAndDelete(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	// Create the ROI dataservice.
	uuid, _ := initTestRepo()

	config := dvid.NewConfig()
	dataservice, err := datastore.NewData(uuid, roitype, "roi", config)
	if err != nil {
		t.Errorf("Error creating new roi instance: %v\n", err)
	}
	data, ok := dataservice.(*Data)
	if !ok {
		t.Errorf("Returned new data instance is not roi.Data\n")
	}

	// PUT an ROI
	roiRequest := fmt.Sprintf("%snode/%s/%s/roi", server.WebAPIPath, uuid, data.DataName())
	server.TestHTTP(t, "POST", roiRequest, getSpansJSON(testSpans))

	// Get back the ROI
	returnedData := server.TestHTTP(t, "GET", roiRequest, nil)
	spans, err := putSpansJSON(returnedData)
	if err != nil {
		t.Errorf("Error on getting back JSON from roi GET: %v\n", err)
	}

	// Make sure the two are the same.
	if !reflect.DeepEqual(spans, testSpans) {
		t.Errorf("Bad PUT/GET ROI roundtrip\nOriginal:\n%s\nReturned:\n%s\n", testSpans, spans)
	}

	// Delete the ROI
	_ = server.TestHTTP(t, "DELETE", roiRequest, nil)

	// ROI should now be empty
	returnedData = server.TestHTTP(t, "GET", roiRequest, nil)
	if string(returnedData) != "[]" {
		t.Errorf("Bad ROI after ROI delete. Should be [] got: %s\n", string(returnedData))
	}
}
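// TestTileKey requests a tile key over HTTP and decodes it to confirm the encoded tile
// coordinate, plane, and scale.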
func TestTileKey(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid, _ := initTestRepo()
	server.CreateTestInstance(t, uuid, "imagetile", "tiles", dvid.Config{})

	keyURL := fmt.Sprintf("%snode/%s/tiles/tilekey/xy/0/1_2_3", server.WebAPIPath, uuid)
	respStr := server.TestHTTP(t, "GET", keyURL, nil)
	keyResp := struct {
		Key string `json:"key"`
	}{}
	if err := json.Unmarshal(respStr, &keyResp); err != nil {
		t.Fatalf("Couldn't parse JSON response to tilekey request (%v):\n%s\n", err, keyResp)
	}
	kb := make([]byte, hex.DecodedLen(len(keyResp.Key)))
	_, err := hex.Decode(kb, []byte(keyResp.Key))
	if err != nil {
		t.Fatalf("Couldn't parse return hex key: %s", keyResp.Key)
	}

	// Decipher TKey portion to make sure it's correct.
	key := storage.Key(kb)
	tk, err := storage.TKeyFromKey(key)
	if err != nil {
		t.Fatalf("Couldn't get TKey from returned key (%v): %x", err, kb)
	}
	tile, plane, scale, err := DecodeTKey(tk)
	if err != nil {
		t.Fatalf("Bad decode of TKey (%v): %x", err, tk)
	}
	expectTile := dvid.ChunkPoint3d{1, 2, 3}
	if tile != expectTile {
		t.Errorf("Expected tile %v, got %v\n", expectTile, tile)
	}
	if !plane.Equals(dvid.XY) {
		t.Errorf("Expected plane to be XY, got %v\n", plane)
	}
	if scale != 0 {
		t.Errorf("Expected scale to be 0, got %d\n", scale)
	}
}
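// TestMultiscale2dRepoPersistence checks that imagetile properties survive a datastore close
// and reopen.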
func TestMultiscale2dRepoPersistence(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	// Make source
	uuid, _ := initTestRepo()
	makeGrayscale(uuid, t, "grayscale")

	// Make imagetile instance and set various properties
	config := dvid.NewConfig()
	config.Set("Placeholder", "true")
	config.Set("Format", "jpg")
	config.Set("Source", "grayscale")
	dataservice, err := datastore.NewData(uuid, mstype, "myimagetile", config)
	if err != nil {
		t.Errorf("Unable to create imagetile instance: %v\n", err)
	}
	msdata, ok := dataservice.(*Data)
	if !ok {
		t.Fatalf("Can't cast imagetile data service into imagetile.Data\n")
	}
	oldData := *msdata

	// Restart test datastore and see if datasets are still there.
	if err = datastore.SaveDataByUUID(uuid, msdata); err != nil {
		t.Fatalf("Unable to save repo during imagetile persistence test: %v\n", err)
	}
	datastore.CloseReopenTest()

	dataservice2, err := datastore.GetDataByUUID(uuid, "myimagetile")
	if err != nil {
		t.Fatalf("Can't get imagetile instance from reloaded test db: %v\n", err)
	}
	msdata2, ok := dataservice2.(*Data)
	if !ok {
		t.Errorf("Returned new data instance 2 is not imagetile.Data\n")
	}
	if !reflect.DeepEqual(oldData.Properties, msdata2.Properties) {
		t.Errorf("Expected properties %v, got %v\n", oldData.Properties, msdata2.Properties)
	}
}
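// TestSetMetadata posts imagetile metadata and verifies tile coordinates and level specs are
// stored as expected.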
func TestSetMetadata(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid, _ := initTestRepo()
	server.CreateTestInstance(t, uuid, "imagetile", "tiles", dvid.Config{})

	// Store Metadata
	url := fmt.Sprintf("%snode/%s/tiles/metadata", server.WebAPIPath, uuid)
	server.TestHTTP(t, "POST", url, bytes.NewBufferString(testMetadata))

	// Check instance really has it set.
	var metadata metadataJSON
	respStr := server.TestHTTP(t, "GET", url, nil)
	if err := json.Unmarshal(respStr, &metadata); err != nil {
		t.Fatalf("Couldn't parse JSON response to metadata request (%v):\n%s\n", err, respStr)
	}
	expectMin := dvid.Point3d{0, 0, 0}
	expectMax := dvid.Point3d{5, 5, 4}
	if !expectMin.Equals(metadata.MinTileCoord) {
		t.Errorf("Expected min tile coord %s, got %s\n", expectMin, metadata.MinTileCoord)
	}
	if !expectMax.Equals(metadata.MaxTileCoord) {
		t.Errorf("Expected max tile coord %s, got %s\n", expectMax, metadata.MaxTileCoord)
	}
	tileSpec, err := parseTileSpec(metadata.Levels)
	if err != nil {
		t.Errorf("Error parsing returned tile level spec:\n%v\n", metadata.Levels)
	}
	if len(tileSpec) != 4 {
		t.Errorf("Bad tile spec load: only %d elements != 4\n", len(tileSpec))
	}
	if tileSpec[2].Resolution.GetMax() != 40.0 {
		t.Errorf("Bad tile spec at level 2: %v\n", tileSpec[2])
	}
	if tileSpec[3].TileSize.Value(2) != 512 {
		t.Errorf("Bad tile spec at level 3: %v\n", tileSpec[3])
	}
}
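// TestFloat32RepoPersistence checks that float32blk properties survive a datastore close and
// reopen.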
func TestFloat32RepoPersistence(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid, _ := initTestRepo()

	// Make floatimg instance and set various properties
	config := dvid.NewConfig()
	config.Set("BlockSize", "12,13,14")
	config.Set("VoxelSize", "1.1,2.8,11")
	config.Set("VoxelUnits", "microns,millimeters,nanometers")
	dataservice, err := datastore.NewData(uuid, floatimgT, "floatimg", config)
	if err != nil {
		t.Errorf("Unable to create float32 instance: %s\n", err)
	}
	floatimg, ok := dataservice.(*Data)
	if !ok {
		t.Errorf("Can't cast float32 data service into Data\n")
	}
	oldData := *floatimg

	// Restart test datastore and see if datasets are still there.
	if err = datastore.SaveDataByUUID(uuid, floatimg); err != nil {
		t.Fatalf("Unable to save repo during floatimg persistence test: %v\n", err)
	}
	datastore.CloseReopenTest()

	dataservice2, err := datastore.GetDataByUUIDName(uuid, "floatimg")
	if err != nil {
		t.Fatalf("Can't get floatimg instance from reloaded test db: %v\n", err)
	}
	floatimg2, ok := dataservice2.(*Data)
	if !ok {
		t.Errorf("Returned new data instance 2 is not imageblk.Data\n")
	}
	if !oldData.Equals(floatimg2) {
		t.Errorf("Expected %v, got %v\n", oldData, *floatimg2)
	}
}
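// TestKeyvalueRoundTrip puts and gets a key containing unusual characters through the keyvalue
// Go API.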
func TestKeyvalueRoundTrip(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid, versionID := initTestRepo()

	// Add data
	config := dvid.NewConfig()
	dataservice, err := datastore.NewData(uuid, kvtype, "roundtripper", config)
	if err != nil {
		t.Errorf("Error creating new keyvalue instance: %v\n", err)
	}
	kvdata, ok := dataservice.(*Data)
	if !ok {
		t.Errorf("Returned new data instance is not keyvalue.Data\n")
	}
	ctx := datastore.NewVersionedCtx(dataservice, versionID)

	keyStr := "testkey.-{}03`~| %@\x01"
	value := []byte("I like Japan and this is some unicode: \u65e5\u672c\u8a9e")

	if err = kvdata.PutData(ctx, keyStr, value); err != nil {
		t.Errorf("Could not put keyvalue data: %v\n", err)
	}

	retrieved, found, err := kvdata.GetData(ctx, keyStr)
	if err != nil {
		t.Fatalf("Could not get keyvalue data: %v\n", err)
	}
	if !found {
		t.Fatalf("Could not find put keyvalue\n")
	}
	if bytes.Compare(value, retrieved) != 0 {
		t.Errorf("keyvalue retrieved %q != put %q\n", string(retrieved), string(value))
	}
}
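// TestMultichan16RepoPersistence checks that multichan16 properties survive a datastore close
// and reopen.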
func TestMultichan16RepoPersistence(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid, _ := initTestRepo()

	// Make multichan16 instance and set various properties
	config := dvid.NewConfig()
	dataservice, err := datastore.NewData(uuid, dtype, "mymultichan16", config)
	if err != nil {
		t.Errorf("Unable to create multichan16 instance: %v\n", err)
	}
	mcdata, ok := dataservice.(*Data)
	if !ok {
		t.Errorf("Can't cast multichan16 data service into multichan16.Data\n")
	}
	oldData := *mcdata

	// Restart test datastore and see if datasets are still there.
	if err = datastore.SaveDataByUUID(uuid, mcdata); err != nil {
		t.Fatalf("Unable to save repo during multichan16 persistence test: %v\n", err)
	}
	datastore.CloseReopenTest()

	dataservice2, err := datastore.GetDataByUUIDName(uuid, "mymultichan16")
	if err != nil {
		t.Fatalf("Can't get multichan16 instance from reloaded test db: %v\n", err)
	}
	mcdata2, ok := dataservice2.(*Data)
	if !ok {
		t.Errorf("Returned new data instance 2 is not multichan16.Data\n")
	}
	if !oldData.Equals(mcdata2) {
		t.Errorf("Expected %v, got %v\n", oldData, *mcdata2)
	}
}
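// TestROIRepoPersistence checks that ROI instances and their extents survive a datastore close
// and reopen.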
func TestROIRepoPersistence(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid, _ := initTestRepo()

	// Add data
	config := dvid.NewConfig()
	dataservice1, err := datastore.NewData(uuid, roitype, "myroi", config)
	if err != nil {
		t.Errorf("Error creating new roi instance: %v\n", err)
	}
	roi1, ok := dataservice1.(*Data)
	if !ok {
		t.Errorf("Returned new data instance 1 is not roi.Data\n")
	}
	if roi1.DataName() != "myroi" {
		t.Errorf("New roi data instance name set incorrectly: %q != %q\n", roi1.DataName(), "myroi")
	}

	config.Set("BlockSize", "15,16,17")
	dataservice2, err := datastore.NewData(uuid, roitype, "myroi2", config)
	if err != nil {
		t.Errorf("Error creating new roi instance: %v\n", err)
	}
	roi2, ok := dataservice2.(*Data)
	if !ok {
		t.Errorf("Returned new data instance 2 is not roi.Data\n")
	}
	roi2.MinZ = 13
	roi2.MaxZ = 3098
	oldData := *roi2

	// Check instance IDs
	if roi1.InstanceID() == roi2.InstanceID() {
		t.Errorf("Instance IDs should be different: %d == %d\n", roi1.InstanceID(), roi2.InstanceID())
	}

	// Restart test datastore and see if datasets are still there.
	if err = datastore.SaveDataByUUID(uuid, dataservice1); err != nil {
		t.Fatalf("Unable to save data1 during ROI persistence test: %v\n", err)
	}
	if err = datastore.SaveDataByUUID(uuid, dataservice2); err != nil {
		t.Fatalf("Unable to save data2 during ROI persistence test: %v\n", err)
	}
	datastore.CloseReopenTest()

	dataservice3, err := datastore.GetDataByUUID(uuid, "myroi2")
	if err != nil {
		t.Fatalf("Can't get second ROI instance from reloaded test db: %v\n", err)
	}
	roi2new, ok := dataservice3.(*Data)
	if !ok {
		t.Errorf("Returned new data instance 3 is not roi.Data\n")
	}
	if !oldData.Equals(roi2new) {
		t.Errorf("Expected %v, got %v\n", oldData, *roi2new)
	}
}
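// TestROIRequests exercises the ROI HTTP API: roi POST/GET, point queries, and mask requests
// both inside and outside the ROI.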
func TestROIRequests(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	// Create the ROI dataservice.
	uuid, _ := initTestRepo()

	config := dvid.NewConfig()
	dataservice, err := datastore.NewData(uuid, roitype, "roi", config)
	if err != nil {
		t.Errorf("Error creating new roi instance: %v\n", err)
	}
	data, ok := dataservice.(*Data)
	if !ok {
		t.Errorf("Returned new data instance is not roi.Data\n")
	}

	// PUT an ROI
	roiRequest := fmt.Sprintf("%snode/%s/%s/roi", server.WebAPIPath, uuid, data.DataName())
	server.TestHTTP(t, "POST", roiRequest, getSpansJSON(testSpans))

	// Get back the ROI
	returnedData := server.TestHTTP(t, "GET", roiRequest, nil)
	spans, err := putSpansJSON(returnedData)
	if err != nil {
		t.Errorf("Error on getting back JSON from roi GET: %v\n", err)
	}

	// Make sure the two are the same.
	if !reflect.DeepEqual(spans, testSpans) {
		t.Errorf("Bad PUT/GET ROI roundtrip\nOriginal:\n%s\nReturned:\n%s\n", testSpans, spans)
	}

	// Test the ptquery
	ptqueryRequest := fmt.Sprintf("%snode/%s/%s/ptquery", server.WebAPIPath, uuid, data.DataName())
	returnedData = server.TestHTTP(t, "POST", ptqueryRequest, getPointsJSON(testPoints))
	inclusions, err := putInclusionJSON(returnedData)
	if err != nil {
		t.Fatalf("Error on getting back JSON from ptquery: %v\n", err)
	}

	// Make sure the two are the same.
	if !reflect.DeepEqual(inclusions, expectedInclusions) {
		t.Errorf("Bad ptquery results\nOriginal:\n%s\nReturned:\n%s\n", expectedInclusions, inclusions)
	}

	// Test ROI mask out of range -- should be all 0.
	maskRequest := fmt.Sprintf("%snode/%s/%s/mask/0_1_2/100_100_100/10_40_70", server.WebAPIPath, uuid, data.DataName())
	returnedData = server.TestHTTP(t, "GET", maskRequest, nil)
	if len(returnedData) != 100*100*100 {
		t.Errorf("Expected mask volume of %d bytes, got %d bytes instead\n", 100*100*100, len(returnedData))
	}
	for i, value := range returnedData {
		if value != 0 {
			t.Errorf("Expected all-zero mask, got %d at index %d\n", value, i)
			break
		}
	}

	// Test ROI mask within range.
	maskRequest = fmt.Sprintf("%snode/%s/%s/mask/0_1_2/100_100_100/6350_3232_3200", server.WebAPIPath, uuid, data.DataName())
	returnedData = server.TestHTTP(t, "GET", maskRequest, nil)
	if len(returnedData) != 100*100*100 {
		t.Errorf("Expected mask volume of %d bytes, got %d bytes instead\n", 100*100*100, len(returnedData))
	}

	// Check first block plane
	for y := 0; y < 100; y++ {
		for x := 0; x < 100; x++ {
			value := returnedData[y*100+x]
			if x < 50 && value != 0 {
				t.Errorf("Expected mask to be zero at (%d, %d) before ROI, got %d instead\n", x, y, value)
				break
			}
			if x >= 50 && y < 64 && value != 1 {
				t.Errorf("Expected mask to be 1 at (%d, %d) within ROI, got %d instead\n", x, y, value)
				break
			}
			// tuple{100, 103, 201, 212}
			if x <= 81 && y >= 64 && y < 96 && value != 0 {
				t.Errorf("Expected mask to be zero at (%d, %d) before ROI, got %d instead\n", x, y, value)
				break
			}
			if x > 81 && y >= 64 && y < 96 && value != 1 {
				t.Errorf("Expected mask to be 1 at (%d, %d) within ROI, got %d instead\n", x, y, value)
				break
			}
		}
	}

	// Check second block plane
	offset := 32 * 100 * 100 // moves to next block in Z
	for y := 0; y < 100; y++ {
		for x := 0; x < 100; x++ {
			value := returnedData[offset+y*100+x]
			if x < 50 && value != 0 {
				t.Errorf("Expected mask to be zero at (%d, %d) before ROI, got %d instead\n", x, y, value)
				break
			}
			if x <= 81 && y < 32 && value != 0 {
				t.Errorf("Expected mask to be zero at (%d, %d) before ROI, got %d instead\n", x, y, value)
				break
			}
			if x > 81 && y < 32 && value != 1 {
				t.Errorf("Expected mask to be 1 at (%d, %d) within ROI, got %d instead\n", x, y, value)
				break
			}
			if y >= 32 && value != 0 {
				t.Errorf("Expected mask to be zero at (%d, %d) before ROI, got %d instead\n", x, y, value)
				break
			}
		}
	}

	// Check last block plane
	offset = 96 * 100 * 100 // moves to last ROI layer in Z
	for y := 0; y < 100; y++ {
		for x := 0; x < 100; x++ {
			value := returnedData[offset+y*100+x]
			if x < 50 && value != 0 {
				t.Errorf("Expected mask to be zero at (%d, %d) before ROI, got %d instead\n", x, y, value)
				break
			}
			if x >= 50 && y < 32 && value != 1 {
				t.Errorf("Expected mask to be 1 at (%d, %d) within ROI, got %d instead\n", x, y, value)
				break
			}
			if y >= 32 && y < 64 && value != 0 {
				t.Errorf("Expected mask to be zero at (%d, %d) before ROI, got %d instead\n", x, y, value)
				break
			}
			if x >= 50 && y >= 64 && y < 96 && value != 1 {
				t.Errorf("Expected mask to be 1 at (%d, %d) within ROI, got %d instead\n", x, y, value)
				break
			}
			if y >= 96 && value != 0 {
				t.Errorf("Expected mask to be zero at (%d, %d) before ROI, got %d instead\n", x, y, value)
				break
			}
		}
	}
}
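// TestCommitBranchMergeDelete commits, branches, and merges nodes, then deletes the repo and
// verifies all of its UUIDs become invalid.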
func TestCommitBranchMergeDelete(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid := createRepo(t)

	// Shouldn't be able to create branch on open node.
	branchReq := fmt.Sprintf("%snode/%s/branch", WebAPIPath, uuid)
	TestBadHTTP(t, "POST", branchReq, nil)

	// Commit it.
	payload := bytes.NewBufferString(`{"note": "This is my test commit", "log": ["line1", "line2", "some more stuff in a line"]}`)
	apiStr := fmt.Sprintf("%snode/%s/commit", WebAPIPath, uuid)
	TestHTTP(t, "POST", apiStr, payload)

	// Make sure committed nodes can only be read.
	// We shouldn't be able to write to log.
	payload = bytes.NewBufferString(`{"log": ["line1", "line2", "some more stuff in a line"]}`)
	apiStr = fmt.Sprintf("%snode/%s/log", WebAPIPath, uuid)
	TestBadHTTP(t, "POST", apiStr, payload)

	// Should be able to create branch now that we've committed parent.
	respData := TestHTTP(t, "POST", branchReq, nil)
	resp := struct {
		Child string `json:"child"`
	}{}
	if err := json.Unmarshal(respData, &resp); err != nil {
		t.Errorf("Expected 'child' JSON response. Got %s\n", string(respData))
	}
	parent1 := dvid.UUID(resp.Child)

	// Create a sibling.
	respData = TestHTTP(t, "POST", branchReq, nil)
	if err := json.Unmarshal(respData, &resp); err != nil {
		t.Errorf("Expected 'child' JSON response. Got %s\n", string(respData))
	}
	parent2 := dvid.UUID(resp.Child)

	// Commit both parents
	payload = bytes.NewBufferString(`{"note": "This is first parent"}`)
	apiStr = fmt.Sprintf("%snode/%s/commit", WebAPIPath, parent1)
	TestHTTP(t, "POST", apiStr, payload)
	payload = bytes.NewBufferString(`{"note": "This is second parent"}`)
	apiStr = fmt.Sprintf("%snode/%s/commit", WebAPIPath, parent2)
	TestHTTP(t, "POST", apiStr, payload)

	// Merge the two disjoint branches.
	mergeJSON := fmt.Sprintf(`{"mergeType": "conflict-free", "note": "This is my merged node", "parents": [%q, %q]}`, parent1[:7], parent2)
	payload = bytes.NewBufferString(mergeJSON)
	apiStr = fmt.Sprintf("%srepo/%s/merge", WebAPIPath, parent1)
	TestHTTP(t, "POST", apiStr, payload)

	// Get root version to check after delete repo.
	rootV, err := datastore.VersionFromUUID(uuid)
	if err != nil {
		t.Errorf("Got unexpected error on getting version from root UUID: %v\n", err)
	}

	// Delete the entire repo including all branches.
	apiStr = fmt.Sprintf("%srepo/%s", WebAPIPath, parent2)
	TestBadHTTP(t, "DELETE", apiStr, nil) // Requires query string
	apiStr = fmt.Sprintf("%srepo/%s?imsure=true", WebAPIPath, parent2)
	TestHTTP(t, "DELETE", apiStr, nil) // Requires query string

	// Make sure none of the repo is still accessible.
	jsonResp, err := datastore.GetRepoJSON(uuid)
	if err == nil {
		t.Errorf("Expected invalid UUID after repo delete but got json back: %s\n", jsonResp)
	}
	if err != datastore.ErrInvalidUUID {
		t.Errorf("Expected invalid UUID after repo delete but got unexpected error: %v\n", err)
	}
	_, err = datastore.VersionFromUUID(uuid)
	if err != datastore.ErrInvalidUUID {
		t.Errorf("Expected invalid root UUID after repo delete but got unexpected error: %v\n", err)
	}
	_, err = datastore.VersionFromUUID(parent1)
	if err != datastore.ErrInvalidUUID {
		t.Errorf("Expected invalid UUID for 1st parent after repo delete but got unexpected error: %v\n", err)
	}
	_, err = datastore.VersionFromUUID(parent2)
	if err != datastore.ErrInvalidUUID {
		t.Errorf("Expected invalid UUID for 2nd parent after repo delete but got unexpected error: %v\n", err)
	}
	_, err = datastore.UUIDFromVersion(rootV)
	if err != datastore.ErrInvalidVersion {
		t.Errorf("Expected invalid version id for root after repo delete but got unexpected error: %v\n", err)
	}
}
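// TestKeyvalueVersioning checks keyvalue behavior across versions: commits, branches, deletes,
// conflicting merges, and automatic conflict resolution.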
func TestKeyvalueVersioning(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid, _ := initTestRepo()

	config := dvid.NewConfig()
	dataservice, err := datastore.NewData(uuid, kvtype, "versiontest", config)
	if err != nil {
		t.Fatalf("Error creating new keyvalue instance: %v\n", err)
	}
	data, ok := dataservice.(*Data)
	if !ok {
		t.Fatalf("Returned new data instance is not keyvalue.Data\n")
	}

	// PUT a value
	key1 := "mykey"
	value1 := "some stuff"
	key1req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid, data.DataName(), key1)
	server.TestHTTP(t, "POST", key1req, strings.NewReader(value1))

	// Add 2nd k/v
	key2 := "my2ndkey"
	value2 := "more good stuff"
	key2req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid, data.DataName(), key2)
	server.TestHTTP(t, "POST", key2req, strings.NewReader(value2))

	// Create a new version in repo
	if err = datastore.Commit(uuid, "my commit msg", []string{"stuff one", "stuff two"}); err != nil {
		t.Errorf("Unable to lock root node %s: %v\n", uuid, err)
	}
	uuid2, err := datastore.NewVersion(uuid, "some child", nil)
	if err != nil {
		t.Fatalf("Unable to create new version off node %s: %v\n", uuid, err)
	}
	_, err = datastore.VersionFromUUID(uuid2)
	if err != nil {
		t.Fatalf("Unable to get version ID from new uuid %s: %v\n", uuid2, err)
	}

	// Change the 2nd k/v
	uuid2val := "this is completely different"
	uuid2req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid2, data.DataName(), key2)
	server.TestHTTP(t, "POST", uuid2req, strings.NewReader(uuid2val))

	// Get the first version value
	returnValue := server.TestHTTP(t, "GET", key2req, nil)
	if string(returnValue) != value2 {
		t.Errorf("Error on first version, key %q: expected %s, got %s\n", key2, value2, string(returnValue))
	}

	// Get the second version value
	returnValue = server.TestHTTP(t, "GET", uuid2req, nil)
	if string(returnValue) != uuid2val {
		t.Errorf("Error on second version, key %q: expected %s, got %s\n", key2, uuid2val, string(returnValue))
	}

	// Check return of first two keys in range.
	rangereq := fmt.Sprintf("%snode/%s/%s/keyrange/%s/%s", server.WebAPIPath, uuid, data.DataName(), "my", "zebra")
	returnValue = server.TestHTTP(t, "GET", rangereq, nil)

	var retrievedKeys []string
	if err = json.Unmarshal(returnValue, &retrievedKeys); err != nil {
		t.Errorf("Bad key range request unmarshal: %v\n", err)
	}
	if len(retrievedKeys) != 2 || retrievedKeys[1] != "mykey" || retrievedKeys[0] != "my2ndkey" {
		t.Errorf("Bad key range request return. Expected: [%q,%q]. Got: %s\n", key1, key2, string(returnValue))
	}

	// Commit the repo
	if err = datastore.Commit(uuid2, "my 2nd commit msg", []string{"changed 2nd k/v"}); err != nil {
		t.Errorf("Unable to commit node %s: %v\n", uuid2, err)
	}

	// Make grandchild of root
	uuid3, err := datastore.NewVersion(uuid2, "some child", nil)
	if err != nil {
		t.Fatalf("Unable to create new version off node %s: %v\n", uuid2, err)
	}

	// Delete the 2nd k/v
	uuid3req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid3, data.DataName(), key2)
	server.TestHTTP(t, "DELETE", uuid3req, nil)
	server.TestBadHTTP(t, "GET", uuid3req, nil)

	// Make sure the 2nd k/v is correct for each of previous versions.
	returnValue = server.TestHTTP(t, "GET", key2req, nil)
	if string(returnValue) != value2 {
		t.Errorf("Error on first version, key %q: expected %s, got %s\n", key2, value2, string(returnValue))
	}
	returnValue = server.TestHTTP(t, "GET", uuid2req, nil)
	if string(returnValue) != uuid2val {
		t.Errorf("Error on second version, key %q: expected %s, got %s\n", key2, uuid2val, string(returnValue))
	}

	// Make a child
	if err = datastore.Commit(uuid3, "my 3rd commit msg", []string{"deleted 2nd k/v"}); err != nil {
		t.Errorf("Unable to commit node %s: %v\n", uuid3, err)
	}
	uuid4, err := datastore.NewVersion(uuid3, "some child", nil)
	if err != nil {
		t.Fatalf("Unable to create new version off node %s: %v\n", uuid3, err)
	}

	// Change the 2nd k/v
	uuid4val := "we are reintroducing this k/v"
	uuid4req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid4, data.DataName(), key2)
	server.TestHTTP(t, "POST", uuid4req, strings.NewReader(uuid4val))
	if err = datastore.Commit(uuid4, "commit node 4", []string{"we modified stuff"}); err != nil {
		t.Errorf("Unable to commit node %s: %v\n", uuid4, err)
	}

	// Make sure the 2nd k/v is correct for each of previous versions.
	returnValue = server.TestHTTP(t, "GET", key2req, nil)
	if string(returnValue) != value2 {
		t.Errorf("Error on first version, key %q: expected %s, got %s\n", key2, value2, string(returnValue))
	}
	returnValue = server.TestHTTP(t, "GET", uuid2req, nil)
	if string(returnValue) != uuid2val {
		t.Errorf("Error on second version, key %q: expected %s, got %s\n", key2, uuid2val, string(returnValue))
	}
	server.TestBadHTTP(t, "GET", uuid3req, nil)
	returnValue = server.TestHTTP(t, "GET", uuid4req, nil)
	if string(returnValue) != uuid4val {
		t.Errorf("Error on fourth version, key %q: expected %s, got %s\n", key2, uuid4val, string(returnValue))
	}

	// Let's try a merge!

	// Make a child off the 2nd version from root.
	uuid5, err := datastore.NewVersion(uuid2, "some child", nil)
	if err != nil {
		t.Fatalf("Unable to create new version off node %s: %v\n", uuid2, err)
	}

	// Store new stuff in 2nd k/v
	uuid5val := "this is forked value"
	uuid5req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid5, data.DataName(), key2)
	server.TestHTTP(t, "POST", uuid5req, strings.NewReader(uuid5val))
	returnValue = server.TestHTTP(t, "GET", uuid5req, nil)
	if string(returnValue) != uuid5val {
		t.Errorf("Error on merged child, key %q: expected %q, got %q\n", key2, uuid5val, string(returnValue))
	}

	// Commit node
	if err = datastore.Commit(uuid5, "forked node", []string{"we modified stuff"}); err != nil {
		t.Errorf("Unable to commit node %s: %v\n", uuid5, err)
	}

	// Should be able to merge using conflict-free (disjoint at key level) merge even though
	// it's conflicted.  Will get lazy error on request.
	badChild, err := datastore.Merge([]dvid.UUID{uuid4, uuid5}, "some child", datastore.MergeConflictFree)
	if err != nil {
		t.Errorf("Error doing merge: %v\n", err)
	}
	childreq := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, badChild, data.DataName(), key2)
	server.TestBadHTTP(t, "GET", childreq, nil)

	// Manually fix conflict: Branch, and then delete 2nd k/v and commit.
	uuid6, err := datastore.NewVersion(uuid5, "some child", nil)
	if err != nil {
		t.Fatalf("Unable to create new version off node %s: %v\n", uuid5, err)
	}
	uuid6req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid6, data.DataName(), key2)
	server.TestHTTP(t, "DELETE", uuid6req, nil)
	server.TestBadHTTP(t, "GET", uuid6req, nil)
	if err = datastore.Commit(uuid6, "deleted forked node 2nd k/v", []string{"we modified stuff"}); err != nil {
		t.Errorf("Unable to commit node %s: %s\n", uuid6, err)
	}

	// Should now be able to correctly merge the two branches.
	goodChild, err := datastore.Merge([]dvid.UUID{uuid4, uuid6}, "merging stuff", datastore.MergeConflictFree)
	if err != nil {
		t.Errorf("Error doing merge: %v\n", err)
	}

	// We should be able to see just the original uuid4 value of the 2nd k/v
	childreq = fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, goodChild, data.DataName(), key2)
	returnValue = server.TestHTTP(t, "GET", childreq, nil)
	if string(returnValue) != uuid4val {
		t.Errorf("Error on merged child, key %q: expected %q, got %q\n", key2, uuid4val, string(returnValue))
	}

	// Apply the automatic conflict resolution using ordering.
	payload := fmt.Sprintf(`{"data":["versiontest"],"parents":[%q,%q],"note":"automatic resolved merge"}`, uuid5, uuid4)
	resolveReq := fmt.Sprintf("%srepo/%s/resolve", server.WebAPIPath, uuid4)
	returnValue = server.TestHTTP(t, "POST", resolveReq, bytes.NewBufferString(payload))
	resolveResp := struct {
		Child dvid.UUID `json:"child"`
	}{}
	if err := json.Unmarshal(returnValue, &resolveResp); err != nil {
		t.Fatalf("Can't parse return of resolve request: %s\n", string(returnValue))
	}

	// We should now see the uuid5 version of the 2nd k/v in the returned merged node.
	childreq = fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, resolveResp.Child, data.DataName(), key2)
	returnValue = server.TestHTTP(t, "GET", childreq, nil)
	if string(returnValue) != uuid5val {
		t.Errorf("Error on auto merged child, key %q: expected %q, got %q\n", key2, uuid5val, string(returnValue))
	}

	// Introduce a child off root but don't add 2nd k/v to it.
	uuid7, err := datastore.NewVersion(uuid, "2nd child off root", nil)
	if err != nil {
		t.Fatalf("Unable to create new version off node %s: %v\n", uuid, err)
	}
	if err = datastore.Commit(uuid7, "useless node", []string{"we modified nothing!"}); err != nil {
		t.Errorf("Unable to commit node %s: %v\n", uuid7, err)
	}

	// Now merge the previously merged node with the newly created "blank" child off root.
	if err = datastore.Commit(goodChild, "this was a good merge", []string{}); err != nil {
		t.Errorf("Unable to commit node %s: %v\n", goodChild, err)
	}
	merge2, err := datastore.Merge([]dvid.UUID{goodChild, uuid7}, "merging a useless path", datastore.MergeConflictFree)
	if err != nil {
		t.Errorf("Error doing merge: %v\n", err)
	}
	merge3, err := datastore.Merge([]dvid.UUID{uuid7, goodChild}, "merging a useless path in reverse order", datastore.MergeConflictFree)
	if err != nil {
		t.Errorf("Error doing merge: %v\n", err)
	}

	// We should still be conflict free since 2nd key in left parent path will take precedent over shared 2nd key
	// in root.  This tests our invalidation of ancestors.
	toughreq := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, merge2, data.DataName(), key2)
	returnValue = server.TestHTTP(t, "GET", toughreq, nil)
	if string(returnValue) != uuid4val {
		t.Errorf("Error on merged child, key %q: expected %q, got %q\n", key2, uuid4val, string(returnValue))
	}
	toughreq = fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, merge3, data.DataName(), key2)
	returnValue = server.TestHTTP(t, "GET", toughreq, nil)
	if string(returnValue) != uuid4val {
		t.Errorf("Error on merged child, key %q: expected %q, got %q\n", key2, uuid4val, string(returnValue))
	}
}
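// TestFloatDirectCalls ingests a float32 volume through the Go voxels API and verifies a
// round-trip read matches the stored data.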
func TestFloatDirectCalls(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid, versionID := initTestRepo()

	server.CreateTestInstance(t, uuid, "float32blk", "floatimg", dvid.Config{})
	dataservice, err := datastore.GetDataByUUIDName(uuid, "floatimg")
	if err != nil {
		t.Fatal(err)
	}
	floatimg, ok := dataservice.(*Data)
	if !ok {
		t.Fatalf("Can't convert dataservice %v into imageblk.Data\n", dataservice)
	}
	ctx := datastore.NewVersionedCtx(floatimg, versionID)

	// Create a block-aligned float32 test volume
	offset := dvid.Point3d{512, 32, 1024}
	size := dvid.Point3d{128, 96, 64}
	subvol := dvid.NewSubvolume(offset, size)
	testvol := createFloatTestVolume(t, uuid, "floatimg", offset, size)
	origData := make([]byte, len(testvol.data))
	copy(origData, testvol.data)

	// Store it into datastore at root
	v, err := floatimg.NewVoxels(subvol, testvol.data)
	if err != nil {
		t.Fatalf("Unable to make new floatimg voxels: %v\n", err)
	}
	if err = floatimg.IngestVoxels(versionID, 1, v, ""); err != nil {
		t.Errorf("Unable to put voxels for %s: %v\n", ctx, err)
	}

	// Read the stored image
	v2, err := floatimg.NewVoxels(subvol, nil)
	if err != nil {
		t.Errorf("Unable to make new floatimg ExtHandler: %v\n", err)
	}
	if err = floatimg.GetVoxels(versionID, v2, ""); err != nil {
		t.Errorf("Unable to get voxels for %s: %v\n", ctx, err)
	}

	// Make sure the retrieved image matches the original
	if v.Stride() != v2.Stride() {
		t.Errorf("Stride in retrieved subvol incorrect\n")
	}
	if v.Interpolable() != v2.Interpolable() {
		t.Errorf("Interpolable bool in retrieved subvol incorrect\n")
	}
	if !reflect.DeepEqual(v.Size(), v2.Size()) {
		t.Errorf("Size in retrieved subvol incorrect: %s vs expected %s\n", v2.Size(), v.Size())
	}
	if v.NumVoxels() != v2.NumVoxels() {
		t.Errorf("# voxels in retrieved is different: %d vs expected %d\n", v2.NumVoxels(), v.NumVoxels())
	}
	if len(v.Data()) != len(v2.Data()) {
		t.Errorf("Expected %d bytes in retrieved data, got %d bytes\n", len(v.Data()), len(v2.Data()))
	}
	received := v2.Data()
	//dvid.PrintNonZero("original value", origData)
	//dvid.PrintNonZero("returned value", data)
	for i := int64(0); i < v2.NumVoxels(); i++ {
		if received[i] != origData[i] {
			t.Logf("Data returned != data stored for voxel %d\n", i)
			t.Logf("Size of data: %d bytes from GET, %d bytes in PUT\n", len(received), len(origData))
			t.Fatalf("GET subvol (%d) != PUT subvol (%d) @ index %d", received[i], origData[i], i)
		}
	}
}