// Make sure new labelgraph data have different IDs.
func TestNewLabelgraphDifferent(t *testing.T) {
    datastore.OpenTest()
    defer datastore.CloseTest()

    uuid, _ := initTestRepo()

    // Add data
    config := dvid.NewConfig()
    dataservice1, err := datastore.NewData(uuid, dtype, "lg1", config)
    if err != nil {
        t.Errorf("Error creating new labelgraph instance 1: %v\n", err)
    }
    data1, ok := dataservice1.(*Data)
    if !ok {
        t.Errorf("Returned new data instance 1 is not labelgraph.Data\n")
    }

    dataservice2, err := datastore.NewData(uuid, dtype, "lg2", config)
    if err != nil {
        t.Errorf("Error creating new labelgraph instance 2: %v\n", err)
    }
    data2, ok := dataservice2.(*Data)
    if !ok {
        t.Errorf("Returned new data instance 2 is not labelgraph.Data\n")
    }

    if data1.InstanceID() == data2.InstanceID() {
        t.Errorf("Instance IDs should be different: %d == %d\n", data1.InstanceID(), data2.InstanceID())
    }
}
// TODO -- Maybe allow assignment of child UUID via JSON in POST.  Right now, we only
// allow this potentially dangerous function via command-line.
func reposPostHandler(w http.ResponseWriter, r *http.Request) {
    config := dvid.NewConfig()
    if r.Body != nil {
        if err := config.SetByJSON(r.Body); err != nil {
            BadRequest(w, r, fmt.Sprintf("Error decoding POSTed JSON config for new repo: %v", err))
            return
        }
    }

    alias, _, err := config.GetString("alias")
    if err != nil {
        BadRequest(w, r, "POST on repos endpoint requires valid 'alias': %v", err)
        return
    }
    description, _, err := config.GetString("description")
    if err != nil {
        BadRequest(w, r, "POST on repos endpoint requires valid 'description': %v", err)
        return
    }

    root, err := datastore.NewRepo(alias, description, nil)
    if err != nil {
        BadRequest(w, r, err)
        return
    }
    w.Header().Set("Content-Type", "application/json")
    fmt.Fprintf(w, "{%q: %q}", "root", root)
}
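// The sketch below is a hypothetical client for the handler above; it is not part of
// the codebase. The server address and the /api/repos path are assumptions, while the
// "alias"/"description" keys and the {"root": "<uuid>"} response shape come straight
// from reposPostHandler.
package main

import (
    "encoding/json"
    "fmt"
    "log"
    "net/http"
    "strings"
)

func main() {
    // POST a JSON config with the two keys reposPostHandler reads.
    body := strings.NewReader(`{"alias": "myrepo", "description": "test repository"}`)
    resp, err := http.Post("http://localhost:8000/api/repos", "application/json", body) // path assumed
    if err != nil {
        log.Fatalf("POST failed: %v", err)
    }
    defer resp.Body.Close()

    // The handler replies with {"root": "<uuid of new repo root node>"}.
    var result struct {
        Root string `json:"root"`
    }
    if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
        log.Fatalf("could not decode response: %v", err)
    }
    fmt.Println("new repo root UUID:", result.Root)
}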
// DecodeJSON decodes JSON passed in a request into a dvid.Config.
func DecodeJSON(r *http.Request) (dvid.Config, error) {
    config := dvid.NewConfig()
    if err := config.SetByJSON(r.Body); err != nil {
        return dvid.Config{}, fmt.Errorf("Malformed JSON request in body: %v", err)
    }
    return config, nil
}
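// A minimal sketch of how a handler might use DecodeJSON. This handler and its "note"
// key are hypothetical and not part of the codebase, but DecodeJSON, Config.GetString,
// and BadRequest are used exactly as they appear elsewhere in this section.
func nodeNoteHandler(w http.ResponseWriter, r *http.Request) {
    config, err := DecodeJSON(r)
    if err != nil {
        BadRequest(w, r, err)
        return
    }
    // GetString returns (value, found, error), matching its use in the handlers above.
    note, found, err := config.GetString("note")
    if !found || err != nil {
        BadRequest(w, r, "POST requires a valid 'note' string in the JSON body")
        return
    }
    w.Header().Set("Content-Type", "application/json")
    fmt.Fprintf(w, "{%q: %q}", "note", note)
}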
// Make sure new keyvalue data have different IDs.
func TestNewKeyvalueDifferent(t *testing.T) {
    datastore.OpenTest()
    defer datastore.CloseTest()

    uuid, _ := initTestRepo()

    // Add data
    config := dvid.NewConfig()
    dataservice1, err := datastore.NewData(uuid, kvtype, "instance1", config)
    if err != nil {
        t.Errorf("Error creating new keyvalue instance: %v\n", err)
    }
    kv1, ok := dataservice1.(*Data)
    if !ok {
        t.Errorf("Returned new data instance 1 is not keyvalue.Data\n")
    }
    if kv1.DataName() != "instance1" {
        t.Errorf("New keyvalue data instance name set incorrectly: %q != %q\n", kv1.DataName(), "instance1")
    }

    dataservice2, err := datastore.NewData(uuid, kvtype, "instance2", config)
    if err != nil {
        t.Errorf("Error creating new keyvalue instance: %v\n", err)
    }
    kv2, ok := dataservice2.(*Data)
    if !ok {
        t.Errorf("Returned new data instance 2 is not keyvalue.Data\n")
    }

    if kv1.InstanceID() == kv2.InstanceID() {
        t.Errorf("Instance IDs should be different: %d == %d\n", kv1.InstanceID(), kv2.InstanceID())
    }
}
// serverSettingsHandler handles POSTed server runtime settings ("gc" and "throttle").
func serverSettingsHandler(c web.C, w http.ResponseWriter, r *http.Request) {
    config := dvid.NewConfig()
    if err := config.SetByJSON(r.Body); err != nil {
        BadRequest(w, r, fmt.Sprintf("Error decoding POSTed JSON config for settings: %v", err))
        return
    }
    w.Header().Set("Content-Type", "text/plain")

    // Handle GC percentage setting
    percent, found, err := config.GetInt("gc")
    if err != nil {
        BadRequest(w, r, "POST on settings endpoint had bad parsing of 'gc' key: %v", err)
        return
    }
    if found {
        old := debug.SetGCPercent(percent)
        fmt.Fprintf(w, "DVID server garbage collection target percentage set to %d from %d\n", percent, old)
    }

    // Handle max throttle ops setting
    maxOps, found, err := config.GetInt("throttle")
    if err != nil {
        BadRequest(w, r, "POST on settings endpoint had bad parsing of 'throttle' key: %v", err)
        return
    }
    if found {
        old := maxThrottledOps
        SetMaxThrottleOps(maxOps)
        fmt.Fprintf(w, "Maximum throttled ops set to %d from %d\n", maxOps, old)
    }
}
func repoNewDataHandler(c web.C, w http.ResponseWriter, r *http.Request) {
    uuid := c.Env["uuid"].(dvid.UUID)
    config := dvid.NewConfig()
    if err := config.SetByJSON(r.Body); err != nil {
        BadRequest(w, r, fmt.Sprintf("Error decoding POSTed JSON config for 'new': %v", err))
        return
    }

    // Make sure that the passed configuration has data type and instance name.
    typename, found, err := config.GetString("typename")
    if !found || err != nil {
        BadRequest(w, r, "POST on repo endpoint requires specification of valid 'typename'")
        return
    }
    dataname, found, err := config.GetString("dataname")
    if !found || err != nil {
        BadRequest(w, r, "POST on repo endpoint requires specification of valid 'dataname'")
        return
    }
    typeservice, err := datastore.TypeServiceByName(dvid.TypeString(typename))
    if err != nil {
        BadRequest(w, r, err)
        return
    }
    _, err = datastore.NewData(uuid, typeservice, dvid.InstanceName(dataname), config)
    if err != nil {
        BadRequest(w, r, err)
        return
    }
    w.Header().Set("Content-Type", "application/json")
    fmt.Fprintf(w, "{%q: 'Added %s [%s] to node %s'}", "result", dataname, typename, uuid)
}
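// Hypothetical client sketch for the handler above: the server address and the
// /api/repo/<uuid>/instance path are assumptions, but the required "typename" and
// "dataname" keys (plus extra per-type settings such as "versioned", used in the
// keyvalue tests below) are exactly what repoNewDataHandler pulls from the POSTed config.
package main

import (
    "fmt"
    "log"
    "net/http"
    "strings"
)

func main() {
    uuid := "c13e8c" // placeholder UUID of an existing node
    cfg := `{"typename": "keyvalue", "dataname": "mykv", "versioned": "true"}`
    url := fmt.Sprintf("http://localhost:8000/api/repo/%s/instance", uuid) // path assumed
    resp, err := http.Post(url, "application/json", strings.NewReader(cfg))
    if err != nil {
        log.Fatalf("POST failed: %v", err)
    }
    defer resp.Body.Close()
    if resp.StatusCode != http.StatusOK {
        log.Fatalf("instance creation failed with status %d", resp.StatusCode)
    }
    fmt.Println("created data instance mykv of type keyvalue on node", uuid)
}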
func TestROICreateAndSerialize(t *testing.T) {
    datastore.OpenTest()
    defer datastore.CloseTest()

    uuid, _ := initTestRepo()

    // Add data
    config := dvid.NewConfig()
    dataservice1, err := datastore.NewData(uuid, roitype, "myroi", config)
    if err != nil {
        t.Errorf("Error creating new roi instance: %v\n", err)
    }
    roi1, ok := dataservice1.(*Data)
    if !ok {
        t.Errorf("Returned new data instance 1 is not roi.Data\n")
    }
    if roi1.DataName() != "myroi" {
        t.Errorf("New roi data instance name set incorrectly: %q != %q\n", roi1.DataName(), "myroi")
    }

    config.Set("BlockSize", "15,16,17")
    dataservice2, err := datastore.NewData(uuid, roitype, "myroi2", config)
    if err != nil {
        t.Errorf("Error creating new roi instance: %v\n", err)
    }
    roi2, ok := dataservice2.(*Data)
    if !ok {
        t.Errorf("Returned new data instance 2 is not roi.Data\n")
    }
    if roi1.InstanceID() == roi2.InstanceID() {
        t.Errorf("Instance IDs should be different: %d == %d\n", roi1.InstanceID(), roi2.InstanceID())
    }

    // Test persistence of storage.
    roi2.MinZ = 13
    roi2.MaxZ = 3098
    gobBytes, err := roi2.GobEncode()
    if err != nil {
        t.Fatalf("Could not Gob encode roi: %v\n", err)
    }
    var received Data
    if err = received.GobDecode(gobBytes); err != nil {
        t.Fatalf("Could not decode Gob-encoded roi: %v\n", err)
    }
    if !roi2.Data.Equals(received.Data) {
        t.Errorf("ROI base Data has bad roundtrip:\nOriginal:\n%v\nReceived:\n%v\n", *(roi2.Data), *(received.Data))
    }
    if !reflect.DeepEqual(roi2.Properties, received.Properties) {
        t.Errorf("ROI extended properties has bad roundtrip:\nOriginal:\n%v\nReceived:\n%v\n", roi2.Properties, received.Properties)
    }
}
func TestRequests(t *testing.T) {
    datastore.OpenTest()
    defer datastore.CloseTest()

    uuid, _ := initTestRepo()

    config := dvid.NewConfig()
    dataservice, err := datastore.NewData(uuid, syntype, "mysynapses", config)
    if err != nil {
        t.Fatalf("Error creating new data instance: %v\n", err)
    }
    data, ok := dataservice.(*Data)
    if !ok {
        t.Fatalf("Returned new data instance is not synapse.Data\n")
    }

    // PUT first batch of synapses
    testJSON, err := json.Marshal(testData)
    if err != nil {
        t.Fatal(err)
    }
    url1 := fmt.Sprintf("%snode/%s/%s/elements", server.WebAPIPath, uuid, data.DataName())
    server.TestHTTP(t, "POST", url1, strings.NewReader(string(testJSON)))

    // GET synapses back within superset bounding box and make sure all data is there.
    testResponse(t, testData, "%snode/%s/%s/elements/1000_1000_1000/0_0_0", server.WebAPIPath, uuid, data.DataName())

    // Test subset GET
    testResponse(t, expected3, "%snode/%s/%s/elements/5_5_5/126_60_97", server.WebAPIPath, uuid, data.DataName())

    // Test Tag 1
    tag := Tag("Synapse2")
    synapse2 := getTag(tag, testData)
    testResponse(t, synapse2, "%snode/%s/%s/tag/%s?relationships=true", server.WebAPIPath, uuid, data.DataName(), tag)

    // Test Tag 2
    tag2 := Tag("Zlt90")
    zlt90 := getTag(tag2, testData)
    testResponse(t, zlt90, "%snode/%s/%s/tag/%s?relationships=true", server.WebAPIPath, uuid, data.DataName(), tag2)

    // Test move
    url5 := fmt.Sprintf("%snode/%s/%s/move/127_63_99/127_64_100", server.WebAPIPath, uuid, data.DataName())
    server.TestHTTP(t, "POST", url5, nil)
    testResponse(t, afterMove, "%snode/%s/%s/elements/1000_1000_1000/0_0_0", server.WebAPIPath, uuid, data.DataName())

    // --- check tag
    synapse2 = getTag(tag, afterMove)
    testResponse(t, synapse2, "%snode/%s/%s/tag/%s?relationships=true", server.WebAPIPath, uuid, data.DataName(), tag)

    // Test delete
    url6 := fmt.Sprintf("%snode/%s/%s/element/127_64_100", server.WebAPIPath, uuid, data.DataName())
    server.TestHTTP(t, "DELETE", url6, nil)
    testResponse(t, afterDelete, "%snode/%s/%s/elements/1000_1000_1000/0_0_0", server.WebAPIPath, uuid, data.DataName())

    // --- check tag
    synapse2 = getTag(tag, afterDelete)
    testResponse(t, synapse2, "%snode/%s/%s/tag/%s?relationships=true", server.WebAPIPath, uuid, data.DataName(), tag)
}
// Creates a new data instance for labelblk
func newDataInstance(uuid dvid.UUID, t *testing.T, name dvid.InstanceName) *Data {
    config := dvid.NewConfig()
    dataservice, err := datastore.NewData(uuid, labelsT, name, config)
    if err != nil {
        t.Errorf("Unable to create labelblk instance %q: %v\n", name, err)
    }
    labels, ok := dataservice.(*Data)
    if !ok {
        t.Errorf("Can't cast labels data service into Data\n")
    }
    return labels
}
// makeGrayscale creates a grayscale (uint8 imageblk) instance for use in tests.
func makeGrayscale(uuid dvid.UUID, t *testing.T, name dvid.InstanceName) *imageblk.Data {
    config := dvid.NewConfig()
    dataservice, err := datastore.NewData(uuid, grayscaleT, name, config)
    if err != nil {
        t.Errorf("Unable to create grayscale instance %q: %v\n", name, err)
    }
    grayscale, ok := dataservice.(*imageblk.Data)
    if !ok {
        t.Errorf("Can't cast data service into imageblk Data\n")
    }
    return grayscale
}
func TestBasic(t *testing.T) {
    datastore.OpenTest()
    defer datastore.CloseTest()

    uuid, _ := initTestRepo()

    config := dvid.NewConfig()
    _, err := datastore.NewData(uuid, dtype, "instance1", config)
    if err != nil {
        t.Errorf("Error creating new multichan16 instance: %v\n", err)
    }
}
func TestROIPartition(t *testing.T) {
    datastore.OpenTest()
    defer datastore.CloseTest()

    // Create the ROI dataservice.
    uuid, versionID := initTestRepo()

    config := dvid.NewConfig()
    dataservice, err := datastore.NewData(uuid, roitype, "roi", config)
    if err != nil {
        t.Errorf("Error creating new roi instance: %v\n", err)
    }
    data, ok := dataservice.(*Data)
    if !ok {
        t.Errorf("Returned new data instance is not roi.Data\n")
    }

    // PUT an ROI
    roiRequest := fmt.Sprintf("%snode/%s/%s/roi", server.WebAPIPath, uuid, data.DataName())
    req, err := http.NewRequest("POST", roiRequest, getSpansJSON(testSpans))
    if err != nil {
        t.Errorf("Unsuccessful POST request (%s): %v\n", roiRequest, err)
    }
    ctx := datastore.NewVersionedCtx(data, versionID)
    w := httptest.NewRecorder()
    data.ServeHTTP(uuid, ctx, w, req)
    if w.Code != http.StatusOK {
        t.Errorf("Bad server response roi POST, status %d, for roi %q\n", w.Code, data.DataName())
    }

    // Request the standard subvolume partitioning
    partitionReq := fmt.Sprintf("%snode/%s/%s/partition?batchsize=5&optimized=true", server.WebAPIPath, uuid, data.DataName())
    req, err = http.NewRequest("GET", partitionReq, nil)
    if err != nil {
        t.Errorf("Unsuccessful GET request (%s): %v\n", partitionReq, err)
    }
    w = httptest.NewRecorder()
    data.ServeHTTP(uuid, ctx, w, req)
    if w.Code != http.StatusOK {
        t.Errorf("Bad server response roi GET, status %d, for roi %q\n", w.Code, data.DataName())
    }

    var subvolJSON, expectedJSON interface{}
    response := w.Body.Bytes()
    if err := json.Unmarshal(response, &subvolJSON); err != nil {
        t.Errorf("Can't unmarshal JSON: %s\n", w.Body.Bytes())
    }
    json.Unmarshal([]byte(expectedPartition), &expectedJSON)
    if !reflect.DeepEqual(subvolJSON, expectedJSON) {
        t.Errorf("Error doing optimized subvolume partitioning.  Got bad result:\n%s\n", string(response))
    }
}
// Test added after error in getting two paths to the same ancestor k/v after merge.
func TestDiamondGetOnMerge(t *testing.T) {
    datastore.OpenTest()
    defer datastore.CloseTest()

    uuid, _ := initTestRepo()

    config := dvid.NewConfig()
    dataservice, err := datastore.NewData(uuid, kvtype, "mergetest", config)
    if err != nil {
        t.Fatalf("Error creating new keyvalue instance: %v\n", err)
    }
    data, ok := dataservice.(*Data)
    if !ok {
        t.Fatalf("Returned new data instance is not keyvalue.Data\n")
    }

    // PUT a value
    key1 := "mykey"
    value1 := "some stuff"
    key1req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid, data.DataName(), key1)
    server.TestHTTP(t, "POST", key1req, strings.NewReader(value1))

    if err = datastore.Commit(uuid, "my commit msg", []string{"stuff one", "stuff two"}); err != nil {
        t.Errorf("Unable to lock root node %s: %v\n", uuid, err)
    }
    uuid2, err := datastore.NewVersion(uuid, "first child", nil)
    if err != nil {
        t.Fatalf("Unable to create 1st child off root %s: %v\n", uuid, err)
    }
    if err = datastore.Commit(uuid2, "first child", nil); err != nil {
        t.Errorf("Unable to commit node %s: %v\n", uuid2, err)
    }
    uuid3, err := datastore.NewVersion(uuid, "second child", nil)
    if err != nil {
        t.Fatalf("Unable to create 2nd child off root %s: %v\n", uuid, err)
    }
    if err = datastore.Commit(uuid3, "second child", nil); err != nil {
        t.Errorf("Unable to commit node %s: %v\n", uuid3, err)
    }

    child, err := datastore.Merge([]dvid.UUID{uuid2, uuid3}, "merging stuff", datastore.MergeConflictFree)
    if err != nil {
        t.Errorf("Error doing merge: %v\n", err)
    }

    // We should be able to see just the original uuid value of the k/v
    childreq := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, child, data.DataName(), key1)
    returnValue := server.TestHTTP(t, "GET", childreq, nil)
    if string(returnValue) != value1 {
        t.Errorf("Error on merged child, key %q: expected %q, got %q\n", key1, value1, string(returnValue))
    }
}
// serverGCHandler sets the Go garbage collection target percentage from a POSTed 'percent' value.
func serverGCHandler(c web.C, w http.ResponseWriter, r *http.Request) {
    config := dvid.NewConfig()
    if err := config.SetByJSON(r.Body); err != nil {
        BadRequest(w, r, fmt.Sprintf("Error decoding POSTed JSON config for gc: %v", err))
        return
    }
    percent, found, err := config.GetInt("percent")
    if !found || err != nil {
        BadRequest(w, r, "POST on gc endpoint requires specification of valid 'percent' number")
        return
    }
    old := debug.SetGCPercent(percent)
    w.Header().Set("Content-Type", "text/plain")
    fmt.Fprintf(w, "DVID server garbage collection target percentage set to %d from %d\n", percent, old)
}
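// Hypothetical client sketch for the gc handler above; the server address and the
// /api/server/gc path are assumptions, while the "percent" key and the plain-text
// confirmation line come from serverGCHandler itself.
package main

import (
    "fmt"
    "io"
    "log"
    "net/http"
    "strings"
)

func main() {
    // Ask the server to set the garbage collection target percentage to 75.
    body := strings.NewReader(`{"percent": 75}`)
    resp, err := http.Post("http://localhost:8000/api/server/gc", "application/json", body) // path assumed
    if err != nil {
        log.Fatalf("POST failed: %v", err)
    }
    defer resp.Body.Close()

    // The handler answers with a one-line plain-text confirmation.
    msg, err := io.ReadAll(resp.Body)
    if err != nil {
        log.Fatalf("could not read response: %v", err)
    }
    fmt.Print(string(msg))
}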
// ForegroundROI creates a new ROI by determining all non-background blocks.
func (d *Data) ForegroundROI(req datastore.Request, reply *datastore.Response) error {
    if d.Values.BytesPerElement() != 1 {
        return fmt.Errorf("Foreground ROI command only implemented for 1 byte/voxel data!")
    }

    // Parse the request
    var uuidStr, dataName, cmdStr, destName, backgroundStr string
    req.CommandArgs(1, &uuidStr, &dataName, &cmdStr, &destName, &backgroundStr)

    // Get the version and repo
    uuid, versionID, err := datastore.MatchingUUID(uuidStr)
    if err != nil {
        return err
    }
    if err = datastore.AddToNodeLog(uuid, []string{req.Command.String()}); err != nil {
        return err
    }

    // Use existing destination data or a new ROI data.
    var dest *roi.Data
    dest, err = roi.GetByUUID(uuid, dvid.InstanceName(destName))
    if err != nil {
        config := dvid.NewConfig()
        typeservice, err := datastore.TypeServiceByName("roi")
        if err != nil {
            return err
        }
        dataservice, err := datastore.NewData(uuid, typeservice, dvid.InstanceName(destName), config)
        if err != nil {
            return err
        }
        var ok bool
        dest, ok = dataservice.(*roi.Data)
        if !ok {
            return fmt.Errorf("Could not create ROI data instance")
        }
    }

    // Asynchronously process the voxels.
    background, err := dvid.StringToPointNd(backgroundStr, ",")
    if err != nil {
        return err
    }
    go d.foregroundROI(versionID, dest, background)

    return nil
}
// check subgraph endpoint
func TestLabelgraphPostAndDelete(t *testing.T) {
    datastore.OpenTest()
    defer datastore.CloseTest()

    // Create the labelgraph dataservice.
    uuid, _ := initTestRepo()

    config := dvid.NewConfig()
    dataservice, err := datastore.NewData(uuid, dtype, "lg", config)
    if err != nil {
        t.Errorf("Error creating new labelgraph instance: %v\n", err)
    }
    data, ok := dataservice.(*Data)
    if !ok {
        t.Errorf("Returned new data instance is not labelgraph.Data\n")
    }

    // PUT a labelgraph
    subgraphRequest := fmt.Sprintf("%snode/%s/%s/subgraph", server.WebAPIPath, uuid, data.DataName())
    server.TestHTTP(t, "POST", subgraphRequest, getGraphJSON())

    // Get back the labelgraph
    returnedData := server.TestHTTP(t, "GET", subgraphRequest, nil)
    retgraph, err := loadGraphJSON(returnedData)
    if err != nil {
        t.Errorf("Error on getting back JSON from subgraph GET: %v\n", err)
    }

    // Make sure the two are the same.
    if !reflect.DeepEqual(retgraph, getTestGraph()) {
        t.Errorf("Bad PUT/GET subgraph roundtrip\nOriginal:\n%s\nReturned:\n%s\n", getTestGraph(), retgraph)
    }

    // Delete the labelgraph
    _ = server.TestHTTP(t, "DELETE", subgraphRequest, nil)

    // Subgraph should now be empty
    returnedData = server.TestHTTP(t, "GET", subgraphRequest, nil)
    expectedResp := "{\"Transactions\":[],\"Vertices\":[],\"Edges\":[]}"
    if string(returnedData) != expectedResp {
        t.Errorf("Bad subgraph after delete.  Should be %s got: %s\n", expectedResp, string(returnedData))
    }
}
func TestMultiscale2dRepoPersistence(t *testing.T) {
    datastore.OpenTest()
    defer datastore.CloseTest()

    // Make source
    uuid, _ := initTestRepo()
    makeGrayscale(uuid, t, "grayscale")

    // Make imagetile instance and set various properties
    config := dvid.NewConfig()
    config.Set("Placeholder", "true")
    config.Set("Format", "jpg")
    config.Set("Source", "grayscale")
    dataservice, err := datastore.NewData(uuid, mstype, "myimagetile", config)
    if err != nil {
        t.Errorf("Unable to create imagetile instance: %v\n", err)
    }
    msdata, ok := dataservice.(*Data)
    if !ok {
        t.Fatalf("Can't cast imagetile data service into imagetile.Data\n")
    }
    oldData := *msdata

    // Restart test datastore and see if datasets are still there.
    if err = datastore.SaveDataByUUID(uuid, msdata); err != nil {
        t.Fatalf("Unable to save repo during imagetile persistence test: %v\n", err)
    }
    datastore.CloseReopenTest()

    dataservice2, err := datastore.GetDataByUUID(uuid, "myimagetile")
    if err != nil {
        t.Fatalf("Can't get imagetile instance from reloaded test db: %v\n", err)
    }
    msdata2, ok := dataservice2.(*Data)
    if !ok {
        t.Errorf("Returned new data instance 2 is not imagetile.Data\n")
    }
    if !reflect.DeepEqual(oldData.Properties, msdata2.Properties) {
        t.Errorf("Expected properties %v, got %v\n", oldData.Properties, msdata2.Properties)
    }
}
func TestROIPostAndDelete(t *testing.T) {
    datastore.OpenTest()
    defer datastore.CloseTest()

    // Create the ROI dataservice.
    uuid, _ := initTestRepo()

    config := dvid.NewConfig()
    dataservice, err := datastore.NewData(uuid, roitype, "roi", config)
    if err != nil {
        t.Errorf("Error creating new roi instance: %v\n", err)
    }
    data, ok := dataservice.(*Data)
    if !ok {
        t.Errorf("Returned new data instance is not roi.Data\n")
    }

    // PUT an ROI
    roiRequest := fmt.Sprintf("%snode/%s/%s/roi", server.WebAPIPath, uuid, data.DataName())
    server.TestHTTP(t, "POST", roiRequest, getSpansJSON(testSpans))

    // Get back the ROI
    returnedData := server.TestHTTP(t, "GET", roiRequest, nil)
    spans, err := putSpansJSON(returnedData)
    if err != nil {
        t.Errorf("Error on getting back JSON from roi GET: %v\n", err)
    }

    // Make sure the two are the same.
    if !reflect.DeepEqual(spans, testSpans) {
        t.Errorf("Bad PUT/GET ROI roundtrip\nOriginal:\n%s\nReturned:\n%s\n", testSpans, spans)
    }

    // Delete the ROI
    _ = server.TestHTTP(t, "DELETE", roiRequest, nil)

    // ROI should now be empty
    returnedData = server.TestHTTP(t, "GET", roiRequest, nil)
    if string(returnedData) != "[]" {
        t.Errorf("Bad ROI after ROI delete.  Should be [] got: %s\n", string(returnedData))
    }
}
func TestFloat32RepoPersistence(t *testing.T) {
    datastore.OpenTest()
    defer datastore.CloseTest()

    uuid, _ := initTestRepo()

    // Make grayscale and set various properties
    config := dvid.NewConfig()
    config.Set("BlockSize", "12,13,14")
    config.Set("VoxelSize", "1.1,2.8,11")
    config.Set("VoxelUnits", "microns,millimeters,nanometers")
    dataservice, err := datastore.NewData(uuid, floatimgT, "floatimg", config)
    if err != nil {
        t.Errorf("Unable to create float32 instance: %s\n", err)
    }
    floatimg, ok := dataservice.(*Data)
    if !ok {
        t.Errorf("Can't cast float32 data service into Data\n")
    }
    oldData := *floatimg

    // Restart test datastore and see if datasets are still there.
    if err = datastore.SaveDataByUUID(uuid, floatimg); err != nil {
        t.Fatalf("Unable to save repo during floatimg persistence test: %v\n", err)
    }
    datastore.CloseReopenTest()

    dataservice2, err := datastore.GetDataByUUIDName(uuid, "floatimg")
    if err != nil {
        t.Fatalf("Can't get floatimg instance from reloaded test db: %v\n", err)
    }
    floatimg2, ok := dataservice2.(*Data)
    if !ok {
        t.Errorf("Returned new data instance 2 is not imageblk.Data\n")
    }
    if !oldData.Equals(floatimg2) {
        t.Errorf("Expected %v, got %v\n", oldData, *floatimg2)
    }
}
func TestLabelblkRepoPersistence(t *testing.T) {
    tests.UseStore()
    defer tests.CloseStore()

    uuid, _ := initTestRepo()

    // Make labels and set various properties
    config := dvid.NewConfig()
    config.Set("BlockSize", "12,13,14")
    config.Set("VoxelSize", "1.1,2.8,11")
    config.Set("VoxelUnits", "microns,millimeters,nanometers")
    dataservice, err := datastore.NewData(uuid, labelsT, "mylabels", config)
    if err != nil {
        t.Errorf("Unable to create labels instance: %v\n", err)
    }
    labels, ok := dataservice.(*Data)
    if !ok {
        t.Errorf("Can't cast labels data service into Data\n")
    }
    oldData := *labels

    // Restart test datastore and see if datasets are still there.
    if err = datastore.SaveDataByUUID(uuid, labels); err != nil {
        t.Fatalf("Unable to save repo during labels persistence test: %v\n", err)
    }
    tests.CloseReopenStore()

    dataservice2, err := datastore.GetDataByUUID(uuid, "mylabels")
    if err != nil {
        t.Fatalf("Can't get labels instance from reloaded test db: %v\n", err)
    }
    labels2, ok := dataservice2.(*Data)
    if !ok {
        t.Errorf("Returned new data instance 2 is not labelblk.Data\n")
    }
    if !oldData.Equals(labels2) {
        t.Errorf("Expected %v, got %v\n", oldData, *labels2)
    }
}
func TestKeyvalueRoundTrip(t *testing.T) {
    datastore.OpenTest()
    defer datastore.CloseTest()

    uuid, versionID := initTestRepo()

    // Add data
    config := dvid.NewConfig()
    dataservice, err := datastore.NewData(uuid, kvtype, "roundtripper", config)
    if err != nil {
        t.Errorf("Error creating new keyvalue instance: %v\n", err)
    }
    kvdata, ok := dataservice.(*Data)
    if !ok {
        t.Errorf("Returned new data instance is not keyvalue.Data\n")
    }

    ctx := datastore.NewVersionedCtx(dataservice, versionID)

    keyStr := "testkey.-{}03`~| %@\x01"
    value := []byte("I like Japan and this is some unicode: \u65e5\u672c\u8a9e")

    if err = kvdata.PutData(ctx, keyStr, value); err != nil {
        t.Errorf("Could not put keyvalue data: %v\n", err)
    }

    retrieved, found, err := kvdata.GetData(ctx, keyStr)
    if err != nil {
        t.Fatalf("Could not get keyvalue data: %v\n", err)
    }
    if !found {
        t.Fatalf("Could not find put keyvalue\n")
    }
    if bytes.Compare(value, retrieved) != 0 {
        t.Errorf("keyvalue retrieved %q != put %q\n", string(retrieved), string(value))
    }
}
func TestMultichan16RepoPersistence(t *testing.T) {
    datastore.OpenTest()
    defer datastore.CloseTest()

    uuid, _ := initTestRepo()

    // Make multichan16 instance and set various properties
    config := dvid.NewConfig()
    dataservice, err := datastore.NewData(uuid, dtype, "mymultichan16", config)
    if err != nil {
        t.Errorf("Unable to create multichan16 instance: %v\n", err)
    }
    mcdata, ok := dataservice.(*Data)
    if !ok {
        t.Errorf("Can't cast multichan16 data service into multichan16.Data\n")
    }
    oldData := *mcdata

    // Restart test datastore and see if datasets are still there.
    if err = datastore.SaveDataByUUID(uuid, mcdata); err != nil {
        t.Fatalf("Unable to save repo during multichan16 persistence test: %v\n", err)
    }
    datastore.CloseReopenTest()

    dataservice2, err := datastore.GetDataByUUIDName(uuid, "mymultichan16")
    if err != nil {
        t.Fatalf("Can't get multichan16 instance from reloaded test db: %v\n", err)
    }
    mcdata2, ok := dataservice2.(*Data)
    if !ok {
        t.Errorf("Returned new data instance 2 is not multichan16.Data\n")
    }
    if !oldData.Equals(mcdata2) {
        t.Errorf("Expected %v, got %v\n", oldData, *mcdata2)
    }
}
func TestKeyvalueRepoPersistence(t *testing.T) {
    tests.UseStore()
    defer tests.CloseStore()

    uuid, _ := initTestRepo()

    // Make keyvalue instance and set various properties
    config := dvid.NewConfig()
    dataservice, err := datastore.NewData(uuid, kvtype, "mykv", config)
    if err != nil {
        t.Errorf("Unable to create keyvalue instance: %v\n", err)
    }
    kvdata, ok := dataservice.(*Data)
    if !ok {
        t.Errorf("Can't cast keyvalue data service into keyvalue.Data\n")
    }
    oldData := *kvdata

    // Restart test datastore and see if datasets are still there.
    if err = datastore.SaveDataByUUID(uuid, kvdata); err != nil {
        t.Fatalf("Unable to save repo during keyvalue persistence test: %v\n", err)
    }
    tests.CloseReopenStore()

    dataservice2, err := datastore.GetDataByUUID(uuid, "mykv")
    if err != nil {
        t.Fatalf("Can't get keyvalue instance from reloaded test db: %v\n", err)
    }
    kvdata2, ok := dataservice2.(*Data)
    if !ok {
        t.Errorf("Returned new data instance 2 is not keyvalue.Data\n")
    }
    if !oldData.Equals(kvdata2) {
        t.Errorf("Expected %v, got %v\n", oldData, *kvdata2)
    }
}
func TestROIRepoPersistence(t *testing.T) {
    datastore.OpenTest()
    defer datastore.CloseTest()

    uuid, _ := initTestRepo()

    // Add data
    config := dvid.NewConfig()
    dataservice1, err := datastore.NewData(uuid, roitype, "myroi", config)
    if err != nil {
        t.Errorf("Error creating new roi instance: %v\n", err)
    }
    roi1, ok := dataservice1.(*Data)
    if !ok {
        t.Errorf("Returned new data instance 1 is not roi.Data\n")
    }
    if roi1.DataName() != "myroi" {
        t.Errorf("New roi data instance name set incorrectly: %q != %q\n", roi1.DataName(), "myroi")
    }

    config.Set("BlockSize", "15,16,17")
    dataservice2, err := datastore.NewData(uuid, roitype, "myroi2", config)
    if err != nil {
        t.Errorf("Error creating new roi instance: %v\n", err)
    }
    roi2, ok := dataservice2.(*Data)
    if !ok {
        t.Errorf("Returned new data instance 2 is not roi.Data\n")
    }
    roi2.MinZ = 13
    roi2.MaxZ = 3098
    oldData := *roi2

    // Check instance IDs
    if roi1.InstanceID() == roi2.InstanceID() {
        t.Errorf("Instance IDs should be different: %d == %d\n", roi1.InstanceID(), roi2.InstanceID())
    }

    // Restart test datastore and see if datasets are still there.
    if err = datastore.SaveDataByUUID(uuid, dataservice1); err != nil {
        t.Fatalf("Unable to save data1 during ROI persistence test: %v\n", err)
    }
    if err = datastore.SaveDataByUUID(uuid, dataservice2); err != nil {
        t.Fatalf("Unable to save data2 during ROI persistence test: %v\n", err)
    }
    datastore.CloseReopenTest()

    dataservice3, err := datastore.GetDataByUUID(uuid, "myroi2")
    if err != nil {
        t.Fatalf("Can't get ROI instance from reloaded test db: %v\n", err)
    }
    roi2new, ok := dataservice3.(*Data)
    if !ok {
        t.Errorf("Returned new data instance 3 is not roi.Data\n")
    }
    if !oldData.Equals(roi2new) {
        t.Errorf("Expected %v, got %v\n", oldData, *roi2new)
    }
}
func TestTagRequests(t *testing.T) {
    datastore.OpenTest()
    defer datastore.CloseTest()

    uuid, _ := initTestRepo()

    config := dvid.NewConfig()
    dataservice, err := datastore.NewData(uuid, syntype, "mysynapses", config)
    if err != nil {
        t.Fatalf("Error creating new data instance: %v\n", err)
    }
    data, ok := dataservice.(*Data)
    if !ok {
        t.Fatalf("Returned new data instance is not synapse.Data\n")
    }

    // PUT first batch of synapses
    testJSON, err := json.Marshal(testTagData)
    if err != nil {
        t.Fatal(err)
    }
    url1 := fmt.Sprintf("%snode/%s/%s/elements", server.WebAPIPath, uuid, data.DataName())
    server.TestHTTP(t, "POST", url1, strings.NewReader(string(testJSON)))

    // Test Tags
    expected := Elements{
        {
            ElementNR{
                Pos:  dvid.Point3d{15, 27, 35}, // Label 1
                Kind: PreSyn,
                Tags: []Tag{"Synapse1", "Zlt90"},
                Prop: map[string]string{
                    "Im a T-Bar":         "yes",
                    "I'm not a PSD":      "sure",
                    "i'm really special": "",
                },
            },
            []Relationship{{Rel: PreSynTo, To: dvid.Point3d{20, 30, 40}}, {Rel: PreSynTo, To: dvid.Point3d{14, 25, 37}}, {Rel: PreSynTo, To: dvid.Point3d{33, 30, 31}}},
        },
        {
            ElementNR{
                Pos:  dvid.Point3d{21, 33, 40}, // Label 2
                Kind: PostSyn,
                Tags: []Tag{"Synapse1"},
            },
            []Relationship{{Rel: PostSynTo, To: dvid.Point3d{15, 27, 35}}},
        },
    }
    tag := Tag("Synapse1")
    testResponse(t, expected, "%snode/%s/%s/tag/%s?relationships=true", server.WebAPIPath, uuid, data.DataName(), tag)

    expected = Elements{
        {
            ElementNR{
                Pos:  dvid.Point3d{20, 30, 40}, // Label 2
                Kind: PostSyn,
                Tags: []Tag{"Synapse10"},
            },
            []Relationship{{Rel: PostSynTo, To: dvid.Point3d{15, 27, 35}}},
        },
    }
    tag = Tag("Synapse10")
    testResponse(t, expected, "%snode/%s/%s/tag/%s?relationships=true", server.WebAPIPath, uuid, data.DataName(), tag)

    expected = Elements{
        {
            ElementNR{
                Pos:  dvid.Point3d{14, 25, 37}, // Label 3
                Kind: PostSyn,
                Tags: []Tag{"Synapse11", "Zlt90"},
            },
            []Relationship{{Rel: PostSynTo, To: dvid.Point3d{15, 27, 35}}},
        },
    }
    tag = Tag("Synapse11")
    testResponse(t, expected, "%snode/%s/%s/tag/%s?relationships=true", server.WebAPIPath, uuid, data.DataName(), tag)

    expected = Elements{
        {
            ElementNR{
                Pos:  dvid.Point3d{33, 30, 31},
                Kind: PostSyn,
                Tags: []Tag{"Synapse111", "Zlt90"},
            },
            []Relationship{{Rel: PostSynTo, To: dvid.Point3d{15, 27, 35}}},
        },
    }
    tag = Tag("Synapse111")
    testResponse(t, expected, "%snode/%s/%s/tag/%s?relationships=true", server.WebAPIPath, uuid, data.DataName(), tag)

    // Delete an annotation and check that it's deleted from its tag.
    delurl := fmt.Sprintf("%snode/%s/%s/element/15_27_35", server.WebAPIPath, uuid, data.DataName())
    server.TestHTTP(t, "DELETE", delurl, nil)

    expected = Elements{
        {
            ElementNR{
                Pos:  dvid.Point3d{21, 33, 40}, // Label 2
                Kind: PostSyn,
                Tags: []Tag{"Synapse1"},
            },
            []Relationship{{Rel: PostSynTo, To: dvid.Point3d{15, 27, 35}}},
        },
    }
    testResponse(t, expected, "%snode/%s/%s/tag/Synapse1?relationships=true", server.WebAPIPath, uuid, data.DataName())
}
// CreateComposite creates a new rgba8 image by combining hash of labels + the grayscale
func (d *Data) CreateComposite(request datastore.Request, reply *datastore.Response) error {
    timedLog := dvid.NewTimeLog()

    // Parse the request
    var uuidStr, dataName, cmdStr, grayscaleName, destName string
    request.CommandArgs(1, &uuidStr, &dataName, &cmdStr, &grayscaleName, &destName)

    // Get the version
    uuid, v, err := datastore.MatchingUUID(uuidStr)
    if err != nil {
        return err
    }

    // Log request
    if err = datastore.AddToNodeLog(uuid, []string{request.Command.String()}); err != nil {
        return err
    }

    // Get the grayscale data.
    dataservice, err := datastore.GetDataByUUIDName(uuid, dvid.InstanceName(grayscaleName))
    if err != nil {
        return err
    }
    grayscale, ok := dataservice.(*imageblk.Data)
    if !ok {
        return fmt.Errorf("%s is not the name of uint8 data", grayscaleName)
    }

    // Create a new rgba8blk data.
    var compservice datastore.DataService
    compservice, err = datastore.GetDataByUUIDName(uuid, dvid.InstanceName(destName))
    if err == nil {
        return fmt.Errorf("Data instance with name %q already exists", destName)
    }
    typeService, err := datastore.TypeServiceByName("rgba8blk")
    if err != nil {
        return fmt.Errorf("Could not get rgba8 type service from DVID")
    }
    config := dvid.NewConfig()
    compservice, err = datastore.NewData(uuid, typeService, dvid.InstanceName(destName), config)
    if err != nil {
        return err
    }
    composite, ok := compservice.(*imageblk.Data)
    if !ok {
        return fmt.Errorf("Error: %s was unable to be set to rgba8 data", destName)
    }

    // Iterate through all labels and grayscale chunks incrementally in Z, a layer at a time.
    wg := new(sync.WaitGroup)
    op := &compositeOp{grayscale, composite, v}
    chunkOp := &storage.ChunkOp{op, wg}

    store, err := d.GetOrderedKeyValueDB()
    if err != nil {
        return err
    }
    ctx := datastore.NewVersionedCtx(d, v)
    extents := d.Extents()
    blockBeg := imageblk.NewTKey(extents.MinIndex)
    blockEnd := imageblk.NewTKey(extents.MaxIndex)
    err = store.ProcessRange(ctx, blockBeg, blockEnd, chunkOp, storage.ChunkFunc(d.CreateCompositeChunk))
    wg.Wait()
    if err != nil {
        return err
    }

    // Set new mapped data to same extents.
    composite.Properties.Extents = grayscale.Properties.Extents
    if err := datastore.SaveDataByUUID(uuid, composite); err != nil {
        dvid.Infof("Could not save new data '%s': %v\n", destName, err)
    }

    timedLog.Infof("Created composite of %s and %s", grayscaleName, destName)
    return nil
}
func TestROIRequests(t *testing.T) {
    datastore.OpenTest()
    defer datastore.CloseTest()

    // Create the ROI dataservice.
    uuid, _ := initTestRepo()

    config := dvid.NewConfig()
    dataservice, err := datastore.NewData(uuid, roitype, "roi", config)
    if err != nil {
        t.Errorf("Error creating new roi instance: %v\n", err)
    }
    data, ok := dataservice.(*Data)
    if !ok {
        t.Errorf("Returned new data instance is not roi.Data\n")
    }

    // PUT an ROI
    roiRequest := fmt.Sprintf("%snode/%s/%s/roi", server.WebAPIPath, uuid, data.DataName())
    server.TestHTTP(t, "POST", roiRequest, getSpansJSON(testSpans))

    // Get back the ROI
    returnedData := server.TestHTTP(t, "GET", roiRequest, nil)
    spans, err := putSpansJSON(returnedData)
    if err != nil {
        t.Errorf("Error on getting back JSON from roi GET: %v\n", err)
    }

    // Make sure the two are the same.
    if !reflect.DeepEqual(spans, testSpans) {
        t.Errorf("Bad PUT/GET ROI roundtrip\nOriginal:\n%s\nReturned:\n%s\n", testSpans, spans)
    }

    // Test the ptquery
    ptqueryRequest := fmt.Sprintf("%snode/%s/%s/ptquery", server.WebAPIPath, uuid, data.DataName())
    returnedData = server.TestHTTP(t, "POST", ptqueryRequest, getPointsJSON(testPoints))
    inclusions, err := putInclusionJSON(returnedData)
    if err != nil {
        t.Fatalf("Error on getting back JSON from ptquery: %v\n", err)
    }

    // Make sure the two are the same.
    if !reflect.DeepEqual(inclusions, expectedInclusions) {
        t.Errorf("Bad ptquery results\nOriginal:\n%s\nReturned:\n%s\n", expectedInclusions, inclusions)
    }

    // Test ROI mask out of range -- should be all 0.
    maskRequest := fmt.Sprintf("%snode/%s/%s/mask/0_1_2/100_100_100/10_40_70", server.WebAPIPath, uuid, data.DataName())
    returnedData = server.TestHTTP(t, "GET", maskRequest, nil)
    if len(returnedData) != 100*100*100 {
        t.Errorf("Expected mask volume of %d bytes, got %d bytes instead\n", 100*100*100, len(returnedData))
    }
    for i, value := range returnedData {
        if value != 0 {
            t.Errorf("Expected all-zero mask, got %d at index %d\n", value, i)
            break
        }
    }

    // Test ROI mask within range.
    maskRequest = fmt.Sprintf("%snode/%s/%s/mask/0_1_2/100_100_100/6350_3232_3200", server.WebAPIPath, uuid, data.DataName())
    returnedData = server.TestHTTP(t, "GET", maskRequest, nil)
    if len(returnedData) != 100*100*100 {
        t.Errorf("Expected mask volume of %d bytes, got %d bytes instead\n", 100*100*100, len(returnedData))
    }

    // Check first block plane
    for y := 0; y < 100; y++ {
        for x := 0; x < 100; x++ {
            value := returnedData[y*100+x]
            if x < 50 && value != 0 {
                t.Errorf("Expected mask to be zero at (%d, %d) before ROI, got %d instead\n", x, y, value)
                break
            }
            if x >= 50 && y < 64 && value != 1 {
                t.Errorf("Expected mask to be 1 at (%d, %d) within ROI, got %d instead\n", x, y, value)
                break
            }
            // tuple{100, 103, 201, 212}
            if x <= 81 && y >= 64 && y < 96 && value != 0 {
                t.Errorf("Expected mask to be zero at (%d, %d) before ROI, got %d instead\n", x, y, value)
                break
            }
            if x > 81 && y >= 64 && y < 96 && value != 1 {
                t.Errorf("Expected mask to be 1 at (%d, %d) within ROI, got %d instead\n", x, y, value)
                break
            }
        }
    }

    // Check second block plane
    offset := 32 * 100 * 100 // moves to next block in Z
    for y := 0; y < 100; y++ {
        for x := 0; x < 100; x++ {
            value := returnedData[offset+y*100+x]
            if x < 50 && value != 0 {
                t.Errorf("Expected mask to be zero at (%d, %d) before ROI, got %d instead\n", x, y, value)
                break
            }
            if x <= 81 && y < 32 && value != 0 {
                t.Errorf("Expected mask to be zero at (%d, %d) before ROI, got %d instead\n", x, y, value)
                break
            }
            if x > 81 && y < 32 && value != 1 {
                t.Errorf("Expected mask to be 1 at (%d, %d) within ROI, got %d instead\n", x, y, value)
                break
            }
            if y >= 32 && value != 0 {
                t.Errorf("Expected mask to be zero at (%d, %d) before ROI, got %d instead\n", x, y, value)
                break
            }
        }
    }

    // Check last block plane
    offset = 96 * 100 * 100 // moves to last ROI layer in Z
    for y := 0; y < 100; y++ {
        for x := 0; x < 100; x++ {
            value := returnedData[offset+y*100+x]
            if x < 50 && value != 0 {
                t.Errorf("Expected mask to be zero at (%d, %d) before ROI, got %d instead\n", x, y, value)
                break
            }
            if x >= 50 && y < 32 && value != 1 {
                t.Errorf("Expected mask to be 1 at (%d, %d) within ROI, got %d instead\n", x, y, value)
                break
            }
            if y >= 32 && y < 64 && value != 0 {
                t.Errorf("Expected mask to be zero at (%d, %d) before ROI, got %d instead\n", x, y, value)
                break
            }
            if x >= 50 && y >= 64 && y < 96 && value != 1 {
                t.Errorf("Expected mask to be 1 at (%d, %d) within ROI, got %d instead\n", x, y, value)
                break
            }
            if y >= 96 && value != 0 {
                t.Errorf("Expected mask to be zero at (%d, %d) before ROI, got %d instead\n", x, y, value)
                break
            }
        }
    }
}
// testRequest exercises the basic keyvalue key, keyrange, and keys endpoints.
func testRequest(t *testing.T, uuid dvid.UUID, versionID dvid.VersionID, name dvid.InstanceName) {
    config := dvid.NewConfig()
    dataservice, err := datastore.NewData(uuid, kvtype, name, config)
    if err != nil {
        t.Fatalf("Error creating new keyvalue instance: %v\n", err)
    }
    data, ok := dataservice.(*Data)
    if !ok {
        t.Fatalf("Returned new data instance is not keyvalue.Data\n")
    }

    // PUT a value
    key1 := "mykey"
    value1 := "some stuff"
    key1req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid, data.DataName(), key1)
    server.TestHTTP(t, "POST", key1req, strings.NewReader(value1))

    // Get back k/v
    returnValue := server.TestHTTP(t, "GET", key1req, nil)
    if string(returnValue) != value1 {
        t.Errorf("Error on key %q: expected %s, got %s\n", key1, value1, string(returnValue))
    }

    // Expect error if no key used.
    badrequest := fmt.Sprintf("%snode/%s/%s/key/", server.WebAPIPath, uuid, data.DataName())
    server.TestBadHTTP(t, "GET", badrequest, nil)

    // Add 2nd k/v
    key2 := "my2ndkey"
    value2 := "more good stuff"
    key2req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid, data.DataName(), key2)
    server.TestHTTP(t, "POST", key2req, strings.NewReader(value2))

    // Add 3rd k/v
    key3 := "heresanotherkey"
    value3 := "my 3rd value"
    key3req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid, data.DataName(), key3)
    server.TestHTTP(t, "POST", key3req, strings.NewReader(value3))

    // Check return of first two keys in range.
    rangereq := fmt.Sprintf("%snode/%s/%s/keyrange/%s/%s", server.WebAPIPath, uuid, data.DataName(), "my", "zebra")
    returnValue = server.TestHTTP(t, "GET", rangereq, nil)

    var retrievedKeys []string
    if err = json.Unmarshal(returnValue, &retrievedKeys); err != nil {
        t.Errorf("Bad key range request unmarshal: %v\n", err)
    }
    if len(retrievedKeys) != 2 || retrievedKeys[0] != "my2ndkey" || retrievedKeys[1] != "mykey" {
        t.Errorf("Bad key range request return.  Expected: [%q,%q].  Got: %s\n", key2, key1, string(returnValue))
    }

    // Check return of all keys
    allkeyreq := fmt.Sprintf("%snode/%s/%s/keys", server.WebAPIPath, uuid, data.DataName())
    returnValue = server.TestHTTP(t, "GET", allkeyreq, nil)

    if err = json.Unmarshal(returnValue, &retrievedKeys); err != nil {
        t.Errorf("Bad key range request unmarshal: %v\n", err)
    }
    if len(retrievedKeys) != 3 || retrievedKeys[0] != "heresanotherkey" || retrievedKeys[1] != "my2ndkey" || retrievedKeys[2] != "mykey" {
        t.Errorf("Bad all key request return.  Expected: [%q,%q,%q].  Got: %s\n", key3, key2, key1, string(returnValue))
    }
}
func TestKeyvalueUnversioned(t *testing.T) {
    datastore.OpenTest()
    defer datastore.CloseTest()

    uuid, _ := initTestRepo()

    config := dvid.NewConfig()
    config.Set("versioned", "false")
    dataservice, err := datastore.NewData(uuid, kvtype, "unversiontest", config)
    if err != nil {
        t.Fatalf("Error creating new keyvalue instance: %v\n", err)
    }
    data, ok := dataservice.(*Data)
    if !ok {
        t.Fatalf("Returned new data instance is not keyvalue.Data\n")
    }

    // PUT a value
    key1 := "mykey"
    value1 := "some stuff"
    key1req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid, data.DataName(), key1)
    server.TestHTTP(t, "POST", key1req, strings.NewReader(value1))

    // Add 2nd k/v
    key2 := "my2ndkey"
    value2 := "more good stuff"
    key2req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid, data.DataName(), key2)
    server.TestHTTP(t, "POST", key2req, strings.NewReader(value2))

    // Create a new version in repo
    if err = datastore.Commit(uuid, "my commit msg", []string{"stuff one", "stuff two"}); err != nil {
        t.Errorf("Unable to lock root node %s: %v\n", uuid, err)
    }
    uuid2, err := datastore.NewVersion(uuid, "some child", nil)
    if err != nil {
        t.Fatalf("Unable to create new version off node %s: %v\n", uuid, err)
    }
    _, err = datastore.VersionFromUUID(uuid2)
    if err != nil {
        t.Fatalf("Unable to get version ID from new uuid %s: %v\n", uuid2, err)
    }

    // Change the 2nd k/v
    uuid2val := "this is completely different"
    uuid2req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid2, data.DataName(), key2)
    server.TestHTTP(t, "POST", uuid2req, strings.NewReader(uuid2val))

    // Now the first version value should equal the new value
    returnValue := server.TestHTTP(t, "GET", key2req, nil)
    if string(returnValue) != uuid2val {
        t.Errorf("Error on unversioned key %q: expected %s, got %s\n", key2, uuid2val, string(returnValue))
    }

    // Get the second version value
    returnValue = server.TestHTTP(t, "GET", uuid2req, nil)
    if string(returnValue) != uuid2val {
        t.Errorf("Error on unversioned key %q: expected %s, got %s\n", key2, uuid2val, string(returnValue))
    }

    // Check return of first two keys in range.
    rangereq := fmt.Sprintf("%snode/%s/%s/keyrange/%s/%s", server.WebAPIPath, uuid, data.DataName(), "my", "zebra")
    returnValue = server.TestHTTP(t, "GET", rangereq, nil)

    var retrievedKeys []string
    if err = json.Unmarshal(returnValue, &retrievedKeys); err != nil {
        t.Errorf("Bad key range request unmarshal: %v\n", err)
    }
    if len(retrievedKeys) != 2 || retrievedKeys[0] != "my2ndkey" || retrievedKeys[1] != "mykey" {
        t.Errorf("Bad key range request return.  Expected: [%q,%q].  Got: %s\n", key2, key1, string(returnValue))
    }

    // Commit the repo
    if err = datastore.Commit(uuid2, "my 2nd commit msg", []string{"changed 2nd k/v"}); err != nil {
        t.Errorf("Unable to commit node %s: %v\n", uuid2, err)
    }

    // Make grandchild of root
    uuid3, err := datastore.NewVersion(uuid2, "some child", nil)
    if err != nil {
        t.Fatalf("Unable to create new version off node %s: %v\n", uuid2, err)
    }

    // Delete the 2nd k/v
    uuid3req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid3, data.DataName(), key2)
    server.TestHTTP(t, "DELETE", uuid3req, nil)
    server.TestBadHTTP(t, "GET", uuid3req, nil)

    // Make sure the 2nd k/v is now missing for previous versions.
    server.TestBadHTTP(t, "GET", key2req, nil)
    server.TestBadHTTP(t, "GET", uuid2req, nil)

    // Make a child
    if err = datastore.Commit(uuid3, "my 3rd commit msg", []string{"deleted 2nd k/v"}); err != nil {
        t.Errorf("Unable to commit node %s: %v\n", uuid3, err)
    }
    uuid4, err := datastore.NewVersion(uuid3, "some child", nil)
    if err != nil {
        t.Fatalf("Unable to create new version off node %s: %v\n", uuid3, err)
    }

    // Change the 2nd k/v
    uuid4val := "we are reintroducing this k/v"
    uuid4req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid4, data.DataName(), key2)
    server.TestHTTP(t, "POST", uuid4req, strings.NewReader(uuid4val))

    if err = datastore.Commit(uuid4, "commit node 4", []string{"we modified stuff"}); err != nil {
        t.Errorf("Unable to commit node %s: %v\n", uuid4, err)
    }

    // Make sure the 2nd k/v is correct for each of previous versions.
    returnValue = server.TestHTTP(t, "GET", key2req, nil)
    if string(returnValue) != uuid4val {
        t.Errorf("Error on first version, key %q: expected %s, got %s\n", key2, uuid4val, string(returnValue))
    }
    returnValue = server.TestHTTP(t, "GET", uuid2req, nil)
    if string(returnValue) != uuid4val {
        t.Errorf("Error on second version, key %q: expected %s, got %s\n", key2, uuid4val, string(returnValue))
    }
    returnValue = server.TestHTTP(t, "GET", uuid3req, nil)
    if string(returnValue) != uuid4val {
        t.Errorf("Error on third version, key %q: expected %s, got %s\n", key2, uuid4val, string(returnValue))
    }
    returnValue = server.TestHTTP(t, "GET", uuid4req, nil)
    if string(returnValue) != uuid4val {
        t.Errorf("Error on fourth version, key %q: expected %s, got %s\n", key2, uuid4val, string(returnValue))
    }
}
func TestKeyvalueVersioning(t *testing.T) {
    datastore.OpenTest()
    defer datastore.CloseTest()

    uuid, _ := initTestRepo()

    config := dvid.NewConfig()
    dataservice, err := datastore.NewData(uuid, kvtype, "versiontest", config)
    if err != nil {
        t.Fatalf("Error creating new keyvalue instance: %v\n", err)
    }
    data, ok := dataservice.(*Data)
    if !ok {
        t.Fatalf("Returned new data instance is not keyvalue.Data\n")
    }

    // PUT a value
    key1 := "mykey"
    value1 := "some stuff"
    key1req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid, data.DataName(), key1)
    server.TestHTTP(t, "POST", key1req, strings.NewReader(value1))

    // Add 2nd k/v
    key2 := "my2ndkey"
    value2 := "more good stuff"
    key2req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid, data.DataName(), key2)
    server.TestHTTP(t, "POST", key2req, strings.NewReader(value2))

    // Create a new version in repo
    if err = datastore.Commit(uuid, "my commit msg", []string{"stuff one", "stuff two"}); err != nil {
        t.Errorf("Unable to lock root node %s: %v\n", uuid, err)
    }
    uuid2, err := datastore.NewVersion(uuid, "some child", nil)
    if err != nil {
        t.Fatalf("Unable to create new version off node %s: %v\n", uuid, err)
    }
    _, err = datastore.VersionFromUUID(uuid2)
    if err != nil {
        t.Fatalf("Unable to get version ID from new uuid %s: %v\n", uuid2, err)
    }

    // Change the 2nd k/v
    uuid2val := "this is completely different"
    uuid2req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid2, data.DataName(), key2)
    server.TestHTTP(t, "POST", uuid2req, strings.NewReader(uuid2val))

    // Get the first version value
    returnValue := server.TestHTTP(t, "GET", key2req, nil)
    if string(returnValue) != value2 {
        t.Errorf("Error on first version, key %q: expected %s, got %s\n", key2, value2, string(returnValue))
    }

    // Get the second version value
    returnValue = server.TestHTTP(t, "GET", uuid2req, nil)
    if string(returnValue) != uuid2val {
        t.Errorf("Error on second version, key %q: expected %s, got %s\n", key2, uuid2val, string(returnValue))
    }

    // Check return of first two keys in range.
    rangereq := fmt.Sprintf("%snode/%s/%s/keyrange/%s/%s", server.WebAPIPath, uuid, data.DataName(), "my", "zebra")
    returnValue = server.TestHTTP(t, "GET", rangereq, nil)

    var retrievedKeys []string
    if err = json.Unmarshal(returnValue, &retrievedKeys); err != nil {
        t.Errorf("Bad key range request unmarshal: %v\n", err)
    }
    if len(retrievedKeys) != 2 || retrievedKeys[0] != "my2ndkey" || retrievedKeys[1] != "mykey" {
        t.Errorf("Bad key range request return.  Expected: [%q,%q].  Got: %s\n", key2, key1, string(returnValue))
    }

    // Commit the repo
    if err = datastore.Commit(uuid2, "my 2nd commit msg", []string{"changed 2nd k/v"}); err != nil {
        t.Errorf("Unable to commit node %s: %v\n", uuid2, err)
    }

    // Make grandchild of root
    uuid3, err := datastore.NewVersion(uuid2, "some child", nil)
    if err != nil {
        t.Fatalf("Unable to create new version off node %s: %v\n", uuid2, err)
    }

    // Delete the 2nd k/v
    uuid3req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid3, data.DataName(), key2)
    server.TestHTTP(t, "DELETE", uuid3req, nil)
    server.TestBadHTTP(t, "GET", uuid3req, nil)

    // Make sure the 2nd k/v is correct for each of previous versions.
    returnValue = server.TestHTTP(t, "GET", key2req, nil)
    if string(returnValue) != value2 {
        t.Errorf("Error on first version, key %q: expected %s, got %s\n", key2, value2, string(returnValue))
    }
    returnValue = server.TestHTTP(t, "GET", uuid2req, nil)
    if string(returnValue) != uuid2val {
        t.Errorf("Error on second version, key %q: expected %s, got %s\n", key2, uuid2val, string(returnValue))
    }

    // Make a child
    if err = datastore.Commit(uuid3, "my 3rd commit msg", []string{"deleted 2nd k/v"}); err != nil {
        t.Errorf("Unable to commit node %s: %v\n", uuid3, err)
    }
    uuid4, err := datastore.NewVersion(uuid3, "some child", nil)
    if err != nil {
        t.Fatalf("Unable to create new version off node %s: %v\n", uuid3, err)
    }

    // Change the 2nd k/v
    uuid4val := "we are reintroducing this k/v"
    uuid4req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid4, data.DataName(), key2)
    server.TestHTTP(t, "POST", uuid4req, strings.NewReader(uuid4val))

    if err = datastore.Commit(uuid4, "commit node 4", []string{"we modified stuff"}); err != nil {
        t.Errorf("Unable to commit node %s: %v\n", uuid4, err)
    }

    // Make sure the 2nd k/v is correct for each of previous versions.
    returnValue = server.TestHTTP(t, "GET", key2req, nil)
    if string(returnValue) != value2 {
        t.Errorf("Error on first version, key %q: expected %s, got %s\n", key2, value2, string(returnValue))
    }
    returnValue = server.TestHTTP(t, "GET", uuid2req, nil)
    if string(returnValue) != uuid2val {
        t.Errorf("Error on second version, key %q: expected %s, got %s\n", key2, uuid2val, string(returnValue))
    }
    server.TestBadHTTP(t, "GET", uuid3req, nil)
    returnValue = server.TestHTTP(t, "GET", uuid4req, nil)
    if string(returnValue) != uuid4val {
        t.Errorf("Error on fourth version, key %q: expected %s, got %s\n", key2, uuid4val, string(returnValue))
    }

    // Let's try a merge!

    // Make a child off the 2nd version from root.
    uuid5, err := datastore.NewVersion(uuid2, "some child", nil)
    if err != nil {
        t.Fatalf("Unable to create new version off node %s: %v\n", uuid2, err)
    }

    // Store new stuff in 2nd k/v
    uuid5val := "this is forked value"
    uuid5req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid5, data.DataName(), key2)
    server.TestHTTP(t, "POST", uuid5req, strings.NewReader(uuid5val))

    returnValue = server.TestHTTP(t, "GET", uuid5req, nil)
    if string(returnValue) != uuid5val {
        t.Errorf("Error on forked child, key %q: expected %q, got %q\n", key2, uuid5val, string(returnValue))
    }

    // Commit node
    if err = datastore.Commit(uuid5, "forked node", []string{"we modified stuff"}); err != nil {
        t.Errorf("Unable to commit node %s: %v\n", uuid5, err)
    }

    // Should be able to merge using conflict-free (disjoint at key level) merge even though
    // it's conflicted.  Will get lazy error on request.
    badChild, err := datastore.Merge([]dvid.UUID{uuid4, uuid5}, "some child", datastore.MergeConflictFree)
    if err != nil {
        t.Errorf("Error doing merge: %v\n", err)
    }
    childreq := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, badChild, data.DataName(), key2)
    server.TestBadHTTP(t, "GET", childreq, nil)

    // Manually fix conflict: Branch, and then delete 2nd k/v and commit.
    uuid6, err := datastore.NewVersion(uuid5, "some child", nil)
    if err != nil {
        t.Fatalf("Unable to create new version off node %s: %v\n", uuid5, err)
    }
    uuid6req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid6, data.DataName(), key2)
    server.TestHTTP(t, "DELETE", uuid6req, nil)
    server.TestBadHTTP(t, "GET", uuid6req, nil)

    if err = datastore.Commit(uuid6, "deleted forked node 2nd k/v", []string{"we modified stuff"}); err != nil {
        t.Errorf("Unable to commit node %s: %v\n", uuid6, err)
    }

    // Should now be able to correctly merge the two branches.
    goodChild, err := datastore.Merge([]dvid.UUID{uuid4, uuid6}, "merging stuff", datastore.MergeConflictFree)
    if err != nil {
        t.Errorf("Error doing merge: %v\n", err)
    }

    // We should be able to see just the original uuid4 value of the 2nd k/v
    childreq = fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, goodChild, data.DataName(), key2)
    returnValue = server.TestHTTP(t, "GET", childreq, nil)
    if string(returnValue) != uuid4val {
        t.Errorf("Error on merged child, key %q: expected %q, got %q\n", key2, uuid4val, string(returnValue))
    }

    // Apply the automatic conflict resolution using ordering.
    payload := fmt.Sprintf(`{"data":["versiontest"],"parents":[%q,%q],"note":"automatic resolved merge"}`, uuid5, uuid4)
    resolveReq := fmt.Sprintf("%srepo/%s/resolve", server.WebAPIPath, uuid4)
    returnValue = server.TestHTTP(t, "POST", resolveReq, bytes.NewBufferString(payload))
    resolveResp := struct {
        Child dvid.UUID `json:"child"`
    }{}
    if err := json.Unmarshal(returnValue, &resolveResp); err != nil {
        t.Fatalf("Can't parse return of resolve request: %s\n", string(returnValue))
    }

    // We should now see the uuid5 version of the 2nd k/v in the returned merged node.
    childreq = fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, resolveResp.Child, data.DataName(), key2)
    returnValue = server.TestHTTP(t, "GET", childreq, nil)
    if string(returnValue) != uuid5val {
        t.Errorf("Error on auto merged child, key %q: expected %q, got %q\n", key2, uuid5val, string(returnValue))
    }

    // Introduce a child off root but don't add 2nd k/v to it.
    uuid7, err := datastore.NewVersion(uuid, "2nd child off root", nil)
    if err != nil {
        t.Fatalf("Unable to create new version off node %s: %v\n", uuid, err)
    }
    if err = datastore.Commit(uuid7, "useless node", []string{"we modified nothing!"}); err != nil {
        t.Errorf("Unable to commit node %s: %v\n", uuid7, err)
    }

    // Now merge the previously merged node with the newly created "blank" child off root.
    if err = datastore.Commit(goodChild, "this was a good merge", []string{}); err != nil {
        t.Errorf("Unable to commit node %s: %v\n", goodChild, err)
    }
    merge2, err := datastore.Merge([]dvid.UUID{goodChild, uuid7}, "merging a useless path", datastore.MergeConflictFree)
    if err != nil {
        t.Errorf("Error doing merge: %v\n", err)
    }
    merge3, err := datastore.Merge([]dvid.UUID{uuid7, goodChild}, "merging a useless path in reverse order", datastore.MergeConflictFree)
    if err != nil {
        t.Errorf("Error doing merge: %v\n", err)
    }

    // We should still be conflict free since the 2nd key in the left parent path takes precedence
    // over the shared 2nd key in root.  This tests our invalidation of ancestors.
    toughreq := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, merge2, data.DataName(), key2)
    returnValue = server.TestHTTP(t, "GET", toughreq, nil)
    if string(returnValue) != uuid4val {
        t.Errorf("Error on merged child, key %q: expected %q, got %q\n", key2, uuid4val, string(returnValue))
    }
    toughreq = fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, merge3, data.DataName(), key2)
    returnValue = server.TestHTTP(t, "GET", toughreq, nil)
    if string(returnValue) != uuid4val {
        t.Errorf("Error on merged child, key %q: expected %q, got %q\n", key2, uuid4val, string(returnValue))
    }
}