// Make sure new labelgraph data have different IDs. func TestNewLabelgraphDifferent(t *testing.T) { datastore.OpenTest() defer datastore.CloseTest() uuid, _ := initTestRepo() // Add data config := dvid.NewConfig() dataservice1, err := datastore.NewData(uuid, dtype, "lg1", config) if err != nil { t.Errorf("Error creating new labelgraph instance 1: %v\n", err) } data1, ok := dataservice1.(*Data) if !ok { t.Errorf("Returned new data instance 1 is not labelgraph.Data\n") } dataservice2, err := datastore.NewData(uuid, dtype, "lg2", config) if err != nil { t.Errorf("Error creating new labelgraph instance 2: %v\n", err) } data2, ok := dataservice2.(*Data) if !ok { t.Errorf("Returned new data instance 2 is not labelgraph.Data\n") } if data1.InstanceID() == data2.InstanceID() { t.Errorf("Instance IDs should be different: %d == %d\n", data1.InstanceID(), data2.InstanceID()) } }
// Make sure new keyvalue data have different IDs. func TestNewKeyvalueDifferent(t *testing.T) { datastore.OpenTest() defer datastore.CloseTest() uuid, _ := initTestRepo() // Add data config := dvid.NewConfig() dataservice1, err := datastore.NewData(uuid, kvtype, "instance1", config) if err != nil { t.Errorf("Error creating new keyvalue instance: %v\n", err) } kv1, ok := dataservice1.(*Data) if !ok { t.Errorf("Returned new data instance 1 is not keyvalue.Data\n") } if kv1.DataName() != "instance1" { t.Errorf("New keyvalue data instance name set incorrectly: %q != %q\n", kv1.DataName(), "instance1") } dataservice2, err := datastore.NewData(uuid, kvtype, "instance2", config) if err != nil { t.Errorf("Error creating new keyvalue instance: %v\n", err) } kv2, ok := dataservice2.(*Data) if !ok { t.Errorf("Returned new data instance 2 is not keyvalue.Data\n") } if kv1.InstanceID() == kv2.InstanceID() { t.Errorf("Instance IDs should be different: %d == %d\n", kv1.InstanceID(), kv2.InstanceID()) } }
func TestROICreateAndSerialize(t *testing.T) { datastore.OpenTest() defer datastore.CloseTest() uuid, _ := initTestRepo() // Add data config := dvid.NewConfig() dataservice1, err := datastore.NewData(uuid, roitype, "myroi", config) if err != nil { t.Errorf("Error creating new roi instance: %v\n", err) } roi1, ok := dataservice1.(*Data) if !ok { t.Errorf("Returned new data instance 1 is not roi.Data\n") } if roi1.DataName() != "myroi" { t.Errorf("New roi data instance name set incorrectly: %q != %q\n", roi1.DataName(), "myroi") } config.Set("BlockSize", "15,16,17") dataservice2, err := datastore.NewData(uuid, roitype, "myroi2", config) if err != nil { t.Errorf("Error creating new roi instance: %v\n", err) } roi2, ok := dataservice2.(*Data) if !ok { t.Errorf("Returned new data instance 2 is not roi.Data\n") } if roi1.InstanceID() == roi2.InstanceID() { t.Errorf("Instance IDs should be different: %d == %d\n", roi1.InstanceID(), roi2.InstanceID()) } // Test persistence of storage. roi2.MinZ = 13 roi2.MaxZ = 3098 gobBytes, err := roi2.GobEncode() if err != nil { t.Fatalf("Could not Gob encode roi: %v\n", err) } var received Data if err = received.GobDecode(gobBytes); err != nil { t.Fatalf("Could not decode Gob-encoded roi: %v\n", err) } if !roi2.Data.Equals(received.Data) { t.Errorf("ROI base Data has bad roundtrip:\nOriginal:\n%v\nReceived:\n%v\n", *(roi2.Data), *(received.Data)) } if !reflect.DeepEqual(roi2.Properties, received.Properties) { t.Errorf("ROI extended properties has bad roundtrip:\nOriginal:\n%v\nReceived:\n%v\n", roi2.Properties, received.Properties) } }
func repoNewDataHandler(c web.C, w http.ResponseWriter, r *http.Request) { uuid := c.Env["uuid"].(dvid.UUID) config := dvid.NewConfig() if err := config.SetByJSON(r.Body); err != nil { BadRequest(w, r, fmt.Sprintf("Error decoding POSTed JSON config for 'new': %v", err)) return } // Make sure that the passed configuration has data type and instance name. typename, found, err := config.GetString("typename") if !found || err != nil { BadRequest(w, r, "POST on repo endpoint requires specification of valid 'typename'") return } dataname, found, err := config.GetString("dataname") if !found || err != nil { BadRequest(w, r, "POST on repo endpoint requires specification of valid 'dataname'") return } typeservice, err := datastore.TypeServiceByName(dvid.TypeString(typename)) if err != nil { BadRequest(w, r, err) return } _, err = datastore.NewData(uuid, typeservice, dvid.InstanceName(dataname), config) if err != nil { BadRequest(w, r, err) return } w.Header().Set("Content-Type", "application/json") fmt.Fprintf(w, "{%q: 'Added %s [%s] to node %s'}", "result", dataname, typename, uuid) }
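// A minimal sketch of exercising the handler above from a test, in the style of the
// other tests in this section. The "repo/%s/instance" route is an assumption for
// illustration; substitute whatever path repoNewDataHandler is actually registered under.
func TestRepoNewDataHandlerSketch(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()
	uuid, _ := initTestRepo()

	// The handler requires at least "typename" and "dataname" in the POSTed JSON config.
	jsonConfig := `{"typename": "keyvalue", "dataname": "mykv"}`
	apiStr := fmt.Sprintf("%srepo/%s/instance", server.WebAPIPath, uuid)
	server.TestHTTP(t, "POST", apiStr, strings.NewReader(jsonConfig))

	// The new instance should now be retrievable by name.
	if _, err := datastore.GetDataByUUID(uuid, "mykv"); err != nil {
		t.Errorf("Could not get new data instance %q: %v\n", "mykv", err)
	}
}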
func TestRequests(t *testing.T) { datastore.OpenTest() defer datastore.CloseTest() uuid, _ := initTestRepo() config := dvid.NewConfig() dataservice, err := datastore.NewData(uuid, syntype, "mysynapses", config) if err != nil { t.Fatalf("Error creating new data instance: %v\n", err) } data, ok := dataservice.(*Data) if !ok { t.Fatalf("Returned new data instance is not synapse.Data\n") } // PUT first batch of synapses testJSON, err := json.Marshal(testData) if err != nil { t.Fatal(err) } url1 := fmt.Sprintf("%snode/%s/%s/elements", server.WebAPIPath, uuid, data.DataName()) server.TestHTTP(t, "POST", url1, strings.NewReader(string(testJSON))) // GET synapses back within superset bounding box and make sure all data is there. testResponse(t, testData, "%snode/%s/%s/elements/1000_1000_1000/0_0_0", server.WebAPIPath, uuid, data.DataName()) // Test subset GET testResponse(t, expected3, "%snode/%s/%s/elements/5_5_5/126_60_97", server.WebAPIPath, uuid, data.DataName()) // Test Tag 1 tag := Tag("Synapse2") synapse2 := getTag(tag, testData) testResponse(t, synapse2, "%snode/%s/%s/tag/%s?relationships=true", server.WebAPIPath, uuid, data.DataName(), tag) // Test Tag 2 tag2 := Tag("Zlt90") zlt90 := getTag(tag2, testData) testResponse(t, zlt90, "%snode/%s/%s/tag/%s?relationships=true", server.WebAPIPath, uuid, data.DataName(), tag2) // Test move url5 := fmt.Sprintf("%snode/%s/%s/move/127_63_99/127_64_100", server.WebAPIPath, uuid, data.DataName()) server.TestHTTP(t, "POST", url5, nil) testResponse(t, afterMove, "%snode/%s/%s/elements/1000_1000_1000/0_0_0", server.WebAPIPath, uuid, data.DataName()) // --- check tag synapse2 = getTag(tag, afterMove) testResponse(t, synapse2, "%snode/%s/%s/tag/%s?relationships=true", server.WebAPIPath, uuid, data.DataName(), tag) // Test delete url6 := fmt.Sprintf("%snode/%s/%s/element/127_64_100", server.WebAPIPath, uuid, data.DataName()) server.TestHTTP(t, "DELETE", url6, nil) testResponse(t, afterDelete, "%snode/%s/%s/elements/1000_1000_1000/0_0_0", server.WebAPIPath, uuid, data.DataName()) // --- check tag synapse2 = getTag(tag, afterDelete) testResponse(t, synapse2, "%snode/%s/%s/tag/%s?relationships=true", server.WebAPIPath, uuid, data.DataName(), tag) }
// Creates a new data instance for labelblk
func newDataInstance(uuid dvid.UUID, t *testing.T, name dvid.InstanceName) *Data {
	config := dvid.NewConfig()
	dataservice, err := datastore.NewData(uuid, labelsT, name, config)
	if err != nil {
		t.Errorf("Unable to create labelblk instance %q: %v\n", name, err)
	}
	labels, ok := dataservice.(*Data)
	if !ok {
		t.Errorf("Can't cast labels data service into Data\n")
	}
	return labels
}
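// A minimal usage sketch for the helper above, following the labelblk test patterns
// seen elsewhere in this section (a hypothetical test; names are illustrative):
func TestNewDataInstanceSketch(t *testing.T) {
	tests.UseStore()
	defer tests.CloseStore()
	uuid, _ := initTestRepo()

	labels := newDataInstance(uuid, t, "mylabels")
	if labels.DataName() != "mylabels" {
		t.Errorf("New labelblk instance name set incorrectly: %q != %q\n", labels.DataName(), "mylabels")
	}
}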
func makeGrayscale(uuid dvid.UUID, t *testing.T, name dvid.InstanceName) *imageblk.Data {
	config := dvid.NewConfig()
	dataservice, err := datastore.NewData(uuid, grayscaleT, name, config)
	if err != nil {
		t.Errorf("Unable to create grayscale instance %q: %v\n", name, err)
	}
	grayscale, ok := dataservice.(*imageblk.Data)
	if !ok {
		t.Errorf("Can't cast data service into imageblk Data\n")
	}
	return grayscale
}
func TestBasic(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid, _ := initTestRepo()

	config := dvid.NewConfig()
	_, err := datastore.NewData(uuid, dtype, "instance1", config)
	if err != nil {
		t.Errorf("Error creating new multichan16 instance: %v\n", err)
	}
}
func TestROIPartition(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	// Create the ROI dataservice.
	uuid, versionID := initTestRepo()

	config := dvid.NewConfig()
	dataservice, err := datastore.NewData(uuid, roitype, "roi", config)
	if err != nil {
		t.Errorf("Error creating new roi instance: %v\n", err)
	}
	data, ok := dataservice.(*Data)
	if !ok {
		t.Errorf("Returned new data instance is not roi.Data\n")
	}

	// PUT an ROI
	roiRequest := fmt.Sprintf("%snode/%s/%s/roi", server.WebAPIPath, uuid, data.DataName())
	req, err := http.NewRequest("POST", roiRequest, getSpansJSON(testSpans))
	if err != nil {
		t.Errorf("Unsuccessful POST request (%s): %v\n", roiRequest, err)
	}
	ctx := datastore.NewVersionedCtx(data, versionID)
	w := httptest.NewRecorder()
	data.ServeHTTP(uuid, ctx, w, req)
	if w.Code != http.StatusOK {
		t.Errorf("Bad server response roi POST, status %d, for roi %q\n", w.Code, data.DataName())
	}

	// Request the standard subvolume partitioning
	partitionReq := fmt.Sprintf("%snode/%s/%s/partition?batchsize=5&optimized=true", server.WebAPIPath, uuid, data.DataName())
	req, err = http.NewRequest("GET", partitionReq, nil)
	if err != nil {
		t.Errorf("Unsuccessful GET request (%s): %v\n", partitionReq, err)
	}
	w = httptest.NewRecorder()
	data.ServeHTTP(uuid, ctx, w, req)
	if w.Code != http.StatusOK {
		t.Errorf("Bad server response partition GET, status %d, for roi %q\n", w.Code, data.DataName())
	}
	var subvolJSON, expectedJSON interface{}
	response := w.Body.Bytes()
	if err := json.Unmarshal(response, &subvolJSON); err != nil {
		t.Errorf("Can't unmarshal JSON: %s\n", w.Body.Bytes())
	}
	json.Unmarshal([]byte(expectedPartition), &expectedJSON)
	if !reflect.DeepEqual(subvolJSON, expectedJSON) {
		t.Errorf("Error doing optimized subvolume partitioning. Got bad result:\n%s\n", string(response))
	}
}
// Test added after error in getting two paths to the same ancestor k/v after merge.
func TestDiamondGetOnMerge(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid, _ := initTestRepo()

	config := dvid.NewConfig()
	dataservice, err := datastore.NewData(uuid, kvtype, "mergetest", config)
	if err != nil {
		t.Fatalf("Error creating new keyvalue instance: %v\n", err)
	}
	data, ok := dataservice.(*Data)
	if !ok {
		t.Fatalf("Returned new data instance is not keyvalue.Data\n")
	}

	// PUT a value
	key1 := "mykey"
	value1 := "some stuff"
	key1req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid, data.DataName(), key1)
	server.TestHTTP(t, "POST", key1req, strings.NewReader(value1))

	if err = datastore.Commit(uuid, "my commit msg", []string{"stuff one", "stuff two"}); err != nil {
		t.Errorf("Unable to lock root node %s: %v\n", uuid, err)
	}
	uuid2, err := datastore.NewVersion(uuid, "first child", nil)
	if err != nil {
		t.Fatalf("Unable to create 1st child off root %s: %v\n", uuid, err)
	}
	if err = datastore.Commit(uuid2, "first child", nil); err != nil {
		t.Errorf("Unable to commit node %s: %v\n", uuid2, err)
	}
	uuid3, err := datastore.NewVersion(uuid, "second child", nil)
	if err != nil {
		t.Fatalf("Unable to create 2nd child off root %s: %v\n", uuid, err)
	}
	if err = datastore.Commit(uuid3, "second child", nil); err != nil {
		t.Errorf("Unable to commit node %s: %v\n", uuid3, err)
	}
	child, err := datastore.Merge([]dvid.UUID{uuid2, uuid3}, "merging stuff", datastore.MergeConflictFree)
	if err != nil {
		t.Errorf("Error doing merge: %v\n", err)
	}

	// We should be able to see just the original uuid value of the k/v
	childreq := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, child, data.DataName(), key1)
	returnValue := server.TestHTTP(t, "GET", childreq, nil)
	if string(returnValue) != value1 {
		t.Errorf("Error on merged child, key %q: expected %q, got %q\n", key1, value1, string(returnValue))
	}
}
// ForegroundROI creates a new ROI by determining all non-background blocks. func (d *Data) ForegroundROI(req datastore.Request, reply *datastore.Response) error { if d.Values.BytesPerElement() != 1 { return fmt.Errorf("Foreground ROI command only implemented for 1 byte/voxel data!") } // Parse the request var uuidStr, dataName, cmdStr, destName, backgroundStr string req.CommandArgs(1, &uuidStr, &dataName, &cmdStr, &destName, &backgroundStr) // Get the version and repo uuid, versionID, err := datastore.MatchingUUID(uuidStr) if err != nil { return err } if err = datastore.AddToNodeLog(uuid, []string{req.Command.String()}); err != nil { return err } // Use existing destination data or a new ROI data. var dest *roi.Data dest, err = roi.GetByUUID(uuid, dvid.InstanceName(destName)) if err != nil { config := dvid.NewConfig() typeservice, err := datastore.TypeServiceByName("roi") if err != nil { return err } dataservice, err := datastore.NewData(uuid, typeservice, dvid.InstanceName(destName), config) if err != nil { return err } var ok bool dest, ok = dataservice.(*roi.Data) if !ok { return fmt.Errorf("Could not create ROI data instance") } } // Asynchronously process the voxels. background, err := dvid.StringToPointNd(backgroundStr, ",") if err != nil { return err } go d.foregroundROI(versionID, dest, background) return nil }
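// From the CommandArgs call above, the console request parsed by ForegroundROI has
// the shape:
//
//	<uuid> <uint8 data name> <command verb> <destination ROI name> <background values>
//
// A hypothetical invocation (the verb name and client syntax are assumptions for
// illustration, not confirmed by this function):
//
//	dvid node f8a03 mygrayscale foreground-roi background_mask 0,255
//
// Here "0,255" is split on commas by dvid.StringToPointNd into the voxel values
// treated as background, and "background_mask" is reused if it already exists or
// created as a new "roi" instance otherwise.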
// check subgraph endpoint
func TestLabelgraphPostAndDelete(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	// Create the labelgraph dataservice.
	uuid, _ := initTestRepo()
	config := dvid.NewConfig()
	dataservice, err := datastore.NewData(uuid, dtype, "lg", config)
	if err != nil {
		t.Errorf("Error creating new labelgraph instance: %v\n", err)
	}
	data, ok := dataservice.(*Data)
	if !ok {
		t.Errorf("Returned new data instance is not labelgraph.Data\n")
	}

	// PUT a labelgraph
	subgraphRequest := fmt.Sprintf("%snode/%s/%s/subgraph", server.WebAPIPath, uuid, data.DataName())
	server.TestHTTP(t, "POST", subgraphRequest, getGraphJSON())

	// Get back the labelgraph
	returnedData := server.TestHTTP(t, "GET", subgraphRequest, nil)
	retgraph, err := loadGraphJSON(returnedData)
	if err != nil {
		t.Errorf("Error on getting back JSON from subgraph GET: %v\n", err)
	}

	// Make sure the two are the same.
	if !reflect.DeepEqual(retgraph, getTestGraph()) {
		t.Errorf("Bad PUT/GET labelgraph roundtrip\nOriginal:\n%s\nReturned:\n%s\n", getTestGraph(), retgraph)
	}

	// Delete the labelgraph
	_ = server.TestHTTP(t, "DELETE", subgraphRequest, nil)

	// Subgraph should now be empty
	returnedData = server.TestHTTP(t, "GET", subgraphRequest, nil)
	expectedResp := "{\"Transactions\":[],\"Vertices\":[],\"Edges\":[]}"
	if string(returnedData) != expectedResp {
		t.Errorf("Bad subgraph after labelgraph delete. Should be %s got: %s\n", expectedResp, string(returnedData))
	}
}
func TestMultiscale2dRepoPersistence(t *testing.T) { datastore.OpenTest() defer datastore.CloseTest() // Make source uuid, _ := initTestRepo() makeGrayscale(uuid, t, "grayscale") // Make labels and set various properties config := dvid.NewConfig() config.Set("Placeholder", "true") config.Set("Format", "jpg") config.Set("Source", "grayscale") dataservice, err := datastore.NewData(uuid, mstype, "myimagetile", config) if err != nil { t.Errorf("Unable to create imagetile instance: %v\n", err) } msdata, ok := dataservice.(*Data) if !ok { t.Fatalf("Can't cast imagetile data service into imagetile.Data\n") } oldData := *msdata // Restart test datastore and see if datasets are still there. if err = datastore.SaveDataByUUID(uuid, msdata); err != nil { t.Fatalf("Unable to save repo during imagetile persistence test: %v\n", err) } datastore.CloseReopenTest() dataservice2, err := datastore.GetDataByUUID(uuid, "myimagetile") if err != nil { t.Fatalf("Can't get keyvalue instance from reloaded test db: %v\n", err) } msdata2, ok := dataservice2.(*Data) if !ok { t.Errorf("Returned new data instance 2 is not imagetile.Data\n") } if !reflect.DeepEqual(oldData.Properties, msdata2.Properties) { t.Errorf("Expected properties %v, got %v\n", oldData.Properties, msdata2.Properties) } }
func TestROIPostAndDelete(t *testing.T) { datastore.OpenTest() defer datastore.CloseTest() // Create the ROI dataservice. uuid, _ := initTestRepo() config := dvid.NewConfig() dataservice, err := datastore.NewData(uuid, roitype, "roi", config) if err != nil { t.Errorf("Error creating new roi instance: %v\n", err) } data, ok := dataservice.(*Data) if !ok { t.Errorf("Returned new data instance is not roi.Data\n") } // PUT an ROI roiRequest := fmt.Sprintf("%snode/%s/%s/roi", server.WebAPIPath, uuid, data.DataName()) server.TestHTTP(t, "POST", roiRequest, getSpansJSON(testSpans)) // Get back the ROI returnedData := server.TestHTTP(t, "GET", roiRequest, nil) spans, err := putSpansJSON(returnedData) if err != nil { t.Errorf("Error on getting back JSON from roi GET: %v\n", err) } // Make sure the two are the same. if !reflect.DeepEqual(spans, testSpans) { t.Errorf("Bad PUT/GET ROI roundtrip\nOriginal:\n%s\nReturned:\n%s\n", testSpans, spans) } // Delete the ROI _ = server.TestHTTP(t, "DELETE", roiRequest, nil) // ROI should now be empty returnedData = server.TestHTTP(t, "GET", roiRequest, nil) if string(returnedData) != "[]" { t.Errorf("Bad ROI after ROI delete. Should be [ ] got: %s\n", string(returnedData)) } }
func TestLabelblkRepoPersistence(t *testing.T) { tests.UseStore() defer tests.CloseStore() uuid, _ := initTestRepo() // Make labels and set various properties config := dvid.NewConfig() config.Set("BlockSize", "12,13,14") config.Set("VoxelSize", "1.1,2.8,11") config.Set("VoxelUnits", "microns,millimeters,nanometers") dataservice, err := datastore.NewData(uuid, labelsT, "mylabels", config) if err != nil { t.Errorf("Unable to create labels instance: %v\n", err) } labels, ok := dataservice.(*Data) if !ok { t.Errorf("Can't cast labels data service into Data\n") } oldData := *labels // Restart test datastore and see if datasets are still there. if err = datastore.SaveDataByUUID(uuid, labels); err != nil { t.Fatalf("Unable to save repo during labels persistence test: %v\n", err) } tests.CloseReopenStore() dataservice2, err := datastore.GetDataByUUID(uuid, "mylabels") if err != nil { t.Fatalf("Can't get labels instance from reloaded test db: %v\n", err) } labels2, ok := dataservice2.(*Data) if !ok { t.Errorf("Returned new data instance 2 is not imageblk.Data\n") } if !oldData.Equals(labels2) { t.Errorf("Expected %v, got %v\n", oldData, *labels2) } }
func TestFloat32RepoPersistence(t *testing.T) { datastore.OpenTest() defer datastore.CloseTest() uuid, _ := initTestRepo() // Make grayscale and set various properties config := dvid.NewConfig() config.Set("BlockSize", "12,13,14") config.Set("VoxelSize", "1.1,2.8,11") config.Set("VoxelUnits", "microns,millimeters,nanometers") dataservice, err := datastore.NewData(uuid, floatimgT, "floatimg", config) if err != nil { t.Errorf("Unable to create float32 instance: %s\n", err) } floatimg, ok := dataservice.(*Data) if !ok { t.Errorf("Can't cast float32 data service into Data\n") } oldData := *floatimg // Restart test datastore and see if datasets are still there. if err = datastore.SaveDataByUUID(uuid, floatimg); err != nil { t.Fatalf("Unable to save repo during floatimg persistence test: %v\n", err) } datastore.CloseReopenTest() dataservice2, err := datastore.GetDataByUUIDName(uuid, "floatimg") if err != nil { t.Fatalf("Can't get floatimg instance from reloaded test db: %v\n", err) } floatimg2, ok := dataservice2.(*Data) if !ok { t.Errorf("Returned new data instance 2 is not imageblk.Data\n") } if !oldData.Equals(floatimg2) { t.Errorf("Expected %v, got %v\n", oldData, *floatimg2) } }
func TestKeyvalueRoundTrip(t *testing.T) { datastore.OpenTest() defer datastore.CloseTest() uuid, versionID := initTestRepo() // Add data config := dvid.NewConfig() dataservice, err := datastore.NewData(uuid, kvtype, "roundtripper", config) if err != nil { t.Errorf("Error creating new keyvalue instance: %v\n", err) } kvdata, ok := dataservice.(*Data) if !ok { t.Errorf("Returned new data instance is not keyvalue.Data\n") } ctx := datastore.NewVersionedCtx(dataservice, versionID) keyStr := "testkey.-{}03`~| %@\x01" value := []byte("I like Japan and this is some unicode: \u65e5\u672c\u8a9e") if err = kvdata.PutData(ctx, keyStr, value); err != nil { t.Errorf("Could not put keyvalue data: %v\n", err) } retrieved, found, err := kvdata.GetData(ctx, keyStr) if err != nil { t.Fatalf("Could not get keyvalue data: %v\n", err) } if !found { t.Fatalf("Could not find put keyvalue\n") } if bytes.Compare(value, retrieved) != 0 { t.Errorf("keyvalue retrieved %q != put %q\n", string(retrieved), string(value)) } }
func TestMultichan16RepoPersistence(t *testing.T) { datastore.OpenTest() defer datastore.CloseTest() uuid, _ := initTestRepo() // Make labels and set various properties config := dvid.NewConfig() dataservice, err := datastore.NewData(uuid, dtype, "mymultichan16", config) if err != nil { t.Errorf("Unable to create multichan16 instance: %v\n", err) } mcdata, ok := dataservice.(*Data) if !ok { t.Errorf("Can't cast multichan16 data service into multichan16.Data\n") } oldData := *mcdata // Restart test datastore and see if datasets are still there. if err = datastore.SaveDataByUUID(uuid, mcdata); err != nil { t.Fatalf("Unable to save repo during multichan16 persistence test: %v\n", err) } datastore.CloseReopenTest() dataservice2, err := datastore.GetDataByUUIDName(uuid, "mymultichan16") if err != nil { t.Fatalf("Can't get multichan16 instance from reloaded test db: %v\n", err) } mcdata2, ok := dataservice2.(*Data) if !ok { t.Errorf("Returned new data instance 2 is not multichan16.Data\n") } if !oldData.Equals(mcdata2) { t.Errorf("Expected %v, got %v\n", oldData, *mcdata2) } }
func TestKeyvalueRepoPersistence(t *testing.T) { tests.UseStore() defer tests.CloseStore() uuid, _ := initTestRepo() // Make labels and set various properties config := dvid.NewConfig() dataservice, err := datastore.NewData(uuid, kvtype, "mykv", config) if err != nil { t.Errorf("Unable to create keyvalue instance: %v\n", err) } kvdata, ok := dataservice.(*Data) if !ok { t.Errorf("Can't cast keyvalue data service into keyvalue.Data\n") } oldData := *kvdata // Restart test datastore and see if datasets are still there. if err = datastore.SaveDataByUUID(uuid, kvdata); err != nil { t.Fatalf("Unable to save repo during keyvalue persistence test: %v\n", err) } tests.CloseReopenStore() dataservice2, err := datastore.GetDataByUUID(uuid, "mykv") if err != nil { t.Fatalf("Can't get keyvalue instance from reloaded test db: %v\n", err) } kvdata2, ok := dataservice2.(*Data) if !ok { t.Errorf("Returned new data instance 2 is not keyvalue.Data\n") } if !oldData.Equals(kvdata2) { t.Errorf("Expected %v, got %v\n", oldData, *kvdata2) } }
func TestTileCheck(t *testing.T) { datastore.OpenTest() defer datastore.CloseTest() // Make source uuid, _ := initTestRepo() makeGrayscale(uuid, t, "grayscale") // Make imagetile and set various properties config := dvid.NewConfig() config.Set("Placeholder", "true") config.Set("Format", "jpg") config.Set("Source", "grayscale") tileservice, err := datastore.NewData(uuid, mstype, "myimagetile", config) if err != nil { t.Errorf("Unable to create imagetile instance: %v\n", err) } msdata, ok := tileservice.(*Data) if !ok { t.Fatalf("Can't cast imagetile data service into imagetile.Data\n") } // Store Metadata url := fmt.Sprintf("%snode/%s/myimagetile/metadata", server.WebAPIPath, uuid) server.TestHTTP(t, "POST", url, bytes.NewBufferString(testMetadata2)) // Create the ROI _, err = datastore.NewData(uuid, roitype, "myroi", dvid.NewConfig()) if err != nil { t.Errorf("Error creating new roi instance: %v\n", err) } // PUT an ROI roiRequest := fmt.Sprintf("%snode/%s/myroi/roi", server.WebAPIPath, uuid) server.TestHTTP(t, "POST", roiRequest, getSpansJSON(testSpans)) // Create fake filter spec := fmt.Sprintf("roi:myroi,%s", uuid) f, err := msdata.NewFilter(storage.FilterSpec(spec)) if err != nil { t.Errorf("Couldn't make filter: %v\n", err) } if f == nil { t.Fatalf("Couldn't detect myroi data instance\n") } // Check various key values for proper spatial checks. var tx, ty int32 tx = (205 * 32) / 512 ty = (101 * 32) / 512 tile := dvid.ChunkPoint3d{tx, ty, 101 * 32} scale := Scaling(0) tkv := &storage.TKeyValue{K: NewTKey(tile, dvid.XY, scale)} skip, err := f.Check(tkv) if err != nil { t.Errorf("Error on Check of key %q: %v\n", tkv.K, err) } if skip { t.Errorf("Expected false skip, got %v for tile %s\n", skip, tile) } tile = dvid.ChunkPoint3d{tx, ty, 106 * 32} tkv = &storage.TKeyValue{K: NewTKey(tile, dvid.XY, scale)} skip, err = f.Check(tkv) if err != nil { t.Errorf("Error on Check of key %q: %v\n", tkv.K, err) } if !skip { t.Errorf("Expected true skip, got %v for tile %s\n", skip, tile) } tx = (205 * 32) / 512 ty = (121 * 32) / 512 tile = dvid.ChunkPoint3d{tx, ty, 101 * 32} tkv = &storage.TKeyValue{K: NewTKey(tile, dvid.XY, scale)} skip, err = f.Check(tkv) if err != nil { t.Errorf("Error on Check of key %q: %v\n", tkv.K, err) } if !skip { t.Errorf("Expected true skip, got %v for tile %s\n", skip, tile) } tx = (225 * 32) / 512 ty = (101 * 32) / 512 tile = dvid.ChunkPoint3d{tx, ty, 101 * 32} tkv = &storage.TKeyValue{K: NewTKey(tile, dvid.XY, scale)} skip, err = f.Check(tkv) if err != nil { t.Errorf("Error on Check of key %q: %v\n", tkv.K, err) } if !skip { t.Errorf("Expected true skip, got %v for tile %s\n", skip, tile) } }
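// The tile coordinates used in TestTileCheck above follow from a simple conversion:
// an ROI block coordinate is first scaled to voxels (x32, the block size implied by
// the test spans), then divided by the tile width/height (512, implied by the /512
// divisions above). A small sketch of that arithmetic, assuming those two constants:
func tileIndex(blockCoord int32) int32 {
	const blockSize = 32 // voxels per block side (assumption taken from this test)
	const tileSize = 512 // pixels per tile side (assumption taken from this test)
	return (blockCoord * blockSize) / tileSize // e.g. tileIndex(205) == 12, tileIndex(101) == 6
}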
func TestROIRequests(t *testing.T) { datastore.OpenTest() defer datastore.CloseTest() // Create the ROI dataservice. uuid, _ := initTestRepo() config := dvid.NewConfig() dataservice, err := datastore.NewData(uuid, roitype, "roi", config) if err != nil { t.Errorf("Error creating new roi instance: %v\n", err) } data, ok := dataservice.(*Data) if !ok { t.Errorf("Returned new data instance is not roi.Data\n") } // PUT an ROI roiRequest := fmt.Sprintf("%snode/%s/%s/roi", server.WebAPIPath, uuid, data.DataName()) server.TestHTTP(t, "POST", roiRequest, getSpansJSON(testSpans)) // Get back the ROI returnedData := server.TestHTTP(t, "GET", roiRequest, nil) spans, err := putSpansJSON(returnedData) if err != nil { t.Errorf("Error on getting back JSON from roi GET: %v\n", err) } // Make sure the two are the same. if !reflect.DeepEqual(spans, testSpans) { t.Errorf("Bad PUT/GET ROI roundtrip\nOriginal:\n%s\nReturned:\n%s\n", testSpans, spans) } // Test the ptquery ptqueryRequest := fmt.Sprintf("%snode/%s/%s/ptquery", server.WebAPIPath, uuid, data.DataName()) returnedData = server.TestHTTP(t, "POST", ptqueryRequest, getPointsJSON(testPoints)) inclusions, err := putInclusionJSON(returnedData) if err != nil { t.Fatalf("Error on getting back JSON from ptquery: %v\n", err) } // Make sure the two are the same. if !reflect.DeepEqual(inclusions, expectedInclusions) { t.Errorf("Bad ptquery results\nOriginal:\n%s\nReturned:\n%s\n", expectedInclusions, inclusions) } // Test ROI mask out of range -- should be all 0. maskRequest := fmt.Sprintf("%snode/%s/%s/mask/0_1_2/100_100_100/10_40_70", server.WebAPIPath, uuid, data.DataName()) returnedData = server.TestHTTP(t, "GET", maskRequest, nil) if len(returnedData) != 100*100*100 { t.Errorf("Expected mask volume of %d bytes, got %d bytes instead\n", 100*100*100, len(returnedData)) } for i, value := range returnedData { if value != 0 { t.Errorf("Expected all-zero mask, got %d at index %d\n", value, i) break } } // Test ROI mask within range. 
maskRequest = fmt.Sprintf("%snode/%s/%s/mask/0_1_2/100_100_100/6350_3232_3200", server.WebAPIPath, uuid, data.DataName()) returnedData = server.TestHTTP(t, "GET", maskRequest, nil) if len(returnedData) != 100*100*100 { t.Errorf("Expected mask volume of %d bytes, got %d bytes instead\n", 100*100*100, len(returnedData)) } // Check first block plane for y := 0; y < 100; y++ { for x := 0; x < 100; x++ { value := returnedData[y*100+x] if x < 50 && value != 0 { t.Errorf("Expected mask to be zero at (%d, %d) before ROI, got %d instead\n", x, y, value) break } if x >= 50 && y < 64 && value != 1 { t.Errorf("Expected mask to be 1 at (%d, %d) within ROI, got %d instead\n", x, y, value) break } // tuple{100, 103, 201, 212} if x <= 81 && y >= 64 && y < 96 && value != 0 { t.Errorf("Expected mask to be zero at (%d, %d) before ROI, got %d instead\n", x, y, value) break } if x > 81 && y >= 64 && y < 96 && value != 1 { t.Errorf("Expected mask to be 1 at (%d, %d) within ROI, got %d instead\n", x, y, value) break } } } // Check second block plane offset := 32 * 100 * 100 // moves to next block in Z for y := 0; y < 100; y++ { for x := 0; x < 100; x++ { value := returnedData[offset+y*100+x] if x < 50 && value != 0 { t.Errorf("Expected mask to be zero at (%d, %d) before ROI, got %d instead\n", x, y, value) break } if x <= 81 && y < 32 && value != 0 { t.Errorf("Expected mask to be zero at (%d, %d) before ROI, got %d instead\n", x, y, value) break } if x > 81 && y < 32 && value != 1 { t.Errorf("Expected mask to be 1 at (%d, %d) within ROI, got %d instead\n", x, y, value) break } if y >= 32 && value != 0 { t.Errorf("Expected mask to be zero at (%d, %d) before ROI, got %d instead\n", x, y, value) break } } } // Check last block plane offset = 96 * 100 * 100 // moves to last ROI layer in Z for y := 0; y < 100; y++ { for x := 0; x < 100; x++ { value := returnedData[offset+y*100+x] if x < 50 && value != 0 { t.Errorf("Expected mask to be zero at (%d, %d) before ROI, got %d instead\n", x, y, value) break } if x >= 50 && y < 32 && value != 1 { t.Errorf("Expected mask to be 1 at (%d, %d) within ROI, got %d instead\n", x, y, value) break } if y >= 32 && y < 64 && value != 0 { t.Errorf("Expected mask to be zero at (%d, %d) before ROI, got %d instead\n", x, y, value) break } if x >= 50 && y >= 64 && y < 96 && value != 1 { t.Errorf("Expected mask to be 1 at (%d, %d) within ROI, got %d instead\n", x, y, value) break } if y >= 96 && value != 0 { t.Errorf("Expected mask to be zero at (%d, %d) before ROI, got %d instead\n", x, y, value) break } } } }
func TestFilter(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	// Create testbed volume and data instances
	uuid, _ := initTestRepo()
	var config dvid.Config
	server.CreateTestInstance(t, uuid, "labelblk", "labels", config)
	d, err := datastore.NewData(uuid, labelvolT, "bodies", config)
	if err != nil {
		t.Fatalf("Unable to create labelvol instance: %s\n", err)
	}
	server.CreateTestSync(t, uuid, "labels", "bodies")
	server.CreateTestSync(t, uuid, "bodies", "labels")

	// Populate the labels, which should automatically populate the labelvol
	_ = createLabelTestVolume(t, uuid, "labels")

	if err := datastore.BlockOnUpdating(uuid, "bodies"); err != nil {
		t.Fatalf("Error blocking on sync of labels -> bodies: %v\n", err)
	}

	// Create a ROI that will be used for filter test.
	server.CreateTestInstance(t, uuid, "roi", "myroi", config)
	roiRequest := fmt.Sprintf("%snode/%s/myroi/roi", server.WebAPIPath, uuid)
	server.TestHTTP(t, "POST", roiRequest, getROIReader())

	// Create the filter spec
	fs := storage.FilterSpec(fmt.Sprintf("roi:myroi,%s", uuid))
	var filter storage.Filter
	filterer, ok := d.(storage.Filterer)
	if !ok {
		t.Fatalf("labelvol instance does not implement storage.Filterer\n")
	}
	filter, err = filterer.NewFilter(fs)
	if err != nil {
		t.Fatalf("Can't create filter from spec %q: %v\n", fs, err)
	}
	if filter == nil {
		t.Fatalf("No filter could be created from spec %q\n", fs)
	}

	// Test the filter.
	tkv := storage.TKeyValue{K: NewTKey(23, dvid.ChunkPoint3d{0, 0, 0}.ToIZYXString())}
	skip, err := filter.Check(&tkv)
	if err != nil {
		t.Errorf("Error on filter check 1: %v\n", err)
	}
	if !skip {
		t.Errorf("Expected filter check 1 to skip, instead filter.Check() returned not skip")
	}

	tkv = storage.TKeyValue{K: NewTKey(23, dvid.ChunkPoint3d{1, 1, 1}.ToIZYXString())}
	skip, err = filter.Check(&tkv)
	if err != nil {
		t.Errorf("Error on filter check 2: %v\n", err)
	}
	if skip {
		t.Errorf("Expected filter check 2 to not skip!")
	}

	tkv = storage.TKeyValue{K: NewTKey(23, dvid.ChunkPoint3d{2, 1, 2}.ToIZYXString())}
	skip, err = filter.Check(&tkv)
	if err != nil {
		t.Errorf("Error on filter check 3: %v\n", err)
	}
	if skip {
		t.Errorf("Expected filter check 3 to not skip!")
	}

	tkv = storage.TKeyValue{K: NewTKey(23, dvid.ChunkPoint3d{3, 1, 1}.ToIZYXString())}
	skip, err = filter.Check(&tkv)
	if err != nil {
		t.Errorf("Error on filter check 4: %v\n", err)
	}
	if !skip {
		t.Errorf("Expected filter check 4 to skip!")
	}
}
func TestTagRequests(t *testing.T) { datastore.OpenTest() defer datastore.CloseTest() uuid, _ := initTestRepo() config := dvid.NewConfig() dataservice, err := datastore.NewData(uuid, syntype, "mysynapses", config) if err != nil { t.Fatalf("Error creating new data instance: %v\n", err) } data, ok := dataservice.(*Data) if !ok { t.Fatalf("Returned new data instance is not synapse.Data\n") } // PUT first batch of synapses testJSON, err := json.Marshal(testTagData) if err != nil { t.Fatal(err) } url1 := fmt.Sprintf("%snode/%s/%s/elements", server.WebAPIPath, uuid, data.DataName()) server.TestHTTP(t, "POST", url1, strings.NewReader(string(testJSON))) // Test Tags expected := Elements{ { ElementNR{ Pos: dvid.Point3d{15, 27, 35}, // Label 1 Kind: PreSyn, Tags: []Tag{"Synapse1", "Zlt90"}, Prop: map[string]string{ "Im a T-Bar": "yes", "I'm not a PSD": "sure", "i'm really special": "", }, }, []Relationship{{Rel: PreSynTo, To: dvid.Point3d{20, 30, 40}}, {Rel: PreSynTo, To: dvid.Point3d{14, 25, 37}}, {Rel: PreSynTo, To: dvid.Point3d{33, 30, 31}}}, }, { ElementNR{ Pos: dvid.Point3d{21, 33, 40}, // Label 2 Kind: PostSyn, Tags: []Tag{"Synapse1"}, }, []Relationship{{Rel: PostSynTo, To: dvid.Point3d{15, 27, 35}}}, }, } tag := Tag("Synapse1") testResponse(t, expected, "%snode/%s/%s/tag/%s?relationships=true", server.WebAPIPath, uuid, data.DataName(), tag) expected = Elements{ { ElementNR{ Pos: dvid.Point3d{20, 30, 40}, // Label 2 Kind: PostSyn, Tags: []Tag{"Synapse10"}, }, []Relationship{{Rel: PostSynTo, To: dvid.Point3d{15, 27, 35}}}, }, } tag = Tag("Synapse10") testResponse(t, expected, "%snode/%s/%s/tag/%s?relationships=true", server.WebAPIPath, uuid, data.DataName(), tag) expected = Elements{ { ElementNR{ Pos: dvid.Point3d{14, 25, 37}, // Label 3 Kind: PostSyn, Tags: []Tag{"Synapse11", "Zlt90"}, }, []Relationship{{Rel: PostSynTo, To: dvid.Point3d{15, 27, 35}}}, }, } tag = Tag("Synapse11") testResponse(t, expected, "%snode/%s/%s/tag/%s?relationships=true", server.WebAPIPath, uuid, data.DataName(), tag) expected = Elements{ { ElementNR{ Pos: dvid.Point3d{33, 30, 31}, Kind: PostSyn, Tags: []Tag{"Synapse111", "Zlt90"}, }, []Relationship{{Rel: PostSynTo, To: dvid.Point3d{15, 27, 35}}}, }, } tag = Tag("Synapse111") testResponse(t, expected, "%snode/%s/%s/tag/%s?relationships=true", server.WebAPIPath, uuid, data.DataName(), tag) // delete an annotation and check if its deleted in tag delurl := fmt.Sprintf("%snode/%s/%s/element/15_27_35", server.WebAPIPath, uuid, data.DataName()) server.TestHTTP(t, "DELETE", delurl, nil) expected = Elements{ { ElementNR{ Pos: dvid.Point3d{21, 33, 40}, // Label 2 Kind: PostSyn, Tags: []Tag{"Synapse1"}, }, []Relationship{{Rel: PostSynTo, To: dvid.Point3d{15, 27, 35}}}, }, } testResponse(t, expected, "%snode/%s/%s/tag/Synapse1?relationships=true", server.WebAPIPath, uuid, data.DataName()) }
// CreateComposite creates a new rgba8 image by combining hash of labels + the grayscale func (d *Data) CreateComposite(request datastore.Request, reply *datastore.Response) error { timedLog := dvid.NewTimeLog() // Parse the request var uuidStr, dataName, cmdStr, grayscaleName, destName string request.CommandArgs(1, &uuidStr, &dataName, &cmdStr, &grayscaleName, &destName) // Get the version uuid, v, err := datastore.MatchingUUID(uuidStr) if err != nil { return err } // Log request if err = datastore.AddToNodeLog(uuid, []string{request.Command.String()}); err != nil { return err } // Get the grayscale data. dataservice, err := datastore.GetDataByUUIDName(uuid, dvid.InstanceName(grayscaleName)) if err != nil { return err } grayscale, ok := dataservice.(*imageblk.Data) if !ok { return fmt.Errorf("%s is not the name of uint8 data", grayscaleName) } // Create a new rgba8blk data. var compservice datastore.DataService compservice, err = datastore.GetDataByUUIDName(uuid, dvid.InstanceName(destName)) if err == nil { return fmt.Errorf("Data instance with name %q already exists", destName) } typeService, err := datastore.TypeServiceByName("rgba8blk") if err != nil { return fmt.Errorf("Could not get rgba8 type service from DVID") } config := dvid.NewConfig() compservice, err = datastore.NewData(uuid, typeService, dvid.InstanceName(destName), config) if err != nil { return err } composite, ok := compservice.(*imageblk.Data) if !ok { return fmt.Errorf("Error: %s was unable to be set to rgba8 data", destName) } // Iterate through all labels and grayscale chunks incrementally in Z, a layer at a time. wg := new(sync.WaitGroup) op := &compositeOp{grayscale, composite, v} chunkOp := &storage.ChunkOp{op, wg} store, err := d.GetOrderedKeyValueDB() if err != nil { return err } ctx := datastore.NewVersionedCtx(d, v) extents := d.Extents() blockBeg := imageblk.NewTKey(extents.MinIndex) blockEnd := imageblk.NewTKey(extents.MaxIndex) err = store.ProcessRange(ctx, blockBeg, blockEnd, chunkOp, storage.ChunkFunc(d.CreateCompositeChunk)) wg.Wait() // Set new mapped data to same extents. composite.Properties.Extents = grayscale.Properties.Extents if err := datastore.SaveDataByUUID(uuid, composite); err != nil { dvid.Infof("Could not save new data '%s': %v\n", destName, err) } timedLog.Infof("Created composite of %s and %s", grayscaleName, destName) return nil }
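// From the CommandArgs call above, the console request parsed by CreateComposite has
// the shape:
//
//	<uuid> <labels data name> <command verb> <grayscale data name> <new composite name>
//
// A hypothetical invocation (the verb name and client syntax are assumptions for
// illustration, not confirmed by this function):
//
//	dvid node f8a03 mylabels composite mygrayscale mycomposite
//
// The destination name must not already exist; a new "rgba8blk" instance is created
// for it, filled chunk by chunk from the labels plus grayscale, and finally given
// the same extents as the grayscale source.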
func testRequest(t *testing.T, uuid dvid.UUID, versionID dvid.VersionID, name dvid.InstanceName) {
	config := dvid.NewConfig()
	dataservice, err := datastore.NewData(uuid, kvtype, name, config)
	if err != nil {
		t.Fatalf("Error creating new keyvalue instance: %v\n", err)
	}
	data, ok := dataservice.(*Data)
	if !ok {
		t.Fatalf("Returned new data instance is not keyvalue.Data\n")
	}

	// PUT a value
	key1 := "mykey"
	value1 := "some stuff"
	key1req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid, data.DataName(), key1)
	server.TestHTTP(t, "POST", key1req, strings.NewReader(value1))

	// Get back k/v
	returnValue := server.TestHTTP(t, "GET", key1req, nil)
	if string(returnValue) != value1 {
		t.Errorf("Error on key %q: expected %s, got %s\n", key1, value1, string(returnValue))
	}

	// Expect error if no key used.
	badrequest := fmt.Sprintf("%snode/%s/%s/key/", server.WebAPIPath, uuid, data.DataName())
	server.TestBadHTTP(t, "GET", badrequest, nil)

	// Add 2nd k/v
	key2 := "my2ndkey"
	value2 := "more good stuff"
	key2req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid, data.DataName(), key2)
	server.TestHTTP(t, "POST", key2req, strings.NewReader(value2))

	// Add 3rd k/v
	key3 := "heresanotherkey"
	value3 := "my 3rd value"
	key3req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid, data.DataName(), key3)
	server.TestHTTP(t, "POST", key3req, strings.NewReader(value3))

	// Check return of first two keys in range; keys are returned in ascending order.
	rangereq := fmt.Sprintf("%snode/%s/%s/keyrange/%s/%s", server.WebAPIPath, uuid, data.DataName(), "my", "zebra")
	returnValue = server.TestHTTP(t, "GET", rangereq, nil)

	var retrievedKeys []string
	if err = json.Unmarshal(returnValue, &retrievedKeys); err != nil {
		t.Errorf("Bad key range request unmarshal: %v\n", err)
	}
	if len(retrievedKeys) != 2 || retrievedKeys[0] != key2 || retrievedKeys[1] != key1 {
		t.Errorf("Bad key range request return. Expected: [%q,%q]. Got: %s\n", key2, key1, string(returnValue))
	}

	// Check return of all keys
	allkeyreq := fmt.Sprintf("%snode/%s/%s/keys", server.WebAPIPath, uuid, data.DataName())
	returnValue = server.TestHTTP(t, "GET", allkeyreq, nil)

	if err = json.Unmarshal(returnValue, &retrievedKeys); err != nil {
		t.Errorf("Bad all keys request unmarshal: %v\n", err)
	}
	if len(retrievedKeys) != 3 || retrievedKeys[0] != key3 || retrievedKeys[1] != key2 || retrievedKeys[2] != key1 {
		t.Errorf("Bad all key request return. Expected: [%q,%q,%q]. Got: %s\n", key3, key2, key1, string(returnValue))
	}
}
func TestKeyvalueUnversioned(t *testing.T) { datastore.OpenTest() defer datastore.CloseTest() uuid, _ := initTestRepo() config := dvid.NewConfig() config.Set("versioned", "false") dataservice, err := datastore.NewData(uuid, kvtype, "unversiontest", config) if err != nil { t.Fatalf("Error creating new keyvalue instance: %v\n", err) } data, ok := dataservice.(*Data) if !ok { t.Fatalf("Returned new data instance is not roi.Data\n") } // PUT a value key1 := "mykey" value1 := "some stuff" key1req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid, data.DataName(), key1) server.TestHTTP(t, "POST", key1req, strings.NewReader(value1)) // Add 2nd k/v key2 := "my2ndkey" value2 := "more good stuff" key2req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid, data.DataName(), key2) server.TestHTTP(t, "POST", key2req, strings.NewReader(value2)) // Create a new version in repo if err = datastore.Commit(uuid, "my commit msg", []string{"stuff one", "stuff two"}); err != nil { t.Errorf("Unable to lock root node %s: %v\n", uuid, err) } uuid2, err := datastore.NewVersion(uuid, "some child", nil) if err != nil { t.Fatalf("Unable to create new version off node %s: %v\n", uuid, err) } _, err = datastore.VersionFromUUID(uuid2) if err != nil { t.Fatalf("Unable to get version ID from new uuid %s: %v\n", uuid2, err) } // Change the 2nd k/v uuid2val := "this is completely different" uuid2req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid2, data.DataName(), key2) server.TestHTTP(t, "POST", uuid2req, strings.NewReader(uuid2val)) // Now the first version value should equal the new value returnValue := server.TestHTTP(t, "GET", key2req, nil) if string(returnValue) != uuid2val { t.Errorf("Error on unversioned key %q: expected %s, got %s\n", key2, uuid2val, string(returnValue)) } // Get the second version value returnValue = server.TestHTTP(t, "GET", uuid2req, nil) if string(returnValue) != uuid2val { t.Errorf("Error on unversioned key %q: expected %s, got %s\n", key2, uuid2val, string(returnValue)) } // Check return of first two keys in range. rangereq := fmt.Sprintf("%snode/%s/%s/keyrange/%s/%s", server.WebAPIPath, uuid, data.DataName(), "my", "zebra") returnValue = server.TestHTTP(t, "GET", rangereq, nil) var retrievedKeys []string if err = json.Unmarshal(returnValue, &retrievedKeys); err != nil { t.Errorf("Bad key range request unmarshal: %v\n", err) } if len(retrievedKeys) != 2 || retrievedKeys[1] != "mykey" && retrievedKeys[0] != "my2ndKey" { t.Errorf("Bad key range request return. Expected: [%q,%q]. Got: %s\n", key1, key2, string(returnValue)) } // Commit the repo if err = datastore.Commit(uuid2, "my 2nd commit msg", []string{"changed 2nd k/v"}); err != nil { t.Errorf("Unable to commit node %s: %v\n", uuid2, err) } // Make grandchild of root uuid3, err := datastore.NewVersion(uuid2, "some child", nil) if err != nil { t.Fatalf("Unable to create new version off node %s: %v\n", uuid2, err) } // Delete the 2nd k/v uuid3req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid3, data.DataName(), key2) server.TestHTTP(t, "DELETE", uuid3req, nil) server.TestBadHTTP(t, "GET", uuid3req, nil) // Make sure the 2nd k/v is now missing for previous versions. 
server.TestBadHTTP(t, "GET", key2req, nil) server.TestBadHTTP(t, "GET", uuid2req, nil) // Make a child if err = datastore.Commit(uuid3, "my 3rd commit msg", []string{"deleted 2nd k/v"}); err != nil { t.Errorf("Unable to commit node %s: %v\n", uuid2, err) } uuid4, err := datastore.NewVersion(uuid3, "some child", nil) if err != nil { t.Fatalf("Unable to create new version off node %s: %v\n", uuid3, err) } // Change the 2nd k/v uuid4val := "we are reintroducing this k/v" uuid4req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid4, data.DataName(), key2) server.TestHTTP(t, "POST", uuid4req, strings.NewReader(uuid4val)) if err = datastore.Commit(uuid4, "commit node 4", []string{"we modified stuff"}); err != nil { t.Errorf("Unable to commit node %s: %v\n", uuid4, err) } // Make sure the 2nd k/v is correct for each of previous versions. returnValue = server.TestHTTP(t, "GET", key2req, nil) if string(returnValue) != uuid4val { t.Errorf("Error on first version, key %q: expected %s, got %s\n", key2, uuid4val, string(returnValue)) } returnValue = server.TestHTTP(t, "GET", uuid2req, nil) if string(returnValue) != uuid4val { t.Errorf("Error on second version, key %q: expected %s, got %s\n", key2, uuid4val, string(returnValue)) } returnValue = server.TestHTTP(t, "GET", uuid3req, nil) if string(returnValue) != uuid4val { t.Errorf("Error on third version, key %q: expected %s, got %s\n", key2, uuid4val, string(returnValue)) } returnValue = server.TestHTTP(t, "GET", uuid4req, nil) if string(returnValue) != uuid4val { t.Errorf("Error on fourth version, key %q: expected %s, got %s\n", key2, uuid4val, string(returnValue)) } }
func TestKeyvalueVersioning(t *testing.T) { datastore.OpenTest() defer datastore.CloseTest() uuid, _ := initTestRepo() config := dvid.NewConfig() dataservice, err := datastore.NewData(uuid, kvtype, "versiontest", config) if err != nil { t.Fatalf("Error creating new keyvalue instance: %v\n", err) } data, ok := dataservice.(*Data) if !ok { t.Fatalf("Returned new data instance is not roi.Data\n") } // PUT a value key1 := "mykey" value1 := "some stuff" key1req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid, data.DataName(), key1) server.TestHTTP(t, "POST", key1req, strings.NewReader(value1)) // Add 2nd k/v key2 := "my2ndkey" value2 := "more good stuff" key2req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid, data.DataName(), key2) server.TestHTTP(t, "POST", key2req, strings.NewReader(value2)) // Create a new version in repo if err = datastore.Commit(uuid, "my commit msg", []string{"stuff one", "stuff two"}); err != nil { t.Errorf("Unable to lock root node %s: %v\n", uuid, err) } uuid2, err := datastore.NewVersion(uuid, "some child", nil) if err != nil { t.Fatalf("Unable to create new version off node %s: %v\n", uuid, err) } _, err = datastore.VersionFromUUID(uuid2) if err != nil { t.Fatalf("Unable to get version ID from new uuid %s: %v\n", uuid2, err) } // Change the 2nd k/v uuid2val := "this is completely different" uuid2req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid2, data.DataName(), key2) server.TestHTTP(t, "POST", uuid2req, strings.NewReader(uuid2val)) // Get the first version value returnValue := server.TestHTTP(t, "GET", key2req, nil) if string(returnValue) != value2 { t.Errorf("Error on first version, key %q: expected %s, got %s\n", key2, value2, string(returnValue)) } // Get the second version value returnValue = server.TestHTTP(t, "GET", uuid2req, nil) if string(returnValue) != uuid2val { t.Errorf("Error on second version, key %q: expected %s, got %s\n", key2, uuid2val, string(returnValue)) } // Check return of first two keys in range. rangereq := fmt.Sprintf("%snode/%s/%s/keyrange/%s/%s", server.WebAPIPath, uuid, data.DataName(), "my", "zebra") returnValue = server.TestHTTP(t, "GET", rangereq, nil) var retrievedKeys []string if err = json.Unmarshal(returnValue, &retrievedKeys); err != nil { t.Errorf("Bad key range request unmarshal: %v\n", err) } if len(retrievedKeys) != 2 || retrievedKeys[1] != "mykey" && retrievedKeys[0] != "my2ndKey" { t.Errorf("Bad key range request return. Expected: [%q,%q]. Got: %s\n", key1, key2, string(returnValue)) } // Commit the repo if err = datastore.Commit(uuid2, "my 2nd commit msg", []string{"changed 2nd k/v"}); err != nil { t.Errorf("Unable to commit node %s: %v\n", uuid2, err) } // Make grandchild of root uuid3, err := datastore.NewVersion(uuid2, "some child", nil) if err != nil { t.Fatalf("Unable to create new version off node %s: %v\n", uuid2, err) } // Delete the 2nd k/v uuid3req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid3, data.DataName(), key2) server.TestHTTP(t, "DELETE", uuid3req, nil) server.TestBadHTTP(t, "GET", uuid3req, nil) // Make sure the 2nd k/v is correct for each of previous versions. 
returnValue = server.TestHTTP(t, "GET", key2req, nil) if string(returnValue) != value2 { t.Errorf("Error on first version, key %q: expected %s, got %s\n", key2, value2, string(returnValue)) } returnValue = server.TestHTTP(t, "GET", uuid2req, nil) if string(returnValue) != uuid2val { t.Errorf("Error on second version, key %q: expected %s, got %s\n", key2, uuid2val, string(returnValue)) } // Make a child if err = datastore.Commit(uuid3, "my 3rd commit msg", []string{"deleted 2nd k/v"}); err != nil { t.Errorf("Unable to commit node %s: %v\n", uuid2, err) } uuid4, err := datastore.NewVersion(uuid3, "some child", nil) if err != nil { t.Fatalf("Unable to create new version off node %s: %v\n", uuid3, err) } // Change the 2nd k/v uuid4val := "we are reintroducing this k/v" uuid4req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid4, data.DataName(), key2) server.TestHTTP(t, "POST", uuid4req, strings.NewReader(uuid4val)) if err = datastore.Commit(uuid4, "commit node 4", []string{"we modified stuff"}); err != nil { t.Errorf("Unable to commit node %s: %v\n", uuid4, err) } // Make sure the 2nd k/v is correct for each of previous versions. returnValue = server.TestHTTP(t, "GET", key2req, nil) if string(returnValue) != value2 { t.Errorf("Error on first version, key %q: expected %s, got %s\n", key2, value2, string(returnValue)) } returnValue = server.TestHTTP(t, "GET", uuid2req, nil) if string(returnValue) != uuid2val { t.Errorf("Error on second version, key %q: expected %s, got %s\n", key2, uuid2val, string(returnValue)) } server.TestBadHTTP(t, "GET", uuid3req, nil) returnValue = server.TestHTTP(t, "GET", uuid4req, nil) if string(returnValue) != uuid4val { t.Errorf("Error on fourth version, key %q: expected %s, got %s\n", key2, uuid4val, string(returnValue)) } // Let's try a merge! // Make a child off the 2nd version from root. uuid5, err := datastore.NewVersion(uuid2, "some child", nil) if err != nil { t.Fatalf("Unable to create new version off node %s: %v\n", uuid2, err) } // Store new stuff in 2nd k/v uuid5val := "this is forked value" uuid5req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid5, data.DataName(), key2) server.TestHTTP(t, "POST", uuid5req, strings.NewReader(uuid5val)) returnValue = server.TestHTTP(t, "GET", uuid5req, nil) if string(returnValue) != uuid5val { t.Errorf("Error on merged child, key %q: expected %q, got %q\n", key2, uuid5val, string(returnValue)) } // Commit node if err = datastore.Commit(uuid5, "forked node", []string{"we modified stuff"}); err != nil { t.Errorf("Unable to commit node %s: %v\n", uuid5, err) } // Should be able to merge using conflict-free (disjoint at key level) merge even though // its conflicted. Will get lazy error on request. badChild, err := datastore.Merge([]dvid.UUID{uuid4, uuid5}, "some child", datastore.MergeConflictFree) if err != nil { t.Errorf("Error doing merge: %v\n", err) } childreq := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, badChild, data.DataName(), key2) server.TestBadHTTP(t, "GET", childreq, nil) // Manually fix conflict: Branch, and then delete 2nd k/v and commit. 
uuid6, err := datastore.NewVersion(uuid5, "some child", nil) if err != nil { t.Fatalf("Unable to create new version off node %s: %v\n", uuid5, err) } uuid6req := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, uuid6, data.DataName(), key2) server.TestHTTP(t, "DELETE", uuid6req, nil) server.TestBadHTTP(t, "GET", uuid6req, nil) if err = datastore.Commit(uuid6, "deleted forked node 2nd k/v", []string{"we modified stuff"}); err != nil { t.Errorf("Unable to commit node %s: %s\n", uuid6, err) } // Should now be able to correctly merge the two branches. goodChild, err := datastore.Merge([]dvid.UUID{uuid4, uuid6}, "merging stuff", datastore.MergeConflictFree) if err != nil { t.Errorf("Error doing merge: %v\n", err) } // We should be able to see just the original uuid4 value of the 2nd k/v childreq = fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, goodChild, data.DataName(), key2) returnValue = server.TestHTTP(t, "GET", childreq, nil) if string(returnValue) != uuid4val { t.Errorf("Error on merged child, key %q: expected %q, got %q\n", key2, uuid4val, string(returnValue)) } // Apply the automatic conflict resolution using ordering. payload := fmt.Sprintf(`{"data":["versiontest"],"parents":[%q,%q],"note":"automatic resolved merge"}`, uuid5, uuid4) resolveReq := fmt.Sprintf("%srepo/%s/resolve", server.WebAPIPath, uuid4) returnValue = server.TestHTTP(t, "POST", resolveReq, bytes.NewBufferString(payload)) resolveResp := struct { Child dvid.UUID `json:"child"` }{} if err := json.Unmarshal(returnValue, &resolveResp); err != nil { t.Fatalf("Can't parse return of resolve request: %s\n", string(returnValue)) } // We should now see the uuid5 version of the 2nd k/v in the returned merged node. childreq = fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, resolveResp.Child, data.DataName(), key2) returnValue = server.TestHTTP(t, "GET", childreq, nil) if string(returnValue) != uuid5val { t.Errorf("Error on auto merged child, key %q: expected %q, got %q\n", key2, uuid5val, string(returnValue)) } // Introduce a child off root but don't add 2nd k/v to it. uuid7, err := datastore.NewVersion(uuid, "2nd child off root", nil) if err != nil { t.Fatalf("Unable to create new version off node %s: %v\n", uuid, err) } if err = datastore.Commit(uuid7, "useless node", []string{"we modified nothing!"}); err != nil { t.Errorf("Unable to commit node %s: %v\n", uuid7, err) } // Now merge the previously merged node with the newly created "blank" child off root. if err = datastore.Commit(goodChild, "this was a good merge", []string{}); err != nil { t.Errorf("Unable to commit node %s: %v\n", goodChild, err) } merge2, err := datastore.Merge([]dvid.UUID{goodChild, uuid7}, "merging a useless path", datastore.MergeConflictFree) if err != nil { t.Errorf("Error doing merge: %v\n", err) } merge3, err := datastore.Merge([]dvid.UUID{uuid7, goodChild}, "merging a useless path in reverse order", datastore.MergeConflictFree) if err != nil { t.Errorf("Error doing merge: %v\n", err) } // We should still be conflict free since 2nd key in left parent path will take precedent over shared 2nd key // in root. This tests our invalidation of ancestors. 
toughreq := fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, merge2, data.DataName(), key2) returnValue = server.TestHTTP(t, "GET", toughreq, nil) if string(returnValue) != uuid4val { t.Errorf("Error on merged child, key %q: expected %q, got %q\n", key2, uuid4val, string(returnValue)) } toughreq = fmt.Sprintf("%snode/%s/%s/key/%s", server.WebAPIPath, merge3, data.DataName(), key2) returnValue = server.TestHTTP(t, "GET", toughreq, nil) if string(returnValue) != uuid4val { t.Errorf("Error on merged child, key %q: expected %q, got %q\n", key2, uuid4val, string(returnValue)) } }
// Do acts as a switchboard for remote command execution func (c *RPCConnection) Do(cmd datastore.Request, reply *datastore.Response) error { if reply == nil { dvid.Debugf("reply is nil coming in!\n") return nil } if cmd.Name() == "" { return fmt.Errorf("Server error: got empty command!") } switch cmd.Name() { case "help": reply.Text = fmt.Sprintf(RPCHelpMessage, config.RPCAddress(), config.HTTPAddress()) case "shutdown": Shutdown() // Make this process shut down in a second to allow time for the RPC to finish. // TODO -- Better way to do this? log.Printf("DVID server halted due to 'shutdown' command.") reply.Text = fmt.Sprintf("DVID server at %s has been halted.\n", config.RPCAddress()) go func() { time.Sleep(1 * time.Second) os.Exit(0) }() case "types": if len(cmd.Command) == 1 { text := "\nData Types within this DVID Server\n" text += "----------------------------------\n" mapTypes, err := datastore.Types() if err != nil { return fmt.Errorf("Error trying to retrieve data types within this DVID server!") } for url, typeservice := range mapTypes { text += fmt.Sprintf("%-20s %s\n", typeservice.GetTypeName(), url) } reply.Text = text } else { if len(cmd.Command) != 3 || cmd.Command[2] != "help" { return fmt.Errorf("Unknown types command: %q", cmd.Command) } var typename string cmd.CommandArgs(1, &typename) typeservice, err := datastore.TypeServiceByName(dvid.TypeString(typename)) if err != nil { return err } reply.Text = typeservice.Help() } case "repos": var subcommand, alias, description, uuidStr string cmd.CommandArgs(1, &subcommand, &alias, &description, &uuidStr) switch subcommand { case "new": var assign *dvid.UUID if uuidStr == "" { assign = nil } else { u := dvid.UUID(uuidStr) assign = &u } root, err := datastore.NewRepo(alias, description, assign) if err != nil { return err } if err := datastore.SetRepoAlias(root, alias); err != nil { return err } if err := datastore.SetRepoDescription(root, description); err != nil { return err } reply.Text = fmt.Sprintf("New repo %q created with head node %s\n", alias, root) default: return fmt.Errorf("Unknown repos command: %q", subcommand) } case "repo": var uuidStr, subcommand string cmd.CommandArgs(1, &uuidStr, &subcommand) uuid, _, err := datastore.MatchingUUID(uuidStr) if err != nil { return err } switch subcommand { case "new": var typename, dataname string cmd.CommandArgs(3, &typename, &dataname) // Get TypeService typeservice, err := datastore.TypeServiceByName(dvid.TypeString(typename)) if err != nil { return err } // Create new data config := cmd.Settings() _, err = datastore.NewData(uuid, typeservice, dvid.InstanceName(dataname), config) if err != nil { return err } reply.Text = fmt.Sprintf("Data %q [%s] added to node %s\n", dataname, typename, uuid) datastore.AddToRepoLog(uuid, []string{cmd.String()}) case "branch": cmd.CommandArgs(3, &uuidStr) var assign *dvid.UUID if uuidStr == "" { assign = nil } else { u := dvid.UUID(uuidStr) assign = &u } child, err := datastore.NewVersion(uuid, fmt.Sprintf("branch of %s", uuid), assign) if err != nil { return err } reply.Text = fmt.Sprintf("Branch %s added to node %s\n", child, uuid) datastore.AddToRepoLog(uuid, []string{cmd.String()}) case "merge": uuids := cmd.CommandArgs(2) parents := make([]dvid.UUID, len(uuids)+1) parents[0] = dvid.UUID(uuid) i := 1 for _, u := range uuids { parents[i] = dvid.UUID(u) i++ } child, err := datastore.Merge(parents, fmt.Sprintf("merge of parents %v", parents), datastore.MergeConflictFree) if err != nil { return err } reply.Text = fmt.Sprintf("Parents %v merged into node %s\n", parents, child) datastore.AddToRepoLog(uuid, []string{cmd.String()}) case "push": /* var target string cmd.CommandArgs(3, &target) config := cmd.Settings() if err = datastore.Push(repo, target, config); err != nil { return err } reply.Text = fmt.Sprintf("Repo %q pushed to %q\n", repo.RootUUID(), target) */ return fmt.Errorf("push command has been temporarily suspended") default: return fmt.Errorf("Unknown repo command: %q", subcommand) } case "node": var uuidStr, descriptor string cmd.CommandArgs(1, &uuidStr, &descriptor) uuid, _, err := datastore.MatchingUUID(uuidStr) if err != nil { return err } // Get the DataService dataname := dvid.InstanceName(descriptor) var subcommand string cmd.CommandArgs(3, &subcommand) dataservice, err := datastore.GetDataByUUID(uuid, dataname) if err != nil { return err } if subcommand == "help" { reply.Text = dataservice.Help() return nil } return dataservice.DoRPC(cmd, reply) default: return fmt.Errorf("Unknown command: '%s'", cmd) } return nil }
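// mergeParents is a hypothetical helper, not part of the DVID source: a minimal sketch of how the "repo ... merge"
// case above assembles its parent list, with the matched UUID first followed by every additional UUID argument
// passed on the command line.
func mergeParents(primary dvid.UUID, extra []string) []dvid.UUID {
	parents := make([]dvid.UUID, 0, len(extra)+1)
	parents = append(parents, primary)
	for _, u := range extra {
		parents = append(parents, dvid.UUID(u))
	}
	return parents
}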
func TestROIRepoPersistence(t *testing.T) { datastore.OpenTest() defer datastore.CloseTest() uuid, _ := initTestRepo() // Add data config := dvid.NewConfig() dataservice1, err := datastore.NewData(uuid, roitype, "myroi", config) if err != nil { t.Errorf("Error creating new roi instance: %v\n", err) } roi1, ok := dataservice1.(*Data) if !ok { t.Errorf("Returned new data instance 1 is not roi.Data\n") } if roi1.DataName() != "myroi" { t.Errorf("New roi data instance name set incorrectly: %q != %q\n", roi1.DataName(), "myroi") } config.Set("BlockSize", "15,16,17") dataservice2, err := datastore.NewData(uuid, roitype, "myroi2", config) if err != nil { t.Errorf("Error creating new roi instance: %v\n", err) } roi2, ok := dataservice2.(*Data) if !ok { t.Errorf("Returned new data instance 2 is not roi.Data\n") } roi2.MinZ = 13 roi2.MaxZ = 3098 oldData := *roi2 // Check instance IDs if roi1.InstanceID() == roi2.InstanceID() { t.Errorf("Instance IDs should be different: %d == %d\n", roi1.InstanceID(), roi2.InstanceID()) } // Restart the test datastore and see if the data instances are still there. if err = datastore.SaveDataByUUID(uuid, dataservice1); err != nil { t.Fatalf("Unable to save data1 during ROI persistence test: %v\n", err) } if err = datastore.SaveDataByUUID(uuid, dataservice2); err != nil { t.Fatalf("Unable to save data2 during ROI persistence test: %v\n", err) } datastore.CloseReopenTest() dataservice3, err := datastore.GetDataByUUID(uuid, "myroi2") if err != nil { t.Fatalf("Can't get second ROI instance from reloaded test db: %v\n", err) } roi2new, ok := dataservice3.(*Data) if !ok { t.Fatalf("Returned new data instance 3 is not roi.Data\n") } if !oldData.Equals(roi2new) { t.Errorf("Expected %v, got %v\n", oldData, *roi2new) } }
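// mustGetROI is a hypothetical test helper, not part of the original tests: a small sketch wrapping the
// GetDataByUUID + type-assertion pattern used above so that reload checks fail fast with a single call.
// It assumes GetDataByUUID accepts a dvid.InstanceName, matching its use in the tests above.
func mustGetROI(t *testing.T, uuid dvid.UUID, name dvid.InstanceName) *Data {
	dataservice, err := datastore.GetDataByUUID(uuid, name)
	if err != nil {
		t.Fatalf("Can't get ROI instance %q from test db: %v\n", name, err)
	}
	d, ok := dataservice.(*Data)
	if !ok {
		t.Fatalf("Returned instance %q is not roi.Data\n", name)
	}
	return d
}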
// switchboard for remote command execution func handleCommand(cmd *datastore.Request) (reply *datastore.Response, err error) { if cmd.Name() == "" { err = fmt.Errorf("Server error: got empty command!") return } reply = new(datastore.Response) switch cmd.Name() { case "help": reply.Text = fmt.Sprintf(RPCHelpMessage, config.RPCAddress(), config.HTTPAddress()) case "shutdown": dvid.Infof("DVID server halting due to 'shutdown' command.") reply.Text = fmt.Sprintf("DVID server at %s is being shut down...\n", config.RPCAddress()) // Launch Shutdown in a goroutine so we can concurrently return the shutdown message to the client. go Shutdown() case "types": if len(cmd.Command) == 1 { text := "\nData Types within this DVID Server\n" text += "----------------------------------\n" var mapTypes map[dvid.URLString]datastore.TypeService if mapTypes, err = datastore.Types(); err != nil { err = fmt.Errorf("Error trying to retrieve data types within this DVID server!") return } for url, typeservice := range mapTypes { text += fmt.Sprintf("%-20s %s\n", typeservice.GetTypeName(), url) } reply.Text = text } else { if len(cmd.Command) != 3 || cmd.Command[2] != "help" { err = fmt.Errorf("Unknown types command: %q", cmd.Command) return } var typename string var typeservice datastore.TypeService cmd.CommandArgs(1, &typename) if typeservice, err = datastore.TypeServiceByName(dvid.TypeString(typename)); err != nil { return } reply.Text = typeservice.Help() } case "repos": var subcommand string cmd.CommandArgs(1, &subcommand) switch subcommand { case "new": var alias, description string cmd.CommandArgs(2, &alias, &description) config := cmd.Settings() var uuidStr, passcode string var found bool if uuidStr, found, err = config.GetString("uuid"); err != nil { return } var assign *dvid.UUID if !found { assign = nil } else { uuid := dvid.UUID(uuidStr) assign = &uuid } if passcode, found, err = config.GetString("passcode"); err != nil { return } var root dvid.UUID root, err = datastore.NewRepo(alias, description, assign, passcode) if err != nil { return } if err = datastore.SetRepoAlias(root, alias); err != nil { return } if err = datastore.SetRepoDescription(root, description); err != nil { return } reply.Text = fmt.Sprintf("New repo %q created with head node %s\n", alias, root) case "delete": var uuidStr, passcode string cmd.CommandArgs(2, &uuidStr, &passcode) var uuid dvid.UUID if uuid, _, err = datastore.MatchingUUID(uuidStr); err != nil { return } if err = datastore.DeleteRepo(uuid, passcode); err != nil { return } reply.Text = fmt.Sprintf("Started deletion of repo %s.\n", uuid) default: err = fmt.Errorf("Unknown repos command: %q", subcommand) return } case "repo": var uuidStr, subcommand string cmd.CommandArgs(1, &uuidStr, &subcommand) var uuid dvid.UUID if uuid, _, err = datastore.MatchingUUID(uuidStr); err != nil { return } switch subcommand { case "new": var typename, dataname string cmd.CommandArgs(3, &typename, &dataname) // Get TypeService var typeservice datastore.TypeService if typeservice, err = datastore.TypeServiceByName(dvid.TypeString(typename)); err != nil { return } // Create new data config := cmd.Settings() if _, err = datastore.NewData(uuid, typeservice, dvid.InstanceName(dataname), config); err != nil { return } reply.Text = fmt.Sprintf("Data %q [%s] added to node %s\n", dataname, typename, uuid) datastore.AddToRepoLog(uuid, []string{cmd.String()}) case "rename": var name1, name2, passcode string cmd.CommandArgs(3, &name1, &name2, &passcode) oldname := dvid.InstanceName(name1) newname := dvid.InstanceName(name2) // Make sure this instance exists. if _, err = datastore.GetDataByUUIDName(uuid, oldname); err != nil { err = fmt.Errorf("Error trying to rename %q for UUID %s: %v", oldname, uuid, err) return } // Do the rename. if err = datastore.RenameData(uuid, oldname, newname, passcode); err != nil { err = fmt.Errorf("Error renaming data instance %q to %q: %v", oldname, newname, err) return } reply.Text = fmt.Sprintf("Renamed data instance %q to %q from DAG subgraph @ root %s\n", oldname, newname, uuid) case "branch": cmd.CommandArgs(3, &uuidStr) var assign *dvid.UUID if uuidStr == "" { assign = nil } else { u := dvid.UUID(uuidStr) assign = &u } var child dvid.UUID if child, err = datastore.NewVersion(uuid, fmt.Sprintf("branch of %s", uuid), assign); err != nil { return } reply.Text = fmt.Sprintf("Branch %s added to node %s\n", child, uuid) datastore.AddToRepoLog(uuid, []string{cmd.String()}) case "merge": uuids := cmd.CommandArgs(2) parents := make([]dvid.UUID, len(uuids)+1) parents[0] = dvid.UUID(uuid) i := 1 for _, u := range uuids { parents[i] = dvid.UUID(u) i++ } var child dvid.UUID child, err = datastore.Merge(parents, fmt.Sprintf("merge of parents %v", parents), datastore.MergeConflictFree) if err != nil { return } reply.Text = fmt.Sprintf("Parents %v merged into node %s\n", parents, child) datastore.AddToRepoLog(uuid, []string{cmd.String()}) case "migrate": var source, oldStoreName string cmd.CommandArgs(3, &source, &oldStoreName) var store dvid.Store store, err = storage.GetStoreByAlias(storage.Alias(oldStoreName)) if err != nil { return } config := cmd.Settings() go func() { if migrateErr := datastore.MigrateInstance(uuid, dvid.InstanceName(source), store, config); migrateErr != nil { dvid.Errorf("migrate error: %v\n", migrateErr) } }() reply.Text = fmt.Sprintf("Started migration of uuid %s data instance %q from old store %q...\n", uuid, source, oldStoreName) case "copy": var source, target string cmd.CommandArgs(3, &source, &target) config := cmd.Settings() go func() { if copyErr := datastore.CopyInstance(uuid, dvid.InstanceName(source), dvid.InstanceName(target), config); copyErr != nil { dvid.Errorf("copy error: %v\n", copyErr) } }() reply.Text = fmt.Sprintf("Started copy of uuid %s data instance %q to %q...\n", uuid, source, target) case "push": var target string cmd.CommandArgs(3, &target) config := cmd.Settings() go func() { if pushErr := datastore.PushRepo(uuid, target, config); pushErr != nil { dvid.Errorf("push error: %v\n", pushErr) } }() reply.Text = fmt.Sprintf("Started push of repo %s to %q...\n", uuid, target) /* case "pull": var target string cmd.CommandArgs(3, &target) config := cmd.Settings() if err = datastore.Pull(uuid, target, config); err != nil { return } reply.Text = fmt.Sprintf("Repo %s pulled from %q\n", uuid, target) */ case "delete": var dataname, passcode string cmd.CommandArgs(3, &dataname, &passcode) // Make sure this instance exists. if _, err = datastore.GetDataByUUIDName(uuid, dvid.InstanceName(dataname)); err != nil { err = fmt.Errorf("Error trying to delete %q for UUID %s: %v", dataname, uuid, err) return } // Do the deletion. Under the hood, this modifies metadata immediately and launches async k/v deletion. if err = datastore.DeleteDataByName(uuid, dvid.InstanceName(dataname), passcode); err != nil { err = fmt.Errorf("Error deleting data instance %q: %v", dataname, err) return } reply.Text = fmt.Sprintf("Started deletion of data instance %q from repo with root %s\n", dataname, uuid) default: err = fmt.Errorf("Unknown repo command: %q", subcommand) return } case "node": var uuidStr, descriptor string cmd.CommandArgs(1, &uuidStr, &descriptor) var uuid dvid.UUID if uuid, _, err = datastore.MatchingUUID(uuidStr); err != nil { return } // Get the DataService dataname := dvid.InstanceName(descriptor) var subcommand string cmd.CommandArgs(3, &subcommand) var dataservice datastore.DataService if dataservice, err = datastore.GetDataByUUIDName(uuid, dataname); err != nil { return } if subcommand == "help" { reply.Text = dataservice.Help() return } err = dataservice.DoRPC(*cmd, reply) return default: // Check whether it's the name of a compiled data type, in which case we refer the command to that data type. types := datastore.CompiledTypes() for name, typeservice := range types { if name == dvid.TypeString(cmd.Argument(0)) { err = typeservice.Do(*cmd, reply) return } } err = fmt.Errorf("Unknown command: '%s'", *cmd) } return }
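// runAsync is a hypothetical sketch, not part of the DVID source: it illustrates the fire-and-forget pattern used by
// the migrate/copy/push cases above, where the RPC reply returns immediately and the long-running operation reports
// its own outcome through the log rather than through the handler's error return.
func runAsync(desc string, op func() error) {
	go func() {
		if opErr := op(); opErr != nil {
			dvid.Errorf("%s error: %v\n", desc, opErr)
		} else {
			dvid.Infof("%s completed.\n", desc)
		}
	}()
}

// For example, the copy case above could be expressed as:
//   runAsync("copy", func() error { return datastore.CopyInstance(uuid, dvid.InstanceName(source), dvid.InstanceName(target), config) })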