func TestCommitBranchMerge(t *testing.T) {
	tests.UseStore()
	defer tests.CloseStore()

	uuid := createRepo(t)

	// Shouldn't be able to create branch on open node.
	branchReq := fmt.Sprintf("%snode/%s/branch", WebAPIPath, uuid)
	TestBadHTTP(t, "POST", branchReq, nil)

	// Commit it.
	payload := bytes.NewBufferString(`{"note": "This is my test commit", "log": ["line1", "line2", "some more stuff in a line"]}`)
	apiStr := fmt.Sprintf("%snode/%s/commit", WebAPIPath, uuid)
	TestHTTP(t, "POST", apiStr, payload)

	// Make sure committed nodes can only be read.
	// We shouldn't be able to write to the log.
	payload = bytes.NewBufferString(`{"log": ["line1", "line2", "some more stuff in a line"]}`)
	apiStr = fmt.Sprintf("%snode/%s/log", WebAPIPath, uuid)
	TestBadHTTP(t, "POST", apiStr, payload)

	// Should be able to create branch now that we've committed parent.
	respData := TestHTTP(t, "POST", branchReq, nil)
	resp := struct {
		Child string `json:"child"`
	}{}
	if err := json.Unmarshal(respData, &resp); err != nil {
		t.Errorf("Expected 'child' JSON response. Got %s\n", string(respData))
	}
	parent1 := dvid.UUID(resp.Child)

	// Create a sibling.
	respData = TestHTTP(t, "POST", branchReq, nil)
	if err := json.Unmarshal(respData, &resp); err != nil {
		t.Errorf("Expected 'child' JSON response. Got %s\n", string(respData))
	}
	parent2 := dvid.UUID(resp.Child)

	// Commit both parents.
	payload = bytes.NewBufferString(`{"note": "This is first parent"}`)
	apiStr = fmt.Sprintf("%snode/%s/commit", WebAPIPath, parent1)
	TestHTTP(t, "POST", apiStr, payload)
	payload = bytes.NewBufferString(`{"note": "This is second parent"}`)
	apiStr = fmt.Sprintf("%snode/%s/commit", WebAPIPath, parent2)
	TestHTTP(t, "POST", apiStr, payload)

	// Merge the two disjoint branches.
	mergeJSON := fmt.Sprintf(`{"mergeType": "conflict-free", "note": "This is my merged node", "parents": [%q, %q]}`, parent1[:7], parent2)
	payload = bytes.NewBufferString(mergeJSON)
	apiStr = fmt.Sprintf("%srepo/%s/merge", WebAPIPath, parent1)
	TestHTTP(t, "POST", apiStr, payload)
}
func TestDataGobEncoding(t *testing.T) {
	compression, _ := dvid.NewCompression(dvid.LZ4, dvid.DefaultCompression)

	data := &TestData{&Data{
		typename:    "testtype",
		typeurl:     "foo.bar.baz/testtype",
		typeversion: "1.0",
		id:          dvid.InstanceID(13),
		name:        "my fabulous data",
		rootUUID:    dvid.UUID("42"),
		dataUUID:    dvid.NewUUID(),
		compression: compression,
		checksum:    dvid.DefaultChecksum,
		syncData:    dvid.UUIDSet{"moo": struct{}{}, "bar": struct{}{}, "baz": struct{}{}},
	}}
	encoding, err := data.GobEncode()
	if err != nil {
		t.Fatalf("Couldn't Gob encode test data: %v\n", err)
	}
	data2 := &TestData{new(Data)}
	if err = data2.GobDecode(encoding); err != nil {
		t.Fatalf("Couldn't Gob decode test data: %v\n", err)
	}
	if !reflect.DeepEqual(data, data2) {
		t.Errorf("Bad Gob roundtrip:\nOriginal: %v\nReturned: %v\n", data, data2)
	}
}
// Lookup returns a mounted UUID Node.
func (MountDir) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) {
	uuid := dvid.UUID(name) // We expect fully formed UUID, not partial.
	mount, found := fuseServer.mounts[uuid]
	if found {
		return &VersionDir{mount}, nil
	}
	return nil, fuse.ENOENT
}
func TestVoxelsInstanceCreation(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid := dvid.UUID(server.NewTestRepo(t))

	// Create new voxels instance with optional parameters.
	name := "grayscale"
	metadata := fmt.Sprintf(`{ "typename": "uint8blk", "dataname": %q, "blocksize": "64,43,28", "VoxelSize": "13.1, 14.2, 15.3", "VoxelUnits": "picometers,nanometers,microns" }`, name)
	apiStr := fmt.Sprintf("%srepo/%s/instance", server.WebAPIPath, uuid)
	server.TestHTTP(t, "POST", apiStr, bytes.NewBufferString(metadata))

	// Get metadata and make sure optional settings have been set.
	apiStr = fmt.Sprintf("%snode/%s/%s/info", server.WebAPIPath, uuid, name)
	result := server.TestHTTP(t, "GET", apiStr, nil)
	var parsed = struct {
		Base struct {
			TypeName, Name string
		}
		Extended struct {
			BlockSize  dvid.Point3d
			VoxelSize  dvid.NdFloat32
			VoxelUnits dvid.NdString
		}
	}{}
	if err := json.Unmarshal(result, &parsed); err != nil {
		t.Fatalf("Error parsing JSON response of new instance metadata: %v\n", err)
	}
	if parsed.Base.Name != name {
		t.Errorf("Parsed new instance has unexpected name: %s != %s (expected)\n", parsed.Base.Name, name)
	}
	if parsed.Base.TypeName != "uint8blk" {
		t.Errorf("Parsed new instance has unexpected type name: %s != uint8blk (expected)\n", parsed.Base.TypeName)
	}
	if !parsed.Extended.BlockSize.Equals(dvid.Point3d{64, 43, 28}) {
		t.Errorf("Bad block size in new uint8blk instance: %s\n", parsed.Extended.BlockSize)
	}
	if !parsed.Extended.VoxelSize.Equals(dvid.NdFloat32{13.1, 14.2, 15.3}) {
		t.Errorf("Bad voxel size in new uint8blk instance: %s\n", parsed.Extended.VoxelSize)
	}
	if parsed.Extended.VoxelUnits[0] != "picometers" {
		t.Errorf("Got %q for X voxel units, not picometers\n", parsed.Extended.VoxelUnits[0])
	}
	if parsed.Extended.VoxelUnits[1] != "nanometers" {
		t.Errorf("Got %q for Y voxel units, not nanometers\n", parsed.Extended.VoxelUnits[1])
	}
	if parsed.Extended.VoxelUnits[2] != "microns" {
		t.Errorf("Got %q for Z voxel units, not microns\n", parsed.Extended.VoxelUnits[2])
	}
}
func setupGroupcache(config GroupcacheConfig) error {
	if config.GB == 0 {
		return nil
	}
	cacheBytes := int64(config.GB) << 30

	pool := groupcache.NewHTTPPool(config.Host)
	if pool != nil {
		dvid.Infof("Initializing groupcache with %d GB at %s...\n", config.GB, config.Host)
		manager.gcache.cache = groupcache.NewGroup("immutable", cacheBytes, groupcache.GetterFunc(
			func(c groupcache.Context, key string, dest groupcache.Sink) error {
				// Use KeyValueDB defined as context.
				gctx, ok := c.(GroupcacheCtx)
				if !ok {
					return fmt.Errorf("bad groupcache context: expected GroupcacheCtx, got %v", c)
				}

				// First four bytes of key is instance ID to isolate groupcache collisions.
				tk := TKey(key[4:])
				data, err := gctx.KeyValueDB.Get(gctx.Context, tk)
				if err != nil {
					return err
				}
				return dest.SetBytes(data)
			}))

		manager.gcache.supported = make(map[dvid.DataSpecifier]struct{})
		for _, dataspec := range config.Instances {
			name := strings.Trim(dataspec, "\"")
			parts := strings.Split(name, ":")
			switch len(parts) {
			case 2:
				dataid := dvid.GetDataSpecifier(dvid.InstanceName(parts[0]), dvid.UUID(parts[1]))
				manager.gcache.supported[dataid] = struct{}{}
			default:
				dvid.Errorf("bad data instance specification %q given for groupcache support in config file\n", dataspec)
			}
		}

		// If we have additional peers, add them and start a listener via the HTTP port.
		if len(config.Peers) > 0 {
			peers := []string{config.Host}
			peers = append(peers, config.Peers...)
			pool.Set(peers...)
			dvid.Infof("Groupcache configuration has %d peers in addition to local host.\n", len(config.Peers))
			dvid.Infof("Starting groupcache HTTP server on %s\n", config.Host)
			// Serve peer requests in the background so store initialization can continue.
			go func() {
				if err := http.ListenAndServe(config.Host, http.HandlerFunc(pool.ServeHTTP)); err != nil {
					dvid.Errorf("groupcache HTTP server error: %v\n", err)
				}
			}()
		}
	}
	return nil
}
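// The constant below is a hedged illustration only: it sketches the kind of TOML
// section that could decode into the GroupcacheConfig consumed by setupGroupcache
// above.  Only the fields the code actually reads (GB, Host, Instances, Peers) are
// shown; the section name, key spellings, and values are assumptions, not a
// documented configuration format.
const exampleGroupcacheTOML = `
[groupcache]
gb = 2
host = "http://localhost:8003"
instances = ["grayscale:99ef22cd85f143f58a623bd22aad0ef7"]
peers = ["http://peer1:8003", "http://peer2:8003"]
`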
func TestCommitAndBranch(t *testing.T) {
	tests.UseStore()
	defer tests.CloseStore()

	apiStr := fmt.Sprintf("%srepos", server.WebAPIPath)
	r := server.TestHTTP(t, "POST", apiStr, nil)
	var jsonResp map[string]interface{}
	if err := json.Unmarshal(r, &jsonResp); err != nil {
		t.Fatalf("Unable to unmarshal repo creation response: %s\n", string(r))
	}
	v, ok := jsonResp["root"]
	if !ok {
		t.Fatalf("No 'root' metadata returned: %s\n", string(r))
	}
	uuidStr, ok := v.(string)
	if !ok {
		t.Fatalf("Couldn't cast returned 'root' data (%v) into string.\n", v)
	}
	uuid := dvid.UUID(uuidStr)

	// Shouldn't be able to create branch on open node.
	branchReq := fmt.Sprintf("%snode/%s/branch", server.WebAPIPath, uuid)
	server.TestBadHTTP(t, "POST", branchReq, nil)

	// Add a keyvalue instance.
	server.CreateTestInstance(t, uuid, "keyvalue", "mykv", dvid.Config{})

	// Commit it.
	payload := bytes.NewBufferString(`{"note": "This is my test commit", "log": ["line1", "line2", "some more stuff in a line"]}`)
	apiStr = fmt.Sprintf("%snode/%s/commit", server.WebAPIPath, uuid)
	server.TestHTTP(t, "POST", apiStr, payload)

	// Make sure committed nodes can only be read.
	// We shouldn't be able to write to the keyvalue.
	keyReq := fmt.Sprintf("%snode/%s/mykv/key/foo", server.WebAPIPath, uuid)
	server.TestBadHTTP(t, "POST", keyReq, bytes.NewBufferString("some data"))

	// Should be able to create branch now that we've committed parent.
	respData := server.TestHTTP(t, "POST", branchReq, nil)
	resp := struct {
		Child dvid.UUID `json:"child"`
	}{}
	if err := json.Unmarshal(respData, &resp); err != nil {
		t.Errorf("Expected 'child' JSON response. Got %s\n", string(respData))
	}

	// We should be able to write to that keyvalue now in the child.
	keyReq = fmt.Sprintf("%snode/%s/mykv/key/foo", server.WebAPIPath, resp.Child)
	server.TestHTTP(t, "POST", keyReq, bytes.NewBufferString("some data"))
}
func parseConfig(config dvid.StoreConfig) (path string, timeout time.Duration, owner string, collection dvid.UUID, err error) {
	c := config.GetAll()

	v, found := c["path"]
	if !found {
		err = fmt.Errorf("%q must be specified for kvautobus configuration", "path")
		return
	}
	var ok bool
	path, ok = v.(string)
	if !ok {
		err = fmt.Errorf("%q setting must be a string (%v)", "path", v)
		return
	}

	v, found = c["timeout"]
	if found {
		t, ok := v.(int64)
		if !ok {
			err = fmt.Errorf("%q setting must be an int64 for # seconds, not %s (%v)", "timeout", reflect.TypeOf(v), v)
			return
		}
		if t != 0 {
			timeout = time.Duration(t) * time.Second
		}
	}

	v, found = c["collection"]
	if !found {
		err = fmt.Errorf("kvautobus store must have collection specification for billing.")
		return
	}
	cstr, ok := v.(string)
	if !ok {
		err = fmt.Errorf("%q setting must be a string, not %s (%v)", "collection", reflect.TypeOf(v), v)
		return
	}
	collection = dvid.UUID(cstr)

	v, found = c["owner"]
	if !found {
		err = fmt.Errorf("kvautobus store must have owner specification for billing.")
		return
	}
	owner, ok = v.(string)
	if !ok {
		err = fmt.Errorf("%q setting must be a string, not %s (%v)", "owner", reflect.TypeOf(v), v)
		return
	}
	return
}
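// A hedged sketch of the store table parseConfig above expects once decoded into a
// dvid.StoreConfig.  The [store.kvautobus] table name mirrors tc.Store["kvautobus"]
// in TestParseConfig, and the values mirror that test's assertions; the "engine" key
// is an assumption included only for illustration.
const exampleKVAutobusConfig = `
[store.kvautobus]
engine = "kvautobus"
path = "http://tem-dvid.int.janelia.org:9000"
timeout = 30
owner = "flyEM"
collection = "99ef22cd85f143f58a623bd22aad0ef7"
`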
func TestRepoGobEncoding(t *testing.T) {
	uuid := dvid.UUID("19b87f38f873481b9f3ac688877dff0d")
	versionID := dvid.VersionID(23)
	repoID := dvid.RepoID(13)

	repo := newRepo(uuid, versionID, repoID, "foobar")
	repo.alias = "just some alias"
	repo.log = []string{
		"Did this",
		"Then that",
		"And the other thing",
	}
	repo.properties = map[string]interface{}{
		"foo": 42,
		"bar": "some string",
		"baz": []int{3, 9, 7},
	}

	encoding, err := repo.GobEncode()
	if err != nil {
		t.Fatalf("Could not encode repo: %v\n", err)
	}
	received := repoT{}
	if err = received.GobDecode(encoding); err != nil {
		t.Fatalf("Could not decode repo: %v\n", err)
	}

	// Did we serialize OK?
	repo.dag = nil
	received.dag = nil
	if len(received.properties) != 3 {
		t.Errorf("Repo Gob messed up properties: %v\n", received.properties)
	}
	foo, ok := received.properties["foo"]
	if !ok || foo != 42 {
		t.Errorf("Repo Gob messed up properties: %v\n", received.properties)
	}
	bar, ok := received.properties["bar"]
	if !ok || bar != "some string" {
		t.Errorf("Repo Gob messed up properties: %v\n", received.properties)
	}
	baz, ok := received.properties["baz"]
	if !ok || !reflect.DeepEqual(baz, []int{3, 9, 7}) {
		t.Errorf("Repo Gob messed up properties: %v\n", received.properties)
	}
	repo.properties = nil
	received.properties = nil

	if !reflect.DeepEqual(*repo, received) {
		t.Fatalf("Repo Gob messed up:\nOriginal: %v\nReceived: %v\n", *repo, received)
	}
}
func TestUUIDAssignment(t *testing.T) {
	OpenTest()
	defer CloseTest()

	uuidStr1 := "de305d5475b4431badb2eb6b9e546014"
	myuuid := dvid.UUID(uuidStr1)
	root, err := NewRepo("test repo", "test repo description", &myuuid, "")
	if err != nil {
		t.Fatal(err)
	}
	if root != myuuid {
		t.Errorf("Assigned root UUID %q != created root UUID %q\n", myuuid, root)
	}

	// Check if branches can also have assigned UUIDs.
	if err := Commit(root, "root node", nil); err != nil {
		t.Fatal(err)
	}
	uuidStr2 := "8fa05d5475b4431badb2eb6b9e012301"
	myuuid2 := dvid.UUID(uuidStr2)
	child, err := NewVersion(myuuid, "note describing uuid2", &myuuid2)
	if err != nil {
		t.Fatal(err)
	}
	if child != myuuid2 {
		t.Errorf("Assigned child UUID %q != created child UUID %q\n", myuuid2, child)
	}

	// Make sure we can look up assigned UUIDs.
	uuid, _, err := MatchingUUID(uuidStr1[:10])
	if err != nil {
		t.Errorf("Error matching UUID fragment %s: %v\n", uuidStr1[:10], err)
	}
	if uuid != myuuid {
		t.Errorf("Error getting back correct UUID %s from %s\n", myuuid, uuid)
	}
}
func TestReloadMetadata(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid, _ := datastore.NewTestRepo()

	// Add data instances.
	var config dvid.Config
	server.CreateTestInstance(t, uuid, "keyvalue", "foo", config)
	server.CreateTestInstance(t, uuid, "labelblk", "labels", config)
	server.CreateTestInstance(t, uuid, "roi", "someroi", config)

	// Reload the metadata.
	apiStr := fmt.Sprintf("%sserver/reload-metadata", server.WebAPIPath)
	server.TestHTTP(t, "POST", apiStr, nil)

	// Make sure repo UUID is still there.
	jsonStr, err := datastore.MarshalJSON()
	if err != nil {
		t.Fatalf("can't get repos JSON: %v\n", err)
	}
	var jsonResp map[string](map[string]interface{})
	if err := json.Unmarshal(jsonStr, &jsonResp); err != nil {
		t.Fatalf("Unable to unmarshal repos info response: %s\n", jsonStr)
	}
	if len(jsonResp) != 1 {
		t.Errorf("reloaded repos did not have exactly one repo: %v\n", jsonResp)
	}
	for k := range jsonResp {
		if dvid.UUID(k) != uuid {
			t.Fatalf("Expected uuid %s, got %s. Full JSON:\n%v\n", uuid, k, jsonResp)
		}
	}

	// Make sure the data instances are still there.
	_, err = datastore.GetDataByUUIDName(uuid, "foo")
	if err != nil {
		t.Errorf("Couldn't get keyvalue data instance after reload\n")
	}
	_, err = datastore.GetDataByUUIDName(uuid, "labels")
	if err != nil {
		t.Errorf("Couldn't get labelblk data instance after reload\n")
	}
	_, err = datastore.GetDataByUUIDName(uuid, "someroi")
	if err != nil {
		t.Errorf("Couldn't get roi data instance after reload\n")
	}
}
func createRepo(t *testing.T) dvid.UUID {
	apiStr := fmt.Sprintf("%srepos", WebAPIPath)
	r := TestHTTP(t, "POST", apiStr, nil)
	var jsonResp map[string]interface{}
	if err := json.Unmarshal(r, &jsonResp); err != nil {
		t.Fatalf("Unable to unmarshal repo creation response: %s\n", string(r))
	}
	v, ok := jsonResp["root"]
	if !ok {
		t.Fatalf("No 'root' metadata returned: %s\n", string(r))
	}
	uuid, ok := v.(string)
	if !ok {
		t.Fatalf("Couldn't cast returned 'root' data (%v) into string.\n", v)
	}
	return dvid.UUID(uuid)
}
func TestDeleteInstance(t *testing.T) {
	tests.UseStore()
	defer tests.CloseStore()

	apiStr := fmt.Sprintf("%srepos", server.WebAPIPath)
	r := server.TestHTTP(t, "POST", apiStr, nil)
	var jsonResp map[string]interface{}
	if err := json.Unmarshal(r, &jsonResp); err != nil {
		t.Fatalf("Unable to unmarshal repo creation response: %s\n", string(r))
	}
	v, ok := jsonResp["root"]
	if !ok {
		t.Fatalf("No 'root' metadata returned: %s\n", string(r))
	}
	uuidStr, ok := v.(string)
	if !ok {
		t.Fatalf("Couldn't cast returned 'root' data (%v) into string.\n", v)
	}
	uuid := dvid.UUID(uuidStr)

	// Add a data instance.
	var config dvid.Config
	server.CreateTestInstance(t, uuid, "keyvalue", "foo", config)

	// Make sure it exists.
	_, err := datastore.GetDataByUUID(uuid, "foo")
	if err != nil {
		t.Errorf("Couldn't create data instance 'foo'\n")
	}

	// Shouldn't be able to delete instance without "imsure".
	delReq := fmt.Sprintf("%srepo/%s/%s", server.WebAPIPath, uuid, "foo")
	server.TestBadHTTP(t, "DELETE", delReq, nil)

	delReq = fmt.Sprintf("%srepo/%s/%s?imsure=true", server.WebAPIPath, uuid, "foo")
	server.TestHTTP(t, "DELETE", delReq, nil)

	// Make sure it no longer exists.
	_, err = datastore.GetDataByUUID(uuid, "foo")
	if err == nil {
		t.Errorf("Shouldn't be able to access a deleted data instance 'foo'\n")
	}
}
func TestLabelsSyncing(t *testing.T) {
	tests.UseStore()
	defer tests.CloseStore()

	uuid := dvid.UUID(server.NewTestRepo(t))
	if len(uuid) < 5 {
		t.Fatalf("Bad root UUID for new repo: %s\n", uuid)
	}

	// Create a labelblk instance.
	vol := labelVol{
		size:      dvid.Point3d{5, 5, 5}, // in blocks
		blockSize: dvid.Point3d{32, 32, 32},
		offset:    dvid.Point3d{32, 64, 96},
	}
	vol.postLabelVolume(t, "labels", uuid)

	// TODO -- Test syncing across labelblk, labelvol, labelsz.
}
func makeTestVersions(t *testing.T) {
	root, err := NewRepo("test repo", "test repo description", nil, "")
	if err != nil {
		t.Fatal(err)
	}
	if err := Commit(root, "root node", nil); err != nil {
		t.Fatal(err)
	}

	child1, err := NewVersion(root, "note describing child 1", nil)
	if err != nil {
		t.Fatal(err)
	}
	if err := Commit(child1, "child 1", nil); err != nil {
		t.Fatal(err)
	}

	// Test ability to set UUID of child.
	assignedUUID := dvid.UUID("0c8bc973dba74729880dd1bdfd8d0c5e")
	child2, err := NewVersion(root, "note describing child 2", &assignedUUID)
	if err != nil {
		t.Fatal(err)
	}
	log2 := []string{"This is line 1 of log", "This is line 2 of log", "Last line for multiline log"}
	if err := Commit(child2, "child 2 assigned", log2); err != nil {
		t.Fatal(err)
	}

	// Make uncommitted child 3.
	child3, err := NewVersion(root, "note describing child 3", nil)
	if err != nil {
		t.Fatal(err)
	}
	nodelog := []string{`My first node-level log line.!(;#)}`, "Second line is here!!!"}
	if err := AddToNodeLog(child3, nodelog); err != nil {
		t.Fatal(err)
	}
}
func TestParseConfig(t *testing.T) {
	var tc tomlConfig
	if _, err := toml.Decode(testConfig, &tc); err != nil {
		t.Fatalf("Could not decode TOML config: %v\n", err)
	}
	sc, ok := tc.Store["kvautobus"]
	if !ok {
		t.Fatalf("Couldn't find kvautobus config in test\n")
	}
	var config dvid.Config
	config.SetAll(sc)
	kvconfig := dvid.StoreConfig{
		Config: config,
		Engine: "kvautobus",
	}
	path, timeout, owner, collection, err := parseConfig(kvconfig)
	if err != nil {
		t.Errorf("Error parsing kvautobus config: %v\n", err)
	}
	if path != "http://tem-dvid.int.janelia.org:9000" {
		t.Errorf("Bad parsing of kvautobus config. Path = %s, not http://tem-dvid.int.janelia.org:9000", path)
	}
	if timeout != time.Duration(30)*time.Second {
		t.Errorf("Expected parsing of kvautobus config: timeout = 30 * time.Second, got %d\n", timeout)
	}
	if owner != "flyEM" {
		t.Errorf("expected owner for kvautobus to be %q, got %q\n", "flyEM", owner)
	}
	if collection != dvid.UUID("99ef22cd85f143f58a623bd22aad0ef7") {
		t.Errorf("expected collection for kvautobus to be 99ef22cd85f143f58a623bd22aad0ef7, got %s\n", collection)
	}
}
func TestCommitBranchMergeDelete(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid := createRepo(t)

	// Shouldn't be able to create branch on open node.
	branchReq := fmt.Sprintf("%snode/%s/branch", WebAPIPath, uuid)
	TestBadHTTP(t, "POST", branchReq, nil)

	// Commit it.
	payload := bytes.NewBufferString(`{"note": "This is my test commit", "log": ["line1", "line2", "some more stuff in a line"]}`)
	apiStr := fmt.Sprintf("%snode/%s/commit", WebAPIPath, uuid)
	TestHTTP(t, "POST", apiStr, payload)

	// Make sure committed nodes can only be read.
	// We shouldn't be able to write to the log.
	payload = bytes.NewBufferString(`{"log": ["line1", "line2", "some more stuff in a line"]}`)
	apiStr = fmt.Sprintf("%snode/%s/log", WebAPIPath, uuid)
	TestBadHTTP(t, "POST", apiStr, payload)

	// Should be able to create branch now that we've committed parent.
	respData := TestHTTP(t, "POST", branchReq, nil)
	resp := struct {
		Child string `json:"child"`
	}{}
	if err := json.Unmarshal(respData, &resp); err != nil {
		t.Errorf("Expected 'child' JSON response. Got %s\n", string(respData))
	}
	parent1 := dvid.UUID(resp.Child)

	// Create a sibling.
	respData = TestHTTP(t, "POST", branchReq, nil)
	if err := json.Unmarshal(respData, &resp); err != nil {
		t.Errorf("Expected 'child' JSON response. Got %s\n", string(respData))
	}
	parent2 := dvid.UUID(resp.Child)

	// Commit both parents.
	payload = bytes.NewBufferString(`{"note": "This is first parent"}`)
	apiStr = fmt.Sprintf("%snode/%s/commit", WebAPIPath, parent1)
	TestHTTP(t, "POST", apiStr, payload)
	payload = bytes.NewBufferString(`{"note": "This is second parent"}`)
	apiStr = fmt.Sprintf("%snode/%s/commit", WebAPIPath, parent2)
	TestHTTP(t, "POST", apiStr, payload)

	// Merge the two disjoint branches.
	mergeJSON := fmt.Sprintf(`{"mergeType": "conflict-free", "note": "This is my merged node", "parents": [%q, %q]}`, parent1[:7], parent2)
	payload = bytes.NewBufferString(mergeJSON)
	apiStr = fmt.Sprintf("%srepo/%s/merge", WebAPIPath, parent1)
	TestHTTP(t, "POST", apiStr, payload)

	// Get root version to check after we delete the repo.
	rootV, err := datastore.VersionFromUUID(uuid)
	if err != nil {
		t.Errorf("Got unexpected error on getting version from root UUID: %v\n", err)
	}

	// Delete the entire repo including all branches.
	apiStr = fmt.Sprintf("%srepo/%s", WebAPIPath, parent2)
	TestBadHTTP(t, "DELETE", apiStr, nil) // Should fail without "imsure" query string.

	apiStr = fmt.Sprintf("%srepo/%s?imsure=true", WebAPIPath, parent2)
	TestHTTP(t, "DELETE", apiStr, nil)

	// Make sure none of the repo is still accessible.
	jsonResp, err := datastore.GetRepoJSON(uuid)
	if err == nil {
		t.Errorf("Expected invalid UUID after repo delete but got json back: %s\n", jsonResp)
	}
	if err != datastore.ErrInvalidUUID {
		t.Errorf("Expected invalid UUID after repo delete but got unexpected error: %v\n", err)
	}
	_, err = datastore.VersionFromUUID(uuid)
	if err != datastore.ErrInvalidUUID {
		t.Errorf("Expected invalid root UUID after repo delete but got unexpected error: %v\n", err)
	}
	_, err = datastore.VersionFromUUID(parent1)
	if err != datastore.ErrInvalidUUID {
		t.Errorf("Expected invalid UUID for 1st parent after repo delete but got unexpected error: %v\n", err)
	}
	_, err = datastore.VersionFromUUID(parent2)
	if err != datastore.ErrInvalidUUID {
		t.Errorf("Expected invalid UUID for 2nd parent after repo delete but got unexpected error: %v\n", err)
	}
	_, err = datastore.UUIDFromVersion(rootV)
	if err != datastore.ErrInvalidVersion {
		t.Errorf("Expected invalid version id for root after repo delete but got unexpected error: %v\n", err)
	}
}
// handleCommand is the switchboard for remote command execution.
func handleCommand(cmd *datastore.Request) (reply *datastore.Response, err error) {
	if cmd.Name() == "" {
		err = fmt.Errorf("Server error: got empty command!")
		return
	}
	reply = new(datastore.Response)
	switch cmd.Name() {
	case "help":
		reply.Text = fmt.Sprintf(RPCHelpMessage, config.RPCAddress(), config.HTTPAddress())

	case "shutdown":
		dvid.Infof("DVID server halting due to 'shutdown' command.")
		reply.Text = fmt.Sprintf("DVID server at %s is being shutdown...\n", config.RPCAddress())
		// Launch shutdown in a goroutine so we can concurrently return the shutdown message to the client.
		go Shutdown()

	case "types":
		if len(cmd.Command) == 1 {
			text := "\nData Types within this DVID Server\n"
			text += "----------------------------------\n"
			var mapTypes map[dvid.URLString]datastore.TypeService
			if mapTypes, err = datastore.Types(); err != nil {
				err = fmt.Errorf("Error trying to retrieve data types within this DVID server!")
				return
			}
			for url, typeservice := range mapTypes {
				text += fmt.Sprintf("%-20s %s\n", typeservice.GetTypeName(), url)
			}
			reply.Text = text
		} else {
			if len(cmd.Command) != 3 || cmd.Command[2] != "help" {
				err = fmt.Errorf("Unknown types command: %q", cmd.Command)
				return
			}
			var typename string
			var typeservice datastore.TypeService
			cmd.CommandArgs(1, &typename)
			if typeservice, err = datastore.TypeServiceByName(dvid.TypeString(typename)); err != nil {
				return
			}
			reply.Text = typeservice.Help()
		}

	case "repos":
		var subcommand string
		cmd.CommandArgs(1, &subcommand)
		switch subcommand {
		case "new":
			var alias, description string
			cmd.CommandArgs(2, &alias, &description)

			config := cmd.Settings()
			var uuidStr, passcode string
			var found bool
			if uuidStr, found, err = config.GetString("uuid"); err != nil {
				return
			}
			var assign *dvid.UUID
			if !found {
				assign = nil
			} else {
				uuid := dvid.UUID(uuidStr)
				assign = &uuid
			}
			if passcode, found, err = config.GetString("passcode"); err != nil {
				return
			}
			var root dvid.UUID
			root, err = datastore.NewRepo(alias, description, assign, passcode)
			if err != nil {
				return
			}
			if err = datastore.SetRepoAlias(root, alias); err != nil {
				return
			}
			if err = datastore.SetRepoDescription(root, description); err != nil {
				return
			}
			reply.Text = fmt.Sprintf("New repo %q created with head node %s\n", alias, root)

		case "delete":
			var uuidStr, passcode string
			cmd.CommandArgs(2, &uuidStr, &passcode)

			var uuid dvid.UUID
			if uuid, _, err = datastore.MatchingUUID(uuidStr); err != nil {
				return
			}
			if err = datastore.DeleteRepo(uuid, passcode); err != nil {
				return
			}
			reply.Text = fmt.Sprintf("Started deletion of repo %s.\n", uuid)

		default:
			err = fmt.Errorf("Unknown repos command: %q", subcommand)
			return
		}

	case "repo":
		var uuidStr, subcommand string
		cmd.CommandArgs(1, &uuidStr, &subcommand)
		var uuid dvid.UUID
		if uuid, _, err = datastore.MatchingUUID(uuidStr); err != nil {
			return
		}

		switch subcommand {
		case "new":
			var typename, dataname string
			cmd.CommandArgs(3, &typename, &dataname)

			// Get TypeService
			var typeservice datastore.TypeService
			if typeservice, err = datastore.TypeServiceByName(dvid.TypeString(typename)); err != nil {
				return
			}

			// Create new data
			config := cmd.Settings()
			if _, err = datastore.NewData(uuid, typeservice, dvid.InstanceName(dataname), config); err != nil {
				return
			}
			reply.Text = fmt.Sprintf("Data %q [%s] added to node %s\n", dataname, typename, uuid)
			datastore.AddToRepoLog(uuid, []string{cmd.String()})

		case "rename":
			var name1, name2, passcode string
			cmd.CommandArgs(3, &name1, &name2, &passcode)
			oldname := dvid.InstanceName(name1)
			newname := dvid.InstanceName(name2)

			// Make sure this instance exists.
			if _, err = datastore.GetDataByUUIDName(uuid, oldname); err != nil {
				err = fmt.Errorf("Error trying to rename %q for UUID %s: %v", oldname, uuid, err)
				return
			}

			// Do the rename.
			if err = datastore.RenameData(uuid, oldname, newname, passcode); err != nil {
				err = fmt.Errorf("Error renaming data instance %q to %q: %v", oldname, newname, err)
				return
			}
			reply.Text = fmt.Sprintf("Renamed data instance %q to %q from DAG subgraph @ root %s\n", oldname, newname, uuid)

		case "branch":
			cmd.CommandArgs(3, &uuidStr)
			var assign *dvid.UUID
			if uuidStr == "" {
				assign = nil
			} else {
				u := dvid.UUID(uuidStr)
				assign = &u
			}
			var child dvid.UUID
			if child, err = datastore.NewVersion(uuid, fmt.Sprintf("branch of %s", uuid), assign); err != nil {
				return
			}
			reply.Text = fmt.Sprintf("Branch %s added to node %s\n", child, uuid)
			datastore.AddToRepoLog(uuid, []string{cmd.String()})

		case "merge":
			uuids := cmd.CommandArgs(2)
			parents := make([]dvid.UUID, len(uuids)+1)
			parents[0] = dvid.UUID(uuid)
			i := 1
			for _, parent := range uuids {
				parents[i] = dvid.UUID(parent)
				i++
			}
			var child dvid.UUID
			child, err = datastore.Merge(parents, fmt.Sprintf("merge of parents %v", parents), datastore.MergeConflictFree)
			if err != nil {
				return
			}
			reply.Text = fmt.Sprintf("Parents %v merged into node %s\n", parents, child)
			datastore.AddToRepoLog(uuid, []string{cmd.String()})

		case "migrate":
			var source, oldStoreName string
			cmd.CommandArgs(3, &source, &oldStoreName)
			var store dvid.Store
			store, err = storage.GetStoreByAlias(storage.Alias(oldStoreName))
			if err != nil {
				return
			}
			config := cmd.Settings()
			go func() {
				if err = datastore.MigrateInstance(uuid, dvid.InstanceName(source), store, config); err != nil {
					dvid.Errorf("migrate error: %v\n", err)
				}
			}()
			reply.Text = fmt.Sprintf("Started migration of uuid %s data instance %q from old store %q...\n", uuid, source, oldStoreName)

		case "copy":
			var source, target string
			cmd.CommandArgs(3, &source, &target)
			config := cmd.Settings()
			go func() {
				if err = datastore.CopyInstance(uuid, dvid.InstanceName(source), dvid.InstanceName(target), config); err != nil {
					dvid.Errorf("copy error: %v\n", err)
				}
			}()
			reply.Text = fmt.Sprintf("Started copy of uuid %s data instance %q to %q...\n", uuid, source, target)

		case "push":
			var target string
			cmd.CommandArgs(3, &target)
			config := cmd.Settings()
			go func() {
				if err = datastore.PushRepo(uuid, target, config); err != nil {
					dvid.Errorf("push error: %v\n", err)
				}
			}()
			reply.Text = fmt.Sprintf("Started push of repo %s to %q...\n", uuid, target)

		/*
			case "pull":
				var target string
				cmd.CommandArgs(3, &target)
				config := cmd.Settings()
				if err = datastore.Pull(uuid, target, config); err != nil {
					return
				}
				reply.Text = fmt.Sprintf("Repo %s pulled from %q\n", uuid, target)
		*/

		case "delete":
			var dataname, passcode string
			cmd.CommandArgs(3, &dataname, &passcode)

			// Make sure this instance exists.
			if _, err = datastore.GetDataByUUIDName(uuid, dvid.InstanceName(dataname)); err != nil {
				err = fmt.Errorf("Error trying to delete %q for UUID %s: %v", dataname, uuid, err)
				return
			}

			// Do the deletion.  Under the hood, this modifies metadata immediately and launches async k/v deletion.
			if err = datastore.DeleteDataByName(uuid, dvid.InstanceName(dataname), passcode); err != nil {
				err = fmt.Errorf("Error deleting data instance %q: %v", dataname, err)
				return
			}
			reply.Text = fmt.Sprintf("Started deletion of data instance %q from repo with root %s\n", dataname, uuid)

		default:
			err = fmt.Errorf("Unknown command: %q", cmd)
			return
		}

	case "node":
		var uuidStr, descriptor string
		cmd.CommandArgs(1, &uuidStr, &descriptor)
		var uuid dvid.UUID
		if uuid, _, err = datastore.MatchingUUID(uuidStr); err != nil {
			return
		}

		// Get the DataService
		dataname := dvid.InstanceName(descriptor)
		var subcommand string
		cmd.CommandArgs(3, &subcommand)
		var dataservice datastore.DataService
		if dataservice, err = datastore.GetDataByUUIDName(uuid, dataname); err != nil {
			return
		}
		if subcommand == "help" {
			reply.Text = dataservice.Help()
			return
		}
		err = dataservice.DoRPC(*cmd, reply)
		return

	default:
		// Check to see if it's a name of a compiled data type, in which case we refer it to the data type.
		types := datastore.CompiledTypes()
		for name, typeservice := range types {
			if name == dvid.TypeString(cmd.Argument(0)) {
				err = typeservice.Do(*cmd, reply)
				return
			}
		}
		err = fmt.Errorf("Unknown command: '%s'", *cmd)
	}
	return
}
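// The lines below are hedged examples of console requests the switchboard above would
// accept, inferred from its CommandArgs calls and Settings lookups.  The client-side
// invocation syntax, aliases, data names, and passcodes are illustrative placeholders,
// not documented commands.
//
//	repos new myalias "a repo for testing" uuid=de305d5475b4431badb2eb6b9e546014 passcode=mypass
//	repos delete de305d54 mypass
//	repo de305d54 new keyvalue mykv
//	repo de305d54 rename mykv mykv2 mypass
//	repo de305d54 merge 8fa05d54
//	node de305d54 mykv help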
func TestLabels(t *testing.T) {
	tests.UseStore()
	defer tests.CloseStore()

	uuid := dvid.UUID(server.NewTestRepo(t))
	if len(uuid) < 5 {
		t.Fatalf("Bad root UUID for new repo: %s\n", uuid)
	}

	// Create a labelblk instance.
	server.CreateTestInstance(t, uuid, "labelblk", "labels", dvid.Config{})
	vol := labelVol{
		size:      dvid.Point3d{5, 5, 5}, // in blocks
		blockSize: dvid.Point3d{32, 32, 32},
		offset:    dvid.Point3d{32, 64, 96},
		name:      "labels",
	}
	vol.postLabelVolume(t, uuid, "", "", 0)

	// Repost the label volume 3 more times with increasing starting values.
	vol.postLabelVolume(t, uuid, "", "", 2100)
	vol.postLabelVolume(t, uuid, "", "", 8176)
	vol.postLabelVolume(t, uuid, "", "", 16623)

	vol.testSlices(t, uuid)

	// Try to post last volume concurrently 3x and then check result.
	wg := new(sync.WaitGroup)
	wg.Add(3)
	go func() {
		vol.postLabelVolume(t, uuid, "", "", 16623)
		wg.Done()
	}()
	go func() {
		vol.postLabelVolume(t, uuid, "", "", 16623)
		wg.Done()
	}()
	go func() {
		vol.postLabelVolume(t, uuid, "", "", 16623)
		wg.Done()
	}()
	wg.Wait()
	vol.testGetLabelVolume(t, uuid, "", "")

	// Try concurrent write of disjoint subvolumes.
	vol2 := labelVol{
		size:      dvid.Point3d{5, 5, 5}, // in blocks
		blockSize: dvid.Point3d{32, 32, 32},
		offset:    dvid.Point3d{192, 64, 96},
		name:      "labels",
	}
	vol3 := labelVol{
		size:      dvid.Point3d{5, 5, 5}, // in blocks
		blockSize: dvid.Point3d{32, 32, 32},
		offset:    dvid.Point3d{192, 224, 96},
		name:      "labels",
	}
	vol4 := labelVol{
		size:      dvid.Point3d{5, 5, 5}, // in blocks
		blockSize: dvid.Point3d{32, 32, 32},
		offset:    dvid.Point3d{32, 224, 96},
		name:      "labels",
	}
	wg.Add(3)
	go func() {
		vol2.postLabelVolume(t, uuid, "lz4", "", 4000)
		wg.Done()
	}()
	go func() {
		vol3.postLabelVolume(t, uuid, "lz4", "", 8000)
		wg.Done()
	}()
	go func() {
		vol4.postLabelVolume(t, uuid, "lz4", "", 1200)
		wg.Done()
	}()
	wg.Wait()
	vol.testGetLabelVolume(t, uuid, "", "")
	vol2.testGetLabelVolume(t, uuid, "", "")
	vol3.testGetLabelVolume(t, uuid, "", "")
	vol4.testGetLabelVolume(t, uuid, "", "")

	// Verify various GET 3d volume with compressions and no ROI.
	vol.testGetLabelVolume(t, uuid, "", "")
	vol.testGetLabelVolume(t, uuid, "lz4", "")
	vol.testGetLabelVolume(t, uuid, "gzip", "")

	// Create a new ROI instance.
	roiName := "myroi"
	server.CreateTestInstance(t, uuid, "roi", roiName, dvid.Config{})

	// Add ROI data.
	apiStr := fmt.Sprintf("%snode/%s/%s/roi", server.WebAPIPath, uuid, roiName)
	server.TestHTTP(t, "POST", apiStr, bytes.NewBufferString(labelsJSON()))

	// Post updated labels without ROI and make sure it returns those values.
	var labelNoROI uint64 = 20000
	vol.postLabelVolume(t, uuid, "", "", labelNoROI)
	returned := vol.testGetLabelVolume(t, uuid, "", "")
	startLabel := binary.LittleEndian.Uint64(returned[0:8])
	if startLabel != labelNoROI+1 {
		t.Errorf("Expected first voxel to be label %d and got %d instead\n", labelNoROI+1, startLabel)
	}

	// TODO - Use the ROI to retrieve a 2d xy image.

	// TODO - Make sure we aren't getting labels back in non-ROI points.

	// Post again but now with ROI.
	var labelWithROI uint64 = 40000
	vol.postLabelVolume(t, uuid, "", roiName, labelWithROI)

	// Verify ROI masking of POST where anything outside ROI is old labels.
	returned = vol.getLabelVolume(t, uuid, "", "")
	var newlabel uint64 = labelWithROI
	var oldlabel uint64 = labelNoROI
	nx := vol.size[0] * vol.blockSize[0]
	ny := vol.size[1] * vol.blockSize[1]
	nz := vol.size[2] * vol.blockSize[2]
	var x, y, z, v int32
	for z = 0; z < nz; z++ {
		voxz := z + vol.offset[2]
		blockz := voxz / DefaultBlockSize
		for y = 0; y < ny; y++ {
			voxy := y + vol.offset[1]
			blocky := voxy / DefaultBlockSize
			for x = 0; x < nx; x++ {
				voxx := x + vol.offset[0]
				blockx := voxx / DefaultBlockSize
				oldlabel++
				newlabel++
				got := binary.LittleEndian.Uint64(returned[v : v+8])
				if inroi(blockx, blocky, blockz) {
					if got != newlabel {
						t.Fatalf("Expected %d in ROI, got %d\n", newlabel, got)
					}
				} else {
					if got != oldlabel {
						t.Fatalf("Expected %d outside ROI, got %d\n", oldlabel, got)
					}
				}
				v += 8
			}
		}
	}

	// Verify that a ROI-enabled GET has zeros everywhere outside ROI.
	returned = vol.getLabelVolume(t, uuid, "", roiName)
	newlabel = labelWithROI
	x, y, z, v = 0, 0, 0, 0
	for z = 0; z < nz; z++ {
		voxz := z + vol.offset[2]
		blockz := voxz / DefaultBlockSize
		for y = 0; y < ny; y++ {
			voxy := y + vol.offset[1]
			blocky := voxy / DefaultBlockSize
			for x = 0; x < nx; x++ {
				voxx := x + vol.offset[0]
				blockx := voxx / DefaultBlockSize
				oldlabel++
				newlabel++
				got := binary.LittleEndian.Uint64(returned[v : v+8])
				if inroi(blockx, blocky, blockz) {
					if got != newlabel {
						t.Fatalf("Expected %d in ROI, got %d\n", newlabel, got)
					}
				} else {
					if got != 0 {
						t.Fatalf("Expected zero outside ROI, got %d\n", got)
					}
				}
				v += 8
			}
		}
	}
}
// Do acts as a switchboard for remote command execution.
func (c *RPCConnection) Do(cmd datastore.Request, reply *datastore.Response) error {
	if reply == nil {
		dvid.Debugf("reply is nil coming in!\n")
		return nil
	}
	if cmd.Name() == "" {
		return fmt.Errorf("Server error: got empty command!")
	}

	switch cmd.Name() {
	case "help":
		reply.Text = fmt.Sprintf(RPCHelpMessage, config.RPCAddress(), config.HTTPAddress())

	case "shutdown":
		Shutdown()
		// Make this process shutdown in a second to allow time for RPC to finish.
		// TODO -- Better way to do this?
		log.Printf("DVID server halted due to 'shutdown' command.")
		reply.Text = fmt.Sprintf("DVID server at %s has been halted.\n", config.RPCAddress())
		go func() {
			time.Sleep(1 * time.Second)
			os.Exit(0)
		}()

	case "types":
		if len(cmd.Command) == 1 {
			text := "\nData Types within this DVID Server\n"
			text += "----------------------------------\n"
			mapTypes, err := datastore.Types()
			if err != nil {
				return fmt.Errorf("Error trying to retrieve data types within this DVID server!")
			}
			for url, typeservice := range mapTypes {
				text += fmt.Sprintf("%-20s %s\n", typeservice.GetTypeName(), url)
			}
			reply.Text = text
		} else {
			if len(cmd.Command) != 3 || cmd.Command[2] != "help" {
				return fmt.Errorf("Unknown types command: %q", cmd.Command)
			}
			var typename string
			cmd.CommandArgs(1, &typename)
			typeservice, err := datastore.TypeServiceByName(dvid.TypeString(typename))
			if err != nil {
				return err
			}
			reply.Text = typeservice.Help()
		}

	case "repos":
		var subcommand, alias, description, uuidStr string
		cmd.CommandArgs(1, &subcommand, &alias, &description, &uuidStr)
		switch subcommand {
		case "new":
			var assign *dvid.UUID
			if uuidStr == "" {
				assign = nil
			} else {
				u := dvid.UUID(uuidStr)
				assign = &u
			}
			root, err := datastore.NewRepo(alias, description, assign)
			if err != nil {
				return err
			}
			if err := datastore.SetRepoAlias(root, alias); err != nil {
				return err
			}
			if err := datastore.SetRepoDescription(root, description); err != nil {
				return err
			}
			reply.Text = fmt.Sprintf("New repo %q created with head node %s\n", alias, root)
		default:
			return fmt.Errorf("Unknown repos command: %q", subcommand)
		}

	case "repo":
		var uuidStr, subcommand string
		cmd.CommandArgs(1, &uuidStr, &subcommand)
		uuid, _, err := datastore.MatchingUUID(uuidStr)
		if err != nil {
			return err
		}

		switch subcommand {
		case "new":
			var typename, dataname string
			cmd.CommandArgs(3, &typename, &dataname)

			// Get TypeService
			typeservice, err := datastore.TypeServiceByName(dvid.TypeString(typename))
			if err != nil {
				return err
			}

			// Create new data
			config := cmd.Settings()
			_, err = datastore.NewData(uuid, typeservice, dvid.InstanceName(dataname), config)
			if err != nil {
				return err
			}
			reply.Text = fmt.Sprintf("Data %q [%s] added to node %s\n", dataname, typename, uuid)
			datastore.AddToRepoLog(uuid, []string{cmd.String()})

		case "branch":
			cmd.CommandArgs(3, &uuidStr)
			var assign *dvid.UUID
			if uuidStr == "" {
				assign = nil
			} else {
				u := dvid.UUID(uuidStr)
				assign = &u
			}
			child, err := datastore.NewVersion(uuid, fmt.Sprintf("branch of %s", uuid), assign)
			if err != nil {
				return err
			}
			reply.Text = fmt.Sprintf("Branch %s added to node %s\n", child, uuid)
			datastore.AddToRepoLog(uuid, []string{cmd.String()})

		case "merge":
			uuids := cmd.CommandArgs(2)
			parents := make([]dvid.UUID, len(uuids)+1)
			parents[0] = dvid.UUID(uuid)
			i := 1
			for _, parent := range uuids {
				parents[i] = dvid.UUID(parent)
				i++
			}
			child, err := datastore.Merge(parents, fmt.Sprintf("merge of parents %v", parents), datastore.MergeConflictFree)
			if err != nil {
				return err
			}
			reply.Text = fmt.Sprintf("Parents %v merged into node %s\n", parents, child)
			datastore.AddToRepoLog(uuid, []string{cmd.String()})

		case "push":
			/*
				var target string
				cmd.CommandArgs(3, &target)
				config := cmd.Settings()
				if err = datastore.Push(repo, target, config); err != nil {
					return err
				}
				reply.Text = fmt.Sprintf("Repo %q pushed to %q\n", repo.RootUUID(), target)
			*/
			return fmt.Errorf("push command has been temporarily suspended")

		default:
			return fmt.Errorf("Unknown command: %q", cmd)
		}

	case "node":
		var uuidStr, descriptor string
		cmd.CommandArgs(1, &uuidStr, &descriptor)
		uuid, _, err := datastore.MatchingUUID(uuidStr)
		if err != nil {
			return err
		}

		// Get the DataService
		dataname := dvid.InstanceName(descriptor)
		var subcommand string
		cmd.CommandArgs(3, &subcommand)
		dataservice, err := datastore.GetDataByUUID(uuid, dataname)
		if err != nil {
			return err
		}
		if subcommand == "help" {
			reply.Text = dataservice.Help()
			return nil
		}
		return dataservice.DoRPC(cmd, reply)

	default:
		return fmt.Errorf("Unknown command: '%s'", cmd)
	}
	return nil
}
func TestCommitAndBranch(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	apiStr := fmt.Sprintf("%srepos", server.WebAPIPath)
	r := server.TestHTTP(t, "POST", apiStr, nil)
	var jsonResp map[string]interface{}
	if err := json.Unmarshal(r, &jsonResp); err != nil {
		t.Fatalf("Unable to unmarshal repo creation response: %s\n", string(r))
	}
	v, ok := jsonResp["root"]
	if !ok {
		t.Fatalf("No 'root' metadata returned: %s\n", string(r))
	}
	uuidStr, ok := v.(string)
	if !ok {
		t.Fatalf("Couldn't cast returned 'root' data (%v) into string.\n", v)
	}
	uuid := dvid.UUID(uuidStr)

	// Shouldn't be able to create branch on open node.
	branchReq := fmt.Sprintf("%snode/%s/branch", server.WebAPIPath, uuid)
	server.TestBadHTTP(t, "POST", branchReq, nil)

	// Add a keyvalue instance.
	server.CreateTestInstance(t, uuid, "keyvalue", "mykv", dvid.Config{})

	// Commit it.
	payload := bytes.NewBufferString(`{"note": "This is my test commit", "log": ["line1", "line2", "some more stuff in a line"]}`)
	apiStr = fmt.Sprintf("%snode/%s/commit", server.WebAPIPath, uuid)
	server.TestHTTP(t, "POST", apiStr, payload)

	// Make sure committed nodes can only be read.
	// We shouldn't be able to write to the keyvalue.
	keyReq := fmt.Sprintf("%snode/%s/mykv/key/foo", server.WebAPIPath, uuid)
	server.TestBadHTTP(t, "POST", keyReq, bytes.NewBufferString("some data"))

	// Create a ROI that handles its POST request via ptquery as immutable.  We should still be able to POST to it.
	server.CreateTestInstance(t, uuid, "roi", "myroi", dvid.Config{})
	apiStr = fmt.Sprintf("%snode/%s/myroi/ptquery", server.WebAPIPath, uuid)
	queryJSON := "[[10, 10, 10], [20, 20, 20], [30, 30, 30], [40, 40, 40], [50, 50, 50]]"
	server.TestHTTP(t, "POST", apiStr, bytes.NewReader([]byte(queryJSON))) // We have no ROI, so this just exercises the HTTP request.

	// Should be able to create branch now that we've committed parent.
	respData := server.TestHTTP(t, "POST", branchReq, nil)
	resp := struct {
		Child dvid.UUID `json:"child"`
	}{}
	if err := json.Unmarshal(respData, &resp); err != nil {
		t.Errorf("Expected 'child' JSON response. Got %s\n", string(respData))
	}

	// We should be able to write to that keyvalue now in the child.
	keyReq = fmt.Sprintf("%snode/%s/mykv/key/foo", server.WebAPIPath, resp.Child)
	server.TestHTTP(t, "POST", keyReq, bytes.NewBufferString("some data"))

	// We should also be able to write to the repo-wide log.
	logReq := fmt.Sprintf("%srepo/%s/log", server.WebAPIPath, uuid)
	server.TestHTTP(t, "POST", logReq, bytes.NewBufferString(`{"log": ["a log message"]}`))
}
// Initialize the storage systems.  Returns a bool + error where the bool is
// true if the metadata store is newly created and needs initialization.
// The map of store configurations should be keyed by either a datatype name,
// "default", or "metadata".
func Initialize(cmdline dvid.Config, backend *Backend) (createdMetadata bool, err error) {
	dvid.Infof("backend:\n%v\n", *backend)

	// Open all the backend stores.
	manager.stores = make(map[Alias]dvid.Store, len(backend.Stores))
	var gotDefault, gotMetadata, createdDefault, lastCreated bool
	var lastStore dvid.Store
	for alias, dbconfig := range backend.Stores {
		var store dvid.Store
		for dbalias, db := range manager.stores {
			if db.Equal(dbconfig) {
				return false, fmt.Errorf("Store %q configuration is duplicate of store %q", alias, dbalias)
			}
		}
		store, created, err := NewStore(dbconfig)
		if err != nil {
			return false, fmt.Errorf("bad store %q: %v", alias, err)
		}
		if alias == backend.Metadata {
			gotMetadata = true
			createdMetadata = created
			manager.metadataStore = store
		}
		if alias == backend.Default {
			gotDefault = true
			createdDefault = created
			manager.defaultStore = store
		}
		manager.stores[alias] = store
		lastStore = store
		lastCreated = created
	}

	// Return if we don't have default or metadata stores.  This should really be caught
	// at configuration loading, but we double-check here as well.
	if !gotDefault {
		if len(backend.Stores) == 1 {
			manager.defaultStore = lastStore
			createdDefault = lastCreated
		} else {
			return false, fmt.Errorf("either backend.default or a single store must be set in configuration TOML file")
		}
	}
	if !gotMetadata {
		manager.metadataStore = manager.defaultStore
		createdMetadata = createdDefault
	}
	dvid.Infof("Default store: %s\n", manager.defaultStore)
	dvid.Infof("Metadata store: %s\n", manager.metadataStore)

	// Setup the groupcache if specified.
	err = setupGroupcache(backend.Groupcache)
	if err != nil {
		return
	}

	// Make all data instance or datatype-specific store assignments.
	manager.instanceStore = make(map[dvid.DataSpecifier]dvid.Store)
	manager.datatypeStore = make(map[dvid.TypeString]dvid.Store)
	for dataspec, alias := range backend.Mapping {
		if dataspec == "default" || dataspec == "metadata" {
			continue
		}
		store, found := manager.stores[alias]
		if !found {
			err = fmt.Errorf("bad backend store alias: %q -> %q", dataspec, alias)
			return
		}
		// Cache the store for mapped datatype or data instance.
		name := strings.Trim(string(dataspec), "\"")
		parts := strings.Split(name, ":")
		switch len(parts) {
		case 1:
			manager.datatypeStore[dvid.TypeString(name)] = store
		case 2:
			dataid := dvid.GetDataSpecifier(dvid.InstanceName(parts[0]), dvid.UUID(parts[1]))
			manager.instanceStore[dataid] = store
		default:
			err = fmt.Errorf("bad backend data specification: %s", dataspec)
			return
		}
	}
	manager.setup = true

	// Setup the graph store.
	var store dvid.Store
	store, err = assignedStoreByType("labelgraph")
	if err != nil {
		return
	}
	var ok bool
	kvdb, ok := store.(OrderedKeyValueDB)
	if !ok {
		return false, fmt.Errorf("assigned labelgraph store %q isn't ordered kv db", store)
	}
	manager.graphDB, err = NewGraphStore(kvdb)
	if err != nil {
		return false, err
	}
	manager.graphSetter, ok = manager.graphDB.(GraphSetter)
	if !ok {
		return false, fmt.Errorf("Database %q cannot support a graph setter", kvdb)
	}
	manager.graphGetter, ok = manager.graphDB.(GraphGetter)
	if !ok {
		return false, fmt.Errorf("Database %q cannot support a graph getter", kvdb)
	}
	return
}
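// The constant below is a hedged illustration of the backend mapping forms Initialize
// above can parse: "default" and "metadata" entries are skipped, a bare name assigns a
// store to a datatype, and a "dataname:uuid" key assigns a store to a single data
// instance.  The TOML layout, store aliases, and UUID are placeholders chosen for
// illustration, not a documented configuration format.
const exampleBackendMappingTOML = `
[backend]
    [backend.default]
    store = "mainstore"
    [backend.labelgraph]
    store = "graphstore"
    [backend."grayscale:99ef22cd85f143f58a623bd22aad0ef7"]
    store = "faststore"
`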