// MigrateInstance migrates a data instance locally from an old storage
// engine to the current configured storage. After completion of the copy,
// the data instance in the old storage is deleted.
//
// The copy and subsequent delete run asynchronously in a goroutine; this
// function returns as soon as the migration has been launched, so a nil
// return only means the migration started, not that it succeeded.
func MigrateInstance(uuid dvid.UUID, source dvid.InstanceName, oldStore dvid.Store, c dvid.Config) error {
	if manager == nil {
		return ErrManagerNotInitialized
	}

	// Get flatten or not: a config setting of transmit=flatten makes
	// copyData run in flattening mode. The "found" flag is intentionally
	// ignored; an absent setting simply yields flatten == false.
	transmit, _, err := c.GetString("transmit")
	if err != nil {
		return err
	}
	var flatten bool
	if transmit == "flatten" {
		flatten = true
	}

	// Get the source data instance.
	d, err := manager.getDataByUUIDName(uuid, source)
	if err != nil {
		return err
	}

	// Get the current store for this data instance.
	storer, ok := d.(storage.Accessor)
	if !ok {
		return fmt.Errorf("unable to migrate data %q: unable to access backing store", d.DataName())
	}
	curKV, err := storer.GetOrderedKeyValueDB()
	if err != nil {
		return fmt.Errorf("unable to get backing store for data %q: %v\n", source, err)
	}

	// Get the old store; it must support ordered key-value access for the copy.
	oldKV, ok := oldStore.(storage.OrderedKeyValueDB)
	if !ok {
		return fmt.Errorf("unable to migrate data %q from store %s which isn't ordered kv store", source, storer)
	}

	// Abort if the two stores are the same, since the delete below would
	// otherwise destroy the just-copied data.
	if curKV == oldKV {
		return fmt.Errorf("old store for data %q seems same as current store", source)
	}

	// Migrate data asynchronously.
	go func() {
		if err := copyData(oldKV, curKV, d, nil, uuid, nil, flatten); err != nil {
			dvid.Errorf("error in migration of data %q: %v\n", source, err)
			return
		}
		// delete data off old store -- only reached after a fully successful copy.
		dvid.Infof("Starting delete of instance %q from old storage %q\n", d.DataName(), oldKV)
		ctx := storage.NewDataContext(d, 0)
		if err := oldKV.DeleteAll(ctx, true); err != nil {
			dvid.Errorf("deleting instance %q from %q after copy to %q: %v\n", d.DataName(), oldKV, curKV, err)
			return
		}
	}()

	dvid.Infof("Migrating data %q from store %q to store %q ...\n", d.DataName(), oldKV, curKV)
	return nil
}
// NewData returns a pointer to labelsz data. func NewData(uuid dvid.UUID, id dvid.InstanceID, name dvid.InstanceName, c dvid.Config) (*Data, error) { // See if we have a valid DataService ROI var roistr string roistr, found, err := c.GetString("ROI") if err != nil { return nil, err } if found { parts := strings.Split(roistr, ",") if len(parts) != 2 { return nil, fmt.Errorf("bad ROI value (%q) expected %q", roistr, "<roiname>,<uuid>") } } // Initialize the Data for this data type basedata, err := datastore.NewDataService(dtype, uuid, id, name, c) if err != nil { return nil, err } data := &Data{ Data: basedata, Properties: Properties{ StaticROI: roistr, }, } return data, nil }
// NewData returns a pointer to new ROI data with default values. func (dtype *Type) NewDataService(uuid dvid.UUID, id dvid.InstanceID, name dvid.InstanceName, c dvid.Config) (datastore.DataService, error) { basedata, err := datastore.NewDataService(dtype, uuid, id, name, c) if err != nil { return nil, err } s, found, err := c.GetString("BlockSize") if err != nil { return nil, err } var blockSize dvid.Point3d if found { pt, err := dvid.StringToPoint(s, ",") if err != nil { return nil, err } if pt.NumDims() != 3 { return nil, fmt.Errorf("BlockSize must be 3d, not %dd", pt.NumDims()) } blockSize, _ = pt.(dvid.Point3d) } else { blockSize = dvid.Point3d{DefaultBlockSize, DefaultBlockSize, DefaultBlockSize} } d := &Data{ Data: basedata, Properties: Properties{blockSize, math.MaxInt32, math.MinInt32}, ready: make(map[dvid.VersionID]bool), } return d, nil }
// NewData returns a pointer to labelblk data. func NewData(uuid dvid.UUID, id dvid.InstanceID, name dvid.InstanceName, c dvid.Config) (*Data, error) { imgblkData, err := dtype.Type.NewData(uuid, id, name, c) if err != nil { return nil, err } // Check if Raveler label. // TODO - Remove Raveler code outside of DVID. var labelType LabelType = Standard64bit s, found, err := c.GetString("LabelType") if found { switch strings.ToLower(s) { case "raveler": labelType = RavelerLabel case "standard": default: return nil, fmt.Errorf("unknown label type specified '%s'", s) } } dvid.Infof("Creating labelblk '%s' with %s", name, labelType) data := &Data{ Data: imgblkData, Labeling: labelType, } return data, nil }
func GetOptions(create bool, config dvid.Config) (*lmdbOptions, error) { sizeInGB, found, err := config.GetInt("Size") if err != nil { return nil, err } if !found && create { return nil, fmt.Errorf("Must specify 'Size=...' in gigabytes during Lightning MDB creation.") } if found { return &lmdbOptions{GBytes: sizeInGB}, nil } return &lmdbOptions{}, nil }
// GetTestConfig returns a set of store configurations suitable for testing // a basholeveldb storage system. func (e Engine) GetTestConfig() (*storage.Backend, error) { tc := map[string]interface{}{ "path": fmt.Sprintf("dvid-test-%x", uuid.NewV4().Bytes()), "testing": true, } var c dvid.Config c.SetAll(tc) testConfig := map[storage.Alias]dvid.StoreConfig{ "default": dvid.StoreConfig{Config: c, Engine: "basholeveldb"}, } backend := storage.Backend{ Stores: testConfig, } return &backend, nil }
// Make a copy of a repository, customizing it via config. // TODO -- modify data instance properties based on filters. func (r *repoT) customize(v dvid.VersionID, config dvid.Config) (*repoT, rpc.Transmit, error) { // Since we can have names separated by commas, split them namesStr, found, err := config.GetString("data") if err != nil { return nil, rpc.TransmitUnknown, err } var datanames dvid.InstanceNames if found { for _, name := range strings.Split(namesStr, ",") { datanames = append(datanames, dvid.InstanceName(strings.TrimSpace(name))) } } // Check the transmission behavior: all, flatten, or deltas. var versions map[dvid.VersionID]struct{} transmitStr, found, err := config.GetString("transmit") if err != nil { return nil, rpc.TransmitUnknown, err } if !found { transmitStr = "all" } var transmit rpc.Transmit switch transmitStr { case "flatten": transmit = rpc.TransmitFlatten versions = map[dvid.VersionID]struct{}{ v: struct{}{}, } case "all": transmit = rpc.TransmitAll versions = r.versionSet() case "branch": transmit = rpc.TransmitBranch versions = r.versionSet() default: return nil, rpc.TransmitUnknown, fmt.Errorf("unknown transmit %s", transmitStr) } // Make a copy filtering by allowed data instances. r.RLock() defer r.RUnlock() dup, err := r.duplicate(versions, datanames) return dup, transmit, err }
func (c tomlConfig) Stores() (map[storage.Alias]dvid.StoreConfig, error) { stores := make(map[storage.Alias]dvid.StoreConfig, len(c.Store)) for alias, sc := range c.Store { e, ok := sc["engine"] if !ok { return nil, fmt.Errorf("store configurations must have %q set to valid driver", "engine") } engine, ok := e.(string) if !ok { return nil, fmt.Errorf("engine set for store %q must be a string", alias) } var config dvid.Config config.SetAll(sc) stores[alias] = dvid.StoreConfig{ Config: config, Engine: engine, } } return stores, nil }
func (p *Properties) setByConfig(config dvid.Config) error { s, found, err := config.GetString("BlockSize") if err != nil { return err } if found { p.BlockSize, err = dvid.StringToPoint3d(s, ",") if err != nil { return err } } s, found, err = config.GetString("VoxelSize") if err != nil { return err } if found { dvid.Infof("Changing resolution of voxels to %s\n", s) p.Resolution.VoxelSize, err = dvid.StringToNdFloat32(s, ",") if err != nil { return err } } s, found, err = config.GetString("VoxelUnits") if err != nil { return err } if found { p.Resolution.VoxelUnits, err = dvid.StringToNdString(s, ",") if err != nil { return err } } return nil }
// NewData returns a pointer to labelsz data.
// NOTE(review): the original comment said "labelvol data", but the body and
// its error message construct labelsz data synced to a labelvol source.
func NewData(uuid dvid.UUID, id dvid.InstanceID, name dvid.InstanceName, c dvid.Config) (*Data, error) {
	// Make sure we have a valid labelvol source
	s, found, err := c.GetString("Source")
	if err != nil {
		return nil, err
	}
	if !found {
		return nil, fmt.Errorf("Cannot make labelsz data without valid 'Source' specifying an associated labelvol.")
	}
	srcname := dvid.InstanceName(s)
	// Verify the named labelvol instance actually exists under this UUID.
	if _, err = labelvol.GetByUUID(uuid, srcname); err != nil {
		return nil, err
	}
	c.Set("sync", s) // This will set base data sync list

	// Initialize the Data for this data type
	basedata, err := datastore.NewDataService(dtype, uuid, id, name, c)
	if err != nil {
		return nil, err
	}
	return &Data{basedata, srcname}, nil
}
func TestParseConfig(t *testing.T) { var tc tomlConfig if _, err := toml.Decode(testConfig, &tc); err != nil { t.Fatalf("Could not decode TOML config: %v\n", err) } sc, ok := tc.Store["kvautobus"] if !ok { t.Fatalf("Couldn't find kvautobus config in test\n") } var config dvid.Config config.SetAll(sc) kvconfig := dvid.StoreConfig{ Config: config, Engine: "kvautobus", } path, timeout, owner, collection, err := parseConfig(kvconfig) if err != nil { t.Errorf("Error parsing kvautobus config: %v\n", err) } if path != "http://tem-dvid.int.janelia.org:9000" { t.Errorf("Bad parsing of kvautobus config. Path = %s, not http://tem-dvid.int.janelia.org:9000", path) } if timeout != time.Duration(30)*time.Second { t.Errorf("Expected parsing of kvautobus config: timeout = 30 * time.Second, got %d\n", timeout) } if owner != "flyEM" { t.Errorf("expected owner for kvautobus to be %q, got %q\n", "flyEM", owner) } if collection != dvid.UUID("99ef22cd85f143f58a623bd22aad0ef7") { t.Errorf("expected collection for kvautobus to be 99ef22cd85f143f58a623bd22aad0ef7, got %s\n", collection) } }
func CreateTestInstance(t *testing.T, uuid dvid.UUID, typename, name string, config dvid.Config) { config.Set("typename", typename) config.Set("dataname", name) jsonData, err := config.MarshalJSON() if err != nil { t.Fatalf("Unable to make JSON for instance creation: %v\n", config) } apiStr := fmt.Sprintf("%srepo/%s/instance", WebAPIPath, uuid) TestHTTP(t, "POST", apiStr, bytes.NewBuffer(jsonData)) }
// NewData returns a pointer to new googlevoxels data with default values. func (dtype *Type) NewDataService(uuid dvid.UUID, id dvid.InstanceID, name dvid.InstanceName, c dvid.Config) (datastore.DataService, error) { // Make sure we have needed volumeid and authentication key. volumeid, found, err := c.GetString("volumeid") if err != nil { return nil, err } if !found { return nil, fmt.Errorf("Cannot make googlevoxels data without valid 'volumeid' setting.") } jwtfile, found, err := c.GetString("jwtfile") if err != nil { return nil, err } if !found { return nil, fmt.Errorf("Cannot make googlevoxels data without valid 'jwtfile' specifying path to JSON Web Token") } // Read in the JSON Web Token jwtdata, err := ioutil.ReadFile(jwtfile) if err != nil { return nil, fmt.Errorf("Cannot load JSON Web Token file (%s): %v", jwtfile, err) } conf, err := google.JWTConfigFromJSON(jwtdata, "https://www.googleapis.com/auth/brainmaps") if err != nil { return nil, fmt.Errorf("Cannot establish JWT Config file from Google: %v", err) } client := conf.Client(oauth2.NoContext) // Make URL call to get the available scaled volumes. url := fmt.Sprintf("%s/volumes/%s", bmapsPrefix, volumeid) resp, err := client.Get(url) if err != nil { return nil, fmt.Errorf("Error getting volume metadata from Google: %v", err) } if resp.StatusCode != http.StatusOK { return nil, fmt.Errorf("Unexpected status code %d returned when getting volume metadata for %q", resp.StatusCode, volumeid) } metadata, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } resp.Body.Close() var m struct { Geoms Geometries `json:"geometry"` } if err := json.Unmarshal(metadata, &m); err != nil { return nil, fmt.Errorf("Error decoding volume JSON metadata: %v", err) } dvid.Infof("Successfully got geometries:\nmetadata:\n%s\nparsed JSON:\n%v\n", metadata, m) // Compute the mapping from tile scale/orientation to scaled volume index. geomMap := GeometryMap{} // (1) Find the highest resolution geometry. 
var highResIndex GeometryIndex minVoxelSize := dvid.NdFloat32{10000, 10000, 10000} for i, geom := range m.Geoms { if geom.PixelSize[0] < minVoxelSize[0] || geom.PixelSize[1] < minVoxelSize[1] || geom.PixelSize[2] < minVoxelSize[2] { minVoxelSize = geom.PixelSize highResIndex = GeometryIndex(i) } } dvid.Infof("Google voxels %q: found highest resolution was geometry %d: %s\n", name, highResIndex, minVoxelSize) // (2) For all geometries, find out what the scaling is relative to the highest resolution pixel size. for i, geom := range m.Geoms { if i == int(highResIndex) { geomMap[GSpec{0, XY}] = highResIndex geomMap[GSpec{0, XZ}] = highResIndex geomMap[GSpec{0, YZ}] = highResIndex geomMap[GSpec{0, XYZ}] = highResIndex } else { scaleX := geom.PixelSize[0] / minVoxelSize[0] scaleY := geom.PixelSize[1] / minVoxelSize[1] scaleZ := geom.PixelSize[2] / minVoxelSize[2] var shape Shape switch { case scaleX > scaleZ && scaleY > scaleZ: shape = XY case scaleX > scaleY && scaleZ > scaleY: shape = XZ case scaleY > scaleX && scaleZ > scaleX: shape = YZ default: shape = XYZ } var mag float32 if scaleX > mag { mag = scaleX } if scaleY > mag { mag = scaleY } if scaleZ > mag { mag = scaleZ } scaling := log2(mag) geomMap[GSpec{scaling, shape}] = GeometryIndex(i) dvid.Infof("%s at scaling %d set to geometry %d: resolution %s\n", shape, scaling, i, geom.PixelSize) } } // Create a client that will be authorized and authenticated on behalf of the account. // Initialize the googlevoxels data basedata, err := datastore.NewDataService(dtype, uuid, id, name, c) if err != nil { return nil, err } data := &Data{ Data: basedata, Properties: Properties{ VolumeID: volumeid, JWT: string(jwtdata), TileSize: DefaultTileSize, GeomMap: geomMap, Scales: m.Geoms, HighResIndex: highResIndex, }, client: client, } return data, nil }
// NewData returns a pointer to new tile data with default values. func (dtype *Type) NewDataService(uuid dvid.UUID, id dvid.InstanceID, name dvid.InstanceName, c dvid.Config) (datastore.DataService, error) { // Make sure we have a valid DataService source sourcename, found, err := c.GetString("Source") if err != nil { return nil, err } if !found { return nil, fmt.Errorf("Cannot make imagetile data without valid 'Source' setting.") } // See if we want placeholder imagetile. placeholder, found, err := c.GetBool("Placeholder") if err != nil { return nil, err } // Determine encoding for tile storage and this dictates what kind of compression we use. encoding, found, err := c.GetString("Format") if err != nil { return nil, err } format := PNG if found { switch strings.ToLower(encoding) { case "lz4": format = LZ4 case "png": format = PNG case "jpg": format = JPG default: return nil, fmt.Errorf("Unknown encoding specified: '%s' (should be 'lz4', 'png', or 'jpg'", encoding) } } // Compression is determined by encoding. Inform user if there's a discrepancy. var compression string switch format { case LZ4: compression = "lz4" case PNG: compression = "none" case JPG: compression = "none" } compressConfig, found, err := c.GetString("Compression") if err != nil { return nil, err } if found && strings.ToLower(compressConfig) != compression { return nil, fmt.Errorf("Conflict between specified compression '%s' and format '%s'. Suggest not dictating compression.", compressConfig, encoding) } c.Set("Compression", compression) // Initialize the imagetile data basedata, err := datastore.NewDataService(dtype, uuid, id, name, c) if err != nil { return nil, err } data := &Data{ Data: basedata, Properties: Properties{ Source: dvid.InstanceName(sourcename), Placeholder: placeholder, Encoding: format, }, } return data, nil }
// PushRepo pushes a Repo to a remote DVID server at the target address.
//
// An empty target defaults to rpc.DefaultAddress. The config may carry a
// "filter" setting (datatype-specific data filtering) plus the settings
// consumed by repoT.customize ("data", "transmit"). The push first sends
// repo metadata, then each data instance in the customized repo.
func PushRepo(uuid dvid.UUID, target string, config dvid.Config) error {
	if manager == nil {
		return ErrManagerNotInitialized
	}

	if target == "" {
		target = rpc.DefaultAddress
		dvid.Infof("No target specified for push, defaulting to %q\n", rpc.DefaultAddress)
	}

	// Get the full local repo
	thisRepo, err := manager.repoFromUUID(uuid)
	if err != nil {
		return err
	}

	// Get any filter
	filter, found, err := config.GetString("filter")
	if err != nil {
		return err
	}

	// Create a repo that is tailored by the push configuration, e.g.,
	// keeping just given data instances, etc.
	v, found := manager.uuidToVersion[uuid]
	if !found {
		return ErrInvalidUUID
	}
	txRepo, transmit, err := thisRepo.customize(v, config)
	if err != nil {
		return err
	}

	// Establish session with target, which may be itself
	s, err := rpc.NewSession(target, pushMessageID)
	if err != nil {
		return fmt.Errorf("Unable to connect (%s) for push: %s", target, err.Error())
	}
	defer s.Close() // TODO -- check if can hang if error occurs during job

	// Send the repo metadata, transmit type, and other useful data for remote server.
	dvid.Infof("Sending repo %s data to %q\n", uuid, target)
	repoSerialization, err := txRepo.GobEncode()
	if err != nil {
		return err
	}
	repoMsg := repoTxMsg{
		Session:  s.ID(),
		Transmit: transmit,
		UUID:     uuid,
		Repo:     repoSerialization,
	}
	resp, err := s.Call()(sendRepoMsg, repoMsg)
	if err != nil {
		return err
	}
	// A nil response means the remote already has all candidate versions.
	if resp == nil {
		return fmt.Errorf("push unnecessary -- versions at remote are already present")
	}

	// We should get back a version set to send, or nil = send all versions.
	versions, ok := resp.(map[dvid.VersionID]struct{})
	if !ok {
		return fmt.Errorf("received response during repo push that wasn't expected set of delta versions")
	}
	dvid.Debugf("Remote sent list of %d versions to send\n", len(versions))

	// For each data instance, send the data with optional datatype-specific filtering.
	ps := &PushSession{storage.FilterSpec(filter), versions, s, transmit}
	for _, d := range txRepo.data {
		dvid.Infof("Sending instance %q data to %q\n", d.DataName(), target)
		if err := d.PushData(ps); err != nil {
			// Abort the whole push on the first failed instance.
			dvid.Errorf("Aborting send of instance %q data\n", d.DataName())
			return err
		}
	}
	return nil
}
// NewData returns a pointer to new googlevoxels data with default values. func (dtype *Type) NewDataService(uuid dvid.UUID, id dvid.InstanceID, name dvid.InstanceName, c dvid.Config) (datastore.DataService, error) { // Make sure we have needed volumeid and authentication key. volumeid, found, err := c.GetString("volumeid") if err != nil { return nil, err } if !found { return nil, fmt.Errorf("Cannot make googlevoxels data without valid 'volumeid' setting.") } authkey, found, err := c.GetString("authkey") if err != nil { return nil, err } if !found { return nil, fmt.Errorf("Cannot make googlevoxels data without valid 'authkey' setting.") } // Make URL call to get the available scaled volumes. url := fmt.Sprintf("https://www.googleapis.com/brainmaps/v1beta1/volumes/%s?key=%s", volumeid, authkey) resp, err := http.Get(url) if err != nil { return nil, fmt.Errorf("Error getting volume metadata from Google: %v", err) } if resp.StatusCode != http.StatusOK { return nil, fmt.Errorf("Unexpected status code %d returned when getting volume metadata for %q", resp.StatusCode, volumeid) } metadata, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } var m struct { Geoms Geometries `json:"geometrys"` } if err := json.Unmarshal(metadata, &m); err != nil { return nil, fmt.Errorf("Error decoding volume JSON metadata: %v", err) } // Compute the mapping from tile scale/orientation to scaled volume index. tileMap := GeometryMap{} // (1) Find the highest resolution geometry. 
var highResIndex GeometryIndex minVoxelSize := dvid.NdFloat32{10000, 10000, 10000} for i, geom := range m.Geoms { if geom.PixelSize[0] < minVoxelSize[0] || geom.PixelSize[1] < minVoxelSize[1] || geom.PixelSize[2] < minVoxelSize[2] { minVoxelSize = geom.PixelSize highResIndex = GeometryIndex(i) } } dvid.Infof("Google voxels %q: found highest resolution was geometry %d: %s\n", name, highResIndex, minVoxelSize) // (2) For all geometries, find out what the scaling is relative to the highest resolution pixel size. for i, geom := range m.Geoms { if i == int(highResIndex) { tileMap[TileSpec{0, XY}] = highResIndex tileMap[TileSpec{0, XZ}] = highResIndex tileMap[TileSpec{0, YZ}] = highResIndex } else { scaleX := geom.PixelSize[0] / minVoxelSize[0] scaleY := geom.PixelSize[1] / minVoxelSize[1] scaleZ := geom.PixelSize[2] / minVoxelSize[2] var plane TileOrientation switch { case scaleX > scaleZ && scaleY > scaleZ: plane = XY case scaleX > scaleY && scaleZ > scaleY: plane = XZ case scaleY > scaleX && scaleZ > scaleX: plane = YZ default: dvid.Infof("Odd geometry skipped for Google voxels %q with pixel size: %s\n", name, geom.PixelSize) dvid.Infof(" Scaling from highest resolution: %d x %d x %d\n", scaleX, scaleY, scaleZ) continue } var mag float32 if scaleX > mag { mag = scaleX } if scaleY > mag { mag = scaleY } if scaleZ > mag { mag = scaleZ } scaling := log2(mag) tileMap[TileSpec{scaling, plane}] = GeometryIndex(i) dvid.Infof("Plane %s at scaling %d set to geometry %d: resolution %s\n", plane, scaling, i, geom.PixelSize) } } // Initialize the googlevoxels data basedata, err := datastore.NewDataService(dtype, uuid, id, name, c) if err != nil { return nil, err } data := &Data{ Data: basedata, Properties: Properties{ VolumeID: volumeid, AuthKey: authkey, TileSize: DefaultTileSize, TileMap: tileMap, Scales: m.Geoms, HighResIndex: highResIndex, }, } return data, nil }
func TestLabels(t *testing.T) { datastore.OpenTest() defer datastore.CloseTest() // Create testbed volume and data instances uuid, _ := datastore.NewTestRepo() var config dvid.Config server.CreateTestInstance(t, uuid, "labelblk", "labels", config) server.CreateTestInstance(t, uuid, "labelvol", "bodies", config) // Establish syncs server.CreateTestSync(t, uuid, "labels", "bodies") server.CreateTestSync(t, uuid, "bodies", "labels") // Populate the labels, which should automatically populate the labelvol _ = createLabelTestVolume(t, uuid, "labels") if err := datastore.BlockOnUpdating(uuid, "labels"); err != nil { t.Fatalf("Error blocking on sync of labels: %v\n", err) } // Add annotations syncing with "labels" instance. server.CreateTestInstance(t, uuid, "annotation", "mysynapses", config) server.CreateTestSync(t, uuid, "mysynapses", "labels,bodies") // Create a ROI that will be used for our labelsz. server.CreateTestInstance(t, uuid, "roi", "myroi", config) roiRequest := fmt.Sprintf("%snode/%s/myroi/roi", server.WebAPIPath, uuid) server.TestHTTP(t, "POST", roiRequest, getROIReader()) // Create labelsz instances synced to the above annotations. server.CreateTestInstance(t, uuid, "labelsz", "noroi", config) server.CreateTestSync(t, uuid, "noroi", "mysynapses") config.Set("ROI", fmt.Sprintf("myroi,%s", uuid)) server.CreateTestInstance(t, uuid, "labelsz", "withroi", config) server.CreateTestSync(t, uuid, "withroi", "mysynapses") // PUT first batch of synapses. var synapses annotation.Elements var x, y, z int32 // This should put 31x31x31 (29,791) PostSyn in volume with fewer in label 200 than 300. // There will be 15 along each dimension from 0 -> 63, then 16 from 64 -> 127. 
// Label 100 will have 15 x 31 x 31 = 14415 // Label 200 will have 16 x 31 x 15 = 7440 // Label 300 will have 16 x 31 x 16 = 7936 for z = 4; z < 128; z += 4 { for y = 4; y < 128; y += 4 { for x = 4; x < 128; x += 4 { e := annotation.Element{ annotation.ElementNR{ Pos: dvid.Point3d{x, y, z}, Kind: annotation.PostSyn, }, []annotation.Relationship{}, } synapses = append(synapses, e) } } } // This should put 32x32x32 (32,768) PreSyn in volume split 1/2, 1/4, 1/4 for z = 2; z < 128; z += 4 { for y = 2; y < 128; y += 4 { for x = 2; x < 128; x += 4 { e := annotation.Element{ annotation.ElementNR{ Pos: dvid.Point3d{x, y, z}, Kind: annotation.PreSyn, }, []annotation.Relationship{}, } synapses = append(synapses, e) } } } testJSON, err := json.Marshal(synapses) if err != nil { t.Fatal(err) } url := fmt.Sprintf("%snode/%s/mysynapses/elements", server.WebAPIPath, uuid) server.TestHTTP(t, "POST", url, strings.NewReader(string(testJSON))) // Check if we have correct sequencing for no ROI labelsz. if err := datastore.BlockOnUpdating(uuid, "noroi"); err != nil { t.Fatalf("Error blocking on sync of noroi labelsz: %v\n", err) } url = fmt.Sprintf("%snode/%s/noroi/top/3/PreSyn", server.WebAPIPath, uuid) data := server.TestHTTP(t, "GET", url, nil) if string(data) != `[{"Label":100,"Size":16384},{"Label":200,"Size":8192},{"Label":300,"Size":8192}]` { t.Errorf("Got back incorrect PreSyn noroi ranking:\n%v\n", string(data)) } url = fmt.Sprintf("%snode/%s/noroi/count/100/PreSyn", server.WebAPIPath, uuid) data = server.TestHTTP(t, "GET", url, nil) if string(data) != `{"Label":100,"PreSyn":16384}` { t.Errorf("Got back incorrect PreSyn noroi count for label 100:\n%v\n", string(data)) } url = fmt.Sprintf("%snode/%s/noroi/count/200/PreSyn", server.WebAPIPath, uuid) data = server.TestHTTP(t, "GET", url, nil) if string(data) != `{"Label":200,"PreSyn":8192}` { t.Errorf("Got back incorrect PreSyn noroi count for label 200:\n%v\n", string(data)) } url = fmt.Sprintf("%snode/%s/noroi/top/3/PostSyn", 
server.WebAPIPath, uuid) data = server.TestHTTP(t, "GET", url, nil) if string(data) != `[{"Label":100,"Size":14415},{"Label":300,"Size":7936},{"Label":200,"Size":7440}]` { t.Errorf("Got back incorrect PostSyn noroi ranking:\n%v\n", string(data)) } url = fmt.Sprintf("%snode/%s/noroi/top/3/AllSyn", server.WebAPIPath, uuid) data = server.TestHTTP(t, "GET", url, nil) if string(data) != `[{"Label":100,"Size":30799},{"Label":300,"Size":16128},{"Label":200,"Size":15632}]` { t.Errorf("Got back incorrect AllSync noroi ranking:\n%v\n", string(data)) } url = fmt.Sprintf("%snode/%s/noroi/threshold/15633/AllSyn", server.WebAPIPath, uuid) data = server.TestHTTP(t, "GET", url, nil) if string(data) != `[{"Label":100,"Size":30799},{"Label":300,"Size":16128}]` { t.Errorf("Got back incorrect AllSyn noroi threshold:\n%v\n", string(data)) } url = fmt.Sprintf("%snode/%s/noroi/threshold/1000/AllSyn?offset=1&n=2", server.WebAPIPath, uuid) data = server.TestHTTP(t, "GET", url, nil) if string(data) != `[{"Label":300,"Size":16128},{"Label":200,"Size":15632}]` { t.Errorf("Got back incorrect AllSyn noroi threshold:\n%v\n", string(data)) } url = fmt.Sprintf("%snode/%s/noroi/threshold/1000/AllSyn?offset=8&n=2", server.WebAPIPath, uuid) data = server.TestHTTP(t, "GET", url, nil) if string(data) != `[]` { t.Errorf("Got back incorrect AllSyn noroi threshold:\n%v\n", string(data)) } // Check if we have correct sequencing for ROI labelsz. // ROI constitutes the inner eight 32^3 blocks. // There are 16 PostSyn in each ROI dimension. // There are also 16 PreSyn in each ROI dimension. 
if err := datastore.BlockOnUpdating(uuid, "withroi"); err != nil { t.Fatalf("Error blocking on sync of withroi labelsz: %v\n", err) } url = fmt.Sprintf("%snode/%s/withroi/top/0/AllSyn", server.WebAPIPath, uuid) data = server.TestHTTP(t, "GET", url, nil) if string(data) != `[]` { t.Errorf("Incorrectly handled top n=0 case, expected [] got: %v\n", string(data)) } url = fmt.Sprintf("%snode/%s/withroi/top/3/PreSyn", server.WebAPIPath, uuid) data = server.TestHTTP(t, "GET", url, nil) if string(data) != `[{"Label":100,"Size":2048},{"Label":200,"Size":1024},{"Label":300,"Size":1024}]` { t.Errorf("Got back incorrect PreSyn withroi ranking:\n%v\n", string(data)) } url = fmt.Sprintf("%snode/%s/withroi/top/3/PostSyn", server.WebAPIPath, uuid) data = server.TestHTTP(t, "GET", url, nil) if string(data) != `[{"Label":100,"Size":2048},{"Label":200,"Size":1024},{"Label":300,"Size":1024}]` { t.Errorf("Got back incorrect PostSyn withroi ranking:\n%v\n", string(data)) } // Check fewer and larger N requests. 
url = fmt.Sprintf("%snode/%s/noroi/top/2/PreSyn", server.WebAPIPath, uuid) data = server.TestHTTP(t, "GET", url, nil) if string(data) != `[{"Label":100,"Size":16384},{"Label":200,"Size":8192}]` { t.Errorf("Got back incorrect N=2 ranking:\n%v\n", string(data)) } url = fmt.Sprintf("%snode/%s/noroi/top/4/PreSyn", server.WebAPIPath, uuid) data = server.TestHTTP(t, "GET", url, nil) if string(data) != `[{"Label":100,"Size":16384},{"Label":200,"Size":8192},{"Label":300,"Size":8192}]` { t.Errorf("Got back incorrect N=4 ranking:\n%v\n", string(data)) } // Test annotation move of a PostSyn from label 100->300 and also label 200->300 url = fmt.Sprintf("%snode/%s/mysynapses/move/32_32_32/75_21_69", server.WebAPIPath, uuid) server.TestHTTP(t, "POST", url, nil) url = fmt.Sprintf("%snode/%s/mysynapses/move/68_20_20/77_21_69", server.WebAPIPath, uuid) server.TestHTTP(t, "POST", url, nil) if err := datastore.BlockOnUpdating(uuid, "noroi"); err != nil { t.Fatalf("Error blocking on sync of noroi labelsz: %v\n", err) } url = fmt.Sprintf("%snode/%s/noroi/top/3/PostSyn", server.WebAPIPath, uuid) data = server.TestHTTP(t, "GET", url, nil) if string(data) != `[{"Label":100,"Size":14414},{"Label":300,"Size":7938},{"Label":200,"Size":7439}]` { t.Errorf("Got back incorrect PostSyn noroi ranking after move from label 100->300:\n%v\n", string(data)) } // First move took synapse out of ROI so there should be one less for label 100. 
if err := datastore.BlockOnUpdating(uuid, "withroi"); err != nil { t.Fatalf("Error blocking on sync of labelsz: %v\n", err) } url = fmt.Sprintf("%snode/%s/withroi/top/5/PostSyn", server.WebAPIPath, uuid) data = server.TestHTTP(t, "GET", url, nil) if string(data) != `[{"Label":100,"Size":2047},{"Label":200,"Size":1024},{"Label":300,"Size":1024}]` { t.Errorf("Got back incorrect post-move PostSyn withroi ranking:\n%v\n", string(data)) } // Test annotation deletion of moved PostSyn from label 300 url = fmt.Sprintf("%snode/%s/mysynapses/element/75_21_69", server.WebAPIPath, uuid) server.TestHTTP(t, "DELETE", url, nil) url = fmt.Sprintf("%snode/%s/mysynapses/element/77_21_69", server.WebAPIPath, uuid) server.TestHTTP(t, "DELETE", url, nil) if err := datastore.BlockOnUpdating(uuid, "noroi"); err != nil { t.Fatalf("Error blocking on sync of noroi labelsz: %v\n", err) } url = fmt.Sprintf("%snode/%s/noroi/top/3/PostSyn", server.WebAPIPath, uuid) data = server.TestHTTP(t, "GET", url, nil) if string(data) != `[{"Label":100,"Size":14414},{"Label":300,"Size":7936},{"Label":200,"Size":7439}]` { t.Errorf("Got back incorrect PostSyn noroi ranking after deletions from label 300:\n%v\n", string(data)) } // Check sync on merge. 
if err := datastore.BlockOnUpdating(uuid, "bodies"); err != nil { t.Fatalf("Error blocking on sync of bodies: %v\n", err) } testMerge := mergeJSON(`[200, 300]`) testMerge.send(t, uuid, "bodies") if err := datastore.BlockOnUpdating(uuid, "labels"); err != nil { t.Fatalf("Error blocking on sync of labels: %v\n", err) } time.Sleep(1 * time.Second) if err := datastore.BlockOnUpdating(uuid, "mysynapses"); err != nil { t.Fatalf("Error blocking on sync of synapses: %v\n", err) } if err := datastore.BlockOnUpdating(uuid, "noroi"); err != nil { t.Fatalf("Error blocking on sync of labelsz: %v\n", err) } if err := datastore.BlockOnUpdating(uuid, "withroi"); err != nil { t.Fatalf("Error blocking on sync of labelsz: %v\n", err) } url = fmt.Sprintf("%snode/%s/withroi/top/5/PostSyn", server.WebAPIPath, uuid) data = server.TestHTTP(t, "GET", url, nil) if string(data) != `[{"Label":200,"Size":2048},{"Label":100,"Size":2047}]` { t.Errorf("Got back incorrect post-merge PostSyn withroi ranking:\n%v\n", string(data)) } url = fmt.Sprintf("%snode/%s/withroi/count/100/PostSyn", server.WebAPIPath, uuid) data = server.TestHTTP(t, "GET", url, nil) if string(data) != `{"Label":100,"PostSyn":2047}` { t.Errorf("Got back incorrect post-merge PostSyn withroi count of label 100:\n%v\n", string(data)) } url = fmt.Sprintf("%snode/%s/noroi/top/3/PreSyn", server.WebAPIPath, uuid) data = server.TestHTTP(t, "GET", url, nil) if string(data) != `[{"Label":100,"Size":16384},{"Label":200,"Size":16384}]` { t.Errorf("Got back incorrect post-merge PreSyn noroi ranking:\n%v\n", string(data)) } url = fmt.Sprintf("%snode/%s/noroi/top/3/PostSyn", server.WebAPIPath, uuid) data = server.TestHTTP(t, "GET", url, nil) if string(data) != `[{"Label":200,"Size":15375},{"Label":100,"Size":14414}]` { t.Errorf("Got back incorrect post-merge PostSyn noroi ranking:\n%v\n", string(data)) } url = fmt.Sprintf("%snode/%s/noroi/top/3/AllSyn", server.WebAPIPath, uuid) data = server.TestHTTP(t, "GET", url, nil) if string(data) != 
`[{"Label":200,"Size":31759},{"Label":100,"Size":30798}]` { t.Errorf("Got back incorrect post-merge AllSyn noroi ranking:\n%v\n", string(data)) } // Check threshold endpoint url = fmt.Sprintf("%snode/%s/withroi/threshold/2048/PostSyn", server.WebAPIPath, uuid) data = server.TestHTTP(t, "GET", url, nil) if string(data) != `[{"Label":200,"Size":2048}]` { t.Errorf("Got back incorrect post-merge PostSyn withroi threshold:\n%v\n", string(data)) } url = fmt.Sprintf("%snode/%s/noroi/threshold/16384/PreSyn", server.WebAPIPath, uuid) data = server.TestHTTP(t, "GET", url, nil) if string(data) != `[{"Label":100,"Size":16384},{"Label":200,"Size":16384}]` { t.Errorf("Got back incorrect post-merge PreSyn noroi threshold:\n%v\n", string(data)) } url = fmt.Sprintf("%snode/%s/noroi/threshold/15000/PostSyn", server.WebAPIPath, uuid) data = server.TestHTTP(t, "GET", url, nil) if string(data) != `[{"Label":200,"Size":15375}]` { t.Errorf("Got back incorrect post-merge PostSyn noroi threshold:\n%v\n", string(data)) } url = fmt.Sprintf("%snode/%s/noroi/threshold/0/PostSyn?offset=1&n=1", server.WebAPIPath, uuid) data = server.TestHTTP(t, "GET", url, nil) if string(data) != `[{"Label":100,"Size":14414}]` { t.Errorf("Got back incorrect post-merge PostSyn noroi threshold with offset/n:\n%v\n", string(data)) } // Create the sparsevol encoding for split area with label 100 -> 150. // Split has offset (0, 0, 0) with size (19, 19, 19). 
// PreSyn in split = 5 x 5 x 5 = 125 // PostSyn in split = 4 x 4 x 4 = 64 var rles dvid.RLEs for z := int32(0); z < 19; z++ { for y := int32(0); y < 19; y++ { start := dvid.Point3d{0, y, z} rles = append(rles, dvid.NewRLE(start, 19)) } } buf := getBytesRLE(t, rles) // Submit the split sparsevol url = fmt.Sprintf("%snode/%s/%s/split/%d?splitlabel=150", server.WebAPIPath, uuid, "bodies", 100) data = server.TestHTTP(t, "POST", url, buf) jsonVal := make(map[string]uint64) if err := json.Unmarshal(data, &jsonVal); err != nil { t.Errorf("Unable to get new label from split. Instead got: %v\n", jsonVal) } // Check sync on split. if err := datastore.BlockOnUpdating(uuid, "labels"); err != nil { t.Fatalf("Error blocking on sync of labels: %v\n", err) } time.Sleep(1 * time.Second) if err := datastore.BlockOnUpdating(uuid, "mysynapses"); err != nil { t.Fatalf("Error blocking on sync of synapses: %v\n", err) } if err := datastore.BlockOnUpdating(uuid, "noroi"); err != nil { t.Fatalf("Error blocking on sync of labelsz: %v\n", err) } url = fmt.Sprintf("%snode/%s/noroi/top/3/PreSyn", server.WebAPIPath, uuid) data = server.TestHTTP(t, "GET", url, nil) if string(data) != `[{"Label":200,"Size":16384},{"Label":100,"Size":16259},{"Label":150,"Size":125}]` { t.Errorf("Got back incorrect post-split PreSyn noroi ranking:\n%v\n", string(data)) } url = fmt.Sprintf("%snode/%s/noroi/top/3/PostSyn", server.WebAPIPath, uuid) data = server.TestHTTP(t, "GET", url, nil) if string(data) != `[{"Label":200,"Size":15375},{"Label":100,"Size":14350},{"Label":150,"Size":64}]` { t.Errorf("Got back incorrect post-split PostSyn noroi ranking:\n%v\n", string(data)) } // Create the encoding for coarse split area in block coordinates from label 200. // Split has offset (64, 96, 96) with size (64, 32, 32). 
// PreSyn in split = 16 x 8 x 8 = 1024 // PostSyn in split = 16 x 8 x 8 = 1024 rles = dvid.RLEs{ dvid.NewRLE(dvid.Point3d{2, 3, 3}, 2), } buf = getBytesRLE(t, rles) // Submit the coarse split of 200 -> 250 url = fmt.Sprintf("%snode/%s/%s/split-coarse/200?splitlabel=250", server.WebAPIPath, uuid, "bodies") data = server.TestHTTP(t, "POST", url, buf) jsonVal = make(map[string]uint64) if err := json.Unmarshal(data, &jsonVal); err != nil { t.Errorf("Unable to get new label from split. Instead got: %v\n", jsonVal) } // Check sync on split. if err := datastore.BlockOnUpdating(uuid, "labels"); err != nil { t.Fatalf("Error blocking on sync of labels: %v\n", err) } time.Sleep(1 * time.Second) if err := datastore.BlockOnUpdating(uuid, "mysynapses"); err != nil { t.Fatalf("Error blocking on sync of synapses: %v\n", err) } if err := datastore.BlockOnUpdating(uuid, "noroi"); err != nil { t.Fatalf("Error blocking on sync of labelsz: %v\n", err) } url = fmt.Sprintf("%snode/%s/noroi/top/5/PreSyn", server.WebAPIPath, uuid) data = server.TestHTTP(t, "GET", url, nil) if string(data) != `[{"Label":100,"Size":16259},{"Label":200,"Size":15360},{"Label":250,"Size":1024},{"Label":150,"Size":125}]` { t.Errorf("Got back incorrect post-coarsesplit PreSyn noroi ranking:\n%v\n", string(data)) } url = fmt.Sprintf("%snode/%s/noroi/top/5/PostSyn", server.WebAPIPath, uuid) data = server.TestHTTP(t, "GET", url, nil) if string(data) != `[{"Label":200,"Size":14351},{"Label":100,"Size":14350},{"Label":250,"Size":1024},{"Label":150,"Size":64}]` { t.Errorf("Got back incorrect post-coarsesplit PostSyn noroi ranking:\n%v\n", string(data)) } url = fmt.Sprintf("%snode/%s/noroi/top/5/AllSyn", server.WebAPIPath, uuid) data = server.TestHTTP(t, "GET", url, nil) if string(data) != `[{"Label":100,"Size":30609},{"Label":200,"Size":29711},{"Label":250,"Size":2048},{"Label":150,"Size":189}]` { t.Errorf("Got back incorrect post-coarsesplit AllSyn noroi ranking:\n%v\n", string(data)) } url = 
fmt.Sprintf("%snode/%s/noroi/count/200/AllSyn", server.WebAPIPath, uuid) data = server.TestHTTP(t, "GET", url, nil) if string(data) != `{"Label":200,"AllSyn":29711}` { t.Errorf("Got back incorrect post-coarsesplit AllSyn noroi count of label 200:\n%v\n", string(data)) } // Check the ROI-restricted labelsz instance which should only be affected by merge. url = fmt.Sprintf("%snode/%s/withroi/top/5/PreSyn", server.WebAPIPath, uuid) data = server.TestHTTP(t, "GET", url, nil) if string(data) != `[{"Label":100,"Size":2048},{"Label":200,"Size":2048}]` { t.Errorf("Got back incorrect post-coarsesplit PreSyn withroi ranking:\n%v\n", string(data)) } url = fmt.Sprintf("%snode/%s/withroi/top/5/PostSyn", server.WebAPIPath, uuid) data = server.TestHTTP(t, "GET", url, nil) if string(data) != `[{"Label":200,"Size":2048},{"Label":100,"Size":2047}]` { t.Errorf("Got back incorrect post-coarsesplit PostSyn withroi ranking:\n%v\n", string(data)) } url = fmt.Sprintf("%snode/%s/withroi/top/5/AllSyn", server.WebAPIPath, uuid) data = server.TestHTTP(t, "GET", url, nil) if string(data) != `[{"Label":200,"Size":4096},{"Label":100,"Size":4095}]` { t.Errorf("Got back incorrect post-coarsesplit AllSyn withroi ranking:\n%v\n", string(data)) } url = fmt.Sprintf("%snode/%s/withroi/count/200/AllSyn", server.WebAPIPath, uuid) data = server.TestHTTP(t, "GET", url, nil) if string(data) != `{"Label":200,"AllSyn":4096}` { t.Errorf("Got back incorrect post-coarsesplit AllSyn withroi count of label 200:\n%v\n", string(data)) } url = fmt.Sprintf("%snode/%s/withroi/count/100/AllSyn", server.WebAPIPath, uuid) data = server.TestHTTP(t, "GET", url, nil) if string(data) != `{"Label":100,"AllSyn":4095}` { t.Errorf("Got back incorrect post-coarsesplit AllSyn withroi count of label 100:\n%v\n", string(data)) } }
func getOptions(config dvid.Config) (*leveldbOptions, error) { opt := &leveldbOptions{ Options: levigo.NewOptions(), ReadOptions: levigo.NewReadOptions(), WriteOptions: levigo.NewWriteOptions(), env: levigo.NewDefaultEnv(), } opt.WriteOptions.SetSync(DefaultSync) // Huge performance penalty to set sync to true // Set flags based on create parameter opt.SetCreateIfMissing(true) opt.SetErrorIfExists(false) // Create associated data structures with default values bloomBits, found, err := config.GetInt("BloomFilterBitsPerKey") if err != nil { return nil, err } if !found { bloomBits = DefaultBloomBits } opt.SetBloomFilterBitsPerKey(bloomBits) cacheSize, found, err := config.GetInt("CacheSize") if err != nil { return nil, err } if !found { cacheSize = DefaultCacheSize } else { cacheSize *= dvid.Mega } dvid.Infof("leveldb cache size: %s\n", humanize.Bytes(uint64(cacheSize))) opt.SetLRUCacheSize(cacheSize) writeBufferSize, found, err := config.GetInt("WriteBufferSize") if err != nil { return nil, err } if !found { writeBufferSize = DefaultWriteBufferSize } else { writeBufferSize *= dvid.Mega } dvid.Infof("leveldb write buffer size: %s\n", humanize.Bytes(uint64(writeBufferSize))) opt.SetWriteBufferSize(writeBufferSize) maxOpenFiles, found, err := config.GetInt("MaxOpenFiles") if err != nil { return nil, err } if !found { maxOpenFiles = DefaultMaxOpenFiles } opt.SetMaxOpenFiles(maxOpenFiles) blockSize, found, err := config.GetInt("BlockSize") if err != nil { return nil, err } if !found { blockSize = DefaultBlockSize } opt.SetBlockSize(blockSize) opt.SetInfoLog(nil) opt.SetParanoidChecks(false) //opt.SetBlockRestartInterval(8) // Don't bother with compression on leveldb side because it will be // selectively applied on DVID side. We may return and then transmit // Snappy-compressed data without ever decompressing on server-side. opt.SetCompression(levigo.NoCompression) // (levigo.SnappyCompression) return opt, nil }
// CopyInstance copies a data instance locally, perhaps to a different storage // engine if the new instance uses a different backend per a data instance-specific configuration. // (See sample config.example.toml file in root dvid source directory.) func CopyInstance(uuid dvid.UUID, source, target dvid.InstanceName, c dvid.Config) error { if manager == nil { return ErrManagerNotInitialized } if source == "" || target == "" { return fmt.Errorf("both source and cloned name must be provided") } // Get any filter spec fstxt, found, err := c.GetString("filter") if err != nil { return err } var fs storage.FilterSpec if found { fs = storage.FilterSpec(fstxt) } // Get flatten or not transmit, found, err := c.GetString("transmit") if err != nil { return err } var flatten bool if transmit == "flatten" { flatten = true } // Get the source data instance. d1, err := manager.getDataByUUIDName(uuid, source) if err != nil { return err } // Create the target instance. t, err := TypeServiceByName(d1.TypeName()) if err != nil { return err } d2, err := manager.newData(uuid, t, target, c) if err != nil { return err } // Populate the new data instance properties from source. 
copier, ok := d2.(PropertyCopier) if ok { if err := copier.CopyPropertiesFrom(d1, fs); err != nil { return err } if err := SaveDataByUUID(uuid, d2); err != nil { return err } } // We should be able to get the backing store (only ordered kv for now) storer, ok := d1.(storage.Accessor) if !ok { return fmt.Errorf("unable to push data %q: unable to access backing store", d1.DataName()) } oldKV, err := storer.GetOrderedKeyValueDB() if err != nil { return fmt.Errorf("unable to get backing store for data %q: %v\n", d1.DataName(), err) } storer, ok = d2.(storage.Accessor) if !ok { return fmt.Errorf("unable to push data %q: unable to access backing store", d2.DataName()) } newKV, err := storer.GetOrderedKeyValueDB() if err != nil { return fmt.Errorf("unable to get backing store for data %q: %v\n", d2.DataName(), err) } dvid.Infof("Copying data %q (%s) to data %q (%s)...\n", d1.DataName(), oldKV, d2.DataName(), newKV) // See if this data instance implements a Send filter. var filter storage.Filter filterer, ok := d1.(storage.Filterer) if ok && fs != "" { var err error filter, err = filterer.NewFilter(fs) if err != nil { return err } } // copy data with optional datatype-specific filtering. return copyData(oldKV, newKV, d1, d2, uuid, filter, flatten) }
func (d *Data) ModifyConfig(config dvid.Config) error { // Set compression for this instance s, found, err := config.GetString("Compression") if err != nil { return err } if found { format := strings.ToLower(s) switch format { case "none": d.compression, _ = dvid.NewCompression(dvid.Uncompressed, dvid.DefaultCompression) case "snappy": d.compression, _ = dvid.NewCompression(dvid.Snappy, dvid.DefaultCompression) case "lz4": d.compression, _ = dvid.NewCompression(dvid.LZ4, dvid.DefaultCompression) case "gzip": d.compression, _ = dvid.NewCompression(dvid.Gzip, dvid.DefaultCompression) default: // Check for gzip + compression level parts := strings.Split(format, ":") if len(parts) == 2 && parts[0] == "gzip" { level, err := strconv.Atoi(parts[1]) if err != nil { return fmt.Errorf("Unable to parse gzip compression level ('%d'). Should be 'gzip:<level>'.", parts[1]) } d.compression, _ = dvid.NewCompression(dvid.Gzip, dvid.CompressionLevel(level)) } else { return fmt.Errorf("Illegal compression specified: %s", s) } } } // Set checksum for this instance s, found, err = config.GetString("Checksum") if err != nil { return err } if found { checksum := strings.ToLower(s) switch checksum { case "none": d.checksum = dvid.NoChecksum case "crc32": d.checksum = dvid.CRC32 default: return fmt.Errorf("Illegal checksum specified: %s", s) } } // Set data instances for syncing. s, found, err = config.GetString("sync") if err != nil { return err } if found { names := strings.Split(s, ",") if len(names) > 0 { for _, name := range names { d.syncs = append(d.syncs, dvid.InstanceName(name)) } } } // Set versioning s, found, err = config.GetString("Versioned") if err != nil { return err } if found { versioned := strings.ToLower(s) switch versioned { case "false", "0": d.unversioned = true case "true", "1": d.unversioned = false default: return fmt.Errorf("Illegal setting for 'versioned' (needs to be 'false', '0', 'true', or '1'): %s", s) } } return nil }