// MigrateInstance migrates a data instance locally from an old storage // engine to the current configured storage. After completion of the copy, // the data instance in the old storage is deleted. func MigrateInstance(uuid dvid.UUID, source dvid.InstanceName, oldStore dvid.Store, c dvid.Config) error { if manager == nil { return ErrManagerNotInitialized } // Get flatten or not transmit, _, err := c.GetString("transmit") if err != nil { return err } var flatten bool if transmit == "flatten" { flatten = true } // Get the source data instance. d, err := manager.getDataByUUIDName(uuid, source) if err != nil { return err } // Get the current store for this data instance. storer, ok := d.(storage.Accessor) if !ok { return fmt.Errorf("unable to migrate data %q: unable to access backing store", d.DataName()) } curKV, err := storer.GetOrderedKeyValueDB() if err != nil { return fmt.Errorf("unable to get backing store for data %q: %v\n", source, err) } // Get the old store. oldKV, ok := oldStore.(storage.OrderedKeyValueDB) if !ok { return fmt.Errorf("unable to migrate data %q from store %s which isn't ordered kv store", source, storer) } // Abort if the two stores are the same. if curKV == oldKV { return fmt.Errorf("old store for data %q seems same as current store", source) } // Migrate data asynchronously. go func() { if err := copyData(oldKV, curKV, d, nil, uuid, nil, flatten); err != nil { dvid.Errorf("error in migration of data %q: %v\n", source, err) return } // delete data off old store. dvid.Infof("Starting delete of instance %q from old storage %q\n", d.DataName(), oldKV) ctx := storage.NewDataContext(d, 0) if err := oldKV.DeleteAll(ctx, true); err != nil { dvid.Errorf("deleting instance %q from %q after copy to %q: %v\n", d.DataName(), oldKV, curKV, err) return } }() dvid.Infof("Migrating data %q from store %q to store %q ...\n", d.DataName(), oldKV, curKV) return nil }
// NewData returns a pointer to labelsz data. func NewData(uuid dvid.UUID, id dvid.InstanceID, name dvid.InstanceName, c dvid.Config) (*Data, error) { // See if we have a valid DataService ROI var roistr string roistr, found, err := c.GetString("ROI") if err != nil { return nil, err } if found { parts := strings.Split(roistr, ",") if len(parts) != 2 { return nil, fmt.Errorf("bad ROI value (%q) expected %q", roistr, "<roiname>,<uuid>") } } // Initialize the Data for this data type basedata, err := datastore.NewDataService(dtype, uuid, id, name, c) if err != nil { return nil, err } data := &Data{ Data: basedata, Properties: Properties{ StaticROI: roistr, }, } return data, nil }
// NewData returns a pointer to labelblk data. func NewData(uuid dvid.UUID, id dvid.InstanceID, name dvid.InstanceName, c dvid.Config) (*Data, error) { imgblkData, err := dtype.Type.NewData(uuid, id, name, c) if err != nil { return nil, err } // Check if Raveler label. // TODO - Remove Raveler code outside of DVID. var labelType LabelType = Standard64bit s, found, err := c.GetString("LabelType") if found { switch strings.ToLower(s) { case "raveler": labelType = RavelerLabel case "standard": default: return nil, fmt.Errorf("unknown label type specified '%s'", s) } } dvid.Infof("Creating labelblk '%s' with %s", name, labelType) data := &Data{ Data: imgblkData, Labeling: labelType, } return data, nil }
// NewData returns a pointer to new ROI data with default values. func (dtype *Type) NewDataService(uuid dvid.UUID, id dvid.InstanceID, name dvid.InstanceName, c dvid.Config) (datastore.DataService, error) { basedata, err := datastore.NewDataService(dtype, uuid, id, name, c) if err != nil { return nil, err } s, found, err := c.GetString("BlockSize") if err != nil { return nil, err } var blockSize dvid.Point3d if found { pt, err := dvid.StringToPoint(s, ",") if err != nil { return nil, err } if pt.NumDims() != 3 { return nil, fmt.Errorf("BlockSize must be 3d, not %dd", pt.NumDims()) } blockSize, _ = pt.(dvid.Point3d) } else { blockSize = dvid.Point3d{DefaultBlockSize, DefaultBlockSize, DefaultBlockSize} } d := &Data{ Data: basedata, Properties: Properties{blockSize, math.MaxInt32, math.MinInt32}, ready: make(map[dvid.VersionID]bool), } return d, nil }
// Make a copy of a repository, customizing it via config. // TODO -- modify data instance properties based on filters. func (r *repoT) customize(v dvid.VersionID, config dvid.Config) (*repoT, rpc.Transmit, error) { // Since we can have names separated by commas, split them namesStr, found, err := config.GetString("data") if err != nil { return nil, rpc.TransmitUnknown, err } var datanames dvid.InstanceNames if found { for _, name := range strings.Split(namesStr, ",") { datanames = append(datanames, dvid.InstanceName(strings.TrimSpace(name))) } } // Check the transmission behavior: all, flatten, or deltas. var versions map[dvid.VersionID]struct{} transmitStr, found, err := config.GetString("transmit") if err != nil { return nil, rpc.TransmitUnknown, err } if !found { transmitStr = "all" } var transmit rpc.Transmit switch transmitStr { case "flatten": transmit = rpc.TransmitFlatten versions = map[dvid.VersionID]struct{}{ v: struct{}{}, } case "all": transmit = rpc.TransmitAll versions = r.versionSet() case "branch": transmit = rpc.TransmitBranch versions = r.versionSet() default: return nil, rpc.TransmitUnknown, fmt.Errorf("unknown transmit %s", transmitStr) } // Make a copy filtering by allowed data instances. r.RLock() defer r.RUnlock() dup, err := r.duplicate(versions, datanames) return dup, transmit, err }
func (p *Properties) setByConfig(config dvid.Config) error { s, found, err := config.GetString("BlockSize") if err != nil { return err } if found { p.BlockSize, err = dvid.StringToPoint3d(s, ",") if err != nil { return err } } s, found, err = config.GetString("VoxelSize") if err != nil { return err } if found { dvid.Infof("Changing resolution of voxels to %s\n", s) p.Resolution.VoxelSize, err = dvid.StringToNdFloat32(s, ",") if err != nil { return err } } s, found, err = config.GetString("VoxelUnits") if err != nil { return err } if found { p.Resolution.VoxelUnits, err = dvid.StringToNdString(s, ",") if err != nil { return err } } return nil }
// NewData returns a pointer to labelvol data. func NewData(uuid dvid.UUID, id dvid.InstanceID, name dvid.InstanceName, c dvid.Config) (*Data, error) { // Make sure we have a valid labelvol source s, found, err := c.GetString("Source") if err != nil { return nil, err } if !found { return nil, fmt.Errorf("Cannot make labelsz data without valid 'Source' specifying an associated labelvol.") } srcname := dvid.InstanceName(s) if _, err = labelvol.GetByUUID(uuid, srcname); err != nil { return nil, err } c.Set("sync", s) // This will set base data sync list // Initialize the Data for this data type basedata, err := datastore.NewDataService(dtype, uuid, id, name, c) if err != nil { return nil, err } return &Data{basedata, srcname}, nil }
// CopyInstance copies a data instance locally, perhaps to a different storage // engine if the new instance uses a different backend per a data instance-specific configuration. // (See sample config.example.toml file in root dvid source directory.) func CopyInstance(uuid dvid.UUID, source, target dvid.InstanceName, c dvid.Config) error { if manager == nil { return ErrManagerNotInitialized } if source == "" || target == "" { return fmt.Errorf("both source and cloned name must be provided") } // Get any filter spec fstxt, found, err := c.GetString("filter") if err != nil { return err } var fs storage.FilterSpec if found { fs = storage.FilterSpec(fstxt) } // Get flatten or not transmit, found, err := c.GetString("transmit") if err != nil { return err } var flatten bool if transmit == "flatten" { flatten = true } // Get the source data instance. d1, err := manager.getDataByUUIDName(uuid, source) if err != nil { return err } // Create the target instance. t, err := TypeServiceByName(d1.TypeName()) if err != nil { return err } d2, err := manager.newData(uuid, t, target, c) if err != nil { return err } // Populate the new data instance properties from source. 
copier, ok := d2.(PropertyCopier) if ok { if err := copier.CopyPropertiesFrom(d1, fs); err != nil { return err } if err := SaveDataByUUID(uuid, d2); err != nil { return err } } // We should be able to get the backing store (only ordered kv for now) storer, ok := d1.(storage.Accessor) if !ok { return fmt.Errorf("unable to push data %q: unable to access backing store", d1.DataName()) } oldKV, err := storer.GetOrderedKeyValueDB() if err != nil { return fmt.Errorf("unable to get backing store for data %q: %v\n", d1.DataName(), err) } storer, ok = d2.(storage.Accessor) if !ok { return fmt.Errorf("unable to push data %q: unable to access backing store", d2.DataName()) } newKV, err := storer.GetOrderedKeyValueDB() if err != nil { return fmt.Errorf("unable to get backing store for data %q: %v\n", d2.DataName(), err) } dvid.Infof("Copying data %q (%s) to data %q (%s)...\n", d1.DataName(), oldKV, d2.DataName(), newKV) // See if this data instance implements a Send filter. var filter storage.Filter filterer, ok := d1.(storage.Filterer) if ok && fs != "" { var err error filter, err = filterer.NewFilter(fs) if err != nil { return err } } // copy data with optional datatype-specific filtering. return copyData(oldKV, newKV, d1, d2, uuid, filter, flatten) }
// NewData returns a pointer to new googlevoxels data with default values. func (dtype *Type) NewDataService(uuid dvid.UUID, id dvid.InstanceID, name dvid.InstanceName, c dvid.Config) (datastore.DataService, error) { // Make sure we have needed volumeid and authentication key. volumeid, found, err := c.GetString("volumeid") if err != nil { return nil, err } if !found { return nil, fmt.Errorf("Cannot make googlevoxels data without valid 'volumeid' setting.") } jwtfile, found, err := c.GetString("jwtfile") if err != nil { return nil, err } if !found { return nil, fmt.Errorf("Cannot make googlevoxels data without valid 'jwtfile' specifying path to JSON Web Token") } // Read in the JSON Web Token jwtdata, err := ioutil.ReadFile(jwtfile) if err != nil { return nil, fmt.Errorf("Cannot load JSON Web Token file (%s): %v", jwtfile, err) } conf, err := google.JWTConfigFromJSON(jwtdata, "https://www.googleapis.com/auth/brainmaps") if err != nil { return nil, fmt.Errorf("Cannot establish JWT Config file from Google: %v", err) } client := conf.Client(oauth2.NoContext) // Make URL call to get the available scaled volumes. url := fmt.Sprintf("%s/volumes/%s", bmapsPrefix, volumeid) resp, err := client.Get(url) if err != nil { return nil, fmt.Errorf("Error getting volume metadata from Google: %v", err) } if resp.StatusCode != http.StatusOK { return nil, fmt.Errorf("Unexpected status code %d returned when getting volume metadata for %q", resp.StatusCode, volumeid) } metadata, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } resp.Body.Close() var m struct { Geoms Geometries `json:"geometry"` } if err := json.Unmarshal(metadata, &m); err != nil { return nil, fmt.Errorf("Error decoding volume JSON metadata: %v", err) } dvid.Infof("Successfully got geometries:\nmetadata:\n%s\nparsed JSON:\n%v\n", metadata, m) // Compute the mapping from tile scale/orientation to scaled volume index. geomMap := GeometryMap{} // (1) Find the highest resolution geometry. 
var highResIndex GeometryIndex minVoxelSize := dvid.NdFloat32{10000, 10000, 10000} for i, geom := range m.Geoms { if geom.PixelSize[0] < minVoxelSize[0] || geom.PixelSize[1] < minVoxelSize[1] || geom.PixelSize[2] < minVoxelSize[2] { minVoxelSize = geom.PixelSize highResIndex = GeometryIndex(i) } } dvid.Infof("Google voxels %q: found highest resolution was geometry %d: %s\n", name, highResIndex, minVoxelSize) // (2) For all geometries, find out what the scaling is relative to the highest resolution pixel size. for i, geom := range m.Geoms { if i == int(highResIndex) { geomMap[GSpec{0, XY}] = highResIndex geomMap[GSpec{0, XZ}] = highResIndex geomMap[GSpec{0, YZ}] = highResIndex geomMap[GSpec{0, XYZ}] = highResIndex } else { scaleX := geom.PixelSize[0] / minVoxelSize[0] scaleY := geom.PixelSize[1] / minVoxelSize[1] scaleZ := geom.PixelSize[2] / minVoxelSize[2] var shape Shape switch { case scaleX > scaleZ && scaleY > scaleZ: shape = XY case scaleX > scaleY && scaleZ > scaleY: shape = XZ case scaleY > scaleX && scaleZ > scaleX: shape = YZ default: shape = XYZ } var mag float32 if scaleX > mag { mag = scaleX } if scaleY > mag { mag = scaleY } if scaleZ > mag { mag = scaleZ } scaling := log2(mag) geomMap[GSpec{scaling, shape}] = GeometryIndex(i) dvid.Infof("%s at scaling %d set to geometry %d: resolution %s\n", shape, scaling, i, geom.PixelSize) } } // Create a client that will be authorized and authenticated on behalf of the account. // Initialize the googlevoxels data basedata, err := datastore.NewDataService(dtype, uuid, id, name, c) if err != nil { return nil, err } data := &Data{ Data: basedata, Properties: Properties{ VolumeID: volumeid, JWT: string(jwtdata), TileSize: DefaultTileSize, GeomMap: geomMap, Scales: m.Geoms, HighResIndex: highResIndex, }, client: client, } return data, nil }
// NewData returns a pointer to new tile data with default values. func (dtype *Type) NewDataService(uuid dvid.UUID, id dvid.InstanceID, name dvid.InstanceName, c dvid.Config) (datastore.DataService, error) { // Make sure we have a valid DataService source sourcename, found, err := c.GetString("Source") if err != nil { return nil, err } if !found { return nil, fmt.Errorf("Cannot make imagetile data without valid 'Source' setting.") } // See if we want placeholder imagetile. placeholder, found, err := c.GetBool("Placeholder") if err != nil { return nil, err } // Determine encoding for tile storage and this dictates what kind of compression we use. encoding, found, err := c.GetString("Format") if err != nil { return nil, err } format := PNG if found { switch strings.ToLower(encoding) { case "lz4": format = LZ4 case "png": format = PNG case "jpg": format = JPG default: return nil, fmt.Errorf("Unknown encoding specified: '%s' (should be 'lz4', 'png', or 'jpg'", encoding) } } // Compression is determined by encoding. Inform user if there's a discrepancy. var compression string switch format { case LZ4: compression = "lz4" case PNG: compression = "none" case JPG: compression = "none" } compressConfig, found, err := c.GetString("Compression") if err != nil { return nil, err } if found && strings.ToLower(compressConfig) != compression { return nil, fmt.Errorf("Conflict between specified compression '%s' and format '%s'. Suggest not dictating compression.", compressConfig, encoding) } c.Set("Compression", compression) // Initialize the imagetile data basedata, err := datastore.NewDataService(dtype, uuid, id, name, c) if err != nil { return nil, err } data := &Data{ Data: basedata, Properties: Properties{ Source: dvid.InstanceName(sourcename), Placeholder: placeholder, Encoding: format, }, } return data, nil }
func (d *Data) ModifyConfig(config dvid.Config) error { // Set compression for this instance s, found, err := config.GetString("Compression") if err != nil { return err } if found { format := strings.ToLower(s) switch format { case "none": d.compression, _ = dvid.NewCompression(dvid.Uncompressed, dvid.DefaultCompression) case "snappy": d.compression, _ = dvid.NewCompression(dvid.Snappy, dvid.DefaultCompression) case "lz4": d.compression, _ = dvid.NewCompression(dvid.LZ4, dvid.DefaultCompression) case "gzip": d.compression, _ = dvid.NewCompression(dvid.Gzip, dvid.DefaultCompression) default: // Check for gzip + compression level parts := strings.Split(format, ":") if len(parts) == 2 && parts[0] == "gzip" { level, err := strconv.Atoi(parts[1]) if err != nil { return fmt.Errorf("Unable to parse gzip compression level ('%d'). Should be 'gzip:<level>'.", parts[1]) } d.compression, _ = dvid.NewCompression(dvid.Gzip, dvid.CompressionLevel(level)) } else { return fmt.Errorf("Illegal compression specified: %s", s) } } } // Set checksum for this instance s, found, err = config.GetString("Checksum") if err != nil { return err } if found { checksum := strings.ToLower(s) switch checksum { case "none": d.checksum = dvid.NoChecksum case "crc32": d.checksum = dvid.CRC32 default: return fmt.Errorf("Illegal checksum specified: %s", s) } } // Set data instances for syncing. s, found, err = config.GetString("sync") if err != nil { return err } if found { names := strings.Split(s, ",") if len(names) > 0 { for _, name := range names { d.syncs = append(d.syncs, dvid.InstanceName(name)) } } } // Set versioning s, found, err = config.GetString("Versioned") if err != nil { return err } if found { versioned := strings.ToLower(s) switch versioned { case "false", "0": d.unversioned = true case "true", "1": d.unversioned = false default: return fmt.Errorf("Illegal setting for 'versioned' (needs to be 'false', '0', 'true', or '1'): %s", s) } } return nil }
// NewData returns a pointer to new googlevoxels data with default values. func (dtype *Type) NewDataService(uuid dvid.UUID, id dvid.InstanceID, name dvid.InstanceName, c dvid.Config) (datastore.DataService, error) { // Make sure we have needed volumeid and authentication key. volumeid, found, err := c.GetString("volumeid") if err != nil { return nil, err } if !found { return nil, fmt.Errorf("Cannot make googlevoxels data without valid 'volumeid' setting.") } authkey, found, err := c.GetString("authkey") if err != nil { return nil, err } if !found { return nil, fmt.Errorf("Cannot make googlevoxels data without valid 'authkey' setting.") } // Make URL call to get the available scaled volumes. url := fmt.Sprintf("https://www.googleapis.com/brainmaps/v1beta1/volumes/%s?key=%s", volumeid, authkey) resp, err := http.Get(url) if err != nil { return nil, fmt.Errorf("Error getting volume metadata from Google: %v", err) } if resp.StatusCode != http.StatusOK { return nil, fmt.Errorf("Unexpected status code %d returned when getting volume metadata for %q", resp.StatusCode, volumeid) } metadata, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } var m struct { Geoms Geometries `json:"geometrys"` } if err := json.Unmarshal(metadata, &m); err != nil { return nil, fmt.Errorf("Error decoding volume JSON metadata: %v", err) } // Compute the mapping from tile scale/orientation to scaled volume index. tileMap := GeometryMap{} // (1) Find the highest resolution geometry. 
var highResIndex GeometryIndex minVoxelSize := dvid.NdFloat32{10000, 10000, 10000} for i, geom := range m.Geoms { if geom.PixelSize[0] < minVoxelSize[0] || geom.PixelSize[1] < minVoxelSize[1] || geom.PixelSize[2] < minVoxelSize[2] { minVoxelSize = geom.PixelSize highResIndex = GeometryIndex(i) } } dvid.Infof("Google voxels %q: found highest resolution was geometry %d: %s\n", name, highResIndex, minVoxelSize) // (2) For all geometries, find out what the scaling is relative to the highest resolution pixel size. for i, geom := range m.Geoms { if i == int(highResIndex) { tileMap[TileSpec{0, XY}] = highResIndex tileMap[TileSpec{0, XZ}] = highResIndex tileMap[TileSpec{0, YZ}] = highResIndex } else { scaleX := geom.PixelSize[0] / minVoxelSize[0] scaleY := geom.PixelSize[1] / minVoxelSize[1] scaleZ := geom.PixelSize[2] / minVoxelSize[2] var plane TileOrientation switch { case scaleX > scaleZ && scaleY > scaleZ: plane = XY case scaleX > scaleY && scaleZ > scaleY: plane = XZ case scaleY > scaleX && scaleZ > scaleX: plane = YZ default: dvid.Infof("Odd geometry skipped for Google voxels %q with pixel size: %s\n", name, geom.PixelSize) dvid.Infof(" Scaling from highest resolution: %d x %d x %d\n", scaleX, scaleY, scaleZ) continue } var mag float32 if scaleX > mag { mag = scaleX } if scaleY > mag { mag = scaleY } if scaleZ > mag { mag = scaleZ } scaling := log2(mag) tileMap[TileSpec{scaling, plane}] = GeometryIndex(i) dvid.Infof("Plane %s at scaling %d set to geometry %d: resolution %s\n", plane, scaling, i, geom.PixelSize) } } // Initialize the googlevoxels data basedata, err := datastore.NewDataService(dtype, uuid, id, name, c) if err != nil { return nil, err } data := &Data{ Data: basedata, Properties: Properties{ VolumeID: volumeid, AuthKey: authkey, TileSize: DefaultTileSize, TileMap: tileMap, Scales: m.Geoms, HighResIndex: highResIndex, }, } return data, nil }
// PushRepo pushes a Repo to a remote DVID server at the target address.
// An empty target defaults to rpc.DefaultAddress.  The config may supply
// a "filter" spec plus any settings understood by repoT.customize (e.g.
// "data" instance names and "transmit" behavior).  Returns an error if
// the remote already has all versions ("push unnecessary").
func PushRepo(uuid dvid.UUID, target string, config dvid.Config) error {
	if manager == nil {
		return ErrManagerNotInitialized
	}
	if target == "" {
		target = rpc.DefaultAddress
		dvid.Infof("No target specified for push, defaulting to %q\n", rpc.DefaultAddress)
	}

	// Get the full local repo
	thisRepo, err := manager.repoFromUUID(uuid)
	if err != nil {
		return err
	}

	// Get any filter
	filter, found, err := config.GetString("filter")
	if err != nil {
		return err
	}

	// Create a repo that is tailored by the push configuration, e.g.,
	// keeping just given data instances, etc.
	// (The "found" flag is reused below for the version lookup.)
	v, found := manager.uuidToVersion[uuid]
	if !found {
		return ErrInvalidUUID
	}
	txRepo, transmit, err := thisRepo.customize(v, config)
	if err != nil {
		return err
	}

	// Establish session with target, which may be itself
	s, err := rpc.NewSession(target, pushMessageID)
	if err != nil {
		return fmt.Errorf("Unable to connect (%s) for push: %s", target, err.Error())
	}
	defer s.Close() // TODO -- check if can hang if error occurs during job

	// Send the repo metadata, transmit type, and other useful data for remote server.
	dvid.Infof("Sending repo %s data to %q\n", uuid, target)
	repoSerialization, err := txRepo.GobEncode()
	if err != nil {
		return err
	}
	repoMsg := repoTxMsg{
		Session:  s.ID(),
		Transmit: transmit,
		UUID:     uuid,
		Repo:     repoSerialization,
	}
	resp, err := s.Call()(sendRepoMsg, repoMsg)
	if err != nil {
		return err
	}
	// A nil response means the remote already has every version we would
	// send, so the push is aborted as unnecessary.
	if resp == nil {
		return fmt.Errorf("push unnecessary -- versions at remote are already present")
	}

	// We should be able to get the backing store (only ordered kv for now)
	// We should get back a version set to send, or nil = send all versions.
	versions, ok := resp.(map[dvid.VersionID]struct{})
	if !ok {
		return fmt.Errorf("received response during repo push that wasn't expected set of delta versions")
	}
	dvid.Debugf("Remote sent list of %d versions to send\n", len(versions))

	// For each data instance, send the data with optional datatype-specific filtering.
	// NOTE(review): the first PushData error aborts the remaining instances;
	// presumably the remote discards the partial push — verify session semantics.
	ps := &PushSession{storage.FilterSpec(filter), versions, s, transmit}
	for _, d := range txRepo.data {
		dvid.Infof("Sending instance %q data to %q\n", d.DataName(), target)
		if err := d.PushData(ps); err != nil {
			dvid.Errorf("Aborting send of instance %q data\n", d.DataName())
			return err
		}
	}
	return nil
}