// DataBySpec returns an ROI Data based on a string specification of the form
// "<roiname>,<uuid>". If the given string is not parsable, the "found" return value is false.
func DataBySpec(spec string) (d *Data, v dvid.VersionID, found bool, err error) {
	roispec := strings.Split(spec, ",")
	if len(roispec) != 2 {
		err = fmt.Errorf("Expect ROI filters to have format %q, but got %q", "roi:<roiname>,<uuid>", spec)
		return
	}
	roiName := dvid.InstanceName(roispec[0])
	_, v, err = datastore.MatchingUUID(roispec[1])
	if err != nil {
		return
	}
	var data datastore.DataService
	data, err = datastore.GetDataByVersionName(v, roiName)
	if err != nil {
		return
	}
	var ok bool
	d, ok = data.(*Data)
	if !ok {
		err = fmt.Errorf("Data instance %q is not an ROI instance", roiName)
		return
	}
	found = true
	return
}
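// Hypothetical usage sketch (not part of the original source): a caller that resolves an ROI
// filter spec such as "myroi,3f8a" via DataBySpec above and logs the outcome. The wrapper
// name exampleDataBySpec and the logging choices are illustrative assumptions only.
func exampleDataBySpec(spec string) {
	d, v, found, err := DataBySpec(spec)
	if err != nil {
		dvid.Errorf("bad ROI spec %q: %v\n", spec, err)
		return
	}
	if !found {
		dvid.Infof("no ROI matched spec %q\n", spec)
		return
	}
	dvid.Infof("ROI %q resolved at version id %v\n", d.DataName(), v)
}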
// repoSelector retrieves the particular repo from a potentially partial string that uniquely
// identifies the repo.
func repoSelector(c *web.C, h http.Handler) http.Handler {
	fn := func(w http.ResponseWriter, r *http.Request) {
		action := strings.ToLower(r.Method)
		if readonly && action != "get" && action != "head" {
			BadRequest(w, r, "Server in read-only mode and will only accept GET and HEAD requests")
			return
		}

		var err error
		var uuid dvid.UUID
		if uuid, c.Env["versionID"], err = datastore.MatchingUUID(c.URLParams["uuid"]); err != nil {
			BadRequest(w, r, err)
			return
		}
		c.Env["uuid"] = uuid

		// Make sure locked nodes can't use anything besides GET and HEAD
		locked, err := datastore.LockedUUID(uuid)
		if err != nil {
			BadRequest(w, r, err)
			return
		}
		branchRequest := (c.URLParams["action"] == "branch")
		mergeRequest := (c.URLParams["action"] == "merge")
		if locked && !branchRequest && !mergeRequest && action != "get" && action != "head" {
			BadRequest(w, r, "Cannot do %s on locked node %s", action, uuid)
			return
		}
		h.ServeHTTP(w, r)
	}
	return http.HandlerFunc(fn)
}
// put handles a PUT command-line request.
func (d *Data) put(cmd datastore.Request, reply *datastore.Response) error {
	if len(cmd.Command) < 5 {
		return fmt.Errorf("The key name must be specified after 'put'")
	}
	if len(cmd.Input) == 0 {
		return fmt.Errorf("No data was passed into standard input")
	}
	var uuidStr, dataName, cmdStr, keyStr string
	cmd.CommandArgs(1, &uuidStr, &dataName, &cmdStr, &keyStr)

	_, versionID, err := datastore.MatchingUUID(uuidStr)
	if err != nil {
		return err
	}

	// Store data
	if !d.Versioned() {
		// Map everything to root version.
		versionID, err = datastore.GetRepoRootVersion(versionID)
		if err != nil {
			return err
		}
	}
	ctx := datastore.NewVersionedCtx(d, versionID)
	if err = d.PutData(ctx, keyStr, cmd.Input); err != nil {
		return fmt.Errorf("Error on put to key %q for keyvalue %q: %v\n", keyStr, d.DataName(), err)
	}

	reply.Output = []byte(fmt.Sprintf("Put %d bytes into key %q for keyvalue %q, uuid %s\n",
		len(cmd.Input), keyStr, d.DataName(), uuidStr))
	return nil
}
// DoRPC acts as a switchboard for RPC commands.
func (d *Data) DoRPC(req datastore.Request, reply *datastore.Response) error {
	switch req.TypeCommand() {
	case "load":
		if len(req.Command) < 5 {
			return fmt.Errorf("Poorly formatted load command. See command-line help.")
		}
		// Parse the request
		var uuidStr, dataName, cmdStr, offsetStr string
		filenames, err := req.FilenameArgs(1, &uuidStr, &dataName, &cmdStr, &offsetStr)
		if err != nil {
			return err
		}
		if len(filenames) == 0 {
			return fmt.Errorf("Need to include at least one file to add: %s", req)
		}

		// Get offset
		offset, err := dvid.StringToPoint(offsetStr, ",")
		if err != nil {
			return fmt.Errorf("Illegal offset specification: %s: %v", offsetStr, err)
		}

		// Get list of files to add
		var addedFiles string
		if len(filenames) == 1 {
			addedFiles = filenames[0]
		} else {
			addedFiles = fmt.Sprintf("filenames: %s [%d more]", filenames[0], len(filenames)-1)
		}
		dvid.Debugf(addedFiles + "\n")

		uuid, versionID, err := datastore.MatchingUUID(uuidStr)
		if err != nil {
			return err
		}
		if err = datastore.AddToNodeLog(uuid, []string{req.Command.String()}); err != nil {
			return err
		}
		if err = d.LoadImages(versionID, offset, filenames); err != nil {
			return err
		}
		if err := datastore.SaveDataByUUID(uuid, d); err != nil {
			return err
		}
		return nil

	case "composite":
		if len(req.Command) < 6 {
			return fmt.Errorf("Poorly formatted composite command. See command-line help.")
		}
		return d.CreateComposite(req, reply)

	default:
		return fmt.Errorf("Unknown command. Data type '%s' [%s] does not support '%s' command.",
			d.DataName(), d.TypeName(), req.TypeCommand())
	}
	return nil
}
func repoMergeHandler(c web.C, w http.ResponseWriter, r *http.Request) {
	if r.Body == nil {
		BadRequest(w, r, "merge requires JSON to be POSTed per API documentation")
		return
	}
	data, err := ioutil.ReadAll(r.Body)
	if err != nil {
		BadRequest(w, r, err)
		return
	}

	jsonData := struct {
		MergeType string   `json:"mergeType"`
		Note      string   `json:"note"`
		Parents   []string `json:"parents"`
	}{}
	if err := json.Unmarshal(data, &jsonData); err != nil {
		BadRequest(w, r, fmt.Sprintf("Malformed JSON request in body: %v", err))
		return
	}
	if len(jsonData.Parents) < 2 {
		BadRequest(w, r, "Must specify at least two parent UUIDs using 'parents' field")
		return
	}

	// Convert JSON of parents into []UUID
	parents := make([]dvid.UUID, len(jsonData.Parents))
	for i, uuidFrag := range jsonData.Parents {
		uuid, _, err := datastore.MatchingUUID(uuidFrag)
		if err != nil {
			BadRequest(w, r, fmt.Sprintf("can't match parent %q: %v", uuidFrag, err))
			return
		}
		parents[i] = uuid
	}

	// Convert merge type designation
	var mt datastore.MergeType
	switch jsonData.MergeType {
	case "conflict-free":
		mt = datastore.MergeConflictFree
	default:
		BadRequest(w, r, "'mergeType' must be 'conflict-free' at this time")
		return
	}

	// Do the merge
	newuuid, err := datastore.Merge(parents, jsonData.Note, mt)
	if err != nil {
		BadRequest(w, r, err)
	} else {
		w.Header().Set("Content-Type", "application/json")
		fmt.Fprintf(w, "{%q: %q}", "child", newuuid)
	}
}
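// Hypothetical client sketch (not part of the original source) showing the JSON body that
// repoMergeHandler above expects. Only the field names ("mergeType", "note", "parents") and the
// "conflict-free" value come from the handler; the URL, port, and UUID fragments are placeholders.
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	body := `{"mergeType": "conflict-free", "note": "merge two branches", "parents": ["a9e1", "b7f3"]}`
	resp, err := http.Post("http://localhost:8000/api/repo/a9e1/merge", "application/json", strings.NewReader(body))
	if err != nil {
		fmt.Println("merge request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("merge response status:", resp.Status) // on success the response body is {"child": "<uuid>"}
}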
// repoRawSelector retrieves the particular repo from a potentially partial string that uniquely
// identifies the repo without any access restrictions.
func repoRawSelector(c *web.C, h http.Handler) http.Handler {
	fn := func(w http.ResponseWriter, r *http.Request) {
		var err error
		var uuid dvid.UUID
		if uuid, c.Env["versionID"], err = datastore.MatchingUUID(c.URLParams["uuid"]); err != nil {
			BadRequest(w, r, err)
			return
		}
		c.Env["uuid"] = uuid
		h.ServeHTTP(w, r)
	}
	return http.HandlerFunc(fn)
}
// ParseFilterSpec returns the specified ROI instance name and version within a FilterSpec.
// Currently, only one ROI can be specified in a FilterSpec. Multiple ROIs should use a
// different FilterSpec like "intersect" instead of "roi".
func ParseFilterSpec(spec storage.FilterSpec) (name dvid.InstanceName, v dvid.VersionID, found bool, err error) {
	var filterval string
	filterval, found = spec.GetFilterSpec("roi")
	if !found {
		return
	}
	roispec := strings.Split(filterval, ",")
	if len(roispec) != 2 {
		err = fmt.Errorf("bad ROI spec: %s", filterval)
		return
	}
	name = dvid.InstanceName(roispec[0])
	_, v, err = datastore.MatchingUUID(roispec[1])
	return
}
// ForegroundROI creates a new ROI by determining all non-background blocks.
func (d *Data) ForegroundROI(req datastore.Request, reply *datastore.Response) error {
	if d.Values.BytesPerElement() != 1 {
		return fmt.Errorf("Foreground ROI command only implemented for 1 byte/voxel data!")
	}

	// Parse the request
	var uuidStr, dataName, cmdStr, destName, backgroundStr string
	req.CommandArgs(1, &uuidStr, &dataName, &cmdStr, &destName, &backgroundStr)

	// Get the version and repo
	uuid, versionID, err := datastore.MatchingUUID(uuidStr)
	if err != nil {
		return err
	}
	if err = datastore.AddToNodeLog(uuid, []string{req.Command.String()}); err != nil {
		return err
	}

	// Use existing destination data or a new ROI data.
	var dest *roi.Data
	dest, err = roi.GetByUUID(uuid, dvid.InstanceName(destName))
	if err != nil {
		config := dvid.NewConfig()
		typeservice, err := datastore.TypeServiceByName("roi")
		if err != nil {
			return err
		}
		dataservice, err := datastore.NewData(uuid, typeservice, dvid.InstanceName(destName), config)
		if err != nil {
			return err
		}
		var ok bool
		dest, ok = dataservice.(*roi.Data)
		if !ok {
			return fmt.Errorf("Could not create ROI data instance")
		}
	}

	// Asynchronously process the voxels.
	background, err := dvid.StringToPointNd(backgroundStr, ",")
	if err != nil {
		return err
	}
	go d.foregroundROI(versionID, dest, background)

	return nil
}
// repoSelector retrieves the particular repo from a potentially partial string that uniquely
// identifies the repo and enforces read-only mode.
func repoSelector(c *web.C, h http.Handler) http.Handler {
	fn := func(w http.ResponseWriter, r *http.Request) {
		if httpUnavailable(w) {
			return
		}
		action := strings.ToLower(r.Method)
		if readonly && action != "get" && action != "head" {
			BadRequest(w, r, "Server in read-only mode and will only accept GET and HEAD requests")
			return
		}
		var err error
		var uuid dvid.UUID
		if uuid, c.Env["versionID"], err = datastore.MatchingUUID(c.URLParams["uuid"]); err != nil {
			BadRequest(w, r, err)
			return
		}
		c.Env["uuid"] = uuid
		h.ServeHTTP(w, r)
	}
	return http.HandlerFunc(fn)
}
// LoadLocal adds image data to a version node. See HelpMessage for example of
// command-line use of "load local".
func (d *Data) LoadLocal(request datastore.Request, reply *datastore.Response) error {
	timedLog := dvid.NewTimeLog()

	// Parse the request
	var uuidStr, dataName, cmdStr, sourceStr, filename string
	_ = request.CommandArgs(1, &uuidStr, &dataName, &cmdStr, &sourceStr, &filename)

	// Get the uuid from a uniquely identifiable string
	uuid, versionID, err := datastore.MatchingUUID(uuidStr)
	if err != nil {
		return fmt.Errorf("Could not find node with UUID %s: %v", uuidStr, err)
	}

	// Load the V3D Raw file.
	ext := filepath.Ext(filename)
	switch ext {
	case ".raw", ".v3draw":
	default:
		return fmt.Errorf("Unknown extension '%s' when expected V3D Raw file", ext)
	}
	file, err := os.Open(filename)
	if err != nil {
		return err
	}
	unmarshaler := V3DRawMarshaler{}
	channels, err := unmarshaler.UnmarshalV3DRaw(file)
	if err != nil {
		return err
	}

	// Store the metadata
	d.NumChannels = len(channels)
	d.Properties.Values = make(dvid.DataValues, d.NumChannels)
	if d.NumChannels > 0 {
		reply.Text = fmt.Sprintf("Loaded %s into data '%s': found %d channels\n",
			filename, d.DataName(), d.NumChannels)
		reply.Text += fmt.Sprintf("  %s", channels[0])
	} else {
		reply.Text = fmt.Sprintf("Found no channels in file %s\n", filename)
		return nil
	}
	for i, channel := range channels {
		d.Properties.Values[i] = channel.Voxels.Values()[0]
	}

	// Get repo and save it.
	if err := datastore.SaveDataByUUID(uuid, d); err != nil {
		return err
	}

	// PUT each channel of the file into the datastore using a separate data name.
	for _, channel := range channels {
		dvid.Infof("Processing channel %d...\n", channel.channelNum)
		err = d.PutVoxels(versionID, channel.Voxels, nil)
		if err != nil {
			return err
		}
	}

	// Create a RGB composite from the first 3 channels. This is considered to be channel 0
	// or can be accessed with the base data name.
	dvid.Infof("Creating composite image from channels...\n")
	err = d.storeComposite(versionID, channels)
	if err != nil {
		return err
	}

	timedLog.Infof("RPC load local '%s' completed", filename)
	return nil
}
func repoResolveHandler(c web.C, w http.ResponseWriter, r *http.Request) {
	uuid, _, err := datastore.MatchingUUID(c.URLParams["uuid"])
	if err != nil {
		BadRequest(w, r, err)
		return
	}
	if r.Body == nil {
		BadRequest(w, r, "merge resolving requires JSON to be POSTed per API documentation")
		return
	}
	data, err := ioutil.ReadAll(r.Body)
	if err != nil {
		BadRequest(w, r, err)
		return
	}

	jsonData := struct {
		Data    []dvid.InstanceName `json:"data"`
		Note    string              `json:"note"`
		Parents []string            `json:"parents"`
	}{}
	if err := json.Unmarshal(data, &jsonData); err != nil {
		BadRequest(w, r, fmt.Sprintf("Malformed JSON request in body: %v", err))
		return
	}
	if len(jsonData.Data) == 0 {
		BadRequest(w, r, "Must specify at least one data instance using 'data' field")
		return
	}
	if len(jsonData.Parents) < 2 {
		BadRequest(w, r, "Must specify at least two parent UUIDs using 'parents' field")
		return
	}

	// Convert JSON of parents into []UUID and determine whether we need a child for them to add deletions.
	numParents := len(jsonData.Parents)
	oldParents := make([]dvid.UUID, numParents)
	newParents := make([]dvid.UUID, numParents) // UUID of any parent extension for deletions.
	for i, uuidFrag := range jsonData.Parents {
		uuid, _, err := datastore.MatchingUUID(uuidFrag)
		if err != nil {
			BadRequest(w, r, fmt.Sprintf("can't match parent %q: %v", uuidFrag, err))
			return
		}
		oldParents[i] = uuid
		newParents[i] = dvid.NilUUID
	}

	// Iterate through all k/v for given data instances, making sure we find any conflicts.
	// If any are found, remove them with first UUIDs taking priority.
	for _, name := range jsonData.Data {
		data, err := datastore.GetDataByUUIDName(uuid, name)
		if err != nil {
			BadRequest(w, r, err)
			return
		}
		if err := datastore.DeleteConflicts(uuid, data, oldParents, newParents); err != nil {
			BadRequest(w, r, fmt.Errorf("Conflict deletion error for data %q: %v", data.DataName(), err))
			return
		}
	}

	// If we have any new nodes to accommodate deletions, commit them.
	for i, oldUUID := range oldParents {
		if newParents[i] != oldUUID {
			err := datastore.Commit(newParents[i], "Version for deleting conflicts before merge", nil)
			if err != nil {
				BadRequest(w, r, "Error while creating new nodes to handle required deletions: %v", err)
				return
			}
		}
	}

	// Do the merge
	mt := datastore.MergeConflictFree
	newuuid, err := datastore.Merge(newParents, jsonData.Note, mt)
	if err != nil {
		BadRequest(w, r, err)
	} else {
		w.Header().Set("Content-Type", "application/json")
		fmt.Fprintf(w, "{%q: %q}", "child", newuuid)
	}
}
func (d *Data) ConstructTiles(uuidStr string, tileSpec TileSpec, request datastore.Request) error {
	config := request.Settings()
	uuid, versionID, err := datastore.MatchingUUID(uuidStr)
	if err != nil {
		return err
	}
	if err = datastore.AddToNodeLog(uuid, []string{request.Command.String()}); err != nil {
		return err
	}

	source, err := datastore.GetDataByUUID(uuid, d.Source)
	if err != nil {
		return err
	}
	src, ok := source.(*imageblk.Data)
	if !ok {
		return fmt.Errorf("Cannot construct imagetile for non-voxels data: %s", d.Source)
	}

	// Save the current tile specification
	d.Levels = tileSpec
	if err := datastore.SaveDataByUUID(uuid, d); err != nil {
		return err
	}

	// Get size of tile at lowest resolution.
	lastLevel := Scaling(len(tileSpec) - 1)
	loresSpec, found := tileSpec[lastLevel]
	if !found {
		return fmt.Errorf("Illegal tile spec. Expected levels 0 to %d but level %d is missing.", lastLevel, lastLevel)
	}
	var loresSize [3]float64
	for i := 0; i < 3; i++ {
		loresSize[i] = float64(loresSpec.Resolution[i]) * float64(DefaultTileSize[i])
	}
	loresMag := dvid.Point3d{1, 1, 1}
	for i := Scaling(0); i < lastLevel; i++ {
		levelMag := tileSpec[i].levelMag
		loresMag[0] *= levelMag[0]
		loresMag[1] *= levelMag[1]
		loresMag[2] *= levelMag[2]
	}

	// Get min and max points in terms of distance.
	var minPtDist, maxPtDist [3]float64
	for i := uint8(0); i < 3; i++ {
		minPtDist[i] = float64(src.MinPoint.Value(i)) * float64(src.VoxelSize[i])
		maxPtDist[i] = float64(src.MaxPoint.Value(i)) * float64(src.VoxelSize[i])
	}

	// Adjust min and max points for the tileable surface at lowest resolution.
	var minTiledPt, maxTiledPt dvid.Point3d
	for i := 0; i < 3; i++ {
		minInt, _ := math.Modf(minPtDist[i] / loresSize[i])
		maxInt, _ := math.Modf(maxPtDist[i] / loresSize[i])
		minTileCoord := int32(minInt)
		maxTileCoord := int32(maxInt)
		minTiledPt[i] = minTileCoord * DefaultTileSize[i] * loresMag[i]
		maxTiledPt[i] = (maxTileCoord+1)*DefaultTileSize[i]*loresMag[i] - 1
	}
	sizeVolume := maxTiledPt.Sub(minTiledPt).AddScalar(1)

	// Setup swappable ExtData buffers (the stitched slices) so we can be generating tiles
	// at the same time we are reading and stitching them.
	var bufferLock [2]sync.Mutex
	var sliceBuffers [2]*imageblk.Voxels
	var bufferNum int

	// Get the planes we should tile.
	planes, err := config.GetShapes("planes", ";")
	if err != nil {
		return err
	}
	if planes == nil {
		// If no planes are specified, construct imagetile for 3 orthogonal planes.
		planes = []dvid.DataShape{dvid.XY, dvid.XZ, dvid.YZ}
	}

	outF, err := d.putTileFunc(versionID)
	if err != nil {
		return err
	}

	// sort the tile spec keys to iterate from highest to lowest resolution
	var sortedKeys []int
	for scaling := range tileSpec {
		sortedKeys = append(sortedKeys, int(scaling))
	}
	sort.Ints(sortedKeys)

	for _, plane := range planes {
		timedLog := dvid.NewTimeLog()
		offset := minTiledPt.Duplicate()

		switch {
		case plane.Equals(dvid.XY):
			width, height, err := plane.GetSize2D(sizeVolume)
			if err != nil {
				return err
			}
			dvid.Debugf("Tiling XY image %d x %d pixels\n", width, height)
			for z := src.MinPoint.Value(2); z <= src.MaxPoint.Value(2); z++ {
				server.BlockOnInteractiveRequests("imagetile.ConstructTiles [xy]")

				sliceLog := dvid.NewTimeLog()
				offset = offset.Modify(map[uint8]int32{2: z})
				slice, err := dvid.NewOrthogSlice(dvid.XY, offset, dvid.Point2d{width, height})
				if err != nil {
					return err
				}
				bufferLock[bufferNum].Lock()
				sliceBuffers[bufferNum], err = src.NewVoxels(slice, nil)
				if err != nil {
					return err
				}
				if err = src.GetVoxels(versionID, sliceBuffers[bufferNum], nil); err != nil {
					return err
				}
				// Iterate through the different scales, extracting tiles at each resolution.
				go func(bufferNum int, offset dvid.Point) {
					defer bufferLock[bufferNum].Unlock()

					timedLog := dvid.NewTimeLog()
					for _, key := range sortedKeys {
						scaling := Scaling(key)
						levelSpec := tileSpec[scaling]
						if err != nil {
							dvid.Errorf("Error in tiling: %v\n", err)
							return
						}
						if err := d.extractTiles(sliceBuffers[bufferNum], offset, scaling, outF); err != nil {
							dvid.Errorf("Error in tiling: %v\n", err)
							return
						}
						if int(scaling) < len(tileSpec)-1 {
							if err := sliceBuffers[bufferNum].DownRes(levelSpec.levelMag); err != nil {
								dvid.Errorf("Error in tiling: %v\n", err)
								return
							}
						}
					}
					timedLog.Debugf("Tiled XY Tile using buffer %d", bufferNum)
				}(bufferNum, offset)

				sliceLog.Infof("Read XY Tile @ Z = %d, now tiling...", z)
				bufferNum = (bufferNum + 1) % 2
			}
			timedLog.Infof("Total time to generate XY Tiles")

		case plane.Equals(dvid.XZ):
			width, height, err := plane.GetSize2D(sizeVolume)
			if err != nil {
				return err
			}
			dvid.Debugf("Tiling XZ image %d x %d pixels\n", width, height)
			for y := src.MinPoint.Value(1); y <= src.MaxPoint.Value(1); y++ {
				server.BlockOnInteractiveRequests("imagetile.ConstructTiles [xz]")

				sliceLog := dvid.NewTimeLog()
				offset = offset.Modify(map[uint8]int32{1: y})
				slice, err := dvid.NewOrthogSlice(dvid.XZ, offset, dvid.Point2d{width, height})
				if err != nil {
					return err
				}
				bufferLock[bufferNum].Lock()
				sliceBuffers[bufferNum], err = src.NewVoxels(slice, nil)
				if err != nil {
					return err
				}
				if err = src.GetVoxels(versionID, sliceBuffers[bufferNum], nil); err != nil {
					return err
				}
				// Iterate through the different scales, extracting tiles at each resolution.
				go func(bufferNum int, offset dvid.Point) {
					defer bufferLock[bufferNum].Unlock()

					timedLog := dvid.NewTimeLog()
					for _, key := range sortedKeys {
						scaling := Scaling(key)
						levelSpec := tileSpec[scaling]
						if err != nil {
							dvid.Errorf("Error in tiling: %v\n", err)
							return
						}
						if err := d.extractTiles(sliceBuffers[bufferNum], offset, scaling, outF); err != nil {
							dvid.Errorf("Error in tiling: %v\n", err)
							return
						}
						if int(scaling) < len(tileSpec)-1 {
							if err := sliceBuffers[bufferNum].DownRes(levelSpec.levelMag); err != nil {
								dvid.Errorf("Error in tiling: %v\n", err)
								return
							}
						}
					}
					timedLog.Debugf("Tiled XZ Tile using buffer %d", bufferNum)
				}(bufferNum, offset)

				sliceLog.Infof("Read XZ Tile @ Y = %d, now tiling...", y)
				bufferNum = (bufferNum + 1) % 2
			}
			timedLog.Infof("Total time to generate XZ Tiles")

		case plane.Equals(dvid.YZ):
			width, height, err := plane.GetSize2D(sizeVolume)
			if err != nil {
				return err
			}
			dvid.Debugf("Tiling YZ image %d x %d pixels\n", width, height)
			for x := src.MinPoint.Value(0); x <= src.MaxPoint.Value(0); x++ {
				server.BlockOnInteractiveRequests("imagetile.ConstructTiles [yz]")

				sliceLog := dvid.NewTimeLog()
				offset = offset.Modify(map[uint8]int32{0: x})
				slice, err := dvid.NewOrthogSlice(dvid.YZ, offset, dvid.Point2d{width, height})
				if err != nil {
					return err
				}
				bufferLock[bufferNum].Lock()
				sliceBuffers[bufferNum], err = src.NewVoxels(slice, nil)
				if err != nil {
					return err
				}
				if err = src.GetVoxels(versionID, sliceBuffers[bufferNum], nil); err != nil {
					return err
				}
				// Iterate through the different scales, extracting tiles at each resolution.
				go func(bufferNum int, offset dvid.Point) {
					defer bufferLock[bufferNum].Unlock()

					timedLog := dvid.NewTimeLog()
					for _, key := range sortedKeys {
						scaling := Scaling(key)
						levelSpec := tileSpec[scaling]
						outF, err := d.putTileFunc(versionID)
						if err != nil {
							dvid.Errorf("Error in tiling: %v\n", err)
							return
						}
						if err := d.extractTiles(sliceBuffers[bufferNum], offset, scaling, outF); err != nil {
							dvid.Errorf("Error in tiling: %v\n", err)
							return
						}
						if int(scaling) < len(tileSpec)-1 {
							if err := sliceBuffers[bufferNum].DownRes(levelSpec.levelMag); err != nil {
								dvid.Errorf("Error in tiling: %v\n", err)
								return
							}
						}
					}
					timedLog.Debugf("Tiled YZ Tile using buffer %d", bufferNum)
				}(bufferNum, offset)

				sliceLog.Debugf("Read YZ Tile @ X = %d, now tiling...", x)
				bufferNum = (bufferNum + 1) % 2
			}
			timedLog.Infof("Total time to generate YZ Tiles")

		default:
			dvid.Infof("Skipping request to tile '%s'. Unsupported.", plane)
		}
	}
	return nil
}
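// Standalone sketch (not part of the original source) of the tile-coordinate arithmetic used in
// ConstructTiles above: math.Modf keeps the integer part of position/tileSpan, which is the
// lowest-resolution tile coordinate a physical position falls in. The numbers are made up.
package main

import (
	"fmt"
	"math"
)

func main() {
	const tileSpan = 1024.0 // physical span of one lowest-resolution tile (illustrative)
	position := 2600.0      // physical position along one axis (illustrative)
	tileCoord, _ := math.Modf(position / tileSpan)
	fmt.Println(int32(tileCoord)) // prints 2, since 2600/1024 ≈ 2.54
}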
// DefaultTileSpec returns the default tile spec that fully covers the source extents. Scaling 0
// uses the original voxel resolution, with each subsequent scale causing a 2x zoom out.
func (d *Data) DefaultTileSpec(uuidStr string) (TileSpec, error) {
	uuid, _, err := datastore.MatchingUUID(uuidStr)
	if err != nil {
		return nil, err
	}
	source, err := datastore.GetDataByUUID(uuid, d.Source)
	if err != nil {
		return nil, err
	}
	var ok bool
	var src *imageblk.Data
	src, ok = source.(*imageblk.Data)
	if !ok {
		return nil, fmt.Errorf("Cannot construct tile spec for non-voxels data: %s", d.Source)
	}

	// Set scaling 0 based on extents and resolution of source.
	//extents := src.Extents()
	resolution := src.Properties.Resolution.VoxelSize

	if len(resolution) != 3 {
		return nil, fmt.Errorf("Cannot construct tile spec for non-3d data: voxel is %d-d", len(resolution))
	}

	// Expand min and max points to coincide with full tile boundaries of highest resolution.
	minTileCoord := src.MinPoint.(dvid.Chunkable).Chunk(DefaultTileSize)
	maxTileCoord := src.MaxPoint.(dvid.Chunkable).Chunk(DefaultTileSize)
	minTiledPt := minTileCoord.MinPoint(DefaultTileSize)
	maxTiledPt := maxTileCoord.MaxPoint(DefaultTileSize)
	sizeVolume := maxTiledPt.Sub(minTiledPt).AddScalar(1)

	dvid.Infof("Creating default multiscale tile spec for volume of size %s\n", sizeVolume)

	// For each dimension, calculate the number of scaling levels necessary to cover extent,
	// assuming we use the raw resolution at scaling 0.
	numScales := make([]int, 3)
	var maxScales int
	var dim uint8
	for dim = 0; dim < 3; dim++ {
		numPixels := float64(sizeVolume.Value(dim))
		tileSize := float64(DefaultTileSize.Value(dim))
		if numPixels <= tileSize {
			numScales[dim] = 1
		} else {
			numScales[dim] = int(math.Ceil(math.Log2(numPixels/tileSize))) + 1
		}
		if numScales[dim] > maxScales {
			maxScales = numScales[dim]
		}
	}

	// Initialize the tile level specification
	specs := make(TileSpec, maxScales)
	curRes := resolution
	levelMag := dvid.Point3d{2, 2, 2}
	var scaling Scaling
	for scaling = 0; scaling < Scaling(maxScales); scaling++ {
		for dim = 0; dim < 3; dim++ {
			if scaling >= Scaling(numScales[dim]) {
				levelMag[dim] = 1
			}
		}
		specs[scaling] = TileScaleSpec{
			LevelSpec{curRes, DefaultTileSize},
			levelMag,
		}
		curRes = curRes.MultScalar(2.0)
	}
	return specs, nil
}
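// Standalone sketch (not part of the original source) of the scale-count arithmetic in
// DefaultTileSpec above: the number of 2x zoom-out levels needed so a single tile covers a
// dimension of numPixels voxels with a tile edge of tileSize pixels. The helper name and the
// example numbers are illustrative only.
package main

import (
	"fmt"
	"math"
)

func numScalesNeeded(numPixels, tileSize float64) int {
	if numPixels <= tileSize {
		return 1
	}
	return int(math.Ceil(math.Log2(numPixels/tileSize))) + 1
}

func main() {
	// A 10000-voxel extent with 512-pixel tiles needs ceil(log2(10000/512)) + 1 = 6 levels.
	fmt.Println(numScalesNeeded(10000, 512)) // prints 6
}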
// switchboard for remote command execution
func handleCommand(cmd *datastore.Request) (reply *datastore.Response, err error) {
	if cmd.Name() == "" {
		err = fmt.Errorf("Server error: got empty command!")
		return
	}
	reply = new(datastore.Response)

	switch cmd.Name() {
	case "help":
		reply.Text = fmt.Sprintf(RPCHelpMessage, config.RPCAddress(), config.HTTPAddress())

	case "shutdown":
		dvid.Infof("DVID server halting due to 'shutdown' command.")
		reply.Text = fmt.Sprintf("DVID server at %s is being shutdown...\n", config.RPCAddress())
		// launch goroutine shutdown so we can concurrently return shutdown message to client.
		go Shutdown()

	case "types":
		if len(cmd.Command) == 1 {
			text := "\nData Types within this DVID Server\n"
			text += "----------------------------------\n"
			var mapTypes map[dvid.URLString]datastore.TypeService
			if mapTypes, err = datastore.Types(); err != nil {
				err = fmt.Errorf("Error trying to retrieve data types within this DVID server!")
				return
			}
			for url, typeservice := range mapTypes {
				text += fmt.Sprintf("%-20s %s\n", typeservice.GetTypeName(), url)
			}
			reply.Text = text
		} else {
			if len(cmd.Command) != 3 || cmd.Command[2] != "help" {
				err = fmt.Errorf("Unknown types command: %q", cmd.Command)
				return
			}
			var typename string
			var typeservice datastore.TypeService
			cmd.CommandArgs(1, &typename)
			if typeservice, err = datastore.TypeServiceByName(dvid.TypeString(typename)); err != nil {
				return
			}
			reply.Text = typeservice.Help()
		}

	case "repos":
		var subcommand string
		cmd.CommandArgs(1, &subcommand)
		switch subcommand {
		case "new":
			var alias, description string
			cmd.CommandArgs(2, &alias, &description)

			config := cmd.Settings()
			var uuidStr, passcode string
			var found bool
			if uuidStr, found, err = config.GetString("uuid"); err != nil {
				return
			}
			var assign *dvid.UUID
			if !found {
				assign = nil
			} else {
				uuid := dvid.UUID(uuidStr)
				assign = &uuid
			}
			if passcode, found, err = config.GetString("passcode"); err != nil {
				return
			}
			var root dvid.UUID
			root, err = datastore.NewRepo(alias, description, assign, passcode)
			if err != nil {
				return
			}
			if err = datastore.SetRepoAlias(root, alias); err != nil {
				return
			}
			if err = datastore.SetRepoDescription(root, description); err != nil {
				return
			}
			reply.Text = fmt.Sprintf("New repo %q created with head node %s\n", alias, root)

		case "delete":
			var uuidStr, passcode string
			cmd.CommandArgs(2, &uuidStr, &passcode)

			var uuid dvid.UUID
			if uuid, _, err = datastore.MatchingUUID(uuidStr); err != nil {
				return
			}
			if err = datastore.DeleteRepo(uuid, passcode); err != nil {
				return
			}
			reply.Text = fmt.Sprintf("Started deletion of repo %s.\n", uuid)

		default:
			err = fmt.Errorf("Unknown repos command: %q", subcommand)
			return
		}

	case "repo":
		var uuidStr, subcommand string
		cmd.CommandArgs(1, &uuidStr, &subcommand)
		var uuid dvid.UUID
		if uuid, _, err = datastore.MatchingUUID(uuidStr); err != nil {
			return
		}

		switch subcommand {
		case "new":
			var typename, dataname string
			cmd.CommandArgs(3, &typename, &dataname)

			// Get TypeService
			var typeservice datastore.TypeService
			if typeservice, err = datastore.TypeServiceByName(dvid.TypeString(typename)); err != nil {
				return
			}

			// Create new data
			config := cmd.Settings()
			if _, err = datastore.NewData(uuid, typeservice, dvid.InstanceName(dataname), config); err != nil {
				return
			}
			reply.Text = fmt.Sprintf("Data %q [%s] added to node %s\n", dataname, typename, uuid)
			datastore.AddToRepoLog(uuid, []string{cmd.String()})

		case "rename":
			var name1, name2, passcode string
			cmd.CommandArgs(3, &name1, &name2, &passcode)
			oldname := dvid.InstanceName(name1)
			newname := dvid.InstanceName(name2)

			// Make sure this instance exists.
			if _, err = datastore.GetDataByUUIDName(uuid, oldname); err != nil {
				err = fmt.Errorf("Error trying to rename %q for UUID %s: %v", oldname, uuid, err)
				return
			}

			// Do the rename.
			if err = datastore.RenameData(uuid, oldname, newname, passcode); err != nil {
				err = fmt.Errorf("Error renaming data instance %q to %q: %v", oldname, newname, err)
				return
			}
			reply.Text = fmt.Sprintf("Renamed data instance %q to %q from DAG subgraph @ root %s\n", oldname, newname, uuid)

		case "branch":
			cmd.CommandArgs(3, &uuidStr)
			var assign *dvid.UUID
			if uuidStr == "" {
				assign = nil
			} else {
				u := dvid.UUID(uuidStr)
				assign = &u
			}
			var child dvid.UUID
			if child, err = datastore.NewVersion(uuid, fmt.Sprintf("branch of %s", uuid), assign); err != nil {
				return
			}
			reply.Text = fmt.Sprintf("Branch %s added to node %s\n", child, uuid)
			datastore.AddToRepoLog(uuid, []string{cmd.String()})

		case "merge":
			uuids := cmd.CommandArgs(2)
			parents := make([]dvid.UUID, len(uuids)+1)
			parents[0] = dvid.UUID(uuid)
			i := 1
			for _, uuid := range uuids {
				parents[i] = dvid.UUID(uuid)
				i++
			}
			var child dvid.UUID
			child, err = datastore.Merge(parents, fmt.Sprintf("merge of parents %v", parents), datastore.MergeConflictFree)
			if err != nil {
				return
			}
			reply.Text = fmt.Sprintf("Parents %v merged into node %s\n", parents, child)
			datastore.AddToRepoLog(uuid, []string{cmd.String()})

		case "migrate":
			var source, oldStoreName string
			cmd.CommandArgs(3, &source, &oldStoreName)
			var store dvid.Store
			store, err = storage.GetStoreByAlias(storage.Alias(oldStoreName))
			if err != nil {
				return
			}
			config := cmd.Settings()
			go func() {
				if err = datastore.MigrateInstance(uuid, dvid.InstanceName(source), store, config); err != nil {
					dvid.Errorf("migrate error: %v\n", err)
				}
			}()
			reply.Text = fmt.Sprintf("Started migration of uuid %s data instance %q from old store %q...\n",
				uuid, source, oldStoreName)

		case "copy":
			var source, target string
			cmd.CommandArgs(3, &source, &target)
			config := cmd.Settings()
			go func() {
				if err = datastore.CopyInstance(uuid, dvid.InstanceName(source), dvid.InstanceName(target), config); err != nil {
					dvid.Errorf("copy error: %v\n", err)
				}
			}()
			reply.Text = fmt.Sprintf("Started copy of uuid %s data instance %q to %q...\n", uuid, source, target)

		case "push":
			var target string
			cmd.CommandArgs(3, &target)
			config := cmd.Settings()
			go func() {
				if err = datastore.PushRepo(uuid, target, config); err != nil {
					dvid.Errorf("push error: %v\n", err)
				}
			}()
			reply.Text = fmt.Sprintf("Started push of repo %s to %q...\n", uuid, target)

			/*
				case "pull":
					var target string
					cmd.CommandArgs(3, &target)
					config := cmd.Settings()
					if err = datastore.Pull(uuid, target, config); err != nil {
						return
					}
					reply.Text = fmt.Sprintf("Repo %s pulled from %q\n", uuid, target)
			*/

		case "delete":
			var dataname, passcode string
			cmd.CommandArgs(3, &dataname, &passcode)

			// Make sure this instance exists.
			if _, err = datastore.GetDataByUUIDName(uuid, dvid.InstanceName(dataname)); err != nil {
				err = fmt.Errorf("Error trying to delete %q for UUID %s: %v", dataname, uuid, err)
				return
			}

			// Do the deletion. Under the hood, this modifies metadata immediately and launches
			// an asynchronous k/v deletion.
			if err = datastore.DeleteDataByName(uuid, dvid.InstanceName(dataname), passcode); err != nil {
				err = fmt.Errorf("Error deleting data instance %q: %v", dataname, err)
				return
			}
			reply.Text = fmt.Sprintf("Started deletion of data instance %q from repo with root %s\n", dataname, uuid)

		default:
			err = fmt.Errorf("Unknown command: %q", cmd)
			return
		}

	case "node":
		var uuidStr, descriptor string
		cmd.CommandArgs(1, &uuidStr, &descriptor)
		var uuid dvid.UUID
		if uuid, _, err = datastore.MatchingUUID(uuidStr); err != nil {
			return
		}

		// Get the DataService
		dataname := dvid.InstanceName(descriptor)
		var subcommand string
		cmd.CommandArgs(3, &subcommand)
		var dataservice datastore.DataService
		if dataservice, err = datastore.GetDataByUUIDName(uuid, dataname); err != nil {
			return
		}
		if subcommand == "help" {
			reply.Text = dataservice.Help()
			return
		}
		err = dataservice.DoRPC(*cmd, reply)
		return

	default:
		// Check to see if it's a name of a compiled data type, in which case we refer it to the data type.
		types := datastore.CompiledTypes()
		for name, typeservice := range types {
			if name == dvid.TypeString(cmd.Argument(0)) {
				err = typeservice.Do(*cmd, reply)
				return
			}
		}
		err = fmt.Errorf("Unknown command: '%s'", *cmd)
	}
	return
}
// PutLocal adds image data to a version node, altering underlying blocks if the image
// intersects the block.
//
// The image filename glob MUST specify absolute file paths that are visible to the server.
// This function is meant for mass ingestion of large data files; it is inappropriate
// to read gigabytes of data just to send it over the network to a local DVID.
func (d *Data) PutLocal(request datastore.Request, reply *datastore.Response) error {
	timedLog := dvid.NewTimeLog()

	// Parse the request
	var uuidStr, dataName, cmdStr, sourceStr, planeStr, offsetStr string
	filenames := request.CommandArgs(1, &uuidStr, &dataName, &cmdStr, &sourceStr, &planeStr, &offsetStr)
	if len(filenames) == 0 {
		return fmt.Errorf("Need to include at least one file to add: %s", request)
	}

	// Get offset
	offset, err := dvid.StringToPoint(offsetStr, ",")
	if err != nil {
		return fmt.Errorf("Illegal offset specification: %s: %v", offsetStr, err)
	}

	// Get list of files to add
	var addedFiles string
	if len(filenames) == 1 {
		addedFiles = filenames[0]
	} else {
		addedFiles = fmt.Sprintf("filenames: %s [%d more]", filenames[0], len(filenames)-1)
	}
	dvid.Debugf(addedFiles + "\n")

	// Get plane
	plane, err := dvid.DataShapeString(planeStr).DataShape()
	if err != nil {
		return err
	}

	// Get Repo and IDs
	uuid, versionID, err := datastore.MatchingUUID(uuidStr)
	if err != nil {
		return err
	}

	// Load and PUT each image.
	numSuccessful := 0
	for _, filename := range filenames {
		sliceLog := dvid.NewTimeLog()
		img, _, err := dvid.GoImageFromFile(filename)
		if err != nil {
			return fmt.Errorf("Error after %d images successfully added: %v", numSuccessful, err)
		}
		slice, err := dvid.NewOrthogSlice(plane, offset, dvid.RectSize(img.Bounds()))
		if err != nil {
			return fmt.Errorf("Unable to determine slice: %v", err)
		}
		vox, err := d.NewVoxels(slice, img)
		if err != nil {
			return err
		}
		storage.FileBytesRead <- len(vox.Data())
		if err = d.PutVoxels(versionID, vox, nil); err != nil {
			return err
		}
		sliceLog.Debugf("%s put local %s", d.DataName(), slice)
		numSuccessful++
		offset = offset.Add(dvid.Point3d{0, 0, 1})
	}
	if err := datastore.AddToNodeLog(uuid, []string{request.Command.String()}); err != nil {
		return err
	}
	timedLog.Infof("RPC put local (%s) completed", addedFiles)
	return nil
}
// DoRPC acts as a switchboard for RPC commands.
func (d *Data) DoRPC(req datastore.Request, reply *datastore.Response) error {
	switch req.TypeCommand() {
	case "load":
		if len(req.Command) < 5 {
			return fmt.Errorf("Poorly formatted load command. See command-line help.")
		}
		// Parse the request
		var uuidStr, dataName, cmdStr, offsetStr string
		filenames, err := req.FilenameArgs(1, &uuidStr, &dataName, &cmdStr, &offsetStr)
		if err != nil {
			return err
		}
		if len(filenames) == 0 {
			hostname, _ := os.Hostname()
			return fmt.Errorf("Couldn't find any files to add. Are they visible to DVID server on %s?", hostname)
		}

		// Get offset
		offset, err := dvid.StringToPoint(offsetStr, ",")
		if err != nil {
			return fmt.Errorf("Illegal offset specification: %s: %v", offsetStr, err)
		}

		// Get list of files to add
		var addedFiles string
		if len(filenames) == 1 {
			addedFiles = filenames[0]
		} else {
			addedFiles = fmt.Sprintf("filenames: %s [%d more]", filenames[0], len(filenames)-1)
		}
		dvid.Debugf(addedFiles + "\n")

		uuid, versionID, err := datastore.MatchingUUID(uuidStr)
		if err != nil {
			return err
		}
		if err = datastore.AddToNodeLog(uuid, []string{req.Command.String()}); err != nil {
			return err
		}
		return d.LoadImages(versionID, offset, filenames)

	case "put":
		if len(req.Command) < 7 {
			return fmt.Errorf("Poorly formatted put command. See command-line help.")
		}
		source := req.Command[4]
		switch source {
		case "local":
			return d.PutLocal(req, reply)
		case "remote":
			return fmt.Errorf("put remote not yet implemented")
		default:
			return fmt.Errorf("Unknown command. Data instance '%s' [%s] does not support '%s' command.",
				d.DataName(), d.TypeName(), req.TypeCommand())
		}

	case "roi":
		if len(req.Command) < 6 {
			return fmt.Errorf("Poorly formatted roi command. See command-line help.")
		}
		return d.ForegroundROI(req, reply)

	default:
		return fmt.Errorf("Unknown command. Data instance '%s' [%s] does not support '%s' command.",
			d.DataName(), d.TypeName(), req.TypeCommand())
	}
	return nil
}
// Do acts as a switchboard for remote command execution
func (c *RPCConnection) Do(cmd datastore.Request, reply *datastore.Response) error {
	if reply == nil {
		dvid.Debugf("reply is nil coming in!\n")
		return nil
	}
	if cmd.Name() == "" {
		return fmt.Errorf("Server error: got empty command!")
	}

	switch cmd.Name() {
	case "help":
		reply.Text = fmt.Sprintf(RPCHelpMessage, config.RPCAddress(), config.HTTPAddress())

	case "shutdown":
		Shutdown()
		// Make this process shutdown in a second to allow time for RPC to finish.
		// TODO -- Better way to do this?
		log.Printf("DVID server halted due to 'shutdown' command.")
		reply.Text = fmt.Sprintf("DVID server at %s has been halted.\n", config.RPCAddress())
		go func() {
			time.Sleep(1 * time.Second)
			os.Exit(0)
		}()

	case "types":
		if len(cmd.Command) == 1 {
			text := "\nData Types within this DVID Server\n"
			text += "----------------------------------\n"
			mapTypes, err := datastore.Types()
			if err != nil {
				return fmt.Errorf("Error trying to retrieve data types within this DVID server!")
			}
			for url, typeservice := range mapTypes {
				text += fmt.Sprintf("%-20s %s\n", typeservice.GetTypeName(), url)
			}
			reply.Text = text
		} else {
			if len(cmd.Command) != 3 || cmd.Command[2] != "help" {
				return fmt.Errorf("Unknown types command: %q", cmd.Command)
			}
			var typename string
			cmd.CommandArgs(1, &typename)
			typeservice, err := datastore.TypeServiceByName(dvid.TypeString(typename))
			if err != nil {
				return err
			}
			reply.Text = typeservice.Help()
		}

	case "repos":
		var subcommand, alias, description, uuidStr string
		cmd.CommandArgs(1, &subcommand, &alias, &description, &uuidStr)
		switch subcommand {
		case "new":
			var assign *dvid.UUID
			if uuidStr == "" {
				assign = nil
			} else {
				u := dvid.UUID(uuidStr)
				assign = &u
			}
			root, err := datastore.NewRepo(alias, description, assign)
			if err != nil {
				return err
			}
			if err := datastore.SetRepoAlias(root, alias); err != nil {
				return err
			}
			if err := datastore.SetRepoDescription(root, description); err != nil {
				return err
			}
			reply.Text = fmt.Sprintf("New repo %q created with head node %s\n", alias, root)
		default:
			return fmt.Errorf("Unknown repos command: %q", subcommand)
		}

	case "repo":
		var uuidStr, subcommand string
		cmd.CommandArgs(1, &uuidStr, &subcommand)
		uuid, _, err := datastore.MatchingUUID(uuidStr)
		if err != nil {
			return err
		}

		switch subcommand {
		case "new":
			var typename, dataname string
			cmd.CommandArgs(3, &typename, &dataname)

			// Get TypeService
			typeservice, err := datastore.TypeServiceByName(dvid.TypeString(typename))
			if err != nil {
				return err
			}

			// Create new data
			config := cmd.Settings()
			_, err = datastore.NewData(uuid, typeservice, dvid.InstanceName(dataname), config)
			if err != nil {
				return err
			}
			reply.Text = fmt.Sprintf("Data %q [%s] added to node %s\n", dataname, typename, uuid)
			datastore.AddToRepoLog(uuid, []string{cmd.String()})

		case "branch":
			cmd.CommandArgs(3, &uuidStr)
			var assign *dvid.UUID
			if uuidStr == "" {
				assign = nil
			} else {
				u := dvid.UUID(uuidStr)
				assign = &u
			}
			child, err := datastore.NewVersion(uuid, fmt.Sprintf("branch of %s", uuid), assign)
			if err != nil {
				return err
			}
			reply.Text = fmt.Sprintf("Branch %s added to node %s\n", child, uuid)
			datastore.AddToRepoLog(uuid, []string{cmd.String()})

		case "merge":
			uuids := cmd.CommandArgs(2)
			parents := make([]dvid.UUID, len(uuids)+1)
			parents[0] = dvid.UUID(uuid)
			i := 1
			for _, uuid := range uuids {
				parents[i] = dvid.UUID(uuid)
				i++
			}
			child, err := datastore.Merge(parents, fmt.Sprintf("merge of parents %v", parents), datastore.MergeConflictFree)
			if err != nil {
				return err
			}
			reply.Text = fmt.Sprintf("Parents %v merged into node %s\n", parents, child)
			datastore.AddToRepoLog(uuid, []string{cmd.String()})

		case "push":
			/*
				var target string
				cmd.CommandArgs(3, &target)
				config := cmd.Settings()
				if err = datastore.Push(repo, target, config); err != nil {
					return err
				}
				reply.Text = fmt.Sprintf("Repo %q pushed to %q\n", repo.RootUUID(), target)
			*/
			return fmt.Errorf("push command has been temporarily suspended")

		default:
			return fmt.Errorf("Unknown command: %q", cmd)
		}

	case "node":
		var uuidStr, descriptor string
		cmd.CommandArgs(1, &uuidStr, &descriptor)
		uuid, _, err := datastore.MatchingUUID(uuidStr)
		if err != nil {
			return err
		}

		// Get the DataService
		dataname := dvid.InstanceName(descriptor)
		var subcommand string
		cmd.CommandArgs(3, &subcommand)
		dataservice, err := datastore.GetDataByUUID(uuid, dataname)
		if err != nil {
			return err
		}
		if subcommand == "help" {
			reply.Text = dataservice.Help()
			return nil
		}
		return dataservice.DoRPC(cmd, reply)

	default:
		return fmt.Errorf("Unknown command: '%s'", cmd)
	}
	return nil
}
// CreateComposite creates a new rgba8 image by combining a hash of the labels with the grayscale.
func (d *Data) CreateComposite(request datastore.Request, reply *datastore.Response) error {
	timedLog := dvid.NewTimeLog()

	// Parse the request
	var uuidStr, dataName, cmdStr, grayscaleName, destName string
	request.CommandArgs(1, &uuidStr, &dataName, &cmdStr, &grayscaleName, &destName)

	// Get the version
	uuid, v, err := datastore.MatchingUUID(uuidStr)
	if err != nil {
		return err
	}

	// Log request
	if err = datastore.AddToNodeLog(uuid, []string{request.Command.String()}); err != nil {
		return err
	}

	// Get the grayscale data.
	dataservice, err := datastore.GetDataByUUIDName(uuid, dvid.InstanceName(grayscaleName))
	if err != nil {
		return err
	}
	grayscale, ok := dataservice.(*imageblk.Data)
	if !ok {
		return fmt.Errorf("%s is not the name of uint8 data", grayscaleName)
	}

	// Create a new rgba8blk data.
	var compservice datastore.DataService
	compservice, err = datastore.GetDataByUUIDName(uuid, dvid.InstanceName(destName))
	if err == nil {
		return fmt.Errorf("Data instance with name %q already exists", destName)
	}
	typeService, err := datastore.TypeServiceByName("rgba8blk")
	if err != nil {
		return fmt.Errorf("Could not get rgba8 type service from DVID")
	}
	config := dvid.NewConfig()
	compservice, err = datastore.NewData(uuid, typeService, dvid.InstanceName(destName), config)
	if err != nil {
		return err
	}
	composite, ok := compservice.(*imageblk.Data)
	if !ok {
		return fmt.Errorf("Error: %s was unable to be set to rgba8 data", destName)
	}

	// Iterate through all labels and grayscale chunks incrementally in Z, a layer at a time.
	wg := new(sync.WaitGroup)
	op := &compositeOp{grayscale, composite, v}
	chunkOp := &storage.ChunkOp{op, wg}

	store, err := d.GetOrderedKeyValueDB()
	if err != nil {
		return err
	}
	ctx := datastore.NewVersionedCtx(d, v)
	extents := d.Extents()
	blockBeg := imageblk.NewTKey(extents.MinIndex)
	blockEnd := imageblk.NewTKey(extents.MaxIndex)
	err = store.ProcessRange(ctx, blockBeg, blockEnd, chunkOp, storage.ChunkFunc(d.CreateCompositeChunk))
	wg.Wait()
	if err != nil {
		return err
	}

	// Set new mapped data to same extents.
	composite.Properties.Extents = grayscale.Properties.Extents
	if err := datastore.SaveDataByUUID(uuid, composite); err != nil {
		dvid.Infof("Could not save new data '%s': %v\n", destName, err)
	}

	timedLog.Infof("Created composite of %s and %s", grayscaleName, destName)
	return nil
}