Example #1
func TestReloadMetadata(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid, _ := datastore.NewTestRepo()

	// Add data instances
	var config dvid.Config
	server.CreateTestInstance(t, uuid, "keyvalue", "foo", config)
	server.CreateTestInstance(t, uuid, "labelblk", "labels", config)
	server.CreateTestInstance(t, uuid, "roi", "someroi", config)

	// Reload the metadata
	apiStr := fmt.Sprintf("%sserver/reload-metadata", server.WebAPIPath)
	server.TestHTTP(t, "POST", apiStr, nil)

	// Make sure repo UUID still there
	jsonStr, err := datastore.MarshalJSON()
	if err != nil {
		t.Fatalf("can't get repos JSON: %v\n", err)
	}
	var jsonResp map[string]map[string]interface{}

	if err := json.Unmarshal(jsonStr, &jsonResp); err != nil {
		t.Fatalf("Unable to unmarshal repos info response: %s\n", jsonStr)
	}
	if len(jsonResp) != 1 {
		t.Errorf("reloaded repos had more than one repo: %v\n", jsonResp)
	}
	for k := range jsonResp {
		if dvid.UUID(k) != uuid {
			t.Fatalf("Expected uuid %s, got %s.  Full JSON:\n%v\n", uuid, k, jsonResp)
		}
	}

	// Make sure the data instances are still there.
	_, err = datastore.GetDataByUUIDName(uuid, "foo")
	if err != nil {
		t.Errorf("Couldn't get keyvalue data instance after reload\n")
	}
	_, err = datastore.GetDataByUUIDName(uuid, "labels")
	if err != nil {
		t.Errorf("Couldn't get labelblk data instance after reload\n")
	}
	_, err = datastore.GetDataByUUIDName(uuid, "someroi")
	if err != nil {
		t.Errorf("Couldn't get roi data instance after reload\n")
	}
}
Example #2
// GetByUUIDName returns a pointer to annotation data given a version (UUID) and data name.
func GetByUUIDName(uuid dvid.UUID, name dvid.InstanceName) (*Data, error) {
	source, err := datastore.GetDataByUUIDName(uuid, name)
	if err != nil {
		return nil, err
	}
	data, ok := source.(*Data)
	if !ok {
		return nil, fmt.Errorf("Instance '%s' is not an annotation datatype!", name)
	}
	return data, nil
}
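
A minimal usage sketch for GetByUUIDName follows. The helper name and its uuidStr parameter are illustrative assumptions, not part of the original code; it only combines calls already shown in these examples.

// loadAnnotations is a hypothetical helper: it resolves a version from a UUID string
// (as other examples do with datastore.MatchingUUID) and fetches the named annotation
// instance via GetByUUIDName.
func loadAnnotations(uuidStr string, name dvid.InstanceName) (*Data, error) {
	uuid, _, err := datastore.MatchingUUID(uuidStr)
	if err != nil {
		return nil, err
	}
	return GetByUUIDName(uuid, name)
}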
Example #3
func TestMultiscale2dRepoPersistence(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	// Make source
	uuid, _ := initTestRepo()
	makeGrayscale(uuid, t, "grayscale")

	// Make labels and set various properties
	config := dvid.NewConfig()
	config.Set("Placeholder", "true")
	config.Set("Format", "jpg")
	config.Set("Source", "grayscale")
	dataservice, err := datastore.NewData(uuid, mstype, "myimagetile", config)
	if err != nil {
		t.Errorf("Unable to create imagetile instance: %v\n", err)
	}
	msdata, ok := dataservice.(*Data)
	if !ok {
		t.Fatalf("Can't cast imagetile data service into imagetile.Data\n")
	}
	oldData := *msdata

	// Restart test datastore and see if datasets are still there.
	if err = datastore.SaveDataByUUID(uuid, msdata); err != nil {
		t.Fatalf("Unable to save repo during imagetile persistence test: %v\n", err)
	}
	datastore.CloseReopenTest()

	dataservice2, err := datastore.GetDataByUUIDName(uuid, "myimagetile")
	if err != nil {
		t.Fatalf("Can't get keyvalue instance from reloaded test db: %v\n", err)
	}
	msdata2, ok := dataservice2.(*Data)
	if !ok {
		t.Errorf("Returned new data instance 2 is not imagetile.Data\n")
	}

	if !reflect.DeepEqual(oldData.Properties, msdata2.Properties) {
		t.Errorf("Expected properties %v, got %v\n", oldData.Properties, msdata2.Properties)
	}
}
Example #4
func TestFloat32RepoPersistence(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid, _ := initTestRepo()

	// Make grayscale and set various properties
	config := dvid.NewConfig()
	config.Set("BlockSize", "12,13,14")
	config.Set("VoxelSize", "1.1,2.8,11")
	config.Set("VoxelUnits", "microns,millimeters,nanometers")
	dataservice, err := datastore.NewData(uuid, floatimgT, "floatimg", config)
	if err != nil {
		t.Errorf("Unable to create float32 instance: %s\n", err)
	}
	floatimg, ok := dataservice.(*Data)
	if !ok {
		t.Errorf("Can't cast float32 data service into Data\n")
	}
	oldData := *floatimg

	// Restart test datastore and see if datasets are still there.
	if err = datastore.SaveDataByUUID(uuid, floatimg); err != nil {
		t.Fatalf("Unable to save repo during floatimg persistence test: %v\n", err)
	}
	datastore.CloseReopenTest()

	dataservice2, err := datastore.GetDataByUUIDName(uuid, "floatimg")
	if err != nil {
		t.Fatalf("Can't get floatimg instance from reloaded test db: %v\n", err)
	}
	floatimg2, ok := dataservice2.(*Data)
	if !ok {
		t.Errorf("Returned new data instance 2 is not imageblk.Data\n")
	}
	if !oldData.Equals(floatimg2) {
		t.Errorf("Expected %v, got %v\n", oldData, *floatimg2)
	}
}
Example #5
func TestMultichan16RepoPersistence(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid, _ := initTestRepo()

	// Make labels and set various properties
	config := dvid.NewConfig()
	dataservice, err := datastore.NewData(uuid, dtype, "mymultichan16", config)
	if err != nil {
		t.Errorf("Unable to create multichan16 instance: %v\n", err)
	}
	mcdata, ok := dataservice.(*Data)
	if !ok {
		t.Errorf("Can't cast multichan16 data service into multichan16.Data\n")
	}
	oldData := *mcdata

	// Restart test datastore and see if datasets are still there.
	if err = datastore.SaveDataByUUID(uuid, mcdata); err != nil {
		t.Fatalf("Unable to save repo during multichan16 persistence test: %v\n", err)
	}
	datastore.CloseReopenTest()

	dataservice2, err := datastore.GetDataByUUIDName(uuid, "mymultichan16")
	if err != nil {
		t.Fatalf("Can't get multichan16 instance from reloaded test db: %v\n", err)
	}
	mcdata2, ok := dataservice2.(*Data)
	if !ok {
		t.Errorf("Returned new data instance 2 is not multichan16.Data\n")
	}
	if !oldData.Equals(mcdata2) {
		t.Errorf("Expected %v, got %v\n", oldData, *mcdata2)
	}
}
Example #6
func TestFloatDirectCalls(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid, versionID := initTestRepo()

	server.CreateTestInstance(t, uuid, "float32blk", "floatimg", dvid.Config{})
	dataservice, err := datastore.GetDataByUUIDName(uuid, "floatimg")
	if err != nil {
		t.Fatal(err)
	}
	floatimg, ok := dataservice.(*Data)
	if !ok {
		t.Fatalf("Can't convert dataservice %v into imageblk.Data\n", dataservice)
	}
	ctx := datastore.NewVersionedCtx(floatimg, versionID)

	// Create a block-aligned float32 test volume
	offset := dvid.Point3d{512, 32, 1024}
	size := dvid.Point3d{128, 96, 64}
	subvol := dvid.NewSubvolume(offset, size)
	testvol := createFloatTestVolume(t, uuid, "floatimg", offset, size)
	origData := make([]byte, len(testvol.data))
	copy(origData, testvol.data)

	// Store it into datastore at root
	v, err := floatimg.NewVoxels(subvol, testvol.data)
	if err != nil {
		t.Fatalf("Unable to make new floatimg voxels: %v\n", err)
	}
	if err = floatimg.IngestVoxels(versionID, 1, v, ""); err != nil {
		t.Errorf("Unable to put voxels for %s: %v\n", ctx, err)
	}

	// Read the stored image
	v2, err := floatimg.NewVoxels(subvol, nil)
	if err != nil {
		t.Errorf("Unable to make new grayscale ExtHandler: %v\n", err)
	}
	if err = floatimg.GetVoxels(versionID, v2, ""); err != nil {
		t.Errorf("Unable to get voxels for %s: %v\n", ctx, err)
	}

	// Make sure the retrieved image matches the original
	if v.Stride() != v2.Stride() {
		t.Errorf("Stride in retrieved subvol incorrect\n")
	}
	if v.Interpolable() != v2.Interpolable() {
		t.Errorf("Interpolable bool in retrieved subvol incorrect\n")
	}
	if !reflect.DeepEqual(v.Size(), v2.Size()) {
		t.Errorf("Size in retrieved subvol incorrect: %s vs expected %s\n",
			v2.Size(), v.Size())
	}
	if v.NumVoxels() != v2.NumVoxels() {
		t.Errorf("# voxels in retrieved is different: %d vs expected %d\n",
			v2.NumVoxels(), v.NumVoxels())
	}
	if len(v.Data()) != len(v2.Data()) {
		t.Errorf("Expected %d bytes in retrieved data, got %d bytes\n", len(v.Data()), len(v2.Data()))
	}
	received := v2.Data()
	//dvid.PrintNonZero("original value", origData)
	//dvid.PrintNonZero("returned value", data)
	for i := int64(0); i < v2.NumVoxels(); i++ {
		if received[i] != origData[i] {
			t.Logf("Data returned != data stored for voxel %d\n", i)
			t.Logf("Size of data: %d bytes from GET, %d bytes in PUT\n", len(received), len(origData))
			t.Fatalf("GET subvol (%d) != PUT subvol (%d) @ index %d", received[i], origData[i], i)
		}
	}
}
Example #7
// ServeHTTP handles all incoming HTTP requests for this data.
func (d *Data) ServeHTTP(uuid dvid.UUID, ctx *datastore.VersionedCtx, w http.ResponseWriter, r *http.Request) {
	timedLog := dvid.NewTimeLog()

	action := strings.ToLower(r.Method)
	switch action {
	case "get", "post":
		// Acceptable
	default:
		server.BadRequest(w, r, "Can only handle GET or POST HTTP verbs")
		return
	}

	// Break URL request into arguments
	url := r.URL.Path[len(server.WebAPIPath):]
	parts := strings.Split(url, "/")
	if len(parts[len(parts)-1]) == 0 {
		parts = parts[:len(parts)-1]
	}
	if len(parts) < 4 {
		server.BadRequest(w, r, "incomplete API request")
		return
	}

	switch parts[3] {
	case "help":
		w.Header().Set("Content-Type", "text/plain")
		fmt.Fprintln(w, d.Help())

	case "info":
		jsonBytes, err := d.MarshalJSON()
		if err != nil {
			server.BadRequest(w, r, err)
			return
		}
		w.Header().Set("Content-Type", "application/json")
		fmt.Fprint(w, string(jsonBytes))

	case "metadata":
		switch action {
		case "post":
			jsonBytes, err := ioutil.ReadAll(r.Body)
			if err != nil {
				server.BadRequest(w, r, err)
				return
			}
			if err := d.SetMetadata(uuid, jsonBytes); err != nil {
				server.BadRequest(w, r, err)
				return
			}

		case "get":
			if len(d.Levels) == 0 {
				server.BadRequest(w, r, "tile metadata for imagetile %q was not set\n", d.DataName())
				return
			}
			metadata := struct {
				MinTileCoord dvid.Point3d
				MaxTileCoord dvid.Point3d
				Levels       TileSpec
			}{
				d.MinTileCoord,
				d.MaxTileCoord,
				d.Levels,
			}
			jsonBytes, err := json.Marshal(metadata)
			if err != nil {
				server.BadRequest(w, r, err)
				return
			}
			w.Header().Set("Content-Type", "application/json")
			fmt.Fprint(w, string(jsonBytes))
		}
		timedLog.Infof("HTTP %s: metadata (%s)", r.Method, r.URL)

	case "tile":
		switch action {
		case "post":
			err := d.PostTile(ctx, w, r, parts)
			if err != nil {
				server.BadRequest(w, r, "Error in posting tile with URL %q: %v\n", url, err)
				return
			}
		case "get":
			if err := d.ServeTile(ctx, w, r, parts); err != nil {
				server.BadRequest(w, r, err)
				return
			}
		}
		timedLog.Infof("HTTP %s: tile (%s)", r.Method, r.URL)

	case "tilekey":
		switch action {
		case "get":
			var err error
			var hexkey string
			if hexkey, err = d.GetTileKey(ctx, w, r, parts); err != nil {
				server.BadRequest(w, r, err)
				return
			}
			w.Header().Set("Content-Type", "application/json")
			fmt.Fprintf(w, `{"key": "%s"}`, hexkey)
			timedLog.Infof("HTTP %s: tilekey (%s) returns %s", r.Method, r.URL, hexkey)
		default:
			server.BadRequest(w, r, fmt.Errorf("Cannot use HTTP %s for tilekey endpoint", action))
			return
		}

	case "raw", "isotropic":
		if action == "post" {
			server.BadRequest(w, r, "imagetile '%s' can only PUT tiles not images", d.DataName())
			return
		}
		if len(parts) < 7 {
			server.BadRequest(w, r, "%q must be followed by shape/size/offset", parts[3])
			return
		}
		shapeStr, sizeStr, offsetStr := parts[4], parts[5], parts[6]
		planeStr := dvid.DataShapeString(shapeStr)
		plane, err := planeStr.DataShape()
		if err != nil {
			server.BadRequest(w, r, err)
			return
		}
		if plane.ShapeDimensions() != 2 {
			server.BadRequest(w, r, "Quadtrees can only return 2d images not %s", plane)
			return
		}
		slice, err := dvid.NewSliceFromStrings(planeStr, offsetStr, sizeStr, "_")
		if err != nil {
			server.BadRequest(w, r, err)
			return
		}
		source, err := datastore.GetDataByUUIDName(uuid, d.Source)
		if err != nil {
			server.BadRequest(w, r, err)
			return
		}
		src, ok := source.(*imageblk.Data)
		if !ok {
			server.BadRequest(w, r, "Cannot construct imagetile for non-voxels data: %s", d.Source)
			return
		}
		img, err := d.GetImage(ctx, src, slice, parts[3] == "isotropic")
		if err != nil {
			server.BadRequest(w, r, err)
			return
		}
		var formatStr string
		if len(parts) >= 8 {
			formatStr = parts[7]
		}
		err = dvid.WriteImageHttp(w, img.Get(), formatStr)
		if err != nil {
			server.BadRequest(w, r, err)
			return
		}
		timedLog.Infof("HTTP %s: tile-accelerated %s %s (%s)", r.Method, planeStr, parts[3], r.URL)
	default:
		server.BadAPIRequest(w, r, d)
	}
}
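
A small smoke-test sketch in the style of Example #1 for two of the endpoints parsed above. The function name, the instance name "myimagetile", and the raw-slice coordinates are assumptions for illustration, and the sketch presumes the instance and its source data already exist.

// testImagetileEndpoints is a hypothetical helper exercising the "info" and "raw" cases
// of ServeHTTP above for an existing imagetile instance named "myimagetile".
func testImagetileEndpoints(t *testing.T, uuid dvid.UUID) {
	// GET the instance info (parts[3] == "info").
	infoURL := fmt.Sprintf("%snode/%s/myimagetile/info", server.WebAPIPath, uuid)
	server.TestHTTP(t, "GET", infoURL, nil)

	// GET a tile-accelerated XY slice: "raw" followed by shape/size/offset, underscore-delimited.
	rawURL := fmt.Sprintf("%snode/%s/myimagetile/raw/xy/512_512/0_0_100", server.WebAPIPath, uuid)
	server.TestHTTP(t, "GET", rawURL, nil)
}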
Example #8
// DefaultTileSpec returns the default tile spec that fully covers the source extents.  Scaling 0
// uses the original voxel resolution, and each subsequent scale is a 2x zoom out.
func (d *Data) DefaultTileSpec(uuidStr string) (TileSpec, error) {
	uuid, _, err := datastore.MatchingUUID(uuidStr)
	if err != nil {
		return nil, err
	}
	source, err := datastore.GetDataByUUIDName(uuid, d.Source)
	if err != nil {
		return nil, err
	}
	src, ok := source.(*imageblk.Data)
	if !ok {
		return nil, fmt.Errorf("Cannot construct tile spec for non-voxels data: %s", d.Source)
	}

	// Set scaling 0 based on extents and resolution of source.
	//extents := src.Extents()
	resolution := src.Properties.Resolution.VoxelSize

	if len(resolution) != 3 {
		return nil, fmt.Errorf("Cannot construct tile spec for non-3d data: voxel is %d-d",
			len(resolution))
	}

	// Expand min and max points to coincide with full tile boundaries of highest resolution.
	minTileCoord := src.MinPoint.(dvid.Chunkable).Chunk(DefaultTileSize)
	maxTileCoord := src.MaxPoint.(dvid.Chunkable).Chunk(DefaultTileSize)
	minTiledPt := minTileCoord.MinPoint(DefaultTileSize)
	maxTiledPt := maxTileCoord.MaxPoint(DefaultTileSize)
	sizeVolume := maxTiledPt.Sub(minTiledPt).AddScalar(1)

	dvid.Infof("Creating default multiscale tile spec for volume of size %s\n", sizeVolume)

	// For each dimension, calculate the number of scaling levels necessary to cover extent,
	// assuming we use the raw resolution at scaling 0.
	numScales := make([]int, 3)
	var maxScales int
	var dim uint8
	for dim = 0; dim < 3; dim++ {
		numPixels := float64(sizeVolume.Value(dim))
		tileSize := float64(DefaultTileSize.Value(dim))
		if numPixels <= tileSize {
			numScales[dim] = 1
		} else {
			numScales[dim] = int(math.Ceil(math.Log2(numPixels/tileSize))) + 1
		}
		if numScales[dim] > maxScales {
			maxScales = numScales[dim]
		}
	}

	// Initialize the tile level specification
	specs := make(TileSpec, maxScales)
	curRes := resolution
	levelMag := dvid.Point3d{2, 2, 2}
	var scaling Scaling
	for scaling = 0; scaling < Scaling(maxScales); scaling++ {
		for dim = 0; dim < 3; dim++ {
			if scaling >= Scaling(numScales[dim]) {
				levelMag[dim] = 1
			}
		}
		specs[scaling] = TileScaleSpec{
			LevelSpec{curRes, DefaultTileSize},
			levelMag,
		}
		curRes = curRes.MultScalar(2.0)
	}
	return specs, nil
}
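
A quick sketch of the per-dimension scale count computed above, assuming a 512-pixel DefaultTileSize and a made-up 4096 x 4096 x 1536 tiled volume (both values are illustrative, not taken from the original).

// exampleScaleCounts is a hypothetical check of numScales: ceil(log2(numPixels/tileSize)) + 1
// per dimension, with a floor of one level when the extent fits within a single tile.
func exampleScaleCounts() {
	numPixels := [3]float64{4096, 4096, 1536}
	tileSize := 512.0
	for dim, n := range numPixels {
		scales := 1
		if n > tileSize {
			scales = int(math.Ceil(math.Log2(n/tileSize))) + 1
		}
		fmt.Printf("dim %d: %d scaling levels\n", dim, scales) // prints 4, 4, and 3
	}
}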
Example #9
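// ConstructTiles generates multiscale tiles from this instance's source image data for the
// requested planes, saving the resulting tile spec and tile extents with the data instance.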
func (d *Data) ConstructTiles(uuidStr string, tileSpec TileSpec, request datastore.Request) error {
	config := request.Settings()
	uuid, versionID, err := datastore.MatchingUUID(uuidStr)
	if err != nil {
		return err
	}

	if err = datastore.AddToNodeLog(uuid, []string{request.Command.String()}); err != nil {
		return err
	}

	source, err := datastore.GetDataByUUIDName(uuid, d.Source)
	if err != nil {
		return fmt.Errorf("Cannot get source %q for %q tile construction: %v", d.Source, d.DataName(), err)
	}
	src, ok := source.(*imageblk.Data)
	if !ok {
		return fmt.Errorf("Cannot construct imagetile for non-voxels data: %s", d.Source)
	}

	// Get size of tile at lowest resolution.
	lastLevel := Scaling(len(tileSpec) - 1)
	loresSpec, found := tileSpec[lastLevel]
	if !found {
		return fmt.Errorf("Illegal tile spec.  Should have levels 0 to absent %d.", lastLevel)
	}
	var loresSize [3]float64
	for i := 0; i < 3; i++ {
		loresSize[i] = float64(loresSpec.Resolution[i]) * float64(DefaultTileSize[i])
	}
	loresMag := dvid.Point3d{1, 1, 1}
	for i := Scaling(0); i < lastLevel; i++ {
		levelMag := tileSpec[i].levelMag
		loresMag[0] *= levelMag[0]
		loresMag[1] *= levelMag[1]
		loresMag[2] *= levelMag[2]
	}

	// Get min and max points in terms of distance.
	var minPtDist, maxPtDist [3]float64
	for i := uint8(0); i < 3; i++ {
		minPtDist[i] = float64(src.MinPoint.Value(i)) * float64(src.VoxelSize[i])
		maxPtDist[i] = float64(src.MaxPoint.Value(i)) * float64(src.VoxelSize[i])
	}

	// Adjust min and max points for the tileable surface at lowest resolution.
	var minTiledPt, maxTiledPt dvid.Point3d
	for i := 0; i < 3; i++ {
		minInt, _ := math.Modf(minPtDist[i] / loresSize[i])
		maxInt, _ := math.Modf(maxPtDist[i] / loresSize[i])
		d.MinTileCoord[i] = int32(minInt)
		d.MaxTileCoord[i] = int32(maxInt)
		minTiledPt[i] = d.MinTileCoord[i] * DefaultTileSize[i] * loresMag[i]
		maxTiledPt[i] = (d.MaxTileCoord[i]+1)*DefaultTileSize[i]*loresMag[i] - 1
	}
	sizeVolume := maxTiledPt.Sub(minTiledPt).AddScalar(1)

	// Save the current tile specification
	d.Levels = tileSpec
	if err := datastore.SaveDataByUUID(uuid, d); err != nil {
		return err
	}

	// Setup swappable ExtData buffers (the stitched slices) so we can be generating tiles
	// at same time we are reading and stitching them.
	var bufferLock [2]sync.Mutex
	var sliceBuffers [2]*imageblk.Voxels
	var bufferNum int

	// Get the planes we should tile.
	planes, err := config.GetShapes("planes", ";")
	if err != nil {
		return err
	}
	if planes == nil {
		// If no planes are specified, construct imagetile for 3 orthogonal planes.
		planes = []dvid.DataShape{dvid.XY, dvid.XZ, dvid.YZ}
	}

	outF, err := d.putTileFunc(versionID)
	if err != nil {
		return err
	}

	// Get bounds for tiling if specified
	minx, maxx, err := config.GetRange("xrange", ",")
	if err != nil {
		return err
	}
	miny, maxy, err := config.GetRange("yrange", ",")
	if err != nil {
		return err
	}
	minz, maxz, err := config.GetRange("zrange", ",")
	if err != nil {
		return err
	}

	// sort the tile spec keys to iterate from highest to lowest resolution
	var sortedKeys []int
	for scaling := range tileSpec {
		sortedKeys = append(sortedKeys, int(scaling))
	}
	sort.Ints(sortedKeys)

	for _, plane := range planes {
		timedLog := dvid.NewTimeLog()
		offset := minTiledPt.Duplicate()

		switch {

		case plane.Equals(dvid.XY):
			width, height, err := plane.GetSize2D(sizeVolume)
			if err != nil {
				return err
			}
			dvid.Debugf("Tiling XY image %d x %d pixels\n", width, height)
			z0 := src.MinPoint.Value(2)
			z1 := src.MaxPoint.Value(2)
			if minz != nil && z0 < *minz {
				z0 = *minz
			}
			if maxz != nil && z1 > *maxz {
				z1 = *maxz
			}
			for z := z0; z <= z1; z++ {
				server.BlockOnInteractiveRequests("imagetile.ConstructTiles [xy]")

				sliceLog := dvid.NewTimeLog()
				offset = offset.Modify(map[uint8]int32{2: z})
				slice, err := dvid.NewOrthogSlice(dvid.XY, offset, dvid.Point2d{width, height})
				if err != nil {
					return err
				}
				bufferLock[bufferNum].Lock()
				sliceBuffers[bufferNum], err = src.NewVoxels(slice, nil)
				if err != nil {
					return err
				}
				if err = src.GetVoxels(versionID, sliceBuffers[bufferNum], ""); err != nil {
					return err
				}
				// Iterate through the different scales, extracting tiles at each resolution.
				go func(bufferNum int, offset dvid.Point) {
					defer bufferLock[bufferNum].Unlock()
					timedLog := dvid.NewTimeLog()
					for _, key := range sortedKeys {
						scaling := Scaling(key)
						levelSpec := tileSpec[scaling]
						if err != nil {
							dvid.Errorf("Error in tiling: %v\n", err)
							return
						}
						if err := d.extractTiles(sliceBuffers[bufferNum], offset, scaling, outF); err != nil {
							dvid.Errorf("Error in tiling: %v\n", err)
							return
						}
						if int(scaling) < len(tileSpec)-1 {
							if err := sliceBuffers[bufferNum].DownRes(levelSpec.levelMag); err != nil {
								dvid.Errorf("Error in tiling: %v\n", err)
								return
							}
						}
					}
					timedLog.Debugf("Tiled XY Tile using buffer %d", bufferNum)
				}(bufferNum, offset)

				sliceLog.Infof("Read XY Tile @ Z = %d, now tiling...", z)
				bufferNum = (bufferNum + 1) % 2
			}
			timedLog.Infof("Total time to generate XY Tiles")

		case plane.Equals(dvid.XZ):
			width, height, err := plane.GetSize2D(sizeVolume)
			if err != nil {
				return err
			}
			dvid.Debugf("Tiling XZ image %d x %d pixels\n", width, height)
			y0 := src.MinPoint.Value(1)
			y1 := src.MaxPoint.Value(1)
			if miny != nil && y0 < *miny {
				y0 = *miny
			}
			if maxy != nil && y1 > *maxy {
				y1 = *maxy
			}
			for y := y0; y <= y1; y++ {
				server.BlockOnInteractiveRequests("imagetile.ConstructTiles [xz]")

				sliceLog := dvid.NewTimeLog()
				offset = offset.Modify(map[uint8]int32{1: y})
				slice, err := dvid.NewOrthogSlice(dvid.XZ, offset, dvid.Point2d{width, height})
				if err != nil {
					return err
				}
				bufferLock[bufferNum].Lock()
				sliceBuffers[bufferNum], err = src.NewVoxels(slice, nil)
				if err != nil {
					return err
				}
				if err = src.GetVoxels(versionID, sliceBuffers[bufferNum], ""); err != nil {
					return err
				}
				// Iterate through the different scales, extracting tiles at each resolution.
				go func(bufferNum int, offset dvid.Point) {
					defer bufferLock[bufferNum].Unlock()
					timedLog := dvid.NewTimeLog()
					for _, key := range sortedKeys {
						scaling := Scaling(key)
						levelSpec := tileSpec[scaling]
						if err != nil {
							dvid.Errorf("Error in tiling: %v\n", err)
							return
						}
						if err := d.extractTiles(sliceBuffers[bufferNum], offset, scaling, outF); err != nil {
							dvid.Errorf("Error in tiling: %v\n", err)
							return
						}
						if int(scaling) < len(tileSpec)-1 {
							if err := sliceBuffers[bufferNum].DownRes(levelSpec.levelMag); err != nil {
								dvid.Errorf("Error in tiling: %v\n", err)
								return
							}
						}
					}
					timedLog.Debugf("Tiled XZ Tile using buffer %d", bufferNum)
				}(bufferNum, offset)

				sliceLog.Infof("Read XZ Tile @ Y = %d, now tiling...", y)
				bufferNum = (bufferNum + 1) % 2
			}
			timedLog.Infof("Total time to generate XZ Tiles")

		case plane.Equals(dvid.YZ):
			width, height, err := plane.GetSize2D(sizeVolume)
			if err != nil {
				return err
			}
			dvid.Debugf("Tiling YZ image %d x %d pixels\n", width, height)
			x0 := src.MinPoint.Value(0)
			x1 := src.MaxPoint.Value(0)
			if minx != nil && x0 < *minx {
				x0 = *minx
			}
			if maxx != nil && x1 > *maxx {
				x1 = *maxx
			}
			for x := x0; x <= x1; x++ {
				server.BlockOnInteractiveRequests("imagetile.ConstructTiles [yz]")

				sliceLog := dvid.NewTimeLog()
				offset = offset.Modify(map[uint8]int32{0: x})
				slice, err := dvid.NewOrthogSlice(dvid.YZ, offset, dvid.Point2d{width, height})
				if err != nil {
					return err
				}
				bufferLock[bufferNum].Lock()
				sliceBuffers[bufferNum], err = src.NewVoxels(slice, nil)
				if err != nil {
					return err
				}
				if err = src.GetVoxels(versionID, sliceBuffers[bufferNum], ""); err != nil {
					return err
				}
				// Iterate through the different scales, extracting tiles at each resolution.
				go func(bufferNum int, offset dvid.Point) {
					defer bufferLock[bufferNum].Unlock()
					timedLog := dvid.NewTimeLog()
					for _, key := range sortedKeys {
						scaling := Scaling(key)
						levelSpec := tileSpec[scaling]
						outF, err := d.putTileFunc(versionID)
						if err != nil {
							dvid.Errorf("Error in tiling: %v\n", err)
							return
						}
						if err := d.extractTiles(sliceBuffers[bufferNum], offset, scaling, outF); err != nil {
							dvid.Errorf("Error in tiling: %v\n", err)
							return
						}
						if int(scaling) < len(tileSpec)-1 {
							if err := sliceBuffers[bufferNum].DownRes(levelSpec.levelMag); err != nil {
								dvid.Errorf("Error in tiling: %v\n", err)
								return
							}
						}
					}
					timedLog.Debugf("Tiled YZ Tile using buffer %d", bufferNum)
				}(bufferNum, offset)

				sliceLog.Debugf("Read YZ Tile @ X = %d, now tiling...", x)
				bufferNum = (bufferNum + 1) % 2
			}
			timedLog.Infof("Total time to generate YZ Tiles")

		default:
			dvid.Infof("Skipping request to tile '%s'.  Unsupported.", plane)
		}
	}
	return nil
}
Example #10
// CreateComposite creates a new rgba8 image by combining a hash of the labels with the grayscale data.
func (d *Data) CreateComposite(request datastore.Request, reply *datastore.Response) error {
	timedLog := dvid.NewTimeLog()

	// Parse the request
	var uuidStr, dataName, cmdStr, grayscaleName, destName string
	request.CommandArgs(1, &uuidStr, &dataName, &cmdStr, &grayscaleName, &destName)

	// Get the version
	uuid, v, err := datastore.MatchingUUID(uuidStr)
	if err != nil {
		return err
	}

	// Log request
	if err = datastore.AddToNodeLog(uuid, []string{request.Command.String()}); err != nil {
		return err
	}

	// Get the grayscale data.
	dataservice, err := datastore.GetDataByUUIDName(uuid, dvid.InstanceName(grayscaleName))
	if err != nil {
		return err
	}
	grayscale, ok := dataservice.(*imageblk.Data)
	if !ok {
		return fmt.Errorf("%s is not the name of uint8 data", grayscaleName)
	}

	// Create a new rgba8blk data.
	var compservice datastore.DataService
	compservice, err = datastore.GetDataByUUIDName(uuid, dvid.InstanceName(destName))
	if err == nil {
		return fmt.Errorf("Data instance with name %q already exists", destName)
	}
	typeService, err := datastore.TypeServiceByName("rgba8blk")
	if err != nil {
		return fmt.Errorf("Could not get rgba8 type service from DVID")
	}
	config := dvid.NewConfig()
	compservice, err = datastore.NewData(uuid, typeService, dvid.InstanceName(destName), config)
	if err != nil {
		return err
	}
	composite, ok := compservice.(*imageblk.Data)
	if !ok {
		return fmt.Errorf("Error: %s was unable to be set to rgba8 data", destName)
	}

	// Iterate through all labels and grayscale chunks incrementally in Z, a layer at a time.
	wg := new(sync.WaitGroup)
	op := &compositeOp{grayscale, composite, v}
	chunkOp := &storage.ChunkOp{op, wg}

	store, err := d.GetOrderedKeyValueDB()
	if err != nil {
		return err
	}
	ctx := datastore.NewVersionedCtx(d, v)
	extents := d.Extents()
	blockBeg := imageblk.NewTKey(extents.MinIndex)
	blockEnd := imageblk.NewTKey(extents.MaxIndex)
	if err = store.ProcessRange(ctx, blockBeg, blockEnd, chunkOp, storage.ChunkFunc(d.CreateCompositeChunk)); err != nil {
		return err
	}
	wg.Wait()

	// Set new mapped data to same extents.
	composite.Properties.Extents = grayscale.Properties.Extents
	if err := datastore.SaveDataByUUID(uuid, composite); err != nil {
		dvid.Infof("Could not save new data '%s': %v\n", destName, err)
	}

	timedLog.Infof("Created composite of %s and %s", grayscaleName, destName)
	return nil
}
Example #11
func TestLabels(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	// Create testbed volume and data instances
	uuid, _ := initTestRepo()
	var config dvid.Config
	server.CreateTestInstance(t, uuid, "labelblk", "labels", config)
	server.CreateTestInstance(t, uuid, "labelvol", "bodies", config)

	// Establish syncs
	server.CreateTestSync(t, uuid, "labels", "bodies")
	server.CreateTestSync(t, uuid, "bodies", "labels")

	// Populate the labels, which should automatically populate the labelvol
	_ = createLabelTestVolume(t, uuid, "labels")

	if err := datastore.BlockOnUpdating(uuid, "labels"); err != nil {
		t.Fatalf("Error blocking on sync of labels: %v\n", err)
	}

	// Add annotations syncing with "labels" instance checking for deduplication.
	server.CreateTestInstance(t, uuid, "annotation", "mysynapses", config)
	server.CreateTestSync(t, uuid, "mysynapses", "labels,bodies,labels,bodies,labels,bodies")
	dataservice, err := datastore.GetDataByUUIDName(uuid, "mysynapses")
	if err != nil {
		t.Fatal(err)
	}
	data, ok := dataservice.(*Data)
	if !ok {
		t.Fatalf("Can't convert dataservice %v into datastore.Data\n", dataservice)
	}
	if len(data.SyncedData()) != 2 {
		t.Fatalf("Expected 2 syncs (uuids for labels and bodies], got %v\n", data.SyncedData())
	}

	// PUT first batch of synapses
	testJSON, err := json.Marshal(testData)
	if err != nil {
		t.Fatal(err)
	}
	url1 := fmt.Sprintf("%snode/%s/mysynapses/elements", server.WebAPIPath, uuid)
	server.TestHTTP(t, "POST", url1, strings.NewReader(string(testJSON)))

	// Test if labels were properly denormalized.  For the POST we have synchronized label denormalization.
	// If this were to become asynchronous, we'd want to block on updating like the labelblk<->labelvol sync.

	testResponseLabel(t, expectedLabel1, "%snode/%s/mysynapses/label/1?relationships=true", server.WebAPIPath, uuid)
	testResponseLabel(t, expectedLabel2, "%snode/%s/mysynapses/label/2?relationships=true", server.WebAPIPath, uuid)
	testResponseLabel(t, expectedLabel3, "%snode/%s/mysynapses/label/3?relationships=true", server.WebAPIPath, uuid)
	testResponseLabel(t, expectedLabel3NoRel, "%snode/%s/mysynapses/label/3", server.WebAPIPath, uuid)
	testResponseLabel(t, expectedLabel4, "%snode/%s/mysynapses/label/4?relationships=true", server.WebAPIPath, uuid)

	// Make change to labelblk and make sure our label synapses have been adjusted (case A)
	_ = modifyLabelTestVolume(t, uuid, "labels")

	if err := datastore.BlockOnUpdating(uuid, "mysynapses"); err != nil {
		t.Fatalf("Error blocking on sync of labels->annotations: %v\n", err)
	}

	testResponseLabel(t, expectedLabel1, "%snode/%s/mysynapses/label/1?relationships=true", server.WebAPIPath, uuid)
	testResponseLabel(t, expectedLabel2a, "%snode/%s/mysynapses/label/2?relationships=true", server.WebAPIPath, uuid)
	testResponseLabel(t, expectedLabel3a, "%snode/%s/mysynapses/label/3?relationships=true", server.WebAPIPath, uuid)
	testResponseLabel(t, expectedLabel4, "%snode/%s/mysynapses/label/4?relationships=true", server.WebAPIPath, uuid)
	testResponseLabel(t, expectedLabel4NoRel, "%snode/%s/mysynapses/label/4", server.WebAPIPath, uuid)

	// Make change to labelvol and make sure our label synapses have been adjusted (case B).
	// Merge 3a into 2a.
	testMerge := mergeJSON(`[2, 3]`)
	testMerge.send(t, uuid, "bodies")

	if err := datastore.BlockOnUpdating(uuid, "labels"); err != nil {
		t.Fatalf("Error blocking on sync of labels: %v\n", err)
	}

	if err := datastore.BlockOnUpdating(uuid, "mysynapses"); err != nil {
		t.Fatalf("Error blocking on sync of synapses: %v\n", err)
	}

	testResponseLabel(t, expectedLabel1, "%snode/%s/mysynapses/label/1?relationships=true", server.WebAPIPath, uuid)
	testResponseLabel(t, expectedLabel2b, "%snode/%s/mysynapses/label/2?relationships=true", server.WebAPIPath, uuid)
	testResponseLabel(t, nil, "%snode/%s/mysynapses/label/3?relationships=true", server.WebAPIPath, uuid)
	testResponseLabel(t, expectedLabel4, "%snode/%s/mysynapses/label/4?relationships=true", server.WebAPIPath, uuid)

	// Now split label 2b off and check if annotations also split

	// Create the sparsevol encoding for split area
	numspans := len(bodysplit.voxelSpans)
	rles := make(dvid.RLEs, numspans, numspans)
	for i, span := range bodysplit.voxelSpans {
		start := dvid.Point3d{span[2], span[1], span[0]}
		length := span[3] - span[2] + 1
		rles[i] = dvid.NewRLE(start, length)
	}
	buf := getBytesRLE(t, rles)

	// Submit the split sparsevol
	reqStr := fmt.Sprintf("%snode/%s/%s/split/%d?splitlabel=7", server.WebAPIPath, uuid, "bodies", 2)
	r := server.TestHTTP(t, "POST", reqStr, buf)
	jsonVal := make(map[string]uint64)
	if err := json.Unmarshal(r, &jsonVal); err != nil {
		t.Errorf("Unable to get new label from split.  Instead got: %v\n", jsonVal)
	}

	// Verify that the annotations are correct.
	if err := datastore.BlockOnUpdating(uuid, "mysynapses"); err != nil {
		t.Fatalf("Error blocking on sync of split->annotations: %v\n", err)
	}
	testResponseLabel(t, expectedLabel2c, "%snode/%s/mysynapses/label/2?relationships=true", server.WebAPIPath, uuid)
	url2 := fmt.Sprintf("%snode/%s/mysynapses/label/7?relationships=true", server.WebAPIPath, uuid)
	testResponseLabel(t, expectedLabel7, url2)

	// Change the name of the annotations.
	if err = datastore.RenameData(uuid, "mysynapses", "bodies", "foobar"); err == nil {
		t.Fatalf("Should have been prevented from renaming data 'mysynapses' to existing data 'bodies'!\n")
	}
	if err = datastore.RenameData(uuid, "mysynapses", "renamedData", "foobar"); err != nil {
		t.Fatalf("Error renaming annotations: %v\n", err)
	}

	// Make sure the old name is no longer there and the new one is.
	server.TestBadHTTP(t, "GET", url2, nil)
	testResponseLabel(t, expectedLabel2c, "%snode/%s/renamedData/label/2?relationships=true", server.WebAPIPath, uuid)

	// Try a coarse split.

	// Create the encoding for split area in block coordinates.
	rles = dvid.RLEs{
		dvid.NewRLE(dvid.Point3d{3, 1, 3}, 1),
	}
	buf = getBytesRLE(t, rles)

	// Submit the coarse split
	reqStr = fmt.Sprintf("%snode/%s/%s/split-coarse/2?splitlabel=8", server.WebAPIPath, uuid, "bodies")
	r = server.TestHTTP(t, "POST", reqStr, buf)
	jsonVal = make(map[string]uint64)
	if err := json.Unmarshal(r, &jsonVal); err != nil {
		t.Errorf("Unable to get new label from split.  Instead got: %v\n", jsonVal)
	}

	// Verify that the annotations are correct.
	if err := datastore.BlockOnUpdating(uuid, "renamedData"); err != nil {
		t.Fatalf("Error blocking on sync of split->annotations: %v\n", err)
	}
	testResponseLabel(t, expectedLabel2c, "%snode/%s/renamedData/label/8?relationships=true", server.WebAPIPath, uuid)
	testResponseLabel(t, nil, "%snode/%s/renamedData/label/2?relationships=true", server.WebAPIPath, uuid)

	// Delete a labeled annotation and make sure it's not in label
	delurl := fmt.Sprintf("%snode/%s/%s/element/20_30_40", server.WebAPIPath, uuid, "renamedData")
	server.TestHTTP(t, "DELETE", delurl, nil)
	testResponseLabel(t, afterDeleteOn7, "%snode/%s/%s/label/7?relationships=true", server.WebAPIPath, uuid, "renamedData")
}
Example #12
func TestROIRepoPersistence(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	uuid, _ := initTestRepo()

	// Add data
	config := dvid.NewConfig()
	dataservice1, err := datastore.NewData(uuid, roitype, "myroi", config)
	if err != nil {
		t.Errorf("Error creating new roi instance: %v\n", err)
	}
	roi1, ok := dataservice1.(*Data)
	if !ok {
		t.Errorf("Returned new data instance 1 is not roi.Data\n")
	}
	if roi1.DataName() != "myroi" {
		t.Errorf("New roi data instance name set incorrectly: %q != %q\n",
			roi1.DataName(), "myroi")
	}

	config.Set("BlockSize", "15,16,17")
	dataservice2, err := datastore.NewData(uuid, roitype, "myroi2", config)
	if err != nil {
		t.Errorf("Error creating new roi instance: %v\n", err)
	}
	roi2, ok := dataservice2.(*Data)
	if !ok {
		t.Errorf("Returned new data instance 2 is not roi.Data\n")
	}
	roi2.MinZ = 13
	roi2.MaxZ = 3098
	oldData := *roi2

	// Check instance IDs
	if roi1.InstanceID() == roi2.InstanceID() {
		t.Errorf("Instance IDs should be different: %d == %d\n",
			roi1.InstanceID(), roi2.InstanceID())
	}

	// Restart test datastore and see if datasets are still there.
	if err = datastore.SaveDataByUUID(uuid, dataservice1); err != nil {
		t.Fatalf("Unable to save data1 during ROI persistence test: %v\n", err)
	}
	if err = datastore.SaveDataByUUID(uuid, dataservice2); err != nil {
		t.Fatalf("Unable to save data2 during ROI persistence test: %v\n", err)
	}

	datastore.CloseReopenTest()

	dataservice3, err := datastore.GetDataByUUIDName(uuid, "myroi2")
	if err != nil {
		t.Fatalf("Can't get first ROI instance from reloaded test db: %v\n", err)
	}
	roi2new, ok := dataservice3.(*Data)
	if !ok {
		t.Errorf("Returned new data instance 3 is not roi.Data\n")
	}
	if !oldData.Equals(roi2new) {
		t.Errorf("Expected %v, got %v\n", oldData, *roi2new)
	}
}
Example #13
// switchboard for remote command execution
func handleCommand(cmd *datastore.Request) (reply *datastore.Response, err error) {
	if cmd.Name() == "" {
		err = fmt.Errorf("Server error: got empty command!")
		return
	}
	reply = new(datastore.Response)

	switch cmd.Name() {

	case "help":
		reply.Text = fmt.Sprintf(RPCHelpMessage, config.RPCAddress(), config.HTTPAddress())

	case "shutdown":
		dvid.Infof("DVID server halting due to 'shutdown' command.")
		reply.Text = fmt.Sprintf("DVID server at %s is being shutdown...\n", config.RPCAddress())
		// launch goroutine shutdown so we can concurrently return shutdown message to client.
		go Shutdown()

	case "types":
		if len(cmd.Command) == 1 {
			text := "\nData Types within this DVID Server\n"
			text += "----------------------------------\n"
			var mapTypes map[dvid.URLString]datastore.TypeService
			if mapTypes, err = datastore.Types(); err != nil {
				err = fmt.Errorf("Error trying to retrieve data types within this DVID server!")
				return
			}
			for url, typeservice := range mapTypes {
				text += fmt.Sprintf("%-20s %s\n", typeservice.GetTypeName(), url)
			}
			reply.Text = text
		} else {
			if len(cmd.Command) != 3 || cmd.Command[2] != "help" {
				err = fmt.Errorf("Unknown types command: %q", cmd.Command)
				return
			}
			var typename string
			var typeservice datastore.TypeService
			cmd.CommandArgs(1, &typename)
			if typeservice, err = datastore.TypeServiceByName(dvid.TypeString(typename)); err != nil {
				return
			}
			reply.Text = typeservice.Help()
		}

	case "repos":
		var subcommand string
		cmd.CommandArgs(1, &subcommand)

		switch subcommand {
		case "new":
			var alias, description string
			cmd.CommandArgs(2, &alias, &description)

			config := cmd.Settings()
			var uuidStr, passcode string
			var found bool
			if uuidStr, found, err = config.GetString("uuid"); err != nil {
				return
			}
			var assign *dvid.UUID
			if !found {
				assign = nil
			} else {
				uuid := dvid.UUID(uuidStr)
				assign = &uuid
			}
			if passcode, found, err = config.GetString("passcode"); err != nil {
				return
			}
			var root dvid.UUID
			root, err = datastore.NewRepo(alias, description, assign, passcode)
			if err != nil {
				return
			}
			if err = datastore.SetRepoAlias(root, alias); err != nil {
				return
			}
			if err = datastore.SetRepoDescription(root, description); err != nil {
				return
			}
			reply.Text = fmt.Sprintf("New repo %q created with head node %s\n", alias, root)

		case "delete":
			var uuidStr, passcode string
			cmd.CommandArgs(2, &uuidStr, &passcode)

			var uuid dvid.UUID
			if uuid, _, err = datastore.MatchingUUID(uuidStr); err != nil {
				return
			}
			if err = datastore.DeleteRepo(uuid, passcode); err != nil {
				return
			}
			reply.Text = fmt.Sprintf("Started deletion of repo %s.\n", uuid)

		default:
			err = fmt.Errorf("Unknown repos command: %q", subcommand)
			return
		}

	case "repo":
		var uuidStr, subcommand string
		cmd.CommandArgs(1, &uuidStr, &subcommand)
		var uuid dvid.UUID
		if uuid, _, err = datastore.MatchingUUID(uuidStr); err != nil {
			return
		}

		switch subcommand {
		case "new":
			var typename, dataname string
			cmd.CommandArgs(3, &typename, &dataname)

			// Get TypeService
			var typeservice datastore.TypeService
			if typeservice, err = datastore.TypeServiceByName(dvid.TypeString(typename)); err != nil {
				return
			}

			// Create new data
			config := cmd.Settings()
			if _, err = datastore.NewData(uuid, typeservice, dvid.InstanceName(dataname), config); err != nil {
				return
			}
			reply.Text = fmt.Sprintf("Data %q [%s] added to node %s\n", dataname, typename, uuid)
			datastore.AddToRepoLog(uuid, []string{cmd.String()})

		case "rename":
			var name1, name2, passcode string
			cmd.CommandArgs(3, &name1, &name2, &passcode)
			oldname := dvid.InstanceName(name1)
			newname := dvid.InstanceName(name2)

			// Make sure this instance exists.
			if _, err = datastore.GetDataByUUIDName(uuid, oldname); err != nil {
				err = fmt.Errorf("Error trying to rename %q for UUID %s: %v", oldname, uuid, err)
				return
			}

			// Do the rename.
			if err = datastore.RenameData(uuid, oldname, newname, passcode); err != nil {
				err = fmt.Errorf("Error renaming data instance %q to %q: %v", oldname, newname, err)
				return
			}
			reply.Text = fmt.Sprintf("Renamed data instance %q to %q from DAG subgraph @ root %s\n", oldname, newname, uuid)

		case "branch":
			cmd.CommandArgs(3, &uuidStr)

			var assign *dvid.UUID
			if uuidStr == "" {
				assign = nil
			} else {
				u := dvid.UUID(uuidStr)
				assign = &u
			}
			var child dvid.UUID
			if child, err = datastore.NewVersion(uuid, fmt.Sprintf("branch of %s", uuid), assign); err != nil {
				return
			}
			reply.Text = fmt.Sprintf("Branch %s added to node %s\n", child, uuid)
			datastore.AddToRepoLog(uuid, []string{cmd.String()})

		case "merge":
			uuids := cmd.CommandArgs(2)

			parents := make([]dvid.UUID, len(uuids)+1)
			parents[0] = dvid.UUID(uuid)
			i := 1
			for _, uuid := range uuids {
				parents[i] = dvid.UUID(uuid)
				i++
			}
			var child dvid.UUID
			child, err = datastore.Merge(parents, fmt.Sprintf("merge of parents %v", parents), datastore.MergeConflictFree)
			if err != nil {
				return
			}
			reply.Text = fmt.Sprintf("Parents %v merged into node %s\n", parents, child)
			datastore.AddToRepoLog(uuid, []string{cmd.String()})

		case "migrate":
			var source, oldStoreName string
			cmd.CommandArgs(3, &source, &oldStoreName)
			var store dvid.Store
			store, err = storage.GetStoreByAlias(storage.Alias(oldStoreName))
			if err != nil {
				return
			}
			config := cmd.Settings()
			go func() {
				if err = datastore.MigrateInstance(uuid, dvid.InstanceName(source), store, config); err != nil {
					dvid.Errorf("migrate error: %v\n", err)
				}
			}()
			reply.Text = fmt.Sprintf("Started migration of uuid %s data instance %q from old store %q...\n", uuid, source, oldStoreName)

		case "copy":
			var source, target string
			cmd.CommandArgs(3, &source, &target)
			config := cmd.Settings()
			go func() {
				if err = datastore.CopyInstance(uuid, dvid.InstanceName(source), dvid.InstanceName(target), config); err != nil {
					dvid.Errorf("copy error: %v\n", err)
				}
			}()
			reply.Text = fmt.Sprintf("Started copy of uuid %s data instance %q to %q...\n", uuid, source, target)

		case "push":
			var target string
			cmd.CommandArgs(3, &target)
			config := cmd.Settings()
			go func() {
				if err = datastore.PushRepo(uuid, target, config); err != nil {
					dvid.Errorf("push error: %v\n", err)
				}
			}()
			reply.Text = fmt.Sprintf("Started push of repo %s to %q...\n", uuid, target)

			/*
				case "pull":
					var target string
					cmd.CommandArgs(3, &target)
					config := cmd.Settings()
					if err = datastore.Pull(uuid, target, config); err != nil {
						return
					}
					reply.Text = fmt.Sprintf("Repo %s pulled from %q\n", uuid, target)
			*/

		case "delete":
			var dataname, passcode string
			cmd.CommandArgs(3, &dataname, &passcode)

			// Make sure this instance exists.
			if _, err = datastore.GetDataByUUIDName(uuid, dvid.InstanceName(dataname)); err != nil {
				err = fmt.Errorf("Error trying to delete %q for UUID %s: %v", dataname, uuid, err)
				return
			}

			// Do the deletion.  Under hood, modifies metadata immediately and launches async k/v deletion.
			if err = datastore.DeleteDataByName(uuid, dvid.InstanceName(dataname), passcode); err != nil {
				err = fmt.Errorf("Error deleting data instance %q: %v", dataname, err)
				return
			}
			reply.Text = fmt.Sprintf("Started deletion of data instance %q from repo with root %s\n", dataname, uuid)

		default:
			err = fmt.Errorf("Unknown command: %q", cmd)
			return
		}

	case "node":
		var uuidStr, descriptor string
		cmd.CommandArgs(1, &uuidStr, &descriptor)
		var uuid dvid.UUID
		if uuid, _, err = datastore.MatchingUUID(uuidStr); err != nil {
			return
		}

		// Get the DataService
		dataname := dvid.InstanceName(descriptor)
		var subcommand string
		cmd.CommandArgs(3, &subcommand)
		var dataservice datastore.DataService
		if dataservice, err = datastore.GetDataByUUIDName(uuid, dataname); err != nil {
			return
		}
		if subcommand == "help" {
			reply.Text = dataservice.Help()
			return
		}
		err = dataservice.DoRPC(*cmd, reply)
		return

	default:
		// Check to see if it's a name of a compiled data type, in which case we refer it to the data type.
		types := datastore.CompiledTypes()
		for name, typeservice := range types {
			if name == dvid.TypeString(cmd.Argument(0)) {
				err = typeservice.Do(*cmd, reply)
				return
			}
		}

		err = fmt.Errorf("Unknown command: '%s'", *cmd)
	}
	return
}
Example #14
// instanceSelector retrieves the data instance given its complete string name and
// forwards the request to that instance's HTTP handler.
func instanceSelector(c *web.C, h http.Handler) http.Handler {
	fn := func(w http.ResponseWriter, r *http.Request) {
		if httpUnavailable(w) {
			return
		}

		var err error
		dataname := dvid.InstanceName(c.URLParams["dataname"])
		uuid, ok := c.Env["uuid"].(dvid.UUID)
		if !ok {
			msg := fmt.Sprintf("Bad format for UUID %q\n", c.Env["uuid"])
			BadRequest(w, r, msg)
			return
		}
		data, err := datastore.GetDataByUUIDName(uuid, dataname)
		if err != nil {
			BadRequest(w, r, err)
			return
		}
		v, err := datastore.VersionFromUUID(uuid)
		if err != nil {
			BadRequest(w, r, err)
			return
		}

		if data.Versioned() {
			// Make sure we aren't trying mutable methods on committed nodes.
			locked, err := datastore.LockedUUID(uuid)
			if err != nil {
				BadRequest(w, r, err)
				return
			}
			if locked && data.IsMutationRequest(r.Method, c.URLParams["keyword"]) {
				BadRequest(w, r, "Cannot do %s on endpoint %q of locked node %s", r.Method, c.URLParams["keyword"], uuid)
				return
			}
		} else {
			// Map everything to root version.
			v, err = datastore.GetRepoRootVersion(v)
			if err != nil {
				BadRequest(w, r, err)
				return
			}
		}
		ctx := datastore.NewVersionedCtx(data, v)

		// Also set the web request information in case logging needs it downstream.
		ctx.SetRequestID(middleware.GetReqID(*c))

		// Handle DVID-wide query string commands like non-interactive call designations
		queryStrings := r.URL.Query()

		// All HTTP requests are interactive so let server tally request.
		interactive := queryStrings.Get("interactive")
		if interactive == "" || (interactive != "false" && interactive != "0") {
			GotInteractiveRequest()
		}

		// TODO: setup routing for data instances as well.
		if config != nil && config.AllowTiming() {
			w.Header().Set("Timing-Allow-Origin", "*")
		}
		data.ServeHTTP(uuid, ctx, w, r)
	}
	return http.HandlerFunc(fn)
}
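
A minimal wiring sketch, assuming the Goji v1 web.Mux whose middleware signature (func(*web.C, http.Handler) http.Handler) instanceSelector matches; the constructor name, route pattern, and dataHandler are hypothetical, not taken from the original.

// newDataMux is a hypothetical constructor: instanceSelector wraps every data-instance
// route on the sub-mux, and the named parameters line up with the c.URLParams lookups
// ("dataname", "keyword") used above. dataHandler would be the handler instanceSelector
// ultimately forwards to via data.ServeHTTP.
func newDataMux(dataHandler http.Handler) *web.Mux {
	m := web.New()
	m.Use(instanceSelector)
	m.Handle("/api/node/:uuid/:dataname/:keyword/*", dataHandler)
	return m
}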
Example #15
func repoResolveHandler(c web.C, w http.ResponseWriter, r *http.Request) {
	uuid, _, err := datastore.MatchingUUID(c.URLParams["uuid"])
	if err != nil {
		BadRequest(w, r, err)
		return
	}
	if r.Body == nil {
		BadRequest(w, r, "merge resolving requires JSON to be POSTed per API documentation")
		return
	}
	data, err := ioutil.ReadAll(r.Body)
	if err != nil {
		BadRequest(w, r, err)
		return
	}

	jsonData := struct {
		Data    []dvid.InstanceName `json:"data"`
		Note    string              `json:"note"`
		Parents []string            `json:"parents"`
	}{}
	if err := json.Unmarshal(data, &jsonData); err != nil {
		BadRequest(w, r, fmt.Sprintf("Malformed JSON request in body: %v", err))
		return
	}
	if len(jsonData.Data) == 0 {
		BadRequest(w, r, "Must specify at least one data instance using 'data' field")
		return
	}
	if len(jsonData.Parents) < 2 {
		BadRequest(w, r, "Must specify at least two parent UUIDs using 'parents' field")
		return
	}

	// Convert JSON of parents into []UUID and whether we need a child for them to add deletions.
	numParents := len(jsonData.Parents)
	oldParents := make([]dvid.UUID, numParents)
	newParents := make([]dvid.UUID, numParents) // UUID of any parent extension for deletions.
	for i, uuidFrag := range jsonData.Parents {
		uuid, _, err := datastore.MatchingUUID(uuidFrag)
		if err != nil {
			BadRequest(w, r, fmt.Sprintf("can't match parent %q: %v", uuidFrag, err))
			return
		}
		oldParents[i] = uuid
		newParents[i] = dvid.NilUUID
	}

	// Iterate through all k/v for given data instances, making sure we find any conflicts.
	// If any are found, remove them with first UUIDs taking priority.
	for _, name := range jsonData.Data {
		data, err := datastore.GetDataByUUIDName(uuid, name)
		if err != nil {
			BadRequest(w, r, err)
			return
		}

		if err := datastore.DeleteConflicts(uuid, data, oldParents, newParents); err != nil {
			BadRequest(w, r, fmt.Errorf("Conflict deletion error for data %q: %v", data.DataName(), err))
			return
		}
	}

	// If we have any new nodes to accommodate deletions, commit them.
	for i, oldUUID := range oldParents {
		if newParents[i] != oldUUID {
			err := datastore.Commit(newParents[i], "Version for deleting conflicts before merge", nil)
			if err != nil {
				BadRequest(w, r, "Error while creating new nodes to handle required deletions: %v", err)
				return
			}
		}
	}

	// Do the merge
	mt := datastore.MergeConflictFree
	newuuid, err := datastore.Merge(newParents, jsonData.Note, mt)
	if err != nil {
		BadRequest(w, r, err)
	} else {
		w.Header().Set("Content-Type", "application/json")
		fmt.Fprintf(w, "{%q: %q}", "child", newuuid)
	}
}