// CopyInstance copies a data instance locally, perhaps to a different storage
// engine if the new instance uses a different backend per a data instance-specific
// configuration.  (See sample config.example.toml file in root dvid source directory.)
func CopyInstance(uuid dvid.UUID, source, target dvid.InstanceName, c dvid.Config) error {
	if manager == nil {
		return ErrManagerNotInitialized
	}

	if source == "" || target == "" {
		return fmt.Errorf("both source and target instance names must be provided")
	}

	// Get any filter spec
	fstxt, found, err := c.GetString("filter")
	if err != nil {
		return err
	}
	var fs storage.FilterSpec
	if found {
		fs = storage.FilterSpec(fstxt)
	}

	// Get flatten or not
	transmit, found, err := c.GetString("transmit")
	if err != nil {
		return err
	}
	var flatten bool
	if transmit == "flatten" {
		flatten = true
	}

	// Get the source data instance.
	d1, err := manager.getDataByUUIDName(uuid, source)
	if err != nil {
		return err
	}

	// Create the target instance.
	t, err := TypeServiceByName(d1.TypeName())
	if err != nil {
		return err
	}
	d2, err := manager.newData(uuid, t, target, c)
	if err != nil {
		return err
	}

	// Populate the new data instance properties from source.
	copier, ok := d2.(PropertyCopier)
	if ok {
		if err := copier.CopyPropertiesFrom(d1, fs); err != nil {
			return err
		}
		if err := SaveDataByUUID(uuid, d2); err != nil {
			return err
		}
	}

	// We should be able to get the backing store (only ordered kv for now)
	storer, ok := d1.(storage.Accessor)
	if !ok {
		return fmt.Errorf("unable to push data %q: unable to access backing store", d1.DataName())
	}
	oldKV, err := storer.GetOrderedKeyValueDB()
	if err != nil {
		return fmt.Errorf("unable to get backing store for data %q: %v\n", d1.DataName(), err)
	}
	storer, ok = d2.(storage.Accessor)
	if !ok {
		return fmt.Errorf("unable to push data %q: unable to access backing store", d2.DataName())
	}
	newKV, err := storer.GetOrderedKeyValueDB()
	if err != nil {
		return fmt.Errorf("unable to get backing store for data %q: %v\n", d2.DataName(), err)
	}
	dvid.Infof("Copying data %q (%s) to data %q (%s)...\n", d1.DataName(), oldKV, d2.DataName(), newKV)

	// See if this data instance implements a Send filter.
	var filter storage.Filter
	filterer, ok := d1.(storage.Filterer)
	if ok && fs != "" {
		var err error
		filter, err = filterer.NewFilter(fs)
		if err != nil {
			return err
		}
	}

	// Copy data with optional datatype-specific filtering.
	return copyData(oldKV, newKV, d1, d2, uuid, filter, flatten)
}
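A minimal sketch of how CopyInstance might be invoked, assuming an initialized datastore (i.e., running inside the DVID server process) and the usual github.com/janelia-flyem/dvid import paths; the instance names, ROI name, and UUID below are placeholders.

// Hypothetical caller illustrating the "filter" and "transmit" config options
// read by CopyInstance.  Import paths assume the standard DVID layout; the
// instance names, ROI name, and UUID are placeholders.
package main

import (
	"log"

	"github.com/janelia-flyem/dvid/datastore"
	"github.com/janelia-flyem/dvid/dvid"
)

func copyWithROI(uuid dvid.UUID) error {
	c := dvid.NewConfig()
	// Only copy key-values intersecting the (hypothetical) ROI "myroi".
	c.Set("filter", "roi:myroi,"+string(uuid))
	// Request the "flatten" transmit mode, which sets the flatten flag
	// passed to copyData.
	c.Set("transmit", "flatten")
	return datastore.CopyInstance(uuid, "grayscale", "grayscale-copy", c)
}

func main() {
	// Placeholder UUID; a real caller would obtain it from the repo.
	if err := copyWithROI(dvid.UUID("28841c8277e044a7b187dda03e18da13")); err != nil {
		log.Fatal(err)
	}
}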
func TestTileCheck(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	// Make source
	uuid, _ := initTestRepo()
	makeGrayscale(uuid, t, "grayscale")

	// Make imagetile and set various properties
	config := dvid.NewConfig()
	config.Set("Placeholder", "true")
	config.Set("Format", "jpg")
	config.Set("Source", "grayscale")
	tileservice, err := datastore.NewData(uuid, mstype, "myimagetile", config)
	if err != nil {
		t.Errorf("Unable to create imagetile instance: %v\n", err)
	}
	msdata, ok := tileservice.(*Data)
	if !ok {
		t.Fatalf("Can't cast imagetile data service into imagetile.Data\n")
	}

	// Store Metadata
	url := fmt.Sprintf("%snode/%s/myimagetile/metadata", server.WebAPIPath, uuid)
	server.TestHTTP(t, "POST", url, bytes.NewBufferString(testMetadata2))

	// Create the ROI
	_, err = datastore.NewData(uuid, roitype, "myroi", dvid.NewConfig())
	if err != nil {
		t.Errorf("Error creating new roi instance: %v\n", err)
	}

	// PUT an ROI
	roiRequest := fmt.Sprintf("%snode/%s/myroi/roi", server.WebAPIPath, uuid)
	server.TestHTTP(t, "POST", roiRequest, getSpansJSON(testSpans))

	// Create fake filter
	spec := fmt.Sprintf("roi:myroi,%s", uuid)
	f, err := msdata.NewFilter(storage.FilterSpec(spec))
	if err != nil {
		t.Errorf("Couldn't make filter: %v\n", err)
	}
	if f == nil {
		t.Fatalf("Couldn't detect myroi data instance\n")
	}

	// Check various key values for proper spatial checks.
	var tx, ty int32
	tx = (205 * 32) / 512
	ty = (101 * 32) / 512
	tile := dvid.ChunkPoint3d{tx, ty, 101 * 32}
	scale := Scaling(0)
	tkv := &storage.TKeyValue{K: NewTKey(tile, dvid.XY, scale)}
	skip, err := f.Check(tkv)
	if err != nil {
		t.Errorf("Error on Check of key %q: %v\n", tkv.K, err)
	}
	if skip {
		t.Errorf("Expected false skip, got %v for tile %s\n", skip, tile)
	}

	tile = dvid.ChunkPoint3d{tx, ty, 106 * 32}
	tkv = &storage.TKeyValue{K: NewTKey(tile, dvid.XY, scale)}
	skip, err = f.Check(tkv)
	if err != nil {
		t.Errorf("Error on Check of key %q: %v\n", tkv.K, err)
	}
	if !skip {
		t.Errorf("Expected true skip, got %v for tile %s\n", skip, tile)
	}

	tx = (205 * 32) / 512
	ty = (121 * 32) / 512
	tile = dvid.ChunkPoint3d{tx, ty, 101 * 32}
	tkv = &storage.TKeyValue{K: NewTKey(tile, dvid.XY, scale)}
	skip, err = f.Check(tkv)
	if err != nil {
		t.Errorf("Error on Check of key %q: %v\n", tkv.K, err)
	}
	if !skip {
		t.Errorf("Expected true skip, got %v for tile %s\n", skip, tile)
	}

	tx = (225 * 32) / 512
	ty = (101 * 32) / 512
	tile = dvid.ChunkPoint3d{tx, ty, 101 * 32}
	tkv = &storage.TKeyValue{K: NewTKey(tile, dvid.XY, scale)}
	skip, err = f.Check(tkv)
	if err != nil {
		t.Errorf("Error on Check of key %q: %v\n", tkv.K, err)
	}
	if !skip {
		t.Errorf("Expected true skip, got %v for tile %s\n", skip, tile)
	}
}
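The expected skip results above follow from how ROI block coordinates map onto XY tile indices. Below is an illustrative sketch of that arithmetic, assuming the 32-voxel ROI block size and 512-pixel tile size implied by the constants in the test; the helper function is hypothetical and not part of the imagetile package.

// Illustrative sketch of the coordinate arithmetic in TestTileCheck: the ROI is
// specified in 32-voxel blocks while XY tiles cover 512x512 voxels, so a tile
// index is (blockCoord * blockSize) / tileSize.  Constants and helper are for
// illustration only.
package main

import "fmt"

const (
	blockSize = 32  // ROI block size in voxels (assumed from the test's spans)
	tileSize  = 512 // XY tile size in voxels (assumed from the test's metadata)
)

// tileIndex converts an ROI block coordinate to the index of the tile containing it.
func tileIndex(blockCoord int32) int32 {
	return (blockCoord * blockSize) / tileSize
}

func main() {
	// Block (205, 101) falls in tile (12, 6), matching tx, ty in the test.
	fmt.Println(tileIndex(205), tileIndex(101)) // 12 6
}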
func TestFilter(t *testing.T) {
	datastore.OpenTest()
	defer datastore.CloseTest()

	// Create testbed volume and data instances
	uuid, _ := initTestRepo()
	var config dvid.Config
	server.CreateTestInstance(t, uuid, "labelblk", "labels", config)
	d, err := datastore.NewData(uuid, labelvolT, "bodies", config)
	if err != nil {
		t.Fatalf("Unable to create labelvol instance: %s\n", err)
	}
	server.CreateTestSync(t, uuid, "labels", "bodies")
	server.CreateTestSync(t, uuid, "bodies", "labels")

	// Populate the labels, which should automatically populate the labelvol
	_ = createLabelTestVolume(t, uuid, "labels")

	if err := datastore.BlockOnUpdating(uuid, "bodies"); err != nil {
		t.Fatalf("Error blocking on sync of labels -> bodies: %v\n", err)
	}

	// Create an ROI that will be used for the filter test.
	server.CreateTestInstance(t, uuid, "roi", "myroi", config)
	roiRequest := fmt.Sprintf("%snode/%s/myroi/roi", server.WebAPIPath, uuid)
	server.TestHTTP(t, "POST", roiRequest, getROIReader())

	// Create the filter spec
	fs := storage.FilterSpec(fmt.Sprintf("roi:myroi,%s", uuid))
	var filter storage.Filter
	filterer, ok := d.(storage.Filterer)
	if !ok {
		t.Fatalf("labelvol instance does not implement storage.Filterer\n")
	}
	filter, err = filterer.NewFilter(fs)
	if err != nil {
		t.Fatalf("Can't create filter from spec %q: %v\n", fs, err)
	}
	if filter == nil {
		t.Fatalf("No filter could be created from spec %q\n", fs)
	}

	// Test the filter.
	tkv := storage.TKeyValue{K: NewTKey(23, dvid.ChunkPoint3d{0, 0, 0}.ToIZYXString())}
	skip, err := filter.Check(&tkv)
	if err != nil {
		t.Errorf("Error on filter check 1: %v\n", err)
	}
	if !skip {
		t.Errorf("Expected filter check 1 to skip, instead filter.Check() returned not skip")
	}

	tkv = storage.TKeyValue{K: NewTKey(23, dvid.ChunkPoint3d{1, 1, 1}.ToIZYXString())}
	skip, err = filter.Check(&tkv)
	if err != nil {
		t.Errorf("Error on filter check 2: %v\n", err)
	}
	if skip {
		t.Errorf("Expected filter check 2 to not skip!")
	}

	tkv = storage.TKeyValue{K: NewTKey(23, dvid.ChunkPoint3d{2, 1, 2}.ToIZYXString())}
	skip, err = filter.Check(&tkv)
	if err != nil {
		t.Errorf("Error on filter check 3: %v\n", err)
	}
	if skip {
		t.Errorf("Expected filter check 3 to not skip!")
	}

	tkv = storage.TKeyValue{K: NewTKey(23, dvid.ChunkPoint3d{3, 1, 1}.ToIZYXString())}
	skip, err = filter.Check(&tkv)
	if err != nil {
		t.Errorf("Error on filter check 4: %v\n", err)
	}
	if !skip {
		t.Errorf("Expected filter check 4 to skip!")
	}
}
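The skip/keep semantics exercised above are what a sender applies while streaming key-values. Below is a minimal sketch of that pattern, assuming the usual DVID storage import path; filterKVs and its caller are hypothetical and only illustrate the storage.Filter.Check contract.

// Hypothetical sketch of how a storage.Filter is consumed: key-values whose
// Check returns skip=true are dropped before transmission.  Import path
// assumes the standard DVID layout; filterKVs is a placeholder helper.
package main

import (
	"log"

	"github.com/janelia-flyem/dvid/storage"
)

// filterKVs returns only the key-values the filter says to keep.
func filterKVs(filter storage.Filter, kvs []*storage.TKeyValue) ([]*storage.TKeyValue, error) {
	if filter == nil {
		return kvs, nil // no filter spec given, keep everything
	}
	var kept []*storage.TKeyValue
	for _, kv := range kvs {
		skip, err := filter.Check(kv)
		if err != nil {
			return nil, err
		}
		if skip {
			continue
		}
		kept = append(kept, kv)
	}
	return kept, nil
}

func main() {
	// Placeholder: a real caller obtains the filter via storage.Filterer.NewFilter
	// and the key-values from a range query on the backing store.
	var filter storage.Filter
	kept, err := filterKVs(filter, nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("kept %d key-values", len(kept))
}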
// PushRepo pushes a Repo to a remote DVID server at the target address.
func PushRepo(uuid dvid.UUID, target string, config dvid.Config) error {
	if manager == nil {
		return ErrManagerNotInitialized
	}

	if target == "" {
		target = rpc.DefaultAddress
		dvid.Infof("No target specified for push, defaulting to %q\n", rpc.DefaultAddress)
	}

	// Get the full local repo
	thisRepo, err := manager.repoFromUUID(uuid)
	if err != nil {
		return err
	}

	// Get any filter
	filter, found, err := config.GetString("filter")
	if err != nil {
		return err
	}

	// Create a repo that is tailored by the push configuration, e.g.,
	// keeping just given data instances, etc.
	v, found := manager.uuidToVersion[uuid]
	if !found {
		return ErrInvalidUUID
	}
	txRepo, transmit, err := thisRepo.customize(v, config)
	if err != nil {
		return err
	}

	// Establish session with target, which may be itself
	s, err := rpc.NewSession(target, pushMessageID)
	if err != nil {
		return fmt.Errorf("Unable to connect (%s) for push: %s", target, err.Error())
	}
	defer s.Close() // TODO -- check if can hang if error occurs during job

	// Send the repo metadata, transmit type, and other useful data for remote server.
	dvid.Infof("Sending repo %s data to %q\n", uuid, target)
	repoSerialization, err := txRepo.GobEncode()
	if err != nil {
		return err
	}
	repoMsg := repoTxMsg{
		Session:  s.ID(),
		Transmit: transmit,
		UUID:     uuid,
		Repo:     repoSerialization,
	}
	resp, err := s.Call()(sendRepoMsg, repoMsg)
	if err != nil {
		return err
	}
	if resp == nil {
		return fmt.Errorf("push unnecessary -- versions at remote are already present")
	}

	// We should get back a version set to send, or nil = send all versions.
	versions, ok := resp.(map[dvid.VersionID]struct{})
	if !ok {
		return fmt.Errorf("received response during repo push that wasn't expected set of delta versions")
	}
	dvid.Debugf("Remote sent list of %d versions to send\n", len(versions))

	// For each data instance, send the data with optional datatype-specific filtering.
	ps := &PushSession{storage.FilterSpec(filter), versions, s, transmit}
	for _, d := range txRepo.data {
		dvid.Infof("Sending instance %q data to %q\n", d.DataName(), target)
		if err := d.PushData(ps); err != nil {
			dvid.Errorf("Aborting send of instance %q data\n", d.DataName())
			return err
		}
	}

	return nil
}
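A minimal sketch of how PushRepo might be driven, assuming an initialized datastore and the usual github.com/janelia-flyem/dvid import paths; the target address, ROI name, and UUID are placeholders.

// Hypothetical caller for PushRepo showing the "filter" config option it reads
// and an explicit target address.  Import paths assume the standard DVID layout;
// the address, ROI name, and UUID are placeholders.
package main

import (
	"log"

	"github.com/janelia-flyem/dvid/datastore"
	"github.com/janelia-flyem/dvid/dvid"
)

func pushWithROI(uuid dvid.UUID, target string) error {
	config := dvid.NewConfig()
	// Restrict pushed key-values to those intersecting a (hypothetical) ROI.
	config.Set("filter", "roi:myroi,"+string(uuid))
	return datastore.PushRepo(uuid, target, config)
}

func main() {
	uuid := dvid.UUID("28841c8277e044a7b187dda03e18da13") // placeholder UUID
	// Placeholder address; an empty target would fall back to rpc.DefaultAddress.
	if err := pushWithROI(uuid, "remote-dvid:8001"); err != nil {
		log.Fatal(err)
	}
}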