Example #1
// processEvents gets all changes relevant to labelblk, breaks up any multi-block op into
// separate block ops, and routes them over channels to index-specific handlers.
func (d *Data) processEvents() {
	var stop bool
	var wg *sync.WaitGroup
	for {
		select {
		case wg = <-d.syncDone:
			queued := len(d.syncCh)
			if queued > 0 {
				dvid.Infof("Received shutdown signal for %q sync events (%d in queue)\n", d.DataName(), queued)
				stop = true
			} else {
				dvid.Infof("Shutting down sync event handler for instance %q...\n", d.DataName())
				wg.Done()
				return
			}
		case msg := <-d.syncCh:
			switch msg.Event {
			case DownsizeCommitEvent:
				d.StopUpdate()
				mutID := msg.Delta.(uint64)
				go d.downsizeCommit(msg.Version, mutID) // async since we will wait on any in waitgroup.

			default:
				d.handleEvent(msg)
			}

			if stop && len(d.syncCh) == 0 {
				dvid.Infof("Shutting down sync even handler for instance %q after draining sync events.\n", d.DataName())
				wg.Done()
				return
			}
		}
	}
}
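
The loop above only exits after it receives a *sync.WaitGroup on d.syncDone and the queue is drained. A minimal caller-side sketch of that handshake follows; the method name shutdownSync is hypothetical and not part of the original code.

// shutdownSync is a hypothetical sketch of the shutdown handshake implied by
// processEvents: hand a WaitGroup over syncDone, then block until the handler
// has drained syncCh and called Done.
func (d *Data) shutdownSync() {
	wg := new(sync.WaitGroup)
	wg.Add(1)
	d.syncDone <- wg
	wg.Wait() // returns once all queued sync events have been processed
}
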
Example #2
// MigrateInstance migrates a data instance locally from an old storage
// engine to the current configured storage.  After completion of the copy,
// the data instance in the old storage is deleted.
func MigrateInstance(uuid dvid.UUID, source dvid.InstanceName, oldStore dvid.Store, c dvid.Config) error {
	if manager == nil {
		return ErrManagerNotInitialized
	}

	// Determine whether the copy should be flattened (transmit == "flatten").
	transmit, _, err := c.GetString("transmit")
	if err != nil {
		return err
	}
	var flatten bool
	if transmit == "flatten" {
		flatten = true
	}

	// Get the source data instance.
	d, err := manager.getDataByUUIDName(uuid, source)
	if err != nil {
		return err
	}

	// Get the current store for this data instance.
	storer, ok := d.(storage.Accessor)
	if !ok {
		return fmt.Errorf("unable to migrate data %q: unable to access backing store", d.DataName())
	}
	curKV, err := storer.GetOrderedKeyValueDB()
	if err != nil {
		return fmt.Errorf("unable to get backing store for data %q: %v\n", source, err)
	}

	// Get the old store.
	oldKV, ok := oldStore.(storage.OrderedKeyValueDB)
	if !ok {
		return fmt.Errorf("unable to migrate data %q from store %s which isn't ordered kv store", source, storer)
	}

	// Abort if the two stores are the same.
	if curKV == oldKV {
		return fmt.Errorf("old store for data %q seems same as current store", source)
	}

	// Migrate data asynchronously.
	go func() {
		if err := copyData(oldKV, curKV, d, nil, uuid, nil, flatten); err != nil {
			dvid.Errorf("error in migration of data %q: %v\n", source, err)
			return
		}
		// delete data off old store.
		dvid.Infof("Starting delete of instance %q from old storage %q\n", d.DataName(), oldKV)
		ctx := storage.NewDataContext(d, 0)
		if err := oldKV.DeleteAll(ctx, true); err != nil {
			dvid.Errorf("deleting instance %q from %q after copy to %q: %v\n", d.DataName(), oldKV, curKV, err)
			return
		}
	}()

	dvid.Infof("Migrating data %q from store %q to store %q ...\n", d.DataName(), oldKV, curKV)
	return nil
}
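
A minimal caller sketch for the flattened case is below. It assumes the old store has already been opened elsewhere, and that dvid.NewConfig and Config.Set behave as their names suggest; those two calls are assumptions, not something shown in this file.

// migrateFlattened is a hypothetical wrapper that sets "transmit=flatten",
// which MigrateInstance above reads back via c.GetString("transmit").
func migrateFlattened(uuid dvid.UUID, name dvid.InstanceName, oldStore dvid.Store) error {
	c := dvid.NewConfig()        // assumed constructor for dvid.Config
	c.Set("transmit", "flatten") // assumed setter; enables the flatten path above
	return MigrateInstance(uuid, name, oldStore, c)
}
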
Example #3
func (m *repoManager) loadMetadata() error {
	// Check the version of the metadata
	found, err := m.loadData(formatKey, &(m.formatVersion))
	if err != nil {
		return fmt.Errorf("Error in loading metadata format version: %v\n", err)
	}
	if found {
		dvid.Infof("Loading metadata with format version %d...\n", m.formatVersion)
	} else {
		dvid.Infof("Loading metadata without format version. Setting it to format version 0.\n")
		m.formatVersion = 0
	}

	switch m.formatVersion {
	case 0:
		if err := m.loadVersion0(); err != nil {
			return err
		}
	case 1:
		// We aren't changing any of the metadata, just the labelvol datatype props.
		if err := m.loadVersion0(); err != nil {
			return err
		}
	default:
		return fmt.Errorf("Unknown metadata format %d", m.formatVersion)
	}

	// Handle instance ID management
	if m.instanceIDGen == "sequential" && m.instanceIDStart > m.instanceID {
		m.instanceID = m.instanceIDStart
		return m.putNewIDs()
	}
	return nil
}
Example #4
// Shutdown handles graceful cleanup of server functions before exiting DVID.
// This may not be so graceful if the chunk handler uses cgo since the interrupt
// may be caught during cgo execution.
func Shutdown() {
	// Stop accepting HTTP requests.
	httpAvail = false

	// Wait for chunk handlers.
	waits := 0
	for {
		active := MaxChunkHandlers - len(HandlerToken)
		if waits >= 20 {
			dvid.Infof("Already waited for 20 seconds.  Continuing with shutdown...")
			break
		} else if active > 0 {
			dvid.Infof("Waiting for %d chunk handlers to finish...\n", active)
			waits++
		} else {
			dvid.Infof("No chunk handlers active. Proceeding...\n")
			break
		}
		time.Sleep(1 * time.Second)
	}
	dvid.Infof("Waiting 5 seconds for any HTTP requests to drain...\n")
	time.Sleep(5 * time.Second)
	datastore.Shutdown()
	dvid.BlockOnActiveCgo()
	rpc.Shutdown()
	dvid.Shutdown()
	shutdownCh <- struct{}{}
}
Example #5
// Processes each labelblk change as we get it.
func (d *Data) processEvents() {
	batcher, err := d.GetKeyValueBatcher()
	if err != nil {
		dvid.Errorf("handleBlockEvent %v\n", err)
		return
	}
	var stop bool
	var wg *sync.WaitGroup
	for {
		select {
		case wg = <-d.syncDone:
			queued := len(d.syncCh)
			if queued > 0 {
				dvid.Infof("Received shutdown signal for %q sync events (%d in queue)\n", d.DataName(), queued)
				stop = true
			} else {
				dvid.Infof("Shutting down sync event handler for instance %q...\n", d.DataName())
				wg.Done()
				return
			}
		case msg := <-d.syncCh:
			ctx := datastore.NewVersionedCtx(d, msg.Version)
			d.handleSyncMessage(ctx, msg, batcher)

			if stop && len(d.syncCh) == 0 {
				dvid.Infof("Shutting down sync even handler for instance %q after draining sync events.\n", d.DataName())
				wg.Done()
				return
			}
		}
	}
}
Example #6
// MergeLabels handles merging of any number of labels throughout the various label data
// structures.  It assumes that the merges aren't cascading, e.g., there is no attempt
// to merge label 3 into 4 and also 4 into 5.  The caller should have flattened the merges.
// TODO: Provide some indication that subset of labels are under evolution, returning
//   an "unavailable" status or 203 for non-authoritative response.  This might not be
//   feasible for clustered DVID front-ends due to coordination issues.
//
// EVENTS
//
// labels.MergeStartEvent occurs at very start of merge and transmits labels.DeltaMergeStart struct.
//
// labels.MergeBlockEvent occurs for every block of a merged label and transmits labels.DeltaMerge struct.
//
// labels.MergeEndEvent occurs at end of merge and transmits labels.DeltaMergeEnd struct.
//
func (d *Data) MergeLabels(v dvid.VersionID, m labels.MergeOp) error {
	dvid.Infof("Merging data %q (labels %s) into label %d ...\n", d.DataName(), m.Merged, m.Target)

	// Mark these labels as dirty until done, and make sure we can actually initiate the merge.
	if err := labels.MergeStart(d.getMergeIV(v), m); err != nil {
		return err
	}
	d.StartUpdate()

	// Signal that we are starting a merge.
	evt := datastore.SyncEvent{d.DataUUID(), labels.MergeStartEvent}
	msg := datastore.SyncMessage{labels.MergeStartEvent, v, labels.DeltaMergeStart{m}}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		d.StopUpdate()
		return err
	}

	// Asynchronously perform merge and handle any concurrent requests using the cache map until
	// labelvol and labelblk are updated and consistent.
	go func() {
		d.asyncMergeLabels(v, m)

		// Remove dirty labels and updating flag when done.
		labels.MergeStop(d.getMergeIV(v), m)
		d.StopUpdate()
		dvid.Infof("Finished with merge of labels %s.\n", m)
	}()

	return nil
}
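
The three events listed in the doc comment are what synced instances see on their sync channels. A hedged sketch of a consumer is below, modeled on the processEvents/handleBlockEvent loops shown elsewhere in these examples; the handler name and body are illustrative only.

// handleMergeEvent is a hypothetical consumer of the merge events documented
// above, in the style of the syncCh loops in the other examples here.
func (d *Data) handleMergeEvent(msg datastore.SyncMessage) {
	switch msg.Event {
	case labels.MergeStartEvent:
		delta, ok := msg.Delta.(labels.DeltaMergeStart)
		if !ok {
			dvid.Criticalf("expected DeltaMergeStart, got %v\n", msg.Delta)
			return
		}
		_ = delta // e.g., mark the affected labels as in flux
	case labels.MergeBlockEvent:
		delta, ok := msg.Delta.(labels.DeltaMerge)
		if !ok {
			dvid.Criticalf("expected DeltaMerge, got %v\n", msg.Delta)
			return
		}
		_ = delta // e.g., rewrite label data for this block
	case labels.MergeEndEvent:
		// e.g., clear any per-merge caches
	}
}
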
Example #7
// LoadTileSpec loads a TileSpec from JSON data.
// JSON data should look like:
// {
//    "0": { "Resolution": [3.1, 3.1, 40.0], "TileSize": [512, 512, 40] },
//    "1": { "Resolution": [6.2, 6.2, 40.0], "TileSize": [512, 512, 80] },
//    ...
// }
// Each entry is a scale level with an n-D resolution (units per voxel) and an n-D tile size in voxels.
func LoadTileSpec(jsonBytes []byte) (TileSpec, error) {
	var config specJSON
	err := json.Unmarshal(jsonBytes, &config)
	if err != nil {
		return nil, err
	}

	// Allocate the tile specs
	numLevels := len(config)
	specs := make(TileSpec, numLevels)
	dvid.Infof("Found %d scaling levels for imagetile specification.\n", numLevels)

	// Store resolution and tile sizes per level.
	var hires, lores float64
	for scaleStr, levelSpec := range config {
		dvid.Infof("scale %s, levelSpec %v\n", scaleStr, levelSpec)
		scaleLevel, err := strconv.Atoi(scaleStr)
		if err != nil {
			return nil, fmt.Errorf("Scaling '%s' needs to be a number for the scale level.", scaleStr)
		}
		if scaleLevel >= numLevels {
			return nil, fmt.Errorf("Tile levels must be consecutive integers from [0,Max]: Got scale level %d > # levels (%d)\n",
				scaleLevel, numLevels)
		}
		specs[Scaling(scaleLevel)] = TileScaleSpec{LevelSpec: levelSpec}
	}

	// Compute the magnification between each level.
	for scaling := Scaling(0); scaling < Scaling(numLevels-1); scaling++ {
		levelSpec, found := specs[scaling]
		if !found {
			return nil, fmt.Errorf("Could not find tile spec for level %d", scaling)
		}
		nextSpec, found := specs[scaling+1]
		if !found {
			return nil, fmt.Errorf("Could not find tile spec for level %d", scaling+1)
		}
		var levelMag dvid.Point3d
		for i, curRes := range levelSpec.Resolution {
			hires = float64(curRes)
			lores = float64(nextSpec.Resolution[i])
			rem := math.Remainder(lores, hires)
			if math.Abs(rem) > 0.001 { // Remainder can be negative, so compare its magnitude.
				return nil, fmt.Errorf("Resolutions between scale %d and %d aren't integral magnifications!",
					scaling, scaling+1)
			}
			mag := lores / hires
			if mag < 0.99 {
				return nil, fmt.Errorf("A resolution between scale %d and %d actually increases!",
					scaling, scaling+1)
			}
			mag += 0.5
			levelMag[i] = int32(mag)
		}
		levelSpec.levelMag = levelMag
		specs[scaling] = levelSpec
	}
	return specs, nil
}
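
Since the expected JSON layout is spelled out in the doc comment, a direct usage sketch is straightforward; the two scale levels below are taken from that comment.

// loadExampleSpec is a small usage sketch of LoadTileSpec using the JSON
// layout documented above.
func loadExampleSpec() (TileSpec, error) {
	jsonSpec := []byte(`{
		"0": { "Resolution": [3.1, 3.1, 40.0], "TileSize": [512, 512, 40] },
		"1": { "Resolution": [6.2, 6.2, 40.0], "TileSize": [512, 512, 80] }
	}`)
	return LoadTileSpec(jsonSpec)
}
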
Example #8
// Initialize creates a repositories manager that is handled through package functions.
func Initialize(initMetadata bool, iconfig *InstanceConfig) error {
	m := &repoManager{
		repoToUUID:      make(map[dvid.RepoID]dvid.UUID),
		versionToUUID:   make(map[dvid.VersionID]dvid.UUID),
		uuidToVersion:   make(map[dvid.UUID]dvid.VersionID),
		repos:           make(map[dvid.UUID]*repoT),
		repoID:          1,
		versionID:       1,
		iids:            make(map[dvid.InstanceID]DataService),
		instanceIDGen:   iconfig.Gen,
		instanceIDStart: iconfig.Start,
	}
	if iconfig.Gen == "" {
		m.instanceIDGen = "sequential"
	}
	if iconfig.Start > 1 {
		m.instanceID = iconfig.Start
	} else {
		m.instanceID = 1
	}

	var err error
	m.store, err = storage.MetaDataStore()
	if err != nil {
		return err
	}

	// Set the package variable.  We are good to go...
	manager = m

	if initMetadata {
		// Initialize repo management data in storage
		dvid.Infof("Initializing repo management data in storage...\n")
		if err := m.putNewIDs(); err != nil {
			return err
		}
		if err := m.putCaches(); err != nil {
			return err
		}
		m.formatVersion = RepoFormatVersion
	} else {
		// Load the repo metadata
		dvid.Infof("Loading metadata from storage...\n")
		if err = m.loadMetadata(); err != nil {
			return fmt.Errorf("Error loading metadata: %v", err)
		}

		// If there are any migrations registered, run them.
		migrator_mu.RLock()
		defer migrator_mu.RUnlock()

		for desc, f := range migrators {
			dvid.Infof("Running migration: %s\n", desc)
			go f()
		}
	}
	return nil
}
Example #9
// newLevelDB returns a leveldb backend, creating leveldb
// at the path if it doesn't already exist.
func (e Engine) newLevelDB(config dvid.StoreConfig) (*LevelDB, bool, error) {
	path, _, err := parseConfig(config)
	if err != nil {
		return nil, false, err
	}

	// Is there a database already at this path?  If not, create.
	var created bool
	if _, err := os.Stat(path); os.IsNotExist(err) {
		dvid.Infof("Database not already at path (%s). Creating directory...\n", path)
		created = true
		// Make a directory at the path.
		if err := os.MkdirAll(path, 0744); err != nil {
			return nil, true, fmt.Errorf("Can't make directory at %s: %v", path, err)
		}
	} else {
		dvid.Infof("Found directory at %s (err = %v)\n", path, err)
	}

	// Open the database
	dvid.StartCgo()
	defer dvid.StopCgo()

	opt, err := getOptions(config.Config)
	if err != nil {
		return nil, false, err
	}

	leveldb := &LevelDB{
		directory: path,
		config:    config,
		options:   opt,
	}

	dvid.Infof("Opening basholeveldb @ path %s\n", path)
	ldb, err := levigo.Open(path, opt.Options)
	if err != nil {
		return nil, false, err
	}
	leveldb.ldb = ldb

	// if we know it's newly created, just return.
	if created {
		return leveldb, created, nil
	}

	// otherwise, check if there's been any metadata or we need to initialize it.
	metadataExists, err := leveldb.metadataExists()
	if err != nil {
		leveldb.Close()
		return nil, false, err
	}

	return leveldb, !metadataExists, nil
}
Example #10
func setupGroupcache(config GroupcacheConfig) error {
	if config.GB == 0 {
		return nil
	}
	cacheBytes := int64(config.GB) << 30

	pool := groupcache.NewHTTPPool(config.Host)
	if pool != nil {
		dvid.Infof("Initializing groupcache with %d GB at %s...\n", config.GB, config.Host)
		manager.gcache.cache = groupcache.NewGroup("immutable", cacheBytes, groupcache.GetterFunc(
			func(c groupcache.Context, key string, dest groupcache.Sink) error {
				// Use KeyValueDB defined as context.
				gctx, ok := c.(GroupcacheCtx)
				if !ok {
					return fmt.Errorf("bad groupcache context: expected GroupcacheCtx, got %v", c)
				}

				// First four bytes of key is instance ID to isolate groupcache collisions.
				tk := TKey(key[4:])
				data, err := gctx.KeyValueDB.Get(gctx.Context, tk)
				if err != nil {
					return err
				}
				return dest.SetBytes(data)
			}))
		manager.gcache.supported = make(map[dvid.DataSpecifier]struct{})
		for _, dataspec := range config.Instances {
			name := strings.Trim(dataspec, "\"")
			parts := strings.Split(name, ":")
			switch len(parts) {
			case 2:
				dataid := dvid.GetDataSpecifier(dvid.InstanceName(parts[0]), dvid.UUID(parts[1]))
				manager.gcache.supported[dataid] = struct{}{}
			default:
				dvid.Errorf("bad data instance specification %q given for groupcache support in config file\n", dataspec)
			}
		}

		// If we have additional peers, add them and start a listener via the HTTP port.
		if len(config.Peers) > 0 {
			peers := []string{config.Host}
			peers = append(peers, config.Peers...)
			pool.Set(peers...)
			dvid.Infof("Groupcache configuration has %d peers in addition to local host.\n", len(config.Peers))
			dvid.Infof("Starting groupcache HTTP server on %s\n", config.Host)
			// Serve groupcache peer requests in the background so setup can continue.
			go func() {
				if err := http.ListenAndServe(config.Host, http.HandlerFunc(pool.ServeHTTP)); err != nil {
					dvid.Errorf("groupcache HTTP server error: %v\n", err)
				}
			}()
		}
	}
	return nil
}
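
A hedged configuration sketch follows, using only the GroupcacheConfig fields that setupGroupcache actually reads (GB, Host, Peers, Instances); the field types and example values are assumptions.

// exampleGroupcacheSetup is a hypothetical configuration sketch.  Note that
// each instance specifier must be "name:uuid" so it splits into exactly two
// parts, as required by the loop above.
func exampleGroupcacheSetup() {
	cfg := GroupcacheConfig{
		GB:        4, // cache size in GB, shifted to bytes above
		Host:      "http://localhost:8002",
		Peers:     []string{"http://peer1:8002", "http://peer2:8002"},
		Instances: []string{"grayscale:99ef22cd85f143f58a623bd22aad0ef7"},
	}
	if err := setupGroupcache(cfg); err != nil {
		dvid.Errorf("groupcache setup failed: %v\n", err)
	}
}
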
Example #11
// DoRPC handles the 'generate' command.
func (d *Data) DoRPC(request datastore.Request, reply *datastore.Response) error {
	if request.TypeCommand() != "generate" {
		return fmt.Errorf("Unknown command.  Data instance '%s' [%s] does not support '%s' command.",
			d.DataName(), d.TypeName(), request.TypeCommand())
	}
	var uuidStr, dataName, cmdStr string
	request.CommandArgs(1, &uuidStr, &dataName, &cmdStr)

	// Get the imagetile generation configuration from a file or stdin.
	var err error
	var tileSpec TileSpec
	if request.Input != nil {
		tileSpec, err = LoadTileSpec(request.Input)
		if err != nil {
			return err
		}
	} else {
		config := request.Settings()
		filename, found, err := config.GetString("filename")
		if err != nil {
			return err
		}
		if found {
			configData, err := storage.DataFromFile(filename)
			if err != nil {
				return err
			}
			tileSpec, err = LoadTileSpec(configData)
			if err != nil {
				return err
			}
			dvid.Infof("Using tile spec file: %s\n", filename)
		} else {
			dvid.Infof("Using default tile generation method since no tile spec file was given...\n")
			tileSpec, err = d.DefaultTileSpec(uuidStr)
			if err != nil {
				return err
			}
		}
	}
	reply.Text = fmt.Sprintf("Tiling data instance %q @ node %s...\n", dataName, uuidStr)
	go func() {
		err := d.ConstructTiles(uuidStr, tileSpec, request)
		if err != nil {
			dvid.Errorf("Cannot construct tiles for data instance %q @ node %s: %v\n", dataName, uuidStr, err)
		}
	}()
	return nil
}
Example #12
// Processes each change as we get it.
// TODO -- accumulate larger # of changes before committing to prevent
// excessive compaction time?  This assumes LSM storage engine, which
// might not always hold in future, so stick with incremental update
// until proven to be a bottleneck.
func (d *Data) handleBlockEvent() {
	store, err := d.GetOrderedKeyValueDB()
	if err != nil {
		dvid.Errorf("Data type labelvol had error initializing store: %v\n", err)
		return
	}
	batcher, ok := store.(storage.KeyValueBatcher)
	if !ok {
		dvid.Errorf("Data type labelvol requires batch-enabled store, which %q is not\n", store)
		return
	}
	var stop bool
	var wg *sync.WaitGroup
	for {
		select {
		case wg = <-d.syncDone:
			queued := len(d.syncCh)
			if queued > 0 {
				dvid.Infof("Received shutdown signal for %q sync events (%d in queue)\n", d.DataName(), queued)
				stop = true
			} else {
				dvid.Infof("Shutting down sync event handler for instance %q...\n", d.DataName())
				wg.Done()
				return
			}
		case msg := <-d.syncCh:
			d.StartUpdate()
			ctx := datastore.NewVersionedCtx(d, msg.Version)
			switch delta := msg.Delta.(type) {
			case imageblk.Block:
				d.ingestBlock(ctx, delta, batcher)
			case imageblk.MutatedBlock:
				d.mutateBlock(ctx, delta, batcher)
			case labels.DeleteBlock:
				d.deleteBlock(ctx, delta, batcher)
			default:
				dvid.Criticalf("Cannot sync labelvol from block event.  Got unexpected delta: %v\n", msg)
			}
			d.StopUpdate()

			if stop && len(d.syncCh) == 0 {
				dvid.Infof("Shutting down sync even handler for instance %q after draining sync events.\n", d.DataName())
				wg.Done()
				return
			}
		}
	}
}
Example #13
func (db *LevelDB) metadataExists() (bool, error) {
	var ctx storage.MetadataContext
	keyBeg, keyEnd := ctx.KeyRange()
	dvid.StartCgo()
	ro := levigo.NewReadOptions()
	it := db.ldb.NewIterator(ro)
	defer func() {
		it.Close()
		dvid.StopCgo()
	}()

	it.Seek(keyBeg)
	// Metadata exists if the first key at or beyond keyBeg is still within the metadata range.
	if it.Valid() && bytes.Compare(it.Key(), keyEnd) <= 0 {
		return true, nil
	}
	if err := it.GetError(); err != nil {
		return false, err
	}
	dvid.Infof("No metadata found for %s...\n", db)
	return false, nil
}
Example #14
// NewData returns a pointer to labelblk data.
func NewData(uuid dvid.UUID, id dvid.InstanceID, name dvid.InstanceName, c dvid.Config) (*Data, error) {
	imgblkData, err := dtype.Type.NewData(uuid, id, name, c)
	if err != nil {
		return nil, err
	}

	// Check if Raveler label.
	// TODO - Remove Raveler code outside of DVID.
	var labelType LabelType = Standard64bit
	s, found, err := c.GetString("LabelType")
	if found {
		switch strings.ToLower(s) {
		case "raveler":
			labelType = RavelerLabel
		case "standard":
		default:
			return nil, fmt.Errorf("unknown label type specified '%s'", s)
		}
	}

	dvid.Infof("Creating labelblk '%s' with %s", name, labelType)
	data := &Data{
		Data:     imgblkData,
		Labeling: labelType,
	}
	return data, nil
}
Example #15
// Shutdown halts all RPC servers.
func Shutdown() {
	for _, s := range servers {
		s.Stop()
	}
	dvid.Infof("Halted %d RPC servers.\n", len(servers))
	servers = nil
}
Example #16
func (p *Properties) setByConfig(config dvid.Config) error {
	s, found, err := config.GetString("BlockSize")
	if err != nil {
		return err
	}
	if found {
		p.BlockSize, err = dvid.StringToPoint3d(s, ",")
		if err != nil {
			return err
		}
	}
	s, found, err = config.GetString("VoxelSize")
	if err != nil {
		return err
	}
	if found {
		dvid.Infof("Changing resolution of voxels to %s\n", s)
		p.Resolution.VoxelSize, err = dvid.StringToNdFloat32(s, ",")
		if err != nil {
			return err
		}
	}
	s, found, err = config.GetString("VoxelUnits")
	if err != nil {
		return err
	}
	if found {
		p.Resolution.VoxelUnits, err = dvid.StringToNdString(s, ",")
		if err != nil {
			return err
		}
	}
	return nil
}
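
The comma-delimited string formats expected by setByConfig are easy to show; the dvid.NewConfig and Config.Set calls below are assumptions about that API rather than something demonstrated in this file.

// examplePropertySettings is a hypothetical sketch of the formats parsed above:
// 3d ints for BlockSize, n-d float32s for VoxelSize, and n-d strings for
// VoxelUnits, all comma-delimited.
func examplePropertySettings(p *Properties) error {
	config := dvid.NewConfig() // assumed constructor
	config.Set("BlockSize", "32,32,32")
	config.Set("VoxelSize", "8.0,8.0,8.0")
	config.Set("VoxelUnits", "nanometers,nanometers,nanometers")
	return p.setByConfig(config)
}
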
Example #17
// check if any metadata has been written into this store.
func (db *KVAutobus) metadataExists() (bool, error) {
	var ctx storage.MetadataContext
	kStart, kEnd := ctx.KeyRange()
	b64key1 := encodeKey(kStart)
	b64key2 := encodeKey(kEnd)
	url := fmt.Sprintf("%s/kvautobus/api/key_range/%s/%s/%s/", db.host, db.collection, b64key1, b64key2)
	dvid.Infof("metdataExists: doing GET on %s\n", url)

	timedLog := dvid.NewTimeLog()
	resp, err := db.client.Get(url)
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusNotFound {
		return false, nil // No keys in the metadata range, so no metadata exists.
	}

	r := msgp.NewReader(bufio.NewReader(resp.Body))
	var mks Ks
	if err := mks.DecodeMsg(r); err != nil {
		return false, err
	}
	timedLog.Infof("PROXY key_range metadata to %s returned %d (%d keys)\n", db.host, resp.StatusCode, len(mks))
	if len(mks) == 0 {
		return false, nil
	}
	return true, nil
}
Example #18
// GetAssignedStore returns the store assigned based on (instance name, root uuid) or type.
// In some cases, this store may include a caching wrapper if the data instance has been
// configured to use groupcache.
func GetAssignedStore(dataname dvid.InstanceName, root dvid.UUID, typename dvid.TypeString) (dvid.Store, error) {
	if !manager.setup {
		return nil, fmt.Errorf("Storage manager not initialized before requesting store for %s/%s", dataname, root)
	}
	dataid := dvid.GetDataSpecifier(dataname, root)
	store, found := manager.instanceStore[dataid]
	var err error
	if !found {
		store, err = assignedStoreByType(typename)
		if err != nil {
			return nil, fmt.Errorf("Cannot get assigned store for data %q, type %q", dataname, typename)
		}
	}

	// See if this is using caching and if so, establish a wrapper around it.
	if _, supported := manager.gcache.supported[dataid]; supported {
		store, err = wrapGroupcache(store, manager.gcache.cache)
		if err != nil {
			dvid.Errorf("Unable to wrap groupcache around store %s for data instance %q (uuid %s): %v\n", store, dataname, root, err)
		} else {
			dvid.Infof("Returning groupcache-wrapped store %s for data instance %q @ %s\n", store, dataname, root)
		}
	}
	return store, nil
}
Example #19
// Get returns a value given a key.
func (db *KVAutobus) Get(ctx storage.Context, tk storage.TKey) ([]byte, error) {
	if ctx == nil {
		return nil, fmt.Errorf("Received nil context in Get()")
	}
	if ctx.Versioned() {
		vctx, ok := ctx.(storage.VersionedCtx)
		if !ok {
			return nil, fmt.Errorf("Bad Get(): context is versioned but doesn't fulfill interface: %v", ctx)
		}

		// Get all versions of this key and return the most recent
		// log.Printf("  kvautobus versioned get of key %v\n", k)
		key := ctx.ConstructKey(tk)
		dvid.Infof("   Get on key: %s\n", hex.EncodeToString(key))
		values, err := db.getSingleKeyVersions(vctx, tk)
		// log.Printf("            got back %v\n", values)
		if err != nil {
			return nil, err
		}
		kv, err := vctx.VersionedKeyValue(values)
		// log.Printf("  after deversioning: %v\n", kv)
		if kv != nil {
			return kv.V, err
		}
		return nil, err
	} else {
		key := ctx.ConstructKey(tk)
		// log.Printf("  kvautobus unversioned get of key %v\n", key)
		v, err := db.RawGet(key)
		storage.StoreValueBytesRead <- len(v)
		return v, err
	}
}
Example #20
// Initialize creates a repositories manager that is handled through package functions.
func Initialize() error {
	m := &repoManager{
		repoToUUID:    make(map[dvid.RepoID]dvid.UUID),
		versionToUUID: make(map[dvid.VersionID]dvid.UUID),
		uuidToVersion: make(map[dvid.UUID]dvid.VersionID),
		repos:         make(map[dvid.UUID]*repoT),
		repoID:        1,
		versionID:     1,
		instanceID:    1,
	}

	var err error
	m.store, err = storage.MetaDataStore()
	if err != nil {
		return err
	}

	// Set the package variable.  We are good to go...
	manager = m

	// Load the repo metadata
	if err = m.loadMetadata(); err != nil {
		return fmt.Errorf("Error loading metadata: %v", err)
	}

	// If there are any migrations registered, run them.
	migrator_mu.RLock()
	defer migrator_mu.RUnlock()

	for desc, f := range migrators {
		dvid.Infof("Running migration: %s\n", desc)
		go f()
	}
	return nil
}
Example #21
// MergeStart handles label map caches during an active merge operation.  Note that if there are
// multiple synced label instances, the InstanceVersion will always be the labelblk instance.
// Multiple merges into a single label are allowed, but chained merges are not.  For example,
// you can merge label 1, 2, and 3 into 4, then later merge 6 into 4.  However, you cannot
// concurrently merge label 4 into some other label because there can be a race condition between
// 3 -> 4 and 4 -> X.
func MergeStart(iv dvid.InstanceVersion, op MergeOp) error {
	dvid.Infof("MergeStart starting for iv %v with op %v.  mergeCache: %v\n", iv, op, mc.m)

	// Don't allow a merge to start in the middle of a concurrent merge/split.
	if labelsSplitting.IsDirty(iv, op.Target) { // we might be able to relax this one.
		return fmt.Errorf("can't merge into label %d while it has an ongoing split", op.Target)
	}
	if mc.MergingToOther(iv, op.Target) {
		dvid.Errorf("can't merge label %d while it is currently merging into another label", op.Target)
		return fmt.Errorf("can't merge label %d while it is currently merging into another label", op.Target)
	}
	for merged := range op.Merged {
		if labelsSplitting.IsDirty(iv, merged) {
			return fmt.Errorf("can't merge label %d while it has an ongoing split", merged)
		}
		if labelsMerging.IsDirty(iv, merged) {
			dvid.Errorf("can't merge label %d while it is currently involved in a merge", merged)
			return fmt.Errorf("can't merge label %d while it is currently involved in a merge", merged)
		}
	}

	// Add the merge to the mapping.
	if err := mc.Add(iv, op); err != nil {
		return err
	}

	// Adjust the dirty counts on the involved labels.
	labelsMerging.AddMerge(iv, op)

	return nil
}
Example #22
// GetLabelRLEs returns the RLEs for a given label, where the key of the returned map
// is the block index in string format.
func (d *Data) GetLabelRLEs(v dvid.VersionID, label uint64) (dvid.BlockRLEs, error) {
	store, err := storage.MutableStore()
	if err != nil {
		return nil, fmt.Errorf("Data type labelvol had error initializing store: %v\n", err)
	}

	// Get the start/end indices for this body's KeyLabelSpatialMap (b + s) keys.
	begIndex := NewTKey(label, dvid.MinIndexZYX.ToIZYXString())
	endIndex := NewTKey(label, dvid.MaxIndexZYX.ToIZYXString())

	// Process all the b+s keys and their values, which contain RLE runs for that label.
	labelRLEs := dvid.BlockRLEs{}
	var f storage.ChunkFunc = func(chunk *storage.Chunk) error {
		// Get the block index where the fromLabel is present
		_, blockStr, err := DecodeTKey(chunk.K)
		if err != nil {
			return fmt.Errorf("Can't recover block index with chunk key %v: %v\n", chunk.K, err)
		}

		var blockRLEs dvid.RLEs
		if err := blockRLEs.UnmarshalBinary(chunk.V); err != nil {
			return fmt.Errorf("Unable to unmarshal RLE for label in block %v", chunk.K)
		}
		labelRLEs[blockStr] = blockRLEs
		return nil
	}
	ctx := datastore.NewVersionedCtx(d, v)
	err = store.ProcessRange(ctx, begIndex, endIndex, &storage.ChunkOp{}, f)
	if err != nil {
		return nil, err
	}
	dvid.Infof("Found %d blocks with label %d\n", len(labelRLEs), label)
	return labelRLEs, nil
}
Example #23
// Listen and serve HTTP requests using address and don't let keep-alive
// connections hog goroutines for more than an hour.
// See for discussion:
// http://stackoverflow.com/questions/10971800/golang-http-server-leaving-open-goroutines
func serveHttp(address, clientDir string) {
	var mode string
	if readonly {
		mode = " (read-only mode)"
	}
	dvid.Infof("Web server listening at %s%s ...\n", address, mode)
	if !webMux.routesSetup {
		initRoutes()
	}

	// Install our handler at the root of the standard net/http default mux.
	// This allows packages like expvar to continue working as expected.  (From goji.go)
	http.Handle("/", webMux)

	// TODO: Could have used "graceful" goji package but doesn't allow tailoring of timeouts
	// unless package is modified.  Not sure graceful features needed whereas tailoring
	// of server is more important.

	s := &http.Server{
		Addr:         address,
		WriteTimeout: WriteTimeout,
		ReadTimeout:  ReadTimeout,
	}
	log.Fatal(s.ListenAndServe())

	// graceful.HandleSignals()
	// if err := graceful.ListenAndServe(address, http.DefaultServeMux); err != nil {
	// 	log.Fatal(err)
	// }
	// graceful.Wait()
}
Example #24
// newRepo creates a new Repo with a new unique UUID unless one is provided as last parameter.
func (m *repoManager) newRepo(alias, description string, assign *dvid.UUID) (*repoT, error) {
	if assign != nil {
		// Make sure there's not already a repo with this UUID.
		if _, found := m.repos[*assign]; found {
			return nil, ErrExistingUUID
		}
	}
	uuid, v, err := m.newUUID(assign)
	if err != nil {
		return nil, err
	}
	id, err := m.newRepoID()
	if err != nil {
		return nil, err
	}
	r := newRepo(uuid, v, id)

	m.Lock()
	defer m.Unlock()
	m.repos[uuid] = r
	m.repoToUUID[id] = uuid

	r.alias = alias
	r.description = description

	if err := r.save(); err != nil {
		return r, err
	}
	dvid.Infof("Created and saved new repo %q, id %d\n", uuid, id)
	return r, m.putCaches()
}
Example #25
// Close handles any storage-specific shutdown procedures.
func Close() {
	if manager.setup {
		dvid.Infof("Closing datastore...\n")
		for _, store := range manager.uniqueDBs {
			store.Close()
		}
	}
}
Example #26
func (d *Data) GobDecode(b []byte) error {
	buf := bytes.NewBuffer(b)
	dec := gob.NewDecoder(buf)
	if err := dec.Decode(&(d.typename)); err != nil {
		return err
	}
	if err := dec.Decode(&(d.typeurl)); err != nil {
		return err
	}
	if err := dec.Decode(&(d.typeversion)); err != nil {
		return err
	}
	if err := dec.Decode(&(d.name)); err != nil {
		return err
	}
	if err := dec.Decode(&(d.id)); err != nil {
		return err
	}
	if err := dec.Decode(&(d.rootUUID)); err != nil {
		return err
	}
	if err := dec.Decode(&(d.compression)); err != nil {
		return err
	}
	if err := dec.Decode(&(d.checksum)); err != nil {
		return err
	}

	// legacy: we still load syncNames here, but it will be removed in the future.
	if err := dec.Decode(&(d.syncNames)); err != nil {
		return err
	}

	if err := dec.Decode(&(d.unversioned)); err != nil {
		dvid.Infof("Data %q had no explicit versioning flag: assume it's versioned.\n", d.name)
	}
	if err := dec.Decode(&(d.dataUUID)); err != nil {
		dvid.Infof("Data %q had no data UUID.\n", d.name)
	}
	if err := dec.Decode(&(d.syncData)); err != nil {
		if len(d.syncNames) != 0 {
			dvid.Infof("Data %q has legacy sync names, will convert to data UUIDs...\n", d.name)
		}
	}
	return nil
}
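
Gob decoding only works if the encoder wrote the fields in the same order, so a matching encoder sketch is shown below; whether the real GobEncode still writes the legacy syncNames field is an assumption here.

// GobEncode sketch: fields must be written in exactly the order GobDecode
// above reads them, including the legacy syncNames slot.
func (d *Data) GobEncode() ([]byte, error) {
	var buf bytes.Buffer
	enc := gob.NewEncoder(&buf)
	for _, field := range []interface{}{
		d.typename, d.typeurl, d.typeversion, d.name, d.id, d.rootUUID,
		d.compression, d.checksum, d.syncNames, d.unversioned, d.dataUUID, d.syncData,
	} {
		if err := enc.Encode(field); err != nil {
			return nil, err
		}
	}
	return buf.Bytes(), nil
}
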
Example #27
func openStore(create bool) {
	dvid.Infof("Opening test datastore.  Create = %v\n", create)
	if create {
		var err error
		testStore.backend, err = getTestStoreConfig()
		if err != nil {
			log.Fatalf("Unable to get testable storage configuration: %v\n", err)
		}
	}
	initMetadata, err := storage.Initialize(dvid.Config{}, testStore.backend)
	if err != nil {
		log.Fatalf("Can't initialize test datastore: %v\n", err)
	}
	if err := Initialize(initMetadata, &InstanceConfig{}); err != nil {
		log.Fatalf("Can't initialize datastore management: %v\n", err)
	}
	dvid.Infof("Storage initialized.  initMetadata = %v\n", initMetadata)
}
Example #28
// newLevelDB returns a leveldb backend, creating the leveldb at the path
// if it doesn't already exist.
func (e Engine) newLevelDB(config dvid.EngineConfig) (*LevelDB, bool, error) {
	// Create path depending on whether it is testing database or not.
	path := config.Path
	if config.Testing {
		path = filepath.Join(os.TempDir(), config.Path)
	}

	// Is there a database already at this path?  If not, create.
	var created bool
	if _, err := os.Stat(path); os.IsNotExist(err) {
		dvid.Infof("Database not already at path (%s). Creating directory...\n", path)
		created = true
		// Make a directory at the path.
		if err := os.MkdirAll(path, 0744); err != nil {
			return nil, true, fmt.Errorf("Can't make directory at %s: %v", path, err)
		}
	} else {
		dvid.Infof("Found directory at %s (err = %v)\n", path, err)
	}

	// Open the database
	dvid.StartCgo()
	defer dvid.StopCgo()

	opt, err := getOptions(config.Config)
	if err != nil {
		return nil, false, err
	}

	leveldb := &LevelDB{
		directory: path,
		config:    config,
		options:   opt,
	}

	ldb, err := levigo.Open(path, opt.Options)
	if err != nil {
		return nil, false, err
	}
	leveldb.ldb = ldb

	return leveldb, created, nil
}
Example #29
func CloseTest() {
	dvid.Infof("Closing and deleting test datastore...\n")
	testableEng := storage.GetTestableEngine()
	if testableEng == nil {
		log.Fatalf("Could not find a storage engine that was testable")
	}
	config, _ := testStore.backend.StoreConfig("default")
	testableEng.Delete(config)
	testStore.Unlock()
}
Example #30
func CloseTest() {
	dvid.Infof("Closing and deleting test datastore...\n")
	testableEng := storage.GetTestableEngine()
	if testableEng == nil {
		log.Fatalf("Could not find a storage engine that was testable")
	}
	testableEng.Delete(testStore.config.Mutable)

	testStore.Unlock()
}