// GetAssignedStore returns the store assigned based on (instance name, root uuid) or type. // In some cases, this store may include a caching wrapper if the data instance has been // configured to use groupcache. func GetAssignedStore(dataname dvid.InstanceName, root dvid.UUID, typename dvid.TypeString) (dvid.Store, error) { if !manager.setup { return nil, fmt.Errorf("Storage manager not initialized before requesting store for %s/%s", dataname, root) } dataid := dvid.GetDataSpecifier(dataname, root) store, found := manager.instanceStore[dataid] var err error if !found { store, err = assignedStoreByType(typename) if err != nil { return nil, fmt.Errorf("Cannot get assigned store for data %q, type %q", dataname, typename) } } // See if this is using caching and if so, establish a wrapper around it. if _, supported := manager.gcache.supported[dataid]; supported { store, err = wrapGroupcache(store, manager.gcache.cache) if err != nil { dvid.Errorf("Unable to wrap groupcache around store %s for data instance %q (uuid %s): %v\n", store, dataname, root, err) } else { dvid.Infof("Returning groupcache-wrapped store %s for data instance %q @ %s\n", store, dataname, root) } } return store, nil }
func setupGroupcache(config GroupcacheConfig) error { if config.GB == 0 { return nil } var cacheBytes int64 cacheBytes = int64(config.GB) << 30 pool := groupcache.NewHTTPPool(config.Host) if pool != nil { dvid.Infof("Initializing groupcache with %d GB at %s...\n", config.GB, config.Host) manager.gcache.cache = groupcache.NewGroup("immutable", cacheBytes, groupcache.GetterFunc( func(c groupcache.Context, key string, dest groupcache.Sink) error { // Use KeyValueDB defined as context. gctx, ok := c.(GroupcacheCtx) if !ok { return fmt.Errorf("bad groupcache context: expected GroupcacheCtx, got %v", c) } // First four bytes of key is instance ID to isolate groupcache collisions. tk := TKey(key[4:]) data, err := gctx.KeyValueDB.Get(gctx.Context, tk) if err != nil { return err } return dest.SetBytes(data) })) manager.gcache.supported = make(map[dvid.DataSpecifier]struct{}) for _, dataspec := range config.Instances { name := strings.Trim(dataspec, "\"") parts := strings.Split(name, ":") switch len(parts) { case 2: dataid := dvid.GetDataSpecifier(dvid.InstanceName(parts[0]), dvid.UUID(parts[1])) manager.gcache.supported[dataid] = struct{}{} default: dvid.Errorf("bad data instance specification %q given for groupcache support in config file\n", dataspec) } } // If we have additional peers, add them and start a listener via the HTTP port. if len(config.Peers) > 0 { peers := []string{config.Host} peers = append(peers, config.Peers...) pool.Set(peers...) dvid.Infof("Groupcache configuration has %d peers in addition to local host.\n", len(config.Peers)) dvid.Infof("Starting groupcache HTTP server on %s\n", config.Host) http.ListenAndServe(config.Host, http.HandlerFunc(pool.ServeHTTP)) } } return nil }
// Initialize the storage systems. Returns a bool + error where the bool is
// true if the metadata store is newly created and needs initialization.
// The map of store configurations should be keyed by either a datatype name,
// "default", or "metadata".
//
// NOTE(review): cmdline is accepted but never referenced within this function
// body — confirm whether it is still needed by callers.
func Initialize(cmdline dvid.Config, backend *Backend) (createdMetadata bool, err error) {
	dvid.Infof("backend:\n%v\n", *backend)

	// Open all the backend stores
	manager.stores = make(map[Alias]dvid.Store, len(backend.Stores))
	var gotDefault, gotMetadata, createdDefault, lastCreated bool
	var lastStore dvid.Store
	for alias, dbconfig := range backend.Stores {
		var store dvid.Store
		// Reject two aliases that resolve to the same underlying store configuration.
		for dbalias, db := range manager.stores {
			if db.Equal(dbconfig) {
				return false, fmt.Errorf("Store %q configuration is duplicate of store %q", alias, dbalias)
			}
		}
		// NOTE(review): this := declares a loop-local err shadowing the named
		// return; harmless here because failure returns explicitly below.
		store, created, err := NewStore(dbconfig)
		if err != nil {
			return false, fmt.Errorf("bad store %q: %v", alias, err)
		}
		// Remember the stores designated for metadata and default duty.
		if alias == backend.Metadata {
			gotMetadata = true
			createdMetadata = created
			manager.metadataStore = store
		}
		if alias == backend.Default {
			gotDefault = true
			createdDefault = created
			manager.defaultStore = store
		}
		manager.stores[alias] = store
		// Track the last-opened store so a single-store config can serve as default.
		lastStore = store
		lastCreated = created
	}

	// Return if we don't have default or metadata stores.  Should really be caught
	// at configuration loading, but here as well as double check.
	if !gotDefault {
		if len(backend.Stores) == 1 {
			// Sole configured store implicitly becomes the default.
			manager.defaultStore = lastStore
			createdDefault = lastCreated
		} else {
			return false, fmt.Errorf("either backend.default or a single store must be set in configuration TOML file")
		}
	}
	if !gotMetadata {
		// Metadata falls back to the default store when not explicitly assigned.
		manager.metadataStore = manager.defaultStore
		createdMetadata = createdDefault
	}
	dvid.Infof("Default store: %s\n", manager.defaultStore)
	dvid.Infof("Metadata store: %s\n", manager.metadataStore)

	// Setup the groupcache if specified.
	err = setupGroupcache(backend.Groupcache)
	if err != nil {
		return
	}

	// Make all data instance or datatype-specific store assignments.
	manager.instanceStore = make(map[dvid.DataSpecifier]dvid.Store)
	manager.datatypeStore = make(map[dvid.TypeString]dvid.Store)
	for dataspec, alias := range backend.Mapping {
		// "default" and "metadata" mappings were handled above.
		if dataspec == "default" || dataspec == "metadata" {
			continue
		}
		store, found := manager.stores[alias]
		if !found {
			err = fmt.Errorf("bad backend store alias: %q -> %q", dataspec, alias)
			return
		}
		// Cache the store for mapped datatype or data instance.
		// A bare name maps a datatype; "name:uuid" maps a specific data instance.
		name := strings.Trim(string(dataspec), "\"")
		parts := strings.Split(name, ":")
		switch len(parts) {
		case 1:
			manager.datatypeStore[dvid.TypeString(name)] = store
		case 2:
			dataid := dvid.GetDataSpecifier(dvid.InstanceName(parts[0]), dvid.UUID(parts[1]))
			manager.instanceStore[dataid] = store
		default:
			err = fmt.Errorf("bad backend data specification: %s", dataspec)
			return
		}
	}
	manager.setup = true

	// Setup the graph store
	var store dvid.Store
	store, err = assignedStoreByType("labelgraph")
	if err != nil {
		return
	}
	var ok bool
	// The graph store requires an ordered key-value backend underneath.
	kvdb, ok := store.(OrderedKeyValueDB)
	if !ok {
		return false, fmt.Errorf("assigned labelgraph store %q isn't ordered kv db", store)
	}
	manager.graphDB, err = NewGraphStore(kvdb)
	if err != nil {
		return false, err
	}
	// Verify the graph DB supports both mutation and retrieval interfaces.
	manager.graphSetter, ok = manager.graphDB.(GraphSetter)
	if !ok {
		return false, fmt.Errorf("Database %q cannot support a graph setter", kvdb)
	}
	manager.graphGetter, ok = manager.graphDB.(GraphGetter)
	if !ok {
		return false, fmt.Errorf("Database %q cannot support a graph getter", kvdb)
	}
	return
}