func newRootFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (h http.Handler, err error) {
	u, err := user.Current()
	if err != nil {
		return
	}
	root := &RootHandler{
		BlobRoot:   conf.OptionalString("blobRoot", ""),
		SearchRoot: conf.OptionalString("searchRoot", ""),
		OwnerName:  conf.OptionalString("ownerName", u.Name),
	}
	root.Stealth = conf.OptionalBool("stealth", false)
	if err = conf.Validate(); err != nil {
		return
	}

	if root.BlobRoot != "" {
		bs, err := ld.GetStorage(root.BlobRoot)
		if err != nil {
			return nil, fmt.Errorf("Root handler's blobRoot of %q error: %v", root.BlobRoot, err)
		}
		root.Storage = bs
	}

	if root.SearchRoot != "" {
		h, _ := ld.GetHandler(root.SearchRoot)
		root.Search = h.(*search.Handler)
	}

	return root, nil
}
func newFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) { blobPrefix := config.RequiredString("blobSource") kvConfig := config.RequiredObject("storage") if err := config.Validate(); err != nil { return nil, err } kv, err := sorted.NewKeyValue(kvConfig) if err != nil { return nil, err } ix, err := New(kv) if err != nil { return nil, err } sto, err := ld.GetStorage(blobPrefix) if err != nil { ix.Close() return nil, err } ix.BlobSource = sto // Good enough, for now: ix.KeyFetcher = ix.BlobSource return ix, err }
func newHandlerFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (http.Handler, error) { indexPrefix := conf.RequiredString("index") // TODO: add optional help tips here? ownerBlobStr := conf.RequiredString("owner") devBlockStartupPrefix := conf.OptionalString("devBlockStartupOn", "") if err := conf.Validate(); err != nil { return nil, err } if devBlockStartupPrefix != "" { _, err := ld.GetHandler(devBlockStartupPrefix) if err != nil { return nil, fmt.Errorf("search handler references bogus devBlockStartupOn handler %s: %v", devBlockStartupPrefix, err) } } indexHandler, err := ld.GetHandler(indexPrefix) if err != nil { return nil, fmt.Errorf("search config references unknown handler %q", indexPrefix) } indexer, ok := indexHandler.(Index) if !ok { return nil, fmt.Errorf("search config references invalid indexer %q (actually a %T)", indexPrefix, indexHandler) } ownerBlobRef, ok := blob.Parse(ownerBlobStr) if !ok { return nil, fmt.Errorf("search 'owner' has malformed blobref %q; expecting e.g. sha1-xxxxxxxxxxxx", ownerBlobStr) } return &Handler{ index: indexer, owner: ownerBlobRef, }, nil }
func newMongoIndexFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) { blobPrefix := config.RequiredString("blobSource") mgw := &MongoWrapper{ Servers: config.OptionalString("host", "localhost"), Database: config.RequiredString("database"), User: config.OptionalString("user", ""), Password: config.OptionalString("password", ""), Collection: collectionName, } if err := config.Validate(); err != nil { return nil, err } sto, err := ld.GetStorage(blobPrefix) if err != nil { return nil, err } ix, err := newMongoIndex(mgw) if err != nil { return nil, err } ix.BlobSource = sto // Good enough, for now: ix.KeyFetcher = ix.BlobSource if wipe, _ := strconv.ParseBool(os.Getenv("CAMLI_MONGO_WIPE")); wipe { err = ix.Storage().Delete("") if err != nil { return nil, err } } return ix, err }
func newFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (storage blobserver.Storage, err error) { sto := &condStorage{} receive := conf.OptionalStringOrObject("write") read := conf.RequiredString("read") remove := conf.OptionalString("remove", "") if err := conf.Validate(); err != nil { return nil, err } if receive != nil { sto.storageForReceive, err = buildStorageForReceive(ld, receive) if err != nil { return } } sto.read, err = ld.GetStorage(read) if err != nil { return } if remove != "" { sto.remove, err = ld.GetStorage(remove) if err != nil { return } } return sto, nil }
func newFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) { blobPrefix := config.RequiredString("blobSource") kvConfig := config.RequiredObject("storage") if err := config.Validate(); err != nil { return nil, err } kv, err := sorted.NewKeyValue(kvConfig) if err != nil { return nil, err } sto, err := ld.GetStorage(blobPrefix) if err != nil { return nil, err } ix, err := New(kv) // TODO(mpl): next time we need to do another fix, make a new error // type that lets us apply the needed fix depending on its value or // something. For now just one value/fix. if err == errMissingWholeRef { // TODO: maybe we don't want to do that automatically. Brad says // we have to think about the case on GCE/CoreOS in particular. if err := ix.fixMissingWholeRef(sto); err != nil { ix.Close() return nil, fmt.Errorf("could not fix missing wholeRef entries: %v", err) } ix, err = New(kv) } if err != nil { return nil, err } ix.InitBlobSource(sto) return ix, err }
func newFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) { blobPrefix := config.RequiredString("blobSource") file := config.RequiredString("file") if err := config.Validate(); err != nil { return nil, err } is, closer, err := NewStorage(file) if err != nil { return nil, err } sto, err := ld.GetStorage(blobPrefix) if err != nil { closer.Close() return nil, err } ix := index.New(is) if err != nil { return nil, err } ix.BlobSource = sto // Good enough, for now: ix.KeyFetcher = ix.BlobSource return ix, err }
func newFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (storage blobserver.Storage, err error) { var ( origin = config.RequiredString("origin") cache = config.RequiredString("cache") kvConf = config.RequiredObject("meta") maxCacheBytes = config.OptionalInt64("maxCacheBytes", 512<<20) ) if err := config.Validate(); err != nil { return nil, err } cacheSto, err := ld.GetStorage(cache) if err != nil { return nil, err } originSto, err := ld.GetStorage(origin) if err != nil { return nil, err } kv, err := sorted.NewKeyValue(kvConf) if err != nil { return nil, err } // TODO: enumerate through kv and calculate current size. // Maybe also even enumerate through cache to see if they match. // Or even: keep it only in memory and not in kv? s := &sto{ origin: originSto, cache: cacheSto, maxCacheBytes: maxCacheBytes, kv: kv, } return s, nil }
func newFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (bs blobserver.Storage, err error) { sto := &storage{ SimpleBlobHubPartitionMap: &blobserver.SimpleBlobHubPartitionMap{}, } key := config.OptionalString("key", "") keyFile := config.OptionalString("keyFile", "") switch { case key != "": sto.key = []byte(key) case keyFile != "": // TODO: check that keyFile's unix permissions aren't too permissive. sto.key, err = ioutil.ReadFile(keyFile) if err != nil { return } } sto.blobs, err = ld.GetStorage(config.RequiredString("blobs")) if err != nil { return } sto.meta, err = ld.GetStorage(config.RequiredString("meta")) if err != nil { return } if err := config.Validate(); err != nil { return nil, err } if sto.key == nil { // TODO: add a way to prompt from stdin on start? or keychain support? return nil, errors.New("no encryption key set with 'key' or 'keyFile'") } return sto, nil }
func indexFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (storage blobserver.Storage, err error) { is := &indexStorage{} var ( blobPrefix = config.RequiredString("blobSource") ns = config.OptionalString("namespace", "") ) if err := config.Validate(); err != nil { return nil, err } sto, err := ld.GetStorage(blobPrefix) if err != nil { return nil, err } is.ns, err = sanitizeNamespace(ns) if err != nil { return nil, err } ix, err := index.New(is) if err != nil { return nil, err } ix.BlobSource = sto ix.KeyFetcher = ix.BlobSource // TODO(bradfitz): global search? something else? return ix, nil }
func newFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) { blobPrefix := config.RequiredString("blobSource") postgresConf, err := postgres.ConfigFromJSON(config) if err != nil { return nil, err } kv, err := postgres.NewKeyValue(postgresConf) if err != nil { return nil, err } ix, err := index.New(kv) if err != nil { return nil, err } sto, err := ld.GetStorage(blobPrefix) if err != nil { ix.Close() return nil, err } ix.BlobSource = sto // Good enough, for now: ix.KeyFetcher = ix.BlobSource return ix, nil }
func newFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (storage blobserver.Storage, err error) { sto := &replicaStorage{ replicaPrefixes: config.RequiredList("backends"), } nReplicas := len(sto.replicaPrefixes) sto.minWritesForSuccess = config.OptionalInt("minWritesForSuccess", nReplicas) if err := config.Validate(); err != nil { return nil, err } if nReplicas == 0 { return nil, errors.New("replica: need at least one replica") } if sto.minWritesForSuccess == 0 { sto.minWritesForSuccess = nReplicas } sto.replicas = make([]blobserver.Storage, nReplicas) for i, prefix := range sto.replicaPrefixes { replicaSto, err := ld.GetStorage(prefix) if err != nil { return nil, err } sto.replicas[i] = replicaSto } return sto, nil }
func newFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (bs blobserver.Storage, err error) { sto := &storage{ SimpleBlobHubPartitionMap: &blobserver.SimpleBlobHubPartitionMap{}, index: index.NewMemoryStorage(), // TODO: temporary for development; let be configurable (mysql, etc) } agreement := config.OptionalString("I_AGREE", "") const wantAgreement = "that encryption support hasn't been peer-reviewed, isn't finished, and its format might change." if agreement != wantAgreement { return nil, errors.New("Use of the 'encrypt' target without the proper I_AGREE value.") } key := config.OptionalString("key", "") keyFile := config.OptionalString("keyFile", "") var keyb []byte switch { case key != "": keyb, err = hex.DecodeString(key) if err != nil || len(keyb) != 16 { return nil, fmt.Errorf("The 'key' parameter must be 16 bytes of 32 hex digits. (currently fixed at AES-128)") } case keyFile != "": // TODO: check that keyFile's unix permissions aren't too permissive. keyb, err = ioutil.ReadFile(keyFile) if err != nil { return nil, fmt.Errorf("Reading key file %v: %v", keyFile, err) } } blobStorage := config.RequiredString("blobs") metaStorage := config.RequiredString("meta") if err := config.Validate(); err != nil { return nil, err } sto.blobs, err = ld.GetStorage(blobStorage) if err != nil { return } sto.meta, err = ld.GetStorage(metaStorage) if err != nil { return } if keyb == nil { // TODO: add a way to prompt from stdin on start? or keychain support? return nil, errors.New("no encryption key set with 'key' or 'keyFile'") } if err := sto.setKey(keyb); err != nil { return nil, err } log.Printf("Reading encryption metadata...") if err := sto.readAllMetaBlobs(); err != nil { return nil, fmt.Errorf("Error scanning metadata on start-up: %v", err) } log.Printf("Read all encryption metadata.") return sto, nil }
func newStatusFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (h http.Handler, err error) {
	if err := conf.Validate(); err != nil {
		return nil, err
	}
	return &StatusHandler{
		prefix:        ld.MyPrefix(),
		handlerFinder: ld,
	}, nil
}
func newFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (blobserver.Storage, error) { var ( smallPrefix = conf.RequiredString("smallBlobs") largePrefix = conf.RequiredString("largeBlobs") metaConf = conf.RequiredObject("metaIndex") ) if err := conf.Validate(); err != nil { return nil, err } small, err := ld.GetStorage(smallPrefix) if err != nil { return nil, fmt.Errorf("failed to load smallBlobs at %s: %v", smallPrefix, err) } large, err := ld.GetStorage(largePrefix) if err != nil { return nil, fmt.Errorf("failed to load largeBlobs at %s: %v", largePrefix, err) } largeSubber, ok := large.(subFetcherStorage) if !ok { return nil, fmt.Errorf("largeBlobs at %q of type %T doesn't support fetching sub-ranges of blobs", largePrefix, large) } meta, err := sorted.NewKeyValue(metaConf) if err != nil { return nil, fmt.Errorf("failed to setup blobpacked metaIndex: %v", err) } sto := &storage{ small: small, large: largeSubber, meta: meta, } sto.init() recoveryMu.Lock() defer recoveryMu.Unlock() if recovery { log.Print("Starting recovery of blobpacked index") if err := meta.Close(); err != nil { return nil, err } if err := sto.reindex(context.TODO(), func() (sorted.KeyValue, error) { return sorted.NewKeyValue(metaConf) }); err != nil { return nil, err } return sto, nil } // Check for a weird state: zip files exist, but no metadata about them // is recorded. This is probably a corrupt state, and the user likely // wants to recover. if !sto.anyMeta() && sto.anyZipPacks() { log.Fatal("Error: blobpacked storage detects non-zero packed zips, but no metadata. Please re-start in recovery mode with -recovery.") } return sto, nil }
func newFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (blobserver.Storage, error) { var ( smallPrefix = conf.RequiredString("smallBlobs") largePrefix = conf.RequiredString("largeBlobs") metaConf = conf.RequiredObject("metaIndex") ) if err := conf.Validate(); err != nil { return nil, err } small, err := ld.GetStorage(smallPrefix) if err != nil { return nil, fmt.Errorf("failed to load smallBlobs at %s: %v", smallPrefix, err) } large, err := ld.GetStorage(largePrefix) if err != nil { return nil, fmt.Errorf("failed to load largeBlobs at %s: %v", largePrefix, err) } largeSubber, ok := large.(subFetcherStorage) if !ok { return nil, fmt.Errorf("largeBlobs at %q of type %T doesn't support fetching sub-ranges of blobs", largePrefix, large) } meta, err := sorted.NewKeyValue(metaConf) if err != nil { return nil, fmt.Errorf("failed to setup blobpacked metaIndex: %v", err) } sto := &storage{ small: small, large: largeSubber, meta: meta, } sto.init() // Check for a weird state: zip files exist, but no metadata about them // is recorded. This is probably a corrupt state, and the user likely // wants to recover. if !sto.anyMeta() && sto.anyZipPacks() { log.Printf("Warning: blobpacked storage detects non-zero packed zips, but no metadata. Please re-start in recovery mode.") // TODO: add a recovery mode. // Old TODO was: // fail with a "known corrupt" message and refuse to // start unless in recovery mode (perhaps a new environment // var? or flag passed down?) using StreamBlobs starting at // "l:". Could even do it automatically if total size is // small or fast enough? But that's confusing if it only // sometimes finishes recovery. We probably want various // server start-up modes anyway: "check", "recover", "garbage // collect", "readonly". So might as well introduce that // concept now. // TODO: test start-up recovery mode, once it works. } return sto, nil }
func newJSONSignFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (http.Handler, error) { pubKeyDestPrefix := conf.OptionalString("publicKeyDest", "") // either a short form ("26F5ABDA") or one the longer forms. keyId := conf.RequiredString("keyId") h := &Handler{ secretRing: conf.OptionalString("secretRing", ""), } var err error if err = conf.Validate(); err != nil { return nil, err } h.entity, err = jsonsign.EntityFromSecring(keyId, h.secretRingPath()) if err != nil { return nil, err } armoredPublicKey, err := jsonsign.ArmoredPublicKey(h.entity) ms := new(blobref.MemoryStore) h.pubKeyBlobRef, err = ms.AddBlob(crypto.SHA1, armoredPublicKey) if err != nil { return nil, err } h.pubKeyFetcher = ms if pubKeyDestPrefix != "" { sto, err := ld.GetStorage(pubKeyDestPrefix) if err != nil { return nil, err } h.pubKeyDest = sto if sto != nil { if ctxReq, ok := ld.GetRequestContext(); ok { if w, ok := sto.(blobserver.ContextWrapper); ok { sto = w.WrapContext(ctxReq) } } err := h.uploadPublicKey(sto, armoredPublicKey) if err != nil { return nil, fmt.Errorf("Error seeding self public key in storage: %v", err) } } } h.pubKeyBlobRefServeSuffix = "camli/" + h.pubKeyBlobRef.String() h.pubKeyHandler = &gethandler.Handler{ Fetcher: ms, AllowGlobalAccess: true, // just public keys } return h, nil }
func newJSONSignFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (http.Handler, error) { pubKeyDestPrefix := conf.OptionalString("publicKeyDest", "") // either a short form ("26F5ABDA") or one the longer forms. keyId := conf.RequiredString("keyId") h := &Handler{ secretRing: conf.OptionalString("secretRing", ""), } var err error if err = conf.Validate(); err != nil { return nil, err } h.entity, err = jsonsign.EntityFromSecring(keyId, h.secretRingPath()) if err != nil { return nil, err } armoredPublicKey, err := jsonsign.ArmoredPublicKey(h.entity) ms := new(blob.MemoryStore) h.pubKeyBlobRef, err = ms.AddBlob(crypto.SHA1, armoredPublicKey) if err != nil { return nil, err } h.pubKeyFetcher = ms if pubKeyDestPrefix != "" { sto, err := ld.GetStorage(pubKeyDestPrefix) if err != nil { return nil, err } h.pubKeyDest = sto if sto != nil { err := h.uploadPublicKey(sto, armoredPublicKey) if err != nil { return nil, fmt.Errorf("Error seeding self public key in storage: %v", err) } } } h.pubKeyBlobRefServeSuffix = "camli/" + h.pubKeyBlobRef.String() h.pubKeyHandler = &gethandler.Handler{ Fetcher: ms, } h.signer, err = schema.NewSigner(h.pubKeyBlobRef, strings.NewReader(armoredPublicKey), h.entity) if err != nil { return nil, err } return h, nil }
func newRootFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (h http.Handler, err error) { root := &RootHandler{} root.Stealth = conf.OptionalBool("stealth", false) if err = conf.Validate(); err != nil { return } if _, h, err := ld.FindHandlerByType("ui"); err == nil { root.ui = h.(*UIHandler) } return root, nil }
func newJSONSignFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (http.Handler, error) { var ( // either a short form ("26F5ABDA") or one the longer forms. keyId = conf.RequiredString("keyId") pubKeyDestPrefix = conf.OptionalString("publicKeyDest", "") secretRing = conf.OptionalString("secretRing", "") ) if err := conf.Validate(); err != nil { return nil, err } h := &Handler{ secretRing: secretRing, } var err error h.entity, err = jsonsign.EntityFromSecring(keyId, h.secretRingPath()) if err != nil { return nil, err } h.pubKey, err = jsonsign.ArmoredPublicKey(h.entity) ms := &memory.Storage{} h.pubKeyBlobRef = blob.SHA1FromString(h.pubKey) if _, err := ms.ReceiveBlob(h.pubKeyBlobRef, strings.NewReader(h.pubKey)); err != nil { return nil, fmt.Errorf("could not store pub key blob: %v", err) } h.pubKeyFetcher = ms if pubKeyDestPrefix != "" { sto, err := ld.GetStorage(pubKeyDestPrefix) if err != nil { return nil, err } h.pubKeyDest = sto } h.pubKeyBlobRefServeSuffix = "camli/" + h.pubKeyBlobRef.String() h.pubKeyHandler = &gethandler.Handler{ Fetcher: ms, } h.signer, err = schema.NewSigner(h.pubKeyBlobRef, strings.NewReader(h.pubKey), h.entity) if err != nil { return nil, err } return h, nil }
func newSyncFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (h http.Handler, err error) { from := conf.RequiredString("from") to := conf.RequiredString("to") fullSync := conf.OptionalBool("fullSyncOnStart", false) blockFullSync := conf.OptionalBool("blockingFullSyncOnStart", false) if err = conf.Validate(); err != nil { return } fromBs, err := ld.GetStorage(from) if err != nil { return } toBs, err := ld.GetStorage(to) if err != nil { return } fromQsc, ok := fromBs.(blobserver.StorageQueueCreator) if !ok { return nil, fmt.Errorf("Prefix %s (type %T) does not support being efficient replication source (queueing)", from, fromBs) } synch, err := createSyncHandler(from, to, fromQsc, toBs) if err != nil { return } if fullSync || blockFullSync { didFullSync := make(chan bool, 1) go func() { n := synch.runSync("queue", fromQsc, 0) log.Printf("Queue sync copied %d blobs", n) n = synch.runSync("full", fromBs, 0) log.Printf("Full sync copied %d blobs", n) didFullSync <- true synch.syncQueueLoop() }() if blockFullSync { log.Printf("Blocking startup, waiting for full sync from %q to %q", from, to) <-didFullSync log.Printf("Full sync complete.") } } else { go synch.syncQueueLoop() } rootPrefix, _, err := ld.FindHandlerByType("root") switch err { case blobserver.ErrHandlerTypeNotFound: // ignore; okay to not have a root handler. case nil: h, err := ld.GetHandler(rootPrefix) if err != nil { return nil, err } h.(*RootHandler).registerSyncHandler(synch) default: return nil, fmt.Errorf("Error looking for root handler: %v", err) } return synch, nil }
func newMemoryIndexFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) { blobPrefix := config.RequiredString("blobSource") if err := config.Validate(); err != nil { return nil, err } sto, err := ld.GetStorage(blobPrefix) if err != nil { return nil, err } ix := NewMemoryIndex() ix.InitBlobSource(sto) return ix, err }
func newFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) { var ( blobPrefix = config.RequiredString("blobSource") host = config.OptionalString("host", "localhost") user = config.RequiredString("user") password = config.OptionalString("password", "") database = config.RequiredString("database") ) if err := config.Validate(); err != nil { return nil, err } sto, err := ld.GetStorage(blobPrefix) if err != nil { return nil, err } isto, err := NewStorage(host, user, password, database) if err != nil { return nil, err } is := isto.(*myIndexStorage) if err := is.ping(); err != nil { return nil, err } version, err := is.SchemaVersion() if err != nil { return nil, fmt.Errorf("error getting schema version (need to init database?): %v", err) } if version != requiredSchemaVersion { if version == 20 && requiredSchemaVersion == 21 { fmt.Fprintf(os.Stderr, fixSchema20to21) } if os.Getenv("CAMLI_DEV_CAMLI_ROOT") != "" { // Good signal that we're using the devcam server, so help out // the user with a more useful tip: return nil, fmt.Errorf("database schema version is %d; expect %d (run \"devcam server --wipe\" to wipe both your blobs and re-populate the database schema)", version, requiredSchemaVersion) } return nil, fmt.Errorf("database schema version is %d; expect %d (need to re-init/upgrade database?)", version, requiredSchemaVersion) } ix := index.New(is) ix.BlobSource = sto // Good enough, for now: ix.KeyFetcher = ix.BlobSource return ix, nil }
func newRootFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (h http.Handler, err error) {
	username, _ := getUserName()
	root := &RootHandler{
		BlobRoot:   conf.OptionalString("blobRoot", ""),
		SearchRoot: conf.OptionalString("searchRoot", ""),
		OwnerName:  conf.OptionalString("ownerName", username),
		Username:   osutil.Username(),
		Prefix:     ld.MyPrefix(),
	}
	root.Stealth = conf.OptionalBool("stealth", false)
	root.statusRoot = conf.OptionalString("statusRoot", "")
	if err = conf.Validate(); err != nil {
		return
	}

	if root.BlobRoot != "" {
		bs, err := ld.GetStorage(root.BlobRoot)
		if err != nil {
			return nil, fmt.Errorf("Root handler's blobRoot of %q error: %v", root.BlobRoot, err)
		}
		root.Storage = bs
	}

	root.searchInit = func() {}
	if root.SearchRoot != "" {
		prefix := root.SearchRoot
		if t := ld.GetHandlerType(prefix); t != "search" {
			if t == "" {
				return nil, fmt.Errorf("root handler's searchRoot of %q is invalid and doesn't refer to a declared handler", prefix)
			}
			return nil, fmt.Errorf("root handler's searchRoot of %q is of type %q, not %q", prefix, t, "search")
		}
		root.searchInit = func() {
			h, err := ld.GetHandler(prefix)
			if err != nil {
				log.Fatalf("Error fetching SearchRoot at %q: %v", prefix, err)
			}
			root.searchHandler = h.(*search.Handler)
			root.searchInit = nil
		}
	}

	if pfx, _, err := ld.FindHandlerByType("importer"); err == nil {
		root.importerRoot = pfx
	}

	return root, nil
}
func newFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) { var ( blobPrefix = config.RequiredString("blobSource") file = config.RequiredString("file") ) if err := config.Validate(); err != nil { return nil, err } sto, err := ld.GetStorage(blobPrefix) if err != nil { return nil, err } fi, err := os.Stat(file) if os.IsNotExist(err) || (err == nil && fi.Size() == 0) { return nil, fmt.Errorf(`You need to initialize your SQLite index database with: camtool dbinit --dbname=%s --dbtype=sqlite`, file) } isto, err := NewStorage(file) if err != nil { return nil, err } is := isto.(*storage) version, err := is.SchemaVersion() if err != nil { return nil, fmt.Errorf("error getting schema version (need to init database with 'camtool dbinit %s'?): %v", file, err) } if err := is.ping(); err != nil { return nil, err } if version != requiredSchemaVersion { if os.Getenv("CAMLI_ADVERTISED_PASSWORD") != "" { // Good signal that we're using the dev-server script, so help out // the user with a more useful tip: return nil, fmt.Errorf("database schema version is %d; expect %d (run \"./dev-server --wipe\" to wipe both your blobs and re-populate the database schema)", version, requiredSchemaVersion) } return nil, fmt.Errorf("database schema version is %d; expect %d (need to re-init/upgrade database?)", version, requiredSchemaVersion) } ix := index.New(is) ix.BlobSource = sto // Good enough, for now: ix.KeyFetcher = ix.BlobSource return ix, nil }
func newFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (storage blobserver.Storage, err error) { sto := &nsto{} invConf := config.RequiredObject("inventory") masterName := config.RequiredString("storage") if err := config.Validate(); err != nil { return nil, err } sto.inventory, err = sorted.NewKeyValue(invConf) if err != nil { return nil, fmt.Errorf("Invalid 'inventory' configuration: %v", err) } sto.master, err = ld.GetStorage(masterName) if err != nil { return nil, fmt.Errorf("Invalid 'storage' configuration: %v", err) } return sto, nil }
func newSyncFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (h http.Handler, err error) { from := conf.RequiredString("from") to := conf.RequiredString("to") fullSync := conf.OptionalBool("fullSyncOnStart", false) blockFullSync := conf.OptionalBool("blockingFullSyncOnStart", false) if err = conf.Validate(); err != nil { return } fromBs, err := ld.GetStorage(from) if err != nil { return } toBs, err := ld.GetStorage(to) if err != nil { return } fromQsc, ok := fromBs.(blobserver.StorageQueueCreator) if !ok { return nil, fmt.Errorf("Prefix %s (type %T) does not support being efficient replication source (queueing)", from, fromBs) } synch, err := createSyncHandler(from, to, fromQsc, toBs) if err != nil { return } if fullSync || blockFullSync { didFullSync := make(chan bool, 1) go func() { n := synch.runSync("queue", fromQsc, 0) log.Printf("Queue sync copied %d blobs", n) n = synch.runSync("full", fromBs, 0) log.Printf("Full sync copied %d blobs", n) didFullSync <- true synch.syncQueueLoop() }() if blockFullSync { log.Printf("Blocking startup, waiting for full sync from %q to %q", from, to) <-didFullSync log.Printf("Full sync complete.") } } else { go synch.syncQueueLoop() } return synch, nil }
func newMemoryIndexFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) { blobPrefix := config.RequiredString("blobSource") if err := config.Validate(); err != nil { return nil, err } sto, err := ld.GetStorage(blobPrefix) if err != nil { return nil, err } ix := NewMemoryIndex() ix.BlobSource = sto // Good enough, for now: ix.KeyFetcher = ix.BlobSource return ix, err }
func newHandlerFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (http.Handler, error) { indexPrefix := conf.RequiredString("index") // TODO: add optional help tips here? ownerBlobStr := conf.RequiredString("owner") devBlockStartupPrefix := conf.OptionalString("devBlockStartupOn", "") slurpToMemory := conf.OptionalBool("slurpToMemory", false) if err := conf.Validate(); err != nil { return nil, err } if devBlockStartupPrefix != "" { _, err := ld.GetHandler(devBlockStartupPrefix) if err != nil { return nil, fmt.Errorf("search handler references bogus devBlockStartupOn handler %s: %v", devBlockStartupPrefix, err) } } indexHandler, err := ld.GetHandler(indexPrefix) if err != nil { return nil, fmt.Errorf("search config references unknown handler %q", indexPrefix) } indexer, ok := indexHandler.(index.Interface) if !ok { return nil, fmt.Errorf("search config references invalid indexer %q (actually a %T)", indexPrefix, indexHandler) } ownerBlobRef, ok := blob.Parse(ownerBlobStr) if !ok { return nil, fmt.Errorf("search 'owner' has malformed blobref %q; expecting e.g. sha1-xxxxxxxxxxxx", ownerBlobStr) } h := NewHandler(indexer, ownerBlobRef) if slurpToMemory { ii := indexer.(*index.Index) ii.Lock() corpus, err := ii.KeepInMemory() if err != nil { ii.Unlock() return nil, fmt.Errorf("error slurping index to memory: %v", err) } h.SetCorpus(corpus) ii.Unlock() } return h, nil }
func newFromConfig(ld blobserver.Loader, cfg jsonconfig.Obj) (http.Handler, error) { h := &Host{ baseURL: ld.BaseURL(), importerBase: ld.BaseURL() + ld.MyPrefix(), imp: make(map[string]*importer), } for k, impl := range importers { h.importers = append(h.importers, k) var clientID, clientSecret string if impConf := cfg.OptionalObject(k); impConf != nil { clientID = impConf.OptionalString("clientID", "") clientSecret = impConf.OptionalString("clientSecret", "") // Special case: allow clientSecret to be of form "clientID:clientSecret" // if the clientID is empty. if clientID == "" && strings.Contains(clientSecret, ":") { if f := strings.SplitN(clientSecret, ":", 2); len(f) == 2 { clientID, clientSecret = f[0], f[1] } } if err := impConf.Validate(); err != nil { return nil, fmt.Errorf("Invalid static configuration for importer %q: %v", k, err) } } if clientSecret != "" && clientID == "" { return nil, fmt.Errorf("Invalid static configuration for importer %q: clientSecret specified without clientID", k) } imp := &importer{ host: h, name: k, impl: impl, clientID: clientID, clientSecret: clientSecret, } h.imp[k] = imp } if err := cfg.Validate(); err != nil { return nil, err } sort.Strings(h.importers) return h, nil }