// newFromConfig constructs the index storage handler from its JSON
// config: it opens the sorted KeyValue named by "storage", resolves the
// "blobSource" storage prefix via the loader, and wires the two
// together. If the index reports the known errMissingWholeRef
// corruption, it attempts an automatic one-time repair and re-opens.
func newFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) {
	blobPrefix := config.RequiredString("blobSource")
	kvConfig := config.RequiredObject("storage")
	if err := config.Validate(); err != nil {
		return nil, err
	}
	kv, err := sorted.NewKeyValue(kvConfig)
	if err != nil {
		return nil, err
	}
	sto, err := ld.GetStorage(blobPrefix)
	if err != nil {
		return nil, err
	}
	ix, err := New(kv)
	// TODO(mpl): next time we need to do another fix, make a new error
	// type that lets us apply the needed fix depending on its value or
	// something. For now just one value/fix.
	if err == errMissingWholeRef {
		// TODO: maybe we don't want to do that automatically. Brad says
		// we have to think about the case on GCE/CoreOS in particular.
		if err := ix.fixMissingWholeRef(sto); err != nil {
			ix.Close()
			return nil, fmt.Errorf("could not fix missing wholeRef entries: %v", err)
		}
		// Repair succeeded; re-open the index over the same KeyValue.
		ix, err = New(kv)
	}
	if err != nil {
		return nil, err
	}
	ix.InitBlobSource(sto)
	// err is necessarily nil here.
	return ix, err
}
// newKeyValueFromJSONConfig returns a KeyValue implementation on top of a // github.com/syndtr/goleveldb/leveldb file. func newKeyValueFromJSONConfig(cfg jsonconfig.Obj) (sorted.KeyValue, error) { file := cfg.RequiredString("file") if err := cfg.Validate(); err != nil { return nil, err } strictness := opt.DefaultStrict if env.IsDev() { // Be more strict in dev mode. strictness = opt.StrictAll } opts := &opt.Options{ // The default is 10, // 8 means 2.126% or 1/47th disk check rate, // 10 means 0.812% error rate (1/2^(bits/1.44)) or 1/123th disk check rate, // 12 means 0.31% or 1/322th disk check rate. // TODO(tgulacsi): decide which number is the best here. Till that go with the default. Filter: filter.NewBloomFilter(10), Strict: strictness, } db, err := leveldb.OpenFile(file, opts) if err != nil { return nil, err } is := &kvis{ db: db, path: file, opts: opts, readOpts: &opt.ReadOptions{Strict: strictness}, // On machine crash we want to reindex anyway, and // fsyncs may impose great performance penalty. writeOpts: &opt.WriteOptions{Sync: false}, } return is, nil }
func newFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (storage blobserver.Storage, err error) { sto := &condStorage{} receive := conf.OptionalStringOrObject("write") read := conf.RequiredString("read") remove := conf.OptionalString("remove", "") if err := conf.Validate(); err != nil { return nil, err } if receive != nil { sto.storageForReceive, err = buildStorageForReceive(ld, receive) if err != nil { return } } sto.read, err = ld.GetStorage(read) if err != nil { return } if remove != "" { sto.remove, err = ld.GetStorage(remove) if err != nil { return } } return sto, nil }
func newFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (storage blobserver.Storage, err error) { var ( origin = config.RequiredString("origin") cache = config.RequiredString("cache") kvConf = config.RequiredObject("meta") maxCacheBytes = config.OptionalInt64("maxCacheBytes", 512<<20) ) if err := config.Validate(); err != nil { return nil, err } cacheSto, err := ld.GetStorage(cache) if err != nil { return nil, err } originSto, err := ld.GetStorage(origin) if err != nil { return nil, err } kv, err := sorted.NewKeyValue(kvConf) if err != nil { return nil, err } // TODO: enumerate through kv and calculate current size. // Maybe also even enumerate through cache to see if they match. // Or even: keep it only in memory and not in kv? s := &sto{ origin: originSto, cache: cacheSto, maxCacheBytes: maxCacheBytes, kv: kv, } return s, nil }
func indexFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (storage blobserver.Storage, err error) { is := &indexStorage{} var ( blobPrefix = config.RequiredString("blobSource") ns = config.OptionalString("namespace", "") ) if err := config.Validate(); err != nil { return nil, err } sto, err := ld.GetStorage(blobPrefix) if err != nil { return nil, err } is.ns, err = sanitizeNamespace(ns) if err != nil { return nil, err } ix, err := index.New(is) if err != nil { return nil, err } ix.BlobSource = sto ix.KeyFetcher = ix.BlobSource // TODO(bradfitz): global search? something else? return ix, nil }
func newFromConfig(_ blobserver.Loader, config jsonconfig.Obj) (storage blobserver.Storage, err error) { path := config.RequiredString("path") if err := config.Validate(); err != nil { return nil, err } return New(path) }
func newFromConfig(_ blobserver.Loader, config jsonconfig.Obj) (storage blobserver.Storage, err error) { url := config.RequiredString("url") auth := config.RequiredString("auth") skipStartupCheck := config.OptionalBool("skipStartupCheck", false) if err := config.Validate(); err != nil { return nil, err } client := client.New(url) if err = client.SetupAuthFromString(auth); err != nil { return nil, err } client.SetLogger(log.New(os.Stderr, "remote", log.LstdFlags)) sto := &remoteStorage{ client: client, } if !skipStartupCheck { // Do a quick dummy operation to check that our credentials are // correct. // TODO(bradfitz,mpl): skip this operation smartly if it turns out this is annoying/slow for whatever reason. c := make(chan blob.SizedRef, 1) err = sto.EnumerateBlobs(context.TODO(), c, "", 1) if err != nil { return nil, err } } return sto, nil }
// Reads google storage config and creates a Client. Exits on error.
// This is a manual-test helper: it skips the test unless the
// --gs_config_path flag points at a JSON config with a "gsconf" object
// holding "auth" (OAuth2 client/refresh credentials) and "bucket".
func doConfig(t *testing.T) (gsa *Client, bucket string) {
	if *gsConfigPath == "" {
		t.Skip("Skipping manual test. Set flag --gs_config_path to test Google Storage.")
	}
	cf, err := osutil.NewJSONConfigParser().ReadFile(*gsConfigPath)
	if err != nil {
		t.Fatalf("Failed to read config: %v", err)
	}
	var config jsonconfig.Obj
	config = cf.RequiredObject("gsconf")
	if err := cf.Validate(); err != nil {
		t.Fatalf("Invalid config: %v", err)
	}
	auth := config.RequiredObject("auth")
	bucket = config.RequiredString("bucket")
	if err := config.Validate(); err != nil {
		t.Fatalf("Invalid config: %v", err)
	}
	gsa = NewClient(oauth2.NewClient(oauth2.NoContext, oauthutil.NewRefreshTokenSource(&oauth2.Config{
		Scopes:       []string{Scope},
		Endpoint:     google.Endpoint,
		ClientID:     auth.RequiredString("client_id"),
		ClientSecret: auth.RequiredString("client_secret"),
		RedirectURL:  oauthutil.TitleBarRedirectURL,
	}, auth.RequiredString("refresh_token"))))
	// Validate "auth" last, after its RequiredString reads above have
	// recorded any missing-key errors.
	if err := auth.Validate(); err != nil {
		t.Fatalf("Invalid config: %v", err)
	}
	return
}
// newKeyValueFromJSONConfig returns a PostgreSQL-backed sorted.KeyValue.
// It connects using "user", "database", and the optional "host",
// "password", "sslmode" keys, creates the tables and replace functions
// if needed, records the schema version, and verifies both reachability
// and schema-version compatibility before returning.
func newKeyValueFromJSONConfig(cfg jsonconfig.Obj) (sorted.KeyValue, error) {
	conninfo := fmt.Sprintf("user=%s dbname=%s host=%s password=%s sslmode=%s",
		cfg.RequiredString("user"),
		cfg.RequiredString("database"),
		cfg.OptionalString("host", "localhost"),
		cfg.OptionalString("password", ""),
		cfg.OptionalString("sslmode", "require"),
	)
	if err := cfg.Validate(); err != nil {
		return nil, err
	}
	db, err := sql.Open("postgres", conninfo)
	if err != nil {
		return nil, err
	}
	// Idempotent schema setup: assumes SQLCreateTables uses
	// IF NOT EXISTS-style DDL — TODO confirm against its definition.
	for _, tableSql := range SQLCreateTables() {
		if _, err := db.Exec(tableSql); err != nil {
			return nil, fmt.Errorf("error creating table with %q: %v", tableSql, err)
		}
	}
	for _, statement := range SQLDefineReplace() {
		if _, err := db.Exec(statement); err != nil {
			return nil, fmt.Errorf("error setting up replace statement with %q: %v", statement, err)
		}
	}
	// Record the current schema version via the replace function
	// defined just above; the result rows are not needed.
	r, err := db.Query(fmt.Sprintf(`SELECT replaceintometa('version', '%d')`, SchemaVersion()))
	if err != nil {
		return nil, fmt.Errorf("error setting schema version: %v", err)
	}
	r.Close()

	kv := &keyValue{
		db: db,
		KeyValue: &sqlkv.KeyValue{
			DB:              db,
			SetFunc:         altSet,
			BatchSetFunc:    altBatchSet,
			PlaceHolderFunc: replacePlaceHolders,
		},
	}
	if err := kv.ping(); err != nil {
		return nil, fmt.Errorf("PostgreSQL db unreachable: %v", err)
	}
	version, err := kv.SchemaVersion()
	if err != nil {
		return nil, fmt.Errorf("error getting schema version (need to init database?): %v", err)
	}
	if version != requiredSchemaVersion {
		if env.IsDev() {
			// Good signal that we're using the devcam server, so help out
			// the user with a more useful tip:
			return nil, fmt.Errorf("database schema version is %d; expect %d (run \"devcam server --wipe\" to wipe both your blobs and re-populate the database schema)", version, requiredSchemaVersion)
		}
		return nil, fmt.Errorf("database schema version is %d; expect %d (need to re-init/upgrade database?)", version, requiredSchemaVersion)
	}
	return kv, nil
}
// newFromConfig constructs a blobpacked storage from "smallBlobs",
// "largeBlobs" (which must support sub-range fetches), and the
// "metaIndex" sorted KeyValue. If the package-level recovery flag is
// set it rebuilds the meta index from the pack files before returning;
// otherwise it refuses to start when packs exist with no metadata.
func newFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (blobserver.Storage, error) {
	var (
		smallPrefix = conf.RequiredString("smallBlobs")
		largePrefix = conf.RequiredString("largeBlobs")
		metaConf    = conf.RequiredObject("metaIndex")
	)
	if err := conf.Validate(); err != nil {
		return nil, err
	}
	small, err := ld.GetStorage(smallPrefix)
	if err != nil {
		return nil, fmt.Errorf("failed to load smallBlobs at %s: %v", smallPrefix, err)
	}
	large, err := ld.GetStorage(largePrefix)
	if err != nil {
		return nil, fmt.Errorf("failed to load largeBlobs at %s: %v", largePrefix, err)
	}
	largeSubber, ok := large.(subFetcherStorage)
	if !ok {
		return nil, fmt.Errorf("largeBlobs at %q of type %T doesn't support fetching sub-ranges of blobs", largePrefix, large)
	}
	meta, err := sorted.NewKeyValue(metaConf)
	if err != nil {
		return nil, fmt.Errorf("failed to setup blobpacked metaIndex: %v", err)
	}
	sto := &storage{
		small: small,
		large: largeSubber,
		meta:  meta,
	}
	sto.init()

	// recoveryMu guards the package-level recovery flag; held for the
	// rest of construction so concurrent loads see a consistent value.
	recoveryMu.Lock()
	defer recoveryMu.Unlock()
	if recovery {
		log.Print("Starting recovery of blobpacked index")
		// Close the just-opened meta index; reindex re-creates it via
		// the constructor closure below.
		if err := meta.Close(); err != nil {
			return nil, err
		}
		if err := sto.reindex(context.TODO(), func() (sorted.KeyValue, error) {
			return sorted.NewKeyValue(metaConf)
		}); err != nil {
			return nil, err
		}
		return sto, nil
	}

	// Check for a weird state: zip files exist, but no metadata about them
	// is recorded. This is probably a corrupt state, and the user likely
	// wants to recover.
	if !sto.anyMeta() && sto.anyZipPacks() {
		log.Fatal("Error: blobpacked storage detects non-zero packed zips, but no metadata. Please re-start in recovery mode with -recovery.")
	}
	return sto, nil
}
// NewHandler returns a Handler that proxies requests to an app. Start() on the
// Handler starts the app.
// The apiHost must end in a slash and is the camlistored API server for the app
// process to hit.
// The appHandlerPrefix is the URL path prefix on apiHost where the app is mounted.
// It must end in a slash, and be at minimum "/".
// The conf object has the following members, related to the vars described in
// doc/app-environment.txt:
// "program", string, required. File name of the app's program executable. Either
// an absolute path, or the name of a file located in CAMLI_APP_BINDIR or in PATH.
// "backendURL", string, optional. Automatic if absent. It sets CAMLI_APP_BACKEND_URL.
// "appConfig", object, optional. Additional configuration that the app can request from Camlistore.
func NewHandler(conf jsonconfig.Obj, apiHost, appHandlerPrefix string) (*Handler, error) {
	// TODO: remove the appHandlerPrefix if/when we change where the app config JSON URL is made available.
	name := conf.RequiredString("program")
	backendURL := conf.OptionalString("backendURL", "")
	appConfig := conf.OptionalObject("appConfig")
	// TODO(mpl): add an auth token in the extra config of the dev server config,
	// that the hello app can use to setup a status handler than only responds
	// to requests with that token.
	if err := conf.Validate(); err != nil {
		return nil, err
	}

	if apiHost == "" {
		return nil, fmt.Errorf("app: could not initialize Handler for %q: Camlistore apiHost is unknown", name)
	}
	if appHandlerPrefix == "" {
		return nil, fmt.Errorf("app: could not initialize Handler for %q: empty appHandlerPrefix", name)
	}

	if backendURL == "" {
		var err error
		// If not specified in the conf, we're dynamically picking the port of the CAMLI_APP_BACKEND_URL
		// now (instead of letting the app itself do it), because we need to know it in advance in order
		// to set the app handler's proxy.
		backendURL, err = randPortBackendURL(apiHost, appHandlerPrefix)
		if err != nil {
			return nil, err
		}
	}

	// Fresh random credentials for this process; the app authenticates
	// back to the Camlistore API with CAMLI_AUTH.
	username, password := auth.RandToken(20), auth.RandToken(20)
	camliAuth := username + ":" + password
	basicAuth := auth.NewBasicAuth(username, password)
	envVars := map[string]string{
		"CAMLI_API_HOST":        apiHost,
		"CAMLI_AUTH":            camliAuth,
		"CAMLI_APP_BACKEND_URL": backendURL,
	}
	if appConfig != nil {
		envVars["CAMLI_APP_CONFIG_URL"] = apiHost + strings.TrimPrefix(appHandlerPrefix, "/") + "config.json"
	}
	proxyURL, err := url.Parse(backendURL)
	if err != nil {
		return nil, fmt.Errorf("could not parse backendURL %q: %v", backendURL, err)
	}
	return &Handler{
		name:       name,
		envVars:    envVars,
		auth:       basicAuth,
		appConfig:  appConfig,
		proxy:      httputil.NewSingleHostReverseProxy(proxyURL),
		backendURL: backendURL,
	}, nil
}
func newFromConfig(_ blobserver.Loader, config jsonconfig.Obj) (storage blobserver.Storage, err error) { var ( path = config.RequiredString("path") maxFileSize = config.OptionalInt("maxFileSize", 0) indexConf = config.OptionalObject("metaIndex") ) if err := config.Validate(); err != nil { return nil, err } return newStorage(path, int64(maxFileSize), indexConf) }
// newFromConfig constructs a blobpacked storage from "smallBlobs",
// "largeBlobs" (which must support sub-range fetches), and the
// "metaIndex" sorted KeyValue. It warns — but still starts — if pack
// files exist with no recorded metadata, a likely-corrupt state.
func newFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (blobserver.Storage, error) {
	var (
		smallPrefix = conf.RequiredString("smallBlobs")
		largePrefix = conf.RequiredString("largeBlobs")
		metaConf    = conf.RequiredObject("metaIndex")
	)
	if err := conf.Validate(); err != nil {
		return nil, err
	}
	small, err := ld.GetStorage(smallPrefix)
	if err != nil {
		return nil, fmt.Errorf("failed to load smallBlobs at %s: %v", smallPrefix, err)
	}
	large, err := ld.GetStorage(largePrefix)
	if err != nil {
		return nil, fmt.Errorf("failed to load largeBlobs at %s: %v", largePrefix, err)
	}
	largeSubber, ok := large.(subFetcherStorage)
	if !ok {
		return nil, fmt.Errorf("largeBlobs at %q of type %T doesn't support fetching sub-ranges of blobs", largePrefix, large)
	}
	meta, err := sorted.NewKeyValue(metaConf)
	if err != nil {
		return nil, fmt.Errorf("failed to setup blobpacked metaIndex: %v", err)
	}
	sto := &storage{
		small: small,
		large: largeSubber,
		meta:  meta,
	}
	sto.init()

	// Check for a weird state: zip files exist, but no metadata about them
	// is recorded. This is probably a corrupt state, and the user likely
	// wants to recover.
	if !sto.anyMeta() && sto.anyZipPacks() {
		log.Printf("Warning: blobpacked storage detects non-zero packed zips, but no metadata. Please re-start in recovery mode.")
		// TODO: add a recovery mode.
		// Old TODO was:
		// fail with a "known corrupt" message and refuse to
		// start unless in recovery mode (perhaps a new environment
		// var? or flag passed down?) using StreamBlobs starting at
		// "l:". Could even do it automatically if total size is
		// small or fast enough? But that's confusing if it only
		// sometimes finishes recovery. We probably want various
		// server start-up modes anyway: "check", "recover", "garbage
		// collect", "readonly". So might as well introduce that
		// concept now.

		// TODO: test start-up recovery mode, once it works.
	}
	return sto, nil
}
// ConfigFromJSON populates Config from cfg, and validates // cfg. It returns an error if cfg fails to validate. func configFromJSON(cfg jsonconfig.Obj) (config, error) { conf := config{ server: cfg.OptionalString("host", "localhost"), database: cfg.RequiredString("database"), collection: cfg.OptionalString("collection", "blobs"), user: cfg.OptionalString("user", ""), password: cfg.OptionalString("password", ""), } if err := cfg.Validate(); err != nil { return config{}, err } return conf, nil }
// newKeyValueFromConfig returns a SQLite-backed sorted.KeyValue stored
// in the file named by the required "file" key. It initializes the DB
// file when missing or empty, then verifies the schema version and
// reachability before returning.
func newKeyValueFromConfig(cfg jsonconfig.Obj) (sorted.KeyValue, error) {
	if !compiled {
		return nil, ErrNotCompiled
	}
	file := cfg.RequiredString("file")
	if err := cfg.Validate(); err != nil {
		return nil, err
	}

	// A missing or zero-length file means a fresh database: create the
	// schema before opening it for use.
	fi, err := os.Stat(file)
	if os.IsNotExist(err) || (err == nil && fi.Size() == 0) {
		if err := initDB(file); err != nil {
			return nil, fmt.Errorf("could not initialize sqlite DB at %s: %v", file, err)
		}
	}
	db, err := sql.Open("sqlite3", file)
	if err != nil {
		return nil, err
	}
	kv := &keyValue{
		file: file,
		db:   db,
		KeyValue: &sqlkv.KeyValue{
			DB: db,
			// SQLite handles a single writer; serialize access.
			Gate: syncutil.NewGate(1),
		},
	}
	version, err := kv.SchemaVersion()
	if err != nil {
		return nil, fmt.Errorf("error getting schema version (need to init database with 'camtool dbinit %s'?): %v", file, err)
	}
	if err := kv.ping(); err != nil {
		return nil, err
	}
	if version != requiredSchemaVersion {
		if env.IsDev() {
			// Good signal that we're using the devcam server, so help out
			// the user with a more useful tip:
			return nil, fmt.Errorf("database schema version is %d; expect %d (run \"devcam server --wipe\" to wipe both your blobs and re-populate the database schema)", version, requiredSchemaVersion)
		}
		return nil, fmt.Errorf("database schema version is %d; expect %d (need to re-init/upgrade database?)", version, requiredSchemaVersion)
	}
	return kv, nil
}
func newJSONSignFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (http.Handler, error) { var ( // either a short form ("26F5ABDA") or one the longer forms. keyId = conf.RequiredString("keyId") pubKeyDestPrefix = conf.OptionalString("publicKeyDest", "") secretRing = conf.OptionalString("secretRing", "") ) if err := conf.Validate(); err != nil { return nil, err } h := &Handler{ secretRing: secretRing, } var err error h.entity, err = jsonsign.EntityFromSecring(keyId, h.secretRingPath()) if err != nil { return nil, err } h.pubKey, err = jsonsign.ArmoredPublicKey(h.entity) ms := &memory.Storage{} h.pubKeyBlobRef = blob.SHA1FromString(h.pubKey) if _, err := ms.ReceiveBlob(h.pubKeyBlobRef, strings.NewReader(h.pubKey)); err != nil { return nil, fmt.Errorf("could not store pub key blob: %v", err) } h.pubKeyFetcher = ms if pubKeyDestPrefix != "" { sto, err := ld.GetStorage(pubKeyDestPrefix) if err != nil { return nil, err } h.pubKeyDest = sto } h.pubKeyBlobRefServeSuffix = "camli/" + h.pubKeyBlobRef.String() h.pubKeyHandler = &gethandler.Handler{ Fetcher: ms, } h.signer, err = schema.NewSigner(h.pubKeyBlobRef, strings.NewReader(h.pubKey), h.entity) if err != nil { return nil, err } return h, nil }
func newMemoryIndexFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) { blobPrefix := config.RequiredString("blobSource") if err := config.Validate(); err != nil { return nil, err } sto, err := ld.GetStorage(blobPrefix) if err != nil { return nil, err } ix := NewMemoryIndex() ix.InitBlobSource(sto) return ix, err }
func NewKeyValue(cfg jsonconfig.Obj) (KeyValue, error) { var s KeyValue var err error typ := cfg.RequiredString("type") ctor, ok := ctors[typ] if typ != "" && !ok { return nil, fmt.Errorf("Invalid sorted.KeyValue type %q", typ) } if ok { s, err = ctor(cfg) if err != nil { return nil, fmt.Errorf("error from %q KeyValue: %v", typ, err) } } return s, cfg.Validate() }
func newKeyValueFromJSONConfig(cfg jsonconfig.Obj) (sorted.KeyValue, error) { ins := &instance{ server: cfg.OptionalString("host", "localhost"), database: cfg.RequiredString("database"), user: cfg.OptionalString("user", ""), password: cfg.OptionalString("password", ""), } if err := cfg.Validate(); err != nil { return nil, err } db, err := ins.getCollection() if err != nil { return nil, err } return &keyValue{db: db, session: ins.session}, nil }
// newKeyValueFromJSONConfig returns a KeyValue implementation on top of a // github.com/cznic/kv file. func newKeyValueFromJSONConfig(cfg jsonconfig.Obj) (sorted.KeyValue, error) { file := cfg.RequiredString("file") if err := cfg.Validate(); err != nil { return nil, err } opts := &kv.Options{} db, err := kvutil.Open(file, opts) if err != nil { return nil, err } is := &kvis{ db: db, opts: opts, path: file, } return is, nil }
func newFromConfig(_ blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) { var ( auth = config.RequiredObject("auth") bucket = config.RequiredString("bucket") cacheSize = config.OptionalInt64("cacheSize", 32<<20) accountID = auth.RequiredString("account_id") appKey = auth.RequiredString("application_key") ) if err := config.Validate(); err != nil { return nil, err } if err := auth.Validate(); err != nil { return nil, err } var dirPrefix string if parts := strings.SplitN(bucket, "/", 2); len(parts) > 1 { dirPrefix = parts[1] bucket = parts[0] } if dirPrefix != "" && !strings.HasSuffix(dirPrefix, "/") { dirPrefix += "/" } cl, err := b2.NewClient(accountID, appKey, nil) if err != nil { return nil, err } b, err := cl.BucketByName(bucket, true) if err != nil { return nil, err } s := &Storage{ cl: cl, b: b, dirPrefix: dirPrefix, } if cacheSize != 0 { s.cache = memory.NewCache(cacheSize) } return s, nil }
func newFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (storage blobserver.Storage, err error) { sto := &nsto{} invConf := config.RequiredObject("inventory") masterName := config.RequiredString("storage") if err := config.Validate(); err != nil { return nil, err } sto.inventory, err = sorted.NewKeyValue(invConf) if err != nil { return nil, fmt.Errorf("Invalid 'inventory' configuration: %v", err) } sto.master, err = ld.GetStorage(masterName) if err != nil { return nil, fmt.Errorf("Invalid 'storage' configuration: %v", err) } return sto, nil }
// newHandlerFromConfig builds the search HTTP handler over the index
// handler at "index", owned by the "owner" blobref. With
// "slurpToMemory" set it loads the whole index corpus into memory.
// "devBlockStartupOn" optionally blocks startup on another handler.
func newHandlerFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (http.Handler, error) {
	indexPrefix := conf.RequiredString("index") // TODO: add optional help tips here?
	ownerBlobStr := conf.RequiredString("owner")
	devBlockStartupPrefix := conf.OptionalString("devBlockStartupOn", "")
	slurpToMemory := conf.OptionalBool("slurpToMemory", false)
	if err := conf.Validate(); err != nil {
		return nil, err
	}

	if devBlockStartupPrefix != "" {
		// Fetched only for its side effect: block until that handler
		// has finished loading.
		_, err := ld.GetHandler(devBlockStartupPrefix)
		if err != nil {
			return nil, fmt.Errorf("search handler references bogus devBlockStartupOn handler %s: %v", devBlockStartupPrefix, err)
		}
	}

	indexHandler, err := ld.GetHandler(indexPrefix)
	if err != nil {
		return nil, fmt.Errorf("search config references unknown handler %q", indexPrefix)
	}
	indexer, ok := indexHandler.(index.Interface)
	if !ok {
		return nil, fmt.Errorf("search config references invalid indexer %q (actually a %T)", indexPrefix, indexHandler)
	}
	ownerBlobRef, ok := blob.Parse(ownerBlobStr)
	if !ok {
		return nil, fmt.Errorf("search 'owner' has malformed blobref %q; expecting e.g. sha1-xxxxxxxxxxxx", ownerBlobStr)
	}
	h := NewHandler(indexer, ownerBlobRef)

	if slurpToMemory {
		// slurpToMemory requires the concrete *index.Index; the type
		// assertion will panic on other index.Interface implementations.
		ii := indexer.(*index.Index)
		ii.Lock()
		corpus, err := ii.KeepInMemory()
		if err != nil {
			ii.Unlock()
			return nil, fmt.Errorf("error slurping index to memory: %v", err)
		}
		h.SetCorpus(corpus)
		ii.Unlock()
	}
	return h, nil
}
// FromJSONConfig creates an HandlerConfig from the contents of config. // serverBaseURL is used if it is not found in config. func FromJSONConfig(config jsonconfig.Obj, serverBaseURL string) (HandlerConfig, error) { hc := HandlerConfig{ Program: config.RequiredString("program"), Prefix: config.RequiredString("prefix"), BackendURL: config.OptionalString("backendURL", ""), Listen: config.OptionalString("listen", ""), APIHost: config.OptionalString("apiHost", ""), ServerListen: config.OptionalString("serverListen", ""), ServerBaseURL: config.OptionalString("serverBaseURL", ""), AppConfig: config.OptionalObject("appConfig"), } if hc.ServerBaseURL == "" { hc.ServerBaseURL = serverBaseURL } if err := config.Validate(); err != nil { return HandlerConfig{}, err } return hc, nil }
func newFromConfig(_ blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) { auth := config.RequiredObject("auth") oAuthClient := oauth2.NewClient(oauth2.NoContext, oauthutil.NewRefreshTokenSource(&oauth2.Config{ Scopes: []string{Scope}, Endpoint: google.Endpoint, ClientID: auth.RequiredString("client_id"), ClientSecret: auth.RequiredString("client_secret"), RedirectURL: oauthutil.TitleBarRedirectURL, }, auth.RequiredString("refresh_token"))) parent := config.RequiredString("parent_id") if err := config.Validate(); err != nil { return nil, err } service, err := service.New(oAuthClient, parent) sto := &driveStorage{ service: service, } return sto, err }
func newShareFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (http.Handler, error) { blobRoot := conf.RequiredString("blobRoot") if blobRoot == "" { return nil, errors.New("No blobRoot defined for share handler") } if err := conf.Validate(); err != nil { return nil, err } bs, err := ld.GetStorage(blobRoot) if err != nil { return nil, fmt.Errorf("failed to get share handler's storage at %q: %v", blobRoot, err) } fetcher, ok := bs.(blob.Fetcher) if !ok { return nil, errors.New("share handler's storage not a Fetcher.") } sh := &shareHandler{ fetcher: fetcher, log: true, } return sh, nil }
// newFromConfig constructs an Amazon S3 storage from "aws_access_key",
// "aws_secret_access_key", and "bucket" (optionally
// "bucketName/dir/prefix"). Unless "skipStartupCheck" is set, it lists
// the bucket once at startup and follows S3 redirect hints to correct a
// wrong "hostname", logging the right one for the user.
func newFromConfig(_ blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) {
	hostname := config.OptionalString("hostname", "s3.amazonaws.com")
	cacheSize := config.OptionalInt64("cacheSize", 32<<20)
	client := &s3.Client{
		Auth: &s3.Auth{
			AccessKey:       config.RequiredString("aws_access_key"),
			SecretAccessKey: config.RequiredString("aws_secret_access_key"),
			Hostname:        hostname,
		},
		PutGate: syncutil.NewGate(maxParallelHTTP),
	}
	bucket := config.RequiredString("bucket")
	// Split an optional directory prefix off the bucket name and
	// normalize it to end in "/".
	var dirPrefix string
	if parts := strings.SplitN(bucket, "/", 2); len(parts) > 1 {
		dirPrefix = parts[1]
		bucket = parts[0]
	}
	if dirPrefix != "" && !strings.HasSuffix(dirPrefix, "/") {
		dirPrefix += "/"
	}
	sto := &s3Storage{
		s3Client:  client,
		bucket:    bucket,
		dirPrefix: dirPrefix,
		hostname:  hostname,
	}
	skipStartupCheck := config.OptionalBool("skipStartupCheck", false)
	if err := config.Validate(); err != nil {
		return nil, err
	}
	if cacheSize != 0 {
		// A 0 cacheSize disables the in-memory blob cache.
		sto.cache = memory.NewCache(cacheSize)
	}
	if !skipStartupCheck {
		_, err := client.ListBucket(sto.bucket, "", 1)
		if serr, ok := err.(*s3.Error); ok {
			if serr.AmazonCode == "NoSuchBucket" {
				return nil, fmt.Errorf("Bucket %q doesn't exist.", sto.bucket)
			}

			// This code appears when the hostname has dots in it:
			if serr.AmazonCode == "PermanentRedirect" {
				loc, lerr := client.BucketLocation(sto.bucket)
				if lerr != nil {
					return nil, fmt.Errorf("Wrong server for bucket %q; and error determining bucket's location: %v", sto.bucket, lerr)
				}
				// Retry against the bucket's real region endpoint.
				client.Auth.Hostname = loc
				_, err = client.ListBucket(sto.bucket, "", 1)
				if err == nil {
					log.Printf("Warning: s3 server should be %q, not %q. Change config file to avoid start-up latency.", client.Auth.Hostname, hostname)
				}
			}

			// This path occurs when the user set the
			// wrong server, or didn't set one at all, but
			// the bucket doesn't have dots in it:
			if serr.UseEndpoint != "" {
				// UseEndpoint will be e.g. "brads3test-ca.s3-us-west-1.amazonaws.com"
				// But we only want the "s3-us-west-1.amazonaws.com" part.
				client.Auth.Hostname = strings.TrimPrefix(serr.UseEndpoint, sto.bucket+".")
				_, err = client.ListBucket(sto.bucket, "", 1)
				if err == nil {
					log.Printf("Warning: s3 server should be %q, not %q. Change config file to avoid start-up latency.", client.Auth.Hostname, hostname)
				}
			}
		}
		// err is the most recent ListBucket result (possibly after the
		// redirect retries above); any remaining failure is fatal.
		if err != nil {
			return nil, fmt.Errorf("Error listing bucket %s: %v", sto.bucket, err)
		}
	}
	return sto, nil
}
// newKeyValueFromJSONConfig returns a MySQL-backed sorted.KeyValue
// using "user", "database", and the optional "host" and "password"
// keys. It creates the database and tables when needed, then enforces
// the required schema version (seeding it for a fresh database).
func newKeyValueFromJSONConfig(cfg jsonconfig.Obj) (sorted.KeyValue, error) {
	var (
		user     = cfg.RequiredString("user")
		database = cfg.RequiredString("database")
		host     = cfg.OptionalString("host", "")
		password = cfg.OptionalString("password", "")
	)
	if err := cfg.Validate(); err != nil {
		return nil, err
	}
	// The database name is interpolated into SQL below, so reject
	// anything suspicious first.
	if !validDatabaseName(database) {
		return nil, fmt.Errorf("%q looks like an invalid database name", database)
	}
	var err error
	if host != "" {
		host, err = maybeRemapCloudSQL(host)
		if err != nil {
			return nil, err
		}
		if !strings.Contains(host, ":") {
			// Default MySQL port.
			host += ":3306"
		}
		host = "tcp(" + host + ")"
	}
	// The DSN does NOT have a database name in it so it's
	// cacheable and can be shared between different queues & the
	// index, all sharing the same database server, cutting down
	// number of TCP connections required. We add the database
	// name in queries instead.
	dsn := fmt.Sprintf("%s:%s@%s/", user, password, host)
	db, err := openOrCachedDB(dsn)
	if err != nil {
		return nil, err
	}
	if err := CreateDB(db, database); err != nil {
		return nil, err
	}
	if err := createTables(db, database); err != nil {
		return nil, err
	}
	kv := &keyValue{
		dsn: dsn,
		db:  db,
		KeyValue: &sqlkv.KeyValue{
			DB:          db,
			TablePrefix: database + ".",
			Gate:        syncutil.NewGate(20), // arbitrary limit. TODO: configurable, automatically-learned?
		},
	}
	if err := kv.ping(); err != nil {
		return nil, fmt.Errorf("MySQL db unreachable: %v", err)
	}
	version, err := kv.SchemaVersion()
	if err != nil {
		return nil, fmt.Errorf("error getting current database schema version: %v", err)
	}
	if version == 0 {
		// Newly created table case: seed the schema version and return.
		if _, err := db.Exec(fmt.Sprintf(`REPLACE INTO %s.meta VALUES ('version', ?)`, database), requiredSchemaVersion); err != nil {
			return nil, fmt.Errorf("error setting schema version: %v", err)
		}
		return kv, nil
	}
	if version != requiredSchemaVersion {
		if version == 20 && requiredSchemaVersion == 21 {
			// Known upgrade path: print the migration instructions.
			fmt.Fprintf(os.Stderr, fixSchema20to21)
		}
		if env.IsDev() {
			// Good signal that we're using the devcam server, so help out
			// the user with a more useful tip:
			return nil, fmt.Errorf("database schema version is %d; expect %d (run \"devcam server --wipe\" to wipe both your blobs and re-populate the database schema)", version, requiredSchemaVersion)
		}
		return nil, fmt.Errorf("database schema version is %d; expect %d (need to re-init/upgrade database?)", version, requiredSchemaVersion)
	}
	return kv, nil
}
// newSyncFromConfig builds the sync handler copying blobs "from" one
// storage "to" another, driven by the persistent "queue" KeyValue. With
// "idle" it only serves status. "fullSyncOnStart" starts a background
// full sync; "blockingFullSyncOnStart" additionally blocks startup
// until that full sync completes. "validateOnStart" kicks off a
// background validation pass.
func newSyncFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (http.Handler, error) {
	var (
		from           = conf.RequiredString("from")
		to             = conf.RequiredString("to")
		fullSync       = conf.OptionalBool("fullSyncOnStart", false)
		blockFullSync  = conf.OptionalBool("blockingFullSyncOnStart", false)
		idle           = conf.OptionalBool("idle", false)
		queueConf      = conf.OptionalObject("queue")
		copierPoolSize = conf.OptionalInt("copierPoolSize", 5)
		validate       = conf.OptionalBool("validateOnStart", validateOnStartDefault)
	)
	if err := conf.Validate(); err != nil {
		return nil, err
	}
	if idle {
		return newIdleSyncHandler(from, to), nil
	}
	if len(queueConf) == 0 {
		return nil, errors.New(`Missing required "queue" object`)
	}
	q, err := sorted.NewKeyValue(queueConf)
	if err != nil {
		return nil, err
	}

	isToIndex := false
	fromBs, err := ld.GetStorage(from)
	if err != nil {
		return nil, err
	}
	toBs, err := ld.GetStorage(to)
	if err != nil {
		return nil, err
	}
	// Syncing storage->index gets special treatment; index->anything
	// does not count.
	if _, ok := fromBs.(*index.Index); !ok {
		if _, ok := toBs.(*index.Index); ok {
			isToIndex = true
		}
	}

	sh := newSyncHandler(from, to, fromBs, toBs, q)
	sh.toIndex = isToIndex
	sh.copierPoolSize = copierPoolSize
	if err := sh.readQueueToMemory(); err != nil {
		return nil, fmt.Errorf("Error reading sync queue to memory: %v", err)
	}

	if fullSync || blockFullSync {
		sh.logf("Doing full sync")
		didFullSync := make(chan bool, 1)
		go func() {
			// Drain the pending queue first, then do the full
			// enumeration sync, then settle into the normal loop.
			for {
				n := sh.runSync("pending blobs queue", sh.enumeratePendingBlobs)
				if n > 0 {
					sh.logf("Queue sync copied %d blobs", n)
					continue
				}
				break
			}
			n := sh.runSync("full", blobserverEnumerator(context.TODO(), fromBs))
			sh.logf("Full sync copied %d blobs", n)
			didFullSync <- true
			sh.syncLoop()
		}()
		if blockFullSync {
			sh.logf("Blocking startup, waiting for full sync from %q to %q", from, to)
			<-didFullSync
			sh.logf("Full sync complete.")
		}
	} else {
		go sh.syncLoop()
	}

	if validate {
		go sh.startFullValidation()
	}

	// New blobs received by the source are enqueued for copying.
	blobserver.GetHub(fromBs).AddReceiveHook(sh.enqueue)
	return sh, nil
}
// newFromConfig constructs the experimental "encrypt" storage target.
// It requires an explicit I_AGREE acknowledgement, a 16-byte AES-128
// key given as hex via "key" or raw bytes via "keyFile", the "blobs"
// and "meta" storage targets, and the "metaIndex" sorted KeyValue. All
// encryption metadata is read into memory before returning.
func newFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (bs blobserver.Storage, err error) {
	metaConf := config.RequiredObject("metaIndex")
	sto := &storage{}
	agreement := config.OptionalString("I_AGREE", "")
	const wantAgreement = "that encryption support hasn't been peer-reviewed, isn't finished, and its format might change."
	if agreement != wantAgreement {
		return nil, errors.New("Use of the 'encrypt' target without the proper I_AGREE value.")
	}

	key := config.OptionalString("key", "")
	keyFile := config.OptionalString("keyFile", "")
	var keyb []byte
	switch {
	case key != "":
		keyb, err = hex.DecodeString(key)
		if err != nil || len(keyb) != 16 {
			return nil, fmt.Errorf("The 'key' parameter must be 16 bytes of 32 hex digits. (currently fixed at AES-128)")
		}
	case keyFile != "":
		// TODO: check that keyFile's unix permissions aren't too permissive.
		keyb, err = ioutil.ReadFile(keyFile)
		if err != nil {
			return nil, fmt.Errorf("Reading key file %v: %v", keyFile, err)
		}
	}
	blobStorage := config.RequiredString("blobs")
	metaStorage := config.RequiredString("meta")
	if err := config.Validate(); err != nil {
		return nil, err
	}

	sto.index, err = sorted.NewKeyValue(metaConf)
	if err != nil {
		return
	}

	sto.blobs, err = ld.GetStorage(blobStorage)
	if err != nil {
		return
	}
	sto.meta, err = ld.GetStorage(metaStorage)
	if err != nil {
		return
	}

	// Neither "key" nor "keyFile" was set.
	if keyb == nil {
		// TODO: add a way to prompt from stdin on start? or keychain support?
		return nil, errors.New("no encryption key set with 'key' or 'keyFile'")
	}

	if err := sto.setKey(keyb); err != nil {
		return nil, err
	}

	start := time.Now()
	log.Printf("Reading encryption metadata...")
	if err := sto.readAllMetaBlobs(); err != nil {
		return nil, fmt.Errorf("Error scanning metadata on start-up: %v", err)
	}
	log.Printf("Read all encryption metadata in %.3f seconds", time.Since(start).Seconds())

	return sto, nil
}