func newFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (storage blobserver.Storage, err error) {
	var (
		origin        = config.RequiredString("origin")
		cache         = config.RequiredString("cache")
		kvConf        = config.RequiredObject("meta")
		maxCacheBytes = config.OptionalInt64("maxCacheBytes", 512<<20)
	)
	if err := config.Validate(); err != nil {
		return nil, err
	}
	cacheSto, err := ld.GetStorage(cache)
	if err != nil {
		return nil, err
	}
	originSto, err := ld.GetStorage(origin)
	if err != nil {
		return nil, err
	}
	kv, err := sorted.NewKeyValue(kvConf)
	if err != nil {
		return nil, err
	}

	// TODO: enumerate through kv and calculate current size.
	// Maybe also even enumerate through cache to see if they match.
	// Or even: keep it only in memory and not in kv?

	s := &sto{
		origin:        originSto,
		cache:         cacheSto,
		maxCacheBytes: maxCacheBytes,
		kv:            kv,
	}
	return s, nil
}
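The constructor above reads exactly four keys from its JSON config. A minimal sketch of such a config object follows; the prefix values and the "meta" key/value backend are illustrative assumptions, only the key names come from the code:

	{
	    "origin": "/sto-origin/",
	    "cache": "/sto-cache/",
	    "meta": {"type": "leveldb", "file": "proxycache-index.leveldb"},
	    "maxCacheBytes": 536870912
	}

"maxCacheBytes" is optional and is shown here at its default of 512 MiB (512<<20 bytes).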
func newFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) {
	blobPrefix := config.RequiredString("blobSource")
	kvConfig := config.RequiredObject("storage")
	if err := config.Validate(); err != nil {
		return nil, err
	}

	kv, err := sorted.NewKeyValue(kvConfig)
	if err != nil {
		return nil, err
	}

	sto, err := ld.GetStorage(blobPrefix)
	if err != nil {
		return nil, err
	}

	ix, err := New(kv)
	// TODO(mpl): next time we need to do another fix, make a new error
	// type that lets us apply the needed fix depending on its value or
	// something. For now just one value/fix.
	if err == errMissingWholeRef {
		// TODO: maybe we don't want to do that automatically. Brad says
		// we have to think about the case on GCE/CoreOS in particular.
		if err := ix.fixMissingWholeRef(sto); err != nil {
			ix.Close()
			return nil, fmt.Errorf("could not fix missing wholeRef entries: %v", err)
		}
		ix, err = New(kv)
	}
	if err != nil {
		return nil, err
	}

	ix.InitBlobSource(sto)
	return ix, err
}
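This index constructor needs only a blob source prefix and a sorted key/value config. A hypothetical sketch, where the prefix and the storage backend are assumptions:

	{
	    "blobSource": "/bs/",
	    "storage": {"type": "leveldb", "file": "index.leveldb"}
	}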
// doConfig reads the Google Storage config file and creates a Client.
// It skips the test if --gs_config_path is unset, and fails it on any error.
func doConfig(t *testing.T) (gsa *Client, bucket string) {
	if *gsConfigPath == "" {
		t.Skip("Skipping manual test. Set flag --gs_config_path to test Google Storage.")
	}

	cf, err := osutil.NewJSONConfigParser().ReadFile(*gsConfigPath)
	if err != nil {
		t.Fatalf("Failed to read config: %v", err)
	}

	config := cf.RequiredObject("gsconf")
	if err := cf.Validate(); err != nil {
		t.Fatalf("Invalid config: %v", err)
	}

	auth := config.RequiredObject("auth")
	bucket = config.RequiredString("bucket")
	if err := config.Validate(); err != nil {
		t.Fatalf("Invalid config: %v", err)
	}

	gsa = NewClient(oauth2.NewClient(oauth2.NoContext, oauthutil.NewRefreshTokenSource(&oauth2.Config{
		Scopes:       []string{Scope},
		Endpoint:     google.Endpoint,
		ClientID:     auth.RequiredString("client_id"),
		ClientSecret: auth.RequiredString("client_secret"),
		RedirectURL:  oauthutil.TitleBarRedirectURL,
	}, auth.RequiredString("refresh_token"))))

	if err := auth.Validate(); err != nil {
		t.Fatalf("Invalid config: %v", err)
	}
	return
}
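The test expects a JSON file whose top-level "gsconf" object carries the bucket name and OAuth credentials. A sketch of such a file, with placeholder values, inferred from the keys the code reads:

	{
	    "gsconf": {
	        "auth": {
	            "client_id": "...",
	            "client_secret": "...",
	            "refresh_token": "..."
	        },
	        "bucket": "my-test-bucket"
	    }
	}

It would be wired up with something like go test --gs_config_path=/path/to/gsconfig.json (path illustrative).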
func newFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (blobserver.Storage, error) {
	var (
		smallPrefix = conf.RequiredString("smallBlobs")
		largePrefix = conf.RequiredString("largeBlobs")
		metaConf    = conf.RequiredObject("metaIndex")
	)
	if err := conf.Validate(); err != nil {
		return nil, err
	}
	small, err := ld.GetStorage(smallPrefix)
	if err != nil {
		return nil, fmt.Errorf("failed to load smallBlobs at %s: %v", smallPrefix, err)
	}
	large, err := ld.GetStorage(largePrefix)
	if err != nil {
		return nil, fmt.Errorf("failed to load largeBlobs at %s: %v", largePrefix, err)
	}
	largeSubber, ok := large.(subFetcherStorage)
	if !ok {
		return nil, fmt.Errorf("largeBlobs at %q of type %T doesn't support fetching sub-ranges of blobs", largePrefix, large)
	}
	meta, err := sorted.NewKeyValue(metaConf)
	if err != nil {
		return nil, fmt.Errorf("failed to setup blobpacked metaIndex: %v", err)
	}
	sto := &storage{
		small: small,
		large: largeSubber,
		meta:  meta,
	}
	sto.init()

	recoveryMu.Lock()
	defer recoveryMu.Unlock()
	if recovery {
		log.Print("Starting recovery of blobpacked index")
		if err := meta.Close(); err != nil {
			return nil, err
		}
		if err := sto.reindex(context.TODO(), func() (sorted.KeyValue, error) {
			return sorted.NewKeyValue(metaConf)
		}); err != nil {
			return nil, err
		}
		return sto, nil
	}

	// Check for a weird state: zip files exist, but no metadata about them
	// is recorded. This is probably a corrupt state, and the user likely
	// wants to recover.
	if !sto.anyMeta() && sto.anyZipPacks() {
		log.Fatal("Error: blobpacked storage detects non-zero packed zips, but no metadata. Please re-start in recovery mode with -recovery.")
	}

	return sto, nil
}
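A sketch of the blobpacked config this constructor parses; the prefixes and the metaIndex backend are illustrative, the three key names are from the code:

	{
	    "smallBlobs": "/bs-loose/",
	    "largeBlobs": "/bs-packed/",
	    "metaIndex": {"type": "leveldb", "file": "blobpacked-index.leveldb"}
	}

Note that whatever handler "largeBlobs" points at must support fetching sub-ranges of blobs, or the subFetcherStorage type assertion fails at startup.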
func newFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (blobserver.Storage, error) {
	var (
		smallPrefix = conf.RequiredString("smallBlobs")
		largePrefix = conf.RequiredString("largeBlobs")
		metaConf    = conf.RequiredObject("metaIndex")
	)
	if err := conf.Validate(); err != nil {
		return nil, err
	}
	small, err := ld.GetStorage(smallPrefix)
	if err != nil {
		return nil, fmt.Errorf("failed to load smallBlobs at %s: %v", smallPrefix, err)
	}
	large, err := ld.GetStorage(largePrefix)
	if err != nil {
		return nil, fmt.Errorf("failed to load largeBlobs at %s: %v", largePrefix, err)
	}
	largeSubber, ok := large.(subFetcherStorage)
	if !ok {
		return nil, fmt.Errorf("largeBlobs at %q of type %T doesn't support fetching sub-ranges of blobs", largePrefix, large)
	}
	meta, err := sorted.NewKeyValue(metaConf)
	if err != nil {
		return nil, fmt.Errorf("failed to setup blobpacked metaIndex: %v", err)
	}
	sto := &storage{
		small: small,
		large: largeSubber,
		meta:  meta,
	}
	sto.init()

	// Check for a weird state: zip files exist, but no metadata about them
	// is recorded. This is probably a corrupt state, and the user likely
	// wants to recover.
	if !sto.anyMeta() && sto.anyZipPacks() {
		log.Printf("Warning: blobpacked storage detects non-zero packed zips, but no metadata. Please re-start in recovery mode.")
		// TODO: add a recovery mode.
		// Old TODO was:
		// fail with a "known corrupt" message and refuse to
		// start unless in recovery mode (perhaps a new environment
		// var? or flag passed down?) using StreamBlobs starting at
		// "l:". Could even do it automatically if total size is
		// small or fast enough? But that's confusing if it only
		// sometimes finishes recovery. We probably want various
		// server start-up modes anyway: "check", "recover", "garbage
		// collect", "readonly". So might as well introduce that
		// concept now.
		// TODO: test start-up recovery mode, once it works.
	}

	return sto, nil
}
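Both revisions above gate on the large storage implementing subFetcherStorage, whose definition is not part of this excerpt. Purely for illustration, a plausible shape; the method name and signature are assumptions, not the package's actual declaration:

	// Hypothetical sketch: storage that can also serve byte ranges of a blob.
	// The real definition lives elsewhere in the package.
	type subFetcherStorage interface {
		blobserver.Storage
		// SubFetch would return a reader over bytes [offset, offset+length) of ref.
		SubFetch(ref blob.Ref, offset, length int64) (io.ReadCloser, error)
	}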
func newFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (storage blobserver.Storage, err error) {
	sto := &nsto{}
	invConf := config.RequiredObject("inventory")
	masterName := config.RequiredString("storage")
	if err := config.Validate(); err != nil {
		return nil, err
	}
	sto.inventory, err = sorted.NewKeyValue(invConf)
	if err != nil {
		return nil, fmt.Errorf("Invalid 'inventory' configuration: %v", err)
	}
	sto.master, err = ld.GetStorage(masterName)
	if err != nil {
		return nil, fmt.Errorf("Invalid 'storage' configuration: %v", err)
	}
	return sto, nil
}
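The namespace storage wraps a master storage with its own inventory index. A config sketch, with illustrative values and key names taken from the code:

	{
	    "storage": "/bs/",
	    "inventory": {"type": "leveldb", "file": "namespace-inventory.leveldb"}
	}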
func newFromConfig(_ blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) {
	var (
		auth      = config.RequiredObject("auth")
		bucket    = config.RequiredString("bucket")
		cacheSize = config.OptionalInt64("cacheSize", 32<<20)

		accountID = auth.RequiredString("account_id")
		appKey    = auth.RequiredString("application_key")
	)

	if err := config.Validate(); err != nil {
		return nil, err
	}
	if err := auth.Validate(); err != nil {
		return nil, err
	}

	var dirPrefix string
	if parts := strings.SplitN(bucket, "/", 2); len(parts) > 1 {
		dirPrefix = parts[1]
		bucket = parts[0]
	}
	if dirPrefix != "" && !strings.HasSuffix(dirPrefix, "/") {
		dirPrefix += "/"
	}

	cl, err := b2.NewClient(accountID, appKey, nil)
	if err != nil {
		return nil, err
	}
	b, err := cl.BucketByName(bucket, true)
	if err != nil {
		return nil, err
	}

	s := &Storage{
		cl:        cl,
		b:         b,
		dirPrefix: dirPrefix,
	}

	if cacheSize != 0 {
		s.cache = memory.NewCache(cacheSize)
	}

	return s, nil
}
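A sketch of the B2 config; the keys are from the code, the values are placeholders. A "bucket" value of the form name/dir is split into the bucket name and a directory prefix, and the prefix gains a trailing slash:

	{
	    "auth": {
	        "account_id": "...",
	        "application_key": "..."
	    },
	    "bucket": "mybucket/camli",
	    "cacheSize": 33554432
	}

"cacheSize" is optional and shown at its 32 MiB default; setting it to 0 disables the in-memory cache.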
func newFromConfig(_ blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) {
	auth := config.RequiredObject("auth")
	oAuthClient := oauth2.NewClient(oauth2.NoContext, oauthutil.NewRefreshTokenSource(&oauth2.Config{
		Scopes:       []string{Scope},
		Endpoint:     google.Endpoint,
		ClientID:     auth.RequiredString("client_id"),
		ClientSecret: auth.RequiredString("client_secret"),
		RedirectURL:  oauthutil.TitleBarRedirectURL,
	}, auth.RequiredString("refresh_token")))
	parent := config.RequiredString("parent_id")
	if err := config.Validate(); err != nil {
		return nil, err
	}

	service, err := service.New(oAuthClient, parent)
	if err != nil {
		return nil, err
	}
	return &driveStorage{
		service: service,
	}, nil
}
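A sketch of the Drive config; keys are from the code, values are placeholders, and "parent_id" is presumably the ID of the Drive folder that acts as the blob root:

	{
	    "auth": {
	        "client_id": "...",
	        "client_secret": "...",
	        "refresh_token": "..."
	    },
	    "parent_id": "..."
	}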
func newFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (bs blobserver.Storage, err error) {
	metaConf := config.RequiredObject("metaIndex")
	sto := &storage{}
	agreement := config.OptionalString("I_AGREE", "")
	const wantAgreement = "that encryption support hasn't been peer-reviewed, isn't finished, and its format might change."
	if agreement != wantAgreement {
		return nil, errors.New("Use of the 'encrypt' target without the proper I_AGREE value.")
	}

	key := config.OptionalString("key", "")
	keyFile := config.OptionalString("keyFile", "")
	var keyb []byte
	switch {
	case key != "":
		keyb, err = hex.DecodeString(key)
		if err != nil || len(keyb) != 16 {
			return nil, fmt.Errorf("The 'key' parameter must be 16 bytes of 32 hex digits. (currently fixed at AES-128)")
		}
	case keyFile != "":
		// TODO: check that keyFile's unix permissions aren't too permissive.
		keyb, err = ioutil.ReadFile(keyFile)
		if err != nil {
			return nil, fmt.Errorf("Reading key file %v: %v", keyFile, err)
		}
	}
	blobStorage := config.RequiredString("blobs")
	metaStorage := config.RequiredString("meta")
	if err := config.Validate(); err != nil {
		return nil, err
	}

	sto.index, err = sorted.NewKeyValue(metaConf)
	if err != nil {
		return
	}

	sto.blobs, err = ld.GetStorage(blobStorage)
	if err != nil {
		return
	}
	sto.meta, err = ld.GetStorage(metaStorage)
	if err != nil {
		return
	}

	if keyb == nil {
		// TODO: add a way to prompt from stdin on start? or keychain support?
		return nil, errors.New("no encryption key set with 'key' or 'keyFile'")
	}

	if err := sto.setKey(keyb); err != nil {
		return nil, err
	}

	start := time.Now()
	log.Printf("Reading encryption metadata...")
	if err := sto.readAllMetaBlobs(); err != nil {
		return nil, fmt.Errorf("Error scanning metadata on start-up: %v", err)
	}
	log.Printf("Read all encryption metadata in %.3f seconds", time.Since(start).Seconds())

	return sto, nil
}
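A sketch of the encrypt config. The "I_AGREE" string must match the code's wantAgreement constant verbatim; the key below is an obviously non-secret placeholder (16 bytes as 32 hex digits), and the prefixes and index backend are illustrative:

	{
	    "I_AGREE": "that encryption support hasn't been peer-reviewed, isn't finished, and its format might change.",
	    "key": "00112233445566778899aabbccddeeff",
	    "blobs": "/encrypted-blobs/",
	    "meta": "/encrypted-meta/",
	    "metaIndex": {"type": "leveldb", "file": "encrypt-index.leveldb"}
	}

Alternatively, "keyFile" may name a file whose contents are read and used as the key instead of inlining "key".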
func newFromConfig(_ blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) {
	var (
		auth      = config.RequiredObject("auth")
		bucket    = config.RequiredString("bucket")
		cacheSize = config.OptionalInt64("cacheSize", 32<<20)

		clientID     = auth.RequiredString("client_id") // or "auto" for service accounts
		clientSecret = auth.OptionalString("client_secret", "")
		refreshToken = auth.OptionalString("refresh_token", "")
	)

	if err := config.Validate(); err != nil {
		return nil, err
	}
	if err := auth.Validate(); err != nil {
		return nil, err
	}

	var dirPrefix string
	if parts := strings.SplitN(bucket, "/", 2); len(parts) > 1 {
		dirPrefix = parts[1]
		bucket = parts[0]
	}
	if dirPrefix != "" && !strings.HasSuffix(dirPrefix, "/") {
		dirPrefix += "/"
	}
	gs := &Storage{
		bucket:    bucket,
		dirPrefix: dirPrefix,
	}

	if clientID == "auto" {
		var err error
		gs.client, err = googlestorage.NewServiceClient()
		if err != nil {
			return nil, err
		}
	} else {
		if clientSecret == "" {
			return nil, errors.New("missing required parameter 'client_secret'")
		}
		if refreshToken == "" {
			return nil, errors.New("missing required parameter 'refresh_token'")
		}
		oAuthClient := oauth2.NewClient(oauth2.NoContext, oauthutil.NewRefreshTokenSource(&oauth2.Config{
			Scopes:       []string{googlestorage.Scope},
			Endpoint:     google.Endpoint,
			ClientID:     clientID,
			ClientSecret: clientSecret,
			RedirectURL:  oauthutil.TitleBarRedirectURL,
		}, refreshToken))
		gs.client = googlestorage.NewClient(oAuthClient)
	}

	if cacheSize != 0 {
		gs.cache = memory.NewCache(cacheSize)
	}

	bi, err := gs.client.BucketInfo(bucket)
	if err != nil {
		return nil, fmt.Errorf("error statting bucket %q: %v", bucket, err)
	}
	hash := sha1.New()
	fmt.Fprintf(hash, "%v%v", bi.TimeCreated, bi.Metageneration)
	gs.genRandom = fmt.Sprintf("%x", hash.Sum(nil))
	gs.genTime, _ = time.Parse(time.RFC3339, bi.TimeCreated)

	return gs, nil
}
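This revision supports two auth modes. The sketch below shows the explicit OAuth form, with placeholders throughout; alternatively "client_id": "auto" selects a service-account client via googlestorage.NewServiceClient():

	{
	    "auth": {
	        "client_id": "...",
	        "client_secret": "...",
	        "refresh_token": "..."
	    },
	    "bucket": "mybucket/blobs",
	    "cacheSize": 33554432
	}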
func newFromConfig(_ blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) {
	var (
		auth      = config.RequiredObject("auth")
		bucket    = config.RequiredString("bucket")
		cacheSize = config.OptionalInt64("cacheSize", 32<<20)

		clientID     = auth.RequiredString("client_id") // or "auto" for service accounts
		clientSecret = auth.OptionalString("client_secret", "")
		refreshToken = auth.OptionalString("refresh_token", "")
	)

	if err := config.Validate(); err != nil {
		return nil, err
	}
	if err := auth.Validate(); err != nil {
		return nil, err
	}

	var dirPrefix string
	if parts := strings.SplitN(bucket, "/", 2); len(parts) > 1 {
		dirPrefix = parts[1]
		bucket = parts[0]
	}
	if dirPrefix != "" && !strings.HasSuffix(dirPrefix, "/") {
		dirPrefix += "/"
	}
	gs := &Storage{
		bucket:    bucket,
		dirPrefix: dirPrefix,
	}

	var (
		ctx = context.Background()
		ts  oauth2.TokenSource
		cl  *storage.Client
		err error
	)
	if clientID == "auto" {
		if !metadata.OnGCE() {
			return nil, errors.New(`Cannot use "auto" client_id when not running on GCE`)
		}
		ts, err = google.DefaultTokenSource(ctx, storage.ScopeReadWrite)
		if err != nil {
			return nil, err
		}
		cl, err = storage.NewClient(ctx)
		if err != nil {
			return nil, err
		}
	} else {
		if clientSecret == "" {
			return nil, errors.New("missing required parameter 'client_secret'")
		}
		if refreshToken == "" {
			return nil, errors.New("missing required parameter 'refresh_token'")
		}
		ts = oauthutil.NewRefreshTokenSource(&oauth2.Config{
			Scopes:       []string{storage.ScopeReadWrite},
			Endpoint:     google.Endpoint,
			ClientID:     clientID,
			ClientSecret: clientSecret,
			RedirectURL:  oauthutil.TitleBarRedirectURL,
		}, refreshToken)
		cl, err = storage.NewClient(ctx, cloud.WithTokenSource(ts))
		if err != nil {
			return nil, err
		}
	}

	gs.baseHTTPClient = oauth2.NewClient(ctx, ts)
	gs.client = cl

	if cacheSize != 0 {
		gs.cache = memory.NewCache(cacheSize)
	}

	ba, err := gs.client.Bucket(gs.bucket).Attrs(ctx)
	if err != nil {
		return nil, fmt.Errorf("error statting bucket %q: %v", gs.bucket, err)
	}
	hash := sha1.New()
	fmt.Fprintf(hash, "%v%v", ba.Created, ba.MetaGeneration)
	gs.genRandom = fmt.Sprintf("%x", hash.Sum(nil))
	gs.genTime = ba.Created

	return gs, nil
}
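The newer revision keeps the same config keys but routes through the cloud storage client. Its "auto" form, sketched below, only works on GCE because of the metadata.OnGCE() guard; off GCE the constructor returns an error:

	{
	    "auth": {
	        "client_id": "auto"
	    },
	    "bucket": "mybucket"
	}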