// newGCSBackend constructs a Google Cloud Storage backend using a pre-existing // bucket. Credentials can be provided to the backend, sourced // from environment variables or a service account file func newGCSBackend(conf map[string]string, logger log.Logger) (Backend, error) { bucketName := os.Getenv("GOOGLE_STORAGE_BUCKET") if bucketName == "" { bucketName = conf["bucket"] if bucketName == "" { return nil, fmt.Errorf("env var GOOGLE_STORAGE_BUCKET or configuration parameter 'bucket' must be set") } } // path to service account JSON file credentialsFile := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") if credentialsFile == "" { credentialsFile = conf["credentials_file"] if credentialsFile == "" { return nil, fmt.Errorf("env var GOOGLE_APPLICATION_CREDENTIALS or configuration parameter 'credentials_file' must be set") } } client, err := storage.NewClient( context.Background(), option.WithServiceAccountFile(credentialsFile), ) if err != nil { return nil, fmt.Errorf("error establishing storage client: '%v'", err) } // check client connectivity by getting bucket attributes _, err = client.Bucket(bucketName).Attrs(context.Background()) if err != nil { return nil, fmt.Errorf("unable to access bucket '%s': '%v'", bucketName, err) } maxParStr, ok := conf["max_parallel"] var maxParInt int if ok { maxParInt, err = strconv.Atoi(maxParStr) if err != nil { return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err) } if logger.IsDebug() { logger.Debug("physical/gcs: max_parallel set", "max_parallel", maxParInt) } } g := GCSBackend{ bucketName: bucketName, client: client, permitPool: NewPermitPool(maxParInt), logger: logger, } return &g, nil }
func Example_serviceAccountFile() { // Warning: The better way to use service accounts is to set GOOGLE_APPLICATION_CREDENTIALS // and use the Application Default Credentials. ctx := context.Background() // Use a JSON key file associated with a Google service account to // authenticate and authorize. // Go to https://console.developers.google.com/permissions/serviceaccounts to create // and download a service account key for your project. // // Note: The example uses the datastore client, but the same steps apply to // the other client libraries underneath this package. client, err := datastore.NewClient(ctx, "project-id", option.WithServiceAccountFile("/path/to/service-account-key.json")) if err != nil { // TODO: handle error. } // Use the client. _ = client }
	AppVersion string // Version number for this module.

	SourceContexts []*cd.SourceContext // Description of source.

	// Verbose enables additional output. NOTE(review): exact logging effect
	// is defined elsewhere in this package — confirm before relying on it.
	Verbose bool

	ServiceAccountFile string // File containing service account credentials.
}

// serviceInterface is the set of Cloud Debugger operations used by this
// package; declared as an interface so implementations can be swapped
// (see the newService package variable below).
type serviceInterface interface {
	Register(req *cd.RegisterDebuggeeRequest) (*cd.RegisterDebuggeeResponse, error)
	Update(debuggeeID, breakpointID string, req *cd.UpdateActiveBreakpointRequest) (*cd.UpdateActiveBreakpointResponse, error)
	List(debuggeeID, waitToken string) (*cd.ListActiveBreakpointsResponse, error)
}

// newService constructs the real Cloud Debugger service client. It is a
// package-level func variable, presumably so tests can substitute a fake
// serviceInterface implementation — confirm against callers.
var newService = func(serviceAccountFile string) (serviceInterface, error) {
	// Always request the Cloud Debugger OAuth scope; attach explicit
	// service-account credentials only when a key file path was supplied.
	opts := []option.ClientOption{option.WithScopes(cd.CloudDebuggerScope)}
	if serviceAccountFile != "" {
		opts = append(opts, option.WithServiceAccountFile(serviceAccountFile))
	}
	httpClient, endpoint, err := transport.NewHTTPClient(context.Background(), opts...)
	if err != nil {
		return nil, err
	}
	s, err := cd.New(httpClient)
	if err != nil {
		return nil, err
	}
	// When the transport reports a non-empty endpoint, it overrides the
	// client library's default base path.
	if endpoint != "" {
		s.BasePath = endpoint
	}
	return &service{s: s}, nil
}
func TestGCSBackend(t *testing.T) { credentialsFile := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") // projectID is only required for creating a bucket for this test projectID := os.Getenv("GOOGLE_PROJECT_ID") if credentialsFile == "" || projectID == "" { t.SkipNow() } client, err := storage.NewClient( context.Background(), option.WithServiceAccountFile(credentialsFile), ) if err != nil { t.Fatalf("error creating storage client: '%v'", err) } var randInt = rand.New(rand.NewSource(time.Now().UnixNano())).Int() bucketName := fmt.Sprintf("vault-gcs-testacc-%d", randInt) bucket := client.Bucket(bucketName) err = bucket.Create(context.Background(), projectID, nil) if err != nil { t.Fatalf("error creating bucket '%v': '%v'", bucketName, err) } // test bucket teardown defer func() { objects_it := bucket.Objects(context.Background(), nil) time.Sleep(ConsistencyDelays.beforeList) // have to delete all objects before deleting bucket for { objAttrs, err := objects_it.Next() if err == iterator.Done { break } if err != nil { t.Fatalf("error listing bucket '%v' contents: '%v'", bucketName, err) } // ignore errors in deleting a single object, we only care about deleting the bucket // occassionally we get "storage: object doesn't exist" which is fine bucket.Object(objAttrs.Name).Delete(context.Background()) } // not a list operation, but google lists to make sure the bucket is empty on delete time.Sleep(ConsistencyDelays.beforeList) err := bucket.Delete(context.Background()) if err != nil { t.Fatalf("error deleting bucket '%s': '%v'", bucketName, err) } }() logger := logformat.NewVaultLogger(log.LevelTrace) b, err := NewBackend("gcs", logger, map[string]string{ "bucket": bucketName, "credentials_file": credentialsFile, }) if err != nil { t.Fatalf("error creating google cloud storage backend: '%s'", err) } testEventuallyConsistentBackend(t, b, ConsistencyDelays) testEventuallyConsistentBackend_ListPrefix(t, b, ConsistencyDelays) }