// Start validates the CloudStore configuration, builds an authenticated
// Google Cloud Storage service for the App Engine context, and ensures the
// configured bucket exists (creating it under cloud.Project when missing).
//
// Returns InvalidBucket when no bucket is configured, InvalidGoogleContext
// when the App Engine context is missing, or any error from the storage API.
func (cloud *CloudStore) Start() error {
	if cloud.Bucket == "" {
		return InvalidBucket
	}
	if cloud.Context == nil {
		return InvalidGoogleContext
	}
	// App Engine requires urlfetch as the underlying transport; the oauth2
	// transport layers token injection (full-control storage scope) on top.
	client := &http.Client{
		Transport: &oauth2.Transport{
			Source: google.AppEngineTokenSource(cloud.Context, storage.DevstorageFullControlScope),
			Base:   &urlfetch.Transport{Context: cloud.Context},
		},
	}
	service, err := storage.New(client)
	if err != nil {
		log.Printf("Unable to get storage service %v", err)
		return err
	}
	cloud.service = service
	if _, err := service.Buckets.Get(cloud.Bucket).Do(); err == nil {
		// NOTE(review): err is always nil in this branch, so the trailing %v
		// prints "<nil>" — consider dropping it from the log line.
		log.Printf("Got storage bucket %v %v", cloud.Bucket, err)
	} else {
		// Bucket lookup failed; attempt to create it in the project.
		if _, err := service.Buckets.Insert(cloud.Project, &storage.Bucket{Name: cloud.Bucket}).Do(); err == nil {
			log.Printf("Created bucket: %v", cloud.Bucket)
		} else {
			return err
		}
	}
	return nil
}
func TestGetGSResultFileLocations(t *testing.T) { testutils.SkipIfShort(t) storage, err := storage.New(http.DefaultClient) assert.Nil(t, err) startTS := time.Date(2014, time.December, 10, 0, 0, 0, 0, time.UTC).Unix() endTS := time.Date(2014, time.December, 10, 23, 59, 59, 0, time.UTC).Unix() // TODO(stephana): Switch this to a dedicated test bucket, so we are not // in danger of removing it. resultFiles, err := getGSResultsFileLocations(startTS, endTS, storage, "chromium-skia-gm", "dm-json-v1") assert.Nil(t, err) // Read the expected list of files and compare them. content, err := ioutil.ReadFile("./testdata/filelist_dec_10.txt") assert.Nil(t, err) lines := strings.Split(strings.TrimSpace(string(content)), "\n") sort.Strings(lines) resultNames := make([]string, len(resultFiles)) for idx, rf := range resultFiles { resultNames[idx] = rf.Name } sort.Strings(resultNames) assert.Equal(t, len(lines), len(resultNames)) assert.Equal(t, lines, resultNames) }
func handleStorageGet(c context.Context, w http.ResponseWriter, r *http.Request) { bucket := r.URL.Query().Get("bucket") name := r.URL.Query().Get("name") if bucket == "" || name == "" { w.WriteHeader(http.StatusBadRequest) w.Write([]byte("Missing bucket or name query parameter.")) return } client, err := google.DefaultClient(c, storage.DevstorageReadOnlyScope) if err != nil { w.WriteHeader(http.StatusInternalServerError) w.Write([]byte("Failed to get default google client. " + err.Error())) return } service, err := storage.New(client) if err != nil { w.WriteHeader(http.StatusInternalServerError) w.Write([]byte("Failed to get storage service. " + err.Error())) return } res, err := service.Objects.Get(bucket, name).Download() if err != nil { w.WriteHeader(http.StatusInternalServerError) w.Write([]byte("Failed to get object. " + err.Error())) return } w.Header().Set("Content-Type", res.Header.Get("Content-Type")) _, err = io.Copy(w, res.Body) if err != nil { // to late to change status code now clog.Errorf(c, "io.Copy failed to copy storage get to response. %v", err) } }
// Will need a local valid google_storage_token.data file with read write access // to run the below test. func Auth_TestUploadWorkerArtifacts(t *testing.T) { client, _ := GetOAuthClient() gs, err := NewGsUtil(client) if err != nil { t.Errorf("Unexpected error: %s", err) } testDir := "testupload" testPagesetType := "10ktest" StorageDir = "testdata" if err := gs.UploadWorkerArtifacts(testDir, testPagesetType, 1); err != nil { t.Errorf("Unexpected error: %s", err) } // Examine contents of the remote directory and then clean it up. service, err := storage.New(gs.client) if err != nil { t.Errorf("Unexpected error: %s", err) } gsDir := filepath.Join(testDir, testPagesetType, "slave1") resp, err := service.Objects.List(GS_BUCKET_NAME).Prefix(gsDir + "/").Do() if err != nil { t.Errorf("Unexpected error: %s", err) } assert.Equal(t, 3, len(resp.Items)) for index, fileName := range []string{"TIMESTAMP", "alexa1-1.py", "alexa2-2.py"} { filePath := fmt.Sprintf("%s/%s", gsDir, fileName) defer util.LogErr(service.Objects.Delete(GS_BUCKET_NAME, filePath).Do()) assert.Equal(t, filePath, resp.Items[index].Name) } }
// ListBuckets returns a slice of all the buckets in the given projectId. // [START ListBuckets] func ListBuckets(projectId string) ([]*storage.Bucket, error) { // Create the client that uses Application Default Credentials client, err := google.DefaultClient( oauth2.NoContext, "https://www.googleapis.com/auth/devstorage.read_only") if err != nil { return nil, err } // Create the Google Cloud Storage service service, err := storage.New(client) if err != nil { return nil, err } // Create the request to list buckets for the project id request := service.Buckets.List(projectId) // Execute the request buckets, err := request.Do() if err != nil { return nil, err } return buckets.Items, nil }
func handleStoragePut(c context.Context, w http.ResponseWriter, r *http.Request) { bucket := r.URL.Query().Get("bucket") name := r.URL.Query().Get("name") value := r.URL.Query().Get("value") if bucket == "" || name == "" || value == "" { w.WriteHeader(http.StatusBadRequest) w.Write([]byte("Missing bucket, name, or value query parameter.")) return } client, err := google.DefaultClient(c, storage.DevstorageReadWriteScope) if err != nil { w.WriteHeader(http.StatusInternalServerError) w.Write([]byte("Failed to get default google client. " + err.Error())) return } service, err := storage.New(client) if err != nil { w.WriteHeader(http.StatusInternalServerError) w.Write([]byte("Failed to get storage service. " + err.Error())) return } obj, err := service.Objects.Insert(bucket, &storage.Object{Name: name}).Media(strings.NewReader(value)).Do() if err != nil { w.WriteHeader(http.StatusInternalServerError) w.Write([]byte("Failed to insert object. " + err.Error())) return } w.Write([]byte(fmt.Sprintf("put succeeded: %v", obj))) }
// New constructs a new driver func New(params driverParameters) (storagedriver.StorageDriver, error) { rootDirectory := strings.Trim(params.rootDirectory, "/") if rootDirectory != "" { rootDirectory += "/" } if params.chunkSize <= 0 || params.chunkSize%minChunkSize != 0 { return nil, fmt.Errorf("Invalid chunksize: %d is not a positive multiple of %d", params.chunkSize, minChunkSize) } service, err := storageapi.New(params.client) if err != nil { return nil, err } if _, err := service.Buckets.Get(params.bucket).Do(); err != nil { if _, err := service.Buckets.Insert(params.projectID, &storageapi.Bucket{Name: params.bucket}).Do(); err != nil { return nil, err } } d := &driver{ bucket: params.bucket, rootDirectory: rootDirectory, email: params.email, privateKey: params.privateKey, client: params.client, chunkSize: params.chunkSize, } return &base.Base{ StorageDriver: d, }, nil }
// NewClient wraps the given OAuth-authenticated HTTP client in a Client
// with an attached storage API service.
//
// NOTE(review): the error from api.New is discarded. api.New only fails for
// a nil client, but a failure here would leave service nil and panic on
// first use — consider surfacing the error to callers.
func NewClient(oauthClient *http.Client) *Client {
	service, _ := api.New(oauthClient)
	return &Client{
		client:  oauthClient,
		service: service,
	}
}
func TestMediaErrHandling(t *testing.T) { handler := &myHandler{} server := httptest.NewServer(handler) defer server.Close() client := &http.Client{} s, err := storage.New(client) if err != nil { t.Fatalf("unable to create service: %v", err) } s.BasePath = server.URL const body = "fake media data" f := strings.NewReader(body) // The combination of TimeoutReader and OneByteReader causes the first byte to // be successfully delivered, but then a timeout error is reported. This // allows us to test the goroutine within the getMediaType function. r := iotest.TimeoutReader(iotest.OneByteReader(f)) o := &storage.Object{ Bucket: "mybucket", Name: "filename", ContentType: "plain/text", ContentEncoding: "utf-8", ContentLanguage: "en", } _, err = s.Objects.Insert("mybucket", o).Media(r).Do() if err == nil || !strings.Contains(err.Error(), "timeout") { t.Errorf("expected timeout error, got %v", err) } if handler.err != nil { t.Errorf("handler err = %v, want nil", handler.err) } }
func TestUserAgent(t *testing.T) { handler := &myHandler{} server := httptest.NewServer(handler) defer server.Close() client := &http.Client{} s, err := storage.New(client) if err != nil { t.Fatalf("unable to create service: %v", err) } s.BasePath = server.URL s.UserAgent = "myagent/1.0" f := strings.NewReader("fake media data") o := &storage.Object{ Bucket: "mybucket", Name: "filename", ContentType: "plain/text", ContentEncoding: "utf-8", ContentLanguage: "en", } _, err = s.Objects.Insert("mybucket", o).Media(f).Do() if err != nil { t.Fatalf("unable to insert object: %v", err) } g := handler.r if w, k := "google-api-go-client/0.5 myagent/1.0", "User-Agent"; len(g.Header[k]) != 1 || g.Header[k][0] != w { t.Errorf("header %q = %#v; want %q", k, g.Header[k], w) } }
func newGCSRepo(r *Repo, httpClient *http.Client) (*GCSRepo, error) { URL := r.GetURL() m := GCSRepoURLMatcher.FindStringSubmatch(URL) if len(m) != 2 { return nil, fmt.Errorf("URL must be of the form gs://<bucket>, was %s", URL) } if err := validateRepoType(r.GetType()); err != nil { return nil, err } if httpClient == nil { httpClient = http.DefaultClient } gcs, err := storage.New(httpClient) if err != nil { return nil, fmt.Errorf("cannot create storage service for %s: %s", URL, err) } gcsr := &GCSRepo{ Repo: *r, httpClient: httpClient, service: gcs, bucket: m[1], } return gcsr, nil }
func init() { var err error st, err = storage.New(util.NewTimeoutClient()) if err != nil { panic("Can't construct HTTP client") } ingester.Register(config.CONSTRUCTOR_NANO_TRYBOT, NewTrybotResultIngester) }
// Create sets up and starts a Google Compute Engine instance as defined in d.Conf. It // creates the necessary Google Storage buckets beforehand. func (d *Deployer) Create(ctx *context.Context) (*compute.Instance, error) { if err := d.checkProjectID(); err != nil { return nil, err } computeService, _ := compute.New(d.Client) storageService, _ := storage.New(d.Client) fwc := make(chan error, 1) go func() { fwc <- d.setFirewall(ctx, computeService) }() config := cloudConfig(d.Conf) const maxCloudConfig = 32 << 10 // per compute API docs if len(config) > maxCloudConfig { return nil, fmt.Errorf("cloud config length of %d bytes is over %d byte limit", len(config), maxCloudConfig) } // TODO(mpl): maybe add a wipe mode where we erase other instances before attempting to create. if zone, err := d.projectHasInstance(); zone != "" { return nil, instanceExistsError{ project: d.Conf.Project, zone: zone, } } else if err != nil { return nil, fmt.Errorf("could not scan project for existing instances: %v", err) } if err := d.setBuckets(storageService, ctx); err != nil { return nil, fmt.Errorf("could not create buckets: %v", err) } if err := d.setupHTTPS(storageService); err != nil { return nil, fmt.Errorf("could not setup HTTPS: %v", err) } if err := d.createInstance(computeService, ctx); err != nil { return nil, fmt.Errorf("could not create compute instance: %v", err) } inst, err := computeService.Instances.Get(d.Conf.Project, d.Conf.Zone, d.Conf.Name).Do() if err != nil { return nil, fmt.Errorf("error getting instance after creation: %v", err) } if Verbose { ij, _ := json.MarshalIndent(inst, "", " ") log.Printf("Instance: %s", ij) } if err = <-fwc; err != nil { return nil, fmt.Errorf("could not create firewall rules: %v", err) } return inst, nil }
// GetGCSRegistry returns a new Google Cloud Storage . If there's a credential that is specified, will try // to fetch it and use it, and if there's no credential found, will fall back to unauthenticated client. func (gcsrp gcsRegistryProvider) GetGCSRegistry(cr common.Registry) (ObjectStorageRegistry, error) { client, err := gcsrp.createGCSClient(cr.CredentialName) if err != nil { return nil, err } service, err := storage.New(client) if err != nil { log.Fatalf("Unable to create storage service: %v", err) } return NewGCSRegistry(cr.Name, cr.URL, client, service) }
func setupOAuth() error { client, err := auth.NewDefaultJWTServiceAccountClient(auth.SCOPE_READ_WRITE) if err != nil { return fmt.Errorf("Problem setting up client OAuth: %v", err) } if storageService, err = storage.New(client); err != nil { return fmt.Errorf("Problem authenticating: %v", err) } return nil }
func main() { flag.Parse() flag.Usage = func() { fmt.Fprintf(os.Stderr, "Usage:%v <bucket> [flags]\n", os.Args[0]) flag.PrintDefaults() } if flag.NArg() != 1 { fmt.Fprintf(os.Stderr, "Missing bucket\n") flag.Usage() os.Exit(1) } bucketName := flag.Arg(0) // DefaultClient uses DefaultTokenSource. // DefaultTokenSource is a token source that uses // "Application Default Credentials". // // It looks for credentials in the following places, // preferring the first location found: // // 1. A JSON file whose path is specified by the // GOOGLE_APPLICATION_CREDENTIALS environment variable. // 2. A JSON file in a location known to the gcloud command-line tool. // On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. // On other systems, $HOME/.config/gcloud/application_default_credentials.json. // 3. On Google App Engine it uses the appengine.AccessToken function. // 4. On Google Compute Engine, it fetches credentials from the metadata server. // (In this final case any provided scopes are ignored.) // // For more details, see: // https://developers.google.com/accounts/docs/application-default-credentials // client, err := oauth2google.DefaultClient(context.Background(), storage.DevstorageReadOnlyScope) if err != nil { log.Fatal("Failed to get service account client.", err) } service, err := storage.New(client) if err != nil { log.Fatalln("Failed to create storage service.", err) } bucket, err := service.Buckets.Get(bucketName).Do() if err != nil { // If you're running under a VM (e.g., vmware fusion) and your clock is out of sync, // you might get an invalid_grant error. log.Fatalln("Error getting bucket.", err) } log.Println("Got bucket:", bucket.Name, bucket.SelfLink) }
func (fs *FileDiffStore) removeImageFromGS(d string) error { storage, err := storage.New(fs.client) if err != nil { return fmt.Errorf("Failed to create interface to Google Storage: %s\n", err) } objLocation := filepath.Join(fs.storageBaseDir, fmt.Sprintf("%s.%s", d, IMG_EXTENSION)) if err := storage.Objects.Delete(fs.gsBucketName, objLocation).Do(); err != nil { return fmt.Errorf("Unable to delete %s/%s: %s", fs.gsBucketName, objLocation, err) } return nil }
// NewGsUtil initializes and returns a utility for CT interations with Google // Storage. If client is nil then auth.NewClient is invoked. func NewGsUtil(client *http.Client) (*GsUtil, error) { if client == nil { var err error client, err = auth.NewClientWithTransport(true, GSTokenPath, ClientSecretPath, nil, auth.SCOPE_FULL_CONTROL) if err != nil { return nil, err } } service, err := storage.New(client) if err != nil { return nil, fmt.Errorf("Failed to create interface to Google Storage: %s", err) } return &GsUtil{client: client, service: service}, nil }
// NewGoogleStorageSource returns a new instance of GoogleStorageSource based // on the bucket and directory provided. The id is used to identify the Source // and is generally the same id as the ingester. func NewGoogleStorageSource(baseName, bucket, rootDir string, client *http.Client) (Source, error) { gStorage, err := storage.New(client) if err != nil { return nil, err } return &GoogleStorageSource{ bucket: bucket, rootDir: rootDir, id: fmt.Sprintf("%s:gs://%s/%s", baseName, bucket, rootDir), client: client, gStorage: gStorage, }, nil }
// NewClient creates a new Google Cloud Storage client. func NewClient(ctx context.Context, opts ...cloud.ClientOption) (*Client, error) { hc, _, err := transport.NewHTTPClient(ctx, opts...) if err != nil { return nil, fmt.Errorf("dialing: %v", err) } rawService, err := raw.New(hc) if err != nil { return nil, fmt.Errorf("storage client: %v", err) } return &Client{ hc: hc, raw: rawService, }, nil }
// GetGCSRegistry returns a new Google Cloud Storage . If there's a credential that is specified, will try // to fetch it and use it, and if there's no credential found, will fall back to unauthenticated client. func (gcsrp gcsRegistryProvider) GetGCSRegistry(cr common.Registry) (ObjectStorageRegistry, error) { // If there's a credential that we need to use, fetch it and create a client for it. if cr.CredentialName == "" { return nil, fmt.Errorf("No CredentialName specified for %s", cr.Name) } client, err := gcsrp.createGCSClient(cr.CredentialName) if err != nil { return nil, err } service, err := storage.New(client) if err != nil { log.Fatalf("Unable to create storage service: %v", err) } return NewGCSRegistry(cr.Name, cr.URL, client, service) }
func (u *GSUploader) Setup(destination string, debugHTTP bool) error { u.Destination = destination u.DebugHTTP = debugHTTP client, err := u.getClient(storage.DevstorageFullControlScope) if err != nil { return errors.New(fmt.Sprintf("Error creating Google Cloud Storage client: %v", err)) } service, err := storage.New(client) if err != nil { return err } u.Service = service return nil }
// New はアップローダーを作成する。 func New(o option.Options) (*Uploader, error) { jsonFile, err := ioutil.ReadFile(o.JSON) if err != nil { log.Fatalf("Could not open json: %v", err) } config, err := google.JWTConfigFromJSON(jsonFile, scope) if err != nil { log.Fatalf("Could not parse json: %v", err) } client := config.Client(context.Background()) service, err := gcs.New(client) if err != nil { log.Fatalf("Unable to create storage service: %v", err) } return &Uploader{service, o.ProjectID, o.Bucket}, nil }
func (s *GCSSuite) TestGCSSuite(c *C) { bucket := "sply-test" prefix := "a" // create google client to store sample file client, err := google.DefaultClient(context.Background()) if err != nil { log.Fatalf("Unable to get default client: %v", err) } service, err := storage.New(client) if err != nil { log.Fatalf("Unable to create storage service: %v", err) } object := &storage.Object{Name: "a/sample"} buf := bytes.NewBuffer(make([]byte, 1000)) if res, err := service.Objects.Insert(bucket, object).Media(buf).Do(); err == nil { fmt.Printf("Created object %v at location %v\n\n", res.Name, res.SelfLink) } else { log.Fatalf("Objects.Insert failed: %v", err) } // Create GCS connection to test operations connection := NewGCSConnection(bucket, prefix) status := &NullStatusCallback{} files, err := connection.ListDir("", status) c.Assert(len(files.Files) >= 1, Equals, true) var found *FileStat for _, x := range files.Files { if x.Name == "sample" { found = x } } c.Assert(found, NotNil) c.Assert(err, IsNil) localPath := c.MkDir() + "/dest" ioutil.WriteFile(localPath, make([]byte, 0), 0700) region, err := connection.PrepareForRead("sample", found.Etag, localPath, 0, 10, status) c.Assert(err, IsNil) c.Assert(region.Offset, Equals, uint64(0)) c.Assert(region.Length, Equals, uint64(10)) fmt.Printf("files=%s\n", files) }
func storageMain(client *http.Client, argv []string) { if len(argv) != 2 { fmt.Fprintln(os.Stderr, "Usage: storage filename bucket (to upload an object)") return } service, _ := storage.New(client) filename := argv[0] bucket := argv[1] goFile, err := os.Open(filename) if err != nil { log.Fatalf("error opening %q: %v", filename, err) } storageObject, err := service.Objects.Insert(bucket, &storage.Object{Name: filename}).Media(goFile).Do() log.Printf("Got storage.Object, err: %#v, %v", storageObject, err) }
// storageMain uploads the file named by argv[0] to the bucket argv[1],
// downloads it back (discarding the bytes), and finally exercises a
// conditional GET via If-None-Match with the uploaded object's ETag.
func storageMain(client *http.Client, argv []string) {
	if len(argv) != 2 {
		fmt.Fprintln(os.Stderr, "Usage: storage filename bucket (to upload an object)")
		return
	}
	service, err := storage.New(client)
	if err != nil {
		log.Fatalf("Unable to create Storage service: %v", err)
	}
	filename := argv[0]
	bucket := argv[1]
	goFile, err := os.Open(filename)
	if err != nil {
		log.Fatalf("error opening %q: %v", filename, err)
	}
	// Upload the local file as an object named after the file itself.
	storageObject, err := service.Objects.Insert(bucket, &storage.Object{Name: filename}).Media(goFile).Do()
	log.Printf("Got storage.Object, err: %#v, %v", storageObject, err)
	if err != nil {
		return
	}
	// Download the object we just uploaded and count the bytes received.
	resp, err := service.Objects.Get(bucket, filename).Download()
	if err != nil {
		log.Fatalf("error downloading %q: %v", filename, err)
	}
	defer resp.Body.Close()
	n, err := io.Copy(ioutil.Discard, resp.Body)
	if err != nil {
		log.Fatalf("error downloading %q: %v", filename, err)
	}
	log.Printf("Downloaded %d bytes", n)
	// Test If-None-Match - should get a "HTTP 304 Not Modified" response.
	obj, err := service.Objects.Get(bucket, filename).IfNoneMatch(storageObject.Etag).Do()
	log.Printf("Got obj, err: %#v, %v", obj, err)
	if googleapi.IsNotModified(err) {
		log.Printf("Success. Object not modified since upload.")
	} else {
		log.Printf("Error: expected object to not be modified since upload.")
	}
}
func InitConfig() { client, err := google.DefaultClient(context.Background(), scope) if err != nil { log.Fatalf("Unable to get default client: %v", err) } service, err = storage.New(client) if err != nil { log.Fatalf("Unable to create storage service: %v", err) } oService = storage.NewObjectsService(service) if err != nil { log.Fatalf("Unable to create objects storage service: %v", err) } }
// NewIngester creates an Ingester given the repo and tilestore specified. func NewIngester(git *gitinfo.GitInfo, tileStoreDir string, datasetName string, ri ResultIngester, nCommits int, minDuration time.Duration, config map[string]string, statusDir, metricName string) (*Ingester, error) { var storageService *storage.Service = nil var err error = nil // check if the ingestion source is coming from Google Storage if config["GSDir"] != "" { storageService, err = storage.New(client) if err != nil { return nil, fmt.Errorf("Failed to create interace to Google Storage: %s\n", err) } } var processedFiles *leveldb.DB = nil if statusDir != "" { statusDir = fileutil.Must(fileutil.EnsureDirExists(filepath.Join(statusDir, datasetName))) processedFiles, err = leveldb.OpenFile(filepath.Join(statusDir, "processed_files.ldb"), nil) if err != nil { glog.Fatalf("Unable to open status db at %s: %s", filepath.Join(statusDir, "processed_files.ldb"), err) } } i := &Ingester{ git: git, tileStore: filetilestore.NewFileTileStore(tileStoreDir, datasetName, -1), storage: storageService, hashToNumber: map[string]int{}, resultIngester: ri, config: config, datasetName: datasetName, elapsedTimePerUpdate: newGauge(metricName, "update"), metricsProcessed: newCounter(metricName, "processed"), lastSuccessfulUpdate: time.Now(), timeSinceLastSucceessfulUpdate: newGauge(metricName, "time-since-last-successful-update"), nCommits: nCommits, minDuration: minDuration, processedFiles: processedFiles, } i.timeSinceLastSucceessfulUpdate.Update(int64(time.Since(i.lastSuccessfulUpdate).Seconds())) go func() { for _ = range time.Tick(time.Minute) { i.timeSinceLastSucceessfulUpdate.Update(int64(time.Since(i.lastSuccessfulUpdate).Seconds())) } }() return i, nil }
// NewClient creates a new Google Cloud Storage client. // The default scope is ScopeFullControl. To use a different scope, like ScopeReadOnly, use option.WithScopes. func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { o := []option.ClientOption{ option.WithScopes(ScopeFullControl), option.WithUserAgent(userAgent), } opts = append(o, opts...) hc, _, err := transport.NewHTTPClient(ctx, opts...) if err != nil { return nil, fmt.Errorf("dialing: %v", err) } rawService, err := raw.New(hc) if err != nil { return nil, fmt.Errorf("storage client: %v", err) } return &Client{ hc: hc, raw: rawService, }, nil }
func setupOAuth() error { var useRedirectURL = fmt.Sprintf("http://localhost%s/oauth2callback/", *port) if !*local { useRedirectURL = *redirectURL } if err := login.InitFromMetadataOrJSON(useRedirectURL, login.DEFAULT_SCOPE, *authWhiteList); err != nil { return fmt.Errorf("Problem setting up server OAuth: %v", err) } client, err := auth.NewDefaultJWTServiceAccountClient(auth.SCOPE_READ_ONLY) if err != nil { return fmt.Errorf("Problem setting up client OAuth: %v", err) } storageService, err = storage.New(client) if err != nil { return fmt.Errorf("Problem authenticating: %v", err) } return nil }