func Setup(t *testing.T) *storage.Client { if testProject == "" || testBucket == "" { t.Skip("TESTPROJECT, and TESTBUCKET EnvVars must be set to perform integration test") } gcsctx := &cloudstorage.CloudStoreContext{ LogggingContext: "testing-config", TokenSource: cloudstorage.GCEDefaultOAuthToken, Project: testProject, Bucket: testBucket, } // Create http client with Google context auth googleClient, err := cloudstorage.NewGoogleClient(gcsctx) if err != nil { t.Errorf("Failed to create Google Client: %v\n", err) } gsc, err := storage.NewClient(context.Background(), option.WithHTTPClient(googleClient.Client())) if err != nil { t.Errorf("Error creating Google cloud storage client. project:%s gs://%s/ err:%v\n", gcsctx.Project, gcsctx.Bucket, err) } if gsc == nil { t.Errorf("storage Client returned is nil!") } return gsc }
// getInstalledTLS returns the TLS certificate and key stored on Google Cloud Storage for the
// instance defined in d.Conf.
//
// If either the TLS keypair doesn't exist, the error is os.ErrNotExist.
func (d *Deployer) getInstalledTLS() (certPEM, keyPEM []byte, err error) {
	ctx := context.Background()
	stoClient, err := cloudstorage.NewClient(ctx, option.WithTHTTPClientPlaceholder)
	if err != nil {
		return nil, nil, fmt.Errorf("error creating Cloud Storage client to fetch TLS cert & key from new instance: %v", err)
	}
	// getFile fetches one object from the instance's config dir, mapping
	// "object not found" onto os.ErrNotExist as documented above.
	getFile := func(name string) ([]byte, error) {
		sr, err := stoClient.Bucket(d.Conf.bucketBase()).Object(path.Join(configDir, name)).NewReader(ctx)
		if err == cloudstorage.ErrObjectNotExist {
			return nil, os.ErrNotExist
		}
		if err != nil {
			return nil, err
		}
		defer sr.Close()
		return ioutil.ReadAll(sr)
	}
	// Fetch the cert and the key concurrently; grp.Err() reports the
	// first error (if any) once both goroutines have finished.
	var grp syncutil.Group
	grp.Go(func() (err error) {
		certPEM, err = getFile(certFilename())
		return
	})
	grp.Go(func() (err error) {
		keyPEM, err = getFile(keyFilename())
		return
	})
	err = grp.Err()
	return
}
func newTestClient(rt http.RoundTripper) *Client { t, err := NewClient(context.Background(), testProjectID, option.WithHTTPClient(&http.Client{Transport: rt})) if err != nil { panic(err) } return t }
// NewClient creates a new PubSub client.
func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
	var o []option.ClientOption
	// Environment variables for gcloud emulator:
	// https://cloud.google.com/sdk/gcloud/reference/beta/emulators/pubsub/
	if addr := os.Getenv("PUBSUB_EMULATOR_HOST"); addr != "" {
		// Talk to the local emulator over plain HTTP with the default
		// (unauthenticated) client.
		o = []option.ClientOption{
			option.WithEndpoint("http://" + addr + "/"),
			option.WithHTTPClient(http.DefaultClient),
		}
	} else {
		o = []option.ClientOption{
			option.WithEndpoint(prodAddr),
			option.WithScopes(raw.PubsubScope, raw.CloudPlatformScope),
			option.WithUserAgent(userAgent),
		}
	}
	// Caller-supplied options are appended last so they override the defaults.
	o = append(o, opts...)
	httpClient, endpoint, err := transport.NewHTTPClient(ctx, o...)
	if err != nil {
		return nil, fmt.Errorf("dialing: %v", err)
	}
	s, err := newPubSubService(httpClient, endpoint)
	if err != nil {
		return nil, fmt.Errorf("constructing pubsub client: %v", err)
	}
	c := &Client{
		projectID: projectID,
		s:         s,
	}
	return c, nil
}
// Test that BucketIterator's Next method correctly terminates if there is // nothing to iterate over. func TestEmptyBucketIterator(t *testing.T) { hClient, close := newTestServer(func(w http.ResponseWriter, r *http.Request) { io.Copy(ioutil.Discard, r.Body) fmt.Fprintf(w, "{}") }) defer close() ctx := context.Background() client, err := NewClient(ctx, option.WithHTTPClient(hClient)) if err != nil { t.Fatal(err) } it := client.Buckets(ctx, "project") c := make(chan error, 1) go func() { _, err := it.Next() c <- err }() select { case err := <-c: if err != iterator.Done { t.Errorf("got %v, want Done", err) } case <-time.After(50 * time.Millisecond): t.Error("timed out") } }
func TestEncryption(t *testing.T) { ctx := context.Background() ft := &fakeTransport{} hc := &http.Client{Transport: ft} client, err := NewClient(ctx, option.WithHTTPClient(hc)) if err != nil { t.Fatalf("error when creating client: %v", err) } obj := client.Bucket("bucketname").Object("filename1") key := []byte("secret-key-that-is-32-bytes-long") wc := obj.Key(key).NewWriter(ctx) // TODO(jba): use something other than fakeTransport, which always returns error. wc.Write([]byte("hello world")) wc.Close() if got, want := ft.gotReq.Header.Get("x-goog-encryption-algorithm"), "AES256"; got != want { t.Errorf("algorithm: got %q, want %q", got, want) } gotKey, err := base64.StdEncoding.DecodeString(ft.gotReq.Header.Get("x-goog-encryption-key")) if err != nil { t.Fatalf("decoding key: %v", err) } if !reflect.DeepEqual(gotKey, key) { t.Errorf("key: got %v, want %v", gotKey, key) } wantHash := sha256.Sum256(key) gotHash, err := base64.StdEncoding.DecodeString(ft.gotReq.Header.Get("x-goog-encryption-key-sha256")) if err != nil { t.Fatalf("decoding hash: %v", err) } if !reflect.DeepEqual(gotHash, wantHash[:]) { // wantHash is an array t.Errorf("hash: got\n%v, want\n%v", gotHash, wantHash) } }
// upload copies the tarball at srcPath into the "monthly/" directory of
// the release bucket under a date-stamped name, together with a .sha256
// checksum object computed while uploading. It is a no-op unless the
// -upload flag is set, and it exits the process on any failure.
func upload(srcPath string) {
	if !*flagUpload {
		return
	}
	destName := strings.Replace(filepath.Base(srcPath), "camlistore", "camlistore-"+releaseDate.Format(fileDateFormat), 1)
	versionedTarball := "monthly/" + destName

	log.Printf("Uploading %s/%s ...", bucket, versionedTarball)

	ts, err := tokenSource(bucket)
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	stoClient, err := storage.NewClient(ctx, option.WithTokenSource(ts), option.WithHTTPClient(oauth2.NewClient(ctx, ts)))
	if err != nil {
		log.Fatal(err)
	}
	w := stoClient.Bucket(bucket).Object(versionedTarball).NewWriter(ctx)
	w.ACL = publicACL(project)
	w.CacheControl = "no-cache" // TODO: remove for non-tip releases? set expirations?
	contentType := "application/x-gtar"
	if strings.HasSuffix(versionedTarball, ".zip") {
		contentType = "application/zip"
	}
	w.ContentType = contentType
	// Tee the upload through a SHA-256 hash so the checksum is computed
	// in the same pass as the copy.
	csw := sha256.New()
	mw := io.MultiWriter(w, csw)

	src, err := os.Open(srcPath)
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	if _, err := io.Copy(mw, src); err != nil {
		log.Fatalf("io.Copy: %v", err)
	}
	if err := w.Close(); err != nil {
		log.Fatalf("closing GCS storage writer: %v", err)
	}
	log.Printf("Uploaded monthly tarball to %s", versionedTarball)

	// And upload the corresponding checksum
	checkSumFile := versionedTarball + ".sha256"
	sum := fmt.Sprintf("%x", csw.Sum(nil))
	w = stoClient.Bucket(bucket).Object(checkSumFile).NewWriter(ctx)
	w.ACL = publicACL(project)
	w.CacheControl = "no-cache" // TODO: remove for non-tip releases? set expirations?
	w.ContentType = "text/plain"
	if _, err := io.Copy(w, strings.NewReader(sum)); err != nil {
		log.Fatalf("error uploading checksum %v: %v", checkSumFile, err)
	}
	if err := w.Close(); err != nil {
		log.Fatalf("closing GCS storage writer: %v", err)
	}
	log.Printf("Uploaded monthly tarball checksum to %s", checkSumFile)
}
// uploadReleaseTarball uploads the generated tarball of binaries in
// camlistore-release/VERSION/camlistoreVERSION-REV-CONTENTS.EXT. It then makes a copy in
// the same bucket and path, as camlistoreVERSION-CONTENTS.EXT.
func uploadReleaseTarball() {
	proj := "camlistore-website"
	bucket := "camlistore-release"
	tarball := *flagVersion + "/" + filepath.Base(releaseTarball)
	// The versioned name embeds the revision so each build gets a unique object.
	versionedTarball := strings.Replace(tarball, "camlistore"+*flagVersion, "camlistore"+*flagVersion+"-"+rev(), 1)

	log.Printf("Uploading %s/%s ...", bucket, versionedTarball)

	ts, err := tokenSource(bucket)
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	stoClient, err := storage.NewClient(ctx, option.WithTokenSource(ts), option.WithHTTPClient(oauth2.NewClient(ctx, ts)))
	if err != nil {
		log.Fatal(err)
	}
	w := stoClient.Bucket(bucket).Object(versionedTarball).NewWriter(ctx)
	w.ACL = publicACL(proj)
	w.CacheControl = "no-cache" // TODO: remove for non-tip releases? set expirations?
	contentType := "application/x-gtar"
	if *buildOS == "windows" {
		contentType = "application/zip"
	}
	w.ContentType = contentType

	src, err := os.Open(releaseTarball)
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	if _, err := io.Copy(w, src); err != nil {
		log.Fatalf("io.Copy: %v", err)
	}
	if err := w.Close(); err != nil {
		log.Fatalf("closing GCS storage writer: %v", err)
	}
	log.Printf("Uploaded tarball to %s", versionedTarball)
	// Work-in-progress builds only get the revision-stamped object, not
	// the canonical unversioned copy.
	if !isWIP() {
		log.Printf("Copying tarball to %s/%s ...", bucket, tarball)
		dest := stoClient.Bucket(bucket).Object(tarball)
		if _, err := stoClient.Bucket(bucket).Object(versionedTarball).CopyTo(
			ctx,
			dest,
			&storage.ObjectAttrs{
				ACL:         publicACL(proj),
				ContentType: contentType,
			}); err != nil {
			log.Fatalf("Error uploading %v: %v", tarball, err)
		}
		log.Printf("Uploaded tarball to %s", tarball)
	}
}
// TestTranslateURL checks that Translate encodes its inputs into the
// expected URL query parameters for various target/source/format options.
func TestTranslateURL(t *testing.T) {
	// The translate API has all inputs in the URL.
	// Make sure we generate the right one.
	ctx := context.Background()
	// fakeTransport records the outgoing request in ft.req so the query
	// string can be inspected after each call.
	ft := &fakeTransport{}
	c, err := NewClient(ctx, option.WithHTTPClient(&http.Client{Transport: ft}))
	if err != nil {
		t.Fatal(err)
	}
	for _, test := range []struct {
		target language.Tag
		inputs []string
		opts   *Options
		want   url.Values
	}{
		{language.Spanish, []string{"text"}, nil, url.Values{
			"q":      []string{"text"},
			"target": []string{"es"},
		}},
		{language.English, []string{"text"}, &Options{}, url.Values{
			"q":      []string{"text"},
			"target": []string{"en"},
		}},
		{language.Turkish, []string{"t1", "t2"}, nil, url.Values{
			"q":      []string{"t1", "t2"},
			"target": []string{"tr"},
		}},
		{language.English, []string{"text"}, &Options{Source: language.French}, url.Values{
			"q":      []string{"text"},
			"source": []string{"fr"},
			"target": []string{"en"},
		},
		},
		{language.English, []string{"text"}, &Options{Source: language.French, Format: HTML}, url.Values{
			"q":      []string{"text"},
			"source": []string{"fr"},
			"format": []string{"html"},
			"target": []string{"en"},
		}},
	} {
		_, err = c.Translate(ctx, test.inputs, test.target, test.opts)
		if err != nil {
			t.Fatal(err)
		}
		got := ft.req.URL.Query()
		// "alt=json" is always appended by the client library.
		test.want.Add("alt", "json")
		if !reflect.DeepEqual(got, test.want) {
			t.Errorf("Translate(%s, %v, %+v):\ngot %s\nwant %s", test.target, test.inputs, test.opts, got, test.want)
		}
	}
}
// uploadBinary uploads the currently-running Linux binary. // It crashes if it fails. func (cl *cloudLaunch) uploadBinary() { ctx := context.Background() if cl.BinaryBucket == "" { log.Fatal("cloudlaunch: Config.BinaryBucket is empty") } stoClient, err := storage.NewClient(ctx, option.WithHTTPClient(cl.oauthClient)) if err != nil { log.Fatal(err) } w := stoClient.Bucket(cl.BinaryBucket).Object(cl.binaryObject()).NewWriter(ctx) if err != nil { log.Fatal(err) } w.ACL = []storage.ACLRule{ // If you don't give the owners access, the web UI seems to // have a bug and doesn't have access to see that it's public, so // won't render the "Shared Publicly" link. So we do that, even // though it's dumb and unnecessary otherwise: { Entity: storage.ACLEntity("project-owners-" + cl.GCEProjectID), Role: storage.RoleOwner, }, // Public, so our systemd unit can get it easily: { Entity: storage.AllUsers, Role: storage.RoleReader, }, } w.CacheControl = "no-cache" selfPath := getSelfPath() log.Printf("Uploading %q to %v", selfPath, cl.binaryURL()) f, err := os.Open(selfPath) if err != nil { log.Fatal(err) } defer f.Close() n, err := io.Copy(w, f) if err != nil { log.Fatal(err) } if err := w.Close(); err != nil { log.Fatal(err) } log.Printf("Uploaded %d bytes", n) }
// client returns the GCS Storage client instance. // If there isn't one yet, it tries to create one. func (bs *GCSBackupStorage) client() (*storage.Client, error) { bs.mu.Lock() defer bs.mu.Unlock() if bs._client == nil { // FIXME(alainjobart) add context.Context to BackupStorage API. ctx := context.TODO() authClient, err := google.DefaultClient(ctx) if err != nil { return nil, err } client, err := storage.NewClient(ctx, option.WithHTTPClient(authClient)) if err != nil { return nil, err } bs._client = client } return bs._client, nil }
func gcsCommonClient(client *http.Client, csctx *CloudStoreContext) (Store, error) { project := csctx.Project bucket := csctx.Bucket prefix := fmt.Sprintf("%s:(project=%s bucket=%s)", csctx.LogggingContext, project, bucket) l := LogConstructor(prefix) gcs, err := storage.NewClient(context.Background(), option.WithHTTPClient(client)) if err != nil { l.Errorf("%v error creating Google cloud storage client. project:%s gs://%s/ err:%v ", csctx.LogggingContext, project, bucket, err) return nil, err } store, err := NewGCSStore(gcs, bucket, csctx.TmpDir, maxResults, l) if err != nil { l.Errorf("error creating the store. err=%v ", err) return nil, err } return store, nil }
func TestErrorOnObjectsInsertCall(t *testing.T) { ctx := context.Background() hc := &http.Client{Transport: &fakeTransport{}} client, err := NewClient(ctx, option.WithHTTPClient(hc)) if err != nil { t.Fatalf("error when creating client: %v", err) } wc := client.Bucket("bucketname").Object("filename1").NewWriter(ctx) wc.ContentType = "text/plain" // We can't check that the Write fails, since it depends on the write to the // underling fakeTransport failing which is racy. wc.Write([]byte("hello world")) // Close must always return an error though since it waits for the transport to // have closed. if err := wc.Close(); err == nil { t.Errorf("expected error on close, got nil") } }
func init() { if !metadata.OnGCE() { return } hc, err := google.DefaultClient(oauth2.NoContext) if err != nil { registerBrokenFS(fmt.Errorf("could not get http client for context: %v", err)) return } ctx := context.Background() sc, err := storage.NewClient(ctx, option.WithHTTPClient(hc)) if err != nil { registerBrokenFS(fmt.Errorf("could not get cloud storage client: %v", err)) return } wkfs.RegisterFS("/gcs/", &gcsFS{ ctx: ctx, sc: sc, }) }
func TestCopyToMissingFields(t *testing.T) { var tests = []struct { srcBucket, srcName, destBucket, destName string errMsg string }{ { "mybucket", "", "mybucket", "destname", "name is empty", }, { "mybucket", "srcname", "mybucket", "", "name is empty", }, { "", "srcfile", "mybucket", "destname", "name is empty", }, { "mybucket", "srcfile", "", "destname", "name is empty", }, } ctx := context.Background() client, err := NewClient(ctx, option.WithHTTPClient(&http.Client{Transport: &fakeTransport{}})) if err != nil { panic(err) } for i, test := range tests { src := client.Bucket(test.srcBucket).Object(test.srcName) dst := client.Bucket(test.destBucket).Object(test.destName) _, err := dst.CopierFrom(src).Run(ctx) if !strings.Contains(err.Error(), test.errMsg) { t.Errorf("CopyTo test #%v:\ngot err %q\nwant err %q", i, err, test.errMsg) } } }
// listDownloads lists all the files found in the monthly repo, and from them,
// builds the data that we'll feed to the template to generate the monthly
// downloads camweb page.
func listDownloads() (*ReleaseData, error) {
	ts, err := tokenSource(bucket)
	if err != nil {
		return nil, err
	}
	ctx := context.Background()
	stoClient, err := storage.NewClient(ctx, option.WithTokenSource(ts), option.WithHTTPClient(oauth2.NewClient(ctx, ts)))
	if err != nil {
		return nil, err
	}
	objList, err := stoClient.Bucket(bucket).List(ctx, &storage.Query{Prefix: "monthly/"})
	if err != nil {
		return nil, err
	}
	// Maps a file-name suffix to the platform it was built for.
	platformBySuffix := map[string]string{
		"src.zip":       "Source",
		"linux.tar.gz":  "Linux",
		"darwin.tar.gz": "Darwin",
		"windows.zip":   "Windows",
	}
	// getPlatform returns the platform for name, or "" if unrecognized.
	getPlatform := func(name string) string {
		for suffix, platform := range platformBySuffix {
			if strings.HasSuffix(name, suffix) {
				return platform
			}
		}
		return ""
	}
	// getChecksum downloads the contents of the named checksum object.
	getChecksum := func(name string) (string, error) {
		r, err := stoClient.Bucket(bucket).Object(name).NewReader(ctx)
		if err != nil {
			return "", err
		}
		var buf bytes.Buffer
		if _, err := io.Copy(&buf, r); err != nil {
			return "", err
		}
		return buf.String(), nil
	}
	// checkDate verifies all matching objects were updated within 24h of
	// the first one seen, i.e. that they belong to the same release.
	var date time.Time
	checkDate := func(objDate time.Time) error {
		if date.IsZero() {
			date = objDate
			return nil
		}
		d := date.Sub(objDate)
		if d < 0 {
			d = -d
		}
		if d < 24*time.Hour {
			return nil
		}
		return fmt.Errorf("objects in monthly have not been uploaded or updated the same day")
	}
	var (
		downloadData []DownloadData
		nameToSum    = make(map[string]string)
	)
	fileDate := releaseDate.Format(fileDateFormat)
	// First pass: collect the checksum files for this release date.
	for _, attrs := range objList.Results {
		if !strings.Contains(attrs.Name, fileDate) {
			continue
		}
		if err := checkDate(attrs.Updated); err != nil {
			return nil, err
		}
		if !strings.HasSuffix(attrs.Name, ".sha256") {
			continue
		}
		sum, err := getChecksum(attrs.Name)
		if err != nil {
			return nil, err
		}
		nameToSum[strings.TrimSuffix(attrs.Name, ".sha256")] = sum
	}
	// Second pass: build the download entries, pairing each tarball with
	// the checksum collected above.
	for _, attrs := range objList.Results {
		if !strings.Contains(attrs.Name, fileDate) {
			continue
		}
		if strings.HasSuffix(attrs.Name, ".sha256") {
			continue
		}
		sum, ok := nameToSum[attrs.Name]
		if !ok {
			return nil, fmt.Errorf("%v has no checksum file!", attrs.Name)
		}
		downloadData = append(downloadData, DownloadData{
			Filename: filepath.Base(attrs.Name),
			Platform: getPlatform(attrs.Name),
			Checksum: sum,
		})
	}
	return &ReleaseData{
		Date:         releaseDate.Format(titleDateFormat),
		Download:     downloadData,
		CamliVersion: rev(),
		GoVersion:    goVersion,
	}, nil
}
// makeRequests makes some requests.
// req is an incoming request used to construct the trace. traceClient is the
// client used to upload the trace. rt is the trace client's http client's
// transport. This is used to retrieve the trace uploaded by the client, if
// any. If expectTrace is true, we expect a trace will be uploaded. If
// synchronous is true, the call to Finish is expected not to return before the
// client has uploaded any traces.
func makeRequests(t *testing.T, req *http.Request, traceClient *Client, rt *fakeRoundTripper, synchronous bool, expectTrace bool) *http.Request {
	span := traceClient.SpanFromRequest(req)
	ctx := NewContext(context.Background(), span)

	// An HTTP request.
	{
		req2, err := http.NewRequest("GET", "http://example.com/bar", nil)
		if err != nil {
			t.Fatal(err)
		}
		resp := &http.Response{StatusCode: 200}
		s := span.NewRemoteChild(req2)
		s.Finish(WithResponse(resp))
	}

	// An autogenerated API call.
	{
		rt := &fakeRoundTripper{reqc: make(chan *http.Request, 1)}
		hc := &http.Client{Transport: rt}
		computeClient, err := compute.New(hc)
		if err != nil {
			t.Fatal(err)
		}
		_, err = computeClient.Zones.List(testProjectID).Context(ctx).Do()
		if err != nil {
			t.Fatal(err)
		}
	}

	// A cloud library call that uses the autogenerated API.
	{
		rt := &fakeRoundTripper{reqc: make(chan *http.Request, 1)}
		hc := &http.Client{Transport: rt}
		storageClient, err := storage.NewClient(context.Background(), option.WithHTTPClient(hc))
		if err != nil {
			t.Fatal(err)
		}
		var objAttrsList []*storage.ObjectAttrs
		it := storageClient.Bucket("testbucket").Objects(ctx, nil)
		for {
			objAttrs, err := it.Next()
			if err != nil && err != iterator.Done {
				t.Fatal(err)
			}
			if err == iterator.Done {
				break
			}
			objAttrsList = append(objAttrsList, objAttrs)
		}
	}

	// A cloud library call that uses grpc internally.
	// Run once against a healthy fake server and once against a failing one.
	for _, fail := range []bool{false, true} {
		srv, err := testutil.NewServer()
		if err != nil {
			t.Fatalf("creating test datastore server: %v", err)
		}
		dspb.RegisterDatastoreServer(srv.Gsrv, &fakeDatastoreServer{fail: fail})
		srv.Start()
		conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure(), EnableGRPCTracingDialOption)
		if err != nil {
			t.Fatalf("connecting to test datastore server: %v", err)
		}
		datastoreClient, err := datastore.NewClient(ctx, testProjectID, option.WithGRPCConn(conn))
		if err != nil {
			t.Fatalf("creating datastore client: %v", err)
		}
		k := datastore.NameKey("Entity", "stringID", nil)
		e := new(datastore.Entity)
		datastoreClient.Get(ctx, k, e)
	}

	// Finish the span (synchronously or not) in a goroutine so the select
	// below can detect whether Finish blocked on the upload.
	done := make(chan struct{})
	go func() {
		if synchronous {
			err := span.FinishWait()
			if err != nil {
				t.Errorf("Unexpected error from span.FinishWait: %v", err)
			}
		} else {
			span.Finish()
		}
		done <- struct{}{}
	}()
	if !expectTrace {
		<-done
		select {
		case <-rt.reqc:
			t.Errorf("Got a trace, expected none.")
		case <-time.After(5 * time.Millisecond):
		}
		return nil
	} else if !synchronous {
		<-done
		return <-rt.reqc
	} else {
		select {
		case <-done:
			t.Errorf("Synchronous Finish didn't wait for trace upload.")
			return <-rt.reqc
		case <-time.After(5 * time.Millisecond):
			r := <-rt.reqc
			<-done
			return r
		}
	}
}
// WithBaseHTTP returns a ClientOption that specifies the HTTP client to // use as the basis of communications. This option may only be used with // services that support HTTP as their communication transport. func WithBaseHTTP(client *http.Client) ClientOption { return wrapOpt{option.WithHTTPClient(client)} }
// TestObjects is an integration test exercising the object lifecycle:
// write, list, read, signed URLs, range reads, content encoding, error
// cases, attrs, copy, update, checksums, ACLs, delete, and compose.
func TestObjects(t *testing.T) {
	// TODO(djd): there are a lot of closely-related tests here which share
	// a common setup. Once we can depend on Go 1.7 features, we should refactor
	// this test to use the sub-test feature. This will increase the readability
	// of this test, and should also reduce the time it takes to execute.
	// https://golang.org/pkg/testing/#hdr-Subtests_and_Sub_benchmarks
	ctx := context.Background()
	client, bucket := testConfig(ctx, t)
	defer client.Close()

	bkt := client.Bucket(bucket)

	const defaultType = "text/plain"

	// Populate object names and make a map for their contents.
	objects := []string{
		"obj1",
		"obj2",
		"obj/with/slashes",
	}
	contents := make(map[string][]byte)

	// Test Writer.
	for _, obj := range objects {
		t.Logf("Writing %q", obj)
		wc := bkt.Object(obj).NewWriter(ctx)
		wc.ContentType = defaultType
		c := randomContents()
		if _, err := wc.Write(c); err != nil {
			t.Errorf("Write for %v failed with %v", obj, err)
		}
		if err := wc.Close(); err != nil {
			t.Errorf("Close for %v failed with %v", obj, err)
		}
		contents[obj] = c
	}

	testBucketList(t, bkt, objects)
	testObjectIterator(t, bkt, objects)

	// Test Reader.
	for _, obj := range objects {
		t.Logf("Creating a reader to read %v", obj)
		rc, err := bkt.Object(obj).NewReader(ctx)
		if err != nil {
			t.Errorf("Can't create a reader for %v, errored with %v", obj, err)
			continue
		}
		slurp, err := ioutil.ReadAll(rc)
		if err != nil {
			t.Errorf("Can't ReadAll object %v, errored with %v", obj, err)
		}
		if got, want := slurp, contents[obj]; !bytes.Equal(got, want) {
			t.Errorf("Contents (%q) = %q; want %q", obj, got, want)
		}
		if got, want := rc.Size(), len(contents[obj]); got != int64(want) {
			t.Errorf("Size (%q) = %d; want %d", obj, got, want)
		}
		if got, want := rc.ContentType(), "text/plain"; got != want {
			t.Errorf("ContentType (%q) = %q; want %q", obj, got, want)
		}
		rc.Close()

		// Test SignedURL
		opts := &SignedURLOptions{
			GoogleAccessID: "xxx@clientid",
			PrivateKey:     dummyKey("rsa"),
			Method:         "GET",
			MD5:            []byte("202cb962ac59075b964b07152d234b70"),
			Expires:        time.Date(2020, time.October, 2, 10, 0, 0, 0, time.UTC),
			ContentType:    "application/json",
			Headers:        []string{"x-header1", "x-header2"},
		}
		u, err := SignedURL(bucket, obj, opts)
		if err != nil {
			t.Fatalf("SignedURL(%q, %q) errored with %v", bucket, obj, err)
		}
		res, err := client.hc.Get(u)
		if err != nil {
			t.Fatalf("Can't get URL %q: %v", u, err)
		}
		slurp, err = ioutil.ReadAll(res.Body)
		if err != nil {
			t.Fatalf("Can't ReadAll signed object %v, errored with %v", obj, err)
		}
		if got, want := slurp, contents[obj]; !bytes.Equal(got, want) {
			t.Errorf("Contents (%v) = %q; want %q", obj, got, want)
		}
		res.Body.Close()
	}

	obj := objects[0]
	objlen := int64(len(contents[obj]))

	// Test Range Reader.
	for i, r := range []struct {
		offset, length, want int64
	}{
		{0, objlen, objlen},
		{0, objlen / 2, objlen / 2},
		{objlen / 2, objlen, objlen / 2},
		{0, 0, 0},
		{objlen / 2, 0, 0},
		{objlen / 2, -1, objlen / 2},
		{0, objlen * 2, objlen},
	} {
		t.Logf("%d: bkt.Object(%v).NewRangeReader(ctx, %d, %d)", i, obj, r.offset, r.length)
		rc, err := bkt.Object(obj).NewRangeReader(ctx, r.offset, r.length)
		if err != nil {
			t.Errorf("%d: Can't create a range reader for %v, errored with %v", i, obj, err)
			continue
		}
		if rc.Size() != objlen {
			t.Errorf("%d: Reader has a content-size of %d, want %d", i, rc.Size(), objlen)
		}
		if rc.Remain() != r.want {
			t.Errorf("%d: Reader's available bytes reported as %d, want %d", i, rc.Remain(), r.want)
		}
		slurp, err := ioutil.ReadAll(rc)
		if err != nil {
			t.Errorf("%d:Can't ReadAll object %v, errored with %v", i, obj, err)
			continue
		}
		if len(slurp) != int(r.want) {
			t.Errorf("%d:RangeReader (%d, %d): Read %d bytes, wanted %d bytes", i, r.offset, r.length, len(slurp), r.want)
			continue
		}
		if got, want := slurp, contents[obj][r.offset:r.offset+r.want]; !bytes.Equal(got, want) {
			t.Errorf("RangeReader (%d, %d) = %q; want %q", r.offset, r.length, got, want)
		}
		rc.Close()
	}

	// Test content encoding
	const zeroCount = 20 << 20
	w := bkt.Object("gzip-test").NewWriter(ctx)
	w.ContentEncoding = "gzip"
	gw := gzip.NewWriter(w)
	if _, err := io.Copy(gw, io.LimitReader(zeros{}, zeroCount)); err != nil {
		t.Fatalf("io.Copy, upload: %v", err)
	}
	if err := gw.Close(); err != nil {
		t.Errorf("gzip.Close(): %v", err)
	}
	if err := w.Close(); err != nil {
		t.Errorf("w.Close(): %v", err)
	}
	r, err := bkt.Object("gzip-test").NewReader(ctx)
	if err != nil {
		t.Fatalf("NewReader(gzip-test): %v", err)
	}
	n, err := io.Copy(ioutil.Discard, r)
	if err != nil {
		t.Errorf("io.Copy, download: %v", err)
	}
	if n != zeroCount {
		t.Errorf("downloaded bad data: got %d bytes, want %d", n, zeroCount)
	}

	// Test NotFound.
	_, err = bkt.Object("obj-not-exists").NewReader(ctx)
	if err != ErrObjectNotExist {
		t.Errorf("Object should not exist, err found to be %v", err)
	}

	objName := objects[0]

	// Test NewReader googleapi.Error.
	// Since a 429 or 5xx is hard to cause, we trigger a 416.
	realLen := len(contents[objName])
	_, err = bkt.Object(objName).NewRangeReader(ctx, int64(realLen*2), 10)
	if err, ok := err.(*googleapi.Error); !ok {
		t.Error("NewRangeReader did not return a googleapi.Error")
	} else {
		if err.Code != 416 {
			t.Errorf("Code = %d; want %d", err.Code, 416)
		}
		if len(err.Header) == 0 {
			t.Error("Missing googleapi.Error.Header")
		}
		if len(err.Body) == 0 {
			t.Error("Missing googleapi.Error.Body")
		}
	}

	// Test StatObject.
	o, err := bkt.Object(objName).Attrs(ctx)
	if err != nil {
		t.Error(err)
	}
	if got, want := o.Name, objName; got != want {
		t.Errorf("Name (%v) = %q; want %q", objName, got, want)
	}
	if got, want := o.ContentType, defaultType; got != want {
		t.Errorf("ContentType (%v) = %q; want %q", objName, got, want)
	}
	created := o.Created

	// Check that the object is newer than its containing bucket.
	bAttrs, err := bkt.Attrs(ctx)
	if err != nil {
		t.Error(err)
	}
	if o.Created.Before(bAttrs.Created) {
		t.Errorf("Object %v is older than its containing bucket, %v", o, bAttrs)
	}

	// Test object copy.
	copyName := "copy-" + objName
	copyObj, err := bkt.Object(objName).CopyTo(ctx, bkt.Object(copyName), nil)
	if err != nil {
		t.Errorf("CopyTo failed with %v", err)
	}
	if copyObj.Name != copyName {
		t.Errorf("Copy object's name = %q; want %q", copyObj.Name, copyName)
	}
	if copyObj.Bucket != bucket {
		t.Errorf("Copy object's bucket = %q; want %q", copyObj.Bucket, bucket)
	}

	// Test UpdateAttrs.
	updated, err := bkt.Object(objName).Update(ctx, ObjectAttrs{
		ContentType: "text/html",
		ACL:         []ACLRule{{Entity: "domain-google.com", Role: RoleReader}},
	})
	if err != nil {
		t.Errorf("UpdateAttrs failed with %v", err)
	}
	if want := "text/html"; updated.ContentType != want {
		t.Errorf("updated.ContentType == %q; want %q", updated.ContentType, want)
	}
	if want := created; updated.Created != want {
		t.Errorf("updated.Created == %q; want %q", updated.Created, want)
	}
	if !updated.Created.Before(updated.Updated) {
		t.Errorf("updated.Updated should be newer than update.Created")
	}

	// Test checksums.
	checksumCases := []struct {
		name     string
		contents [][]byte
		size     int64
		md5      string
		crc32c   uint32
	}{
		{
			name:     "checksum-object",
			contents: [][]byte{[]byte("hello"), []byte("world")},
			size:     10,
			md5:      "fc5e038d38a57032085441e7fe7010b0",
			crc32c:   1456190592,
		},
		{
			name:     "zero-object",
			contents: [][]byte{},
			size:     0,
			md5:      "d41d8cd98f00b204e9800998ecf8427e",
			crc32c:   0,
		},
	}
	for _, c := range checksumCases {
		wc := bkt.Object(c.name).NewWriter(ctx)
		for _, data := range c.contents {
			if _, err := wc.Write(data); err != nil {
				t.Errorf("Write(%q) failed with %q", data, err)
			}
		}
		if err = wc.Close(); err != nil {
			t.Errorf("%q: close failed with %q", c.name, err)
		}
		obj := wc.Attrs()
		if got, want := obj.Size, c.size; got != want {
			t.Errorf("Object (%q) Size = %v; want %v", c.name, got, want)
		}
		if got, want := fmt.Sprintf("%x", obj.MD5), c.md5; got != want {
			t.Errorf("Object (%q) MD5 = %q; want %q", c.name, got, want)
		}
		if got, want := obj.CRC32C, c.crc32c; got != want {
			t.Errorf("Object (%q) CRC32C = %v; want %v", c.name, got, want)
		}
	}

	// Test public ACL.
	publicObj := objects[0]
	if err = bkt.Object(publicObj).ACL().Set(ctx, AllUsers, RoleReader); err != nil {
		t.Errorf("PutACLEntry failed with %v", err)
	}
	// An unauthenticated client should be able to read the public object.
	publicClient, err := NewClient(ctx, option.WithHTTPClient(http.DefaultClient))
	if err != nil {
		t.Fatal(err)
	}
	rc, err := publicClient.Bucket(bucket).Object(publicObj).NewReader(ctx)
	if err != nil {
		t.Error(err)
	}
	slurp, err := ioutil.ReadAll(rc)
	if err != nil {
		t.Errorf("ReadAll failed with %v", err)
	}
	if !bytes.Equal(slurp, contents[publicObj]) {
		t.Errorf("Public object's content: got %q, want %q", slurp, contents[publicObj])
	}
	rc.Close()

	// Test writer error handling.
	wc := publicClient.Bucket(bucket).Object(publicObj).NewWriter(ctx)
	if _, err := wc.Write([]byte("hello")); err != nil {
		t.Errorf("Write unexpectedly failed with %v", err)
	}
	if err = wc.Close(); err == nil {
		t.Error("Close expected an error, found none")
	}

	// Test deleting the copy object.
	if err := bkt.Object(copyName).Delete(ctx); err != nil {
		t.Errorf("Deletion of %v failed with %v", copyName, err)
	}
	// Deleting it a second time should return ErrObjectNotExist.
	if err := bkt.Object(copyName).Delete(ctx); err != ErrObjectNotExist {
		t.Errorf("second deletion of %v = %v; want ErrObjectNotExist", copyName, err)
	}
	_, err = bkt.Object(copyName).Attrs(ctx)
	if err != ErrObjectNotExist {
		t.Errorf("Copy is expected to be deleted, stat errored with %v", err)
	}

	// Test object composition.
	compDst := bkt.Object("composed")
	var compSrcs []*ObjectHandle
	var wantContents []byte
	for _, obj := range objects {
		compSrcs = append(compSrcs, bkt.Object(obj))
		wantContents = append(wantContents, contents[obj]...)
	}
	if _, err := compDst.ComposeFrom(ctx, compSrcs, &ObjectAttrs{
		ContentType: "text/json",
	}); err != nil {
		t.Fatalf("ComposeFrom error: %v", err)
	}
	rc, err = compDst.NewReader(ctx)
	if err != nil {
		t.Fatalf("compDst.NewReader: %v", err)
	}
	slurp, err = ioutil.ReadAll(rc)
	if err != nil {
		t.Fatalf("compDst ioutil.ReadAll: %v", err)
	}
	defer rc.Close()
	if !bytes.Equal(slurp, wantContents) {
		t.Errorf("Composed object contents\ngot: %q\nwant: %q", slurp, wantContents)
	}
	if got, want := rc.ContentType(), "text/json"; got != want {
		t.Errorf("Composed object content-type = %q, want %q", got, want)
	}
}
// makeRequests makes some requests.
// req is an incoming request used to construct the trace. traceClient is the
// client used to upload the trace. rt is the trace client's http client's
// transport. This is used to retrieve the trace uploaded by the client, if
// any. If expectTrace is true, we expect a trace will be uploaded. If
// synchronous is true, the call to Finish is expected not to return before the
// client has uploaded any traces.
func makeRequests(t *testing.T, req *http.Request, traceClient *Client, rt *fakeRoundTripper, synchronous bool, expectTrace bool) *http.Request {
	span := traceClient.SpanFromRequest(req)
	ctx := NewContext(context.Background(), span)

	// An HTTP request.
	{
		req2, err := http.NewRequest("GET", "http://example.com/bar", nil)
		if err != nil {
			t.Fatal(err)
		}
		resp := &http.Response{StatusCode: 200}
		s := span.NewRemoteChild(req2)
		s.Finish(WithResponse(resp))
	}

	// An autogenerated API call.
	{
		rt := &fakeRoundTripper{reqc: make(chan *http.Request, 1)}
		hc := &http.Client{Transport: rt}
		computeClient, err := compute.New(hc)
		if err != nil {
			t.Fatal(err)
		}
		_, err = computeClient.Zones.List(testProjectID).Context(ctx).Do()
		if err != nil {
			t.Fatal(err)
		}
	}

	// A cloud library call that uses the autogenerated API.
	{
		rt := &fakeRoundTripper{reqc: make(chan *http.Request, 1)}
		hc := &http.Client{Transport: rt}
		storageClient, err := storage.NewClient(context.Background(), option.WithHTTPClient(hc))
		if err != nil {
			t.Fatal(err)
		}
		var objAttrsList []*storage.ObjectAttrs
		it := storageClient.Bucket("testbucket").Objects(ctx, nil)
		for {
			objAttrs, err := it.Next()
			if err != nil && err != storage.Done {
				t.Fatal(err)
			}
			if err == storage.Done {
				break
			}
			objAttrsList = append(objAttrsList, objAttrs)
		}
	}

	// Finish the span (synchronously or not) in a goroutine so the select
	// below can detect whether Finish blocked on the upload.
	done := make(chan struct{})
	go func() {
		if synchronous {
			err := span.FinishWait()
			if err != nil {
				t.Errorf("Unexpected error from span.FinishWait: %v", err)
			}
		} else {
			span.Finish()
		}
		done <- struct{}{}
	}()
	if !expectTrace {
		<-done
		select {
		case <-rt.reqc:
			t.Errorf("Got a trace, expected none.")
		case <-time.After(5 * time.Millisecond):
		}
		return nil
	} else if !synchronous {
		<-done
		return <-rt.reqc
	} else {
		select {
		case <-done:
			t.Errorf("Synchronous Finish didn't wait for trace upload.")
			return <-rt.reqc
		case <-time.After(5 * time.Millisecond):
			r := <-rt.reqc
			<-done
			return r
		}
	}
}
// Test object compose.
//
// Drives ObjectHandle.ComposerFrom/Composer.Run against a stub HTTP server
// and checks, per table case, the request URL and JSON compose body that the
// client sends (or that an error is returned before any request).
func TestObjectCompose(t *testing.T) {
	// Buffered channels capture what the fake server saw for each request.
	gotURL := make(chan string, 1)
	gotBody := make(chan []byte, 1)
	hc, close := newTestServer(func(w http.ResponseWriter, r *http.Request) {
		body, _ := ioutil.ReadAll(r.Body)
		gotURL <- r.URL.String()
		gotBody <- body
		w.Write([]byte("{}"))
	})
	defer close()
	ctx := context.Background()
	c, err := NewClient(ctx, option.WithHTTPClient(hc))
	if err != nil {
		t.Fatal(err)
	}

	testCases := []struct {
		desc    string
		dst     *ObjectHandle
		srcs    []*ObjectHandle
		attrs   *ObjectAttrs    // optional attrs copied onto the composer
		wantReq raw.ComposeRequest
		wantURL string
		wantErr bool // expect Run to fail with no HTTP request made
	}{
		{
			desc: "basic case",
			dst:  c.Bucket("foo").Object("bar"),
			srcs: []*ObjectHandle{
				c.Bucket("foo").Object("baz"),
				c.Bucket("foo").Object("quux"),
			},
			wantURL: "/storage/v1/b/foo/o/bar/compose?alt=json",
			wantReq: raw.ComposeRequest{
				Destination: &raw.Object{Bucket: "foo"},
				SourceObjects: []*raw.ComposeRequestSourceObjects{
					{Name: "baz"},
					{Name: "quux"},
				},
			},
		},
		{
			desc: "with object attrs",
			dst:  c.Bucket("foo").Object("bar"),
			srcs: []*ObjectHandle{
				c.Bucket("foo").Object("baz"),
				c.Bucket("foo").Object("quux"),
			},
			attrs: &ObjectAttrs{
				Name:        "not-bar",
				ContentType: "application/json",
			},
			wantURL: "/storage/v1/b/foo/o/bar/compose?alt=json",
			wantReq: raw.ComposeRequest{
				Destination: &raw.Object{
					Bucket:      "foo",
					Name:        "not-bar",
					ContentType: "application/json",
				},
				SourceObjects: []*raw.ComposeRequestSourceObjects{
					{Name: "baz"},
					{Name: "quux"},
				},
			},
		},
		{
			desc: "with conditions",
			dst: c.Bucket("foo").Object("bar").If(Conditions{
				GenerationMatch:     12,
				MetagenerationMatch: 34,
			}),
			srcs: []*ObjectHandle{
				c.Bucket("foo").Object("baz").Generation(56),
				c.Bucket("foo").Object("quux").If(Conditions{GenerationMatch: 78}),
			},
			wantURL: "/storage/v1/b/foo/o/bar/compose?alt=json&ifGenerationMatch=12&ifMetagenerationMatch=34",
			wantReq: raw.ComposeRequest{
				Destination: &raw.Object{Bucket: "foo"},
				SourceObjects: []*raw.ComposeRequestSourceObjects{
					{
						Name:       "baz",
						Generation: 56,
					},
					{
						Name: "quux",
						ObjectPreconditions: &raw.ComposeRequestSourceObjectsObjectPreconditions{
							IfGenerationMatch: 78,
						},
					},
				},
			},
		},
		// The remaining cases are invalid-argument checks: Run must
		// reject them before issuing a request.
		{
			desc:    "no sources",
			dst:     c.Bucket("foo").Object("bar"),
			wantErr: true,
		},
		{
			desc: "destination, no bucket",
			dst:  c.Bucket("").Object("bar"),
			srcs: []*ObjectHandle{
				c.Bucket("foo").Object("baz"),
			},
			wantErr: true,
		},
		{
			desc: "destination, no object",
			dst:  c.Bucket("foo").Object(""),
			srcs: []*ObjectHandle{
				c.Bucket("foo").Object("baz"),
			},
			wantErr: true,
		},
		{
			desc: "source, different bucket",
			dst:  c.Bucket("foo").Object("bar"),
			srcs: []*ObjectHandle{
				c.Bucket("otherbucket").Object("baz"),
			},
			wantErr: true,
		},
		{
			desc: "source, no object",
			dst:  c.Bucket("foo").Object("bar"),
			srcs: []*ObjectHandle{
				c.Bucket("foo").Object(""),
			},
			wantErr: true,
		},
		{
			desc: "destination, bad condition",
			dst:  c.Bucket("foo").Object("bar").Generation(12),
			srcs: []*ObjectHandle{
				c.Bucket("foo").Object("baz"),
			},
			wantErr: true,
		},
		{
			desc: "source, bad condition",
			dst:  c.Bucket("foo").Object("bar"),
			srcs: []*ObjectHandle{
				c.Bucket("foo").Object("baz").If(Conditions{MetagenerationMatch: 12}),
			},
			wantErr: true,
		},
	}

	for _, tt := range testCases {
		composer := tt.dst.ComposerFrom(tt.srcs...)
		if tt.attrs != nil {
			composer.ObjectAttrs = *tt.attrs
		}
		_, err := composer.Run(ctx)
		if gotErr := err != nil; gotErr != tt.wantErr {
			t.Errorf("%s: got error %v; want err %t", tt.desc, err, tt.wantErr)
			continue
		}
		if tt.wantErr {
			continue
		}
		// Success path: the server handler must have recorded exactly
		// one URL and body for this case.
		url, body := <-gotURL, <-gotBody
		if url != tt.wantURL {
			t.Errorf("%s: request URL\ngot  %q\nwant %q", tt.desc, url, tt.wantURL)
		}
		var req raw.ComposeRequest
		if err := json.Unmarshal(body, &req); err != nil {
			t.Errorf("%s: json.Unmarshal %v (body %s)", tt.desc, err, body)
		}
		if !reflect.DeepEqual(req, tt.wantReq) {
			// Print to JSON.
			wantReq, _ := json.Marshal(tt.wantReq)
			t.Errorf("%s: request body\ngot  %s\nwant %s", tt.desc, body, wantReq)
		}
	}
}
// TestCondition verifies that Generation and the various Conditions fields
// (GenerationMatch, MetagenerationMatch, DoesNotExist, ...) are translated
// into the expected query parameters on reads, attrs, updates, deletes,
// writes, and copies, by capturing each request sent to a stub server.
func TestCondition(t *testing.T) {
	gotReq := make(chan *http.Request, 1)
	hc, close := newTestServer(func(w http.ResponseWriter, r *http.Request) {
		io.Copy(ioutil.Discard, r.Body)
		gotReq <- r
		w.WriteHeader(200)
	})
	defer close()
	ctx := context.Background()
	c, err := NewClient(ctx, option.WithHTTPClient(hc))
	if err != nil {
		t.Fatal(err)
	}

	obj := c.Bucket("buck").Object("obj")
	dst := c.Bucket("dstbuck").Object("dst")
	tests := []struct {
		fn   func() // performs one operation against the stub server
		want string  // "METHOD RequestURI" the server should receive
	}{
		{
			func() { obj.Generation(1234).NewReader(ctx) },
			"GET /buck/obj?generation=1234",
		},
		{
			func() { obj.If(Conditions{GenerationMatch: 1234}).NewReader(ctx) },
			"GET /buck/obj?ifGenerationMatch=1234",
		},
		{
			func() { obj.If(Conditions{GenerationNotMatch: 1234}).NewReader(ctx) },
			"GET /buck/obj?ifGenerationNotMatch=1234",
		},
		{
			func() { obj.If(Conditions{MetagenerationMatch: 1234}).NewReader(ctx) },
			"GET /buck/obj?ifMetagenerationMatch=1234",
		},
		{
			func() { obj.If(Conditions{MetagenerationNotMatch: 1234}).NewReader(ctx) },
			"GET /buck/obj?ifMetagenerationNotMatch=1234",
		},
		{
			func() { obj.If(Conditions{MetagenerationNotMatch: 1234}).Attrs(ctx) },
			"GET /storage/v1/b/buck/o/obj?alt=json&ifMetagenerationNotMatch=1234&projection=full",
		},
		{
			func() { obj.If(Conditions{MetagenerationMatch: 1234}).Update(ctx, ObjectAttrsToUpdate{}) },
			"PATCH /storage/v1/b/buck/o/obj?alt=json&ifMetagenerationMatch=1234&projection=full",
		},
		{
			func() { obj.Generation(1234).Delete(ctx) },
			"DELETE /storage/v1/b/buck/o/obj?alt=json&generation=1234",
		},
		{
			func() {
				w := obj.If(Conditions{GenerationMatch: 1234}).NewWriter(ctx)
				w.ContentType = "text/plain"
				w.Close()
			},
			"POST /upload/storage/v1/b/buck/o?alt=json&ifGenerationMatch=1234&projection=full&uploadType=multipart",
		},
		{
			// DoesNotExist is expressed as ifGenerationMatch=0 on the wire.
			func() {
				w := obj.If(Conditions{DoesNotExist: true}).NewWriter(ctx)
				w.ContentType = "text/plain"
				w.Close()
			},
			"POST /upload/storage/v1/b/buck/o?alt=json&ifGenerationMatch=0&projection=full&uploadType=multipart",
		},
		{
			// Copy: destination conditions map to ifMetagenerationMatch,
			// source conditions to ifSourceGenerationMatch.
			func() {
				dst.If(Conditions{MetagenerationMatch: 5678}).CopierFrom(obj.If(Conditions{GenerationMatch: 1234})).Run(ctx)
			},
			"POST /storage/v1/b/buck/o/obj/rewriteTo/b/dstbuck/o/dst?alt=json&ifMetagenerationMatch=5678&ifSourceGenerationMatch=1234&projection=full",
		},
	}

	for i, tt := range tests {
		tt.fn()
		select {
		case r := <-gotReq:
			got := r.Method + " " + r.RequestURI
			if got != tt.want {
				t.Errorf("%d. RequestURI = %q; want %q", i, got, tt.want)
			}
		case <-time.After(5 * time.Second):
			t.Fatalf("%d. timeout", i)
		}
		if err != nil {
			t.Fatal(err)
		}
	}

	// Test an error, too:
	err = obj.Generation(1234).NewWriter(ctx).Close()
	if err == nil || !strings.Contains(err.Error(), "NewWriter: generation not supported") {
		t.Errorf("want error about unsupported generation; got %v", err)
	}
}
func maybeSetupGoogleCloudLogging() { if flagGCEProjectID == "" && flagGCELogName == "" && flagGCEJWTFile == "" { return } if flagGCEProjectID == "" || flagGCELogName == "" || flagGCEJWTFile == "" { exitf("All of --gce_project_id, --gce_log_name, and --gce_jwt_file must be specified for logging on Google Cloud Logging.") } jsonSlurp, err := ioutil.ReadFile(flagGCEJWTFile) if err != nil { exitf("Error reading --gce_jwt_file value: %v", err) } jwtConf, err := google.JWTConfigFromJSON(jsonSlurp, logging.Scope) if err != nil { exitf("Error reading --gce_jwt_file value: %v", err) } logc, err := logging.NewClient(context.Background(), flagGCEProjectID, flagGCELogName, option.WithHTTPClient(jwtConf.Client(context.Background()))) if err != nil { exitf("Error creating GCL client: %v", err) } log.SetOutput(io.MultiWriter(os.Stderr, logc.Writer(logging.Debug))) }
func main() { launchConfig.MaybeDeploy() flag.Parse() setProdFlags() if *root == "" { var err error *root, err = os.Getwd() if err != nil { log.Fatalf("Failed to getwd: %v", err) } } readTemplates() if err := initGithubSyncing(); err != nil { log.Fatalf("error setting up syncing to github: %v") } go runDemoBlobserverLoop() mux := http.DefaultServeMux mux.Handle("/favicon.ico", http.FileServer(http.Dir(filepath.Join(*root, "static")))) mux.Handle("/robots.txt", http.FileServer(http.Dir(filepath.Join(*root, "static")))) mux.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.Dir(filepath.Join(*root, "static"))))) mux.Handle("/talks/", http.StripPrefix("/talks/", http.FileServer(http.Dir(filepath.Join(*root, "talks"))))) mux.Handle(pkgPattern, godocHandler{}) mux.Handle(cmdPattern, godocHandler{}) mux.Handle(appPattern, godocHandler{}) mux.HandleFunc(errPattern, errHandler) mux.HandleFunc("/r/", gerritRedirect) mux.HandleFunc("/dl/", releaseRedirect) mux.HandleFunc("/debug/ip", ipHandler) mux.HandleFunc("/debug/uptime", uptimeHandler) mux.Handle("/doc/contributing", redirTo("/code#contributing")) mux.Handle("/lists", redirTo("/community")) mux.HandleFunc("/contributors", contribHandler()) mux.HandleFunc("/doc/", docHandler) mux.HandleFunc("/", mainHandler) if buildbotHost != "" && buildbotBackend != "" { if _, err := url.Parse(buildbotBackend); err != nil { log.Fatalf("Failed to parse %v as a URL: %v", buildbotBackend, err) } bbhpattern := strings.TrimRight(buildbotHost, "/") + "/" mux.HandleFunc(bbhpattern, func(w http.ResponseWriter, r *http.Request) { http.Redirect(w, r, buildbotBackend, http.StatusFound) }) } gceLauncher, err := gceDeployHandler("/launch/") if err != nil { log.Printf("Not installing GCE /launch/ handler: %v", err) mux.HandleFunc("/launch/", func(w http.ResponseWriter, r *http.Request) { http.Error(w, fmt.Sprintf("GCE launcher disabled: %v", err), 500) }) } else { mux.Handle("/launch/", gceLauncher) } var handler http.Handler = 
&redirectRootHandler{Handler: mux} if *logDir != "" || *logStdout { handler = NewLoggingHandler(handler, NewApacheLogger(*logDir, *logStdout)) } if *gceLogName != "" { projID := projectID() var hc *http.Client if !metadata.OnGCE() { hc = httpClient(projID) } ctx := context.Background() var logc *logging.Client if metadata.OnGCE() { logc, err = logging.NewClient(ctx, projID, *gceLogName) } else { logc, err = logging.NewClient(ctx, projID, *gceLogName, option.WithHTTPClient(hc)) } if err != nil { log.Fatal(err) } if err := logc.Ping(); err != nil { log.Fatalf("Failed to ping Google Cloud Logging: %v", err) } handler = NewLoggingHandler(handler, gceLogger{logc}) if gceLauncher != nil { var logc *logging.Client if metadata.OnGCE() { logc, err = logging.NewClient(ctx, projID, *gceLogName) } else { logc, err = logging.NewClient(ctx, projID, *gceLogName, option.WithHTTPClient(hc)) } if err != nil { log.Fatal(err) } logc.CommonLabels = map[string]string{ "from": "camli-gce-launcher", } logger := logc.Logger(logging.Default) logger.SetPrefix("launcher: ") gceLauncher.SetLogger(logger) } } emailErr := make(chan error) startEmailCommitLoop(emailErr) if *alsoRun != "" { runAsChild(*alsoRun) } httpServer := &http.Server{ Addr: *httpAddr, Handler: handler, ReadTimeout: 5 * time.Minute, WriteTimeout: 30 * time.Minute, } httpErr := make(chan error) go func() { log.Printf("Listening for HTTP on %v", *httpAddr) httpErr <- httpServer.ListenAndServe() }() httpsErr := make(chan error) if *httpsAddr != "" { go func() { httpsErr <- serveHTTPS(httpServer) }() } if *flagChromeBugRepro { go func() { log.Printf("Repro handler failed: %v", repro(":8001", "foo:bar")) }() } select { case err := <-emailErr: log.Fatalf("Error sending emails: %v", err) case err := <-httpErr: log.Fatalf("Error serving HTTP: %v", err) case err := <-httpsErr: log.Fatalf("Error serving HTTPS: %v", err) } }
// uploadDockerImage makes a tar.gz snapshot of the camlistored docker image,
// and uploads it at camlistore-release/docker/camlistored-REV.tar.gz. It then
// makes a copy in the same bucket and path as camlistored.tar.gz.
func uploadDockerImage() {
	proj := "camlistore-website"
	bucket := "camlistore-release"
	versionedTarball := "docker/camlistored-" + rev() + ".tar.gz"
	tarball := "docker/camlistored.tar.gz"
	versionFile := "docker/VERSION"

	log.Printf("Uploading %s/%s ...", bucket, versionedTarball)

	ts, err := tokenSource(bucket)
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	stoClient, err := storage.NewClient(ctx, option.WithTokenSource(ts), option.WithHTTPClient(oauth2.NewClient(ctx, ts)))
	if err != nil {
		log.Fatal(err)
	}
	w := stoClient.Bucket(bucket).Object(versionedTarball).NewWriter(ctx)
	w.ACL = publicACL(proj)
	w.CacheControl = "no-cache" // TODO: remove for non-tip releases? set expirations?
	w.ContentType = "application/x-gtar"

	// Pipeline: `docker save` stdout -> gzip (in a goroutine) -> io.Pipe ->
	// GCS object writer.
	dockerSave := exec.Command("docker", "save", serverImage)
	dockerSave.Stderr = os.Stderr
	tar, err := dockerSave.StdoutPipe()
	if err != nil {
		log.Fatal(err)
	}
	targz, pw := io.Pipe()
	go func() {
		zw := gzip.NewWriter(pw)
		n, err := io.Copy(zw, tar)
		if err != nil {
			log.Fatalf("Error copying to gzip writer: after %d bytes, %v", n, err)
		}
		if err := zw.Close(); err != nil {
			log.Fatalf("gzip.Close: %v", err)
		}
		// err is nil here (any failure above called Fatalf), so this just
		// signals EOF to the reading side.
		pw.CloseWithError(err)
	}()
	if err := dockerSave.Start(); err != nil {
		log.Fatalf("Error starting docker save %v: %v", serverImage, err)
	}
	if _, err := io.Copy(w, targz); err != nil {
		log.Fatalf("io.Copy: %v", err)
	}
	// Close the GCS writer before waiting on docker; Close finalizes the
	// upload.
	if err := w.Close(); err != nil {
		log.Fatalf("closing GCS storage writer: %v", err)
	}
	if err := dockerSave.Wait(); err != nil {
		log.Fatalf("Error waiting for docker save %v: %v", serverImage, err)
	}
	log.Printf("Uploaded tarball to %s", versionedTarball)
	if isWIP() {
		// Work-in-progress builds don't update the stable name or VERSION.
		return
	}

	// Server-side copy of the versioned tarball to the stable name.
	log.Printf("Copying tarball to %s/%s ...", bucket, tarball)
	dest := stoClient.Bucket(bucket).Object(tarball)
	if _, err := stoClient.Bucket(bucket).Object(versionedTarball).CopyTo(
		ctx,
		dest,
		&storage.ObjectAttrs{
			ACL:          publicACL(proj),
			CacheControl: "no-cache",
			ContentType:  "application/x-gtar",
		}); err != nil {
		log.Fatalf("Error uploading %v: %v", tarball, err)
	}
	log.Printf("Uploaded tarball to %s", tarball)

	// Record the revision this tarball was built from.
	log.Printf("Updating %s/%s file...", bucket, versionFile)
	w = stoClient.Bucket(bucket).Object(versionFile).NewWriter(ctx)
	w.ACL = publicACL(proj)
	w.CacheControl = "no-cache"
	w.ContentType = "text/plain"
	if _, err := io.Copy(w, strings.NewReader(rev())); err != nil {
		log.Fatalf("io.Copy: %v", err)
	}
	if err := w.Close(); err != nil {
		log.Fatalf("closing GCS storage writer: %v", err)
	}
}
func newTestClient(rt http.RoundTripper) *errors.Client { t, err := errors.NewClient(context.Background(), testProjectID, "myservice", "v1.000", option.WithHTTPClient(&http.Client{Transport: rt})) if err != nil { panic(err) } t.RepanicDefault = false return t }