Example 1
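Uploads a single file to a Cloud Storage bucket: the object is named by the file's path relative to the configured source directory, and cache-control, metadata, ACL rules parsed from "entity:role" strings, content-type, and content-encoding are set on the writer before the copy.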
// uploadFile uploads the file to dst using global bucket.
func uploadFile(dst, file string) error {
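	// gzipper (defined elsewhere in this package) returns a read-closer for
	// file and reports whether the returned data is gzip-compressed.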
	r, gz, err := gzipper(file)
	if err != nil {
		return err
	}
	defer r.Close()
	rel, err := filepath.Rel(vargs.Source, file)
	if err != nil {
		return err
	}
	w := bucket.Object(rel).NewWriter(context.Background())
	w.CacheControl = vargs.CacheControl
	w.Metadata = vargs.Metadata
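	// Each ACL entry is an "entity:role" pair, split on the first colon.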
	for _, s := range vargs.ACL {
		a := strings.SplitN(s, ":", 2)
		if len(a) != 2 {
			return fmt.Errorf("%s: invalid ACL %q", rel, s)
		}
		w.ACL = append(w.ACL, storage.ACLRule{
			Entity: storage.ACLEntity(a[0]),
			Role:   storage.ACLRole(a[1]),
		})
	}
	w.ContentType = mime.TypeByExtension(filepath.Ext(file))
	if w.ContentType == "" {
		w.ContentType = "application/octet-stream"
	}
	if gz {
		w.ContentEncoding = "gzip"
	}
	if _, err := io.Copy(w, r); err != nil {
		return err
	}
	return w.Close()
}
Example 2
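A helper that returns a public-read ACL for a project: project owners keep OWNER access (to keep the web UI's "Shared Publicly" link working, as the comment explains), and AllUsers get READER access.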
func publicACL(proj string) []storage.ACLRule {
	return []storage.ACLRule{
		// If you don't give the owners access, the web UI seems to
		// have a bug and doesn't have access to see that it's public, so
		// won't render the "Shared Publicly" link. So we do that, even
		// though it's dumb and unnecessary otherwise:
		{Entity: storage.ACLEntity("project-owners-" + proj), Role: storage.RoleOwner},
		{Entity: storage.AllUsers, Role: storage.RoleReader},
	}
}
Example 3
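Uploads the currently-running binary to the configured bucket, making the object publicly readable and non-cacheable, and exits via log.Fatal on any error.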
// uploadBinary uploads the currently-running Linux binary.
// It crashes if it fails.
func (cl *cloudLaunch) uploadBinary() {
	ctx := context.Background()
	if cl.BinaryBucket == "" {
		log.Fatal("cloudlaunch: Config.BinaryBucket is empty")
	}
	stoClient, err := storage.NewClient(ctx, cloud.WithBaseHTTP(cl.oauthClient))
	if err != nil {
		log.Fatal(err)
	}
	w := stoClient.Bucket(cl.BinaryBucket).Object(cl.binaryObject()).NewWriter(ctx)
	w.ACL = []storage.ACLRule{
		// If you don't give the owners access, the web UI seems to
		// have a bug and doesn't have access to see that it's public, so
		// won't render the "Shared Publicly" link. So we do that, even
		// though it's dumb and unnecessary otherwise:
		{
			Entity: storage.ACLEntity("project-owners-" + cl.GCEProjectID),
			Role:   storage.RoleOwner,
		},
		// Public, so our systemd unit can get it easily:
		{
			Entity: storage.AllUsers,
			Role:   storage.RoleReader,
		},
	}
	w.CacheControl = "no-cache"
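	// getSelfPath (defined elsewhere) returns the path of the currently-running executable.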
	selfPath := getSelfPath()
	log.Printf("Uploading %q to %v", selfPath, cl.binaryURL())
	f, err := os.Open(selfPath)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	n, err := io.Copy(w, f)
	if err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("Uploaded %d bytes", n)
}
Example 4
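A small command-line uploader: it parses a bucket/object argument, looks up the bucket's project, authenticates with OAuth2, sniffs the Content-Type from the first 1 MiB of input (stdin or a file), and optionally makes the object public.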
func main() {
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "Usage: upload [--public] [--file=...] <bucket/object>\n")
		flag.PrintDefaults()
	}
	flag.Parse()
	if flag.NArg() != 1 {
		flag.Usage()
		os.Exit(1)
	}
	args := strings.SplitN(flag.Arg(0), "/", 2)
	if len(args) != 2 {
		flag.Usage()
		os.Exit(1)
	}
	bucket, object := args[0], args[1]

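	// bucketProject (defined elsewhere) maps known bucket names to their project IDs.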
	proj, ok := bucketProject[bucket]
	if !ok {
		log.Fatalf("bucket %q doesn't have an associated project in upload.go")
	}

	ts, err := tokenSource(bucket)
	if err != nil {
		log.Fatalf("Failed to get an OAuth2 token source: %v", err)
	}
	httpClient := oauth2.NewClient(oauth2.NoContext, ts)

	ctx := cloud.NewContext(proj, httpClient)
	w := storage.NewWriter(ctx, bucket, object)
	// If you don't give the owners access, the web UI seems to
	// have a bug and doesn't have access to see that it's public, so
	// won't render the "Shared Publicly" link. So we do that, even
	// though it's dumb and unnecessary otherwise:
	w.ACL = append(w.ACL, storage.ACLRule{Entity: storage.ACLEntity("project-owners-" + proj), Role: storage.RoleOwner})
	if *public {
		w.ACL = append(w.ACL, storage.ACLRule{Entity: storage.AllUsers, Role: storage.RoleReader})
		if !*cacheable {
			w.CacheControl = "no-cache"
		}
	}
	var content io.Reader
	if *file == "-" {
		content = os.Stdin
	} else {
		content, err = os.Open(*file)
		if err != nil {
			log.Fatal(err)
		}
	}

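	// Buffer up to 1 MiB of the input so the Content-Type can be sniffed
	// before the rest is streamed to the object writer.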
	const maxSlurp = 1 << 20
	var buf bytes.Buffer
	n, err := io.CopyN(&buf, content, maxSlurp)
	if err != nil && err != io.EOF {
		log.Fatalf("Error reading from stdin: %v, %v", n, err)
	}
	w.ContentType = http.DetectContentType(buf.Bytes())

	_, err = io.Copy(w, io.MultiReader(&buf, content))
	if cerr := w.Close(); cerr != nil && err == nil {
		err = cerr
	}
	if err != nil {
		log.Fatalf("Write error: %v", err)
	}
	if *verbose {
		log.Printf("Wrote %v", object)
	}
	os.Exit(0)
}
Example 5
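Streams the output of "docker save" through an on-the-fly gzip pipe into a versioned tarball object with a public-read ACL, then copies that object to its stable, unversioned name.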
func uploadDockerImage() {
	proj := "camlistore-website"
	bucket := "camlistore-release"
	versionedTarball := "docker/camlistored-" + *rev + ".tar.gz"
	tarball := "docker/camlistored.tar.gz"

	log.Printf("Uploading %s/%s ...", bucket, versionedTarball)

	ts, err := tokenSource(bucket)
	if err != nil {
		log.Fatal(err)
	}

	httpClient := oauth2.NewClient(oauth2.NoContext, ts)
	ctx := cloud.NewContext(proj, httpClient)
	w := storage.NewWriter(ctx, bucket, versionedTarball)
	// If you don't give the owners access, the web UI seems to
	// have a bug and doesn't have access to see that it's public, so
	// won't render the "Shared Publicly" link. So we do that, even
	// though it's dumb and unnecessary otherwise:
	acl := append(w.ACL, storage.ACLRule{Entity: storage.ACLEntity("project-owners-" + proj), Role: storage.RoleOwner})
	acl = append(acl, storage.ACLRule{Entity: storage.AllUsers, Role: storage.RoleReader})
	w.ACL = acl
	w.CacheControl = "no-cache" // TODO: remove for non-tip releases? set expirations?
	w.ContentType = "application/x-gtar"

	dockerSave := exec.Command("docker", "save", serverImage)
	dockerSave.Stderr = os.Stderr
	tar, err := dockerSave.StdoutPipe()
	if err != nil {
		log.Fatal(err)
	}
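	// Gzip the "docker save" tar stream on the fly: the goroutine compresses
	// tar into pw, and targz is the compressed stream uploaded below.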
	targz, pw := io.Pipe()
	go func() {
		zw := gzip.NewWriter(pw)
		n, err := io.Copy(zw, tar)
		if err != nil {
			log.Fatalf("Error copying to gzip writer: after %d bytes, %v", n, err)
		}
		if err := zw.Close(); err != nil {
			log.Fatalf("gzip.Close: %v", err)
		}
		pw.CloseWithError(err)
	}()
	if err := dockerSave.Start(); err != nil {
		log.Fatalf("Error starting docker save %v: %v", serverImage, err)
	}
	if _, err := io.Copy(w, targz); err != nil {
		log.Fatalf("io.Copy: %v", err)
	}
	if err := w.Close(); err != nil {
		log.Fatalf("closing GCS storage writer: %v", err)
	}
	if err := dockerSave.Wait(); err != nil {
		log.Fatalf("Error waiting for docker save %v: %v", serverImage, err)
	}
	log.Printf("Uploaded tarball to %s", versionedTarball)
	log.Printf("Copying tarball to %s/%s ...", bucket, tarball)
	// TODO(mpl): 2015-05-12: update google.golang.org/cloud/storage so we
	// can specify the dest name in CopyObject, and we get the ACLs from the
	// src for free too I think.
	if _, err := storage.CopyObject(ctx, bucket, versionedTarball, bucket, storage.ObjectAttrs{
		Name:        tarball,
		ACL:         acl,
		ContentType: "application/x-gtar",
	}); err != nil {
		log.Fatalf("Error uploading %v: %v", tarball, err)
	}
	log.Printf("Uploaded tarball to %s", tarball)
}