Example #1
func (b *s3Backend) Put(tx *postgres.DBTx, info FileInfo, r io.Reader, append bool) error {
	if append {
		// This is a hack. The next easiest thing to do, if we need to handle
		// upload resumption, is to finalize the multipart upload when the
		// client disconnects and, when the rest of the data arrives, start a
		// new multipart upload copying the existing object as the first part
		// (which S3 supports via a dedicated API call). That requires
		// replacing the simple uploader, so it was not done in the first pass.
		existing, err := b.Open(tx, info, false)
		if err != nil {
			return err
		}
		r = io.MultiReader(existing, r)
	}

	info.ExternalID = random.UUID()
	if err := tx.Exec("UPDATE files SET external_id = $2 WHERE file_id = $1", info.ID, info.ExternalID); err != nil {
		return err
	}

	u := s3manager.NewUploaderWithClient(b.client)
	_, err := u.Upload(&s3manager.UploadInput{
		Bucket:      &b.bucket,
		Key:         &info.ExternalID,
		ContentType: &info.Type,
		Body:        r,
	})

	return err
}
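The comment above sketches resumption via S3's server-side part copy but stops short of implementing it. Below is a minimal, hypothetical sketch of that idea against the same aws-sdk-go API surface; the function name and the svc/bucket/key parameters are assumptions, and non-final parts must still meet S3's 5 MB minimum.

// Sketch only: resume an upload by starting a new multipart upload and
// copying the already-finalized object in as part 1 via UploadPartCopy.
// Names (svc, bucket, existingKey, key) are illustrative assumptions.
func resumeViaCopy(svc *s3.S3, bucket, existingKey, key string, rest io.ReadSeeker) error {
	mpu, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return err
	}
	// Part 1: server-side copy of the existing object (no re-download).
	copyOut, err := svc.UploadPartCopy(&s3.UploadPartCopyInput{
		Bucket:     aws.String(bucket),
		Key:        aws.String(key),
		CopySource: aws.String(bucket + "/" + existingKey),
		PartNumber: aws.Int64(1),
		UploadId:   mpu.UploadId,
	})
	if err != nil {
		return err
	}
	// Part 2: the newly arrived data (non-final parts must be >= 5 MB).
	partOut, err := svc.UploadPart(&s3.UploadPartInput{
		Bucket:     aws.String(bucket),
		Key:        aws.String(key),
		PartNumber: aws.Int64(2),
		UploadId:   mpu.UploadId,
		Body:       rest,
	})
	if err != nil {
		return err
	}
	_, err = svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket:   aws.String(bucket),
		Key:      aws.String(key),
		UploadId: mpu.UploadId,
		MultipartUpload: &s3.CompletedMultipartUpload{
			Parts: []*s3.CompletedPart{
				{ETag: copyOut.CopyPartResult.ETag, PartNumber: aws.Int64(1)},
				{ETag: partOut.ETag, PartNumber: aws.Int64(2)},
			},
		},
	})
	return err
}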
Example #2
func (bh *S3BackupHandle) AddFile(filename string) (io.WriteCloser, error) {
	if bh.readOnly {
		return nil, fmt.Errorf("AddFile cannot be called on read-only backup")
	}

	reader, writer := io.Pipe()
	bh.waitGroup.Add(1)

	go func() {
		defer bh.waitGroup.Done()
		uploader := s3manager.NewUploaderWithClient(bh.client)
		object := objName(bh.dir, bh.name, filename)
		_, err := uploader.Upload(&s3manager.UploadInput{
			Bucket: bucket,
			Key:    object,
			Body:   reader,
		})
		if err != nil {
			reader.CloseWithError(err)
			bh.errors.RecordError(err)
		}
	}()

	return writer, nil
}
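A hedged usage sketch for the pipe-backed writer returned above; the helper name, filename, and payload are assumptions, not part of the code being quoted.

// Hypothetical caller sketch. Closing the writer is what delivers EOF to
// the pipe's read side so the uploader goroutine can finish; upload errors
// surface via bh.errors once bh.waitGroup is waited on elsewhere.
func writeManifest(bh *S3BackupHandle, manifestBytes []byte) error {
	w, err := bh.AddFile("MANIFEST")
	if err != nil {
		return err
	}
	if _, err := w.Write(manifestBytes); err != nil {
		return err
	}
	return w.Close()
}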
Example #3
func TestReaderAt(t *testing.T) {
	svc := s3.New(unit.Session)
	svc.Handlers.Unmarshal.Clear()
	svc.Handlers.UnmarshalMeta.Clear()
	svc.Handlers.UnmarshalError.Clear()
	svc.Handlers.Send.Clear()

	contentLen := ""
	svc.Handlers.Send.PushBack(func(r *request.Request) {
		contentLen = r.HTTPRequest.Header.Get("Content-Length")
		r.HTTPResponse = &http.Response{
			StatusCode: 200,
			Body:       ioutil.NopCloser(bytes.NewReader([]byte{})),
		}
	})

	mgr := s3manager.NewUploaderWithClient(svc, func(u *s3manager.Uploader) {
		u.Concurrency = 1
	})

	_, err := mgr.Upload(&s3manager.UploadInput{
		Bucket: aws.String("Bucket"),
		Key:    aws.String("Key"),
		Body:   &fooReaderAt{},
	})

	assert.NoError(t, err)
	assert.Equal(t, "12", contentLen)
}
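The fooReaderAt fixture is not included in this collection. A stand-in consistent with the asserted Content-Length of "12" is a ReaderAt that reports 12 bytes and then EOF; the SDK's actual test fixture may differ.

// Assumed stand-in: satisfies io.ReaderAt so the uploader can buffer a
// single 12-byte body, yielding Content-Length 12. The stub never writes
// real data into p; the test above only checks the length header.
type fooReaderAt struct{}

func (r *fooReaderAt) Read(p []byte) (int, error) {
	return 12, io.EOF
}

func (r *fooReaderAt) ReadAt(p []byte, off int64) (int, error) {
	return 12, io.EOF
}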
Example #4
func TestUploadFailCleanup(t *testing.T) {
	svc := s3.New(integration.Session)

	// Break checksum on 2nd part so it fails
	part := 0
	svc.Handlers.Build.PushBack(func(r *request.Request) {
		if r.Operation.Name == "UploadPart" {
			if part == 1 {
				r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "000")
			}
			part++
		}
	})

	key := "12mb-leave"
	mgr := s3manager.NewUploaderWithClient(svc, func(u *s3manager.Uploader) {
		u.LeavePartsOnError = false
	})
	_, err := mgr.Upload(&s3manager.UploadInput{
		Bucket: bucketName,
		Key:    &key,
		Body:   bytes.NewReader(integBuf12MB),
	})
	assert.Error(t, err)
	assert.NotContains(t, err.Error(), "MissingRegion")
	uploadID := ""
	if merr, ok := err.(s3manager.MultiUploadFailure); ok {
		uploadID = merr.UploadID()
	}
	assert.NotEmpty(t, uploadID)

	_, err = svc.ListParts(&s3.ListPartsInput{
		Bucket: bucketName, Key: &key, UploadId: &uploadID})
	assert.Error(t, err)
}
Example #5
func (d *SDKCreateMachineImageManifestDriver) uploadManifest(bucketName string, m *manifests.ImportVolumeManifest) (string, error) {
	manifestKey := fmt.Sprintf("bosh-machine-image-manifest-%d", time.Now().UnixNano())

	// create presigned GET request for the manifest
	getReq, _ := d.s3Client.GetObjectRequest(&s3.GetObjectInput{
		Bucket: aws.String(bucketName),
		Key:    aws.String(manifestKey),
	})

	manifestGetURL, err := getReq.Presign(1 * time.Hour)
	if err != nil {
		return "", fmt.Errorf("failed to sign manifest GET request: %s", err)
	}

	d.logger.Printf("generated presigned manifest GET URL %s\n", manifestGetURL)

	// create presigned DELETE request for the manifest
	deleteReq, _ := d.s3Client.DeleteObjectRequest(&s3.DeleteObjectInput{
		Bucket: aws.String(bucketName),
		Key:    aws.String(manifestKey),
	})

	manifestDeleteURL, err := deleteReq.Presign(2 * time.Hour)
	if err != nil {
		return "", fmt.Errorf("failed to sign manifest delete request: %s", err)
	}

	d.logger.Printf("generated presigned manifest DELETE URL %s\n", manifestDeleteURL)

	m.SelfDestructURL = manifestDeleteURL

	manifestBytes, err := xml.Marshal(m)
	if err != nil {
		return "", fmt.Errorf("serializing machine image manifest: %s", err)
	}

	manifestReader := bytes.NewReader(manifestBytes)

	uploadStartTime := time.Now()
	uploader := s3manager.NewUploaderWithClient(d.s3Client)
	_, err = uploader.Upload(&s3manager.UploadInput{
		Body:   manifestReader,
		Bucket: aws.String(bucketName),
		Key:    aws.String(manifestKey),
	})

	if err != nil {
		return "", fmt.Errorf("uploading machine image manifest to S3: %s", err)
	}

	d.logger.Printf("finished uploaded machine image manifest to s3 after %f seconds\n", time.Since(uploadStartTime).Seconds())

	return manifestGetURL, nil
}
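The returned value is an ordinary presigned HTTPS URL, so any plain HTTP client can fetch the manifest until the signature expires. A hypothetical consumer, not part of the driver:

// Illustrative only: fetchManifest is not part of the project above.
// Presigned URLs need no AWS credentials on the caller's side.
func fetchManifest(manifestGetURL string) ([]byte, error) {
	resp, err := http.Get(manifestGetURL)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return ioutil.ReadAll(resp.Body) // the XML manifest uploaded above
}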
Example #6
func TestUploadOrderReadFail1(t *testing.T) {
	s, ops, _ := loggingSvc(emptyList)
	mgr := s3manager.NewUploaderWithClient(s)
	_, err := mgr.Upload(&s3manager.UploadInput{
		Bucket: aws.String("Bucket"),
		Key:    aws.String("Key"),
		Body:   &failreader{times: 1},
	})

	assert.Equal(t, "ReadRequestBody", err.(awserr.Error).Code())
	assert.EqualError(t, err.(awserr.Error).OrigErr(), "random failure")
	assert.Equal(t, []string{}, *ops)
}
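Several tests in this collection share a loggingSvc helper that is not reproduced here. The following is an assumed reconstruction based only on what the tests assert (operation names, "UPLOAD-ID", "ETAG<n>", "https://location", "VERSION-ID"); the SDK's real helper may differ in detail.

// Assumed reconstruction of the shared test helper: it stubs out the
// transport, records each operation name and its params (unless the
// operation is in ignoreOps), and fills responses with the fixed values
// the tests assert on.
func loggingSvc(ignoreOps []string) (*s3.S3, *[]string, *[]interface{}) {
	var m sync.Mutex
	partNum := 0
	names := []string{}
	params := []interface{}{}

	svc := s3.New(unit.Session)
	svc.Handlers.Unmarshal.Clear()
	svc.Handlers.UnmarshalMeta.Clear()
	svc.Handlers.UnmarshalError.Clear()
	svc.Handlers.Send.Clear()
	svc.Handlers.Send.PushBack(func(r *request.Request) {
		m.Lock()
		defer m.Unlock()

		ignored := false
		for _, op := range ignoreOps {
			if op == r.Operation.Name {
				ignored = true
			}
		}
		if !ignored {
			names = append(names, r.Operation.Name)
			params = append(params, r.Params)
		}

		r.HTTPResponse = &http.Response{
			StatusCode: 200,
			Body:       ioutil.NopCloser(bytes.NewReader([]byte{})),
		}

		switch data := r.Data.(type) {
		case *s3.CreateMultipartUploadOutput:
			data.UploadId = aws.String("UPLOAD-ID")
		case *s3.UploadPartOutput:
			partNum++
			data.ETag = aws.String(fmt.Sprintf("ETAG%d", partNum))
		case *s3.CompleteMultipartUploadOutput:
			data.Location = aws.String("https://location")
			data.VersionId = aws.String("VERSION-ID")
		case *s3.PutObjectOutput:
			data.VersionId = aws.String("VERSION-ID")
		}
	})
	return svc, &names, &params
}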
Example #7
func TestUploadOrderSingleBufferedReader(t *testing.T) {
	s, ops, _ := loggingSvc(emptyList)
	mgr := s3manager.NewUploaderWithClient(s)
	resp, err := mgr.Upload(&s3manager.UploadInput{
		Bucket: aws.String("Bucket"),
		Key:    aws.String("Key"),
		Body:   &sizedReader{size: 1024 * 1024 * 2},
	})

	assert.NoError(t, err)
	assert.Equal(t, []string{"PutObject"}, *ops)
	assert.NotEqual(t, "", resp.Location)
	assert.Equal(t, "", resp.UploadID)
}
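sizedReader is another fixture that is not shown. A stand-in consistent with its uses here and in TestUploadOrderMultiBufferedReaderEOF further below: serve size bytes, then finish with io.EOF, or with the injected err when one is set.

// Assumed stand-in: serves size bytes of zeros, then terminates with err
// (io.EOF when err is unset).
type sizedReader struct {
	size int
	cur  int
	err  error
}

func (s *sizedReader) Read(p []byte) (int, error) {
	if s.cur >= s.size {
		if s.err == nil {
			return 0, io.EOF
		}
		return 0, s.err
	}
	n := len(p)
	if remaining := s.size - s.cur; n > remaining {
		n = remaining
	}
	s.cur += n
	return n, nil
}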
Example #8
// Put uploads a blob to an S3 compatible blobstore
func (client *S3Blobstore) Put(src io.ReadSeeker, dest string) error {
	uploader := s3manager.NewUploaderWithClient(client.s3Client)
	putResult, err := uploader.Upload(&s3manager.UploadInput{
		Body:   src,
		Bucket: aws.String(client.s3cliConfig.BucketName),
		Key:    aws.String(dest),
	})

	if err != nil {
		return err
	}

	log.Println("Successfully uploaded file to", putResult.Location)
	return nil
}
Example #9
func TestUploadOrderZero(t *testing.T) {
	s, ops, args := loggingSvc(emptyList)
	mgr := s3manager.NewUploaderWithClient(s)
	resp, err := mgr.Upload(&s3manager.UploadInput{
		Bucket: aws.String("Bucket"),
		Key:    aws.String("Key"),
		Body:   bytes.NewReader(make([]byte, 0)),
	})

	assert.NoError(t, err)
	assert.Equal(t, []string{"PutObject"}, *ops)
	assert.NotEqual(t, "", resp.Location)
	assert.Equal(t, "", resp.UploadID)
	assert.Equal(t, 0, buflen(val((*args)[0], "Body")))
}
Example #10
func TestUploadOrderReadFail2(t *testing.T) {
	s, ops, _ := loggingSvc([]string{"UploadPart"})
	mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) {
		u.Concurrency = 1
	})
	_, err := mgr.Upload(&s3manager.UploadInput{
		Bucket: aws.String("Bucket"),
		Key:    aws.String("Key"),
		Body:   &failreader{times: 2},
	})

	assert.Equal(t, "ReadRequestBody", err.(awserr.Error).Code())
	assert.EqualError(t, err.(awserr.Error).OrigErr(), "random failure")
	assert.Equal(t, []string{"CreateMultipartUpload", "AbortMultipartUpload"}, *ops)
}
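failreader, used by both ReadFail tests, is likewise undefined in this collection. A stand-in consistent with the asserted "random failure" OrigErr: reads succeed (claiming a full buffer) until the times-th call, which fails. With times: 1 the first read fails before any request is sent, so ops stays empty; with times: 2 the first 5 MB part is read, CreateMultipartUpload is issued, and the second read triggers the abort.

// Assumed stand-in: Read succeeds until the times-th call, which returns
// the error the tests assert on.
type failreader struct {
	times     int
	failCount int
}

func (f *failreader) Read(b []byte) (int, error) {
	f.failCount++
	if f.failCount >= f.times {
		return 0, fmt.Errorf("random failure")
	}
	return len(b), nil
}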
Example #11
// Put uploads a blob to an S3 compatible blobstore
func (client *S3Blobstore) Put(src io.ReadSeeker, dest string) error {
	cfg := client.s3cliConfig
	if cfg.CredentialsSource == config.NoneCredentialsSource {
		return errorInvalidCredentialsSourceValue
	}

	uploader := s3manager.NewUploaderWithClient(client.s3Client, func(u *s3manager.Uploader) {
		u.LeavePartsOnError = false

		if !cfg.MultipartUpload {
			// disable multipart uploads by way of large PartSize configuration
			u.PartSize = oneTB
		}
	})
	uploadInput := &s3manager.UploadInput{
		Body:   src,
		Bucket: aws.String(cfg.BucketName),
		Key:    aws.String(dest),
	}
	if cfg.ServerSideEncryption != "" {
		uploadInput.ServerSideEncryption = aws.String(cfg.ServerSideEncryption)
	}
	if cfg.SSEKMSKeyID != "" {
		uploadInput.SSEKMSKeyId = aws.String(cfg.SSEKMSKeyID)
	}

	retry := 0
	maxRetries := 3
	for {
		putResult, err := uploader.Upload(uploadInput)
		if err != nil {
			if _, ok := err.(s3manager.MultiUploadFailure); ok {
				if retry == maxRetries {
					log.Println("Upload retry limit exceeded:", err.Error())
					return fmt.Errorf("upload retry limit exceeded: %s", err.Error())
				}
				retry++
				time.Sleep(time.Second * time.Duration(retry))
				continue
			}
			log.Println("Upload failed:", err.Error())
			return fmt.Errorf("upload failure: %s", err.Error())
		}

		log.Println("Successfully uploaded file to", putResult.Location)
		return nil
	}
}
Example #12
func TestUploadOrderSingleFailure(t *testing.T) {
	s, ops, _ := loggingSvc(emptyList)
	s.Handlers.Send.PushBack(func(r *request.Request) {
		r.HTTPResponse.StatusCode = 400
	})
	mgr := s3manager.NewUploaderWithClient(s)
	resp, err := mgr.Upload(&s3manager.UploadInput{
		Bucket: aws.String("Bucket"),
		Key:    aws.String("Key"),
		Body:   bytes.NewReader(buf2MB),
	})

	assert.Error(t, err)
	assert.Equal(t, []string{"PutObject"}, *ops)
	assert.Nil(t, resp)
}
Example #13
func (self *S3Filesystem) Create(src File) error {
	var fullpath string
	if self.path == "" || strings.HasSuffix(self.path, "/") {
		fullpath = filepath.Join(self.path, src.Relative())
	} else {
		fullpath = self.path
	}
	input := s3manager.UploadInput{
		ACL:    aws.String(acl),
		Bucket: aws.String(self.bucket),
		Key:    aws.String(fullpath),
	}

	switch t := src.(type) {
	case *S3File:
		// special case for S3File to preserve header information
		getObjectInput := s3.GetObjectInput{
			Bucket: aws.String(t.bucket),
			Key:    t.object.Key,
		}
		output, err := self.conn.GetObject(&getObjectInput)
		if err != nil {
			return err
		}
		defer output.Body.Close()
		input.Body = output.Body
		// transfer existing headers across
		input.ContentType = output.ContentType
		// input.LastModified = output.LastModified
		input.StorageClass = output.StorageClass
	default:
		reader, err := src.Reader()
		if err != nil {
			return err
		}
		input.Body = reader
		defer reader.Close()
		input.ContentType = aws.String(guessMimeType(src.Relative()))
	}

	u := s3manager.NewUploaderWithClient(self.conn)
	_, err := u.Upload(&input)
	return err
}
Example #14
func TestUploadOrderSingle(t *testing.T) {
	s, ops, args := loggingSvc(emptyList)
	mgr := s3manager.NewUploaderWithClient(s)
	resp, err := mgr.Upload(&s3manager.UploadInput{
		Bucket:               aws.String("Bucket"),
		Key:                  aws.String("Key"),
		Body:                 bytes.NewReader(buf2MB),
		ServerSideEncryption: aws.String("AES256"),
		ContentType:          aws.String("content/type"),
	})

	assert.NoError(t, err)
	assert.Equal(t, []string{"PutObject"}, *ops)
	assert.NotEqual(t, "", resp.Location)
	assert.Equal(t, aws.String("VERSION-ID"), resp.VersionID)
	assert.Equal(t, "", resp.UploadID)
	assert.Equal(t, "AES256", val((*args)[0], "ServerSideEncryption"))
	assert.Equal(t, "content/type", val((*args)[0], "ContentType"))
}
Example #15
func TestUploadOrderMultiDifferentPartSize(t *testing.T) {
	s, ops, args := loggingSvc(emptyList)
	mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) {
		u.PartSize = 1024 * 1024 * 7
		u.Concurrency = 1
	})
	_, err := mgr.Upload(&s3manager.UploadInput{
		Bucket: aws.String("Bucket"),
		Key:    aws.String("Key"),
		Body:   bytes.NewReader(buf12MB),
	})

	assert.NoError(t, err)
	assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops)

	// Part lengths
	assert.Equal(t, 1024*1024*7, buflen(val((*args)[1], "Body")))
	assert.Equal(t, 1024*1024*5, buflen(val((*args)[2], "Body")))
}
Example #16
func TestUploadOrderMultiFailureOnCreate(t *testing.T) {
	s, ops, _ := loggingSvc(emptyList)
	s.Handlers.Send.PushBack(func(r *request.Request) {
		switch r.Data.(type) {
		case *s3.CreateMultipartUploadOutput:
			r.HTTPResponse.StatusCode = 400
		}
	})

	mgr := s3manager.NewUploaderWithClient(s)
	_, err := mgr.Upload(&s3manager.UploadInput{
		Bucket: aws.String("Bucket"),
		Key:    aws.String("Key"),
		Body:   bytes.NewReader(make([]byte, 1024*1024*12)),
	})

	assert.Error(t, err)
	assert.Equal(t, []string{"CreateMultipartUpload"}, *ops)
}
Example #17
func TestUploadZeroLenObject(t *testing.T) {
	requestMade := false
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		requestMade = true
		w.WriteHeader(http.StatusOK)
	}))
	mgr := s3manager.NewUploaderWithClient(s3.New(unit.Session, &aws.Config{
		Endpoint: aws.String(server.URL),
	}))
	resp, err := mgr.Upload(&s3manager.UploadInput{
		Bucket: aws.String("Bucket"),
		Key:    aws.String("Key"),
		Body:   strings.NewReader(""),
	})

	assert.NoError(t, err)
	assert.True(t, requestMade)
	assert.NotEqual(t, "", resp.Location)
	assert.Equal(t, "", resp.UploadID)
}
Example #18
// TestUploadOrderMultiBufferedReaderEOF tests the edge case where the
// file size is an exact multiple of the part size, which means nextReader
// will return io.EOF rather than io.ErrUnexpectedEOF
func TestUploadOrderMultiBufferedReaderEOF(t *testing.T) {
	s, ops, args := loggingSvc(emptyList)
	mgr := s3manager.NewUploaderWithClient(s)
	_, err := mgr.Upload(&s3manager.UploadInput{
		Bucket: aws.String("Bucket"),
		Key:    aws.String("Key"),
		Body:   &sizedReader{size: 1024 * 1024 * 10, err: io.EOF},
	})

	assert.NoError(t, err)
	assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops)

	// Part lengths
	parts := []int{
		buflen(val((*args)[1], "Body")),
		buflen(val((*args)[2], "Body")),
	}
	sort.Ints(parts)
	assert.Equal(t, []int{1024 * 1024 * 5, 1024 * 1024 * 5}, parts)
}
Example #19
func TestUploadOrderMultiBufferedReaderExceedTotalParts(t *testing.T) {
	s, ops, _ := loggingSvc([]string{"UploadPart"})
	mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) {
		u.Concurrency = 1
		u.MaxUploadParts = 2
	})
	resp, err := mgr.Upload(&s3manager.UploadInput{
		Bucket: aws.String("Bucket"),
		Key:    aws.String("Key"),
		Body:   &sizedReader{size: 1024 * 1024 * 12},
	})

	assert.Error(t, err)
	assert.Nil(t, resp)
	assert.Equal(t, []string{"CreateMultipartUpload", "AbortMultipartUpload"}, *ops)

	aerr := err.(awserr.Error)
	assert.Equal(t, "TotalPartsExceeded", aerr.Code())
	assert.Contains(t, aerr.Message(), "configured MaxUploadParts (2)")
}
Example #20
func TestUploadIncreasePartSize(t *testing.T) {
	s, ops, args := loggingSvc(emptyList)
	mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) {
		u.Concurrency = 1
		u.MaxUploadParts = 2
	})
	_, err := mgr.Upload(&s3manager.UploadInput{
		Bucket: aws.String("Bucket"),
		Key:    aws.String("Key"),
		Body:   bytes.NewReader(buf12MB),
	})

	assert.NoError(t, err)
	assert.Equal(t, int64(s3manager.DefaultUploadPartSize), mgr.PartSize)
	assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops)

	// Part lengths
	assert.Equal(t, (1024*1024*6)+1, buflen(val((*args)[1], "Body")))
	assert.Equal(t, (1024*1024*6)-1, buflen(val((*args)[2], "Body")))
}
Example #21
func TestUploadOrderMulti(t *testing.T) {
	s, ops, args := loggingSvc(emptyList)
	u := s3manager.NewUploaderWithClient(s)

	resp, err := u.Upload(&s3manager.UploadInput{
		Bucket:               aws.String("Bucket"),
		Key:                  aws.String("Key"),
		Body:                 bytes.NewReader(buf12MB),
		ServerSideEncryption: aws.String("aws:kms"),
		SSEKMSKeyId:          aws.String("KmsId"),
		ContentType:          aws.String("content/type"),
	})

	assert.NoError(t, err)
	assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops)
	assert.Equal(t, "https://location", resp.Location)
	assert.Equal(t, "UPLOAD-ID", resp.UploadID)
	assert.Equal(t, aws.String("VERSION-ID"), resp.VersionID)

	// Validate input values

	// UploadPart
	assert.Equal(t, "UPLOAD-ID", val((*args)[1], "UploadId"))
	assert.Equal(t, "UPLOAD-ID", val((*args)[2], "UploadId"))
	assert.Equal(t, "UPLOAD-ID", val((*args)[3], "UploadId"))

	// CompleteMultipartUpload
	assert.Equal(t, "UPLOAD-ID", val((*args)[4], "UploadId"))
	assert.Equal(t, int64(1), val((*args)[4], "MultipartUpload.Parts[0].PartNumber"))
	assert.Equal(t, int64(2), val((*args)[4], "MultipartUpload.Parts[1].PartNumber"))
	assert.Equal(t, int64(3), val((*args)[4], "MultipartUpload.Parts[2].PartNumber"))
	assert.Regexp(t, `^ETAG\d+$`, val((*args)[4], "MultipartUpload.Parts[0].ETag"))
	assert.Regexp(t, `^ETAG\d+$`, val((*args)[4], "MultipartUpload.Parts[1].ETag"))
	assert.Regexp(t, `^ETAG\d+$`, val((*args)[4], "MultipartUpload.Parts[2].ETag"))

	// Custom headers
	assert.Equal(t, "aws:kms", val((*args)[0], "ServerSideEncryption"))
	assert.Equal(t, "KmsId", val((*args)[0], "SSEKMSKeyId"))
	assert.Equal(t, "content/type", val((*args)[0], "ContentType"))
}
Example #22
func TestUploadUnexpectedEOF(t *testing.T) {
	s, ops, args := loggingSvc(emptyList)
	mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) {
		u.Concurrency = 1
	})
	_, err := mgr.Upload(&s3manager.UploadInput{
		Bucket: aws.String("Bucket"),
		Key:    aws.String("Key"),
		Body: &testIncompleteReader{
			Buf:   make([]byte, 1024*1024*5),
			Count: 1,
		},
	})

	assert.Error(t, err)
	assert.Equal(t, "CreateMultipartUpload", (*ops)[0])
	assert.Equal(t, "UploadPart", (*ops)[1])
	assert.Equal(t, "AbortMultipartUpload", (*ops)[len(*ops)-1])

	// Part lengths
	assert.Equal(t, 1024*1024*5, buflen(val((*args)[1], "Body")))
}
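testIncompleteReader is also undefined here. A stand-in consistent with this test: it serves Buf Count times and then returns io.ErrUnexpectedEOF, simulating a body that ends before its full length arrives.

// Assumed stand-in: yields Count copies of Buf, then fails with
// io.ErrUnexpectedEOF.
type testIncompleteReader struct {
	Buf   []byte
	Count int
	pos   int
}

func (r *testIncompleteReader) Read(p []byte) (int, error) {
	total := r.Count * len(r.Buf)
	if r.pos >= total {
		return 0, io.ErrUnexpectedEOF
	}
	n := copy(p, r.Buf[r.pos%len(r.Buf):])
	if r.pos+n > total {
		n = total - r.pos
	}
	r.pos += n
	return n, nil
}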
Example #23
func TestUploadOrderMultiFailureOnComplete(t *testing.T) {
	s, ops, _ := loggingSvc(emptyList)
	s.Handlers.Send.PushBack(func(r *request.Request) {
		switch r.Data.(type) {
		case *s3.CompleteMultipartUploadOutput:
			r.HTTPResponse.StatusCode = 400
		}
	})

	mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) {
		u.Concurrency = 1
	})
	_, err := mgr.Upload(&s3manager.UploadInput{
		Bucket: aws.String("Bucket"),
		Key:    aws.String("Key"),
		Body:   bytes.NewReader(buf12MB),
	})

	assert.Error(t, err)
	assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart",
		"UploadPart", "CompleteMultipartUpload", "AbortMultipartUpload"}, *ops)
}
Example #24
func TestUploadOrderMultiFailureLeaveParts(t *testing.T) {
	s, ops, _ := loggingSvc(emptyList)
	s.Handlers.Send.PushBack(func(r *request.Request) {
		switch data := r.Data.(type) {
		case *s3.UploadPartOutput:
			if *data.ETag == "ETAG2" {
				r.HTTPResponse.StatusCode = 400
			}
		}
	})

	mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) {
		u.Concurrency = 1
		u.LeavePartsOnError = true
	})
	_, err := mgr.Upload(&s3manager.UploadInput{
		Bucket: aws.String("Bucket"),
		Key:    aws.String("Key"),
		Body:   bytes.NewReader(make([]byte, 1024*1024*12)),
	})

	assert.Error(t, err)
	assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart"}, *ops)
}
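Because LeavePartsOnError is true, the failed upload above intentionally leaves its parts in the bucket. A hedged cleanup sketch using the upload ID that s3manager.MultiUploadFailure exposes (the same accessor Example #4 uses); the helper name is hypothetical.

// Sketch under assumptions: abort a multipart upload whose parts were
// deliberately left behind. svc, bucket, and key mirror the test above.
func abortLeftoverUpload(svc *s3.S3, bucket, key string, uploadErr error) error {
	merr, ok := uploadErr.(s3manager.MultiUploadFailure)
	if !ok {
		return uploadErr
	}
	_, err := svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
		Bucket:   aws.String(bucket),
		Key:      aws.String(key),
		UploadId: aws.String(merr.UploadID()),
	})
	return err
}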
Example #25
// Create uploads a machine image to S3 and returns a presigned URL to an import volume manifest
func (d *SDKCreateMachineImageManifestDriver) Create(driverConfig resources.MachineImageDriverConfig) (resources.MachineImage, error) {
	createStartTime := time.Now()
	defer func(startTime time.Time) {
		d.logger.Printf("completed Create() in %f minutes\n", time.Since(startTime).Minutes())
	}(createStartTime)

	d.logger.Printf("opening image for upload to S3: %s\n", driverConfig.MachineImagePath)

	f, err := os.Open(driverConfig.MachineImagePath)
	if err != nil {
		return resources.MachineImage{}, fmt.Errorf("opening machine image for upload: %s", err)
	}
	defer f.Close()

	keyName := fmt.Sprintf("bosh-machine-image-%d", time.Now().UnixNano())
	d.logger.Printf("uploading image to s3://%s/%s\n", driverConfig.BucketName, keyName)

	uploadStartTime := time.Now()
	uploader := s3manager.NewUploaderWithClient(d.s3Client)
	_, err = uploader.Upload(&s3manager.UploadInput{
		Body:   f,
		Bucket: aws.String(driverConfig.BucketName),
		Key:    aws.String(keyName),
	})

	if err != nil {
		return resources.MachineImage{}, fmt.Errorf("uploading machine image to S3: %s", err)
	}

	d.logger.Printf("finished uploaded image to s3 after %f minutes\n", time.Since(uploadStartTime).Minutes())

	headReqOutput, err := d.s3Client.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String(driverConfig.BucketName),
		Key:    aws.String(keyName),
	})

	if err != nil {
		return resources.MachineImage{}, fmt.Errorf("fetching properties for uploaded machine image: %s in bucket: %s: %s", keyName, driverConfig.BucketName, err)
	}

	sizeInBytesPtr := headReqOutput.ContentLength
	if sizeInBytesPtr == nil {
		return resources.MachineImage{}, errors.New("size in bytes nil")
	}

	volumeSizeGB := driverConfig.VolumeSizeGB
	if volumeSizeGB == 0 {
		// default to size of image if VolumeSize is not provided
		volumeSizeGB = int64(math.Ceil(float64(*sizeInBytesPtr) / gbInBytes))
	}

	m, err := d.generateManifest(driverConfig.BucketName, keyName, *sizeInBytesPtr, volumeSizeGB, driverConfig.FileFormat)
	if err != nil {
		return resources.MachineImage{}, fmt.Errorf("Failed to generate machine image manifest: %s", err)
	}

	manifestURL, err := d.uploadManifest(driverConfig.BucketName, m)
	if err != nil {
		return resources.MachineImage{}, err
	}

	machineImage := resources.MachineImage{
		GetURL:     manifestURL,
		DeleteURLs: []string{m.SelfDestructURL, m.Parts.Part.DeleteURL},
	}

	return machineImage, nil
}
Example #26
func newS3Uploader(svc s3iface.S3API) *_s3uploader {
	return &_s3uploader{uploader: s3manager.NewUploaderWithClient(svc)}
}
Example #27
// Push pushes image tarball directly to S3
func (s *StorageS3) Push(imageName string) (digest string, err error) {
	img := imagename.NewFromString(imageName)

	if img.Storage != imagename.StorageS3 {
		return "", fmt.Errorf("Can only push images with s3 storage specified, got: %s", img)
	}

	if img.Registry == "" {
		return "", fmt.Errorf("Cannot push image to S3, missing bucket name, got: %s", img)
	}

	var image *docker.Image
	if image, err = s.client.InspectImage(img.String()); err != nil {
		return "", err
	}

	if digest, err = s.CacheGet(image.ID); err != nil {
		return "", err
	}

	var tmpf string

	defer func() {
		if tmpf != "" {
			os.Remove(tmpf)
		}
	}()

	// Not cached, make tar
	if digest == "" {
		if tmpf, digest, err = s.MakeTar(imageName); err != nil {
			return "", err
		}
	}

	var (
		ext           = ".tar"
		imgPathDigest = fmt.Sprintf("%s/%s%s", img.Name, digest, ext)
		imgPathTag    = fmt.Sprintf("%s/%s%s", img.Name, img.Tag, ext)
	)

	// Make HEAD request to s3 and check if image already uploaded
	_, headErr := s.s3.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String(img.Registry),
		Key:    aws.String(imgPathDigest),
	})

	// Object not found, need to store
	if headErr != nil {
		// Any other error: propagate it
		if e, ok := headErr.(awserr.RequestFailure); !ok || e.StatusCode() != 404 {
			return "", headErr
		}
		// In case we do not have archive
		if tmpf == "" {
			var digest2 string
			if tmpf, digest2, err = s.MakeTar(imageName); err != nil {
				return "", err
			}
			// Verify digest (TODO: remove this check in the future?)
			if digest != digest2 {
				return "", fmt.Errorf("The new digest does not equal the old one (shouldn't happen) %s != %s", digest, digest2)
			}
		}

		uploader := s3manager.NewUploaderWithClient(s.s3, func(u *s3manager.Uploader) {
			u.PartSize = 64 * 1024 * 1024 // 64MB per part
		})

		fd, err := os.Open(tmpf)
		if err != nil {
			return "", err
		}
		defer fd.Close()

		log.Infof("| Uploading image to s3.amazonaws.com/%s/%s", img.Registry, imgPathDigest)

		uploadParams := &s3manager.UploadInput{
			Bucket:      aws.String(img.Registry),
			Key:         aws.String(imgPathDigest),
			ContentType: aws.String("application/x-tar"),
			Body:        fd,
			Metadata: map[string]*string{
				"Tag":     aws.String(img.Tag),
				"ImageID": aws.String(image.ID),
				"Digest":  aws.String(digest),
			},
		}

		if err := s.retryer.Outer(func() error {
			_, err := uploader.Upload(uploadParams)
			return err
		}); err != nil {
			return "", fmt.Errorf("Failed to upload object to S3, error: %s", err)
		}
	}

	// Make a content addressable copy of an image file
	copyParams := &s3.CopyObjectInput{
		Bucket:     aws.String(img.Registry),
		CopySource: aws.String(img.Registry + "/" + imgPathDigest),
		Key:        aws.String(imgPathTag),
	}

	log.Infof("| Make alias s3.amazonaws.com/%s/%s", img.Registry, imgPathTag)

	if _, err = s.s3.CopyObject(copyParams); err != nil {
		return "", fmt.Errorf("Failed to PUT object to S3, error: %s", err)
	}

	return digest, nil
}
Example #28
// Create uploads a machine image to S3 and returns a presigned URL
func (d *SDKCreateMachineImageDriver) Create(driverConfig resources.MachineImageDriverConfig) (resources.MachineImage, error) {
	createStartTime := time.Now()
	defer func(startTime time.Time) {
		d.logger.Printf("completed Create() in %f minutes\n", time.Since(startTime).Minutes())
	}(createStartTime)

	d.logger.Printf("opening image for upload to S3: %s\n", driverConfig.MachineImagePath)

	f, err := os.Open(driverConfig.MachineImagePath)
	if err != nil {
		return resources.MachineImage{}, fmt.Errorf("opening machine image for upload: %s", err)
	}
	defer f.Close()

	keyName := fmt.Sprintf("bosh-machine-image-%d", time.Now().UnixNano())
	d.logger.Printf("uploading image to s3://%s/%s\n", driverConfig.BucketName, keyName)

	uploadStartTime := time.Now()
	uploader := s3manager.NewUploaderWithClient(d.s3Client)
	_, err = uploader.Upload(&s3manager.UploadInput{
		Body:   f,
		Bucket: aws.String(driverConfig.BucketName),
		Key:    aws.String(keyName),
	})

	if err != nil {
		return resources.MachineImage{}, fmt.Errorf("uploading machine image to S3: %s", err)
	}

	d.logger.Printf("finished uploaded image to s3 after %f minutes\n", time.Since(uploadStartTime).Minutes())

	getReq, _ := d.s3Client.GetObjectRequest(&s3.GetObjectInput{
		Bucket: aws.String(driverConfig.BucketName),
		Key:    aws.String(keyName),
	})

	machineImageGetURL, err := getReq.Presign(2 * time.Hour)
	if err != nil {
		return resources.MachineImage{}, fmt.Errorf("failed to sign GET request: %s", err)
	}

	d.logger.Printf("generated presigned GET URL %s\n", machineImageGetURL)

	deleteReq, _ := d.s3Client.DeleteObjectRequest(&s3.DeleteObjectInput{
		Bucket: aws.String(driverConfig.BucketName),
		Key:    aws.String(keyName),
	})

	machineImageDeleteURL, err := deleteReq.Presign(24 * time.Hour)
	if err != nil {
		return resources.MachineImage{}, fmt.Errorf("failed to sign DELETE request: %s", err)
	}

	d.logger.Printf("generated presigned GET URL %s\n", machineImageDeleteURL)

	machineImage := resources.MachineImage{
		GetURL:     machineImageGetURL,
		DeleteURLs: []string{machineImageDeleteURL},
	}

	return machineImage, nil
}