Example #1
// TestUploadOrderMulti checks that a 12 MB body is sent as a three-part
// multipart upload and that the SSE and Content-Type inputs are passed through.
func TestUploadOrderMulti(t *testing.T) {
	s, ops, args := loggingSvc()
	resp, err := s3manager.Upload(s, &s3manager.UploadInput{
		Bucket:               aws.String("Bucket"),
		Key:                  aws.String("Key"),
		Body:                 bytes.NewReader(buf12MB),
		ServerSideEncryption: aws.String("AES256"),
		ContentType:          aws.String("content/type"),
	}, nil)

	assert.NoError(t, err)
	assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops)
	assert.Equal(t, "https://location", resp.Location)
	assert.Equal(t, "UPLOAD-ID", resp.UploadID)

	// Validate input values

	// UploadPart
	assert.Equal(t, "UPLOAD-ID", val((*args)[1], "UploadID"))
	assert.Equal(t, "UPLOAD-ID", val((*args)[2], "UploadID"))
	assert.Equal(t, "UPLOAD-ID", val((*args)[3], "UploadID"))

	// CompleteMultipartUpload
	assert.Equal(t, "UPLOAD-ID", val((*args)[4], "UploadID"))
	assert.Equal(t, int64(1), val((*args)[4], "MultipartUpload.Parts[0].PartNumber"))
	assert.Equal(t, int64(2), val((*args)[4], "MultipartUpload.Parts[1].PartNumber"))
	assert.Equal(t, int64(3), val((*args)[4], "MultipartUpload.Parts[2].PartNumber"))
	assert.Regexp(t, `^ETAG\d+$`, val((*args)[4], "MultipartUpload.Parts[0].ETag"))
	assert.Regexp(t, `^ETAG\d+$`, val((*args)[4], "MultipartUpload.Parts[1].ETag"))
	assert.Regexp(t, `^ETAG\d+$`, val((*args)[4], "MultipartUpload.Parts[2].ETag"))

	// Custom headers
	assert.Equal(t, "AES256", val((*args)[0], "ServerSideEncryption"))
	assert.Equal(t, "content/type", val((*args)[0], "ContentType"))
}
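The unit tests in these examples rely on a loggingSvc helper that is not shown here. Below is a minimal sketch of how such a helper could look, assuming the same pre-1.0 aws-sdk-go handler API the tests themselves use (aws.Request, Handlers.Send, r.Data, r.Params); the handler-list Clear calls and the stubbed output values are assumptions inferred from the assertions above.

// Hypothetical sketch of loggingSvc: returns a mock S3 client plus pointers to
// the recorded operation names and captured inputs, and stubs the response
// values ("UPLOAD-ID", "ETAG<n>", "https://location") the tests assert on.
func loggingSvc() (*s3.S3, *[]string, *[]interface{}) {
	var m sync.Mutex
	partNum := 0
	ops := []string{}
	args := []interface{}{}

	svc := s3.New(nil)
	// Skip real request handling; handler names are assumed for this SDK version.
	svc.Handlers.Unmarshal.Clear()
	svc.Handlers.UnmarshalMeta.Clear()
	svc.Handlers.UnmarshalError.Clear()
	svc.Handlers.Send.Clear()
	svc.Handlers.Send.PushBack(func(r *aws.Request) {
		m.Lock()
		defer m.Unlock()

		ops = append(ops, r.Operation.Name)
		args = append(args, r.Params)

		// Fake a successful, empty HTTP response instead of calling S3.
		r.HTTPResponse = &http.Response{
			StatusCode: 200,
			Body:       ioutil.NopCloser(bytes.NewReader(nil)),
		}

		// Fill in the output fields the assertions check.
		switch data := r.Data.(type) {
		case *s3.CreateMultipartUploadOutput:
			data.UploadID = aws.String("UPLOAD-ID")
		case *s3.UploadPartOutput:
			partNum++
			data.ETag = aws.String(fmt.Sprintf("ETAG%d", partNum))
		case *s3.CompleteMultipartUploadOutput:
			data.Location = aws.String("https://location")
		}
	})

	return svc, &ops, &args
}

// The fixed buffers referenced throughout are plain byte slices of the stated
// size; Example #11 builds one inline with make([]byte, 1024*1024*12).
var (
	buf2MB  = make([]byte, 1024*1024*2)
	buf12MB = make([]byte, 1024*1024*12)
)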
Example #2
// TestUploadFailCleanup checks that with LeavePartsOnError disabled a failed
// multipart upload is aborted, so ListParts on the upload ID errors afterwards.
func TestUploadFailCleanup(t *testing.T) {
	svc := s3.New(nil)

	// Break checksum on 2nd part so it fails
	part := 0
	svc.Handlers.Build.PushBack(func(r *aws.Request) {
		if r.Operation.Name == "UploadPart" {
			if part == 1 {
				r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "000")
			}
			part++
		}
	})

	key := "12mb-leave"
	_, err := s3manager.Upload(svc, &s3manager.UploadInput{
		Bucket: bucketName,
		Key:    &key,
		Body:   bytes.NewReader(integBuf12MB),
	}, &s3manager.UploadOptions{
		LeavePartsOnError: false,
	})
	assert.Error(t, err)
	uploadID := ""
	if merr, ok := err.(s3manager.MultiUploadFailure); ok {
		uploadID = merr.UploadID()
	}
	assert.NotEmpty(t, uploadID)

	_, err = svc.ListParts(&s3.ListPartsInput{
		Bucket: bucketName, Key: &key, UploadID: &uploadID})
	assert.Error(t, err)
}
Example #3
// TestUploadOrderReadFail2 checks that a body read error during a multipart
// upload surfaces as a ReadRequestBody error and aborts the upload.
func TestUploadOrderReadFail2(t *testing.T) {
	failreaderCount = 0
	s, ops, _ := loggingSvc()
	_, err := s3manager.Upload(s, &s3manager.UploadInput{
		Bucket: aws.String("Bucket"),
		Key:    aws.String("Key"),
		Body:   failreader{2},
	}, nil)

	assert.Equal(t, "ReadRequestBody", err.(awserr.Error).Code())
	assert.EqualError(t, err.(awserr.Error).OrigErr(), "random failure")
	assert.Equal(t, []string{"CreateMultipartUpload", "AbortMultipartUpload"}, *ops)
}
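The failreader type and the package-level failreaderCount counter used above are assumed rather than shown; a plausible sketch consistent with the assertions (a failure on the configured read attempt, with the message "random failure"):

// Hypothetical sketch of failreader: an io.Reader that succeeds until the
// read count, tracked in failreaderCount, reaches f.times, then returns the
// "random failure" error the test matches. The data itself is never inspected,
// so b is left untouched.
type failreader struct {
	times int
}

var failreaderCount = 0

func (f failreader) Read(b []byte) (int, error) {
	failreaderCount++
	if failreaderCount >= f.times {
		return 0, fmt.Errorf("random failure")
	}
	return len(b), nil
}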
Example #4
func TestUploadOrderSingleBufferedReader(t *testing.T) {
	s, ops, _ := loggingSvc()
	resp, err := s3manager.Upload(s, &s3manager.UploadInput{
		Bucket: aws.String("Bucket"),
		Key:    aws.String("Key"),
		Body:   sizedReader{&sizedReaderImpl{size: 1024 * 1024 * 2}},
	}, nil)

	assert.NoError(t, err)
	assert.Equal(t, []string{"PutObject"}, *ops)
	assert.NotEqual(t, "", resp.Location)
	assert.Equal(t, "", resp.UploadID)
}
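sizedReader is another helper these snippets assume: a plain io.Reader (no Seek) that produces a fixed number of bytes, which is what pushes the uploader down its buffered-reader path. One way it could be written:

// Hypothetical sketch of sizedReader. It embeds a pointer so the read position
// survives copies of the value, and reports io.EOF once `size` bytes have been
// produced. The byte contents are irrelevant to the tests, so p is not filled.
type sizedReaderImpl struct {
	size int
	cur  int
}

type sizedReader struct {
	*sizedReaderImpl
}

func (s sizedReader) Read(p []byte) (n int, err error) {
	if s.cur >= s.size {
		return 0, io.EOF
	}
	n = len(p)
	s.cur += n
	if s.cur > s.size {
		n -= s.cur - s.size
	}
	return n, nil
}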
Example #5
func TestUploadOrderSingle(t *testing.T) {
	s, ops, _ := loggingSvc()
	resp, err := s3manager.Upload(s, &s3manager.UploadInput{
		Bucket: aws.String("Bucket"),
		Key:    aws.String("Key"),
		Body:   bytes.NewReader(buf2MB),
	}, nil)

	assert.NoError(t, err)
	assert.Equal(t, []string{"PutObject"}, *ops)
	assert.NotEqual(t, "", resp.Location)
	assert.Equal(t, "", resp.UploadID)
}
Example #6
func TestUploadOrderZero(t *testing.T) {
	s, ops, args := loggingSvc()
	resp, err := s3manager.Upload(s, &s3manager.UploadInput{
		Bucket: aws.String("Bucket"),
		Key:    aws.String("Key"),
		Body:   bytes.NewReader(make([]byte, 0)),
	}, nil)

	assert.NoError(t, err)
	assert.Equal(t, []string{"PutObject"}, *ops)
	assert.NotEqual(t, "", resp.Location)
	assert.Equal(t, "", resp.UploadID)
	assert.Equal(t, 0, buflen(val((*args)[0], "Body")))
}
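The val and buflen helpers behind the assertions are also assumed. val resolves a path expression such as "MultipartUpload.Parts[0].PartNumber" against a captured input struct, and buflen drains a captured Body reader to measure a part's length. A sketch, assuming the SDK's awsutil.ValuesAtPath reflection helper with an older, slice-returning signature:

// Hypothetical helpers; the awsutil.ValuesAtPath call and its signature are
// assumptions about the SDK version these tests target.
func val(i interface{}, path string) interface{} {
	return awsutil.ValuesAtPath(i, path)[0]
}

func buflen(i interface{}) int {
	b, _ := ioutil.ReadAll(i.(io.Reader))
	return len(b)
}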
Example #7
func TestUploadConcurrently(t *testing.T) {
	svc := s3.New(nil)
	key := "12mb-1"
	out, err := s3manager.Upload(svc, &s3manager.UploadInput{
		Bucket: bucketName,
		Key:    &key,
		Body:   bytes.NewReader(integBuf12MB),
	}, nil)

	assert.NoError(t, err)
	assert.NotEqual(t, "", out.UploadID)
	assert.Regexp(t, `^https?://.+/`+key+`$`, out.Location)

	validate(t, key, integMD512MB)
}
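The integration tests (Examples #2 and #7) additionally assume a shared bucketName, a 12 MB integBuf12MB buffer with a precomputed MD5 hex digest (integMD512MB), and a validate helper. A hedged sketch of validate, assuming it simply re-downloads the object and compares checksums:

// Hypothetical sketch of validate: fetch the uploaded object and compare its
// MD5 against the expected hex digest. The GetObject call and the MD5
// comparison are assumptions; only the helper's name and arguments appear in
// the test above.
func validate(t *testing.T, key, expectedMD5 string) {
	svc := s3.New(nil)
	out, err := svc.GetObject(&s3.GetObjectInput{Bucket: bucketName, Key: &key})
	assert.NoError(t, err)

	b, err := ioutil.ReadAll(out.Body)
	assert.NoError(t, err)
	out.Body.Close()

	assert.Equal(t, expectedMD5, fmt.Sprintf("%x", md5.Sum(b)))
}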
Example #8
func TestUploadOrderSingleFailure(t *testing.T) {
	s, ops, _ := loggingSvc()
	s.Handlers.Send.PushBack(func(r *aws.Request) {
		r.HTTPResponse.StatusCode = 400
	})
	resp, err := s3manager.Upload(s, &s3manager.UploadInput{
		Bucket: aws.String("Bucket"),
		Key:    aws.String("Key"),
		Body:   bytes.NewReader(buf2MB),
	}, nil)

	assert.Error(t, err)
	assert.Equal(t, []string{"PutObject"}, *ops)
	assert.Nil(t, resp)
}
Example #9
func TestUploadOrderMultiDifferentPartSize(t *testing.T) {
	s, ops, args := loggingSvc()
	_, err := s3manager.Upload(s, &s3manager.UploadInput{
		Bucket: aws.String("Bucket"),
		Key:    aws.String("Key"),
		Body:   bytes.NewReader(buf12MB),
	}, &s3manager.UploadOptions{PartSize: 1024 * 1024 * 7, Concurrency: 1})

	assert.NoError(t, err)
	assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops)

	// Part lengths
	assert.Equal(t, 1024*1024*7, buflen(val((*args)[1], "Body")))
	assert.Equal(t, 1024*1024*5, buflen(val((*args)[2], "Body")))
}
Example #10
func TestUploadFailIfPartSizeTooSmall(t *testing.T) {
	s := s3.New(nil)
	opts := &s3manager.UploadOptions{PartSize: 5}
	resp, err := s3manager.Upload(s, &s3manager.UploadInput{
		Bucket: aws.String("Bucket"),
		Key:    aws.String("Key"),
		Body:   bytes.NewReader(buf12MB),
	}, opts)

	assert.Nil(t, resp)
	assert.NotNil(t, err)

	aerr := err.(awserr.Error)
	assert.Equal(t, "ConfigError", aerr.Code())
	assert.Contains(t, aerr.Message(), "part size must be at least")
}
Example #11
func TestUploadOrderMultiFailureOnCreate(t *testing.T) {
	s, ops, _ := loggingSvc()
	s.Handlers.Send.PushBack(func(r *aws.Request) {
		switch r.Data.(type) {
		case *s3.CreateMultipartUploadOutput:
			r.HTTPResponse.StatusCode = 400
		}
	})
	_, err := s3manager.Upload(s, &s3manager.UploadInput{
		Bucket: aws.String("Bucket"),
		Key:    aws.String("Key"),
		Body:   bytes.NewReader(make([]byte, 1024*1024*12)),
	}, nil)

	assert.Error(t, err)
	assert.Equal(t, []string{"CreateMultipartUpload"}, *ops)
}
Example #12
func TestUploadOrderSingle(t *testing.T) {
	s, ops, args := loggingSvc()
	resp, err := s3manager.Upload(s, &s3manager.UploadInput{
		Bucket:               aws.String("Bucket"),
		Key:                  aws.String("Key"),
		Body:                 bytes.NewReader(buf2MB),
		ServerSideEncryption: aws.String("AES256"),
		ContentType:          aws.String("content/type"),
	}, nil)

	assert.NoError(t, err)
	assert.Equal(t, []string{"PutObject"}, *ops)
	assert.NotEqual(t, "", resp.Location)
	assert.Equal(t, "", resp.UploadID)
	assert.Equal(t, "AES256", val((*args)[0], "ServerSideEncryption"))
	assert.Equal(t, "content/type", val((*args)[0], "ContentType"))
}
Example #13
// TestUploadOrderMultiBufferedReaderExceedTotalParts checks that an unseekable
// body needing more parts than MaxUploadParts fails with TotalPartsExceeded and
// aborts the upload.
func TestUploadOrderMultiBufferedReaderExceedTotalParts(t *testing.T) {
	s3manager.MaxUploadParts = 2
	defer func() { s3manager.MaxUploadParts = 1000 }()
	s, ops, _ := loggingSvc()
	resp, err := s3manager.Upload(s, &s3manager.UploadInput{
		Bucket: aws.String("Bucket"),
		Key:    aws.String("Key"),
		Body:   sizedReader{&sizedReaderImpl{size: 1024 * 1024 * 12}},
	}, &s3manager.UploadOptions{Concurrency: 1})

	assert.Error(t, err)
	assert.Nil(t, resp)
	assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "AbortMultipartUpload"}, *ops)

	aerr := err.(awserr.Error)
	assert.Equal(t, "TotalPartsExceeded", aerr.Code())
	assert.Contains(t, aerr.Message(), "exceeded total allowed parts (2)")
}
Example #14
// TestUploadOrderMultiFailureLeaveParts checks that with LeavePartsOnError set
// a failed part upload does not trigger AbortMultipartUpload.
func TestUploadOrderMultiFailureLeaveParts(t *testing.T) {
	s, ops, _ := loggingSvc()
	s.Handlers.Send.PushBack(func(r *aws.Request) {
		switch data := r.Data.(type) {
		case *s3.UploadPartOutput:
			if *data.ETag == "ETAG2" {
				r.HTTPResponse.StatusCode = 400
			}
		}
	})
	_, err := s3manager.Upload(s, &s3manager.UploadInput{
		Bucket: aws.String("Bucket"),
		Key:    aws.String("Key"),
		Body:   bytes.NewReader(make([]byte, 1024*1024*12)),
	}, &s3manager.UploadOptions{Concurrency: 1, LeavePartsOnError: true})

	assert.Error(t, err)
	assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart"}, *ops)
}
Example #15
func TestUploadOrderMultiBufferedReader(t *testing.T) {
	s, ops, args := loggingSvc()
	_, err := s3manager.Upload(s, &s3manager.UploadInput{
		Bucket: aws.String("Bucket"),
		Key:    aws.String("Key"),
		Body:   sizedReader{&sizedReaderImpl{size: 1024 * 1024 * 12}},
	}, nil)

	assert.NoError(t, err)
	assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops)

	// Part lengths
	parts := []int{
		buflen(val((*args)[1], "Body")),
		buflen(val((*args)[2], "Body")),
		buflen(val((*args)[3], "Body")),
	}
	sort.Ints(parts)
	assert.Equal(t, []int{1024 * 1024 * 2, 1024 * 1024 * 5, 1024 * 1024 * 5}, parts)
}
Example #16
// TestUploadIncreasePartSize checks that the uploader grows the part size so
// the upload fits within MaxUploadParts, without mutating the caller's options.
func TestUploadIncreasePartSize(t *testing.T) {
	s3manager.MaxUploadParts = 2
	defer func() { s3manager.MaxUploadParts = 1000 }()

	s, ops, args := loggingSvc()
	opts := &s3manager.UploadOptions{Concurrency: 1}
	_, err := s3manager.Upload(s, &s3manager.UploadInput{
		Bucket: aws.String("Bucket"),
		Key:    aws.String("Key"),
		Body:   bytes.NewReader(buf12MB),
	}, opts)

	assert.NoError(t, err)
	assert.Equal(t, int64(0), opts.PartSize) // don't modify orig options
	assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops)

	// Part lengths
	assert.Equal(t, 1024*1024*6, buflen(val((*args)[1], "Body")))
	assert.Equal(t, 1024*1024*6, buflen(val((*args)[2], "Body")))
}