Example #1
func TestUploadFailCleanup(t *testing.T) {
	svc := s3.New(nil)

	// Break checksum on 2nd part so it fails
	part := 0
	svc.Handlers.Build.PushBack(func(r *service.Request) {
		if r.Operation.Name == "UploadPart" {
			if part == 1 {
				r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "000")
			}
			part++
		}
	})

	key := "12mb-leave"
	mgr := s3manager.NewUploader(&s3manager.UploadOptions{
		S3:                svc,
		LeavePartsOnError: false,
	})
	_, err := mgr.Upload(&s3manager.UploadInput{
		Bucket: bucketName,
		Key:    &key,
		Body:   bytes.NewReader(integBuf12MB),
	})
	assert.Error(t, err)
	uploadID := ""
	if merr, ok := err.(s3manager.MultiUploadFailure); ok {
		uploadID = merr.UploadID()
	}
	assert.NotEmpty(t, uploadID)

	_, err = svc.ListParts(&s3.ListPartsInput{
		Bucket: bucketName, Key: &key, UploadId: &uploadID})
	assert.Error(t, err)
}
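The test above relies on a package-level integBuf12MB fixture (and on the bucketName set up in Example #19 further down) that this excerpt does not include. A minimal sketch of the buffer, assuming the size implied by the "12mb-leave" key:

// Hypothetical fixture: 12 MB of data, enough to force a multipart upload
// with the default 5 MB part size. The real definition may differ.
var integBuf12MB = make([]byte, 1024*1024*12)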
Example #2
func dlLoggingSvc(data []byte) (*s3.S3, *[]string, *[]string) {
	var m sync.Mutex
	names := []string{}
	ranges := []string{}

	svc := s3.New(nil)
	svc.Handlers.Send.Clear()
	svc.Handlers.Send.PushBack(func(r *service.Request) {
		m.Lock()
		defer m.Unlock()

		names = append(names, r.Operation.Name)
		ranges = append(ranges, *r.Params.(*s3.GetObjectInput).Range)

		rerng := regexp.MustCompile(`bytes=(\d+)-(\d+)`)
		rng := rerng.FindStringSubmatch(r.HTTPRequest.Header.Get("Range"))
		start, _ := strconv.ParseInt(rng[1], 10, 64)
		fin, _ := strconv.ParseInt(rng[2], 10, 64)
		fin++

		if fin > int64(len(data)) {
			fin = int64(len(data))
		}

		r.HTTPResponse = &http.Response{
			StatusCode: 200,
			Body:       ioutil.NopCloser(bytes.NewReader(data[start:fin])),
			Header:     http.Header{},
		}
		r.HTTPResponse.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d",
			start, fin, len(data)))
	})

	return svc, &names, &ranges
}
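dlLoggingSvc is a stub used by the download manager tests: it records each operation name and Range header, then serves the matching slice of data and a Content-Range header. Below is a sketch of how a test might drive it. The DownloadOptions fields come from the downloader init() in Example #13; the Download signature, the writeAtBuffer helper, and the test name are assumptions for illustration, not part of the original tests.

// Hypothetical usage sketch of dlLoggingSvc.
type writeAtBuffer struct {
	mu  sync.Mutex
	buf []byte // minimal io.WriterAt for collecting the downloaded bytes
}

func (w *writeAtBuffer) WriteAt(p []byte, off int64) (int, error) {
	w.mu.Lock()
	defer w.mu.Unlock()
	if need := int(off) + len(p); need > len(w.buf) {
		w.buf = append(w.buf, make([]byte, need-len(w.buf))...)
	}
	return copy(w.buf[off:], p), nil
}

func TestDownloadWithLoggingSvcSketch(t *testing.T) {
	data := make([]byte, 1024*1024*12) // assumed 12 MB fixture
	svc, names, ranges := dlLoggingSvc(data)

	d := s3manager.NewDownloader(&s3manager.DownloadOptions{S3: svc, Concurrency: 1})
	w := &writeAtBuffer{}
	n, err := d.Download(w, &s3.GetObjectInput{
		Bucket: aws.String("bucket"),
		Key:    aws.String("key"),
	})

	assert.NoError(t, err)
	assert.Equal(t, int64(len(data)), n)
	assert.NotEmpty(t, *names)
	assert.Equal(t, len(*names), len(*ranges)) // one recorded Range per GetObject call
}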
Example #3
func TestPresignHandler(t *testing.T) {
	svc := s3.New(nil)
	req, _ := svc.PutObjectRequest(&s3.PutObjectInput{
		Bucket:             aws.String("bucket"),
		Key:                aws.String("key"),
		ContentDisposition: aws.String("a+b c$d"),
		ACL:                aws.String("public-read"),
	})
	req.Time = time.Unix(0, 0)
	urlstr, err := req.Presign(5 * time.Minute)

	assert.NoError(t, err)

	expectedDate := "19700101T000000Z"
	expectedHeaders := "host;x-amz-acl"
	expectedSig := "7edcb4e3a1bf12f4989018d75acbe3a7f03df24bd6f3112602d59fc551f0e4e2"
	expectedCred := "AKID/19700101/mock-region/s3/aws4_request"

	u, _ := url.Parse(urlstr)
	urlQ := u.Query()
	assert.Equal(t, expectedSig, urlQ.Get("X-Amz-Signature"))
	assert.Equal(t, expectedCred, urlQ.Get("X-Amz-Credential"))
	assert.Equal(t, expectedHeaders, urlQ.Get("X-Amz-SignedHeaders"))
	assert.Equal(t, expectedDate, urlQ.Get("X-Amz-Date"))
	assert.Equal(t, "300", urlQ.Get("X-Amz-Expires"))

	assert.NotContains(t, urlstr, "+") // + encoded as %20
}
func TestMD5InPutBucketPolicy(t *testing.T) {
	svc := s3.New(nil)
	req, _ := svc.PutBucketPolicyRequest(&s3.PutBucketPolicyInput{
		Bucket: aws.String("bucketname"),
		Policy: aws.String("{}"),
	})
	assertMD5(t, req)
}
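Several of the MD5 tests in this excerpt delegate to an assertMD5 helper that is not shown. A plausible reconstruction, assuming it verifies that the built request carries a Content-MD5 header matching the base64-encoded MD5 of the body (the actual implementation may differ):

// Hypothetical sketch of the assertMD5 helper used by the MD5 tests.
func assertMD5(t *testing.T, req *service.Request) {
	err := req.Build()
	assert.NoError(t, err)

	b, _ := ioutil.ReadAll(req.HTTPRequest.Body)
	out := md5.Sum(b)
	assert.NotEmpty(t, b)
	assert.Equal(t, base64.StdEncoding.EncodeToString(out[:]),
		req.HTTPRequest.Header.Get("Content-MD5"))
}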
func TestNoPopulateLocationConstraintIfClassic(t *testing.T) {
	s := s3.New(&aws.Config{Region: aws.String("us-east-1")})
	req, _ := s.CreateBucketRequest(&s3.CreateBucketInput{
		Bucket: aws.String("bucket"),
	})
	err := req.Build()
	assert.NoError(t, err)
	assert.Equal(t, 0, len(awsutil.ValuesAtPath(req.Params, "CreateBucketConfiguration.LocationConstraint")))
}
func TestNoPopulateLocationConstraintIfProvided(t *testing.T) {
	s := s3.New(nil)
	req, _ := s.CreateBucketRequest(&s3.CreateBucketInput{
		Bucket: aws.String("bucket"),
		CreateBucketConfiguration: &s3.CreateBucketConfiguration{},
	})
	err := req.Build()
	assert.NoError(t, err)
	assert.Equal(t, 0, len(awsutil.ValuesAtPath(req.Params, "CreateBucketConfiguration.LocationConstraint")))
}
func TestPopulateLocationConstraint(t *testing.T) {
	s := s3.New(nil)
	in := &s3.CreateBucketInput{
		Bucket: aws.String("bucket"),
	}
	req, _ := s.CreateBucketRequest(in)
	err := req.Build()
	assert.NoError(t, err)
	assert.Equal(t, "mock-region", awsutil.ValuesAtPath(req.Params, "CreateBucketConfiguration.LocationConstraint")[0])
	assert.Nil(t, in.CreateBucketConfiguration) // don't modify original params
}
func TestMD5InDeleteObjects(t *testing.T) {
	svc := s3.New(nil)
	req, _ := svc.DeleteObjectsRequest(&s3.DeleteObjectsInput{
		Bucket: aws.String("bucketname"),
		Delete: &s3.Delete{
			Objects: []*s3.ObjectIdentifier{
				{Key: aws.String("key")},
			},
		},
	})
	assertMD5(t, req)
}
func TestMD5InPutBucketCors(t *testing.T) {
	svc := s3.New(nil)
	req, _ := svc.PutBucketCorsRequest(&s3.PutBucketCorsInput{
		Bucket: aws.String("bucketname"),
		CORSConfiguration: &s3.CORSConfiguration{
			CORSRules: []*s3.CORSRule{
				{AllowedMethods: []*string{aws.String("GET")}},
			},
		},
	})
	assertMD5(t, req)
}
func TestMD5InPutBucketTagging(t *testing.T) {
	svc := s3.New(nil)
	req, _ := svc.PutBucketTaggingRequest(&s3.PutBucketTaggingInput{
		Bucket: aws.String("bucketname"),
		Tagging: &s3.Tagging{
			TagSet: []*s3.Tag{
				{Key: aws.String("KEY"), Value: aws.String("VALUE")},
			},
		},
	})
	assertMD5(t, req)
}
Example #11
func TestCopySourceSSECustomerKeyOverHTTPError(t *testing.T) {
	s := s3.New(&aws.Config{DisableSSL: aws.Bool(true)})
	req, _ := s.CopyObjectRequest(&s3.CopyObjectInput{
		Bucket:     aws.String("bucket"),
		CopySource: aws.String("bucket/source"),
		Key:        aws.String("dest"),
		CopySourceSSECustomerKey: aws.String("key"),
	})
	err := req.Build()

	assert.Error(t, err)
	assert.Equal(t, "ConfigError", err.(awserr.Error).Code())
	assert.Contains(t, err.(awserr.Error).Message(), "cannot send SSE keys over HTTP")
}
Example #12
// init will initialize all default options.
func (u *uploader) init() {
	if u.opts.S3 == nil {
		u.opts.S3 = s3.New(nil)
	}
	if u.opts.Concurrency == 0 {
		u.opts.Concurrency = DefaultUploadConcurrency
	}
	if u.opts.PartSize == 0 {
		u.opts.PartSize = DefaultUploadPartSize
	}

	// Try to get the total size for some optimizations
	u.initSize()
}
Example #13
// init initializes the downloader with default options.
func (d *downloader) init() {
	d.totalBytes = -1

	if d.opts.Concurrency == 0 {
		d.opts.Concurrency = DefaultDownloadConcurrency
	}

	if d.opts.PartSize == 0 {
		d.opts.PartSize = DefaultDownloadPartSize
	}

	if d.opts.S3 == nil {
		d.opts.S3 = s3.New(nil)
	}
}
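Both init methods above fall back to package defaults when the corresponding options are zero-valued. The constants themselves are not part of this excerpt; the values below are an assumption, chosen to match S3's 5 MB minimum multipart part size:

// Assumed defaults referenced by the uploader and downloader init methods.
// S3 requires every multipart part except the last to be at least 5 MB.
const (
	DefaultUploadPartSize       = 1024 * 1024 * 5 // 5 MB
	DefaultUploadConcurrency    = 5
	DefaultDownloadPartSize     = 1024 * 1024 * 5 // 5 MB
	DefaultDownloadConcurrency  = 5
)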
func TestMD5InPutBucketLifecycle(t *testing.T) {
	svc := s3.New(nil)
	req, _ := svc.PutBucketLifecycleRequest(&s3.PutBucketLifecycleInput{
		Bucket: aws.String("bucketname"),
		LifecycleConfiguration: &s3.LifecycleConfiguration{
			Rules: []*s3.LifecycleRule{
				{
					ID:     aws.String("ID"),
					Prefix: aws.String("Prefix"),
					Status: aws.String("Enabled"),
				},
			},
		},
	})
	assertMD5(t, req)
}
Example #15
func TestComputeSSEKeys(t *testing.T) {
	s := s3.New(nil)
	req, _ := s.CopyObjectRequest(&s3.CopyObjectInput{
		Bucket:                   aws.String("bucket"),
		CopySource:               aws.String("bucket/source"),
		Key:                      aws.String("dest"),
		SSECustomerKey:           aws.String("key"),
		CopySourceSSECustomerKey: aws.String("key"),
	})
	err := req.Build()

	assert.NoError(t, err)
	assert.Equal(t, "a2V5", req.HTTPRequest.Header.Get("x-amz-server-side-encryption-customer-key"))
	assert.Equal(t, "a2V5", req.HTTPRequest.Header.Get("x-amz-copy-source-server-side-encryption-customer-key"))
	assert.Equal(t, "PG4LipwVIkqCKLmpjKFTHQ==", req.HTTPRequest.Header.Get("x-amz-server-side-encryption-customer-key-md5"))
	assert.Equal(t, "PG4LipwVIkqCKLmpjKFTHQ==", req.HTTPRequest.Header.Get("x-amz-copy-source-server-side-encryption-customer-key-md5"))
}
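The expected header values follow directly from the SSE-C customization: the customer key is sent base64-encoded, and its MD5 digest is sent base64-encoded alongside it. A standalone check of the two constants used above (the test name is illustrative, not from the original suite):

// Verifies the literals asserted in TestComputeSSEKeys:
// base64("key") and base64(md5("key")).
func TestSSEKeyEncodingValues(t *testing.T) {
	key := []byte("key")
	sum := md5.Sum(key)
	assert.Equal(t, "a2V5", base64.StdEncoding.EncodeToString(key))
	assert.Equal(t, "PG4LipwVIkqCKLmpjKFTHQ==", base64.StdEncoding.EncodeToString(sum[:]))
}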
// Use S3 for simplicity
func TestPaginationTruncation(t *testing.T) {
	count := 0
	client := s3.New(nil)

	reqNum := &count
	resps := []*s3.ListObjectsOutput{
		{IsTruncated: aws.Bool(true), Contents: []*s3.Object{{Key: aws.String("Key1")}}},
		{IsTruncated: aws.Bool(true), Contents: []*s3.Object{{Key: aws.String("Key2")}}},
		{IsTruncated: aws.Bool(false), Contents: []*s3.Object{{Key: aws.String("Key3")}}},
		{IsTruncated: aws.Bool(true), Contents: []*s3.Object{{Key: aws.String("Key4")}}},
	}

	client.Handlers.Send.Clear() // mock sending
	client.Handlers.Unmarshal.Clear()
	client.Handlers.UnmarshalMeta.Clear()
	client.Handlers.ValidateResponse.Clear()
	client.Handlers.Unmarshal.PushBack(func(r *service.Request) {
		r.Data = resps[*reqNum]
		*reqNum++
	})

	params := &s3.ListObjectsInput{Bucket: aws.String("bucket")}

	results := []string{}
	err := client.ListObjectsPages(params, func(p *s3.ListObjectsOutput, last bool) bool {
		results = append(results, *p.Contents[0].Key)
		return true
	})

	assert.Equal(t, []string{"Key1", "Key2", "Key3"}, results)
	assert.Nil(t, err)

	// Try again without truncation token at all
	count = 0
	resps[1].IsTruncated = nil
	resps[2].IsTruncated = aws.Bool(true)
	results = []string{}
	err = client.ListObjectsPages(params, func(p *s3.ListObjectsOutput, last bool) bool {
		results = append(results, *p.Contents[0].Key)
		return true
	})

	assert.Equal(t, []string{"Key1", "Key2"}, results)
	assert.Nil(t, err)
}
func TestGetBucketLocation(t *testing.T) {
	for _, test := range s3LocationTests {
		s := s3.New(nil)
		s.Handlers.Send.Clear()
		s.Handlers.Send.PushBack(func(r *service.Request) {
			reader := ioutil.NopCloser(bytes.NewReader([]byte(test.body)))
			r.HTTPResponse = &http.Response{StatusCode: 200, Body: reader}
		})

		resp, err := s.GetBucketLocation(&s3.GetBucketLocationInput{Bucket: aws.String("bucket")})
		assert.NoError(t, err)
		if test.loc == "" {
			assert.Nil(t, resp.LocationConstraint)
		} else {
			assert.Equal(t, test.loc, *resp.LocationConstraint)
		}
	}
}
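TestGetBucketLocation iterates over an s3LocationTests table that is not included in this excerpt. A sketch of the shape it likely has, with illustrative rows (an empty LocationConstraint element maps to a nil result):

// Assumed shape of the s3LocationTests table; the exact rows may differ.
var s3LocationTests = []struct {
	body string // raw XML returned by the mocked Send handler
	loc  string // expected LocationConstraint ("" means nil)
}{
	{`<?xml version="1.0" encoding="UTF-8"?><LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>`, ``},
	{`<?xml version="1.0" encoding="UTF-8"?><LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">EU</LocationConstraint>`, `EU`},
}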
Example #18
// Delete the bucket
func teardown() {
	svc := s3.New(nil)

	objs, _ := svc.ListObjects(&s3.ListObjectsInput{Bucket: bucketName})
	for _, o := range objs.Contents {
		svc.DeleteObject(&s3.DeleteObjectInput{Bucket: bucketName, Key: o.Key})
	}

	uploads, _ := svc.ListMultipartUploads(&s3.ListMultipartUploadsInput{Bucket: bucketName})
	for _, u := range uploads.Uploads {
		svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
			Bucket:   bucketName,
			Key:      u.Key,
			UploadId: u.UploadId,
		})
	}

	svc.DeleteBucket(&s3.DeleteBucketInput{Bucket: bucketName})
}
Example #19
// Create a bucket for testing
func setup() {
	svc = s3.New(nil)
	bucketName = aws.String(
		fmt.Sprintf("aws-sdk-go-integration-%d-%s", time.Now().Unix(), integration.UniqueID()))

	for i := 0; i < 10; i++ {
		_, err := svc.CreateBucket(&s3.CreateBucketInput{Bucket: bucketName})
		if err == nil {
			break
		}
	}

	for {
		_, err := svc.HeadBucket(&s3.HeadBucketInput{Bucket: bucketName})
		if err == nil {
			break
		}
		time.Sleep(1 * time.Second)
	}
}
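setup and teardown (Example #18) operate on package-level svc and bucketName variables and must be wired up before the integration tests run. A sketch of that wiring; the use of TestMain here is an assumption, since the original harness is not shown:

// Hypothetical wiring for the integration fixtures; the original package may
// invoke setup/teardown differently.
var (
	svc        *s3.S3
	bucketName *string
)

func TestMain(m *testing.M) {
	setup()
	code := m.Run()
	teardown() // always remove the bucket, even if tests failed
	os.Exit(code)
}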
Example #20
func TestUploadZeroLenObject(t *testing.T) {
	requestMade := false
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		requestMade = true
		w.WriteHeader(http.StatusOK)
	}))
	svc := s3.New(&aws.Config{
		Endpoint: aws.String(server.URL),
	})
	mgr := s3manager.NewUploader(&s3manager.UploadOptions{S3: svc})
	resp, err := mgr.Upload(&s3manager.UploadInput{
		Bucket: aws.String("Bucket"),
		Key:    aws.String("Key"),
		Body:   strings.NewReader(""),
	})

	assert.NoError(t, err)
	assert.True(t, requestMade)
	assert.NotEqual(t, "", resp.Location)
	assert.Equal(t, "", resp.UploadID)
}
func TestStatusCodeError(t *testing.T) {
	for _, test := range s3StatusCodeErrorTests {
		s := s3.New(nil)
		s.Handlers.Send.Clear()
		s.Handlers.Send.PushBack(func(r *service.Request) {
			body := ioutil.NopCloser(bytes.NewReader([]byte(test.body)))
			r.HTTPResponse = &http.Response{
				ContentLength: int64(len(test.body)),
				StatusCode:    test.scode,
				Status:        test.status,
				Body:          body,
			}
		})
		_, err := s.PutBucketAcl(&s3.PutBucketAclInput{
			Bucket: aws.String("bucket"), ACL: aws.String("public-read"),
		})

		assert.Error(t, err)
		assert.Equal(t, test.code, err.(awserr.Error).Code())
		assert.Equal(t, test.message, err.(awserr.Error).Message())
	}
}
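TestStatusCodeError is table-driven over s3StatusCodeErrorTests, which is not part of this excerpt. The field set can be read off the test body; the rows below are illustrative assumptions for responses with empty bodies, where the S3 customization falls back to the HTTP status line:

// Assumed shape of s3StatusCodeErrorTests, inferred from the fields the test
// reads (scode, status, body, code, message). Rows are illustrative only.
var s3StatusCodeErrorTests = []struct {
	scode   int    // HTTP status code returned by the mocked response
	status  string // HTTP status text
	body    string // response body (empty bodies exercise the status-code fallback)
	code    string // expected awserr.Error Code()
	message string // expected awserr.Error Message()
}{
	{403, "Forbidden", "", "Forbidden", "Forbidden"},
	{404, "Not Found", "", "NotFound", "Not Found"},
}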
func TestSkipPagination(t *testing.T) {
	client := s3.New(nil)
	client.Handlers.Send.Clear() // mock sending
	client.Handlers.Unmarshal.Clear()
	client.Handlers.UnmarshalMeta.Clear()
	client.Handlers.ValidateResponse.Clear()
	client.Handlers.Unmarshal.PushBack(func(r *service.Request) {
		r.Data = &s3.HeadBucketOutput{}
	})

	req, _ := client.HeadBucketRequest(&s3.HeadBucketInput{Bucket: aws.String("bucket")})

	numPages, gotToEnd := 0, false
	req.EachPage(func(p interface{}, last bool) bool {
		numPages++
		if last {
			gotToEnd = true
		}
		return true
	})
	assert.Equal(t, 1, numPages)
	assert.True(t, gotToEnd)
}
Example #23
func loggingSvc(ignoreOps []string) (*s3.S3, *[]string, *[]interface{}) {
	var m sync.Mutex
	partNum := 0
	names := []string{}
	params := []interface{}{}
	svc := s3.New(nil)
	svc.Handlers.Unmarshal.Clear()
	svc.Handlers.UnmarshalMeta.Clear()
	svc.Handlers.UnmarshalError.Clear()
	svc.Handlers.Send.Clear()
	svc.Handlers.Send.PushBack(func(r *service.Request) {
		m.Lock()
		defer m.Unlock()

		if !contains(ignoreOps, r.Operation.Name) {
			names = append(names, r.Operation.Name)
			params = append(params, r.Params)
		}

		r.HTTPResponse = &http.Response{
			StatusCode: 200,
			Body:       ioutil.NopCloser(bytes.NewReader([]byte{})),
		}

		switch data := r.Data.(type) {
		case *s3.CreateMultipartUploadOutput:
			data.UploadId = aws.String("UPLOAD-ID")
		case *s3.UploadPartOutput:
			partNum++
			data.ETag = aws.String(fmt.Sprintf("ETAG%d", partNum))
		case *s3.CompleteMultipartUploadOutput:
			data.Location = aws.String("https://location")
		}
	})

	return svc, &names, &params
}
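loggingSvc filters recorded operations through a contains helper that is not shown in this excerpt. A minimal sketch of it (the name is taken from the call above; the real helper may be defined elsewhere in the package):

// Hypothetical reconstruction of the contains helper used by loggingSvc.
func contains(src []string, s string) bool {
	for _, v := range src {
		if s == v {
			return true
		}
	}
	return false
}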
func TestHostStyleBucketBuild(t *testing.T) {
	s := s3.New(nil)
	runTests(t, s, sslTests)
}
func TestPathStyleBucketBuild(t *testing.T) {
	s := s3.New(&aws.Config{S3ForcePathStyle: aws.Bool(true)})
	runTests(t, s, forcepathTests)
}
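TestHostStyleBucketBuild, TestPathStyleBucketBuild, and the no-SSL variant in Example #26 all delegate to runTests with different expectation tables (sslTests, forcepathTests, nosslTests), none of which appear in this excerpt. A plausible sketch of the driver and one illustrative table entry; the endpoint URL and the table contents are assumptions:

// Assumed shape of the bucket-addressing test driver; the real tables cover
// many more bucket-name cases (dots, uppercase, DNS-incompatible names, etc.).
type s3BucketTest struct {
	bucket string
	url    string // expected request URL after Build()
}

var sslTests = []s3BucketTest{
	{"abc", "https://abc.s3.mock-region.amazonaws.com/"}, // illustrative row
}

func runTests(t *testing.T, svc *s3.S3, tests []s3BucketTest) {
	for _, test := range tests {
		req, _ := svc.ListObjectsRequest(&s3.ListObjectsInput{Bucket: &test.bucket})
		req.Build()
		assert.Equal(t, test.url, req.HTTPRequest.URL.String())
	}
}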
Example #26
func TestInterface(t *testing.T) {
	assert.Implements(t, (*s3iface.S3API)(nil), s3.New(nil))
}
func TestHostStyleBucketBuildNoSSL(t *testing.T) {
	s := s3.New(&aws.Config{DisableSSL: aws.Bool(true)})
	runTests(t, s, nosslTests)
}