// TestRetryAttempts checks that RetryAttempts(false) clears the attempt
// strategy and that RetryAttempts(true) restores the original default.
func (s *S) TestRetryAttempts(c *C) {
	s3.SetAttemptStrategy(nil)
	orig := s3.AttemptStrategy()

	s3.RetryAttempts(false)
	c.Assert(s3.AttemptStrategy(), Equals, aws.AttemptStrategy{})

	s3.RetryAttempts(true)
	c.Assert(s3.AttemptStrategy(), Equals, orig)
}
func (s *S) TestMultiNoPreviousUpload(c *C) {
	// Don't retry the NoSuchUpload error.
	s3.RetryAttempts(false)

	testServer.Response(404, nil, NoSuchUploadErrorDump)
	testServer.Response(200, nil, InitMultiResultDump)

	b := s.s3.Bucket("sample")

	multi, err := b.Multi("multi", "text/plain", s3.Private)
	c.Assert(err, IsNil)

	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "GET")
	c.Assert(req.URL.Path, Equals, "/sample/")
	c.Assert(req.Form["uploads"], DeepEquals, []string{""})
	c.Assert(req.Form["prefix"], DeepEquals, []string{"multi"})

	req = testServer.WaitRequest()
	c.Assert(req.Method, Equals, "POST")
	c.Assert(req.URL.Path, Equals, "/sample/multi")
	c.Assert(req.Form["uploads"], DeepEquals, []string{""})

	c.Assert(multi.UploadId, Matches, "JNbR_[A-Za-z0-9.]+QQ--")
}
func (s *S) TestPutAllNoPreviousUpload(c *C) {
	// Don't retry the NoSuchUpload error.
	s3.RetryAttempts(false)

	etag1 := map[string]string{"ETag": `"etag1"`}
	etag2 := map[string]string{"ETag": `"etag2"`}
	etag3 := map[string]string{"ETag": `"etag3"`}
	testServer.Response(200, nil, InitMultiResultDump)
	testServer.Response(404, nil, NoSuchUploadErrorDump)
	testServer.Response(200, etag1, "")
	testServer.Response(200, etag2, "")
	testServer.Response(200, etag3, "")

	b := s.s3.Bucket("sample")
	multi, err := b.InitMulti("multi", "text/plain", s3.Private)
	c.Assert(err, IsNil)

	parts, err := multi.PutAll(strings.NewReader("part1part2last"), 5)
	c.Assert(parts, HasLen, 3)
	c.Assert(parts[0].ETag, Equals, `"etag1"`)
	c.Assert(parts[1].ETag, Equals, `"etag2"`)
	c.Assert(parts[2].ETag, Equals, `"etag3"`)
	c.Assert(err, IsNil)

	// Init
	testServer.WaitRequest()

	// List old parts. Won't find anything.
	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "GET")
	c.Assert(req.URL.Path, Equals, "/sample/multi")

	// Send part 1.
	req = testServer.WaitRequest()
	c.Assert(req.Method, Equals, "PUT")
	c.Assert(req.URL.Path, Equals, "/sample/multi")
	c.Assert(req.Form["partNumber"], DeepEquals, []string{"1"})
	c.Assert(req.Header["Content-Length"], DeepEquals, []string{"5"})
	c.Assert(readAll(req.Body), Equals, "part1")

	// Send part 2.
	req = testServer.WaitRequest()
	c.Assert(req.Method, Equals, "PUT")
	c.Assert(req.URL.Path, Equals, "/sample/multi")
	c.Assert(req.Form["partNumber"], DeepEquals, []string{"2"})
	c.Assert(req.Header["Content-Length"], DeepEquals, []string{"5"})
	c.Assert(readAll(req.Body), Equals, "part2")

	// Send part 3 with shorter body.
	req = testServer.WaitRequest()
	c.Assert(req.Method, Equals, "PUT")
	c.Assert(req.URL.Path, Equals, "/sample/multi")
	c.Assert(req.Form["partNumber"], DeepEquals, []string{"3"})
	c.Assert(req.Header["Content-Length"], DeepEquals, []string{"4"})
	c.Assert(readAll(req.Body), Equals, "last")
}
func (s *S) TestPutAllZeroSizeFile(c *C) {
	// Don't retry the NoSuchUpload error.
	s3.RetryAttempts(false)

	etag1 := map[string]string{"ETag": `"etag1"`}
	testServer.Response(200, nil, InitMultiResultDump)
	testServer.Response(404, nil, NoSuchUploadErrorDump)
	testServer.Response(200, etag1, "")

	b := s.s3.Bucket("sample")
	multi, err := b.InitMulti("multi", "text/plain", s3.Private)
	c.Assert(err, IsNil)

	// Must send at least one part, so that completing it will work.
	parts, err := multi.PutAll(strings.NewReader(""), 5)
	c.Assert(parts, HasLen, 1)
	c.Assert(parts[0].ETag, Equals, `"etag1"`)
	c.Assert(err, IsNil)

	// Init
	testServer.WaitRequest()

	// List old parts. Won't find anything.
	req := testServer.WaitRequest()
	c.Assert(req.Method, Equals, "GET")
	c.Assert(req.URL.Path, Equals, "/sample/multi")

	// Send empty part.
	req = testServer.WaitRequest()
	c.Assert(req.Method, Equals, "PUT")
	c.Assert(req.URL.Path, Equals, "/sample/multi")
	c.Assert(req.Form["partNumber"], DeepEquals, []string{"1"})
	c.Assert(req.Header["Content-Length"], DeepEquals, []string{"0"})
	c.Assert(readAll(req.Body), Equals, "")
}
func init() {
	// We will decide when to retry and under what circumstances, not s3.
	// Sometimes a file is expected not to exist, and we don't want s3 to
	// hold things up by unilaterally retrying for no good reason.
	s3.RetryAttempts(false)
}