func TestPresignHandler(t *testing.T) { svc := s3.New(nil) req, _ := svc.PutObjectRequest(&s3.PutObjectInput{ Bucket: aws.String("bucket"), Key: aws.String("key"), ContentDisposition: aws.String("a+b c$d"), ACL: aws.String("public-read"), }) req.Time = time.Unix(0, 0) urlstr, err := req.Presign(5 * time.Minute) assert.NoError(t, err) expectedDate := "19700101T000000Z" expectedHeaders := "host;x-amz-acl" expectedSig := "7edcb4e3a1bf12f4989018d75acbe3a7f03df24bd6f3112602d59fc551f0e4e2" expectedCred := "AKID/19700101/mock-region/s3/aws4_request" u, _ := url.Parse(urlstr) urlQ := u.Query() assert.Equal(t, expectedSig, urlQ.Get("X-Amz-Signature")) assert.Equal(t, expectedCred, urlQ.Get("X-Amz-Credential")) assert.Equal(t, expectedHeaders, urlQ.Get("X-Amz-SignedHeaders")) assert.Equal(t, expectedDate, urlQ.Get("X-Amz-Date")) assert.Equal(t, "300", urlQ.Get("X-Amz-Expires")) assert.NotContains(t, urlstr, "+") // + encoded as %20 }
func dlLoggingSvc(data []byte) (*s3.S3, *[]string, *[]string) { var m sync.Mutex names := []string{} ranges := []string{} svc := s3.New(nil) svc.Handlers.Send.Clear() svc.Handlers.Send.PushBack(func(r *aws.Request) { m.Lock() defer m.Unlock() names = append(names, r.Operation.Name) ranges = append(ranges, *r.Params.(*s3.GetObjectInput).Range) rerng := regexp.MustCompile(`bytes=(\d+)-(\d+)`) rng := rerng.FindStringSubmatch(r.HTTPRequest.Header.Get("Range")) start, _ := strconv.ParseInt(rng[1], 10, 64) fin, _ := strconv.ParseInt(rng[2], 10, 64) fin++ if fin > int64(len(data)) { fin = int64(len(data)) } r.HTTPResponse = &http.Response{ StatusCode: 200, Body: ioutil.NopCloser(bytes.NewReader(data[start:fin])), Header: http.Header{}, } r.HTTPResponse.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, fin, len(data))) }) return svc, &names, &ranges }
func loggingSvc() (*s3.S3, *[]string, *[]interface{}) { var m sync.Mutex partNum := 0 names := []string{} params := []interface{}{} svc := s3.New(nil) svc.Handlers.Unmarshal.Clear() svc.Handlers.UnmarshalMeta.Clear() svc.Handlers.UnmarshalError.Clear() svc.Handlers.Send.Clear() svc.Handlers.Send.PushBack(func(r *aws.Request) { m.Lock() defer m.Unlock() names = append(names, r.Operation.Name) params = append(params, r.Params) r.HTTPResponse = &http.Response{ StatusCode: 200, Body: ioutil.NopCloser(bytes.NewReader([]byte{})), } switch data := r.Data.(type) { case *s3.CreateMultipartUploadOutput: data.UploadID = aws.String("UPLOAD-ID") case *s3.UploadPartOutput: partNum++ data.ETag = aws.String(fmt.Sprintf("ETAG%d", partNum)) case *s3.CompleteMultipartUploadOutput: data.Location = aws.String("https://location") } }) return svc, &names, ¶ms }
func TestMD5InPutBucketPolicy(t *testing.T) { svc := s3.New(nil) req, _ := svc.PutBucketPolicyRequest(&s3.PutBucketPolicyInput{ Bucket: aws.String("bucketname"), Policy: aws.String("{}"), }) assertMD5(t, req) }
func TestNoPopulateLocationConstraintIfClassic(t *testing.T) { s := s3.New(&aws.Config{Region: "us-east-1"}) req, _ := s.CreateBucketRequest(&s3.CreateBucketInput{ Bucket: aws.String("bucket"), }) err := req.Build() assert.NoError(t, err) assert.Equal(t, 0, len(awsutil.ValuesAtPath(req.Params, "CreateBucketConfiguration.LocationConstraint"))) }
func TestNoPopulateLocationConstraintIfProvided(t *testing.T) { s := s3.New(nil) req, _ := s.CreateBucketRequest(&s3.CreateBucketInput{ Bucket: aws.String("bucket"), CreateBucketConfiguration: &s3.CreateBucketConfiguration{}, }) err := req.Build() assert.NoError(t, err) assert.Equal(t, 0, len(awsutil.ValuesAtPath(req.Params, "CreateBucketConfiguration.LocationConstraint"))) }
func TestPopulateLocationConstraint(t *testing.T) { s := s3.New(nil) in := &s3.CreateBucketInput{ Bucket: aws.String("bucket"), } req, _ := s.CreateBucketRequest(in) err := req.Build() assert.NoError(t, err) assert.Equal(t, "mock-region", awsutil.ValuesAtPath(req.Params, "CreateBucketConfiguration.LocationConstraint")[0]) assert.Nil(t, in.CreateBucketConfiguration) // don't modify original params }
func TestMD5InDeleteObjects(t *testing.T) { svc := s3.New(nil) req, _ := svc.DeleteObjectsRequest(&s3.DeleteObjectsInput{ Bucket: aws.String("bucketname"), Delete: &s3.Delete{ Objects: []*s3.ObjectIdentifier{ {Key: aws.String("key")}, }, }, }) assertMD5(t, req) }
func TestMD5InPutBucketTagging(t *testing.T) { svc := s3.New(nil) req, _ := svc.PutBucketTaggingRequest(&s3.PutBucketTaggingInput{ Bucket: aws.String("bucketname"), Tagging: &s3.Tagging{ TagSet: []*s3.Tag{ {Key: aws.String("KEY"), Value: aws.String("VALUE")}, }, }, }) assertMD5(t, req) }
func TestMD5InPutBucketCORS(t *testing.T) { svc := s3.New(nil) req, _ := svc.PutBucketCORSRequest(&s3.PutBucketCORSInput{ Bucket: aws.String("bucketname"), CORSConfiguration: &s3.CORSConfiguration{ CORSRules: []*s3.CORSRule{ {AllowedMethods: []*string{aws.String("GET")}}, }, }, }) assertMD5(t, req) }
// init will initialize all default options. func (u *uploader) init() { if u.opts.S3 == nil { u.opts.S3 = s3.New(nil) } if u.opts.Concurrency == 0 { u.opts.Concurrency = DefaultUploadConcurrency } if u.opts.PartSize == 0 { u.opts.PartSize = DefaultUploadPartSize } // Try to get the total size for some optimizations u.initSize() }
func TestCopySourceSSECustomerKeyOverHTTPError(t *testing.T) { s := s3.New(&aws.Config{DisableSSL: true}) req, _ := s.CopyObjectRequest(&s3.CopyObjectInput{ Bucket: aws.String("bucket"), CopySource: aws.String("bucket/source"), Key: aws.String("dest"), CopySourceSSECustomerKey: aws.String("key"), }) err := req.Build() assert.Error(t, err) assert.Equal(t, "ConfigError", err.(awserr.Error).Code()) assert.Contains(t, err.(awserr.Error).Message(), "cannot send SSE keys over HTTP") }
func get_svc_by_aksk(ak string, sk string, svc *s3.S3) bool { credentials := credentials.NewStaticCredentials(ak, sk, "") svc = s3.New(&aws.Config{ Region: "HANGZHOU", Credentials: credentials, Endpoint: "ks3.sdns.ksyun.com", DisableSSL: true, LogLevel: 0, S3ForcePathStyle: true, LogHTTPBody: true, }) return true }
// init initializes the downloader with default options. func (d *downloader) init() { d.totalBytes = -1 if d.opts.Concurrency == 0 { d.opts.Concurrency = DefaultDownloadConcurrency } if d.opts.PartSize == 0 { d.opts.PartSize = DefaultDownloadPartSize } if d.opts.S3 == nil { d.opts.S3 = s3.New(nil) } }
func TestMD5InPutBucketLifecycle(t *testing.T) { svc := s3.New(nil) req, _ := svc.PutBucketLifecycleRequest(&s3.PutBucketLifecycleInput{ Bucket: aws.String("bucketname"), LifecycleConfiguration: &s3.LifecycleConfiguration{ Rules: []*s3.LifecycleRule{ { ID: aws.String("ID"), Prefix: aws.String("Prefix"), Status: aws.String("Enabled"), }, }, }, }) assertMD5(t, req) }
func TestComputeSSEKeys(t *testing.T) { s := s3.New(nil) req, _ := s.CopyObjectRequest(&s3.CopyObjectInput{ Bucket: aws.String("bucket"), CopySource: aws.String("bucket/source"), Key: aws.String("dest"), SSECustomerKey: aws.String("key"), CopySourceSSECustomerKey: aws.String("key"), }) err := req.Build() assert.NoError(t, err) assert.Equal(t, "a2V5", req.HTTPRequest.Header.Get("x-amz-server-side-encryption-customer-key")) assert.Equal(t, "a2V5", req.HTTPRequest.Header.Get("x-amz-copy-source-server-side-encryption-customer-key")) assert.Equal(t, "PG4LipwVIkqCKLmpjKFTHQ==", req.HTTPRequest.Header.Get("x-amz-server-side-encryption-customer-key-md5")) assert.Equal(t, "PG4LipwVIkqCKLmpjKFTHQ==", req.HTTPRequest.Header.Get("x-amz-copy-source-server-side-encryption-customer-key-md5")) }
// Use S3 for simplicity func TestPaginationTruncation(t *testing.T) { count := 0 client := s3.New(nil) reqNum := &count resps := []*s3.ListObjectsOutput{ {IsTruncated: aws.Boolean(true), Contents: []*s3.Object{{Key: aws.String("Key1")}}}, {IsTruncated: aws.Boolean(true), Contents: []*s3.Object{{Key: aws.String("Key2")}}}, {IsTruncated: aws.Boolean(false), Contents: []*s3.Object{{Key: aws.String("Key3")}}}, {IsTruncated: aws.Boolean(true), Contents: []*s3.Object{{Key: aws.String("Key4")}}}, } client.Handlers.Send.Clear() // mock sending client.Handlers.Unmarshal.Clear() client.Handlers.UnmarshalMeta.Clear() client.Handlers.ValidateResponse.Clear() client.Handlers.Unmarshal.PushBack(func(r *aws.Request) { r.Data = resps[*reqNum] *reqNum++ }) params := &s3.ListObjectsInput{Bucket: aws.String("bucket")} results := []string{} err := client.ListObjectsPages(params, func(p *s3.ListObjectsOutput, last bool) bool { results = append(results, *p.Contents[0].Key) return true }) assert.Equal(t, []string{"Key1", "Key2", "Key3"}, results) assert.Nil(t, err) // Try again without truncation token at all count = 0 resps[1].IsTruncated = nil resps[2].IsTruncated = aws.Boolean(true) results = []string{} err = client.ListObjectsPages(params, func(p *s3.ListObjectsOutput, last bool) bool { results = append(results, *p.Contents[0].Key) return true }) assert.Equal(t, []string{"Key1", "Key2"}, results) assert.Nil(t, err) }
func TestGetBucketLocation(t *testing.T) { for _, test := range s3LocationTests { s := s3.New(nil) s.Handlers.Send.Clear() s.Handlers.Send.PushBack(func(r *aws.Request) { reader := ioutil.NopCloser(bytes.NewReader([]byte(test.body))) r.HTTPResponse = &http.Response{StatusCode: 200, Body: reader} }) resp, err := s.GetBucketLocation(&s3.GetBucketLocationInput{Bucket: aws.String("bucket")}) assert.NoError(t, err) if test.loc == "" { assert.Nil(t, resp.LocationConstraint) } else { assert.Equal(t, test.loc, *resp.LocationConstraint) } } }
// Create a bucket for testing func setup() { svc = s3.New(nil) bucketName = aws.String( fmt.Sprintf("aws-sdk-go-integration-%d-%s", time.Now().Unix(), integration.UniqueID())) for i := 0; i < 10; i++ { _, err := svc.CreateBucket(&s3.CreateBucketInput{Bucket: bucketName}) if err == nil { break } } for { _, err := svc.HeadBucket(&s3.HeadBucketInput{Bucket: bucketName}) if err == nil { break } time.Sleep(1 * time.Second) } }
func TestStatusCodeError(t *testing.T) { for _, test := range s3StatusCodeErrorTests { s := s3.New(nil) s.Handlers.Send.Clear() s.Handlers.Send.PushBack(func(r *aws.Request) { body := ioutil.NopCloser(bytes.NewReader([]byte(test.body))) r.HTTPResponse = &http.Response{ ContentLength: int64(len(test.body)), StatusCode: test.scode, Status: test.status, Body: body, } }) _, err := s.PutBucketACL(&s3.PutBucketACLInput{ Bucket: aws.String("bucket"), ACL: aws.String("public-read"), }) assert.Error(t, err) assert.Equal(t, test.code, err.(awserr.Error).Code()) assert.Equal(t, test.message, err.(awserr.Error).Message()) } }
func TestSkipPagination(t *testing.T) { client := s3.New(nil) client.Handlers.Send.Clear() // mock sending client.Handlers.Unmarshal.Clear() client.Handlers.UnmarshalMeta.Clear() client.Handlers.ValidateResponse.Clear() client.Handlers.Unmarshal.PushBack(func(r *aws.Request) { r.Data = &s3.HeadBucketOutput{} }) req, _ := client.HeadBucketRequest(&s3.HeadBucketInput{Bucket: aws.String("bucket")}) numPages, gotToEnd := 0, false req.EachPage(func(p interface{}, last bool) bool { numPages++ if last { gotToEnd = true } return true }) assert.Equal(t, 1, numPages) assert.True(t, gotToEnd) }
func TestPathStyleBucketBuild(t *testing.T) { s := s3.New(&aws.Config{S3ForcePathStyle: true}) runTests(t, s, forcepathTests) }
"github.com/kingsoft-avteam/aws-sdk-go/service/s3" "github.com/stretchr/testify/assert" "net/http" ) var bucket = string("aa-go-sdk") var key = string("中文/test.go") var key_encode = string("%E4%B8%AD%E6%96%87/test.go") var key_copy = string("中文/test.go.copy") var content = string("content") var cre = credentials.NewStaticCredentials("lMQTr0hNlMpB0iOk/i+x", "D4CsYLs75JcWEjbiI22zR3P7kJ/+5B1qdEje7A7I", "") var svc = s3.New(&aws.Config{ Region: "HANGZHOU", Credentials: cre, Endpoint: "kssws.ks-cdn.com", DisableSSL: true, LogLevel: 1, S3ForcePathStyle: false, LogHTTPBody: true, }) func TestCreateBucket(t *testing.T) { _, err := svc.CreateBucket(&s3.CreateBucketInput{ ACL: aws.String("public-read"), Bucket: aws.String(bucket), }) assert.Error(t, err) assert.Equal(t, "BucketAlreadyExists", err.(*apierr.RequestError).Code()) } func TestBucketAcl(t *testing.T) { _, err := svc.PutBucketACL(&s3.PutBucketACLInput{
func TestHostStyleBucketBuild(t *testing.T) { s := s3.New(nil) runTests(t, s, sslTests) }
func TestHostStyleBucketBuildNoSSL(t *testing.T) { s := s3.New(&aws.Config{DisableSSL: true}) runTests(t, s, nosslTests) }
// start_upload_process uploads the recorded .flv segments listed in
// idx_cfg to KS3 and, once every segment is accounted for, optionally
// merges them via ks3_mp4_merge and removes the working directory.
//
// NOTE(review): the function returns true even when an individual segment
// upload fails (the loop just breaks and the final return is
// unconditional) — presumably the caller retries on a later pass; confirm
// that is intended.
func start_upload_process(idx_file IndexFileT, idx_cfg IndexConfigT) bool {
	var ak string
	var sk string
	// Resolve this user's access/secret key pair; bail out if lookup fails.
	ak, sk, err := get_aksk_by_userid(idx_cfg.userid)
	if err != nil {
		return false
	}
	credentials := credentials.NewStaticCredentials(ak, sk, "")
	// NOTE(review): hard-coded endpoint/region with DisableSSL means
	// requests travel unencrypted — confirm this targets an internal-only
	// network.
	svc := s3.New(&aws.Config{
		Region:           "HANGZHOU",
		Credentials:      credentials,
		Endpoint:         "ks3.sdns.ksyun.com",
		DisableSSL:       true,
		LogLevel:         0,
		S3ForcePathStyle: true,
		LogHTTPBody:      true,
	})
	// Local working directory holding this recording's segment files.
	directory := fmt.Sprintf("%s%s/%s/%s/%s/", idx_file.root, idx_file.vhost, idx_file.app, idx_file.name, idx_file.time)
	upload := false
	// nupload counts segments that are either uploaded now or already gone.
	nupload := 0
	for i := 0; i < len(idx_cfg.files); i++ {
		key := fmt.Sprintf("record/%s/%s/%s/%s", idx_file.app, idx_file.name, idx_file.time, idx_cfg.files[i])
		file := directory + idx_cfg.files[i]
		_, err := os.Stat(directory + idx_cfg.files[i])
		if err != nil {
			// delete already.
			nupload++
			continue
		}
		if !ks3_flv_upload(svc, idx_cfg, ak, sk, key, file) {
			// upload failed.
			break
		}
		// Segment is safely in KS3; drop the local copy.
		os.Remove(file)
		log.Info(fmt.Sprintf("remove .flv file %s", file))
		nupload++
	}
	// All segments handled: merge when the index is flagged for upload, or
	// the recording has been idle past interval + global timeout.
	if nupload == len(idx_cfg.files) {
		now := time.Now().Unix()
		if idx_cfg.upload || (now-idx_file.modtime) > (idx_cfg.interval+g_gout_config.timeout) {
			upload = true
		}
	}
	if upload {
		if ks3_mp4_merge(svc, idx_file, idx_cfg) {
			// we need upload file
			os.RemoveAll(directory)
			log.Info(fmt.Sprintf("remove directory %s", directory))
		}
	}
	return true
}
func TestInterface(t *testing.T) { assert.Implements(t, (*s3iface.S3API)(nil), s3.New(nil)) }