Example #1
func NewClient(public bool) *s3.S3 {
	if public {
		return s3.New(
			&aws.Config{
				Credentials: credentials.AnonymousCredentials,
			},
		)
	}
	return s3.New(nil)
}
Example #2
File: s3.go Project: varung/droot
func NewS3Client() *S3Client {
	var svc s3iface.S3API
	if log.IsDebug {
		svc = s3.New(session.New(), aws.NewConfig().WithLogLevel(aws.LogDebug))
	} else {
		svc = s3.New(session.New())
	}
	return &S3Client{
		svc:        svc,
		uploader:   newS3Uploader(svc),
		downloader: newS3Downloader(svc),
	}
}
Example #3
// download fetches the CloudTrail logfile from S3 and parses it
func (c *config) download(m *cloudtrailNotification) (*[]cloudtrailRecord, error) {
	if len(m.S3ObjectKey) != 1 {
		return nil, fmt.Errorf("Expected one S3 key but got %d", len(m.S3ObjectKey[0]))
	}
	s := s3.New(&c.awsConfig)
	q := s3.GetObjectInput{
		Bucket: aws.String(m.S3Bucket),
		Key:    aws.String(m.S3ObjectKey[0]),
	}
	o, err := s.GetObject(&q)
	if err != nil {
		return nil, err
	}
	b, err := ioutil.ReadAll(o.Body)
	if err != nil {
		return nil, err
	}

	logfile := cloudtrailLog{}

	if err := json.Unmarshal(b, &logfile); err != nil {
		return nil, fmt.Errorf("Error unmarshaling cloutrail JSON: %s", err.Error())
	}

	return &logfile.Records, nil
}
Example #4
func TestPresignHandler(t *testing.T) {
	svc := s3.New(nil)
	req, _ := svc.PutObjectRequest(&s3.PutObjectInput{
		Bucket:             aws.String("bucket"),
		Key:                aws.String("key"),
		ContentDisposition: aws.String("a+b c$d"),
		ACL:                aws.String("public-read"),
	})
	req.Time = time.Unix(0, 0)
	urlstr, err := req.Presign(5 * time.Minute)

	assert.NoError(t, err)

	expectedDate := "19700101T000000Z"
	expectedHeaders := "host;x-amz-acl"
	expectedSig := "7edcb4e3a1bf12f4989018d75acbe3a7f03df24bd6f3112602d59fc551f0e4e2"
	expectedCred := "AKID/19700101/mock-region/s3/aws4_request"

	u, _ := url.Parse(urlstr)
	urlQ := u.Query()
	assert.Equal(t, expectedSig, urlQ.Get("X-Amz-Signature"))
	assert.Equal(t, expectedCred, urlQ.Get("X-Amz-Credential"))
	assert.Equal(t, expectedHeaders, urlQ.Get("X-Amz-SignedHeaders"))
	assert.Equal(t, expectedDate, urlQ.Get("X-Amz-Date"))
	assert.Equal(t, "300", urlQ.Get("X-Amz-Expires"))

	assert.NotContains(t, urlstr, "+") // + encoded as %20
}
Example #5
func loadManager(config *config.Config) (*sneaker.Manager, error) {
	u, err := url.Parse(config.SneakerS3.SneakerS3Path)
	if err != nil {

		return nil, err
	}
	if u.Path != "" && u.Path[0] == '/' {
		u.Path = u.Path[1:]
	}

	// here, we provide access and secret keys for aws
	creds := credentials.NewStaticCredentials(config.SneakerS3.AwsAccesskeyId, config.SneakerS3.AwsSecretAccessKey, "")

	// we use the static credentials and the configured region to init the aws session
	session := session.New(aws.NewConfig().WithCredentials(creds).WithRegion(config.SneakerS3.AwsRegion))

	return &sneaker.Manager{
		Objects: s3.New(session),
		Envelope: sneaker.Envelope{
			KMS: kms.New(session),
		},
		Bucket: u.Host,
		Prefix: u.Path,
		KeyId:  config.SneakerS3.SneakerMasterKey,
	}, nil
}
Example #6
// Create a new StorageClient object based on a configuration file.
func (c *Config) NewClient() (dialects.StorageClient, error) {
	creds := credentials.NewStaticCredentials(c.AccessKeyID, c.SecretAccessKey, "")
	_, err := creds.Get()
	if err != nil {
		return nil, err
	}
	converterFunction, err := dialects.GetBatchConverterFunction(c.FileFormat)
	if err != nil {
		return nil, err
	}
	config := &aws.Config{
		Region:           &c.Region,
		Credentials:      creds,
		Endpoint:         &c.Endpoint,
		S3ForcePathStyle: aws.Bool(true)}
	return &S3Storage{
		AccessKeyID:     c.AccessKeyID,
		SecretAccessKey: c.SecretAccessKey,
		Bucket:          c.Bucket,
		BlobPath:        c.BlobPath,
		Region:          c.Region,
		FileFormat:      c.FileFormat,
		BatchConverter:  converterFunction,
		Client:          s3.New(session.New(), config)}, nil
}
Example #7
func ListBucketContents(region, bucket string) {
	config := aws.NewConfig().WithRegion(region)
	svc := s3.New(session.New(config))

	params := &s3.ListObjectsInput{
		Bucket: aws.String(bucket), // Required
		// Delimiter:    aws.String("Delimiter"),
		// EncodingType: aws.String("EncodingType"),
		// Marker:       aws.String("Marker"),
		// MaxKeys:      aws.Int64(1),
		// Prefix:       aws.String("Prefix"),
	}
	resp, err := svc.ListObjects(params)

	if err != nil {
		if awsErr, ok := err.(awserr.Error); ok {
			// Generic AWS error with Code, Message, and original error (if any)
			fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
			if reqErr, ok := err.(awserr.RequestFailure); ok {
				// A service error occurred
				fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
			}
		} else {
			// This case should never be hit, the SDK should always return an
			// error which satisfies the awserr.Error interface.
			fmt.Println(err.Error())
		}
	}

	// Pretty-print the response data.
	fmt.Println(awsutil.Prettify(resp))
}
Example #8
func listS3Buckets(response *analyticsResponse) {

	disableSSL := true

	//
	// @todo Find out how to specify credentials here rather than from global
	// config
	//
	log.Printf("Creating session....")
	mySession := session.New(&aws.Config{Region: aws.String("us-west-2"), DisableSSL: &disableSSL})
	log.Printf("Connecting to S3....")
	myS3svc := s3.New(mySession)

	log.Printf("Listing buckets....")
	result, err := myS3svc.ListBuckets(&s3.ListBucketsInput{})

	if err != nil {
		log.Println("Failed to list buckets", err)
		return
	}

	log.Println("Buckets:")
	for _, bucket := range result.Buckets {
		log.Printf("%s : %s\n", aws.StringValue(bucket.Name), bucket.CreationDate)
		myBucket := bucketDescription{*bucket.Name, bucket.CreationDate}
		response.BucketList = append(response.BucketList, myBucket)

	}
}
Example #9
// GetS3Config returns the S3 config used for uploading output files to S3
func GetS3Config() *s3util.Manager {
	//There are multiple ways of supporting cross-region upload to an S3 bucket:
	//1) We can specify the url https://s3.amazonaws.com and not specify a region in our s3 client. This approach works in Java & .NET but not in Go,
	//since the Go SDK requires a region to be set on the client.
	//2) We can make use of the GetBucketLocation API to find the location of the S3 bucket. This is a better way to handle this, however it has its own disadvantages:
	//-> We will have to update the managed policy of AmazonEC2RoleforSSM so that the agent has permission to make that call.
	//-> We will still have to notify our customers regarding the change in our IAM policy - such that customers using an inline policy can make the change accordingly.
	//3) Special behavior of the S3 PutObject API for the IAD region, which is described in detail below.
	//We have taken the 3rd option - until the changes for the 2nd option are in place.

	//In our current implementation, we upload a test S3 file and use the error message to determine the bucket's region,
	//but we do this with the region set to "us-east-1". This is because of special behavior of the S3 PutObject API:
	//Only for the "us-east-1" endpoint, if the bucket is present in any other region (i.e. a non-IAD bucket) the PutObject API will throw an
	//error of type AuthorizationHeaderMalformed with a message stating which region the bucket is in. A sample error message looks like:
	//AuthorizationHeaderMalformed: The authorization header is malformed; the region 'us-east-1' is wrong; expecting 'us-west-2' status code: 400, request id: []

	//We leverage the above error message to determine the bucket's region, and if there is no error - that means the bucket is indeed in IAD.

	//Note: The above behavior only exists for the IAD endpoint (the special endpoint for S3) - not for any other region.
	//For other region endpoints, you get a BucketRegionError which is not useful for determining where the bucket is present.
	//Revisit this if S3 makes the PutObject API behavior consistent across all endpoints - in which case - instead of using the IAD endpoint,
	//we can then pick the endpoint from metadata instead.

	awsConfig := sdkutil.AwsConfig()

	if region, err := platform.Region(); err == nil && region == s3Bjs {
		awsConfig.Endpoint = &s3BjsEndpoint
		awsConfig.Region = &s3Bjs
	} else {
		awsConfig.Endpoint = &s3StandardEndpoint
		awsConfig.Region = &S3RegionUSStandard
	}
	s3 := s3.New(session.New(awsConfig))
	return s3util.NewManager(s3)
}
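The comments above describe determining a bucket's region by parsing the AuthorizationHeaderMalformed error that the us-east-1 endpoint returns, but the snippet stops at constructing the client. Below is a minimal, hypothetical sketch of how that error message could be parsed; the function regionFromPutError and the simulated error are invented for illustration and are not part of the project above.
// Hypothetical sketch: extract the bucket's region from the
// AuthorizationHeaderMalformed error described in the comments above.
package main

import (
	"fmt"
	"regexp"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

// expectingRegion matches the "expecting '<region>'" fragment of the error message.
var expectingRegion = regexp.MustCompile(`expecting '([a-z0-9-]+)'`)

// regionFromPutError returns the region S3 reported in an
// AuthorizationHeaderMalformed error, if the error is of that type.
func regionFromPutError(err error) (string, bool) {
	aerr, ok := err.(awserr.Error)
	if !ok || aerr.Code() != "AuthorizationHeaderMalformed" {
		return "", false
	}
	m := expectingRegion.FindStringSubmatch(aerr.Message())
	if len(m) != 2 {
		return "", false
	}
	return m[1], true
}

func main() {
	// Simulated error shaped like the sample message quoted in the comments above.
	err := awserr.New("AuthorizationHeaderMalformed",
		"The authorization header is malformed; the region 'us-east-1' is wrong; expecting 'us-west-2'", nil)
	if region, ok := regionFromPutError(err); ok {
		fmt.Println("bucket region:", region) // prints: bucket region: us-west-2
	}
}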
Example #10
func (fs *FileService) SendToS3(key string, fileName string) {
	file, err := os.Open(fileName)
	if err != nil {
		log.Fatal("Error opening ", fileName, ": ", err)
	}
	defer file.Close()

	bucketName := os.Getenv("S3_BUCKET_NAME")

	svc := s3.New(session.New(), &aws.Config{Region: aws.String("us-east-1")})
	fileInfo, _ := file.Stat()
	var size int64 = fileInfo.Size()
	buffer := make([]byte, size)
	if _, err := io.ReadFull(file, buffer); err != nil {
		log.Fatal("Error reading ", fileName, ": ", err)
	}
	fileBytes := bytes.NewReader(buffer)
	fileType := http.DetectContentType(buffer)
	params := &s3.PutObjectInput{
		Bucket:        aws.String(bucketName),
		Key:           aws.String(key),
		ACL:           aws.String("public-read"),
		Body:          fileBytes,
		ContentLength: &size,
		ContentType:   aws.String(fileType),
		Metadata: map[string]*string{
			"Key": aws.String("MetadataValue"),
		},
	}

	if _, err := svc.PutObject(params); err != nil {
		log.Fatal("Error uploading ", key, " to S3: ", err)
	}
}
Example #11
func TestPresignRequest(t *testing.T) {
	svc := s3.New(unit.Session)
	req, _ := svc.PutObjectRequest(&s3.PutObjectInput{
		Bucket:             aws.String("bucket"),
		Key:                aws.String("key"),
		ContentDisposition: aws.String("a+b c$d"),
		ACL:                aws.String("public-read"),
	})
	req.Time = time.Unix(0, 0)
	urlstr, headers, err := req.PresignRequest(5 * time.Minute)

	assert.NoError(t, err)

	expectedDate := "19700101T000000Z"
	expectedHeaders := "content-disposition;host;x-amz-acl"
	expectedSig := "2d76a414208c0eac2a23ef9c834db9635ecd5a0fbb447a00ad191f82d854f55b"
	expectedCred := "AKID/19700101/mock-region/s3/aws4_request"
	expectedHeaderMap := http.Header{
		"x-amz-acl":           []string{"public-read"},
		"content-disposition": []string{"a+b c$d"},
	}

	u, _ := url.Parse(urlstr)
	urlQ := u.Query()
	assert.Equal(t, expectedSig, urlQ.Get("X-Amz-Signature"))
	assert.Equal(t, expectedCred, urlQ.Get("X-Amz-Credential"))
	assert.Equal(t, expectedHeaders, urlQ.Get("X-Amz-SignedHeaders"))
	assert.Equal(t, expectedDate, urlQ.Get("X-Amz-Date"))
	assert.Equal(t, expectedHeaderMap, headers)
	assert.Equal(t, "300", urlQ.Get("X-Amz-Expires"))

	assert.NotContains(t, urlstr, "+") // + encoded as %20
}
Example #12
func (a Artifact) Download(update *cr.Update) (string, error) {
	s3svc := s3.New(session.New(&aws.Config{
		Region:      aws.String(s3Region),
		Credentials: credentials.NewStaticCredentials(s3AccessKey, s3SecretKey, ""),
	}))

	key := fmt.Sprintf("/%s/%s", a.ExecutablePrefix, update.Filename)
	params := &s3.GetObjectInput{
		Bucket: aws.String(s3Bucket),
		Key:    aws.String(key),
	}
	resp, err := s3svc.GetObject(params)
	if err != nil {
		return "", err
	}

	artifactPath := filepath.Join(a.ExecutableDir, a.VersionedArtifact(update.Version))
	artifact, err := os.Create(artifactPath)
	if err != nil {
		return "", err
	}
	artifact.Chmod(0755)
	defer artifact.Close()

	if _, err := io.Copy(artifact, resp.Body); err != nil {
		return "", err
	}

	return artifactPath, nil
}
Example #13
// New initializes a new S3 client connection based on config.
func New() *S3Client {
	var (
		cfg *aws.Config
	)

	if config.S3.Endpoint != "" {
		cfg = &aws.Config{
			Endpoint:         aws.String(config.S3.Endpoint),
			DisableSSL:       aws.Bool(strings.HasPrefix(config.S3.Endpoint, "http://")),
			Region:           aws.String(config.S3.Region),
			S3ForcePathStyle: aws.Bool(config.S3.PathStyle),
		}
	} else {
		cfg = &aws.Config{
			Region:           aws.String(config.S3.Region),
			S3ForcePathStyle: aws.Bool(config.S3.PathStyle),
		}
	}

	if config.S3.Access != "" && config.S3.Secret != "" {
		cfg.Credentials = credentials.NewStaticCredentials(
			config.S3.Access,
			config.S3.Secret,
			"",
		)
	}

	return &S3Client{
		client: s3.New(
			session.New(),
			cfg,
		),
	}
}
Example #14
func (s3 *s3driver) CheckDataAndGetSize(dpconn, itemlocation, fileName string) (exist bool, size int64, err error) {
	bucket := getAwsInfoFromDpconn(dpconn)

	destFullPathFileName := bucket + "/" + itemlocation + "/" + fileName
	log.Info(destFullPathFileName)

	AWS_REGION = Env("AWS_REGION", false)

	svc := s3aws.New(session.New(&aws.Config{Region: aws.String(AWS_REGION)}))
	result, err := svc.ListObjects(&s3aws.ListObjectsInput{Bucket: aws.String(bucket),
		Prefix: aws.String(itemlocation + "/" + fileName)})
	if err != nil {
		log.Error("Failed to list objects", err)
		return exist, size, err
	}

	exist = false
	for _, v := range result.Contents {
		log.Infof("Tag:%s, key:%s, size:%v\n", aws.StringValue(v.ETag), aws.StringValue(v.Key), aws.Int64Value(v.Size))
		if aws.StringValue(v.Key) == fileName {
			size = aws.Int64Value(v.Size)
			exist = true
		}
	}

	return
}
Example #15
// CreateS3RollbackFunc creates an S3 rollback function that attempts to delete a previously
// uploaded item. Note that s3ArtifactURL may include a `versionId` query arg
// to denote the specific version to delete.
func CreateS3RollbackFunc(awsSession *session.Session, s3ArtifactURL string) RollbackFunction {
	return func(logger *logrus.Logger) error {
		logger.WithFields(logrus.Fields{
			"URL": s3ArtifactURL,
		}).Info("Deleting S3 object")
		artifactURLParts, artifactURLPartsErr := url.Parse(s3ArtifactURL)
		if nil != artifactURLPartsErr {
			return artifactURLPartsErr
		}
		// Bucket is the first component
		s3Bucket := strings.Split(artifactURLParts.Host, ".")[0]
		s3Client := s3.New(awsSession)
		params := &s3.DeleteObjectInput{
			Bucket: aws.String(s3Bucket),
			Key:    aws.String(artifactURLParts.Path),
		}
		versionID := artifactURLParts.Query().Get("versionId")
		if "" != versionID {
			params.VersionId = aws.String(versionID)
		}
		_, err := s3Client.DeleteObject(params)
		if err != nil {
			logger.WithFields(logrus.Fields{
				"Error": err,
			}).Warn("Failed to delete S3 item during rollback cleanup")
		}
		return err
	}
}
Example #16
// NewPublishedStorageRaw creates published storage from raw aws credentials
func NewPublishedStorageRaw(
	bucket, defaultACL, prefix, storageClass, encryptionMethod string,
	plusWorkaround, disabledMultiDel bool,
	config *aws.Config,
) (*PublishedStorage, error) {
	if defaultACL == "" {
		defaultACL = "private"
	}

	if storageClass == "STANDARD" {
		storageClass = ""
	}

	sess := session.New(config)

	result := &PublishedStorage{
		s3:               s3.New(sess),
		bucket:           bucket,
		config:           config,
		acl:              defaultACL,
		prefix:           prefix,
		storageClass:     storageClass,
		encryptionMethod: encryptionMethod,
		plusWorkaround:   plusWorkaround,
		disableMultiDel:  disabledMultiDel,
	}

	return result, nil
}
Example #17
func main() {

	args := os.Args

	if !(len(args) == 3 && !filepath.IsAbs(args[1])) {
		log.Fatal("usage: <relativePath> <push|pull>")
	}

	p := args[1]

	t := args[2]

	c := config{}
	if e := c.ReadOrNew(p); e != nil {
		log.Fatal(e)
	}

	sess := session.New(&aws.Config{Region: aws.String("eu-west-1")})
	s3serv := s3.New(sess)

	if t == "push" {
		push(s3serv, c, p)
	} else if t == "pull" {
		pull(s3serv, c, p)
	}

}
Example #18
func Download(region string, bucket string, key string, toFile string) error {
	sess := session.New(&aws.Config{Region: aws.String(region)})

	client := s3.New(sess)
	result, err := client.GetObject(&s3.GetObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		log.Println("Failed to get object, ", err)
		return err
	}
	defer result.Body.Close()

	file, err := os.Create(toFile)
	if err != nil {
		log.Println("Failed to create file, ", err)
		return err
	}
	defer file.Close()

	if _, err := io.Copy(file, result.Body); err != nil {
		log.Println("Failed to copy object to file, ", err)
		return err
	}
	return nil
}
Example #19
func NewSnippetHandler(w http.ResponseWriter, r *http.Request) {
	svc := s3.New(session.New())

	payload, err := ioutil.ReadAll(r.Body)

	if err != nil {
		handleError(err, w, http.StatusInternalServerError)
		return
	}

	objectId := md5sum(payload)

	params := &s3.PutObjectInput{
		Bucket: aws.String(os.Getenv("AWS_S3_BUCKET")),
		Key:    aws.String(objectId),
		Body:   bytes.NewReader(payload),
	}

	resp, err := svc.PutObject(params)

	if err != nil {
		handleError(err, w, http.StatusInternalServerError)
		return
	}

	fmt.Println(resp)
	fmt.Println(objectId)

	fmt.Fprintf(w, "%s", urlFor(objectId, r))
}
Example #20
func loadManager() *sneaker.Manager {
	u, err := url.Parse(os.Getenv("SNEAKER_S3_PATH"))
	if err != nil {
		log.Fatalf("bad SNEAKER_S3_PATH: %s", err)
	}
	if u.Path != "" && u.Path[0] == '/' {
		u.Path = u.Path[1:]
	}

	ctxt, err := parseContext(os.Getenv("SNEAKER_MASTER_CONTEXT"))
	if err != nil {
		log.Fatalf("bad SNEAKER_MASTER_CONTEXT: %s", err)
	}

	return &sneaker.Manager{
		Objects: s3.New(nil),
		Envelope: sneaker.Envelope{
			KMS: kms.New(nil),
		},
		Bucket:            u.Host,
		Prefix:            u.Path,
		EncryptionContext: ctxt,
		KeyID:             os.Getenv("SNEAKER_MASTER_KEY"),
	}
}
Example #21
func listS3ObjectsInBucket(response *analyticsResponse, bucketID string) {

	//
	// @todo Find out how to specify credentials here rather than from global
	// config
	//
	log.Printf("Creating session....")
	mySession := session.New(&aws.Config{Region: aws.String("us-east-1")})
	log.Printf("Connecting to S3....")
	myS3svc := s3.New(mySession)

	log.Printf("Listing objects in '%s'....", bucketID)

	i := 0
	err := myS3svc.ListObjectsPages(&s3.ListObjectsInput{
		Bucket: &bucketID,
	}, func(p *s3.ListObjectsOutput, last bool) (shouldContinue bool) {
		fmt.Println("Page,", i)
		i++

		for _, obj := range p.Contents {
			fmt.Println("Object:", *obj.Key)
		}
		return true
	})

	if err != nil {
		fmt.Println("Failed to list objects\n", err)
	}
}
Example #22
// newS3Storage initializes an S3Storage for the given URL.
func newS3Storage(u *url.URL) (storageAdapter, error) {
	return &S3Storage{
		bucket: u.Host,
		path:   u.Path,
		conn:   s3.New(&aws.Config{}),
	}, nil
}
Example #23
// NewDownloader inits and returns a Downloader pointer
func NewDownloader(args *cfg.InArgs, conf *cfg.Cfg) (*Downloader, error) {
	if args == nil || conf == nil {
		return nil, ErrInvalidArgs
	}
	creds := credentials.NewStaticCredentials(conf.AWSAccessKeyID, conf.AWSSecretKey, "")
	if _, err := creds.Get(); err != nil {
		return nil, err
	}

	awsConf := &aws.Config{Credentials: creds, Region: aws.String(conf.Region)}
	sess := session.New(awsConf)
	client := s3.New(sess, awsConf)

	manager := NewS3DownloadManager(sess)

	d := &Downloader{
		downloadManager: manager,
		args:            agrs,
		pageLister:      client,
		regexp:          regexp.MustCompile(agrs.Regexp),
		workers:         make(chan int, 50),
		fileCreator:     &fsAdapter{},
	}

	d.pageIterator = d.pickPageIterator()
	return d, nil
}
Example #24
func listS3(wg *sync.WaitGroup, index int, results chan<- []string) {
	defer wg.Done()

	var creds *credentials.Credentials
	if *accessFlag != "" && *secretFlag != "" {
		creds = credentials.NewStaticCredentials(*accessFlag, *secretFlag, "")
	} else {
		creds = credentials.AnonymousCredentials
	}
	sess := session.New(aws.NewConfig().WithCredentials(creds).WithRegion(*regionFlag).WithEndpoint(*endpointFlag).WithS3ForcePathStyle(true))

	prefix := fmt.Sprintf("%s%x", *prefixFlag, index)

	svc := s3.New(sess)
	inputparams := &s3.ListObjectsInput{
		Bucket: aws.String(*bucketFlag),
		Prefix: aws.String(prefix),
	}

	result := make([]string, 0, 1000)

	// Collect keys from every page; the result is sent after pagination finishes,
	// so the WaitGroup and results channel are serviced even if listing stops early.
	svc.ListObjectsPages(inputparams, func(page *s3.ListObjectsOutput, lastPage bool) bool {
		for _, value := range page.Contents {
			result = append(result, *value.Key)
		}
		return true
	})

	results <- result
}
Example #25
func ListBuckets(region string) {
	config := aws.NewConfig().WithRegion(region)
	sess := session.New(config)
	svc := s3.New(sess)

	var params *s3.ListBucketsInput
	resp, err := svc.ListBuckets(params)

	if err != nil {
		if awsErr, ok := err.(awserr.Error); ok {
			// Generic AWS error with Code, Message, and original error (if any)
			fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
			if reqErr, ok := err.(awserr.RequestFailure); ok {
				// A service error occurred
				fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
			}
		} else {
			// This case should never be hit, the SDK should always return an
			// error which satisfies the awserr.Error interface.
			fmt.Println(err.Error())
		}
	}

	// Pretty-print the response data.
	fmt.Println(awsutil.Prettify(resp))
}
Example #26
// Lists all objects in a bucket using pagination
//
// Usage:
// listObjects <bucket>
func main() {
	if len(os.Args) < 2 {
		fmt.Println("you must specify a bucket")
		return
	}

	sess, err := session.NewSession()
	if err != nil {
		fmt.Println("failed to create session,", err)
		return
	}

	svc := s3.New(sess)

	i := 0
	err = svc.ListObjectsPages(&s3.ListObjectsInput{
		Bucket: &os.Args[1],
	}, func(p *s3.ListObjectsOutput, last bool) (shouldContinue bool) {
		fmt.Println("Page,", i)
		i++

		for _, obj := range p.Contents {
			fmt.Println("Object:", *obj.Key)
		}
		return true
	})
	if err != nil {
		fmt.Println("failed to list objects", err)
		return
	}
}
Example #27
func TestUploadFailCleanup(t *testing.T) {
	svc := s3.New(nil)

	// Break checksum on 2nd part so it fails
	part := 0
	svc.Handlers.Build.PushBack(func(r *request.Request) {
		if r.Operation.Name == "UploadPart" {
			if part == 1 {
				r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "000")
			}
			part++
		}
	})

	key := "12mb-leave"
	mgr := s3manager.NewUploader(&s3manager.UploadOptions{
		S3:                svc,
		LeavePartsOnError: false,
	})
	_, err := mgr.Upload(&s3manager.UploadInput{
		Bucket: bucketName,
		Key:    &key,
		Body:   bytes.NewReader(integBuf12MB),
	})
	assert.Error(t, err)
	uploadID := ""
	if merr, ok := err.(s3manager.MultiUploadFailure); ok {
		uploadID = merr.UploadID()
	}
	assert.NotEmpty(t, uploadID)

	_, err = svc.ListParts(&s3.ListPartsInput{
		Bucket: bucketName, Key: &key, UploadId: &uploadID})
	assert.Error(t, err)
}
Example #28
func New(config Config) *Client {
	credentials := credentials.NewStaticCredentials(config.AccessKey, config.SecretKey, "")
	sdkConfig := &aws.Config{
		Credentials: credentials,
		Region:      aws.String(config.RegionName),
	}

	endpointOverrides := config.EndpointOverrides
	if endpointOverrides == nil {
		endpointOverrides = &Endpoints{}
	}

	route53Client := route53.New(sdkConfig.Merge(&aws.Config{MaxRetries: aws.Int(7), Endpoint: aws.String(endpointOverrides.Route53)}))
	ec2Client := ec2.New(sdkConfig.Merge(&aws.Config{MaxRetries: aws.Int(7), Endpoint: aws.String(endpointOverrides.EC2)}))
	s3Client := s3.New(sdkConfig.Merge(&aws.Config{MaxRetries: aws.Int(7), Endpoint: aws.String(endpointOverrides.S3), S3ForcePathStyle: aws.Bool(true)}))
	cloudformationClient := cloudformation.New(sdkConfig.Merge(&aws.Config{MaxRetries: aws.Int(7), Endpoint: aws.String(endpointOverrides.Cloudformation)}))

	return &Client{
		EC2:            ec2Client,
		S3:             s3Client,
		Route53:        route53Client,
		Cloudformation: cloudformationClient,
		// HostedZoneID:   config.HostedZoneID,
		// HostedZoneName: config.HostedZoneName,
		Bucket: config.Bucket,
	}
}
Example #29
func Test200WithErrorUnmarshalError(t *testing.T) {
	s := s3.New(unit.Session)
	s.Handlers.Send.Clear()
	s.Handlers.Send.PushBack(func(r *request.Request) {
		r.HTTPResponse = &http.Response{
			StatusCode:    200,
			Header:        http.Header{"X-Amz-Request-Id": []string{"abc123"}},
			Body:          ioutil.NopCloser(strings.NewReader(completeMultiErrResp)),
			ContentLength: -1,
		}
		r.HTTPResponse.Status = http.StatusText(r.HTTPResponse.StatusCode)
	})
	_, err := s.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket: aws.String("bucket"), Key: aws.String("key"),
		UploadId: aws.String("id"),
		MultipartUpload: &s3.CompletedMultipartUpload{Parts: []*s3.CompletedPart{
			{ETag: aws.String("etag"), PartNumber: aws.Int64(1)},
		}},
	})

	assert.Error(t, err)

	assert.Equal(t, "SomeException", err.(awserr.Error).Code())
	assert.Equal(t, "Exception message", err.(awserr.Error).Message())
	assert.Equal(t, "abc123", err.(awserr.RequestFailure).RequestID())
}
Example #30
func dlLoggingSvc(data []byte) (*s3.S3, *[]string, *[]string) {
	var m sync.Mutex
	names := []string{}
	ranges := []string{}

	svc := s3.New(nil)
	svc.Handlers.Send.Clear()
	svc.Handlers.Send.PushBack(func(r *request.Request) {
		m.Lock()
		defer m.Unlock()

		names = append(names, r.Operation.Name)
		ranges = append(ranges, *r.Params.(*s3.GetObjectInput).Range)

		rerng := regexp.MustCompile(`bytes=(\d+)-(\d+)`)
		rng := rerng.FindStringSubmatch(r.HTTPRequest.Header.Get("Range"))
		start, _ := strconv.ParseInt(rng[1], 10, 64)
		fin, _ := strconv.ParseInt(rng[2], 10, 64)
		fin++

		if fin > int64(len(data)) {
			fin = int64(len(data))
		}

		r.HTTPResponse = &http.Response{
			StatusCode: 200,
			Body:       ioutil.NopCloser(bytes.NewReader(data[start:fin])),
			Header:     http.Header{},
		}
		r.HTTPResponse.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d",
			start, fin, len(data)))
	})

	return svc, &names, &ranges
}