// Retrieve generates a new set of temporary credentials using STS.
func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {

	// Apply defaults where parameters are not set.
	if p.Client == nil {
		// A nil config makes the client fall back to the SDK's
		// default configuration.
		p.Client = sts.New(nil)
	}
	if p.RoleSessionName == "" {
		// Fall back to a session name that is reasonably likely to be
		// unique: the current UTC time in nanoseconds.
		p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano())
	}
	if p.Duration == 0 {
		// Expire as often as possible; 15 minutes is the shortest
		// duration AWS permits for AssumeRole.
		p.Duration = 15 * time.Minute
	}

	// Exchange the client's base credentials for temporary role
	// credentials via STS AssumeRole.
	roleOutput, err := p.Client.AssumeRole(&sts.AssumeRoleInput{
		DurationSeconds: aws.Int64(int64(p.Duration / time.Second)),
		RoleArn:         aws.String(p.RoleARN),
		RoleSessionName: aws.String(p.RoleSessionName),
		ExternalId:      p.ExternalID,
	})

	if err != nil {
		return credentials.Value{}, err
	}

	// Expire ExpiryWindow before the credentials actually do, so
	// fresh credentials are generated proactively.
	p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow)

	return credentials.Value{
		AccessKeyID:     *roleOutput.Credentials.AccessKeyId,
		SecretAccessKey: *roleOutput.Credentials.SecretAccessKey,
		SessionToken:    *roleOutput.Credentials.SessionToken,
	}, nil
}
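For reference, a minimal sketch of how a provider like this is typically wired into the SDK's credential handling; the role ARN is a placeholder, and the constructor style matches the pre-1.0 aws-sdk-go API the example above targets:

// credentials.NewCredentials wraps the provider and caches the value
// Retrieve returns until IsExpired reports true.
creds := credentials.NewCredentials(&AssumeRoleProvider{
	RoleARN: "arn:aws:iam::123456789012:role/example", // placeholder ARN
})

// Clients built with these credentials transparently re-run Retrieve
// once the expiry window is reached.
svc := sts.New(&aws.Config{Credentials: creds})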
Example #2
// getIamRoles pulls IAM roles from your account (up to the first 1000;
// truncated results are not paged through).
func getIamRoles() ([]*IamRole, map[string]*string) {
	debug := debug.Debug("core.getIamRoles")
	i := iam.New(nil)
	r, err := i.ListRoles(&iam.ListRolesInput{
		// Load up to 1000 roles in a single call. IsTruncated is not
		// checked, so any roles beyond that are silently dropped.
		MaxItems: aws.Int64(1000),
	})

	if err != nil {
		debug("listing IAM roles failed (possibly missing the iam:ListRoles permission)")
		return []*IamRole{}, map[string]*string{}
	}

	roles := make([]*IamRole, len(r.Roles))
	roleMap := make(map[string]*string)
	for idx, role := range r.Roles {
		roles[idx] = &IamRole{
			Arn:  role.Arn,
			Name: role.RoleName,
		}
		roleMap[*role.RoleName] = role.Arn
	}

	return roles, roleMap
}
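A short, hypothetical caller for getIamRoles; the role name looked up is a placeholder:

// The map resolves a role name to its ARN; the slice preserves the
// full listing for display.
roles, roleMap := getIamRoles()
fmt.Printf("loaded %d IAM roles\n", len(roles))
if arn, ok := roleMap["my-app-role"]; ok { // placeholder role name
	fmt.Printf("my-app-role -> %s\n", *arn)
}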
Example #3
// uploadS3MPU streams f to the given bucket and key using the S3
// multipart upload API, aborting the upload if any step fails.
func uploadS3MPU(
	s *s3.S3,
	f *os.File,
	bucket, key *string,
) error {
	debug := debug.Debug("uploadS3MPU")
	mpu := &s3.CreateMultipartUploadInput{
		Bucket: bucket,
		Key:    key,
	}

	// canceler aborts the in-flight upload so S3 does not retain (and
	// bill for) the parts uploaded so far.
	canceler := func(uploadId *string) {
		debug("canceling upload")
		_, err := s.AbortMultipartUpload(
			&s3.AbortMultipartUploadInput{
				Bucket:   bucket,
				Key:      key,
				UploadId: uploadId,
			},
		)
		if err != nil {
			fmt.Printf(
				"WARNING: upload abort failed for upload ID %s\n",
				*uploadId,
			)
		}
	}

	cr, err := s.CreateMultipartUpload(mpu)
	if err != nil {
		return err
	}

	debug("created multipart upload")

	buf := new(bytes.Buffer)
	bslice := make([]byte, 8192) // read the file in 8 KiB chunks
	var pNum int64 = 1

	// S3 needs the ETag and part number of every uploaded part in
	// order to complete the multipart upload at the end.
	parts := make([]*s3.CompletedPart, 0)

	for {
		n, err := f.Read(bslice)

		isEOF := err == io.EOF

		if err != nil && !isEOF {
			f.Close()
			canceler(cr.UploadId)
			return err
		}

		if isEOF {
			debug("reached end of file")
		}

		// Only the first n bytes of bslice are valid: Read may return
		// fewer bytes than requested, and n may be 0 at EOF.
		buf.Write(bslice[:n])

		// Flush the buffer once it holds at least 5 MiB (the minimum
		// S3 part size for all but the last part), or at EOF.
		if buf.Len() >= (1024*1024*5) || isEOF {
			debug("have file data, uploading chunk")
			var err error
			if buf.Len() > 0 {
				var p *s3.UploadPartOutput
				p, err = s.UploadPart(
					&s3.UploadPartInput{
						Bucket:     bucket,
						Key:        key,
						PartNumber: aws.Int64(pNum),
						UploadId:   cr.UploadId,
						Body:       bytes.NewReader(buf.Bytes()),
					},
				)

				if err != nil {
					// If uploading a part fails,
					// abort the whole upload.
					f.Close()
					canceler(cr.UploadId)
					return err
				}

				parts = append(parts,
					&s3.CompletedPart{
						ETag:       p.ETag,
						PartNumber: aws.Int64(pNum),
					},
				)
			}

			pNum++
			buf.Reset()

			if isEOF {
				f.Close()
				debug("completing upload")
				iput := &s3.CompleteMultipartUploadInput{
					Bucket:   bucket,
					Key:      key,
					UploadId: cr.UploadId,
					MultipartUpload: &s3.CompletedMultipartUpload{
						Parts: parts,
					},
				}
				_, err := s.CompleteMultipartUpload(
					iput,
				)

				if err != nil {
					canceler(cr.UploadId)
					return err
				}

				break
			}
		}
	}
	return nil
}
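A hypothetical caller for the uploader above; the file path, bucket, and key are placeholders, and the nil-config client constructor matches the style of the earlier examples:

f, err := os.Open("/tmp/backup.tar.gz") // placeholder path
if err != nil {
	return err
}
// Note that uploadS3MPU closes f itself on both the success and
// failure paths.
err = uploadS3MPU(
	s3.New(nil),
	f,
	aws.String("my-bucket"),     // placeholder bucket
	aws.String("backup.tar.gz"), // placeholder key
)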