func init() { if os.Getenv("DEBUG") != "" { aws.DefaultConfig.LogLevel = aws.LogLevel(aws.LogDebug) } if os.Getenv("DEBUG_SIGNING") != "" { aws.DefaultConfig.LogLevel = aws.LogLevel(aws.LogDebugWithSigning) } if os.Getenv("DEBUG_BODY") != "" { aws.DefaultConfig.LogLevel = aws.LogLevel(aws.LogDebugWithSigning | aws.LogDebugWithHTTPBody) } if aws.StringValue(aws.DefaultConfig.Region) == "" { panic("AWS_REGION must be configured to run integration tests") } }
func copyConfig(config *Config) *aws.Config { if config == nil { config = &Config{} } c := &aws.Config{ Credentials: credentials.AnonymousCredentials, Endpoint: config.Endpoint, HTTPClient: config.HTTPClient, Logger: config.Logger, LogLevel: config.LogLevel, MaxRetries: config.MaxRetries, } if c.HTTPClient == nil { c.HTTPClient = http.DefaultClient } if c.Logger == nil { c.Logger = aws.NewDefaultLogger() } if c.LogLevel == nil { c.LogLevel = aws.LogLevel(aws.LogOff) } if c.MaxRetries == nil { c.MaxRetries = aws.Int(DefaultRetries) } return c }
func config() *aws.Config { log := aws.LogLevel(aws.LogOff) cfg := app.NewConfig() if cfg.AwsLog { log = aws.LogLevel(aws.LogDebug) } return &aws.Config{ Credentials: credentials.NewChainCredentials( []credentials.Provider{ &credentials.EnvProvider{}, &ec2rolecreds.EC2RoleProvider{ExpiryWindow: cfg.AwsRoleExpiry * time.Minute}, }), Region: aws.String(os.Getenv("AWS_REGION")), LogLevel: log, } }
func GetFile(region, bucketName, path, keyId, secretKey, token string) (io.ReadCloser, int64, error) { creds := credentials.NewStaticCredentials(keyId, secretKey, token) if _, err := creds.Get(); err != nil { return nil, 0, err } awsconfig := &aws.Config{ Region: aws.String(region), Endpoint: aws.String("s3.amazonaws.com"), S3ForcePathStyle: aws.Bool(true), Credentials: creds, LogLevel: aws.LogLevel(0), } sess := session.New(awsconfig) svc := s3.New(sess) params := &s3.GetObjectInput{ Bucket: aws.String(bucketName), Key: aws.String(path), } resp, err := svc.GetObject(params) if err != nil { return nil, 0, err } // log.Println(resp) return resp.Body, *resp.ContentLength, nil }
// NewSessionWithLevel returns an AWS Session (https://github.com/aws/aws-sdk-go/wiki/Getting-Started-Configuration) // object that attaches a debug level handler to all AWS requests from services // sharing the session value. func NewSessionWithLevel(level aws.LogLevelType, logger *logrus.Logger) *session.Session { awsConfig := &aws.Config{ CredentialsChainVerboseErrors: aws.Bool(true), } // Log AWS calls if needed switch logger.Level { case logrus.DebugLevel: awsConfig.LogLevel = aws.LogLevel(level) } awsConfig.Logger = &logrusProxy{logger} sess := session.New(awsConfig) sess.Handlers.Send.PushFront(func(r *request.Request) { logger.WithFields(logrus.Fields{ "Service": r.ClientInfo.ServiceName, "Operation": r.Operation.Name, "Method": r.Operation.HTTPMethod, "Path": r.Operation.HTTPPath, "Payload": r.Params, }).Debug("AWS Request") }) logger.WithFields(logrus.Fields{ "Name": aws.SDKName, "Version": aws.SDKVersion, }).Debug("AWS SDK Info") return sess }
func New(debug bool) MetadataFetcher { sess := session.New() if debug { sess.Config.LogLevel = aws.LogLevel(aws.LogDebug) } return ec2metadata.New(sess) }
func New(debug bool) MetadataFetcher { c := ec2metadata.Config{} if debug { c.LogLevel = aws.LogLevel(aws.LogDebug) } return ec2metadata.New(&c) }
// TestMain gates the package's integration tests: it exits early unless the
// -integration flag is set, points the SDK at a local DynamoDB endpoint on
// port 8000, loads fixtures, and then runs the test binary.
func TestMain(m *testing.M) {
	flag.Parse()
	if !*integration {
		fmt.Fprintln(os.Stderr, "Skipping integration tests")
		os.Exit(0)
	}
	// Package-level cfg/sess are shared by the rest of the test file.
	cfg = &aws.Config{
		Region:      aws.String("us-west-2"),
		Endpoint:    aws.String("http://localhost:8000"),
		Credentials: credentials.NewSharedCredentials("", *awsprofile),
	}
	sess = session.New(cfg)
	if *dynamodebug {
		sess.Config.LogLevel = aws.LogLevel(aws.LogDebug)
	}
	// Fixtures must load before any test runs; abort the binary otherwise.
	if err := loadUserFixtures(sess); err != nil {
		fmt.Fprintf(os.Stderr, "Error loading 'user' integration fixtures: %s", err)
		os.Exit(1)
	}
	if err := loadPostFixtures(sess); err != nil {
		fmt.Fprintf(os.Stderr, "Error loading 'post' integration fixtures: %s", err)
		os.Exit(1)
	}
	os.Exit(m.Run())
}
// roleHandler mimics the EC2 metadata credentials endpoint: it assumes the
// app's configured role via STS and writes the temporary credentials as
// JSON in the metadata-document format.
func (app *App) roleHandler(w http.ResponseWriter, r *http.Request) {
	// NOTE(review): aws.LogLevel(2) is a raw number, not one of the named
	// aws.Log* constants used elsewhere in this file — presumably SDK logging
	// is effectively off here; confirm intent.
	svc := sts.New(session.New(), &aws.Config{LogLevel: aws.LogLevel(2)})
	resp, err := svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String(app.RoleArn),
		RoleSessionName: aws.String("aws-mock-metadata"),
	})
	if err != nil {
		log.Errorf("Error assuming role %+v", err)
		http.Error(w, err.Error(), 500)
		return
	}
	log.Debugf("STS response %+v", resp)
	// Shape matches what the real metadata service returns for a role.
	credentials := Credentials{
		AccessKeyID:     *resp.Credentials.AccessKeyId,
		Code:            "Success",
		Expiration:      resp.Credentials.Expiration.Format("2006-01-02T15:04:05Z"),
		LastUpdated:     time.Now().Format("2006-01-02T15:04:05Z"),
		SecretAccessKey: *resp.Credentials.SecretAccessKey,
		Token:           *resp.Credentials.SessionToken,
		Type:            "AWS-HMAC",
	}
	if err := json.NewEncoder(w).Encode(credentials); err != nil {
		log.Errorf("Error sending json %+v", err)
		http.Error(w, err.Error(), 500)
	}
}
func (gs GlacierStorage) getStorageClient() *glacier.Glacier { creds := credentials.NewStaticCredentials(gs.aws_access_key_id, gs.aws_secret_access_key, "") return glacier.New(&aws.Config{ Region: aws.String(gs.region), Credentials: creds, LogLevel: aws.LogLevel(1), }) }
func getService(debug bool) *route53.Route53 { config := aws.Config{} // ensures throttled requests are retried config.MaxRetries = aws.Int(100) if debug { config.LogLevel = aws.LogLevel(aws.LogDebug) } return route53.New(&config) }
// GetECRAuth requests AWS ECR API to get docker.AuthConfiguration token func GetECRAuth(registry, region string) (result docker.AuthConfiguration, err error) { _ecrAuthCache.mu.Lock() defer _ecrAuthCache.mu.Unlock() if token, ok := _ecrAuthCache.tokens[registry]; ok { return token, nil } defer func() { _ecrAuthCache.tokens[registry] = result }() cfg := &aws.Config{ Region: aws.String(region), } if log.StandardLogger().Level >= log.DebugLevel { cfg.LogLevel = aws.LogLevel(aws.LogDebugWithRequestErrors) } split := strings.Split(registry, ".") svc := ecr.New(session.New(), cfg) params := &ecr.GetAuthorizationTokenInput{ RegistryIds: []*string{aws.String(split[0])}, } res, err := svc.GetAuthorizationToken(params) if err != nil { return result, err } if len(res.AuthorizationData) == 0 { return result, nil } data, err := base64.StdEncoding.DecodeString(*res.AuthorizationData[0].AuthorizationToken) if err != nil { return result, err } userpass := strings.Split(string(data), ":") if len(userpass) != 2 { return result, fmt.Errorf("Cannot parse token got from ECR: %s", string(data)) } result = docker.AuthConfiguration{ Username: userpass[0], Password: userpass[1], ServerAddress: *res.AuthorizationData[0].ProxyEndpoint, } return }
// NewGoofys constructs the filesystem value for bucket: it configures S3
// debug logging when requested, creates the session and S3 client, verifies
// the bucket location, and seeds the root inode plus the inode/handle
// tables. Returns nil when the bucket location cannot be determined.
func NewGoofys(bucket string, awsConfig *aws.Config, flags *FlagStorage) *Goofys {
	// Set up the basic struct.
	fs := &Goofys{
		bucket: bucket,
		flags:  flags,
		umask:  0122,
	}
	if flags.DebugS3 {
		awsConfig.LogLevel = aws.LogLevel(aws.LogDebug | aws.LogDebugWithRequestErrors)
		s3Log.Level = logrus.DebugLevel
	}
	fs.awsConfig = awsConfig
	fs.sess = session.New(awsConfig)
	fs.s3 = fs.newS3()
	err := fs.detectBucketLocation()
	if err != nil {
		// NOTE(review): the error itself is discarded; callers only see nil.
		return nil
	}
	now := time.Now()
	// Root directory attributes; ownership comes from the mount flags.
	fs.rootAttrs = fuseops.InodeAttributes{
		Size:   4096,
		Nlink:  2,
		Mode:   flags.DirMode | os.ModeDir,
		Atime:  now,
		Mtime:  now,
		Ctime:  now,
		Crtime: now,
		Uid:    fs.flags.Uid,
		Gid:    fs.flags.Gid,
	}
	fs.bufferPool = BufferPool{}.Init()
	// Inode IDs start just past the reserved FUSE root ID.
	fs.nextInodeID = fuseops.RootInodeID + 1
	fs.inodes = make(map[fuseops.InodeID]*Inode)
	root := NewInode(aws.String(""), aws.String(""), flags)
	root.Id = fuseops.RootInodeID
	root.Attributes = &fs.rootAttrs
	fs.inodes[fuseops.RootInodeID] = root
	fs.inodesCache = make(map[string]*Inode)
	fs.nextHandleID = 1
	fs.dirHandles = make(map[fuseops.HandleID]*DirHandle)
	fs.fileHandles = make(map[fuseops.HandleID]*FileHandle)
	return fs
}
func getService(debug bool, profile string) *route53.Route53 { config := aws.Config{} if profile != "" { config.Credentials = credentials.NewSharedCredentials("", profile) } // ensures throttled requests are retried config.MaxRetries = aws.Int(100) if debug { config.LogLevel = aws.LogLevel(aws.LogDebug) } return route53.New(session.New(), &config) }
func getAWSConfig(region string, debug bool) *aws.Config { if debug { logging.SetLogging("DEBUG") return &aws.Config{ Region: aws.String(region), LogLevel: aws.LogLevel(aws.LogDebugWithHTTPBody), } } logging.SetLogging("INFO") return &aws.Config{ Region: aws.String(region), } }
func getConfig(c *cli.Context) *aws.Config { debug := c.Bool("debug") profile := c.String("profile") config := aws.Config{} if profile != "" { config.Credentials = credentials.NewSharedCredentials("", profile) } // ensures throttled requests are retried config.MaxRetries = aws.Int(100) if debug { config.LogLevel = aws.LogLevel(aws.LogDebug) } return &config }
// doAction is the CLI entry point: it registers RDS and ELB hosts with
// Mackerel, then polls their CloudWatch metrics once a minute forever
// (the quit channel is never closed, so the loop only exits on return).
func doAction(c *cli.Context) {
	awsKeyID := c.String("aws-key-id")
	awsSecKey := c.String("aws-secret-key")
	mackerelAPIKey := c.String("mackerel-api-key")
	client := mkr.NewClient(mackerelAPIKey)
	sess := session.New(&aws.Config{
		Credentials: credentials.NewStaticCredentials(awsKeyID, awsSecKey, ""),
		Region:      aws.String("ap-northeast-1"),
	})
	awsSession := NewAWSSession(sess)
	// NOTE(review): the log level is set after NewAWSSession already wrapped
	// the session — confirm the wrapper observes this late mutation.
	if c.Bool("debug") {
		sess.Config.LogLevel = aws.LogLevel(aws.LogDebug)
	}
	// Discover and register the monitored AWS resources.
	rdss := awsSession.fetchRDSList()
	awsSession.updateAWSElementList(rdss, client)
	elbs := awsSession.fetchLoadBalancerList()
	awsSession.updateAWSElementList(elbs, client)
	tickChan := time.NewTicker(60 * time.Second)
	quit := make(chan struct{})
	// Prime RDS metrics once before the ticker loop starts.
	awsSession.crawlRDSMetrics(client, rdss)
	for {
		select {
		case <-tickChan.C:
			awsSession.crawlELBMetrics(client, elbs)
			awsSession.crawlRDSMetrics(client, rdss)
		case <-quit:
			tickChan.Stop()
			return
		}
	}
	//listMetric(sess)
}
// New makes an instance of StorageS3 storage driver func New(client *docker.Client, cacheRoot string) *StorageS3 { retryer := NewRetryer(400, 6) // TODO: configure region? cfg := &aws.Config{ Region: aws.String("us-east-1"), Retryer: retryer, Logger: &Logger{}, } if log.StandardLogger().Level >= log.DebugLevel { cfg.LogLevel = aws.LogLevel(aws.LogDebugWithRequestErrors) } return &StorageS3{ client: client, cacheRoot: cacheRoot, s3: s3.New(session.New(), cfg), retryer: retryer, } }
// copyConfig maps the package-local Config onto an aws.Config, always using
// anonymous credentials and backfilling defaults for any field left unset.
// A nil input behaves like an empty configuration. The default HTTP client
// uses short dial timeouts — see the inline comment on the dialer.
func copyConfig(config *Config) *aws.Config {
	if config == nil {
		config = &Config{}
	}
	c := &aws.Config{
		Credentials: credentials.AnonymousCredentials,
		Endpoint:    config.Endpoint,
		HTTPClient:  config.HTTPClient,
		Logger:      config.Logger,
		LogLevel:    config.LogLevel,
		MaxRetries:  config.MaxRetries,
	}
	if c.HTTPClient == nil {
		c.HTTPClient = &http.Client{
			Transport: &http.Transport{
				Proxy: http.ProxyFromEnvironment,
				Dial: (&net.Dialer{
					// use a shorter timeout than default because the metadata
					// service is local if it is running, and to fail faster
					// if not running on an ec2 instance.
					Timeout:   5 * time.Second,
					KeepAlive: 30 * time.Second,
				}).Dial,
				TLSHandshakeTimeout: 10 * time.Second,
			},
		}
	}
	if c.Logger == nil {
		c.Logger = aws.NewDefaultLogger()
	}
	if c.LogLevel == nil {
		c.LogLevel = aws.LogLevel(aws.LogOff)
	}
	if c.MaxRetries == nil {
		c.MaxRetries = aws.Int(DefaultRetries)
	}
	return c
}
// SetUpSuite wires the suite to an S3 endpoint. With LOCAL_TEST it waits for
// a local server on 127.0.0.1:8080 and uses dummy static credentials;
// otherwise it targets us-west-2 with debug+signing SDK logging. In both
// cases the SigV4 signer is replaced with SignV2 before a smoke-test call.
func (s *GoofysTest) SetUpSuite(t *C) {
	//addr := "play.minio.io:9000"
	const LOCAL_TEST = true
	if LOCAL_TEST {
		addr := "127.0.0.1:8080"
		// Block until the local S3 stand-in is accepting connections.
		err := s.waitFor(t, addr)
		t.Assert(err, IsNil)
		s.awsConfig = &aws.Config{
			//Credentials: credentials.AnonymousCredentials,
			Credentials:      credentials.NewStaticCredentials("foo", "bar", ""),
			Region:           aws.String("us-west-2"),
			Endpoint:         aws.String(addr),
			DisableSSL:       aws.Bool(true),
			S3ForcePathStyle: aws.Bool(true),
			MaxRetries:       aws.Int(0),
			//Logger: t,
			//LogLevel: aws.LogLevel(aws.LogDebug),
			//LogLevel: aws.LogLevel(aws.LogDebug | aws.LogDebugWithHTTPBody),
		}
	} else {
		s.awsConfig = &aws.Config{
			Region:           aws.String("us-west-2"),
			DisableSSL:       aws.Bool(true),
			LogLevel:         aws.LogLevel(aws.LogDebug | aws.LogDebugWithSigning),
			S3ForcePathStyle: aws.Bool(true),
		}
	}
	s.sess = session.New(s.awsConfig)
	s.s3 = s3.New(s.sess)
	// Swap the signer for V2, then re-add the content-length build handler.
	s.s3.Handlers.Sign.Clear()
	s.s3.Handlers.Sign.PushBack(SignV2)
	s.s3.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
	// Smoke test the connection before any suite tests run.
	_, err := s.s3.ListBuckets(nil)
	t.Assert(err, IsNil)
}
// SetUpSuite launches a local Minio server on 127.0.0.1:9000, then points
// the suite's S3 client at it with the credentials setupMinio generated,
// verifying connectivity with a ListBuckets smoke test.
func (s *GoofysTest) SetUpSuite(t *C) {
	//addr := "play.minio.io:9000"
	addr := "127.0.0.1:9000"
	accessKey, secretKey := s.setupMinio(t, addr)
	s.awsConfig = &aws.Config{
		//Credentials: credentials.AnonymousCredentials,
		Credentials: credentials.NewStaticCredentials(accessKey, secretKey, ""),
		// Deliberately fake region — Minio does not validate it.
		Region:           aws.String("milkyway"), //aws.String("us-west-2"),
		Endpoint:         aws.String(addr),
		DisableSSL:       aws.Bool(true),
		S3ForcePathStyle: aws.Bool(true),
		MaxRetries:       aws.Int(0),
		Logger:           t,
		LogLevel:         aws.LogLevel(aws.LogDebug),
		//LogLevel: aws.LogLevel(aws.LogDebug | aws.LogDebugWithHTTPBody),
	}
	s.s3 = s3.New(s.awsConfig)
	// Smoke test the connection before any suite tests run.
	_, err := s.s3.ListBuckets(nil)
	t.Assert(err, IsNil)
}
func GetFileIAM(region, bucketName, path string) (io.ReadCloser, int64, error) { awsconfig := &aws.Config{ Region: aws.String(region), Endpoint: aws.String("s3.amazonaws.com"), S3ForcePathStyle: aws.Bool(true), LogLevel: aws.LogLevel(0), } sess := session.New(awsconfig) svc := s3.New(sess) params := &s3.GetObjectInput{ Bucket: aws.String(bucketName), Key: aws.String(path), } resp, err := svc.GetObject(params) if err != nil { return nil, 0, err } // log.Println(resp) return resp.Body, *resp.ContentLength, nil }
func ConfigureAws(region string) { logLevelType := func(logLevel uint) aws.LogLevelType { var awsLogLevel aws.LogLevelType switch logLevel { case 0: awsLogLevel = aws.LogOff case 1: awsLogLevel = aws.LogDebug case 2: awsLogLevel = aws.LogDebugWithSigning case 3: awsLogLevel = aws.LogDebugWithHTTPBody case 4: awsLogLevel = aws.LogDebugWithRequestRetries case 5: awsLogLevel = aws.LogDebugWithRequestErrors } return awsLogLevel } defaults.DefaultConfig.Credentials = credentials.NewStaticCredentials(Cfg.BackupSet.AccessKey, Cfg.BackupSet.SecretKey, "") defaults.DefaultConfig.Region = ®ion defaults.DefaultConfig.LogLevel = aws.LogLevel(logLevelType(uint(Cfg.AwsLogLevel))) }
// Client configures and returns a fully initialized AWSClient
func (c *Config) Client() (interface{}, error) {
	// Get the auth and region. This can fail if keys/regions were not
	// specified and we're attempting to use the environment.
	var errs []error
	log.Println("[INFO] Building AWS region structure")
	err := c.ValidateRegion()
	if err != nil {
		errs = append(errs, err)
	}
	var client AWSClient
	// Only build the client when the region validated; errors accumulate in
	// errs and are returned together at the end.
	if len(errs) == 0 {
		// store AWS region in client struct, for region specific operations such as
		// bucket storage in S3
		client.region = c.Region
		log.Println("[INFO] Building AWS auth structure")
		creds := GetCredentials(c.AccessKey, c.SecretKey, c.Token, c.Profile, c.CredsFilename)
		// Call Get to check for credential provider. If nothing found, we'll get an
		// error, and we can present it nicely to the user
		cp, err := creds.Get()
		if err != nil {
			if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoCredentialProviders" {
				errs = append(errs, fmt.Errorf(`No valid credential sources found for AWS Provider. Please see https://terraform.io/docs/providers/aws/index.html for more information on providing credentials for the AWS Provider`))
			} else {
				errs = append(errs, fmt.Errorf("Error loading credentials for AWS Provider: %s", err))
			}
			return nil, &multierror.Error{Errors: errs}
		}
		log.Printf("[INFO] AWS Auth provider used: %q", cp.ProviderName)
		awsConfig := &aws.Config{
			Credentials: creds,
			Region:      aws.String(c.Region),
			MaxRetries:  aws.Int(c.MaxRetries),
			HTTPClient:  cleanhttp.DefaultClient(),
		}
		if logging.IsDebugOrHigher() {
			awsConfig.LogLevel = aws.LogLevel(aws.LogDebugWithHTTPBody)
			awsConfig.Logger = awsLogger{}
		}
		// Optionally disable TLS verification on the shared HTTP client.
		if c.Insecure {
			transport := awsConfig.HTTPClient.Transport.(*http.Transport)
			transport.TLSClientConfig = &tls.Config{
				InsecureSkipVerify: true,
			}
		}
		// Set up base session
		sess := session.New(awsConfig)
		sess.Handlers.Build.PushFrontNamed(addTerraformVersionToUserAgent)
		// Some services exist only in us-east-1, e.g. because they manage
		// resources that can span across multiple regions, or because
		// signature format v4 requires region to be us-east-1 for global
		// endpoints:
		// http://docs.aws.amazon.com/general/latest/gr/sigv4_changes.html
		usEast1Sess := sess.Copy(&aws.Config{Region: aws.String("us-east-1")})
		// Some services have user-configurable endpoints
		awsEc2Sess := sess.Copy(&aws.Config{Endpoint: aws.String(c.Ec2Endpoint)})
		awsElbSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.ElbEndpoint)})
		awsIamSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.IamEndpoint)})
		dynamoSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.DynamoDBEndpoint)})
		kinesisSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.KinesisEndpoint)})
		// These two services need to be set up early so we can check on AccountID
		client.iamconn = iam.New(awsIamSess)
		client.stsconn = sts.New(sess)
		err = c.ValidateCredentials(client.stsconn)
		if err != nil {
			errs = append(errs, err)
			return nil, &multierror.Error{Errors: errs}
		}
		// Account ID lookup is best-effort; a failure leaves accountid empty.
		accountId, err := GetAccountId(client.iamconn, client.stsconn, cp.ProviderName)
		if err == nil {
			client.accountid = accountId
		}
		authErr := c.ValidateAccountId(client.accountid)
		if authErr != nil {
			errs = append(errs, authErr)
		}
		// One client per AWS service, sharing the base session or a
		// region/endpoint-specific copy of it.
		client.apigateway = apigateway.New(sess)
		client.autoscalingconn = autoscaling.New(sess)
		client.cfconn = cloudformation.New(sess)
		client.cloudfrontconn = cloudfront.New(sess)
		client.cloudtrailconn = cloudtrail.New(sess)
		client.cloudwatchconn = cloudwatch.New(sess)
		client.cloudwatcheventsconn = cloudwatchevents.New(sess)
		client.cloudwatchlogsconn = cloudwatchlogs.New(sess)
		client.codecommitconn = codecommit.New(usEast1Sess)
		client.codedeployconn = codedeploy.New(sess)
		client.dsconn = directoryservice.New(sess)
		client.dynamodbconn = dynamodb.New(dynamoSess)
		client.ec2conn = ec2.New(awsEc2Sess)
		client.ecrconn = ecr.New(sess)
		client.ecsconn = ecs.New(sess)
		client.efsconn = efs.New(sess)
		client.elasticacheconn = elasticache.New(sess)
		client.elasticbeanstalkconn = elasticbeanstalk.New(sess)
		client.elastictranscoderconn = elastictranscoder.New(sess)
		client.elbconn = elb.New(awsElbSess)
		client.emrconn = emr.New(sess)
		client.esconn = elasticsearch.New(sess)
		client.firehoseconn = firehose.New(sess)
		client.glacierconn = glacier.New(sess)
		client.kinesisconn = kinesis.New(kinesisSess)
		client.kmsconn = kms.New(sess)
		client.lambdaconn = lambda.New(sess)
		client.opsworksconn = opsworks.New(usEast1Sess)
		client.r53conn = route53.New(usEast1Sess)
		client.rdsconn = rds.New(sess)
		client.redshiftconn = redshift.New(sess)
		client.simpledbconn = simpledb.New(sess)
		client.s3conn = s3.New(sess)
		client.sesConn = ses.New(sess)
		client.snsconn = sns.New(sess)
		client.sqsconn = sqs.New(sess)
	}
	if len(errs) > 0 {
		return nil, &multierror.Error{Errors: errs}
	}
	return &client, nil
}
// Client configures and returns a fully initialized AWSClient
func (c *Config) Client() (interface{}, error) {
	// Get the auth and region. This can fail if keys/regions were not
	// specified and we're attempting to use the environment.
	log.Println("[INFO] Building AWS region structure")
	err := c.ValidateRegion()
	if err != nil {
		return nil, err
	}
	var client AWSClient
	// store AWS region in client struct, for region specific operations such as
	// bucket storage in S3
	client.region = c.Region
	log.Println("[INFO] Building AWS auth structure")
	creds, err := GetCredentials(c)
	if err != nil {
		return nil, err
	}
	// Call Get to check for credential provider. If nothing found, we'll get an
	// error, and we can present it nicely to the user
	cp, err := creds.Get()
	if err != nil {
		if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoCredentialProviders" {
			return nil, errors.New(`No valid credential sources found for AWS Provider. Please see https://terraform.io/docs/providers/aws/index.html for more information on providing credentials for the AWS Provider`)
		}
		return nil, fmt.Errorf("Error loading credentials for AWS Provider: %s", err)
	}
	log.Printf("[INFO] AWS Auth provider used: %q", cp.ProviderName)
	awsConfig := &aws.Config{
		Credentials:      creds,
		Region:           aws.String(c.Region),
		MaxRetries:       aws.Int(c.MaxRetries),
		HTTPClient:       cleanhttp.DefaultClient(),
		S3ForcePathStyle: aws.Bool(c.S3ForcePathStyle),
	}
	if logging.IsDebugOrHigher() {
		awsConfig.LogLevel = aws.LogLevel(aws.LogDebugWithHTTPBody)
		awsConfig.Logger = awsLogger{}
	}
	// Optionally disable TLS verification on the shared HTTP client.
	if c.Insecure {
		transport := awsConfig.HTTPClient.Transport.(*http.Transport)
		transport.TLSClientConfig = &tls.Config{
			InsecureSkipVerify: true,
		}
	}
	// Set up base session
	sess, err := session.NewSession(awsConfig)
	if err != nil {
		return nil, errwrap.Wrapf("Error creating AWS session: {{err}}", err)
	}
	// Removes the SDK Version handler, so we only have the provider User-Agent
	// Ex: "User-Agent: APN/1.0 HashiCorp/1.0 Terraform/0.7.9-dev"
	sess.Handlers.Build.Remove(request.NamedHandler{Name: "core.SDKVersionUserAgentHandler"})
	sess.Handlers.Build.PushFrontNamed(addTerraformVersionToUserAgent)
	// Extra auth-failure diagnostics are opt-in via environment variable.
	if extraDebug := os.Getenv("TERRAFORM_AWS_AUTHFAILURE_DEBUG"); extraDebug != "" {
		sess.Handlers.UnmarshalError.PushFrontNamed(debugAuthFailure)
	}
	// Some services exist only in us-east-1, e.g. because they manage
	// resources that can span across multiple regions, or because
	// signature format v4 requires region to be us-east-1 for global
	// endpoints:
	// http://docs.aws.amazon.com/general/latest/gr/sigv4_changes.html
	usEast1Sess := sess.Copy(&aws.Config{Region: aws.String("us-east-1")})
	// Some services have user-configurable endpoints
	awsEc2Sess := sess.Copy(&aws.Config{Endpoint: aws.String(c.Ec2Endpoint)})
	awsElbSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.ElbEndpoint)})
	awsIamSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.IamEndpoint)})
	awsS3Sess := sess.Copy(&aws.Config{Endpoint: aws.String(c.S3Endpoint)})
	dynamoSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.DynamoDBEndpoint)})
	kinesisSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.KinesisEndpoint)})
	// These two services need to be set up early so we can check on AccountID
	client.iamconn = iam.New(awsIamSess)
	client.stsconn = sts.New(sess)
	if !c.SkipCredsValidation {
		err = c.ValidateCredentials(client.stsconn)
		if err != nil {
			return nil, err
		}
	}
	// Account info lookup is best-effort; a failure leaves the fields empty.
	if !c.SkipRequestingAccountId {
		partition, accountId, err := GetAccountInfo(client.iamconn, client.stsconn, cp.ProviderName)
		if err == nil {
			client.partition = partition
			client.accountid = accountId
		}
	}
	authErr := c.ValidateAccountId(client.accountid)
	if authErr != nil {
		return nil, authErr
	}
	// One client per AWS service, sharing the base session or a
	// region/endpoint-specific copy of it.
	client.acmconn = acm.New(sess)
	client.apigateway = apigateway.New(sess)
	client.appautoscalingconn = applicationautoscaling.New(sess)
	client.autoscalingconn = autoscaling.New(sess)
	client.cfconn = cloudformation.New(sess)
	client.cloudfrontconn = cloudfront.New(sess)
	client.cloudtrailconn = cloudtrail.New(sess)
	client.cloudwatchconn = cloudwatch.New(sess)
	client.cloudwatcheventsconn = cloudwatchevents.New(sess)
	client.cloudwatchlogsconn = cloudwatchlogs.New(sess)
	client.codecommitconn = codecommit.New(usEast1Sess)
	client.codedeployconn = codedeploy.New(sess)
	client.dsconn = directoryservice.New(sess)
	client.dynamodbconn = dynamodb.New(dynamoSess)
	client.ec2conn = ec2.New(awsEc2Sess)
	client.ecrconn = ecr.New(sess)
	client.ecsconn = ecs.New(sess)
	client.efsconn = efs.New(sess)
	client.elasticacheconn = elasticache.New(sess)
	client.elasticbeanstalkconn = elasticbeanstalk.New(sess)
	client.elastictranscoderconn = elastictranscoder.New(sess)
	client.elbconn = elb.New(awsElbSess)
	client.elbv2conn = elbv2.New(awsElbSess)
	client.emrconn = emr.New(sess)
	client.esconn = elasticsearch.New(sess)
	client.firehoseconn = firehose.New(sess)
	client.glacierconn = glacier.New(sess)
	client.kinesisconn = kinesis.New(kinesisSess)
	client.kmsconn = kms.New(sess)
	client.lambdaconn = lambda.New(sess)
	client.lightsailconn = lightsail.New(usEast1Sess)
	client.opsworksconn = opsworks.New(usEast1Sess)
	client.r53conn = route53.New(usEast1Sess)
	client.rdsconn = rds.New(sess)
	client.redshiftconn = redshift.New(sess)
	client.simpledbconn = simpledb.New(sess)
	client.s3conn = s3.New(awsS3Sess)
	client.sesConn = ses.New(sess)
	client.snsconn = sns.New(sess)
	client.sqsconn = sqs.New(sess)
	client.ssmconn = ssm.New(sess)
	client.wafconn = waf.New(sess)
	return &client, nil
}
// init raises the shared session's log level from DEBUG* environment
// variables (the last one checked wins when several are set), then registers
// the gucumber step definitions used by the API BDD tests.
func init() {
	logLevel := Session.Config.LogLevel
	if os.Getenv("DEBUG") != "" {
		logLevel = aws.LogLevel(aws.LogDebug)
	}
	if os.Getenv("DEBUG_SIGNING") != "" {
		logLevel = aws.LogLevel(aws.LogDebugWithSigning)
	}
	if os.Getenv("DEBUG_BODY") != "" {
		logLevel = aws.LogLevel(aws.LogDebugWithHTTPBody)
	}
	Session.Config.LogLevel = logLevel
	// Steps that invoke an API operation (optionally with tabular args or
	// JSON); the boolean selects whether an error is expected.
	When(`^I call the "(.+?)" API$`, func(op string) {
		call(op, nil, false)
	})
	When(`^I call the "(.+?)" API with:$`, func(op string, args [][]string) {
		call(op, args, false)
	})
	// Steps that inspect the shared World["response"] value.
	Then(`^the value at "(.+?)" should be a list$`, func(member string) {
		vals := awsutil.ValuesAtAnyPath(World["response"], member)
		assert.NotNil(T, vals)
	})
	Then(`^the response should contain a "(.+?)"$`, func(member string) {
		vals := awsutil.ValuesAtAnyPath(World["response"], member)
		assert.NotEmpty(T, vals)
	})
	When(`^I attempt to call the "(.+?)" API with:$`, func(op string, args [][]string) {
		call(op, args, true)
	})
	// Steps that inspect the shared World["error"] value.
	Then(`^I expect the response error code to be "(.+?)"$`, func(code string) {
		err, ok := World["error"].(awserr.Error)
		assert.True(T, ok, "no error returned")
		if ok {
			assert.Equal(T, code, err.Code(), "Error: %v", err)
		}
	})
	And(`^I expect the response error message to include:$`, func(data string) {
		err, ok := World["error"].(awserr.Error)
		assert.True(T, ok, "no error returned")
		if ok {
			assert.Contains(T, err.Message(), data)
		}
	})
	And(`^I expect the response error message to include one of:$`, func(table [][]string) {
		err, ok := World["error"].(awserr.Error)
		assert.True(T, ok, "no error returned")
		if ok {
			// Any single match in the table satisfies the step.
			found := false
			for _, row := range table {
				if strings.Contains(err.Message(), row[0]) {
					found = true
					break
				}
			}
			assert.True(T, found, fmt.Sprintf("no error messages matched: \"%s\"", err.Message()))
		}
	})
	When(`^I call the "(.+?)" API with JSON:$`, func(s1 string, data string) {
		callWithJSON(s1, data, false)
	})
	When(`^I attempt to call the "(.+?)" API with JSON:$`, func(s1 string, data string) {
		callWithJSON(s1, data, true)
	})
	Then(`^the error code should be "(.+?)"$`, func(s1 string) {
		err, ok := World["error"].(awserr.Error)
		assert.True(T, ok, "no error returned")
		assert.Equal(T, s1, err.Code())
	})
	And(`^the error message should contain:$`, func(data string) {
		err, ok := World["error"].(awserr.Error)
		assert.True(T, ok, "no error returned")
		assert.Contains(T, err.Error(), data)
	})
	Then(`^the request should fail$`, func() {
		err, ok := World["error"].(awserr.Error)
		assert.True(T, ok, "no error returned")
		assert.Error(T, err)
	})
	Then(`^the request should be successful$`, func() {
		err, ok := World["error"].(awserr.Error)
		assert.False(T, ok, "error returned")
		assert.NoError(T, err)
	})
}
// NewGoofys constructs the filesystem value for bucket: it enables S3 debug
// logging when requested, resolves the bucket's region (re-creating the
// client against the detected region on mismatch), and seeds the root inode
// plus the inode/handle tables. Returns nil when the bucket does not exist
// or its region cannot be confirmed.
func NewGoofys(bucket string, awsConfig *aws.Config, flags *flagStorage) *Goofys {
	// Set up the basic struct.
	fs := &Goofys{
		bucket: bucket,
		flags:  flags,
		umask:  0122,
	}
	if flags.DebugS3 {
		awsConfig.LogLevel = aws.LogLevel(aws.LogDebug)
	}
	fs.awsConfig = awsConfig
	fs.s3 = s3.New(awsConfig)
	params := &s3.GetBucketLocationInput{Bucket: &bucket}
	resp, err := fs.s3.GetBucketLocation(params)
	var fromRegion, toRegion string
	if err != nil {
		if mapAwsError(err) == fuse.ENOENT {
			log.Printf("bucket %v does not exist", bucket)
			return nil
		}
		// A region-mismatch error carries the correct region; extract it.
		fromRegion, toRegion = parseRegionError(err)
	} else {
		fs.logS3(resp)
		// A nil LocationConstraint is mapped to us-east-1.
		if resp.LocationConstraint == nil {
			toRegion = "us-east-1"
		} else {
			toRegion = *resp.LocationConstraint
		}
		fromRegion = *awsConfig.Region
	}
	if len(toRegion) != 0 && fromRegion != toRegion {
		log.Printf("Switching from region '%v' to '%v'", fromRegion, toRegion)
		awsConfig.Region = &toRegion
		fs.s3 = s3.New(awsConfig)
		// Re-verify against the corrected region before proceeding.
		_, err = fs.s3.GetBucketLocation(params)
		if err != nil {
			log.Println(err)
			return nil
		}
	} else if len(toRegion) == 0 && *awsConfig.Region != "milkyway" {
		log.Printf("Unable to detect bucket region, staying at '%v'", *awsConfig.Region)
	}
	now := time.Now()
	// Root directory attributes; ownership comes from the mount flags.
	fs.rootAttrs = fuseops.InodeAttributes{
		Size:   4096,
		Nlink:  2,
		Mode:   flags.DirMode | os.ModeDir,
		Atime:  now,
		Mtime:  now,
		Ctime:  now,
		Crtime: now,
		Uid:    fs.flags.Uid,
		Gid:    fs.flags.Gid,
	}
	// 100MB pool handed out in 20MB buffers — presumably sized for S3
	// multipart uploads; confirm against BufferPool's usage.
	fs.bufferPool = NewBufferPool(100*1024*1024, 20*1024*1024)
	// Inode IDs start just past the reserved FUSE root ID.
	fs.nextInodeID = fuseops.RootInodeID + 1
	fs.inodes = make(map[fuseops.InodeID]*Inode)
	root := NewInode(aws.String(""), aws.String(""), flags)
	root.Id = fuseops.RootInodeID
	root.Attributes = &fs.rootAttrs
	fs.inodes[fuseops.RootInodeID] = root
	fs.inodesCache = make(map[string]*Inode)
	fs.nextHandleID = 1
	fs.dirHandles = make(map[fuseops.HandleID]*DirHandle)
	fs.fileHandles = make(map[fuseops.HandleID]*FileHandle)
	return fs
}
func ListFolder(region, bucketName, path, keyId, secretKey, token string, iam bool) (FileList, error) { var f FileList var awsconfig *aws.Config if !iam { creds := credentials.NewStaticCredentials(keyId, secretKey, token) var f FileList if _, err := creds.Get(); err != nil { return f, err } awsconfig = &aws.Config{ Region: aws.String(region), Endpoint: aws.String("s3.amazonaws.com"), S3ForcePathStyle: aws.Bool(true), Credentials: creds, LogLevel: aws.LogLevel(0), } } else { awsconfig = &aws.Config{ Region: aws.String(region), Endpoint: aws.String("s3.amazonaws.com"), S3ForcePathStyle: aws.Bool(true), LogLevel: aws.LogLevel(0), } } sess := session.New(awsconfig) svc := s3.New(sess) inparams := &s3.ListObjectsInput{ Bucket: aws.String(bucketName), Prefix: aws.String(path), Delimiter: aws.String("/"), } err := svc.ListObjectsPages(inparams, func(p *s3.ListObjectsOutput, lastPage bool) bool { for _, o := range p.Contents { switch *o.StorageClass { case s3.ObjectStorageClassGlacier: log.Printf("GLACIER %s\n", *o.Key) continue case s3.ObjectStorageClassReducedRedundancy: log.Printf("REDUCED %s\n", *o.Key) continue case s3.ObjectStorageClassStandard: log.Printf("STANDARD %s\n", *o.Key) } loc, err := time.LoadLocation("America/New_York") if err != nil { log.Fatal(err) } keyString := *o.Key fileString := strings.TrimPrefix(keyString, *p.Prefix) lm := o.LastModified.In(loc) t := lm.Format("15:04:05") d := lm.Format("2006-01-02") f.Files = append(f.Files, &FileObject{keyString, fileString, *o.Size, t, d}) } return !lastPage }) if err != nil { log.Println(err) } log.Printf("Kept %d backup files.\n", len(f.Files)) // log.Println(f) sort.Sort(sort.Reverse(f.Files)) return f, nil }
// RoundTrip strips proxy/forwarding headers from the incoming request,
// re-signs it with AWS SigV4 for the configured region/service, and forwards
// it via the inner RoundTripper. The body is buffered so it can be both
// signed and replayed.
func (p *SigningRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	glog.V(2).Infof("Got request: %s %s", req.Method, req.URL)
	// Fix the host header in case broken by proxy-rewrite
	if req.URL.Host != "" {
		req.Host = req.URL.Host
	}
	// I think the AWS authentication proxy does not like forwarded headers
	for k := range req.Header {
		lk := strings.ToLower(k)
		if lk == "x-forwarded-host" {
			delete(req.Header, k)
		}
		if lk == "x-forwarded-for" {
			delete(req.Header, k)
		}
		if lk == "x-forwarded-proto" {
			delete(req.Header, k)
		}
		if lk == "x-forward-for" {
			delete(req.Header, k)
		}
		if lk == "x-forward-proto" {
			delete(req.Header, k)
		}
		if lk == "x-forward-port" {
			delete(req.Header, k)
		}
		if lk == "x-forwarded-port" {
			delete(req.Header, k)
		}
		if lk == "x-forwarded-prefix" {
			delete(req.Header, k)
		}
		if lk == "x-netflix-httpclientname" {
			delete(req.Header, k)
		}
		if lk == "x-newrelic-id" {
			delete(req.Header, k)
		}
		if lk == "x-newrelic-transaction" {
			delete(req.Header, k)
		}
		if lk == "netflix.nfhttpclient.version" {
			delete(req.Header, k)
		}
	}
	// We're going to put our own auth headers on here
	delete(req.Header, "Authorization")
	// Buffer the body so it can be signed and then replayed downstream.
	var body []byte
	var err error
	if req.Body != nil {
		body, err = ioutil.ReadAll(req.Body)
		if err != nil {
			glog.Infof("error reading request body: %v", err)
			return nil, err
		}
	}
	if req.Method == "GET" || req.Method == "HEAD" {
		delete(req.Header, "Content-Length")
	}
	oldPath := req.URL.Path
	if oldPath != "" {
		// Escape the path before signing so that the path in the signature and
		// the path in the request match.
		req.URL.Path = req.URL.EscapedPath()
		glog.V(4).Infof("Path -> %q", req.URL.Path)
	}
	// Build a minimal SDK request wrapper around the HTTP request so the
	// SigV4 signer can be reused.
	awsReq := &request.Request{}
	awsReq.Config.Credentials = p.credentials
	awsReq.Config.Region = aws.String(p.region)
	awsReq.ClientInfo.ServiceName = SERVICE_NAME
	awsReq.HTTPRequest = req
	awsReq.Time = time.Now()
	awsReq.ExpireTime = 0
	if body != nil {
		awsReq.Body = bytes.NewReader(body)
	}
	if glog.V(4) {
		awsReq.Config.LogLevel = aws.LogLevel(aws.LogDebugWithSigning)
		awsReq.Config.Logger = aws.NewDefaultLogger()
	}
	v4.Sign(awsReq)
	if awsReq.Error != nil {
		glog.Warningf("error signing request: %v", awsReq.Error)
		return nil, awsReq.Error
	}
	// Restore the unescaped path and a fresh body reader before forwarding.
	req.URL.Path = oldPath
	if body != nil {
		req.Body = ioutil.NopCloser(bytes.NewReader(body))
	}
	response, err := p.inner.RoundTrip(req)
	if err != nil {
		glog.Warning("Request error: ", err)
		return nil, err
	} else {
		glog.V(2).Infof("response %s", response.Status)
		return response, err
	}
}
// Login creates (or returns a cached) EFS service client for the current
// context. Sessions are cached under an MD5 of region + endpoint + access
// key unless caching is disabled; the whole operation holds sessionsL.
func (d *driver) Login(ctx types.Context) (interface{}, error) {
	sessionsL.Lock()
	defer sessionsL.Unlock()
	var (
		endpoint *string
		ckey     string
		hkey     = md5.New()
		akey     = d.accessKey
		region   = d.mustRegion(ctx)
	)
	// Derive the endpoint from the format template when a region is known;
	// otherwise fall back to the statically configured endpoint.
	if region != nil && d.endpointFormat != "" {
		szEndpoint := fmt.Sprintf(d.endpointFormat, *region)
		endpoint = &szEndpoint
	} else {
		endpoint = d.endpoint
	}
	if !d.disableSessionCache {
		// Cache key: MD5 over region, endpoint, and access key.
		writeHkey(hkey, region)
		writeHkey(hkey, endpoint)
		writeHkey(hkey, &akey)
		ckey = fmt.Sprintf("%x", hkey.Sum(nil))
		// if the session is cached then return it
		if svc, ok := sessions[ckey]; ok {
			ctx.WithField(cacheKeyC, ckey).Debug("using cached efs service")
			return svc, nil
		}
	}
	var (
		skey   = d.getSecretKey()
		fields = map[string]interface{}{
			efs.AccessKey: akey,
			efs.Tag:       d.tag,
			cacheKeyC:     ckey,
		}
	)
	// Mask the secret key in log fields.
	if skey == "" {
		fields[efs.SecretKey] = ""
	} else {
		fields[efs.SecretKey] = "******"
	}
	if region != nil {
		fields[efs.Region] = *region
	}
	if endpoint != nil {
		fields[efs.Endpoint] = *endpoint
	}
	ctx.WithFields(fields).Debug("efs service connetion attempt")
	sess := session.New()
	var (
		awsLogger   = &awsLogger{ctx: ctx}
		awsLogLevel = aws.LogOff
	)
	// Map the context's log level onto the closest SDK log level.
	if ll, ok := context.GetLogLevel(ctx); ok {
		switch ll {
		case log.DebugLevel:
			awsLogger.lvl = log.DebugLevel
			awsLogLevel = aws.LogDebugWithHTTPBody
		case log.InfoLevel:
			awsLogger.lvl = log.InfoLevel
			awsLogLevel = aws.LogDebug
		}
	}
	// Credential chain: explicit static keys, then environment, then shared
	// credentials file, then the EC2 instance role.
	svc := awsefs.New(sess, &aws.Config{
		Region:     region,
		Endpoint:   endpoint,
		MaxRetries: d.maxRetries,
		Credentials: credentials.NewChainCredentials(
			[]credentials.Provider{
				&credentials.StaticProvider{
					Value: credentials.Value{
						AccessKeyID:     akey,
						SecretAccessKey: skey,
					},
				},
				&credentials.EnvProvider{},
				&credentials.SharedCredentialsProvider{},
				&ec2rolecreds.EC2RoleProvider{
					Client: ec2metadata.New(sess),
				},
			},
		),
		Logger:   awsLogger,
		LogLevel: aws.LogLevel(awsLogLevel),
	})
	ctx.WithFields(fields).Info("efs service connection created")
	if !d.disableSessionCache {
		sessions[ckey] = svc
		ctx.WithFields(fields).Info("efs service connection cached")
	}
	return svc, nil
}