// GetAuth returns aws.Auth from credentials or envrionment variables. func GetAuth(name string) (aws.Auth, error) { f, err := loadCredFile() if err != nil { if os.IsExist(err) { return aws.Auth{}, err } return aws.EnvAuth() } var prof ini.Section var ok bool if name != "" { prof, ok = f[name] } if !ok { prof, ok = f["default"] } if !ok { return aws.Auth{}, errors.New("cannot find section") } // Parse auth info from a ini's section. a := aws.Auth{ AccessKey: prof["aws_access_key_id"], SecretKey: prof["aws_secret_access_key"], } if a.AccessKey == "" { return aws.Auth{}, errors.New("empty aws_access_key_id in credentials") } if a.SecretKey == "" { return aws.Auth{}, errors.New("empty aws_secret_access_key in credentials") } return a, nil }
func (s *AmazonServer) SetUp(c *C) { auth, err := aws.EnvAuth() if err != nil { c.Fatal(err) } s.auth = auth }
// TestBasicGroupRequest exercises DescribeAutoScalingGroups, either against
// the real us-west-2 endpoint (when AWS env credentials are present) or
// against the local mock test server with canned responses.
func TestBasicGroupRequest(t *testing.T) {
	var as *AutoScaling
	awsAuth, err := aws.EnvAuth()
	if err != nil {
		mockTest = true
		t.Log("Running mock tests as AWS environment variables are not set")
		// NOTE: this shadows the outer awsAuth deliberately; the dummy
		// credentials are only used with the mock endpoint below.
		awsAuth := aws.Auth{AccessKey: "abc", SecretKey: "123"}
		as = New(awsAuth, aws.Region{AutoScalingEndpoint: testServer.URL})
		testServer.Start()
		go testServer.WaitRequest()
		testServer.Response(200, nil, BasicGroupResponse)
	} else {
		as = New(awsAuth, aws.USWest2)
	}
	groupResp, err := as.DescribeAutoScalingGroups(nil, 10, "")
	if err != nil {
		t.Fatal(err)
	}
	// Only log group details when at least one group exists; the mock
	// response and a fresh real account may both legitimately be empty.
	if len(groupResp.AutoScalingGroups) > 0 {
		firstGroup := groupResp.AutoScalingGroups[0]
		if len(firstGroup.AutoScalingGroupName) > 0 {
			t.Logf("Found AutoScaling group %s\n", firstGroup.AutoScalingGroupName)
		}
	}
	testServer.Flush()
}
func (rc *Recommender) LoadBackup() (success bool) { log.Info("Loading backup from S3:", rc.identifier) auth, err := aws.EnvAuth() if err != nil { log.Error("Problem trying to connect with AWS:", err) return false } s := s3.New(auth, rc.s3Region) bucket := s.Bucket(S3BUCKET) jsonData, err := bucket.Get(rc.getS3Path()) if err != nil { log.Info("Problem trying to get backup from S3:", err) return false } dataFromJSON := [][]uint64{} json.Unmarshal(rc.uncompress(jsonData), &dataFromJSON) log.Info("Data loaded from S3:", rc.identifier, "len:", len(dataFromJSON)) recs := 0 for _, record := range dataFromJSON { scores := make(map[uint64]uint8) for i := 1; i < len(record); i += 2 { scores[record[i]] = uint8(record[i+1]) } recs += len(scores) rc.AddRecord(record[0], scores) } return true }
func InitAndKeepAlive(prefix string, awsRegion string, keepAlive bool) (im *Model) { if awsAuth, err := aws.EnvAuth(); err == nil { im = &Model{ prefix: prefix, tableName: fmt.Sprintf("%s_%s", prefix, cTable), conn: &dynamodb.Server{ Auth: awsAuth, Region: aws.Regions[awsRegion], }, } im.initTable() if keepAlive { im.registerHostName(hostName) } im.updateInstances() if keepAlive { go func() { for { im.registerHostName(hostName) im.updateInstances() time.Sleep(time.Second) } }() } } else { log.Error("Problem trying to connect with DynamoDB, Error:", err) return } return }
func TestPutGet(t *testing.T) { Reset(t) auth, err := aws.EnvAuth() if err != nil { t.Error(err) } s := s3.New(auth, localRegion) b := s.Bucket("TestBucket") err = b.PutBucket("acl") if err != nil { t.Fatal(err) } o, err := b.GetBucketContents() if err != nil { t.Fatal(err) } if len(*o) != 0 { t.Fatalf("Bucket should be empty, but has %d object", len(*o)) } }
func ExampleV4Signer() { // Get auth from env vars auth, err := aws.EnvAuth() if err != nil { fmt.Println(err) } // Create a signer with the auth, name of the service, and aws region signer := aws.NewV4Signer(auth, "dynamodb", aws.USEast) // Create a request req, err := http.NewRequest("POST", aws.USEast.DynamoDBEndpoint, strings.NewReader("sample_request")) if err != nil { fmt.Println(err) } // Date or x-amz-date header is required to sign a request req.Header.Add("Date", time.Now().UTC().Format(http.TimeFormat)) // Sign the request signer.Sign(req) // Issue signed request http.DefaultClient.Do(req) }
func (s *AmazonServer) SetUp(c *gocheck.C) { auth, err := aws.EnvAuth() if err != nil { c.Fatal(err.Error()) } s.auth = auth }
// TestEnvAuthAlt checks that EnvAuth falls back to the alternate
// AWS_ACCESS_KEY / AWS_SECRET_KEY variable names.
func (s *S) TestEnvAuthAlt(c *gocheck.C) {
	os.Clearenv()
	os.Setenv("AWS_SECRET_KEY", "secret")
	os.Setenv("AWS_ACCESS_KEY", "access")
	auth, err := aws.EnvAuth()
	c.Assert(err, gocheck.IsNil)
	c.Assert(auth, gocheck.Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
}
// TestEnvAuthAlt checks that EnvAuth falls back to the alternate
// AWS_ACCESS_KEY / AWS_SECRET_KEY variable names (pointer-auth API variant).
func (s *S) TestEnvAuthAlt(c *C) {
	os.Clearenv()
	os.Setenv("AWS_SECRET_KEY", "secret")
	os.Setenv("AWS_ACCESS_KEY", "access")
	auth, err := aws.EnvAuth()
	c.Assert(err, IsNil)
	// Compare by value: EnvAuth returns *Auth in this API version.
	c.Assert(*auth, Equals, *aws.NewAuth("access", "secret", "", time.Time{}))
}
// Connect starts the connection to AWS and returns any error. The default
// strategy is to connect 5 times, wait 4 seconds, with a delay of 200 ms.
// Make sure to source the credential file on the server.
// Region is hardcoded to EUWest; auth is read from the environment.
func (c *Connection) Connect() error {
	auth, err := aws.EnvAuth()
	if err != nil {
		return err
	}
	c.Auth = auth
	c.Region = aws.EUWest
	return nil
}
func (s *SuiteI) SetUpSuite(c *C) { if !*integration { c.Skip("Integration tests not enabled (-i flag)") } auth, err := aws.EnvAuth() if err != nil { c.Fatal(err.Error()) } s.auth = auth }
func (s *LiveSuite) SetUpSuite(c *gocheck.C) { if !Amazon { c.Skip("amazon tests not enabled (-amazon flag)") } auth, err := aws.EnvAuth() if err != nil { c.Fatal(err.Error()) } s.auth = auth }
// TestEnvAuthToken checks that EnvAuth also picks up a session token from
// AWS_SESSION_TOKEN alongside the alternate key variable names.
func (s *S) TestEnvAuthToken(c *C) {
	os.Clearenv()
	os.Setenv("AWS_SECRET_KEY", "secret")
	os.Setenv("AWS_ACCESS_KEY", "access")
	os.Setenv("AWS_SESSION_TOKEN", "token")
	auth, err := aws.EnvAuth()
	c.Assert(err, IsNil)
	c.Assert(auth.SecretKey(), Equals, "secret")
	c.Assert(auth.AccessKey(), Equals, "access")
	c.Assert(auth.Token(), Equals, "token")
}
func (d *SolrDocument) Cache(awsConfig *AWSConfig) { if d.Name == "" { return } documentName := fmt.Sprintf("%v/%v", d.Name, d.Id) auth, _ := aws.EnvAuth() region := aws.Region{Name: awsConfig.RegionName, S3Endpoint: awsConfig.S3Endpoint} svc := s3.New(auth, region) bucketName := awsConfig.BucketName bucket := svc.Bucket(bucketName) err := bucket.Put(documentName, d.content, "text/xml", s3.AuthenticatedRead, s3.Options{}) if err != nil { } }
func (rc *Recommender) DestroyS3Backup() (success bool) { log.Info("Destroying backup on S3:", rc.identifier) auth, err := aws.EnvAuth() if err != nil { log.Error("Problem trying to connect with AWS:", err) return false } s := s3.New(auth, rc.s3Region) bucket := s.Bucket(S3BUCKET) if err := bucket.Del(rc.getS3Path()); err != nil { log.Info("Problem trying to remove backup from S3:", err) return false } return true }
func setUpAuth(c *C) { if !*amazon { c.Skip("Test against amazon not enabled.") } if *local { c.Log("Using local server") dynamodb_region = aws.Region{DynamoDBEndpoint: "http://127.0.0.1:8000"} dynamodb_auth = aws.Auth{AccessKey: "DUMMY_KEY", SecretKey: "DUMMY_SECRET"} } else { c.Log("Using REAL AMAZON SERVER") dynamodb_region = aws.USEast auth, err := aws.EnvAuth() if err != nil { c.Fatal(err) } dynamodb_auth = auth } }
// main polls EC2 for instances matching the configured tag filters and
// writes marshalled target groups to dest (stdout when dest is "-"),
// repeating every `sleep` interval, or once only when sleep is 0.
func main() {
	initFlags()
	filter := ec2.NewFilter()
	for _, t := range tags {
		filter.Add(t.FilterName, t.FilterValue)
	}
	auth, err := aws.EnvAuth()
	if err != nil {
		log.Fatal(err)
	}
	e := ec2.New(auth, region)
	for {
		resp, err := e.DescribeInstances(nil, filter)
		if err != nil {
			log.Fatal(err)
		}
		instances := flattenReservations(resp.Reservations)
		// When no tag keys were configured, group by every tag key
		// observed on the returned instances.
		tagKeys := tags.Keys()
		if len(tagKeys) == 0 {
			tagKeys = allTagKeys(instances)
		}
		targetGroups := groupByTags(instances, tagKeys)
		b := marshalTargetGroups(targetGroups)
		if dest == "-" {
			_, err = os.Stdout.Write(b)
		} else {
			// Written via a temp file (suffix ".new") then renamed,
			// so readers never see a partial file.
			err = atomicWriteFile(dest, b, ".new")
		}
		if err != nil {
			log.Fatal(err)
		}
		if sleep == 0 {
			break
		} else {
			time.Sleep(sleep)
		}
	}
}
func NewS3Connection(bucket string, region string, public bool) *S3 { var acl s3.ACL var auth aws.Auth var err error // set auth auth, err = aws.EnvAuth() if err != nil { auth, err = aws.SharedAuth() if err != nil { log.Fatal(err) } } // set region if region == "" { region = os.Getenv("AWS_REGION") if region == "" { region = "us-east-1" } } awsRegion := aws.Regions[region] acl = s3.ACL("private") if public { acl = s3.ACL("public-read") } // establish connection conn := s3.New(auth, awsRegion) // set bucket bkt := conn.Bucket(bucket) return &S3{ Auth: auth, Region: awsRegion, bucket: bkt, conn: conn, ACL: acl, } }
func (rc *Recommender) SaveBackup() { log.Info("Storing backup on S3:", rc.identifier) rc.mutex.Lock() records := make([][]uint64, len(rc.records)) i := 0 for recID, record := range rc.records { records[i] = make([]uint64, len(record.scores)*2+1) records[i][0] = recID elemPos := 1 for k, v := range record.scores { records[i][elemPos] = k records[i][elemPos+1] = uint64(v) elemPos += 2 } i++ } rc.mutex.Unlock() jsonToUpload, err := json.Marshal(records) auth, err := aws.EnvAuth() if err != nil { log.Error("Problem trying to connect with AWS:", err) return } s := s3.New(auth, rc.s3Region) bucket := s.Bucket(S3BUCKET) err = bucket.Put( rc.getS3Path(), rc.compress(jsonToUpload), "text/plain", s3.BucketOwnerFull, s3.Options{}) if err != nil { log.Error("Problem trying to upload backup to S3 from:", rc.identifier, "Error:", err) } log.Info("New backup stored on S3, bucket:", S3BUCKET, "Path:", rc.getS3Path()) }
func GetModel(prefix string, awsRegion string) (um *Model) { if awsAuth, err := aws.EnvAuth(); err == nil { um = &Model{ prefix: prefix, tableName: fmt.Sprintf("%s_%s", prefix, cTable), secret: []byte(os.Getenv("PIT_SECRET")), cache: make(map[string]*User), conn: &dynamodb.Server{ Auth: awsAuth, Region: aws.Regions[awsRegion], }, } um.initTable() go um.cacheManager() } else { log.Error("Problem trying to connect with DynamoDB, Error:", err) } return }
func setUpAuth(c *C) { if !*amazon && !*local { c.Skip("Neither test against local nor amazon is enabled.") } if *local { c.Log("Using local server") dynamodb_region = aws.Region{ DynamoDBEndpoint: "http://127.0.0.1:8000", DynamoDBStreamsEndpoint: "http://127.0.0.1:8000", } dynamodb_auth = aws.NewAuth("DUMMY_KEY", "DUMMY_SECRET", "", time.Time{}) } else { c.Log("Using REAL AMAZON SERVER") dynamodb_region = aws.USEast auth, err := aws.EnvAuth() if err != nil { c.Fatal(err) } dynamodb_auth = auth } }
func CmdUpload(c *cli.Context) { files, _ := ioutil.ReadDir("./") for _, f := range files { fmt.Println(f.Name()) } // uploadFile("READsME.md") auth, err := aws.EnvAuth() euwest := aws.EUWest fmt.Println(auth) connection := s3.New(auth, euwest) mybucket := connection.Bucket(bucketname) res, err := mybucket.List(foldername, "", "", 1000) if err != nil { log.Fatal(err) } for _, v := range res.Contents { fmt.Println("https://s3-eu-west-1.amazonaws.com/" + bucketname + "/" + v.Key) } }
// Add stores a listing: uploads any base64-encoded images to S3 (replacing
// the inline data URIs with public bucket URLs), computes an expiry date,
// generates a slug, ensures the text index, and inserts the listing into
// the "Listings" collection. Returns the last S3 upload error, if any.
func (r Listing) Add(config *config.Conf) error {
	auth, err := aws.EnvAuth()
	if err != nil {
		log.Fatal(err)
	}
	client := s3.New(auth, aws.USWest2)
	bucket := client.Bucket("yellowpagesng")
	// Random digit appended to the slug to reduce collisions.
	p := rand.New(rand.NewSource(time.Now().UnixNano()))
	str := strconv.Itoa(p.Intn(10))
	r.Date = time.Now()
	if r.Image != "" {
		// Duration (presumably months — TODO confirm) drives the expiry.
		tm, _ := strconv.Atoi(r.Duration)
		t := r.Date.AddDate(tm, 1, 0)
		r.Expiry = t
		// Images arrive as data URIs: "data:<mime>;base64,<payload>".
		byt, er := base64.StdEncoding.DecodeString(strings.Split(r.Image, "base64,")[1])
		if er != nil {
			log.Println(er)
		}
		meta := strings.Split(r.Image, "base64,")[0]
		newmeta := strings.Replace(strings.Replace(meta, "data:", "", -1), ";", "", -1)
		imagename := "listings/" + uuid.NewV1().String()
		err = bucket.Put(imagename, byt, newmeta, s3.PublicReadWrite, s3.Options{})
		if err != nil {
			log.Println(err)
		}
		log.Println(bucket.URL(imagename))
		r.Image = bucket.URL(imagename)
		// Upload each gallery image the same way, collecting its URL.
		var images []string
		for _, v := range r.Images {
			var byt []byte
			byt, err = base64.StdEncoding.DecodeString(strings.Split(v, "base64,")[1])
			if err != nil {
				log.Println(err)
			}
			meta := strings.Split(v, "base64,")[0]
			newmeta := strings.Replace(strings.Replace(meta, "data:", "", -1), ";", "", -1)
			imagename = "listings/" + uuid.NewV1().String()
			err = bucket.Put(imagename, byt, newmeta, s3.PublicReadWrite, s3.Options{})
			if err != nil {
				log.Println(err)
			}
			images = append(images, bucket.URL(imagename))
		}
		r.Images = images
	} else {
		r.Plus = "false"
	}
	// NOTE(review): str is appended after every Replace, so the slug ends
	// with the same digit repeated four times — confirm this is intended.
	r.Slug = strings.Replace(r.CompanyName, " ", "-", -1) + str
	r.Slug = strings.Replace(r.Slug, "&", "-", -1) + str
	r.Slug = strings.Replace(r.Slug, "/", "-", -1) + str
	r.Slug = strings.Replace(r.Slug, ",", "-", -1) + str
	index := mgo.Index{
		Key:        []string{"$text:specialisation", "$text:companyname"},
		Unique:     true,
		DropDups:   true,
		Background: true,
	}
	// NOTE(review): EnsureIndex and Insert errors are silently discarded.
	collection := config.Database.C("Listings").With(config.Database.Session.Copy())
	collection.EnsureIndex(index)
	collection.Insert(r)
	return err
}
// main syncs a local directory tree to an S3 bucket: it parses flags,
// loads the bucket's existing object list (key -> ETag) so unchanged
// files can be skipped, then walks baseDir uploading via processDir.
func main() {
	flag.StringVar(&bucketName, "bucket", "", "S3 Bucket Name (required)")
	flag.StringVar(&baseDir, "dir", "", "Local directory (required)")
	flag.BoolVar(&verbose, "verbose", false, "Print extra log messages")
	flag.BoolVar(&showHelp, "help", false, "Show this help")
	flag.BoolVar(&recursive, "recursive", false, "recurse into sub-directories")
	flag.BoolVar(&includeUnknownMimeTypes, "include-unknown-mime-types", false, "upload files with unknown mime types")
	flag.StringVar(&ignore, "ignore", "", "Comma-separated list of files/directories to ignore")
	flag.StringVar(&s3BasePrefix, "s3-prefix", "", "Prefix for s3 objects")
	flag.DurationVar(&timeout, "timeout", 0, "Max time to run in seconds, 0=indefinite")
	flag.Parse()
	if showHelp {
		fmt.Fprintf(os.Stderr, "usage: %s [ options ]\noptions:\n", programName)
		flag.PrintDefaults()
		return
	}
	if bucketName == "" {
		log.Fatalf("Must specify bucket: use '%s -help' for usage", programName)
	}
	if baseDir == "" {
		log.Fatalf("Must specify directory: use '%s -help' for usage", programName)
	}
	// A zero timeout means "run indefinitely" (bounded by veryLongTime).
	if timeout == time.Duration(0) {
		timeout = veryLongTime
	}
	stopTime = time.Now().Add(timeout)
	for _, name := range strings.Split(ignore, ",") {
		ignoreNames[name] = name
	}
	if s3BasePrefix != "" {
		// Normalize the prefix to end with a slash.
		if !strings.HasSuffix(s3BasePrefix, "/") {
			s3BasePrefix += "/"
		}
		if verbose {
			log.Printf("s3-prefix = '%s'", s3BasePrefix)
			log.Printf("dir = %s", baseDir)
		}
	}
	auth, err := aws.EnvAuth()
	if err != nil {
		log.Fatal(err)
	}
	s3Config := s3.New(auth, aws.APSoutheast2)
	bucket := &s3.Bucket{S3: s3Config, Name: bucketName}
	if verbose {
		log.Println("Listing objects in bucket")
	}
	// Page through the bucket listing (1000 keys per request), recording
	// each key's ETag; marker advances pagination to the last key seen.
	marker := ""
	for {
		listResp, err := bucket.List(s3BasePrefix, "", marker, 1000)
		if err != nil {
			log.Fatal(err)
		}
		for _, key := range listResp.Contents {
			s3Objects[key.Key] = key.ETag
			marker = key.Key
		}
		if verbose {
			log.Printf("%d objects loaded", len(s3Objects))
		}
		if !listResp.IsTruncated {
			break
		}
		if time.Now().After(stopTime) {
			log.Fatal("Timeout limit reached")
		}
	}
	processDir(baseDir, s3BasePrefix, bucket)
}
func TestObjectCycle(t *testing.T) { Reset(t) objectPath := "/test1" objectContents := []byte("test1") updatedObjectContents := []byte("Updatedtest") auth, err := aws.EnvAuth() if err != nil { t.Error(err) } s := s3.New(auth, localRegion) // Create Bucket b := s.Bucket("TestBucket") err = b.PutBucket("acl") if err != nil { log.Fatalf("Couldn't create bucket: %v", err) } // Put Object err = b.Put(objectPath, objectContents, "application/octet-stream", "acl", s3.Options{}) if err != nil { t.Fatal(err) } // Get the same object data, err := b.Get(objectPath) if err != nil { t.Fatal(err) } if !bytes.Equal(data, objectContents) { t.Errorf("Expected content %v, got content: %v", objectContents, data) } // Check if there is exactly 1 object in the bucket now o, err := b.GetBucketContents() if err != nil { t.Fatalf("Couldn't get bucket contents: %v", err) } if len(*o) != 1 { t.Fatalf("Bucket sould contain 1 object, contains %d objects", len(*o)) } err = b.Put(objectPath, updatedObjectContents, "application/octet-stream", "acl", s3.Options{}) if err != nil { t.Fatalf("Error updateing Object: %v", err) } // Check that the object has been modified data, err = b.Get(objectPath) if err != nil { t.Fatalf("Error getting updated object: %v", err) } if !bytes.Equal(data, updatedObjectContents) { t.Fatalf("Wrong Bucket contents, expected %v, got %v", data, updatedObjectContents) } if err != nil { t.Fatalf("Couldn't get bucket contents: %v", err) } if len(*o) != 1 { t.Fatalf("Bucket sould contain 1 object after update, contains %d objects", len(*o)) } err = b.Del(objectPath) if err != nil { t.Fatalf("Error deleting object: %v", err) } // Check that the bucket is now empty o, err = b.GetBucketContents() if err != nil { t.Fatalf("Couldn't get bucket contents: %v", err) } if len(*o) != 0 { t.Fatalf("Bucket should be empty, but contains %d objects", len(*o)) } }
// TestEnvAuthNoAccess verifies that EnvAuth fails with a descriptive error
// when only the secret key is present in the environment.
func (s *S) TestEnvAuthNoAccess(c *C) {
	os.Clearenv()
	os.Setenv("AWS_SECRET_ACCESS_KEY", "foo")
	_, err := aws.EnvAuth()
	c.Assert(err, ErrorMatches, "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment")
}
// TestEnvAuthNoSecret verifies that EnvAuth fails with a descriptive error
// when no credentials at all are present in the environment.
func (s *S) TestEnvAuthNoSecret(c *C) {
	os.Clearenv()
	_, err := aws.EnvAuth()
	c.Assert(err, ErrorMatches, "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment")
}
// TestAutoScalingGroup walks an AutoScaling group through its lifecycle:
// create a launch configuration and group, suspend/resume processes, change
// desired capacity, update limits, and add/list/delete a scheduled action.
// Runs against the mock test server unless AWS env credentials are set, in
// which case it hits the real us-west-2 endpoint. The mock responses are
// queued in the exact order of the requests below — do not reorder calls.
func TestAutoScalingGroup(t *testing.T) {
	var as *AutoScaling
	// Launch configuration test config
	lc := new(LaunchConfiguration)
	lc.LaunchConfigurationName = "LConf1"
	lc.ImageId = "ami-03e47533" // Octave debian ami
	lc.KernelId = "aki-98e26fa8"
	lc.KeyName = "testAWS" // Replace with valid key for your account
	lc.InstanceType = "m1.small"
	// CreateAutoScalingGroup params test config
	asgReq := new(CreateAutoScalingGroupParams)
	asgReq.AutoScalingGroupName = "ASGTest1"
	asgReq.LaunchConfigurationName = lc.LaunchConfigurationName
	asgReq.DefaultCooldown = 300
	asgReq.HealthCheckGracePeriod = 300
	asgReq.DesiredCapacity = 1
	asgReq.MinSize = 1
	asgReq.MaxSize = 5
	asgReq.AvailabilityZones = []string{"us-west-2a"}
	// Local mirror of the group used for the mutation calls below.
	asg := new(AutoScalingGroup)
	asg.AutoScalingGroupName = "ASGTest1"
	asg.LaunchConfigurationName = lc.LaunchConfigurationName
	asg.DefaultCooldown = 300
	asg.HealthCheckGracePeriod = 300
	asg.DesiredCapacity = 1
	asg.MinSize = 1
	asg.MaxSize = 5
	asg.AvailabilityZones = []string{"us-west-2a"}
	awsAuth, err := aws.EnvAuth()
	if err != nil {
		mockTest = true
		t.Log("Running mock tests as AWS environment variables are not set")
		// Shadows the outer awsAuth deliberately: dummy credentials for
		// the mock endpoint only.
		awsAuth := aws.Auth{AccessKey: "abc", SecretKey: "123"}
		as = New(awsAuth, aws.Region{AutoScalingEndpoint: testServer.URL})
	} else {
		as = New(awsAuth, aws.USWest2)
	}
	// Create the launch configuration
	if mockTest {
		testServer.Response(200, nil, CreateLaunchConfigurationResponse)
	}
	_, err = as.CreateLaunchConfiguration(lc)
	if err != nil {
		t.Fatal(err)
	}
	// Check that we can get the launch configuration details
	if mockTest {
		testServer.Response(200, nil, DescribeLaunchConfigurationsResponse)
	}
	_, err = as.DescribeLaunchConfigurations([]string{lc.LaunchConfigurationName}, 10, "")
	if err != nil {
		t.Fatal(err)
	}
	// Create the AutoScalingGroup
	if mockTest {
		testServer.Response(200, nil, CreateAutoScalingGroupResponse)
	}
	_, err = as.CreateAutoScalingGroup(asgReq)
	if err != nil {
		t.Fatal(err)
	}
	// Check that we can get the autoscaling group details
	if mockTest {
		testServer.Response(200, nil, DescribeAutoScalingGroupsResponse)
	}
	_, err = as.DescribeAutoScalingGroups(nil, 10, "")
	if err != nil {
		t.Fatal(err)
	}
	// Suspend the scaling processes for the test AutoScalingGroup
	if mockTest {
		testServer.Response(200, nil, SuspendProcessesResponse)
	}
	_, err = as.SuspendProcesses(asg.AutoScalingGroupName, nil)
	if err != nil {
		t.Fatal(err)
	}
	// Resume scaling processes for the test AutoScalingGroup
	if mockTest {
		testServer.Response(200, nil, ResumeProcessesResponse)
	}
	_, err = as.ResumeProcesses(asg.AutoScalingGroupName, nil)
	if err != nil {
		t.Fatal(err)
	}
	// Change the desired capacity from 1 to 2. This will launch a second instance
	if mockTest {
		testServer.Response(200, nil, SetDesiredCapacityResponse)
	}
	_, err = as.SetDesiredCapacity(asg.AutoScalingGroupName, 2, false)
	if err != nil {
		t.Fatal(err)
	}
	// Change the desired capacity from 2 to 1. This will terminate one of the instances
	if mockTest {
		testServer.Response(200, nil, SetDesiredCapacityResponse)
	}
	_, err = as.SetDesiredCapacity(asg.AutoScalingGroupName, 1, false)
	if err != nil {
		t.Fatal(err)
	}
	// Update the max capacity for the scaling group
	if mockTest {
		testServer.Response(200, nil, UpdateAutoScalingGroupResponse)
	}
	asg.MinSize = 1
	asg.MaxSize = 6
	asg.DesiredCapacity = 1
	_, err = as.UpdateAutoScalingGroup(asg)
	if err != nil {
		t.Fatal(err)
	}
	// Add a scheduled action to the group
	psar := new(PutScheduledUpdateGroupActionParams)
	psar.AutoScalingGroupName = asg.AutoScalingGroupName
	psar.MaxSize = 4
	psar.ScheduledActionName = "SATest1"
	psar.Recurrence = "30 0 1 1,6,12 *"
	if mockTest {
		testServer.Response(200, nil, PutScheduledUpdateGroupActionResponse)
	}
	_, err = as.PutScheduledUpdateGroupAction(psar)
	if err != nil {
		t.Fatal(err)
	}
	// List the scheduled actions for the group
	sar := new(DescribeScheduledActionsParams)
	sar.AutoScalingGroupName = asg.AutoScalingGroupName
	if mockTest {
		testServer.Response(200, nil, DescribeScheduledActionsResponse)
	}
	_, err = as.DescribeScheduledActions(sar)
	if err != nil {
		t.Fatal(err)
	}
	// Delete the test scheduled action from the group
	if mockTest {
		testServer.Response(200, nil, DeleteScheduledActionResponse)
	}
	_, err = as.DeleteScheduledAction(asg.AutoScalingGroupName, psar.ScheduledActionName)
	if err != nil {
		t.Fatal(err)
	}
	testServer.Flush()
}