// Configure decodes the raw Packer configuration into p.config,
// validates the required template fields, resolves AWS credentials, and
// binds the target S3 bucket. All validation problems are accumulated
// into a single packer.MultiError so the user sees them all at once.
func (p *PostProcessor) Configure(raws ...interface{}) error {
	err := config.Decode(&p.config, &config.DecodeOpts{
		Interpolate:        true,
		InterpolateContext: &p.config.ctx,
		InterpolateFilter: &interpolate.RenderFilter{
			Exclude: []string{},
		},
	}, raws...)
	if err != nil {
		return err
	}
	errs := &packer.MultiError{}
	errs = packer.MultiErrorAppend(errs, p.config.AccessConfig.Prepare(&p.config.ctx)...)
	// required configuration
	templates := map[string]*string{
		"region":   &p.config.Region,
		"bucket":   &p.config.Bucket,
		"manifest": &p.config.ManifestPath,
		"box_name": &p.config.BoxName,
		"box_dir":  &p.config.BoxDir,
		"version":  &p.config.Version,
	}
	// Template process: each required field must be non-empty and must
	// interpolate cleanly; failures are collected rather than returned
	// early so all problems are reported together.
	for key, ptr := range templates {
		if *ptr == "" {
			errs = packer.MultiErrorAppend(
				errs, fmt.Errorf("%s must be set", key))
		}
		*ptr, err = interpolate.Render(*ptr, &p.config.ctx)
		if err != nil {
			errs = packer.MultiErrorAppend(
				errs, fmt.Errorf("Error processing %s: %s", key, err))
		}
	}
	// setup the s3 bucket
	auth, err := aws.GetAuth(p.config.AccessConfig.AccessKey, p.config.AccessConfig.SecretKey)
	if err != nil {
		errs = packer.MultiErrorAppend(errs, err)
	}
	// determine region; the bucket handle is only created when the
	// region name is one of the known AWS regions
	region, valid := aws.Regions[p.config.Region]
	if valid {
		p.s3 = s3.New(auth, region).Bucket(p.config.Bucket)
	} else {
		errs = packer.MultiErrorAppend(errs, fmt.Errorf("Invalid region specified: %s", p.config.Region))
	}
	if len(errs.Errors) > 0 {
		return errs
	}
	return nil
}
// NewPublishedStorage creates new instance of PublishedStorage with specified S3 access // keys, region and bucket name func NewPublishedStorage(accessKey, secretKey, region, endpoint, bucket, defaultACL, prefix, storageClass, encryptionMethod string, plusWorkaround, disableMultiDel bool) (*PublishedStorage, error) { auth, err := aws.GetAuth(accessKey, secretKey) if err != nil { return nil, err } var awsRegion aws.Region if endpoint == "" { var ok bool awsRegion, ok = aws.Regions[region] if !ok { return nil, fmt.Errorf("unknown region: %#v", region) } } else { awsRegion = aws.Region{ Name: region, S3Endpoint: endpoint, S3LocationConstraint: true, S3LowercaseBucket: true, } } return NewPublishedStorageRaw(auth, awsRegion, bucket, defaultACL, prefix, storageClass, encryptionMethod, plusWorkaround, disableMultiDel) }
// Configure decodes the raw Packer configuration, checks that all
// vagrant-s3 required fields are present and parse as templates,
// resolves AWS credentials, binds the S3 bucket, and applies the
// default ACL. Validation problems are accumulated into a single
// packer.MultiError.
func (p *PostProcessor) Configure(raws ...interface{}) error {
	err := config.Decode(&p.config, &config.DecodeOpts{
		Interpolate:        true,
		InterpolateContext: &p.config.ctx,
		InterpolateFilter: &interpolate.RenderFilter{
			Exclude: []string{"output"},
		},
	}, raws...)
	if err != nil {
		return err
	}
	errs := new(packer.MultiError)
	// required configuration
	templates := map[string]*string{
		"region":   &p.config.Region,
		"bucket":   &p.config.Bucket,
		"manifest": &p.config.ManifestPath,
		"box_name": &p.config.BoxName,
		"box_dir":  &p.config.BoxDir,
		"version":  &p.config.Version,
	}
	// every required field must be non-empty
	for key, ptr := range templates {
		if *ptr == "" {
			errs = packer.MultiErrorAppend(errs, fmt.Errorf("vagrant-s3 %s must be set", key))
		}
	}
	// Template process: each field must also be a syntactically valid
	// interpolation template
	for key, ptr := range templates {
		if err = interpolate.Validate(*ptr, &p.config.ctx); err != nil {
			errs = packer.MultiErrorAppend(
				errs, fmt.Errorf("Error parsing %s template: %s", key, err))
		}
	}
	auth, err := aws.GetAuth(p.config.AccessKey, p.config.SecretKey)
	if err != nil {
		errs = packer.MultiErrorAppend(errs, fmt.Errorf("Unable to create Aws Authentication. Try providing keys 'access_key_id' and 'secret_key'"))
	}
	// determine region; the bucket handle is only created for a known
	// region name
	region, valid := aws.Regions[p.config.Region]
	if valid {
		p.s3 = s3.New(auth, region).Bucket(p.config.Bucket)
	} else {
		errs = packer.MultiErrorAppend(errs, fmt.Errorf("Invalid region specified: %s", p.config.Region))
	}
	// public-read is the default ACL when none was configured
	if p.config.ACL == "" {
		p.config.ACL = "public-read"
	}
	if len(errs.Errors) > 0 {
		return errs
	}
	return nil
}
func buildRemoteStore(s *stores) (err error) { if remoteStore == "" { return nil } var c store.Client if strings.HasPrefix(remoteStore, "s3://") { path := strings.TrimPrefix(remoteStore, "s3://") bucketPathSplit := strings.Split(path, "/") if len(bucketPathSplit) == 0 { return fmt.Errorf("invalid S3 path: %#v\n", remoteStore) } bucket := bucketPathSplit[0] var auth aws.Auth auth, err = aws.GetAuth("", "") // Extract credentials from the current instance. if err != nil { return fmt.Errorf("error getting AWS credentials: %v", err) } c = store.NewS3Client(bucket, auth, aws.APSoutheast2) } else { c = store.NewClient(remoteStore, "") s.artwork = store.NewRemoteFileSystem(store.NewClient(remoteStore, "artwork")) } s.media = store.NewRemoteChunkedFileSystem(c, 32*1024) if s.artwork == nil { s.artwork = store.Trace(store.ArtworkFileSystem(s.media), "artwork") } return nil }
func main() { flag.Parse() // Instantiates the CoreRoller updater to check periodically for version update. if updater, err := updater.New(30*time.Second, syscall.SIGTERM); err == nil { go updater.Start() } awsAuth, err := aws.GetAuth(config.awsAccessKey, config.awsSecretKey) if err != nil { log.Println(err) } manager := &Manager{ configPath: config.etcdPath, etcdClient: etcd.NewClient(strings.Split(config.etcdHost, ",")), awsAuth: awsAuth, } log.Println("Running load balancers manager...") go manager.Start() // Wait for signal to terminate signalsCh := make(chan os.Signal, 1) signal.Notify(signalsCh, os.Interrupt, syscall.SIGTERM) <-signalsCh }
func (this *Manager) init(accessKey string, secretKey string) error { auth, err := aws.GetAuth(accessKey, secretKey) if err == nil { this.auth = &auth } return err }
func (gozo Gozo) SendImage(filename string) (url string, err error) { data, err := ioutil.ReadFile(filename) if err != nil { return } auth, err := aws.GetAuth( gozo.accessKey, gozo.secretAccessKey, ) if err != nil { return } s3client := s3.New(auth, gozo.region) bucket := s3client.Bucket(gozo.bucketName) path := "images/" + hexdigest(fmt.Sprintf("%s-%d", filename, time.Now().Unix())) + ".png" err = bucket.Put(path, data, "image/png", s3.PublicRead) if err != nil { return } url = gozo.rootURL + path return }
func (s *S) TestGetAuthEnv(c *C) { os.Clearenv() os.Setenv("AWS_SECRET_ACCESS_KEY", "secret") os.Setenv("AWS_ACCESS_KEY_ID", "access") auth, err := aws.GetAuth("", "") c.Assert(err, IsNil) c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"}) }
func (cache *EC2Cache) Instances() (*ec2.InstancesResp, error) { auth, err := aws.GetAuth(cache.accessKey, cache.secretKey) if err != nil { return nil, err } return ec2.New(auth, cache.region).Instances(nil, nil) }
func (s3p *s3Provider) getAuth(accessKey, secretKey string) (aws.Auth, error) { if s3p.overrideAuth != nilAuth { s3p.log.WithField("auth", s3p.overrideAuth).Debug("using override auth") return s3p.overrideAuth, nil } s3p.log.Debug("creating new auth") return aws.GetAuth(accessKey, secretKey) }
// NewS3 builds an S3 client bound to the given bucket and key prefix.
//
// NOTE(review): the error from aws.GetAuth is silently discarded — bad
// or missing credentials will only surface later as request failures.
// Propagating it would require an interface change; flagging for now.
func NewS3(key, secret, bucket, prefix string) S3 {
	auth, _ := aws.GetAuth(key, secret)
	region := aws.USEast // haha for life. TODO - configurable?
	client := s3.New(auth, region)
	return &s3client{
		client: client,
		bucket: client.Bucket(bucket),
		prefix: prefix,
	}
}
func (s *S3) Open() (err error) { auth, err := aws.GetAuth(s.AccessKey, s.SecretKey) if err != nil { return } s.conn = s3.New(auth, aws.USEast) // TODO: hardcoded region..? s.bucket = s.conn.Bucket(s.BucketId) return nil // TODO: no errors ever..? }
// AWSAuth returns a valid aws.Auth object for access to AWS services, or // an error if the authentication couldn't be resolved. // // TODO(mitchellh): Test in some way. func (c *Config) AWSAuth() (aws.Auth, error) { auth, err := aws.GetAuth(c.AccessKey, c.SecretKey) if err == nil { // Store the accesskey and secret that we got... c.AccessKey = auth.AccessKey c.SecretKey = auth.SecretKey } return auth, err }
func (s *S3) getBucket() *s3.Bucket { auth, err := aws.GetAuth(s.AccessKeyId, s.SecretKey) if err != nil { log.Printf("Amazon authentication failed", err) return nil } service := s3.New(auth, aws.EUWest) return service.Bucket(s.Bucket) }
// NewDriver creates a driver for S3 paths func NewDriver(accessKey, secretKey string, region aws.Region) (*Driver, error) { // Authenticate -- will fall back to ~/.aws then to environment variables auth, err := aws.GetAuth(accessKey, secretKey) if err != nil { return nil, err } return &Driver{ Region: region, Auth: auth, }, nil }
// NewPublishedStorage creates new instance of PublishedStorage with specified S3 access // keys, region and bucket name func NewPublishedStorage(accessKey, secretKey, region, bucket, defaultACL, prefix string) (*PublishedStorage, error) { auth, err := aws.GetAuth(accessKey, secretKey) if err != nil { return nil, err } awsRegion, ok := aws.Regions[region] if !ok { return nil, fmt.Errorf("unknown region: %#v", region) } return NewPublishedStorageRaw(auth, awsRegion, bucket, defaultACL, prefix) }
// Auth returns a valid aws.Auth object for access to AWS services, or // an error if the authentication couldn't be resolved. func (c *AccessConfig) Auth() (aws.Auth, error) { auth, err := aws.GetAuth(c.AccessKey, c.SecretKey) if err == nil { // Store the accesskey and secret that we got... c.AccessKey = auth.AccessKey c.SecretKey = auth.SecretKey c.Token = auth.Token } if c.Token != "" { auth.Token = c.Token } return auth, err }
// main wires up the gosync CLI: flag definitions, AWS credential
// resolution, and the source→target sync action.
func main() {
	app := cli.NewApp()
	app.Name = "gosync"
	app.Usage = "gosync OPTIONS SOURCE TARGET"
	app.Version = version.Version()
	app.Flags = []cli.Flag{
		cli.IntFlag{"concurrent, c", 20, "number of concurrent transfers", ""},
		cli.StringFlag{"log-level, l", "info", "log level", ""},
		cli.StringFlag{"aws-secret-access-key", "", "AWS Secret Access Key", ""},
		cli.StringFlag{"aws-access-key-id", "", "AWS Access Key Id", ""},
		cli.StringFlag{"aws-security-token", "", "AWS Security Token", ""},
	}
	// NOTE(review): this constant is never used — the concurrency value
	// actually comes from the "concurrent" flag below. Candidate for
	// removal.
	const concurrent = 20
	app.Action = func(c *cli.Context) {
		defer log.Flush()
		setLogLevel(c.String("log-level"))
		err := validateArgs(c)
		exitOnError(err)
		// Credentials: explicit flags win; aws.GetAuth falls back to its
		// own resolution chain when they are empty.
		key := c.String("aws-access-key-id")
		secret := c.String("aws-secret-access-key")
		token := c.String("aws-security-token")
		auth, err := aws.GetAuth(key, secret)
		exitOnError(err)
		if token != "" {
			auth.Token = token
		}
		source := c.Args()[0]
		log.Infof("Setting source to '%s'.", source)
		target := c.Args()[1]
		log.Infof("Setting target to '%s'.", target)
		syncPair := gosync.NewSyncPair(auth, source, target)
		syncPair.Concurrent = c.Int("concurrent")
		log.Infof("Setting concurrent transfers to '%d'.", syncPair.Concurrent)
		err = syncPair.Sync()
		exitOnError(err)
		log.Infof("Syncing completed successfully.")
	}
	app.Run(os.Args)
}
func (self *FSS3) Initialize() error { auth, err := aws.GetAuth(self.AccessKey, self.SecretKey) if err != nil { return err } self.auth = &auth conn := s3.New(*self.auth, self.RegionObj) self.conn = conn if self.conn == nil { return errors.New("Unable to initialize s3 driver") } bucket := self.conn.Bucket(self.BucketName) self.bucket = bucket return nil }
func (s *s3Store) Open() (err error) { if s.opened { return } auth, err := aws.GetAuth(s.AccessKey, s.SecretKey) if err != nil { return } s.conn = s3.New(auth, aws.USEast) // TODO: hardcoded region..? s.bucket = s.conn.Bucket(s.BucketId) s.opened = true return }
// main lists the objects under the configured S3 bucket/prefix and
// serves an HTML view of them (plus /static assets) over HTTP(S).
func main() {
	// auth with aws
	auth, err := aws.GetAuth(s3AccessKey, s3SecretKey)
	if err != nil {
		logrus.Fatalf("Could not auth to AWS: %v", err)
	}
	// create the client
	region, err := getRegion(s3Region)
	if err != nil {
		logrus.Fatal(err)
	}
	client := s3.New(auth, region)
	// get the files in the bucket
	bucket, prefix := cleanBucketName(s3Bucket)
	// get the bucket
	b := client.Bucket(bucket)
	// NOTE(review): the listing is fetched once at startup (up to 2000
	// entries); objects added later will not appear without a restart.
	files, err := listFiles(prefix, prefix, "", 2000, b)
	if err != nil {
		logrus.Fatalf("Listing all files in bucket failed: %v", err)
	}
	// create mux server
	mux := http.NewServeMux()
	// static files handler
	staticHandler := http.StripPrefix("/static/", http.FileServer(http.Dir("/src/static")))
	mux.Handle("/static/", staticHandler)
	// template handler
	h := Handler{
		Files: files,
	}
	mux.HandleFunc("/", h.serveTemplate)
	// set up the server
	server := &http.Server{
		Addr:    ":" + port,
		Handler: mux,
	}
	logrus.Infof("Starting server on port %q", port)
	// serve TLS only when both a certificate and a key are configured
	if certFile != "" && keyFile != "" {
		logrus.Fatal(server.ListenAndServeTLS(certFile, keyFile))
	} else {
		logrus.Fatal(server.ListenAndServe())
	}
}
func (s *PublishedStorageSuite) SetUpTest(c *C) { var err error s.srv, err = s3test.NewServer(&s3test.Config{}) c.Assert(err, IsNil) c.Assert(s.srv, NotNil) auth, _ := aws.GetAuth("aa", "bb") s.storage, err = NewPublishedStorageRaw(auth, aws.Region{Name: "test-1", S3Endpoint: s.srv.URL(), S3LocationConstraint: true}, "test", "", "", "", "", false, true) c.Assert(err, IsNil) s.prefixedStorage, err = NewPublishedStorageRaw(auth, aws.Region{Name: "test-1", S3Endpoint: s.srv.URL(), S3LocationConstraint: true}, "test", "", "lala", "", "", false, true) c.Assert(err, IsNil) err = s.storage.s3.Bucket("test").PutBucket("private") c.Assert(err, IsNil) }
func main() { flag.Parse() awsAuth, err := aws.GetAuth(config.awsAccessKey, config.awsSecretKey) if err != nil { log.Println(err) } manager := &Manager{ configPath: config.etcdPath, etcdClient: etcd.NewClient([]string{config.etcdHost}), awsAuth: awsAuth, } log.Println("Running load balancers manager...") manager.Start() }
func (this *Factory) NewS3ImageStore(conf map[string]string) ImageStore { bucket := conf["BucketName"] auth, err := aws.GetAuth(conf["AWSKey"], conf["AWSSecret"]) if err != nil { log.Fatal(err) } client := s3.New(auth, aws.Regions[conf["Region"]]) mapper := NewNamePathMapper(conf["NamePathRegex"], conf["NamePathMap"]) return NewS3ImageStore( bucket, conf["StoreRoot"], client, mapper, ) }
// NewDNSProviderRoute53 returns a DNSProviderRoute53 instance with a configured route53 client. // Authentication is either done using the passed credentials or - when empty - falling back to // the customary AWS credential mechanisms, including the file refernced by $AWS_CREDENTIAL_FILE // (defaulting to $HOME/.aws/credentials) optionally scoped to $AWS_PROFILE, credentials // supplied by the environment variables AWS_ACCESS_KEY_ID + AWS_SECRET_ACCESS_KEY [ + AWS_SECURITY_TOKEN ], // and finally credentials available via the EC2 instance metadata service. func NewDNSProviderRoute53(awsAccessKey, awsSecretKey, awsRegionName string) (*DNSProviderRoute53, error) { region, ok := aws.Regions[awsRegionName] if !ok { return nil, fmt.Errorf("Invalid AWS region name %s", awsRegionName) } // use aws.GetAuth, which tries really hard to find credentails: // - uses awsAccessKey and awsSecretKey, if provided // - uses AWS_PROFILE / AWS_CREDENTIAL_FILE, if provided // - uses AWS_ACCESS_KEY_ID + AWS_SECRET_ACCESS_KEY and optionally AWS_SECURITY_TOKEN, if provided // - uses EC2 instance metadata credentials (http://169.254.169.254/latest/meta-data/…), if available // ...and otherwise returns an error if auth, err := aws.GetAuth(awsAccessKey, awsSecretKey); err != nil { return nil, err } else { client := route53.New(auth, region) return &DNSProviderRoute53{client: client}, nil } }
func (so *S3Output) Init(config interface{}) (err error) { so.config = config.(*S3OutputConfig) auth, err := aws.GetAuth(so.config.AccessKey, so.config.SecretKey) if err != nil { return } region, ok := aws.Regions[so.config.Region] if !ok { err = errors.New("Region of that name not found.") return } so.client = s3.New(auth, region) so.bucket = so.client.Bucket(so.config.Bucket) prefixList := strings.Split(so.config.Prefix, "/") bufferFileName := so.config.Bucket + strings.Join(prefixList, "_") so.bufferFilePath = so.config.BufferPath + "/" + bufferFileName return }
func printList(cfg *configuration) { auth, err := aws.GetAuth(cfg.AccessKey, cfg.SecretKey) if err != nil { die("Error creating AWS auth:\n%s\n", err) } e := ec2.New(auth, aws.EUWest) instances, err := e.Instances([]string{}, nil) if err != nil { die("Error fetching EC2 instances:\n%s\n", err) } inv, err := newInventory(instances) if err != nil { die("Error creating inventory from EC2 instances:\n%s\n", err) } invJSON, err := inv.toJSON() if err != nil { die("Error generatin inventory JSON:\n%s\n", err) } os.Stdout.Write(invJSON) }
// NewS3Store initializes an *S3Store. Wow!
//
// Credentials are resolved via aws.GetAuth (explicit key/secret first,
// then the library's fallbacks). An unrecognized region name falls back
// to us-east-1 with a warning rather than failing.
func NewS3Store(key, secret, bucket, regionName string, log *logrus.Logger, md metadata.LookupSaver) (*S3Store, error) {
	log.Debug("getting aws auth")
	auth, err := aws.GetAuth(key, secret)
	if err != nil {
		log.WithField("err", err).Error("failed to get auth")
		return nil, err
	}
	region, ok := aws.Regions[regionName]
	if !ok {
		log.WithFields(logrus.Fields{
			"region": regionName,
		}).Warn(fmt.Sprintf("nonexistent region, falling back to %s", aws.USEast.Name))
		region = aws.USEast
	}
	log.Debug("getting new s3 connection")
	s3Conn := s3.New(auth, region)
	b := s3Conn.Bucket(bucket)
	// reject an unusable bucket handle up front
	if b == nil || b.Name == "" {
		return nil, errNoBucket
	}
	log.WithFields(logrus.Fields{
		"bucket": b.Name,
	}).Debug("got back this bucket")
	return &S3Store{
		key:    key,
		secret: secret,
		bucket: bucket,
		log:    log,
		md:     md,
		b:      b,
	}, nil
}
func (s *S3Config) Upload(fileDir string, progressFunc reader.ProgressReaderCallbackFunc) error { if len(s.Acl) == 0 { s.Acl = string(s3.PublicReadWrite) } auth, err := aws.GetAuth(s.AccessKey, s.SecretKey) s3client := s3.New(auth, aws.Region{Name: "us-east-1", S3Endpoint: s.S3Endpoint}) filename := filepath.Join(fileDir, s.File) fmt.Println("start s3upload:", filename) b, err := s.Bucket(s3client) if err != nil { return err } f, err := os.Stat(filename) if err != nil { return err } file, err := os.Open(filename) if err != nil { return err } defer file.Close() progressR := &reader.Reader{ Reader: file, Size: f.Size(), DrawFunc: progressFunc, } err = b.PutReader(s.File, progressR, f.Size(), "application/octet-stream", s3.ACL(s.Acl)) //err = b.Put("zoujtw2015-12-16.mkv", file, "content-type", s3.PublicReadWrite) if err != nil { return err } fmt.Println("s3 upload file succeed!!!", file.Name()) return nil }
// newProvider returns a cloud storage provider for the given bucket: an
// s3Provider when provider == "s3", otherwise a gcsProvider backed by
// Google Cloud Storage.
func newProvider(provider, bucket, s3Region, s3AccessKey, s3SecretKey string) (cloud, error) {
	if provider == "s3" {
		// auth with aws
		auth, err := aws.GetAuth(s3AccessKey, s3SecretKey)
		if err != nil {
			return nil, err
		}
		// create the client
		region, err := getRegion(s3Region)
		if err != nil {
			return nil, err
		}
		p := s3Provider{bucket: bucket}
		p.client = s3.New(auth, region)
		// NOTE: cleanBucketName splits the configured name into bucket +
		// prefix; this deliberately shadows the `bucket` parameter with
		// the cleaned bucket name, while p.bucket keeps the original.
		bucket, p.prefix = cleanBucketName(p.bucket)
		p.b = p.client.Bucket(bucket)
		p.baseURL = p.bucket + ".s3.amazonaws.com"
		return &p, nil
	}
	p := gcsProvider{bucket: bucket}
	p.ctx = context.Background()
	client, err := storage.NewClient(p.ctx)
	if err != nil {
		return nil, err
	}
	p.client = client
	p.bucket, p.prefix = cleanBucketName(p.bucket)
	p.b = client.Bucket(p.bucket)
	p.baseURL = p.bucket
	// buckets containing "j3ss.co" are served from their own domain;
	// everything else goes through the GCS public endpoint
	if !strings.Contains(p.bucket, "j3ss.co") {
		p.baseURL += ".storage.googleapis.com"
	}
	return &p, nil
}