func NewS3Session(auth *aws.Auth, region aws.Region) *s3.S3 {
	var s3Session *s3.S3
	cert := x509.Certificate{}
	// go's systemVerify panics with no verify options set
	// TODO: EVG-483
	if runtime.GOOS == "windows" {
		s3Session = s3.New(*auth, region)
		s3Session.ReadTimeout = S3ReadTimeout
		s3Session.WriteTimeout = S3WriteTimeout
		s3Session.ConnectTimeout = S3ConnectTimeout
		return s3Session
	}
	// no verify options so system root ca will be used
	_, err := cert.Verify(x509.VerifyOptions{})
	rootsError := x509.SystemRootsError{}
	if err != nil && err.Error() == rootsError.Error() {
		// create a Transport which includes our TLSConfig with InsecureSkipVerify
		// and client timeouts.
		tlsConfig := tls.Config{InsecureSkipVerify: true}
		tr := http.Transport{
			TLSClientConfig: &tlsConfig,
		}
		// add the Transport to our http client
		client := &http.Client{Transport: &tr}
		s3Session = s3.New(*auth, region, client)
	} else {
		s3Session = s3.New(*auth, region)
	}
	s3Session.ReadTimeout = S3ReadTimeout
	s3Session.WriteTimeout = S3WriteTimeout
	s3Session.ConnectTimeout = S3ConnectTimeout
	return s3Session
}
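// Hypothetical usage sketch (not part of the original source): one way the
// NewS3Session helper above might be called. The use of aws.EnvAuth,
// aws.USEast, and the bucket/key names are assumptions for illustration only.
func exampleNewS3SessionUsage() ([]byte, error) {
	auth, err := aws.EnvAuth() // read AWS credentials from the environment
	if err != nil {
		return nil, err
	}
	session := NewS3Session(&auth, aws.USEast)
	// the returned *s3.S3 already has read/write/connect timeouts applied
	return session.Bucket("example-bucket").Get("some/key")
}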
func NewS3Session(auth *aws.Auth, region aws.Region) *s3.S3 {
	if runtime.GOOS == "darwin" {
		// create a Transport which includes our TLS config
		tlsConfig := tls.Config{InsecureSkipVerify: true}
		tr := http.Transport{TLSClientConfig: &tlsConfig}
		// add the Transport to our http client
		client := &http.Client{Transport: &tr}
		return s3.New(*auth, region, client)
	}
	return s3.New(*auth, region)
}
func ReadFile(path string) ([]byte, *model.AppError) {
	if utils.Cfg.FileSettings.DriverName == model.IMAGE_DRIVER_S3 {
		var auth aws.Auth
		auth.AccessKey = utils.Cfg.FileSettings.AmazonS3AccessKeyId
		auth.SecretKey = utils.Cfg.FileSettings.AmazonS3SecretAccessKey

		s := s3.New(auth, awsRegion())
		bucket := s.Bucket(utils.Cfg.FileSettings.AmazonS3Bucket)

		// try to get the file from S3 with some basic retry logic
		tries := 0
		for {
			tries++

			f, err := bucket.Get(path)

			if f != nil {
				return f, nil
			} else if tries >= 3 {
				return nil, model.NewLocAppError("ReadFile", "api.file.read_file.get.app_error", nil, "path="+path+", err="+err.Error())
			}
			time.Sleep(3000 * time.Millisecond)
		}
	} else if utils.Cfg.FileSettings.DriverName == model.IMAGE_DRIVER_LOCAL {
		if f, err := ioutil.ReadFile(utils.Cfg.FileSettings.Directory + path); err != nil {
			return nil, model.NewLocAppError("ReadFile", "api.file.read_file.reading_local.app_error", nil, err.Error())
		} else {
			return f, nil
		}
	} else {
		return nil, model.NewLocAppError("ReadFile", "api.file.read_file.configured.app_error", nil, "")
	}
}
func uploadImageToS3(path string, fileName string) error {
	fmt.Printf("Filename: %s\n", fileName)
	auth := aws.Auth{
		AccessKey: os.Getenv("ACCESS_KEY"),
		SecretKey: os.Getenv("SECRET_KEY"),
	}
	var region = aws.USEast
	client := s3.New(auth, region)

	data, err := ioutil.ReadFile(fileName)
	if err != nil {
		panic("error reading file! " + fileName)
	}

	bucket := client.Bucket("mesos-hackathon-bucket")
	options := s3.Options{}
	fmt.Printf("Path: %s\n", path)
	err = bucket.Put(path, data, "binary/octet-stream", s3.PublicRead, options)
	if err != nil {
		return err
	}
	return nil
}
func writeFile(f []byte, path string) *model.AppError {
	if utils.Cfg.FileSettings.DriverName == model.IMAGE_DRIVER_S3 {
		var auth aws.Auth
		auth.AccessKey = utils.Cfg.FileSettings.AmazonS3AccessKeyId
		auth.SecretKey = utils.Cfg.FileSettings.AmazonS3SecretAccessKey

		s := s3.New(auth, awsRegion())
		bucket := s.Bucket(utils.Cfg.FileSettings.AmazonS3Bucket)

		ext := filepath.Ext(path)

		var err error
		if model.IsFileExtImage(ext) {
			options := s3.Options{}
			err = bucket.Put(path, f, model.GetImageMimeType(ext), s3.Private, options)
		} else {
			options := s3.Options{}
			err = bucket.Put(path, f, "binary/octet-stream", s3.Private, options)
		}

		if err != nil {
			return model.NewAppError("writeFile", "Encountered an error writing to S3", err.Error())
		}
	} else if utils.Cfg.FileSettings.DriverName == model.IMAGE_DRIVER_LOCAL {
		if err := writeFileLocally(f, utils.Cfg.FileSettings.Directory+path); err != nil {
			return err
		}
	} else {
		return model.NewAppError("writeFile", "File storage not configured properly. Please configure for either S3 or local server file storage.", "")
	}

	return nil
}
func TestPutGet(t *testing.T) {
	Reset(t)
	auth, err := aws.EnvAuth()
	if err != nil {
		t.Error(err)
	}
	s := s3.New(auth, localRegion)
	b := s.Bucket("TestBucket")
	err = b.PutBucket("acl")
	if err != nil {
		t.Fatal(err)
	}
	o, err := b.GetBucketContents()
	if err != nil {
		t.Fatal(err)
	}
	if len(*o) != 0 {
		t.Fatalf("Bucket should be empty, but has %d object", len(*o))
	}
}
func moveFile(oldPath, newPath string) *model.AppError {
	if utils.Cfg.FileSettings.DriverName == model.IMAGE_DRIVER_S3 {
		fileData := make(chan []byte)
		getFileAndForget(oldPath, fileData)
		fileBytes := <-fileData

		if fileBytes == nil {
			return model.NewLocAppError("moveFile", "api.file.move_file.get_from_s3.app_error", nil, "")
		}

		var auth aws.Auth
		auth.AccessKey = utils.Cfg.FileSettings.AmazonS3AccessKeyId
		auth.SecretKey = utils.Cfg.FileSettings.AmazonS3SecretAccessKey

		s := s3.New(auth, awsRegion())
		bucket := s.Bucket(utils.Cfg.FileSettings.AmazonS3Bucket)

		if err := bucket.Del(oldPath); err != nil {
			return model.NewLocAppError("moveFile", "api.file.move_file.delete_from_s3.app_error", nil, err.Error())
		}

		if err := writeFile(fileBytes, newPath); err != nil {
			return err
		}
	} else if utils.Cfg.FileSettings.DriverName == model.IMAGE_DRIVER_LOCAL {
		if err := os.Rename(utils.Cfg.FileSettings.Directory+oldPath, utils.Cfg.FileSettings.Directory+newPath); err != nil {
			return model.NewLocAppError("moveFile", "api.file.move_file.rename.app_error", nil, err.Error())
		}
	} else {
		return model.NewLocAppError("moveFile", "api.file.move_file.configured.app_error", nil, "")
	}

	return nil
}
func readFile(path string) ([]byte, *model.AppError) {
	if utils.IsS3Configured() && !utils.Cfg.ServiceSettings.UseLocalStorage {
		var auth aws.Auth
		auth.AccessKey = utils.Cfg.AWSSettings.S3AccessKeyId
		auth.SecretKey = utils.Cfg.AWSSettings.S3SecretAccessKey

		s := s3.New(auth, aws.Regions[utils.Cfg.AWSSettings.S3Region])
		bucket := s.Bucket(utils.Cfg.AWSSettings.S3Bucket)

		// try to get the file from S3 with some basic retry logic
		tries := 0
		for {
			tries++

			f, err := bucket.Get(path)

			if f != nil {
				return f, nil
			} else if tries >= 3 {
				return nil, model.NewAppError("readFile", "Unable to get file from S3", "path="+path+", err="+err.Error())
			}
			time.Sleep(3000 * time.Millisecond)
		}
	} else if utils.Cfg.ServiceSettings.UseLocalStorage && len(utils.Cfg.ServiceSettings.StorageDirectory) > 0 {
		if f, err := ioutil.ReadFile(utils.Cfg.ServiceSettings.StorageDirectory + path); err != nil {
			return nil, model.NewAppError("readFile", "Encountered an error reading from local server storage", err.Error())
		} else {
			return f, nil
		}
	} else {
		return nil, model.NewAppError("readFile", "File storage not configured properly. Please configure for either S3 or local server file storage.", "")
	}
}
func WriteFile(f []byte, path string) *model.AppError {
	if utils.Cfg.FileSettings.DriverName == model.IMAGE_DRIVER_S3 {
		var auth aws.Auth
		auth.AccessKey = utils.Cfg.FileSettings.AmazonS3AccessKeyId
		auth.SecretKey = utils.Cfg.FileSettings.AmazonS3SecretAccessKey

		s := s3.New(auth, awsRegion())
		bucket := s.Bucket(utils.Cfg.FileSettings.AmazonS3Bucket)

		ext := filepath.Ext(path)

		var err error
		if model.IsFileExtImage(ext) {
			options := s3.Options{}
			err = bucket.Put(path, f, model.GetImageMimeType(ext), s3.Private, options)
		} else {
			options := s3.Options{}
			err = bucket.Put(path, f, "binary/octet-stream", s3.Private, options)
		}

		if err != nil {
			return model.NewLocAppError("WriteFile", "api.file.write_file.s3.app_error", nil, err.Error())
		}
	} else if utils.Cfg.FileSettings.DriverName == model.IMAGE_DRIVER_LOCAL {
		if err := WriteFileLocally(f, utils.Cfg.FileSettings.Directory+path); err != nil {
			return err
		}
	} else {
		return model.NewLocAppError("WriteFile", "api.file.write_file.configured.app_error", nil, "")
	}

	return nil
}
func (rc *Recommender) LoadBackup() (success bool) {
	log.Info("Loading backup from S3:", rc.identifier)

	auth, err := aws.EnvAuth()
	if err != nil {
		log.Error("Problem trying to connect with AWS:", err)
		return false
	}

	s := s3.New(auth, rc.s3Region)
	bucket := s.Bucket(S3BUCKET)
	jsonData, err := bucket.Get(rc.getS3Path())
	if err != nil {
		log.Info("Problem trying to get backup from S3:", err)
		return false
	}

	dataFromJSON := [][]uint64{}
	json.Unmarshal(rc.uncompress(jsonData), &dataFromJSON)
	log.Info("Data loaded from S3:", rc.identifier, "len:", len(dataFromJSON))

	recs := 0
	for _, record := range dataFromJSON {
		scores := make(map[uint64]uint8)
		for i := 1; i < len(record); i += 2 {
			scores[record[i]] = uint8(record[i+1])
		}
		recs += len(scores)
		rc.AddRecord(record[0], scores)
	}

	return true
}
// Communicate with all endpoints to see if they are alive.
func (s *ClientTests) TestRegions(c *gocheck.C) {
	errs := make(chan error, len(aws.Regions))
	for _, region := range aws.Regions {
		go func(r aws.Region) {
			s := s3.New(s.s3.Auth, r)
			b := s.Bucket("goamz-" + s.Auth.AccessKey)
			_, err := b.Get("non-existent")
			errs <- err
		}(region)
	}
	for _ = range aws.Regions {
		err := <-errs
		if err != nil {
			s3_err, ok := err.(*s3.Error)
			if ok {
				c.Check(s3_err.Code, gocheck.Matches, "NoSuchBucket")
			} else if _, ok = err.(*net.DNSError); ok {
				// Okay as well.
			} else {
				c.Errorf("Non-S3 error: %s", err)
			}
		} else {
			c.Errorf("Test should have errored but it seems to have succeeded")
		}
	}
}
func readFile(path string) ([]byte, *model.AppError) {
	if utils.Cfg.FileSettings.DriverName == model.IMAGE_DRIVER_S3 {
		var auth aws.Auth
		auth.AccessKey = utils.Cfg.FileSettings.AmazonS3AccessKeyId
		auth.SecretKey = utils.Cfg.FileSettings.AmazonS3SecretAccessKey

		s := s3.New(auth, awsRegion())
		bucket := s.Bucket(utils.Cfg.FileSettings.AmazonS3Bucket)

		// try to get the file from S3 with some basic retry logic
		tries := 0
		for {
			tries++

			f, err := bucket.Get(path)

			if f != nil {
				return f, nil
			} else if tries >= 3 {
				return nil, model.NewAppError("readFile", "Unable to get file from S3", "path="+path+", err="+err.Error())
			}
			time.Sleep(3000 * time.Millisecond)
		}
	} else if utils.Cfg.FileSettings.DriverName == model.IMAGE_DRIVER_LOCAL {
		if f, err := ioutil.ReadFile(utils.Cfg.FileSettings.Directory + path); err != nil {
			return nil, model.NewAppError("readFile", "Encountered an error reading from local server storage", err.Error())
		} else {
			return f, nil
		}
	} else {
		return nil, model.NewAppError("readFile", "File storage not configured properly. Please configure for either S3 or local server file storage.", "")
	}
}
func goamzBucket(bucketName, endpoint string) *goamz.Bucket {
	region := aws.Region{
		Name:                 "fake_region",
		S3Endpoint:           s3Server.URL(),
		S3LocationConstraint: true,
	}
	return goamz.New(aws.Auth{}, region).Bucket(bucketName)
}
func (s3p *S3Provider) Version(rawurl, previous string) (string, error) {
	ref := s3p.parse(rawurl)
	resp, err := s3.New(ref.auth, ref.region).Bucket(ref.bucket).Head(ref.path, map[string][]string{})
	if err != nil {
		return "", err
	}
	return resp.Header.Get("x-amz-version-id"), nil
}
func (s *LocalServerSuite) SetUpSuite(c *C) {
	s.srv.SetUp(c)
	s.clientTests.s3 = s3.New(s.srv.auth, s.srv.region)

	// TODO Sadly the fake server ignores auth completely right now. :-(
	s.clientTests.authIsBroken = true
	s.clientTests.Cleanup()
}
func s3Client() *s3.S3 {
	r := aws.Region{
		Name:       "jp-east",
		S3Endpoint: "https://ds.jp-east.idcfcloud.com",
	}
	auth := aws.NewAuth(os.Getenv("IDCF_ACCESS_KEY"), os.Getenv("IDCF_ACCESS_SECRET"), "", time.Now())
	return s3.New(*auth, r)
}
func GetS3File(auth *aws.Auth, s3URL string) (io.ReadCloser, error) {
	urlParsed, err := url.Parse(s3URL)
	if err != nil {
		return nil, err
	}
	session := s3.New(*auth, aws.USEast)
	bucket := session.Bucket(urlParsed.Host)
	return bucket.GetReader(urlParsed.Path)
}
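// Hypothetical usage sketch (not part of the original source): reads an object
// through GetS3File above and copies it to stdout. The URL is illustrative;
// note that url.Parse places the bucket in Host and the key in Path, which is
// what GetS3File relies on.
func exampleGetS3FileUsage(auth *aws.Auth) error {
	rc, err := GetS3File(auth, "s3://example-bucket/path/to/object")
	if err != nil {
		return err
	}
	defer rc.Close()
	_, err = io.Copy(os.Stdout, rc)
	return err
}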
func s3BucketFromConfig(c Config) *s3.Bucket {
	awsAuth := aws.Auth{
		AccessKey: c.AwsClientKey,
		SecretKey: c.AwsSecretKey,
	}
	region := aws.USEast
	connection := s3.New(awsAuth, region)
	return connection.Bucket(c.Bucket)
}
func (s *AmazonClientSuite) SetUpSuite(c *gocheck.C) {
	if !testutil.Amazon {
		c.Skip("live tests against AWS disabled (no -amazon)")
	}
	s.srv.SetUp(c)
	s.s3 = s3.New(s.srv.auth, s.Region)
	// In case tests were interrupted in the middle before.
	s.ClientTests.Cleanup()
}
// NewS3Client returns a new S3 client
func (m *AwsMgr) NewS3Client(accId, region string) (*s3.S3, error) {
	// Get Auth
	auth, err := m.GetAuth(accId)
	if err != nil {
		return nil, err
	}

	s3 := s3.New(*auth, aws.Regions[region])
	return s3, nil
}
func getBucket() (*s3.Bucket, error) {
	auth, err := aws.GetAuth(config.AWS_ACCESS_KEY, config.AWS_SECRET_KEY, "", time.Time{})
	if err != nil {
		return nil, err
	}
	conn := s3.New(auth, aws.Regions["eu-west-1"])
	b := conn.Bucket(config.BUCKET)
	return b, nil
}
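// Hypothetical usage sketch (not part of the original source): uploads a small
// payload through getBucket above. The key name, payload, and content type are
// assumptions for illustration only.
func exampleGetBucketUsage() error {
	b, err := getBucket()
	if err != nil {
		return err
	}
	// Put stores the bytes under the given key with the given ACL and options
	return b.Put("example/key.txt", []byte("hello"), "text/plain", s3.Private, s3.Options{})
}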
func (s *AmazonDomainClientSuite) SetUpSuite(c *gocheck.C) {
	if !testutil.Amazon {
		c.Skip("live tests against AWS disabled (no -amazon)")
	}
	s.srv.SetUp(c)
	region := s.Region
	region.S3BucketEndpoint = "https://${bucket}.s3.amazonaws.com"
	s.s3 = s3.New(s.srv.auth, region)
	s.ClientTests.Cleanup()
}
func (s3w S3Writer) WriteToS3(p []byte) error {
	auth := aws.Auth{
		AccessKey: s3w.AccessKey,
		SecretKey: s3w.SecretKey,
	}
	path := fmt.Sprintf("%s-%s.tar.gz", s3w.ClusterName, time.Now().UTC().Format(time.RFC3339))
	client := s3.New(auth, aws.Region{S3Endpoint: s3w.Endpoint})
	bucket := client.Bucket(s3w.Bucket)
	return bucket.Put(path, p, "application/x-gzip", s3.Private, s3.Options{})
}
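// Hypothetical usage sketch (not part of the original source): writes a
// gzip'd tarball through S3Writer.WriteToS3 above. Only the fields that
// WriteToS3 reads are set; all values are placeholders for illustration.
func exampleWriteToS3Usage(archive []byte) error {
	w := S3Writer{
		AccessKey:   os.Getenv("AWS_ACCESS_KEY"),
		SecretKey:   os.Getenv("AWS_SECRET_KEY"),
		Endpoint:    "https://s3.amazonaws.com",
		Bucket:      "example-backups",
		ClusterName: "example-cluster",
	}
	return w.WriteToS3(archive)
}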
func getProfileImage(c *Context, w http.ResponseWriter, r *http.Request) {
	params := mux.Vars(r)
	id := params["id"]

	if result := <-Srv.Store.User().Get(id); result.Err != nil {
		c.Err = result.Err
		return
	} else {
		var img []byte
		var err *model.AppError

		if !utils.IsS3Configured() {
			img, err = createProfileImage(result.Data.(*model.User).Username, id)
			if err != nil {
				c.Err = err
				return
			}
		} else {
			var auth aws.Auth
			auth.AccessKey = utils.Cfg.AWSSettings.S3AccessKeyId
			auth.SecretKey = utils.Cfg.AWSSettings.S3SecretAccessKey

			s := s3.New(auth, aws.Regions[utils.Cfg.AWSSettings.S3Region])
			bucket := s.Bucket(utils.Cfg.AWSSettings.S3Bucket)

			path := "teams/" + c.Session.TeamId + "/users/" + id + "/profile.png"

			if data, getErr := bucket.Get(path); getErr != nil {
				img, err = createProfileImage(result.Data.(*model.User).Username, id)
				if err != nil {
					c.Err = err
					return
				}

				options := s3.Options{}
				if err := bucket.Put(path, img, "image", s3.Private, options); err != nil {
					c.Err = model.NewAppError("getImage", "Couldn't upload default profile image", err.Error())
					return
				}
			} else {
				img = data
			}
		}

		if c.Session.UserId == id {
			w.Header().Set("Cache-Control", "max-age=300, public") // 5 mins
		} else {
			w.Header().Set("Cache-Control", "max-age=86400, public") // 24 hrs
		}

		w.Write(img)
	}
}
func (s *S3MultipartUploadSession) getS3Bucket() *s3.Bucket {
	auth := s.awsAuth()
	s3 := s3.New(auth, aws.Regions[s.s3Region])
	s3.ConnectTimeout = time.Second * 10
	s3.ReadTimeout = time.Second * 20
	s3.WriteTimeout = time.Second * 20
	s3.RequestTimeout = time.Second * 120
	return s3.Bucket(s.s3Bucket)
}
func s3GetBucket() *s3.Bucket {
	s3VerifyHasSecrets()
	auth := aws.Auth{
		AccessKey: s3AwsAccess,
		SecretKey: s3AwsSecret,
	}
	// Note: it's important that region is aws.USEast. This is where my bucket
	// is and giving a different region will fail
	// TODO: make aws.USEast a variable s3BucketRegion, to allow over-ride
	s3Obj := s3.New(auth, aws.USEast, getS3Client())
	return s3Obj.Bucket(s3BucketName)
}
func NewClient(endpoint, accessKey, secretKey string, logger lager.Logger) Client {
	auth := aws.Auth{
		AccessKey: accessKey,
		SecretKey: secretKey,
	}

	return &s3Client{
		endpoint:    endpoint,
		goamzClient: goamz.New(auth, getRegion(endpoint)),
		logger:      logger,
	}
}
// Getbucket connects to the bucket with the given name using connection c, and
// returns the bucket along with any connection error.
func Getbucket(name string, c *Connection) (*s3.Bucket, error) {
	s3Connector := s3.New(c.Auth, c.Region)
	bucket := s3Connector.Bucket(name)
	_, err := bucket.List("", "", "", 1)
	if err != nil {
		// probably a wrong bucket name
		log.Errorf("connection to s3 bucket %s failed: %v", name, err)
	} else {
		fmt.Printf("Connected to s3 bucket: %v\n", name)
	}
	return bucket, err
}
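// Hypothetical usage sketch (not part of the original source): connects to a
// bucket through Getbucket above. The Connection literal sets only the Auth
// and Region fields that Getbucket uses; credentials, region, and names are
// placeholders for illustration only.
func exampleGetbucketUsage() error {
	conn := &Connection{
		Auth:   aws.Auth{AccessKey: "example-access-key", SecretKey: "example-secret-key"},
		Region: aws.EUWest,
	}
	bucket, err := Getbucket("example-bucket", conn)
	if err != nil {
		return err
	}
	_, err = bucket.Get("example/key")
	return err
}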
func s3GetBucket() *s3.Bucket {
	s3BucketName := "kjkpub"
	secrets := readSecretsMust()
	auth := aws.Auth{
		AccessKey: secrets.AwsAccess,
		SecretKey: secrets.AwsSecret,
	}
	// Note: it's important that region is aws.USEast. This is where my bucket
	// is and giving a different region will fail
	s3Obj := s3.New(auth, aws.USEast, getS3Client())
	return s3Obj.Bucket(s3BucketName)
}
// NewBackup initializes the backup service; call it from the main program.
func NewBackup(db *DBConf, awsc *Aws, backupdir string, rlimit, cTimeout, wTimeout, rTimeout int) *Backup {
	quit := make(chan struct{})
	started := make(chan struct{})
	err := make(chan error)
	zone := aws.EUWest

	connection := s3.New(awsc.Auth, zone)
	connection.ConnectTimeout = time.Duration(cTimeout) * time.Second
	connection.WriteTimeout = time.Duration(wTimeout) * time.Second
	connection.RequestTimeout = time.Duration(rTimeout) * time.Second

	return &Backup{db, awsc, backupdir, rlimit, connection, quit, started, err}
}