// Communicate with all endpoints to see if they are alive.
func (s *ClientTests) TestRegions(c *C) {
	errs := make(chan error, len(aws.Regions))
	for _, region := range aws.Regions {
		go func(r aws.Region) {
			// Note: this s shadows the receiver; from here on it is the per-region client.
			s := s3.New(s.s3.Auth, r)
			b := s.Bucket("goamz-" + s.Auth.AccessKey)
			_, err := b.Get("non-existent")
			errs <- err
		}(region)
	}
	for range aws.Regions {
		err := <-errs
		if err != nil {
			s3err, ok := err.(*s3.Error)
			if ok {
				c.Check(s3err.Code, Matches, "NoSuchBucket")
			} else if _, ok = err.(*net.DNSError); ok {
				// Okay as well.
			} else {
				c.Errorf("Non-S3 error: %s", err)
			}
		} else {
			c.Errorf("Test should have errored but it seems to have succeeded")
		}
	}
}
func (i *S3) SetConfig(c *S3Config) {
	client := s3.New(aws.Auth{AccessKey: c.AccessKey, SecretKey: c.SecretKey}, aws.Regions[c.Region])
	i.bucket = client.Bucket(c.Bucket)

	r, err := i.bucket.List(c.Prefix, c.Delimiter, c.Marker, c.MaxKeys)
	if err != nil {
		Critical("list bucket %s: %v", c.Bucket, err)
	}
	for _, key := range r.Contents {
		i.factories = append(i.factories, i.createReaderFactory(key, c.Gzip))
	}

	if c.TrackFile == "" {
		return
	}
	i.trackFile = c.TrackFile
	i.helper.readerEOF = func() error {
		if i.current == nil {
			return nil
		}
		track := i.getTrackFilename()
		fmt.Println(track)
		// Directories need the execute bit; 0644 here would leave the
		// created directories unusable.
		if err := os.MkdirAll(filepath.Dir(track), 0755); err != nil {
			return err
		}
		return ioutil.WriteFile(track, []byte(i.current.LastModified), 0644)
	}
}
func (s *LocalServerSuite) SetUpSuite(c *C) {
	s.srv.SetUp(c)
	s.clientTests.s3 = s3.New(s.srv.auth, s.srv.region)

	// TODO Sadly the fake server ignores auth completely right now. :-(
	s.clientTests.authIsBroken = true
	s.clientTests.Cleanup()
}
func (s *AmazonClientSuite) SetUpSuite(c *C) {
	if !testutil.Amazon {
		c.Skip("live tests against AWS disabled (no -amazon)")
	}
	s.srv.SetUp(c)
	s.s3 = s3.New(s.srv.auth, s.Region)
	// In case tests were interrupted in the middle before.
	s.ClientTests.Cleanup()
}
func (s *AmazonDomainClientSuite) SetUpSuite(c *C) {
	if !testutil.Amazon {
		c.Skip("live tests against AWS disabled (no -amazon)")
	}
	s.srv.SetUp(c)

	region := s.Region
	region.S3BucketEndpoint = "https://${bucket}.s3.amazonaws.com"
	s.s3 = s3.New(s.srv.auth, region)

	s.ClientTests.Cleanup()
}
func Put(c Config) gonzo.Stage {
	return func(ctx context.Context, files <-chan gonzo.File, out chan<- gonzo.File) error {
		err := checkconfig(c)
		if err != nil {
			return err
		}

		auth := aws.Auth{
			AccessKey: c.AccessKey,
			SecretKey: c.SecretKey,
		}
		con := s3.New(auth, aws.Region(c.Region))
		bucket := con.Bucket(c.Name)

		for {
			select {
			case file, ok := <-files:
				if !ok {
					return nil
				}
				if file.FileInfo().IsDir() {
					continue
				}

				content, err := ioutil.ReadAll(file)
				if err != nil {
					return err
				}

				name := file.FileInfo().Name()
				contentType := mime.TypeByExtension(filepath.Ext(name))
				if contentType == "" {
					contentType = http.DetectContentType(content)
				}

				ctx = context.WithValue(ctx, "Content-Type", contentType)
				ctx.Infof("Uploading %s", name)
				err = bucket.Put(name, content, contentType, s3.ACL(c.Perm))
				if err != nil {
					return err
				}

				out <- gonzo.NewFile(ioutil.NopCloser(bytes.NewReader(content)), file.FileInfo())
			case <-ctx.Done():
				return ctx.Err()
			}
		}
	}
}
func (app *App) configS3() {
	reg, ok := aws.Regions[app.S3Region]
	if !ok {
		panic(app.S3Region + " is not a region")
	}
	auth := aws.Auth{
		AccessKey: app.AwsAccess,
		SecretKey: app.AwsSecret,
	}
	conn := s3.New(auth, reg)
	bucket := conn.Bucket(app.S3Bucket)
	app.s3 = bucket
}
func TestS3Upload(t *testing.T) {
	auth, err := aws.EnvAuth()
	if err != nil {
		// The original left this error unchecked; fail fast if the
		// environment credentials are missing.
		t.Fatalf("env auth: %v", err)
	}
	s := s3.New(auth, aws.USEast)
	bucket := s.Bucket("downloaderd")

	data := []byte("Hello, Goamz!!")
	err = bucket.Put("sample.txt", data, "text/plain", s3.BucketOwnerFull)
	if err != nil {
		t.Errorf("upload failed: %v", err)
	}

	err = bucket.Put("test/sample.txt", data, "text/plain", s3.BucketOwnerFull)
	if err != nil {
		t.Errorf("upload failed: %v", err)
	}
}
func createS3Bucket(t *testing.T) (*s3test.Server, *s3.Bucket) {
	s3Server, err := s3test.NewServer(&s3test.Config{Send409Conflict: true})
	if err != nil {
		t.Fatalf("Error bringing up fake s3 server: %s\n", err)
	}
	t.Logf("Fake S3 server up at %s\n", s3Server.URL())

	s3Client := s3.New(aws.Auth{AccessKey: "abc", SecretKey: "123"}, aws.Region{
		Name:                 "fake-artifacts-test-region",
		S3Endpoint:           s3Server.URL(),
		S3LocationConstraint: true,
		Sign:                 aws.SignV2,
	})
	s3Bucket := s3Client.Bucket("fake-artifacts-store-bucket")
	if err := s3Bucket.PutBucket(s3.Private); err != nil {
		t.Fatalf("Error creating s3 bucket: %s\n", err)
	}
	return s3Server, s3Bucket
}
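// A hedged sketch of how createS3Bucket might be used from a test. The test
// name and the "hello.txt" key are hypothetical, not from the original code;
// the fake server is shut down via s3test.Server.Quit when the test ends.
func TestFakeBucketRoundTrip(t *testing.T) {
	srv, bucket := createS3Bucket(t)
	defer srv.Quit()
	if err := bucket.Put("hello.txt", []byte("hi"), "text/plain", s3.Private); err != nil {
		t.Fatalf("put to fake bucket: %v", err)
	}
}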
func NewS3Storage(accessKey, secretKey, bucketUrl string, prefix string) (*S3Storage, error) {
	auth := aws.Auth{
		AccessKey: accessKey,
		SecretKey: secretKey,
	}

	u, err := url.Parse(bucketUrl)
	if err != nil {
		return nil, err
	}
	bucketname := strings.SplitN(strings.TrimPrefix(u.Path, "/"), "/", 2)[0]

	region, err := s3RegionByEndpoint(u.Scheme + "://" + u.Host)
	if err != nil {
		return nil, err
	}

	s3i := s3.New(auth, region)
	b := s3i.Bucket(bucketname)
	return &S3Storage{
		bucket: b,
		prefix: prefix,
	}, nil
}
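// s3RegionByEndpoint is assumed by NewS3Storage but not shown. A minimal
// sketch of one plausible implementation, matching the endpoint against the
// known aws.Regions; the error message is illustrative only.
func s3RegionByEndpoint(endpoint string) (aws.Region, error) {
	for _, region := range aws.Regions {
		if region.S3Endpoint == endpoint {
			return region, nil
		}
	}
	return aws.Region{}, fmt.Errorf("no S3 region with endpoint %q", endpoint)
}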
func put() {
	// The AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables are used.
	auth, err := aws.EnvAuth()
	if err != nil {
		panic(err.Error())
	}

	// Open Bucket
	s := s3.New(auth, aws.USEast)
	bucket := s.Bucket("joburnet-lambda-sources")

	data := []byte("bang!")
	key := randSeq(32)

	err = bucket.Put(key, data, "content-type", s3.Private)
	if err != nil {
		fmt.Printf("%v", err)
	} else {
		fmt.Printf(".")
	}
}
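// randSeq is assumed by put but not shown. A plausible sketch that returns a
// random n-character key; the alphabet is an assumption, and math/rand should
// be seeded once at startup for non-repeating keys.
var letters = []rune("abcdefghijklmnopqrstuvwxyz0123456789")

func randSeq(n int) string {
	b := make([]rune, n)
	for i := range b {
		b[i] = letters[rand.Intn(len(letters))]
	}
	return string(b)
}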
func main() {
	var flagConfigFile string
	flag.StringVar(&flagConfigFile, "config", "", "JSON Config file containing DB parameters and S3 information")
	flagVerbose := flag.Bool("verbose", false, "Enable request logging")
	flagLogDBQueries := flag.Bool("log-db-queries", false, "Enable DB query logging (Use with care, will dump raw logchunk contents to logfile)")
	showVersion := flag.Bool("version", false, "Show version number and quit")
	onlyPerformMigrations := flag.Bool("migrations-only", false, "Only perform database migrations and quit")
	dbMaxIdleConns := flag.Int("db-max-idle-conns", 20, "Maximum number of idle connections to the DB")
	dbMaxOpenConns := flag.Int("db-max-open-conns", 50, "Maximum number of open connections to the DB")
	shutdownTimeout := flag.Duration("shutdown-timeout", 15*time.Second, "Time to wait before closing active connections after SIGTERM signal has been received")
	flag.Parse()

	log.SetFlags(log.Lmicroseconds | log.Lshortfile)

	// Required for artifact name deduplication (not strictly necessary, but good to have)
	rand.Seed(time.Now().UTC().UnixNano())

	if *showVersion {
		fmt.Println(common.GetVersion())
		return
	}

	conf := getConfigFrom(flagConfigFile)

	// ----- BEGIN DB Connections Setup -----
	db, err := sql.Open("postgres", conf.DbConnstr)
	if err != nil {
		log.Fatalf("Could not connect to the database: %v\n", err)
	}

	if *onlyPerformMigrations {
		err := performMigrations(db)
		db.Close()
		if err != nil {
			os.Exit(1)
		}
		return
	}

	db.SetMaxIdleConns(*dbMaxIdleConns)
	db.SetMaxOpenConns(*dbMaxOpenConns)
	defer db.Close()

	dbmap := &gorp.DbMap{Db: db, Dialect: gorp.PostgresDialect{}}
	if *flagLogDBQueries {
		dbmap.TraceOn("[gorp]", log.New(os.Stdout, "artifacts:", log.Lmicroseconds))
	}
	gdb := database.NewGorpDatabase(dbmap)
	// ----- END DB Connections Setup -----

	// ----- BEGIN AWS Connections -----
	var region aws.Region
	var auth aws.Auth
	if conf.S3Region == "fakes3" {
		region = aws.Region{
			Name:       conf.S3Region,
			S3Endpoint: conf.S3Server,
			Sign:       aws.SignV2,
		}
		auth = aws.Auth{}
	} else {
		region = aws.Regions[conf.S3Region]
		auth = aws.Auth{AccessKey: conf.S3AccessKey, SecretKey: conf.S3SecretKey}
	}
	s3Client := s3.New(auth, region)
	bucket := s3Client.Bucket(conf.S3Bucket)
	// ----- END AWS Connections -----

	gdb.RegisterEntities()

	stats.CreateStatsdClient(conf.StatsdURL, conf.StatsdPrefix)
	defer stats.ShutdownStatsdClient()

	g := gin.New()
	g.Use(gin.Recovery())
	if *flagVerbose {
		g.Use(gin.Logger())
	}

	realClock := new(common.RealClock)

	rootCtx := context.Background()
	rootCtx = sentry.CreateAndInstallSentryClient(rootCtx, conf.Env, conf.SentryDSN)
	g.Use(stats.Counter())

	g.GET("/", HomeHandler)
	g.GET("/version", VersionHandler)
	g.GET("/buckets", func(gc *gin.Context) {
		api.ListBuckets(rootCtx, &RenderOnGin{ginCtx: gc}, gdb)
	})
	g.POST("/buckets/", func(gc *gin.Context) {
		api.HandleCreateBucket(rootCtx, &RenderOnGin{ginCtx: gc}, gc.Request, gdb, realClock)
	})
	g.POST("/buckets/:bucket_id/artifacts/:artifact_name", func(gc *gin.Context) {
		render := &RenderOnGin{ginCtx: gc}
		afct := bindArtifact(rootCtx, render, gc, gdb)
		if !gc.IsAborted() {
			api.PostArtifact(rootCtx, render, gc.Request, gdb, bucket, afct)
		}
	})
	br := g.Group("/buckets/:bucket_id", func(gc *gin.Context) {
		bindBucket(rootCtx, &RenderOnGin{ginCtx: gc}, gc, gdb)
	})
	{
		br.GET("", func(gc *gin.Context) {
			bkt := gc.MustGet("bucket").(*model.Bucket)
			api.HandleGetBucket(rootCtx, &RenderOnGin{ginCtx: gc}, bkt)
		})
		br.POST("/close", func(gc *gin.Context) {
			bkt := gc.MustGet("bucket").(*model.Bucket)
			api.HandleCloseBucket(rootCtx, &RenderOnGin{ginCtx: gc}, gdb, bkt, bucket, realClock)
		})
		br.GET("/artifacts/", func(gc *gin.Context) {
			bkt := gc.MustGet("bucket").(*model.Bucket)
			api.ListArtifacts(rootCtx, &RenderOnGin{ginCtx: gc}, gc.Request, gdb, bkt)
		})
		br.POST("/artifacts", func(gc *gin.Context) {
			bkt := gc.MustGet("bucket").(*model.Bucket)
			api.HandleCreateArtifact(rootCtx, &RenderOnGin{ginCtx: gc}, gc.Request, gdb, bkt)
		})
		ar := br.Group("/artifacts/:artifact_name", func(gc *gin.Context) {
			bindArtifact(rootCtx, &RenderOnGin{ginCtx: gc}, gc, gdb)
		})
		{
			ar.GET("", func(gc *gin.Context) {
				afct := gc.MustGet("artifact").(*model.Artifact)
				api.HandleGetArtifact(rootCtx, &RenderOnGin{ginCtx: gc}, afct)
			})
			ar.POST("/close", func(gc *gin.Context) {
				afct := gc.MustGet("artifact").(*model.Artifact)
				api.HandleCloseArtifact(rootCtx, &RenderOnGin{ginCtx: gc}, gdb, bucket, afct)
			})
			ar.GET("/content", func(gc *gin.Context) {
				afct := gc.MustGet("artifact").(*model.Artifact)
				api.GetArtifactContent(rootCtx, &RenderOnGin{ginCtx: gc}, gc.Request, gc.Writer, gdb, bucket, afct)
			})
			ar.GET("/chunked", func(gc *gin.Context) {
				if conf.CorsURLs != "" {
					gc.Writer.Header().Add("Access-Control-Allow-Origin", conf.CorsURLs)
				}
				afct := gc.MustGet("artifact").(*model.Artifact)
				api.GetArtifactContentChunks(rootCtx, &RenderOnGin{ginCtx: gc}, gc.Request, gc.Writer, gdb, bucket, afct)
			})
		}
	}

	http.Handle("/", g)

	// If the process gets a SIGTERM, it will close the listening port, allowing another server to
	// bind and begin listening immediately. Any ongoing connections will be given 15 seconds (by
	// default) to complete, after which they are forcibly terminated.
	graceful.Run(getListenAddr(), *shutdownTimeout, nil)
}
func openBucket(auth aws.Auth, region aws.Region, bucketName string) *s3.Bucket {
	s3i := s3.New(auth, region)
	return s3i.Bucket(bucketName)
}
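// A hypothetical caller of openBucket, shown only for illustration: credentials
// come from the AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY environment variables
// via aws.EnvAuth, and "my-bucket" is a placeholder name.
func exampleOpenBucket() (*s3.Bucket, error) {
	auth, err := aws.EnvAuth()
	if err != nil {
		return nil, err
	}
	return openBucket(auth, aws.USEast, "my-bucket"), nil
}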
func (s *S) SetUpSuite(c *C) {
	testServer.Start()
	auth := aws.Auth{AccessKey: "abc", SecretKey: "123"}
	s.s3 = s3.New(auth, aws.Region{Name: "faux-region-1", S3Endpoint: testServer.URL, Sign: aws.SignV2})
}
func main() {
	var flagConfigFile string
	flag.StringVar(&flagConfigFile, "config", "", "JSON Config file containing DB parameters and S3 information")
	var flagCPUProfile string
	flag.StringVar(&flagCPUProfile, "cpuprofile", "", "File to write CPU profile into")
	flagVerbose := flag.Bool("verbose", false, "Enable request logging")
	flagLogDBQueries := flag.Bool("log-db-queries", false, "Enable DB query logging (Use with care, will dump raw logchunk contents to logfile)")
	showVersion := flag.Bool("version", false, "Show version number and quit")
	onlyPerformMigrations := flag.Bool("migrations-only", false, "Only perform database migrations and quit")
	flag.Parse()

	if *showVersion {
		fmt.Println(common.GetVersion())
		return
	}

	conf := getConfigFrom(flagConfigFile)

	// ----- BEGIN DB Connections Setup -----
	db, err := sql.Open("postgres", conf.DbConnstr)
	if err != nil {
		log.Fatalf("Could not connect to the database: %v\n", err)
	}

	if *onlyPerformMigrations {
		err := performMigrations(db)
		db.Close()
		if err != nil {
			os.Exit(1)
		}
		return
	}

	defer db.Close()

	dbmap := &gorp.DbMap{Db: db, Dialect: gorp.PostgresDialect{}}
	if *flagLogDBQueries {
		dbmap.TraceOn("[gorp]", log.New(os.Stdout, "artifacts:", log.Lmicroseconds))
	}
	gdb := database.NewGorpDatabase(dbmap)
	// ----- END DB Connections Setup -----

	// ----- BEGIN CPU profiling -----
	if flagCPUProfile != "" {
		sig := make(chan os.Signal, 1)
		f, err := os.Create(flagCPUProfile)
		if err != nil {
			log.Fatal(err)
		}
		go func() {
			<-sig
			fmt.Println("Handling SIGHUP")
			pprof.StopCPUProfile()
			os.Exit(0)
		}()
		pprof.StartCPUProfile(f)
		signal.Notify(sig, syscall.SIGHUP)
	}
	// ----- END CPU Profiling -----

	// ----- BEGIN AWS Connections -----
	var region aws.Region
	var auth aws.Auth
	if conf.S3Region == "fakes3" {
		region = aws.Region{
			Name:       conf.S3Region,
			S3Endpoint: conf.S3Server,
			Sign:       aws.SignV2,
		}
		auth = aws.Auth{}
	} else {
		region = aws.Regions[conf.S3Region]
		auth = aws.Auth{AccessKey: conf.S3AccessKey, SecretKey: conf.S3SecretKey}
	}
	s3Client := s3.New(auth, region)
	bucket := s3Client.Bucket(conf.S3Bucket)
	// ----- END AWS Connections -----

	gdb.RegisterEntities()

	m := martini.New()
	m.Use(martini.Recovery())
	m.Map(dbmap)
	m.Map(s3Client)
	m.Map(bucket)
	m.Use(render.Renderer())
	if *flagVerbose {
		m.Use(martini.Logger())
	}

	// Bind the gdb instance to be returned every time a Database interface is required.
	m.MapTo(gdb, (*database.Database)(nil))
	// Bind real clock implementation
	m.MapTo(new(common.RealClock), (*common.Clock)(nil))

	r := martini.NewRouter()
	// '/' url is used to determine if the server is up. Do not remove.
	r.Get("/", HomeHandler)
	r.Get("/version", VersionHandler)
	r.Get("/buckets", api.ListBuckets)
	r.Post("/buckets", api.HandleCreateBucket)
	r.Group("/buckets/:bucket_id", func(br martini.Router) {
		br.Get("", api.HandleGetBucket)
		br.Post("/close", api.HandleCloseBucket)
		br.Get("/artifacts", api.ListArtifacts)
		br.Post("/artifacts", api.HandleCreateArtifact)
		br.Group("/artifacts/:artifact_name", func(ar martini.Router) {
			ar.Get("", api.HandleGetArtifact)
			ar.Post("", api.PostArtifact)
			ar.Post("/close", api.FinalizeArtifact)
			ar.Get("/content", api.GetArtifactContent)
		}, bindArtifact)
	}, bindBucket)

	m.Action(r.Handle)
	m.Run()
}