Example #1
// setupDB resets the test database: it runs all DOWN migrations and then
// re-applies at most maxMigrations UP migrations, failing the test on any error.
func setupDB(tb testing.TB) {
	DB_STR := "postgres://*****:*****@artifactsdb/artifacts?sslmode=disable"

	db, err := sql.Open("postgres", DB_STR)
	if err != nil {
		tb.Fatalf("Error connecting to Postgres: %s", err)
	}

	dbmap := &gorp.DbMap{Db: db, Dialect: gorp.PostgresDialect{}}
	gdb := database.NewGorpDatabase(dbmap)
	gdb.RegisterEntities()

	migrations := &migrate.AssetMigrationSource{
		Asset:    database.Asset,
		AssetDir: database.AssetDir,
		Dir:      "migrations",
	}

	if n, err := migrate.Exec(db, "postgres", migrations, migrate.Down); err != nil {
		tb.Fatalf("Error resetting Postgres DB: %s", err)
	} else {
		fmt.Printf("Completed %d DOWN migrations\n", n)
	}

	// `maxMigrations` below is the maximum number of migration levels to be performed.
	// This is left here to make it easy to verify that database upgrades are backwards compatible.
	// After a new migration is added, this number should be bumped up.
	const maxMigrations = 4
	if n, err := migrate.ExecMax(db, "postgres", migrations, migrate.Up, maxMigrations); err != nil {
		tb.Fatalf("Error recreating Postgres DB: %s", err)
	} else {
		fmt.Printf("Completed %d UP migrations\n", n)
	}

	fmt.Println("************* DB RESET **************")
}
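
A minimal usage sketch for the setupDB helper above, assuming the test lives in the same package; the test name and body are hypothetical, not taken from the original repository:

func TestFreshDatabase(t *testing.T) {
	// Reset and re-migrate the test database before exercising any queries.
	setupDB(t)

	// From here the test can open its own connection (exactly as setupDB does)
	// and assert against the freshly migrated schema.
}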
Example #2
func main() {
	var flagConfigFile string
	flag.StringVar(&flagConfigFile, "config", "", "JSON Config file containing DB parameters and S3 information")

	flagVerbose := flag.Bool("verbose", false, "Enable request logging")
	flagLogDBQueries := flag.Bool("log-db-queries", false, "Enable DB query logging (Use with care, will dump raw logchunk contents to logfile)")

	showVersion := flag.Bool("version", false, "Show version number and quit")

	onlyPerformMigrations := flag.Bool("migrations-only", false, "Only perform database migrations and quit")

	dbMaxIdleConns := flag.Int("db-max-idle-conns", 20, "Maximum number of idle connections to the DB")

	dbMaxOpenConns := flag.Int("db-max-open-conns", 50, "Maximum number of open connections to the DB")

	shutdownTimeout := flag.Duration("shutdown-timeout", 15*time.Second, "Time to wait before closing active connections after a SIGTERM signal has been received")

	flag.Parse()
	log.SetFlags(log.Lmicroseconds | log.Lshortfile)

	// Seed the RNG used for artifact name deduplication (not strictly necessary, but good to have)
	rand.Seed(time.Now().UTC().UnixNano())

	if *showVersion {
		fmt.Println(common.GetVersion())
		return
	}

	conf := getConfigFrom(flagConfigFile)

	// ----- BEGIN DB Connections Setup -----
	db, err := sql.Open("postgres", conf.DbConnstr)

	if err != nil {
		log.Fatalf("Could not connect to the database: %v\n", err)
	}

	if *onlyPerformMigrations {
		err := performMigrations(db)
		db.Close()
		if err != nil {
			os.Exit(1)
		}

		return
	}

	db.SetMaxIdleConns(*dbMaxIdleConns)
	db.SetMaxOpenConns(*dbMaxOpenConns)
	defer db.Close()
	dbmap := &gorp.DbMap{Db: db, Dialect: gorp.PostgresDialect{}}
	if *flagLogDBQueries {
		dbmap.TraceOn("[gorp]", log.New(os.Stdout, "artifacts:", log.Lmicroseconds))
	}
	gdb := database.NewGorpDatabase(dbmap)
	// ----- END DB Connections Setup -----

	// ----- BEGIN AWS Connections -----
	var region aws.Region
	var auth aws.Auth
	if conf.S3Region == "fakes3" {
		region = aws.Region{
			Name:       conf.S3Region,
			S3Endpoint: conf.S3Server,
			Sign:       aws.SignV2,
		}

		auth = aws.Auth{}
	} else {
		region = aws.Regions[conf.S3Region]
		auth = aws.Auth{AccessKey: conf.S3AccessKey, SecretKey: conf.S3SecretKey}
	}

	s3Client := s3.New(auth, region)

	bucket := s3Client.Bucket(conf.S3Bucket)
	// ----- END AWS Connections -----

	gdb.RegisterEntities()

	stats.CreateStatsdClient(conf.StatsdURL, conf.StatsdPrefix)
	defer stats.ShutdownStatsdClient()

	g := gin.New()
	g.Use(gin.Recovery())

	if *flagVerbose {
		g.Use(gin.Logger())
	}

	realClock := new(common.RealClock)

	rootCtx := context.Background()
	rootCtx = sentry.CreateAndInstallSentryClient(rootCtx, conf.Env, conf.SentryDSN)
	g.Use(stats.Counter())

	g.GET("/", HomeHandler)
	g.GET("/version", VersionHandler)
	g.GET("/buckets", func(gc *gin.Context) {
		api.ListBuckets(rootCtx, &RenderOnGin{ginCtx: gc}, gdb)
	})
	g.POST("/buckets/", func(gc *gin.Context) {
		api.HandleCreateBucket(rootCtx, &RenderOnGin{ginCtx: gc}, gc.Request, gdb, realClock)
	})
	g.POST("/buckets/:bucket_id/artifacts/:artifact_name", func(gc *gin.Context) {
		render := &RenderOnGin{ginCtx: gc}
		afct := bindArtifact(rootCtx, render, gc, gdb)
		if !gc.IsAborted() {
			api.PostArtifact(rootCtx, render, gc.Request, gdb, bucket, afct)
		}
	})

	br := g.Group("/buckets/:bucket_id", func(gc *gin.Context) {
		bindBucket(rootCtx, &RenderOnGin{ginCtx: gc}, gc, gdb)
	})
	{
		br.GET("", func(gc *gin.Context) {
			bkt := gc.MustGet("bucket").(*model.Bucket)
			api.HandleGetBucket(rootCtx, &RenderOnGin{ginCtx: gc}, bkt)
		})
		br.POST("/close", func(gc *gin.Context) {
			bkt := gc.MustGet("bucket").(*model.Bucket)
			api.HandleCloseBucket(rootCtx, &RenderOnGin{ginCtx: gc}, gdb, bkt, bucket, realClock)
		})
		br.GET("/artifacts/", func(gc *gin.Context) {
			bkt := gc.MustGet("bucket").(*model.Bucket)
			api.ListArtifacts(rootCtx, &RenderOnGin{ginCtx: gc}, gc.Request, gdb, bkt)
		})
		br.POST("/artifacts", func(gc *gin.Context) {
			bkt := gc.MustGet("bucket").(*model.Bucket)
			api.HandleCreateArtifact(rootCtx, &RenderOnGin{ginCtx: gc}, gc.Request, gdb, bkt)
		})

		ar := br.Group("/artifacts/:artifact_name", func(gc *gin.Context) {
			bindArtifact(rootCtx, &RenderOnGin{ginCtx: gc}, gc, gdb)
		})
		{
			ar.GET("", func(gc *gin.Context) {
				afct := gc.MustGet("artifact").(*model.Artifact)
				api.HandleGetArtifact(rootCtx, &RenderOnGin{ginCtx: gc}, afct)
			})
			ar.POST("/close", func(gc *gin.Context) {
				afct := gc.MustGet("artifact").(*model.Artifact)
				api.HandleCloseArtifact(rootCtx, &RenderOnGin{ginCtx: gc}, gdb, bucket, afct)
			})
			ar.GET("/content", func(gc *gin.Context) {
				afct := gc.MustGet("artifact").(*model.Artifact)
				api.GetArtifactContent(rootCtx, &RenderOnGin{ginCtx: gc}, gc.Request, gc.Writer, gdb, bucket, afct)
			})
			ar.GET("/chunked", func(gc *gin.Context) {
				if conf.CorsURLs != "" {
					gc.Writer.Header().Add("Access-Control-Allow-Origin", conf.CorsURLs)
				}
				afct := gc.MustGet("artifact").(*model.Artifact)
				api.GetArtifactContentChunks(rootCtx, &RenderOnGin{ginCtx: gc}, gc.Request, gc.Writer, gdb, bucket, afct)
			})
		}
	}

	http.Handle("/", g)

	// If the process gets a SIGTERM, it will close the listening port, allowing another server to
	// bind and begin listening immediately. Any ongoing connections will be given 15 seconds (by
	// default) to complete, after which they are forcibly terminated.
	graceful.Run(getListenAddr(), *shutdownTimeout, nil)
}
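
The main function above reads its settings through a getConfigFrom helper that is not shown. Going only by the fields referenced in the example (DbConnstr, S3Region, S3Server, S3AccessKey, S3SecretKey, S3Bucket, StatsdURL, StatsdPrefix, Env, SentryDSN, CorsURLs), a minimal sketch of such a loader could look like the fragment below; the struct name, JSON key names, and error handling are assumptions, and the stdlib imports (encoding/json, log, os) are omitted as in the examples above:

// Hypothetical sketch of the config loading behind getConfigFrom. The field
// names come from the example above; everything else is an assumption.
type serverConfig struct {
	DbConnstr    string `json:"dbConnstr"`
	S3Region     string `json:"s3Region"`
	S3Server     string `json:"s3Server"`
	S3AccessKey  string `json:"s3AccessKey"`
	S3SecretKey  string `json:"s3SecretKey"`
	S3Bucket     string `json:"s3Bucket"`
	StatsdURL    string `json:"statsdUrl"`
	StatsdPrefix string `json:"statsdPrefix"`
	Env          string `json:"env"`
	SentryDSN    string `json:"sentryDsn"`
	CorsURLs     string `json:"corsUrls"`
}

func loadConfig(path string) serverConfig {
	f, err := os.Open(path)
	if err != nil {
		log.Fatalf("Could not open config file %q: %v", path, err)
	}
	defer f.Close()

	var c serverConfig
	if err := json.NewDecoder(f).Decode(&c); err != nil {
		log.Fatalf("Could not parse config file %q: %v", path, err)
	}
	return c
}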
Example #3
func main() {
	var flagConfigFile string
	flag.StringVar(&flagConfigFile, "config", "", "JSON Config file containing DB parameters and S3 information")

	var flagCPUProfile string
	flag.StringVar(&flagCPUProfile, "cpuprofile", "", "File to write CPU profile into")

	flagVerbose := flag.Bool("verbose", false, "Enable request logging")
	flagLogDBQueries := flag.Bool("log-db-queries", false, "Enable DB query logging (Use with care, will dump raw logchunk contents to logfile)")

	showVersion := flag.Bool("version", false, "Show version number and quit")

	onlyPerformMigrations := flag.Bool("migrations-only", false, "Only perform database migrations and quit")

	flag.Parse()

	if *showVersion {
		fmt.Println(common.GetVersion())
		return
	}

	conf := getConfigFrom(flagConfigFile)

	// ----- BEGIN DB Connections Setup -----
	db, err := sql.Open("postgres", conf.DbConnstr)

	if err != nil {
		log.Fatalf("Could not connect to the database: %v\n", err)
	}

	if *onlyPerformMigrations {
		err := performMigrations(db)
		db.Close()
		if err != nil {
			os.Exit(1)
		}

		return
	}

	defer db.Close()
	dbmap := &gorp.DbMap{Db: db, Dialect: gorp.PostgresDialect{}}
	if *flagLogDBQueries {
		dbmap.TraceOn("[gorp]", log.New(os.Stdout, "artifacts:", log.Lmicroseconds))
	}
	gdb := database.NewGorpDatabase(dbmap)
	// ----- END DB Connections Setup -----

	// ----- BEGIN CPU profiling -----
	if flagCPUProfile != "" {
		sig := make(chan os.Signal, 1)

		f, err := os.Create(flagCPUProfile)
		if err != nil {
			log.Fatal(err)
		}

		go func() {
			<-sig
			fmt.Println("Handling SIGHUP")
			pprof.StopCPUProfile()
			os.Exit(0)
		}()

		if err := pprof.StartCPUProfile(f); err != nil {
			log.Fatal(err)
		}
		signal.Notify(sig, syscall.SIGHUP)
	}
	// ----- END CPU Profiling -----

	// ----- BEGIN AWS Connections -----
	var region aws.Region
	var auth aws.Auth
	if conf.S3Region == "fakes3" {
		region = aws.Region{
			Name:       conf.S3Region,
			S3Endpoint: conf.S3Server,
			Sign:       aws.SignV2,
		}

		auth = aws.Auth{}
	} else {
		region = aws.Regions[conf.S3Region]
		auth = aws.Auth{AccessKey: conf.S3AccessKey, SecretKey: conf.S3SecretKey}
	}

	s3Client := s3.New(auth, region)

	bucket := s3Client.Bucket(conf.S3Bucket)
	// ----- END AWS Connections -----

	gdb.RegisterEntities()

	m := martini.New()
	m.Use(martini.Recovery())
	m.Map(dbmap)
	m.Map(s3Client)
	m.Map(bucket)
	m.Use(render.Renderer())

	if *flagVerbose {
		m.Use(martini.Logger())
	}

	// Bind the gdb instance to be returned every time a Database interface is required.
	m.MapTo(gdb, (*database.Database)(nil))
	// Bind real clock implementation
	m.MapTo(new(common.RealClock), (*common.Clock)(nil))

	r := martini.NewRouter()
	// The '/' URL is used to determine whether the server is up. Do not remove.
	r.Get("/", HomeHandler)
	r.Get("/version", VersionHandler)
	r.Get("/buckets", api.ListBuckets)
	r.Post("/buckets", api.HandleCreateBucket)
	r.Group("/buckets/:bucket_id", func(br martini.Router) {
		br.Get("", api.HandleGetBucket)
		br.Post("/close", api.HandleCloseBucket)
		br.Get("/artifacts", api.ListArtifacts)
		br.Post("/artifacts", api.HandleCreateArtifact)
		br.Group("/artifacts/:artifact_name", func(ar martini.Router) {
			ar.Get("", api.HandleGetArtifact)
			ar.Post("", api.PostArtifact)
			ar.Post("/close", api.FinalizeArtifact)
			ar.Get("/content", api.GetArtifactContent)
		}, bindArtifact)
	}, bindBucket)
	m.Action(r.Handle)

	m.Run()
}
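
Both Example #2 and Example #3 call a performMigrations helper that is not shown. Example #1 drives migrations through sql-migrate's AssetMigrationSource, so a plausible sketch of that helper, reusing the same pattern, is given below; the body is an assumption rather than the project's actual implementation, and imports (database/sql, log, sql-migrate, and the project's database package) are omitted as in the examples above:

// Hypothetical sketch of the performMigrations helper called in Examples #2
// and #3, modeled on the sql-migrate usage in Example #1.
func performMigrations(db *sql.DB) error {
	migrations := &migrate.AssetMigrationSource{
		Asset:    database.Asset,
		AssetDir: database.AssetDir,
		Dir:      "migrations",
	}

	// Apply every pending UP migration against the Postgres database.
	n, err := migrate.Exec(db, "postgres", migrations, migrate.Up)
	if err != nil {
		log.Printf("Error applying migrations: %s", err)
		return err
	}

	log.Printf("Completed %d UP migrations", n)
	return nil
}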