Code example #1
File: command.go  Project: xoebus/checkin
func (cmd *ATCCommand) Runner(args []string) (ifrit.Runner, error) {
	err := cmd.validate()
	if err != nil {
		return nil, err
	}

	logger, reconfigurableSink := cmd.constructLogger()

	cmd.configureMetrics(logger)

	sqlDB, pipelineDBFactory, err := cmd.constructDB(logger)
	if err != nil {
		return nil, err
	}

	trackerFactory := resource.TrackerFactory{}
	workerClient := cmd.constructWorkerPool(logger, sqlDB, trackerFactory)

	tracker := resource.NewTracker(workerClient)
	engine := cmd.constructEngine(sqlDB, workerClient, tracker)

	radarSchedulerFactory := pipelines.NewRadarSchedulerFactory(
		tracker,
		cmd.ResourceCheckingInterval,
		engine,
		sqlDB,
	)

	radarScannerFactory := radar.NewScannerFactory(
		tracker,
		cmd.ResourceCheckingInterval,
		cmd.ExternalURL.String(),
	)

	signingKey, err := cmd.loadOrGenerateSigningKey()
	if err != nil {
		return nil, err
	}

	err = sqlDB.CreateDefaultTeamIfNotExists()
	if err != nil {
		return nil, err
	}

	authValidator := cmd.constructValidator(signingKey, sqlDB)

	err = cmd.updateBasicAuthCredentials(sqlDB)
	if err != nil {
		return nil, err
	}

	jwtReader := auth.JWTReader{
		PublicKey: &signingKey.PublicKey,
	}

	err = cmd.configureOAuthProviders(logger, sqlDB)
	if err != nil {
		return nil, err
	}

	providerFactory := provider.NewOAuthFactory(
		sqlDB,
		cmd.oauthBaseURL(),
		auth.OAuthRoutes,
		auth.OAuthCallback,
	)

	drain := make(chan struct{})

	apiHandler, err := cmd.constructAPIHandler(
		logger,
		reconfigurableSink,
		sqlDB,
		authValidator,
		jwtReader,
		providerFactory,
		signingKey,
		pipelineDBFactory,
		engine,
		workerClient,
		drain,
		radarSchedulerFactory,
		radarScannerFactory,
	)
	if err != nil {
		return nil, err
	}

	oauthHandler, err := auth.NewOAuthHandler(
		logger,
		providerFactory,
		signingKey,
		sqlDB,
	)
	if err != nil {
		return nil, err
	}

	webHandler, err := cmd.constructWebHandler(
		logger,
		authValidator,
		jwtReader,
		pipelineDBFactory,
	)
	if err != nil {
		return nil, err
	}

	members := []grouper.Member{
		{"drainer", drainer(drain)},

		{"web", http_server.New(
			cmd.bindAddr(),
			cmd.constructHTTPHandler(
				webHandler,
				apiHandler,
				oauthHandler,
			),
		)},

		{"debug", http_server.New(
			cmd.debugBindAddr(),
			http.DefaultServeMux,
		)},

		{"pipelines", pipelines.SyncRunner{
			Syncer: cmd.constructPipelineSyncer(
				logger.Session("syncer"),
				sqlDB,
				pipelineDBFactory,
				radarSchedulerFactory,
			),
			Interval: 10 * time.Second,
			Clock:    clock.NewClock(),
		}},

		{"builds", builds.TrackerRunner{
			Tracker: builds.NewTracker(
				logger.Session("build-tracker"),
				sqlDB,
				engine,
			),
			Interval: 10 * time.Second,
			Clock:    clock.NewClock(),
		}},

		{"lostandfound", lostandfound.NewRunner(
			logger.Session("lost-and-found"),
			lostandfound.NewBaggageCollector(
				logger.Session("baggage-collector"),
				workerClient,
				sqlDB,
				pipelineDBFactory,
				cmd.OldResourceGracePeriod,
				24*time.Hour,
			),
			sqlDB,
			clock.NewClock(),
			cmd.ResourceCacheCleanupInterval,
		)},
	}

	members = cmd.appendStaticWorker(logger, sqlDB, members)

	return onReady(grouper.NewParallel(os.Interrupt, members), func() {
		logger.Info("listening", lager.Data{
			"web":   cmd.bindAddr(),
			"debug": cmd.debugBindAddr(),
		})
	}), nil
}
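Unlike the Run method in the next example, this Runner variant only assembles the ifrit process tree; the caller still has to invoke it. A minimal sketch of that caller side, assuming the usual ifrit/sigmon wiring (the runATC helper name and the error handling are illustrative, not taken from the project):

// runATC is an illustrative helper, not part of the ATC code base.
func runATC(cmd *ATCCommand, args []string) {
	runner, err := cmd.Runner(args)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// sigmon forwards terminating signals such as SIGINT and SIGTERM to the
	// process tree; Invoke blocks until every member has announced readiness.
	process := ifrit.Invoke(sigmon.New(runner))

	// Wait yields nil on a clean shutdown, or the first member's error.
	if err := <-process.Wait(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}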
Code example #2
File: command.go  Project: ACPK/atc
func (cmd *ATCCommand) Run(signals <-chan os.Signal, ready chan<- struct{}) error {
	err := cmd.validate()
	if err != nil {
		return err
	}

	logger, reconfigurableSink := cmd.constructLogger()

	cmd.configureMetrics(logger)

	sqlDB, pipelineDBFactory, err := cmd.constructDB(logger)
	if err != nil {
		return err
	}

	workerClient := cmd.constructWorkerPool(logger, sqlDB)

	tracker := resource.NewTracker(workerClient, sqlDB)

	engine := cmd.constructEngine(sqlDB, workerClient, tracker)

	radarSchedulerFactory := pipelines.NewRadarSchedulerFactory(
		tracker,
		cmd.ResourceCheckingInterval,
		engine,
		sqlDB,
	)

	signingKey, err := cmd.loadOrGenerateSigningKey()
	if err != nil {
		return err
	}

	authValidator, basicAuthEnabled := cmd.constructValidator(signingKey)

	oauthProviders, err := cmd.configureOAuthProviders(logger)
	if err != nil {
		return err
	}

	drain := make(chan struct{})

	apiHandler, err := cmd.constructAPIHandler(
		logger,
		reconfigurableSink,
		sqlDB,
		authValidator,
		oauthProviders,
		basicAuthEnabled,
		signingKey,
		pipelineDBFactory,
		engine,
		workerClient,
		drain,
		radarSchedulerFactory,
	)
	if err != nil {
		return err
	}

	oauthHandler, err := auth.NewOAuthHandler(
		logger,
		oauthProviders,
		signingKey,
	)
	if err != nil {
		return err
	}

	webHandler, err := cmd.constructWebHandler(
		logger,
		sqlDB,
		authValidator,
		pipelineDBFactory,
		engine,
	)
	if err != nil {
		return err
	}

	members := []grouper.Member{
		{"drainer", drainer(drain)},

		{"web", http_server.New(
			cmd.bindAddr(),
			cmd.constructHTTPHandler(
				webHandler,
				apiHandler,
				oauthHandler,
			),
		)},

		{"debug", http_server.New(
			cmd.debugBindAddr(),
			http.DefaultServeMux,
		)},

		{"pipelines", pipelines.SyncRunner{
			Syncer: cmd.constructPipelineSyncer(
				logger.Session("syncer"),
				sqlDB,
				pipelineDBFactory,
				radarSchedulerFactory,
			),
			Interval: 10 * time.Second,
			Clock:    clock.NewClock(),
		}},

		{"builds", builds.TrackerRunner{
			Tracker: builds.NewTracker(
				logger.Session("build-tracker"),
				sqlDB,
				engine,
			),
			Interval: 10 * time.Second,
			Clock:    clock.NewClock(),
		}},

		{"lostandfound", lostandfound.NewRunner(
			logger.Session("lost-and-found"),
			lostandfound.NewBaggageCollector(
				logger.Session("baggage-collector"),
				workerClient,
				sqlDB,
				pipelineDBFactory,
			),
			sqlDB,
			clock.NewClock(),
			5*time.Minute,
		)},
	}

	members = cmd.appendStaticWorker(logger, sqlDB, members)

	group := grouper.NewParallel(os.Interrupt, members)

	running := ifrit.Invoke(sigmon.New(group))

	logger.Info("listening", lager.Data{
		"web":   cmd.bindAddr(),
		"debug": cmd.debugBindAddr(),
	})

	close(ready)

	for {
		select {
		case s := <-signals:
			running.Signal(s)
		case err := <-running.Wait():
			if err != nil {
				logger.Error("exited-with-failure", err)
			}

			return err
		}
	}
}
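Both ATCCommand variants above, and the main function in example #4 below, wire the build tracker the same way: builds.NewTracker produces the tracker and builds.TrackerRunner polls it on a fixed interval inside the grouper member set. Below is a condensed sketch of just that member, with parameter types inferred from the surrounding code; the buildsMember helper name is illustrative, and builds.TrackerDB is assumed to be the interface behind the fakes used in example #3.

// buildsMember is an illustrative helper, not part of the ATC code base.
func buildsMember(logger lager.Logger, trackerDB builds.TrackerDB, eng engine.Engine) grouper.Member {
	return grouper.Member{
		Name: "builds",
		Runner: builds.TrackerRunner{
			Tracker: builds.NewTracker(
				logger.Session("build-tracker"), // scoped logger for tracker output
				trackerDB,                       // DB that reports in-flight builds
				eng,                             // engine used to resume each build
			),
			Interval: 10 * time.Second, // how often the tracker re-runs
			Clock:    clock.NewClock(), // real clock; tests may substitute a fake
		},
	}
}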
Code example #3
File: tracker_test.go  Project: ACPK/atc
	var (
		fakeTrackerDB *fakes.FakeTrackerDB
		fakeEngine    *enginefakes.FakeEngine

		tracker *builds.Tracker
		logger  *lagertest.TestLogger
	)

	BeforeEach(func() {
		fakeTrackerDB = new(fakes.FakeTrackerDB)
		fakeEngine = new(enginefakes.FakeEngine)
		logger = lagertest.NewTestLogger("test")

		tracker = builds.NewTracker(
			logger,
			fakeTrackerDB,
			fakeEngine,
		)
	})

	Describe("Track", func() {
		var (
			inFlightBuilds []db.Build

			engineBuilds []*enginefakes.FakeBuild
		)

		BeforeEach(func() {
			inFlightBuilds = []db.Build{
				{ID: 1},
				{ID: 2},
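The listing above is cut off mid-setup on the source page. A hedged sketch of how such a spec might continue is shown below; the counterfeiter-style stub and call-count names (GetAllStartedBuildsReturns, LookupBuildCallCount) are inferred from the fake types, not copied from the real test file, and Track is assumed to take no arguments.

		// Sketch only: the stub and assertion names below follow the usual
		// counterfeiter conventions and are assumptions, not the actual test.
		JustBeforeEach(func() {
			fakeTrackerDB.GetAllStartedBuildsReturns(inFlightBuilds, nil)
			tracker.Track()
		})

		It("looks up each in-flight build in the engine", func() {
			Expect(fakeEngine.LookupBuildCallCount()).To(Equal(len(inFlightBuilds)))
		})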
Code example #4
File: main.go  Project: utako/atc
func main() {
	flag.Parse()

	if !*dev && (*httpUsername == "" || (*httpHashedPassword == "" && *httpPassword == "")) {
		fatal(errors.New("must specify -httpUsername and -httpPassword or -httpHashedPassword or turn on dev mode"))
	}

	if _, err := os.Stat(*templatesDir); err != nil {
		fatal(errors.New("directory specified via -templates does not exist"))
	}

	if _, err := os.Stat(*publicDir); err != nil {
		fatal(errors.New("directory specified via -public does not exist"))
	}

	logger := lager.NewLogger("atc")

	logLevel := lager.INFO
	if *dev {
		logLevel = lager.DEBUG
	}

	sink := lager.NewReconfigurableSink(lager.NewWriterSink(os.Stdout, lager.DEBUG), logLevel)
	logger.RegisterSink(sink)

	var err error

	var dbConn Db.Conn

	for {
		dbConn, err = migration.Open(*sqlDriver, *sqlDataSource, migrations.Migrations)
		if err != nil {
			if strings.Contains(err.Error(), " dial ") {
				logger.Error("failed-to-open-db", err)
				time.Sleep(5 * time.Second)
				continue
			}

			fatal(err)
		}

		break
	}

	dbConn = Db.Explain(logger, dbConn, 500*time.Millisecond)

	listener := pq.NewListener(*sqlDataSource, time.Second, time.Minute, nil)
	bus := Db.NewNotificationsBus(listener)

	db := Db.NewSQL(logger.Session("db"), dbConn, bus)
	pipelineDBFactory := Db.NewPipelineDBFactory(logger.Session("db"), dbConn, bus, db)

	var configDB Db.ConfigDB
	configDB = Db.PlanConvertingConfigDB{db}

	var resourceTypesNG []atc.WorkerResourceType
	err = json.Unmarshal([]byte(*resourceTypes), &resourceTypesNG)
	if err != nil {
		logger.Fatal("invalid-resource-types", err)
	}

	var workerClient worker.Client
	if *gardenAddr != "" {
		workerClient = worker.NewGardenWorker(
			gclient.New(gconn.NewWithLogger(
				*gardenNetwork,
				*gardenAddr,
				logger.Session("garden-connection"),
			)),
			clock.NewClock(),
			-1,
			resourceTypesNG,
			"linux",
			[]string{},
		)
	} else {
		workerClient = worker.NewPool(worker.NewDBWorkerProvider(db, logger))
	}

	resourceTracker := resource.NewTracker(workerClient)
	gardenFactory := exec.NewGardenFactory(workerClient, resourceTracker, func() string {
		guid, err := uuid.NewV4()
		if err != nil {
			panic("not enough entropy to generate guid: " + err.Error())
		}

		return guid.String()
	})
	execEngine := engine.NewExecEngine(gardenFactory, engine.NewBuildDelegateFactory(db), db)

	engine := engine.NewDBEngine(engine.Engines{execEngine}, db, db)

	var webValidator auth.Validator

	if *httpUsername != "" && *httpHashedPassword != "" {
		webValidator = auth.BasicAuthHashedValidator{
			Username:       *httpUsername,
			HashedPassword: *httpHashedPassword,
		}
	} else if *httpUsername != "" && *httpPassword != "" {
		webValidator = auth.BasicAuthValidator{
			Username: *httpUsername,
			Password: *httpPassword,
		}
	} else {
		webValidator = auth.NoopValidator{}
	}

	callbacksURL, err := url.Parse(*callbacksURLString)
	if err != nil {
		fatal(err)
	}

	drain := make(chan struct{})

	apiHandler, err := api.NewHandler(
		logger,            // logger lager.Logger,
		webValidator,      // validator auth.Validator,
		pipelineDBFactory, // pipelineDBFactory db.PipelineDBFactory,

		configDB, // configDB db.ConfigDB,

		db, // buildsDB buildserver.BuildsDB,
		db, // workerDB workerserver.WorkerDB,
		db, // pipeDB pipes.PipeDB,
		db, // pipelinesDB db.PipelinesDB,

		config.ValidateConfig,       // configValidator configserver.ConfigValidator,
		callbacksURL.String(),       // peerURL string,
		buildserver.NewEventHandler, // eventHandlerFactory buildserver.EventHandlerFactory,
		drain, // drain <-chan struct{},

		engine,       // engine engine.Engine,
		workerClient, // workerClient worker.Client,

		sink, // sink *lager.ReconfigurableSink,

		*cliDownloadsDir, // cliDownloadsDir string,
	)
	if err != nil {
		fatal(err)
	}

	radarSchedulerFactory := pipelines.NewRadarSchedulerFactory(
		resourceTracker,
		*checkInterval,
		db,
		engine,
		db,
	)

	webHandler, err := web.NewHandler(
		logger,
		webValidator,
		radarSchedulerFactory,
		db,
		pipelineDBFactory,
		configDB,
		*templatesDir,
		*publicDir,
		engine,
	)
	if err != nil {
		fatal(err)
	}

	webMux := http.NewServeMux()
	webMux.Handle("/api/v1/", apiHandler)
	webMux.Handle("/", webHandler)

	var httpHandler http.Handler

	httpHandler = webMux

	if !*publiclyViewable {
		httpHandler = auth.Handler{
			Handler:   httpHandler,
			Validator: webValidator,
		}
	}

	// copy Authorization header as ATC-Authorization cookie for websocket auth
	httpHandler = auth.CookieSetHandler{
		Handler: httpHandler,
	}

	httpHandler = httpmetrics.Wrap(httpHandler)

	webListenAddr := fmt.Sprintf("%s:%d", *webListenAddress, *webListenPort)
	debugListenAddr := fmt.Sprintf("%s:%d", *debugListenAddress, *debugListenPort)

	syncer := pipelines.NewSyncer(
		logger.Session("syncer"),
		db,
		pipelineDBFactory,
		func(pipelineDB Db.PipelineDB) ifrit.Runner {
			return grouper.NewParallel(os.Interrupt, grouper.Members{
				{
					pipelineDB.ScopedName("radar"),
					rdr.NewRunner(
						logger.Session(pipelineDB.ScopedName("radar")),
						*noop,
						db,
						radarSchedulerFactory.BuildRadar(pipelineDB),
						pipelineDB,
						1*time.Minute,
					),
				},
				{
					pipelineDB.ScopedName("scheduler"),
					&sched.Runner{
						Logger: logger.Session(pipelineDB.ScopedName("scheduler")),

						Locker: db,
						DB:     pipelineDB,

						Scheduler: radarSchedulerFactory.BuildScheduler(pipelineDB),

						Noop: *noop,

						Interval: 10 * time.Second,
					},
				},
			})
		},
	)

	buildTracker := builds.NewTracker(
		logger.Session("build-tracker"),
		db,
		engine,
	)

	memberGrouper := []grouper.Member{
		{"web", http_server.New(webListenAddr, httpHandler)},

		{"debug", http_server.New(debugListenAddr, http.DefaultServeMux)},

		{"drainer", ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
			close(ready)

			<-signals

			close(drain)

			return nil
		})},

		{"pipelines", pipelines.SyncRunner{
			Syncer:   syncer,
			Interval: 10 * time.Second,
			Clock:    clock.NewClock(),
		}},

		{"builds", builds.TrackerRunner{
			Tracker:  buildTracker,
			Interval: 10 * time.Second,
			Clock:    clock.NewClock(),
		}},
	}

	group := grouper.NewParallel(os.Interrupt, memberGrouper)

	running := ifrit.Envoke(sigmon.New(group))

	logger.Info("listening", lager.Data{
		"web":   webListenAddr,
		"debug": debugListenAddr,
	})

	err = <-running.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}
}
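builds.TrackerRunner and pipelines.SyncRunner in the examples above share the same periodic-runner shape: do the work on every clock tick until a signal arrives. The sketch below is a hypothetical re-implementation of that loop for the build tracker, written against the pivotal-golang/clock Ticker API and assuming Track takes no arguments; it illustrates the pattern rather than mirroring the actual TrackerRunner source.

// runTracker is a hypothetical ifrit.Runner-shaped loop, not ATC source code.
func runTracker(tracker *builds.Tracker, interval time.Duration, clk clock.Clock, signals <-chan os.Signal, ready chan<- struct{}) error {
	ticker := clk.NewTicker(interval)
	defer ticker.Stop()

	close(ready) // ifrit contract: announce readiness before doing work

	for {
		select {
		case <-ticker.C():
			tracker.Track() // re-check every in-flight build
		case <-signals:
			return nil // clean shutdown on any forwarded signal
		}
	}
}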