func (cmd *WebCommand) Execute(args []string) error {
	tsa := &tsacmd.TSACommand{
		BindIP:             cmd.TSA.BindIP,
		BindPort:           cmd.TSA.BindPort,
		HostKeyPath:        cmd.TSA.HostKeyPath,
		AuthorizedKeysPath: cmd.TSA.AuthorizedKeysPath,
		HeartbeatInterval:  cmd.TSA.HeartbeatInterval,
	}

	cmd.populateTSAFlagsFromATCFlags(tsa)

	atcRunner, err := cmd.ATCCommand.Runner(args)
	if err != nil {
		return err
	}

	tsaRunner, err := tsa.Runner(args)
	if err != nil {
		return err
	}

	// Run the ATC and TSA side by side; sigmon forwards OS signals to the
	// group, and if either member exits the other is interrupted.
	runner := sigmon.New(grouper.NewParallel(os.Interrupt, grouper.Members{
		{"atc", atcRunner},
		{"tsa", tsaRunner},
	}))

	return <-ifrit.Invoke(runner).Wait()
}
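Every snippet in this section shares the same lifecycle: build an ifrit.Runner per component, group the runners with grouper.NewParallel, optionally wrap the group in sigmon.New so OS signals propagate, then ifrit.Invoke the result and wait on it. A minimal, self-contained sketch of that pattern (the member names and no-op runner are illustrative, not from the source):

package main

import (
	"fmt"
	"os"

	"github.com/tedsuo/ifrit"
	"github.com/tedsuo/ifrit/grouper"
	"github.com/tedsuo/ifrit/sigmon"
)

// noopRunner reports readiness immediately and blocks until signalled,
// which is the minimal contract every ifrit.Runner must satisfy.
var noopRunner = ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
	close(ready) // tell the invoker we are up
	<-signals    // block until the group forwards a signal
	return nil
})

func main() {
	group := grouper.NewParallel(os.Interrupt, grouper.Members{
		{Name: "worker-a", Runner: noopRunner},
		{Name: "worker-b", Runner: noopRunner},
	})

	// sigmon traps SIGINT/SIGTERM and forwards them to the group;
	// Invoke starts it and blocks until every member reports ready.
	process := ifrit.Invoke(sigmon.New(group))

	// Wait yields the group's exit error, or nil on clean shutdown.
	if err := <-process.Wait(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}

NewParallel's first argument is the signal broadcast to the remaining members once any one of them exits, which is why the long-running servers below use os.Interrupt for graceful shutdown while the tests use os.Kill to tear components down fast.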
func main() { logger := lager.NewLogger("checkin") logger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.INFO)) var opts Opts _, err := flags.Parse(&opts) if err != nil { logger.Error("parsing-flags", err) os.Exit(1) } // ts := oauth2.StaticTokenSource( // &oauth2.Token{AccessToken: opts.GitHubAccessToken}, // ) // tc := oauth2.NewClient(oauth2.NoContext, ts) // githubClient := github.NewClient(tc) // checker := build.NewConcourseChecker() // checker = build.NewStatusReporter(checker, githubClient.Repositories) dbConn, err := migration.Open(opts.DBDriver, opts.DBURL, migrations.Migrations) if err != nil { logger.Error("failed-to-run-migrations", err) os.Exit(1) } sqlDB := db.NewSQL(logger.Session("db"), dbConn) enqueuer := build.NewEnqueuer(sqlDB) apiServer := api.NewServer(opts.GitHubSecret, enqueuer) members := []grouper.Member{ { "api", http_server.New( opts.Addr, apiServer, ), }, } group := grouper.NewParallel(os.Interrupt, members) running := ifrit.Invoke(sigmon.New(group)) logger.Info("listening", lager.Data{ "api": opts.Addr, }) err = <-running.Wait() if err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) } }
func NewTaskWorkerPool(receptorBBS bbs.ReceptorBBS, logger lager.Logger) (ifrit.Runner, chan<- models.Task) {
	taskQueue := make(chan models.Task, POOL_SIZE)

	// One member per worker; all of them consume from the shared queue.
	members := make(grouper.Members, POOL_SIZE)
	for i := 0; i < POOL_SIZE; i++ {
		name := fmt.Sprintf("task-worker-%d", i)
		members[i].Name = name
		members[i].Runner = newTaskWorker(taskQueue, receptorBBS, logger.Session(name))
	}

	return grouper.NewParallel(os.Interrupt, members), taskQueue
}
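newTaskWorker isn't shown in this section; a plausible sketch, assuming each worker simply drains the shared queue until signalled. The task handling itself is elided, and TaskGuid is assumed to be a field of models.Task:

// Hypothetical sketch of newTaskWorker: one pool member that consumes
// tasks from the shared queue until the group signals shutdown.
func newTaskWorker(taskQueue <-chan models.Task, receptorBBS bbs.ReceptorBBS, logger lager.Logger) ifrit.Runner {
	return ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		close(ready)
		for {
			select {
			case task := <-taskQueue:
				// ... resolve the task via receptorBBS (elided) ...
				logger.Info("handling-task", lager.Data{"task-guid": task.TaskGuid})
			case <-signals:
				return nil
			}
		}
	})
}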
func main() {
	apiServer := NewGRPCRunner("localhost:50051", func(s *grpc.Server) {
		airfreight.RegisterAirfreightServer(s, &server{})
	})

	debugServer := http_server.New(
		"localhost:6060",
		debugHandler(),
	)

	members := []grouper.Member{
		{"api", apiServer},
		{"debug", debugServer},
	}

	runner := sigmon.New(grouper.NewParallel(os.Interrupt, members))

	err := <-ifrit.Invoke(runner).Wait()
	if err != nil {
		log.Fatalln(err)
	}
}
func (cmd *ATCCommand) constructPipelineSyncer(
	logger lager.Logger,
	sqlDB *db.SQLDB,
	pipelineDBFactory db.PipelineDBFactory,
	radarSchedulerFactory pipelines.RadarSchedulerFactory,
) *pipelines.Syncer {
	return pipelines.NewSyncer(
		logger,
		sqlDB,
		pipelineDBFactory,
		// Each configured pipeline gets its own radar/scheduler pair,
		// grouped so both stop together when the pipeline goes away.
		func(pipelineDB db.PipelineDB) ifrit.Runner {
			return grouper.NewParallel(os.Interrupt, grouper.Members{
				{
					pipelineDB.ScopedName("radar"),
					radar.NewRunner(
						logger.Session(pipelineDB.ScopedName("radar")),
						cmd.Developer.Noop,
						radarSchedulerFactory.BuildScanRunnerFactory(pipelineDB, cmd.ExternalURL.String()),
						pipelineDB,
						1*time.Minute,
					),
				},
				{
					pipelineDB.ScopedName("scheduler"),
					&scheduler.Runner{
						Logger: logger.Session(pipelineDB.ScopedName("scheduler")),

						DB: pipelineDB,

						Scheduler: radarSchedulerFactory.BuildScheduler(pipelineDB, cmd.ExternalURL.String()),

						Noop: cmd.Developer.Noop,

						Interval: 10 * time.Second,
					},
				},
			})
		},
	)
}
func (cmd *WorkerCommand) Execute(args []string) error {
	logger := lager.NewLogger("worker")
	logger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.INFO))

	worker, gardenRunner, err := cmd.gardenRunner(logger.Session("garden"), args)
	if err != nil {
		return err
	}

	baggageclaimRunner, err := cmd.baggageclaimRunner(logger.Session("baggageclaim"))
	if err != nil {
		return err
	}

	members := grouper.Members{
		{
			Name:   "garden",
			Runner: gardenRunner,
		},
		{
			Name:   "baggageclaim",
			Runner: baggageclaimRunner,
		},
	}

	// Only register with the TSA when a worker key is configured.
	if cmd.TSA.WorkerPrivateKey != "" {
		members = append(members, grouper.Member{
			Name:   "beacon",
			Runner: cmd.beaconRunner(logger.Session("beacon"), worker),
		})
	}

	runner := sigmon.New(grouper.NewParallel(os.Interrupt, members))

	return <-ifrit.Invoke(runner).Wait()
}
	address     string
	lrp         receptor.DesiredLRPCreateRequest
)

BeforeEach(func() {
	processGuid = helpers.GenerateGuid()
	address = componentMaker.Addresses.SSHProxy

	var fileServer ifrit.Runner
	fileServer, fileServerStaticDir = componentMaker.FileServer()

	runtime = ginkgomon.Invoke(grouper.NewParallel(os.Kill, grouper.Members{
		{"router", componentMaker.Router()},
		{"file-server", fileServer},
		{"rep", componentMaker.Rep()},
		{"converger", componentMaker.Converger()},
		{"auctioneer", componentMaker.Auctioneer()},
		{"route-emitter", componentMaker.RouteEmitter()},
		{"ssh-proxy", componentMaker.SSHProxy()},
	}))

	tgCompressor := compressor.NewTgz()
	err := tgCompressor.Compress(componentMaker.Artifacts.Executables["sshd"], filepath.Join(fileServerStaticDir, "sshd.tgz"))
	Expect(err).NotTo(HaveOccurred())

	sshRoute := routes.SSHRoute{
		ContainerPort:   3456,
		PrivateKey:      componentMaker.SSHConfig.PrivateKeyPem,
		HostFingerprint: ssh_helpers.MD5Fingerprint(componentMaker.SSHConfig.HostKey.PublicKey()),
	}
}

switchboardRunner := ginkgomon.New(ginkgomon.Config{
	Command: exec.Command(
		switchboardBinPath,
		fmt.Sprintf("-configPath=%s", configPath),
	),
	Name:              "switchboard",
	StartCheck:        "started",
	StartCheckTimeout: startupTimeout,
})

group := grouper.NewParallel(os.Kill, grouper.Members{
	{Name: "backend-0", Runner: dummies.NewBackendRunner(0, backends[0])},
	{Name: "backend-1", Runner: dummies.NewBackendRunner(1, backends[1])},
	{Name: "healthcheck-0", Runner: healthcheckRunners[0]},
	{Name: "healthcheck-1", Runner: healthcheckRunners[1]},
	{Name: "switchboard", Runner: switchboardRunner},
})
process = ifrit.Invoke(group)

var err error
var conn net.Conn
Eventually(func() error {
	conn, err = net.Dial("tcp", fmt.Sprintf("localhost:%d", switchboardPort))
	return err
}, startupTimeout).Should(Succeed())
defer conn.Close()

response, err := sendData(conn, "detect active")
Expect(err).NotTo(HaveOccurred())
			Expect(modelErr.Type).To(Equal(models.Error_ResourceNotFound))
		})
	})
})

Describe("Cells", func() {
	const cell1 = "cell-id-1"
	const cell2 = "cell-id-2"

	Context("when there are two cells", func() {
		var maintainers ifrit.Process

		BeforeEach(func() {
			Expect(serviceClient.Cells(logger)).To(HaveLen(0))
			maintainers = ifrit.Invoke(grouper.NewParallel(os.Interrupt, grouper.Members{
				{cell1, serviceClient.NewCellPresenceRunner(logger, newCellPresence(cell1), locket.RetryInterval, locket.LockTTL)},
				{cell2, serviceClient.NewCellPresenceRunner(logger, newCellPresence(cell2), locket.RetryInterval, locket.LockTTL)},
			}))
		})

		AfterEach(func() {
			ginkgomon.Interrupt(maintainers)
		})

		It("returns both cells", func() {
			Eventually(func() (models.CellSet, error) {
				return serviceClient.Cells(logger)
			}).Should(HaveLen(2))

			Expect(serviceClient.Cells(logger)).To(HaveKey(cell1))
			Expect(serviceClient.Cells(logger)).To(HaveKey(cell2))
		})
	})
})
})
web: the-start-command
EOF
`},
}

BeforeEach(func() {
	appId = helpers.GenerateGuid()
	taskId = helpers.GenerateGuid()

	fileServer, dir := componentMaker.FileServer()
	fileServerStaticDir = dir
	fakeCC = componentMaker.FakeCC()

	cell = ginkgomon.Invoke(grouper.NewParallel(os.Kill, grouper.Members{
		{"rep", componentMaker.Rep("-memoryMB=1024")},
	}))

	brain = ginkgomon.Invoke(grouper.NewParallel(os.Kill, grouper.Members{
		{"bbs", componentMaker.BBS()},
		{"receptor", componentMaker.Receptor()},
		{"auctioneer", componentMaker.Auctioneer()},
		{"file-server", fileServer},
	}))

	bridge = ginkgomon.Invoke(grouper.NewParallel(os.Kill, grouper.Members{
		{"cc", fakeCC},
		{"stager", componentMaker.Stager()},
		{"nsync-listener", componentMaker.NsyncListener()},
	}))
	rep       ifrit.Process
	converger ifrit.Process

	appId       string
	processGuid string

	runningLRPsPoller        func() []receptor.ActualLRPResponse
	helloWorldInstancePoller func() []string
)

BeforeEach(func() {
	fileServer, fileServerStaticDir := componentMaker.FileServer()

	runtime = ginkgomon.Invoke(grouper.NewParallel(os.Kill, grouper.Members{
		{"file-server", fileServer},
		{"route-emitter", componentMaker.RouteEmitter()},
		{"router", componentMaker.Router()},
	}))

	archive_helper.CreateZipArchive(
		filepath.Join(fileServerStaticDir, "lrp.zip"),
		fixtures.HelloWorldIndexLRP(),
	)

	appId = helpers.GenerateGuid()
	processGuid = helpers.GenerateGuid()

	runningLRPsPoller = func() []receptor.ActualLRPResponse {
		return helpers.ActiveActualLRPs(receptorClient, processGuid)
	}
func (cmd *ATCCommand) Run(signals <-chan os.Signal, ready chan<- struct{}) error {
	err := cmd.validate()
	if err != nil {
		return err
	}

	logger, reconfigurableSink := cmd.constructLogger()
	cmd.configureMetrics(logger)

	sqlDB, pipelineDBFactory, err := cmd.constructDB(logger)
	if err != nil {
		return err
	}

	workerClient := cmd.constructWorkerPool(logger, sqlDB)

	tracker := resource.NewTracker(workerClient, sqlDB)
	engine := cmd.constructEngine(sqlDB, workerClient, tracker)

	radarSchedulerFactory := pipelines.NewRadarSchedulerFactory(
		tracker,
		cmd.ResourceCheckingInterval,
		engine,
		sqlDB,
	)

	signingKey, err := cmd.loadOrGenerateSigningKey()
	if err != nil {
		return err
	}

	authValidator, basicAuthEnabled := cmd.constructValidator(signingKey)

	oauthProviders, err := cmd.configureOAuthProviders(logger)
	if err != nil {
		return err
	}

	drain := make(chan struct{})

	apiHandler, err := cmd.constructAPIHandler(
		logger,
		reconfigurableSink,
		sqlDB,
		authValidator,
		oauthProviders,
		basicAuthEnabled,
		signingKey,
		pipelineDBFactory,
		engine,
		workerClient,
		drain,
		radarSchedulerFactory,
	)
	if err != nil {
		return err
	}

	oauthHandler, err := auth.NewOAuthHandler(
		logger,
		oauthProviders,
		signingKey,
	)
	if err != nil {
		return err
	}

	webHandler, err := cmd.constructWebHandler(
		logger,
		sqlDB,
		authValidator,
		pipelineDBFactory,
		engine,
	)
	if err != nil {
		return err
	}

	members := []grouper.Member{
		{"drainer", drainer(drain)},

		{"web", http_server.New(
			cmd.bindAddr(),
			cmd.constructHTTPHandler(
				webHandler,
				apiHandler,
				oauthHandler,
			),
		)},

		{"debug", http_server.New(
			cmd.debugBindAddr(),
			http.DefaultServeMux,
		)},

		{"pipelines", pipelines.SyncRunner{
			Syncer: cmd.constructPipelineSyncer(
				logger.Session("syncer"),
				sqlDB,
				pipelineDBFactory,
				radarSchedulerFactory,
			),
			Interval: 10 * time.Second,
			Clock:    clock.NewClock(),
		}},

		{"builds", builds.TrackerRunner{
			Tracker: builds.NewTracker(
				logger.Session("build-tracker"),
				sqlDB,
				engine,
			),
			Interval: 10 * time.Second,
			Clock:    clock.NewClock(),
		}},

		{"lostandfound", lostandfound.NewRunner(
			logger.Session("lost-and-found"),
			lostandfound.NewBaggageCollector(
				logger.Session("baggage-collector"),
				workerClient,
				sqlDB,
				pipelineDBFactory,
			),
			sqlDB,
			clock.NewClock(),
			5*time.Minute,
		)},
	}

	members = cmd.appendStaticWorker(logger, sqlDB, members)

	group := grouper.NewParallel(os.Interrupt, members)
	running := ifrit.Invoke(sigmon.New(group))

	logger.Info("listening", lager.Data{
		"web":   cmd.bindAddr(),
		"debug": cmd.debugBindAddr(),
	})

	close(ready)

	// Forward any signals we receive to the group, and exit when it does.
	for {
		select {
		case s := <-signals:
			running.Signal(s)
		case err := <-running.Wait():
			if err != nil {
				logger.Error("exited-with-failure", err)
			}

			return err
		}
	}
}
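The drainer(drain) member above isn't defined in this section, but the older atc main function later on inlines the equivalent member directly as an ifrit.RunFunc; a sketch consistent with that inline version:

// drainer reports ready immediately, then closes the drain channel on
// shutdown so in-flight API connections can finish gracefully.
func drainer(drain chan<- struct{}) ifrit.Runner {
	return ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		close(ready)
		<-signals
		close(drain)
		return nil
	})
}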
	cellB ifrit.Process

	processGuid string
	appId       string
)

BeforeEach(func() {
	processGuid = helpers.GenerateGuid()
	appId = helpers.GenerateGuid()

	fileServer, fileServerStaticDir := componentMaker.FileServer()

	runtime = ginkgomon.Invoke(grouper.NewParallel(os.Kill, grouper.Members{
		{"router", componentMaker.Router()},
		{"file-server", fileServer},
		{"converger", componentMaker.Converger("-convergeRepeatInterval", "1s")},
		{"auctioneer", componentMaker.Auctioneer()},
		{"route-emitter", componentMaker.RouteEmitter()},
	}))

	cellAID = "cell-a"
	cellBID = "cell-b"

	cellAExecutorAddr = fmt.Sprintf("127.0.0.1:%d", 13100+GinkgoParallelNode())
	cellBExecutorAddr = fmt.Sprintf("127.0.0.1:%d", 13200+GinkgoParallelNode())

	cellARepAddr = fmt.Sprintf("0.0.0.0:%d", 14100+GinkgoParallelNode())
	cellBRepAddr = fmt.Sprintf("0.0.0.0:%d", 14200+GinkgoParallelNode())

	cellARepRunner = componentMaker.RepN(0,
		"-cellID", cellAID,
	Δ time.Duration = 10 * time.Millisecond
)

BeforeEach(func() {
	childRunner1 = fake_runner.NewTestRunner()
	childRunner2 = fake_runner.NewTestRunner()
	childRunner3 = fake_runner.NewTestRunner()

	members = grouper.Members{
		{"child1", childRunner1},
		{"child2", childRunner2},
		{"child3", childRunner3},
	}
	groupRunner = grouper.NewParallel(os.Interrupt, members)
})

AfterEach(func() {
	childRunner1.EnsureExit()
	childRunner2.EnsureExit()
	childRunner3.EnsureExit()
	ginkgomon.Kill(groupProcess)
})

Describe("Start", func() {
	BeforeEach(func() {
		groupProcess = ifrit.Background(groupRunner)
	})
"github.com/cloudfoundry-incubator/receptor" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "github.com/tedsuo/ifrit" "github.com/tedsuo/ifrit/ginkgomon" "github.com/tedsuo/ifrit/grouper" ) var _ = Describe("Privileges", func() { var runtime ifrit.Process BeforeEach(func() { runtime = ginkgomon.Invoke(grouper.NewParallel(os.Kill, grouper.Members{ {"rep", componentMaker.Rep("-allowPrivileged")}, {"converger", componentMaker.Converger()}, {"auctioneer", componentMaker.Auctioneer()}, {"router", componentMaker.Router()}, {"route-emitter", componentMaker.RouteEmitter()}, })) }) AfterEach(func() { helpers.StopProcesses(runtime) }) Context("when a task that tries to do privileged things is requested", func() { var taskRequest receptor.TaskCreateRequest BeforeEach(func() { taskRequest = helpers.TaskCreateRequest( helpers.GenerateGuid(),
func main() {
	if version == "" {
		version = "dev"
	}

	if len(os.Args) > 1 {
		arg := os.Args[1]
		if arg == "version" || arg == "-v" || arg == "--version" {
			fmt.Printf("%s\n", version)
			os.Exit(0)
		}
	}

	flag.Parse()

	logger, sink, err := logger.InitializeLogger(logger.LogLevel(*logLevel))
	if err != nil {
		fmt.Printf("Failed to initialize logger\n")
		panic(err)
	}
	logger.Info("garagepi starting", lager.Data{"version": version})

	logger.Debug("flags", lager.Data{
		"enableHTTP":  enableHTTP,
		"enableHTTPS": enableHTTPS,
		"forceHTTPS":  forceHTTPS,
	})

	if !(*enableHTTP || *enableHTTPS) {
		logger.Fatal("exiting", fmt.Errorf("at least one of enableHTTP and enableHTTPS must be true"))
	}

	if *enableHTTPS {
		if *keyFile == "" {
			logger.Fatal("exiting", fmt.Errorf("keyFile must be provided if enableHTTPS is true"))
		}

		if *certFile == "" {
			logger.Fatal("exiting", fmt.Errorf("certFile must be provided if enableHTTPS is true"))
		}
	}

	if *forceHTTPS && !(*enableHTTP && *enableHTTPS) {
		logger.Fatal("exiting", fmt.Errorf("enableHTTP must be enabled if forceHTTPS is true"))
	}

	if !*dev && (*username == "" || *password == "") {
		logger.Fatal("exiting", fmt.Errorf("must specify -username and -password or turn on dev mode"))
	}

	var tlsConfig *tls.Config
	if *keyFile != "" && *certFile != "" {
		var err error
		tlsConfig, err = createTLSConfig(*keyFile, *certFile)
		if err != nil {
			logger.Fatal("exiting. Failed to create tlsConfig", err)
		}
	}

	cookieHandler := securecookie.New(
		securecookie.GenerateRandomKey(64),
		securecookie.GenerateRandomKey(32),
	)

	templates, err := filesystem.LoadTemplates()
	if err != nil {
		logger.Fatal("exiting", err)
	}

	osHelper := gpos.NewOSHelper(logger)

	loginHandler := login.NewHandler(
		logger,
		templates,
		cookieHandler,
		*cookieMaxAge,
	)

	webcamURL := fmt.Sprintf("%s:%d", *webcamHost, *webcamPort)
	wh := webcam.NewHandler(
		logger,
		webcamURL,
	)

	gpio := gpio.NewGpio(osHelper, logger)

	lh := light.NewHandler(
		logger,
		gpio,
		*gpioLightPin,
	)

	hh := homepage.NewHandler(
		logger,
		templates,
		lh,
		loginHandler,
	)

	dh := door.NewHandler(
		logger,
		osHelper,
		gpio,
		*gpioDoorPin)

	loglevelHandler := loglevel.NewServer(
		logger,
		sink,
	)

	staticFileServer := http.FileServer(static.FS(false))

	rtr := mux.NewRouter()

	rtr.PathPrefix("/static/").Handler(staticFileServer)
	rtr.HandleFunc("/", hh.Handle).Methods("GET")
	rtr.HandleFunc("/webcam", wh.Handle).Methods("GET")

	s := rtr.PathPrefix("/api/v1").Subrouter()
	s.HandleFunc("/toggle", dh.HandleToggle).Methods("POST")
	s.HandleFunc("/light", lh.HandleGet).Methods("GET")
	s.HandleFunc("/light", lh.HandleSet).Methods("POST")
	s.HandleFunc("/loglevel", loglevelHandler.GetMinLevel).Methods("GET")
	s.HandleFunc("/loglevel", loglevelHandler.SetMinLevel).Methods("POST")

	rtr.HandleFunc("/login", loginHandler.LoginGET).Methods("GET")
	rtr.HandleFunc("/login", loginHandler.LoginPOST).Methods("POST")
	rtr.HandleFunc("/logout", loginHandler.LogoutPOST).Methods("POST")

	members := grouper.Members{}
	if *enableHTTPS {
		forceHTTPS := false // the HTTPS listener itself never redirects
		httpsRunner := NewWebRunner(
			*httpsPort,
			logger,
			rtr,
			tlsConfig,
			forceHTTPS,
			*redirectPort,
			*username,
			*password,
			cookieHandler,
		)

		members = append(members, grouper.Member{
			Name:   "https",
			Runner: httpsRunner,
		})
	}

	if *enableHTTP {
		var tlsConfig *tls.Config // deliberately nil: plain HTTP
		httpRunner := NewWebRunner(
			*httpPort,
			logger,
			rtr,
			tlsConfig,
			*forceHTTPS,
			*redirectPort,
			*username,
			*password,
			cookieHandler,
		)

		members = append(members, grouper.Member{
			Name:   "http",
			Runner: httpRunner,
		})
	}

	group := grouper.NewParallel(os.Kill, members)
	process := ifrit.Invoke(group)

	if *pidFile != "" {
		pid := os.Getpid()
		err = ioutil.WriteFile(*pidFile, []byte(strconv.Itoa(pid)), 0644)
		if err != nil {
			logger.Fatal("Failed to write pid file", err, lager.Data{
				"pid":     pid,
				"pidFile": *pidFile,
			})
		}
	}

	logger.Info("garagepi started")

	err = <-process.Wait()
	if err != nil {
		logger.Error("Error running garagepi", err)
	}
}
	Action: models.WrapAction(&models.RunAction{
		Path: "sh",
		Args: []string{"-c", `[ "$(id -u)" -eq "0" ]`},
		User: "******",
	}),
}

config := executorinit.DefaultConfiguration
config.GardenNetwork = "tcp"
config.GardenAddr = componentMaker.Addresses.GardenLinux
config.AllowPrivileged = allowPrivileged

logger := lagertest.NewTestLogger("test")
executorClient, executorMembers, err := executorinit.Initialize(logger, config)
Expect(err).NotTo(HaveOccurred())
runner = grouper.NewParallel(os.Kill, executorMembers)

_, err = executorClient.AllocateContainers([]executor.Container{container})
Expect(err).NotTo(HaveOccurred())

err = executorClient.RunContainer(containerGuid)
Expect(err).NotTo(HaveOccurred())

Eventually(func() executor.State {
	container, err := executorClient.GetContainer(containerGuid)
	if err != nil {
		return executor.StateInvalid
	}

	runResult = container.RunResult
	return container.State
	var builtArtifacts world.BuiltArtifacts

	err := json.Unmarshal(encodedBuiltArtifacts, &builtArtifacts)
	Expect(err).NotTo(HaveOccurred())

	localIP, err := localip.LocalIP()
	Expect(err).NotTo(HaveOccurred())

	componentMaker = helpers.MakeComponentMaker(builtArtifacts, localIP)
})

var _ = BeforeEach(func() {
	plumbing = ginkgomon.Invoke(grouper.NewParallel(os.Kill, grouper.Members{
		{"etcd", componentMaker.Etcd()},
		{"nats", componentMaker.NATS()},
		{"consul", componentMaker.Consul()},
		{"bbs", componentMaker.BBS()},
		{"receptor", componentMaker.Receptor()},
		{"garden-linux", componentMaker.GardenLinux("-denyNetworks=0.0.0.0/0", "-allowHostAccess=true")},
	}))

	helpers.ConsulWaitUntilReady()

	gardenClient = componentMaker.GardenClient()
	natsClient = componentMaker.NATSClient()
	receptorClient = componentMaker.ReceptorClient()

	helpers.UpsertInigoDomain(receptorClient)

	inigo_announcement_server.Start(componentMaker.ExternalAddress)
})
	Expect(err).NotTo(HaveOccurred())

	request.Header.Set("Content-Type", "application/json")

	return http.DefaultClient.Do(request)
}

BeforeEach(func() {
	appId = helpers.GenerateGuid()

	fileServer, fileServerStaticDir := componentMaker.FileServer()

	runtime = ginkgomon.Invoke(grouper.NewParallel(os.Kill, grouper.Members{
		{"bbs", componentMaker.BBS()},
		{"receptor", componentMaker.Receptor()},
		{"rep", componentMaker.Rep()},
		{"auctioneer", componentMaker.Auctioneer()},
		{"route-emitter", componentMaker.RouteEmitter()},
		{"converger", componentMaker.Converger()},
		{"router", componentMaker.Router()},
		{"file-server", fileServer},
	}))

	bridge = ginkgomon.Invoke(grouper.NewParallel(os.Kill, grouper.Members{
		{"tps-listener", componentMaker.TPSListener()},
		{"nsync-listener", componentMaker.NsyncListener()},
	}))

	archive_helper.CreateZipArchive(
		filepath.Join(fileServerStaticDir, "droplet.zip"),
		fixtures.HelloWorldIndexApp(),
	)
	cellProcess = nil
	convergerProcess = nil
})

AfterEach(func() {
	helpers.StopProcesses(
		auctioneerProcess,
		cellProcess,
		convergerProcess,
	)
})

Context("when a rep and an auctioneer are running", func() {
	BeforeEach(func() {
		cellProcess = ginkgomon.Invoke(grouper.NewParallel(os.Kill, grouper.Members{
			{"rep", componentMaker.Rep("-memoryMB", "1024")},
		}))

		auctioneerProcess = ginkgomon.Invoke(componentMaker.Auctioneer())
	})

	Context("and a standard Task is desired", func() {
		var taskGuid string
		var taskSleepSeconds int

		var taskRequest receptor.TaskCreateRequest

		BeforeEach(func() {
			taskSleepSeconds = 10
			taskGuid = helpers.GenerateGuid()
func main() {
	flag.Parse()

	if !*dev && (*httpUsername == "" || (*httpHashedPassword == "" && *httpPassword == "")) {
		fatal(errors.New("must specify -httpUsername and -httpPassword or -httpHashedPassword or turn on dev mode"))
	}

	if _, err := os.Stat(*templatesDir); err != nil {
		fatal(errors.New("directory specified via -templates does not exist"))
	}

	if _, err := os.Stat(*publicDir); err != nil {
		fatal(errors.New("directory specified via -public does not exist"))
	}

	logger := lager.NewLogger("atc")

	logLevel := lager.INFO
	if *dev {
		logLevel = lager.DEBUG
	}

	sink := lager.NewReconfigurableSink(lager.NewWriterSink(os.Stdout, lager.DEBUG), logLevel)
	logger.RegisterSink(sink)

	var err error

	var dbConn Db.Conn

	// Retry until the database is reachable; any other error is fatal.
	for {
		dbConn, err = migration.Open(*sqlDriver, *sqlDataSource, migrations.Migrations)
		if err != nil {
			if strings.Contains(err.Error(), " dial ") {
				logger.Error("failed-to-open-db", err)
				time.Sleep(5 * time.Second)
				continue
			}

			fatal(err)
		}

		break
	}

	dbConn = Db.Explain(logger, dbConn, 500*time.Millisecond)

	listener := pq.NewListener(*sqlDataSource, time.Second, time.Minute, nil)
	bus := Db.NewNotificationsBus(listener)

	db := Db.NewSQL(logger.Session("db"), dbConn, bus)
	pipelineDBFactory := Db.NewPipelineDBFactory(logger.Session("db"), dbConn, bus, db)

	var configDB Db.ConfigDB
	configDB = Db.PlanConvertingConfigDB{db}

	var resourceTypesNG []atc.WorkerResourceType
	err = json.Unmarshal([]byte(*resourceTypes), &resourceTypesNG)
	if err != nil {
		logger.Fatal("invalid-resource-types", err)
	}

	// A single static Garden worker when an address is given, otherwise a
	// pool of workers discovered through the database.
	var workerClient worker.Client
	if *gardenAddr != "" {
		workerClient = worker.NewGardenWorker(
			gclient.New(gconn.NewWithLogger(
				*gardenNetwork,
				*gardenAddr,
				logger.Session("garden-connection"),
			)),
			clock.NewClock(),
			-1,
			resourceTypesNG,
			"linux",
			[]string{},
		)
	} else {
		workerClient = worker.NewPool(worker.NewDBWorkerProvider(db, logger))
	}

	resourceTracker := resource.NewTracker(workerClient)

	gardenFactory := exec.NewGardenFactory(workerClient, resourceTracker, func() string {
		guid, err := uuid.NewV4()
		if err != nil {
			panic("not enough entropy to generate guid: " + err.Error())
		}

		return guid.String()
	})

	execEngine := engine.NewExecEngine(gardenFactory, engine.NewBuildDelegateFactory(db), db)
	engine := engine.NewDBEngine(engine.Engines{execEngine}, db, db)

	var webValidator auth.Validator

	if *httpUsername != "" && *httpHashedPassword != "" {
		webValidator = auth.BasicAuthHashedValidator{
			Username:       *httpUsername,
			HashedPassword: *httpHashedPassword,
		}
	} else if *httpUsername != "" && *httpPassword != "" {
		webValidator = auth.BasicAuthValidator{
			Username: *httpUsername,
			Password: *httpPassword,
		}
	} else {
		webValidator = auth.NoopValidator{}
	}

	callbacksURL, err := url.Parse(*callbacksURLString)
	if err != nil {
		fatal(err)
	}

	drain := make(chan struct{})

	apiHandler, err := api.NewHandler(
		logger,                      // logger lager.Logger,
		webValidator,                // validator auth.Validator,
		pipelineDBFactory,           // pipelineDBFactory db.PipelineDBFactory,
		configDB,                    // configDB db.ConfigDB,
		db,                          // buildsDB buildserver.BuildsDB,
		db,                          // workerDB workerserver.WorkerDB,
		db,                          // pipeDB pipes.PipeDB,
		db,                          // pipelinesDB db.PipelinesDB,
		config.ValidateConfig,       // configValidator configserver.ConfigValidator,
		callbacksURL.String(),       // peerURL string,
		buildserver.NewEventHandler, // eventHandlerFactory buildserver.EventHandlerFactory,
		drain,                       // drain <-chan struct{},
		engine,                      // engine engine.Engine,
		workerClient,                // workerClient worker.Client,
		sink,                        // sink *lager.ReconfigurableSink,
		*cliDownloadsDir,            // cliDownloadsDir string,
	)
	if err != nil {
		fatal(err)
	}

	radarSchedulerFactory := pipelines.NewRadarSchedulerFactory(
		resourceTracker,
		*checkInterval,
		db,
		engine,
		db,
	)

	webHandler, err := web.NewHandler(
		logger,
		webValidator,
		radarSchedulerFactory,
		db,
		pipelineDBFactory,
		configDB,
		*templatesDir,
		*publicDir,
		engine,
	)
	if err != nil {
		fatal(err)
	}

	webMux := http.NewServeMux()
	webMux.Handle("/api/v1/", apiHandler)
	webMux.Handle("/", webHandler)

	var httpHandler http.Handler

	httpHandler = webMux

	if !*publiclyViewable {
		httpHandler = auth.Handler{
			Handler:   httpHandler,
			Validator: webValidator,
		}
	}

	// copy Authorization header as ATC-Authorization cookie for websocket auth
	httpHandler = auth.CookieSetHandler{
		Handler: httpHandler,
	}

	httpHandler = httpmetrics.Wrap(httpHandler)

	webListenAddr := fmt.Sprintf("%s:%d", *webListenAddress, *webListenPort)
	debugListenAddr := fmt.Sprintf("%s:%d", *debugListenAddress, *debugListenPort)

	syncer := pipelines.NewSyncer(
		logger.Session("syncer"),
		db,
		pipelineDBFactory,
		func(pipelineDB Db.PipelineDB) ifrit.Runner {
			return grouper.NewParallel(os.Interrupt, grouper.Members{
				{
					pipelineDB.ScopedName("radar"),
					rdr.NewRunner(
						logger.Session(pipelineDB.ScopedName("radar")),
						*noop,
						db,
						radarSchedulerFactory.BuildRadar(pipelineDB),
						pipelineDB,
						1*time.Minute,
					),
				},
				{
					pipelineDB.ScopedName("scheduler"),
					&sched.Runner{
						Logger: logger.Session(pipelineDB.ScopedName("scheduler")),

						Locker: db,
						DB:     pipelineDB,

						Scheduler: radarSchedulerFactory.BuildScheduler(pipelineDB),

						Noop: *noop,

						Interval: 10 * time.Second,
					},
				},
			})
		},
	)

	buildTracker := builds.NewTracker(
		logger.Session("build-tracker"),
		db,
		engine,
	)

	memberGrouper := []grouper.Member{
		{"web", http_server.New(webListenAddr, httpHandler)},

		{"debug", http_server.New(debugListenAddr, http.DefaultServeMux)},

		{"drainer", ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
			close(ready)
			<-signals
			close(drain)
			return nil
		})},

		{"pipelines", pipelines.SyncRunner{
			Syncer:   syncer,
			Interval: 10 * time.Second,
			Clock:    clock.NewClock(),
		}},

		{"builds", builds.TrackerRunner{
			Tracker:  buildTracker,
			Interval: 10 * time.Second,
			Clock:    clock.NewClock(),
		}},
	}

	group := grouper.NewParallel(os.Interrupt, memberGrouper)
	running := ifrit.Invoke(sigmon.New(group))

	logger.Info("listening", lager.Data{
		"web":   webListenAddr,
		"debug": debugListenAddr,
	})

	err = <-running.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}
}
func (cmd *ATCCommand) Runner(args []string) (ifrit.Runner, error) {
	err := cmd.validate()
	if err != nil {
		return nil, err
	}

	logger, reconfigurableSink := cmd.constructLogger()
	cmd.configureMetrics(logger)

	sqlDB, pipelineDBFactory, err := cmd.constructDB(logger)
	if err != nil {
		return nil, err
	}

	trackerFactory := resource.TrackerFactory{}

	workerClient := cmd.constructWorkerPool(logger, sqlDB, trackerFactory)

	tracker := resource.NewTracker(workerClient)
	engine := cmd.constructEngine(sqlDB, workerClient, tracker)

	radarSchedulerFactory := pipelines.NewRadarSchedulerFactory(
		tracker,
		cmd.ResourceCheckingInterval,
		engine,
		sqlDB,
	)

	radarScannerFactory := radar.NewScannerFactory(
		tracker,
		cmd.ResourceCheckingInterval,
		cmd.ExternalURL.String(),
	)

	signingKey, err := cmd.loadOrGenerateSigningKey()
	if err != nil {
		return nil, err
	}

	err = sqlDB.CreateDefaultTeamIfNotExists()
	if err != nil {
		return nil, err
	}

	authValidator := cmd.constructValidator(signingKey, sqlDB)

	err = cmd.updateBasicAuthCredentials(sqlDB)
	if err != nil {
		return nil, err
	}

	jwtReader := auth.JWTReader{
		PublicKey: &signingKey.PublicKey,
	}

	err = cmd.configureOAuthProviders(logger, sqlDB)
	if err != nil {
		return nil, err
	}

	providerFactory := provider.NewOAuthFactory(
		sqlDB,
		cmd.oauthBaseURL(),
		auth.OAuthRoutes,
		auth.OAuthCallback,
	)

	drain := make(chan struct{})

	apiHandler, err := cmd.constructAPIHandler(
		logger,
		reconfigurableSink,
		sqlDB,
		authValidator,
		jwtReader,
		providerFactory,
		signingKey,
		pipelineDBFactory,
		engine,
		workerClient,
		drain,
		radarSchedulerFactory,
		radarScannerFactory,
	)
	if err != nil {
		return nil, err
	}

	oauthHandler, err := auth.NewOAuthHandler(
		logger,
		providerFactory,
		signingKey,
		sqlDB,
	)
	if err != nil {
		return nil, err
	}

	webHandler, err := cmd.constructWebHandler(
		logger,
		authValidator,
		jwtReader,
		pipelineDBFactory,
	)
	if err != nil {
		return nil, err
	}

	members := []grouper.Member{
		{"drainer", drainer(drain)},

		{"web", http_server.New(
			cmd.bindAddr(),
			cmd.constructHTTPHandler(
				webHandler,
				apiHandler,
				oauthHandler,
			),
		)},

		{"debug", http_server.New(
			cmd.debugBindAddr(),
			http.DefaultServeMux,
		)},

		{"pipelines", pipelines.SyncRunner{
			Syncer: cmd.constructPipelineSyncer(
				logger.Session("syncer"),
				sqlDB,
				pipelineDBFactory,
				radarSchedulerFactory,
			),
			Interval: 10 * time.Second,
			Clock:    clock.NewClock(),
		}},

		{"builds", builds.TrackerRunner{
			Tracker: builds.NewTracker(
				logger.Session("build-tracker"),
				sqlDB,
				engine,
			),
			Interval: 10 * time.Second,
			Clock:    clock.NewClock(),
		}},

		{"lostandfound", lostandfound.NewRunner(
			logger.Session("lost-and-found"),
			lostandfound.NewBaggageCollector(
				logger.Session("baggage-collector"),
				workerClient,
				sqlDB,
				pipelineDBFactory,
				cmd.OldResourceGracePeriod,
				24*time.Hour,
			),
			sqlDB,
			clock.NewClock(),
			cmd.ResourceCacheCleanupInterval,
		)},
	}

	members = cmd.appendStaticWorker(logger, sqlDB, members)

	return onReady(grouper.NewParallel(os.Interrupt, members), func() {
		logger.Info("listening", lager.Data{
			"web":   cmd.bindAddr(),
			"debug": cmd.debugBindAddr(),
		})
	}), nil
}
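onReady is likewise not shown in this section; judging only from this call site, it wraps a runner so a callback fires once the runner reports readiness. A hedged sketch of that behavior:

// Hypothetical onReady: run the wrapped runner in the background, fire cb
// once it becomes ready, and from then on relay signals and exit status.
func onReady(runner ifrit.Runner, cb func()) ifrit.Runner {
	return ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		process := ifrit.Background(runner)

		select {
		case <-process.Ready():
			cb()
			close(ready)
		case err := <-process.Wait():
			return err // exited before ever becoming ready
		}

		for {
			select {
			case sig := <-signals:
				process.Signal(sig)
			case err := <-process.Wait():
				return err
			}
		}
	})
}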
	return payload
}, func(encodedBuiltArtifacts []byte) {
	var builtArtifacts world.BuiltArtifacts

	err := json.Unmarshal(encodedBuiltArtifacts, &builtArtifacts)
	Expect(err).NotTo(HaveOccurred())

	localIP, err := localip.LocalIP()
	Expect(err).NotTo(HaveOccurred())

	componentMaker = helpers.MakeComponentMaker(builtArtifacts, localIP)
})

var _ = BeforeEach(func() {
	gardenProcess = ginkgomon.Invoke(grouper.NewParallel(os.Kill, grouper.Members{
		{"garden-linux", componentMaker.GardenLinux()},
	}))

	gardenClient = componentMaker.GardenClient()
})

var _ = AfterEach(func() {
	destroyContainerErrors := helpers.CleanupGarden(gardenClient)

	helpers.StopProcesses(gardenProcess)

	Expect(destroyContainerErrors).To(
		BeEmpty(),
		"%d containers failed to be destroyed!",
		len(destroyContainerErrors),
	)
)

var fileServerStaticDir string

BeforeEach(func() {
	var fileServerRunner ifrit.Runner
	fileServerRunner, fileServerStaticDir = componentMaker.FileServer()

	cellGroup := grouper.Members{
		{"file-server", fileServerRunner},
		{"rep", componentMaker.Rep("-memoryMB", "1024")},
		{"auctioneer", componentMaker.Auctioneer()},
		{"converger", componentMaker.Converger()},
	}
	cellProcess = ginkgomon.Invoke(grouper.NewParallel(os.Interrupt, cellGroup))

	Eventually(receptorClient.Cells).Should(HaveLen(1))
})

AfterEach(func() {
	helpers.StopProcesses(cellProcess)
})

Describe("Running a task", func() {
	var guid string

	BeforeEach(func() {
		guid = helpers.GenerateGuid()
	})
}, func(encodedBuiltArtifacts []byte) {
	var builtArtifacts world.BuiltArtifacts

	err := json.Unmarshal(encodedBuiltArtifacts, &builtArtifacts)
	Expect(err).NotTo(HaveOccurred())

	localIP, err := localip.LocalIP()
	Expect(err).NotTo(HaveOccurred())

	componentMaker = helpers.MakeComponentMaker(builtArtifacts, localIP)
})

var _ = BeforeEach(func() {
	plumbing = ginkgomon.Invoke(grouper.NewParallel(os.Kill, grouper.Members{
		{"etcd", componentMaker.Etcd()},
		{"nats", componentMaker.NATS()},
		{"consul", componentMaker.Consul()},
		{"garden-linux", componentMaker.GardenLinux("-allowHostAccess=true")},
	}))

	helpers.ConsulWaitUntilReady()

	gardenClient = componentMaker.GardenClient()
	receptorClient = componentMaker.ReceptorClient()

	inigo_announcement_server.Start(componentMaker.ExternalAddress)
})

var _ = AfterEach(func() {
	inigo_announcement_server.Stop()

	destroyContainerErrors := helpers.CleanupGarden(gardenClient)