func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	cf_http.Initialize(*communicationTimeout)

	logger, reconfigurableSink := cf_lager.New("auctioneer")
	initializeDropsonde(logger)

	if err := validateBBSAddress(); err != nil {
		logger.Fatal("invalid-bbs-address", err)
	}

	consulClient, err := consuladapter.NewClientFromUrl(*consulCluster)
	if err != nil {
		logger.Fatal("new-client-failed", err)
	}

	port, err := strconv.Atoi(strings.Split(*listenAddr, ":")[1])
	if err != nil {
		logger.Fatal("invalid-port", err)
	}

	clock := clock.NewClock()
	auctioneerServiceClient := auctioneer.NewServiceClient(consulClient, clock)

	auctionRunner := initializeAuctionRunner(logger, *cellStateTimeout, initializeBBSClient(logger), *startingContainerWeight)
	auctionServer := initializeAuctionServer(logger, auctionRunner)
	lockMaintainer := initializeLockMaintainer(logger, auctioneerServiceClient, port)
	registrationRunner := initializeRegistrationRunner(logger, consulClient, clock, port)

	members := grouper.Members{
		{"lock-maintainer", lockMaintainer},
		{"auction-runner", auctionRunner},
		{"auction-server", auctionServer},
		{"registration-runner", registrationRunner},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)
	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err = <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
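// NOTE: illustrative sketch, not part of the original sources. The main
// functions in this section all follow the same ifrit boot pattern: collect
// named runners into grouper.Members, wrap them in an ordered group that
// tears down on os.Interrupt, and invoke the group under sigmon so OS signals
// are forwarded to the running processes. A minimal, self-contained version
// of that pattern (the "hello" runner and its behavior are hypothetical)
// looks like this:

package main

import (
	"os"

	"github.com/tedsuo/ifrit"
	"github.com/tedsuo/ifrit/grouper"
	"github.com/tedsuo/ifrit/sigmon"
)

func main() {
	// A trivial runner: signal readiness, then block until told to stop.
	hello := ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		close(ready)
		<-signals
		return nil
	})

	members := grouper.Members{
		{"hello", hello},
	}

	// Members start in order; on os.Interrupt they are stopped in reverse order.
	group := grouper.NewOrdered(os.Interrupt, members)
	monitor := ifrit.Invoke(sigmon.New(group))

	if err := <-monitor.Wait(); err != nil {
		os.Exit(1)
	}
}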
func initializeServiceClient(logger lager.Logger) nsync.ServiceClient {
	consulClient, err := consuladapter.NewClientFromUrl(*consulCluster)
	if err != nil {
		logger.Fatal("new-client-failed", err)
	}

	return nsync.NewServiceClient(consulClient, clock.NewClock())
}
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	cf_http.Initialize(*communicationTimeout)

	logger, reconfigurableSink := cf_lager.New("ssh-proxy")
	initializeDropsonde(logger)

	proxyConfig, err := configureProxy(logger)
	if err != nil {
		logger.Error("configure-failed", err)
		os.Exit(1)
	}

	sshProxy := proxy.New(logger, proxyConfig)
	server := server.NewServer(logger, *address, sshProxy)

	consulClient, err := consuladapter.NewClientFromUrl(*consulCluster)
	if err != nil {
		logger.Fatal("new-client-failed", err)
	}

	registrationRunner := initializeRegistrationRunner(logger, consulClient, *address, clock.NewClock())

	members := grouper.Members{
		{"ssh-proxy", server},
		{"registration-runner", registrationRunner},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{{
			"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink),
		}}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)
	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err = <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
	os.Exit(0)
}
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Var(
		&insecureDockerRegistries,
		"insecureDockerRegistry",
		"Docker registry to allow connecting to even if not secure. (Can be specified multiple times to allow insecure connection to multiple repositories)",
	)

	lifecycles := flags.LifecycleMap{}
	flag.Var(&lifecycles, "lifecycle", "app lifecycle binary bundle mapping (lifecycle[/stack]:bundle-filepath-in-fileserver)")
	flag.Parse()

	logger, reconfigurableSink := cf_lager.New("stager")
	initializeDropsonde(logger)

	ccClient := cc_client.NewCcClient(*ccBaseURL, *ccUsername, *ccPassword, *skipCertVerify)
	backends := initializeBackends(logger, lifecycles)

	handler := handlers.New(logger, ccClient, initializeBBSClient(logger), backends, clock.NewClock())

	clock := clock.NewClock()
	consulClient, err := consuladapter.NewClientFromUrl(*consulCluster)
	if err != nil {
		logger.Fatal("new-client-failed", err)
	}

	_, portString, err := net.SplitHostPort(*listenAddress)
	if err != nil {
		logger.Fatal("failed-invalid-listen-address", err)
	}
	portNum, err := net.LookupPort("tcp", portString)
	if err != nil {
		logger.Fatal("failed-invalid-listen-port", err)
	}

	registrationRunner := initializeRegistrationRunner(logger, consulClient, portNum, clock)

	members := grouper.Members{
		{"server", http_server.New(*listenAddress, handler)},
		{"registration-runner", registrationRunner},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	logger.Info("starting")

	group := grouper.NewOrdered(os.Interrupt, members)
	process := ifrit.Invoke(sigmon.New(group))

	logger.Info("Listening for staging requests!")

	err = <-process.Wait()
	if err != nil {
		logger.Fatal("Stager exited with error", err)
	}

	logger.Info("stopped")
}
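// NOTE: illustrative sketch, not part of the original sources. The listener
// processes in this section derive the port they register with Consul from a
// listen-address flag: net.SplitHostPort separates the host from the port
// string, and net.LookupPort resolves a numeric or named port to an integer.
// The address value below is hypothetical.

package main

import (
	"fmt"
	"log"
	"net"
)

func main() {
	listenAddress := "0.0.0.0:8888" // hypothetical flag value

	// Split "0.0.0.0:8888" into host "0.0.0.0" and port string "8888".
	_, portString, err := net.SplitHostPort(listenAddress)
	if err != nil {
		log.Fatal(err)
	}

	// Resolve the port string (numeric or a service name like "http") to an int.
	portNum, err := net.LookupPort("tcp", portString)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(portNum) // 8888
}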
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)

	lifecycles := flags.LifecycleMap{}
	flag.Var(&lifecycles, "lifecycle", "app lifecycle binary bundle mapping (lifecycle[/stack]:bundle-filepath-in-fileserver)")
	flag.Parse()

	cf_http.Initialize(*communicationTimeout)

	logger, reconfigurableSink := cf_lager.New("nsync-listener")
	initializeDropsonde(logger)

	recipeBuilderConfig := recipebuilder.Config{
		Lifecycles:    lifecycles,
		FileServerURL: *fileServerURL,
		KeyFactory:    keys.RSAKeyPairFactory,
	}

	recipeBuilders := map[string]recipebuilder.RecipeBuilder{
		"buildpack": recipebuilder.NewBuildpackRecipeBuilder(logger, recipeBuilderConfig),
		"docker":    recipebuilder.NewDockerRecipeBuilder(logger, recipeBuilderConfig),
	}

	handler := handlers.New(logger, initializeBBSClient(logger), recipeBuilders)

	consulClient, err := consuladapter.NewClientFromUrl(*consulCluster)
	if err != nil {
		logger.Fatal("new-consul-client-failed", err)
	}

	_, portString, err := net.SplitHostPort(*listenAddress)
	if err != nil {
		logger.Fatal("failed-invalid-listen-address", err)
	}
	portNum, err := net.LookupPort("tcp", portString)
	if err != nil {
		logger.Fatal("failed-invalid-listen-port", err)
	}

	clock := clock.NewClock()
	registrationRunner := initializeRegistrationRunner(logger, consulClient, portNum, clock)

	members := grouper.Members{
		{"server", http_server.New(*listenAddress, handler)},
		{"registration-runner", registrationRunner},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)
	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err = <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	etcdFlags := AddETCDFlags(flag.CommandLine)
	encryptionFlags := encryption.AddEncryptionFlags(flag.CommandLine)
	flag.Parse()

	cf_http.Initialize(*communicationTimeout)

	logger, reconfigurableSink := cf_lager.New("bbs")
	logger.Info("starting")

	initializeDropsonde(logger)

	clock := clock.NewClock()

	consulClient, err := consuladapter.NewClientFromUrl(*consulCluster)
	if err != nil {
		logger.Fatal("new-consul-client-failed", err)
	}

	serviceClient := bbs.NewServiceClient(consulClient, clock)

	maintainer := initializeLockMaintainer(logger, serviceClient)

	_, portString, err := net.SplitHostPort(*listenAddress)
	if err != nil {
		logger.Fatal("failed-invalid-listen-address", err)
	}
	portNum, err := net.LookupPort("tcp", portString)
	if err != nil {
		logger.Fatal("failed-invalid-listen-port", err)
	}

	_, portString, err = net.SplitHostPort(*healthAddress)
	if err != nil {
		logger.Fatal("failed-invalid-health-address", err)
	}
	_, err = net.LookupPort("tcp", portString)
	if err != nil {
		logger.Fatal("failed-invalid-health-port", err)
	}

	registrationRunner := initializeRegistrationRunner(logger, consulClient, portNum, clock)

	cbWorkPool := taskworkpool.New(logger, *taskCallBackWorkers, taskworkpool.HandleCompletedTask)

	var activeDB db.DB
	var sqlDB *sqldb.SQLDB
	var sqlConn *sql.DB
	var storeClient etcddb.StoreClient
	var etcdDB *etcddb.ETCDDB

	key, keys, err := encryptionFlags.Parse()
	if err != nil {
		logger.Fatal("cannot-setup-encryption", err)
	}
	keyManager, err := encryption.NewKeyManager(key, keys)
	if err != nil {
		logger.Fatal("cannot-setup-encryption", err)
	}
	cryptor := encryption.NewCryptor(keyManager, rand.Reader)

	etcdOptions, err := etcdFlags.Validate()
	if err != nil {
		logger.Fatal("etcd-validation-failed", err)
	}

	if etcdOptions.IsConfigured {
		storeClient = initializeEtcdStoreClient(logger, etcdOptions)
		etcdDB = initializeEtcdDB(logger, cryptor, storeClient, cbWorkPool, serviceClient, *desiredLRPCreationTimeout)
		activeDB = etcdDB
	}

	// If SQL database info is passed in, use SQL instead of ETCD
	if *databaseDriver != "" && *databaseConnectionString != "" {
		var err error
		connectionString := appendSSLConnectionStringParam(logger, *databaseDriver, *databaseConnectionString, *sqlCACertFile)

		sqlConn, err = sql.Open(*databaseDriver, connectionString)
		if err != nil {
			logger.Fatal("failed-to-open-sql", err)
		}
		defer sqlConn.Close()

		sqlConn.SetMaxOpenConns(*maxDatabaseConnections)
		sqlConn.SetMaxIdleConns(*maxDatabaseConnections)

		err = sqlConn.Ping()
		if err != nil {
			logger.Fatal("sql-failed-to-connect", err)
		}

		sqlDB = sqldb.NewSQLDB(sqlConn, *convergenceWorkers, *updateWorkers, format.ENCRYPTED_PROTO, cryptor, guidprovider.DefaultGuidProvider, clock, *databaseDriver)
		err = sqlDB.CreateConfigurationsTable(logger)
		if err != nil {
			logger.Fatal("sql-failed-create-configurations-table", err)
		}
		activeDB = sqlDB
	}

	if activeDB == nil {
		logger.Fatal("no-database-configured", errors.New("no database configured"))
	}

	encryptor := encryptor.New(logger, activeDB, keyManager, cryptor, clock)

	migrationsDone := make(chan struct{})

	migrationManager := migration.NewManager(
		logger,
		etcdDB,
		storeClient,
		sqlDB,
		sqlConn,
		cryptor,
		migrations.Migrations,
		migrationsDone,
		clock,
		*databaseDriver,
	)

	desiredHub := events.NewHub()
	actualHub := events.NewHub()

	repClientFactory := rep.NewClientFactory(cf_http.NewClient(), cf_http.NewClient())
	auctioneerClient := initializeAuctioneerClient(logger)

	exitChan := make(chan struct{})

	handler := handlers.New(
		logger,
		*updateWorkers,
		*convergenceWorkers,
		activeDB,
		desiredHub,
		actualHub,
		cbWorkPool,
		serviceClient,
		auctioneerClient,
		repClientFactory,
		migrationsDone,
		exitChan,
	)

	metricsNotifier := metrics.NewPeriodicMetronNotifier(
		logger,
		*reportInterval,
		etcdOptions,
		clock,
	)

	var server ifrit.Runner
	if *requireSSL {
		tlsConfig, err := cf_http.NewTLSConfig(*certFile, *keyFile, *caFile)
		if err != nil {
			logger.Fatal("tls-configuration-failed", err)
		}
		server = http_server.NewTLSServer(*listenAddress, handler, tlsConfig)
	} else {
		server = http_server.New(*listenAddress, handler)
	}

	healthcheckServer := http_server.New(*healthAddress, http.HandlerFunc(healthCheckHandler))

	members := grouper.Members{
		{"healthcheck", healthcheckServer},
		{"lock-maintainer", maintainer},
		{"workpool", cbWorkPool},
		{"server", server},
		{"migration-manager", migrationManager},
		{"encryptor", encryptor},
		{"hub-maintainer", hubMaintainer(logger, desiredHub, actualHub)},
		{"metrics", *metricsNotifier},
		{"registration-runner", registrationRunner},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)

	monitor := ifrit.Invoke(sigmon.New(group))
	go func() {
		// If a handler writes to this channel, we've hit an unrecoverable error
		// and should shut down (cleanly)
		<-exitChan
		monitor.Signal(os.Interrupt)
	}()

	logger.Info("started")

	err = <-monitor.Wait()
	if sqlConn != nil {
		sqlConn.Close()
	}
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}