	var _ thepackagedb.DB = sqlDB
})

var _ = BeforeEach(func() {
	if !test_helpers.UseSQL() {
		Skip("SQL Backend not available")
	}

	migrationsDone := make(chan struct{})

	migrationManager := migration.NewManager(logger,
		nil,
		nil,
		sqlDB,
		db,
		cryptor,
		migrations.Migrations,
		migrationsDone,
		fakeClock,
		dbDriverName,
	)
	migrationProcess = ifrit.Invoke(migrationManager)
	Consistently(migrationProcess.Wait()).ShouldNot(Receive())
	Eventually(migrationsDone).Should(BeClosed())
})

var _ = AfterEach(func() {
	if test_helpers.UseSQL() {
		fakeGUIDProvider.NextGUIDReturns("", nil)
	dbVersion = &models.Version{}

	logger = lagertest.NewTestLogger("test")
	fakeDB = &fakes.FakeDB{}
	fakeDB.VersionReturns(dbVersion, nil)
	storeClient = etcd.NewStoreClient(nil)
	cryptor = &fakeencryption.FakeCryptor{}

	fakeMigration = &migrationfakes.FakeMigration{}
	migrations = []migration.Migration{fakeMigration}
})

JustBeforeEach(func() {
	manager = migration.NewManager(logger, fakeDB, cryptor, storeClient, migrations, migrationsDone, clock.NewClock())
	migrationProcess = ifrit.Background(manager)
})

AfterEach(func() {
	ginkgomon.Kill(migrationProcess)
})

It("fetches the migration version from the database", func() {
	Eventually(fakeDB.VersionCallCount).Should(Equal(1))
	Consistently(fakeDB.VersionCallCount).Should(Equal(1))

	ginkgomon.Interrupt(migrationProcess)
	Eventually(migrationProcess.Wait()).Should(Receive(BeNil()))
})
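The test above drives the migration manager as an ifrit process (ifrit.Background, ginkgomon.Kill, migrationProcess.Wait()). The following is a minimal, self-contained sketch of the ifrit.Runner contract such a component satisfies; the exampleRunner type and its done channel are hypothetical stand-ins, not the bbs implementation, but they show why the tests can assert on migrationsDone and on the process's Wait() channel.

// Sketch only: a hypothetical runner that closes a "done" channel, reports readiness,
// then blocks until signalled — the same lifecycle the migration manager exposes.
package main

import (
	"os"

	"github.com/tedsuo/ifrit"
)

type exampleRunner struct {
	done chan struct{} // closed once the (pretend) migrations have run
}

func (r *exampleRunner) Run(signals <-chan os.Signal, ready chan<- struct{}) error {
	// ... perform migrations here ...
	close(r.done) // what a test observes via Eventually(migrationsDone).Should(BeClosed())
	close(ready)  // ifrit.Invoke/Background treats the process as ready from this point
	<-signals     // keep running until interrupted, like any long-lived ifrit process
	return nil
}

func main() {
	r := &exampleRunner{done: make(chan struct{})}
	process := ifrit.Invoke(r) // Invoke returns once ready has been closed
	process.Signal(os.Interrupt)
	<-process.Wait() // receives the error returned from Run (nil here)
}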
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	etcdFlags := AddETCDFlags(flag.CommandLine)
	encryptionFlags := encryption.AddEncryptionFlags(flag.CommandLine)
	flag.Parse()

	cf_http.Initialize(*communicationTimeout)

	logger, reconfigurableSink := cf_lager.New("bbs")
	logger.Info("starting")

	initializeDropsonde(logger)

	clock := clock.NewClock()

	consulClient, err := consuladapter.NewClient(*consulCluster)
	if err != nil {
		logger.Fatal("new-consul-client-failed", err)
	}

	sessionManager := consuladapter.NewSessionManager(consulClient)
	serviceClient := initializeServiceClient(logger, clock, consulClient, sessionManager)

	cbWorkPool := taskworkpool.New(logger, *taskCallBackWorkers, taskworkpool.HandleCompletedTask)

	keyManager, err := encryptionFlags.Validate()
	if err != nil {
		logger.Fatal("cannot-setup-encryption", err)
	}
	cryptor := encryption.NewCryptor(keyManager, rand.Reader)

	etcdOptions, err := etcdFlags.Validate()
	if err != nil {
		logger.Fatal("etcd-validation-failed", err)
	}

	storeClient := initializeEtcdStoreClient(logger, etcdOptions)
	db := initializeEtcdDB(logger, cryptor, storeClient, cbWorkPool, serviceClient)

	migrationsDone := make(chan struct{})

	maintainer := initializeLockMaintainer(logger, serviceClient)

	migrationManager := migration.NewManager(logger,
		db,
		cryptor,
		storeClient,
		migrations.Migrations,
		migrationsDone,
		clock,
	)

	encryptor := encryptor.New(logger, db, keyManager, cryptor, storeClient, clock)

	hub := events.NewHub()

	watcher := watcher.NewWatcher(
		logger,
		db,
		hub,
		clock,
		bbsWatchRetryWaitDuration,
	)

	handler := handlers.New(logger, db, hub, migrationsDone)

	metricsNotifier := metrics.NewPeriodicMetronNotifier(
		logger,
		*reportInterval,
		etcdOptions,
		clock,
	)

	var server ifrit.Runner
	if *requireSSL {
		tlsConfig, err := cf_http.NewTLSConfig(*certFile, *keyFile, *caFile)
		if err != nil {
			logger.Fatal("tls-configuration-failed", err)
		}
		server = http_server.NewTLSServer(*listenAddress, handler, tlsConfig)
	} else {
		server = http_server.New(*listenAddress, handler)
	}

	members := grouper.Members{
		{"lock-maintainer", maintainer},
		{"workPool", cbWorkPool},
		{"server", server},
		{"migration-manager", migrationManager},
		{"encryptor", encryptor},
		{"watcher", watcher},
		{"hub-closer", closeHub(logger.Session("hub-closer"), hub)},
		{"metrics", *metricsNotifier},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)

	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err = <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
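The main() above wires all components into an ordered ifrit process group supervised by sigmon. Below is a stripped-down sketch of that wiring in isolation; the member names and the no-op runners are placeholders for the real maintainer, server, migration manager, and so on. grouper.NewOrdered starts members in slice order and, on os.Interrupt, stops them in reverse order, while sigmon.New forwards OS signals to the whole group.

// Sketch only: the ordered process-group pattern used by the BBS main().
package main

import (
	"os"

	"github.com/tedsuo/ifrit"
	"github.com/tedsuo/ifrit/grouper"
	"github.com/tedsuo/ifrit/sigmon"
)

func noopRunner() ifrit.Runner {
	return ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		close(ready) // report readiness immediately
		<-signals    // run until the group is signalled
		return nil
	})
}

func main() {
	members := grouper.Members{
		{"first", noopRunner()},  // started first, stopped last
		{"second", noopRunner()}, // started only after "first" is ready
	}
	group := grouper.NewOrdered(os.Interrupt, members)
	monitor := ifrit.Invoke(sigmon.New(group))

	monitor.Signal(os.Interrupt) // in the real server this comes from the OS
	<-monitor.Wait()
}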
	logger = lagertest.NewTestLogger("test")

	fakeETCDDB = &dbfakes.FakeDB{}
	fakeETCDDB.VersionReturns(dbVersion, nil)

	fakeSQLDB = &dbfakes.FakeDB{}

	cryptor = &encryptionfakes.FakeCryptor{}

	fakeMigration = &migrationfakes.FakeMigration{}
	fakeMigration.RequiresSQLReturns(false)
	migrations = []migration.Migration{fakeMigration}
})

JustBeforeEach(func() {
	manager = migration.NewManager(logger, fakeETCDDB, etcdStoreClient, fakeSQLDB, rawSQLDB, cryptor, migrations, migrationsDone, clock.NewClock(), "db-driver")
	migrationProcess = ifrit.Background(manager)
})

AfterEach(func() {
	ginkgomon.Kill(migrationProcess)
})

Context("when both etcd and SQL configurations are present", func() {
	BeforeEach(func() {
		rawSQLDB = &sql.DB{}
		etcdStoreClient = etcd.NewStoreClient(nil)
	})

	Context("but SQL does not have a version", func() {
		BeforeEach(func() {
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	etcdFlags := AddETCDFlags(flag.CommandLine)
	encryptionFlags := encryption.AddEncryptionFlags(flag.CommandLine)
	flag.Parse()

	cf_http.Initialize(*communicationTimeout)

	logger, reconfigurableSink := cf_lager.New("bbs")
	logger.Info("starting")

	initializeDropsonde(logger)

	clock := clock.NewClock()

	consulClient, err := consuladapter.NewClientFromUrl(*consulCluster)
	if err != nil {
		logger.Fatal("new-consul-client-failed", err)
	}

	serviceClient := bbs.NewServiceClient(consulClient, clock)

	maintainer := initializeLockMaintainer(logger, serviceClient)

	_, portString, err := net.SplitHostPort(*listenAddress)
	if err != nil {
		logger.Fatal("failed-invalid-listen-address", err)
	}
	portNum, err := net.LookupPort("tcp", portString)
	if err != nil {
		logger.Fatal("failed-invalid-listen-port", err)
	}

	_, portString, err = net.SplitHostPort(*healthAddress)
	if err != nil {
		logger.Fatal("failed-invalid-health-address", err)
	}
	_, err = net.LookupPort("tcp", portString)
	if err != nil {
		logger.Fatal("failed-invalid-health-port", err)
	}

	registrationRunner := initializeRegistrationRunner(logger, consulClient, portNum, clock)

	cbWorkPool := taskworkpool.New(logger, *taskCallBackWorkers, taskworkpool.HandleCompletedTask)

	var activeDB db.DB
	var sqlDB *sqldb.SQLDB
	var sqlConn *sql.DB
	var storeClient etcddb.StoreClient
	var etcdDB *etcddb.ETCDDB

	key, keys, err := encryptionFlags.Parse()
	if err != nil {
		logger.Fatal("cannot-setup-encryption", err)
	}
	keyManager, err := encryption.NewKeyManager(key, keys)
	if err != nil {
		logger.Fatal("cannot-setup-encryption", err)
	}
	cryptor := encryption.NewCryptor(keyManager, rand.Reader)

	etcdOptions, err := etcdFlags.Validate()
	if err != nil {
		logger.Fatal("etcd-validation-failed", err)
	}

	if etcdOptions.IsConfigured {
		storeClient = initializeEtcdStoreClient(logger, etcdOptions)
		etcdDB = initializeEtcdDB(logger, cryptor, storeClient, cbWorkPool, serviceClient, *desiredLRPCreationTimeout)
		activeDB = etcdDB
	}

	// If SQL database info is passed in, use SQL instead of ETCD
	if *databaseDriver != "" && *databaseConnectionString != "" {
		var err error
		connectionString := appendSSLConnectionStringParam(logger, *databaseDriver, *databaseConnectionString, *sqlCACertFile)

		sqlConn, err = sql.Open(*databaseDriver, connectionString)
		if err != nil {
			logger.Fatal("failed-to-open-sql", err)
		}
		defer sqlConn.Close()
		sqlConn.SetMaxOpenConns(*maxDatabaseConnections)
		sqlConn.SetMaxIdleConns(*maxDatabaseConnections)

		err = sqlConn.Ping()
		if err != nil {
			logger.Fatal("sql-failed-to-connect", err)
		}

		sqlDB = sqldb.NewSQLDB(sqlConn, *convergenceWorkers, *updateWorkers, format.ENCRYPTED_PROTO, cryptor, guidprovider.DefaultGuidProvider, clock, *databaseDriver)
		err = sqlDB.CreateConfigurationsTable(logger)
		if err != nil {
			logger.Fatal("sql-failed-create-configurations-table", err)
		}
		activeDB = sqlDB
	}

	if activeDB == nil {
		logger.Fatal("no-database-configured", errors.New("no database configured"))
	}

	encryptor := encryptor.New(logger, activeDB, keyManager, cryptor, clock)

	migrationsDone := make(chan struct{})

	migrationManager := migration.NewManager(logger,
		etcdDB,
		storeClient,
		sqlDB,
		sqlConn,
		cryptor,
		migrations.Migrations,
		migrationsDone,
		clock,
		*databaseDriver,
	)

	desiredHub := events.NewHub()
	actualHub := events.NewHub()

	repClientFactory := rep.NewClientFactory(cf_http.NewClient(), cf_http.NewClient())
	auctioneerClient := initializeAuctioneerClient(logger)

	exitChan := make(chan struct{})

	handler := handlers.New(
		logger,
		*updateWorkers,
		*convergenceWorkers,
		activeDB,
		desiredHub,
		actualHub,
		cbWorkPool,
		serviceClient,
		auctioneerClient,
		repClientFactory,
		migrationsDone,
		exitChan,
	)

	metricsNotifier := metrics.NewPeriodicMetronNotifier(
		logger,
		*reportInterval,
		etcdOptions,
		clock,
	)

	var server ifrit.Runner
	if *requireSSL {
		tlsConfig, err := cf_http.NewTLSConfig(*certFile, *keyFile, *caFile)
		if err != nil {
			logger.Fatal("tls-configuration-failed", err)
		}
		server = http_server.NewTLSServer(*listenAddress, handler, tlsConfig)
	} else {
		server = http_server.New(*listenAddress, handler)
	}

	healthcheckServer := http_server.New(*healthAddress, http.HandlerFunc(healthCheckHandler))

	members := grouper.Members{
		{"healthcheck", healthcheckServer},
		{"lock-maintainer", maintainer},
		{"workpool", cbWorkPool},
		{"server", server},
		{"migration-manager", migrationManager},
		{"encryptor", encryptor},
		{"hub-maintainer", hubMaintainer(logger, desiredHub, actualHub)},
		{"metrics", *metricsNotifier},
		{"registration-runner", registrationRunner},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)

	monitor := ifrit.Invoke(sigmon.New(group))
	go func() {
		// If a handler writes to this channel, we've hit an unrecoverable error
		// and should shut down (cleanly)
		<-exitChan
		monitor.Signal(os.Interrupt)
	}()

	logger.Info("started")

	err = <-monitor.Wait()
	if sqlConn != nil {
		sqlConn.Close()
	}
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
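Before handing the connection to sqldb.NewSQLDB, the main() above opens the SQL backend, bounds the connection pool, and pings it to fail fast if the database is unreachable. The sketch below shows that sequence in isolation using only the standard database/sql package; the driver import and DSN are placeholders, not the values the BBS actually uses.

// Sketch only: open, bound, and verify a *sql.DB the way the BBS main() does.
package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql" // hypothetical driver choice; any database/sql driver works
)

func main() {
	conn, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/bbs") // placeholder DSN
	if err != nil {
		log.Fatal("failed-to-open-sql: ", err)
	}
	defer conn.Close()

	// Mirror the BBS behaviour of capping both open and idle connections.
	conn.SetMaxOpenConns(10)
	conn.SetMaxIdleConns(10)

	// sql.Open does not dial; Ping confirms the database is actually reachable.
	if err := conn.Ping(); err != nil {
		log.Fatal("sql-failed-to-connect: ", err)
	}
}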