func NewServer(logger lager.Logger, config DriverConfig) (volman.Manager, ifrit.Runner) { clock := clock.NewClock() registry := NewDriverRegistry() syncer := NewDriverSyncer(logger, registry, config.DriverPaths, config.SyncInterval, clock) purger := NewMountPurger(logger, registry) grouper := grouper.NewOrdered(os.Kill, grouper.Members{grouper.Member{"volman-syncer", syncer.Runner()}, grouper.Member{"volman-purger", purger.Runner()}}) return NewLocalClient(logger, registry, clock), grouper }
func setupRouteFetcher(logger lager.Logger, c *config.Config, registry rregistry.RegistryInterface) *route_fetcher.RouteFetcher { clock := clock.NewClock() uaaClient := newUaaClient(logger, clock, c) _, err := uaaClient.FetchToken(true) if err != nil { logger.Fatal("unable-to-fetch-token", err) } routingApiUri := fmt.Sprintf("%s:%d", c.RoutingApi.Uri, c.RoutingApi.Port) routingApiClient := routing_api.NewClient(routingApiUri, false) routeFetcher := route_fetcher.NewRouteFetcher(logger, uaaClient, registry, c, routingApiClient, 1, clock) return routeFetcher }
func main() { var ( err error uaaClient client.Client token *schema.Token ) if len(os.Args) < 5 { fmt.Printf("Usage: <client-name> <client-secret> <uaa-url> <skip-verification>\n\n") fmt.Printf("For example: client-name client-secret https://uaa.service.cf.internal:8443 true\n") return } skip, err := strconv.ParseBool(os.Args[4]) if err != nil { log.Fatal(err) os.Exit(1) } cfg := &config.Config{ ClientName: os.Args[1], ClientSecret: os.Args[2], UaaEndpoint: os.Args[3], SkipVerification: skip, } logger := lager.NewLogger("test") clock := clock.NewClock() uaaClient, err = client.NewClient(logger, cfg, clock) if err != nil { log.Fatal(err) os.Exit(1) } fmt.Printf("Connecting to: %s ...\n", cfg.UaaEndpoint) token, err = uaaClient.FetchToken(true) if err != nil { log.Fatal(err) os.Exit(1) } fmt.Printf("Response:\n\ttoken: %s\n\texpires: %d\n", token.AccessToken, token.ExpiresIn) }
// initializeEtcdDB constructs the etcd-backed BBS database using the
// package-level convergence/update worker-count flags and a wall clock.
//
// serviceClient is currently unused by the body; it is kept to preserve
// the function's signature for existing callers.
func initializeEtcdDB(
	logger lager.Logger,
	cryptor encryption.Cryptor,
	storeClient etcddb.StoreClient,
	serviceClient bbs.ServiceClient,
	desiredLRPCreationMaxTime time.Duration,
) *etcddb.ETCDDB {
	wallClock := clock.NewClock()

	return etcddb.NewETCD(
		format.ENCRYPTED_PROTO,
		*convergenceWorkers,
		*updateWorkers,
		desiredLRPCreationMaxTime,
		cryptor,
		storeClient,
		wallClock,
	)
}
func main() { var ( err error uaaClient client.Client key string ) if len(os.Args) < 3 { fmt.Printf("Usage: <uaa-url> <skip-verification>\n\n") fmt.Printf("For example: https://uaa.service.cf.internal:8443 true\n") return } skip, err := strconv.ParseBool(os.Args[2]) if err != nil { log.Fatal(err) os.Exit(1) } cfg := &config.Config{ UaaEndpoint: os.Args[1], SkipVerification: skip, } logger := lager.NewLogger("test") clock := clock.NewClock() uaaClient, err = client.NewClient(logger, cfg, clock) if err != nil { log.Fatal(err) os.Exit(1) } fmt.Printf("Connecting to: %s ...\n", cfg.UaaEndpoint) key, err = uaaClient.FetchKey() if err != nil { log.Fatal(err) os.Exit(1) } fmt.Printf("Response:\n%s\n", key) }
func initializeSQLDB(logger lager.Logger, sqlConn *sql.DB) *sqldb.SQLDB { key, keys, err := encryptionFlags.Parse() if err != nil { logger.Fatal("cannot-setup-encryption", err) } keyManager, err := encryption.NewKeyManager(key, keys) if err != nil { logger.Fatal("cannot-setup-encryption", err) } cryptor := encryption.NewCryptor(keyManager, rand.Reader) return sqldb.NewSQLDB(sqlConn, 1000, 1000, format.ENCODED_PROTO, cryptor, guidprovider.DefaultGuidProvider, clock.NewClock(), databaseDriver) }
"github.com/tedsuo/ifrit" "github.com/tedsuo/ifrit/ginkgomon" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) var _ = Describe("Ping API", func() { Describe("Protobuf Ping", func() { It("returns true when the bbs is running", func() { By("having the bbs down", func() { Expect(client.Ping(logger)).To(BeFalse()) }) By("starting the bbs without a lock", func() { competingBBSLock := locket.NewLock(logger, consulClient, locket.LockSchemaPath("bbs_lock"), []byte{}, clock.NewClock(), locket.RetryInterval, locket.DefaultSessionTTL) competingBBSLockProcess := ifrit.Invoke(competingBBSLock) defer ginkgomon.Kill(competingBBSLockProcess) bbsRunner = testrunner.New(bbsBinPath, bbsArgs) bbsRunner.StartCheck = "bbs.lock.acquiring-lock" bbsProcess = ginkgomon.Invoke(bbsRunner) Expect(client.Ping(logger)).To(BeFalse()) }) By("finally acquiring the lock", func() { Eventually(func() bool { return client.Ping(logger) }).Should(BeTrue()) })
func main() { ///// ARGUMENT PARSING ////////////////////////////////////////////////////// flag.Parse() if *version { fmt.Printf("clique-agent v%s\n", clique.CliqueAgentVersion) os.Exit(0) } level := logrus.InfoLevel if *debug { level = logrus.DebugLevel } if *configPath == "" { fmt.Fprintf(os.Stderr, "`-config` option is required\n") os.Exit(1) } ///// LOGGING /////////////////////////////////////////////////////////////// logger := &logrus.Logger{ Out: os.Stdout, Level: level, Formatter: new(logrus.TextFormatter), } logger.Debug("Initializing internals...") ///// CONFIGURATION ///////////////////////////////////////////////////////// cfg, err := config.NewConfig(*configPath) if err != nil { logger.Fatal(err.Error()) } ///// TRANSFER ////////////////////////////////////////////////////////////// // Protocol t, err := setupTransferrer(logger, cfg) if err != nil { logger.Fatal(err.Error()) } // Server transferListener, err := net.Listen( "tcp", fmt.Sprintf("0.0.0.0:%d", cfg.TransferPort), ) if err != nil { logger.Fatalf("Setting up transfer server: %s", err.Error()) } transferServer := transfer.NewServer( logger, transferListener, t.transferReceiver, ) // Client transferConnector := transfer.NewConnector() transferClient := transfer.NewClient( logger, transferConnector, t.transferSender, ) ///// SCHEDULING //////////////////////////////////////////////////////////// schedRandGen := scheduler.NewCryptoUIG() schedTaskSelector := &scheduler.LotteryTaskSelector{ Rand: schedRandGen, } schedClock := clock.NewClock() sched := scheduler.NewScheduler( logger, schedTaskSelector, // scheduling algorithm time.Second, // sleep between tasks schedClock, ) ///// TRANSFER REGISTRY ///////////////////////////////////////////////////// transferRegistry := registry.NewRegistry() ///// DISPATCHER //////////////////////////////////////////////////////////// dsptchr := &dispatcher.Dispatcher{ Scheduler: sched, TransferInterruptible: t.interruptible, TransferClient: transferClient, 
ApiRegistry: transferRegistry, Logger: logger, } ///// API /////////////////////////////////////////////////////////////////// var apiServer *api.Server if cfg.APIPort != 0 { apiServer = api.NewServer( cfg.APIPort, transferRegistry, dsptchr, ) } ///// SIGNAL HANDLER //////////////////////////////////////////////////////// sigTermCh := make(chan os.Signal) signal.Notify(sigTermCh, os.Interrupt) signal.Notify(sigTermCh, syscall.SIGTERM) go func() { <-sigTermCh logger.Debug("Closing transfer server...") transferListener.Close() logger.Debug("Closing scheduler...") sched.Stop() if apiServer != nil { logger.Debug("Closing API server...") apiServer.Close() } logger.Info("Exitting clique-agent...") }() logger.Debug("Initialization is complete!") ///// START ///////////////////////////////////////////////////////////////// logger.Info("Clique Agent") // Populate the dispatcher with tasks if len(cfg.RemoteHosts) != 0 { createTransferTasks(logger, cfg, dsptchr) } wg := new(sync.WaitGroup) // Start the transfer server wg.Add(1) go func() { transferServer.Serve() logger.Debug("Transfer server is done.") wg.Done() }() // Start the scheduler wg.Add(1) go func() { sched.Run() logger.Debug("Scheduler is done.") wg.Done() }() // Start the API server if apiServer != nil { wg.Add(1) go func() { apiServer.Serve() logger.Debug("API server is done.") wg.Done() }() } // Wait until everything is done! wg.Wait() logger.Debug("Clique agent is done.") }
func initializeETCDDB(logger lager.Logger, etcdClient *etcd.Client) *etcddb.ETCDDB { key, keys, err := encryptionFlags.Parse() if err != nil { logger.Fatal("cannot-setup-encryption", err) } keyManager, err := encryption.NewKeyManager(key, keys) if err != nil { logger.Fatal("cannot-setup-encryption", err) } cryptor := encryption.NewCryptor(keyManager, rand.Reader) return etcddb.NewETCD(format.ENCRYPTED_PROTO, 1000, 1000, 1*time.Minute, cryptor, etcddb.NewStoreClient(etcdClient), clock.NewClock()) }
func (t *ConsulHelper) RegisterCell(cell *models.CellPresence) { var err error jsonBytes, err := json.Marshal(cell) Expect(err).NotTo(HaveOccurred()) // Use NewLock instead of NewPresence in order to block on the cell being registered runner := locket.NewLock(t.logger, t.consulClient, bbs.CellSchemaPath(cell.CellId), jsonBytes, clock.NewClock(), locket.RetryInterval, locket.LockTTL) ifrit.Invoke(runner) Expect(err).NotTo(HaveOccurred()) }
"code.cloudfoundry.org/bbs/models" "code.cloudfoundry.org/clock" "code.cloudfoundry.org/locket" "github.com/tedsuo/ifrit" "github.com/tedsuo/ifrit/ginkgomon" "github.com/tedsuo/ifrit/grouper" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) var _ = Describe("ServiceClient", func() { var serviceClient bbs.ServiceClient BeforeEach(func() { serviceClient = bbs.NewServiceClient(consulClient, clock.NewClock()) }) Describe("CellById", func() { const cellID = "cell-id" Context("when the cell exists", func() { It("returns the correct CellPresence", func() { cellPresence := newCellPresence(cellID) consulHelper.RegisterCell(cellPresence) presence, err := serviceClient.CellById(logger, cellID) Expect(err).NotTo(HaveOccurred()) Expect(presence).To(BeEquivalentTo(cellPresence)) }) })
}) }) }) Context("when given a driver spec not in canonical form", func() { var ( fakeRemoteClientFactory *voldriverfakes.FakeRemoteClientFactory driverFactory vollocal.DriverFactory fakeDriver *voldriverfakes.FakeDriver driverSyncer vollocal.DriverSyncer ) JustBeforeEach(func() { fakeRemoteClientFactory = new(voldriverfakes.FakeRemoteClientFactory) driverFactory = vollocal.NewDriverFactoryWithRemoteClientFactory(fakeRemoteClientFactory) driverSyncer = vollocal.NewDriverSyncerWithDriverFactory(logger, nil, []string{defaultPluginsDirectory}, time.Second*60, clock.NewClock(), driverFactory) }) TestCanonicalization := func(context, actual, it, expected string) { Context(context, func() { BeforeEach(func() { err := voldriver.WriteDriverSpec(logger, defaultPluginsDirectory, driverName, "spec", []byte(actual)) Expect(err).NotTo(HaveOccurred()) }) JustBeforeEach(func() { fakeDriver = new(voldriverfakes.FakeDriver) fakeDriver.ActivateReturns(voldriver.ActivateResponse{ Implements: []string{"VolumeDriver"}, })
logger = lagertest.NewTestLogger("test") fakeETCDDB = &dbfakes.FakeDB{} fakeETCDDB.VersionReturns(dbVersion, nil) fakeSQLDB = &dbfakes.FakeDB{} cryptor = &encryptionfakes.FakeCryptor{} fakeMigration = &migrationfakes.FakeMigration{} fakeMigration.RequiresSQLReturns(false) migrations = []migration.Migration{fakeMigration} }) JustBeforeEach(func() { manager = migration.NewManager(logger, fakeETCDDB, etcdStoreClient, fakeSQLDB, rawSQLDB, cryptor, migrations, migrationsDone, clock.NewClock(), "db-driver") migrationProcess = ifrit.Background(manager) }) AfterEach(func() { ginkgomon.Kill(migrationProcess) }) Context("when both a etcd and sql configurations are present", func() { BeforeEach(func() { rawSQLDB = &sql.DB{} etcdStoreClient = etcd.NewStoreClient(nil) }) Context("but SQL does not have a version", func() { BeforeEach(func() {
// main is the confab entry point. It parses flags and a JSON config file,
// wraps the consul agent in a chaperon client (or server, depending on the
// configured mode), and then dispatches on the COMMAND argument
// ("start" or "stop").
func main() {
	flagSet := flag.NewFlagSet("flags", flag.ContinueOnError)
	flagSet.Var(&recursors, "recursor", "specifies the address of an upstream DNS `server`, may be specified multiple times")
	flagSet.StringVar(&configFile, "config-file", "", "specifies the config `file`")
	flagSet.BoolVar(&foreground, "foreground", false, "if true confab will wait for consul to exit")

	// os.Args[1] is the COMMAND; flag parsing starts at os.Args[2].
	if len(os.Args) < 2 {
		printUsageAndExit("invalid number of arguments", flagSet)
	}

	if err := flagSet.Parse(os.Args[2:]); err != nil {
		os.Exit(1)
	}

	configFileContents, err := ioutil.ReadFile(configFile)
	if err != nil {
		stderr.Printf("error reading configuration file: %s", err)
		os.Exit(1)
	}

	cfg, err := config.ConfigFromJSON(configFileContents)
	if err != nil {
		stderr.Printf("error reading configuration file: %s", err)
		os.Exit(1)
	}

	// The consul agent binary must exist before anything else is attempted.
	path, err := exec.LookPath(cfg.Path.AgentPath)
	if err != nil {
		printUsageAndExit(fmt.Sprintf("\"agent_path\" %q cannot be found", cfg.Path.AgentPath), flagSet)
	}

	if len(cfg.Path.PIDFile) == 0 {
		printUsageAndExit("\"pid_file\" cannot be empty", flagSet)
	}

	logger := lager.NewLogger("confab")
	logger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.INFO))

	// Runner that launches and supervises the consul agent process itself.
	agentRunner := &agent.Runner{
		Path:      path,
		PIDFile:   cfg.Path.PIDFile,
		ConfigDir: cfg.Path.ConsulConfigDir,
		Recursors: recursors,
		Stdout:    os.Stdout,
		Stderr:    os.Stderr,
		Logger:    logger,
	}

	consulAPIClient, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err) // not tested, NewClient never errors
	}

	// ConsulRPCClient is nil here; chaperon.NewClient/NewServer receive the
	// consulagent.NewRPCClient constructor below to build one when needed.
	agentClient := &agent.Client{
		ExpectedMembers: cfg.Consul.Agent.Servers.LAN,
		ConsulAPIAgent:  consulAPIClient.Agent(),
		ConsulRPCClient: nil,
		Logger:          logger,
	}

	retrier := utils.NewRetrier(clock.NewClock(), 1*time.Second)

	controller := chaperon.Controller{
		AgentRunner:    agentRunner,
		AgentClient:    agentClient,
		Retrier:        retrier,
		EncryptKeys:    cfg.Consul.EncryptKeys,
		Logger:         logger,
		ServiceDefiner: config.ServiceDefiner{logger},
		ConfigDir:      cfg.Path.ConsulConfigDir,
		Config:         cfg,
	}

	keyringRemover := chaperon.NewKeyringRemover(cfg.Path.KeyringFile, logger)
	configWriter := chaperon.NewConfigWriter(cfg.Path.ConsulConfigDir, logger)

	// Default to the client chaperon; switch to the server variant (which
	// additionally performs bootstrap checking) when configured as "server".
	var r runner = chaperon.NewClient(controller, consulagent.NewRPCClient, keyringRemover, configWriter)
	if controller.Config.Consul.Agent.Mode == "server" {
		bootstrapChecker := chaperon.NewBootstrapChecker(logger, agentClient, status.Client{ConsulAPIStatus: consulAPIClient.Status()}, time.Sleep)
		r = chaperon.NewServer(controller, configWriter, consulagent.NewRPCClient, bootstrapChecker)
	}

	switch os.Args[1] {
	case "start":
		_, err = os.Stat(controller.Config.Path.ConsulConfigDir)
		if err != nil {
			printUsageAndExit(fmt.Sprintf("\"consul_config_dir\" %q could not be found", controller.Config.Path.ConsulConfigDir), flagSet)
		}

		if utils.IsRunningProcess(agentRunner.PIDFile) {
			stderr.Println("consul_agent is already running, please stop it first")
			os.Exit(1)
		}

		if len(agentClient.ExpectedMembers) == 0 {
			printUsageAndExit("at least one \"expected-member\" must be provided", flagSet)
		}

		timeout := utils.NewTimeout(time.After(time.Duration(controller.Config.Confab.TimeoutInSeconds) * time.Second))

		if err := r.Start(cfg, timeout); err != nil {
			stderr.Printf("error during start: %s", err)
			r.Stop()
			os.Exit(1)
		}

		// With -foreground, block until the consul agent process exits.
		if foreground {
			if err := agentRunner.Wait(); err != nil {
				stderr.Printf("error during wait: %s", err)
				r.Stop()
				os.Exit(1)
			}
		}
	case "stop":
		if err := r.Stop(); err != nil {
			stderr.Printf("error during stop: %s", err)
			os.Exit(1)
		}
	default:
		printUsageAndExit(fmt.Sprintf("invalid COMMAND %q", os.Args[1]), flagSet)
	}
}
// main is the BBS entry point. It parses flags, connects to consul and to
// etcd and/or SQL (both may be configured, supporting migration), wires up
// encryption, migrations, event hubs, controllers and the HTTP(S) API, and
// runs everything as a signal-monitored ifrit process group.
func main() {
	debugserver.AddFlags(flag.CommandLine)
	lagerflags.AddFlags(flag.CommandLine)
	etcdFlags := AddETCDFlags(flag.CommandLine)
	encryptionFlags := encryption.AddEncryptionFlags(flag.CommandLine)
	flag.Parse()

	cfhttp.Initialize(*communicationTimeout)

	logger, reconfigurableSink := lagerflags.New("bbs")
	logger.Info("starting")

	initializeDropsonde(logger)

	clock := clock.NewClock()

	consulClient, err := consuladapter.NewClientFromUrl(*consulCluster)
	if err != nil {
		logger.Fatal("new-consul-client-failed", err)
	}

	serviceClient := bbs.NewServiceClient(consulClient, clock)

	maintainer := initializeLockMaintainer(logger, serviceClient)

	// Validate listen and health addresses up front; only the listen port
	// number is reused later (for consul registration).
	_, portString, err := net.SplitHostPort(*listenAddress)
	if err != nil {
		logger.Fatal("failed-invalid-listen-address", err)
	}
	portNum, err := net.LookupPort("tcp", portString)
	if err != nil {
		logger.Fatal("failed-invalid-listen-port", err)
	}

	_, portString, err = net.SplitHostPort(*healthAddress)
	if err != nil {
		logger.Fatal("failed-invalid-health-address", err)
	}
	_, err = net.LookupPort("tcp", portString)
	if err != nil {
		logger.Fatal("failed-invalid-health-port", err)
	}

	registrationRunner := initializeRegistrationRunner(logger, consulClient, portNum, clock)

	var activeDB db.DB
	var sqlDB *sqldb.SQLDB
	var sqlConn *sql.DB
	var storeClient etcddb.StoreClient
	var etcdDB *etcddb.ETCDDB

	key, keys, err := encryptionFlags.Parse()
	if err != nil {
		logger.Fatal("cannot-setup-encryption", err)
	}
	keyManager, err := encryption.NewKeyManager(key, keys)
	if err != nil {
		logger.Fatal("cannot-setup-encryption", err)
	}
	cryptor := encryption.NewCryptor(keyManager, rand.Reader)

	etcdOptions, err := etcdFlags.Validate()
	if err != nil {
		logger.Fatal("etcd-validation-failed", err)
	}

	if etcdOptions.IsConfigured {
		storeClient = initializeEtcdStoreClient(logger, etcdOptions)
		etcdDB = initializeEtcdDB(logger, cryptor, storeClient, serviceClient, *desiredLRPCreationTimeout)
		activeDB = etcdDB
	}

	// If SQL database info is passed in, use SQL instead of ETCD
	if *databaseDriver != "" && *databaseConnectionString != "" {
		var err error

		connectionString := appendSSLConnectionStringParam(logger, *databaseDriver, *databaseConnectionString, *sqlCACertFile)

		sqlConn, err = sql.Open(*databaseDriver, connectionString)
		if err != nil {
			logger.Fatal("failed-to-open-sql", err)
		}
		defer sqlConn.Close()
		sqlConn.SetMaxOpenConns(*maxDatabaseConnections)
		sqlConn.SetMaxIdleConns(*maxDatabaseConnections)

		err = sqlConn.Ping()
		if err != nil {
			logger.Fatal("sql-failed-to-connect", err)
		}

		sqlDB = sqldb.NewSQLDB(sqlConn, *convergenceWorkers, *updateWorkers, format.ENCRYPTED_PROTO, cryptor, guidprovider.DefaultGuidProvider, clock, *databaseDriver)
		err = sqlDB.SetIsolationLevel(logger, sqldb.IsolationLevelReadCommitted)
		if err != nil {
			logger.Fatal("sql-failed-to-set-isolation-level", err)
		}

		err = sqlDB.CreateConfigurationsTable(logger)
		if err != nil {
			logger.Fatal("sql-failed-create-configurations-table", err)
		}
		activeDB = sqlDB
	}

	if activeDB == nil {
		logger.Fatal("no-database-configured", errors.New("no database configured"))
	}

	encryptor := encryptor.New(logger, activeDB, keyManager, cryptor, clock)

	// migrationsDone is closed by the migration manager when the schema is
	// current; the API handler is given it so it can gate on migrations.
	migrationsDone := make(chan struct{})

	migrationManager := migration.NewManager(
		logger,
		etcdDB,
		storeClient,
		sqlDB,
		sqlConn,
		cryptor,
		migrations.Migrations,
		migrationsDone,
		clock,
		*databaseDriver,
	)

	desiredHub := events.NewHub()
	actualHub := events.NewHub()

	repTLSConfig := &rep.TLSConfig{
		RequireTLS:      *repRequireTLS,
		CaCertFile:      *repCACert,
		CertFile:        *repClientCert,
		KeyFile:         *repClientKey,
		ClientCacheSize: *repClientSessionCacheSize,
	}

	httpClient := cfhttp.NewClient()
	repClientFactory, err := rep.NewClientFactory(httpClient, httpClient, repTLSConfig)
	if err != nil {
		logger.Fatal("new-rep-client-factory-failed", err)
	}

	auctioneerClient := initializeAuctioneerClient(logger)

	// Handlers write to exitChan on unrecoverable errors (see goroutine below).
	exitChan := make(chan struct{})

	var accessLogger lager.Logger
	if *accessLogPath != "" {
		accessLogger = lager.NewLogger("bbs-access")
		file, err := os.OpenFile(*accessLogPath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
		if err != nil {
			logger.Error("invalid-access-log-path", err, lager.Data{"access-log-path": *accessLogPath})
			os.Exit(1)
		}
		accessLogger.RegisterSink(lager.NewWriterSink(file, lager.INFO))
	}

	var tlsConfig *tls.Config
	if *requireSSL {
		tlsConfig, err = cfhttp.NewTLSConfig(*certFile, *keyFile, *caFile)
		if err != nil {
			logger.Fatal("tls-configuration-failed", err)
		}
	}

	cbWorkPool := taskworkpool.New(logger, *taskCallBackWorkers, taskworkpool.HandleCompletedTask, tlsConfig)

	handler := handlers.New(
		logger,
		accessLogger,
		*updateWorkers,
		*convergenceWorkers,
		activeDB,
		desiredHub,
		actualHub,
		cbWorkPool,
		serviceClient,
		auctioneerClient,
		repClientFactory,
		migrationsDone,
		exitChan,
	)

	metricsNotifier := metrics.NewPeriodicMetronNotifier(logger)

	retirer := controllers.NewActualLRPRetirer(activeDB, actualHub, repClientFactory, serviceClient)
	lrpConvergenceController := controllers.NewLRPConvergenceController(logger, activeDB, actualHub, auctioneerClient, serviceClient, retirer, *convergenceWorkers)
	taskController := controllers.NewTaskController(activeDB, cbWorkPool, auctioneerClient, serviceClient, repClientFactory)

	convergerProcess := converger.New(
		logger,
		clock,
		lrpConvergenceController,
		taskController,
		serviceClient,
		*convergeRepeatInterval,
		*kickTaskDuration,
		*expirePendingTaskDuration,
		*expireCompletedTaskDuration)

	// Serve TLS only when SSL was required and a tls.Config was built above.
	var server ifrit.Runner
	if tlsConfig != nil {
		server = http_server.NewTLSServer(*listenAddress, handler, tlsConfig)
	} else {
		server = http_server.New(*listenAddress, handler)
	}

	healthcheckServer := http_server.New(*healthAddress, http.HandlerFunc(healthCheckHandler))

	// Ordered group: members start in order and are shut down in reverse.
	members := grouper.Members{
		{"healthcheck", healthcheckServer},
		{"lock-maintainer", maintainer},
		{"workpool", cbWorkPool},
		{"server", server},
		{"migration-manager", migrationManager},
		{"encryptor", encryptor},
		{"hub-maintainer", hubMaintainer(logger, desiredHub, actualHub)},
		{"metrics", *metricsNotifier},
		{"converger", convergerProcess},
		{"registration-runner", registrationRunner},
	}

	if dbgAddr := debugserver.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", debugserver.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)

	monitor := ifrit.Invoke(sigmon.New(group))

	go func() {
		// If a handler writes to this channel, we've hit an unrecoverable error
		// and should shut down (cleanly)
		<-exitChan
		monitor.Signal(os.Interrupt)
	}()

	logger.Info("started")

	err = <-monitor.Wait()
	if sqlConn != nil {
		sqlConn.Close()
	}
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
func main() { runtime.GOMAXPROCS(runtime.NumCPU()) flag.Parse() uploaderConfig, err := config.NewUploaderConfig(*configPath) if err != nil { panic(err.Error()) } logger, reconfigurableSink := lagerflags.NewFromConfig("cc-uploader", uploaderConfig.LagerConfig) cfhttp.Initialize(communicationTimeout) initializeDropsonde(logger, uploaderConfig) consulClient, err := consuladapter.NewClientFromUrl(uploaderConfig.ConsulCluster) if err != nil { logger.Fatal("new-client-failed", err) } registrationRunner := initializeRegistrationRunner(logger, consulClient, uploaderConfig.ListenAddress, clock.NewClock()) members := grouper.Members{ {"cc-uploader", initializeServer(logger, uploaderConfig)}, {"registration-runner", registrationRunner}, } if uploaderConfig.DebugServerConfig.DebugAddress != "" { members = append(grouper.Members{ {"debug-server", debugserver.Runner(uploaderConfig.DebugServerConfig.DebugAddress, reconfigurableSink)}, }, members...) } group := grouper.NewOrdered(os.Interrupt, members) monitor := ifrit.Invoke(sigmon.New(group)) logger.Info("ready") err = <-monitor.Wait() if err != nil { logger.Error("exited-with-failure", err) os.Exit(1) } logger.Info("exited") }
fakeDB = new(dbfakes.FakeEncryptionDB) logger = lagertest.NewTestLogger("test") oldKey, err := encryption.NewKey("old-key", "old-passphrase") encryptionKey, err := encryption.NewKey("label", "passphrase") Expect(err).NotTo(HaveOccurred()) keyManager, err = encryption.NewKeyManager(encryptionKey, []encryption.Key{oldKey}) Expect(err).NotTo(HaveOccurred()) cryptor = encryption.NewCryptor(keyManager, rand.Reader) fakeDB.EncryptionKeyLabelReturns("", models.ErrResourceNotFound) }) JustBeforeEach(func() { runner = encryptor.New(logger, fakeDB, keyManager, cryptor, clock.NewClock()) encryptorProcess = ifrit.Background(runner) }) AfterEach(func() { ginkgomon.Kill(encryptorProcess) }) It("reports the duration that it took to encrypt", func() { Eventually(encryptorProcess.Ready()).Should(BeClosed()) Eventually(logger.LogMessages).Should(ContainElement("test.encryptor.encryption-finished")) reportedDuration := sender.GetValue("EncryptionDuration") Expect(reportedDuration.Value).NotTo(BeZero()) Expect(reportedDuration.Unit).To(Equal("nanos")) })
const retryInterval = 500 * time.Millisecond var ( consulClient consuladapter.Client watcherRunner ifrit.Runner watcherProcess ifrit.Process disappearChan <-chan []string logger *lagertest.TestLogger ) BeforeEach(func() { consulClient = consulRunner.NewClient() logger = lagertest.NewTestLogger("test") clock := clock.NewClock() watcherRunner, disappearChan = locket.NewDisappearanceWatcher(logger, consulClient, "under", clock) watcherProcess = ifrit.Invoke(watcherRunner) }) AfterEach(func() { ginkgomon.Kill(watcherProcess) }) var addThenRemovePresence = func(presenceKey string) { presenceRunner := locket.NewPresence(logger, consulClient, presenceKey, []byte("value"), clock.NewClock(), retryInterval, 10*time.Second) presenceProcess := ifrit.Invoke(presenceRunner) Eventually(func() int { sessions, _, err := consulClient.Session().List(nil) Expect(err).NotTo(HaveOccurred())