func New(writer io.Writer) *GinkgoReporter {
	logger := lager.NewLogger("ginkgo")
	logger.RegisterSink(lager.NewWriterSink(writer, lager.DEBUG))
	return &GinkgoReporter{
		writer: writer,
		logger: logger,
	}
}
func NewTestSink() *TestSink {
	buffer := gbytes.NewBuffer()
	return &TestSink{
		Sink:   lager.NewWriterSink(buffer, lager.DEBUG),
		buffer: buffer,
	}
}
func NewTestLogger(component string) *TestLogger {
	logger := lager.NewLogger(component)
	testSink := NewTestSink()
	logger.RegisterSink(testSink)
	logger.RegisterSink(lager.NewWriterSink(ginkgo.GinkgoWriter, lager.DEBUG))
	return &TestLogger{logger, testSink}
}
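// A minimal sketch of the buffered-sink pattern the TestSink above is built
// on: log through a gbytes buffer, then assert on the captured JSON. It uses
// only calls already shown above plus the gbytes.Say matcher, and assumes the
// usual ginkgo/gomega dot-imports are in scope.
var _ = It("captures log lines for assertions", func() {
	buffer := gbytes.NewBuffer()
	logger := lager.NewLogger("my-component")
	logger.RegisterSink(lager.NewWriterSink(buffer, lager.DEBUG))

	logger.Info("did-something", lager.Data{"count": 3})

	// lager emits one JSON object per line; "message" is "<component>.<action>".
	Expect(buffer).To(gbytes.Say(`"message":"my-component.did-something"`))
})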
func (c *Config) BuildLogger() error {
	logFileHandle, err := os.Create(c.LogFilePath)
	if err != nil {
		return fmt.Errorf("Could not open handle to logfile: %s", err)
	}

	c.Logger = lager.NewLogger("BootStrap Errand")
	writerSink := lager.NewWriterSink(logFileHandle, lager.DEBUG)
	c.Logger.RegisterSink(writerSink)
	return nil
}
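// A minimal usage sketch for BuildLogger; the Config literal and the file
// path are made up for illustration.
func exampleBuildLogger() {
	c := &Config{LogFilePath: "/tmp/bootstrap-errand.log"}
	if err := c.BuildLogger(); err != nil {
		log.Fatal(err) // stdlib log, since c.Logger was never built
	}
	c.Logger.Info("bootstrap-started", lager.Data{"log-file": c.LogFilePath})
}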
func TestBenchmarkBbs(t *testing.T) {
	var lagerLogLevel lager.LogLevel
	switch logLevel {
	case DEBUG:
		lagerLogLevel = lager.DEBUG
	case INFO:
		lagerLogLevel = lager.INFO
	case ERROR:
		lagerLogLevel = lager.ERROR
	case FATAL:
		lagerLogLevel = lager.FATAL
	default:
		panic(fmt.Errorf("unknown log level: %s", logLevel))
	}

	var logWriter io.Writer
	if logFilename == "" {
		logWriter = GinkgoWriter
	} else {
		logFile, err := os.Create(logFilename)
		if err != nil {
			panic(fmt.Errorf("Error opening file '%s': %s", logFilename, err.Error()))
		}
		defer logFile.Close()
		logWriter = logFile
	}

	logger = lager.NewLogger("bbs-benchmarks-test")
	logger.RegisterSink(lager.NewWriterSink(logWriter, lagerLogLevel))

	reporters = []Reporter{}

	if dataDogAPIKey != "" && dataDogAppKey != "" {
		dataDogClient = datadog.NewClient(dataDogAPIKey, dataDogAppKey)
		dataDogReporter = reporter.NewDataDogReporter(logger, metricPrefix, dataDogClient)
		reporters = append(reporters, &dataDogReporter)
	}

	if awsAccessKeyID != "" && awsSecretAccessKey != "" && awsBucketName != "" {
		creds := credentials.NewStaticCredentials(awsAccessKeyID, awsSecretAccessKey, "")
		s3Client := s3.New(&aws.Config{
			Region:      &awsRegion,
			Credentials: creds,
		})
		uploader := s3manager.NewUploader(&s3manager.UploadOptions{S3: s3Client})
		reporter := reporter.NewS3Reporter(logger, awsBucketName, uploader)
		reporters = append(reporters, &reporter)
	}

	RegisterFailHandler(Fail)
	RunSpecsWithDefaultAndCustomReporters(t, "Benchmark BBS Suite", reporters)
}
func main() {
	logger := lager.NewLogger("cnsim-server")
	logger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.DEBUG))

	port := getEnv(logger, "PORT", "9000")
	listenAddress := getEnv(logger, "LISTEN_ADDRESS", "127.0.0.1")
	address := fmt.Sprintf("%s:%s", listenAddress, port)
	logger.Info("listen", lager.Data{"address": address})

	routes := rata.Routes{
		{Name: "root", Method: "GET", Path: "/"},
		{Name: "steady_state", Method: "GET", Path: "/steady_state"},
	}
	rataHandlers := rata.Handlers{
		"root": &handlers.Root{
			Logger: logger,
		},
		"steady_state": gziphandler.GzipHandler(&handlers.SteadyState{
			Logger: logger,
			Simulator: &simulate.SteadyState{
				AppSizeDistribution: &distributions.GeometricWithPositiveSupport{},
			},
		}),
	}

	router, err := rata.NewRouter(routes, rataHandlers)
	if err != nil {
		log.Fatalf("unable to create rata Router: %s", err) // not tested
	}

	monitor := ifrit.Invoke(sigmon.New(grouper.NewOrdered(os.Interrupt, grouper.Members{
		{"http_server", http_server.New(address, router)},
	})))
	err = <-monitor.Wait()
	if err != nil {
		log.Fatalf("ifrit: %s", err)
	}
}
func (f LagerFlag) Logger(component string) (lager.Logger, *lager.ReconfigurableSink) {
	var minLagerLogLevel lager.LogLevel
	switch f.LogLevel {
	case LogLevelDebug:
		minLagerLogLevel = lager.DEBUG
	case LogLevelInfo:
		minLagerLogLevel = lager.INFO
	case LogLevelError:
		minLagerLogLevel = lager.ERROR
	case LogLevelFatal:
		minLagerLogLevel = lager.FATAL
	default:
		panic(fmt.Sprintf("unknown log level: %s", f.LogLevel))
	}

	logger := lager.NewLogger(component)
	sink := lager.NewReconfigurableSink(lager.NewWriterSink(os.Stdout, lager.DEBUG), minLagerLogLevel)
	logger.RegisterSink(sink)
	return logger, sink
}
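// A minimal sketch of why Logger returns the *lager.ReconfigurableSink as
// well: the minimum level can be changed at runtime without re-registering
// sinks. LagerFlag and LogLevelInfo are assumed to be defined as above.
func exampleReconfigurableSink() {
	f := LagerFlag{LogLevel: LogLevelInfo}
	logger, sink := f.Logger("my-component")

	logger.Debug("filtered-out") // below INFO, dropped by the sink
	sink.SetMinLevel(lager.DEBUG)
	logger.Debug("now-emitted") // visible once the minimum level is lowered
}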
	fakeLRPDB.UnclaimActualLRPStub = func(_ lager.Logger, key *models.ActualLRPKey) (*models.ActualLRPGroup, *models.ActualLRPGroup, error) {
		if key.ProcessGuid == unclaimingActualLRP1.ProcessGuid {
			return &models.ActualLRPGroup{Instance: unclaimingActualLRP1}, &models.ActualLRPGroup{Instance: unclaimingActualLRP1}, nil
		}
		if key.ProcessGuid == unclaimingActualLRP2.ProcessGuid {
			return &models.ActualLRPGroup{Instance: unclaimingActualLRP2}, &models.ActualLRPGroup{Instance: unclaimingActualLRP2}, nil
		}
		return nil, nil, models.ErrResourceNotFound
	}
	fakeLRPDB.ConvergeLRPsReturns(keysToAuction, keysWithMissingCells, keysToRetire)

	logger.RegisterSink(lager.NewWriterSink(GinkgoWriter, lager.DEBUG))

	fakeServiceClient = new(fake_bbs.FakeServiceClient)
	fakeRepClientFactory = new(repfakes.FakeClientFactory)
	fakeRepClient = new(repfakes.FakeClient)
	fakeRepClientFactory.CreateClientReturns(fakeRepClient, nil)
	fakeServiceClient.CellByIdReturns(nil, errors.New("hi"))

	cellPresence := models.NewCellPresence("cell-id", "1.1.1.1", "", "z1", models.CellCapacity{}, nil, nil, nil, nil)
	cellSet = models.CellSet{"cell-id": &cellPresence}
	fakeServiceClient.CellsReturns(cellSet, nil)

	actualHub = &eventfakes.FakeHub{}
	retirer := controllers.NewActualLRPRetirer(fakeLRPDB, actualHub, fakeRepClientFactory, fakeServiceClient)
	controller = controllers.NewLRPConvergenceController(logger, fakeLRPDB, actualHub, fakeAuctioneerClient, fakeServiceClient, retirer, 2)
})
func main() {
	debugserver.AddFlags(flag.CommandLine)
	lagerflags.AddFlags(flag.CommandLine)
	etcdFlags := AddETCDFlags(flag.CommandLine)
	encryptionFlags := encryption.AddEncryptionFlags(flag.CommandLine)
	flag.Parse()

	cfhttp.Initialize(*communicationTimeout)

	logger, reconfigurableSink := lagerflags.New("bbs")
	logger.Info("starting")

	initializeDropsonde(logger)

	clock := clock.NewClock()

	consulClient, err := consuladapter.NewClientFromUrl(*consulCluster)
	if err != nil {
		logger.Fatal("new-consul-client-failed", err)
	}

	serviceClient := bbs.NewServiceClient(consulClient, clock)

	maintainer := initializeLockMaintainer(logger, serviceClient)

	_, portString, err := net.SplitHostPort(*listenAddress)
	if err != nil {
		logger.Fatal("failed-invalid-listen-address", err)
	}
	portNum, err := net.LookupPort("tcp", portString)
	if err != nil {
		logger.Fatal("failed-invalid-listen-port", err)
	}

	_, portString, err = net.SplitHostPort(*healthAddress)
	if err != nil {
		logger.Fatal("failed-invalid-health-address", err)
	}
	_, err = net.LookupPort("tcp", portString)
	if err != nil {
		logger.Fatal("failed-invalid-health-port", err)
	}

	registrationRunner := initializeRegistrationRunner(logger, consulClient, portNum, clock)

	var activeDB db.DB
	var sqlDB *sqldb.SQLDB
	var sqlConn *sql.DB
	var storeClient etcddb.StoreClient
	var etcdDB *etcddb.ETCDDB

	key, keys, err := encryptionFlags.Parse()
	if err != nil {
		logger.Fatal("cannot-setup-encryption", err)
	}
	keyManager, err := encryption.NewKeyManager(key, keys)
	if err != nil {
		logger.Fatal("cannot-setup-encryption", err)
	}
	cryptor := encryption.NewCryptor(keyManager, rand.Reader)

	etcdOptions, err := etcdFlags.Validate()
	if err != nil {
		logger.Fatal("etcd-validation-failed", err)
	}

	if etcdOptions.IsConfigured {
		storeClient = initializeEtcdStoreClient(logger, etcdOptions)
		etcdDB = initializeEtcdDB(logger, cryptor, storeClient, serviceClient, *desiredLRPCreationTimeout)
		activeDB = etcdDB
	}

	// If SQL database info is passed in, use SQL instead of ETCD
	if *databaseDriver != "" && *databaseConnectionString != "" {
		var err error
		connectionString := appendSSLConnectionStringParam(logger, *databaseDriver, *databaseConnectionString, *sqlCACertFile)

		sqlConn, err = sql.Open(*databaseDriver, connectionString)
		if err != nil {
			logger.Fatal("failed-to-open-sql", err)
		}
		defer sqlConn.Close()

		sqlConn.SetMaxOpenConns(*maxDatabaseConnections)
		sqlConn.SetMaxIdleConns(*maxDatabaseConnections)

		err = sqlConn.Ping()
		if err != nil {
			logger.Fatal("sql-failed-to-connect", err)
		}

		sqlDB = sqldb.NewSQLDB(sqlConn, *convergenceWorkers, *updateWorkers, format.ENCRYPTED_PROTO, cryptor, guidprovider.DefaultGuidProvider, clock, *databaseDriver)
		err = sqlDB.SetIsolationLevel(logger, sqldb.IsolationLevelReadCommitted)
		if err != nil {
			logger.Fatal("sql-failed-to-set-isolation-level", err)
		}

		err = sqlDB.CreateConfigurationsTable(logger)
		if err != nil {
			logger.Fatal("sql-failed-create-configurations-table", err)
		}
		activeDB = sqlDB
	}

	if activeDB == nil {
		logger.Fatal("no-database-configured", errors.New("no database configured"))
	}

	encryptor := encryptor.New(logger, activeDB, keyManager, cryptor, clock)
	migrationsDone := make(chan struct{})
	migrationManager := migration.NewManager(
		logger,
		etcdDB,
		storeClient,
		sqlDB,
		sqlConn,
		cryptor,
		migrations.Migrations,
		migrationsDone,
		clock,
		*databaseDriver,
	)

	desiredHub := events.NewHub()
	actualHub := events.NewHub()

	repTLSConfig := &rep.TLSConfig{
		RequireTLS:      *repRequireTLS,
		CaCertFile:      *repCACert,
		CertFile:        *repClientCert,
		KeyFile:         *repClientKey,
		ClientCacheSize: *repClientSessionCacheSize,
	}
	httpClient := cfhttp.NewClient()
	repClientFactory, err := rep.NewClientFactory(httpClient, httpClient, repTLSConfig)
	if err != nil {
		logger.Fatal("new-rep-client-factory-failed", err)
	}

	auctioneerClient := initializeAuctioneerClient(logger)

	exitChan := make(chan struct{})

	var accessLogger lager.Logger
	if *accessLogPath != "" {
		accessLogger = lager.NewLogger("bbs-access")
		file, err := os.OpenFile(*accessLogPath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
		if err != nil {
			logger.Error("invalid-access-log-path", err, lager.Data{"access-log-path": *accessLogPath})
			os.Exit(1)
		}
		accessLogger.RegisterSink(lager.NewWriterSink(file, lager.INFO))
	}

	var tlsConfig *tls.Config
	if *requireSSL {
		tlsConfig, err = cfhttp.NewTLSConfig(*certFile, *keyFile, *caFile)
		if err != nil {
			logger.Fatal("tls-configuration-failed", err)
		}
	}

	cbWorkPool := taskworkpool.New(logger, *taskCallBackWorkers, taskworkpool.HandleCompletedTask, tlsConfig)

	handler := handlers.New(
		logger,
		accessLogger,
		*updateWorkers,
		*convergenceWorkers,
		activeDB,
		desiredHub,
		actualHub,
		cbWorkPool,
		serviceClient,
		auctioneerClient,
		repClientFactory,
		migrationsDone,
		exitChan,
	)

	metricsNotifier := metrics.NewPeriodicMetronNotifier(logger)

	retirer := controllers.NewActualLRPRetirer(activeDB, actualHub, repClientFactory, serviceClient)
	lrpConvergenceController := controllers.NewLRPConvergenceController(logger, activeDB, actualHub, auctioneerClient, serviceClient, retirer, *convergenceWorkers)
	taskController := controllers.NewTaskController(activeDB, cbWorkPool, auctioneerClient, serviceClient, repClientFactory)

	convergerProcess := converger.New(
		logger,
		clock,
		lrpConvergenceController,
		taskController,
		serviceClient,
		*convergeRepeatInterval,
		*kickTaskDuration,
		*expirePendingTaskDuration,
		*expireCompletedTaskDuration)

	var server ifrit.Runner
	if tlsConfig != nil {
		server = http_server.NewTLSServer(*listenAddress, handler, tlsConfig)
	} else {
		server = http_server.New(*listenAddress, handler)
	}

	healthcheckServer := http_server.New(*healthAddress, http.HandlerFunc(healthCheckHandler))

	members := grouper.Members{
		{"healthcheck", healthcheckServer},
		{"lock-maintainer", maintainer},
		{"workpool", cbWorkPool},
		{"server", server},
		{"migration-manager", migrationManager},
		{"encryptor", encryptor},
		{"hub-maintainer", hubMaintainer(logger, desiredHub, actualHub)},
		{"metrics", *metricsNotifier},
		{"converger", convergerProcess},
		{"registration-runner", registrationRunner},
	}

	if dbgAddr := debugserver.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", debugserver.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)

	monitor := ifrit.Invoke(sigmon.New(group))
	go func() {
		// If a handler writes to this channel, we've hit an unrecoverable error
		// and should shut down (cleanly)
		<-exitChan
		monitor.Signal(os.Interrupt)
	}()

	logger.Info("started")

	err = <-monitor.Wait()

	if sqlConn != nil {
		sqlConn.Close()
	}

	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
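// A stripped-down sketch of the ifrit wiring used in main above: members run
// in an ordered group, sigmon relays OS signals into it, and Wait returns the
// first member error. The inline "worker" runner is hypothetical.
func exampleRunGroup(logger lager.Logger) {
	worker := ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		close(ready) // report that this member is up
		<-signals    // block until the group signals shutdown
		return nil
	})

	group := grouper.NewOrdered(os.Interrupt, grouper.Members{
		{"worker", worker},
	})
	monitor := ifrit.Invoke(sigmon.New(group))

	if err := <-monitor.Wait(); err != nil {
		logger.Error("exited-with-failure", err)
	}
}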
}) Context("when the image plugin output parsing fails", func() { It("returns an error", func() { fakeCmdRunnerStdout = `{"silly" "json":"formating}"}}"` _, err := externalImageManager.Metrics(logger, "", "/store/0/bundles/123/rootfs") Expect(err).To(MatchError(ContainSubstring("parsing metrics"))) }) }) }) Describe("logging", func() { BeforeEach(func() { buffer := gbytes.NewBuffer() externalLogger := lager.NewLogger("external-plugin") externalLogger.RegisterSink(lager.NewWriterSink(buffer, lager.DEBUG)) externalLogger.Debug("debug-message", lager.Data{"type": "debug"}) externalLogger.Info("info-message", lager.Data{"type": "info"}) externalLogger.Error("error-message", errors.New("failed!"), lager.Data{"type": "error"}) fakeCmdRunnerStderr = string(buffer.Contents()) }) Context("Create", func() { It("relogs the image plugin logs", func() { _, _, err := externalImageManager.Create( logger, "hello", rootfs_provider.Spec{ RootFS: baseImage, }, )
func main() {
	flagSet := flag.NewFlagSet("flags", flag.ContinueOnError)
	flagSet.Var(&recursors, "recursor", "specifies the address of an upstream DNS `server`, may be specified multiple times")
	flagSet.StringVar(&configFile, "config-file", "", "specifies the config `file`")
	flagSet.BoolVar(&foreground, "foreground", false, "if true confab will wait for consul to exit")

	if len(os.Args) < 2 {
		printUsageAndExit("invalid number of arguments", flagSet)
	}

	if err := flagSet.Parse(os.Args[2:]); err != nil {
		os.Exit(1)
	}

	configFileContents, err := ioutil.ReadFile(configFile)
	if err != nil {
		stderr.Printf("error reading configuration file: %s", err)
		os.Exit(1)
	}

	cfg, err := config.ConfigFromJSON(configFileContents)
	if err != nil {
		stderr.Printf("error reading configuration file: %s", err)
		os.Exit(1)
	}

	path, err := exec.LookPath(cfg.Path.AgentPath)
	if err != nil {
		printUsageAndExit(fmt.Sprintf("\"agent_path\" %q cannot be found", cfg.Path.AgentPath), flagSet)
	}

	if len(cfg.Path.PIDFile) == 0 {
		printUsageAndExit("\"pid_file\" cannot be empty", flagSet)
	}

	logger := lager.NewLogger("confab")
	logger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.INFO))

	agentRunner := &agent.Runner{
		Path:      path,
		PIDFile:   cfg.Path.PIDFile,
		ConfigDir: cfg.Path.ConsulConfigDir,
		Recursors: recursors,
		Stdout:    os.Stdout,
		Stderr:    os.Stderr,
		Logger:    logger,
	}

	consulAPIClient, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err) // not tested, NewClient never errors
	}

	agentClient := &agent.Client{
		ExpectedMembers: cfg.Consul.Agent.Servers.LAN,
		ConsulAPIAgent:  consulAPIClient.Agent(),
		ConsulRPCClient: nil,
		Logger:          logger,
	}

	retrier := utils.NewRetrier(clock.NewClock(), 1*time.Second)

	controller := chaperon.Controller{
		AgentRunner:    agentRunner,
		AgentClient:    agentClient,
		Retrier:        retrier,
		EncryptKeys:    cfg.Consul.EncryptKeys,
		Logger:         logger,
		ServiceDefiner: config.ServiceDefiner{logger},
		ConfigDir:      cfg.Path.ConsulConfigDir,
		Config:         cfg,
	}

	keyringRemover := chaperon.NewKeyringRemover(cfg.Path.KeyringFile, logger)
	configWriter := chaperon.NewConfigWriter(cfg.Path.ConsulConfigDir, logger)

	var r runner = chaperon.NewClient(controller, consulagent.NewRPCClient, keyringRemover, configWriter)
	if controller.Config.Consul.Agent.Mode == "server" {
		bootstrapChecker := chaperon.NewBootstrapChecker(logger, agentClient, status.Client{ConsulAPIStatus: consulAPIClient.Status()}, time.Sleep)
		r = chaperon.NewServer(controller, configWriter, consulagent.NewRPCClient, bootstrapChecker)
	}

	switch os.Args[1] {
	case "start":
		_, err = os.Stat(controller.Config.Path.ConsulConfigDir)
		if err != nil {
			printUsageAndExit(fmt.Sprintf("\"consul_config_dir\" %q could not be found", controller.Config.Path.ConsulConfigDir), flagSet)
		}

		if utils.IsRunningProcess(agentRunner.PIDFile) {
			stderr.Println("consul_agent is already running, please stop it first")
			os.Exit(1)
		}

		if len(agentClient.ExpectedMembers) == 0 {
			printUsageAndExit("at least one \"expected-member\" must be provided", flagSet)
		}

		timeout := utils.NewTimeout(time.After(time.Duration(controller.Config.Confab.TimeoutInSeconds) * time.Second))

		if err := r.Start(cfg, timeout); err != nil {
			stderr.Printf("error during start: %s", err)
			r.Stop()
			os.Exit(1)
		}

		if foreground {
			if err := agentRunner.Wait(); err != nil {
				stderr.Printf("error during wait: %s", err)
				r.Stop()
				os.Exit(1)
			}
		}
	case "stop":
		if err := r.Stop(); err != nil {
			stderr.Printf("error during stop: %s", err)
			os.Exit(1)
		}
	default:
		printUsageAndExit(fmt.Sprintf("invalid COMMAND %q", os.Args[1]), flagSet)
	}
}
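// Hypothetical invocations, inferred from the argument handling above: the
// COMMAND sits in os.Args[1] and flags are parsed from os.Args[2:]:
//
//   confab start --config-file /var/vcap/jobs/confab/config.json
//   confab stop --config-file /var/vcap/jobs/confab/config.json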
) var _ = Describe("TaskWorker", func() { var ( fakeServer *ghttp.Server logger *lagertest.TestLogger timeout time.Duration ) BeforeEach(func() { timeout = 1 * time.Second cfhttp.Initialize(timeout) fakeServer = ghttp.NewServer() logger = lagertest.NewTestLogger("test") logger.RegisterSink(lager.NewWriterSink(GinkgoWriter, lager.INFO)) }) AfterEach(func() { fakeServer.Close() }) Describe("HandleCompletedTask", func() { var ( callbackURL string taskDB *dbfakes.FakeTaskDB statusCodes chan int task *models.Task httpClient *http.Client )
var (
	serverProc  ifrit.Process
	fakeMetrics *fakes.FakeMetrics
)

BeforeEach(func() {
	var err error
	fakeMetrics = new(fakes.FakeMetrics)
	fakeMetrics.NumCPUReturns(11)
	fakeMetrics.NumGoroutineReturns(888)
	fakeMetrics.LoopDevicesReturns(33)
	fakeMetrics.BackingStoresReturns(12)
	fakeMetrics.DepotDirsReturns(3)

	sink := lager.NewReconfigurableSink(lager.NewWriterSink(GinkgoWriter, lager.DEBUG), lager.DEBUG)
	serverProc, err = metrics.StartDebugServer("127.0.0.1:5123", sink, fakeMetrics)
	Expect(err).ToNot(HaveOccurred())
})

AfterEach(func() {
	serverProc.Signal(os.Kill)
})

It("should report the number of loop devices, backing store files and depotDirs", func() {
	resp, err := http.Get("http://127.0.0.1:5123/debug/vars")
	Expect(err).ToNot(HaveOccurred())
	defer resp.Body.Close()

	Expect(resp.StatusCode).To(Equal(http.StatusOK))