func init() { reexec.Register("chrootwrite", func() { var rootfs, path string var uid, gid int flag.StringVar(&rootfs, "rootfs", "", "rootfs") flag.StringVar(&path, "path", "", "path") flag.IntVar(&uid, "uid", 0, "uid") flag.IntVar(&gid, "gid", 0, "gid") flag.Parse() if err := syscall.Chroot(rootfs); err != nil { panic(err) } if err := os.Chdir("/"); err != nil { panic(err) } var contents bytes.Buffer if _, err := io.Copy(&contents, os.Stdin); err != nil { panic(err) } w := RootfsWriter{} if err := w.writeFile(lager.NewLogger("chroot-write"), path, contents.Bytes(), rootfs, uid, gid); err != nil { panic(err) } }) }
func New(writer io.Writer) *GinkgoReporter { logger := lager.NewLogger("ginkgo") logger.RegisterSink(lager.NewWriterSink(writer, lager.DEBUG)) return &GinkgoReporter{ writer: writer, logger: logger, } }
func NewTestLogger(component string) *TestLogger { logger := lager.NewLogger(component) testSink := NewTestSink() logger.RegisterSink(testSink) logger.RegisterSink(lager.NewWriterSink(ginkgo.GinkgoWriter, lager.DEBUG)) return &TestLogger{logger, testSink} }
func (c *Config) BuildLogger() error { logFileHandle, err := os.Create(c.LogFilePath) if err != nil { return errors.New(fmt.Sprintf("Could not open handle to logfile %s", err.Error())) } c.Logger = lager.NewLogger("BootStrap Errand") writerSink := lager.NewWriterSink(logFileHandle, lager.DEBUG) c.Logger.RegisterSink(writerSink) return nil }
func TestBenchmarkBbs(t *testing.T) { var lagerLogLevel lager.LogLevel switch logLevel { case DEBUG: lagerLogLevel = lager.DEBUG case INFO: lagerLogLevel = lager.INFO case ERROR: lagerLogLevel = lager.ERROR case FATAL: lagerLogLevel = lager.FATAL default: panic(fmt.Errorf("unknown log level: %s", logLevel)) } var logWriter io.Writer if logFilename == "" { logWriter = GinkgoWriter } else { logFile, err := os.Create(logFilename) if err != nil { panic(fmt.Errorf("Error opening file '%s': %s", logFilename, err.Error())) } defer logFile.Close() logWriter = logFile } logger = lager.NewLogger("bbs-benchmarks-test") logger.RegisterSink(lager.NewWriterSink(logWriter, lagerLogLevel)) reporters = []Reporter{} if dataDogAPIKey != "" && dataDogAppKey != "" { dataDogClient = datadog.NewClient(dataDogAPIKey, dataDogAppKey) dataDogReporter = reporter.NewDataDogReporter(logger, metricPrefix, dataDogClient) reporters = append(reporters, &dataDogReporter) } if awsAccessKeyID != "" && awsSecretAccessKey != "" && awsBucketName != "" { creds := credentials.NewStaticCredentials(awsAccessKeyID, awsSecretAccessKey, "") s3Client := s3.New(&aws.Config{ Region: &awsRegion, Credentials: creds, }) uploader := s3manager.NewUploader(&s3manager.UploadOptions{S3: s3Client}) reporter := reporter.NewS3Reporter(logger, awsBucketName, uploader) reporters = append(reporters, &reporter) } RegisterFailHandler(Fail) RunSpecsWithDefaultAndCustomReporters(t, "Benchmark BBS Suite", reporters) }
func main() { var ( err error uaaClient client.Client token *schema.Token ) if len(os.Args) < 5 { fmt.Printf("Usage: <client-name> <client-secret> <uaa-url> <skip-verification>\n\n") fmt.Printf("For example: client-name client-secret https://uaa.service.cf.internal:8443 true\n") return } skip, err := strconv.ParseBool(os.Args[4]) if err != nil { log.Fatal(err) os.Exit(1) } cfg := &config.Config{ ClientName: os.Args[1], ClientSecret: os.Args[2], UaaEndpoint: os.Args[3], SkipVerification: skip, } logger := lager.NewLogger("test") clock := clock.NewClock() uaaClient, err = client.NewClient(logger, cfg, clock) if err != nil { log.Fatal(err) os.Exit(1) } fmt.Printf("Connecting to: %s ...\n", cfg.UaaEndpoint) token, err = uaaClient.FetchToken(true) if err != nil { log.Fatal(err) os.Exit(1) } fmt.Printf("Response:\n\ttoken: %s\n\texpires: %d\n", token.AccessToken, token.ExpiresIn) }
func main() { var ( err error uaaClient client.Client key string ) if len(os.Args) < 3 { fmt.Printf("Usage: <uaa-url> <skip-verification>\n\n") fmt.Printf("For example: https://uaa.service.cf.internal:8443 true\n") return } skip, err := strconv.ParseBool(os.Args[2]) if err != nil { log.Fatal(err) os.Exit(1) } cfg := &config.Config{ UaaEndpoint: os.Args[1], SkipVerification: skip, } logger := lager.NewLogger("test") clock := clock.NewClock() uaaClient, err = client.NewClient(logger, cfg, clock) if err != nil { log.Fatal(err) os.Exit(1) } fmt.Printf("Connecting to: %s ...\n", cfg.UaaEndpoint) key, err = uaaClient.FetchKey() if err != nil { log.Fatal(err) os.Exit(1) } fmt.Printf("Response:\n%s\n", key) }
func main() { logger := lager.NewLogger("cnsim-server") logger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.DEBUG)) port := getEnv(logger, "PORT", "9000") listenAddress := getEnv(logger, "LISTEN_ADDRESS", "127.0.0.1") address := fmt.Sprintf("%s:%s", listenAddress, port) logger.Info("listen", lager.Data{"address": address}) routes := rata.Routes{ {Name: "root", Method: "GET", Path: "/"}, {Name: "steady_state", Method: "GET", Path: "/steady_state"}, } rataHandlers := rata.Handlers{ "root": &handlers.Root{ Logger: logger, }, "steady_state": gziphandler.GzipHandler(&handlers.SteadyState{ Logger: logger, Simulator: &simulate.SteadyState{ AppSizeDistribution: &distributions.GeometricWithPositiveSupport{}, }, }), } router, err := rata.NewRouter(routes, rataHandlers) if err != nil { log.Fatalf("unable to create rata Router: %s", err) // not tested } monitor := ifrit.Invoke(sigmon.New(grouper.NewOrdered(os.Interrupt, grouper.Members{ {"http_server", http_server.New(address, router)}, }))) err = <-monitor.Wait() if err != nil { log.Fatalf("ifrit: %s", err) } }
func (f LagerFlag) Logger(component string) (lager.Logger, *lager.ReconfigurableSink) { var minLagerLogLevel lager.LogLevel switch f.LogLevel { case LogLevelDebug: minLagerLogLevel = lager.DEBUG case LogLevelInfo: minLagerLogLevel = lager.INFO case LogLevelError: minLagerLogLevel = lager.ERROR case LogLevelFatal: minLagerLogLevel = lager.FATAL default: panic(fmt.Sprintf("unknown log level: %s", f.LogLevel)) } logger := lager.NewLogger(component) sink := lager.NewReconfigurableSink(lager.NewWriterSink(os.Stdout, lager.DEBUG), minLagerLogLevel) logger.RegisterSink(sink) return logger, sink }
// New returns a Connection to the given network address, using a default
// logger named "garden-connection".
func New(network, address string) Connection {
	defaultLogger := lager.NewLogger("garden-connection")
	return NewWithLogger(network, address, defaultLogger)
}
// main is the BBS server entry point. Startup order matters throughout:
// flags are parsed first, then encryption/DB backends are selected
// (ETCD and/or SQL — SQL wins as the active DB when both are configured),
// then the HTTP handler and background processes are assembled into an
// ordered ifrit group and run until a signal or unrecoverable error.
func main() {
	debugserver.AddFlags(flag.CommandLine)
	lagerflags.AddFlags(flag.CommandLine)
	etcdFlags := AddETCDFlags(flag.CommandLine)
	encryptionFlags := encryption.AddEncryptionFlags(flag.CommandLine)
	flag.Parse()

	cfhttp.Initialize(*communicationTimeout)

	logger, reconfigurableSink := lagerflags.New("bbs")
	logger.Info("starting")

	initializeDropsonde(logger)

	clock := clock.NewClock()

	consulClient, err := consuladapter.NewClientFromUrl(*consulCluster)
	if err != nil {
		logger.Fatal("new-consul-client-failed", err)
	}

	serviceClient := bbs.NewServiceClient(consulClient, clock)

	maintainer := initializeLockMaintainer(logger, serviceClient)

	// Validate both addresses up front so misconfiguration fails fast,
	// before any background process is started.
	_, portString, err := net.SplitHostPort(*listenAddress)
	if err != nil {
		logger.Fatal("failed-invalid-listen-address", err)
	}
	portNum, err := net.LookupPort("tcp", portString)
	if err != nil {
		logger.Fatal("failed-invalid-listen-port", err)
	}

	_, portString, err = net.SplitHostPort(*healthAddress)
	if err != nil {
		logger.Fatal("failed-invalid-health-address", err)
	}
	_, err = net.LookupPort("tcp", portString)
	if err != nil {
		logger.Fatal("failed-invalid-health-port", err)
	}

	registrationRunner := initializeRegistrationRunner(logger, consulClient, portNum, clock)

	var activeDB db.DB
	var sqlDB *sqldb.SQLDB
	var sqlConn *sql.DB
	var storeClient etcddb.StoreClient
	var etcdDB *etcddb.ETCDDB

	key, keys, err := encryptionFlags.Parse()
	if err != nil {
		logger.Fatal("cannot-setup-encryption", err)
	}
	keyManager, err := encryption.NewKeyManager(key, keys)
	if err != nil {
		logger.Fatal("cannot-setup-encryption", err)
	}
	cryptor := encryption.NewCryptor(keyManager, rand.Reader)

	etcdOptions, err := etcdFlags.Validate()
	if err != nil {
		logger.Fatal("etcd-validation-failed", err)
	}

	if etcdOptions.IsConfigured {
		storeClient = initializeEtcdStoreClient(logger, etcdOptions)
		etcdDB = initializeEtcdDB(logger, cryptor, storeClient, serviceClient, *desiredLRPCreationTimeout)
		activeDB = etcdDB
	}

	// If SQL database info is passed in, use SQL instead of ETCD
	if *databaseDriver != "" && *databaseConnectionString != "" {
		var err error
		connectionString := appendSSLConnectionStringParam(logger, *databaseDriver, *databaseConnectionString, *sqlCACertFile)

		sqlConn, err = sql.Open(*databaseDriver, connectionString)
		if err != nil {
			logger.Fatal("failed-to-open-sql", err)
		}
		defer sqlConn.Close()
		sqlConn.SetMaxOpenConns(*maxDatabaseConnections)
		sqlConn.SetMaxIdleConns(*maxDatabaseConnections)

		// Ping verifies the connection string actually reaches a database;
		// sql.Open alone does not connect.
		err = sqlConn.Ping()
		if err != nil {
			logger.Fatal("sql-failed-to-connect", err)
		}

		sqlDB = sqldb.NewSQLDB(sqlConn, *convergenceWorkers, *updateWorkers, format.ENCRYPTED_PROTO, cryptor, guidprovider.DefaultGuidProvider, clock, *databaseDriver)
		err = sqlDB.SetIsolationLevel(logger, sqldb.IsolationLevelReadCommitted)
		if err != nil {
			logger.Fatal("sql-failed-to-set-isolation-level", err)
		}

		err = sqlDB.CreateConfigurationsTable(logger)
		if err != nil {
			logger.Fatal("sql-failed-create-configurations-table", err)
		}
		activeDB = sqlDB
	}

	// At least one backend must have been configured above.
	if activeDB == nil {
		logger.Fatal("no-database-configured", errors.New("no database configured"))
	}

	encryptor := encryptor.New(logger, activeDB, keyManager, cryptor, clock)

	// Closed by the migration manager once migrations finish; the handler
	// waits on it before serving.
	migrationsDone := make(chan struct{})

	migrationManager := migration.NewManager(
		logger,
		etcdDB,
		storeClient,
		sqlDB,
		sqlConn,
		cryptor,
		migrations.Migrations,
		migrationsDone,
		clock,
		*databaseDriver,
	)

	desiredHub := events.NewHub()
	actualHub := events.NewHub()

	repTLSConfig := &rep.TLSConfig{
		RequireTLS:      *repRequireTLS,
		CaCertFile:      *repCACert,
		CertFile:        *repClientCert,
		KeyFile:         *repClientKey,
		ClientCacheSize: *repClientSessionCacheSize,
	}

	httpClient := cfhttp.NewClient()
	repClientFactory, err := rep.NewClientFactory(httpClient, httpClient, repTLSConfig)
	if err != nil {
		logger.Fatal("new-rep-client-factory-failed", err)
	}

	auctioneerClient := initializeAuctioneerClient(logger)

	// Handlers write to exitChan on unrecoverable errors; see the goroutine
	// below that translates that into a clean shutdown signal.
	exitChan := make(chan struct{})

	// Optional separate access log; requests are still handled if the
	// flag is unset (accessLogger stays nil).
	var accessLogger lager.Logger
	if *accessLogPath != "" {
		accessLogger = lager.NewLogger("bbs-access")
		file, err := os.OpenFile(*accessLogPath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
		if err != nil {
			logger.Error("invalid-access-log-path", err, lager.Data{"access-log-path": *accessLogPath})
			os.Exit(1)
		}
		accessLogger.RegisterSink(lager.NewWriterSink(file, lager.INFO))
	}

	var tlsConfig *tls.Config
	if *requireSSL {
		tlsConfig, err = cfhttp.NewTLSConfig(*certFile, *keyFile, *caFile)
		if err != nil {
			logger.Fatal("tls-configuration-failed", err)
		}
	}

	cbWorkPool := taskworkpool.New(logger, *taskCallBackWorkers, taskworkpool.HandleCompletedTask, tlsConfig)

	handler := handlers.New(
		logger,
		accessLogger,
		*updateWorkers,
		*convergenceWorkers,
		activeDB,
		desiredHub,
		actualHub,
		cbWorkPool,
		serviceClient,
		auctioneerClient,
		repClientFactory,
		migrationsDone,
		exitChan,
	)

	metricsNotifier := metrics.NewPeriodicMetronNotifier(logger)

	retirer := controllers.NewActualLRPRetirer(activeDB, actualHub, repClientFactory, serviceClient)
	lrpConvergenceController := controllers.NewLRPConvergenceController(logger, activeDB, actualHub, auctioneerClient, serviceClient, retirer, *convergenceWorkers)
	taskController := controllers.NewTaskController(activeDB, cbWorkPool, auctioneerClient, serviceClient, repClientFactory)

	convergerProcess := converger.New(
		logger,
		clock,
		lrpConvergenceController,
		taskController,
		serviceClient,
		*convergeRepeatInterval,
		*kickTaskDuration,
		*expirePendingTaskDuration,
		*expireCompletedTaskDuration)

	var server ifrit.Runner
	if tlsConfig != nil {
		server = http_server.NewTLSServer(*listenAddress, handler, tlsConfig)
	} else {
		server = http_server.New(*listenAddress, handler)
	}

	healthcheckServer := http_server.New(*healthAddress, http.HandlerFunc(healthCheckHandler))

	// Members are started in order and stopped in reverse; the healthcheck
	// comes up first so orchestration can observe the process early.
	members := grouper.Members{
		{"healthcheck", healthcheckServer},
		{"lock-maintainer", maintainer},
		{"workpool", cbWorkPool},
		{"server", server},
		{"migration-manager", migrationManager},
		{"encryptor", encryptor},
		{"hub-maintainer", hubMaintainer(logger, desiredHub, actualHub)},
		{"metrics", *metricsNotifier},
		{"converger", convergerProcess},
		{"registration-runner", registrationRunner},
	}

	// The debug server, when enabled, is prepended so it starts first.
	if dbgAddr := debugserver.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", debugserver.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)

	monitor := ifrit.Invoke(sigmon.New(group))
	go func() {
		// If a handler writes to this channel, we've hit an unrecoverable error
		// and should shut down (cleanly)
		<-exitChan
		monitor.Signal(os.Interrupt)
	}()

	logger.Info("started")

	err = <-monitor.Wait()
	if sqlConn != nil {
		sqlConn.Close()
	}
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
. "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "github.com/onsi/gomega/gbytes" ) var _ = Describe("Middleware", func() { Describe("EmitLatency", func() { var ( sender *fake.FakeMetricSender handler http.HandlerFunc ) BeforeEach(func() { sender = fake.NewFakeMetricSender() dropsonde_metrics.Initialize(sender, nil) logger := lager.NewLogger("test-session") handler = func(w http.ResponseWriter, r *http.Request) { time.Sleep(10) } handler = middleware.NewLatencyEmitter(logger).EmitLatency(handler) }) It("reports latency", func() { handler.ServeHTTP(nil, nil) latency := sender.GetValue("RequestLatency") Expect(latency.Value).NotTo(BeZero()) Expect(latency.Unit).To(Equal("nanos")) }) }) Describe("RequestCountWrap", func() {
}) }) Context("when the image plugin output parsing fails", func() { It("returns an error", func() { fakeCmdRunnerStdout = `{"silly" "json":"formating}"}}"` _, err := externalImageManager.Metrics(logger, "", "/store/0/bundles/123/rootfs") Expect(err).To(MatchError(ContainSubstring("parsing metrics"))) }) }) }) Describe("logging", func() { BeforeEach(func() { buffer := gbytes.NewBuffer() externalLogger := lager.NewLogger("external-plugin") externalLogger.RegisterSink(lager.NewWriterSink(buffer, lager.DEBUG)) externalLogger.Debug("debug-message", lager.Data{"type": "debug"}) externalLogger.Info("info-message", lager.Data{"type": "info"}) externalLogger.Error("error-message", errors.New("failed!"), lager.Data{"type": "error"}) fakeCmdRunnerStderr = string(buffer.Contents()) }) Context("Create", func() { It("relogs the image plugin logs", func() { _, _, err := externalImageManager.Create( logger, "hello", rootfs_provider.Spec{ RootFS: baseImage, },
var incomingRequest *http.Request var responseWriter http.ResponseWriter var outgoingResponse *httptest.ResponseRecorder var uploader fake_ccclient.FakeUploader var poller fake_ccclient.FakePoller var logger lager.Logger BeforeEach(func() { outgoingResponse = httptest.NewRecorder() responseWriter = outgoingResponse uploader = fake_ccclient.FakeUploader{} poller = fake_ccclient.FakePoller{} }) JustBeforeEach(func() { logger = lager.NewLogger("fake-logger") dropletUploadHandler := upload_droplet.New(&uploader, &poller, logger) dropletUploadHandler.ServeHTTP(responseWriter, incomingRequest) }) Context("When the request does not include a droplet upload URI", func() { BeforeEach(func() { var err error incomingRequest, err = http.NewRequest("POST", "http://example.com", bytes.NewBufferString("")) Expect(err).NotTo(HaveOccurred()) }) It("responds with an error code", func() { Expect(outgoingResponse.Code).To(Equal(http.StatusBadRequest)) })
// main is the confab entry point. It reads a JSON config file, builds the
// consul agent runner/client/controller, wraps them in either a client or
// server chaperon (depending on the configured agent mode), and then
// dispatches on the COMMAND argument (os.Args[1]): "start" or "stop".
// Flags begin at os.Args[2:] because os.Args[1] is the command.
func main() {
	flagSet := flag.NewFlagSet("flags", flag.ContinueOnError)
	flagSet.Var(&recursors, "recursor", "specifies the address of an upstream DNS `server`, may be specified multiple times")
	flagSet.StringVar(&configFile, "config-file", "", "specifies the config `file`")
	flagSet.BoolVar(&foreground, "foreground", false, "if true confab will wait for consul to exit")

	if len(os.Args) < 2 {
		printUsageAndExit("invalid number of arguments", flagSet)
	}

	// ContinueOnError already printed the parse failure; just exit.
	if err := flagSet.Parse(os.Args[2:]); err != nil {
		os.Exit(1)
	}

	configFileContents, err := ioutil.ReadFile(configFile)
	if err != nil {
		stderr.Printf("error reading configuration file: %s", err)
		os.Exit(1)
	}

	cfg, err := config.ConfigFromJSON(configFileContents)
	if err != nil {
		stderr.Printf("error reading configuration file: %s", err)
		os.Exit(1)
	}

	// The consul agent binary must be resolvable before anything starts.
	path, err := exec.LookPath(cfg.Path.AgentPath)
	if err != nil {
		printUsageAndExit(fmt.Sprintf("\"agent_path\" %q cannot be found", cfg.Path.AgentPath), flagSet)
	}

	if len(cfg.Path.PIDFile) == 0 {
		printUsageAndExit("\"pid_file\" cannot be empty", flagSet)
	}

	logger := lager.NewLogger("confab")
	logger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.INFO))

	agentRunner := &agent.Runner{
		Path:      path,
		PIDFile:   cfg.Path.PIDFile,
		ConfigDir: cfg.Path.ConsulConfigDir,
		Recursors: recursors,
		Stdout:    os.Stdout,
		Stderr:    os.Stderr,
		Logger:    logger,
	}

	consulAPIClient, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err) // not tested, NewClient never errors
	}

	agentClient := &agent.Client{
		ExpectedMembers: cfg.Consul.Agent.Servers.LAN,
		ConsulAPIAgent:  consulAPIClient.Agent(),
		ConsulRPCClient: nil,
		Logger:          logger,
	}

	retrier := utils.NewRetrier(clock.NewClock(), 1*time.Second)

	controller := chaperon.Controller{
		AgentRunner:    agentRunner,
		AgentClient:    agentClient,
		Retrier:        retrier,
		EncryptKeys:    cfg.Consul.EncryptKeys,
		Logger:         logger,
		ServiceDefiner: config.ServiceDefiner{logger},
		ConfigDir:      cfg.Path.ConsulConfigDir,
		Config:         cfg,
	}

	keyringRemover := chaperon.NewKeyringRemover(cfg.Path.KeyringFile, logger)
	configWriter := chaperon.NewConfigWriter(cfg.Path.ConsulConfigDir, logger)

	// Default to the client chaperon; swap in the server variant (with a
	// bootstrap checker) when the agent is configured in server mode.
	var r runner = chaperon.NewClient(controller, consulagent.NewRPCClient, keyringRemover, configWriter)
	if controller.Config.Consul.Agent.Mode == "server" {
		bootstrapChecker := chaperon.NewBootstrapChecker(logger, agentClient, status.Client{ConsulAPIStatus: consulAPIClient.Status()}, time.Sleep)
		r = chaperon.NewServer(controller, configWriter, consulagent.NewRPCClient, bootstrapChecker)
	}

	switch os.Args[1] {
	case "start":
		_, err = os.Stat(controller.Config.Path.ConsulConfigDir)
		if err != nil {
			printUsageAndExit(fmt.Sprintf("\"consul_config_dir\" %q could not be found", controller.Config.Path.ConsulConfigDir), flagSet)
		}

		// Refuse to start a second agent against the same PID file.
		if utils.IsRunningProcess(agentRunner.PIDFile) {
			stderr.Println("consul_agent is already running, please stop it first")
			os.Exit(1)
		}

		if len(agentClient.ExpectedMembers) == 0 {
			printUsageAndExit("at least one \"expected-member\" must be provided", flagSet)
		}

		timeout := utils.NewTimeout(time.After(time.Duration(controller.Config.Confab.TimeoutInSeconds) * time.Second))

		// On a failed start, stop whatever was partially started before
		// exiting so no orphaned agent is left behind.
		if err := r.Start(cfg, timeout); err != nil {
			stderr.Printf("error during start: %s", err)
			r.Stop()
			os.Exit(1)
		}

		// With -foreground, block until the consul agent itself exits.
		if foreground {
			if err := agentRunner.Wait(); err != nil {
				stderr.Printf("error during wait: %s", err)
				r.Stop()
				os.Exit(1)
			}
		}
	case "stop":
		if err := r.Stop(); err != nil {
			stderr.Printf("error during stop: %s", err)
			os.Exit(1)
		}
	default:
		printUsageAndExit(fmt.Sprintf("invalid COMMAND %q", os.Args[1]), flagSet)
	}
}