func generateGuid(logger lager.Logger) string {
	uuid, err := uuid.NewV4()
	if err != nil {
		logger.Fatal("Couldn't generate uuid", err)
	}
	return uuid.String()
}
func newUaaClient(logger lager.Logger, clock clock.Clock, c *config.Config) uaa_client.Client {
	if c.RoutingApi.AuthDisabled {
		logger.Info("using-noop-token-fetcher")
		return uaa_client.NewNoOpUaaClient()
	}

	if c.OAuth.Port == -1 {
		logger.Fatal("tls-not-enabled", errors.New("GoRouter requires TLS enabled to get OAuth token"),
			lager.Data{"token-endpoint": c.OAuth.TokenEndpoint, "port": c.OAuth.Port})
	}

	tokenURL := fmt.Sprintf("https://%s:%d", c.OAuth.TokenEndpoint, c.OAuth.Port)

	cfg := &uaa_config.Config{
		UaaEndpoint:           tokenURL,
		SkipVerification:      c.OAuth.SkipSSLValidation,
		ClientName:            c.OAuth.ClientName,
		ClientSecret:          c.OAuth.ClientSecret,
		CACerts:               c.OAuth.CACerts,
		MaxNumberOfRetries:    c.TokenFetcherMaxRetries,
		RetryInterval:         c.TokenFetcherRetryInterval,
		ExpirationBufferInSec: c.TokenFetcherExpirationBufferTimeInSeconds,
	}

	uaaClient, err := uaa_client.NewClient(logger, cfg, clock)
	if err != nil {
		logger.Fatal("initialize-token-fetcher-error", err)
	}
	return uaaClient
}
func initializeServer(
	bbsClient bbs.Client,
	executorClient executor.Client,
	evacuatable evacuation_context.Evacuatable,
	evacuationReporter evacuation_context.EvacuationReporter,
	logger lager.Logger,
	stackMap rep.StackPathMap,
	supportedProviders []string,
) (ifrit.Runner, string) {
	lrpStopper := initializeLRPStopper(*cellID, executorClient, logger)

	auctionCellRep := auction_cell_rep.New(*cellID, stackMap, supportedProviders, *zone, generateGuid, executorClient, evacuationReporter, logger)
	handlers := handlers.New(auctionCellRep, lrpStopper, executorClient, evacuatable, logger)

	router, err := rata.NewRouter(rep.Routes, handlers)
	if err != nil {
		logger.Fatal("failed-to-construct-router", err)
	}

	ip, err := localip.LocalIP()
	if err != nil {
		logger.Fatal("failed-to-fetch-ip", err)
	}

	port := strings.Split(*listenAddr, ":")[1]
	address := fmt.Sprintf("http://%s:%s", ip, port)

	return http_server.New(*listenAddr, router), address
}
func NewHandler(
	logger lager.Logger,
	webcamHost string,
) Handler {
	director := func(req *http.Request) {
		req.URL.Scheme = "http"
		req.URL.Host = webcamHost
		req.URL.Path = "/"
		req.URL.RawQuery = "action=stream"
	}

	flushInterval, err := time.ParseDuration("10ms")
	if err != nil {
		logger.Fatal("golang broke", err)
	}

	proxy := httputil.ReverseProxy{
		Director:      director,
		FlushInterval: flushInterval,
		ErrorLog:      log.New(ioutil.Discard, "", 0),
	}

	return &handler{
		logger: logger,
		proxy:  proxy,
	}
}
func connectToNatsServer(logger lager.Logger, c *config.Config) yagnats.NATSConn {
	var natsClient yagnats.NATSConn
	var err error

	natsServers := c.NatsServers()
	attempts := 3
	for attempts > 0 {
		natsClient, err = yagnats.Connect(natsServers)
		if err == nil {
			break
		} else {
			attempts--
			time.Sleep(100 * time.Millisecond)
		}
	}

	if err != nil {
		logger.Fatal("nats-connection-error", err)
	}

	natsClient.AddClosedCB(func(conn *nats.Conn) {
		logger.Fatal("nats-connection-closed", errors.New("unexpected close"), lager.Data{"connection": *conn})
	})

	return natsClient
}
func appendSSLConnectionStringParam(logger lager.Logger, driverName, databaseConnectionString, sqlCACertFile string) string {
	switch driverName {
	case "mysql":
		if sqlCACertFile != "" {
			certBytes, err := ioutil.ReadFile(sqlCACertFile)
			if err != nil {
				logger.Fatal("failed-to-read-sql-ca-file", err)
			}

			caCertPool := x509.NewCertPool()
			if ok := caCertPool.AppendCertsFromPEM(certBytes); !ok {
				// err is nil here (ReadFile succeeded), so report a parse failure explicitly
				logger.Fatal("failed-to-parse-sql-ca", errors.New("could not append CA certs from PEM"))
			}

			tlsConfig := &tls.Config{
				InsecureSkipVerify: false,
				RootCAs:            caCertPool,
			}
			mysql.RegisterTLSConfig("bbs-tls", tlsConfig)

			databaseConnectionString = fmt.Sprintf("%s?tls=bbs-tls", databaseConnectionString)
		}
	case "postgres":
		if sqlCACertFile == "" {
			databaseConnectionString = fmt.Sprintf("%s?sslmode=disable", databaseConnectionString)
		} else {
			databaseConnectionString = fmt.Sprintf("%s?sslmode=verify-ca&sslrootcert=%s", databaseConnectionString, sqlCACertFile)
		}
	}
	return databaseConnectionString
}
func initializeServer(logger lager.Logger) ifrit.Runner {
	if *staticDirectory == "" {
		logger.Fatal("static-directory-missing", nil)
	}

	transport := &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		Dial: (&net.Dialer{
			Timeout:   ccUploadDialTimeout,
			KeepAlive: ccUploadKeepAlive,
		}).Dial,
		TLSClientConfig: &tls.Config{
			InsecureSkipVerify: *skipCertVerify,
		},
		TLSHandshakeTimeout: ccUploadTLSHandshakeTimeout,
	}

	pollerHttpClient := cf_http.NewClient()
	pollerHttpClient.Transport = transport

	fileServerHandler, err := handlers.New(*staticDirectory, logger)
	if err != nil {
		logger.Error("router-building-failed", err)
		os.Exit(1)
	}

	return http_server.New(*serverAddress, fileServerHandler)
}
func connectToNatsServer(logger lager.Logger, c *config.Config) *nats.Conn {
	var natsClient *nats.Conn
	var err error

	natsServers := c.NatsServers()
	attempts := 3
	for attempts > 0 {
		options := nats.DefaultOptions
		options.Servers = natsServers
		options.PingInterval = c.NatsClientPingInterval
		options.ClosedCB = func(conn *nats.Conn) {
			logger.Fatal("nats-connection-closed", errors.New("unexpected close"), lager.Data{"last_error": conn.LastError()})
		}

		natsClient, err = options.Connect()
		if err == nil {
			break
		} else {
			attempts--
			time.Sleep(100 * time.Millisecond)
		}
	}

	if err != nil {
		logger.Fatal("nats-connection-error", err)
	}

	return natsClient
}
func (*generator) Guid(logger lager.Logger) string {
	guid, err := uuid.NewV4()
	if err != nil {
		logger.Fatal("failed-to-generate-guid", err)
	}
	return guid.String()
}
func initializeNatsEmitter(natsClient diegonats.NATSClient, logger lager.Logger) nats_emitter.NATSEmitter {
	workPool, err := workpool.NewWorkPool(*routeEmittingWorkers)
	if err != nil {
		logger.Fatal("failed-to-construct-nats-emitter-workpool", err, lager.Data{"num-workers": *routeEmittingWorkers}) // should never happen
	}

	return nats_emitter.New(natsClient, workPool, logger)
}
func initializeServiceClient(logger lager.Logger, clock clock.Clock, consulClient *api.Client, sessionManager consuladapter.SessionManager) bbs.ServiceClient {
	consulDBSession, err := consuladapter.NewSessionNoChecks("consul-db", *lockTTL, consulClient, sessionManager)
	if err != nil {
		logger.Fatal("consul-session-failed", err)
	}

	return bbs.NewServiceClient(consulDBSession, clock)
}
func initializeServiceClient(logger lager.Logger) nsync.ServiceClient {
	consulClient, err := consuladapter.NewClientFromUrl(*consulCluster)
	if err != nil {
		logger.Fatal("new-client-failed", err)
	}

	return nsync.NewServiceClient(consulClient, clock.NewClock())
}
func initializeHandler(logger lager.Logger, noaaClient *noaa.Consumer, maxInFlight int, apiClient bbs.Client) http.Handler {
	apiHandler, err := handler.New(apiClient, noaaClient, maxInFlight, *bulkLRPStatusWorkers, logger)
	if err != nil {
		logger.Fatal("initialize-handler.failed", err)
	}

	return apiHandler
}
func createCrypto(logger lager.Logger, secret string) *secure.AesGCM {
	// generate secure encryption key using key derivation function (pbkdf2)
	secretPbkdf2 := secure.NewPbkdf2([]byte(secret), 16)

	crypto, err := secure.NewAesGCM(secretPbkdf2)
	if err != nil {
		logger.Fatal("error-creating-route-service-crypto", err)
	}
	return crypto
}
func wireNetworker(
	log lager.Logger,
	tag string,
	networkPoolCIDR *net.IPNet,
	externalIP net.IP,
	iptablesMgr kawasaki.IPTablesConfigurer,
	interfacePrefix string,
	chainPrefix string,
	propManager *properties.Manager,
	networkModulePath string) gardener.Networker {
	runner := &logging.Runner{CommandRunner: linux_command_runner.New(), Logger: log.Session("network-runner")}

	hostConfigurer := &configure.Host{
		Veth:   &devices.VethCreator{},
		Link:   &devices.Link{Name: "guardian"},
		Bridge: &devices.Bridge{},
		Logger: log.Session("network-host-configurer"),
	}

	containerCfgApplier := &configure.Container{
		Logger: log.Session("network-container-configurer"),
		Link:   &devices.Link{Name: "guardian"},
	}

	idGenerator := kawasaki.NewSequentialIDGenerator(time.Now().UnixNano())

	portPool, err := ports.NewPool(uint32(*portPoolStart), uint32(*portPoolSize), ports.State{})
	if err != nil {
		log.Fatal("invalid pool range", err)
	}

	switch networkModulePath {
	case "":
		return kawasaki.New(
			kawasaki.NewManager(runner, "/var/run/netns"),
			kawasaki.SpecParserFunc(kawasaki.ParseSpec),
			subnets.NewPool(networkPoolCIDR),
			kawasaki.NewConfigCreator(idGenerator, interfacePrefix, chainPrefix, externalIP),
			kawasaki.NewConfigurer(
				hostConfigurer,
				containerCfgApplier,
				iptablesMgr,
				&netns.Execer{},
			),
			propManager,
			iptables.NewPortForwarder(runner),
			portPool,
		)
	default:
		if _, err := os.Stat(networkModulePath); err != nil {
			log.Fatal("failed-to-stat-network-module", err)
			return nil
		}

		return gardener.ForeignNetworkAdaptor{
			ForeignNetworker: genclient.New(networkModulePath),
		}
	}
}
func validateFlags(logger lager.Logger) {
	_, portString, err := net.SplitHostPort(*listenAddress)
	if err != nil {
		logger.Fatal("failed-invalid-listen-address", err)
	}

	_, err = net.LookupPort("tcp", portString)
	if err != nil {
		logger.Fatal("failed-invalid-listen-port", err)
	}
}
func initializeLockMaintainer(logger lager.Logger) ifrit.Runner {
	serviceClient := initializeServiceClient(logger)

	uuid, err := uuid.NewV4()
	if err != nil {
		logger.Fatal("Couldn't generate uuid", err)
	}

	return serviceClient.NewTPSWatcherLockRunner(logger, uuid.String(), *lockRetryInterval)
}
func initializeConsulSession(logger lager.Logger) *consuladapter.Session {
	client, err := consuladapter.NewClient(*consulCluster)
	if err != nil {
		logger.Fatal("new-client-failed", err)
	}

	sessionMgr := consuladapter.NewSessionManager(client)
	consulSession, err := consuladapter.NewSession("converger", *lockTTL, client, sessionMgr)
	if err != nil {
		logger.Fatal("consul-session-failed", err)
	}

	return consulSession
}
func (app Application) RetrieveUAAPublicKey(logger lager.Logger) {
	zonedUAAClient := uaa.NewZonedUAAClient(app.env.UAAClientID, app.env.UAAClientSecret, app.env.VerifySSL, "")

	key, err := zonedUAAClient.GetTokenKey(app.env.UAAHost)
	if err != nil {
		logger.Fatal("uaa-get-token-key-errored", err)
	}

	UAAPublicKey = key
	logger.Info("uaa-public-key", lager.Data{
		"key": UAAPublicKey,
	})
}
func ensureRunning(instance *redis.Instance, repo *redis.LocalRepository, processController *redis.OSProcessController, logger lager.Logger) {
	configPath := repo.InstanceConfigPath(instance.ID)
	instanceDataDir := repo.InstanceDataDir(instance.ID)
	pidfilePath := repo.InstancePidFilePath(instance.ID)
	logfilePath := repo.InstanceLogFilePath(instance.ID)

	err := processController.EnsureRunning(instance, configPath, instanceDataDir, pidfilePath, logfilePath)
	if err != nil {
		logger.Fatal("Error starting instance", err, lager.Data{
			"instance": instance.ID,
		})
	}
}
func (app Application) ConfigureSMTP(logger lager.Logger) {
	if app.env.TestMode {
		return
	}

	mailClient := app.mother.MailClient()
	err := mailClient.Connect(logger)
	if err != nil {
		logger.Fatal("smtp-connect-errored", err)
	}

	err = mailClient.Hello()
	if err != nil {
		logger.Fatal("smtp-hello-errored", err)
	}

	startTLSSupported, _ := mailClient.Extension("STARTTLS")

	mailClient.Quit()

	if !startTLSSupported && app.env.SMTPTLS {
		logger.Fatal("smtp-config-mismatch", errors.New(`SMTP TLS configuration mismatch: Configured to use TLS over SMTP, but the mail server does not support the "STARTTLS" extension.`))
	}

	if startTLSSupported && !app.env.SMTPTLS {
		logger.Fatal("smtp-config-mismatch", errors.New(`SMTP TLS configuration mismatch: Not configured to use TLS over SMTP, but the mail server does support the "STARTTLS" extension.`))
	}
}
func initializeReceptorBBS(etcdOptions *etcdstoreadapter.ETCDOptions, logger lager.Logger) Bbs.ReceptorBBS {
	workPool, err := workpool.NewWorkPool(100)
	if err != nil {
		logger.Fatal("failed-to-construct-etcd-adapter-workpool", err, lager.Data{"num-workers": 100}) // should never happen
	}

	etcdAdapter, err := etcdstoreadapter.New(etcdOptions, workPool)
	if err != nil {
		logger.Fatal("failed-to-construct-etcd-tls-client", err)
	}

	client, err := consuladapter.NewClient(*consulCluster)
	if err != nil {
		logger.Fatal("new-client-failed", err)
	}

	sessionMgr := consuladapter.NewSessionManager(client)
	consulSession, err := consuladapter.NewSession("receptor", *lockTTL, client, sessionMgr)
	if err != nil {
		logger.Fatal("consul-session-failed", err)
	}

	return Bbs.NewReceptorBBS(etcdAdapter, consulSession, *taskHandlerAddress, clock.NewClock(), logger)
}
func initializeLocketClient(logger lager.Logger) locket.Client {
	client, err := consuladapter.NewClient(*consulCluster)
	if err != nil {
		logger.Fatal("new-client-failed", err)
	}

	sessionMgr := consuladapter.NewSessionManager(client)
	consulSession, err := consuladapter.NewSessionNoChecks(*sessionName, *lockTTL, client, sessionMgr)
	if err != nil {
		logger.Fatal("consul-session-failed", err)
	}

	return locket.NewClient(consulSession, clock.NewClock(), logger)
}
func initializeServiceClient(logger lager.Logger) nsync.ServiceClient {
	client, err := consuladapter.NewClient(*consulCluster)
	if err != nil {
		logger.Fatal("new-client-failed", err)
	}

	sessionMgr := consuladapter.NewSessionManager(client)
	consulSession, err := consuladapter.NewSession("nsync-bulker", *lockTTL, client, sessionMgr)
	if err != nil {
		logger.Fatal("consul-session-failed", err)
	}

	return nsync.NewServiceClient(consulSession, clock.NewClock())
}
func copyConfigFile(instance *redis.Instance, repo *redis.LocalRepository, logger lager.Logger) {
	err := repo.EnsureDirectoriesExist(instance)
	if err != nil {
		logger.Fatal("Error creating instance directories", err, lager.Data{
			"instance": instance.ID,
		})
	}

	err = repo.WriteConfigFile(instance)
	if err != nil {
		logger.Fatal("Error writing redis config", err, lager.Data{
			"instance": instance.ID,
		})
	}
}
func setupRouteFetcher(logger lager.Logger, c *config.Config, registry rregistry.RegistryInterface) *route_fetcher.RouteFetcher {
	clock := clock.NewClock()

	tokenFetcher := newTokenFetcher(logger, clock, c)
	_, err := tokenFetcher.FetchToken(false)
	if err != nil {
		logger.Fatal("unable-to-fetch-token", err)
	}

	routingApiUri := fmt.Sprintf("%s:%d", c.RoutingApi.Uri, c.RoutingApi.Port)
	routingApiClient := routing_api.NewClient(routingApiUri)

	routeFetcher := route_fetcher.NewRouteFetcher(logger, tokenFetcher, registry, c, routingApiClient, 1, clock)
	return routeFetcher
}
func LockDBAndMigrate(logger lager.Logger, sqlDriver string, sqlDataSource string) (db.Conn, error) {
	var err error
	var dbLockConn db.Conn
	var dbConn db.Conn

	for {
		dbLockConn, err = db.WrapWithError(sql.Open(sqlDriver, sqlDataSource))
		if err != nil {
			if strings.Contains(err.Error(), " dial ") {
				logger.Error("failed-to-open-db-retrying", err)
				time.Sleep(5 * time.Second)
				continue
			}
			return nil, err
		}
		break
	}

	lockName := crc32.ChecksumIEEE([]byte(sqlDriver + sqlDataSource))

	for {
		_, err = dbLockConn.Exec(`select pg_advisory_lock($1)`, lockName)
		if err != nil {
			logger.Error("failed-to-acquire-lock-retrying", err)
			time.Sleep(5 * time.Second)
			continue
		}

		logger.Info("migration-lock-acquired")

		migrations := Translogrifier(logger, Migrations)
		dbConn, err = db.WrapWithError(migration.OpenWith(sqlDriver, sqlDataSource, migrations, safeGetVersion, safeSetVersion))
		if err != nil {
			logger.Fatal("failed-to-run-migrations", err)
		}

		_, err = dbLockConn.Exec(`select pg_advisory_unlock($1)`, lockName)
		if err != nil {
			logger.Error("failed-to-release-lock", err)
		}

		dbLockConn.Close()
		break
	}

	return dbConn, nil
}
func mountPoint(logger lager.Logger, path string) string {
	dfOut := new(bytes.Buffer)

	df := exec.Command("df", path)
	df.Stdout = dfOut
	df.Stderr = os.Stderr

	err := df.Run()
	if err != nil {
		logger.Fatal("failed-to-get-mount-info", err)
	}

	dfOutputWords := strings.Split(string(dfOut.Bytes()), " ")
	return strings.Trim(dfOutputWords[len(dfOutputWords)-1], "\n")
}
func initializeBBSClient(logger lager.Logger) bbs.Client {
	bbsURL, err := url.Parse(*bbsAddress)
	if err != nil {
		logger.Fatal("Invalid BBS URL", err)
	}

	if bbsURL.Scheme != "https" {
		return bbs.NewClient(*bbsAddress)
	}

	bbsClient, err := bbs.NewSecureClient(*bbsAddress, *bbsCACert, *bbsClientCert, *bbsClientKey, *bbsClientSessionCacheSize, *bbsMaxIdleConnsPerHost)
	if err != nil {
		logger.Fatal("Failed to configure secure BBS client", err)
	}
	return bbsClient
}
func NewConfigurer(logger lager.Logger, tcpLoadBalancer string, tcpLoadBalancerBaseCfg string, tcpLoadBalancerCfg string) RouterConfigurer {
	switch tcpLoadBalancer {
	case HaProxyConfigurer:
		routerHostInfo, err := haproxy.NewHaProxyConfigurer(logger, tcpLoadBalancerBaseCfg, tcpLoadBalancerCfg)
		if err != nil {
			logger.Fatal("could not create tcp load balancer", err, lager.Data{"tcp_load_balancer": tcpLoadBalancer})
			return nil
		}
		return routerHostInfo
	default:
		logger.Fatal("not-supported", errors.New("unsupported tcp load balancer"), lager.Data{"tcp_load_balancer": tcpLoadBalancer})
		return nil
	}
}