func NewPresence( logger lager.Logger, consulClient consuladapter.Client, lockKey string, lockValue []byte, clock clock.Clock, retryInterval time.Duration, lockTTL time.Duration, ) Presence { uuid, err := uuid.NewV4() if err != nil { logger.Fatal("create-uuid-failed", err) } session, err := NewSessionNoChecks(uuid.String(), lockTTL, consulClient) if err != nil { logger.Fatal("consul-session-failed", err) } return Presence{ consul: session, key: lockKey, value: lockValue, clock: clock, retryInterval: retryInterval, logger: logger, } }
func NewLock( logger lager.Logger, consulClient consuladapter.Client, lockKey string, lockValue []byte, clock clock.Clock, retryInterval time.Duration, lockTTL time.Duration, ) Lock { lockMetricName := strings.Replace(lockKey, "/", "-", -1) uuid, err := uuid.NewV4() if err != nil { logger.Fatal("create-uuid-failed", err) } session, err := NewSessionNoChecks(uuid.String(), lockTTL, consulClient) if err != nil { logger.Fatal("consul-session-failed", err) } return Lock{ consul: session, key: lockKey, value: lockValue, clock: clock, retryInterval: retryInterval, logger: logger, lockAcquiredMetric: metric.Metric("LockHeld." + lockMetricName), lockUptimeMetric: metric.Duration("LockHeldDuration." + lockMetricName), } }
func connectToNatsServer(logger lager.Logger, c *config.Config, startMsg chan<- struct{}) *nats.Conn { var natsClient *nats.Conn var natsHost atomic.Value var err error options := natsOptions(logger, c, &natsHost, startMsg) attempts := 3 for attempts > 0 { natsClient, err = options.Connect() if err == nil { break } else { attempts-- time.Sleep(100 * time.Millisecond) } } if err != nil { logger.Fatal("nats-connection-error", err) } var natsHostStr string natsUrl, err := url.Parse(natsClient.ConnectedUrl()) if err == nil { natsHostStr = natsUrl.Host } logger.Info("Successfully-connected-to-nats", lager.Data{"host": natsHostStr}) natsHost.Store(natsHostStr) return natsClient }
func newUaaClient(logger lager.Logger, clock clock.Clock, c *config.Config) uaa_client.Client { if c.RoutingApi.AuthDisabled { logger.Info("using-noop-token-fetcher") return uaa_client.NewNoOpUaaClient() } if c.OAuth.Port == -1 { logger.Fatal("tls-not-enabled", errors.New("GoRouter requires TLS enabled to get OAuth token"), lager.Data{"token-endpoint": c.OAuth.TokenEndpoint, "port": c.OAuth.Port}) } tokenURL := fmt.Sprintf("https://%s:%d", c.OAuth.TokenEndpoint, c.OAuth.Port) cfg := &uaa_config.Config{ UaaEndpoint: tokenURL, SkipVerification: c.OAuth.SkipSSLValidation, ClientName: c.OAuth.ClientName, ClientSecret: c.OAuth.ClientSecret, CACerts: c.OAuth.CACerts, MaxNumberOfRetries: c.TokenFetcherMaxRetries, RetryInterval: c.TokenFetcherRetryInterval, ExpirationBufferInSec: c.TokenFetcherExpirationBufferTimeInSeconds, } uaaClient, err := uaa_client.NewClient(logger, cfg, clock) if err != nil { logger.Fatal("initialize-token-fetcher-error", err) } return uaaClient }
func appendSSLConnectionStringParam(logger lager.Logger, driverName, databaseConnectionString, sqlCACertFile string) string { switch driverName { case "mysql": if sqlCACertFile != "" { certBytes, err := ioutil.ReadFile(sqlCACertFile) if err != nil { logger.Fatal("failed-to-read-sql-ca-file", err) } caCertPool := x509.NewCertPool() if ok := caCertPool.AppendCertsFromPEM(certBytes); !ok { logger.Fatal("failed-to-parse-sql-ca", err) } tlsConfig := &tls.Config{ InsecureSkipVerify: false, RootCAs: caCertPool, } mysql.RegisterTLSConfig("bbs-tls", tlsConfig) databaseConnectionString = fmt.Sprintf("%s?tls=bbs-tls", databaseConnectionString) } case "postgres": if sqlCACertFile == "" { databaseConnectionString = fmt.Sprintf("%s?sslmode=disable", databaseConnectionString) } else { databaseConnectionString = fmt.Sprintf("%s?sslmode=verify-ca&sslrootcert=%s", databaseConnectionString, sqlCACertFile) } } return databaseConnectionString }
func createCrypto(logger lager.Logger, secret string) *secure.AesGCM { // generate secure encryption key using key derivation function (pbkdf2) secretPbkdf2 := secure.NewPbkdf2([]byte(secret), 16) crypto, err := secure.NewAesGCM(secretPbkdf2) if err != nil { logger.Fatal("error-creating-route-service-crypto", err) } return crypto }
func initializeETCDDB(logger lager.Logger, etcdClient *etcd.Client) *etcddb.ETCDDB { key, keys, err := encryptionFlags.Parse() if err != nil { logger.Fatal("cannot-setup-encryption", err) } keyManager, err := encryption.NewKeyManager(key, keys) if err != nil { logger.Fatal("cannot-setup-encryption", err) } cryptor := encryption.NewCryptor(keyManager, rand.Reader) return etcddb.NewETCD(format.ENCRYPTED_PROTO, 1000, 1000, 1*time.Minute, cryptor, etcddb.NewStoreClient(etcdClient), clock.NewClock()) }
func initializeSQLDB(logger lager.Logger, sqlConn *sql.DB) *sqldb.SQLDB { key, keys, err := encryptionFlags.Parse() if err != nil { logger.Fatal("cannot-setup-encryption", err) } keyManager, err := encryption.NewKeyManager(key, keys) if err != nil { logger.Fatal("cannot-setup-encryption", err) } cryptor := encryption.NewCryptor(keyManager, rand.Reader) return sqldb.NewSQLDB(sqlConn, 1000, 1000, format.ENCODED_PROTO, cryptor, guidprovider.DefaultGuidProvider, clock.NewClock(), databaseDriver) }
func initializeAuctioneerClient(logger lager.Logger) auctioneer.Client { if *auctioneerAddress == "" { logger.Fatal("auctioneer-address-validation-failed", errors.New("auctioneerAddress is required")) } if *auctioneerCACert != "" || *auctioneerClientCert != "" || *auctioneerClientKey != "" { client, err := auctioneer.NewSecureClient(*auctioneerAddress, *auctioneerCACert, *auctioneerClientCert, *auctioneerClientKey, *auctioneerRequireTLS) if err != nil { logger.Fatal("failed-to-construct-auctioneer-client", err) } return client } return auctioneer.NewClient(*auctioneerAddress) }
func setupRouteFetcher(logger lager.Logger, c *config.Config, registry rregistry.RegistryInterface) *route_fetcher.RouteFetcher { clock := clock.NewClock() uaaClient := newUaaClient(logger, clock, c) _, err := uaaClient.FetchToken(true) if err != nil { logger.Fatal("unable-to-fetch-token", err) } routingApiUri := fmt.Sprintf("%s:%d", c.RoutingApi.Uri, c.RoutingApi.Port) routingApiClient := routing_api.NewClient(routingApiUri, false) routeFetcher := route_fetcher.NewRouteFetcher(logger, uaaClient, registry, c, routingApiClient, 1, clock) return routeFetcher }
func initializeBBSClient(logger lager.Logger, bbsClientHTTPTimeout time.Duration) bbs.InternalClient { bbsURL, err := url.Parse(bbsAddress) if err != nil { logger.Fatal("Invalid BBS URL", err) } if bbsURL.Scheme != "https" { return bbs.NewClient(bbsAddress) } cfhttp.Initialize(bbsClientHTTPTimeout) bbsClient, err := bbs.NewSecureSkipVerifyClient(bbsAddress, bbsClientCert, bbsClientKey, 1, 25000) if err != nil { logger.Fatal("Failed to configure secure BBS client", err) } return bbsClient }
func initializeRegistrationRunner(logger lager.Logger, consulClient consuladapter.Client, listenAddress string, clock clock.Clock) ifrit.Runner { _, portString, err := net.SplitHostPort(listenAddress) if err != nil { logger.Fatal("failed-invalid-listen-address", err) } portNum, err := net.LookupPort("tcp", portString) if err != nil { logger.Fatal("failed-invalid-listen-port", err) } registration := &api.AgentServiceRegistration{ Name: "cc-uploader", Port: portNum, Check: &api.AgentServiceCheck{ TTL: "3s", }, } return locket.NewRegistrationRunner(logger, registration, consulClient, locket.RetryInterval, clock) }
func createSubscriber( logger lager.Logger, c *config.Config, natsClient *nats.Conn, registry rregistry.RegistryInterface, startMsgChan chan struct{}, ) ifrit.Runner { guid, err := uuid.GenerateUUID() if err != nil { logger.Fatal("failed-to-generate-uuid", err) } opts := &mbus.SubscriberOpts{ ID: fmt.Sprintf("%d-%s", c.Index, guid), MinimumRegisterIntervalInSeconds: int(c.StartResponseDelayInterval.Seconds()), PruneThresholdInSeconds: int(c.DropletStaleThreshold.Seconds()), } return mbus.NewSubscriber(logger.Session("subscriber"), natsClient, registry, startMsgChan, opts) }
func natsOptions(logger lager.Logger, c *config.Config, natsHost *atomic.Value, startMsg chan<- struct{}) nats.Options { natsServers := c.NatsServers() options := nats.DefaultOptions options.Servers = natsServers options.PingInterval = c.NatsClientPingInterval options.ClosedCB = func(conn *nats.Conn) { logger.Fatal("nats-connection-closed", errors.New("unexpected close"), lager.Data{"last_error": conn.LastError()}) } options.DisconnectedCB = func(conn *nats.Conn) { hostStr := natsHost.Load().(string) logger.Info("nats-connection-disconnected", lager.Data{"nats-host": hostStr}) } options.ReconnectedCB = func(conn *nats.Conn) { natsURL, err := url.Parse(conn.ConnectedUrl()) natsHostStr := "" if err != nil { logger.Error("nats-url-parse-error", err) } else { natsHostStr = natsURL.Host } natsHost.Store(natsHostStr) data := lager.Data{"nats-host": natsHostStr} logger.Info("nats-connection-reconnected", data) startMsg <- struct{}{} } // in the case of suspending pruning, we need to ensure we retry reconnects indefinitely if c.SuspendPruningIfNatsUnavailable { options.MaxReconnect = -1 } return options }
func initializeLockMaintainer(logger lager.Logger, serviceClient bbs.ServiceClient) ifrit.Runner { uuid, err := uuid.NewV4() if err != nil { logger.Fatal("Couldn't generate uuid", err) } if *advertiseURL == "" { logger.Fatal("Advertise URL must be specified", nil) } bbsPresence := models.NewBBSPresence(uuid.String(), *advertiseURL) lockMaintainer, err := serviceClient.NewBBSLockRunner(logger, &bbsPresence, *lockRetryInterval, *lockTTL) if err != nil { logger.Fatal("Couldn't create lock maintainer", err) } return lockMaintainer }
// initializeEtcdStoreClient constructs an etcd store client, using a TLS
// transport when the options demand SSL; any construction failure is fatal.
func initializeEtcdStoreClient(logger lager.Logger, etcdOptions *etcddb.ETCDOptions) etcddb.StoreClient {
	var etcdClient *etcdclient.Client
	var tr *http.Transport

	if etcdOptions.IsSSL {
		// Client cert and key are both mandatory in TLS mode.
		if etcdOptions.CertFile == "" || etcdOptions.KeyFile == "" {
			logger.Fatal("failed-to-construct-etcd-tls-client", errors.New("Require both cert and key path"))
		}

		var err error
		etcdClient, err = etcdclient.NewTLSClient(etcdOptions.ClusterUrls, etcdOptions.CertFile, etcdOptions.KeyFile, etcdOptions.CAFile)
		if err != nil {
			logger.Fatal("failed-to-construct-etcd-tls-client", err)
		}

		tlsCert, err := tls.LoadX509KeyPair(etcdOptions.CertFile, etcdOptions.KeyFile)
		if err != nil {
			logger.Fatal("failed-to-construct-etcd-tls-client", err)
		}

		tlsConfig := &tls.Config{
			Certificates: []tls.Certificate{tlsCert},
			// NOTE(review): server cert verification is disabled on this
			// transport; trust appears to rely on AddRootCA below — confirm
			// this combination is intended.
			InsecureSkipVerify: true,
			ClientSessionCache: tls.NewLRUClientSessionCache(etcdOptions.ClientSessionCacheSize),
		}
		// Replace the client's transport so the session cache and idle-conn
		// limits take effect.
		tr = &http.Transport{
			TLSClientConfig:     tlsConfig,
			Dial:                etcdClient.DefaultDial,
			MaxIdleConnsPerHost: etcdOptions.MaxIdleConnsPerHost,
		}
		etcdClient.SetTransport(tr)
		etcdClient.AddRootCA(etcdOptions.CAFile)
	} else {
		etcdClient = etcdclient.NewClient(etcdOptions.ClusterUrls)
	}
	etcdClient.SetConsistency(etcdclient.STRONG_CONSISTENCY)

	return etcddb.NewStoreClient(etcdClient)
}
func initializeAuctioneerClient(logger lager.Logger) auctioneer.Client { if *auctioneerAddress == "" { logger.Fatal("auctioneer-address-validation-failed", errors.New("auctioneerAddress is required")) } return auctioneer.NewClient(*auctioneerAddress) }
Expect(err).NotTo(HaveOccurred()) expectedLRPVariation = float64(expectedLRPCount) * errorTolerance for k, v := range expectedActualLRPCounts { expectedActualLRPVariations[k] = float64(v) * errorTolerance } } }) var _ = AfterSuite(func() { if databaseConnectionString == "" { cleanupETCD() } else { sqlConn, err := sql.Open(databaseDriver, databaseConnectionString) if err != nil { logger.Fatal("failed-to-open-sql", err) } sqlConn.SetMaxOpenConns(1) sqlConn.SetMaxIdleConns(1) err = sqlConn.Ping() Expect(err).NotTo(HaveOccurred()) cleanupSQLDB(sqlConn) } }) type ETCDFlags struct { etcdCertFile string etcdKeyFile string etcdCaFile string clusterUrls string
// wireVolumeCreator selects and wires the rootfs/volume creator:
//   - empty graphRoot      -> no-op creator
//   - image plugin flagged -> external image plugin
//   - otherwise            -> docker graph + quota-enforcing aufs stack
//
// Any filesystem or graph-construction failure is fatal.
func (cmd *GuardianCommand) wireVolumeCreator(logger lager.Logger, graphRoot string, insecureRegistries, persistentImages []string) gardener.VolumeCreator {
	if graphRoot == "" {
		return gardener.NoopVolumeCreator{}
	}

	if cmd.Bin.ImagePlugin.Path() != "" {
		defaultRootFS, err := url.Parse(cmd.Containers.DefaultRootFSDir.Path())
		if err != nil {
			logger.Fatal("failed-to-parse-default-rootfs", err)
		}
		return imageplugin.New(cmd.Bin.ImagePlugin.Path(), linux_command_runner.New(), defaultRootFS, idMappings)
	}

	logger = logger.Session("volume-creator", lager.Data{"graphRoot": graphRoot})
	runner := &logging.Runner{CommandRunner: linux_command_runner.New(), Logger: logger}

	if err := os.MkdirAll(graphRoot, 0755); err != nil {
		logger.Fatal("failed-to-create-graph-directory", err)
	}

	dockerGraphDriver, err := graphdriver.New(graphRoot, nil)
	if err != nil {
		logger.Fatal("failed-to-construct-graph-driver", err)
	}

	backingStoresPath := filepath.Join(graphRoot, "backing_stores")
	if err := os.MkdirAll(backingStoresPath, 0660); err != nil {
		logger.Fatal("failed-to-mkdir-backing-stores", err)
	}

	// Wrap the docker graph driver with per-container disk quotas (aufs over
	// loop-mounted backing stores), retrying mount operations with constant
	// backoff.
	quotaedGraphDriver := &quotaed_aufs.QuotaedDriver{
		GraphDriver: dockerGraphDriver,
		Unmount:     quotaed_aufs.Unmount,
		BackingStoreMgr: &quotaed_aufs.BackingStore{
			RootPath: backingStoresPath,
			Logger:   logger.Session("backing-store-mgr"),
		},
		LoopMounter: &quotaed_aufs.Loop{
			Retrier: retrier.New(retrier.ConstantBackoff(200, 500*time.Millisecond), nil),
			Logger:  logger.Session("loop-mounter"),
		},
		Retrier:  retrier.New(retrier.ConstantBackoff(200, 500*time.Millisecond), nil),
		RootPath: graphRoot,
		Logger:   logger.Session("quotaed-driver"),
	}

	dockerGraph, err := graph.NewGraph(graphRoot, quotaedGraphDriver)
	if err != nil {
		logger.Fatal("failed-to-construct-graph", err)
	}

	var cake layercake.Cake = &layercake.Docker{
		Graph:  dockerGraph,
		Driver: quotaedGraphDriver,
	}

	if cake.DriverName() == "aufs" {
		cake = &layercake.AufsCake{
			Cake:      cake,
			Runner:    runner,
			GraphRoot: graphRoot,
		}
	}

	// Fetch rootfs layers either locally or from a remote registry, with
	// retries around the whole fetch.
	repoFetcher := repository_fetcher.Retryable{
		RepositoryFetcher: &repository_fetcher.CompositeFetcher{
			LocalFetcher: &repository_fetcher.Local{
				Cake:              cake,
				DefaultRootFSPath: cmd.Containers.DefaultRootFSDir.Path(),
				IDProvider:        repository_fetcher.LayerIDProvider{},
			},
			RemoteFetcher: repository_fetcher.NewRemote(
				logger,
				cmd.Docker.Registry,
				cake,
				distclient.NewDialer(insecureRegistries),
				repository_fetcher.VerifyFunc(repository_fetcher.Verify),
			),
		},
		Logger: logger,
	}

	// Translate uids/gids in fetched layers into the container id mappings.
	rootFSNamespacer := &rootfs_provider.UidNamespacer{
		Translator: rootfs_provider.NewUidTranslator(
			idMappings, // uid
			idMappings, // gid
		),
	}

	retainer := cleaner.NewRetainer()
	ovenCleaner := cleaner.NewOvenCleaner(retainer,
		cleaner.NewThreshold(int64(cmd.Graph.CleanupThresholdInMegabytes)*1024*1024),
	)

	imageRetainer := &repository_fetcher.ImageRetainer{
		GraphRetainer:             retainer,
		DirectoryRootfsIDProvider: repository_fetcher.LayerIDProvider{},
		DockerImageIDFetcher:      repoFetcher,

		NamespaceCacheKey: rootFSNamespacer.CacheKey(),
		Logger:            logger,
	}

	// spawn off in a go function to avoid blocking startup
	// worst case is if an image is immediately created and deleted faster than
	// we can retain it we'll garbage collect it when we shouldn't. This
	// is an OK trade-off for not having garden startup block on dockerhub.
	go imageRetainer.Retain(persistentImages)

	layerCreator := rootfs_provider.NewLayerCreator(cake, rootfs_provider.SimpleVolumeCreator{}, rootFSNamespacer)

	// Quota accounting: base image size plus the container's aufs diff layer.
	quotaManager := &quota_manager.AUFSQuotaManager{
		BaseSizer: quota_manager.NewAUFSBaseSizer(cake),
		DiffSizer: &quota_manager.AUFSDiffSizer{
			AUFSDiffPathFinder: quotaedGraphDriver,
		},
	}

	return rootfs_provider.NewCakeOrdinator(cake, repoFetcher, layerCreator,
		rootfs_provider.NewMetricsAdapter(quotaManager.GetUsage, quotaedGraphDriver.GetMntPath), ovenCleaner)
}
// New constructs a GardenServer that serves the garden API on the given
// network/address, delegating container operations to backend. Failure to
// build the route multiplexer is fatal.
func New(
	listenNetwork, listenAddr string,
	containerGraceTime time.Duration,
	backend garden.Backend,
	logger lager.Logger,
) *GardenServer {
	s := &GardenServer{
		logger: logger.Session("garden-server"),

		listenNetwork: listenNetwork,
		listenAddr:    listenAddr,

		containerGraceTime: containerGraceTime,
		backend:            backend,

		// stopping signals (by close) that the server is shutting down;
		// handling counts in-flight connections for graceful drain.
		stopping: make(chan bool),
		handling: new(sync.WaitGroup),

		// conns tracks idle connections so they can be closed on stop.
		conns: make(map[net.Conn]net.Conn),

		streamer: streamer.New(time.Minute),

		destroys:  make(map[string]struct{}),
		destroysL: new(sync.Mutex),
	}

	// One handler per garden API route.
	handlers := map[string]http.Handler{
		routes.Ping:                     http.HandlerFunc(s.handlePing),
		routes.Capacity:                 http.HandlerFunc(s.handleCapacity),
		routes.Create:                   http.HandlerFunc(s.handleCreate),
		routes.Destroy:                  http.HandlerFunc(s.handleDestroy),
		routes.List:                     http.HandlerFunc(s.handleList),
		routes.Stop:                     http.HandlerFunc(s.handleStop),
		routes.StreamIn:                 http.HandlerFunc(s.handleStreamIn),
		routes.StreamOut:                http.HandlerFunc(s.handleStreamOut),
		routes.CurrentBandwidthLimits:   http.HandlerFunc(s.handleCurrentBandwidthLimits),
		routes.CurrentCPULimits:         http.HandlerFunc(s.handleCurrentCPULimits),
		routes.CurrentDiskLimits:        http.HandlerFunc(s.handleCurrentDiskLimits),
		routes.CurrentMemoryLimits:      http.HandlerFunc(s.handleCurrentMemoryLimits),
		routes.NetIn:                    http.HandlerFunc(s.handleNetIn),
		routes.NetOut:                   http.HandlerFunc(s.handleNetOut),
		routes.Info:                     http.HandlerFunc(s.handleInfo),
		routes.BulkInfo:                 http.HandlerFunc(s.handleBulkInfo),
		routes.BulkMetrics:              http.HandlerFunc(s.handleBulkMetrics),
		routes.Run:                      http.HandlerFunc(s.handleRun),
		routes.Stdout:                   streamer.HandlerFunc(s.streamer.ServeStdout),
		routes.Stderr:                   streamer.HandlerFunc(s.streamer.ServeStderr),
		routes.Attach:                   http.HandlerFunc(s.handleAttach),
		routes.Metrics:                  http.HandlerFunc(s.handleMetrics),
		routes.Properties:               http.HandlerFunc(s.handleProperties),
		routes.Property:                 http.HandlerFunc(s.handleProperty),
		routes.SetProperty:              http.HandlerFunc(s.handleSetProperty),
		routes.RemoveProperty:           http.HandlerFunc(s.handleRemoveProperty),
		routes.SetGraceTime:             http.HandlerFunc(s.handleSetGraceTime),
	}

	mux, err := rata.NewRouter(routes.Routes, handlers)
	if err != nil {
		logger.Fatal("failed-to-initialize-rata", err)
	}

	conLogger := logger.Session("connection")

	s.server = &http.Server{
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			mux.ServeHTTP(w, r)
		}),

		// Track connection lifecycle so Stop can drain in-flight requests
		// and close idle connections.
		ConnState: func(conn net.Conn, state http.ConnState) {
			switch state {
			case http.StateNew:
				conLogger.Debug("open", lager.Data{"local_addr": conn.LocalAddr(), "remote_addr": conn.RemoteAddr()})
				s.handling.Add(1)

			case http.StateActive:
				// No longer idle: remove from the closable-idle set.
				s.mu.Lock()
				delete(s.conns, conn)
				s.mu.Unlock()

			case http.StateIdle:
				select {
				case <-s.stopping:
					// Shutting down: don't keep idle keep-alives open.
					conn.Close()
				default:
					s.mu.Lock()
					s.conns[conn] = conn
					s.mu.Unlock()
				}

			case http.StateHijacked, http.StateClosed:
				s.mu.Lock()
				delete(s.conns, conn)
				s.mu.Unlock()

				conLogger.Debug("closed", lager.Data{"local_addr": conn.LocalAddr(), "remote_addr": conn.RemoteAddr()})

				s.handling.Done()
			}
		},
	}

	return s
}