func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	cf_http.Initialize(*communicationTimeout)

	logger, reconfigurableSink := cf_lager.New("file-server")
	initializeDropsonde(logger)

	members := grouper.Members{
		{"file server", initializeServer(logger)},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)
	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("ready")

	err := <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	cf_http.Initialize(*communicationTimeout)

	logger, reconfigurableSink := cf_lager.New("auctioneer")
	initializeDropsonde(logger)

	if err := validateBBSAddress(); err != nil {
		logger.Fatal("invalid-bbs-address", err)
	}

	consulClient, err := consuladapter.NewClientFromUrl(*consulCluster)
	if err != nil {
		logger.Fatal("new-client-failed", err)
	}

	port, err := strconv.Atoi(strings.Split(*listenAddr, ":")[1])
	if err != nil {
		logger.Fatal("invalid-port", err)
	}

	clock := clock.NewClock()
	auctioneerServiceClient := auctioneer.NewServiceClient(consulClient, clock)

	auctionRunner := initializeAuctionRunner(logger, *cellStateTimeout, initializeBBSClient(logger), *startingContainerWeight)
	auctionServer := initializeAuctionServer(logger, auctionRunner)
	lockMaintainer := initializeLockMaintainer(logger, auctioneerServiceClient, port)
	registrationRunner := initializeRegistrationRunner(logger, consulClient, clock, port)

	members := grouper.Members{
		{"lock-maintainer", lockMaintainer},
		{"auction-runner", auctionRunner},
		{"auction-server", auctionServer},
		{"registration-runner", registrationRunner},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)
	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err = <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
func (cmd *WebCommand) Execute(args []string) error {
	tsa := &tsacmd.TSACommand{
		BindIP:             cmd.TSA.BindIP,
		BindPort:           cmd.TSA.BindPort,
		HostKeyPath:        cmd.TSA.HostKeyPath,
		AuthorizedKeysPath: cmd.TSA.AuthorizedKeysPath,
		HeartbeatInterval:  cmd.TSA.HeartbeatInterval,
	}

	cmd.populateTSAFlagsFromATCFlags(tsa)

	atcRunner, err := cmd.ATCCommand.Runner(args)
	if err != nil {
		return err
	}

	tsaRunner, err := tsa.Runner(args)
	if err != nil {
		return err
	}

	runner := sigmon.New(grouper.NewParallel(os.Interrupt, grouper.Members{
		{"atc", atcRunner},
		{"tsa", tsaRunner},
	}))

	return <-ifrit.Invoke(runner).Wait()
}
func main() {
	logger := configureLogger()

	flag.Parse()
	validateFlags(logger)

	router := configureRouter(logger)

	var server ifrit.Runner
	server = http_server.New(*listenAddress, router)

	members := grouper.Members{
		{"server", server},
	}

	group := grouper.NewOrdered(os.Interrupt, members)
	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err := <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	logger, reconfigurableSink := cf_lager.New("tps-listener")
	initializeDropsonde(logger)

	noaaClient := noaa.NewConsumer(*trafficControllerURL, &tls.Config{InsecureSkipVerify: *skipSSLVerification}, nil)
	defer noaaClient.Close()

	apiHandler := initializeHandler(logger, noaaClient, *maxInFlightRequests, initializeBBSClient(logger))

	members := grouper.Members{
		{"api", http_server.New(*listenAddr, apiHandler)},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)
	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err := <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
func ServeAPI(l logger.Logger, conf *config.Config) {
	store := connectToStore(l, conf)

	apiHandler, err := handlers.New(l, store, buildTimeProvider(l))
	if err != nil {
		l.Error("initialize-handler.failed", err)
		panic(err)
	}

	handler := handlers.BasicAuthWrap(apiHandler, conf.APIServerUsername, conf.APIServerPassword)

	listenAddr := fmt.Sprintf("%s:%d", conf.APIServerAddress, conf.APIServerPort)

	members := grouper.Members{
		{"api", http_server.New(listenAddr, handler)},
	}

	group := grouper.NewOrdered(os.Interrupt, members)
	monitor := ifrit.Invoke(sigmon.New(group))

	l.Info("started")
	l.Info(listenAddr)

	err = <-monitor.Wait()
	if err != nil {
		l.Error("exited", err)
		os.Exit(1)
	}

	l.Info("exited")
	os.Exit(0)
}
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	cf_http.Initialize(*communicationTimeout)

	logger, reconfigurableSink := cf_lager.New("auctioneer")
	initializeDropsonde(logger)

	if err := validateBBSAddress(); err != nil {
		logger.Fatal("invalid-bbs-address", err)
	}

	client, err := consuladapter.NewClient(*consulCluster)
	if err != nil {
		logger.Fatal("new-client-failed", err)
	}

	sessionMgr := consuladapter.NewSessionManager(client)
	consulSession, err := consuladapter.NewSession("auctioneer", *lockTTL, client, sessionMgr)
	if err != nil {
		logger.Fatal("consul-session-failed", err)
	}

	clock := clock.NewClock()
	bbsServiceClient := bbs.NewServiceClient(consulSession, clock)
	auctioneerServiceClient := auctioneer.NewServiceClient(consulSession, clock)

	auctionRunner := initializeAuctionRunner(logger, *cellStateTimeout, initializeBBSClient(logger), bbsServiceClient)
	auctionServer := initializeAuctionServer(logger, auctionRunner)
	lockMaintainer := initializeLockMaintainer(logger, auctioneerServiceClient)

	members := grouper.Members{
		{"lock-maintainer", lockMaintainer},
		{"auction-runner", auctionRunner},
		{"auction-server", auctionServer},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)
	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err = <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
func (cmd *ATCCommand) Execute(args []string) error {
	runner, err := cmd.Runner(args)
	if err != nil {
		return err
	}

	return <-ifrit.Invoke(sigmon.New(runner)).Wait()
}
func run(cmd *exec.Cmd) error {
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	runner := sigmon.New(cmdRunner{cmd})
	process := ifrit.Invoke(runner)

	return <-process.Wait()
}
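// The cmdRunner type used above is not shown in the source. A minimal sketch of
// how such an adapter from *exec.Cmd to the ifrit.Runner interface might look
// (hypothetical implementation, assuming "os" and "os/exec" are imported):
type cmdRunner struct {
	cmd *exec.Cmd
}

func (r cmdRunner) Run(signals <-chan os.Signal, ready chan<- struct{}) error {
	if err := r.cmd.Start(); err != nil {
		return err
	}
	close(ready)

	waitErr := make(chan error, 1)
	go func() { waitErr <- r.cmd.Wait() }()

	for {
		select {
		case sig := <-signals:
			// Forward the signal and keep waiting for the child to exit.
			r.cmd.Process.Signal(sig)
		case err := <-waitErr:
			return err
		}
	}
}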
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	cf_http.Initialize(*communicationTimeout)

	logger, reconfigurableSink := cf_lager.New(*sessionName)

	natsClient := diegonats.NewClient()
	clock := clock.NewClock()
	syncer := syncer.NewSyncer(clock, *syncInterval, natsClient, logger)

	initializeDropsonde(logger)

	natsClientRunner := diegonats.NewClientRunner(*natsAddresses, *natsUsername, *natsPassword, logger, natsClient)

	table := initializeRoutingTable()
	emitter := initializeNatsEmitter(natsClient, logger)

	watcher := ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		return watcher.NewWatcher(initializeBBSClient(logger), clock, table, emitter, syncer.Events(), logger).Run(signals, ready)
	})

	syncRunner := ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		return syncer.Run(signals, ready)
	})

	lockMaintainer := initializeLockMaintainer(logger, *consulCluster, *sessionName, *lockTTL, *lockRetryInterval, clock)

	members := grouper.Members{
		{"lock-maintainer", lockMaintainer},
		{"nats-client", natsClientRunner},
		{"watcher", watcher},
		{"syncer", syncRunner},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)
	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err := <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
func main() {
	logger := lager.NewLogger("checkin")
	logger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.INFO))

	var opts Opts

	_, err := flags.Parse(&opts)
	if err != nil {
		logger.Error("parsing-flags", err)
		os.Exit(1)
	}

	// ts := oauth2.StaticTokenSource(
	// 	&oauth2.Token{AccessToken: opts.GitHubAccessToken},
	// )
	// tc := oauth2.NewClient(oauth2.NoContext, ts)
	// githubClient := github.NewClient(tc)

	// checker := build.NewConcourseChecker()
	// checker = build.NewStatusReporter(checker, githubClient.Repositories)

	dbConn, err := migration.Open(opts.DBDriver, opts.DBURL, migrations.Migrations)
	if err != nil {
		logger.Error("failed-to-run-migrations", err)
		os.Exit(1)
	}

	sqlDB := db.NewSQL(logger.Session("db"), dbConn)

	enqueuer := build.NewEnqueuer(sqlDB)

	apiServer := api.NewServer(opts.GitHubSecret, enqueuer)

	members := []grouper.Member{
		{
			"api",
			http_server.New(
				opts.Addr,
				apiServer,
			),
		},
	}

	group := grouper.NewParallel(os.Interrupt, members)

	running := ifrit.Invoke(sigmon.New(group))

	logger.Info("listening", lager.Data{
		"api": opts.Addr,
	})

	err = <-running.Wait()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	logger, reconfigurableSink := cf_lager.New("sshd")

	serverConfig, err := configure(logger)
	if err != nil {
		logger.Error("configure-failed", err)
		os.Exit(1)
	}

	runner := handlers.NewCommandRunner()
	shellLocator := handlers.NewShellLocator()
	dialer := &net.Dialer{}

	sshDaemon := daemon.New(
		logger,
		serverConfig,
		nil,
		map[string]handlers.NewChannelHandler{
			"session":      handlers.NewSessionChannelHandler(runner, shellLocator, getDaemonEnvironment(), 15*time.Second),
			"direct-tcpip": handlers.NewDirectTcpipChannelHandler(dialer),
		},
	)
	server := server.NewServer(logger, *address, sshDaemon)

	members := grouper.Members{
		{"sshd", server},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)
	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err = <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
	os.Exit(0)
}
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	cf_http.Initialize(*communicationTimeout)

	logger, reconfigurableSink := cf_lager.New("ssh-proxy")

	initializeDropsonde(logger)

	proxyConfig, err := configureProxy(logger)
	if err != nil {
		logger.Error("configure-failed", err)
		os.Exit(1)
	}

	sshProxy := proxy.New(logger, proxyConfig)
	server := server.NewServer(logger, *address, sshProxy)

	consulClient, err := consuladapter.NewClientFromUrl(*consulCluster)
	if err != nil {
		logger.Fatal("new-client-failed", err)
	}

	registrationRunner := initializeRegistrationRunner(logger, consulClient, *address, clock.NewClock())

	members := grouper.Members{
		{"ssh-proxy", server},
		{"registration-runner", registrationRunner},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{{
			"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink),
		}}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)
	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err = <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
	os.Exit(0)
}
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)

	lifecycles := flags.LifecycleMap{}
	flag.Var(&lifecycles, "lifecycle", "app lifecycle binary bundle mapping (lifecycle[/stack]:bundle-filepath-in-fileserver)")
	flag.Parse()

	cf_http.Initialize(*communicationTimeout)

	logger, reconfigurableSink := cf_lager.New("nsync-listener")
	initializeDropsonde(logger)

	recipeBuilderConfig := recipebuilder.Config{
		Lifecycles:    lifecycles,
		FileServerURL: *fileServerURL,
		KeyFactory:    keys.RSAKeyPairFactory,
	}

	recipeBuilders := map[string]recipebuilder.RecipeBuilder{
		"buildpack": recipebuilder.NewBuildpackRecipeBuilder(logger, recipeBuilderConfig),
		"docker":    recipebuilder.NewDockerRecipeBuilder(logger, recipeBuilderConfig),
	}

	handler := handlers.New(logger, initializeBBSClient(logger), recipeBuilders)

	members := grouper.Members{
		{"server", http_server.New(*listenAddress, handler)},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)
	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err := <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)

	flag.Var(
		&insecureDockerRegistries,
		"insecureDockerRegistry",
		"Docker registry to allow connecting to even if not secure. (Can be specified multiple times to allow insecure connection to multiple repositories)",
	)

	lifecycles := flags.LifecycleMap{}
	flag.Var(&lifecycles, "lifecycle", "app lifecycle binary bundle mapping (lifecycle[/stack]:bundle-filepath-in-fileserver)")

	flag.Parse()

	logger, reconfigurableSink := cf_lager.New("stager")
	initializeDropsonde(logger)

	ccClient := cc_client.NewCcClient(*ccBaseURL, *ccUsername, *ccPassword, *skipCertVerify)
	backends := initializeBackends(logger, lifecycles)

	handler := handlers.New(logger, ccClient, initializeBBSClient(logger), backends, clock.NewClock())

	members := grouper.Members{
		{"server", http_server.New(*listenAddress, handler)},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	logger.Info("starting")

	group := grouper.NewOrdered(os.Interrupt, members)
	process := ifrit.Invoke(sigmon.New(group))

	logger.Info("Listening for staging requests!")

	err := <-process.Wait()
	if err != nil {
		logger.Fatal("Stager exited with error", err)
	}

	logger.Info("stopped")
}
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	cf_http.Initialize(*communicationTimeout)

	logger, reconfigurableSink := cf_lager.New("ssh-proxy")

	err := dropsonde.Initialize(dropsondeDestination, dropsondeOrigin)
	if err != nil {
		logger.Error("failed-to-initialize-dropsonde", err)
	}

	proxyConfig, err := configureProxy(logger)
	if err != nil {
		logger.Error("configure-failed", err)
		os.Exit(1)
	}

	sshProxy := proxy.New(logger, proxyConfig)
	server := server.NewServer(logger, *address, sshProxy)

	members := grouper.Members{
		{"ssh-proxy", server},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{{
			"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink),
		}}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)
	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err = <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
	os.Exit(0)
}
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	logger, reconfigurableSink := cf_lager.New("tps-watcher")
	initializeDropsonde(logger)

	lockMaintainer := initializeLockMaintainer(logger)

	ccClient := cc_client.NewCcClient(*ccBaseURL, *ccUsername, *ccPassword, *skipCertVerify)

	watcher := ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		w, err := watcher.NewWatcher(logger, *eventHandlingWorkers, initializeBBSClient(logger), ccClient)
		if err != nil {
			return err
		}

		return w.Run(signals, ready)
	})

	members := grouper.Members{
		{"lock-maintainer", lockMaintainer},
		{"watcher", watcher},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)
	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err := <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())

	flag.Parse()

	uploaderConfig, err := config.NewUploaderConfig(*configPath)
	if err != nil {
		panic(err.Error())
	}

	logger, reconfigurableSink := lagerflags.NewFromConfig("cc-uploader", uploaderConfig.LagerConfig)

	cfhttp.Initialize(communicationTimeout)

	initializeDropsonde(logger, uploaderConfig)

	consulClient, err := consuladapter.NewClientFromUrl(uploaderConfig.ConsulCluster)
	if err != nil {
		logger.Fatal("new-client-failed", err)
	}

	registrationRunner := initializeRegistrationRunner(logger, consulClient, uploaderConfig.ListenAddress, clock.NewClock())

	members := grouper.Members{
		{"cc-uploader", initializeServer(logger, uploaderConfig)},
		{"registration-runner", registrationRunner},
	}

	if uploaderConfig.DebugServerConfig.DebugAddress != "" {
		members = append(grouper.Members{
			{"debug-server", debugserver.Runner(uploaderConfig.DebugServerConfig.DebugAddress, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)
	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("ready")

	err = <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
func main() {
	apiServer := NewGRPCRunner("localhost:50051", func(s *grpc.Server) {
		airfreight.RegisterAirfreightServer(s, &server{})
	})

	debugServer := http_server.New(
		"localhost:6060",
		debugHandler(),
	)

	members := []grouper.Member{
		{"api", apiServer},
		{"debug", debugServer},
	}

	runner := sigmon.New(grouper.NewParallel(os.Interrupt, members))

	err := <-ifrit.Invoke(runner).Wait()
	if err != nil {
		log.Fatalln(err)
	}
}
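// NewGRPCRunner is not shown above. A hedged sketch of such a helper, assuming
// it only needs to adapt a *grpc.Server to ifrit.Runner (this is not the
// snippet author's implementation; "net" and google.golang.org/grpc assumed):
func NewGRPCRunner(addr string, register func(*grpc.Server)) ifrit.Runner {
	return ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		lis, err := net.Listen("tcp", addr)
		if err != nil {
			return err
		}

		srv := grpc.NewServer()
		register(srv)

		serveErr := make(chan error, 1)
		go func() { serveErr <- srv.Serve(lis) }()

		close(ready)

		select {
		case err := <-serveErr:
			return err
		case <-signals:
			// Drain in-flight RPCs before exiting.
			srv.GracefulStop()
			return nil
		}
	})
}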
func main() {
	logger := lager.NewLogger("cnsim-server")
	logger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.DEBUG))

	port := getEnv(logger, "PORT", "9000")
	listenAddress := getEnv(logger, "LISTEN_ADDRESS", "127.0.0.1")

	address := fmt.Sprintf("%s:%s", listenAddress, port)
	logger.Info("listen", lager.Data{"address": address})

	routes := rata.Routes{
		{Name: "root", Method: "GET", Path: "/"},
		{Name: "steady_state", Method: "GET", Path: "/steady_state"},
	}

	rataHandlers := rata.Handlers{
		"root": &handlers.Root{
			Logger: logger,
		},
		"steady_state": gziphandler.GzipHandler(&handlers.SteadyState{
			Logger: logger,
			Simulator: &simulate.SteadyState{
				AppSizeDistribution: &distributions.GeometricWithPositiveSupport{},
			},
		}),
	}

	router, err := rata.NewRouter(routes, rataHandlers)
	if err != nil {
		log.Fatalf("unable to create rata Router: %s", err) // not tested
	}

	monitor := ifrit.Invoke(sigmon.New(grouper.NewOrdered(os.Interrupt, grouper.Members{
		{"http_server", http_server.New(address, router)},
	})))

	err = <-monitor.Wait()
	if err != nil {
		log.Fatalf("ifrit: %s", err)
	}
}
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	logger, reconfigurableSink := cf_lager.New("sshd")

	serverConfig, err := configure(logger)
	if err != nil {
		logger.Error("configure-failed", err)
		os.Exit(1)
	}

	sshDaemon := daemon.New(logger, serverConfig, nil, newChannelHandlers())
	server := server.NewServer(logger, *address, sshDaemon)

	members := grouper.Members{
		{"sshd", server},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)
	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err = <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
	os.Exit(0)
}
func main() {
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	if *repGuid == "" {
		panic("need rep-guid")
	}

	if *httpAddr == "" {
		panic("need http addr")
	}

	simulationRep := simulationrep.New(*stack, *zone, rep.Resources{
		MemoryMB:   int32(*memoryMB),
		DiskMB:     int32(*diskMB),
		Containers: *containers,
	})

	logger, _ := cf_lager.New("repnode-http")

	fakeLRPStopper := new(fake_lrp_stopper.FakeLRPStopper)
	fakeExecutorClient := new(executorfakes.FakeClient)
	fakeEvacuatable := new(fake_evacuation_context.FakeEvacuatable)

	handlers := rephandlers.New(simulationRep, fakeLRPStopper, fakeExecutorClient, fakeEvacuatable, logger.Session(*repGuid))
	router, err := rata.NewRouter(rep.Routes, handlers)
	if err != nil {
		log.Fatalln("failed to make router:", err)
	}
	httpServer := http_server.New(*httpAddr, router)

	monitor := ifrit.Invoke(sigmon.New(httpServer))

	fmt.Println("rep node listening")

	err = <-monitor.Wait()
	if err != nil {
		println("EXITED WITH ERROR: ", err.Error())
	}
}
func (cmd *WorkerCommand) Execute(args []string) error {
	logger := lager.NewLogger("worker")
	logger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.INFO))

	worker, gardenRunner, err := cmd.gardenRunner(logger.Session("garden"), args)
	if err != nil {
		return err
	}

	baggageclaimRunner, err := cmd.baggageclaimRunner(logger.Session("baggageclaim"))
	if err != nil {
		return err
	}

	members := grouper.Members{
		{
			Name:   "garden",
			Runner: gardenRunner,
		},
		{
			Name:   "baggageclaim",
			Runner: baggageclaimRunner,
		},
	}

	if cmd.TSA.WorkerPrivateKey != "" {
		members = append(members, grouper.Member{
			Name:   "beacon",
			Runner: cmd.beaconRunner(logger.Session("beacon"), worker),
		})
	}

	runner := sigmon.New(grouper.NewParallel(os.Interrupt, members))

	return <-ifrit.Invoke(runner).Wait()
}
func main() {
	var configFilePath string
	const configFileFlag = "configFile"

	cf_lager.AddFlags(flag.CommandLine)
	flag.StringVar(&configFilePath, configFileFlag, "", "")
	flag.Parse()

	conf, err := config.ParseConfigFile(configFilePath)
	if err != nil {
		log.Fatalf("parsing config: %s", err)
	}

	subnet := conf.LocalSubnet
	overlay := conf.OverlayNetwork
	if !overlay.Contains(subnet.IP) {
		log.Fatalf("overlay network does not contain local subnet")
	}

	retriableConnector := db.RetriableConnector{
		Connector:     db.GetConnectionPool,
		Sleeper:       db.SleeperFunc(time.Sleep),
		RetryInterval: 3 * time.Second,
		MaxRetries:    10,
	}

	databaseURL := conf.DatabaseURL
	dbConnectionPool, err := retriableConnector.GetConnectionPool(databaseURL)
	if err != nil {
		log.Fatalf("db connect: %s", err)
	}

	dataStore, err := store.New(dbConnectionPool)
	if err != nil {
		log.Fatalf("failed to construct datastore: %s", err)
	}

	logger, reconfigurableSink := cf_lager.New("ducatid")

	configFactory := &ipam.ConfigFactory{
		Config: types.IPConfig{
			IP: *subnet,
		},
	}

	ipAllocator := ipam.New(
		&ipam.StoreFactory{},
		&sync.Mutex{},
		configFactory,
		&sync.Mutex{},
	)

	rataHandlers := rata.Handlers{}

	addressManager := &ip.AddressManager{Netlinker: nl.Netlink}
	routeManager := &ip.RouteManager{Netlinker: nl.Netlink}
	linkFactory := &links.Factory{Netlinker: nl.Netlink}

	osThreadLocker := &ossupport.OSLocker{}

	sandboxNamespaceRepo, err := namespace.NewRepository(logger, conf.SandboxRepoDir, osThreadLocker)
	if err != nil {
		log.Fatalf("unable to make repo: %s", err) // not tested
	}

	namespaceOpener := &namespace.PathOpener{
		Logger:       logger,
		ThreadLocker: osThreadLocker,
	}

	subscriber := &subscriber.Subscriber{
		Logger:    logger.Session("subscriber"),
		Netlinker: nl.Netlink,
	}
	resolver := &watcher.Resolver{
		Logger: logger,
		Store:  dataStore,
	}
	arpInserter := &neigh.ARPInserter{
		Logger:    logger,
		Netlinker: nl.Netlink,
	}
	missWatcher := watcher.New(
		logger,
		subscriber,
		&sync.Mutex{},
		resolver,
		arpInserter,
	)
	networkMapper := &network.FixedNetworkMapper{DefaultNetworkID: "default"}

	reloader := &reloader.Reloader{
		Watcher: missWatcher,
	}

	sandboxRepo := &sandbox.Repository{
		Logger:         logger.Session("sandbox-repository"),
		Locker:         &sync.Mutex{},
		NamespaceRepo:  sandboxNamespaceRepo,
		Invoker:        sandbox.InvokeFunc(ifrit.Invoke),
		LinkFactory:    linkFactory,
		Watcher:        missWatcher,
		SandboxFactory: sandbox.NewSandboxFunc(sandbox.New),
		Sandboxes:      map[string]sandbox.Sandbox{},
	}

	hostNamespace, err := namespaceOpener.OpenPath("/proc/self/ns/net")
	if err != nil {
		log.Fatalf("unable to open host namespace: %s", err) // not tested
	}

	commandBuilder := &container.CommandBuilder{
		MissWatcher:   missWatcher,
		HostNamespace: hostNamespace,
	}

	dnsFactory := &executor.DNSFactory{
		Logger:           logger,
		ExternalServer:   fmt.Sprintf("%s:%d", conf.ExternalDNSServer, 53),
		Suffix:           conf.Suffix,
		DucatiAPI:        "http://" + conf.ListenAddress,
		DecoratorFactory: executor.WriterDecoratorFactoryFunc(executor.NamespaceDecoratorFactory),
	}

	executor := executor.New(
		logger,
		addressManager,
		routeManager,
		linkFactory,
		sandboxNamespaceRepo,
		sandboxRepo,
		executor.ListenUDPFunc(net.ListenUDP),
		dnsFactory,
	)

	creator := &container.Creator{
		Executor:        executor,
		SandboxRepo:     sandboxRepo,
		Watcher:         missWatcher,
		CommandBuilder:  commandBuilder,
		DNSAddress:      fmt.Sprintf("%s:%d", conf.OverlayDNSAddress, 53),
		HostIP:          conf.HostAddress,
		NamespaceOpener: namespaceOpener,
	}
	deletor := &container.Deletor{
		Executor:        executor,
		NamespaceOpener: namespaceOpener,
	}

	addController := &cni.AddController{
		IPAllocator:   ipAllocator,
		NetworkMapper: networkMapper,
		Creator:       creator,
		Datastore:     dataStore,
	}

	delController := &cni.DelController{
		Datastore:     dataStore,
		Deletor:       deletor,
		IPAllocator:   ipAllocator,
		NetworkMapper: networkMapper,
	}

	marshaler := marshal.MarshalFunc(json.Marshal)
	unmarshaler := marshal.UnmarshalFunc(json.Unmarshal)

	rataHandlers["get_container"] = &handlers.GetContainer{
		Marshaler: marshaler,
		Logger:    logger,
		Datastore: dataStore,
	}

	rataHandlers["networks_list_containers"] = &handlers.NetworksListContainers{
		Marshaler: marshaler,
		Logger:    logger,
		Datastore: dataStore,
	}

	rataHandlers["list_containers"] = &handlers.ListContainers{
		Marshaler: marshaler,
		Logger:    logger,
		Datastore: dataStore,
	}

	rataHandlers["cni_add"] = &handlers.CNIAdd{
		Logger:      logger,
		Marshaler:   marshaler,
		Unmarshaler: unmarshaler,
		Controller:  addController,
	}

	rataHandlers["cni_del"] = &handlers.CNIDel{
		Logger:      logger,
		Marshaler:   marshaler,
		Unmarshaler: unmarshaler,
		Controller:  delController,
	}

	routes := rata.Routes{
		{Name: "get_container", Method: "GET", Path: "/containers/:container_id"},
		{Name: "networks_list_containers", Method: "GET", Path: "/networks/:network_id"},
		{Name: "list_containers", Method: "GET", Path: "/containers"},
		{Name: "cni_add", Method: "POST", Path: "/cni/add"},
		{Name: "cni_del", Method: "POST", Path: "/cni/del"},
	}

	rataRouter, err := rata.NewRouter(routes, rataHandlers)
	if err != nil {
		log.Fatalf("unable to create rata Router: %s", err) // not tested
	}

	err = sandboxRepo.Load(conf.SandboxRepoDir)
	if err != nil {
		log.Fatalf("unable to load sandboxRepo: %s", err)
	}

	err = sandboxRepo.ForEach(reloader)
	if err != nil {
		log.Fatalf("unable to restart monitors: %s", err)
	}

	httpServer := http_server.New(conf.ListenAddress, rataRouter)

	members := grouper.Members{
		{"http_server", httpServer},
	}

	if conf.DebugAddress != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(conf.DebugAddress, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)
	monitor := ifrit.Invoke(sigmon.New(group))

	err = <-monitor.Wait()
	if err != nil {
		log.Fatalf("daemon terminated: %s", err)
	}
}
func processRunnerFor(servers grouper.Members) ifrit.Runner {
	return sigmon.New(grouper.NewOrdered(os.Interrupt, servers))
}
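// A hedged usage sketch for the helper above (the calling code is not in the
// source; the members here are placeholder runners built with ifrit.RunFunc):
func runGroup() error {
	noopRunner := ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		close(ready) // report ready immediately
		<-signals    // block until a monitored signal arrives
		return nil
	})

	runner := processRunnerFor(grouper.Members{
		{"first", noopRunner},
		{"second", noopRunner},
	})

	// Ordered start: "first" must report ready before "second" starts;
	// sigmon shuts the whole group down on SIGINT/SIGTERM.
	return <-ifrit.Invoke(runner).Wait()
}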
func main() {
	flag.StringVar(&configFile, "c", "", "Configuration File")
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	c := config.DefaultConfig()
	logCounter := schema.NewLogCounter()
	if configFile != "" {
		c = config.InitConfigFromFile(configFile)
	}

	prefix := "gorouter.stdout"
	if c.Logging.Syslog != "" {
		prefix = c.Logging.Syslog
	}
	logger, reconfigurableSink := cf_lager.New(prefix)
	InitLoggerFromConfig(logger, c, logCounter)
	logger.Info("starting")

	err := dropsonde.Initialize(c.Logging.MetronAddress, c.Logging.JobName)
	if err != nil {
		logger.Fatal("dropsonde-initialize-error", err)
	}

	// setup number of procs
	if c.GoMaxProcs != 0 {
		runtime.GOMAXPROCS(c.GoMaxProcs)
	}

	if c.DebugAddr != "" {
		cf_debug_server.Run(c.DebugAddr, reconfigurableSink)
	}

	logger.Info("setting-up-nats-connection")
	natsClient := connectToNatsServer(logger.Session("nats"), c)
	logger.Info("Successfully-connected-to-nats")

	metricsReporter := metrics.NewMetricsReporter()
	registry := rregistry.NewRouteRegistry(logger.Session("registry"), c, metricsReporter)

	varz := rvarz.NewVarz(registry)
	compositeReporter := metrics.NewCompositeReporter(varz, metricsReporter)

	accessLogger, err := access_log.CreateRunningAccessLogger(logger.Session("access-log"), c)
	if err != nil {
		logger.Fatal("error-creating-access-logger", err)
	}

	var crypto secure.Crypto
	var cryptoPrev secure.Crypto
	if c.RouteServiceEnabled {
		crypto = createCrypto(logger, c.RouteServiceSecret)
		if c.RouteServiceSecretPrev != "" {
			cryptoPrev = createCrypto(logger, c.RouteServiceSecretPrev)
		}
	}

	proxy := buildProxy(logger.Session("proxy"), c, registry, accessLogger, compositeReporter, crypto, cryptoPrev)

	router, err := router.NewRouter(logger.Session("router"), c, proxy, natsClient, registry, varz, logCounter, nil)
	if err != nil {
		logger.Fatal("initialize-router-error", err)
	}

	members := grouper.Members{
		{"router", router},
	}

	if c.RoutingApiEnabled() {
		logger.Info("setting-up-routing-api")
		routeFetcher := setupRouteFetcher(logger.Session("route-fetcher"), c, registry)

		// check connectivity to routing api
		err := routeFetcher.FetchRoutes()
		if err != nil {
			logger.Fatal("routing-api-connection-failed", err)
		}
		members = append(members, grouper.Member{"router-fetcher", routeFetcher})
	}

	group := grouper.NewOrdered(os.Interrupt, members)

	monitor := ifrit.Invoke(sigmon.New(group, syscall.SIGTERM, syscall.SIGINT, syscall.SIGUSR1))

	err = <-monitor.Wait()
	if err != nil {
		logger.Error("gorouter.exited-with-failure", err)
		os.Exit(1)
	}

	os.Exit(0)
}
func (cmd *GuardianCommand) Execute([]string) error {
	return <-ifrit.Invoke(sigmon.New(cmd)).Wait()
}
func main() {
	// register lager flags before parsing so they are actually picked up
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	logger, reconfigurableSink := cf_lager.New("routing-api")

	err := checkFlags()
	if err != nil {
		logger.Error("failed to start", err)
		os.Exit(1)
	}

	cfg, err := config.NewConfigFromFile(*configPath)
	if err != nil {
		logger.Error("failed to start", err)
		os.Exit(1)
	}

	err = dropsonde.Initialize(cfg.MetronConfig.Address+":"+cfg.MetronConfig.Port, cfg.LogGuid)
	if err != nil {
		logger.Error("failed to initialize Dropsonde", err)
		os.Exit(1)
	}

	if cfg.DebugAddress != "" {
		cf_debug_server.Run(cfg.DebugAddress, reconfigurableSink)
	}

	database, err := initializeDatabase(cfg, logger)
	if err != nil {
		logger.Error("failed to initialize database", err)
		os.Exit(1)
	}

	err = database.Connect()
	if err != nil {
		logger.Error("failed to connect to database", err)
		os.Exit(1)
	}
	defer database.Disconnect()

	prefix := "routing_api"
	statsdClient, err := statsd.NewBufferedClient(cfg.StatsdEndpoint, prefix, cfg.StatsdClientFlushInterval, 512)
	if err != nil {
		logger.Error("failed to create a statsd client", err)
		os.Exit(1)
	}
	defer statsdClient.Close()

	stopChan := make(chan struct{})
	apiServer := constructApiServer(cfg, database, statsdClient, stopChan, logger)

	stopper := constructStopper(stopChan)

	routerRegister := constructRouteRegister(cfg.LogGuid, database, logger)

	metricsTicker := time.NewTicker(cfg.MetricsReportingInterval)
	metricsReporter := metrics.NewMetricsReporter(database, statsdClient, metricsTicker)

	members := grouper.Members{
		{"metrics", metricsReporter},
		{"api-server", apiServer},
		{"conn-stopper", stopper},
		{"route-register", routerRegister},
	}

	group := grouper.NewOrdered(os.Interrupt, members)
	process := ifrit.Invoke(sigmon.New(group))

	// This is used by testrunner to signal ready for tests.
	logger.Info("started", lager.Data{"port": *port})

	errChan := process.Wait()
	err = <-errChan
	if err != nil {
		logger.Error("shutdown-error", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	etcdFlags := etcdstoreadapter.AddFlags(flag.CommandLine)
	flag.Parse()

	cf_http.Initialize(*communicationTimeout)

	logger, reconfigurableSink := cf_lager.New("receptor")
	logger.Info("starting")

	initializeDropsonde(logger)

	etcdOptions, err := etcdFlags.Validate()
	if err != nil {
		logger.Fatal("etcd-validation-failed", err)
	}

	if err := validateNatsArguments(); err != nil {
		logger.Error("invalid-nats-flags", err)
		os.Exit(1)
	}

	bbs := initializeReceptorBBS(etcdOptions, logger)
	hub := event.NewHub()

	handler := handlers.New(bbs, hub, logger, *username, *password, *corsEnabled)

	worker, enqueue := task_handler.NewTaskWorkerPool(bbs, logger)
	taskHandler := task_handler.New(enqueue, logger)
	lrpChangeWatcher := watcher.NewWatcher(
		bbs,
		hub,
		clock.NewClock(),
		bbsWatchRetryWaitDuration,
		logger,
	)

	members := grouper.Members{
		{"lrp-change-watcher", lrpChangeWatcher},
		{"server", http_server.New(*serverAddress, handler)},
		{"worker", worker},
		{"task-complete-handler", http_server.New(*taskHandlerAddress, taskHandler)},
		{"hub-closer", closeHub(logger.Session("hub-closer"), hub)},
	}

	if *registerWithRouter {
		registration := initializeServerRegistration(logger)
		natsClient := diegonats.NewClient()
		members = append(members, grouper.Member{
			Name:   "background-heartbeat",
			Runner: natbeat.NewBackgroundHeartbeat(natsClient, *natsAddresses, *natsUsername, *natsPassword, logger, registration),
		})
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)
	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err = <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)

	lifecycles := flags.LifecycleMap{}
	flag.Var(&lifecycles, "lifecycle", "app lifecycle binary bundle mapping (lifecycle[/stack]:bundle-filepath-in-fileserver)")
	flag.Parse()

	cf_http.Initialize(*communicationTimeout)

	logger, reconfigurableSink := cf_lager.New("nsync-bulker")
	initializeDropsonde(logger)

	serviceClient := initializeServiceClient(logger)

	uuid, err := uuid.NewV4()
	if err != nil {
		logger.Fatal("Couldn't generate uuid", err)
	}

	lockMaintainer := serviceClient.NewNsyncBulkerLockRunner(logger, uuid.String(), *lockRetryInterval, *lockTTL)

	recipeBuilderConfig := recipebuilder.Config{
		Lifecycles:    lifecycles,
		FileServerURL: *fileServerURL,
		KeyFactory:    keys.RSAKeyPairFactory,
	}
	recipeBuilders := map[string]recipebuilder.RecipeBuilder{
		"buildpack": recipebuilder.NewBuildpackRecipeBuilder(logger, recipeBuilderConfig),
		"docker":    recipebuilder.NewDockerRecipeBuilder(logger, recipeBuilderConfig),
	}

	runner := bulk.NewProcessor(
		initializeBBSClient(logger),
		*pollingInterval,
		*domainTTL,
		*bulkBatchSize,
		*updateLRPWorkers,
		*skipCertVerify,
		logger,
		&bulk.CCFetcher{
			BaseURI:   *ccBaseURL,
			BatchSize: int(*bulkBatchSize),
			Username:  *ccUsername,
			Password:  *ccPassword,
		},
		recipeBuilders,
		clock.NewClock(),
	)

	members := grouper.Members{
		{"lock-maintainer", lockMaintainer},
		{"runner", runner},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)

	logger.Info("waiting-for-lock")

	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err = <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
	os.Exit(0)
}