Example #1
File: main.go Project: cfibmers/auctioneer
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	cf_http.Initialize(*communicationTimeout)

	logger, reconfigurableSink := cf_lager.New("auctioneer")
	initializeDropsonde(logger)

	if err := validateBBSAddress(); err != nil {
		logger.Fatal("invalid-bbs-address", err)
	}

	consulClient, err := consuladapter.NewClientFromUrl(*consulCluster)
	if err != nil {
		logger.Fatal("new-client-failed", err)
	}

	port, err := strconv.Atoi(strings.Split(*listenAddr, ":")[1])
	if err != nil {
		logger.Fatal("invalid-port", err)
	}

	clock := clock.NewClock()
	auctioneerServiceClient := auctioneer.NewServiceClient(consulClient, clock)

	auctionRunner := initializeAuctionRunner(logger, *cellStateTimeout,
		initializeBBSClient(logger), *startingContainerWeight)
	auctionServer := initializeAuctionServer(logger, auctionRunner)
	lockMaintainer := initializeLockMaintainer(logger, auctioneerServiceClient, port)
	registrationRunner := initializeRegistrationRunner(logger, consulClient, clock, port)

	members := grouper.Members{
		{"lock-maintainer", lockMaintainer},
		{"auction-runner", auctionRunner},
		{"auction-server", auctionServer},
		{"registration-runner", registrationRunner},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
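		// Prepend the debug server so it starts before, and shuts down after, the other members.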
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)

	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err = <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
Example #2
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	cf_http.Initialize(*communicationTimeout)

	logger, reconfigurableSink := cf_lager.New("file-server")

	initializeDropsonde(logger)

	members := grouper.Members{
		{"file server", initializeServer(logger)},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)

	monitor := ifrit.Invoke(sigmon.New(group))
	logger.Info("ready")

	err := <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
Example #3
File: serve_api.go Project: cgrotz/hm9000
func ServeAPI(l logger.Logger, conf *config.Config) {
	store := connectToStore(l, conf)

	apiHandler, err := handlers.New(l, store, buildTimeProvider(l))
	if err != nil {
		l.Error("initialize-handler.failed", err)
		panic(err)
	}
	handler := handlers.BasicAuthWrap(apiHandler, conf.APIServerUsername, conf.APIServerPassword)

	listenAddr := fmt.Sprintf("%s:%d", conf.APIServerAddress, conf.APIServerPort)

	members := grouper.Members{
		{"api", http_server.New(listenAddr, handler)},
	}

	group := grouper.NewOrdered(os.Interrupt, members)

	monitor := ifrit.Invoke(sigmon.New(group))

	l.Info("started")
	l.Info(listenAddr)

	err = <-monitor.Wait()
	if err != nil {
		l.Error("exited", err)
		os.Exit(1)
	}

	l.Info("exited")
	os.Exit(0)
}
Example #4
File: main.go Project: migdi/delphos-api
func main() {
	logger := configureLogger()
	flag.Parse()

	validateFlags(logger)

	router := configureRouter(logger)

	var server ifrit.Runner
	server = http_server.New(*listenAddress, router)
	members := grouper.Members{
		{"server", server},
	}

	group := grouper.NewOrdered(os.Interrupt, members)

	monitor := ifrit.Invoke(sigmon.New(group))
	logger.Info("started")

	err := <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
Example #5
File: main.go Project: emc-xchallenge/tps
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	logger, reconfigurableSink := cf_lager.New("tps-listener")
	initializeDropsonde(logger)
	noaaClient := noaa.NewConsumer(*trafficControllerURL, &tls.Config{InsecureSkipVerify: *skipSSLVerification}, nil)
	defer noaaClient.Close()
	apiHandler := initializeHandler(logger, noaaClient, *maxInFlightRequests, initializeBBSClient(logger))

	members := grouper.Members{
		{"api", http_server.New(*listenAddr, apiHandler)},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)

	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err := <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
Example #6
func StartGarden(gardenBin, containerizerBin string, argv ...string) (ifrit.Process, garden.Client) {
	gardenPort, err := localip.LocalPort()
	Expect(err).NotTo(HaveOccurred())
	gardenAddr := fmt.Sprintf("127.0.0.1:%d", gardenPort)

	tmpDir := os.TempDir()

	// If below fails, try
	// netsh advfirewall firewall add rule name="Open Port 48080"  dir=in action=allow protocol=TCP localport=48080

	containerizerPort, err := localip.LocalPort()
	Expect(err).NotTo(HaveOccurred())
	gardenRunner := garden_runner.New("tcp4", gardenAddr, tmpDir, gardenBin, fmt.Sprintf("http://127.0.0.1:%d", containerizerPort))
	containerizerRunner := ginkgomon.New(ginkgomon.Config{
		Name:              "containerizer",
		Command:           exec.Command(containerizerBin, "127.0.0.1", strconv.Itoa(int(containerizerPort))),
		AnsiColorCode:     "",
		StartCheck:        "Control-C to quit.",
		StartCheckTimeout: 10 * time.Second,
		Cleanup:           func() {},
	})

	group := grouper.NewOrdered(syscall.SIGTERM, []grouper.Member{
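		// Ordered start: the containerizer must report ready before garden is started.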
		{Name: "containerizer", Runner: containerizerRunner},
		{Name: "garden", Runner: gardenRunner},
	})

	gardenProcess := ifrit.Invoke(group)

	return gardenProcess, gardenRunner.NewClient()
}
Example #7
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	cf_http.Initialize(*communicationTimeout)

	logger, reconfigurableSink := cf_lager.New("auctioneer")
	initializeDropsonde(logger)

	if err := validateBBSAddress(); err != nil {
		logger.Fatal("invalid-bbs-address", err)
	}

	client, err := consuladapter.NewClient(*consulCluster)
	if err != nil {
		logger.Fatal("new-client-failed", err)
	}

	sessionMgr := consuladapter.NewSessionManager(client)
	consulSession, err := consuladapter.NewSession("auctioneer", *lockTTL, client, sessionMgr)
	if err != nil {
		logger.Fatal("consul-session-failed", err)
	}

	clock := clock.NewClock()
	bbsServiceClient := bbs.NewServiceClient(consulSession, clock)
	auctioneerServiceClient := auctioneer.NewServiceClient(consulSession, clock)

	auctionRunner := initializeAuctionRunner(logger, *cellStateTimeout, initializeBBSClient(logger), bbsServiceClient)
	auctionServer := initializeAuctionServer(logger, auctionRunner)
	lockMaintainer := initializeLockMaintainer(logger, auctioneerServiceClient)

	members := grouper.Members{
		{"lock-maintainer", lockMaintainer},
		{"auction-runner", auctionRunner},
		{"auction-server", auctionServer},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)

	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err = <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
Example #8
func newNatsGroup() ifrit.Runner {
	natsServerRunner = gnatsdrunner.NewGnatsdTestRunner(natsPort)
	natsClientRunner = diegonats.NewClientRunner(natsAddress, "", "", logger, natsClient)
	return grouper.NewOrdered(os.Kill, grouper.Members{
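		// Ordered start: the embedded gnatsd test server comes up before the client runner dials it.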
		{"natsServer", natsServerRunner},
		{"natsClient", natsClientRunner},
	})
}
Example #9
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	cf_http.Initialize(*communicationTimeout)

	logger, reconfigurableSink := cf_lager.New(*sessionName)
	natsClient := diegonats.NewClient()
	clock := clock.NewClock()
	syncer := syncer.NewSyncer(clock, *syncInterval, natsClient, logger)

	initializeDropsonde(logger)

	natsClientRunner := diegonats.NewClientRunner(*natsAddresses, *natsUsername, *natsPassword, logger, natsClient)

	table := initializeRoutingTable()
	emitter := initializeNatsEmitter(natsClient, logger)
	watcher := ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		return watcher.NewWatcher(initializeBBSClient(logger), clock, table, emitter, syncer.Events(), logger).Run(signals, ready)
	})

	syncRunner := ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		return syncer.Run(signals, ready)
	})

	lockMaintainer := initializeLockMaintainer(logger, *consulCluster, *sessionName, *lockTTL, *lockRetryInterval, clock)

	members := grouper.Members{
		{"lock-maintainer", lockMaintainer},
		{"nats-client", natsClientRunner},
		{"watcher", watcher},
		{"syncer", syncRunner},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)

	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err := <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
Example #10
func NewServer(logger lager.Logger, config DriverConfig) (volman.Manager, ifrit.Runner) {
	clock := clock.NewClock()
	registry := NewDriverRegistry()

	syncer := NewDriverSyncer(logger, registry, config.DriverPaths, config.SyncInterval, clock)
	purger := NewMountPurger(logger, registry)

	grouper := grouper.NewOrdered(os.Kill, grouper.Members{grouper.Member{"volman-syncer", syncer.Runner()}, grouper.Member{"volman-purger", purger.Runner()}})

	return NewLocalClient(logger, registry, clock), grouper
}
Example #11
File: main.go Project: sykesm/diego-ssh
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	logger, reconfigurableSink := cf_lager.New("sshd")

	serverConfig, err := configure(logger)
	if err != nil {
		logger.Error("configure-failed", err)
		os.Exit(1)
	}

	runner := handlers.NewCommandRunner()
	shellLocator := handlers.NewShellLocator()
	dialer := &net.Dialer{}

	sshDaemon := daemon.New(
		logger,
		serverConfig,
		nil,
		map[string]handlers.NewChannelHandler{
			"session":      handlers.NewSessionChannelHandler(runner, shellLocator, getDaemonEnvironment(), 15*time.Second),
			"direct-tcpip": handlers.NewDirectTcpipChannelHandler(dialer),
		},
	)
	server := server.NewServer(logger, *address, sshDaemon)

	members := grouper.Members{
		{"sshd", server},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)
	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err = <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
	os.Exit(0)
}
Example #12
File: main.go Project: swisscom/diego-ssh
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	cf_http.Initialize(*communicationTimeout)

	logger, reconfigurableSink := cf_lager.New("ssh-proxy")

	initializeDropsonde(logger)

	proxyConfig, err := configureProxy(logger)
	if err != nil {
		logger.Error("configure-failed", err)
		os.Exit(1)
	}

	sshProxy := proxy.New(logger, proxyConfig)
	server := server.NewServer(logger, *address, sshProxy)

	consulClient, err := consuladapter.NewClientFromUrl(*consulCluster)
	if err != nil {
		logger.Fatal("new-client-failed", err)
	}

	registrationRunner := initializeRegistrationRunner(logger, consulClient, *address, clock.NewClock())

	members := grouper.Members{
		{"ssh-proxy", server},
		{"registration-runner", registrationRunner},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{{
			"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink),
		}}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)
	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err = <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
	os.Exit(0)
}
Example #13
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)

	lifecycles := flags.LifecycleMap{}
	flag.Var(&lifecycles, "lifecycle", "app lifecycle binary bundle mapping (lifecycle[/stack]:bundle-filepath-in-fileserver)")
	flag.Parse()

	cf_http.Initialize(*communicationTimeout)
	logger, reconfigurableSink := cf_lager.New("nsync-listener")

	initializeDropsonde(logger)

	recipeBuilderConfig := recipebuilder.Config{
		Lifecycles:    lifecycles,
		FileServerURL: *fileServerURL,
		KeyFactory:    keys.RSAKeyPairFactory,
	}
	recipeBuilders := map[string]recipebuilder.RecipeBuilder{
		"buildpack": recipebuilder.NewBuildpackRecipeBuilder(logger, recipeBuilderConfig),
		"docker":    recipebuilder.NewDockerRecipeBuilder(logger, recipeBuilderConfig),
	}

	handler := handlers.New(logger, initializeBBSClient(logger), recipeBuilders)

	members := grouper.Members{
		{"server", http_server.New(*listenAddress, handler)},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)

	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err := <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
Example #14
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)

	flag.Var(
		&insecureDockerRegistries,
		"insecureDockerRegistry",
		"Docker registry to allow connecting to even if not secure. (Can be specified multiple times to allow insecure connection to multiple repositories)",
	)

	lifecycles := flags.LifecycleMap{}
	flag.Var(&lifecycles, "lifecycle", "app lifecycle binary bundle mapping (lifecycle[/stack]:bundle-filepath-in-fileserver)")
	flag.Parse()

	logger, reconfigurableSink := cf_lager.New("stager")
	initializeDropsonde(logger)

	ccClient := cc_client.NewCcClient(*ccBaseURL, *ccUsername, *ccPassword, *skipCertVerify)

	backends := initializeBackends(logger, lifecycles)

	handler := handlers.New(logger, ccClient, initializeBBSClient(logger), backends, clock.NewClock())

	members := grouper.Members{
		{"server", http_server.New(*listenAddress, handler)},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	logger.Info("starting")

	group := grouper.NewOrdered(os.Interrupt, members)

	process := ifrit.Invoke(sigmon.New(group))

	logger.Info("Listening for staging requests!")

	err := <-process.Wait()
	if err != nil {
		logger.Fatal("Stager exited with error", err)
	}

	logger.Info("stopped")
}
Example #15
File: main.go Project: krishicks/diego-ssh
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	cf_http.Initialize(*communicationTimeout)

	logger, reconfigurableSink := cf_lager.New("ssh-proxy")

	err := dropsonde.Initialize(dropsondeDestination, dropsondeOrigin)
	if err != nil {
		logger.Error("failed-to-initialize-dropsonde", err)
	}

	proxyConfig, err := configureProxy(logger)
	if err != nil {
		logger.Error("configure-failed", err)
		os.Exit(1)
	}

	sshProxy := proxy.New(logger, proxyConfig)
	server := server.NewServer(logger, *address, sshProxy)

	members := grouper.Members{
		{"ssh-proxy", server},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{{
			"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink),
		}}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)
	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err = <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
	os.Exit(0)
}
Example #16
File: main.go Project: emc-xchallenge/tps
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	logger, reconfigurableSink := cf_lager.New("tps-watcher")
	initializeDropsonde(logger)

	lockMaintainer := initializeLockMaintainer(logger)

	ccClient := cc_client.NewCcClient(*ccBaseURL, *ccUsername, *ccPassword, *skipCertVerify)

	watcher := ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		w, err := watcher.NewWatcher(logger, *eventHandlingWorkers, initializeBBSClient(logger), ccClient)
		if err != nil {
			return err
		}

		return w.Run(signals, ready)
	})

	members := grouper.Members{
		{"lock-maintainer", lockMaintainer},
		{"watcher", watcher},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)

	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err := <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
Example #17
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	flag.Parse()

	uploaderConfig, err := config.NewUploaderConfig(*configPath)
	if err != nil {
		panic(err.Error())
	}

	logger, reconfigurableSink := lagerflags.NewFromConfig("cc-uploader", uploaderConfig.LagerConfig)

	cfhttp.Initialize(communicationTimeout)

	initializeDropsonde(logger, uploaderConfig)
	consulClient, err := consuladapter.NewClientFromUrl(uploaderConfig.ConsulCluster)
	if err != nil {
		logger.Fatal("new-client-failed", err)
	}

	registrationRunner := initializeRegistrationRunner(logger, consulClient, uploaderConfig.ListenAddress, clock.NewClock())

	members := grouper.Members{
		{"cc-uploader", initializeServer(logger, uploaderConfig)},
		{"registration-runner", registrationRunner},
	}

	if uploaderConfig.DebugServerConfig.DebugAddress != "" {
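		// Unlike the flag-based examples above, the debug address here comes from the uploader's config file.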
		members = append(grouper.Members{
			{"debug-server", debugserver.Runner(uploaderConfig.DebugServerConfig.DebugAddress, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)

	monitor := ifrit.Invoke(sigmon.New(group))
	logger.Info("ready")

	err = <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
Example #18
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	logger, reconfigurableSink := cf_lager.New("sshd")

	serverConfig, err := configure(logger)
	if err != nil {
		logger.Error("configure-failed", err)
		os.Exit(1)
	}

	sshDaemon := daemon.New(logger, serverConfig, nil, newChannelHandlers())
	server := server.NewServer(logger, *address, sshDaemon)

	members := grouper.Members{
		{"sshd", server},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)
	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err = <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
	os.Exit(0)
}
Example #19
File: main.go Project: rosenhouse/cnsim
func main() {
	logger := lager.NewLogger("cnsim-server")
	logger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.DEBUG))

	port := getEnv(logger, "PORT", "9000")
	listenAddress := getEnv(logger, "LISTEN_ADDRESS", "127.0.0.1")

	address := fmt.Sprintf("%s:%s", listenAddress, port)
	logger.Info("listen", lager.Data{"address": address})

	routes := rata.Routes{
		{Name: "root", Method: "GET", Path: "/"},
		{Name: "steady_state", Method: "GET", Path: "/steady_state"},
	}

	rataHandlers := rata.Handlers{
		"root": &handlers.Root{
			Logger: logger,
		},
		"steady_state": gziphandler.GzipHandler(&handlers.SteadyState{
			Logger: logger,
			Simulator: &simulate.SteadyState{
				AppSizeDistribution: &distributions.GeometricWithPositiveSupport{},
			},
		}),
	}

	router, err := rata.NewRouter(routes, rataHandlers)
	if err != nil {
		log.Fatalf("unable to create rata Router: %s", err) // not tested
	}

	monitor := ifrit.Invoke(sigmon.New(grouper.NewOrdered(os.Interrupt, grouper.Members{
		{"http_server", http_server.New(address, router)},
	})))
	err = <-monitor.Wait()
	if err != nil {
		log.Fatalf("ifrit: %s", err)
	}
}
Example #20
func main() {
	flag.Parse()
	cf_lager.AddFlags(flag.CommandLine)
	logger, reconfigurableSink := cf_lager.New("routing-api")

	err := checkFlags()
	if err != nil {
		logger.Error("failed to start", err)
		os.Exit(1)
	}

	cfg, err := config.NewConfigFromFile(*configPath)
	if err != nil {
		logger.Error("failed to start", err)
		os.Exit(1)
	}

	err = dropsonde.Initialize(cfg.MetronConfig.Address+":"+cfg.MetronConfig.Port, cfg.LogGuid)
	if err != nil {
		logger.Error("failed to initialize Dropsonde", err)
		os.Exit(1)
	}

	if cfg.DebugAddress != "" {
		cf_debug_server.Run(cfg.DebugAddress, reconfigurableSink)
	}

	database, err := initializeDatabase(cfg, logger)
	if err != nil {
		logger.Error("failed to initialize database", err)
		os.Exit(1)
	}

	err = database.Connect()
	if err != nil {
		logger.Error("failed to connect to database", err)
		os.Exit(1)
	}
	defer database.Disconnect()

	prefix := "routing_api"
	statsdClient, err := statsd.NewBufferedClient(cfg.StatsdEndpoint, prefix, cfg.StatsdClientFlushInterval, 512)
	if err != nil {
		logger.Error("failed to create a statsd client", err)
		os.Exit(1)
	}
	defer statsdClient.Close()

	stopChan := make(chan struct{})
	apiServer := constructApiServer(cfg, database, statsdClient, stopChan, logger)
	stopper := constructStopper(stopChan)

	routerRegister := constructRouteRegister(cfg.LogGuid, database, logger)

	metricsTicker := time.NewTicker(cfg.MetricsReportingInterval)
	metricsReporter := metrics.NewMetricsReporter(database, statsdClient, metricsTicker)

	members := grouper.Members{
		{"metrics", metricsReporter},
		{"api-server", apiServer},
		{"conn-stopper", stopper},
		{"route-register", routerRegister},
	}

	group := grouper.NewOrdered(os.Interrupt, members)
	process := ifrit.Invoke(sigmon.New(group))

	// This is used by testrunner to signal ready for tests.
	logger.Info("started", lager.Data{"port": *port})

	errChan := process.Wait()
	err = <-errChan
	if err != nil {
		logger.Error("shutdown-error", err)
		os.Exit(1)
	}
	logger.Info("exited")
}
Example #21
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	etcdFlags := etcdstoreadapter.AddFlags(flag.CommandLine)
	flag.Parse()

	cf_http.Initialize(*communicationTimeout)

	logger, reconfigurableSink := cf_lager.New("receptor")
	logger.Info("starting")

	initializeDropsonde(logger)

	etcdOptions, err := etcdFlags.Validate()
	if err != nil {
		logger.Fatal("etcd-validation-failed", err)
	}

	if err := validateNatsArguments(); err != nil {
		logger.Error("invalid-nats-flags", err)
		os.Exit(1)
	}

	bbs := initializeReceptorBBS(etcdOptions, logger)
	hub := event.NewHub()

	handler := handlers.New(bbs, hub, logger, *username, *password, *corsEnabled)

	worker, enqueue := task_handler.NewTaskWorkerPool(bbs, logger)
	taskHandler := task_handler.New(enqueue, logger)
	lrpChangeWatcher := watcher.NewWatcher(
		bbs,
		hub,
		clock.NewClock(),
		bbsWatchRetryWaitDuration,
		logger,
	)

	members := grouper.Members{
		{"lrp-change-watcher", lrpChangeWatcher},
		{"server", http_server.New(*serverAddress, handler)},
		{"worker", worker},
		{"task-complete-handler", http_server.New(*taskHandlerAddress, taskHandler)},
		{"hub-closer", closeHub(logger.Session("hub-closer"), hub)},
	}

	if *registerWithRouter {
		registration := initializeServerRegistration(logger)
		natsClient := diegonats.NewClient()
		members = append(members, grouper.Member{
			Name:   "background-heartbeat",
			Runner: natbeat.NewBackgroundHeartbeat(natsClient, *natsAddresses, *natsUsername, *natsPassword, logger, registration),
		})
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)

	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err = <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
Example #22
File: main.go Project: cf-routing/nsync
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)

	lifecycles := flags.LifecycleMap{}
	flag.Var(&lifecycles, "lifecycle", "app lifecycle binary bundle mapping (lifecycle[/stack]:bundle-filepath-in-fileserver)")
	flag.Parse()

	cf_http.Initialize(*communicationTimeout)

	logger, reconfigurableSink := cf_lager.New("nsync-bulker")
	initializeDropsonde(logger)

	serviceClient := initializeServiceClient(logger)
	uuid, err := uuid.NewV4()
	if err != nil {
		logger.Fatal("Couldn't generate uuid", err)
	}
	lockMaintainer := serviceClient.NewNsyncBulkerLockRunner(logger, uuid.String(), *lockRetryInterval, *lockTTL)

	recipeBuilderConfig := recipebuilder.Config{
		Lifecycles:    lifecycles,
		FileServerURL: *fileServerURL,
		KeyFactory:    keys.RSAKeyPairFactory,
	}
	recipeBuilders := map[string]recipebuilder.RecipeBuilder{
		"buildpack": recipebuilder.NewBuildpackRecipeBuilder(logger, recipeBuilderConfig),
		"docker":    recipebuilder.NewDockerRecipeBuilder(logger, recipeBuilderConfig),
	}

	runner := bulk.NewProcessor(
		initializeBBSClient(logger),
		*pollingInterval,
		*domainTTL,
		*bulkBatchSize,
		*updateLRPWorkers,
		*skipCertVerify,
		logger,
		&bulk.CCFetcher{
			BaseURI:   *ccBaseURL,
			BatchSize: int(*bulkBatchSize),
			Username:  *ccUsername,
			Password:  *ccPassword,
		},
		recipeBuilders,
		clock.NewClock(),
	)

	members := grouper.Members{
		{"lock-maintainer", lockMaintainer},
		{"runner", runner},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)

	logger.Info("waiting-for-lock")

	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err = <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
	os.Exit(0)
}
Example #23
File: main.go Project: cf-routing/nsync
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)

	lifecycles := flags.LifecycleMap{}
	flag.Var(&lifecycles, "lifecycle", "app lifecycle binary bundle mapping (lifecycle[/stack]:bundle-filepath-in-fileserver)")
	flag.Parse()

	cf_http.Initialize(*communicationTimeout)
	logger, reconfigurableSink := cf_lager.New("nsync-listener")

	initializeDropsonde(logger)

	recipeBuilderConfig := recipebuilder.Config{
		Lifecycles:    lifecycles,
		FileServerURL: *fileServerURL,
		KeyFactory:    keys.RSAKeyPairFactory,
	}
	recipeBuilders := map[string]recipebuilder.RecipeBuilder{
		"buildpack": recipebuilder.NewBuildpackRecipeBuilder(logger, recipeBuilderConfig),
		"docker":    recipebuilder.NewDockerRecipeBuilder(logger, recipeBuilderConfig),
	}

	handler := handlers.New(logger, initializeBBSClient(logger), recipeBuilders)

	consulClient, err := consuladapter.NewClientFromUrl(*consulCluster)
	if err != nil {
		logger.Fatal("new-consul-client-failed", err)
	}

	_, portString, err := net.SplitHostPort(*listenAddress)
	if err != nil {
		logger.Fatal("failed-invalid-listen-address", err)
	}
	portNum, err := net.LookupPort("tcp", portString)
	if err != nil {
		logger.Fatal("failed-invalid-listen-port", err)
	}

	clock := clock.NewClock()
	registrationRunner := initializeRegistrationRunner(logger, consulClient, portNum, clock)

	members := grouper.Members{
		{"server", http_server.New(*listenAddress, handler)},
		{"registration-runner", registrationRunner},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)

	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err = <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
Example #24
File: main.go Project: idouba/gorouter
func main() {
	flag.StringVar(&configFile, "c", "", "Configuration File")
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	c := config.DefaultConfig()
	logCounter := vcap.NewLogCounter()

	if configFile != "" {
		c = config.InitConfigFromFile(configFile)
	}

	InitLoggerFromConfig(c, logCounter)
	logger := steno.NewLogger("router.main")

	err := dropsonde.Initialize(c.Logging.MetronAddress, c.Logging.JobName)
	if err != nil {
		logger.Errorf("Dropsonde failed to initialize: %s", err.Error())
		os.Exit(1)
	}

	// setup number of procs
	if c.GoMaxProcs != 0 {
		runtime.GOMAXPROCS(c.GoMaxProcs)
	}

	if c.DebugAddr != "" {
		cf_debug_server.Run(c.DebugAddr)
	}

	logger.Info("Setting up NATs connection")
	natsClient := connectToNatsServer(logger, c)

	metricsReporter := metrics.NewMetricsReporter()
	registry := rregistry.NewRouteRegistry(c, natsClient, metricsReporter)

	varz := rvarz.NewVarz(registry)
	compositeReporter := metrics.NewCompositeReporter(varz, metricsReporter)

	accessLogger, err := access_log.CreateRunningAccessLogger(c)
	if err != nil {
		logger.Fatalf("Error creating access logger: %s\n", err)
	}

	var crypto secure.Crypto
	var cryptoPrev secure.Crypto
	if c.RouteServiceEnabled {
		crypto = createCrypto(logger, c.RouteServiceSecret)
		if c.RouteServiceSecretPrev != "" {
			cryptoPrev = createCrypto(logger, c.RouteServiceSecretPrev)
		}
	}

	proxy := buildProxy(c, registry, accessLogger, compositeReporter, crypto, cryptoPrev)

	router, err := router.NewRouter(c, proxy, natsClient, registry, varz, logCounter, nil)
	if err != nil {
		logger.Errorf("An error occurred: %s", err.Error())
		os.Exit(1)
	}

	members := grouper.Members{
		{"router", router},
	}
	if c.RoutingApiEnabled() {
		logger.Info("Setting up route fetcher")
		routeFetcher := setupRouteFetcher(logger, c, registry)
		members = append(members, grouper.Member{"router-fetcher", routeFetcher})
	}

	group := grouper.NewOrdered(os.Interrupt, members)

	monitor := ifrit.Invoke(sigmon.New(group, syscall.SIGTERM, syscall.SIGINT, syscall.SIGUSR1))

	err = <-monitor.Wait()
	if err != nil {
		logger.Error("gorouter.exited-with-failure")
		os.Exit(1)
	}

	os.Exit(0)
}
Example #25
func processRunnerFor(servers grouper.Members) ifrit.Runner {
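	// grouper.NewOrdered starts each member in order, waiting for the previous one
	// to become ready, and shuts them down in reverse order when signalled.
	// sigmon relays terminating OS signals (e.g. SIGINT, SIGTERM) to the group.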
	return sigmon.New(grouper.NewOrdered(os.Interrupt, servers))
}
Example #26
func main() {
	var configFilePath string
	const configFileFlag = "configFile"

	cf_lager.AddFlags(flag.CommandLine)
	flag.StringVar(&configFilePath, configFileFlag, "", "")
	flag.Parse()

	conf, err := config.ParseConfigFile(configFilePath)
	if err != nil {
		log.Fatalf("parsing config: %s", err)
	}

	subnet := conf.LocalSubnet
	overlay := conf.OverlayNetwork

	if !overlay.Contains(subnet.IP) {
		log.Fatalf("overlay network does not contain local subnet")
	}

	retriableConnector := db.RetriableConnector{
		Connector:     db.GetConnectionPool,
		Sleeper:       db.SleeperFunc(time.Sleep),
		RetryInterval: 3 * time.Second,
		MaxRetries:    10,
	}

	databaseURL := conf.DatabaseURL
	dbConnectionPool, err := retriableConnector.GetConnectionPool(databaseURL)
	if err != nil {
		log.Fatalf("db connect: %s", err)
	}

	dataStore, err := store.New(dbConnectionPool)
	if err != nil {
		log.Fatalf("failed to construct datastore: %s", err)
	}

	logger, reconfigurableSink := cf_lager.New("ducatid")

	configFactory := &ipam.ConfigFactory{
		Config: types.IPConfig{
			IP: *subnet,
		},
	}

	ipAllocator := ipam.New(
		&ipam.StoreFactory{},
		&sync.Mutex{},
		configFactory,
		&sync.Mutex{},
	)

	rataHandlers := rata.Handlers{}

	addressManager := &ip.AddressManager{Netlinker: nl.Netlink}
	routeManager := &ip.RouteManager{Netlinker: nl.Netlink}
	linkFactory := &links.Factory{Netlinker: nl.Netlink}
	osThreadLocker := &ossupport.OSLocker{}

	sandboxNamespaceRepo, err := namespace.NewRepository(logger, conf.SandboxRepoDir, osThreadLocker)
	if err != nil {
		log.Fatalf("unable to make repo: %s", err) // not tested
	}

	namespaceOpener := &namespace.PathOpener{
		Logger:       logger,
		ThreadLocker: osThreadLocker,
	}

	subscriber := &subscriber.Subscriber{
		Logger:    logger.Session("subscriber"),
		Netlinker: nl.Netlink,
	}
	resolver := &watcher.Resolver{
		Logger: logger,
		Store:  dataStore,
	}
	arpInserter := &neigh.ARPInserter{
		Logger:    logger,
		Netlinker: nl.Netlink,
	}
	missWatcher := watcher.New(
		logger,
		subscriber,
		&sync.Mutex{},
		resolver,
		arpInserter,
	)
	networkMapper := &network.FixedNetworkMapper{DefaultNetworkID: "default"}

	reloader := &reloader.Reloader{
		Watcher: missWatcher,
	}

	sandboxRepo := &sandbox.Repository{
		Logger:         logger.Session("sandbox-repository"),
		Locker:         &sync.Mutex{},
		NamespaceRepo:  sandboxNamespaceRepo,
		Invoker:        sandbox.InvokeFunc(ifrit.Invoke),
		LinkFactory:    linkFactory,
		Watcher:        missWatcher,
		SandboxFactory: sandbox.NewSandboxFunc(sandbox.New),
		Sandboxes:      map[string]sandbox.Sandbox{},
	}

	hostNamespace, err := namespaceOpener.OpenPath("/proc/self/ns/net")
	if err != nil {
		log.Fatalf("unable to open host namespace: %s", err) // not tested
	}
	commandBuilder := &container.CommandBuilder{
		MissWatcher:   missWatcher,
		HostNamespace: hostNamespace,
	}
	dnsFactory := &executor.DNSFactory{
		Logger:           logger,
		ExternalServer:   fmt.Sprintf("%s:%d", conf.ExternalDNSServer, 53),
		Suffix:           conf.Suffix,
		DucatiAPI:        "http://" + conf.ListenAddress,
		DecoratorFactory: executor.WriterDecoratorFactoryFunc(executor.NamespaceDecoratorFactory),
	}
	executor := executor.New(
		logger,
		addressManager,
		routeManager,
		linkFactory,
		sandboxNamespaceRepo,
		sandboxRepo,
		executor.ListenUDPFunc(net.ListenUDP),
		dnsFactory,
	)
	creator := &container.Creator{
		Executor:        executor,
		SandboxRepo:     sandboxRepo,
		Watcher:         missWatcher,
		CommandBuilder:  commandBuilder,
		DNSAddress:      fmt.Sprintf("%s:%d", conf.OverlayDNSAddress, 53),
		HostIP:          conf.HostAddress,
		NamespaceOpener: namespaceOpener,
	}
	deletor := &container.Deletor{
		Executor:        executor,
		NamespaceOpener: namespaceOpener,
	}

	addController := &cni.AddController{
		IPAllocator:   ipAllocator,
		NetworkMapper: networkMapper,
		Creator:       creator,
		Datastore:     dataStore,
	}

	delController := &cni.DelController{
		Datastore:     dataStore,
		Deletor:       deletor,
		IPAllocator:   ipAllocator,
		NetworkMapper: networkMapper,
	}

	marshaler := marshal.MarshalFunc(json.Marshal)
	unmarshaler := marshal.UnmarshalFunc(json.Unmarshal)

	rataHandlers["get_container"] = &handlers.GetContainer{
		Marshaler: marshaler,
		Logger:    logger,
		Datastore: dataStore,
	}

	rataHandlers["networks_list_containers"] = &handlers.NetworksListContainers{
		Marshaler: marshaler,
		Logger:    logger,
		Datastore: dataStore,
	}

	rataHandlers["list_containers"] = &handlers.ListContainers{
		Marshaler: marshaler,
		Logger:    logger,
		Datastore: dataStore,
	}

	rataHandlers["cni_add"] = &handlers.CNIAdd{
		Logger:      logger,
		Marshaler:   marshaler,
		Unmarshaler: unmarshaler,
		Controller:  addController,
	}

	rataHandlers["cni_del"] = &handlers.CNIDel{
		Logger:      logger,
		Marshaler:   marshaler,
		Unmarshaler: unmarshaler,
		Controller:  delController,
	}

	routes := rata.Routes{
		{Name: "get_container", Method: "GET", Path: "/containers/:container_id"},
		{Name: "networks_list_containers", Method: "GET", Path: "/networks/:network_id"},
		{Name: "list_containers", Method: "GET", Path: "/containers"},
		{Name: "cni_add", Method: "POST", Path: "/cni/add"},
		{Name: "cni_del", Method: "POST", Path: "/cni/del"},
	}

	rataRouter, err := rata.NewRouter(routes, rataHandlers)
	if err != nil {
		log.Fatalf("unable to create rata Router: %s", err) // not tested
	}

	err = sandboxRepo.Load(conf.SandboxRepoDir)
	if err != nil {
		log.Fatalf("unable to load sandboxRepo: %s", err)
	}

	err = sandboxRepo.ForEach(reloader)
	if err != nil {
		log.Fatalf("unable to restart monitors: %s", err)
	}

	httpServer := http_server.New(conf.ListenAddress, rataRouter)

	members := grouper.Members{
		{"http_server", httpServer},
	}

	if conf.DebugAddress != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(conf.DebugAddress, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)

	monitor := ifrit.Invoke(sigmon.New(group))

	err = <-monitor.Wait()
	if err != nil {
		log.Fatalf("daemon terminated: %s", err)
	}
}
Example #27
File: main.go Project: cloudfoundry/bbs
func main() {
	debugserver.AddFlags(flag.CommandLine)
	lagerflags.AddFlags(flag.CommandLine)
	etcdFlags := AddETCDFlags(flag.CommandLine)
	encryptionFlags := encryption.AddEncryptionFlags(flag.CommandLine)

	flag.Parse()

	cfhttp.Initialize(*communicationTimeout)

	logger, reconfigurableSink := lagerflags.New("bbs")
	logger.Info("starting")

	initializeDropsonde(logger)

	clock := clock.NewClock()

	consulClient, err := consuladapter.NewClientFromUrl(*consulCluster)
	if err != nil {
		logger.Fatal("new-consul-client-failed", err)
	}

	serviceClient := bbs.NewServiceClient(consulClient, clock)

	maintainer := initializeLockMaintainer(logger, serviceClient)

	_, portString, err := net.SplitHostPort(*listenAddress)
	if err != nil {
		logger.Fatal("failed-invalid-listen-address", err)
	}
	portNum, err := net.LookupPort("tcp", portString)
	if err != nil {
		logger.Fatal("failed-invalid-listen-port", err)
	}

	_, portString, err = net.SplitHostPort(*healthAddress)
	if err != nil {
		logger.Fatal("failed-invalid-health-address", err)
	}
	_, err = net.LookupPort("tcp", portString)
	if err != nil {
		logger.Fatal("failed-invalid-health-port", err)
	}

	registrationRunner := initializeRegistrationRunner(logger, consulClient, portNum, clock)

	var activeDB db.DB
	var sqlDB *sqldb.SQLDB
	var sqlConn *sql.DB
	var storeClient etcddb.StoreClient
	var etcdDB *etcddb.ETCDDB

	key, keys, err := encryptionFlags.Parse()
	if err != nil {
		logger.Fatal("cannot-setup-encryption", err)
	}
	keyManager, err := encryption.NewKeyManager(key, keys)
	if err != nil {
		logger.Fatal("cannot-setup-encryption", err)
	}
	cryptor := encryption.NewCryptor(keyManager, rand.Reader)

	etcdOptions, err := etcdFlags.Validate()
	if err != nil {
		logger.Fatal("etcd-validation-failed", err)
	}

	if etcdOptions.IsConfigured {
		storeClient = initializeEtcdStoreClient(logger, etcdOptions)
		etcdDB = initializeEtcdDB(logger, cryptor, storeClient, serviceClient, *desiredLRPCreationTimeout)
		activeDB = etcdDB
	}

	// If SQL database info is passed in, use SQL instead of ETCD
	if *databaseDriver != "" && *databaseConnectionString != "" {
		var err error
		connectionString := appendSSLConnectionStringParam(logger, *databaseDriver, *databaseConnectionString, *sqlCACertFile)

		sqlConn, err = sql.Open(*databaseDriver, connectionString)
		if err != nil {
			logger.Fatal("failed-to-open-sql", err)
		}
		defer sqlConn.Close()
		sqlConn.SetMaxOpenConns(*maxDatabaseConnections)
		sqlConn.SetMaxIdleConns(*maxDatabaseConnections)

		err = sqlConn.Ping()
		if err != nil {
			logger.Fatal("sql-failed-to-connect", err)
		}

		sqlDB = sqldb.NewSQLDB(sqlConn, *convergenceWorkers, *updateWorkers, format.ENCRYPTED_PROTO, cryptor, guidprovider.DefaultGuidProvider, clock, *databaseDriver)
		err = sqlDB.SetIsolationLevel(logger, sqldb.IsolationLevelReadCommitted)
		if err != nil {
			logger.Fatal("sql-failed-to-set-isolation-level", err)
		}

		err = sqlDB.CreateConfigurationsTable(logger)
		if err != nil {
			logger.Fatal("sql-failed-create-configurations-table", err)
		}
		activeDB = sqlDB
	}

	if activeDB == nil {
		logger.Fatal("no-database-configured", errors.New("no database configured"))
	}

	encryptor := encryptor.New(logger, activeDB, keyManager, cryptor, clock)

	migrationsDone := make(chan struct{})

	migrationManager := migration.NewManager(
		logger,
		etcdDB,
		storeClient,
		sqlDB,
		sqlConn,
		cryptor,
		migrations.Migrations,
		migrationsDone,
		clock,
		*databaseDriver,
	)

	desiredHub := events.NewHub()
	actualHub := events.NewHub()

	repTLSConfig := &rep.TLSConfig{
		RequireTLS:      *repRequireTLS,
		CaCertFile:      *repCACert,
		CertFile:        *repClientCert,
		KeyFile:         *repClientKey,
		ClientCacheSize: *repClientSessionCacheSize,
	}

	httpClient := cfhttp.NewClient()
	repClientFactory, err := rep.NewClientFactory(httpClient, httpClient, repTLSConfig)
	if err != nil {
		logger.Fatal("new-rep-client-factory-failed", err)
	}

	auctioneerClient := initializeAuctioneerClient(logger)

	exitChan := make(chan struct{})

	var accessLogger lager.Logger
	if *accessLogPath != "" {
		accessLogger = lager.NewLogger("bbs-access")
		file, err := os.OpenFile(*accessLogPath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
		if err != nil {
			logger.Error("invalid-access-log-path", err, lager.Data{"access-log-path": *accessLogPath})
			os.Exit(1)
		}
		accessLogger.RegisterSink(lager.NewWriterSink(file, lager.INFO))
	}

	var tlsConfig *tls.Config
	if *requireSSL {
		tlsConfig, err = cfhttp.NewTLSConfig(*certFile, *keyFile, *caFile)
		if err != nil {
			logger.Fatal("tls-configuration-failed", err)
		}
	}

	cbWorkPool := taskworkpool.New(logger, *taskCallBackWorkers, taskworkpool.HandleCompletedTask, tlsConfig)

	handler := handlers.New(
		logger,
		accessLogger,
		*updateWorkers,
		*convergenceWorkers,
		activeDB,
		desiredHub,
		actualHub,
		cbWorkPool,
		serviceClient,
		auctioneerClient,
		repClientFactory,
		migrationsDone,
		exitChan,
	)

	metricsNotifier := metrics.NewPeriodicMetronNotifier(logger)

	retirer := controllers.NewActualLRPRetirer(activeDB, actualHub, repClientFactory, serviceClient)
	lrpConvergenceController := controllers.NewLRPConvergenceController(logger, activeDB, actualHub, auctioneerClient, serviceClient, retirer, *convergenceWorkers)
	taskController := controllers.NewTaskController(activeDB, cbWorkPool, auctioneerClient, serviceClient, repClientFactory)

	convergerProcess := converger.New(
		logger,
		clock,
		lrpConvergenceController,
		taskController,
		serviceClient,
		*convergeRepeatInterval,
		*kickTaskDuration,
		*expirePendingTaskDuration,
		*expireCompletedTaskDuration)

	var server ifrit.Runner
	if tlsConfig != nil {
		server = http_server.NewTLSServer(*listenAddress, handler, tlsConfig)
	} else {
		server = http_server.New(*listenAddress, handler)
	}

	healthcheckServer := http_server.New(*healthAddress, http.HandlerFunc(healthCheckHandler))

	members := grouper.Members{
		{"healthcheck", healthcheckServer},
		{"lock-maintainer", maintainer},
		{"workpool", cbWorkPool},
		{"server", server},
		{"migration-manager", migrationManager},
		{"encryptor", encryptor},
		{"hub-maintainer", hubMaintainer(logger, desiredHub, actualHub)},
		{"metrics", *metricsNotifier},
		{"converger", convergerProcess},
		{"registration-runner", registrationRunner},
	}

	if dbgAddr := debugserver.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", debugserver.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)

	monitor := ifrit.Invoke(sigmon.New(group))
	go func() {
		// If a handler writes to this channel, we've hit an unrecoverable error
		// and should shut down (cleanly)
		<-exitChan
		monitor.Signal(os.Interrupt)
	}()

	logger.Info("started")

	err = <-monitor.Wait()
	if sqlConn != nil {
		sqlConn.Close()
	}
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
Example #28
		Δ time.Duration = 10 * time.Millisecond
	)

	Describe("Start", func() {
		BeforeEach(func() {
			childRunner1 = fake_runner.NewTestRunner()
			childRunner2 = fake_runner.NewTestRunner()
			childRunner3 = fake_runner.NewTestRunner()

			members = grouper.Members{
				{"child1", childRunner1},
				{"child2", childRunner2},
				{"child3", childRunner3},
			}

			groupRunner = grouper.NewOrdered(os.Interrupt, members)
		})

		AfterEach(func() {
			childRunner1.EnsureExit()
			childRunner2.EnsureExit()
			childRunner3.EnsureExit()

			Eventually(started).Should(BeClosed())
			groupProcess.Signal(os.Kill)
			Eventually(groupProcess.Wait()).Should(Receive())
		})

		BeforeEach(func() {
			started = make(chan struct{})
			go func() {
Example #29
func main() {
	flag.StringVar(&configFile, "c", "", "Configuration File")
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	c := config.DefaultConfig()
	logCounter := schema.NewLogCounter()

	if configFile != "" {
		c = config.InitConfigFromFile(configFile)
	}

	prefix := "gorouter.stdout"
	if c.Logging.Syslog != "" {
		prefix = c.Logging.Syslog
	}
	logger, reconfigurableSink := cf_lager.New(prefix)
	InitLoggerFromConfig(logger, c, logCounter)

	logger.Info("starting")

	err := dropsonde.Initialize(c.Logging.MetronAddress, c.Logging.JobName)
	if err != nil {
		logger.Fatal("dropsonde-initialize-error", err)
	}

	// setup number of procs
	if c.GoMaxProcs != 0 {
		runtime.GOMAXPROCS(c.GoMaxProcs)
	}

	if c.DebugAddr != "" {
		cf_debug_server.Run(c.DebugAddr, reconfigurableSink)
	}

	logger.Info("setting-up-nats-connection")
	natsClient := connectToNatsServer(logger.Session("nats"), c)
	logger.Info("Successfully-connected-to-nats")

	metricsReporter := metrics.NewMetricsReporter()
	registry := rregistry.NewRouteRegistry(logger.Session("registry"), c, metricsReporter)

	varz := rvarz.NewVarz(registry)
	compositeReporter := metrics.NewCompositeReporter(varz, metricsReporter)

	accessLogger, err := access_log.CreateRunningAccessLogger(logger.Session("access-log"), c)
	if err != nil {
		logger.Fatal("error-creating-access-logger", err)
	}

	var crypto secure.Crypto
	var cryptoPrev secure.Crypto
	if c.RouteServiceEnabled {
		crypto = createCrypto(logger, c.RouteServiceSecret)
		if c.RouteServiceSecretPrev != "" {
			cryptoPrev = createCrypto(logger, c.RouteServiceSecretPrev)
		}
	}

	proxy := buildProxy(logger.Session("proxy"), c, registry, accessLogger, compositeReporter, crypto, cryptoPrev)

	router, err := router.NewRouter(logger.Session("router"), c, proxy, natsClient, registry, varz, logCounter, nil)
	if err != nil {
		logger.Fatal("initialize-router-error", err)
	}

	members := grouper.Members{
		{"router", router},
	}
	if c.RoutingApiEnabled() {
		logger.Info("setting-up-routing-api")
		routeFetcher := setupRouteFetcher(logger.Session("route-fetcher"), c, registry)

		// check connectivity to routing api
		err := routeFetcher.FetchRoutes()
		if err != nil {
			logger.Fatal("routing-api-connection-failed", err)
		}
		members = append(members, grouper.Member{"router-fetcher", routeFetcher})
	}

	group := grouper.NewOrdered(os.Interrupt, members)

	monitor := ifrit.Invoke(sigmon.New(group, syscall.SIGTERM, syscall.SIGINT, syscall.SIGUSR1))

	err = <-monitor.Wait()
	if err != nil {
		logger.Error("gorouter.exited-with-failure", err)
		os.Exit(1)
	}

	os.Exit(0)
}
Example #30
func newBackgroundGroup(natsClient diegonats.NATSClient, natsAddress, natsUsername, natsPassword string, logger lager.Logger, registration RegistryMessage) ifrit.Runner {
	return grouper.NewOrdered(os.Interrupt, grouper.Members{
		{"nats_connection", diegonats.NewClientRunner(natsAddress, natsUsername, natsPassword, logger, natsClient)},
		{"router_heartbeat", New(natsClient, registration, 50*time.Millisecond, logger)},
	})
}
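
All of the examples above follow the same shape: collect the runners into a grouper.Members list, wrap the list in grouper.NewOrdered so members start in order and stop in reverse, wrap the ordered group in sigmon so OS signals are relayed to it, invoke the result with ifrit, and wait on the returned process. Below is a minimal, self-contained sketch of that pattern; the github.com/tedsuo/ifrit import paths, the member names, and the listen addresses are illustrative assumptions rather than code taken from any project above.

package main

import (
	"net/http"
	"os"

	"github.com/tedsuo/ifrit"
	"github.com/tedsuo/ifrit/grouper"
	"github.com/tedsuo/ifrit/http_server"
	"github.com/tedsuo/ifrit/sigmon"
)

func main() {
	// Placeholder members: two HTTP servers wrapped as ifrit runners.
	members := grouper.Members{
		{Name: "api", Runner: http_server.New("127.0.0.1:8080", http.NotFoundHandler())},
		{Name: "health", Runner: http_server.New("127.0.0.1:8081", http.NotFoundHandler())},
	}

	// Start the members in the listed order; on os.Interrupt, stop them in reverse.
	group := grouper.NewOrdered(os.Interrupt, members)

	// sigmon relays OS signals to the group; Invoke blocks until the group is ready.
	process := ifrit.Invoke(sigmon.New(group))

	// Wait yields the group's exit error, nil on a clean shutdown.
	if err := <-process.Wait(); err != nil {
		os.Exit(1)
	}
}

Prepending a member, as the debug-server blocks above do, makes it the first to start and the last to stop, since shutdown runs in reverse of the start order.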