Example #1
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	logger, reconfigurableSink := cf_lager.New("tps-listener")
	initializeDropsonde(logger)
	noaaClient := noaa.NewConsumer(*trafficControllerURL, &tls.Config{InsecureSkipVerify: *skipSSLVerification}, nil)
	defer noaaClient.Close()
	apiHandler := initializeHandler(logger, noaaClient, *maxInFlightRequests, initializeBBSClient(logger))

	members := grouper.Members{
		{"api", http_server.New(*listenAddr, apiHandler)},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)

	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err := <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
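Most of the main functions in this collection follow the same shape: collect ifrit runners into a grouper.Members list, wrap the group in sigmon.New so OS signals are forwarded, start everything with ifrit.Invoke, and block on the process's Wait channel. The following is a minimal, self-contained sketch of that pattern; the runner, member name, and exit handling are illustrative placeholders rather than code taken from any of the examples.

package main

import (
	"os"

	"github.com/tedsuo/ifrit"
	"github.com/tedsuo/ifrit/grouper"
	"github.com/tedsuo/ifrit/sigmon"
)

func main() {
	// A trivial runner: report readiness, then block until a signal arrives.
	placeholder := ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		close(ready)
		<-signals
		return nil
	})

	members := grouper.Members{
		{Name: "placeholder", Runner: placeholder},
	}

	// NewOrdered starts members in order and shuts them down in reverse
	// when the group is signalled.
	group := grouper.NewOrdered(os.Interrupt, members)

	// sigmon forwards termination signals from the OS to the wrapped runner.
	monitor := ifrit.Invoke(sigmon.New(group))

	if err := <-monitor.Wait(); err != nil {
		os.Exit(1)
	}
}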
Example #2
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	cf_http.Initialize(*communicationTimeout)

	logger, reconfigurableSink := cf_lager.New("file-server")

	initializeDropsonde(logger)

	members := grouper.Members{
		{"file server", initializeServer(logger)},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)

	monitor := ifrit.Invoke(sigmon.New(group))
	logger.Info("ready")

	err := <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
Example #3
func (db *serviceClient) CellEvents(logger lager.Logger) <-chan models.CellEvent {
	logger = logger.Session("cell-events")

	disappearanceWatcher, disappeared := locket.NewDisappearanceWatcher(logger, db.consulClient, CellSchemaRoot(), db.clock)
	process := ifrit.Invoke(disappearanceWatcher)

	events := make(chan models.CellEvent)
	go func() {
		for {
			select {
			case keys, ok := <-disappeared:
				if !ok {
					process.Signal(os.Interrupt)
					return
				}

				cellIDs := make([]string, len(keys))
				for i, key := range keys {
					cellIDs[i] = path.Base(key)
				}
				logger.Info("cell-disappeared", lager.Data{"cell_ids": cellIDs})
				events <- models.NewCellDisappearedEvent(cellIDs)
			}
		}
	}()

	return events
}
Example #4
func main() {
	logger := configureLogger()
	flag.Parse()

	validateFlags(logger)

	router := configureRouter(logger)

	var server ifrit.Runner
	server = http_server.New(*listenAddress, router)
	members := grouper.Members{
		{"server", server},
	}

	group := grouper.NewOrdered(os.Interrupt, members)

	monitor := ifrit.Invoke(sigmon.New(group))
	logger.Info("started")

	err := <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
Example #5
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	cf_http.Initialize(*communicationTimeout)

	logger, reconfigurableSink := cf_lager.New("auctioneer")
	initializeDropsonde(logger)

	if err := validateBBSAddress(); err != nil {
		logger.Fatal("invalid-bbs-address", err)
	}

	consulClient, err := consuladapter.NewClientFromUrl(*consulCluster)
	if err != nil {
		logger.Fatal("new-client-failed", err)
	}

	port, err := strconv.Atoi(strings.Split(*listenAddr, ":")[1])
	if err != nil {
		logger.Fatal("invalid-port", err)
	}

	clock := clock.NewClock()
	auctioneerServiceClient := auctioneer.NewServiceClient(consulClient, clock)

	auctionRunner := initializeAuctionRunner(logger, *cellStateTimeout,
		initializeBBSClient(logger), *startingContainerWeight)
	auctionServer := initializeAuctionServer(logger, auctionRunner)
	lockMaintainer := initializeLockMaintainer(logger, auctioneerServiceClient, port)
	registrationRunner := initializeRegistrationRunner(logger, consulClient, clock, port)

	members := grouper.Members{
		{"lock-maintainer", lockMaintainer},
		{"auction-runner", auctionRunner},
		{"auction-server", auctionServer},
		{"registration-runner", registrationRunner},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)

	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err = <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
Example #6
func (ts *timeout) Run(signals <-chan os.Signal, ready chan<- struct{}) error {
	runProcess := ifrit.Invoke(ts.runStep)

	timer := time.NewTimer(time.Duration(ts.duration))

	var runErr error
	var timeoutErr error
	var sig os.Signal

dance:
	for {
		select {
		case runErr = <-runProcess.Wait():
			break dance
		case <-timer.C:
			ts.timedOut = true
			timeoutErr = ErrStepTimedOut
			runProcess.Signal(os.Kill)
		case sig = <-signals:
			runProcess.Signal(sig)
		}
	}

	if timeoutErr != nil {
		return timeoutErr
	}

	if runErr != nil {
		return runErr
	}

	return nil
}
Example #7
func StartGarden(gardenBin, containerizerBin string, argv ...string) (ifrit.Process, garden.Client) {
	gardenPort, err := localip.LocalPort()
	Expect(err).NotTo(HaveOccurred())
	gardenAddr := fmt.Sprintf("127.0.0.1:%d", gardenPort)

	tmpDir := os.TempDir()

	// If below fails, try
	// netsh advfirewall firewall add rule name="Open Port 48080"  dir=in action=allow protocol=TCP localport=48080

	containerizerPort, err := localip.LocalPort()
	Expect(err).NotTo(HaveOccurred())
	gardenRunner := garden_runner.New("tcp4", gardenAddr, tmpDir, gardenBin, fmt.Sprintf("http://127.0.0.1:%d", containerizerPort))
	containerizerRunner := ginkgomon.New(ginkgomon.Config{
		Name:              "containerizer",
		Command:           exec.Command(containerizerBin, "127.0.0.1", strconv.Itoa(int(containerizerPort))),
		AnsiColorCode:     "",
		StartCheck:        "Control-C to quit.",
		StartCheckTimeout: 10 * time.Second,
		Cleanup:           func() {},
	})

	group := grouper.NewOrdered(syscall.SIGTERM, []grouper.Member{
		{Name: "containerizer", Runner: containerizerRunner},
		{Name: "garden", Runner: gardenRunner},
	})

	gardenProcess := ifrit.Invoke(group)

	return gardenProcess, gardenRunner.NewClient()
}
Example #8
func (cmd *WebCommand) Execute(args []string) error {
	tsa := &tsacmd.TSACommand{
		BindIP:   cmd.TSA.BindIP,
		BindPort: cmd.TSA.BindPort,

		HostKeyPath:        cmd.TSA.HostKeyPath,
		AuthorizedKeysPath: cmd.TSA.AuthorizedKeysPath,

		HeartbeatInterval: cmd.TSA.HeartbeatInterval,
	}

	cmd.populateTSAFlagsFromATCFlags(tsa)

	atcRunner, err := cmd.ATCCommand.Runner(args)
	if err != nil {
		return err
	}

	tsaRunner, err := tsa.Runner(args)
	if err != nil {
		return err
	}

	runner := sigmon.New(grouper.NewParallel(os.Interrupt, grouper.Members{
		{"atc", atcRunner},
		{"tsa", tsaRunner},
	}))

	return <-ifrit.Invoke(runner).Wait()
}
Example #9
func startGarden(argv ...string) garden.Client {
	gardenAddr := fmt.Sprintf("/tmp/garden_%d.sock", GinkgoParallelNode())
	gardenRunner = runner.New("unix", gardenAddr, gardenBin, argv...)
	gardenProcess = ifrit.Invoke(gardenRunner)

	return gardenRunner.NewClient()
}
Example #10
func (f *FakeCell) SpinUp(serviceClient bbs.ServiceClient) {
	//make a test-friendly AuctionRepDelegate using the auction package's SimulationRepDelegate
	f.SimulationRep = simulationrep.New(f.stack, "Z0", rep.Resources{
		DiskMB:     100,
		MemoryMB:   100,
		Containers: 100,
	})

	//spin up an http auction server
	logger := lager.NewLogger(f.cellID)
	logger.RegisterSink(lager.NewWriterSink(GinkgoWriter, lager.INFO))

	fakeLRPStopper := new(fake_lrp_stopper.FakeLRPStopper)
	fakeExecutorClient := new(executorfakes.FakeClient)
	fakeEvacuatable := new(fake_evacuation_context.FakeEvacuatable)

	handlers := rephandlers.New(f.SimulationRep, fakeLRPStopper, fakeExecutorClient, fakeEvacuatable, logger)
	router, err := rata.NewRouter(rep.Routes, handlers)
	Expect(err).NotTo(HaveOccurred())
	f.server = httptest.NewServer(router)

	presence := models.NewCellPresence(
		f.cellID,
		f.server.URL,
		"az1",
		models.NewCellCapacity(512, 1024, 124),
		[]string{},
		[]string{})

	f.heartbeater = ifrit.Invoke(serviceClient.NewCellPresenceRunner(logger, &presence, time.Second))
}
Example #11
func (s *windowsJobSupervisor) MonitorJobFailures(handler JobFailureHandler) error {
	hl := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		defer r.Body.Close()
		if s.stateIs(stateDisabled) {
			return
		}
		var event windowsServiceEvent
		err := json.NewDecoder(r.Body).Decode(&event)
		if err != nil {
			s.logger.Error(s.logTag, "MonitorJobFailures received unknown request: %s", err)
			return
		}
		handler(boshalert.MonitAlert{
			Action:      "Start",
			Date:        time.Now().Format(time.RFC1123Z),
			Event:       event.Event,
			ID:          event.ProcessName,
			Service:     event.ProcessName,
			Description: fmt.Sprintf("exited with code %d", event.ExitCode),
		})
	})
	server := http_server.New(fmt.Sprintf("localhost:%d", s.jobFailuresServerPort), hl)
	process := ifrit.Invoke(server)
	for {
		select {
		case <-s.cancelServer:
			process.Signal(os.Kill)
		case err := <-process.Wait():
			if err != nil {
				return bosherr.WrapError(err, "Listen for HTTP")
			}
			return nil
		}
	}
}
Example #12
func ServeAPI(l logger.Logger, conf *config.Config) {
	store := connectToStore(l, conf)

	apiHandler, err := handlers.New(l, store, buildTimeProvider(l))
	if err != nil {
		l.Error("initialize-handler.failed", err)
		panic(err)
	}
	handler := handlers.BasicAuthWrap(apiHandler, conf.APIServerUsername, conf.APIServerPassword)

	listenAddr := fmt.Sprintf("%s:%d", conf.APIServerAddress, conf.APIServerPort)

	members := grouper.Members{
		{"api", http_server.New(listenAddr, handler)},
	}

	group := grouper.NewOrdered(os.Interrupt, members)

	monitor := ifrit.Invoke(sigmon.New(group))

	l.Info("started")
	l.Info(listenAddr)

	err = <-monitor.Wait()
	if err != nil {
		l.Error("exited", err)
		os.Exit(1)
	}

	l.Info("exited")
	os.Exit(0)
}
Example #13
func StartDebugServer(address string, sink *lager.ReconfigurableSink, metrics Metrics) (ifrit.Process, error) {
	expvar.Publish("numCPUS", expvar.Func(func() interface{} {
		return metrics.NumCPU()
	}))

	expvar.Publish("numGoRoutines", expvar.Func(func() interface{} {
		return metrics.NumGoroutine()
	}))

	expvar.Publish("loopDevices", expvar.Func(func() interface{} {
		return metrics.LoopDevices()
	}))

	expvar.Publish("backingStores", expvar.Func(func() interface{} {
		return metrics.BackingStores()
	}))

	expvar.Publish("depotDirs", expvar.Func(func() interface{} {
		return metrics.DepotDirs()
	}))

	server := http_server.New(address, handler(sink))
	p := ifrit.Invoke(server)
	select {
	case <-p.Ready():
	case err := <-p.Wait():
		return nil, err
	}
	return p, nil
}
Example #14
func start(creator RunnerCreator, network, addr string, argv ...string) *RunningGarden {
	tmpDir := filepath.Join(
		os.TempDir(),
		fmt.Sprintf("test-garden-%d", ginkgo.GinkgoParallelNode()),
	)

	if GraphRoot == "" {
		GraphRoot = filepath.Join(tmpDir, "graph")
	}

	graphPath := filepath.Join(GraphRoot, fmt.Sprintf("node-%d", ginkgo.GinkgoParallelNode()))

	r := &RunningGarden{
		GraphRoot: GraphRoot,
		GraphPath: graphPath,
		tmpdir:    tmpDir,
		logger:    lagertest.NewTestLogger("garden-runner"),

		Client: client.New(connection.New(network, addr)),
	}

	c := cmd(tmpDir, graphPath, network, addr, GardenBin, BinPath, RootFSPath, argv...)
	r.process = ifrit.Invoke(creator.Create(c))
	r.Pid = c.Process.Pid

	return r
}
Example #15
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	cf_http.Initialize(*communicationTimeout)

	logger, reconfigurableSink := cf_lager.New("auctioneer")
	initializeDropsonde(logger)

	if err := validateBBSAddress(); err != nil {
		logger.Fatal("invalid-bbs-address", err)
	}

	client, err := consuladapter.NewClient(*consulCluster)
	if err != nil {
		logger.Fatal("new-client-failed", err)
	}

	sessionMgr := consuladapter.NewSessionManager(client)
	consulSession, err := consuladapter.NewSession("auctioneer", *lockTTL, client, sessionMgr)
	if err != nil {
		logger.Fatal("consul-session-failed", err)
	}

	clock := clock.NewClock()
	bbsServiceClient := bbs.NewServiceClient(consulSession, clock)
	auctioneerServiceClient := auctioneer.NewServiceClient(consulSession, clock)

	auctionRunner := initializeAuctionRunner(logger, *cellStateTimeout, initializeBBSClient(logger), bbsServiceClient)
	auctionServer := initializeAuctionServer(logger, auctionRunner)
	lockMaintainer := initializeLockMaintainer(logger, auctioneerServiceClient)

	members := grouper.Members{
		{"lock-maintainer", lockMaintainer},
		{"auction-runner", auctionRunner},
		{"auction-server", auctionServer},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)

	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err = <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
Example #16
func (cmd *ATCCommand) Execute(args []string) error {
	runner, err := cmd.Runner(args)
	if err != nil {
		return err
	}

	return <-ifrit.Invoke(sigmon.New(runner)).Wait()
}
Example #17
func run(cmd *exec.Cmd) error {
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	runner := sigmon.New(cmdRunner{cmd})

	process := ifrit.Invoke(runner)
	return <-process.Wait()
}
Example #18
func Run(address string) error {
	p := ifrit.Invoke(Runner(address))
	select {
	case <-p.Ready():
	case err := <-p.Wait():
		return err
	}
	return nil
}
Example #19
func Run(address string, sink *lager.ReconfigurableSink) (ifrit.Process, error) {
	p := ifrit.Invoke(Runner(address, sink))
	select {
	case <-p.Ready():
	case err := <-p.Wait():
		return nil, err
	}
	return p, nil
}
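Examples #18 and #19 show the common invoke-and-wait-for-readiness idiom: after ifrit.Invoke, select on Ready() and Wait() so the caller only proceeds once the runner is up, and receives the startup error otherwise. Below is a hedged variation of the same idiom with an explicit timeout; the helper name and the timeout handling are assumptions for illustration, and the usual ifrit, errors, os, and time imports are assumed.

// startReady is a hypothetical helper: it invokes any ifrit.Runner and blocks
// until the process reports readiness, exits early with an error, or the
// timeout elapses (in which case the process is killed).
func startReady(runner ifrit.Runner, timeout time.Duration) (ifrit.Process, error) {
	p := ifrit.Invoke(runner)
	select {
	case <-p.Ready():
		return p, nil
	case err := <-p.Wait():
		return nil, err
	case <-time.After(timeout):
		p.Signal(os.Kill)
		return nil, errors.New("runner did not become ready in time")
	}
}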
Example #20
func (syncer *Syncer) Sync() {
	pipelines, err := syncer.syncherDB.GetAllPipelines()
	if err != nil {
		syncer.logger.Error("failed-to-get-pipelines", err)
		return
	}

	for id, runningPipeline := range syncer.runningPipelines {
		select {
		case <-runningPipeline.Exited:
			syncer.logger.Debug("pipeline-exited", lager.Data{"pipeline-id": id})
			syncer.removePipeline(id)
		default:
		}

		var found bool
		for _, pipeline := range pipelines {
			if pipeline.Paused {
				continue
			}

			if pipeline.ID == id && pipeline.Name == runningPipeline.Name {
				found = true
			}
		}

		if !found {
			syncer.logger.Debug("stopping-pipeline", lager.Data{"pipeline-id": id})
			runningPipeline.Process.Signal(os.Interrupt)
			syncer.removePipeline(id)

			err := syncer.syncherDB.ResetBuildPreparationsWithPipelinePaused(id)
			if err != nil {
				syncer.logger.Error("updating-build-preps-stopping-pipeline", err, lager.Data{"pipeline-id": id})
			}
		}
	}

	for _, pipeline := range pipelines {
		if pipeline.Paused || syncer.isPipelineRunning(pipeline.ID) {
			continue
		}

		pipelineDB := syncer.pipelineDBFactory.Build(pipeline)
		runner := syncer.pipelineRunnerFactory(pipelineDB)

		syncer.logger.Debug("starting-pipeline", lager.Data{"pipeline": pipeline.Name})

		process := ifrit.Invoke(runner)

		syncer.runningPipelines[pipeline.ID] = runningPipeline{
			Name:    pipeline.Name,
			Process: process,
			Exited:  process.Wait(),
		}
	}
}
Example #21
func start(network, addr string, argv ...string) *RunningGarden {
	tmpDir := filepath.Join(
		os.TempDir(),
		fmt.Sprintf("test-garden-%d", ginkgo.GinkgoParallelNode()),
	)
	Expect(os.MkdirAll(tmpDir, 0755)).To(Succeed())

	if GraphRoot == "" {
		GraphRoot = filepath.Join(tmpDir, "graph")
	}

	graphPath := filepath.Join(GraphRoot, fmt.Sprintf("node-%d", ginkgo.GinkgoParallelNode()))
	stateDirPath := filepath.Join(tmpDir, "state")
	depotPath := filepath.Join(tmpDir, "containers")
	snapshotsPath := filepath.Join(tmpDir, "snapshots")

	if err := os.MkdirAll(stateDirPath, 0755); err != nil {
		Expect(err).ToNot(HaveOccurred())
	}

	if err := os.MkdirAll(depotPath, 0755); err != nil {
		Expect(err).ToNot(HaveOccurred())
	}

	if err := os.MkdirAll(snapshotsPath, 0755); err != nil {
		Expect(err).ToNot(HaveOccurred())
	}

	MustMountTmpfs(graphPath)

	r := &RunningGarden{
		GraphRoot:     GraphRoot,
		GraphPath:     graphPath,
		StateDirPath:  stateDirPath,
		DepotPath:     depotPath,
		SnapshotsPath: snapshotsPath,
		tmpdir:        tmpDir,
		logger:        lagertest.NewTestLogger("garden-runner"),

		Client: client.New(connection.New(network, addr)),
	}

	c := cmd(stateDirPath, depotPath, snapshotsPath, graphPath, network, addr, GardenBin, BinPath, RootFSPath, argv...)
	r.runner = ginkgomon.New(ginkgomon.Config{
		Name:              "garden-linux",
		Command:           c,
		AnsiColorCode:     "31m",
		StartCheck:        "garden-linux.started",
		StartCheckTimeout: 30 * time.Second,
	})

	r.process = ifrit.Invoke(r.runner)
	r.Pid = c.Process.Pid

	return r
}
Example #22
func (runner *Runner) Run(signals <-chan os.Signal, ready chan<- struct{}) error {
	close(ready)

	if runner.noop {
		<-signals
		return nil
	}

	runner.logger.Info("start")
	defer runner.logger.Info("done")

	ticker := time.NewTicker(runner.syncInterval)

	scannersGroup := grouper.NewDynamic(nil, 0, 0)

	scannersClient := scannersGroup.Client()
	exits := scannersClient.ExitListener()
	insertScanner := scannersClient.Inserter()

	scanners := ifrit.Invoke(scannersGroup)

	scanning := make(map[string]bool)
	scanningResourceTypes := make(map[string]bool)

	runner.tick(scanning, scanningResourceTypes, insertScanner)

dance:
	for {
		select {
		case <-signals:
			scanners.Signal(os.Interrupt)

			// don't bother waiting for scanners on shutdown

			break dance

		case exited := <-exits:
			if exited.Err != nil {
				runner.logger.Error("scanner-failed", exited.Err, lager.Data{
					"member": exited.Member.Name,
				})
			} else {
				runner.logger.Info("scanner-exited", lager.Data{
					"member": exited.Member.Name,
				})
			}

			delete(scanning, exited.Member.Name)

		case <-ticker.C:
			runner.tick(scanning, scanningResourceTypes, insertScanner)
		}
	}

	return nil
}
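The runner above uses a dynamic group: members can be added while the group is running via the channel returned by Inserter(), and their exits are observed on ExitListener(). A hedged sketch of what inserting a scanner member might look like; the helper and its bookkeeping are illustrative and not taken from the example.

// insertScannerMember is a hypothetical helper: it registers a named runner
// with the running dynamic group, skipping names that are already tracked.
func insertScannerMember(insert chan<- grouper.Member, scanning map[string]bool, name string, r ifrit.Runner) {
	if scanning[name] {
		return // this scanner is already running
	}
	scanning[name] = true
	insert <- grouper.Member{Name: name, Runner: r}
}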
Example #23
func startGarden(argv ...string) garden.Client {
	gardenBin, err := gexec.Build("github.com/cloudfoundry-incubator/guardian/cmd/guardian")
	Expect(err).NotTo(HaveOccurred())

	gardenAddr := fmt.Sprintf("/tmp/garden_%d.sock", GinkgoParallelNode())
	gardenRunner := runner.New("unix", gardenAddr, gardenBin, argv...)
	ifrit.Invoke(gardenRunner)

	return gardenRunner.NewClient()
}
Example #24
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	cf_http.Initialize(*communicationTimeout)

	logger, reconfigurableSink := cf_lager.New(*sessionName)
	natsClient := diegonats.NewClient()
	clock := clock.NewClock()
	syncer := syncer.NewSyncer(clock, *syncInterval, natsClient, logger)

	initializeDropsonde(logger)

	natsClientRunner := diegonats.NewClientRunner(*natsAddresses, *natsUsername, *natsPassword, logger, natsClient)

	table := initializeRoutingTable()
	emitter := initializeNatsEmitter(natsClient, logger)
	watcher := ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		return watcher.NewWatcher(initializeBBSClient(logger), clock, table, emitter, syncer.Events(), logger).Run(signals, ready)
	})

	syncRunner := ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		return syncer.Run(signals, ready)
	})

	lockMaintainer := initializeLockMaintainer(logger, *consulCluster, *sessionName, *lockTTL, *lockRetryInterval, clock)

	members := grouper.Members{
		{"lock-maintainer", lockMaintainer},
		{"nats-client", natsClientRunner},
		{"watcher", watcher},
		{"syncer", syncRunner},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)

	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err := <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
Example #25
func main() {
	logger := lager.NewLogger("checkin")
	logger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.INFO))

	var opts Opts

	_, err := flags.Parse(&opts)
	if err != nil {
		logger.Error("parsing-flags", err)
		os.Exit(1)
	}

	// ts := oauth2.StaticTokenSource(
	// 	&oauth2.Token{AccessToken: opts.GitHubAccessToken},
	// )
	// tc := oauth2.NewClient(oauth2.NoContext, ts)
	// githubClient := github.NewClient(tc)

	// checker := build.NewConcourseChecker()
	// checker = build.NewStatusReporter(checker, githubClient.Repositories)

	dbConn, err := migration.Open(opts.DBDriver, opts.DBURL, migrations.Migrations)
	if err != nil {
		logger.Error("failed-to-run-migrations", err)
		os.Exit(1)
	}
	sqlDB := db.NewSQL(logger.Session("db"), dbConn)

	enqueuer := build.NewEnqueuer(sqlDB)
	apiServer := api.NewServer(opts.GitHubSecret, enqueuer)

	members := []grouper.Member{
		{
			"api",
			http_server.New(
				opts.Addr,
				apiServer,
			),
		},
	}

	group := grouper.NewParallel(os.Interrupt, members)
	running := ifrit.Invoke(sigmon.New(group))

	logger.Info("listening", lager.Data{
		"api": opts.Addr,
	})

	err = <-running.Wait()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
Example #26
func (t *ConsulHelper) RegisterCell(cell *models.CellPresence) {
	var err error
	jsonBytes, err := json.Marshal(cell)
	Expect(err).NotTo(HaveOccurred())

	// Use NewLock instead of NewPresence in order to block on the cell being registered
	runner := locket.NewLock(t.logger, t.consulClient, bbs.CellSchemaPath(cell.CellId), jsonBytes, clock.NewClock(), locket.RetryInterval, locket.LockTTL)
	ifrit.Invoke(runner)

	Expect(err).NotTo(HaveOccurred())
}
Example #27
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	logger, reconfigurableSink := cf_lager.New("sshd")

	serverConfig, err := configure(logger)
	if err != nil {
		logger.Error("configure-failed", err)
		os.Exit(1)
	}

	runner := handlers.NewCommandRunner()
	shellLocator := handlers.NewShellLocator()
	dialer := &net.Dialer{}

	sshDaemon := daemon.New(
		logger,
		serverConfig,
		nil,
		map[string]handlers.NewChannelHandler{
			"session":      handlers.NewSessionChannelHandler(runner, shellLocator, getDaemonEnvironment(), 15*time.Second),
			"direct-tcpip": handlers.NewDirectTcpipChannelHandler(dialer),
		},
	)
	server := server.NewServer(logger, *address, sshDaemon)

	members := grouper.Members{
		{"sshd", server},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)
	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err = <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
	os.Exit(0)
}
Example #28
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	cf_http.Initialize(*communicationTimeout)

	logger, reconfigurableSink := cf_lager.New("ssh-proxy")

	initializeDropsonde(logger)

	proxyConfig, err := configureProxy(logger)
	if err != nil {
		logger.Error("configure-failed", err)
		os.Exit(1)
	}

	sshProxy := proxy.New(logger, proxyConfig)
	server := server.NewServer(logger, *address, sshProxy)

	consulClient, err := consuladapter.NewClientFromUrl(*consulCluster)
	if err != nil {
		logger.Fatal("new-client-failed", err)
	}

	registrationRunner := initializeRegistrationRunner(logger, consulClient, *address, clock.NewClock())

	members := grouper.Members{
		{"ssh-proxy", server},
		{"registration-runner", registrationRunner},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{{
			"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink),
		}}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)
	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err = <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
	os.Exit(0)
}
Example #29
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)

	lifecycles := flags.LifecycleMap{}
	flag.Var(&lifecycles, "lifecycle", "app lifecycle binary bundle mapping (lifecycle[/stack]:bundle-filepath-in-fileserver)")
	flag.Parse()

	cf_http.Initialize(*communicationTimeout)
	logger, reconfigurableSink := cf_lager.New("nsync-listener")

	initializeDropsonde(logger)

	recipeBuilderConfig := recipebuilder.Config{
		Lifecycles:    lifecycles,
		FileServerURL: *fileServerURL,
		KeyFactory:    keys.RSAKeyPairFactory,
	}
	recipeBuilders := map[string]recipebuilder.RecipeBuilder{
		"buildpack": recipebuilder.NewBuildpackRecipeBuilder(logger, recipeBuilderConfig),
		"docker":    recipebuilder.NewDockerRecipeBuilder(logger, recipeBuilderConfig),
	}

	handler := handlers.New(logger, initializeBBSClient(logger), recipeBuilders)

	members := grouper.Members{
		{"server", http_server.New(*listenAddress, handler)},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	group := grouper.NewOrdered(os.Interrupt, members)

	monitor := ifrit.Invoke(sigmon.New(group))

	logger.Info("started")

	err := <-monitor.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}

	logger.Info("exited")
}
Example #30
func main() {
	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)

	flag.Var(
		&insecureDockerRegistries,
		"insecureDockerRegistry",
		"Docker registry to allow connecting to even if not secure. (Can be specified multiple times to allow insecure connection to multiple repositories)",
	)

	lifecycles := flags.LifecycleMap{}
	flag.Var(&lifecycles, "lifecycle", "app lifecycle binary bundle mapping (lifecycle[/stack]:bundle-filepath-in-fileserver)")
	flag.Parse()

	logger, reconfigurableSink := cf_lager.New("stager")
	initializeDropsonde(logger)

	ccClient := cc_client.NewCcClient(*ccBaseURL, *ccUsername, *ccPassword, *skipCertVerify)

	backends := initializeBackends(logger, lifecycles)

	handler := handlers.New(logger, ccClient, initializeBBSClient(logger), backends, clock.NewClock())

	members := grouper.Members{
		{"server", http_server.New(*listenAddress, handler)},
	}

	if dbgAddr := cf_debug_server.DebugAddress(flag.CommandLine); dbgAddr != "" {
		members = append(grouper.Members{
			{"debug-server", cf_debug_server.Runner(dbgAddr, reconfigurableSink)},
		}, members...)
	}

	logger.Info("starting")

	group := grouper.NewOrdered(os.Interrupt, members)

	process := ifrit.Invoke(sigmon.New(group))

	logger.Info("Listening for staging requests!")

	err := <-process.Wait()
	if err != nil {
		logger.Fatal("Stager exited with error", err)
	}

	logger.Info("stopped")
}