Example #1
func StartGnatsd(natsPort int) (ifrit.Process, diegonats.NATSClient) {
	ginkgomonRunner := NewGnatsdTestRunner(natsPort)
	gnatsdProcess := ifrit.Envoke(ginkgomonRunner)

	natsClient := diegonats.NewClient()
	_, err := natsClient.Connect([]string{fmt.Sprintf("nats://127.0.0.1:%d", natsPort)})
	Expect(err).ShouldNot(HaveOccurred())

	return gnatsdProcess, natsClient
}
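
Envoke starts the runner and blocks until it signals readiness (newer ifrit releases spell the same call Invoke). A minimal, self-contained sketch of the contract a runner has to satisfy; the noopRunner type below is made up for illustration:

package main

import (
	"os"

	"github.com/tedsuo/ifrit"
)

// noopRunner satisfies ifrit.Runner: Run reports readiness, then waits for a signal.
type noopRunner struct{}

func (noopRunner) Run(signals <-chan os.Signal, ready chan<- struct{}) error {
	close(ready) // Envoke/Invoke unblocks once ready is closed
	<-signals    // block until the process is signaled
	return nil
}

func main() {
	process := ifrit.Envoke(noopRunner{})
	process.Signal(os.Interrupt) // ask the runner to stop
	<-process.Wait()             // nil: the runner exited cleanly
}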
Example #2
func main() {
	flag.Parse()

	logger := lager.NewLogger("lattice-app")
	if quiet {
		logger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.INFO))
	} else {
		logger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.DEBUG))
	}

	ports := getServerPorts()

	logger.Info("lattice-app.starting", lager.Data{"ports": ports})
	handler, err := rata.NewRouter(routes.Routes, handlers.New(logger))
	if err != nil {
		logger.Fatal("router.creation.failed", err)
	}

	index, err := helpers.FetchIndex()
	appName := fetchAppName()
	go func() {
		t := time.NewTicker(time.Second)
		for {
			<-t.C
			if err != nil {
				fmt.Fprintf(os.Stderr, "Failed to fetch index: %s\n", err.Error())
			} else {
				fmt.Printf("%s. Says %s. on index: %d\n", appName, message, index)
			}
		}
	}()

	wg := sync.WaitGroup{}
	for _, port := range ports {
		wg.Add(1)
		go func(wg *sync.WaitGroup, port string) {
			defer wg.Done()
			server := ifrit.Envoke(http_server.New(":"+port, handler))
			logger.Info("lattice-app.up", lager.Data{"port": port})
			err = <-server.Wait()
			if err != nil {
				logger.Error("shutting down server", err, lager.Data{"server port": port})
			}
			logger.Info("shutting down server", lager.Data{"server port": port})
		}(&wg, port)
	}
	wg.Wait()
	logger.Info("shutting down lattice app")
}
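
The hand-rolled WaitGroup above can also be expressed with ifrit's grouper package, which later examples in this list use directly. A sketch under that assumption, reusing handler, ports, and logger from the example above:

// Sketch: one http_server member per port, supervised as a parallel group.
// Reuses handler, ports, and logger from the example above.
members := grouper.Members{}
for _, port := range ports {
	members = append(members, grouper.Member{
		Name:   "listener-" + port,
		Runner: http_server.New(":"+port, handler),
	})
}

group := ifrit.Envoke(grouper.NewParallel(os.Interrupt, members))
logger.Info("lattice-app.up", lager.Data{"ports": ports})

if err := <-group.Wait(); err != nil {
	logger.Error("lattice-app.exited-with-failure", err)
}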
Example #3
func main() {
	logger := lager.NewLogger("lattice-app")
	if quiet {
		logger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.INFO))
	} else {
		logger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.DEBUG))
	}

	port := os.Getenv("PORT")
	if port == "" {
		port = "8080"
	}

	logger.Info("lattice-app.starting", lager.Data{"port": port})
	handler, err := rata.NewRouter(routes.Routes, handlers.New(logger))
	if err != nil {
		logger.Fatal("router.creation.failed", err)
	}

	index, err := helpers.FetchIndex()
	appName := helpers.FetchAppName()

	go func() {
		t := time.NewTicker(time.Second)
		for {
			<-t.C
			if err != nil {
				fmt.Fprintf(os.Stderr, "Failed to fetch index: %s\n", err.Error())
			} else {
				fmt.Printf("%s. Says %s. on index: %d\n", appName, message, index)
			}
		}
	}()

	server := ifrit.Envoke(http_server.New(":"+port, handler))
	logger.Info("lattice-app.up", lager.Data{"port": port})
	err = <-server.Wait()
	if err != nil {
		logger.Error("farewell", err)
	}
	logger.Info("farewell")
}
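
Note that nothing in this main ever signals the server process, so it only returns if the server fails on its own. Wrapping the runner in sigmon, as the larger atc example further below does, forwards SIGINT/SIGTERM to it; roughly (same handler, port, and logger as above):

// Sketch: let OS shutdown signals reach the server via sigmon.
server := ifrit.Envoke(sigmon.New(http_server.New(":"+port, handler)))
logger.Info("lattice-app.up", lager.Data{"port": port})

if err := <-server.Wait(); err != nil {
	logger.Error("farewell", err)
}
logger.Info("farewell")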
Example #4
			break
		}

		precedingMigrations = append(precedingMigrations, migration)
	}

	BeforeEach(func() {
		Ω(migrationFromSet).ShouldNot(BeNil(), "Migration was not added to the list!")

		var err error

		postgresRunner = postgresrunner.Runner{
			Port: 5433 + GinkgoParallelNode(),
		}

		dbProcess = ifrit.Envoke(postgresRunner)

		postgresRunner.CreateTestDB()

		dbConn, err = migration.Open("postgres", postgresRunner.DataSourceName(), precedingMigrations)
		Ω(err).ShouldNot(HaveOccurred())
	})

	AfterEach(func() {
		err := dbConn.Close()
		Ω(err).ShouldNot(HaveOccurred())

		postgresRunner.DropTestDB()

		dbProcess.Signal(os.Interrupt)
		Eventually(dbProcess.Wait(), 10*time.Second).Should(Receive())
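
The AfterEach above is the usual ifrit teardown idiom in these suites: signal the process, then drain Wait() with a deadline. Without Gomega, the same teardown looks roughly like this (same dbProcess as above):

// Plain-Go sketch of the teardown above.
dbProcess.Signal(os.Interrupt)
select {
case <-dbProcess.Wait():
	// postgres runner exited
case <-time.After(10 * time.Second):
	panic("postgres runner did not exit in time")
}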
Example #5
		childRunner1.EnsureExit()
		childRunner2.EnsureExit()
		childRunner3.EnsureExit()
	})

	Describe("Get", func() {
		var member1, member2, member3 grouper.Member

		BeforeEach(func() {
			member1 = grouper.Member{"child1", childRunner1}
			member2 = grouper.Member{"child2", childRunner2}
			member3 = grouper.Member{"child3", childRunner3}

			pool = grouper.NewDynamic(nil, 3, 2)
			client = pool.Client()
			poolProcess = ifrit.Envoke(pool)

			insert := client.Inserter()
			Eventually(insert).Should(BeSent(member1))
			Eventually(insert).Should(BeSent(member2))
			Eventually(insert).Should(BeSent(member3))
		})

		It("returns a process when the member is present", func() {
			signal1 := childRunner1.WaitForCall()
			p, ok := client.Get("child1")
			Ω(ok).Should(BeTrue())
			p.Signal(syscall.SIGUSR2)
			Eventually(signal1).Should(Receive(Equal(syscall.SIGUSR2)))
		})
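
Outside of a test, the same dynamic-group flow reads roughly as below. The two integers passed to NewDynamic appear here to be the pool's capacity and its event buffer size, as in the example above; someRunner stands in for any ifrit.Runner:

// Sketch of non-test usage of the dynamic group shown above.
pool := grouper.NewDynamic(nil, 3, 2)
client := pool.Client()
poolProcess := ifrit.Envoke(pool)

client.Inserter() <- grouper.Member{Name: "child1", Runner: someRunner}

if p, ok := client.Get("child1"); ok {
	p.Signal(syscall.SIGUSR2) // delivered to the member's Run, as in the test
}

poolProcess.Signal(os.Interrupt)
<-poolProcess.Wait()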
Example #6
	)

	BeforeEach(func() {
		startedRequestChan = make(chan struct{}, 1)
		finishRequestChan = make(chan struct{}, 1)
		port := 8000 + GinkgoParallelNode()
		address = fmt.Sprintf("127.0.0.1:%d", port)
	})

	Describe("Envoke", func() {
		var process ifrit.Process

		Context("when the server starts successfully", func() {
			BeforeEach(func() {
				server = http_server.New(address, handler)
				process = ifrit.Envoke(server)
			})

			AfterEach(func() {
				process.Signal(syscall.SIGINT)
				Eventually(process.Wait()).Should(Receive())
			})

			Context("and a request is in flight", func() {
				type httpResponse struct {
					response *http.Response
					err      error
				}
				var responses chan httpResponse

				BeforeEach(func() {
Example #7
			MemoryMB:   128,
			DiskMB:     256,
			Containers: 512,
		}, nil)

		executorClient.ListContainersReturns([]executor.Container{
			{Guid: "container-1"},
			{Guid: "container-2"},
			{Guid: "container-3"},
		}, nil)
	})

	JustBeforeEach(func() {
		reporter = ifrit.Envoke(&metrics.Reporter{
			ExecutorSource: executorClient,
			Interval:       reportInterval,
			Logger:         logger,
		})
	})

	AfterEach(func() {
		reporter.Signal(os.Interrupt)
		Eventually(reporter.Wait()).Should(Receive())
	})

	It("reports the current capacity on the given interval", func() {
		Eventually(func() fake.Metric {
			return sender.GetValue("CapacityTotalMemory")
		}, reportInterval*2).Should(Equal(fake.Metric{
			Value: 1024,
			Unit:  "MiB",
Example #8
				{"child2", r2},
				{"child3", r3},
			}
		})

		AfterEach(func() {
			groupProcess.Signal(os.Kill)
			Eventually(groupProcess.Wait()).Should(Receive())
		})

		JustBeforeEach(func() {
			groupRunner = grouper.NewOrdered(os.Interrupt, members)

			started = make(chan struct{})
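			// Envoke blocks until the runner reports ready (for an ordered group,
			// until every member has started), so it runs in its own goroutine and
			// the test synchronizes on the started channel instead.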
			go func() {
				groupProcess = ifrit.Envoke(groupRunner)
				close(started)
			}()

			Eventually(started).Should(BeClosed())
		})

		It("stops in reverse order", func() {
			groupProcess.Signal(os.Kill)
			Eventually(groupProcess.Wait()).Should(Receive())
			close(startOrder)
			close(stopOrder)

			Ω(startOrder).To(HaveLen(len(stopOrder)))

			order := []int64{}
Example #9
File: main.go Project: utako/atc
func main() {
	flag.Parse()

	if !*dev && (*httpUsername == "" || (*httpHashedPassword == "" && *httpPassword == "")) {
		fatal(errors.New("must specify -httpUsername and -httpPassword or -httpHashedPassword or turn on dev mode"))
	}

	if _, err := os.Stat(*templatesDir); err != nil {
		fatal(errors.New("directory specified via -templates does not exist"))
	}

	if _, err := os.Stat(*publicDir); err != nil {
		fatal(errors.New("directory specified via -public does not exist"))
	}

	logger := lager.NewLogger("atc")

	logLevel := lager.INFO
	if *dev {
		logLevel = lager.DEBUG
	}

	sink := lager.NewReconfigurableSink(lager.NewWriterSink(os.Stdout, lager.DEBUG), logLevel)
	logger.RegisterSink(sink)

	var err error

	var dbConn Db.Conn

	for {
		dbConn, err = migration.Open(*sqlDriver, *sqlDataSource, migrations.Migrations)
		if err != nil {
			if strings.Contains(err.Error(), " dial ") {
				logger.Error("failed-to-open-db", err)
				time.Sleep(5 * time.Second)
				continue
			}

			fatal(err)
		}

		break
	}

	dbConn = Db.Explain(logger, dbConn, 500*time.Millisecond)

	listener := pq.NewListener(*sqlDataSource, time.Second, time.Minute, nil)
	bus := Db.NewNotificationsBus(listener)

	db := Db.NewSQL(logger.Session("db"), dbConn, bus)
	pipelineDBFactory := Db.NewPipelineDBFactory(logger.Session("db"), dbConn, bus, db)

	var configDB Db.ConfigDB
	configDB = Db.PlanConvertingConfigDB{db}

	var resourceTypesNG []atc.WorkerResourceType
	err = json.Unmarshal([]byte(*resourceTypes), &resourceTypesNG)
	if err != nil {
		logger.Fatal("invalid-resource-types", err)
	}

	var workerClient worker.Client
	if *gardenAddr != "" {
		workerClient = worker.NewGardenWorker(
			gclient.New(gconn.NewWithLogger(
				*gardenNetwork,
				*gardenAddr,
				logger.Session("garden-connection"),
			)),
			clock.NewClock(),
			-1,
			resourceTypesNG,
			"linux",
			[]string{},
		)
	} else {
		workerClient = worker.NewPool(worker.NewDBWorkerProvider(db, logger))
	}

	resourceTracker := resource.NewTracker(workerClient)
	gardenFactory := exec.NewGardenFactory(workerClient, resourceTracker, func() string {
		guid, err := uuid.NewV4()
		if err != nil {
			panic("not enough entropy to generate guid: " + err.Error())
		}

		return guid.String()
	})
	execEngine := engine.NewExecEngine(gardenFactory, engine.NewBuildDelegateFactory(db), db)

	engine := engine.NewDBEngine(engine.Engines{execEngine}, db, db)

	var webValidator auth.Validator

	if *httpUsername != "" && *httpHashedPassword != "" {
		webValidator = auth.BasicAuthHashedValidator{
			Username:       *httpUsername,
			HashedPassword: *httpHashedPassword,
		}
	} else if *httpUsername != "" && *httpPassword != "" {
		webValidator = auth.BasicAuthValidator{
			Username: *httpUsername,
			Password: *httpPassword,
		}
	} else {
		webValidator = auth.NoopValidator{}
	}

	callbacksURL, err := url.Parse(*callbacksURLString)
	if err != nil {
		fatal(err)
	}

	drain := make(chan struct{})

	apiHandler, err := api.NewHandler(
		logger,            // logger lager.Logger,
		webValidator,      // validator auth.Validator,
		pipelineDBFactory, // pipelineDBFactory db.PipelineDBFactory,

		configDB, // configDB db.ConfigDB,

		db, // buildsDB buildserver.BuildsDB,
		db, // workerDB workerserver.WorkerDB,
		db, // pipeDB pipes.PipeDB,
		db, // pipelinesDB db.PipelinesDB,

		config.ValidateConfig,       // configValidator configserver.ConfigValidator,
		callbacksURL.String(),       // peerURL string,
		buildserver.NewEventHandler, // eventHandlerFactory buildserver.EventHandlerFactory,
		drain, // drain <-chan struct{},

		engine,       // engine engine.Engine,
		workerClient, // workerClient worker.Client,

		sink, // sink *lager.ReconfigurableSink,

		*cliDownloadsDir, // cliDownloadsDir string,
	)
	if err != nil {
		fatal(err)
	}

	radarSchedulerFactory := pipelines.NewRadarSchedulerFactory(
		resourceTracker,
		*checkInterval,
		db,
		engine,
		db,
	)

	webHandler, err := web.NewHandler(
		logger,
		webValidator,
		radarSchedulerFactory,
		db,
		pipelineDBFactory,
		configDB,
		*templatesDir,
		*publicDir,
		engine,
	)
	if err != nil {
		fatal(err)
	}

	webMux := http.NewServeMux()
	webMux.Handle("/api/v1/", apiHandler)
	webMux.Handle("/", webHandler)

	var httpHandler http.Handler

	httpHandler = webMux

	if !*publiclyViewable {
		httpHandler = auth.Handler{
			Handler:   httpHandler,
			Validator: webValidator,
		}
	}

	// copy Authorization header as ATC-Authorization cookie for websocket auth
	httpHandler = auth.CookieSetHandler{
		Handler: httpHandler,
	}

	httpHandler = httpmetrics.Wrap(httpHandler)

	webListenAddr := fmt.Sprintf("%s:%d", *webListenAddress, *webListenPort)
	debugListenAddr := fmt.Sprintf("%s:%d", *debugListenAddress, *debugListenPort)

	syncer := pipelines.NewSyncer(
		logger.Session("syncer"),
		db,
		pipelineDBFactory,
		func(pipelineDB Db.PipelineDB) ifrit.Runner {
			return grouper.NewParallel(os.Interrupt, grouper.Members{
				{
					pipelineDB.ScopedName("radar"),
					rdr.NewRunner(
						logger.Session(pipelineDB.ScopedName("radar")),
						*noop,
						db,
						radarSchedulerFactory.BuildRadar(pipelineDB),
						pipelineDB,
						1*time.Minute,
					),
				},
				{
					pipelineDB.ScopedName("scheduler"),
					&sched.Runner{
						Logger: logger.Session(pipelineDB.ScopedName("scheduler")),

						Locker: db,
						DB:     pipelineDB,

						Scheduler: radarSchedulerFactory.BuildScheduler(pipelineDB),

						Noop: *noop,

						Interval: 10 * time.Second,
					},
				},
			})
		},
	)

	buildTracker := builds.NewTracker(
		logger.Session("build-tracker"),
		db,
		engine,
	)

	memberGrouper := []grouper.Member{
		{"web", http_server.New(webListenAddr, httpHandler)},

		{"debug", http_server.New(debugListenAddr, http.DefaultServeMux)},

		{"drainer", ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
			close(ready)

			<-signals

			close(drain)

			return nil
		})},

		{"pipelines", pipelines.SyncRunner{
			Syncer:   syncer,
			Interval: 10 * time.Second,
			Clock:    clock.NewClock(),
		}},

		{"builds", builds.TrackerRunner{
			Tracker:  buildTracker,
			Interval: 10 * time.Second,
			Clock:    clock.NewClock(),
		}},
	}

	group := grouper.NewParallel(os.Interrupt, memberGrouper)

	running := ifrit.Envoke(sigmon.New(group))

	logger.Info("listening", lager.Data{
		"web":   webListenAddr,
		"debug": debugListenAddr,
	})

	err = <-running.Wait()
	if err != nil {
		logger.Error("exited-with-failure", err)
		os.Exit(1)
	}
}
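
Stripped of the atc-specific wiring, the tail of this main is the common ifrit top-level pattern: build a member list, group it, wrap the group in sigmon, Envoke it, and block on Wait. A minimal self-contained skeleton with placeholder members:

// Skeleton of the same process-tree pattern, with placeholder runners.
package main

import (
	"net/http"
	"os"

	"github.com/tedsuo/ifrit"
	"github.com/tedsuo/ifrit/grouper"
	"github.com/tedsuo/ifrit/http_server"
	"github.com/tedsuo/ifrit/sigmon"
)

func main() {
	members := grouper.Members{
		{"web", http_server.New(":8080", http.DefaultServeMux)},
		{"drainer", ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
			close(ready) // ready immediately
			<-signals    // block until shutdown, then let the group stop
			return nil
		})},
	}

	group := grouper.NewParallel(os.Interrupt, members)
	running := ifrit.Envoke(sigmon.New(group)) // sigmon forwards SIGINT/SIGTERM to the group

	if err := <-running.Wait(); err != nil {
		os.Exit(1)
	}
}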
Example #10
func TestCCUploader(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "CC Uploader Suite")
}

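// SynchronizedBeforeSuite: the first function runs once, on parallel node 1, to
// build the cc-uploader binary; its return value is handed to the second
// function, which runs on every parallel node.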
var _ = SynchronizedBeforeSuite(func() []byte {
	ccUploaderPath, err := gexec.Build("github.com/cloudfoundry-incubator/cc-uploader/cmd/cc-uploader")
	Expect(err).NotTo(HaveOccurred())
	return []byte(ccUploaderPath)
}, func(ccUploaderPath []byte) {
	fakeCCAddress := fmt.Sprintf("127.0.0.1:%d", 6767+GinkgoParallelNode())
	fakeCC = fake_cc.New(fakeCCAddress)

	ccUploaderBinary = string(ccUploaderPath)
})

var _ = SynchronizedAfterSuite(func() {
}, func() {
	gexec.CleanupBuildArtifacts()
})

var _ = BeforeEach(func() {
	fakeCCProcess = ifrit.Envoke(fakeCC)
})

var _ = AfterEach(func() {
	fakeCCProcess.Signal(os.Kill)
	Eventually(fakeCCProcess.Wait()).Should(Receive(BeNil()))
})
Example #11
				members := []grouper.Member{}
				memChan := pGroup.Exits()
				for i := 0; i < len(rGroup); i++ {
					members = append(members, <-memChan)
				}
				Ω(members).Should(HaveLen(len(rGroup)))
				close(done)
			})
		})
	})

	Describe("ifrit.Envoke()", func() {
		var pGroup ifrit.Process

		BeforeEach(func() {
			pGroup = ifrit.Envoke(rGroup)
		})

		Context("when the linked process is signaled to stop", func() {
			BeforeEach(func() {
				pGroup.Signal(os.Kill)
			})

			It("exits with a combined error message", func() {
				err := <-pGroup.Wait()
				errMsg := err.Error()
				Ω(errMsg).Should(ContainSubstring("ping1"))
				Ω(errMsg).Should(ContainSubstring("ping2"))
				Ω(errMsg).Should(ContainSubstring(test_helpers.PingerExitedFromSignal.Error()))
			})
		})
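
When a grouped runner exits, Wait() delivers a single error whose message aggregates the members' exits, which is what the assertions above check. A sketch of reading it directly, with the same pGroup:

// Sketch: reading the combined exit error directly.
pGroup.Signal(os.Kill)
if err := <-pGroup.Wait(); err != nil {
	fmt.Println(err) // message mentions each member, e.g. "ping1" and "ping2"
}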
Example #12
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"github.com/tedsuo/ifrit"
	"github.com/tedsuo/ifrit/test_helpers"
	"os"
)

var _ = Describe("Process", func() {
	Context("when a process is envoked", func() {
		var pinger test_helpers.PingChan
		var pingProc ifrit.Process
		var errChan chan error

		BeforeEach(func() {
			pinger = make(test_helpers.PingChan)
			pingProc = ifrit.Envoke(pinger)
			errChan = make(chan error)
		})

		Describe("Wait()", func() {
			BeforeEach(func() {
				go func() {
					errChan <- <-pingProc.Wait()
				}()
				go func() {
					errChan <- <-pingProc.Wait()
				}()
			})

			Context("when the process exits", func() {
				BeforeEach(func() {
Example #13
func (mChan MemberChan) envokeMember(name string, runner ifrit.Runner) {
	mChan <- Member{Name: name, Process: ifrit.Envoke(runner)}
}
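
A hypothetical caller of envokeMember might look like the sketch below; MemberChan is assumed to be a channel of Member values and someRunner any ifrit.Runner:

// Hypothetical usage sketch of envokeMember.
mChan := make(MemberChan)
go mChan.envokeMember("worker", someRunner)

m := <-mChan // Envoke has already brought the process up by the time it arrives
m.Process.Signal(os.Interrupt)
<-m.Process.Wait()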