func waitForEvents(
	member Member,
	process ifrit.Process,
	entrance entranceEventChannel,
	exit exitEventChannel,
) {
	select {
	case <-process.Ready():
		entrance <- EntranceEvent{
			Member:  member,
			Process: process,
		}

		exit <- ExitEvent{
			Member: member,
			Err:    <-process.Wait(),
		}

	case err := <-process.Wait():
		entrance <- EntranceEvent{
			Member:  member,
			Process: process,
		}

		exit <- ExitEvent{
			Member: member,
			Err:    err,
		}
	}
}
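// A minimal wiring sketch (not from the original source): fan each member's
// lifecycle out to shared entrance/exit channels via waitForEvents. It
// assumes Member embeds ifrit.Runner, as grouper.Member does in
// github.com/tedsuo/ifrit, and that the channel types are channels of the
// event structs above.
func watchMembers(members []Member) (entranceEventChannel, exitEventChannel) {
	entrance := make(entranceEventChannel, len(members))
	exit := make(exitEventChannel, len(members))

	for _, member := range members {
		// ifrit.Background starts the runner without blocking on readiness,
		// so waitForEvents can observe both the Ready and Wait transitions.
		go waitForEvents(member, ifrit.Background(member), entrance, exit)
	}

	return entrance, exit
}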
func forwardSignals(signals <-chan os.Signal, process ifrit.Process) {
	exit := process.Wait()
	for {
		select {
		case sig := <-signals:
			process.Signal(sig)
		case <-exit:
			return
		}
	}
}
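// Usage sketch (hypothetical): subscribe to SIGINT/SIGTERM and relay them to
// the process until it exits. Assumes the standard "os/signal" and "syscall"
// imports; forwardSignals returns once the process's Wait channel fires.
func relaySignalsUntilExit(process ifrit.Process) {
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(signals)

	forwardSignals(signals, process)
}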
func waitUntilReady(process ifrit.Process, logger lager.Logger) error {
	// We could not find a reliable way for ifrit to report that all processes
	// were ready without error, so we opted to simply report as ready if no
	// errors were thrown within a timeout.
	ready := time.After(5 * time.Second)
	select {
	case <-ready:
		logger.Info("All child processes are ready")
		return nil
	case err := <-process.Wait():
		if err == nil {
			// Sometimes a process will exit early but return a nil error.
			err = errors.New("Child process exited before becoming ready")
		}
		return err
	}
}
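// Hypothetical call site for the helper above: ifrit.Background starts the
// runner without waiting for readiness, so the five-second heuristic in
// waitUntilReady is the only readiness gate before the process is handed back.
func startAndAwait(runner ifrit.Runner, logger lager.Logger) (ifrit.Process, error) {
	process := ifrit.Background(runner)

	if err := waitUntilReady(process, logger); err != nil {
		return nil, err
	}

	return process, nil
}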
}) Context("when attempt 1 succeeds", func() { BeforeEach(func() { attempt1Step.ResultStub = successResult(true) }) Describe("Run", func() { var process ifrit.Process JustBeforeEach(func() { process = ifrit.Invoke(step) }) It("returns nil having only run the first attempt", func() { Expect(<-process.Wait()).ToNot(HaveOccurred()) Expect(attempt1Step.RunCallCount()).To(Equal(1)) Expect(attempt2Step.RunCallCount()).To(Equal(0)) Expect(attempt3Step.RunCallCount()).To(Equal(0)) }) Describe("Result", func() { It("delegates to attempt 1", func() { <-process.Wait() // internal check for success within retry loop Expect(attempt1Step.ResultCallCount()).To(Equal(1)) attempt1Step.ResultReturns(true)
		converger_process.New(
			fakeBBSServiceClient,
			fakeBBSClient,
			logger,
			fakeClock,
			convergeRepeatInterval,
			kickTaskDuration,
			expirePendingTaskDuration,
			expireCompletedTaskDuration,
		),
	)
})

AfterEach(func() {
	ginkgomon.Interrupt(process)
	Eventually(process.Wait()).Should(Receive())
})

Describe("converging over time", func() {
	It("converges tasks, LRPs, and auctions when the lock is periodically reestablished", func() {
		fakeClock.Increment(convergeRepeatInterval + aBit)

		Eventually(fakeBBSClient.ConvergeTasksCallCount, aBit).Should(Equal(1))
		Eventually(fakeBBSClient.ConvergeLRPsCallCount, aBit).Should(Equal(1))

		actualKickTaskDuration, actualExpirePendingTaskDuration, actualExpireCompletedTaskDuration := fakeBBSClient.ConvergeTasksArgsForCall(0)
		Expect(actualKickTaskDuration).To(Equal(kickTaskDuration))
		Expect(actualExpirePendingTaskDuration).To(Equal(expirePendingTaskDuration))
		Expect(actualExpireCompletedTaskDuration).To(Equal(expireCompletedTaskDuration))

		fakeClock.Increment(convergeRepeatInterval + aBit)
)

BeforeEach(func() {
	healthPort = 10000 + GinkgoParallelNode()
	logger = lagertest.NewTestLogger("HealthRunner Test")
	healthRunner = health.NewRunner(uint(healthPort), logger)
	healthProcess = ifrit.Invoke(healthRunner)
	isReady := healthProcess.Ready()
	Eventually(isReady, startupTimeout).Should(BeClosed(), "Error starting Health Runner")
})

AfterEach(func() {
	healthProcess.Signal(os.Kill)
	err := <-healthProcess.Wait()
	Expect(err).ToNot(HaveOccurred())
})

Context("when the runner is running", func() {
	It("accepts connections on health port", func() {
		conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", healthPort))
		Expect(err).ToNot(HaveOccurred())

		err = conn.Close()
		Expect(err).ToNot(HaveOccurred())
	})
})

It("shuts down gracefully when signalled", func() {
	healthProcess.Signal(os.Kill)
}) }) Describe("Run", func() { BeforeEach(func() { uaaClient.FetchTokenReturns(token, nil) client.RoutesReturns(response, nil) }) JustBeforeEach(func() { process = ifrit.Invoke(fetcher) }) AfterEach(func() { process.Signal(os.Interrupt) Eventually(process.Wait(), 5*time.Second).Should(Receive()) }) It("subscribes for events", func() { Eventually(client.SubscribeToEventsWithMaxRetriesCallCount).Should(Equal(1)) }) Context("on specified interval", func() { It("it fetches routes", func() { // to be consumed by by the eventSource.NextStub to avoid starvation eventChannel <- routing_api.Event{} clock.Increment(cfg.PruneStaleDropletsInterval + 100*time.Millisecond) Eventually(client.RoutesCallCount, 2*time.Second, 50*time.Millisecond).Should(Equal(1)) clock.Increment(cfg.PruneStaleDropletsInterval + 100*time.Millisecond) Eventually(client.RoutesCallCount, 2*time.Second, 50*time.Millisecond).Should(Equal(2)) })
		_, err := io.Stdout.Write([]byte(outScriptStdout))
		Ω(err).ShouldNot(HaveOccurred())

		_, err = io.Stderr.Write([]byte(outScriptStderr))
		Ω(err).ShouldNot(HaveOccurred())

		return outScriptProcess, nil
	}

	versionedSource = resource.Put(ioConfig, source, params, fakeArtifactSource)
	outProcess = ifrit.Invoke(versionedSource)
})

AfterEach(func() {
	Eventually(outProcess.Wait()).Should(Receive())
})

itCanStreamOut := func() {
	Describe("streaming bits out", func() {
		Context("when streaming out succeeds", func() {
			BeforeEach(func() {
				fakeContainer.StreamOutStub = func(spec garden.StreamOutSpec) (io.ReadCloser, error) {
					streamOut := new(bytes.Buffer)

					if spec.Path == "/tmp/build/put/some/subdir" {
						streamOut.WriteString("sup")
					}

					return ioutil.NopCloser(streamOut), nil
				}
	restarter = restart.Restarter{
		Runner: testRunner,
		Load: func(runner ifrit.Runner, err error) ifrit.Runner {
			return nil
		},
	}
})

JustBeforeEach(func() {
	process = ifrit.Background(restarter)
})

AfterEach(func() {
	process.Signal(os.Kill)
	testRunner.EnsureExit()
	Eventually(process.Wait()).Should(Receive())
})

Describe("Process Behavior", func() {
	It("waits for the internal runner to be ready", func() {
		Consistently(process.Ready()).ShouldNot(BeClosed())
		testRunner.TriggerReady()
		Eventually(process.Ready()).Should(BeClosed())
	})
})

Describe("Load", func() {
	Context("when load returns a runner", func() {
		var loadedRunner *fake_runner.TestRunner
		expectedTTL := 30 * time.Second

		Eventually(workerDB.SaveWorkerCallCount).Should(Equal(1))
		workerInfo, ttl := workerDB.SaveWorkerArgsForCall(0)
		Expect(workerInfo).To(Equal(expectedWorkerInfo))
		Expect(ttl).To(Equal(expectedTTL))

		ginkgomon.Interrupt(process)

		fakeClock.Increment(11 * time.Second)
		Consistently(workerDB.SaveWorkerCallCount).Should(Equal(1))
	})
})

Context("if saving to the DB fails", func() {
	disaster := errors.New("bad bad bad")

	BeforeEach(func() {
		workerDB.SaveWorkerReturns(db.SavedWorker{}, disaster)
	})

	It("exits early", func() {
		runner := worker.NewHardcoded(logger, workerDB, fakeClock, gardenAddr, baggageClaimAddr, resourceTypes)
		process = ifrit.Invoke(runner)
		Expect(<-process.Wait()).To(Equal(disaster))
	})
})
})
	fakeClock = fakeclock.NewFakeClock(time.Unix(0, 123))

	trackerRunner = TrackerRunner{
		Tracker:  fakeTracker,
		Interval: interval,
		Clock:    fakeClock,
	}
})

JustBeforeEach(func() {
	process = ifrit.Invoke(trackerRunner)
})

AfterEach(func() {
	process.Signal(os.Interrupt)
	<-process.Wait()
})

It("tracks immediately", func() {
	<-tracked
})

Context("when the interval elapses", func() {
	JustBeforeEach(func() {
		<-tracked
		fakeClock.Increment(interval)
	})

	It("tracks", func() {
		<-tracked
		Consistently(tracked).ShouldNot(Receive())
func untilTerminated(logger lager.Logger, process ifrit.Process) {
	err := <-process.Wait()
	exitOnFailure(logger, err)
}
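// A plausible companion for the function above (sketch; the real
// exitOnFailure is not shown in this excerpt): log the fatal error and exit
// non-zero so a supervisor can notice and restart the process.
func exitOnFailure(logger lager.Logger, err error) {
	if err != nil {
		logger.Error("fatal-error-aborting", err)
		os.Exit(1)
	}
}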
}) It("uses the input source for all steps", func() { Ω(fakeStepA.UsingCallCount()).Should(Equal(1)) step, repo := fakeStepA.UsingArgsForCall(0) Ω(step).Should(Equal(inStep)) Ω(repo).Should(Equal(repo)) Ω(fakeStepB.UsingCallCount()).Should(Equal(1)) step, repo = fakeStepB.UsingArgsForCall(0) Ω(step).Should(Equal(inStep)) Ω(repo).Should(Equal(repo)) }) It("exits successfully", func() { Eventually(process.Wait()).Should(Receive(BeNil())) }) Describe("executing each source", func() { BeforeEach(func() { wg := new(sync.WaitGroup) wg.Add(2) outStepA.RunStub = func(signals <-chan os.Signal, ready chan<- struct{}) error { wg.Done() wg.Wait() close(ready) return nil } outStepB.RunStub = func(signals <-chan os.Signal, ready chan<- struct{}) error {
				}))
			})
		})
	})
})

Context("when the service has already been registered", func() {
	BeforeEach(func() {
		oldregistration := *registration
		oldregistration.Port = 9000
		err := consulClient.Agent().ServiceRegister(&oldregistration)
		Expect(err).NotTo(HaveOccurred())
	})

	It("does not exit", func() {
		Consistently(registrationProcess.Wait()).ShouldNot(Receive())
	})

	It("updates the service", func() {
		services, err := consulClient.Agent().Services()
		Expect(err).NotTo(HaveOccurred())

		service, ok := services[registration.ID]
		Expect(ok).To(BeTrue())
		Expect(*service).To(Equal(api.AgentService{
			ID:      registration.ID,
			Service: registration.Name,
			Tags:    registration.Tags,
			Port:    registration.Port,
			Address: registration.Address,
		}))
	})
{Guid: "container-2"}, {Guid: "container-3"}, }, nil) }) JustBeforeEach(func() { reporter = ifrit.Envoke(&metrics.Reporter{ ExecutorSource: executorClient, Interval: reportInterval, Logger: logger, }) }) AfterEach(func() { reporter.Signal(os.Interrupt) Eventually(reporter.Wait()).Should(Receive()) }) It("reports the current capacity on the given interval", func() { Eventually(func() fake.Metric { return sender.GetValue("CapacityTotalMemory") }, reportInterval*2).Should(Equal(fake.Metric{ Value: 1024, Unit: "MiB", })) Eventually(func() fake.Metric { return sender.GetValue("CapacityTotalDisk") }, reportInterval*2).Should(Equal(fake.Metric{ Value: 2048, Unit: "MiB",
	runStep.RunStub = func(signals <-chan os.Signal, ready chan<- struct{}) error {
		close(ready)

		select {
		case <-startStep:
			return nil
		case <-signals:
			return ErrInterrupted
		}
	}
})

It("should interrupt after timeout duration", func() {
	Eventually(runStep.RunCallCount).Should(Equal(1))

	var receivedError error
	Eventually(process.Wait(), 3*time.Second).Should(Receive(&receivedError))
	Ω(receivedError).Should(Equal(ErrStepTimedOut))
})

Context("when the process is signaled", func() {
	BeforeEach(func() {
		timeoutDuration = atc.Duration(10 * time.Second)
	})

	It("interrupts the process", func() {
		Eventually(runStep.RunCallCount).Should(Equal(1))

		process.Signal(os.Kill)

		var receivedError error
		Eventually(process.Wait()).Should(Receive(&receivedError))
. "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "github.com/contraband/checkin/db/postgresrunner" "github.com/tedsuo/ifrit" ) func TestDB(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "DB Suite") } var postgresRunner postgresrunner.Runner var dbProcess ifrit.Process var _ = BeforeSuite(func() { postgresRunner = postgresrunner.Runner{ Port: 5433 + GinkgoParallelNode(), } dbProcess = ifrit.Invoke(postgresRunner) postgresRunner.CreateTestDB() }) var _ = AfterSuite(func() { dbProcess.Signal(os.Interrupt) Eventually(dbProcess.Wait(), 10*time.Second).Should(Receive()) })
		Expect(logger.LogMessages()).To(ContainElement("test.encryptor.encryption-failed"))
	})

	It("does not change the key in the db", func() {
		Consistently(fakeDB.SetEncryptionKeyLabelCallCount).Should(Equal(0))
	})
})

Context("when fetching the current encryption key fails", func() {
	BeforeEach(func() {
		fakeDB.EncryptionKeyLabelReturns("", errors.New("can't fetch"))
	})

	It("fails early", func() {
		var err error
		Eventually(encryptorProcess.Wait()).Should(Receive(&err))
		Expect(err).To(HaveOccurred())
		Expect(encryptorProcess.Ready()).ToNot(BeClosed())
	})
})

Context("when the current encryption key is not known to the encryptor", func() {
	BeforeEach(func() {
		fakeDB.EncryptionKeyLabelReturns("some-unknown-key", nil)
	})

	It("shuts down without signalling ready", func() {
		var err error
		Eventually(encryptorProcess.Wait()).Should(Receive(&err))
		Expect(err).To(MatchError("Existing encryption key version (some-unknown-key) is not among the known keys"))
		Expect(encryptorProcess.Ready()).ToNot(BeClosed())
	Ω(otherInputSource.StreamToCallCount()).Should(Equal(1))
	destination = otherInputSource.StreamToArgsForCall(0)

	initial = fakeContainer.StreamInCallCount()

	err = destination.StreamIn("foo", streamIn)
	Ω(err).ShouldNot(HaveOccurred())

	Ω(fakeContainer.StreamInCallCount()).Should(Equal(initial + 1))
	spec = fakeContainer.StreamInArgsForCall(initial)
	Ω(spec.Path).Should(Equal("/tmp/build/a-random-guid/some-other-input/foo"))
	Ω(spec.User).Should(Equal("")) // use default
	Ω(spec.TarStream).Should(Equal(streamIn))

	Eventually(process.Wait()).Should(Receive(BeNil()))
})

Context("when streaming the bits in to the container fails", func() {
	disaster := errors.New("nope")

	BeforeEach(func() {
		inputSource.StreamToReturns(disaster)
	})

	It("exits with the error", func() {
		Eventually(process.Wait()).Should(Receive(Equal(disaster)))
	})

	It("does not run anything", func() {
		Eventually(process.Wait()).Should(Receive())
// Interrupt sends os.Interrupt to the given process and waits for it to
// exit within the given Eventually intervals.
func Interrupt(process ifrit.Process, intervals ...interface{}) {
	process.Signal(os.Interrupt)
	Eventually(process.Wait(), intervals...).Should(
		Receive(),
		"interrupted ginkgomon process failed to exit in time",
	)
}
	unixHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("yo"))
	})

	var err error
	tmpdir, err = ioutil.TempDir(os.TempDir(), "ifrit-server-test")
	Ω(err).ShouldNot(HaveOccurred())

	socketPath = path.Join(tmpdir, "ifrit.sock")
	server = http_server.NewUnixServer(socketPath, unixHandler)
	process = ifrit.Invoke(server)
})

AfterEach(func() {
	process.Signal(syscall.SIGINT)
	Eventually(process.Wait()).Should(Receive())
})

It("serves requests with the given handler", func() {
	resp, err := httpGetUnix("unix://"+socketPath+"/", socketPath)
	Ω(err).ShouldNot(HaveOccurred())

	body, err := ioutil.ReadAll(resp.Body)
	Ω(err).ShouldNot(HaveOccurred())
	Ω(string(body)).Should(Equal("yo"))
})
})

Context("when the server starts successfully", func() {
	BeforeEach(func() {
		server = http_server.New(address, handler)
// Kill sends os.Kill to the given process, if any, and waits for it to
// exit within the given Eventually intervals.
func Kill(process ifrit.Process, intervals ...interface{}) {
	if process != nil {
		process.Signal(os.Kill)
		Eventually(process.Wait(), intervals...).Should(
			Receive(),
			"killed ginkgomon process failed to exit in time",
		)
	}
}
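// Usage sketch (hypothetical test teardown; the process variable is assumed):
// unlike the Interrupt helper shown earlier, Kill tolerates a nil process, so
// it is safe in an AfterEach that runs even when the process under test was
// never started.
var _ = AfterEach(func() {
	Kill(process, 5*time.Second)
})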
	Ω(ioConfig.Stderr).Should(Equal(stderrBuf))
})

It("runs the get resource action", func() {
	Ω(fakeVersionedSource.RunCallCount()).Should(Equal(1))
})

It("reports the fetched version info", func() {
	var info VersionInfo
	Ω(step.Result(&info)).Should(BeTrue())
	Ω(info.Version).Should(Equal(atc.Version{"some": "version"}))
	Ω(info.Metadata).Should(Equal([]atc.MetadataField{{"some", "metadata"}}))
})

It("is successful", func() {
	Eventually(process.Wait()).Should(Receive(BeNil()))

	var success Success
	Ω(step.Result(&success)).Should(BeTrue())
	Ω(bool(success)).Should(BeTrue())
})

It("completes via the delegate", func() {
	Eventually(getDelegate.CompletedCallCount).Should(Equal(1))

	exitStatus, versionInfo := getDelegate.CompletedArgsForCall(0)
	Ω(exitStatus).Should(Equal(ExitStatus(0)))
	Ω(versionInfo).Should(Equal(&VersionInfo{
		Version:  atc.Version{"some": "version"},
		Metadata: []atc.MetadataField{{"some", "metadata"}},
	}))
opts := &mbus.SubscriberOpts{ ID: "test", MinimumRegisterIntervalInSeconds: int(config.StartResponseDelayInterval.Seconds()), PruneThresholdInSeconds: int(config.DropletStaleThreshold.Seconds()), } subscriber = ifrit.Background(mbus.NewSubscriber(logger.Session("subscriber"), mbusClient, registry, nil, opts)) <-subscriber.Ready() }) AfterEach(func() { if natsRunner != nil { natsRunner.Stop() } if subscriber != nil { subscriber.Signal(os.Interrupt) <-subscriber.Wait() } }) Context("Drain", func() { BeforeEach(func() { runRouter(rtr) }) AfterEach(func() { if rtr != nil { rtr.Stop() } }) It("waits until the last request completes", func() {
	runnerDelegate = NewAuctionRunnerDelegate(cells)
	metricEmitterDelegate := NewAuctionMetricEmitterDelegate()
	runner = auctionrunner.New(
		runnerDelegate,
		metricEmitterDelegate,
		clock.NewClock(),
		workPool,
		logger,
	)
	runnerProcess = ifrit.Invoke(runner)
})

var _ = AfterEach(func() {
	runnerProcess.Signal(os.Interrupt)
	Eventually(runnerProcess.Wait(), 20).Should(Receive())
	workPool.Stop()
})

var _ = AfterSuite(func() {
	if !disableSVGReport {
		finishReport()
	}

	for _, sess := range sessionsToTerminate {
		sess.Kill().Wait()
	}
})

func cellGuid(index int) string {
	return fmt.Sprintf("REP-%d", index+1)
var _ = Describe("Starting the NatsClientRunner process", func() { var natsClient NATSClient var natsClientRunner ifrit.Runner var natsClientProcess ifrit.Process BeforeEach(func() { natsAddress := fmt.Sprintf("127.0.0.1:%d", natsPort) natsClient = NewClient() natsClientRunner = NewClientRunner(natsAddress, "nats", "nats", lagertest.NewTestLogger("test"), natsClient) }) AfterEach(func() { stopNATS() if natsClientProcess != nil { natsClientProcess.Signal(os.Interrupt) Eventually(natsClientProcess.Wait(), 5).Should(Receive()) } }) Describe("when NATS is up", func() { BeforeEach(func() { startNATS() natsClientProcess = ifrit.Invoke(natsClientRunner) }) It("connects to NATS", func() { Expect(natsClient.Ping()).To(BeTrue()) }) It("disconnects when it receives a signal", func() { natsClientProcess.Signal(os.Interrupt)
	Expect(ioConfig.Stderr).To(Equal(stderrBuf))
})

It("runs the put resource action", func() {
	Expect(fakeVersionedSource.RunCallCount()).To(Equal(1))
})

It("reports the created version info", func() {
	var info VersionInfo
	Expect(step.Result(&info)).To(BeTrue())
	Expect(info.Version).To(Equal(atc.Version{"some": "version"}))
	Expect(info.Metadata).To(Equal([]atc.MetadataField{{"some", "metadata"}}))
})

It("is successful", func() {
	Eventually(process.Wait()).Should(Receive(BeNil()))

	var success Success
	Expect(step.Result(&success)).To(BeTrue())
	Expect(bool(success)).To(BeTrue())
})

It("completes via the delegate", func() {
	Eventually(putDelegate.CompletedCallCount).Should(Equal(1))

	exitStatus, versionInfo := putDelegate.CompletedArgsForCall(0)
	Expect(exitStatus).To(Equal(ExitStatus(0)))
	Expect(versionInfo).To(Equal(&VersionInfo{
		Version:  atc.Version{"some": "version"},
		Metadata: []atc.MetadataField{{"some", "metadata"}},
	}))
	sender = fake.NewFakeMetricSender()
	dropsonde_metrics.Initialize(sender, nil)
})

JustBeforeEach(func() {
	pmn = ifrit.Invoke(metrics.NewPeriodicMetronNotifier(
		lagertest.NewTestLogger("test"),
		reportInterval,
		&etcdOptions,
		fakeClock,
	))
})

AfterEach(func() {
	pmn.Signal(os.Interrupt)
	Eventually(pmn.Wait(), 2*time.Second).Should(Receive())
})

Context("when the metron notifier starts up", func() {
	It("should emit an event that BBS has started", func() {
		Eventually(func() uint64 {
			return sender.GetCounter("BBSMasterElected")
		}).Should(Equal(uint64(1)))
	})
})

Context("when the report interval elapses", func() {
	JustBeforeEach(func() {
		fakeClock.Increment(reportInterval)
	})
	)
})

JustBeforeEach(func() {
	process = startBulker(true)

	_, err := consulRunner.NewClient().KV().DeleteTree(locket.LockSchemaPath(bulkerLockName), nil)
	Expect(err).ToNot(HaveOccurred())
})

AfterEach(func() {
	ginkgomon.Interrupt(process, interruptTimeout)
})

It("exits with an error", func() {
	Eventually(process.Wait(), 5*domainTTL).Should(Receive(HaveOccurred()))
})
})

Context("when the bulker initially does not have the lock", func() {
	var nsyncLockClaimerProcess ifrit.Process

	BeforeEach(func() {
		heartbeatInterval = 1 * time.Second

		nsyncLockClaimer := locket.NewLock(logger, consulRunner.NewClient(), locket.LockSchemaPath(bulkerLockName), []byte("something-else"), clock.NewClock(), locket.RetryInterval, locket.LockTTL)
		nsyncLockClaimerProcess = ifrit.Invoke(nsyncLockClaimer)
	})

	JustBeforeEach(func() {
		process = startBulker(false)
}) Context("but SQL does not have a version", func() { BeforeEach(func() { fakeSQLDB.VersionReturns(nil, models.ErrResourceNotFound) }) It("fetches the version from etcd", func() { Eventually(fakeSQLDB.VersionCallCount).Should(Equal(1)) Consistently(fakeSQLDB.VersionCallCount).Should(Equal(1)) Eventually(fakeETCDDB.VersionCallCount).Should(Equal(1)) Consistently(fakeETCDDB.VersionCallCount).Should(Equal(1)) ginkgomon.Interrupt(migrationProcess) Eventually(migrationProcess.Wait()).Should(Receive(BeNil())) }) // cross-db migration Context("but etcd does", func() { var fakeMigrationToSQL *migrationfakes.FakeMigration BeforeEach(func() { fakeMigrationToSQL = &migrationfakes.FakeMigration{} fakeMigrationToSQL.VersionReturns(102) fakeMigrationToSQL.RequiresSQLReturns(true) dbVersion.CurrentVersion = 99 dbVersion.TargetVersion = 99 fakeMigration.VersionReturns(100)