Example 1
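// forwardSignals relays every signal received on the channel to the ifrit
// process, returning once the process exits.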
func forwardSignals(signals <-chan os.Signal, process ifrit.Process) {
	exit := process.Wait()
	for {
		select {
		case sig := <-signals:
			process.Signal(sig)
		case <-exit:
			return
		}
	}
}
Example 2
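						// Converger test: polls completed tasks until one failed task is
						// seen, then verifies SIGINT and SIGTERM each make the converger
						// exit with status 0.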
						completedTasks := getTasksByState(bbsClient, models.Task_Completed)
						return failedTasks(completedTasks)
					}, 10*convergeRepeatInterval).Should(HaveLen(1))
				})
			})
		})
	})

	Describe("signal handling", func() {
		BeforeEach(func() {
			startConverger()
		})

		Describe("when it receives SIGINT", func() {
			It("exits successfully", func() {
				convergerProcess.Signal(syscall.SIGINT)
				Eventually(runner, exitDuration).Should(Exit(0))
			})
		})

		Describe("when it receives SIGTERM", func() {
			It("exits successfully", func() {
				convergerProcess.Signal(syscall.SIGTERM)
				Eventually(runner, exitDuration).Should(Exit(0))
			})
		})
	})

	Context("when etcd is down", func() {
		BeforeEach(func() {
			etcdRunner.Stop()
Example 3
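	// Auction runner suite hooks: each test invokes the runner under ifrit and
	// tears it down with os.Interrupt, waiting up to 20 seconds for exit.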
	util.ResetGuids()

	runnerDelegate = NewAuctionRunnerDelegate(cells)
	metricEmitterDelegate := NewAuctionMetricEmitterDelegate()
	runner = auctionrunner.New(
		runnerDelegate,
		metricEmitterDelegate,
		clock.NewClock(),
		workPool,
		logger,
	)
	runnerProcess = ifrit.Invoke(runner)
})

var _ = AfterEach(func() {
	runnerProcess.Signal(os.Interrupt)
	Eventually(runnerProcess.Wait(), 20).Should(Receive())
	workPool.Stop()
})

var _ = AfterSuite(func() {
	if !disableSVGReport {
		finishReport()
	}

	for _, sess := range sessionsToTerminate {
		sess.Kill().Wait()
	}
})

func cellGuid(index int) string {
Example 4
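			// Unix-socket HTTP server test: the server runs under ifrit, answers a
			// request over the socket, and is stopped with SIGINT in AfterEach.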
			BeforeEach(func() {
				unixHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
					w.Write([]byte("yo"))
				})
				var err error
				tmpdir, err = ioutil.TempDir(os.TempDir(), "ifrit-server-test")
				Ω(err).ShouldNot(HaveOccurred())

				socketPath = path.Join(tmpdir, "ifrit.sock")
				server = http_server.NewUnixServer(socketPath, unixHandler)
				process = ifrit.Invoke(server)
			})

			AfterEach(func() {
				process.Signal(syscall.SIGINT)
				Eventually(process.Wait()).Should(Receive())
			})

			It("serves requests with the given handler", func() {
				resp, err := httpGetUnix("unix://"+socketPath+"/", socketPath)

				Ω(err).ShouldNot(HaveOccurred())
				body, err := ioutil.ReadAll(resp.Body)
				Ω(err).ShouldNot(HaveOccurred())
				Ω(string(body)).Should(Equal("yo"))
			})
		})

		Context("when the server starts successfully", func() {
			BeforeEach(func() {
Example 5
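			// Stager "gives up" test: with CC persistently failing, the failed
			// task is expected to be resolved against CC twice: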
			//  a) once immediately after the task completes
			//  b) once as a result of convergence
			const expireFactor = 11
			const kickFactor = 7
			const expectedResolutionAttempts = 2

			BeforeEach(func() {
				converger = ginkgomon.Invoke(componentMaker.Converger(
					"-convergeRepeatInterval", convergeRepeatInterval.String(),
					"-expireCompletedTaskDuration", (expireFactor * convergeRepeatInterval).String(),
					"-kickTaskDuration", (kickFactor * convergeRepeatInterval).String(),
				))
			})

			AfterEach(func() {
				converger.Signal(os.Kill)
			})

			It("eventually gives up", func() {
				fakeCC.SetStagingResponseStatusCode(http.StatusServiceUnavailable)
				fakeCC.SetStagingResponseBody(`{"error": "bah!"}`)

				resp, err := stageApplication(stagingGuid, string(stagingMessage))
				Expect(err).NotTo(HaveOccurred())
				Expect(resp.StatusCode).To(Equal(http.StatusAccepted))

				numExpectedStagingResponses := task_handler.MAX_RETRIES * expectedResolutionAttempts

				Eventually(fakeCC.StagingResponses).Should(HaveLen(numExpectedStagingResponses))
				Consistently(fakeCC.StagingResponses).Should(HaveLen(numExpectedStagingResponses))
			})
Example 6
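	// Restarter test: the restarter runs under ifrit.Background and becomes
	// ready only once the wrapped test runner triggers readiness.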
	BeforeEach(func() {
		testRunner = fake_runner.NewTestRunner()
		restarter = restart.Restarter{
			Runner: testRunner,
			Load: func(runner ifrit.Runner, err error) ifrit.Runner {
				return nil
			},
		}
	})

	JustBeforeEach(func() {
		process = ifrit.Background(restarter)
	})

	AfterEach(func() {
		process.Signal(os.Kill)
		testRunner.EnsureExit()
		Eventually(process.Wait()).Should(Receive())
	})

	Describe("Process Behavior", func() {

		It("waits for the internal runner to be ready", func() {
			Consistently(process.Ready()).ShouldNot(BeClosed())
			testRunner.TriggerReady()
			Eventually(process.Ready()).Should(BeClosed())
		})
	})

	Describe("Load", func() {
Example 7
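	// DB suite: a Postgres runner is invoked once for the whole suite and shut
	// down in AfterSuite with os.Interrupt.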
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"github.com/contraband/checkin/db/postgresrunner"
	"github.com/tedsuo/ifrit"
)

func TestDB(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "DB Suite")
}

var postgresRunner postgresrunner.Runner
var dbProcess ifrit.Process

var _ = BeforeSuite(func() {
	postgresRunner = postgresrunner.Runner{
		Port: 5433 + GinkgoParallelNode(),
	}

	dbProcess = ifrit.Invoke(postgresRunner)

	postgresRunner.CreateTestDB()
})

var _ = AfterSuite(func() {
	dbProcess.Signal(os.Interrupt)
	Eventually(dbProcess.Wait(), 10*time.Second).Should(Receive())
})
Example 8
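// NatsClientRunner test: the runner connects to a local NATS server and, when
// started, is stopped with os.Interrupt in AfterEach.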
var _ = Describe("Starting the NatsClientRunner process", func() {
	var natsClient NATSClient
	var natsClientRunner ifrit.Runner
	var natsClientProcess ifrit.Process

	BeforeEach(func() {
		natsAddress := fmt.Sprintf("127.0.0.1:%d", natsPort)
		natsClient = NewClient()
		natsClientRunner = NewClientRunner(natsAddress, "nats", "nats", lagertest.NewTestLogger("test"), natsClient)
	})

	AfterEach(func() {
		stopNATS()
		if natsClientProcess != nil {
			natsClientProcess.Signal(os.Interrupt)
			Eventually(natsClientProcess.Wait(), 5).Should(Receive())
		}
	})

	Describe("when NATS is up", func() {
		BeforeEach(func() {
			startNATS()
			natsClientProcess = ifrit.Invoke(natsClientRunner)
		})

		It("connects to NATS", func() {
			Expect(natsClient.Ping()).To(BeTrue())
		})

		It("disconnects when it receives a signal", func() {
Example 9
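		// Debug server test: fake metrics are exposed as expvars on
		// /debug/vars; the server process is killed in AfterEach.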
		var err error

		fakeMetrics = new(fakes.FakeMetrics)
		fakeMetrics.NumCPUReturns(11)
		fakeMetrics.NumGoroutineReturns(888)
		fakeMetrics.LoopDevicesReturns(33)
		fakeMetrics.BackingStoresReturns(12)
		fakeMetrics.DepotDirsReturns(3)

		sink := lager.NewReconfigurableSink(lager.NewWriterSink(GinkgoWriter, lager.DEBUG), lager.DEBUG)
		serverProc, err = metrics.StartDebugServer("127.0.0.1:5123", sink, fakeMetrics)
		Expect(err).ToNot(HaveOccurred())
	})

	AfterEach(func() {
		serverProc.Signal(os.Kill)
	})

	It("should report the number of loop devices, backing store files and depotDirs", func() {
		resp, err := http.Get("http://127.0.0.1:5123/debug/vars")
		Expect(err).ToNot(HaveOccurred())

		defer resp.Body.Close()
		Expect(resp.StatusCode).To(Equal(http.StatusOK))

		Expect(expvar.Get("loopDevices").String()).To(Equal("33"))
		Expect(expvar.Get("backingStores").String()).To(Equal("12"))
		Expect(expvar.Get("depotDirs").String()).To(Equal("3"))
		Expect(expvar.Get("numCPUS").String()).To(Equal("11"))
		Expect(expvar.Get("numGoRoutines").String()).To(Equal("888"))
	})
Example 10
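							// Maintainer test: with a never-ready fake heartbeater, the
							// process still exits cleanly on os.Interrupt and does not
							// shut down on heartbeat errors.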
							case err := <-heartbeaterErrors:
								logger.Info("never-ready-fake-heartbeat-received-error")
								return err
							}
						}
					},
				}

				serviceClient.NewCellPresenceRunnerReturns(fakeHeartbeater)

				pingErrors <- nil
				maintainProcess = ifrit.Background(maintainer)
			})

			It("exits when signaled", func() {
				maintainProcess.Signal(os.Interrupt)
				var err error
				Eventually(maintainProcess.Wait()).Should(Receive(&err))
				Expect(err).NotTo(HaveOccurred())
				Expect(maintainProcess.Ready()).NotTo(BeClosed())
			})

			Context("when the heartbeat errors", func() {
				BeforeEach(func() {
					heartbeaterErrors <- errors.New("oh no")
					pingErrors <- nil
				})

				It("does not shut down", func() {
					Consistently(maintainProcess.Wait()).ShouldNot(Receive(), "should not shut down")
				})
Example 11
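		// Suite hooks: etcd, consul, the BBS, and gnatsd are started before each
		// test; teardown kills the BBS and interrupts gnatsd.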
		ActiveKeyLabel: "label",
	}
})

var _ = BeforeEach(func() {
	etcdRunner.Start()
	consulRunner.Start()
	consulRunner.WaitUntilReady()

	bbsRunner = bbstestrunner.New(bbsPath, bbsArgs)
	bbsProcess = ginkgomon.Invoke(bbsRunner)

	gnatsdRunner, natsClient = gnatsdrunner.StartGnatsd(natsPort)
})

var _ = AfterEach(func() {
	ginkgomon.Kill(bbsProcess)
	etcdRunner.Stop()
	consulRunner.Stop()
	gnatsdRunner.Signal(os.Interrupt)
	Eventually(gnatsdRunner.Wait(), 5).Should(Receive())
})

var _ = SynchronizedAfterSuite(func() {
	if etcdRunner != nil {
		etcdRunner.Stop()
	}
}, func() {
	gexec.CleanupBuildArtifacts()
})
Example 12
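			// Ordered group test: members start strictly in sequence, so the group
			// does not become ready until every child has started.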
			members = grouper.Members{
				{"child1", childRunner1},
				{"child2", childRunner2},
				{"child3", childRunner3},
			}

			groupRunner = grouper.NewOrdered(os.Interrupt, members)
		})

		AfterEach(func() {
			childRunner1.EnsureExit()
			childRunner2.EnsureExit()
			childRunner3.EnsureExit()

			Eventually(started).Should(BeClosed())
			groupProcess.Signal(os.Kill)
			Eventually(groupProcess.Wait()).Should(Receive())
		})

		BeforeEach(func() {
			started = make(chan struct{})
			go func() {
				groupProcess = ifrit.Invoke(groupRunner)
				close(started)
			}()
		})

		It("runs the first runner, then the second, then becomes ready", func() {
			Eventually(childRunner1.RunCallCount).Should(Equal(1))
			Consistently(childRunner2.RunCallCount, Δ).Should(BeZero())
			Consistently(started, Δ).ShouldNot(BeClosed())
Example 13
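// CC Uploader suite: the binary is built once; a fake CC runs per test via
// ifrit.Envoke (the library's historical spelling, since deprecated in favor
// of Invoke) and is killed afterwards.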
func TestCCUploader(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "CC Uploader Suite")
}

var _ = SynchronizedBeforeSuite(func() []byte {
	ccUploaderPath, err := gexec.Build("github.com/cloudfoundry-incubator/cc-uploader/cmd/cc-uploader")
	Expect(err).NotTo(HaveOccurred())
	return []byte(ccUploaderPath)
}, func(ccUploaderPath []byte) {
	fakeCCAddress := fmt.Sprintf("127.0.0.1:%d", 6767+GinkgoParallelNode())
	fakeCC = fake_cc.New(fakeCCAddress)

	ccUploaderBinary = string(ccUploaderPath)
})

var _ = SynchronizedAfterSuite(func() {
}, func() {
	gexec.CleanupBuildArtifacts()
})

var _ = BeforeEach(func() {
	fakeCCProcess = ifrit.Envoke(fakeCC)
})

var _ = AfterEach(func() {
	fakeCCProcess.Signal(os.Kill)
	Eventually(fakeCCProcess.Wait()).Should(Receive(BeNil()))
})
Example 14
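			// LRP resilience test: SIGKILLing the rep drops both running
			// instances; restarting the rep brings the desired LRP back up.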
			Context("and an LRP is desired", func() {
				var initialInstanceGuids []string

				BeforeEach(func() {
					err := receptorClient.CreateDesiredLRP(helpers.DefaultLRPCreateRequest(processGuid, appId, 2))
					Expect(err).NotTo(HaveOccurred())

					Eventually(runningLRPsPoller).Should(HaveLen(2))
					Eventually(helloWorldInstancePoller).Should(Equal([]string{"0", "1"}))
					initialActuals := runningLRPsPoller()
					initialInstanceGuids = []string{initialActuals[0].InstanceGuid, initialActuals[1].InstanceGuid}
				})

				Context("and the LRP goes away because its rep dies", func() {
					BeforeEach(func() {
						rep.Signal(syscall.SIGKILL)

						Eventually(runningLRPsPoller).Should(BeEmpty())
						Eventually(helloWorldInstancePoller).Should(BeEmpty())
					})

					Context("once the rep comes back", func() {
						BeforeEach(func() {
							rep = ginkgomon.Invoke(componentMaker.Rep())
						})

						It("eventually brings the long-running process up", func() {
							Eventually(runningLRPsPoller).Should(HaveLen(2))
							Eventually(helloWorldInstancePoller).Should(Equal([]string{"0", "1"}))

							currentActuals := runningLRPsPoller()
Example 15
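						// ifrit process test: Wait() hands the run result to every
						// waiter, and Signal() is forwarded to the underlying runner.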
						<-pinger
					}()
				})

				It("returns the run result upon completion", func() {
					err1 := <-errChan
					err2 := <-errChan
					Ω(err1).Should(Equal(test_helpers.PingerExitedFromPing))
					Ω(err2).Should(Equal(test_helpers.PingerExitedFromPing))
				})
			})
		})

		Describe("Signal()", func() {
			BeforeEach(func() {
				pingProc.Signal(os.Kill)
			})

			It("sends the signal to the runner", func() {
				err := <-pingProc.Wait()
				Ω(err).Should(Equal(test_helpers.PingerExitedFromSignal))
			})
		})
	})

	Context("when a process exits without closing ready", func() {
		var proc ifrit.Process

		BeforeEach(func(done Done) {
			proc = ifrit.Invoke(test_helpers.NoReadyRunner)
			close(done)
Example 16
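		// Router subscriber test: the NATS subscriber runs via ifrit.Background
		// and is drained with os.Interrupt in AfterEach.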
		opts := &mbus.SubscriberOpts{
			ID: "test",
			MinimumRegisterIntervalInSeconds: int(config.StartResponseDelayInterval.Seconds()),
			PruneThresholdInSeconds:          int(config.DropletStaleThreshold.Seconds()),
		}
		subscriber = ifrit.Background(mbus.NewSubscriber(logger.Session("subscriber"), mbusClient, registry, nil, opts))
		<-subscriber.Ready()
	})

	AfterEach(func() {
		if natsRunner != nil {
			natsRunner.Stop()
		}
		if subscriber != nil {
			subscriber.Signal(os.Interrupt)
			<-subscriber.Wait()
		}
	})

	Context("Drain", func() {
		BeforeEach(func() {
			runRouter(rtr)
		})

		AfterEach(func() {
			if rtr != nil {
				rtr.Stop()
			}
		})
Example 17
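			// Dynamic pool test: members inserted through the client emit entrance
			// events as they become ready; the pool is killed in AfterEach.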
			member1 = grouper.Member{"child1", childRunner1}
			member2 = grouper.Member{"child2", childRunner2}
			member3 = grouper.Member{"child3", childRunner3}

			pool = grouper.NewDynamic(nil, 3, 2)
			client = pool.Client()
			poolProcess = ifrit.Envoke(pool)

			insert := client.Inserter()
			Eventually(insert).Should(BeSent(member1))
			Eventually(insert).Should(BeSent(member2))
			Eventually(insert).Should(BeSent(member3))
		})

		AfterEach(func() {
			poolProcess.Signal(os.Kill)
			Eventually(poolProcess.Wait()).Should(Receive())
		})

		It("announces the events as processes move through their lifecycle", func() {
			entrance1, entrance2, entrance3 := grouper.EntranceEvent{}, grouper.EntranceEvent{}, grouper.EntranceEvent{}
			exit1, exit2, exit3 := grouper.ExitEvent{}, grouper.ExitEvent{}, grouper.ExitEvent{}

			entrances := client.EntranceListener()
			exits := client.ExitListener()

			childRunner2.TriggerReady()
			Eventually(entrances).Should(Receive(&entrance2))
			Ω(entrance2.Member).Should(Equal(member2))

			childRunner1.TriggerReady()
Example 18
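			// Signal propagation test: signaling the group sends SIGUSR2 to every
			// child, and no further signals follow once a child exits.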
			)

			BeforeEach(func() {
				signal1 = childRunner1.WaitForCall()
				childRunner1.TriggerReady()
				signal2 = childRunner2.WaitForCall()
				childRunner2.TriggerReady()
				signal3 = childRunner3.WaitForCall()
				childRunner3.TriggerReady()

				Eventually(groupProcess.Ready()).Should(BeClosed())
			})

			Describe("when it receives a signal", func() {
				BeforeEach(func() {
					groupProcess.Signal(syscall.SIGUSR2)
				})

				It("sends the signal to all child runners", func() {
					Eventually(signal1).Should(Receive(Equal(syscall.SIGUSR2)))
					Eventually(signal2).Should(Receive(Equal(syscall.SIGUSR2)))
					Eventually(signal3).Should(Receive(Equal(syscall.SIGUSR2)))
				})

				It("doesn't send any more signals to remaining child processes", func() {
					Eventually(signal3).Should(Receive(Equal(syscall.SIGUSR2)))
					childRunner2.TriggerExit(nil)
					Consistently(signal3).ShouldNot(Receive())
				})
			})
Example 19
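		// Metron notifier test: emits a BBSMasterElected counter on startup and
		// periodic metrics as a fake clock advances the report interval.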
		sender = fake.NewFakeMetricSender()
		dropsonde_metrics.Initialize(sender, nil)
	})

	JustBeforeEach(func() {
		pmn = ifrit.Invoke(metrics.NewPeriodicMetronNotifier(
			lagertest.NewTestLogger("test"),
			reportInterval,
			&etcdOptions,
			fakeClock,
		))
	})

	AfterEach(func() {
		pmn.Signal(os.Interrupt)
		Eventually(pmn.Wait(), 2*time.Second).Should(Receive())
	})

	Context("when the metron notifier starts up", func() {
		It("should emit an event that BBS has started", func() {
			Eventually(func() uint64 {
				return sender.GetCounter("BBSMasterElected")
			}).Should(Equal(uint64(1)))
		})
	})

	Context("when the report interval elapses", func() {
		JustBeforeEach(func() {
			fakeClock.Increment(reportInterval)
		})
Example 20
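// Kill signals os.Kill and fails the test if the process does not exit in time.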
func Kill(process ifrit.Process) {
	process.Signal(os.Kill)
	Eventually(process.Wait()).Should(Receive(), "killed ginkgomon process failed to exit in time")
}
Example 21
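		// Resource "in" test: interrupting while the script is still running
		// stops the container gracefully (kill == false).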
		Context("when a signal is received", func() {
			var waited chan<- struct{}

			BeforeEach(func() {
				waiting := make(chan struct{})
				waited = waiting

				inScriptProcess.WaitStub = func() (int, error) {
					// cause waiting to block so that it can be aborted
					<-waiting
					return 0, nil
				}
			})

			It("stops the container", func() {
				inProcess.Signal(os.Interrupt)

				Eventually(fakeContainer.StopCallCount).Should(Equal(1))

				kill := fakeContainer.StopArgsForCall(0)
				Expect(kill).To(BeFalse())

				close(waited)
			})
		})
	}

	Context("before running /in", func() {
		BeforeEach(func() {
			versionedSource = resource.Get(ioConfig, source, params, version)
		})
Example 22
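	// Migration suite: the migration manager stays up after migrations finish;
	// AfterSuite kills it and drops the per-node test database.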
	)

	migrationProcess = ifrit.Invoke(migrationManager)

	Consistently(migrationProcess.Wait()).ShouldNot(Receive())
	Eventually(migrationsDone).Should(BeClosed())
})

var _ = AfterEach(func() {
	fakeGUIDProvider.NextGUIDReturns("", nil)
	truncateTables(db)
})

var _ = AfterSuite(func() {
	if migrationProcess != nil {
		migrationProcess.Signal(os.Kill)
	}

	Expect(db.Close()).NotTo(HaveOccurred())
	db, err := sql.Open(dbDriverName, dbBaseConnectionString)
	Expect(err).NotTo(HaveOccurred())
	Expect(db.Ping()).NotTo(HaveOccurred())
	_, err = db.Exec(fmt.Sprintf("DROP DATABASE diego_%d", GinkgoParallelNode()))
	Expect(err).NotTo(HaveOccurred())
	Expect(db.Close()).NotTo(HaveOccurred())
})

func truncateTables(db *sql.DB) {
	for _, query := range truncateTablesSQL {
		result, err := db.Exec(query)
		Expect(err).NotTo(HaveOccurred())
Example 23
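				// Route register test: a ticker-driven registration loop runs under
				// ifrit and is stopped with SIGTERM in AfterEach.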
				Port:    3000,
				IP:      "i dont care even more",
				TTL:     120,
				LogGuid: "i care a little bit more now",
			}
			database = &fake_db.FakeDB{}
			logger = lagertest.NewTestLogger("event-handler-test")

			timeChan = make(chan time.Time)
			ticker = &time.Ticker{C: timeChan}

			routeRegister = helpers.NewRouteRegister(database, route, ticker, logger)
		})

		AfterEach(func() {
			process.Signal(syscall.SIGTERM)
		})

		JustBeforeEach(func() {
			process = ifrit.Invoke(routeRegister)
		})

		Context("registration", func() {

			Context("with no errors", func() {
				BeforeEach(func() {
					database.SaveRouteStub = func(route models.Route) error {
						return nil
					}

				})
Example 24
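			// Executor metrics reporter test: capacity metrics are emitted on each
			// report interval; the reporter is interrupted and drained in AfterEach.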
			{Guid: "container-1"},
			{Guid: "container-2"},
			{Guid: "container-3"},
		}, nil)
	})

	JustBeforeEach(func() {
		reporter = ifrit.Envoke(&metrics.Reporter{
			ExecutorSource: executorClient,
			Interval:       reportInterval,
			Logger:         logger,
		})
	})

	AfterEach(func() {
		reporter.Signal(os.Interrupt)
		Eventually(reporter.Wait()).Should(Receive())
	})

	It("reports the current capacity on the given interval", func() {
		Eventually(func() fake.Metric {
			return sender.GetValue("CapacityTotalMemory")
		}, reportInterval*2).Should(Equal(fake.Metric{
			Value: 1024,
			Unit:  "MiB",
		}))

		Eventually(func() fake.Metric {
			return sender.GetValue("CapacityTotalDisk")
		}, reportInterval*2).Should(Equal(fake.Metric{
			Value: 2048,
Example 25
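		// Resource "out" test: mirrors the "in" test above; an os.Interrupt while
		// the script runs stops, rather than kills, the container.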
		Context("when a signal is received", func() {
			var waited chan<- struct{}

			BeforeEach(func() {
				waiting := make(chan struct{})
				waited = waiting

				outScriptProcess.WaitStub = func() (int, error) {
					// cause waiting to block so that it can be aborted
					<-waiting
					return 0, nil
				}
			})

			It("stops the container", func() {
				outProcess.Signal(os.Interrupt)

				Eventually(fakeContainer.StopCallCount).Should(Equal(1))

				kill := fakeContainer.StopArgsForCall(0)
				Ω(kill).Should(BeFalse())

				close(waited)
			})
		})
	}

	Context("when a result is already present on the container", func() {
		BeforeEach(func() {
			fakeContainer.PropertyStub = func(name string) (string, error) {
				switch name {
Example 26
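			// Consul registration test: while registration is blocked the process
			// never becomes ready, and an interrupt exits without deregistering.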
			registrationProcess = ifrit.Background(registrationRunner)
		})

		AfterEach(func() {
			close(blockRegister)
			Eventually(blockRegisterDone).Should(BeClosed())
			ginkgomon.Kill(registrationProcess)
		})

		It("does not become ready", func() {
			Consistently(registrationProcess.Ready()).ShouldNot(BeClosed())
		})

		It("shuts down without deregistering", func() {
			Eventually(agent.ServiceRegisterCallCount).Should(Equal(1))
			registrationProcess.Signal(os.Interrupt)
			Eventually(registrationProcess.Wait()).Should(Receive(BeNil()))
			Expect(agent.ServiceDeregisterCallCount()).Should(Equal(0))
		})
	})

	Context("when we fail to deregister the service", func() {
		var registrationError = errors.New("boom")
		BeforeEach(func() {
			agent.ServiceDeregisterReturns(registrationError)
		})

		JustBeforeEach(func() {
			registrationProcess = ginkgomon.Invoke(registrationRunner)
		})
Example 27
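		// Health runner test: the runner accepts TCP connections on its health
		// port once ready and exits without error when killed.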
		startupTimeout = 5 * time.Second
	)

	BeforeEach(func() {

		healthPort = 10000 + GinkgoParallelNode()

		logger = lagertest.NewTestLogger("HealthRunner Test")
		healthRunner = health.NewRunner(uint(healthPort), logger)
		healthProcess = ifrit.Invoke(healthRunner)
		isReady := healthProcess.Ready()
		Eventually(isReady, startupTimeout).Should(BeClosed(), "Error starting Health Runner")
	})

	AfterEach(func() {
		healthProcess.Signal(os.Kill)
		err := <-healthProcess.Wait()
		Expect(err).ToNot(HaveOccurred())
	})

	Context("when the runner is running", func() {
		It("accepts connections on health port", func() {
			conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", healthPort))
			Expect(err).ToNot(HaveOccurred())

			err = conn.Close()
			Expect(err).ToNot(HaveOccurred())
		})
	})

	It("shuts down gracefully when signalled", func() {
Example 28
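// Interrupt signals os.Interrupt and fails the test if the process does not
// exit within the given intervals.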
func Interrupt(process ifrit.Process, intervals ...interface{}) {
	process.Signal(os.Interrupt)
	Eventually(process.Wait(), intervals...).Should(Receive(), "interrupted ginkgomon process failed to exit in time")
}
Example 29
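				// Retry step test: an interrupt during the second attempt returns
				// ErrInterrupted and the third attempt never runs.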
				return errors.New("nope")
			}

			attempt2Step.RunStub = func(signals <-chan os.Signal, ready chan<- struct{}) error {
				close(ready)
				<-signals
				return ErrInterrupted
			}
		})

		Describe("Run", func() {
			var process ifrit.Process

			JustBeforeEach(func() {
				process = ifrit.Invoke(step)
				process.Signal(os.Interrupt)
			})

			It("returns ErrInterrupted having only run the first and second attempts", func() {
				Expect(<-process.Wait()).To(Equal(ErrInterrupted))

				Expect(attempt1Step.RunCallCount()).To(Equal(1))
				Expect(attempt2Step.RunCallCount()).To(Equal(1))
				Expect(attempt3Step.RunCallCount()).To(Equal(0))
			})

			Describe("Result", func() {
				It("delegates to attempt 2", func() {
					<-process.Wait()

					// internal check for success within retry loop
Example 30
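// Kill is a nil-safe variant: it signals os.Kill and waits for exit only when
// the process is non-nil.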
func Kill(process ifrit.Process, intervals ...interface{}) {
	if process != nil {
		process.Signal(os.Kill)
		Eventually(process.Wait(), intervals...).Should(Receive(), "killed ginkgomon process failed to exit in time")
	}
}