func (r Restarter) Run(signals <-chan os.Signal, ready chan<- struct{}) error {
	if r.Load == nil {
		return ErrNoLoadCallback
	}

	process := ifrit.Background(r.Runner)
	processReady := process.Ready()
	exit := process.Wait()

	signaled := false

	for {
		select {
		case signal := <-signals:
			process.Signal(signal)
			signaled = true

		case <-processReady:
			close(ready)
			processReady = nil

		case err := <-exit:
			if signaled {
				return err
			}

			r.Runner = r.Load(r.Runner, err)
			if r.Runner == nil {
				return err
			}

			process = ifrit.Background(r.Runner)
			exit = process.Wait()
		}
	}
}

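// --- Hedged usage sketch (not from the source) ---
// Shows how a Restarter like the one above might be wired up, assuming the
// github.com/tedsuo/ifrit/restart package whose Restarter struct (Runner,
// Load) this Run method belongs to. Load picks the next runner after each
// exit; returning nil ends the restart loop.
package main

import (
	"os"

	"github.com/tedsuo/ifrit"
	"github.com/tedsuo/ifrit/restart"
)

func main() {
	worker := ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		close(ready)
		<-signals
		return nil
	})

	restarter := restart.Restarter{
		Runner: worker,
		Load: func(runner ifrit.Runner, err error) ifrit.Runner {
			if err != nil {
				return runner // relaunch the same runner after a failure
			}
			return nil // clean exit: stop restarting
		},
	}

	process := ifrit.Invoke(restarter)
	process.Signal(os.Interrupt)
	<-process.Wait()
}
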
func (step *composed) Run(signals <-chan os.Signal, ready chan<- struct{}) error {
	step.firstStep = step.a.Using(step.prev, step.repo)

	firstProcess := ifrit.Background(step.firstStep)

	var signalled bool
	var waitErr error

dance:
	for {
		select {
		case waitErr = <-firstProcess.Wait():
			break dance

		case sig := <-signals:
			firstProcess.Signal(sig)
			signalled = true
		}
	}

	if signalled || waitErr != nil {
		return waitErr
	}

	step.secondStep = step.b.Using(step.firstStep, step.repo)

	return step.secondStep.Run(signals, ready)
}

func (server *registrarSSHServer) forwardTCPIP(
	logger lager.Logger,
	conn *ssh.ServerConn,
	listener net.Listener,
	forwardIP string,
	forwardPort uint32,
) ifrit.Process {
	return ifrit.Background(ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		go func() {
			<-signals
			listener.Close()
		}()

		close(ready)

		for {
			localConn, err := listener.Accept()
			if err != nil {
				logger.Error("failed-to-accept", err)
				break
			}

			go forwardLocalConn(logger, localConn, conn, forwardIP, forwardPort)
		}

		return nil
	}))
}

func (build *execBuild) Resume(logger lager.Logger) {
	stepFactory := build.buildStepFactory(logger, build.metadata.Plan)
	source := stepFactory.Using(&exec.NoopStep{}, exec.NewSourceRepository())
	defer source.Release()

	process := ifrit.Background(source)

	exited := process.Wait()

	aborted := false
	var succeeded exec.Success

	for {
		select {
		case err := <-exited:
			if aborted {
				succeeded = false
			} else if !source.Result(&succeeded) {
				logger.Error("step-had-no-result", errors.New("step failed to provide us with a result"))
				succeeded = false
			}

			build.delegate.Finish(logger.Session("finish"), err, succeeded, aborted)

			return

		case sig := <-build.signals:
			process.Signal(sig)

			if sig == os.Kill {
				aborted = true
			}
		}
	}
}

func (m *Maintainer) heartbeat(sigChan <-chan os.Signal, ready chan<- struct{}, heartbeater ifrit.Runner) error {
	m.logger.Info("start-heartbeating")
	defer m.logger.Info("complete-heartbeating")

	ticker := m.clock.NewTicker(m.RetryInterval)
	defer ticker.Stop()

	heartbeatProcess := ifrit.Background(heartbeater)
	heartbeatExitChan := heartbeatProcess.Wait()

	select {
	case <-heartbeatProcess.Ready():
		m.logger.Info("ready")

	case err := <-heartbeatExitChan:
		if err != nil {
			m.logger.Error("heartbeat-exited", err)
		}
		return err

	case <-sigChan:
		m.logger.Info("signaled-while-starting-heartbeater")
		heartbeatProcess.Signal(os.Kill)
		<-heartbeatExitChan
		return nil
	}

	if ready != nil {
		close(ready)
	}

	for {
		select {
		case err := <-heartbeatExitChan:
			m.logger.Error("heartbeat-lost-lock", err)
			return err

		case <-sigChan:
			m.logger.Info("signaled-while-heartbeating")
			heartbeatProcess.Signal(os.Kill)
			<-heartbeatExitChan
			return nil

		case <-ticker.C():
			m.logger.Debug("heartbeat-pinging-executor")

			err := m.executorClient.Ping()
			if err == nil {
				continue
			}

			m.logger.Info("start-signaling-heartbeat-to-stop")
			heartbeatProcess.Signal(os.Kill)

			select {
			case <-heartbeatExitChan:
				m.logger.Info("heartbeat-stopped")
				return err
			case <-sigChan:
				m.logger.Info("signaled-while-waiting-for-heartbeat-to-stop")
				return nil
			}
		}
	}
}

func (server *registrarSSHServer) heartbeatWorker(logger lager.Logger, worker atc.Worker, channel ssh.Channel) ifrit.Process {
	return ifrit.Background(tsa.NewHeartbeater(
		logger,
		server.heartbeatInterval,
		gclient.New(gconn.New("tcp", worker.Addr)),
		server.atcEndpoint,
		worker,
		channel,
	))
}

func New(proxySignals <-chan os.Signal, runner ifrit.Runner) ifrit.Runner {
	return ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		process := ifrit.Background(runner)
		<-process.Ready()
		close(ready)

		go forwardSignals(proxySignals, process)
		go forwardSignals(signals, process)

		return <-process.Wait()
	})
}

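// --- Hedged usage sketch (assumed same package, since New and
// forwardSignals above are unexported) ---
// A caller-owned signal channel can stop the wrapped runner in addition to
// the usual ifrit Signal path.
func exampleProxyUsage(someRunner ifrit.Runner) error {
	ownSignals := make(chan os.Signal, 1)

	process := ifrit.Invoke(New(ownSignals, someRunner))

	// Either of these shuts the wrapped runner down:
	ownSignals <- os.Interrupt
	// process.Signal(os.Interrupt)

	return <-process.Wait()
}
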
func Invoke(runner ifrit.Runner) ifrit.Process {
	process := ifrit.Background(runner)

	select {
	case <-process.Ready():
	case err := <-process.Wait():
		ginkgo.Fail(fmt.Sprintf("process failed to start: %s", err))
	}

	return process
}

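// --- Hedged sketch of a test using the Invoke helper above ---
// Invoke blocks until the runner reports ready and fails the test if the
// runner exits first; serverRunner is an assumed fixture. The exported
// ginkgomon.Invoke used elsewhere in this section behaves the same way.
var _ = ginkgo.It("starts the server before the test body runs", func() {
	process := Invoke(serverRunner)
	defer func() {
		process.Signal(os.Interrupt)
		<-process.Wait()
	}()

	// ... the server is ready; exercise it here ...
})
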
func (g *orderedGroup) orderedStart(signals <-chan os.Signal) (os.Signal, ErrorTrace) {
	for _, member := range g.members {
		p := ifrit.Background(member)

		cases := make([]reflect.SelectCase, 0, len(g.pool)+3)
		for i := 0; i < len(g.pool); i++ {
			cases = append(cases, reflect.SelectCase{
				Dir:  reflect.SelectRecv,
				Chan: reflect.ValueOf(g.pool[g.members[i].Name].Wait()),
			})
		}

		cases = append(cases, reflect.SelectCase{
			Dir:  reflect.SelectRecv,
			Chan: reflect.ValueOf(p.Ready()),
		})

		cases = append(cases, reflect.SelectCase{
			Dir:  reflect.SelectRecv,
			Chan: reflect.ValueOf(p.Wait()),
		})

		cases = append(cases, reflect.SelectCase{
			Dir:  reflect.SelectRecv,
			Chan: reflect.ValueOf(signals),
		})

		chosen, recv, _ := reflect.Select(cases)
		g.pool[member.Name] = p

		switch chosen {
		case len(cases) - 1: // signals
			return recv.Interface().(os.Signal), nil
		case len(cases) - 2: // p.Wait
			return nil, ErrorTrace{
				ExitEvent{Member: member, Err: recv.Interface().(error)},
			}
		case len(cases) - 3: // p.Ready
		default: // other member has exited
			var err error
			if e := recv.Interface(); e != nil {
				err = e.(error)
			}
			return nil, ErrorTrace{
				ExitEvent{Member: g.members[chosen], Err: err},
			}
		}
	}

	return nil, nil
}

func (r *groupRunner) Run(signals <-chan os.Signal, ready chan<- struct{}) error {
	processes := []ifrit.Process{}
	processChan := make(chan ifrit.Process)
	exitTrace := make(ExitTrace, 0, len(r.members))
	exitEvents := make(chan ExitEvent)
	shutdown := false

	go func() {
		for _, member := range r.members {
			process := ifrit.Background(member)

			go func(member Member) {
				err := <-process.Wait()
				exitEvents <- ExitEvent{
					Err:    err,
					Member: member,
				}
			}(member)

			processChan <- process

			<-process.Ready()
		}

		close(ready)
	}()

	for {
		select {
		case sig := <-signals:
			shutdown = true
			for _, process := range processes {
				process.Signal(sig)
			}

		case process := <-processChan:
			processes = append(processes, process)

		case exit := <-exitEvents:
			exitTrace = append(exitTrace, exit)

			if len(exitTrace) == len(processes) {
				return exitTrace.ToError()
			}

			if shutdown {
				// already signaling shutdown; keep waiting for the
				// remaining members to exit (break leaves the select only)
				break
			}

			shutdown = true
			for _, process := range processes {
				process.Signal(os.Interrupt)
			}
		}
	}
}

func (step aggregateStep) Run(signals <-chan os.Signal, ready chan<- struct{}) error {
	members := []ifrit.Process{}

	for _, ms := range step {
		process := ifrit.Background(ms)
		members = append(members, process)
	}

	for _, mp := range members {
		select {
		case <-mp.Ready():
		case <-mp.Wait():
		}
	}

	close(ready)

	var errorMessages []string

dance:
	for _, mp := range members {
		select {
		case sig := <-signals:
			for _, mp := range members {
				mp.Signal(sig)
			}

			for _, mp := range members {
				err := <-mp.Wait()
				if err != nil {
					errorMessages = append(errorMessages, err.Error())
				}
			}

			break dance

		case err := <-mp.Wait():
			if err != nil {
				errorMessages = append(errorMessages, err.Error())
			}
		}
	}

	if len(errorMessages) > 0 {
		return fmt.Errorf("sources failed:\n%s", strings.Join(errorMessages, "\n"))
	}

	return nil
}

func (g *parallelGroup) parallelStart(signals <-chan os.Signal) (os.Signal, ErrorTrace) {
	numMembers := len(g.members)
	processes := make([]ifrit.Process, numMembers)
	cases := make([]reflect.SelectCase, 2*numMembers+1)

	for i, member := range g.members {
		process := ifrit.Background(member)
		processes[i] = process

		cases[2*i] = reflect.SelectCase{
			Dir:  reflect.SelectRecv,
			Chan: reflect.ValueOf(process.Wait()),
		}

		cases[2*i+1] = reflect.SelectCase{
			Dir:  reflect.SelectRecv,
			Chan: reflect.ValueOf(process.Ready()),
		}
	}

	cases[2*numMembers] = reflect.SelectCase{
		Dir:  reflect.SelectRecv,
		Chan: reflect.ValueOf(signals),
	}

	numReady := 0
	for {
		chosen, recv, _ := reflect.Select(cases)
		switch {
		case chosen == 2*numMembers:
			// a signal arrived before the whole group became ready
			return recv.Interface().(os.Signal), nil

		case chosen%2 == 0:
			// a member exited before the whole group became ready
			recvError, _ := recv.Interface().(error)
			return nil, ErrorTrace{ExitEvent{Member: g.members[chosen/2], Err: recvError}}

		default:
			// a member became ready; zero its Ready channel so it is
			// never selected again, and record the process in the pool
			cases[chosen].Chan = reflect.Zero(cases[chosen].Chan.Type())
			g.pool[g.members[chosen/2].Name] = processes[chosen/2]
			numReady++
			if numReady == numMembers {
				return nil, nil
			}
		}
	}
}

func (g *orderedGroup) orderedStart(signals <-chan os.Signal) (os.Signal, ErrorTrace) {
	for _, member := range g.members {
		p := ifrit.Background(member)

		select {
		case <-p.Ready():
			g.pool[member.Name] = p

		case err := <-p.Wait():
			return nil, ErrorTrace{
				ExitEvent{Member: member, Err: err},
			}

		case signal := <-signals:
			return signal, nil
		}
	}

	return nil, nil
}

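// --- Hedged usage sketch (standard ifrit/grouper API, which the
// orderedStart variants above implement) ---
// NewOrdered starts members one at a time, each waiting for the previous
// member to become ready; NewParallel (see parallelStart above) starts them
// all at once.
package main

import (
	"os"

	"github.com/tedsuo/ifrit"
	"github.com/tedsuo/ifrit/grouper"
)

func main() {
	newMember := func(name string) grouper.Member {
		return grouper.Member{
			Name: name,
			Runner: ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
				close(ready)
				<-signals
				return nil
			}),
		}
	}

	group := grouper.NewOrdered(os.Interrupt, grouper.Members{
		newMember("database"),
		newMember("api"), // started only after "database" is ready
	})

	process := ifrit.Invoke(group)
	process.Signal(os.Interrupt)
	<-process.Wait()
}
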
func onReady(runner ifrit.Runner, cb func()) ifrit.Runner {
	return ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		process := ifrit.Background(runner)
		subExited := process.Wait()
		subReady := process.Ready()

		for {
			select {
			case <-subReady:
				cb()
				subReady = nil
			case err := <-subExited:
				return err
			case sig := <-signals:
				process.Signal(sig)
			}
		}
	})
}

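// --- Hedged usage sketch (assumed same package, since onReady above is
// unexported) ---
// Note that the wrapper never closes its own ready channel, so it suits
// callers that only need the callback and ignore readiness.
func exampleOnReady(runner ifrit.Runner) error {
	wrapped := onReady(runner, func() {
		fmt.Println("inner runner became ready")
	})

	process := ifrit.Background(wrapped)
	return <-process.Wait()
}
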
func (s sigmon) Run(signals <-chan os.Signal, ready chan<- struct{}) error {
	osSignals := make(chan os.Signal, SIGNAL_BUFFER_SIZE)
	signal.Notify(osSignals, s.Signals...)

	process := ifrit.Background(s.Runner)

	pReady := process.Ready()
	pWait := process.Wait()

	for {
		select {
		case sig := <-signals:
			process.Signal(sig)
		case sig := <-osSignals:
			process.Signal(sig)
		case <-pReady:
			close(ready)
			pReady = nil
		case err := <-pWait:
			signal.Stop(osSignals)
			return err
		}
	}
}

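// --- Hedged usage sketch ---
// The sigmon runner above is what github.com/tedsuo/ifrit/sigmon exposes via
// sigmon.New: it wraps a runner so OS signals (the defaults plus any extras
// passed in, here SIGUSR1) are forwarded to it.
package main

import (
	"os"
	"syscall"

	"github.com/tedsuo/ifrit"
	"github.com/tedsuo/ifrit/sigmon"
)

func main() {
	server := ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
		close(ready)
		<-signals // exits cleanly once sigmon forwards an OS signal
		return nil
	})

	process := ifrit.Invoke(sigmon.New(server, syscall.SIGUSR1))
	<-process.Wait()
}
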
logger = lagertest.NewTestLogger("test")

fakeETCDDB = &dbfakes.FakeDB{}
fakeETCDDB.VersionReturns(dbVersion, nil)

fakeSQLDB = &dbfakes.FakeDB{}

cryptor = &encryptionfakes.FakeCryptor{}

fakeMigration = &migrationfakes.FakeMigration{}
fakeMigration.RequiresSQLReturns(false)
migrations = []migration.Migration{fakeMigration}
})

JustBeforeEach(func() {
	manager = migration.NewManager(
		logger,
		fakeETCDDB,
		etcdStoreClient,
		fakeSQLDB,
		rawSQLDB,
		cryptor,
		migrations,
		migrationsDone,
		clock.NewClock(),
		"db-driver",
	)
	migrationProcess = ifrit.Background(manager)
})

AfterEach(func() {
	ginkgomon.Kill(migrationProcess)
})

Context("when both etcd and sql configurations are present", func() {
	BeforeEach(func() {
		rawSQLDB = &sql.DB{}
		etcdStoreClient = etcd.NewStoreClient(nil)
	})

	Context("but SQL does not have a version", func() {
		BeforeEach(func() {
			fakeSQLDB.VersionReturns(nil, models.ErrResourceNotFound)

previousStep = &fakes.FakeStep{}

stepFactory.UsingReturns(step)
hookFactory.UsingReturns(hook)

repo = exec.NewSourceRepository()

ensureFactory = exec.Ensure(stepFactory, hookFactory)
ensureStep = ensureFactory.Using(previousStep, repo)
})

It("runs the ensure hook if the step succeeds", func() {
	step.ResultStub = successResult(true)

	process := ifrit.Background(ensureStep)

	Eventually(step.RunCallCount).Should(Equal(1))
	Eventually(hook.RunCallCount).Should(Equal(1))

	Eventually(process.Wait()).Should(Receive(noError()))
})

It("runs the ensure hook if the step fails", func() {
	step.ResultStub = successResult(false)

	process := ifrit.Background(ensureStep)

	Eventually(step.RunCallCount).Should(Equal(1))
	Eventually(hook.RunCallCount).Should(Equal(1))

clock = fakeclock.NewFakeClock(time.Now())
presenceRunner = locket.NewPresence(logger, consulClient, presenceKey, presenceValue, clock, retryInterval, presenceTTL)
})

AfterEach(func() {
	ginkgomon.Kill(presenceProcess)
})

Context("When consul is running", func() {
	Context("an error occurs while acquiring the presence", func() {
		BeforeEach(func() {
			presenceKey = ""
		})

		It("continues to retry", func() {
			presenceProcess = ifrit.Background(presenceRunner)
			Consistently(presenceProcess.Ready()).ShouldNot(BeClosed())
			Consistently(presenceProcess.Wait()).ShouldNot(Receive())

			Eventually(logger).Should(Say("failed-setting-presence"))

			clock.WaitForWatcherAndIncrement(6 * time.Second)
			Eventually(logger).Should(Say("recreating-session"))
		})
	})

	Context("and the presence is available", func() {
		It("acquires the presence", func() {
			presenceProcess = ifrit.Background(presenceRunner)
			Eventually(presenceProcess.Ready()).Should(BeClosed())
			Eventually(getPresenceValue).Should(Equal(presenceValue))

previousStep = &fakes.FakeStep{}

stepFactory.UsingReturns(step)
successFactory.UsingReturns(hook)

repo = exec.NewSourceRepository()

onSuccessFactory = exec.OnSuccess(stepFactory, successFactory)
onSuccessStep = onSuccessFactory.Using(previousStep, repo)
})

It("runs the success hook if the step succeeds", func() {
	step.ResultStub = successResult(true)

	process := ifrit.Background(onSuccessStep)

	Eventually(step.RunCallCount).Should(Equal(1))
	Eventually(hook.RunCallCount).Should(Equal(1))

	Eventually(process.Wait()).Should(Receive(noError()))
})

It("provides the step as the previous step to the hook", func() {
	step.ResultStub = successResult(true)

	process := ifrit.Background(onSuccessStep)

	Eventually(step.RunCallCount).Should(Equal(1))
	Eventually(successFactory.UsingCallCount).Should(Equal(1))

var testRunner *fake_runner.TestRunner
var restarter restart.Restarter
var process ifrit.Process

BeforeEach(func() {
	testRunner = fake_runner.NewTestRunner()
	restarter = restart.Restarter{
		Runner: testRunner,
		Load: func(runner ifrit.Runner, err error) ifrit.Runner {
			return nil
		},
	}
})

JustBeforeEach(func() {
	process = ifrit.Background(restarter)
})

AfterEach(func() {
	process.Signal(os.Kill)
	testRunner.EnsureExit()
	Eventually(process.Wait()).Should(Receive())
})

Describe("Process Behavior", func() {
	It("waits for the internal runner to be ready", func() {
		Consistently(process.Ready()).ShouldNot(BeClosed())
		testRunner.TriggerReady()
		Eventually(process.Ready()).Should(BeClosed())
	})

previousStep = &fakes.FakeStep{}

stepFactory.UsingReturns(step)
failureFactory.UsingReturns(hook)

repo = exec.NewSourceRepository()

onFailureFactory = exec.OnFailure(stepFactory, failureFactory)
onFailureStep = onFailureFactory.Using(previousStep, repo)
})

It("runs the failure hook if the step fails", func() {
	step.ResultStub = successResult(false)

	process := ifrit.Background(onFailureStep)

	Eventually(step.RunCallCount).Should(Equal(1))
	Eventually(hook.RunCallCount).Should(Equal(1))

	Eventually(process.Wait()).Should(Receive(noError()))
})

It("provides the step as the previous step to the hook", func() {
	step.ResultStub = successResult(false)

	process := ifrit.Background(onFailureStep)

	Eventually(step.RunCallCount).Should(Equal(1))
	Eventually(failureFactory.UsingCallCount).Should(Equal(1))

	timeoutDuration atc.Duration
)

BeforeEach(func() {
	startStep = make(chan error, 1)

	fakeStepFactoryStep = new(fakes.FakeStepFactory)
	runStep = new(fakes.FakeStep)
	fakeStepFactoryStep.UsingReturns(runStep)
})

JustBeforeEach(func() {
	timeout = Timeout(fakeStepFactoryStep, timeoutDuration)
	step = timeout.Using(nil, nil)
	process = ifrit.Background(step)
})

Context("when the process goes beyond the duration", func() {
	BeforeEach(func() {
		runStep.ResultStub = successResult(true)
		timeoutDuration = atc.Duration(1 * time.Second)

		runStep.RunStub = func(signals <-chan os.Signal, ready chan<- struct{}) error {
			close(ready)
			select {
			case <-startStep:
				return nil
			case <-signals:
				return ErrInterrupted
			}

	}

	groupRunner = grouper.NewParallel(os.Interrupt, members)
})

AfterEach(func() {
	childRunner1.EnsureExit()
	childRunner2.EnsureExit()
	childRunner3.EnsureExit()
	ginkgomon.Kill(groupProcess)
})

Describe("Start", func() {
	BeforeEach(func() {
		groupProcess = ifrit.Background(groupRunner)
	})

	It("runs all runners at the same time", func() {
		Eventually(childRunner1.RunCallCount).Should(Equal(1))
		Eventually(childRunner2.RunCallCount).Should(Equal(1))
		Eventually(childRunner3.RunCallCount).Should(Equal(1))

		Consistently(groupProcess.Ready()).ShouldNot(BeClosed())

		childRunner1.TriggerReady()
		childRunner2.TriggerReady()
		childRunner3.TriggerReady()

		Eventually(groupProcess.Ready()).Should(BeClosed())
	})

var shouldEventuallyHaveNumSessions = func(numSessions int) {
	Eventually(func() int {
		sessions, _, err := consulClient.Session().List(nil)
		Expect(err).NotTo(HaveOccurred())
		return len(sessions)
	}).Should(Equal(numSessions))
}

Context("When consul is running", func() {
	Context("an error occurs while acquiring the lock", func() {
		BeforeEach(func() {
			lockKey = ""
		})

		It("continues to retry", func() {
			lockProcess = ifrit.Background(lockRunner)
			shouldEventuallyHaveNumSessions(1)
			Consistently(lockProcess.Ready()).ShouldNot(BeClosed())
			Consistently(lockProcess.Wait()).ShouldNot(Receive())

			clock.Increment(retryInterval)
			Eventually(logger).Should(Say("acquire-lock-failed"))
			Eventually(logger).Should(Say("retrying-acquiring-lock"))

			Expect(sender.GetValue(lockHeldMetricName).Value).To(Equal(float64(0)))
		})
	})

	Context("and the lock is available", func() {
		It("acquires the lock", func() {
			lockProcess = ifrit.Background(lockRunner)
			Eventually(lockProcess.Ready()).Should(BeClosed())

registration = &api.AgentServiceRegistration{
	ID:      "test-id",
	Name:    "Test-Service",
	Tags:    []string{"a", "b", "c"},
	Port:    8080,
	Address: "127.0.0.1",
}
})

JustBeforeEach(func() {
	registrationRunner = locket.NewRegistrationRunner(logger, registration, client, 5*time.Second, clock)
})

Context("when the service is invalid", func() {
	JustBeforeEach(func() {
		registrationProcess = ifrit.Background(registrationRunner)
	})

	Context("when the service has a value in the Checks list", func() {
		BeforeEach(func() {
			registration.Checks = []*api.AgentServiceCheck{
				&api.AgentServiceCheck{
					TTL: "1m",
				},
			}
		})

		It("returns a validation error", func() {
			Eventually(registrationProcess.Wait()).Should(Receive(MatchError("Support for multiple service checks not implemented")))
		})

	Reporter:             varz,
	AccessLogger:         &access_log.NullAccessLogger{},
	HealthCheckUserAgent: "HTTP-Monitor/1.1",
	HeartbeatOK:          &healthCheck,
})

errChan := make(chan error, 2)
rtr, err = router.NewRouter(logger, config, p, mbusClient, registry, varz, &healthCheck, logcounter, errChan)
Expect(err).ToNot(HaveOccurred())

opts := &mbus.SubscriberOpts{
	ID: "test",
	MinimumRegisterIntervalInSeconds: int(config.StartResponseDelayInterval.Seconds()),
	PruneThresholdInSeconds:          int(config.DropletStaleThreshold.Seconds()),
}

subscriber = ifrit.Background(mbus.NewSubscriber(logger.Session("subscriber"), mbusClient, registry, nil, opts))
<-subscriber.Ready()
})

AfterEach(func() {
	if natsRunner != nil {
		natsRunner.Stop()
	}

	if subscriber != nil {
		subscriber.Signal(os.Interrupt)
		<-subscriber.Wait()
	}
})

Context("Drain", func() {
	BeforeEach(func() {

BeforeEach(func() {
	task = &rep.Task{
		TaskGuid: "task-guid",
		Domain:   "test",
		Resource: rep.Resource{
			MemoryMB: 124,
			DiskMB:   456,
			RootFs:   "some-rootfs",
		},
	}

	err := consulSession.AcquireLock(locket.LockSchemaPath("auctioneer_lock"), []byte{})
	Expect(err).NotTo(HaveOccurred())

	runner.StartCheck = "auctioneer.lock-bbs.lock.acquiring-lock"

	auctioneerProcess = ifrit.Background(runner)
})

It("should not advertise its presence, and should not be reachable", func() {
	Eventually(func() error {
		return auctioneerClient.RequestTaskAuctions([]*auctioneer.TaskStartRequest{
			&auctioneer.TaskStartRequest{*task},
		})
	}).Should(HaveOccurred())
})

It("should eventually come up in the event that the lock is released", func() {
	consulSession.Destroy()
	_, err := consulSession.Recreate()
	Expect(err).NotTo(HaveOccurred())

logger = lagertest.NewTestLogger("test")

oldKey, err := encryption.NewKey("old-key", "old-passphrase")
encryptionKey, err := encryption.NewKey("label", "passphrase")
Expect(err).NotTo(HaveOccurred())

keyManager, err = encryption.NewKeyManager(encryptionKey, []encryption.Key{oldKey})
Expect(err).NotTo(HaveOccurred())

cryptor = encryption.NewCryptor(keyManager, rand.Reader)

fakeDB.EncryptionKeyLabelReturns("", models.ErrResourceNotFound)
})

JustBeforeEach(func() {
	runner = encryptor.New(logger, fakeDB, keyManager, cryptor, clock.NewClock())
	encryptorProcess = ifrit.Background(runner)
})

AfterEach(func() {
	ginkgomon.Kill(encryptorProcess)
})

It("reports the duration that it took to encrypt", func() {
	Eventually(encryptorProcess.Ready()).Should(BeClosed())
	Eventually(logger.LogMessages).Should(ContainElement("test.encryptor.encryption-finished"))

	reportedDuration := sender.GetValue("EncryptionDuration")
	Expect(reportedDuration.Value).NotTo(BeZero())
	Expect(reportedDuration.Unit).To(Equal("nanos"))
})

func (p *dynamicGroup) Run(signals <-chan os.Signal, ready chan<- struct{}) error {
	processes := newProcessSet()
	insertEvents := p.client.insertEventListener()
	memberRequests := p.client.memberRequests()
	closeNotifier := p.client.CloseNotifier()

	entranceEvents := make(entranceEventChannel)
	exitEvents := make(exitEventChannel)

	invoking := 0

	close(ready)

	for {
		select {
		case shutdown := <-signals:
			// stop all members and close the client to new inserts
			processes.Signal(shutdown)
			p.client.Close()

		case <-closeNotifier:
			closeNotifier = nil
			insertEvents = nil

			if processes.Length() == 0 {
				return p.client.closeBroadcasters()
			}

			if invoking == 0 {
				p.client.closeEntranceBroadcaster()
			}

		case memberRequest := <-memberRequests:
			// look up a running process by name
			p, ok := processes.Get(memberRequest.Name)
			if ok {
				memberRequest.Response <- p
			}
			close(memberRequest.Response)

		case newMember, ok := <-insertEvents:
			if !ok {
				p.client.Close()
				insertEvents = nil
				break
			}

			process := ifrit.Background(newMember)
			processes.Add(newMember.Name, process)
			if processes.Length() == p.poolSize {
				insertEvents = nil
			}

			invoking++

			go waitForEvents(newMember, process, entranceEvents, exitEvents)

		case entranceEvent := <-entranceEvents:
			invoking--
			p.client.broadcastEntrance(entranceEvent)
			if closeNotifier == nil && invoking == 0 {
				p.client.closeEntranceBroadcaster()
				entranceEvents = nil
			}

		case exitEvent := <-exitEvents:
			processes.Remove(exitEvent.Member.Name)
			p.client.broadcastExit(exitEvent)

			if !processes.Signaled() && p.terminationSignal != nil {
				processes.Signal(p.terminationSignal)
				p.client.Close()
				insertEvents = nil
			}

			if processes.Complete() || (processes.Length() == 0 && insertEvents == nil) {
				return p.client.closeBroadcasters()
			}

			if !processes.Signaled() && closeNotifier != nil {
				insertEvents = p.client.insertEventListener()
			}
		}
	}
}

AfterEach(func() {
	logger.Info("test-complete-signaling-maintainer-to-stop")
	close(pingErrors)
	ginkgomon.Interrupt(maintainProcess)
})

It("pings the executor", func() {
	pingErrors <- nil
	maintainProcess = ginkgomon.Invoke(maintainer)
	Expect(fakeClient.PingCallCount()).To(Equal(1))
})

Context("when pinging the executor fails", func() {
	It("keeps pinging until it succeeds, then starts heartbeating the executor's presence", func() {
		maintainProcess = ifrit.Background(maintainer)
		ready := maintainProcess.Ready()

		for i := 1; i <= 4; i++ {
			clock.Increment(1 * time.Second)
			pingErrors <- errors.New("ping failed")
			Eventually(fakeClient.PingCallCount).Should(Equal(i))
			Expect(ready).NotTo(BeClosed())
		}

		pingErrors <- nil
		clock.Increment(1 * time.Second)
		Eventually(fakeClient.PingCallCount).Should(Equal(5))
		Eventually(ready).Should(BeClosed())

		Expect(fakeHeartbeater.RunCallCount()).To(Equal(1))