Example #1
// AdvanceTicks advances the raft state machine's fake clock by the given number of one-second ticks.
func AdvanceTicks(clockSource *fakeclock.FakeClock, ticks int) {
	// A FakeClock timer won't fire multiple times if time is advanced
	// by more than its interval in a single step, so advance the clock
	// one second at a time.
	for i := 0; i < ticks; i++ {
		clockSource.Increment(time.Second)
	}
}
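
// A minimal sketch, not part of the original source, showing how AdvanceTicks
// is typically driven from a test: something sleeps on the fake clock, the test
// waits for that sleeper to register, and only then advances the clock tick by
// tick. Assumes the same imports as the snippet above (time and the fakeclock
// package).
func advanceTicksUsageSketch() {
	clockSource := fakeclock.NewFakeClock(time.Now())

	done := make(chan struct{})
	go func() {
		// The code under test would normally do this sleep internally.
		clockSource.Sleep(5 * time.Second)
		close(done)
	}()

	// Wait until the sleeper is registered so the increments are not lost.
	for clockSource.WatcherCount() == 0 {
		time.Sleep(time.Millisecond)
	}

	AdvanceTicks(clockSource, 5) // five one-second increments release the sleeper
	<-done
}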
func incrementSleepInBackground(fakeTimeService *fakeclock.FakeClock, delay time.Duration) chan struct{} {
	doneChan := make(chan struct{})
	go func() {
		for {
			select {
			case <-doneChan:
				return
			default:
				// Busy-wait: whenever something is waiting on the fake
				// clock, advance it by the delay.
				if fakeTimeService.WatcherCount() > 0 {
					fakeTimeService.Increment(delay)
				}
			}
		}
	}()
	return doneChan
}
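
// A hypothetical usage sketch, not from the original source: run the background
// incrementer so that anything sleeping on fakeTimeService is released
// automatically, then stop the goroutine by closing the returned channel.
func incrementSleepInBackgroundSketch() {
	fakeTimeService := fakeclock.NewFakeClock(time.Now())

	doneChan := incrementSleepInBackground(fakeTimeService, time.Second)
	defer close(doneChan)

	// This would block forever against a plain fake clock; the background
	// goroutine keeps incrementing it until the sleep interval has elapsed.
	fakeTimeService.Sleep(30 * time.Second)
}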
Example #3
// PollFuncWithTimeout periodically executes the given check function and
// returns an error if it has not succeeded before the timeout.
func PollFuncWithTimeout(clockSource *fakeclock.FakeClock, f func() error, timeout time.Duration) error {
	if f() == nil {
		return nil
	}
	timer := time.After(timeout)
	for i := 0; ; i++ {
		if i%5 == 0 && clockSource != nil {
			clockSource.Increment(time.Second)
		}
		err := f()
		if err == nil {
			return nil
		}
		select {
		case <-timer:
			return fmt.Errorf("polling failed: %v", err)
		case <-time.After(50 * time.Millisecond):
		}
	}
}
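
// A hypothetical sketch, not from the original source: poll a condition with
// PollFuncWithTimeout. The callCount variable and the three-attempt threshold
// are made up for illustration; the check fails twice and then succeeds, well
// inside the (real-time) five-second timeout.
func pollFuncWithTimeoutSketch(clockSource *fakeclock.FakeClock) error {
	callCount := 0
	return PollFuncWithTimeout(clockSource, func() error {
		callCount++
		if callCount < 3 {
			return fmt.Errorf("not ready yet (attempt %d)", callCount)
		}
		return nil
	}, 5*time.Second)
}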
Example #4
// PollFuncWithTimeout periodically executes the given check function and
// returns an error if it has not succeeded before the timeout.
func PollFuncWithTimeout(clockSource *fakeclock.FakeClock, f func() error, timeout time.Duration) error {
	if f() == nil {
		return nil
	}
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	for i := 0; ; i++ {
		if i%5 == 0 && clockSource != nil {
			clockSource.Increment(time.Second)
		}
		err := f()
		if err == nil {
			return nil
		}
		select {
		case <-timer.C:
			return errors.Wrap(err, "polling failed")
		case <-time.After(50 * time.Millisecond):
		}
	}
}
Example #5
					Eventually(spawnedProcess.SignalCallCount).Should(Equal(1))

					waitExited <- (128 + 15)

					Eventually(performErr).Should(Receive(Equal(steps.ErrCancelled)))

					Expect(spawnedProcess.SignalCallCount()).To(Equal(1))
					Expect(spawnedProcess.SignalArgsForCall(0)).To(Equal(garden.SignalTerminate))
				})
			})

			Context("when the process does not exit after 10s", func() {
				It("sends a kill signal to the process", func() {
					Eventually(spawnedProcess.SignalCallCount).Should(Equal(1))

					fakeClock.Increment(steps.TERMINATE_TIMEOUT + 1*time.Second)

					Eventually(spawnedProcess.SignalCallCount).Should(Equal(2))
					Expect(spawnedProcess.SignalArgsForCall(1)).To(Equal(garden.SignalKill))

					waitExited <- (128 + 9)

					Eventually(performErr).Should(Receive(Equal(steps.ErrCancelled)))
				})

				Context("when the process *still* does not exit after 1m", func() {
					It("finishes performing with failure", func() {
						Eventually(spawnedProcess.SignalCallCount).Should(Equal(1))

						fakeClock.Increment(steps.TERMINATE_TIMEOUT + 1*time.Second)
Example #6
		Eventually(process.Wait()).Should(Receive())
	})

	itPerformsBatchOperations := func() {
		Context("when generating the batch operations succeeds", func() {
			var (
				operation1 *fake_operationq.FakeOperation
				operation2 *fake_operationq.FakeOperation
			)

			BeforeEach(func() {
				operation1 = new(fake_operationq.FakeOperation)
				operation2 = new(fake_operationq.FakeOperation)

				fakeGenerator.BatchOperationsStub = func(lager.Logger) (map[string]operationq.Operation, error) {
					fakeClock.Increment(10 * time.Second)
					return map[string]operationq.Operation{"guid1": operation1, "guid2": operation2}, nil
				}
			})

			It("pushes them onto the queue", func() {
				Eventually(fakeQueue.PushCallCount).Should(Equal(2))

				enqueuedOperations := make([]operationq.Operation, 0, 2)
				enqueuedOperations = append(enqueuedOperations, fakeQueue.PushArgsForCall(0))
				enqueuedOperations = append(enqueuedOperations, fakeQueue.PushArgsForCall(1))

				Expect(enqueuedOperations).To(ConsistOf(operation1, operation2))
			})

			It("emits the duration it took to generate the batch operations", func() {
				go func() {
					errchan <- monit.StopAndWait()
				}()

				Eventually(timeService.WatcherCount).Should(Equal(2)) // we hit the sleep

				// never called stop since 2 jobs pending
				Expect(len(runner.RunCommands)).To(Equal(0))

				client.StatusStatus = fakemonit.FakeMonitStatus{
					Services: []boshmonit.Service{
						{Monitored: false, Name: "foo", Status: "unknown", Pending: false},
						{Monitored: true, Name: "bar", Status: "unknown", Pending: true},
					},
				}
				timeService.Increment(2 * time.Minute)

				Eventually(timeService.WatcherCount).Should(Equal(2)) // we hit the sleep

				// never called stop since 1 job pending
				Expect(len(runner.RunCommands)).To(Equal(0))

				client.StatusStatus = fakemonit.FakeMonitStatus{
					Services: []boshmonit.Service{
						{Monitored: false, Name: "foo", Status: "unknown", Pending: false},
						{Monitored: false, Name: "bar", Status: "unknown", Pending: false},
					},
				}
				timeService.Increment(2 * time.Minute)

				Eventually(errchan).Should(Receive(BeNil()))
Example #8
		logErrBuffer = bytes.NewBufferString("")
		logger = boshlog.NewWriterLogger(boshlog.LevelDebug, logOutBuffer, logErrBuffer)

		ui = NewWriterUI(uiOut, uiErr, logger)
		fakeTimeService = fakeclock.NewFakeClock(time.Now())

		stage = NewStage(ui, fakeTimeService, logger)
	})

	Describe("Perform", func() {
		It("prints a single-line stage", func() {
			actionsPerformed := []string{}

			err := stage.Perform("Simple stage 1", func() error {
				actionsPerformed = append(actionsPerformed, "1")
				fakeTimeService.Increment(time.Minute)
				return nil
			})

			Expect(err).To(BeNil())

			expectedOutput := "Simple stage 1... Finished (00:01:00)\n"
			Expect(uiOut.String()).To(Equal(expectedOutput))
			Expect(actionsPerformed).To(Equal([]string{"1"}))
		})

		It("fails on error", func() {
			actionsPerformed := []string{}
			stageError := bosherr.Error("fake-stage-1-error")

			err := stage.Perform("Simple stage 1", func() error {
					pid, err := pdr.Pid(pidFilePath)
					Expect(err).NotTo(HaveOccurred())
					Expect(pid).To(Equal(5621))

					close(pidReturns)
				}()

				// WaitForWatcherAndIncrement ensures that the implementation
				// tries once and then sleeps. However, the sleep interval is
				// 20ms, so incrementing by only 10ms won't get the loop moving.
				// We do that to ensure that the file write happens before the
				// implementation tries to read the file. Hence, after we write
				// the file, the clock is incremented by a further 10ms.
				clk.WaitForWatcherAndIncrement(time.Millisecond * 10)
				Expect(ioutil.WriteFile(pidFilePath, []byte("5621"), 0766)).To(Succeed())
				clk.Increment(time.Millisecond * 10)

				Eventually(pidReturns).Should(BeClosed())
				close(done)
			}, 1.0)
		})

		Context("and it is never created", func() {
			It("should return error after the timeout", func(done Done) {
				pidReturns := make(chan struct{})
				go func() {
					defer GinkgoRecover()

					_, err := pdr.Pid(pidFilePath)
					Expect(err).To(MatchError(ContainSubstring("timeout")))
			err = allocationStore.Initialize(logger, &runReq)
			Expect(err).NotTo(HaveOccurred())

			expirationTime = 20 * time.Millisecond

			pruner := allocationStore.RegistryPruner(logger, expirationTime)
			process = ginkgomon.Invoke(pruner)
		})

		AfterEach(func() {
			ginkgomon.Interrupt(process)
		})

		Context("when the elapsed time is less than expiration period", func() {
			BeforeEach(func() {
				fakeClock.Increment(expirationTime / 2)
			})

			It("all containers are still in the list", func() {
				Consistently(allocationStore.List).Should(HaveLen(2))
			})
		})

		Context("when the elapsed time is more than expiration period", func() {
			BeforeEach(func() {
				fakeClock.Increment(2 * expirationTime)
			})

			It("it removes only RESERVED containers from the list", func() {
				Eventually(allocationStore.List).Should(HaveLen(1))
				Expect(allocationStore.List()[0].Guid).To(Equal("eventually-initialized"))
		}

		dockerRecipeBuilder = new(fakes.FakeRecipeBuilder)
		dockerRecipeBuilder.BuildStub = func(ccRequest *cc_messages.DesireAppRequestFromCC) (*models.DesiredLRP, error) {
			createRequest := models.DesiredLRP{
				ProcessGuid: ccRequest.ProcessGuid,
				Annotation:  ccRequest.ETag,
			}
			return &createRequest, nil
		}

		bbsClient = new(fake_bbs.FakeClient)
		bbsClient.DesiredLRPSchedulingInfosReturns(existingSchedulingInfos, nil)

		bbsClient.UpsertDomainStub = func(string, time.Duration) error {
			clock.Increment(syncDuration)
			return nil
		}

		logger = lagertest.NewTestLogger("test")

		processor = bulk.NewProcessor(
			bbsClient,
			500*time.Millisecond,
			time.Second,
			10,
			50,
			false,
			logger,
			fetcher,
			map[string]recipebuilder.RecipeBuilder{
	})

	AfterEach(func() {
		process.Signal(os.Interrupt)
		Eventually(process.Wait()).Should(Receive(BeNil()))
		close(syncChannel)
	})

	Context("on a specified interval", func() {

		It("should sync", func() {
			process = ifrit.Invoke(syncerRunner)
			var t1 time.Time
			var t2 time.Time

			clock.Increment(syncInterval + 100*time.Millisecond)

			select {
			case <-syncChannel:
				t1 = clock.Now()
			case <-time.After(2 * syncInterval):
				Fail("did not receive a sync event")
			}

			clock.Increment(syncInterval + 100*time.Millisecond)

			select {
			case <-syncChannel:
				t2 = clock.Now()
			case <-time.After(2 * syncInterval):
				Fail("did not receive a sync event")
Example #13
							newConfig = atc.Config{
								Resources: atc.ResourceConfigs{newResource},
							}
						})

						It("goes back to the default interval", func() {
							Expect(<-times).To(Equal(epoch)) // ignore immediate first check

							fakeClock.WaitForWatcherAndIncrement(interval)
							Expect(<-times).To(Equal(epoch.Add(interval)))

							fakeClock.WaitForWatcherAndIncrement(newInterval)
							Expect(<-times).To(Equal(epoch.Add(interval + newInterval)))

							fakeClock.WaitForWatcherAndIncrement(newInterval)
							fakeClock.Increment(interval - newInterval)
							Expect(<-times).To(Equal(epoch.Add(interval + newInterval + interval)))
						})
					})
				})

				Context("with the resource removed", func() {
					BeforeEach(func() {
						newConfig = atc.Config{
							Resources: atc.ResourceConfigs{},
						}
					})

					It("exits with the correct error", func() {
						<-times
	It("To subscribe to the dea.advertise subject", func() {
		Expect(messageBus.Subscriptions("dea.advertise")).NotTo(BeNil())
		Expect(messageBus.Subscriptions("dea.advertise")).To(HaveLen(1))
	})

	It("To start tracking store usage", func() {
		Expect(usageTracker.StartTrackingUsageCallCount()).To(Equal(1))
		Expect(usageTracker.MeasureUsageCallCount()).To(Equal(1))
		Expect(metricsAccountant.TrackActualStateListenerStoreUsageFractionCallCount()).To(Equal(1))
		Expect(metricsAccountant.TrackActualStateListenerStoreUsageFractionArgsForCall(0)).To(Equal(0.7))
	})

	It("To save heartbeats on a timer", func() {
		beat()
		clock.Increment(conf.ListenerHeartbeatSyncInterval())
		Eventually(store.SyncHeartbeatsCallCount).Should(Equal(1))

		beat()
		Consistently(store.SyncHeartbeatsCallCount).Should(Equal(1))

		clock.Increment(conf.ListenerHeartbeatSyncInterval())
		Eventually(store.SyncHeartbeatsCallCount).Should(Equal(2))
	})

	Context("When it receives a dea advertisement over the message bus", func() {
		advertise := func() {
			messageBus.SubjectCallbacks("dea.advertise")[0](&nats.Msg{
				Data: []byte("doesn't matter"),
			})
		}
Example #15
				BeforeEach(func() {
					containerResponses := [][]executor.Container{
						containers,
						[]executor.Container{},
					}

					index := 0
					executorClient.ListContainersStub = func(lager.Logger) ([]executor.Container, error) {
						containersToReturn := containerResponses[index]
						index++
						return containersToReturn, nil
					}
				})

				It("waits for all the containers to go away and exits before evacuation timeout", func() {
					fakeClock.Increment(pollingInterval)
					Eventually(executorClient.ListContainersCallCount).Should(Equal(1))

					fakeClock.Increment(pollingInterval)
					Eventually(executorClient.ListContainersCallCount).Should(Equal(2))

					Eventually(errChan).Should(Receive(BeNil()))
				})

				Context("when the executor client returns an error", func() {
					BeforeEach(func() {
						index := 0
						executorClient.ListContainersStub = func(lager.Logger) ([]executor.Container, error) {
							if index == 0 {
								index++
								return nil, errors.New("whoops")
	AfterEach(func() {
		pmn.Signal(os.Interrupt)
		Eventually(pmn.Wait(), 2*time.Second).Should(Receive())
	})

	Context("when the metron notifier starts up", func() {
		It("should emit an event that BBS has started", func() {
			Eventually(func() uint64 {
				return sender.GetCounter("BBSMasterElected")
			}).Should(Equal(uint64(1)))
		})
	})

	Context("when the report interval elapses", func() {
		JustBeforeEach(func() {
			fakeClock.Increment(reportInterval)
		})

		Context("when the etcd cluster is around", func() {
			var (
				etcd1 *ghttp.Server
				etcd2 *ghttp.Server
				etcd3 *ghttp.Server
			)

			BeforeEach(func() {
				etcd1 = ghttp.NewServer()
				etcd2 = ghttp.NewServer()
				etcd3 = ghttp.NewServer()

				etcdOptions.ClusterUrls = []string{
var _ = Describe("FakeClock", func() {
	const Δ time.Duration = 10 * time.Millisecond

	var (
		fakeClock   *fakeclock.FakeClock
		initialTime time.Time
	)

	BeforeEach(func() {
		initialTime = time.Date(2014, 1, 1, 3, 0, 30, 0, time.UTC)
		fakeClock = fakeclock.NewFakeClock(initialTime)
	})

	Describe("Now", func() {
		It("returns the current time, w/o race conditions", func() {
			go fakeClock.Increment(time.Minute)
			Eventually(fakeClock.Now).Should(Equal(initialTime.Add(time.Minute)))
		})
	})

	Describe("Sleep", func() {
		It("blocks until the given interval elapses", func() {
			doneSleeping := make(chan struct{})
			go func() {
				fakeClock.Sleep(10 * time.Second)
				close(doneSleeping)
			}()

			Consistently(doneSleeping, Δ).ShouldNot(BeClosed())

			fakeClock.Increment(5 * time.Second)
			checkFunc,
			hasBecomeHealthyChannel,
			logger,
			clock,
			fakeStreamer,
			startTimeout,
			healthyInterval,
			unhealthyInterval,
			workPool,
		)
	})

	expectCheckAfterInterval := func(fakeStep *fakes.FakeStep, d time.Duration) {
		previousCheckCount := fakeStep.PerformCallCount()

		clock.Increment(d - 1*time.Microsecond)
		Consistently(fakeStep.PerformCallCount, 0.05).Should(Equal(previousCheckCount))

		clock.Increment(d)
		Eventually(fakeStep.PerformCallCount).Should(Equal(previousCheckCount + 1))
	}

	Describe("Throttling", func() {
		var (
			throttleChan chan struct{}
			doneChan     chan struct{}
			fakeStep     *fakes.FakeStep
		)

		BeforeEach(func() {
			throttleChan = make(chan struct{}, numOfConcurrentMonitorSteps)
	var (
		fakeClock   *fakeclock.FakeClock
		initialTime time.Time
	)

	BeforeEach(func() {
		initialTime = time.Date(2014, 1, 1, 3, 0, 30, 0, time.UTC)
		fakeClock = fakeclock.NewFakeClock(initialTime)
	})

	It("provides a channel that receives the time at each interval", func() {
		ticker := fakeClock.NewTicker(10 * time.Second)
		timeChan := ticker.C()
		Consistently(timeChan, Δ).ShouldNot(Receive())

		fakeClock.Increment(5 * time.Second)
		Consistently(timeChan, Δ).ShouldNot(Receive())

		fakeClock.Increment(4 * time.Second)
		Consistently(timeChan, Δ).ShouldNot(Receive())

		fakeClock.Increment(1 * time.Second)
		Eventually(timeChan).Should(Receive(Equal(initialTime.Add(10 * time.Second))))

		fakeClock.Increment(10 * time.Second)
		Eventually(timeChan).Should(Receive(Equal(initialTime.Add(20 * time.Second))))

		fakeClock.Increment(10 * time.Second)
		Eventually(timeChan).Should(Receive(Equal(initialTime.Add(30 * time.Second))))
	})
})
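
// A sketch, not part of the FakeClock spec above, of the
// WaitForWatcherAndIncrement pattern used in the earlier examples: it blocks
// until something is waiting on the clock and only then advances it, which
// keeps the increment from racing ahead of the code under test.
func waitForWatcherAndIncrementSketch() {
	fc := fakeclock.NewFakeClock(time.Now())

	done := make(chan struct{})
	go func() {
		fc.Sleep(20 * time.Millisecond) // stands in for the code under test
		close(done)
	}()

	// Advance by less than the sleep interval: the sleeper stays blocked.
	fc.WaitForWatcherAndIncrement(10 * time.Millisecond)

	// Push it past the interval: the sleeper wakes up.
	fc.Increment(10 * time.Millisecond)
	<-done
}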
Example #20
		ginkgomon.Interrupt(maintainProcess)
	})

	It("pings the executor", func() {
		pingErrors <- nil
		maintainProcess = ginkgomon.Invoke(maintainer)
		Expect(fakeClient.PingCallCount()).To(Equal(1))
	})

	Context("when pinging the executor fails", func() {
		It("keeps pinging until it succeeds, then starts heartbeating the executor's presence", func() {
			maintainProcess = ifrit.Background(maintainer)
			ready := maintainProcess.Ready()

			for i := 1; i <= 4; i++ {
				clock.Increment(1 * time.Second)
				pingErrors <- errors.New("ping failed")
				Eventually(fakeClient.PingCallCount).Should(Equal(i))
				Expect(ready).NotTo(BeClosed())
			}

			pingErrors <- nil
			clock.Increment(1 * time.Second)
			Eventually(fakeClient.PingCallCount).Should(Equal(5))

			Eventually(ready).Should(BeClosed())
			Expect(fakeHeartbeater.RunCallCount()).To(Equal(1))
		})
	})

	Context("when pinging the executor succeeds", func() {
Example #21
		process = ifrit.Invoke(runner)
	})

	AfterEach(func() {
		process.Signal(os.Interrupt)
		Eventually(process.Wait()).Should(Receive())
	})

	It("syncs immediately", func() {
		Eventually(fakeSyncer.SyncCallCount).Should(Equal(1))
	})

	Context("when the interval elapses", func() {
		JustBeforeEach(func() {
			Eventually(fakeSyncer.SyncCallCount).Should(Equal(1))
			fakeClock.Increment(interval)
		})

		It("syncs again", func() {
			Eventually(fakeSyncer.SyncCallCount).Should(Equal(2))
			Consistently(fakeSyncer.SyncCallCount).Should(Equal(2))
		})

		Context("when the interval elapses", func() {
			JustBeforeEach(func() {
				Eventually(fakeSyncer.SyncCallCount).Should(Equal(2))
				fakeClock.Increment(interval)
			})

			It("syncs again", func() {
				Eventually(fakeSyncer.SyncCallCount).Should(Equal(3))
						callback(0)
					})

					It("stops the watches", func() {
						Eventually(desiredLRPStop).Should(Receive())
						Eventually(actualLRPStop).Should(Receive())
					})
				})

				Context("when the desired watch reports an error", func() {
					BeforeEach(func() {
						desiredLRPErrors <- errors.New("oh no!")
					})

					It("requests a new desired watch after the retry interval", func() {
						clock.Increment(retryWaitDuration / 2)
						Consistently(bbs.WatchForDesiredLRPChangesCallCount).Should(Equal(1))
						clock.Increment(retryWaitDuration * 2)
						Eventually(bbs.WatchForDesiredLRPChangesCallCount).Should(Equal(2))
					})

					Context("and the hub reports no subscribers before the retry interval elapses", func() {
						BeforeEach(func() {
							clock.Increment(retryWaitDuration / 2)
							callback(0)
							// give watcher time to clear out event loop
							time.Sleep(10 * time.Millisecond)
						})

						It("does not request new watches", func() {
							clock.Increment(retryWaitDuration * 2)
func advanceTime(timeService *fakeclock.FakeClock, duration time.Duration, watcherCount int) {
	Eventually(timeService.WatcherCount).Should(Equal(watcherCount))
	timeService.Increment(duration)
}
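
// A hypothetical Ginkgo-style sketch, not from the original source, of how the
// advanceTime helper is used: it waits until the expected number of watchers
// are sleeping on the fake clock before advancing it, so the increment cannot
// race ahead of the code under test.
It("releases a sleeper once the expected watcher has registered", func() {
	timeService := fakeclock.NewFakeClock(time.Now())

	woke := make(chan struct{})
	go func() {
		timeService.Sleep(time.Minute) // e.g. the code under test waiting for its next poll
		close(woke)
	}()

	advanceTime(timeService, time.Minute, 1)
	Eventually(woke).Should(BeClosed())
})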
	Context("when a staging task completes", func() {
		var taskResponse *models.TaskCallbackResponse
		var annotationJson []byte

		BeforeEach(func() {
			var err error
			annotationJson, err = json.Marshal(cc_messages.StagingTaskAnnotation{
				Lifecycle: "fake",
			})
			Expect(err).NotTo(HaveOccurred())
		})

		JustBeforeEach(func() {
			createdAt := fakeClock.Now().UnixNano()
			fakeClock.Increment(stagingDurationNano)

			taskResponse = &models.TaskCallbackResponse{
				TaskGuid:  "the-task-guid",
				CreatedAt: createdAt,
				Result: `{
					"buildpack_key":"buildpack-key",
					"detected_buildpack":"Some Buildpack",
					"execution_metadata":"{\"start_command\":\"./some-start-command\"}",
					"detected_start_command":{"web":"./some-start-command"}
				}`,
				Annotation: string(annotationJson),
			}

			handler.StagingComplete(responseRecorder, postTask(taskResponse))
		})
						BuildCellState("C-zone", 100, 100, 100, false, windowsOnlyRootFSProviders, []rep.LRP{
							*BuildLRP("pg-win-1", "domain", 0, "", 10, 10),
						}),
					),
				}
			})

			Context("with a new LRP only supported in one of many zones", func() {
				BeforeEach(func() {

					startAuction = BuildLRPAuction("pg-win-2", "domain", 1, windowsRootFSURL, 10, 10, clock.Now())
				})

				Context("when it picks a winner", func() {
					BeforeEach(func() {
						clock.Increment(time.Minute)
						s := auctionrunner.NewScheduler(workPool, zones, clock, logger)
						results = s.Schedule(auctiontypes.AuctionRequest{LRPs: []auctiontypes.LRPAuction{startAuction}})
					})

					It("picks the best cell for the job", func() {
						Expect(clients["A-cell"].PerformCallCount()).To(Equal(0))
						Expect(clients["B-cell"].PerformCallCount()).To(Equal(0))
						Expect(clients["C-cell"].PerformCallCount()).To(Equal(1))

						startsToC := clients["C-cell"].PerformArgsForCall(0).LRPs

						Expect(startsToC).To(ConsistOf(startAuction.LRP))
					})

					It("marks the start auction as succeeded", func() {
Example #26
				Name:             gardenAddr,
				GardenAddr:       gardenAddr,
				BaggageclaimURL:  baggageClaimAddr,
				ActiveContainers: 0,
				ResourceTypes:    resourceTypes,
				Platform:         "linux",
				Tags:             []string{},
			}
			expectedTTL := 30 * time.Second

			Eventually(workerDB.SaveWorkerCallCount).Should(Equal(1))
			workerInfo, ttl := workerDB.SaveWorkerArgsForCall(0)
			Expect(workerInfo).To(Equal(expectedWorkerInfo))
			Expect(ttl).To(Equal(expectedTTL))

			fakeClock.Increment(11 * time.Second)

			Eventually(workerDB.SaveWorkerCallCount).Should(Equal(2))
			workerInfo, ttl = workerDB.SaveWorkerArgsForCall(1)
			Expect(workerInfo).To(Equal(expectedWorkerInfo))
			Expect(ttl).To(Equal(expectedTTL))
		})

		It("can be interrupted", func() {
			expectedWorkerInfo := db.WorkerInfo{
				Name:             gardenAddr,
				GardenAddr:       gardenAddr,
				BaggageclaimURL:  baggageClaimAddr,
				ActiveContainers: 0,
				ResourceTypes:    resourceTypes,
				Platform:         "linux",
		})

		AfterEach(func() {
			process.Signal(os.Interrupt)
			Eventually(process.Wait(), 5*time.Second).Should(Receive())
		})

		It("subscribes for events", func() {
			Eventually(client.SubscribeToEventsWithMaxRetriesCallCount).Should(Equal(1))
		})

		Context("on specified interval", func() {
			It("it fetches routes", func() {
				// to be consumed by the eventSource.NextStub to avoid starvation
				eventChannel <- routing_api.Event{}
				clock.Increment(cfg.PruneStaleDropletsInterval + 100*time.Millisecond)
				Eventually(client.RoutesCallCount, 2*time.Second, 50*time.Millisecond).Should(Equal(1))
				clock.Increment(cfg.PruneStaleDropletsInterval + 100*time.Millisecond)
				Eventually(client.RoutesCallCount, 2*time.Second, 50*time.Millisecond).Should(Equal(2))
			})
		})

		Context("when token fetcher returns error", func() {
			BeforeEach(func() {
				uaaClient.FetchTokenReturns(nil, errors.New("Unauthorized"))
			})

			It("logs the error", func() {
				currentTokenFetchErrors := sender.GetCounter(TokenFetchErrors)

				Eventually(logger).Should(gbytes.Say("Unauthorized"))
Example #28
			actualHandle := fakeDB.GetVolumeTTLArgsForCall(0)
			Expect(actualHandle).To(Equal("some-handle"))

			By("using that ttl to heartbeat the volume initially")
			Expect(fakeVolume.SetTTLCallCount()).To(Equal(1))
			actualTTL := fakeVolume.SetTTLArgsForCall(0)
			Expect(actualTTL).To(Equal(expectedTTL))

			Expect(fakeDB.SetVolumeTTLCallCount()).To(Equal(1))
			actualHandle, actualTTL = fakeDB.SetVolumeTTLArgsForCall(0)
			Expect(actualHandle).To(Equal(vol.Handle()))
			Expect(actualTTL).To(Equal(expectedTTL))

			By("using the ttl from the database each tick")
			fakeDB.GetVolumeTTLReturns(expectedTTL2, nil)
			fakeClock.Increment(30 * time.Second)

			Eventually(fakeVolume.SetTTLCallCount).Should(Equal(2))
			actualTTL = fakeVolume.SetTTLArgsForCall(1)
			Expect(actualTTL).To(Equal(expectedTTL2))

			Eventually(fakeDB.SetVolumeTTLCallCount).Should(Equal(2))
			actualHandle, actualTTL = fakeDB.SetVolumeTTLArgsForCall(1)
			Expect(actualHandle).To(Equal(vol.Handle()))
			Expect(actualTTL).To(Equal(expectedTTL2))

			By("being resiliant to db errors")
			fakeDB.GetVolumeTTLReturns(0, errors.New("disaster"))
			fakeClock.Increment(30 * time.Second)
			Eventually(fakeVolume.SetTTLCallCount).Should(Equal(3))
			actualTTL = fakeVolume.SetTTLArgsForCall(2)
				convergeRepeatInterval,
				kickTaskDuration,
				expirePendingTaskDuration,
				expireCompletedTaskDuration,
			),
		)
	})

	AfterEach(func() {
		ginkgomon.Interrupt(process)
		Eventually(process.Wait()).Should(Receive())
	})

	Describe("converging over time", func() {
		It("converges tasks, LRPs, and auctions when the lock is periodically reestablished", func() {
			fakeClock.Increment(convergeRepeatInterval + aBit)

			Eventually(fakeBBSClient.ConvergeTasksCallCount, aBit).Should(Equal(1))
			Eventually(fakeBBSClient.ConvergeLRPsCallCount, aBit).Should(Equal(1))

			actualKickTaskDuration, actualExpirePendingTaskDuration, actualExpireCompletedTaskDuration := fakeBBSClient.ConvergeTasksArgsForCall(0)
			Expect(actualKickTaskDuration).To(Equal(kickTaskDuration))
			Expect(actualExpirePendingTaskDuration).To(Equal(expirePendingTaskDuration))
			Expect(actualExpireCompletedTaskDuration).To(Equal(expireCompletedTaskDuration))

			fakeClock.Increment(convergeRepeatInterval + aBit)

			Eventually(fakeBBSClient.ConvergeTasksCallCount, aBit).Should(Equal(2))
			Eventually(fakeBBSClient.ConvergeLRPsCallCount, aBit).Should(Equal(2))

			actualKickTaskDuration, actualExpirePendingTaskDuration, actualExpireCompletedTaskDuration = fakeBBSClient.ConvergeTasksArgsForCall(1)
Example #30
			query, args := underlyingConn.QueryArgsForCall(0)
			Expect(query).To(Equal("SELECT $1::int"))
			Expect(args).To(Equal(varargs(1)))
		})
	})

	Context("when the query takes more time than the timeout", func() {
		var realConn *sql.DB

		BeforeEach(func() {
			postgresRunner.Truncate()

			realConn = postgresRunner.Open()
			underlyingConn.QueryStub = func(query string, args ...interface{}) (*sql.Rows, error) {
				if !strings.HasPrefix(query, "EXPLAIN") {
					fakeClock.Increment(120 * time.Millisecond)
				}

				return realConn.Query(query, args...)
			}

			underlyingConn.QueryRowStub = func(query string, args ...interface{}) *sql.Row {
				if !strings.HasPrefix(query, "EXPLAIN") {
					fakeClock.Increment(120 * time.Millisecond)
				}

				return realConn.QueryRow(query, args...)
			}

			underlyingConn.ExecStub = func(query string, args ...interface{}) (sql.Result, error) {
				if !strings.HasPrefix(query, "EXPLAIN") {