// NOTE(review): the leading brace closes a declaration begun before this
// chunk; the assignments below finish the fixture wiring of the enclosing
// BeforeEach for the SteadyState simulation under test.
}
sim = &simulate.SteadyState{
	AppSizeDistribution: appSizeDistribution,
}
logger = lagertest.NewTestLogger("test")
req = models.SteadyStateRequest{
	NumHosts:            1000,
	NumApps:             10000,
	MeanInstancesPerApp: 5,
}
})

Describe("Execute", func() {
	It("logs on start and stop", func() {
		sim.Execute(logger, req)

		// Expect at least a start line and a stop line.
		Expect(len(logger.LogMessages())).To(BeNumerically(">=", 2))
	})

	It("logs the structured request and responses", func() {
		sim.Execute(logger, req)

		// gbytes.Say consumes the buffer as a stream, so these two
		// assertions also pin the order: request details, then success.
		Expect(logger.Buffer()).To(gbytes.Say(`start.*input.*1000`))
		Expect(logger.Buffer()).To(gbytes.Say(`success`))
	})

	It("returns the request data along with the response", func() {
		resp, err := sim.Execute(logger, req)

		Expect(err).NotTo(HaveOccurred())
		// The response echoes back the request it was produced from.
		Expect(resp.Request).To(Equal(req))
	})
Eventually(fakeDB.SetEncryptionKeyLabelCallCount).Should(Equal(1)) _, newLabel := fakeDB.SetEncryptionKeyLabelArgsForCall(0) Expect(newLabel).To(Equal("label")) }) }) Context("when encrypting fails", func() { BeforeEach(func() { fakeDB.PerformEncryptionReturns(errors.New("something is broken")) }) It("does not fail and logs the error", func() { Eventually(encryptorProcess.Ready()).Should(BeClosed()) Eventually(logger.LogMessages).Should(ContainElement("test.encryptor.encryption-finished")) Expect(logger.LogMessages()).To(ContainElement("test.encryptor.encryption-failed")) }) It("does not change the key in the db", func() { Consistently(fakeDB.SetEncryptionKeyLabelCallCount).Should(Equal(0)) }) }) Context("when fetching the current encryption key fails", func() { BeforeEach(func() { fakeDB.EncryptionKeyLabelReturns("", errors.New("can't fetch")) }) It("fails early", func() { var err error Eventually(encryptorProcess.Wait()).Should(Receive(&err))
It("does not destroy again", func() {
	// A lookup failure on a second Destroy means the layer is gone;
	// no further Remove calls should be issued.
	fakeCake.GetReturns(nil, errors.New("cannae find it"))
	Expect(cakeOrdinator.Destroy(logger, "something", "rootfs")).To(Succeed())
	Expect(fakeCake.RemoveCallCount()).To(Equal(0))
})
})

Context("when the graph can't retrieve information about the layer", func() {
	BeforeEach(func() {
		fakeCake.GetReturns(nil, errors.New("failed tae find it"))
	})

	It("skips destroy and logs the error instead", func() {
		// The lookup error is swallowed: Destroy still succeeds and the
		// skip is only recorded in the log.
		Expect(cakeOrdinator.Destroy(logger, "something", "rootfs")).To(Succeed())
		Expect(logger.LogMessages()).To(ContainElement("test.layer-already-deleted-skipping"))
	})
})
})

Describe("GC", func() {
	It("delegates GC", func() {
		// GC is forwarded to the injected GCer with the cake handed through.
		Expect(cakeOrdinator.GC(logger)).To(Succeed())
		Expect(fakeGCer.GCCallCount()).To(Equal(1))
		_, cake := fakeGCer.GCArgsForCall(0)
		Expect(cake).To(Equal(fakeCake))
	})

	It("prevents concurrent garbage collection and creation", func() {
		// Signalling channel used to coordinate the concurrency check
		// (the rest of this It continues beyond this chunk).
		gcStarted := make(chan struct{})
// NOTE(review): this chunk starts inside a BeforeEach begun before it;
// depot-1 (and `err`) are presumably created above — confirm against the
// full file. Mode 0660 on directories omits the execute bit, which blocks
// traversal into them; the metrics code evidently only needs to list the
// parent, but worth confirming this is intentional.
Expect(os.Mkdir(filepath.Join(depotPath, "depot-2"), 0660)).To(Succeed())
Expect(os.Mkdir(filepath.Join(depotPath, "depot-3"), 0660)).To(Succeed())
Expect(err).ToNot(HaveOccurred())

logger = lagertest.NewTestLogger("test")
m = metrics.NewMetrics(logger, backingStorePath, depotPath)
})

AfterEach(func() {
	Expect(os.RemoveAll(depotPath)).To(Succeed())
	Expect(os.RemoveAll(backingStorePath)).To(Succeed())
})

It("should report the number of loop devices, backing store files and depotDirs", func() {
	Expect(m.NumCPU()).To(Equal(runtime.NumCPU()))
	// Goroutine count fluctuates between the two samples; allow slack of 2.
	Expect(m.NumGoroutine()).To(BeNumerically("~", runtime.NumGoroutine(), 2))
	Expect(m.LoopDevices()).NotTo(BeNil())
	Expect(m.BackingStores()).To(Equal(2))
	Expect(m.DepotDirs()).To(Equal(3))
})

Context("when the backing store path is empty", func() {
	It("reports BackingStores as -1 without doing any funny business", func() {
		// An empty path is a sentinel: report -1 and emit no log noise.
		m := metrics.NewMetrics(logger, "", depotPath)
		Expect(m.BackingStores()).To(Equal(-1))
		Expect(logger.LogMessages()).To(BeEmpty())
	})
})
})