			consulRunner.DestroySession("nsync-bulker")
		})

		AfterEach(func() {
			ginkgomon.Interrupt(process, interruptTimeout)
		})

		itIsMissingDomain()

		It("exits with an error", func() {
			Eventually(process.Wait(), 2*domainTTL).Should(Receive(HaveOccurred()))
		})
	})

	Context("when the bulker initially does not have the lock", func() {
		var otherSession *consuladapter.Session

		BeforeEach(func() {
			heartbeatInterval = 1 * time.Second

			otherSession = consulRunner.NewSession("other-session")
			err := otherSession.AcquireLock(locket.LockSchemaPath(bulkerLockName), []byte("something-else"))
			Expect(err).NotTo(HaveOccurred())
		})

		JustBeforeEach(func() {
			process = startBulker(false)
		})

		AfterEach(func() {
			ginkgomon.Interrupt(process, interruptTimeout)
		})
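
		// Sketch, not part of the original suite: while the competing session
		// holds the lock, the bulker should block waiting for it rather than
		// exit. process.Wait() is ifrit's exit channel, so nothing should be
		// received from it while the bulker is still running.
		It("keeps running while another session holds the lock", func() {
			Consistently(process.Wait(), 2*heartbeatInterval).ShouldNot(Receive())
		})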
}) Context("when Lock() is stopped (session is destroyed)", func() { BeforeEach(func() { lock := &fakes.FakeLock{} lock.LockReturns(nil, nil) sessionMgr.NewLockReturns(lock, nil) }) It("returns an error", func() { Expect(lockErr).To(Equal(consuladapter.ErrCancelled)) }) }) Context("when recreating the Session", func() { var newSession *consuladapter.Session JustBeforeEach(func() { var err error newSession, err = session.Recreate() Expect(err).NotTo(HaveOccurred()) }) AfterEach(func() { newSession.Destroy() }) It("creates a new session", func() { Expect(newSession.ID()).NotTo(Equal(session.ID())) })
		startClusterAndSession()
	})

	AfterEach(stopClusterAndSession)

	Context("when the watch starts first", func() {
		BeforeEach(func() {
			client = clusterRunner.NewClient()
			disappearChan = session.WatchForDisappearancesUnder(logger, "under")
		})

		Context("when there are keys", func() {
			var bsession *consuladapter.Session

			BeforeEach(func() {
				bsession = clusterRunner.NewSession("bsession")
			})

			AfterEach(func() {
				bsession.Destroy()
			})

			It("detects removals of keys", func() {
				_, err := bsession.SetPresence("under/here", []byte("value"))
				Expect(err).NotTo(HaveOccurred())

				bsession.Destroy()
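
				// Continuation sketch (assumption): destroying the session
				// removes its presence key, and the watch reports the missing
				// key's name on the disappearance channel.
				Eventually(disappearChan).Should(Receive(Equal([]string{"under/here"})))
			})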
	exitDuration = 4 * time.Second

	convergeRepeatInterval      = 500 * time.Millisecond
	taskKickInterval            = convergeRepeatInterval
	expireCompletedTaskDuration = 3 * convergeRepeatInterval
	expirePendingTaskDuration   = 30 * time.Minute
)

var (
	binPaths BinPaths

	etcdRunner *etcdstorerunner.ETCDClusterRunner

	bbsArgs    bbsrunner.Args
	bbsProcess ifrit.Process
	bbsClient  bbs.Client

	convergerConfig  *convergerrunner.Config
	convergerProcess ifrit.Process
	runner           *ginkgomon.Runner

	consulRunner  *consulrunner.ClusterRunner
	consulSession *consuladapter.Session

	etcdClient storeadapter.StoreAdapter
	logger     lager.Logger
)

var _ = SynchronizedBeforeSuite(func() []byte {
	convergerBinPath, err := Build("github.com/cloudfoundry-incubator/converger/cmd/converger", "-race")
	Expect(err).NotTo(HaveOccurred())

	bbsBinPath, err := Build("github.com/cloudfoundry-incubator/bbs/cmd/bbs", "-race")
	Expect(err).NotTo(HaveOccurred())

	bytes, err := json.Marshal(BinPaths{
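		// Continuation sketch (assumption): the real field names of BinPaths
		// may differ. The two-callback shape below is the standard Ginkgo
		// SynchronizedBeforeSuite pattern: build binaries once on node 1,
		// then hand the serialized paths to every parallel node.
		Converger: convergerBinPath,
		Bbs:       bbsBinPath,
	})
	Expect(err).NotTo(HaveOccurred())

	return bytes
}, func(payload []byte) {
	err := json.Unmarshal(payload, &binPaths)
	Expect(err).NotTo(HaveOccurred())
})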
"github.com/hashicorp/consul/api" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "github.com/pivotal-golang/lager/lagertest" ) // copied from consul to prevent unnecessary additional dependencies const serfCheckID = "serfHealth" var _ = Describe("Session", func() { BeforeEach(startCluster) AfterEach(stopCluster) var client *api.Client var sessionMgr *fakes.FakeSessionManager var session *consuladapter.Session var newSessionErr error var logger *lagertest.TestLogger var noChecks bool BeforeEach(func() { logger = lagertest.NewTestLogger("test") client = clusterRunner.NewClient() sessionMgr = newFakeSessionManager(client) noChecks = false }) JustBeforeEach(func() { if noChecks { session, newSessionErr = consuladapter.NewSessionNoChecks("a-session", 20*time.Second, client, sessionMgr)