Code Example #1
File: dynamic_group.go Project: qinguoan/vulcan
func waitForEvents(
	member Member,
	process ifrit.Process,
	entrance entranceEventChannel,
	exit exitEventChannel,
) {
	// React to whichever happens first: the process signaling readiness,
	// or the process exiting before it ever became ready.
	select {
	case <-process.Ready():
		entrance <- EntranceEvent{
			Member:  member,
			Process: process,
		}

		exit <- ExitEvent{
			Member: member,
			Err:    <-process.Wait(),
		}

	case err := <-process.Wait():
		// The process exited without becoming ready; an entrance event is
		// still emitted so every member produces a matching entrance/exit pair.
		entrance <- EntranceEvent{
			Member:  member,
			Process: process,
		}

		exit <- ExitEvent{
			Member: member,
			Err:    err,
		}
	}
}
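Every example on this page exercises the same small contract from github.com/tedsuo/ifrit: a Runner's Run method closes the ready channel it is handed to signal readiness, and the error it returns is what a caller receives from Process.Wait(). A minimal, self-contained sketch of that lifecycle, assuming only the public ifrit API (noopRunner is illustrative, not taken from any project listed here):

package main

import (
	"fmt"
	"os"

	"github.com/tedsuo/ifrit"
)

// noopRunner becomes ready immediately and exits cleanly on the first signal.
type noopRunner struct{}

func (noopRunner) Run(signals <-chan os.Signal, ready chan<- struct{}) error {
	close(ready) // Process.Ready() unblocks once this channel is closed
	<-signals    // block until Process.Signal delivers a signal
	return nil   // this value is what arrives on Process.Wait()
}

func main() {
	process := ifrit.Invoke(noopRunner{}) // Invoke blocks until ready (or exit)
	<-process.Ready()                     // already closed, so this returns at once

	process.Signal(os.Interrupt)
	fmt.Println("exit error:", <-process.Wait()) // prints: exit error: <nil>
}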
Code Example #2
		sub = mbus.NewSubscriber(logger, natsClient, registry, startMsgChan, subOpts)
	})

	AfterEach(func() {
		if natsRunner != nil {
			natsRunner.Stop()
		}
		if process != nil {
			process.Signal(os.Interrupt)
		}
		process = nil
	})

	It("exits when signaled", func() {
		process = ifrit.Invoke(sub)
		Eventually(process.Ready()).Should(BeClosed())

		process.Signal(os.Interrupt)
		var err error
		Eventually(process.Wait()).Should(Receive(&err))
		Expect(err).NotTo(HaveOccurred())
	})

	It("sends a start message", func() {
		msgChan := make(chan *nats.Msg, 1)

		_, err := natsClient.ChanSubscribe("router.start", msgChan)
		Expect(err).ToNot(HaveOccurred())

		process = ifrit.Invoke(sub)
		Eventually(process.Ready()).Should(BeClosed())
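Note that ifrit.Invoke does not return until the runner is ready or has exited, so the Eventually on process.Ready() right after Invoke passes immediately; the real check is the shutdown path, where Receive(&err) captures the exit error that arrives on process.Wait() after Signal(os.Interrupt), and NotTo(HaveOccurred()) asserts a clean exit.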
Code Example #3
File: runner_test.go Project: cautio/switchboard
	var (
		healthPort     int
		logger         *lagertest.TestLogger
		healthRunner   health.Runner
		healthProcess  ifrit.Process
		startupTimeout = 5 * time.Second
	)

	BeforeEach(func() {

		healthPort = 10000 + GinkgoParallelNode()

		logger = lagertest.NewTestLogger("HealthRunner Test")
		healthRunner = health.NewRunner(uint(healthPort), logger)
		healthProcess = ifrit.Invoke(healthRunner)
		isReady := healthProcess.Ready()
		Eventually(isReady, startupTimeout).Should(BeClosed(), "Error starting Health Runner")
	})

	AfterEach(func() {
		healthProcess.Signal(os.Kill)
		err := <-healthProcess.Wait()
		Expect(err).ToNot(HaveOccurred())
	})

	Context("when the runner is running", func() {
		It("accepts connections on health port", func() {
			conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", healthPort))
			Expect(err).ToNot(HaveOccurred())

			err = conn.Close()
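Deriving the port from 10000 + GinkgoParallelNode() gives each parallel test node its own listener, and the AfterEach's bare <-healthProcess.Wait() blocks until the runner has actually exited before its error is asserted on.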
Code Example #4
	})

	JustBeforeEach(func() {
		process = ifrit.Background(restarter)
	})

	AfterEach(func() {
		process.Signal(os.Kill)
		testRunner.EnsureExit()
		Eventually(process.Wait()).Should(Receive())
	})

	Describe("Process Behavior", func() {

		It("waits for the internal runner to be ready", func() {
			Consistently(process.Ready()).ShouldNot(BeClosed())
			testRunner.TriggerReady()
			Eventually(process.Ready()).Should(BeClosed())
		})
	})

	Describe("Load", func() {

		Context("when load returns a runner", func() {
			var loadedRunner *fake_runner.TestRunner
			var loadedRunners chan *fake_runner.TestRunner

			BeforeEach(func() {
				loadedRunners = make(chan *fake_runner.TestRunner, 1)
				restarter.Load = func(runner ifrit.Runner, err error) ifrit.Runner {
					select {
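The test excerpt above uses ifrit.Background, which, unlike ifrit.Invoke, hands back the Process before the runner is ready; that is exactly what lets the test observe the not-yet-ready state with Consistently(process.Ready()).ShouldNot(BeClosed()) and then trigger readiness itself. A minimal sketch of that difference, assuming only the public ifrit API (slowRunner is illustrative, not from the project above):

package main

import (
	"fmt"
	"os"
	"time"

	"github.com/tedsuo/ifrit"
)

// slowRunner waits briefly before signaling readiness.
type slowRunner struct{}

func (slowRunner) Run(signals <-chan os.Signal, ready chan<- struct{}) error {
	time.Sleep(100 * time.Millisecond) // simulate slow startup
	close(ready)
	<-signals
	return nil
}

func main() {
	process := ifrit.Background(slowRunner{}) // returns immediately, before ready

	select {
	case <-process.Ready():
		fmt.Println("ready") // not reached yet: startup is still in progress
	default:
		fmt.Println("still starting") // Background makes this state observable
	}

	<-process.Ready() // blocks until slowRunner closes the ready channel
	process.Signal(os.Interrupt)
	<-process.Wait()
}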
Code Example #5
File: manager_test.go Project: cloudfoundry/bbs
				var fakeMigrationToSQL *migrationfakes.FakeMigration

				BeforeEach(func() {
					fakeMigrationToSQL = &migrationfakes.FakeMigration{}
					fakeMigrationToSQL.VersionReturns(102)
					fakeMigrationToSQL.RequiresSQLReturns(true)

					dbVersion.CurrentVersion = 99
					dbVersion.TargetVersion = 99
					fakeMigration.VersionReturns(100)

					migrations = []migration.Migration{fakeMigrationToSQL, fakeMigration}
				})

				It("sorts all the migrations and runs them", func() {
					Eventually(migrationProcess.Ready()).Should(BeClosed())
					Expect(migrationsDone).To(BeClosed())
					Expect(fakeETCDDB.SetVersionCallCount()).To(Equal(2))

					_, version := fakeETCDDB.SetVersionArgsForCall(0)
					Expect(version).To(Equal(&models.Version{CurrentVersion: 99, TargetVersion: 102}))

					_, version = fakeETCDDB.SetVersionArgsForCall(1)
					// CurrentVersion is set to the version of the last ETCD migration plus 1
					Expect(version).To(Equal(&models.Version{CurrentVersion: 101, TargetVersion: 102}))

					Expect(fakeSQLDB.SetVersionCallCount()).To(Equal(2))
					_, version = fakeSQLDB.SetVersionArgsForCall(0)
					Expect(version).To(Equal(&models.Version{CurrentVersion: 99, TargetVersion: 102}))

					_, version = fakeSQLDB.SetVersionArgsForCall(1)
Code Example #6
	BeforeEach(func() {
		logger := lager.NewLogger("test")
		fakeNatsClient = diegonats.NewFakeClient()
		heartbeater = natbeat.New(fakeNatsClient, expectedRegistryMsg, initialRegisterInterval, logger)
		registrations = make(chan natbeat.RegistryMessage, 1)

		fakeNatsClient.Subscribe("router.register", func(msg *nats.Msg) {
			registration := natbeat.RegistryMessage{}
			fromJson(msg.Data, &registration)
			registrations <- registration
		})
	})

	JustBeforeEach(func() {
		heartbeaterProcess = ifrit.Invoke(heartbeater)
		Eventually(heartbeaterProcess.Ready()).Should(BeClosed())
	})

	AfterEach(func() {
		ginkgomon.Kill(heartbeaterProcess)
	})

	Context("when the router greeting is successful", func() {
		var expectedRegisterInterval = time.Second
		var greetingMsg = natbeat.GreetingMessage{
			Id:               "some-router-id",
			Hosts:            []string{"1.2.3.4"},
			RegisterInterval: expectedRegisterInterval,
		}

		BeforeEach(func() {
Code Example #7
			AccessLogger:         &access_log.NullAccessLogger{},
			HealthCheckUserAgent: "HTTP-Monitor/1.1",
			HeartbeatOK:          &healthCheck,
		})

		errChan := make(chan error, 2)
		rtr, err = router.NewRouter(logger, config, p, mbusClient, registry, varz, &healthCheck, logcounter, errChan)
		Expect(err).ToNot(HaveOccurred())

		opts := &mbus.SubscriberOpts{
			ID: "test",
			MinimumRegisterIntervalInSeconds: int(config.StartResponseDelayInterval.Seconds()),
			PruneThresholdInSeconds:          int(config.DropletStaleThreshold.Seconds()),
		}
		subscriber = ifrit.Background(mbus.NewSubscriber(logger.Session("subscriber"), mbusClient, registry, nil, opts))
		<-subscriber.Ready()
	})

	AfterEach(func() {
		if natsRunner != nil {
			natsRunner.Stop()
		}
		if subscriber != nil {
			subscriber.Signal(os.Interrupt)
			<-subscriber.Wait()
		}
	})

	Context("Drain", func() {
		BeforeEach(func() {
			runRouter(rtr)
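The bare <-subscriber.Ready() receive above blocks the setup until the subscriber becomes ready; unlike Eventually(...).Should(BeClosed()) it carries no timeout, so a subscriber that never signals readiness would hang the suite at that line.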
Code Example #8
File: encryptor_test.go Project: timani/bbs
		cryptor = encryption.NewCryptor(keyManager, rand.Reader)

		fakeDB.EncryptionKeyLabelReturns("", models.ErrResourceNotFound)
	})

	JustBeforeEach(func() {
		runner = encryptor.New(logger, fakeDB, keyManager, cryptor, clock.NewClock())
		encryptorProcess = ifrit.Background(runner)
	})

	AfterEach(func() {
		ginkgomon.Kill(encryptorProcess)
	})

	It("reports the duration that it took to encrypt", func() {
		Eventually(encryptorProcess.Ready()).Should(BeClosed())
		Eventually(logger.LogMessages).Should(ContainElement("test.encryptor.encryption-finished"))

		reportedDuration := sender.GetValue("EncryptionDuration")
		Expect(reportedDuration.Value).NotTo(BeZero())
		Expect(reportedDuration.Unit).To(Equal("nanos"))
	})

	Context("when there is no current encryption key", func() {
		BeforeEach(func() {
			fakeDB.EncryptionKeyLabelReturns("", models.ErrResourceNotFound)
		})

		It("encrypts all the existing records", func() {
			Eventually(encryptorProcess.Ready()).Should(BeClosed())
			Eventually(logger.LogMessages).Should(ContainElement("test.encryptor.encryption-finished"))
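ginkgomon.Kill sends os.Kill to the process and waits for it to exit, condensing the manual Signal-then-Wait teardown seen in the earlier examples into a single call.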
Code Example #9
		Context("when the service has a value in the Checks list", func() {
			BeforeEach(func() {
				registration.Checks = []*api.AgentServiceCheck{
					&api.AgentServiceCheck{
						TTL: "1m",
					},
				}
			})

			It("returns a validation error", func() {
				Eventually(registrationProcess.Wait()).Should(Receive(MatchError("Support for multiple service checks not implemented")))
			})

			It("does not become ready", func() {
				Consistently(registrationProcess.Ready()).Should(Not(BeClosed()))
			})

			It("does not try to register the service", func() {
				Consistently(agent.ServiceRegisterCallCount).Should(Equal(0))
			})

			It("does not try to deregister the service", func() {
				Consistently(agent.ServiceDeregisterCallCount).Should(Equal(0))
			})
		})

		Context("when the ttl is not a valid duration", func() {
			BeforeEach(func() {
				registration.Check = &api.AgentServiceCheck{
					TTL: "a minute or so",
Code Example #10
File: presence_test.go Project: cloudfoundry/locket
	})

	AfterEach(func() {
		ginkgomon.Kill(presenceProcess)
	})

	Context("When consul is running", func() {
		Context("an error occurs while acquiring the presence", func() {
			BeforeEach(func() {
				presenceKey = ""
			})

			It("continues to retry", func() {
				presenceProcess = ifrit.Background(presenceRunner)

				Consistently(presenceProcess.Ready()).ShouldNot(BeClosed())
				Consistently(presenceProcess.Wait()).ShouldNot(Receive())

				Eventually(logger).Should(Say("failed-setting-presence"))
				clock.WaitForWatcherAndIncrement(6 * time.Second)
				Eventually(logger).Should(Say("recreating-session"))
			})
		})

		Context("and the presence is available", func() {
			It("acquires the presence", func() {
				presenceProcess = ifrit.Background(presenceRunner)
				Eventually(presenceProcess.Ready()).Should(BeClosed())
				Eventually(getPresenceValue).Should(Equal(presenceValue))
			})
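Asserting Consistently on both Ready() and Wait() pins down the intermediate state: the process has neither become ready nor exited, meaning it is still inside its retry loop, and only after the fake clock advances does the session get recreated.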
Code Example #11
File: lock_test.go Project: cloudfoundry/locket
			sessions, _, err := consulClient.Session().List(nil)
			Expect(err).NotTo(HaveOccurred())
			return len(sessions)
		}).Should(Equal(numSessions))
	}

	Context("When consul is running", func() {
		Context("an error occurs while acquiring the lock", func() {
			BeforeEach(func() {
				lockKey = ""
			})

			It("continues to retry", func() {
				lockProcess = ifrit.Background(lockRunner)
				shouldEventuallyHaveNumSessions(1)
				Consistently(lockProcess.Ready()).ShouldNot(BeClosed())
				Consistently(lockProcess.Wait()).ShouldNot(Receive())

				clock.Increment(retryInterval)
				Eventually(logger).Should(Say("acquire-lock-failed"))
				Eventually(logger).Should(Say("retrying-acquiring-lock"))
				Expect(sender.GetValue(lockHeldMetricName).Value).To(Equal(float64(0)))
			})
		})

		Context("and the lock is available", func() {
			It("acquires the lock", func() {
				lockProcess = ifrit.Background(lockRunner)
				Eventually(lockProcess.Ready()).Should(BeClosed())
				Expect(sender.GetValue(lockUptimeMetricName).Value).Should(Equal(float64(0)))
				Expect(getLockValue()).To(Equal(lockValue))
Code Example #12
File: maintain_test.go Project: emc-xchallenge/rep
	AfterEach(func() {
		logger.Info("test-complete-signaling-maintainer-to-stop")
		close(pingErrors)
		ginkgomon.Interrupt(maintainProcess)
	})

	It("pings the executor", func() {
		pingErrors <- nil
		maintainProcess = ginkgomon.Invoke(maintainer)
		Expect(fakeClient.PingCallCount()).To(Equal(1))
	})

	Context("when pinging the executor fails", func() {
		It("keeps pinging until it succeeds, then starts heartbeating the executor's presence", func() {
			maintainProcess = ifrit.Background(maintainer)
			ready := maintainProcess.Ready()

			for i := 1; i <= 4; i++ {
				clock.Increment(1 * time.Second)
				pingErrors <- errors.New("ping failed")
				Eventually(fakeClient.PingCallCount).Should(Equal(i))
				Expect(ready).NotTo(BeClosed())
			}

			pingErrors <- nil
			clock.Increment(1 * time.Second)
			Eventually(fakeClient.PingCallCount).Should(Equal(5))

			Eventually(ready).Should(BeClosed())
			Expect(fakeHeartbeater.RunCallCount()).To(Equal(1))
		})
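Each clock.Increment releases one ping attempt deterministically; the ready channel stays open through the four failed pings and closes only after the fifth, successful one, at which point the presence heartbeater is started.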
Code Example #13
File: main_test.go Project: emc-xchallenge/tps
				})
			})

			Context("when the traffic controller is running", func() {
				BeforeEach(func() {
					message1 := marshalMessage(createContainerMetric("some-process-guid", 0, 3.0, 1024, 2048, 0))
					message2 := marshalMessage(createContainerMetric("some-process-guid", 1, 4.0, 1024, 2048, 0))
					message3 := marshalMessage(createContainerMetric("some-process-guid", 2, 5.0, 1024, 2048, 0))

					messages := map[string][][]byte{}
					messages["some-log-guid"] = [][]byte{message1, message2, message3}

					handler := NewHttpHandler(messages)
					httpServer := http_server.New(trafficControllerAddress, handler)
					trafficControllerProcess = ifrit.Invoke(sigmon.New(httpServer))
					Expect(trafficControllerProcess.Ready()).To(BeClosed())
				})

				AfterEach(func() {
					ginkgomon.Interrupt(trafficControllerProcess)
				})

				It("reports the state of the given process guid's instances", func() {
					getLRPStats, err := requestGenerator.CreateRequest(
						tps.LRPStats,
						rata.Params{"guid": "some-process-guid"},
						nil,
					)
					Expect(err).NotTo(HaveOccurred())
					getLRPStats.Header.Add("Authorization", "I can do this.")
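The plain Expect(trafficControllerProcess.Ready()).To(BeClosed()) above (rather than Eventually) is safe because ifrit.Invoke has already blocked until the sigmon-wrapped HTTP server signaled readiness.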