Example No. 1
func (etcd *ETCDClusterRunner) stop(nuke bool) {
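	// Under the cluster lock: interrupt each node's process (waiting up to 5s
	// per node), optionally delete its on-disk artifacts, then mark the
	// cluster as stopped.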
	etcd.mutex.Lock()
	defer etcd.mutex.Unlock()

	if etcd.running {
		for i := 0; i < etcd.numNodes; i++ {
			ginkgomon.Interrupt(etcd.etcdProcesses[i], 5*time.Second)
			if nuke {
				etcd.nukeArtifacts(i)
			}
		}
		etcd.markAsStopped()
	}
}
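All of the excerpts in this listing follow the same lifecycle: ginkgomon.New wraps a command in a Runner, ginkgomon.Invoke starts it and blocks until its StartCheck line appears, and ginkgomon.Interrupt sends os.Interrupt and waits for the process to exit. Below is a minimal sketch of that pattern; the binary path, flags, and start-check string are illustrative only and not taken from any of the examples here, and both helpers are meant to be called from inside a Ginkgo test, since Interrupt asserts with Gomega.

package example_test

import (
	"os/exec"
	"time"

	"github.com/tedsuo/ifrit"
	"github.com/tedsuo/ifrit/ginkgomon"
)

// startServer builds a ginkgomon.Runner around a hypothetical server binary
// and starts it; Invoke blocks until the StartCheck string shows up in the
// process's output (or fails the test if the process exits first).
func startServer(serverPath string) ifrit.Process {
	runner := ginkgomon.New(ginkgomon.Config{
		Name:       "my-server",
		Command:    exec.Command(serverPath, "--port", "8080"),
		StartCheck: "my-server.started",
	})
	return ginkgomon.Invoke(runner)
}

// stopServer sends os.Interrupt to the process and waits up to five seconds
// for it to exit, failing the current test if it does not.
func stopServer(process ifrit.Process) {
	ginkgomon.Interrupt(process, 5*time.Second)
}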
Example No. 2
func (runner *AgentRunner) Stop() {
	runner.mutex.Lock()
	defer runner.mutex.Unlock()

	if !runner.running {
		return
	}

	ginkgomon.Interrupt(runner.consulProcess, 5*time.Second)

	os.RemoveAll(runner.dataDir)
	os.RemoveAll(runner.configDir)
	runner.consulProcess = nil
	runner.running = false
}
Example No. 3
func (cr *ClusterRunner) Stop() {
	cr.mutex.Lock()
	defer cr.mutex.Unlock()

	if !cr.running {
		return
	}

	for i := 0; i < cr.numNodes; i++ {
		ginkgomon.Interrupt(cr.consulProcesses[i], 5*time.Second)
	}

	os.RemoveAll(cr.dataDir)
	os.RemoveAll(cr.configDir)
	cr.consulProcesses = nil
	cr.running = false
}
Example No. 4
			Expect(err).NotTo(HaveOccurred())
			service, ok := services[registration.ID]
			Expect(ok).To(BeTrue())
			Expect(*service).To(Equal(api.AgentService{
				ID:      registration.ID,
				Service: registration.Name,
				Tags:    registration.Tags,
				Port:    registration.Port,
				Address: registration.Address,
			}))
		})
	})

	Context("when signalled", func() {
		It("deregisters the given service before exiting", func() {
			ginkgomon.Interrupt(registrationProcess)
			services, err := consulClient.Agent().Services()
			Expect(err).NotTo(HaveOccurred())
			Expect(services).ToNot(HaveKey(registration.ID))
		})
	})
})

var _ = Describe("Service Registration Unit Tests", func() {
	var (
		client *fakes.FakeClient
		agent  *fakes.FakeAgent
		logger lager.Logger
		clock  *fakeclock.FakeClock

		registration        *api.AgentServiceRegistration
Example No. 5
						Eventually(presenceProcess.Wait()).ShouldNot(Receive())
						clock.WaitForWatcherAndIncrement(6 * time.Second)
						Eventually(logger).Should(Say("recreating-session"))

						consulRunner.Start()
						consulRunner.WaitUntilReady()

						clock.WaitForWatcherAndIncrement(6 * time.Second)
						Eventually(logger).Should(Say("succeeded-recreating-session"))
						Eventually(presenceProcess.Ready()).Should(BeClosed())
					})
				})

				Context("and the process is shutting down", func() {
					It("releases the presence and exits", func() {
						ginkgomon.Interrupt(presenceProcess)
						Eventually(presenceProcess.Wait()).Should(Receive(BeNil()))
						_, err := getPresenceValue()
						Expect(err).To(Equal(consuladapter.NewKeyNotFoundError(presenceKey)))
					})
				})

				Context("and consul goes through a period of instability", func() {
					var serveFiveHundreds chan struct{}
					var fakeConsul *httptest.Server

					BeforeEach(func() {
						serveFiveHundreds = make(chan struct{}, 8)

						consulClusterURL, err := url.Parse(consulRunner.URL())
						Expect(err).NotTo(HaveOccurred())
Example No. 6
		return httpClient.Do(req)
	}

	BeforeEach(func() {
		nsyncPort = 8888 + GinkgoParallelNode()
		nsyncURL := fmt.Sprintf("http://127.0.0.1:%d", nsyncPort)

		requestGenerator = rata.NewRequestGenerator(nsyncURL, nsync.Routes)
		httpClient = http.DefaultClient

		runner := newNSyncRunner(fmt.Sprintf("127.0.0.1:%d", nsyncPort))
		process = ginkgomon.Invoke(runner)
	})

	AfterEach(func() {
		ginkgomon.Interrupt(process, exitDuration)
	})

	Describe("Desire an app", func() {
		BeforeEach(func() {
			response, err = requestDesireWithInstances(3)
		})

		It("desires the app from the bbs", func() {
			Expect(err).NotTo(HaveOccurred())
			Expect(response.StatusCode).To(Equal(http.StatusAccepted))
			Eventually(func() ([]*models.DesiredLRP, error) {
				return bbsClient.DesiredLRPs(models.DesiredLRPFilter{})
			}, 10).Should(HaveLen(1))
			desiredLrps, _ := bbsClient.DesiredLRPs(models.DesiredLRPFilter{})
			newRouteMessage := json.RawMessage([]byte(`[{"hostnames":["route-1"],"port":8080}]`))
Example No. 7
	var pipelineDBFactory db.PipelineDBFactory

	BeforeEach(func() {
		dbLogger := lagertest.NewTestLogger("test")
		postgresRunner.Truncate()
		dbConn = postgresRunner.Open()
		dbListener = pq.NewListener(postgresRunner.DataSourceName(), time.Second, time.Minute, nil)
		bus := db.NewNotificationsBus(dbListener, dbConn)
		sqlDB = db.NewSQL(dbLogger, dbConn, bus)
		pipelineDBFactory = db.NewPipelineDBFactory(dbLogger, dbConn, bus, sqlDB)

		atcProcess, atcPort = startATC(atcBin, 1)
	})

	AfterEach(func() {
		ginkgomon.Interrupt(atcProcess)

		Expect(dbConn.Close()).To(Succeed())
		Expect(dbListener.Close()).To(Succeed())
	})

	Describe("viewing a list of builds", func() {
		var page *agouti.Page
		var pipelineDB db.PipelineDB

		BeforeEach(func() {
			var err error
			page, err = agoutiDriver.NewPage()
			Expect(err).NotTo(HaveOccurred())
		})
Example No. 8
	BeforeEach(func() {
		dbLogger := lagertest.NewTestLogger("test")

		postgresRunner.Truncate()
		dbConn = db.Wrap(postgresRunner.Open())
		dbListener = pq.NewListener(postgresRunner.DataSourceName(), time.Second, time.Minute, nil)
		bus := db.NewNotificationsBus(dbListener, dbConn)
		sqlDB = db.NewSQL(dbLogger, dbConn, bus)

		atcOneProcess, atcOnePort = startATC(atcBin, 1, true, BASIC_AUTH)
		atcTwoProcess, atcTwoPort = startATC(atcBin, 2, true, BASIC_AUTH)
	})

	AfterEach(func() {
		ginkgomon.Interrupt(atcOneProcess)
		ginkgomon.Interrupt(atcTwoProcess)

		Expect(dbConn.Close()).To(Succeed())
		Expect(dbListener.Close()).To(Succeed())
	})

	Describe("Pipes", func() {

		var client *http.Client
		BeforeEach(func() {
			client = &http.Client{
				Transport: &http.Transport{},
			}
		})
Example No. 9
func (m *MetronRunner) Stop() {
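	// The trailing arguments to Interrupt are passed on to Gomega's Eventually,
	// so the bare 2 below reads as a 2-second wait for the process to exit.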
	ginkgomon.Interrupt(m.Process, 2)
}
Example No. 10
		const cell1 = "cell-id-1"
		const cell2 = "cell-id-2"

		Context("when there is a single cell", func() {
			var maintainers ifrit.Process

			BeforeEach(func() {
				Expect(serviceClient.Cells(logger)).To(HaveLen(0))
				maintainers = ifrit.Invoke(grouper.NewParallel(os.Interrupt, grouper.Members{
					{cell1, serviceClient.NewCellPresenceRunner(logger, newCellPresence(cell1), locket.RetryInterval, locket.LockTTL)},
					{cell2, serviceClient.NewCellPresenceRunner(logger, newCellPresence(cell2), locket.RetryInterval, locket.LockTTL)},
				}))
			})

			AfterEach(func() {
				ginkgomon.Interrupt(maintainers)
			})

			It("returns only one cell", func() {
				Eventually(func() (models.CellSet, error) { return serviceClient.Cells(logger) }).Should(HaveLen(2))
				Expect(serviceClient.Cells(logger)).To(HaveKey(cell1))
				Expect(serviceClient.Cells(logger)).To(HaveKey(cell2))
			})
		})
	})
})

func newCellPresence(cellID string) *models.CellPresence {
	presence := models.NewCellPresence(
		cellID,
		"cell.example.com",
Example No. 11
				registrations = make(chan *nats.Msg, 1)
				natsClient.Subscribe("router.register", func(msg *nats.Msg) {
					registrations <- msg
				})
			})

			It("starts announcing its location again", func() {
				Eventually(registrations).Should(Receive())
			})
		})
	})

	Context("when a server is sent SIGINT after the hearbeat has started", func() {
		JustBeforeEach(func() {
			Eventually(receptorRunner).Should(gbytes.Say("nats-heartbeat.started"))
			ginkgomon.Interrupt(receptorProcess)
		})

		Context("and NATS is accessible", func() {
			var unregistrations chan *nats.Msg

			BeforeEach(func() {
				unregistrations = make(chan *nats.Msg, 1)
				natsClient.Subscribe("router.unregister", func(msg *nats.Msg) {
					unregistrations <- msg
				})
			})

			It("broadcasts an unregister message", func() {
				Eventually(unregistrations).Should(Receive())
			})
Example No. 12
			)

			tsaRunner := ginkgomon.New(ginkgomon.Config{
				Command:       tsaCommand,
				Name:          "tsa",
				StartCheck:    "tsa.listening",
				AnsiColorCode: "32m",
			})

			tsaProcess = ginkgomon.Invoke(tsaRunner)
		})

		AfterEach(func() {
			atcServer.Close()
			gardenServer.Stop()
			ginkgomon.Interrupt(tsaProcess)
		})

		Describe("SSHing", func() {
			var sshSess *gexec.Session
			var sshStdin io.Writer
			var sshArgv []string

			BeforeEach(func() {
				sshArgv = []string{
					"127.0.0.1",
					"-p", strconv.Itoa(tsaPort),
					"-o", "UserKnownHostsFile=" + userKnownHostsFile,
				}
			})
Example No. 13
			Expect(delta).To(BeEquivalentTo(1))
		})
	})

	It("cleans up exiting connections when killing the BBS", func(done Done) {
		var err error
		eventSource, err = client.SubscribeToEvents(logger)
		Expect(err).NotTo(HaveOccurred())

		go func() {
			_, err := eventSource.Next()
			Expect(err).To(HaveOccurred())
			close(done)
		}()

		ginkgomon.Interrupt(bbsProcess)
	})
})

func primeEventStream(eventChannel chan models.Event, eventType string, primer func(), cleanup func()) {
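	// Run primer, then wait for an event to arrive on eventChannel; if nothing
	// shows up within 50ms, clean up and prime again until the stream is live.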
	primer()

PRIMING:
	for {
		select {
		case <-eventChannel:
			break PRIMING
		case <-time.After(50 * time.Millisecond):
			cleanup()
			primer()
		}
Example No. 14
		Describe("pinging the server", func() {
			var pingErr error

			Context("when Garden responds to ping", func() {
				JustBeforeEach(func() {
					pingErr = executorClient.Ping()
				})

				It("does not return an error", func() {
					Expect(pingErr).NotTo(HaveOccurred())
				})
			})

			Context("when Garden returns an error", func() {
				JustBeforeEach(func() {
					ginkgomon.Interrupt(gardenProcess)
					pingErr = executorClient.Ping()
				})

				AfterEach(func() {
					gardenProcess = ginkgomon.Invoke(componentMaker.GardenLinux())
				})

				It("should return an error", func() {
					Expect(pingErr).To(HaveOccurred())
					Expect(pingErr.Error()).To(ContainSubstring("connection refused"))
				})
			})
		})

		Describe("getting the total resources", func() {
Example No. 15
			etcdStoreClient = etcd.NewStoreClient(nil)
		})

		Context("but SQL does not have a version", func() {
			BeforeEach(func() {
				fakeSQLDB.VersionReturns(nil, models.ErrResourceNotFound)
			})

			It("fetches the version from etcd", func() {
				Eventually(fakeSQLDB.VersionCallCount).Should(Equal(1))
				Consistently(fakeSQLDB.VersionCallCount).Should(Equal(1))

				Eventually(fakeETCDDB.VersionCallCount).Should(Equal(1))
				Consistently(fakeETCDDB.VersionCallCount).Should(Equal(1))

				ginkgomon.Interrupt(migrationProcess)
				Eventually(migrationProcess.Wait()).Should(Receive(BeNil()))
			})

			// cross-db migration
			Context("but etcd does", func() {
				var fakeMigrationToSQL *migrationfakes.FakeMigration

				BeforeEach(func() {
					fakeMigrationToSQL = &migrationfakes.FakeMigration{}
					fakeMigrationToSQL.VersionReturns(102)
					fakeMigrationToSQL.RequiresSQLReturns(true)

					dbVersion.CurrentVersion = 99
					dbVersion.TargetVersion = 99
					fakeMigration.VersionReturns(100)
Example No. 16
		It("unregisters from etcd when the process exits", func() {
			routingAPIRunner := testrunner.New(routingAPIBinPath, routingAPIArgs)
			proc := ifrit.Invoke(routingAPIRunner)

			getRoutes := func() string {
				routesPath := fmt.Sprintf("%s/v2/keys/routes", etcdUrl)
				resp, err := http.Get(routesPath)
				Expect(err).ToNot(HaveOccurred())

				body, err := ioutil.ReadAll(resp.Body)
				Expect(err).ToNot(HaveOccurred())
				return string(body)
			}
			Eventually(getRoutes).Should(ContainSubstring("api.example.com/routing"))

			ginkgomon.Interrupt(proc)

			Eventually(getRoutes).ShouldNot(ContainSubstring("api.example.com/routing"))
			Eventually(routingAPIRunner.ExitCode()).Should(Equal(0))
		})

		Context("when router groups endpoint is invoked", func() {
			var proc ifrit.Process

			BeforeEach(func() {
				routingAPIRunner := testrunner.New(routingAPIBinPath, routingAPIArgs)
				proc = ifrit.Invoke(routingAPIRunner)
			})

			AfterEach(func() {
				ginkgomon.Interrupt(proc)
Example No. 17
func stopMetron() {
	ginkgomon.Interrupt(metronProcess)
}
Example No. 18
			err = natsClient.Publish(msg.Reply, response)
			Expect(err).NotTo(HaveOccurred())
		})
	})

	Context("when the emitter is running", func() {
		var emitter ifrit.Process

		BeforeEach(func() {
			runner := createEmitterRunner("emitter1")
			runner.StartCheck = "emitter1.started"
			emitter = ginkgomon.Invoke(runner)
		})

		AfterEach(func() {
			ginkgomon.Interrupt(emitter, emitterInterruptTimeout)
		})

		Context("and an lrp with routes is desired", func() {
			BeforeEach(func() {
				err := bbsClient.DesireLRP(desiredLRP)
				Expect(err).NotTo(HaveOccurred())
			})

			Context("and an instance starts", func() {
				BeforeEach(func() {
					err := bbsClient.StartActualLRP(&lrpKey, &instanceKey, &netInfo)
					Expect(err).NotTo(HaveOccurred())
				})

				It("emits its routes immediately", func() {
Example No. 19
				BeforeEach(func() {
					message1 := marshalMessage(createContainerMetric("some-process-guid", 0, 3.0, 1024, 2048, 0))
					message2 := marshalMessage(createContainerMetric("some-process-guid", 1, 4.0, 1024, 2048, 0))
					message3 := marshalMessage(createContainerMetric("some-process-guid", 2, 5.0, 1024, 2048, 0))

					messages := map[string][][]byte{}
					messages["some-log-guid"] = [][]byte{message1, message2, message3}

					handler := NewHttpHandler(messages)
					httpServer := http_server.New(trafficControllerAddress, handler)
					trafficControllerProcess = ifrit.Invoke(sigmon.New(httpServer))
					Expect(trafficControllerProcess.Ready()).To(BeClosed())
				})

				AfterEach(func() {
					ginkgomon.Interrupt(trafficControllerProcess)
				})

				It("reports the state of the given process guid's instances", func() {
					getLRPStats, err := requestGenerator.CreateRequest(
						tps.LRPStats,
						rata.Params{"guid": "some-process-guid"},
						nil,
					)
					Expect(err).NotTo(HaveOccurred())
					getLRPStats.Header.Add("Authorization", "I can do this.")

					response, err := httpClient.Do(getLRPStats)
					Expect(err).NotTo(HaveOccurred())
					Expect(response.StatusCode).To(Equal(http.StatusOK))
Example No. 20
		process = ifrit.Invoke(
			converger_process.New(
				fakeBBSServiceClient,
				fakeBBSClient,
				logger,
				fakeClock,
				convergeRepeatInterval,
				kickTaskDuration,
				expirePendingTaskDuration,
				expireCompletedTaskDuration,
			),
		)
	})

	AfterEach(func() {
		ginkgomon.Interrupt(process)
		Eventually(process.Wait()).Should(Receive())
	})

	Describe("converging over time", func() {
		It("converges tasks, LRPs, and auctions when the lock is periodically reestablished", func() {
			fakeClock.Increment(convergeRepeatInterval + aBit)

			Eventually(fakeBBSClient.ConvergeTasksCallCount, aBit).Should(Equal(1))
			Eventually(fakeBBSClient.ConvergeLRPsCallCount, aBit).Should(Equal(1))

			actualKickTaskDuration, actualExpirePendingTaskDuration, actualExpireCompletedTaskDuration := fakeBBSClient.ConvergeTasksArgsForCall(0)
			Expect(actualKickTaskDuration).To(Equal(kickTaskDuration))
			Expect(actualExpirePendingTaskDuration).To(Equal(expirePendingTaskDuration))
			Expect(actualExpireCompletedTaskDuration).To(Equal(expireCompletedTaskDuration))
Example No. 21
		serviceClient.NewCellPresenceRunnerReturns(fakeHeartbeater)

		config = maintain.Config{
			CellID:          "cell-id",
			RepAddress:      "1.2.3.4",
			Zone:            "az1",
			RetryInterval:   1 * time.Second,
			RootFSProviders: []string{"provider-1", "provider-2"},
		}
		maintainer = maintain.New(config, fakeClient, serviceClient, logger, clock)
	})

	AfterEach(func() {
		logger.Info("test-complete-signaling-maintainer-to-stop")
		close(pingErrors)
		ginkgomon.Interrupt(maintainProcess)
	})

	It("pings the executor", func() {
		pingErrors <- nil
		maintainProcess = ginkgomon.Invoke(maintainer)
		Expect(fakeClient.PingCallCount()).To(Equal(1))
	})

	Context("when pinging the executor fails", func() {
		It("keeps pinging until it succeeds, then starts heartbeating the executor's presence", func() {
			maintainProcess = ifrit.Background(maintainer)
			ready := maintainProcess.Ready()

			for i := 1; i <= 4; i++ {
				clock.Increment(1 * time.Second)
Example No. 22
	Describe("AuctioneerAddress", func() {
		Context("when able to get an auctioneer presence", func() {
			var heartbeater ifrit.Process
			var presence auctioneer.Presence

			BeforeEach(func() {
				presence = auctioneer.NewPresence("auctioneer-id", "auctioneer.example.com")

				auctioneerLock, err := serviceClient.NewAuctioneerLockRunner(logger, presence, 100*time.Millisecond, 10*time.Second)
				Expect(err).NotTo(HaveOccurred())
				heartbeater = ginkgomon.Invoke(auctioneerLock)
			})

			AfterEach(func() {
				ginkgomon.Interrupt(heartbeater)
			})

			It("returns the address", func() {
				address, err := serviceClient.CurrentAuctioneerAddress()
				Expect(err).NotTo(HaveOccurred())
				Expect(address).To(Equal(presence.AuctioneerAddress))
			})
		})

		Context("when unable to get any auctioneer presences", func() {
			It("returns ErrServiceUnavailable", func() {
				_, err := serviceClient.CurrentAuctioneerAddress()
				Expect(err).To(HaveOccurred())
			})
		})
Example No. 23
					AfterEach(func() {
						consulRunner.Start()
						consulRunner.WaitUntilReady()
					})

					It("loses the lock and exits", func() {
						var err error
						Eventually(lockProcess.Wait()).Should(Receive(&err))
						Expect(err).To(Equal(locket.ErrLockLost))
						Expect(sender.GetValue(lockHeldMetricName).Value).To(Equal(float64(0)))
					})
				})

				Context("and the process is shutting down", func() {
					It("releases the lock and exits", func() {
						ginkgomon.Interrupt(lockProcess)
						Eventually(lockProcess.Wait()).Should(Receive(BeNil()))
						_, err := getLockValue()
						Expect(err).To(Equal(consuladapter.NewKeyNotFoundError(lockKey)))
					})
				})

				Context("and consul goes through a period of instability", func() {
					var serveFiveHundreds chan struct{}
					var fakeConsul *httptest.Server

					BeforeEach(func() {
						serveFiveHundreds = make(chan struct{}, 4)

						consulClusterURL, err := url.Parse(consulRunner.URL())
						Expect(err).NotTo(HaveOccurred())
Example No. 24
				w.Write(payload)
			}),
		)
	})

	AfterEach(func() {
		defer fakeCC.Close()
	})

	Describe("when the CC polling interval elapses", func() {
		JustBeforeEach(func() {
			process = startBulker(true)
		})

		AfterEach(func() {
			ginkgomon.Interrupt(process, interruptTimeout)
		})

		Context("once the state has been synced with CC", func() {
			Context("lrps", func() {
				BeforeEach(func() {
					schedulingInfoResponse := models.DesiredLRPSchedulingInfosResponse{
						Error: nil,
						DesiredLrpSchedulingInfos: []*models.DesiredLRPSchedulingInfo{
							{
								DesiredLRPKey: models.DesiredLRPKey{
									ProcessGuid: "process-guid-1",
									Domain:      cc_messages.AppLRPDomain,
								},
								Annotation: "1.1",
							},
Example No. 25
		It("POSTs to the CC that the application has crashed", func() {
			Eventually(ready, 5*time.Second).Should(BeClosed())
		})
	})

	Context("when the watcher loses the lock", func() {
		BeforeEach(func() {
			watcher, _ = startWatcher(true)
		})

		JustBeforeEach(func() {
			consulRunner.Reset()
		})

		AfterEach(func() {
			ginkgomon.Interrupt(watcher, 5)
		})

		It("exits with an error", func() {
			Eventually(watcher.Wait(), 5).Should(Receive(HaveOccurred()))
		})
	})

	Context("when the watcher initially does not have the lock", func() {
		var runner *ginkgomon.Runner
		var otherSession *consuladapter.Session

		BeforeEach(func() {
			otherSession = consulRunner.NewSession("other-Session")
			err := otherSession.AcquireLock(locket.LockSchemaPath(watcherLockName), []byte("something-else"))
			Expect(err).NotTo(HaveOccurred())