Example #1
func (etcd *ETCDClusterRunner) kill() {
	etcd.mutex.Lock()
	defer etcd.mutex.Unlock()

	if etcd.running {
		for i := 0; i < etcd.numNodes; i++ {
			ginkgomon.Kill(etcd.etcdProcesses[i], 5*time.Second)
			etcd.nukeArtifacts(i)
		}
		etcd.markAsStopped()
	}
}
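All of the examples on this page build on the same Invoke/Kill pairing. A minimal sketch of that pairing, assuming `runner` is any configured ifrit.Runner (for instance a ginkgomon.Runner wrapping an exec.Cmd); the names here are placeholders, not code from the examples:

import (
	"time"

	"github.com/tedsuo/ifrit"
	"github.com/tedsuo/ifrit/ginkgomon"
)

// startAndStop is a sketch of the lifecycle used throughout these examples.
func startAndStop(runner ifrit.Runner) {
	// Invoke starts the runner and blocks until it reports readiness.
	process := ginkgomon.Invoke(runner)

	// Kill signals the process and waits up to 5 seconds for it to exit,
	// failing the current spec if it does not.
	defer ginkgomon.Kill(process, 5*time.Second)
}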
Example #2
		consulSession = consulRunner.NewSession("a-session")

		capacity := models.NewCellCapacity(512, 1024, 124)
		cellPresence := models.NewCellPresence("the-cell-id", "1.2.3.4", "the-zone", capacity, []string{}, []string{})

		value, err := json.Marshal(cellPresence)
		Expect(err).NotTo(HaveOccurred())

		_, err = consulSession.SetPresence(bbs.CellSchemaPath(cellPresence.CellID), value)
		Expect(err).NotTo(HaveOccurred())

	})

	AfterEach(func() {
		ginkgomon.Kill(bbsProcess)
		ginkgomon.Kill(convergerProcess)
		consulRunner.Stop()
		etcdRunner.Stop()
	})

	startConverger := func() {
		runner = convergerrunner.New(convergerConfig)
		convergerProcess = ginkgomon.Invoke(runner)
		time.Sleep(convergeRepeatInterval)
	}

	createRunningTaskWithDeadCell := func() {
		task := model_helpers.NewValidTask("task-guid")

		err := bbsClient.DesireTask(task.TaskGuid, task.Domain, task.TaskDefinition)
Example #3
	)

	BeforeEach(func() {
		bbsRunner = testrunner.New(bbsBinPath, bbsArgs)
		bbsProcess = ginkgomon.Invoke(bbsRunner)
		filter = models.DesiredLRPFilter{}
		expectedDesiredLRPs = []*models.DesiredLRP{}
		actualDesiredLRPs = []*models.DesiredLRP{}
		desiredLRPs = etcdHelper.CreateDesiredLRPsInDomains(map[string]int{
			"domain-1": 2,
			"domain-2": 3,
		})
	})

	AfterEach(func() {
		ginkgomon.Kill(bbsProcess)
	})

	Describe("DesiredLRPs", func() {
		JustBeforeEach(func() {
			actualDesiredLRPs, getErr = client.DesiredLRPs(filter)
		})

		It("responds without error", func() {
			Expect(getErr).NotTo(HaveOccurred())
		})

		It("has the correct number of responses", func() {
			Expect(actualDesiredLRPs).To(HaveLen(5))
		})
Example #4
	"github.com/cloudfoundry-incubator/receptor/serialization"
	"github.com/cloudfoundry-incubator/runtime-schema/models"
	"github.com/tedsuo/ifrit/ginkgomon"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = Describe("Desired LRP API", func() {

	BeforeEach(func() {
		receptorProcess = ginkgomon.Invoke(receptorRunner)
	})

	AfterEach(func() {
		ginkgomon.Kill(receptorProcess)
	})

	Describe("POST /v1/desired_lrps/", func() {
		var lrpToCreate receptor.DesiredLRPCreateRequest
		var createErr error

		BeforeEach(func() {
			lrpToCreate = newValidDesiredLRPCreateRequest()
			createErr = client.CreateDesiredLRP(lrpToCreate)
		})

		It("responds without an error", func() {
			Expect(createErr).NotTo(HaveOccurred())
		})
Example #5
			SkipCertVerify:      skipCertVerify,
			UAATokenURL:         uaaTokenURL,
			UAAPassword:         uaaPassword,
			UAAUsername:         uaaUsername,
			ConsulCluster:       consulRunner.URL(),
			AllowedCiphers:      allowedCiphers,
			AllowedMACs:         allowedMACs,
			AllowedKeyExchanges: allowedKeyExchanges,
		}

		runner = testrunner.New(sshProxyPath, args)
		process = ifrit.Invoke(runner)
	})

	AfterEach(func() {
		ginkgomon.Kill(process, 3*time.Second)

		fakeBBS.Close()
		fakeUAA.Close()
		fakeCC.Close()
	})

	Describe("argument validation", func() {
		Context("when the host key is not provided", func() {
			BeforeEach(func() {
				hostKey = ""
			})

			It("reports the problem and terminates", func() {
				Expect(runner).To(gbytes.Say("hostKey is required"))
				Expect(runner).NotTo(gexec.Exit(0))
Example #6
import (
	"github.com/cloudfoundry-incubator/routing-api/db"
	"github.com/tedsuo/ifrit/ginkgomon"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = Describe("Routes API", func() {
	BeforeEach(func() {
		routingAPIProcess = ginkgomon.Invoke(routingAPIRunner)
	})

	AfterEach(func() {
		ginkgomon.Kill(routingAPIProcess)
	})

	Describe("Routes", func() {
		var routes []db.Route
		var getErr error
		var route1, route2 db.Route

		BeforeEach(func() {
			route1 = db.Route{
				Route:   "a.b.c",
				Port:    33,
				IP:      "1.1.1.1",
				TTL:     55,
				LogGuid: "potato",
			}
Example #7
		fakeSQLDB = &dbfakes.FakeDB{}

		cryptor = &encryptionfakes.FakeCryptor{}

		fakeMigration = &migrationfakes.FakeMigration{}
		fakeMigration.RequiresSQLReturns(false)
		migrations = []migration.Migration{fakeMigration}
	})

	JustBeforeEach(func() {
		manager = migration.NewManager(logger, fakeETCDDB, etcdStoreClient, fakeSQLDB, rawSQLDB, cryptor, migrations, migrationsDone, clock.NewClock(), "db-driver")
		migrationProcess = ifrit.Background(manager)
	})

	AfterEach(func() {
		ginkgomon.Kill(migrationProcess)
	})

	Context("when both a etcd and sql configurations are present", func() {
		BeforeEach(func() {
			rawSQLDB = &sql.DB{}
			etcdStoreClient = etcd.NewStoreClient(nil)
		})

		Context("but SQL does not have a version", func() {
			BeforeEach(func() {
				fakeSQLDB.VersionReturns(nil, models.ErrResourceNotFound)
			})

			It("fetches the version from etcd", func() {
				Eventually(fakeSQLDB.VersionCallCount).Should(Equal(1))
Example #8
		consulRunner = consulrunner.NewClusterRunner(
			9001+config.GinkgoConfig.ParallelNode*consulrunner.PortOffsetLength,
			1,
			"http",
		)

		consulRunner.Start()
		consulRunner.WaitUntilReady()

		etcdRunner.Start()
	},
)

var _ = SynchronizedAfterSuite(func() {
	ginkgomon.Kill(sqlProcess)

	etcdRunner.Stop()
	consulRunner.Stop()
}, func() {
	gexec.CleanupBuildArtifacts()
})

var _ = BeforeEach(func() {
	logger = lagertest.NewTestLogger("test")

	etcdRunner.Reset()

	consulRunner.Reset()
	consulClient = consulRunner.NewClient()
Example #9
					MemoryMB: 124,
					DiskMB:   456,
					RootFs:   "some-rootfs",
				},
			}

			competingAuctioneerLock := locket.NewLock(logger, consulClient, locket.LockSchemaPath("auctioneer_lock"), []byte{}, clock.NewClock(), 500*time.Millisecond, 10*time.Second)
			competingAuctioneerProcess = ifrit.Invoke(competingAuctioneerLock)

			runner.StartCheck = "auctioneer.lock-bbs.lock.acquiring-lock"

			auctioneerProcess = ifrit.Background(runner)
		})

		AfterEach(func() {
			ginkgomon.Kill(competingAuctioneerProcess)
		})

		It("should not advertise its presence, and should not be reachable", func() {
			Eventually(func() error {
				return auctioneerClient.RequestTaskAuctions([]*auctioneer.TaskStartRequest{
					&auctioneer.TaskStartRequest{*task},
				})
			}).Should(HaveOccurred())
		})

		It("should eventually come up in the event that the lock is released", func() {
			ginkgomon.Kill(competingAuctioneerProcess)

			Eventually(func() error {
				return auctioneerClient.RequestTaskAuctions([]*auctioneer.TaskStartRequest{
Example #10
				}
			}
		}
	}

	spinupTcpReceiver := func(port int, id string) ifrit.Process {
		sampleReceiverArgs := testrunner.Args{
			Address:  fmt.Sprintf("%s:%d", externalIP, port),
			ServerId: id,
		}
		runner1 := testrunner.New(sampleReceiverPath, sampleReceiverArgs)
		return ifrit.Invoke(runner1)
	}

	tearDownTcpReceiver := func(receiverProcess ifrit.Process) {
		ginkgomon.Kill(receiverProcess, 5*time.Second)
	}

	Describe("A sample receiver running as a separate process", func() {
		BeforeEach(func() {
			externalPort = 60000 + GinkgoParallelNode()
			sampleReceiverPort1 = 9000 + GinkgoParallelNode()
			sampleReceiverPort2 = 9500 + GinkgoParallelNode()
			serverId1 = "serverId1"
			serverId2 = "serverId2"

			receiver1 = spinupTcpReceiver(sampleReceiverPort1, serverId1)
			receiver2 = spinupTcpReceiver(sampleReceiverPort2, serverId2)
		})

		AfterEach(func() {
Example #11
					Index:        1,
					State:        cc_messages.LRPInstanceStateRunning,
				}))

				Expect(lrpInstances).To(ContainElement(cc_messages.LRPInstance{
					ProcessGuid:  "some-process-guid",
					InstanceGuid: "",
					Index:        2,
					State:        cc_messages.LRPInstanceStateStarting,
				}))
			})
		})

		Context("when the bbs is not running", func() {
			JustBeforeEach(func() {
				ginkgomon.Kill(bbsProcess, 5)
			})

			It("returns 500", func() {
				getLRPs, err := requestGenerator.CreateRequest(
					tps.LRPStatus,
					rata.Params{"guid": "some-process-guid"},
					nil,
				)
				Expect(err).NotTo(HaveOccurred())

				response, err := httpClient.Do(getLRPs)
				Expect(err).NotTo(HaveOccurred())

				Expect(response.StatusCode).To(Equal(http.StatusInternalServerError))
			})
Example #12
func stopNATS() {
	ginkgomon.Kill(gnatsdProcess)
}
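Example #12 passes no interval to Kill. The trailing arguments are variadic and are forwarded to Gomega's Eventually while waiting for the killed process to exit, so omitting them means Gomega's default Eventually timeout applies (note how Example #23 below raises that default suite-wide with SetDefaultEventuallyTimeout). A sketch of the two call shapes, with placeholder names:

import (
	"time"

	"github.com/tedsuo/ifrit"
	"github.com/tedsuo/ifrit/ginkgomon"
)

// stopWithDefaults and stopWithTimeout are sketches, not code from the page.
func stopWithDefaults(process ifrit.Process) {
	ginkgomon.Kill(process) // waits for exit using Gomega's default Eventually timeout
}

func stopWithTimeout(process ifrit.Process) {
	ginkgomon.Kill(process, 5*time.Second) // waits up to 5 seconds instead
}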
Example #13
		var competingBulkerProcess ifrit.Process

		BeforeEach(func() {
			heartbeatInterval = 1 * time.Second

			competingBulker := locket.NewLock(logger, consulRunner.NewClient(), locket.LockSchemaPath(bulkerLockName), []byte("something-else"), clock.NewClock(), locket.RetryInterval, locket.LockTTL)
			competingBulkerProcess = ifrit.Invoke(competingBulker)
		})

		JustBeforeEach(func() {
			process = startBulker(false)
		})

		AfterEach(func() {
			ginkgomon.Interrupt(process, interruptTimeout)
			ginkgomon.Kill(competingBulkerProcess)
		})

		itIsMissingDomain()

		Context("when the lock becomes available", func() {
			BeforeEach(func() {
				ginkgomon.Kill(competingBulkerProcess)

				time.Sleep(pollingInterval + 10*time.Millisecond)
			})

			It("is updated", func() {
				Eventually(func() ([]string, error) {
					logger := logger.Session("domain-polling")
					logger.Debug("getting-domains")
Example #14
	"github.com/tedsuo/ifrit/ginkgomon"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"github.com/onsi/gomega/gbytes"
	"github.com/onsi/gomega/gexec"
)

var _ = Describe("registrations", func() {
	JustBeforeEach(func() {
		receptorProcess = ginkgomon.Invoke(receptorRunner)
	})

	Context("when the server attempts to connect to NATS", func() {
		AfterEach(func() {
			ginkgomon.Kill(receptorProcess)
		})

		Context("and NATS is accessible", func() {
			var registrations chan *nats.Msg

			BeforeEach(func() {
				registrations = make(chan *nats.Msg, 1)
				natsClient.Subscribe("router.register", func(msg *nats.Msg) {
					registrations <- msg
				})
			})

			It("announces it's location", func() {
				Eventually(registrations).Should(Receive())
			})
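Example #13 above (and Example #29 further down) tears down two processes differently: the process under test gets ginkgomon.Interrupt, while the competing lock holder gets ginkgomon.Kill. The difference is the signal sent before waiting for exit: Interrupt sends os.Interrupt, exercising the process's graceful-shutdown path, whereas Kill sends os.Kill. A sketch with placeholder names:

import (
	"time"

	"github.com/tedsuo/ifrit"
	"github.com/tedsuo/ifrit/ginkgomon"
)

// tearDown is a sketch of the split seen in Example #13; both parameters
// are placeholders for running ifrit.Processes.
func tearDown(processUnderTest, helperProcess ifrit.Process) {
	ginkgomon.Interrupt(processUnderTest, 2*time.Second) // os.Interrupt: graceful stop
	ginkgomon.Kill(helperProcess)                        // os.Kill: forceful stop
}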
Example #15
		encryptionKey, err := encryption.NewKey("label", "passphrase")
		Expect(err).NotTo(HaveOccurred())
		keyManager, err = encryption.NewKeyManager(encryptionKey, []encryption.Key{oldKey})
		Expect(err).NotTo(HaveOccurred())
		cryptor = encryption.NewCryptor(keyManager, rand.Reader)

		fakeDB.EncryptionKeyLabelReturns("", models.ErrResourceNotFound)
	})

	JustBeforeEach(func() {
		runner = encryptor.New(logger, fakeDB, keyManager, cryptor, clock.NewClock())
		encryptorProcess = ifrit.Background(runner)
	})

	AfterEach(func() {
		ginkgomon.Kill(encryptorProcess)
	})

	It("reports the duration that it took to encrypt", func() {
		Eventually(encryptorProcess.Ready()).Should(BeClosed())
		Eventually(logger.LogMessages).Should(ContainElement("test.encryptor.encryption-finished"))

		reportedDuration := sender.GetValue("EncryptionDuration")
		Expect(reportedDuration.Value).NotTo(BeZero())
		Expect(reportedDuration.Unit).To(Equal("nanos"))
	})

	Context("when there is no current encryption key", func() {
		BeforeEach(func() {
			fakeDB.EncryptionKeyLabelReturns("", models.ErrResourceNotFound)
		})
Example #16
		disappearChan <-chan []string

		logger *lagertest.TestLogger
	)

	BeforeEach(func() {
		consulClient = consulRunner.NewClient()
		logger = lagertest.NewTestLogger("test")
		clock := clock.NewClock()
		watcherRunner, disappearChan = locket.NewDisappearanceWatcher(logger, consulClient, "under", clock)
		watcherProcess = ifrit.Invoke(watcherRunner)
	})

	AfterEach(func() {
		ginkgomon.Kill(watcherProcess)
	})

	var addThenRemovePresence = func(presenceKey string) {
		presenceRunner := locket.NewPresence(logger, consulClient, presenceKey, []byte("value"), clock.NewClock(), retryInterval, 10*time.Second)

		presenceProcess := ifrit.Invoke(presenceRunner)
		Eventually(func() int {
			sessions, _, err := consulClient.Session().List(nil)
			Expect(err).NotTo(HaveOccurred())
			return len(sessions)
		}).Should(Equal(1))
		Eventually(logger).Should(gbytes.Say("presence.succeeded-setting-presence"))

		ginkgomon.Kill(presenceProcess)
	}
Example #17
	hostKeyPem = context["host-key"]
	privateKeyPem = context["private-key"]
	publicAuthorizedKey = context["authorized-key"]

	sshdPort = 7000 + GinkgoParallelNode()
	sshdPath = context["sshd"]

	sshProxyPort = 7100 + GinkgoParallelNode()
	sshProxyPath = context["ssh-proxy"]
})

var _ = BeforeEach(func() {
	sshdArgs := testrunner.Args{
		Address:       fmt.Sprintf("127.0.0.1:%d", sshdPort),
		HostKey:       hostKeyPem,
		AuthorizedKey: publicAuthorizedKey,
	}

	runner := testrunner.New(sshdPath, sshdArgs)
	sshdProcess = ifrit.Invoke(runner)
})

var _ = AfterEach(func() {
	ginkgomon.Kill(sshdProcess, 5*time.Second)
})

var _ = SynchronizedAfterSuite(func() {
}, func() {
	gexec.CleanupBuildArtifacts()
})
Example #18
	debugServerAddress = fmt.Sprintf("0.0.0.0:%d", 9850+GinkgoParallelNode())
	localDriverRunner = ginkgomon.New(ginkgomon.Config{
		Name: "local-driver",
		Command: exec.Command(
			localDriverPath,
			"-listenAddr", fmt.Sprintf("0.0.0.0:%d", localDriverServerPort),
			"-debugAddr", debugServerAddress,
			"-driversPath", defaultPluginsDirectory,
		),
		StartCheck: "local-driver-server.started",
	})
})

var _ = AfterEach(func() {
	ginkgomon.Kill(localDriverProcess)
})

var _ = SynchronizedAfterSuite(func() {

}, func() {
	gexec.CleanupBuildArtifacts()
})

// testing support types:

type errCloser struct{ io.Reader }

func (errCloser) Close() error                     { return nil }
func (errCloser) Read(p []byte) (n int, err error) { return 0, fmt.Errorf("any") }
Example #19
		registrations = make(chan natbeat.RegistryMessage, 1)

		fakeNatsClient.Subscribe("router.register", func(msg *nats.Msg) {
			registration := natbeat.RegistryMessage{}
			fromJson(msg.Data, &registration)
			registrations <- registration
		})
	})

	JustBeforeEach(func() {
		heartbeaterProcess = ifrit.Invoke(heartbeater)
		Eventually(heartbeaterProcess.Ready()).Should(BeClosed())
	})

	AfterEach(func() {
		ginkgomon.Kill(heartbeaterProcess)
	})

	Context("when the router greeting is successful", func() {
		var expectedRegisterInterval = time.Second
		var greetingMsg = natbeat.GreetingMessage{
			Id:               "some-router-id",
			Hosts:            []string{"1.2.3.4"},
			RegisterInterval: expectedRegisterInterval,
		}

		BeforeEach(func() {
			fakeNatsClient.Subscribe("router.greet", func(msg *nats.Msg) {
				fakeNatsClient.Publish(msg.Reply, toJson(greetingMsg))
			})
		})
Example #20
		retryInterval = 500 * time.Millisecond
		lockTTL = 5 * time.Second
		logger = lagertest.NewTestLogger("locket")

		sender = fake.NewFakeMetricSender()
		metrics.Initialize(sender, nil)
	})

	JustBeforeEach(func() {
		clock = fakeclock.NewFakeClock(time.Now())
		lockRunner = locket.NewLock(logger, consulClient, lockKey, lockValue, clock, retryInterval, lockTTL)
	})

	AfterEach(func() {
		ginkgomon.Kill(lockProcess)
	})

	var shouldEventuallyHaveNumSessions = func(numSessions int) {
		Eventually(func() int {
			sessions, _, err := consulClient.Session().List(nil)
			Expect(err).NotTo(HaveOccurred())
			return len(sessions)
		}).Should(Equal(numSessions))
	}

	Context("When consul is running", func() {
		Context("an error occurs while acquiring the lock", func() {
			BeforeEach(func() {
				lockKey = ""
			})
Example #21
	Expect(rawSQLDB.Ping()).NotTo(HaveOccurred())

	flavor = sqlRunner.DriverName()

	encryptionKey, err := encryption.NewKey("label", "passphrase")
	Expect(err).NotTo(HaveOccurred())
	keyManager, err := encryption.NewKeyManager(encryptionKey, nil)
	Expect(err).NotTo(HaveOccurred())
	cryptor = encryption.NewCryptor(keyManager, rand.Reader)

	fakeClock = fakeclock.NewFakeClock(time.Now())
})

var _ = AfterSuite(func() {
	etcdRunner.Stop()

	Expect(rawSQLDB.Close()).NotTo(HaveOccurred())
	ginkgomon.Kill(sqlProcess, 5*time.Second)
})

var _ = BeforeEach(func() {
	etcdRunner.Reset()

	etcdClient = etcdRunner.Client()
	etcdClient.SetConsistency(etcdclient.STRONG_CONSISTENCY)

	storeClient = etcd.NewStoreClient(etcdClient)

	sqlRunner.Reset()
})
Example #22
		presenceKey = "some-key"
		presenceValue = []byte("some-value")

		retryInterval = 500 * time.Millisecond
		logger = lagertest.NewTestLogger("locket")

		presenceTTL = 5 * time.Second
	})

	JustBeforeEach(func() {
		clock = fakeclock.NewFakeClock(time.Now())
		presenceRunner = locket.NewPresence(logger, consulClient, presenceKey, presenceValue, clock, retryInterval, presenceTTL)
	})

	AfterEach(func() {
		ginkgomon.Kill(presenceProcess)
	})

	Context("When consul is running", func() {
		Context("an error occurs while acquiring the presence", func() {
			BeforeEach(func() {
				presenceKey = ""
			})

			It("continues to retry", func() {
				presenceProcess = ifrit.Background(presenceRunner)

				Consistently(presenceProcess.Ready()).ShouldNot(BeClosed())
				Consistently(presenceProcess.Wait()).ShouldNot(Receive())

				Eventually(logger).Should(Say("failed-setting-presence"))
Example #23
	httpclient *http.Client
)

func TestDelphos(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Delphos Suite")
}

var _ = SynchronizedBeforeSuite(
	func() []byte {
		delphosConfig, err := gexec.Build("github.com/migdi/delphos-api/cmd/delphos", "-race")
		Expect(err).NotTo(HaveOccurred())
		return []byte(delphosConfig)
	},
	func(delphosConfig []byte) {
		delphosBinPath = string(delphosConfig)
		SetDefaultEventuallyTimeout(15 * time.Second)
		delphosPort = 8080 + GinkgoParallelNode()
		delphosArgs.Address = fmt.Sprintf("127.0.0.1:%d", delphosPort)
		delphosRunner = testrunner.New(delphosBinPath, delphosArgs)
		delphosProcess = ginkgomon.Invoke(delphosRunner)
		httpclient = &http.Client{}
	},
)

var _ = SynchronizedAfterSuite(func() {
	ginkgomon.Kill(delphosProcess)
}, func() {
	gexec.CleanupBuildArtifacts()
})
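Example #23 shows the suite-level lifecycle: SynchronizedBeforeSuite compiles the binary once and hands its path to every parallel node, the first SynchronizedAfterSuite function kills each node's own process, and the second function cleans the build artifacts exactly once. ginkgomon.Kill guards against a nil process, which is why such hooks can call it unconditionally even when setup never started the process. A sketch of that point, with `process` as a placeholder:

import (
	"github.com/tedsuo/ifrit"
	"github.com/tedsuo/ifrit/ginkgomon"
)

// safeTeardown is a sketch: Kill no-ops when handed a nil process, so this
// is safe even if the corresponding Invoke never ran.
func safeTeardown(process ifrit.Process) {
	ginkgomon.Kill(process)
}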
Example #24
		members = grouper.Members{
			{"child1", childRunner1},
			{"child2", childRunner2},
			{"child3", childRunner3},
		}

		groupRunner = grouper.NewParallel(os.Interrupt, members)
	})

	AfterEach(func() {
		childRunner1.EnsureExit()
		childRunner2.EnsureExit()
		childRunner3.EnsureExit()

		ginkgomon.Kill(groupProcess)
	})

	Describe("Start", func() {
		BeforeEach(func() {
			groupProcess = ifrit.Background(groupRunner)
		})

		It("runs all runners at the same time", func() {
			Eventually(childRunner1.RunCallCount).Should(Equal(1))
			Eventually(childRunner2.RunCallCount).Should(Equal(1))
			Eventually(childRunner3.RunCallCount).Should(Equal(1))

			Consistently(groupProcess.Ready()).ShouldNot(BeClosed())

			childRunner1.TriggerReady()
Example #25
		simulateTaskCompleting := func(signals <-chan os.Signal, ready chan<- struct{}) error {
			close(ready)
			task = model_helpers.NewValidTask("the-task-guid")
			task.CompletionCallbackUrl = callbackURL
			taskworkpool.HandleCompletedTask(logger, httpClient, taskDB, task)
			return nil
		}

		var process ifrit.Process
		JustBeforeEach(func() {
			process = ifrit.Invoke(ifrit.RunFunc(simulateTaskCompleting))
		})

		AfterEach(func() {
			ginkgomon.Kill(process)
		})

		Context("when the task has a completion callback URL", func() {
			BeforeEach(func() {
				Expect(taskDB.ResolvingTaskCallCount()).To(Equal(0))
			})

			It("marks the task as resolving", func() {
				statusCodes <- 200

				Eventually(taskDB.ResolvingTaskCallCount).Should(Equal(1))
				_, actualGuid := taskDB.ResolvingTaskArgsForCall(0)
				Expect(actualGuid).To(Equal("the-task-guid"))
			})
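Several of the examples above (#7, #9, #15, #22, #24) start the process with ifrit.Background instead of ifrit.Invoke: Background returns immediately without waiting for readiness, which suits specs that deliberately keep the process from becoming ready, for instance while a competing lock is held. ginkgomon.Kill tears the process down the same way in either case. A sketch with placeholder names:

import (
	"github.com/tedsuo/ifrit"
	"github.com/tedsuo/ifrit/ginkgomon"

	. "github.com/onsi/gomega"
)

// startBlocked is a sketch, assuming `runner` is an ifrit.Runner that will
// stay un-ready (e.g. because another process holds its lock).
func startBlocked(runner ifrit.Runner) {
	process := ifrit.Background(runner) // returns immediately, no readiness wait

	// The spec can assert the process never becomes ready...
	Consistently(process.Ready()).ShouldNot(BeClosed())

	// ...and still clean up with Kill.
	ginkgomon.Kill(process)
}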
Example #26
			Host:   bbsAddress,
		}

		bbsClient = bbs.NewClient(bbsURL.String())

		bbsArgs = bbstestrunner.Args{
			Address:     bbsAddress,
			EtcdCluster: etcdUrl,
		}
		bbsRunner = bbstestrunner.New(bbsBinPath, bbsArgs)
		bbsProcess = ginkgomon.Invoke(bbsRunner)
	},
)

var _ = SynchronizedAfterSuite(func() {
	ginkgomon.Kill(bbsProcess)
	etcdRunner.Stop()
	consulRunner.Stop()
}, func() {
	gexec.CleanupBuildArtifacts()
})

var _ = BeforeEach(func() {
	logger = lagertest.NewTestLogger("test")

	etcdRunner.Reset()

	consulRunner.Reset()
	consulSession = consulRunner.NewSession("a-session")

	receptorAddress = fmt.Sprintf("127.0.0.1:%d", 6700+GinkgoParallelNode())
Example #27
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = Describe("Ping API", func() {
	Describe("Protobuf Ping", func() {
		It("returns true when the bbs is running", func() {
			By("having the bbs down", func() {
				Expect(client.Ping(logger)).To(BeFalse())
			})

			By("starting the bbs without a lock", func() {
				competingBBSLock := locket.NewLock(logger, consulClient, locket.LockSchemaPath("bbs_lock"), []byte{}, clock.NewClock(), locket.RetryInterval, locket.DefaultSessionTTL)
				competingBBSLockProcess := ifrit.Invoke(competingBBSLock)
				defer ginkgomon.Kill(competingBBSLockProcess)

				bbsRunner = testrunner.New(bbsBinPath, bbsArgs)
				bbsRunner.StartCheck = "bbs.lock.acquiring-lock"
				bbsProcess = ginkgomon.Invoke(bbsRunner)

				Expect(client.Ping(logger)).To(BeFalse())
			})

			By("finally acquiring the lock", func() {
				Eventually(func() bool {
					return client.Ping(logger)
				}).Should(BeTrue())
			})
		})
	})
Example #28
		registration = &api.AgentServiceRegistration{
			ID:      serviceID,
			Name:    serviceName,
			Tags:    []string{"a", "b", "c"},
			Port:    8080,
			Address: "127.0.0.1",
		}
	})

	JustBeforeEach(func() {
		registrationRunner := locket.NewRegistrationRunner(logger, registration, consulClient, 5*time.Second, clock)
		registrationProcess = ginkgomon.Invoke(registrationRunner)
	})

	AfterEach(func() {
		ginkgomon.Kill(registrationProcess)
	})

	Context("when the service has not already been registered", func() {
		It("registers the service", func() {
			services, err := consulClient.Agent().Services()
			Expect(err).NotTo(HaveOccurred())
			service, ok := services[registration.ID]
			Expect(ok).To(BeTrue())
			Expect(*service).To(Equal(api.AgentService{
				ID:      registration.ID,
				Service: registration.Name,
				Tags:    registration.Tags,
				Port:    registration.Port,
				Address: registration.Address,
			}))
Example #29
		var nsyncLockClaimerProcess ifrit.Process

		BeforeEach(func() {
			heartbeatInterval = 1 * time.Second

			nsyncLockClaimer := locket.NewLock(logger, consulRunner.NewClient(), locket.LockSchemaPath(bulkerLockName), []byte("something-else"), clock.NewClock(), locket.RetryInterval, locket.LockTTL)
			nsyncLockClaimerProcess = ifrit.Invoke(nsyncLockClaimer)
		})

		JustBeforeEach(func() {
			process = startBulker(false)
		})

		AfterEach(func() {
			ginkgomon.Interrupt(process, interruptTimeout)
			ginkgomon.Kill(nsyncLockClaimerProcess)
		})

		It("does not make any requests", func() {
			Consistently(func() int {
				return len(fakeBBS.ReceivedRequests())
			}).Should(Equal(0))
		})

		Context("when the lock becomes available", func() {
			var (
				expectedLRPDomainRequest  *models.UpsertDomainRequest
				expectedTaskDomainRequest *models.UpsertDomainRequest

				foundLRPDomain  chan bool
				foundTaskDomain chan bool
Example #30
		boshCmd("manifests/cell1.yml", "deploy", "Deployed 'cf-warden-diego-cell1'")

		By("Deploying Cell 2")
		boshCmd("manifests/cell2.yml", "deploy", "Deployed 'cf-warden-diego-cell2'")

		By("Deploying a Test App")
		pollerApp = newCfApp("test-app", config.MaxPollingErrors)
		pollerApp.Push()

		By("Continuously Polling the Test Application")
		pollerProcess = ginkgomon.Invoke(pollerApp.NewPoller())
	})

	AfterEach(func() {
		By("Test Complete, AfterEach Beginning")
		ginkgomon.Kill(pollerProcess)

		By("Deleting the Test App")
		pollerApp.Destroy()
	})

	It("Upgrades from V0 to V1", func() {
		By("Generating the V1 deployment manifests for 5 piece wise deployments")
		arguments := []string{
			"-d", filepath.Join(config.BaseReleaseDirectory, config.V1DiegoReleasePath),
			"-c", filepath.Join(config.BaseReleaseDirectory, config.V1CfReleasePath),
			"-a", filepath.Join(config.BaseReleaseDirectory, config.AwsStubsDirectory),
			"-o", config.OverrideDomain,
			"-s", // Use SQL
		}