Example #1
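// Boots a Concourse ATC binary as a ginkgomon process and waits for its
// "atc.listening" start-check line. Ports are offset by GinkgoParallelNode()
// so parallel suite nodes don't collide, and by atcServerNumber so a single
// node can run several ATCs side by side. The bcrypt hash passed to
// -httpHashedPassword corresponds to the literal password "password".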
func startATC(atcBin string, atcServerNumber uint16) (ifrit.Process, uint16) {
	atcPort := 5697 + uint16(GinkgoParallelNode()) + (atcServerNumber * 100)
	debugPort := 6697 + uint16(GinkgoParallelNode()) + (atcServerNumber * 100)

	atcCommand := exec.Command(
		atcBin,
		"-webListenPort", fmt.Sprintf("%d", atcPort),
		"-callbacksURL", fmt.Sprintf("http://127.0.0.1:%d", atcPort),
		"-debugListenPort", fmt.Sprintf("%d", debugPort),
		"-httpUsername", "admin",
		"-httpHashedPassword", "$2a$04$DYaOWeQgyxTCv7QxydTP9u1KnwXWSKipC4BeTuBy.9m.IlkAdqNGG", // "password"
		"-publiclyViewable=true",
		"-templates", filepath.Join("..", "web", "templates"),
		"-public", filepath.Join("..", "web", "public"),
		"-sqlDataSource", postgresRunner.DataSourceName(),
	)
	atcRunner := ginkgomon.New(ginkgomon.Config{
		Command:       atcCommand,
		Name:          "atc",
		StartCheck:    "atc.listening",
		AnsiColorCode: "32m",
	})

	return ginkgomon.Invoke(atcRunner), atcPort
}
Example #2
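// The same pattern against a newer ATC CLI: long-form flags (--bind-port,
// --peer-url) replace the single-dash ones, and basic auth takes a plain
// password rather than a bcrypt hash.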
func startATC(atcBin string, atcServerNumber uint16) (ifrit.Process, uint16) {
	atcPort := 5697 + uint16(GinkgoParallelNode()) + (atcServerNumber * 100)
	debugPort := 6697 + uint16(GinkgoParallelNode()) + (atcServerNumber * 100)

	atcCommand := exec.Command(
		atcBin,
		"--bind-port", fmt.Sprintf("%d", atcPort),
		"--peer-url", fmt.Sprintf("http://127.0.0.1:%d", atcPort),
		"--postgres-data-source", postgresRunner.DataSourceName(),
		"--debug-bind-port", fmt.Sprintf("%d", debugPort),
		"--basic-auth-username", "admin",
		"--basic-auth-password", "password",
		"--publicly-viewable",
		"--templates", filepath.Join("..", "web", "templates"),
		"--public", filepath.Join("..", "web", "public"),
	)
	atcRunner := ginkgomon.New(ginkgomon.Config{
		Command:       atcCommand,
		Name:          "atc",
		StartCheck:    "atc.listening",
		AnsiColorCode: "32m",
	})

	return ginkgomon.Invoke(atcRunner), atcPort
}
Example #3
func startATC(atcBin string, atcServerNumber uint16, publiclyViewable bool, authTypes ...string) (ifrit.Process, uint16) {
	atcCommand, atcPort := getATCCommand(atcBin, atcServerNumber, publiclyViewable, authTypes...)
	atcRunner := ginkgomon.New(ginkgomon.Config{
		Command:       atcCommand,
		Name:          "atc",
		StartCheck:    "atc.listening",
		AnsiColorCode: "32m",
	})
	return ginkgomon.Invoke(atcRunner), atcPort
}
Example #4
func (cr *ClusterRunner) Start() {
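	// One temp dir holds per-node data, another the generated config files;
	// each consul node then runs as its own ginkgomon-wrapped process, gated
	// on the "agent: Join completed." log line.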
	cr.mutex.Lock()
	defer cr.mutex.Unlock()

	if cr.running {
		return
	}

	tmpDir, err := ioutil.TempDir("", defaultDataDirPrefix)
	Expect(err).NotTo(HaveOccurred())
	cr.dataDir = tmpDir

	tmpDir, err = ioutil.TempDir("", defaultConfigDirPrefix)
	Expect(err).NotTo(HaveOccurred())
	cr.configDir = tmpDir

	cr.consulProcesses = make([]ifrit.Process, cr.numNodes)

	for i := 0; i < cr.numNodes; i++ {
		iStr := fmt.Sprintf("%d", i)
		nodeDataDir := path.Join(cr.dataDir, iStr)
		Expect(os.MkdirAll(nodeDataDir, 0700)).To(Succeed())

		configFilePath := writeConfigFile(
			cr.configDir,
			nodeDataDir,
			iStr,
			cr.startingPort,
			i,
			cr.numNodes,
			cr.sessionTTL,
		)

		process := ginkgomon.Invoke(ginkgomon.New(ginkgomon.Config{
			Name:              fmt.Sprintf("consul_cluster[%d]", i),
			AnsiColorCode:     "35m",
			StartCheck:        "agent: Join completed.",
			StartCheckTimeout: 10 * time.Second,
			Command: exec.Command(
				"consul",
				"agent",
				"--log-level", "trace",
				"--config-file", configFilePath,
			),
		}))
		cr.consulProcesses[i] = process

		ready := process.Ready()
		Eventually(ready, 10, 0.05).Should(BeClosed(), "Expected consul to be up and running")
	}

	cr.running = true
}
Example #5
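// Starts a single consul agent rather than a cluster. The same one-minute
// budget is used both for ginkgomon's StartCheckTimeout and for the
// Eventually poll on the process's Ready() channel.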
func (runner *AgentRunner) Start() {
	runner.mutex.Lock()
	defer runner.mutex.Unlock()

	if runner.running {
		return
	}

	tmpDir, err := ioutil.TempDir("", defaultDataDirPrefix)
	Expect(err).NotTo(HaveOccurred())
	runner.dataDir = tmpDir

	tmpDir, err = ioutil.TempDir("", defaultConfigDirPrefix)
	Expect(err).NotTo(HaveOccurred())
	runner.configDir = tmpDir

	Expect(os.MkdirAll(runner.dataDir, 0700)).To(Succeed())

	configFilePath := writeConfigFile(
		runner.configDir,
		runner.dataDir,
		runner.bindAddress,
		runner.serverIps,
	)

	timeout := 1 * time.Minute
	process := ginkgomon.Invoke(ginkgomon.New(ginkgomon.Config{
		Name:              "consul_agent",
		AnsiColorCode:     "35m",
		StartCheck:        "agent: Join completed.",
		StartCheckTimeout: timeout,
		Command: exec.Command(
			"consul",
			"agent",
			"--config-file", configFilePath,
		),
	}))
	runner.consulProcess = process

	ready := process.Ready()
	Eventually(ready, timeout, 100*time.Millisecond).Should(BeClosed(), "Expected consul to be up and running")

	runner.running = true
}
Example #6
package main_test

import (
	"github.com/cloudfoundry-incubator/routing-api/db"
	"github.com/tedsuo/ifrit/ginkgomon"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = Describe("Routes API", func() {
	BeforeEach(func() {
		routingAPIProcess = ginkgomon.Invoke(routingAPIRunner)
	})

	AfterEach(func() {
		ginkgomon.Kill(routingAPIProcess)
	})

	Describe("Routes", func() {
		var routes []db.Route
		var getErr error
		var route1, route2 db.Route

		BeforeEach(func() {
			route1 = db.Route{
				Route:   "a.b.c",
				Port:    33,
				IP:      "1.1.1.1",
				TTL:     55,
				LogGuid: "potato",
Example #7
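// grouper.NewParallel bundles etcd, nats, consul, the BBS, the receptor, and
// garden-linux into one ifrit process, so a single ginkgomon.Invoke blocks
// until every member has reported ready.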
	var builtArtifacts world.BuiltArtifacts

	err := json.Unmarshal(encodedBuiltArtifacts, &builtArtifacts)
	Expect(err).NotTo(HaveOccurred())

	localIP, err := localip.LocalIP()
	Expect(err).NotTo(HaveOccurred())

	componentMaker = helpers.MakeComponentMaker(builtArtifacts, localIP)
})

var _ = BeforeEach(func() {
	plumbing = ginkgomon.Invoke(grouper.NewParallel(os.Kill, grouper.Members{
		{"etcd", componentMaker.Etcd()},
		{"nats", componentMaker.NATS()},
		{"consul", componentMaker.Consul()},
		{"bbs", componentMaker.BBS()},
		{"receptor", componentMaker.Receptor()},
		{"garden-linux", componentMaker.GardenLinux("-denyNetworks=0.0.0.0/0", "-allowHostAccess=true")},
	}))

	helpers.ConsulWaitUntilReady()

	gardenClient = componentMaker.GardenClient()
	natsClient = componentMaker.NATSClient()
	receptorClient = componentMaker.ReceptorClient()

	helpers.UpsertInigoDomain(receptorClient)

	inigo_announcement_server.Start(componentMaker.ExternalAddress)
})
Example #8
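// This spec contrasts the two start styles: ginkgomon.Invoke blocks until the
// runner is ready, while ifrit.Background (in the failing-ping Context below)
// returns immediately so the test can feed ping errors and advance the fake
// clock before readiness.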
			Zone:            "az1",
			RetryInterval:   1 * time.Second,
			RootFSProviders: []string{"provider-1", "provider-2"},
		}
		maintainer = maintain.New(config, fakeClient, serviceClient, logger, clock)
	})

	AfterEach(func() {
		logger.Info("test-complete-signaling-maintainer-to-stop")
		close(pingErrors)
		ginkgomon.Interrupt(maintainProcess)
	})

	It("pings the executor", func() {
		pingErrors <- nil
		maintainProcess = ginkgomon.Invoke(maintainer)
		Expect(fakeClient.PingCallCount()).To(Equal(1))
	})

	Context("when pinging the executor fails", func() {
		It("keeps pinging until it succeeds, then starts heartbeating the executor's presence", func() {
			maintainProcess = ifrit.Background(maintainer)
			ready := maintainProcess.Ready()

			for i := 1; i <= 4; i++ {
				clock.Increment(1 * time.Second)
				pingErrors <- errors.New("ping failed")
				Eventually(fakeClient.PingCallCount).Should(Equal(i))
				Expect(ready).NotTo(BeClosed())
			}
Example #9
		address string

		lrp receptor.DesiredLRPCreateRequest
	)

	BeforeEach(func() {
		processGuid = helpers.GenerateGuid()
		address = componentMaker.Addresses.SSHProxy

		var fileServer ifrit.Runner
		fileServer, fileServerStaticDir = componentMaker.FileServer()
		runtime = ginkgomon.Invoke(grouper.NewParallel(os.Kill, grouper.Members{
			{"router", componentMaker.Router()},
			{"file-server", fileServer},
			{"rep", componentMaker.Rep()},
			{"converger", componentMaker.Converger()},
			{"auctioneer", componentMaker.Auctioneer()},
			{"route-emitter", componentMaker.RouteEmitter()},
			{"ssh-proxy", componentMaker.SSHProxy()},
		}))

		tgCompressor := compressor.NewTgz()
		err := tgCompressor.Compress(componentMaker.Artifacts.Executables["sshd"], filepath.Join(fileServerStaticDir, "sshd.tgz"))
		Expect(err).NotTo(HaveOccurred())

		sshRoute := routes.SSHRoute{
			ContainerPort:   3456,
			PrivateKey:      componentMaker.SSHConfig.PrivateKeyPem,
			HostFingerprint: ssh_helpers.MD5Fingerprint(componentMaker.SSHConfig.HostKey.PublicKey()),
		}
Example #10
		consulClient := consulRunner.NewClient()
		serviceClient = auctioneer.NewServiceClient(consulClient, clock)
	})

	Describe("AuctioneerAddress", func() {
		Context("when able to get an auctioneer presence", func() {
			var heartbeater ifrit.Process
			var presence auctioneer.Presence

			BeforeEach(func() {
				presence = auctioneer.NewPresence("auctioneer-id", "auctioneer.example.com")

				auctioneerLock, err := serviceClient.NewAuctioneerLockRunner(logger, presence, 100*time.Millisecond, 10*time.Second)
				Expect(err).NotTo(HaveOccurred())
				heartbeater = ginkgomon.Invoke(auctioneerLock)
			})

			AfterEach(func() {
				ginkgomon.Interrupt(heartbeater)
			})

			It("returns the address", func() {
				address, err := serviceClient.CurrentAuctioneerAddress()
				Expect(err).NotTo(HaveOccurred())
				Expect(address).To(Equal(presence.AuctioneerAddress))
			})
		})

		Context("when unable to get any auctioneer presences", func() {
			It("returns ErrServiceUnavailable", func() {
Example #11
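// Suite-level wiring: each parallel node starts its own BBS on a port derived
// from GinkgoParallelNode(), and SynchronizedAfterSuite kills the process and
// stops etcd and consul before gexec's build artifacts are cleaned up once.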
		bbsAddress := fmt.Sprintf("127.0.0.1:%d", 13000+GinkgoParallelNode())

		bbsURL = &url.URL{
			Scheme: "http",
			Host:   bbsAddress,
		}

		bbsClient = bbs.NewClient(bbsURL.String())

		bbsArgs = bbstestrunner.Args{
			Address:     bbsAddress,
			EtcdCluster: etcdUrl,
		}
		bbsRunner = bbstestrunner.New(bbsBinPath, bbsArgs)
		bbsProcess = ginkgomon.Invoke(bbsRunner)
	},
)

var _ = SynchronizedAfterSuite(func() {
	ginkgomon.Kill(bbsProcess)
	etcdRunner.Stop()
	consulRunner.Stop()
}, func() {
	gexec.CleanupBuildArtifacts()
})

var _ = BeforeEach(func() {
	logger = lagertest.NewTestLogger("test")

	etcdRunner.Reset()
Example #12
			atcBin,
			"-webListenPort", fmt.Sprintf("%d", atcPort),
			"-debugListenPort", fmt.Sprintf("%d", debugPort),
			"-httpUsername", "admin",
			"-httpPassword", "password",
			"-templates", filepath.Join("..", "web", "templates"),
			"-public", filepath.Join("..", "web", "public"),
			"-sqlDataSource", postgresRunner.DataSourceName(),
		)
		atcRunner := ginkgomon.New(ginkgomon.Config{
			Command:       atcCommand,
			Name:          "atc",
			StartCheck:    "atc.listening",
			AnsiColorCode: "32m",
		})
		atcProcess = ginkgomon.Invoke(atcRunner)
	})

	AfterEach(func() {
		ginkgomon.Interrupt(atcProcess)

		Ω(dbConn.Close()).Should(Succeed())
		Ω(dbListener.Close()).Should(Succeed())

		postgresRunner.DropTestDB()
	})

	It("can reach the page", func() {
		request, err := http.NewRequest("GET", fmt.Sprintf("http://127.0.0.1:%d", atcPort), nil)
		Ω(err).ShouldNot(HaveOccurred())

		resp, err := http.DefaultClient.Do(request)
Example #13
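// A "cell" is composed as a parallel group (file server, rep, auctioneer,
// converger); the Eventually on receptorClient.Cells confirms the rep has
// registered before any spec runs.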
	)

	var fileServerStaticDir string

	BeforeEach(func() {
		var fileServerRunner ifrit.Runner

		fileServerRunner, fileServerStaticDir = componentMaker.FileServer()

		cellGroup := grouper.Members{
			{"file-server", fileServerRunner},
			{"rep", componentMaker.Rep("-memoryMB", "1024")},
			{"auctioneer", componentMaker.Auctioneer()},
			{"converger", componentMaker.Converger()},
		}
		cellProcess = ginkgomon.Invoke(grouper.NewParallel(os.Interrupt, cellGroup))

		Eventually(receptorClient.Cells).Should(HaveLen(1))
	})

	AfterEach(func() {
		helpers.StopProcesses(cellProcess)
	})

	Describe("Running a task", func() {
		var guid string

		BeforeEach(func() {
			guid = helpers.GenerateGuid()
		})
Example #14
		cellProcess = nil
		convergerProcess = nil
	})

	AfterEach(func() {
		helpers.StopProcesses(
			auctioneerProcess,
			cellProcess,
			convergerProcess,
		)
	})

	Context("when a rep, and auctioneer are running", func() {
		BeforeEach(func() {
			cellProcess = ginkgomon.Invoke(grouper.NewParallel(os.Kill, grouper.Members{
				{"rep", componentMaker.Rep("-memoryMB", "1024")},
			}))

			auctioneerProcess = ginkgomon.Invoke(componentMaker.Auctioneer())
		})

		Context("and a standard Task is desired", func() {
			var taskGuid string
			var taskSleepSeconds int

			var taskRequest receptor.TaskCreateRequest

			BeforeEach(func() {
				taskSleepSeconds = 10
				taskGuid = helpers.GenerateGuid()
Example #15
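// SynchronizedBeforeSuite compiles the binary once (first callback, node 1
// only) and hands the resulting path to every parallel node, which then
// invokes its own server instance on a node-specific port.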
	httpclient *http.Client
)

func TestDelphos(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Delphos Suite")
}

var _ = SynchronizedBeforeSuite(
	func() []byte {
		delphosConfig, err := gexec.Build("github.com/migdi/delphos-api/cmd/delphos", "-race")
		Expect(err).NotTo(HaveOccurred())
		return []byte(delphosConfig)
	},
	func(delphosConfig []byte) {
		delphosBinPath = string(delphosConfig)
		SetDefaultEventuallyTimeout(15 * time.Second)
		delphosPort = 8080 + GinkgoParallelNode()
		delphosArgs.Address = fmt.Sprintf("127.0.0.1:%d", delphosPort)
		delphosRunner = testrunner.New(delphosBinPath, delphosArgs)
		delphosProcess = ginkgomon.Invoke(delphosRunner)
		httpclient = &http.Client{}
	},
)

var _ = SynchronizedAfterSuite(func() {
	ginkgomon.Kill(delphosProcess)
}, func() {
	gexec.CleanupBuildArtifacts()
})
Example #16
}, func(encodedBuiltArtifacts []byte) {
	var builtArtifacts world.BuiltArtifacts

	err := json.Unmarshal(encodedBuiltArtifacts, &builtArtifacts)
	Expect(err).NotTo(HaveOccurred())

	localIP, err := localip.LocalIP()
	Expect(err).NotTo(HaveOccurred())

	componentMaker = helpers.MakeComponentMaker(builtArtifacts, localIP)
})

var _ = BeforeEach(func() {
	plumbing = ginkgomon.Invoke(grouper.NewParallel(os.Kill, grouper.Members{
		{"etcd", componentMaker.Etcd()},
		{"nats", componentMaker.NATS()},
		{"consul", componentMaker.Consul()},
		{"garden-linux", componentMaker.GardenLinux("-allowHostAccess=true")},
	}))

	helpers.ConsulWaitUntilReady()

	gardenClient = componentMaker.GardenClient()
	receptorClient = componentMaker.ReceptorClient()

	inigo_announcement_server.Start(componentMaker.ExternalAddress)
})

var _ = AfterEach(func() {
	inigo_announcement_server.Stop()

	destroyContainerErrors := helpers.CleanupGarden(gardenClient)
Example #17
			BeforeEach(func() {
				driverName = "fakedriver"
				err := voldriver.WriteDriverSpec(logger, defaultPluginsDirectory, driverName, "spec", []byte("http://0.0.0.0:8080"))
				Expect(err).NotTo(HaveOccurred())

				syncer = vollocal.NewDriverSyncerWithDriverFactory(logger, registry, []string{defaultPluginsDirectory}, scanInterval, fakeClock, fakeDriverFactory)

				fakeDriver = new(voldriverfakes.FakeDriver)
				fakeDriver.ActivateReturns(voldriver.ActivateResponse{
					Implements: []string{"VolumeDriver"},
				})

				fakeDriverFactory.DriverReturns(fakeDriver, nil)

				process = ginkgomon.Invoke(syncer.Runner())
			})

			AfterEach(func() {
				ginkgomon.Kill(process)
			})

			It("should have fake driver in registry map", func() {
				drivers := registry.Drivers()
				Expect(len(drivers)).To(Equal(1))
				Expect(fakeDriverFactory.DriverCallCount()).To(Equal(1))
				Expect(fakeDriver.ActivateCallCount()).To(Equal(1))
			})

			Context("when drivers are added", func() {
				BeforeEach(func() {
Example #18
	return payload
}, func(encodedBuiltArtifacts []byte) {
	var builtArtifacts world.BuiltArtifacts

	err := json.Unmarshal(encodedBuiltArtifacts, &builtArtifacts)
	Expect(err).NotTo(HaveOccurred())

	localIP, err := localip.LocalIP()
	Expect(err).NotTo(HaveOccurred())

	componentMaker = helpers.MakeComponentMaker(builtArtifacts, localIP)
})

var _ = BeforeEach(func() {
	gardenProcess = ginkgomon.Invoke(grouper.NewParallel(os.Kill, grouper.Members{
		{"garden-linux", componentMaker.GardenLinux()},
	}))

	gardenClient = componentMaker.GardenClient()
})

var _ = AfterEach(func() {
	destroyContainerErrors := helpers.CleanupGarden(gardenClient)

	helpers.StopProcesses(gardenProcess)

	Expect(destroyContainerErrors).To(
		BeEmpty(),
		"%d containers failed to be destroyed!",
		len(destroyContainerErrors),
	)
Example #19
		workerDB = &fakes.FakeSaveWorkerDB{}
		gardenAddr = "http://garden.example.com"
		baggageClaimAddr = "http://volumes.example.com"
		resourceTypes = []atc.WorkerResourceType{
			{
				Type:  "type",
				Image: "image",
			},
		}
		fakeClock = fakeclock.NewFakeClock(time.Now())
	})

	Describe("registering a single worker", func() {
		JustBeforeEach(func() {
			runner := worker.NewHardcoded(logger, workerDB, fakeClock, gardenAddr, baggageClaimAddr, resourceTypes)
			process = ginkgomon.Invoke(runner)
		})

		AfterEach(func() {
			ginkgomon.Interrupt(process)
		})

		It("registers it and then keeps registering it on an interval", func() {
			expectedWorkerInfo := db.WorkerInfo{
				Name:             gardenAddr,
				GardenAddr:       gardenAddr,
				BaggageclaimURL:  baggageClaimAddr,
				ActiveContainers: 0,
				ResourceTypes:    resourceTypes,
				Platform:         "linux",
				Tags:             []string{},
Example #20
		consulClient = consulRunner.NewClient()

		logger = lagertest.NewTestLogger("test")
		clock = fakeclock.NewFakeClock(time.Now())
		registration = &api.AgentServiceRegistration{
			ID:      serviceID,
			Name:    serviceName,
			Tags:    []string{"a", "b", "c"},
			Port:    8080,
			Address: "127.0.0.1",
		}
	})

	JustBeforeEach(func() {
		registrationRunner := locket.NewRegistrationRunner(logger, registration, consulClient, 5*time.Second, clock)
		registrationProcess = ginkgomon.Invoke(registrationRunner)
	})

	AfterEach(func() {
		ginkgomon.Kill(registrationProcess)
	})

	Context("when the service has not already been registered", func() {
		It("registers the service", func() {
			services, err := consulClient.Agent().Services()
			Expect(err).NotTo(HaveOccurred())
			service, ok := services[registration.ID]
			Expect(ok).To(BeTrue())
			Expect(*service).To(Equal(api.AgentService{
				ID:      registration.ID,
				Service: registration.Name,
Example #21
		cellB ifrit.Process

		processGuid string
		appId       string
	)

	BeforeEach(func() {
		processGuid = helpers.GenerateGuid()
		appId = helpers.GenerateGuid()

		fileServer, fileServerStaticDir := componentMaker.FileServer()

		runtime = ginkgomon.Invoke(grouper.NewParallel(os.Kill, grouper.Members{
			{"router", componentMaker.Router()},
			{"file-server", fileServer},
			{"converger", componentMaker.Converger("-convergeRepeatInterval", "1s")},
			{"auctioneer", componentMaker.Auctioneer()},
			{"route-emitter", componentMaker.RouteEmitter()},
		}))

		cellAID = "cell-a"
		cellBID = "cell-b"

		cellAExecutorAddr = fmt.Sprintf("127.0.0.1:%d", 13100+GinkgoParallelNode())
		cellBExecutorAddr = fmt.Sprintf("127.0.0.1:%d", 13200+GinkgoParallelNode())

		cellARepAddr = fmt.Sprintf("0.0.0.0:%d", 14100+GinkgoParallelNode())
		cellBRepAddr = fmt.Sprintf("0.0.0.0:%d", 14200+GinkgoParallelNode())

		cellARepRunner = componentMaker.RepN(0,
			"-cellID", cellAID,
	BeforeEach(func() {
		fakeDriverFactory = new(volmanfakes.FakeDriverFactory)
		fakeClock = fakeclock.NewFakeClock(time.Unix(123, 456))

		scanInterval = 1 * time.Second

		driverRegistry = vollocal.NewDriverRegistry()
	})

	Describe("ListDrivers", func() {
		BeforeEach(func() {
			driverSyncer = vollocal.NewDriverSyncerWithDriverFactory(logger, driverRegistry, []string{"/somePath"}, scanInterval, fakeClock, fakeDriverFactory)
			client = vollocal.NewLocalClient(logger, driverRegistry, fakeClock)

			process = ginkgomon.Invoke(driverSyncer.Runner())
		})

		It("should report empty list of drivers", func() {
			drivers, err := client.ListDrivers(logger)
			Expect(err).NotTo(HaveOccurred())
			Expect(len(drivers.Drivers)).To(Equal(0))
		})

		Context("has no drivers in location", func() {

			BeforeEach(func() {
				fakeDriverFactory = new(volmanfakes.FakeDriverFactory)
			})

			It("should report empty list of drivers", func() {
Example #23
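// A thin wrapper: Configure builds the runner, Invoke blocks until metron is
// ready, and the resulting process is stored on the runner.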
func (m *MetronRunner) Start() ifrit.Process {
	runner := m.Configure()
	m.Process = ginkgomon.Invoke(runner)
	return m.Process
}
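// A minimal usage sketch under assumed names (a package-level *MetronRunner
// called metronRunner); this pairing is not taken from the source:
//
//	var metronProcess ifrit.Process
//
//	var _ = BeforeEach(func() {
//		metronProcess = metronRunner.Start()
//	})
//
//	var _ = AfterEach(func() {
//		ginkgomon.Interrupt(metronProcess)
//	})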
Example #24
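// Here the runner watches the process's own output for the string "started"
// rather than a lager log line; flags are appended in JustBeforeEach so nested
// Contexts get a chance to adjust args in their BeforeEach blocks first.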
		client = volhttp.NewRemoteClient("http://" + listenAddr)
		Expect(err).NotTo(HaveOccurred())

		testLogger = lagertest.NewTestLogger("test")
	})

	JustBeforeEach(func() {
		args = append(args, "--listenAddr", listenAddr)
		args = append(args, "--volmanDriverPaths", driversPath)

		volmanRunner := ginkgomon.New(ginkgomon.Config{
			Name:       "volman",
			Command:    exec.Command(binaryPath, args...),
			StartCheck: "started",
		})
		process = ginkgomon.Invoke(volmanRunner)
	})

	AfterEach(func() {
		ginkgomon.Kill(process)
		err := os.RemoveAll(driversPath)
		Expect(err).NotTo(HaveOccurred())
	})

	It("should listen on the given address", func() {
		_, err := client.ListDrivers(testLogger)
		Expect(err).NotTo(HaveOccurred())
	})

	Context("given a driverspath with a single spec file", func() {
		BeforeEach(func() {
Example #25
	"fmt"
	"sync/atomic"

	"github.com/cloudfoundry-incubator/receptor"
	"github.com/cloudfoundry-incubator/receptor/serialization"
	"github.com/cloudfoundry-incubator/runtime-schema/models"
	"github.com/tedsuo/ifrit/ginkgomon"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = Describe("Desired LRP API", func() {

	BeforeEach(func() {
		receptorProcess = ginkgomon.Invoke(receptorRunner)
	})

	AfterEach(func() {
		ginkgomon.Kill(receptorProcess)
	})

	Describe("POST /v1/desired_lrps/", func() {
		var lrpToCreate receptor.DesiredLRPCreateRequest
		var createErr error

		BeforeEach(func() {
			lrpToCreate = newValidDesiredLRPCreateRequest()
			createErr = client.CreateDesiredLRP(lrpToCreate)
		})
Example #26
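// The Runner struct is itself an ifrit.Runner, so it can be passed straight to
// ginkgomon.Invoke; Interrupt in AfterEach stops the 100ms scheduling loop.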
		}

		pipelineDB.ScopedNameStub = func(thing string) string {
			return "pipeline:" + thing
		}
		pipelineDB.GetConfigReturns(initialConfig, 1, nil)

		lock = new(dbfakes.FakeLock)
		locker.AcquireWriteLockImmediatelyReturns(lock, nil)
	})

	JustBeforeEach(func() {
		process = ginkgomon.Invoke(&Runner{
			Logger:    lagertest.NewTestLogger("test"),
			Locker:    locker,
			DB:        pipelineDB,
			Scheduler: scheduler,
			Noop:      noop,
			Interval:  100 * time.Millisecond,
		})
	})

	AfterEach(func() {
		ginkgomon.Interrupt(process)
	})

	It("acquires the build scheduling lock for each job", func() {
		Eventually(locker.AcquireWriteLockImmediatelyCallCount).Should(Equal(2))

		job := locker.AcquireWriteLockImmediatelyArgsForCall(0)
		Ω(job).Should(Equal([]db.NamedLock{db.JobSchedulingLock("pipeline:some-job")}))
Example #27
package main_test

import (
	"code.cloudfoundry.org/bbs/cmd/bbs/testrunner"
	"github.com/tedsuo/ifrit/ginkgomon"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = Describe("ETCD Metrics", func() {
	BeforeEach(func() {
		bbsRunner = testrunner.New(bbsBinPath, bbsArgs)
		bbsProcess = ginkgomon.Invoke(bbsRunner)
	})

	It("starts emitting metrics", func() {
		Eventually(testMetricsChan).Should(Receive())
	})
})
Example #28
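// Three independently invoked groups mirror a Diego deployment: a "cell"
// (rep), a "brain" (bbs, receptor, auctioneer, file server), and a CC
// "bridge" (fake CC, stager, nsync-listener).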
  web: the-start-command
EOF
				`},
	}

	BeforeEach(func() {
		appId = helpers.GenerateGuid()
		taskId = helpers.GenerateGuid()

		fileServer, dir := componentMaker.FileServer()
		fileServerStaticDir = dir

		fakeCC = componentMaker.FakeCC()

		cell = ginkgomon.Invoke(grouper.NewParallel(os.Kill, grouper.Members{
			{"rep", componentMaker.Rep("-memoryMB=1024")},
		}))

		brain = ginkgomon.Invoke(grouper.NewParallel(os.Kill, grouper.Members{
			{"bbs", componentMaker.BBS()},
			{"receptor", componentMaker.Receptor()},
			{"auctioneer", componentMaker.Auctioneer()},
			{"file-server", fileServer},
		}))

		bridge = ginkgomon.Invoke(grouper.NewParallel(os.Kill, grouper.Members{
			{"cc", fakeCC},
			{"stager", componentMaker.Stager()},
			{"nsync-listener", componentMaker.NsyncListener()},
		}))
Example #29
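// The helper takes a "check" flag: clearing StartCheck makes ginkgomon.Invoke
// return as soon as the process is spawned instead of waiting for a log line.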
				"-bulkBatchSize", "10",
				"-lifecycle", "buildpack/some-stack:some-health-check.tar.gz",
				"-lifecycle", "docker:the/docker/lifecycle/path.tgz",
				"-fileServerURL", "http://file-server.com",
				"-lockRetryInterval", "1s",
				"-consulCluster", consulRunner.ConsulCluster(),
				"-bbsAddress", fakeBBS.URL(),
				"-privilegedContainers", "false",
			),
		})

		if !check {
			runner.StartCheck = ""
		}

		return ginkgomon.Invoke(runner)
	}

	BeforeEach(func() {
		logger = lagertest.NewTestLogger("test")

		fakeCC = ghttp.NewServer()

		pollingInterval = 500 * time.Millisecond
		domainTTL = 1 * time.Second
		heartbeatInterval = 30 * time.Second

		desiredAppResponses := map[string]string{
			"process-guid-1": `{
					"disk_mb": 1024,
					"environment": [
Example #30
	RegisterFailHandler(Fail)
	RunSpecs(t, "Migrations Suite")
}

var _ = BeforeSuite(func() {
	logger = lagertest.NewTestLogger("test")

	etcdPort = 4001 + GinkgoParallelNode()
	etcdUrl = fmt.Sprintf("http://127.0.0.1:%d", etcdPort)
	etcdRunner = etcdstorerunner.NewETCDClusterRunner(etcdPort, 1, nil)

	etcdRunner.Start()

	dbName := fmt.Sprintf("diego_%d", GinkgoParallelNode())
	sqlRunner = test_helpers.NewSQLRunner(dbName)
	sqlProcess = ginkgomon.Invoke(sqlRunner)

	// mysql must be set up on localhost as described in the CONTRIBUTING.md doc
	// in diego-release.
	var err error
	rawSQLDB, err = sql.Open(sqlRunner.DriverName(), sqlRunner.ConnectionString())
	Expect(err).NotTo(HaveOccurred())
	Expect(rawSQLDB.Ping()).NotTo(HaveOccurred())

	flavor = sqlRunner.DriverName()

	encryptionKey, err := encryption.NewKey("label", "passphrase")
	Expect(err).NotTo(HaveOccurred())
	keyManager, err := encryption.NewKeyManager(encryptionKey, nil)
	Expect(err).NotTo(HaveOccurred())
	cryptor = encryption.NewCryptor(keyManager, rand.Reader)