func TestConnectionManager_whenInitialized_acceptsConnectionsCorrectly(t *testing.T) {
	gomega.RegisterTestingT(t)

	// Choose a high port so we don't need sudo to run tests
	config := Config{}
	config.Port = 55555
	SetConfig(&config)

	underTest := NewConnectionManager()
	go underTest.Start()
	gomega.Eventually(func() net.Listener {
		return underTest.tcpLn
	}).ShouldNot(gomega.BeNil())

	testConn, err := net.Dial("tcp", "localhost:55555")
	if err != nil || testConn == nil {
		t.Fail()
	}

	fmt.Fprintf(testConn, "PINGREQ\n")
	response, err := bufio.NewReader(testConn).ReadString('\n')
	if err != nil || response != "PINGRESP\n" {
		t.Fail()
	}

	testConn.Close()
}
// MakeRequest makes an HTTP request, checks the HTTP status code, and returns the body as a string
func MakeRequest(method, url, body string, expectedCode int) (string, *http.Response) {
	log15.Debug("MakeRequest", "verb", method, "url", url)

	// prepare the request body
	var bodyReader io.Reader
	if body != "" {
		bodyReader = strings.NewReader(body)
	}

	// make the request
	req, err := http.NewRequest(method, url, bodyReader)
	gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
	resp, err := http.DefaultClient.Do(req)

	// check the response for basic validity
	gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
	gomega.Ω(resp.StatusCode).Should(gomega.Equal(expectedCode))
	gomega.Ω(resp.Header.Get("Content-Type")).ShouldNot(gomega.BeEmpty())

	// read the response body
	defer resp.Body.Close()
	respBody, err := ioutil.ReadAll(resp.Body)
	gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
	return string(respBody), resp
}
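// A minimal usage sketch for MakeRequest, assuming a hypothetical /health
// endpoint on localhost:8080 that returns 200 with a JSON content type.
// The endpoint and port are illustrative assumptions, not part of the
// suite above.
func TestHealthEndpoint_ReturnsOK(t *testing.T) {
	gomega.RegisterTestingT(t)
	body, resp := MakeRequest("GET", "http://localhost:8080/health", "", http.StatusOK)
	gomega.Ω(resp.Header.Get("Content-Type")).Should(gomega.ContainSubstring("json"))
	gomega.Ω(body).ShouldNot(gomega.BeEmpty())
}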
func TestQueue_ReceiveBeforeSend_ReturnsExpectedResult(t *testing.T) {
	gomega.RegisterTestingT(t)

	underTest := NewQueue("TestQueue")

	var receivedMessage *message.Message
	go func() {
		receivedMessage = <-underTest.OutputChannel
	}()
	time.Sleep(time.Millisecond * 10)

	testMessagePayload := []byte("TestMessage")
	underTest.InputChannel <- message.NewHeaderlessMessage(&testMessagePayload)

	gomega.Eventually(func() *message.Message {
		return receivedMessage
	}).Should(gomega.Not(gomega.BeNil()))

	if !bytes.Equal(*receivedMessage.Body, testMessagePayload) {
		t.Fail()
	}
}
func tryEchoUDP(svc *kapi.Service) error {
	rawIP := svc.Spec.ClusterIP
	o.Expect(rawIP).NotTo(o.BeEmpty(), "The service should have a cluster IP set")
	ip := net.ParseIP(rawIP)
	o.Expect(ip).NotTo(o.BeNil(), "The service should have a valid cluster IP, but %q was not valid", rawIP)

	var udpPort int
	for _, port := range svc.Spec.Ports {
		if port.Protocol == "UDP" {
			udpPort = int(port.Port)
			break
		}
	}
	o.Expect(udpPort).NotTo(o.Equal(0), "The service should have a UDP port exposed")

	// For UDP, we just drop packets on the floor rather than queue them up
	readTimeout := 5 * time.Second

	expectedBuff := []byte("It's time to UDP!\n")
	o.Eventually(func() ([]byte, error) {
		return tryEchoUDPOnce(ip, udpPort, expectedBuff, readTimeout)
	}, 2*time.Minute, readTimeout).Should(o.Equal(expectedBuff))

	return nil
}
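// tryEchoUDPOnce is referenced above but not shown in this excerpt. A minimal
// sketch of what it might look like, assuming a simple send-then-read echo
// exchange (uses net, strconv, and time); treat this as an illustration, not
// the original helper.
func tryEchoUDPOnce(ip net.IP, udpPort int, payload []byte, readTimeout time.Duration) ([]byte, error) {
	conn, err := net.Dial("udp", net.JoinHostPort(ip.String(), strconv.Itoa(udpPort)))
	if err != nil {
		return nil, err
	}
	defer conn.Close()

	if _, err := conn.Write(payload); err != nil {
		return nil, err
	}

	// Bound the read so a dropped packet fails this attempt instead of hanging;
	// the enclosing Eventually retries until the echo comes back.
	if err := conn.SetReadDeadline(time.Now().Add(readTimeout)); err != nil {
		return nil, err
	}
	buf := make([]byte, len(payload)*2)
	n, err := conn.Read(buf)
	if err != nil {
		return nil, err
	}
	return buf[:n], nil
}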
g.By("waiting for builder service account") err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace())) o.Expect(err).NotTo(o.HaveOccurred()) oc.SetOutputDir(exutil.TestContext.OutputDir) }) g.Describe("being created from new-build", func() { g.It("should create a image via new-build", func() { g.By(fmt.Sprintf("calling oc new-build with Dockerfile")) err := oc.Run("new-build").Args("-D", "-").InputString(testDockerfile).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("starting a test build") bc, err := oc.REST().BuildConfigs(oc.Namespace()).Get("origin-base") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(bc.Spec.Source.Git).To(o.BeNil()) o.Expect(bc.Spec.Source.Dockerfile).NotTo(o.BeNil()) o.Expect(*bc.Spec.Source.Dockerfile).To(o.Equal(testDockerfile)) buildName := "origin-base-1" g.By("expecting the Dockerfile build is in Complete phase") err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), buildName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn) o.Expect(err).NotTo(o.HaveOccurred()) g.By("getting the build Docker image reference from ImageStream") image, err := oc.REST().ImageStreamTags(oc.Namespace()).Get("origin-base", "latest") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(image.Image.DockerImageMetadata.Config.User).To(o.Equal("1001")) }) g.It("should create a image via new-build and infer the origin tag", func() {
// ArtifactDirPath returns the value of the ARTIFACT_DIR environment variable
func ArtifactDirPath() string {
	path := os.Getenv("ARTIFACT_DIR")
	o.Expect(path).NotTo(o.BeEmpty())
	return path
}
oc = exutil.NewCLI("cli-build-nosrc", exutil.KubeConfigPath()) exampleBuild = exutil.FixturePath("..", "extended", "testdata", "test-build-app") ) g.JustBeforeEach(func() { g.By("waiting for builder service account") err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace())) o.Expect(err).NotTo(o.HaveOccurred()) oc.Run("create").Args("-f", buildFixture).Execute() }) g.Describe("started build", func() { g.It("should build even with an empty source in build config", func() { g.By("starting the build with --wait flag") out, err := oc.Run("start-build").Args("nosrc-build", "--wait", fmt.Sprintf("--from-dir=%s", exampleBuild)).Output() o.Expect(err).NotTo(o.HaveOccurred()) g.By("verifying build success") err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), "nosrc-build-1", exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn) g.By(fmt.Sprintf("verifying the build %q status", out)) build, err := oc.REST().Builds(oc.Namespace()).Get("nosrc-build-1") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(build.Spec.Source.Dockerfile).To(o.BeNil()) o.Expect(build.Spec.Source.Git).To(o.BeNil()) o.Expect(build.Spec.Source.Images).To(o.BeNil()) o.Expect(build.Spec.Source.Binary).NotTo(o.BeNil()) }) }) })
var (
	buildFixture = exutil.FixturePath("..", "extended", "fixtures", "test-build-revision.json")
	oc           = exutil.NewCLI("cli-build-revision", exutil.KubeConfigPath())
)

g.JustBeforeEach(func() {
	g.By("waiting for builder service account")
	err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
	o.Expect(err).NotTo(o.HaveOccurred())
	oc.Run("create").Args("-f", buildFixture).Execute()
})

g.Describe("started build", func() {
	g.It("should contain source revision information", func() {
		g.By("starting the build with --wait flag")
		out, err := oc.Run("start-build").Args("sample-build", "--wait").Output()
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By(fmt.Sprintf("verifying the build %q status", out))
		build, err := oc.REST().Builds(oc.Namespace()).Get(out)
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(build.Spec.Revision).NotTo(o.BeNil())
		o.Expect(build.Spec.Revision.Git).NotTo(o.BeNil())
		o.Expect(build.Spec.Revision.Git.Commit).NotTo(o.BeEmpty())
		o.Expect(build.Spec.Revision.Git.Author.Name).NotTo(o.BeEmpty())
		o.Expect(build.Spec.Revision.Git.Committer.Name).NotTo(o.BeEmpty())
		o.Expect(build.Spec.Revision.Git.Message).NotTo(o.BeEmpty())
	})
})
})
ginkgo.AfterEach(func() {
	server.Close()
})

ginkgo.Describe("Get", func() {
	ginkgo.It("Get deployment info successfully", func() {
		baseVersion := "1.1.0"
		fullVersion := "1.1.0-bcea65f"
		gitCommitHash := "bcea65f"
		networkType := "SOFTWARE_DEFINED"

		server.SetResponseJson(200, Info{
			BaseVersion:   baseVersion,
			FullVersion:   fullVersion,
			GitCommitHash: gitCommitHash,
			NetworkType:   networkType,
		})

		info, err := client.Info.Get()
		ginkgo.GinkgoT().Log(err)
		gomega.Expect(err).Should(gomega.BeNil())
		gomega.Expect(info).ShouldNot(gomega.BeNil())
		gomega.Expect(info.BaseVersion).Should(gomega.Equal(baseVersion))
		gomega.Expect(info.FullVersion).Should(gomega.Equal(fullVersion))
		gomega.Expect(info.GitCommitHash).Should(gomega.Equal(gitCommitHash))
		gomega.Expect(info.NetworkType).Should(gomega.Equal(networkType))
	})
})
})
"--wait").Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("creating a build that tries to gain root access via su") err = oc.Run("create").Args("-f", rootAccessBuildFixture).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("start the root-access-build with the --wait flag") err = oc.Run("start-build").Args("root-access-build", "--wait").Execute() o.Expect(err).To(o.HaveOccurred()) g.By("verifying the build status") builds, err := oc.REST().Builds(oc.Namespace()).List(kapi.ListOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(builds.Items).ToNot(o.BeEmpty()) // Find the build var build *buildapi.Build for i := range builds.Items { if builds.Items[i].Name == "root-access-build-1" { build = &builds.Items[i] break } } o.Expect(build).NotTo(o.BeNil()) o.Expect(build.Status.Phase).Should(o.BeEquivalentTo(buildapi.BuildPhaseFailed)) }) }) })
import ( // "fmt" "github.com/CapillarySoftware/gostat/protoStat" gi "github.com/onsi/ginkgo" gom "github.com/onsi/gomega" nano "github.com/op/go-nanomsg" // "strings" "time" ) var _ = gi.Describe("Goreport", func() { var rep Reporter gi.BeforeEach(func() { ReporterConfig("ipc:///tmp/goreportertest.ipc", 1) rep = NewReporter() gom.Expect(rep).ShouldNot(gom.Equal(gom.BeNil())) }) gi.It("End to End integration test with stats", func() { pull, err := nano.NewPullSocket() gom.Expect(err).Should(gom.BeNil()) pull.SetRecvTimeout(6 * time.Second) pull.SetRecvBuffer(1000) pull.Bind("ipc:///tmp/goreportertest.ipc") key := "key" rep.RegisterStat(key) rep.RegisterStatWIndex(key, "index") rep.AddStat(key, 2) rep.AddStat(key, 2) rep.AddStatWIndex(key, 2, "index") rep.AddStatWIndex(key, 2, "index")
		bytes.NewReader(body))
})

It("Returns a status code of 200", func() {
	serve.Handler.ServeHTTP(recorder, request)
	Expect(recorder.Code).To(gomega.Equal(200))
})

It("Saves the user in the database", func() {
	user := gory.Build("userOk").(*models.User)
	userTest := models.User{
		IdUser: user.IdUser,
	}
	userTest, err := userTest.Get()
	Expect(err).To(gomega.BeNil())
	Expect(user.Name).To(gomega.Equal(userTest.Name))
})

It("Response should have a hash token", func() {
	serve.Handler.ServeHTTP(recorder, request)
	Expect(recorder.Code).To(gomega.Equal(200))
	Expect(recorder.HeaderMap["Content-Type"][0]).
		To(gomega.ContainSubstring("application/json; charset=UTF-8"))

	data := myCloser{bytes.NewBufferString(recorder.Body.String())}
	token, err := DecodeToken(data)
	Expect(err).To(gomega.BeNil())
	Expect(token).ShouldNot(gomega.BeZero())
})
})
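// myCloser is used above but not defined in this excerpt. A plausible minimal
// sketch, assuming DecodeToken wants an io.ReadCloser and myCloser simply
// wraps an in-memory buffer; this is an assumption, not the original type.
type myCloser struct {
	*bytes.Buffer
}

// Close satisfies io.ReadCloser; an in-memory buffer has nothing to release.
func (myCloser) Close() error { return nil }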
func TestMetricsManager_ReceivesBasicMetric_PublishesDownstreamAndSendsToStatsD(t *testing.T) {
	gomega.RegisterTestingT(t)

	// Listen on UDP
	var statsDBuffer [2048]byte
	var udpPacketsReceived int
	udpAddr, _ := net.ResolveUDPAddr("udp", ":0")
	udpConn, _ := net.ListenUDP("udp", udpAddr)

	// Don't care about the contents of the received messages - just the fact
	// that we received them. We trust the StatsD library
	go func() {
		for i := 0; i < 3; i++ {
			_, _, _ = udpConn.ReadFromUDP(statsDBuffer[0:])
			udpPacketsReceived++
		}
	}()

	config := Config{StatsDEndpoint: fmt.Sprintf("localhost:%d", udpConn.LocalAddr().(*net.UDPAddr).Port)}
	SetConfig(&config)

	qm := newQueueManager()

	// Listen to metrics queue
	writerBuffer := new(bytes.Buffer)
	dummyWriter := bufio.NewWriter(writerBuffer)
	closedChannel := make(chan bool)
	dummyClient := Client{Name: "Test", Writer: dummyWriter, Closed: &closedChannel}
	qm.Subscribe("metrics", &dummyClient)

	// Log one of each metric
	// Check we've received metrics both via UDP - and on the metrics channel
	testMetric := NewMetric("test", "gauge", 123)
	qm.metricsManager.metricsChannel <- testMetric
	gomega.Eventually(func() int { return udpPacketsReceived }, "2s").Should(gomega.Equal(1))
	gomega.Eventually(func() []byte { return writerBuffer.Bytes() }).ShouldNot(gomega.BeNil())
	writerBuffer.Reset()

	testMetric2 := NewMetric("test", "counter", 123)
	qm.metricsManager.metricsChannel <- testMetric2
	gomega.Eventually(func() int { return udpPacketsReceived }, "2s").Should(gomega.Equal(2))
	gomega.Eventually(func() []byte { return writerBuffer.Bytes() }).ShouldNot(gomega.BeNil())
	writerBuffer.Reset()

	testMetric3 := NewMetric("test", "timing", 123)
	qm.metricsManager.metricsChannel <- testMetric3
	gomega.Eventually(func() int { return udpPacketsReceived }, "2s").Should(gomega.Equal(3))
	gomega.Eventually(func() []byte { return writerBuffer.Bytes() }).ShouldNot(gomega.BeNil())
	writerBuffer.Reset()
}
	is, err := oc.Client().ImageStreams(oc.Namespace()).Get(name)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(is.Status.DockerImageRepository).NotTo(o.BeEmpty())
	o.Expect(is.Status.Tags["direct"].Items).NotTo(o.BeEmpty())
	o.Expect(is.Status.Tags["pullthrough"].Items).NotTo(o.BeEmpty())

	dc, err := oc.Client().DeploymentConfigs(oc.Namespace()).Get(name)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(dc.Spec.Triggers).To(o.HaveLen(3))

	imageID := is.Status.Tags["pullthrough"].Items[0].Image
	resolvedReference := fmt.Sprintf("%s@%s", is.Status.DockerImageRepository, imageID)
	directReference := is.Status.Tags["direct"].Items[0].DockerImageReference

	// controller should be using pullthrough for this (pointing to local registry)
	o.Expect(dc.Spec.Triggers[1].ImageChangeParams).NotTo(o.BeNil())
	o.Expect(dc.Spec.Triggers[1].ImageChangeParams.LastTriggeredImage).To(o.Equal(resolvedReference))
	o.Expect(dc.Spec.Template.Spec.Containers[0].Image).To(o.Equal(resolvedReference))

	// controller should have preferred the base image
	o.Expect(dc.Spec.Triggers[2].ImageChangeParams).NotTo(o.BeNil())
	o.Expect(dc.Spec.Triggers[2].ImageChangeParams.LastTriggeredImage).To(o.Equal(directReference))
	o.Expect(dc.Spec.Template.Spec.Containers[1].Image).To(o.Equal(directReference))
})

g.Describe("with test deployments [Conformance]", func() {
	g.AfterEach(func() {
		failureTrap(oc, "deployment-test", g.CurrentGinkgoTestDescription().Failed)
	})

	g.It("should run a deployment to completion and then scale to zero", func() {
		token, _ := models.GenerateToken(user.Email + "#" + user.Pass)
		user.Token = token
		user.Save()

		body := ""
		request, _ = http.NewRequest("GET", "/fake", strings.NewReader(body))
		request.Header.Add("Authorization", "Bearer "+token.Hash)
	})

	It("Check that the user with the token is saved in the database", func() {
		chkUser := models.User{}
		chkUser.IdUser = user.IdUser
		chkUser, err := chkUser.Get()
		Expect(err).To(gomega.BeNil())
		Expect(chkUser).ShouldNot(gomega.BeZero())
	})

	It("Returns status code 200", func() {
		serve.Handler.ServeHTTP(recorder, request)
		Expect(recorder.Code).To(gomega.Equal(200))
	})
})

Context("With a user who does not exist in the database", func() {
	var user *models.User

	BeforeEach(func() {
		user = gory.Build("userBad").(*models.User)
ginkgo.AfterEach(func() {
	cleanVirtualSubnets(client, projectId)
	server.Close()
})

ginkgo.Describe("CreateDeleteVirtualSubnet", func() {
	ginkgo.It("Virtual subnet create and delete succeeds", func() {
		mockTask := createMockTask("CREATE_VIRTUAL_NETWORK", "COMPLETED")
		server.SetResponseJson(200, mockTask)

		task, err := client.VirtualSubnets.Create(projectId, networkSpec)
		task, err = client.Tasks.Wait(task.ID)
		ginkgo.GinkgoT().Log(err)
		gomega.Expect(err).Should(gomega.BeNil())
		gomega.Expect(task).ShouldNot(gomega.BeNil())
		gomega.Expect(task.Operation).Should(gomega.Equal("CREATE_VIRTUAL_NETWORK"))
		gomega.Expect(task.State).Should(gomega.Equal("COMPLETED"))

		mockTask = createMockTask("DELETE_VIRTUAL_NETWORK", "COMPLETED")
		server.SetResponseJson(200, mockTask)
		task, err = client.VirtualSubnets.Delete(task.Entity.ID)
		task, err = client.Tasks.Wait(task.ID)
		ginkgo.GinkgoT().Log(err)
		gomega.Expect(err).Should(gomega.BeNil())
		gomega.Expect(task).ShouldNot(gomega.BeNil())
		gomega.Expect(task.Operation).Should(gomega.Equal("DELETE_VIRTUAL_NETWORK"))
		gomega.Expect(task.State).Should(gomega.Equal("COMPLETED"))