Example #1
func TestQueue_EvenNumberOfPushesAndPops_GivesZeroFinalLength(t *testing.T) {
	// Need gomega for async testing
	gomega.RegisterTestingT(t)

	underTest := NewQueue("Test")
	numberOfRounds := 200

	for i := 0; i < numberOfRounds; i++ {
		dummyMessagePayLoad := []byte{byte(i)}
		dummyMessage := message.NewHeaderlessMessage(&dummyMessagePayLoad)
		underTest.InputChannel <- dummyMessage
	}

	gomega.Eventually(func() int {
		return underTest.length
	}).Should(gomega.Equal(numberOfRounds))

	for i := 0; i < numberOfRounds; i++ {
		// named msg to avoid shadowing the message package used above
		msg := <-underTest.OutputChannel
		if int((*msg.Body)[0]) != i {
			t.Logf("Expected %d, got %d", i, int((*msg.Body)[0]))
			t.FailNow()
		}
	}

	gomega.Eventually(func() int {
		return underTest.length
	}).Should(gomega.Equal(0))
}
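The Eventually assertions above rely on gomega's defaults: a one-second timeout polled every ten milliseconds. Both bounds can be overridden with extra arguments, passed either as time.Duration values or as duration strings such as "3s". A minimal sketch, reusing the queue from the example above:

// Sketch only: give the queue more time to drain by passing an explicit
// timeout (2s) and polling interval (50ms) to Eventually.
gomega.Eventually(func() int {
	return underTest.length
}, 2*time.Second, 50*time.Millisecond).Should(gomega.Equal(0))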
Example #2
// An unsubscribing client should not be considered for message delivery
func TestQueue_sendMessageAfterUnsubscribe_messageReceivedSuccessfully(t *testing.T) {
	// Need gomega for async testing
	gomega.RegisterTestingT(t)

	testMessagePayload := []byte("Testing!")
	expectedMessagePayload := []byte("Testing!\r\n.\r\n")
	testMessage := message.NewHeaderlessMessage(&testMessagePayload)

	dummyMetricsPipe := make(chan<- *Metric, 10)
	dummyClosingPipe := make(chan<- *string)

	underTest := newMessageQueue(TEST_QUEUE_NAME, dummyMetricsPipe, dummyClosingPipe)

	writerBuffer1 := new(bytes.Buffer)
	dummyWriter1 := bufio.NewWriter(writerBuffer1)
	closedChannel1 := make(chan bool)
	dummyClient1 := Client{Name: "Test1", Writer: dummyWriter1, Closed: &closedChannel1}

	writerBuffer2 := new(bytes.Buffer)
	dummyWriter2 := bufio.NewWriter(writerBuffer2)
	closedChannel2 := make(chan bool)
	dummyClient2 := Client{Name: "Test2", Writer: dummyWriter2, Closed: &closedChannel2}

	// Add the subscription
	underTest.AddSubscriber(&dummyClient1)
	underTest.AddSubscriber(&dummyClient2)

	// Queue the message
	underTest.Publish(testMessage)

	// Bit of a hack - only one of the subscribers will get the message,
	// and we don't know which one
	gomega.Eventually(func() []byte {
		if writerBuffer1.String() == "" {
			return writerBuffer2.Bytes()
		} else {
			return writerBuffer1.Bytes()
		}
	}).Should(gomega.Equal(expectedMessagePayload))

	// We'll be reusing these buffers
	writerBuffer1.Reset()
	writerBuffer2.Reset()

	// Close one client
	*dummyClient1.Closed <- true

	// Should remove the client from the map
	gomega.Eventually(func() bool {
		return underTest.subscribers[dummyClient1.Name] == nil
	}).Should(gomega.BeTrue())

	// Now send a message - the remaining client should receive it without issue
	underTest.Publish(testMessage)

	gomega.Eventually(func() []byte {
		return writerBuffer2.Bytes()
	}).Should(gomega.Equal(expectedMessagePayload))

}
Example #3
func TestTokenRefresh(t *testing.T) {
	gomega.RegisterTestingT(t)
	Convey("Test making request", t, func() {
		setup(MockRoute{"GET", "/v2/organizations", listOrgsPayload})
		c := &Config{
			ApiAddress:   server.URL,
			LoginAddress: fakeUAAServer.URL,
			Username:     "******",
			Password:     "******",
		}
		client := NewClient(c)
		gomega.Consistently(client.GetToken()).Should(gomega.Equal("bearer foobar2"))
		gomega.Eventually(client.GetToken(), "3s").Should(gomega.Equal("bearer foobar3"))
	})
}
Example #4
// Check that messages sent to a queue are eventually sent to consumers
func TestQueue_sendMessage_messageReceivedSuccessfully(t *testing.T) {
	// Need gomega for async testing
	gomega.RegisterTestingT(t)

	testMessagePayload := []byte("Testing!")
	expectedMessagePayload := []byte("Testing!\r\n.\r\n")
	testMessage := message.NewHeaderlessMessage(&testMessagePayload)

	dummyMetricsPipe := make(chan<- *Metric)
	dummyClosingPipe := make(chan<- *string)
	underTest := newMessageQueue(TEST_QUEUE_NAME, dummyMetricsPipe, dummyClosingPipe)

	writerBuffer := new(bytes.Buffer)
	dummyWriter := bufio.NewWriter(writerBuffer)
	closedChannel := make(chan bool)
	dummyClient := Client{Name: "Test", Writer: dummyWriter, Closed: &closedChannel}

	// Add the subscription
	underTest.AddSubscriber(&dummyClient)

	// Queue the message
	underTest.Publish(testMessage)

	gomega.Eventually(func() []byte {
		return writerBuffer.Bytes()
	}).Should(gomega.Equal(expectedMessagePayload))
}
Example #5
func TestQueue_xPendingMetrics_producesCorrectMetric(t *testing.T) {
	// Need gomega for async testing
	gomega.RegisterTestingT(t)

	numberOfMessagesToSend := 10

	testMessagePayload := []byte("Testing!")
	testMessage := message.NewHeaderlessMessage(&testMessagePayload)

	dummyMetricsPipe := make(chan *Metric)
	dummyClosingPipe := make(chan *string)
	underTest := newMessageQueue(TEST_QUEUE_NAME, dummyMetricsPipe, dummyClosingPipe)

	for i := 0; i < numberOfMessagesToSend; i++ {
		underTest.Publish(testMessage)
	}

	// Eventually, we should see `numberOfMessagesToSend` pending messages
	gomega.Eventually(func() int {
		metric := <-dummyMetricsPipe
		if strings.Contains(metric.Name, "pending") {
			return int(metric.Value)
		} else {
			return -1
		}
	}, "5s").Should(gomega.Equal(numberOfMessagesToSend))
}
Example #6
// Waits for the build number on the Jenkins job to change. Returns
// an error if the timeout expires.
func (jmon *JobMon) await(timeout time.Duration) error {
	err := wait.Poll(10*time.Second, timeout, func() (bool, error) {

		buildNumber, err := jmon.j.getJobBuildNumber(jmon.jobName, time.Minute)
		o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())

		ginkgolog("Checking build number for job %q current[%v] vs last[%v]", jmon.jobName, buildNumber, jmon.lastBuildNumber)
		if buildNumber == jmon.lastBuildNumber {
			return false, nil
		}

		if jmon.buildNumber == "" {
			jmon.buildNumber = buildNumber
		}
		body, status, err := jmon.j.getResource("job/%s/%s/api/json?depth=1", jmon.jobName, jmon.buildNumber)
		o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
		o.ExpectWithOffset(1, status).To(o.Equal(200))

		body = strings.ToLower(body)
		if strings.Contains(body, "\"building\":true") {
			ginkgolog("Jenkins job %q still building:\n%s\n\n", jmon.jobName, body)
			return false, nil
		}

		if strings.Contains(body, "\"result\":null") {
			ginkgolog("Jenkins job %q still building result:\n%s\n\n", jmon.jobName, body)
			return false, nil
		}

		ginkgolog("Jenkins job %q build complete:\n%s\n\n", jmon.jobName, body)
		return true, nil
	})
	return err
}
Example #7
func compareSchemas(actual, expected []*schema.Schema) {
	g.Expect(actual).To(g.HaveLen(len(expected)))
	for i, s := range actual {
		sortProperties(s)
		sortProperties(expected[i])
		g.Expect(s).To(g.Equal(expected[i]))
	}
}
Example #8
// NewSampleRepoTest creates a function for a new ginkgo test case that will instantiate a template
// from a URL, kick off the buildconfig defined in that template, wait for the build/deploy,
// and then confirm the application is serving an expected string value.
func NewSampleRepoTest(c SampleRepoConfig) func() {
	return func() {
		defer g.GinkgoRecover()
		var oc = exutil.NewCLI(c.repoName+"-repo-test", exutil.KubeConfigPath())

		g.JustBeforeEach(func() {
			g.By("Waiting for builder service account")
			err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
			o.Expect(err).NotTo(o.HaveOccurred())
		})

		g.Describe("Building "+c.repoName+" app from new-app", func() {
			g.It("should build a "+c.repoName+" image and run it in a pod", func() {
				oc.SetOutputDir(exutil.TestContext.OutputDir)

				exutil.CheckOpenShiftNamespaceImageStreams(oc)
				g.By(fmt.Sprintf("calling oc new-app with the " + c.repoName + " example template"))
				err := oc.Run("new-app").Args("-f", c.templateURL).Execute()
				o.Expect(err).NotTo(o.HaveOccurred())

				// all the templates automatically start a build.
				buildName := c.buildConfigName + "-1"

				g.By("expecting the build is in the Complete phase")
				err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), buildName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
				if err != nil {
					exutil.DumpBuildLogs(c.buildConfigName, oc)
				}
				o.Expect(err).NotTo(o.HaveOccurred())

				g.By("expecting the app deployment to be complete")
				err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), c.deploymentConfigName, oc)
				o.Expect(err).NotTo(o.HaveOccurred())

				if len(c.dbDeploymentConfigName) > 0 {
					g.By("expecting the db deployment to be complete")
					err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), c.dbDeploymentConfigName, oc)
					o.Expect(err).NotTo(o.HaveOccurred())
				}

				g.By("expecting the service is available")
				serviceIP, err := oc.Run("get").Args("service", c.serviceName).Template("{{ .spec.clusterIP }}").Output()
				o.Expect(err).NotTo(o.HaveOccurred())
				o.Expect(serviceIP).ShouldNot(o.Equal(""))

				g.By("expecting an endpoint is available")
				err = oc.KubeFramework().WaitForAnEndpoint(c.serviceName)
				o.Expect(err).NotTo(o.HaveOccurred())

				g.By("verifying string from app request")
				response, err := exutil.FetchURL("http://"+serviceIP+":8080"+c.appPath, time.Duration(30*time.Second))
				o.Expect(err).NotTo(o.HaveOccurred())
				o.Expect(response).Should(o.ContainSubstring(c.expectedString))
			})
		})
	}
}
Example #9
func (m *BeContainedInMatcher) Match(actual interface{}) (success bool, err error) {
	for _, value := range m.elements {
		submatcher := gomega.Equal(value)
		success, err = submatcher.Match(actual)
		if success || err != nil {
			return
		}
	}
	return
}
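Match by itself does not satisfy gomega's types.GomegaMatcher interface, which also requires FailureMessage and NegatedFailureMessage. A minimal sketch of those two methods using the elements field from above; the message wording is an assumption, not taken from the original matcher:

// Sketch only: failure-message wording is assumed, not from the original.
func (m *BeContainedInMatcher) FailureMessage(actual interface{}) string {
	return fmt.Sprintf("Expected\n\t%#v\nto be contained in\n\t%#v", actual, m.elements)
}

func (m *BeContainedInMatcher) NegatedFailureMessage(actual interface{}) string {
	return fmt.Sprintf("Expected\n\t%#v\nnot to be contained in\n\t%#v", actual, m.elements)
}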
Example #10
// Finds the pod running Jenkins
func FindJenkinsPod(oc *exutil.CLI) *kapi.Pod {
	pods, err := exutil.GetDeploymentConfigPods(oc, "jenkins")
	o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())

	if pods == nil || pods.Items == nil {
		g.Fail("No pods matching jenkins deploymentconfig in namespace " + oc.Namespace())
	}

	o.ExpectWithOffset(1, len(pods.Items)).To(o.Equal(1))
	return &pods.Items[0]
}
Example #11
// VerifyImagesSame takes the two supplied image tags and checks whether they reference the same hexadecimal image ID; the strategy argument is used only for debug output
func VerifyImagesSame(comp1, comp2, strategy string) {
	tag1 := comp1 + ":latest"
	tag2 := comp2 + ":latest"

	comps := []string{tag1, tag2}
	retIDs, gerr := GetImageIDForTags(comps)

	o.Expect(gerr).NotTo(o.HaveOccurred())
	g.By(fmt.Sprintf("%s  compare image - %s, %s, %s, %s", strategy, tag1, tag2, retIDs[0], retIDs[1]))
	o.Ω(len(retIDs[0])).Should(o.BeNumerically(">", 0))
	o.Ω(len(retIDs[1])).Should(o.BeNumerically(">", 0))
	o.Ω(retIDs[0]).Should(o.Equal(retIDs[1]))
}
Example #12
func (m *OneOfMatcher) Match(actual interface{}) (success bool, err error) {
	for _, value := range m.Elements {
		submatcher, elementIsMatcher := value.(types.GomegaMatcher)
		if !elementIsMatcher {
			submatcher = gomega.Equal(value)
		}

		success, err = submatcher.Match(actual)
		if success || err != nil {
			return
		}
	}
	return
}
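Because each element is wrapped in gomega.Equal only when it is not already a GomegaMatcher, this matcher can mix literal values with other matchers. A hypothetical constructor and usage; the OneOf helper below is assumed and is not part of the example:

// Hypothetical helper: only the OneOfMatcher type itself comes from the example.
func OneOf(elements ...interface{}) *OneOfMatcher {
	return &OneOfMatcher{Elements: elements}
}

// Usage: gomega.Expect(statusCode).To(OneOf(200, 201, gomega.BeNumerically(">=", 500)))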
Example #13
func assertMembersInReplica(oc *exutil.CLI, db exutil.Database, expectedReplicas int) {
	isMasterCmd := "printjson(db.isMaster())"
	getReplicaHostsCmd := "print(db.isMaster().hosts.length)"

	// the pod is running, but we need to wait until it is really ready (has become a member of the replica set)
	err := exutil.WaitForQueryOutputSatisfies(oc, db, 1*time.Minute, false, isMasterCmd, func(commandOutput string) bool {
		return commandOutput != ""
	})
	o.Expect(err).ShouldNot(o.HaveOccurred())

	isMasterOutput, _ := db.Query(oc, isMasterCmd)
	fmt.Fprintf(g.GinkgoWriter, "DEBUG: Output of the db.isMaster() command: %v\n", isMasterOutput)

	members, err := db.Query(oc, getReplicaHostsCmd)
	o.Expect(err).ShouldNot(o.HaveOccurred())
	o.Expect(members).Should(o.Equal(strconv.Itoa(expectedReplicas)))
}
Example #14
// StartJob triggers a named Jenkins job. The job can be monitored with the
// returned object.
func (j *JenkinsRef) StartJob(jobName string) *JobMon {
	lastBuildNumber, err := j.GetJobBuildNumber(jobName, time.Minute)
	o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())

	jmon := &JobMon{
		j:               j,
		lastBuildNumber: lastBuildNumber,
		buildNumber:     "",
		jobName:         jobName,
	}

	ginkgolog("Current timestamp for [%s]: %q", jobName, jmon.lastBuildNumber)
	g.By(fmt.Sprintf("Starting jenkins job: %s", jobName))
	_, status, err := j.PostXML(nil, "job/%s/build?delay=0sec", jobName)
	o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
	o.ExpectWithOffset(1, status).To(o.Equal(201))

	return jmon
}
Example #15
// MakeRequest makes an HTTP request with the given method, checks the HTTP status code, and returns the response body as a string along with the response
func MakeRequest(method, url, body string, expectedCode int) (string, *http.Response) {
	log15.Debug("MakeRequest", "verb", method, "url", url)
	// prepare the request body
	var bodyReader io.Reader
	if body != "" {
		bodyReader = strings.NewReader(body)
	}
	// make the request
	req, err := http.NewRequest(method, url, bodyReader)
	gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
	resp, err := http.DefaultClient.Do(req)
	// check the response for basic validity
	gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
	gomega.Ω(resp.StatusCode).Should(gomega.Equal(expectedCode))
	gomega.Ω(resp.Header.Get("Content-Type")).ShouldNot(gomega.BeEmpty())
	// read the response body
	defer resp.Body.Close()
	respBody, err := ioutil.ReadAll(resp.Body)
	gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
	return string(respBody), resp
}
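An illustrative call; the endpoint, payload, and expected status code below are invented for the sake of the example:

// Hypothetical usage of MakeRequest; the URL and body are placeholders.
body, resp := MakeRequest("POST", "http://localhost:8080/widgets", `{"name":"w1"}`, 201)
log15.Debug("created", "body", body, "location", resp.Header.Get("Location"))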
Example #16
func checkSingleIdle(oc *exutil.CLI, idlingFile string, resources map[string][]string, resourceName string, kind string) {
	g.By("Idling the service")
	_, err := oc.Run("idle").Args("--resource-names-file", idlingFile).Output()
	o.Expect(err).ToNot(o.HaveOccurred())

	g.By("Ensuring the scale is zero (and stays zero)")
	objName := resources[resourceName][0]
	// make sure we don't get woken up by an incorrect router health check or anything like that
	o.Consistently(func() (string, error) {
		return oc.Run("get").Args(resourceName+"/"+objName, "--output=jsonpath=\"{.spec.replicas}\"").Output()
	}, 20*time.Second, 500*time.Millisecond).Should(o.ContainSubstring("0"))

	g.By("Fetching the service and checking the annotations are present")
	serviceName := resources["service"][0]
	endpoints, err := oc.KubeREST().Endpoints(oc.Namespace()).Get(serviceName)
	o.Expect(err).NotTo(o.HaveOccurred())

	o.Expect(endpoints.Annotations).To(o.HaveKey(unidlingapi.IdledAtAnnotation))
	o.Expect(endpoints.Annotations).To(o.HaveKey(unidlingapi.UnidleTargetAnnotation))

	g.By("Checking the idled-at time")
	idledAtAnnotation := endpoints.Annotations[unidlingapi.IdledAtAnnotation]
	idledAtTime, err := time.Parse(time.RFC3339, idledAtAnnotation)
	o.Expect(err).ToNot(o.HaveOccurred())
	o.Expect(idledAtTime).To(o.BeTemporally("~", time.Now(), 5*time.Minute))

	g.By("Checking the idle targets")
	unidleTargetAnnotation := endpoints.Annotations[unidlingapi.UnidleTargetAnnotation]
	unidleTargets := []unidlingapi.RecordedScaleReference{}
	err = json.Unmarshal([]byte(unidleTargetAnnotation), &unidleTargets)
	o.Expect(err).ToNot(o.HaveOccurred())
	o.Expect(unidleTargets).To(o.Equal([]unidlingapi.RecordedScaleReference{
		{
			Replicas: 2,
			CrossGroupObjectReference: unidlingapi.CrossGroupObjectReference{
				Name: resources[resourceName][0],
				Kind: kind,
			},
		},
	}))
}
Example #17
// buildAndPushImage tries to build an image. The image is stored as an image stream tag <name>:<tag>. If
// shouldBeDenied is true, a build will be expected to fail with a denied error.
func buildAndPushImage(oc *exutil.CLI, namespace, name, tag string, shouldBeDenied bool) {
	istName := name
	if tag != "" {
		istName += ":" + tag
	}
	g.By(fmt.Sprintf("building an image %q", istName))

	bc, err := oc.REST().BuildConfigs(namespace).Get(name)
	if err == nil {
		g.By(fmt.Sprintf("changing build config %s to store result into %s", name, istName))
		o.Expect(bc.Spec.BuildSpec.Output.To.Kind).To(o.Equal("ImageStreamTag"))
		bc.Spec.BuildSpec.Output.To.Name = istName
		_, err := oc.REST().BuildConfigs(namespace).Update(bc)
		o.Expect(err).NotTo(o.HaveOccurred())
	} else {
		g.By(fmt.Sprintf("creating a new build config %s with output to %s ", name, istName))
		err = oc.Run("new-build").Args(
			"--binary",
			"--name", name,
			"--to", istName).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
	}

	tempDir, err := ioutil.TempDir("", "name-build")
	o.Expect(err).NotTo(o.HaveOccurred())

	err = createRandomBlob(path.Join(tempDir, "data"), imageSize)
	o.Expect(err).NotTo(o.HaveOccurred())
	err = ioutil.WriteFile(path.Join(tempDir, "Dockerfile"), []byte("FROM scratch\nCOPY data /data\n"), 0644)
	o.Expect(err).NotTo(o.HaveOccurred())

	err = oc.Run("start-build").Args(name, "--from-dir", tempDir, "--wait").Execute()
	if shouldBeDenied {
		o.Expect(err).To(o.HaveOccurred())
		out, err := oc.Run("logs").Args("bc/" + name).Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(out).Should(o.MatchRegexp("(?i)Failed to push image:.*denied"))
	} else {
		o.Expect(err).NotTo(o.HaveOccurred())
	}
}
Example #18
func TestMessageShipper_SuccessfullyForwardsMessages(t *testing.T) {
	gomega.RegisterTestingT(t)

	inputChannel := make(chan *message.Message)

	writerBuffer := new(bytes.Buffer)
	dummyWriter := bufio.NewWriter(writerBuffer)
	closedChannel := make(chan bool)
	dummyClient := Client{Name: "Test", Writer: dummyWriter, Closed: &closedChannel}
	dummyMetricsChannel := make(chan *Metric)

	underTest := newMessageShipper(inputChannel, &dummyClient, dummyMetricsChannel, "test")

	testMessagePayload := []byte("This is a test!")
	expectedMessagePayload := []byte("This is a test!\r\n.\r\n")
	testMessage := message.NewHeaderlessMessage(&testMessagePayload)
	underTest.messageChannel <- testMessage

	gomega.Eventually(func() []byte {
		return writerBuffer.Bytes()
	}).Should(gomega.Equal(expectedMessagePayload))
}
Example #19
func tryEchoUDP(svc *kapi.Service) error {
	rawIP := svc.Spec.ClusterIP
	o.Expect(rawIP).NotTo(o.BeEmpty(), "The service should have a cluster IP set")
	ip := net.ParseIP(rawIP)
	o.Expect(ip).NotTo(o.BeNil(), "The service should have a valid cluster IP, but %q was not valid", rawIP)

	var udpPort int
	for _, port := range svc.Spec.Ports {
		if port.Protocol == "UDP" {
			udpPort = int(port.Port)
			break
		}
	}
	o.Expect(udpPort).NotTo(o.Equal(0), "The service should have a UDP port exposed")

	// For UDP, we just drop packets on the floor rather than queue them up
	readTimeout := 5 * time.Second

	expectedBuff := []byte("It's time to UDP!\n")
	o.Eventually(func() ([]byte, error) { return tryEchoUDPOnce(ip, udpPort, expectedBuff, readTimeout) }, 2*time.Minute, readTimeout).Should(o.Equal(expectedBuff))

	return nil
}
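tryEchoUDPOnce is referenced above but not shown. A minimal sketch of what such a helper could look like, assuming it writes the payload to the service's UDP port and reads the echo back within the given timeout:

// Sketch only — the real tryEchoUDPOnce is not part of this example.
func tryEchoUDPOnce(ip net.IP, port int, payload []byte, timeout time.Duration) ([]byte, error) {
	conn, err := net.DialUDP("udp", nil, &net.UDPAddr{IP: ip, Port: port})
	if err != nil {
		return nil, err
	}
	defer conn.Close()

	if _, err := conn.Write(payload); err != nil {
		return nil, err
	}
	if err := conn.SetReadDeadline(time.Now().Add(timeout)); err != nil {
		return nil, err
	}
	buf := make([]byte, len(payload))
	n, err := conn.Read(buf)
	if err != nil {
		return nil, err
	}
	return buf[:n], nil
}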
Example #20
func (matcher *PlanMatcher) Match(actual interface{}) (bool, error) {
	actualPlan, ok := actual.(atc.Plan)
	if !ok {
		return false, fmt.Errorf("expected a %T, got a %T", matcher.ExpectedPlan, actual)
	}

	expectedStripped, expectedIDs := stripIDs(matcher.ExpectedPlan)
	actualStripped, actualIDs := stripIDs(actualPlan)

	planMatcher := gomega.Equal(expectedStripped)
	idsMatcher := gomega.ConsistOf(expectedIDs)

	matched, err := planMatcher.Match(actualStripped)
	if err != nil {
		return false, err
	}

	if !matched {
		matcher.failedMatcher = planMatcher
		matcher.failedValue = actualStripped
		return false, nil
	}

	matched, err = idsMatcher.Match(actualIDs)
	if err != nil {
		return false, err
	}

	if !matched {
		matcher.failedMatcher = idsMatcher
		matcher.failedValue = actualIDs
		return false, nil
	}

	return true, nil
}
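Recording failedMatcher and failedValue lets the matcher delegate its failure message to whichever sub-matcher rejected the plan. A sketch of how FailureMessage might use them; the real implementation is not shown here:

// Sketch only: delegate to the sub-matcher that failed.
func (matcher *PlanMatcher) FailureMessage(actual interface{}) string {
	return matcher.failedMatcher.FailureMessage(matcher.failedValue)
}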
Example #21
func replicationTestFactory(oc *exutil.CLI, template string) func() {
	return func() {
		oc.SetOutputDir(exutil.TestContext.OutputDir)
		defer cleanup(oc)

		_, err := exutil.SetupHostPathVolumes(oc.AdminKubeREST().PersistentVolumes(), oc.Namespace(), "512Mi", 1)
		o.Expect(err).NotTo(o.HaveOccurred())

		err = testutil.WaitForPolicyUpdate(oc.REST(), oc.Namespace(), "create", "templates", true)
		o.Expect(err).NotTo(o.HaveOccurred())

		err = oc.Run("new-app").Args("-f", template).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		err = oc.Run("new-app").Args("-f", helperTemplate, "-p", fmt.Sprintf("DATABASE_SERVICE_NAME=%s", helperName)).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = oc.KubeFramework().WaitForAnEndpoint(helperName)
		o.Expect(err).NotTo(o.HaveOccurred())

		tableCounter := 0
		assertReplicationIsWorking := func(masterDeployment, slaveDeployment string, slaveCount int) (exutil.Database, []exutil.Database, exutil.Database) {
			tableCounter++
			table := fmt.Sprintf("table_%0.2d", tableCounter)

			master, slaves, helper := CreateMySQLReplicationHelpers(oc.KubeREST().Pods(oc.Namespace()), masterDeployment, slaveDeployment, fmt.Sprintf("%s-1", helperName), slaveCount)
			o.Expect(exutil.WaitUntilAllHelpersAreUp(oc, []exutil.Database{master, helper})).NotTo(o.HaveOccurred())
			o.Expect(exutil.WaitUntilAllHelpersAreUp(oc, slaves)).NotTo(o.HaveOccurred())

			// Test if we can query as root
			err := oc.KubeFramework().WaitForAnEndpoint("mysql-master")
			o.Expect(err).NotTo(o.HaveOccurred())
			err = helper.TestRemoteLogin(oc, "mysql-master")
			o.Expect(err).NotTo(o.HaveOccurred())

			// Create a new table with random name
			_, err = master.Query(oc, fmt.Sprintf("CREATE TABLE %s (col1 VARCHAR(20), col2 VARCHAR(20));", table))
			o.Expect(err).NotTo(o.HaveOccurred())

			// Write new data to the table through master
			_, err = master.Query(oc, fmt.Sprintf("INSERT INTO %s (col1, col2) VALUES ('val1', 'val2');", table))
			o.Expect(err).NotTo(o.HaveOccurred())

			// Make sure data is present on master
			err = exutil.WaitForQueryOutput(oc, master, 10*time.Second, false, fmt.Sprintf("SELECT * FROM %s\\G;", table), "col1: val1\ncol2: val2")
			o.Expect(err).NotTo(o.HaveOccurred())

			// Make sure data was replicated to all slaves
			for _, slave := range slaves {
				err = exutil.WaitForQueryOutput(oc, slave, 90*time.Second, false, fmt.Sprintf("SELECT * FROM %s\\G;", table), "col1: val1\ncol2: val2")
				o.Expect(err).NotTo(o.HaveOccurred())
			}

			return master, slaves, helper
		}

		g.By("after initial deployment")
		master, _, _ := assertReplicationIsWorking("mysql-master-1", "mysql-slave-1", 1)

		g.By("after master is restarted by changing the Deployment Config")
		err = oc.Run("env").Args("dc", "mysql-master", "MYSQL_ROOT_PASSWORD=newpass").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		master, _, _ = assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		g.By("after master is restarted by deleting the pod")
		err = oc.Run("delete").Args("pod", "-l", "deployment=mysql-master-2").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		_, slaves, _ := assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		g.By("after slave is restarted by deleting the pod")
		err = oc.Run("delete").Args("pod", "-l", "deployment=mysql-slave-1").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), slaves[0].PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		pods, err := oc.KubeREST().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: exutil.ParseLabelsOrDie("deployment=mysql-slave-1")})
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(len(pods.Items)).To(o.Equal(1))

		g.By("after slave is scaled to 0 and then back to 4 replicas")
		err = oc.Run("scale").Args("dc", "mysql-slave", "--replicas=0").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		err = oc.Run("scale").Args("dc", "mysql-slave", "--replicas=4").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 4)
	}
}
Example #22
		o.Expect(err).NotTo(o.HaveOccurred())
		oc.SetOutputDir(exutil.TestContext.OutputDir)
	})

	g.Describe("being created from new-build", func() {
		g.It("should create a image via new-build", func() {
			g.By(fmt.Sprintf("calling oc new-build with Dockerfile"))
			err := oc.Run("new-build").Args("-D", "-").InputString(testDockerfile).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("starting a test build")
			bc, err := oc.REST().BuildConfigs(oc.Namespace()).Get("origin-base")
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(bc.Spec.Source.Git).To(o.BeNil())
			o.Expect(bc.Spec.Source.Dockerfile).NotTo(o.BeNil())
			o.Expect(*bc.Spec.Source.Dockerfile).To(o.Equal(testDockerfile))

			buildName := "origin-base-1"
			g.By("expecting the Dockerfile build is in Complete phase")
			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), buildName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("getting the build Docker image reference from ImageStream")
			image, err := oc.REST().ImageStreamTags(oc.Namespace()).Get("origin-base", "latest")
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(image.Image.DockerImageMetadata.Config.User).To(o.Equal("1001"))
		})

		g.It("should create a image via new-build and infer the origin tag", func() {
			g.By(fmt.Sprintf("calling oc new-build with Dockerfile that uses the same tag as the output"))
			err := oc.Run("new-build").Args("-D", "-").InputString(testDockerfile2).Execute()
Example #23
			iterations := 10
			for i := 0; i < iterations; i++ {
				o.Expect(waitForLatestCondition(oc, "history-limit", deploymentRunTimeout, deploymentReachedCompletion)).NotTo(o.HaveOccurred(),
					"the current deployment needs to have finished before attempting to trigger a new deployment through configuration change")
				e2e.Logf("%02d: triggering a new deployment with config change", i)
				out, err := oc.Run("set", "env").Args("dc/history-limit", fmt.Sprintf("A=%d", i)).Output()
				o.Expect(err).NotTo(o.HaveOccurred())
				o.Expect(out).To(o.ContainSubstring("updated"))
			}

			o.Expect(waitForLatestCondition(oc, "history-limit", deploymentRunTimeout, checkDeploymentConfigHasSynced)).NotTo(o.HaveOccurred(),
				"the controller needs to have synced with the updated deployment configuration before checking that the revision history limits are being adhered to")
			deploymentConfig, deployments, _, err := deploymentInfo(oc, "history-limit")
			o.Expect(err).NotTo(o.HaveOccurred())
			// sanity check to ensure that the following assertion on the number of old deployments is valid
			o.Expect(*deploymentConfig.Spec.RevisionHistoryLimit).To(o.Equal(int32(revisionHistoryLimit)))

			// we need to filter out any deployments that we don't care about,
			// namely the active deployment and any newer deployments
			oldDeployments := deployutil.DeploymentsForCleanup(deploymentConfig, deployments)

			// we should not have more deployments than acceptable
			o.Expect(len(oldDeployments)).To(o.BeNumerically("==", revisionHistoryLimit))

			// the deployments we continue to keep should be the latest ones
			for _, deployment := range oldDeployments {
				o.Expect(deployutil.DeploymentVersionFor(&deployment)).To(o.BeNumerically(">=", iterations-revisionHistoryLimit))
			}
		})
	})
Example #24
				result, err := CheckPageContains(oc, "dancer-mysql-example", "", pageCountFn(i))
				o.Expect(err).NotTo(o.HaveOccurred())
				o.Expect(result).To(o.BeTrue())
			}

			g.By("checking page count")
			assertPageCountIs(1)
			assertPageCountIs(2)

			g.By("modifying the source code with disabled hot deploy")
			RunInPodContainer(oc, dcLabel, modifyCommand)
			assertPageCountIs(3)

			pods, err := oc.KubeClient().Core().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: dcLabel})
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(len(pods.Items)).To(o.Equal(1))

			g.By("turning on hot-deploy")
			err = oc.Run("env").Args("rc", dcName, "PERL_APACHE2_RELOAD=true").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			err = oc.Run("scale").Args("rc", dcName, "--replicas=0").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute)
			o.Expect(err).NotTo(o.HaveOccurred())
			err = oc.Run("scale").Args("rc", dcName, "--replicas=1").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("modifying the source code with enabled hot deploy")
			RunInPodContainer(oc, dcLabel, modifyCommand)
			assertPageCountIs(1337)
		})
Example #25
		g.By(fmt.Sprintf("trying to tag a docker image exceeding limit %v", limit))
		is, err := oc.Client().ImageStreams(oc.Namespace()).Get("stream")
		o.Expect(err).NotTo(o.HaveOccurred())
		is.Spec.Tags["foo"] = imageapi.TagReference{
			Name: "foo",
			From: &kapi.ObjectReference{
				Kind: "DockerImage",
				Name: tag2Image["tag2"].DockerImageReference,
			},
			ImportPolicy: imageapi.TagImportPolicy{
				Insecure: true,
			},
		}
		_, err = oc.Client().ImageStreams(oc.Namespace()).Update(is)
		o.Expect(err).To(o.HaveOccurred())
		o.Expect(quotautil.IsErrorQuotaExceeded(err)).Should(o.Equal(true))

		g.By("re-tagging the image under different tag")
		is, err = oc.Client().ImageStreams(oc.Namespace()).Get("stream")
		o.Expect(err).NotTo(o.HaveOccurred())
		is.Spec.Tags["duplicate"] = imageapi.TagReference{
			Name: "duplicate",
			From: &kapi.ObjectReference{
				Kind: "DockerImage",
				Name: tag2Image["tag1"].DockerImageReference,
			},
			ImportPolicy: imageapi.TagImportPolicy{
				Insecure: true,
			},
		}
		_, err = oc.Client().ImageStreams(oc.Namespace()).Update(is)
Example #26
	defer g.GinkgoRecover()
	var (
		configPath = exeutil.FixturePath("fixtures", "job-controller.yaml")
		oc         = exeutil.NewCLI("job-controller", exeutil.KubeConfigPath())
	)
	g.Describe("controller", func() {
		g.It("should create and run a job in user project", func() {
			oc.SetOutputDir(exeutil.TestContext.OutputDir)
			g.By(fmt.Sprintf("creating a job from %q", configPath))
			err := oc.Run("create").Args("-f", configPath).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By(fmt.Sprintf("Waiting for pod..."))
			podNames, err := exeutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), exeutil.ParseLabelsOrDie("app=pi"), exeutil.CheckPodIsSucceededFn, 1, 2*time.Minute)
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(len(podNames)).Should(o.Equal(1))
			podName := podNames[0]

			g.By("retrieving logs from pod " + podName)
			logs, err := oc.Run("logs").Args(podName).Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(logs).Should(o.Equal("3.141592653589793238462643383279502884197169399375105820974944592307816406286208998628034825342117068"))

			g.By("checking job status")
			jobs, err := oc.KubeREST().Jobs(oc.Namespace()).List(kapi.ListOptions{LabelSelector: exeutil.ParseLabelsOrDie("app=pi")})
			o.Expect(err).NotTo(o.HaveOccurred())

			o.Expect(len(jobs.Items)).Should(o.Equal(1))
			job := jobs.Items[0]
			o.Expect(len(job.Status.Conditions)).Should(o.Equal(1))
			o.Expect(job.Status.Conditions[0].Type).Should(o.Equal(kapiextensions.JobComplete))
Example #27
while this test is running and compare results.  Restarting your docker daemon, assuming you can ping docker.io quickly, could
be a quick fix.
*/

var _ = g.BeforeSuite(func() {
	// do a pull initially just to ensure we have the latest version
	exutil.PullImage(s2iDockBldr)
	exutil.PullImage(custBldr)
	// save hex image IDs for image reset after corruption
	tags := []string{s2iDockBldr + ":latest", custBldr + ":latest"}
	hexIDs, ierr := exutil.GetImageIDForTags(tags)
	o.Expect(ierr).NotTo(o.HaveOccurred())
	for _, hexID := range hexIDs {
		g.By(fmt.Sprintf("\n%s FORCE PULL TEST:  hex id %s ", time.Now().Format(time.RFC850), hexID))
	}
	o.Expect(len(hexIDs)).To(o.Equal(2))
	resetData = map[string]string{s2iDockBldr: hexIDs[0], custBldr: hexIDs[1]}
	g.By(fmt.Sprintf("\n%s FORCE PULL TEST:  hex id for s2i/docker %s and for custom %s ", time.Now().Format(time.RFC850), hexIDs[0], hexIDs[1]))
})

// TODO this seems like a weird restriction with segregated namespaces.  provide a better explanation of why this doesn't work
// we don't run in parallel with this suite - do not want different tests tagging the same image in different ways at the same time
var _ = g.Describe("builds: serial: ForcePull from OpenShift induced builds (vs. sti)", func() {
	defer g.GinkgoRecover()
	var oc = exutil.NewCLI("force-pull-s2i", exutil.KubeConfigPath())

	g.JustBeforeEach(func() {
		g.By("waiting for builder service account")
		err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
		o.Expect(err).NotTo(o.HaveOccurred())
	})
Example #28
			err := oc.Run("create").Args("-f", buildFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			g.By("starting the source strategy build")
			err = oc.Run("start-build").Args("imagesourcebuild").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			g.By("expecting the builds to complete successfully")
			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), "imagesourcebuild-1", exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
			if err != nil {
				exutil.DumpBuildLogs("imagesourcebuild", oc)
			}
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("expecting the pod to deploy successfully")
			pods, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), imageSourceLabel, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(len(pods)).To(o.Equal(1))
			pod, err := oc.KubeREST().Pods(oc.Namespace()).Get(pods[0])
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("expecting the pod to contain the file from the input image")
			out, err := oc.Run("exec").Args(pod.Name, "-c", pod.Spec.Containers[0].Name, "--", "ls", "injected/dir").Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(out).To(o.ContainSubstring("jenkins.war"))
		})
	})
	g.Describe("build with image docker", func() {
		g.It("should complete successfully and contain the expected file", func() {
			g.By("Creating build configs for docker build")
			err := oc.Run("create").Args("-f", buildFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			g.By("starting the docker strategy build")
Example #29
func testPruneImages(oc *exutil.CLI, schemaVersion int) {
	var mediaType string
	switch schemaVersion {
	case 1:
		mediaType = schema1.MediaTypeManifest
	case 2:
		mediaType = schema2.MediaTypeManifest
	default:
		g.Fail(fmt.Sprintf("unexpected schema version %d", schemaVersion))
	}

	oc.SetOutputDir(exutil.TestContext.OutputDir)
	outSink := g.GinkgoWriter

	cleanUp := cleanUpContainer{}
	defer tearDownPruneImagesTest(oc, &cleanUp)

	dClient, err := testutil.NewDockerClient()
	o.Expect(err).NotTo(o.HaveOccurred())

	g.By(fmt.Sprintf("build two images using Docker and push them as schema %d", schemaVersion))
	imgPruneName, err := BuildAndPushImageOfSizeWithDocker(oc, dClient, "prune", "latest", testImageSize, 2, outSink, true)
	o.Expect(err).NotTo(o.HaveOccurred())
	cleanUp.imageNames = append(cleanUp.imageNames, imgPruneName)
	pruneSize, err := getRegistryStorageSize(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	imgKeepName, err := BuildAndPushImageOfSizeWithDocker(oc, dClient, "prune", "latest", testImageSize, 2, outSink, true)
	o.Expect(err).NotTo(o.HaveOccurred())
	cleanUp.imageNames = append(cleanUp.imageNames, imgKeepName)
	keepSize, err := getRegistryStorageSize(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(pruneSize < keepSize).To(o.BeTrue())

	g.By(fmt.Sprintf("ensure uploaded image is of schema %d", schemaVersion))
	imgPrune, err := oc.AsAdmin().Client().Images().Get(imgPruneName)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(imgPrune.DockerImageManifestMediaType).To(o.Equal(mediaType))
	imgKeep, err := oc.AsAdmin().Client().Images().Get(imgKeepName)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(imgKeep.DockerImageManifestMediaType).To(o.Equal(mediaType))

	g.By("prune the first image uploaded (dry-run)")
	output, err := oc.WithoutNamespace().Run("adm").Args("prune", "images", "--keep-tag-revisions=1", "--keep-younger-than=0").Output()
	o.Expect(err).NotTo(o.HaveOccurred())

	g.By("verify images, layers and configs about to be pruned")
	o.Expect(output).To(o.ContainSubstring(imgPruneName))
	if schemaVersion == 1 {
		o.Expect(output).NotTo(o.ContainSubstring(imgPrune.DockerImageMetadata.ID))
	} else {
		o.Expect(output).To(o.ContainSubstring(imgPrune.DockerImageMetadata.ID))
	}
	for _, layer := range imgPrune.DockerImageLayers {
		if !strings.Contains(output, layer.Name) {
			o.Expect(output).To(o.ContainSubstring(layer.Name))
		}
	}

	o.Expect(output).NotTo(o.ContainSubstring(imgKeepName))
	o.Expect(output).NotTo(o.ContainSubstring(imgKeep.DockerImageMetadata.ID))
	for _, layer := range imgKeep.DockerImageLayers {
		if !strings.Contains(output, layer.Name) {
			o.Expect(output).NotTo(o.ContainSubstring(layer.Name))
		}
	}

	noConfirmSize, err := getRegistryStorageSize(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(noConfirmSize).To(o.Equal(keepSize))

	g.By("prune the first image uploaded (confirm)")
	output, err = oc.WithoutNamespace().Run("adm").Args("prune", "images", "--keep-tag-revisions=1", "--keep-younger-than=0", "--confirm").Output()
	o.Expect(err).NotTo(o.HaveOccurred())

	g.By("verify images, layers and configs about to be pruned")
	o.Expect(output).To(o.ContainSubstring(imgPruneName))
	if schemaVersion == 1 {
		o.Expect(output).NotTo(o.ContainSubstring(imgPrune.DockerImageMetadata.ID))
	} else {
		o.Expect(output).To(o.ContainSubstring(imgPrune.DockerImageMetadata.ID))
	}
	for _, layer := range imgPrune.DockerImageLayers {
		if !strings.Contains(output, layer.Name) {
			o.Expect(output).To(o.ContainSubstring(layer.Name))
		}
	}

	o.Expect(output).NotTo(o.ContainSubstring(imgKeepName))
	o.Expect(output).NotTo(o.ContainSubstring(imgKeep.DockerImageMetadata.ID))
	for _, layer := range imgKeep.DockerImageLayers {
		if !strings.Contains(output, layer.Name) {
			o.Expect(output).NotTo(o.ContainSubstring(layer.Name))
		}
	}

	confirmSize, err := getRegistryStorageSize(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	g.By(fmt.Sprintf("confirming storage size: sizeOfKeepImage=%d <= sizeAfterPrune=%d < beforePruneSize=%d", imgKeep.DockerImageMetadata.Size, confirmSize, keepSize))
	o.Expect(confirmSize >= imgKeep.DockerImageMetadata.Size).To(o.BeTrue())
	o.Expect(confirmSize < keepSize).To(o.BeTrue())
	g.By(fmt.Sprintf("confirming pruned size: sizeOfPruneImage=%d <= (sizeAfterPrune=%d - sizeBeforePrune=%d)", imgPrune, keepSize, confirmSize))
	o.Expect(imgPrune.DockerImageMetadata.Size <= keepSize-confirmSize).To(o.BeTrue())
}
Example #30
func matcherToGomegaMatcher(matcher interface{}) (types.GomegaMatcher, error) {
	switch x := matcher.(type) {
	case string, int, bool, float64:
		return gomega.Equal(x), nil
	case []interface{}:
		var matchers []types.GomegaMatcher
		for _, valueI := range x {
			if subMatcher, ok := valueI.(types.GomegaMatcher); ok {
				matchers = append(matchers, subMatcher)
			} else {
				matchers = append(matchers, gomega.ContainElement(valueI))
			}
		}
		return gomega.And(matchers...), nil
	}
	matcher = sanitizeExpectedValue(matcher)
	if matcher == nil {
		return nil, fmt.Errorf("Missing Required Attribute")
	}
	matcherMap, ok := matcher.(map[string]interface{})
	if !ok {
		panic(fmt.Sprintf("Unexpected matcher type: %T\n\n", matcher))
	}
	var matchType string
	var value interface{}
	// matcherMap is expected to hold exactly one entry; take its key and value
	for matchType, value = range matcherMap {
		break
	}
	switch matchType {
	case "have-prefix":
		return gomega.HavePrefix(value.(string)), nil
	case "have-suffix":
		return gomega.HaveSuffix(value.(string)), nil
	case "match-regexp":
		return gomega.MatchRegexp(value.(string)), nil
	case "have-len":
		value = sanitizeExpectedValue(value)
		return gomega.HaveLen(value.(int)), nil
	case "contain-element":
		subMatcher, err := matcherToGomegaMatcher(value)
		if err != nil {
			return nil, err
		}
		return gomega.ContainElement(subMatcher), nil
	case "not":
		subMatcher, err := matcherToGomegaMatcher(value)
		if err != nil {
			return nil, err
		}
		return gomega.Not(subMatcher), nil
	case "consist-of":
		subMatchers, err := sliceToGomega(value)
		if err != nil {
			return nil, err
		}
		var interfaceSlice []interface{}
		for _, d := range subMatchers {
			interfaceSlice = append(interfaceSlice, d)
		}
		return gomega.ConsistOf(interfaceSlice...), nil
	case "and":
		subMatchers, err := sliceToGomega(value)
		if err != nil {
			return nil, err
		}
		return gomega.And(subMatchers...), nil
	case "or":
		subMatchers, err := sliceToGomega(value)
		if err != nil {
			return nil, err
		}
		return gomega.Or(subMatchers...), nil
	case "gt", "ge", "lt", "le":
		// Go's json encoding escapes the '>' and '<' symbols, so we accept 'gt', 'ge', 'lt', 'le' instead
		comparator := map[string]string{
			"gt": ">",
			"ge": ">=",
			"lt": "<",
			"le": "<=",
		}[matchType]
		return gomega.BeNumerically(comparator, value), nil

	default:
		return nil, fmt.Errorf("Unknown matcher: %s", matchType)

	}
}
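A hedged usage sketch: an assertion map decoded from JSON or YAML can be fed straight to matcherToGomegaMatcher. The input literal below is invented, and sliceToGomega and sanitizeExpectedValue (not shown above) are assumed to recurse into list elements and normalize JSON numbers to int, respectively:

// Illustrative input, e.g. decoded from {"and": [{"have-prefix": "api"}, {"have-len": 7}]}.
spec := map[string]interface{}{
	"and": []interface{}{
		map[string]interface{}{"have-prefix": "api"},
		map[string]interface{}{"have-len": 7},
	},
}
m, err := matcherToGomegaMatcher(spec)
if err != nil {
	panic(err)
}
matched, err := m.Match("api-gw1")
fmt.Println(matched, err) // true <nil>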