Example #1
// An unsubscribing client should not be considered for message delivery
func TestQueue_sendMessageAfterUnsubscribe_messageReceivedSuccessfully(t *testing.T) {
	// Need gomega for async testing
	gomega.RegisterTestingT(t)

	testMessagePayload := []byte("Testing!")
	expectedMessagePayload := []byte("Testing!\r\n.\r\n")
	testMessage := message.NewHeaderlessMessage(&testMessagePayload)

	dummyMetricsPipe := make(chan<- *Metric, 10)
	dummyClosingPipe := make(chan<- *string)

	underTest := newMessageQueue(TEST_QUEUE_NAME, dummyMetricsPipe, dummyClosingPipe)

	writerBuffer1 := new(bytes.Buffer)
	dummyWriter1 := bufio.NewWriter(writerBuffer1)
	closedChannel1 := make(chan bool)
	dummyClient1 := Client{Name: "Test1", Writer: dummyWriter1, Closed: &closedChannel1}

	writerBuffer2 := new(bytes.Buffer)
	dummyWriter2 := bufio.NewWriter(writerBuffer2)
	closedChannel2 := make(chan bool)
	dummyClient2 := Client{Name: "Test2", Writer: dummyWriter2, Closed: &closedChannel2}

	// Add the subscription
	underTest.AddSubscriber(&dummyClient1)
	underTest.AddSubscriber(&dummyClient2)

	// Queue the message
	underTest.Publish(testMessage)

	// Bit of a hack - only one of the subscribers will get the message,
	// and we don't know which one
	gomega.Eventually(func() []byte {
		if writerBuffer1.String() == "" {
			return writerBuffer2.Bytes()
		} else {
			return writerBuffer1.Bytes()
		}
	}).Should(gomega.Equal(expectedMessagePayload))

	// We'll be reusing these buffers
	writerBuffer1.Reset()
	writerBuffer2.Reset()

	// Close one client
	*dummyClient1.Closed <- true

	// Should remove the client from the map
	gomega.Eventually(func() bool {
		return underTest.subscribers[dummyClient1.Name] == nil
	}).Should(gomega.BeTrue())

	// Now send a message - the remaining client should receive it without issue
	underTest.Publish(testMessage)

	gomega.Eventually(func() []byte {
		return writerBuffer2.Bytes()
	}).Should(gomega.Equal(expectedMessagePayload))

}
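A note on the Eventually calls above: gomega polls the supplied function with a default timeout of one second and a default polling interval of 10 milliseconds. Both can be overridden with optional duration arguments, as some later examples do; a minimal sketch, reusing writerBuffer2 and expectedMessagePayload from the test above:

// Poll for up to 3 seconds, every 250ms, instead of gomega's defaults.
gomega.Eventually(func() []byte {
	return writerBuffer2.Bytes()
}, "3s", "250ms").Should(gomega.Equal(expectedMessagePayload))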
Example #2
func assertEnvVars(oc *exutil.CLI, buildPrefix string, varsToFind map[string]string) {

	buildList, err := oc.REST().Builds(oc.Namespace()).List(kapi.ListOptions{})
	o.Expect(err).NotTo(o.HaveOccurred())

	// Ensure that expected start-build environment variables were injected
	for _, build := range buildList.Items {
		ginkgolog("Found build: %q", build.GetName())
		if strings.HasPrefix(build.GetName(), buildPrefix) {
			envs := []kapi.EnvVar{}
			if build.Spec.Strategy.DockerStrategy != nil && build.Spec.Strategy.DockerStrategy.Env != nil {
				envs = build.Spec.Strategy.DockerStrategy.Env
			} else if build.Spec.Strategy.SourceStrategy != nil && build.Spec.Strategy.SourceStrategy.Env != nil {
				envs = build.Spec.Strategy.SourceStrategy.Env
			} else {
				continue
			}

			for k, v := range varsToFind {
				found := false
				for _, env := range envs {
					ginkgolog("Found %s=%s in build %s", env.Name, env.Value, build.GetName())
					if k == env.Name && v == env.Value {
						found = true
						break
					}
				}
				o.ExpectWithOffset(1, found).To(o.BeTrue())
			}
		}
	}
}
Example #3
// Dumps logs and triggers a Ginkgo assertion if the build did NOT succeed.
func (t *BuildResult) AssertSuccess() *BuildResult {
	if !t.BuildSuccess {
		t.DumpLogs()
	}
	o.ExpectWithOffset(1, t.BuildSuccess).To(o.BeTrue())
	return t
}
Example #4
// Dumps logs and triggers a Ginkgo assertion if the build did NOT have an error (this will not assert on timeouts)
func (t *BuildResult) AssertFailure() *BuildResult {
	if !t.BuildFailure {
		t.DumpLogs()
	}
	o.ExpectWithOffset(1, t.BuildFailure).To(o.BeTrue())
	return t
}
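Both helpers above pass an offset of 1 to ExpectWithOffset so that a failure is reported at the line that called AssertSuccess or AssertFailure, not at the Expect inside the helper. A minimal sketch of the same idea, with a hypothetical helper:

// requirePositive is a hypothetical assertion helper: the offset of 1
// shifts the reported failure location one stack frame up, to the caller.
func requirePositive(n int) {
	o.ExpectWithOffset(1, n).To(o.BeNumerically(">", 0))
}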
Example #5
// Validate create/delete of objects
func validateCreateDelete(create bool, key, out string, err error) {
	ginkgolog("\nOBJ: %s\n", out)
	if create {
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(strings.Contains(out, key)).To(o.BeTrue())
	} else {
		o.Expect(err).To(o.HaveOccurred())
	}
}
Example #6
// VerifyImagesDifferent takes the two supplied image tags and checks whether they reference different hexadecimal image IDs; the strategy parameter is logged for ginkgo debugging, and ginkgo error checking is leveraged throughout
func VerifyImagesDifferent(comp1, comp2, strategy string) {
	tag1 := comp1 + ":latest"
	tag2 := comp2 + ":latest"

	comps := []string{tag1, tag2}
	retIDs, gerr := GetImageIDForTags(comps)

	o.Expect(gerr).NotTo(o.HaveOccurred())
	g.By(fmt.Sprintf("%s  compare image - %s, %s, %s, %s", strategy, tag1, tag2, retIDs[0], retIDs[1]))
	o.Ω(len(retIDs[0])).Should(o.BeNumerically(">", 0))
	o.Ω(len(retIDs[1])).Should(o.BeNumerically(">", 0))
	o.Ω(retIDs[0] != retIDs[1]).Should(o.BeTrue())
}
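The Ω form used in VerifyImagesDifferent is gomega's alias for Expect, and Should is interchangeable with To, so the length checks above could equally be written as follows (hypothetical ID value):

id := "abc123" // hypothetical image ID
o.Expect(len(id)).To(o.BeNumerically(">", 0)) // same as o.Ω(len(id)).Should(...)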
Example #7
func TestQueue_sendMessage_generatesMetrics(t *testing.T) {
	// More async testing
	gomega.RegisterTestingT(t)

	// We should receive metrics ending in these names from a queue during
	// normal operation
	expectedMetricNames := [...]string{"messagerate", "subscribers", "pending"}

	// Mocking
	dummyMetricsChannel := make(chan *Metric)
	dummyClosingChannel := make(chan *string)

	underTest := newMessageQueue(TEST_QUEUE_NAME, dummyMetricsChannel, dummyClosingChannel)

	// After a subscriber is added, we should start receiving metrics
	dummySubscriber := Client{Closed: new(chan bool)}
	underTest.AddSubscriber(&dummySubscriber)

	seenMetricNames := make(map[string]bool)
	var seenMetricNamesLock sync.Mutex // the map is written by the goroutine below and read by Eventually
	go func() {
		for {
			metric := <-dummyMetricsChannel
			metricNameChunks := strings.Split(metric.Name, ".")
			finalMetricName := metricNameChunks[len(metricNameChunks)-1]
			seenMetricNamesLock.Lock()
			seenMetricNames[finalMetricName] = true
			seenMetricNamesLock.Unlock()
		}
	}()

	// Check we've received metrics ending in all the expected names
	// NOTE: It might take longer than the default gomega 1 second timeout to
	// receive all the metrics we're expecting
	gomega.Eventually(func() bool {
		seenMetricNamesLock.Lock()
		defer seenMetricNamesLock.Unlock()
		for _, metricName := range expectedMetricNames {
			if !seenMetricNames[metricName] {
				return false
			}
		}
		return true
	}, "5s").Should(gomega.BeTrue()) // Timeout upped to 5 seconds
}
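Example #8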
func TestConnectionManager_disconnectCommand_removesClient(t *testing.T) {
	gomega.RegisterTestingT(t)

	underTest := ConnectionManager{}

	buf := new(bytes.Buffer)
	bufWriter := bufio.NewWriter(buf)
	mockClient := NewClient("Mock", bufWriter, nil)
	closedChannel := make(chan bool, 1)
	mockClient.Closed = &closedChannel

	t.Log("Disconnecting")

	var emptyMessage []byte

	underTest.parseClientCommand([]string{"disconnect"}, &emptyMessage, mockClient)

	gomega.Eventually(func() bool {
		closed := <-*(mockClient.Closed)
		return closed
	}).Should(gomega.BeTrue())
}
Example #9
// Reusing the same (closed) channel name shouldn't make us crash.
// Wait between sub/unsub operations to make sure metrics are sent properly.
func TestQueueManager_queuesClosed_removedFromMap(t *testing.T) {
	config := Config{}
	SetConfig(&config)

	underTest := newQueueManager()

	// Create dummy clients
	dummyClient := Client{}
	dummyClient.Name = "Dummy"
	closedChannel := make(chan bool)
	dummyClient.Closed = &closedChannel

	// Subscribe
	underTest.Subscribe(TestQueueName, &dummyClient)

	// Close the queue
	*dummyClient.Closed <- true

	// Check that TestQueueName is removed from the QueueManager map
	gomega.Eventually(func() bool {
		return underTest.queues[TestQueueName] == nil
	}).Should(gomega.BeTrue())
}
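gomega also offers the inverse of Eventually: Consistently asserts that a condition keeps holding for a whole polling window rather than succeeding on the first true result. A hedged sketch, reusing underTest from the test above:

// After removal, the entry should stay gone for the whole polling window.
gomega.Consistently(func() bool {
	return underTest.queues[TestQueueName] == nil
}, "500ms").Should(gomega.BeTrue())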
Example #10
		subject = EqualTo(true)
	})

	It("should return a string", func() {
		g.Expect(subject.String()).To(g.Equal(`=true`))
	})

	It("should have an ID", func() {
		g.Expect(subject.CRC64()).To(g.Equal(uint64(971422227693832935)))
		g.Expect(EqualTo(27).CRC64()).To(g.Equal(uint64(9340596851114011254)))
	})

	It("should check equality", func() {
		g.Expect(subject.Match(nil)).To(g.BeFalse())
		g.Expect(subject.Match(1)).To(g.BeFalse())
		g.Expect(subject.Match(true)).To(g.BeTrue())
		g.Expect(subject.Match(false)).To(g.BeFalse())
	})
})

var _ = Describe("NumericGreater", func() {
	var subject *NumericGreater
	var _ Condition = subject

	BeforeEach(func() {
		subject = GreaterThan(5.1)
	})

	It("should return a string", func() {
		g.Expect(subject.String()).To(g.Equal(`>5.1`))
	})
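The BeforeEach blocks above rebuild subject before every It, so no spec can leak state into the next one. A minimal sketch of that lifecycle, with hypothetical names and the same g-for-gomega alias used by the surrounding examples:

var _ = Describe("lifecycle sketch", func() {
	var counter int

	BeforeEach(func() {
		counter = 0 // runs again before each It below
	})

	It("mutates freely", func() {
		counter++
		g.Expect(counter).To(g.Equal(1))
	})

	It("still starts fresh", func() {
		g.Expect(counter).To(g.Equal(0)) // the previous spec's mutation is gone
	})
})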
Example #11
var _ = g.Describe("builds: image source", func() {
	defer g.GinkgoRecover()
	var (
		buildFixture = exutil.FixturePath("fixtures", "test-build-hello-openshift.yaml")
		helloBuilder = exutil.FixturePath("fixtures", "hello-builder")
		oc           = exutil.NewCLI("build-image-source", exutil.KubeConfigPath())
	)

	g.JustBeforeEach(func() {
		g.By("waiting for builder service account")
		err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
		o.Expect(err).NotTo(o.HaveOccurred())
	})

	g.Describe("build with image source", func() {
		g.It("should complete successfully and deploy resulting image", func() {
			g.By("Creating build configs, deployment config, and service for hello-openshift")
			err := oc.Run("create").Args("-f", buildFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			g.By("starting the builder image build with a directory")
			err = oc.Run("start-build").Args("hello-builder", fmt.Sprintf("--from-dir=%s", helloBuilder)).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			g.By("expect the builds to complete successfully and deploy a hello-openshift pod")
			success, err := images.CheckPageContains(oc, "hello-openshift", "", "Hello OpenShift!")
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(success).To(o.BeTrue())
		})

	})
})
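The JustBeforeEach above runs after every BeforeEach in the containing Describe chain and just before each It, which is why it can rely on state prepared by outer blocks. A minimal sketch of that ordering, with hypothetical names (g is ginkgo and o is gomega, as above):

var _ = g.Describe("ordering sketch", func() {
	var name string

	g.BeforeEach(func() {
		name = "prepared" // outer setup runs first
	})

	g.JustBeforeEach(func() {
		// runs after all BeforeEach blocks, immediately before each It
		o.Expect(name).NotTo(o.BeEmpty())
	})

	g.It("sees the prepared state", func() {
		o.Expect(name).To(o.Equal("prepared"))
	})
})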
Example #12
func testPruneImages(oc *exutil.CLI, schemaVersion int) {
	var mediaType string
	switch schemaVersion {
	case 1:
		mediaType = schema1.MediaTypeManifest
	case 2:
		mediaType = schema2.MediaTypeManifest
	default:
		g.Fail(fmt.Sprintf("unexpected schema version %d", schemaVersion))
	}

	oc.SetOutputDir(exutil.TestContext.OutputDir)
	outSink := g.GinkgoWriter

	cleanUp := cleanUpContainer{}
	defer tearDownPruneImagesTest(oc, &cleanUp)

	dClient, err := testutil.NewDockerClient()
	o.Expect(err).NotTo(o.HaveOccurred())

	g.By(fmt.Sprintf("build two images using Docker and push them as schema %d", schemaVersion))
	imgPruneName, err := BuildAndPushImageOfSizeWithDocker(oc, dClient, "prune", "latest", testImageSize, 2, outSink, true)
	o.Expect(err).NotTo(o.HaveOccurred())
	cleanUp.imageNames = append(cleanUp.imageNames, imgPruneName)
	pruneSize, err := getRegistryStorageSize(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	imgKeepName, err := BuildAndPushImageOfSizeWithDocker(oc, dClient, "prune", "latest", testImageSize, 2, outSink, true)
	o.Expect(err).NotTo(o.HaveOccurred())
	cleanUp.imageNames = append(cleanUp.imageNames, imgKeepName)
	keepSize, err := getRegistryStorageSize(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(pruneSize < keepSize).To(o.BeTrue())

	g.By(fmt.Sprintf("ensure uploaded image is of schema %d", schemaVersion))
	imgPrune, err := oc.AsAdmin().Client().Images().Get(imgPruneName)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(imgPrune.DockerImageManifestMediaType).To(o.Equal(mediaType))
	imgKeep, err := oc.AsAdmin().Client().Images().Get(imgKeepName)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(imgKeep.DockerImageManifestMediaType).To(o.Equal(mediaType))

	g.By("prune the first image uploaded (dry-run)")
	output, err := oc.WithoutNamespace().Run("adm").Args("prune", "images", "--keep-tag-revisions=1", "--keep-younger-than=0").Output()
	o.Expect(err).NotTo(o.HaveOccurred())

	g.By("verify images, layers and configs about to be pruned")
	o.Expect(output).To(o.ContainSubstring(imgPruneName))
	if schemaVersion == 1 {
		o.Expect(output).NotTo(o.ContainSubstring(imgPrune.DockerImageMetadata.ID))
	} else {
		o.Expect(output).To(o.ContainSubstring(imgPrune.DockerImageMetadata.ID))
	}
	for _, layer := range imgPrune.DockerImageLayers {
		if !strings.Contains(output, layer.Name) {
			o.Expect(output).To(o.ContainSubstring(layer.Name))
		}
	}

	o.Expect(output).NotTo(o.ContainSubstring(imgKeepName))
	o.Expect(output).NotTo(o.ContainSubstring(imgKeep.DockerImageMetadata.ID))
	for _, layer := range imgKeep.DockerImageLayers {
		if !strings.Contains(output, layer.Name) {
			o.Expect(output).NotTo(o.ContainSubstring(layer.Name))
		}
	}

	noConfirmSize, err := getRegistryStorageSize(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(noConfirmSize).To(o.Equal(keepSize))

	g.By("prune the first image uploaded (confirm)")
	output, err = oc.WithoutNamespace().Run("adm").Args("prune", "images", "--keep-tag-revisions=1", "--keep-younger-than=0", "--confirm").Output()
	o.Expect(err).NotTo(o.HaveOccurred())

	g.By("verify images, layers and configs about to be pruned")
	o.Expect(output).To(o.ContainSubstring(imgPruneName))
	if schemaVersion == 1 {
		o.Expect(output).NotTo(o.ContainSubstring(imgPrune.DockerImageMetadata.ID))
	} else {
		o.Expect(output).To(o.ContainSubstring(imgPrune.DockerImageMetadata.ID))
	}
	for _, layer := range imgPrune.DockerImageLayers {
		if !strings.Contains(output, layer.Name) {
			o.Expect(output).To(o.ContainSubstring(layer.Name))
		}
	}

	o.Expect(output).NotTo(o.ContainSubstring(imgKeepName))
	o.Expect(output).NotTo(o.ContainSubstring(imgKeep.DockerImageMetadata.ID))
	for _, layer := range imgKeep.DockerImageLayers {
		if !strings.Contains(output, layer.Name) {
			o.Expect(output).NotTo(o.ContainSubstring(layer.Name))
		}
	}

	confirmSize, err := getRegistryStorageSize(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	g.By(fmt.Sprintf("confirming storage size: sizeOfKeepImage=%d <= sizeAfterPrune=%d < beforePruneSize=%d", imgKeep.DockerImageMetadata.Size, confirmSize, keepSize))
	o.Expect(confirmSize >= imgKeep.DockerImageMetadata.Size).To(o.BeTrue())
	o.Expect(confirmSize < keepSize).To(o.BeTrue())
	g.By(fmt.Sprintf("confirming pruned size: sizeOfPruneImage=%d <= (sizeAfterPrune=%d - sizeBeforePrune=%d)", imgPrune, keepSize, confirmSize))
	o.Expect(imgPrune.DockerImageMetadata.Size <= keepSize-confirmSize).To(o.BeTrue())
}
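A pattern in the layer loops above is easy to misread: the matcher is only invoked once strings.Contains has already failed, so a passing run never funnels every layer name through gomega, and a failure message names exactly the offending layer. The shape of the pattern, with hypothetical names:

for _, layer := range layerNames { // hypothetical []string of layer names
	if !strings.Contains(output, layer) {
		// Only reached on mismatch; the matcher fails and reports the layer.
		o.Expect(output).To(o.ContainSubstring(layer))
	}
}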
Example #13
				err = oc.Run("rsync").Args(
					fmt.Sprintf("%s:/tmp/image-streams/", podName),
					tempDir,
					fmt.Sprintf("--strategy=%s", strategy)).Execute()

				g.By(fmt.Sprintf("Verifying that files were copied to the local directory"))
				files, err := ioutil.ReadDir(tempDir)
				o.Expect(err).NotTo(o.HaveOccurred())
				found := false
				for _, f := range files {
					if strings.Contains(f.Name(), "application-template-stibuild.json") {
						found = true
						break
					}
				}
				o.Expect(found).To(o.BeTrue())

				g.By(fmt.Sprintf("Copying files from container to local directory with --delete: oc rsync %s:/tmp/image-streams/ %s --strategy=%s", podName, tempDir, strategy))
				originalName := "application-template-stibuild.json"
				modifiedName := "application-template-stirenamed.json"
				err = os.Rename(filepath.Join(tempDir, originalName), filepath.Join(tempDir, modifiedName))
				o.Expect(err).NotTo(o.HaveOccurred())

				err = oc.Run("rsync").Args(
					fmt.Sprintf("%s:/tmp/image-streams/", podName),
					tempDir,
					"--delete",
					fmt.Sprintf("--strategy=%s", strategy)).Execute()
				g.By(fmt.Sprintf("Verifying that the expected files are in the local directory"))
				o.Expect(err).NotTo(o.HaveOccurred())
				// After the copy with --delete, the file with 'modifiedName' should have been deleted
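The snippet is cut off after that comment; a hedged sketch of how the post-condition might be verified, reusing tempDir and modifiedName from above:

				// The renamed file no longer exists in the container, so --delete
				// should have removed the local copy.
				files, err = ioutil.ReadDir(tempDir)
				o.Expect(err).NotTo(o.HaveOccurred())
				for _, f := range files {
					o.Expect(f.Name()).NotTo(o.Equal(modifiedName))
				}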
Example #14
	. "github.com/onsi/ginkgo"
	g "github.com/onsi/gomega"
)

var _ = Describe("Ints64", func() {
	var subject Ints64

	BeforeEach(func() {
		subject = SortInts64(4, 6, 2)
	})

	It("should normalize", func() {
		g.Expect(subject).To(g.Equal(Ints64{2, 4, 6}))
		g.Expect(subject.crc64('x')).To(g.Equal(uint64(6934466117131854228)))
	})

	It("should check if exists", func() {
		g.Expect(subject.Exists(1)).To(g.BeFalse())
		g.Expect(subject.Exists(2)).To(g.BeTrue())
		g.Expect(subject.Exists(3)).To(g.BeFalse())
		g.Expect(subject.Exists(4)).To(g.BeTrue())
	})

	It("should check for intersections", func() {
		g.Expect(subject.Inter(SortInts64(3))).To(g.BeFalse())
		g.Expect(subject.Inter(SortInts64(3, 5))).To(g.BeFalse())
		g.Expect(subject.Inter(SortInts64(3, 4, 5, 7))).To(g.BeTrue())
	})

})
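Example #15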
			server.SetResponseJson(200, createMockVirtualSubnetsPage(VirtualSubnet{Name: networkSpec.Name}))
			subnets, err := client.VirtualSubnets.GetAll(projectId, &VirtualSubnetGetOptions{})
			ginkgo.GinkgoT().Log(err)

			gomega.Expect(err).Should(gomega.BeNil())
			gomega.Expect(subnets).ShouldNot(gomega.BeNil())

			var found bool
			for _, subnet := range subnets.Items {
				if subnet.Name == networkSpec.Name && subnet.ID == task.Entity.ID {
					found = true
					break
				}
			}
			gomega.Expect(found).Should(gomega.BeTrue())

			mockTask = createMockTask("DELETE_VIRTUAL_NETWORK", "COMPLETED")
			server.SetResponseJson(200, mockTask)
			ginkgo.GinkgoT().Log(err)

			task, err = client.VirtualSubnets.Delete(task.Entity.ID)
			gomega.Expect(err).Should(gomega.BeNil())
			task, err = client.Tasks.Wait(task.ID)

			gomega.Expect(err).Should(gomega.BeNil())
			gomega.Expect(task).ShouldNot(gomega.BeNil())
			gomega.Expect(task.Operation).Should(gomega.Equal("DELETE_VIRTUAL_NETWORK"))
			gomega.Expect(task.State).Should(gomega.Equal("COMPLETED"))
		})

		ginkgo.It("Get virtual subnet with the given name succeeds", func() {
Example #16
		g.By("kick off the build for the jenkins ephermeral and application templates")
		tag := []string{localPluginSnapshotImage}
		hexIDs, err := exutil.DumpAndReturnTagging(tag)

		// If the user has expressed an interest in local plugin testing by setting the
		// SNAPSHOT_JENKINS_IMAGE environment variable, try to use the local image. Inform them
		// about which image is being used in case their test fails.
		snapshotImagePresent := len(hexIDs) > 0 && err == nil
		useSnapshotImage := os.Getenv(useLocalPluginSnapshotEnvVarName) != ""

		//TODO disabling oauth until we can update getAdminPassword path to handle oauth (perhaps borrow from oauth integration tests)
		newAppArgs := []string{exutil.FixturePath("..", "..", "examples", "jenkins", "jenkins-ephemeral-template.json"), "-p", "ENABLE_OAUTH=false"}

		if useSnapshotImage {
			g.By("Creating a snapshot Jenkins imagestream and overridding the default Jenkins imagestream")
			o.Expect(snapshotImagePresent).To(o.BeTrue())

			ginkgolog("")
			ginkgolog("")
			ginkgolog("IMPORTANT: You are testing a local jenkins snapshot image.")
			ginkgolog("In order to target the official image stream, you must unset %s before running extended tests.", useLocalPluginSnapshotEnvVarName)
			ginkgolog("")
			ginkgolog("")

			// Create an imagestream based on the Jenkins plugin's PR-Testing image (https://github.com/openshift/jenkins-plugin/blob/master/PR-Testing/README).
			snapshotImageStream := "jenkins-plugin-snapshot-test"
			err = oc.Run("new-build").Args("-D", fmt.Sprintf("FROM %s", localPluginSnapshotImage), "--to", snapshotImageStream).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			err = oc.Run("logs").Args("-f", "bc/jenkins-plugin-snapshot-test").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(err).NotTo(o.HaveOccurred())
	})
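Example #17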

	g.Describe("oc start-build source-build --wait", func() {
		g.It("Source: should start a build and wait for the build failed and build pod being killed by kubelet", func() {

			g.By("calling oc create source-build")
			err := oc.Run("create").Args("-f", sourceFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("starting the source build with --wait flag and short timeout")
			br, err := exutil.StartBuildAndWait(oc, "source-build", "--wait")
			o.Expect(br.StartBuildErr).To(o.HaveOccurred()) // start-build should detect the build error

			g.By("verifying the build status")
			o.Expect(br.BuildAttempt).To(o.BeTrue())                                            // the build should have been attempted
			o.Expect(br.Build.Status.Phase).Should(o.BeEquivalentTo(buildapi.BuildPhaseFailed)) // the build should have failed

			g.By("verifying the build pod status")
			pod, err := oc.KubeREST().Pods(oc.Namespace()).Get(buildapi.GetBuildPodName(br.Build))
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(pod.Status.Phase).Should(o.BeEquivalentTo(kapi.PodFailed))
			o.Expect(pod.Status.Reason).Should(o.ContainSubstring("DeadlineExceeded"))

		})
	})

	g.Describe("oc start-build docker-build --wait", func() {
		g.It("Docker: should start a build and wait for the build failed and build pod being killed by kubelet", func() {

			g.By("calling oc create docker-build")
Example #18
				fmt.Sprintf("openshiftExec( namespace:'PROJECT_NAME', pod: '%s', container: '%s', command: [ 'echo', 'hello', 'world', '4' ] )", targetPod.Name, targetContainer.Name),
				fmt.Sprintf("openshiftExec( namespace:'PROJECT_NAME', pod: '%s', container: '%s', command: 'echo', arguments : [ 'hello', 'world', '5' ] )", targetPod.Name, targetContainer.Name),
				fmt.Sprintf("openshiftExec( namespace:'PROJECT_NAME', pod: '%s', container: '%s', command: 'echo', arguments : [ [ value: 'hello' ], [ value : 'world' ], [ value : '6' ] ] )", targetPod.Name, targetContainer.Name),
				"}",
			)

			jobName := "test-exec-dsl-job"
			j.createItem(jobName, data)
			monitor := j.startJob(jobName)
			err = monitor.await(10 * time.Minute)
			o.Expect(err).NotTo(o.HaveOccurred())

			log, err := j.getLastJobConsoleLogs(jobName)
			o.Expect(err).NotTo(o.HaveOccurred())
			ginkgolog("Job logs>>\n%s\n\n", log)

			o.Expect(strings.Contains(log, "hello world 1")).To(o.BeTrue())
			o.Expect(strings.Contains(log, "hello world 2")).To(o.BeTrue())
			o.Expect(strings.Contains(log, "hello world 3")).To(o.BeTrue())
			o.Expect(strings.Contains(log, "hello world 4")).To(o.BeTrue())
			o.Expect(strings.Contains(log, "hello world 5")).To(o.BeTrue())
			o.Expect(strings.Contains(log, "hello world 6")).To(o.BeTrue())
		})

		g.It("jenkins-plugin test multitag", func() {

			loadFixture(oc, "multitag-template.json")
			err := wait.Poll(10*time.Second, 1*time.Minute, func() (bool, error) {
				_, err := oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag3", "orig")
				if err != nil {
					return false, nil
				}
Example #19
				// build to become running are Pending, verify the first build is still
				// running (so the other two builds are started in parallel with the first
				// build).
				// TODO: This might introduce flakes in case the first build completes
				// sooner or fails.
				if build.Status.Phase == buildapi.BuildPhasePending {
					c := buildclient.NewOSClientBuildClient(oc.Client())
					firstBuildRunning := false
					_, err := buildutil.BuildConfigBuilds(c, oc.Namespace(), bcName, func(b buildapi.Build) bool {
						if b.Name == startedBuilds[0] && b.Status.Phase == buildapi.BuildPhaseRunning {
							firstBuildRunning = true
						}
						return false
					})
					o.Expect(err).NotTo(o.HaveOccurred())
					o.Expect(firstBuildRunning).Should(o.BeTrue())
					counter++
				}
				// If the build fails or completes prematurely, fail the test
				o.Expect(buildutil.IsBuildComplete(build)).Should(o.BeFalse())
				if counter == 2 {
					break
				}
			}
			o.Expect(counter).Should(o.BeEquivalentTo(2))
		})
	})

	g.Describe("build configuration with Serial build run policy", func() {
		g.It("runs the builds in serial order", func() {
			g.By("starting multiple builds")
Example #20
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("ensuring no scale up of the deployment happens")
			wait.Poll(100*time.Millisecond, 10*time.Second, func() (bool, error) {
				rc, err := oc.KubeREST().ReplicationControllers(oc.Namespace()).Get("deployment-test-1")
				o.Expect(err).NotTo(o.HaveOccurred())
				o.Expect(rc.Spec.Replicas).Should(o.BeEquivalentTo(0))
				o.Expect(rc.Status.Replicas).Should(o.BeEquivalentTo(0))
				return false, nil
			})

			g.By("verifying the scale is updated on the deployment config")
			config, err := oc.REST().DeploymentConfigs(oc.Namespace()).Get("deployment-test")
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(config.Spec.Replicas).Should(o.BeEquivalentTo(1))
			o.Expect(config.Spec.Test).Should(o.BeTrue())

			g.By("deploying a few more times")
			for i := 0; i < 3; i++ {
				out, err = oc.Run("deploy").Args("--latest", "--follow", "deployment-test").Output()
				o.Expect(err).NotTo(o.HaveOccurred())

				g.By("verifying the deployment is marked complete and scaled to zero")
				o.Expect(waitForLatestCondition(oc, "deployment-test", deploymentRunTimeout, deploymentReachedCompletion)).NotTo(o.HaveOccurred())

				g.By(fmt.Sprintf("checking the logs for substrings\n%s", out))
				o.Expect(out).To(o.ContainSubstring(fmt.Sprintf("deployment-test-%d up to 1", i+2)))
				o.Expect(out).To(o.ContainSubstring("--> pre: Success"))
				o.Expect(out).To(o.ContainSubstring("test pre hook executed"))
				o.Expect(out).To(o.ContainSubstring("--> Success"))
			}
Example #21
				err = oc.Run("rsync").Args(
					fmt.Sprintf("%s:/tmp/image-streams/", podName),
					tempDir,
					fmt.Sprintf("--strategy=%s", strategy)).Execute()

				g.By(fmt.Sprintf("Verifying that files were copied to the local directory"))
				files, err := ioutil.ReadDir(tempDir)
				o.Expect(err).NotTo(o.HaveOccurred())
				found := false
				for _, f := range files {
					if strings.Contains(f.Name(), "application-template-stibuild.json") {
						found = true
						break
					}
				}
				o.Expect(found).To(o.BeTrue())

				g.By("Getting an error if copying to a destination directory where there is no write permission")
				result, err = oc.Run("rsync").Args(
					sourcePath1,
					fmt.Sprintf("%s:/", podName),
					fmt.Sprintf("--strategy=%s", strategy)).Output()
				o.Expect(err).To(o.HaveOccurred())
			}
		}

		for _, strategy := range strategies {
			g.It(fmt.Sprintf("should copy files with the %s strategy", strategy), testRsyncFunc(strategy))
		}
	})
})
Example #22
	var subject Rule

	BeforeEach(func() {
		subject = CheckFact(FactKey(33), OneOf([]int64{3, 2, 1}))
	})

	It("should return a string", func() {
		g.Expect(subject.String()).To(g.Equal(`[33]+[1 2 3]`))
	})

	It("should have an ID", func() {
		g.Expect(subject.crc64()).To(g.Equal(uint64(3048486384098978521)))
	})

	It("should perform", func() {
		g.Expect(subject.perform(mockFact{FactKey(33): []int64{1}}, NewState())).To(g.BeTrue())
		g.Expect(subject.perform(mockFact{FactKey(33): []int64{4}}, NewState())).To(g.BeFalse())
		g.Expect(subject.perform(mockFact{FactKey(34): []int64{1}}, NewState())).To(g.BeFalse())
	})

	It("should capture state", func() {
		state := NewState()
		g.Expect(subject.perform(mockFact{FactKey(33): []int64{1}}, state)).To(g.BeTrue())
		g.Expect(state.rules).To(g.Equal(map[uint64]bool{
			3048486384098978521: true,
		}))
		g.Expect(state.facts).To(g.HaveLen(1))
		g.Expect(state.facts).To(g.HaveKey(FactKey(33)))
	})
})
Example #23
			g.By("waiting for build to finish")
			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), dcName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("waiting for endpoint")
			err = oc.KubeFramework().WaitForAnEndpoint("cakephp-mysql-example")
			o.Expect(err).NotTo(o.HaveOccurred())

			assertPageCountIs := func(i int) {
				_, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
				o.Expect(err).NotTo(o.HaveOccurred())

				result, err := CheckPageContains(oc, "cakephp-mysql-example", "", pageCountFn(i))
				o.Expect(err).NotTo(o.HaveOccurred())
				o.Expect(result).To(o.BeTrue())
			}

			g.By("checking page count")

			assertPageCountIs(1)
			assertPageCountIs(2)

			g.By("modifying the source code with disabled hot deploy")
			RunInPodContainer(oc, dcLabel, modifyCommand)
			g.By("checking page count after modifying the source code")
			assertPageCountIs(1337)
		})
	})
})
Example #24
		oc                 = exutil.NewCLI("local-quota", exutil.KubeConfigPath())
		emptyDirPodFixture = exutil.FixturePath("..", "..", "examples", "hello-openshift", "hello-pod.json")
	)

	g.Describe("FSGroup local storage quota", func() {
		g.It("should be applied to XFS filesystem when a pod is created", func() {
			oc.SetOutputDir(exutil.TestContext.OutputDir)
			project := oc.Namespace()

			// Verify volDir is on XFS; if not, this test can't pass:
			volDir := os.Getenv(volDirEnvVar)
			g.By(fmt.Sprintf("make sure volume directory (%s) is on an XFS filesystem", volDir))
			o.Expect(volDir).NotTo(o.Equal(""))
			args := []string{"-f", "-c", "'%T'", volDir}
			outBytes, _ := exec.Command("stat", args...).Output()
			o.Expect(strings.Contains(string(outBytes), "xfs")).To(o.BeTrue())

			g.By("lookup test projects fsGroup ID")
			fsGroup, err := lookupFSGroup(oc, project)
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("create hello-openshift pod with emptyDir volume")
			_, createPodErr := oc.Run("create").Args("-f", emptyDirPodFixture).Output()
			o.Expect(createPodErr).NotTo(o.HaveOccurred())

			g.By("wait for XFS quota to be applied and verify")
			lookupQuotaErr := waitForQuotaToBeApplied(oc, fsGroup, volDir)
			o.Expect(lookupQuotaErr).NotTo(o.HaveOccurred())
		})
	})
})
Example #25
			g.By("expecting the mongodb pod is running")
			podNames, err := exutil.WaitForPods(
				oc.KubeREST().Pods(oc.Namespace()),
				exutil.ParseLabelsOrDie("name=mongodb"),
				exutil.CheckPodIsRunningFn,
				1,
				1*time.Minute,
			)
			o.Expect(err).ShouldNot(o.HaveOccurred())
			o.Expect(podNames).Should(o.HaveLen(1))

			g.By("expecting the mongodb service is answering for ping")
			mongo := db.NewMongoDB(podNames[0])
			ok, err := mongo.IsReady(oc)
			o.Expect(err).ShouldNot(o.HaveOccurred())
			o.Expect(ok).Should(o.BeTrue())

			g.By("expecting that we can insert a new record")
			result, err := mongo.Query(oc, `db.foo.save({ "status": "passed" })`)
			o.Expect(err).ShouldNot(o.HaveOccurred())
			o.Expect(result).Should(o.ContainSubstring(`WriteResult({ "nInserted" : 1 })`))

			g.By("expecting that we can read a record")
			findCmd := "printjson(db.foo.find({}, {_id: 0}).toArray())" // don't include the _id field in the output because it changes every time
			result, err = mongo.Query(oc, findCmd)
			o.Expect(err).ShouldNot(o.HaveOccurred())
			o.Expect(result).Should(o.ContainSubstring(`{ "status" : "passed" }`))
		})
	})

})
Example #26
		g.By("kick off the build for the jenkins ephermeral and application templates")
		tag := []string{localPluginSnapshotImage}
		hexIDs, err := exutil.DumpAndReturnTagging(tag)

		// If the user has expressed an interest in local plugin testing by setting the
		// SNAPSHOT_JENKINS_IMAGE environment variable, try to use the local image. Inform them
		// about which image is being used in case their test fails.
		snapshotImagePresent := len(hexIDs) > 0 && err == nil
		useSnapshotImage := os.Getenv(useLocalPluginSnapshotEnvVarName) != ""

		//TODO disabling oauth until we can update getAdminPassword path to handle oauth (perhaps borrow from oauth integration tests)
		newAppArgs := []string{exutil.FixturePath("..", "..", "examples", "jenkins", "jenkins-ephemeral-template.json"), "-p", "ENABLE_OAUTH=false"}

		if useSnapshotImage {
			g.By("Creating a snapshot Jenkins imagestream and overridding the default Jenkins imagestream")
			o.Expect(snapshotImagePresent).To(o.BeTrue())

			ginkgolog("")
			ginkgolog("")
			ginkgolog("IMPORTANT: You are testing a local jenkins snapshot image.")
			ginkgolog("In order to target the official image stream, you must unset %s before running extended tests.", useLocalPluginSnapshotEnvVarName)
			ginkgolog("")
			ginkgolog("")

			// Create an imagestream based on the Jenkins plugin's PR-Testing image (https://github.com/openshift/jenkins-plugin/blob/master/PR-Testing/README).
			snapshotImageStream := "jenkins-plugin-snapshot-test"
			err = oc.Run("new-build").Args("-D", fmt.Sprintf("FROM %s", localPluginSnapshotImage), "--to", snapshotImageStream).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			err = oc.Run("logs").Args("-f", "bc/jenkins-plugin-snapshot-test").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())