Example #1
// NewRef creates a Jenkins reference from an OC client.
func NewRef(oc *exutil.CLI) *JenkinsRef {
	g.By("get ip and port for jenkins service")
	serviceIP, err := oc.Run("get").Args("svc", "jenkins", "--config", exutil.KubeConfigPath()).Template("{{.spec.clusterIP}}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	port, err := oc.Run("get").Args("svc", "jenkins", "--config", exutil.KubeConfigPath()).Template("{{ $x := index .spec.ports 0}}{{$x.port}}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())

	g.By("get admin password")
	password := GetAdminPassword(oc)
	o.Expect(password).ShouldNot(o.BeEmpty())

	j := &JenkinsRef{
		oc:        oc,
		host:      serviceIP,
		port:      port,
		namespace: oc.Namespace(),
		password:  password,
	}
	return j
}
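
The fields captured by NewRef are what callers use to reach the service. As a short illustration, a hypothetical convenience method (an assumption, not part of the original snippet) could combine them into a base URL:

// BuildURL is a hypothetical helper (not in the original source) that joins
// the host and port captured by NewRef into a reachable Jenkins endpoint.
func (j *JenkinsRef) BuildURL(path string) string {
	return fmt.Sprintf("http://%s:%s/%s", j.host, j.port, path)
}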
Example #2
func tryEchoUDP(svc *kapi.Service) error {
	rawIP := svc.Spec.ClusterIP
	o.Expect(rawIP).NotTo(o.BeEmpty(), "The service should have a cluster IP set")
	ip := net.ParseIP(rawIP)
	o.Expect(ip).NotTo(o.BeNil(), "The service should have a valid cluster IP, but %q was not valid", rawIP)

	var udpPort int
	for _, port := range svc.Spec.Ports {
		if port.Protocol == "UDP" {
			udpPort = int(port.Port)
			break
		}
	}
	o.Expect(udpPort).NotTo(o.Equal(0), "The service should have a UDP port exposed")

	// For UDP, we just drop packets on the floor rather than queue them up
	readTimeout := 5 * time.Second

	expectedBuff := []byte("It's time to UDP!\n")
	o.Eventually(func() ([]byte, error) { return tryEchoUDPOnce(ip, udpPort, expectedBuff, readTimeout) }, 2*time.Minute, readTimeout).Should(o.Equal(expectedBuff))

	return nil
}
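
tryEchoUDPOnce is referenced above but not shown. A minimal sketch of what such a helper could look like (an assumption, not the original implementation): dial the service, write the payload, and read the echo back under a deadline, so dropped packets surface as a timeout error that o.Eventually simply retries past.

// tryEchoUDPOnce (sketch): send payload to ip:port over UDP and return
// whatever comes back before readTimeout expires.
func tryEchoUDPOnce(ip net.IP, port int, payload []byte, readTimeout time.Duration) ([]byte, error) {
	conn, err := net.DialUDP("udp", nil, &net.UDPAddr{IP: ip, Port: port})
	if err != nil {
		return nil, err
	}
	defer conn.Close()
	if _, err := conn.Write(payload); err != nil {
		return nil, err
	}
	if err := conn.SetReadDeadline(time.Now().Add(readTimeout)); err != nil {
		return nil, err
	}
	buf := make([]byte, len(payload))
	n, err := conn.Read(buf)
	if err != nil {
		return nil, err
	}
	return buf[:n], nil
}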
Example #3
				_, stderr, err := exutil.StartBuild(oc, "sample-build", "--wait")
				o.Expect(err).To(o.HaveOccurred())
				o.Expect(stderr).Should(o.ContainSubstring(`status is "Cancelled"`))
			}()

			g.By("getting the build name")
			var buildName string
			wait.Poll(100*time.Millisecond, 1*time.Minute, func() (bool, error) {
				out, err := oc.Run("get").
					Args("build", "--template", "{{ (index .items 0).metadata.name }}").Output()
				// Give it a second chance in case the build resource has not been created yet
				if err != nil || len(out) == 0 {
					return false, nil
				}

				buildName = out
				return true, nil
			})

			o.Expect(buildName).ToNot(o.BeEmpty())

			g.By(fmt.Sprintf("cancelling the build %q", buildName))
			err := oc.Run("cancel-build").Args(buildName).Execute()
			o.Expect(err).ToNot(o.HaveOccurred())
			wg.Wait()
		})

	})

})
Example #4
						continue
					}
					// Verify that no builds other than this one are running or
					// pending, since a serial build always runs alone.
					c := buildclient.NewOSClientBuildClient(oc.Client())
					builds, err := buildutil.BuildConfigBuilds(c, oc.Namespace(), bcName, func(b buildapi.Build) bool {
						if b.Name == build.Name {
							return false
						}
						if b.Status.Phase == buildapi.BuildPhaseRunning || b.Status.Phase == buildapi.BuildPhasePending {
							return true
						}
						return false
					})
					o.Expect(err).NotTo(o.HaveOccurred())
					o.Expect(builds.Items).Should(o.BeEmpty())

					// The builds should start in the same order as they were created.
					o.Expect(build.Name).Should(o.BeEquivalentTo(startedBuilds[counter]))

					buildVerified[build.Name] = true
					counter++
				}
				if counter == len(startedBuilds) {
					break
				}
			}
		})
	})

	g.Describe("build configuration with SerialLatestOnly build run policy", func() {
Example #5
// ArtifactDirPath returns the value of the ARTIFACT_DIR environment variable.
func ArtifactDirPath() string {
	path := os.Getenv("ARTIFACT_DIR")
	// os.Getenv returns a string, never nil, so asserting non-empty is the real guard.
	o.Expect(path).NotTo(o.BeEmpty())
	return path
}
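
A short usage sketch (the WriteArtifact helper below is hypothetical, not from the original source): files written under ARTIFACT_DIR are what the CI system collects after a run, so test code joins the directory with a file name.

// WriteArtifact is a hypothetical helper illustrating how ArtifactDirPath is
// typically consumed; assumes "io/ioutil" and "path/filepath" are imported.
func WriteArtifact(name string, data []byte) error {
	return ioutil.WriteFile(filepath.Join(ArtifactDirPath(), name), data, 0644)
}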
Example #6
			// find the docker id of our running container.
			g.By("finding the docker container id on the pod")
			retrievedPod, err = f.Client.Pods(f.Namespace.Name).Get(submittedPod.Name)
			o.Expect(err).NotTo(o.HaveOccurred())
			containerID, err := getContainerID(retrievedPod)
			o.Expect(err).NotTo(o.HaveOccurred())

			// Now check the container's host config, which the kubelet should have
			// updated. If that looks good, ensure the groups we expect are present.
			g.By("inspecting the container")
			dockerContainer, err := dockerCli.InspectContainer(containerID)
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("ensuring the host config has GroupAdd")
			groupAdd := dockerContainer.HostConfig.GroupAdd
			o.Expect(groupAdd).ToNot(o.BeEmpty(), fmt.Sprintf("groupAdd on host config was %v", groupAdd))

			g.By("ensuring the groups are set")
			o.Expect(configHasGroup(fsGroup, dockerContainer.HostConfig)).To(o.Equal(true), fmt.Sprintf("fsGroup should exist on host config: %v", groupAdd))
			o.Expect(configHasGroup(supGroup, dockerContainer.HostConfig)).To(o.Equal(true), fmt.Sprintf("supGroup should exist on host config: %v", groupAdd))
		})

	})
})

// supportsSupplementalGroups checks the Docker version to ensure it is at least
// 1.8.2. The check could still pass for a build that lacks the /etc/groups patch,
// but in that case launching the pod will fail, so this is as safe as we can get.
func supportsSupplementalGroups(dockerVersion string) (bool, error, string) {
	parts := strings.Split(dockerVersion, ".")
Example #7
	var (
		buildFixture = exutil.FixturePath("..", "extended", "fixtures", "test-build-revision.json")
		oc           = exutil.NewCLI("cli-build-revision", exutil.KubeConfigPath())
	)

	g.JustBeforeEach(func() {
		g.By("waiting for builder service account")
		err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
		o.Expect(err).NotTo(o.HaveOccurred())
		oc.Run("create").Args("-f", buildFixture).Execute()
	})

	g.Describe("started build", func() {
		g.It("should contain source revision information", func() {
			g.By("starting the build with --wait flag")
			out, err := oc.Run("start-build").Args("sample-build", "--wait").Output()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By(fmt.Sprintf("verifying the build %q status", out))
			build, err := oc.REST().Builds(oc.Namespace()).Get(out)
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(build.Spec.Revision).NotTo(o.BeNil())
			o.Expect(build.Spec.Revision.Git).NotTo(o.BeNil())
			o.Expect(build.Spec.Revision.Git.Commit).NotTo(o.BeEmpty())
			o.Expect(build.Spec.Revision.Git.Author.Name).NotTo(o.BeEmpty())
			o.Expect(build.Spec.Revision.Git.Committer.Name).NotTo(o.BeEmpty())
			o.Expect(build.Spec.Revision.Git.Message).NotTo(o.BeEmpty())
		})
	})
})
Example #8
			g.By("wait for the master to be built")
			err = wait.Poll(1*time.Second, 5*time.Minute, func() (bool, error) {
				return waitForBuildComplete("jenkins-master-1")
			})
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("wait for jenkins deployment")
			err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "jenkins")
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("get ip and port for jenkins service")
			serviceIP, err := oc.Run("get").Args("svc", "jenkins", "--config",
				exutil.KubeConfigPath()).Template("{{.spec.clusterIP}}").Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(serviceIP).NotTo(o.BeEmpty())
			port, err := oc.Run("get").Args("svc", "jenkins", "--config",
				exutil.KubeConfigPath()).Template("{{ $x := index .spec.ports 0}}{{$x.port}}").Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(port).NotTo(o.BeEmpty())

			jenkinsURI := fmt.Sprintf("http://%s:%s", serviceIP, port)
			g.By(fmt.Sprintf("wait for jenkins to come up at %q", jenkinsURI))
			err = waitForJenkinsActivity(jenkinsURI, "", 200)
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("inspecting the Jenkins master logs the slave image should be registered")
			out, err := oc.Run("logs").Args("dc/jenkins").Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(out).To(o.ContainSubstring("Adding image ruby-22-centos7-jenkins-slave:latest as Kubernetes slave"))
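
waitForBuildComplete, polled at the top of this example, is not shown. A plausible sketch (an assumption, not the original implementation) that relies on the surrounding test's oc client being in scope: treat lookup errors as "not ready yet" so the enclosing wait.Poll keeps retrying, and report true once the build reaches the Complete phase.

// waitForBuildComplete (sketch): true once the named build completes.
func waitForBuildComplete(name string) (bool, error) {
	build, err := oc.REST().Builds(oc.Namespace()).Get(name)
	if err != nil {
		// The build may not exist yet; let the poll retry.
		return false, nil
	}
	return build.Status.Phase == buildapi.BuildPhaseComplete, nil
}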
Example #9
			err = oc.Run("start-build").Args("rootable-ruby", fmt.Sprintf("--from-dir=%s", s2ibuilderFixture),
				"--wait").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("creating a build that tries to gain root access via su")
			err = oc.Run("create").Args("-f", rootAccessBuildFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("start the root-access-build with the --wait flag")
			err = oc.Run("start-build").Args("root-access-build", "--wait").Execute()
			o.Expect(err).To(o.HaveOccurred())

			g.By("verifying the build status")
			builds, err := oc.REST().Builds(oc.Namespace()).List(kapi.ListOptions{})
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(builds.Items).ToNot(o.BeEmpty())

			// Find the build
			var build *buildapi.Build
			for i := range builds.Items {
				if builds.Items[i].Name == "root-access-build-1" {
					build = &builds.Items[i]
					break
				}
			}
			o.Expect(build).NotTo(o.BeNil())
			o.Expect(build.Status.Phase).Should(o.BeEquivalentTo(buildapi.BuildPhaseFailed))
		})
	})

})
Example #10
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By("waiting for jenkins deployment")
		err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "jenkins", oc)
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By("get ip and port for jenkins service")
		serviceIP, err := oc.Run("get").Args("svc", "jenkins", "--config", exutil.KubeConfigPath()).Template("{{.spec.clusterIP}}").Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		port, err := oc.Run("get").Args("svc", "jenkins", "--config", exutil.KubeConfigPath()).Template("{{ $x := index .spec.ports 0}}{{$x.port}}").Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		hostPort = fmt.Sprintf("%s:%s", serviceIP, port)

		g.By("get admin password")
		password = getAdminPassword(oc)
		o.Expect(password).ShouldNot(o.BeEmpty())

		g.By("wait for jenkins to come up")
		err = waitForJenkinsActivity(fmt.Sprintf("http://%s", hostPort), password, "", 200)
		if err != nil {
			exutil.DumpDeploymentLogs("jenkins", oc)
		}
		o.Expect(err).NotTo(o.HaveOccurred())

		if testingSnapshot {
			g.By("verifying the test image is being used")
			// for the test image, confirm that a snapshot version of the plugin is running in the jenkins image we'll test against
			err = waitForJenkinsActivity(fmt.Sprintf("http://%s/pluginManager/plugin/openshift-pipeline/thirdPartyLicenses", hostPort), password, `About OpenShift Pipeline Jenkins Plugin ([0-9\.]+)-SNAPSHOT`, 200)
		}

	})
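
waitForJenkinsActivity appears in several of these examples but its body is not shown. A plausible sketch of the four-argument variant used here (an assumption, not the original code; the "admin" user name and the poll interval are illustrative guesses): poll the URI with basic auth until the expected status code comes back and, when a verification pattern is supplied, until the response body matches it.

// waitForJenkinsActivity (sketch): poll uri until it answers with the given
// status and, optionally, a body matching verificationRegEx.
func waitForJenkinsActivity(uri, password, verificationRegEx string, status int) error {
	return wait.Poll(10*time.Second, 3*time.Minute, func() (bool, error) {
		req, err := http.NewRequest("GET", uri, nil)
		if err != nil {
			return false, err
		}
		req.SetBasicAuth("admin", password)
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			// Jenkins may not be accepting connections yet; keep polling.
			return false, nil
		}
		defer resp.Body.Close()
		if resp.StatusCode != status {
			return false, nil
		}
		if verificationRegEx == "" {
			return true, nil
		}
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return false, nil
		}
		return regexp.MatchString(verificationRegEx, string(body))
	})
}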
Example #11
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("build the Jenkins master")
			br, err = exutil.StartBuildAndWait(oc, "jenkins-master", "--wait", "--from-dir", "examples/jenkins/master-slave")
			br.AssertSuccess()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("wait for jenkins deployment")
			err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "jenkins", oc)
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("get ip and port for jenkins service")
			serviceIP, err := oc.Run("get").Args("svc", "jenkins", "--config",
				exutil.KubeConfigPath()).Template("{{.spec.clusterIP}}").Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(serviceIP).NotTo(o.BeEmpty())
			port, err := oc.Run("get").Args("svc", "jenkins", "--config",
				exutil.KubeConfigPath()).Template("{{ $x := index .spec.ports 0}}{{$x.port}}").Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(port).NotTo(o.BeEmpty())

			g.By("get admin password")
			password := getAdminPassword(oc)
			o.Expect(password).ShouldNot(o.BeEmpty())

			j := JenkinsRef{
				oc:        oc,
				host:      serviceIP,
				port:      port,
				password:  password,
				namespace: oc.Namespace(),
Example #12
				return firstDeployerRemoved && secondDeployerExists, nil
			})
			o.Expect(err).NotTo(o.HaveOccurred())
		})
	})

	g.It("should respect image stream tag reference policy [Conformance]", func() {
		o.Expect(oc.Run("create").Args("-f", resolutionFixture).Execute()).NotTo(o.HaveOccurred())

		name := "deployment-image-resolution"
		o.Expect(waitForLatestCondition(oc, name, deploymentRunTimeout, deploymentImageTriggersResolved(2))).NotTo(o.HaveOccurred())

		is, err := oc.Client().ImageStreams(oc.Namespace()).Get(name)
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(is.Status.DockerImageRepository).NotTo(o.BeEmpty())
		o.Expect(is.Status.Tags["direct"].Items).NotTo(o.BeEmpty())
		o.Expect(is.Status.Tags["pullthrough"].Items).NotTo(o.BeEmpty())

		dc, err := oc.Client().DeploymentConfigs(oc.Namespace()).Get(name)
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(dc.Spec.Triggers).To(o.HaveLen(3))

		imageID := is.Status.Tags["pullthrough"].Items[0].Image
		resolvedReference := fmt.Sprintf("%s@%s", is.Status.DockerImageRepository, imageID)
		directReference := is.Status.Tags["direct"].Items[0].DockerImageReference

		// controller should be using pullthrough for this (pointing to local registry)
		o.Expect(dc.Spec.Triggers[1].ImageChangeParams).NotTo(o.BeNil())
		o.Expect(dc.Spec.Triggers[1].ImageChangeParams.LastTriggeredImage).To(o.Equal(resolvedReference))
		o.Expect(dc.Spec.Template.Spec.Containers[0].Image).To(o.Equal(resolvedReference))