Example #1
func assertEnvVars(oc *exutil.CLI, buildPrefix string, varsToFind map[string]string) {

	buildList, err := oc.REST().Builds(oc.Namespace()).List(kapi.ListOptions{})
	o.Expect(err).NotTo(o.HaveOccurred())

	// Ensure that expected start-build environment variables were injected
	for _, build := range buildList.Items {
		ginkgolog("Found build: %q", build.GetName())
		if strings.HasPrefix(build.GetName(), buildPrefix) {
			envs := []kapi.EnvVar{}
			if build.Spec.Strategy.DockerStrategy != nil && build.Spec.Strategy.DockerStrategy.Env != nil {
				envs = build.Spec.Strategy.DockerStrategy.Env
			} else if build.Spec.Strategy.SourceStrategy != nil && build.Spec.Strategy.SourceStrategy.Env != nil {
				envs = build.Spec.Strategy.SourceStrategy.Env
			} else {
				continue
			}

			for k, v := range varsToFind {
				found := false
				for _, env := range envs {
					ginkgolog("Found %s=%s in build %s", env.Name, env.Value, build.GetName())
					if k == env.Name && v == env.Value {
						found = true
						break
					}
				}
				o.ExpectWithOffset(1, found).To(o.BeTrue())
			}
		}
	}
}
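Example #2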
// buildAndPushTestImagesTo builds a given number of test images. The images are pushed to a new image stream
// with the given name under <tagPrefix><X>, where X is the image number starting from 1.
func buildAndPushTestImagesTo(oc *exutil.CLI, isName string, tagPrefix string, numberOfImages int) (tag2Image map[string]imageapi.Image, err error) {
	dClient, err := testutil.NewDockerClient()
	if err != nil {
		return
	}
	tag2Image = make(map[string]imageapi.Image)

	for i := 1; i <= numberOfImages; i++ {
		tag := fmt.Sprintf("%s%d", tagPrefix, i)
		dgst, err := imagesutil.BuildAndPushImageOfSizeWithDocker(oc, dClient, isName, tag, imageSize, 2, g.GinkgoWriter, true)
		if err != nil {
			return nil, err
		}
		ist, err := oc.Client().ImageStreamTags(oc.Namespace()).Get(isName, tag)
		if err != nil {
			return nil, err
		}
		if dgst != ist.Image.Name {
			return nil, fmt.Errorf("digest of built image does not match stored: %s != %s", dgst, ist.Image.Name)
		}
		tag2Image[tag] = ist.Image
	}

	return
}
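A minimal usage sketch for the helper above; the image stream name, tag prefix, and count are placeholders, and it assumes the same test package, imports, and Ginkgo/Gomega wiring as the surrounding examples.
func buildTestImagesExample(oc *exutil.CLI) {
	// Push three differently tagged images into a hypothetical "prune" image stream.
	tag2Image, err := buildAndPushTestImagesTo(oc, "prune", "size-", 3)
	o.Expect(err).NotTo(o.HaveOccurred())
	for tag, img := range tag2Image {
		fmt.Fprintf(g.GinkgoWriter, "pushed tag %s as image %s\n", tag, img.Name)
	}
}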
Example #3
func doTest(bldPrefix, debugStr string, same bool, oc *exutil.CLI) {
	// corrupt the builder image
	exutil.CorruptImage(fullImageName, corruptor)

	if bldPrefix == buildPrefixFC || bldPrefix == buildPrefixTC {
		// grant access to the custom build strategy
		err := oc.AsAdmin().Run("adm").Args("policy", "add-cluster-role-to-user", "system:build-strategy-custom", oc.Username()).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		defer func() {
			err = oc.AsAdmin().Run("adm").Args("policy", "remove-cluster-role-from-user", "system:build-strategy-custom", oc.Username()).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
		}()
	}

	// kick off the app/lang build and verify the builder image accordingly
	_, err := exutil.StartBuildAndWait(oc, bldPrefix)
	o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())

	if same {
		exutil.VerifyImagesSame(fullImageName, corruptor, debugStr)
	} else {
		exutil.VerifyImagesDifferent(fullImageName, corruptor, debugStr)
	}

	// reset corrupted tagging for next test
	exutil.ResetImage(resetData)
	// dump tags/hexids for debug
	_, err = exutil.DumpAndReturnTagging(tags)
	o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
}
Example #4
func createFixture(oc *exutil.CLI, path string) ([]string, []string, error) {
	output, err := oc.Run("create").Args("-f", path, "-o", "name").Output()
	if err != nil {
		return nil, nil, err
	}

	lines := strings.Split(output, "\n")

	resources := make([]string, 0, len(lines)-1)
	names := make([]string, 0, len(lines)-1)

	for _, line := range lines {
		if line == "" {
			continue
		}
		parts := strings.Split(line, "/")
		if len(parts) != 2 {
			return nil, nil, fmt.Errorf("expected type/name syntax, got: %q", line)
		}
		resources = append(resources, parts[0])
		names = append(names, parts[1])
	}

	return resources, names, nil
}
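A short sketch of consuming the variant directly above (the one that returns slices); the fixture path is a placeholder.
func createFixtureExample(oc *exutil.CLI) {
	// "fixture.yaml" stands in for a real path to a file containing one or more objects.
	resources, names, err := createFixture(oc, "fixture.yaml")
	o.Expect(err).NotTo(o.HaveOccurred())
	for i := range resources {
		fmt.Fprintf(g.GinkgoWriter, "created %s/%s\n", resources[i], names[i])
	}
}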
Example #5
func tearDownPruneImagesTest(oc *exutil.CLI, cleanUp *cleanUpContainer) {
	for _, image := range cleanUp.imageNames {
		err := oc.AsAdmin().Client().Images().Delete(image)
		if err != nil {
			fmt.Fprintf(g.GinkgoWriter, "clean up of image %q failed: %v\n", image, err)
		}
	}
}
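Example #6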
// waitForLimitSync waits, with a short timeout, until the usage of the quota reaches the given limit
func waitForLimitSync(oc *exutil.CLI, hardLimit kapi.ResourceList) error {
	g.By(fmt.Sprintf("waiting for resource quota %s to get updated", quotaName))
	return testutil.WaitForResourceQuotaLimitSync(
		oc.KubeClient().Core().ResourceQuotas(oc.Namespace()),
		quotaName,
		hardLimit,
		waitTimeout)
}
Example #7
func waitForImageUpdate(oc *exutil.CLI, image *imageapi.Image) error {
	return wait.Poll(200*time.Millisecond, 2*time.Minute, func() (bool, error) {
		newImage, err := oc.AsAdmin().Client().Images().Get(image.Name)
		if err != nil {
			return false, err
		}

		return (image.ResourceVersion < newImage.ResourceVersion), nil
	})
}
Example #8
// FindJenkinsPod finds the pod running Jenkins
func FindJenkinsPod(oc *exutil.CLI) *kapi.Pod {
	pods, err := exutil.GetDeploymentConfigPods(oc, "jenkins")
	o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())

	if pods == nil || pods.Items == nil {
		g.Fail("No pods matching jenkins deploymentconfig in namespace " + oc.Namespace())
	}

	o.ExpectWithOffset(1, len(pods.Items)).To(o.Equal(1))
	return &pods.Items[0]
}
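A brief sketch pairing the helper with an exec call; the command run inside the Jenkins container is only illustrative.
func jenkinsPodExample(oc *exutil.CLI) {
	pod := FindJenkinsPod(oc)
	// Run an illustrative command inside the Jenkins container.
	out, err := oc.Run("exec").Args(pod.Name, "--", "ls", "/var/lib/jenkins").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	fmt.Fprintf(g.GinkgoWriter, "jenkins home contents:\n%s\n", out)
}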
Example #9
// createFixture will create the provided fixture and return the resource and the
// name separately.
// TODO: Probably move to a more general location like test/extended/util/cli.go
func createFixture(oc *exutil.CLI, fixture string) (string, string, error) {
	resource, err := oc.Run("create").Args("-f", fixture, "-o", "name").Output()
	if err != nil {
		return "", "", err
	}
	parts := strings.Split(resource, "/")
	if len(parts) != 2 {
		return "", "", fmt.Errorf("expected type/name syntax, got: %s", resource)
	}
	return resource, parts[1], nil
}
Example #10
func waitForNoPodsAvailable(oc *exutil.CLI) error {
	return wait.Poll(200*time.Millisecond, 2*time.Minute, func() (bool, error) {
		pods, err := oc.KubeClient().Core().Pods(oc.Namespace()).List(kapi.ListOptions{})
		if err != nil {
			return false, err
		}

		return len(pods.Items) == 0, nil
	})
}
Example #11
func waitForEndpointsAvailable(oc *exutil.CLI, serviceName string) error {
	return wait.Poll(200*time.Millisecond, 2*time.Minute, func() (bool, error) {
		ep, err := oc.KubeClient().Core().Endpoints(oc.Namespace()).Get(serviceName)
		// Tolerate NotFound b/c it could take a moment for the endpoints to be created
		if errors.TolerateNotFoundError(err) != nil {
			return false, err
		}

		return (len(ep.Subsets) > 0) && (len(ep.Subsets[0].Addresses) > 0), nil
	})
}
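A sketch combining the two wait helpers in Examples #10 and #11; the "database" service and deployment config names are hypothetical, and the oc scale invocation is an assumption about the available CLI commands.
func endpointWaitExample(oc *exutil.CLI) {
	// Wait until the hypothetical "database" service has at least one ready address.
	o.Expect(waitForEndpointsAvailable(oc, "database")).NotTo(o.HaveOccurred())

	// Scale the backing deployment config down and wait until no pods remain in the project.
	err := oc.Run("scale").Args("dc/database", "--replicas=0").Execute()
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(waitForNoPodsAvailable(oc)).NotTo(o.HaveOccurred())
}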
Example #12
// GetDockerRegistryURL returns the cluster URL of the internal docker registry, if available.
func GetDockerRegistryURL(oc *exutil.CLI) (string, error) {
	svc, err := oc.AdminKubeREST().Services("default").Get("docker-registry")
	if err != nil {
		return "", err
	}
	url := svc.Spec.ClusterIP
	for _, p := range svc.Spec.Ports {
		url = fmt.Sprintf("%s:%d", url, p.Port)
		break
	}
	return url, nil
}
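A sketch of how the returned host:port is typically combined into a pull spec; the image stream and tag names are placeholders.
func registryPullSpecExample(oc *exutil.CLI) {
	registryURL, err := GetDockerRegistryURL(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	// Compose a pull spec for a hypothetical image stream tag in the current project.
	pullSpec := fmt.Sprintf("%s/%s/%s:%s", registryURL, oc.Namespace(), "myapp", "latest")
	fmt.Fprintf(g.GinkgoWriter, "pull spec: %s\n", pullSpec)
}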
Example #13
func getAdminPassword(oc *exutil.CLI) string {
	envs, err := oc.Run("set").Args("env", "dc/jenkins", "--list").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	kvs := strings.Split(envs, "\n")
	for _, kv := range kvs {
		if strings.HasPrefix(kv, "JENKINS_PASSWORD="******"=")
			fmt.Fprintf(g.GinkgoWriter, "\nJenkins admin password %s\n", s[1])
			return s[1]
		}
	}
	return "password"
}
Example #14
// QueryPrivileged executes an SQL query as a root user and returns the result.
func (m MySQL) QueryPrivileged(oc *util.CLI, query string) (string, error) {
	container, err := firstContainerName(oc.KubeREST().Pods(oc.Namespace()), m.podName)
	if err != nil {
		return "", err
	}
	masterConf, err := getPodConfig(oc.KubeREST().Pods(oc.Namespace()), m.masterPodName)
	if err != nil {
		return "", err
	}
	return oc.Run("exec").Args(m.podName, "-c", container, "--", "bash", "-c",
		fmt.Sprintf("mysql -h 127.0.0.1 -uroot -e \"%s\" %s",
			query, masterConf.Env["MYSQL_DATABASE"])).Output()
}
Example #15
func executeShellCommand(oc *util.CLI, podName string, command string) (string, error) {
	out, err := oc.Run("exec").Args(podName, "--", "bash", "-c", command).Output()
	if err != nil {
		switch err.(type) {
		case *util.ExitError, *exec.ExitError:
			return "", nil
		default:
			return "", err
		}
	}

	return out, nil
}
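Example #16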
// waitForResourceQuotaSync waits, with a short timeout, until the usage of the quota reaches the given limit
func waitForResourceQuotaSync(oc *exutil.CLI, name string, expectedResources kapi.ResourceList) (kapi.ResourceList, error) {
	g.By(fmt.Sprintf("waiting for resource quota %s to get updated", name))
	used, err := exutil.WaitForResourceQuotaSync(
		oc.KubeClient().Core().ResourceQuotas(oc.Namespace()),
		name,
		expectedResources,
		false,
		waitTimeout,
	)
	if err != nil {
		return nil, err
	}
	return used, nil
}
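Example #17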
// bumpLimit changes the limit value of the given resource for all limit types of the limit range object
func bumpLimit(oc *exutil.CLI, resourceName kapi.ResourceName, limit string) (kapi.ResourceList, error) {
	g.By(fmt.Sprintf("bump a limit on resource %q to %s", resourceName, limit))
	lr, err := oc.AdminKubeClient().Core().LimitRanges(oc.Namespace()).Get(limitRangeName)
	if err != nil {
		return nil, err
	}
	res := kapi.ResourceList{}

	change := false
	for i := range lr.Spec.Limits {
		item := &lr.Spec.Limits[i]
		if old, exists := item.Max[resourceName]; exists {
			for k, v := range item.Max {
				res[k] = v
			}
			parsed := resource.MustParse(limit)
			if old.Cmp(parsed) != 0 {
				item.Max[resourceName] = parsed
				change = true
			}
		}
	}

	if !change {
		return res, nil
	}
	_, err = oc.AdminKubeClient().Core().LimitRanges(oc.Namespace()).Update(lr)
	return res, err
}
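A usage sketch for the helper above; the resource name and the new limit value are assumptions, not values taken from these examples.
func bumpLimitExample(oc *exutil.CLI) {
	// Raise the hypothetical per-image-stream image limit to 10.
	maxLimits, err := bumpLimit(oc, kapi.ResourceName("openshift.io/images"), "10")
	o.Expect(err).NotTo(o.HaveOccurred())
	fmt.Fprintf(g.GinkgoWriter, "limit range maximums: %v\n", maxLimits)
}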
Example #18
// QueryPrivileged executes an SQL query as a root user and returns the result.
func (m PostgreSQL) QueryPrivileged(oc *util.CLI, query string) (string, error) {
	container, err := firstContainerName(oc.KubeClient().Core().Pods(oc.Namespace()), m.podName)
	if err != nil {
		return "", err
	}
	masterConf, err := getPodConfig(oc.KubeClient().Core().Pods(oc.Namespace()), m.masterPodName)
	if err != nil {
		return "", err
	}
	return oc.Run("exec").Args(m.podName, "-c", container, "--", "bash", "-c",
		fmt.Sprintf("psql postgres://postgres:%[email protected]/%s -x -c \"%s\"",
			masterConf.Env["POSTGRESQL_ADMIN_PASSWORD"],
			masterConf.Env["POSTGRESQL_DATABASE"], query)).Output()
}
Example #19
// ModifySourceCode will modify source code in the pod of the application
// according to the sed script.
func ModifySourceCode(oc *exutil.CLI, selector labels.Selector, sedScript, file string) error {
	pods, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), selector, exutil.CheckPodIsRunningFunc, 1, 120*time.Second)
	if err != nil {
		return err
	}
	if len(pods) != 1 {
		return fmt.Errorf("Got %d pods for selector %v, expected 1", len(pods), selector)
	}

	pod, err := oc.KubeREST().Pods(oc.Namespace()).Get(pods[0])
	if err != nil {
		return err
	}
	return oc.Run("exec").Args(pod.Name, "-c", pod.Spec.Containers[0].Name, "--", "sed", "-ie", sedScript, file).Execute()
}
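A short sketch of the helper above; the label selector, sed script, and file path are illustrative only.
func modifySourceCodeExample(oc *exutil.CLI) {
	// Replace a string in a file served by a hypothetical "myapp" pod.
	err := ModifySourceCode(oc, exutil.ParseLabelsOrDie("name=myapp"), "s/Hello/Goodbye/", "index.html")
	o.Expect(err).NotTo(o.HaveOccurred())
}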
Example #20
// TestRemoteLogin will test whether we can log in to a remote database.
func (m MySQL) TestRemoteLogin(oc *util.CLI, hostAddress string) error {
	container, err := firstContainerName(oc.KubeREST().Pods(oc.Namespace()), m.podName)
	if err != nil {
		return err
	}
	masterConf, err := getPodConfig(oc.KubeREST().Pods(oc.Namespace()), m.masterPodName)
	if err != nil {
		return err
	}
	err = oc.Run("exec").Args(m.podName, "-c", container, "--", "bash", "-c",
		fmt.Sprintf("mysql -h %s -u%s -p%s -e \"SELECT 1;\" %s",
			hostAddress, masterConf.Env["MYSQL_USER"], masterConf.Env["MYSQL_PASSWORD"],
			masterConf.Env["MYSQL_DATABASE"])).Execute()
	return err
}
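Example #21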
func waitForNumberOfPodsWithLabel(oc *exutil.CLI, number int, label string) []string {
	g.By(fmt.Sprintf("expecting that there are %d running pods with label name=%s", number, label))

	podNames, err := exutil.WaitForPods(
		oc.KubeClient().Core().Pods(oc.Namespace()),
		exutil.ParseLabelsOrDie("name="+label),
		exutil.CheckPodIsRunningFn,
		number,
		1*time.Minute,
	)
	o.Expect(err).ShouldNot(o.HaveOccurred())
	o.Expect(podNames).Should(o.HaveLen(number))

	return podNames
}
Example #22
// TestRemoteLogin will test whether we can log in to a remote database.
func (m PostgreSQL) TestRemoteLogin(oc *util.CLI, hostAddress string) error {
	container, err := firstContainerName(oc.KubeClient().Core().Pods(oc.Namespace()), m.podName)
	if err != nil {
		return err
	}
	masterConf, err := getPodConfig(oc.KubeClient().Core().Pods(oc.Namespace()), m.masterPodName)
	if err != nil {
		return err
	}
	err = oc.Run("exec").Args(m.podName, "-c", container, "--", "bash", "-c",
		fmt.Sprintf("psql postgres://%s:%s@%s/%s -x -c \"SELECT 1;\"",
			masterConf.Env["POSTGRESQL_USER"], masterConf.Env["POSTGRESQL_PASSWORD"],
			hostAddress, masterConf.Env["POSTGRESQL_DATABASE"])).Execute()
	return err
}
Example #23
// RunInPodContainer will run provided command in the specified pod container.
func RunInPodContainer(oc *exutil.CLI, selector labels.Selector, cmd []string) error {
	pods, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), selector, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
	if err != nil {
		return err
	}
	if len(pods) != 1 {
		return fmt.Errorf("Got %d pods for selector %v, expected 1", len(pods), selector)
	}

	pod, err := oc.KubeREST().Pods(oc.Namespace()).Get(pods[0])
	if err != nil {
		return err
	}
	args := []string{pod.Name, "-c", pod.Spec.Containers[0].Name, "--"}
	args = append(args, cmd...)
	return oc.Run("exec").Args(args...).Execute()
}
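A sketch of the helper above; the selector and command are placeholders.
func runInPodContainerExample(oc *exutil.CLI) {
	// List a data directory inside a hypothetical "database" pod.
	err := RunInPodContainer(oc, exutil.ParseLabelsOrDie("name=database"), []string{"ls", "-la", "/var/lib"})
	o.Expect(err).NotTo(o.HaveOccurred())
}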
Example #24
func waitForSyncedConfig(oc *exutil.CLI, name string, timeout time.Duration) error {
	dc, rcs, pods, err := deploymentInfo(oc, name)
	if err != nil {
		return err
	}
	if err := checkDeploymentInvariants(dc, rcs, pods); err != nil {
		return err
	}
	generation := dc.Generation
	return wait.PollImmediate(200*time.Millisecond, timeout, func() (bool, error) {
		config, err := oc.REST().DeploymentConfigs(oc.Namespace()).Get(name)
		if err != nil {
			return false, err
		}
		return deployutil.HasSynced(config, generation), nil
	})
}
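Example #25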
// createResourceQuota creates a resource quota with the given hard limits in the current namespace and waits
// for the first usage refresh
func createResourceQuota(oc *exutil.CLI, hard kapi.ResourceList) (*kapi.ResourceQuota, error) {
	rq := &kapi.ResourceQuota{
		ObjectMeta: kapi.ObjectMeta{
			Name: quotaName,
		},
		Spec: kapi.ResourceQuotaSpec{
			Hard: hard,
		},
	}

	g.By(fmt.Sprintf("creating resource quota with a limit %v", hard))
	rq, err := oc.AdminKubeClient().Core().ResourceQuotas(oc.Namespace()).Create(rq)
	if err != nil {
		return nil, err
	}
	err = waitForLimitSync(oc, hard)
	return rq, err
}
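A sketch of creating a quota with the helper above; the quota resource name and value are assumptions.
func resourceQuotaExample(oc *exutil.CLI) {
	// Limit the project to three image streams (hypothetical resource name and value).
	hard := kapi.ResourceList{
		kapi.ResourceName("openshift.io/imagestreams"): resource.MustParse("3"),
	}
	_, err := createResourceQuota(oc, hard)
	o.Expect(err).NotTo(o.HaveOccurred())
}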
Example #26
func lookupFSGroup(oc *exutil.CLI, project string) (int, error) {
	gidRange, err := oc.Run("get").Args("project", project,
		"--template='{{ index .metadata.annotations \"openshift.io/sa.scc.supplemental-groups\" }}'").Output()
	if err != nil {
		return 0, err
	}

	// gidRange will be something like: 1000030000/10000
	fsGroupStr := strings.Split(gidRange, "/")[0]
	fsGroupStr = strings.Replace(fsGroupStr, "'", "", -1)

	fsGroup, err := strconv.Atoi(fsGroupStr)
	if err != nil {
		return 0, err
	}

	return fsGroup, nil
}
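Example #27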
// createLimitRangeOfType creates a new limit range object with the given limits for the given limit type in the current namespace
func createLimitRangeOfType(oc *exutil.CLI, limitType kapi.LimitType, maxLimits kapi.ResourceList) (*kapi.LimitRange, error) {
	lr := &kapi.LimitRange{
		ObjectMeta: kapi.ObjectMeta{
			Name: limitRangeName,
		},
		Spec: kapi.LimitRangeSpec{
			Limits: []kapi.LimitRangeItem{
				{
					Type: limitType,
					Max:  maxLimits,
				},
			},
		},
	}

	g.By(fmt.Sprintf("creating limit range object %q with %s limited to: %v", limitRangeName, limitType, maxLimits))
	lr, err := oc.AdminKubeClient().Core().LimitRanges(oc.Namespace()).Create(lr)
	return lr, err
}
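A usage sketch for the helper above; the imageapi.LimitTypeImage constant and the storage cap are assumptions beyond what these examples show.
func limitRangeExample(oc *exutil.CLI) {
	// Cap the size of pushed images at 1Mi (limit type and value are assumptions).
	_, err := createLimitRangeOfType(oc, imageapi.LimitTypeImage, kapi.ResourceList{
		kapi.ResourceStorage: resource.MustParse("1Mi"),
	})
	o.Expect(err).NotTo(o.HaveOccurred())
}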
Example #28
// buildAndPushImage tries to build an image. The image is stored as an image stream tag <name>:<tag>. If
// shouldBeDenied is true, the build is expected to fail with a denied error.
func buildAndPushImage(oc *exutil.CLI, namespace, name, tag string, shouldBeDenied bool) {
	istName := name
	if tag != "" {
		istName += ":" + tag
	}
	g.By(fmt.Sprintf("building an image %q", istName))

	bc, err := oc.REST().BuildConfigs(namespace).Get(name)
	if err == nil {
		g.By(fmt.Sprintf("changing build config %s to store result into %s", name, istName))
		o.Expect(bc.Spec.BuildSpec.Output.To.Kind).To(o.Equal("ImageStreamTag"))
		bc.Spec.BuildSpec.Output.To.Name = istName
		_, err := oc.REST().BuildConfigs(namespace).Update(bc)
		o.Expect(err).NotTo(o.HaveOccurred())
	} else {
		g.By(fmt.Sprintf("creating a new build config %s with output to %s ", name, istName))
		err = oc.Run("new-build").Args(
			"--binary",
			"--name", name,
			"--to", istName).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
	}

	tempDir, err := ioutil.TempDir("", "name-build")
	o.Expect(err).NotTo(o.HaveOccurred())

	err = createRandomBlob(path.Join(tempDir, "data"), imageSize)
	o.Expect(err).NotTo(o.HaveOccurred())
	err = ioutil.WriteFile(path.Join(tempDir, "Dockerfile"), []byte("FROM scratch\nCOPY data /data\n"), 0644)
	o.Expect(err).NotTo(o.HaveOccurred())

	err = oc.Run("start-build").Args(name, "--from-dir", tempDir, "--wait").Execute()
	if shouldBeDenied {
		o.Expect(err).To(o.HaveOccurred())
		out, err := oc.Run("logs").Args("bc/" + name).Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(out).Should(o.MatchRegexp("(?i)Failed to push image:.*denied"))
	} else {
		o.Expect(err).NotTo(o.HaveOccurred())
	}
}
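A sketch exercising both the allowed and the denied path of the helper above; the stream name and tags are placeholders, and which build gets denied depends on whatever quota or limit range the test has configured.
func buildAndPushImageExample(oc *exutil.CLI) {
	// The first build is expected to be admitted, the second to be rejected.
	buildAndPushImage(oc, oc.Namespace(), "sized", "small", false)
	buildAndPushImage(oc, oc.Namespace(), "sized", "big", true)
}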
Example #29
// deleteTestImages deletes test images built in current and shared
// namespaces. It also deletes shared projects.
func deleteTestImages(oc *exutil.CLI) {
	g.By(fmt.Sprintf("Deleting images and image streams in project %q", oc.Namespace()))
	iss, err := oc.AdminClient().ImageStreams(oc.Namespace()).List(kapi.ListOptions{})
	if err != nil {
		return
	}
	for _, is := range iss.Items {
		for _, history := range is.Status.Tags {
			for i := range history.Items {
				oc.AdminClient().Images().Delete(history.Items[i].Image)
			}
		}
	}
}
Example #30
// waitForAnImageStreamTag waits until an image stream with the given name has a non-empty history for the given tag
func waitForAnImageStreamTag(oc *exutil.CLI, name, tag string) error {
	g.By(fmt.Sprintf("waiting for an is importer to import a tag %s into a stream %s", tag, name))
	start := time.Now()
	c := make(chan error)
	go func() {
		err := exutil.WaitForAnImageStream(
			oc.REST().ImageStreams(oc.Namespace()),
			name,
			func(is *imageapi.ImageStream) bool {
				if history, exists := is.Status.Tags[tag]; !exists || len(history.Items) == 0 {
					return false
				}
				return true
			},
			func(is *imageapi.ImageStream) bool {
				return time.Now().After(start.Add(waitTimeout))
			})
		c <- err
	}()

	select {
	case e := <-c:
		return e
	case <-time.After(waitTimeout):
		return fmt.Errorf("timed out while waiting of an image stream tag %s/%s:%s", oc.Namespace(), name, tag)
	}
}
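Finally, a sketch of tagging an image into a stream and waiting for the import with the helper above; the source image, stream name, and tag are hypothetical, and the oc tag invocation is an assumption about the available CLI commands.
func imageStreamTagWaitExample(oc *exutil.CLI) {
	// Tag a hypothetical source image into "myis:1" and wait until the stream records the import.
	err := oc.Run("tag").Args("openshift/ruby:latest", "myis:1").Execute()
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(waitForAnImageStreamTag(oc, "myis", "1")).NotTo(o.HaveOccurred())
}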