Example #1
// ensureRegistryAcceptsSchema2 checks whether the registry is configured to accept manifest V2 schema 2.
// If the current setting does not match the given accept argument, the registry's deployment config is
// updated accordingly and the function blocks until the registry is re-deployed and ready for new requests.
func ensureRegistryAcceptsSchema2(oc *exutil.CLI, accept bool) error {
	ns := oc.Namespace()
	oc = oc.SetNamespace(kapi.NamespaceDefault).AsAdmin()
	defer oc.SetNamespace(ns)
	env, err := oc.Run("env").Args("dc/docker-registry", "--list").Output()
	if err != nil {
		return err
	}

	value := fmt.Sprintf("%s=%t", dockerregistryserver.AcceptSchema2EnvVar, accept)
	if strings.Contains(env, value) {
		if accept {
			g.By("docker-registry is already configured to accept schema 2")
		} else {
			g.By("docker-registry is already configured to refuse schema 2")
		}
		return nil
	}

	dc, err := oc.Client().DeploymentConfigs(kapi.NamespaceDefault).Get("docker-registry")
	if err != nil {
		return err
	}
	waitForVersion := dc.Status.LatestVersion + 1

	g.By("configuring Docker registry to accept schema 2")
	err = oc.Run("env").Args("dc/docker-registry", value).Execute()
	if err != nil {
		return fmt.Errorf("failed to update registry's environment with %s: %v", &waitForVersion, err)
	}
	return exutil.WaitForRegistry(oc.AdminClient(), oc.AdminKubeClient(), &waitForVersion, oc)
}
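A minimal usage sketch, assuming a ginkgo suite where oc is the usual exutil.CLI; the surrounding It block and the restore via defer are illustrative, not taken from the original tests:
g.It("accepts schema 2 manifests once enabled", func() {
	// Hypothetical: enable schema 2 for this test only.
	err := ensureRegistryAcceptsSchema2(oc, true)
	o.Expect(err).NotTo(o.HaveOccurred())
	defer func() {
		// Best-effort restore of the refusing configuration.
		o.Expect(ensureRegistryAcceptsSchema2(oc, false)).NotTo(o.HaveOccurred())
	}()
	// ... push an image with a schema 2 manifest here ...
})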
Example #2
// CorruptImage is a helper that tags the corruptor image under the name of the image to be corrupted, the corruptee, so that the wrong image is used when the corruptee is referenced later on; strategy is included in the ginkgo debug output; ginkgo error checking is leveraged
func CorruptImage(corruptee, corruptor, strategy string) {
	g.By(fmt.Sprintf("\n%s  Calling docker tag to corrupt %s builder image %s by tagging %s", time.Now().Format(time.RFC850), strategy, corruptee, corruptor))

	cerr := TagImage(corruptee, corruptor)

	g.By(fmt.Sprintf("\n%s  Tagging %s to %s complete with err %v", time.Now().Format(time.RFC850), corruptor, corruptee, cerr))
	o.Expect(cerr).NotTo(o.HaveOccurred())
}
// CorruptImage is a helper that tags the corruptor image under the name of the image to be corrupted, the corruptee, so that the wrong image is used when the corruptee is referenced later on; ginkgo error checking is leveraged
func CorruptImage(corruptee, corruptor string) {
	g.By(fmt.Sprintf("Calling docker tag to corrupt builder image %s by tagging %s", corruptee, corruptor))

	cerr := TagImage(corruptee, corruptor)

	g.By(fmt.Sprintf("Tagging %s to %s complete with err %v", corruptor, corruptee, cerr))
	o.Expect(cerr).NotTo(o.HaveOccurred())
	VerifyImagesSame(corruptee, corruptor, "image corruption")
}
// ResetImage is a helper that allows the programmer to undo any corruption performed by CorruptImage; ginkgo error checking is leveraged
func ResetImage(tags map[string]string) {
	g.By(fmt.Sprintf("Calling docker tag to reset images"))

	for corruptedTag, goodTag := range tags {
		err := TagImage(corruptedTag, goodTag)
		g.By(fmt.Sprintf("Reset for %s to %s complete with err %v", corruptedTag, goodTag, err))
		o.Expect(err).NotTo(o.HaveOccurred())
	}
}
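The corrupt/verify/reset helpers are meant to be combined into a round trip; a sketch with illustrative tag names (the app-backup tag is an assumption, created up front so ResetImage has a good image to restore from):
// Hypothetical round trip; "app", "wrong" and "app-backup" are made-up tags.
err := TagImage("app-backup", "app") // keep a pointer to the good image
o.Expect(err).NotTo(o.HaveOccurred())
CorruptImage("app", "wrong")
// ... exercise the code path that should now pick up the wrong image ...
ResetImage(map[string]string{"app": "app-backup"})
VerifyImagesSame("app", "app-backup", "after reset")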
// bumpLimit changes the max limit value of the given resource across all limit types of the limit range object
func bumpLimit(oc *exutil.CLI, resourceName kapi.ResourceName, limit string) (kapi.ResourceList, error) {
	g.By(fmt.Sprintf("bump a limit on resource %q to %s", resourceName, limit))
	lr, err := oc.AdminKubeClient().Core().LimitRanges(oc.Namespace()).Get(limitRangeName)
	if err != nil {
		return nil, err
	}
	res := kapi.ResourceList{}

	change := false
	for i := range lr.Spec.Limits {
		item := &lr.Spec.Limits[i]
		if old, exists := item.Max[resourceName]; exists {
			for k, v := range item.Max {
				res[k] = v
			}
			parsed := resource.MustParse(limit)
			if old.Cmp(parsed) != 0 {
				item.Max[resourceName] = parsed
				change = true
			}
		}
	}

	if !change {
		return res, nil
	}
	_, err = oc.AdminKubeClient().Core().LimitRanges(oc.Namespace()).Update(lr)
	return res, err
}
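A hedged call sketch; the resource name and quantity are illustrative, and the returned list mirrors the max limits of the matching limit type:
// Hypothetical: raise the per-image storage cap to 200Mi.
limits, err := bumpLimit(oc, kapi.ResourceStorage, "200Mi")
o.Expect(err).NotTo(o.HaveOccurred())
g.By(fmt.Sprintf("limit range max values: %v", limits))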
Example #6
// WaitForAnImageStreamTag waits until an image stream with given name has non-empty history for given tag.
func WaitForAnImageStreamTag(oc *CLI, namespace, name, tag string) error {
	waitTimeout := time.Second * 60
	g.By(fmt.Sprintf("waiting for an is importer to import a tag %s into a stream %s", tag, name))
	start := time.Now()
	c := make(chan error)
	go func() {
		err := WaitForAnImageStream(
			oc.REST().ImageStreams(namespace),
			name,
			func(is *imageapi.ImageStream) bool {
				if history, exists := is.Status.Tags[tag]; !exists || len(history.Items) == 0 {
					return false
				}
				return true
			},
			func(is *imageapi.ImageStream) bool {
				return time.Now().After(start.Add(waitTimeout))
			})
		c <- err
	}()

	select {
	case e := <-c:
		return e
	case <-time.After(waitTimeout):
		return fmt.Errorf("timed out while waiting of an image stream tag %s/%s:%s", namespace, name, tag)
	}
}
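A usage sketch; the import command, source image, and stream name are illustrative, not from the original tests:
// Hypothetical: trigger an import and block until the tag has history.
err := oc.Run("import-image").Args("mystream:latest", "--from=docker.io/library/busybox:latest", "--confirm").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = WaitForAnImageStreamTag(oc, oc.Namespace(), "mystream", "latest")
o.Expect(err).NotTo(o.HaveOccurred())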
Example #7
// NewSampleRepoTest creates a function for a new ginkgo test case that will instantiate a template
// from a url, kick off the buildconfig defined in that template, wait for the build/deploy,
// and then confirm the application is serving an expected string value.
func NewSampleRepoTest(c SampleRepoConfig) func() {
	return func() {
		defer g.GinkgoRecover()
		var oc = exutil.NewCLI(c.repoName+"-repo-test", exutil.KubeConfigPath())

		g.JustBeforeEach(func() {
			g.By("Waiting for builder service account")
			err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
			o.Expect(err).NotTo(o.HaveOccurred())
		})

		g.Describe("Building "+c.repoName+" app from new-app", func() {
			g.It("should build a "+c.repoName+" image and run it in a pod", func() {
				oc.SetOutputDir(exutil.TestContext.OutputDir)

				exutil.CheckOpenShiftNamespaceImageStreams(oc)
				g.By(fmt.Sprintf("calling oc new-app with the " + c.repoName + " example template"))
				err := oc.Run("new-app").Args("-f", c.templateURL).Execute()
				o.Expect(err).NotTo(o.HaveOccurred())

				// all the templates automatically start a build.
				buildName := c.buildConfigName + "-1"

				g.By("expecting the build is in the Complete phase")
				err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), buildName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
				if err != nil {
					exutil.DumpBuildLogs(c.buildConfigName, oc)
				}
				o.Expect(err).NotTo(o.HaveOccurred())

				g.By("expecting the app deployment to be complete")
				err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), c.deploymentConfigName, oc)
				o.Expect(err).NotTo(o.HaveOccurred())

				if len(c.dbDeploymentConfigName) > 0 {
					g.By("expecting the db deployment to be complete")
					err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), c.dbDeploymentConfigName, oc)
					o.Expect(err).NotTo(o.HaveOccurred())
				}

				g.By("expecting the service is available")
				serviceIP, err := oc.Run("get").Args("service", c.serviceName).Template("{{ .spec.clusterIP }}").Output()
				o.Expect(err).NotTo(o.HaveOccurred())
				o.Expect(serviceIP).ShouldNot(o.Equal(""))

				g.By("expecting an endpoint is available")
				err = oc.KubeFramework().WaitForAnEndpoint(c.serviceName)
				o.Expect(err).NotTo(o.HaveOccurred())

				g.By("verifying string from app request")
				response, err := exutil.FetchURL("http://"+serviceIP+":8080"+c.appPath, 30*time.Second)
				o.Expect(err).NotTo(o.HaveOccurred())
				o.Expect(response).Should(o.ContainSubstring(c.expectedString))
			})
		})
	}
}
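The factory plugs straight into ginkgo; a sketch from within the same package, with made-up values for the SampleRepoConfig fields that the function body above reads:
// Hypothetical registration; every field value here is illustrative.
var _ = g.Describe("[image_ecosystem] ruby sample repo", NewSampleRepoTest(
	SampleRepoConfig{
		repoName:             "ruby-hello-world",
		templateURL:          "https://example.com/ruby-template.json",
		buildConfigName:      "ruby-hello-world",
		deploymentConfigName: "ruby-hello-world",
		serviceName:          "ruby-hello-world",
		appPath:              "/",
		expectedString:       "Hello, world!",
	},
))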
// waitForLimitSync waits, with a short timeout, until the quota's hard limit matches the given resource list
func waitForLimitSync(oc *exutil.CLI, hardLimit kapi.ResourceList) error {
	g.By(fmt.Sprintf("waiting for resource quota %s to get updated", quotaName))
	return testutil.WaitForResourceQuotaLimitSync(
		oc.KubeClient().Core().ResourceQuotas(oc.Namespace()),
		quotaName,
		hardLimit,
		waitTimeout)
}
Example #9
// DumpAndReturnTagging takes an array of tags, obtains the hex image IDs, dumps them to ginkgo for printing, and then returns them
func DumpAndReturnTagging(tags []string) []string {
	hexIDs, err := GetImageIDForTags(tags)
	o.Expect(err).NotTo(o.HaveOccurred())
	for i, hexID := range hexIDs {
		g.By(fmt.Sprintf("tag %s hex id %s ", tags[i], hexID))
	}
	return hexIDs
}
Example #10
// WaitForBuild is a wrapper for WaitForABuild in this package that takes an oc/cli client and adds some ginkgo-based debug output; the build result is deliberately left unchecked (see the trailing comment)
func WaitForBuild(context, buildName string, oc *CLI) {
	g.By(fmt.Sprintf("\n%s %s:   waiting for %s", time.Now().Format(time.RFC850), context, buildName))
	WaitForABuild(oc.REST().Builds(oc.Namespace()), buildName,
		// The build passed
		func(b *buildapi.Build) bool {
			return b.Name == buildName && b.Status.Phase == buildapi.BuildPhaseComplete
		},
		// The build failed
		func(b *buildapi.Build) bool {
			if b.Name != buildName {
				return false
			}
			return b.Status.Phase == buildapi.BuildPhaseFailed || b.Status.Phase == buildapi.BuildPhaseError
		},
	)
	// we do not care whether the build returned an error (that is entirely possible); we only check afterwards whether the image was updated or left the same, as appropriate
	g.By(fmt.Sprintf("\n%s %s   done waiting for %s", time.Now().Format(time.RFC850), context, buildName))
}
// DumpAndReturnTagging takes an array of tags, obtains the hex image IDs, dumps them to ginkgo for printing, and then returns them
func DumpAndReturnTagging(tags []string) ([]string, error) {
	hexIDs, err := GetImageIDForTags(tags)
	if err != nil {
		return nil, err
	}
	for i, hexID := range hexIDs {
		g.By(fmt.Sprintf("tag %s hex id %s ", tags[i], hexID))
	}
	return hexIDs, nil
}
Example #12
// NewRef creates a jenkins reference from an OC client
func NewRef(oc *exutil.CLI) *JenkinsRef {
	g.By("get ip and port for jenkins service")
	serviceIP, err := oc.Run("get").Args("svc", "jenkins", "--config", exutil.KubeConfigPath()).Template("{{.spec.clusterIP}}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	port, err := oc.Run("get").Args("svc", "jenkins", "--config", exutil.KubeConfigPath()).Template("{{ $x := index .spec.ports 0}}{{$x.port}}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())

	g.By("get admin password")
	password := GetAdminPassword(oc)
	o.Expect(password).ShouldNot(o.BeEmpty())

	j := &JenkinsRef{
		oc:        oc,
		host:      serviceIP,
		port:      port,
		namespace: oc.Namespace(),
		password:  password,
	}
	return j
}
Example #13
func checkSingleIdle(oc *exutil.CLI, idlingFile string, resources map[string][]string, resourceName string, kind string) {
	g.By("Idling the service")
	_, err := oc.Run("idle").Args("--resource-names-file", idlingFile).Output()
	o.Expect(err).ToNot(o.HaveOccurred())

	g.By("Ensuring the scale is zero (and stays zero)")
	objName := resources[resourceName][0]
	// make sure we don't get woken up by an incorrect router health check or anything like that
	o.Consistently(func() (string, error) {
		return oc.Run("get").Args(resourceName+"/"+objName, "--output=jsonpath=\"{.spec.replicas}\"").Output()
	}, 20*time.Second, 500*time.Millisecond).Should(o.ContainSubstring("0"))

	g.By("Fetching the service and checking the annotations are present")
	serviceName := resources["service"][0]
	endpoints, err := oc.KubeREST().Endpoints(oc.Namespace()).Get(serviceName)
	o.Expect(err).NotTo(o.HaveOccurred())

	o.Expect(endpoints.Annotations).To(o.HaveKey(unidlingapi.IdledAtAnnotation))
	o.Expect(endpoints.Annotations).To(o.HaveKey(unidlingapi.UnidleTargetAnnotation))

	g.By("Checking the idled-at time")
	idledAtAnnotation := endpoints.Annotations[unidlingapi.IdledAtAnnotation]
	idledAtTime, err := time.Parse(time.RFC3339, idledAtAnnotation)
	o.Expect(err).ToNot(o.HaveOccurred())
	o.Expect(idledAtTime).To(o.BeTemporally("~", time.Now(), 5*time.Minute))

	g.By("Checking the idle targets")
	unidleTargetAnnotation := endpoints.Annotations[unidlingapi.UnidleTargetAnnotation]
	unidleTargets := []unidlingapi.RecordedScaleReference{}
	err = json.Unmarshal([]byte(unidleTargetAnnotation), &unidleTargets)
	o.Expect(err).ToNot(o.HaveOccurred())
	o.Expect(unidleTargets).To(o.Equal([]unidlingapi.RecordedScaleReference{
		{
			Replicas: 2,
			CrossGroupObjectReference: unidlingapi.CrossGroupObjectReference{
				Name: resources[resourceName][0],
				Kind: kind,
			},
		},
	}))
}
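The resources argument maps a lower-case resource kind to the names of the objects created for the test; a hedged call sketch with illustrative names:
// Hypothetical: the idling file and object names are made up.
resources := map[string][]string{
	"replicationcontroller": {"idling-echo-1"},
	"service":               {"idling-echo"},
}
checkSingleIdle(oc, "/tmp/idling-resources", resources, "replicationcontroller", "ReplicationController")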
Example #14
// buildAndPushImage tries to build an image. The image is stored as an image stream tag <name>:<tag>. If
// shouldBeDenied is true, a build will be expected to fail with a denied error.
func buildAndPushImage(oc *exutil.CLI, namespace, name, tag string, shouldBeDenied bool) {
	istName := name
	if tag != "" {
		istName += ":" + tag
	}
	g.By(fmt.Sprintf("building an image %q", istName))

	bc, err := oc.REST().BuildConfigs(namespace).Get(name)
	if err == nil {
		g.By(fmt.Sprintf("changing build config %s to store result into %s", name, istName))
		o.Expect(bc.Spec.BuildSpec.Output.To.Kind).To(o.Equal("ImageStreamTag"))
		bc.Spec.BuildSpec.Output.To.Name = istName
		_, err := oc.REST().BuildConfigs(namespace).Update(bc)
		o.Expect(err).NotTo(o.HaveOccurred())
	} else {
		g.By(fmt.Sprintf("creating a new build config %s with output to %s ", name, istName))
		err = oc.Run("new-build").Args(
			"--binary",
			"--name", name,
			"--to", istName).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
	}

	tempDir, err := ioutil.TempDir("", "name-build")
	o.Expect(err).NotTo(o.HaveOccurred())

	err = createRandomBlob(path.Join(tempDir, "data"), imageSize)
	o.Expect(err).NotTo(o.HaveOccurred())
	err = ioutil.WriteFile(path.Join(tempDir, "Dockerfile"), []byte("FROM scratch\nCOPY data /data\n"), 0644)
	o.Expect(err).NotTo(o.HaveOccurred())

	err = oc.Run("start-build").Args(name, "--from-dir", tempDir, "--wait").Execute()
	if shouldBeDenied {
		o.Expect(err).To(o.HaveOccurred())
		out, err := oc.Run("logs").Args("bc/" + name).Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(out).Should(o.MatchRegexp("(?i)Failed to push image:.*denied"))
	} else {
		o.Expect(err).NotTo(o.HaveOccurred())
	}
}
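A sketch of how a quota test might drive the helper above, assuming a quota that admits the first image but denies the second; the name and tags are illustrative:
// Hypothetical: first push fits under the quota, the second must be denied.
buildAndPushImage(oc, oc.Namespace(), "sized", "first", false)
buildAndPushImage(oc, oc.Namespace(), "sized", "second", true)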
// VerifyImagesDifferent will take the two supplied image tags and see if they reference different hexadecimal image IDs; strategy is for ginkgo debug; ginkgo error checking is leveraged
func VerifyImagesDifferent(comp1, comp2, strategy string) {
	tag1 := comp1 + ":latest"
	tag2 := comp2 + ":latest"

	comps := []string{tag1, tag2}
	retIDs, gerr := GetImageIDForTags(comps)

	o.Expect(gerr).NotTo(o.HaveOccurred())
	g.By(fmt.Sprintf("%s  compare image - %s, %s, %s, %s", strategy, tag1, tag2, retIDs[0], retIDs[1]))
	o.Ω(len(retIDs[0])).Should(o.BeNumerically(">", 0))
	o.Ω(len(retIDs[1])).Should(o.BeNumerically(">", 0))
	o.Ω(retIDs[0] != retIDs[1]).Should(o.BeTrue())
}
Example #16
// VerifyImagesSame will take the two supplied image tags and see if they reference the same hexadecimal image ID; strategy is for ginkgo debug
func VerifyImagesSame(comp1, comp2, strategy string) {
	tag1 := comp1 + ":latest"
	tag2 := comp2 + ":latest"

	comps := []string{tag1, tag2}
	retIDs, gerr := GetImageIDForTags(comps)

	o.Expect(gerr).NotTo(o.HaveOccurred())
	g.By(fmt.Sprintf("\n%s %s  compare image - %s, %s, %s, %s", time.Now().Format(time.RFC850), strategy, tag1, tag2, retIDs[0], retIDs[1]))
	o.Ω(len(retIDs[0])).Should(o.BeNumerically(">", 0))
	o.Ω(len(retIDs[1])).Should(o.BeNumerically(">", 0))
	o.Ω(retIDs[0]).Should(o.Equal(retIDs[1]))
}
// waitForResourceQuotaSync waits, with a short timeout, until the usage of the quota reaches the given limit and returns the used resource list
func waitForResourceQuotaSync(oc *exutil.CLI, name string, expectedResources kapi.ResourceList) (kapi.ResourceList, error) {
	g.By(fmt.Sprintf("waiting for resource quota %s to get updated", name))
	used, err := exutil.WaitForResourceQuotaSync(
		oc.KubeClient().Core().ResourceQuotas(oc.Namespace()),
		name,
		expectedResources,
		false,
		waitTimeout,
	)
	if err != nil {
		return nil, err
	}
	return used, nil
}
Example #18
// deleteTestImages deletes test images built in the current namespace.
func deleteTestImages(oc *exutil.CLI) {
	g.By(fmt.Sprintf("Deleting images and image streams in project %q", oc.Namespace()))
	iss, err := oc.AdminClient().ImageStreams(oc.Namespace()).List(kapi.ListOptions{})
	if err != nil {
		return
	}
	for _, is := range iss.Items {
		for _, history := range is.Status.Tags {
			for i := range history.Items {
				oc.AdminClient().Images().Delete(history.Items[i].Image)
			}
		}
	}
}
// deleteTestImagesAndStreams deletes test images built in current and shared
// namespaces. It also deletes shared projects.
func deleteTestImagesAndStreams(oc *exutil.CLI) {
	for _, projectName := range []string{
		oc.Namespace() + "-s2",
		oc.Namespace() + "-s1",
		oc.Namespace() + "-shared",
		oc.Namespace(),
	} {
		g.By(fmt.Sprintf("Deleting images and image streams in project %q", projectName))
		iss, err := oc.AdminClient().ImageStreams(projectName).List(kapi.ListOptions{})
		if err != nil {
			continue
		}
		for _, is := range iss.Items {
			for _, history := range is.Status.Tags {
				for i := range history.Items {
					oc.AdminClient().Images().Delete(history.Items[i].Image)
				}
			}
			for _, tagRef := range is.Spec.Tags {
				switch tagRef.From.Kind {
				case "ImageStreamImage":
					_, id, err := imageapi.ParseImageStreamImageName(tagRef.From.Name)
					if err != nil {
						continue
					}
					oc.AdminClient().Images().Delete(id)
				}
			}
		}

		// let the extended framework take care of the current namespace
		if projectName != oc.Namespace() {
			g.By(fmt.Sprintf("Deleting project %q", projectName))
			oc.AdminClient().Projects().Delete(projectName)
		}
	}
}
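A deferred-cleanup sketch; registering the helper in an AfterEach (shown here hypothetically) removes the images from the current project and its derived -s1/-s2/-shared projects:
g.AfterEach(func() {
	// Hypothetical: clean up everything the image tests created.
	deleteTestImagesAndStreams(oc)
})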
func waitForNumberOfPodsWithLabel(oc *exutil.CLI, number int, label string) []string {
	g.By(fmt.Sprintf("expecting that there are %d running pods with label name=%s", number, label))

	podNames, err := exutil.WaitForPods(
		oc.KubeClient().Core().Pods(oc.Namespace()),
		exutil.ParseLabelsOrDie("name="+label),
		exutil.CheckPodIsRunningFn,
		number,
		1*time.Minute,
	)
	o.Expect(err).ShouldNot(o.HaveOccurred())
	o.Expect(podNames).Should(o.HaveLen(number))

	return podNames
}
// bumpQuota modifies the hard spec of the quota object with the given value. It returns the modified hard spec.
func bumpQuota(oc *exutil.CLI, resourceName kapi.ResourceName, value int64) (kapi.ResourceList, error) {
	g.By(fmt.Sprintf("bump the quota to %s=%d", resourceName, value))
	rq, err := oc.AdminKubeClient().Core().ResourceQuotas(oc.Namespace()).Get(quotaName)
	if err != nil {
		return nil, err
	}
	rq.Spec.Hard[resourceName] = *resource.NewQuantity(value, resource.DecimalSI)
	_, err = oc.AdminKubeClient().Core().ResourceQuotas(oc.Namespace()).Update(rq)
	if err != nil {
		return nil, err
	}
	err = waitForLimitSync(oc, rq.Spec.Hard)
	if err != nil {
		return nil, err
	}
	return rq.Spec.Hard, nil
}
// createResourceQuota creates a resource quota with the given hard limits in the current namespace and waits
// until the first usage refresh
func createResourceQuota(oc *exutil.CLI, hard kapi.ResourceList) (*kapi.ResourceQuota, error) {
	rq := &kapi.ResourceQuota{
		ObjectMeta: kapi.ObjectMeta{
			Name: quotaName,
		},
		Spec: kapi.ResourceQuotaSpec{
			Hard: hard,
		},
	}

	g.By(fmt.Sprintf("creating resource quota with a limit %v", hard))
	rq, err := oc.AdminKubeClient().Core().ResourceQuotas(oc.Namespace()).Create(rq)
	if err != nil {
		return nil, err
	}
	err = waitForLimitSync(oc, hard)
	return rq, err
}
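A sketch tying the quota helpers together, assuming the package-level quotaName and the imageapi.ResourceImageStreams constant; the quantities are illustrative:
// Hypothetical flow: create a quota of 3 image streams, raise it to 5,
// then wait for the usage to settle.
rq, err := createResourceQuota(oc, kapi.ResourceList{
	imageapi.ResourceImageStreams: resource.MustParse("3"),
})
o.Expect(err).NotTo(o.HaveOccurred())
hard, err := bumpQuota(oc, imageapi.ResourceImageStreams, 5)
o.Expect(err).NotTo(o.HaveOccurred())
used, err := waitForResourceQuotaSync(oc, rq.Name, hard)
o.Expect(err).NotTo(o.HaveOccurred())
g.By(fmt.Sprintf("quota usage settled at %v", used))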
Example #23
// StartJob triggers a named Jenkins job. The job can be monitored with the
// returned object.
func (j *JenkinsRef) StartJob(jobName string) *JobMon {
	lastBuildNumber, err := j.GetJobBuildNumber(jobName, time.Minute)
	o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())

	jmon := &JobMon{
		j:               j,
		lastBuildNumber: lastBuildNumber,
		buildNumber:     "",
		jobName:         jobName,
	}

	ginkgolog("Current build number for [%s]: %q", jobName, jmon.lastBuildNumber)
	g.By(fmt.Sprintf("Starting jenkins job: %s", jobName))
	_, status, err := j.PostXML(nil, "job/%s/build?delay=0sec", jobName)
	o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
	o.ExpectWithOffset(1, status).To(o.Equal(201))

	return jmon
}
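A sketch combining NewRef with StartJob; the job name is illustrative, and the returned JobMon would be used for subsequent monitoring:
// Hypothetical: point at the jenkins service and kick off a job.
j := NewRef(oc)
jmon := j.StartJob("mypipeline")
_ = jmon // poll the JobMon for the build result here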
// createLimitRangeOfType creates a new limit range object with the given limits for the given limit type in the current namespace
func createLimitRangeOfType(oc *exutil.CLI, limitType kapi.LimitType, maxLimits kapi.ResourceList) (*kapi.LimitRange, error) {
	lr := &kapi.LimitRange{
		ObjectMeta: kapi.ObjectMeta{
			Name: limitRangeName,
		},
		Spec: kapi.LimitRangeSpec{
			Limits: []kapi.LimitRangeItem{
				{
					Type: limitType,
					Max:  maxLimits,
				},
			},
		},
	}

	g.By(fmt.Sprintf("creating limit range object %q with %s limited to: %v", limitRangeName, limitType, maxLimits))
	lr, err := oc.AdminKubeClient().Core().LimitRanges(oc.Namespace()).Create(lr)
	return lr, err
}
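A hedged call sketch, assuming the imageapi.LimitTypeImage constant; the 100Mi cap is illustrative:
// Hypothetical: cap every image in this namespace at 100Mi of storage.
lr, err := createLimitRangeOfType(oc, imageapi.LimitTypeImage, kapi.ResourceList{
	kapi.ResourceStorage: resource.MustParse("100Mi"),
})
o.Expect(err).NotTo(o.HaveOccurred())
g.By(fmt.Sprintf("created limit range %s", lr.Name))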
Example #25
// BuildAuthConfiguration constructs a non-standard dockerClient.AuthConfiguration that can be used to communicate with the OpenShift internal docker registry
func BuildAuthConfiguration(credKey string, oc *CLI) (*dockerClient.AuthConfiguration, error) {
	authCfg := &dockerClient.AuthConfiguration{}
	secretList, err := oc.AdminKubeREST().Secrets(oc.Namespace()).List(kapi.ListOptions{})

	g.By(fmt.Sprintf("get secret list err %v ", err))
	if err == nil {
		for _, secret := range secretList.Items {
			g.By(fmt.Sprintf("secret name %s ", secret.ObjectMeta.Name))
			if strings.Contains(secret.ObjectMeta.Name, "builder-dockercfg") {
				dockercfgToken := secret.Data[".dockercfg"]
				dockercfgTokenJson := string(dockercfgToken)
				g.By(fmt.Sprintf("docker cfg token json %s ", dockercfgTokenJson))

				creds := credentialprovider.DockerConfig{}
				err = gson.Unmarshal(dockercfgToken, &creds)
				g.By(fmt.Sprintf("json unmarshal err %v ", err))
				if err == nil {

					// borrowed from openshift/origin/pkg/build/builder/cmd/dockercfg/cfg.go, but we get the
					// secrets and dockercfg data via `oc` vs. internal use of env vars and local file reading,
					// so we don't use the public methods present there
					keyring := credentialprovider.BasicDockerKeyring{}
					keyring.Add(creds)
					authConfs, found := keyring.Lookup(credKey)
					g.By(fmt.Sprintf("found auth %v with auth cfg len %d ", found, len(authConfs)))
					if !found || len(authConfs) == 0 {
						return authCfg, err
					}
					// have seen this does not get set
					if len(authConfs[0].ServerAddress) == 0 {
						authConfs[0].ServerAddress = credKey
					}
					g.By(fmt.Sprintf("dockercfg with svrAddr %s user %s pass %s email %s ", authConfs[0].ServerAddress, authConfs[0].Username, authConfs[0].Password, authConfs[0].Email))
					c := dockerClient.AuthConfiguration{Username: authConfs[0].Username, ServerAddress: authConfs[0].ServerAddress, Password: authConfs[0].Password}
					return &c, err
				}
			}
		}
	}
	return authCfg, err
}
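A usage sketch with the fsouza go-dockerclient; the registry host and push target are assumed values:
// Hypothetical: authenticate a push against the internal registry.
registryHost := "172.30.1.1:5000" // assumed service IP:port
auth, err := BuildAuthConfiguration(registryHost, oc)
o.Expect(err).NotTo(o.HaveOccurred())
client, err := dockerClient.NewClientFromEnv()
o.Expect(err).NotTo(o.HaveOccurred())
err = client.PushImage(dockerClient.PushImageOptions{
	Name: registryHost + "/" + oc.Namespace() + "/myapp",
	Tag:  "latest",
}, *auth)
o.Expect(err).NotTo(o.HaveOccurred())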
Example #26
	"time"

	exutil "github.com/openshift/origin/test/extended/util"
	"github.com/openshift/origin/test/extended/util/db"
)

var _ = g.Describe("[images][mongodb] openshift mongodb image", func() {
	defer g.GinkgoRecover()

	templatePath := exutil.FixturePath("..", "..", "examples", "db-templates", "mongodb-ephemeral-template.json")
	oc := exutil.NewCLI("mongodb-create", exutil.KubeConfigPath()).Verbose()

	g.Describe("creating from a template", func() {
		g.It(fmt.Sprintf("should process and create the %q template", templatePath), func() {

			g.By("creating a new app")
			o.Expect(oc.Run("new-app").Args("-f", templatePath).Execute()).Should(o.Succeed())

			g.By("waiting for the deployment to complete")
			err := exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "mongodb")
			if err != nil {
				exutil.DumpDeploymentLogs("mongodb", oc)
			}
			o.Expect(err).ShouldNot(o.HaveOccurred())

			g.By("expecting the mongodb pod is running")
			podNames, err := exutil.WaitForPods(
				oc.KubeREST().Pods(oc.Namespace()),
				exutil.ParseLabelsOrDie("name=mongodb"),
				exutil.CheckPodIsRunningFn,
				1,
Example #27
	o "github.com/onsi/gomega"

	exutil "github.com/openshift/origin/test/extended/util"
)

var _ = g.Describe("[builds][Slow] build can have Docker image source", func() {
	defer g.GinkgoRecover()
	var (
		buildFixture     = exutil.FixturePath("testdata", "test-imagesource-build.yaml")
		oc               = exutil.NewCLI("build-image-source", exutil.KubeConfigPath())
		imageSourceLabel = exutil.ParseLabelsOrDie("app=imagesourceapp")
		imageDockerLabel = exutil.ParseLabelsOrDie("app=imagedockerapp")
	)

	g.JustBeforeEach(func() {
		g.By("waiting for builder service account")
		err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By("waiting for imagestreams to be imported")
		err = exutil.WaitForAnImageStream(oc.AdminREST().ImageStreams("openshift"), "jenkins", exutil.CheckImageStreamLatestTagPopulatedFn, exutil.CheckImageStreamTagNotFoundFn)
		o.Expect(err).NotTo(o.HaveOccurred())
	})

	g.Describe("build with image source", func() {
		g.It("should complete successfully and contain the expected file", func() {
			g.By("Creating build configs for source build")
			err := oc.Run("create").Args("-f", buildFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			g.By("starting the source strategy build")
			err = oc.Run("start-build").Args("imagesourcebuild").Execute()
Example #28
var _ = g.Describe("[builds][Conformance] build without output image", func() {
	defer g.GinkgoRecover()
	var (
		dockerImageFixture = exutil.FixturePath("fixtures", "test-docker-no-outputname.json")
		s2iImageFixture    = exutil.FixturePath("fixtures", "test-s2i-no-outputname.json")
		oc                 = exutil.NewCLI("build-no-outputname", exutil.KubeConfigPath())
	)

	g.Describe("building from templates", func() {
		oc.SetOutputDir(exutil.TestContext.OutputDir)

		g.It(fmt.Sprintf("should create an image from %q docker template without an output image reference defined", dockerImageFixture), func() {
			err := oc.Run("create").Args("-f", dockerImageFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("expecting build to pass without an output image reference specified")
			out, err := oc.Run("start-build").Args("test-docker", "--follow", "--wait").Output()
			if err != nil {
				fmt.Fprintln(g.GinkgoWriter, out)
			}
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(out).Should(o.ContainSubstring(`Build does not have an Output defined, no output image was pushed to a registry.`))
		})

		g.It(fmt.Sprintf("should create an image from %q S2i template without an output image reference defined", s2iImageFixture), func() {
			err := oc.Run("create").Args("-f", s2iImageFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("expecting build to pass without an output image reference specified")
			out, err := oc.Run("start-build").Args("test-sti", "--follow", "--wait").Output()
			if err != nil {
Example #29
	defer g.GinkgoRecover()
	var (
		buildSecretBaseDir   = exutil.FixturePath("fixtures", "build-secrets")
		secretsFixture       = filepath.Join(buildSecretBaseDir, "test-secret.json")
		secondSecretsFixture = filepath.Join(buildSecretBaseDir, "test-secret-2.json")
		isFixture            = filepath.Join(buildSecretBaseDir, "test-is.json")
		dockerBuildFixture   = filepath.Join(buildSecretBaseDir, "test-docker-build.json")
		sourceBuildFixture   = filepath.Join(buildSecretBaseDir, "test-sti-build.json")
		oc                   = exutil.NewCLI("build-secrets", exutil.KubeConfigPath())
	)

	g.Describe("build with secrets", func() {
		oc.SetOutputDir(exutil.TestContext.OutputDir)

		g.It("should print the secrets during the source strategy build", func() {
			g.By("creating the sample secret files")
			err := oc.Run("create").Args("-f", secretsFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			err = oc.Run("create").Args("-f", secondSecretsFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("creating the sample source build config and image stream")
			err = oc.Run("create").Args("-f", isFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			err = oc.Run("create").Args("-f", sourceBuildFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("starting the sample source build")
			out, err := oc.Run("start-build").Args("test", "--follow", "--wait").Output()
			o.Expect(err).NotTo(o.HaveOccurred())
Example #30
func replicationTestFactory(oc *exutil.CLI, template string) func() {
	return func() {
		oc.SetOutputDir(exutil.TestContext.OutputDir)
		defer cleanup(oc)

		_, err := exutil.SetupHostPathVolumes(oc.AdminKubeREST().PersistentVolumes(), oc.Namespace(), "512Mi", 1)
		o.Expect(err).NotTo(o.HaveOccurred())

		err = testutil.WaitForPolicyUpdate(oc.REST(), oc.Namespace(), "create", "templates", true)
		o.Expect(err).NotTo(o.HaveOccurred())

		err = oc.Run("new-app").Args("-f", template).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		err = oc.Run("new-app").Args("-f", helperTemplate, "-p", fmt.Sprintf("DATABASE_SERVICE_NAME=%s", helperName)).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = oc.KubeFramework().WaitForAnEndpoint(helperName)
		o.Expect(err).NotTo(o.HaveOccurred())

		tableCounter := 0
		assertReplicationIsWorking := func(masterDeployment, slaveDeployment string, slaveCount int) (exutil.Database, []exutil.Database, exutil.Database) {
			tableCounter++
			table := fmt.Sprintf("table_%0.2d", tableCounter)

			master, slaves, helper := CreateMySQLReplicationHelpers(oc.KubeREST().Pods(oc.Namespace()), masterDeployment, slaveDeployment, fmt.Sprintf("%s-1", helperName), slaveCount)
			o.Expect(exutil.WaitUntilAllHelpersAreUp(oc, []exutil.Database{master, helper})).NotTo(o.HaveOccurred())
			o.Expect(exutil.WaitUntilAllHelpersAreUp(oc, slaves)).NotTo(o.HaveOccurred())

			// Test if we can query as root
			oc.KubeFramework().WaitForAnEndpoint("mysql-master")
			err := helper.TestRemoteLogin(oc, "mysql-master")
			o.Expect(err).NotTo(o.HaveOccurred())

			// Create a new table with random name
			_, err = master.Query(oc, fmt.Sprintf("CREATE TABLE %s (col1 VARCHAR(20), col2 VARCHAR(20));", table))
			o.Expect(err).NotTo(o.HaveOccurred())

			// Write new data to the table through master
			_, err = master.Query(oc, fmt.Sprintf("INSERT INTO %s (col1, col2) VALUES ('val1', 'val2');", table))
			o.Expect(err).NotTo(o.HaveOccurred())

			// Make sure data is present on master
			err = exutil.WaitForQueryOutput(oc, master, 10*time.Second, false, fmt.Sprintf("SELECT * FROM %s\\G;", table), "col1: val1\ncol2: val2")
			o.Expect(err).NotTo(o.HaveOccurred())

			// Make sure data was replicated to all slaves
			for _, slave := range slaves {
				err = exutil.WaitForQueryOutput(oc, slave, 90*time.Second, false, fmt.Sprintf("SELECT * FROM %s\\G;", table), "col1: val1\ncol2: val2")
				o.Expect(err).NotTo(o.HaveOccurred())
			}

			return master, slaves, helper
		}

		g.By("after initial deployment")
		master, _, _ := assertReplicationIsWorking("mysql-master-1", "mysql-slave-1", 1)

		g.By("after master is restarted by changing the Deployment Config")
		err = oc.Run("env").Args("dc", "mysql-master", "MYSQL_ROOT_PASSWORD=newpass").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		master, _, _ = assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		g.By("after master is restarted by deleting the pod")
		err = oc.Run("delete").Args("pod", "-l", "deployment=mysql-master-2").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		_, slaves, _ := assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		g.By("after slave is restarted by deleting the pod")
		err = oc.Run("delete").Args("pod", "-l", "deployment=mysql-slave-1").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), slaves[0].PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		pods, err := oc.KubeREST().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: exutil.ParseLabelsOrDie("deployment=mysql-slave-1")})
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(len(pods.Items)).To(o.Equal(1))

		g.By("after slave is scaled to 0 and then back to 4 replicas")
		err = oc.Run("scale").Args("dc", "mysql-slave", "--replicas=0").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		err = oc.Run("scale").Args("dc", "mysql-slave", "--replicas=4").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 4)
	}
}
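A sketch of how the factory might be wired into a suite; the suite name and template path are illustrative:
var _ = g.Describe("[db][mysql] replication", func() {
	defer g.GinkgoRecover()
	oc := exutil.NewCLI("mysql-replication", exutil.KubeConfigPath())
	g.It("should replicate data to the slaves",
		replicationTestFactory(oc, exutil.FixturePath("..", "..", "examples", "db-templates", "mysql-replica-example.json")))
})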