Example #1
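// makeNamespaceGlobal joins the namespace to the cluster-wide network by assigning it the global NetID (0).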
func makeNamespaceGlobal(ns *api.Namespace) {
	client, err := testutil.GetClusterAdminClient(testexutil.KubeConfigPath())
	expectNoError(err)
	netns, err := client.NetNamespaces().Get(ns.Name)
	expectNoError(err)
	netns.NetID = 0
	_, err = client.NetNamespaces().Update(netns)
	expectNoError(err)
}
Example #2
// NewSampleRepoTest creates a function for a new ginkgo test case that will instantiate a template
// from a url, kick off the buildconfig defined in that template, wait for the build/deploy,
// and then confirm the application is serving an expected string value.
func NewSampleRepoTest(c SampleRepoConfig) func() {
	return func() {
		defer g.GinkgoRecover()
		var oc = exutil.NewCLI(c.repoName+"-repo-test", exutil.KubeConfigPath())

		g.JustBeforeEach(func() {
			g.By("Waiting for builder service account")
			err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
			o.Expect(err).NotTo(o.HaveOccurred())
		})

		g.Describe("Building "+c.repoName+" app from new-app", func() {
			g.It(fmt.Sprintf("should build a "+c.repoName+" image and run it in a pod"), func() {
				oc.SetOutputDir(exutil.TestContext.OutputDir)

				exutil.CheckOpenShiftNamespaceImageStreams(oc)
				g.By(fmt.Sprintf("calling oc new-app with the " + c.repoName + " example template"))
				err := oc.Run("new-app").Args("-f", c.templateURL).Execute()
				o.Expect(err).NotTo(o.HaveOccurred())

				// all the templates automatically start a build.
				buildName := c.buildConfigName + "-1"

				g.By("expecting the build is in the Complete phase")
				err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), buildName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
				if err != nil {
					exutil.DumpBuildLogs(c.buildConfigName, oc)
				}
				o.Expect(err).NotTo(o.HaveOccurred())

				g.By("expecting the app deployment to be complete")
				err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), c.deploymentConfigName, oc)
				o.Expect(err).NotTo(o.HaveOccurred())

				if len(c.dbDeploymentConfigName) > 0 {
					g.By("expecting the db deployment to be complete")
					err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), c.dbDeploymentConfigName, oc)
					o.Expect(err).NotTo(o.HaveOccurred())
				}

				g.By("expecting the service is available")
				serviceIP, err := oc.Run("get").Args("service", c.serviceName).Template("{{ .spec.clusterIP }}").Output()
				o.Expect(err).NotTo(o.HaveOccurred())
				o.Expect(serviceIP).ShouldNot(o.Equal(""))

				g.By("expecting an endpoint is available")
				err = oc.KubeFramework().WaitForAnEndpoint(c.serviceName)
				o.Expect(err).NotTo(o.HaveOccurred())

				g.By("verifying string from app request")
				response, err := exutil.FetchURL("http://"+serviceIP+":8080"+c.appPath, time.Duration(30*time.Second))
				o.Expect(err).NotTo(o.HaveOccurred())
				o.Expect(response).Should(o.ContainSubstring(c.expectedString))
			})
		})
	}
}
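A hypothetical registration of this helper follows; the SampleRepoConfig field names mirror those referenced above, while every literal value is an illustrative placeholder rather than a real template:

var _ = g.Describe("[image_ecosystem] sample application repository", NewSampleRepoTest(
	SampleRepoConfig{
		repoName:               "rails-postgresql",
		templateURL:            "https://example.com/rails-postgresql.json", // placeholder URL
		buildConfigName:        "rails-postgresql-example",
		deploymentConfigName:   "rails-postgresql-example",
		dbDeploymentConfigName: "postgresql",
		serviceName:            "rails-postgresql-example",
		appPath:                "/articles",
		expectedString:         "Listing articles",
	},
))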
Example #3
// NewRef creates a jenkins reference from an OC client
func NewRef(oc *exutil.CLI) *JenkinsRef {
	g.By("get ip and port for jenkins service")
	serviceIP, err := oc.Run("get").Args("svc", "jenkins", "--config", exutil.KubeConfigPath()).Template("{{.spec.clusterIP}}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	port, err := oc.Run("get").Args("svc", "jenkins", "--config", exutil.KubeConfigPath()).Template("{{ $x := index .spec.ports 0}}{{$x.port}}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())

	g.By("get admin password")
	password := GetAdminPassword(oc)
	o.Expect(password).ShouldNot(o.BeEmpty())

	j := &JenkinsRef{
		oc:        oc,
		host:      serviceIP,
		port:      port,
		namespace: oc.Namespace(),
		password:  password,
	}
	return j
}
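Illustrative only: a sketch of how a spec might construct the reference, assuming a jenkins service has already been deployed into the test namespace; the suite and spec names are placeholders, and further JenkinsRef helpers (not shown in this excerpt) would use the captured host, port, and admin password:

var _ = g.Describe("[jenkins] example usage of NewRef", func() {
	defer g.GinkgoRecover()
	oc := exutil.NewCLI("jenkins-ref-example", exutil.KubeConfigPath())

	g.It("connects to the deployed Jenkins instance", func() {
		jenkins := NewRef(oc)
		o.Expect(jenkins.host).NotTo(o.BeEmpty())
		o.Expect(jenkins.port).NotTo(o.BeEmpty())
	})
})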
Example #4
// init initializes the extended testing suite.
// You can set these environment variables to configure extended tests:
// KUBECONFIG - Path to kubeconfig containing embedded authinfo
func init() {
	// Turn on verbose by default to get spec names
	config.DefaultReporterConfig.Verbose = true

	// Turn on EmitSpecProgress to get spec progress (especially on interrupt)
	config.GinkgoConfig.EmitSpecProgress = true

	// Keep spec ordering deterministic; flip to true to randomize specs as well as suites
	config.GinkgoConfig.RandomizeAllSpecs = false

	extendedOutputDir := filepath.Join(os.TempDir(), "openshift-extended-tests")
	os.MkdirAll(extendedOutputDir, 0755)

	flag.StringVar(&exutil.TestContext.KubeConfig, clientcmd.RecommendedConfigPathFlag, exutil.KubeConfigPath(), "Path to kubeconfig containing embedded authinfo.")
	flag.StringVar(&exutil.TestContext.OutputDir, "extended-tests-output-dir", extendedOutputDir, "Output directory for interesting/useful test data, like performance data, benchmarks, and other metrics.")

	// Override the default Kubernetes E2E configuration
	e2e.SetTestContext(exutil.TestContext)
}
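A minimal sketch of the kind of test entry point that would consume these flags, assuming the usual go test plus Ginkgo wiring used elsewhere in this repository; the function and suite names are illustrative:

import (
	"flag"
	"testing"

	g "github.com/onsi/ginkgo"
	o "github.com/onsi/gomega"
)

func TestExtended(t *testing.T) {
	flag.Parse() // picks up --kubeconfig and --extended-tests-output-dir registered in init
	o.RegisterFailHandler(g.Fail)
	g.RunSpecs(t, "OpenShift extended tests")
}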
Example #5
package builds

import (
	"time"

	g "github.com/onsi/ginkgo"
	o "github.com/onsi/gomega"

	exutil "github.com/openshift/origin/test/extended/util"
)

var _ = g.Describe("[builds][Slow] build can have Docker image source", func() {
	defer g.GinkgoRecover()
	var (
		buildFixture     = exutil.FixturePath("testdata", "test-imagesource-build.yaml")
		oc               = exutil.NewCLI("build-image-source", exutil.KubeConfigPath())
		imageSourceLabel = exutil.ParseLabelsOrDie("app=imagesourceapp")
		imageDockerLabel = exutil.ParseLabelsOrDie("app=imagedockerapp")
	)

	g.JustBeforeEach(func() {
		g.By("waiting for builder service account")
		err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By("waiting for imagestreams to be imported")
		err = exutil.WaitForAnImageStream(oc.AdminREST().ImageStreams("openshift"), "jenkins", exutil.CheckImageStreamLatestTagPopulatedFn, exutil.CheckImageStreamTagNotFoundFn)
		o.Expect(err).NotTo(o.HaveOccurred())
	})

	g.Describe("build with image source", func() {
Example #6
	buildapi "github.com/openshift/origin/pkg/build/api"
	exutil "github.com/openshift/origin/test/extended/util"
	kapi "k8s.io/kubernetes/pkg/api"
)

var _ = g.Describe("[builds] can use build secrets", func() {
	defer g.GinkgoRecover()
	var (
		buildSecretBaseDir   = exutil.FixturePath("fixtures", "build-secrets")
		secretsFixture       = filepath.Join(buildSecretBaseDir, "test-secret.json")
		secondSecretsFixture = filepath.Join(buildSecretBaseDir, "test-secret-2.json")
		isFixture            = filepath.Join(buildSecretBaseDir, "test-is.json")
		dockerBuildFixture   = filepath.Join(buildSecretBaseDir, "test-docker-build.json")
		sourceBuildFixture   = filepath.Join(buildSecretBaseDir, "test-sti-build.json")
		oc                   = exutil.NewCLI("build-secrets", exutil.KubeConfigPath())
	)

	g.Describe("build with secrets", func() {
		oc.SetOutputDir(exutil.TestContext.OutputDir)

		g.It("should print the secrets during the source strategy build", func() {
			g.By("creating the sample secret files")
			err := oc.Run("create").Args("-f", secretsFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			err = oc.Run("create").Args("-f", secondSecretsFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("creating the sample source build config and image stream")
			err = oc.Run("create").Args("-f", isFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
Example #7
package builds

import (
	"fmt"

	g "github.com/onsi/ginkgo"
	o "github.com/onsi/gomega"

	exutil "github.com/openshift/origin/test/extended/util"
)

var _ = g.Describe("[builds][Slow] build can have Dockerfile input", func() {
	defer g.GinkgoRecover()
	var (
		oc             = exutil.NewCLI("build-dockerfile-env", exutil.KubeConfigPath())
		testDockerfile = `
FROM openshift/origin-base
USER 1001
`
		testDockerfile2 = `
FROM centos:7
RUN yum install -y httpd
USER 1001
`
	)

	g.JustBeforeEach(func() {
		g.By("waiting for builder service account")
		err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
		o.Expect(err).NotTo(o.HaveOccurred())
		oc.SetOutputDir(exutil.TestContext.OutputDir)
Example #8
	"k8s.io/kubernetes/pkg/util/wait"

	g "github.com/onsi/ginkgo"
	o "github.com/onsi/gomega"

	exutil "github.com/openshift/origin/test/extended/util"
)

var _ = g.Describe("[builds][Slow] starting a build using CLI", func() {
	defer g.GinkgoRecover()
	var (
		buildFixture   = exutil.FixturePath("testdata", "test-build.json")
		exampleGemfile = exutil.FixturePath("testdata", "test-build-app", "Gemfile")
		exampleBuild   = exutil.FixturePath("testdata", "test-build-app")
		oc             = exutil.NewCLI("cli-start-build", exutil.KubeConfigPath())
	)

	g.JustBeforeEach(func() {
		g.By("waiting for builder service account")
		err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
		o.Expect(err).NotTo(o.HaveOccurred())
		oc.Run("create").Args("-f", buildFixture).Execute()
	})

	g.Describe("oc start-build --wait", func() {
		g.It("should start a build and wait for the build to complete", func() {
			g.By("starting the build with --wait flag")
			br, err := exutil.StartBuildAndWait(oc, "sample-build", "--wait")
			o.Expect(err).NotTo(o.HaveOccurred())
			br.AssertSuccess()
Example #9
import (
	"fmt"
	"time"

	g "github.com/onsi/ginkgo"
	o "github.com/onsi/gomega"

	exutil "github.com/openshift/origin/test/extended/util"
)

var _ = g.Describe("[images][php][Slow] hot deploy for openshift php image", func() {
	defer g.GinkgoRecover()
	var (
		cakephpTemplate = "https://raw.githubusercontent.com/openshift/cakephp-ex/master/openshift/templates/cakephp-mysql.json"
		oc              = exutil.NewCLI("s2i-php", exutil.KubeConfigPath())
		hotDeployParam  = "OPCACHE_REVALIDATE_FREQ=0"
		modifyCommand   = []string{"sed", "-ie", `s/\$result\['c'\]/1337/`, "app/View/Layouts/default.ctp"}
		pageCountFn     = func(count int) string { return fmt.Sprintf(`<span class="code" id="count-value">%d</span>`, count) }
		dcName          = "cakephp-mysql-example-1"
		dcLabel         = exutil.ParseLabelsOrDie(fmt.Sprintf("deployment=%s", dcName))
	)
	g.Describe("CakePHP example", func() {
		g.It(fmt.Sprintf("should work with hot deploy"), func() {
			oc.SetOutputDir(exutil.TestContext.OutputDir)

			g.By(fmt.Sprintf("calling oc new-app -f %q -p %q", cakephpTemplate, hotDeployParam))
			err := oc.Run("new-app").Args("-f", cakephpTemplate, "-p", hotDeployParam).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("waiting for build to finish")
Example #10
	exutil "github.com/openshift/origin/test/extended/util"
)

var _ = g.Describe("[builds][Slow] incremental s2i build", func() {
	defer g.GinkgoRecover()

	const (
		buildTestPod     = "build-test-pod"
		buildTestService = "build-test-svc"
	)

	var (
		templateFixture      = exutil.FixturePath("testdata", "incremental-auth-build.json")
		podAndServiceFixture = exutil.FixturePath("testdata", "test-build-podsvc.json")
		oc                   = exutil.NewCLI("build-sti-inc", exutil.KubeConfigPath())
	)

	g.JustBeforeEach(func() {
		g.By("waiting for builder service account")
		err := exutil.WaitForBuilderAccount(oc.AdminKubeREST().ServiceAccounts(oc.Namespace()))
		o.Expect(err).NotTo(o.HaveOccurred())
	})

	g.Describe("Building from a template", func() {
		g.It(fmt.Sprintf("should create a build from %q template and run it", templateFixture), func() {
			oc.SetOutputDir(exutil.TestContext.OutputDir)

			g.By(fmt.Sprintf("calling oc new-app -f %q", templateFixture))
			err := oc.Run("new-app").Args("-f", templateFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
Example #11
import (
	"fmt"
	"time"

	g "github.com/onsi/ginkgo"
	o "github.com/onsi/gomega"

	exutil "github.com/openshift/origin/test/extended/util"
)

var _ = g.Describe("images: s2i: python", func() {
	defer g.GinkgoRecover()

	var (
		oc               = exutil.NewCLI("s2i-python", exutil.KubeConfigPath())
		djangoRepository = "https://github.com/openshift/django-ex.git"
		modifyCommand    = []string{"sed", "-ie", `s/'count': PageView.objects.count()/'count': 1337/`, "welcome/views.py"}
		pageCountFn      = func(count int) string { return fmt.Sprintf("Page views: %d", count) }
		dcName           = "django-ex-1"
		dcLabel          = exutil.ParseLabelsOrDie(fmt.Sprintf("deployment=%s", dcName))
	)
	g.Describe("Django example", func() {
		g.It(fmt.Sprintf("should work with hot deploy"), func() {
			oc.SetOutputDir(exutil.TestContext.OutputDir)

			g.By(fmt.Sprintf("calling oc new-app %s", djangoRepository))
			err := oc.Run("new-app").Args(djangoRepository, "--strategy=source").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("waiting for build to finish")
Example #12
	"fmt"
	"time"

	g "github.com/onsi/ginkgo"
	o "github.com/onsi/gomega"
	exeutil "github.com/openshift/origin/test/extended/util"

	kapi "k8s.io/kubernetes/pkg/api"
	kapiextensions "k8s.io/kubernetes/pkg/apis/extensions"
)

var _ = g.Describe("[job] openshift can execute jobs", func() {
	defer g.GinkgoRecover()
	var (
		configPath = exeutil.FixturePath("fixtures", "job-controller.yaml")
		oc         = exeutil.NewCLI("job-controller", exeutil.KubeConfigPath())
	)
	g.Describe("controller", func() {
		g.It("should create and run a job in user project", func() {
			oc.SetOutputDir(exeutil.TestContext.OutputDir)
			g.By(fmt.Sprintf("creating a job from %q", configPath))
			err := oc.Run("create").Args("-f", configPath).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By(fmt.Sprintf("Waiting for pod..."))
			podNames, err := exeutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), exeutil.ParseLabelsOrDie("app=pi"), exeutil.CheckPodIsSucceededFn, 1, 2*time.Minute)
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(len(podNames)).Should(o.Equal(1))
			podName := podNames[0]

			g.By("retrieving logs from pod " + podName)
Example #13
package images

import (
	"fmt"

	g "github.com/onsi/ginkgo"
	o "github.com/onsi/gomega"

	exutil "github.com/openshift/origin/test/extended/util"
)

var _ = g.Describe("[images][mariadb][Slow] openshift mariadb image", func() {
	defer g.GinkgoRecover()
	var (
		templatePath = exutil.FixturePath("..", "..", "examples", "db-templates", "mariadb-ephemeral-template.json")
		oc           = exutil.NewCLI("mariadb-create", exutil.KubeConfigPath())
	)
	g.Describe("Creating from a template", func() {
		g.It(fmt.Sprintf("should process and create the %q template", templatePath), func() {
			oc.SetOutputDir(exutil.TestContext.OutputDir)

			g.By(fmt.Sprintf("calling oc process -f %q", templatePath))
			configFile, err := oc.Run("process").Args("-f", templatePath).OutputToFile("config.json")
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By(fmt.Sprintf("calling oc create -f %q", configFile))
			err = oc.Run("create").Args("-f", configFile).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			// oc.KubeFramework().WaitForAnEndpoint currently will wait forever;  for now, prefacing with our WaitForADeploymentToComplete,
			// which does have a timeout, since in most cases a failure in the service coming up stems from a failed deployment
Example #14
	kapi "k8s.io/kubernetes/pkg/api"

	dockerregistryserver "github.com/openshift/origin/pkg/dockerregistry/server"
	exutil "github.com/openshift/origin/test/extended/util"
	testutil "github.com/openshift/origin/test/util"
)

const testImageSize = 1024

type cleanUpContainer struct {
	imageNames []string
}

var _ = g.Describe("[images] prune images", func() {
	defer g.GinkgoRecover()
	var oc = exutil.NewCLI("prune-images", exutil.KubeConfigPath())
	var originalAcceptSchema2 *bool

	g.JustBeforeEach(func() {
		if originalAcceptSchema2 == nil {
			accepts, err := doesRegistryAcceptSchema2(oc)
			o.Expect(err).NotTo(o.HaveOccurred())
			originalAcceptSchema2 = &accepts
		}

		err := exutil.WaitForBuilderAccount(oc.KubeClient().Core().ServiceAccounts(oc.Namespace()))
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By(fmt.Sprintf("give a user %s a right to prune images with %s role", oc.Username(), "system:image-pruner"))
		err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-user", "system:image-pruner", oc.Username()).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
Example #15
	tags := []string{s2iDockBldr + ":latest", custBldr + ":latest"}
	hexIDs, ierr := exutil.GetImageIDForTags(tags)
	o.Expect(ierr).NotTo(o.HaveOccurred())
	for _, hexID := range hexIDs {
		g.By(fmt.Sprintf("\n%s FORCE PULL TEST:  hex id %s ", time.Now().Format(time.RFC850), hexID))
	}
	o.Expect(len(hexIDs)).To(o.Equal(2))
	resetData = map[string]string{s2iDockBldr: hexIDs[0], custBldr: hexIDs[1]}
	g.By(fmt.Sprintf("\n%s FORCE PULL TEST:  hex id for s2i/docker %s and for custom %s ", time.Now().Format(time.RFC850), hexIDs[0], hexIDs[1]))
})

// TODO this seems like a weird restriction with segregated namespaces.  provide a better explanation of why this doesn't work
// we don't run in parallel with this suite - do not want different tests tagging the same image in different ways at the same time
var _ = g.Describe("builds: serial: ForcePull from OpenShift induced builds (vs. sti)", func() {
	defer g.GinkgoRecover()
	var oc = exutil.NewCLI("force-pull-s2i", exutil.KubeConfigPath())

	g.JustBeforeEach(func() {
		g.By("waiting for builder service account")
		err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
		o.Expect(err).NotTo(o.HaveOccurred())
	})

	g.Describe("\n FORCE PULL TEST:  Force pull and s2i builder", func() {
		// corrupt the s2i builder image
		g.BeforeEach(func() {
			exutil.CorruptImage(s2iDockBldr, custBldr, "s21")
		})

		g.AfterEach(func() {
			exutil.ResetImage(resetData)
Example #16
	g "github.com/onsi/ginkgo"
	o "github.com/onsi/gomega"

	exutil "github.com/openshift/origin/test/extended/util"
)

var _ = g.Describe("[builds][pullsecret][Conformance] docker build using a pull secret", func() {
	defer g.GinkgoRecover()
	const (
		buildTestPod     = "build-test-pod"
		buildTestService = "build-test-svc"
	)

	var (
		buildFixture = exutil.FixturePath("testdata", "test-docker-build-pullsecret.json")
		oc           = exutil.NewCLI("docker-build-pullsecret", exutil.KubeConfigPath())
	)

	g.JustBeforeEach(func() {
		g.By("waiting for builder service account")
		err := exutil.WaitForBuilderAccount(oc.AdminKubeREST().ServiceAccounts(oc.Namespace()))
		o.Expect(err).NotTo(o.HaveOccurred())
	})

	g.Describe("Building from a template", func() {
		g.It("should create a docker build that pulls using a secret run it", func() {
			oc.SetOutputDir(exutil.TestContext.OutputDir)

			g.By(fmt.Sprintf("calling oc create -f %q", buildFixture))
			err := oc.Run("create").Args("-f", buildFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
Example #17
	"fmt"

	g "github.com/onsi/ginkgo"
	o "github.com/onsi/gomega"

	eximages "github.com/openshift/origin/test/extended/images"
	exutil "github.com/openshift/origin/test/extended/util"
)

var _ = g.Describe("[builds][Slow] result image should have proper labels set", func() {
	defer g.GinkgoRecover()
	var (
		imageStreamFixture = exutil.FixturePath("..", "integration", "testdata", "test-image-stream.json")
		stiBuildFixture    = exutil.FixturePath("testdata", "test-s2i-build.json")
		dockerBuildFixture = exutil.FixturePath("testdata", "test-docker-build.json")
		oc                 = exutil.NewCLI("build-sti-labels", exutil.KubeConfigPath())
	)

	g.JustBeforeEach(func() {
		g.By("waiting for builder service account")
		err := exutil.WaitForBuilderAccount(oc.AdminKubeREST().ServiceAccounts(oc.Namespace()))
		o.Expect(err).NotTo(o.HaveOccurred())
	})

	g.Describe("S2I build from a template", func() {
		g.It(fmt.Sprintf("should create a image from %q template with proper Docker labels", stiBuildFixture), func() {
			oc.SetOutputDir(exutil.TestContext.OutputDir)

			g.By(fmt.Sprintf("calling oc create -f %q", imageStreamFixture))
			err := oc.Run("create").Args("-f", imageStreamFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
Example #18
import (
	"fmt"

	g "github.com/onsi/ginkgo"
	o "github.com/onsi/gomega"

	buildapi "github.com/openshift/origin/pkg/build/api"
	exutil "github.com/openshift/origin/test/extended/util"
)

var _ = g.Describe("[builds][Slow] the s2i build should support proxies", func() {
	defer g.GinkgoRecover()
	var (
		buildFixture = exutil.FixturePath("..", "extended", "fixtures", "test-build-proxy.json")
		oc           = exutil.NewCLI("build-proxy", exutil.KubeConfigPath())
	)

	g.JustBeforeEach(func() {
		g.By("waiting for builder service account")
		err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
		o.Expect(err).NotTo(o.HaveOccurred())
		oc.Run("create").Args("-f", buildFixture).Execute()
	})

	g.Describe("start build with broken proxy", func() {
		g.It("should start a build and wait for the build to to fail", func() {
			g.By("starting the build with --wait and --follow flags")
			out, err := oc.Run("start-build").Args("sample-build", "--follow", "--wait").Output()
			if err != nil {
				fmt.Fprintln(g.GinkgoWriter, out)
Example #19
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/pkg/util/wait"
	e2e "k8s.io/kubernetes/test/e2e/framework"

	"github.com/openshift/origin/pkg/client"
	deployapi "github.com/openshift/origin/pkg/deploy/api"
	deployutil "github.com/openshift/origin/pkg/deploy/util"
	exutil "github.com/openshift/origin/test/extended/util"
)

const deploymentRunTimeout = 5 * time.Minute

var _ = g.Describe("deploymentconfigs", func() {
	defer g.GinkgoRecover()
	var (
		oc                              = exutil.NewCLI("cli-deployment", exutil.KubeConfigPath())
		deploymentFixture               = exutil.FixturePath("testdata", "test-deployment-test.yaml")
		simpleDeploymentFixture         = exutil.FixturePath("testdata", "deployment-simple.yaml")
		customDeploymentFixture         = exutil.FixturePath("testdata", "custom-deployment.yaml")
		generationFixture               = exutil.FixturePath("testdata", "generation-test.yaml")
		pausedDeploymentFixture         = exutil.FixturePath("testdata", "paused-deployment.yaml")
		failedHookFixture               = exutil.FixturePath("testdata", "failing-pre-hook.yaml")
		brokenDeploymentFixture         = exutil.FixturePath("testdata", "test-deployment-broken.yaml")
		historyLimitedDeploymentFixture = exutil.FixturePath("testdata", "deployment-history-limit.yaml")
		minReadySecondsFixture          = exutil.FixturePath("testdata", "deployment-min-ready-seconds.yaml")
		multipleICTFixture              = exutil.FixturePath("testdata", "deployment-example.yaml")
		tagImagesFixture                = exutil.FixturePath("testdata", "tag-images-deployment.yaml")
	)

	g.Describe("when run iteratively", func() {
		g.AfterEach(func() {
Example #20
package builds

import (
	"fmt"

	g "github.com/onsi/ginkgo"
	o "github.com/onsi/gomega"

	exutil "github.com/openshift/origin/test/extended/util"
)

var _ = g.Describe("[builds] build with empty source", func() {
	defer g.GinkgoRecover()
	var (
		buildFixture = exutil.FixturePath("..", "extended", "testdata", "test-nosrc-build.json")
		oc           = exutil.NewCLI("cli-build-nosrc", exutil.KubeConfigPath())
		exampleBuild = exutil.FixturePath("..", "extended", "testdata", "test-build-app")
	)

	g.JustBeforeEach(func() {
		g.By("waiting for builder service account")
		err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
		o.Expect(err).NotTo(o.HaveOccurred())
		oc.Run("create").Args("-f", buildFixture).Execute()
	})

	g.Describe("started build", func() {
		g.It("should build even with an empty source in build config", func() {
			g.By("starting the build with --wait flag")
			out, err := oc.Run("start-build").Args("nosrc-build", "--wait", fmt.Sprintf("--from-dir=%s", exampleBuild)).Output()
			o.Expect(err).NotTo(o.HaveOccurred())
Example #21
	o "github.com/onsi/gomega"

	kapi "k8s.io/kubernetes/pkg/api"
	e2e "k8s.io/kubernetes/test/e2e/framework"

	buildapi "github.com/openshift/origin/pkg/build/api"
	buildclient "github.com/openshift/origin/pkg/build/client"
	buildutil "github.com/openshift/origin/pkg/build/util"
	exutil "github.com/openshift/origin/test/extended/util"
)

var _ = g.Describe("[builds][Slow] using build configuration runPolicy", func() {
	defer g.GinkgoRecover()
	var (
		// Use invalid source here as we don't care about the result
		oc = exutil.NewCLI("cli-build-run-policy", exutil.KubeConfigPath())
	)

	g.JustBeforeEach(func() {
		g.By("waiting for builder service account")
		err := exutil.WaitForBuilderAccount(oc.KubeClient().Core().ServiceAccounts(oc.Namespace()))
		o.Expect(err).NotTo(o.HaveOccurred())
		// Create all fixtures
		oc.Run("create").Args("-f", exutil.FixturePath("testdata", "run_policy")).Execute()
	})

	g.Describe("build configuration with Parallel build run policy", func() {
		g.It("runs the builds in parallel", func() {
			g.By("starting multiple builds")
			var (
				startedBuilds []string
Example #22
	g "github.com/onsi/ginkgo"
	o "github.com/onsi/gomega"

	exutil "github.com/openshift/origin/test/extended/util"
)

var _ = g.Describe("[builds][quota][Slow] docker build with a quota", func() {
	defer g.GinkgoRecover()
	const (
		buildTestPod     = "build-test-pod"
		buildTestService = "build-test-svc"
	)

	var (
		buildFixture = exutil.FixturePath("testdata", "test-docker-build-quota.json")
		oc           = exutil.NewCLI("docker-build-quota", exutil.KubeConfigPath())
	)

	g.JustBeforeEach(func() {
		g.By("waiting for builder service account")
		err := exutil.WaitForBuilderAccount(oc.AdminKubeREST().ServiceAccounts(oc.Namespace()))
		o.Expect(err).NotTo(o.HaveOccurred())
	})

	g.Describe("Building from a template", func() {
		g.It("should create a docker build with a quota and run it", func() {
			oc.SetOutputDir(exutil.TestContext.OutputDir)

			g.By(fmt.Sprintf("calling oc create -f %q", buildFixture))
			err := oc.Run("create").Args("-f", buildFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
Example #23
	postgreSQLReplicationTemplate = "https://raw.githubusercontent.com/openshift/postgresql/master/examples/replica/postgresql_replica.json"
	postgreSQLEphemeralTemplate   = exutil.FixturePath("..", "..", "examples", "db-templates", "postgresql-ephemeral-template.json")
	postgreSQLHelperName          = "postgresql-helper"
	postgreSQLImages              = []string{
		"openshift/postgresql-92-centos7",
		"centos/postgresql-94-centos7",
		"registry.access.redhat.com/openshift3/postgresql-92-rhel7",
		"registry.access.redhat.com/rhscl/postgresql-94-rhel7",
	}
)

var _ = g.Describe("[LocalNode][images][postgresql][Slow] openshift postgresql replication", func() {
	defer g.GinkgoRecover()

	for i, image := range postgreSQLImages {
		oc := exutil.NewCLI(fmt.Sprintf("postgresql-replication-%d", i), exutil.KubeConfigPath())
		testFn := PostgreSQLReplicationTestFactory(oc, image)
		g.It(fmt.Sprintf("postgresql replication works for %s", image), testFn)
	}
})

// CreatePostgreSQLReplicationHelpers creates a set of PostgreSQL helpers for the master,
// the slaves, and an extra helper that is used for the remote login test.
func CreatePostgreSQLReplicationHelpers(c kclient.PodInterface, masterDeployment, slaveDeployment, helperDeployment string, slaveCount int) (exutil.Database, []exutil.Database, exutil.Database) {
	podNames, err := exutil.WaitForPods(c, exutil.ParseLabelsOrDie(fmt.Sprintf("deployment=%s", masterDeployment)), exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
	o.Expect(err).NotTo(o.HaveOccurred())
	masterPod := podNames[0]

	slavePods, err := exutil.WaitForPods(c, exutil.ParseLabelsOrDie(fmt.Sprintf("deployment=%s", slaveDeployment)), exutil.CheckPodIsRunningFn, slaveCount, 3*time.Minute)
	o.Expect(err).NotTo(o.HaveOccurred())
Example #24
	g "github.com/onsi/ginkgo"
	o "github.com/onsi/gomega"

	imageapi "github.com/openshift/origin/pkg/image/api"
	quotautil "github.com/openshift/origin/pkg/quota/util"
	imagesutil "github.com/openshift/origin/test/extended/images"
	exutil "github.com/openshift/origin/test/extended/util"
	testutil "github.com/openshift/origin/test/util"
)

const limitRangeName = "limits"

var _ = g.Describe("[imageapis] openshift limit range admission", func() {
	defer g.GinkgoRecover()
	var oc = exutil.NewCLI("limitrange-admission", exutil.KubeConfigPath())

	g.JustBeforeEach(func() {
		g.By("Waiting for builder service account")
		err := exutil.WaitForBuilderAccount(oc.KubeClient().Core().ServiceAccounts(oc.Namespace()))
		o.Expect(err).NotTo(o.HaveOccurred())
	})

	// needs to be run at the end of each It; cannot be run in AfterEach, which is run after the project
	// is destroyed
	tearDown := func(oc *exutil.CLI) {
		g.By(fmt.Sprintf("Deleting limit range %s", limitRangeName))
		oc.AdminKubeClient().Core().LimitRanges(oc.Namespace()).Delete(limitRangeName, nil)

		deleteTestImagesAndStreams(oc)
	}
Example #25
		err = oc.Run("delete").Args("pod", "-l", "deployment=mysql-slave-1").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), slaves[0].PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		pods, err := oc.KubeREST().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: exutil.ParseLabelsOrDie("deployment=mysql-slave-1")})
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(len(pods.Items)).To(o.Equal(1))

		g.By("after slave is scaled to 0 and then back to 4 replicas")
		err = oc.Run("scale").Args("dc", "mysql-slave", "--replicas=0").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		err = oc.Run("scale").Args("dc", "mysql-slave", "--replicas=4").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 4)
	}
}

var _ = g.Describe("images: mysql: replication", func() {
	defer g.GinkgoRecover()

	ocs := make([]*exutil.CLI, len(templatePaths))
	for i, template := range templatePaths {
		ocs[i] = exutil.NewCLI(fmt.Sprintf("mysql-replication-%d", i), exutil.KubeConfigPath())
		g.It(fmt.Sprintf("MySQL replication template %s", template), replicationTestFactory(ocs[i], template))
	}
})
Example #26
	"path/filepath"
	"strings"

	g "github.com/onsi/ginkgo"
	o "github.com/onsi/gomega"
	"k8s.io/kubernetes/pkg/fields"
	"k8s.io/kubernetes/pkg/labels"

	exutil "github.com/openshift/origin/test/extended/util"
)

var _ = g.Describe("cli: parallel: oc rsync", func() {
	defer g.GinkgoRecover()

	var (
		oc           = exutil.NewCLI("cli-rsync", exutil.KubeConfigPath())
		templatePath = exutil.FixturePath("..", "..", "examples", "jenkins", "jenkins-ephemeral-template.json")
		sourcePath1  = exutil.FixturePath("..", "..", "examples", "image-streams")
		sourcePath2  = exutil.FixturePath("..", "..", "examples", "sample-app")
		strategies   = []string{"rsync", "rsync-daemon", "tar"}
	)

	var podName string
	g.JustBeforeEach(func() {
		oc.SetOutputDir(exutil.TestContext.OutputDir)

		g.By(fmt.Sprintf("calling oc new-app -f %q", templatePath))
		err := oc.Run("new-app").Args("-f", templatePath).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By("expecting the jenkins service get endpoints")
Example #27
import (
	"fmt"

	g "github.com/onsi/ginkgo"
	o "github.com/onsi/gomega"

	exutil "github.com/openshift/origin/test/extended/util"
)

var _ = g.Describe("[builds][Conformance] build without output image", func() {
	defer g.GinkgoRecover()
	var (
		dockerImageFixture = exutil.FixturePath("fixtures", "test-docker-no-outputname.json")
		s2iImageFixture    = exutil.FixturePath("fixtures", "test-s2i-no-outputname.json")
		oc                 = exutil.NewCLI("build-no-outputname", exutil.KubeConfigPath())
	)

	g.Describe("building from templates", func() {
		oc.SetOutputDir(exutil.TestContext.OutputDir)

		g.It(fmt.Sprintf("should create an image from %q docker template without an output image reference defined", dockerImageFixture), func() {
			err := oc.Run("create").Args("-f", dockerImageFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("expecting build to pass without an output image reference specified")
			out, err := oc.Run("start-build").Args("test-docker", "--follow", "--wait").Output()
			if err != nil {
				fmt.Fprintln(g.GinkgoWriter, out)
			}
			o.Expect(err).NotTo(o.HaveOccurred())
Example #28
var _ = g.Describe("[image_ecosystem][mongodb] openshift mongodb replication", func() {
	defer g.GinkgoRecover()

	const (
		templatePath         = "https://raw.githubusercontent.com/sclorg/mongodb-container/master/2.4/examples/replica/mongodb-clustered.json"
		deploymentConfigName = "mongodb"
		expectedValue        = `{ "status" : "passed" }`
		insertCmd            = "db.bar.save(" + expectedValue + ")"
	)

	const (
		expectedReplicasAfterDeployment = 3
		expectedReplicasAfterScalingUp  = expectedReplicasAfterDeployment + 2
	)

	oc := exutil.NewCLI("mongodb-replica", exutil.KubeConfigPath()).Verbose()

	g.Describe("creating from a template", func() {
		g.It(fmt.Sprintf("should process and create the %q template", templatePath), func() {

			exutil.CheckOpenShiftNamespaceImageStreams(oc)
			g.By("creating a new app")
			o.Expect(oc.Run("new-app").Args("-f", templatePath).Execute()).Should(o.Succeed())

			g.By("waiting for the deployment to complete")
			err := exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), deploymentConfigName, oc)
			o.Expect(err).NotTo(o.HaveOccurred())

			podNames := waitForNumberOfPodsWithLabel(oc, expectedReplicasAfterDeployment, "mongodb-replica")
			mongo := db.NewMongoDB(podNames[0])
Example #29
	"fmt"

	g "github.com/onsi/ginkgo"
	o "github.com/onsi/gomega"

	"time"

	exutil "github.com/openshift/origin/test/extended/util"
	"github.com/openshift/origin/test/extended/util/db"
)

var _ = g.Describe("[images][mongodb] openshift mongodb image", func() {
	defer g.GinkgoRecover()

	templatePath := exutil.FixturePath("..", "..", "examples", "db-templates", "mongodb-ephemeral-template.json")
	oc := exutil.NewCLI("mongodb-create", exutil.KubeConfigPath()).Verbose()

	g.Describe("creating from a template", func() {
		g.It(fmt.Sprintf("should process and create the %q template", templatePath), func() {

			g.By("creating a new app")
			o.Expect(oc.Run("new-app").Args("-f", templatePath).Execute()).Should(o.Succeed())

			g.By("waiting for the deployment to complete")
			err := exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "mongodb", oc)
			if err != nil {
				exutil.DumpDeploymentLogs("mongodb", oc)
			}
			o.Expect(err).ShouldNot(o.HaveOccurred())

			g.By("expecting the mongodb pod is running")
Example #30
	}
}

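// jenkinsJobBytes loads the named Jenkins job fixture, substitutes PROJECT_NAME with the given namespace, and returns the resulting file contents.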
func jenkinsJobBytes(filename, namespace string) []byte {
	pre := exutil.FixturePath("fixtures", filename)
	post := exutil.ArtifactPath(filename)
	err := exutil.VarSubOnFile(pre, post, "PROJECT_NAME", namespace)
	o.Expect(err).NotTo(o.HaveOccurred())
	data, err := ioutil.ReadFile(post)
	o.Expect(err).NotTo(o.HaveOccurred())
	return data
}

var _ = g.Describe("[jenkins][Slow] openshift pipeline plugin", func() {
	defer g.GinkgoRecover()
	var oc = exutil.NewCLI("jenkins-plugin", exutil.KubeConfigPath())
	var hostPort string

	g.BeforeEach(func() {

		g.By("set up policy for jenkins jobs")
		err := oc.Run("policy").Args("add-role-to-user", "edit", "system:serviceaccount:"+oc.Namespace()+":default").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By("kick off the build for the jenkins ephermeral and application templates")
		tag := []string{"openshift/jenkins-plugin-snapshot-test:latest"}
		hexIDs, err := exutil.DumpAndReturnTagging(tag)
		var jenkinsEphemeralPath string
		var testingSnapshot bool
		if len(hexIDs) > 0 && err == nil {
			// found an openshift pipeline plugin test image, must be testing a proposed change to the plugin