// InitTest initializes the extended testing suite.
// You can set these environment variables to configure extended tests:
// KUBECONFIG - Path to kubeconfig containing embedded authinfo
// TEST_REPORT_DIR - If set, JUnit output will be written to this directory for each test
// TEST_REPORT_FILE_NAME - If set, will determine the name of the file that JUnit output is written to
func InitTest() {
	// Add hooks to skip all kubernetes or origin tests
	ginkgo.BeforeEach(checkSuiteSkips)

	extendedOutputDir := filepath.Join(os.TempDir(), "openshift-extended-tests")
	os.MkdirAll(extendedOutputDir, 0777)

	TestContext.DeleteNamespace = os.Getenv("DELETE_NAMESPACE") != "false"
	TestContext.VerifyServiceAccount = true
	TestContext.RepoRoot = os.Getenv("KUBE_REPO_ROOT")
	TestContext.KubeVolumeDir = os.Getenv("VOLUME_DIR")
	if len(TestContext.KubeVolumeDir) == 0 {
		TestContext.KubeVolumeDir = "/var/lib/origin/volumes"
	}
	TestContext.KubectlPath = "kubectl"
	TestContext.KubeConfig = KubeConfigPath()
	os.Setenv("KUBECONFIG", TestContext.KubeConfig)

	// load and set the host variable for kubectl
	clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(&clientcmd.ClientConfigLoadingRules{ExplicitPath: TestContext.KubeConfig}, &clientcmd.ConfigOverrides{})
	cfg, err := clientConfig.ClientConfig()
	if err != nil {
		FatalErr(err)
	}
	TestContext.Host = cfg.Host

	reportDir = os.Getenv("TEST_REPORT_DIR")

	reportFileName = os.Getenv("TEST_REPORT_FILE_NAME")
	if reportFileName == "" {
		reportFileName = "junit"
	}

	quiet = os.Getenv("TEST_OUTPUT_QUIET") == "true"
	//flag.StringVar(&TestContext.KubeConfig, clientcmd.RecommendedConfigPathFlag, KubeConfigPath(), "Path to kubeconfig containing embedded authinfo.")
	flag.StringVar(&TestContext.OutputDir, "extended-tests-output-dir", extendedOutputDir, "Output directory for interesting/useful test data, like performance data, benchmarks, and other metrics.")
	flag.StringVar(&config.GinkgoConfig.FocusString, "focus", "", "DEPRECATED: use --ginkgo.focus")

	// Ensure that Kube tests run privileged (like they do upstream)
	TestContext.CreateTestingNS = createTestingNS

	// Override the default Kubernetes E2E configuration
	e2e.SetTestContext(TestContext)
}
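// --- Illustrative sketch, not part of the original file ---
// A minimal suite entry point showing one way InitTest might be wired in
// before handing control to ginkgo. The package placement and TestExtended
// name are hypothetical assumptions; RegisterFailHandler and RunSpecs are
// the standard ginkgo v1 / gomega calls.

package extended

import (
	"testing"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

func TestExtended(t *testing.T) {
	InitTest() // assumed to live in this package, as in the snippet above
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "OpenShift extended test suite")
}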
package photon

import (
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"

	"github.com/vmware/photon-controller-go-sdk/photon/internal/mocks"
)

var _ = ginkgo.Describe("Info", func() {
	var (
		server *mocks.Server
		client *Client
	)

	ginkgo.BeforeEach(func() {
		server, client = testSetup()
	})

	ginkgo.AfterEach(func() {
		server.Close()
	})

	ginkgo.Describe("Get", func() {
		ginkgo.It("Get deployment info successfully", func() {
			baseVersion := "1.1.0"
			fullVersion := "1.1.0-bcea65f"
			gitCommitHash := "bcea65f"
			networkType := "SOFTWARE_DEFINED"

			server.SetResponseJson(200, Info{
				BaseVersion: baseVersion,
e2e "k8s.io/kubernetes/test/e2e/framework" exutil "github.com/openshift/origin/test/extended/util" ) var _ = g.Describe("[networking][router] weighted openshift router", func() { defer g.GinkgoRecover() var ( configPath = exutil.FixturePath("testdata", "weighted-router.yaml") oc = exutil.NewCLI("weighted-router", exutil.KubeConfigPath()) ) g.BeforeEach(func() { // defer oc.Run("delete").Args("-f", configPath).Execute() err := oc.AsAdmin().Run("adm").Args("policy", "add-cluster-role-to-user", "system:router", oc.Username()).Execute() o.Expect(err).NotTo(o.HaveOccurred()) err = oc.Run("create").Args("-f", configPath).Execute() o.Expect(err).NotTo(o.HaveOccurred()) }) g.Describe("The HAProxy router", func() { g.It("should appropriately serve a route that points to two services", func() { oc.SetOutputDir(exutil.TestContext.OutputDir) g.By(fmt.Sprintf("creating a weighted router from a config file %q", configPath)) var routerIP string err := wait.Poll(time.Second, 2*time.Minute, func() (bool, error) { pod, err := oc.KubeFramework().Client.Pods(oc.KubeFramework().Namespace.Name).Get("weighted-router") if err != nil {
bdd "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) type Rec struct { Id int `bson:"_id"` Name string } var _ = bdd.Describe("mongotest", func() { var ( testDb *TestDb ) bdd.BeforeEach(func() { reset.Enable() testDb = New("/unittest") }) bdd.AfterEach(func() { reset.Disable() session, err := mgo.Dial("127.0.0.1") Ω(err).Should(Succeed()) defer session.Close() // ensure database are deleted in reset dbs, err := session.DatabaseNames() Ω(err).Should(Succeed()) Ω(dbs).ShouldNot(ContainElement(mongo.DefaultTestDBName)) })
import (
	// "fmt"
	"github.com/CapillarySoftware/gostat/protoStat"
	gi "github.com/onsi/ginkgo"
	gom "github.com/onsi/gomega"
	nano "github.com/op/go-nanomsg"
	// "strings"
	"time"
)

var _ = gi.Describe("Goreport", func() {
	var rep Reporter

	gi.BeforeEach(func() {
		ReporterConfig("ipc:///tmp/goreportertest.ipc", 1)
		rep = NewReporter()
		gom.Expect(rep).ShouldNot(gom.BeNil())
	})

	gi.It("End to End integration test with stats", func() {
		pull, err := nano.NewPullSocket()
		gom.Expect(err).Should(gom.BeNil())
		pull.SetRecvTimeout(6 * time.Second)
		pull.SetRecvBuffer(1000)
		pull.Bind("ipc:///tmp/goreportertest.ipc")

		key := "key"
		rep.RegisterStat(key)
		rep.RegisterStatWIndex(key, "index")
		rep.AddStat(key, 2)
		rep.AddStat(key, 2)
		rep.AddStatWIndex(key, 2, "index")
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/test/e2e" exutil "github.com/openshift/origin/test/extended/util" ) var _ = g.Describe("Router", func() { defer g.GinkgoRecover() var ( configPath = exutil.FixturePath("fixtures", "scoped-router.yaml") oc = exutil.NewCLI("scoped-router", exutil.KubeConfigPath()) ) g.BeforeEach(func() { // defer oc.Run("delete").Args("-f", configPath).Execute() err := oc.Run("create").Args("-f", configPath).Execute() o.Expect(err).NotTo(o.HaveOccurred()) }) g.Describe("The HAProxy router", func() { g.It("should serve the correct routes when scoped to a single namespace and label set", func() { oc.SetOutputDir(exutil.TestContext.OutputDir) g.By(fmt.Sprintf("creating a scoped router from a config file %q", configPath)) var routerIP string err := wait.Poll(time.Second, 2*time.Minute, func() (bool, error) { pod, err := oc.KubeFramework().Client.Pods(oc.KubeFramework().Namespace.Name).Get("scoped-router") if err != nil { return false, err }
)

var _ = ginkgo.Describe("VirtualSubnet", func() {
	var (
		server      *mocks.Server
		client      *Client
		networkSpec *VirtualSubnetCreateSpec
	)

	var projectId = "project1"

	ginkgo.BeforeEach(func() {
		server, client = testSetup()
		networkSpec = &VirtualSubnetCreateSpec{
			Name:                 randomString(10, "go-sdk-virtual-network-"),
			Description:          "a test virtual network",
			RoutingType:          "ROUTED",
			Size:                 256,
			ReservedStaticIpSize: 20,
		}
	})

	ginkgo.AfterEach(func() {
		cleanVirtualSubnets(client, projectId)
		server.Close()
	})

	ginkgo.Describe("CreateDeleteVirtualSubnet", func() {
		ginkgo.It("Virtual subnet create and delete succeeds", func() {
			mockTask := createMockTask("CREATE_VIRTUAL_NETWORK", "COMPLETED")
			server.SetResponseJson(200, mockTask)
	g.BeforeEach(func() {
		g.By("set up policy for jenkins jobs")
		err := oc.Run("policy").Args("add-role-to-user", "edit", "system:serviceaccount:"+oc.Namespace()+":default").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By("kick off the build for the jenkins ephemeral and application templates")
		tag := []string{"openshift/jenkins-plugin-snapshot-test:latest"}
		hexIDs, err := exutil.DumpAndReturnTagging(tag)
		var jenkinsEphemeralPath string
		var testingSnapshot bool
		if len(hexIDs) > 0 && err == nil {
			// found an openshift pipeline plugin test image, must be testing a proposed change to the plugin
			jenkinsEphemeralPath = exutil.FixturePath("fixtures", "jenkins-ephemeral-template-test-new-plugin.json")
			testingSnapshot = true
		} else {
			// no test image, testing the base jenkins image with the current, supported version of the plugin
			jenkinsEphemeralPath = exutil.FixturePath("..", "..", "examples", "jenkins", "jenkins-ephemeral-template.json")
		}
		err = oc.Run("new-app").Args(jenkinsEphemeralPath).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		jenkinsApplicationPath := exutil.FixturePath("..", "..", "examples", "jenkins", "application-template.json")
		err = oc.Run("new-app").Args(jenkinsApplicationPath).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By("waiting for jenkins deployment")
		err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "jenkins")
		if err != nil {
			exutil.DumpDeploymentLogs("jenkins", oc)
		}
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By("get ip and port for jenkins service")
		serviceIP, err := oc.Run("get").Args("svc", "jenkins", "--config", exutil.KubeConfigPath()).Template("{{.spec.clusterIP}}").Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		port, err := oc.Run("get").Args("svc", "jenkins", "--config", exutil.KubeConfigPath()).Template("{{ $x := index .spec.ports 0}}{{$x.port}}").Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		hostPort = fmt.Sprintf("%s:%s", serviceIP, port)

		g.By("wait for jenkins to come up")
		err = waitForJenkinsActivity(fmt.Sprintf("http://%s", hostPort), "", 200)
		o.Expect(err).NotTo(o.HaveOccurred())

		if testingSnapshot {
			g.By("verifying the test image is being used")
			// for the test image, confirm that a snapshot version of the plugin is running in the jenkins image we'll test against
			err = waitForJenkinsActivity(fmt.Sprintf("http://%s/pluginManager/plugin/openshift-pipeline/thirdPartyLicenses", hostPort), `About OpenShift Pipeline Jenkins Plugin ([0-9\.]+)-SNAPSHOT`, 200)
		}
	})
)

var _ = bdd.Describe("reset - recover", func() {
	var (
		log       = ""
		appendLog = func(msg string) {
			log += msg + "\n"
		}
		assertLog = func(expected string) {
			Ω(log).Should(Equal(expected))
			log = ""
		}
	)

	bdd.BeforeEach(func() {
		log = ""
	})

	bdd.AfterEach(func() {
		ClearInternal()
		if Enabled() {
			Disable()
		}
	})

	bdd.It("One", func() {
		Register(func() {
			appendLog("onReset")
		}, func() {
			appendLog("onRecover")
		food        *messaging.Food
		swallowChan chan *messaging.Food
		wg          sync.WaitGroup
	)

	gi.BeforeEach(func() {
		timestamp = int64(time.Now().Unix())
		hostname = "hostname"
		tag = "tag"
		content = "content"
		priority = 1
		facility = 7
		severity = 2
		fType := messaging.RFC3164
		food = new(messaging.Food)
		food.Type = &fType
		msg := new(messaging.Rfc3164)
		msg.Timestamp = &timestamp
		msg.Hostname = &hostname
		msg.Tag = &tag
		msg.Content = &content
		msg.Priority = &priority
		msg.Severity = &severity
		food.Rfc3164 = append(food.Rfc3164, msg)
		swallowChan = make(chan *messaging.Food, 1) // blocking
	})

	gi.It("Test RFC3164", func() {
		// log.Info(food)
		db := new(DB)
		wg.Add(1)
	g.BeforeEach(func() {
		g.By("refresh corruptor, prep forcepull builder")
		exutil.PullImage(corruptor, dockerClient.AuthConfiguration{})
		exutil.DumpImage(corruptor)

		// create the image streams and build configs for the test-case-specific builders
		setupPath := exutil.FixturePath("testdata", "forcepull-setup.json")
		err := exutil.CreateResource(setupPath, oc)

		// kick off the build for the new builder image just for force pull so we can corrupt them without conflicting with
		// any other tests potentially running in parallel
		br, _ := exutil.StartBuildAndWait(oc, bldrPrefix)
		br.AssertSuccess()

		serviceIP, err := oc.Run("get").Args("svc", "docker-registry", "-n", "default", "--config", exutil.KubeConfigPath()).Template("{{.spec.clusterIP}}").Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		port, err := oc.Run("get").Args("svc", "docker-registry", "-n", "default", "--config", exutil.KubeConfigPath()).Template("{{ $x := index .spec.ports 0}}{{$x.port}}").Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		g.By(fmt.Sprintf("docker-registry service IP is %s and port %s", serviceIP, port))

		// get the auth so we can pull the build image from the internal docker registry, since the builder controller will remove it
		// from the docker daemon cache when the docker build completes
		authCfg, err = exutil.BuildAuthConfiguration(serviceIP+":"+port, oc)

		// now actually pull the image back in from the openshift internal docker registry
		fullImageName = authCfg.ServerAddress + "/" + oc.Namespace() + "/" + bldr
		err = exutil.PullImage(fullImageName, *authCfg)
		o.Expect(err).NotTo(o.HaveOccurred())
		exutil.DumpImage(fullImageName)

		// update the build configs in the json for the app/lang builds to point to the builder images in the internal docker registry
		// and then create the build config resources
		pre := exutil.FixturePath("testdata", "forcepull-test.json")
		post := exutil.ArtifactPath("forcepull-test.json")
		varSubDest = authCfg.ServerAddress + "/" + oc.Namespace()

		// grant access to the custom build strategy
		g.By("granting system:build-strategy-custom")
		err = oc.AsAdmin().Run("adm").Args("policy", "add-cluster-role-to-user", "system:build-strategy-custom", oc.Username()).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		defer func() {
			err = oc.AsAdmin().Run("adm").Args("policy", "remove-cluster-role-from-user", "system:build-strategy-custom", oc.Username()).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
		}()

		err = exutil.VarSubOnFile(pre, post, varSubSrc, varSubDest)
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.CreateResource(post, oc)
		o.Expect(err).NotTo(o.HaveOccurred())

		// dump the image textual tags and hex ids out for debug
		tags = []string{fullImageName + ":latest", corruptor + ":latest"}
		hexIDs, err := exutil.DumpAndReturnTagging(tags)
		o.Expect(err).NotTo(o.HaveOccurred())
		resetData = map[string]string{fullImageName: hexIDs[0], corruptor: hexIDs[1]}
	})
	g.BeforeEach(func() {
		g.By("set up policy for jenkins jobs")
		err := oc.Run("policy").Args("add-role-to-user", "edit", "system:serviceaccount:"+oc.Namespace()+":default").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By("kick off the build for the jenkins ephemeral and application templates")
		jenkinsEphemeralPath := exutil.FixturePath("..", "..", "examples", "jenkins", "jenkins-ephemeral-template.json")
		err = oc.Run("new-app").Args(jenkinsEphemeralPath).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		jenkinsApplicationPath := exutil.FixturePath("..", "..", "examples", "jenkins", "application-template.json")
		err = oc.Run("new-app").Args(jenkinsApplicationPath).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By("waiting for jenkins deployment")
		err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "jenkins")
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By("get ip and port for jenkins service")
		serviceIP, err := oc.Run("get").Args("svc", "jenkins", "--config", exutil.KubeConfigPath()).Template("{{.spec.clusterIP}}").Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		port, err := oc.Run("get").Args("svc", "jenkins", "--config", exutil.KubeConfigPath()).Template("{{ $x := index .spec.ports 0}}{{$x.port}}").Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		hostPort = fmt.Sprintf("%s:%s", serviceIP, port)

		g.By("wait for jenkins to come up")
		err = waitForJenkinsActivity(fmt.Sprintf("http://%s", hostPort), "", 200)
		o.Expect(err).NotTo(o.HaveOccurred())
	})
	g.BeforeEach(func() {
		testNamespace := oc.Namespace()

		jenkinsNamespace := oc.Namespace() + "-jenkins"
		g.By("Starting a Jenkins instance in namespace: " + jenkinsNamespace)
		oc.Run("new-project").Args(jenkinsNamespace).Execute()
		oc.SetNamespace(jenkinsNamespace)

		time.Sleep(10 * time.Second) // Give project time to initialize

		g.By("kick off the build for the jenkins ephemeral and application templates")
		tag := []string{"openshift/jenkins-plugin-snapshot-test:latest"}
		hexIDs, err := exutil.DumpAndReturnTagging(tag)
		var jenkinsEphemeralPath string
		var testingSnapshot bool
		if len(hexIDs) > 0 && err == nil {
			// found an openshift pipeline plugin test image, must be testing a proposed change to the plugin
			jenkinsEphemeralPath = exutil.FixturePath("testdata", "jenkins-ephemeral-template-test-new-plugin.json")
			testingSnapshot = true
		} else {
			// no test image, testing the base jenkins image with the current, supported version of the plugin
			//TODO disabling oauth until we can update getAdminPassword path to handle oauth (perhaps borrow from oauth integration tests)
			jenkinsEphemeralPath = exutil.FixturePath("testdata", "jenkins-ephemeral-template-no-oauth.json")
		}
		err = oc.Run("new-app").Args(jenkinsEphemeralPath).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By("waiting for jenkins deployment")
		err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "jenkins", oc)
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By("get ip and port for jenkins service")
		serviceIP, err := oc.Run("get").Args("svc", "jenkins", "--config", exutil.KubeConfigPath()).Template("{{.spec.clusterIP}}").Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		port, err := oc.Run("get").Args("svc", "jenkins", "--config", exutil.KubeConfigPath()).Template("{{ $x := index .spec.ports 0}}{{$x.port}}").Output()
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By("get admin password")
		password := getAdminPassword(oc)
		o.Expect(password).ShouldNot(o.BeEmpty())

		j = &JenkinsRef{
			oc:        oc,
			host:      serviceIP,
			port:      port,
			namespace: jenkinsNamespace,
			password:  password,
		}

		g.By("wait for jenkins to come up")
		_, err = j.waitForContent("", 200, 10*time.Minute, "")
		if err != nil {
			exutil.DumpDeploymentLogs("jenkins", oc)
		}
		o.Expect(err).NotTo(o.HaveOccurred())

		if testingSnapshot {
			g.By("verifying the test image is being used")
			// for the test image, confirm that a snapshot version of the plugin is running in the jenkins image we'll test against
			_, err = j.waitForContent(`About OpenShift Pipeline Jenkins Plugin ([0-9\.]+)-SNAPSHOT`, 200, 10*time.Minute, "/pluginManager/plugin/openshift-pipeline/thirdPartyLicenses")
			o.Expect(err).NotTo(o.HaveOccurred())
		}

		// Start capturing logs from this deployment config.
		// This command will terminate if the Jenkins instance crashes. This
		// ensures that even if the Jenkins DC restarts, we should capture
		// logs from the crash.
		dcLogFollow, dcLogStdOut, dcLogStdErr, err = oc.Run("logs").Args("-f", "dc/jenkins").Background()
		o.Expect(err).NotTo(o.HaveOccurred())

		oc.SetNamespace(testNamespace)

		g.By("set up policy for jenkins jobs in " + oc.Namespace())
		err = oc.Run("policy").Args("add-role-to-user", "edit", "system:serviceaccount:"+j.namespace+":jenkins").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		// Populate shared Jenkins namespace with artifacts that can be used by all tests
		loadFixture(oc, "shared-resources-template.json")

		// Allow resources to settle. ImageStream tags seem unavailable without this wait.
		time.Sleep(10 * time.Second)
	})
package rate

import (
	"time"

	"github.com/redforks/testing/reset"

	bdd "github.com/onsi/ginkgo"
	"github.com/redforks/hal/timeth"
	"github.com/stretchr/testify/assert"
)

var _ = bdd.Describe("limiter", func() {

	bdd.BeforeEach(func() {
		reset.Enable()
		timeth.Install()
	})

	bdd.AfterEach(func() {
		reset.Disable()
	})

	bdd.XIt("Accept one", func() {
		l := NewLimiter(1, 10*time.Second)
		assert.True(t(), l.Accept())

		timeth.Tick(time.Second)
		assert.False(t(), l.Accept())

		timeth.Tick(9*time.Second + time.Millisecond)
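// --- Illustrative sketch, not part of the original file ---
// One limiter shape consistent with the spec above: NewLimiter(1, 10*time.Second)
// accepts a single call, then rejects further calls until the window has fully
// elapsed. The real package's internals are unknown (and it uses timeth's mocked
// clock rather than time.Now); this is only a plausible sliding-window sketch.

package rate

import "time"

type limiter struct {
	count  int
	window time.Duration
	stamps []time.Time // times of recently accepted calls
}

// NewLimiter allows at most count Accepts per sliding window.
func NewLimiter(count int, window time.Duration) *limiter {
	return &limiter{count: count, window: window}
}

func (l *limiter) Accept() bool {
	now := time.Now()
	live := l.stamps[:0]
	for _, s := range l.stamps {
		if now.Sub(s) < l.window { // keep only stamps still inside the window
			live = append(live, s)
		}
	}
	l.stamps = live
	if len(l.stamps) >= l.count {
		return false // window budget exhausted
	}
	l.stamps = append(l.stamps, now)
	return true
}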
	var (
		jenkinsMasterTemplate       string
		jenkinsSlaveBuilderTemplate string
		jsonTempDir                 string
	)

	g.Describe("use of jenkins with kubernetes plugin", func() {
		oc.SetOutputDir(exutil.TestContext.OutputDir)

		g.BeforeEach(func() {
			var err error
			jsonTempDir, err = ioutil.TempDir(exutil.TestContext.OutputDir, "jenkins-kubernetes-")
			o.Expect(err).NotTo(o.HaveOccurred())

			// We need to prepare the templates first in order to use binary builds:
			// 1. remove BuildConfig triggers to not start build immediately after instantiating template,
			// 2. remove contextDir so that we can send just that directory as a binary, not the whole repo.
			jenkinsMasterTemplate = patchTemplate(filepath.Join(jenkinsExampleDir, "jenkins-master-template.json"), jsonTempDir)
			jenkinsSlaveBuilderTemplate = patchTemplate(filepath.Join(jenkinsExampleDir, "jenkins-slave-template.json"), jsonTempDir)
		})

		g.AfterEach(func() {
			if len(jsonTempDir) > 0 {
				os.RemoveAll(jsonTempDir)
			}
		})

		g.It("by creating slave from existing builder and adding it to Jenkins master", func() {
			g.By("create the jenkins slave builder template")
// TODO this seems like a weird restriction with segregated namespaces. provide a better explanation of why this doesn't work
// we don't run in parallel with this suite - do not want different tests tagging the same image in different ways at the same time
var _ = g.Describe("builds: serial: ForcePull from OpenShift induced builds (vs. sti)", func() {
	defer g.GinkgoRecover()
	var oc = exutil.NewCLI("force-pull-s2i", exutil.KubeConfigPath())

	g.JustBeforeEach(func() {
		g.By("waiting for builder service account")
		err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
		o.Expect(err).NotTo(o.HaveOccurred())
	})

	g.Describe("\n FORCE PULL TEST: Force pull and s2i builder", func() {
		// corrupt the s2i builder image
		g.BeforeEach(func() {
			exutil.CorruptImage(s2iDockBldr, custBldr, "s21")
		})

		g.AfterEach(func() {
			exutil.ResetImage(resetData)
		})

		g.JustBeforeEach(func() {
			g.By("waiting for builder service account")
			err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
			o.Expect(err).NotTo(o.HaveOccurred())
		})

		g.Context("\n FORCE PULL TEST: when s2i force pull is false and the image is bad", func() {
			g.It("\n FORCE PULL TEST s2i false", func() {
	g.BeforeEach(func() {
		testNamespace := oc.Namespace()

		jenkinsNamespace := oc.Namespace() + "-jenkins"
		g.By("Starting a Jenkins instance in namespace: " + jenkinsNamespace)
		oc.Run("new-project").Args(jenkinsNamespace).Execute()
		oc.SetNamespace(jenkinsNamespace)

		time.Sleep(10 * time.Second) // Give project time to initialize

		g.By("kick off the build for the jenkins ephemeral and application templates")
		tag := []string{localPluginSnapshotImage}
		hexIDs, err := exutil.DumpAndReturnTagging(tag)

		// If the user has expressed an interest in local plugin testing by setting the
		// SNAPSHOT_JENKINS_IMAGE environment variable, try to use the local image. Inform them
		// either way about which image is being used in case their test fails.
		snapshotImagePresent := len(hexIDs) > 0 && err == nil
		useSnapshotImage := os.Getenv(useLocalPluginSnapshotEnvVarName) != ""

		//TODO disabling oauth until we can update getAdminPassword path to handle oauth (perhaps borrow from oauth integration tests)
		newAppArgs := []string{exutil.FixturePath("..", "..", "examples", "jenkins", "jenkins-ephemeral-template.json"), "-p", "ENABLE_OAUTH=false"}

		if useSnapshotImage {
			g.By("Creating a snapshot Jenkins imagestream and overriding the default Jenkins imagestream")
			o.Expect(snapshotImagePresent).To(o.BeTrue())

			ginkgolog("")
			ginkgolog("")
			ginkgolog("IMPORTANT: You are testing a local jenkins snapshot image.")
			ginkgolog("In order to target the official image stream, you must unset %s before running extended tests.", useLocalPluginSnapshotEnvVarName)
			ginkgolog("")
			ginkgolog("")

			// Create an imagestream based on the Jenkins' plugin PR-Testing image (https://github.com/openshift/jenkins-plugin/blob/master/PR-Testing/README).
			snapshotImageStream := "jenkins-plugin-snapshot-test"
			err = oc.Run("new-build").Args("-D", fmt.Sprintf("FROM %s", localPluginSnapshotImage), "--to", snapshotImageStream).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			err = oc.Run("logs").Args("-f", "bc/jenkins-plugin-snapshot-test").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			// Supplant the normal imagestream with the local imagestream using template parameters
			newAppArgs = append(newAppArgs, "-p", fmt.Sprintf("NAMESPACE=%s", oc.Namespace()))
			newAppArgs = append(newAppArgs, "-p", fmt.Sprintf("JENKINS_IMAGE_STREAM_TAG=%s:latest", snapshotImageStream))
		} else {
			if snapshotImagePresent {
				ginkgolog("")
				ginkgolog("")
				ginkgolog("IMPORTANT: You have a local OpenShift jenkins snapshot image, but it is not being used for testing.")
				ginkgolog("In order to target your local image, you must set %s to some value before running extended tests.", useLocalPluginSnapshotEnvVarName)
				ginkgolog("")
				ginkgolog("")
			}
		}

		err = oc.Run("new-app").Args(newAppArgs...).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By("waiting for jenkins deployment")
		err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "jenkins", oc)
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By("get ip and port for jenkins service")
		serviceIP, err := oc.Run("get").Args("svc", "jenkins", "--config", exutil.KubeConfigPath()).Template("{{.spec.clusterIP}}").Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		port, err := oc.Run("get").Args("svc", "jenkins", "--config", exutil.KubeConfigPath()).Template("{{ $x := index .spec.ports 0}}{{$x.port}}").Output()
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By("get admin password")
		password := getAdminPassword(oc)
		o.Expect(password).ShouldNot(o.BeEmpty())

		j = &JenkinsRef{
			oc:        oc,
			host:      serviceIP,
			port:      port,
			namespace: jenkinsNamespace,
			password:  password,
		}

		g.By("wait for jenkins to come up")
		_, err = j.waitForContent("", 200, 10*time.Minute, "")
		if err != nil {
			exutil.DumpDeploymentLogs("jenkins", oc)
		}
		o.Expect(err).NotTo(o.HaveOccurred())

		if useSnapshotImage {
			g.By("verifying the test image is being used")
			// for the test image, confirm that a snapshot version of the plugin is running in the jenkins image we'll test against
			_, err = j.waitForContent(`About OpenShift Pipeline Jenkins Plugin ([0-9\.]+)-SNAPSHOT`, 200, 10*time.Minute, "/pluginManager/plugin/openshift-pipeline/thirdPartyLicenses")
			o.Expect(err).NotTo(o.HaveOccurred())
		}

		// Start capturing logs from this deployment config.
		// This command will terminate if the Jenkins instance crashes. This
		// ensures that even if the Jenkins DC restarts, we should capture
		// logs from the crash.
		dcLogFollow, dcLogStdOut, dcLogStdErr, err = oc.Run("logs").Args("-f", "dc/jenkins").Background()
		o.Expect(err).NotTo(o.HaveOccurred())

		oc.SetNamespace(testNamespace)

		g.By("set up policy for jenkins jobs in " + oc.Namespace())
		err = oc.Run("policy").Args("add-role-to-user", "edit", "system:serviceaccount:"+j.namespace+":jenkins").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		// Populate shared Jenkins namespace with artifacts that can be used by all tests
		loadFixture(oc, "shared-resources-template.json")

		// Allow resources to settle. ImageStream tags seem unavailable without this wait.
		time.Sleep(10 * time.Second)
	})
)

func validErrorWrite(w io.Writer, dataLen, writes int) {
	n, err := w.Write(make([]byte, dataLen))
	Ω(err).Should(MatchError(ErrWriter))
	Ω(n).Should(Equal(writes), "ErrorWriter should write %d bytes", writes)
}

func validErrorWriteSuccess(w io.Writer, dataLen int) {
	Ω(w.Write(make([]byte, dataLen))).Should(Equal(dataLen), "ErrorWriter write should succeed")
}

var _ = bdd.Describe("Writers", func() {

	bdd.BeforeEach(func() {
		reset.Enable()
	})

	bdd.AfterEach(func() {
		reset.Disable()
	})

	bdd.It("ErrorWriter", func() {
		w := ErrorWriter(0)
		validErrorWrite(w, 10, 0)

		w = ErrorWriter(5)
		validErrorWriteSuccess(w, 3)
		validErrorWrite(w, 10, 2)
		validErrorWrite(w, 10, 0)
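// --- Illustrative sketch, not part of the original file ---
// An ErrorWriter consistent with the assertions above: it accepts up to n
// bytes in total, then every Write fails with ErrWriter after reporting how
// much of the current call still fit in the budget. Only the behavior is
// inferred from the test; the package name and internals are assumptions.

package iotest

import (
	"errors"
	"io"
)

// ErrWriter is the sentinel error returned once the byte budget is spent.
var ErrWriter = errors.New("simulated write failure")

type errorWriter struct{ remaining int }

// ErrorWriter returns an io.Writer with an n-byte budget.
func ErrorWriter(n int) io.Writer { return &errorWriter{remaining: n} }

func (w *errorWriter) Write(p []byte) (int, error) {
	if len(p) <= w.remaining {
		w.remaining -= len(p) // whole write fits; succeed
		return len(p), nil
	}
	n := w.remaining
	w.remaining = 0
	return n, ErrWriter // partial (possibly zero-byte) write, then fail
}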
g.By("Waiting for the endpoints to exist") serviceName := resources["service"][0] g.By("Waiting for endpoints to be up") err = waitForEndpointsAvailable(oc, serviceName) o.Expect(err).ToNot(o.HaveOccurred()) }) g.AfterEach(func() { g.By("Cleaning up the idling file") os.Remove(idlingFile) }) g.Describe("idling", func() { g.Context("with a single service and DeploymentConfig [Conformance]", func() { g.BeforeEach(func() { framework.BeforeEach() fixture = echoServerFixture }) g.It("should idle the service and DeploymentConfig properly", func() { checkSingleIdle(oc, idlingFile, resources, "deploymentconfig", "DeploymentConfig") }) }) g.Context("with a single service and ReplicationController", func() { g.BeforeEach(func() { framework.BeforeEach() fixture = echoServerRcFixture }) g.It("should idle the service and ReplicationController properly", func() { checkSingleIdle(oc, idlingFile, resources, "replicationcontroller", "ReplicationController")