// waitForAnImageStreamTag waits until an image stream with given name has non-empty history for given tag
func waitForAnImageStreamTag(oc *exutil.CLI, name, tag string) error {
	g.By(fmt.Sprintf("waiting for the image stream importer to import tag %s into stream %s", tag, name))
	start := time.Now()
	c := make(chan error)
	go func() {
		err := exutil.WaitForAnImageStream(
			oc.REST().ImageStreams(oc.Namespace()),
			name,
			func(is *imageapi.ImageStream) bool {
				if history, exists := is.Status.Tags[tag]; !exists || len(history.Items) == 0 {
					return false
				}
				return true
			},
			func(is *imageapi.ImageStream) bool {
				return time.Now().After(start.Add(waitTimeout))
			})
		c <- err
	}()

	select {
	case e := <-c:
		return e
	case <-time.After(waitTimeout):
		return fmt.Errorf("timed out while waiting for an image stream tag %s/%s:%s", oc.Namespace(), name, tag)
	}
}
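
// exampleWaitForImportedTag is a hypothetical usage sketch, not part of the original helpers: after
// triggering an import with `oc import-image`, it blocks until the tag shows up in the stream's status
// or waitTimeout elapses. The stream name "busybox" and tag "latest" are illustrative assumptions only;
// all identifiers used here come from this file and its imports.
func exampleWaitForImportedTag(oc *exutil.CLI) {
	err := oc.Run("import-image").Args("busybox:latest", "--confirm").Execute()
	o.Expect(err).NotTo(o.HaveOccurred())
	// Returns nil once status.tags["latest"] has at least one history item.
	o.Expect(waitForAnImageStreamTag(oc, "busybox", "latest")).NotTo(o.HaveOccurred())
}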
// ensureRegistryAcceptsSchema2 checks whether the registry is configured to accept manifests V2 schema 2 or
// not. If the result doesn't match the given accept argument, the registry's deployment config is updated
// accordingly and the function blocks until the registry is re-deployed and ready for new requests.
func ensureRegistryAcceptsSchema2(oc *exutil.CLI, accept bool) error {
	ns := oc.Namespace()
	oc = oc.SetNamespace(kapi.NamespaceDefault).AsAdmin()
	defer oc.SetNamespace(ns)
	env, err := oc.Run("env").Args("dc/docker-registry", "--list").Output()
	if err != nil {
		return err
	}

	value := fmt.Sprintf("%s=%t", dockerregistryserver.AcceptSchema2EnvVar, accept)
	if strings.Contains(env, value) {
		if accept {
			g.By("docker-registry is already configured to accept schema 2")
		} else {
			g.By("docker-registry is already configured to refuse schema 2")
		}
		return nil
	}

	dc, err := oc.REST().DeploymentConfigs(kapi.NamespaceDefault).Get("docker-registry")
	if err != nil {
		return err
	}
	waitForVersion := dc.Status.LatestVersion + 1

	if accept {
		g.By("configuring Docker registry to accept schema 2")
	} else {
		g.By("configuring Docker registry to refuse schema 2")
	}
	err = oc.Run("env").Args("dc/docker-registry", value).Execute()
	if err != nil {
		return fmt.Errorf("failed to update registry's environment with %s: %v", value, err)
	}
	return exutil.WaitForRegistry(oc.AdminREST(), oc.AdminKubeREST(), &waitForVersion, oc)
}
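
// exampleWithSchema2Enabled is a hypothetical usage sketch, not part of the original helpers: a test
// that pushes schema 2 manifests would first toggle the registry's accept flag and restore the previous
// behaviour on exit. The assumption that the registry refused schema 2 before the test is illustrative
// only; a real caller would record and restore the actual prior setting.
func exampleWithSchema2Enabled(oc *exutil.CLI, testBody func()) {
	o.Expect(ensureRegistryAcceptsSchema2(oc, true)).NotTo(o.HaveOccurred())
	defer func() {
		// Restore the refusing configuration once the test body finishes.
		o.Expect(ensureRegistryAcceptsSchema2(oc, false)).NotTo(o.HaveOccurred())
	}()
	testBody()
}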
// buildAndPushTestImagesTo builds a given number of test images. The images are pushed to a new image stream
// of given name under <tagPrefix><X> where X is the number of the image, starting from 1.
func buildAndPushTestImagesTo(oc *exutil.CLI, isName string, tagPrefix string, numberOfImages int) (tag2Image map[string]imageapi.Image, err error) {
	dClient, err := testutil.NewDockerClient()
	if err != nil {
		return
	}
	tag2Image = make(map[string]imageapi.Image)

	for i := 1; i <= numberOfImages; i++ {
		tag := fmt.Sprintf("%s%d", tagPrefix, i)
		dgst, err := BuildAndPushImageOfSizeWithDocker(oc, dClient, isName, tag, imageSize, 2, g.GinkgoWriter, true)
		if err != nil {
			return nil, err
		}
		ist, err := oc.REST().ImageStreamTags(oc.Namespace()).Get(isName, tag)
		if err != nil {
			return nil, err
		}
		if dgst != ist.Image.Name {
			return nil, fmt.Errorf("digest of built image does not match stored: %s != %s", dgst, ist.Image.Name)
		}
		tag2Image[tag] = ist.Image
	}

	return
}
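
// exampleBuildTestImages is a hypothetical usage sketch, not part of the original helpers: it builds
// three small images into a fresh image stream and inspects the returned map, keyed by tag ("prune1",
// "prune2", "prune3" under the assumed prefix). The stream name and prefix are illustrative assumptions;
// ginkgolog is the logging helper already used elsewhere in this package.
func exampleBuildTestImages(oc *exutil.CLI) {
	tag2Image, err := buildAndPushTestImagesTo(oc, "prune-target", "prune", 3)
	o.Expect(err).NotTo(o.HaveOccurred())
	for tag, image := range tag2Image {
		// image.Name holds the digest that was verified against the pushed manifest.
		ginkgolog("tag %s resolved to image %s", tag, image.Name)
	}
}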
func assertEnvVars(oc *exutil.CLI, buildPrefix string, varsToFind map[string]string) {
	buildList, err := oc.REST().Builds(oc.Namespace()).List(kapi.ListOptions{})
	o.Expect(err).NotTo(o.HaveOccurred())

	// Ensure that expected start-build environment variables were injected
	for _, build := range buildList.Items {
		ginkgolog("Found build: %q", build.GetName())
		if strings.HasPrefix(build.GetName(), buildPrefix) {
			envs := []kapi.EnvVar{}
			if build.Spec.Strategy.DockerStrategy != nil && build.Spec.Strategy.DockerStrategy.Env != nil {
				envs = build.Spec.Strategy.DockerStrategy.Env
			} else if build.Spec.Strategy.SourceStrategy != nil && build.Spec.Strategy.SourceStrategy.Env != nil {
				envs = build.Spec.Strategy.SourceStrategy.Env
			} else {
				continue
			}
			for k, v := range varsToFind {
				found := false
				for _, env := range envs {
					ginkgolog("Found %s=%s in build %s", env.Name, env.Value, build.GetName())
					if k == env.Name && v == env.Value {
						found = true
						break
					}
				}
				o.ExpectWithOffset(1, found).To(o.BeTrue())
			}
		}
	}
}
func waitForSyncedConfig(oc *exutil.CLI, name string, timeout time.Duration) error {
	dc, rcs, pods, err := deploymentInfo(oc, name)
	if err != nil {
		return err
	}
	if err := checkDeploymentInvariants(dc, rcs, pods); err != nil {
		return err
	}
	generation := dc.Generation
	return wait.PollImmediate(200*time.Millisecond, timeout, func() (bool, error) {
		config, err := oc.REST().DeploymentConfigs(oc.Namespace()).Get(name)
		if err != nil {
			return false, err
		}
		return deployutil.HasSynced(config, generation), nil
	})
}
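
// exampleWaitForSync is a hypothetical usage sketch, not part of the original helpers: after mutating a
// deployment config (here via `oc env`, the same command form used elsewhere in this file), wait until
// the deployment controller has observed the new generation before asserting on the rollout. The dc name
// "database" and the two-minute timeout are illustrative assumptions.
func exampleWaitForSync(oc *exutil.CLI) {
	err := oc.Run("env").Args("dc/database", "FOO=bar").Execute()
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(waitForSyncedConfig(oc, "database", 2*time.Minute)).NotTo(o.HaveOccurred())
}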
// buildAndPushImage tries to build an image. The image is stored as an image stream tag <name>:<tag>. If
// shouldBeDenied is true, the build is expected to fail with a denied error.
func buildAndPushImage(oc *exutil.CLI, namespace, name, tag string, shouldBeDenied bool) {
	istName := name
	if tag != "" {
		istName += ":" + tag
	}

	g.By(fmt.Sprintf("building an image %q", istName))

	bc, err := oc.REST().BuildConfigs(namespace).Get(name)
	if err == nil {
		g.By(fmt.Sprintf("changing build config %s to store result into %s", name, istName))
		o.Expect(bc.Spec.BuildSpec.Output.To.Kind).To(o.Equal("ImageStreamTag"))
		bc.Spec.BuildSpec.Output.To.Name = istName
		_, err := oc.REST().BuildConfigs(namespace).Update(bc)
		o.Expect(err).NotTo(o.HaveOccurred())
	} else {
		g.By(fmt.Sprintf("creating a new build config %s with output to %s", name, istName))
		err = oc.Run("new-build").Args("--binary", "--name", name, "--to", istName).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
	}

	tempDir, err := ioutil.TempDir("", "name-build")
	o.Expect(err).NotTo(o.HaveOccurred())

	err = createRandomBlob(path.Join(tempDir, "data"), imageSize)
	o.Expect(err).NotTo(o.HaveOccurred())
	err = ioutil.WriteFile(path.Join(tempDir, "Dockerfile"), []byte("FROM scratch\nCOPY data /data\n"), 0644)
	o.Expect(err).NotTo(o.HaveOccurred())

	err = oc.Run("start-build").Args(name, "--from-dir", tempDir, "--wait").Execute()
	if shouldBeDenied {
		o.Expect(err).To(o.HaveOccurred())
		out, err := oc.Run("logs").Args("bc/" + name).Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(out).Should(o.MatchRegexp("(?i)Failed to push image:.*denied"))
	} else {
		o.Expect(err).NotTo(o.HaveOccurred())
	}
}
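
// exampleBuildUnderLimit is a hypothetical usage sketch, not part of the original helpers: it exercises
// an admission limit by pushing one image that should be accepted and a second one that is expected to be
// denied. The build config name, tags, and the assumption that a per-stream image limit of one has been
// configured elsewhere in the test are all illustrative.
func exampleBuildUnderLimit(oc *exutil.CLI) {
	buildAndPushImage(oc, oc.Namespace(), "allowed", "first", false)
	// With the assumed limit in place, the second push should be rejected with a denied error.
	buildAndPushImage(oc, oc.Namespace(), "allowed", "second", true)
}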
func deploymentInfo(oc *exutil.CLI, name string) (*deployapi.DeploymentConfig, []kapi.ReplicationController, []kapi.Pod, error) {
	dc, err := oc.REST().DeploymentConfigs(oc.Namespace()).Get(name)
	if err != nil {
		return nil, nil, nil, err
	}

	// get pods before RCs, so we see more RCs than pods.
	pods, err := oc.KubeREST().Pods(oc.Namespace()).List(kapi.ListOptions{})
	if err != nil {
		return nil, nil, nil, err
	}

	rcs, err := oc.KubeREST().ReplicationControllers(oc.Namespace()).List(kapi.ListOptions{
		LabelSelector: deployutil.ConfigSelector(name),
	})
	if err != nil {
		return nil, nil, nil, err
	}
	sort.Sort(deployutil.ByLatestVersionAsc(rcs.Items))

	return dc, rcs.Items, pods.Items, nil
}
func replicationTestFactory(oc *exutil.CLI, template string) func() {
	return func() {
		oc.SetOutputDir(exutil.TestContext.OutputDir)
		defer cleanup(oc)

		_, err := exutil.SetupHostPathVolumes(oc.AdminKubeREST().PersistentVolumes(), oc.Namespace(), "512Mi", 1)
		o.Expect(err).NotTo(o.HaveOccurred())

		err = testutil.WaitForPolicyUpdate(oc.REST(), oc.Namespace(), "create", "templates", true)
		o.Expect(err).NotTo(o.HaveOccurred())

		err = oc.Run("new-app").Args("-f", template).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		err = oc.Run("new-app").Args("-f", helperTemplate, "-p", fmt.Sprintf("DATABASE_SERVICE_NAME=%s", helperName)).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		err = oc.KubeFramework().WaitForAnEndpoint(helperName)
		o.Expect(err).NotTo(o.HaveOccurred())

		tableCounter := 0
		assertReplicationIsWorking := func(masterDeployment, slaveDeployment string, slaveCount int) (exutil.Database, []exutil.Database, exutil.Database) {
			tableCounter++
			table := fmt.Sprintf("table_%0.2d", tableCounter)

			master, slaves, helper := CreateMySQLReplicationHelpers(oc.KubeREST().Pods(oc.Namespace()), masterDeployment, slaveDeployment, fmt.Sprintf("%s-1", helperName), slaveCount)
			o.Expect(exutil.WaitUntilAllHelpersAreUp(oc, []exutil.Database{master, helper})).NotTo(o.HaveOccurred())
			o.Expect(exutil.WaitUntilAllHelpersAreUp(oc, slaves)).NotTo(o.HaveOccurred())

			// Test if we can query as root
			oc.KubeFramework().WaitForAnEndpoint("mysql-master")
			err := helper.TestRemoteLogin(oc, "mysql-master")
			o.Expect(err).NotTo(o.HaveOccurred())

			// Create a new table with random name
			_, err = master.Query(oc, fmt.Sprintf("CREATE TABLE %s (col1 VARCHAR(20), col2 VARCHAR(20));", table))
			o.Expect(err).NotTo(o.HaveOccurred())

			// Write new data to the table through master
			_, err = master.Query(oc, fmt.Sprintf("INSERT INTO %s (col1, col2) VALUES ('val1', 'val2');", table))
			o.Expect(err).NotTo(o.HaveOccurred())

			// Make sure data is present on master
			err = exutil.WaitForQueryOutput(oc, master, 10*time.Second, false, fmt.Sprintf("SELECT * FROM %s\\G;", table), "col1: val1\ncol2: val2")
			o.Expect(err).NotTo(o.HaveOccurred())

			// Make sure data was replicated to all slaves
			for _, slave := range slaves {
				err = exutil.WaitForQueryOutput(oc, slave, 90*time.Second, false, fmt.Sprintf("SELECT * FROM %s\\G;", table), "col1: val1\ncol2: val2")
				o.Expect(err).NotTo(o.HaveOccurred())
			}

			return master, slaves, helper
		}

		g.By("after initial deployment")
		master, _, _ := assertReplicationIsWorking("mysql-master-1", "mysql-slave-1", 1)

		g.By("after master is restarted by changing the Deployment Config")
		err = oc.Run("env").Args("dc", "mysql-master", "MYSQL_ROOT_PASSWORD=newpass").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		master, _, _ = assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		g.By("after master is restarted by deleting the pod")
		err = oc.Run("delete").Args("pod", "-l", "deployment=mysql-master-2").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		_, slaves, _ := assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		g.By("after slave is restarted by deleting the pod")
		err = oc.Run("delete").Args("pod", "-l", "deployment=mysql-slave-1").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), slaves[0].PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		pods, err := oc.KubeREST().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: exutil.ParseLabelsOrDie("deployment=mysql-slave-1")})
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(len(pods.Items)).To(o.Equal(1))

		g.By("after slave is scaled to 0 and then back to 4 replicas")
		err = oc.Run("scale").Args("dc", "mysql-slave", "--replicas=0").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		err = oc.Run("scale").Args("dc", "mysql-slave", "--replicas=4").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 4)
	}
}
// BuildAndPushImageOfSizeWithBuilder tries to build an image of wanted size and number of layers. The built
// image is stored as an image stream tag <name>:<tag>. If shouldSucceed is false, the build is expected to
// fail with a denied error. Note the size is only approximate; the resulting image size will differ
// depending on the compression algorithm used and metadata overhead.
func BuildAndPushImageOfSizeWithBuilder(
	oc *exutil.CLI,
	dClient *dockerclient.Client,
	namespace, name, tag string,
	size uint64,
	numberOfLayers int,
	shouldSucceed bool,
) error {
	istName := name
	if tag != "" {
		istName += ":" + tag
	}

	bc, err := oc.REST().BuildConfigs(namespace).Get(name)
	if err == nil {
		if bc.Spec.CommonSpec.Output.To.Kind != "ImageStreamTag" {
			return fmt.Errorf("Unexpected kind of buildspec's output (%s != %s)", bc.Spec.CommonSpec.Output.To.Kind, "ImageStreamTag")
		}
		bc.Spec.CommonSpec.Output.To.Name = istName
		if _, err = oc.REST().BuildConfigs(namespace).Update(bc); err != nil {
			return err
		}
	} else {
		err = oc.Run("new-build").Args("--binary", "--name", name, "--to", istName).Execute()
		if err != nil {
			return err
		}
	}

	tempDir, err := ioutil.TempDir("", "name-build")
	if err != nil {
		return err
	}

	dataSize := calculateRoughDataSize(oc.Stdout(), size, numberOfLayers)

	lines := make([]string, numberOfLayers+1)
	lines[0] = "FROM scratch"
	for i := 1; i <= numberOfLayers; i++ {
		blobName := fmt.Sprintf("data%d", i)
		if err := createRandomBlob(path.Join(tempDir, blobName), dataSize); err != nil {
			return err
		}
		lines[i] = fmt.Sprintf("COPY %s /%s", blobName, blobName)
	}
	if err := ioutil.WriteFile(path.Join(tempDir, "Dockerfile"), []byte(strings.Join(lines, "\n")+"\n"), 0644); err != nil {
		return err
	}

	out, err := oc.Run("start-build").Args(name, "--from-dir", tempDir, "--wait").Output()
	fmt.Fprintf(g.GinkgoWriter, "\nstart-build output:\n%s\n", out)

	buildLog, logsErr := oc.Run("logs").Args("bc/" + name).Output()
	if match := reSuccessfulBuild.FindStringSubmatch(buildLog); len(match) > 1 {
		defer dClient.RemoveImageExtended(match[1], dockerclient.RemoveImageOptions{Force: true})
	}

	if shouldSucceed && err != nil {
		return fmt.Errorf("Got unexpected build error: %v", err)
	}
	if !shouldSucceed {
		if err == nil {
			return fmt.Errorf("Build unexpectedly succeeded")
		}
		if logsErr != nil {
			return fmt.Errorf("Failed to show log of build config %s: %v", name, logsErr)
		}
		if !reExpectedDeniedError.MatchString(buildLog) {
			return fmt.Errorf("Failed to match expected %q in: %q", reExpectedDeniedError.String(), buildLog)
		}
	}

	return nil
}
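
// exampleBuildOversizedImage is a hypothetical usage sketch, not part of the original helpers: it builds a
// roughly 100 MiB, two-layer image through the builder and expects the push to be denied, as in a test of
// a size quota. The docker client comes from testutil.NewDockerClient as in buildAndPushTestImagesTo above;
// the stream name "sized" and the exact size are illustrative assumptions.
func exampleBuildOversizedImage(oc *exutil.CLI) {
	dClient, err := testutil.NewDockerClient()
	o.Expect(err).NotTo(o.HaveOccurred())
	// shouldSucceed=false: the helper returns nil only if the build fails with the expected denied error.
	err = BuildAndPushImageOfSizeWithBuilder(oc, dClient, oc.Namespace(), "sized", "latest", 100*1024*1024, 2, false)
	o.Expect(err).NotTo(o.HaveOccurred())
}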
func PostgreSQLReplicationTestFactory(oc *exutil.CLI, image string) func() {
	return func() {
		oc.SetOutputDir(exutil.TestContext.OutputDir)
		defer cleanup(oc)

		_, err := exutil.SetupHostPathVolumes(oc.AdminKubeREST().PersistentVolumes(), oc.Namespace(), "512Mi", 1)
		o.Expect(err).NotTo(o.HaveOccurred())

		err = testutil.WaitForPolicyUpdate(oc.REST(), oc.Namespace(), "create", templateapi.Resource("templates"), true)
		o.Expect(err).NotTo(o.HaveOccurred())

		exutil.CheckOpenShiftNamespaceImageStreams(oc)
		err = oc.Run("new-app").Args("-f", postgreSQLReplicationTemplate, "-p", fmt.Sprintf("POSTGRESQL_IMAGE=%s", image)).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		err = oc.Run("new-app").Args("-f", postgreSQLEphemeralTemplate, "-p", fmt.Sprintf("DATABASE_SERVICE_NAME=%s", postgreSQLHelperName)).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		// oc.KubeFramework().WaitForAnEndpoint currently will wait forever; for now, preface it with our
		// WaitForADeploymentToComplete, which does have a timeout, since in most cases a failure in the
		// service coming up stems from a failed deployment.
		err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), postgreSQLHelperName, oc)
		o.Expect(err).NotTo(o.HaveOccurred())

		err = oc.KubeFramework().WaitForAnEndpoint(postgreSQLHelperName)
		o.Expect(err).NotTo(o.HaveOccurred())

		tableCounter := 0
		assertReplicationIsWorking := func(masterDeployment, slaveDeployment string, slaveCount int) (exutil.Database, []exutil.Database, exutil.Database) {
			tableCounter++
			table := fmt.Sprintf("table_%0.2d", tableCounter)

			master, slaves, helper := CreatePostgreSQLReplicationHelpers(oc.KubeREST().Pods(oc.Namespace()), masterDeployment, slaveDeployment, fmt.Sprintf("%s-1", postgreSQLHelperName), slaveCount)
			err := exutil.WaitUntilAllHelpersAreUp(oc, []exutil.Database{master, helper})
			if err != nil {
				exutil.DumpDeploymentLogs("postgresql-helper", oc)
			}
			o.Expect(err).NotTo(o.HaveOccurred())

			err = exutil.WaitUntilAllHelpersAreUp(oc, slaves)
			if err != nil {
				exutil.DumpDeploymentLogs("postgresql-slave", oc)
			}
			o.Expect(err).NotTo(o.HaveOccurred())

			// Test if we can query as admin
			oc.KubeFramework().WaitForAnEndpoint("postgresql-master")
			err = helper.TestRemoteLogin(oc, "postgresql-master")
			o.Expect(err).NotTo(o.HaveOccurred())

			// Create a new table with random name
			_, err = master.Query(oc, fmt.Sprintf("CREATE TABLE %s (col1 VARCHAR(20), col2 VARCHAR(20));", table))
			o.Expect(err).NotTo(o.HaveOccurred())

			// Write new data to the table through master
			_, err = master.Query(oc, fmt.Sprintf("INSERT INTO %s (col1, col2) VALUES ('val1', 'val2');", table))
			o.Expect(err).NotTo(o.HaveOccurred())

			// Make sure data is present on master
			err = exutil.WaitForQueryOutputContains(oc, master, 10*time.Second, false, fmt.Sprintf("SELECT * FROM %s;", table), "col1 | val1\ncol2 | val2")
			o.Expect(err).NotTo(o.HaveOccurred())

			// Make sure data was replicated to all slaves
			for _, slave := range slaves {
				err = exutil.WaitForQueryOutputContains(oc, slave, 90*time.Second, false, fmt.Sprintf("SELECT * FROM %s;", table), "col1 | val1\ncol2 | val2")
				o.Expect(err).NotTo(o.HaveOccurred())
			}

			return master, slaves, helper
		}

		g.By("after initial deployment")
		master, _, _ := assertReplicationIsWorking("postgresql-master-1", "postgresql-slave-1", 1)

		g.By("after master is restarted by changing the Deployment Config")
		err = oc.Run("env").Args("dc", "postgresql-master", "POSTGRESQL_ADMIN_PASSWORD=newpass").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		master, _, _ = assertReplicationIsWorking("postgresql-master-2", "postgresql-slave-1", 1)

		g.By("after master is restarted by deleting the pod")
		err = oc.Run("delete").Args("pod", "-l", "deployment=postgresql-master-2").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		_, slaves, _ := assertReplicationIsWorking("postgresql-master-2", "postgresql-slave-1", 1)

		g.By("after slave is restarted by deleting the pod")
		err = oc.Run("delete").Args("pod", "-l", "deployment=postgresql-slave-1").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), slaves[0].PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		assertReplicationIsWorking("postgresql-master-2", "postgresql-slave-1", 1)

		pods, err := oc.KubeREST().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: exutil.ParseLabelsOrDie("deployment=postgresql-slave-1")})
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(len(pods.Items)).To(o.Equal(1))

		g.By("after slave is scaled to 0 and then back to 4 replicas")
		err = oc.Run("scale").Args("dc", "postgresql-slave", "--replicas=0").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		err = oc.Run("scale").Args("dc", "postgresql-slave", "--replicas=4").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		assertReplicationIsWorking("postgresql-master-2", "postgresql-slave-1", 4)
	}
}