// waitForLimitSync waits, with a short timeout, until the quota's hard limit is updated to the given values.
func waitForLimitSync(oc *exutil.CLI, hardLimit kapi.ResourceList) error {
	g.By(fmt.Sprintf("waiting for resource quota %s to get updated", quotaName))
	return testutil.WaitForResourceQuotaLimitSync(
		oc.KubeClient().Core().ResourceQuotas(oc.Namespace()),
		quotaName,
		hardLimit,
		waitTimeout)
}
func waitForNoPodsAvailable(oc *exutil.CLI) error {
	return wait.Poll(200*time.Millisecond, 2*time.Minute, func() (bool, error) {
		//ep, err := oc.KubeClient().Core().Endpoints(oc.Namespace()).Get(serviceName)
		pods, err := oc.KubeClient().Core().Pods(oc.Namespace()).List(kapi.ListOptions{})
		if err != nil {
			return false, err
		}
		return len(pods.Items) == 0, nil
	})
}
func waitForEndpointsAvailable(oc *exutil.CLI, serviceName string) error {
	return wait.Poll(200*time.Millisecond, 2*time.Minute, func() (bool, error) {
		ep, err := oc.KubeClient().Core().Endpoints(oc.Namespace()).Get(serviceName)
		if err != nil {
			// Tolerate NotFound because it can take a moment for the endpoints object to be created.
			if errors.TolerateNotFoundError(err) != nil {
				return false, err
			}
			return false, nil
		}
		return (len(ep.Subsets) > 0) && (len(ep.Subsets[0].Addresses) > 0), nil
	})
}
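// Illustrative usage sketch (an assumption, not part of the original helpers): a test step that
// blocks until a hypothetical "frontend" service has ready endpoints before exercising it.
// The service name is made up for the example; the helper above does the actual polling.
func exampleWaitForFrontend(oc *exutil.CLI) {
	g.By("waiting for the frontend endpoints to become available")
	err := waitForEndpointsAvailable(oc, "frontend")
	o.Expect(err).NotTo(o.HaveOccurred())
}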
// QueryPrivileged executes an SQL query as a root user and returns the result.
func (m MySQL) QueryPrivileged(oc *util.CLI, query string) (string, error) {
	container, err := firstContainerName(oc.KubeClient().Core().Pods(oc.Namespace()), m.podName)
	if err != nil {
		return "", err
	}
	masterConf, err := getPodConfig(oc.KubeClient().Core().Pods(oc.Namespace()), m.masterPodName)
	if err != nil {
		return "", err
	}
	return oc.Run("exec").Args(m.podName, "-c", container, "--", "bash", "-c",
		fmt.Sprintf("mysql -h 127.0.0.1 -uroot -e \"%s\" %s",
			query, masterConf.Env["MYSQL_DATABASE"])).Output()
}
// QueryPrivileged executes an SQL query as a root user and returns the result.
func (m PostgreSQL) QueryPrivileged(oc *util.CLI, query string) (string, error) {
	container, err := firstContainerName(oc.KubeClient().Core().Pods(oc.Namespace()), m.podName)
	if err != nil {
		return "", err
	}
	masterConf, err := getPodConfig(oc.KubeClient().Core().Pods(oc.Namespace()), m.masterPodName)
	if err != nil {
		return "", err
	}
	return oc.Run("exec").Args(m.podName, "-c", container, "--", "bash", "-c",
		fmt.Sprintf("psql postgres://postgres:%s@127.0.0.1/%s -x -c \"%s\"",
			masterConf.Env["POSTGRESQL_ADMIN_PASSWORD"],
			masterConf.Env["POSTGRESQL_DATABASE"], query)).Output()
}
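// Illustrative usage sketch (an assumption, not from the original code): calling QueryPrivileged
// from a test to run an ad-hoc administrative query. The PostgreSQL helper and the *util.CLI
// value are assumed to be set up by the surrounding test; the query itself is arbitrary.
func examplePostgreSQLAdminQuery(oc *util.CLI, db PostgreSQL) (string, error) {
	return db.QueryPrivileged(oc, "SELECT COUNT(*) FROM pg_tables;")
}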
// waitForResourceQuotaSync waits, with a short timeout, until the usage reported by the named
// resource quota matches the expected resources.
func waitForResourceQuotaSync(oc *exutil.CLI, name string, expectedResources kapi.ResourceList) (kapi.ResourceList, error) {
	g.By(fmt.Sprintf("waiting for resource quota %s to get updated", name))
	used, err := exutil.WaitForResourceQuotaSync(
		oc.KubeClient().Core().ResourceQuotas(oc.Namespace()),
		name,
		expectedResources,
		false,
		waitTimeout,
	)
	if err != nil {
		return nil, err
	}
	return used, nil
}
// TestRemoteLogin verifies that we can log in to the database at the given host address.
func (m PostgreSQL) TestRemoteLogin(oc *util.CLI, hostAddress string) error {
	container, err := firstContainerName(oc.KubeClient().Core().Pods(oc.Namespace()), m.podName)
	if err != nil {
		return err
	}
	masterConf, err := getPodConfig(oc.KubeClient().Core().Pods(oc.Namespace()), m.masterPodName)
	if err != nil {
		return err
	}
	err = oc.Run("exec").Args(m.podName, "-c", container, "--", "bash", "-c",
		fmt.Sprintf("psql postgres://%s:%s@%s/%s -x -c \"SELECT 1;\"",
			masterConf.Env["POSTGRESQL_USER"], masterConf.Env["POSTGRESQL_PASSWORD"],
			hostAddress, masterConf.Env["POSTGRESQL_DATABASE"])).Execute()
	return err
}
func waitForNumberOfPodsWithLabel(oc *exutil.CLI, number int, label string) []string {
	g.By(fmt.Sprintf("expecting that there are %d running pods with label name=%s", number, label))

	podNames, err := exutil.WaitForPods(
		oc.KubeClient().Core().Pods(oc.Namespace()),
		exutil.ParseLabelsOrDie("name="+label),
		exutil.CheckPodIsRunningFn,
		number,
		1*time.Minute,
	)
	o.Expect(err).ShouldNot(o.HaveOccurred())
	o.Expect(podNames).Should(o.HaveLen(number))

	return podNames
}
// TestRemoteLogin verifies that we can log in to the database at the given host address.
func (m MySQL) TestRemoteLogin(oc *util.CLI, hostAddress string) error {
	container, err := firstContainerName(oc.KubeClient().Core().Pods(oc.Namespace()), m.podName)
	if err != nil {
		return err
	}
	masterConf, err := getPodConfig(oc.KubeClient().Core().Pods(oc.Namespace()), m.masterPodName)
	if err != nil {
		return err
	}
	err = oc.Run("exec").Args(m.podName, "-c", container, "--", "bash", "-c",
		fmt.Sprintf("mysql -h %s -u%s -p%s -e \"SELECT 1;\" %s",
			hostAddress, masterConf.Env["MYSQL_USER"], masterConf.Env["MYSQL_PASSWORD"],
			masterConf.Env["MYSQL_DATABASE"])).Execute()
	return err
}
// IsReady pings the PostgreSQL server.
func (m PostgreSQL) IsReady(oc *util.CLI) (bool, error) {
	conf, err := getPodConfig(oc.KubeClient().Core().Pods(oc.Namespace()), m.podName)
	if err != nil {
		return false, err
	}
	out, err := oc.Run("exec").Args(m.podName, "-c", conf.Container, "--", "bash", "-c",
		"psql postgresql://postgres@127.0.0.1 -x -c \"SELECT 1;\"").Output()
	if err != nil {
		switch err.(type) {
		case *util.ExitError, *exec.ExitError:
			return false, nil
		default:
			return false, err
		}
	}
	return strings.Contains(out, "-[ RECORD 1 ]\n?column? | 1"), nil
}
// RunInPodContainer will run provided command in the specified pod container.
func RunInPodContainer(oc *exutil.CLI, selector labels.Selector, cmd []string) error {
	pods, err := exutil.WaitForPods(oc.KubeClient().Core().Pods(oc.Namespace()), selector, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
	if err != nil {
		return err
	}
	if len(pods) != 1 {
		return fmt.Errorf("Got %d pods for selector %v, expected 1", len(pods), selector)
	}
	pod, err := oc.KubeClient().Core().Pods(oc.Namespace()).Get(pods[0])
	if err != nil {
		return err
	}
	args := []string{pod.Name, "-c", pod.Spec.Containers[0].Name, "--"}
	args = append(args, cmd...)
	return oc.Run("exec").Args(args...).Execute()
}
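// Illustrative usage sketch (an assumption): invoking RunInPodContainer against pods labeled
// name=database to run a one-off command. The label value and command are made up for the
// example; any selector accepted by exutil.ParseLabelsOrDie works here.
func exampleRunInDatabasePod(oc *exutil.CLI) error {
	return RunInPodContainer(oc, exutil.ParseLabelsOrDie("name=database"), []string{"ls", "-l", "/var/lib"})
}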
// IsReady pings the MySQL server.
func (m MySQL) IsReady(oc *util.CLI) (bool, error) {
	conf, err := getPodConfig(oc.KubeClient().Core().Pods(oc.Namespace()), m.podName)
	if err != nil {
		return false, err
	}
	out, err := oc.Run("exec").Args(m.podName, "-c", conf.Container, "--", "bash", "-c",
		"mysqladmin -h 127.0.0.1 -uroot ping").Output()
	if err != nil {
		switch err.(type) {
		case *util.ExitError, *exec.ExitError:
			return false, nil
		default:
			return false, err
		}
	}
	return strings.Contains(out, "mysqld is alive"), nil
}
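// Illustrative sketch (an assumption, not part of the original helpers): driving IsReady with a
// polling loop until the MySQL server answers, similar in spirit to the WaitUntilUp-style helpers
// used elsewhere in these tests. Assumes the wait and time packages are imported in this file.
func exampleWaitUntilMySQLReady(oc *util.CLI, db MySQL) error {
	return wait.Poll(2*time.Second, 1*time.Minute, func() (bool, error) {
		// IsReady swallows exec failures (returns false, nil), so the poll keeps retrying.
		return db.IsReady(oc)
	})
}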
// Stands up a simple pod which can be used for exec commands
func initExecPod(oc *exutil.CLI) *kapi.Pod {
	// Create a running pod in which we can execute our commands
	oc.Run("run").Args("centos", "--image", "centos:7", "--command", "--", "sleep", "1800").Execute()

	var targetPod *kapi.Pod
	err := wait.Poll(10*time.Second, 10*time.Minute, func() (bool, error) {
		pods, err := oc.KubeClient().Core().Pods(oc.Namespace()).List(kapi.ListOptions{})
		o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
		for _, p := range pods.Items {
			if strings.HasPrefix(p.Name, "centos") && !strings.Contains(p.Name, "deploy") && p.Status.Phase == "Running" {
				targetPod = &p
				return true, nil
			}
		}
		return false, nil
	})
	o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())

	return targetPod
}
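// Illustrative sketch (an assumption, not from the original tests): using the exec pod to curl a
// service from inside the cluster. The URL is hypothetical; the pattern is simply "stand up the
// pod, then oc exec into it". Assumes the centos:7 image provides curl.
func exampleCurlFromExecPod(oc *exutil.CLI, url string) (string, error) {
	execPod := initExecPod(oc)
	return oc.Run("exec").Args(execPod.Name, "--", "curl", "-s", url).Output()
}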
func replicationTestFactory(oc *exutil.CLI, tc testCase) func() {
	return func() {
		oc.SetOutputDir(exutil.TestContext.OutputDir)
		defer cleanup(oc)

		_, err := exutil.SetupHostPathVolumes(oc.AdminKubeClient().Core().PersistentVolumes(), oc.Namespace(), "1Gi", 2)
		o.Expect(err).NotTo(o.HaveOccurred())

		err = testutil.WaitForPolicyUpdate(oc.Client(), oc.Namespace(), "create", templateapi.Resource("templates"), true)
		o.Expect(err).NotTo(o.HaveOccurred())

		exutil.CheckOpenShiftNamespaceImageStreams(oc)
		err = oc.Run("new-app").Args("-f", tc.TemplatePath).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		err = oc.Run("new-app").Args("-f", helperTemplate, "-p", fmt.Sprintf("DATABASE_SERVICE_NAME=%s", helperName)).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		// oc.KubeFramework().WaitForAnEndpoint currently will wait forever; for now, prefacing with our WaitForADeploymentToComplete,
		// which does have a timeout, since in most cases a failure in the service coming up stems from a failed deployment
		g.By("waiting for the deployment to complete")
		err = exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), helperName, oc)
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By("waiting for an endpoint")
		err = oc.KubeFramework().WaitForAnEndpoint(helperName)
		o.Expect(err).NotTo(o.HaveOccurred())

		tableCounter := 0
		assertReplicationIsWorking := func(masterDeployment, slaveDeployment string, slaveCount int) (exutil.Database, []exutil.Database, exutil.Database) {
			tableCounter++
			table := fmt.Sprintf("table_%0.2d", tableCounter)

			g.By("creating replication helpers")
			master, slaves, helper := CreateMySQLReplicationHelpers(oc.KubeClient().Core().Pods(oc.Namespace()), masterDeployment, slaveDeployment, fmt.Sprintf("%s-1", helperName), slaveCount)
			o.Expect(exutil.WaitUntilAllHelpersAreUp(oc, []exutil.Database{master, helper})).NotTo(o.HaveOccurred())
			o.Expect(exutil.WaitUntilAllHelpersAreUp(oc, slaves)).NotTo(o.HaveOccurred())

			// Test if we can query as root
			g.By("wait for mysql-master endpoint")
			oc.KubeFramework().WaitForAnEndpoint("mysql-master")
			err := helper.TestRemoteLogin(oc, "mysql-master")
			o.Expect(err).NotTo(o.HaveOccurred())

			// Create a new table with random name
			g.By("create new table")
			_, err = master.Query(oc, fmt.Sprintf("CREATE TABLE %s (col1 VARCHAR(20), col2 VARCHAR(20));", table))
			o.Expect(err).NotTo(o.HaveOccurred())

			// Write new data to the table through master
			_, err = master.Query(oc, fmt.Sprintf("INSERT INTO %s (col1, col2) VALUES ('val1', 'val2');", table))
			o.Expect(err).NotTo(o.HaveOccurred())

			// Make sure data is present on master
			err = exutil.WaitForQueryOutputContains(oc, master, 10*time.Second, false,
				fmt.Sprintf("SELECT * FROM %s\\G;", table), "col1: val1\ncol2: val2")
			o.Expect(err).NotTo(o.HaveOccurred())

			// Make sure data was replicated to all slaves
			for _, slave := range slaves {
				err = exutil.WaitForQueryOutputContains(oc, slave, 90*time.Second, false,
					fmt.Sprintf("SELECT * FROM %s\\G;", table), "col1: val1\ncol2: val2")
				o.Expect(err).NotTo(o.HaveOccurred())
			}

			return master, slaves, helper
		}

		g.By("after initial deployment")
		master, _, _ := assertReplicationIsWorking("mysql-master-1", "mysql-slave-1", 1)

		if tc.SkipReplication {
			return
		}

		g.By("after master is restarted by changing the Deployment Config")
		err = oc.Run("env").Args("dc", "mysql-master", "MYSQL_ROOT_PASSWORD=newpass").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
		master, _, _ = assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		g.By("after master is restarted by deleting the pod")
		err = oc.Run("delete").Args("pod", "-l", "deployment=mysql-master-2").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		_, slaves, _ := assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		g.By("after slave is restarted by deleting the pod")
		err = oc.Run("delete").Args("pod", "-l", "deployment=mysql-slave-1").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), slaves[0].PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		pods, err := oc.KubeClient().Core().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: exutil.ParseLabelsOrDie("deployment=mysql-slave-1")})
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(len(pods.Items)).To(o.Equal(1))

		// NOTE: Commented out, current template does not support multiple replicas.
		/*
			g.By("after slave is scaled to 0 and then back to 4 replicas")
			err = oc.Run("scale").Args("dc", "mysql-slave", "--replicas=0").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute)
			o.Expect(err).NotTo(o.HaveOccurred())
			err = oc.Run("scale").Args("dc", "mysql-slave", "--replicas=4").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 4)
		*/
	}
}