// bumpLimit changes the limit value of the given resource for all the limit types of the limit range object.
func bumpLimit(oc *exutil.CLI, resourceName kapi.ResourceName, limit string) (kapi.ResourceList, error) {
	g.By(fmt.Sprintf("bump a limit on resource %q to %s", resourceName, limit))
	lr, err := oc.AdminKubeClient().Core().LimitRanges(oc.Namespace()).Get(limitRangeName)
	if err != nil {
		return nil, err
	}

	res := kapi.ResourceList{}
	change := false
	for i := range lr.Spec.Limits {
		item := &lr.Spec.Limits[i]
		if old, exists := item.Max[resourceName]; exists {
			for k, v := range item.Max {
				res[k] = v
			}
			parsed := resource.MustParse(limit)
			if old.Cmp(parsed) != 0 {
				item.Max[resourceName] = parsed
				change = true
			}
		}
	}

	if !change {
		return res, nil
	}

	_, err = oc.AdminKubeClient().Core().LimitRanges(oc.Namespace()).Update(lr)
	return res, err
}
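// Example use of bumpLimit (a minimal sketch, not taken from the original tests): raise the
// per-image-stream tag limit to 10 and fail the spec if the update does not go through. The
// imageapi.ResourceImageStreamTags constant is assumed to be available from this repository's
// image API package.
//
//	limits, err := bumpLimit(oc, imageapi.ResourceImageStreamTags, "10")
//	o.Expect(err).NotTo(o.HaveOccurred())
//	o.Expect(limits).To(o.HaveKey(imageapi.ResourceImageStreamTags))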
// ensureRegistryAcceptsSchema2 checks whether the registry is configured to accept manifests V2 schema 2 or
// not. If the result doesn't match the given accept argument, the registry's deployment config is updated
// accordingly and the function blocks until the registry is re-deployed and ready for new requests.
func ensureRegistryAcceptsSchema2(oc *exutil.CLI, accept bool) error {
	ns := oc.Namespace()
	oc = oc.SetNamespace(kapi.NamespaceDefault).AsAdmin()
	defer oc.SetNamespace(ns)

	env, err := oc.Run("env").Args("dc/docker-registry", "--list").Output()
	if err != nil {
		return err
	}

	value := fmt.Sprintf("%s=%t", dockerregistryserver.AcceptSchema2EnvVar, accept)
	if strings.Contains(env, value) {
		if accept {
			g.By("docker-registry is already configured to accept schema 2")
		} else {
			g.By("docker-registry is already configured to refuse schema 2")
		}
		return nil
	}

	dc, err := oc.Client().DeploymentConfigs(kapi.NamespaceDefault).Get("docker-registry")
	if err != nil {
		return err
	}
	waitForVersion := dc.Status.LatestVersion + 1

	g.By("configuring Docker registry to accept schema 2")
	err = oc.Run("env").Args("dc/docker-registry", value).Execute()
	if err != nil {
		return fmt.Errorf("failed to update registry's environment with %s: %v", value, err)
	}
	return exutil.WaitForRegistry(oc.AdminClient(), oc.AdminKubeClient(), &waitForVersion, oc)
}
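// Example use of ensureRegistryAcceptsSchema2 (a hedged sketch): enable schema 2 support for the
// duration of a spec and restore the previous behaviour when the spec finishes.
//
//	o.Expect(ensureRegistryAcceptsSchema2(oc, true)).NotTo(o.HaveOccurred())
//	defer func() {
//		o.Expect(ensureRegistryAcceptsSchema2(oc, false)).NotTo(o.HaveOccurred())
//	}()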
// cleanup deletes all resources and persistent volume claims in the current namespace and removes the
// backing host path volumes, dumping image streams before and after for easier debugging.
func cleanup(oc *exutil.CLI) {
	exutil.DumpImageStreams(oc)
	oc.AsAdmin().Run("delete").Args("all", "--all", "-n", oc.Namespace()).Execute()
	exutil.DumpImageStreams(oc)
	oc.AsAdmin().Run("delete").Args("pvc", "--all", "-n", oc.Namespace()).Execute()
	exutil.CleanupHostPathVolumes(oc.AdminKubeClient().Core().PersistentVolumes(), oc.Namespace())
}
// GetDockerRegistryURL returns a cluster URL of internal docker registry if available.
func GetDockerRegistryURL(oc *exutil.CLI) (string, error) {
	svc, err := oc.AdminKubeClient().Core().Services("default").Get("docker-registry")
	if err != nil {
		return "", err
	}
	url := svc.Spec.ClusterIP
	// Append the first exposed port, if any.
	for _, p := range svc.Spec.Ports {
		url = fmt.Sprintf("%s:%d", url, p.Port)
		break
	}
	return url, nil
}
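// Example use of GetDockerRegistryURL (a minimal sketch): build an image reference pointing into the
// current project of the integrated registry. The busybox repository name is only illustrative.
//
//	registryURL, err := GetDockerRegistryURL(oc)
//	o.Expect(err).NotTo(o.HaveOccurred())
//	imageRef := fmt.Sprintf("%s/%s/busybox:latest", registryURL, oc.Namespace())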
// bumpQuota modifies hard spec of quota object with the given value. It returns modified hard spec.
func bumpQuota(oc *exutil.CLI, resourceName kapi.ResourceName, value int64) (kapi.ResourceList, error) {
	g.By(fmt.Sprintf("bump the quota to %s=%d", resourceName, value))
	rq, err := oc.AdminKubeClient().Core().ResourceQuotas(oc.Namespace()).Get(quotaName)
	if err != nil {
		return nil, err
	}
	rq.Spec.Hard[resourceName] = *resource.NewQuantity(value, resource.DecimalSI)
	_, err = oc.AdminKubeClient().Core().ResourceQuotas(oc.Namespace()).Update(rq)
	if err != nil {
		return nil, err
	}
	err = waitForLimitSync(oc, rq.Spec.Hard)
	if err != nil {
		return nil, err
	}
	return rq.Spec.Hard, nil
}
// createResourceQuota creates a resource quota with given hard limits in a current namespace and waits until
// a first usage refresh.
func createResourceQuota(oc *exutil.CLI, hard kapi.ResourceList) (*kapi.ResourceQuota, error) {
	rq := &kapi.ResourceQuota{
		ObjectMeta: kapi.ObjectMeta{
			Name: quotaName,
		},
		Spec: kapi.ResourceQuotaSpec{
			Hard: hard,
		},
	}

	g.By(fmt.Sprintf("creating resource quota with a limit %v", hard))
	rq, err := oc.AdminKubeClient().Core().ResourceQuotas(oc.Namespace()).Create(rq)
	if err != nil {
		return nil, err
	}
	err = waitForLimitSync(oc, hard)
	return rq, err
}
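// Example use of createResourceQuota together with bumpQuota (a hedged sketch): start with a quota
// that forbids image streams, then raise it to one. The imageapi.ResourceImageStreams constant is
// assumed to come from this repository's image API package.
//
//	_, err := createResourceQuota(oc, kapi.ResourceList{imageapi.ResourceImageStreams: resource.MustParse("0")})
//	o.Expect(err).NotTo(o.HaveOccurred())
//	_, err = bumpQuota(oc, imageapi.ResourceImageStreams, 1)
//	o.Expect(err).NotTo(o.HaveOccurred())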
// createLimitRangeOfType creates a new limit range object with given limits for given limit type in current namespace.
func createLimitRangeOfType(oc *exutil.CLI, limitType kapi.LimitType, maxLimits kapi.ResourceList) (*kapi.LimitRange, error) {
	lr := &kapi.LimitRange{
		ObjectMeta: kapi.ObjectMeta{
			Name: limitRangeName,
		},
		Spec: kapi.LimitRangeSpec{
			Limits: []kapi.LimitRangeItem{
				{
					Type: limitType,
					Max:  maxLimits,
				},
			},
		},
	}

	g.By(fmt.Sprintf("creating limit range object %q with %s limited to: %v", limitRangeName, limitType, maxLimits))
	lr, err := oc.AdminKubeClient().Core().LimitRanges(oc.Namespace()).Create(lr)
	return lr, err
}
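// Example use of createLimitRangeOfType (a hedged sketch): cap the size of pushed images at 1Ki so
// that almost any push exceeds the limit. The imageapi.LimitTypeImage constant is assumed to come
// from this repository's image API package.
//
//	_, err := createLimitRangeOfType(oc, imageapi.LimitTypeImage, kapi.ResourceList{kapi.ResourceStorage: resource.MustParse("1Ki")})
//	o.Expect(err).NotTo(o.HaveOccurred())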
// replicationTestFactory returns a test body that deploys a MySQL master/slave replication setup from the
// given template and verifies that writes on the master are replicated to the slaves across several restarts.
func replicationTestFactory(oc *exutil.CLI, tc testCase) func() {
	return func() {
		oc.SetOutputDir(exutil.TestContext.OutputDir)
		defer cleanup(oc)

		_, err := exutil.SetupHostPathVolumes(oc.AdminKubeClient().Core().PersistentVolumes(), oc.Namespace(), "1Gi", 2)
		o.Expect(err).NotTo(o.HaveOccurred())

		err = testutil.WaitForPolicyUpdate(oc.Client(), oc.Namespace(), "create", templateapi.Resource("templates"), true)
		o.Expect(err).NotTo(o.HaveOccurred())

		exutil.CheckOpenShiftNamespaceImageStreams(oc)
		err = oc.Run("new-app").Args("-f", tc.TemplatePath).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		err = oc.Run("new-app").Args("-f", helperTemplate, "-p", fmt.Sprintf("DATABASE_SERVICE_NAME=%s", helperName)).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		// oc.KubeFramework().WaitForAnEndpoint currently will wait forever; for now, prefacing with our
		// WaitForADeploymentToComplete, which does have a timeout, since in most cases a failure in the
		// service coming up stems from a failed deployment.
		g.By("waiting for the deployment to complete")
		err = exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), helperName, oc)
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By("waiting for an endpoint")
		err = oc.KubeFramework().WaitForAnEndpoint(helperName)
		o.Expect(err).NotTo(o.HaveOccurred())

		tableCounter := 0
		assertReplicationIsWorking := func(masterDeployment, slaveDeployment string, slaveCount int) (exutil.Database, []exutil.Database, exutil.Database) {
			tableCounter++
			table := fmt.Sprintf("table_%0.2d", tableCounter)

			g.By("creating replication helpers")
			master, slaves, helper := CreateMySQLReplicationHelpers(oc.KubeClient().Core().Pods(oc.Namespace()), masterDeployment, slaveDeployment, fmt.Sprintf("%s-1", helperName), slaveCount)
			o.Expect(exutil.WaitUntilAllHelpersAreUp(oc, []exutil.Database{master, helper})).NotTo(o.HaveOccurred())
			o.Expect(exutil.WaitUntilAllHelpersAreUp(oc, slaves)).NotTo(o.HaveOccurred())

			// Test if we can query as root.
			g.By("wait for mysql-master endpoint")
			oc.KubeFramework().WaitForAnEndpoint("mysql-master")
			err := helper.TestRemoteLogin(oc, "mysql-master")
			o.Expect(err).NotTo(o.HaveOccurred())

			// Create a new table with a random name.
			g.By("create new table")
			_, err = master.Query(oc, fmt.Sprintf("CREATE TABLE %s (col1 VARCHAR(20), col2 VARCHAR(20));", table))
			o.Expect(err).NotTo(o.HaveOccurred())

			// Write new data to the table through the master.
			_, err = master.Query(oc, fmt.Sprintf("INSERT INTO %s (col1, col2) VALUES ('val1', 'val2');", table))
			o.Expect(err).NotTo(o.HaveOccurred())

			// Make sure the data is present on the master.
			err = exutil.WaitForQueryOutputContains(oc, master, 10*time.Second, false, fmt.Sprintf("SELECT * FROM %s\\G;", table), "col1: val1\ncol2: val2")
			o.Expect(err).NotTo(o.HaveOccurred())

			// Make sure the data was replicated to all slaves.
			for _, slave := range slaves {
				err = exutil.WaitForQueryOutputContains(oc, slave, 90*time.Second, false, fmt.Sprintf("SELECT * FROM %s\\G;", table), "col1: val1\ncol2: val2")
				o.Expect(err).NotTo(o.HaveOccurred())
			}

			return master, slaves, helper
		}

		g.By("after initial deployment")
		master, _, _ := assertReplicationIsWorking("mysql-master-1", "mysql-slave-1", 1)

		if tc.SkipReplication {
			return
		}

		g.By("after master is restarted by changing the Deployment Config")
		err = oc.Run("env").Args("dc", "mysql-master", "MYSQL_ROOT_PASSWORD=newpass").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		master, _, _ = assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		g.By("after master is restarted by deleting the pod")
		err = oc.Run("delete").Args("pod", "-l", "deployment=mysql-master-2").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		_, slaves, _ := assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		g.By("after slave is restarted by deleting the pod")
		err = oc.Run("delete").Args("pod", "-l", "deployment=mysql-slave-1").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), slaves[0].PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		pods, err := oc.KubeClient().Core().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: exutil.ParseLabelsOrDie("deployment=mysql-slave-1")})
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(len(pods.Items)).To(o.Equal(1))

		// NOTE: Commented out, the current template does not support multiple replicas.
		/*
			g.By("after slave is scaled to 0 and then back to 4 replicas")
			err = oc.Run("scale").Args("dc", "mysql-slave", "--replicas=0").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute)
			o.Expect(err).NotTo(o.HaveOccurred())
			err = oc.Run("scale").Args("dc", "mysql-slave", "--replicas=4").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 4)
		*/
	}
}
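// Example use of replicationTestFactory (a hedged sketch): register the returned closure as a Ginkgo
// spec body. The replicationTemplatePath variable is hypothetical; any MySQL replication template
// path accepted by `oc new-app -f` would do.
//
//	g.Describe("MySQL replication", func() {
//		g.It("should replicate writes from master to slaves", replicationTestFactory(oc, testCase{
//			TemplatePath: replicationTemplatePath,
//		}))
//	})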