// buildAndPushTestImagesTo builds a given number of test images. The images are pushed to a new image stream
// of given name under <tagPrefix><X> where X is the index of the image, starting from 1.
func buildAndPushTestImagesTo(oc *exutil.CLI, isName string, tagPrefix string, numberOfImages int) (tag2Image map[string]imageapi.Image, err error) {
	dClient, err := testutil.NewDockerClient()
	if err != nil {
		return
	}
	tag2Image = make(map[string]imageapi.Image)

	for i := 1; i <= numberOfImages; i++ {
		tag := fmt.Sprintf("%s%d", tagPrefix, i)
		dgst, err := imagesutil.BuildAndPushImageOfSizeWithDocker(oc, dClient, isName, tag, imageSize, 2, g.GinkgoWriter, true)
		if err != nil {
			return nil, err
		}
		ist, err := oc.Client().ImageStreamTags(oc.Namespace()).Get(isName, tag)
		if err != nil {
			return nil, err
		}
		if dgst != ist.Image.Name {
			return nil, fmt.Errorf("digest of built image does not match stored: %s != %s", dgst, ist.Image.Name)
		}
		tag2Image[tag] = ist.Image
	}

	return
}
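// exampleBuildAndVerifyTestImages is an illustrative sketch, not part of the original
// helpers: it shows how buildAndPushTestImagesTo and waitForAnImageStreamTag (defined
// below) might be combined inside a test. The stream name, tag prefix, and image count
// are arbitrary example values.
func exampleBuildAndVerifyTestImages(oc *exutil.CLI) {
	tag2Image, err := buildAndPushTestImagesTo(oc, "test-stream", "tag", 2)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(tag2Image).To(o.HaveLen(2))
	// every pushed tag should eventually show up in the image stream's status history
	for tag := range tag2Image {
		o.Expect(waitForAnImageStreamTag(oc, "test-stream", tag)).NotTo(o.HaveOccurred())
	}
}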
// ensureRegistryAcceptsSchema2 checks whether the registry is configured to accept manifest V2 schema 2 or
// not. If the result doesn't match the given accept argument, the registry's deployment config is updated
// accordingly and the function blocks until the registry is re-deployed and ready for new requests.
func ensureRegistryAcceptsSchema2(oc *exutil.CLI, accept bool) error {
	ns := oc.Namespace()
	oc = oc.SetNamespace(kapi.NamespaceDefault).AsAdmin()
	defer oc.SetNamespace(ns)
	env, err := oc.Run("env").Args("dc/docker-registry", "--list").Output()
	if err != nil {
		return err
	}

	value := fmt.Sprintf("%s=%t", dockerregistryserver.AcceptSchema2EnvVar, accept)
	if strings.Contains(env, value) {
		if accept {
			g.By("docker-registry is already configured to accept schema 2")
		} else {
			g.By("docker-registry is already configured to refuse schema 2")
		}
		return nil
	}

	dc, err := oc.Client().DeploymentConfigs(kapi.NamespaceDefault).Get("docker-registry")
	if err != nil {
		return err
	}
	waitForVersion := dc.Status.LatestVersion + 1

	if accept {
		g.By("configuring Docker registry to accept schema 2")
	} else {
		g.By("configuring Docker registry to refuse schema 2")
	}
	err = oc.Run("env").Args("dc/docker-registry", value).Execute()
	if err != nil {
		return fmt.Errorf("failed to update registry's environment with %s: %v", value, err)
	}
	return exutil.WaitForRegistry(oc.AdminClient(), oc.AdminKubeClient(), &waitForVersion, oc)
}
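// exampleToggleSchema2 is an illustrative sketch, not part of the original helpers: a
// test that depends on manifest schema 2 support could query the current setting with
// doesRegistryAcceptSchema2 (defined below) and flip it for the duration of the test,
// restoring the original configuration afterwards.
func exampleToggleSchema2(oc *exutil.CLI) {
	accepts, err := doesRegistryAcceptSchema2(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	if !accepts {
		o.Expect(ensureRegistryAcceptsSchema2(oc, true)).NotTo(o.HaveOccurred())
		// put the registry back the way we found it when this function returns
		defer ensureRegistryAcceptsSchema2(oc, false)
	}
	// ... test body relying on schema 2 manifests ...
}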
// bumpLimit changes the limit value for given resource for all the limit types of limit range object
func bumpLimit(oc *exutil.CLI, resourceName kapi.ResourceName, limit string) (kapi.ResourceList, error) {
	g.By(fmt.Sprintf("bump a limit on resource %q to %s", resourceName, limit))
	lr, err := oc.AdminKubeClient().Core().LimitRanges(oc.Namespace()).Get(limitRangeName)
	if err != nil {
		return nil, err
	}

	res := kapi.ResourceList{}
	change := false
	for i := range lr.Spec.Limits {
		item := &lr.Spec.Limits[i]
		if old, exists := item.Max[resourceName]; exists {
			for k, v := range item.Max {
				res[k] = v
			}
			parsed := resource.MustParse(limit)
			if old.Cmp(parsed) != 0 {
				item.Max[resourceName] = parsed
				change = true
			}
		}
	}

	if !change {
		return res, nil
	}
	_, err = oc.AdminKubeClient().Core().LimitRanges(oc.Namespace()).Update(lr)
	return res, err
}
func assertEnvVars(oc *exutil.CLI, buildPrefix string, varsToFind map[string]string) {
	buildList, err := oc.REST().Builds(oc.Namespace()).List(kapi.ListOptions{})
	o.Expect(err).NotTo(o.HaveOccurred())

	// Ensure that expected start-build environment variables were injected
	for _, build := range buildList.Items {
		ginkgolog("Found build: %q", build.GetName())
		if strings.HasPrefix(build.GetName(), buildPrefix) {
			envs := []kapi.EnvVar{}
			if build.Spec.Strategy.DockerStrategy != nil && build.Spec.Strategy.DockerStrategy.Env != nil {
				envs = build.Spec.Strategy.DockerStrategy.Env
			} else if build.Spec.Strategy.SourceStrategy != nil && build.Spec.Strategy.SourceStrategy.Env != nil {
				envs = build.Spec.Strategy.SourceStrategy.Env
			} else {
				continue
			}
			for k, v := range varsToFind {
				found := false
				for _, env := range envs {
					ginkgolog("Found %s=%s in build %s", env.Name, env.Value, build.GetName())
					if k == env.Name && v == env.Value {
						found = true
						break
					}
				}
				o.ExpectWithOffset(1, found).To(o.BeTrue())
			}
		}
	}
}
// waitForAnImageStreamTag waits until an image stream with given name has non-empty history for given tag
func waitForAnImageStreamTag(oc *exutil.CLI, name, tag string) error {
	g.By(fmt.Sprintf("waiting for the image stream importer to import tag %s into stream %s", tag, name))
	start := time.Now()
	c := make(chan error)
	go func() {
		err := exutil.WaitForAnImageStream(
			oc.REST().ImageStreams(oc.Namespace()),
			name,
			func(is *imageapi.ImageStream) bool {
				if history, exists := is.Status.Tags[tag]; !exists || len(history.Items) == 0 {
					return false
				}
				return true
			},
			func(is *imageapi.ImageStream) bool {
				return time.Now().After(start.Add(waitTimeout))
			})
		c <- err
	}()

	select {
	case e := <-c:
		return e
	case <-time.After(waitTimeout):
		return fmt.Errorf("timed out while waiting for image stream tag %s/%s:%s", oc.Namespace(), name, tag)
	}
}
// waitForLimitSync waits until a usage of a quota reaches given limit with a short timeout
func waitForLimitSync(oc *exutil.CLI, hardLimit kapi.ResourceList) error {
	g.By(fmt.Sprintf("waiting for resource quota %s to get updated", quotaName))
	return testutil.WaitForResourceQuotaLimitSync(
		oc.KubeClient().Core().ResourceQuotas(oc.Namespace()),
		quotaName,
		hardLimit,
		waitTimeout)
}
func doesRegistryAcceptSchema2(oc *exutil.CLI) (bool, error) {
	ns := oc.Namespace()
	defer oc.SetNamespace(ns)
	env, err := oc.SetNamespace(kapi.NamespaceDefault).AsAdmin().Run("env").Args("dc/docker-registry", "--list").Output()
	if err != nil {
		return false, err
	}

	return strings.Contains(env, fmt.Sprintf("%s=true", dockerregistryserver.AcceptSchema2EnvVar)), nil
}
func waitForNoPodsAvailable(oc *exutil.CLI) error {
	return wait.Poll(200*time.Millisecond, 2*time.Minute, func() (bool, error) {
		pods, err := oc.KubeClient().Core().Pods(oc.Namespace()).List(kapi.ListOptions{})
		if err != nil {
			return false, err
		}
		return len(pods.Items) == 0, nil
	})
}
func waitForEndpointsAvailable(oc *exutil.CLI, serviceName string) error {
	return wait.Poll(200*time.Millisecond, 2*time.Minute, func() (bool, error) {
		ep, err := oc.KubeClient().Core().Endpoints(oc.Namespace()).Get(serviceName)
		// Tolerate NotFound b/c it could take a moment for the endpoints to be created
		if errors.TolerateNotFoundError(err) != nil {
			return false, err
		}
		return (len(ep.Subsets) > 0) && (len(ep.Subsets[0].Addresses) > 0), nil
	})
}
// FindJenkinsPod finds the pod running Jenkins
func FindJenkinsPod(oc *exutil.CLI) *kapi.Pod {
	pods, err := exutil.GetDeploymentConfigPods(oc, "jenkins")
	o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())

	if pods == nil || pods.Items == nil {
		g.Fail("No pods matching jenkins deploymentconfig in namespace " + oc.Namespace())
	}

	o.ExpectWithOffset(1, len(pods.Items)).To(o.Equal(1))
	return &pods.Items[0]
}
// QueryPrivileged executes an SQL query as a root user and returns the result.
func (m MySQL) QueryPrivileged(oc *util.CLI, query string) (string, error) {
	container, err := firstContainerName(oc.KubeREST().Pods(oc.Namespace()), m.podName)
	if err != nil {
		return "", err
	}
	masterConf, err := getPodConfig(oc.KubeREST().Pods(oc.Namespace()), m.masterPodName)
	if err != nil {
		return "", err
	}
	return oc.Run("exec").Args(m.podName, "-c", container, "--", "bash", "-c",
		fmt.Sprintf("mysql -h 127.0.0.1 -uroot -e \"%s\" %s", query, masterConf.Env["MYSQL_DATABASE"])).Output()
}
// waitForResourceQuotaSync waits until a usage of a quota reaches given limit with a short timeout
func waitForResourceQuotaSync(oc *exutil.CLI, name string, expectedResources kapi.ResourceList) (kapi.ResourceList, error) {
	g.By(fmt.Sprintf("waiting for resource quota %s to get updated", name))
	used, err := exutil.WaitForResourceQuotaSync(
		oc.KubeClient().Core().ResourceQuotas(oc.Namespace()),
		name,
		expectedResources,
		false,
		waitTimeout,
	)
	if err != nil {
		return nil, err
	}
	return used, nil
}
// QueryPrivileged executes an SQL query as a root user and returns the result.
func (m PostgreSQL) QueryPrivileged(oc *util.CLI, query string) (string, error) {
	container, err := firstContainerName(oc.KubeClient().Core().Pods(oc.Namespace()), m.podName)
	if err != nil {
		return "", err
	}
	masterConf, err := getPodConfig(oc.KubeClient().Core().Pods(oc.Namespace()), m.masterPodName)
	if err != nil {
		return "", err
	}
	return oc.Run("exec").Args(m.podName, "-c", container, "--", "bash", "-c",
		fmt.Sprintf("psql postgres://postgres:%s@127.0.0.1/%s -x -c \"%s\"",
			masterConf.Env["POSTGRESQL_ADMIN_PASSWORD"], masterConf.Env["POSTGRESQL_DATABASE"], query)).Output()
}
// deleteTestImages deletes the images referenced by the image streams in the current namespace.
func deleteTestImages(oc *exutil.CLI) {
	g.By(fmt.Sprintf("Deleting images and image streams in project %q", oc.Namespace()))
	iss, err := oc.AdminClient().ImageStreams(oc.Namespace()).List(kapi.ListOptions{})
	if err != nil {
		return
	}
	for _, is := range iss.Items {
		for _, history := range is.Status.Tags {
			for i := range history.Items {
				oc.AdminClient().Images().Delete(history.Items[i].Image)
			}
		}
	}
}
// ModifySourceCode will modify source code in the pod of the application
// according to the sed script.
func ModifySourceCode(oc *exutil.CLI, selector labels.Selector, sedScript, file string) error {
	pods, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), selector, exutil.CheckPodIsRunningFn, 1, 120*time.Second)
	if err != nil {
		return err
	}
	if len(pods) != 1 {
		return fmt.Errorf("Got %d pods for selector %v, expected 1", len(pods), selector)
	}
	pod, err := oc.KubeREST().Pods(oc.Namespace()).Get(pods[0])
	if err != nil {
		return err
	}
	return oc.Run("exec").Args(pod.Name, "-c", pod.Spec.Containers[0].Name, "--", "sed", "-ie", sedScript, file).Execute()
}
// TestRemoteLogin will test whether we can login through to a remote database.
func (m MySQL) TestRemoteLogin(oc *util.CLI, hostAddress string) error {
	container, err := firstContainerName(oc.KubeREST().Pods(oc.Namespace()), m.podName)
	if err != nil {
		return err
	}
	masterConf, err := getPodConfig(oc.KubeREST().Pods(oc.Namespace()), m.masterPodName)
	if err != nil {
		return err
	}
	err = oc.Run("exec").Args(m.podName, "-c", container, "--", "bash", "-c",
		fmt.Sprintf("mysql -h %s -u%s -p%s -e \"SELECT 1;\" %s", hostAddress,
			masterConf.Env["MYSQL_USER"], masterConf.Env["MYSQL_PASSWORD"], masterConf.Env["MYSQL_DATABASE"])).Execute()
	return err
}
func waitForNumberOfPodsWithLabel(oc *exutil.CLI, number int, label string) []string {
	g.By(fmt.Sprintf("expecting that there are %d running pods with label name=%s", number, label))

	podNames, err := exutil.WaitForPods(
		oc.KubeClient().Core().Pods(oc.Namespace()),
		exutil.ParseLabelsOrDie("name="+label),
		exutil.CheckPodIsRunningFn,
		number,
		1*time.Minute,
	)
	o.Expect(err).ShouldNot(o.HaveOccurred())
	o.Expect(podNames).Should(o.HaveLen(number))

	return podNames
}
// TestRemoteLogin will test whether we can login through to a remote database.
func (m PostgreSQL) TestRemoteLogin(oc *util.CLI, hostAddress string) error {
	container, err := firstContainerName(oc.KubeClient().Core().Pods(oc.Namespace()), m.podName)
	if err != nil {
		return err
	}
	masterConf, err := getPodConfig(oc.KubeClient().Core().Pods(oc.Namespace()), m.masterPodName)
	if err != nil {
		return err
	}
	err = oc.Run("exec").Args(m.podName, "-c", container, "--", "bash", "-c",
		fmt.Sprintf("psql postgres://%s:%s@%s/%s -x -c \"SELECT 1;\"",
			masterConf.Env["POSTGRESQL_USER"], masterConf.Env["POSTGRESQL_PASSWORD"], hostAddress, masterConf.Env["POSTGRESQL_DATABASE"])).Execute()
	return err
}
func waitForSyncedConfig(oc *exutil.CLI, name string, timeout time.Duration) error {
	dc, rcs, pods, err := deploymentInfo(oc, name)
	if err != nil {
		return err
	}
	if err := checkDeploymentInvariants(dc, rcs, pods); err != nil {
		return err
	}
	generation := dc.Generation
	return wait.PollImmediate(200*time.Millisecond, timeout, func() (bool, error) {
		config, err := oc.REST().DeploymentConfigs(oc.Namespace()).Get(name)
		if err != nil {
			return false, err
		}
		return deployutil.HasSynced(config, generation), nil
	})
}
// IsReady pings the MySQL server.
func (m MySQL) IsReady(oc *util.CLI) (bool, error) {
	conf, err := getPodConfig(oc.KubeREST().Pods(oc.Namespace()), m.podName)
	if err != nil {
		return false, err
	}
	out, err := oc.Run("exec").Args(m.podName, "-c", conf.Container, "--", "bash", "-c",
		"mysqladmin -h 127.0.0.1 -uroot ping").Output()
	if err != nil {
		switch err.(type) {
		case *util.ExitError, *exec.ExitError:
			return false, nil
		default:
			return false, err
		}
	}
	return strings.Contains(out, "mysqld is alive"), nil
}
// IsReady pings the PostgreSQL server.
func (m PostgreSQL) IsReady(oc *util.CLI) (bool, error) {
	conf, err := getPodConfig(oc.KubeClient().Core().Pods(oc.Namespace()), m.podName)
	if err != nil {
		return false, err
	}
	out, err := oc.Run("exec").Args(m.podName, "-c", conf.Container, "--", "bash", "-c",
		"psql postgresql://postgres@127.0.0.1 -x -c \"SELECT 1;\"").Output()
	if err != nil {
		switch err.(type) {
		case *util.ExitError, *exec.ExitError:
			return false, nil
		default:
			return false, err
		}
	}
	return strings.Contains(out, "-[ RECORD 1 ]\n?column? | 1"), nil
}
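// exampleWaitAndQueryPostgreSQL is an illustrative sketch, not part of the original
// helpers: given an already-constructed PostgreSQL helper, it polls IsReady and then
// issues a privileged query. The poll interval, timeout, and query text are arbitrary
// example values chosen for the sketch.
func exampleWaitAndQueryPostgreSQL(oc *util.CLI, m PostgreSQL) (string, error) {
	// wait until the server answers the readiness probe query
	err := wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) {
		return m.IsReady(oc)
	})
	if err != nil {
		return "", err
	}
	return m.QueryPrivileged(oc, "SELECT datname FROM pg_database;")
}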
// RunInPodContainer will run provided command in the specified pod container.
func RunInPodContainer(oc *exutil.CLI, selector labels.Selector, cmd []string) error {
	pods, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), selector, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
	if err != nil {
		return err
	}
	if len(pods) != 1 {
		return fmt.Errorf("Got %d pods for selector %v, expected 1", len(pods), selector)
	}
	pod, err := oc.KubeREST().Pods(oc.Namespace()).Get(pods[0])
	if err != nil {
		return err
	}
	args := []string{pod.Name, "-c", pod.Spec.Containers[0].Name, "--"}
	args = append(args, cmd...)
	return oc.Run("exec").Args(args...).Execute()
}
// bumpQuota sets the hard limit of the given resource on the quota object to the given value. It returns
// the modified hard spec.
func bumpQuota(oc *exutil.CLI, resourceName kapi.ResourceName, value int64) (kapi.ResourceList, error) {
	g.By(fmt.Sprintf("bump the quota to %s=%d", resourceName, value))
	rq, err := oc.AdminKubeClient().Core().ResourceQuotas(oc.Namespace()).Get(quotaName)
	if err != nil {
		return nil, err
	}
	rq.Spec.Hard[resourceName] = *resource.NewQuantity(value, resource.DecimalSI)
	_, err = oc.AdminKubeClient().Core().ResourceQuotas(oc.Namespace()).Update(rq)
	if err != nil {
		return nil, err
	}
	err = waitForLimitSync(oc, rq.Spec.Hard)
	if err != nil {
		return nil, err
	}
	return rq.Spec.Hard, nil
}
// createResourceQuota creates a resource quota with given hard limits in the current namespace and waits
// until the first usage refresh
func createResourceQuota(oc *exutil.CLI, hard kapi.ResourceList) (*kapi.ResourceQuota, error) {
	rq := &kapi.ResourceQuota{
		ObjectMeta: kapi.ObjectMeta{
			Name: quotaName,
		},
		Spec: kapi.ResourceQuotaSpec{
			Hard: hard,
		},
	}

	g.By(fmt.Sprintf("creating resource quota with a limit %v", hard))
	rq, err := oc.AdminKubeClient().Core().ResourceQuotas(oc.Namespace()).Create(rq)
	if err != nil {
		return nil, err
	}
	err = waitForLimitSync(oc, hard)
	return rq, err
}
// createLimitRangeOfType creates a new limit range object with given limits for given limit type in current namespace
func createLimitRangeOfType(oc *exutil.CLI, limitType kapi.LimitType, maxLimits kapi.ResourceList) (*kapi.LimitRange, error) {
	lr := &kapi.LimitRange{
		ObjectMeta: kapi.ObjectMeta{
			Name: limitRangeName,
		},
		Spec: kapi.LimitRangeSpec{
			Limits: []kapi.LimitRangeItem{
				{
					Type: limitType,
					Max:  maxLimits,
				},
			},
		},
	}

	g.By(fmt.Sprintf("creating limit range object %q with %s limited to: %v", limitRangeName, limitType, maxLimits))
	lr, err := oc.AdminKubeClient().Core().LimitRanges(oc.Namespace()).Create(lr)
	return lr, err
}
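// exampleQuotaAndLimitSetup is an illustrative sketch, not part of the original helpers:
// it wires the quota and limit-range helpers above together the way a test might.
// imageapi.ResourceImageStreams, imageapi.LimitTypeImage, and the chosen quantities are
// assumptions made only for the sake of the example.
func exampleQuotaAndLimitSetup(oc *exutil.CLI) {
	// start with a quota of one image stream and wait for its usage to be synced
	hard := kapi.ResourceList{imageapi.ResourceImageStreams: resource.MustParse("1")}
	_, err := createResourceQuota(oc, hard)
	o.Expect(err).NotTo(o.HaveOccurred())

	// cap the size of individual images pushed to the integrated registry
	_, err = createLimitRangeOfType(oc, imageapi.LimitTypeImage, kapi.ResourceList{
		kapi.ResourceStorage: resource.MustParse("10Mi"),
	})
	o.Expect(err).NotTo(o.HaveOccurred())

	// later, relax both limits and wait for the quota usage to catch up
	hard, err = bumpQuota(oc, imageapi.ResourceImageStreams, 3)
	o.Expect(err).NotTo(o.HaveOccurred())
	_, err = bumpLimit(oc, kapi.ResourceStorage, "20Mi")
	o.Expect(err).NotTo(o.HaveOccurred())
	_, err = waitForResourceQuotaSync(oc, quotaName, hard)
	o.Expect(err).NotTo(o.HaveOccurred())
}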
func getRegistryStorageSize(oc *exutil.CLI) (int64, error) {
	ns := oc.Namespace()
	defer oc.SetNamespace(ns)
	out, err := oc.SetNamespace(kapi.NamespaceDefault).AsAdmin().Run("rsh").Args("dc/docker-registry", "du", "--bytes", "--summarize", "/registry/docker/registry").Output()
	if err != nil {
		return 0, err
	}
	m := regexp.MustCompile(`^\d+`).FindString(out)
	if len(m) == 0 {
		return 0, fmt.Errorf("failed to parse du output: %s", out)
	}

	size, err := strconv.ParseInt(m, 10, 64)
	if err != nil {
		return 0, fmt.Errorf("failed to parse du output: %s", m)
	}
	return size, nil
}
// NewRef creates a jenkins reference from an OC client
func NewRef(oc *exutil.CLI) *JenkinsRef {
	g.By("get ip and port for jenkins service")
	serviceIP, err := oc.Run("get").Args("svc", "jenkins", "--config", exutil.KubeConfigPath()).Template("{{.spec.clusterIP}}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	port, err := oc.Run("get").Args("svc", "jenkins", "--config", exutil.KubeConfigPath()).Template("{{ $x := index .spec.ports 0}}{{$x.port}}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())

	g.By("get admin password")
	password := GetAdminPassword(oc)
	o.Expect(password).ShouldNot(o.BeEmpty())

	j := &JenkinsRef{
		oc:        oc,
		host:      serviceIP,
		port:      port,
		namespace: oc.Namespace(),
		password:  password,
	}
	return j
}
// initExecPod stands up a simple pod which can be used for exec commands
func initExecPod(oc *exutil.CLI) *kapi.Pod {
	// Create a running pod in which we can execute our commands
	oc.Run("run").Args("centos", "--image", "centos:7", "--command", "--", "sleep", "1800").Execute()

	var targetPod *kapi.Pod
	err := wait.Poll(10*time.Second, 10*time.Minute, func() (bool, error) {
		pods, err := oc.KubeClient().Core().Pods(oc.Namespace()).List(kapi.ListOptions{})
		o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
		for i := range pods.Items {
			// take the address of the slice element rather than the loop variable so the
			// returned pod is not overwritten by a later iteration
			p := &pods.Items[i]
			if strings.HasPrefix(p.Name, "centos") && !strings.Contains(p.Name, "deploy") && p.Status.Phase == kapi.PodRunning {
				targetPod = p
				return true, nil
			}
		}
		return false, nil
	})
	o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())

	return targetPod
}
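// exampleCurlFromExecPod is an illustrative sketch, not part of the original helpers:
// it shows how the pod returned by initExecPod might be used to reach a service from
// inside the cluster. The "jenkins" service name, port, and path are placeholder values.
func exampleCurlFromExecPod(oc *exutil.CLI) {
	pod := initExecPod(oc)
	out, err := oc.Run("exec").Args(pod.Name, "--",
		"curl", "-s", "-o", "/dev/null", "-w", "%{http_code}", "http://jenkins:8080/login").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(out).To(o.ContainSubstring("200"))
}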
func checkSingleIdle(oc *exutil.CLI, idlingFile string, resources map[string][]string, resourceName string, kind string) {
	g.By("Idling the service")
	_, err := oc.Run("idle").Args("--resource-names-file", idlingFile).Output()
	o.Expect(err).ToNot(o.HaveOccurred())

	g.By("Ensuring the scale is zero (and stays zero)")
	objName := resources[resourceName][0]
	// make sure we don't get woken up by an incorrect router health check or anything like that
	o.Consistently(func() (string, error) {
		return oc.Run("get").Args(resourceName+"/"+objName, "--output=jsonpath=\"{.spec.replicas}\"").Output()
	}, 20*time.Second, 500*time.Millisecond).Should(o.ContainSubstring("0"))

	g.By("Fetching the service and checking the annotations are present")
	serviceName := resources["service"][0]
	endpoints, err := oc.KubeREST().Endpoints(oc.Namespace()).Get(serviceName)
	o.Expect(err).NotTo(o.HaveOccurred())

	o.Expect(endpoints.Annotations).To(o.HaveKey(unidlingapi.IdledAtAnnotation))
	o.Expect(endpoints.Annotations).To(o.HaveKey(unidlingapi.UnidleTargetAnnotation))

	g.By("Checking the idled-at time")
	idledAtAnnotation := endpoints.Annotations[unidlingapi.IdledAtAnnotation]
	idledAtTime, err := time.Parse(time.RFC3339, idledAtAnnotation)
	o.Expect(err).ToNot(o.HaveOccurred())
	o.Expect(idledAtTime).To(o.BeTemporally("~", time.Now(), 5*time.Minute))

	g.By("Checking the idle targets")
	unidleTargetAnnotation := endpoints.Annotations[unidlingapi.UnidleTargetAnnotation]
	unidleTargets := []unidlingapi.RecordedScaleReference{}
	err = json.Unmarshal([]byte(unidleTargetAnnotation), &unidleTargets)
	o.Expect(err).ToNot(o.HaveOccurred())
	o.Expect(unidleTargets).To(o.Equal([]unidlingapi.RecordedScaleReference{
		{
			Replicas: 2,
			CrossGroupObjectReference: unidlingapi.CrossGroupObjectReference{
				Name: resources[resourceName][0],
				Kind: kind,
			},
		},
	}))
}
func cleanup(oc *exutil.CLI) {
	exutil.DumpImageStreams(oc)
	oc.AsAdmin().Run("delete").Args("all", "--all", "-n", oc.Namespace()).Execute()
	exutil.DumpImageStreams(oc)
	oc.AsAdmin().Run("delete").Args("pvc", "--all", "-n", oc.Namespace()).Execute()
	exutil.CleanupHostPathVolumes(oc.AdminKubeClient().Core().PersistentVolumes(), oc.Namespace())
}