Example #1
func createFixture(oc *exutil.CLI, path string) ([]string, []string, error) {
	output, err := oc.Run("create").Args("-f", path, "-o", "name").Output()
	if err != nil {
		return nil, nil, err
	}

	lines := strings.Split(output, "\n")

	resources := make([]string, 0, len(lines)-1)
	names := make([]string, 0, len(lines)-1)

	for _, line := range lines {
		if line == "" {
			continue
		}
		parts := strings.Split(line, "/")
		if len(parts) != 2 {
			return nil, nil, fmt.Errorf("expected type/name syntax, got: %q", line)
		}
		resources = append(resources, parts[0])
		names = append(names, parts[1])
	}

	return resources, names, nil
}
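A minimal usage sketch (the fixture path, and that this runs inside a Ginkgo spec, are assumptions, not part of the original helper):

// Hypothetical usage: create every object in the fixture and log the parsed type/name pairs.
resources, names, err := createFixture(oc, exutil.FixturePath("testdata", "idling-echo-server.yaml"))
o.Expect(err).NotTo(o.HaveOccurred())
for i := range resources {
	fmt.Fprintf(g.GinkgoWriter, "created %s/%s\n", resources[i], names[i])
}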
Example #2
// ensureRegistryAcceptsSchema2 checks whether the registry is configured to accept manifest V2
// schema 2. If the result doesn't match the given accept argument, the registry's deployment config
// is updated accordingly and the function blocks until the registry is redeployed and ready for new
// requests.
func ensureRegistryAcceptsSchema2(oc *exutil.CLI, accept bool) error {
	ns := oc.Namespace()
	oc = oc.SetNamespace(kapi.NamespaceDefault).AsAdmin()
	defer oc.SetNamespace(ns)
	env, err := oc.Run("env").Args("dc/docker-registry", "--list").Output()
	if err != nil {
		return err
	}

	value := fmt.Sprintf("%s=%t", dockerregistryserver.AcceptSchema2EnvVar, accept)
	if strings.Contains(env, value) {
		if accept {
			g.By("docker-registry is already configured to accept schema 2")
		} else {
			g.By("docker-registry is already configured to refuse schema 2")
		}
		return nil
	}

	dc, err := oc.Client().DeploymentConfigs(kapi.NamespaceDefault).Get("docker-registry")
	if err != nil {
		return err
	}
	waitForVersion := dc.Status.LatestVersion + 1

	g.By("configuring Docker registry to accept schema 2")
	err = oc.Run("env").Args("dc/docker-registry", value).Execute()
	if err != nil {
		return fmt.Errorf("failed to update registry's environment with %s: %v", &waitForVersion, err)
	}
	return exutil.WaitForRegistry(oc.AdminClient(), oc.AdminKubeClient(), &waitForVersion, oc)
}
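A sketch of how a test might call this helper before pushing images (assuming a Ginkgo spec with an admin-capable CLI):

// Hypothetical usage: make sure the registry accepts schema 2 and fail the spec otherwise.
err := ensureRegistryAcceptsSchema2(oc, true)
o.Expect(err).NotTo(o.HaveOccurred())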
Example #3
// createFixture will create the provided fixture and return the resource and the
// name separately.
// TODO: Probably move to a more general location like test/extended/util/cli.go
func createFixture(oc *exutil.CLI, fixture string) (string, string, error) {
	resource, err := oc.Run("create").Args("-f", fixture, "-o", "name").Output()
	if err != nil {
		return "", "", err
	}
	parts := strings.Split(resource, "/")
	if len(parts) != 2 {
		return "", "", fmt.Errorf("expected type/name syntax, got: %s", resource)
	}
	return resource, parts[1], nil
}
Example #4
// executeShellCommand runs a bash command in the given pod. A non-zero exit from the command is
// treated as empty output rather than an error.
func executeShellCommand(oc *util.CLI, podName string, command string) (string, error) {
	out, err := oc.Run("exec").Args(podName, "--", "bash", "-c", command).Output()
	if err != nil {
		switch err.(type) {
		case *util.ExitError, *exec.ExitError:
			return "", nil
		default:
			return "", err
		}
	}

	return out, nil
}
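Because non-zero exits are swallowed, the helper suits polling; a sketch in which the pod name, marker file, and timeouts are illustrative assumptions:

// Hypothetical usage: wait until a marker file shows up inside the pod.
err := wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) {
	out, err := executeShellCommand(oc, "centos", "cat /tmp/ready 2>/dev/null")
	if err != nil {
		return false, err
	}
	return strings.Contains(out, "ready"), nil
})
o.Expect(err).NotTo(o.HaveOccurred())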
Example #5
// QueryPrivileged executes an SQL query as a root user and returns the result.
func (m MySQL) QueryPrivileged(oc *util.CLI, query string) (string, error) {
	container, err := firstContainerName(oc.KubeREST().Pods(oc.Namespace()), m.podName)
	if err != nil {
		return "", err
	}
	masterConf, err := getPodConfig(oc.KubeREST().Pods(oc.Namespace()), m.masterPodName)
	if err != nil {
		return "", err
	}
	return oc.Run("exec").Args(m.podName, "-c", container, "--", "bash", "-c",
		fmt.Sprintf("mysql -h 127.0.0.1 -uroot -e \"%s\" %s",
			query, masterConf.Env["MYSQL_DATABASE"])).Output()
}
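A usage sketch, assuming db is a MySQL helper value for a running pod (for example one built by CreateMySQLReplicationHelpers):

// Hypothetical usage: run a root-level query and log the raw output.
out, err := db.QueryPrivileged(oc, "SELECT VERSION();")
o.Expect(err).NotTo(o.HaveOccurred())
fmt.Fprintf(g.GinkgoWriter, "server version:\n%s\n", out)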
Example #6
func getAdminPassword(oc *exutil.CLI) string {
	envs, err := oc.Run("set").Args("env", "dc/jenkins", "--list").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	kvs := strings.Split(envs, "\n")
	for _, kv := range kvs {
		if strings.HasPrefix(kv, "JENKINS_PASSWORD="******"=")
			fmt.Fprintf(g.GinkgoWriter, "\nJenkins admin password %s\n", s[1])
			return s[1]
		}
	}
	return "password"
}
Example #7
// QueryPrivileged executes an SQL query as a root user and returns the result.
func (m PostgreSQL) QueryPrivileged(oc *util.CLI, query string) (string, error) {
	container, err := firstContainerName(oc.KubeClient().Core().Pods(oc.Namespace()), m.podName)
	if err != nil {
		return "", err
	}
	masterConf, err := getPodConfig(oc.KubeClient().Core().Pods(oc.Namespace()), m.masterPodName)
	if err != nil {
		return "", err
	}
	return oc.Run("exec").Args(m.podName, "-c", container, "--", "bash", "-c",
		fmt.Sprintf("psql postgres://postgres:%[email protected]/%s -x -c \"%s\"",
			masterConf.Env["POSTGRESQL_ADMIN_PASSWORD"],
			masterConf.Env["POSTGRESQL_DATABASE"], query)).Output()
}
Example #8
// TestRemoteLogin tests whether we can log in to a remote database.
func (m MySQL) TestRemoteLogin(oc *util.CLI, hostAddress string) error {
	container, err := firstContainerName(oc.KubeREST().Pods(oc.Namespace()), m.podName)
	if err != nil {
		return err
	}
	masterConf, err := getPodConfig(oc.KubeREST().Pods(oc.Namespace()), m.masterPodName)
	if err != nil {
		return err
	}
	err = oc.Run("exec").Args(m.podName, "-c", container, "--", "bash", "-c",
		fmt.Sprintf("mysql -h %s -u%s -p%s -e \"SELECT 1;\" %s",
			hostAddress, masterConf.Env["MYSQL_USER"], masterConf.Env["MYSQL_PASSWORD"],
			masterConf.Env["MYSQL_DATABASE"])).Execute()
	return err
}
Example #9
// TestRemoteLogin tests whether we can log in to a remote database.
func (m PostgreSQL) TestRemoteLogin(oc *util.CLI, hostAddress string) error {
	container, err := firstContainerName(oc.KubeClient().Core().Pods(oc.Namespace()), m.podName)
	if err != nil {
		return err
	}
	masterConf, err := getPodConfig(oc.KubeClient().Core().Pods(oc.Namespace()), m.masterPodName)
	if err != nil {
		return err
	}
	err = oc.Run("exec").Args(m.podName, "-c", container, "--", "bash", "-c",
		fmt.Sprintf("psql postgres://%s:%s@%s/%s -x -c \"SELECT 1;\"",
			masterConf.Env["POSTGRESQL_USER"], masterConf.Env["POSTGRESQL_PASSWORD"],
			hostAddress, masterConf.Env["POSTGRESQL_DATABASE"])).Execute()
	return err
}
Example #10
// ModifySourceCode will modify source code in the pod of the application
// according to the sed script.
func ModifySourceCode(oc *exutil.CLI, selector labels.Selector, sedScript, file string) error {
	pods, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), selector, exutil.CheckPodIsRunningFunc, 1, 120*time.Second)
	if err != nil {
		return err
	}
	if len(pods) != 1 {
		return fmt.Errorf("Got %d pods for selector %v, expected 1", len(pods), selector)
	}

	pod, err := oc.KubeREST().Pods(oc.Namespace()).Get(pods[0])
	if err != nil {
		return err
	}
	return oc.Run("exec").Args(pod.Name, "-c", pod.Spec.Containers[0].Name, "--", "sed", "-ie", sedScript, file).Execute()
}
Example #11
// RunInPodContainer will run provided command in the specified pod container.
func RunInPodContainer(oc *exutil.CLI, selector labels.Selector, cmd []string) error {
	pods, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), selector, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
	if err != nil {
		return err
	}
	if len(pods) != 1 {
		return fmt.Errorf("Got %d pods for selector %v, expected 1", len(pods), selector)
	}

	pod, err := oc.KubeREST().Pods(oc.Namespace()).Get(pods[0])
	if err != nil {
		return err
	}
	args := []string{pod.Name, "-c", pod.Spec.Containers[0].Name, "--"}
	args = append(args, cmd...)
	return oc.Run("exec").Args(args...).Execute()
}
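A usage sketch; the label selector and the command are illustrative:

// Hypothetical usage: list a data directory in the single pod matching the selector.
sel, err := labels.Parse("name=database")
o.Expect(err).NotTo(o.HaveOccurred())
err = RunInPodContainer(oc, sel, []string{"ls", "-l", "/var/lib/data"})
o.Expect(err).NotTo(o.HaveOccurred())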
Example #12
// IsReady pings the PostgreSQL server.
func (m PostgreSQL) IsReady(oc *util.CLI) (bool, error) {
	conf, err := getPodConfig(oc.KubeClient().Core().Pods(oc.Namespace()), m.podName)
	if err != nil {
		return false, err
	}
	out, err := oc.Run("exec").Args(m.podName, "-c", conf.Container, "--", "bash", "-c",
		"psql postgresql://[email protected] -x -c \"SELECT 1;\"").Output()
	if err != nil {
		switch err.(type) {
		case *util.ExitError, *exec.ExitError:
			return false, nil
		default:
			return false, err
		}
	}
	return strings.Contains(out, "-[ RECORD 1 ]\n?column? | 1"), nil
}
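A sketch of plugging IsReady into the polling pattern used elsewhere in these helpers (m and the timeouts are assumptions):

// Hypothetical usage: block until PostgreSQL answers SELECT 1.
err := wait.Poll(2*time.Second, 1*time.Minute, func() (bool, error) {
	return m.IsReady(oc)
})
o.Expect(err).NotTo(o.HaveOccurred())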
Example #13
// IsReady pings the MySQL server.
func (m MySQL) IsReady(oc *util.CLI) (bool, error) {
	conf, err := getPodConfig(oc.KubeREST().Pods(oc.Namespace()), m.podName)
	if err != nil {
		return false, err
	}
	out, err := oc.Run("exec").Args(m.podName, "-c", conf.Container, "--", "bash", "-c",
		"mysqladmin -h 127.0.0.1 -uroot ping").Output()
	if err != nil {
		switch err.(type) {
		case *util.ExitError, *exec.ExitError:
			return false, nil
		default:
			return false, err
		}
	}
	return strings.Contains(out, "mysqld is alive"), nil
}
Example #14
func lookupFSGroup(oc *exutil.CLI, project string) (int, error) {
	gidRange, err := oc.Run("get").Args("project", project,
		"--template='{{ index .metadata.annotations \"openshift.io/sa.scc.supplemental-groups\" }}'").Output()
	if err != nil {
		return 0, err
	}

	// gidRange will be something like: 1000030000/10000
	fsGroupStr := strings.Split(gidRange, "/")[0]
	fsGroupStr = strings.Replace(fsGroupStr, "'", "", -1)

	fsGroup, err := strconv.Atoi(fsGroupStr)
	if err != nil {
		return 0, err
	}

	return fsGroup, nil
}
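A sketch of feeding the looked-up group into a pod security context; the pod spec fragment is an assumption, not from the original test:

// Hypothetical usage: run a test pod with the project's allocated FSGroup.
fsGroup, err := lookupFSGroup(oc, oc.Namespace())
o.Expect(err).NotTo(o.HaveOccurred())
gid := int64(fsGroup)
securityContext := &kapi.PodSecurityContext{FSGroup: &gid}
_ = securityContext // attach to the pod spec before creating the pod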
Example #15
// NewRef creates a jenkins reference from an OC client
func NewRef(oc *exutil.CLI) *JenkinsRef {
	g.By("get ip and port for jenkins service")
	serviceIP, err := oc.Run("get").Args("svc", "jenkins", "--config", exutil.KubeConfigPath()).Template("{{.spec.clusterIP}}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	port, err := oc.Run("get").Args("svc", "jenkins", "--config", exutil.KubeConfigPath()).Template("{{ $x := index .spec.ports 0}}{{$x.port}}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())

	g.By("get admin password")
	password := GetAdminPassword(oc)
	o.Expect(password).ShouldNot(o.BeEmpty())

	j := &JenkinsRef{
		oc:        oc,
		host:      serviceIP,
		port:      port,
		namespace: oc.Namespace(),
		password:  password,
	}
	return j
}
Example #16
// initExecPod stands up a simple pod which can be used for exec commands.
func initExecPod(oc *exutil.CLI) *kapi.Pod {
	// Create a running pod in which we can execute our commands
	oc.Run("run").Args("centos", "--image", "centos:7", "--command", "--", "sleep", "1800").Execute()

	var targetPod *kapi.Pod
	err := wait.Poll(10*time.Second, 10*time.Minute, func() (bool, error) {
		pods, err := oc.KubeClient().Core().Pods(oc.Namespace()).List(kapi.ListOptions{})
		o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
		for i := range pods.Items {
			// take a pointer to the slice element rather than the loop variable
			p := &pods.Items[i]
			if strings.HasPrefix(p.Name, "centos") && !strings.Contains(p.Name, "deploy") && p.Status.Phase == "Running" {
				targetPod = p
				return true, nil
			}
		}
		return false, nil
	})
	o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())

	return targetPod
}
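A usage sketch; the target URL is illustrative:

// Hypothetical usage: curl a service from inside the cluster via the exec pod.
execPod := initExecPod(oc)
out, err := oc.Run("exec").Args(execPod.Name, "--", "curl", "-s", "http://jenkins:8080/login").Output()
o.Expect(err).NotTo(o.HaveOccurred())
fmt.Fprintf(g.GinkgoWriter, "response:\n%s\n", out)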
Example #17
func checkSingleIdle(oc *exutil.CLI, idlingFile string, resources map[string][]string, resourceName string, kind string) {
	g.By("Idling the service")
	_, err := oc.Run("idle").Args("--resource-names-file", idlingFile).Output()
	o.Expect(err).ToNot(o.HaveOccurred())

	g.By("Ensuring the scale is zero (and stays zero)")
	objName := resources[resourceName][0]
	// make sure we don't get woken up by an incorrect router health check or anything like that
	o.Consistently(func() (string, error) {
		return oc.Run("get").Args(resourceName+"/"+objName, "--output=jsonpath=\"{.spec.replicas}\"").Output()
	}, 20*time.Second, 500*time.Millisecond).Should(o.ContainSubstring("0"))

	g.By("Fetching the service and checking the annotations are present")
	serviceName := resources["service"][0]
	endpoints, err := oc.KubeREST().Endpoints(oc.Namespace()).Get(serviceName)
	o.Expect(err).NotTo(o.HaveOccurred())

	o.Expect(endpoints.Annotations).To(o.HaveKey(unidlingapi.IdledAtAnnotation))
	o.Expect(endpoints.Annotations).To(o.HaveKey(unidlingapi.UnidleTargetAnnotation))

	g.By("Checking the idled-at time")
	idledAtAnnotation := endpoints.Annotations[unidlingapi.IdledAtAnnotation]
	idledAtTime, err := time.Parse(time.RFC3339, idledAtAnnotation)
	o.Expect(err).ToNot(o.HaveOccurred())
	o.Expect(idledAtTime).To(o.BeTemporally("~", time.Now(), 5*time.Minute))

	g.By("Checking the idle targets")
	unidleTargetAnnotation := endpoints.Annotations[unidlingapi.UnidleTargetAnnotation]
	unidleTargets := []unidlingapi.RecordedScaleReference{}
	err = json.Unmarshal([]byte(unidleTargetAnnotation), &unidleTargets)
	o.Expect(err).ToNot(o.HaveOccurred())
	o.Expect(unidleTargets).To(o.Equal([]unidlingapi.RecordedScaleReference{
		{
			Replicas: 2,
			CrossGroupObjectReference: unidlingapi.CrossGroupObjectReference{
				Name: resources[resourceName][0],
				Kind: kind,
			},
		},
	}))
}
Example #18
func failureTrap(oc *exutil.CLI, name string, failed bool) {
	if !failed {
		return
	}
	out, err := oc.Run("get").Args("dc/"+name, "-o", "yaml").Output()
	if err != nil {
		e2e.Logf("Error getting Deployment Config %s: %v", name, err)
		return
	}
	e2e.Logf("\n%s\n", out)
	_, rcs, pods, err := deploymentInfo(oc, name)
	if err != nil {
		e2e.Logf("Error getting deployment %s info: %v", name, err)
		return
	}
	for _, r := range rcs {
		out, err := oc.Run("get").Args("rc/"+r.Name, "-o", "yaml").Output()
		if err != nil {
			e2e.Logf("Error getting replication controller %s info: %v", r.Name, err)
			return
		}
		e2e.Logf("\n%s\n", out)
	}
	p, _ := deploymentPods(pods)
	for _, v := range p {
		for _, pod := range v {
			out, err := oc.Run("get").Args("pod/"+pod.Name, "-o", "yaml").Output()
			if err != nil {
				e2e.Logf("Error getting pod %s: %v", pod.Name, err)
				return
			}
			e2e.Logf("\n%s\n", out)
			out, _ = oc.Run("logs").Args("pod/" + pod.Name).Output()
			e2e.Logf("--- pod %s logs\n%s---\n", pod.Name, out)
		}
	}
}
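A sketch of wiring the trap into Ginkgo so artifacts are dumped only for failed specs (the deployment config name is illustrative):

// Hypothetical usage inside a Describe block:
g.AfterEach(func() {
	failureTrap(oc, "deployment-simple", g.CurrentGinkgoTestDescription().Failed)
})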
Example #19
// buildAndPushImage tries to build an image. The image is stored as an image stream tag <name>:<tag>.
// If shouldBeDenied is true, the build is expected to fail with a denied error.
func buildAndPushImage(oc *exutil.CLI, namespace, name, tag string, shouldBeDenied bool) {
	istName := name
	if tag != "" {
		istName += ":" + tag
	}
	g.By(fmt.Sprintf("building an image %q", istName))

	bc, err := oc.REST().BuildConfigs(namespace).Get(name)
	if err == nil {
		g.By(fmt.Sprintf("changing build config %s to store result into %s", name, istName))
		o.Expect(bc.Spec.BuildSpec.Output.To.Kind).To(o.Equal("ImageStreamTag"))
		bc.Spec.BuildSpec.Output.To.Name = istName
		_, err := oc.REST().BuildConfigs(namespace).Update(bc)
		o.Expect(err).NotTo(o.HaveOccurred())
	} else {
		g.By(fmt.Sprintf("creating a new build config %s with output to %s ", name, istName))
		err = oc.Run("new-build").Args(
			"--binary",
			"--name", name,
			"--to", istName).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
	}

	tempDir, err := ioutil.TempDir("", "name-build")
	o.Expect(err).NotTo(o.HaveOccurred())

	err = createRandomBlob(path.Join(tempDir, "data"), imageSize)
	o.Expect(err).NotTo(o.HaveOccurred())
	err = ioutil.WriteFile(path.Join(tempDir, "Dockerfile"), []byte("FROM scratch\nCOPY data /data\n"), 0644)
	o.Expect(err).NotTo(o.HaveOccurred())

	err = oc.Run("start-build").Args(name, "--from-dir", tempDir, "--wait").Execute()
	if shouldBeDenied {
		o.Expect(err).To(o.HaveOccurred())
		out, err := oc.Run("logs").Args("bc/" + name).Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(out).Should(o.MatchRegexp("(?i)Failed to push image:.*denied"))
	} else {
		o.Expect(err).NotTo(o.HaveOccurred())
	}
}
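A sketch of the intended call pattern, assuming the project carries an image quota that denies the second push (names and tags are illustrative):

// Hypothetical usage: the first build fits the quota, the second is denied.
buildAndPushImage(oc, oc.Namespace(), "sized", "first", false)
buildAndPushImage(oc, oc.Namespace(), "sized", "second", true)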
Example #20
func replicationTestFactory(oc *exutil.CLI, template string) func() {
	return func() {
		oc.SetOutputDir(exutil.TestContext.OutputDir)
		defer cleanup(oc)

		_, err := exutil.SetupHostPathVolumes(oc.AdminKubeREST().PersistentVolumes(), oc.Namespace(), "512Mi", 1)
		o.Expect(err).NotTo(o.HaveOccurred())

		err = testutil.WaitForPolicyUpdate(oc.REST(), oc.Namespace(), "create", "templates", true)
		o.Expect(err).NotTo(o.HaveOccurred())

		err = oc.Run("new-app").Args("-f", template).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		err = oc.Run("new-app").Args("-f", helperTemplate, "-p", fmt.Sprintf("DATABASE_SERVICE_NAME=%s", helperName)).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = oc.KubeFramework().WaitForAnEndpoint(helperName)
		o.Expect(err).NotTo(o.HaveOccurred())

		tableCounter := 0
		assertReplicationIsWorking := func(masterDeployment, slaveDeployment string, slaveCount int) (exutil.Database, []exutil.Database, exutil.Database) {
			tableCounter++
			table := fmt.Sprintf("table_%0.2d", tableCounter)

			master, slaves, helper := CreateMySQLReplicationHelpers(oc.KubeREST().Pods(oc.Namespace()), masterDeployment, slaveDeployment, fmt.Sprintf("%s-1", helperName), slaveCount)
			o.Expect(exutil.WaitUntilAllHelpersAreUp(oc, []exutil.Database{master, helper})).NotTo(o.HaveOccurred())
			o.Expect(exutil.WaitUntilAllHelpersAreUp(oc, slaves)).NotTo(o.HaveOccurred())

			// Test if we can query as root
			oc.KubeFramework().WaitForAnEndpoint("mysql-master")
			err := helper.TestRemoteLogin(oc, "mysql-master")
			o.Expect(err).NotTo(o.HaveOccurred())

			// Create a new table with random name
			_, err = master.Query(oc, fmt.Sprintf("CREATE TABLE %s (col1 VARCHAR(20), col2 VARCHAR(20));", table))
			o.Expect(err).NotTo(o.HaveOccurred())

			// Write new data to the table through master
			_, err = master.Query(oc, fmt.Sprintf("INSERT INTO %s (col1, col2) VALUES ('val1', 'val2');", table))
			o.Expect(err).NotTo(o.HaveOccurred())

			// Make sure data is present on master
			err = exutil.WaitForQueryOutput(oc, master, 10*time.Second, false, fmt.Sprintf("SELECT * FROM %s\\G;", table), "col1: val1\ncol2: val2")
			o.Expect(err).NotTo(o.HaveOccurred())

			// Make sure data was replicated to all slaves
			for _, slave := range slaves {
				err = exutil.WaitForQueryOutput(oc, slave, 90*time.Second, false, fmt.Sprintf("SELECT * FROM %s\\G;", table), "col1: val1\ncol2: val2")
				o.Expect(err).NotTo(o.HaveOccurred())
			}

			return master, slaves, helper
		}

		g.By("after initial deployment")
		master, _, _ := assertReplicationIsWorking("mysql-master-1", "mysql-slave-1", 1)

		g.By("after master is restarted by changing the Deployment Config")
		err = oc.Run("env").Args("dc", "mysql-master", "MYSQL_ROOT_PASSWORD=newpass").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		master, _, _ = assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		g.By("after master is restarted by deleting the pod")
		err = oc.Run("delete").Args("pod", "-l", "deployment=mysql-master-2").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		_, slaves, _ := assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		g.By("after slave is restarted by deleting the pod")
		err = oc.Run("delete").Args("pod", "-l", "deployment=mysql-slave-1").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), slaves[0].PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		pods, err := oc.KubeREST().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: exutil.ParseLabelsOrDie("deployment=mysql-slave-1")})
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(len(pods.Items)).To(o.Equal(1))

		g.By("after slave is scaled to 0 and then back to 4 replicas")
		err = oc.Run("scale").Args("dc", "mysql-slave", "--replicas=0").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		err = oc.Run("scale").Args("dc", "mysql-slave", "--replicas=4").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 4)
	}
}
Example #21
func doGetFlowsForNode(oc *testexutil.CLI, nodeName string) ([]string, error) {
	pod := &kapi.Pod{
		TypeMeta: kapiunversioned.TypeMeta{
			Kind: "Pod",
		},
		ObjectMeta: kapi.ObjectMeta{
			GenerateName: "flow-check",
		},
		Spec: kapi.PodSpec{
			Containers: []kapi.Container{
				{
					Name:  "flow-check",
					Image: "openshift/openvswitch",
					// kubernetes seems to get confused sometimes if the pod exits too quickly
					Command: []string{"sh", "-c", "ovs-ofctl -O OpenFlow13 dump-flows br0 && sleep 1"},
					VolumeMounts: []kapi.VolumeMount{
						{
							Name:      "ovs-socket",
							MountPath: "/var/run/openvswitch/br0.mgmt",
						},
					},
				},
			},
			Volumes: []kapi.Volume{
				{
					Name: "ovs-socket",
					VolumeSource: kapi.VolumeSource{
						HostPath: &kapi.HostPathVolumeSource{
							Path: "/var/run/openvswitch/br0.mgmt",
						},
					},
				},
			},
			NodeName:      nodeName,
			RestartPolicy: kapi.RestartPolicyNever,
			// We don't actually need HostNetwork; we just set it so that deploying this pod won't cause any OVS flows to be added
			SecurityContext: &kapi.PodSecurityContext{
				HostNetwork: true,
			},
		},
	}
	f := oc.KubeFramework()
	podClient := f.Client.Pods(f.Namespace.Name)
	pod, err := podClient.Create(pod)
	if err != nil {
		return nil, err
	}
	defer podClient.Delete(pod.Name, nil)
	err = waitForPodSuccessInNamespace(f.Client, pod.Name, "flow-check", f.Namespace.Name)
	if err != nil {
		return nil, err
	}
	logs, err := oc.Run("logs").Args(pod.Name).Output()
	if err != nil {
		return nil, err
	}

	// For ease of comparison, strip out the parts of the rules that change
	flows := strings.Split(logs, "\n")
	stripRE := regexp.MustCompile(`(duration|n_packets|n_bytes)=[^,]*, `)
	for i := range flows {
		flows[i] = stripRE.ReplaceAllLiteralString(flows[i], "")
	}
	return flows, nil
}
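A sketch of the before/after comparison the stripped flows enable; the node name and the mutating step are assumptions:

// Hypothetical usage: assert that an operation rewrites the node's OVS flows.
before, err := doGetFlowsForNode(oc, "node-1.example.com")
o.Expect(err).NotTo(o.HaveOccurred())
// ... perform an operation expected to change the flow table ...
after, err := doGetFlowsForNode(oc, "node-1.example.com")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(after).NotTo(o.Equal(before))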
Example #22
// followDCLogs tails the logs of the Jenkins deployment config in the given namespace.
func followDCLogs(oc *exutil.CLI, jenkinsNamespace string) {
	oc.SetNamespace(jenkinsNamespace)
	oc.Run("logs").Args("-f", "dc/jenkins").Execute()
}
Example #23
func isolateProjects(oc *exutil.CLI, f *e2e.Framework) error {
	return oc.Run("adm").Args("pod-network", "isolate-projects", f.Namespace.Name).Execute()
}
Example #24
func joinProjects(oc *exutil.CLI, f1, f2 *e2e.Framework) error {
	return oc.Run("adm").Args("pod-network", "join-projects", fmt.Sprintf("--to=%s", f1.Namespace.Name), f2.Namespace.Name).Execute()
}
Example #25
func replicationTestFactory(oc *exutil.CLI, tc testCase) func() {
	return func() {
		oc.SetOutputDir(exutil.TestContext.OutputDir)
		defer cleanup(oc)

		_, err := exutil.SetupHostPathVolumes(oc.AdminKubeClient().Core().PersistentVolumes(), oc.Namespace(), "1Gi", 2)
		o.Expect(err).NotTo(o.HaveOccurred())

		err = testutil.WaitForPolicyUpdate(oc.Client(), oc.Namespace(), "create", templateapi.Resource("templates"), true)
		o.Expect(err).NotTo(o.HaveOccurred())

		exutil.CheckOpenShiftNamespaceImageStreams(oc)
		err = oc.Run("new-app").Args("-f", tc.TemplatePath).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		err = oc.Run("new-app").Args("-f", helperTemplate, "-p", fmt.Sprintf("DATABASE_SERVICE_NAME=%s", helperName)).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		// oc.KubeFramework().WaitForAnEndpoint currently waits forever; for now, preface it with our
		// WaitForADeploymentToComplete, which does have a timeout, since in most cases a failure in the
		// service coming up stems from a failed deployment.
		g.By("waiting for the deployment to complete")
		err = exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), helperName, oc)
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By("waiting for an endpoint")
		err = oc.KubeFramework().WaitForAnEndpoint(helperName)
		o.Expect(err).NotTo(o.HaveOccurred())

		tableCounter := 0
		assertReplicationIsWorking := func(masterDeployment, slaveDeployment string, slaveCount int) (exutil.Database, []exutil.Database, exutil.Database) {
			tableCounter++
			table := fmt.Sprintf("table_%0.2d", tableCounter)

			g.By("creating replication helpers")
			master, slaves, helper := CreateMySQLReplicationHelpers(oc.KubeClient().Core().Pods(oc.Namespace()), masterDeployment, slaveDeployment, fmt.Sprintf("%s-1", helperName), slaveCount)
			o.Expect(exutil.WaitUntilAllHelpersAreUp(oc, []exutil.Database{master, helper})).NotTo(o.HaveOccurred())
			o.Expect(exutil.WaitUntilAllHelpersAreUp(oc, slaves)).NotTo(o.HaveOccurred())

			// Test if we can query as root
			g.By("wait for mysql-master endpoint")
			oc.KubeFramework().WaitForAnEndpoint("mysql-master")
			err := helper.TestRemoteLogin(oc, "mysql-master")
			o.Expect(err).NotTo(o.HaveOccurred())

			// Create a new table with random name
			g.By("create new table")
			_, err = master.Query(oc, fmt.Sprintf("CREATE TABLE %s (col1 VARCHAR(20), col2 VARCHAR(20));", table))
			o.Expect(err).NotTo(o.HaveOccurred())

			// Write new data to the table through master
			_, err = master.Query(oc, fmt.Sprintf("INSERT INTO %s (col1, col2) VALUES ('val1', 'val2');", table))
			o.Expect(err).NotTo(o.HaveOccurred())

			// Make sure data is present on master
			err = exutil.WaitForQueryOutputContains(oc, master, 10*time.Second, false, fmt.Sprintf("SELECT * FROM %s\\G;", table), "col1: val1\ncol2: val2")
			o.Expect(err).NotTo(o.HaveOccurred())

			// Make sure data was replicated to all slaves
			for _, slave := range slaves {
				err = exutil.WaitForQueryOutputContains(oc, slave, 90*time.Second, false, fmt.Sprintf("SELECT * FROM %s\\G;", table), "col1: val1\ncol2: val2")
				o.Expect(err).NotTo(o.HaveOccurred())
			}

			return master, slaves, helper
		}

		g.By("after initial deployment")
		master, _, _ := assertReplicationIsWorking("mysql-master-1", "mysql-slave-1", 1)

		if tc.SkipReplication {
			return
		}

		g.By("after master is restarted by changing the Deployment Config")
		err = oc.Run("env").Args("dc", "mysql-master", "MYSQL_ROOT_PASSWORD=newpass").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		master, _, _ = assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		g.By("after master is restarted by deleting the pod")
		err = oc.Run("delete").Args("pod", "-l", "deployment=mysql-master-2").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		_, slaves, _ := assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		g.By("after slave is restarted by deleting the pod")
		err = oc.Run("delete").Args("pod", "-l", "deployment=mysql-slave-1").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), slaves[0].PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		pods, err := oc.KubeClient().Core().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: exutil.ParseLabelsOrDie("deployment=mysql-slave-1")})
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(len(pods.Items)).To(o.Equal(1))

		// NOTE: Commented out, current template does not support multiple replicas.
		/*
			g.By("after slave is scaled to 0 and then back to 4 replicas")
			err = oc.Run("scale").Args("dc", "mysql-slave", "--replicas=0").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute)
			o.Expect(err).NotTo(o.HaveOccurred())
			err = oc.Run("scale").Args("dc", "mysql-slave", "--replicas=4").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 4)
		*/
	}
}
Example #26
// BuildAndPushImageOfSizeWithBuilder tries to build an image of the wanted size and number of layers.
// The built image is stored as an image stream tag <name>:<tag>. If shouldSucceed is false, the build
// is expected to fail with a denied error. Note that the size is only approximate: the resulting image
// size will vary with the compression algorithm used and metadata overhead.
func BuildAndPushImageOfSizeWithBuilder(
	oc *exutil.CLI,
	dClient *dockerclient.Client,
	namespace, name, tag string,
	size uint64,
	numberOfLayers int,
	shouldSucceed bool,
) error {
	istName := name
	if tag != "" {
		istName += ":" + tag
	}

	bc, err := oc.REST().BuildConfigs(namespace).Get(name)
	if err == nil {
		if bc.Spec.CommonSpec.Output.To.Kind != "ImageStreamTag" {
			return fmt.Errorf("Unexpected kind of buildspec's output (%s != %s)", bc.Spec.CommonSpec.Output.To.Kind, "ImageStreamTag")
		}
		bc.Spec.CommonSpec.Output.To.Name = istName
		if _, err = oc.REST().BuildConfigs(namespace).Update(bc); err != nil {
			return err
		}
	} else {
		err = oc.Run("new-build").Args("--binary", "--name", name, "--to", istName).Execute()
		if err != nil {
			return err
		}
	}

	tempDir, err := ioutil.TempDir("", "name-build")
	if err != nil {
		return err
	}

	dataSize := calculateRoughDataSize(oc.Stdout(), size, numberOfLayers)

	lines := make([]string, numberOfLayers+1)
	lines[0] = "FROM scratch"
	for i := 1; i <= numberOfLayers; i++ {
		blobName := fmt.Sprintf("data%d", i)
		if err := createRandomBlob(path.Join(tempDir, blobName), dataSize); err != nil {
			return err
		}
		lines[i] = fmt.Sprintf("COPY %s /%s", blobName, blobName)
	}
	if err := ioutil.WriteFile(path.Join(tempDir, "Dockerfile"), []byte(strings.Join(lines, "\n")+"\n"), 0644); err != nil {
		return err
	}

	out, err := oc.Run("start-build").Args(name, "--from-dir", tempDir, "--wait").Output()
	fmt.Fprintf(g.GinkgoWriter, "\nstart-build output:\n%s\n", out)

	buildLog, logsErr := oc.Run("logs").Args("bc/" + name).Output()
	if match := reSuccessfulBuild.FindStringSubmatch(buildLog); len(match) > 1 {
		defer dClient.RemoveImageExtended(match[1], dockerclient.RemoveImageOptions{Force: true})
	}

	if shouldSucceed && err != nil {
		return fmt.Errorf("Got unexpected build error: %v", err)
	}

	if !shouldSucceed {
		if err == nil {
			return fmt.Errorf("Build unexpectedly succeeded")
		}
		if logsErr != nil {
			return fmt.Errorf("Failed to show log of build config %s: %v", name, logsErr)
		}
		if !reExpectedDeniedError.MatchString(buildLog) {
			return fmt.Errorf("Failed to match expected %q in: %q", reExpectedDeniedError.String(), buildLog)
		}
	}

	return nil
}
Example #27
func makeProjectsGlobal(oc *exutil.CLI, f *e2e.Framework) error {
	return oc.Run("adm").Args("pod-network", "make-projects-global", f.Namespace.Name).Execute()
}
Example #28
// Loads a Jenkins related template using new-app.
func loadFixture(oc *exutil.CLI, filename string) {
	resourcePath := exutil.FixturePath("testdata", "jenkins-plugin", filename)
	err := oc.Run("new-app").Args(resourcePath).Execute()
	o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
}
Example #29
// BuildAndPushImageOfSizeWithDocker tries to build an image of the wanted size and number of layers
// by instructing the Docker daemon directly. The built image is stored as an image stream tag
// <name>:<tag>. If shouldSucceed is false, the push is expected to fail with a denied error. Note that
// the size is only approximate: the resulting image size will vary with the compression algorithm used
// and metadata overhead.
func BuildAndPushImageOfSizeWithDocker(
	oc *exutil.CLI,
	dClient *dockerclient.Client,
	name, tag string,
	size uint64,
	numberOfLayers int,
	outSink io.Writer,
	shouldSucceed bool,
) (imageDigest string, err error) {
	registryURL, err := GetDockerRegistryURL(oc)
	if err != nil {
		return "", err
	}
	tempDir, err := ioutil.TempDir("", "name-build")
	if err != nil {
		return "", err
	}

	dataSize := calculateRoughDataSize(oc.Stdout(), size, numberOfLayers)

	lines := make([]string, numberOfLayers+1)
	lines[0] = "FROM scratch"
	for i := 1; i <= numberOfLayers; i++ {
		blobName := fmt.Sprintf("data%d", i)
		if err := createRandomBlob(path.Join(tempDir, blobName), dataSize); err != nil {
			return "", err
		}
		lines[i] = fmt.Sprintf("COPY %s /%s", blobName, blobName)
	}
	if err := ioutil.WriteFile(path.Join(tempDir, "Dockerfile"), []byte(strings.Join(lines, "\n")+"\n"), 0644); err != nil {
		return "", err
	}

	imageName := fmt.Sprintf("%s/%s/%s", registryURL, oc.Namespace(), name)
	taggedName := fmt.Sprintf("%s:%s", imageName, tag)

	err = dClient.BuildImage(dockerclient.BuildImageOptions{
		Name:                taggedName,
		RmTmpContainer:      true,
		ForceRmTmpContainer: true,
		ContextDir:          tempDir,
		OutputStream:        outSink,
	})
	if err != nil {
		return "", fmt.Errorf("failed to build %q image: %v", taggedName, err)
	}

	image, err := dClient.InspectImage(taggedName)
	if err != nil {
		return "", err
	}

	defer dClient.RemoveImageExtended(image.ID, dockerclient.RemoveImageOptions{Force: true})
	digest := ""
	if len(image.RepoDigests) == 1 {
		digest = image.RepoDigests[0]
	}

	out, err := oc.Run("whoami").Args("-t").Output()
	if err != nil {
		return "", err
	}
	token := strings.TrimSpace(out)

	var buf bytes.Buffer
	err = dClient.PushImage(dockerclient.PushImageOptions{
		Name:         imageName,
		Tag:          tag,
		Registry:     registryURL,
		OutputStream: &buf,
	}, dockerclient.AuthConfiguration{
		Username:      "******",
		Password:      token,
		Email:         "*****@*****.**",
		ServerAddress: registryURL,
	})
	out = buf.String()
	outSink.Write([]byte(out))

	if shouldSucceed {
		if err != nil {
			return "", fmt.Errorf("Got unexpected push error: %v", err)
		}
		if len(digest) == 0 {
			outSink.Write([]byte("matching digest string\n"))
			match := rePushedImageDigest.FindStringSubmatch(out)
			if len(match) < 2 {
				return "", fmt.Errorf("Failed to parse digest")
			}
			digest = match[1]
		}
		return digest, nil
	}

	if err == nil {
		return "", fmt.Errorf("Push unexpectedly succeeded")
	}
	if !reExpectedDeniedError.MatchString(err.Error()) {
		return "", fmt.Errorf("Failed to match expected %q in: %q", reExpectedDeniedError.String(), err.Error())
	}

	return "", nil
}
Example #30
func testPruneImages(oc *exutil.CLI, schemaVersion int) {
	var mediaType string
	switch schemaVersion {
	case 1:
		mediaType = schema1.MediaTypeManifest
	case 2:
		mediaType = schema2.MediaTypeManifest
	default:
		g.Fail(fmt.Sprintf("unexpected schema version %d", schemaVersion))
	}

	oc.SetOutputDir(exutil.TestContext.OutputDir)
	outSink := g.GinkgoWriter

	cleanUp := cleanUpContainer{}
	defer tearDownPruneImagesTest(oc, &cleanUp)

	dClient, err := testutil.NewDockerClient()
	o.Expect(err).NotTo(o.HaveOccurred())

	g.By(fmt.Sprintf("build two images using Docker and push them as schema %d", schemaVersion))
	imgPruneName, err := BuildAndPushImageOfSizeWithDocker(oc, dClient, "prune", "latest", testImageSize, 2, outSink, true)
	o.Expect(err).NotTo(o.HaveOccurred())
	cleanUp.imageNames = append(cleanUp.imageNames, imgPruneName)
	pruneSize, err := getRegistryStorageSize(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	imgKeepName, err := BuildAndPushImageOfSizeWithDocker(oc, dClient, "prune", "latest", testImageSize, 2, outSink, true)
	o.Expect(err).NotTo(o.HaveOccurred())
	cleanUp.imageNames = append(cleanUp.imageNames, imgKeepName)
	keepSize, err := getRegistryStorageSize(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(pruneSize < keepSize).To(o.BeTrue())

	g.By(fmt.Sprintf("ensure uploaded image is of schema %d", schemaVersion))
	imgPrune, err := oc.AsAdmin().REST().Images().Get(imgPruneName)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(imgPrune.DockerImageManifestMediaType).To(o.Equal(mediaType))
	imgKeep, err := oc.AsAdmin().REST().Images().Get(imgKeepName)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(imgKeep.DockerImageManifestMediaType).To(o.Equal(mediaType))

	g.By("prune the first image uploaded (dry-run)")
	output, err := oc.Run("adm").Args("prune", "images", "--keep-tag-revisions=1", "--keep-younger-than=0").Output()

	g.By("verify images, layers and configs about to be pruned")
	o.Expect(output).To(o.ContainSubstring(imgPruneName))
	if schemaVersion == 1 {
		o.Expect(output).NotTo(o.ContainSubstring(imgPrune.DockerImageMetadata.ID))
	} else {
		o.Expect(output).To(o.ContainSubstring(imgPrune.DockerImageMetadata.ID))
	}
	for _, layer := range imgPrune.DockerImageLayers {
		if !strings.Contains(output, layer.Name) {
			o.Expect(output).To(o.ContainSubstring(layer.Name))
		}
	}

	o.Expect(output).NotTo(o.ContainSubstring(imgKeepName))
	o.Expect(output).NotTo(o.ContainSubstring(imgKeep.DockerImageMetadata.ID))
	for _, layer := range imgKeep.DockerImageLayers {
		if !strings.Contains(output, layer.Name) {
			o.Expect(output).NotTo(o.ContainSubstring(layer.Name))
		}
	}

	noConfirmSize, err := getRegistryStorageSize(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(noConfirmSize).To(o.Equal(keepSize))

	g.By("prune the first image uploaded (confirm)")
	output, err = oc.Run("adm").Args("prune", "images", "--keep-tag-revisions=1", "--keep-younger-than=0", "--confirm").Output()

	g.By("verify images, layers and configs about to be pruned")
	o.Expect(output).To(o.ContainSubstring(imgPruneName))
	if schemaVersion == 1 {
		o.Expect(output).NotTo(o.ContainSubstring(imgPrune.DockerImageMetadata.ID))
	} else {
		o.Expect(output).To(o.ContainSubstring(imgPrune.DockerImageMetadata.ID))
	}
	for _, layer := range imgPrune.DockerImageLayers {
		if !strings.Contains(output, layer.Name) {
			o.Expect(output).To(o.ContainSubstring(layer.Name))
		}
	}

	o.Expect(output).NotTo(o.ContainSubstring(imgKeepName))
	o.Expect(output).NotTo(o.ContainSubstring(imgKeep.DockerImageMetadata.ID))
	for _, layer := range imgKeep.DockerImageLayers {
		if !strings.Contains(output, layer.Name) {
			o.Expect(output).NotTo(o.ContainSubstring(layer.Name))
		}
	}

	confirmSize, err := getRegistryStorageSize(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	g.By(fmt.Sprintf("confirming storage size: sizeOfKeepImage=%d <= sizeAfterPrune=%d < beforePruneSize=%d", imgKeep.DockerImageMetadata.Size, confirmSize, keepSize))
	o.Expect(confirmSize >= imgKeep.DockerImageMetadata.Size).To(o.BeTrue())
	o.Expect(confirmSize < keepSize).To(o.BeTrue())
	g.By(fmt.Sprintf("confirming pruned size: sizeOfPruneImage=%d <= (sizeAfterPrune=%d - sizeBeforePrune=%d)", imgPrune, keepSize, confirmSize))
	o.Expect(imgPrune.DockerImageMetadata.Size <= keepSize-confirmSize).To(o.BeTrue())
}