// buildAndPushTestImagesTo builds the given number of test images and pushes them to a new image stream
// of the given name under <tagPrefix><X>, where X is the image's number, starting from 1.
func buildAndPushTestImagesTo(oc *exutil.CLI, isName string, tagPrefix string, numberOfImages int) (tag2Image map[string]imageapi.Image, err error) {
	dClient, err := testutil.NewDockerClient()
	if err != nil {
		return
	}
	tag2Image = make(map[string]imageapi.Image)

	for i := 1; i <= numberOfImages; i++ {
		tag := fmt.Sprintf("%s%d", tagPrefix, i)
		dgst, err := imagesutil.BuildAndPushImageOfSizeWithDocker(oc, dClient, isName, tag, imageSize, 2, g.GinkgoWriter, true)
		if err != nil {
			return nil, err
		}
		ist, err := oc.Client().ImageStreamTags(oc.Namespace()).Get(isName, tag)
		if err != nil {
			return nil, err
		}
		if dgst != ist.Image.Name {
			return nil, fmt.Errorf("digest of built image does not match stored: %s != %s", dgst, ist.Image.Name)
		}
		tag2Image[tag] = ist.Image
	}

	return
}
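A minimal usage sketch (the image stream name "repo" and tag prefix "tag" are hypothetical; oc, o, and ginkgolog are assumed to be provided by the surrounding suite, as in the examples below):

	// Push three test images to image stream "repo" as repo:tag1 .. repo:tag3.
	tag2Image, err := buildAndPushTestImagesTo(oc, "repo", "tag", 3)
	o.Expect(err).NotTo(o.HaveOccurred())
	for tag, img := range tag2Image {
		ginkgolog("pushed %s with digest %s", tag, img.Name)
	}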
Example #2
// ensureRegistryAcceptsSchema2 checks whether the registry is configured to accept manifest V2 schema 2
// or not. If the result doesn't match the given accept argument, the registry's deployment config is
// updated accordingly and the function blocks until the registry is re-deployed and ready for new requests.
func ensureRegistryAcceptsSchema2(oc *exutil.CLI, accept bool) error {
	ns := oc.Namespace()
	oc = oc.SetNamespace(kapi.NamespaceDefault).AsAdmin()
	defer oc.SetNamespace(ns)
	env, err := oc.Run("env").Args("dc/docker-registry", "--list").Output()
	if err != nil {
		return err
	}

	value := fmt.Sprintf("%s=%t", dockerregistryserver.AcceptSchema2EnvVar, accept)
	if strings.Contains(env, value) {
		if accept {
			g.By("docker-registry is already configured to accept schema 2")
		} else {
			g.By("docker-registry is already configured to refuse schema 2")
		}
		return nil
	}

	dc, err := oc.Client().DeploymentConfigs(kapi.NamespaceDefault).Get("docker-registry")
	if err != nil {
		return err
	}
	waitForVersion := dc.Status.LatestVersion + 1

	g.By("configuring Docker registry to accept schema 2")
	err = oc.Run("env").Args("dc/docker-registry", value).Execute()
	if err != nil {
		return fmt.Errorf("failed to update registry's environment with %s: %v", &waitForVersion, err)
	}
	return exutil.WaitForRegistry(oc.AdminClient(), oc.AdminKubeClient(), &waitForVersion, oc)
}
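A sketch of how a test might use this helper to refuse schema 2 for its duration and restore acceptance afterwards (assuming oc and o from the surrounding suite):

	// Refuse manifest schema 2 for this test, restoring the default on exit.
	o.Expect(ensureRegistryAcceptsSchema2(oc, false)).NotTo(o.HaveOccurred())
	defer func() {
		o.Expect(ensureRegistryAcceptsSchema2(oc, true)).NotTo(o.HaveOccurred())
	}()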
Example #3
func assertEnvVars(oc *exutil.CLI, buildPrefix string, varsToFind map[string]string) {
	buildList, err := oc.Client().Builds(oc.Namespace()).List(kapi.ListOptions{})
	o.Expect(err).NotTo(o.HaveOccurred())

	// Ensure that expected start-build environment variables were injected
	for _, build := range buildList.Items {
		ginkgolog("Found build: %q", build.GetName())
		if strings.HasPrefix(build.GetName(), buildPrefix) {
			envs := []kapi.EnvVar{}
			if build.Spec.Strategy.DockerStrategy != nil && build.Spec.Strategy.DockerStrategy.Env != nil {
				envs = build.Spec.Strategy.DockerStrategy.Env
			} else if build.Spec.Strategy.SourceStrategy != nil && build.Spec.Strategy.SourceStrategy.Env != nil {
				envs = build.Spec.Strategy.SourceStrategy.Env
			} else {
				continue
			}

			for k, v := range varsToFind {
				found := false
				for _, env := range envs {
					ginkgolog("Found %s=%s in build %s", env.Name, env.Value, build.GetName())
					if k == env.Name && v == env.Value {
						found = true
						break
					}
				}
				o.ExpectWithOffset(1, found).To(o.BeTrue())
			}
		}
	}
}
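A hypothetical call verifying that builds whose names start with a given prefix carry variables injected via start-build (the prefix and values here are illustrative only):

	assertEnvVars(oc, "mybuild-", map[string]string{
		"MY_VAR": "my-value",
	})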
Example #4
func replicationTestFactory(oc *exutil.CLI, tc testCase) func() {
	return func() {
		oc.SetOutputDir(exutil.TestContext.OutputDir)
		defer cleanup(oc)

		_, err := exutil.SetupHostPathVolumes(oc.AdminKubeClient().Core().PersistentVolumes(), oc.Namespace(), "1Gi", 2)
		o.Expect(err).NotTo(o.HaveOccurred())

		err = testutil.WaitForPolicyUpdate(oc.Client(), oc.Namespace(), "create", templateapi.Resource("templates"), true)
		o.Expect(err).NotTo(o.HaveOccurred())

		exutil.CheckOpenShiftNamespaceImageStreams(oc)
		err = oc.Run("new-app").Args("-f", tc.TemplatePath).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		err = oc.Run("new-app").Args("-f", helperTemplate, "-p", fmt.Sprintf("DATABASE_SERVICE_NAME=%s", helperName)).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		// oc.KubeFramework().WaitForAnEndpoint currently waits forever; for now, preface it with our
		// WaitForADeploymentToComplete, which does have a timeout, since in most cases a failure in the
		// service coming up stems from a failed deployment.
		g.By("waiting for the deployment to complete")
		err = exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), helperName, oc)
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By("waiting for an endpoint")
		err = oc.KubeFramework().WaitForAnEndpoint(helperName)
		o.Expect(err).NotTo(o.HaveOccurred())

		tableCounter := 0
		assertReplicationIsWorking := func(masterDeployment, slaveDeployment string, slaveCount int) (exutil.Database, []exutil.Database, exutil.Database) {
			tableCounter++
			table := fmt.Sprintf("table_%02d", tableCounter)

			g.By("creating replication helpers")
			master, slaves, helper := CreateMySQLReplicationHelpers(oc.KubeClient().Core().Pods(oc.Namespace()), masterDeployment, slaveDeployment, fmt.Sprintf("%s-1", helperName), slaveCount)
			o.Expect(exutil.WaitUntilAllHelpersAreUp(oc, []exutil.Database{master, helper})).NotTo(o.HaveOccurred())
			o.Expect(exutil.WaitUntilAllHelpersAreUp(oc, slaves)).NotTo(o.HaveOccurred())

			// Test if we can query as root
			g.By("wait for mysql-master endpoint")
			oc.KubeFramework().WaitForAnEndpoint("mysql-master")
			err := helper.TestRemoteLogin(oc, "mysql-master")
			o.Expect(err).NotTo(o.HaveOccurred())

			// Create a new table with random name
			g.By("create new table")
			_, err = master.Query(oc, fmt.Sprintf("CREATE TABLE %s (col1 VARCHAR(20), col2 VARCHAR(20));", table))
			o.Expect(err).NotTo(o.HaveOccurred())

			// Write new data to the table through master
			_, err = master.Query(oc, fmt.Sprintf("INSERT INTO %s (col1, col2) VALUES ('val1', 'val2');", table))
			o.Expect(err).NotTo(o.HaveOccurred())

			// Make sure data is present on master
			err = exutil.WaitForQueryOutputContains(oc, master, 10*time.Second, false, fmt.Sprintf("SELECT * FROM %s\\G;", table), "col1: val1\ncol2: val2")
			o.Expect(err).NotTo(o.HaveOccurred())

			// Make sure data was replicated to all slaves
			for _, slave := range slaves {
				err = exutil.WaitForQueryOutputContains(oc, slave, 90*time.Second, false, fmt.Sprintf("SELECT * FROM %s\\G;", table), "col1: val1\ncol2: val2")
				o.Expect(err).NotTo(o.HaveOccurred())
			}

			return master, slaves, helper
		}

		g.By("after initial deployment")
		master, _, _ := assertReplicationIsWorking("mysql-master-1", "mysql-slave-1", 1)

		if tc.SkipReplication {
			return
		}

		g.By("after master is restarted by changing the Deployment Config")
		err = oc.Run("env").Args("dc", "mysql-master", "MYSQL_ROOT_PASSWORD=newpass").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		master, _, _ = assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		g.By("after master is restarted by deleting the pod")
		err = oc.Run("delete").Args("pod", "-l", "deployment=mysql-master-2").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		_, slaves, _ := assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		g.By("after slave is restarted by deleting the pod")
		err = oc.Run("delete").Args("pod", "-l", "deployment=mysql-slave-1").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), slaves[0].PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		pods, err := oc.KubeClient().Core().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: exutil.ParseLabelsOrDie("deployment=mysql-slave-1")})
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(len(pods.Items)).To(o.Equal(1))

		// NOTE: Commented out, current template does not support multiple replicas.
		/*
			g.By("after slave is scaled to 0 and then back to 4 replicas")
			err = oc.Run("scale").Args("dc", "mysql-slave", "--replicas=0").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute)
			o.Expect(err).NotTo(o.HaveOccurred())
			err = oc.Run("scale").Args("dc", "mysql-slave", "--replicas=4").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 4)
		*/
	}
}
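A sketch of wiring the factory into a Ginkgo spec; the testCase fields shown (TemplatePath, SkipReplication) are the ones referenced above, and the template path is hypothetical:

	g.It("MySQL replication from a template", replicationTestFactory(oc, testCase{
		TemplatePath:    "examples/db-templates/mysql-replication-template.json", // hypothetical path
		SkipReplication: false,
	}))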
Example #5
// BuildAndPushImageOfSizeWithBuilder tries to build an image of the wanted size and number of layers. The
// built image is stored as an image stream tag <name>:<tag>. If shouldSucceed is false, the build is
// expected to fail with a denied error. Note that the size is only approximate: the resulting image size
// will differ depending on the compression algorithm used and on metadata overhead.
func BuildAndPushImageOfSizeWithBuilder(
	oc *exutil.CLI,
	dClient *dockerclient.Client,
	namespace, name, tag string,
	size uint64,
	numberOfLayers int,
	shouldSucceed bool,
) error {
	istName := name
	if tag != "" {
		istName += ":" + tag
	}

	bc, err := oc.Client().BuildConfigs(namespace).Get(name)
	if err == nil {
		if bc.Spec.CommonSpec.Output.To.Kind != "ImageStreamTag" {
			return fmt.Errorf("Unexpected kind of buildspec's output (%s != %s)", bc.Spec.CommonSpec.Output.To.Kind, "ImageStreamTag")
		}
		bc.Spec.CommonSpec.Output.To.Name = istName
		if _, err = oc.Client().BuildConfigs(namespace).Update(bc); err != nil {
			return err
		}
	} else {
		err = oc.Run("new-build").Args("--binary", "--name", name, "--to", istName).Execute()
		if err != nil {
			return err
		}
	}

	tempDir, err := ioutil.TempDir("", "name-build")
	if err != nil {
		return err
	}
	// Clean up the temporary build context once the build is done.
	defer os.RemoveAll(tempDir)

	dataSize := calculateRoughDataSize(oc.Stdout(), size, numberOfLayers)

	lines := make([]string, numberOfLayers+1)
	lines[0] = "FROM scratch"
	for i := 1; i <= numberOfLayers; i++ {
		blobName := fmt.Sprintf("data%d", i)
		if err := createRandomBlob(path.Join(tempDir, blobName), dataSize); err != nil {
			return err
		}
		lines[i] = fmt.Sprintf("COPY %s /%s", blobName, blobName)
	}
	if err := ioutil.WriteFile(path.Join(tempDir, "Dockerfile"), []byte(strings.Join(lines, "\n")+"\n"), 0644); err != nil {
		return err
	}

	br, _ := exutil.StartBuildAndWait(oc, name, "--from-dir", tempDir)
	if shouldSucceed {
		br.AssertSuccess()
	} else {
		br.AssertFailure()
	}
	buildLog, logsErr := br.Logs()

	if match := reSuccessfulBuild.FindStringSubmatch(buildLog); len(match) > 1 {
		defer dClient.RemoveImageExtended(match[1], dockerclient.RemoveImageOptions{Force: true})
	}

	if !shouldSucceed {
		if logsErr != nil {
			return fmt.Errorf("Failed to show log of build config %s: %v", name, err)
		}
		if !reExpectedDeniedError.MatchString(buildLog) {
			return fmt.Errorf("Failed to match expected %q in: %q", reExpectedDeniedError.String(), buildLog)
		}
	}

	return nil
}
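A usage sketch: build a roughly 16 MiB, two-layer image and expect the build to be denied by registry limits (shouldSucceed=false). The name, tag, and size are illustrative; oc, dClient, and o are assumed from the surrounding suite:

	err := BuildAndPushImageOfSizeWithBuilder(oc, dClient, oc.Namespace(), "sized", "latest", 16*1024*1024, 2, false)
	o.Expect(err).NotTo(o.HaveOccurred())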