Example #1
// SetupProject creates a new project and assigns a random user to it.
// All resources will then be created within this project, and the Kubernetes
// E2E suite will destroy the project after the test case finishes.
func (c *CLI) SetupProject(name string, kubeClient *kclient.Client, _ map[string]string) (*kapi.Namespace, error) {
	newNamespace := kapi.SimpleNameGenerator.GenerateName(fmt.Sprintf("extended-test-%s-", name))
	c.SetNamespace(newNamespace).ChangeUser(fmt.Sprintf("%s-user", c.Namespace()))
	e2e.Logf("The user is now %q", c.Username())

	e2e.Logf("Creating project %q", c.Namespace())
	_, err := c.REST().ProjectRequests().Create(&projectapi.ProjectRequest{
		ObjectMeta: kapi.ObjectMeta{Name: c.Namespace()},
	})
	if err != nil {
		e2e.Logf("Failed to create a project and namespace %q: %v", c.Namespace(), err)
		return nil, err
	}
	if err := wait.ExponentialBackoff(kclient.DefaultBackoff, func() (bool, error) {
		if _, err := c.KubeREST().Pods(c.Namespace()).List(kapi.ListOptions{}); err != nil {
			if apierrs.IsForbidden(err) {
				e2e.Logf("Waiting for user to have access to the namespace")
				return false, nil
			}
		}
		return true, nil
	}); err != nil {
		return nil, err
	}
	return &kapi.Namespace{ObjectMeta: kapi.ObjectMeta{Name: c.Namespace()}}, nil
}
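For context, a minimal usage sketch (the test name is hypothetical; the CLI constructor is the `exutil.NewCLI` shown in Example #21, and nil is passed for the kubeClient and label arguments only because this variant ignores them):

oc := exutil.NewCLI("my-extended-test", exutil.KubeConfigPath())
// kubeClient and labels are unused by this SetupProject variant, so nil is safe here.
ns, err := oc.SetupProject("my-extended-test", nil, nil)
if err != nil {
	e2e.Failf("Failed to set up project: %v", err)
}
e2e.Logf("Created namespace %q", ns.Name)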
Example #2
// SetupProject creates a new project and assigns a random user to it.
// All resources will then be created within this project, and the Kubernetes
// E2E suite will destroy the project after the test case finishes.
// Note that kubeClient is not used; it exists only to keep this function
// signature-compatible with the upstream function.
func (c *CLI) SetupProject(name string, kubeClient *kclient.Client) (*kapi.Namespace, error) {
	newNamespace := kapi.SimpleNameGenerator.GenerateName(fmt.Sprintf("extended-test-%s-", name))
	c.SetNamespace(newNamespace).ChangeUser(fmt.Sprintf("%s-user", c.Namespace()))
	e2e.Logf("The user is now %q", c.Username())

	projectOpts := cmdapi.NewProjectOptions{
		ProjectName: c.Namespace(),
		Client:      c.REST(),
		Out:         c.stdout,
	}
	e2e.Logf("Creating project %q", c.Namespace())
	return c.kubeFramework.Namespace, projectOpts.Run()
}
Example #3
func isNodeReadySetAsExpected(node *api.Node, wantReady bool) bool {
	// Check the node readiness condition (logging all).
	for i, cond := range node.Status.Conditions {
		e2e.Logf("Node %s condition %d/%d: type: %v, status: %v, reason: %q, message: %q, last transition time: %v",
			node.Name, i+1, len(node.Status.Conditions), cond.Type, cond.Status,
			cond.Reason, cond.Message, cond.LastTransitionTime)
		// Ensure that the condition type is readiness and the status
		// matches as desired.
		if cond.Type == api.NodeReady && (cond.Status == api.ConditionTrue) == wantReady {
			e2e.Logf("Successfully found node %s readiness to be %t", node.Name, wantReady)
			return true
		}
	}
	return false
}
Example #4
// SetupProject creates a new project and assigns a random user to it.
// All resources will then be created within this project, and the Kubernetes
// E2E suite will destroy the project after the test case finishes.
func (c *CLI) SetupProject(name string, kubeClient *kclient.Client, _ map[string]string) (*kapi.Namespace, error) {
	newNamespace := kapi.SimpleNameGenerator.GenerateName(fmt.Sprintf("extended-test-%s-", name))
	c.SetNamespace(newNamespace).ChangeUser(fmt.Sprintf("%s-user", c.Namespace()))
	e2e.Logf("The user is now %q", c.Username())

	e2e.Logf("Creating project %q", c.Namespace())
	_, err := c.REST().ProjectRequests().Create(&projectapi.ProjectRequest{
		ObjectMeta: kapi.ObjectMeta{Name: c.Namespace()},
	})
	if err != nil {
		e2e.Logf("Failed to create a project and namespace %q: %v", c.Namespace(), err)
		return nil, err
	}
	return &kapi.Namespace{ObjectMeta: kapi.ObjectMeta{Name: c.Namespace()}}, nil
}
Example #5
func validateDNSResults(f *e2e.Framework, pod *api.Pod, fileNames sets.String, expect int) {
	By("submitting the pod to kubernetes")
	podClient := f.Client.Pods(f.Namespace.Name)
	defer func() {
		By("deleting the pod")
		defer GinkgoRecover()
		podClient.Delete(pod.Name, api.NewDeleteOptions(0))
	}()
	if _, err := podClient.Create(pod); err != nil {
		e2e.Failf("Failed to create %s pod: %v", pod.Name, err)
	}

	Expect(f.WaitForPodRunning(pod.Name)).To(BeNil())
	Expect(wait.Poll(2*time.Second, 5*time.Minute, func() (bool, error) {
		pod, err := podClient.Get(pod.Name)
		if err != nil {
			return false, err
		}
		switch pod.Status.Phase {
		case api.PodSucceeded:
			return true, nil
		case api.PodFailed:
			return false, fmt.Errorf("pod failed")
		default:
			return false, nil
		}
	})).To(BeNil())

	By("retrieving the pod logs")
	r, err := podClient.GetLogs(pod.Name, &api.PodLogOptions{Container: "querier"}).Stream()
	if err != nil {
		e2e.Failf("Failed to get pod logs %s: %v", pod.Name, err)
	}
	out, err := ioutil.ReadAll(r)
	if err != nil {
		e2e.Failf("Failed to read pod logs %s: %v", pod.Name, err)
	}

	// Try to find results for each expected name.
	By("looking for the results for each expected name from probiers")

	if err := assertLinesExist(fileNames, expect, bytes.NewBuffer(out)); err != nil {
		e2e.Logf("Got results from pod:\n%s", out)
		e2e.Failf("Unexpected results: %v", err)
	}

	e2e.Logf("DNS probes using %s succeeded\n", pod.Name)
}
Example #6
func deploymentReachedCompletion(dc *deployapi.DeploymentConfig, rcs []kapi.ReplicationController) (bool, error) {
	if len(rcs) == 0 {
		return false, nil
	}
	rc := rcs[len(rcs)-1]
	version := deployutil.DeploymentVersionFor(&rc)
	if version != dc.Status.LatestVersion {
		return false, nil
	}

	status := rc.Annotations[deployapi.DeploymentStatusAnnotation]
	if deployapi.DeploymentStatus(status) != deployapi.DeploymentStatusComplete {
		return false, nil
	}
	expectedReplicas := dc.Spec.Replicas
	if dc.Spec.Test {
		expectedReplicas = 0
	}
	if rc.Spec.Replicas != expectedReplicas {
		return false, fmt.Errorf("deployment is complete but doesn't have expected spec replicas: %d %d", rc.Spec.Replicas, expectedReplicas)
	}
	if rc.Status.Replicas != expectedReplicas {
		e2e.Logf("POSSIBLE_ANOMALY: deployment is complete but doesn't have expected status replicas: %d %d", rc.Status.Replicas, expectedReplicas)
		return false, nil
	}
	return true, nil
}
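Because the signature matches a wait condition, a natural driver is wait.Poll; the following is a hedged sketch, with deploymentInfo borrowed from Example #21 (its exact return types here are an assumption, not the project's actual wiring):

err := wait.Poll(2*time.Second, 5*time.Minute, func() (bool, error) {
	// Re-fetch the deployment config and its RCs on every tick.
	dc, rcs, _, err := deploymentInfo(oc, "deployment-simple")
	if err != nil {
		// Treat lookup errors as transient and keep polling.
		return false, nil
	}
	return deploymentReachedCompletion(dc, rcs)
})
o.Expect(err).NotTo(o.HaveOccurred())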
Example #7
// ChangeUser changes the user used by the current CLI session.
func (c *CLI) ChangeUser(name string) *CLI {
	adminClientConfig, err := testutil.GetClusterAdminClientConfig(c.adminConfigPath)
	if err != nil {
		FatalErr(err)
	}
	_, _, clientConfig, err := testutil.GetClientForUser(*adminClientConfig, name)
	if err != nil {
		FatalErr(err)
	}

	kubeConfig, err := config.CreateConfig(c.Namespace(), clientConfig)
	if err != nil {
		FatalErr(err)
	}

	c.configPath = filepath.Join(c.outputDir, name+".kubeconfig")
	err = clientcmd.WriteToFile(*kubeConfig, c.configPath)
	if err != nil {
		FatalErr(err)
	}

	c.username = name
	e2e.Logf("configPath is now %q", c.configPath)
	return c
}
Example #8
// createTestingNS delegates to custom namespace creation functions if registered;
// otherwise, it ensures that Kubernetes e2e tests have their service accounts in
// the privileged and anyuid SCCs.
func createTestingNS(baseName string, c *kclient.Client, labels map[string]string) (*kapi.Namespace, error) {
	// If a custom function exists, call it
	if fn, exists := customCreateTestingNSFuncs[baseName]; exists {
		return fn(baseName, c, labels)
	}

	// Otherwise use the upstream default
	ns, err := e2e.CreateTestingNS(baseName, c, labels)
	if err != nil {
		return ns, err
	}

	// Add anyuid and privileged permissions for upstream tests
	if isKubernetesE2ETest() {
		e2e.Logf("About to run a Kube e2e test, ensuring namespace is privileged")
		// add to the "privileged" scc to ensure pods that explicitly
		// request extra capabilities are not rejected
		addE2EServiceAccountsToSCC(c, []kapi.Namespace{*ns}, "privileged")
		// add to the "anyuid" scc to ensure pods that don't specify a
		// uid don't get forced into a range (mimics upstream
		// behavior)
		addE2EServiceAccountsToSCC(c, []kapi.Namespace{*ns}, "anyuid")

		// The intra-pod test requires that the service account have
		// permission to retrieve service endpoints.
		osClient, _, err := configapi.GetOpenShiftClient(KubeConfigPath())
		if err != nil {
			return ns, err
		}
		addRoleToE2EServiceAccounts(osClient, []kapi.Namespace{*ns}, bootstrappolicy.ViewRoleName)
	}

	return ns, err
}
Example #9
func launchWebserverPod(f *e2e.Framework, podName string, nodeName string) (ip string) {
	containerName := fmt.Sprintf("%s-container", podName)
	port := 8080
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: podName,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  containerName,
					Image: "gcr.io/google_containers/porter:59ad46ed2c56ba50fa7f1dc176c07c37",
					Env:   []api.EnvVar{{Name: fmt.Sprintf("SERVE_PORT_%d", port), Value: "foo"}},
					Ports: []api.ContainerPort{{ContainerPort: port}},
				},
			},
			NodeName:      nodeName,
			RestartPolicy: api.RestartPolicyNever,
		},
	}
	podClient := f.Client.Pods(f.Namespace.Name)
	_, err := podClient.Create(pod)
	expectNoError(err)
	expectNoError(f.WaitForPodRunning(podName))
	createdPod, err := podClient.Get(podName)
	expectNoError(err)
	ip = fmt.Sprintf("%s:%d", createdPod.Status.PodIP, port)
	e2e.Logf("Target pod IP:port is %s", ip)
	return
}
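A follow-up sketch probing the returned address with net/http; this is illustrative only, assumes the test process can actually route to the pod IP (for example, when running inside the cluster), and uses placeholder pod and node names:

ip := launchWebserverPod(f, "webserver-pod", node.Name)
resp, err := http.Get(fmt.Sprintf("http://%s", ip))
if err != nil {
	e2e.Failf("Failed to reach webserver pod at %s: %v", ip, err)
}
defer resp.Body.Close()
e2e.Logf("Webserver at %s answered with status %d", ip, resp.StatusCode)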
Example #10
File: test.go Project: richm/origin
// verifyTestSuitePreconditions ensures that all namespaces prefixed with 'e2e-' have their
// service accounts in the privileged and anyuid SCCs, and that Origin/Kubernetes synthetic
// skip labels are applied
func verifyTestSuitePreconditions() {
	desc := ginkgo.CurrentGinkgoTestDescription()

	switch {
	case strings.Contains(desc.FileName, "/origin/test/"):
		if strings.Contains(config.GinkgoConfig.SkipString, "[Origin]") {
			ginkgo.Skip("skipping [Origin] tests")
		}

	case strings.Contains(desc.FileName, "/kubernetes/test/e2e/"):
		if strings.Contains(config.GinkgoConfig.SkipString, "[Kubernetes]") {
			ginkgo.Skip("skipping [Kubernetes] tests")
		}

		e2e.Logf("About to run a Kube e2e test, ensuring namespace is privileged")
		c, _, err := configapi.GetKubeClient(KubeConfigPath())
		if err != nil {
			FatalErr(err)
		}
		namespaces, err := c.Namespaces().List(kapi.ListOptions{})
		if err != nil {
			FatalErr(err)
		}
		// add to the "privileged" scc to ensure pods that explicitly
		// request extra capabilities are not rejected
		addE2EServiceAccountsToSCC(c, namespaces, "privileged")
		// add to the "anyuid" scc to ensure pods that don't specify a
		// uid don't get forced into a range (mimics upstream
		// behavior)
		addE2EServiceAccountsToSCC(c, namespaces, "anyuid")
	}
}
Example #11
func getMultipleNodes(f *e2e.Framework) (nodes *api.NodeList) {
	nodes, err := f.Client.Nodes().List(labels.Everything(), fields.Everything())
	if err != nil {
		e2e.Failf("Failed to list nodes: %v", err)
	}
	// Previous tests may have caused failures on some nodes. Skip
	// 'Not Ready' nodes, just in case (there is no need to fail the test).
	filterNodes(nodes, func(node api.Node) bool {
		return isNodeReadySetAsExpected(&node, true)
	})

	if len(nodes.Items) == 0 {
		e2e.Failf("No Ready nodes found.")
	}
	if len(nodes.Items) == 1 {
		// In general, the test requires two nodes. But for local development, a
		// one-node cluster is often created for simplicity and speed (see issue
		// #10012). We permit a one-node test only in some cases.
		if !providerIs("local") {
			e2e.Failf("The test requires two Ready nodes on %s, but found just one.", exutil.TestContext.Provider)
		}
		e2e.Logf("Only one ready node is detected. The test has limited scope in such setting. " +
			"Rerun it with at least two nodes to get complete coverage.")
	}
	return
}
Example #12
// waitForPodSuccessInNamespace returns nil if the pod reached state success, or an error if it reached failure or ran too long.
func waitForPodSuccessInNamespace(c *client.Client, podName string, contName string, namespace string) error {
	return waitForPodCondition(c, namespace, podName, "success or failure", podStartTimeout, func(pod *api.Pod) (bool, error) {
		// Cannot use pod.Status.Phase == api.PodSucceeded/api.PodFailed due to #2632
		ci, ok := api.GetContainerStatus(pod.Status.ContainerStatuses, contName)
		if !ok {
			e2e.Logf("No Status.Info for container '%s' in pod '%s' yet", contName, podName)
		} else {
			if ci.State.Terminated != nil {
				if ci.State.Terminated.ExitCode == 0 {
					By("Saw pod success")
					return true, nil
				}
				return true, fmt.Errorf("pod '%s' terminated with failure: %+v", podName, ci.State.Terminated)
			}
			e2e.Logf("Nil State.Terminated for container '%s' in pod '%s' in namespace '%s' so far", contName, podName, namespace)
		}
		return false, nil
	})
}
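An illustrative call, reusing the "querier" container name from Example #5 and the framework fields seen in other examples here (the exact call site is an assumption):

// Block until the "querier" container terminates; a zero exit code counts as success.
if err := waitForPodSuccessInNamespace(f.Client, pod.Name, "querier", f.Namespace.Name); err != nil {
	e2e.Failf("Pod %s did not succeed: %v", pod.Name, err)
}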
Example #13
func waitForPodCondition(c *client.Client, ns, podName, desc string, timeout time.Duration, condition podCondition) error {
	e2e.Logf("Waiting up to %[1]v for pod %-[2]*[3]s status to be %[4]s", timeout, podPrintWidth, podName, desc)
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
		pod, err := c.Pods(ns).Get(podName)
		if err != nil {
			// Aligning this text makes it much more readable
			e2e.Logf("Get pod %-[1]*[2]s in namespace '%[3]s' failed, ignoring for %[4]v. Error: %[5]v",
				podPrintWidth, podName, ns, poll, err)
			continue
		}
		done, err := condition(pod)
		if done {
			return err
		}
		e2e.Logf("Waiting for pod %-[1]*[2]s in namespace '%[3]s' status to be '%[4]s'"+
			"(found phase: %[5]q, readiness: %[6]t) (%[7]v elapsed)",
			podPrintWidth, podName, ns, desc, pod.Status.Phase, podReady(pod), time.Since(start))
	}
	return fmt.Errorf("gave up waiting for pod '%s' to be '%s' after %v", podName, desc, timeout)
}
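The podCondition callback has the same closure shape used in Example #12; a simpler hedged sketch waits only for the Running phase:

err := waitForPodCondition(c, ns, podName, "running", podStartTimeout,
	func(pod *api.Pod) (bool, error) {
		// Done as soon as the kubelet reports the pod Running.
		return pod.Status.Phase == api.PodRunning, nil
	})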
Example #14
// Output executes the command and returns its output as a string.
func (c *CLI) Output() (string, error) {
	if c.verbose {
		fmt.Printf("DEBUG: oc %s\n", c.printCmd())
	}
	cmd := exec.Command(c.execPath, c.finalArgs...)
	cmd.Stdin = c.stdin
	e2e.Logf("Running '%s %s'", c.execPath, strings.Join(c.finalArgs, " "))
	out, err := cmd.CombinedOutput()
	trimmed := strings.TrimSpace(string(out))
	switch err.(type) {
	case nil:
		c.stdout = bytes.NewBuffer(out)
		return trimmed, nil
	case *exec.ExitError:
		e2e.Logf("Error running %v:\n%s", cmd, trimmed)
		return trimmed, err
	default:
		FatalErr(fmt.Errorf("unable to execute %q: %v", c.execPath, err))
		// unreachable code
		return "", nil
	}
}
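Usage follows the Run/Args chain shown in Example #21; the particular subcommand and arguments below are illustrative:

// Build the argument list with Run/Args, then execute and capture combined output.
out, err := oc.Run("get").Args("pods", "-n", oc.Namespace()).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("oc get pods returned:\n%s", out)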
Example #15
// Background executes the command in the background and returns the Cmd,
// which should be killed later via cmd.Process.Kill(), along with the stdout
// and stderr byte buffers assigned to the cmd.Stdout and cmd.Stderr writers.
func (c *CLI) Background() (*exec.Cmd, *bytes.Buffer, *bytes.Buffer, error) {
	if c.verbose {
		fmt.Printf("DEBUG: oc %s\n", c.printCmd())
	}
	cmd := exec.Command(c.execPath, c.finalArgs...)
	cmd.Stdin = c.stdin
	var stdout, stderr bytes.Buffer
	// bytes.Buffer already implements io.Writer; wrapping it in a bufio.Writer
	// would require an explicit Flush, or short output could be lost.
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr

	e2e.Logf("Running '%s %s'", c.execPath, strings.Join(c.finalArgs, " "))

	err := cmd.Start()
	return cmd, &stdout, &stderr, err
}
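A lifecycle sketch matching the comment above: start the process, exercise the scenario, then kill it and inspect the buffers (the subcommand and pod name are placeholders):

cmd, stdout, stderr, err := oc.Run("logs").Args("-f", "mypod").Background()
o.Expect(err).NotTo(o.HaveOccurred())
// Make sure the background process does not outlive the test.
defer cmd.Process.Kill()
// ... drive the scenario under test ...
e2e.Logf("captured stdout:\n%s\ncaptured stderr:\n%s", stdout.String(), stderr.String())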
Example #16
File: test.go Project: o8o/origin
// ensureKubeE2EPrivilegedSA ensures that all namespaces prefixed with 'e2e-' have their
// service accounts in the privileged and anyuid SCCs
func ensureKubeE2EPrivilegedSA() {
	desc := ginkgo.CurrentGinkgoTestDescription()
	if strings.Contains(desc.FileName, "/kubernetes/test/e2e/") {
		e2e.Logf("About to run a Kube e2e test, ensuring namespace is privileged")
		c, _, err := configapi.GetKubeClient(KubeConfigPath())
		if err != nil {
			FatalErr(err)
		}
		namespaces, err := c.Namespaces().List(kapi.ListOptions{})
		if err != nil {
			FatalErr(err)
		}
		// add to the "privileged" scc to ensure pods that explicitly
		// request extra capabilities are not rejected
		addE2EServiceAccountsToSCC(c, namespaces, "privileged")
		// add to the "anyuid" scc to ensure pods that don't specify a
		// uid don't get forced into a range (mimics upstream
		// behavior)
		addE2EServiceAccountsToSCC(c, namespaces, "anyuid")
	}
}
Example #17
func addRoleToE2EServiceAccounts(c *client.Client, namespaces []kapi.Namespace, roleName string) {
	err := kclient.RetryOnConflict(kclient.DefaultRetry, func() error {
		for _, ns := range namespaces {
			if strings.HasPrefix(ns.Name, "e2e-") && ns.Status.Phase != kapi.NamespaceTerminating {
				sa := fmt.Sprintf("system:serviceaccount:%s:default", ns.Name)
				addRole := &policy.RoleModificationOptions{
					RoleNamespace:       "",
					RoleName:            roleName,
					RoleBindingAccessor: policy.NewLocalRoleBindingAccessor(ns.Name, c),
					Users:               []string{sa},
				}
				if err := addRole.AddRole(); err != nil {
					e2e.Logf("Warning: Failed to add role to e2e service account: %v", err)
				}
			}
		}
		return nil
	})
	if err != nil {
		FatalErr(err)
	}
}
Example #18
func waitForRouterOKResponse(url, host string, timeout time.Duration) error {
	return wait.Poll(time.Second, timeout, func() (bool, error) {
		req, err := requestViaReverseProxy("GET", url, host)
		if err != nil {
			return false, err
		}
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return false, nil
		}
		resp.Body.Close()
		if resp.StatusCode == http.StatusServiceUnavailable {
			// not ready yet
			return false, nil
		}
		if resp.StatusCode != http.StatusOK {
			e2e.Logf("unexpected response: %#v", resp.StatusCode)
			return false, nil
		}
		return true, nil
	})
}
Example #19
func launchWebserverService(f *e2e.Framework, serviceName string, nodeName string) (serviceAddr string) {
	e2e.LaunchWebserverPod(f, serviceName, nodeName)
	// FIXME: make e2e.LaunchWebserverPod() set the label when creating the pod
	podClient := f.Client.Pods(f.Namespace.Name)
	pod, err := podClient.Get(serviceName)
	expectNoError(err)
	pod.ObjectMeta.Labels = make(map[string]string)
	pod.ObjectMeta.Labels["name"] = "web"
	_, err = podClient.Update(pod)
	expectNoError(err)

	servicePort := 8080
	service := &api.Service{
		ObjectMeta: api.ObjectMeta{
			Name: serviceName,
		},
		Spec: api.ServiceSpec{
			Type: api.ServiceTypeClusterIP,
			Ports: []api.ServicePort{
				{
					Protocol: api.ProtocolTCP,
					Port:     servicePort,
				},
			},
			Selector: map[string]string{
				"name": "web",
			},
		},
	}
	serviceClient := f.Client.Services(f.Namespace.Name)
	_, err = serviceClient.Create(service)
	expectNoError(err)
	expectNoError(f.WaitForAnEndpoint(serviceName))
	createdService, err := serviceClient.Get(serviceName)
	expectNoError(err)
	serviceAddr = fmt.Sprintf("%s:%d", createdService.Spec.ClusterIP, servicePort)
	e2e.Logf("Target service IP:port is %s", serviceAddr)
	return
}
Example #20
// ensureKubeE2EPrivilegedSA ensures that all namespaces prefixed with 'e2e-' have their
// service accounts in the privileged SCC
func ensureKubeE2EPrivilegedSA() {
	desc := ginkgo.CurrentGinkgoTestDescription()
	if strings.Contains(desc.FileName, "/kubernetes/test/e2e/") {
		e2e.Logf("About to run a Kube e2e test, ensuring namespace is privileged")
		c, _, err := configapi.GetKubeClient(KubeConfigPath())
		if err != nil {
			FatalErr(err)
		}
		priv, err := c.SecurityContextConstraints().Get("privileged")
		if err != nil {
			if apierrs.IsNotFound(err) {
				return
			}
			FatalErr(err)
		}
		namespaces, err := c.Namespaces().List(labels.Everything(), fields.Everything())
		if err != nil {
			FatalErr(err)
		}
		groups := []string{}
		for _, name := range priv.Groups {
			if !strings.Contains(name, "e2e-") {
				groups = append(groups, name)
			}
		}
		for _, ns := range namespaces.Items {
			if strings.HasPrefix(ns.Name, "e2e-") {
				groups = append(groups, fmt.Sprintf("system:serviceaccounts:%s", ns.Name))
			}
		}
		priv.Groups = groups
		if _, err := c.SecurityContextConstraints().Update(priv); err != nil {
			FatalErr(err)
		}
	}
}
Example #21
	defer g.GinkgoRecover()
	var (
		deploymentFixture       = exutil.FixturePath("..", "extended", "fixtures", "test-deployment-test.yaml")
		simpleDeploymentFixture = exutil.FixturePath("..", "extended", "fixtures", "deployment-simple.yaml")
		oc                      = exutil.NewCLI("cli-deployment", exutil.KubeConfigPath())
	)

	g.Describe("when run iteratively", func() {
		g.It("should only deploy the last deployment [Conformance]", func() {
			// print some debugging output if the deployment fails
			defer func() {
				if !g.CurrentGinkgoTestDescription().Failed {
					return
				}
				if dc, rcs, pods, err := deploymentInfo(oc, "deployment-simple"); err == nil {
					e2e.Logf("DC: %#v", dc)
					e2e.Logf("  RCs: %#v", rcs)
					p, _ := deploymentPods(pods)
					e2e.Logf("  Deployers: %#v", p)
				}
			}()

			_, err := oc.Run("create").Args("-f", simpleDeploymentFixture).Output()
			o.Expect(err).NotTo(o.HaveOccurred())

			iterations := 15
			for i := 0; i < iterations; i++ {
				if rand.Float32() < 0.2 {
					time.Sleep(time.Duration(rand.Float32() * rand.Float32() * float32(time.Second)))
				}
				switch n := rand.Float32(); {
Example #22
func checkDeploymentInvariants(dc *deployapi.DeploymentConfig, rcs []kapi.ReplicationController, pods []kapi.Pod) error {
	deployers, err := deploymentPods(pods)
	if err != nil {
		return err
	}
	if len(deployers) > len(rcs) {
		existing := sets.NewString()
		for k := range deployers {
			existing.Insert(k)
		}
		for _, rc := range rcs {
			if existing.Has(rc.Name) {
				existing.Delete(rc.Name)
			} else {
				e2e.Logf("ANOMALY: No deployer pod found for deployment %q", rc.Name)
			}
		}
		for k := range existing {
			// TODO: we are missing RCs? https://github.com/openshift/origin/pull/8483#issuecomment-209150611
			e2e.Logf("ANOMALY: Deployer pod found for %q but no RC exists", k)
			//return fmt.Errorf("more deployer pods found than deployments: %#v %#v", deployers, rcs)
		}
	}
	running := sets.NewString()
	completed := 0
	for k, v := range deployers {
		isRunning, isCompleted, err := checkDeployerPodInvariants(k, v)
		if err != nil {
			return err
		}
		if isCompleted {
			completed++
		}
		if isRunning {
			running.Insert(k)
		}
	}
	if running.Len() > 1 {
		return fmt.Errorf("found multiple running deployments: %v", running.List())
	}
	sawStatus := sets.NewString()
	statuses := []string{}
	for _, rc := range rcs {
		status := deployutil.DeploymentStatusFor(&rc)
		if sawStatus.Len() != 0 {
			switch status {
			case deployapi.DeploymentStatusComplete, deployapi.DeploymentStatusFailed:
				if sawStatus.Difference(completedStatuses).Len() != 0 {
					return fmt.Errorf("rc %s was %s, but earlier RCs were not completed: %v", rc.Name, status, statuses)
				}
			case deployapi.DeploymentStatusRunning, deployapi.DeploymentStatusPending:
				if sawStatus.Has(string(status)) {
					return fmt.Errorf("rc %s was %s, but so was an earlier RC: %v", rc.Name, status, statuses)
				}
				if sawStatus.Difference(completedStatuses).Len() != 0 {
					return fmt.Errorf("rc %s was %s, but earlier RCs were not completed: %v", rc.Name, status, statuses)
				}
			case deployapi.DeploymentStatusNew:
			default:
				return fmt.Errorf("rc %s has unexpected status %s: %v", rc.Name, status, statuses)
			}
		}
		sawStatus.Insert(string(status))
		statuses = append(statuses, string(status))
	}
	return nil
}