Example #1
// FindLabelsInSet gets as many key/value pairs as possible out of a label set.
func FindLabelsInSet(labelsToKeep []string, selector labels.Set) map[string]string {
	aL := make(map[string]string)
	for _, l := range labelsToKeep {
		if selector.Has(l) {
			aL[l] = selector.Get(l)
		}
	}
	return aL
}
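A minimal usage sketch for FindLabelsInSet. The import path assumes the current k8s.io/apimachinery location of the labels package; older Kubernetes trees vendored an equivalent package, and FindLabelsInSet is assumed to be in scope from the example above:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	set := labels.Set{"k8s-app": "heapster", "version": "v1"}
	// "missing" is not present in the set, so it is silently skipped.
	kept := FindLabelsInSet([]string{"k8s-app", "missing"}, set)
	fmt.Println(kept) // map[k8s-app:heapster]
}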
Example #2
// AddUnsetLabelsToMap backfills missing values with values we find in a map.
func AddUnsetLabelsToMap(aL map[string]string, labelsToAdd []string, labelSet labels.Set) {
	for _, l := range labelsToAdd {
		// If the label is already there, don't overwrite it.
		if _, exists := aL[l]; exists {
			continue
		}
		// otherwise, backfill this label.
		if labelSet.Has(l) {
			aL[l] = labelSet.Get(l)
		}
	}
}
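A quick sketch of the backfill semantics, assuming AddUnsetLabelsToMap from Example #2 is in scope and the labels package is imported as above:

func backfillExample() {
	existing := map[string]string{"app": "web"}
	source := labels.Set{"app": "db", "tier": "backend"}
	AddUnsetLabelsToMap(existing, []string{"app", "tier"}, source)
	// existing is now map[app:web tier:backend]:
	// "app" already existed and was left alone; "tier" was backfilled.
}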
Example #3
// printDebugInfo lists the heapster pods in the kube-system namespace and
// dumps each pod's heapster container logs via kubectl.
func printDebugInfo(c clientset.Interface) {
	set := labels.Set{"k8s-app": "heapster"}
	options := v1.ListOptions{LabelSelector: set.AsSelector().String()}
	podList, err := c.Core().Pods(api.NamespaceSystem).List(options)
	if err != nil {
		framework.Logf("Error while listing pods %v", err)
		return
	}
	for _, pod := range podList.Items {
		framework.Logf("Kubectl output:\n%v",
			framework.RunKubectlOrDie("log", pod.Name, "--namespace=kube-system", "--container=heapster"))
	}
}
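The set.AsSelector().String() idiom above is how a labels.Set becomes the LabelSelector string that list options expect. A small sketch of the conversion, assuming fmt and the labels package are imported:

func selectorStringExample() {
	set := labels.Set{"k8s-app": "heapster"}
	// AsSelector builds a selector requiring every key/value pair in the set;
	// String renders it in the form the API server parses.
	fmt.Println(set.AsSelector().String()) // k8s-app=heapster
}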
Example #4
// checkExistingRCRecovers verifies that the pods of a pre-existing
// replication controller are recreated and become ready after the
// apiserver recovers.
func checkExistingRCRecovers(f *framework.Framework) {
	By("assert that the pre-existing replication controller recovers")
	podClient := f.ClientSet.Core().Pods(f.Namespace.Name)
	rcSelector := labels.Set{"name": "baz"}.AsSelector()

	By("deleting pods from existing replication controller")
	framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
		options := v1.ListOptions{LabelSelector: rcSelector.String()}
		pods, err := podClient.List(options)
		if err != nil {
			framework.Logf("apiserver returned error, as expected before recovery: %v", err)
			return false, nil
		}
		if len(pods.Items) == 0 {
			return false, nil
		}
		for _, pod := range pods.Items {
			err = podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
			Expect(err).NotTo(HaveOccurred())
		}
		framework.Logf("apiserver has recovered")
		return true, nil
	}))

	By("waiting for replication controller to recover")
	framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
		options := v1.ListOptions{LabelSelector: rcSelector.String()}
		pods, err := podClient.List(options)
		Expect(err).NotTo(HaveOccurred())
		for _, pod := range pods.Items {
			if pod.DeletionTimestamp == nil && v1.IsPodReady(&pod) {
				return true, nil
			}
		}
		return false, nil
	}))
}
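Both phases above rely on the wait.Poll helper. A stripped-down sketch of its contract, with ready() standing in as a hypothetical condition function; the import path assumes the current apimachinery location of the wait package:

import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func pollExample(ready func() bool) error {
	// Poll runs the condition every 500ms for up to 60s. Returning
	// (true, nil) stops the poll successfully, (false, nil) retries on the
	// next tick, and (false, err) aborts immediately with err.
	return wait.Poll(500*time.Millisecond, 60*time.Second, func() (bool, error) {
		return ready(), nil
	})
}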
Example #5
// verifyExpectedRcsExistAndGetExpectedPods checks that exactly one controller
// (RC, deployment, or stateful set) exists per monitoring label and returns
// the UIDs of the pods those controllers select.
func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string, error) {
	expectedPods := []string{}
	// Iterate over the labels that identify the replication controllers that
	// we want to check. rcLabels contains the values for the k8s-app key that
	// identify those replication controllers. Using a label rather than an
	// explicit name is preferred because the names typically carry a version
	// suffix, e.g. heapster-monitoring-v1, which changes after a rolling
	// update, e.g. to heapster-monitoring-v2. A label query also lets us detect
	// the situation in which both heapster-monitoring-v1 and
	// heapster-monitoring-v2 replication controllers are running (which would
	// be an error except during a rolling update).
	for _, rcLabel := range rcLabels {
		selector := labels.Set{"k8s-app": rcLabel}.AsSelector()
		options := v1.ListOptions{LabelSelector: selector.String()}
		deploymentList, err := c.Extensions().Deployments(api.NamespaceSystem).List(options)
		if err != nil {
			return nil, err
		}
		rcList, err := c.Core().ReplicationControllers(api.NamespaceSystem).List(options)
		if err != nil {
			return nil, err
		}
		psList, err := c.Apps().StatefulSets(api.NamespaceSystem).List(options)
		if err != nil {
			return nil, err
		}
		count := len(rcList.Items) + len(deploymentList.Items) + len(psList.Items)
		if count != 1 {
			return nil, fmt.Errorf("expected to find exactly one RC, deployment, or stateful set with label %s but got %d",
				rcLabel, count)
		}
		// Check all the replication controllers.
		for _, rc := range rcList.Items {
			selector := labels.Set(rc.Spec.Selector).AsSelector()
			options := v1.ListOptions{LabelSelector: selector.String()}
			podList, err := c.Core().Pods(api.NamespaceSystem).List(options)
			if err != nil {
				return nil, err
			}
			for _, pod := range podList.Items {
				if pod.DeletionTimestamp != nil {
					continue
				}
				expectedPods = append(expectedPods, string(pod.UID))
			}
		}
		// Do the same for all deployments.
		for _, d := range deploymentList.Items {
			selector := labels.Set(d.Spec.Selector.MatchLabels).AsSelector()
			options := v1.ListOptions{LabelSelector: selector.String()}
			podList, err := c.Core().Pods(api.NamespaceSystem).List(options)
			if err != nil {
				return nil, err
			}
			for _, pod := range podList.Items {
				if pod.DeletionTimestamp != nil {
					continue
				}
				expectedPods = append(expectedPods, string(pod.UID))
			}
		}
		// And the same for stateful sets.
		for _, ps := range psList.Items {
			selector := labels.Set(ps.Spec.Selector.MatchLabels).AsSelector()
			options := v1.ListOptions{LabelSelector: selector.String()}
			podList, err := c.Core().Pods(api.NamespaceSystem).List(options)
			if err != nil {
				return nil, err
			}
			for _, pod := range podList.Items {
				if pod.DeletionTimestamp != nil {
					continue
				}
				expectedPods = append(expectedPods, string(pod.UID))
			}
		}
	}
	return expectedPods, nil
}
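The three collection loops above are identical apart from how the selector is built. A hypothetical helper (collectPodUIDs, not part of the original) that factors out the shared list-and-filter step, using the same client calls as the function above:

// collectPodUIDs lists the pods matching selector in the system namespace and
// returns the UIDs of those that are not already terminating.
func collectPodUIDs(c clientset.Interface, selector labels.Selector) ([]string, error) {
	options := v1.ListOptions{LabelSelector: selector.String()}
	podList, err := c.Core().Pods(api.NamespaceSystem).List(options)
	if err != nil {
		return nil, err
	}
	uids := []string{}
	for _, pod := range podList.Items {
		if pod.DeletionTimestamp != nil {
			continue // skip pods that are being deleted
		}
		uids = append(uids, string(pod.UID))
	}
	return uids, nil
}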