Example #1
// Query fetches all releases that match the provided map of labels.
// An error is returned if the ConfigMap backend fails to retrieve the releases.
func (cfgmaps *ConfigMaps) Query(labels map[string]string) ([]*rspb.Release, error) {
	ls := kblabels.Set{}
	for k, v := range labels {
		ls[k] = v
	}

	opts := api.ListOptions{LabelSelector: ls.AsSelector()}

	list, err := cfgmaps.impl.List(opts)
	if err != nil {
		logerrf(err, "query: failed to query with labels")
		return nil, err
	}

	if len(list.Items) == 0 {
		return nil, ErrReleaseNotFound
	}

	var results []*rspb.Release
	for _, item := range list.Items {
		rls, err := decodeRelease(item.Data["release"])
		if err != nil {
			logerrf(err, "query: failed to decode release: %s", err)
			continue
		}
		results = append(results, rls)
	}
	return results, nil
}
Example #2
// FindLabelsInSet returns the key/value pairs from selector whose keys are listed in labelsToKeep.
func FindLabelsInSet(labelsToKeep []string, selector labels.Set) map[string]string {
	aL := make(map[string]string)
	for _, l := range labelsToKeep {
		if selector.Has(l) {
			aL[l] = selector.Get(l)
		}
	}
	return aL
}
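A minimal usage sketch of FindLabelsInSet, assuming the function above is in package scope and that labels is the k8s.io/apimachinery/pkg/labels package (older trees vendor it as k8s.io/kubernetes/pkg/labels); the keys and values below are illustrative only:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels" // older trees: k8s.io/kubernetes/pkg/labels
)

func main() {
	// A node's full label set; the values are made up for illustration.
	nodeLabels := labels.Set{
		"zone":    "us-east-1a",
		"region":  "us-east-1",
		"k8s-app": "kubelet",
	}
	// Keep only "zone" and "region"; "missing" is skipped because the set
	// does not contain it. Assumes FindLabelsInSet (above) is in this package.
	kept := FindLabelsInSet([]string{"zone", "region", "missing"}, nodeLabels)
	fmt.Println(kept) // map[region:us-east-1 zone:us-east-1a]
}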
Example #3
func printDebugInfo(c *client.Client) {
	set := labels.Set{"k8s-app": "heapster"}
	options := api.ListOptions{LabelSelector: set.AsSelector()}
	podList, err := c.Pods(api.NamespaceSystem).List(options)
	if err != nil {
		Logf("Error while listing pods %v", err)
		return
	}
	for _, pod := range podList.Items {
		Logf("Kubectl output:\n%v", runKubectlOrDie("log", pod.Name, "--namespace=kube-system"))
	}
}
Example #4
// AddUnsetLabelsToMap backfills any keys from labelsToAdd that are missing in aL with the values found in labelSet.
func AddUnsetLabelsToMap(aL map[string]string, labelsToAdd []string, labelSet labels.Set) {
	for _, l := range labelsToAdd {
		// if the label is already there, don't overwrite it.
		if _, exists := aL[l]; exists {
			continue
		}
		// otherwise, backfill this label.
		if labelSet.Has(l) {
			aL[l] = labelSet.Get(l)
		}
	}
}
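Similarly, a minimal sketch of how AddUnsetLabelsToMap might be called, with illustrative keys and values and the same labels package assumption as above:

// The destination map already has "zone", so that key is left untouched;
// "region" is backfilled from the node's label set, and "missing" is skipped
// because the set does not contain it.
existing := map[string]string{"zone": "us-east-1a"}
nodeLabels := labels.Set{
	"zone":   "us-west-2b", // not copied: key already present in existing
	"region": "us-east-1",
}
AddUnsetLabelsToMap(existing, []string{"zone", "region", "missing"}, nodeLabels)
// existing is now map[region:us-east-1 zone:us-east-1a]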
Example #5
func printDebugInfo(c clientset.Interface) {
	set := labels.Set{"k8s-app": "heapster"}
	options := api.ListOptions{LabelSelector: set.AsSelector()}
	podList, err := c.Core().Pods(api.NamespaceSystem).List(options)
	if err != nil {
		framework.Logf("Error while listing pods %v", err)
		return
	}
	for _, pod := range podList.Items {
		framework.Logf("Kubectl output:\n%v",
			framework.RunKubectlOrDie("log", pod.Name, "--namespace=kube-system", "--container=heapster"))
	}
}
Example #6
func checkExistingRCRecovers(f *framework.Framework) {
	By("assert that the pre-existing replication controller recovers")
	podClient := f.ClientSet.Core().Pods(f.Namespace.Name)
	rcSelector := labels.Set{"name": "baz"}.AsSelector()

	By("deleting pods from existing replication controller")
	framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
		options := v1.ListOptions{LabelSelector: rcSelector.String()}
		pods, err := podClient.List(options)
		if err != nil {
			framework.Logf("apiserver returned error, as expected before recovery: %v", err)
			return false, nil
		}
		if len(pods.Items) == 0 {
			return false, nil
		}
		for _, pod := range pods.Items {
			err = podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
			Expect(err).NotTo(HaveOccurred())
		}
		framework.Logf("apiserver has recovered")
		return true, nil
	}))

	By("waiting for replication controller to recover")
	framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
		options := v1.ListOptions{LabelSelector: rcSelector.String()}
		pods, err := podClient.List(options)
		Expect(err).NotTo(HaveOccurred())
		for _, pod := range pods.Items {
			if pod.DeletionTimestamp == nil && v1.IsPodReady(&pod) {
				return true, nil
			}
		}
		return false, nil
	}))
}
Example #7
	. "github.com/onsi/gomega"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/pod"
	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/apimachinery/registered"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/pkg/util"
	"k8s.io/kubernetes/pkg/util/wait"
)

const dnsTestPodHostName = "dns-querier-1"
const dnsTestServiceName = "dns-test-service"

var dnsServiceLabelSelector = labels.Set{
	"k8s-app":                       "kube-dns",
	"kubernetes.io/cluster-service": "true",
}.AsSelector()

func createDNSPod(namespace, wheezyProbeCmd, jessieProbeCmd string) *api.Pod {
	pod := &api.Pod{
		TypeMeta: unversioned.TypeMeta{
			Kind:       "Pod",
			APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(),
		},
		ObjectMeta: api.ObjectMeta{
			Name:      "dns-test-" + string(util.NewUUID()),
			Namespace: namespace,
			Annotations: map[string]string{
				pod.PodHostnameAnnotation:  dnsTestPodHostName,
				pod.PodSubdomainAnnotation: dnsTestServiceName,
			},
Example #8
func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string, error) {
	expectedPods := []string{}
	// Iterate over the labels that identify the replication controllers that we
	// want to check. rcLabels contains the values of the k8s-app key that identify
	// those replication controllers. Using a label rather than an explicit name is
	// preferred because names typically carry a version suffix, e.g.
	// heapster-monitoring-v1, which changes after a rolling update, e.g. to
	// heapster-monitoring-v2. A label query also lets us detect the situation where
	// both a heapster-monitoring-v1 and a heapster-monitoring-v2 replication
	// controller are running (which would be an error except during a rolling update).
	for _, rcLabel := range rcLabels {
		selector := labels.Set{"k8s-app": rcLabel}.AsSelector()
		options := v1.ListOptions{LabelSelector: selector.String()}
		deploymentList, err := c.Extensions().Deployments(api.NamespaceSystem).List(options)
		if err != nil {
			return nil, err
		}
		rcList, err := c.Core().ReplicationControllers(api.NamespaceSystem).List(options)
		if err != nil {
			return nil, err
		}
		psList, err := c.Apps().StatefulSets(api.NamespaceSystem).List(options)
		if err != nil {
			return nil, err
		}
		if (len(rcList.Items) + len(deploymentList.Items) + len(psList.Items)) != 1 {
			return nil, fmt.Errorf("expected to find exactly one RC, deployment or stateful set with label %s but got %d",
				rcLabel, len(rcList.Items)+len(deploymentList.Items)+len(psList.Items))
		}
		// Check all the replication controllers.
		for _, rc := range rcList.Items {
			selector := labels.Set(rc.Spec.Selector).AsSelector()
			options := v1.ListOptions{LabelSelector: selector.String()}
			podList, err := c.Core().Pods(api.NamespaceSystem).List(options)
			if err != nil {
				return nil, err
			}
			for _, pod := range podList.Items {
				if pod.DeletionTimestamp != nil {
					continue
				}
				expectedPods = append(expectedPods, string(pod.UID))
			}
		}
		// Do the same for all deployments.
		for _, rc := range deploymentList.Items {
			selector := labels.Set(rc.Spec.Selector.MatchLabels).AsSelector()
			options := v1.ListOptions{LabelSelector: selector.String()}
			podList, err := c.Core().Pods(api.NamespaceSystem).List(options)
			if err != nil {
				return nil, err
			}
			for _, pod := range podList.Items {
				if pod.DeletionTimestamp != nil {
					continue
				}
				expectedPods = append(expectedPods, string(pod.UID))
			}
		}
		// And do the same for stateful sets.
		for _, ps := range psList.Items {
			selector := labels.Set(ps.Spec.Selector.MatchLabels).AsSelector()
			options := v1.ListOptions{LabelSelector: selector.String()}
			podList, err := c.Core().Pods(api.NamespaceSystem).List(options)
			if err != nil {
				return nil, err
			}
			for _, pod := range podList.Items {
				if pod.DeletionTimestamp != nil {
					continue
				}
				expectedPods = append(expectedPods, string(pod.UID))
			}
		}
	}
	return expectedPods, nil
}
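The common pattern across all of these examples is to build a labels.Set, convert it with AsSelector(), and hand the result (or its String() form) to a list call. Below is a minimal standalone sketch of just that conversion, assuming the apimachinery labels package (vendored as k8s.io/kubernetes/pkg/labels in the older examples above):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Build a label set and turn it into a selector.
	set := labels.Set{"k8s-app": "heapster"}
	selector := set.AsSelector()

	// Older clients accept the Selector directly in api.ListOptions; newer typed
	// clients take the string form, e.g. metav1.ListOptions{LabelSelector: selector.String()}.
	fmt.Println(selector.String()) // k8s-app=heapster

	// A selector can also be evaluated locally against another label set.
	fmt.Println(selector.Matches(labels.Set{"k8s-app": "heapster", "pod-template-hash": "12345"})) // true
}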