// ValidateDaemonSetSpec tests if required fields in the DaemonSetSpec are set.
func ValidateDaemonSetSpec(spec *extensions.DaemonSetSpec) errs.ValidationErrorList {
	allErrs := errs.ValidationErrorList{}

	selector := labels.Set(spec.Selector).AsSelector()
	if selector.Empty() {
		allErrs = append(allErrs, errs.NewFieldRequired("selector"))
	}

	if spec.Template == nil {
		allErrs = append(allErrs, errs.NewFieldRequired("template"))
	} else {
		templateLabels := labels.Set(spec.Template.Labels)
		if !selector.Matches(templateLabels) {
			allErrs = append(allErrs, errs.NewFieldInvalid("template.metadata.labels", spec.Template.Labels, "selector does not match template"))
		}
		allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(spec.Template).Prefix("template")...)
		// Daemons typically run on more than one node, so mark Read-Write persistent disks as invalid.
		allErrs = append(allErrs, apivalidation.ValidateReadOnlyPersistentDisks(spec.Template.Spec.Volumes).Prefix("template.spec.volumes")...)
		// RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec().
		if spec.Template.Spec.RestartPolicy != api.RestartPolicyAlways {
			allErrs = append(allErrs, errs.NewFieldValueNotSupported("template.spec.restartPolicy", spec.Template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)}))
		}
	}
	return allErrs
}
// GetPodControllers returns a list of replication controllers managing a pod.
// Returns an error only if no matching controllers are found.
func (s *StoreToReplicationControllerLister) GetPodControllers(pod *api.Pod) (controllers []api.ReplicationController, err error) {
	var selector labels.Selector
	var rc api.ReplicationController

	if len(pod.Labels) == 0 {
		err = fmt.Errorf("No controllers found for pod %v because it has no labels", pod.Name)
		return
	}

	for _, m := range s.Store.List() {
		rc = *m.(*api.ReplicationController)
		if rc.Namespace != pod.Namespace {
			continue
		}
		selector = labels.Set(rc.Spec.Selector).AsSelector()
		// If an rc with a nil or empty selector creeps in, it should match nothing, not everything.
		if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) {
			continue
		}
		controllers = append(controllers, rc)
	}
	if len(controllers) == 0 {
		err = fmt.Errorf("Could not find controllers for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
	}
	return
}
// GetPodDaemonSets returns a list of daemon sets managing a pod.
// Returns an error if and only if no matching daemon sets are found.
func (s *StoreToDaemonSetLister) GetPodDaemonSets(pod *api.Pod) (daemonSets []extensions.DaemonSet, err error) {
	var selector labels.Selector
	var daemonSet extensions.DaemonSet

	if len(pod.Labels) == 0 {
		err = fmt.Errorf("No daemon sets found for pod %v because it has no labels", pod.Name)
		return
	}

	for _, m := range s.Store.List() {
		daemonSet = *m.(*extensions.DaemonSet)
		if daemonSet.Namespace != pod.Namespace {
			continue
		}
		selector = labels.Set(daemonSet.Spec.Selector).AsSelector()
		// If a daemonSet with a nil or empty selector creeps in, it should match nothing, not everything.
		if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) {
			continue
		}
		daemonSets = append(daemonSets, daemonSet)
	}
	if len(daemonSets) == 0 {
		err = fmt.Errorf("Could not find daemon set for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
	}
	return
}
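// Why the Empty() guards in the two listers above matter: in this labels
// package, a selector built from an empty Set matches everything, so an rc or
// daemon set that somehow has no selector would otherwise appear to manage
// every pod in its namespace. A minimal sketch of that behavior (the Example
// function below is illustrative, not a fixture from this tree):
func ExampleEmptySelectorMatchesEverything() {
	empty := labels.Set{}.AsSelector()
	fmt.Println(empty.Empty())                           // true
	fmt.Println(empty.Matches(labels.Set{"app": "web"})) // true: matches despite sharing no keys
}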
// TODO: Move this back to scheduler as a helper function that takes a Store,
// rather than a method of StoreToServiceLister.
func (s *StoreToServiceLister) GetPodServices(pod *api.Pod) (services []api.Service, err error) {
	var selector labels.Selector
	var service api.Service

	for _, m := range s.Store.List() {
		service = *m.(*api.Service)
		// consider only services that are in the same namespace as the pod
		if service.Namespace != pod.Namespace {
			continue
		}
		if service.Spec.Selector == nil {
			// services with nil selectors match nothing, not everything.
			continue
		}
		selector = labels.Set(service.Spec.Selector).AsSelector()
		if selector.Matches(labels.Set(pod.Labels)) {
			services = append(services, service)
		}
	}
	if len(services) == 0 {
		err = fmt.Errorf("Could not find service for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
	}
	return
}
func MappingTypeForPod(pod *api.Pod) HostPortMappingType {
	filter := map[string]string{
		PortMappingLabelKey: string(HostPortMappingFixed),
	}
	selector := labels.Set(filter).AsSelector()
	if selector.Matches(labels.Set(pod.Labels)) {
		return HostPortMappingFixed
	}
	return HostPortMappingWildcard
}
// getOverlappingControllers finds rcs that this controller overlaps, as well as rcs overlapping this controller.
func getOverlappingControllers(c client.ReplicationControllerInterface, rc *api.ReplicationController) ([]api.ReplicationController, error) {
	rcs, err := c.List(labels.Everything(), fields.Everything())
	if err != nil {
		return nil, fmt.Errorf("error getting replication controllers: %v", err)
	}
	var matchingRCs []api.ReplicationController
	rcLabels := labels.Set(rc.Spec.Selector)
	for _, controller := range rcs.Items {
		newRCLabels := labels.Set(controller.Spec.Selector)
		if labels.SelectorFromSet(newRCLabels).Matches(rcLabels) || labels.SelectorFromSet(rcLabels).Matches(newRCLabels) {
			matchingRCs = append(matchingRCs, controller)
		}
	}
	return matchingRCs, nil
}
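// The overlap test above checks containment in both directions: rc A overlaps
// rc B if A's selector (viewed as a selector) matches B's selector (viewed as
// a label set), or vice versa. A standalone sketch of that check; the sets
// and names below are illustrative assumptions:
func ExampleSelectorOverlap() {
	broad := labels.Set{"app": "web"}
	narrow := labels.Set{"app": "web", "track": "canary"}
	overlap := labels.SelectorFromSet(broad).Matches(narrow) || labels.SelectorFromSet(narrow).Matches(broad)
	fmt.Println(overlap) // true: the broader selector matches the narrower set
}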
func (h *HeapsterMetricsClient) GetResourceConsumptionAndRequest(resourceName api.ResourceName, namespace string, selector map[string]string) (consumption *ResourceConsumption, request *resource.Quantity, timestamp time.Time, err error) {
	podList, err := h.client.Pods(namespace).
		List(labels.SelectorFromSet(labels.Set(selector)), fields.Everything())
	if err != nil {
		return nil, nil, time.Time{}, fmt.Errorf("failed to get pod list: %v", err)
	}
	podNames := []string{}
	sum := resource.MustParse("0")
	missing := false
	for _, pod := range podList.Items {
		podNames = append(podNames, pod.Name)
		for _, container := range pod.Spec.Containers {
			containerRequest := container.Resources.Requests[resourceName]
			if containerRequest.Amount != nil {
				sum.Add(containerRequest)
			} else {
				missing = true
			}
		}
	}
	if missing || sum.Cmp(resource.MustParse("0")) == 0 {
		return nil, nil, time.Time{}, fmt.Errorf("some pods do not have request for %s", resourceName)
	}
	glog.Infof("Sum of %s requested: %v", resourceName, sum)
	avg := resource.MustParse(fmt.Sprintf("%dm", sum.MilliValue()/int64(len(podList.Items))))
	request = &avg
	consumption, timestamp, err = h.getForPods(resourceName, namespace, podNames)
	if err != nil {
		return nil, nil, time.Time{}, err
	}
	return consumption, request, timestamp, nil
}
func (t *Tester) testListMatchLabels(obj runtime.Object, assignFn AssignFunc) {
	ctx := t.TestContext()
	testLabels := map[string]string{"key": "value"}

	foo1 := copyOrDie(obj)
	t.setObjectMeta(foo1, "foo1")
	foo2 := copyOrDie(obj)
	foo2Meta := t.getObjectMetaOrFail(foo2)
	foo2Meta.Name = "foo2"
	foo2Meta.Namespace = api.NamespaceValue(ctx)
	foo2Meta.Labels = testLabels

	existing := assignFn([]runtime.Object{foo1, foo2})
	filtered := []runtime.Object{existing[1]}

	selector := labels.SelectorFromSet(labels.Set(testLabels))
	listObj, err := t.storage.(rest.Lister).List(ctx, selector, fields.Everything())
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	items, err := listToItems(listObj)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if len(items) != len(filtered) {
		t.Errorf("unexpected number of items: %v", len(items))
	}
	if !api.Semantic.DeepEqual(filtered, items) {
		t.Errorf("expected: %#v, got: %#v", filtered, items)
	}
}
func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error) {
	expectedPods := []string{}
	// Iterate over the labels that identify the replication controllers that we
	// want to check. rcLabels contains the values for the k8s-app key that
	// identify the replication controllers that we want to check. Using a label
	// rather than an explicit name is preferred because the names will typically have
	// a version suffix e.g. heapster-monitoring-v1 and this will change after a rolling
	// update e.g. to heapster-monitoring-v2. By using a label query we can check for the
	// situation where both a heapster-monitoring-v1 and a heapster-monitoring-v2
	// replication controller are running (which would be an error except during a rolling update).
	for _, rcLabel := range rcLabels {
		rcList, err := c.ReplicationControllers(api.NamespaceSystem).List(labels.Set{"k8s-app": rcLabel}.AsSelector())
		if err != nil {
			return nil, err
		}
		if len(rcList.Items) != 1 {
			return nil, fmt.Errorf("expected to find one replication controller with label %s but got %d", rcLabel, len(rcList.Items))
		}
		for _, rc := range rcList.Items {
			podList, err := c.Pods(api.NamespaceSystem).List(labels.Set(rc.Spec.Selector).AsSelector(), fields.Everything())
			if err != nil {
				return nil, err
			}
			for _, pod := range podList.Items {
				if pod.DeletionTimestamp != nil {
					continue
				}
				expectedPods = append(expectedPods, string(pod.UID))
			}
		}
	}
	return expectedPods, nil
}
func checkDaemonPodOnNodes(f *Framework, selector map[string]string, nodeNames []string) func() (bool, error) {
	return func() (bool, error) {
		podList, err := f.Client.Pods(f.Namespace.Name).List(labels.Set(selector).AsSelector(), fields.Everything())
		if err != nil {
			// Swallow the error so a surrounding poll simply retries on the next tick.
			return false, nil
		}
		pods := podList.Items

		nodesToPodCount := make(map[string]int)
		for _, pod := range pods {
			nodesToPodCount[pod.Spec.NodeName] += 1
		}

		// Ensure that exactly 1 pod is running on all nodes in nodeNames.
		for _, nodeName := range nodeNames {
			if nodesToPodCount[nodeName] != 1 {
				return false, nil
			}
		}

		// Ensure that sizes of the lists are the same. We've verified that every element of nodeNames is in
		// nodesToPodCount, so verifying the lengths are equal ensures that there aren't pods running on any
		// other nodes.
		return len(nodesToPodCount) == len(nodeNames), nil
	}
}
func PodMatchesNodeLabels(pod *api.Pod, node *api.Node) bool {
	if len(pod.Spec.NodeSelector) == 0 {
		return true
	}
	selector := labels.SelectorFromSet(pod.Spec.NodeSelector)
	return selector.Matches(labels.Set(node.Labels))
}
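// A minimal usage sketch for PodMatchesNodeLabels; the pod and node literals
// are illustrative assumptions, not fixtures from this tree. Every
// nodeSelector entry must be present on the node, while extra node labels are
// ignored:
func ExamplePodMatchesNodeLabels() {
	pod := &api.Pod{Spec: api.PodSpec{NodeSelector: map[string]string{"disktype": "ssd"}}}
	node := &api.Node{ObjectMeta: api.ObjectMeta{Labels: map[string]string{"disktype": "ssd", "zone": "us-east1-a"}}}
	fmt.Println(PodMatchesNodeLabels(pod, node)) // true: the node carries every required label
}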
// pollForReadyPods polls oldRc and newRc each interval and returns the old
// and new ready counts for their pods. If a pod is observed as being ready,
// it's considered ready even if it later becomes notReady.
func (r *RollingUpdater) pollForReadyPods(interval, timeout time.Duration, oldRc, newRc *api.ReplicationController) (int, int, error) {
	controllers := []*api.ReplicationController{oldRc, newRc}
	oldReady := 0
	newReady := 0
	err := wait.Poll(interval, timeout, func() (done bool, err error) {
		anyReady := false
		for _, controller := range controllers {
			selector := labels.Set(controller.Spec.Selector).AsSelector()
			pods, err := r.c.Pods(controller.Namespace).List(selector, fields.Everything())
			if err != nil {
				return false, err
			}
			for _, pod := range pods.Items {
				if api.IsPodReady(&pod) {
					switch controller.Name {
					case oldRc.Name:
						oldReady++
					case newRc.Name:
						newReady++
					}
					anyReady = true
				}
			}
		}
		if anyReady {
			return true, nil
		}
		return false, nil
	})
	return oldReady, newReady, err
}
// GetPodJobs returns a list of jobs managing a pod. Returns an error only if no matching jobs are found.
func (s *StoreToJobLister) GetPodJobs(pod *api.Pod) (jobs []extensions.Job, err error) {
	var selector labels.Selector
	var job extensions.Job

	if len(pod.Labels) == 0 {
		err = fmt.Errorf("No jobs found for pod %v because it has no labels", pod.Name)
		return
	}

	for _, m := range s.Store.List() {
		job = *m.(*extensions.Job)
		if job.Namespace != pod.Namespace {
			continue
		}

		var convErr error
		selector, convErr = extensions.PodSelectorAsSelector(job.Spec.Selector)
		if convErr != nil {
			// Skip jobs whose selector cannot be converted rather than matching against a nil selector.
			continue
		}
		if !selector.Matches(labels.Set(pod.Labels)) {
			continue
		}
		jobs = append(jobs, job)
	}
	if len(jobs) == 0 {
		err = fmt.Errorf("Could not find jobs for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
	}
	return
}
// Returns the old RCs targeted by the given Deployment.
func GetOldRCs(deployment extensions.Deployment, c client.Interface) ([]*api.ReplicationController, error) {
	namespace := deployment.ObjectMeta.Namespace
	// 1. Find all pods whose labels match deployment.Spec.Selector.
	podList, err := c.Pods(namespace).List(labels.SelectorFromSet(deployment.Spec.Selector), fields.Everything())
	if err != nil {
		return nil, fmt.Errorf("error listing pods: %v", err)
	}
	// 2. Find the corresponding RCs for pods in podList.
	// TODO: Right now we list all RCs and then filter. We should add an API for this.
	oldRCs := map[string]api.ReplicationController{}
	rcList, err := c.ReplicationControllers(namespace).List(labels.Everything())
	if err != nil {
		return nil, fmt.Errorf("error listing replication controllers: %v", err)
	}
	for _, pod := range podList.Items {
		podLabelsSelector := labels.Set(pod.ObjectMeta.Labels)
		for _, rc := range rcList.Items {
			rcLabelsSelector := labels.SelectorFromSet(rc.Spec.Selector)
			if rcLabelsSelector.Matches(podLabelsSelector) {
				// Filter out RC that has the same pod template spec as the deployment - that is the new RC.
				if api.Semantic.DeepEqual(rc.Spec.Template, GetNewRCTemplate(deployment)) {
					continue
				}
				oldRCs[rc.ObjectMeta.Name] = rc
			}
		}
	}
	requiredRCs := []*api.ReplicationController{}
	for _, value := range oldRCs {
		value := value // copy: appending &value directly would make every entry alias the loop variable
		requiredRCs = append(requiredRCs, &value)
	}
	return requiredRCs, nil
}
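// The in-loop copy above works around a classic pitfall under the loop
// semantics of the Go versions contemporary with this code (pre-1.22): the
// range variable is a single reused location, so taking its address on each
// iteration yields pointers that all alias the final element. A standalone
// sketch of the failure mode, with nothing Kubernetes-specific assumed:
func ExampleRangeVariableAliasing() {
	nums := []int{1, 2, 3}
	bad := []*int{}
	for _, n := range nums {
		bad = append(bad, &n) // every pointer aliases the same loop variable
	}
	fmt.Println(*bad[0], *bad[1], *bad[2]) // prints "3 3 3" pre-Go 1.22, not "1 2 3"
}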
func ValidateJobSpec(spec *extensions.JobSpec) errs.ValidationErrorList {
	allErrs := errs.ValidationErrorList{}

	if spec.Parallelism != nil {
		allErrs = append(allErrs, apivalidation.ValidatePositiveField(int64(*spec.Parallelism), "parallelism")...)
	}
	if spec.Completions != nil {
		allErrs = append(allErrs, apivalidation.ValidatePositiveField(int64(*spec.Completions), "completions")...)
	}
	if spec.Selector == nil {
		allErrs = append(allErrs, errs.NewFieldRequired("selector"))
	} else {
		allErrs = append(allErrs, ValidatePodSelector(spec.Selector).Prefix("selector")...)
	}

	if selector, err := extensions.PodSelectorAsSelector(spec.Selector); err == nil {
		templateLabels := labels.Set(spec.Template.Labels)
		if !selector.Matches(templateLabels) {
			allErrs = append(allErrs, errs.NewFieldInvalid("template.metadata.labels", spec.Template.Labels, "selector does not match template"))
		}
	}

	allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(&spec.Template).Prefix("template")...)
	if spec.Template.Spec.RestartPolicy != api.RestartPolicyOnFailure &&
		spec.Template.Spec.RestartPolicy != api.RestartPolicyNever {
		allErrs = append(allErrs, errs.NewFieldValueNotSupported("template.spec.restartPolicy", spec.Template.Spec.RestartPolicy, []string{string(api.RestartPolicyOnFailure), string(api.RestartPolicyNever)}))
	}
	return allErrs
}
func podsCreated(c *client.Client, ns, name string, replicas int) (*api.PodList, error) {
	timeout := 2 * time.Minute
	// List the pods, making sure we observe all the replicas.
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
		pods, err := c.Pods(ns).List(label, fields.Everything())
		if err != nil {
			return nil, err
		}

		created := []api.Pod{}
		for _, pod := range pods.Items {
			if pod.DeletionTimestamp != nil {
				continue
			}
			created = append(created, pod)
		}
		Logf("Pod name %s: Found %d pods out of %d", name, len(created), replicas)

		if len(created) == replicas {
			pods.Items = created
			return pods, nil
		}
	}
	return nil, fmt.Errorf("Pod name %s: Gave up waiting %v for %d pods to come up", name, timeout, replicas)
}
// CalculateNodeLabelPriority checks whether a particular label exists on a node, regardless of its value.
// If presence is true, it prioritizes nodes that have the specified label.
// If presence is false, it prioritizes nodes that do not have the specified label.
func (n *NodeLabelPrioritizer) CalculateNodeLabelPriority(pod *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (algorithm.HostPriorityList, error) {
	var score int
	nodes, err := nodeLister.List()
	if err != nil {
		return nil, err
	}

	labeledNodes := map[string]bool{}
	for _, node := range nodes.Items {
		exists := labels.Set(node.Labels).Has(n.label)
		labeledNodes[node.Name] = (exists && n.presence) || (!exists && !n.presence)
	}

	result := []algorithm.HostPriority{}
	// Score on a scale of 0-10: 0 is the lowest priority and 10 is the highest.
	for nodeName, success := range labeledNodes {
		if success {
			score = 10
		} else {
			score = 0
		}
		result = append(result, algorithm.HostPriority{Host: nodeName, Score: score})
	}
	return result, nil
}
// List returns []*api.Pod matching a query.
func (f FakePodLister) List(s labels.Selector) (selected []*api.Pod, err error) {
	for _, pod := range f {
		if s.Matches(labels.Set(pod.Labels)) {
			selected = append(selected, pod)
		}
	}
	return selected, nil
}
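// A sketch of driving FakePodLister from a test, assuming FakePodLister is a
// []*api.Pod as its range usage above suggests; the pod fixtures are
// illustrative assumptions:
func ExampleFakePodLister() {
	lister := FakePodLister{
		&api.Pod{ObjectMeta: api.ObjectMeta{Name: "web-1", Labels: map[string]string{"app": "web"}}},
		&api.Pod{ObjectMeta: api.ObjectMeta{Name: "db-1", Labels: map[string]string{"app": "db"}}},
	}
	selected, _ := lister.List(labels.Set{"app": "web"}.AsSelector())
	fmt.Println(len(selected), selected[0].Name) // 1 web-1
}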
// GetPodControllers gets the ReplicationControllers whose selectors match the labels on the given pod.
func (f FakeControllerLister) GetPodControllers(pod *api.Pod) (controllers []api.ReplicationController, err error) {
	var selector labels.Selector

	for _, controller := range f {
		if controller.Namespace != pod.Namespace {
			continue
		}
		selector = labels.Set(controller.Spec.Selector).AsSelector()
		if selector.Matches(labels.Set(pod.Labels)) {
			controllers = append(controllers, controller)
		}
	}
	if len(controllers) == 0 {
		err = fmt.Errorf("Could not find Replication Controller for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
	}
	return
}
// syncReplicationController will sync the rc with the given key if it has had its expectations fulfilled, meaning
// it did not expect to see any more of its pods created or deleted. This function is not meant to be invoked
// concurrently with the same key.
func (rm *ReplicationManager) syncReplicationController(key string) error {
	startTime := time.Now()
	defer func() {
		glog.V(4).Infof("Finished syncing controller %q (%v)", key, time.Since(startTime))
	}()

	obj, exists, err := rm.rcStore.Store.GetByKey(key)
	if err != nil {
		// Check the error before exists, so a lookup failure is not mistaken for a deletion.
		glog.Infof("Unable to retrieve rc %v from store: %v", key, err)
		rm.queue.Add(key)
		return err
	}
	if !exists {
		glog.Infof("Replication Controller has been deleted %v", key)
		rm.expectations.DeleteExpectations(key)
		return nil
	}
	rc := *obj.(*api.ReplicationController)
	if !rm.podStoreSynced() {
		// Sleep so we give the pod reflector goroutine a chance to run.
		time.Sleep(PodStoreSyncedPollPeriod)
		glog.Infof("Waiting for pods controller to sync, requeuing rc %v", rc.Name)
		rm.enqueueController(&rc)
		return nil
	}

	// Check the expectations of the rc before counting active pods, otherwise a new pod can sneak in
	// and update the expectations after we've retrieved active pods from the store. If a new pod enters
	// the store after we've checked the expectation, the rc sync is just deferred till the next relist.
	rcKey, err := controller.KeyFunc(&rc)
	if err != nil {
		glog.Errorf("Couldn't get key for replication controller %#v: %v", rc, err)
		return err
	}
	rcNeedsSync := rm.expectations.SatisfiedExpectations(rcKey)
	podList, err := rm.podStore.Pods(rc.Namespace).List(labels.Set(rc.Spec.Selector).AsSelector())
	if err != nil {
		glog.Errorf("Error getting pods for rc %q: %v", key, err)
		rm.queue.Add(key)
		return err
	}

	// TODO: Do this in a single pass, or use an index.
	filteredPods := controller.FilterActivePods(podList.Items)
	if rcNeedsSync {
		rm.manageReplicas(filteredPods, &rc)
	}

	// Always updates status as pods come up or die.
	if err := updateReplicaCount(rm.kubeClient.ReplicationControllers(rc.Namespace), rc, len(filteredPods)); err != nil {
		// Multiple things could lead to this update failing. Requeuing the controller ensures
		// we retry with some fairness.
		glog.V(2).Infof("Failed to update replica count for controller %v, requeuing", rc.Name)
		rm.enqueueController(&rc)
	}
	return nil
}
// MatchPersistentVolumeClaim returns a generic matcher for a given label and field selector.
func MatchPersistentVolumeClaim(label labels.Selector, field fields.Selector) generic.Matcher {
	return generic.MatcherFunc(func(obj runtime.Object) (bool, error) {
		persistentvolumeclaimObj, ok := obj.(*api.PersistentVolumeClaim)
		if !ok {
			return false, fmt.Errorf("not a persistentvolumeclaim")
		}
		fields := PersistentVolumeClaimToSelectableFields(persistentvolumeclaimObj)
		return label.Matches(labels.Set(persistentvolumeclaimObj.Labels)) && field.Matches(fields), nil
	})
}
// GetPodServices gets the services whose selectors match the labels on the given pod.
func (f FakeServiceLister) GetPodServices(pod *api.Pod) (services []api.Service, err error) {
	var selector labels.Selector

	for _, service := range f {
		// consider only services that are in the same namespace as the pod
		if service.Namespace != pod.Namespace {
			continue
		}
		selector = labels.Set(service.Spec.Selector).AsSelector()
		if selector.Matches(labels.Set(pod.Labels)) {
			services = append(services, service)
		}
	}
	if len(services) == 0 {
		err = fmt.Errorf("Could not find service for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
	}
	return
}
// Matcher returns a generic matcher for a given label and field selector.
func Matcher(label labels.Selector, field fields.Selector) generic.Matcher {
	return generic.MatcherFunc(func(obj runtime.Object) (bool, error) {
		sa, ok := obj.(*api.ServiceAccount)
		if !ok {
			return false, fmt.Errorf("not a serviceaccount")
		}
		fields := SelectableFields(sa)
		return label.Matches(labels.Set(sa.Labels)) && field.Matches(fields), nil
	})
}
// MatchNamespace returns a generic matcher for a given label and field selector.
func MatchNamespace(label labels.Selector, field fields.Selector) generic.Matcher {
	return generic.MatcherFunc(func(obj runtime.Object) (bool, error) {
		namespaceObj, ok := obj.(*api.Namespace)
		if !ok {
			return false, fmt.Errorf("not a namespace")
		}
		fields := NamespaceToSelectableFields(namespaceObj)
		return label.Matches(labels.Set(namespaceObj.Labels)) && field.Matches(fields), nil
	})
}
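// The matchers above all share one shape: adapt "does this object satisfy the
// label and field selectors?" into a generic.Matcher via generic.MatcherFunc.
// A hedged usage sketch for MatchNamespace; the namespace fixture is an
// illustrative assumption:
func ExampleMatchNamespace() {
	m := MatchNamespace(labels.Set{"team": "infra"}.AsSelector(), fields.Everything())
	ns := &api.Namespace{ObjectMeta: api.ObjectMeta{Name: "ops", Labels: map[string]string{"team": "infra"}}}
	matched, err := m.Matches(ns)
	fmt.Println(matched, err) // true <nil>
}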
// Matcher returns a generic matcher for a given label and field selector.
func Matcher(label labels.Selector, field fields.Selector) generic.Matcher {
	return generic.MatcherFunc(func(obj runtime.Object) (bool, error) {
		tpr, ok := obj.(*extensions.ThirdPartyResource)
		if !ok {
			return false, fmt.Errorf("not a ThirdPartyResource")
		}
		fields := SelectableFields(tpr)
		return label.Matches(labels.Set(tpr.Labels)) && field.Matches(fields), nil
	})
}
// MatchResourceQuota returns a generic matcher for a given label and field selector.
func MatchResourceQuota(label labels.Selector, field fields.Selector) generic.Matcher {
	return generic.MatcherFunc(func(obj runtime.Object) (bool, error) {
		resourcequotaObj, ok := obj.(*api.ResourceQuota)
		if !ok {
			return false, fmt.Errorf("not a resourcequota")
		}
		fields := ResourceQuotaToSelectableFields(resourcequotaObj)
		return label.Matches(labels.Set(resourcequotaObj.Labels)) && field.Matches(fields), nil
	})
}
// Please note that selector is filtering among the pods that have gotten into
// the store; there may have been some filtering that already happened before
// that.
func (s storePodsNamespacer) List(selector labels.Selector) (pods api.PodList, err error) {
	list := api.PodList{}
	for _, m := range s.store.List() {
		pod := m.(*api.Pod)
		if s.namespace == api.NamespaceAll || s.namespace == pod.Namespace {
			if selector.Matches(labels.Set(pod.Labels)) {
				list.Items = append(list.Items, *pod)
			}
		}
	}
	return list, nil
}
// getNodesToDaemonPods returns a map from nodes to daemon pods (corresponding to ds) running on the nodes.
func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *extensions.DaemonSet) (map[string][]*api.Pod, error) {
	nodeToDaemonPods := make(map[string][]*api.Pod)
	daemonPods, err := dsc.podStore.Pods(ds.Namespace).List(labels.Set(ds.Spec.Selector).AsSelector())
	if err != nil {
		return nodeToDaemonPods, err
	}
	for i := range daemonPods.Items {
		nodeName := daemonPods.Items[i].Spec.NodeName
		nodeToDaemonPods[nodeName] = append(nodeToDaemonPods[nodeName], &daemonPods.Items[i])
	}
	return nodeToDaemonPods, nil
}
// MatchJob is the filter used by the generic etcd backend to route
// watch events from etcd to clients of the apiserver only interested in specific
// labels/fields.
func MatchJob(label labels.Selector, field fields.Selector) generic.Matcher {
	return &generic.SelectionPredicate{
		Label: label,
		Field: field,
		GetAttrs: func(obj runtime.Object) (labels.Set, fields.Set, error) {
			job, ok := obj.(*extensions.Job)
			if !ok {
				return nil, nil, fmt.Errorf("Given object is not a job.")
			}
			return labels.Set(job.ObjectMeta.Labels), JobToSelectableFields(job), nil
		},
	}
}
// MatchIngress is the filter used by the generic etcd backend to route
// watch events from etcd to clients of the apiserver only interested in specific
// labels/fields.
func MatchIngress(label labels.Selector, field fields.Selector) generic.Matcher {
	return &generic.SelectionPredicate{
		Label: label,
		Field: field,
		GetAttrs: func(obj runtime.Object) (labels.Set, fields.Set, error) {
			ingress, ok := obj.(*extensions.Ingress)
			if !ok {
				return nil, nil, fmt.Errorf("Given object is not an Ingress.")
			}
			return labels.Set(ingress.ObjectMeta.Labels), IngressToSelectableFields(ingress), nil
		},
	}
}
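// MatchJob and MatchIngress use generic.SelectionPredicate rather than a bare
// MatcherFunc: GetAttrs extracts the label and field sets once, and the
// predicate evaluates both selectors against them. A hedged usage sketch; the
// ingress fixture is an illustrative assumption:
func ExampleMatchIngress() {
	m := MatchIngress(labels.Set{"tier": "frontend"}.AsSelector(), fields.Everything())
	ing := &extensions.Ingress{ObjectMeta: api.ObjectMeta{Name: "web", Labels: map[string]string{"tier": "frontend"}}}
	matched, err := m.Matches(ing)
	fmt.Println(matched, err) // true <nil>
}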