func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *experimental.DaemonSet) { glog.Infof("Updating daemon set status") nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds) if err != nil { glog.Errorf("Error getting node to daemon pod mapping for daemon set %+v: %v", ds, err) } nodeList, err := dsc.nodeStore.List() if err != nil { glog.Errorf("Couldn't get list of nodes when updating daemon set %+v: %v", ds, err) } var desiredNumberScheduled, currentNumberScheduled, numberMisscheduled int for _, node := range nodeList.Items { nodeSelector := labels.Set(ds.Spec.Template.Spec.NodeSelector).AsSelector() shouldRun := nodeSelector.Matches(labels.Set(node.Labels)) numDaemonPods := len(nodeToDaemonPods[node.Name]) if numDaemonPods > 0 { currentNumberScheduled++ } if shouldRun { desiredNumberScheduled++ } else if numDaemonPods >= 0 { numberMisscheduled++ } } err = storeDaemonSetStatus(dsc.kubeClient.Experimental().DaemonSets(ds.Namespace), ds, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled) if err != nil { glog.Errorf("Error storing status for daemon set %+v: %v", ds, err) } }
// Please note that selector is filtering among the pods that have gotten into // the store; there may have been some filtering that already happened before // that. func (s storePodsNamespacer) List(selector labels.Selector) (pods api.PodList, err error) { list := api.PodList{} if s.namespace == api.NamespaceAll { for _, m := range s.indexer.List() { pod := m.(*api.Pod) if selector.Matches(labels.Set(pod.Labels)) { list.Items = append(list.Items, *pod) } } return list, nil } key := &api.Pod{ObjectMeta: api.ObjectMeta{Namespace: s.namespace}} items, err := s.indexer.Index(NamespaceIndex, key) if err != nil { glog.Warningf("can not retrieve list of objects using index : %v", err) for _, m := range s.indexer.List() { pod := m.(*api.Pod) if s.namespace == pod.Namespace && selector.Matches(labels.Set(pod.Labels)) { list.Items = append(list.Items, *pod) } } return list, err } for _, m := range items { pod := m.(*api.Pod) if selector.Matches(labels.Set(pod.Labels)) { list.Items = append(list.Items, *pod) } } return list, nil }
// List all the image streams that match the provided selector using a namespace index. // If the indexed list fails then we will fallback to listing from all namespaces and filter // by the namespace we want. func (s storeImageStreamsNamespacer) List(selector labels.Selector) ([]*imageapi.ImageStream, error) { streams := []*imageapi.ImageStream{} if s.namespace == kapi.NamespaceAll { for _, obj := range s.indexer.List() { stream := obj.(*imageapi.ImageStream) if selector.Matches(labels.Set(stream.Labels)) { streams = append(streams, stream) } } return streams, nil } items, err := s.indexer.ByIndex(cache.NamespaceIndex, s.namespace) if err != nil { return nil, err } for _, obj := range items { stream := obj.(*imageapi.ImageStream) if selector.Matches(labels.Set(stream.Labels)) { streams = append(streams, stream) } } return streams, nil }
func (s storeLimitRangesNamespacer) List(selector labels.Selector) ([]*kapi.LimitRange, error) { var controllers []*kapi.LimitRange if s.namespace == kapi.NamespaceAll { for _, m := range s.indexer.List() { rc := m.(*kapi.LimitRange) if selector.Matches(labels.Set(rc.Labels)) { controllers = append(controllers, rc) } } return controllers, nil } key := &kapi.LimitRange{ObjectMeta: kapi.ObjectMeta{Namespace: s.namespace}} items, err := s.indexer.Index(cache.NamespaceIndex, key) if err != nil { for _, m := range s.indexer.List() { rc := m.(*kapi.LimitRange) if s.namespace == rc.Namespace && selector.Matches(labels.Set(rc.Labels)) { controllers = append(controllers, rc) } } return controllers, nil } for _, m := range items { rc := m.(*kapi.LimitRange) if selector.Matches(labels.Set(rc.Labels)) { controllers = append(controllers, rc) } } return controllers, nil }
// GetPodControllers returns a list of jobs managing a pod. Returns an error only if no matching jobs are found. func (s *StoreToJobLister) GetPodJobs(pod *api.Pod) (jobs []extensions.Job, err error) { var selector labels.Selector var job extensions.Job if len(pod.Labels) == 0 { err = fmt.Errorf("No jobs found for pod %v because it has no labels", pod.Name) return } for _, m := range s.Store.List() { job = *m.(*extensions.Job) if job.Namespace != pod.Namespace { continue } labelSet := labels.Set(job.Spec.Selector) selector = labels.Set(job.Spec.Selector).AsSelector() // Job with a nil or empty selector match nothing if labelSet.AsSelector().Empty() || !selector.Matches(labels.Set(pod.Labels)) { continue } jobs = append(jobs, job) } if len(jobs) == 0 { err = fmt.Errorf("Could not find jobs for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) } return }
// TODO: Move this back to scheduler as a helper function that takes a Store, // rather than a method of StoreToServiceLister. func (s *StoreToServiceLister) GetPodServices(pod *api.Pod) (services []api.Service, err error) { var selector labels.Selector var service api.Service for _, m := range s.Store.List() { service = *m.(*api.Service) // consider only services that are in the same namespace as the pod if service.Namespace != pod.Namespace { continue } if service.Spec.Selector == nil { // services with nil selectors match nothing, not everything. continue } selector = labels.Set(service.Spec.Selector).AsSelector() if selector.Matches(labels.Set(pod.Labels)) { services = append(services, service) } } if len(services) == 0 { err = fmt.Errorf("could not find service for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) } return }
// storeToDeploymentNamespacer lists deployments under its namespace in the store. func (s storeToDeploymentNamespacer) List(selector labels.Selector) (deployments []extensions.Deployment, err error) { if s.namespace == api.NamespaceAll { for _, m := range s.indexer.List() { d := *(m.(*extensions.Deployment)) if selector.Matches(labels.Set(d.Labels)) { deployments = append(deployments, d) } } return } key := &extensions.Deployment{ObjectMeta: api.ObjectMeta{Namespace: s.namespace}} items, err := s.indexer.Index(NamespaceIndex, key) if err != nil { // Ignore error; do slow search without index. glog.Warningf("can not retrieve list of objects using index : %v", err) for _, m := range s.indexer.List() { d := *(m.(*extensions.Deployment)) if s.namespace == d.Namespace && selector.Matches(labels.Set(d.Labels)) { deployments = append(deployments, d) } } return deployments, nil } for _, m := range items { d := *(m.(*extensions.Deployment)) if selector.Matches(labels.Set(d.Labels)) { deployments = append(deployments, d) } } return }
// GetPodControllers returns a list of replication controllers managing a pod. Returns an error only if no matching controllers are found. func (s *StoreToReplicationControllerLister) GetPodControllers(pod *api.Pod) (controllers []api.ReplicationController, err error) { var selector labels.Selector var rc api.ReplicationController if len(pod.Labels) == 0 { err = fmt.Errorf("no controllers found for pod %v because it has no labels", pod.Name) return } for _, m := range s.Store.List() { rc = *m.(*api.ReplicationController) if rc.Namespace != pod.Namespace { continue } labelSet := labels.Set(rc.Spec.Selector) selector = labels.Set(rc.Spec.Selector).AsSelector() // If an rc with a nil or empty selector creeps in, it should match nothing, not everything. if labelSet.AsSelector().Empty() || !selector.Matches(labels.Set(pod.Labels)) { continue } controllers = append(controllers, rc) } if len(controllers) == 0 { err = fmt.Errorf("could not find controller for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) } return }
func (s storeReplicationControllersNamespacer) List(selector labels.Selector) ([]api.ReplicationController, error) { controllers := []api.ReplicationController{} if s.namespace == api.NamespaceAll { for _, m := range s.indexer.List() { rc := *(m.(*api.ReplicationController)) if selector.Matches(labels.Set(rc.Labels)) { controllers = append(controllers, rc) } } return controllers, nil } key := &api.ReplicationController{ObjectMeta: api.ObjectMeta{Namespace: s.namespace}} items, err := s.indexer.Index(NamespaceIndex, key) if err != nil { // Ignore error; do slow search without index. glog.Warningf("can not retrieve list of objects using index : %v", err) for _, m := range s.indexer.List() { rc := *(m.(*api.ReplicationController)) if s.namespace == rc.Namespace && selector.Matches(labels.Set(rc.Labels)) { controllers = append(controllers, rc) } } return controllers, nil } for _, m := range items { rc := *(m.(*api.ReplicationController)) if selector.Matches(labels.Set(rc.Labels)) { controllers = append(controllers, rc) } } return controllers, nil }
func ValidateJobSpec(spec *experimental.JobSpec) errs.ValidationErrorList { allErrs := errs.ValidationErrorList{} if spec.Parallelism != nil && *spec.Parallelism < 0 { allErrs = append(allErrs, errs.NewFieldInvalid("parallelism", spec.Parallelism, isNegativeErrorMsg)) } if spec.Completions != nil && *spec.Completions < 0 { allErrs = append(allErrs, errs.NewFieldInvalid("completions", spec.Completions, isNegativeErrorMsg)) } selector := labels.Set(spec.Selector).AsSelector() if selector.Empty() { allErrs = append(allErrs, errs.NewFieldRequired("selector")) } if spec.Template == nil { allErrs = append(allErrs, errs.NewFieldRequired("template")) } else { labels := labels.Set(spec.Template.Labels) if !selector.Matches(labels) { allErrs = append(allErrs, errs.NewFieldInvalid("template.labels", spec.Template.Labels, "selector does not match template")) } allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(spec.Template).Prefix("template")...) if spec.Template.Spec.RestartPolicy != api.RestartPolicyOnFailure && spec.Template.Spec.RestartPolicy != api.RestartPolicyNever { allErrs = append(allErrs, errs.NewFieldValueNotSupported("template.spec.restartPolicy", spec.Template.Spec.RestartPolicy, []string{string(api.RestartPolicyOnFailure), string(api.RestartPolicyNever)})) } } return allErrs }
// Please note that selector is filtering among the pods that have gotten into // the store; there may have been some filtering that already happened before // that. // We explicitly don't return api.PodList, to avoid expensive allocations, which // in most cases are unnecessary. func (s storePodsNamespacer) List(selector labels.Selector) (pods []*api.Pod, err error) { if s.namespace == api.NamespaceAll { for _, m := range s.indexer.List() { pod := m.(*api.Pod) if selector.Matches(labels.Set(pod.Labels)) { pods = append(pods, pod) } } return pods, nil } key := &api.Pod{ObjectMeta: api.ObjectMeta{Namespace: s.namespace}} items, err := s.indexer.Index(NamespaceIndex, key) if err != nil { // Ignore error; do slow search without index. glog.Warningf("can not retrieve list of objects using index : %v", err) for _, m := range s.indexer.List() { pod := m.(*api.Pod) if s.namespace == pod.Namespace && selector.Matches(labels.Set(pod.Labels)) { pods = append(pods, pod) } } return pods, nil } for _, m := range items { pod := m.(*api.Pod) if selector.Matches(labels.Set(pod.Labels)) { pods = append(pods, pod) } } return pods, nil }
// ValidateDaemonSetSpec tests if required fields in the DaemonSetSpec are set. func ValidateDaemonSetSpec(spec *experimental.DaemonSetSpec) errs.ValidationErrorList { allErrs := errs.ValidationErrorList{} selector := labels.Set(spec.Selector).AsSelector() if selector.Empty() { allErrs = append(allErrs, errs.NewFieldRequired("selector")) } if spec.Template == nil { allErrs = append(allErrs, errs.NewFieldRequired("template")) } else { labels := labels.Set(spec.Template.Labels) if !selector.Matches(labels) { allErrs = append(allErrs, errs.NewFieldInvalid("template.metadata.labels", spec.Template.Labels, "selector does not match template")) } allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(spec.Template).Prefix("template")...) // Daemons typically run on more than one node, so mark Read-Write persistent disks as invalid. allErrs = append(allErrs, apivalidation.ValidateReadOnlyPersistentDisks(spec.Template.Spec.Volumes).Prefix("template.spec.volumes")...) // RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec(). if spec.Template.Spec.RestartPolicy != api.RestartPolicyAlways { allErrs = append(allErrs, errs.NewFieldValueNotSupported("template.spec.restartPolicy", spec.Template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)})) } } return allErrs }
// List all the deploymentconfigs that match the provided selector using a namespace index. // If the indexed list fails then we will fallback to listing from all namespaces and filter // by the namespace we want. func (s storeDeploymentConfigsNamespacer) List(selector labels.Selector) ([]*deployapi.DeploymentConfig, error) { configs := []*deployapi.DeploymentConfig{} if s.namespace == kapi.NamespaceAll { for _, obj := range s.indexer.List() { dc := obj.(*deployapi.DeploymentConfig) if selector.Matches(labels.Set(dc.Labels)) { configs = append(configs, dc) } } return configs, nil } key := &deployapi.DeploymentConfig{ObjectMeta: kapi.ObjectMeta{Namespace: s.namespace}} items, err := s.indexer.Index(cache.NamespaceIndex, key) if err != nil { // Ignore error; do slow search without index. glog.Warningf("can not retrieve list of objects using index : %v", err) for _, obj := range s.indexer.List() { dc := obj.(*deployapi.DeploymentConfig) if s.namespace == dc.Namespace && selector.Matches(labels.Set(dc.Labels)) { configs = append(configs, dc) } } return configs, nil } for _, obj := range items { dc := obj.(*deployapi.DeploymentConfig) if selector.Matches(labels.Set(dc.Labels)) { configs = append(configs, dc) } } return configs, nil }
// GetPodControllers returns a list of replication controllers managing a pod. Returns an error only if no matching controllers are found. func (s *StoreToReplicationControllerLister) GetPodControllers(pod *api.Pod) (controllers []api.ReplicationController, err error) { var selector labels.Selector var rc api.ReplicationController if len(pod.Labels) == 0 { err = fmt.Errorf("no controllers found for pod %v because it has no labels", pod.Name) return } key := &api.ReplicationController{ObjectMeta: api.ObjectMeta{Namespace: pod.Namespace}} items, err := s.Indexer.Index(NamespaceIndex, key) if err != nil { return } for _, m := range items { rc = *m.(*api.ReplicationController) selector = labels.Set(rc.Spec.Selector).AsSelectorPreValidated() // If an rc with a nil or empty selector creeps in, it should match nothing, not everything. if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { continue } controllers = append(controllers, rc) } if len(controllers) == 0 { err = fmt.Errorf("could not find controller for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) } return }
// Classify first filters out inactive pods, then it classify the remaining pods // into three categories: 1. matchesAndControlled are the pods whose labels // match the selector of the RC, and have a controllerRef pointing to the // controller 2. matchesNeedsController are the pods whose labels match the RC, // but don't have a controllerRef. (Pods with matching labels but with a // controllerRef pointing to other object are ignored) 3. controlledDoesNotMatch // are the pods that have a controllerRef pointing to the controller, but their // labels no longer match the selector. func (m *PodControllerRefManager) Classify(pods []*v1.Pod) ( matchesAndControlled []*v1.Pod, matchesNeedsController []*v1.Pod, controlledDoesNotMatch []*v1.Pod) { for i := range pods { pod := pods[i] if !IsPodActive(pod) { glog.V(4).Infof("Ignoring inactive pod %v/%v in state %v, deletion time %v", pod.Namespace, pod.Name, pod.Status.Phase, pod.DeletionTimestamp) continue } controllerRef := getControllerOf(pod.ObjectMeta) if controllerRef != nil { if controllerRef.UID == m.controllerObject.UID { // already controlled if m.controllerSelector.Matches(labels.Set(pod.Labels)) { matchesAndControlled = append(matchesAndControlled, pod) } else { controlledDoesNotMatch = append(controlledDoesNotMatch, pod) } } else { // ignoring the pod controlled by other controller glog.V(4).Infof("Ignoring pod %v/%v, it's owned by [%s/%s, name: %s, uid: %s]", pod.Namespace, pod.Name, controllerRef.APIVersion, controllerRef.Kind, controllerRef.Name, controllerRef.UID) continue } } else { if !m.controllerSelector.Matches(labels.Set(pod.Labels)) { continue } matchesNeedsController = append(matchesNeedsController, pod) } } return matchesAndControlled, matchesNeedsController, controlledDoesNotMatch }
// List all the ServiceAccounts that match the provided selector using a namespace index. // If the indexed list fails then we will fallback to listing from all namespaces and filter // by the namespace we want. func (s storeServiceAccountsNamespacer) List(selector labels.Selector) ([]*kapi.ServiceAccount, error) { serviceAccounts := []*kapi.ServiceAccount{} if s.namespace == kapi.NamespaceAll { for _, obj := range s.indexer.List() { bc := obj.(*kapi.ServiceAccount) if selector.Matches(labels.Set(bc.Labels)) { serviceAccounts = append(serviceAccounts, bc) } } return serviceAccounts, nil } items, err := s.indexer.ByIndex(cache.NamespaceIndex, s.namespace) if err != nil { return nil, err } for _, obj := range items { bc := obj.(*kapi.ServiceAccount) if selector.Matches(labels.Set(bc.Labels)) { serviceAccounts = append(serviceAccounts, bc) } } return serviceAccounts, nil }
// GetPodDaemonSets returns a list of daemon sets managing a pod. // Returns an error if and only if no matching daemon sets are found. func (s *StoreToDaemonSetLister) GetPodDaemonSets(pod *api.Pod) (daemonSets []extensions.DaemonSet, err error) { var selector labels.Selector var daemonSet extensions.DaemonSet if len(pod.Labels) == 0 { err = fmt.Errorf("No daemon sets found for pod %v because it has no labels", pod.Name) return } for _, m := range s.Store.List() { daemonSet = *m.(*extensions.DaemonSet) if daemonSet.Namespace != pod.Namespace { continue } selector = labels.Set(daemonSet.Spec.Selector).AsSelector() // If a daemonSet with a nil or empty selector creeps in, it should match nothing, not everything. if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { continue } daemonSets = append(daemonSets, daemonSet) } if len(daemonSets) == 0 { err = fmt.Errorf("Could not find daemon set for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) } return }
// List all the deploymentconfigs that match the provided selector using a namespace index. // If the indexed list fails then we will fallback to listing from all namespaces and filter // by the namespace we want. func (s storeDeploymentConfigsNamespacer) List(selector labels.Selector) ([]*deployapi.DeploymentConfig, error) { configs := []*deployapi.DeploymentConfig{} if s.namespace == kapi.NamespaceAll { for _, obj := range s.indexer.List() { dc := obj.(*deployapi.DeploymentConfig) if selector.Matches(labels.Set(dc.Labels)) { configs = append(configs, dc) } } return configs, nil } items, err := s.indexer.ByIndex(cache.NamespaceIndex, s.namespace) if err != nil { return nil, err } for _, obj := range items { dc := obj.(*deployapi.DeploymentConfig) if selector.Matches(labels.Set(dc.Labels)) { configs = append(configs, dc) } } return configs, nil }
// GetDeploymentsForRC returns a list of deployments managing a replication controller. Returns an error only if no matching deployments are found. func (s *StoreToDeploymentLister) GetDeploymentsForRC(rc *api.ReplicationController) (deployments []extensions.Deployment, err error) { var selector labels.Selector var d extensions.Deployment if len(rc.Labels) == 0 { err = fmt.Errorf("no deployments found for replication controller %v because it has no labels", rc.Name) return } // TODO: MODIFY THIS METHOD so that it checks for the podTemplateSpecHash label for _, m := range s.Store.List() { d = *m.(*extensions.Deployment) if d.Namespace != rc.Namespace { continue } labelSet := labels.Set(d.Spec.Selector) selector = labels.Set(d.Spec.Selector).AsSelector() // If a deployment with a nil or empty selector creeps in, it should match nothing, not everything. if labelSet.AsSelector().Empty() || !selector.Matches(labels.Set(rc.Labels)) { continue } deployments = append(deployments, d) } if len(deployments) == 0 { err = fmt.Errorf("could not find deployments set for replication controller %s in namespace %s with labels: %v", rc.Name, rc.Namespace, rc.Labels) } return }
// CalculateAntiAffinityPriority spreads pods by minimizing the number of pods belonging to the same service
// on machines with the same value for a particular label.
// The label to be considered is provided to the struct (ServiceAntiAffinity).
func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*api.Node) (schedulerapi.HostPriorityList, error) {
	var nsServicePods []*api.Pod
	// NOTE(review): a GetPodServices error is silently treated as "pod has no
	// service", leaving nsServicePods empty — presumably intentional (no
	// service means nothing to spread); confirm against GetPodServices'
	// contract, which errors when no services match.
	if services, err := s.serviceLister.GetPodServices(pod); err == nil {
		// just use the first service and get the other pods within the service
		// TODO: a separate predicate can be created that tries to handle all services for the pod
		selector := labels.SelectorFromSet(services[0].Spec.Selector)
		pods, err := s.podLister.List(selector)
		if err != nil {
			return nil, err
		}
		// consider only the pods that belong to the same namespace
		for _, nsPod := range pods {
			if nsPod.Namespace == pod.Namespace {
				nsServicePods = append(nsServicePods, nsPod)
			}
		}
	}
	// separate out the nodes that have the label from the ones that don't
	otherNodes := []string{}
	labeledNodes := map[string]string{}
	for _, node := range nodes {
		if labels.Set(node.Labels).Has(s.label) {
			label := labels.Set(node.Labels).Get(s.label)
			labeledNodes[node.Name] = label
		} else {
			otherNodes = append(otherNodes, node.Name)
		}
	}
	// Count service pods per label value; pods on unlabeled (or unknown)
	// nodes are skipped.
	podCounts := map[string]int{}
	for _, pod := range nsServicePods {
		label, exists := labeledNodes[pod.Spec.NodeName]
		if !exists {
			continue
		}
		podCounts[label]++
	}
	numServicePods := len(nsServicePods)
	result := []schedulerapi.HostPriority{}
	//score int - scale of 0-maxPriority
	// 0 being the lowest priority and maxPriority being the highest
	for node := range labeledNodes {
		// initializing to the default/max node score of maxPriority
		fScore := float32(maxPriority)
		if numServicePods > 0 {
			// Fewer same-service pods sharing this node's label value =>
			// higher score; the float result is truncated toward zero.
			fScore = maxPriority * (float32(numServicePods-podCounts[labeledNodes[node]]) / float32(numServicePods))
		}
		result = append(result, schedulerapi.HostPriority{Host: node, Score: int(fScore)})
	}
	// add the open nodes with a score of 0
	for _, node := range otherNodes {
		result = append(result, schedulerapi.HostPriority{Host: node, Score: 0})
	}
	return result, nil
}
// verifyExpectedRcsExistAndGetExpectedPods checks that exactly one
// replication controller or deployment exists per expected k8s-app label in
// the system namespace, and returns the UIDs of the (non-terminating) pods
// they select.
func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error) {
	expectedPods := []string{}
	// Iterate over the labels that identify the replication controllers that we
	// want to check. The rcLabels contains the value values for the k8s-app key
	// that identify the replication controllers that we want to check. Using a label
	// rather than an explicit name is preferred because the names will typically have
	// a version suffix e.g. heapster-monitoring-v1 and this will change after a rolling
	// update e.g. to heapster-monitoring-v2. By using a label query we can check for the
	// situation when a heapster-monitoring-v1 and heapster-monitoring-v2 replication controller
	// is running (which would be an error except during a rolling update).
	for _, rcLabel := range rcLabels {
		selector := labels.Set{"k8s-app": rcLabel}.AsSelector()
		options := api.ListOptions{LabelSelector: selector}
		deploymentList, err := c.Deployments(api.NamespaceSystem).List(options)
		if err != nil {
			return nil, err
		}
		rcList, err := c.ReplicationControllers(api.NamespaceSystem).List(options)
		if err != nil {
			return nil, err
		}
		// Exactly one of {RC, deployment} must own each monitored app.
		// NOTE(review): the error message reports only len(rcList.Items),
		// not the deployment count — potentially confusing when the
		// deployment side is at fault.
		if (len(rcList.Items) + len(deploymentList.Items)) != 1 {
			return nil, fmt.Errorf("expected to find one replica for RC or deployment with label %s but got %d", rcLabel, len(rcList.Items))
		}
		// Check all the replication controllers.
		for _, rc := range rcList.Items {
			selector := labels.Set(rc.Spec.Selector).AsSelector()
			options := api.ListOptions{LabelSelector: selector}
			podList, err := c.Pods(api.NamespaceSystem).List(options)
			if err != nil {
				return nil, err
			}
			for _, pod := range podList.Items {
				// Skip pods that are already being torn down.
				if pod.DeletionTimestamp != nil {
					continue
				}
				expectedPods = append(expectedPods, string(pod.UID))
			}
		}
		// Do the same for all deployments.
		for _, rc := range deploymentList.Items {
			selector := labels.Set(rc.Spec.Selector.MatchLabels).AsSelector()
			options := api.ListOptions{LabelSelector: selector}
			podList, err := c.Pods(api.NamespaceSystem).List(options)
			if err != nil {
				return nil, err
			}
			for _, pod := range podList.Items {
				// Skip pods that are already being torn down.
				if pod.DeletionTimestamp != nil {
					continue
				}
				expectedPods = append(expectedPods, string(pod.UID))
			}
		}
	}
	return expectedPods, nil
}
func MappingTypeForPod(pod *api.Pod) HostPortMappingType { filter := map[string]string{ PortMappingLabelKey: string(HostPortMappingFixed), } selector := labels.Set(filter).AsSelector() if selector.Matches(labels.Set(pod.Labels)) { return HostPortMappingFixed } return HostPortMappingWildcard }
// NewHostPortMapper returns a new mapper based // based on the port mapping key value func NewHostPortMapper(pod *api.Pod) HostPortMapper { filter := map[string]string{ meta.PortMappingKey: HostPortMappingFixed, } selector := labels.Set(filter).AsSelector() if selector.Matches(labels.Set(pod.Labels)) { return HostPortMapperFunc(FixedMapper) } return HostPortMapperFunc(WildcardMapper) }
// isControllerMatch take a Pod and ReplicationController, return whether the Pod and ReplicationController are matching // TODO(mqliang): This logic is a copy from GetPodControllers(), remove the duplication func isControllerMatch(pod *api.Pod, rc *api.ReplicationController) bool { if rc.Namespace != pod.Namespace { return false } selector := labels.Set(rc.Spec.Selector).AsSelectorPreValidated() // If an rc with a nil or empty selector creeps in, it should match nothing, not everything. if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { return false } return true }
// Checks whether the given node has pods which satisfy all the required pod affinity scheduling rules.
// If node has pods which satisfy all the required pod affinity scheduling rules then return true.
func (checker *PodAffinityChecker) NodeMatchesHardPodAffinity(pod *api.Pod, allPods []*api.Pod, node *api.Node, podAffinity *api.PodAffinity) bool {
	var podAffinityTerms []api.PodAffinityTerm
	if len(podAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 {
		podAffinityTerms = podAffinity.RequiredDuringSchedulingIgnoredDuringExecution
	}
	// TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution.
	//if len(podAffinity.RequiredDuringSchedulingRequiredDuringExecution) != 0 {
	//	podAffinityTerms = append(podAffinityTerms, podAffinity.RequiredDuringSchedulingRequiredDuringExecution...)
	//}
	// Every term must be satisfied (or provably disregardable) for the node
	// to pass; the first failing term short-circuits with false.
	for _, podAffinityTerm := range podAffinityTerms {
		podAffinityTermMatches, err := checker.AnyPodMatchesPodAffinityTerm(pod, allPods, node, podAffinityTerm)
		if err != nil {
			glog.V(10).Infof("Cannot schedule pod %+v onto node %v, an error ocurred when checking existing pods on the node for PodAffinityTerm %v err: %v", podName(pod), node.Name, podAffinityTerm, err)
			return false
		}
		if !podAffinityTermMatches {
			// TODO: Think about whether this can be simplified once we have controllerRef
			// Check if it is in special case that the requiredDuringScheduling affinity requirement can be disregarded.
			// If the requiredDuringScheduling affinity requirement matches a pod's own labels and namespace, and there are no other such pods
			// anywhere, then disregard the requirement.
			// This allows rules like "schedule all of the pods of this collection to the same zone" to not block forever
			// because the first pod of the collection can't be scheduled.
			names := priorityutil.GetNamespacesFromPodAffinityTerm(pod, podAffinityTerm)
			labelSelector, err := unversioned.LabelSelectorAsSelector(podAffinityTerm.LabelSelector)
			// The special case only applies when the term would select the
			// pod itself (same namespace and matching labels).
			if err != nil || !names.Has(pod.Namespace) || !labelSelector.Matches(labels.Set(pod.Labels)) {
				glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because none of the existing pods on this node satisfy the PodAffinityTerm %v, err: %+v", podName(pod), node.Name, podAffinityTerm, err)
				return false
			}
			// the affinity is to put the pod together with other pods from its same service or controller
			filteredPods := priorityutil.FilterPodsByNameSpaces(names, allPods)
			for _, filteredPod := range filteredPods {
				// if found an existing pod from same service or RC,
				// the affinity scheduling rules cannot be disregarded.
				if labelSelector.Matches(labels.Set(filteredPod.Labels)) {
					glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because none of the existing pods on this node satisfy the PodAffinityTerm %v", podName(pod), node.Name, podAffinityTerm)
					return false
				}
			}
		}
	}
	// all the required pod affinity scheduling rules satisfied
	glog.V(10).Infof("All the required pod affinity scheduling rules are satisfied for Pod %+v, on node %v", podName(pod), node.Name)
	return true
}
// NewREST returns a RESTStorage object that will work with testtype.
func NewREST(config *storagebackend.Config, storageDecorator generic.StorageDecorator) *REST {
	prefix := "/testtype"
	newListFunc := func() runtime.Object { return &testgroup.TestTypeList{} }
	// Usually you should reuse your RESTCreateStrategy.
	strategy := &NotNamespaceScoped{}
	// getAttrs extracts the label set used for list filtering; TestType
	// exposes no selectable fields, hence the nil fields.Set.
	getAttrs := func(obj runtime.Object) (labels.Set, fields.Set, error) {
		testObj, ok := obj.(*testgroup.TestType)
		if !ok {
			return nil, nil, fmt.Errorf("not a TestType")
		}
		return labels.Set(testObj.Labels), nil, nil
	}
	// NOTE(review): the decorator's error return is discarded here —
	// presumably acceptable for test-only storage, but confirm.
	storageInterface, _ := storageDecorator(
		config, 100, &testgroup.TestType{}, prefix, strategy, newListFunc, getAttrs, storage.NoTriggerPublisher)
	store := &genericregistry.Store{
		NewFunc: func() runtime.Object { return &testgroup.TestType{} },
		// NewListFunc returns an object capable of storing results of an etcd list.
		NewListFunc: newListFunc,
		// Produces a path that etcd understands, to the root of the resource
		// by combining the namespace in the context with the given prefix.
		KeyRootFunc: func(ctx api.Context) string {
			return genericregistry.NamespaceKeyRootFunc(ctx, prefix)
		},
		// Produces a path that etcd understands, to the resource by combining
		// the namespace in the context with the given prefix.
		KeyFunc: func(ctx api.Context, name string) (string, error) {
			return genericregistry.NamespaceKeyFunc(ctx, prefix, name)
		},
		// Retrieve the name field of the resource.
		ObjectNameFunc: func(obj runtime.Object) (string, error) {
			return obj.(*testgroup.TestType).Name, nil
		},
		// Used to match objects based on labels/fields for list.
		PredicateFunc: func(label labels.Selector, field fields.Selector) storage.SelectionPredicate {
			return storage.SelectionPredicate{
				Label: label,
				Field: field,
				GetAttrs: func(obj runtime.Object) (labels.Set, fields.Set, error) {
					testType, ok := obj.(*testgroup.TestType)
					if !ok {
						return nil, nil, fmt.Errorf("unexpected type of given object")
					}
					return labels.Set(testType.ObjectMeta.Labels), fields.Set{}, nil
				},
			}
		},
		Storage: storageInterface,
	}
	return &REST{store}
}
// nodeShouldRunDaemonPod decides whether ds should place a daemon pod on
// node: the node must match the set's node selector / node name, not be out
// of disk, and have room (resources and host ports) for the daemon pod
// alongside the node's existing non-daemon pods.
func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *api.Node, ds *extensions.DaemonSet) bool {
	// Check if the node satisfies the daemon set's node selector.
	nodeSelector := labels.Set(ds.Spec.Template.Spec.NodeSelector).AsSelector()
	if !nodeSelector.Matches(labels.Set(node.Labels)) {
		return false
	}
	// If the daemon set specifies a node name, check that it matches with node.Name.
	if !(ds.Spec.Template.Spec.NodeName == "" || ds.Spec.Template.Spec.NodeName == node.Name) {
		return false
	}
	// Never place daemons on nodes reporting an out-of-disk condition.
	for _, c := range node.Status.Conditions {
		if c.Type == api.NodeOutOfDisk && c.Status == api.ConditionTrue {
			return false
		}
	}
	// Simulate the new daemon pod on this node, then collect the node's
	// existing pods so the resource/port checks see the full picture.
	newPod := &api.Pod{Spec: ds.Spec.Template.Spec}
	newPod.Spec.NodeName = node.Name
	pods := []*api.Pod{newPod}
	for _, m := range dsc.podStore.Indexer.List() {
		pod := m.(*api.Pod)
		if pod.Spec.NodeName != node.Name {
			continue
		}
		// Terminated pods no longer consume resources.
		if pod.Status.Phase == api.PodSucceeded || pod.Status.Phase == api.PodFailed {
			continue
		}
		// ignore pods that belong to the daemonset when taking into account whether
		// a daemonset should bind to a node.
		if pds := dsc.getPodDaemonSet(pod); pds != nil && ds.Name == pds.Name {
			continue
		}
		pods = append(pods, pod)
	}
	_, notFittingCPU, notFittingMemory, notFittingNvidiaGPU := predicates.CheckPodsExceedingFreeResources(pods, node.Status.Allocatable)
	if len(notFittingCPU)+len(notFittingMemory)+len(notFittingNvidiaGPU) != 0 {
		// NOTE(review): "insufficent" is misspelled in the emitted event
		// message; left as-is here since consumers may match on the text —
		// fix separately.
		dsc.eventRecorder.Eventf(ds, api.EventTypeNormal, "FailedPlacement", "failed to place pod on %q: insufficent free resources", node.ObjectMeta.Name)
		return false
	}
	// Reject the node if the daemon pod's host ports collide with any
	// existing pod's host ports.
	ports := sets.String{}
	for _, pod := range pods {
		if errs := validation.AccumulateUniqueHostPorts(pod.Spec.Containers, &ports, field.NewPath("spec", "containers")); len(errs) > 0 {
			dsc.eventRecorder.Eventf(ds, api.EventTypeNormal, "FailedPlacement", "failed to place pod on %q: host port conflict", node.ObjectMeta.Name)
			return false
		}
	}
	return true
}
// getOverlappingControllers finds rcs that this controller overlaps, as well as rcs overlapping this controller.
func getOverlappingControllers(rcClient coreclient.ReplicationControllerInterface, rc *api.ReplicationController) ([]api.ReplicationController, error) {
	rcs, err := rcClient.List(api.ListOptions{})
	if err != nil {
		return nil, fmt.Errorf("error getting replication controllers: %v", err)
	}
	var matchingRCs []api.ReplicationController
	// Treat each controller's selector map as a label set and check whether
	// either selector, built from that map, matches the other's set.
	rcLabels := labels.Set(rc.Spec.Selector)
	for _, controller := range rcs.Items {
		newRCLabels := labels.Set(controller.Spec.Selector)
		// NOTE(review): rc itself appears in rcs.Items and its selector
		// matches its own set, so the result presumably includes rc —
		// confirm callers expect that. An empty selector also matches
		// everything here via SelectorFromSet; verify that is intended.
		if labels.SelectorFromSet(newRCLabels).Matches(rcLabels) || labels.SelectorFromSet(rcLabels).Matches(newRCLabels) {
			matchingRCs = append(matchingRCs, controller)
		}
	}
	return matchingRCs, nil
}
// OverlapsWith returns true when two given deployments are different and overlap with each other func OverlapsWith(current, other *extensions.Deployment) (bool, error) { if current.UID == other.UID { return false, nil } currentSelector, err := metav1.LabelSelectorAsSelector(current.Spec.Selector) if err != nil { return false, fmt.Errorf("deployment %s/%s has invalid label selector: %v", current.Namespace, current.Name, err) } otherSelector, err := metav1.LabelSelectorAsSelector(other.Spec.Selector) if err != nil { return false, fmt.Errorf("deployment %s/%s has invalid label selector: %v", other.Namespace, other.Name, err) } return (!currentSelector.Empty() && currentSelector.Matches(labels.Set(other.Spec.Template.Labels))) || (!otherSelector.Empty() && otherSelector.Matches(labels.Set(current.Spec.Template.Labels))), nil }
// GetAttrs returns labels and fields of a given object for filtering purposes. func GetAttrs(obj runtime.Object) (labels.Set, fields.Set, error) { networkPolicy, ok := obj.(*extensions.NetworkPolicy) if !ok { return nil, nil, fmt.Errorf("given object is not a NetworkPolicy.") } return labels.Set(networkPolicy.ObjectMeta.Labels), NetworkPolicyToSelectableFields(networkPolicy), nil }