// hasHostPortConflicts detects pods with conflicting host ports.
func hasHostPortConflicts(pods []*api.Pod) bool {
	ports := sets.String{}
	for _, pod := range pods {
		if errs := validation.AccumulateUniqueHostPorts(pod.Spec.Containers, &ports, field.NewPath("spec", "containers")); len(errs) > 0 {
			glog.Errorf("Pod %q: HostPort is already allocated, ignoring: %v", format.Pod(pod), errs)
			return true
		}
		if errs := validation.AccumulateUniqueHostPorts(pod.Spec.InitContainers, &ports, field.NewPath("spec", "initContainers")); len(errs) > 0 {
			glog.Errorf("Pod %q: HostPort is already allocated, ignoring: %v", format.Pod(pod), errs)
			return true
		}
	}
	return false
}
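// A minimal usage sketch, assuming a caller that wants to admit one new pod:
// `canAdmitPod`, `existingPods`, and `candidate` are hypothetical names for
// illustration, not identifiers from the source above.
func canAdmitPod(existingPods []*api.Pod, candidate *api.Pod) bool {
	// Copy before appending so the caller's slice is never mutated.
	pods := make([]*api.Pod, 0, len(existingPods)+1)
	pods = append(pods, existingPods...)
	pods = append(pods, candidate)
	// Any host-port conflict in the combined set means the candidate
	// cannot be admitted alongside the pods already bound to the node.
	return !hasHostPortConflicts(pods)
}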
func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *api.Node, ds *extensions.DaemonSet) bool {
	// Check if the node satisfies the daemon set's node selector.
	nodeSelector := labels.Set(ds.Spec.Template.Spec.NodeSelector).AsSelector()
	if !nodeSelector.Matches(labels.Set(node.Labels)) {
		return false
	}
	// If the daemon set specifies a node name, check that it matches with node.Name.
	if !(ds.Spec.Template.Spec.NodeName == "" || ds.Spec.Template.Spec.NodeName == node.Name) {
		return false
	}
	for _, c := range node.Status.Conditions {
		if c.Type == api.NodeOutOfDisk && c.Status == api.ConditionTrue {
			return false
		}
	}

	newPod := &api.Pod{Spec: ds.Spec.Template.Spec}
	newPod.Spec.NodeName = node.Name
	pods := []*api.Pod{newPod}

	for _, m := range dsc.podStore.Indexer.List() {
		pod := m.(*api.Pod)
		if pod.Spec.NodeName != node.Name {
			continue
		}
		if pod.Status.Phase == api.PodSucceeded || pod.Status.Phase == api.PodFailed {
			continue
		}
		// Ignore pods that belong to the daemon set when taking into account
		// whether a daemon set should bind to a node.
		if pds := dsc.getPodDaemonSet(pod); pds != nil && ds.Name == pds.Name {
			continue
		}
		pods = append(pods, pod)
	}

	_, notFittingCPU, notFittingMemory, notFittingNvidiaGPU := predicates.CheckPodsExceedingFreeResources(pods, node.Status.Allocatable)
	if len(notFittingCPU)+len(notFittingMemory)+len(notFittingNvidiaGPU) != 0 {
		dsc.eventRecorder.Eventf(ds, api.EventTypeNormal, "FailedPlacement", "failed to place pod on %q: insufficient free resources", node.ObjectMeta.Name)
		return false
	}

	ports := sets.String{}
	for _, pod := range pods {
		if errs := validation.AccumulateUniqueHostPorts(pod.Spec.Containers, &ports, field.NewPath("spec", "containers")); len(errs) > 0 {
			dsc.eventRecorder.Eventf(ds, api.EventTypeNormal, "FailedPlacement", "failed to place pod on %q: host port conflict", node.ObjectMeta.Name)
			return false
		}
	}
	return true
}
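// A rough sketch of how a sync loop might use nodeShouldRunDaemonPod to pick
// the nodes that still need a daemon pod; `nodesWantingDaemonPod` is a
// hypothetical helper for illustration, not part of the controller above.
func (dsc *DaemonSetsController) nodesWantingDaemonPod(nodes []*api.Node, ds *extensions.DaemonSet) []*api.Node {
	var wanted []*api.Node
	for _, node := range nodes {
		// Reuse the placement predicate: node selector, node name, disk
		// pressure, free resources, and host ports are all checked there.
		if dsc.nodeShouldRunDaemonPod(node, ds) {
			wanted = append(wanted, node)
		}
	}
	return wanted
}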
func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *api.Node, ds *extensions.DaemonSet) bool {
	// Check if the node satisfies the daemon set's node selector.
	nodeSelector := labels.Set(ds.Spec.Template.Spec.NodeSelector).AsSelector()
	if !nodeSelector.Matches(labels.Set(node.Labels)) {
		return false
	}
	// If the daemon set specifies a node name, check that it matches with node.Name.
	if !(ds.Spec.Template.Spec.NodeName == "" || ds.Spec.Template.Spec.NodeName == node.Name) {
		return false
	}
	// If the node is not ready, don't run on it.
	// TODO(mikedanese): remove this once daemonpods forgive nodes
	if !api.IsNodeReady(node) {
		return false
	}
	for _, c := range node.Status.Conditions {
		if c.Type == api.NodeOutOfDisk && c.Status == api.ConditionTrue {
			return false
		}
	}

	newPod := &api.Pod{Spec: ds.Spec.Template.Spec}
	newPod.Spec.NodeName = node.Name
	pods := []*api.Pod{newPod}

	for _, m := range dsc.podStore.Store.List() {
		pod := m.(*api.Pod)
		if pod.Spec.NodeName != node.Name {
			continue
		}
		pods = append(pods, pod)
	}

	_, notFittingCPU, notFittingMemory := predicates.CheckPodsExceedingFreeResources(pods, node.Status.Allocatable)
	if len(notFittingCPU)+len(notFittingMemory) != 0 {
		return false
	}

	ports := sets.String{}
	for _, pod := range pods {
		if errs := validation.AccumulateUniqueHostPorts(pod.Spec.Containers, &ports, field.NewPath("spec", "containers")); len(errs) > 0 {
			return false
		}
	}
	return true
}
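// This revision additionally skips nodes that are not ready. A sketch of what
// the conventional NodeReady check amounts to, assuming the usual condition
// semantics; this is an illustration, not the upstream api.IsNodeReady source.
func isNodeReady(node *api.Node) bool {
	for _, c := range node.Status.Conditions {
		// A node is ready only when its NodeReady condition is True;
		// Unknown and False both count as not ready.
		if c.Type == api.NodeReady {
			return c.Status == api.ConditionTrue
		}
	}
	return false
}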