func TestFindFitSomeError(t *testing.T) { nodes := []string{"3", "2", "1"} predicates := map[string]algorithm.FitPredicate{"true": truePredicate, "match": matchesPredicate} pod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "1"}} _, predicateMap, err := findNodesThatFit(pod, algorithm.FakePodLister([]*api.Pod{}), predicates, makeNodeList(nodes), nil) if err != nil { t.Errorf("unexpected error: %v", err) } if len(predicateMap) != (len(nodes) - 1) { t.Errorf("unexpected failed predicate map: %v", predicateMap) } for _, node := range nodes { if node == pod.Name { continue } failures, found := predicateMap[node] if !found { t.Errorf("failed to find node: %s in %v", node, predicateMap) } if len(failures) != 1 || !failures.Has("match") { t.Errorf("unexpected failures: %v", failures) } } }
func TestFindFitAllError(t *testing.T) { nodes := []string{"3", "2", "1"} predicates := map[string]algorithm.FitPredicate{"true": truePredicate, "false": falsePredicate} _, predicateMap, err := findNodesThatFit(&api.Pod{}, algorithm.FakePodLister([]*api.Pod{}), predicates, makeNodeList(nodes), nil) if err != nil { t.Errorf("unexpected error: %v", err) } if len(predicateMap) != len(nodes) { t.Errorf("unexpected failed predicate map: %v", predicateMap) } for _, node := range nodes { failures, found := predicateMap[node] if !found { t.Errorf("failed to find node: %s in %v", node, predicateMap) } if len(failures) != 1 || !failures.Has("false") { t.Errorf("unexpected failures: %v", failures) } } }
func TestZeroRequest(t *testing.T) { // A pod with no resources. We expect spreading to count it as having the default resources. noResources := api.PodSpec{ Containers: []api.Container{ {}, }, } noResources1 := noResources noResources1.NodeName = "machine1" // A pod with the same resources as a 0-request pod gets by default as its resources (for spreading). small := api.PodSpec{ Containers: []api.Container{ { Resources: api.ResourceRequirements{ Requests: api.ResourceList{ "cpu": resource.MustParse( strconv.FormatInt(priorityutil.DefaultMilliCpuRequest, 10) + "m"), "memory": resource.MustParse( strconv.FormatInt(priorityutil.DefaultMemoryRequest, 10)), }, }, }, }, } small2 := small small2.NodeName = "machine2" // A larger pod. large := api.PodSpec{ Containers: []api.Container{ { Resources: api.ResourceRequirements{ Requests: api.ResourceList{ "cpu": resource.MustParse( strconv.FormatInt(priorityutil.DefaultMilliCpuRequest*3, 10) + "m"), "memory": resource.MustParse( strconv.FormatInt(priorityutil.DefaultMemoryRequest*3, 10)), }, }, }, }, } large1 := large large1.NodeName = "machine1" large2 := large large2.NodeName = "machine2" tests := []struct { pod *api.Pod pods []*api.Pod nodes []api.Node test string }{ // The point of these next two tests is to show you get the same priority for a zero-request pod // as for a pod with the defaults requests, both when the zero-request pod is already on the machine // and when the zero-request pod is the one being scheduled. 
{ pod: &api.Pod{Spec: noResources}, nodes: []api.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)}, test: "test priority of zero-request pod with machine with zero-request pod", pods: []*api.Pod{ {Spec: large1}, {Spec: noResources1}, {Spec: large2}, {Spec: small2}, }, }, { pod: &api.Pod{Spec: small}, nodes: []api.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)}, test: "test priority of nonzero-request pod with machine with zero-request pod", pods: []*api.Pod{ {Spec: large1}, {Spec: noResources1}, {Spec: large2}, {Spec: small2}, }, }, // The point of this test is to verify that we're not just getting the same score no matter what we schedule. { pod: &api.Pod{Spec: large}, nodes: []api.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)}, test: "test priority of larger pod with machine with zero-request pod", pods: []*api.Pod{ {Spec: large1}, {Spec: noResources1}, {Spec: large2}, {Spec: small2}, }, }, } const expectedPriority int = 25 for _, test := range tests { nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods) list, err := scheduler.PrioritizeNodes( test.pod, nodeNameToInfo, // This should match the configuration in defaultPriorities() in // plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go if you want // to test what's actually in production. 
[]algorithm.PriorityConfig{{Function: LeastRequestedPriority, Weight: 1}, {Function: BalancedResourceAllocation, Weight: 1}, {Function: NewSelectorSpreadPriority(algorithm.FakePodLister(test.pods), algorithm.FakeServiceLister([]api.Service{}), algorithm.FakeControllerLister([]api.ReplicationController{}), algorithm.FakeReplicaSetLister([]extensions.ReplicaSet{})), Weight: 1}}, algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}), []algorithm.SchedulerExtender{}) if err != nil { t.Errorf("unexpected error: %v", err) } for _, hp := range list { if test.test == "test priority of larger pod with machine with zero-request pod" { if hp.Score == expectedPriority { t.Errorf("%s: expected non-%d for all priorities, got list %#v", test.test, expectedPriority, list) } } else { if hp.Score != expectedPriority { t.Errorf("%s: expected %d for all priorities, got list %#v", test.test, expectedPriority, list) } } } } }
func TestZoneSpreadPriority(t *testing.T) { labels1 := map[string]string{ "foo": "bar", "baz": "blah", } labels2 := map[string]string{ "bar": "foo", "baz": "blah", } zone1 := map[string]string{ "zone": "zone1", } zone2 := map[string]string{ "zone": "zone2", } nozone := map[string]string{ "name": "value", } zone0Spec := api.PodSpec{ NodeName: "machine01", } zone1Spec := api.PodSpec{ NodeName: "machine11", } zone2Spec := api.PodSpec{ NodeName: "machine21", } labeledNodes := map[string]map[string]string{ "machine01": nozone, "machine02": nozone, "machine11": zone1, "machine12": zone1, "machine21": zone2, "machine22": zone2, } tests := []struct { pod *api.Pod pods []*api.Pod nodes map[string]map[string]string services []api.Service expectedList schedulerapi.HostPriorityList test string }{ { pod: new(api.Pod), nodes: labeledNodes, expectedList: []schedulerapi.HostPriority{{"machine11", 10}, {"machine12", 10}, {"machine21", 10}, {"machine22", 10}, {"machine01", 0}, {"machine02", 0}}, test: "nothing scheduled", }, { pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, pods: []*api.Pod{{Spec: zone1Spec}}, nodes: labeledNodes, expectedList: []schedulerapi.HostPriority{{"machine11", 10}, {"machine12", 10}, {"machine21", 10}, {"machine22", 10}, {"machine01", 0}, {"machine02", 0}}, test: "no services", }, { pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, pods: []*api.Pod{{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}}, nodes: labeledNodes, services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"key": "value"}}}}, expectedList: []schedulerapi.HostPriority{{"machine11", 10}, {"machine12", 10}, {"machine21", 10}, {"machine22", 10}, {"machine01", 0}, {"machine02", 0}}, test: "different services", }, { pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, pods: []*api.Pod{ {Spec: zone0Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, {Spec: zone2Spec, 
ObjectMeta: api.ObjectMeta{Labels: labels1}}, }, nodes: labeledNodes, services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}}, expectedList: []schedulerapi.HostPriority{{"machine11", 10}, {"machine12", 10}, {"machine21", 0}, {"machine22", 0}, {"machine01", 0}, {"machine02", 0}}, test: "three pods, one service pod", }, { pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, pods: []*api.Pod{ {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, }, nodes: labeledNodes, services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}}, expectedList: []schedulerapi.HostPriority{{"machine11", 5}, {"machine12", 5}, {"machine21", 5}, {"machine22", 5}, {"machine01", 0}, {"machine02", 0}}, test: "three pods, two service pods on different machines", }, { pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}}, pods: []*api.Pod{ {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}}, {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns1"}}, }, nodes: labeledNodes, services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}, ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault}}}, expectedList: []schedulerapi.HostPriority{{"machine11", 0}, {"machine12", 0}, {"machine21", 10}, {"machine22", 10}, {"machine01", 0}, {"machine02", 0}}, test: "three service label match pods in different namespaces", }, { pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, pods: []*api.Pod{ {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: zone2Spec, 
ObjectMeta: api.ObjectMeta{Labels: labels1}}, }, nodes: labeledNodes, services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}}, expectedList: []schedulerapi.HostPriority{{"machine11", 6}, {"machine12", 6}, {"machine21", 3}, {"machine22", 3}, {"machine01", 0}, {"machine02", 0}}, test: "four pods, three service pods", }, { pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, pods: []*api.Pod{ {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, }, nodes: labeledNodes, services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}}, expectedList: []schedulerapi.HostPriority{{"machine11", 3}, {"machine12", 3}, {"machine21", 6}, {"machine22", 6}, {"machine01", 0}, {"machine02", 0}}, test: "service with partial pod label matches", }, { pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, pods: []*api.Pod{ {Spec: zone0Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, }, nodes: labeledNodes, services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}}, expectedList: []schedulerapi.HostPriority{{"machine11", 7}, {"machine12", 7}, {"machine21", 5}, {"machine22", 5}, {"machine01", 0}, {"machine02", 0}}, test: "service pod on non-zoned node", }, } for _, test := range tests { nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods) zoneSpread := ServiceAntiAffinity{podLister: algorithm.FakePodLister(test.pods), serviceLister: algorithm.FakeServiceLister(test.services), label: "zone"} list, err := zoneSpread.CalculateAntiAffinityPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(makeLabeledNodeList(test.nodes))) if err != nil { t.Errorf("unexpected error: %v", err) } // 
sort the two lists to avoid failures on account of different ordering sort.Sort(test.expectedList) sort.Sort(list) if !reflect.DeepEqual(test.expectedList, list) { t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list) } } }
func TestSelectorSpreadPriority(t *testing.T) { labels1 := map[string]string{ "foo": "bar", "baz": "blah", } labels2 := map[string]string{ "bar": "foo", "baz": "blah", } zone1Spec := api.PodSpec{ NodeName: "machine1", } zone2Spec := api.PodSpec{ NodeName: "machine2", } tests := []struct { pod *api.Pod pods []*api.Pod nodes []string rcs []api.ReplicationController rss []extensions.ReplicaSet services []api.Service expectedList schedulerapi.HostPriorityList test string }{ { pod: new(api.Pod), nodes: []string{"machine1", "machine2"}, expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}}, test: "nothing scheduled", }, { pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, pods: []*api.Pod{{Spec: zone1Spec}}, nodes: []string{"machine1", "machine2"}, expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}}, test: "no services", }, { pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, pods: []*api.Pod{{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}}, nodes: []string{"machine1", "machine2"}, services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"key": "value"}}}}, expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}}, test: "different services", }, { pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, pods: []*api.Pod{ {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, }, nodes: []string{"machine1", "machine2"}, services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}}, expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 0}}, test: "two pods, one service pod", }, { pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, pods: []*api.Pod{ {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}}, {Spec: zone1Spec, ObjectMeta: 
api.ObjectMeta{Labels: labels1, Namespace: "ns1"}}, {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, }, nodes: []string{"machine1", "machine2"}, services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}}, expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 0}}, test: "five pods, one service pod in no namespace", }, { pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}}, pods: []*api.Pod{ {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns1"}}, {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}}, {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, }, nodes: []string{"machine1", "machine2"}, services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}, ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault}}}, expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 0}}, test: "four pods, one service pod in default namespace", }, { pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns1"}}, pods: []*api.Pod{ {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}}, {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns2"}}, {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns1"}}, {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, }, nodes: []string{"machine1", "machine2"}, services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}, ObjectMeta: api.ObjectMeta{Namespace: "ns1"}}}, expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 0}}, test: "five pods, one service pod in specific namespace", }, { pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: 
labels1}}, pods: []*api.Pod{ {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, }, nodes: []string{"machine1", "machine2"}, services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}}, expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}}, test: "three pods, two service pods on different machines", }, { pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, pods: []*api.Pod{ {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, }, nodes: []string{"machine1", "machine2"}, services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}}, expectedList: []schedulerapi.HostPriority{{"machine1", 5}, {"machine2", 0}}, test: "four pods, three service pods", }, { pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, pods: []*api.Pod{ {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, }, nodes: []string{"machine1", "machine2"}, services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}}, expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 5}}, test: "service with partial pod label matches", }, { pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, pods: []*api.Pod{ {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, }, nodes: []string{"machine1", "machine2"}, services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}}, rcs: 
[]api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}}, // "baz=blah" matches both labels1 and labels2, and "foo=bar" matches only labels 1. This means that we assume that we want to // do spreading between all pods. The result should be exactly as above. expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 5}}, test: "service with partial pod label matches with service and replication controller", }, { pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, pods: []*api.Pod{ {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, }, nodes: []string{"machine1", "machine2"}, services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}}, rss: []extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}}, // We use ReplicaSet, instead of ReplicationController. The result should be exactly as above. expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 5}}, test: "service with partial pod label matches with service and replication controller", }, { pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}}}, pods: []*api.Pod{ {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, }, nodes: []string{"machine1", "machine2"}, services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}}, rcs: []api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}}, // Taken together Service and Replication Controller should match all Pods, hence result should be equal to one above. 
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 5}}, test: "disjoined service and replication controller should be treated equally", }, { pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}}}, pods: []*api.Pod{ {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, }, nodes: []string{"machine1", "machine2"}, services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}}, rss: []extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}}, // We use ReplicaSet, instead of ReplicationController. The result should be exactly as above. expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 5}}, test: "disjoined service and replication controller should be treated equally", }, { pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, pods: []*api.Pod{ {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, }, nodes: []string{"machine1", "machine2"}, rcs: []api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}}, // Both Nodes have one pod from the given RC, hence both get 0 score. 
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}}, test: "Replication controller with partial pod label matches", }, { pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, pods: []*api.Pod{ {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, }, nodes: []string{"machine1", "machine2"}, rss: []extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}}, // We use ReplicaSet, instead of ReplicationController. The result should be exactly as above. expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}}, test: "Replication controller with partial pod label matches", }, { pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, pods: []*api.Pod{ {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, }, nodes: []string{"machine1", "machine2"}, rcs: []api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"baz": "blah"}}}}, expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 5}}, test: "Replication controller with partial pod label matches", }, { pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, pods: []*api.Pod{ {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, }, nodes: []string{"machine1", "machine2"}, rss: []extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"baz": "blah"}}}}}, // We use ReplicaSet, instead of ReplicationController. The result should be exactly as above. 
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 5}}, test: "Replication controller with partial pod label matches", }, } for _, test := range tests { nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods) selectorSpread := SelectorSpread{podLister: algorithm.FakePodLister(test.pods), serviceLister: algorithm.FakeServiceLister(test.services), controllerLister: algorithm.FakeControllerLister(test.rcs), replicaSetLister: algorithm.FakeReplicaSetLister(test.rss)} list, err := selectorSpread.CalculateSpreadPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(makeNodeList(test.nodes))) if err != nil { t.Errorf("unexpected error: %v", err) } if !reflect.DeepEqual(test.expectedList, list) { t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list) } } }
func TestZoneSelectorSpreadPriority(t *testing.T) { labels1 := map[string]string{ "label1": "l1", "baz": "blah", } labels2 := map[string]string{ "label2": "l2", "baz": "blah", } const nodeMachine1Zone1 = "machine1.zone1" const nodeMachine1Zone2 = "machine1.zone2" const nodeMachine2Zone2 = "machine2.zone2" const nodeMachine1Zone3 = "machine1.zone3" const nodeMachine2Zone3 = "machine2.zone3" const nodeMachine3Zone3 = "machine3.zone3" buildNodeLabels := func(failureDomain string) map[string]string { labels := map[string]string{ wellknownlabels.LabelZoneFailureDomain: failureDomain, } return labels } labeledNodes := map[string]map[string]string{ nodeMachine1Zone1: buildNodeLabels("zone1"), nodeMachine1Zone2: buildNodeLabels("zone2"), nodeMachine2Zone2: buildNodeLabels("zone2"), nodeMachine1Zone3: buildNodeLabels("zone3"), nodeMachine2Zone3: buildNodeLabels("zone3"), nodeMachine3Zone3: buildNodeLabels("zone3"), } buildPod := func(nodeName string, labels map[string]string) *api.Pod { pod := &api.Pod{Spec: api.PodSpec{NodeName: nodeName}, ObjectMeta: api.ObjectMeta{Labels: labels}} return pod } tests := []struct { pod *api.Pod pods []*api.Pod nodes []string rcs []api.ReplicationController rss []extensions.ReplicaSet services []api.Service expectedList schedulerapi.HostPriorityList test string }{ { pod: new(api.Pod), expectedList: []schedulerapi.HostPriority{ {nodeMachine1Zone1, 10}, {nodeMachine1Zone2, 10}, {nodeMachine2Zone2, 10}, {nodeMachine1Zone3, 10}, {nodeMachine2Zone3, 10}, {nodeMachine3Zone3, 10}, }, test: "nothing scheduled", }, { pod: buildPod("", labels1), pods: []*api.Pod{buildPod(nodeMachine1Zone1, nil)}, expectedList: []schedulerapi.HostPriority{ {nodeMachine1Zone1, 10}, {nodeMachine1Zone2, 10}, {nodeMachine2Zone2, 10}, {nodeMachine1Zone3, 10}, {nodeMachine2Zone3, 10}, {nodeMachine3Zone3, 10}, }, test: "no services", }, { pod: buildPod("", labels1), pods: []*api.Pod{buildPod(nodeMachine1Zone1, labels2)}, services: []api.Service{{Spec: 
api.ServiceSpec{Selector: map[string]string{"key": "value"}}}}, expectedList: []schedulerapi.HostPriority{ {nodeMachine1Zone1, 10}, {nodeMachine1Zone2, 10}, {nodeMachine2Zone2, 10}, {nodeMachine1Zone3, 10}, {nodeMachine2Zone3, 10}, {nodeMachine3Zone3, 10}, }, test: "different services", }, { pod: buildPod("", labels1), pods: []*api.Pod{ buildPod(nodeMachine1Zone1, labels2), buildPod(nodeMachine1Zone2, labels1), }, services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}}, expectedList: []schedulerapi.HostPriority{ {nodeMachine1Zone1, 10}, {nodeMachine1Zone2, 0}, // Already have pod on machine {nodeMachine2Zone2, 3}, // Already have pod in zone {nodeMachine1Zone3, 10}, {nodeMachine2Zone3, 10}, {nodeMachine3Zone3, 10}, }, test: "two pods, 1 matching (in z2)", }, { pod: buildPod("", labels1), pods: []*api.Pod{ buildPod(nodeMachine1Zone1, labels2), buildPod(nodeMachine1Zone2, labels1), buildPod(nodeMachine2Zone2, labels1), buildPod(nodeMachine1Zone3, labels2), buildPod(nodeMachine2Zone3, labels1), }, services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}}, expectedList: []schedulerapi.HostPriority{ {nodeMachine1Zone1, 10}, {nodeMachine1Zone2, 0}, // Pod on node {nodeMachine2Zone2, 0}, // Pod on node {nodeMachine1Zone3, 6}, // Pod in zone {nodeMachine2Zone3, 3}, // Pod on node {nodeMachine3Zone3, 6}, // Pod in zone }, test: "five pods, 3 matching (z2=2, z3=1)", }, { pod: buildPod("", labels1), pods: []*api.Pod{ buildPod(nodeMachine1Zone1, labels1), buildPod(nodeMachine1Zone2, labels1), buildPod(nodeMachine2Zone2, labels2), buildPod(nodeMachine1Zone3, labels1), }, services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}}, expectedList: []schedulerapi.HostPriority{ {nodeMachine1Zone1, 0}, // Pod on node {nodeMachine1Zone2, 0}, // Pod on node {nodeMachine2Zone2, 3}, // Pod in zone {nodeMachine1Zone3, 0}, // Pod on node {nodeMachine2Zone3, 3}, // Pod in zone {nodeMachine3Zone3, 3}, // Pod in zone }, test: "four pods, 3 matching (z1=1, z2=1, 
z3=1)", }, { pod: buildPod("", labels1), pods: []*api.Pod{ buildPod(nodeMachine1Zone1, labels1), buildPod(nodeMachine1Zone2, labels1), buildPod(nodeMachine1Zone3, labels1), buildPod(nodeMachine2Zone2, labels2), }, services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}}, expectedList: []schedulerapi.HostPriority{ {nodeMachine1Zone1, 0}, // Pod on node {nodeMachine1Zone2, 0}, // Pod on node {nodeMachine2Zone2, 3}, // Pod in zone {nodeMachine1Zone3, 0}, // Pod on node {nodeMachine2Zone3, 3}, // Pod in zone {nodeMachine3Zone3, 3}, // Pod in zone }, test: "four pods, 3 matching (z1=1, z2=1, z3=1)", }, { pod: buildPod("", labels1), pods: []*api.Pod{ buildPod(nodeMachine1Zone3, labels1), buildPod(nodeMachine1Zone2, labels1), buildPod(nodeMachine1Zone3, labels1), }, rcs: []api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: labels1}}}, expectedList: []schedulerapi.HostPriority{ // Note that because we put two pods on the same node (nodeMachine1Zone3), // the values here are questionable for zone2, in particular for nodeMachine1Zone2. // However they kind of make sense; zone1 is still most-highly favored. // zone3 is in general least favored, and m1.z3 particularly low priority. // We would probably prefer to see a bigger gap between putting a second // pod on m1.z2 and putting a pod on m2.z2, but the ordering is correct. // This is also consistent with what we have already. 
{nodeMachine1Zone1, 10}, // No pods in zone {nodeMachine1Zone2, 5}, // Pod on node {nodeMachine2Zone2, 6}, // Pod in zone {nodeMachine1Zone3, 0}, // Two pods on node {nodeMachine2Zone3, 3}, // Pod in zone {nodeMachine3Zone3, 3}, // Pod in zone }, test: "Replication controller spreading (z1=0, z2=1, z3=2)", }, } for _, test := range tests { nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods) selectorSpread := SelectorSpread{podLister: algorithm.FakePodLister(test.pods), serviceLister: algorithm.FakeServiceLister(test.services), controllerLister: algorithm.FakeControllerLister(test.rcs), replicaSetLister: algorithm.FakeReplicaSetLister(test.rss)} list, err := selectorSpread.CalculateSpreadPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(makeLabeledNodeList(labeledNodes))) if err != nil { t.Errorf("unexpected error: %v", err) } // sort the two lists to avoid failures on account of different ordering sort.Sort(test.expectedList) sort.Sort(list) if !reflect.DeepEqual(test.expectedList, list) { t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list) } } }
// TestServiceAffinity exercises the ServiceAffinity predicate: the candidate
// node fits only when, for every label key listed in the test's `labels`,
// its value agrees with the nodes already running pods that belong to the
// same service (matched by selector and namespace) as the pod being scheduled.
func TestServiceAffinity(t *testing.T) {
	selector := map[string]string{"foo": "bar"}
	labels1 := map[string]string{
		"region": "r1",
		"zone":   "z11",
	}
	labels2 := map[string]string{
		"region": "r1",
		"zone":   "z12",
	}
	labels3 := map[string]string{
		"region": "r2",
		"zone":   "z21",
	}
	labels4 := map[string]string{
		"region": "r2",
		"zone":   "z22",
	}
	// Five nodes spanning two regions; machine4 and machine5 share labels4,
	// so they agree on both "region" and "zone".
	node1 := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labels1}}
	node2 := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labels2}}
	node3 := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labels3}}
	node4 := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine4", Labels: labels4}}
	node5 := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine5", Labels: labels4}}
	tests := []struct {
		pod      *api.Pod      // pod being scheduled
		pods     []*api.Pod    // pods already running in the cluster
		services []api.Service // services whose selectors may match the pod
		node     string        // candidate node for the pod
		labels   []string      // label keys that must agree across the service's nodes
		fits     bool          // expected predicate verdict
		test     string        // case description
	}{
		{
			pod:    new(api.Pod),
			node:   "machine1",
			fits:   true,
			labels: []string{"region"},
			test:   "nothing scheduled",
		},
		{
			pod:    &api.Pod{Spec: api.PodSpec{NodeSelector: map[string]string{"region": "r1"}}},
			node:   "machine1",
			fits:   true,
			labels: []string{"region"},
			test:   "pod with region label match",
		},
		{
			pod:    &api.Pod{Spec: api.PodSpec{NodeSelector: map[string]string{"region": "r2"}}},
			node:   "machine1",
			fits:   false,
			labels: []string{"region"},
			test:   "pod with region label mismatch",
		},
		{
			pod:      &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}},
			pods:     []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: selector}}},
			node:     "machine1",
			services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}},
			fits:     true,
			labels:   []string{"region"},
			test:     "service pod on same node",
		},
		{
			pod:      &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}},
			pods:     []*api.Pod{{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: selector}}},
			node:     "machine1",
			services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}},
			fits:     true,
			labels:   []string{"region"},
			test:     "service pod on different node, region match",
		},
		{
			pod:      &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}},
			pods:     []*api.Pod{{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector}}},
			node:     "machine1",
			services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}},
			fits:     false,
			labels:   []string{"region"},
			test:     "service pod on different node, region mismatch",
		},
		{
			// Service is in another namespace, so the existing pod is not a
			// peer of the scheduled pod; the region mismatch is ignored.
			pod:      &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}},
			pods:     []*api.Pod{{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}}},
			node:     "machine1",
			services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}, ObjectMeta: api.ObjectMeta{Namespace: "ns2"}}},
			fits:     true,
			labels:   []string{"region"},
			test:     "service in different namespace, region mismatch",
		},
		{
			// Existing pod is in another namespace than the service, so it
			// does not count as a peer either.
			pod:      &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}},
			pods:     []*api.Pod{{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns2"}}},
			node:     "machine1",
			services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}, ObjectMeta: api.ObjectMeta{Namespace: "ns1"}}},
			fits:     true,
			labels:   []string{"region"},
			test:     "pod in different namespace, region mismatch",
		},
		{
			pod:      &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}},
			pods:     []*api.Pod{{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}}},
			node:     "machine1",
			services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}, ObjectMeta: api.ObjectMeta{Namespace: "ns1"}}},
			fits:     false,
			labels:   []string{"region"},
			test:     "service and pod in same namespace, region mismatch",
		},
		{
			// machine1 and machine2 share "region" but differ on "zone".
			pod:      &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}},
			pods:     []*api.Pod{{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: selector}}},
			node:     "machine1",
			services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}},
			fits:     false,
			labels:   []string{"region", "zone"},
			test:     "service pod on different node, multiple labels, not all match",
		},
		{
			// machine4 and machine5 both carry labels4, so both keys match.
			pod:      &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}},
			pods:     []*api.Pod{{Spec: api.PodSpec{NodeName: "machine5"}, ObjectMeta: api.ObjectMeta{Labels: selector}}},
			node:     "machine4",
			services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}},
			fits:     true,
			labels:   []string{"region", "zone"},
			test:     "service pod on different node, multiple labels, all match",
		},
	}
	for _, test := range tests {
		nodes := []api.Node{node1, node2, node3, node4, node5}
		serviceAffinity := ServiceAffinity{algorithm.FakePodLister(test.pods), algorithm.FakeServiceLister(test.services), FakeNodeListInfo(nodes), test.labels}
		fits, err := serviceAffinity.CheckServiceAffinity(test.pod, []*api.Pod{}, test.node)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if fits != test.fits {
			t.Errorf("%s: expected: %v got %v", test.test, test.fits, fits)
		}
	}
}
// TestBalancedResourceAllocation verifies the BalancedResourceAllocation
// priority: each node is scored 10 - |cpuFraction - memFraction| * 10, where
// the fractions are requested/capacity; nodes whose requests exceed capacity
// score 0. Expected scores are derived in the per-case comments below.
func TestBalancedResourceAllocation(t *testing.T) {
	labels1 := map[string]string{
		"foo": "bar",
		"baz": "blah",
	}
	labels2 := map[string]string{
		"bar": "foo",
		"baz": "blah",
	}
	machine1Spec := api.PodSpec{
		NodeName: "machine1",
	}
	machine2Spec := api.PodSpec{
		NodeName: "machine2",
	}
	noResources := api.PodSpec{
		Containers: []api.Container{},
	}
	// Two containers totalling 3000m CPU, 0 memory, bound to machine1.
	cpuOnly := api.PodSpec{
		NodeName: "machine1",
		Containers: []api.Container{
			{
				Resources: api.ResourceRequirements{
					Requests: api.ResourceList{
						"cpu":    resource.MustParse("1000m"),
						"memory": resource.MustParse("0"),
					},
				},
			},
			{
				Resources: api.ResourceRequirements{
					Requests: api.ResourceList{
						"cpu":    resource.MustParse("2000m"),
						"memory": resource.MustParse("0"),
					},
				},
			},
		},
	}
	// Same requests as cpuOnly but bound to machine2.
	cpuOnly2 := cpuOnly
	cpuOnly2.NodeName = "machine2"
	// Two containers totalling 3000m CPU and 5000 memory, bound to machine2.
	cpuAndMemory := api.PodSpec{
		NodeName: "machine2",
		Containers: []api.Container{
			{
				Resources: api.ResourceRequirements{
					Requests: api.ResourceList{
						"cpu":    resource.MustParse("1000m"),
						"memory": resource.MustParse("2000"),
					},
				},
			},
			{
				Resources: api.ResourceRequirements{
					Requests: api.ResourceList{
						"cpu":    resource.MustParse("2000m"),
						"memory": resource.MustParse("3000"),
					},
				},
			},
		},
	}
	tests := []struct {
		pod          *api.Pod
		pods         []*api.Pod
		nodes        []api.Node
		expectedList schedulerapi.HostPriorityList
		test         string
	}{
		{
			/*
				Node1 scores (remaining resources) on 0-10 scale
				CPU Fraction: 0 / 4000 = 0%
				Memory Fraction: 0 / 10000 = 0%
				Node1 Score: 10 - (0-0)*10 = 10

				Node2 scores (remaining resources) on 0-10 scale
				CPU Fraction: 0 / 4000 = 0 %
				Memory Fraction: 0 / 10000 = 0%
				Node2 Score: 10 - (0-0)*10 = 10
			*/
			pod:          &api.Pod{Spec: noResources},
			nodes:        []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
			expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}},
			test:         "nothing scheduled, nothing requested",
		},
		{
			/*
				Node1 scores on 0-10 scale
				CPU Fraction: 3000 / 4000= 75%
				Memory Fraction: 5000 / 10000 = 50%
				Node1 Score: 10 - (0.75-0.5)*10 = 7

				Node2 scores on 0-10 scale
				CPU Fraction: 3000 / 6000= 50%
				Memory Fraction: 5000/10000 = 50%
				Node2 Score: 10 - (0.5-0.5)*10 = 10
			*/
			pod:          &api.Pod{Spec: cpuAndMemory},
			nodes:        []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
			expectedList: []schedulerapi.HostPriority{{"machine1", 7}, {"machine2", 10}},
			test:         "nothing scheduled, resources requested, differently sized machines",
		},
		{
			/*
				Node1 scores on 0-10 scale
				CPU Fraction: 0 / 4000= 0%
				Memory Fraction: 0 / 10000 = 0%
				Node1 Score: 10 - (0-0)*10 = 10

				Node2 scores on 0-10 scale
				CPU Fraction: 0 / 4000= 0%
				Memory Fraction: 0 / 10000 = 0%
				Node2 Score: 10 - (0-0)*10 = 10
			*/
			pod:          &api.Pod{Spec: noResources},
			nodes:        []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
			expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}},
			test:         "no resources requested, pods scheduled",
			pods: []*api.Pod{
				{Spec: machine1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
				{Spec: machine1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
				{Spec: machine2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
				{Spec: machine2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
			},
		},
		{
			/*
				Node1 scores on 0-10 scale
				CPU Fraction: 6000 / 10000 = 60%
				Memory Fraction: 0 / 20000 = 0%
				Node1 Score: 10 - (0.6-0)*10 = 4

				Node2 scores on 0-10 scale
				CPU Fraction: 6000 / 10000 = 60%
				Memory Fraction: 5000 / 20000 = 25%
				Node2 Score: 10 - (0.6-0.25)*10 = 6
			*/
			pod:          &api.Pod{Spec: noResources},
			nodes:        []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
			expectedList: []schedulerapi.HostPriority{{"machine1", 4}, {"machine2", 6}},
			test:         "no resources requested, pods scheduled with resources",
			pods: []*api.Pod{
				{Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels2}},
				{Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels1}},
				{Spec: cpuOnly2, ObjectMeta: api.ObjectMeta{Labels: labels1}},
				{Spec: cpuAndMemory, ObjectMeta: api.ObjectMeta{Labels: labels1}},
			},
		},
		{
			/*
				Node1 scores on 0-10 scale
				CPU Fraction: 6000 / 10000 = 60%
				Memory Fraction: 5000 / 20000 = 25%
				Node1 Score: 10 - (0.6-0.25)*10 = 6

				Node2 scores on 0-10 scale
				CPU Fraction: 6000 / 10000 = 60%
				Memory Fraction: 10000 / 20000 = 50%
				Node2 Score: 10 - (0.6-0.5)*10 = 9
			*/
			pod:          &api.Pod{Spec: cpuAndMemory},
			nodes:        []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
			expectedList: []schedulerapi.HostPriority{{"machine1", 6}, {"machine2", 9}},
			test:         "resources requested, pods scheduled with resources",
			pods: []*api.Pod{
				{Spec: cpuOnly},
				{Spec: cpuAndMemory},
			},
		},
		{
			/*
				Node1 scores on 0-10 scale
				CPU Fraction: 6000 / 10000 = 60%
				Memory Fraction: 5000 / 20000 = 25%
				Node1 Score: 10 - (0.6-0.25)*10 = 6

				Node2 scores on 0-10 scale
				CPU Fraction: 6000 / 10000 = 60%
				Memory Fraction: 10000 / 50000 = 20%
				Node2 Score: 10 - (0.6-0.2)*10 = 6
			*/
			pod:          &api.Pod{Spec: cpuAndMemory},
			nodes:        []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
			expectedList: []schedulerapi.HostPriority{{"machine1", 6}, {"machine2", 6}},
			test:         "resources requested, pods scheduled with resources, differently sized machines",
			pods: []*api.Pod{
				{Spec: cpuOnly},
				{Spec: cpuAndMemory},
			},
		},
		{
			/*
				Node1 scores on 0-10 scale
				CPU Fraction: 6000 / 4000 > 100% ==> Score := 0
				Memory Fraction: 0 / 10000 = 0
				Node1 Score: 0

				Node2 scores on 0-10 scale
				CPU Fraction: 6000 / 4000 > 100% ==> Score := 0
				Memory Fraction 5000 / 10000 = 50%
				Node2 Score: 0
			*/
			pod:          &api.Pod{Spec: cpuOnly},
			nodes:        []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
			expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}},
			test:         "requested resources exceed node capacity",
			pods: []*api.Pod{
				{Spec: cpuOnly},
				{Spec: cpuAndMemory},
			},
		},
		{
			// Degenerate case: zero-capacity nodes must not divide by zero.
			pod:          &api.Pod{Spec: noResources},
			nodes:        []api.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
			expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}},
			test:         "zero node resources, pods scheduled with resources",
			pods: []*api.Pod{
				{Spec: cpuOnly},
				{Spec: cpuAndMemory},
			},
		},
	}
	for _, test := range tests {
		m2p, err := predicates.MapPodsToMachines(algorithm.FakePodLister(test.pods))
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		list, err := BalancedResourceAllocation(test.pod, m2p, algorithm.FakePodLister(test.pods), algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}))
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if !reflect.DeepEqual(test.expectedList, list) {
			t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
		}
	}
}
// TestLeastRequested verifies the LeastRequestedPriority function: each node
// gets the average of its per-resource scores ((capacity - requested) * 10 /
// capacity for CPU and memory), so emptier nodes score higher. Expected
// values are derived in the per-case comments below (integer-truncated).
func TestLeastRequested(t *testing.T) {
	labels1 := map[string]string{
		"foo": "bar",
		"baz": "blah",
	}
	labels2 := map[string]string{
		"bar": "foo",
		"baz": "blah",
	}
	machine1Spec := api.PodSpec{
		NodeName: "machine1",
	}
	machine2Spec := api.PodSpec{
		NodeName: "machine2",
	}
	noResources := api.PodSpec{
		Containers: []api.Container{},
	}
	// Two containers totalling 3000m CPU, 0 memory, bound to machine1.
	cpuOnly := api.PodSpec{
		NodeName: "machine1",
		Containers: []api.Container{
			{
				Resources: api.ResourceRequirements{
					Requests: api.ResourceList{
						"cpu":    resource.MustParse("1000m"),
						"memory": resource.MustParse("0"),
					},
				},
			},
			{
				Resources: api.ResourceRequirements{
					Requests: api.ResourceList{
						"cpu":    resource.MustParse("2000m"),
						"memory": resource.MustParse("0"),
					},
				},
			},
		},
	}
	// Same requests as cpuOnly but bound to machine2.
	cpuOnly2 := cpuOnly
	cpuOnly2.NodeName = "machine2"
	// Two containers totalling 3000m CPU and 5000 memory, bound to machine2.
	cpuAndMemory := api.PodSpec{
		NodeName: "machine2",
		Containers: []api.Container{
			{
				Resources: api.ResourceRequirements{
					Requests: api.ResourceList{
						"cpu":    resource.MustParse("1000m"),
						"memory": resource.MustParse("2000"),
					},
				},
			},
			{
				Resources: api.ResourceRequirements{
					Requests: api.ResourceList{
						"cpu":    resource.MustParse("2000m"),
						"memory": resource.MustParse("3000"),
					},
				},
			},
		},
	}
	tests := []struct {
		pod          *api.Pod
		pods         []*api.Pod
		nodes        []api.Node
		expectedList schedulerapi.HostPriorityList
		test         string
	}{
		{
			/*
				Node1 scores (remaining resources) on 0-10 scale
				CPU Score: ((4000 - 0) *10) / 4000 = 10
				Memory Score: ((10000 - 0) *10) / 10000 = 10
				Node1 Score: (10 + 10) / 2 = 10

				Node2 scores (remaining resources) on 0-10 scale
				CPU Score: ((4000 - 0) *10) / 4000 = 10
				Memory Score: ((10000 - 0) *10) / 10000 = 10
				Node2 Score: (10 + 10) / 2 = 10
			*/
			pod:          &api.Pod{Spec: noResources},
			nodes:        []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
			expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}},
			test:         "nothing scheduled, nothing requested",
		},
		{
			/*
				Node1 scores on 0-10 scale
				CPU Score: ((4000 - 3000) *10) / 4000 = 2.5
				Memory Score: ((10000 - 5000) *10) / 10000 = 5
				Node1 Score: (2.5 + 5) / 2 = 3

				Node2 scores on 0-10 scale
				CPU Score: ((6000 - 3000) *10) / 6000 = 5
				Memory Score: ((10000 - 5000) *10) / 10000 = 5
				Node2 Score: (5 + 5) / 2 = 5
			*/
			pod:          &api.Pod{Spec: cpuAndMemory},
			nodes:        []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
			expectedList: []schedulerapi.HostPriority{{"machine1", 3}, {"machine2", 5}},
			test:         "nothing scheduled, resources requested, differently sized machines",
		},
		{
			/*
				Node1 scores on 0-10 scale
				CPU Score: ((4000 - 0) *10) / 4000 = 10
				Memory Score: ((10000 - 0) *10) / 10000 = 10
				Node1 Score: (10 + 10) / 2 = 10

				Node2 scores on 0-10 scale
				CPU Score: ((4000 - 0) *10) / 4000 = 10
				Memory Score: ((10000 - 0) *10) / 10000 = 10
				Node2 Score: (10 + 10) / 2 = 10
			*/
			pod:          &api.Pod{Spec: noResources},
			nodes:        []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
			expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}},
			test:         "no resources requested, pods scheduled",
			pods: []*api.Pod{
				{Spec: machine1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
				{Spec: machine1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
				{Spec: machine2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
				{Spec: machine2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
			},
		},
		{
			/*
				Node1 scores on 0-10 scale
				CPU Score: ((10000 - 6000) *10) / 10000 = 4
				Memory Score: ((20000 - 0) *10) / 20000 = 10
				Node1 Score: (4 + 10) / 2 = 7

				Node2 scores on 0-10 scale
				CPU Score: ((10000 - 6000) *10) / 10000 = 4
				Memory Score: ((20000 - 5000) *10) / 20000 = 7.5
				Node2 Score: (4 + 7.5) / 2 = 5
			*/
			pod:          &api.Pod{Spec: noResources},
			nodes:        []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
			expectedList: []schedulerapi.HostPriority{{"machine1", 7}, {"machine2", 5}},
			test:         "no resources requested, pods scheduled with resources",
			pods: []*api.Pod{
				{Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels2}},
				{Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels1}},
				{Spec: cpuOnly2, ObjectMeta: api.ObjectMeta{Labels: labels1}},
				{Spec: cpuAndMemory, ObjectMeta: api.ObjectMeta{Labels: labels1}},
			},
		},
		{
			/*
				Node1 scores on 0-10 scale
				CPU Score: ((10000 - 6000) *10) / 10000 = 4
				Memory Score: ((20000 - 5000) *10) / 20000 = 7.5
				Node1 Score: (4 + 7.5) / 2 = 5

				Node2 scores on 0-10 scale
				CPU Score: ((10000 - 6000) *10) / 10000 = 4
				Memory Score: ((20000 - 10000) *10) / 20000 = 5
				Node2 Score: (4 + 5) / 2 = 4
			*/
			pod:          &api.Pod{Spec: cpuAndMemory},
			nodes:        []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
			expectedList: []schedulerapi.HostPriority{{"machine1", 5}, {"machine2", 4}},
			test:         "resources requested, pods scheduled with resources",
			pods: []*api.Pod{
				{Spec: cpuOnly},
				{Spec: cpuAndMemory},
			},
		},
		{
			/*
				Node1 scores on 0-10 scale
				CPU Score: ((10000 - 6000) *10) / 10000 = 4
				Memory Score: ((20000 - 5000) *10) / 20000 = 7.5
				Node1 Score: (4 + 7.5) / 2 = 5

				Node2 scores on 0-10 scale
				CPU Score: ((10000 - 6000) *10) / 10000 = 4
				Memory Score: ((50000 - 10000) *10) / 50000 = 8
				Node2 Score: (4 + 8) / 2 = 6
			*/
			pod:          &api.Pod{Spec: cpuAndMemory},
			nodes:        []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
			expectedList: []schedulerapi.HostPriority{{"machine1", 5}, {"machine2", 6}},
			test:         "resources requested, pods scheduled with resources, differently sized machines",
			pods: []*api.Pod{
				{Spec: cpuOnly},
				{Spec: cpuAndMemory},
			},
		},
		{
			/*
				Node1 scores on 0-10 scale
				CPU Score: ((4000 - 6000) *10) / 4000 = 0
				Memory Score: ((10000 - 0) *10) / 10000 = 10
				Node1 Score: (0 + 10) / 2 = 5

				Node2 scores on 0-10 scale
				CPU Score: ((4000 - 6000) *10) / 4000 = 0
				Memory Score: ((10000 - 5000) *10) / 10000 = 5
				Node2 Score: (0 + 5) / 2 = 2
			*/
			pod:          &api.Pod{Spec: cpuOnly},
			nodes:        []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
			expectedList: []schedulerapi.HostPriority{{"machine1", 5}, {"machine2", 2}},
			test:         "requested resources exceed node capacity",
			pods: []*api.Pod{
				{Spec: cpuOnly},
				{Spec: cpuAndMemory},
			},
		},
		{
			// Degenerate case: zero-capacity nodes must not divide by zero.
			pod:          &api.Pod{Spec: noResources},
			nodes:        []api.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
			expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}},
			test:         "zero node resources, pods scheduled with resources",
			pods: []*api.Pod{
				{Spec: cpuOnly},
				{Spec: cpuAndMemory},
			},
		},
	}
	for _, test := range tests {
		m2p, err := predicates.MapPodsToMachines(algorithm.FakePodLister(test.pods))
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		list, err := LeastRequestedPriority(test.pod, m2p, algorithm.FakePodLister(test.pods), algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}))
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if !reflect.DeepEqual(test.expectedList, list) {
			t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
		}
	}
}
func TestSoftPodAntiAffinityWithFailureDomains(t *testing.T) { labelAzAZ1 := map[string]string{ "az": "az1", } LabelZoneFailureDomainAZ1 := map[string]string{ unversioned.LabelZoneFailureDomain: "az1", } podLabel1 := map[string]string{ "security": "S1", } antiAffinity1 := map[string]string{ v1.AffinityAnnotationKey: ` {"podAntiAffinity": { "preferredDuringSchedulingIgnoredDuringExecution": [{ "weight": 5, "podAffinityTerm": { "labelSelector": { "matchExpressions": [{ "key": "security", "operator": "In", "values":["S1"] }] }, "namespaces": [], "topologyKey": "" } }] }}`, } tests := []struct { pod *v1.Pod pods []*v1.Pod nodes []*v1.Node failureDomains priorityutil.Topologies expectedList schedulerapi.HostPriorityList test string }{ { pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabel1, Annotations: antiAffinity1}}, pods: []*v1.Pod{ {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabel1}}, {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabel1}}, }, nodes: []*v1.Node{ {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: LabelZoneFailureDomainAZ1}}, {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelAzAZ1}}, }, failureDomains: priorityutil.Topologies{DefaultKeys: strings.Split(v1.DefaultFailureDomains, ",")}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}}, test: "Soft Pod Anti Affinity: when the topologyKey is emtpy, match among topologyKeys indicated by failure domains.", }, { pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabel1, Annotations: antiAffinity1}}, pods: []*v1.Pod{ {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabel1}}, {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabel1}}, }, nodes: []*v1.Node{ {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: LabelZoneFailureDomainAZ1}}, {ObjectMeta: 
v1.ObjectMeta{Name: "machine2", Labels: labelAzAZ1}}, }, failureDomains: priorityutil.Topologies{}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}}, test: "Soft Pod Anti Affinity: when the topologyKey is emtpy, and no failure domains indicated, regard as topologyKey not match.", }, } for _, test := range tests { nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes) ipa := InterPodAffinity{ info: FakeNodeListInfo(test.nodes), nodeLister: algorithm.FakeNodeLister(test.nodes), podLister: algorithm.FakePodLister(test.pods), hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight, failureDomains: test.failureDomains, } list, err := ipa.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, test.nodes) if err != nil { t.Errorf("unexpected error: %v", err) } if !reflect.DeepEqual(test.expectedList, list) { t.Errorf("%s: \nexpected \n\t%#v, \ngot \n\t%#v\n", test.test, test.expectedList, list) } } }
// TestHardPodAffinitySymmetricWeight verifies the symmetric side of hard
// (required) pod affinity during priority scoring: when existing pods carry a
// required affinity toward the incoming pod's labels, nodes satisfying that
// symmetry get a high score — but only while hardPodAffinityWeight is
// non-zero; with weight 0 the symmetry contributes nothing.
func TestHardPodAffinitySymmetricWeight(t *testing.T) {
	podLabelServiceS1 := map[string]string{
		"service": "S1",
	}
	labelRgChina := map[string]string{
		"region": "China",
	}
	labelRgIndia := map[string]string{
		"region": "India",
	}
	labelAzAz1 := map[string]string{
		"az": "az1",
	}
	// Required affinity toward pods labeled service=S1, spread by "region".
	hardPodAffinity := map[string]string{
		v1.AffinityAnnotationKey: ` {"podAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": [ { "labelSelector":{ "matchExpressions": [{ "key": "service", "operator": "In", "values": ["S1"] }] }, "namespaces": [], "topologyKey": "region" } ] }}`,
	}
	tests := []struct {
		pod                   *v1.Pod
		pods                  []*v1.Pod
		nodes                 []*v1.Node
		hardPodAffinityWeight int // symmetric weight applied to existing pods' hard affinity
		expectedList          schedulerapi.HostPriorityList
		test                  string
	}{
		{
			pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelServiceS1}},
			pods: []*v1.Pod{
				{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Annotations: hardPodAffinity}},
				{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Annotations: hardPodAffinity}},
			},
			nodes: []*v1.Node{
				{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
				{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
				// machine3 has no "region" label, so the topology key never matches.
				{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
			},
			hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight,
			expectedList:          []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}},
			test:                  "Hard Pod Affinity symmetry: hard pod affinity symmetry weights 1 by default, then nodes that match the hard pod affinity symmetry rules, get a high score",
		},
		{
			pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelServiceS1}},
			pods: []*v1.Pod{
				{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Annotations: hardPodAffinity}},
				{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Annotations: hardPodAffinity}},
			},
			nodes: []*v1.Node{
				{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
				{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
				{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
			},
			hardPodAffinityWeight: 0,
			expectedList:          []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
			test:                  "Hard Pod Affinity symmetry: hard pod affinity symmetry is closed(weights 0), then nodes that match the hard pod affinity symmetry rules, get same score with those not match",
		},
	}
	for _, test := range tests {
		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes)
		ipa := InterPodAffinity{
			info:                  FakeNodeListInfo(test.nodes),
			nodeLister:            algorithm.FakeNodeLister(test.nodes),
			podLister:             algorithm.FakePodLister(test.pods),
			hardPodAffinityWeight: test.hardPodAffinityWeight,
		}
		list, err := ipa.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, test.nodes)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if !reflect.DeepEqual(test.expectedList, list) {
			t.Errorf("%s: \nexpected \n\t%#v, \ngot \n\t%#v\n", test.test, test.expectedList, list)
		}
	}
}
func TestInterPodAffinityPriority(t *testing.T) { labelRgChina := map[string]string{ "region": "China", } labelRgIndia := map[string]string{ "region": "India", } labelAzAz1 := map[string]string{ "az": "az1", } labelAzAz2 := map[string]string{ "az": "az2", } labelRgChinaAzAz1 := map[string]string{ "region": "China", "az": "az1", } podLabelSecurityS1 := map[string]string{ "security": "S1", } podLabelSecurityS2 := map[string]string{ "security": "S2", } // considered only preferredDuringSchedulingIgnoredDuringExecution in pod affinity stayWithS1InRegion := map[string]string{ v1.AffinityAnnotationKey: ` {"podAffinity": { "preferredDuringSchedulingIgnoredDuringExecution": [{ "weight": 5, "podAffinityTerm": { "labelSelector": { "matchExpressions": [{ "key": "security", "operator": "In", "values":["S1"] }] }, "namespaces": [], "topologyKey": "region" } }] }}`, } stayWithS2InRegion := map[string]string{ v1.AffinityAnnotationKey: ` {"podAffinity": { "preferredDuringSchedulingIgnoredDuringExecution": [{ "weight": 6, "podAffinityTerm": { "labelSelector": { "matchExpressions": [{ "key": "security", "operator": "In", "values":["S2"] }] }, "namespaces": [], "topologyKey": "region" } }] }}`, } affinity3 := map[string]string{ v1.AffinityAnnotationKey: ` {"podAffinity": { "preferredDuringSchedulingIgnoredDuringExecution": [ { "weight": 8, "podAffinityTerm": { "labelSelector": { "matchExpressions": [{ "key": "security", "operator": "NotIn", "values":["S1"] }, { "key": "security", "operator": "In", "values":["S2"] }] }, "namespaces": [], "topologyKey": "region" } }, { "weight": 2, "podAffinityTerm": { "labelSelector": { "matchExpressions": [{ "key": "security", "operator": "Exists" }, { "key": "wrongkey", "operator": "DoesNotExist" }] }, "namespaces": [], "topologyKey": "region" } } ] }}`, } hardAffinity := map[string]string{ v1.AffinityAnnotationKey: ` {"podAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": [ { "labelSelector":{ "matchExpressions": [{ "key": "security", 
"operator": "In", "values": ["S1", "value2"] }] }, "namespaces": [], "topologyKey": "region" }, { "labelSelector": { "matchExpressions": [{ "key": "security", "operator": "Exists" }, { "key": "wrongkey", "operator": "DoesNotExist" }] }, "namespaces": [], "topologyKey": "region" } ] }}`, } awayFromS1InAz := map[string]string{ v1.AffinityAnnotationKey: ` {"podAntiAffinity": { "preferredDuringSchedulingIgnoredDuringExecution": [{ "weight": 5, "podAffinityTerm": { "labelSelector": { "matchExpressions": [{ "key": "security", "operator": "In", "values":["S1"] }] }, "namespaces": [], "topologyKey": "az" } }] }}`, } // to stay away from security S2 in any az. awayFromS2InAz := map[string]string{ v1.AffinityAnnotationKey: ` {"podAntiAffinity": { "preferredDuringSchedulingIgnoredDuringExecution": [{ "weight": 5, "podAffinityTerm": { "labelSelector": { "matchExpressions": [{ "key": "security", "operator": "In", "values":["S2"] }] }, "namespaces": [], "topologyKey": "az" } }] }}`, } // to stay with security S1 in same region, stay away from security S2 in any az. 
stayWithS1InRegionAwayFromS2InAz := map[string]string{ v1.AffinityAnnotationKey: ` {"podAffinity": { "preferredDuringSchedulingIgnoredDuringExecution": [{ "weight": 8, "podAffinityTerm": { "labelSelector": { "matchExpressions": [{ "key": "security", "operator": "In", "values":["S1"] }] }, "namespaces": [], "topologyKey": "region" } }] }, "podAntiAffinity": { "preferredDuringSchedulingIgnoredDuringExecution": [{ "weight": 5, "podAffinityTerm": { "labelSelector": { "matchExpressions": [{ "key": "security", "operator": "In", "values":["S2"] }] }, "namespaces": [], "topologyKey": "az" } }] }}`, } tests := []struct { pod *v1.Pod pods []*v1.Pod nodes []*v1.Node expectedList schedulerapi.HostPriorityList test string }{ { pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: map[string]string{}}}, nodes: []*v1.Node{ {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, {ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}}, test: "all machines are same priority as Affinity is nil", }, // the node(machine1) that have the label {"region": "China"} (match the topology key) and that have existing pods that match the labelSelector get high score // the node(machine3) that don't have the label {"region": "whatever the value is"} (mismatch the topology key) but that have existing pods that match the labelSelector get low score // the node(machine2) that have the label {"region": "China"} (match the topology key) but that have existing pods that mismatch the labelSelector get low score { pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegion}}, pods: []*v1.Pod{ {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: 
v1.ObjectMeta{Labels: podLabelSecurityS1}}, {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}}, {Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, }, nodes: []*v1.Node{ {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, {ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}}, test: "Affinity: pod that matches topology key & pods in nodes will get high score comparing to others" + "which doesn't match either pods in nodes or in topology key", }, // the node1(machine1) that have the label {"region": "China"} (match the topology key) and that have existing pods that match the labelSelector get high score // the node2(machine2) that have the label {"region": "China"}, match the topology key and have the same label value with node1, get the same high score with node1 // the node3(machine3) that have the label {"region": "India"}, match the topology key but have a different label value, don't have existing pods that match the labelSelector, // get a low score. 
{ pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Annotations: stayWithS1InRegion}}, pods: []*v1.Pod{ {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, }, nodes: []*v1.Node{ {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgChinaAzAz1}}, {ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelRgIndia}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}}, test: "All the nodes that have the same topology key & label value with one of them has an existing pod that match the affinity rules, have the same score", }, // there are 2 regions, say regionChina(machine1,machine3,machine4) and regionIndia(machine2,machine5), both regions have nodes that match the preference. // But there are more nodes(actually more existing pods) in regionChina that match the preference than regionIndia. // Then, nodes in regionChina get higher score than nodes in regionIndia, and all the nodes in regionChina should get a same score(high score), // while all the nodes in regionIndia should get another same score(low score). 
{ pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS2InRegion}}, pods: []*v1.Pod{ {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}}, {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}}, {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}}, {Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}}, {Spec: v1.PodSpec{NodeName: "machine4"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}}, {Spec: v1.PodSpec{NodeName: "machine5"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}}, }, nodes: []*v1.Node{ {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, {ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelRgChina}}, {ObjectMeta: v1.ObjectMeta{Name: "machine4", Labels: labelRgChina}}, {ObjectMeta: v1.ObjectMeta{Name: "machine5", Labels: labelRgIndia}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 5}, {Host: "machine3", Score: 10}, {Host: "machine4", Score: 10}, {Host: "machine5", Score: 5}}, test: "Affinity: nodes in one region has more matching pods comparing to other reqion, so the region which has more macthes will get high score", }, // Test with the different operators and values for pod affinity scheduling preference, including some match failures. 
{ pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: affinity3}}, pods: []*v1.Pod{ {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}}, {Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, }, nodes: []*v1.Node{ {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, {ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 2}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}}, test: "Affinity: different Label operators and values for pod affinity scheduling preference, including some match failures ", }, // Test the symmetry cases for affinity, the difference between affinity and symmetry is not the pod wants to run together with some existing pods, // but the existing pods have the inter pod affinity preference while the pod to schedule satisfy the preference. 
{ pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}}, pods: []*v1.Pod{ {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegion}}, {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2, Annotations: stayWithS2InRegion}}, }, nodes: []*v1.Node{ {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, {ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}}, test: "Affinity symmetry: considred only the preferredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry", }, { pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, pods: []*v1.Pod{ {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: hardAffinity}}, {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2, Annotations: hardAffinity}}, }, nodes: []*v1.Node{ {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, {ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}}, test: "Affinity symmetry: considred RequiredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry", }, // The pod to schedule prefer to stay away from some existing pods at node level using the pod anti affinity. 
// the nodes that have the label {"node": "bar"} (match the topology key) and that have existing pods that match the labelSelector get low score // the nodes that don't have the label {"node": "whatever the value is"} (mismatch the topology key) but that have existing pods that match the labelSelector get high score // the nodes that have the label {"node": "bar"} (match the topology key) but that have existing pods that mismatch the labelSelector get high score // there are 2 nodes, say node1 and node2, both nodes have pods that match the labelSelector and have topology-key in node.Labels. // But there are more pods on node1 that match the preference than node2. Then, node1 get a lower score than node2. { pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS1InAz}}, pods: []*v1.Pod{ {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}}, }, nodes: []*v1.Node{ {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}}, {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgChina}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}}, test: "Anti Affinity: pod that doesnot match existing pods in node will get high score ", }, { pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS1InAz}}, pods: []*v1.Pod{ {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, }, nodes: []*v1.Node{ {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}}, {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgChina}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: 
"machine2", Score: 10}}, test: "Anti Affinity: pod that does not matches topology key & matches the pods in nodes will get higher score comparing to others ", }, { pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS1InAz}}, pods: []*v1.Pod{ {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}}, }, nodes: []*v1.Node{ {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}}, {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}}, test: "Anti Affinity: one node has more matching pods comparing to other node, so the node which has more unmacthes will get high score", }, // Test the symmetry cases for anti affinity { pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}}, pods: []*v1.Pod{ {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS2InAz}}, {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2, Annotations: awayFromS1InAz}}, }, nodes: []*v1.Node{ {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}}, {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelAzAz2}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}}, test: "Anti Affinity symmetry: the existing pods in node which has anti affinity match will get high score", }, // Test both affinity and anti-affinity { pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegionAwayFromS2InAz}}, pods: 
[]*v1.Pod{ {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, }, nodes: []*v1.Node{ {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelAzAz1}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}}, test: "Affinity and Anti Affinity: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity", }, // Combined cases considering both affinity and anti-affinity, the pod to schedule and existing pods have the same labels (they are in the same RC/service), // the pod prefer to run together with its brother pods in the same region, but wants to stay away from them at node level, // so that all the pods of a RC/service can stay in a same region but trying to separate with each other // machine-1,machine-3,machine-4 are in ChinaRegion others machin-2,machine-5 are in IndiaRegion { pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegionAwayFromS2InAz}}, pods: []*v1.Pod{ {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, {Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, {Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, {Spec: v1.PodSpec{NodeName: "machine4"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, {Spec: v1.PodSpec{NodeName: "machine5"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, }, nodes: []*v1.Node{ {ObjectMeta: v1.ObjectMeta{Name: 
"machine1", Labels: labelRgChinaAzAz1}}, {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, {ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelRgChina}}, {ObjectMeta: v1.ObjectMeta{Name: "machine4", Labels: labelRgChina}}, {ObjectMeta: v1.ObjectMeta{Name: "machine5", Labels: labelRgIndia}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 4}, {Host: "machine3", Score: 10}, {Host: "machine4", Score: 10}, {Host: "machine5", Score: 4}}, test: "Affinity and Anti Affinity: considering both affinity and anti-affinity, the pod to schedule and existing pods have the same labels", }, // Consider Affinity, Anti Affinity and symmetry together. // for Affinity, the weights are: 8, 0, 0, 0 // for Anti Affinity, the weights are: 0, -5, 0, 0 // for Affinity symmetry, the weights are: 0, 0, 8, 0 // for Anti Affinity symmetry, the weights are: 0, 0, 0, -5 { pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegionAwayFromS2InAz}}, pods: []*v1.Pod{ {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}}, {Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: v1.ObjectMeta{Annotations: stayWithS1InRegionAwayFromS2InAz}}, {Spec: v1.PodSpec{NodeName: "machine4"}, ObjectMeta: v1.ObjectMeta{Annotations: awayFromS1InAz}}, }, nodes: []*v1.Node{ {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelAzAz1}}, {ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelRgIndia}}, {ObjectMeta: v1.ObjectMeta{Name: "machine4", Labels: labelAzAz2}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 10}, {Host: "machine4", Score: 0}}, test: "Affinity and Anti Affinity 
and symmetry: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity & symmetry", }, } for _, test := range tests { nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes) interPodAffinity := InterPodAffinity{ info: FakeNodeListInfo(test.nodes), nodeLister: algorithm.FakeNodeLister(test.nodes), podLister: algorithm.FakePodLister(test.pods), hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight, failureDomains: priorityutil.Topologies{DefaultKeys: strings.Split(v1.DefaultFailureDomains, ",")}, } list, err := interPodAffinity.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, test.nodes) if err != nil { t.Errorf("unexpected error: %v", err) } if !reflect.DeepEqual(test.expectedList, list) { t.Errorf("%s: \nexpected \n\t%#v, \ngot \n\t%#v\n", test.test, test.expectedList, list) } } }
func TestGenericScheduler(t *testing.T) { tests := []struct { name string predicates map[string]algorithm.FitPredicate prioritizers []algorithm.PriorityConfig nodes []string pod *api.Pod pods []*api.Pod expectedHost string expectsErr bool }{ { predicates: map[string]algorithm.FitPredicate{"false": falsePredicate}, prioritizers: []algorithm.PriorityConfig{{Function: EqualPriority, Weight: 1}}, nodes: []string{"machine1", "machine2"}, expectsErr: true, name: "test 1", }, { predicates: map[string]algorithm.FitPredicate{"true": truePredicate}, prioritizers: []algorithm.PriorityConfig{{Function: EqualPriority, Weight: 1}}, nodes: []string{"machine1", "machine2"}, // Random choice between both, the rand seeded above with zero, chooses "machine1" expectedHost: "machine1", name: "test 2", }, { // Fits on a machine where the pod ID matches the machine name predicates: map[string]algorithm.FitPredicate{"matches": matchesPredicate}, prioritizers: []algorithm.PriorityConfig{{Function: EqualPriority, Weight: 1}}, nodes: []string{"machine1", "machine2"}, pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "machine2"}}, expectedHost: "machine2", name: "test 3", }, { predicates: map[string]algorithm.FitPredicate{"true": truePredicate}, prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}}, nodes: []string{"3", "2", "1"}, expectedHost: "3", name: "test 4", }, { predicates: map[string]algorithm.FitPredicate{"matches": matchesPredicate}, prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}}, nodes: []string{"3", "2", "1"}, pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}}, expectedHost: "2", name: "test 5", }, { predicates: map[string]algorithm.FitPredicate{"true": truePredicate}, prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}, {Function: reverseNumericPriority, Weight: 2}}, nodes: []string{"3", "2", "1"}, pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}}, expectedHost: "1", name: "test 6", }, { 
predicates: map[string]algorithm.FitPredicate{"true": truePredicate, "false": falsePredicate}, prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}}, nodes: []string{"3", "2", "1"}, expectsErr: true, name: "test 7", }, { predicates: map[string]algorithm.FitPredicate{ "nopods": hasNoPodsPredicate, "matches": matchesPredicate, }, pods: []*api.Pod{ { ObjectMeta: api.ObjectMeta{Name: "2"}, Spec: api.PodSpec{ NodeName: "2", }, Status: api.PodStatus{ Phase: api.PodRunning, }, }, }, pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}}, prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}}, nodes: []string{"1", "2"}, expectsErr: true, name: "test 8", }, } for _, test := range tests { random := rand.New(rand.NewSource(0)) scheduler := NewGenericScheduler(test.predicates, test.prioritizers, []algorithm.SchedulerExtender{}, algorithm.FakePodLister(test.pods), random) machine, err := scheduler.Schedule(test.pod, algorithm.FakeNodeLister(makeNodeList(test.nodes))) if test.expectsErr { if err == nil { t.Error("Unexpected non-error") } } else { if err != nil { t.Errorf("Unexpected error: %v", err) } if test.expectedHost != machine { t.Errorf("Failed : %s, Expected: %s, Saw: %s", test.name, test.expectedHost, machine) } } } }
func TestImageLocalityPriority(t *testing.T) { test_40_250 := api.PodSpec{ Containers: []api.Container{ { Image: "gcr.io/40", }, { Image: "gcr.io/250", }, }, } test_40_140 := api.PodSpec{ Containers: []api.Container{ { Image: "gcr.io/40", }, { Image: "gcr.io/140", }, }, } test_min_max := api.PodSpec{ Containers: []api.Container{ { Image: "gcr.io/10", }, { Image: "gcr.io/2000", }, }, } node_40_140_2000 := api.NodeStatus{ Images: []api.ContainerImage{ { RepoTags: []string{ "gcr.io/40", "gcr.io/40:v1", "gcr.io/40:v1", }, Size: int64(40 * mb), }, { RepoTags: []string{ "gcr.io/140", "gcr.io/140:v1", }, Size: int64(140 * mb), }, { RepoTags: []string{ "gcr.io/2000", }, Size: int64(2000 * mb), }, }, } node_250_10 := api.NodeStatus{ Images: []api.ContainerImage{ { RepoTags: []string{ "gcr.io/250", }, Size: int64(250 * mb), }, { RepoTags: []string{ "gcr.io/10", "gcr.io/10:v1", }, Size: int64(10 * mb), }, }, } tests := []struct { pod *api.Pod pods []*api.Pod nodes []api.Node expectedList schedulerapi.HostPriorityList test string }{ { // Pod: gcr.io/40 gcr.io/250 // Node1 // Image: gcr.io/40 40MB // Score: (40M-23M)/97.7M + 1 = 1 // Node2 // Image: gcr.io/250 250MB // Score: (250M-23M)/97.7M + 1 = 3 pod: &api.Pod{Spec: test_40_250}, nodes: []api.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)}, expectedList: []schedulerapi.HostPriority{{"machine1", 1}, {"machine2", 3}}, test: "two images spread on two nodes, prefer the larger image one", }, { // Pod: gcr.io/40 gcr.io/140 // Node1 // Image: gcr.io/40 40MB, gcr.io/140 140MB // Score: (40M+140M-23M)/97.7M + 1 = 2 // Node2 // Image: not present // Score: 0 pod: &api.Pod{Spec: test_40_140}, nodes: []api.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)}, expectedList: []schedulerapi.HostPriority{{"machine1", 2}, {"machine2", 0}}, test: "two images on one node, prefer this node", }, { // Pod: gcr.io/2000 gcr.io/10 // Node1 // Image: gcr.io/2000 2000MB // 
Score: 2000 > max score = 10 // Node2 // Image: gcr.io/10 10MB // Score: 10 < min score = 0 pod: &api.Pod{Spec: test_min_max}, nodes: []api.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)}, expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 0}}, test: "if exceed limit, use limit", }, } for _, test := range tests { m2p, err := predicates.MapPodsToMachines(algorithm.FakePodLister(test.pods)) if err != nil { t.Errorf("unexpected error: %v", err) } list, err := ImageLocalityPriority(test.pod, m2p, algorithm.FakePodLister(test.pods), algorithm.FakeNodeLister(api.NodeList{Items: test.nodes})) if err != nil { t.Errorf("unexpected error: %v", err) } sort.Sort(test.expectedList) sort.Sort(list) if !reflect.DeepEqual(test.expectedList, list) { t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list) } } }
func TestGenericSchedulerWithExtenders(t *testing.T) { tests := []struct { name string predicates map[string]algorithm.FitPredicate prioritizers []algorithm.PriorityConfig extenders []FakeExtender extenderPredicates []fitPredicate extenderPrioritizers []priorityConfig nodes []string pod *api.Pod pods []*api.Pod expectedHost string expectsErr bool }{ { predicates: map[string]algorithm.FitPredicate{"true": truePredicate}, prioritizers: []algorithm.PriorityConfig{{EqualPriority, 1}}, extenders: []FakeExtender{ { predicates: []fitPredicate{truePredicateExtender}, }, { predicates: []fitPredicate{errorPredicateExtender}, }, }, nodes: []string{"machine1", "machine2"}, expectsErr: true, name: "test 1", }, { predicates: map[string]algorithm.FitPredicate{"true": truePredicate}, prioritizers: []algorithm.PriorityConfig{{EqualPriority, 1}}, extenders: []FakeExtender{ { predicates: []fitPredicate{truePredicateExtender}, }, { predicates: []fitPredicate{falsePredicateExtender}, }, }, nodes: []string{"machine1", "machine2"}, expectsErr: true, name: "test 2", }, { predicates: map[string]algorithm.FitPredicate{"true": truePredicate}, prioritizers: []algorithm.PriorityConfig{{EqualPriority, 1}}, extenders: []FakeExtender{ { predicates: []fitPredicate{truePredicateExtender}, }, { predicates: []fitPredicate{machine1PredicateExtender}, }, }, nodes: []string{"machine1", "machine2"}, expectedHost: "machine1", name: "test 3", }, { predicates: map[string]algorithm.FitPredicate{"true": truePredicate}, prioritizers: []algorithm.PriorityConfig{{EqualPriority, 1}}, extenders: []FakeExtender{ { predicates: []fitPredicate{machine2PredicateExtender}, }, { predicates: []fitPredicate{machine1PredicateExtender}, }, }, nodes: []string{"machine1", "machine2"}, expectsErr: true, name: "test 4", }, { predicates: map[string]algorithm.FitPredicate{"true": truePredicate}, prioritizers: []algorithm.PriorityConfig{{EqualPriority, 1}}, extenders: []FakeExtender{ { predicates: 
[]fitPredicate{truePredicateExtender}, prioritizers: []priorityConfig{{errorPrioritizerExtender, 10}}, weight: 1, }, }, nodes: []string{"machine1"}, expectedHost: "machine1", name: "test 5", }, { predicates: map[string]algorithm.FitPredicate{"true": truePredicate}, prioritizers: []algorithm.PriorityConfig{{EqualPriority, 1}}, extenders: []FakeExtender{ { predicates: []fitPredicate{truePredicateExtender}, prioritizers: []priorityConfig{{machine1PrioritizerExtender, 10}}, weight: 1, }, { predicates: []fitPredicate{truePredicateExtender}, prioritizers: []priorityConfig{{machine2PrioritizerExtender, 10}}, weight: 5, }, }, nodes: []string{"machine1", "machine2"}, expectedHost: "machine2", name: "test 6", }, { predicates: map[string]algorithm.FitPredicate{"true": truePredicate}, prioritizers: []algorithm.PriorityConfig{{machine2Prioritizer, 20}}, extenders: []FakeExtender{ { predicates: []fitPredicate{truePredicateExtender}, prioritizers: []priorityConfig{{machine1PrioritizerExtender, 10}}, weight: 1, }, }, nodes: []string{"machine1", "machine2"}, expectedHost: "machine2", // machine2 has higher score name: "test 7", }, } for _, test := range tests { random := rand.New(rand.NewSource(0)) extenders := []algorithm.SchedulerExtender{} for ii := range test.extenders { extenders = append(extenders, &test.extenders[ii]) } scheduler := NewGenericScheduler(test.predicates, test.prioritizers, extenders, algorithm.FakePodLister(test.pods), random) machine, err := scheduler.Schedule(test.pod, algorithm.FakeNodeLister(makeNodeList(test.nodes))) if test.expectsErr { if err == nil { t.Errorf("Unexpected non-error for %s, machine %s", test.name, machine) } } else { if err != nil { t.Errorf("Unexpected error: %v", err) } if test.expectedHost != machine { t.Errorf("Failed : %s, Expected: %s, Saw: %s", test.name, test.expectedHost, machine) } } } }