// TestCloudProviderNoRateLimit tests that monitorNodeStatus() immediately deletes
// pods and the node when kubelet has not reported, and the cloudprovider says
// the node is gone.
func TestCloudProviderNoRateLimit(t *testing.T) {
	fnh := &FakeNodeHandler{
		Existing: []*api.Node{
			{
				ObjectMeta: api.ObjectMeta{
					Name:              "node0",
					CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
				},
				Status: api.NodeStatus{
					Conditions: []api.NodeCondition{
						{
							Type:               api.NodeReady,
							Status:             api.ConditionUnknown,
							LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
							LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
						},
					},
				},
			},
		},
		Clientset:      fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0"), *newPod("pod1", "node0")}}),
		deleteWaitChan: make(chan struct{}),
	}
	nodeController := NewNodeController(nil, fnh, 10*time.Minute, flowcontrol.NewFakeAlwaysRateLimiter(), flowcontrol.NewFakeAlwaysRateLimiter(),
		testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false)
	nodeController.cloud = &fakecloud.FakeCloud{}
	nodeController.now = func() unversioned.Time { return unversioned.Date(2016, 1, 1, 12, 0, 0, 0, time.UTC) }
	nodeController.nodeExistsInCloudProvider = func(nodeName string) (bool, error) {
		return false, nil
	}
	// monitorNodeStatus should allow this node to be immediately deleted.
	if err := nodeController.monitorNodeStatus(); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	select {
	case <-fnh.deleteWaitChan:
	case <-time.After(wait.ForeverTestTimeout):
		t.Errorf("Timed out waiting %v for node to be deleted", wait.ForeverTestTimeout)
	}
	if len(fnh.DeletedNodes) != 1 || fnh.DeletedNodes[0].Name != "node0" {
		t.Errorf("Node was not deleted")
	}
	if nodeOnQueue := nodeController.podEvictor.Remove("node0"); nodeOnQueue {
		t.Errorf("Node was queued for eviction. Should have been immediately deleted.")
	}
}
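
// The timing constants and the newPod helper used throughout these tests are
// defined elsewhere in this package. Below is a minimal sketch of plausible
// definitions (the values and field choices are assumptions, not the
// authoritative ones), kept commented out so the real symbols are not redeclared:
//
//	const (
//		testNodeMonitorGracePeriod = 40 * time.Second
//		testNodeStartupGracePeriod = 60 * time.Second
//		testNodeMonitorPeriod      = 5 * time.Second
//	)
//
//	func newPod(name, host string) *api.Pod {
//		return &api.Pod{
//			// A pod bound to the given node; Namespace is an assumed default.
//			ObjectMeta: api.ObjectMeta{Name: name, Namespace: "default"},
//			Spec:       api.PodSpec{NodeName: host},
//			Status: api.PodStatus{
//				Conditions: []api.PodCondition{
//					{Type: api.PodReady, Status: api.ConditionTrue},
//				},
//			},
//		}
//	}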
func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
	fakeNow := unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC)
	table := []struct {
		fakeNodeHandler         *FakeNodeHandler
		timeToPass              time.Duration
		newNodeStatus           api.NodeStatus
		expectedPodStatusUpdate bool
	}{
		// Node created recently, without status.
		// Expect no action from node controller (within startup grace period).
		{
			fakeNodeHandler: &FakeNodeHandler{
				Existing: []*api.Node{
					{
						ObjectMeta: api.ObjectMeta{
							Name:              "node0",
							CreationTimestamp: fakeNow,
						},
					},
				},
				Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
			},
			expectedPodStatusUpdate: false,
		},
		// Node created long time ago, with status updated recently.
		// Expect no action from node controller (within monitor grace period).
		{
			fakeNodeHandler: &FakeNodeHandler{
				Existing: []*api.Node{
					{
						ObjectMeta: api.ObjectMeta{
							Name:              "node0",
							CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						},
						Status: api.NodeStatus{
							Conditions: []api.NodeCondition{
								{
									Type:   api.NodeReady,
									Status: api.ConditionTrue,
									// Node status has just been updated.
									LastHeartbeatTime:  fakeNow,
									LastTransitionTime: fakeNow,
								},
							},
							Capacity: api.ResourceList{
								api.ResourceName(api.ResourceCPU):    resource.MustParse("10"),
								api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
							},
						},
						Spec: api.NodeSpec{
							ExternalID: "node0",
						},
					},
				},
				Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
			},
			expectedPodStatusUpdate: false,
		},
		// Node created long time ago, with status updated by kubelet exceeding grace period.
		// Expect pod statuses updated and Unknown node status posted from node controller.
		{
			fakeNodeHandler: &FakeNodeHandler{
				Existing: []*api.Node{
					{
						ObjectMeta: api.ObjectMeta{
							Name:              "node0",
							CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						},
						Status: api.NodeStatus{
							Conditions: []api.NodeCondition{
								{
									Type:   api.NodeReady,
									Status: api.ConditionTrue,
									// Node status hasn't been updated for 1hr.
									LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
									LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
								},
								{
									Type:   api.NodeOutOfDisk,
									Status: api.ConditionFalse,
									// Node status hasn't been updated for 1hr.
									LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
									LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
								},
							},
							Capacity: api.ResourceList{
								api.ResourceName(api.ResourceCPU):    resource.MustParse("10"),
								api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
							},
						},
						Spec: api.NodeSpec{
							ExternalID: "node0",
						},
					},
				},
				Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
			},
			timeToPass: 1 * time.Minute,
			newNodeStatus: api.NodeStatus{
				Conditions: []api.NodeCondition{
					{
						Type:   api.NodeReady,
						Status: api.ConditionTrue,
						// Node status hasn't been updated for 1hr.
						LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
						LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
					},
					{
						Type:   api.NodeOutOfDisk,
						Status: api.ConditionFalse,
						// Node status hasn't been updated for 1hr.
						LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
						LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
					},
				},
				Capacity: api.ResourceList{
					api.ResourceName(api.ResourceCPU):    resource.MustParse("10"),
					api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
				},
			},
			expectedPodStatusUpdate: true,
		},
	}

	for i, item := range table {
		nodeController := NewNodeController(nil, item.fakeNodeHandler, 5*time.Minute, flowcontrol.NewFakeAlwaysRateLimiter(), flowcontrol.NewFakeAlwaysRateLimiter(),
			testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false)
		nodeController.now = func() unversioned.Time { return fakeNow }
		if err := nodeController.monitorNodeStatus(); err != nil {
			t.Errorf("Case[%d] unexpected error: %v", i, err)
		}
		if item.timeToPass > 0 {
			nodeController.now = func() unversioned.Time { return unversioned.Time{Time: fakeNow.Add(item.timeToPass)} }
			item.fakeNodeHandler.Existing[0].Status = item.newNodeStatus
			if err := nodeController.monitorNodeStatus(); err != nil {
				t.Errorf("Case[%d] unexpected error: %v", i, err)
			}
		}

		podStatusUpdated := false
		for _, action := range item.fakeNodeHandler.Actions() {
			if action.GetVerb() == "update" && action.GetResource().Resource == "pods" && action.GetSubresource() == "status" {
				podStatusUpdated = true
			}
		}
		if podStatusUpdated != item.expectedPodStatusUpdate {
			t.Errorf("Case[%d] expect pod status updated to be %v, but got %v", i, item.expectedPodStatusUpdate, podStatusUpdated)
		}
	}
}
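
// The loop above that scans item.fakeNodeHandler.Actions() for a verb/resource
// pair recurs in several tests below. A hypothetical helper (not part of the
// original file) capturing the pattern could look like the sketch below,
// assuming the fake clientset's recorded actions implement core.Action from
// k8s.io/kubernetes/pkg/client/testing/core:
//
//	func hasAction(actions []core.Action, verb, resource, subresource string) bool {
//		for _, action := range actions {
//			// Match on the action verb ("update", "delete", ...), the resource
//			// name ("pods", "nodes", ...), and an optional subresource ("status").
//			if action.GetVerb() == verb && action.GetResource().Resource == resource && action.GetSubresource() == subresource {
//				return true
//			}
//		}
//		return false
//	}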
func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
	fakeNow := unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC)
	table := []struct {
		fakeNodeHandler      *FakeNodeHandler
		timeToPass           time.Duration
		newNodeStatus        api.NodeStatus
		expectedEvictPods    bool
		expectedRequestCount int
		expectedNodes        []*api.Node
	}{
		// Node created long time ago, without status:
		// Expect Unknown status posted from node controller.
		{
			fakeNodeHandler: &FakeNodeHandler{
				Existing: []*api.Node{
					{
						ObjectMeta: api.ObjectMeta{
							Name:              "node0",
							CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						},
					},
				},
				Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
			},
			expectedRequestCount: 2, // List+Update
			expectedNodes: []*api.Node{
				{
					ObjectMeta: api.ObjectMeta{
						Name:              "node0",
						CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
					},
					Status: api.NodeStatus{
						Conditions: []api.NodeCondition{
							{
								Type:               api.NodeReady,
								Status:             api.ConditionUnknown,
								Reason:             "NodeStatusNeverUpdated",
								Message:            "Kubelet never posted node status.",
								LastHeartbeatTime:  unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
								LastTransitionTime: fakeNow,
							},
							{
								Type:               api.NodeOutOfDisk,
								Status:             api.ConditionUnknown,
								Reason:             "NodeStatusNeverUpdated",
								Message:            "Kubelet never posted node status.",
								LastHeartbeatTime:  unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
								LastTransitionTime: fakeNow,
							},
						},
					},
				},
			},
		},
		// Node created recently, without status.
		// Expect no action from node controller (within startup grace period).
		{
			fakeNodeHandler: &FakeNodeHandler{
				Existing: []*api.Node{
					{
						ObjectMeta: api.ObjectMeta{
							Name:              "node0",
							CreationTimestamp: fakeNow,
						},
					},
				},
				Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
			},
			expectedRequestCount: 1, // List
			expectedNodes:        nil,
		},
		// Node created long time ago, with status updated by kubelet exceeding grace period.
		// Expect Unknown status posted from node controller.
		{
			fakeNodeHandler: &FakeNodeHandler{
				Existing: []*api.Node{
					{
						ObjectMeta: api.ObjectMeta{
							Name:              "node0",
							CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						},
						Status: api.NodeStatus{
							Conditions: []api.NodeCondition{
								{
									Type:   api.NodeReady,
									Status: api.ConditionTrue,
									// Node status hasn't been updated for 1hr.
									LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
									LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
								},
								{
									Type:   api.NodeOutOfDisk,
									Status: api.ConditionFalse,
									// Node status hasn't been updated for 1hr.
									LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
									LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
								},
							},
							Capacity: api.ResourceList{
								api.ResourceName(api.ResourceCPU):    resource.MustParse("10"),
								api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
							},
						},
						Spec: api.NodeSpec{
							ExternalID: "node0",
						},
					},
				},
				Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
			},
			expectedRequestCount: 3, // (List+)List+Update
			timeToPass:           time.Hour,
			newNodeStatus: api.NodeStatus{
				Conditions: []api.NodeCondition{
					{
						Type:   api.NodeReady,
						Status: api.ConditionTrue,
						// Node status hasn't been updated for 1hr.
						LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
						LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
					},
					{
						Type:   api.NodeOutOfDisk,
						Status: api.ConditionFalse,
						// Node status hasn't been updated for 1hr.
						LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
						LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
					},
				},
				Capacity: api.ResourceList{
					api.ResourceName(api.ResourceCPU):    resource.MustParse("10"),
					api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
				},
			},
			expectedNodes: []*api.Node{
				{
					ObjectMeta: api.ObjectMeta{
						Name:              "node0",
						CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
					},
					Status: api.NodeStatus{
						Conditions: []api.NodeCondition{
							{
								Type:               api.NodeReady,
								Status:             api.ConditionUnknown,
								Reason:             "NodeStatusUnknown",
								Message:            "Kubelet stopped posting node status.",
								LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
								LastTransitionTime: unversioned.Time{Time: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC).Add(time.Hour)},
							},
							{
								Type:               api.NodeOutOfDisk,
								Status:             api.ConditionUnknown,
								Reason:             "NodeStatusUnknown",
								Message:            "Kubelet stopped posting node status.",
								LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
								LastTransitionTime: unversioned.Time{Time: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC).Add(time.Hour)},
							},
						},
						Capacity: api.ResourceList{
							api.ResourceName(api.ResourceCPU):    resource.MustParse("10"),
							api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
						},
					},
					Spec: api.NodeSpec{
						ExternalID: "node0",
					},
				},
			},
		},
		// Node created long time ago, with status updated recently.
		// Expect no action from node controller (within monitor grace period).
		{
			fakeNodeHandler: &FakeNodeHandler{
				Existing: []*api.Node{
					{
						ObjectMeta: api.ObjectMeta{
							Name:              "node0",
							CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						},
						Status: api.NodeStatus{
							Conditions: []api.NodeCondition{
								{
									Type:   api.NodeReady,
									Status: api.ConditionTrue,
									// Node status has just been updated.
									LastHeartbeatTime:  fakeNow,
									LastTransitionTime: fakeNow,
								},
							},
							Capacity: api.ResourceList{
								api.ResourceName(api.ResourceCPU):    resource.MustParse("10"),
								api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
							},
						},
						Spec: api.NodeSpec{
							ExternalID: "node0",
						},
					},
				},
				Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
			},
			expectedRequestCount: 1, // List
			expectedNodes:        nil,
		},
	}

	for i, item := range table {
		nodeController := NewNodeController(nil, item.fakeNodeHandler, 5*time.Minute, flowcontrol.NewFakeAlwaysRateLimiter(), flowcontrol.NewFakeAlwaysRateLimiter(),
			testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false)
		nodeController.now = func() unversioned.Time { return fakeNow }
		if err := nodeController.monitorNodeStatus(); err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if item.timeToPass > 0 {
			nodeController.now = func() unversioned.Time { return unversioned.Time{Time: fakeNow.Add(item.timeToPass)} }
			item.fakeNodeHandler.Existing[0].Status = item.newNodeStatus
			if err := nodeController.monitorNodeStatus(); err != nil {
				t.Errorf("unexpected error: %v", err)
			}
		}
		if item.expectedRequestCount != item.fakeNodeHandler.RequestCount {
			t.Errorf("expected %v calls, but got %v.", item.expectedRequestCount, item.fakeNodeHandler.RequestCount)
		}
		if len(item.fakeNodeHandler.UpdatedNodes) > 0 && !api.Semantic.DeepEqual(item.expectedNodes, item.fakeNodeHandler.UpdatedNodes) {
			t.Errorf("Case[%d] unexpected nodes: %s", i, diff.ObjectDiff(item.expectedNodes[0], item.fakeNodeHandler.UpdatedNodes[0]))
		}
		if len(item.fakeNodeHandler.UpdatedNodeStatuses) > 0 && !api.Semantic.DeepEqual(item.expectedNodes, item.fakeNodeHandler.UpdatedNodeStatuses) {
			t.Errorf("Case[%d] unexpected nodes: %s", i, diff.ObjectDiff(item.expectedNodes[0], item.fakeNodeHandler.UpdatedNodeStatuses[0]))
		}
	}
}
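
// Note on the comparisons above: api.Semantic.DeepEqual is used rather than
// reflect.DeepEqual because resource quantities compare by value, not by
// internal representation. A minimal illustration (assuming the standard
// api.Semantic equality for resource.Quantity):
//
//	q1 := resource.MustParse("1G")
//	q2 := resource.MustParse("1000M")
//	reflect.DeepEqual(q1, q2)      // false: the parsed formats differ
//	api.Semantic.DeepEqual(q1, q2) // true: both quantities equal 10^9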
func TestMonitorNodeStatusEvictPods(t *testing.T) {
	fakeNow := unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC)
	evictionTimeout := 10 * time.Minute

	// Because of the logic that prevents NC from evicting anything when all
	// Nodes are NotReady, we need a second healthy node in these tests. Because
	// of how the tests are written, we also need to update the status of this Node.
	healthyNodeNewStatus := api.NodeStatus{
		Conditions: []api.NodeCondition{
			{
				Type:   api.NodeReady,
				Status: api.ConditionTrue,
				// Node status has just been updated.
				LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 9, 0, 0, time.UTC),
				LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
			},
		},
	}
	table := []struct {
		fakeNodeHandler     *FakeNodeHandler
		daemonSets          []extensions.DaemonSet
		timeToPass          time.Duration
		newNodeStatus       api.NodeStatus
		secondNodeNewStatus api.NodeStatus
		expectedEvictPods   bool
		description         string
	}{
		// Node created recently, with no status (happens only at cluster startup).
		{
			fakeNodeHandler: &FakeNodeHandler{
				Existing: []*api.Node{
					{
						ObjectMeta: api.ObjectMeta{
							Name:              "node0",
							CreationTimestamp: fakeNow,
						},
					},
					{
						ObjectMeta: api.ObjectMeta{
							Name:              "node1",
							CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						},
						Status: api.NodeStatus{
							Conditions: []api.NodeCondition{
								{
									Type:               api.NodeReady,
									Status:             api.ConditionTrue,
									LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
									LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
								},
							},
						},
					},
				},
				Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
			},
			daemonSets:          nil,
			timeToPass:          0,
			newNodeStatus:       api.NodeStatus{},
			secondNodeNewStatus: healthyNodeNewStatus,
			expectedEvictPods:   false,
			description:         "Node created recently, with no status.",
		},
		// Node created long time ago, and kubelet posted NotReady for a short period of time.
		{
			fakeNodeHandler: &FakeNodeHandler{
				Existing: []*api.Node{
					{
						ObjectMeta: api.ObjectMeta{
							Name:              "node0",
							CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						},
						Status: api.NodeStatus{
							Conditions: []api.NodeCondition{
								{
									Type:               api.NodeReady,
									Status:             api.ConditionFalse,
									LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
									LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
								},
							},
						},
					},
					{
						ObjectMeta: api.ObjectMeta{
							Name:              "node1",
							CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						},
						Status: api.NodeStatus{
							Conditions: []api.NodeCondition{
								{
									Type:               api.NodeReady,
									Status:             api.ConditionTrue,
									LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
									LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
								},
							},
						},
					},
				},
				Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
			},
			daemonSets: nil,
			timeToPass: evictionTimeout,
			newNodeStatus: api.NodeStatus{
				Conditions: []api.NodeCondition{
					{
						Type:   api.NodeReady,
						Status: api.ConditionFalse,
						// Node status has just been updated, and is NotReady for 10min.
						LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 9, 0, 0, time.UTC),
						LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
					},
				},
			},
			secondNodeNewStatus: healthyNodeNewStatus,
			expectedEvictPods:   false,
			description:         "Node created long time ago, and kubelet posted NotReady for a short period of time.",
		},
		// Pod is ds-managed, and kubelet posted NotReady for a long period of time.
		{
			fakeNodeHandler: &FakeNodeHandler{
				Existing: []*api.Node{
					{
						ObjectMeta: api.ObjectMeta{
							Name:              "node0",
							CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						},
						Status: api.NodeStatus{
							Conditions: []api.NodeCondition{
								{
									Type:               api.NodeReady,
									Status:             api.ConditionFalse,
									LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
									LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
								},
							},
						},
					},
					{
						ObjectMeta: api.ObjectMeta{
							Name:              "node1",
							CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						},
						Status: api.NodeStatus{
							Conditions: []api.NodeCondition{
								{
									Type:               api.NodeReady,
									Status:             api.ConditionTrue,
									LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
									LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
								},
							},
						},
					},
				},
				Clientset: fake.NewSimpleClientset(
					&api.PodList{
						Items: []api.Pod{
							{
								ObjectMeta: api.ObjectMeta{
									Name:      "pod0",
									Namespace: "default",
									Labels:    map[string]string{"daemon": "yes"},
								},
								Spec: api.PodSpec{
									NodeName: "node0",
								},
							},
						},
					},
				),
			},
			daemonSets: []extensions.DaemonSet{
				{
					ObjectMeta: api.ObjectMeta{
						Name:      "ds0",
						Namespace: "default",
					},
					Spec: extensions.DaemonSetSpec{
						Selector: &unversioned.LabelSelector{
							MatchLabels: map[string]string{"daemon": "yes"},
						},
					},
				},
			},
			timeToPass: time.Hour,
			newNodeStatus: api.NodeStatus{
				Conditions: []api.NodeCondition{
					{
						Type:   api.NodeReady,
						Status: api.ConditionFalse,
						// Node status has just been updated, and is NotReady for 1hr.
						LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 59, 0, 0, time.UTC),
						LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
					},
				},
			},
			secondNodeNewStatus: healthyNodeNewStatus,
			expectedEvictPods:   false,
			description:         "Pod is ds-managed, and kubelet posted NotReady for a long period of time.",
		},
		// Node created long time ago, and kubelet posted NotReady for a long period of time.
		{
			fakeNodeHandler: &FakeNodeHandler{
				Existing: []*api.Node{
					{
						ObjectMeta: api.ObjectMeta{
							Name:              "node0",
							CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						},
						Status: api.NodeStatus{
							Conditions: []api.NodeCondition{
								{
									Type:               api.NodeReady,
									Status:             api.ConditionFalse,
									LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
									LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
								},
							},
						},
					},
					{
						ObjectMeta: api.ObjectMeta{
							Name:              "node1",
							CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						},
						Status: api.NodeStatus{
							Conditions: []api.NodeCondition{
								{
									Type:               api.NodeReady,
									Status:             api.ConditionTrue,
									LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
									LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
								},
							},
						},
					},
				},
				Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
			},
			daemonSets: nil,
			timeToPass: time.Hour,
			newNodeStatus: api.NodeStatus{
				Conditions: []api.NodeCondition{
					{
						Type:   api.NodeReady,
						Status: api.ConditionFalse,
						// Node status has just been updated, and is NotReady for 1hr.
						LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 59, 0, 0, time.UTC),
						LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
					},
				},
			},
			secondNodeNewStatus: healthyNodeNewStatus,
			expectedEvictPods:   true,
			description:         "Node created long time ago, and kubelet posted NotReady for a long period of time.",
		},
		// Node created long time ago, node controller posted Unknown for a short period of time.
		{
			fakeNodeHandler: &FakeNodeHandler{
				Existing: []*api.Node{
					{
						ObjectMeta: api.ObjectMeta{
							Name:              "node0",
							CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						},
						Status: api.NodeStatus{
							Conditions: []api.NodeCondition{
								{
									Type:               api.NodeReady,
									Status:             api.ConditionUnknown,
									LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
									LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
								},
							},
						},
					},
					{
						ObjectMeta: api.ObjectMeta{
							Name:              "node1",
							CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						},
						Status: api.NodeStatus{
							Conditions: []api.NodeCondition{
								{
									Type:               api.NodeReady,
									Status:             api.ConditionTrue,
									LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
									LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
								},
							},
						},
					},
				},
				Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
			},
			daemonSets: nil,
			timeToPass: evictionTimeout - testNodeMonitorGracePeriod,
			newNodeStatus: api.NodeStatus{
				Conditions: []api.NodeCondition{
					{
						Type:   api.NodeReady,
						Status: api.ConditionUnknown,
						// Node status was updated by nodecontroller 10min ago.
						LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
						LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
					},
				},
			},
			secondNodeNewStatus: healthyNodeNewStatus,
			expectedEvictPods:   false,
			description:         "Node created long time ago, node controller posted Unknown for a short period of time.",
		},
		// Node created long time ago, node controller posted Unknown for a long period of time.
		{
			fakeNodeHandler: &FakeNodeHandler{
				Existing: []*api.Node{
					{
						ObjectMeta: api.ObjectMeta{
							Name:              "node0",
							CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						},
						Status: api.NodeStatus{
							Conditions: []api.NodeCondition{
								{
									Type:               api.NodeReady,
									Status:             api.ConditionUnknown,
									LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
									LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
								},
							},
						},
					},
					{
						ObjectMeta: api.ObjectMeta{
							Name:              "node1",
							CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						},
						Status: api.NodeStatus{
							Conditions: []api.NodeCondition{
								{
									Type:               api.NodeReady,
									Status:             api.ConditionTrue,
									LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
									LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
								},
							},
						},
					},
				},
				Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
			},
			daemonSets: nil,
			timeToPass: 60 * time.Minute,
			newNodeStatus: api.NodeStatus{
				Conditions: []api.NodeCondition{
					{
						Type:   api.NodeReady,
						Status: api.ConditionUnknown,
						// Node status was updated by nodecontroller 1hr ago.
						LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
						LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
					},
				},
			},
			secondNodeNewStatus: healthyNodeNewStatus,
			expectedEvictPods:   true,
			description:         "Node created long time ago, node controller posted Unknown for a long period of time.",
		},
		// NetworkSegmentation: Node created long time ago, node controller posted Unknown for a long period of time on both Nodes.
		{
			fakeNodeHandler: &FakeNodeHandler{
				Existing: []*api.Node{
					{
						ObjectMeta: api.ObjectMeta{
							Name:              "node0",
							CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						},
						Status: api.NodeStatus{
							Conditions: []api.NodeCondition{
								{
									Type:               api.NodeReady,
									Status:             api.ConditionUnknown,
									LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
									LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
								},
							},
						},
					},
					{
						ObjectMeta: api.ObjectMeta{
							Name:              "node1",
							CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						},
						Status: api.NodeStatus{
							Conditions: []api.NodeCondition{
								{
									Type:               api.NodeReady,
									Status:             api.ConditionUnknown,
									LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
									LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
								},
							},
						},
					},
				},
				Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
			},
			daemonSets: nil,
			timeToPass: 60 * time.Minute,
			newNodeStatus: api.NodeStatus{
				Conditions: []api.NodeCondition{
					{
						Type:   api.NodeReady,
						Status: api.ConditionUnknown,
						// Node status was updated by nodecontroller 1hr ago.
						LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
						LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
					},
				},
			},
			secondNodeNewStatus: api.NodeStatus{
				Conditions: []api.NodeCondition{
					{
						Type:   api.NodeReady,
						Status: api.ConditionUnknown,
						// Node status was updated by nodecontroller 1hr ago.
						LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
						LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
					},
				},
			},
			expectedEvictPods: false,
			description:       "Network Segmentation: Node created long time ago, node controller posted Unknown for a long period of time on both Nodes.",
		},
		// NetworkSegmentation: Node created long time ago, node controller posted Unknown for a long period
		// of time on the first Node; eviction should stop even though the node-master Node is healthy.
		{
			fakeNodeHandler: &FakeNodeHandler{
				Existing: []*api.Node{
					{
						ObjectMeta: api.ObjectMeta{
							Name:              "node0",
							CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						},
						Status: api.NodeStatus{
							Conditions: []api.NodeCondition{
								{
									Type:               api.NodeReady,
									Status:             api.ConditionUnknown,
									LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
									LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
								},
							},
						},
					},
					{
						ObjectMeta: api.ObjectMeta{
							Name:              "node-master",
							CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						},
						Status: api.NodeStatus{
							Conditions: []api.NodeCondition{
								{
									Type:               api.NodeReady,
									Status:             api.ConditionTrue,
									LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
									LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
								},
							},
						},
					},
				},
				Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
			},
			daemonSets: nil,
			timeToPass: 60 * time.Minute,
			newNodeStatus: api.NodeStatus{
				Conditions: []api.NodeCondition{
					{
						Type:   api.NodeReady,
						Status: api.ConditionUnknown,
						// Node status was updated by nodecontroller 1hr ago.
						LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
						LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
					},
				},
			},
			secondNodeNewStatus: healthyNodeNewStatus,
			expectedEvictPods:   false,
			description:         "NetworkSegmentation: Node created long time ago, node controller posted Unknown for a long period of time on the first Node; eviction should stop even though the node-master Node is healthy",
		},
	}

	for _, item := range table {
		nodeController := NewNodeController(nil, item.fakeNodeHandler, evictionTimeout, flowcontrol.NewFakeAlwaysRateLimiter(), flowcontrol.NewFakeAlwaysRateLimiter(),
			testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false)
		nodeController.now = func() unversioned.Time { return fakeNow }
		for _, ds := range item.daemonSets {
			nodeController.daemonSetStore.Add(&ds)
		}
		if err := nodeController.monitorNodeStatus(); err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if item.timeToPass > 0 {
			nodeController.now = func() unversioned.Time { return unversioned.Time{Time: fakeNow.Add(item.timeToPass)} }
			item.fakeNodeHandler.Existing[0].Status = item.newNodeStatus
			item.fakeNodeHandler.Existing[1].Status = item.secondNodeNewStatus
		}
		if err := nodeController.monitorNodeStatus(); err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		nodeController.podEvictor.Try(func(value TimedValue) (bool, time.Duration) {
			remaining, _ := nodeController.deletePods(value.Value)
			if remaining {
				nodeController.terminationEvictor.Add(value.Value)
			}
			return true, 0
		})
		nodeController.podEvictor.Try(func(value TimedValue) (bool, time.Duration) {
			nodeController.terminatePods(value.Value, value.AddedAt)
			return true, 0
		})
		podEvicted := false
		for _, action := range item.fakeNodeHandler.Actions() {
			if action.GetVerb() == "delete" && action.GetResource().Resource == "pods" {
				podEvicted = true
			}
		}

		if item.expectedEvictPods != podEvicted {
			t.Errorf("expected pod eviction: %+v, got %+v for %+v", item.expectedEvictPods, podEvicted, item.description)
		}
	}
}
func TestNodeDeletion(t *testing.T) {
	fakeNow := unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC)
	fakeNodeHandler := &FakeNodeHandler{
		Existing: []*api.Node{
			{
				ObjectMeta: api.ObjectMeta{
					Name:              "node0",
					CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
				},
				Status: api.NodeStatus{
					Conditions: []api.NodeCondition{
						{
							Type:   api.NodeReady,
							Status: api.ConditionTrue,
							// Node status has just been updated.
							LastHeartbeatTime:  fakeNow,
							LastTransitionTime: fakeNow,
						},
					},
					Capacity: api.ResourceList{
						api.ResourceName(api.ResourceCPU):    resource.MustParse("10"),
						api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
					},
				},
				Spec: api.NodeSpec{
					ExternalID: "node0",
				},
			},
			{
				ObjectMeta: api.ObjectMeta{
					Name:              "node1",
					CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
				},
				Status: api.NodeStatus{
					Conditions: []api.NodeCondition{
						{
							Type:   api.NodeReady,
							Status: api.ConditionTrue,
							// Node status has just been updated.
							LastHeartbeatTime:  fakeNow,
							LastTransitionTime: fakeNow,
						},
					},
					Capacity: api.ResourceList{
						api.ResourceName(api.ResourceCPU):    resource.MustParse("10"),
						api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
					},
				},
				Spec: api.NodeSpec{
					ExternalID: "node0",
				},
			},
		},
		Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0"), *newPod("pod1", "node1")}}),
	}

	nodeController := NewNodeController(nil, fakeNodeHandler, 5*time.Minute, flowcontrol.NewFakeAlwaysRateLimiter(), flowcontrol.NewFakeAlwaysRateLimiter(),
		testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false)
	nodeController.now = func() unversioned.Time { return fakeNow }
	if err := nodeController.monitorNodeStatus(); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	fakeNodeHandler.Delete("node1", nil)
	if err := nodeController.monitorNodeStatus(); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	nodeController.podEvictor.Try(func(value TimedValue) (bool, time.Duration) {
		nodeController.deletePods(value.Value)
		return true, 0
	})
	podEvicted := false
	for _, action := range fakeNodeHandler.Actions() {
		if action.GetVerb() == "delete" && action.GetResource().Resource == "pods" {
			podEvicted = true
		}
	}
	if !podEvicted {
		t.Error("expected pods to be evicted from the deleted node")
	}
}
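
// TestMonitorNodeStatusEvictPods and TestNodeDeletion both drain the eviction
// queue by hand via podEvictor.Try. A hypothetical helper (not in the original
// file) factoring out that drain, assuming deletePods and terminationEvictor
// keep the signatures used above, might read:
//
//	func drainEvictionQueue(nc *NodeController) {
//		nc.podEvictor.Try(func(value TimedValue) (bool, time.Duration) {
//			// deletePods reports whether pods remain; if so, hand the node
//			// over to the termination evictor, mirroring the tests above.
//			if remaining, _ := nc.deletePods(value.Value); remaining {
//				nc.terminationEvictor.Add(value.Value)
//			}
//			return true, 0
//		})
//	}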
// TestScale tests proportional scaling of deployments. Note that fenceposts for
// rolling out (maxUnavailable, maxSurge) have no meaning for simple scaling other
// than recording maxSurge as part of the max-replicas annotation that is taken
// into account in the next scale event (max-replicas is used for calculating the
// proportion of a replica set).
func TestScale(t *testing.T) {
	newTimestamp := unversioned.Date(2016, 5, 20, 2, 0, 0, 0, time.UTC)
	oldTimestamp := unversioned.Date(2016, 5, 20, 1, 0, 0, 0, time.UTC)
	olderTimestamp := unversioned.Date(2016, 5, 20, 0, 0, 0, 0, time.UTC)

	tests := []struct {
		name                       string
		deployment                 *exp.Deployment
		oldDeployment              *exp.Deployment
		newRS                      *exp.ReplicaSet
		oldRSs                     []*exp.ReplicaSet
		expectedNew                *exp.ReplicaSet
		expectedOld                []*exp.ReplicaSet
		desiredReplicasAnnotations map[string]int32
	}{
		{
			name:          "normal scaling event: 10 -> 12",
			deployment:    newDeployment(12, nil),
			oldDeployment: newDeployment(10, nil),
			newRS:         rs("foo-v1", 10, nil, newTimestamp),
			oldRSs:        []*exp.ReplicaSet{},
			expectedNew:   rs("foo-v1", 12, nil, newTimestamp),
			expectedOld:   []*exp.ReplicaSet{},
		},
		{
			name:          "normal scaling event: 10 -> 5",
			deployment:    newDeployment(5, nil),
			oldDeployment: newDeployment(10, nil),
			newRS:         rs("foo-v1", 10, nil, newTimestamp),
			oldRSs:        []*exp.ReplicaSet{},
			expectedNew:   rs("foo-v1", 5, nil, newTimestamp),
			expectedOld:   []*exp.ReplicaSet{},
		},
		{
			name:          "proportional scaling: 5 -> 10",
			deployment:    newDeployment(10, nil),
			oldDeployment: newDeployment(5, nil),
			newRS:         rs("foo-v2", 2, nil, newTimestamp),
			oldRSs:        []*exp.ReplicaSet{rs("foo-v1", 3, nil, oldTimestamp)},
			expectedNew:   rs("foo-v2", 4, nil, newTimestamp),
			expectedOld:   []*exp.ReplicaSet{rs("foo-v1", 6, nil, oldTimestamp)},
		},
		{
			name:          "proportional scaling: 5 -> 3",
			deployment:    newDeployment(3, nil),
			oldDeployment: newDeployment(5, nil),
			newRS:         rs("foo-v2", 2, nil, newTimestamp),
			oldRSs:        []*exp.ReplicaSet{rs("foo-v1", 3, nil, oldTimestamp)},
			expectedNew:   rs("foo-v2", 1, nil, newTimestamp),
			expectedOld:   []*exp.ReplicaSet{rs("foo-v1", 2, nil, oldTimestamp)},
		},
		{
			name:          "proportional scaling: 9 -> 4",
			deployment:    newDeployment(4, nil),
			oldDeployment: newDeployment(9, nil),
			newRS:         rs("foo-v2", 8, nil, newTimestamp),
			oldRSs:        []*exp.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)},
			expectedNew:   rs("foo-v2", 4, nil, newTimestamp),
			expectedOld:   []*exp.ReplicaSet{rs("foo-v1", 0, nil, oldTimestamp)},
		},
		{
			name:          "proportional scaling: 7 -> 10",
			deployment:    newDeployment(10, nil),
			oldDeployment: newDeployment(7, nil),
			newRS:         rs("foo-v3", 2, nil, newTimestamp),
			oldRSs:        []*exp.ReplicaSet{rs("foo-v2", 3, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},
			expectedNew:   rs("foo-v3", 3, nil, newTimestamp),
			expectedOld:   []*exp.ReplicaSet{rs("foo-v2", 4, nil, oldTimestamp), rs("foo-v1", 3, nil, olderTimestamp)},
		},
		{
			name:          "proportional scaling: 13 -> 8",
			deployment:    newDeployment(8, nil),
			oldDeployment: newDeployment(13, nil),
			newRS:         rs("foo-v3", 2, nil, newTimestamp),
			oldRSs:        []*exp.ReplicaSet{rs("foo-v2", 8, nil, oldTimestamp), rs("foo-v1", 3, nil, olderTimestamp)},
			expectedNew:   rs("foo-v3", 1, nil, newTimestamp),
			expectedOld:   []*exp.ReplicaSet{rs("foo-v2", 5, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},
		},
		// Scales up the new replica set.
		{
			name:          "leftover distribution: 3 -> 4",
			deployment:    newDeployment(4, nil),
			oldDeployment: newDeployment(3, nil),
			newRS:         rs("foo-v3", 1, nil, newTimestamp),
			oldRSs:        []*exp.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
			expectedNew:   rs("foo-v3", 2, nil, newTimestamp),
			expectedOld:   []*exp.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
		},
		// Scales down the older replica set.
		{
			name:          "leftover distribution: 3 -> 2",
			deployment:    newDeployment(2, nil),
			oldDeployment: newDeployment(3, nil),
			newRS:         rs("foo-v3", 1, nil, newTimestamp),
			oldRSs:        []*exp.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
			expectedNew:   rs("foo-v3", 1, nil, newTimestamp),
			expectedOld:   []*exp.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
		},
		// Scales up the latest replica set first.
		{
			name:          "proportional scaling (no new rs): 4 -> 5",
			deployment:    newDeployment(5, nil),
			oldDeployment: newDeployment(4, nil),
			newRS:         nil,
			oldRSs:        []*exp.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},
			expectedNew:   nil,
			expectedOld:   []*exp.ReplicaSet{rs("foo-v2", 3, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},
		},
		// Scales down to zero.
		{
			name:          "proportional scaling: 6 -> 0",
			deployment:    newDeployment(0, nil),
			oldDeployment: newDeployment(6, nil),
			newRS:         rs("foo-v3", 3, nil, newTimestamp),
			oldRSs:        []*exp.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
			expectedNew:   rs("foo-v3", 0, nil, newTimestamp),
			expectedOld:   []*exp.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
		},
		// Scales up from zero.
		{
			name:          "proportional scaling: 0 -> 6",
			deployment:    newDeployment(6, nil),
			oldDeployment: newDeployment(0, nil),
			newRS:         rs("foo-v3", 0, nil, newTimestamp),
			oldRSs:        []*exp.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
			expectedNew:   rs("foo-v3", 6, nil, newTimestamp),
			expectedOld:   []*exp.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
		},
		// Scenario: deployment.spec.replicas == 3 ( foo-v1.spec.replicas == foo-v2.spec.replicas == foo-v3.spec.replicas == 1 )
		// Deployment is scaled to 5. foo-v3.spec.replicas and foo-v2.spec.replicas should increment by 1 but foo-v2 fails to
		// update.
		{
			name:                       "failed rs update",
			deployment:                 newDeployment(5, nil),
			oldDeployment:              newDeployment(5, nil),
			newRS:                      rs("foo-v3", 2, nil, newTimestamp),
			oldRSs:                     []*exp.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
			expectedNew:                rs("foo-v3", 2, nil, newTimestamp),
			expectedOld:                []*exp.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
			desiredReplicasAnnotations: map[string]int32{"foo-v2": int32(3)},
		},
		{
			name:          "deployment with surge pods",
			deployment:    newDeploymentEnhanced(20, intstr.FromInt(2)),
			oldDeployment: newDeploymentEnhanced(10, intstr.FromInt(2)),
			newRS:         rs("foo-v2", 6, nil, newTimestamp),
			oldRSs:        []*exp.ReplicaSet{rs("foo-v1", 6, nil, oldTimestamp)},
			expectedNew:   rs("foo-v2", 11, nil, newTimestamp),
			expectedOld:   []*exp.ReplicaSet{rs("foo-v1", 11, nil, oldTimestamp)},
		},
		{
			name:          "change both surge and size",
			deployment:    newDeploymentEnhanced(50, intstr.FromInt(6)),
			oldDeployment: newDeploymentEnhanced(10, intstr.FromInt(3)),
			newRS:         rs("foo-v2", 5, nil, newTimestamp),
			oldRSs:        []*exp.ReplicaSet{rs("foo-v1", 8, nil, oldTimestamp)},
			expectedNew:   rs("foo-v2", 22, nil, newTimestamp),
			expectedOld:   []*exp.ReplicaSet{rs("foo-v1", 34, nil, oldTimestamp)},
		},
	}

	for _, test := range tests {
		_ = olderTimestamp
		t.Log(test.name)
		fake := fake.Clientset{}
		dc := &DeploymentController{
			client:        &fake,
			eventRecorder: &record.FakeRecorder{},
		}

		if test.newRS != nil {
			desiredReplicas := test.oldDeployment.Spec.Replicas
			if desired, ok := test.desiredReplicasAnnotations[test.newRS.Name]; ok {
				desiredReplicas = desired
			}
			setReplicasAnnotations(test.newRS, desiredReplicas, desiredReplicas+maxSurge(*test.oldDeployment))
		}
		for i := range test.oldRSs {
			rs := test.oldRSs[i]
			if rs == nil {
				continue
			}
			desiredReplicas := test.oldDeployment.Spec.Replicas
			if desired, ok := test.desiredReplicasAnnotations[rs.Name]; ok {
				desiredReplicas = desired
			}
			setReplicasAnnotations(rs, desiredReplicas, desiredReplicas+maxSurge(*test.oldDeployment))
		}

		if err := dc.scale(test.deployment, test.newRS, test.oldRSs); err != nil {
			t.Errorf("%s: unexpected error: %v", test.name, err)
			continue
		}
		if test.expectedNew != nil && test.newRS != nil && test.expectedNew.Spec.Replicas != test.newRS.Spec.Replicas {
			t.Errorf("%s: expected new replicas: %d, got: %d", test.name, test.expectedNew.Spec.Replicas, test.newRS.Spec.Replicas)
			continue
		}
		if len(test.expectedOld) != len(test.oldRSs) {
			t.Errorf("%s: expected %d old replica sets, got %d", test.name, len(test.expectedOld), len(test.oldRSs))
			continue
		}
		for n := range test.oldRSs {
			rs := test.oldRSs[n]
			exp := test.expectedOld[n]
			if exp.Spec.Replicas != rs.Spec.Replicas {
				t.Errorf("%s: expected old (%s) replicas: %d, got: %d", test.name, rs.Name, exp.Spec.Replicas, rs.Spec.Replicas)
			}
		}
	}
}