func addUpdateRSReactor(fakeClient *fake.Clientset) *fake.Clientset {
	fakeClient.AddReactor("update", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		obj := action.(testclient.UpdateAction).GetObject().(*extensions.ReplicaSet)
		return true, obj, nil
	})
	return fakeClient
}
func addUpdatePodsReactor(fakeClient *fake.Clientset) *fake.Clientset {
	fakeClient.AddReactor("update", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		obj := action.(testclient.UpdateAction).GetObject().(*api.Pod)
		return true, obj, nil
	})
	return fakeClient
}
// hasCreateNamespaceAction returns true if the client recorded a create action for namespaces.
func hasCreateNamespaceAction(mockClient *fake.Clientset) bool {
	for _, action := range mockClient.Actions() {
		if action.GetVerb() == "create" && action.GetResource().Resource == "namespaces" {
			return true
		}
	}
	return false
}
// noAction returns a non-empty error message if either client recorded any actions,
// and "" if both are empty.
func noAction(kubeClient *fake.Clientset, originClient *testclient.Fake) string {
	if len(kubeClient.Actions()) != 0 {
		return fmt.Sprintf("unexpected actions: %v", kubeClient.Actions())
	}
	if len(originClient.Actions()) != 0 {
		return fmt.Sprintf("unexpected actions: %v", originClient.Actions())
	}
	return ""
}
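// A hypothetical use of noAction (not taken from the original tests): after exercising
// a code path that should never reach the API server, fail the test if either fake
// client recorded an action.
//
//	if msg := noAction(kubeClient, originClient); msg != "" {
//		t.Error(msg)
//	}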
func newVolumeReactor(client *fake.Clientset, ctrl *PersistentVolumeController, volumeSource *framework.FakePVControllerSource, claimSource *framework.FakePVCControllerSource, errors []reactorError) *volumeReactor {
	reactor := &volumeReactor{
		volumes:      make(map[string]*api.PersistentVolume),
		claims:       make(map[string]*api.PersistentVolumeClaim),
		ctrl:         ctrl,
		volumeSource: volumeSource,
		claimSource:  claimSource,
		errors:       errors,
	}
	client.AddReactor("*", "*", reactor.React)
	return reactor
}
func TestSyncBatchIgnoresNotFound(t *testing.T) {
	client := fake.Clientset{}
	syncer := newTestManager(&client)
	client.AddReactor("get", "pods", func(action core.Action) (bool, runtime.Object, error) {
		return true, nil, errors.NewNotFound(api.Resource("pods"), "test-pod")
	})

	syncer.SetPodStatus(getTestPod(), getRandomPodStatus())
	syncer.testSyncBatch()

	verifyActions(t, syncer.kubeClient, []core.Action{
		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: "pods"}},
	})
}
func addGetRSReactor(fakeClient *fake.Clientset, obj runtime.Object) *fake.Clientset {
	rsList, ok := obj.(*extensions.ReplicaSetList)
	fakeClient.AddReactor("get", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		name := action.(testclient.GetAction).GetName()
		if ok {
			for _, rs := range rsList.Items {
				if rs.Name == name {
					return true, &rs, nil
				}
			}
		}
		return false, nil, fmt.Errorf("could not find the requested replica set: %s", name)
	})
	return fakeClient
}
func TestDeploymentController_scaleDownOldReplicaSetsForRollingUpdate(t *testing.T) {
	tests := []struct {
		deploymentReplicas  int
		maxUnavailable      intstr.IntOrString
		readyPods           int
		oldReplicas         int
		scaleExpected       bool
		expectedOldReplicas int
	}{
		{deploymentReplicas: 10, maxUnavailable: intstr.FromInt(0), readyPods: 10, oldReplicas: 10, scaleExpected: false},
		{deploymentReplicas: 10, maxUnavailable: intstr.FromInt(2), readyPods: 10, oldReplicas: 10, scaleExpected: true, expectedOldReplicas: 8},
		{deploymentReplicas: 10, maxUnavailable: intstr.FromInt(2), readyPods: 8, oldReplicas: 10, scaleExpected: false},
		{deploymentReplicas: 10, maxUnavailable: intstr.FromInt(2), readyPods: 10, oldReplicas: 0, scaleExpected: false},
	}

	for i, test := range tests {
		t.Logf("executing scenario %d", i)

		oldRS := rs("foo-v2", test.oldReplicas, nil)
		allRSs := []*exp.ReplicaSet{oldRS}
		oldRSs := []*exp.ReplicaSet{oldRS}
		deployment := deployment("foo", test.deploymentReplicas, intstr.FromInt(0), test.maxUnavailable)
		fakeClientset := fake.Clientset{}
		fakeClientset.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
			switch action.(type) {
			case core.ListAction:
				podList := &api.PodList{}
				for podIndex := 0; podIndex < test.readyPods; podIndex++ {
					podList.Items = append(podList.Items, api.Pod{
						ObjectMeta: api.ObjectMeta{
							Name:   fmt.Sprintf("%s-pod-%d", oldRS.Name, podIndex),
							Labels: map[string]string{"foo": "bar"},
						},
						Status: api.PodStatus{Conditions: []api.PodCondition{{Type: api.PodReady, Status: api.ConditionTrue}}},
					})
				}
				return true, podList, nil
			}
			return false, nil, nil
		})
		controller := &DeploymentController{
			client:        &fakeClientset,
			eventRecorder: &record.FakeRecorder{},
		}

		scaled, err := controller.scaleDownOldReplicaSetsForRollingUpdate(allRSs, oldRSs, &deployment)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			continue
		}
		if !test.scaleExpected {
			if scaled != 0 {
				t.Errorf("unexpected scaling: %v", fakeClientset.Actions())
			}
			continue
		}
		if test.scaleExpected && scaled == 0 {
			t.Errorf("expected scaling to occur; actions: %v", fakeClientset.Actions())
			continue
		}
		// There are both list and update actions logged, so extract the update
		// action for verification.
		var updateAction testclient.UpdateAction
		for _, action := range fakeClientset.Actions() {
			switch a := action.(type) {
			case testclient.UpdateAction:
				if updateAction != nil {
					t.Errorf("expected only 1 update action; had %v and found %v", updateAction, a)
				} else {
					updateAction = a
				}
			}
		}
		if updateAction == nil {
			t.Errorf("expected an update action")
			continue
		}
		updated := updateAction.GetObject().(*exp.ReplicaSet)
		if e, a := test.expectedOldReplicas, updated.Spec.Replicas; e != a {
			t.Errorf("expected update to %d replicas, got %d", e, a)
		}
	}
}
func TestDeploymentController_cleanupUnhealthyReplicas(t *testing.T) {
	tests := []struct {
		oldReplicas          int
		readyPods            int
		unHealthyPods        int
		maxCleanupCount      int
		cleanupCountExpected int
	}{
		{oldReplicas: 10, readyPods: 8, unHealthyPods: 2, maxCleanupCount: 1, cleanupCountExpected: 1},
		{oldReplicas: 10, readyPods: 8, unHealthyPods: 2, maxCleanupCount: 3, cleanupCountExpected: 2},
		{oldReplicas: 10, readyPods: 8, unHealthyPods: 2, maxCleanupCount: 0, cleanupCountExpected: 0},
		{oldReplicas: 10, readyPods: 10, unHealthyPods: 0, maxCleanupCount: 3, cleanupCountExpected: 0},
	}

	for i, test := range tests {
		t.Logf("executing scenario %d", i)

		oldRS := rs("foo-v2", test.oldReplicas, nil)
		oldRSs := []*exp.ReplicaSet{oldRS}
		deployment := deployment("foo", 10, intstr.FromInt(2), intstr.FromInt(2))
		fakeClientset := fake.Clientset{}
		fakeClientset.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
			switch action.(type) {
			case core.ListAction:
				podList := &api.PodList{}
				for podIndex := 0; podIndex < test.readyPods; podIndex++ {
					podList.Items = append(podList.Items, api.Pod{
						ObjectMeta: api.ObjectMeta{Name: fmt.Sprintf("%s-readyPod-%d", oldRS.Name, podIndex)},
						Status:     api.PodStatus{Conditions: []api.PodCondition{{Type: api.PodReady, Status: api.ConditionTrue}}},
					})
				}
				for podIndex := 0; podIndex < test.unHealthyPods; podIndex++ {
					podList.Items = append(podList.Items, api.Pod{
						ObjectMeta: api.ObjectMeta{Name: fmt.Sprintf("%s-unHealthyPod-%d", oldRS.Name, podIndex)},
						Status:     api.PodStatus{Conditions: []api.PodCondition{{Type: api.PodReady, Status: api.ConditionFalse}}},
					})
				}
				return true, podList, nil
			}
			return false, nil, nil
		})
		controller := &DeploymentController{
			client:        &fakeClientset,
			eventRecorder: &record.FakeRecorder{},
		}

		cleanupCount, err := controller.cleanupUnhealthyReplicas(oldRSs, &deployment, test.maxCleanupCount)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			continue
		}
		if cleanupCount != test.cleanupCountExpected {
			t.Errorf("expected %v unhealthy replicas to be cleaned up, got %v", test.cleanupCountExpected, cleanupCount)
			continue
		}
	}
}
func TestDeploymentController_reconcileOldReplicaSets(t *testing.T) {
	tests := []struct {
		deploymentReplicas  int
		maxUnavailable      intstr.IntOrString
		oldReplicas         int
		newReplicas         int
		readyPodsFromOldRS  int
		readyPodsFromNewRS  int
		scaleExpected       bool
		expectedOldReplicas int
	}{
		{deploymentReplicas: 10, maxUnavailable: intstr.FromInt(0), oldReplicas: 10, newReplicas: 0, readyPodsFromOldRS: 10, readyPodsFromNewRS: 0, scaleExpected: false},
		{deploymentReplicas: 10, maxUnavailable: intstr.FromInt(2), oldReplicas: 10, newReplicas: 0, readyPodsFromOldRS: 10, readyPodsFromNewRS: 0, scaleExpected: true, expectedOldReplicas: 8},
		// expect unhealthy replicas from old replica sets to be cleaned up
		{deploymentReplicas: 10, maxUnavailable: intstr.FromInt(2), oldReplicas: 10, newReplicas: 0, readyPodsFromOldRS: 8, readyPodsFromNewRS: 0, scaleExpected: true, expectedOldReplicas: 8},
		// expect 1 unhealthy replica from old replica sets to be cleaned up, and 1 ready pod to be scaled down
		{deploymentReplicas: 10, maxUnavailable: intstr.FromInt(2), oldReplicas: 10, newReplicas: 0, readyPodsFromOldRS: 9, readyPodsFromNewRS: 0, scaleExpected: true, expectedOldReplicas: 8},
		// the unavailable pods from the newRS would not make us scale down old RSs in a further step
		{deploymentReplicas: 10, maxUnavailable: intstr.FromInt(2), oldReplicas: 8, newReplicas: 2, readyPodsFromOldRS: 8, readyPodsFromNewRS: 0, scaleExpected: false},
	}

	for i, test := range tests {
		t.Logf("executing scenario %d", i)

		newSelector := map[string]string{"foo": "new"}
		oldSelector := map[string]string{"foo": "old"}
		newRS := rs("foo-new", test.newReplicas, newSelector)
		oldRS := rs("foo-old", test.oldReplicas, oldSelector)
		oldRSs := []*exp.ReplicaSet{oldRS}
		allRSs := []*exp.ReplicaSet{oldRS, newRS}
		deployment := deployment("foo", test.deploymentReplicas, intstr.FromInt(0), test.maxUnavailable)
		fakeClientset := fake.Clientset{}
		fakeClientset.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
			switch action.(type) {
			case core.ListAction:
				podList := &api.PodList{}
				// ready pods belonging to the old replica set
				for podIndex := 0; podIndex < test.readyPodsFromOldRS; podIndex++ {
					podList.Items = append(podList.Items, api.Pod{
						ObjectMeta: api.ObjectMeta{
							Name:   fmt.Sprintf("%s-oldReadyPod-%d", oldRS.Name, podIndex),
							Labels: oldSelector,
						},
						Status: api.PodStatus{Conditions: []api.PodCondition{{Type: api.PodReady, Status: api.ConditionTrue}}},
					})
				}
				// unhealthy pods belonging to the old replica set
				for podIndex := 0; podIndex < test.oldReplicas-test.readyPodsFromOldRS; podIndex++ {
					podList.Items = append(podList.Items, api.Pod{
						ObjectMeta: api.ObjectMeta{
							Name:   fmt.Sprintf("%s-oldUnhealthyPod-%d", oldRS.Name, podIndex),
							Labels: oldSelector,
						},
						Status: api.PodStatus{Conditions: []api.PodCondition{{Type: api.PodReady, Status: api.ConditionFalse}}},
					})
				}
				// ready pods belonging to the new replica set
				for podIndex := 0; podIndex < test.readyPodsFromNewRS; podIndex++ {
					podList.Items = append(podList.Items, api.Pod{
						ObjectMeta: api.ObjectMeta{
							Name:   fmt.Sprintf("%s-newReadyPod-%d", newRS.Name, podIndex),
							Labels: newSelector,
						},
						Status: api.PodStatus{Conditions: []api.PodCondition{{Type: api.PodReady, Status: api.ConditionTrue}}},
					})
				}
				// unhealthy pods belonging to the new replica set
				for podIndex := 0; podIndex < test.newReplicas-test.readyPodsFromNewRS; podIndex++ {
					podList.Items = append(podList.Items, api.Pod{
						ObjectMeta: api.ObjectMeta{
							Name:   fmt.Sprintf("%s-newUnhealthyPod-%d", newRS.Name, podIndex),
							Labels: newSelector,
						},
						Status: api.PodStatus{Conditions: []api.PodCondition{{Type: api.PodReady, Status: api.ConditionFalse}}},
					})
				}
				return true, podList, nil
			}
			return false, nil, nil
		})
		controller := &DeploymentController{
			client:        &fakeClientset,
			eventRecorder: &record.FakeRecorder{},
		}

		scaled, err := controller.reconcileOldReplicaSets(allRSs, oldRSs, newRS, &deployment)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			continue
		}
		if !test.scaleExpected && scaled {
			t.Errorf("unexpected scaling: %v", fakeClientset.Actions())
		}
		if test.scaleExpected && !scaled {
			t.Errorf("expected scaling to occur")
			continue
		}
	}
}
func TestDeploymentController_reconcileNewReplicaSet(t *testing.T) {
	tests := []struct {
		deploymentReplicas  int
		maxSurge            intstr.IntOrString
		oldReplicas         int
		newReplicas         int
		scaleExpected       bool
		expectedNewReplicas int
	}{
		// Should not scale up.
		{deploymentReplicas: 10, maxSurge: intstr.FromInt(0), oldReplicas: 10, newReplicas: 0, scaleExpected: false},
		{deploymentReplicas: 10, maxSurge: intstr.FromInt(2), oldReplicas: 10, newReplicas: 0, scaleExpected: true, expectedNewReplicas: 2},
		{deploymentReplicas: 10, maxSurge: intstr.FromInt(2), oldReplicas: 5, newReplicas: 0, scaleExpected: true, expectedNewReplicas: 7},
		{deploymentReplicas: 10, maxSurge: intstr.FromInt(2), oldReplicas: 10, newReplicas: 2, scaleExpected: false},
		// Should scale down.
		{deploymentReplicas: 10, maxSurge: intstr.FromInt(2), oldReplicas: 2, newReplicas: 11, scaleExpected: true, expectedNewReplicas: 10},
	}

	for i, test := range tests {
		t.Logf("executing scenario %d", i)

		newRS := rs("foo-v2", test.newReplicas, nil)
		oldRS := rs("foo-v2", test.oldReplicas, nil)
		allRSs := []*exp.ReplicaSet{newRS, oldRS}
		deployment := deployment("foo", test.deploymentReplicas, test.maxSurge, intstr.FromInt(0))
		fake := fake.Clientset{}
		controller := &DeploymentController{
			client:        &fake,
			eventRecorder: &record.FakeRecorder{},
		}

		scaled, err := controller.reconcileNewReplicaSet(allRSs, newRS, &deployment)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			continue
		}
		if !test.scaleExpected {
			if scaled || len(fake.Actions()) > 0 {
				t.Errorf("unexpected scaling: %v", fake.Actions())
			}
			continue
		}
		if test.scaleExpected && !scaled {
			t.Errorf("expected scaling to occur")
			continue
		}
		if len(fake.Actions()) != 1 {
			t.Errorf("expected 1 action during scale, got: %v", fake.Actions())
			continue
		}
		updated := fake.Actions()[0].(testclient.UpdateAction).GetObject().(*exp.ReplicaSet)
		if e, a := test.expectedNewReplicas, updated.Spec.Replicas; e != a {
			t.Errorf("expected update to %d replicas, got %d", e, a)
		}
	}
}
func addListPodsReactor(fakeClient *fake.Clientset, obj runtime.Object) *fake.Clientset {
	fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		return true, obj, nil
	})
	return fakeClient
}
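// newFakeClientWithRSAndPods is a hypothetical convenience helper, not part of the
// original tests: it merely sketches how the add*Reactor helpers above are meant to be
// chained onto a single fake clientset before handing it to a controller under test.
// The rsList and podList fixtures are assumed to be built by the caller.
func newFakeClientWithRSAndPods(rsList *extensions.ReplicaSetList, podList *api.PodList) *fake.Clientset {
	fakeClient := &fake.Clientset{}
	fakeClient = addGetRSReactor(fakeClient, rsList)     // serve GETs for individual replica sets out of rsList
	fakeClient = addUpdateRSReactor(fakeClient)          // echo replica set updates back to the caller
	fakeClient = addListPodsReactor(fakeClient, podList) // return the canned pod list for LIST pods
	fakeClient = addUpdatePodsReactor(fakeClient)        // echo pod updates back to the caller
	return fakeClient
}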
func TestDeploymentController_scaleDownOldReplicaSetsForRollingUpdate(t *testing.T) {
	tests := []struct {
		deploymentReplicas  int
		maxUnavailable      intstr.IntOrString
		readyPods           int
		oldReplicas         int
		scaleExpected       bool
		expectedOldReplicas int
	}{
		{deploymentReplicas: 10, maxUnavailable: intstr.FromInt(0), readyPods: 10, oldReplicas: 10, scaleExpected: true, expectedOldReplicas: 9},
		{deploymentReplicas: 10, maxUnavailable: intstr.FromInt(2), readyPods: 10, oldReplicas: 10, scaleExpected: true, expectedOldReplicas: 8},
		{deploymentReplicas: 10, maxUnavailable: intstr.FromInt(2), readyPods: 8, oldReplicas: 10, scaleExpected: false},
		{deploymentReplicas: 10, maxUnavailable: intstr.FromInt(2), readyPods: 10, oldReplicas: 0, scaleExpected: false},
		{deploymentReplicas: 10, maxUnavailable: intstr.FromInt(2), readyPods: 1, oldReplicas: 10, scaleExpected: false},
	}

	for i := range tests {
		test := tests[i]
		t.Logf("executing scenario %d", i)

		oldRS := rs("foo-v2", test.oldReplicas, nil, noTimestamp)
		oldRS.Status.AvailableReplicas = int32(test.readyPods)
		allRSs := []*extensions.ReplicaSet{oldRS}
		oldRSs := []*extensions.ReplicaSet{oldRS}
		maxSurge := intstr.FromInt(0)
		deployment := newDeployment("foo", test.deploymentReplicas, nil, &maxSurge, &test.maxUnavailable, map[string]string{"foo": "bar"})
		fakeClientset := fake.Clientset{}
		controller := &DeploymentController{
			client:        &fakeClientset,
			eventRecorder: &record.FakeRecorder{},
		}

		scaled, err := controller.scaleDownOldReplicaSetsForRollingUpdate(allRSs, oldRSs, deployment)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			continue
		}
		if !test.scaleExpected {
			if scaled != 0 {
				t.Errorf("unexpected scaling: %v", fakeClientset.Actions())
			}
			continue
		}
		if test.scaleExpected && scaled == 0 {
			t.Errorf("expected scaling to occur; actions: %v", fakeClientset.Actions())
			continue
		}
		// There are both list and update actions logged, so extract the update
		// action for verification.
		var updateAction core.UpdateAction
		for _, action := range fakeClientset.Actions() {
			switch a := action.(type) {
			case core.UpdateAction:
				if updateAction != nil {
					t.Errorf("expected only 1 update action; had %v and found %v", updateAction, a)
				} else {
					updateAction = a
				}
			}
		}
		if updateAction == nil {
			t.Errorf("expected an update action")
			continue
		}
		updated := updateAction.GetObject().(*extensions.ReplicaSet)
		if e, a := test.expectedOldReplicas, int(updated.Spec.Replicas); e != a {
			t.Errorf("expected update to %d replicas, got %d", e, a)
		}
	}
}
func TestDeploymentController_reconcileOldReplicaSets(t *testing.T) {
	tests := []struct {
		deploymentReplicas  int
		maxUnavailable      intstr.IntOrString
		oldReplicas         int
		newReplicas         int
		readyPodsFromOldRS  int
		readyPodsFromNewRS  int
		scaleExpected       bool
		expectedOldReplicas int
	}{
		{deploymentReplicas: 10, maxUnavailable: intstr.FromInt(0), oldReplicas: 10, newReplicas: 0, readyPodsFromOldRS: 10, readyPodsFromNewRS: 0, scaleExpected: true, expectedOldReplicas: 9},
		{deploymentReplicas: 10, maxUnavailable: intstr.FromInt(2), oldReplicas: 10, newReplicas: 0, readyPodsFromOldRS: 10, readyPodsFromNewRS: 0, scaleExpected: true, expectedOldReplicas: 8},
		// expect unhealthy replicas from old replica sets to be cleaned up
		{deploymentReplicas: 10, maxUnavailable: intstr.FromInt(2), oldReplicas: 10, newReplicas: 0, readyPodsFromOldRS: 8, readyPodsFromNewRS: 0, scaleExpected: true, expectedOldReplicas: 8},
		// expect 1 unhealthy replica from old replica sets to be cleaned up, and 1 ready pod to be scaled down
		{deploymentReplicas: 10, maxUnavailable: intstr.FromInt(2), oldReplicas: 10, newReplicas: 0, readyPodsFromOldRS: 9, readyPodsFromNewRS: 0, scaleExpected: true, expectedOldReplicas: 8},
		// the unavailable pods from the newRS would not make us scale down old RSs in a further step
		{deploymentReplicas: 10, maxUnavailable: intstr.FromInt(2), oldReplicas: 8, newReplicas: 2, readyPodsFromOldRS: 8, readyPodsFromNewRS: 0, scaleExpected: false},
	}

	for i := range tests {
		test := tests[i]
		t.Logf("executing scenario %d", i)

		newSelector := map[string]string{"foo": "new"}
		oldSelector := map[string]string{"foo": "old"}
		newRS := rs("foo-new", test.newReplicas, newSelector, noTimestamp)
		newRS.Status.AvailableReplicas = int32(test.readyPodsFromNewRS)
		oldRS := rs("foo-old", test.oldReplicas, oldSelector, noTimestamp)
		oldRS.Status.AvailableReplicas = int32(test.readyPodsFromOldRS)
		oldRSs := []*extensions.ReplicaSet{oldRS}
		allRSs := []*extensions.ReplicaSet{oldRS, newRS}
		maxSurge := intstr.FromInt(0)
		deployment := newDeployment("foo", test.deploymentReplicas, nil, &maxSurge, &test.maxUnavailable, newSelector)
		fakeClientset := fake.Clientset{}
		controller := &DeploymentController{
			client:        &fakeClientset,
			eventRecorder: &record.FakeRecorder{},
		}

		scaled, err := controller.reconcileOldReplicaSets(allRSs, oldRSs, newRS, deployment)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			continue
		}
		if !test.scaleExpected && scaled {
			t.Errorf("unexpected scaling: %v", fakeClientset.Actions())
		}
		if test.scaleExpected && !scaled {
			t.Errorf("expected scaling to occur")
			continue
		}
	}
}
func TestScale(t *testing.T) {
	newTimestamp := unversioned.Date(2016, 5, 20, 2, 0, 0, 0, time.UTC)
	oldTimestamp := unversioned.Date(2016, 5, 20, 1, 0, 0, 0, time.UTC)
	olderTimestamp := unversioned.Date(2016, 5, 20, 0, 0, 0, 0, time.UTC)

	var updatedTemplate = func(replicas int) *extensions.Deployment {
		d := newDeployment("foo", replicas, nil, nil, nil, map[string]string{"foo": "bar"})
		d.Spec.Template.Labels["another"] = "label"
		return d
	}

	tests := []struct {
		name                       string
		deployment                 *extensions.Deployment
		oldDeployment              *extensions.Deployment
		newRS                      *extensions.ReplicaSet
		oldRSs                     []*extensions.ReplicaSet
		expectedNew                *extensions.ReplicaSet
		expectedOld                []*extensions.ReplicaSet
		wasntUpdated               map[string]bool
		desiredReplicasAnnotations map[string]int32
	}{
		{
			name:          "normal scaling event: 10 -> 12",
			deployment:    newDeployment("foo", 12, nil, nil, nil, nil),
			oldDeployment: newDeployment("foo", 10, nil, nil, nil, nil),
			newRS:         rs("foo-v1", 10, nil, newTimestamp),
			oldRSs:        []*extensions.ReplicaSet{},
			expectedNew:   rs("foo-v1", 12, nil, newTimestamp),
			expectedOld:   []*extensions.ReplicaSet{},
		},
		{
			name:          "normal scaling event: 10 -> 5",
			deployment:    newDeployment("foo", 5, nil, nil, nil, nil),
			oldDeployment: newDeployment("foo", 10, nil, nil, nil, nil),
			newRS:         rs("foo-v1", 10, nil, newTimestamp),
			oldRSs:        []*extensions.ReplicaSet{},
			expectedNew:   rs("foo-v1", 5, nil, newTimestamp),
			expectedOld:   []*extensions.ReplicaSet{},
		},
		{
			name:          "proportional scaling: 5 -> 10",
			deployment:    newDeployment("foo", 10, nil, nil, nil, nil),
			oldDeployment: newDeployment("foo", 5, nil, nil, nil, nil),
			newRS:         rs("foo-v2", 2, nil, newTimestamp),
			oldRSs:        []*extensions.ReplicaSet{rs("foo-v1", 3, nil, oldTimestamp)},
			expectedNew:   rs("foo-v2", 4, nil, newTimestamp),
			expectedOld:   []*extensions.ReplicaSet{rs("foo-v1", 6, nil, oldTimestamp)},
		},
		{
			name:          "proportional scaling: 5 -> 3",
			deployment:    newDeployment("foo", 3, nil, nil, nil, nil),
			oldDeployment: newDeployment("foo", 5, nil, nil, nil, nil),
			newRS:         rs("foo-v2", 2, nil, newTimestamp),
			oldRSs:        []*extensions.ReplicaSet{rs("foo-v1", 3, nil, oldTimestamp)},
			expectedNew:   rs("foo-v2", 1, nil, newTimestamp),
			expectedOld:   []*extensions.ReplicaSet{rs("foo-v1", 2, nil, oldTimestamp)},
		},
		{
			name:          "proportional scaling: 9 -> 4",
			deployment:    newDeployment("foo", 4, nil, nil, nil, nil),
			oldDeployment: newDeployment("foo", 9, nil, nil, nil, nil),
			newRS:         rs("foo-v2", 8, nil, newTimestamp),
			oldRSs:        []*extensions.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)},
			expectedNew:   rs("foo-v2", 4, nil, newTimestamp),
			expectedOld:   []*extensions.ReplicaSet{rs("foo-v1", 0, nil, oldTimestamp)},
		},
		{
			name:          "proportional scaling: 7 -> 10",
			deployment:    newDeployment("foo", 10, nil, nil, nil, nil),
			oldDeployment: newDeployment("foo", 7, nil, nil, nil, nil),
			newRS:         rs("foo-v3", 2, nil, newTimestamp),
			oldRSs:        []*extensions.ReplicaSet{rs("foo-v2", 3, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},
			expectedNew:   rs("foo-v3", 3, nil, newTimestamp),
			expectedOld:   []*extensions.ReplicaSet{rs("foo-v2", 4, nil, oldTimestamp), rs("foo-v1", 3, nil, olderTimestamp)},
		},
		{
			name:          "proportional scaling: 13 -> 8",
			deployment:    newDeployment("foo", 8, nil, nil, nil, nil),
			oldDeployment: newDeployment("foo", 13, nil, nil, nil, nil),
			newRS:         rs("foo-v3", 2, nil, newTimestamp),
			oldRSs:        []*extensions.ReplicaSet{rs("foo-v2", 8, nil, oldTimestamp), rs("foo-v1", 3, nil, olderTimestamp)},
			expectedNew:   rs("foo-v3", 1, nil, newTimestamp),
			expectedOld:   []*extensions.ReplicaSet{rs("foo-v2", 5, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},
		},
		// Scales up the new replica set.
		{
			name:          "leftover distribution: 3 -> 4",
			deployment:    newDeployment("foo", 4, nil, nil, nil, nil),
			oldDeployment: newDeployment("foo", 3, nil, nil, nil, nil),
			newRS:         rs("foo-v3", 1, nil, newTimestamp),
			oldRSs:        []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
			expectedNew:   rs("foo-v3", 2, nil, newTimestamp),
			expectedOld:   []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
		},
		// Scales down the older replica set.
		{
			name:          "leftover distribution: 3 -> 2",
			deployment:    newDeployment("foo", 2, nil, nil, nil, nil),
			oldDeployment: newDeployment("foo", 3, nil, nil, nil, nil),
			newRS:         rs("foo-v3", 1, nil, newTimestamp),
			oldRSs:        []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
			expectedNew:   rs("foo-v3", 1, nil, newTimestamp),
			expectedOld:   []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
		},
		// Scales up the latest replica set first.
		{
			name:          "proportional scaling (no new rs): 4 -> 5",
			deployment:    newDeployment("foo", 5, nil, nil, nil, nil),
			oldDeployment: newDeployment("foo", 4, nil, nil, nil, nil),
			newRS:         nil,
			oldRSs:        []*extensions.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},
			expectedNew:   nil,
			expectedOld:   []*extensions.ReplicaSet{rs("foo-v2", 3, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},
		},
		// Scales down to zero.
		{
			name:          "proportional scaling: 6 -> 0",
			deployment:    newDeployment("foo", 0, nil, nil, nil, nil),
			oldDeployment: newDeployment("foo", 6, nil, nil, nil, nil),
			newRS:         rs("foo-v3", 3, nil, newTimestamp),
			oldRSs:        []*extensions.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
			expectedNew:   rs("foo-v3", 0, nil, newTimestamp),
			expectedOld:   []*extensions.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
		},
		// Scales up from zero.
		{
			name:          "proportional scaling: 0 -> 6",
			deployment:    newDeployment("foo", 6, nil, nil, nil, nil),
			oldDeployment: newDeployment("foo", 6, nil, nil, nil, nil),
			newRS:         rs("foo-v3", 0, nil, newTimestamp),
			oldRSs:        []*extensions.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
			expectedNew:   rs("foo-v3", 6, nil, newTimestamp),
			expectedOld:   []*extensions.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
			wasntUpdated:  map[string]bool{"foo-v2": true, "foo-v1": true},
		},
		// Scenario: deployment.spec.replicas == 3 (foo-v1.spec.replicas == foo-v2.spec.replicas == foo-v3.spec.replicas == 1).
		// Deployment is scaled to 5. foo-v3.spec.replicas and foo-v2.spec.replicas should increment by 1 but foo-v2 fails to
		// update.
		{
			name:                       "failed rs update",
			deployment:                 newDeployment("foo", 5, nil, nil, nil, nil),
			oldDeployment:              newDeployment("foo", 5, nil, nil, nil, nil),
			newRS:                      rs("foo-v3", 2, nil, newTimestamp),
			oldRSs:                     []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
			expectedNew:                rs("foo-v3", 2, nil, newTimestamp),
			expectedOld:                []*extensions.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
			wasntUpdated:               map[string]bool{"foo-v3": true, "foo-v1": true},
			desiredReplicasAnnotations: map[string]int32{"foo-v2": int32(3)},
		},
		{
			name:          "deployment with surge pods",
			deployment:    newDeployment("foo", 20, nil, maxSurge(2), nil, nil),
			oldDeployment: newDeployment("foo", 10, nil, maxSurge(2), nil, nil),
			newRS:         rs("foo-v2", 6, nil, newTimestamp),
			oldRSs:        []*extensions.ReplicaSet{rs("foo-v1", 6, nil, oldTimestamp)},
			expectedNew:   rs("foo-v2", 11, nil, newTimestamp),
			expectedOld:   []*extensions.ReplicaSet{rs("foo-v1", 11, nil, oldTimestamp)},
		},
		{
			name:          "change both surge and size",
			deployment:    newDeployment("foo", 50, nil, maxSurge(6), nil, nil),
			oldDeployment: newDeployment("foo", 10, nil, maxSurge(3), nil, nil),
			newRS:         rs("foo-v2", 5, nil, newTimestamp),
			oldRSs:        []*extensions.ReplicaSet{rs("foo-v1", 8, nil, oldTimestamp)},
			expectedNew:   rs("foo-v2", 22, nil, newTimestamp),
			expectedOld:   []*extensions.ReplicaSet{rs("foo-v1", 34, nil, oldTimestamp)},
		},
		{
			name:          "change both size and template",
			deployment:    updatedTemplate(14),
			oldDeployment: newDeployment("foo", 10, nil, nil, nil, map[string]string{"foo": "bar"}),
			newRS:         nil,
			oldRSs:        []*extensions.ReplicaSet{rs("foo-v2", 7, nil, newTimestamp), rs("foo-v1", 3, nil, oldTimestamp)},
			expectedNew:   nil,
			expectedOld:   []*extensions.ReplicaSet{rs("foo-v2", 10, nil, newTimestamp), rs("foo-v1", 4, nil, oldTimestamp)},
		},
	}

	for _, test := range tests {
		t.Log(test.name)

		fake := fake.Clientset{}
		dc := &DeploymentController{
			client:        &fake,
			eventRecorder: &record.FakeRecorder{},
		}

		if test.newRS != nil {
			desiredReplicas := test.oldDeployment.Spec.Replicas
			if desired, ok := test.desiredReplicasAnnotations[test.newRS.Name]; ok {
				desiredReplicas = desired
			}
			deploymentutil.SetReplicasAnnotations(test.newRS, desiredReplicas, desiredReplicas+deploymentutil.MaxSurge(*test.oldDeployment))
		}
		for i := range test.oldRSs {
			rs := test.oldRSs[i]
			if rs == nil {
				continue
			}
			desiredReplicas := test.oldDeployment.Spec.Replicas
			if desired, ok := test.desiredReplicasAnnotations[rs.Name]; ok {
				desiredReplicas = desired
			}
			deploymentutil.SetReplicasAnnotations(rs, desiredReplicas, desiredReplicas+deploymentutil.MaxSurge(*test.oldDeployment))
		}

		if err := dc.scale(test.deployment, test.newRS, test.oldRSs); err != nil {
			t.Errorf("%s: unexpected error: %v", test.name, err)
			continue
		}

		// Construct the nameToSize map that will hold all the sizes we got out of the tests.
		// Skip updating the map if the replica set wasn't updated since there will be
		// no update action for it.
		nameToSize := make(map[string]int32)
		if test.newRS != nil {
			nameToSize[test.newRS.Name] = test.newRS.Spec.Replicas
		}
		for i := range test.oldRSs {
			rs := test.oldRSs[i]
			nameToSize[rs.Name] = rs.Spec.Replicas
		}
		// Get all the UPDATE actions and update nameToSize with all the updated sizes.
		for _, action := range fake.Actions() {
			rs := action.(testclient.UpdateAction).GetObject().(*extensions.ReplicaSet)
			if !test.wasntUpdated[rs.Name] {
				nameToSize[rs.Name] = rs.Spec.Replicas
			}
		}

		if test.expectedNew != nil && test.newRS != nil && test.expectedNew.Spec.Replicas != nameToSize[test.newRS.Name] {
			t.Errorf("%s: expected new replicas: %d, got: %d", test.name, test.expectedNew.Spec.Replicas, nameToSize[test.newRS.Name])
			continue
		}
		if len(test.expectedOld) != len(test.oldRSs) {
			t.Errorf("%s: expected %d old replica sets, got %d", test.name, len(test.expectedOld), len(test.oldRSs))
			continue
		}
		for n := range test.oldRSs {
			rs := test.oldRSs[n]
			expected := test.expectedOld[n]
			if expected.Spec.Replicas != nameToSize[rs.Name] {
				t.Errorf("%s: expected old (%s) replicas: %d, got: %d", test.name, rs.Name, expected.Spec.Replicas, nameToSize[rs.Name])
			}
		}
	}
}