func TestSyncReplicaSetDormancy(t *testing.T) {
	// Setup a test server so we can lie about the current state of pods
	fakeHandler := utiltesting.FakeHandler{
		StatusCode:   200,
		ResponseBody: "{}",
	}
	testServer := httptest.NewServer(&fakeHandler)
	defer testServer.Close()
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

	fakePodControl := controller.FakePodControl{}
	manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
	manager.podStoreSynced = alwaysReady
	manager.podControl = &fakePodControl

	labelMap := map[string]string{"foo": "bar"}
	rsSpec := newReplicaSet(2, labelMap)
	manager.rsStore.Store.Add(rsSpec)
	newPodList(manager.podStore.Indexer, 1, api.PodRunning, labelMap, rsSpec, "pod")

	// Creates a replica and sets expectations
	rsSpec.Status.Replicas = 1
	rsSpec.Status.ReadyReplicas = 1
	manager.syncReplicaSet(getKey(rsSpec, t))
	validateSyncReplicaSet(t, &fakePodControl, 1, 0, 0)

	// Expectations prevent replica creation but not a status update
	rsSpec.Status.Replicas = 0
	rsSpec.Status.ReadyReplicas = 0
	fakePodControl.Clear()
	manager.syncReplicaSet(getKey(rsSpec, t))
	validateSyncReplicaSet(t, &fakePodControl, 0, 0, 0)

	// Get the key for the controller
	rsKey, err := controller.KeyFunc(rsSpec)
	if err != nil {
		t.Errorf("Couldn't get key for object %#v: %v", rsSpec, err)
	}

	// Lowering expectations should lead to a sync that creates a replica, however the
	// fakePodControl error will prevent this, leaving expectations at 0, 0
	manager.expectations.CreationObserved(rsKey)
	rsSpec.Status.Replicas = 1
	rsSpec.Status.ReadyReplicas = 1
	fakePodControl.Clear()
	fakePodControl.Err = fmt.Errorf("Fake Error")

	manager.syncReplicaSet(getKey(rsSpec, t))
	validateSyncReplicaSet(t, &fakePodControl, 1, 0, 0)

	// This replica should not need a lowering of expectations, since the previous create failed
	fakePodControl.Clear()
	fakePodControl.Err = nil
	manager.syncReplicaSet(getKey(rsSpec, t))
	validateSyncReplicaSet(t, &fakePodControl, 1, 0, 0)

	// 1 PUT for the ReplicaSet status during dormancy window.
	// Note that the pod creates go through pod control so they're not recorded.
	fakeHandler.ValidateRequestCount(t, 1)
}
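// A minimal, runnable sketch (not part of the original suite) of the
// expectations lifecycle the dormancy test above exercises: a sync records how
// many creates it issued and goes dormant; watch events lower the count until
// the controller may sync again. Assumes the controller.ControllerExpectations
// API used elsewhere in this file (NewControllerExpectations, SetExpectations,
// CreationObserved, SatisfiedExpectations); exact signatures vary by release.
func Example_expectationsDormancy() {
	exp := controller.NewControllerExpectations()
	key := "default/frontend" // hypothetical namespace/name key

	exp.SetExpectations(key, 2, 0)              // sync issued 2 creates, go dormant
	fmt.Println(exp.SatisfiedExpectations(key)) // false: 2 creates outstanding

	exp.CreationObserved(key) // watch saw the first pod
	exp.CreationObserved(key) // watch saw the second pod
	fmt.Println(exp.SatisfiedExpectations(key)) // true: controller wakes up

	// Output:
	// false
	// true
}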
func TestDeleteControllerAndExpectations(t *testing.T) {
	// Setup a fake server to listen for requests, and run the rc manager in steady state
	fakeResponse := serverResponse{
		statusCode: 200,
		obj:        &api.ReplicationController{},
	}
	testServer, _ := makeTestServer(t, api.NamespaceDefault, api.TenantDefault, fakeResponse)
	defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Default.Version()})

	manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, 10)
	manager.podStoreSynced = alwaysReady

	rc := newReplicationController(1)
	manager.rcStore.Store.Add(rc)

	fakePodControl := controller.FakePodControl{}
	manager.podControl = &fakePodControl

	// This should set expectations for the rc
	manager.syncReplicationController(getKey(rc, t))
	validateSyncReplication(t, &fakePodControl, 1, 0)
	fakePodControl.Clear()

	// Get the RC key
	rcKey, err := controller.KeyFunc(rc)
	if err != nil {
		t.Errorf("Couldn't get key for object %+v: %v", rc, err)
	}

	// This is to simulate a concurrent addPod that has a handle on the expectations
	// as the controller deletes it.
	podExp, exists, err := manager.expectations.GetExpectations(rcKey)
	if !exists || err != nil {
		t.Errorf("No expectations found for rc")
	}
	manager.rcStore.Delete(rc)
	manager.syncReplicationController(getKey(rc, t))

	if _, exists, err = manager.expectations.GetExpectations(rcKey); exists {
		t.Errorf("Found expectations, expected none since the rc has been deleted.")
	}

	// This should have no effect, since we've deleted the rc.
	podExp.Seen(1, 0)
	manager.podStore.Store.Replace(make([]interface{}, 0), "0")
	manager.syncReplicationController(getKey(rc, t))
	validateSyncReplication(t, &fakePodControl, 0, 0)
}
func TestDeleteControllerAndExpectations(t *testing.T) {
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, 10, 0)
	manager.podStoreSynced = alwaysReady

	rs := newReplicaSet(1, map[string]string{"foo": "bar"})
	manager.rsStore.Store.Add(rs)

	fakePodControl := controller.FakePodControl{}
	manager.podControl = &fakePodControl

	// This should set expectations for the ReplicaSet
	manager.syncReplicaSet(getKey(rs, t))
	validateSyncReplicaSet(t, &fakePodControl, 1, 0, 0)
	fakePodControl.Clear()

	// Get the ReplicaSet key
	rsKey, err := controller.KeyFunc(rs)
	if err != nil {
		t.Errorf("Couldn't get key for object %#v: %v", rs, err)
	}

	// This is to simulate a concurrent addPod that has a handle on the expectations
	// as the controller deletes it.
	podExp, exists, err := manager.expectations.GetExpectations(rsKey)
	if !exists || err != nil {
		t.Errorf("No expectations found for ReplicaSet")
	}
	manager.rsStore.Delete(rs)
	manager.syncReplicaSet(getKey(rs, t))

	if _, exists, err = manager.expectations.GetExpectations(rsKey); exists {
		t.Errorf("Found expectations, expected none since the ReplicaSet has been deleted.")
	}

	// This should have no effect, since we've deleted the ReplicaSet.
	podExp.Add(-1, 0)
	manager.podStore.Indexer.Replace(make([]interface{}, 0), "0")
	manager.syncReplicaSet(getKey(rs, t))
	validateSyncReplicaSet(t, &fakePodControl, 0, 0, 0)
}
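// A sketch (assumed, not in the original file) of why the stale podExp handle
// in the delete tests above is harmless: once the controller's key is removed
// from the expectations store, lookups miss regardless of what the old handle
// records. Assumes NewControllerExpectations and DeleteExpectations from
// pkg/controller, which is what syncReplicaSet calls once the rs is gone.
func Example_staleExpectationsHandle() {
	exp := controller.NewControllerExpectations()
	key := "default/frontend" // hypothetical key

	exp.SetExpectations(key, 1, 0)
	podExp, exists, _ := exp.GetExpectations(key)
	fmt.Println(exists)

	exp.DeleteExpectations(key) // controller observed the rs deletion
	podExp.Add(-1, 0)           // stale handle; the store no longer tracks it
	_, exists, _ = exp.GetExpectations(key)
	fmt.Println(exists)

	// Output:
	// true
	// false
}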
func TestSyncReplicationControllerDormancy(t *testing.T) {
	// Setup a test server so we can lie about the current state of pods
	fakeHandler := utiltesting.FakeHandler{
		StatusCode:   200,
		ResponseBody: "{}",
	}
	testServer := httptest.NewServer(&fakeHandler)
	// TODO: Uncomment when fix #19254
	// defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

	fakePodControl := controller.FakePodControl{}
	manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady
	manager.podControl = &fakePodControl

	controllerSpec := newReplicationController(2)
	manager.rcStore.Store.Add(controllerSpec)
	newPodList(manager.podStore.Store, 1, api.PodRunning, controllerSpec)

	// Creates a replica and sets expectations
	controllerSpec.Status.Replicas = 1
	manager.syncReplicationController(getKey(controllerSpec, t))
	validateSyncReplication(t, &fakePodControl, 1, 0)

	// Expectations prevent replica creation but not a status update
	controllerSpec.Status.Replicas = 0
	fakePodControl.Clear()
	manager.syncReplicationController(getKey(controllerSpec, t))
	validateSyncReplication(t, &fakePodControl, 0, 0)

	// Get the key for the controller
	rcKey, err := controller.KeyFunc(controllerSpec)
	if err != nil {
		t.Errorf("Couldn't get key for object %+v: %v", controllerSpec, err)
	}

	// Lowering expectations should lead to a sync that creates a replica, however the
	// fakePodControl error will prevent this, leaving expectations at 0, 0
	manager.expectations.CreationObserved(rcKey)
	controllerSpec.Status.Replicas = 1
	fakePodControl.Clear()
	fakePodControl.Err = fmt.Errorf("Fake Error")

	manager.syncReplicationController(getKey(controllerSpec, t))
	validateSyncReplication(t, &fakePodControl, 0, 0)

	// This replica should not need a lowering of expectations, since the previous create failed
	fakePodControl.Err = nil
	manager.syncReplicationController(getKey(controllerSpec, t))
	validateSyncReplication(t, &fakePodControl, 1, 0)

	// 1 PUT for the rc status during dormancy window.
	// Note that the pod creates go through pod control so they're not recorded.
	fakeHandler.ValidateRequestCount(t, 1)
}
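// The RC tests above repeat the same client/manager/podControl wiring; this is
// a hedged sketch of factoring it out. The helper name and the
// *ReplicationManager return type are assumptions, not part of the original file.
func newTestRCManager(host string, burstReplicas int) (*ReplicationManager, *controller.FakePodControl) {
	c := client.NewOrDie(&client.Config{Host: host, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, burstReplicas)
	manager.podStoreSynced = alwaysReady
	fakePodControl := &controller.FakePodControl{}
	manager.podControl = fakePodControl
	return manager, fakePodControl
}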
// TODO: This test is too hairy for a unittest. It should be moved to an E2E suite.
func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) {
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	fakePodControl := controller.FakePodControl{}
	manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, burstReplicas, 0)
	manager.podStoreSynced = alwaysReady
	manager.podControl = &fakePodControl

	labelMap := map[string]string{"foo": "bar"}
	rsSpec := newReplicaSet(numReplicas, labelMap)
	manager.rsStore.Store.Add(rsSpec)

	expectedPods := int32(0)
	pods := newPodList(nil, numReplicas, api.PodPending, labelMap, rsSpec, "pod")

	rsKey, err := controller.KeyFunc(rsSpec)
	if err != nil {
		t.Errorf("Couldn't get key for object %#v: %v", rsSpec, err)
	}

	// Size up the controller, then size it down, and confirm the expected create/delete pattern
	for _, replicas := range []int32{int32(numReplicas), 0} {
		rsSpec.Spec.Replicas = replicas
		manager.rsStore.Store.Add(rsSpec)

		for i := 0; i < numReplicas; i += burstReplicas {
			manager.syncReplicaSet(getKey(rsSpec, t))

			// The store accrues active pods. It's also used by the ReplicaSet to determine
			// how many replicas to create.
			activePods := int32(len(manager.podStore.Indexer.List()))
			if replicas != 0 {
				// This is the number of pods currently "in flight". They were created by the
				// ReplicaSet controller above, which then puts the ReplicaSet to sleep till
				// all of them have been observed.
				expectedPods = replicas - activePods
				if expectedPods > int32(burstReplicas) {
					expectedPods = int32(burstReplicas)
				}
				// This validates the ReplicaSet manager sync actually created pods
				validateSyncReplicaSet(t, &fakePodControl, int(expectedPods), 0, 0)

				// This simulates the watch events for all but 1 of the expected pods.
				// None of these should wake the controller because it has expectations==BurstReplicas.
				for i := int32(0); i < expectedPods-1; i++ {
					manager.podStore.Indexer.Add(&pods.Items[i])
					manager.addPod(&pods.Items[i])
				}

				podExp, exists, err := manager.expectations.GetExpectations(rsKey)
				if !exists || err != nil {
					t.Fatalf("Did not find expectations for rs.")
				}
				if add, _ := podExp.GetExpectations(); add != 1 {
					t.Fatalf("Expectations are wrong %v", podExp)
				}
			} else {
				expectedPods = (replicas - activePods) * -1
				if expectedPods > int32(burstReplicas) {
					expectedPods = int32(burstReplicas)
				}
				validateSyncReplicaSet(t, &fakePodControl, 0, int(expectedPods), 0)

				// To accurately simulate a watch we must delete the exact pods
				// the rs is waiting for.
				expectedDels := manager.expectations.GetUIDs(getKey(rsSpec, t))
				podsToDelete := []*api.Pod{}
				for _, key := range expectedDels.List() {
					nsName := strings.Split(key, "/")
					podsToDelete = append(podsToDelete, &api.Pod{
						ObjectMeta: api.ObjectMeta{
							Name:      nsName[1],
							Namespace: nsName[0],
							Labels:    rsSpec.Spec.Selector.MatchLabels,
						},
					})
				}
				// Don't delete all pods because we confirm that the last pod
				// has exactly one expectation at the end, to verify that we
				// don't double delete.
				for i := range podsToDelete[1:] {
					manager.podStore.Indexer.Delete(podsToDelete[i])
					manager.deletePod(podsToDelete[i])
				}

				podExp, exists, err := manager.expectations.GetExpectations(rsKey)
				if !exists || err != nil {
					t.Fatalf("Did not find expectations for ReplicaSet.")
				}
				if _, del := podExp.GetExpectations(); del != 1 {
					t.Fatalf("Expectations are wrong %v", podExp)
				}
			}

			// Check that the ReplicaSet didn't take any action for all the above pods
			fakePodControl.Clear()
			manager.syncReplicaSet(getKey(rsSpec, t))
			validateSyncReplicaSet(t, &fakePodControl, 0, 0, 0)

			// Create/Delete the last pod
			// The last add pod will decrease the expectation of the ReplicaSet to 0,
			// which will cause it to create/delete the remaining replicas up to burstReplicas.
			if replicas != 0 {
				manager.podStore.Indexer.Add(&pods.Items[expectedPods-1])
				manager.addPod(&pods.Items[expectedPods-1])
			} else {
				expectedDel := manager.expectations.GetUIDs(getKey(rsSpec, t))
				if expectedDel.Len() != 1 {
					t.Fatalf("Waiting on unexpected number of deletes.")
				}
				nsName := strings.Split(expectedDel.List()[0], "/")
				lastPod := &api.Pod{
					ObjectMeta: api.ObjectMeta{
						Name:      nsName[1],
						Namespace: nsName[0],
						Labels:    rsSpec.Spec.Selector.MatchLabels,
					},
				}
				manager.podStore.Indexer.Delete(lastPod)
				manager.deletePod(lastPod)
			}
			pods.Items = pods.Items[expectedPods:]
		}

		// Confirm that we've created the right number of replicas
		activePods := int32(len(manager.podStore.Indexer.List()))
		if activePods != rsSpec.Spec.Replicas {
			t.Fatalf("Unexpected number of active pods, expected %d, got %d", rsSpec.Spec.Replicas, activePods)
		}
		// Replenish the pod list, since we cut it down sizing up
		pods = newPodList(nil, int(replicas), api.PodRunning, labelMap, rsSpec, "pod")
	}
}
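// A minimal sketch of the UID-tracking expectations the sizing-down branch
// above depends on: a deletion only counts once the exact expected pod key is
// observed, which is why the test reconstructs pods from GetUIDs before calling
// deletePod. Assumes controller.NewUIDTrackingControllerExpectations with
// ExpectDeletions, DeletionObserved, and GetUIDs; signatures vary by release.
func Example_uidTrackingExpectations() {
	exp := controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations())
	rsKey := "default/frontend" // hypothetical key

	exp.ExpectDeletions(rsKey, []string{"default/pod-0", "default/pod-1"})
	fmt.Println(exp.GetUIDs(rsKey).List()) // the exact pods the rs is waiting for

	exp.DeletionObserved(rsKey, "default/pod-0")
	fmt.Println(exp.GetUIDs(rsKey).List()) // pod-1 is still outstanding

	// Output:
	// [default/pod-0 default/pod-1]
	// [default/pod-1]
}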
func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) {
	// Setup a fake server to listen for requests, and run the rc manager in steady state
	fakeResponse := serverResponse{
		statusCode: 200,
		obj:        &api.ReplicationController{},
	}
	testServer, _ := makeTestServer(t, api.NamespaceDefault, api.TenantDefault, fakeResponse)
	defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Default.Version()})

	fakePodControl := controller.FakePodControl{}
	manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, burstReplicas)
	manager.podStoreSynced = alwaysReady
	manager.podControl = &fakePodControl

	controllerSpec := newReplicationController(numReplicas)
	manager.rcStore.Store.Add(controllerSpec)

	expectedPods := 0
	pods := newPodList(nil, numReplicas, api.PodPending, controllerSpec)

	rcKey, err := controller.KeyFunc(controllerSpec)
	if err != nil {
		t.Errorf("Couldn't get key for object %+v: %v", controllerSpec, err)
	}

	// Size up the controller, then size it down, and confirm the expected create/delete pattern
	for _, replicas := range []int{numReplicas, 0} {
		controllerSpec.Spec.Replicas = replicas
		manager.rcStore.Store.Add(controllerSpec)

		for i := 0; i < numReplicas; i += burstReplicas {
			manager.syncReplicationController(getKey(controllerSpec, t))

			// The store accrues active pods. It's also used by the rc to determine how many
			// replicas to create.
			activePods := len(manager.podStore.Store.List())
			if replicas != 0 {
				// This is the number of pods currently "in flight". They were created by the
				// rc manager above, which then puts the rc to sleep till all of them have
				// been observed.
				expectedPods = replicas - activePods
				if expectedPods > burstReplicas {
					expectedPods = burstReplicas
				}
				// This validates the rc manager sync actually created pods
				validateSyncReplication(t, &fakePodControl, expectedPods, 0)

				// This simulates the watch events for all but 1 of the expected pods.
				// None of these should wake the controller because it has expectations==BurstReplicas.
				for i := 0; i < expectedPods-1; i++ {
					manager.podStore.Store.Add(&pods.Items[i])
					manager.addPod(&pods.Items[i])
				}

				podExp, exists, err := manager.expectations.GetExpectations(rcKey)
				if !exists || err != nil {
					t.Fatalf("Did not find expectations for rc.")
				}
				if add, _ := podExp.GetExpectations(); add != 1 {
					t.Fatalf("Expectations are wrong %v", podExp)
				}
			} else {
				expectedPods = (replicas - activePods) * -1
				if expectedPods > burstReplicas {
					expectedPods = burstReplicas
				}
				validateSyncReplication(t, &fakePodControl, 0, expectedPods)
				for i := 0; i < expectedPods-1; i++ {
					manager.podStore.Store.Delete(&pods.Items[i])
					manager.deletePod(&pods.Items[i])
				}

				podExp, exists, err := manager.expectations.GetExpectations(rcKey)
				if !exists || err != nil {
					t.Fatalf("Did not find expectations for rc.")
				}
				if _, del := podExp.GetExpectations(); del != 1 {
					t.Fatalf("Expectations are wrong %v", podExp)
				}
			}

			// Check that the rc didn't take any action for all the above pods
			fakePodControl.Clear()
			manager.syncReplicationController(getKey(controllerSpec, t))
			validateSyncReplication(t, &fakePodControl, 0, 0)

			// Create/Delete the last pod
			// The last add pod will decrease the expectation of the rc to 0,
			// which will cause it to create/delete the remaining replicas up to burstReplicas.
			if replicas != 0 {
				manager.podStore.Store.Add(&pods.Items[expectedPods-1])
				manager.addPod(&pods.Items[expectedPods-1])
			} else {
				manager.podStore.Store.Delete(&pods.Items[expectedPods-1])
				manager.deletePod(&pods.Items[expectedPods-1])
			}
			pods.Items = pods.Items[expectedPods:]
		}

		// Confirm that we've created the right number of replicas
		activePods := len(manager.podStore.Store.List())
		if activePods != controllerSpec.Spec.Replicas {
			t.Fatalf("Unexpected number of active pods, expected %d, got %d", controllerSpec.Spec.Replicas, activePods)
		}
		// Replenish the pod list, since we cut it down sizing up
		pods = newPodList(nil, replicas, api.PodRunning, controllerSpec)
	}
}
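// The burst helper above is never invoked in this excerpt; suites typically
// drive it through a thin wrapper like this sketch. The replica counts are
// assumptions, chosen to cover burst == total and burst < total.
func TestControllerBurstReplicas(t *testing.T) {
	doTestControllerBurstReplicas(t, 5, 5)
	doTestControllerBurstReplicas(t, 5, 10)
	doTestControllerBurstReplicas(t, 10, 10)
}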