func TestOverlappingRCs(t *testing.T) {
	c := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

	for i := 0; i < 5; i++ {
		manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, 10)
		manager.podStoreSynced = alwaysReady

		// Create 9 rcs with distinct creation timestamps, shuffle them randomly,
		// and insert them into the rc manager's store.
		var controllers []*api.ReplicationController
		for j := 1; j < 10; j++ {
			controllerSpec := newReplicationController(1)
			controllerSpec.CreationTimestamp = unversioned.Date(2014, time.December, j, 0, 0, 0, 0, time.Local)
			controllerSpec.Name = string(util.NewUUID())
			controllers = append(controllers, controllerSpec)
		}
		shuffledControllers := shuffle(controllers)
		for j := range shuffledControllers {
			manager.rcStore.Store.Add(shuffledControllers[j])
		}

		// Add a pod and make sure only the oldest rc is synced.
		pods := newPodList(nil, 1, api.PodPending, controllers[0])
		rcKey := getKey(controllers[0], t)

		manager.addPod(&pods.Items[0])
		queueRC, _ := manager.queue.Get()
		if queueRC != rcKey {
			t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC)
		}
	}
}
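// The tests in this section lean on a few helpers that are not shown here, and each
// test package defines its own copy with types matching its controller (rc, ReplicaSet,
// Job, or DaemonSet). The rc-flavored sketches below are minimal reconstructions under
// that assumption; treat the exact bodies as illustrative rather than canonical.

// alwaysReady stubs out the podStoreSynced check so syncs never wait on the pod reflector.
var alwaysReady = func() bool { return true }

// shuffle returns a randomly ordered copy of the given controllers (assumes math/rand).
func shuffle(controllers []*api.ReplicationController) []*api.ReplicationController {
	numControllers := len(controllers)
	randIndexes := rand.Perm(numControllers)
	shuffled := make([]*api.ReplicationController, numControllers)
	for i := 0; i < numControllers; i++ {
		shuffled[i] = controllers[randIndexes[i]]
	}
	return shuffled
}

// getKey resolves the store key (namespace/name) for an rc, failing the test on error.
func getKey(rc *api.ReplicationController, t *testing.T) string {
	key, err := controller.KeyFunc(rc)
	if err != nil {
		t.Errorf("Unexpected error getting key for rc %v: %v", rc.Name, err)
		return ""
	}
	return key
}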
func TestSyncEndpointsItemsPreexistingIdentical(t *testing.T) {
	ns := api.NamespaceDefault
	testServer, endpointsHandler := makeTestServer(t, api.NamespaceDefault,
		serverResponse{http.StatusOK, &api.Endpoints{
			ObjectMeta: api.ObjectMeta{
				ResourceVersion: "1",
				Name:            "foo",
				Namespace:       ns,
			},
			Subsets: []api.EndpointSubset{{
				Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
				Ports:     []api.EndpointPort{{Port: 8080, Protocol: "TCP"}},
			}},
		}})
	// TODO: Uncomment when #19254 is fixed
	// defer testServer.Close()
	client := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	endpoints := NewEndpointController(client, controller.NoResyncPeriodFunc)
	addPods(endpoints.podStore.Store, api.NamespaceDefault, 1, 1, 0)
	endpoints.serviceStore.Store.Add(&api.Service{
		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: api.NamespaceDefault},
		Spec: api.ServiceSpec{
			Selector: map[string]string{"foo": "bar"},
			Ports:    []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}},
		},
	})
	endpoints.syncService(ns + "/foo")
	endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", api.NamespaceDefault, "foo"), "GET", nil)
}
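// addPods is assumed to seed the endpoint controller's pod store with running,
// ready pods named pod0..podN, IPs 1.2.3.4 and up, and labels matching the
// {"foo": "bar"} selectors these tests use. A minimal sketch under those
// assumptions (the exact container-port and readiness details are illustrative):
func addPods(store cache.Store, namespace string, nPods int, nPorts int, nNotReady int) {
	for i := 0; i < nPods+nNotReady; i++ {
		p := &api.Pod{
			ObjectMeta: api.ObjectMeta{
				Namespace: namespace,
				Name:      fmt.Sprintf("pod%d", i),
				Labels:    map[string]string{"foo": "bar"},
			},
			Spec:   api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{}}}},
			Status: api.PodStatus{PodIP: fmt.Sprintf("1.2.3.%d", 4+i), Phase: api.PodRunning},
		}
		// Give each pod nPorts container ports.
		for j := 0; j < nPorts; j++ {
			p.Spec.Containers[0].Ports = append(p.Spec.Containers[0].Ports,
				api.ContainerPort{Name: fmt.Sprintf("port%d", j), ContainerPort: 8080 + j})
		}
		// Only the first nPods pods are marked ready; the rest stay not-ready.
		if i < nPods {
			p.Status.Conditions = []api.PodCondition{{Type: api.PodReady, Status: api.ConditionTrue}}
		}
		store.Add(p)
	}
}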
func TestDSManagerInit(t *testing.T) {
	// Insert a stable daemon set and make sure we don't create an extra pod
	// for the one node which already has a daemon after a simulated restart.
	ds := newDaemonSet("test")
	ds.Status = extensions.DaemonSetStatus{
		CurrentNumberScheduled: 1,
		NumberMisscheduled:     0,
		DesiredNumberScheduled: 1,
	}
	nodeName := "only-node"
	podList := &api.PodList{
		Items: []api.Pod{
			*newPod("podname", nodeName, simpleDaemonSetLabel),
		},
	}
	response := runtime.EncodeOrDie(testapi.Default.Codec(), podList)
	fakeHandler := utiltesting.FakeHandler{
		StatusCode:   200,
		ResponseBody: response,
	}
	testServer := httptest.NewServer(&fakeHandler)
	// TODO: Uncomment when #19254 is fixed
	// defer testServer.Close()

	clientset := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	manager := NewDaemonSetsController(clientset, controller.NoResyncPeriodFunc)
	manager.dsStore.Add(ds)
	manager.nodeStore.Add(newNode(nodeName, nil))
	manager.podStoreSynced = alwaysReady
	controller.SyncAllPodsWithStore(manager.kubeClient, manager.podStore.Store)

	fakePodControl := &controller.FakePodControl{}
	manager.podControl = fakePodControl
	manager.syncHandler(getKey(ds, t))
	validateSyncDaemonSets(t, fakePodControl, 0, 0)
}
func TestDeleteFinalStateUnknown(t *testing.T) {
	client := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	fakePodControl := controller.FakePodControl{}
	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady
	manager.podControl = &fakePodControl

	received := make(chan string)
	manager.syncHandler = func(key string) error {
		received <- key
		return nil
	}

	// The DeletedFinalStateUnknown object should cause the ReplicaSet manager to insert
	// the controller matching the selectors of the deleted pod into the work queue.
	labelMap := map[string]string{"foo": "bar"}
	rsSpec := newReplicaSet(1, labelMap)
	manager.rsStore.Store.Add(rsSpec)
	pods := newPodList(nil, 1, api.PodRunning, labelMap, rsSpec)
	manager.deletePod(cache.DeletedFinalStateUnknown{Key: "foo", Obj: &pods.Items[0]})

	go manager.worker()

	expected := getKey(rsSpec, t)
	select {
	case key := <-received:
		if key != expected {
			t.Errorf("Unexpected sync all for ReplicaSet %v, expected %v", key, expected)
		}
	case <-time.After(util.ForeverTestTimeout):
		t.Errorf("Processing DeleteFinalStateUnknown took longer than expected")
	}
}
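// newPodList, as the ReplicaSet tests use it, is assumed to build `count` pods in the
// given phase, labeled to match the ReplicaSet's selector, adding them to a store when
// one is passed (nil skips the store, as above). The rc-flavored variant seen earlier
// simply omits the labelMap parameter. A minimal sketch under those assumptions:
func newPodList(store cache.Store, count int, status api.PodPhase, labelMap map[string]string, rs *extensions.ReplicaSet) *api.PodList {
	pods := []api.Pod{}
	for i := 0; i < count; i++ {
		newPod := api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name:      fmt.Sprintf("pod%d", i),
				Labels:    labelMap,
				Namespace: rs.Namespace,
			},
			Status: api.PodStatus{Phase: status},
		}
		if store != nil {
			store.Add(&newPod)
		}
		pods = append(pods, newPod)
	}
	return &api.PodList{Items: pods}
}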
func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) {
	ns := api.NamespaceDefault
	testServer, endpointsHandler := makeTestServer(t, ns,
		serverResponse{http.StatusOK, &api.Endpoints{
			ObjectMeta: api.ObjectMeta{
				Name:            "foo",
				Namespace:       ns,
				ResourceVersion: "1",
			},
			Subsets: []api.EndpointSubset{{
				Addresses: []api.EndpointAddress{{IP: "6.7.8.9"}},
				Ports:     []api.EndpointPort{{Port: 1000}},
			}},
		}})
	// TODO: Uncomment when #19254 is fixed
	// defer testServer.Close()
	client := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	endpoints := NewEndpointController(client, controller.NoResyncPeriodFunc)
	endpoints.serviceStore.Store.Add(&api.Service{
		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns},
		Spec:       api.ServiceSpec{Ports: []api.ServicePort{{Port: 80}}},
	})
	endpoints.syncService(ns + "/foo")
	endpointsHandler.ValidateRequestCount(t, 0)
}
// TestSyncJobExpectations tests that a pod cannot sneak in between counting active pods
// and checking expectations.
func TestSyncJobExpectations(t *testing.T) {
	clientset := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	manager := NewJobController(clientset, controller.NoResyncPeriodFunc)
	fakePodControl := controller.FakePodControl{}
	manager.podControl = &fakePodControl
	manager.podStoreSynced = alwaysReady
	manager.updateHandler = func(job *extensions.Job) error { return nil }

	job := newJob(2, 2)
	manager.jobStore.Store.Add(job)
	pods := newPodList(2, api.PodPending, job)
	manager.podStore.Store.Add(&pods[0])

	manager.expectations = FakeJobExpectations{
		controller.NewControllerExpectations(), true, func() {
			// If we check active pods before checking expectations, the job
			// will create a new replica because it doesn't see this pod, but
			// has fulfilled its expectations.
			manager.podStore.Store.Add(&pods[1])
		},
	}
	manager.syncJob(getKey(job, t))
	if len(fakePodControl.Templates) != 0 {
		t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", 0, len(fakePodControl.Templates))
	}
	if len(fakePodControl.DeletePodName) != 0 {
		t.Errorf("Unexpected number of deletes. Expected %d, saw %d\n", 0, len(fakePodControl.DeletePodName))
	}
}
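// FakeJobExpectations is assumed to wrap ControllerExpectations so the test can run a
// callback at the exact moment expectations are checked, which is how the "pod sneaking
// in" race above is simulated. FakeRSExpectations, used by TestRSSyncExpectations below,
// is assumed to mirror this for the ReplicaSet controller. A minimal sketch:
type FakeJobExpectations struct {
	*controller.ControllerExpectations
	satisfied    bool
	expSatisfied func()
}

// SatisfiedExpectations fires the callback, then reports the canned answer.
func (fe FakeJobExpectations) SatisfiedExpectations(controllerKey string) bool {
	fe.expSatisfied()
	return fe.satisfied
}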
func TestRSManagerNotReady(t *testing.T) {
	client := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	fakePodControl := controller.FakePodControl{}
	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, 2)
	manager.podControl = &fakePodControl
	manager.podStoreSynced = func() bool { return false }

	// Simulates the ReplicaSet reflector running before the pod reflector. We don't
	// want to end up creating replicas in this case until the pod reflector
	// has synced, so the ReplicaSet controller should just requeue the ReplicaSet.
	rsSpec := newReplicaSet(1, map[string]string{"foo": "bar"})
	manager.rsStore.Store.Add(rsSpec)
	rsKey := getKey(rsSpec, t)
	manager.syncReplicaSet(rsKey)
	validateSyncReplicaSet(t, &fakePodControl, 0, 0)

	queueRS, _ := manager.queue.Get()
	if queueRS != rsKey {
		t.Fatalf("Expected to find key %v in queue, found %v", rsKey, queueRS)
	}

	manager.podStoreSynced = alwaysReady
	manager.syncReplicaSet(rsKey)
	validateSyncReplicaSet(t, &fakePodControl, 1, 0)
}
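// validateSyncReplicaSet is assumed to compare the FakePodControl's recorded creates
// and deletes against the expected counts; validateSyncReplication and
// validateSyncDaemonSets elsewhere in this section are assumed to play the same role
// for their controllers. A minimal sketch under that assumption:
func validateSyncReplicaSet(t *testing.T, fakePodControl *controller.FakePodControl, expectedCreates, expectedDeletes int) {
	if len(fakePodControl.Templates) != expectedCreates {
		t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", expectedCreates, len(fakePodControl.Templates))
	}
	if len(fakePodControl.DeletePodName) != expectedDeletes {
		t.Errorf("Unexpected number of deletes. Expected %d, saw %d\n", expectedDeletes, len(fakePodControl.DeletePodName))
	}
}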
// TestRSSyncExpectations tests that a pod cannot sneak in between counting active pods
// and checking expectations.
func TestRSSyncExpectations(t *testing.T) {
	client := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	fakePodControl := controller.FakePodControl{}
	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, 2)
	manager.podStoreSynced = alwaysReady
	manager.podControl = &fakePodControl

	labelMap := map[string]string{"foo": "bar"}
	rsSpec := newReplicaSet(2, labelMap)
	manager.rsStore.Store.Add(rsSpec)
	pods := newPodList(nil, 2, api.PodPending, labelMap, rsSpec)
	manager.podStore.Store.Add(&pods.Items[0])
	postExpectationsPod := pods.Items[1]

	manager.expectations = FakeRSExpectations{
		controller.NewControllerExpectations(), true, func() {
			// If we check active pods before checking expectations, the
			// ReplicaSet will create a new replica because it doesn't see
			// this pod, but has fulfilled its expectations.
			manager.podStore.Store.Add(&postExpectationsPod)
		},
	}
	manager.syncReplicaSet(getKey(rsSpec, t))
	validateSyncReplicaSet(t, &fakePodControl, 0, 0)
}
func TestSyncPastDeadlineJobFinished(t *testing.T) {
	clientset := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	manager := NewJobController(clientset, controller.NoResyncPeriodFunc)
	fakePodControl := controller.FakePodControl{}
	manager.podControl = &fakePodControl
	manager.podStoreSynced = alwaysReady
	var actual *extensions.Job
	manager.updateHandler = func(job *extensions.Job) error {
		actual = job
		return nil
	}

	job := newJob(1, 1)
	activeDeadlineSeconds := int64(10)
	job.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
	start := unversioned.Unix(unversioned.Now().Time.Unix()-15, 0)
	job.Status.StartTime = &start
	job.Status.Conditions = append(job.Status.Conditions, newCondition(extensions.JobFailed, "DeadlineExceeded", "Job was active longer than specified deadline"))
	manager.jobStore.Store.Add(job)
	err := manager.syncJob(getKey(job, t))
	if err != nil {
		t.Errorf("Unexpected error when syncing jobs %v", err)
	}
	if len(fakePodControl.Templates) != 0 {
		t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", 0, len(fakePodControl.Templates))
	}
	if len(fakePodControl.DeletePodName) != 0 {
		t.Errorf("Unexpected number of deletes. Expected %d, saw %d\n", 0, len(fakePodControl.DeletePodName))
	}
	if actual != nil {
		t.Error("Unexpected job modification")
	}
}
// NewMasterComponents creates, initializes and starts master components based on the given config.
func NewMasterComponents(c *Config) *MasterComponents {
	m, s := startMasterOrDie(c.MasterConfig)
	// TODO: Allow callers to pipe through a different master url and create a client/start components using it.
	glog.Infof("Master %+v", s.URL)
	if c.DeleteEtcdKeys {
		DeleteAllEtcdKeys()
	}
	// TODO: caesarxuchao: remove this client when the refactoring of the client library is done.
	restClient := client.NewOrDie(&client.Config{Host: s.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}, QPS: c.QPS, Burst: c.Burst})
	clientset := clientset.NewForConfigOrDie(&client.Config{Host: s.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}, QPS: c.QPS, Burst: c.Burst})
	rcStopCh := make(chan struct{})
	controllerManager := replicationcontroller.NewReplicationManager(clientset, controller.NoResyncPeriodFunc, c.Burst)

	// TODO: Support events once we can cleanly shutdown an event recorder.
	controllerManager.SetEventRecorder(&record.FakeRecorder{})
	if c.StartReplicationManager {
		go controllerManager.Run(goruntime.NumCPU(), rcStopCh)
	}
	var once sync.Once
	return &MasterComponents{
		ApiServer:         s,
		KubeMaster:        m,
		RestClient:        restClient,
		ControllerManager: controllerManager,
		rcStopCh:          rcStopCh,
		once:              once,
	}
}
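// A hypothetical caller of NewMasterComponents, using only the Config fields referenced
// above; the nil-MasterConfig default and the teardown via the stop channel are
// assumptions, not a documented contract:
func ExampleNewMasterComponents() {
	m := NewMasterComponents(&Config{
		DeleteEtcdKeys:          true, // start from a clean etcd
		StartReplicationManager: true,
		QPS:                     100,
		Burst:                   100,
	})
	defer close(m.rcStopCh) // stops the replication manager's worker goroutines
	// Issue requests against the test master, e.g. via m.RestClient or m.ApiServer.URL.
	_ = m.RestClient
}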
func (c *Client) Setup(t *testing.T) *Client {
	c.handler = &utiltesting.FakeHandler{
		StatusCode: c.Response.StatusCode,
	}
	if responseBody := body(t, c.Response.Body, c.Response.RawBody); responseBody != nil {
		c.handler.ResponseBody = *responseBody
	}
	c.server = httptest.NewServer(c.handler)
	if c.Client == nil {
		c.Client = client.NewOrDie(&client.Config{
			Host:          c.server.URL,
			ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()},
		})

		// TODO: caesarxuchao: hacky way to specify version of Experimental client.
		// We will fix this by supporting multiple group versions in Config.
		c.ExtensionsClient = client.NewExtensionsOrDie(&client.Config{
			Host:          c.server.URL,
			ContentConfig: client.ContentConfig{GroupVersion: testapi.Extensions.GroupVersion()},
		})

		c.Clientset = clientset.NewForConfigOrDie(&client.Config{Host: c.server.URL})
	}
	c.QueryValidator = map[string]func(string, string) bool{}
	return c
}
func TestCheckLeftoverEndpoints(t *testing.T) {
	ns := api.NamespaceDefault
	// Note that this requests *all* endpoints, therefore the NamespaceAll
	// below.
	testServer, _ := makeTestServer(t, api.NamespaceAll,
		serverResponse{http.StatusOK, &api.EndpointsList{
			ListMeta: unversioned.ListMeta{
				ResourceVersion: "1",
			},
			Items: []api.Endpoints{{
				ObjectMeta: api.ObjectMeta{
					Name:            "foo",
					Namespace:       ns,
					ResourceVersion: "1",
				},
				Subsets: []api.EndpointSubset{{
					Addresses: []api.EndpointAddress{{IP: "6.7.8.9"}},
					Ports:     []api.EndpointPort{{Port: 1000}},
				}},
			}},
		}})
	// TODO: Uncomment when #19254 is fixed
	// defer testServer.Close()
	client := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	endpoints := NewEndpointController(client, controller.NoResyncPeriodFunc)
	endpoints.checkLeftoverEndpoints()

	if e, a := 1, endpoints.queue.Len(); e != a {
		t.Fatalf("Expected %v, got %v", e, a)
	}
	got, _ := endpoints.queue.Get()
	if e, a := ns+"/foo", got; e != a {
		t.Errorf("Expected %v, got %v", e, a)
	}
}
func TestOverlappingRSs(t *testing.T) {
	client := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	labelMap := map[string]string{"foo": "bar"}

	for i := 0; i < 5; i++ {
		manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, 10)
		manager.podStoreSynced = alwaysReady

		// Create 9 ReplicaSets with distinct creation timestamps, shuffle them randomly,
		// and insert them into the ReplicaSet controller's store.
		var controllers []*extensions.ReplicaSet
		for j := 1; j < 10; j++ {
			rsSpec := newReplicaSet(1, labelMap)
			rsSpec.CreationTimestamp = unversioned.Date(2014, time.December, j, 0, 0, 0, 0, time.Local)
			rsSpec.Name = string(util.NewUUID())
			controllers = append(controllers, rsSpec)
		}
		shuffledControllers := shuffle(controllers)
		for j := range shuffledControllers {
			manager.rsStore.Store.Add(shuffledControllers[j])
		}

		// Add a pod and make sure only the oldest ReplicaSet is synced.
		pods := newPodList(nil, 1, api.PodPending, labelMap, controllers[0])
		rsKey := getKey(controllers[0], t)

		manager.addPod(&pods.Items[0])
		queueRS, _ := manager.queue.Get()
		if queueRS != rsKey {
			t.Fatalf("Expected to find key %v in queue, found %v", rsKey, queueRS)
		}
	}
}
func TestPodControllerLookup(t *testing.T) {
	manager := NewReplicaSetController(clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}), controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady
	testCases := []struct {
		inRSs     []*extensions.ReplicaSet
		pod       *api.Pod
		outRSName string
	}{
		// Pods without labels don't match any ReplicaSets
		{
			inRSs: []*extensions.ReplicaSet{
				{ObjectMeta: api.ObjectMeta{Name: "basic"}}},
			pod:       &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo1", Namespace: api.NamespaceAll}},
			outRSName: "",
		},
		// Matching labels, not namespace
		{
			inRSs: []*extensions.ReplicaSet{
				{
					ObjectMeta: api.ObjectMeta{Name: "foo"},
					Spec: extensions.ReplicaSetSpec{
						Selector: &extensions.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
					},
				},
			},
			pod: &api.Pod{
				ObjectMeta: api.ObjectMeta{
					Name: "foo2", Namespace: "ns", Labels: map[string]string{"foo": "bar"}}},
			outRSName: "",
		},
		// Matching ns and labels returns the key to the ReplicaSet, not the ReplicaSet name
		{
			inRSs: []*extensions.ReplicaSet{
				{
					ObjectMeta: api.ObjectMeta{Name: "bar", Namespace: "ns"},
					Spec: extensions.ReplicaSetSpec{
						Selector: &extensions.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
					},
				},
			},
			pod: &api.Pod{
				ObjectMeta: api.ObjectMeta{
					Name: "foo3", Namespace: "ns", Labels: map[string]string{"foo": "bar"}}},
			outRSName: "bar",
		},
	}
	for _, c := range testCases {
		for _, r := range c.inRSs {
			manager.rsStore.Add(r)
		}
		if rs := manager.getPodReplicaSet(c.pod); rs != nil {
			if c.outRSName != rs.Name {
				t.Errorf("Got replica set %+v expected %+v", rs.Name, c.outRSName)
			}
		} else if c.outRSName != "" {
			t.Errorf("Expected a replica set %v pod %v, found none", c.outRSName, c.pod.Name)
		}
	}
}
func newTestController() (*DaemonSetsController, *controller.FakePodControl) {
	clientset := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	manager := NewDaemonSetsController(clientset, controller.NoResyncPeriodFunc)
	manager.podStoreSynced = alwaysReady
	podControl := &controller.FakePodControl{}
	manager.podControl = podControl
	return manager, podControl
}
func TestSyncReplicaSetDormancy(t *testing.T) {
	// Setup a test server so we can lie about the current state of pods
	fakeHandler := utiltesting.FakeHandler{
		StatusCode:   200,
		ResponseBody: "",
	}
	testServer := httptest.NewServer(&fakeHandler)
	defer testServer.Close()
	client := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

	fakePodControl := controller.FakePodControl{}
	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady
	manager.podControl = &fakePodControl

	labelMap := map[string]string{"foo": "bar"}
	rsSpec := newReplicaSet(2, labelMap)
	manager.rsStore.Store.Add(rsSpec)
	newPodList(manager.podStore.Store, 1, api.PodRunning, labelMap, rsSpec)

	// Creates a replica and sets expectations
	rsSpec.Status.Replicas = 1
	manager.syncReplicaSet(getKey(rsSpec, t))
	validateSyncReplicaSet(t, &fakePodControl, 1, 0)

	// Expectations prevent replica creation but not an update on status
	rsSpec.Status.Replicas = 0
	fakePodControl.Clear()
	manager.syncReplicaSet(getKey(rsSpec, t))
	validateSyncReplicaSet(t, &fakePodControl, 0, 0)

	// Get the key for the controller
	rsKey, err := controller.KeyFunc(rsSpec)
	if err != nil {
		t.Errorf("Couldn't get key for object %+v: %v", rsSpec, err)
	}

	// Lowering expectations should lead to a sync that creates a replica, however the
	// fakePodControl error will prevent this, leaving expectations at 0, 0.
	manager.expectations.CreationObserved(rsKey)
	rsSpec.Status.Replicas = 1
	fakePodControl.Clear()
	fakePodControl.Err = fmt.Errorf("Fake Error")

	manager.syncReplicaSet(getKey(rsSpec, t))
	validateSyncReplicaSet(t, &fakePodControl, 0, 0)

	// This replica should not need a lowering of expectations, since the previous create failed
	fakePodControl.Err = nil
	manager.syncReplicaSet(getKey(rsSpec, t))
	validateSyncReplicaSet(t, &fakePodControl, 1, 0)

	// 1 PUT for the ReplicaSet status during dormancy window.
	// Note that the pod creates go through pod control so they're not recorded.
	fakeHandler.ValidateRequestCount(t, 1)
}
func TestSyncReplicationControllerCreates(t *testing.T) {
	c := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady

	// A controller with 2 replicas and no pods in the store, 2 creates expected
	rc := newReplicationController(2)
	manager.rcStore.Store.Add(rc)

	fakePodControl := controller.FakePodControl{}
	manager.podControl = &fakePodControl
	manager.syncReplicationController(getKey(rc, t))
	validateSyncReplication(t, &fakePodControl, 2, 0)
}
func TestSyncReplicationControllerDoesNothing(t *testing.T) {
	c := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	fakePodControl := controller.FakePodControl{}
	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady

	// 2 running pods, a controller with 2 replicas, sync is a no-op
	controllerSpec := newReplicationController(2)
	manager.rcStore.Store.Add(controllerSpec)
	newPodList(manager.podStore.Store, 2, api.PodRunning, controllerSpec)

	manager.podControl = &fakePodControl
	manager.syncReplicationController(getKey(controllerSpec, t))
	validateSyncReplication(t, &fakePodControl, 0, 0)
}
func TestSyncReplicaSetCreates(t *testing.T) {
	client := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady

	// A controller with 2 replicas and no pods in the store, 2 creates expected
	labelMap := map[string]string{"foo": "bar"}
	rs := newReplicaSet(2, labelMap)
	manager.rsStore.Store.Add(rs)

	fakePodControl := controller.FakePodControl{}
	manager.podControl = &fakePodControl
	manager.syncReplicaSet(getKey(rs, t))
	validateSyncReplicaSet(t, &fakePodControl, 2, 0)
}
func TestSyncReplicationControllerDeletes(t *testing.T) {
	c := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	fakePodControl := controller.FakePodControl{}
	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady
	manager.podControl = &fakePodControl

	// 2 running pods and a controller with 1 replica, one pod delete expected
	controllerSpec := newReplicationController(1)
	manager.rcStore.Store.Add(controllerSpec)
	newPodList(manager.podStore.Store, 2, api.PodRunning, controllerSpec)

	manager.syncReplicationController(getKey(controllerSpec, t))
	validateSyncReplication(t, &fakePodControl, 0, 1)
}
func TestSyncReplicaSetDeletes(t *testing.T) {
	client := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	fakePodControl := controller.FakePodControl{}
	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady
	manager.podControl = &fakePodControl

	// 2 running pods and a controller with 1 replica, one pod delete expected
	labelMap := map[string]string{"foo": "bar"}
	rsSpec := newReplicaSet(1, labelMap)
	manager.rsStore.Store.Add(rsSpec)
	newPodList(manager.podStore.Store, 2, api.PodRunning, labelMap, rsSpec)

	manager.syncReplicaSet(getKey(rsSpec, t))
	validateSyncReplicaSet(t, &fakePodControl, 0, 1)
}
func TestSyncEndpointsItemsWithLabels(t *testing.T) {
	ns := "other"
	testServer, endpointsHandler := makeTestServer(t, ns,
		serverResponse{http.StatusOK, &api.Endpoints{}})
	// TODO: Uncomment when #19254 is fixed
	// defer testServer.Close()
	client := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	endpoints := NewEndpointController(client, controller.NoResyncPeriodFunc)
	addPods(endpoints.podStore.Store, ns, 3, 2, 0)
	serviceLabels := map[string]string{"foo": "bar"}
	endpoints.serviceStore.Store.Add(&api.Service{
		ObjectMeta: api.ObjectMeta{
			Name:      "foo",
			Namespace: ns,
			Labels:    serviceLabels,
		},
		Spec: api.ServiceSpec{
			Selector: map[string]string{"foo": "bar"},
			Ports: []api.ServicePort{
				{Name: "port0", Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)},
				{Name: "port1", Port: 88, Protocol: "TCP", TargetPort: intstr.FromInt(8088)},
			},
		},
	})
	endpoints.syncService(ns + "/foo")
	expectedSubsets := []api.EndpointSubset{{
		Addresses: []api.EndpointAddress{
			{IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}},
			{IP: "1.2.3.5", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod1", Namespace: ns}},
			{IP: "1.2.3.6", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod2", Namespace: ns}},
		},
		Ports: []api.EndpointPort{
			{Name: "port0", Port: 8080, Protocol: "TCP"},
			{Name: "port1", Port: 8088, Protocol: "TCP"},
		},
	}}
	data := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Endpoints{
		ObjectMeta: api.ObjectMeta{
			ResourceVersion: "",
			Labels:          serviceLabels,
		},
		Subsets: endptspkg.SortSubsets(expectedSubsets),
	})
	// endpointsHandler should get 2 requests - one "GET" and one "POST".
	endpointsHandler.ValidateRequestCount(t, 2)
	endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, ""), "POST", &data)
}
func TestSyncJobDeleted(t *testing.T) {
	clientset := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	manager := NewJobController(clientset, controller.NoResyncPeriodFunc)
	fakePodControl := controller.FakePodControl{}
	manager.podControl = &fakePodControl
	manager.podStoreSynced = alwaysReady
	manager.updateHandler = func(job *extensions.Job) error { return nil }
	job := newJob(2, 2)
	err := manager.syncJob(getKey(job, t))
	if err != nil {
		t.Errorf("Unexpected error when syncing jobs %v", err)
	}
	if len(fakePodControl.Templates) != 0 {
		t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", 0, len(fakePodControl.Templates))
	}
	if len(fakePodControl.DeletePodName) != 0 {
		t.Errorf("Unexpected number of deletes. Expected %d, saw %d\n", 0, len(fakePodControl.DeletePodName))
	}
}
func TestControllerUpdateRequeue(t *testing.T) {
	// This server should force a requeue of the controller because it fails to update status.Replicas.
	fakeHandler := utiltesting.FakeHandler{
		StatusCode:   500,
		ResponseBody: "",
	}
	testServer := httptest.NewServer(&fakeHandler)
	defer testServer.Close()

	client := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady

	labelMap := map[string]string{"foo": "bar"}
	rs := newReplicaSet(1, labelMap)
	manager.rsStore.Store.Add(rs)
	rs.Status = extensions.ReplicaSetStatus{Replicas: 2}
	newPodList(manager.podStore.Store, 1, api.PodRunning, labelMap, rs)

	fakePodControl := controller.FakePodControl{}
	manager.podControl = &fakePodControl

	manager.syncReplicaSet(getKey(rs, t))

	ch := make(chan interface{})
	go func() {
		item, _ := manager.queue.Get()
		ch <- item
	}()
	select {
	case key := <-ch:
		expectedKey := getKey(rs, t)
		if key != expectedKey {
			t.Errorf("Expected requeue of replica set with key %s got %s", expectedKey, key)
		}
	case <-time.After(util.ForeverTestTimeout):
		manager.queue.ShutDown()
		t.Errorf("Expected to find a ReplicaSet in the queue, found none.")
	}
	// 1 Update and 1 GET, both of which fail
	fakeHandler.ValidateRequestCount(t, 2)
}
func TestControllerUpdateRequeue(t *testing.T) {
	// This server should force a requeue of the controller because it fails to update status.Replicas.
	fakeHandler := utiltesting.FakeHandler{
		StatusCode:   500,
		ResponseBody: "",
	}
	testServer := httptest.NewServer(&fakeHandler)
	// TODO: Uncomment when #19254 is fixed
	// defer testServer.Close()

	c := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady

	rc := newReplicationController(1)
	manager.rcStore.Store.Add(rc)
	rc.Status = api.ReplicationControllerStatus{Replicas: 2}
	newPodList(manager.podStore.Store, 1, api.PodRunning, rc)

	fakePodControl := controller.FakePodControl{}
	manager.podControl = &fakePodControl

	manager.syncReplicationController(getKey(rc, t))

	ch := make(chan interface{})
	go func() {
		item, _ := manager.queue.Get()
		ch <- item
	}()
	select {
	case key := <-ch:
		expectedKey := getKey(rc, t)
		if key != expectedKey {
			t.Errorf("Expected requeue of controller with key %s got %s", expectedKey, key)
		}
	case <-time.After(util.ForeverTestTimeout):
		manager.queue.ShutDown()
		t.Errorf("Expected to find an rc in the queue, found none.")
	}
	// 1 Update and 1 GET, both of which fail
	fakeHandler.ValidateRequestCount(t, 2)
}
func TestSyncJobUpdateRequeue(t *testing.T) {
	clientset := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	manager := NewJobController(clientset, controller.NoResyncPeriodFunc)
	fakePodControl := controller.FakePodControl{}
	manager.podControl = &fakePodControl
	manager.podStoreSynced = alwaysReady
	manager.updateHandler = func(job *extensions.Job) error { return fmt.Errorf("Fake error") }
	job := newJob(2, 2)
	manager.jobStore.Store.Add(job)
	err := manager.syncJob(getKey(job, t))
	if err != nil {
		t.Errorf("Unexpected error when syncing jobs, got %v", err)
	}
	t.Log("Waiting for a job in the queue")
	key, _ := manager.queue.Get()
	expectedKey := getKey(job, t)
	if key != expectedKey {
		t.Errorf("Expected requeue of job with key %s got %s", expectedKey, key)
	}
}
func TestSyncEndpointsProtocolUDP(t *testing.T) {
	ns := "other"
	testServer, endpointsHandler := makeTestServer(t, ns,
		serverResponse{http.StatusOK, &api.Endpoints{
			ObjectMeta: api.ObjectMeta{
				Name:            "foo",
				Namespace:       ns,
				ResourceVersion: "1",
			},
			Subsets: []api.EndpointSubset{{
				Addresses: []api.EndpointAddress{{IP: "6.7.8.9"}},
				Ports:     []api.EndpointPort{{Port: 1000, Protocol: "UDP"}},
			}},
		}})
	// TODO: Uncomment when #19254 is fixed
	// defer testServer.Close()
	client := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	endpoints := NewEndpointController(client, controller.NoResyncPeriodFunc)
	addPods(endpoints.podStore.Store, ns, 1, 1, 0)
	endpoints.serviceStore.Store.Add(&api.Service{
		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns},
		Spec: api.ServiceSpec{
			Selector: map[string]string{},
			Ports:    []api.ServicePort{{Port: 80, TargetPort: intstr.FromInt(8080), Protocol: "UDP"}},
		},
	})
	endpoints.syncService(ns + "/foo")
	endpointsHandler.ValidateRequestCount(t, 2)
	data := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Endpoints{
		ObjectMeta: api.ObjectMeta{
			Name:            "foo",
			Namespace:       ns,
			ResourceVersion: "1",
		},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
			Ports:     []api.EndpointPort{{Port: 8080, Protocol: "UDP"}},
		}},
	})
	endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data)
}
func TestDeleteControllerAndExpectations(t *testing.T) {
	client := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, 10)
	manager.podStoreSynced = alwaysReady

	rs := newReplicaSet(1, map[string]string{"foo": "bar"})
	manager.rsStore.Store.Add(rs)

	fakePodControl := controller.FakePodControl{}
	manager.podControl = &fakePodControl

	// This should set expectations for the ReplicaSet
	manager.syncReplicaSet(getKey(rs, t))
	validateSyncReplicaSet(t, &fakePodControl, 1, 0)
	fakePodControl.Clear()

	// Get the ReplicaSet key
	rsKey, err := controller.KeyFunc(rs)
	if err != nil {
		t.Errorf("Couldn't get key for object %+v: %v", rs, err)
	}

	// This is to simulate a concurrent addPod, that has a handle on the expectations
	// as the controller deletes it.
	podExp, exists, err := manager.expectations.GetExpectations(rsKey)
	if !exists || err != nil {
		t.Errorf("No expectations found for ReplicaSet")
	}
	manager.rsStore.Delete(rs)
	manager.syncReplicaSet(getKey(rs, t))

	if _, exists, err = manager.expectations.GetExpectations(rsKey); exists {
		t.Errorf("Found expectations, expected none since the ReplicaSet has been deleted.")
	}

	// This should have no effect, since we've deleted the ReplicaSet.
	podExp.Seen(1, 0)
	manager.podStore.Store.Replace(make([]interface{}, 0), "0")
	manager.syncReplicaSet(getKey(rs, t))
	validateSyncReplicaSet(t, &fakePodControl, 0, 0)
}
func TestStatusUpdatesWithoutReplicasChange(t *testing.T) {
	// Setup a fake server to listen for requests, and run the ReplicaSet controller in steady state
	fakeHandler := utiltesting.FakeHandler{
		StatusCode:   200,
		ResponseBody: "",
	}
	testServer := httptest.NewServer(&fakeHandler)
	defer testServer.Close()
	client := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady

	// Steady state for the ReplicaSet, no Status.Replicas updates expected
	activePods := 5
	labelMap := map[string]string{"foo": "bar"}
	rs := newReplicaSet(activePods, labelMap)
	manager.rsStore.Store.Add(rs)
	rs.Status = extensions.ReplicaSetStatus{Replicas: activePods}
	newPodList(manager.podStore.Store, activePods, api.PodRunning, labelMap, rs)

	fakePodControl := controller.FakePodControl{}
	manager.podControl = &fakePodControl
	manager.syncReplicaSet(getKey(rs, t))

	validateSyncReplicaSet(t, &fakePodControl, 0, 0)
	if fakeHandler.RequestReceived != nil {
		t.Errorf("Unexpected update when pods and ReplicaSets are in a steady state")
	}

	// This response body is just so we don't err out decoding the http response, all
	// we care about is the request body sent below.
	response := runtime.EncodeOrDie(testapi.Extensions.Codec(), &extensions.ReplicaSet{})
	fakeHandler.ResponseBody = response

	rs.Generation = rs.Generation + 1
	manager.syncReplicaSet(getKey(rs, t))

	rs.Status.ObservedGeneration = rs.Generation
	updatedRc := runtime.EncodeOrDie(testapi.Extensions.Codec(), rs)
	fakeHandler.ValidateRequest(t, testapi.Extensions.ResourcePath(replicaSetResourceName(), rs.Namespace, rs.Name)+"/status", "PUT", &updatedRc)
}
func TestStatusUpdatesWithoutReplicasChange(t *testing.T) {
	// Setup a fake server to listen for requests, and run the rc manager in steady state
	fakeHandler := utiltesting.FakeHandler{
		StatusCode:   200,
		ResponseBody: "",
	}
	testServer := httptest.NewServer(&fakeHandler)
	// TODO: Uncomment when #19254 is fixed
	// defer testServer.Close()

	c := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady

	// Steady state for the replication controller, no Status.Replicas updates expected
	activePods := 5
	rc := newReplicationController(activePods)
	manager.rcStore.Store.Add(rc)
	rc.Status = api.ReplicationControllerStatus{Replicas: activePods}
	newPodList(manager.podStore.Store, activePods, api.PodRunning, rc)

	fakePodControl := controller.FakePodControl{}
	manager.podControl = &fakePodControl
	manager.syncReplicationController(getKey(rc, t))

	validateSyncReplication(t, &fakePodControl, 0, 0)
	if fakeHandler.RequestReceived != nil {
		t.Errorf("Unexpected update when pods and rcs are in a steady state")
	}

	// This response body is just so we don't err out decoding the http response, all
	// we care about is the request body sent below.
	response := runtime.EncodeOrDie(testapi.Default.Codec(), &api.ReplicationController{})
	fakeHandler.ResponseBody = response

	rc.Generation = rc.Generation + 1
	manager.syncReplicationController(getKey(rc, t))

	rc.Status.ObservedGeneration = rc.Generation
	updatedRc := runtime.EncodeOrDie(testapi.Default.Codec(), rc)
	fakeHandler.ValidateRequest(t, testapi.Default.ResourcePath(replicationControllerResourceName(), rc.Namespace, rc.Name)+"/status", "PUT", &updatedRc)
}