func controllerSetup(startingObjects []runtime.Object, stopChannel chan struct{}, t *testing.T) ( /*caName*/ string, *ktestclient.Fake, *watch.FakeWatcher, *ServiceServingCertController) {
    certDir, err := ioutil.TempDir("", "serving-cert-unit-")
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    caInfo := admin.DefaultServiceSignerCAInfo(certDir)

    caOptions := admin.CreateSignerCertOptions{
        CertFile: caInfo.CertFile,
        KeyFile:  caInfo.KeyFile,
        Name:     admin.DefaultServiceServingCertSignerName(),
        Output:   ioutil.Discard,
    }
    ca, err := caOptions.CreateSignerCert()
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }

    kubeclient := ktestclient.NewSimpleFake(startingObjects...)
    fakeWatch := watch.NewFake()
    // Make creates and updates succeed by echoing the submitted object back,
    // and route every watch through the fake watcher.
    kubeclient.PrependReactor("create", "*", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
        return true, action.(ktestclient.CreateAction).GetObject(), nil
    })
    kubeclient.PrependReactor("update", "*", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
        return true, action.(ktestclient.UpdateAction).GetObject(), nil
    })
    kubeclient.PrependWatchReactor("*", ktestclient.DefaultWatchReactor(fakeWatch, nil))

    controller := NewServiceServingCertController(kubeclient, kubeclient, ca, "cluster.local", 10*time.Minute)

    return caOptions.Name, kubeclient, fakeWatch, controller
}
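// The helpers and tests in this section all drive controllers through
// watch.FakeWatcher. The sketch below (TestFakeWatcherDelivery is
// illustrative, not from the original sources, and assumes the internal api
// package imported as api elsewhere in this section) shows the delivery
// contract they rely on: events pushed with Add/Modify/Delete come out of
// ResultChan in order, and delivery blocks until a receiver is ready, which
// is why several snippets below push events from separate goroutines.
func TestFakeWatcherDelivery(t *testing.T) {
    fw := watch.NewFake()
    defer fw.Stop()

    pod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "demo"}}

    // Push from another goroutine: FakeWatcher hands events directly to the
    // receiver, so sending on the test goroutine would deadlock.
    go func() {
        fw.Add(pod)
        fw.Modify(pod)
    }()

    for _, want := range []watch.EventType{watch.Added, watch.Modified} {
        got := <-fw.ResultChan()
        if got.Type != want {
            t.Fatalf("expected %v event, got %v", want, got.Type)
        }
    }
}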
// mockREST mocks a DeploymentLog REST
func mockREST(version, desired int, endStatus api.DeploymentStatus) *REST {
    // Fake deploymentConfig
    config := deploytest.OkDeploymentConfig(version)
    fakeDn := testclient.NewSimpleFake(config)
    fakeDn.PrependReactor("get", "deploymentconfigs", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
        return true, config, nil
    })

    // Fake deployments
    fakeDeployments := makeDeploymentList(version)
    fakeRn := ktestclient.NewSimpleFake(fakeDeployments)
    fakeRn.PrependReactor("get", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
        return true, &fakeDeployments.Items[desired-1], nil
    })

    // Fake watcher for deployments
    fakeWatch := watch.NewFake()
    fakeRn.PrependWatchReactor("replicationcontrollers", ktestclient.DefaultWatchReactor(fakeWatch, nil))

    // Everything is fake
    connectionInfo := &kclient.HTTPKubeletClient{Config: &kclient.KubeletConfig{EnableHttps: true, Port: 12345}, Client: &http.Client{}}

    obj := &fakeDeployments.Items[desired-1]
    obj.Annotations[api.DeploymentStatusAnnotation] = string(endStatus)
    go fakeWatch.Add(obj)

    return &REST{
        ConfigGetter:     fakeDn,
        DeploymentGetter: fakeRn,
        PodGetter:        &deployerPodGetter{},
        ConnectionInfo:   connectionInfo,
        Timeout:          defaultTimeout,
    }
}
func TestUpdatePods(t *testing.T) {
    fakeWatch := watch.NewFake()
    client := &testclient.Fake{}
    client.AddWatchReactor("*", testclient.DefaultWatchReactor(fakeWatch, nil))
    manager := NewReplicationManager(client, BurstReplicas)
    manager.podStoreSynced = alwaysReady

    received := make(chan string)

    manager.syncHandler = func(key string) error {
        obj, exists, err := manager.rcStore.Store.GetByKey(key)
        if !exists || err != nil {
            t.Errorf("Expected to find controller under key %v", key)
        }
        received <- obj.(*api.ReplicationController).Name
        return nil
    }

    stopCh := make(chan struct{})
    defer close(stopCh)
    go util.Until(manager.worker, 10*time.Millisecond, stopCh)

    // Put 2 rcs and one pod into the controller's stores
    testControllerSpec1 := newReplicationController(1)
    manager.rcStore.Store.Add(testControllerSpec1)
    testControllerSpec2 := *testControllerSpec1
    testControllerSpec2.Spec.Selector = map[string]string{"bar": "foo"}
    testControllerSpec2.Name = "barfoo"
    manager.rcStore.Store.Add(&testControllerSpec2)

    // Put one pod in the podStore
    pod1 := newPodList(manager.podStore.Store, 1, api.PodRunning, testControllerSpec1).Items[0]
    pod2 := pod1
    pod2.Labels = testControllerSpec2.Spec.Selector

    // Send an update of the same pod with modified labels, and confirm we get a sync request for
    // both controllers
    manager.updatePod(&pod1, &pod2)

    expected := sets.NewString(testControllerSpec1.Name, testControllerSpec2.Name)
    for _, name := range expected.List() {
        t.Logf("Expecting update for %+v", name)
        select {
        case got := <-received:
            if !expected.Has(got) {
                t.Errorf("Expected keys %#v got %v", expected, got)
            }
        case <-time.After(util.ForeverTestTimeout):
            t.Errorf("Expected update notifications for controllers within %v each", util.ForeverTestTimeout)
        }
    }
}
func TestHookExecutor_executeExecNewPodFailed(t *testing.T) {
    hook := &deployapi.LifecycleHook{
        FailurePolicy: deployapi.LifecycleHookFailurePolicyAbort,
        ExecNewPod: &deployapi.ExecNewPodHook{
            ContainerName: "container1",
        },
    }

    config := deploytest.OkDeploymentConfig(1)
    deployment, _ := deployutil.MakeDeployment(config, kapi.Codecs.LegacyCodec(deployv1.SchemeGroupVersion))

    client := newTestClient(config)
    podCreated := make(chan struct{})
    var createdPod *kapi.Pod
    client.AddReactor("create", "pods", func(a testclient.Action) (handled bool, ret runtime.Object, err error) {
        defer close(podCreated)
        action := a.(testclient.CreateAction)
        object := action.GetObject()
        createdPod = object.(*kapi.Pod)
        return true, createdPod, nil
    })
    podsWatch := watch.NewFake()
    client.AddWatchReactor("pods", testclient.DefaultWatchReactor(podsWatch, nil))

    go func() {
        <-podCreated
        podsWatch.Add(createdPod)
        podCopy, _ := kapi.Scheme.Copy(createdPod)
        updatedPod := podCopy.(*kapi.Pod)
        updatedPod.Status.Phase = kapi.PodFailed
        podsWatch.Modify(updatedPod)
    }()

    executor := &HookExecutor{
        pods:    client,
        out:     ioutil.Discard,
        decoder: kapi.Codecs.UniversalDecoder(),
        getPodLogs: func(*kapi.Pod) (io.ReadCloser, error) {
            return ioutil.NopCloser(strings.NewReader("test")), nil
        },
    }

    err := executor.executeExecNewPod(hook, deployment, "hook", "test")
    if err == nil {
        t.Fatalf("expected an error, got none")
    }
    t.Logf("got expected error: %T", err)
}
func TestWatchPods(t *testing.T) {
    fakeWatch := watch.NewFake()
    client := &testclient.Fake{}
    client.AddWatchReactor("*", testclient.DefaultWatchReactor(fakeWatch, nil))
    manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas)
    manager.podStoreSynced = alwaysReady

    // Put one ReplicaSet and one pod into the controller's stores
    labelMap := map[string]string{"foo": "bar"}
    testRSSpec := newReplicaSet(1, labelMap)
    manager.rsStore.Store.Add(testRSSpec)
    received := make(chan string)
    // The pod update sent through the fakeWatcher should figure out the managing ReplicaSet and
    // send it into the syncHandler.
    manager.syncHandler = func(key string) error {
        obj, exists, err := manager.rsStore.Store.GetByKey(key)
        if !exists || err != nil {
            t.Errorf("Expected to find replica set under key %v", key)
        }
        rsSpec := obj.(*extensions.ReplicaSet)
        if !api.Semantic.DeepDerivative(rsSpec, testRSSpec) {
            t.Errorf("\nExpected %#v,\nbut got %#v", testRSSpec, rsSpec)
        }
        close(received)
        return nil
    }
    // Start only the pod watcher and the workqueue, send a watch event,
    // and make sure it hits the sync method for the right ReplicaSet.
    stopCh := make(chan struct{})
    defer close(stopCh)
    go manager.podController.Run(stopCh)
    go util.Until(manager.worker, 10*time.Millisecond, stopCh)

    pods := newPodList(nil, 1, api.PodRunning, labelMap, testRSSpec)
    testPod := pods.Items[0]
    testPod.Status.Phase = api.PodFailed
    fakeWatch.Add(&testPod)

    select {
    case <-received:
    case <-time.After(util.ForeverTestTimeout):
        t.Errorf("Expected 1 call but got 0")
    }
}
func TestWatchPods(t *testing.T) {
    fakeWatch := watch.NewFake()
    client := &testclient.Fake{}
    client.AddWatchReactor("*", testclient.DefaultWatchReactor(fakeWatch, nil))
    manager := NewJobController(client, controller.NoResyncPeriodFunc)
    manager.podStoreSynced = alwaysReady

    // Put one job and one pod into the store
    testJob := newJob(2, 2)
    manager.jobStore.Store.Add(testJob)
    received := make(chan string)
    // The pod update sent through the fakeWatcher should figure out the managing job and
    // send it into the syncHandler.
    manager.syncHandler = func(key string) error {
        obj, exists, err := manager.jobStore.Store.GetByKey(key)
        if !exists || err != nil {
            t.Errorf("Expected to find job under key %v", key)
        }
        job := obj.(*extensions.Job)
        if !api.Semantic.DeepDerivative(job, testJob) {
            t.Errorf("\nExpected %#v,\nbut got %#v", testJob, job)
        }
        close(received)
        return nil
    }
    // Start only the pod watcher and the workqueue, send a watch event,
    // and make sure it hits the sync method for the right job.
    stopCh := make(chan struct{})
    defer close(stopCh)
    go manager.podController.Run(stopCh)
    go util.Until(manager.worker, 10*time.Millisecond, stopCh)

    pods := newPodList(1, api.PodRunning, testJob)
    testPod := pods[0]
    testPod.Status.Phase = api.PodFailed
    fakeWatch.Add(&testPod)

    select {
    case <-received:
    case <-time.After(controllerTimeout):
        t.Errorf("Expected 1 call but got 0")
    }
}
func TestWatchControllers(t *testing.T) {
    fakeWatch := watch.NewFake()
    client := &testclient.Fake{}
    client.AddWatchReactor("*", testclient.DefaultWatchReactor(fakeWatch, nil))
    manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, BurstReplicas)
    manager.podStoreSynced = alwaysReady

    var testControllerSpec api.ReplicationController
    received := make(chan string)

    // The update sent through the fakeWatcher should make its way into the workqueue,
    // and eventually into the syncHandler. The handler validates the received controller
    // and closes the received channel to indicate that the test can finish.
    manager.syncHandler = func(key string) error {
        obj, exists, err := manager.rcStore.Store.GetByKey(key)
        if !exists || err != nil {
            t.Errorf("Expected to find controller under key %v", key)
        }
        controllerSpec := *obj.(*api.ReplicationController)
        if !api.Semantic.DeepDerivative(controllerSpec, testControllerSpec) {
            t.Errorf("Expected %#v, but got %#v", testControllerSpec, controllerSpec)
        }
        close(received)
        return nil
    }
    // Start only the rc watcher and the workqueue, send a watch event,
    // and make sure it hits the sync method.
    stopCh := make(chan struct{})
    defer close(stopCh)
    go manager.rcController.Run(stopCh)
    go util.Until(manager.worker, 10*time.Millisecond, stopCh)

    testControllerSpec.Name = "foo"
    fakeWatch.Add(&testControllerSpec)

    select {
    case <-received:
    case <-time.After(util.ForeverTestTimeout):
        t.Errorf("Expected 1 call but got 0")
    }
}
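// The select-with-timeout idiom above recurs in several of the watch tests in
// this section. A hypothetical helper (waitForSync is not in the original
// sources) that factors it out might look like this; "received" is whatever
// channel the test's syncHandler signals on, and util.ForeverTestTimeout is
// the timeout already used above:
func waitForSync(t *testing.T, received <-chan string) {
    select {
    case <-received:
    case <-time.After(util.ForeverTestTimeout):
        t.Errorf("Expected 1 call but got 0")
    }
}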
func controllerSetup(startingObjects []runtime.Object, t *testing.T) (*ktestclient.Fake, *watch.FakeWatcher, *DockerRegistryServiceController) {
    kubeclient := ktestclient.NewSimpleFake(startingObjects...)
    fakeWatch := watch.NewFake()
    kubeclient.PrependReactor("create", "*", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
        return true, action.(ktestclient.CreateAction).GetObject(), nil
    })
    kubeclient.PrependReactor("update", "*", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
        return true, action.(ktestclient.UpdateAction).GetObject(), nil
    })
    kubeclient.PrependWatchReactor("services", ktestclient.DefaultWatchReactor(fakeWatch, nil))

    controller := NewDockerRegistryServiceController(kubeclient, DockerRegistryServiceControllerOptions{
        Resync:               10 * time.Minute,
        RegistryNamespace:    registryNamespace,
        RegistryServiceName:  registryName,
        DockercfgController:  &DockercfgController{},
        DockerURLsIntialized: make(chan struct{}),
    })

    return kubeclient, fakeWatch, controller
}
func TestWatchJobs(t *testing.T) {
    client := testclient.NewSimpleFake()
    fakeWatch := watch.NewFake()
    client.PrependWatchReactor("*", testclient.DefaultWatchReactor(fakeWatch, nil))
    manager := NewJobController(client, controller.NoResyncPeriodFunc)
    manager.podStoreSynced = alwaysReady

    var testJob extensions.Job
    received := make(chan struct{})

    // The update sent through the fakeWatcher should make its way into the workqueue,
    // and eventually into the syncHandler.
    manager.syncHandler = func(key string) error {
        obj, exists, err := manager.jobStore.Store.GetByKey(key)
        if !exists || err != nil {
            t.Errorf("Expected to find job under key %v", key)
        }
        job := *obj.(*extensions.Job)
        if !api.Semantic.DeepDerivative(job, testJob) {
            t.Errorf("Expected %#v, but got %#v", testJob, job)
        }
        close(received)
        return nil
    }
    // Start only the job watcher and the workqueue, send a watch event,
    // and make sure it hits the sync method.
    stopCh := make(chan struct{})
    defer close(stopCh)
    go manager.jobController.Run(stopCh)
    go util.Until(manager.worker, 10*time.Millisecond, stopCh)

    // We're sending a new job to see if it reaches the syncHandler.
    testJob.Name = "foo"
    fakeWatch.Add(&testJob)
    t.Log("Waiting for job to reach syncHandler")
    <-received
}
func controllerSetup(t *testing.T, startingObjects []runtime.Object) (*ktestclient.Fake, *watch.FakeWatcher, *IngressIPController) {
    client := ktestclient.NewSimpleFake(startingObjects...)

    fakeWatch := watch.NewFake()
    client.PrependWatchReactor("*", ktestclient.DefaultWatchReactor(fakeWatch, nil))

    client.PrependReactor("create", "*", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
        obj := action.(ktestclient.CreateAction).GetObject()
        fakeWatch.Add(obj)
        return true, obj, nil
    })

    // Ensure that updates the controller makes are passed through to the watcher.
    client.PrependReactor("update", "*", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
        obj := action.(ktestclient.UpdateAction).GetObject()
        fakeWatch.Modify(obj)
        return true, obj, nil
    })

    controller := newController(t, client)

    return client, fakeWatch, controller
}
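// The create/update reactors above follow a pattern shared by several setup
// helpers in this section: echo the object the controller wrote back as the
// response, and replay it through the fake watcher so the controller observes
// its own writes the way it would against a real apiserver. A hypothetical
// helper (wireUpdatesToWatch is not in the original sources) extracting the
// update half of that pattern:
func wireUpdatesToWatch(client *ktestclient.Fake, fakeWatch *watch.FakeWatcher) {
    client.PrependReactor("update", "*", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
        obj := action.(ktestclient.UpdateAction).GetObject()
        fakeWatch.Modify(obj) // the controller sees its own update as a watch event
        return true, obj, nil
    })
}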
func runFuzzer(t *testing.T) {
    stopCh := make(chan struct{})
    defer close(stopCh)

    startingNamespaces := CreateStartingNamespaces()
    kubeclient := ktestclient.NewSimpleFake(startingNamespaces...)
    nsWatch := watch.NewFake()
    kubeclient.PrependWatchReactor("namespaces", ktestclient.DefaultWatchReactor(nsWatch, nil))

    startingQuotas := CreateStartingQuotas()
    originclient := testclient.NewSimpleFake(startingQuotas...)
    quotaWatch := watch.NewFake()
    originclient.AddWatchReactor("clusterresourcequotas", ktestclient.DefaultWatchReactor(quotaWatch, nil))

    informerFactory := shared.NewInformerFactory(kubeclient, originclient, shared.DefaultListerWatcherOverrides{}, 10*time.Minute)
    controller := NewClusterQuotaMappingController(informerFactory.Namespaces(), informerFactory.ClusterResourceQuotas())
    go controller.Run(5, stopCh)
    informerFactory.Start(stopCh)
    informerFactory.StartCore(stopCh)

    finalNamespaces := map[string]*kapi.Namespace{}
    finalQuotas := map[string]*quotaapi.ClusterResourceQuota{}
    quotaActions := map[string][]string{}
    namespaceActions := map[string][]string{}
    finishedNamespaces := make(chan struct{})
    finishedQuotas := make(chan struct{})

    for _, quota := range startingQuotas {
        name := quota.(*quotaapi.ClusterResourceQuota).Name
        quotaActions[name] = append(quotaActions[name], fmt.Sprintf("inserting %v to %v", name, quota.(*quotaapi.ClusterResourceQuota).Spec.Selector))
        finalQuotas[name] = quota.(*quotaapi.ClusterResourceQuota)
    }
    for _, namespace := range startingNamespaces {
        name := namespace.(*kapi.Namespace).Name
        namespaceActions[name] = append(namespaceActions[name], fmt.Sprintf("inserting %v to %v", name, namespace.(*kapi.Namespace).Labels))
        finalNamespaces[name] = namespace.(*kapi.Namespace)
    }

    go func() {
        for i := 0; i < 200; i++ {
            name := quotaNames[rand.Intn(len(quotaNames))]
            _, exists := finalQuotas[name]
            if rand.Intn(50) == 0 {
                if !exists {
                    continue
                }
                // due to the compression race (see big comment for impl), clear the queue then delete
                for {
                    if len(quotaWatch.ResultChan()) == 0 {
                        break
                    }
                    time.Sleep(10 * time.Millisecond)
                }
                quotaActions[name] = append(quotaActions[name], "deleting "+name)
                quotaWatch.Delete(finalQuotas[name])
                delete(finalQuotas, name)
                continue
            }

            quota := NewQuota(name)
            finalQuotas[name] = quota
            copied, err := kapi.Scheme.Copy(quota)
            if err != nil {
                t.Fatal(err)
            }
            if exists {
                quotaActions[name] = append(quotaActions[name], fmt.Sprintf("updating %v to %v", name, quota.Spec.Selector))
                quotaWatch.Modify(copied)
            } else {
                quotaActions[name] = append(quotaActions[name], fmt.Sprintf("adding %v to %v", name, quota.Spec.Selector))
                quotaWatch.Add(copied)
            }
        }
        close(finishedQuotas)
    }()

    go func() {
        for i := 0; i < 200; i++ {
            name := namespaceNames[rand.Intn(len(namespaceNames))]
            _, exists := finalNamespaces[name]
            if rand.Intn(50) == 0 {
                if !exists {
                    continue
                }
                // due to the compression race (see big comment for impl), clear the queue then delete
                for {
                    if len(nsWatch.ResultChan()) == 0 {
                        break
                    }
                    time.Sleep(10 * time.Millisecond)
                }
                namespaceActions[name] = append(namespaceActions[name], "deleting "+name)
                nsWatch.Delete(finalNamespaces[name])
                delete(finalNamespaces, name)
                continue
            }

            ns := NewNamespace(name)
            finalNamespaces[name] = ns
            copied, err := kapi.Scheme.Copy(ns)
            if err != nil {
                t.Fatal(err)
            }
            if exists {
                namespaceActions[name] = append(namespaceActions[name], fmt.Sprintf("updating %v to %v", name, ns.Labels))
                nsWatch.Modify(copied)
            } else {
                namespaceActions[name] = append(namespaceActions[name], fmt.Sprintf("adding %v to %v", name, ns.Labels))
                nsWatch.Add(copied)
            }
        }
        close(finishedNamespaces)
    }()

    <-finishedQuotas
    <-finishedNamespaces

    finalFailures := []string{}
    for i := 0; i < 200; i++ {
        // better suggestions for testing doneness? Check the condition a few times?
        time.Sleep(50 * time.Millisecond)
        finalFailures = checkState(controller, finalNamespaces, finalQuotas, t, quotaActions, namespaceActions)
        if len(finalFailures) == 0 {
            break
        }
    }

    if len(finalFailures) > 0 {
        t.Logf("have %d quotas and %d namespaces", len(quotaWatch.ResultChan()), len(nsWatch.ResultChan()))
        t.Fatalf("failed on \n%v", strings.Join(finalFailures, "\n"))
    }
}
func TestGetFirstPod(t *testing.T) {
    labelSet := map[string]string{"test": "selector"}
    tests := []struct {
        name string

        podList  *api.PodList
        watching []watch.Event
        sortBy   func([]*api.Pod) sort.Interface

        expected    *api.Pod
        expectedNum int
        expectedErr bool
    }{
        {
            name:    "kubectl logs - two ready pods",
            podList: newPodList(2, -1, -1, labelSet),
            sortBy:  func(pods []*api.Pod) sort.Interface { return controller.ByLogging(pods) },
            expected: &api.Pod{
                ObjectMeta: api.ObjectMeta{
                    Name:              "pod-1",
                    Namespace:         api.NamespaceDefault,
                    CreationTimestamp: unversioned.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC),
                    Labels:            map[string]string{"test": "selector"},
                },
                Status: api.PodStatus{
                    Conditions: []api.PodCondition{
                        {
                            Status: api.ConditionTrue,
                            Type:   api.PodReady,
                        },
                    },
                },
            },
            expectedNum: 2,
        },
        {
            name:    "kubectl logs - one unhealthy, one healthy",
            podList: newPodList(2, -1, 1, labelSet),
            sortBy:  func(pods []*api.Pod) sort.Interface { return controller.ByLogging(pods) },
            expected: &api.Pod{
                ObjectMeta: api.ObjectMeta{
                    Name:              "pod-2",
                    Namespace:         api.NamespaceDefault,
                    CreationTimestamp: unversioned.Date(2016, time.April, 1, 1, 0, 1, 0, time.UTC),
                    Labels:            map[string]string{"test": "selector"},
                },
                Status: api.PodStatus{
                    Conditions: []api.PodCondition{
                        {
                            Status: api.ConditionTrue,
                            Type:   api.PodReady,
                        },
                    },
                    ContainerStatuses: []api.ContainerStatus{{RestartCount: 5}},
                },
            },
            expectedNum: 2,
        },
        {
            name:    "kubectl attach - two ready pods",
            podList: newPodList(2, -1, -1, labelSet),
            sortBy:  func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) },
            expected: &api.Pod{
                ObjectMeta: api.ObjectMeta{
                    Name:              "pod-1",
                    Namespace:         api.NamespaceDefault,
                    CreationTimestamp: unversioned.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC),
                    Labels:            map[string]string{"test": "selector"},
                },
                Status: api.PodStatus{
                    Conditions: []api.PodCondition{
                        {
                            Status: api.ConditionTrue,
                            Type:   api.PodReady,
                        },
                    },
                },
            },
            expectedNum: 2,
        },
        {
            name:    "kubectl attach - wait for ready pod",
            podList: newPodList(1, 1, -1, labelSet),
            watching: []watch.Event{
                {
                    Type: watch.Modified,
                    Object: &api.Pod{
                        ObjectMeta: api.ObjectMeta{
                            Name:              "pod-1",
                            Namespace:         api.NamespaceDefault,
                            CreationTimestamp: unversioned.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC),
                            Labels:            map[string]string{"test": "selector"},
                        },
                        Status: api.PodStatus{
                            Conditions: []api.PodCondition{
                                {
                                    Status: api.ConditionTrue,
                                    Type:   api.PodReady,
                                },
                            },
                        },
                    },
                },
            },
            sortBy: func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) },
            expected: &api.Pod{
                ObjectMeta: api.ObjectMeta{
                    Name:              "pod-1",
                    Namespace:         api.NamespaceDefault,
                    CreationTimestamp: unversioned.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC),
                    Labels:            map[string]string{"test": "selector"},
                },
                Status: api.PodStatus{
                    Conditions: []api.PodCondition{
                        {
                            Status: api.ConditionTrue,
                            Type:   api.PodReady,
                        },
                    },
                },
            },
            expectedNum: 1,
        },
    }

    for i := range tests {
        test := tests[i]
        client := &testclient.Fake{}
        client.PrependReactor("list", "pods", func(action testclient.Action) (handled bool, ret runtime.Object, err error) {
            return true, test.podList, nil
        })
        if len(test.watching) > 0 {
            watcher := watch.NewFake()
            for _, event := range test.watching {
                switch event.Type {
                case watch.Added:
                    go watcher.Add(event.Object)
                case watch.Modified:
                    go watcher.Modify(event.Object)
                }
            }
            client.PrependWatchReactor("pods", testclient.DefaultWatchReactor(watcher, nil))
        }
        selector := labels.Set(labelSet).AsSelector()

        pod, numPods, err := GetFirstPod(client, api.NamespaceDefault, selector, 1*time.Minute, test.sortBy)
        if !test.expectedErr && err != nil {
            t.Errorf("%s: unexpected error: %v", test.name, err)
            continue
        }
        if test.expectedErr && err == nil {
            t.Errorf("%s: expected an error", test.name)
            continue
        }
        if test.expectedNum != numPods {
            t.Errorf("%s: expected %d pods, got %d", test.name, test.expectedNum, numPods)
            continue
        }
        if !reflect.DeepEqual(test.expected, pod) {
            t.Errorf("%s:\nexpected pod:\n%#v\ngot:\n%#v\n\n", test.name, test.expected, pod)
        }
    }
}
// TestAdmission
func TestAdmission(t *testing.T) {
    namespaceObj := &api.Namespace{
        ObjectMeta: api.ObjectMeta{
            Name:      "test",
            Namespace: "",
        },
        Status: api.NamespaceStatus{
            Phase: api.NamespaceActive,
        },
    }
    store := cache.NewStore(cache.MetaNamespaceKeyFunc)
    store.Add(namespaceObj)
    fakeWatch := watch.NewFake()
    mockClient := &testclient.Fake{}
    mockClient.AddWatchReactor("*", testclient.DefaultWatchReactor(fakeWatch, nil))
    mockClient.AddReactor("get", "namespaces", func(action testclient.Action) (bool, runtime.Object, error) {
        if getAction, ok := action.(testclient.GetAction); ok && getAction.GetName() == namespaceObj.Name {
            return true, namespaceObj, nil
        }
        return true, nil, fmt.Errorf("no result for action %v", action)
    })
    mockClient.AddReactor("list", "namespaces", func(action testclient.Action) (bool, runtime.Object, error) {
        return true, &api.NamespaceList{Items: []api.Namespace{*namespaceObj}}, nil
    })

    lfhandler := NewLifecycle(mockClient).(*lifecycle)
    lfhandler.store = store
    handler := admission.NewChainHandler(lfhandler)
    pod := api.Pod{
        ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespaceObj.Name},
        Spec: api.PodSpec{
            Volumes:    []api.Volume{{Name: "vol"}},
            Containers: []api.Container{{Name: "ctr", Image: "image"}},
        },
    }
    badPod := api.Pod{
        ObjectMeta: api.ObjectMeta{Name: "456", Namespace: "doesnotexist"},
        Spec: api.PodSpec{
            Volumes:    []api.Volume{{Name: "vol"}},
            Containers: []api.Container{{Name: "ctr", Image: "image"}},
        },
    }
    err := handler.Admit(admission.NewAttributesRecord(&pod, "Pod", pod.Namespace, pod.Name, "pods", "", admission.Create, nil))
    if err != nil {
        t.Errorf("Unexpected error returned from admission handler: %v", err)
    }

    // change namespace state to terminating
    namespaceObj.Status.Phase = api.NamespaceTerminating
    store.Add(namespaceObj)

    // verify create operations in the namespace cause an error
    err = handler.Admit(admission.NewAttributesRecord(&pod, "Pod", pod.Namespace, pod.Name, "pods", "", admission.Create, nil))
    if err == nil {
        t.Errorf("Expected error rejecting creates in a namespace when it is terminating")
    }

    // verify update operations in the namespace can proceed
    err = handler.Admit(admission.NewAttributesRecord(&pod, "Pod", pod.Namespace, pod.Name, "pods", "", admission.Update, nil))
    if err != nil {
        t.Errorf("Unexpected error returned from admission handler: %v", err)
    }

    // verify delete operations in the namespace can proceed
    err = handler.Admit(admission.NewAttributesRecord(nil, "Pod", pod.Namespace, pod.Name, "pods", "", admission.Delete, nil))
    if err != nil {
        t.Errorf("Unexpected error returned from admission handler: %v", err)
    }

    // verify delete of namespace default can never proceed
    err = handler.Admit(admission.NewAttributesRecord(nil, "Namespace", "", api.NamespaceDefault, "namespaces", "", admission.Delete, nil))
    if err == nil {
        t.Errorf("Expected an error that this namespace can never be deleted")
    }

    // verify delete of namespace other than default can proceed
    err = handler.Admit(admission.NewAttributesRecord(nil, "Namespace", "", "other", "namespaces", "", admission.Delete, nil))
    if err != nil {
        t.Errorf("Did not expect an error %v", err)
    }

    // verify create/update/delete of an object in a non-existent namespace throws an error
    err = handler.Admit(admission.NewAttributesRecord(&badPod, "Pod", badPod.Namespace, badPod.Name, "pods", "", admission.Create, nil))
    if err == nil {
        t.Errorf("Expected an error that objects cannot be created in non-existent namespaces")
    }

    err = handler.Admit(admission.NewAttributesRecord(&badPod, "Pod", badPod.Namespace, badPod.Name, "pods", "", admission.Update, nil))
    if err == nil {
        t.Errorf("Expected an error that objects cannot be updated in non-existent namespaces")
    }

    err = handler.Admit(admission.NewAttributesRecord(&badPod, "Pod", badPod.Namespace, badPod.Name, "pods", "", admission.Delete, nil))
    if err == nil {
        t.Errorf("Expected an error that objects cannot be deleted in non-existent namespaces")
    }
}
func TestWatchJobs(t *testing.T) {
    fakeWatch := watch.NewFake()
    client := &testclient.Fake{}
    client.AddWatchReactor("*", testclient.DefaultWatchReactor(fakeWatch, nil))
    manager := NewJobController(client, controller.NoResyncPeriodFunc)
    manager.podStoreSynced = alwaysReady

    var testJob extensions.Job
    received := make(chan string)

    // The update sent through the fakeWatcher should make its way into the workqueue,
    // and eventually into the syncHandler.
    manager.syncHandler = func(key string) error {
        obj, exists, err := manager.jobStore.Store.GetByKey(key)
        if !exists || err != nil {
            t.Errorf("Expected to find job under key %v", key)
        }
        job := *obj.(*extensions.Job)
        if !api.Semantic.DeepDerivative(job, testJob) {
            t.Errorf("Expected %#v, but got %#v", testJob, job)
        }
        received <- key
        return nil
    }
    // Start only the job watcher and the workqueue, send a watch event,
    // and make sure it hits the sync method.
    stopCh := make(chan struct{})
    defer close(stopCh)
    go manager.jobController.Run(stopCh)
    go util.Until(manager.worker, 10*time.Millisecond, stopCh)

    // We're sending a new job to see if it reaches the syncHandler.
    testJob.Name = "foo"
    fakeWatch.Add(&testJob)

    select {
    case <-received:
    case <-time.After(controllerTimeout):
        t.Errorf("Expected 1 call but got 0")
    }

    // We're sending a fake finished job, to see if it reaches the syncHandler - it should not,
    // since we're filtering out finished jobs.
    testJobv2 := extensions.Job{
        ObjectMeta: api.ObjectMeta{Name: "foo"},
        Status: extensions.JobStatus{
            Conditions: []extensions.JobCondition{{
                Type:               extensions.JobComplete,
                Status:             api.ConditionTrue,
                LastProbeTime:      unversioned.Now(),
                LastTransitionTime: unversioned.Now(),
            }},
        },
    }
    fakeWatch.Modify(&testJobv2)

    select {
    case <-received:
        t.Errorf("Expected 0 calls but got 1")
    case <-time.After(controllerTimeout):
    }
}
func TestHookExecutor_executeExecNewPodSucceeded(t *testing.T) {
    hook := &deployapi.LifecycleHook{
        FailurePolicy: deployapi.LifecycleHookFailurePolicyAbort,
        ExecNewPod: &deployapi.ExecNewPodHook{
            ContainerName: "container1",
        },
    }

    config := deploytest.OkDeploymentConfig(1)
    deployment, _ := deployutil.MakeDeployment(config, kapi.Codecs.LegacyCodec(deployv1.SchemeGroupVersion))
    deployment.Spec.Template.Spec.NodeSelector = map[string]string{"labelKey1": "labelValue1", "labelKey2": "labelValue2"}

    client := newTestClient(config)
    podCreated := make(chan struct{})
    var createdPod *kapi.Pod
    client.AddReactor("create", "pods", func(a testclient.Action) (handled bool, ret runtime.Object, err error) {
        defer close(podCreated)
        action := a.(testclient.CreateAction)
        object := action.GetObject()
        createdPod = object.(*kapi.Pod)
        return true, createdPod, nil
    })
    podsWatch := watch.NewFake()
    client.AddWatchReactor("pods", testclient.DefaultWatchReactor(podsWatch, nil))

    podLogs := &bytes.Buffer{}
    // Simulate creation of the lifecycle pod
    go func() {
        <-podCreated
        podsWatch.Add(createdPod)
        podCopy, _ := kapi.Scheme.Copy(createdPod)
        updatedPod := podCopy.(*kapi.Pod)
        updatedPod.Status.Phase = kapi.PodSucceeded
        podsWatch.Modify(updatedPod)
    }()

    executor := &HookExecutor{
        pods:    client,
        out:     podLogs,
        decoder: kapi.Codecs.UniversalDecoder(),
        getPodLogs: func(*kapi.Pod) (io.ReadCloser, error) {
            return ioutil.NopCloser(strings.NewReader("test")), nil
        },
    }

    err := executor.executeExecNewPod(hook, deployment, "hook", "test")
    if err != nil {
        t.Fatalf("unexpected error: %s", err)
    }

    if e, a := "--> test: Running hook pod ...\ntest--> test: Success\n", podLogs.String(); e != a {
        t.Fatalf("expected pod logs to be %q, got %q", e, a)
    }

    if e, a := deployment.Spec.Template.Spec.NodeSelector, createdPod.Spec.NodeSelector; !reflect.DeepEqual(e, a) {
        t.Fatalf("expected pod NodeSelector %v, got %v", e, a)
    }

    if createdPod.Spec.ActiveDeadlineSeconds == nil {
        t.Fatalf("expected ActiveDeadlineSeconds to be set on the deployment hook executor pod")
    }

    if *createdPod.Spec.ActiveDeadlineSeconds >= deployapi.MaxDeploymentDurationSeconds {
        t.Fatalf("expected ActiveDeadlineSeconds %+v to be lower than %+v", *createdPod.Spec.ActiveDeadlineSeconds, deployapi.MaxDeploymentDurationSeconds)
    }
}
// scc exec is a pass through to *constraint, so we only need to test that
// it correctly limits its actions to certain conditions
func TestExecAdmit(t *testing.T) {
    goodPod := func() *kapi.Pod {
        return &kapi.Pod{
            Spec: kapi.PodSpec{
                ServiceAccountName: "default",
                Containers: []kapi.Container{
                    {
                        SecurityContext: &kapi.SecurityContext{},
                    },
                },
            },
        }
    }

    testCases := map[string]struct {
        operation   kadmission.Operation
        resource    string
        subresource string

        pod                    *kapi.Pod
        shouldAdmit            bool
        shouldHaveClientAction bool
    }{
        "unchecked operation": {
            operation:              kadmission.Create,
            resource:               string(kapi.ResourcePods),
            subresource:            "exec",
            pod:                    goodPod(),
            shouldAdmit:            true,
            shouldHaveClientAction: false,
        },
        "unchecked resource": {
            operation:              kadmission.Connect,
            resource:               string(kapi.ResourceSecrets),
            subresource:            "exec",
            pod:                    goodPod(),
            shouldAdmit:            true,
            shouldHaveClientAction: false,
        },
        "unchecked subresource": {
            operation:              kadmission.Connect,
            resource:               string(kapi.ResourcePods),
            subresource:            "not-exec",
            pod:                    goodPod(),
            shouldAdmit:            true,
            shouldHaveClientAction: false,
        },
        "attach check": {
            operation:              kadmission.Connect,
            resource:               string(kapi.ResourcePods),
            subresource:            "attach",
            pod:                    goodPod(),
            shouldAdmit:            false,
            shouldHaveClientAction: true,
        },
        "exec check": {
            operation:              kadmission.Connect,
            resource:               string(kapi.ResourcePods),
            subresource:            "exec",
            pod:                    goodPod(),
            shouldAdmit:            false,
            shouldHaveClientAction: true,
        },
    }

    for k, v := range testCases {
        tc := testclient.NewSimpleFake()
        tc.PrependReactor("get", "pods", func(action testclient.Action) (handled bool, ret runtime.Object, err error) {
            return true, v.pod, nil
        })
        tc.AddWatchReactor("*", testclient.DefaultWatchReactor(watch.NewFake(), nil))

        // create the admission plugin
        p := NewSCCExecRestrictions(tc)

        attrs := kadmission.NewAttributesRecord(v.pod, kapi.Kind("Pod"), "namespace", "pod-name", kapi.Resource(v.resource), v.subresource, v.operation, &user.DefaultInfo{})
        err := p.Admit(attrs)

        if v.shouldAdmit && err != nil {
            t.Errorf("%s: expected no errors but received %v", k, err)
        }
        if !v.shouldAdmit && err == nil {
            t.Errorf("%s: expected errors but received none", k)
        }

        for _, action := range tc.Actions() {
            t.Logf("%s: %#v", k, action)
        }
        if !v.shouldHaveClientAction && (len(tc.Actions()) > 0) {
            t.Errorf("%s: unexpected actions: %v", k, tc.Actions())
        }
        if v.shouldHaveClientAction && (len(tc.Actions()) == 0) {
            t.Errorf("%s: no actions found", k)
        }
        if v.shouldHaveClientAction {
            if len(v.pod.Spec.ServiceAccountName) != 0 {
                t.Errorf("%s: sa name should have been cleared: %v", k, v.pod.Spec.ServiceAccountName)
            }
        }
    }
}
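// A note on reactor ordering, which tests like the one above depend on:
// NewSimpleFake seeds the client with a default reaction backed by its object
// tracker, and reactors run in registration order until one reports the
// action as handled, so PrependReactor guarantees the stub wins. A minimal
// sketch of that behavior (TestPrependReactorWins and stubPod are
// hypothetical, not from the original sources, and assume the unversioned
// testclient's Invokes/NewGetAction helpers):
func TestPrependReactorWins(t *testing.T) {
    stubPod := &kapi.Pod{ObjectMeta: kapi.ObjectMeta{Name: "stub", Namespace: "ns"}}
    tc := testclient.NewSimpleFake()
    tc.PrependReactor("get", "pods", func(action testclient.Action) (handled bool, ret runtime.Object, err error) {
        return true, stubPod, nil // runs before the tracker's default reaction
    })
    obj, err := tc.Invokes(testclient.NewGetAction("pods", "ns", "anything"), &kapi.Pod{})
    if err != nil || obj.(*kapi.Pod).Name != "stub" {
        t.Fatalf("expected the prepended reactor to handle the get, got %v, %v", obj, err)
    }
}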
func TestReplicationControllerStop(t *testing.T) {
    name := "foo"
    ns := "default"
    tests := []struct {
        Name            string
        Objs            []runtime.Object
        StopError       error
        ExpectedActions []string
    }{
        {
            Name: "OnlyOneRC",
            Objs: []runtime.Object{
                &api.ReplicationController{ // GET
                    ObjectMeta: api.ObjectMeta{
                        Name:      name,
                        Namespace: ns,
                    },
                    Spec: api.ReplicationControllerSpec{
                        Replicas: 0,
                        Selector: map[string]string{"k1": "v1"}},
                },
                &api.ReplicationControllerList{ // LIST
                    Items: []api.ReplicationController{
                        {
                            ObjectMeta: api.ObjectMeta{
                                Name:      name,
                                Namespace: ns,
                            },
                            Spec: api.ReplicationControllerSpec{
                                Replicas: 0,
                                Selector: map[string]string{"k1": "v1"}},
                        },
                    },
                },
            },
            StopError:       nil,
            ExpectedActions: []string{"get", "list", "get", "update", "watch", "delete"},
        },
        {
            Name: "NoOverlapping",
            Objs: []runtime.Object{
                &api.ReplicationController{ // GET
                    ObjectMeta: api.ObjectMeta{
                        Name:      name,
                        Namespace: ns,
                    },
                    Spec: api.ReplicationControllerSpec{
                        Replicas: 0,
                        Selector: map[string]string{"k1": "v1"}},
                },
                &api.ReplicationControllerList{ // LIST
                    Items: []api.ReplicationController{
                        {
                            ObjectMeta: api.ObjectMeta{
                                Name:      "baz",
                                Namespace: ns,
                            },
                            Spec: api.ReplicationControllerSpec{
                                Replicas: 0,
                                Selector: map[string]string{"k3": "v3"}},
                        },
                        {
                            ObjectMeta: api.ObjectMeta{
                                Name:      name,
                                Namespace: ns,
                            },
                            Spec: api.ReplicationControllerSpec{
                                Replicas: 0,
                                Selector: map[string]string{"k1": "v1"}},
                        },
                    },
                },
            },
            StopError:       nil,
            ExpectedActions: []string{"get", "list", "get", "update", "watch", "delete"},
        },
        {
            Name: "OverlappingError",
            Objs: []runtime.Object{
                &api.ReplicationController{ // GET
                    ObjectMeta: api.ObjectMeta{
                        Name:      name,
                        Namespace: ns,
                    },
                    Spec: api.ReplicationControllerSpec{
                        Replicas: 0,
                        Selector: map[string]string{"k1": "v1"}},
                },
                &api.ReplicationControllerList{ // LIST
                    Items: []api.ReplicationController{
                        {
                            ObjectMeta: api.ObjectMeta{
                                Name:      "baz",
                                Namespace: ns,
                            },
                            Spec: api.ReplicationControllerSpec{
                                Replicas: 0,
                                Selector: map[string]string{"k1": "v1", "k2": "v2"}},
                        },
                        {
                            ObjectMeta: api.ObjectMeta{
                                Name:      name,
                                Namespace: ns,
                            },
                            Spec: api.ReplicationControllerSpec{
                                Replicas: 0,
                                Selector: map[string]string{"k1": "v1"}},
                        },
                    },
                },
            },
            StopError:       fmt.Errorf("Detected overlapping controllers for rc foo: baz, please manage deletion individually with --cascade=false."),
            ExpectedActions: []string{"get", "list"},
        },
        {
            Name: "OverlappingButSafeDelete",
            Objs: []runtime.Object{
                &api.ReplicationController{ // GET
                    ObjectMeta: api.ObjectMeta{
                        Name:      name,
                        Namespace: ns,
                    },
                    Spec: api.ReplicationControllerSpec{
                        Replicas: 0,
                        Selector: map[string]string{"k1": "v1", "k2": "v2"}},
                },
                &api.ReplicationControllerList{ // LIST
                    Items: []api.ReplicationController{
                        {
                            ObjectMeta: api.ObjectMeta{
                                Name:      "baz",
                                Namespace: ns,
                            },
                            Spec: api.ReplicationControllerSpec{
                                Replicas: 0,
                                Selector: map[string]string{"k1": "v1", "k2": "v2", "k3": "v3"}},
                        },
                        {
                            ObjectMeta: api.ObjectMeta{
                                Name:      "zaz",
                                Namespace: ns,
                            },
                            Spec: api.ReplicationControllerSpec{
                                Replicas: 0,
                                Selector: map[string]string{"k1": "v1"}},
                        },
                        {
                            ObjectMeta: api.ObjectMeta{
                                Name:      name,
                                Namespace: ns,
                            },
                            Spec: api.ReplicationControllerSpec{
                                Replicas: 0,
                                Selector: map[string]string{"k1": "v1", "k2": "v2"}},
                        },
                    },
                },
            },
            StopError:       fmt.Errorf("Detected overlapping controllers for rc foo: baz,zaz, please manage deletion individually with --cascade=false."),
            ExpectedActions: []string{"get", "list"},
        },
        {
            Name: "TwoExactMatchRCs",
            Objs: []runtime.Object{
                &api.ReplicationController{ // GET
                    ObjectMeta: api.ObjectMeta{
                        Name:      name,
                        Namespace: ns,
                    },
                    Spec: api.ReplicationControllerSpec{
                        Replicas: 0,
                        Selector: map[string]string{"k1": "v1"}},
                },
                &api.ReplicationControllerList{ // LIST
                    Items: []api.ReplicationController{
                        {
                            ObjectMeta: api.ObjectMeta{
                                Name:      "zaz",
                                Namespace: ns,
                            },
                            Spec: api.ReplicationControllerSpec{
                                Replicas: 0,
                                Selector: map[string]string{"k1": "v1"}},
                        },
                        {
                            ObjectMeta: api.ObjectMeta{
                                Name:      name,
                                Namespace: ns,
                            },
                            Spec: api.ReplicationControllerSpec{
                                Replicas: 0,
                                Selector: map[string]string{"k1": "v1"}},
                        },
                    },
                },
            },
            StopError:       nil,
            ExpectedActions: []string{"get", "list", "delete"},
        },
    }

    for _, test := range tests {
        copiedForWatch, err := api.Scheme.Copy(test.Objs[0])
        if err != nil {
            t.Fatalf("%s unexpected error: %v", test.Name, err)
        }
        fake := testclient.NewSimpleFake(test.Objs...)
        fakeWatch := watch.NewFake()
        fake.PrependWatchReactor("replicationcontrollers", testclient.DefaultWatchReactor(fakeWatch, nil))

        go func() {
            fakeWatch.Add(copiedForWatch)
        }()

        reaper := ReplicationControllerReaper{fake, time.Millisecond, time.Millisecond}
        err = reaper.Stop(ns, name, 0, nil)
        if !reflect.DeepEqual(err, test.StopError) {
            t.Errorf("%s unexpected error: %v", test.Name, err)
            continue
        }

        actions := fake.Actions()
        if len(actions) != len(test.ExpectedActions) {
            t.Errorf("%s unexpected actions: %v, expected %d actions got %d", test.Name, actions, len(test.ExpectedActions), len(actions))
            continue
        }
        for i, verb := range test.ExpectedActions {
            if actions[i].GetResource() != "replicationcontrollers" {
                t.Errorf("%s unexpected action: %+v, expected %s-replicationController", test.Name, actions[i], verb)
            }
            if actions[i].GetVerb() != verb {
                t.Errorf("%s unexpected action: %+v, expected %s-replicationController", test.Name, actions[i], verb)
            }
        }
    }
}
// mockREST mocks a DeploymentLog REST
func mockREST(version, desired int64, status api.DeploymentStatus) *REST {
    connectionInfo := &kubeletclient.HTTPKubeletClient{Config: &kubeletclient.KubeletClientConfig{EnableHttps: true, Port: 12345}, Client: &http.Client{}}

    // Fake deploymentConfig
    config := deploytest.OkDeploymentConfig(version)
    fakeDn := testclient.NewSimpleFake(config)
    fakeDn.PrependReactor("get", "deploymentconfigs", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
        return true, config, nil
    })

    // Used for testing validation errors prior to getting replication controllers.
    if desired > version {
        return &REST{
            dn:       fakeDn,
            connInfo: connectionInfo,
            timeout:  defaultTimeout,
        }
    }

    // Fake deployments
    fakeDeployments := makeDeploymentList(version)
    fakeRn := ktestclient.NewSimpleFake(fakeDeployments)
    fakeRn.PrependReactor("get", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
        return true, &fakeDeployments.Items[desired-1], nil
    })

    // Fake watcher for deployments
    fakeWatch := watch.NewFake()
    fakeRn.PrependWatchReactor("replicationcontrollers", ktestclient.DefaultWatchReactor(fakeWatch, nil))
    obj := &fakeDeployments.Items[desired-1]
    obj.Annotations[api.DeploymentStatusAnnotation] = string(status)
    go fakeWatch.Add(obj)

    fakePn := ktestclient.NewSimpleFake()
    if status == api.DeploymentStatusComplete {
        // If the deployment is complete, we will try to get the logs from the oldest
        // application pod...
        fakePn.PrependReactor("list", "pods", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
            return true, fakePodList, nil
        })
        fakePn.PrependReactor("get", "pods", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
            return true, &fakePodList.Items[0], nil
        })
    } else {
        // ...otherwise try to get the logs from the deployer pod.
        fakeDeployer := &kapi.Pod{
            ObjectMeta: kapi.ObjectMeta{
                Name:      deployutil.DeployerPodNameForDeployment(obj.Name),
                Namespace: kapi.NamespaceDefault,
            },
            Spec: kapi.PodSpec{
                Containers: []kapi.Container{
                    {
                        Name: deployutil.DeployerPodNameForDeployment(obj.Name) + "-container",
                    },
                },
                NodeName: "some-host",
            },
        }
        fakePn.PrependReactor("get", "pods", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
            return true, fakeDeployer, nil
        })
    }

    return &REST{
        dn:       fakeDn,
        rn:       fakeRn,
        pn:       fakePn,
        connInfo: connectionInfo,
        timeout:  defaultTimeout,
    }
}