func TestRolling_deployInitial(t *testing.T) {
	initialStrategyInvoked := false

	strategy := &RollingDeploymentStrategy{
		decoder:     kapi.Codecs.UniversalDecoder(),
		rcClient:    fake.NewSimpleClientset().Core(),
		eventClient: fake.NewSimpleClientset().Core(),
		initialStrategy: &testStrategy{
			deployFn: func(from *kapi.ReplicationController, to *kapi.ReplicationController,
				desiredReplicas int, updateAcceptor strat.UpdateAcceptor) error {
				initialStrategyInvoked = true
				return nil
			},
		},
		rollingUpdate: func(config *kubectl.RollingUpdaterConfig) error {
			t.Fatalf("unexpected call to rollingUpdate")
			return nil
		},
		getUpdateAcceptor: getUpdateAcceptor,
		apiRetryPeriod:    1 * time.Millisecond,
		apiRetryTimeout:   10 * time.Millisecond,
	}

	config := deploytest.OkDeploymentConfig(1)
	config.Spec.Strategy = deploytest.OkRollingStrategy()
	deployment, _ := deployutil.MakeDeployment(config, kapi.Codecs.LegacyCodec(registered.GroupOrDie(kapi.GroupName).GroupVersions[0]))

	strategy.out, strategy.errOut = &bytes.Buffer{}, &bytes.Buffer{}
	err := strategy.Deploy(nil, deployment, 2)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if !initialStrategyInvoked {
		t.Fatalf("expected initial strategy to be invoked")
	}
}
func TestIsValidSecret(t *testing.T) {
	fk := testclient.NewSimpleClientset(&api.Secret{
		ObjectMeta: api.ObjectMeta{
			Namespace: api.NamespaceDefault,
			Name:      "demo",
		},
	})

	_, err := IsValidSecret(fk, "")
	if err == nil {
		t.Errorf("expected error but returned nil")
	}

	s, err := IsValidSecret(fk, "default/demo")
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if s == nil {
		t.Errorf("expected a Secret but returned nil")
	}

	fk = testclient.NewSimpleClientset()
	s, err = IsValidSecret(fk, "default/demo")
	if err == nil {
		t.Errorf("expected an error but returned nil")
	}
	if s != nil {
		t.Errorf("unexpected Secret returned: %v", s)
	}
}
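// For context, a hedged sketch of the helper exercised above, reconstructed
// only from the contract the assertions imply (an assumption, not the
// package's verbatim implementation; the clientset interface name is also
// assumed): the argument is a "namespace/name" key, an empty or malformed
// key fails fast, and a missing Secret surfaces the API lookup error.
func isValidSecretSketch(kubeClient clientset.Interface, key string) (*api.Secret, error) {
	parts := strings.Split(key, "/")
	if len(parts) != 2 {
		// hypothetical error text; the real helper may word this differently
		return nil, fmt.Errorf("invalid secret name %q: expected namespace/name", key)
	}
	return kubeClient.Core().Secrets(parts[0]).Get(parts[1])
}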
func TestAdmitImageStreamMapping(t *testing.T) {
	tests := map[string]struct {
		imageStreamMapping *imageapi.ImageStreamMapping
		limitRange         *kapi.LimitRange
		shouldAdmit        bool
		operation          kadmission.Operation
	}{
		"new ism, no limit range": {
			imageStreamMapping: getImageStreamMapping(),
			operation:          kadmission.Create,
			shouldAdmit:        true,
		},
		"new ism, under limit range": {
			imageStreamMapping: getImageStreamMapping(),
			limitRange:         getLimitRange("1Ki"),
			operation:          kadmission.Create,
			shouldAdmit:        true,
		},
		"new ism, over limit range": {
			imageStreamMapping: getImageStreamMapping(),
			limitRange:         getLimitRange("0Ki"),
			operation:          kadmission.Create,
			shouldAdmit:        false,
		},
	}

	for k, v := range tests {
		var fakeKubeClient clientset.Interface
		if v.limitRange != nil {
			fakeKubeClient = clientsetfake.NewSimpleClientset(v.limitRange)
		} else {
			fakeKubeClient = clientsetfake.NewSimpleClientset()
		}

		plugin, err := NewImageLimitRangerPlugin(fakeKubeClient, nil)
		if err != nil {
			t.Errorf("%s failed creating plugin %v", k, err)
			continue
		}

		attrs := kadmission.NewAttributesRecord(v.imageStreamMapping,
			imageapi.Kind("ImageStreamMapping").WithVersion("version"),
			v.imageStreamMapping.Namespace,
			v.imageStreamMapping.Name,
			imageapi.Resource("imagestreammappings").WithVersion("version"),
			"",
			v.operation,
			nil)

		err = plugin.Admit(attrs)
		if v.shouldAdmit && err != nil {
			t.Errorf("%s expected to be admitted but received error %v", k, err)
		}
		if !v.shouldAdmit && err == nil {
			t.Errorf("%s expected to be rejected but received no error", k)
		}
	}
}
// TestRolling_deployInitialHooks can go away once the rolling strategy
// supports initial deployments.
func TestRolling_deployInitialHooks(t *testing.T) {
	var hookError error

	strategy := &RollingDeploymentStrategy{
		decoder:     kapi.Codecs.UniversalDecoder(),
		rcClient:    fake.NewSimpleClientset().Core(),
		eventClient: fake.NewSimpleClientset().Core(),
		initialStrategy: &testStrategy{
			deployFn: func(from *kapi.ReplicationController, to *kapi.ReplicationController,
				desiredReplicas int, updateAcceptor strat.UpdateAcceptor) error {
				return nil
			},
		},
		rollingUpdate: func(config *kubectl.RollingUpdaterConfig) error {
			return nil
		},
		hookExecutor: &hookExecutorImpl{
			executeFunc: func(hook *deployapi.LifecycleHook, deployment *kapi.ReplicationController, suffix, label string) error {
				return hookError
			},
		},
		getUpdateAcceptor: getUpdateAcceptor,
		apiRetryPeriod:    1 * time.Millisecond,
		apiRetryTimeout:   10 * time.Millisecond,
	}

	cases := []struct {
		params               *deployapi.RollingDeploymentStrategyParams
		hookShouldFail       bool
		deploymentShouldFail bool
	}{
		{rollingParams(deployapi.LifecycleHookFailurePolicyAbort, ""), true, true},
		{rollingParams(deployapi.LifecycleHookFailurePolicyAbort, ""), false, false},
		{rollingParams("", deployapi.LifecycleHookFailurePolicyAbort), true, true},
		{rollingParams("", deployapi.LifecycleHookFailurePolicyAbort), false, false},
	}

	for i, tc := range cases {
		config := deploytest.OkDeploymentConfig(2)
		config.Spec.Strategy.RollingParams = tc.params
		deployment, _ := deployutil.MakeDeployment(config, kapi.Codecs.LegacyCodec(registered.GroupOrDie(kapi.GroupName).GroupVersions[0]))
		hookError = nil
		if tc.hookShouldFail {
			hookError = fmt.Errorf("hook failure")
		}

		strategy.out, strategy.errOut = &bytes.Buffer{}, &bytes.Buffer{}
		err := strategy.Deploy(nil, deployment, 2)
		if err != nil && tc.deploymentShouldFail {
			t.Logf("got expected error: %v", err)
		}
		if err == nil && tc.deploymentShouldFail {
			t.Errorf("%d: expected an error for case: %v", i, tc)
		}
		if err != nil && !tc.deploymentShouldFail {
			t.Errorf("%d: unexpected error for case: %v: %v", i, tc, err)
		}
	}
}
// fakeMasterConfig creates a new fake master config with an empty kubelet config and dummy storage.
func fakeMasterConfig() *MasterConfig {
	kubeInformerFactory := informers.NewSharedInformerFactory(fake.NewSimpleClientset(), 1*time.Second)
	informerFactory := shared.NewInformerFactory(kubeInformerFactory, fake.NewSimpleClientset(), testclient.NewSimpleFake(), shared.DefaultListerWatcherOverrides{}, 1*time.Second)
	return &MasterConfig{
		KubeletClientConfig:           &kubeletclient.KubeletClientConfig{},
		RESTOptionsGetter:             restoptions.NewSimpleGetter(&storagebackend.Config{ServerList: []string{"localhost"}}),
		Informers:                     informerFactory,
		ClusterQuotaMappingController: clusterquotamapping.NewClusterQuotaMappingController(kubeInformerFactory.Namespaces(), informerFactory.ClusterResourceQuotas()),
		PrivilegedLoopbackKubernetesClientset: &kclientset.Clientset{},
	}
}
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("glusterfs_test")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Name: "pvA",
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeSource: api.PersistentVolumeSource{
				Glusterfs: &api.GlusterfsVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false},
			},
			ClaimRef: &api.ObjectReference{
				Name: "claimA",
			},
		},
	}

	claim := &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{
			Name:      "claimA",
			Namespace: "nsA",
		},
		Spec: api.PersistentVolumeClaimSpec{
			VolumeName: "pvA",
		},
		Status: api.PersistentVolumeClaimStatus{
			Phase: api.ClaimBound,
		},
	}

	ep := &api.Endpoints{
		ObjectMeta: api.ObjectMeta{
			Namespace: "nsA",
			Name:      "ep",
		},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []api.EndpointPort{{Name: "foo", Port: 80, Protocol: api.ProtocolTCP}},
		}},
	}

	client := fake.NewSimpleClientset(pv, claim, ep)

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil))
	plug, _ := plugMgr.FindPluginByName(glusterfsPluginName)

	// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
	spec := volume.NewSpecFromPersistentVolume(pv, true)
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{Namespace: "nsA", UID: types.UID("poduid")}}
	mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})

	if !mounter.GetAttributes().ReadOnly {
		t.Errorf("Expected true for mounter.IsReadOnly")
	}
}
func TestDoNotDeleteMirrorPods(t *testing.T) {
	staticPod := getTestPod()
	staticPod.Annotations = map[string]string{kubetypes.ConfigSourceAnnotationKey: "file"}
	mirrorPod := getTestPod()
	mirrorPod.UID = "mirror-12345678"
	mirrorPod.Annotations = map[string]string{
		kubetypes.ConfigSourceAnnotationKey: "api",
		kubetypes.ConfigMirrorAnnotationKey: "mirror",
	}
	// Set the deletion timestamp.
	mirrorPod.DeletionTimestamp = new(unversioned.Time)
	client := fake.NewSimpleClientset(mirrorPod)
	m := newTestManager(client)
	m.podManager.AddPod(staticPod)
	m.podManager.AddPod(mirrorPod)
	// Verify setup.
	assert.True(t, kubepod.IsStaticPod(staticPod), "SetUp error: staticPod")
	assert.True(t, kubepod.IsMirrorPod(mirrorPod), "SetUp error: mirrorPod")
	assert.Equal(t, m.podManager.TranslatePodUID(mirrorPod.UID), staticPod.UID)

	status := getRandomPodStatus()
	now := unversioned.Now()
	status.StartTime = &now
	m.SetPodStatus(staticPod, status)

	m.testSyncBatch()
	// Expect not to see a delete action.
	verifyActions(t, m.kubeClient, []core.Action{
		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: "pods"}},
		core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: "pods", Subresource: "status"}},
	})
}
func TestRecreate_initialDeployment(t *testing.T) {
	var deployment *kapi.ReplicationController
	scaler := &cmdtest.FakeScaler{}

	strategy := &RecreateDeploymentStrategy{
		out:               &bytes.Buffer{},
		errOut:            &bytes.Buffer{},
		decoder:           kapi.Codecs.UniversalDecoder(),
		retryTimeout:      1 * time.Second,
		retryPeriod:       1 * time.Millisecond,
		getUpdateAcceptor: getUpdateAcceptor,
		scaler:            scaler,
		eventClient:       fake.NewSimpleClientset().Core(),
	}

	config := deploytest.OkDeploymentConfig(1)
	config.Spec.Strategy = recreateParams(30, "", "", "")
	deployment, _ = deployutil.MakeDeployment(config, kapi.Codecs.LegacyCodec(registered.GroupOrDie(kapi.GroupName).GroupVersions[0]))
	strategy.rcClient = &fakeControllerClient{deployment: deployment}

	err := strategy.Deploy(nil, deployment, 3)
	if err != nil {
		t.Fatalf("unexpected deploy error: %#v", err)
	}

	if e, a := 1, len(scaler.Events); e != a {
		t.Fatalf("expected %d scale calls, got %d", e, a)
	}
	if e, a := uint(3), scaler.Events[0].Size; e != a {
		t.Errorf("expected scale up to %d, got %d", e, a)
	}
}
func TestLimitRangerCacheAndLRUExpiredMisses(t *testing.T) {
	liveLookupCache, err := lru.New(10000)
	if err != nil {
		t.Fatal(err)
	}

	limitRange := validLimitRangeNoDefaults()
	client := fake.NewSimpleClientset(&limitRange)
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
	handler := &limitRanger{
		Handler:         admission.NewHandler(admission.Create, admission.Update),
		client:          client,
		actions:         &DefaultLimitRangerActions{},
		indexer:         indexer,
		liveLookupCache: liveLookupCache,
	}

	testPod := validPod("testPod", 1, api.ResourceRequirements{})

	// Seed the LRU cache with an already-expired entry to force a live lookup.
	liveLookupCache.Add(limitRange.Namespace, liveLookupEntry{expiry: time.Now().Add(-30 * time.Second), items: []*api.LimitRange{}})

	err = handler.Admit(admission.NewAttributesRecord(&testPod, api.Kind("Pod").WithVersion("version"), limitRange.Namespace, "testPod", api.Resource("pods").WithVersion("version"), "", admission.Update, nil))
	if err == nil {
		t.Errorf("Expected an error since the pod did not specify resource limits in its update call")
	}

	err = handler.Admit(admission.NewAttributesRecord(&testPod, api.Kind("Pod").WithVersion("version"), limitRange.Namespace, "testPod", api.Resource("pods").WithVersion("version"), "status", admission.Update, nil))
	if err != nil {
		t.Errorf("Should have ignored calls to any subresource of pod %v", err)
	}
}
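// Note on the test above (an inference from its assertions, not a quote of
// the limitRanger implementation): the indexer holds no LimitRange, so the
// handler falls through to liveLookupCache; the entry seeded above is
// already expired, which forces a fresh List against the fake client, and
// that List returns validLimitRangeNoDefaults() -- so the non-subresource
// update is still validated and rejected, while the "status" subresource
// call is ignored entirely.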
func TestCasting(t *testing.T) {
	clientset := fake.NewSimpleClientset()
	binder := NewPersistentVolumeClaimBinder(clientset, 1*time.Second)

	pv := &api.PersistentVolume{}
	unk := cache.DeletedFinalStateUnknown{}
	pvc := &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{Name: "foo"},
		Status:     api.PersistentVolumeClaimStatus{Phase: api.ClaimBound},
	}

	// Inject mockClient into the binder. This prevents weird errors on stderr
	// as the binder wants to load PV/PVC from the API server.
	mockClient := &mockBinderClient{
		volume: pv,
		claim:  pvc,
	}
	binder.client = mockClient

	// None of these should fail casting.
	// The real test is not failing when passed DeletedFinalStateUnknown in the deleteHandler.
	binder.addVolume(pv)
	binder.updateVolume(pv, pv)
	binder.deleteVolume(pv)
	binder.deleteVolume(unk)
	binder.addClaim(pvc)
	binder.updateClaim(pvc, pvc)
}
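// The casts exercised by TestCasting follow the standard informer delete
// pattern: a delete handler may receive either the object itself or a
// cache.DeletedFinalStateUnknown tombstone wrapping it. A minimal sketch of
// the defensive handling (illustrative only; not the binder's actual code):
func exampleDeleteVolumeHandler(obj interface{}) {
	pv, ok := obj.(*api.PersistentVolume)
	if !ok {
		// When the watch missed the object's final state, the informer hands
		// us a tombstone instead of the object; unwrap it before giving up.
		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
		if !ok {
			return // neither a volume nor a tombstone; nothing to do
		}
		pv, ok = tombstone.Obj.(*api.PersistentVolume)
		if !ok {
			return // tombstone carried an unexpected type
		}
	}
	_ = pv // clean up state for the deleted volume here
}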
func TestRepairEmpty(t *testing.T) {
	_, cidr, _ := net.ParseCIDR("192.168.1.0/24")
	previous := ipallocator.NewCIDRRange(cidr)
	previous.Allocate(net.ParseIP("192.168.1.10"))

	var dst api.RangeAllocation
	err := previous.Snapshot(&dst)
	if err != nil {
		t.Fatal(err)
	}

	fakeClient := fake.NewSimpleClientset()
	ipregistry := &mockRangeRegistry{
		item: &api.RangeAllocation{
			ObjectMeta: api.ObjectMeta{
				ResourceVersion: "1",
			},
			Range: dst.Range,
			Data:  dst.Data,
		},
	}
	r := NewRepair(0, fakeClient.Core(), cidr, ipregistry)
	if err := r.RunOnce(); err != nil {
		t.Fatal(err)
	}
	after := ipallocator.NewCIDRRange(cidr)
	if err := after.Restore(cidr, ipregistry.updated.Data); err != nil {
		t.Fatal(err)
	}
	if after.Has(net.ParseIP("192.168.1.10")) {
		t.Errorf("unexpected ipallocator state: %#v", after)
	}
}
// TestAdmitExceedQuotaLimit verifies that a pod exceeding allowed usage is rejected during admission.
func TestAdmitExceedQuotaLimit(t *testing.T) {
	resourceQuota := &api.ResourceQuota{
		ObjectMeta: api.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
		Status: api.ResourceQuotaStatus{
			Hard: api.ResourceList{
				api.ResourceCPU:    resource.MustParse("3"),
				api.ResourceMemory: resource.MustParse("100Gi"),
				api.ResourcePods:   resource.MustParse("5"),
			},
			Used: api.ResourceList{
				api.ResourceCPU:    resource.MustParse("1"),
				api.ResourceMemory: resource.MustParse("50Gi"),
				api.ResourcePods:   resource.MustParse("3"),
			},
		},
	}
	kubeClient := fake.NewSimpleClientset(resourceQuota)
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
	evaluator, _ := newQuotaEvaluator(kubeClient, install.NewRegistry(kubeClient))
	evaluator.indexer = indexer
	evaluator.Run(5)
	handler := &quotaAdmission{
		Handler:   admission.NewHandler(admission.Create, admission.Update),
		evaluator: evaluator,
	}
	indexer.Add(resourceQuota)
	newPod := validPod("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")))
	err := handler.Admit(admission.NewAttributesRecord(newPod, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, nil))
	if err == nil {
		t.Errorf("Expected an error exceeding quota")
	}
}
func TestDeployAppContainerCommands(t *testing.T) {
	command := "foo-command"
	commandArgs := "foo-command-args"
	spec := &AppDeploymentSpec{
		Namespace:            "foo-namespace",
		Name:                 "foo-name",
		ContainerCommand:     &command,
		ContainerCommandArgs: &commandArgs,
	}
	testClient := fake.NewSimpleClientset()

	DeployApp(spec, testClient)

	createAction := testClient.Actions()[0].(core.CreateActionImpl)

	rc := createAction.GetObject().(*extensions.Deployment)
	container := rc.Spec.Template.Spec.Containers[0]
	if container.Command[0] != command {
		t.Errorf("Expected command to be %#v but got %#v", command, container.Command)
	}

	if container.Args[0] != commandArgs {
		t.Errorf("Expected command args to be %#v but got %#v", commandArgs, container.Args)
	}
}
func TestDeployWithResourceRequirements(t *testing.T) {
	cpuRequirement := resource.Quantity{}
	memoryRequirement := resource.Quantity{}
	spec := &AppDeploymentSpec{
		Namespace:         "foo-namespace",
		Name:              "foo-name",
		CpuRequirement:    &cpuRequirement,
		MemoryRequirement: &memoryRequirement,
	}
	expectedResources := api.ResourceRequirements{
		Requests: map[api.ResourceName]resource.Quantity{
			api.ResourceMemory: memoryRequirement,
			api.ResourceCPU:    cpuRequirement,
		},
	}
	testClient := fake.NewSimpleClientset()

	DeployApp(spec, testClient)

	createAction := testClient.Actions()[0].(core.CreateActionImpl)

	rc := createAction.GetObject().(*extensions.Deployment)
	container := rc.Spec.Template.Spec.Containers[0]
	if !reflect.DeepEqual(container.Resources, expectedResources) {
		t.Errorf("Expected resource requirements to be %#v but got %#v", expectedResources, container.Resources)
	}
}
func testPSPAdmit(testCaseName string, psps []*extensions.PodSecurityPolicy, pod *kapi.Pod, shouldPass bool, expectedPSP string, t *testing.T) {
	namespace := createNamespaceForTest()
	serviceAccount := createSAForTest()
	tc := clientsetfake.NewSimpleClientset(namespace, serviceAccount)

	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	for _, psp := range psps {
		store.Add(psp)
	}

	plugin := NewTestAdmission(store, tc)

	attrs := kadmission.NewAttributesRecord(pod, nil, kapi.Kind("Pod").WithVersion("version"), "namespace", "", kapi.Resource("pods").WithVersion("version"), "", kadmission.Create, &user.DefaultInfo{})
	err := plugin.Admit(attrs)

	if shouldPass && err != nil {
		t.Errorf("%s expected no errors but received %v", testCaseName, err)
	}

	if shouldPass && err == nil {
		if pod.Annotations[psputil.ValidatedPSPAnnotation] != expectedPSP {
			t.Errorf("%s expected to validate under %s but found %s", testCaseName, expectedPSP, pod.Annotations[psputil.ValidatedPSPAnnotation])
		}
	}

	if !shouldPass && err == nil {
		t.Errorf("%s expected errors but received none", testCaseName)
	}
}
// TestAdmitBestEffortQuotaLimitIgnoresBurstable validates that a besteffort quota
// does not match a burstable pod.
func TestAdmitBestEffortQuotaLimitIgnoresBurstable(t *testing.T) {
	resourceQuota := &api.ResourceQuota{
		ObjectMeta: api.ObjectMeta{Name: "quota-besteffort", Namespace: "test", ResourceVersion: "124"},
		Spec: api.ResourceQuotaSpec{
			Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeBestEffort},
		},
		Status: api.ResourceQuotaStatus{
			Hard: api.ResourceList{
				api.ResourcePods: resource.MustParse("5"),
			},
			Used: api.ResourceList{
				api.ResourcePods: resource.MustParse("3"),
			},
		},
	}
	kubeClient := fake.NewSimpleClientset(resourceQuota)
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
	handler := &quotaAdmission{
		Handler:  admission.NewHandler(admission.Create, admission.Update),
		client:   kubeClient,
		indexer:  indexer,
		registry: install.NewRegistry(kubeClient),
	}
	handler.indexer.Add(resourceQuota)
	newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", "")))
	err := handler.Admit(admission.NewAttributesRecord(newPod, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, nil))
	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}
	if len(kubeClient.Actions()) != 0 {
		t.Errorf("Expected no client actions because the incoming pod did not match best effort quota")
	}
}
func TestLimitRangerIgnoresSubresource(t *testing.T) {
	client := fake.NewSimpleClientset()
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
	handler := &limitRanger{
		Handler: admission.NewHandler(admission.Create, admission.Update),
		client:  client,
		actions: &DefaultLimitRangerActions{},
		indexer: indexer,
	}

	limitRange := validLimitRangeNoDefaults()
	testPod := validPod("testPod", 1, api.ResourceRequirements{})

	indexer.Add(&limitRange)
	err := handler.Admit(admission.NewAttributesRecord(&testPod, api.Kind("Pod").WithVersion("version"), limitRange.Namespace, "testPod", api.Resource("pods").WithVersion("version"), "", admission.Update, nil))
	if err == nil {
		t.Errorf("Expected an error since the pod did not specify resource limits in its update call")
	}

	err = handler.Admit(admission.NewAttributesRecord(&testPod, api.Kind("Pod").WithVersion("version"), limitRange.Namespace, "testPod", api.Resource("pods").WithVersion("version"), "status", admission.Update, nil))
	if err != nil {
		t.Errorf("Should have ignored calls to any subresource of pod %v", err)
	}
}
func TestGetDaemonSetPodsEvents(t *testing.T) {
	ds := createDaemonSet("ds-1", "test-namespace", map[string]string{"app": "test"})
	cases := []struct {
		namespace, name string
		eventList       *api.EventList
		podList         *api.PodList
		daemonSet       *extensions.DaemonSet
		expectedActions []string
		expected        []api.Event
	}{
		{
			"test-namespace", "ds-1",
			&api.EventList{Items: []api.Event{
				{Message: "test-message", ObjectMeta: api.ObjectMeta{
					Name:      "ev-1",
					Namespace: "test-namespace",
				}},
			}},
			&api.PodList{Items: []api.Pod{
				{ObjectMeta: api.ObjectMeta{
					Name:      "pod-1",
					Namespace: "test-namespace",
					Labels:    map[string]string{"app": "test"},
				}},
			}},
			&ds,
			[]string{"get", "list", "list"},
			[]api.Event{
				{Message: "test-message", ObjectMeta: api.ObjectMeta{
					Name:      "ev-1",
					Namespace: "test-namespace",
				}},
			},
		},
	}

	for _, c := range cases {
		fakeClient := fake.NewSimpleClientset(c.daemonSet, c.podList, c.eventList)

		actual, _ := GetDaemonSetPodsEvents(fakeClient, c.namespace, c.name)

		actions := fakeClient.Actions()
		if len(actions) != len(c.expectedActions) {
			t.Errorf("Unexpected actions: %v, expected %d actions got %d", actions, len(c.expectedActions), len(actions))
			continue
		}

		for i, verb := range c.expectedActions {
			if actions[i].GetVerb() != verb {
				t.Errorf("Unexpected action: %+v, expected %s", actions[i], verb)
			}
		}

		if !reflect.DeepEqual(actual, c.expected) {
			t.Errorf("GetDaemonSetPodsEvents(client, %#v, %#v) ==\ngot: %#v,\nexpected %#v", c.namespace, c.name, actual, c.expected)
		}
	}
}
func TestRecreate_deploymentMidHookFail(t *testing.T) {
	config := deploytest.OkDeploymentConfig(1)
	config.Spec.Strategy = recreateParams(30, "", deployapi.LifecycleHookFailurePolicyAbort, "")
	deployment, _ := deployutil.MakeDeployment(config, kapi.Codecs.LegacyCodec(deployv1.SchemeGroupVersion))
	scaler := &cmdtest.FakeScaler{}

	strategy := &RecreateDeploymentStrategy{
		out:               &bytes.Buffer{},
		errOut:            &bytes.Buffer{},
		decoder:           kapi.Codecs.UniversalDecoder(),
		retryTimeout:      1 * time.Second,
		retryPeriod:       1 * time.Millisecond,
		rcClient:          &fakeControllerClient{deployment: deployment},
		eventClient:       fake.NewSimpleClientset().Core(),
		getUpdateAcceptor: getUpdateAcceptor,
		hookExecutor: &hookExecutorImpl{
			executeFunc: func(hook *deployapi.LifecycleHook, deployment *kapi.ReplicationController, suffix, label string) error {
				return fmt.Errorf("hook execution failure")
			},
		},
		scaler: scaler,
	}

	err := strategy.Deploy(nil, deployment, 2)
	if err == nil {
		t.Fatalf("expected a deploy error")
	}
	if len(scaler.Events) > 0 {
		t.Fatalf("unexpected scaling events: %v", scaler.Events)
	}
}
func TestPlugin(t *testing.T) {
	var (
		testPodUID     = types.UID("test_pod_uid")
		testVolumeName = "test_volume_name"
		testNamespace  = "test_configmap_namespace"
		testName       = "test_configmap_name"

		volumeSpec    = volumeSpec(testVolumeName, testName, 0644)
		configMap     = configMap(testNamespace, testName)
		client        = fake.NewSimpleClientset(&configMap)
		pluginMgr     = volume.VolumePluginMgr{}
		tempDir, host = newTestHost(t, client)
	)

	defer os.RemoveAll(tempDir)
	pluginMgr.InitPlugins(ProbeVolumePlugins(), host)

	plugin, err := pluginMgr.FindPluginByName(configMapPluginName)
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}

	pod := &api.Pod{ObjectMeta: api.ObjectMeta{Namespace: testNamespace, UID: testPodUID}}
	mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		t.Errorf("Got a nil Mounter")
	}

	vName, err := plugin.GetVolumeName(volume.NewSpecFromVolume(volumeSpec))
	if err != nil {
		t.Errorf("Failed to GetVolumeName: %v", err)
	}
	if vName != "test_volume_name/test_configmap_name" {
		t.Errorf("Got unexpected VolumeName %v", vName)
	}

	volumePath := mounter.GetPath()
	if !strings.HasSuffix(volumePath, "pods/test_pod_uid/volumes/kubernetes.io~configmap/test_volume_name") {
		t.Errorf("Got unexpected path: %s", volumePath)
	}

	fsGroup := int64(1001)
	err = mounter.SetUp(&fsGroup)
	if err != nil {
		t.Errorf("Failed to setup volume: %v", err)
	}
	if _, err := os.Stat(volumePath); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}

	doTestConfigMapDataInVolume(volumePath, configMap, t)
	doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t)
}
func TestRecreate_deploymentPreHookSuccess(t *testing.T) {
	config := deploytest.OkDeploymentConfig(1)
	config.Spec.Strategy = recreateParams(30, deployapi.LifecycleHookFailurePolicyAbort, "", "")
	deployment, _ := deployutil.MakeDeployment(config, kapi.Codecs.LegacyCodec(registered.GroupOrDie(kapi.GroupName).GroupVersions[0]))
	scaler := &cmdtest.FakeScaler{}
	hookExecuted := false

	strategy := &RecreateDeploymentStrategy{
		out:               &bytes.Buffer{},
		errOut:            &bytes.Buffer{},
		decoder:           kapi.Codecs.UniversalDecoder(),
		retryTimeout:      1 * time.Second,
		retryPeriod:       1 * time.Millisecond,
		getUpdateAcceptor: getUpdateAcceptor,
		eventClient:       fake.NewSimpleClientset().Core(),
		rcClient:          &fakeControllerClient{deployment: deployment},
		hookExecutor: &hookExecutorImpl{
			executeFunc: func(hook *deployapi.LifecycleHook, deployment *kapi.ReplicationController, suffix, label string) error {
				hookExecuted = true
				return nil
			},
		},
		scaler: scaler,
	}

	err := strategy.Deploy(nil, deployment, 2)
	if err != nil {
		t.Fatalf("unexpected deploy error: %#v", err)
	}
	if !hookExecuted {
		t.Fatalf("expected hook execution")
	}
}
func controllerSetup(startingObjects []runtime.Object, stopChannel chan struct{}, t *testing.T) ( /*caName*/ string, *fake.Clientset, *watch.FakeWatcher, *ServiceServingCertController) {
	certDir, err := ioutil.TempDir("", "serving-cert-unit-")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	caInfo := admin.DefaultServiceSignerCAInfo(certDir)

	caOptions := admin.CreateSignerCertOptions{
		CertFile: caInfo.CertFile,
		KeyFile:  caInfo.KeyFile,
		Name:     admin.DefaultServiceServingCertSignerName(),
		Output:   ioutil.Discard,
	}
	ca, err := caOptions.CreateSignerCert()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	kubeclient := fake.NewSimpleClientset(startingObjects...)
	fakeWatch := watch.NewFake()
	kubeclient.PrependReactor("create", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		return true, action.(core.CreateAction).GetObject(), nil
	})
	kubeclient.PrependReactor("update", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		return true, action.(core.UpdateAction).GetObject(), nil
	})
	kubeclient.PrependWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))

	controller := NewServiceServingCertController(kubeclient.Core(), kubeclient.Core(), ca, "cluster.local", 10*time.Minute)

	return caOptions.Name, kubeclient, fakeWatch, controller
}
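// A note on the fixture above: PrependReactor places these reactors ahead of
// the fake clientset's default object-tracker reactors, so create and update
// calls simply echo the submitted object back, and PrependWatchReactor routes
// every watch through fakeWatch. A test would typically drive the controller
// by hand (hypothetical usage; the Run signature and service variable are
// assumptions, not taken from this source):
//
//	_, kubeclient, fakeWatch, controller := controllerSetup(nil, stopChannel, t)
//	go controller.Run(1, stopChannel)
//	fakeWatch.Add(service) // feed an event, then assert on kubeclient.Actions()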
func TestSAR(t *testing.T) {
	store := projectcache.NewCacheStore(cache.IndexFuncToKeyFuncAdapter(cache.MetaNamespaceIndexFunc))
	mockClient := &testclient.Fake{}
	mockClient.AddReactor("get", "namespaces", func(action testclient.Action) (handled bool, ret runtime.Object, err error) {
		return true, nil, fmt.Errorf("shouldn't get here")
	})
	cache := projectcache.NewFake(mockClient.Namespaces(), store, "")

	mockClientset := clientsetfake.NewSimpleClientset()
	handler := &lifecycle{client: mockClientset, creatableResources: recommendedCreatableResources}
	handler.SetProjectCache(cache)

	tests := map[string]struct {
		kind     string
		resource string
	}{
		"subject access review": {
			kind:     "SubjectAccessReview",
			resource: "subjectaccessreviews",
		},
		"local subject access review": {
			kind:     "LocalSubjectAccessReview",
			resource: "localsubjectaccessreviews",
		},
	}

	for k, v := range tests {
		err := handler.Admit(admission.NewAttributesRecord(nil, nil, kapi.Kind(v.kind).WithVersion("v1"), "foo", "name", kapi.Resource(v.resource).WithVersion("v1"), "", admission.Create, nil))
		if err != nil {
			t.Errorf("Unexpected error for %s returned from admission handler: %v", k, err)
		}
	}
}
// TestAdmissionIgnoresSubresources verifies that the admission controller ignores subresources.
// It verifies that creation of a pod that would have exceeded quota is properly failed.
// It verifies that create operations to a subresource that would have exceeded quota succeed.
func TestAdmissionIgnoresSubresources(t *testing.T) {
	resourceQuota := &api.ResourceQuota{}
	resourceQuota.Name = "quota"
	resourceQuota.Namespace = "test"
	resourceQuota.Status = api.ResourceQuotaStatus{
		Hard: api.ResourceList{},
		Used: api.ResourceList{},
	}
	resourceQuota.Status.Hard[api.ResourceMemory] = resource.MustParse("2Gi")
	resourceQuota.Status.Used[api.ResourceMemory] = resource.MustParse("1Gi")
	kubeClient := fake.NewSimpleClientset(resourceQuota)
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
	handler := &quotaAdmission{
		Handler:  admission.NewHandler(admission.Create, admission.Update),
		client:   kubeClient,
		indexer:  indexer,
		registry: install.NewRegistry(kubeClient),
	}
	handler.indexer.Add(resourceQuota)
	newPod := validPod("123", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("", "")))
	err := handler.Admit(admission.NewAttributesRecord(newPod, api.Kind("Pod"), newPod.Namespace, newPod.Name, api.Resource("pods"), "", admission.Create, nil))
	if err == nil {
		t.Errorf("Expected an error because the pod exceeded allowed quota")
	}
	err = handler.Admit(admission.NewAttributesRecord(newPod, api.Kind("Pod"), newPod.Namespace, newPod.Name, api.Resource("pods"), "subresource", admission.Create, nil))
	if err != nil {
		t.Errorf("Did not expect an error because the action went to a subresource: %v", err)
	}
}
func TestStaticPodStatus(t *testing.T) {
	staticPod := getTestPod()
	staticPod.Annotations = map[string]string{kubetypes.ConfigSourceAnnotationKey: "file"}
	mirrorPod := getTestPod()
	mirrorPod.UID = "mirror-12345678"
	mirrorPod.Annotations = map[string]string{
		kubetypes.ConfigSourceAnnotationKey: "api",
		kubetypes.ConfigMirrorAnnotationKey: "mirror",
	}
	client := fake.NewSimpleClientset(mirrorPod)
	m := newTestManager(client)
	m.podManager.AddPod(staticPod)
	m.podManager.AddPod(mirrorPod)
	// Verify setup.
	assert.True(t, kubepod.IsStaticPod(staticPod), "SetUp error: staticPod")
	assert.True(t, kubepod.IsMirrorPod(mirrorPod), "SetUp error: mirrorPod")
	assert.Equal(t, m.podManager.TranslatePodUID(mirrorPod.UID), staticPod.UID)

	status := getRandomPodStatus()
	now := unversioned.Now()
	status.StartTime = &now

	m.SetPodStatus(staticPod, status)
	retrievedStatus := expectPodStatus(t, m, staticPod)
	normalizeStatus(&status)
	assert.True(t, isStatusEqual(&status, &retrievedStatus), "Expected: %+v, Got: %+v", status, retrievedStatus)
	retrievedStatus, _ = m.GetPodStatus(mirrorPod.UID)
	assert.True(t, isStatusEqual(&status, &retrievedStatus), "Expected: %+v, Got: %+v", status, retrievedStatus)
	// Should translate mirrorPod / staticPod UID.
	m.testSyncBatch()
	verifyActions(t, m.kubeClient, []core.Action{
		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: "pods"}},
		core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: "pods", Subresource: "status"}},
	})
	updateAction := client.Actions()[1].(core.UpdateActionImpl)
	updatedPod := updateAction.Object.(*api.Pod)
	assert.Equal(t, mirrorPod.UID, updatedPod.UID, "Expected mirrorPod (%q), but got %q", mirrorPod.UID, updatedPod.UID)
	assert.True(t, isStatusEqual(&status, &updatedPod.Status), "Expected: %+v, Got: %+v", status, updatedPod.Status)
	client.ClearActions()

	// No changes.
	m.testSyncBatch()
	verifyActions(t, m.kubeClient, []core.Action{})

	// Mirror pod identity changes.
	m.podManager.DeletePod(mirrorPod)
	mirrorPod.UID = "new-mirror-pod"
	mirrorPod.Status = api.PodStatus{}
	m.podManager.AddPod(mirrorPod)
	// Expect update to new mirrorPod.
	m.testSyncBatch()
	verifyActions(t, m.kubeClient, []core.Action{
		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: "pods"}},
		core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: "pods", Subresource: "status"}},
	})
	updateAction = client.Actions()[1].(core.UpdateActionImpl)
	updatedPod = updateAction.Object.(*api.Pod)
	assert.Equal(t, mirrorPod.UID, updatedPod.UID, "Expected mirrorPod (%q), but got %q", mirrorPod.UID, updatedPod.UID)
	assert.True(t, isStatusEqual(&status, &updatedPod.Status), "Expected: %+v, Got: %+v", status, updatedPod.Status)
}
func (c *fakePodClient) Pods(ns string) kcoreclient.PodInterface {
	deployerPod := &kapi.Pod{}
	deployerPod.Name = c.deployerName
	deployerPod.Namespace = ns
	deployerPod.Status = kapi.PodStatus{}
	return fake.NewSimpleClientset(deployerPod).Core().Pods(ns)
}
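// Design note on fakePodClient.Pods: each call builds a fresh fake clientset
// seeded with a single deployer pod, so every caller observes that pod as
// present. This keeps tests that only need the lookup to succeed trivially
// simple, at the cost that recorded actions do not accumulate across calls,
// since the clientset is rebuilt on each invocation.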
// TestAdmissionIgnoresSubresources verifies that the admission controller ignores subresources.
// It verifies that creation of a pod that would have exceeded quota is properly failed.
// It verifies that create operations to a subresource that would have exceeded quota succeed.
func TestAdmissionIgnoresSubresources(t *testing.T) {
	resourceQuota := &api.ResourceQuota{}
	resourceQuota.Name = "quota"
	resourceQuota.Namespace = "test"
	resourceQuota.Status = api.ResourceQuotaStatus{
		Hard: api.ResourceList{},
		Used: api.ResourceList{},
	}
	resourceQuota.Status.Hard[api.ResourceMemory] = resource.MustParse("2Gi")
	resourceQuota.Status.Used[api.ResourceMemory] = resource.MustParse("1Gi")
	kubeClient := fake.NewSimpleClientset(resourceQuota)
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
	stopCh := make(chan struct{})
	defer close(stopCh)

	quotaAccessor, _ := newQuotaAccessor(kubeClient)
	quotaAccessor.indexer = indexer
	go quotaAccessor.Run(stopCh)
	evaluator := NewQuotaEvaluator(quotaAccessor, install.NewRegistry(nil, nil), nil, 5, stopCh)

	handler := &quotaAdmission{
		Handler:   admission.NewHandler(admission.Create, admission.Update),
		evaluator: evaluator,
	}
	indexer.Add(resourceQuota)
	newPod := validPod("123", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("", "")))
	err := handler.Admit(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, nil))
	if err == nil {
		t.Errorf("Expected an error because the pod exceeded allowed quota")
	}
	err = handler.Admit(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "subresource", admission.Create, nil))
	if err != nil {
		t.Errorf("Did not expect an error because the action went to a subresource: %v", err)
	}
}
func TestSyncResourceQuotaNoChange(t *testing.T) {
	quota := api.ResourceQuota{
		Spec: api.ResourceQuotaSpec{
			Hard: api.ResourceList{
				api.ResourceCPU: resource.MustParse("4"),
			},
		},
		Status: api.ResourceQuotaStatus{
			Hard: api.ResourceList{
				api.ResourceCPU: resource.MustParse("4"),
			},
			Used: api.ResourceList{
				api.ResourceCPU: resource.MustParse("0"),
			},
		},
	}

	kubeClient := fake.NewSimpleClientset(&api.PodList{}, &quota)

	ResourceQuotaController := NewResourceQuotaController(kubeClient, controller.StaticResyncPeriodFunc(time.Second))
	err := ResourceQuotaController.syncResourceQuota(quota)
	if err != nil {
		t.Fatalf("Unexpected error %v", err)
	}

	actions := kubeClient.Actions()
	if len(actions) != 1 || !actions[0].Matches("list", "pods") {
		t.Errorf("SyncResourceQuota made an unexpected client action when state was not dirty: %v", actions)
	}
}
// TestGetNodeAddresses verifies that proper results are returned
// when requesting node addresses.
func TestGetNodeAddresses(t *testing.T) {
	assert := assert.New(t)

	fakeNodeClient := fake.NewSimpleClientset(registrytest.MakeNodeList([]string{"node1", "node2"}, api.NodeResources{})).Core().Nodes()
	addressProvider := nodeAddressProvider{fakeNodeClient}

	// Fail case (no addresses associated with nodes).
	nodes, _ := fakeNodeClient.List(api.ListOptions{})
	addrs, err := addressProvider.externalAddresses()
	assert.Error(err, "addresses should have caused an error as there are no addresses.")
	assert.Equal([]string(nil), addrs)

	// Pass case with External type IP.
	nodes, _ = fakeNodeClient.List(api.ListOptions{})
	for index := range nodes.Items {
		nodes.Items[index].Status.Addresses = []api.NodeAddress{{Type: api.NodeExternalIP, Address: "127.0.0.1"}}
		fakeNodeClient.Update(&nodes.Items[index])
	}
	addrs, err = addressProvider.externalAddresses()
	assert.NoError(err, "addresses should not have returned an error.")
	assert.Equal([]string{"127.0.0.1", "127.0.0.1"}, addrs)

	// Pass case with LegacyHost type IP.
	nodes, _ = fakeNodeClient.List(api.ListOptions{})
	for index := range nodes.Items {
		nodes.Items[index].Status.Addresses = []api.NodeAddress{{Type: api.NodeLegacyHostIP, Address: "127.0.0.2"}}
		fakeNodeClient.Update(&nodes.Items[index])
	}
	addrs, err = addressProvider.externalAddresses()
	assert.NoError(err, "addresses fallback should not have returned an error.")
	assert.Equal([]string{"127.0.0.2", "127.0.0.2"}, addrs)
}
// TestAdmitEnforceQuotaConstraints verifies that if a quota tracks a particular resource, that resource
// must be specified on the pod. In this case, we create a quota that tracks cpu request, memory request,
// and memory limit. We ensure that a pod that does not specify a memory limit fails admission.
func TestAdmitEnforceQuotaConstraints(t *testing.T) {
	resourceQuota := &api.ResourceQuota{
		ObjectMeta: api.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
		Status: api.ResourceQuotaStatus{
			Hard: api.ResourceList{
				api.ResourceCPU:          resource.MustParse("3"),
				api.ResourceMemory:       resource.MustParse("100Gi"),
				api.ResourceLimitsMemory: resource.MustParse("200Gi"),
				api.ResourcePods:         resource.MustParse("5"),
			},
			Used: api.ResourceList{
				api.ResourceCPU:          resource.MustParse("1"),
				api.ResourceMemory:       resource.MustParse("50Gi"),
				api.ResourceLimitsMemory: resource.MustParse("100Gi"),
				api.ResourcePods:         resource.MustParse("3"),
			},
		},
	}
	kubeClient := fake.NewSimpleClientset(resourceQuota)
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
	handler := &quotaAdmission{
		Handler:  admission.NewHandler(admission.Create, admission.Update),
		client:   kubeClient,
		indexer:  indexer,
		registry: install.NewRegistry(kubeClient),
	}
	handler.indexer.Add(resourceQuota)
	newPod := validPod("not-allowed-pod", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("200m", "")))
	err := handler.Admit(admission.NewAttributesRecord(newPod, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, nil))
	if err == nil {
		t.Errorf("Expected an error because the pod does not specify a memory limit")
	}
}