// TestCloudProviderNoRateLimit tests that monitorNodeStatus() immediately deletes // pods and the node when kubelet has not reported, and the cloudprovider says // the node is gone. func TestCloudProviderNoRateLimit(t *testing.T) { fnh := &FakeNodeHandler{ Existing: []*api.Node{ { ObjectMeta: api.ObjectMeta{ Name: "node0", CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), }, Status: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionUnknown, LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, }, }, }, Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0"), *newPod("pod1", "node0")}}), deleteWaitChan: make(chan struct{}), } nodeController, _ := NewNodeController(nil, fnh, 10*time.Minute, testRateLimiterQPS, testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false) nodeController.cloud = &fakecloud.FakeCloud{} nodeController.now = func() unversioned.Time { return unversioned.Date(2016, 1, 1, 12, 0, 0, 0, time.UTC) } nodeController.nodeExistsInCloudProvider = func(nodeName string) (bool, error) { return false, nil } // monitorNodeStatus should allow this node to be immediately deleted if err := nodeController.monitorNodeStatus(); err != nil { t.Errorf("unexpected error: %v", err) } select { case <-fnh.deleteWaitChan: case <-time.After(wait.ForeverTestTimeout): t.Errorf("Timed out waiting %v for node to be deleted", wait.ForeverTestTimeout) } if len(fnh.DeletedNodes) != 1 || fnh.DeletedNodes[0].Name != "node0" { t.Errorf("Node was not deleted") } if nodeOnQueue := nodeController.zonePodEvictor[""].Remove("node0"); nodeOnQueue { t.Errorf("Node was queued for eviction. Should have been immediately deleted.") } }
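// TestSortStatusTags verifies that SortStatusTags orders tag names from the most recently created tag event to the oldest.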
func TestSortStatusTags(t *testing.T) { tests := []struct { name string tags map[string]TagEventList expected []string }{ { name: "all timestamps here", tags: map[string]TagEventList{ "other": { Items: []TagEvent{ { DockerImageReference: "other-ref", Created: unversioned.Date(2015, 9, 4, 13, 52, 0, 0, time.UTC), Image: "other-image", }, }, }, "latest": { Items: []TagEvent{ { DockerImageReference: "latest-ref", Created: unversioned.Date(2015, 9, 4, 13, 53, 0, 0, time.UTC), Image: "latest-image", }, }, }, "third": { Items: []TagEvent{ { DockerImageReference: "third-ref", Created: unversioned.Date(2015, 9, 4, 13, 54, 0, 0, time.UTC), Image: "third-image", }, }, }, }, expected: []string{"third", "latest", "other"}, }, } for _, test := range tests { got := SortStatusTags(test.tags) if !reflect.DeepEqual(test.expected, got) { t.Errorf("%s: tags mismatch: expected %v, got %v", test.name, test.expected, got) } } }
func TestOverlappingRCs(t *testing.T) { c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) for i := 0; i < 5; i++ { manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 10, 0) manager.podStoreSynced = alwaysReady // Create 9 rcs, shuffle them randomly, and insert them into the rc manager's store var controllers []*api.ReplicationController for j := 1; j < 10; j++ { controllerSpec := newReplicationController(1) controllerSpec.CreationTimestamp = unversioned.Date(2014, time.December, j, 0, 0, 0, 0, time.Local) controllerSpec.Name = string(util.NewUUID()) controllers = append(controllers, controllerSpec) } shuffledControllers := shuffle(controllers) for j := range shuffledControllers { manager.rcStore.Store.Add(shuffledControllers[j]) } // Add a pod and make sure only the oldest rc is synced pods := newPodList(nil, 1, api.PodPending, controllers[0], "pod") rcKey := getKey(controllers[0], t) manager.addPod(&pods.Items[0]) queueRC, _ := manager.queue.Get() if queueRC != rcKey { t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC) } } }
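// TestLimitedLogAndRetryFinish verifies that limitedLogAndRetry gives up once the retry timeout (30 minutes) has elapsed: the build is marked failed, the error message is recorded, and a completion timestamp is set.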
func TestLimitedLogAndRetryFinish(t *testing.T) { updater := &buildUpdater{} err := errors.New("funky error") now := unversioned.Now() retry := controller.Retry{ Count: 0, StartTimestamp: unversioned.Date(now.Year(), now.Month(), now.Day(), now.Hour(), now.Minute()-31, now.Second(), now.Nanosecond(), now.Location()), } if limitedLogAndRetry(updater, 30*time.Minute)(&buildapi.Build{Status: buildapi.BuildStatus{Phase: buildapi.BuildPhaseNew}}, err, retry) { t.Error("Expected no more retries after reaching timeout!") } if updater.Build == nil { t.Fatal("BuildUpdater wasn't called!") } if updater.Build.Status.Phase != buildapi.BuildPhaseFailed { t.Errorf("Expected status %s, got %s!", buildapi.BuildPhaseFailed, updater.Build.Status.Phase) } if !strings.Contains(updater.Build.Status.Message, err.Error()) { t.Errorf("Expected message to contain %v, got %s!", err.Error(), updater.Build.Status.Message) } if updater.Build.Status.CompletionTimestamp == nil { t.Error("Expected CompletionTimestamp to be set!") } }
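// newPodList builds a PodList of `count` ready pods in the default namespace; if isUnready or isUnhealthy is a valid index, the pod at that position is marked not ready or given a non-zero restart count, respectively.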
func newPodList(count, isUnready, isUnhealthy int, labels map[string]string) *api.PodList { pods := []api.Pod{} for i := 0; i < count; i++ { newPod := api.Pod{ ObjectMeta: api.ObjectMeta{ Name: fmt.Sprintf("pod-%d", i+1), Namespace: api.NamespaceDefault, CreationTimestamp: unversioned.Date(2016, time.April, 1, 1, 0, i, 0, time.UTC), Labels: labels, }, Status: api.PodStatus{ Conditions: []api.PodCondition{ { Status: api.ConditionTrue, Type: api.PodReady, }, }, }, } pods = append(pods, newPod) } if isUnready > -1 && isUnready < count { pods[isUnready].Status.Conditions[0].Status = api.ConditionFalse } if isUnhealthy > -1 && isUnhealthy < count { pods[isUnhealthy].Status.ContainerStatuses = []api.ContainerStatus{{RestartCount: 5}} } return &api.PodList{ Items: pods, } }
func TestOverlappingRSs(t *testing.T) { client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) labelMap := map[string]string{"foo": "bar"} for i := 0; i < 5; i++ { manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, 10, 0) manager.podStoreSynced = alwaysReady // Create 9 ReplicaSets, shuffle them randomly, and insert them into the ReplicaSet controller's store var controllers []*extensions.ReplicaSet for j := 1; j < 10; j++ { rsSpec := newReplicaSet(1, labelMap) rsSpec.CreationTimestamp = unversioned.Date(2014, time.December, j, 0, 0, 0, 0, time.Local) rsSpec.Name = string(uuid.NewUUID()) controllers = append(controllers, rsSpec) } shuffledControllers := shuffle(controllers) for j := range shuffledControllers { manager.rsStore.Store.Add(shuffledControllers[j]) } // Add a pod and make sure only the oldest ReplicaSet is synced pods := newPodList(nil, 1, api.PodPending, labelMap, controllers[0], "pod") rsKey := getKey(controllers[0], t) manager.addPod(&pods.Items[0]) queueRS, _ := manager.queue.Get() if queueRS != rsKey { t.Fatalf("Expected to find key %v in queue, found %v", rsKey, queueRS) } } }
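// TestUnstructuredSetters exercises the metadata setters on runtime.Unstructured and checks that they produce the expected raw object map.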
func TestUnstructuredSetters(t *testing.T) { unstruct := runtime.Unstructured{} want := runtime.Unstructured{ Object: map[string]interface{}{ "kind": "test_kind", "apiVersion": "test_version", "metadata": map[string]interface{}{ "name": "test_name", "namespace": "test_namespace", "generateName": "test_generateName", "uid": "test_uid", "resourceVersion": "test_resourceVersion", "selfLink": "test_selfLink", "creationTimestamp": "2009-11-10T23:00:00Z", "deletionTimestamp": "2010-11-10T23:00:00Z", "labels": map[string]interface{}{ "test_label": "test_value", }, "annotations": map[string]interface{}{ "test_annotation": "test_value", }, }, }, } unstruct.SetAPIVersion("test_version") unstruct.SetKind("test_kind") unstruct.SetNamespace("test_namespace") unstruct.SetName("test_name") unstruct.SetGenerateName("test_generateName") unstruct.SetUID(types.UID("test_uid")) unstruct.SetResourceVersion("test_resourceVersion") unstruct.SetSelfLink("test_selfLink") unstruct.SetCreationTimestamp(unversioned.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)) date := unversioned.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC) unstruct.SetDeletionTimestamp(&date) unstruct.SetLabels(map[string]string{"test_label": "test_value"}) unstruct.SetAnnotations(map[string]string{"test_annotation": "test_value"}) if !reflect.DeepEqual(unstruct, want) { t.Errorf("Wanted: \n%s\n Got:\n%s", want, unstruct) } }
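// TestGetImageStreamTagDIR verifies that getting an image stream tag returns the DockerImageReference recorded in the stream's status tag event, not the reference stored on the Image itself.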
func TestGetImageStreamTagDIR(t *testing.T) { expDockerImageReference := "foo/bar/baz:latest" image := &api.Image{ObjectMeta: kapi.ObjectMeta{Name: "10"}, DockerImageReference: "foo/bar/baz:different"} repo := &api.ImageStream{ ObjectMeta: kapi.ObjectMeta{ Namespace: "default", Name: "test", }, Status: api.ImageStreamStatus{ Tags: map[string]api.TagEventList{ "latest": { Items: []api.TagEvent{ { Created: unversioned.Date(2015, 3, 24, 9, 38, 0, 0, time.UTC), DockerImageReference: expDockerImageReference, Image: "10", }, }, }, }, }, } fakeEtcdClient, _, storage := setup(t) fakeEtcdClient.Data[etcdtest.AddPrefix("/images/"+image.Name)] = tools.EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ Value: runtime.EncodeOrDie(latest.Codec, image), ModifiedIndex: 1, }, }, } fakeEtcdClient.Data[etcdtest.AddPrefix("/imagestreams/default/test")] = tools.EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ Value: runtime.EncodeOrDie(latest.Codec, repo), ModifiedIndex: 1, }, }, } obj, err := storage.Get(kapi.NewDefaultContext(), "test:latest") if err != nil { t.Fatalf("Unexpected error: %v", err) } actual := obj.(*api.ImageStreamTag) if actual.Image.DockerImageReference != expDockerImageReference { t.Errorf("Different DockerImageReference: expected %s, got %s", expDockerImageReference, actual.Image.DockerImageReference) } }
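// TestLimitedLogAndRetryProcessing verifies that limitedLogAndRetry keeps retrying while the retry timeout has not yet elapsed and leaves the build untouched.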
func TestLimitedLogAndRetryProcessing(t *testing.T) { updater := &buildUpdater{} err := errors.New("funky error") now := unversioned.Now() retry := controller.Retry{ Count: 0, StartTimestamp: unversioned.Date(now.Year(), now.Month(), now.Day(), now.Hour(), now.Minute()-10, now.Second(), now.Nanosecond(), now.Location()), } if !limitedLogAndRetry(updater, 30*time.Minute)(&buildapi.Build{Status: buildapi.BuildStatus{Phase: buildapi.BuildPhaseNew}}, err, retry) { t.Error("Expected more retries!") } if updater.Build != nil { t.Fatal("BuildUpdater shouldn't be called!") } }
func TestGetImageStreamTagDIR(t *testing.T) { expDockerImageReference := "foo/bar/baz:latest" image := &api.Image{ObjectMeta: kapi.ObjectMeta{Name: "10"}, DockerImageReference: "foo/bar/baz:different"} repo := &api.ImageStream{ ObjectMeta: kapi.ObjectMeta{ Namespace: "default", Name: "test", }, Status: api.ImageStreamStatus{ Tags: map[string]api.TagEventList{ "latest": { Items: []api.TagEvent{ { Created: unversioned.Date(2015, 3, 24, 9, 38, 0, 0, time.UTC), DockerImageReference: expDockerImageReference, Image: "10", }, }, }, }, }, } client, server, storage := setup(t) defer server.Terminate(t) client.Create( context.TODO(), etcdtest.AddPrefix("/images/"+image.Name), runtime.EncodeOrDie(kapi.Codecs.LegacyCodec(v1.SchemeGroupVersion), image), ) client.Create( context.TODO(), etcdtest.AddPrefix("/imagestreams/default/test"), runtime.EncodeOrDie(kapi.Codecs.LegacyCodec(v1.SchemeGroupVersion), repo), ) obj, err := storage.Get(kapi.NewDefaultContext(), "test:latest") if err != nil { t.Fatalf("Unexpected error: %v", err) } actual := obj.(*api.ImageStreamTag) if actual.Image.DockerImageReference != expDockerImageReference { t.Errorf("Different DockerImageReference: expected %s, got %s", expDockerImageReference, actual.Image.DockerImageReference) } }
func TestOverlappingRCs(t *testing.T) { // Set up a fake server to listen for requests, and run the rc manager in steady state fakeResponse := serverResponse{ statusCode: 200, obj: &api.ReplicationController{}, } testServer, _ := makeTestServer(t, api.NamespaceDefault, api.TenantDefault, fakeResponse) defer testServer.Close() client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Default.Version()}) for i := 0; i < 5; i++ { manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, 10) manager.podStoreSynced = alwaysReady // Create 9 rcs, shuffle them randomly, and insert them into the rc manager's store var controllers []*api.ReplicationController for j := 1; j < 10; j++ { controllerSpec := newReplicationController(1) controllerSpec.CreationTimestamp = unversioned.Date(2014, time.December, j, 0, 0, 0, 0, time.Local) controllerSpec.Name = string(util.NewUUID()) controllers = append(controllers, controllerSpec) } shuffledControllers := shuffle(controllers) for j := range shuffledControllers { manager.rcStore.Store.Add(shuffledControllers[j]) } // Add a pod and make sure only the oldest rc is synced pods := newPodList(nil, 1, api.PodPending, controllers[0]) rcKey := getKey(controllers[0], t) manager.addPod(&pods.Items[0]) queueRC, _ := manager.queue.Get() if queueRC != rcKey { t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC) } } }
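// TestUpdateExistingNodeStatus verifies that updateNodeStatus refreshes the conditions of an already registered node: heartbeat times are bumped to now while transition times are preserved, and capacity is recalculated from cadvisor's machine info.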
func TestUpdateExistingNodeStatus(t *testing.T) { testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) kubelet := testKubelet.kubelet kubeClient := testKubelet.fakeKubeClient kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{ { ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}, Spec: api.NodeSpec{}, Status: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeOutOfDisk, Status: api.ConditionTrue, Reason: "KubeletOutOfDisk", Message: "out of disk space", LastHeartbeatTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), }, { Type: api.NodeMemoryPressure, Status: api.ConditionFalse, Reason: "KubeletHasSufficientMemory", Message: fmt.Sprintf("kubelet has sufficient memory available"), LastHeartbeatTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), }, { Type: api.NodeDiskPressure, Status: api.ConditionFalse, Reason: "KubeletHasSufficientDisk", Message: fmt.Sprintf("kubelet has sufficient disk space available"), LastHeartbeatTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), }, { Type: api.NodeReady, Status: api.ConditionTrue, Reason: "KubeletReady", Message: fmt.Sprintf("kubelet is posting ready status"), LastHeartbeatTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), }, }, Capacity: api.ResourceList{ api.ResourceCPU: *resource.NewMilliQuantity(3000, resource.DecimalSI), api.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI), api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), }, Allocatable: api.ResourceList{ api.ResourceCPU: *resource.NewMilliQuantity(2800, resource.DecimalSI), api.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI), api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), }, }, }, }}).ReactionChain mockCadvisor := testKubelet.fakeCadvisor mockCadvisor.On("Start").Return(nil) machineInfo := &cadvisorapi.MachineInfo{ MachineID: "123", SystemUUID: "abc", BootID: "1b3", NumCores: 2, MemoryCapacity: 20E9, } mockCadvisor.On("MachineInfo").Return(machineInfo, nil) versionInfo := &cadvisorapi.VersionInfo{ KernelVersion: "3.16.0-0.bpo.4-amd64", ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)", } mockCadvisor.On("VersionInfo").Return(versionInfo, nil) // Make kubelet report that it is out of disk space. 
if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 50, 50, 100, 100); err != nil { t.Fatalf("can't update disk space manager: %v", err) } expectedNode := &api.Node{ ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}, Spec: api.NodeSpec{}, Status: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeOutOfDisk, Status: api.ConditionTrue, Reason: "KubeletOutOfDisk", Message: "out of disk space", LastHeartbeatTime: unversioned.Time{}, // placeholder LastTransitionTime: unversioned.Time{}, // placeholder }, { Type: api.NodeMemoryPressure, Status: api.ConditionFalse, Reason: "KubeletHasSufficientMemory", Message: fmt.Sprintf("kubelet has sufficient memory available"), LastHeartbeatTime: unversioned.Time{}, LastTransitionTime: unversioned.Time{}, }, { Type: api.NodeDiskPressure, Status: api.ConditionFalse, Reason: "KubeletHasSufficientDisk", Message: fmt.Sprintf("kubelet has sufficient disk space available"), LastHeartbeatTime: unversioned.Time{}, LastTransitionTime: unversioned.Time{}, }, { Type: api.NodeReady, Status: api.ConditionTrue, Reason: "KubeletReady", Message: fmt.Sprintf("kubelet is posting ready status"), LastHeartbeatTime: unversioned.Time{}, // placeholder LastTransitionTime: unversioned.Time{}, // placeholder }, }, NodeInfo: api.NodeSystemInfo{ MachineID: "123", SystemUUID: "abc", BootID: "1b3", KernelVersion: "3.16.0-0.bpo.4-amd64", OSImage: "Debian GNU/Linux 7 (wheezy)", OperatingSystem: goruntime.GOOS, Architecture: goruntime.GOARCH, ContainerRuntimeVersion: "test://1.5.0", KubeletVersion: version.Get().String(), KubeProxyVersion: version.Get().String(), }, Capacity: api.ResourceList{ api.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), api.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI), api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI), }, Allocatable: api.ResourceList{ api.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI), api.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI), api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI), }, Addresses: []api.NodeAddress{ {Type: api.NodeLegacyHostIP, Address: "127.0.0.1"}, {Type: api.NodeInternalIP, Address: "127.0.0.1"}, {Type: api.NodeHostName, Address: testKubeletHostname}, }, // images will be sorted from max to min in node status. Images: []api.ContainerImage{ { Names: []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"}, SizeBytes: 456, }, { Names: []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"}, SizeBytes: 123, }, }, }, } kubelet.updateRuntimeUp() if err := kubelet.updateNodeStatus(); err != nil { t.Errorf("unexpected error: %v", err) } actions := kubeClient.Actions() if len(actions) != 2 { t.Errorf("unexpected actions: %v", actions) } updateAction, ok := actions[1].(core.UpdateAction) if !ok { t.Errorf("unexpected action type. expected UpdateAction, got %#v", actions[1]) } updatedNode, ok := updateAction.GetObject().(*api.Node) if !ok { t.Errorf("unexpected object type") } for i, cond := range updatedNode.Status.Conditions { // Expect LastProbeTime to be updated to Now, while LastTransitionTime to be the same. 
if old := unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Time; reflect.DeepEqual(cond.LastHeartbeatTime.Rfc3339Copy().UTC(), old) { t.Errorf("Condition %v LastProbeTime: expected \n%v\n, got \n%v", cond.Type, unversioned.Now(), old) } if got, want := cond.LastTransitionTime.Rfc3339Copy().UTC(), unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Time; !reflect.DeepEqual(got, want) { t.Errorf("Condition %v LastTransitionTime: expected \n%#v\n, got \n%#v", cond.Type, want, got) } updatedNode.Status.Conditions[i].LastHeartbeatTime = unversioned.Time{} updatedNode.Status.Conditions[i].LastTransitionTime = unversioned.Time{} } // Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961 if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != api.NodeReady { t.Errorf("unexpected node condition order. NodeReady should be last.") } if !api.Semantic.DeepEqual(expectedNode, updatedNode) { t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNode, updatedNode)) } }
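// TestGetImageStreamTag covers the happy path as well as the error cases (empty image reference, missing image, missing repo, missing tag) when retrieving an ImageStreamTag by "name:tag".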
func TestGetImageStreamTag(t *testing.T) { tests := map[string]struct { image *api.Image repo *api.ImageStream expectError bool errorTargetKind string errorTargetID string }{ "happy path": { image: &api.Image{ObjectMeta: kapi.ObjectMeta{Name: "10"}, DockerImageReference: "foo/bar/baz"}, repo: &api.ImageStream{ ObjectMeta: kapi.ObjectMeta{ Namespace: "default", Name: "test", }, Spec: api.ImageStreamSpec{ Tags: map[string]api.TagReference{ "latest": { Annotations: map[string]string{ "color": "blue", "size": "large", }, }, }, }, Status: api.ImageStreamStatus{ Tags: map[string]api.TagEventList{ "latest": { Items: []api.TagEvent{ { Created: unversioned.Date(2015, 3, 24, 9, 38, 0, 0, time.UTC), DockerImageReference: "test", Image: "10", }, }, }, }, }, }, }, "image = ''": { repo: &api.ImageStream{ ObjectMeta: kapi.ObjectMeta{Name: "test"}, Status: api.ImageStreamStatus{ Tags: map[string]api.TagEventList{ "latest": {Items: []api.TagEvent{{DockerImageReference: "test", Image: ""}}}, }, }}, expectError: true, errorTargetKind: "imagestreamtags", errorTargetID: "test:latest", }, "missing image": { repo: &api.ImageStream{Status: api.ImageStreamStatus{ Tags: map[string]api.TagEventList{ "latest": {Items: []api.TagEvent{{DockerImageReference: "test", Image: "10"}}}, }, }}, expectError: true, errorTargetKind: "images", errorTargetID: "10", }, "missing repo": { expectError: true, errorTargetKind: "imagestreams", errorTargetID: "test", }, "missing tag": { image: &api.Image{ObjectMeta: kapi.ObjectMeta{Name: "10"}, DockerImageReference: "foo/bar/baz"}, repo: &api.ImageStream{ ObjectMeta: kapi.ObjectMeta{Name: "test"}, Status: api.ImageStreamStatus{ Tags: map[string]api.TagEventList{ "other": {Items: []api.TagEvent{{DockerImageReference: "test", Image: "10"}}}, }, }}, expectError: true, errorTargetKind: "imagestreamtags", errorTargetID: "test:latest", }, } for name, testCase := range tests { client, server, storage := setup(t) defer server.Terminate(t) if testCase.image != nil { client.Create( context.TODO(), etcdtest.AddPrefix("/images/"+testCase.image.Name), runtime.EncodeOrDie(kapi.Codecs.LegacyCodec(v1.SchemeGroupVersion), testCase.image), ) } if testCase.repo != nil { client.Create( context.TODO(), etcdtest.AddPrefix("/imagestreams/default/test"), runtime.EncodeOrDie(kapi.Codecs.LegacyCodec(v1.SchemeGroupVersion), testCase.repo), ) } obj, err := storage.Get(kapi.NewDefaultContext(), "test:latest") gotErr := err != nil if e, a := testCase.expectError, gotErr; e != a { t.Errorf("%s: Expected err=%v: got %v: %v", name, e, a, err) continue } if testCase.expectError { if !errors.IsNotFound(err) { t.Errorf("%s: unexpected error type: %v", name, err) continue } status := err.(statusError).Status() if status.Details.Kind != testCase.errorTargetKind || status.Details.Name != testCase.errorTargetID { t.Errorf("%s: unexpected status: %#v", name, status.Details) continue } } else { actual := obj.(*api.ImageStreamTag) if e, a := "default", actual.Namespace; e != a { t.Errorf("%s: namespace: expected %v, got %v", name, e, a) } if e, a := "test:latest", actual.Name; e != a { t.Errorf("%s: name: expected %v, got %v", name, e, a) } if e, a := map[string]string{"size": "large", "color": "blue"}, actual.Image.Annotations; !reflect.DeepEqual(e, a) { t.Errorf("%s: annotations: expected %v, got %v", name, e, a) } if e, a := unversioned.Date(2015, 3, 24, 9, 38, 0, 0, time.UTC), actual.CreationTimestamp; !a.Equal(e) { t.Errorf("%s: timestamp: expected %v, got %v", name, e, a) } } } }
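// TestScale verifies deployment scaling, including proportional distribution of replicas across old and new replica sets and handling of surge pods.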
func TestScale(t *testing.T) { newTimestamp := unversioned.Date(2016, 5, 20, 2, 0, 0, 0, time.UTC) oldTimestamp := unversioned.Date(2016, 5, 20, 1, 0, 0, 0, time.UTC) olderTimestamp := unversioned.Date(2016, 5, 20, 0, 0, 0, 0, time.UTC) tests := []struct { name string deployment *exp.Deployment oldDeployment *exp.Deployment newRS *exp.ReplicaSet oldRSs []*exp.ReplicaSet expectedNew *exp.ReplicaSet expectedOld []*exp.ReplicaSet desiredReplicasAnnotations map[string]int32 }{ { name: "normal scaling event: 10 -> 12", deployment: newDeployment(12, nil), oldDeployment: newDeployment(10, nil), newRS: rs("foo-v1", 10, nil, newTimestamp), oldRSs: []*exp.ReplicaSet{}, expectedNew: rs("foo-v1", 12, nil, newTimestamp), expectedOld: []*exp.ReplicaSet{}, }, { name: "normal scaling event: 10 -> 5", deployment: newDeployment(5, nil), oldDeployment: newDeployment(10, nil), newRS: rs("foo-v1", 10, nil, newTimestamp), oldRSs: []*exp.ReplicaSet{}, expectedNew: rs("foo-v1", 5, nil, newTimestamp), expectedOld: []*exp.ReplicaSet{}, }, { name: "proportional scaling: 5 -> 10", deployment: newDeployment(10, nil), oldDeployment: newDeployment(5, nil), newRS: rs("foo-v2", 2, nil, newTimestamp), oldRSs: []*exp.ReplicaSet{rs("foo-v1", 3, nil, oldTimestamp)}, expectedNew: rs("foo-v2", 4, nil, newTimestamp), expectedOld: []*exp.ReplicaSet{rs("foo-v1", 6, nil, oldTimestamp)}, }, { name: "proportional scaling: 5 -> 3", deployment: newDeployment(3, nil), oldDeployment: newDeployment(5, nil), newRS: rs("foo-v2", 2, nil, newTimestamp), oldRSs: []*exp.ReplicaSet{rs("foo-v1", 3, nil, oldTimestamp)}, expectedNew: rs("foo-v2", 1, nil, newTimestamp), expectedOld: []*exp.ReplicaSet{rs("foo-v1", 2, nil, oldTimestamp)}, }, { name: "proportional scaling: 9 -> 4", deployment: newDeployment(4, nil), oldDeployment: newDeployment(9, nil), newRS: rs("foo-v2", 8, nil, newTimestamp), oldRSs: []*exp.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)}, expectedNew: rs("foo-v2", 4, nil, newTimestamp), expectedOld: []*exp.ReplicaSet{rs("foo-v1", 0, nil, oldTimestamp)}, }, { name: "proportional scaling: 7 -> 10", deployment: newDeployment(10, nil), oldDeployment: newDeployment(7, nil), newRS: rs("foo-v3", 2, nil, newTimestamp), oldRSs: []*exp.ReplicaSet{rs("foo-v2", 3, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)}, expectedNew: rs("foo-v3", 3, nil, newTimestamp), expectedOld: []*exp.ReplicaSet{rs("foo-v2", 4, nil, oldTimestamp), rs("foo-v1", 3, nil, olderTimestamp)}, }, { name: "proportional scaling: 13 -> 8", deployment: newDeployment(8, nil), oldDeployment: newDeployment(13, nil), newRS: rs("foo-v3", 2, nil, newTimestamp), oldRSs: []*exp.ReplicaSet{rs("foo-v2", 8, nil, oldTimestamp), rs("foo-v1", 3, nil, olderTimestamp)}, expectedNew: rs("foo-v3", 1, nil, newTimestamp), expectedOld: []*exp.ReplicaSet{rs("foo-v2", 5, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)}, }, // Scales up the new replica set. { name: "leftover distribution: 3 -> 4", deployment: newDeployment(4, nil), oldDeployment: newDeployment(3, nil), newRS: rs("foo-v3", 1, nil, newTimestamp), oldRSs: []*exp.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)}, expectedNew: rs("foo-v3", 2, nil, newTimestamp), expectedOld: []*exp.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)}, }, // Scales down the older replica set. 
{ name: "leftover distribution: 3 -> 2", deployment: newDeployment(2, nil), oldDeployment: newDeployment(3, nil), newRS: rs("foo-v3", 1, nil, newTimestamp), oldRSs: []*exp.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)}, expectedNew: rs("foo-v3", 1, nil, newTimestamp), expectedOld: []*exp.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)}, }, // Scales up the latest replica set first. { name: "proportional scaling (no new rs): 4 -> 5", deployment: newDeployment(5, nil), oldDeployment: newDeployment(4, nil), newRS: nil, oldRSs: []*exp.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)}, expectedNew: nil, expectedOld: []*exp.ReplicaSet{rs("foo-v2", 3, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)}, }, // Scales down to zero { name: "proportional scaling: 6 -> 0", deployment: newDeployment(0, nil), oldDeployment: newDeployment(6, nil), newRS: rs("foo-v3", 3, nil, newTimestamp), oldRSs: []*exp.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)}, expectedNew: rs("foo-v3", 0, nil, newTimestamp), expectedOld: []*exp.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)}, }, // Scales up from zero { name: "proportional scaling: 0 -> 6", deployment: newDeployment(6, nil), oldDeployment: newDeployment(0, nil), newRS: rs("foo-v3", 0, nil, newTimestamp), oldRSs: []*exp.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)}, expectedNew: rs("foo-v3", 6, nil, newTimestamp), expectedOld: []*exp.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)}, }, // Scenario: deployment.spec.replicas == 3 ( foo-v1.spec.replicas == foo-v2.spec.replicas == foo-v3.spec.replicas == 1 ) // Deployment is scaled to 5. foo-v3.spec.replicas and foo-v2.spec.replicas should increment by 1 but foo-v2 fails to // update. 
{ name: "failed rs update", deployment: newDeployment(5, nil), oldDeployment: newDeployment(5, nil), newRS: rs("foo-v3", 2, nil, newTimestamp), oldRSs: []*exp.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)}, expectedNew: rs("foo-v3", 2, nil, newTimestamp), expectedOld: []*exp.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)}, desiredReplicasAnnotations: map[string]int32{"foo-v2": int32(3)}, }, { name: "deployment with surge pods", deployment: newDeploymentEnhanced(20, intstr.FromInt(2)), oldDeployment: newDeploymentEnhanced(10, intstr.FromInt(2)), newRS: rs("foo-v2", 6, nil, newTimestamp), oldRSs: []*exp.ReplicaSet{rs("foo-v1", 6, nil, oldTimestamp)}, expectedNew: rs("foo-v2", 11, nil, newTimestamp), expectedOld: []*exp.ReplicaSet{rs("foo-v1", 11, nil, oldTimestamp)}, }, { name: "change both surge and size", deployment: newDeploymentEnhanced(50, intstr.FromInt(6)), oldDeployment: newDeploymentEnhanced(10, intstr.FromInt(3)), newRS: rs("foo-v2", 5, nil, newTimestamp), oldRSs: []*exp.ReplicaSet{rs("foo-v1", 8, nil, oldTimestamp)}, expectedNew: rs("foo-v2", 22, nil, newTimestamp), expectedOld: []*exp.ReplicaSet{rs("foo-v1", 34, nil, oldTimestamp)}, }, } for _, test := range tests { _ = olderTimestamp t.Log(test.name) fake := fake.Clientset{} dc := &DeploymentController{ client: &fake, eventRecorder: &record.FakeRecorder{}, } if test.newRS != nil { desiredReplicas := test.oldDeployment.Spec.Replicas if desired, ok := test.desiredReplicasAnnotations[test.newRS.Name]; ok { desiredReplicas = desired } deploymentutil.SetReplicasAnnotations(test.newRS, desiredReplicas, desiredReplicas+deploymentutil.MaxSurge(*test.oldDeployment)) } for i := range test.oldRSs { rs := test.oldRSs[i] if rs == nil { continue } desiredReplicas := test.oldDeployment.Spec.Replicas if desired, ok := test.desiredReplicasAnnotations[rs.Name]; ok { desiredReplicas = desired } deploymentutil.SetReplicasAnnotations(rs, desiredReplicas, desiredReplicas+deploymentutil.MaxSurge(*test.oldDeployment)) } if err := dc.scale(test.deployment, test.newRS, test.oldRSs); err != nil { t.Errorf("%s: unexpected error: %v", test.name, err) continue } if test.expectedNew != nil && test.newRS != nil && test.expectedNew.Spec.Replicas != test.newRS.Spec.Replicas { t.Errorf("%s: expected new replicas: %d, got: %d", test.name, test.expectedNew.Spec.Replicas, test.newRS.Spec.Replicas) continue } if len(test.expectedOld) != len(test.oldRSs) { t.Errorf("%s: expected %d old replica sets, got %d", test.name, len(test.expectedOld), len(test.oldRSs)) continue } for n := range test.oldRSs { rs := test.oldRSs[n] exp := test.expectedOld[n] if exp.Spec.Replicas != rs.Spec.Replicas { t.Errorf("%s: expected old (%s) replicas: %d, got: %d", test.name, rs.Name, exp.Spec.Replicas, rs.Spec.Replicas) } } } }
func TestUnstructuredSetters(t *testing.T) { unstruct := runtime.Unstructured{} trueVar := true want := runtime.Unstructured{ Object: map[string]interface{}{ "kind": "test_kind", "apiVersion": "test_version", "metadata": map[string]interface{}{ "name": "test_name", "namespace": "test_namespace", "generateName": "test_generateName", "uid": "test_uid", "resourceVersion": "test_resourceVersion", "selfLink": "test_selfLink", "creationTimestamp": "2009-11-10T23:00:00Z", "deletionTimestamp": "2010-11-10T23:00:00Z", "labels": map[string]interface{}{ "test_label": "test_value", }, "annotations": map[string]interface{}{ "test_annotation": "test_value", }, "ownerReferences": []map[string]interface{}{ { "kind": "Pod", "name": "poda", "apiVersion": "v1", "uid": "1", "controller": (*bool)(nil), }, { "kind": "Pod", "name": "podb", "apiVersion": "v1", "uid": "2", "controller": &trueVar, }, }, "finalizers": []interface{}{ "finalizer.1", "finalizer.2", }, "clusterName": "cluster123", }, }, } unstruct.SetAPIVersion("test_version") unstruct.SetKind("test_kind") unstruct.SetNamespace("test_namespace") unstruct.SetName("test_name") unstruct.SetGenerateName("test_generateName") unstruct.SetUID(types.UID("test_uid")) unstruct.SetResourceVersion("test_resourceVersion") unstruct.SetSelfLink("test_selfLink") unstruct.SetCreationTimestamp(unversioned.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)) date := unversioned.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC) unstruct.SetDeletionTimestamp(&date) unstruct.SetLabels(map[string]string{"test_label": "test_value"}) unstruct.SetAnnotations(map[string]string{"test_annotation": "test_value"}) newOwnerReferences := []metatypes.OwnerReference{ { Kind: "Pod", Name: "poda", APIVersion: "v1", UID: "1", }, { Kind: "Pod", Name: "podb", APIVersion: "v1", UID: "2", Controller: &trueVar, }, } unstruct.SetOwnerReferences(newOwnerReferences) unstruct.SetFinalizers([]string{"finalizer.1", "finalizer.2"}) unstruct.SetClusterName("cluster123") if !reflect.DeepEqual(unstruct, want) { t.Errorf("Wanted: \n%s\n Got:\n%s", want, unstruct) } }
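// TestMonitorNodeStatusUpdateStatus verifies that monitorNodeStatus posts Unknown conditions for nodes whose kubelet has stopped (or never started) reporting, and leaves recently created or recently updated nodes untouched.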
func TestMonitorNodeStatusUpdateStatus(t *testing.T) { fakeNow := unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC) table := []struct { fakeNodeHandler *FakeNodeHandler timeToPass time.Duration newNodeStatus api.NodeStatus expectedEvictPods bool expectedRequestCount int expectedNodes []*api.Node }{ // Node created long time ago, without status: // Expect Unknown status posted from node controller. { fakeNodeHandler: &FakeNodeHandler{ Existing: []*api.Node{ { ObjectMeta: api.ObjectMeta{ Name: "node0", CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), }, }, }, Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}), }, expectedRequestCount: 2, // List+Update expectedNodes: []*api.Node{ { ObjectMeta: api.ObjectMeta{ Name: "node0", CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), }, Status: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionUnknown, Reason: "NodeStatusNeverUpdated", Message: "Kubelet never posted node status.", LastHeartbeatTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime: fakeNow, }, { Type: api.NodeOutOfDisk, Status: api.ConditionUnknown, Reason: "NodeStatusNeverUpdated", Message: "Kubelet never posted node status.", LastHeartbeatTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime: fakeNow, }, }, }, }, }, }, // Node created recently, without status. // Expect no action from node controller (within startup grace period). { fakeNodeHandler: &FakeNodeHandler{ Existing: []*api.Node{ { ObjectMeta: api.ObjectMeta{ Name: "node0", CreationTimestamp: fakeNow, }, }, }, Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}), }, expectedRequestCount: 1, // List expectedNodes: nil, }, // Node created long time ago, with status updated by kubelet exceeds grace period. // Expect Unknown status posted from node controller. { fakeNodeHandler: &FakeNodeHandler{ Existing: []*api.Node{ { ObjectMeta: api.ObjectMeta{ Name: "node0", CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), }, Status: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionTrue, // Node status hasn't been updated for 1hr. LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, { Type: api.NodeOutOfDisk, Status: api.ConditionFalse, // Node status hasn't been updated for 1hr. LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, Capacity: api.ResourceList{ api.ResourceName(api.ResourceCPU): resource.MustParse("10"), api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), }, }, Spec: api.NodeSpec{ ExternalID: "node0", }, }, }, Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}), }, expectedRequestCount: 3, // (List+)List+Update timeToPass: time.Hour, newNodeStatus: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionTrue, // Node status hasn't been updated for 1hr. LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, { Type: api.NodeOutOfDisk, Status: api.ConditionFalse, // Node status hasn't been updated for 1hr. 
LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, Capacity: api.ResourceList{ api.ResourceName(api.ResourceCPU): resource.MustParse("10"), api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), }, }, expectedNodes: []*api.Node{ { ObjectMeta: api.ObjectMeta{ Name: "node0", CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), }, Status: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionUnknown, Reason: "NodeStatusUnknown", Message: "Kubelet stopped posting node status.", LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Time{Time: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC).Add(time.Hour)}, }, { Type: api.NodeOutOfDisk, Status: api.ConditionUnknown, Reason: "NodeStatusUnknown", Message: "Kubelet stopped posting node status.", LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Time{Time: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC).Add(time.Hour)}, }, }, Capacity: api.ResourceList{ api.ResourceName(api.ResourceCPU): resource.MustParse("10"), api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), }, }, Spec: api.NodeSpec{ ExternalID: "node0", }, }, }, }, // Node created long time ago, with status updated recently. // Expect no action from node controller (within monitor grace period). { fakeNodeHandler: &FakeNodeHandler{ Existing: []*api.Node{ { ObjectMeta: api.ObjectMeta{ Name: "node0", CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), }, Status: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionTrue, // Node status has just been updated. 
LastHeartbeatTime: fakeNow, LastTransitionTime: fakeNow, }, }, Capacity: api.ResourceList{ api.ResourceName(api.ResourceCPU): resource.MustParse("10"), api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), }, }, Spec: api.NodeSpec{ ExternalID: "node0", }, }, }, Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}), }, expectedRequestCount: 1, // List expectedNodes: nil, }, } for i, item := range table { nodeController := NewNodeController(nil, item.fakeNodeHandler, 5*time.Minute, util.NewFakeAlwaysRateLimiter(), util.NewFakeAlwaysRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, false) nodeController.now = func() unversioned.Time { return fakeNow } if err := nodeController.monitorNodeStatus(); err != nil { t.Errorf("unexpected error: %v", err) } if item.timeToPass > 0 { nodeController.now = func() unversioned.Time { return unversioned.Time{Time: fakeNow.Add(item.timeToPass)} } item.fakeNodeHandler.Existing[0].Status = item.newNodeStatus if err := nodeController.monitorNodeStatus(); err != nil { t.Errorf("unexpected error: %v", err) } } if item.expectedRequestCount != item.fakeNodeHandler.RequestCount { t.Errorf("expected %v call, but got %v.", item.expectedRequestCount, item.fakeNodeHandler.RequestCount) } if len(item.fakeNodeHandler.UpdatedNodes) > 0 && !api.Semantic.DeepEqual(item.expectedNodes, item.fakeNodeHandler.UpdatedNodes) { t.Errorf("Case[%d] unexpected nodes: %s", i, util.ObjectDiff(item.expectedNodes[0], item.fakeNodeHandler.UpdatedNodes[0])) } if len(item.fakeNodeHandler.UpdatedNodeStatuses) > 0 && !api.Semantic.DeepEqual(item.expectedNodes, item.fakeNodeHandler.UpdatedNodeStatuses) { t.Errorf("Case[%d] unexpected nodes: %s", i, util.ObjectDiff(item.expectedNodes[0], item.fakeNodeHandler.UpdatedNodeStatuses[0])) } } }
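// TestNodeDeletion verifies that pods running on a node are evicted once that node is deleted.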
func TestNodeDeletion(t *testing.T) { fakeNow := unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC) fakeNodeHandler := &FakeNodeHandler{ Existing: []*api.Node{ { ObjectMeta: api.ObjectMeta{ Name: "node0", CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), }, Status: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionTrue, // Node status has just been updated. LastHeartbeatTime: fakeNow, LastTransitionTime: fakeNow, }, }, Capacity: api.ResourceList{ api.ResourceName(api.ResourceCPU): resource.MustParse("10"), api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), }, }, Spec: api.NodeSpec{ ExternalID: "node0", }, }, { ObjectMeta: api.ObjectMeta{ Name: "node1", CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), }, Status: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionTrue, // Node status has just been updated. LastHeartbeatTime: fakeNow, LastTransitionTime: fakeNow, }, }, Capacity: api.ResourceList{ api.ResourceName(api.ResourceCPU): resource.MustParse("10"), api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), }, }, Spec: api.NodeSpec{ ExternalID: "node0", }, }, }, Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0"), *newPod("pod1", "node1")}}), } nodeController := NewNodeController(nil, fakeNodeHandler, 5*time.Minute, util.NewFakeAlwaysRateLimiter(), util.NewFakeAlwaysRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, false) nodeController.now = func() unversioned.Time { return fakeNow } if err := nodeController.monitorNodeStatus(); err != nil { t.Errorf("unexpected error: %v", err) } fakeNodeHandler.Delete("node1", nil) if err := nodeController.monitorNodeStatus(); err != nil { t.Errorf("unexpected error: %v", err) } nodeController.podEvictor.Try(func(value TimedValue) (bool, time.Duration) { nodeController.deletePods(value.Value) return true, 0 }) podEvicted := false for _, action := range fakeNodeHandler.Actions() { if action.GetVerb() == "delete" && action.GetResource() == "pods" { podEvicted = true } } if !podEvicted { t.Error("expected pods to be evicted from the deleted node") } }
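// TestMonitorNodeStatusEvictPods verifies in which situations monitorNodeStatus queues pods for eviction: a node NotReady or Unknown for longer than the eviction timeout triggers eviction, while short outages, daemon-set-managed pods, and network-segmented zones do not.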
func TestMonitorNodeStatusEvictPods(t *testing.T) { fakeNow := unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC) evictionTimeout := 10 * time.Minute // Because of the logic that prevents NC from evicting anything when all Nodes are NotReady // we need second healthy node in tests. Because of how the tests are written we need to update // the status of this Node. healthyNodeNewStatus := api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionTrue, // Node status has just been updated, and is NotReady for 10min. LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 9, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, } table := []struct { fakeNodeHandler *FakeNodeHandler daemonSets []extensions.DaemonSet timeToPass time.Duration newNodeStatus api.NodeStatus secondNodeNewStatus api.NodeStatus expectedEvictPods bool description string }{ // Node created recently, with no status (happens only at cluster startup). { fakeNodeHandler: &FakeNodeHandler{ Existing: []*api.Node{ { ObjectMeta: api.ObjectMeta{ Name: "node0", CreationTimestamp: fakeNow, Labels: map[string]string{ unversioned.LabelZoneRegion: "region1", unversioned.LabelZoneFailureDomain: "zone1", }, }, }, { ObjectMeta: api.ObjectMeta{ Name: "node1", CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ unversioned.LabelZoneRegion: "region1", unversioned.LabelZoneFailureDomain: "zone1", }, }, Status: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionTrue, LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, }, }, }, Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}), }, daemonSets: nil, timeToPass: 0, newNodeStatus: api.NodeStatus{}, secondNodeNewStatus: healthyNodeNewStatus, expectedEvictPods: false, description: "Node created recently, with no status.", }, // Node created long time ago, and kubelet posted NotReady for a short period of time. { fakeNodeHandler: &FakeNodeHandler{ Existing: []*api.Node{ { ObjectMeta: api.ObjectMeta{ Name: "node0", CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ unversioned.LabelZoneRegion: "region1", unversioned.LabelZoneFailureDomain: "zone1", }, }, Status: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionFalse, LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, }, }, { ObjectMeta: api.ObjectMeta{ Name: "node1", CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ unversioned.LabelZoneRegion: "region1", unversioned.LabelZoneFailureDomain: "zone1", }, }, Status: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionTrue, LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, }, }, }, Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}), }, daemonSets: nil, timeToPass: evictionTimeout, newNodeStatus: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionFalse, // Node status has just been updated, and is NotReady for 10min. 
LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 9, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, }, secondNodeNewStatus: healthyNodeNewStatus, expectedEvictPods: false, description: "Node created long time ago, and kubelet posted NotReady for a short period of time.", }, // Pod is ds-managed, and kubelet posted NotReady for a long period of time. { fakeNodeHandler: &FakeNodeHandler{ Existing: []*api.Node{ { ObjectMeta: api.ObjectMeta{ Name: "node0", CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ unversioned.LabelZoneRegion: "region1", unversioned.LabelZoneFailureDomain: "zone1", }, }, Status: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionFalse, LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, }, }, { ObjectMeta: api.ObjectMeta{ Name: "node1", CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ unversioned.LabelZoneRegion: "region1", unversioned.LabelZoneFailureDomain: "zone1", }, }, Status: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionTrue, LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, }, }, }, Clientset: fake.NewSimpleClientset( &api.PodList{ Items: []api.Pod{ { ObjectMeta: api.ObjectMeta{ Name: "pod0", Namespace: "default", Labels: map[string]string{"daemon": "yes"}, }, Spec: api.PodSpec{ NodeName: "node0", }, }, }, }, ), }, daemonSets: []extensions.DaemonSet{ { ObjectMeta: api.ObjectMeta{ Name: "ds0", Namespace: "default", }, Spec: extensions.DaemonSetSpec{ Selector: &unversioned.LabelSelector{ MatchLabels: map[string]string{"daemon": "yes"}, }, }, }, }, timeToPass: time.Hour, newNodeStatus: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionFalse, // Node status has just been updated, and is NotReady for 1hr. LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 59, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, }, secondNodeNewStatus: healthyNodeNewStatus, expectedEvictPods: false, description: "Pod is ds-managed, and kubelet posted NotReady for a long period of time.", }, // Node created long time ago, and kubelet posted NotReady for a long period of time. 
{ fakeNodeHandler: &FakeNodeHandler{ Existing: []*api.Node{ { ObjectMeta: api.ObjectMeta{ Name: "node0", CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ unversioned.LabelZoneRegion: "region1", unversioned.LabelZoneFailureDomain: "zone1", }, }, Status: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionFalse, LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, }, }, { ObjectMeta: api.ObjectMeta{ Name: "node1", CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ unversioned.LabelZoneRegion: "region1", unversioned.LabelZoneFailureDomain: "zone1", }, }, Status: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionTrue, LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, }, }, }, Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}), }, daemonSets: nil, timeToPass: time.Hour, newNodeStatus: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionFalse, // Node status has just been updated, and is NotReady for 1hr. LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 59, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, }, secondNodeNewStatus: healthyNodeNewStatus, expectedEvictPods: true, description: "Node created long time ago, and kubelet posted NotReady for a long period of time.", }, // Node created long time ago, node controller posted Unknown for a short period of time. 
{ fakeNodeHandler: &FakeNodeHandler{ Existing: []*api.Node{ { ObjectMeta: api.ObjectMeta{ Name: "node0", CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ unversioned.LabelZoneRegion: "region1", unversioned.LabelZoneFailureDomain: "zone1", }, }, Status: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionUnknown, LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, }, }, { ObjectMeta: api.ObjectMeta{ Name: "node1", CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ unversioned.LabelZoneRegion: "region1", unversioned.LabelZoneFailureDomain: "zone1", }, }, Status: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionTrue, LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, }, }, }, Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}), }, daemonSets: nil, timeToPass: evictionTimeout - testNodeMonitorGracePeriod, newNodeStatus: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionUnknown, // Node status was updated by nodecontroller 10min ago LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, }, secondNodeNewStatus: healthyNodeNewStatus, expectedEvictPods: false, description: "Node created long time ago, node controller posted Unknown for a short period of time.", }, // Node created long time ago, node controller posted Unknown for a long period of time. 
{ fakeNodeHandler: &FakeNodeHandler{ Existing: []*api.Node{ { ObjectMeta: api.ObjectMeta{ Name: "node0", CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ unversioned.LabelZoneRegion: "region1", unversioned.LabelZoneFailureDomain: "zone1", }, }, Status: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionUnknown, LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, }, }, { ObjectMeta: api.ObjectMeta{ Name: "node1", CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ unversioned.LabelZoneRegion: "region1", unversioned.LabelZoneFailureDomain: "zone1", }, }, Status: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionTrue, LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, }, }, }, Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}), }, daemonSets: nil, timeToPass: 60 * time.Minute, newNodeStatus: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionUnknown, // Node status was updated by nodecontroller 1hr ago LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, }, secondNodeNewStatus: healthyNodeNewStatus, expectedEvictPods: true, description: "Node created long time ago, node controller posted Unknown for a long period of time.", }, // NetworkSegmentation: Node created long time ago, node controller posted Unknown for a long period of time on both Nodes. 
{ fakeNodeHandler: &FakeNodeHandler{ Existing: []*api.Node{ { ObjectMeta: api.ObjectMeta{ Name: "node0", CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ unversioned.LabelZoneRegion: "region1", unversioned.LabelZoneFailureDomain: "zone1", }, }, Status: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionUnknown, LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, }, }, { ObjectMeta: api.ObjectMeta{ Name: "node1", CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ unversioned.LabelZoneRegion: "region2", unversioned.LabelZoneFailureDomain: "zone2", }, }, Status: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionUnknown, LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, }, }, }, Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}), }, daemonSets: nil, timeToPass: 60 * time.Minute, newNodeStatus: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionUnknown, // Node status was updated by nodecontroller 1hr ago LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, }, secondNodeNewStatus: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionUnknown, // Node status was updated by nodecontroller 1hr ago LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, }, expectedEvictPods: false, description: "Network Segmentation: Node created long time ago, node controller posted Unknown for a long period of time on both Nodes.", }, // NetworkSegmentation: Node created long time ago, node controller posted Unknown for a long period // of on first Node, eviction should stop even though -master Node is healthy. 
{ fakeNodeHandler: &FakeNodeHandler{ Existing: []*api.Node{ { ObjectMeta: api.ObjectMeta{ Name: "node0", CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ unversioned.LabelZoneRegion: "region1", unversioned.LabelZoneFailureDomain: "zone1", }, }, Status: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionUnknown, LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, }, }, { ObjectMeta: api.ObjectMeta{ Name: "node-master", CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ unversioned.LabelZoneRegion: "region1", unversioned.LabelZoneFailureDomain: "zone1", }, }, Status: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionTrue, LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, }, }, }, Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}), }, daemonSets: nil, timeToPass: 60 * time.Minute, newNodeStatus: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionUnknown, // Node status was updated by nodecontroller 1hr ago LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, }, secondNodeNewStatus: healthyNodeNewStatus, expectedEvictPods: false, description: "NetworkSegmentation: Node created long time ago, node controller posted Unknown for a long period of time on first Node, eviction should stop even though -master Node is healthy", }, } for _, item := range table { nodeController, _ := NewNodeController(nil, item.fakeNodeHandler, evictionTimeout, testRateLimiterQPS, testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false) nodeController.now = func() unversioned.Time { return fakeNow } for _, ds := range item.daemonSets { nodeController.daemonSetStore.Add(&ds) } if err := nodeController.monitorNodeStatus(); err != nil { t.Errorf("unexpected error: %v", err) } if item.timeToPass > 0 { nodeController.now = func() unversioned.Time { return unversioned.Time{Time: fakeNow.Add(item.timeToPass)} } item.fakeNodeHandler.Existing[0].Status = item.newNodeStatus item.fakeNodeHandler.Existing[1].Status = item.secondNodeNewStatus } if err := nodeController.monitorNodeStatus(); err != nil { t.Errorf("unexpected error: %v", err) } zones := getZones(item.fakeNodeHandler) for _, zone := range zones { nodeController.zonePodEvictor[zone].Try(func(value TimedValue) (bool, time.Duration) { remaining, _ := deletePods(item.fakeNodeHandler, nodeController.recorder, value.Value, nodeController.daemonSetStore) if remaining { nodeController.zoneTerminationEvictor[zone].Add(value.Value) } return true, 0 }) // also drain the termination queue so terminatePods runs for nodes that still had pods nodeController.zoneTerminationEvictor[zone].Try(func(value TimedValue) (bool, time.Duration) { terminatePods(item.fakeNodeHandler, nodeController.recorder, value.Value, value.AddedAt, nodeController.maximumGracePeriod) return true, 0 }) } podEvicted := false for _, action := range item.fakeNodeHandler.Actions() { if action.GetVerb() == "delete" && action.GetResource().Resource == "pods" { podEvicted = true } } if item.expectedEvictPods != podEvicted { t.Errorf("expected pod eviction: %+v, got %+v for %+v", item.expectedEvictPods, podEvicted,
item.description) } } }
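// Editorial sketch, not part of the original tests: one plausible shape for the getZones
// helper used above, which has to derive the set of zone keys that index zonePodEvictor
// from the region / failure-domain labels set on the fixture nodes. The types and the key
// format are simplified assumptions, not the real FakeNodeHandler or api.Node.
package zonesketch

// node is a minimal stand-in for api.Node: only the labels matter here.
type node struct {
	Labels map[string]string
}

// Stand-ins for unversioned.LabelZoneRegion / unversioned.LabelZoneFailureDomain.
const (
	labelZoneRegion        = "failure-domain.beta.kubernetes.io/region"
	labelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone"
)

// zoneKey joins region and failure domain into one map key, so nodes that share both
// labels land in the same per-zone eviction queue.
func zoneKey(n node) string {
	return n.Labels[labelZoneRegion] + ":" + n.Labels[labelZoneFailureDomain]
}

// getZones returns the distinct zone keys across all nodes; nodes without zone labels
// all collapse into a single key.
func getZones(nodes []node) []string {
	seen := map[string]bool{}
	var zones []string
	for _, n := range nodes {
		k := zoneKey(n)
		if !seen[k] {
			seen[k] = true
			zones = append(zones, k)
		}
	}
	return zones
}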
func TestGetFirstPod(t *testing.T) { labelSet := map[string]string{"test": "selector"} tests := []struct { name string podList *api.PodList watching []watch.Event sortBy func([]*api.Pod) sort.Interface expected *api.Pod expectedNum int expectedErr bool }{ { name: "kubectl logs - two ready pods", podList: newPodList(2, -1, -1, labelSet), sortBy: func(pods []*api.Pod) sort.Interface { return controller.ByLogging(pods) }, expected: &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "pod-1", Namespace: api.NamespaceDefault, CreationTimestamp: unversioned.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC), Labels: map[string]string{"test": "selector"}, }, Status: api.PodStatus{ Conditions: []api.PodCondition{ { Status: api.ConditionTrue, Type: api.PodReady, }, }, }, }, expectedNum: 2, }, { name: "kubectl logs - one unhealthy, one healthy", podList: newPodList(2, -1, 1, labelSet), sortBy: func(pods []*api.Pod) sort.Interface { return controller.ByLogging(pods) }, expected: &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "pod-2", Namespace: api.NamespaceDefault, CreationTimestamp: unversioned.Date(2016, time.April, 1, 1, 0, 1, 0, time.UTC), Labels: map[string]string{"test": "selector"}, }, Status: api.PodStatus{ Conditions: []api.PodCondition{ { Status: api.ConditionTrue, Type: api.PodReady, }, }, ContainerStatuses: []api.ContainerStatus{{RestartCount: 5}}, }, }, expectedNum: 2, }, { name: "kubectl attach - two ready pods", podList: newPodList(2, -1, -1, labelSet), sortBy: func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }, expected: &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "pod-1", Namespace: api.NamespaceDefault, CreationTimestamp: unversioned.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC), Labels: map[string]string{"test": "selector"}, }, Status: api.PodStatus{ Conditions: []api.PodCondition{ { Status: api.ConditionTrue, Type: api.PodReady, }, }, }, }, expectedNum: 2, }, { name: "kubectl attach - wait for ready pod", podList: newPodList(1, 1, -1, labelSet), watching: []watch.Event{ { Type: watch.Modified, Object: &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "pod-1", Namespace: api.NamespaceDefault, CreationTimestamp: unversioned.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC), Labels: map[string]string{"test": "selector"}, }, Status: api.PodStatus{ Conditions: []api.PodCondition{ { Status: api.ConditionTrue, Type: api.PodReady, }, }, }, }, }, }, sortBy: func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }, expected: &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "pod-1", Namespace: api.NamespaceDefault, CreationTimestamp: unversioned.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC), Labels: map[string]string{"test": "selector"}, }, Status: api.PodStatus{ Conditions: []api.PodCondition{ { Status: api.ConditionTrue, Type: api.PodReady, }, }, }, }, expectedNum: 1, }, } for i := range tests { test := tests[i] fake := fake.NewSimpleClientset(test.podList) if len(test.watching) > 0 { watcher := watch.NewFake() for _, event := range test.watching { switch event.Type { case watch.Added: go watcher.Add(event.Object) case watch.Modified: go watcher.Modify(event.Object) } } fake.PrependWatchReactor("pods", testcore.DefaultWatchReactor(watcher, nil)) } selector := labels.Set(labelSet).AsSelector() pod, numPods, err := GetFirstPod(fake.Core(), api.NamespaceDefault, selector, 1*time.Minute, test.sortBy) if !test.expectedErr && err != nil { t.Errorf("%s: unexpected error: %v", test.name, err) continue } if test.expectedErr && err == nil { 
t.Errorf("%s: expected an error", test.name) continue } if test.expectedNum != numPods { t.Errorf("%s: expected %d pods, got %d", test.name, test.expectedNum, numPods) continue } if !reflect.DeepEqual(test.expected, pod) { t.Errorf("%s:\nexpected pod:\n%#v\ngot:\n%#v\n\n", test.name, test.expected, pod) } } }
func TestFormatImageStreamTags(t *testing.T) { repo := imageapi.ImageStream{ Spec: imageapi.ImageStreamSpec{ Tags: map[string]imageapi.TagReference{ "spec1": { From: &kapi.ObjectReference{ Kind: "ImageStreamTag", Namespace: "foo", Name: "bar:latest", }, }, "spec2": { From: &kapi.ObjectReference{ Kind: "ImageStreamImage", Namespace: "mysql", Name: "latest@sha256:e52c6534db85036dabac5e71ff14e720db94def2d90f986f3548425ea27b3719", }, }, }, }, Status: imageapi.ImageStreamStatus{ Tags: map[string]imageapi.TagEventList{ imageapi.DefaultImageTag: { Items: []imageapi.TagEvent{ { Created: unversioned.Date(2015, 3, 24, 9, 38, 0, 0, time.UTC), DockerImageReference: "registry:5000/foo/bar@sha256:4bd26aef1ce78b4f05ede83496276f11e3343441574ca1ce89dffd146c708c16", Image: "sha256:4bd26aef1ce78b4f05ede83496276f11e3343441574ca1ce89dffd146c708c16", }, { Created: unversioned.Date(2015, 3, 23, 7, 15, 0, 0, time.UTC), DockerImageReference: "registry:5000/foo/bar@sha256:062b80555a5dd7f5d58e78b266785a399277ff8c3e402ce5fa5d8571788e6bad", Image: "sha256:062b80555a5dd7f5d58e78b266785a399277ff8c3e402ce5fa5d8571788e6bad", }, }, }, "spec1": { Items: []imageapi.TagEvent{ { Created: unversioned.Date(2015, 3, 24, 9, 38, 0, 0, time.UTC), DockerImageReference: "registry:5000/foo/bar@sha256:4bd26aef1ce78b4f05ede83496276f11e3343441574ca1ce89dffd146c708c16", Image: "sha256:4bd26aef1ce78b4f05ede83496276f11e3343441574ca1ce89dffd146c708c16", }, }, }, "spec2": { Items: []imageapi.TagEvent{ { Created: unversioned.Date(2015, 3, 24, 9, 38, 0, 0, time.UTC), DockerImageReference: "mysql:latest", Image: "sha256:e52c6534db85036dabac5e71ff14e720db94def2d90f986f3548425ea27b3719", }, }, }, }, }, } out := new(tabwriter.Writer) b := make([]byte, 1024) buf := bytes.NewBuffer(b) out.Init(buf, 0, 8, 1, '\t', 0) formatImageStreamTags(out, &repo) out.Flush() actual := string(buf.String()) t.Logf("\n%s", actual) }
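// Editorial note and sketch, not part of the original tests: bytes.NewBuffer(b) above takes b
// as the buffer's *initial contents*, so a 1024-byte zeroed slice puts 1024 NUL bytes in front
// of whatever the tabwriter flushes, and string(buf.String()) is a redundant conversion. The
// test only logs the output, so it still passes; the usual pattern is a zero-value buffer, as
// below, with a hypothetical writeTable standing in for formatImageStreamTags.
package tabsketch

import (
	"bytes"
	"fmt"
	"text/tabwriter"
)

// writeTable stands in for a formatter such as formatImageStreamTags.
func writeTable(out *tabwriter.Writer) {
	fmt.Fprintln(out, "NAME\tTAG")
	fmt.Fprintln(out, "foo/bar\tlatest")
}

func render() string {
	var buf bytes.Buffer // the zero value is an empty, ready-to-use buffer
	out := tabwriter.NewWriter(&buf, 0, 8, 1, '\t', 0)
	writeTable(out)
	out.Flush() // tabwriter buffers internally; Flush before reading the output
	return buf.String()
}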
func mockStreams() []*imageapi.ImageStream { return []*imageapi.ImageStream{ { ObjectMeta: kapi.ObjectMeta{Name: "less-than-three-tags"}, Status: imageapi.ImageStreamStatus{ Tags: map[string]imageapi.TagEventList{ "other": { Items: []imageapi.TagEvent{ { DockerImageReference: "other-ref", Created: unversioned.Date(2015, 9, 4, 13, 52, 0, 0, time.UTC), Image: "other-image", }, }, }, "latest": { Items: []imageapi.TagEvent{ { DockerImageReference: "latest-ref", Created: unversioned.Date(2015, 9, 4, 13, 53, 0, 0, time.UTC), Image: "latest-image", }, }, }, }, }, }, { ObjectMeta: kapi.ObjectMeta{Name: "three-tags"}, Status: imageapi.ImageStreamStatus{ Tags: map[string]imageapi.TagEventList{ "other": { Items: []imageapi.TagEvent{ { DockerImageReference: "other-ref", Created: unversioned.Date(2015, 9, 4, 13, 52, 0, 0, time.UTC), Image: "other-image", }, }, }, "latest": { Items: []imageapi.TagEvent{ { DockerImageReference: "latest-ref", Created: unversioned.Date(2015, 9, 4, 13, 53, 0, 0, time.UTC), Image: "latest-image", }, }, }, "third": { Items: []imageapi.TagEvent{ { DockerImageReference: "third-ref", Created: unversioned.Date(2015, 9, 4, 13, 54, 0, 0, time.UTC), Image: "third-image", }, }, }, }, }, }, { ObjectMeta: kapi.ObjectMeta{Name: "more-than-three-tags"}, Status: imageapi.ImageStreamStatus{ Tags: map[string]imageapi.TagEventList{ "other": { Items: []imageapi.TagEvent{ { DockerImageReference: "other-ref", Created: unversioned.Date(2015, 9, 4, 13, 52, 0, 0, time.UTC), Image: "other-image", }, }, }, "latest": { Items: []imageapi.TagEvent{ { DockerImageReference: "latest-ref", Created: unversioned.Date(2015, 9, 4, 13, 53, 0, 0, time.UTC), Image: "latest-image", }, }, }, "third": { Items: []imageapi.TagEvent{ { DockerImageReference: "third-ref", Created: unversioned.Date(2015, 9, 4, 13, 54, 0, 0, time.UTC), Image: "third-image", }, }, }, "another": { Items: []imageapi.TagEvent{ { DockerImageReference: "another-ref", Created: unversioned.Date(2015, 9, 4, 13, 55, 0, 0, time.UTC), Image: "another-image", }, }, }, }, }, }, } }
func TestImageWithMetadata(t *testing.T) { tests := map[string]struct { image Image expectedImage Image expectError bool }{ "no manifest data": { image: Image{}, expectedImage: Image{}, }, "error unmarshalling manifest data": { image: Image{ DockerImageManifest: "{ no {{{ json here!!!", }, expectedImage: Image{}, expectError: true, }, "no history": { image: Image{ DockerImageManifest: `{"name": "library/ubuntu", "tag": "latest"}`, }, expectedImage: Image{}, }, "error unmarshalling v1 compat": { image: Image{ DockerImageManifest: `{"name": "library/ubuntu", "tag": "latest", "history": ["v1Compatibility": "{ not valid {{ json" }`, }, expectError: true, }, "happy path": { image: validImageWithManifestData(), expectedImage: Image{ ObjectMeta: kapi.ObjectMeta{ Name: "id", }, DockerImageManifest: "", DockerImageMetadata: DockerImage{ ID: "2d24f826cb16146e2016ff349a8a33ed5830f3b938d45c0f82943f4ab8c097e7", Parent: "117ee323aaa9d1b136ea55e4421f4ce413dfc6c0cc6b2186dea6c88d93e1ad7c", Comment: "", Created: unversioned.Date(2015, 2, 21, 2, 11, 6, 735146646, time.UTC), Container: "c9a3eda5951d28aa8dbe5933be94c523790721e4f80886d0a8e7a710132a38ec", ContainerConfig: DockerConfig{ Hostname: "43bd710ec89a", Domainname: "", User: "", Memory: 0, MemorySwap: 0, CPUShares: 0, CPUSet: "", AttachStdin: false, AttachStdout: false, AttachStderr: false, PortSpecs: nil, ExposedPorts: nil, Tty: false, OpenStdin: false, StdinOnce: false, Env: []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}, Cmd: []string{"/bin/sh", "-c", "#(nop) CMD [/bin/bash]"}, Image: "117ee323aaa9d1b136ea55e4421f4ce413dfc6c0cc6b2186dea6c88d93e1ad7c", Volumes: nil, WorkingDir: "", Entrypoint: nil, NetworkDisabled: false, SecurityOpts: nil, OnBuild: []string{}, }, DockerVersion: "1.4.1", Author: "", Config: &DockerConfig{ Hostname: "43bd710ec89a", Domainname: "", User: "", Memory: 0, MemorySwap: 0, CPUShares: 0, CPUSet: "", AttachStdin: false, AttachStdout: false, AttachStderr: false, PortSpecs: nil, ExposedPorts: nil, Tty: false, OpenStdin: false, StdinOnce: false, Env: []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}, Cmd: []string{"/bin/bash"}, Image: "117ee323aaa9d1b136ea55e4421f4ce413dfc6c0cc6b2186dea6c88d93e1ad7c", Volumes: nil, WorkingDir: "", Entrypoint: nil, NetworkDisabled: false, OnBuild: []string{}, }, Architecture: "amd64", Size: 0, }, }, }, } for name, test := range tests { imageWithMetadata, err := ImageWithMetadata(test.image) gotError := err != nil if e, a := test.expectError, gotError; e != a { t.Fatalf("%s: expectError=%t, gotError=%t: %s", name, e, a, err) } if test.expectError { continue } if e, a := test.expectedImage, *imageWithMetadata; !kapi.Semantic.DeepEqual(e, a) { stringE := fmt.Sprintf("%#v", e) stringA := fmt.Sprintf("%#v", a) t.Errorf("%s: image: %s", name, util.StringDiff(stringE, stringA)) } } }
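// Editorial sketch, not part of the original tests: the manifest shape that ImageWithMetadata
// evidently parses in the cases above — a Docker schema-1 style document whose history entries
// carry a v1Compatibility field that is itself a JSON string. The struct names are local
// assumptions; only the JSON keys are taken from the fixtures.
package manifestsketch

import "encoding/json"

type manifest struct {
	Name    string `json:"name"`
	Tag     string `json:"tag"`
	History []struct {
		V1Compatibility string `json:"v1Compatibility"`
	} `json:"history"`
}

// imageConfig holds the handful of fields the expected DockerImage above cares about.
type imageConfig struct {
	ID      string `json:"id"`
	Parent  string `json:"parent"`
	Created string `json:"created"`
}

// newestImageConfig unmarshals the manifest and then the embedded v1Compatibility JSON of
// the newest history entry (index 0), which is where ID/Parent/Created come from.
func newestImageConfig(raw string) (*imageConfig, error) {
	var m manifest
	if err := json.Unmarshal([]byte(raw), &m); err != nil {
		return nil, err
	}
	if len(m.History) == 0 {
		return nil, nil // "no history": nothing to decode, not an error
	}
	var cfg imageConfig
	if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), &cfg); err != nil {
		return nil, err
	}
	return &cfg, nil
}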
func TestDescribeBuildDuration(t *testing.T) { type testBuild struct { build *buildapi.Build output string } creation := unversioned.Date(2015, time.April, 9, 6, 0, 0, 0, time.Local) // now a minute ago minuteAgo := unversioned.Unix(unversioned.Now().Rfc3339Copy().Time.Unix()-60, 0) start := unversioned.Date(2015, time.April, 9, 6, 1, 0, 0, time.Local) completion := unversioned.Date(2015, time.April, 9, 6, 2, 0, 0, time.Local) duration := completion.Rfc3339Copy().Time.Sub(start.Rfc3339Copy().Time) zeroDuration := time.Duration(0) tests := []testBuild{ { // 0 - build new &buildapi.Build{ ObjectMeta: kapi.ObjectMeta{CreationTimestamp: minuteAgo}, Status: buildapi.BuildStatus{ Phase: buildapi.BuildPhaseNew, Duration: zeroDuration, }, }, "waiting for 1m0s", }, { // 1 - build pending &buildapi.Build{ ObjectMeta: kapi.ObjectMeta{CreationTimestamp: minuteAgo}, Status: buildapi.BuildStatus{ Phase: buildapi.BuildPhasePending, Duration: zeroDuration, }, }, "waiting for 1m0s", }, { // 2 - build running &buildapi.Build{ ObjectMeta: kapi.ObjectMeta{CreationTimestamp: creation}, Status: buildapi.BuildStatus{ StartTimestamp: &start, Phase: buildapi.BuildPhaseRunning, Duration: duration, }, }, "running for 1m0s", }, { // 3 - build completed &buildapi.Build{ ObjectMeta: kapi.ObjectMeta{CreationTimestamp: creation}, Status: buildapi.BuildStatus{ StartTimestamp: &start, CompletionTimestamp: &completion, Phase: buildapi.BuildPhaseComplete, Duration: duration, }, }, "1m0s", }, { // 4 - build failed &buildapi.Build{ ObjectMeta: kapi.ObjectMeta{CreationTimestamp: creation}, Status: buildapi.BuildStatus{ StartTimestamp: &start, CompletionTimestamp: &completion, Phase: buildapi.BuildPhaseFailed, Duration: duration, }, }, "1m0s", }, { // 5 - build error &buildapi.Build{ ObjectMeta: kapi.ObjectMeta{CreationTimestamp: creation}, Status: buildapi.BuildStatus{ StartTimestamp: &start, CompletionTimestamp: &completion, Phase: buildapi.BuildPhaseError, Duration: duration, }, }, "1m0s", }, { // 6 - build cancelled before running, start time wasn't set yet &buildapi.Build{ ObjectMeta: kapi.ObjectMeta{CreationTimestamp: creation}, Status: buildapi.BuildStatus{ CompletionTimestamp: &completion, Phase: buildapi.BuildPhaseCancelled, Duration: duration, }, }, "waited for 2m0s", }, { // 7 - build cancelled while running, start time is set already &buildapi.Build{ ObjectMeta: kapi.ObjectMeta{CreationTimestamp: creation}, Status: buildapi.BuildStatus{ StartTimestamp: &start, CompletionTimestamp: &completion, Phase: buildapi.BuildPhaseCancelled, Duration: duration, }, }, "1m0s", }, { // 8 - build failed before running, start time wasn't set yet &buildapi.Build{ ObjectMeta: kapi.ObjectMeta{CreationTimestamp: creation}, Status: buildapi.BuildStatus{ CompletionTimestamp: &completion, Phase: buildapi.BuildPhaseFailed, Duration: duration, }, }, "waited for 2m0s", }, { // 9 - build error before running, start time wasn't set yet &buildapi.Build{ ObjectMeta: kapi.ObjectMeta{CreationTimestamp: creation}, Status: buildapi.BuildStatus{ CompletionTimestamp: &completion, Phase: buildapi.BuildPhaseError, Duration: duration, }, }, "waited for 2m0s", }, } for i, tc := range tests { if actual, expected := describeBuildDuration(tc.build), tc.output; actual != expected { t.Errorf("(%d) expected duration output %s, got %s", i, expected, actual) } } }
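// Editorial sketch, not part of the original tests: a minimal decision table that reproduces
// the expected strings in the cases above. It is not the real describeBuildDuration, just the
// branching those expectations imply — queued builds report "waiting for", running builds
// "running for", terminal builds that never started "waited for", and terminal builds that did
// start report the recorded duration.
package durationsketch

import (
	"fmt"
	"time"
)

type phase string

const (
	phaseNew       phase = "New"
	phasePending   phase = "Pending"
	phaseRunning   phase = "Running"
	phaseComplete  phase = "Complete"
	phaseFailed    phase = "Failed"
	phaseError     phase = "Error"
	phaseCancelled phase = "Cancelled"
)

type build struct {
	Created    time.Time
	Start      *time.Time // nil when the build never started
	Completion *time.Time
	Phase      phase
	Duration   time.Duration
}

func describeDuration(b build, now time.Time) string {
	switch {
	case b.Phase == phaseNew || b.Phase == phasePending:
		return fmt.Sprintf("waiting for %v", now.Sub(b.Created))
	case b.Phase == phaseRunning:
		return fmt.Sprintf("running for %v", b.Duration)
	case b.Start == nil && b.Completion != nil:
		// cancelled / failed / errored before it ever started
		return fmt.Sprintf("waited for %v", b.Completion.Sub(b.Created))
	default:
		return fmt.Sprintf("%v", b.Duration)
	}
}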
func TestImageWithMetadata(t *testing.T) { tests := map[string]struct { image Image expectedImage Image expectError bool }{ "no manifest data": { image: Image{}, expectedImage: Image{}, }, "error unmarshalling manifest data": { image: Image{ DockerImageManifest: "{ no {{{ json here!!!", }, expectedImage: Image{}, expectError: true, }, "no history": { image: Image{ DockerImageManifest: `{"name": "library/ubuntu", "tag": "latest"}`, }, expectedImage: Image{ DockerImageManifest: `{"name": "library/ubuntu", "tag": "latest"}`, }, }, "error unmarshalling v1 compat": { image: Image{ DockerImageManifest: "{\"name\": \"library/ubuntu\", \"tag\": \"latest\", \"history\": [\"v1Compatibility\": \"{ not valid {{ json\" }", }, expectError: true, }, "happy path": { image: validImageWithManifestData(), expectedImage: Image{ ObjectMeta: kapi.ObjectMeta{ Name: "id", }, DockerImageManifest: validImageWithManifestData().DockerImageManifest, DockerImageLayers: []ImageLayer{ {Name: "tarsum.dev+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", Size: 0}, {Name: "tarsum.dev+sha256:2aaacc362ac6be2b9e9ae8c6029f6f616bb50aec63746521858e47841b90fabd", Size: 188097705}, {Name: "tarsum.dev+sha256:c937c4bb1c1a21cc6d94340812262c6472092028972ae69b551b1a70d4276171", Size: 194533}, {Name: "tarsum.dev+sha256:b194de3772ebbcdc8f244f663669799ac1cb141834b7cb8b69100285d357a2b0", Size: 1895}, {Name: "tarsum.dev+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", Size: 0}, }, DockerImageMetadata: DockerImage{ ID: "2d24f826cb16146e2016ff349a8a33ed5830f3b938d45c0f82943f4ab8c097e7", Parent: "117ee323aaa9d1b136ea55e4421f4ce413dfc6c0cc6b2186dea6c88d93e1ad7c", Comment: "", Created: unversioned.Date(2015, 2, 21, 2, 11, 6, 735146646, time.UTC), Container: "c9a3eda5951d28aa8dbe5933be94c523790721e4f80886d0a8e7a710132a38ec", ContainerConfig: DockerConfig{ Hostname: "43bd710ec89a", Domainname: "", User: "", Memory: 0, MemorySwap: 0, CPUShares: 0, CPUSet: "", AttachStdin: false, AttachStdout: false, AttachStderr: false, PortSpecs: nil, ExposedPorts: nil, Tty: false, OpenStdin: false, StdinOnce: false, Env: []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}, Cmd: []string{"/bin/sh", "-c", "#(nop) CMD [/bin/bash]"}, Image: "117ee323aaa9d1b136ea55e4421f4ce413dfc6c0cc6b2186dea6c88d93e1ad7c", Volumes: nil, WorkingDir: "", Entrypoint: nil, NetworkDisabled: false, SecurityOpts: nil, OnBuild: []string{}, }, DockerVersion: "1.4.1", Author: "", Config: &DockerConfig{ Hostname: "43bd710ec89a", Domainname: "", User: "", Memory: 0, MemorySwap: 0, CPUShares: 0, CPUSet: "", AttachStdin: false, AttachStdout: false, AttachStderr: false, PortSpecs: nil, ExposedPorts: nil, Tty: false, OpenStdin: false, StdinOnce: false, Env: []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}, Cmd: []string{"/bin/bash"}, Image: "117ee323aaa9d1b136ea55e4421f4ce413dfc6c0cc6b2186dea6c88d93e1ad7c", Volumes: nil, WorkingDir: "", Entrypoint: nil, NetworkDisabled: false, OnBuild: []string{}, }, Architecture: "amd64", Size: 188294133, }, }, }, } for name, test := range tests { imageWithMetadata := test.image err := ImageWithMetadata(&imageWithMetadata) gotError := err != nil if e, a := test.expectError, gotError; e != a { t.Fatalf("%s: expectError=%t, gotError=%t: %s", name, e, a, err) } if test.expectError { continue } if e, a := test.expectedImage, imageWithMetadata; !kapi.Semantic.DeepEqual(e, a) { t.Errorf("%s: image: %s", name, util.ObjectDiff(e, a)) } } }
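// Editorial sketch, not part of the original tests: the expected Size of 188294133 in the
// happy path above is exactly the sum of the non-empty DockerImageLayers (188097705 + 194533
// + 1895), so the metadata size is presumably derived by adding up the layer sizes. The type
// below is a simplified stand-in for ImageLayer.
package sizesketch

type imageLayer struct {
	Name string
	Size int64
}

// totalSize sums the per-layer sizes; the duplicated empty layers contribute nothing.
func totalSize(layers []imageLayer) int64 {
	var total int64
	for _, l := range layers {
		total += l.Size
	}
	return total
}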
func TestMonitorNodeStatusEvictPods(t *testing.T) { fakeNow := unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC) evictionTimeout := 10 * time.Minute table := []struct { fakeNodeHandler *FakeNodeHandler daemonSets []extensions.DaemonSet timeToPass time.Duration newNodeStatus api.NodeStatus expectedEvictPods bool description string }{ // Node created recently, with no status (happens only at cluster startup). { fakeNodeHandler: &FakeNodeHandler{ Existing: []*api.Node{ { ObjectMeta: api.ObjectMeta{ Name: "node0", CreationTimestamp: fakeNow, }, }, }, Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}), }, daemonSets: nil, timeToPass: 0, newNodeStatus: api.NodeStatus{}, expectedEvictPods: false, description: "Node created recently, with no status.", }, // Node created long time ago, and kubelet posted NotReady for a short period of time. { fakeNodeHandler: &FakeNodeHandler{ Existing: []*api.Node{ { ObjectMeta: api.ObjectMeta{ Name: "node0", CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), }, Status: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionFalse, LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, }, }, }, Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}), }, daemonSets: nil, timeToPass: evictionTimeout, newNodeStatus: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionFalse, // Node status has just been updated, and is NotReady for 10min. LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 9, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, }, expectedEvictPods: false, description: "Node created long time ago, and kubelet posted NotReady for a short period of time.", }, // Pod is ds-managed, and kubelet posted NotReady for a long period of time. { fakeNodeHandler: &FakeNodeHandler{ Existing: []*api.Node{ { ObjectMeta: api.ObjectMeta{ Name: "node0", CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), }, Status: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionFalse, LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, }, }, }, Clientset: fake.NewSimpleClientset( &api.PodList{ Items: []api.Pod{ { ObjectMeta: api.ObjectMeta{ Name: "pod0", Namespace: "default", Labels: map[string]string{"daemon": "yes"}, }, Spec: api.PodSpec{ NodeName: "node0", }, }, }, }, ), }, daemonSets: []extensions.DaemonSet{ { ObjectMeta: api.ObjectMeta{ Name: "ds0", Namespace: "default", }, Spec: extensions.DaemonSetSpec{ Selector: &unversioned.LabelSelector{ MatchLabels: map[string]string{"daemon": "yes"}, }, }, }, }, timeToPass: time.Hour, newNodeStatus: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionFalse, // Node status has just been updated, and is NotReady for 1hr. LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 59, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, }, expectedEvictPods: false, description: "Pod is ds-managed, and kubelet posted NotReady for a long period of time.", }, // Node created long time ago, and kubelet posted NotReady for a long period of time. 
{ fakeNodeHandler: &FakeNodeHandler{ Existing: []*api.Node{ { ObjectMeta: api.ObjectMeta{ Name: "node0", CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), }, Status: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionFalse, LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, }, }, }, Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}), }, daemonSets: nil, timeToPass: time.Hour, newNodeStatus: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionFalse, // Node status has just been updated, and is NotReady for 1hr. LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 59, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, }, expectedEvictPods: true, description: "Node created long time ago, and kubelet posted NotReady for a long period of time.", }, // Node created long time ago, node controller posted Unknown for a short period of time. { fakeNodeHandler: &FakeNodeHandler{ Existing: []*api.Node{ { ObjectMeta: api.ObjectMeta{ Name: "node0", CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), }, Status: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionUnknown, LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, }, }, }, Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}), }, daemonSets: nil, timeToPass: evictionTimeout - testNodeMonitorGracePeriod, newNodeStatus: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionUnknown, // Node status was updated by nodecontroller 10min ago LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, }, expectedEvictPods: false, description: "Node created long time ago, node controller posted Unknown for a short period of time.", }, // Node created long time ago, node controller posted Unknown for a long period of time. 
{ fakeNodeHandler: &FakeNodeHandler{ Existing: []*api.Node{ { ObjectMeta: api.ObjectMeta{ Name: "node0", CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), }, Status: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionUnknown, LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, }, }, }, Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}), }, daemonSets: nil, timeToPass: 60 * time.Minute, newNodeStatus: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionUnknown, // Node status was updated by nodecontroller 1hr ago LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, }, expectedEvictPods: true, description: "Node created long time ago, node controller posted Unknown for a long period of time.", }, } for _, item := range table { nodeController := NewNodeController(nil, item.fakeNodeHandler, evictionTimeout, util.NewFakeAlwaysRateLimiter(), util.NewFakeAlwaysRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, false) nodeController.now = func() unversioned.Time { return fakeNow } for _, ds := range item.daemonSets { nodeController.daemonSetStore.Add(&ds) } if err := nodeController.monitorNodeStatus(); err != nil { t.Errorf("unexpected error: %v", err) } if item.timeToPass > 0 { nodeController.now = func() unversioned.Time { return unversioned.Time{Time: fakeNow.Add(item.timeToPass)} } item.fakeNodeHandler.Existing[0].Status = item.newNodeStatus } if err := nodeController.monitorNodeStatus(); err != nil { t.Errorf("unexpected error: %v", err) } nodeController.podEvictor.Try(func(value TimedValue) (bool, time.Duration) { remaining, _ := nodeController.deletePods(value.Value) if remaining { nodeController.terminationEvictor.Add(value.Value) } return true, 0 }) nodeController.podEvictor.Try(func(value TimedValue) (bool, time.Duration) { nodeController.terminatePods(value.Value, value.AddedAt) return true, 0 }) podEvicted := false for _, action := range item.fakeNodeHandler.Actions() { if action.GetVerb() == "delete" && action.GetResource() == "pods" { podEvicted = true } } if item.expectedEvictPods != podEvicted { t.Errorf("expected pod eviction: %+v, got %+v for %+v", item.expectedEvictPods, podEvicted, item.description) } } }
func TestConvert(t *testing.T) { sinceSeconds := int64(123) sinceTime := unversioned.Date(2000, 1, 1, 12, 34, 56, 0, time.UTC) tests := []struct { input interface{} expected url.Values }{ { input: &foo{ Str: "hello", }, expected: url.Values{"str": {"hello"}}, }, { input: &foo{ Str: "test string", Slice: []string{"one", "two", "three"}, Integer: 234, Boolean: true, }, expected: url.Values{"str": {"test string"}, "slice": {"one", "two", "three"}, "integer": {"234"}, "boolean": {"true"}}, }, { input: &foo{ Str: "named types", NamedStr: "value1", NamedBool: true, }, expected: url.Values{"str": {"named types"}, "namedStr": {"value1"}, "namedBool": {"true"}}, }, { input: &foo{ Str: "don't ignore embedded struct", Foobar: bar{ Float1: 5.0, }, }, expected: url.Values{"str": {"don't ignore embedded struct"}, "float1": {"5"}, "float2": {"0"}}, }, { // Ignore untagged fields input: &bar{ Float1: 23.5, Float2: 100.7, Int1: 1, Int2: 2, Int3: 3, Ignored: 1, Ignored2: "ignored", }, expected: url.Values{"float1": {"23.5"}, "float2": {"100.7"}, "int1": {"1"}, "int2": {"2"}, "int3": {"3"}}, }, { // include fields that are not tagged omitempty input: &foo{ NamedStr: "named str", }, expected: url.Values{"str": {""}, "namedStr": {"named str"}}, }, { input: &baz{ Ptr: intp(5), Bptr: boolp(true), }, expected: url.Values{"ptr": {"5"}, "bptr": {"true"}}, }, { input: &baz{ Bptr: boolp(true), }, expected: url.Values{"ptr": {""}, "bptr": {"true"}}, }, { input: &baz{ Ptr: intp(5), }, expected: url.Values{"ptr": {"5"}}, }, { input: &childStructs{ Container: "mycontainer", Follow: true, Previous: true, SinceSeconds: &sinceSeconds, SinceTime: &sinceTime, // test a custom marshaller EmptyTime: nil, // test a nil custom marshaller without omitempty }, expected: url.Values{"container": {"mycontainer"}, "follow": {"true"}, "previous": {"true"}, "sinceSeconds": {"123"}, "sinceTime": {"2000-01-01T12:34:56Z"}, "emptyTime": {""}}, }, { input: &childStructs{ Container: "mycontainer", Follow: true, Previous: true, SinceSeconds: &sinceSeconds, SinceTime: nil, // test a nil custom marshaller with omitempty }, expected: url.Values{"container": {"mycontainer"}, "follow": {"true"}, "previous": {"true"}, "sinceSeconds": {"123"}, "emptyTime": {""}}, }, } for _, test := range tests { result, err := queryparams.Convert(test.input) if err != nil { t.Errorf("Unexpected error while converting %#v: %v", test.input, err) } validateResult(t, test.input, result, test.expected) } }
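// Editorial sketch, not part of the original tests: the fixture types (foo, bar, baz,
// childStructs) are defined elsewhere in the test file, but the expected url.Values above
// imply json tags roughly like the ones below — queryparams.Convert walks exported fields,
// uses the json tag as the parameter name, skips untagged fields, and honours omitempty.
// The exact field sets, the *time.Time stand-ins, and the intp/boolp helpers are assumptions
// for illustration only.
package queryparamsketch

import "time"

type bar struct {
	Float1   float64 `json:"float1"`           // no omitempty: "0" is still emitted
	Float2   float64 `json:"float2"`
	Int1     int64   `json:"int1,omitempty"`
	Int2     int64   `json:"int2,omitempty"`
	Int3     int64   `json:"int3,omitempty"`
	Ignored  int     // untagged: never serialized
	Ignored2 string  // untagged: never serialized
}

type foo struct {
	Str       string   `json:"str"`                 // no omitempty: emitted even when ""
	Integer   int      `json:"integer,omitempty"`
	Slice     []string `json:"slice,omitempty"`
	Boolean   bool     `json:"boolean,omitempty"`
	NamedStr  string   `json:"namedStr,omitempty"`  // a named string type in the real fixture
	NamedBool bool     `json:"namedBool,omitempty"` // a named bool type in the real fixture
	Foobar    bar      `json:",omitempty"`          // flattened into its own tagged fields when non-zero
}

type baz struct {
	Ptr  *int  `json:"ptr"`            // no omitempty: nil renders as ""
	Bptr *bool `json:"bptr,omitempty"` // omitempty: nil is skipped
}

type childStructs struct {
	Container    string     `json:"container,omitempty"`
	Follow       bool       `json:"follow,omitempty"`
	Previous     bool       `json:"previous,omitempty"`
	SinceSeconds *int64     `json:"sinceSeconds,omitempty"`
	SinceTime    *time.Time `json:"sinceTime,omitempty"` // custom marshaller in the real fixture
	EmptyTime    *time.Time `json:"emptyTime"`           // no omitempty: nil renders as ""
}

// intp and boolp are the usual pointer helpers the test cases rely on.
func intp(i int) *int    { return &i }
func boolp(b bool) *bool { return &b }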
func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) { fakeNow := unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC) table := []struct { fakeNodeHandler *FakeNodeHandler timeToPass time.Duration newNodeStatus api.NodeStatus expectedPodStatusUpdate bool }{ // Node created recently, without status. // Expect no action from node controller (within startup grace period). { fakeNodeHandler: &FakeNodeHandler{ Existing: []*api.Node{ { ObjectMeta: api.ObjectMeta{ Name: "node0", CreationTimestamp: fakeNow, }, }, }, Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}), }, expectedPodStatusUpdate: false, }, // Node created long time ago, with status updated recently. // Expect no action from node controller (within monitor grace period). { fakeNodeHandler: &FakeNodeHandler{ Existing: []*api.Node{ { ObjectMeta: api.ObjectMeta{ Name: "node0", CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), }, Status: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionTrue, // Node status has just been updated. LastHeartbeatTime: fakeNow, LastTransitionTime: fakeNow, }, }, Capacity: api.ResourceList{ api.ResourceName(api.ResourceCPU): resource.MustParse("10"), api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), }, }, Spec: api.NodeSpec{ ExternalID: "node0", }, }, }, Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}), }, expectedPodStatusUpdate: false, }, // Node created long time ago, with status updated by kubelet exceeds grace period. // Expect pods status updated and Unknown node status posted from node controller { fakeNodeHandler: &FakeNodeHandler{ Existing: []*api.Node{ { ObjectMeta: api.ObjectMeta{ Name: "node0", CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), }, Status: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionTrue, // Node status hasn't been updated for 1hr. LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, { Type: api.NodeOutOfDisk, Status: api.ConditionFalse, // Node status hasn't been updated for 1hr. LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, Capacity: api.ResourceList{ api.ResourceName(api.ResourceCPU): resource.MustParse("10"), api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), }, }, Spec: api.NodeSpec{ ExternalID: "node0", }, }, }, Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}), }, timeToPass: 1 * time.Minute, newNodeStatus: api.NodeStatus{ Conditions: []api.NodeCondition{ { Type: api.NodeReady, Status: api.ConditionTrue, // Node status hasn't been updated for 1hr. LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, { Type: api.NodeOutOfDisk, Status: api.ConditionFalse, // Node status hasn't been updated for 1hr. 
LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), }, }, Capacity: api.ResourceList{ api.ResourceName(api.ResourceCPU): resource.MustParse("10"), api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), }, }, expectedPodStatusUpdate: true, }, } for i, item := range table { nodeController := NewNodeController(nil, item.fakeNodeHandler, 5*time.Minute, util.NewFakeAlwaysRateLimiter(), util.NewFakeAlwaysRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, false) nodeController.now = func() unversioned.Time { return fakeNow } if err := nodeController.monitorNodeStatus(); err != nil { t.Errorf("Case[%d] unexpected error: %v", i, err) } if item.timeToPass > 0 { nodeController.now = func() unversioned.Time { return unversioned.Time{Time: fakeNow.Add(item.timeToPass)} } item.fakeNodeHandler.Existing[0].Status = item.newNodeStatus if err := nodeController.monitorNodeStatus(); err != nil { t.Errorf("Case[%d] unexpected error: %v", i, err) } } podStatusUpdated := false for _, action := range item.fakeNodeHandler.Actions() { if action.GetVerb() == "update" && action.GetResource() == "pods" && action.GetSubresource() == "status" { podStatusUpdated = true } } if podStatusUpdated != item.expectedPodStatusUpdate { t.Errorf("Case[%d] expect pod status updated to be %v, but got %v", i, item.expectedPodStatusUpdate, podStatusUpdated) } } }
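// Editorial sketch, not part of the original tests: the newPod helper these node-controller
// tests lean on is defined elsewhere; from its call sites (newPod("pod0", "node0")) it
// presumably builds a minimal pod bound to the given node, roughly as below. The Ready
// condition and default namespace are assumptions — only the name and node binding are
// visible in the tests, and the types are simplified stand-ins for the api package.
package podsketch

type podCondition struct {
	Type   string
	Status string
}

type pod struct {
	Name       string
	Namespace  string
	NodeName   string
	Conditions []podCondition
}

func newPod(name, host string) *pod {
	return &pod{
		Name:       name,
		Namespace:  "default",
		NodeName:   host,
		Conditions: []podCondition{{Type: "Ready", Status: "True"}},
	}
}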
func makeDeploymentList(versions int64) *kapi.ReplicationControllerList { list := &kapi.ReplicationControllerList{} for v := int64(1); v <= versions; v++ { list.Items = append(list.Items, makeDeployment(v)) } return list } var ( fakePodList = &kapi.PodList{ Items: []kapi.Pod{ { ObjectMeta: kapi.ObjectMeta{ Name: "config-5-application-pod-1", Namespace: kapi.NamespaceDefault, CreationTimestamp: unversioned.Date(2016, time.February, 1, 1, 0, 1, 0, time.UTC), Labels: testSelector, }, Spec: kapi.PodSpec{ Containers: []kapi.Container{ { Name: "config-5-container-1", }, }, NodeName: "some-host", }, }, { ObjectMeta: kapi.ObjectMeta{ Name: "config-5-application-pod-2", Namespace: kapi.NamespaceDefault,
func TestGetImageStreamTag(t *testing.T) { tests := map[string]struct { image *api.Image repo *api.ImageStream expectError bool errorTargetKind string errorTargetID string }{ "happy path": { image: &api.Image{ObjectMeta: kapi.ObjectMeta{Name: "10"}, DockerImageReference: "foo/bar/baz"}, repo: &api.ImageStream{ ObjectMeta: kapi.ObjectMeta{ Namespace: "default", Name: "test", }, Spec: api.ImageStreamSpec{ Tags: map[string]api.TagReference{ "latest": { Annotations: map[string]string{ "color": "blue", "size": "large", }, }, }, }, Status: api.ImageStreamStatus{ Tags: map[string]api.TagEventList{ "latest": { Items: []api.TagEvent{ { Created: unversioned.Date(2015, 3, 24, 9, 38, 0, 0, time.UTC), DockerImageReference: "test", Image: "10", }, }, }, }, }, }, }, "image = ''": { repo: &api.ImageStream{Status: api.ImageStreamStatus{ Tags: map[string]api.TagEventList{ "latest": {Items: []api.TagEvent{{DockerImageReference: "test", Image: ""}}}, }, }}, expectError: true, errorTargetKind: "imageStreamTag", errorTargetID: "test:latest", }, "missing image": { repo: &api.ImageStream{Status: api.ImageStreamStatus{ Tags: map[string]api.TagEventList{ "latest": {Items: []api.TagEvent{{DockerImageReference: "test", Image: "10"}}}, }, }}, expectError: true, errorTargetKind: "image", errorTargetID: "10", }, "missing repo": { expectError: true, errorTargetKind: "imageStream", errorTargetID: "test", }, "missing tag": { image: &api.Image{ObjectMeta: kapi.ObjectMeta{Name: "10"}, DockerImageReference: "foo/bar/baz"}, repo: &api.ImageStream{Status: api.ImageStreamStatus{ Tags: map[string]api.TagEventList{ "other": {Items: []api.TagEvent{{DockerImageReference: "test", Image: "10"}}}, }, }}, expectError: true, errorTargetKind: "imageStreamTag", errorTargetID: "test:latest", }, } for name, testCase := range tests { fakeEtcdClient, _, storage := setup(t) if testCase.image != nil { fakeEtcdClient.Data[etcdtest.AddPrefix("/images/"+testCase.image.Name)] = tools.EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ Value: runtime.EncodeOrDie(latest.Codec, testCase.image), ModifiedIndex: 1, }, }, } } else { fakeEtcdClient.Data[etcdtest.AddPrefix("/images/10")] = tools.EtcdResponseWithError{ R: &etcd.Response{ Node: nil, }, E: tools.EtcdErrorNotFound, } } if testCase.repo != nil { fakeEtcdClient.Data[etcdtest.AddPrefix("/imagestreams/default/test")] = tools.EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ Value: runtime.EncodeOrDie(latest.Codec, testCase.repo), ModifiedIndex: 1, }, }, } } else { fakeEtcdClient.Data[etcdtest.AddPrefix("/imagestreams/default/test")] = tools.EtcdResponseWithError{ R: &etcd.Response{ Node: nil, }, E: tools.EtcdErrorNotFound, } } obj, err := storage.Get(kapi.NewDefaultContext(), "test:latest") gotErr := err != nil if e, a := testCase.expectError, gotErr; e != a { t.Fatalf("%s: Expected err=%v: got %v: %v", name, e, a, err) } if testCase.expectError { if !errors.IsNotFound(err) { t.Fatalf("%s: unexpected error type: %v", name, err) } status := err.(statusError).Status() if status.Details.Kind != testCase.errorTargetKind || status.Details.Name != testCase.errorTargetID { t.Errorf("%s: unexpected status: %#v", name, status) } } else { actual := obj.(*api.ImageStreamTag) if e, a := "default", actual.Namespace; e != a { t.Errorf("%s: namespace: expected %v, got %v", name, e, a) } if e, a := "test:latest", actual.Name; e != a { t.Errorf("%s: name: expected %v, got %v", name, e, a) } if e, a := map[string]string{"size": "large", "color": "blue"}, actual.Image.Annotations; 
!reflect.DeepEqual(e, a) { t.Errorf("%s: annotations: expected %v, got %v", name, e, a) } if e, a := unversioned.Date(2015, 3, 24, 9, 38, 0, 0, time.UTC), actual.CreationTimestamp; !a.Equal(e) { t.Errorf("%s: timestamp: expected %v, got %v", name, e, a) } } } }
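// Editorial sketch, not part of the original tests: the ImageStreamTag storage above is
// addressed by an ID of the form "<stream>:<tag>" ("test:latest"), so somewhere it has to
// split that ID into its stream and tag parts. A minimal version of that parsing could look
// like this; the fallback to a default tag is an assumption for illustration.
package istagsketch

import (
	"fmt"
	"strings"
)

const defaultImageTag = "latest"

// parseImageStreamTagName splits "stream:tag" into its parts; a missing tag falls back to
// the default, and malformed IDs are rejected.
func parseImageStreamTagName(id string) (stream, tag string, err error) {
	switch parts := strings.Split(id, ":"); len(parts) {
	case 1:
		return parts[0], defaultImageTag, nil
	case 2:
		if parts[0] == "" || parts[1] == "" {
			return "", "", fmt.Errorf("%q must be of the form <stream>:<tag>", id)
		}
		return parts[0], parts[1], nil
	default:
		return "", "", fmt.Errorf("%q must be of the form <stream>:<tag>", id)
	}
}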
func TestUnstructuredGetters(t *testing.T) { unstruct := runtime.Unstructured{ Object: map[string]interface{}{ "kind": "test_kind", "apiVersion": "test_version", "metadata": map[string]interface{}{ "name": "test_name", "namespace": "test_namespace", "generateName": "test_generateName", "uid": "test_uid", "resourceVersion": "test_resourceVersion", "selfLink": "test_selfLink", "creationTimestamp": "2009-11-10T23:00:00Z", "deletionTimestamp": "2010-11-10T23:00:00Z", "labels": map[string]interface{}{ "test_label": "test_value", }, "annotations": map[string]interface{}{ "test_annotation": "test_value", }, "ownerReferences": []map[string]interface{}{ { "kind": "Pod", "name": "poda", "apiVersion": "v1", "uid": "1", }, { "kind": "Pod", "name": "podb", "apiVersion": "v1", "uid": "2", }, }, "finalizers": []interface{}{ "finalizer.1", "finalizer.2", }, "clusterName": "cluster123", }, }, } if got, want := unstruct.GetAPIVersion(), "test_version"; got != want { t.Errorf("GetAPIVersions() = %s, want %s", got, want) } if got, want := unstruct.GetKind(), "test_kind"; got != want { t.Errorf("GetKind() = %s, want %s", got, want) } if got, want := unstruct.GetNamespace(), "test_namespace"; got != want { t.Errorf("GetNamespace() = %s, want %s", got, want) } if got, want := unstruct.GetName(), "test_name"; got != want { t.Errorf("GetName() = %s, want %s", got, want) } if got, want := unstruct.GetGenerateName(), "test_generateName"; got != want { t.Errorf("GetGenerateName() = %s, want %s", got, want) } if got, want := unstruct.GetUID(), types.UID("test_uid"); got != want { t.Errorf("GetUID() = %s, want %s", got, want) } if got, want := unstruct.GetResourceVersion(), "test_resourceVersion"; got != want { t.Errorf("GetResourceVersion() = %s, want %s", got, want) } if got, want := unstruct.GetSelfLink(), "test_selfLink"; got != want { t.Errorf("GetSelfLink() = %s, want %s", got, want) } if got, want := unstruct.GetCreationTimestamp(), unversioned.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC); !got.Equal(want) { t.Errorf("GetCreationTimestamp() = %s, want %s", got, want) } if got, want := unstruct.GetDeletionTimestamp(), unversioned.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC); got == nil || !got.Equal(want) { t.Errorf("GetDeletionTimestamp() = %s, want %s", got, want) } if got, want := unstruct.GetLabels(), map[string]string{"test_label": "test_value"}; !reflect.DeepEqual(got, want) { t.Errorf("GetLabels() = %s, want %s", got, want) } if got, want := unstruct.GetAnnotations(), map[string]string{"test_annotation": "test_value"}; !reflect.DeepEqual(got, want) { t.Errorf("GetAnnotations() = %s, want %s", got, want) } refs := unstruct.GetOwnerReferences() expectedOwnerReferences := []metatypes.OwnerReference{ { Kind: "Pod", Name: "poda", APIVersion: "v1", UID: "1", }, { Kind: "Pod", Name: "podb", APIVersion: "v1", UID: "2", }, } if got, want := refs, expectedOwnerReferences; !reflect.DeepEqual(got, want) { t.Errorf("GetOwnerReferences()=%v, want %v", got, want) } if got, want := unstruct.GetFinalizers(), []string{"finalizer.1", "finalizer.2"}; !reflect.DeepEqual(got, want) { t.Errorf("GetFinalizers()=%v, want %v", got, want) } if got, want := unstruct.GetClusterName(), "cluster123"; got != want { t.Errorf("GetClusterName()=%v, want %v", got, want) } }
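// Editorial sketch, not part of the original tests: the getters exercised above all reduce to
// pulling typed values out of nested map[string]interface{} data; GetCreationTimestamp, for
// example, has to locate metadata.creationTimestamp and parse it as RFC 3339. A simplified
// version of that lookup, independent of the real runtime.Unstructured implementation:
package unstructuredsketch

import "time"

// nestedString walks the given path of map keys and returns the string at the end,
// or "" if any step is missing or of the wrong type.
func nestedString(obj map[string]interface{}, path ...string) string {
	cur := obj
	for i, key := range path {
		val, ok := cur[key]
		if !ok {
			return ""
		}
		if i == len(path)-1 {
			s, _ := val.(string)
			return s
		}
		next, ok := val.(map[string]interface{})
		if !ok {
			return ""
		}
		cur = next
	}
	return ""
}

// creationTimestamp parses metadata.creationTimestamp as RFC 3339, returning the zero time
// when the field is absent or malformed.
func creationTimestamp(obj map[string]interface{}) time.Time {
	raw := nestedString(obj, "metadata", "creationTimestamp")
	if raw == "" {
		return time.Time{}
	}
	t, err := time.Parse(time.RFC3339, raw)
	if err != nil {
		return time.Time{}
	}
	return t
}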