Example #1
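// newPodList builds a PodList of count pods named pod-1..pod-count with
// staggered creation timestamps and a Ready condition set to True. If
// isUnready is a valid index, that pod's Ready condition is set to False;
// if isUnhealthy is a valid index, that pod is given a container status
// with RestartCount 5. Pass -1 for either index to leave all pods untouched.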
func newPodList(count, isUnready, isUnhealthy int, labels map[string]string) *api.PodList {
	pods := []api.Pod{}
	for i := 0; i < count; i++ {
		newPod := api.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name:              fmt.Sprintf("pod-%d", i+1),
				Namespace:         api.NamespaceDefault,
				CreationTimestamp: metav1.Date(2016, time.April, 1, 1, 0, i, 0, time.UTC),
				Labels:            labels,
			},
			Status: api.PodStatus{
				Conditions: []api.PodCondition{
					{
						Status: api.ConditionTrue,
						Type:   api.PodReady,
					},
				},
			},
		}
		pods = append(pods, newPod)
	}
	if isUnready > -1 && isUnready < count {
		pods[isUnready].Status.Conditions[0].Status = api.ConditionFalse
	}
	if isUnhealthy > -1 && isUnhealthy < count {
		pods[isUnhealthy].Status.ContainerStatuses = []api.ContainerStatus{{RestartCount: 5}}
	}
	return &api.PodList{
		Items: pods,
	}
}
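For instance, newPodList(2, -1, 1, labelSet) builds two ready pods where pod-2 additionally reports a container restart count of 5; Example #3 below uses exactly this call.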
Example #2
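// TestScale exercises the deployment controller's scale logic: plain scaling
// of a single replica set, proportional scaling across multiple replica sets
// (including leftover distribution, scaling to and from zero, and changes to
// maxSurge), and the case where updating one replica set fails.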
func TestScale(t *testing.T) {
	newTimestamp := metav1.Date(2016, 5, 20, 2, 0, 0, 0, time.UTC)
	oldTimestamp := metav1.Date(2016, 5, 20, 1, 0, 0, 0, time.UTC)
	olderTimestamp := metav1.Date(2016, 5, 20, 0, 0, 0, 0, time.UTC)

	var updatedTemplate = func(replicas int) *extensions.Deployment {
		d := newDeployment("foo", replicas, nil, nil, nil, map[string]string{"foo": "bar"})
		d.Spec.Template.Labels["another"] = "label"
		return d
	}

	tests := []struct {
		name          string
		deployment    *extensions.Deployment
		oldDeployment *extensions.Deployment

		newRS  *extensions.ReplicaSet
		oldRSs []*extensions.ReplicaSet

		expectedNew  *extensions.ReplicaSet
		expectedOld  []*extensions.ReplicaSet
		wasntUpdated map[string]bool

		desiredReplicasAnnotations map[string]int32
	}{
		{
			name:          "normal scaling event: 10 -> 12",
			deployment:    newDeployment("foo", 12, nil, nil, nil, nil),
			oldDeployment: newDeployment("foo", 10, nil, nil, nil, nil),

			newRS:  rs("foo-v1", 10, nil, newTimestamp),
			oldRSs: []*extensions.ReplicaSet{},

			expectedNew: rs("foo-v1", 12, nil, newTimestamp),
			expectedOld: []*extensions.ReplicaSet{},
		},
		{
			name:          "normal scaling event: 10 -> 5",
			deployment:    newDeployment("foo", 5, nil, nil, nil, nil),
			oldDeployment: newDeployment("foo", 10, nil, nil, nil, nil),

			newRS:  rs("foo-v1", 10, nil, newTimestamp),
			oldRSs: []*extensions.ReplicaSet{},

			expectedNew: rs("foo-v1", 5, nil, newTimestamp),
			expectedOld: []*extensions.ReplicaSet{},
		},
		{
			name:          "proportional scaling: 5 -> 10",
			deployment:    newDeployment("foo", 10, nil, nil, nil, nil),
			oldDeployment: newDeployment("foo", 5, nil, nil, nil, nil),

			newRS:  rs("foo-v2", 2, nil, newTimestamp),
			oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 3, nil, oldTimestamp)},

			expectedNew: rs("foo-v2", 4, nil, newTimestamp),
			expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 6, nil, oldTimestamp)},
		},
		{
			name:          "proportional scaling: 5 -> 3",
			deployment:    newDeployment("foo", 3, nil, nil, nil, nil),
			oldDeployment: newDeployment("foo", 5, nil, nil, nil, nil),

			newRS:  rs("foo-v2", 2, nil, newTimestamp),
			oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 3, nil, oldTimestamp)},

			expectedNew: rs("foo-v2", 1, nil, newTimestamp),
			expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 2, nil, oldTimestamp)},
		},
		{
			name:          "proportional scaling: 9 -> 4",
			deployment:    newDeployment("foo", 4, nil, nil, nil, nil),
			oldDeployment: newDeployment("foo", 9, nil, nil, nil, nil),

			newRS:  rs("foo-v2", 8, nil, newTimestamp),
			oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)},

			expectedNew: rs("foo-v2", 4, nil, newTimestamp),
			expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 0, nil, oldTimestamp)},
		},
		{
			name:          "proportional scaling: 7 -> 10",
			deployment:    newDeployment("foo", 10, nil, nil, nil, nil),
			oldDeployment: newDeployment("foo", 7, nil, nil, nil, nil),

			newRS:  rs("foo-v3", 2, nil, newTimestamp),
			oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 3, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},

			expectedNew: rs("foo-v3", 3, nil, newTimestamp),
			expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 4, nil, oldTimestamp), rs("foo-v1", 3, nil, olderTimestamp)},
		},
		{
			name:          "proportional scaling: 13 -> 8",
			deployment:    newDeployment("foo", 8, nil, nil, nil, nil),
			oldDeployment: newDeployment("foo", 13, nil, nil, nil, nil),

			newRS:  rs("foo-v3", 2, nil, newTimestamp),
			oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 8, nil, oldTimestamp), rs("foo-v1", 3, nil, olderTimestamp)},

			expectedNew: rs("foo-v3", 1, nil, newTimestamp),
			expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 5, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},
		},
		// Scales up the new replica set.
		{
			name:          "leftover distribution: 3 -> 4",
			deployment:    newDeployment("foo", 4, nil, nil, nil, nil),
			oldDeployment: newDeployment("foo", 3, nil, nil, nil, nil),

			newRS:  rs("foo-v3", 1, nil, newTimestamp),
			oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},

			expectedNew: rs("foo-v3", 2, nil, newTimestamp),
			expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
		},
		// Scales down the older replica set.
		{
			name:          "leftover distribution: 3 -> 2",
			deployment:    newDeployment("foo", 2, nil, nil, nil, nil),
			oldDeployment: newDeployment("foo", 3, nil, nil, nil, nil),

			newRS:  rs("foo-v3", 1, nil, newTimestamp),
			oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},

			expectedNew: rs("foo-v3", 1, nil, newTimestamp),
			expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
		},
		// Scales up the latest replica set first.
		{
			name:          "proportional scaling (no new rs): 4 -> 5",
			deployment:    newDeployment("foo", 5, nil, nil, nil, nil),
			oldDeployment: newDeployment("foo", 4, nil, nil, nil, nil),

			newRS:  nil,
			oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},

			expectedNew: nil,
			expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 3, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},
		},
		// Scales down to zero
		{
			name:          "proportional scaling: 6 -> 0",
			deployment:    newDeployment("foo", 0, nil, nil, nil, nil),
			oldDeployment: newDeployment("foo", 6, nil, nil, nil, nil),

			newRS:  rs("foo-v3", 3, nil, newTimestamp),
			oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},

			expectedNew: rs("foo-v3", 0, nil, newTimestamp),
			expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
		},
		// Scales up from zero
		{
			name:          "proportional scaling: 0 -> 6",
			deployment:    newDeployment("foo", 6, nil, nil, nil, nil),
			oldDeployment: newDeployment("foo", 6, nil, nil, nil, nil),

			newRS:  rs("foo-v3", 0, nil, newTimestamp),
			oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},

			expectedNew:  rs("foo-v3", 6, nil, newTimestamp),
			expectedOld:  []*extensions.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
			wasntUpdated: map[string]bool{"foo-v2": true, "foo-v1": true},
		},
		// Scenario: deployment.spec.replicas == 3 ( foo-v1.spec.replicas == foo-v2.spec.replicas == foo-v3.spec.replicas == 1 )
		// Deployment is scaled to 5. foo-v3.spec.replicas and foo-v2.spec.replicas should increment by 1 but foo-v2 fails to
		// update.
		{
			name:          "failed rs update",
			deployment:    newDeployment("foo", 5, nil, nil, nil, nil),
			oldDeployment: newDeployment("foo", 5, nil, nil, nil, nil),

			newRS:  rs("foo-v3", 2, nil, newTimestamp),
			oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},

			expectedNew:  rs("foo-v3", 2, nil, newTimestamp),
			expectedOld:  []*extensions.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
			wasntUpdated: map[string]bool{"foo-v3": true, "foo-v1": true},

			desiredReplicasAnnotations: map[string]int32{"foo-v2": int32(3)},
		},
		{
			name:          "deployment with surge pods",
			deployment:    newDeployment("foo", 20, nil, maxSurge(2), nil, nil),
			oldDeployment: newDeployment("foo", 10, nil, maxSurge(2), nil, nil),

			newRS:  rs("foo-v2", 6, nil, newTimestamp),
			oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 6, nil, oldTimestamp)},

			expectedNew: rs("foo-v2", 11, nil, newTimestamp),
			expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 11, nil, oldTimestamp)},
		},
		{
			name:          "change both surge and size",
			deployment:    newDeployment("foo", 50, nil, maxSurge(6), nil, nil),
			oldDeployment: newDeployment("foo", 10, nil, maxSurge(3), nil, nil),

			newRS:  rs("foo-v2", 5, nil, newTimestamp),
			oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 8, nil, oldTimestamp)},

			expectedNew: rs("foo-v2", 22, nil, newTimestamp),
			expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 34, nil, oldTimestamp)},
		},
		{
			name:          "change both size and template",
			deployment:    updatedTemplate(14),
			oldDeployment: newDeployment("foo", 10, nil, nil, nil, map[string]string{"foo": "bar"}),

			newRS:  nil,
			oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 7, nil, newTimestamp), rs("foo-v1", 3, nil, oldTimestamp)},

			expectedNew: nil,
			expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 10, nil, newTimestamp), rs("foo-v1", 4, nil, oldTimestamp)},
		},
	}

	for _, test := range tests {
		t.Log(test.name)
		fake := fake.Clientset{}
		dc := &DeploymentController{
			client:        &fake,
			eventRecorder: &record.FakeRecorder{},
		}

		if test.newRS != nil {
			desiredReplicas := *(test.oldDeployment.Spec.Replicas)
			if desired, ok := test.desiredReplicasAnnotations[test.newRS.Name]; ok {
				desiredReplicas = desired
			}
			deploymentutil.SetReplicasAnnotations(test.newRS, desiredReplicas, desiredReplicas+deploymentutil.MaxSurge(*test.oldDeployment))
		}
		for i := range test.oldRSs {
			rs := test.oldRSs[i]
			if rs == nil {
				continue
			}
			desiredReplicas := *(test.oldDeployment.Spec.Replicas)
			if desired, ok := test.desiredReplicasAnnotations[rs.Name]; ok {
				desiredReplicas = desired
			}
			deploymentutil.SetReplicasAnnotations(rs, desiredReplicas, desiredReplicas+deploymentutil.MaxSurge(*test.oldDeployment))
		}

		if err := dc.scale(test.deployment, test.newRS, test.oldRSs); err != nil {
			t.Errorf("%s: unexpected error: %v", test.name, err)
			continue
		}

		// Construct the nameToSize map that will hold all the sizes we got out of the tests.
		// Skip updating the map if the replica set wasn't updated since there will be
		// no update action for it.
		nameToSize := make(map[string]int32)
		if test.newRS != nil {
			nameToSize[test.newRS.Name] = *(test.newRS.Spec.Replicas)
		}
		for i := range test.oldRSs {
			rs := test.oldRSs[i]
			nameToSize[rs.Name] = *(rs.Spec.Replicas)
		}
		// Get all the UPDATE actions and update nameToSize with all the updated sizes.
		for _, action := range fake.Actions() {
			rs := action.(testclient.UpdateAction).GetObject().(*extensions.ReplicaSet)
			if !test.wasntUpdated[rs.Name] {
				nameToSize[rs.Name] = *(rs.Spec.Replicas)
			}
		}

		if test.expectedNew != nil && test.newRS != nil && *(test.expectedNew.Spec.Replicas) != nameToSize[test.newRS.Name] {
			t.Errorf("%s: expected new replicas: %d, got: %d", test.name, *(test.expectedNew.Spec.Replicas), nameToSize[test.newRS.Name])
			continue
		}
		if len(test.expectedOld) != len(test.oldRSs) {
			t.Errorf("%s: expected %d old replica sets, got %d", test.name, len(test.expectedOld), len(test.oldRSs))
			continue
		}
		for n := range test.oldRSs {
			rs := test.oldRSs[n]
			expected := test.expectedOld[n]
			if *(expected.Spec.Replicas) != nameToSize[rs.Name] {
				t.Errorf("%s: expected old (%s) replicas: %d, got: %d", test.name, rs.Name, *(expected.Spec.Replicas), nameToSize[rs.Name])
			}
		}
	}
}
Example #3
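// TestGetFirstPod verifies that GetFirstPod returns the first pod under the
// given sort order (ByLogging for kubectl logs, reversed ActivePods for
// kubectl attach) together with the total pod count, and that it waits on a
// watch until a pod becomes ready when none is ready initially.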
func TestGetFirstPod(t *testing.T) {
	labelSet := map[string]string{"test": "selector"}
	tests := []struct {
		name string

		podList  *api.PodList
		watching []watch.Event
		sortBy   func([]*v1.Pod) sort.Interface

		expected    *api.Pod
		expectedNum int
		expectedErr bool
	}{
		{
			name:    "kubectl logs - two ready pods",
			podList: newPodList(2, -1, -1, labelSet),
			sortBy:  func(pods []*v1.Pod) sort.Interface { return controller.ByLogging(pods) },
			expected: &api.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:              "pod-1",
					Namespace:         api.NamespaceDefault,
					CreationTimestamp: metav1.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC),
					Labels:            map[string]string{"test": "selector"},
				},
				Status: api.PodStatus{
					Conditions: []api.PodCondition{
						{
							Status: api.ConditionTrue,
							Type:   api.PodReady,
						},
					},
				},
			},
			expectedNum: 2,
		},
		{
			name:    "kubectl logs - one unhealthy, one healthy",
			podList: newPodList(2, -1, 1, labelSet),
			sortBy:  func(pods []*v1.Pod) sort.Interface { return controller.ByLogging(pods) },
			expected: &api.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:              "pod-2",
					Namespace:         api.NamespaceDefault,
					CreationTimestamp: metav1.Date(2016, time.April, 1, 1, 0, 1, 0, time.UTC),
					Labels:            map[string]string{"test": "selector"},
				},
				Status: api.PodStatus{
					Conditions: []api.PodCondition{
						{
							Status: api.ConditionTrue,
							Type:   api.PodReady,
						},
					},
					ContainerStatuses: []api.ContainerStatus{{RestartCount: 5}},
				},
			},
			expectedNum: 2,
		},
		{
			name:    "kubectl attach - two ready pods",
			podList: newPodList(2, -1, -1, labelSet),
			sortBy:  func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) },
			expected: &api.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:              "pod-1",
					Namespace:         api.NamespaceDefault,
					CreationTimestamp: metav1.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC),
					Labels:            map[string]string{"test": "selector"},
				},
				Status: api.PodStatus{
					Conditions: []api.PodCondition{
						{
							Status: api.ConditionTrue,
							Type:   api.PodReady,
						},
					},
				},
			},
			expectedNum: 2,
		},
		{
			name:    "kubectl attach - wait for ready pod",
			podList: newPodList(1, 1, -1, labelSet),
			watching: []watch.Event{
				{
					Type: watch.Modified,
					Object: &api.Pod{
						ObjectMeta: metav1.ObjectMeta{
							Name:              "pod-1",
							Namespace:         api.NamespaceDefault,
							CreationTimestamp: metav1.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC),
							Labels:            map[string]string{"test": "selector"},
						},
						Status: api.PodStatus{
							Conditions: []api.PodCondition{
								{
									Status: api.ConditionTrue,
									Type:   api.PodReady,
								},
							},
						},
					},
				},
			},
			sortBy: func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) },
			expected: &api.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:              "pod-1",
					Namespace:         api.NamespaceDefault,
					CreationTimestamp: metav1.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC),
					Labels:            map[string]string{"test": "selector"},
				},
				Status: api.PodStatus{
					Conditions: []api.PodCondition{
						{
							Status: api.ConditionTrue,
							Type:   api.PodReady,
						},
					},
				},
			},
			expectedNum: 1,
		},
	}

	for i := range tests {
		test := tests[i]
		fake := fake.NewSimpleClientset(test.podList)
		if len(test.watching) > 0 {
			watcher := watch.NewFake()
			for _, event := range test.watching {
				switch event.Type {
				case watch.Added:
					go watcher.Add(event.Object)
				case watch.Modified:
					go watcher.Modify(event.Object)
				}
			}
			fake.PrependWatchReactor("pods", testcore.DefaultWatchReactor(watcher, nil))
		}
		selector := labels.Set(labelSet).AsSelector()

		pod, numPods, err := GetFirstPod(fake.Core(), api.NamespaceDefault, selector, 1*time.Minute, test.sortBy)
		if pod != nil {
			// Clear the defaulted security context so the pod matches the expected
			// fixture; the nil guard avoids a panic when an error is returned.
			pod.Spec.SecurityContext = nil
		}
		if !test.expectedErr && err != nil {
			t.Errorf("%s: unexpected error: %v", test.name, err)
			continue
		}
		if test.expectedErr && err == nil {
			t.Errorf("%s: expected an error", test.name)
			continue
		}
		if test.expectedNum != numPods {
			t.Errorf("%s: expected %d pods, got %d", test.name, test.expectedNum, numPods)
			continue
		}
		if !reflect.DeepEqual(test.expected, pod) {
			t.Errorf("%s:\nexpected pod:\n%#v\ngot:\n%#v\n\n", test.name, test.expected, pod)
		}
	}
}
Example #4
// TestNodeDeleted checks that the node is deleted when the kubelet stops
// reporting and the cloud provider reports that the node is gone.
func TestNodeDeleted(t *testing.T) {
	pod0 := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "default",
			Name:      "pod0",
		},
		Spec: v1.PodSpec{
			NodeName: "node0",
		},
		Status: v1.PodStatus{
			Conditions: []v1.PodCondition{
				{
					Type:   v1.PodReady,
					Status: v1.ConditionTrue,
				},
			},
		},
	}

	pod1 := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "default",
			Name:      "pod1",
		},
		Spec: v1.PodSpec{
			NodeName: "node0",
		},
		Status: v1.PodStatus{
			Conditions: []v1.PodCondition{
				{
					Type:   v1.PodReady,
					Status: v1.ConditionTrue,
				},
			},
		},
	}

	fnh := &testutil.FakeNodeHandler{
		Existing: []*v1.Node{
			{
				ObjectMeta: metav1.ObjectMeta{
					Name:              "node0",
					CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
				},
				Status: v1.NodeStatus{
					Conditions: []v1.NodeCondition{
						{
							Type:               v1.NodeReady,
							Status:             v1.ConditionUnknown,
							LastHeartbeatTime:  metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
							LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
						},
					},
				},
			},
		},
		Clientset:      fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*pod0, *pod1}}),
		DeleteWaitChan: make(chan struct{}),
	}

	factory := informers.NewSharedInformerFactory(fnh, nil, controller.NoResyncPeriodFunc())

	eventBroadcaster := record.NewBroadcaster()
	cloudNodeController := &CloudNodeController{
		kubeClient:        fnh,
		nodeInformer:      factory.Nodes(),
		cloud:             &fakecloud.FakeCloud{Err: cloudprovider.InstanceNotFound},
		nodeMonitorPeriod: 5 * time.Second,
		recorder:          eventBroadcaster.NewRecorder(v1.EventSource{Component: "controllermanager"}),
	}
	eventBroadcaster.StartLogging(glog.Infof)

	cloudNodeController.Run()

	select {
	case <-fnh.DeleteWaitChan:
	case <-time.After(wait.ForeverTestTimeout):
		t.Errorf("Timed out waiting %v for node to be deleted", wait.ForeverTestTimeout)
	}
	if len(fnh.DeletedNodes) != 1 || fnh.DeletedNodes[0].Name != "node0" {
		t.Errorf("Node was not deleted")
	}
}
Example #5
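// TestConvert checks that queryparams.Convert flattens a tagged struct into
// url.Values, honoring omitempty, named types, embedded structs, untagged
// fields, pointer fields, and custom time marshallers.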
func TestConvert(t *testing.T) {
	sinceSeconds := int64(123)
	sinceTime := metav1.Date(2000, 1, 1, 12, 34, 56, 0, time.UTC)

	tests := []struct {
		input    interface{}
		expected url.Values
	}{
		{
			input: &foo{
				Str: "hello",
			},
			expected: url.Values{"str": {"hello"}},
		},
		{
			input: &foo{
				Str:     "test string",
				Slice:   []string{"one", "two", "three"},
				Integer: 234,
				Boolean: true,
			},
			expected: url.Values{"str": {"test string"}, "slice": {"one", "two", "three"}, "integer": {"234"}, "boolean": {"true"}},
		},
		{
			input: &foo{
				Str:       "named types",
				NamedStr:  "value1",
				NamedBool: true,
			},
			expected: url.Values{"str": {"named types"}, "namedStr": {"value1"}, "namedBool": {"true"}},
		},
		{
			input: &foo{
				Str: "don't ignore embedded struct",
				Foobar: bar{
					Float1: 5.0,
				},
			},
			expected: url.Values{"str": {"don't ignore embedded struct"}, "float1": {"5"}, "float2": {"0"}},
		},
		{
			// Ignore untagged fields
			input: &bar{
				Float1:   23.5,
				Float2:   100.7,
				Int1:     1,
				Int2:     2,
				Int3:     3,
				Ignored:  1,
				Ignored2: "ignored",
			},
			expected: url.Values{"float1": {"23.5"}, "float2": {"100.7"}, "int1": {"1"}, "int2": {"2"}, "int3": {"3"}},
		},
		{
			// include fields that are not tagged omitempty
			input: &foo{
				NamedStr: "named str",
			},
			expected: url.Values{"str": {""}, "namedStr": {"named str"}},
		},
		{
			input: &baz{
				Ptr:  intp(5),
				Bptr: boolp(true),
			},
			expected: url.Values{"ptr": {"5"}, "bptr": {"true"}},
		},
		{
			input: &baz{
				Bptr: boolp(true),
			},
			expected: url.Values{"ptr": {""}, "bptr": {"true"}},
		},
		{
			input: &baz{
				Ptr: intp(5),
			},
			expected: url.Values{"ptr": {"5"}},
		},
		{
			input: &childStructs{
				Container:    "mycontainer",
				Follow:       true,
				Previous:     true,
				SinceSeconds: &sinceSeconds,
				SinceTime:    &sinceTime, // test a custom marshaller
				EmptyTime:    nil,        // test a nil custom marshaller without omitempty
			},
			expected: url.Values{"container": {"mycontainer"}, "follow": {"true"}, "previous": {"true"}, "sinceSeconds": {"123"}, "sinceTime": {"2000-01-01T12:34:56Z"}, "emptyTime": {""}},
		},
		{
			input: &childStructs{
				Container:    "mycontainer",
				Follow:       true,
				Previous:     true,
				SinceSeconds: &sinceSeconds,
				SinceTime:    nil, // test a nil custom marshaller with omitempty
			},
			expected: url.Values{"container": {"mycontainer"}, "follow": {"true"}, "previous": {"true"}, "sinceSeconds": {"123"}, "emptyTime": {""}},
		},
	}

	for _, test := range tests {
		result, err := queryparams.Convert(test.input)
		if err != nil {
			t.Errorf("Unexpected error while converting %#v: %v", test.input, err)
		}
		validateResult(t, test.input, result, test.expected)
	}
}
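The foo, bar, baz, and childStructs fixtures (and the intp/boolp helpers) are defined elsewhere in the original test file and are not shown here. A minimal sketch that would be consistent with the expectations above, with field types and json tags inferred from the test cases rather than copied from the source, is:

// Inferred reconstruction of the test fixtures; the field types and json
// tags below are assumptions derived from the expected url.Values above.
type namedString string
type namedBool bool

type bar struct {
	Float1   float32 `json:"float1"` // no omitempty: a zero value emits "0"
	Float2   float64 `json:"float2"`
	Int1     int64   `json:"int1,omitempty"`
	Int2     int32   `json:"int2,omitempty"`
	Int3     int16   `json:"int3,omitempty"`
	Ignored  int     // untagged fields are skipped by Convert
	Ignored2 string
}

type foo struct {
	Str       string      `json:"str"` // no omitempty: always emitted
	Slice     []string    `json:"slice,omitempty"`
	Integer   int         `json:"integer,omitempty"`
	Boolean   bool        `json:"boolean,omitempty"`
	NamedStr  namedString `json:"namedStr,omitempty"`
	NamedBool namedBool   `json:"namedBool,omitempty"`
	Foobar    bar         `json:"foobar,omitempty"` // zero struct skipped, otherwise flattened
}

type baz struct {
	Ptr  *int  `json:"ptr"` // nil pointer without omitempty emits ""
	Bptr *bool `json:"bptr,omitempty"`
}

type childStructs struct {
	Container    string       `json:"container,omitempty"`
	Follow       bool         `json:"follow,omitempty"`
	Previous     bool         `json:"previous,omitempty"`
	SinceSeconds *int64       `json:"sinceSeconds,omitempty"`
	SinceTime    *metav1.Time `json:"sinceTime,omitempty"` // custom marshaller
	EmptyTime    *metav1.Time `json:"emptyTime"`           // nil without omitempty emits ""
}

func intp(i int) *int    { return &i }
func boolp(b bool) *bool { return &b }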
Example #6
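// TestThresholdsUpdatedStats verifies that a threshold is reported as having
// updated stats when its observation is newer than the last one, carries no
// timestamp, or has no previous observation, and is filtered out otherwise.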
func TestThresholdsUpdatedStats(t *testing.T) {
	updatedThreshold := Threshold{
		Signal: SignalMemoryAvailable,
	}
	locationUTC, err := time.LoadLocation("UTC")
	if err != nil {
		t.Error(err)
		return
	}
	testCases := map[string]struct {
		thresholds   []Threshold
		observations signalObservations
		last         signalObservations
		result       []Threshold
	}{
		"empty": {
			thresholds:   []Threshold{},
			observations: signalObservations{},
			last:         signalObservations{},
			result:       []Threshold{},
		},
		"no-time": {
			thresholds: []Threshold{updatedThreshold},
			observations: signalObservations{
				SignalMemoryAvailable: signalObservation{},
			},
			last:   signalObservations{},
			result: []Threshold{updatedThreshold},
		},
		"no-last-observation": {
			thresholds: []Threshold{updatedThreshold},
			observations: signalObservations{
				SignalMemoryAvailable: signalObservation{
					time: metav1.Date(2016, 1, 1, 0, 0, 0, 0, locationUTC),
				},
			},
			last:   signalObservations{},
			result: []Threshold{updatedThreshold},
		},
		"time-machine": {
			thresholds: []Threshold{updatedThreshold},
			observations: signalObservations{
				SignalMemoryAvailable: signalObservation{
					time: metav1.Date(2016, 1, 1, 0, 0, 0, 0, locationUTC),
				},
			},
			last: signalObservations{
				SignalMemoryAvailable: signalObservation{
					time: metav1.Date(2016, 1, 1, 0, 1, 0, 0, locationUTC),
				},
			},
			result: []Threshold{},
		},
		"same-observation": {
			thresholds: []Threshold{updatedThreshold},
			observations: signalObservations{
				SignalMemoryAvailable: signalObservation{
					time: metav1.Date(2016, 1, 1, 0, 0, 0, 0, locationUTC),
				},
			},
			last: signalObservations{
				SignalMemoryAvailable: signalObservation{
					time: metav1.Date(2016, 1, 1, 0, 0, 0, 0, locationUTC),
				},
			},
			result: []Threshold{},
		},
		"new-observation": {
			thresholds: []Threshold{updatedThreshold},
			observations: signalObservations{
				SignalMemoryAvailable: signalObservation{
					time: metav1.Date(2016, 1, 1, 0, 1, 0, 0, locationUTC),
				},
			},
			last: signalObservations{
				SignalMemoryAvailable: signalObservation{
					time: metav1.Date(2016, 1, 1, 0, 0, 0, 0, locationUTC),
				},
			},
			result: []Threshold{updatedThreshold},
		},
	}
	for testName, testCase := range testCases {
		actual := thresholdsUpdatedStats(testCase.thresholds, testCase.observations, testCase.last)
		if !thresholdList(actual).Equal(thresholdList(testCase.result)) {
			t.Errorf("Test case: %s, expected: %v, actual: %v", testName, testCase.result, actual)
		}
	}
}
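// TestUpdateExistingNodeStatus verifies that updateNodeStatus patches an
// existing node rather than recreating it: heartbeat times are refreshed
// while transition times are preserved, capacity and node info come from
// cadvisor, and the Ready condition remains last in the condition list.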
func TestUpdateExistingNodeStatus(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	kubeClient := testKubelet.fakeKubeClient
	existingNode := v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
		Spec:       v1.NodeSpec{},
		Status: v1.NodeStatus{
			Conditions: []v1.NodeCondition{
				{
					Type:               v1.NodeOutOfDisk,
					Status:             v1.ConditionTrue,
					Reason:             "KubeletOutOfDisk",
					Message:            "out of disk space",
					LastHeartbeatTime:  metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
					LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
				},
				{
					Type:               v1.NodeMemoryPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientMemory",
					Message:            "kubelet has sufficient memory available",
					LastHeartbeatTime:  metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
					LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
				},
				{
					Type:               v1.NodeDiskPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientDisk",
					Message:            "kubelet has sufficient disk space available",
					LastHeartbeatTime:  metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
					LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
				},
				{
					Type:               v1.NodeReady,
					Status:             v1.ConditionTrue,
					Reason:             "KubeletReady",
					Message:            "kubelet is posting ready status",
					LastHeartbeatTime:  metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
					LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
				},
			},
			Capacity: v1.ResourceList{
				v1.ResourceCPU:    *resource.NewMilliQuantity(3000, resource.DecimalSI),
				v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
				v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
			},
			Allocatable: v1.ResourceList{
				v1.ResourceCPU:    *resource.NewMilliQuantity(2800, resource.DecimalSI),
				v1.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI),
				v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
			},
		},
	}
	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
	mockCadvisor := testKubelet.fakeCadvisor
	mockCadvisor.On("Start").Return(nil)
	machineInfo := &cadvisorapi.MachineInfo{
		MachineID:      "123",
		SystemUUID:     "abc",
		BootID:         "1b3",
		NumCores:       2,
		MemoryCapacity: 20E9,
	}
	mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
	versionInfo := &cadvisorapi.VersionInfo{
		KernelVersion:      "3.16.0-0.bpo.4-amd64",
		ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
	}
	mockCadvisor.On("VersionInfo").Return(versionInfo, nil)

	// Make kubelet report that it is out of disk space.
	if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 50, 50, 100, 100); err != nil {
		t.Fatalf("can't update disk space manager: %v", err)
	}

	expectedNode := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
		Spec:       v1.NodeSpec{},
		Status: v1.NodeStatus{
			Conditions: []v1.NodeCondition{
				{
					Type:               v1.NodeOutOfDisk,
					Status:             v1.ConditionTrue,
					Reason:             "KubeletOutOfDisk",
					Message:            "out of disk space",
					LastHeartbeatTime:  metav1.Time{}, // placeholder
					LastTransitionTime: metav1.Time{}, // placeholder
				},
				{
					Type:               v1.NodeMemoryPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientMemory",
					Message:            "kubelet has sufficient memory available",
					LastHeartbeatTime:  metav1.Time{},
					LastTransitionTime: metav1.Time{},
				},
				{
					Type:               v1.NodeDiskPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientDisk",
					Message:            "kubelet has sufficient disk space available",
					LastHeartbeatTime:  metav1.Time{},
					LastTransitionTime: metav1.Time{},
				},
				{
					Type:               v1.NodeReady,
					Status:             v1.ConditionTrue,
					Reason:             "KubeletReady",
					Message:            "kubelet is posting ready status",
					LastHeartbeatTime:  metav1.Time{}, // placeholder
					LastTransitionTime: metav1.Time{}, // placeholder
				},
			},
			NodeInfo: v1.NodeSystemInfo{
				MachineID:               "123",
				SystemUUID:              "abc",
				BootID:                  "1b3",
				KernelVersion:           "3.16.0-0.bpo.4-amd64",
				OSImage:                 "Debian GNU/Linux 7 (wheezy)",
				OperatingSystem:         goruntime.GOOS,
				Architecture:            goruntime.GOARCH,
				ContainerRuntimeVersion: "test://1.5.0",
				KubeletVersion:          version.Get().String(),
				KubeProxyVersion:        version.Get().String(),
			},
			Capacity: v1.ResourceList{
				v1.ResourceCPU:       *resource.NewMilliQuantity(2000, resource.DecimalSI),
				v1.ResourceMemory:    *resource.NewQuantity(20E9, resource.BinarySI),
				v1.ResourcePods:      *resource.NewQuantity(0, resource.DecimalSI),
				v1.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
			},
			Allocatable: v1.ResourceList{
				v1.ResourceCPU:       *resource.NewMilliQuantity(1800, resource.DecimalSI),
				v1.ResourceMemory:    *resource.NewQuantity(19900E6, resource.BinarySI),
				v1.ResourcePods:      *resource.NewQuantity(0, resource.DecimalSI),
				v1.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
			},
			Addresses: []v1.NodeAddress{
				{Type: v1.NodeLegacyHostIP, Address: "127.0.0.1"},
				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			// images will be sorted from max to min in node status.
			Images: []v1.ContainerImage{
				{
					Names:     []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"},
					SizeBytes: 456,
				},
				{
					Names:     []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"},
					SizeBytes: 123,
				},
			},
		},
	}

	kubelet.updateRuntimeUp()
	if err := kubelet.updateNodeStatus(); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	actions := kubeClient.Actions()
	if len(actions) != 2 {
		t.Fatalf("unexpected actions: %v", actions)
	}
	patchAction, ok := actions[1].(core.PatchActionImpl)
	if !ok {
		t.Fatalf("unexpected action type: expected PatchActionImpl, got %#v", actions[1])
	}
	updatedNode, err := applyNodeStatusPatch(&existingNode, patchAction.GetPatch())
	if err != nil {
		t.Fatalf("can't apply node status patch: %v", err)
	}
	for i, cond := range updatedNode.Status.Conditions {
		// Expect LastProbeTime to be updated to Now, while LastTransitionTime stays the same.
		if old := metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Time; reflect.DeepEqual(cond.LastHeartbeatTime.Rfc3339Copy().UTC(), old) {
			t.Errorf("Condition %v LastProbeTime: expected \n%v\n, got \n%v", cond.Type, metav1.Now(), old)
		}
		if got, want := cond.LastTransitionTime.Rfc3339Copy().UTC(), metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Time; !reflect.DeepEqual(got, want) {
			t.Errorf("Condition %v LastTransitionTime: expected \n%#v\n, got \n%#v", cond.Type, want, got)
		}
		updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
		updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
	}

	// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
	if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != v1.NodeReady {
		t.Errorf("unexpected node condition order. NodeReady should be last.")
	}

	if !api.Semantic.DeepEqual(expectedNode, updatedNode) {
		t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNode, updatedNode))
	}
}
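// TestUnstructuredSetters verifies that the metadata setters on an
// unstructured.Unstructured object write the expected values into its
// underlying map representation.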
func TestUnstructuredSetters(t *testing.T) {
	unstruct := unstructured.Unstructured{}
	trueVar := true

	want := unstructured.Unstructured{
		Object: map[string]interface{}{
			"kind":       "test_kind",
			"apiVersion": "test_version",
			"metadata": map[string]interface{}{
				"name":              "test_name",
				"namespace":         "test_namespace",
				"generateName":      "test_generateName",
				"uid":               "test_uid",
				"resourceVersion":   "test_resourceVersion",
				"selfLink":          "test_selfLink",
				"creationTimestamp": "2009-11-10T23:00:00Z",
				"deletionTimestamp": "2010-11-10T23:00:00Z",
				"labels": map[string]interface{}{
					"test_label": "test_value",
				},
				"annotations": map[string]interface{}{
					"test_annotation": "test_value",
				},
				"ownerReferences": []map[string]interface{}{
					{
						"kind":       "Pod",
						"name":       "poda",
						"apiVersion": "v1",
						"uid":        "1",
						"controller": (*bool)(nil),
					},
					{
						"kind":       "Pod",
						"name":       "podb",
						"apiVersion": "v1",
						"uid":        "2",
						"controller": &trueVar,
					},
				},
				"finalizers": []interface{}{
					"finalizer.1",
					"finalizer.2",
				},
				"clusterName": "cluster123",
			},
		},
	}

	unstruct.SetAPIVersion("test_version")
	unstruct.SetKind("test_kind")
	unstruct.SetNamespace("test_namespace")
	unstruct.SetName("test_name")
	unstruct.SetGenerateName("test_generateName")
	unstruct.SetUID(types.UID("test_uid"))
	unstruct.SetResourceVersion("test_resourceVersion")
	unstruct.SetSelfLink("test_selfLink")
	unstruct.SetCreationTimestamp(metav1.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC))
	date := metav1.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)
	unstruct.SetDeletionTimestamp(&date)
	unstruct.SetLabels(map[string]string{"test_label": "test_value"})
	unstruct.SetAnnotations(map[string]string{"test_annotation": "test_value"})
	newOwnerReferences := []metav1.OwnerReference{
		{
			Kind:       "Pod",
			Name:       "poda",
			APIVersion: "v1",
			UID:        "1",
		},
		{
			Kind:       "Pod",
			Name:       "podb",
			APIVersion: "v1",
			UID:        "2",
			Controller: &trueVar,
		},
	}
	unstruct.SetOwnerReferences(newOwnerReferences)
	unstruct.SetFinalizers([]string{"finalizer.1", "finalizer.2"})
	unstruct.SetClusterName("cluster123")

	if !reflect.DeepEqual(unstruct, want) {
		t.Errorf("Wanted: \n%s\n Got:\n%s", want, unstruct)
	}
}
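// TestUnstructuredGetters verifies that the metadata getters on an
// unstructured.Unstructured object read typed values back out of its
// underlying map representation.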
func TestUnstructuredGetters(t *testing.T) {
	unstruct := unstructured.Unstructured{
		Object: map[string]interface{}{
			"kind":       "test_kind",
			"apiVersion": "test_version",
			"metadata": map[string]interface{}{
				"name":              "test_name",
				"namespace":         "test_namespace",
				"generateName":      "test_generateName",
				"uid":               "test_uid",
				"resourceVersion":   "test_resourceVersion",
				"selfLink":          "test_selfLink",
				"creationTimestamp": "2009-11-10T23:00:00Z",
				"deletionTimestamp": "2010-11-10T23:00:00Z",
				"labels": map[string]interface{}{
					"test_label": "test_value",
				},
				"annotations": map[string]interface{}{
					"test_annotation": "test_value",
				},
				"ownerReferences": []map[string]interface{}{
					{
						"kind":       "Pod",
						"name":       "poda",
						"apiVersion": "v1",
						"uid":        "1",
					},
					{
						"kind":       "Pod",
						"name":       "podb",
						"apiVersion": "v1",
						"uid":        "2",
					},
				},
				"finalizers": []interface{}{
					"finalizer.1",
					"finalizer.2",
				},
				"clusterName": "cluster123",
			},
		},
	}

	if got, want := unstruct.GetAPIVersion(), "test_version"; got != want {
		t.Errorf("GetAPIVersion() = %s, want %s", got, want)
	}

	if got, want := unstruct.GetKind(), "test_kind"; got != want {
		t.Errorf("GetKind() = %s, want %s", got, want)
	}

	if got, want := unstruct.GetNamespace(), "test_namespace"; got != want {
		t.Errorf("GetNamespace() = %s, want %s", got, want)
	}

	if got, want := unstruct.GetName(), "test_name"; got != want {
		t.Errorf("GetName() = %s, want %s", got, want)
	}

	if got, want := unstruct.GetGenerateName(), "test_generateName"; got != want {
		t.Errorf("GetGenerateName() = %s, want %s", got, want)
	}

	if got, want := unstruct.GetUID(), types.UID("test_uid"); got != want {
		t.Errorf("GetUID() = %s, want %s", got, want)
	}

	if got, want := unstruct.GetResourceVersion(), "test_resourceVersion"; got != want {
		t.Errorf("GetResourceVersion() = %s, want %s", got, want)
	}

	if got, want := unstruct.GetSelfLink(), "test_selfLink"; got != want {
		t.Errorf("GetSelfLink() = %s, want %s", got, want)
	}

	if got, want := unstruct.GetCreationTimestamp(), metav1.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC); !got.Equal(want) {
		t.Errorf("GetCreationTimestamp() = %s, want %s", got, want)
	}

	if got, want := unstruct.GetDeletionTimestamp(), metav1.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC); got == nil || !got.Equal(want) {
		t.Errorf("GetDeletionTimestamp() = %s, want %s", got, want)
	}

	if got, want := unstruct.GetLabels(), map[string]string{"test_label": "test_value"}; !reflect.DeepEqual(got, want) {
		t.Errorf("GetLabels() = %s, want %s", got, want)
	}

	if got, want := unstruct.GetAnnotations(), map[string]string{"test_annotation": "test_value"}; !reflect.DeepEqual(got, want) {
		t.Errorf("GetAnnotations() = %s, want %s", got, want)
	}
	refs := unstruct.GetOwnerReferences()
	expectedOwnerReferences := []metav1.OwnerReference{
		{
			Kind:       "Pod",
			Name:       "poda",
			APIVersion: "v1",
			UID:        "1",
		},
		{
			Kind:       "Pod",
			Name:       "podb",
			APIVersion: "v1",
			UID:        "2",
		},
	}
	if got, want := refs, expectedOwnerReferences; !reflect.DeepEqual(got, want) {
		t.Errorf("GetOwnerReferences()=%v, want %v", got, want)
	}
	if got, want := unstruct.GetFinalizers(), []string{"finalizer.1", "finalizer.2"}; !reflect.DeepEqual(got, want) {
		t.Errorf("GetFinalizers()=%v, want %v", got, want)
	}
	if got, want := unstruct.GetClusterName(), "cluster123"; got != want {
		t.Errorf("GetClusterName()=%v, want %v", got, want)
	}
}