Code example #1
func TestWatchPods(t *testing.T) {
	testJob := newJob(2, 2)
	clientset := fake.NewSimpleClientset(testJob)
	fakeWatch := watch.NewFake()
	clientset.PrependWatchReactor("pods", core.DefaultWatchReactor(fakeWatch, nil))
	manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
	manager.podStoreSynced = alwaysReady
	manager.jobStoreSynced = alwaysReady

	// Put one job into the job store; the pod arrives via the fake watch below.
	sharedInformerFactory.Jobs().Informer().GetIndexer().Add(testJob)
	received := make(chan struct{})
	// The pod update sent through the fakeWatcher should be matched to its managing job,
	// which is then sent into the syncHandler.
	manager.syncHandler = func(key string) error {
		ns, name, err := cache.SplitMetaNamespaceKey(key)
		if err != nil {
			t.Errorf("Error getting namespace/name from key %v: %v", key, err)
		}
		job, err := manager.jobLister.Jobs(ns).Get(name)
		if err != nil {
			t.Errorf("Expected to find job under key %v: %v", key, err)
		}
		if !api.Semantic.DeepDerivative(job, testJob) {
			t.Errorf("\nExpected %#v,\nbut got %#v", testJob, job)
			close(received)
			return nil
		}
		close(received)
		return nil
	}
	// Start only the pod watcher and the workqueue, send a watch event,
	// and make sure it hits the sync method for the right job.
	stopCh := make(chan struct{})
	defer close(stopCh)
	go sharedInformerFactory.Pods().Informer().Run(stopCh)
	go wait.Until(manager.worker, 10*time.Millisecond, stopCh)

	pods := newPodList(1, v1.PodRunning, testJob)
	testPod := pods[0]
	testPod.Status.Phase = v1.PodFailed
	fakeWatch.Add(&testPod)

	t.Log("Waiting for pod to reach syncHandler")
	<-received
}
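Since every example in this collection is built around fake.NewSimpleClientset, a minimal, self-contained sketch of the watch-reactor pattern used above may help. It is written against current k8s.io/client-go import paths, which differ from the older in-tree Kubernetes packages these tests use, so treat it as an illustration rather than a drop-in snippet.

package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
)

func main() {
	clientset := fake.NewSimpleClientset()
	fakeWatch := watch.NewFake()
	// Route every watch on "pods" through the fake watcher so the caller fully controls events.
	clientset.PrependWatchReactor("pods", core.DefaultWatchReactor(fakeWatch, nil))

	w, err := clientset.CoreV1().Pods("default").Watch(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}

	// Inject an event, just as fakeWatch.Add(&testPod) does in TestWatchPods above.
	go fakeWatch.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"}})

	evt := <-w.ResultChan()
	fmt.Println(evt.Type, evt.Object.(*v1.Pod).Name) // ADDED demo
}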
Code example #2
File: master_test.go Project: jonboulle/kubernetes
// TestGetNodeAddresses verifies that proper results are returned
// when requesting node addresses.
func TestGetNodeAddresses(t *testing.T) {
	assert := assert.New(t)

	fakeNodeClient := fake.NewSimpleClientset(makeNodeList([]string{"node1", "node2"}, apiv1.NodeResources{})).Core().Nodes()
	addressProvider := nodeAddressProvider{fakeNodeClient}

	// Fail case (no addresses associated with nodes)
	nodes, _ := fakeNodeClient.List(apiv1.ListOptions{})
	addrs, err := addressProvider.externalAddresses()

	assert.Error(err, "addresses should have caused an error as there are no addresses.")
	assert.Equal([]string(nil), addrs)

	// Pass case with External type IP
	nodes, _ = fakeNodeClient.List(apiv1.ListOptions{})
	for index := range nodes.Items {
		nodes.Items[index].Status.Addresses = []apiv1.NodeAddress{{Type: apiv1.NodeExternalIP, Address: "127.0.0.1"}}
		fakeNodeClient.Update(&nodes.Items[index])
	}
	addrs, err = addressProvider.externalAddresses()
	assert.NoError(err, "addresses should not have returned an error.")
	assert.Equal([]string{"127.0.0.1", "127.0.0.1"}, addrs)

	// Pass case with LegacyHost type IP
	nodes, _ = fakeNodeClient.List(apiv1.ListOptions{})
	for index := range nodes.Items {
		nodes.Items[index].Status.Addresses = []apiv1.NodeAddress{{Type: apiv1.NodeLegacyHostIP, Address: "127.0.0.2"}}
		fakeNodeClient.Update(&nodes.Items[index])
	}
	addrs, err = addressProvider.externalAddresses()
	assert.NoError(err, "addresses failback should not have returned an error.")
	assert.Equal([]string{"127.0.0.2", "127.0.0.2"}, addrs)
}
Code example #3
func TestPlugin(t *testing.T) {
	var (
		testPodUID     = types.UID("test_pod_uid")
		testVolumeName = "test_volume_name"
		testNamespace  = "test_configmap_namespace"
		testName       = "test_configmap_name"

		volumeSpec    = volumeSpec(testVolumeName, testName, 0644)
		configMap     = configMap(testNamespace, testName)
		client        = fake.NewSimpleClientset(&configMap)
		pluginMgr     = volume.VolumePluginMgr{}
		tempDir, host = newTestHost(t, client)
	)

	defer os.RemoveAll(tempDir)
	pluginMgr.InitPlugins(ProbeVolumePlugins(), host)

	plugin, err := pluginMgr.FindPluginByName(configMapPluginName)
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}

	pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{Namespace: testNamespace, UID: testPodUID}}
	mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		t.Errorf("Got a nil Mounter")
	}

	vName, err := plugin.GetVolumeName(volume.NewSpecFromVolume(volumeSpec))
	if err != nil {
		t.Errorf("Failed to GetVolumeName: %v", err)
	}
	if vName != "test_volume_name/test_configmap_name" {
		t.Errorf("Got unexpect VolumeName %v", vName)
	}

	volumePath := mounter.GetPath()
	if !strings.HasSuffix(volumePath, fmt.Sprintf("pods/test_pod_uid/volumes/kubernetes.io~configmap/test_volume_name")) {
		t.Errorf("Got unexpected path: %s", volumePath)
	}

	fsGroup := int64(1001)
	err = mounter.SetUp(&fsGroup)
	if err != nil {
		t.Errorf("Failed to setup volume: %v", err)
	}
	if _, err := os.Stat(volumePath); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}

	doTestConfigMapDataInVolume(volumePath, configMap, t)
	doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t)
}
Code example #4
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("glusterfs_test")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: "pvA",
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				Glusterfs: &v1.GlusterfsVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false},
			},
			ClaimRef: &v1.ObjectReference{
				Name: "claimA",
			},
		},
	}

	claim := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "claimA",
			Namespace: "nsA",
		},
		Spec: v1.PersistentVolumeClaimSpec{
			VolumeName: "pvA",
		},
		Status: v1.PersistentVolumeClaimStatus{
			Phase: v1.ClaimBound,
		},
	}

	ep := &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "nsA",
			Name:      "ep",
		},
		Subsets: []v1.EndpointSubset{{
			Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []v1.EndpointPort{{Name: "foo", Port: 80, Protocol: v1.ProtocolTCP}},
		}},
	}

	client := fake.NewSimpleClientset(pv, claim, ep)

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil))
	plug, _ := plugMgr.FindPluginByName(glusterfsPluginName)

	// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
	spec := volume.NewSpecFromPersistentVolume(pv, true)
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "nsA", UID: types.UID("poduid")}}
	mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})

	if !mounter.GetAttributes().ReadOnly {
		t.Errorf("Expected true for mounter.IsReadOnly")
	}
}
Code example #5
func TestDoNotDeleteMirrorPods(t *testing.T) {
	staticPod := getTestPod()
	staticPod.Annotations = map[string]string{kubetypes.ConfigSourceAnnotationKey: "file"}
	mirrorPod := getTestPod()
	mirrorPod.UID = "mirror-12345678"
	mirrorPod.Annotations = map[string]string{
		kubetypes.ConfigSourceAnnotationKey: "api",
		kubetypes.ConfigMirrorAnnotationKey: "mirror",
	}
	// Set the deletion timestamp.
	mirrorPod.DeletionTimestamp = new(metav1.Time)
	client := fake.NewSimpleClientset(mirrorPod)
	m := newTestManager(client)
	m.podManager.AddPod(staticPod)
	m.podManager.AddPod(mirrorPod)
	// Verify setup.
	assert.True(t, kubepod.IsStaticPod(staticPod), "SetUp error: staticPod")
	assert.True(t, kubepod.IsMirrorPod(mirrorPod), "SetUp error: mirrorPod")
	assert.Equal(t, m.podManager.TranslatePodUID(mirrorPod.UID), staticPod.UID)

	status := getRandomPodStatus()
	now := metav1.Now()
	status.StartTime = &now
	m.SetPodStatus(staticPod, status)

	m.testSyncBatch()
	// Expect not to see a delete action.
	verifyActions(t, m.kubeClient, []core.Action{
		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: schema.GroupVersionResource{Resource: "pods"}}},
		core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: schema.GroupVersionResource{Resource: "pods"}, Subresource: "status"}},
	})
}
Code example #6
File: fc_test.go Project: kubernetes/kubernetes
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("fc_test")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	lun := int32(0)
	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: "pvA",
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				FC: &v1.FCVolumeSource{
					TargetWWNs: []string{"some_wwn"},
					FSType:     "ext4",
					Lun:        &lun,
				},
			},
			ClaimRef: &v1.ObjectReference{
				Name: "claimA",
			},
		},
	}

	claim := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "claimA",
			Namespace: "nsA",
		},
		Spec: v1.PersistentVolumeClaimSpec{
			VolumeName: "pvA",
		},
		Status: v1.PersistentVolumeClaimStatus{
			Phase: v1.ClaimBound,
		},
	}

	client := fake.NewSimpleClientset(pv, claim)

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil))
	plug, _ := plugMgr.FindPluginByName(fcPluginName)

	// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
	spec := volume.NewSpecFromPersistentVolume(pv, true)
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
	mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})

	if !mounter.GetAttributes().ReadOnly {
		t.Errorf("Expected true for mounter.IsReadOnly")
	}
}
Code example #7
File: iscsi_test.go Project: jonboulle/kubernetes
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("iscsi_test")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	pv := &v1.PersistentVolume{
		ObjectMeta: v1.ObjectMeta{
			Name: "pvA",
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				ISCSI: &v1.ISCSIVolumeSource{
					TargetPortal: "127.0.0.1:3260",
					IQN:          "iqn.2014-12.server:storage.target01",
					FSType:       "ext4",
					Lun:          0,
				},
			},
			ClaimRef: &v1.ObjectReference{
				Name: "claimA",
			},
		},
	}

	claim := &v1.PersistentVolumeClaim{
		ObjectMeta: v1.ObjectMeta{
			Name:      "claimA",
			Namespace: "nsA",
		},
		Spec: v1.PersistentVolumeClaimSpec{
			VolumeName: "pvA",
		},
		Status: v1.PersistentVolumeClaimStatus{
			Phase: v1.ClaimBound,
		},
	}

	client := fake.NewSimpleClientset(pv, claim)

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil))
	plug, _ := plugMgr.FindPluginByName(iscsiPluginName)

	// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
	spec := volume.NewSpecFromPersistentVolume(pv, true)
	pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
	mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})

	if !mounter.GetAttributes().ReadOnly {
		t.Errorf("Expected true for mounter.IsReadOnly")
	}
}
Code example #8
func TestSyncBatch(t *testing.T) {
	syncer := newTestManager(&fake.Clientset{})
	testPod := getTestPod()
	syncer.kubeClient = fake.NewSimpleClientset(testPod)
	syncer.SetPodStatus(testPod, getRandomPodStatus())
	syncer.testSyncBatch()
	verifyActions(t, syncer.kubeClient, []core.Action{
		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: schema.GroupVersionResource{Resource: "pods"}}},
		core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: schema.GroupVersionResource{Resource: "pods"}, Subresource: "status"}},
	},
	)
}
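The verifyActions helper used by these status-manager tests is not shown in this collection; what it relies on is that the fake clientset records every API call it serves. A minimal sketch of that recording behaviour, assuming current k8s.io/client-go packages (the object names here are illustrative, not the ones used above):

package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p", Namespace: "default"}}
	client := fake.NewSimpleClientset(pod)

	// Exercise the client the way production code would.
	_, _ = client.CoreV1().Pods("default").Get(context.TODO(), "p", metav1.GetOptions{})
	_ = client.CoreV1().Pods("default").Delete(context.TODO(), "p", metav1.DeleteOptions{})

	// Every served call was recorded; tests such as TestSyncBatch compare this
	// list against an expected []core.Action (verb, resource, subresource).
	for _, a := range client.Actions() {
		fmt.Println(a.GetVerb(), a.GetResource().Resource, a.GetSubresource())
	}
}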
Code example #9
func TestScaleDownOldReplicaSets(t *testing.T) {
	tests := []struct {
		oldRSSizes []int
		d          *extensions.Deployment
	}{
		{
			oldRSSizes: []int{3},
			d:          newDeployment("foo", 3, nil, nil, nil, map[string]string{"foo": "bar"}),
		},
	}

	for i := range tests {
		t.Logf("running scenario %d", i)
		test := tests[i]

		var oldRSs []*extensions.ReplicaSet
		var expected []runtime.Object

		for n, size := range test.oldRSSizes {
			rs := newReplicaSet(test.d, fmt.Sprintf("%s-%d", test.d.Name, n), size)
			oldRSs = append(oldRSs, rs)

			objCopy, err := api.Scheme.Copy(rs)
			if err != nil {
				t.Errorf("unexpected error while deep-copying: %v", err)
				continue
			}
			rsCopy := objCopy.(*extensions.ReplicaSet)

			zero := int32(0)
			rsCopy.Spec.Replicas = &zero
			expected = append(expected, rsCopy)

			if *(oldRSs[n].Spec.Replicas) == *(expected[n].(*extensions.ReplicaSet).Spec.Replicas) {
				t.Errorf("broken test - original and expected RS have the same size")
			}
		}

		kc := fake.NewSimpleClientset(expected...)
		informers := informers.NewSharedInformerFactory(kc, nil, controller.NoResyncPeriodFunc())
		c := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), kc)

		c.scaleDownOldReplicaSetsForRecreate(oldRSs, test.d)
		for j := range oldRSs {
			rs := oldRSs[j]

			if *rs.Spec.Replicas != 0 {
				t.Errorf("rs %q has non-zero replicas", rs.Name)
			}
		}
	}
}
Code example #10
File: secret_test.go Project: kubernetes/kubernetes
// Test the case where the plugin's ready file exists, but the volume dir is not a
// mountpoint, which is the state the system will be in after reboot.  The dir
// should be mounted and the secret data written to it.
func TestPluginReboot(t *testing.T) {
	var (
		testPodUID     = types.UID("test_pod_uid3")
		testVolumeName = "test_volume_name"
		testNamespace  = "test_secret_namespace"
		testName       = "test_secret_name"

		volumeSpec    = volumeSpec(testVolumeName, testName, 0644)
		secret        = secret(testNamespace, testName)
		client        = fake.NewSimpleClientset(&secret)
		pluginMgr     = volume.VolumePluginMgr{}
		rootDir, host = newTestHost(t, client)
	)
	defer os.RemoveAll(rootDir)
	pluginMgr.InitPlugins(ProbeVolumePlugins(), host)

	plugin, err := pluginMgr.FindPluginByName(secretPluginName)
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}

	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, UID: testPodUID}}
	mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		t.Errorf("Got a nil Mounter")
	}

	podMetadataDir := fmt.Sprintf("%v/pods/test_pod_uid3/plugins/kubernetes.io~secret/test_volume_name", rootDir)
	util.SetReady(podMetadataDir)
	volumePath := mounter.GetPath()
	if !strings.HasSuffix(volumePath, fmt.Sprintf("pods/test_pod_uid3/volumes/kubernetes.io~secret/test_volume_name")) {
		t.Errorf("Got unexpected path: %s", volumePath)
	}

	err = mounter.SetUp(nil)
	if err != nil {
		t.Errorf("Failed to setup volume: %v", err)
	}
	if _, err := os.Stat(volumePath); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}

	doTestSecretDataInVolume(volumePath, secret, t)
	doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t)
}
Code example #11
func newTestController(initialObjects ...runtime.Object) (*DaemonSetsController, *controller.FakePodControl, *fake.Clientset) {
	clientset := fake.NewSimpleClientset(initialObjects...)
	informerFactory := informers.NewSharedInformerFactory(clientset, nil, controller.NoResyncPeriodFunc())

	manager := NewDaemonSetsController(informerFactory.DaemonSets(), informerFactory.Pods(), informerFactory.Nodes(), clientset, 0)

	manager.podStoreSynced = alwaysReady
	manager.nodeStoreSynced = alwaysReady
	manager.dsStoreSynced = alwaysReady
	podControl := &controller.FakePodControl{}
	manager.podControl = podControl
	return manager, podControl, clientset
}
Code example #12
func TestUpdateNodeStatusError(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	// No matching node for the kubelet
	testKubelet.fakeKubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{}}).ReactionChain

	if err := kubelet.updateNodeStatus(); err == nil {
		t.Errorf("unexpected non error: %v", err)
	}
	if len(testKubelet.fakeKubeClient.Actions()) != nodeStatusUpdateRetry {
		t.Errorf("unexpected actions: %v", testKubelet.fakeKubeClient.Actions())
	}
}
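The test above simulates a missing node by swapping in the ReactionChain of a clientset seeded with an empty NodeList. A related way to drive error paths is to prepend a reactor that fails a specific verb and resource; a minimal sketch, assuming current k8s.io/client-go packages:

package main

import (
	"context"
	"errors"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
)

func main() {
	client := fake.NewSimpleClientset()

	// Fail every "get" on nodes so the code under test has to exercise its error path.
	client.PrependReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
		return true, nil, errors.New("injected apiserver failure")
	})

	_, err := client.CoreV1().Nodes().Get(context.TODO(), "node1", metav1.GetOptions{})
	fmt.Println(err) // injected apiserver failure
}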
Code example #13
func TestSyncResourceQuotaNoChange(t *testing.T) {
	resourceQuota := v1.ResourceQuota{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "default",
			Name:      "rq",
		},
		Spec: v1.ResourceQuotaSpec{
			Hard: v1.ResourceList{
				v1.ResourceCPU: resource.MustParse("4"),
			},
		},
		Status: v1.ResourceQuotaStatus{
			Hard: v1.ResourceList{
				v1.ResourceCPU: resource.MustParse("4"),
			},
			Used: v1.ResourceList{
				v1.ResourceCPU: resource.MustParse("0"),
			},
		},
	}

	kubeClient := fake.NewSimpleClientset(&v1.PodList{}, &resourceQuota)
	resourceQuotaControllerOptions := &ResourceQuotaControllerOptions{
		KubeClient:   kubeClient,
		ResyncPeriod: controller.NoResyncPeriodFunc,
		Registry:     install.NewRegistry(kubeClient, nil),
		GroupKindsToReplenish: []schema.GroupKind{
			api.Kind("Pod"),
			api.Kind("Service"),
			api.Kind("ReplicationController"),
			api.Kind("PersistentVolumeClaim"),
		},
		ControllerFactory:         NewReplenishmentControllerFactoryFromClient(kubeClient),
		ReplenishmentResyncPeriod: controller.NoResyncPeriodFunc,
	}
	quotaController := NewResourceQuotaController(resourceQuotaControllerOptions)
	err := quotaController.syncResourceQuota(resourceQuota)
	if err != nil {
		t.Fatalf("Unexpected error %v", err)
	}
	expectedActionSet := sets.NewString(
		strings.Join([]string{"list", "pods", ""}, "-"),
	)
	actionSet := sets.NewString()
	for _, action := range kubeClient.Actions() {
		actionSet.Insert(strings.Join([]string{action.GetVerb(), action.GetResource().Resource, action.GetSubresource()}, "-"))
	}
	if !actionSet.HasAll(expectedActionSet.List()...) {
		t.Errorf("Expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet, actionSet, expectedActionSet.Difference(actionSet))
	}
}
Code example #14
func TestReconcilePodStatus(t *testing.T) {
	testPod := getTestPod()
	client := fake.NewSimpleClientset(testPod)
	syncer := newTestManager(client)
	syncer.SetPodStatus(testPod, getRandomPodStatus())
	// Call syncBatch directly to test reconcile
	syncer.syncBatch() // The apiStatusVersions should be set now

	podStatus, ok := syncer.GetPodStatus(testPod.UID)
	if !ok {
		t.Fatalf("Should find pod status for pod: %#v", testPod)
	}
	testPod.Status = podStatus

	// If the pod status is the same, a reconciliation is not needed,
	// syncBatch should do nothing
	syncer.podManager.UpdatePod(testPod)
	if syncer.needsReconcile(testPod.UID, podStatus) {
		t.Errorf("Pod status is the same, a reconciliation is not needed")
	}
	client.ClearActions()
	syncer.syncBatch()
	verifyActions(t, client, []core.Action{})

	// If the pod status is the same but the timestamp has been truncated to RFC3339 format
	// (lower precision, without nanoseconds), a reconciliation is still not needed and syncBatch should do nothing.
	// The StartTime should have been set in SetPodStatus().
	// TODO(random-liu): Remove this later when api becomes consistent for timestamp.
	normalizedStartTime := testPod.Status.StartTime.Rfc3339Copy()
	testPod.Status.StartTime = &normalizedStartTime
	syncer.podManager.UpdatePod(testPod)
	if syncer.needsReconcile(testPod.UID, podStatus) {
		t.Errorf("Pod status only differs for timestamp format, a reconciliation is not needed")
	}
	client.ClearActions()
	syncer.syncBatch()
	verifyActions(t, client, []core.Action{})

	// If the pod status is different, a reconciliation is needed, syncBatch should trigger an update
	testPod.Status = getRandomPodStatus()
	syncer.podManager.UpdatePod(testPod)
	if !syncer.needsReconcile(testPod.UID, podStatus) {
		t.Errorf("Pod status is different, a reconciliation is needed")
	}
	client.ClearActions()
	syncer.syncBatch()
	verifyActions(t, client, []core.Action{
		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: schema.GroupVersionResource{Resource: "pods"}}},
		core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: schema.GroupVersionResource{Resource: "pods"}, Subresource: "status"}},
	})
}
Code example #15
func TestSyncBatchChecksMismatchedUID(t *testing.T) {
	syncer := newTestManager(&fake.Clientset{})
	pod := getTestPod()
	pod.UID = "first"
	syncer.podManager.AddPod(pod)
	differentPod := getTestPod()
	differentPod.UID = "second"
	syncer.podManager.AddPod(differentPod)
	syncer.kubeClient = fake.NewSimpleClientset(pod)
	syncer.SetPodStatus(differentPod, getRandomPodStatus())
	syncer.testSyncBatch()
	verifyActions(t, syncer.kubeClient, []core.Action{
		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: schema.GroupVersionResource{Resource: "pods"}}},
	})
}
Code example #16
File: quobyte_test.go Project: kubernetes/kubernetes
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: "pvA",
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				Quobyte: &v1.QuobyteVolumeSource{Registry: "reg:7861", Volume: "vol", ReadOnly: false, User: "******", Group: "root"},
			},
			ClaimRef: &v1.ObjectReference{
				Name: "claimA",
			},
		},
	}

	claim := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "claimA",
			Namespace: "nsA",
		},
		Spec: v1.PersistentVolumeClaimSpec{
			VolumeName: "pvA",
		},
		Status: v1.PersistentVolumeClaimStatus{
			Phase: v1.ClaimBound,
		},
	}

	tmpDir, err := utiltesting.MkTmpdir("quobyte_test")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	client := fake.NewSimpleClientset(pv, claim)
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil))
	plug, _ := plugMgr.FindPluginByName(quobytePluginName)

	// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
	spec := volume.NewSpecFromPersistentVolume(pv, true)
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
	mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})

	if !mounter.GetAttributes().ReadOnly {
		t.Errorf("Expected true for mounter.IsReadOnly")
	}
}
Code example #17
File: aws_ebs_test.go Project: jonboulle/kubernetes
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
	pv := &v1.PersistentVolume{
		ObjectMeta: v1.ObjectMeta{
			Name: "pvA",
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{},
			},
			ClaimRef: &v1.ObjectReference{
				Name: "claimA",
			},
		},
	}

	claim := &v1.PersistentVolumeClaim{
		ObjectMeta: v1.ObjectMeta{
			Name:      "claimA",
			Namespace: "nsA",
		},
		Spec: v1.PersistentVolumeClaimSpec{
			VolumeName: "pvA",
		},
		Status: v1.PersistentVolumeClaimStatus{
			Phase: v1.ClaimBound,
		},
	}

	clientset := fake.NewSimpleClientset(pv, claim)

	tmpDir, err := utiltesting.MkTmpdir("awsebsTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, clientset, nil))
	plug, _ := plugMgr.FindPluginByName(awsElasticBlockStorePluginName)

	// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
	spec := volume.NewSpecFromPersistentVolume(pv, true)
	pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
	mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})

	if !mounter.GetAttributes().ReadOnly {
		t.Errorf("Expected true for mounter.IsReadOnly")
	}
}
Code example #18
func (f *fixture) run(deploymentName string) {
	f.client = fake.NewSimpleClientset(f.objects...)
	informers := informers.NewSharedInformerFactory(f.client, nil, controller.NoResyncPeriodFunc())
	c := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), f.client)
	c.eventRecorder = &record.FakeRecorder{}
	c.dListerSynced = alwaysReady
	c.rsListerSynced = alwaysReady
	c.podListerSynced = alwaysReady
	for _, d := range f.dLister {
		c.dLister.Indexer.Add(d)
	}
	for _, rs := range f.rsLister {
		c.rsLister.Indexer.Add(rs)
	}
	for _, pod := range f.podLister {
		c.podLister.Indexer.Add(pod)
	}
	stopCh := make(chan struct{})
	defer close(stopCh)
	informers.Start(stopCh)

	err := c.syncDeployment(deploymentName)
	if err != nil {
		f.t.Errorf("error syncing deployment: %v", err)
	}

	actions := filterInformerActions(f.client.Actions())
	for i, action := range actions {
		if len(f.actions) < i+1 {
			f.t.Errorf("%d unexpected actions: %+v", len(actions)-len(f.actions), actions[i:])
			break
		}

		expectedAction := f.actions[i]
		if !expectedAction.Matches(action.GetVerb(), action.GetResource().Resource) {
			f.t.Errorf("Expected\n\t%#v\ngot\n\t%#v", expectedAction, action)
			continue
		}
	}

	if len(f.actions) > len(actions) {
		f.t.Errorf("%d additional expected actions:%+v", len(f.actions)-len(actions), f.actions[len(actions):])
	}
}
Code example #19
func TestGetMountedVolumesForPodAndGetVolumesInUse(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("volumeManagerTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient())

	node, pod, pv, claim := createObjects()
	kubeClient := fake.NewSimpleClientset(node, pod, pv, claim)

	manager, err := newTestVolumeManager(tmpDir, podManager, kubeClient)
	if err != nil {
		t.Fatalf("Failed to initialize volume manager: %v", err)
	}

	stopCh := runVolumeManager(manager)
	defer close(stopCh)

	podManager.SetPods([]*v1.Pod{pod})

	// Fake node status update
	go simulateVolumeInUseUpdate(
		v1.UniqueVolumeName(node.Status.VolumesAttached[0].Name),
		stopCh,
		manager)

	err = manager.WaitForAttachAndMount(pod)
	if err != nil {
		t.Errorf("Expected success: %v", err)
	}

	expectedMounted := pod.Spec.Volumes[0].Name
	actualMounted := manager.GetMountedVolumesForPod(types.UniquePodName(pod.ObjectMeta.UID))
	if _, ok := actualMounted[expectedMounted]; !ok || (len(actualMounted) != 1) {
		t.Errorf("Expected %v to be mounted to pod but got %v", expectedMounted, actualMounted)
	}

	expectedInUse := []v1.UniqueVolumeName{v1.UniqueVolumeName(node.Status.VolumesAttached[0].Name)}
	actualInUse := manager.GetVolumesInUse()
	if !reflect.DeepEqual(expectedInUse, actualInUse) {
		t.Errorf("Expected %v to be in use but got %v", expectedInUse, actualInUse)
	}
}
Code example #20
func TestStaleUpdates(t *testing.T) {
	pod := getTestPod()
	client := fake.NewSimpleClientset(pod)
	m := newTestManager(client)

	status := v1.PodStatus{Message: "initial status"}
	m.SetPodStatus(pod, status)
	status.Message = "first version bump"
	m.SetPodStatus(pod, status)
	status.Message = "second version bump"
	m.SetPodStatus(pod, status)
	verifyUpdates(t, m, 3)

	t.Logf("First sync pushes latest status.")
	m.testSyncBatch()
	verifyActions(t, m.kubeClient, []core.Action{
		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: schema.GroupVersionResource{Resource: "pods"}}},
		core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: schema.GroupVersionResource{Resource: "pods"}, Subresource: "status"}},
	})
	client.ClearActions()

	for i := 0; i < 2; i++ {
		t.Logf("Next 2 syncs should be ignored (%d).", i)
		m.testSyncBatch()
		verifyActions(t, m.kubeClient, []core.Action{})
	}

	t.Log("Unchanged status should not send an update.")
	m.SetPodStatus(pod, status)
	verifyUpdates(t, m, 0)

	t.Log("... unless it's stale.")
	m.apiStatusVersions[pod.UID] = m.apiStatusVersions[pod.UID] - 1

	m.SetPodStatus(pod, status)
	m.testSyncBatch()
	verifyActions(t, m.kubeClient, []core.Action{
		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: schema.GroupVersionResource{Resource: "pods"}}},
		core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: schema.GroupVersionResource{Resource: "pods"}, Subresource: "status"}},
	})

	// Nothing stuck in the pipe.
	verifyUpdates(t, m, 0)
}
Code example #21
func (f *fixture) newController() (*DeploymentController, informers.SharedInformerFactory) {
	f.client = fake.NewSimpleClientset(f.objects...)
	informers := informers.NewSharedInformerFactory(f.client, nil, controller.NoResyncPeriodFunc())
	c := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), f.client)
	c.eventRecorder = &record.FakeRecorder{}
	c.dListerSynced = alwaysReady
	c.rsListerSynced = alwaysReady
	c.podListerSynced = alwaysReady
	for _, d := range f.dLister {
		c.dLister.Indexer.Add(d)
	}
	for _, rs := range f.rsLister {
		c.rsLister.Indexer.Add(rs)
	}
	for _, pod := range f.podLister {
		c.podLister.Indexer.Add(pod)
	}
	return c, informers
}
Code example #22
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
	pv := &v1.PersistentVolume{
		ObjectMeta: v1.ObjectMeta{
			Name: "pvA",
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				HostPath: &v1.HostPathVolumeSource{Path: "foo"},
			},
			ClaimRef: &v1.ObjectReference{
				Name: "claimA",
			},
		},
	}

	claim := &v1.PersistentVolumeClaim{
		ObjectMeta: v1.ObjectMeta{
			Name:      "claimA",
			Namespace: "nsA",
		},
		Spec: v1.PersistentVolumeClaimSpec{
			VolumeName: "pvA",
		},
		Status: v1.PersistentVolumeClaimStatus{
			Phase: v1.ClaimBound,
		},
	}

	client := fake.NewSimpleClientset(pv, claim)

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("/tmp/fake", client, nil))
	plug, _ := plugMgr.FindPluginByName(hostPathPluginName)

	// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
	spec := volume.NewSpecFromPersistentVolume(pv, true)
	pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
	mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})

	if !mounter.GetAttributes().ReadOnly {
		t.Errorf("Expected true for mounter.IsReadOnly")
	}
}
Code example #23
func TestWatchJobs(t *testing.T) {
	clientset := fake.NewSimpleClientset()
	fakeWatch := watch.NewFake()
	clientset.PrependWatchReactor("jobs", core.DefaultWatchReactor(fakeWatch, nil))
	manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
	manager.podStoreSynced = alwaysReady
	manager.jobStoreSynced = alwaysReady

	var testJob batch.Job
	received := make(chan struct{})

	// The update sent through the fakeWatcher should make its way into the workqueue,
	// and eventually into the syncHandler.
	manager.syncHandler = func(key string) error {
		defer close(received)
		ns, name, err := cache.SplitMetaNamespaceKey(key)
		if err != nil {
			t.Errorf("Error getting namespace/name from key %v: %v", key, err)
		}
		job, err := manager.jobLister.Jobs(ns).Get(name)
		if err != nil || job == nil {
			t.Errorf("Expected to find job under key %v: %v", key, err)
			return nil
		}
		if !api.Semantic.DeepDerivative(*job, testJob) {
			t.Errorf("Expected %#v, but got %#v", testJob, *job)
		}
		return nil
	}
	// Start only the job watcher and the workqueue, send a watch event,
	// and make sure it hits the sync method.
	stopCh := make(chan struct{})
	defer close(stopCh)
	sharedInformerFactory.Start(stopCh)
	go manager.Run(1, stopCh)

	// We're sending a new job to see if it reaches the syncHandler.
	testJob.Namespace = "bar"
	testJob.Name = "foo"
	fakeWatch.Add(&testJob)
	t.Log("Waiting for job to reach syncHandler")
	<-received
}
Code example #24
func TestDeletePods(t *testing.T) {
	pod := getTestPod()
	// Set the deletion timestamp.
	pod.DeletionTimestamp = new(metav1.Time)
	client := fake.NewSimpleClientset(pod)
	m := newTestManager(client)
	m.podManager.AddPod(pod)

	status := getRandomPodStatus()
	now := metav1.Now()
	status.StartTime = &now
	m.SetPodStatus(pod, status)

	m.testSyncBatch()
	// Expect to see a delete action.
	verifyActions(t, m.kubeClient, []core.Action{
		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: schema.GroupVersionResource{Resource: "pods"}}},
		core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: schema.GroupVersionResource{Resource: "pods"}, Subresource: "status"}},
		core.DeleteActionImpl{ActionImpl: core.ActionImpl{Verb: "delete", Resource: schema.GroupVersionResource{Resource: "pods"}}},
	})
}
Code example #25
func TestDescribeDeployment(t *testing.T) {
	fake := fake.NewSimpleClientset()
	versionedFake := versionedfake.NewSimpleClientset(&v1beta1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "bar",
			Namespace: "foo",
		},
		Spec: v1beta1.DeploymentSpec{
			Replicas: util.Int32Ptr(1),
			Selector: &metav1.LabelSelector{},
			Template: v1.PodTemplateSpec{},
		},
	})
	d := DeploymentDescriber{fake, versionedFake}
	out, err := d.Describe("foo", "bar", DescriberSettings{ShowEvents: true})
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if !strings.Contains(out, "bar") || !strings.Contains(out, "foo") {
		t.Errorf("unexpected out: %s", out)
	}
}
Code example #26
File: services_test.go Project: jonboulle/kubernetes
func TestServiceEvaluatorMatchesResources(t *testing.T) {
	kubeClient := fake.NewSimpleClientset()
	evaluator := NewServiceEvaluator(kubeClient)
	// we give a lot of resources
	input := []api.ResourceName{
		api.ResourceConfigMaps,
		api.ResourceCPU,
		api.ResourceServices,
		api.ResourceServicesNodePorts,
		api.ResourceServicesLoadBalancers,
	}
	// but we only match these...
	expected := quota.ToSet([]api.ResourceName{
		api.ResourceServices,
		api.ResourceServicesNodePorts,
		api.ResourceServicesLoadBalancers,
	})
	actual := quota.ToSet(evaluator.MatchingResources(input))
	if !expected.Equal(actual) {
		t.Errorf("expected: %v, actual: %v", expected, actual)
	}
}
Code example #27
func TestGetExtraSupplementalGroupsForPod(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("volumeManagerTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient())

	node, pod, _, claim := createObjects()

	existingGid := pod.Spec.SecurityContext.SupplementalGroups[0]

	cases := []struct {
		gidAnnotation string
		expected      []int64
	}{
		{
			gidAnnotation: "777",
			expected:      []int64{777},
		},
		{
			gidAnnotation: strconv.FormatInt(existingGid, 10),
			expected:      []int64{},
		},
		{
			gidAnnotation: "a",
			expected:      []int64{},
		},
		{
			gidAnnotation: "",
			expected:      []int64{},
		},
	}

	for _, tc := range cases {
		pv := &v1.PersistentVolume{
			ObjectMeta: v1.ObjectMeta{
				Name: "pvA",
				Annotations: map[string]string{
					volumehelper.VolumeGidAnnotationKey: tc.gidAnnotation,
				},
			},
			Spec: v1.PersistentVolumeSpec{
				PersistentVolumeSource: v1.PersistentVolumeSource{
					GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
						PDName: "fake-device",
					},
				},
				ClaimRef: &v1.ObjectReference{
					Name: claim.ObjectMeta.Name,
				},
			},
		}
		kubeClient := fake.NewSimpleClientset(node, pod, pv, claim)

		manager, err := newTestVolumeManager(tmpDir, podManager, kubeClient)
		if err != nil {
			t.Errorf("Failed to initialize volume manager: %v", err)
			continue
		}

		stopCh := runVolumeManager(manager)
		defer func() {
			close(stopCh)
		}()

		podManager.SetPods([]*v1.Pod{pod})

		// Fake node status update
		go simulateVolumeInUseUpdate(
			v1.UniqueVolumeName(node.Status.VolumesAttached[0].Name),
			stopCh,
			manager)

		err = manager.WaitForAttachAndMount(pod)
		if err != nil {
			t.Errorf("Expected success: %v", err)
			continue
		}

		actual := manager.GetExtraSupplementalGroupsForPod(pod)
		if !reflect.DeepEqual(tc.expected, actual) {
			t.Errorf("Expected supplemental groups %v, got %v", tc.expected, actual)
		}
	}
}
Code example #28
func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	clock := testKubelet.fakeClock
	kubeClient := testKubelet.fakeKubeClient
	existingNode := v1.Node{ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname}}
	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
	mockCadvisor := testKubelet.fakeCadvisor
	mockCadvisor.On("Start").Return(nil)
	machineInfo := &cadvisorapi.MachineInfo{
		MachineID:      "123",
		SystemUUID:     "abc",
		BootID:         "1b3",
		NumCores:       2,
		MemoryCapacity: 10E9,
	}
	mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
	versionInfo := &cadvisorapi.VersionInfo{
		KernelVersion:      "3.16.0-0.bpo.4-amd64",
		ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
	}
	mockCadvisor.On("VersionInfo").Return(versionInfo, nil)

	// Make kubelet report that it has sufficient disk space.
	if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 200, 200, 100, 100); err != nil {
		t.Fatalf("can't update disk space manager: %v", err)
	}

	expectedNode := &v1.Node{
		ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname},
		Spec:       v1.NodeSpec{},
		Status: v1.NodeStatus{
			Conditions: []v1.NodeCondition{
				{
					Type:               v1.NodeOutOfDisk,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientDisk",
					Message:            "kubelet has sufficient disk space available",
					LastHeartbeatTime:  metav1.Time{},
					LastTransitionTime: metav1.Time{},
				},
				{
					Type:               v1.NodeMemoryPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientMemory",
					Message:            fmt.Sprintf("kubelet has sufficient memory available"),
					LastHeartbeatTime:  metav1.Time{},
					LastTransitionTime: metav1.Time{},
				},
				{
					Type:               v1.NodeDiskPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasNoDiskPressure",
					Message:            fmt.Sprintf("kubelet has no disk pressure"),
					LastHeartbeatTime:  metav1.Time{},
					LastTransitionTime: metav1.Time{},
				},
				{}, //placeholder
			},
			NodeInfo: v1.NodeSystemInfo{
				MachineID:               "123",
				SystemUUID:              "abc",
				BootID:                  "1b3",
				KernelVersion:           "3.16.0-0.bpo.4-amd64",
				OSImage:                 "Debian GNU/Linux 7 (wheezy)",
				OperatingSystem:         goruntime.GOOS,
				Architecture:            goruntime.GOARCH,
				ContainerRuntimeVersion: "test://1.5.0",
				KubeletVersion:          version.Get().String(),
				KubeProxyVersion:        version.Get().String(),
			},
			Capacity: v1.ResourceList{
				v1.ResourceCPU:       *resource.NewMilliQuantity(2000, resource.DecimalSI),
				v1.ResourceMemory:    *resource.NewQuantity(10E9, resource.BinarySI),
				v1.ResourcePods:      *resource.NewQuantity(0, resource.DecimalSI),
				v1.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
			},
			Allocatable: v1.ResourceList{
				v1.ResourceCPU:       *resource.NewMilliQuantity(1800, resource.DecimalSI),
				v1.ResourceMemory:    *resource.NewQuantity(9900E6, resource.BinarySI),
				v1.ResourcePods:      *resource.NewQuantity(0, resource.DecimalSI),
				v1.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
			},
			Addresses: []v1.NodeAddress{
				{Type: v1.NodeLegacyHostIP, Address: "127.0.0.1"},
				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			Images: []v1.ContainerImage{
				{
					Names:     []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"},
					SizeBytes: 456,
				},
				{
					Names:     []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"},
					SizeBytes: 123,
				},
			},
		},
	}

	checkNodeStatus := func(status v1.ConditionStatus, reason string) {
		kubeClient.ClearActions()
		if err := kubelet.updateNodeStatus(); err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		actions := kubeClient.Actions()
		if len(actions) != 2 {
			t.Fatalf("unexpected actions: %v", actions)
		}
		if !actions[1].Matches("patch", "nodes") || actions[1].GetSubresource() != "status" {
			t.Fatalf("unexpected actions: %v", actions)
		}
		updatedNode, err := applyNodeStatusPatch(&existingNode, actions[1].(core.PatchActionImpl).GetPatch())
		if err != nil {
			t.Fatalf("can't apply node status patch: %v", err)
		}

		for i, cond := range updatedNode.Status.Conditions {
			if cond.LastHeartbeatTime.IsZero() {
				t.Errorf("unexpected zero last probe timestamp")
			}
			if cond.LastTransitionTime.IsZero() {
				t.Errorf("unexpected zero last transition timestamp")
			}
			updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
			updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
		}

		// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
		lastIndex := len(updatedNode.Status.Conditions) - 1
		if updatedNode.Status.Conditions[lastIndex].Type != v1.NodeReady {
			t.Errorf("unexpected node condition order. NodeReady should be last.")
		}
		if updatedNode.Status.Conditions[lastIndex].Message == "" {
			t.Errorf("unexpected empty condition message")
		}
		updatedNode.Status.Conditions[lastIndex].Message = ""
		expectedNode.Status.Conditions[lastIndex] = v1.NodeCondition{
			Type:               v1.NodeReady,
			Status:             status,
			Reason:             reason,
			LastHeartbeatTime:  metav1.Time{},
			LastTransitionTime: metav1.Time{},
		}
		if !api.Semantic.DeepEqual(expectedNode, updatedNode) {
			t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNode, updatedNode))
		}
	}

	// TODO(random-liu): Refactor the unit test to be table driven test.
	// Should report kubelet not ready if the runtime check is out of date
	clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime))
	kubelet.updateRuntimeUp()
	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")

	// Should report kubelet ready if the runtime check is updated
	clock.SetTime(time.Now())
	kubelet.updateRuntimeUp()
	checkNodeStatus(v1.ConditionTrue, "KubeletReady")

	// Should report kubelet not ready if the runtime check is out of date
	clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime))
	kubelet.updateRuntimeUp()
	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")

	// Should report kubelet not ready if the runtime check failed
	fakeRuntime := testKubelet.fakeRuntime
	// Inject error into fake runtime status check, node should be NotReady
	fakeRuntime.StatusErr = fmt.Errorf("injected runtime status error")
	clock.SetTime(time.Now())
	kubelet.updateRuntimeUp()
	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")

	// Test cri integration.
	kubelet.kubeletConfiguration.EnableCRI = true
	fakeRuntime.StatusErr = nil

	// Should report node not ready if runtime status is nil.
	fakeRuntime.RuntimeStatus = nil
	kubelet.updateRuntimeUp()
	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")

	// Should report node not ready if runtime status is empty.
	fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{}
	kubelet.updateRuntimeUp()
	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")

	// Should report node not ready if RuntimeReady is false.
	fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
		Conditions: []kubecontainer.RuntimeCondition{
			{Type: kubecontainer.RuntimeReady, Status: false},
			{Type: kubecontainer.NetworkReady, Status: true},
		},
	}
	kubelet.updateRuntimeUp()
	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")

	// Should report node ready if RuntimeReady is true.
	fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
		Conditions: []kubecontainer.RuntimeCondition{
			{Type: kubecontainer.RuntimeReady, Status: true},
			{Type: kubecontainer.NetworkReady, Status: true},
		},
	}
	kubelet.updateRuntimeUp()
	checkNodeStatus(v1.ConditionTrue, "KubeletReady")

	// Should report node not ready if NetworkReady is false.
	fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
		Conditions: []kubecontainer.RuntimeCondition{
			{Type: kubecontainer.RuntimeReady, Status: true},
			{Type: kubecontainer.NetworkReady, Status: false},
		},
	}
	kubelet.updateRuntimeUp()
	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
}
Code example #29
func TestUpdateExistingNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	clock := testKubelet.fakeClock
	// Do not set nano second, because apiserver function doesn't support nano second. (Only support
	// RFC3339).
	clock.SetTime(time.Unix(123456, 0))
	kubeClient := testKubelet.fakeKubeClient
	existingNode := v1.Node{
		ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname},
		Spec:       v1.NodeSpec{},
		Status: v1.NodeStatus{
			Conditions: []v1.NodeCondition{
				{
					Type:               v1.NodeReady,
					Status:             v1.ConditionTrue,
					Reason:             "KubeletReady",
					Message:            fmt.Sprintf("kubelet is posting ready status"),
					LastHeartbeatTime:  metav1.NewTime(clock.Now()),
					LastTransitionTime: metav1.NewTime(clock.Now()),
				},
				{
					Type:               v1.NodeOutOfDisk,
					Status:             v1.ConditionTrue,
					Reason:             "KubeletOutOfDisk",
					Message:            "out of disk space",
					LastHeartbeatTime:  metav1.NewTime(clock.Now()),
					LastTransitionTime: metav1.NewTime(clock.Now()),
				},
			},
		},
	}
	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
	mockCadvisor := testKubelet.fakeCadvisor
	machineInfo := &cadvisorapi.MachineInfo{
		MachineID:      "123",
		SystemUUID:     "abc",
		BootID:         "1b3",
		NumCores:       2,
		MemoryCapacity: 1024,
	}
	fsInfo := cadvisorapiv2.FsInfo{
		Device: "123",
	}
	mockCadvisor.On("Start").Return(nil)
	mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
	mockCadvisor.On("ImagesFsInfo").Return(fsInfo, nil)
	mockCadvisor.On("RootFsInfo").Return(fsInfo, nil)
	versionInfo := &cadvisorapi.VersionInfo{
		KernelVersion:      "3.16.0-0.bpo.4-amd64",
		ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
		DockerVersion:      "1.5.0",
	}
	mockCadvisor.On("VersionInfo").Return(versionInfo, nil)

	kubelet.outOfDiskTransitionFrequency = 5 * time.Second

	ood := v1.NodeCondition{
		Type:               v1.NodeOutOfDisk,
		Status:             v1.ConditionTrue,
		Reason:             "KubeletOutOfDisk",
		Message:            "out of disk space",
		LastHeartbeatTime:  metav1.NewTime(clock.Now()), // placeholder
		LastTransitionTime: metav1.NewTime(clock.Now()), // placeholder
	}
	noOod := v1.NodeCondition{
		Type:               v1.NodeOutOfDisk,
		Status:             v1.ConditionFalse,
		Reason:             "KubeletHasSufficientDisk",
		Message:            fmt.Sprintf("kubelet has sufficient disk space available"),
		LastHeartbeatTime:  metav1.NewTime(clock.Now()), // placeholder
		LastTransitionTime: metav1.NewTime(clock.Now()), // placeholder
	}

	testCases := []struct {
		rootFsAvail   uint64
		dockerFsAvail uint64
		expected      v1.NodeCondition
	}{
		{
			// NodeOutOfDisk==false
			rootFsAvail:   200,
			dockerFsAvail: 200,
			expected:      ood,
		},
		{
			// NodeOutOfDisk==true
			rootFsAvail:   50,
			dockerFsAvail: 200,
			expected:      ood,
		},
		{
			// NodeOutOfDisk==false
			rootFsAvail:   200,
			dockerFsAvail: 200,
			expected:      ood,
		},
		{
			// NodeOutOfDisk==true
			rootFsAvail:   200,
			dockerFsAvail: 50,
			expected:      ood,
		},
		{
			// NodeOutOfDisk==false
			rootFsAvail:   200,
			dockerFsAvail: 200,
			expected:      noOod,
		},
	}

	kubelet.updateRuntimeUp()
	for tcIdx, tc := range testCases {
		// Step by a second
		clock.Step(1 * time.Second)

		// Setup expected times.
		tc.expected.LastHeartbeatTime = metav1.NewTime(clock.Now())
		// In the last case, there should be a status transition for NodeOutOfDisk
		if tcIdx == len(testCases)-1 {
			tc.expected.LastTransitionTime = metav1.NewTime(clock.Now())
		}

		// Make kubelet report that it has sufficient disk space
		if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, tc.rootFsAvail, tc.dockerFsAvail, 100, 100); err != nil {
			t.Fatalf("can't update disk space manager: %v", err)
		}

		if err := kubelet.updateNodeStatus(); err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		actions := kubeClient.Actions()
		if len(actions) != 2 {
			t.Errorf("%d. unexpected actions: %v", tcIdx, actions)
		}
		patchAction, ok := actions[1].(core.PatchActionImpl)
		if !ok {
			t.Errorf("%d. unexpected action type.  expected PatchActionImpl, got %#v", tcIdx, actions[1])
		}
		updatedNode, err := applyNodeStatusPatch(&existingNode, patchAction.GetPatch())
		if err != nil {
			t.Fatalf("can't apply node status patch: %v", err)
		}
		kubeClient.ClearActions()

		var oodCondition v1.NodeCondition
		for i, cond := range updatedNode.Status.Conditions {
			if cond.Type == v1.NodeOutOfDisk {
				oodCondition = updatedNode.Status.Conditions[i]
			}
		}

		if !reflect.DeepEqual(tc.expected, oodCondition) {
			t.Errorf("%d.\nunexpected objects: %s", tcIdx, diff.ObjectDiff(tc.expected, oodCondition))
		}
	}
}
Code example #30
func TestUpdateExistingNodeStatus(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	kubeClient := testKubelet.fakeKubeClient
	existingNode := v1.Node{
		ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname},
		Spec:       v1.NodeSpec{},
		Status: v1.NodeStatus{
			Conditions: []v1.NodeCondition{
				{
					Type:               v1.NodeOutOfDisk,
					Status:             v1.ConditionTrue,
					Reason:             "KubeletOutOfDisk",
					Message:            "out of disk space",
					LastHeartbeatTime:  metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
					LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
				},
				{
					Type:               v1.NodeMemoryPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientMemory",
					Message:            fmt.Sprintf("kubelet has sufficient memory available"),
					LastHeartbeatTime:  metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
					LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
				},
				{
					Type:               v1.NodeDiskPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientDisk",
					Message:            fmt.Sprintf("kubelet has sufficient disk space available"),
					LastHeartbeatTime:  metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
					LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
				},
				{
					Type:               v1.NodeReady,
					Status:             v1.ConditionTrue,
					Reason:             "KubeletReady",
					Message:            fmt.Sprintf("kubelet is posting ready status"),
					LastHeartbeatTime:  metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
					LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
				},
			},
			Capacity: v1.ResourceList{
				v1.ResourceCPU:    *resource.NewMilliQuantity(3000, resource.DecimalSI),
				v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
				v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
			},
			Allocatable: v1.ResourceList{
				v1.ResourceCPU:    *resource.NewMilliQuantity(2800, resource.DecimalSI),
				v1.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI),
				v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
			},
		},
	}
	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
	mockCadvisor := testKubelet.fakeCadvisor
	mockCadvisor.On("Start").Return(nil)
	machineInfo := &cadvisorapi.MachineInfo{
		MachineID:      "123",
		SystemUUID:     "abc",
		BootID:         "1b3",
		NumCores:       2,
		MemoryCapacity: 20E9,
	}
	mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
	versionInfo := &cadvisorapi.VersionInfo{
		KernelVersion:      "3.16.0-0.bpo.4-amd64",
		ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
	}
	mockCadvisor.On("VersionInfo").Return(versionInfo, nil)

	// Make kubelet report that it is out of disk space.
	if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 50, 50, 100, 100); err != nil {
		t.Fatalf("can't update disk space manager: %v", err)
	}

	expectedNode := &v1.Node{
		ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname},
		Spec:       v1.NodeSpec{},
		Status: v1.NodeStatus{
			Conditions: []v1.NodeCondition{
				{
					Type:               v1.NodeOutOfDisk,
					Status:             v1.ConditionTrue,
					Reason:             "KubeletOutOfDisk",
					Message:            "out of disk space",
					LastHeartbeatTime:  metav1.Time{}, // placeholder
					LastTransitionTime: metav1.Time{}, // placeholder
				},
				{
					Type:               v1.NodeMemoryPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientMemory",
					Message:            fmt.Sprintf("kubelet has sufficient memory available"),
					LastHeartbeatTime:  metav1.Time{},
					LastTransitionTime: metav1.Time{},
				},
				{
					Type:               v1.NodeDiskPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientDisk",
					Message:            fmt.Sprintf("kubelet has sufficient disk space available"),
					LastHeartbeatTime:  metav1.Time{},
					LastTransitionTime: metav1.Time{},
				},
				{
					Type:               v1.NodeReady,
					Status:             v1.ConditionTrue,
					Reason:             "KubeletReady",
					Message:            fmt.Sprintf("kubelet is posting ready status"),
					LastHeartbeatTime:  metav1.Time{}, // placeholder
					LastTransitionTime: metav1.Time{}, // placeholder
				},
			},
			NodeInfo: v1.NodeSystemInfo{
				MachineID:               "123",
				SystemUUID:              "abc",
				BootID:                  "1b3",
				KernelVersion:           "3.16.0-0.bpo.4-amd64",
				OSImage:                 "Debian GNU/Linux 7 (wheezy)",
				OperatingSystem:         goruntime.GOOS,
				Architecture:            goruntime.GOARCH,
				ContainerRuntimeVersion: "test://1.5.0",
				KubeletVersion:          version.Get().String(),
				KubeProxyVersion:        version.Get().String(),
			},
			Capacity: v1.ResourceList{
				v1.ResourceCPU:       *resource.NewMilliQuantity(2000, resource.DecimalSI),
				v1.ResourceMemory:    *resource.NewQuantity(20E9, resource.BinarySI),
				v1.ResourcePods:      *resource.NewQuantity(0, resource.DecimalSI),
				v1.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
			},
			Allocatable: v1.ResourceList{
				v1.ResourceCPU:       *resource.NewMilliQuantity(1800, resource.DecimalSI),
				v1.ResourceMemory:    *resource.NewQuantity(19900E6, resource.BinarySI),
				v1.ResourcePods:      *resource.NewQuantity(0, resource.DecimalSI),
				v1.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
			},
			Addresses: []v1.NodeAddress{
				{Type: v1.NodeLegacyHostIP, Address: "127.0.0.1"},
				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			// images will be sorted from max to min in node status.
			Images: []v1.ContainerImage{
				{
					Names:     []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"},
					SizeBytes: 456,
				},
				{
					Names:     []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"},
					SizeBytes: 123,
				},
			},
		},
	}

	kubelet.updateRuntimeUp()
	if err := kubelet.updateNodeStatus(); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	actions := kubeClient.Actions()
	if len(actions) != 2 {
		t.Errorf("unexpected actions: %v", actions)
	}
	patchAction, ok := actions[1].(core.PatchActionImpl)
	if !ok {
		t.Errorf("unexpected action type.  expected PatchActionImpl, got %#v", actions[1])
	}
	updatedNode, err := applyNodeStatusPatch(&existingNode, patchAction.GetPatch())
	if err != nil {
		t.Fatalf("can't apply node status patch: %v", err)
	}
	for i, cond := range updatedNode.Status.Conditions {
		// Expect LastHeartbeatTime to be updated to now, while LastTransitionTime remains the same.
		if old := metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Time; reflect.DeepEqual(cond.LastHeartbeatTime.Rfc3339Copy().UTC(), old) {
			t.Errorf("Condition %v LastProbeTime: expected \n%v\n, got \n%v", cond.Type, metav1.Now(), old)
		}
		if got, want := cond.LastTransitionTime.Rfc3339Copy().UTC(), metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Time; !reflect.DeepEqual(got, want) {
			t.Errorf("Condition %v LastTransitionTime: expected \n%#v\n, got \n%#v", cond.Type, want, got)
		}
		updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
		updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
	}

	// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
	if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != v1.NodeReady {
		t.Errorf("unexpected node condition order. NodeReady should be last.")
	}

	if !api.Semantic.DeepEqual(expectedNode, updatedNode) {
		t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNode, updatedNode))
	}
}