Code example #1
func TestSyncResourceQuotaNoChange(t *testing.T) {
	quota := api.ResourceQuota{
		Spec: api.ResourceQuotaSpec{
			Hard: api.ResourceList{
				api.ResourceCPU: resource.MustParse("4"),
			},
		},
		Status: api.ResourceQuotaStatus{
			Hard: api.ResourceList{
				api.ResourceCPU: resource.MustParse("4"),
			},
			Used: api.ResourceList{
				api.ResourceCPU: resource.MustParse("0"),
			},
		},
	}

	kubeClient := fake.NewSimpleClientset(&api.PodList{}, &quota)

	ResourceQuotaController := NewResourceQuotaController(kubeClient, controller.StaticResyncPeriodFunc(time.Second))
	err := ResourceQuotaController.syncResourceQuota(quota)
	if err != nil {
		t.Fatalf("Unexpected error %v", err)
	}

	actions := kubeClient.Actions()
	if len(actions) != 1 || !actions[0].Matches("list", "pods") {
		t.Errorf("SyncResourceQuota made an unexpected client action when state was not dirty: %v", actions)
	}
}
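
Every snippet on this page repeats the same fake-clientset loop: seed fake.NewSimpleClientset with the objects the code under test expects, run that code, then assert on the actions the fake recorded. A minimal, self-contained sketch of the loop, written against the current client-go package layout (an assumption — the examples above use the older in-tree pkg/api imports):

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// Seed the fake's object tracker with one pre-existing pod.
	client := fake.NewSimpleClientset(&corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"},
	})

	// The code under test would issue calls like this one.
	pods, err := client.CoreV1().Pods("default").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("pods seen:", len(pods.Items))

	// Every call was recorded; tests assert on verb/resource pairs,
	// just like the Matches("list", "pods") check above.
	for _, a := range client.Actions() {
		fmt.Println(a.GetVerb(), a.GetResource().Resource)
	}
}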
Code example #2
func TestIncrementUsageReplicationControllers(t *testing.T) {
	namespace := "default"
	client := fake.NewSimpleClientset(&api.ReplicationControllerList{
		Items: []api.ReplicationController{
			{
				ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
			},
		},
	})
	status := &api.ResourceQuotaStatus{
		Hard: api.ResourceList{},
		Used: api.ResourceList{},
	}
	r := api.ResourceReplicationControllers
	status.Hard[r] = resource.MustParse("2")
	status.Used[r] = resource.MustParse("1")
	dirty, err := IncrementUsage(admission.NewAttributesRecord(&api.ReplicationController{}, api.Kind("ReplicationController"), namespace, "name", api.Resource("replicationcontrollers"), "", admission.Create, nil), status, client)
	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}
	if !dirty {
		t.Errorf("Expected the status to get incremented, therefore should have been dirty")
	}
	quantity := status.Used[r]
	if quantity.Value() != int64(2) {
		t.Errorf("Expected new item count to be 2, but was %s", quantity.String())
	}
}
Code example #3
File: admission_test.go Project: richm/origin
func TestSAR(t *testing.T) {
	store := projectcache.NewCacheStore(cache.IndexFuncToKeyFuncAdapter(cache.MetaNamespaceIndexFunc))
	mockClient := &testclient.Fake{}
	mockClient.AddReactor("get", "namespaces", func(action testclient.Action) (handled bool, ret runtime.Object, err error) {
		return true, nil, fmt.Errorf("shouldn't get here")
	})
	cache := projectcache.NewFake(mockClient.Namespaces(), store, "")

	mockClientset := clientsetfake.NewSimpleClientset()
	handler := &lifecycle{client: mockClientset}
	handler.SetProjectCache(cache)

	tests := map[string]struct {
		kind     string
		resource string
	}{
		"subject access review": {
			kind:     "SubjectAccessReview",
			resource: "subjectaccessreviews",
		},
		"local subject access review": {
			kind:     "LocalSubjectAccessReview",
			resource: "localsubjectaccessreviews",
		},
	}

	for k, v := range tests {
		err := handler.Admit(admission.NewAttributesRecord(nil, kapi.Kind(v.kind), "foo", "name", kapi.Resource(v.resource), "", "CREATE", nil))
		if err != nil {
			t.Errorf("Unexpected error for %s returned from admission handler: %v", k, err)
		}
	}
}
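
The AddReactor call above is how a fake client's behavior gets scripted: a reactor intercepts calls matching a verb/resource pair before the default object tracker handles them. A small sketch of the same "shouldn't get here" guard against current client-go (package paths are an assumption; the example above uses OpenShift's older testclient wrapper):

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	k8stesting "k8s.io/client-go/testing"
)

func main() {
	client := fake.NewSimpleClientset()

	// Fail any GET on namespaces; returning handled=true stops the chain.
	client.PrependReactor("get", "namespaces",
		func(action k8stesting.Action) (bool, runtime.Object, error) {
			return true, nil, fmt.Errorf("shouldn't get here")
		})

	_, err := client.CoreV1().Namespaces().Get(context.TODO(), "foo", metav1.GetOptions{})
	fmt.Println(err) // shouldn't get here
}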
Code example #4
File: secret_test.go Project: koori02/kubernetes
func TestPlugin(t *testing.T) {
	var (
		testPodUID     = types.UID("test_pod_uid")
		testVolumeName = "test_volume_name"
		testNamespace  = "test_secret_namespace"
		testName       = "test_secret_name"

		volumeSpec    = volumeSpec(testVolumeName, testName)
		secret        = secret(testNamespace, testName)
		client        = fake.NewSimpleClientset(&secret)
		pluginMgr     = volume.VolumePluginMgr{}
		rootDir, host = newTestHost(t, client)
	)

	pluginMgr.InitPlugins(ProbeVolumePlugins(), host)

	plugin, err := pluginMgr.FindPluginByName(secretPluginName)
	if err != nil {
		t.Fatalf("Can't find the plugin by name: %v", err)
	}

	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID}}
	builder, err := plugin.NewBuilder(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
	if err != nil {
		t.Fatalf("Failed to make a new Builder: %v", err)
	}
	if builder == nil {
		t.Fatalf("Got a nil Builder")
	}

	volumePath := builder.GetPath()
	if !strings.HasSuffix(volumePath, "pods/test_pod_uid/volumes/kubernetes.io~secret/test_volume_name") {
		t.Errorf("Got unexpected path: %s", volumePath)
	}

	err = builder.SetUp(nil)
	if err != nil {
		t.Errorf("Failed to setup volume: %v", err)
	}
	if _, err := os.Stat(volumePath); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}

	// secret volume should create its own empty wrapper path
	podWrapperMetadataDir := fmt.Sprintf("%v/pods/test_pod_uid/plugins/kubernetes.io~empty-dir/wrapped_test_volume_name", rootDir)

	if _, err := os.Stat(podWrapperMetadataDir); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, empty-dir wrapper path is not created: %s", podWrapperMetadataDir)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}
	doTestSecretDataInVolume(volumePath, secret, t)
	doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t)
}
Code example #5
func TestDoNotDeleteMirrorPods(t *testing.T) {
	staticPod := getTestPod()
	staticPod.Annotations = map[string]string{kubetypes.ConfigSourceAnnotationKey: "file"}
	mirrorPod := getTestPod()
	mirrorPod.UID = "mirror-12345678"
	mirrorPod.Annotations = map[string]string{
		kubetypes.ConfigSourceAnnotationKey: "api",
		kubetypes.ConfigMirrorAnnotationKey: "mirror",
	}
	// Set the deletion timestamp.
	mirrorPod.DeletionTimestamp = new(unversioned.Time)
	client := fake.NewSimpleClientset(mirrorPod)
	m := newTestManager(client)
	m.podManager.AddPod(staticPod)
	m.podManager.AddPod(mirrorPod)
	// Verify setup.
	assert.True(t, kubepod.IsStaticPod(staticPod), "SetUp error: staticPod")
	assert.True(t, kubepod.IsMirrorPod(mirrorPod), "SetUp error: mirrorPod")
	assert.Equal(t, m.podManager.TranslatePodUID(mirrorPod.UID), staticPod.UID)

	status := getRandomPodStatus()
	now := unversioned.Now()
	status.StartTime = &now
	m.SetPodStatus(staticPod, status)

	m.testSyncBatch()
	// Expect not to see a delete action.
	verifyActions(t, m.kubeClient, []core.Action{
		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: "pods"}},
		core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: "pods", Subresource: "status"}},
	})
}
Code example #6
func TestCasting(t *testing.T) {
	clientset := fake.NewSimpleClientset()
	binder := NewPersistentVolumeClaimBinder(clientset, 1*time.Second)

	pv := &api.PersistentVolume{}
	unk := cache.DeletedFinalStateUnknown{}
	pvc := &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{Name: "foo"},
		Status:     api.PersistentVolumeClaimStatus{Phase: api.ClaimBound},
	}

	// Inject mockClient into the binder. This prevents weird errors on stderr
	// as the binder wants to load PV/PVC from API server.
	mockClient := &mockBinderClient{
		volume: pv,
		claim:  pvc,
	}
	binder.client = mockClient

	// none of these should fail casting.
	// the real test is not failing when passed DeletedFinalStateUnknown in the deleteHandler
	binder.addVolume(pv)
	binder.updateVolume(pv, pv)
	binder.deleteVolume(pv)
	binder.deleteVolume(unk)
	binder.addClaim(pvc)
	binder.updateClaim(pvc, pvc)
}
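
TestCasting is really guarding the delete handler against cache.DeletedFinalStateUnknown, the tombstone informers deliver when a deletion was missed while disconnected. A sketch of the unwrap idiom such handlers typically use (illustrative, not the upstream body; current package paths assumed):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/cache"
)

// deleteVolume tolerates both a live object and a tombstone.
func deleteVolume(obj interface{}) {
	pv, ok := obj.(*corev1.PersistentVolume)
	if !ok {
		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
		if !ok {
			return // neither a PV nor a tombstone; ignore
		}
		pv, ok = tombstone.Obj.(*corev1.PersistentVolume)
		if !ok {
			return // tombstone wrapped something else; ignore
		}
	}
	fmt.Println("deleting volume:", pv.Name)
}

func main() {
	deleteVolume(&corev1.PersistentVolume{})
	deleteVolume(cache.DeletedFinalStateUnknown{Obj: &corev1.PersistentVolume{}})
	deleteVolume(cache.DeletedFinalStateUnknown{}) // no panic, silently ignored
}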
Code example #7
File: admission_test.go Project: richm/origin
func TestLimitRangerCacheAndLRUExpiredMisses(t *testing.T) {
	liveLookupCache, err := lru.New(10000)
	if err != nil {
		t.Fatal(err)
	}

	limitRange := validLimitRangeNoDefaults()
	client := fake.NewSimpleClientset(&limitRange)
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
	handler := &limitRanger{
		Handler:         admission.NewHandler(admission.Create, admission.Update),
		client:          client,
		limitFunc:       Limit,
		indexer:         indexer,
		liveLookupCache: liveLookupCache,
	}

	testPod := validPod("testPod", 1, api.ResourceRequirements{})

	// add to the lru cache
	liveLookupCache.Add(limitRange.Namespace, liveLookupEntry{expiry: time.Now().Add(-30 * time.Second), items: []*api.LimitRange{}})

	err = handler.Admit(admission.NewAttributesRecord(&testPod, api.Kind("Pod"), limitRange.Namespace, "testPod", api.Resource("pods"), "", admission.Update, nil))
	if err == nil {
		t.Errorf("Expected an error since the pod did not specify resource limits in its update call")
	}

	err = handler.Admit(admission.NewAttributesRecord(&testPod, api.Kind("Pod"), limitRange.Namespace, "testPod", api.Resource("pods"), "status", admission.Update, nil))
	if err != nil {
		t.Errorf("Should have ignored calls to any subresource of pod %v", err)
	}
}
Code example #8
File: admission_test.go Project: richm/origin
func TestLimitRangerIgnoresSubresource(t *testing.T) {
	client := fake.NewSimpleClientset()
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
	handler := &limitRanger{
		Handler:   admission.NewHandler(admission.Create, admission.Update),
		client:    client,
		limitFunc: Limit,
		indexer:   indexer,
	}

	limitRange := validLimitRangeNoDefaults()
	testPod := validPod("testPod", 1, api.ResourceRequirements{})

	indexer.Add(&limitRange)
	err := handler.Admit(admission.NewAttributesRecord(&testPod, api.Kind("Pod"), limitRange.Namespace, "testPod", api.Resource("pods"), "", admission.Update, nil))
	if err == nil {
		t.Errorf("Expected an error since the pod did not specify resource limits in its update call")
	}

	err = handler.Admit(admission.NewAttributesRecord(&testPod, api.Kind("Pod"), limitRange.Namespace, "testPod", api.Resource("pods"), "status", admission.Update, nil))
	if err != nil {
		t.Errorf("Should have ignored calls to any subresource of pod %v", err)
	}
}
Code example #9
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("glusterfs_test")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Name: "pvA",
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeSource: api.PersistentVolumeSource{
				Glusterfs: &api.GlusterfsVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false},
			},
			ClaimRef: &api.ObjectReference{
				Name: "claimA",
			},
		},
	}

	claim := &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{
			Name:      "claimA",
			Namespace: "nsA",
		},
		Spec: api.PersistentVolumeClaimSpec{
			VolumeName: "pvA",
		},
		Status: api.PersistentVolumeClaimStatus{
			Phase: api.ClaimBound,
		},
	}

	ep := &api.Endpoints{
		ObjectMeta: api.ObjectMeta{
			Name: "ep",
		},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []api.EndpointPort{{Name: "foo", Port: 80, Protocol: api.ProtocolTCP}},
		}},
	}

	client := fake.NewSimpleClientset(pv, claim, ep)

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost(tmpDir, client, nil))
	plug, _ := plugMgr.FindPluginByName(glusterfsPluginName)

	// readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes
	spec := volume.NewSpecFromPersistentVolume(pv, true)
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
	builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{})

	if !builder.GetAttributes().ReadOnly {
		t.Errorf("Expected true for builder.IsReadOnly")
	}
}
Code example #10
func TestStaticPodStatus(t *testing.T) {
	staticPod := getTestPod()
	staticPod.Annotations = map[string]string{kubetypes.ConfigSourceAnnotationKey: "file"}
	mirrorPod := getTestPod()
	mirrorPod.UID = "mirror-12345678"
	mirrorPod.Annotations = map[string]string{
		kubetypes.ConfigSourceAnnotationKey: "api",
		kubetypes.ConfigMirrorAnnotationKey: "mirror",
	}
	client := fake.NewSimpleClientset(mirrorPod)
	m := newTestManager(client)
	m.podManager.AddPod(staticPod)
	m.podManager.AddPod(mirrorPod)
	// Verify setup.
	assert.True(t, kubepod.IsStaticPod(staticPod), "SetUp error: staticPod")
	assert.True(t, kubepod.IsMirrorPod(mirrorPod), "SetUp error: mirrorPod")
	assert.Equal(t, m.podManager.TranslatePodUID(mirrorPod.UID), staticPod.UID)

	status := getRandomPodStatus()
	now := unversioned.Now()
	status.StartTime = &now

	m.SetPodStatus(staticPod, status)
	retrievedStatus := expectPodStatus(t, m, staticPod)
	assert.True(t, isStatusEqual(&status, &retrievedStatus), "Expected: %+v, Got: %+v", status, retrievedStatus)
	retrievedStatus, _ = m.GetPodStatus(mirrorPod.UID)
	assert.True(t, isStatusEqual(&status, &retrievedStatus), "Expected: %+v, Got: %+v", status, retrievedStatus)
	// Should translate mirrorPod / staticPod UID.
	m.testSyncBatch()
	verifyActions(t, m.kubeClient, []core.Action{
		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: "pods"}},
		core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: "pods", Subresource: "status"}},
	})
	updateAction := client.Actions()[1].(core.UpdateActionImpl)
	updatedPod := updateAction.Object.(*api.Pod)
	assert.Equal(t, mirrorPod.UID, updatedPod.UID, "Expected mirrorPod (%q), but got %q", mirrorPod.UID, updatedPod.UID)
	assert.True(t, isStatusEqual(&status, &updatedPod.Status), "Expected: %+v, Got: %+v", status, updatedPod.Status)
	client.ClearActions()

	// No changes.
	m.testSyncBatch()
	verifyActions(t, m.kubeClient, []core.Action{})

	// Mirror pod identity changes.
	m.podManager.DeletePod(mirrorPod)
	mirrorPod.UID = "new-mirror-pod"
	mirrorPod.Status = api.PodStatus{}
	m.podManager.AddPod(mirrorPod)
	// Expect update to new mirrorPod.
	m.testSyncBatch()
	verifyActions(t, m.kubeClient, []core.Action{
		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: "pods"}},
		core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: "pods", Subresource: "status"}},
	})
	updateAction = client.Actions()[1].(core.UpdateActionImpl)
	updatedPod = updateAction.Object.(*api.Pod)
	assert.Equal(t, mirrorPod.UID, updatedPod.UID, "Expected mirrorPod (%q), but got %q", mirrorPod.UID, updatedPod.UID)
	assert.True(t, isStatusEqual(&status, &updatedPod.Status), "Expected: %+v, Got: %+v", status, updatedPod.Status)
}
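
verifyActions is a local helper in these status-manager tests, not a library call. A plausible reconstruction — an assumption, not the upstream source — compares the fake client's recorded actions to the expected list and resets the recorder, using the same fake and core packages the tests above import:

// Assumed shape of the verifyActions helper used throughout these tests.
func verifyActions(t *testing.T, client *fake.Clientset, expected []core.Action) {
	actions := client.Actions()
	if len(actions) != len(expected) {
		t.Fatalf("expected %d actions, got %d: %+v", len(expected), len(actions), actions)
	}
	for i, want := range expected {
		got := actions[i]
		if got.GetVerb() != want.GetVerb() ||
			got.GetResource() != want.GetResource() ||
			got.GetSubresource() != want.GetSubresource() {
			t.Errorf("action %d: expected %+v, got %+v", i, want, got)
		}
	}
	client.ClearActions()
}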
Code example #11
File: secret_test.go Project: jetsanix/kubernetes
// Test the case where the 'ready' file has been created and the pod volume dir
// is a mountpoint.  Mount should not be called.
func TestPluginIdempotent(t *testing.T) {
	var (
		testPodUID     = types.UID("test_pod_uid2")
		testVolumeName = "test_volume_name"
		testNamespace  = "test_secret_namespace"
		testName       = "test_secret_name"

		volumeSpec    = volumeSpec(testVolumeName, testName)
		secret        = secret(testNamespace, testName)
		client        = fake.NewSimpleClientset(&secret)
		pluginMgr     = volume.VolumePluginMgr{}
		rootDir, host = newTestHost(t, client)
	)

	pluginMgr.InitPlugins(ProbeVolumePlugins(), host)

	plugin, err := pluginMgr.FindPluginByName(secretPluginName)
	if err != nil {
		t.Fatalf("Can't find the plugin by name: %v", err)
	}

	podVolumeDir := fmt.Sprintf("%v/pods/test_pod_uid2/volumes/kubernetes.io~secret/test_volume_name", rootDir)
	podMetadataDir := fmt.Sprintf("%v/pods/test_pod_uid2/plugins/kubernetes.io~secret/test_volume_name", rootDir)
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID}}
	mounter := host.GetMounter().(*mount.FakeMounter)
	mounter.MountPoints = []mount.MountPoint{
		{
			Path: podVolumeDir,
		},
	}
	util.SetReady(podMetadataDir)
	builder, err := plugin.NewBuilder(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
	if err != nil {
		t.Fatalf("Failed to make a new Builder: %v", err)
	}
	if builder == nil {
		t.Fatalf("Got a nil Builder")
	}

	volumePath := builder.GetPath()
	err = builder.SetUp(nil)
	if err != nil {
		t.Errorf("Failed to setup volume: %v", err)
	}

	if len(mounter.Log) != 0 {
		t.Errorf("Unexpected calls made to mounter: %v", mounter.Log)
	}

	if _, err := os.Stat(volumePath); err != nil {
		if !os.IsNotExist(err) {
			t.Errorf("SetUp() failed unexpectedly: %v", err)
		}
	} else {
		t.Errorf("volume path should not exist: %v", volumePath)
	}
}
Code example #12
func TestSyncResourceQuotaSpecChange(t *testing.T) {
	quota := api.ResourceQuota{
		Spec: api.ResourceQuotaSpec{
			Hard: api.ResourceList{
				api.ResourceCPU: resource.MustParse("4"),
			},
		},
		Status: api.ResourceQuotaStatus{
			Hard: api.ResourceList{
				api.ResourceCPU: resource.MustParse("3"),
			},
			Used: api.ResourceList{
				api.ResourceCPU: resource.MustParse("0"),
			},
		},
	}

	expectedUsage := api.ResourceQuota{
		Status: api.ResourceQuotaStatus{
			Hard: api.ResourceList{
				api.ResourceCPU: resource.MustParse("4"),
			},
			Used: api.ResourceList{
				api.ResourceCPU: resource.MustParse("0"),
			},
		},
	}

	kubeClient := fake.NewSimpleClientset(&quota)

	ResourceQuotaController := NewResourceQuotaController(kubeClient, controller.StaticResyncPeriodFunc(time.Second))
	err := ResourceQuotaController.syncResourceQuota(quota)
	if err != nil {
		t.Fatalf("Unexpected error %v", err)
	}

	usage := kubeClient.Actions()[1].(testclient.UpdateAction).GetObject().(*api.ResourceQuota)

	// ensure hard and used limits are what we expected
	for k, v := range expectedUsage.Status.Hard {
		actual := usage.Status.Hard[k]
		actualValue := actual.String()
		expectedValue := v.String()
		if expectedValue != actualValue {
			t.Errorf("Usage Hard: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue)
		}
	}
	for k, v := range expectedUsage.Status.Used {
		actual := usage.Status.Used[k]
		actualValue := actual.String()
		expectedValue := v.String()
		if expectedValue != actualValue {
			t.Errorf("Usage Used: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue)
		}
	}
}
Code example #13
func TestFailedRecycling(t *testing.T) {
	pv := &api.PersistentVolume{
		Spec: api.PersistentVolumeSpec{
			AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceStorage): resource.MustParse("8Gi"),
			},
			PersistentVolumeSource: api.PersistentVolumeSource{
				HostPath: &api.HostPathVolumeSource{
					Path: "/somepath/data02",
				},
			},
			PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimRecycle,
			ClaimRef: &api.ObjectReference{
				Name:      "foo",
				Namespace: "bar",
			},
		},
		Status: api.PersistentVolumeStatus{
			Phase: api.VolumeReleased,
		},
	}

	mockClient := &mockBinderClient{
		volume: pv,
	}

	// no Init called for pluginMgr and no plugins are available.  Volume should fail recycling.
	plugMgr := volume.VolumePluginMgr{}

	recycler := &PersistentVolumeRecycler{
		kubeClient: fake.NewSimpleClientset(),
		client:     mockClient,
		pluginMgr:  plugMgr,
	}

	err := recycler.reclaimVolume(pv)
	if err != nil {
		t.Errorf("Unexpected non-nil error: %v", err)
	}

	if mockClient.volume.Status.Phase != api.VolumeFailed {
		t.Errorf("Expected %s but got %s", api.VolumeFailed, mockClient.volume.Status.Phase)
	}

	pv.Spec.PersistentVolumeReclaimPolicy = api.PersistentVolumeReclaimDelete
	err = recycler.reclaimVolume(pv)
	if err != nil {
		t.Errorf("Unexpected non-nil error: %v", err)
	}

	if mockClient.volume.Status.Phase != api.VolumeFailed {
		t.Errorf("Expected %s but got %s", api.VolumeFailed, mockClient.volume.Status.Phase)
	}
}
Code example #14
func TestIncrementUsageOnUpdateIgnoresNonPodResources(t *testing.T) {
	testCase := []struct {
		kind        unversioned.GroupKind
		resource    unversioned.GroupResource
		subresource string
		object      runtime.Object
	}{
		{
			kind:     api.Kind("Service"),
			resource: api.Resource("services"),
			object:   &api.Service{},
		},
		{
			kind:     api.Kind("ReplicationController"),
			resource: api.Resource("replicationcontrollers"),
			object:   &api.ReplicationController{},
		},
		{
			kind:     api.Kind("ResourceQuota"),
			resource: api.Resource("resourcequotas"),
			object:   &api.ResourceQuota{},
		},
		{
			kind:     api.Kind("Secret"),
			resource: api.Resource("secrets"),
			object:   &api.Secret{},
		},
		{
			kind:     api.Kind("PersistentVolumeClaim"),
			resource: api.Resource("persistentvolumeclaims"),
			object:   &api.PersistentVolumeClaim{},
		},
	}

	for _, testCase := range testCase {
		client := fake.NewSimpleClientset()
		status := &api.ResourceQuotaStatus{
			Hard: api.ResourceList{},
			Used: api.ResourceList{},
		}
		r := resourceToResourceName[testCase.resource]
		status.Hard[r] = resource.MustParse("2")
		status.Used[r] = resource.MustParse("1")

		attributesRecord := admission.NewAttributesRecord(testCase.object, testCase.kind, "my-ns", "new-thing",
			testCase.resource, testCase.subresource, admission.Update, nil)
		dirty, err := IncrementUsage(attributesRecord, status, client)
		if err != nil {
			t.Errorf("Increment usage of resource %v had unexpected error: %v", testCase.resource, err)
		}
		if dirty {
			t.Errorf("Increment usage of resource %v should not result in a dirty quota on update", testCase.resource)
		}
	}
}
Code example #15
File: fc_test.go Project: richm/origin
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("fc_test")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	lun := 0
	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Name: "pvA",
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeSource: api.PersistentVolumeSource{
				FC: &api.FCVolumeSource{
					TargetWWNs: []string{"some_wwn"},
					FSType:     "ext4",
					Lun:        &lun,
				},
			},
			ClaimRef: &api.ObjectReference{
				Name: "claimA",
			},
		},
	}

	claim := &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{
			Name:      "claimA",
			Namespace: "nsA",
		},
		Spec: api.PersistentVolumeClaimSpec{
			VolumeName: "pvA",
		},
		Status: api.PersistentVolumeClaimStatus{
			Phase: api.ClaimBound,
		},
	}

	client := fake.NewSimpleClientset(pv, claim)

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost(tmpDir, client, nil))
	plug, _ := plugMgr.FindPluginByName(fcPluginName)

	// readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes
	spec := volume.NewSpecFromPersistentVolume(pv, true)
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
	builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{})

	if !builder.GetAttributes().ReadOnly {
		t.Errorf("Expected true for builder.IsReadOnly")
	}
}
Code example #16
// Test the case where the plugin's ready file exists, but the volume dir is not a
// mountpoint, which is the state the system will be in after reboot.  The dir
// should be mounted and the configMap data written to it.
func TestPluginReboot(t *testing.T) {
	var (
		testPodUID     = types.UID("test_pod_uid3")
		testVolumeName = "test_volume_name"
		testNamespace  = "test_configmap_namespace"
		testName       = "test_configmap_name"

		volumeSpec    = volumeSpec(testVolumeName, testName)
		configMap     = configMap(testNamespace, testName)
		client        = fake.NewSimpleClientset(&configMap)
		pluginMgr     = volume.VolumePluginMgr{}
		rootDir, host = newTestHost(t, client)
	)

	pluginMgr.InitPlugins(ProbeVolumePlugins(), host)

	plugin, err := pluginMgr.FindPluginByName(configMapPluginName)
	if err != nil {
		t.Fatalf("Can't find the plugin by name: %v", err)
	}

	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID}}
	builder, err := plugin.NewBuilder(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
	if err != nil {
		t.Fatalf("Failed to make a new Builder: %v", err)
	}
	if builder == nil {
		t.Fatalf("Got a nil Builder")
	}

	podMetadataDir := fmt.Sprintf("%v/pods/test_pod_uid3/plugins/kubernetes.io~configmap/test_volume_name", rootDir)
	util.SetReady(podMetadataDir)
	volumePath := builder.GetPath()
	if !strings.HasSuffix(volumePath, fmt.Sprintf("pods/test_pod_uid3/volumes/kubernetes.io~configmap/test_volume_name")) {
		t.Errorf("Got unexpected path: %s", volumePath)
	}

	fsGroup := int64(1001)
	err = builder.SetUp(&fsGroup)
	if err != nil {
		t.Errorf("Failed to setup volume: %v", err)
	}
	if _, err := os.Stat(volumePath); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}

	doTestConfigMapDataInVolume(volumePath, configMap, t)
	doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t)
}
Code example #17
func TestSyncBatch(t *testing.T) {
	syncer := newTestManager(&fake.Clientset{})
	testPod := getTestPod()
	syncer.kubeClient = fake.NewSimpleClientset(testPod)
	syncer.SetPodStatus(testPod, getRandomPodStatus())
	syncer.testSyncBatch()
	verifyActions(t, syncer.kubeClient, []core.Action{
		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: "pods"}},
		core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: "pods", Subresource: "status"}},
	})
}
Code example #18
func TestReconcilePodStatus(t *testing.T) {
	testPod := getTestPod()
	client := fake.NewSimpleClientset(testPod)
	syncer := newTestManager(client)
	syncer.SetPodStatus(testPod, getRandomPodStatus())
	// Call syncBatch directly to test reconcile
	syncer.syncBatch() // The apiStatusVersions should be set now

	podStatus, ok := syncer.GetPodStatus(testPod.UID)
	if !ok {
		t.Fatalf("Should find pod status for pod: %+v", testPod)
	}
	testPod.Status = podStatus

	// If the pod status is the same, a reconciliation is not needed and
	// syncBatch should do nothing.
	syncer.podManager.UpdatePod(testPod)
	if syncer.needsReconcile(testPod.UID, podStatus) {
		t.Errorf("Pod status is the same, a reconciliation is not needed")
	}
	client.ClearActions()
	syncer.syncBatch()
	verifyActions(t, client, []core.Action{})

	// If the pod status is the same except that the timestamp is in RFC3339
	// format (lower precision, without nanoseconds), a reconciliation is not
	// needed and syncBatch should do nothing.
	// The StartTime should have been set in SetPodStatus().
	// TODO(random-liu): Remove this later when api becomes consistent for timestamp.
	normalizedStartTime := testPod.Status.StartTime.Rfc3339Copy()
	testPod.Status.StartTime = &normalizedStartTime
	syncer.podManager.UpdatePod(testPod)
	if syncer.needsReconcile(testPod.UID, podStatus) {
		t.Errorf("Pod status only differs for timestamp format, a reconciliation is not needed")
	}
	client.ClearActions()
	syncer.syncBatch()
	verifyActions(t, client, []core.Action{})

	// If the pod status is different, a reconciliation is needed, syncBatch should trigger an update
	testPod.Status = getRandomPodStatus()
	syncer.podManager.UpdatePod(testPod)
	if !syncer.needsReconcile(testPod.UID, podStatus) {
		t.Errorf("Pod status is different, a reconciliation is needed")
	}
	client.ClearActions()
	syncer.syncBatch()
	verifyActions(t, client, []core.Action{
		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: "pods"}},
		core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: "pods", Subresource: "status"}},
	})
}
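
The Rfc3339Copy normalization above exists because the API server round-trips timestamps at RFC3339 (second) precision, so a status differing only in sub-second digits must not be treated as a change. A tiny illustration of the truncation, using the current apimachinery path (an assumption — the tests above use the older unversioned package):

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	now := metav1.NewTime(time.Now())
	rounded := now.Rfc3339Copy() // drops sub-second precision

	// Usually false: the nanoseconds were lost in the copy.
	fmt.Println(now.Equal(&rounded))

	// Two normalized copies compare stably.
	a, b := now.Rfc3339Copy(), now.Rfc3339Copy()
	fmt.Println(a.Equal(&b)) // true
}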
Code example #19
func TestUpdatePods(t *testing.T) {
	manager := NewReplicaSetController(fake.NewSimpleClientset(), controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady

	received := make(chan string)

	manager.syncHandler = func(key string) error {
		obj, exists, err := manager.rsStore.Store.GetByKey(key)
		if !exists || err != nil {
			t.Errorf("Expected to find replica set under key %v", key)
		}
		received <- obj.(*extensions.ReplicaSet).Name
		return nil
	}

	stopCh := make(chan struct{})
	defer close(stopCh)
	go util.Until(manager.worker, 10*time.Millisecond, stopCh)

	// Put 2 ReplicaSets and one pod into the controller's stores
	labelMap1 := map[string]string{"foo": "bar"}
	testRSSpec1 := newReplicaSet(1, labelMap1)
	manager.rsStore.Store.Add(testRSSpec1)
	testRSSpec2 := *testRSSpec1
	labelMap2 := map[string]string{"bar": "foo"}
	testRSSpec2.Spec.Selector = &unversioned.LabelSelector{MatchLabels: labelMap2}
	testRSSpec2.Name = "barfoo"
	manager.rsStore.Store.Add(&testRSSpec2)

	// Put one pod in the podStore
	pod1 := newPodList(manager.podStore.Store, 1, api.PodRunning, labelMap1, testRSSpec1).Items[0]
	pod2 := pod1
	pod2.Labels = labelMap2

	// Send an update of the same pod with modified labels, and confirm we get a sync request for
	// both controllers
	manager.updatePod(&pod1, &pod2)

	expected := sets.NewString(testRSSpec1.Name, testRSSpec2.Name)
	for _, name := range expected.List() {
		t.Logf("Expecting update for %+v", name)
		select {
		case got := <-received:
			if !expected.Has(got) {
				t.Errorf("Expected keys %#v got %v", expected, got)
			}
		case <-time.After(util.ForeverTestTimeout):
			t.Errorf("Expected update notifications for replica sets within 100ms each")
		}
	}
}
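
The received channel plus select/time.After is the standard way these controller tests observe asynchronous queue processing without flaking. Stripped of the controller machinery, the idiom reduces to this sketch (a plain duration stands in for util.ForeverTestTimeout):

package main

import (
	"fmt"
	"time"
)

func main() {
	received := make(chan string, 1)

	// Stand-in for the overridden syncHandler reporting which key it saw.
	go func() { received <- "barfoo" }()

	select {
	case got := <-received:
		fmt.Println("synced:", got)
	case <-time.After(30 * time.Second): // the test uses util.ForeverTestTimeout
		fmt.Println("timed out waiting for sync")
	}
}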
Code example #20
func TestSyncBatchChecksMismatchedUID(t *testing.T) {
	syncer := newTestManager(&fake.Clientset{})
	pod := getTestPod()
	pod.UID = "first"
	syncer.podManager.AddPod(pod)
	differentPod := getTestPod()
	differentPod.UID = "second"
	syncer.podManager.AddPod(differentPod)
	syncer.kubeClient = fake.NewSimpleClientset(pod)
	syncer.SetPodStatus(differentPod, getRandomPodStatus())
	syncer.testSyncBatch()
	verifyActions(t, syncer.kubeClient, []core.Action{
		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: "pods"}},
	})
}
Code example #21
func TestDeniesInvalidServiceAccount(t *testing.T) {
	ns := "myns"

	// Build a test client that the admission plugin can use to look up the service account missing from its cache
	client := fake.NewSimpleClientset()

	admit := NewServiceAccount(client)

	pod := &api.Pod{}
	attrs := admission.NewAttributesRecord(pod, api.Kind("Pod"), ns, "myname", api.Resource("pods"), "", admission.Create, nil)
	err := admit.Admit(attrs)
	if err == nil {
		t.Errorf("Expected error for missing service account, got none")
	}
}
Code example #22
File: aws_ebs_test.go Project: jetsanix/kubernetes
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Name: "pvA",
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeSource: api.PersistentVolumeSource{
				AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{},
			},
			ClaimRef: &api.ObjectReference{
				Name: "claimA",
			},
		},
	}

	claim := &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{
			Name:      "claimA",
			Namespace: "nsA",
		},
		Spec: api.PersistentVolumeClaimSpec{
			VolumeName: "pvA",
		},
		Status: api.PersistentVolumeClaimStatus{
			Phase: api.ClaimBound,
		},
	}

	clientset := fake.NewSimpleClientset(pv, claim)

	tmpDir, err := utiltesting.MkTmpdir("awsebsTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost(tmpDir, clientset, nil))
	plug, _ := plugMgr.FindPluginByName(awsElasticBlockStorePluginName)

	// readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes
	spec := volume.NewSpecFromPersistentVolume(pv, true)
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
	builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{})

	if !builder.GetAttributes().ReadOnly {
		t.Errorf("Expected true for builder.IsReadOnly")
	}
}
Code example #23
func TestExceedUsagePods(t *testing.T) {
	pod := validPod("123", 1, getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", "")))
	podList := &api.PodList{Items: []api.Pod{*pod}}
	client := fake.NewSimpleClientset(podList)
	status := &api.ResourceQuotaStatus{
		Hard: api.ResourceList{},
		Used: api.ResourceList{},
	}
	r := api.ResourcePods
	status.Hard[r] = resource.MustParse("1")
	status.Used[r] = resource.MustParse("1")
	_, err := IncrementUsage(admission.NewAttributesRecord(&api.Pod{}, api.Kind("Pod"), pod.Namespace, "name", api.Resource("pods"), "", admission.Create, nil), status, client)
	if err == nil {
		t.Errorf("Expected error because this would exceed your quota")
	}
}
Code example #24
func TestStaleUpdates(t *testing.T) {
	pod := getTestPod()
	client := fake.NewSimpleClientset(pod)
	m := newTestManager(client)

	status := api.PodStatus{Message: "initial status"}
	m.SetPodStatus(pod, status)
	status.Message = "first version bump"
	m.SetPodStatus(pod, status)
	status.Message = "second version bump"
	m.SetPodStatus(pod, status)
	verifyUpdates(t, m, 3)

	t.Logf("First sync pushes latest status.")
	m.testSyncBatch()
	verifyActions(t, m.kubeClient, []core.Action{
		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: "pods"}},
		core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: "pods", Subresource: "status"}},
	})
	client.ClearActions()

	for i := 0; i < 2; i++ {
		t.Logf("Next 2 syncs should be ignored (%d).", i)
		m.testSyncBatch()
		verifyActions(t, m.kubeClient, []core.Action{})
	}

	t.Log("Unchanged status should not send an update.")
	m.SetPodStatus(pod, status)
	verifyUpdates(t, m, 0)

	t.Log("... unless it's stale.")
	m.apiStatusVersions[pod.UID] = m.apiStatusVersions[pod.UID] - 1

	m.SetPodStatus(pod, status)
	m.testSyncBatch()
	verifyActions(t, m.kubeClient, []core.Action{
		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: "pods"}},
		core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: "pods", Subresource: "status"}},
	})

	// Nothing stuck in the pipe.
	verifyUpdates(t, m, 0)
}
Code example #25
File: describe_test.go Project: richm/origin
func TestDescribeDeployment(t *testing.T) {
	fake := fake.NewSimpleClientset(&extensions.Deployment{
		ObjectMeta: api.ObjectMeta{
			Name:      "bar",
			Namespace: "foo",
		},
		Spec: extensions.DeploymentSpec{
			Template: api.PodTemplateSpec{},
		},
	})
	d := DeploymentDescriber{fake}
	out, err := d.Describe("foo", "bar")
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if !strings.Contains(out, "bar") || !strings.Contains(out, "foo") {
		t.Errorf("unexpected out: %s", out)
	}
}
Code example #26
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Name: "pvA",
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeSource: api.PersistentVolumeSource{
				HostPath: &api.HostPathVolumeSource{Path: "foo"},
			},
			ClaimRef: &api.ObjectReference{
				Name: "claimA",
			},
		},
	}

	claim := &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{
			Name:      "claimA",
			Namespace: "nsA",
		},
		Spec: api.PersistentVolumeClaimSpec{
			VolumeName: "pvA",
		},
		Status: api.PersistentVolumeClaimStatus{
			Phase: api.ClaimBound,
		},
	}

	client := fake.NewSimpleClientset(pv, claim)

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("/tmp/fake", client, nil))
	plug, _ := plugMgr.FindPluginByName(hostPathPluginName)

	// readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes
	spec := volume.NewSpecFromPersistentVolume(pv, true)
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
	builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{})

	if !builder.GetAttributes().ReadOnly {
		t.Errorf("Expected true for builder.IsReadOnly")
	}
}
Code example #27
func TestCasting(t *testing.T) {
	clientset := fake.NewSimpleClientset()
	binder := NewPersistentVolumeClaimBinder(clientset, 1*time.Second)

	pv := &api.PersistentVolume{}
	unk := cache.DeletedFinalStateUnknown{}
	pvc := &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{Name: "foo"},
		Status:     api.PersistentVolumeClaimStatus{Phase: api.ClaimBound},
	}

	// none of these should fail casting.
	// the real test is not failing when passed DeletedFinalStateUnknown in the deleteHandler
	binder.addVolume(pv)
	binder.updateVolume(pv, pv)
	binder.deleteVolume(pv)
	binder.deleteVolume(unk)
	binder.addClaim(pvc)
	binder.updateClaim(pvc, pvc)
}
Code example #28
func TestRunStop(t *testing.T) {
	clientset := fake.NewSimpleClientset()
	binder := NewPersistentVolumeClaimBinder(clientset, 1*time.Second)

	if len(binder.stopChannels) != 0 {
		t.Errorf("Non-running binder should not have any stopChannels.  Got %v", len(binder.stopChannels))
	}

	binder.Run()

	if len(binder.stopChannels) != 2 {
		t.Errorf("Running binder should have exactly 2 stopChannels.  Got %v", len(binder.stopChannels))
	}

	binder.Stop()

	if len(binder.stopChannels) != 0 {
		t.Errorf("Non-running binder should not have any stopChannels.  Got %v", len(binder.stopChannels))
	}
}
Code example #29
func TestNewBuilderClaimNotBound(t *testing.T) {
	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Name: "pvC",
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeSource: api.PersistentVolumeSource{
				GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{},
			},
		},
	}
	claim := &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{
			Name:      "claimC",
			Namespace: "nsA",
		},
	}
	podVolume := api.VolumeSource{
		PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{
			ReadOnly:  false,
			ClaimName: "claimC",
		},
	}
	client := fake.NewSimpleClientset(pv, claim)

	plugMgr := volume.VolumePluginMgr{}
	tempDir, vh := newTestHost(t, client)
	defer os.RemoveAll(tempDir)
	plugMgr.InitPlugins(testProbeVolumePlugins(), vh)

	plug, err := plugMgr.FindPluginByName("kubernetes.io/persistent-claim")
	if err != nil {
		t.Fatalf("Can't find the plugin by name: %v", err)
	}
	spec := &volume.Spec{Volume: &api.Volume{VolumeSource: podVolume}}
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
	// NewBuilder is expected to fail for an unbound claim; only the nil builder is checked.
	builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{})
	if builder != nil {
		t.Errorf("Expected a nil builder if the claim wasn't bound")
	}
}
Code example #30
func TestRecyclingRetry(t *testing.T) {
	// Test that recycler controller retries to recycle a volume several times, which succeeds eventually
	pv := preparePV()

	mockClient := &mockBinderClient{
		volume: pv,
	}

	plugMgr := volume.VolumePluginMgr{}
	// Use a fake NewRecycler function
	plugMgr.InitPlugins(host_path.ProbeRecyclableVolumePlugins(newFailingMockRecycler, volume.VolumeConfig{}), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
	// Reset a global call counter
	failedCallCount = 0

	recycler := &PersistentVolumeRecycler{
		kubeClient:      fake.NewSimpleClientset(),
		client:          mockClient,
		pluginMgr:       plugMgr,
		syncPeriod:      mySyncPeriod,
		maximumRetry:    myMaximumRetry,
		releasedVolumes: make(map[string]releasedVolumeStatus),
	}

	// All but the last attempt will fail
	testRecycleFailures(t, recycler, mockClient, pv, myMaximumRetry-1)

	// The last attempt should succeed
	err := recycler.reclaimVolume(pv)
	if err != nil {
		t.Errorf("Last step: Recycler failed: %v", err)
	}

	if mockClient.volume.Status.Phase != api.VolumePending {
		t.Errorf("Last step: The volume should be Pending, but is %s instead", mockClient.volume.Status.Phase)
	}
	// Check the cache, it should not have any entry
	status, found := recycler.releasedVolumes[pv.Name]
	if found {
		t.Errorf("Last step: Expected PV to be removed from cache, got %v", status)
	}
}
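
One detail that makes the recycler tests above possible: the controller holds its clients as plain struct fields, so a test can assemble a PersistentVolumeRecycler literal from fake.NewSimpleClientset() and a hand-written mock. The enabling shape is just an interface-typed field (illustrative names, not the upstream types):

package main

import (
	"fmt"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// reclaimer mirrors the injection-friendly layout of PersistentVolumeRecycler.
type reclaimer struct {
	kubeClient kubernetes.Interface // *fake.Clientset satisfies this interface
}

func main() {
	r := &reclaimer{kubeClient: fake.NewSimpleClientset()}
	fmt.Printf("injected client type: %T\n", r.kubeClient)
}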