// TestPodDeletionDoesntEnqueueRecreateDeployment ensures that the deletion of a pod
// will not requeue a Recreate deployment as long as there are other pods returned
// from the client.
func TestPodDeletionDoesntEnqueueRecreateDeployment(t *testing.T) {
	f := newFixture(t)

	foo := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
	foo.Spec.Strategy.Type = extensions.RecreateDeploymentStrategyType
	rs := newReplicaSet(foo, "foo-1", 1)
	pod := generatePodFromRS(rs)

	f.dLister = append(f.dLister, foo)
	f.rsLister = append(f.rsLister, rs)
	// Let's pretend this is a different pod. The gist is that the pod lister needs to
	// return a non-empty list.
	f.podLister = append(f.podLister, pod)

	c, informers := f.newController()
	enqueued := false
	c.enqueueDeployment = func(d *extensions.Deployment) {
		if d.Name == "foo" {
			enqueued = true
		}
	}
	stopCh := make(chan struct{})
	defer close(stopCh)
	informers.Start(stopCh)

	c.deletePod(pod)

	if enqueued {
		t.Errorf("expected deployment %q not to be queued after pod deletion", foo.Name)
	}
}
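
// generatePodFromRS is not defined in this listing. A minimal sketch consistent
// with its usage above, assuming the deletePod handler only needs a pod carrying
// the ReplicaSet's selector labels in the right namespace:
func generatePodFromRS(rs *extensions.ReplicaSet) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      rs.Name + "-pod",
			Namespace: rs.Namespace,
			Labels:    rs.Spec.Selector.MatchLabels,
		},
	}
}
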
// issue: https://github.com/kubernetes/kubernetes/issues/23218
func TestDeploymentController_dontSyncDeploymentsWithEmptyPodSelector(t *testing.T) {
	fake := &fake.Clientset{}
	informers := informers.NewSharedInformerFactory(fake, nil, controller.NoResyncPeriodFunc())
	controller := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), fake)
	controller.eventRecorder = &record.FakeRecorder{}
	controller.dListerSynced = alwaysReady
	controller.rsListerSynced = alwaysReady
	controller.podListerSynced = alwaysReady

	stopCh := make(chan struct{})
	defer close(stopCh)
	informers.Start(stopCh)

	d := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
	empty := metav1.LabelSelector{}
	d.Spec.Selector = &empty
	controller.dLister.Indexer.Add(d)
	// We expect the deployment controller to not take action here since its
	// configuration is invalid, even though no ReplicaSets exist that match its selector.
	controller.syncDeployment(fmt.Sprintf("%s/%s", d.ObjectMeta.Namespace, d.ObjectMeta.Name))

	filteredActions := filterInformerActions(fake.Actions())
	if len(filteredActions) == 0 {
		return
	}
	for _, action := range filteredActions {
		t.Logf("unexpected action: %#v", action)
	}
	t.Errorf("expected deployment controller to not take action")
}
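
// filterInformerActions is not shown in this listing. A minimal sketch, assuming
// the fake clientset's action types are imported as core: it drops the list/watch
// requests issued by the shared informers so assertions only see the controller's
// own reads and writes.
func filterInformerActions(actions []core.Action) []core.Action {
	ret := []core.Action{}
	for _, action := range actions {
		if len(action.GetNamespace()) == 0 &&
			(action.Matches("list", "pods") ||
				action.Matches("watch", "pods") ||
				action.Matches("list", "deployments") ||
				action.Matches("watch", "deployments") ||
				action.Matches("list", "replicasets") ||
				action.Matches("watch", "replicasets")) {
			continue
		}
		ret = append(ret, action)
	}
	return ret
}
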
// TestPodDeletionEnqueuesRecreateDeployment ensures that the deletion of a pod
// will requeue a Recreate deployment only when there are no other pods returned
// from the client.
func TestPodDeletionEnqueuesRecreateDeployment(t *testing.T) {
	f := newFixture(t)

	foo := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
	foo.Spec.Strategy.Type = extensions.RecreateDeploymentStrategyType
	rs := newReplicaSet(foo, "foo-1", 1)
	pod := generatePodFromRS(rs)

	f.dLister = append(f.dLister, foo)
	f.rsLister = append(f.rsLister, rs)
	f.objects = append(f.objects, foo, rs)

	c, informers := f.newController()
	enqueued := false
	c.enqueueDeployment = func(d *extensions.Deployment) {
		if d.Name == "foo" {
			enqueued = true
		}
	}
	stopCh := make(chan struct{})
	defer close(stopCh)
	informers.Start(stopCh)

	c.deletePod(pod)

	if !enqueued {
		t.Errorf("expected deployment %q to be queued after pod deletion", foo.Name)
	}
}
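
// The behavior exercised by the two pod-deletion tests above corresponds roughly
// to a handler of this shape (a sketch, not the controller's exact code; the
// getDeploymentForPod and listPods helpers are hypothetical names): a Recreate
// deployment is enqueued only once its last pod is gone, so the rollout can
// proceed to create the new ReplicaSet's pods.
func (dc *DeploymentController) deletePodSketch(obj interface{}) {
	pod, ok := obj.(*v1.Pod)
	if !ok {
		return
	}
	d := dc.getDeploymentForPod(pod) // hypothetical lookup helper
	if d == nil || d.Spec.Strategy.Type != extensions.RecreateDeploymentStrategyType {
		return
	}
	podList, err := dc.listPods(d) // hypothetical: pods matching d's selector
	if err == nil && len(podList.Items) == 0 {
		dc.enqueueDeployment(d)
	}
}
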
func (f *fixture) run(deploymentName string) {
	c, informers := f.newController()
	stopCh := make(chan struct{})
	defer close(stopCh)
	informers.Start(stopCh)

	err := c.syncDeployment(deploymentName)
	if err != nil {
		f.t.Errorf("error syncing deployment: %v", err)
	}

	actions := filterInformerActions(f.client.Actions())
	for i, action := range actions {
		if len(f.actions) < i+1 {
			f.t.Errorf("%d unexpected actions: %+v", len(actions)-len(f.actions), actions[i:])
			break
		}

		expectedAction := f.actions[i]
		if !expectedAction.Matches(action.GetVerb(), action.GetResource().Resource) {
			f.t.Errorf("Expected\n\t%#v\ngot\n\t%#v", expectedAction, action)
			continue
		}
	}

	if len(f.actions) > len(actions) {
		f.t.Errorf("%d additional expected actions:%+v", len(f.actions)-len(actions), f.actions[len(actions):])
	}
}
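
// The fixture type itself is not part of this listing; judging from the fields
// referenced in the tests and in run above, a compatible definition would be
// roughly the following (field types inferred from usage, not copied from the
// source):
type fixture struct {
	t *testing.T

	client *fake.Clientset
	// objects are preloaded into the fake clientset.
	objects []runtime.Object

	// Objects to preload into the informer indexers.
	dLister   []*extensions.Deployment
	rsLister  []*extensions.ReplicaSet
	podLister []*v1.Pod

	// actions are the client actions expected to happen during run().
	actions []core.Action
}

func newFixture(t *testing.T) *fixture {
	return &fixture{t: t}
}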
// newController builds a DeploymentController and informer factory from the
// fixture's preloaded objects and listers; run and the pod-deletion tests
// above depend on it.
func (f *fixture) newController() (*DeploymentController, informers.SharedInformerFactory) {
	f.client = fake.NewSimpleClientset(f.objects...)
	informers := informers.NewSharedInformerFactory(f.client, nil, controller.NoResyncPeriodFunc())
	c := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), f.client)
	c.eventRecorder = &record.FakeRecorder{}
	c.dListerSynced = alwaysReady
	c.rsListerSynced = alwaysReady
	c.podListerSynced = alwaysReady
	for _, d := range f.dLister {
		c.dLister.Indexer.Add(d)
	}
	for _, rs := range f.rsLister {
		c.rsLister.Indexer.Add(rs)
	}
	for _, pod := range f.podLister {
		c.podLister.Indexer.Add(pod)
	}
	return c, informers
}
func rmSetup(t *testing.T, stopCh chan struct{}, enableGarbageCollector bool) (*httptest.Server, *replication.ReplicationManager, cache.SharedIndexInformer, clientset.Interface) {
	masterConfig := framework.NewIntegrationTestMasterConfig()
	_, s := framework.RunAMaster(masterConfig)

	config := restclient.Config{Host: s.URL}
	clientSet, err := clientset.NewForConfig(&config)
	if err != nil {
		t.Fatalf("Error in create clientset: %v", err)
	}
	resyncPeriod := 12 * time.Hour

	informers := informers.NewSharedInformerFactory(clientSet, nil, resyncPeriod)
	podInformer := informers.Pods().Informer()
	rcInformer := informers.ReplicationControllers().Informer()
	rm := replication.NewReplicationManager(podInformer, rcInformer, clientSet, replication.BurstReplicas, 4096, enableGarbageCollector)
	informers.Start(stopCh)

	return s, rm, podInformer, clientSet
}
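
// A sketch of how rmSetup is typically consumed (the test name and worker
// count here are illustrative, not from the original):
func TestRCSetupSketch(t *testing.T) {
	stopCh := make(chan struct{})
	defer close(stopCh)
	s, rm, podInformer, clientSet := rmSetup(t, stopCh, false)
	defer s.Close()
	go rm.Run(5, stopCh)
	// Create ReplicationControllers with clientSet and assert on the pods
	// observed through podInformer.
	_, _ = podInformer, clientSet
}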
Example #7
func TestDeploymentController_cleanupDeployment(t *testing.T) {
	selector := map[string]string{"foo": "bar"}

	tests := []struct {
		oldRSs               []*extensions.ReplicaSet
		revisionHistoryLimit int32
		expectedDeletions    int
	}{
		{
			oldRSs: []*extensions.ReplicaSet{
				newRSWithStatus("foo-1", 0, 0, selector),
				newRSWithStatus("foo-2", 0, 0, selector),
				newRSWithStatus("foo-3", 0, 0, selector),
			},
			revisionHistoryLimit: 1,
			expectedDeletions:    2,
		},
		{
			// Only delete the replica set with Spec.Replicas = Status.Replicas = 0.
			oldRSs: []*extensions.ReplicaSet{
				newRSWithStatus("foo-1", 0, 0, selector),
				newRSWithStatus("foo-2", 0, 1, selector),
				newRSWithStatus("foo-3", 1, 0, selector),
				newRSWithStatus("foo-4", 1, 1, selector),
			},
			revisionHistoryLimit: 0,
			expectedDeletions:    1,
		},

		{
			oldRSs: []*extensions.ReplicaSet{
				newRSWithStatus("foo-1", 0, 0, selector),
				newRSWithStatus("foo-2", 0, 0, selector),
			},
			revisionHistoryLimit: 0,
			expectedDeletions:    2,
		},
		{
			oldRSs: []*extensions.ReplicaSet{
				newRSWithStatus("foo-1", 1, 1, selector),
				newRSWithStatus("foo-2", 1, 1, selector),
			},
			revisionHistoryLimit: 0,
			expectedDeletions:    0,
		},
	}

	for i := range tests {
		test := tests[i]
		fake := &fake.Clientset{}
		informers := informers.NewSharedInformerFactory(fake, nil, controller.NoResyncPeriodFunc())
		controller := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), fake)

		controller.eventRecorder = &record.FakeRecorder{}
		controller.dListerSynced = alwaysReady
		controller.rsListerSynced = alwaysReady
		controller.podListerSynced = alwaysReady
		for _, rs := range test.oldRSs {
			controller.rsLister.Indexer.Add(rs)
		}

		stopCh := make(chan struct{})
		defer close(stopCh)
		informers.Start(stopCh)

		d := newDeployment("foo", 1, &test.revisionHistoryLimit, nil, nil, map[string]string{"foo": "bar"})
		controller.cleanupDeployment(test.oldRSs, d)

		gotDeletions := 0
		for _, action := range fake.Actions() {
			if "delete" == action.GetVerb() {
				gotDeletions++
			}
		}
		if gotDeletions != test.expectedDeletions {
			t.Errorf("expect %v old replica sets been deleted, but got %v", test.expectedDeletions, gotDeletions)
			continue
		}
	}
}
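
// newRSWithStatus is not defined in this listing. A minimal sketch consistent
// with the calls above, assuming the internal extensions types where
// Spec.Replicas is a plain int32 (the versioned API uses *int32):
func newRSWithStatus(name string, specReplicas, statusReplicas int32, selector map[string]string) *extensions.ReplicaSet {
	return &extensions.ReplicaSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: metav1.NamespaceDefault,
			Labels:    selector,
		},
		Spec: extensions.ReplicaSetSpec{
			Replicas: specReplicas,
			Selector: &metav1.LabelSelector{MatchLabels: selector},
		},
		Status: extensions.ReplicaSetStatus{Replicas: statusReplicas},
	}
}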
Example #8
// startServiceAccountTestServer returns a started server
// It is the responsibility of the caller to ensure the returned stopFunc is called
func startServiceAccountTestServer(t *testing.T) (*clientset.Clientset, restclient.Config, func()) {
	// Listener
	h := &framework.MasterHolder{Initialized: make(chan struct{})}
	apiServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		<-h.Initialized
		h.M.GenericAPIServer.Handler.ServeHTTP(w, req)
	}))

	// Anonymous client config
	clientConfig := restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}}
	// Root client
	// TODO: remove rootClient after we refactor pkg/admission to use the clientset.
	rootClientset := clientset.NewForConfigOrDie(&restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}, BearerToken: rootToken})
	internalRootClientset := internalclientset.NewForConfigOrDie(&restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}, BearerToken: rootToken})
	// Set up two authenticators:
	// 1. A token authenticator that maps the rootToken to the "root" user
	// 2. A ServiceAccountToken authenticator that validates ServiceAccount tokens
	rootTokenAuth := authenticator.TokenFunc(func(token string) (user.Info, bool, error) {
		if token == rootToken {
			return &user.DefaultInfo{Name: rootUserName}, true, nil
		}
		return nil, false, nil
	})
	serviceAccountKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	serviceAccountTokenGetter := serviceaccountcontroller.NewGetterFromClient(rootClientset)
	serviceAccountTokenAuth := serviceaccount.JWTTokenAuthenticator([]interface{}{&serviceAccountKey.PublicKey}, true, serviceAccountTokenGetter)
	authenticator := union.New(
		bearertoken.New(rootTokenAuth),
		bearertoken.New(serviceAccountTokenAuth),
	)

	// Set up a stub authorizer:
	// 1. The "root" user is allowed to do anything
	// 2. ServiceAccounts named "ro" are allowed read-only operations in their namespace
	// 3. ServiceAccounts named "rw" are allowed any operation in their namespace
	authorizer := authorizer.AuthorizerFunc(func(attrs authorizer.Attributes) (bool, string, error) {
		username := ""
		if user := attrs.GetUser(); user != nil {
			username = user.GetName()
		}
		ns := attrs.GetNamespace()

		// If the user is "root"...
		if username == rootUserName {
			// allow them to do anything
			return true, "", nil
		}

		// If the user is a service account...
		if serviceAccountNamespace, serviceAccountName, err := serviceaccount.SplitUsername(username); err == nil {
			// Limit them to their own namespace
			if serviceAccountNamespace == ns {
				switch serviceAccountName {
				case readOnlyServiceAccountName:
					if attrs.IsReadOnly() {
						return true, "", nil
					}
				case readWriteServiceAccountName:
					return true, "", nil
				}
			}
		}

		return false, fmt.Sprintf("User %s is denied (ns=%s, readonly=%v, resource=%s)", username, ns, attrs.IsReadOnly(), attrs.GetResource()), nil
	})

	// Set up admission plugin to auto-assign serviceaccounts to pods
	serviceAccountAdmission := serviceaccountadmission.NewServiceAccount(internalRootClientset)

	masterConfig := framework.NewMasterConfig()
	masterConfig.GenericConfig.EnableIndex = true
	masterConfig.GenericConfig.Authenticator = authenticator
	masterConfig.GenericConfig.Authorizer = authorizer
	masterConfig.GenericConfig.AdmissionControl = serviceAccountAdmission
	framework.RunAMasterUsingServer(masterConfig, apiServer, h)

	// Start the service account and service account token controllers
	stopCh := make(chan struct{})
	tokenController := serviceaccountcontroller.NewTokensController(rootClientset, serviceaccountcontroller.TokensControllerOptions{TokenGenerator: serviceaccount.JWTTokenGenerator(serviceAccountKey)})
	go tokenController.Run(1, stopCh)

	informers := informers.NewSharedInformerFactory(rootClientset, nil, controller.NoResyncPeriodFunc())
	serviceAccountController := serviceaccountcontroller.NewServiceAccountsController(informers.ServiceAccounts(), informers.Namespaces(), rootClientset, serviceaccountcontroller.DefaultServiceAccountsControllerOptions())
	informers.Start(stopCh)
	go serviceAccountController.Run(5, stopCh)
	// Start the admission plugin reflectors
	serviceAccountAdmission.Run()

	stop := func() {
		close(stopCh)
		serviceAccountAdmission.Stop()
		apiServer.Close()
	}

	return rootClientset, clientConfig, stop
}
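
// A sketch of how a caller uses the returned values: copy the anonymous config
// and set a bearer token to act as a particular ServiceAccount. The token
// string below is a placeholder; real tests read it from the ServiceAccount's
// token secret via rootClientset.
func TestServiceAccountSketch(t *testing.T) {
	rootClientset, anonConfig, stopFunc := startServiceAccountTestServer(t)
	defer stopFunc()
	_ = rootClientset

	saConfig := anonConfig
	saConfig.BearerToken = "<token-from-serviceaccount-secret>" // placeholder
	saClient, err := clientset.NewForConfig(&saConfig)
	if err != nil {
		t.Fatalf("error creating ServiceAccount client: %v", err)
	}
	// Requests made with saClient are now subject to the stub authorizer's
	// read-only / read-write rules above.
	_ = saClient
}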
Example #9
// 1.2 code gets:
// 	quota_test.go:95: Took 4.218619579s to scale up without quota
// 	quota_test.go:199: unexpected error: timed out waiting for the condition, ended with 342 pods (1 minute)
// 1.3+ code gets:
// 	quota_test.go:100: Took 4.196205966s to scale up without quota
// 	quota_test.go:115: Took 12.021640372s to scale up with quota
func TestQuota(t *testing.T) {
	// Set up a master
	h := &framework.MasterHolder{Initialized: make(chan struct{})}
	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		<-h.Initialized
		h.M.GenericAPIServer.Handler.ServeHTTP(w, req)
	}))
	defer s.Close()

	admissionCh := make(chan struct{})
	clientset := clientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
	internalClientset := internalclientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
	admission, err := resourcequota.NewResourceQuota(quotainstall.NewRegistry(nil, nil), 5, admissionCh)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	admission.(kubeadmission.WantsInternalClientSet).SetInternalClientSet(internalClientset)
	defer close(admissionCh)

	masterConfig := framework.NewIntegrationTestMasterConfig()
	masterConfig.GenericConfig.AdmissionControl = admission
	framework.RunAMasterUsingServer(masterConfig, s, h)

	ns := framework.CreateTestingNamespace("quotaed", s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)
	ns2 := framework.CreateTestingNamespace("non-quotaed", s, t)
	defer framework.DeleteTestingNamespace(ns2, s, t)

	controllerCh := make(chan struct{})
	defer close(controllerCh)

	informers := informers.NewSharedInformerFactory(clientset, nil, controller.NoResyncPeriodFunc())
	podInformer := informers.Pods().Informer()
	rcInformer := informers.ReplicationControllers().Informer()
	rm := replicationcontroller.NewReplicationManager(podInformer, rcInformer, clientset, replicationcontroller.BurstReplicas, 4096, false)
	rm.SetEventRecorder(&record.FakeRecorder{})
	informers.Start(controllerCh)
	go rm.Run(3, controllerCh)

	resourceQuotaRegistry := quotainstall.NewRegistry(clientset, nil)
	groupKindsToReplenish := []schema.GroupKind{
		api.Kind("Pod"),
	}
	resourceQuotaControllerOptions := &resourcequotacontroller.ResourceQuotaControllerOptions{
		KubeClient:                clientset,
		ResyncPeriod:              controller.NoResyncPeriodFunc,
		Registry:                  resourceQuotaRegistry,
		GroupKindsToReplenish:     groupKindsToReplenish,
		ReplenishmentResyncPeriod: controller.NoResyncPeriodFunc,
		ControllerFactory:         resourcequotacontroller.NewReplenishmentControllerFactoryFromClient(clientset),
	}
	go resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions).Run(2, controllerCh)

	startTime := time.Now()
	scale(t, ns2.Name, clientset)
	endTime := time.Now()
	t.Logf("Took %v to scale up without quota", endTime.Sub(startTime))

	quota := &v1.ResourceQuota{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "quota",
			Namespace: ns.Name,
		},
		Spec: v1.ResourceQuotaSpec{
			Hard: v1.ResourceList{
				v1.ResourcePods: resource.MustParse("1000"),
			},
		},
	}
	waitForQuota(t, quota, clientset)

	startTime = time.Now()
	scale(t, "quotaed", clientset)
	endTime = time.Now()
	t.Logf("Took %v to scale up with quota", endTime.Sub(startTime))
}
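
// scale and waitForQuota are not shown here. A sketch of scale consistent with
// its use above, assuming this era's clientset API (Create/Get on
// Core().ReplicationControllers) and a wait.Poll helper; the RC name, image,
// replica count, and timeout are illustrative, not the original's:
func scale(t *testing.T, namespace string, clientset *clientset.Clientset) {
	target := int32(100)
	rc := &v1.ReplicationController{
		ObjectMeta: metav1.ObjectMeta{Name: "host", Namespace: namespace},
		Spec: v1.ReplicationControllerSpec{
			Replicas: &target,
			Selector: map[string]string{"name": "host"},
			Template: &v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"name": "host"}},
				Spec: v1.PodSpec{
					Containers: []v1.Container{{Name: "pause", Image: "kubernetes/pause"}},
				},
			},
		},
	}
	if _, err := clientset.Core().ReplicationControllers(namespace).Create(rc); err != nil {
		t.Fatalf("unexpected error creating replication controller: %v", err)
	}
	// Poll until the controller reports the target number of replicas.
	if err := wait.Poll(time.Second, 2*time.Minute, func() (bool, error) {
		current, err := clientset.Core().ReplicationControllers(namespace).Get(rc.Name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return current.Status.Replicas == target, nil
	}); err != nil {
		t.Fatalf("error waiting for scale up: %v", err)
	}
}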