Example no. 1
// NewOriginResourceQuota creates a new OriginResourceQuota admission plugin that handles admission of
// origin resources against resource quota.
func NewOriginResourceQuota(kClient clientset.Interface) admission.Interface {
	// defer initialization of the upstream controller until the OpenShift client is set
	return &originQuotaAdmission{
		Handler: admission.NewHandler(admission.Create, admission.Update),
		kClient: kClient,
	}
}
Example no. 2
// NewImagePolicyWebhook creates a new imagePolicyWebhook from the provided config file.
// The config file is specified by --admission-controller-config-file and has the
// following format for a webhook:
//
//   {
//     "imagePolicy": {
//        "kubeConfigFile": "path/to/kubeconfig/for/backend",
//        "allowTTL": 30,           # time in s to cache approval
//        "denyTTL": 30,            # time in s to cache denial
//        "retryBackoff": 500,      # time in ms to wait between retries
//        "defaultAllow": true      # determines behavior if the webhook backend fails
//     }
//   }
//
// The config file may be json or yaml.
//
// The kubeconfig property refers to another file in the kubeconfig format which
// specifies how to connect to the webhook backend.
//
// The kubeconfig's cluster field is used to refer to the remote service, user refers to the API server's webhook configuration.
//
//     # clusters refers to the remote service.
//     clusters:
//     - name: name-of-remote-imagepolicy-service
//       cluster:
//         certificate-authority: /path/to/ca.pem      # CA for verifying the remote service.
//         server: https://images.example.com/policy # URL of remote service to query. Must use 'https'.
//
//     # users refers to the API server's webhook configuration.
//     users:
//     - name: name-of-api-server
//       user:
//         client-certificate: /path/to/cert.pem # cert for the webhook plugin to use
//         client-key: /path/to/key.pem          # key matching the cert
//
// For additional HTTP configuration, refer to the kubeconfig documentation
// http://kubernetes.io/v1.1/docs/user-guide/kubeconfig-file.html.
func NewImagePolicyWebhook(client clientset.Interface, configFile io.Reader) (admission.Interface, error) {
	var config AdmissionConfig
	d := yaml.NewYAMLOrJSONDecoder(configFile, 4096)
	err := d.Decode(&config)
	if err != nil {
		return nil, err
	}

	whConfig := config.ImagePolicyWebhook
	if err := normalizeWebhookConfig(&whConfig); err != nil {
		return nil, err
	}

	gw, err := webhook.NewGenericWebhook(whConfig.KubeConfigFile, groupVersions, whConfig.RetryBackoff)
	if err != nil {
		return nil, err
	}
	return &imagePolicyWebhook{
		Handler:       admission.NewHandler(admission.Create, admission.Update),
		webhook:       gw,
		responseCache: cache.NewLRUExpireCache(1024),
		allowTTL:      whConfig.AllowTTL,
		denyTTL:       whConfig.DenyTTL,
		defaultAllow:  whConfig.DefaultAllow,
	}, nil
}
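A minimal wiring sketch, assuming it lives in the same package as NewImagePolicyWebhook and that "os" is imported; the helper name, and feeding the path of --admission-controller-config-file straight through, are assumptions rather than part of the original code.
// newImagePolicyFromFile is a hypothetical helper that opens the admission
// controller config file and hands it to NewImagePolicyWebhook as an io.Reader.
func newImagePolicyFromFile(client clientset.Interface, path string) (admission.Interface, error) {
	f, err := os.Open(path) // e.g. the value passed to --admission-controller-config-file
	if err != nil {
		return nil, err
	}
	defer f.Close()
	return NewImagePolicyWebhook(client, f)
}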
Example no. 3
// TestAdmitEnforceQuotaConstraints verifies that if a quota tracks a particular resource, that resource must be
// specified on the pod.  In this case, we create a quota that tracks cpu request, memory request, and memory limit.
// We ensure that a pod that does not specify a memory limit fails admission.
func TestAdmitEnforceQuotaConstraints(t *testing.T) {
	resourceQuota := &api.ResourceQuota{
		ObjectMeta: api.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
		Status: api.ResourceQuotaStatus{
			Hard: api.ResourceList{
				api.ResourceCPU:          resource.MustParse("3"),
				api.ResourceMemory:       resource.MustParse("100Gi"),
				api.ResourceLimitsMemory: resource.MustParse("200Gi"),
				api.ResourcePods:         resource.MustParse("5"),
			},
			Used: api.ResourceList{
				api.ResourceCPU:          resource.MustParse("1"),
				api.ResourceMemory:       resource.MustParse("50Gi"),
				api.ResourceLimitsMemory: resource.MustParse("100Gi"),
				api.ResourcePods:         resource.MustParse("3"),
			},
		},
	}
	kubeClient := fake.NewSimpleClientset(resourceQuota)
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
	handler := &quotaAdmission{
		Handler:  admission.NewHandler(admission.Create, admission.Update),
		client:   kubeClient,
		indexer:  indexer,
		registry: install.NewRegistry(kubeClient),
	}
	handler.indexer.Add(resourceQuota)
	newPod := validPod("not-allowed-pod", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("200m", "")))
	err := handler.Admit(admission.NewAttributesRecord(newPod, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, nil))
	if err == nil {
		t.Errorf("Expected an error because the pod does not specify a memory limit")
	}
}
Example no. 4
// NewPlugin creates a new PSP admission plugin.
func NewPlugin(kclient clientset.Interface, strategyFactory psp.StrategyFactory, pspMatcher PSPMatchFn, failOnNoPolicies bool) *podSecurityPolicyPlugin {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	reflector := cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return kclient.Extensions().PodSecurityPolicies().List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return kclient.Extensions().PodSecurityPolicies().Watch(options)
			},
		},
		&extensions.PodSecurityPolicy{},
		store,
		0,
	)

	return &podSecurityPolicyPlugin{
		Handler:          admission.NewHandler(admission.Create, admission.Update),
		client:           kclient,
		strategyFactory:  strategyFactory,
		pspMatcher:       pspMatcher,
		failOnNoPolicies: failOnNoPolicies,

		store:     store,
		reflector: reflector,
	}
}
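Unlike NewLimitRanger in Example no. 5, NewPlugin builds a reflector but never starts it. Below is a hedged sketch of one way the caller could do so, reusing the strategy factory and matcher names from Example no. 28; whether starting the reflector is really the caller's job here is an assumption.
// newRunningPSPPlugin is a hypothetical helper that constructs the plugin as above
// and then starts its reflector so the backing store fills from the API server.
func newRunningPSPPlugin(kclient clientset.Interface) *podSecurityPolicyPlugin {
	plugin := NewPlugin(kclient, psp.NewSimpleStrategyFactory(), getMatchingPolicies, true)
	plugin.reflector.Run() // mirrors the reflector.Run() call in NewLimitRanger
	return plugin
}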
Example no. 5
// NewLimitRanger returns an object that enforces limits based on the supplied limit function
func NewLimitRanger(client clientset.Interface, actions LimitRangerActions) (admission.Interface, error) {
	liveLookupCache, err := lru.New(10000)
	if err != nil {
		return nil, err
	}

	lw := &cache.ListWatch{
		ListFunc: func(options api.ListOptions) (runtime.Object, error) {
			return client.Core().LimitRanges(api.NamespaceAll).List(options)
		},
		WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
			return client.Core().LimitRanges(api.NamespaceAll).Watch(options)
		},
	}
	indexer, reflector := cache.NewNamespaceKeyedIndexerAndReflector(lw, &api.LimitRange{}, 0)
	reflector.Run()

	if actions == nil {
		actions = &DefaultLimitRangerActions{}
	}

	return &limitRanger{
		Handler:         admission.NewHandler(admission.Create, admission.Update),
		client:          client,
		actions:         actions,
		indexer:         indexer,
		liveLookupCache: liveLookupCache,
		liveTTL:         time.Duration(30 * time.Second),
	}, nil
}
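Because a nil actions argument falls back to DefaultLimitRangerActions, the simplest call site needs nothing more than the client; a minimal sketch in which the wrapper name is made up:
// newDefaultLimitRanger is a hypothetical convenience wrapper that relies on the
// nil-actions fallback to DefaultLimitRangerActions shown above.
func newDefaultLimitRanger(client clientset.Interface) (admission.Interface, error) {
	return NewLimitRanger(client, nil)
}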
Example no. 6
// newPlugin creates a new admission plugin.
func newPlugin(kclient clientset.Interface) *claimDefaulterPlugin {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	reflector := cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				internalOptions := api.ListOptions{}
				v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil)
				return kclient.Storage().StorageClasses().List(internalOptions)
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				internalOptions := api.ListOptions{}
				v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil)
				return kclient.Storage().StorageClasses().Watch(internalOptions)
			},
		},
		&storage.StorageClass{},
		store,
		0,
	)

	return &claimDefaulterPlugin{
		Handler:   admission.NewHandler(admission.Create),
		client:    kclient,
		store:     store,
		reflector: reflector,
	}
}
Example no. 7
// NewSCCExecRestrictions creates a new admission controller that denies an exec operation on a privileged pod
func NewSCCExecRestrictions(client clientset.Interface) *sccExecRestrictions {
	return &sccExecRestrictions{
		Handler:             admission.NewHandler(admission.Connect),
		constraintAdmission: NewConstraint(client),
		client:              client,
	}
}
Example no. 8
func NewTestAdmission(lister *oscache.IndexerToSecurityContextConstraintsLister, kclient clientset.Interface) kadmission.Interface {
	return &constraint{
		Handler:   kadmission.NewHandler(kadmission.Create),
		client:    kclient,
		sccLister: lister,
	}
}
Example no. 9
// TestAdmitBestEffortQuotaLimitIgnoresBurstable validates that a besteffort-scoped quota does not match a
// burstable pod.
func TestAdmitBestEffortQuotaLimitIgnoresBurstable(t *testing.T) {
	resourceQuota := &api.ResourceQuota{
		ObjectMeta: api.ObjectMeta{Name: "quota-besteffort", Namespace: "test", ResourceVersion: "124"},
		Spec: api.ResourceQuotaSpec{
			Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeBestEffort},
		},
		Status: api.ResourceQuotaStatus{
			Hard: api.ResourceList{
				api.ResourcePods: resource.MustParse("5"),
			},
			Used: api.ResourceList{
				api.ResourcePods: resource.MustParse("3"),
			},
		},
	}
	kubeClient := fake.NewSimpleClientset(resourceQuota)
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
	handler := &quotaAdmission{
		Handler:  admission.NewHandler(admission.Create, admission.Update),
		client:   kubeClient,
		indexer:  indexer,
		registry: install.NewRegistry(kubeClient),
	}
	handler.indexer.Add(resourceQuota)
	newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", "")))
	err := handler.Admit(admission.NewAttributesRecord(newPod, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, nil))
	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}
	if len(kubeClient.Actions()) != 0 {
		t.Errorf("Expected no client actions because the incoming pod did not match best effort quota")
	}
}
Example no. 10
// TestAdmissionIgnoresSubresources verifies that the admission controller ignores subresources.
// It verifies that creation of a pod that would exceed quota is properly rejected.
// It verifies that a create operation against a subresource succeeds even though the pod would exceed quota.
func TestAdmissionIgnoresSubresources(t *testing.T) {
	resourceQuota := &api.ResourceQuota{}
	resourceQuota.Name = "quota"
	resourceQuota.Namespace = "test"
	resourceQuota.Status = api.ResourceQuotaStatus{
		Hard: api.ResourceList{},
		Used: api.ResourceList{},
	}
	resourceQuota.Status.Hard[api.ResourceMemory] = resource.MustParse("2Gi")
	resourceQuota.Status.Used[api.ResourceMemory] = resource.MustParse("1Gi")
	kubeClient := fake.NewSimpleClientset(resourceQuota)
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
	stopCh := make(chan struct{})
	defer close(stopCh)

	quotaAccessor, _ := newQuotaAccessor(kubeClient)
	quotaAccessor.indexer = indexer
	go quotaAccessor.Run(stopCh)
	evaluator := NewQuotaEvaluator(quotaAccessor, install.NewRegistry(nil, nil), nil, 5, stopCh)

	handler := &quotaAdmission{
		Handler:   admission.NewHandler(admission.Create, admission.Update),
		evaluator: evaluator,
	}
	indexer.Add(resourceQuota)
	newPod := validPod("123", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("", "")))
	err := handler.Admit(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, nil))
	if err == nil {
		t.Errorf("Expected an error because the pod exceeded allowed quota")
	}
	err = handler.Admit(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "subresource", admission.Create, nil))
	if err != nil {
		t.Errorf("Did not expect an error because the action went to a subresource: %v", err)
	}
}
Example no. 11
// NewExternalIPRanger creates a new admission plugin that checks external IPs against the supplied reject and admit CIDR lists.
func NewExternalIPRanger(reject, admit []*net.IPNet) *externalIPRanger {
	return &externalIPRanger{
		Handler: kadmission.NewHandler(kadmission.Create, kadmission.Update),
		reject:  reject,
		admit:   admit,
	}
}
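The reject and admit arguments are plain []*net.IPNet slices, so they can be built from CIDR strings with the standard library; a sketch in which the helper name and the CIDR values are made up:
// parseCIDRs is a hypothetical helper that turns CIDR strings into the
// []*net.IPNet slices NewExternalIPRanger expects.
func parseCIDRs(specs []string) ([]*net.IPNet, error) {
	var nets []*net.IPNet
	for _, s := range specs {
		_, ipnet, err := net.ParseCIDR(s)
		if err != nil {
			return nil, err
		}
		nets = append(nets, ipnet)
	}
	return nets, nil
}

// Example wiring:
//   reject, _ := parseCIDRs([]string{"0.0.0.0/0"})
//   admit, _ := parseCIDRs([]string{"172.29.0.0/16"})
//   plugin := NewExternalIPRanger(reject, admit)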
Example no. 12
// TestAdmissionIgnoresSubresources verifies that the admission controller ignores subresources.
// It verifies that creation of a pod that would exceed quota is properly rejected.
// It verifies that a create operation against a subresource succeeds even though the pod would exceed quota.
func TestAdmissionIgnoresSubresources(t *testing.T) {
	resourceQuota := &api.ResourceQuota{}
	resourceQuota.Name = "quota"
	resourceQuota.Namespace = "test"
	resourceQuota.Status = api.ResourceQuotaStatus{
		Hard: api.ResourceList{},
		Used: api.ResourceList{},
	}
	resourceQuota.Status.Hard[api.ResourceMemory] = resource.MustParse("2Gi")
	resourceQuota.Status.Used[api.ResourceMemory] = resource.MustParse("1Gi")
	kubeClient := fake.NewSimpleClientset(resourceQuota)
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
	handler := &quotaAdmission{
		Handler:  admission.NewHandler(admission.Create, admission.Update),
		client:   kubeClient,
		indexer:  indexer,
		registry: install.NewRegistry(kubeClient),
	}
	handler.indexer.Add(resourceQuota)
	newPod := validPod("123", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("", "")))
	err := handler.Admit(admission.NewAttributesRecord(newPod, api.Kind("Pod"), newPod.Namespace, newPod.Name, api.Resource("pods"), "", admission.Create, nil))
	if err == nil {
		t.Errorf("Expected an error because the pod exceeded allowed quota")
	}
	err = handler.Admit(admission.NewAttributesRecord(newPod, api.Kind("Pod"), newPod.Namespace, newPod.Name, api.Resource("pods"), "subresource", admission.Create, nil))
	if err != nil {
		t.Errorf("Did not expect an error because the action went to a subresource: %v", err)
	}
}
Example no. 13
func init() {
	admission.RegisterPlugin("OwnerReferencesPermissionEnforcement", func(client clientset.Interface, config io.Reader) (admission.Interface, error) {
		return &gcPermissionsEnforcement{
			Handler: admission.NewHandler(admission.Create, admission.Update),
		}, nil
	})
}
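The same registration hook fits any constructor in this collection that can be wrapped in the factory signature; a hedged sketch using NewDenyEscalatingExec from Example no. 26, where the plugin name string is an assumption:
func init() {
	admission.RegisterPlugin("DenyEscalatingExec", func(client clientset.Interface, config io.Reader) (admission.Interface, error) {
		// config is ignored: this plugin takes no file-based configuration.
		return NewDenyEscalatingExec(client), nil
	})
}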
Example no. 14
func createProvision(c client.Interface, store cache.Store) admission.Interface {
	return &provision{
		Handler: admission.NewHandler(admission.Create),
		client:  c,
		store:   store,
	}
}
Example no. 15
// TestAdmitExceedQuotaLimit verifies that a pod exceeding allowed usage is rejected during admission.
func TestAdmitExceedQuotaLimit(t *testing.T) {
	resourceQuota := &api.ResourceQuota{
		ObjectMeta: api.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
		Status: api.ResourceQuotaStatus{
			Hard: api.ResourceList{
				api.ResourceCPU:    resource.MustParse("3"),
				api.ResourceMemory: resource.MustParse("100Gi"),
				api.ResourcePods:   resource.MustParse("5"),
			},
			Used: api.ResourceList{
				api.ResourceCPU:    resource.MustParse("1"),
				api.ResourceMemory: resource.MustParse("50Gi"),
				api.ResourcePods:   resource.MustParse("3"),
			},
		},
	}
	kubeClient := fake.NewSimpleClientset(resourceQuota)
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
	evaluator, _ := newQuotaEvaluator(kubeClient, install.NewRegistry(kubeClient))
	evaluator.indexer = indexer
	evaluator.Run(5)
	handler := &quotaAdmission{
		Handler:   admission.NewHandler(admission.Create, admission.Update),
		evaluator: evaluator,
	}
	indexer.Add(resourceQuota)
	newPod := validPod("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")))
	err := handler.Admit(admission.NewAttributesRecord(newPod, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, nil))
	if err == nil {
		t.Errorf("Expected an error exceeding quota")
	}
}
Example no. 16
func TestLimitRangerCacheAndLRUExpiredMisses(t *testing.T) {
	liveLookupCache, err := lru.New(10000)
	if err != nil {
		t.Fatal(err)
	}

	limitRange := validLimitRangeNoDefaults()
	client := fake.NewSimpleClientset(&limitRange)
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
	handler := &limitRanger{
		Handler:         admission.NewHandler(admission.Create, admission.Update),
		client:          client,
		actions:         &DefaultLimitRangerActions{},
		indexer:         indexer,
		liveLookupCache: liveLookupCache,
	}

	testPod := validPod("testPod", 1, api.ResourceRequirements{})

	// add to the lru cache
	liveLookupCache.Add(limitRange.Namespace, liveLookupEntry{expiry: time.Now().Add(time.Duration(-30 * time.Second)), items: []*api.LimitRange{}})

	err = handler.Admit(admission.NewAttributesRecord(&testPod, api.Kind("Pod").WithVersion("version"), limitRange.Namespace, "testPod", api.Resource("pods").WithVersion("version"), "", admission.Update, nil))
	if err == nil {
		t.Errorf("Expected an error since the pod did not specify resource limits in its update call")
	}

	err = handler.Admit(admission.NewAttributesRecord(&testPod, api.Kind("Pod").WithVersion("version"), limitRange.Namespace, "testPod", api.Resource("pods").WithVersion("version"), "status", admission.Update, nil))
	if err != nil {
		t.Errorf("Should have ignored calls to any subresource of pod %v", err)
	}
}
Example no. 17
func NewTestAdmission(store cache.Store, kclient clientset.Interface) kadmission.Interface {
	return &constraint{
		Handler: kadmission.NewHandler(kadmission.Create),
		client:  kclient,
		store:   store,
	}
}
Example no. 18
func NewPodNodeSelector(client clientset.Interface, clusterNodeSelectors map[string]string) *podNodeSelector {
	return &podNodeSelector{
		Handler:              admission.NewHandler(admission.Create),
		client:               client,
		clusterNodeSelectors: clusterNodeSelectors,
	}
}
Example no. 19
// newClusterResourceOverride returns an admission controller that configurably overrides
// container resource requests and limits
func newClusterResourceOverride(client clientset.Interface, config io.Reader) (admission.Interface, error) {
	parsed, err := ReadConfig(config)
	if err != nil {
		glog.V(5).Infof("%s admission controller loaded with error: (%T) %[2]v", api.PluginName, err)
		return nil, err
	}
	if errs := validation.Validate(parsed); len(errs) > 0 {
		return nil, errs.ToAggregate()
	}
	glog.V(5).Infof("%s admission controller loaded with config: %v", api.PluginName, parsed)
	var internal *internalConfig
	if parsed != nil {
		internal = &internalConfig{
			limitCPUToMemoryRatio:     inf.NewDec(parsed.LimitCPUToMemoryPercent, 2),
			cpuRequestToLimitRatio:    inf.NewDec(parsed.CPURequestToLimitPercent, 2),
			memoryRequestToLimitRatio: inf.NewDec(parsed.MemoryRequestToLimitPercent, 2),
		}
	}

	limitRanger, err := limitranger.NewLimitRanger(client, wrapLimit)
	if err != nil {
		return nil, err
	}

	return &clusterResourceOverridePlugin{
		Handler:     admission.NewHandler(admission.Create),
		config:      internal,
		LimitRanger: limitRanger,
	}, nil
}
Example no. 20
func TestLimitRangerIgnoresSubresource(t *testing.T) {
	client := testclient.NewSimpleFake()
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
	handler := &limitRanger{
		Handler:   admission.NewHandler(admission.Create, admission.Update),
		client:    client,
		limitFunc: Limit,
		indexer:   indexer,
	}

	limitRange := validLimitRangeNoDefaults()
	testPod := validPod("testPod", 1, api.ResourceRequirements{})

	indexer.Add(&limitRange)
	err := handler.Admit(admission.NewAttributesRecord(&testPod, "Pod", limitRange.Namespace, "testPod", "pods", "", admission.Update, nil))
	if err == nil {
		t.Errorf("Expected an error since the pod did not specify resource limits in its update call")
	}

	err = handler.Admit(admission.NewAttributesRecord(&testPod, "Pod", limitRange.Namespace, "testPod", "pods", "status", admission.Update, nil))
	if err != nil {
		t.Errorf("Should have ignored calls to any subresource of pod %v", err)
	}

}
Example no. 21
func createResourceQuota(client client.Interface, indexer cache.Indexer) admission.Interface {
	return &quota{
		Handler: admission.NewHandler(admission.Create, admission.Update),
		client:  client,
		indexer: indexer,
	}
}
Example no. 22
func TestLimitRangerCacheAndLRUMisses(t *testing.T) {
	liveLookupCache, err := lru.New(10000)
	if err != nil {
		t.Fatal(err)
	}

	limitRange := validLimitRangeNoDefaults()
	client := fake.NewSimpleClientset(&limitRange)
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
	handler := &limitRanger{
		Handler:         admission.NewHandler(admission.Create, admission.Update),
		client:          client,
		limitFunc:       Limit,
		indexer:         indexer,
		liveLookupCache: liveLookupCache,
	}

	testPod := validPod("testPod", 1, api.ResourceRequirements{})

	err = handler.Admit(admission.NewAttributesRecord(&testPod, api.Kind("Pod"), limitRange.Namespace, "testPod", api.Resource("pods"), "", admission.Update, nil))
	if err == nil {
		t.Errorf("Expected an error since the pod did not specify resource limits in its update call")
	}

	err = handler.Admit(admission.NewAttributesRecord(&testPod, api.Kind("Pod"), limitRange.Namespace, "testPod", api.Resource("pods"), "status", admission.Update, nil))
	if err != nil {
		t.Errorf("Should have ignored calls to any subresource of pod %v", err)
	}
}
Example no. 23
func TestAdmission(t *testing.T) {
	privPod := validPod("privileged")
	priv := true
	privPod.Spec.Containers[0].SecurityContext = &api.SecurityContext{
		Privileged: &priv,
	}

	hostPIDPod := validPod("hostPID")
	hostPIDPod.Spec.SecurityContext = &api.PodSecurityContext{}
	hostPIDPod.Spec.SecurityContext.HostPID = true

	hostIPCPod := validPod("hostIPC")
	hostIPCPod.Spec.SecurityContext = &api.PodSecurityContext{}
	hostIPCPod.Spec.SecurityContext.HostIPC = true

	testCases := map[string]struct {
		pod          *api.Pod
		shouldAccept bool
	}{
		"priv": {
			shouldAccept: false,
			pod:          privPod,
		},
		"hostPID": {
			shouldAccept: false,
			pod:          hostPIDPod,
		},
		"hostIPC": {
			shouldAccept: false,
			pod:          hostIPCPod,
		},
		"non privileged": {
			shouldAccept: true,
			pod:          validPod("nonPrivileged"),
		},
	}

	// use the same code as NewDenyEscalatingExec, using the direct object though to allow testAdmission to
	// inject the client
	handler := &denyExec{
		Handler:    admission.NewHandler(admission.Connect),
		hostIPC:    true,
		hostPID:    true,
		privileged: true,
	}

	for _, tc := range testCases {
		testAdmission(t, tc.pod, handler, tc.shouldAccept)
	}

	// run with a permissive config and all cases should pass
	handler.privileged = false
	handler.hostPID = false
	handler.hostIPC = false

	for _, tc := range testCases {
		testAdmission(t, tc.pod, handler, true)
	}
}
Example no. 24
func newInitialResources(source dataSource, percentile int64, nsOnly bool) admission.Interface {
	return &initialResources{
		Handler:    admission.NewHandler(admission.Create),
		source:     source,
		percentile: percentile,
		nsOnly:     nsOnly,
	}
}
Example no. 25
// NewExternalIPRanger creates a new admission plugin that checks external IPs against the supplied reject and admit CIDR lists, with allowIngressIP optionally permitting ingress IPs.
func NewExternalIPRanger(reject, admit []*net.IPNet, allowIngressIP bool) *externalIPRanger {
	return &externalIPRanger{
		Handler:        kadmission.NewHandler(kadmission.Create, kadmission.Update),
		reject:         reject,
		admit:          admit,
		allowIngressIP: allowIngressIP,
	}
}
Example no. 26
// NewDenyEscalatingExec creates a new admission controller that denies exec operations on pods
// that use host IPC, host PID, or privileged containers.
func NewDenyEscalatingExec(client clientset.Interface) admission.Interface {
	return &denyExec{
		Handler:    admission.NewHandler(admission.Connect),
		client:     client,
		hostIPC:    true,
		hostPID:    true,
		privileged: true,
	}
}
Example no. 27
// NewDenyExecOnPrivileged creates a new admission controller that only checks the privileged
// option.  This is for legacy support of the DenyExecOnPrivileged admission controller.  Most
// of the time NewDenyEscalatingExec should be preferred.
func NewDenyExecOnPrivileged(client client.Interface) admission.Interface {
	return &denyExec{
		Handler:    admission.NewHandler(admission.Connect),
		client:     client,
		hostIPC:    false,
		hostPID:    false,
		privileged: true,
	}
}
Example no. 28
func NewTestAdmission(store cache.Store, kclient clientset.Interface) kadmission.Interface {
	return &podSecurityPolicyPlugin{
		Handler:         kadmission.NewHandler(kadmission.Create),
		client:          kclient,
		store:           store,
		strategyFactory: kpsp.NewSimpleStrategyFactory(),
		pspMatcher:      getMatchingPolicies,
	}
}
Example no. 29
func newLifecycleWithClock(c clientset.Interface, immortalNamespaces sets.String, clock utilcache.Clock) (admission.Interface, error) {
	forceLiveLookupCache := utilcache.NewLRUExpireCacheWithClock(100, clock)
	return &lifecycle{
		Handler:              admission.NewHandler(admission.Create, admission.Update, admission.Delete),
		client:               c,
		immortalNamespaces:   immortalNamespaces,
		forceLiveLookupCache: forceLiveLookupCache,
	}, nil
}
Example no. 30
// NewPodNodeConstraints creates a new admission plugin to prevent objects that contain pod templates
// from containing node bindings by name or selector based on role permissions.
func NewPodNodeConstraints(config *api.PodNodeConstraintsConfig) admission.Interface {
	plugin := podNodeConstraints{
		config:  config,
		Handler: admission.NewHandler(admission.Create, admission.Update),
	}
	if config != nil {
		plugin.selectorLabelBlacklist = sets.NewString(config.NodeSelectorLabelBlacklist...)
	}

	return &plugin
}
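A construction sketch; the blacklisted label is an assumption, standing in for whatever selector label a cluster uses to pin pods to nodes.
// newHostnameConstrainedPlugin is a hypothetical helper that blacklists the
// kubernetes.io/hostname selector label for objects containing pod templates.
func newHostnameConstrainedPlugin() admission.Interface {
	return NewPodNodeConstraints(&api.PodNodeConstraintsConfig{
		NodeSelectorLabelBlacklist: []string{"kubernetes.io/hostname"},
	})
}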