Example #1
func TestLimitRangerIgnoresSubresource(t *testing.T) {
	client := testclient.NewSimpleFake()
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
	handler := &limitRanger{
		Handler:   admission.NewHandler(admission.Create, admission.Update),
		client:    client,
		limitFunc: Limit,
		indexer:   indexer,
	}

	limitRange := validLimitRangeNoDefaults()
	testPod := validPod("testPod", 1, api.ResourceRequirements{})

	indexer.Add(&limitRange)
	err := handler.Admit(admission.NewAttributesRecord(&testPod, "Pod", limitRange.Namespace, "pods", "", admission.Update, nil))
	if err == nil {
		t.Errorf("Expected an error since the pod did not specify resource limits in its update call")
	}

	err = handler.Admit(admission.NewAttributesRecord(&testPod, "Pod", limitRange.Namespace, "pods", "status", admission.Update, nil))
	if err != nil {
		t.Errorf("Should have ignored calls to any subresource of pod %v", err)
	}

}
Example #2
// NewReadOnlyClusterPolicyCache returns a cache of cluster policies kept up to
// date by a reflector that watches the given registry across all namespaces.
func NewReadOnlyClusterPolicyCache(registry clusterpolicyregistry.WatchingRegistry) readOnlyClusterPolicyCache {
	ctx := kapi.WithNamespace(kapi.NewContext(), kapi.NamespaceAll)

	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})

	reflector := cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return registry.ListClusterPolicies(ctx, labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return registry.WatchClusterPolicies(ctx, labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&authorizationapi.ClusterPolicy{},
		indexer,
		2*time.Minute,
	)

	return readOnlyClusterPolicyCache{
		registry:  registry,
		indexer:   indexer,
		reflector: *reflector,

		keyFunc: cache.MetaNamespaceKeyFunc,
	}
}
Example #3
func TestAdmissionIgnoresSubresources(t *testing.T) {
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
	handler := createResourceQuota(&testclient.Fake{}, indexer)

	quota := &api.ResourceQuota{}
	quota.Name = "quota"
	quota.Namespace = "test"
	quota.Status = api.ResourceQuotaStatus{
		Hard: api.ResourceList{},
		Used: api.ResourceList{},
	}
	quota.Status.Hard[api.ResourceMemory] = resource.MustParse("2Gi")
	quota.Status.Used[api.ResourceMemory] = resource.MustParse("1Gi")

	indexer.Add(quota)

	newPod := &api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "123", Namespace: quota.Namespace},
		Spec: api.PodSpec{
			Volumes:    []api.Volume{{Name: "vol"}},
			Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("100m", "2Gi")}},
		}}

	err := handler.Admit(admission.NewAttributesRecord(newPod, "Pod", newPod.Namespace, "pods", "", admission.Create, nil))
	if err == nil {
		t.Errorf("Expected an error because the pod exceeded allowed quota")
	}

	err = handler.Admit(admission.NewAttributesRecord(newPod, "Pod", newPod.Namespace, "pods", "subresource", admission.Create, nil))
	if err != nil {
		t.Errorf("Did not expect an error because the action went to a subresource: %v", err)
	}

}
Example #4
// NewIndexerInformer returns a cache.Indexer and a controller for populating the index
// while also providing event notifications. You should only use the returned
// cache.Indexer for Get/List operations; Add/Modify/Delete calls will cause the
// event notifications to be faulty.
//
// Parameters:
//  * lw provides the list and watch functions for the source of the resource
//    you want to be informed of.
//  * objType is an object of the type that you expect to receive.
//  * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
//    calls, even if nothing changed). Otherwise, re-list will be delayed as
//    long as possible (until the upstream source closes the watch or times out,
//    or you stop the controller).
//  * h is the object you want notifications sent to.
//
func NewIndexerInformer(
	lw cache.ListerWatcher,
	objType runtime.Object,
	resyncPeriod time.Duration,
	h ResourceEventHandler,
	indexers cache.Indexers,
) (cache.Indexer, *Controller) {
	// This will hold the client state, as we know it.
	clientState := cache.NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)

	// This will hold incoming changes. Note how we pass clientState in as a
	// KeyLister, that way resync operations will result in the correct set
	// of update/delete deltas.
	fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, clientState)

	cfg := &Config{
		Queue:            fifo,
		ListerWatcher:    lw,
		ObjectType:       objType,
		FullResyncPeriod: resyncPeriod,
		RetryOnError:     false,

		Process: func(obj interface{}) error {
			// from oldest to newest
			for _, d := range obj.(cache.Deltas) {
				switch d.Type {
				case cache.Sync, cache.Added, cache.Updated:
					if old, exists, err := clientState.Get(d.Object); err == nil && exists {
						if err := clientState.Update(d.Object); err != nil {
							return err
						}
						h.OnUpdate(old, d.Object)
					} else {
						if err := clientState.Add(d.Object); err != nil {
							return err
						}
						h.OnAdd(d.Object)
					}
				case cache.Deleted:
					if err := clientState.Delete(d.Object); err != nil {
						return err
					}
					h.OnDelete(d.Object)
				}
			}
			return nil
		},
	}
	return clientState, New(cfg)
}
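A minimal sketch of wiring the returned pair together; the pod ListWatch, the handler functions, and the stop channel below are illustrative assumptions, not code from the surrounding package:

func runPodIndexerInformer(client *client.Client, stopCh <-chan struct{}) cache.Indexer {
	lw := cache.NewListWatchFromClient(client, "pods", api.NamespaceAll, fields.Everything())
	indexer, controller := NewIndexerInformer(
		lw,
		&api.Pod{},
		30*time.Second,
		ResourceEventHandlerFuncs{
			AddFunc:    func(obj interface{}) { /* pod added */ },
			UpdateFunc: func(oldObj, newObj interface{}) { /* pod updated or resynced */ },
			DeleteFunc: func(obj interface{}) { /* pod deleted */ },
		},
		cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
	)
	go controller.Run(stopCh)
	// Once the cache has synced, the indexer answers namespace-scoped queries, e.g.
	//   indexer.Index("namespace", &api.Pod{ObjectMeta: api.ObjectMeta{Namespace: "default"}})
	return indexer
}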
Example #5
// NewCachedServiceAccessor returns a service accessor that can answer queries about services.
// It uses a backing cache to make PortalIP lookups efficient.
func NewCachedServiceAccessor(client *client.Client, stopCh <-chan struct{}) ServiceAccessor {
	lw := cache.NewListWatchFromClient(client, "services", api.NamespaceAll, fields.Everything())
	store := cache.NewIndexer(cache.MetaNamespaceKeyFunc, map[string]cache.IndexFunc{
		"portalIP":  indexServiceByPortalIP, // for reverse lookups
		"namespace": cache.MetaNamespaceIndexFunc,
	})
	reflector := cache.NewReflector(lw, &api.Service{}, store, 2*time.Minute)
	if stopCh != nil {
		reflector.RunUntil(stopCh)
	} else {
		reflector.Run()
	}
	return &cachedServiceAccessor{
		reflector: reflector,
		store:     store,
	}
}
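The index function referenced above is not shown in this snippet; a plausible sketch, assuming the (pre-ClusterIP) PortalIP field on the service spec:

func indexServiceByPortalIP(obj interface{}) ([]string, error) {
	svc, ok := obj.(*api.Service)
	if !ok {
		return nil, fmt.Errorf("object is not a Service: %#v", obj)
	}
	// Key each service by its portal IP so the accessor can resolve an IP back
	// to a service with a single store.Index("portalIP", ...) lookup.
	return []string{svc.Spec.PortalIP}, nil
}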
Example #6
// NewDataSet returns a DataSet over the specified items
func NewDataSet(buildConfigs []*buildapi.BuildConfig, builds []*buildapi.Build) DataSet {
	buildConfigStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
	for _, buildConfig := range buildConfigs {
		buildConfigStore.Add(buildConfig)
	}

	buildIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		"buildConfig": BuildByBuildConfigIndexFunc,
	})
	for _, build := range builds {
		buildIndexer.Add(build)
	}

	return &dataSet{
		buildConfigStore: buildConfigStore,
		buildIndexer:     buildIndexer,
	}
}
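BuildByBuildConfigIndexFunc is defined elsewhere; a hedged sketch, assuming each Build carries a label naming its BuildConfig (the label key here is hypothetical):

func BuildByBuildConfigIndexFunc(obj interface{}) ([]string, error) {
	build, ok := obj.(*buildapi.Build)
	if !ok {
		return nil, fmt.Errorf("object is not a Build: %#v", obj)
	}
	// Key each build under namespace/configName so all builds belonging to a
	// given BuildConfig come back from one Index("buildConfig", ...) call.
	return []string{build.Namespace + "/" + build.Labels["buildconfig"]}, nil
}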
Example #7
// NewDataSet returns a DataSet over the specified items
func NewDataSet(deploymentConfigs []*deployapi.DeploymentConfig, deployments []*kapi.ReplicationController) DataSet {
	deploymentConfigStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
	for _, deploymentConfig := range deploymentConfigs {
		deploymentConfigStore.Add(deploymentConfig)
	}

	deploymentIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		"deploymentConfig": DeploymentByDeploymentConfigIndexFunc,
	})
	for _, deployment := range deployments {
		deploymentIndexer.Add(deployment)
	}

	return &dataSet{
		deploymentConfigStore: deploymentConfigStore,
		deploymentIndexer:     deploymentIndexer,
	}
}
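DeploymentByDeploymentConfigIndexFunc follows the same pattern; the sketch below assumes the owning config is recorded in an annotation, and the annotation key is hypothetical:

func DeploymentByDeploymentConfigIndexFunc(obj interface{}) ([]string, error) {
	deployment, ok := obj.(*kapi.ReplicationController)
	if !ok {
		return nil, fmt.Errorf("object is not a ReplicationController: %#v", obj)
	}
	configName := deployment.Annotations["deploymentConfig"] // hypothetical annotation key
	return []string{deployment.Namespace + "/" + configName}, nil
}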
Example #8
// NewGroupCache returns a GroupCache backed by a reflector that watches groups
// in the given registry and indexes them by user.
func NewGroupCache(groupRegistry groupregistry.Registry) *GroupCache {
	allNamespaceContext := kapi.WithNamespace(kapi.NewContext(), kapi.NamespaceAll)

	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{byUserIndexName: ByUserIndexKeys})
	reflector := cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return groupRegistry.ListGroups(allNamespaceContext, labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return groupRegistry.WatchGroups(allNamespaceContext, labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&userapi.Group{},
		indexer,
		// TODO this was chosen via copy/paste.  If or when we choose to standardize these in some way, be sure to update this.
		2*time.Minute,
	)

	return &GroupCache{
		indexer:   indexer,
		reflector: reflector,
	}
}
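ByUserIndexKeys is not shown here; a minimal sketch, assuming userapi.Group exposes its members as a Users slice, returns one key per member so the cache can answer "which groups contain this user" directly:

func ByUserIndexKeys(obj interface{}) ([]string, error) {
	group, ok := obj.(*userapi.Group)
	if !ok {
		return nil, fmt.Errorf("object is not a Group: %#v", obj)
	}
	// One index key per member places the group in every member's bucket.
	return group.Users, nil
}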
Example #9
// NewPersistentVolumeOrderedIndex returns a persistentVolumeOrderedIndex whose
// backing indexer groups volumes by their access modes.
func NewPersistentVolumeOrderedIndex() *persistentVolumeOrderedIndex {
	return &persistentVolumeOrderedIndex{
		cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"accessmodes": accessModesIndexFunc}),
	}
}
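accessModesIndexFunc is defined elsewhere; a sketch under the assumption that volumes are grouped by a canonical string of their access modes:

func accessModesIndexFunc(obj interface{}) ([]string, error) {
	pv, ok := obj.(*api.PersistentVolume)
	if !ok {
		return []string{}, fmt.Errorf("object is not a PersistentVolume: %#v", obj)
	}
	// Join the declared modes into one key, e.g. "ReadWriteOnce/ReadOnlyMany",
	// so volumes with identical access-mode sets land in the same bucket.
	modes := make([]string, 0, len(pv.Spec.AccessModes))
	for _, mode := range pv.Spec.AccessModes {
		modes = append(modes, string(mode))
	}
	return []string{strings.Join(modes, "/")}, nil
}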