Example #1
// Create creates a scheduler and all support functions.
func (factory *ConfigFactory) Create() *scheduler.Config {
	// Watch and queue pods that need scheduling.
	podQueue := cache.NewFIFO()
	cache.NewReflector(factory.createUnassignedPodLW(), &api.Pod{}, podQueue).Run()

	// Watch and cache all running pods. Scheduler needs to find all pods
	// so it knows where it's safe to place a pod. Cache this locally.
	podCache := cache.NewStore()
	cache.NewReflector(factory.createAssignedPodLW(), &api.Pod{}, podCache).Run()

	// Watch minions.
	// Minions may be listed frequently, so provide a local up-to-date cache.
	minionCache := cache.NewStore()
	if false {
		// Disable this code until minions support watches.
		cache.NewReflector(factory.createMinionLW(), &api.Minion{}, minionCache).Run()
	} else {
		cache.NewPoller(factory.pollMinions, 10*time.Second, minionCache).Run()
	}

	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	minionLister := &storeToMinionLister{minionCache}

	algo := algorithm.NewGenericScheduler(
		[]algorithm.FitPredicate{
			// Fit is defined based on the absence of port conflicts.
			algorithm.PodFitsPorts,
			// Fit is determined by resource availability
			algorithm.NewResourceFitPredicate(minionLister),
			// Fit is determined by non-conflicting disk volumes
			algorithm.NoDiskConflict,
			// Fit is determined by node selector query
			algorithm.NewSelectorMatchPredicate(minionLister),
		},
		// Prioritize nodes by least requested utilization.
		algorithm.LeastRequestedPriority,
		&storeToPodLister{podCache}, r)

	podBackoff := podBackoff{
		perPodBackoff: map[string]*backoffEntry{},
		clock:         realClock{},
	}

	return &scheduler.Config{
		MinionLister: minionLister,
		Algorithm:    algo,
		Binder:       &binder{factory.Client},
		NextPod: func() *api.Pod {
			pod := podQueue.Pop().(*api.Pod)
			glog.V(2).Infof("About to try and schedule pod %v\n"+
				"\tknown minions: %v\n"+
				"\tknown scheduled pods: %v\n",
				pod.Name, minionCache.ContainedIDs(), podCache.ContainedIDs())
			return pod
		},
		Error: factory.makeDefaultErrorFunc(&podBackoff, podQueue),
	}
}
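The shape of this factory — a reflector keeps the FIFO filled while the scheduler's NextPod callback blocks on Pop — can be reduced to a dependency-free sketch. Everything below (the Pod struct, watchPods, the channel buffer) is illustrative, not part of the original API:

package main

import "fmt"

// Pod is a stand-in for api.Pod in this sketch.
type Pod struct{ Name string }

// watchPods simulates a reflector: it pushes unscheduled pods
// onto the queue as they appear, then closes it.
func watchPods(queue chan *Pod) {
	for _, name := range []string{"web-1", "web-2"} {
		queue <- &Pod{Name: name}
	}
	close(queue)
}

func main() {
	queue := make(chan *Pod, 16)
	go watchPods(queue)

	// nextPod mirrors scheduler.Config.NextPod: it blocks until a
	// pod needing scheduling is available.
	nextPod := func() (*Pod, bool) {
		pod, ok := <-queue
		return pod, ok
	}

	for {
		pod, ok := nextPod()
		if !ok {
			return
		}
		fmt.Printf("about to schedule pod %v\n", pod.Name)
	}
}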
Example #2
func TestModeler(t *testing.T) {
	table := []struct {
		queuedPods    []*api.Pod
		scheduledPods []*api.Pod
		assumedPods   []*api.Pod
		expectPods    names
	}{
		{
			queuedPods:    names{}.list(),
			scheduledPods: names{{"default", "foo"}, {"custom", "foo"}}.list(),
			assumedPods:   names{{"default", "foo"}}.list(),
			expectPods:    names{{"default", "foo"}, {"custom", "foo"}},
		}, {
			queuedPods:    names{}.list(),
			scheduledPods: names{{"default", "foo"}}.list(),
			assumedPods:   names{{"default", "foo"}, {"custom", "foo"}}.list(),
			expectPods:    names{{"default", "foo"}, {"custom", "foo"}},
		}, {
			queuedPods:    names{{"custom", "foo"}}.list(),
			scheduledPods: names{{"default", "foo"}}.list(),
			assumedPods:   names{{"default", "foo"}, {"custom", "foo"}}.list(),
			expectPods:    names{{"default", "foo"}},
		},
	}

	for _, item := range table {
		q := &cache.StoreToPodLister{cache.NewStore(cache.MetaNamespaceKeyFunc)}
		for _, pod := range item.queuedPods {
			q.Store.Add(pod)
		}
		s := &cache.StoreToPodLister{cache.NewStore(cache.MetaNamespaceKeyFunc)}
		for _, pod := range item.scheduledPods {
			s.Store.Add(pod)
		}
		m := NewSimpleModeler(q, s)
		for _, pod := range item.assumedPods {
			m.AssumePod(pod)
		}

		list, err := m.PodLister().List(labels.Everything())
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}

		found := 0
		for _, pod := range list {
			if item.expectPods.has(pod) {
				found++
			} else {
				t.Errorf("found unexpected pod %#v", pod)
			}
		}
		if e, a := item.expectPods, found; len(e) != a {
			t.Errorf("Expected pods:\n%+v\nFound pods:\n%s\n", podNames(e.list()), podNames(list))
		}
	}
}
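The expectations in this table follow one rule, which the hypothetical helper below restates without the cache types: the modeler lists scheduled pods plus assumed pods, except that an assumed pod which has reappeared in the queue is treated as stale and dropped:

package main

import "fmt"

// key is a namespace/name pair, echoing MetaNamespaceKeyFunc.
type key struct{ namespace, name string }

// visiblePods mirrors the behavior TestModeler exercises. It is an
// illustrative helper, not the real SimpleModeler, which works on
// cache.Store types.
func visiblePods(queued, scheduled, assumed []key) []key {
	inQueue := map[key]bool{}
	for _, k := range queued {
		inQueue[k] = true
	}
	seen := map[key]bool{}
	out := []key{}
	add := func(k key) {
		if !seen[k] {
			seen[k] = true
			out = append(out, k)
		}
	}
	for _, k := range scheduled {
		add(k)
	}
	for _, k := range assumed {
		// an assumed pod back in the queue is a stale assumption
		if !inQueue[k] {
			add(k)
		}
	}
	return out
}

func main() {
	// the third table case: custom/foo is queued and assumed, so only
	// default/foo remains visible
	fmt.Println(visiblePods(
		[]key{{"custom", "foo"}},
		[]key{{"default", "foo"}},
		[]key{{"default", "foo"}, {"custom", "foo"}},
	))
}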
Example #3
func newKube2Sky(ec etcdClient) *kube2sky {
	return &kube2sky{
		etcdClient:          ec,
		domain:              testDomain,
		etcdMutationTimeout: time.Second,
		endpointsStore:      cache.NewStore(cache.MetaNamespaceKeyFunc),
		servicesStore:       cache.NewStore(cache.MetaNamespaceKeyFunc),
	}
}
Example #4
// Initializes the factory.
func NewConfigFactory(client *client.Client) *ConfigFactory {
	c := &ConfigFactory{
		Client:             client,
		PodQueue:           cache.NewFIFO(cache.MetaNamespaceKeyFunc),
		ScheduledPodLister: &cache.StoreToPodLister{cache.NewStore(cache.MetaNamespaceKeyFunc)},
		NodeLister:         &cache.StoreToNodeLister{cache.NewStore(cache.MetaNamespaceKeyFunc)},
		ServiceLister:      &cache.StoreToServiceLister{cache.NewStore(cache.MetaNamespaceKeyFunc)},
	}
	modeler := scheduler.NewSimpleModeler(&cache.StoreToPodLister{c.PodQueue}, c.ScheduledPodLister)
	c.modeler = modeler
	c.PodLister = modeler.PodLister()
	return c
}
Example #5
// Run starts a background goroutine that watches for changes to services that
// have (or had) externalLoadBalancers=true and ensures that they have external
// load balancers created and deleted appropriately.
// nodeSyncPeriod controls how often we check the cluster's nodes to determine
// if external load balancers need to be updated to point to a new set.
func (s *ServiceController) Run(nodeSyncPeriod time.Duration) error {
	if err := s.init(); err != nil {
		return err
	}

	// We have to make this check because the ListWatch that we use in
	// WatchServices requires Client functions that aren't in the interface
	// for some reason.
	if _, ok := s.kubeClient.(*client.Client); !ok {
		return fmt.Errorf("ServiceController only works with real Client objects, but was passed something else satisfying the client Interface.")
	}

	// Get the currently existing set of services and then all future creates
	// and updates of services.
	// No delta compressor is needed for the DeltaFIFO queue because we only ever
	// care about the most recent state.
	serviceQueue := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, s.cache)
	lw := cache.NewListWatchFromClient(s.kubeClient.(*client.Client), "services", api.NamespaceAll, fields.Everything())
	cache.NewReflector(lw, &api.Service{}, serviceQueue, 0).Run()
	for i := 0; i < workerGoroutines; i++ {
		go s.watchServices(serviceQueue)
	}

	nodeLister := &cache.StoreToNodeLister{cache.NewStore(cache.MetaNamespaceKeyFunc)}
	nodeLW := cache.NewListWatchFromClient(s.kubeClient.(*client.Client), "nodes", api.NamespaceAll, fields.Everything())
	cache.NewReflector(nodeLW, &api.Node{}, nodeLister.Store, 0).Run()
	go s.nodeSyncLoop(nodeLister, nodeSyncPeriod)
	return nil
}
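The fan-out at the end of Run — workerGoroutines consumers draining one queue — is the standard worker-pool shape, sketched here without the Kubernetes types (the event strings and worker count are placeholders):

package main

import (
	"fmt"
	"sync"
)

func main() {
	const workerGoroutines = 4
	events := make(chan string, 8)

	// start several workers draining the same queue
	var wg sync.WaitGroup
	for i := 0; i < workerGoroutines; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			for ev := range events {
				fmt.Printf("worker %d: syncing %s\n", id, ev)
			}
		}(i)
	}

	for _, svc := range []string{"default/web", "default/db"} {
		events <- svc
	}
	close(events)
	wg.Wait()
}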
Example #6
func NewFirstContainerReady(kclient kclient.Interface, timeout time.Duration, interval time.Duration) *FirstContainerReady {
	return &FirstContainerReady{
		timeout:  timeout,
		interval: interval,
		podsForDeployment: func(deployment *kapi.ReplicationController) (*kapi.PodList, error) {
			selector := labels.Set(deployment.Spec.Selector).AsSelector()
			return kclient.Pods(deployment.Namespace).List(selector, fields.Everything())
		},
		getPodStore: func(namespace, name string) (cache.Store, chan struct{}) {
			sel, _ := fields.ParseSelector("metadata.name=" + name)
			store := cache.NewStore(cache.MetaNamespaceKeyFunc)
			lw := &deployutil.ListWatcherImpl{
				ListFunc: func() (runtime.Object, error) {
					return kclient.Pods(namespace).List(labels.Everything(), sel)
				},
				WatchFunc: func(resourceVersion string) (watch.Interface, error) {
					return kclient.Pods(namespace).Watch(labels.Everything(), sel, resourceVersion)
				},
			}
			stop := make(chan struct{})
			cache.NewReflector(lw, &kapi.Pod{}, store, 10*time.Second).RunUntil(stop)
			return store, stop
		},
	}
}
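getPodStore hands the caller both the store and a stop channel; closing that channel is what ends the reflector started with RunUntil. A minimal sketch of the same lifecycle, with resync standing in for the reflector's work:

package main

import (
	"fmt"
	"time"
)

// runUntil keeps calling resync on a period until stop is closed,
// mirroring the Reflector.RunUntil contract in miniature.
func runUntil(resync func(), period time.Duration, stop <-chan struct{}) {
	go func() {
		for {
			select {
			case <-stop:
				return
			case <-time.After(period):
				resync()
			}
		}
	}()
}

func main() {
	stop := make(chan struct{})
	runUntil(func() { fmt.Println("resync") }, 10*time.Millisecond, stop)
	time.Sleep(35 * time.Millisecond)
	close(stop) // the caller shuts the background watch down
	time.Sleep(10 * time.Millisecond)
}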
Example #7
func NewTestController(kube kubeclient.Interface, client contrail.ApiClient, allocator AddressAllocator, networkMgr NetworkManager) *Controller {
	controller := new(Controller)
	controller.serviceStore = cache.NewStore(testKeyFunc)
	controller.eventChannel = make(chan notification, 32)
	controller.kube = kube

	controller.config = NewConfig()
	controller.config.PublicSubnet = "100.64.0.0/10"

	controller.client = client
	if allocator == nil {
		controller.allocator = NewAddressAllocator(client, controller.config)
	} else {
		controller.allocator = allocator
	}
	controller.instanceMgr = NewInstanceManager(client, controller.allocator)
	if networkMgr == nil {
		controller.networkMgr = NewNetworkManager(client, controller.config)
	} else {
		controller.networkMgr = networkMgr
	}
	controller.serviceMgr = NewServiceManager(client, controller.config, controller.networkMgr)
	controller.namespaceMgr = NewNamespaceManager(client)
	return controller
}
Example #8
// Create creates a new ImageChangeController which is used to trigger builds when a new
// image is available
func (factory *ImageChangeControllerFactory) Create() controller.RunnableController {
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&imageStreamLW{factory.Client}, &imageapi.ImageStream{}, queue, 2*time.Minute).Run()

	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&buildConfigLW{client: factory.Client}, &buildapi.BuildConfig{}, store, 2*time.Minute).Run()

	imageChangeController := &buildcontroller.ImageChangeController{
		BuildConfigStore:        store,
		BuildConfigInstantiator: factory.BuildConfigInstantiator,
		Stop: factory.Stop,
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				kutil.HandleError(err)
				if _, isFatal := err.(buildcontroller.ImageChangeControllerFatalError); isFatal {
					return false
				}
				return retries.Count < maxRetries
			},
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			imageRepo := obj.(*imageapi.ImageStream)
			return imageChangeController.HandleImageRepo(imageRepo)
		},
	}
}
Example #9
// NewSimpleModeler returns a new SimpleModeler.
//   queuedPods: a PodLister that will return pods that have not been scheduled yet.
//   scheduledPods: a PodLister that will return pods that we know for sure have been scheduled.
func NewSimpleModeler(queuedPods, scheduledPods ExtendedPodLister) *SimpleModeler {
	return &SimpleModeler{
		queuedPods:    queuedPods,
		scheduledPods: scheduledPods,
		assumedPods:   &cache.StoreToPodLister{cache.NewStore(cache.MetaNamespaceKeyFunc)},
	}
}
Example #10
// TestAdmissionExists verifies you cannot create Origin content if namespace is not known
func TestAdmissionExists(t *testing.T) {
	mockClient := &testclient.Fake{
		Err: fmt.Errorf("DOES NOT EXIST"),
	}
	projectcache.FakeProjectCache(mockClient, cache.NewStore(cache.MetaNamespaceKeyFunc), "")
	handler := &lifecycle{client: mockClient}
	build := &buildapi.Build{
		ObjectMeta: kapi.ObjectMeta{Name: "buildid"},
		Parameters: buildapi.BuildParameters{
			Source: buildapi.BuildSource{
				Type: buildapi.BuildSourceGit,
				Git: &buildapi.GitBuildSource{
					URI: "http://github.com/my/repository",
				},
				ContextDir: "context",
			},
			Strategy: buildapi.BuildStrategy{
				Type:           buildapi.DockerBuildStrategyType,
				DockerStrategy: &buildapi.DockerBuildStrategy{},
			},
			Output: buildapi.BuildOutput{
				DockerImageReference: "repository/data",
			},
		},
		Status: buildapi.BuildStatusNew,
	}
	err := handler.Admit(admission.NewAttributesRecord(build, "Build", "bogus-ns", "builds", "CREATE", nil))
	if err == nil {
		t.Errorf("Expected an error because namespace does not exist")
	}
}
Example #11
func TestStoreToPodLister(t *testing.T) {
	store := cache.NewStore()
	ids := []string{"foo", "bar", "baz"}
	for _, id := range ids {
		store.Add(id, &api.Pod{
			JSONBase: api.JSONBase{ID: id},
			Labels:   map[string]string{"name": id},
		})
	}
	spl := storeToPodLister{store}

	for _, id := range ids {
		got, err := spl.ListPods(labels.Set{"name": id}.AsSelector())
		if err != nil {
			t.Errorf("Unexpected error: %v", err)
			continue
		}
		if e, a := 1, len(got); e != a {
			t.Errorf("Expected %v, got %v", e, a)
			continue
		}
		if e, a := id, got[0].ID; e != a {
			t.Errorf("Expected %v, got %v", e, a)
			continue
		}
	}
}
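ListPods filters the store's contents through a label selector. Stripped of the cache and labels packages, the match is just a map-subset check, as in this illustrative sketch:

package main

import "fmt"

// matches reports whether every key/value in selector is present in
// podLabels — the essence of labels.Set{...}.AsSelector() matching.
func matches(selector, podLabels map[string]string) bool {
	for k, v := range selector {
		if podLabels[k] != v {
			return false
		}
	}
	return true
}

func main() {
	pods := []map[string]string{
		{"name": "foo"}, {"name": "bar"}, {"name": "baz"},
	}
	for _, podLabels := range pods {
		if matches(map[string]string{"name": "bar"}, podLabels) {
			fmt.Println("matched pod with labels", podLabels)
		}
	}
}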
Example #12
func newPodsApi(client *kclient.Client) podsApi {
	// Extend the selector to include specific nodes to monitor
	// or provide an API to update the nodes to monitor.
	selector, err := kSelector.ParseSelector("spec.nodeName!=")
	if err != nil {
		panic(err)
	}

	lw := kcache.NewListWatchFromClient(client, "pods", kapi.NamespaceAll, selector)
	podLister := &kcache.StoreToPodLister{Store: kcache.NewStore(kcache.MetaNamespaceKeyFunc)}
	// Watch and cache all running pods.
	reflector := kcache.NewReflector(lw, &kapi.Pod{}, podLister.Store, 0)
	stopChan := make(chan struct{})
	reflector.RunUntil(stopChan)
	nStore, nController := kframework.NewInformer(
		createNamespaceLW(client),
		&kapi.Namespace{},
		resyncPeriod,
		kframework.ResourceEventHandlerFuncs{})
	go nController.Run(util.NeverStop)

	podsApi := &realPodsApi{
		client:         client,
		podLister:      podLister,
		stopChan:       stopChan,
		reflector:      reflector,
		namespaceStore: nStore,
	}

	return podsApi
}
Example #13
// TestAdmission verifies a namespace is created on create requests for namespace managed resources
func TestAdmission(t *testing.T) {
	namespace := "test"
	mockClient := &testclient.Fake{}
	handler := &provision{
		client: mockClient,
		store:  cache.NewStore(cache.MetaNamespaceKeyFunc),
	}
	pod := api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
		Spec: api.PodSpec{
			Volumes:    []api.Volume{{Name: "vol"}},
			Containers: []api.Container{{Name: "ctr", Image: "image"}},
		},
	}
	err := handler.Admit(admission.NewAttributesRecord(&pod, "Pod", namespace, "pods", "", admission.Create, nil))
	if err != nil {
		t.Errorf("Unexpected error returned from admission handler")
	}
	if len(mockClient.Actions) != 1 {
		t.Errorf("Expected a create-namespace request")
	}
	if mockClient.Actions[0].Action != "create-namespace" {
		t.Errorf("Expected a create-namespace request to be made via the client")
	}
}
Example #14
// Create constructs a BuildPodController
func (factory *BuildPodControllerFactory) Create() controller.RunnableController {
	factory.buildStore = cache.NewStore(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&buildLW{client: factory.OSClient}, &buildapi.Build{}, factory.buildStore, 2*time.Minute).Run()

	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&podLW{client: factory.KubeClient}, &kapi.Pod{}, queue, 2*time.Minute).Run()

	client := ControllerClient{factory.KubeClient, factory.OSClient}
	buildPodController := &buildcontroller.BuildPodController{
		BuildStore:   factory.buildStore,
		BuildUpdater: factory.BuildUpdater,
		PodManager:   client,
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				kutil.HandleError(err)
				return retries.Count < maxRetries
			},
			kutil.NewTokenBucketRateLimiter(1, 10)),
		Handle: func(obj interface{}) error {
			pod := obj.(*kapi.Pod)
			return buildPodController.HandlePod(pod)
		},
	}
}
Example #15
// TestAdmissionNamespaceExists verifies that no client call is made when a namespace already exists
func TestAdmissionNamespaceExists(t *testing.T) {
	namespace := "test"
	mockClient := &testclient.Fake{}
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	store.Add(&api.Namespace{
		ObjectMeta: api.ObjectMeta{Name: namespace},
	})
	handler := &provision{
		client: mockClient,
		store:  store,
	}
	pod := api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
		Spec: api.PodSpec{
			Volumes:    []api.Volume{{Name: "vol"}},
			Containers: []api.Container{{Name: "ctr", Image: "image"}},
		},
	}
	err := handler.Admit(admission.NewAttributesRecord(&pod, "Pod", namespace, "pods", "", admission.Create, nil))
	if err != nil {
		t.Errorf("Unexpected error returned from admission handler")
	}
	if len(mockClient.Actions) != 0 {
		t.Errorf("No client request should have been made")
	}
}
Example #16
func RunProjectCache(c client.Interface, defaultNodeSelector string) {
	if pcache != nil {
		return
	}

	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	reflector := cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return c.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return c.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&kapi.Namespace{},
		store,
		0,
	)
	reflector.Run()
	pcache = &ProjectCache{
		Client:              c,
		Store:               store,
		DefaultNodeSelector: defaultNodeSelector,
	}
}
Example #17
// Initializes the factory.
func NewConfigFactory(client *client.Client) *ConfigFactory {
	c := &ConfigFactory{
		Client:             client,
		PodQueue:           cache.NewFIFO(cache.MetaNamespaceKeyFunc),
		ScheduledPodLister: &cache.StoreToPodLister{},
		// Only nodes in the "Ready" condition with status == "True" are schedulable
		NodeLister:       &cache.StoreToNodeLister{cache.NewStore(cache.MetaNamespaceKeyFunc)},
		ServiceLister:    &cache.StoreToServiceLister{cache.NewStore(cache.MetaNamespaceKeyFunc)},
		ControllerLister: &cache.StoreToReplicationControllerLister{cache.NewStore(cache.MetaNamespaceKeyFunc)},
		StopEverything:   make(chan struct{}),
	}
	modeler := scheduler.NewSimpleModeler(&cache.StoreToPodLister{c.PodQueue}, c.ScheduledPodLister)
	c.modeler = modeler
	c.PodLister = modeler.PodLister()
	c.BindPodsRateLimiter = util.NewTokenBucketRateLimiter(BindPodsQps, BindPodsBurst)

	// On add/delete to the scheduled pods, remove from the assumed pods.
	// We construct this here instead of in CreateFromKeys because
	// ScheduledPodLister is something we provide to plug in functions that
	// they may need to call.
	c.ScheduledPodLister.Store, c.scheduledPodPopulator = framework.NewInformer(
		c.createAssignedPodLW(),
		&api.Pod{},
		0,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				if pod, ok := obj.(*api.Pod); ok {
					c.modeler.LockedAction(func() {
						c.modeler.ForgetPod(pod)
					})
				}
			},
			DeleteFunc: func(obj interface{}) {
				c.modeler.LockedAction(func() {
					switch t := obj.(type) {
					case *api.Pod:
						c.modeler.ForgetPod(t)
					case cache.DeletedFinalStateUnknown:
						c.modeler.ForgetPodByKey(t.Key)
					}
				})
			},
		},
	)

	return c
}
Example #18
// New creates a new Kubelet for use in main
func NewMainKubelet(
	hostname string,
	dockerClient dockertools.DockerInterface,
	etcdClient tools.EtcdClient,
	kubeClient *client.Client,
	rootDirectory string,
	networkContainerImage string,
	resyncInterval time.Duration,
	pullQPS float32,
	pullBurst int,
	minimumGCAge time.Duration,
	maxContainerCount int,
	sourceReady SourceReadyFn,
	clusterDomain string,
	clusterDNS net.IP,
	masterServiceNamespace string) (*Kubelet, error) {
	if rootDirectory == "" {
		return nil, fmt.Errorf("invalid root directory %q", rootDirectory)
	}
	if resyncInterval <= 0 {
		return nil, fmt.Errorf("invalid sync frequency %d", resyncInterval)
	}
	if minimumGCAge <= 0 {
		return nil, fmt.Errorf("invalid minimum GC age %d", minimumGCAge)
	}

	serviceStore := cache.NewStore()
	cache.NewReflector(&cache.ListWatch{kubeClient, labels.Everything(), "services", api.NamespaceAll}, &api.Service{}, serviceStore).Run()
	serviceLister := &cache.StoreToServiceLister{serviceStore}

	klet := &Kubelet{
		hostname:               hostname,
		dockerClient:           dockerClient,
		etcdClient:             etcdClient,
		rootDirectory:          rootDirectory,
		resyncInterval:         resyncInterval,
		networkContainerImage:  networkContainerImage,
		podWorkers:             newPodWorkers(),
		dockerIDToRef:          map[dockertools.DockerID]*api.ObjectReference{},
		runner:                 dockertools.NewDockerContainerCommandRunner(dockerClient),
		httpClient:             &http.Client{},
		pullQPS:                pullQPS,
		pullBurst:              pullBurst,
		minimumGCAge:           minimumGCAge,
		maxContainerCount:      maxContainerCount,
		sourceReady:            sourceReady,
		clusterDomain:          clusterDomain,
		clusterDNS:             clusterDNS,
		serviceLister:          serviceLister,
		masterServiceNamespace: masterServiceNamespace,
	}

	if err := klet.setupDataDirs(); err != nil {
		return nil, err
	}

	return klet, nil
}
Example #19
// NewEventQueue returns a new EventQueue ready for action.
func NewEventQueue(keyFn kcache.KeyFunc) *EventQueue {
	q := &EventQueue{
		store:  kcache.NewStore(keyFn),
		events: map[string]watch.EventType{},
		queue:  []string{},
		keyFn:  keyFn,
	}
	q.cond.L = &q.lock
	return q
}
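A rough, dependency-free sketch of what EventQueue layers on top of a plain store: the most recent event type per key, with keys kept in FIFO order so repeated events for one object coalesce into a single entry. (Simplified and not thread-safe; the real type guards everything with the lock wired up via q.cond.L.)

package main

import "fmt"

type eventType string

type eventQueue struct {
	events map[string]eventType
	queue  []string
}

func newEventQueue() *eventQueue {
	return &eventQueue{events: map[string]eventType{}}
}

// enqueue records an event for key; a key already queued keeps its
// position, and the later event type overwrites the earlier one.
func (q *eventQueue) enqueue(key string, t eventType) {
	if _, queued := q.events[key]; !queued {
		q.queue = append(q.queue, key)
	}
	q.events[key] = t
}

// pop removes and returns the oldest queued key and its latest event.
func (q *eventQueue) pop() (string, eventType, bool) {
	if len(q.queue) == 0 {
		return "", "", false
	}
	key := q.queue[0]
	q.queue = q.queue[1:]
	t := q.events[key]
	delete(q.events, key)
	return key, t, true
}

func main() {
	q := newEventQueue()
	q.enqueue("default/web", "ADDED")
	q.enqueue("default/web", "MODIFIED") // coalesces with the ADDED entry
	for {
		key, t, ok := q.pop()
		if !ok {
			break
		}
		fmt.Println(key, t)
	}
}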
Example #20
// storeServices stores the given services in a store.
func storeServices(svcs []*api.Service) cache.Store {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	found := make([]interface{}, 0, len(svcs))
	for i := range svcs {
		found = append(found, svcs[i])
	}
	if err := store.Replace(found); err != nil {
		glog.Fatalf("Unable to replace services %v", err)
	}
	return store
}
Example #21
// storeEps stores the given endpoints in a store.
func storeEps(eps []*api.Endpoints) cache.Store {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	found := make([]interface{}, 0, len(eps))
	for i := range eps {
		found = append(found, eps[i])
	}
	if err := store.Replace(found); err != nil {
		glog.Fatalf("Unable to replace endpoints %v", err)
	}
	return store
}
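Both helpers lean on two behaviors: MetaNamespaceKeyFunc-style "namespace/name" keys, and Replace swapping the store's entire contents. A minimal sketch of both, with Service as a stand-in type:

package main

import "fmt"

// Service is a stand-in for api.Service in this sketch.
type Service struct{ Namespace, Name string }

// metaNamespaceKey mimics cache.MetaNamespaceKeyFunc: objects are keyed
// by "namespace/name".
func metaNamespaceKey(s *Service) string {
	return s.Namespace + "/" + s.Name
}

// store is a minimal sketch of cache.Store: a keyed map whose Replace
// swaps in a whole new set of items at once.
type store struct {
	items map[string]*Service
}

func (st *store) Replace(svcs []*Service) {
	next := make(map[string]*Service, len(svcs))
	for _, s := range svcs {
		next[metaNamespaceKey(s)] = s
	}
	st.items = next
}

func main() {
	st := &store{}
	st.Replace([]*Service{{"default", "web"}, {"kube-system", "dns"}})
	for k := range st.items {
		fmt.Println(k) // prints the namespace/name keys (map order varies)
	}
}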
Example #22
// Create creates a scheduler and all support functions.
func (factory *ConfigFactory) Create() *scheduler.Config {
	// Watch and queue pods that need scheduling.
	podQueue := cache.NewFIFO()
	cache.NewReflector(factory.createUnassignedPodLW(), &api.Pod{}, podQueue).Run()

	// Watch and cache all running pods. Scheduler needs to find all pods
	// so it knows where it's safe to place a pod. Cache this locally.
	podCache := cache.NewStore()
	cache.NewReflector(factory.createAssignedPodLW(), &api.Pod{}, podCache).Run()

	// Watch minions.
	// Minions may be listed frequently, so provide a local up-to-date cache.
	minionCache := cache.NewStore()
	if false {
		// Disable this code until minions support watches.
		cache.NewReflector(factory.createMinionLW(), &api.Minion{}, minionCache).Run()
	} else {
		cache.NewPoller(factory.pollMinions, 10*time.Second, minionCache).Run()
	}

	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	algo := algorithm.NewRandomFitScheduler(
		&storeToPodLister{podCache}, r)

	return &scheduler.Config{
		MinionLister: &storeToMinionLister{minionCache},
		Algorithm:    algo,
		Binder:       &binder{factory.Client},
		NextPod: func() *api.Pod {
			pod := podQueue.Pop().(*api.Pod)
			// TODO: Remove or reduce verbosity by sep 6th, 2014. Leave until then to
			// make it easy to find scheduling problems.
			glog.Infof("About to try and schedule pod %v\n"+
				"\tknown minions: %v\n"+
				"\tknown scheduled pods: %v\n",
				pod.ID, minionCache.Contains(), podCache.Contains())
			return pod
		},
		Error: factory.makeDefaultErrorFunc(podQueue),
	}
}
Example #23
// NewAuthorizationCache creates a new AuthorizationCache
func NewAuthorizationCache(reviewer Reviewer, namespaceInterface kclient.NamespaceInterface, policyClient policyclient.ReadOnlyPolicyClient) *AuthorizationCache {
	result := &AuthorizationCache{
		namespaceStore:            cache.NewStore(cache.MetaNamespaceKeyFunc),
		namespaceInterface:        namespaceInterface,
		lastSyncResourceVersioner: &unchangingLastSyncResourceVersioner{},

		clusterPolicyResourceVersions:  util.NewStringSet(),
		clusterBindingResourceVersions: util.NewStringSet(),

		policyClient: policyClient,

		reviewRecordStore:       cache.NewStore(reviewRecordKeyFn),
		userSubjectRecordStore:  cache.NewStore(subjectRecordKeyFn),
		groupSubjectRecordStore: cache.NewStore(subjectRecordKeyFn),

		reviewer: reviewer,
		skip:     &neverSkipSynchronizer{},
	}
	result.syncHandler = result.syncRequest
	return result
}
Example #24
// TestAdmission verifies that create requests are rejected while a namespace
// is terminating, and that update and delete requests still proceed
func TestAdmission(t *testing.T) {
	namespaceObj := &api.Namespace{
		ObjectMeta: api.ObjectMeta{
			Name:      "test",
			Namespace: "",
		},
		Status: api.NamespaceStatus{
			Phase: api.NamespaceActive,
		},
	}
	store := cache.NewStore(cache.MetaNamespaceIndexFunc)
	store.Add(namespaceObj)
	mockClient := &testclient.Fake{}
	handler := &lifecycle{
		client: mockClient,
		store:  store,
	}
	pod := api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespaceObj.Namespace},
		Spec: api.PodSpec{
			Volumes:    []api.Volume{{Name: "vol"}},
			Containers: []api.Container{{Name: "ctr", Image: "image"}},
		},
	}
	err := handler.Admit(admission.NewAttributesRecord(&pod, "Pod", namespaceObj.Namespace, "pods", "CREATE"))
	if err != nil {
		t.Errorf("Unexpected error returned from admission handler: %v", err)
	}

	// change namespace state to terminating
	namespaceObj.Status.Phase = api.NamespaceTerminating
	store.Add(namespaceObj)

	// verify create operations in the namespace cause an error
	err = handler.Admit(admission.NewAttributesRecord(&pod, "Pod", namespaceObj.Namespace, "pods", "CREATE"))
	if err == nil {
		t.Errorf("Expected error rejecting creates in a namespace when it is terminating")
	}

	// verify update operations in the namespace can proceed
	err = handler.Admit(admission.NewAttributesRecord(&pod, "Pod", namespaceObj.Namespace, "pods", "UPDATE"))
	if err != nil {
		t.Errorf("Unexpected error returned from admission handler: %v", err)
	}

	// verify delete operations in the namespace can proceed
	err = handler.Admit(admission.NewAttributesRecord(nil, "Pod", namespaceObj.Namespace, "pods", "DELETE"))
	if err != nil {
		t.Errorf("Unexpected error returned from admission handler: %v", err)
	}

}
Example #25
// NewInformer returns a cache.Store and a controller for populating the store
// while also providing event notifications. You should only use the returned
// cache.Store for Get/List operations; Add/Modify/Deletes will cause the event
// notifications to be faulty.
//
// Parameters:
//  * lw is list and watch functions for the source of the resource you want to
//    be informed of.
//  * objType is an object of the type that you expect to receive.
//  * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
//    calls, even if nothing changed). Otherwise, re-list will be delayed as
//    long as possible (until the upstream source closes the watch or times out,
//    or you stop the controller).
//  * h is the object you want notifications sent to.
//
func NewInformer(
	lw cache.ListerWatcher,
	objType runtime.Object,
	resyncPeriod time.Duration,
	h ResourceEventHandler,
) (cache.Store, *Controller) {
	// This will hold the client state, as we know it.
	clientState := cache.NewStore(DeletionHandlingMetaNamespaceKeyFunc)

	// This will hold incoming changes. Note how we pass clientState in as a
	// KeyLister, that way resync operations will result in the correct set
	// of update/delete deltas.
	fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, clientState)

	cfg := &Config{
		Queue:            fifo,
		ListerWatcher:    lw,
		ObjectType:       objType,
		FullResyncPeriod: resyncPeriod,
		RetryOnError:     false,

		Process: func(obj interface{}) error {
			// from oldest to newest
			for _, d := range obj.(cache.Deltas) {
				switch d.Type {
				case cache.Sync, cache.Added, cache.Updated:
					if old, exists, err := clientState.Get(d.Object); err == nil && exists {
						if err := clientState.Update(d.Object); err != nil {
							return err
						}
						h.OnUpdate(old, d.Object)
					} else {
						if err := clientState.Add(d.Object); err != nil {
							return err
						}
						h.OnAdd(d.Object)
					}
				case cache.Deleted:
					if err := clientState.Delete(d.Object); err != nil {
						return err
					}
					h.OnDelete(d.Object)
				}
			}
			return nil
		},
	}
	return clientState, New(cfg)
}
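The Process closure is the heart of the informer: replay deltas oldest-to-newest against local client state and fire the matching handler. The same dispatch, reduced to maps and strings for illustration:

package main

import "fmt"

type deltaType string

const (
	added   deltaType = "Added"
	updated deltaType = "Updated"
	deleted deltaType = "Deleted"
)

type delta struct {
	typ deltaType
	key string
	obj string
}

func main() {
	// clientState plays the role of the store returned to the caller.
	clientState := map[string]string{}
	process := func(ds []delta) {
		for _, d := range ds { // from oldest to newest
			switch d.typ {
			case added, updated:
				if old, exists := clientState[d.key]; exists {
					clientState[d.key] = d.obj
					fmt.Println("OnUpdate:", old, "->", d.obj)
				} else {
					clientState[d.key] = d.obj
					fmt.Println("OnAdd:", d.obj)
				}
			case deleted:
				delete(clientState, d.key)
				fmt.Println("OnDelete:", d.obj)
			}
		}
	}
	process([]delta{
		{added, "default/web", "web-v1"},
		{updated, "default/web", "web-v2"},
		{deleted, "default/web", "web-v2"},
	})
}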
Example #26
// synchronize runs a full synchronization over the cache data. It must be run
// in a single-writer model; it's not thread-safe by design.
func (ac *AuthorizationCache) synchronize() {
	// if none of our internal reflectors changed, then we can skip reviewing the cache
	skip, currentState := ac.skip.SkipSynchronize(ac.lastState, ac.lastSyncResourceVersioner, ac.policyClient)
	if skip {
		return
	}

	// by default, we update our current caches and do an incremental change
	userSubjectRecordStore := ac.userSubjectRecordStore
	groupSubjectRecordStore := ac.groupSubjectRecordStore
	reviewRecordStore := ac.reviewRecordStore

	// if there was a global change that forced complete invalidation, we rebuild our cache and do a fast swap at end
	invalidateCache := ac.invalidateCache()
	if invalidateCache {
		userSubjectRecordStore = cache.NewStore(subjectRecordKeyFn)
		groupSubjectRecordStore = cache.NewStore(subjectRecordKeyFn)
		reviewRecordStore = cache.NewStore(reviewRecordKeyFn)
	}

	// iterate over caches and synchronize our three caches
	namespaceSet := ac.synchronizeNamespaces(userSubjectRecordStore, groupSubjectRecordStore, reviewRecordStore)
	ac.synchronizePolicies(userSubjectRecordStore, groupSubjectRecordStore, reviewRecordStore)
	ac.synchronizePolicyBindings(userSubjectRecordStore, groupSubjectRecordStore, reviewRecordStore)
	purgeDeletedNamespaces(namespaceSet, userSubjectRecordStore, groupSubjectRecordStore, reviewRecordStore)

	// if we did a full rebuild, now we swap the fully rebuilt cache
	if invalidateCache {
		ac.userSubjectRecordStore = userSubjectRecordStore
		ac.groupSubjectRecordStore = groupSubjectRecordStore
		ac.reviewRecordStore = reviewRecordStore
	}

	// we were able to update our cache since this last observation period
	ac.lastState = currentState
}
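The invalidation strategy here — update the live stores incrementally, but on a global change rebuild into fresh stores and swap them in at the end — looks like this in miniature (field names are illustrative; per the doc comment, safety relies on the single-writer model):

package main

import "fmt"

type authzCache struct {
	records map[string]string
}

// synchronize applies fresh entries either into the live map or, when
// invalidating, into a rebuilt map that is swapped in at the end.
func (c *authzCache) synchronize(invalidate bool, fresh map[string]string) {
	records := c.records
	if invalidate {
		records = map[string]string{} // rebuild from scratch off to the side
	}
	for k, v := range fresh {
		records[k] = v
	}
	if invalidate {
		c.records = records // fast swap of the fully rebuilt cache
	}
}

func main() {
	c := &authzCache{records: map[string]string{"stale": "entry"}}
	c.synchronize(true, map[string]string{"ns1": "user1"})
	fmt.Println(c.records) // map[ns1:user1] — the stale entry is gone
}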
Example #27
func TestStoreToMinionLister(t *testing.T) {
	store := cache.NewStore()
	ids := util.NewStringSet("foo", "bar", "baz")
	for id := range ids {
		store.Add(id, &api.Minion{JSONBase: api.JSONBase{ID: id}})
	}
	sml := storeToMinionLister{store}

	got, err := sml.List()
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if !ids.HasAll(got...) || len(got) != len(ids) {
		t.Errorf("Expected %v, got %v", ids, got)
	}
}
Example #28
func TestSchedulerRateLimitsBinding(t *testing.T) {
	scheduledPodStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
	scheduledPodLister := &cache.StoreToPodLister{scheduledPodStore}
	queuedPodStore := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	queuedPodLister := &cache.StoreToPodLister{queuedPodStore}
	modeler := NewSimpleModeler(queuedPodLister, scheduledPodLister)

	algo := NewGenericScheduler(
		map[string]algorithm.FitPredicate{},
		[]algorithm.PriorityConfig{},
		modeler.PodLister(),
		rand.New(rand.NewSource(time.Now().UnixNano())))

	// Rate limit to 1 pod
	fr := FakeRateLimiter{util.NewTokenBucketRateLimiter(0.02, 1), []bool{}}
	c := &Config{
		Modeler: modeler,
		MinionLister: algorithm.FakeMinionLister(
			api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
		),
		Algorithm: algo,
		Binder: fakeBinder{func(b *api.Binding) error {
			return nil
		}},
		NextPod: func() *api.Pod {
			return queuedPodStore.Pop().(*api.Pod)
		},
		Error: func(p *api.Pod, err error) {
			t.Errorf("Unexpected error when scheduling pod %+v: %v", p, err)
		},
		Recorder:            &record.FakeRecorder{},
		BindPodsRateLimiter: &fr,
	}

	s := New(c)
	firstPod := podWithID("foo", "")
	secondPod := podWithID("boo", "")
	queuedPodStore.Add(firstPod)
	queuedPodStore.Add(secondPod)

	for i, hitRateLimit := range []bool{true, false} {
		s.scheduleOne()
		if fr.acceptValues[i] != hitRateLimit {
			t.Errorf("Unexpected rate limiting, expect rate limit to be: %v but found it was %v", hitRateLimit, fr.acceptValues[i])
		}
	}
}
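FakeRateLimiter wraps NewTokenBucketRateLimiter(0.02, 1): one burst token, refilled at 0.02 tokens per second, so the first scheduling attempt binds and the second is rate limited. Below is a hypothetical token bucket with that behavior, not the util package's actual implementation:

package main

import (
	"fmt"
	"time"
)

// tokenBucket refills at qps tokens per second up to burst; TryAccept
// spends one token or reports that the caller is rate limited.
type tokenBucket struct {
	qps    float64
	burst  float64
	tokens float64
	last   time.Time
}

func newTokenBucket(qps float64, burst int) *tokenBucket {
	return &tokenBucket{qps: qps, burst: float64(burst), tokens: float64(burst), last: time.Now()}
}

func (tb *tokenBucket) TryAccept() bool {
	now := time.Now()
	tb.tokens += now.Sub(tb.last).Seconds() * tb.qps
	if tb.tokens > tb.burst {
		tb.tokens = tb.burst
	}
	tb.last = now
	if tb.tokens >= 1 {
		tb.tokens--
		return true
	}
	return false
}

func main() {
	tb := newTokenBucket(0.02, 1) // one call allowed, then ~50s per token
	fmt.Println(tb.TryAccept())   // true: spends the initial burst token
	fmt.Println(tb.TryAccept())   // false: the bucket is empty
}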
Example #29
// New returns a new service controller to keep cloud provider service resources
// (like external load balancers) in sync with the registry.
func New(cloud cloudprovider.Interface, kubeClient client.Interface, clusterName string) *ServiceController {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(kubeClient.Events(""))
	recorder := broadcaster.NewRecorder(api.EventSource{Component: "service-controller"})

	return &ServiceController{
		cloud:            cloud,
		kubeClient:       kubeClient,
		clusterName:      clusterName,
		cache:            &serviceCache{serviceMap: make(map[string]*cachedService)},
		eventBroadcaster: broadcaster,
		eventRecorder:    recorder,
		nodeLister: cache.StoreToNodeLister{
			Store: cache.NewStore(cache.MetaNamespaceKeyFunc),
		},
	}
}
Example #30
// NewProvision creates a new namespace provision admission control handler
func NewProvision(c client.Interface) admission.Interface {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	reflector := cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return c.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return c.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.Namespace{},
		store,
		0,
	)
	reflector.Run()
	return createProvision(c, store)
}