Example #1
func newKube2Sky(ec etcdClient) *kube2sky {
	return &kube2sky{
		etcdClient:          ec,
		domain:              testDomain,
		etcdMutationTimeout: time.Second,
		endpointsStore:      cache.NewStore(cache.MetaNamespaceKeyFunc),
		servicesStore:       cache.NewStore(cache.MetaNamespaceKeyFunc),
	}
}
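Every example on this page builds on cache.NewStore(cache.MetaNamespaceKeyFunc), which returns a thread-safe store keyed by object metadata as "<namespace>/<name>" (or just "<name>" for objects without a namespace). A minimal sketch of that keying behavior, assuming the same api and cache packages used in the examples here (the pod literal and the fmt output are illustrative only):

func exampleStoreKeying() {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)

	// Adding a pod indexes it under the key "default/foo".
	store.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "default"}})

	// GetByKey looks the object up by that same key.
	if obj, exists, err := store.GetByKey("default/foo"); err == nil && exists {
		fmt.Printf("found %s\n", obj.(*api.Pod).Name)
	}
}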
Example #2
func TestModeler(t *testing.T) {
	table := []struct {
		queuedPods    []*api.Pod
		scheduledPods []*api.Pod
		assumedPods   []*api.Pod
		expectPods    names
	}{
		{
			queuedPods:    names{}.list(),
			scheduledPods: names{{"default", "foo"}, {"custom", "foo"}}.list(),
			assumedPods:   names{{"default", "foo"}}.list(),
			expectPods:    names{{"default", "foo"}, {"custom", "foo"}},
		}, {
			queuedPods:    names{}.list(),
			scheduledPods: names{{"default", "foo"}}.list(),
			assumedPods:   names{{"default", "foo"}, {"custom", "foo"}}.list(),
			expectPods:    names{{"default", "foo"}, {"custom", "foo"}},
		}, {
			queuedPods:    names{{"custom", "foo"}}.list(),
			scheduledPods: names{{"default", "foo"}}.list(),
			assumedPods:   names{{"default", "foo"}, {"custom", "foo"}}.list(),
			expectPods:    names{{"default", "foo"}},
		},
	}

	for _, item := range table {
		q := &cache.StoreToPodLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)}
		for _, pod := range item.queuedPods {
			q.Store.Add(pod)
		}
		s := &cache.StoreToPodLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)}
		for _, pod := range item.scheduledPods {
			s.Store.Add(pod)
		}
		m := NewSimpleModeler(q, s)
		for _, pod := range item.assumedPods {
			m.AssumePod(pod)
		}

		list, err := m.PodLister().List(labels.Everything())
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}

		found := 0
		for _, pod := range list {
			if item.expectPods.has(pod) {
				found++
			} else {
				t.Errorf("found unexpected pod %#v", pod)
			}
		}
		if e, a := item.expectPods, found; len(e) != a {
			t.Errorf("Expected pods:\n%+v\nFound pods:\n%s\n", podNames(e.list()), podNames(list))
		}
	}
}
Example #3
// TestAdmission verifies a namespace is created on create requests for namespace managed resources
func TestAdmission(t *testing.T) {
	namespace := "test"
	mockClient := &testclient.Fake{}
	handler := &provision{
		client: mockClient,
		store:  cache.NewStore(cache.MetaNamespaceKeyFunc),
	}
	pod := api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
		Spec: api.PodSpec{
			Volumes:    []api.Volume{{Name: "vol"}},
			Containers: []api.Container{{Name: "ctr", Image: "image"}},
		},
	}
	err := handler.Admit(admission.NewAttributesRecord(&pod, "Pod", pod.Namespace, pod.Name, "pods", "", admission.Create, nil))
	if err != nil {
		t.Errorf("Unexpected error returned from admission handler")
	}
	actions := mockClient.Actions()
	if len(actions) != 1 {
		t.Errorf("Expected a create-namespace request")
	}
	if !actions[0].Matches("create", "namespaces") {
		t.Errorf("Expected a create-namespace request to be made via the client")
	}
}
Example #4
// TestAdmissionNamespaceExists verifies that no client call is made when a namespace already exists
func TestAdmissionNamespaceExists(t *testing.T) {
	namespace := "test"
	mockClient := &testclient.Fake{}
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	store.Add(&api.Namespace{
		ObjectMeta: api.ObjectMeta{Name: namespace},
	})
	handler := &provision{
		client: mockClient,
		store:  store,
	}
	pod := api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
		Spec: api.PodSpec{
			Volumes:    []api.Volume{{Name: "vol"}},
			Containers: []api.Container{{Name: "ctr", Image: "image"}},
		},
	}
	err := handler.Admit(admission.NewAttributesRecord(&pod, "Pod", pod.Namespace, pod.Name, "pods", "", admission.Create, nil))
	if err != nil {
		t.Errorf("Unexpected error returned from admission handler")
	}
	if len(mockClient.Actions()) != 0 {
		t.Errorf("No client request should have been made")
	}
}
Example #5
// Initializes the factory.
func NewConfigFactory(client *client.Client, rateLimiter util.RateLimiter) *ConfigFactory {
	c := &ConfigFactory{
		Client:             client,
		PodQueue:           cache.NewFIFO(cache.MetaNamespaceKeyFunc),
		ScheduledPodLister: &cache.StoreToPodLister{},
		// Only nodes in the "Ready" condition with status == "True" are schedulable
		NodeLister:       &cache.StoreToNodeLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
		ServiceLister:    &cache.StoreToServiceLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
		ControllerLister: &cache.StoreToReplicationControllerLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
		StopEverything:   make(chan struct{}),
	}
	modeler := scheduler.NewSimpleModeler(&cache.StoreToPodLister{Store: c.PodQueue}, c.ScheduledPodLister)
	c.modeler = modeler
	c.PodLister = modeler.PodLister()
	c.BindPodsRateLimiter = rateLimiter

	// On add/delete to the scheduled pods, remove from the assumed pods.
	// We construct this here instead of in CreateFromKeys because
	// ScheduledPodLister is something we provide to plug in functions that
	// they may need to call.
	c.ScheduledPodLister.Store, c.scheduledPodPopulator = framework.NewInformer(
		c.createAssignedPodLW(),
		&api.Pod{},
		0,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				if pod, ok := obj.(*api.Pod); ok {
					c.modeler.LockedAction(func() {
						c.modeler.ForgetPod(pod)
					})
				}
			},
			DeleteFunc: func(obj interface{}) {
				c.modeler.LockedAction(func() {
					switch t := obj.(type) {
					case *api.Pod:
						c.modeler.ForgetPod(t)
					case cache.DeletedFinalStateUnknown:
						c.modeler.ForgetPodByKey(t.Key)
					}
				})
			},
		},
	)

	return c
}
Example #6
func newWatchCache(capacity int) *watchCache {
	return &watchCache{
		capacity:        capacity,
		cache:           make([]watchCacheElement, capacity),
		startIndex:      0,
		endIndex:        0,
		store:           cache.NewStore(cache.MetaNamespaceKeyFunc),
		resourceVersion: 0,
	}
}
Example #7
// storeEps stores the given endpoints in a store.
func storeEps(eps []*api.Endpoints) cache.Store {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	found := make([]interface{}, 0, len(eps))
	for i := range eps {
		found = append(found, eps[i])
	}
	if err := store.Replace(found, "0"); err != nil {
		glog.Fatalf("Unable to replace endpoints %v", err)
	}
	return store
}
Example #8
// storeServices stores the given services in a store.
func storeServices(svcs []*api.Service) cache.Store {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	found := make([]interface{}, 0, len(svcs))
	for i := range svcs {
		found = append(found, svcs[i])
	}
	if err := store.Replace(found, "0"); err != nil {
		glog.Fatalf("Unable to replace services %v", err)
	}
	return store
}
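Both helpers rely on store.Replace, which atomically swaps in the full object set at the given resource version; the contents can then be read back with List. A rough usage sketch (the service literals are illustrative only):

func exampleStoreServices() {
	svcs := []*api.Service{
		{ObjectMeta: api.ObjectMeta{Name: "svc1", Namespace: "default"}},
		{ObjectMeta: api.ObjectMeta{Name: "svc2", Namespace: "kube-system"}},
	}
	store := storeServices(svcs)
	for _, obj := range store.List() {
		svc := obj.(*api.Service)
		fmt.Printf("%s/%s\n", svc.Namespace, svc.Name)
	}
}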
Example #9
// NewInformer returns a cache.Store and a controller for populating the store
// while also providing event notifications. You should only use the returned
// cache.Store for Get/List operations; Add/Modify/Deletes will cause the event
// notifications to be faulty.
//
// Parameters:
//  * lw provides the list and watch functions for the source of the resource
//    you want to be informed of.
//  * objType is an object of the type that you expect to receive.
//  * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
//    calls, even if nothing changed). Otherwise, re-list will be delayed as
//    long as possible (until the upstream source closes the watch or times out,
//    or you stop the controller).
//  * h is the object you want notifications sent to.
//
func NewInformer(
	lw cache.ListerWatcher,
	objType runtime.Object,
	resyncPeriod time.Duration,
	h ResourceEventHandler,
) (cache.Store, *Controller) {
	// This will hold the client state, as we know it.
	clientState := cache.NewStore(DeletionHandlingMetaNamespaceKeyFunc)

	// This will hold incoming changes. Note how we pass clientState in as a
	// KeyLister, that way resync operations will result in the correct set
	// of update/delete deltas.
	fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, clientState)

	cfg := &Config{
		Queue:            fifo,
		ListerWatcher:    lw,
		ObjectType:       objType,
		FullResyncPeriod: resyncPeriod,
		RetryOnError:     false,

		Process: func(obj interface{}) error {
			// from oldest to newest
			for _, d := range obj.(cache.Deltas) {
				switch d.Type {
				case cache.Sync, cache.Added, cache.Updated:
					if old, exists, err := clientState.Get(d.Object); err == nil && exists {
						if err := clientState.Update(d.Object); err != nil {
							return err
						}
						h.OnUpdate(old, d.Object)
					} else {
						if err := clientState.Add(d.Object); err != nil {
							return err
						}
						h.OnAdd(d.Object)
					}
				case cache.Deleted:
					if err := clientState.Delete(d.Object); err != nil {
						return err
					}
					h.OnDelete(d.Object)
				}
			}
			return nil
		},
	}
	return clientState, New(cfg)
}
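As a rough usage sketch of the function above (podListWatch is a hypothetical cache.ListerWatcher built against the apiserver, along the lines of the cache.ListWatch literals in Examples #12 and #13):

func exampleNewInformer(podListWatch cache.ListerWatcher) {
	store, controller := NewInformer(
		podListWatch,
		&api.Pod{},
		30*time.Second,
		ResourceEventHandlerFuncs{
			AddFunc:    func(obj interface{}) { /* a pod appeared */ },
			UpdateFunc: func(oldObj, newObj interface{}) { /* a pod changed */ },
			DeleteFunc: func(obj interface{}) { /* a pod went away */ },
		},
	)

	stop := make(chan struct{})
	defer close(stop)
	go controller.Run(stop)

	// Per the doc comment above, use the returned store for Get/List only.
	_ = store
}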
Example #10
func TestSchedulerRateLimitsBinding(t *testing.T) {
	scheduledPodStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
	scheduledPodLister := &cache.StoreToPodLister{Store: scheduledPodStore}
	queuedPodStore := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	queuedPodLister := &cache.StoreToPodLister{Store: queuedPodStore}
	modeler := NewSimpleModeler(queuedPodLister, scheduledPodLister)

	algo := NewGenericScheduler(
		map[string]algorithm.FitPredicate{},
		[]algorithm.PriorityConfig{},
		modeler.PodLister(),
		rand.New(rand.NewSource(time.Now().UnixNano())))

	// Rate limit to 1 pod
	fr := FakeRateLimiter{util.NewTokenBucketRateLimiter(0.02, 1), []bool{}}
	c := &Config{
		Modeler: modeler,
		NodeLister: algorithm.FakeNodeLister(
			api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
		),
		Algorithm: algo,
		Binder: fakeBinder{func(b *api.Binding) error {
			return nil
		}},
		NextPod: func() *api.Pod {
			return queuedPodStore.Pop().(*api.Pod)
		},
		Error: func(p *api.Pod, err error) {
			t.Errorf("Unexpected error when scheduling pod %+v: %v", p, err)
		},
		Recorder:            &record.FakeRecorder{},
		BindPodsRateLimiter: &fr,
	}

	s := New(c)
	firstPod := podWithID("foo", "")
	secondPod := podWithID("boo", "")
	queuedPodStore.Add(firstPod)
	queuedPodStore.Add(secondPod)

	for i, hitRateLimit := range []bool{true, false} {
		s.scheduleOne()
		if fr.acceptValues[i] != hitRateLimit {
			t.Errorf("Unexpected rate limiting, expect rate limit to be: %v but found it was %v", hitRateLimit, fr.acceptValues[i])
		}
	}
}
Example #11
// New returns a new service controller to keep cloud provider service resources
// (like external load balancers) in sync with the registry.
func New(cloud cloudprovider.Interface, kubeClient client.Interface, clusterName string) *ServiceController {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(kubeClient.Events(""))
	recorder := broadcaster.NewRecorder(api.EventSource{Component: "service-controller"})

	return &ServiceController{
		cloud:            cloud,
		kubeClient:       kubeClient,
		clusterName:      clusterName,
		cache:            &serviceCache{serviceMap: make(map[string]*cachedService)},
		eventBroadcaster: broadcaster,
		eventRecorder:    recorder,
		nodeLister: cache.StoreToNodeLister{
			Store: cache.NewStore(cache.MetaNamespaceKeyFunc),
		},
	}
}
Example #12
// NewProvision creates a new namespace provision admission control handler
func NewProvision(c client.Interface) admission.Interface {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	reflector := cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return c.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return c.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.Namespace{},
		store,
		0,
	)
	reflector.Run()
	return createProvision(c, store)
}
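A rough usage sketch, mirroring the attribute records built in the tests on this page (kubeClient here is a placeholder for a configured client.Interface):

func exampleProvision(kubeClient client.Interface) {
	handler := NewProvision(kubeClient)

	pod := api.Pod{ObjectMeta: api.ObjectMeta{Name: "busybox", Namespace: "demo"}}
	attrs := admission.NewAttributesRecord(&pod, "Pod", pod.Namespace, pod.Name, "pods", "", admission.Create, nil)
	if err := handler.Admit(attrs); err != nil {
		glog.Errorf("admission rejected pod: %v", err)
		return
	}
	// On success, the "demo" namespace was created if it did not already exist.
}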
Example #13
// NewExists creates a new namespace exists admission control handler
func NewExists(c client.Interface) admission.Interface {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	reflector := cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return c.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return c.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.Namespace{},
		store,
		5*time.Minute,
	)
	reflector.Run()
	return &exists{
		client:  c,
		store:   store,
		Handler: admission.NewHandler(admission.Create, admission.Update, admission.Delete),
	}
}
Example #14
// TestAdmissionNamespaceExistsUnknownToHandler verifies that an "already exists" error from the client is tolerated when the namespace is missing from the handler's store
func TestAdmissionNamespaceExistsUnknownToHandler(t *testing.T) {
	namespace := "test"
	mockClient := &testclient.Fake{}
	mockClient.AddReactor("create", "namespaces", func(action testclient.Action) (bool, runtime.Object, error) {
		return true, nil, errors.NewAlreadyExists("namespaces", namespace)
	})

	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	handler := &provision{
		client: mockClient,
		store:  store,
	}
	pod := api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
		Spec: api.PodSpec{
			Volumes:    []api.Volume{{Name: "vol"}},
			Containers: []api.Container{{Name: "ctr", Image: "image"}},
		},
	}
	err := handler.Admit(admission.NewAttributesRecord(&pod, "Pod", pod.Namespace, pod.Name, "pods", "", admission.Create, nil))
	if err != nil {
		t.Errorf("Unexpected error returned from admission handler")
	}
}
Example #15
func Example() {
	// source simulates an apiserver object endpoint.
	source := framework.NewFakeControllerSource()

	// This will hold the downstream state, as we know it.
	downstream := cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc)

	// This will hold incoming changes. Note how we pass downstream in as a
	// KeyLister, that way resync operations will result in the correct set
	// of update/delete deltas.
	fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, downstream)

	// Let's do threadsafe output to get predictable test results.
	deletionCounter := make(chan string, 1000)

	cfg := &framework.Config{
		Queue:            fifo,
		ListerWatcher:    source,
		ObjectType:       &api.Pod{},
		FullResyncPeriod: time.Millisecond * 100,
		RetryOnError:     false,

		// Let's implement a simple controller that just deletes
		// everything that comes in.
		Process: func(obj interface{}) error {
			// Obj is from the Pop method of the Queue we make above.
			newest := obj.(cache.Deltas).Newest()

			if newest.Type != cache.Deleted {
				// Update our downstream store.
				err := downstream.Add(newest.Object)
				if err != nil {
					return err
				}

				// Delete this object.
				source.Delete(newest.Object.(runtime.Object))
			} else {
				// Update our downstream store.
				err := downstream.Delete(newest.Object)
				if err != nil {
					return err
				}

				// fifo's KeyOf is easiest, because it handles
				// DeletedFinalStateUnknown markers.
				key, err := fifo.KeyOf(newest.Object)
				if err != nil {
					return err
				}

				// Report this deletion.
				deletionCounter <- key
			}
			return nil
		},
	}

	// Create the controller and run it until we close stop.
	stop := make(chan struct{})
	defer close(stop)
	go framework.New(cfg).Run(stop)

	// Let's add a few objects to the source.
	testIDs := []string{"a-hello", "b-controller", "c-framework"}
	for _, name := range testIDs {
		// Note that these pods are not valid-- the fake source doesn't
		// call validation or anything.
		source.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: name}})
	}

	// Let's wait for the controller to process the things we just added.
	outputSet := sets.String{}
	for i := 0; i < len(testIDs); i++ {
		outputSet.Insert(<-deletionCounter)
	}

	for _, key := range outputSet.List() {
		fmt.Println(key)
	}
	// Output:
	// a-hello
	// b-controller
	// c-framework
}
Example #16
func TestCheckPod(t *testing.T) {

	tcs := []struct {
		pod   api.Pod
		prune bool
	}{

		{
			pod: api.Pod{
				ObjectMeta: api.ObjectMeta{DeletionTimestamp: nil},
				Spec:       api.PodSpec{NodeName: "new"},
			},
			prune: false,
		},
		{
			pod: api.Pod{
				ObjectMeta: api.ObjectMeta{DeletionTimestamp: nil},
				Spec:       api.PodSpec{NodeName: "old"},
			},
			prune: false,
		},
		{
			pod: api.Pod{
				ObjectMeta: api.ObjectMeta{DeletionTimestamp: nil},
				Spec:       api.PodSpec{NodeName: ""},
			},
			prune: false,
		},
		{
			pod: api.Pod{
				ObjectMeta: api.ObjectMeta{DeletionTimestamp: nil},
				Spec:       api.PodSpec{NodeName: "nonexistant"},
			},
			prune: false,
		},
		{
			pod: api.Pod{
				ObjectMeta: api.ObjectMeta{DeletionTimestamp: &unversioned.Time{}},
				Spec:       api.PodSpec{NodeName: "new"},
			},
			prune: false,
		},
		{
			pod: api.Pod{
				ObjectMeta: api.ObjectMeta{DeletionTimestamp: &unversioned.Time{}},
				Spec:       api.PodSpec{NodeName: "old"},
			},
			prune: true,
		},
		{
			pod: api.Pod{
				ObjectMeta: api.ObjectMeta{DeletionTimestamp: &unversioned.Time{}},
				Spec:       api.PodSpec{NodeName: "older"},
			},
			prune: true,
		},
		{
			pod: api.Pod{
				ObjectMeta: api.ObjectMeta{DeletionTimestamp: &unversioned.Time{}},
				Spec:       api.PodSpec{NodeName: "oldest"},
			},
			prune: true,
		},
		{
			pod: api.Pod{
				ObjectMeta: api.ObjectMeta{DeletionTimestamp: &unversioned.Time{}},
				Spec:       api.PodSpec{NodeName: ""},
			},
			prune: true,
		},
		{
			pod: api.Pod{
				ObjectMeta: api.ObjectMeta{DeletionTimestamp: &unversioned.Time{}},
				Spec:       api.PodSpec{NodeName: "nonexistant"},
			},
			prune: true,
		},
	}

	nc := NewNodeController(nil, nil, 0, nil, nil, 0, 0, 0, nil, false)
	nc.nodeStore.Store = cache.NewStore(cache.MetaNamespaceKeyFunc)
	nc.nodeStore.Store.Add(&api.Node{
		ObjectMeta: api.ObjectMeta{
			Name: "new",
		},
		Status: api.NodeStatus{
			NodeInfo: api.NodeSystemInfo{
				KubeletVersion: "v1.1.0",
			},
		},
	})
	nc.nodeStore.Store.Add(&api.Node{
		ObjectMeta: api.ObjectMeta{
			Name: "old",
		},
		Status: api.NodeStatus{
			NodeInfo: api.NodeSystemInfo{
				KubeletVersion: "v1.0.0",
			},
		},
	})
	nc.nodeStore.Store.Add(&api.Node{
		ObjectMeta: api.ObjectMeta{
			Name: "older",
		},
		Status: api.NodeStatus{
			NodeInfo: api.NodeSystemInfo{
				KubeletVersion: "v0.21.4",
			},
		},
	})
	nc.nodeStore.Store.Add(&api.Node{
		ObjectMeta: api.ObjectMeta{
			Name: "oldest",
		},
		Status: api.NodeStatus{
			NodeInfo: api.NodeSystemInfo{
				KubeletVersion: "v0.19.3",
			},
		},
	})

	for i, tc := range tcs {
		var deleteCalls int
		nc.forcefullyDeletePod = func(_ *api.Pod) {
			deleteCalls++
		}

		nc.maybeDeleteTerminatingPod(&tc.pod)

		if tc.prune && deleteCalls != 1 {
			t.Errorf("[%v] expected number of delete calls to be 1 but got %v", i, deleteCalls)
		}
		if !tc.prune && deleteCalls != 0 {
			t.Errorf("[%v] expected number of delete calls to be 0 but got %v", i, deleteCalls)
		}
	}
}
Example #17
func TestSchedulerForgetAssumedPodAfterDelete(t *testing.T) {
	eventBroadcaster := record.NewBroadcaster()
	defer eventBroadcaster.StartLogging(t.Logf).Stop()

	// Setup modeler so we control the contents of all 3 stores: assumed,
	// scheduled and queued
	scheduledPodStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
	scheduledPodLister := &cache.StoreToPodLister{Store: scheduledPodStore}

	queuedPodStore := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	queuedPodLister := &cache.StoreToPodLister{Store: queuedPodStore}

	modeler := NewSimpleModeler(queuedPodLister, scheduledPodLister)

	// Create a fake clock used to timestamp entries and calculate ttl. Nothing
	// will expire till we flip to something older than the ttl, at which point
	// all entries inserted with fakeTime will expire.
	ttl := 30 * time.Second
	fakeTime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
	fakeClock := &util.FakeClock{Time: fakeTime}
	ttlPolicy := &cache.TTLPolicy{Ttl: ttl, Clock: fakeClock}
	assumedPodsStore := cache.NewFakeExpirationStore(
		cache.MetaNamespaceKeyFunc, nil, ttlPolicy, fakeClock)
	modeler.assumedPods = &cache.StoreToPodLister{Store: assumedPodsStore}

	// Port is the easiest way to cause a fit predicate failure
	podPort := 8080
	firstPod := podWithPort("foo", "", podPort)

	// Create the scheduler config
	algo := NewGenericScheduler(
		map[string]algorithm.FitPredicate{"PodFitsHostPorts": predicates.PodFitsHostPorts},
		[]algorithm.PriorityConfig{},
		modeler.PodLister(),
		rand.New(rand.NewSource(time.Now().UnixNano())))

	var gotBinding *api.Binding
	c := &Config{
		Modeler: modeler,
		NodeLister: algorithm.FakeNodeLister(
			api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
		),
		Algorithm: algo,
		Binder: fakeBinder{func(b *api.Binding) error {
			scheduledPodStore.Add(podWithPort(b.Name, b.Target.Name, podPort))
			gotBinding = b
			return nil
		}},
		NextPod: func() *api.Pod {
			return queuedPodStore.Pop().(*api.Pod)
		},
		Error: func(p *api.Pod, err error) {
			t.Errorf("Unexpected error when scheduling pod %+v: %v", p, err)
		},
		Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"}),
	}

	// First scheduling pass should schedule the pod
	s := New(c)
	called := make(chan struct{})
	events := eventBroadcaster.StartEventWatcher(func(e *api.Event) {
		if e, a := "Scheduled", e.Reason; e != a {
			t.Errorf("expected %v, got %v", e, a)
		}
		close(called)
	})

	queuedPodStore.Add(firstPod)
	// queuedPodStore: [foo:8080]
	// scheduledPodStore: []
	// assumedPods: []

	s.scheduleOne()
	// queuedPodStore: []
	// scheduledPodStore: [foo:8080]
	// assumedPods: [foo:8080]

	pod, exists, _ := scheduledPodStore.GetByKey("foo")
	if !exists {
		t.Errorf("Expected scheduled pod store to contain pod")
	}
	pod, exists, _ = queuedPodStore.GetByKey("foo")
	if exists {
		t.Errorf("Did not expect a queued pod, found %+v", pod)
	}
	pod, exists, _ = assumedPodsStore.GetByKey("foo")
	if !exists {
		t.Errorf("Assumed pod store should contain stale pod")
	}

	expectBind := &api.Binding{
		ObjectMeta: api.ObjectMeta{Name: "foo"},
		Target:     api.ObjectReference{Kind: "Node", Name: "machine1"},
	}
	if ex, ac := expectBind, gotBinding; !reflect.DeepEqual(ex, ac) {
		t.Errorf("Expected exact match on binding: %s", util.ObjectDiff(ex, ac))
	}

	<-called
	events.Stop()

	scheduledPodStore.Delete(pod)
	_, exists, _ = assumedPodsStore.Get(pod)
	if !exists {
		t.Errorf("Expected pod %#v in assumed pod store", pod)
	}

	secondPod := podWithPort("bar", "", podPort)
	queuedPodStore.Add(secondPod)
	// queuedPodStore: [bar:8080]
	// scheduledPodStore: []
	// assumedPods: [foo:8080]

	// Second scheduling pass will fail to schedule if the store hasn't expired
	// the deleted pod. This would normally happen with a timeout.
	//expirationPolicy.NeverExpire = util.NewStringSet()
	fakeClock.Time = fakeClock.Time.Add(ttl + 1)

	called = make(chan struct{})
	events = eventBroadcaster.StartEventWatcher(func(e *api.Event) {
		if e, a := "Scheduled", e.Reason; e != a {
			t.Errorf("expected %v, got %v", e, a)
		}
		close(called)
	})

	s.scheduleOne()

	expectBind = &api.Binding{
		ObjectMeta: api.ObjectMeta{Name: "bar"},
		Target:     api.ObjectReference{Kind: "Node", Name: "machine1"},
	}
	if ex, ac := expectBind, gotBinding; !reflect.DeepEqual(ex, ac) {
		t.Errorf("Expected exact match on binding: %s", util.ObjectDiff(ex, ac))
	}
	<-called
	events.Stop()
}
Example #18
// TestAdmission verifies the namespace lifecycle admission checks for create, update, and delete operations against active, terminating, and missing namespaces
func TestAdmission(t *testing.T) {
	namespaceObj := &api.Namespace{
		ObjectMeta: api.ObjectMeta{
			Name:      "test",
			Namespace: "",
		},
		Status: api.NamespaceStatus{
			Phase: api.NamespaceActive,
		},
	}
	var namespaceLock sync.RWMutex

	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	store.Add(namespaceObj)
	fakeWatch := watch.NewFake()
	mockClient := &testclient.Fake{}
	mockClient.AddWatchReactor("*", testclient.DefaultWatchReactor(fakeWatch, nil))
	mockClient.AddReactor("get", "namespaces", func(action testclient.Action) (bool, runtime.Object, error) {
		namespaceLock.RLock()
		defer namespaceLock.RUnlock()
		if getAction, ok := action.(testclient.GetAction); ok && getAction.GetName() == namespaceObj.Name {
			return true, namespaceObj, nil
		}
		return true, nil, fmt.Errorf("No result for action %v", action)
	})
	mockClient.AddReactor("list", "namespaces", func(action testclient.Action) (bool, runtime.Object, error) {
		namespaceLock.RLock()
		defer namespaceLock.RUnlock()
		return true, &api.NamespaceList{Items: []api.Namespace{*namespaceObj}}, nil
	})

	lfhandler := NewLifecycle(mockClient).(*lifecycle)
	lfhandler.store = store
	handler := admission.NewChainHandler(lfhandler)
	pod := api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespaceObj.Name},
		Spec: api.PodSpec{
			Volumes:    []api.Volume{{Name: "vol"}},
			Containers: []api.Container{{Name: "ctr", Image: "image"}},
		},
	}
	badPod := api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "456", Namespace: "doesnotexist"},
		Spec: api.PodSpec{
			Volumes:    []api.Volume{{Name: "vol"}},
			Containers: []api.Container{{Name: "ctr", Image: "image"}},
		},
	}
	err := handler.Admit(admission.NewAttributesRecord(&pod, "Pod", pod.Namespace, pod.Name, "pods", "", admission.Create, nil))
	if err != nil {
		t.Errorf("Unexpected error returned from admission handler: %v", err)
	}

	// change namespace state to terminating
	namespaceLock.Lock()
	namespaceObj.Status.Phase = api.NamespaceTerminating
	namespaceLock.Unlock()
	store.Add(namespaceObj)

	// verify create operations in the namespace cause an error
	err = handler.Admit(admission.NewAttributesRecord(&pod, "Pod", pod.Namespace, pod.Name, "pods", "", admission.Create, nil))
	if err == nil {
		t.Errorf("Expected error rejecting creates in a namespace when it is terminating")
	}

	// verify update operations in the namespace can proceed
	err = handler.Admit(admission.NewAttributesRecord(&pod, "Pod", pod.Namespace, pod.Name, "pods", "", admission.Update, nil))
	if err != nil {
		t.Errorf("Unexpected error returned from admission handler: %v", err)
	}

	// verify delete operations in the namespace can proceed
	err = handler.Admit(admission.NewAttributesRecord(nil, "Pod", pod.Namespace, pod.Name, "pods", "", admission.Delete, nil))
	if err != nil {
		t.Errorf("Unexpected error returned from admission handler: %v", err)
	}

	// verify delete of namespace default can never proceed
	err = handler.Admit(admission.NewAttributesRecord(nil, "Namespace", "", api.NamespaceDefault, "namespaces", "", admission.Delete, nil))
	if err == nil {
		t.Errorf("Expected an error that this namespace can never be deleted")
	}

	// verify delete of namespace other than default can proceed
	err = handler.Admit(admission.NewAttributesRecord(nil, "Namespace", "", "other", "namespaces", "", admission.Delete, nil))
	if err != nil {
		t.Errorf("Did not expect an error %v", err)
	}

	// verify create/update/delete of an object in a non-existent namespace returns an error
	err = handler.Admit(admission.NewAttributesRecord(&badPod, "Pod", badPod.Namespace, badPod.Name, "pods", "", admission.Create, nil))
	if err == nil {
		t.Errorf("Expected an aerror that objects cannot be created in non-existant namespaces", err)
	}

	err = handler.Admit(admission.NewAttributesRecord(&badPod, "Pod", badPod.Namespace, badPod.Name, "pods", "", admission.Update, nil))
	if err == nil {
		t.Errorf("Expected an aerror that objects cannot be updated in non-existant namespaces", err)
	}

	err = handler.Admit(admission.NewAttributesRecord(&badPod, "Pod", badPod.Namespace, badPod.Name, "pods", "", admission.Delete, nil))
	if err == nil {
		t.Errorf("Expected an aerror that objects cannot be deleted in non-existant namespaces", err)
	}
}
Example #19
	containerRestartNodes := sets.NewString()
	for _, p := range pods.Items {
		for _, v := range FailedContainers(&p) {
			failedContainers = failedContainers + v.restarts
			containerRestartNodes.Insert(p.Spec.NodeName)
		}
	}
	return failedContainers, containerRestartNodes.List()
}

var _ = Describe("DaemonRestart", func() {

	framework := Framework{BaseName: "daemonrestart"}
	rcName := "daemonrestart" + strconv.Itoa(numPods) + "-" + string(util.NewUUID())
	labelSelector := labels.Set(map[string]string{"name": rcName}).AsSelector()
	existingPods := cache.NewStore(cache.MetaNamespaceKeyFunc)
	var ns string
	var config RCConfig
	var controller *controllerFramework.Controller
	var newPods cache.Store
	var stopCh chan struct{}

	BeforeEach(func() {

		// These tests require SSH
		// TODO(11834): Enable this test in GKE once experimental API there is switched on
		SkipUnlessProviderIs("gce")
		framework.beforeEach()
		ns = framework.Namespace.Name

		// All the restart tests need an rc and a watch on pods of the rc.