Example #1
// rmSetup starts an in-process master and wires a ReplicaSet controller to it,
// returning the test server, the controller, its informers, and a client.
func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *replicaset.ReplicaSetController, cache.SharedIndexInformer, cache.SharedIndexInformer, clientset.Interface) {
	masterConfig := framework.NewIntegrationTestMasterConfig()
	_, s := framework.RunAMaster(masterConfig)

	config := restclient.Config{Host: s.URL}
	clientSet, err := clientset.NewForConfig(&config)
	if err != nil {
		t.Fatalf("Error in create clientset: %v", err)
	}
	resyncPeriod := 12 * time.Hour
	informers := informers.NewSharedInformerFactory(internalclientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "rs-informers")), resyncPeriod)

	rm := replicaset.NewReplicaSetController(
		informers.ReplicaSets(),
		informers.Pods(),
		internalclientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "replicaset-controller")),
		replicaset.BurstReplicas,
		4096,
		enableGarbageCollector,
	)

	return s, rm, informers.ReplicaSets().Informer(), informers.Pods().Informer(), clientSet
}
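A minimal sketch of how a test might drive this helper. The test name is hypothetical, and the informer Run calls plus the controller's Run(workers, stopCh) signature are assumptions based on the controller APIs of this Kubernetes vintage, not code shown above.

func TestReplicaSetExample(t *testing.T) {
	s, rm, rsInformer, podInformer, clientSet := rmSetup(t, true)
	defer s.Close()

	stopCh := make(chan struct{})
	defer close(stopCh)

	// Feed the controller's caches, then start its workers.
	go rsInformer.Run(stopCh)
	go podInformer.Run(stopCh)
	go rm.Run(5, stopCh) // 5 workers; signature assumed for this era

	// ... create ReplicaSets through clientSet and assert on the resulting pods ...
	_ = clientSet
}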
Example #2
// rmSetup starts an in-process master and wires a ReplicationManager to it,
// returning the test server, the manager, its pod informer, and a client.
func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *replication.ReplicationManager, cache.SharedIndexInformer, clientset.Interface) {
	masterConfig := framework.NewIntegrationTestMasterConfig()
	_, s := framework.RunAMaster(masterConfig)

	config := restclient.Config{Host: s.URL}
	clientSet, err := clientset.NewForConfig(&config)
	if err != nil {
		t.Fatalf("Error in create clientset: %v", err)
	}
	resyncPeriod := 12 * time.Hour
	resyncPeriodFunc := func() time.Duration {
		return resyncPeriod
	}
	podInformer := informers.NewPodInformer(internalclientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "pod-informer")), resyncPeriod)
	rm := replication.NewReplicationManager(
		podInformer,
		internalclientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "replication-controller")),
		resyncPeriodFunc,
		replication.BurstReplicas,
		4096,
		enableGarbageCollector,
	)

	return s, rm, podInformer, clientSet
}
Example #3
func createClients(t *testing.T, s *httptest.Server) (*clientset.Clientset, *persistentvolumecontroller.PersistentVolumeController, watch.Interface, watch.Interface) {
	// Use a higher QPS and Burst: one test exercises race conditions by
	// creating many objects, and the default values were too low.
	binderClient := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}, QPS: 1000000, Burst: 1000000})
	testClient := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}, QPS: 1000000, Burst: 1000000})

	host := volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil)
	plugins := []volume.VolumePlugin{&volumetest.FakeVolumePlugin{
		PluginName:             "plugin-name",
		Host:                   host,
		Config:                 volume.VolumeConfig{},
		LastProvisionerOptions: volume.VolumeOptions{},
		NewAttacherCallCount:   0,
		NewDetacherCallCount:   0,
		Mounters:               nil,
		Unmounters:             nil,
		Attachers:              nil,
		Detachers:              nil,
	}}
	cloud := &fake_cloud.FakeCloud{}

	syncPeriod := getSyncPeriod()
	ctrl := persistentvolumecontroller.NewPersistentVolumeController(binderClient, syncPeriod, nil, plugins, cloud, "", nil, nil, nil)

	watchPV, err := testClient.PersistentVolumes().Watch(api.ListOptions{})
	if err != nil {
		t.Fatalf("Failed to watch PersistentVolumes: %v", err)
	}
	watchPVC, err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Watch(api.ListOptions{})
	if err != nil {
		t.Fatalf("Failed to watch PersistentVolumeClaimss: %v", err)
	}

	return testClient, ctrl, watchPV, watchPVC
}
Example #4
func createClients(ns *api.Namespace, t *testing.T, s *httptest.Server, syncPeriod time.Duration) (*clientset.Clientset, *persistentvolumecontroller.PersistentVolumeController, watch.Interface, watch.Interface) {
	// Use a higher QPS and Burst: one test exercises race conditions by
	// creating many objects, and the default values were too low.
	binderClient := clientset.NewForConfigOrDie(&restclient.Config{
		Host:          s.URL,
		ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()},
		QPS:           1000000,
		Burst:         1000000,
	})
	testClient := clientset.NewForConfigOrDie(&restclient.Config{
		Host:          s.URL,
		ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()},
		QPS:           1000000,
		Burst:         1000000,
	})

	host := volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil, "" /* rootContext */)
	plugin := &volumetest.FakeVolumePlugin{
		PluginName:             provisionerPluginName,
		Host:                   host,
		Config:                 volume.VolumeConfig{},
		LastProvisionerOptions: volume.VolumeOptions{},
		NewAttacherCallCount:   0,
		NewDetacherCallCount:   0,
		Mounters:               nil,
		Unmounters:             nil,
		Attachers:              nil,
		Detachers:              nil,
	}
	plugins := []volume.VolumePlugin{plugin}
	cloud := &fake_cloud.FakeCloud{}

	syncPeriod = getSyncPeriod(syncPeriod)
	ctrl := persistentvolumecontroller.NewPersistentVolumeController(
		binderClient,
		syncPeriod,
		nil, // alpha provisioner
		plugins,
		cloud,
		"",   // cluster name
		nil,  // volumeSource
		nil,  // claimSource
		nil,  // classSource
		nil,  // eventRecorder
		true) // enableDynamicProvisioning

	watchPV, err := testClient.PersistentVolumes().Watch(api.ListOptions{})
	if err != nil {
		t.Fatalf("Failed to watch PersistentVolumes: %v", err)
	}
	watchPVC, err := testClient.PersistentVolumeClaims(ns.Name).Watch(api.ListOptions{})
	if err != nil {
		t.Fatalf("Failed to watch PersistentVolumeClaimss: %v", err)
	}

	return testClient, ctrl, watchPV, watchPVC
}
Example #5
func createClients(ns *api.Namespace, t *testing.T, s *httptest.Server, syncPeriod time.Duration) (*clientset.Clientset, *persistentvolumecontroller.PersistentVolumeController, watch.Interface, watch.Interface) {
	// Use a higher QPS and Burst: one test exercises race conditions by
	// creating many objects, and the default values were too low.
	binderClient := clientset.NewForConfigOrDie(&restclient.Config{
		Host:          s.URL,
		ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion},
		QPS:           1000000,
		Burst:         1000000,
	})
	testClient := clientset.NewForConfigOrDie(&restclient.Config{
		Host:          s.URL,
		ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion},
		QPS:           1000000,
		Burst:         1000000,
	})

	host := volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil)
	plugin := &volumetest.FakeVolumePlugin{
		PluginName:             provisionerPluginName,
		Host:                   host,
		Config:                 volume.VolumeConfig{},
		LastProvisionerOptions: volume.VolumeOptions{},
		NewAttacherCallCount:   0,
		NewDetacherCallCount:   0,
		Mounters:               nil,
		Unmounters:             nil,
		Attachers:              nil,
		Detachers:              nil,
	}
	plugins := []volume.VolumePlugin{plugin}
	cloud := &fake_cloud.FakeCloud{}
	ctrl := persistentvolumecontroller.NewController(
		persistentvolumecontroller.ControllerParameters{
			KubeClient:    binderClient,
			SyncPeriod:    getSyncPeriod(syncPeriod),
			VolumePlugins: plugins,
			Cloud:         cloud,
			EnableDynamicProvisioning: true,
		})

	watchPV, err := testClient.PersistentVolumes().Watch(api.ListOptions{})
	if err != nil {
		t.Fatalf("Failed to watch PersistentVolumes: %v", err)
	}
	watchPVC, err := testClient.PersistentVolumeClaims(ns.Name).Watch(api.ListOptions{})
	if err != nil {
		t.Fatalf("Failed to watch PersistentVolumeClaimss: %v", err)
	}

	return testClient, ctrl, watchPV, watchPVC
}
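All three createClients variants hand back raw watch.Interface handles. The helper below is a sketch of how a test might consume one; the name waitForPersistentVolumePhase and the absence of a timeout are simplifications assumed here, not code shown above.

// waitForPersistentVolumePhase blocks until the watch delivers a
// PersistentVolume in the given phase. Real tests would add a timeout
// via select on a timer channel.
func waitForPersistentVolumePhase(w watch.Interface, phase api.PersistentVolumePhase) {
	for event := range w.ResultChan() {
		volume, ok := event.Object.(*api.PersistentVolume)
		if !ok {
			continue
		}
		if volume.Status.Phase == phase {
			return
		}
	}
}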
Example #6
func TestDSManagerInit(t *testing.T) {
	// Insert a stable daemon set and make sure we don't create an extra pod
	// for the one node which already has a daemon after a simulated restart.
	ds := newDaemonSet("test")
	ds.Status = extensions.DaemonSetStatus{
		CurrentNumberScheduled: 1,
		NumberMisscheduled:     0,
		DesiredNumberScheduled: 1,
	}
	nodeName := "only-node"
	podList := &api.PodList{
		Items: []api.Pod{
			*newPod("podname", nodeName, simpleDaemonSetLabel),
		}}
	response := runtime.EncodeOrDie(testapi.Default.Codec(), podList)
	fakeHandler := utiltesting.FakeHandler{
		StatusCode:   200,
		ResponseBody: response,
	}
	testServer := httptest.NewServer(&fakeHandler)
	// TODO: Uncomment once #19254 is fixed.
	// defer testServer.Close()

	clientset := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	manager := NewDaemonSetsController(clientset, controller.NoResyncPeriodFunc)
	manager.dsStore.Add(ds)
	manager.nodeStore.Add(newNode(nodeName, nil))
	manager.podStoreSynced = alwaysReady
	controller.SyncAllPodsWithStore(manager.kubeClient, manager.podStore.Store)

	fakePodControl := &controller.FakePodControl{}
	manager.podControl = fakePodControl
	manager.syncHandler(getKey(ds, t))
	validateSyncDaemonSets(t, fakePodControl, 0, 0)
}
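Many of these tests assign manager.podStoreSynced = alwaysReady to skip waiting for cache sync. The helper itself is not shown in this excerpt; a one-line definition consistent with that use would be:

var alwaysReady = func() bool { return true }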
Example #7
func (c *Client) Setup(t *testing.T) *Client {
	c.handler = &utiltesting.FakeHandler{
		StatusCode: c.Response.StatusCode,
	}
	if responseBody := body(t, c.Response.Body, c.Response.RawBody); responseBody != nil {
		c.handler.ResponseBody = *responseBody
	}
	c.server = httptest.NewServer(c.handler)
	if c.Client == nil {
		c.Client = client.NewOrDie(&client.Config{
			Host:          c.server.URL,
			ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()},
		})

		// TODO: caesarxuchao: hacky way to specify version of Experimental client.
		// We will fix this by supporting multiple group versions in Config
		c.ExtensionsClient = client.NewExtensionsOrDie(&client.Config{
			Host:          c.server.URL,
			ContentConfig: client.ContentConfig{GroupVersion: testapi.Extensions.GroupVersion()},
		})

		c.Clientset = clientset.NewForConfigOrDie(&client.Config{Host: c.server.URL})
	}
	c.QueryValidator = map[string]func(string, string) bool{}
	return c
}
Example #8
// TestSyncJobExpectations tests that a pod cannot sneak in between counting active pods
// and checking expectations.
func TestSyncJobExpectations(t *testing.T) {
	clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	manager := NewJobController(clientset, controller.NoResyncPeriodFunc)
	fakePodControl := controller.FakePodControl{}
	manager.podControl = &fakePodControl
	manager.podStoreSynced = alwaysReady
	manager.updateHandler = func(job *extensions.Job) error { return nil }

	job := newJob(2, 2)
	manager.jobStore.Store.Add(job)
	pods := newPodList(2, api.PodPending, job)
	manager.podStore.Store.Add(&pods[0])

	manager.expectations = FakeJobExpectations{
		controller.NewControllerExpectations(), true, func() {
			// If we check active pods before checking expectations, the job
			// will create a new replica because it doesn't see this pod, but
			// has fulfilled its expectations.
			manager.podStore.Store.Add(&pods[1])
		},
	}
	manager.syncJob(getKey(job, t))
	if len(fakePodControl.Templates) != 0 {
		t.Errorf("Unexpected number of creates.  Expected %d, saw %d\n", 0, len(fakePodControl.Templates))
	}
	if len(fakePodControl.DeletePodName) != 0 {
		t.Errorf("Unexpected number of deletes.  Expected %d, saw %d\n", 0, len(fakePodControl.DeletePodName))
	}
}
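The test builds FakeJobExpectations positionally: an embedded *controller.ControllerExpectations, a bool, and a callback. A definition consistent with that literal (the exact field names are assumptions) would override SatisfiedExpectations to fire the callback before answering:

// FakeJobExpectations reports a fixed answer and lets the test inject a
// side effect at the exact moment expectations are checked.
type FakeJobExpectations struct {
	*controller.ControllerExpectations
	satisfied    bool
	expSatisfied func()
}

func (fe FakeJobExpectations) SatisfiedExpectations(controllerKey string) bool {
	fe.expSatisfied()
	return fe.satisfied
}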
Example #9
func TestSyncPastDeadlineJobFinished(t *testing.T) {
	clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	manager := NewJobController(clientset, controller.NoResyncPeriodFunc)
	fakePodControl := controller.FakePodControl{}
	manager.podControl = &fakePodControl
	manager.podStoreSynced = alwaysReady
	var actual *extensions.Job
	manager.updateHandler = func(job *extensions.Job) error {
		actual = job
		return nil
	}

	job := newJob(1, 1)
	activeDeadlineSeconds := int64(10)
	job.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
	start := unversioned.Unix(unversioned.Now().Time.Unix()-15, 0)
	job.Status.StartTime = &start
	job.Status.Conditions = append(job.Status.Conditions, newCondition(extensions.JobFailed, "DeadlineExceeded", "Job was active longer than specified deadline"))
	manager.jobStore.Store.Add(job)
	err := manager.syncJob(getKey(job, t))
	if err != nil {
		t.Errorf("Unexpected error when syncing jobs %v", err)
	}
	if len(fakePodControl.Templates) != 0 {
		t.Errorf("Unexpected number of creates.  Expected %d, saw %d\n", 0, len(fakePodControl.Templates))
	}
	if len(fakePodControl.DeletePodName) != 0 {
		t.Errorf("Unexpected number of deletes.  Expected %d, saw %d\n", 0, len(fakePodControl.DeletePodName))
	}
	if actual != nil {
		t.Error("Unexpected job modification")
	}
}
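newCondition is called above but not defined in this excerpt; a sketch consistent with the extensions.JobCondition type of this API version (the exact field set is an assumption) looks like:

func newCondition(conditionType extensions.JobConditionType, reason, message string) extensions.JobCondition {
	return extensions.JobCondition{
		Type:               conditionType,
		Status:             api.ConditionTrue,
		LastProbeTime:      unversioned.Now(),
		LastTransitionTime: unversioned.Now(),
		Reason:             reason,
		Message:            message,
	}
}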
Example #10
func TestSyncJobUpdateRequeue(t *testing.T) {
	clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
	manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
	fakePodControl := controller.FakePodControl{}
	manager.podControl = &fakePodControl
	manager.podStoreSynced = alwaysReady
	manager.jobStoreSynced = alwaysReady
	updateError := fmt.Errorf("Update error")
	manager.updateHandler = func(job *batch.Job) error {
		manager.queue.AddRateLimited(getKey(job, t))
		return updateError
	}
	job := newJob(2, 2)
	sharedInformerFactory.Jobs().Informer().GetIndexer().Add(job)
	err := manager.syncJob(getKey(job, t))
	if err == nil || err != updateError {
		t.Errorf("Expected error %v when syncing jobs, got %v", updateError, err)
	}
	t.Log("Waiting for a job in the queue")
	key, _ := manager.queue.Get()
	expectedKey := getKey(job, t)
	if key != expectedKey {
		t.Errorf("Expected requeue of job with key %s got %s", expectedKey, key)
	}
}
Example #11
func TestBind(t *testing.T) {
	table := []struct {
		binding *api.Binding
	}{
		{binding: &api.Binding{
			ObjectMeta: api.ObjectMeta{
				Namespace: api.NamespaceDefault,
				Name:      "foo",
			},
			Target: api.ObjectReference{
				Name: "foohost.kubernetes.mydomain.com",
			},
		}},
	}

	for _, item := range table {
		handler := utiltesting.FakeHandler{
			StatusCode:   200,
			ResponseBody: "",
			T:            t,
		}
		server := httptest.NewServer(&handler)
		defer server.Close()
		client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
		b := binder{client}

		if err := b.Bind(item.binding); err != nil {
			t.Errorf("Unexpected error: %v", err)
			continue
		}
		expectedBody := runtime.EncodeOrDie(testapi.Default.Codec(), item.binding)
		handler.ValidateRequest(t, testapi.Default.ResourcePath("bindings", api.NamespaceDefault, ""), "POST", &expectedBody)
	}
}
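The binder under test is constructed as binder{client}, i.e. a single-field struct. One plausible shape, assuming the internal clientset's Pods().Bind call (which POSTs to the bindings subresource, matching the request the test validates):

type binder struct {
	Client clientset.Interface
}

// Bind posts the binding for the pod named in binding.ObjectMeta.
func (b *binder) Bind(binding *api.Binding) error {
	return b.Client.Core().Pods(binding.Namespace).Bind(binding)
}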
Example #12
func TestRCManagerNotReady(t *testing.T) {
	c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	fakePodControl := controller.FakePodControl{}
	manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 2, 0)
	manager.podControl = &fakePodControl
	manager.podStoreSynced = func() bool { return false }

	// Simulates the rc reflector running before the pod reflector. We don't
	// want to end up creating replicas in this case until the pod reflector
	// has synced, so the rc manager should just requeue the rc.
	controllerSpec := newReplicationController(1)
	manager.rcStore.Indexer.Add(controllerSpec)

	rcKey := getKey(controllerSpec, t)
	manager.syncReplicationController(rcKey)
	validateSyncReplication(t, &fakePodControl, 0, 0, 0)
	queueRC, _ := manager.queue.Get()
	if queueRC != rcKey {
		t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC)
	}

	manager.podStoreSynced = alwaysReady
	manager.syncReplicationController(rcKey)
	validateSyncReplication(t, &fakePodControl, 1, 0, 0)
}
Example #13
func TestOverlappingRSs(t *testing.T) {
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	labelMap := map[string]string{"foo": "bar"}

	for i := 0; i < 5; i++ {
		manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, 10, 0)
		manager.podStoreSynced = alwaysReady

		// Create ReplicaSets with staggered creation timestamps, shuffle them
		// randomly, and insert them into the ReplicaSet controller's store
		var controllers []*extensions.ReplicaSet
		for j := 1; j < 10; j++ {
			rsSpec := newReplicaSet(1, labelMap)
			rsSpec.CreationTimestamp = unversioned.Date(2014, time.December, j, 0, 0, 0, 0, time.Local)
			rsSpec.Name = string(uuid.NewUUID())
			controllers = append(controllers, rsSpec)
		}
		shuffledControllers := shuffle(controllers)
		for j := range shuffledControllers {
			manager.rsStore.Store.Add(shuffledControllers[j])
		}
		// Add a pod and make sure only the oldest ReplicaSet is synced
		pods := newPodList(nil, 1, api.PodPending, labelMap, controllers[0], "pod")
		rsKey := getKey(controllers[0], t)

		manager.addPod(&pods.Items[0])
		queueRS, _ := manager.queue.Get()
		if queueRS != rsKey {
			t.Fatalf("Expected to find key %v in queue, found %v", rsKey, queueRS)
		}
	}
}
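shuffle is referenced but not shown; a permutation-based sketch using math/rand that matches how it is called here (the RC variant used in Example #19 would be analogous, typed for []*api.ReplicationController):

func shuffle(controllers []*extensions.ReplicaSet) []*extensions.ReplicaSet {
	numControllers := len(controllers)
	randIndexes := rand.Perm(numControllers)
	shuffled := make([]*extensions.ReplicaSet, numControllers)
	for i := 0; i < numControllers; i++ {
		shuffled[i] = controllers[randIndexes[i]]
	}
	return shuffled
}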
Example #14
func TestSyncEndpointsItemsPreexistingIdentical(t *testing.T) {
	ns := api.NamespaceDefault
	testServer, endpointsHandler := makeTestServer(t, api.NamespaceDefault,
		serverResponse{http.StatusOK, &api.Endpoints{
			ObjectMeta: api.ObjectMeta{
				ResourceVersion: "1",
				Name:            "foo",
				Namespace:       ns,
			},
			Subsets: []api.EndpointSubset{{
				Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
				Ports:     []api.EndpointPort{{Port: 8080, Protocol: "TCP"}},
			}},
		}})
	defer testServer.Close()
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
	endpoints.podStoreSynced = alwaysReady
	addPods(endpoints.podStore.Store, api.NamespaceDefault, 1, 1, 0)
	endpoints.serviceStore.Store.Add(&api.Service{
		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: api.NamespaceDefault},
		Spec: api.ServiceSpec{
			Selector: map[string]string{"foo": "bar"},
			Ports:    []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}},
		},
	})
	endpoints.syncService(ns + "/foo")
	endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", api.NamespaceDefault, "foo"), "GET", nil)
}
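This test and the next two assume a makeTestServer/serverResponse pair that is not shown. A sketch consistent with how they are invoked; the field names and the mux routing are assumptions, not the tests' actual helper:

type serverResponse struct {
	statusCode int
	obj        interface{}
}

func makeTestServer(t *testing.T, namespace string, endpointsResponse serverResponse) (*httptest.Server, *utiltesting.FakeHandler) {
	fakeEndpointsHandler := utiltesting.FakeHandler{
		StatusCode:   endpointsResponse.statusCode,
		ResponseBody: runtime.EncodeOrDie(testapi.Default.Codec(), endpointsResponse.obj.(runtime.Object)),
		T:            t,
	}
	// Route endpoint requests for the given namespace to the fake handler.
	mux := http.NewServeMux()
	mux.Handle(testapi.Default.ResourcePath("endpoints", namespace, ""), &fakeEndpointsHandler)
	return httptest.NewServer(mux), &fakeEndpointsHandler
}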
Example #15
func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) {
	ns := api.NamespaceDefault
	testServer, endpointsHandler := makeTestServer(t, ns,
		serverResponse{http.StatusOK, &api.Endpoints{
			ObjectMeta: api.ObjectMeta{
				Name:            "foo",
				Namespace:       ns,
				ResourceVersion: "1",
			},
			Subsets: []api.EndpointSubset{{
				Addresses: []api.EndpointAddress{{IP: "6.7.8.9"}},
				Ports:     []api.EndpointPort{{Port: 1000}},
			}},
		}})
	defer testServer.Close()
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
	endpoints.podStoreSynced = alwaysReady
	endpoints.serviceStore.Store.Add(&api.Service{
		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns},
		Spec:       api.ServiceSpec{Ports: []api.ServicePort{{Port: 80}}},
	})
	endpoints.syncService(ns + "/foo")
	endpointsHandler.ValidateRequestCount(t, 0)
}
Example #16
func TestCheckLeftoverEndpoints(t *testing.T) {
	ns := api.NamespaceDefault
	// Note that this requests *all* endpoints, therefore the NamespaceAll
	// below.
	testServer, _ := makeTestServer(t, api.NamespaceAll,
		serverResponse{http.StatusOK, &api.EndpointsList{
			ListMeta: unversioned.ListMeta{
				ResourceVersion: "1",
			},
			Items: []api.Endpoints{{
				ObjectMeta: api.ObjectMeta{
					Name:            "foo",
					Namespace:       ns,
					ResourceVersion: "1",
				},
				Subsets: []api.EndpointSubset{{
					Addresses: []api.EndpointAddress{{IP: "6.7.8.9"}},
					Ports:     []api.EndpointPort{{Port: 1000}},
				}},
			}},
		}})
	defer testServer.Close()
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
	endpoints.podStoreSynced = alwaysReady
	endpoints.checkLeftoverEndpoints()

	if e, a := 1, endpoints.queue.Len(); e != a {
		t.Fatalf("Expected %v, got %v", e, a)
	}
	got, _ := endpoints.queue.Get()
	if e, a := ns+"/foo", got; e != a {
		t.Errorf("Expected %v, got %v", e, a)
	}
}
Example #17
func TestMasterService(t *testing.T) {
	_, s := framework.RunAMaster(framework.NewIntegrationTestMasterConfig())
	defer s.Close()

	client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})

	err := wait.Poll(time.Second, time.Minute, func() (bool, error) {
		svcList, err := client.Core().Services(api.NamespaceDefault).List(api.ListOptions{})
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			return false, nil
		}
		found := false
		for i := range svcList.Items {
			if svcList.Items[i].Name == "kubernetes" {
				found = true
				break
			}
		}
		if found {
			ep, err := client.Core().Endpoints(api.NamespaceDefault).Get("kubernetes")
			if err != nil {
				return false, nil
			}
			if countEndpoints(ep) == 0 {
				return false, fmt.Errorf("no endpoints for kubernetes service: %v", ep)
			}
			return true, nil
		}
		return false, nil
	})
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
}
Example #18
func TestControllerUpdateRequeue(t *testing.T) {
	// This server should force a requeue of the controller because it fails to update status.Replicas.
	fakeHandler := utiltesting.FakeHandler{
		StatusCode:   500,
		ResponseBody: "",
	}
	testServer := httptest.NewServer(&fakeHandler)
	defer testServer.Close()

	c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
	manager.podStoreSynced = alwaysReady

	rc := newReplicationController(1)
	manager.rcStore.Indexer.Add(rc)
	rc.Status = api.ReplicationControllerStatus{Replicas: 2}
	newPodList(manager.podStore.Indexer, 1, api.PodRunning, rc, "pod")

	fakePodControl := controller.FakePodControl{}
	manager.podControl = &fakePodControl

	// An error returned from the sync function causes a requeue; make sure we actually returned one
	if err := manager.syncReplicationController(getKey(rc, t)); err == nil {
		t.Errorf("missing error for requeue")
	}

	// 1 Update and 1 GET, both of which fail
	fakeHandler.ValidateRequestCount(t, 2)
}
Example #19
func TestOverlappingRCs(t *testing.T) {
	c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

	for i := 0; i < 5; i++ {
		manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 10, 0)
		manager.podStoreSynced = alwaysReady

		// Create rcs with staggered creation timestamps, shuffle them randomly,
		// and insert them into the rc manager's store
		var controllers []*api.ReplicationController
		for j := 1; j < 10; j++ {
			controllerSpec := newReplicationController(1)
			controllerSpec.CreationTimestamp = unversioned.Date(2014, time.December, j, 0, 0, 0, 0, time.Local)
			controllerSpec.Name = string(util.NewUUID())
			controllers = append(controllers, controllerSpec)
		}
		shuffledControllers := shuffle(controllers)
		for j := range shuffledControllers {
			manager.rcStore.Store.Add(shuffledControllers[j])
		}
		// Add a pod and make sure only the oldest rc is synced
		pods := newPodList(nil, 1, api.PodPending, controllers[0], "pod")
		rcKey := getKey(controllers[0], t)

		manager.addPod(&pods.Items[0])
		queueRC, _ := manager.queue.Get()
		if queueRC != rcKey {
			t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC)
		}
	}
}
Example #20
// NewMasterComponents creates, initializes and starts master components based on the given config.
func NewMasterComponents(c *Config) *MasterComponents {
	m, s := startMasterOrDie(c.MasterConfig)
	// TODO: Allow callers to pipe through a different master url and create a client/start components using it.
	glog.Infof("Master %+v", s.URL)
	if c.DeleteEtcdKeys {
		DeleteAllEtcdKeys()
	}
	// TODO: caesarxuchao: remove this client when the refactoring of the client library is done.
	restClient := client.NewOrDie(&client.Config{Host: s.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}, QPS: c.QPS, Burst: c.Burst})
	clientset := clientset.NewForConfigOrDie(&client.Config{Host: s.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}, QPS: c.QPS, Burst: c.Burst})
	rcStopCh := make(chan struct{})
	controllerManager := replicationcontroller.NewReplicationManager(clientset, controller.NoResyncPeriodFunc, c.Burst)

	// TODO: Support events once we can cleanly shutdown an event recorder.
	controllerManager.SetEventRecorder(&record.FakeRecorder{})
	if c.StartReplicationManager {
		go controllerManager.Run(goruntime.NumCPU(), rcStopCh)
	}
	var once sync.Once
	return &MasterComponents{
		ApiServer:         s,
		KubeMaster:        m,
		RestClient:        restClient,
		ControllerManager: controllerManager,
		rcStopCh:          rcStopCh,
		once:              once,
	}
}
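A hedged usage sketch: the Config fields follow the references inside NewMasterComponents above, but relying on a defaulted MasterConfig and the Stop(apiServer, rcManager) cleanup call are assumptions about the framework, not code shown here.

func TestWithMasterComponents(t *testing.T) {
	m := framework.NewMasterComponents(&framework.Config{
		StartReplicationManager: true,
		DeleteEtcdKeys:          true,
		QPS:                     100,
		Burst:                   100,
	})
	defer m.Stop(true, true) // assumed: stops the RC manager and the API server

	// Exercise the returned REST client against the live master.
	if _, err := m.RestClient.Pods(api.NamespaceDefault).List(api.ListOptions{}); err != nil {
		t.Fatalf("Failed to list pods: %v", err)
	}
}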
Example #21
func TestPodReadOnlyFilesystem(t *testing.T) {
	_, s := framework.RunAMaster(nil)
	defer s.Close()

	isReadOnly := true
	ns := framework.CreateTestingNamespace("pod-readonly-root", s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)

	client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})

	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: "xxx",
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "fake-name",
					Image: "fakeimage",
					SecurityContext: &api.SecurityContext{
						ReadOnlyRootFilesystem: &isReadOnly,
					},
				},
			},
		},
	}

	if _, err := client.Core().Pods(ns.Name).Create(pod); err != nil {
		t.Errorf("Failed to create pod: %v", err)
	}

	integration.DeletePodOrErrorf(t, client, ns.Name, pod.Name)
}
Example #22
func TestRSManagerNotReady(t *testing.T) {
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	fakePodControl := controller.FakePodControl{}
	manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, 2, 0)
	manager.podControl = &fakePodControl
	manager.podStoreSynced = func() bool { return false }

	// Simulates the ReplicaSet reflector running before the pod reflector. We don't
	// want to end up creating replicas in this case until the pod reflector
	// has synced, so the ReplicaSet controller should just requeue the ReplicaSet.
	rsSpec := newReplicaSet(1, map[string]string{"foo": "bar"})
	manager.rsStore.Store.Add(rsSpec)

	rsKey := getKey(rsSpec, t)
	manager.syncReplicaSet(rsKey)
	validateSyncReplicaSet(t, &fakePodControl, 0, 0, 0)
	queueRS, _ := manager.queue.Get()
	if queueRS != rsKey {
		t.Fatalf("Expected to find key %v in queue, found %v", rsKey, queueRS)
	}

	manager.podStoreSynced = alwaysReady
	manager.syncReplicaSet(rsKey)
	validateSyncReplicaSet(t, &fakePodControl, 1, 0, 0)
}
Example #23
func TestSyncReplicaSetDormancy(t *testing.T) {
	// Set up a test server so we can lie about the current state of pods
	fakeHandler := utiltesting.FakeHandler{
		StatusCode:   200,
		ResponseBody: "{}",
	}
	testServer := httptest.NewServer(&fakeHandler)
	defer testServer.Close()
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

	fakePodControl := controller.FakePodControl{}
	manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
	manager.podStoreSynced = alwaysReady
	manager.podControl = &fakePodControl

	labelMap := map[string]string{"foo": "bar"}
	rsSpec := newReplicaSet(2, labelMap)
	manager.rsStore.Store.Add(rsSpec)
	newPodList(manager.podStore.Indexer, 1, api.PodRunning, labelMap, rsSpec, "pod")

	// Creates a replica and sets expectations
	rsSpec.Status.Replicas = 1
	rsSpec.Status.ReadyReplicas = 1
	manager.syncReplicaSet(getKey(rsSpec, t))
	validateSyncReplicaSet(t, &fakePodControl, 1, 0, 0)

	// Expectations prevent replica creation but not a status update
	rsSpec.Status.Replicas = 0
	rsSpec.Status.ReadyReplicas = 0
	fakePodControl.Clear()
	manager.syncReplicaSet(getKey(rsSpec, t))
	validateSyncReplicaSet(t, &fakePodControl, 0, 0, 0)

	// Get the key for the controller
	rsKey, err := controller.KeyFunc(rsSpec)
	if err != nil {
		t.Errorf("Couldn't get key for object %#v: %v", rsSpec, err)
	}

	// Lowering expectations should lead to a sync that creates a replica; however,
	// the fakePodControl error prevents this, leaving expectations at 0, 0
	manager.expectations.CreationObserved(rsKey)
	rsSpec.Status.Replicas = 1
	rsSpec.Status.ReadyReplicas = 1
	fakePodControl.Clear()
	fakePodControl.Err = fmt.Errorf("Fake Error")

	manager.syncReplicaSet(getKey(rsSpec, t))
	validateSyncReplicaSet(t, &fakePodControl, 1, 0, 0)

	// This replica should not need a lowering of expectations, since the previous create failed
	fakePodControl.Clear()
	fakePodControl.Err = nil
	manager.syncReplicaSet(getKey(rsSpec, t))
	validateSyncReplicaSet(t, &fakePodControl, 1, 0, 0)

	// 1 PUT for the ReplicaSet status during dormancy window.
	// Note that the pod creates go through pod control so they're not recorded.
	fakeHandler.ValidateRequestCount(t, 1)
}
Example #24
// TestRSSyncExpectations tests that a pod cannot sneak in between counting active pods
// and checking expectations.
func TestRSSyncExpectations(t *testing.T) {
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	fakePodControl := controller.FakePodControl{}
	manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, 2, 0)
	manager.podStoreSynced = alwaysReady
	manager.podControl = &fakePodControl

	labelMap := map[string]string{"foo": "bar"}
	rsSpec := newReplicaSet(2, labelMap)
	manager.rsStore.Store.Add(rsSpec)
	pods := newPodList(nil, 2, api.PodPending, labelMap, rsSpec, "pod")
	manager.podStore.Indexer.Add(&pods.Items[0])
	postExpectationsPod := pods.Items[1]

	manager.expectations = controller.NewUIDTrackingControllerExpectations(FakeRSExpectations{
		controller.NewControllerExpectations(), true, func() {
			// If we check active pods before checking expectations, the
			// ReplicaSet will create a new replica because it doesn't see
			// this pod, but has fulfilled its expectations.
			manager.podStore.Indexer.Add(&postExpectationsPod)
		},
	})
	manager.syncReplicaSet(getKey(rsSpec, t))
	validateSyncReplicaSet(t, &fakePodControl, 0, 0, 0)
}
Example #25
func TestDeleteFinalStateUnknown(t *testing.T) {
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	fakePodControl := controller.FakePodControl{}
	manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
	manager.podStoreSynced = alwaysReady
	manager.podControl = &fakePodControl

	received := make(chan string)
	manager.syncHandler = func(key string) error {
		received <- key
		return nil
	}

	// The DeletedFinalStateUnknown object should cause the ReplicaSet manager to insert
	// the controller matching the selectors of the deleted pod into the work queue.
	labelMap := map[string]string{"foo": "bar"}
	rsSpec := newReplicaSet(1, labelMap)
	manager.rsStore.Store.Add(rsSpec)
	pods := newPodList(nil, 1, api.PodRunning, labelMap, rsSpec, "pod")
	manager.deletePod(cache.DeletedFinalStateUnknown{Key: "foo", Obj: &pods.Items[0]})

	go manager.worker()

	expected := getKey(rsSpec, t)
	select {
	case key := <-received:
		if key != expected {
			t.Errorf("Unexpected sync all for ReplicaSet %v, expected %v", key, expected)
		}
	case <-time.After(wait.ForeverTestTimeout):
		t.Errorf("Processing DeleteFinalStateUnknown took longer than expected")
	}
}
Example #26
func TestPodControllerLookup(t *testing.T) {
	manager := NewReplicaSetControllerFromClient(clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}), controller.NoResyncPeriodFunc, BurstReplicas, 0)
	manager.podStoreSynced = alwaysReady
	testCases := []struct {
		inRSs     []*extensions.ReplicaSet
		pod       *api.Pod
		outRSName string
	}{
		// pods without labels don't match any ReplicaSets
		{
			inRSs: []*extensions.ReplicaSet{
				{ObjectMeta: api.ObjectMeta{Name: "basic"}}},
			pod:       &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo1", Namespace: api.NamespaceAll}},
			outRSName: "",
		},
		// Matching labels, not namespace
		{
			inRSs: []*extensions.ReplicaSet{
				{
					ObjectMeta: api.ObjectMeta{Name: "foo"},
					Spec: extensions.ReplicaSetSpec{
						Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
					},
				},
			},
			pod: &api.Pod{
				ObjectMeta: api.ObjectMeta{
					Name: "foo2", Namespace: "ns", Labels: map[string]string{"foo": "bar"}}},
			outRSName: "",
		},
		// Matching ns and labels returns the key to the ReplicaSet, not the ReplicaSet name
		{
			inRSs: []*extensions.ReplicaSet{
				{
					ObjectMeta: api.ObjectMeta{Name: "bar", Namespace: "ns"},
					Spec: extensions.ReplicaSetSpec{
						Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
					},
				},
			},
			pod: &api.Pod{
				ObjectMeta: api.ObjectMeta{
					Name: "foo3", Namespace: "ns", Labels: map[string]string{"foo": "bar"}}},
			outRSName: "bar",
		},
	}
	for _, c := range testCases {
		for _, r := range c.inRSs {
			manager.rsStore.Add(r)
		}
		if rs := manager.getPodReplicaSet(c.pod); rs != nil {
			if c.outRSName != rs.Name {
				t.Errorf("Got replica set %+v expected %+v", rs.Name, c.outRSName)
			}
		} else if c.outRSName != "" {
			t.Errorf("Expected a replica set %v pod %v, found none", c.outRSName, c.pod.Name)
		}
	}
}
Example #27
func TestPodControllerLookup(t *testing.T) {
	manager := NewReplicationManagerFromClient(clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}), controller.NoResyncPeriodFunc, BurstReplicas, 0)
	manager.podStoreSynced = alwaysReady
	testCases := []struct {
		inRCs     []*api.ReplicationController
		pod       *api.Pod
		outRCName string
	}{
		// pods without labels don't match any rcs
		{
			inRCs: []*api.ReplicationController{
				{ObjectMeta: api.ObjectMeta{Name: "basic"}}},
			pod:       &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo1", Namespace: api.NamespaceAll}},
			outRCName: "",
		},
		// Matching labels, not namespace
		{
			inRCs: []*api.ReplicationController{
				{
					ObjectMeta: api.ObjectMeta{Name: "foo"},
					Spec: api.ReplicationControllerSpec{
						Selector: map[string]string{"foo": "bar"},
					},
				},
			},
			pod: &api.Pod{
				ObjectMeta: api.ObjectMeta{
					Name: "foo2", Namespace: "ns", Labels: map[string]string{"foo": "bar"}}},
			outRCName: "",
		},
		// Matching ns and labels returns the key to the rc, not the rc name
		{
			inRCs: []*api.ReplicationController{
				{
					ObjectMeta: api.ObjectMeta{Name: "bar", Namespace: "ns"},
					Spec: api.ReplicationControllerSpec{
						Selector: map[string]string{"foo": "bar"},
					},
				},
			},
			pod: &api.Pod{
				ObjectMeta: api.ObjectMeta{
					Name: "foo3", Namespace: "ns", Labels: map[string]string{"foo": "bar"}}},
			outRCName: "bar",
		},
	}
	for _, c := range testCases {
		for _, r := range c.inRCs {
			manager.rcStore.Indexer.Add(r)
		}
		if rc := manager.getPodController(c.pod); rc != nil {
			if c.outRCName != rc.Name {
				t.Errorf("Got controller %+v expected %+v", rc.Name, c.outRCName)
			}
		} else if c.outRCName != "" {
			t.Errorf("Expected a controller %v pod %v, found none", c.outRCName, c.pod.Name)
		}
	}
}
Example #28
func newTestController() (*DaemonSetsController, *controller.FakePodControl) {
	clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	manager := NewDaemonSetsControllerFromClient(clientset, controller.NoResyncPeriodFunc, 0)
	manager.podStoreSynced = alwaysReady
	podControl := &controller.FakePodControl{}
	manager.podControl = podControl
	return manager, podControl
}
Example #29
func TestThirdPartyMultiple(t *testing.T) {
	_, s := framework.RunAMaster(framework.NewIntegrationTestMasterConfig())
	defer s.Close()

	clientConfig := &restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{NegotiatedSerializer: api.Codecs}}
	client := internalclientset.NewForConfigOrDie(clientConfig)

	DoTestInstallMultipleAPIs(t, client, clientConfig)
}
Example #30
// setupManagerWithGCEnabled creates an RS manager with a fakePodControl
// and with garbageCollectorEnabled set to true
func setupManagerWithGCEnabled() (manager *ReplicaSetController, fakePodControl *controller.FakePodControl) {
	c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	fakePodControl = &controller.FakePodControl{}
	manager = NewReplicaSetControllerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
	manager.garbageCollectorEnabled = true
	manager.podStoreSynced = alwaysReady
	manager.podControl = fakePodControl
	return manager, fakePodControl
}