func TestDeleteControllerAndExpectations(t *testing.T) {
	client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Version()})
	manager := NewReplicationManager(client, 10)
	manager.podStoreSynced = alwaysReady

	rc := newReplicationController(1)
	manager.controllerStore.Store.Add(rc)

	fakePodControl := FakePodControl{}
	manager.podControl = &fakePodControl

	// This should set expectations for the rc
	manager.syncReplicationController(getKey(rc, t))
	validateSyncReplication(t, &fakePodControl, 1, 0)
	fakePodControl.clear()

	// This is to simulate a concurrent addPod, that has a handle on the expectations
	// as the controller deletes it.
	podExp, exists, err := manager.expectations.GetExpectations(rc)
	if !exists || err != nil {
		t.Errorf("No expectations found for rc")
	}
	manager.controllerStore.Delete(rc)
	manager.syncReplicationController(getKey(rc, t))

	if _, exists, err = manager.expectations.GetExpectations(rc); exists {
		t.Errorf("Found expectations, expected none since the rc has been deleted.")
	}

	// This should have no effect, since we've deleted the rc.
	podExp.Seen(1, 0)
	manager.podStore.Store.Replace(make([]interface{}, 0))
	manager.syncReplicationController(getKey(rc, t))
	validateSyncReplication(t, &fakePodControl, 0, 0)
}
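// TestCheckLeftoverEndpoints verifies that endpoints existing at controller
// startup are added to the work queue for reconciliation.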
func TestCheckLeftoverEndpoints(t *testing.T) {
	ns := api.NamespaceDefault
	// Note that this requests *all* endpoints, therefore the NamespaceAll
	// below.
	testServer, _ := makeTestServer(t, api.NamespaceAll,
		serverResponse{http.StatusOK, &api.EndpointsList{
			ListMeta: api.ListMeta{
				ResourceVersion: "1",
			},
			Items: []api.Endpoints{{
				ObjectMeta: api.ObjectMeta{
					Name:            "foo",
					Namespace:       ns,
					ResourceVersion: "1",
				},
				Subsets: []api.EndpointSubset{{
					Addresses: []api.EndpointAddress{{IP: "6.7.8.9"}},
					Ports:     []api.EndpointPort{{Port: 1000}},
				}},
			}},
		}})
	defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()})
	endpoints := NewEndpointController(client)
	endpoints.checkLeftoverEndpoints()

	if e, a := 1, endpoints.queue.Len(); e != a {
		t.Fatalf("Expected %v, got %v", e, a)
	}
	got, _ := endpoints.queue.Get()
	if e, a := ns+"/foo", got; e != a {
		t.Errorf("Expected %v, got %v", e, a)
	}
}
// NewMasterComponents creates, initializes and starts master components based on the given config.
func NewMasterComponents(c *Config) *MasterComponents {
	m, s, h := startMasterOrDie(c.MasterConfig)
	// TODO: Allow callers to pipe through a different master url and create a client/start components using it.
	glog.Infof("Master %+v", s.URL)
	if c.DeleteEtcdKeys {
		DeleteAllEtcdKeys()
	}
	restClient := client.NewOrDie(&client.Config{Host: s.URL, Version: "v1beta3", QPS: c.QPS, Burst: c.Burst})
	rcStopCh := make(chan struct{})
	controllerManager := controller.NewReplicationManager(restClient, c.Burst)

	// TODO: Support events once we can cleanly shutdown an event recorder.
	controllerManager.SetEventRecorder(&record.FakeRecorder{})
	if c.StartReplicationManager {
		go controllerManager.Run(runtime.NumCPU(), rcStopCh)
	}
	var once sync.Once
	return &MasterComponents{
		ApiServer:         s,
		QingMaster:        m,
		RestClient:        restClient,
		ControllerManager: controllerManager,
		rcStopCh:          rcStopCh,
		EtcdHelper:        h,
		once:              once,
	}
}
func TestDeleteFinalStateUnknown(t *testing.T) {
	client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Version()})
	fakePodControl := FakePodControl{}
	manager := NewReplicationManager(client, BurstReplicas)
	manager.podStoreSynced = alwaysReady
	manager.podControl = &fakePodControl

	received := make(chan string)
	manager.syncHandler = func(key string) error {
		received <- key
		return nil
	}

	// The DeletedFinalStateUnknown object should cause the rc manager to insert
	// the controller matching the selectors of the deleted pod into the work queue.
	controllerSpec := newReplicationController(1)
	manager.controllerStore.Store.Add(controllerSpec)
	pods := newPodList(nil, 1, api.PodRunning, controllerSpec)
	manager.deletePod(cache.DeletedFinalStateUnknown{Key: "foo", Obj: &pods.Items[0]})

	go manager.worker()

	expected := getKey(controllerSpec, t)
	select {
	case key := <-received:
		if key != expected {
			t.Errorf("Unexpected sync all for rc %v, expected %v", key, expected)
		}
	case <-time.After(100 * time.Millisecond):
		t.Errorf("Processing DeleteFinalStateUnknown took longer than expected")
	}
}
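// TestSyncEndpointsProtocolUDP syncs a service against a pre-existing
// endpoints object carrying a UDP port and asserts that no request reaches
// the apiserver.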
func TestSyncEndpointsProtocolUDP(t *testing.T) {
	ns := "other"
	testServer, endpointsHandler := makeTestServer(t, ns,
		serverResponse{http.StatusOK, &api.Endpoints{
			ObjectMeta: api.ObjectMeta{
				Name:            "foo",
				Namespace:       ns,
				ResourceVersion: "1",
			},
			Subsets: []api.EndpointSubset{{
				Addresses: []api.EndpointAddress{{IP: "6.7.8.9"}},
				Ports:     []api.EndpointPort{{Port: 1000, Protocol: "UDP"}},
			}},
		}})
	defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()})
	endpoints := NewEndpointController(client)
	endpoints.serviceStore.Store.Add(&api.Service{
		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns},
		Spec: api.ServiceSpec{
			Selector: map[string]string{},
			Ports:    []api.ServicePort{{Port: 80}},
		},
	})
	endpoints.syncService(ns + "/foo")
	endpointsHandler.ValidateRequestCount(t, 0)
}
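// TestSyncEndpointsItemsPreexistingIdentical verifies that no update is
// written when the desired endpoints are identical to the pre-existing object.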
func TestSyncEndpointsItemsPreexistingIdentical(t *testing.T) {
	ns := api.NamespaceDefault
	testServer, endpointsHandler := makeTestServer(t, api.NamespaceDefault,
		serverResponse{http.StatusOK, &api.Endpoints{
			ObjectMeta: api.ObjectMeta{
				ResourceVersion: "1",
				Name:            "foo",
				Namespace:       ns,
			},
			Subsets: []api.EndpointSubset{{
				Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
				Ports:     []api.EndpointPort{{Port: 8080, Protocol: "TCP"}},
			}},
		}})
	defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()})
	endpoints := NewEndpointController(client)
	addPods(endpoints.podStore.Store, api.NamespaceDefault, 1, 1)
	endpoints.serviceStore.Store.Add(&api.Service{
		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: api.NamespaceDefault},
		Spec: api.ServiceSpec{
			Selector: map[string]string{"foo": "bar"},
			Ports:    []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: util.NewIntOrStringFromInt(8080)}},
		},
	})
	endpoints.syncService(ns + "/foo")
	endpointsHandler.ValidateRequest(t, testapi.ResourcePath("endpoints", api.NamespaceDefault, "foo"), "GET", nil)
}
func TestRCManagerNotReady(t *testing.T) {
	client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Version()})
	fakePodControl := FakePodControl{}
	manager := NewReplicationManager(client, 2)
	manager.podControl = &fakePodControl
	manager.podStoreSynced = func() bool { return false }

	// Simulates the rc reflector running before the pod reflector. We don't
	// want to end up creating replicas in this case until the pod reflector
	// has synced, so the rc manager should just requeue the rc.
	controllerSpec := newReplicationController(1)
	manager.controllerStore.Store.Add(controllerSpec)

	rcKey := getKey(controllerSpec, t)
	manager.syncReplicationController(rcKey)
	validateSyncReplication(t, &fakePodControl, 0, 0)

	queueRC, _ := manager.queue.Get()
	if queueRC != rcKey {
		t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC)
	}

	manager.podStoreSynced = alwaysReady
	manager.syncReplicationController(rcKey)
	validateSyncReplication(t, &fakePodControl, 1, 0)
}
// TestSecrets tests apiserver-side behavior of creation of secret objects and their use by pods.
func TestSecrets(t *testing.T) {
	helper, err := framework.NewHelper()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	var m *master.Master
	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		m.Handler.ServeHTTP(w, req)
	}))
	defer s.Close()

	m = master.New(&master.Config{
		EtcdHelper:            helper,
		QingletClient:         client.FakeQingletClient{},
		EnableCoreControllers: true,
		EnableLogsSupport:     false,
		EnableUISupport:       false,
		EnableIndex:           true,
		APIPrefix:             "/api",
		Authorizer:            apiserver.NewAlwaysAllowAuthorizer(),
		AdmissionControl:      admit.NewAlwaysAdmit(),
	})

	framework.DeleteAllEtcdKeys()
	client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Version()})
	DoTestSecrets(t, client, testapi.Version())
}
func TestPodControllerLookup(t *testing.T) {
	manager := NewReplicationManager(client.NewOrDie(&client.Config{Host: "", Version: testapi.Version()}), BurstReplicas)
	manager.podStoreSynced = alwaysReady
	testCases := []struct {
		inRCs     []*api.ReplicationController
		pod       *api.Pod
		outRCName string
	}{
		// pods without labels don't match any rcs
		{
			inRCs: []*api.ReplicationController{
				{ObjectMeta: api.ObjectMeta{Name: "basic"}}},
			pod:       &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo1", Namespace: api.NamespaceAll}},
			outRCName: "",
		},
		// Matching labels, not namespace
		{
			inRCs: []*api.ReplicationController{
				{
					ObjectMeta: api.ObjectMeta{Name: "foo"},
					Spec: api.ReplicationControllerSpec{
						Selector: map[string]string{"foo": "bar"},
					},
				},
			},
			pod: &api.Pod{
				ObjectMeta: api.ObjectMeta{
					Name: "foo2", Namespace: "ns", Labels: map[string]string{"foo": "bar"}}},
			outRCName: "",
		},
		// Matching ns and labels returns the key to the rc, not the rc name
		{
			inRCs: []*api.ReplicationController{
				{
					ObjectMeta: api.ObjectMeta{Name: "bar", Namespace: "ns"},
					Spec: api.ReplicationControllerSpec{
						Selector: map[string]string{"foo": "bar"},
					},
				},
			},
			pod: &api.Pod{
				ObjectMeta: api.ObjectMeta{
					Name: "foo3", Namespace: "ns", Labels: map[string]string{"foo": "bar"}}},
			outRCName: "bar",
		},
	}
	for _, c := range testCases {
		for _, r := range c.inRCs {
			manager.controllerStore.Add(r)
		}
		if rc := manager.getPodControllers(c.pod); rc != nil {
			if c.outRCName != rc.Name {
				t.Errorf("Got controller %+v expected %+v", rc.Name, c.outRCName)
			}
		} else if c.outRCName != "" {
			t.Errorf("Expected a controller %v pod %v, found none", c.outRCName, c.pod.Name)
		}
	}
}
func TestSyncReplicationControllerCreates(t *testing.T) {
	client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Version()})
	manager := NewReplicationManager(client, BurstReplicas)
	manager.podStoreSynced = alwaysReady

	// A controller with 2 replicas and no pods in the store, 2 creates expected
	controller := newReplicationController(2)
	manager.controllerStore.Store.Add(controller)

	fakePodControl := FakePodControl{}
	manager.podControl = &fakePodControl
	manager.syncReplicationController(getKey(controller, t))
	validateSyncReplication(t, &fakePodControl, 2, 0)
}
func TestSyncReplicationControllerDormancy(t *testing.T) {
	// Setup a test server so we can lie about the current state of pods
	fakeHandler := util.FakeHandler{
		StatusCode:   200,
		ResponseBody: "",
	}
	testServer := httptest.NewServer(&fakeHandler)
	defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()})

	fakePodControl := FakePodControl{}
	manager := NewReplicationManager(client, BurstReplicas)
	manager.podStoreSynced = alwaysReady
	manager.podControl = &fakePodControl

	controllerSpec := newReplicationController(2)
	manager.controllerStore.Store.Add(controllerSpec)
	newPodList(manager.podStore.Store, 1, api.PodRunning, controllerSpec)

	// Creates a replica and sets expectations
	controllerSpec.Status.Replicas = 1
	manager.syncReplicationController(getKey(controllerSpec, t))
	validateSyncReplication(t, &fakePodControl, 1, 0)

	// Expectations prevent new replica creation, but not a status update
	controllerSpec.Status.Replicas = 0
	fakePodControl.clear()
	manager.syncReplicationController(getKey(controllerSpec, t))
	validateSyncReplication(t, &fakePodControl, 0, 0)

	// Lowering expectations should lead to a sync that creates a replica, however the
	// fakePodControl error will prevent this, leaving expectations at 0, 0
	manager.expectations.CreationObserved(controllerSpec)
	controllerSpec.Status.Replicas = 1
	fakePodControl.clear()
	fakePodControl.err = fmt.Errorf("Fake Error")
	manager.syncReplicationController(getKey(controllerSpec, t))
	validateSyncReplication(t, &fakePodControl, 0, 0)

	// This replica should not need a lowering of expectations, since the previous create failed
	fakePodControl.err = nil
	manager.syncReplicationController(getKey(controllerSpec, t))
	validateSyncReplication(t, &fakePodControl, 1, 0)

	// 1 PUT for the rc status during dormancy window.
	// Note that the pod creates go through pod control so they're not recorded.
	fakeHandler.ValidateRequestCount(t, 1)
}
func TestListWatchesCanList(t *testing.T) {
	fieldSelectorQueryParamName := api.FieldSelectorQueryParam(testapi.Version())
	table := []struct {
		location      string
		resource      string
		namespace     string
		fieldSelector fields.Selector
	}{
		// Minion
		{
			location:      testapi.ResourcePath("minions", api.NamespaceAll, ""),
			resource:      "minions",
			namespace:     api.NamespaceAll,
			fieldSelector: parseSelectorOrDie(""),
		},
		// pod with "assigned" field selector.
		{
			location: buildLocation(
				testapi.ResourcePath("pods", api.NamespaceAll, ""),
				buildQueryValues(url.Values{fieldSelectorQueryParamName: []string{"spec.host="}})),
			resource:      "pods",
			namespace:     api.NamespaceAll,
			fieldSelector: fields.Set{"spec.host": ""}.AsSelector(),
		},
		// pod in namespace "foo"
		{
			location: buildLocation(
				testapi.ResourcePath("pods", "foo", ""),
				buildQueryValues(url.Values{fieldSelectorQueryParamName: []string{"spec.host="}})),
			resource:      "pods",
			namespace:     "foo",
			fieldSelector: fields.Set{"spec.host": ""}.AsSelector(),
		},
	}
	for _, item := range table {
		handler := util.FakeHandler{
			StatusCode:   500,
			ResponseBody: "",
			T:            t,
		}
		server := httptest.NewServer(&handler)
		defer server.Close()
		client := client.NewOrDie(&client.Config{Host: server.URL, Version: testapi.Version()})
		lw := NewListWatchFromClient(client, item.resource, item.namespace, item.fieldSelector)
		// This test merely tests that the correct request is made.
		lw.List()
		handler.ValidateRequest(t, item.location, "GET", nil)
	}
}
func TestSyncReplicationControllerDoesNothing(t *testing.T) {
	client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Version()})
	fakePodControl := FakePodControl{}
	manager := NewReplicationManager(client, BurstReplicas)
	manager.podStoreSynced = alwaysReady

	// 2 running pods, a controller with 2 replicas, sync is a no-op
	controllerSpec := newReplicationController(2)
	manager.controllerStore.Store.Add(controllerSpec)
	newPodList(manager.podStore.Store, 2, api.PodRunning, controllerSpec)

	manager.podControl = &fakePodControl
	manager.syncReplicationController(getKey(controllerSpec, t))
	validateSyncReplication(t, &fakePodControl, 0, 0)
}
func TestSyncReplicationControllerDeletes(t *testing.T) {
	client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Version()})
	fakePodControl := FakePodControl{}
	manager := NewReplicationManager(client, BurstReplicas)
	manager.podStoreSynced = alwaysReady
	manager.podControl = &fakePodControl

	// 2 running pods and a controller with 1 replica, one pod delete expected
	controllerSpec := newReplicationController(1)
	manager.controllerStore.Store.Add(controllerSpec)
	newPodList(manager.podStore.Store, 2, api.PodRunning, controllerSpec)

	manager.syncReplicationController(getKey(controllerSpec, t))
	validateSyncReplication(t, &fakePodControl, 0, 1)
}
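// TestSyncEndpointsItemsPreexistingLabelsChange verifies that changed service
// labels are written back to the pre-existing endpoints object via a PUT.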
func TestSyncEndpointsItemsPreexistingLabelsChange(t *testing.T) {
	ns := "bar"
	testServer, endpointsHandler := makeTestServer(t, ns,
		serverResponse{http.StatusOK, &api.Endpoints{
			ObjectMeta: api.ObjectMeta{
				Name:            "foo",
				Namespace:       ns,
				ResourceVersion: "1",
				Labels: map[string]string{
					"foo": "bar",
				},
			},
			Subsets: []api.EndpointSubset{{
				Addresses: []api.EndpointAddress{{IP: "6.7.8.9"}},
				Ports:     []api.EndpointPort{{Port: 1000}},
			}},
		}})
	defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()})
	endpoints := NewEndpointController(client)
	addPods(endpoints.podStore.Store, ns, 1, 1)
	serviceLabels := map[string]string{"baz": "blah"}
	endpoints.serviceStore.Store.Add(&api.Service{
		ObjectMeta: api.ObjectMeta{
			Name:      "foo",
			Namespace: ns,
			Labels:    serviceLabels,
		},
		Spec: api.ServiceSpec{
			Selector: map[string]string{"foo": "bar"},
			Ports:    []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: util.NewIntOrStringFromInt(8080)}},
		},
	})
	endpoints.syncService(ns + "/foo")
	data := runtime.EncodeOrDie(testapi.Codec(), &api.Endpoints{
		ObjectMeta: api.ObjectMeta{
			Name:            "foo",
			Namespace:       ns,
			ResourceVersion: "1",
			Labels:          serviceLabels,
		},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
			Ports:     []api.EndpointPort{{Port: 8080, Protocol: "TCP"}},
		}},
	})
	endpointsHandler.ValidateRequest(t, testapi.ResourcePath("endpoints", ns, "foo"), "PUT", &data)
}
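// TestSyncEndpointsItemsWithLabels verifies that a new endpoints object is
// POSTed carrying the service's labels, with each named service port mapped
// to its target port.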
func TestSyncEndpointsItemsWithLabels(t *testing.T) {
	ns := "other"
	testServer, endpointsHandler := makeTestServer(t, ns,
		serverResponse{http.StatusOK, &api.Endpoints{}})
	defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()})
	endpoints := NewEndpointController(client)
	addPods(endpoints.podStore.Store, ns, 3, 2)
	serviceLabels := map[string]string{"foo": "bar"}
	endpoints.serviceStore.Store.Add(&api.Service{
		ObjectMeta: api.ObjectMeta{
			Name:      "foo",
			Namespace: ns,
			Labels:    serviceLabels,
		},
		Spec: api.ServiceSpec{
			Selector: map[string]string{"foo": "bar"},
			Ports: []api.ServicePort{
				{Name: "port0", Port: 80, Protocol: "TCP", TargetPort: util.NewIntOrStringFromInt(8080)},
				{Name: "port1", Port: 88, Protocol: "TCP", TargetPort: util.NewIntOrStringFromInt(8088)},
			},
		},
	})
	endpoints.syncService(ns + "/foo")
	expectedSubsets := []api.EndpointSubset{{
		Addresses: []api.EndpointAddress{
			{IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}},
			{IP: "1.2.3.5", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod1", Namespace: ns}},
			{IP: "1.2.3.6", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod2", Namespace: ns}},
		},
		Ports: []api.EndpointPort{
			{Name: "port0", Port: 8080, Protocol: "TCP"},
			{Name: "port1", Port: 8088, Protocol: "TCP"},
		},
	}}
	data := runtime.EncodeOrDie(testapi.Codec(), &api.Endpoints{
		ObjectMeta: api.ObjectMeta{
			ResourceVersion: "",
			Labels:          serviceLabels,
		},
		Subsets: endptspkg.SortSubsets(expectedSubsets),
	})
	// endpointsHandler should get 2 requests - one for "GET" and the next for "POST".
	endpointsHandler.ValidateRequestCount(t, 2)
	endpointsHandler.ValidateRequest(t, testapi.ResourcePath("endpoints", ns, ""), "POST", &data)
}
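// NewAPIFactory builds a cmdutil.Factory whose hooks are all backed by the
// returned testFactory, so tests can stub out the client, printer, describer,
// and validation schema.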
func NewAPIFactory() (*cmdutil.Factory, *testFactory, runtime.Codec) {
	t := &testFactory{
		Validator: validation.NullSchema{},
	}
	generators := map[string]qingctl.Generator{
		"run/v1":     qingctl.BasicReplicationController{},
		"service/v1": qingctl.ServiceGenerator{},
	}
	return &cmdutil.Factory{
		Object: func() (meta.RESTMapper, runtime.ObjectTyper) {
			return latest.RESTMapper, api.Scheme
		},
		Client: func() (*client.Client, error) {
			// Swap out the client's HTTP transport for the fake's version.
			fakeClient := t.Client.(*client.FakeRESTClient)
			c := client.NewOrDie(t.ClientConfig)
			c.Client = fakeClient.Client
			return c, t.Err
		},
		RESTClient: func(*meta.RESTMapping) (resource.RESTClient, error) {
			return t.Client, t.Err
		},
		Describer: func(*meta.RESTMapping) (qingctl.Describer, error) {
			return t.Describer, t.Err
		},
		Printer: func(mapping *meta.RESTMapping, noHeaders, withNamespace bool, columnLabels []string) (qingctl.ResourcePrinter, error) {
			return t.Printer, t.Err
		},
		Validator: func() (validation.Schema, error) {
			return t.Validator, t.Err
		},
		DefaultNamespace: func() (string, error) {
			return t.Namespace, t.Err
		},
		ClientConfig: func() (*client.Config, error) {
			return t.ClientConfig, t.Err
		},
		Generator: func(name string) (qingctl.Generator, bool) {
			generator, ok := generators[name]
			return generator, ok
		},
	}, t, testapi.Codec()
}
func TestApiserverMetrics(t *testing.T) {
	_, s := framework.RunAMaster(t)
	defer s.Close()

	// Make a request to the apiserver to ensure there's at least one data point
	// for the metrics we're expecting -- otherwise, they won't be exported.
	client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Version()})
	if _, err := client.Pods(api.NamespaceDefault).List(labels.Everything(), fields.Everything()); err != nil {
		t.Fatalf("unexpected error getting pods: %v", err)
	}

	metrics, err := scrapeMetrics(s)
	if err != nil {
		t.Fatal(err)
	}
	checkForExpectedMetrics(t, metrics, []string{
		"apiserver_request_count",
		"apiserver_request_latencies",
	})
}
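// TestUnschedulableNodes starts an apiserver and a running scheduler, then
// hands off to DoTestUnschedulableNodes for the actual scheduling assertions.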
func TestUnschedulableNodes(t *testing.T) {
	helper, err := framework.NewHelper()
	if err != nil {
		t.Fatalf("Couldn't create etcd helper: %v", err)
	}
	framework.DeleteAllEtcdKeys()

	var m *master.Master
	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		m.Handler.ServeHTTP(w, req)
	}))
	defer s.Close()

	m = master.New(&master.Config{
		EtcdHelper:            helper,
		QingletClient:         client.FakeQingletClient{},
		EnableCoreControllers: true,
		EnableLogsSupport:     false,
		EnableUISupport:       false,
		EnableIndex:           true,
		APIPrefix:             "/api",
		Authorizer:            apiserver.NewAlwaysAllowAuthorizer(),
		AdmissionControl:      admit.NewAlwaysAdmit(),
	})

	restClient := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Version()})

	schedulerConfigFactory := factory.NewConfigFactory(restClient)
	schedulerConfig, err := schedulerConfigFactory.Create()
	if err != nil {
		t.Fatalf("Couldn't create scheduler config: %v", err)
	}
	eventBroadcaster := record.NewBroadcaster()
	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
	eventBroadcaster.StartRecordingToSink(restClient.Events(""))
	scheduler.New(schedulerConfig).Run()

	defer close(schedulerConfig.StopEverything)

	DoTestUnschedulableNodes(t, restClient, schedulerConfigFactory.NodeLister.Store)
}
func TestControllerUpdateRequeue(t *testing.T) {
	// This server should force a requeue of the controller because it fails to update status.Replicas.
	fakeHandler := util.FakeHandler{
		StatusCode:   500,
		ResponseBody: "",
	}
	testServer := httptest.NewServer(&fakeHandler)
	defer testServer.Close()

	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()})
	manager := NewReplicationManager(client, BurstReplicas)
	manager.podStoreSynced = alwaysReady

	rc := newReplicationController(1)
	manager.controllerStore.Store.Add(rc)
	rc.Status = api.ReplicationControllerStatus{Replicas: 2}
	newPodList(manager.podStore.Store, 1, api.PodRunning, rc)

	fakePodControl := FakePodControl{}
	manager.podControl = &fakePodControl

	manager.syncReplicationController(getKey(rc, t))

	ch := make(chan interface{})
	go func() {
		item, _ := manager.queue.Get()
		ch <- item
	}()
	select {
	case key := <-ch:
		expectedKey := getKey(rc, t)
		if key != expectedKey {
			t.Errorf("Expected requeue of controller with key %s got %s", expectedKey, key)
		}
	case <-time.After(controllerTimeout):
		manager.queue.ShutDown()
		t.Errorf("Expected to find an rc in the queue, found none.")
	}
	// 1 Update and 1 GET, both of which fail
	fakeHandler.ValidateRequestCount(t, 2)
}
func TestStatusUpdatesWithoutReplicasChange(t *testing.T) {
	// Setup a fake server to listen for requests, and run the rc manager in steady state
	fakeHandler := util.FakeHandler{
		StatusCode:   200,
		ResponseBody: "",
	}
	testServer := httptest.NewServer(&fakeHandler)
	defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()})
	manager := NewReplicationManager(client, BurstReplicas)
	manager.podStoreSynced = alwaysReady

	// Steady state for the replication controller, no Status.Replicas updates expected
	activePods := 5
	rc := newReplicationController(activePods)
	manager.controllerStore.Store.Add(rc)
	rc.Status = api.ReplicationControllerStatus{Replicas: activePods}
	newPodList(manager.podStore.Store, activePods, api.PodRunning, rc)

	fakePodControl := FakePodControl{}
	manager.podControl = &fakePodControl
	manager.syncReplicationController(getKey(rc, t))

	validateSyncReplication(t, &fakePodControl, 0, 0)
	if fakeHandler.RequestReceived != nil {
		t.Errorf("Unexpected update when pods and rcs are in a steady state")
	}

	// This response body is just so we don't err out decoding the http response;
	// all we care about is the request body sent below.
	response := runtime.EncodeOrDie(testapi.Codec(), &api.ReplicationController{})
	fakeHandler.ResponseBody = response

	rc.Generation = rc.Generation + 1
	manager.syncReplicationController(getKey(rc, t))

	rc.Status.ObservedGeneration = rc.Generation
	updatedRc := runtime.EncodeOrDie(testapi.Codec(), rc)
	fakeHandler.ValidateRequest(t, testapi.ResourcePath(replicationControllerResourceName(), rc.Namespace, rc.Name), "PUT", &updatedRc)
}
func TestControllerUpdateReplicas(t *testing.T) {
	// This is a happy server just to record the PUT request we expect for status.Replicas
	fakeHandler := util.FakeHandler{
		StatusCode:   200,
		ResponseBody: "",
	}
	testServer := httptest.NewServer(&fakeHandler)
	defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()})
	manager := NewReplicationManager(client, BurstReplicas)
	manager.podStoreSynced = alwaysReady

	// Insufficient number of pods in the system, and Status.Replicas is wrong;
	// Status.Replicas should update to match the number of pods in the system, and 1 new pod should be created.
	rc := newReplicationController(5)
	manager.controllerStore.Store.Add(rc)
	rc.Status = api.ReplicationControllerStatus{Replicas: 2, ObservedGeneration: 0}
	rc.Generation = 1
	newPodList(manager.podStore.Store, 4, api.PodRunning, rc)

	// This response body is just so we don't err out decoding the http response
	response := runtime.EncodeOrDie(testapi.Codec(), &api.ReplicationController{})
	fakeHandler.ResponseBody = response

	fakePodControl := FakePodControl{}
	manager.podControl = &fakePodControl

	manager.syncReplicationController(getKey(rc, t))

	// 1. Status.Replicas should go up from 2->4 even though we created 5-4=1 pod.
	// 2. Every update to the status should include the Generation of the spec.
	rc.Status = api.ReplicationControllerStatus{Replicas: 4, ObservedGeneration: 1}

	decRc := runtime.EncodeOrDie(testapi.Codec(), rc)
	fakeHandler.ValidateRequest(t, testapi.ResourcePath(replicationControllerResourceName(), rc.Namespace, rc.Name), "PUT", &decRc)
	validateSyncReplication(t, &fakePodControl, 1, 0)
}
func TestCreateReplica(t *testing.T) {
	ns := api.NamespaceDefault
	body := runtime.EncodeOrDie(testapi.Codec(), &api.Pod{ObjectMeta: api.ObjectMeta{Name: "empty_pod"}})
	fakeHandler := util.FakeHandler{
		StatusCode:   200,
		ResponseBody: string(body),
	}
	testServer := httptest.NewServer(&fakeHandler)
	defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()})

	podControl := RealPodControl{
		qingClient: client,
		recorder:   &record.FakeRecorder{},
	}

	controllerSpec := newReplicationController(1)

	// Make sure createReplica sends a POST to the apiserver with a pod from the controller's pod template
	podControl.createReplica(ns, controllerSpec)

	expectedPod := api.Pod{
		ObjectMeta: api.ObjectMeta{
			Labels:       controllerSpec.Spec.Template.Labels,
			GenerateName: fmt.Sprintf("%s-", controllerSpec.Name),
		},
		Spec: controllerSpec.Spec.Template.Spec,
	}
	fakeHandler.ValidateRequest(t, testapi.ResourcePath("pods", api.NamespaceDefault, ""), "POST", nil)
	actualPod, err := client.Codec.Decode([]byte(fakeHandler.RequestBody))
	if err != nil {
		t.Errorf("Unexpected error: %#v", err)
	}
	if !api.Semantic.DeepDerivative(&expectedPod, actualPod) {
		t.Logf("Body: %s", fakeHandler.RequestBody)
		t.Errorf("Unexpected mismatch. Expected\n %#v,\n Got:\n %#v", &expectedPod, actualPod)
	}
}
// TestRCSyncExpectations tests that a pod cannot sneak in between counting active pods
// and checking expectations.
func TestRCSyncExpectations(t *testing.T) {
	client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Version()})
	fakePodControl := FakePodControl{}
	manager := NewReplicationManager(client, 2)
	manager.podStoreSynced = alwaysReady
	manager.podControl = &fakePodControl

	controllerSpec := newReplicationController(2)
	manager.controllerStore.Store.Add(controllerSpec)
	pods := newPodList(nil, 2, api.PodPending, controllerSpec)
	manager.podStore.Store.Add(&pods.Items[0])
	postExpectationsPod := pods.Items[1]

	manager.expectations = FakeRCExpectations{
		NewRCExpectations(), true, func() {
			// If we check active pods before checking expectations, the rc
			// will create a new replica because it doesn't see this pod, but
			// has fulfilled its expectations.
			manager.podStore.Store.Add(&postExpectationsPod)
		},
	}
	manager.syncReplicationController(getKey(controllerSpec, t))
	validateSyncReplication(t, &fakePodControl, 0, 0)
}
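// TestAddDeploymentHash verifies that AddDeploymentKeyToReplicationController
// PUTs the new deployment key onto every pod selected by the rc, and then
// updates the rc itself.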
func TestAddDeploymentHash(t *testing.T) {
	buf := &bytes.Buffer{}
	codec := testapi.Codec()
	rc := &api.ReplicationController{
		ObjectMeta: api.ObjectMeta{Name: "rc"},
		Spec: api.ReplicationControllerSpec{
			Selector: map[string]string{
				"foo": "bar",
			},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{
						"foo": "bar",
					},
				},
			},
		},
	}

	podList := &api.PodList{
		Items: []api.Pod{
			{ObjectMeta: api.ObjectMeta{Name: "foo"}},
			{ObjectMeta: api.ObjectMeta{Name: "bar"}},
			{ObjectMeta: api.ObjectMeta{Name: "baz"}},
		},
	}

	seen := util.StringSet{}
	updatedRc := false
	fakeClient := &client.FakeRESTClient{
		Codec: codec,
		Client: client.HTTPClientFunc(func(req *http.Request) (*http.Response, error) {
			switch p, m := req.URL.Path, req.Method; {
			case p == testapi.ResourcePath("pods", "default", "") && m == "GET":
				if req.URL.RawQuery != "labelSelector=foo%3Dbar" {
					t.Errorf("Unexpected query string: %s", req.URL.RawQuery)
				}
				return &http.Response{StatusCode: 200, Body: objBody(codec, podList)}, nil
			case p == testapi.ResourcePath("pods", "default", "foo") && m == "PUT":
				seen.Insert("foo")
				obj := readOrDie(t, req, codec)
				podList.Items[0] = *(obj.(*api.Pod))
				return &http.Response{StatusCode: 200, Body: objBody(codec, &podList.Items[0])}, nil
			case p == testapi.ResourcePath("pods", "default", "bar") && m == "PUT":
				seen.Insert("bar")
				obj := readOrDie(t, req, codec)
				podList.Items[1] = *(obj.(*api.Pod))
				return &http.Response{StatusCode: 200, Body: objBody(codec, &podList.Items[1])}, nil
			case p == testapi.ResourcePath("pods", "default", "baz") && m == "PUT":
				seen.Insert("baz")
				obj := readOrDie(t, req, codec)
				podList.Items[2] = *(obj.(*api.Pod))
				return &http.Response{StatusCode: 200, Body: objBody(codec, &podList.Items[2])}, nil
			case p == testapi.ResourcePath("replicationcontrollers", "default", "rc") && m == "PUT":
				updatedRc = true
				return &http.Response{StatusCode: 200, Body: objBody(codec, rc)}, nil
			default:
				t.Fatalf("unexpected request: %#v\n%#v", req.URL, req)
				return nil, nil
			}
		}),
	}
	clientConfig := &client.Config{Version: testapi.Version()}
	client := client.NewOrDie(clientConfig)
	client.Client = fakeClient.Client

	if _, err := AddDeploymentKeyToReplicationController(rc, client, "dk", "hash", api.NamespaceDefault, buf); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	for _, pod := range podList.Items {
		if !seen.Has(pod.Name) {
			t.Errorf("Missing update for pod: %s", pod.Name)
		}
	}
	if !updatedRc {
		t.Errorf("Failed to update replication controller with new labels")
	}
}
func TestUpdateWithRetries(t *testing.T) {
	codec := testapi.Codec()
	rc := &api.ReplicationController{
		ObjectMeta: api.ObjectMeta{Name: "rc",
			Labels: map[string]string{
				"foo": "bar",
			},
		},
		Spec: api.ReplicationControllerSpec{
			Selector: map[string]string{
				"foo": "bar",
			},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{
						"foo": "bar",
					},
				},
				Spec: api.PodSpec{
					RestartPolicy: api.RestartPolicyAlways,
					DNSPolicy:     api.DNSClusterFirst,
				},
			},
		},
	}

	// Test end to end updating of the rc with retries. Essentially make sure the update handler
	// sees the right updates, failures in update/get are handled properly, and that the updated
	// rc with new resource version is returned to the caller. Without any of these rollingupdate
	// will fail cryptically.
	newRc := *rc
	newRc.ResourceVersion = "2"
	newRc.Spec.Selector["baz"] = "foobar"
	updates := []*http.Response{
		{StatusCode: 500, Body: objBody(codec, &api.ReplicationController{})},
		{StatusCode: 500, Body: objBody(codec, &api.ReplicationController{})},
		{StatusCode: 200, Body: objBody(codec, &newRc)},
	}
	gets := []*http.Response{
		{StatusCode: 500, Body: objBody(codec, &api.ReplicationController{})},
		{StatusCode: 200, Body: objBody(codec, rc)},
	}
	fakeClient := &client.FakeRESTClient{
		Codec: codec,
		Client: client.HTTPClientFunc(func(req *http.Request) (*http.Response, error) {
			switch p, m := req.URL.Path, req.Method; {
			case p == testapi.ResourcePath("replicationcontrollers", "default", "rc") && m == "PUT":
				update := updates[0]
				updates = updates[1:]
				// We should always get an update with a valid rc even when the get fails. The rc should always
				// contain the update.
				if c, ok := readOrDie(t, req, codec).(*api.ReplicationController); !ok || !reflect.DeepEqual(rc, c) {
					t.Errorf("Unexpected update body, got %+v expected %+v", c, rc)
				} else if sel, ok := c.Spec.Selector["baz"]; !ok || sel != "foobar" {
					t.Errorf("Expected selector label update, got %+v", c.Spec.Selector)
				} else {
					delete(c.Spec.Selector, "baz")
				}
				return update, nil
			case p == testapi.ResourcePath("replicationcontrollers", "default", "rc") && m == "GET":
				get := gets[0]
				gets = gets[1:]
				return get, nil
			default:
				t.Fatalf("unexpected request: %#v\n%#v", req.URL, req)
				return nil, nil
			}
		}),
	}
	clientConfig := &client.Config{Version: testapi.Version()}
	client := client.NewOrDie(clientConfig)
	client.Client = fakeClient.Client

	if rc, err := updateWithRetries(
		client.ReplicationControllers("default"), rc, func(c *api.ReplicationController) {
			c.Spec.Selector["baz"] = "foobar"
		}); err != nil {
		t.Errorf("unexpected error: %v", err)
	} else if sel, ok := rc.Spec.Selector["baz"]; !ok || sel != "foobar" || rc.ResourceVersion != "2" {
		t.Errorf("Expected updated rc, got %+v", rc)
	}
	if len(updates) != 0 || len(gets) != 0 {
		t.Errorf("Remaining updates %+v gets %+v", updates, gets)
	}
}
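// TestClient exercises basic client operations against a running master:
// fetching the server version, listing pods, hitting a validation error on
// create, and creating a pod that is then listed but left unscheduled.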
func TestClient(t *testing.T) {
	_, s := framework.RunAMaster(t)
	defer s.Close()

	ns := api.NamespaceDefault
	framework.DeleteAllEtcdKeys()
	client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Version()})

	info, err := client.ServerVersion()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if e, a := version.Get(), *info; !reflect.DeepEqual(e, a) {
		t.Errorf("expected %#v, got %#v", e, a)
	}

	pods, err := client.Pods(ns).List(labels.Everything(), fields.Everything())
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(pods.Items) != 0 {
		t.Errorf("expected no pods, got %#v", pods)
	}

	// get a validation error
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			GenerateName: "test",
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name: "test",
				},
			},
		},
	}
	got, err := client.Pods(ns).Create(pod)
	if err == nil {
		t.Fatalf("unexpected non-error: %v", got)
	}

	// get a created pod
	pod.Spec.Containers[0].Image = "an-image"
	got, err = client.Pods(ns).Create(pod)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got.Name == "" {
		t.Errorf("unexpected empty pod Name %v", got)
	}

	// pod is shown, but not scheduled
	pods, err = client.Pods(ns).List(labels.Everything(), fields.Everything())
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(pods.Items) != 1 {
		t.Errorf("expected one pod, got %#v", pods)
	}
	actual := pods.Items[0]
	if actual.Name != got.Name {
		t.Errorf("expected pod %#v, got %#v", got, actual)
	}
	if actual.Spec.NodeName != "" {
		t.Errorf("expected pod to be unscheduled, got %#v", actual)
	}
}
func TestMultiWatch(t *testing.T) {
	// Disable this test as long as it demonstrates a problem.
	// TODO: Reenable this test when we get #6059 resolved.
	return
	const watcherCount = 50
	runtime.GOMAXPROCS(watcherCount)

	framework.DeleteAllEtcdKeys()
	defer framework.DeleteAllEtcdKeys()
	_, s := framework.RunAMaster(t)
	defer s.Close()

	ns := api.NamespaceDefault
	client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Version()})

	dummyEvent := func(i int) *api.Event {
		name := fmt.Sprintf("unrelated-%v", i)
		return &api.Event{
			ObjectMeta: api.ObjectMeta{
				Name:      fmt.Sprintf("%v.%x", name, time.Now().UnixNano()),
				Namespace: ns,
			},
			InvolvedObject: api.ObjectReference{
				Name:      name,
				Namespace: ns,
			},
			Reason: fmt.Sprintf("unrelated change %v", i),
		}
	}

	type timePair struct {
		t    time.Time
		name string
	}

	receivedTimes := make(chan timePair, watcherCount*2)
	watchesStarted := sync.WaitGroup{}

	// make a bunch of pods and watch them
	for i := 0; i < watcherCount; i++ {
		watchesStarted.Add(1)
		name := fmt.Sprintf("multi-watch-%v", i)
		got, err := client.Pods(ns).Create(&api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name:   name,
				Labels: labels.Set{"watchlabel": name},
			},
			Spec: api.PodSpec{
				Containers: []api.Container{{
					Name:  "nothing",
					Image: "qingyuan/pause",
				}},
			},
		})
		if err != nil {
			t.Fatalf("Couldn't make %v: %v", name, err)
		}
		go func(name, rv string) {
			w, err := client.Pods(ns).Watch(
				labels.Set{"watchlabel": name}.AsSelector(),
				fields.Everything(),
				rv,
			)
			if err != nil {
				panic(fmt.Sprintf("watch error for %v: %v", name, err))
			}
			defer w.Stop()
			watchesStarted.Done()
			e, ok := <-w.ResultChan() // should get the update (that we'll do below)
			if !ok {
				panic(fmt.Sprintf("%v ended early?", name))
			}
			if e.Type != watch.Modified {
				panic(fmt.Sprintf("Got unexpected watch notification:\n%v: %+v %+v", name, e, e.Object))
			}
			receivedTimes <- timePair{time.Now(), name}
		}(name, got.ObjectMeta.ResourceVersion)
	}
	log.Printf("%v: %v pods made and watchers started", time.Now(), watcherCount)

	// wait for watches to start before we start spamming the system with
	// objects below, otherwise we'll hit the watch window restriction.
	watchesStarted.Wait()

	const (
		useEventsAsUnrelatedType = false
		usePodsAsUnrelatedType   = true
	)

	// make a bunch of unrelated changes in parallel
	if useEventsAsUnrelatedType {
		const unrelatedCount = 3000
		var wg sync.WaitGroup
		defer wg.Wait()
		changeToMake := make(chan int, unrelatedCount*2)
		changeMade := make(chan int, unrelatedCount*2)
		go func() {
			for i := 0; i < unrelatedCount; i++ {
				changeToMake <- i
			}
			close(changeToMake)
		}()
		for i := 0; i < 50; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for {
					i, ok := <-changeToMake
					if !ok {
						return
					}
					if _, err := client.Events(ns).Create(dummyEvent(i)); err != nil {
						panic(fmt.Sprintf("couldn't make an event: %v", err))
					}
					changeMade <- i
				}
			}()
		}

		for i := 0; i < 2000; i++ {
			<-changeMade
			if (i+1)%50 == 0 {
				log.Printf("%v: %v unrelated changes made", time.Now(), i+1)
			}
		}
	}
	if usePodsAsUnrelatedType {
		const unrelatedCount = 3000
		var wg sync.WaitGroup
		defer wg.Wait()
		changeToMake := make(chan int, unrelatedCount*2)
		changeMade := make(chan int, unrelatedCount*2)
		go func() {
			for i := 0; i < unrelatedCount; i++ {
				changeToMake <- i
			}
			close(changeToMake)
		}()
		for i := 0; i < 50; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for {
					i, ok := <-changeToMake
					if !ok {
						return
					}
					name := fmt.Sprintf("unrelated-%v", i)
					_, err := client.Pods(ns).Create(&api.Pod{
						ObjectMeta: api.ObjectMeta{
							Name: name,
						},
						Spec: api.PodSpec{
							Containers: []api.Container{{
								Name:  "nothing",
								Image: "qingyuan/pause",
							}},
						},
					})
					if err != nil {
						panic(fmt.Sprintf("couldn't make unrelated pod: %v", err))
					}
					changeMade <- i
				}
			}()
		}

		for i := 0; i < 2000; i++ {
			<-changeMade
			if (i+1)%50 == 0 {
				log.Printf("%v: %v unrelated changes made", time.Now(), i+1)
			}
		}
	}

	// Now we still have changes being made in parallel, but at least 1000 have been made.
	// Make some updates to send down the watches.
	sentTimes := make(chan timePair, watcherCount*2)
	for i := 0; i < watcherCount; i++ {
		go func(i int) {
			name := fmt.Sprintf("multi-watch-%v", i)
			pod, err := client.Pods(ns).Get(name)
			if err != nil {
				panic(fmt.Sprintf("Couldn't get %v: %v", name, err))
			}
			pod.Spec.Containers[0].Image = "qingyuan/pause:1"
			sentTimes <- timePair{time.Now(), name}
			if _, err := client.Pods(ns).Update(pod); err != nil {
				panic(fmt.Sprintf("Couldn't make %v: %v", name, err))
			}
		}(i)
	}

	sent := map[string]time.Time{}
	for i := 0; i < watcherCount; i++ {
		tp := <-sentTimes
		sent[tp.name] = tp.t
	}
	log.Printf("all changes made")
	dur := map[string]time.Duration{}
	for i := 0; i < watcherCount; i++ {
		tp := <-receivedTimes
		delta := tp.t.Sub(sent[tp.name])
		dur[tp.name] = delta
		log.Printf("%v: %v", tp.name, delta)
	}
	log.Printf("all watches ended")
	t.Errorf("durations: %v", dur)
}
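// TestSingleWatch verifies that watching a single named event delivers
// notifications only for that object, not for every event in the namespace.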
func TestSingleWatch(t *testing.T) {
	_, s := runAMaster(t)
	defer s.Close()

	ns := "blargh"
	deleteAllEtcdKeys()
	client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Version()})

	mkEvent := func(i int) *api.Event {
		name := fmt.Sprintf("event-%v", i)
		return &api.Event{
			ObjectMeta: api.ObjectMeta{
				Namespace: ns,
				Name:      name,
			},
			InvolvedObject: api.ObjectReference{
				Namespace: ns,
				Name:      name,
			},
			Reason: fmt.Sprintf("event %v", i),
		}
	}

	rv1 := ""
	for i := 0; i < 10; i++ {
		event := mkEvent(i)
		got, err := client.Events(ns).Create(event)
		if err != nil {
			t.Fatalf("Failed creating event %#q: %v", event, err)
		}
		if rv1 == "" {
			rv1 = got.ResourceVersion
			if rv1 == "" {
				t.Fatal("did not get a resource version.")
			}
		}
		t.Logf("Created event %#v", got.ObjectMeta)
	}

	w, err := client.Get().
		Prefix("watch").
		NamespaceIfScoped(ns, len(ns) > 0).
		Resource("events").
		Name("event-9").
		Param("resourceVersion", rv1).
		Watch()
	if err != nil {
		t.Fatalf("Failed watch: %v", err)
	}
	defer w.Stop()

	select {
	case <-time.After(5 * time.Second):
		t.Fatal("watch took longer than 5 seconds")
	case got, ok := <-w.ResultChan():
		if !ok {
			t.Fatal("Watch channel closed unexpectedly.")
		}

		// We expect to see an ADD of event-9 and only event-9. (This
		// catches a bug where all the events would have been sent down
		// the channel.)
		if e, a := watch.Added, got.Type; e != a {
			t.Errorf("Wanted %v, got %v", e, a)
		}
		switch o := got.Object.(type) {
		case *api.Event:
			if e, a := "event-9", o.Name; e != a {
				t.Errorf("Wanted %v, got %v", e, a)
			}
		default:
			t.Fatalf("Unexpected watch event containing object %#q", got)
		}
	}
}
// startServiceAccountTestServer returns a started server.
// It is the responsibility of the caller to ensure the returned stopFunc is called.
func startServiceAccountTestServer(t *testing.T) (*client.Client, client.Config, func()) {
	deleteAllEtcdKeys()

	// Etcd
	helper, err := master.NewEtcdHelper(newEtcdClient(), testapi.Version(), etcdtest.PathPrefix())
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Listener
	var m *master.Master
	apiServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		m.Handler.ServeHTTP(w, req)
	}))

	// Anonymous client config
	clientConfig := client.Config{Host: apiServer.URL, Version: testapi.Version()}
	// Root client
	rootClient := client.NewOrDie(&client.Config{Host: apiServer.URL, Version: testapi.Version(), BearerToken: rootToken})

	// Set up two authenticators:
	// 1. A token authenticator that maps the rootToken to the "root" user
	// 2. A ServiceAccountToken authenticator that validates ServiceAccount tokens
	rootTokenAuth := authenticator.TokenFunc(func(token string) (user.Info, bool, error) {
		if token == rootToken {
			return &user.DefaultInfo{rootUserName, "", []string{}}, true, nil
		}
		return nil, false, nil
	})
	serviceAccountKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		t.Fatalf("unexpected error generating service account key: %v", err)
	}
	serviceAccountTokenGetter := serviceaccount.NewGetterFromClient(rootClient)
	serviceAccountTokenAuth := serviceaccount.JWTTokenAuthenticator([]*rsa.PublicKey{&serviceAccountKey.PublicKey}, true, serviceAccountTokenGetter)
	authenticator := union.New(
		bearertoken.New(rootTokenAuth),
		bearertoken.New(serviceAccountTokenAuth),
	)

	// Set up a stub authorizer:
	// 1. The "root" user is allowed to do anything
	// 2. ServiceAccounts named "ro" are allowed read-only operations in their namespace
	// 3. ServiceAccounts named "rw" are allowed any operation in their namespace
	authorizer := authorizer.AuthorizerFunc(func(attrs authorizer.Attributes) error {
		username := attrs.GetUserName()
		ns := attrs.GetNamespace()

		// If the user is "root"...
		if username == rootUserName {
			// allow them to do anything
			return nil
		}

		// If the user is a service account...
		if serviceAccountNamespace, serviceAccountName, err := serviceaccount.SplitUsername(username); err == nil {
			// Limit them to their own namespace
			if serviceAccountNamespace == ns {
				switch serviceAccountName {
				case readOnlyServiceAccountName:
					if attrs.IsReadOnly() {
						return nil
					}
				case readWriteServiceAccountName:
					return nil
				}
			}
		}

		return fmt.Errorf("User %s is denied (ns=%s, readonly=%v, resource=%s)", username, ns, attrs.IsReadOnly(), attrs.GetResource())
	})

	// Set up admission plugin to auto-assign serviceaccounts to pods
	serviceAccountAdmission := serviceaccountadmission.NewServiceAccount(rootClient)

	// Create a master and install handlers into mux.
	m = master.New(&master.Config{
		EtcdHelper:        helper,
		QingletClient:     client.FakeQingletClient{},
		EnableLogsSupport: false,
		EnableUISupport:   false,
		EnableIndex:       true,
		APIPrefix:         "/api",
		Authenticator:     authenticator,
		Authorizer:        authorizer,
		AdmissionControl:  serviceAccountAdmission,
	})

	// Start the service account and service account token controllers
	tokenController := serviceaccount.NewTokensController(rootClient, serviceaccount.DefaultTokenControllerOptions(serviceaccount.JWTTokenGenerator(serviceAccountKey)))
	tokenController.Run()
	serviceAccountController := serviceaccount.NewServiceAccountsController(rootClient, serviceaccount.DefaultServiceAccountsControllerOptions())
	serviceAccountController.Run()
	// Start the admission plugin reflectors
	serviceAccountAdmission.Run()

	stop := func() {
		tokenController.Stop()
		serviceAccountController.Stop()
		serviceAccountAdmission.Stop()
		apiServer.Close()
	}

	return rootClient, clientConfig, stop
}