// TestControllerNoReplicaUpdate (store-based manager): when the number of
// running pods already equals rc.Status.Replicas, a sync must perform no
// creates, no deletes, and send no request at all to the API server.
func TestControllerNoReplicaUpdate(t *testing.T) {
	// Setup a fake server to listen for requests, and run the rc manager in steady state
	fakeHandler := util.FakeHandler{
		StatusCode:   200,
		ResponseBody: "",
	}
	testServer := httptest.NewServer(&fakeHandler)
	defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()})
	manager := NewReplicationManager(client, BurstReplicas)

	// Steady state for the replication controller, no Status.Replicas updates expected
	activePods := 5
	rc := newReplicationController(activePods)
	manager.controllerStore.Store.Add(rc)
	// Status is set after the Add above — keep this ordering.
	rc.Status = api.ReplicationControllerStatus{Replicas: activePods}
	// Seed the pod store with exactly activePods running pods owned by rc.
	newPodList(manager.podStore.Store, activePods, api.PodRunning, rc)
	fakePodControl := FakePodControl{}
	manager.podControl = &fakePodControl
	manager.syncReplicationController(getKey(rc, t))
	// Zero creates, zero deletes expected.
	validateSyncReplication(t, &fakePodControl, 0, 0)
	// The fake server must never have been contacted in steady state.
	if fakeHandler.RequestReceived != nil {
		t.Errorf("Unexpected update when pods and rcs are in a steady state")
	}
}
// TestSyncEndpointsProtocolUDP (store-based controller): syncing a service
// in namespace "other" against a pre-existing UDP endpoints object must
// result in zero requests to the endpoints handler.
func TestSyncEndpointsProtocolUDP(t *testing.T) {
	ns := "other"
	// Fake server pre-loaded with the existing Endpoints object for ns/foo.
	testServer, endpointsHandler := makeTestServer(t, ns,
		serverResponse{http.StatusOK, &api.Endpoints{
			ObjectMeta: api.ObjectMeta{
				Name:            "foo",
				Namespace:       ns,
				ResourceVersion: "1",
			},
			Subsets: []api.EndpointSubset{{
				Addresses: []api.EndpointAddress{{IP: "6.7.8.9"}},
				Ports:     []api.EndpointPort{{Port: 1000, Protocol: "UDP"}},
			}},
		}})
	defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()})
	endpoints := NewEndpointController(client)
	// Service with an empty selector; note no pods are added to the pod store.
	endpoints.serviceStore.Store.Add(&api.Service{
		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns},
		Spec: api.ServiceSpec{
			Selector: map[string]string{},
			Ports:    []api.ServicePort{{Port: 80}},
		},
	})
	endpoints.syncService(ns + "/foo")
	// The endpoints handler must not have been hit at all.
	endpointsHandler.ValidateRequestCount(t, 0)
}
// TestSyncEndpointsItemsPreexistingIdentical: the stored Endpoints object
// already matches what the pods imply, so syncService should only GET the
// existing object and issue no write.
func TestSyncEndpointsItemsPreexistingIdentical(t *testing.T) {
	ns := api.NamespaceDefault
	// Serve the identical pre-existing endpoints (1.2.3.4:8080 -> pod0).
	testServer, endpointsHandler := makeTestServer(t, api.NamespaceDefault,
		serverResponse{http.StatusOK, &api.Endpoints{
			ObjectMeta: api.ObjectMeta{
				ResourceVersion: "1",
				Name:            "foo",
				Namespace:       ns,
			},
			Subsets: []api.EndpointSubset{{
				Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
				Ports:     []api.EndpointPort{{Port: 8080, Protocol: "TCP"}},
			}},
		}})
	defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()})
	endpoints := NewEndpointController(client)
	// addPods presumably seeds one matching pod (pod0), agreeing with the
	// TargetRef above — confirm against the helper's definition.
	addPods(endpoints.podStore.Store, api.NamespaceDefault, 1, 1)
	endpoints.serviceStore.Store.Add(&api.Service{
		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: api.NamespaceDefault},
		Spec: api.ServiceSpec{
			Selector: map[string]string{"foo": "bar"},
			Ports:    []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: util.NewIntOrStringFromInt(8080)}},
		},
	})
	endpoints.syncService(ns + "/foo")
	// Only a GET of the existing endpoints object is expected.
	endpointsHandler.ValidateRequest(t, testapi.ResourcePathWithNamespaceQuery("endpoints", api.NamespaceDefault, "foo"), "GET", nil)
}
// TestSyncEndpointsPodError: the fake server answers the pod list with a
// 500, so SyncServiceEndpoints must return a non-nil error.
func TestSyncEndpointsPodError(t *testing.T) {
	serviceList := api.ServiceList{
		Items: []api.Service{
			{
				Selector: map[string]string{
					"foo": "bar",
				},
			},
		},
	}
	// First response (pods) fails; second (services) succeeds.
	testServer := makeTestServer(t,
		serverResponse{http.StatusInternalServerError, api.PodList{}},
		serverResponse{http.StatusOK, serviceList})
	client := client.NewOrDie(testServer.URL, nil)
	serviceRegistry := registrytest.ServiceRegistry{
		List: api.ServiceList{
			Items: []api.Service{
				{
					Selector: map[string]string{
						"foo": "bar",
					},
				},
			},
		},
	}
	endpoints := NewEndpointController(&serviceRegistry, client)
	// The failed pod list must surface as an error.
	if err := endpoints.SyncServiceEndpoints(); err == nil {
		t.Error("Unexpected non-error")
	}
}
// TestCheckLeftoverEndpoints verifies that checkLeftoverEndpoints lists
// endpoints across all namespaces and enqueues each as "namespace/name".
func TestCheckLeftoverEndpoints(t *testing.T) {
	ns := api.NamespaceDefault
	// Note that this requests *all* endpoints, therefore the NamespaceAll
	// below.
	testServer, _ := makeTestServer(t, api.NamespaceAll,
		serverResponse{http.StatusOK, &api.EndpointsList{
			ListMeta: api.ListMeta{
				ResourceVersion: "1",
			},
			Items: []api.Endpoints{{
				ObjectMeta: api.ObjectMeta{
					Name:            "foo",
					Namespace:       ns,
					ResourceVersion: "1",
				},
				Subsets: []api.EndpointSubset{{
					Addresses: []api.EndpointAddress{{IP: "6.7.8.9"}},
					Ports:     []api.EndpointPort{{Port: 1000}},
				}},
			}},
		}})
	defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()})
	endpoints := NewEndpointController(client)
	endpoints.checkLeftoverEndpoints()

	// Exactly one item should have been queued...
	if e, a := 1, endpoints.queue.Len(); e != a {
		t.Fatalf("Expected %v, got %v", e, a)
	}
	// ...keyed as namespace/name.
	got, _ := endpoints.queue.Get()
	if e, a := ns+"/foo", got; e != a {
		t.Errorf("Expected %v, got %v", e, a)
	}
}
// NewAPIFactory returns a cmdutil.Factory whose collaborators (client,
// REST client, describer, printer, validator, default namespace, client
// config) are all served from the returned testFactory, plus the test
// codec. Tests mutate the testFactory fields to control what each factory
// method hands back, including the error to return.
func NewAPIFactory() (*cmdutil.Factory, *testFactory, runtime.Codec) {
	t := &testFactory{
		Validator: validation.NullSchema{},
	}
	return &cmdutil.Factory{
		Object: func() (meta.RESTMapper, runtime.ObjectTyper) {
			return latest.RESTMapper, api.Scheme
		},
		Client: func() (*client.Client, error) {
			// Swap out the HTTP client out of the client with the fake's version.
			fakeClient := t.Client.(*client.FakeRESTClient)
			c := client.NewOrDie(t.ClientConfig)
			c.Client = fakeClient.Client
			return c, t.Err
		},
		RESTClient: func(*meta.RESTMapping) (resource.RESTClient, error) {
			return t.Client, t.Err
		},
		Describer: func(*meta.RESTMapping) (kubectl.Describer, error) {
			return t.Describer, t.Err
		},
		Printer: func(mapping *meta.RESTMapping, noHeaders bool) (kubectl.ResourcePrinter, error) {
			return t.Printer, t.Err
		},
		Validator: func() (validation.Schema, error) {
			return t.Validator, t.Err
		},
		DefaultNamespace: func() (string, error) {
			return t.Namespace, t.Err
		},
		ClientConfig: func() (*client.Config, error) {
			return t.ClientConfig, t.Err
		},
	}, t, testapi.Codec()
}
func k8sClientFactory() *k8sClient { if len(*addr) > 0 && len(*user) > 0 && len(*pword) > 0 { config := client.Config{ Host: *addr, Username: *user, Password: *pword, Insecure: true, } return &k8sClient{client.NewOrDie(&config)} } else { kubernetesService := os.Getenv("KUBERNETES_SERVICE_HOST") if kubernetesService == "" { glog.Fatalf("Please specify the Kubernetes server with --server") } apiServer := fmt.Sprintf("https://%s:%s", kubernetesService, os.Getenv("KUBERNETES_SERVICE_PORT")) token, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/token") if err != nil { glog.Fatalf("No service account token found") } config := client.Config{ Host: apiServer, BearerToken: string(token), Insecure: true, } c, err := client.New(&config) if err != nil { glog.Fatalf("Failed to make client: %v", err) } return &k8sClient{c} } }
// TestSyncReplicationControllerCreates: syncing an rc of size 2 against an
// empty pod list must create 2 pods and must not PUT a status update.
func TestSyncReplicationControllerCreates(t *testing.T) {
	// Serve an empty pod list for the pods endpoint.
	body := runtime.EncodeOrDie(testapi.Codec(), newPodList(0))
	fakePodHandler := util.FakeHandler{
		StatusCode:   200,
		ResponseBody: string(body),
	}
	fakePodControl := FakePodControl{}
	controller := newReplicationController(2)

	// Records any request to the controller's own URL so we can assert that
	// no status update happened.
	fakeUpdateHandler := util.FakeHandler{
		StatusCode:   200,
		ResponseBody: runtime.EncodeOrDie(testapi.Codec(), &controller),
		T:            t,
	}
	testServerMux := http.NewServeMux()
	testServerMux.Handle("/api/"+testapi.Version()+"/pods/", &fakePodHandler)
	testServerMux.Handle(fmt.Sprintf("/api/"+testapi.Version()+"/replicationControllers/%s", controller.Name), &fakeUpdateHandler)
	testServer := httptest.NewServer(testServerMux)
	defer testServer.Close()

	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()})
	manager := NewReplicationManager(client)
	manager.podControl = &fakePodControl
	manager.syncReplicationController(controller)
	// Expect exactly 2 creates and 0 deletes.
	validateSyncReplication(t, &fakePodControl, 2, 0)
	// No Status.Replicas update expected even though 2 pods were just created,
	// because the controller manager can't observe the pods till the next sync cycle.
	if fakeUpdateHandler.RequestReceived != nil {
		t.Errorf("Unexpected updates for controller via %v", fakeUpdateHandler.RequestReceived.URL)
	}
}
// main reads the bearer-token file, builds a client for *argMaster
// (upgrading from insecure to CA-verified TLS when *argCaCertFile exists),
// and watches nodes until the process is killed.
func main() {
	flag.Parse()
	bearerToken, err := ioutil.ReadFile(*argBearerTokenFile)
	if err != nil {
		log.Fatal(err)
	}
	config := client.Config{
		Host:        *argMaster,
		Insecure:    *argInsecure,
		BearerToken: string(bearerToken),
		Version:     *argApiVersion,
	}
	// A readable CA cert file overrides --insecure and enables verification.
	if _, err := os.Stat(*argCaCertFile); err == nil {
		config.Insecure = false
		config.TLSClientConfig = client.TLSClientConfig{
			CAFile: *argCaCertFile,
		}
	}
	client := client.NewOrDie(&config)
	// Nothing in this function ever sends on done, so main blocks forever
	// below while watchNodes runs in its goroutine.
	done := make(chan bool)
	go watchNodes(client)
	<-done
}
// TestControllerUpdateReplicas (list-based manager): with 4 running pods, a
// spec of 5 and a stale Status.Replicas of 2, synchronize() should PUT a
// status of 4 and create exactly 1 pod.
func TestControllerUpdateReplicas(t *testing.T) {
	// Insufficient number of pods in the system, and Status.Replicas is wrong;
	// Status.Replica should update to match number of pods in system, 1 new pod should be created.
	rc := newReplicationController(5)
	rc.Status = api.ReplicationControllerStatus{Replicas: 2}
	activePods := 4
	testServer, fakeUpdateHandler := makeTestServer(t, api.NamespaceDefault, rc.Name,
		serverResponse{http.StatusOK, newPodList(activePods)},
		serverResponse{http.StatusOK, &api.ReplicationControllerList{
			Items: []api.ReplicationController{rc},
		}},
		serverResponse{http.StatusOK, &rc})
	defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()})
	manager := NewReplicationManager(client)
	fakePodControl := FakePodControl{}
	manager.podControl = &fakePodControl
	manager.synchronize()

	// Status.Replicas should go up from 2->4 even though we created 5-4=1 pod
	rc.Status = api.ReplicationControllerStatus{Replicas: 4}
	// These are set by default.
	rc.Spec.Selector = rc.Spec.Template.Labels
	rc.Labels = rc.Spec.Template.Labels
	// Re-encode the mutated rc to form the exact PUT body we expect.
	decRc := runtime.EncodeOrDie(testapi.Codec(), &rc)
	fakeUpdateHandler.ValidateRequest(t, testapi.ResourcePathWithNamespaceQuery(replicationControllerResourceName(), rc.Namespace, rc.Name), "PUT", &decRc)
	validateSyncReplication(t, &fakePodControl, 1, 0)
}
// TestControllerNoReplicaUpdate (list-based manager): when the pod count
// already equals Status.Replicas, synchronize() must neither create pods
// nor send anything to the update handler.
func TestControllerNoReplicaUpdate(t *testing.T) {
	// Steady state for the replication controller, no Status.Replicas updates expected
	rc := newReplicationController(5)
	rc.Status = api.ReplicationControllerStatus{Replicas: 5}
	activePods := 5
	testServer, fakeUpdateHandler := makeTestServer(t, api.NamespaceDefault, rc.Name,
		serverResponse{http.StatusOK, newPodList(activePods)},
		serverResponse{http.StatusOK, &api.ReplicationControllerList{
			Items: []api.ReplicationController{rc},
		}},
		serverResponse{http.StatusOK, &rc})
	defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()})
	manager := NewReplicationManager(client)
	fakePodControl := FakePodControl{}
	manager.podControl = &fakePodControl
	manager.synchronize()
	// Zero creates, zero deletes...
	validateSyncReplication(t, &fakePodControl, 0, 0)
	// ...and no write ever reached the update handler.
	if fakeUpdateHandler.RequestReceived != nil {
		t.Errorf("Unexpected updates for controller via %v", fakeUpdateHandler.RequestReceived.URL)
	}
}
// TestSynchronize: two rcs (sizes 4 and 3) and an empty pod list should
// produce 4+3=7 creates in a single synchronize() pass. The third server
// response (for rc writes) is deliberately a 500.
func TestSynchronize(t *testing.T) {
	controllerSpec1 := newReplicationController(4)
	controllerSpec2 := newReplicationController(3)
	// Give the second rc a distinct name and pod-template labels so the two
	// controllers don't overlap.
	controllerSpec2.Name = "bar"
	controllerSpec2.Spec.Template.ObjectMeta.Labels = map[string]string{
		"name": "bar",
		"type": "production",
	}
	testServer, _ := makeTestServer(t, api.NamespaceDefault, "",
		serverResponse{http.StatusOK, newPodList(0)},
		serverResponse{http.StatusOK, &api.ReplicationControllerList{
			Items: []api.ReplicationController{
				controllerSpec1,
				controllerSpec2,
			}}},
		serverResponse{http.StatusInternalServerError, &api.ReplicationController{}})
	defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()})
	manager := NewReplicationManager(client)
	fakePodControl := FakePodControl{}
	manager.podControl = &fakePodControl
	manager.synchronize()
	// 7 creates total across both controllers, no deletes.
	validateSyncReplication(t, &fakePodControl, 7, 0)
}
// TestControllerUpdateReplicas (store-based manager): 4 running pods, spec
// of 5, stale Status.Replicas of 2 — expect one status PUT (to 4) and one
// pod creation.
func TestControllerUpdateReplicas(t *testing.T) {
	// This is a happy server just to record the PUT request we expect for status.Replicas
	fakeHandler := util.FakeHandler{
		StatusCode:   200,
		ResponseBody: "",
	}
	testServer := httptest.NewServer(&fakeHandler)
	defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()})
	manager := NewReplicationManager(client, BurstReplicas)

	// Insufficient number of pods in the system, and Status.Replicas is wrong;
	// Status.Replica should update to match number of pods in system, 1 new pod should be created.
	rc := newReplicationController(5)
	manager.controllerStore.Store.Add(rc)
	rc.Status = api.ReplicationControllerStatus{Replicas: 2}
	newPodList(manager.podStore.Store, 4, api.PodRunning, rc)

	// The PUT issued during sync gets this encoded rc back as its response —
	// set before syncReplicationController runs.
	response := runtime.EncodeOrDie(testapi.Codec(), rc)
	fakeHandler.ResponseBody = response

	fakePodControl := FakePodControl{}
	manager.podControl = &fakePodControl
	manager.syncReplicationController(getKey(rc, t))

	// Status.Replicas should go up from 2->4 even though we created 5-4=1 pod
	rc.Status = api.ReplicationControllerStatus{Replicas: 4}
	decRc := runtime.EncodeOrDie(testapi.Codec(), rc)
	fakeHandler.ValidateRequest(t, testapi.ResourcePath(replicationControllerResourceName(), rc.Namespace, rc.Name), "PUT", &decRc)
	validateSyncReplication(t, &fakePodControl, 1, 0)
}
func TestDeleteControllerAndExpectations(t *testing.T) { client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Version()}) manager := NewReplicationManager(client, 10) manager.podStoreSynced = alwaysReady rc := newReplicationController(1) manager.controllerStore.Store.Add(rc) fakePodControl := FakePodControl{} manager.podControl = &fakePodControl // This should set expectations for the rc manager.syncReplicationController(getKey(rc, t)) validateSyncReplication(t, &fakePodControl, 1, 0) fakePodControl.clear() // This is to simulate a concurrent addPod, that has a handle on the expectations // as the controller deletes it. podExp, exists, err := manager.expectations.GetExpectations(rc) if !exists || err != nil { t.Errorf("No expectations found for rc") } manager.controllerStore.Delete(rc) manager.syncReplicationController(getKey(rc, t)) if _, exists, err = manager.expectations.GetExpectations(rc); exists { t.Errorf("Found expectaions, expected none since the rc has been deleted.") } // This should have no effect, since we've deleted the rc. podExp.Seen(1, 0) manager.podStore.Store.Replace(make([]interface{}, 0)) manager.syncReplicationController(getKey(rc, t)) validateSyncReplication(t, &fakePodControl, 0, 0) }
func TestSyncEndpointsItems(t *testing.T) { body, _ := json.Marshal(newPodList(1)) fakeHandler := util.FakeHandler{ StatusCode: 200, ResponseBody: string(body), } testServer := httptest.NewTLSServer(&fakeHandler) client := client.NewOrDie(testServer.URL, nil) serviceRegistry := registrytest.ServiceRegistry{ List: api.ServiceList{ Items: []api.Service{ { Selector: map[string]string{ "foo": "bar", }, }, }, }, } endpoints := NewEndpointController(&serviceRegistry, client) if err := endpoints.SyncServiceEndpoints(); err != nil { t.Errorf("unexpected error: %v", err) } if len(serviceRegistry.Endpoints.Endpoints) != 1 { t.Errorf("Unexpected endpoints update: %#v", serviceRegistry.Endpoints) } }
// TestSyncEndpointsProtocolUDP (list-based controller): a UDP service whose
// endpoints object already matches must cause zero requests to the
// endpoints handler.
func TestSyncEndpointsProtocolUDP(t *testing.T) {
	serviceList := api.ServiceList{
		Items: []api.Service{
			{
				ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "other"},
				Spec: api.ServiceSpec{
					Selector: map[string]string{},
					Protocol: api.ProtocolUDP,
				},
			},
		},
	}
	// Responses in order: empty pod list, the service list, and the
	// pre-existing UDP endpoints object.
	testServer, endpointsHandler := makeTestServer(t,
		serverResponse{http.StatusOK, newPodList(0)},
		serverResponse{http.StatusOK, &serviceList},
		serverResponse{http.StatusOK, &api.Endpoints{
			ObjectMeta: api.ObjectMeta{
				Name:            "foo",
				ResourceVersion: "1",
			},
			Protocol:  api.ProtocolUDP,
			Endpoints: []api.Endpoint{{IP: "6.7.8.9", Port: 1000}},
		}})
	defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()})
	endpoints := NewEndpointController(client)
	if err := endpoints.SyncServiceEndpoints(); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	// No writes expected: the endpoints handler was never hit.
	endpointsHandler.ValidateRequestCount(t, 0)
}
// newKubeSource builds a kubeSource backed by a Kubernetes client for
// *argMaster, normalizing the master address to carry a scheme. It returns
// an error when the kubernetes_master flag is unset or when the nodes API
// cannot be constructed.
func newKubeSource(pollDuration time.Duration) (*kubeSource, error) {
	if len(*argMaster) == 0 {
		return nil, fmt.Errorf("kubernetes_master flag not specified")
	}
	// Default to http:// when no scheme was given. Note this mutates the
	// flag value in place.
	if !(strings.HasPrefix(*argMaster, "http://") || strings.HasPrefix(*argMaster, "https://")) {
		*argMaster = "http://" + *argMaster
	}
	kubeClient := kube_client.NewOrDie(&kube_client.Config{
		Host:     *argMaster,
		Version:  kubeClientVersion,
		Insecure: *argMasterInsecure,
	})
	nodesApi, err := nodes.NewKubeNodes(kubeClient)
	if err != nil {
		return nil, err
	}
	glog.Infof("Using Kubernetes client with master %q and version %s\n", *argMaster, kubeClientVersion)
	glog.Infof("Using kubelet port %q", *argKubeletPort)
	return &kubeSource{
		lastQuery:    time.Now(),
		pollDuration: pollDuration,
		kubeletPort:  *argKubeletPort,
		kubeletApi:   datasource.NewKubelet(),
		nodesApi:     nodesApi,
		podsApi:      newPodsApi(kubeClient),
		podErrors:    make(map[podInstance]int),
	}, nil
}
func newApiClient(addr string, port int) *client.Client { apiServerURL := fmt.Sprintf("http://%s:%d", addr, port) cl := client.NewOrDie(&client.Config{Host: apiServerURL, Version: testapi.Version()}) cl.PollPeriod = time.Second * 1 cl.Sync = true return cl }
// TestBind (table-driven): binder.Bind must POST the codec-encoded binding
// to the namespaced bindings resource path.
func TestBind(t *testing.T) {
	table := []struct {
		binding *api.Binding
	}{
		{binding: &api.Binding{
			ObjectMeta: api.ObjectMeta{
				Namespace: api.NamespaceDefault,
				Name:      "foo",
			},
			Target: api.ObjectReference{
				Name: "foohost.kubernetes.mydomain.com",
			},
		}},
	}

	for _, item := range table {
		handler := util.FakeHandler{
			StatusCode:   200,
			ResponseBody: "",
			T:            t,
		}
		server := httptest.NewServer(&handler)
		// NOTE(review): defer-in-loop keeps every server open until the test
		// function returns; harmless with a single table entry.
		defer server.Close()
		client := client.NewOrDie(&client.Config{Host: server.URL, Version: testapi.Version()})
		b := binder{client}
		if err := b.Bind(item.binding); err != nil {
			t.Errorf("Unexpected error: %v", err)
			continue
		}
		// The request body must be the exact encoded binding.
		expectedBody := runtime.EncodeOrDie(testapi.Codec(), item.binding)
		handler.ValidateRequest(t, testapi.ResourcePath("bindings", api.NamespaceDefault, ""), "POST", &expectedBody)
	}
}
// TestSyncEndpointsItems (list-based controller): one service with one
// matching pod should POST a new endpoints object containing
// "1.2.3.4:8080".
func TestSyncEndpointsItems(t *testing.T) {
	serviceList := api.ServiceList{
		Items: []api.Service{
			{
				ObjectMeta: api.ObjectMeta{Name: "foo"},
				Spec: api.ServiceSpec{
					Selector: map[string]string{
						"foo": "bar",
					},
				},
			},
		},
	}
	// Responses in order: one pod, the service list, an empty endpoints
	// object (no pre-existing endpoints).
	testServer, endpointsHandler := makeTestServer(t,
		serverResponse{http.StatusOK, newPodList(1)},
		serverResponse{http.StatusOK, serviceList},
		serverResponse{http.StatusOK, api.Endpoints{}})
	defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()})
	endpoints := NewEndpointController(client)
	if err := endpoints.SyncServiceEndpoints(); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	// The POST body must be the encoded endpoints object for the pod.
	data := runtime.EncodeOrDie(testapi.Codec(), &api.Endpoints{
		ObjectMeta: api.ObjectMeta{
			ResourceVersion: "",
		},
		Endpoints: []string{"1.2.3.4:8080"},
	})
	endpointsHandler.ValidateRequest(t, "/api/"+testapi.Version()+"/endpoints", "POST", &data)
}
// NewMasterComponents creates, initializes and starts master components based on the given config. func NewMasterComponents(c *Config) *MasterComponents { m, s, h := startMasterOrDie(c.MasterConfig) // TODO: Allow callers to pipe through a different master url and create a client/start components using it. glog.Infof("Master %+v", s.URL) if c.DeleteEtcdKeys { DeleteAllEtcdKeys() } restClient := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Version(), QPS: c.QPS, Burst: c.Burst}) rcStopCh := make(chan struct{}) controllerManager := controller.NewReplicationManager(restClient, c.Burst) // TODO: Support events once we can cleanly shutdown an event recorder. controllerManager.SetEventRecorder(&record.FakeRecorder{}) if c.StartReplicationManager { go controllerManager.Run(runtime.NumCPU(), rcStopCh) } var once sync.Once return &MasterComponents{ ApiServer: s, KubeMaster: m, RestClient: restClient, ControllerManager: controllerManager, rcStopCh: rcStopCh, EtcdHelper: h, once: once, } }
func TestDefaultErrorFunc(t *testing.T) { testPod := &api.Pod{JSONBase: api.JSONBase{ID: "foo"}} handler := util.FakeHandler{ StatusCode: 200, ResponseBody: api.EncodeOrDie(testPod), T: t, } mux := http.NewServeMux() // FakeHandler musn't be sent requests other than the one you want to test. mux.Handle("/api/v1beta1/pods/foo", &handler) server := httptest.NewServer(mux) factory := ConfigFactory{client.NewOrDie(server.URL, nil)} queue := cache.NewFIFO() errFunc := factory.makeDefaultErrorFunc(queue) errFunc(testPod, nil) for { // This is a terrible way to do this but I plan on replacing this // whole error handling system in the future. The test will time // out if something doesn't work. time.Sleep(10 * time.Millisecond) got, exists := queue.Get("foo") if !exists { continue } handler.ValidateRequest(t, "/api/v1beta1/pods/foo", "GET", nil) if e, a := testPod, got; !reflect.DeepEqual(e, a) { t.Errorf("Expected %v, got %v", e, a) } break } }
func TestBind(t *testing.T) { table := []struct { binding *api.Binding }{ {binding: &api.Binding{PodID: "foo", Host: "foohost.kubernetes.mydomain.com"}}, } for _, item := range table { handler := util.FakeHandler{ StatusCode: 200, ResponseBody: "", T: t, } server := httptest.NewServer(&handler) client := client.NewOrDie(server.URL, nil) b := binder{client} if err := b.Bind(item.binding); err != nil { t.Errorf("Unexpected error: %v", err) continue } expectedBody := api.EncodeOrDie(item.binding) handler.ValidateRequest(t, "/api/v1beta1/bindings", "POST", &expectedBody) } }
func TestRCManagerNotReady(t *testing.T) { client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Version()}) fakePodControl := FakePodControl{} manager := NewReplicationManager(client, 2) manager.podControl = &fakePodControl manager.podStoreSynced = func() bool { return false } // Simulates the rc reflector running before the pod reflector. We don't // want to end up creating replicas in this case until the pod reflector // has synced, so the rc manager should just requeue the rc. controllerSpec := newReplicationController(1) manager.rcStore.Store.Add(controllerSpec) rcKey := getKey(controllerSpec, t) manager.syncReplicationController(rcKey) validateSyncReplication(t, &fakePodControl, 0, 0) queueRC, _ := manager.queue.Get() if queueRC != rcKey { t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC) } manager.podStoreSynced = alwaysReady manager.syncReplicationController(rcKey) validateSyncReplication(t, &fakePodControl, 1, 0) }
// TestOverlappingRCs: when several rcs select the same pod, an addPod event
// must enqueue only the oldest rc (by CreationTimestamp). The outer loop
// repeats the experiment with fresh random shuffles.
func TestOverlappingRCs(t *testing.T) {
	client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Version()})

	for i := 0; i < 5; i++ {
		manager := NewReplicationManager(client, 10)
		manager.podStoreSynced = alwaysReady

		// Create 10 rcs, shuffled them randomly and insert them into the rc manager's store
		// (NOTE(review): j runs 1..9, so 9 rcs are actually created — the
		// original comment overstates by one.)
		var controllers []*api.ReplicationController
		for j := 1; j < 10; j++ {
			controllerSpec := newReplicationController(1)
			// Distinct creation days make controllers[0] the oldest.
			controllerSpec.CreationTimestamp = util.Date(2014, time.December, j, 0, 0, 0, 0, time.Local)
			controllerSpec.Name = string(util.NewUUID())
			controllers = append(controllers, controllerSpec)
		}
		shuffledControllers := shuffle(controllers)
		for j := range shuffledControllers {
			manager.rcStore.Store.Add(shuffledControllers[j])
		}
		// Add a pod and make sure only the oldest rc is synced
		pods := newPodList(nil, 1, api.PodPending, controllers[0])
		rcKey := getKey(controllers[0], t)

		manager.addPod(&pods.Items[0])
		queueRC, _ := manager.queue.Get()
		if queueRC != rcKey {
			t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC)
		}
	}
}
func TestDeleteFinalStateUnknown(t *testing.T) { client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Version()}) fakePodControl := FakePodControl{} manager := NewReplicationManager(client, BurstReplicas) manager.podStoreSynced = alwaysReady manager.podControl = &fakePodControl received := make(chan string) manager.syncHandler = func(key string) error { received <- key return nil } // The DeletedFinalStateUnknown object should cause the rc manager to insert // the controller matching the selectors of the deleted pod into the work queue. controllerSpec := newReplicationController(1) manager.rcStore.Store.Add(controllerSpec) pods := newPodList(nil, 1, api.PodRunning, controllerSpec) manager.deletePod(cache.DeletedFinalStateUnknown{Key: "foo", Obj: &pods.Items[0]}) go manager.worker() expected := getKey(controllerSpec, t) select { case key := <-received: if key != expected { t.Errorf("Unexpected sync all for rc %v, expected %v", key, expected) } case <-time.After(100 * time.Millisecond): t.Errorf("Processing DeleteFinalStateUnknown took longer than expected") } }
// TestSecrets tests apiserver-side behavior of creation of secret objects and their use by pods.
func TestSecrets(t *testing.T) {
	helper, err := framework.NewHelper()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// The handler closure captures m before it is assigned; requests are
	// only served after master.New below fills it in.
	var m *master.Master
	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		m.Handler.ServeHTTP(w, req)
	}))
	defer s.Close()

	m = master.New(&master.Config{
		EtcdHelper:            helper,
		KubeletClient:         client.FakeKubeletClient{},
		EnableCoreControllers: true,
		EnableLogsSupport:     false,
		EnableUISupport:       false,
		EnableIndex:           true,
		APIPrefix:             "/api",
		Authorizer:            apiserver.NewAlwaysAllowAuthorizer(),
		AdmissionControl:      admit.NewAlwaysAdmit(),
	})

	// Start from a clean etcd before running the shared secret tests.
	framework.DeleteAllEtcdKeys()
	client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Version()})
	DoTestSecrets(t, client, testapi.Version())
}
// TestPodControllerLookup checks getPodController: a pod maps to an rc only
// when both the label selector and the namespace agree; otherwise the
// lookup yields nil (expressed here as an empty outRCName).
func TestPodControllerLookup(t *testing.T) {
	manager := NewReplicationManager(client.NewOrDie(&client.Config{Host: "", Version: testapi.Version()}), BurstReplicas)
	manager.podStoreSynced = alwaysReady
	testCases := []struct {
		inRCs     []*api.ReplicationController
		pod       *api.Pod
		outRCName string
	}{
		// pods without labels don't match any rcs
		{
			inRCs: []*api.ReplicationController{
				{ObjectMeta: api.ObjectMeta{Name: "basic"}}},
			pod:       &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo1", Namespace: api.NamespaceAll}},
			outRCName: "",
		},
		// Matching labels, not namespace
		{
			inRCs: []*api.ReplicationController{
				{
					ObjectMeta: api.ObjectMeta{Name: "foo"},
					Spec: api.ReplicationControllerSpec{
						Selector: map[string]string{"foo": "bar"},
					},
				},
			},
			pod: &api.Pod{
				ObjectMeta: api.ObjectMeta{
					Name: "foo2", Namespace: "ns", Labels: map[string]string{"foo": "bar"}}},
			outRCName: "",
		},
		// Matching ns and labels returns the key to the rc, not the rc name
		{
			inRCs: []*api.ReplicationController{
				{
					ObjectMeta: api.ObjectMeta{Name: "bar", Namespace: "ns"},
					Spec: api.ReplicationControllerSpec{
						Selector: map[string]string{"foo": "bar"},
					},
				},
			},
			pod: &api.Pod{
				ObjectMeta: api.ObjectMeta{
					Name: "foo3", Namespace: "ns", Labels: map[string]string{"foo": "bar"}}},
			outRCName: "bar",
		},
	}
	for _, c := range testCases {
		// Note: rcs accumulate in the shared store across cases.
		for _, r := range c.inRCs {
			manager.rcStore.Add(r)
		}
		if rc := manager.getPodController(c.pod); rc != nil {
			if c.outRCName != rc.Name {
				t.Errorf("Got controller %+v expected %+v", rc.Name, c.outRCName)
			}
		} else if c.outRCName != "" {
			t.Errorf("Expected a controller %v pod %v, found none", c.outRCName, c.pod.Name)
		}
	}
}
// TestListWatchesCanWatch asserts that ListWatch.Watch issues a GET against
// the expected watch URL, with resource, namespace, field selector and
// resourceVersion encoded in the query string.
func TestListWatchesCanWatch(t *testing.T) {
	table := []struct {
		rv       string
		location string
		lw       ListWatch
	}{
		// Minion
		{
			location: buildLocation(buildResourcePath("watch", api.NamespaceAll, "minions"), buildQueryValues(api.NamespaceAll, url.Values{"resourceVersion": []string{""}})),
			rv:       "",
			lw: ListWatch{
				FieldSelector: parseSelectorOrDie(""),
				Resource:      "minions",
			},
		},
		{
			location: buildLocation(buildResourcePath("watch", api.NamespaceAll, "minions"), buildQueryValues(api.NamespaceAll, url.Values{"resourceVersion": []string{"42"}})),
			rv:       "42",
			lw: ListWatch{
				FieldSelector: parseSelectorOrDie(""),
				Resource:      "minions",
			},
		},
		// pod with "assigned" field selector.
		{
			location: buildLocation(buildResourcePath("watch", api.NamespaceAll, "pods"), buildQueryValues(api.NamespaceAll, url.Values{"fields": []string{"DesiredState.Host="}, "resourceVersion": []string{"0"}})),
			rv:       "0",
			lw: ListWatch{
				FieldSelector: labels.Set{"DesiredState.Host": ""}.AsSelector(),
				Resource:      "pods",
			},
		},
		// pod with namespace foo and assigned field selector
		{
			location: buildLocation(buildResourcePath("watch", "foo", "pods"), buildQueryValues("foo", url.Values{"fields": []string{"DesiredState.Host="}, "resourceVersion": []string{"0"}})),
			rv:       "0",
			lw: ListWatch{
				FieldSelector: labels.Set{"DesiredState.Host": ""}.AsSelector(),
				Resource:      "pods",
				Namespace:     "foo",
			},
		},
	}

	for _, item := range table {
		// Responds 500 — presumably so the watch terminates immediately;
		// the test only inspects the request that was made.
		handler := util.FakeHandler{
			StatusCode:   500,
			ResponseBody: "",
			T:            t,
		}
		server := httptest.NewServer(&handler)
		// NOTE(review): defer-in-loop keeps each server open until the test
		// returns; acceptable for this small table.
		defer server.Close()
		item.lw.Client = client.NewOrDie(&client.Config{Host: server.URL, Version: testapi.Version()})

		// This test merely tests that the correct request is made.
		item.lw.Watch(item.rv)
		handler.ValidateRequest(t, item.location, "GET", nil)
	}
}
// TestUnknownUserIsUnauthorized tests that a user who is unknown // to the authentication system get status code "Unauthorized". // An authorization module is installed in this scenario for integration // test purposes, but requests aren't expected to reach it. func TestUnknownUserIsUnauthorized(t *testing.T) { deleteAllEtcdKeys() // This file has alice and bob in it. // Set up a master helper, err := master.NewEtcdHelper(newEtcdClient(), "v1beta1") if err != nil { t.Fatalf("unexpected error: %v", err) } var m *master.Master s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { m.Handler.ServeHTTP(w, req) })) defer s.Close() m = master.New(&master.Config{ Client: client.NewOrDie(&client.Config{Host: s.URL}), EtcdHelper: helper, KubeletClient: client.FakeKubeletClient{}, EnableLogsSupport: false, EnableUISupport: false, EnableIndex: true, APIPrefix: "/api", Authenticator: getTestTokenAuth(), Authorizer: allowAliceAuthorizer{}, AdmissionControl: admit.NewAlwaysAdmit(), }) transport := http.DefaultTransport for _, r := range getTestRequests() { token := UnknownToken bodyBytes := bytes.NewReader([]byte(r.body)) req, err := http.NewRequest(r.verb, s.URL+r.URL, bodyBytes) if err != nil { t.Fatalf("unexpected error: %v", err) } req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) func() { resp, err := transport.RoundTrip(req) defer resp.Body.Close() if err != nil { t.Logf("case %v", r) t.Fatalf("unexpected error: %v", err) } // Expect all of unauthenticated user's request to be "Unauthorized" if resp.StatusCode != http.StatusUnauthorized { t.Logf("case %v", r) t.Errorf("Expected status %v, but got %v", http.StatusUnauthorized, resp.StatusCode) b, _ := ioutil.ReadAll(resp.Body) t.Errorf("Body: %v", string(b)) } }() } }