func TestPersistentVolumeDeleter(t *testing.T) {
	_, s := runAMaster(t)
	defer s.Close()

	deleteAllEtcdKeys()
	binderClient := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()})
	recyclerClient := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()})
	testClient := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()})

	binder := volumeclaimbinder.NewPersistentVolumeClaimBinder(binderClient, 1*time.Second)
	binder.Run()
	defer binder.Stop()

	recycler, _ := volumeclaimbinder.NewPersistentVolumeRecycler(recyclerClient, 1*time.Second, []volume.VolumePlugin{&volume.FakeVolumePlugin{"plugin-name", volume.NewFakeVolumeHost("/tmp/fake", nil, nil)}})
	recycler.Run()
	defer recycler.Stop()

	// This PV will be claimed, released, and deleted.
	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{Name: "fake-pv"},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeSource:        api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/tmp/foo"}},
			Capacity:                      api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("10G")},
			AccessModes:                   []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
			PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
		},
	}

	pvc := &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{Name: "fake-pvc"},
		Spec: api.PersistentVolumeClaimSpec{
			Resources:   api.ResourceRequirements{Requests: api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("5G")}},
			AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
		},
	}

	w, _ := testClient.PersistentVolumes().Watch(labels.Everything(), fields.Everything(), "0")
	defer w.Stop()

	_, _ = testClient.PersistentVolumes().Create(pv)
	_, _ = testClient.PersistentVolumeClaims(api.NamespaceDefault).Create(pvc)

	// wait until the binder pairs the volume and claim
	waitForPersistentVolumePhase(w, api.VolumeBound)

	// deleting a claim releases the volume, after which it can be deleted
	if err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Delete(pvc.Name); err != nil {
		t.Errorf("error deleting claim %s", pvc.Name)
	}
	waitForPersistentVolumePhase(w, api.VolumeReleased)

	for {
		event := <-w.ResultChan()
		if event.Type == watch.Deleted {
			break
		}
	}
}

func NewKubeletProvider(uri *url.URL) (MetricsSourceProvider, error) {
	// create clients
	kubeConfig, kubeletConfig, err := GetKubeConfigs(uri)
	if err != nil {
		return nil, err
	}
	kubeClient := kube_client.NewOrDie(kubeConfig)
	kubeletClient, err := NewKubeletClient(kubeletConfig)
	if err != nil {
		return nil, err
	}

	// Get nodes to test if the client is configured well. Watch gives less error information.
	if _, err := kubeClient.Nodes().List(kube_api.ListOptions{
		LabelSelector: labels.Everything(),
		FieldSelector: fields.Everything()}); err != nil {
		glog.Errorf("Failed to load nodes: %v", err)
	}

	// watch nodes
	lw := cache.NewListWatchFromClient(kubeClient, "nodes", kube_api.NamespaceAll, fields.Everything())
	nodeLister := &cache.StoreToNodeLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)}
	reflector := cache.NewReflector(lw, &kube_api.Node{}, nodeLister.Store, time.Hour)
	reflector.Run()

	return &kubeletProvider{
		nodeLister:    nodeLister,
		reflector:     reflector,
		kubeletClient: kubeletClient,
	}, nil
}

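// A minimal usage sketch (the source URI below is illustrative, not from the
// original source): callers hand the provider a parsed URL and handle the
// configuration error themselves.
func exampleKubeletProvider() (MetricsSourceProvider, error) {
	uri, err := url.Parse("https://kubernetes.default.svc")
	if err != nil {
		return nil, err
	}
	return NewKubeletProvider(uri)
}
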
func newTestController() (*DaemonSetsController, *FakePodControl) {
	client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Experimental.Version()})
	manager := NewDaemonSetsController(client)
	podControl := &FakePodControl{}
	manager.podControl = podControl
	return manager, podControl
}

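// A minimal usage sketch (hypothetical test, not part of the original suite):
// the helper returns both the controller and its fake pod control so a test
// can drive a sync and then assert on the recorded pod operations.
func TestNewTestControllerWiring(t *testing.T) {
	manager, podControl := newTestController()
	if manager.podControl != podControl {
		t.Errorf("expected the fake pod control to be wired into the manager")
	}
}
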
// TestSyncJobExpectations tests that a pod cannot sneak in between counting active pods
// and checking expectations.
func TestSyncJobExpectations(t *testing.T) {
	client := client.NewOrDie(&client.Config{Host: "", GroupVersion: testapi.Default.GroupVersion()})
	manager := NewJobController(client, controller.NoResyncPeriodFunc)
	fakePodControl := controller.FakePodControl{}
	manager.podControl = &fakePodControl
	manager.podStoreSynced = alwaysReady
	manager.updateHandler = func(job *extensions.Job) error { return nil }

	job := newJob(2, 2)
	manager.jobStore.Store.Add(job)
	pods := newPodList(2, api.PodPending, job)
	manager.podStore.Store.Add(&pods[0])

	manager.expectations = FakeJobExpectations{
		controller.NewControllerExpectations(), true, func() {
			// If we check active pods before checking expectations, the job
			// will create a new replica because it doesn't see this pod, but
			// has fulfilled its expectations.
			manager.podStore.Store.Add(&pods[1])
		},
	}
	manager.syncJob(getKey(job, t))
	if len(fakePodControl.Templates) != 0 {
		t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", 0, len(fakePodControl.Templates))
	}
	if len(fakePodControl.DeletePodName) != 0 {
		t.Errorf("Unexpected number of deletes. Expected %d, saw %d\n", 0, len(fakePodControl.DeletePodName))
	}
}

func TestRCManagerNotReady(t *testing.T) {
	// Set up a fake server to listen for requests, and run the rc manager in steady state.
	fakeResponse := serverResponse{
		statusCode: 200,
		obj:        &api.ReplicationController{},
	}
	testServer, _ := makeTestServer(t, api.NamespaceDefault, api.TenantDefault, fakeResponse)
	defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Default.Version()})
	fakePodControl := controller.FakePodControl{}
	manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, 2)
	manager.podControl = &fakePodControl
	manager.podStoreSynced = func() bool { return false }

	// Simulates the rc reflector running before the pod reflector. We don't
	// want to end up creating replicas in this case until the pod reflector
	// has synced, so the rc manager should just requeue the rc.
	controllerSpec := newReplicationController(1)
	manager.rcStore.Store.Add(controllerSpec)
	rcKey := getKey(controllerSpec, t)
	manager.syncReplicationController(rcKey)
	validateSyncReplication(t, &fakePodControl, 0, 0)

	queueRC, _ := manager.queue.Get()
	if queueRC != rcKey {
		t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC)
	}

	manager.podStoreSynced = alwaysReady
	manager.syncReplicationController(rcKey)
	validateSyncReplication(t, &fakePodControl, 1, 0)
}

func (c *Client) Setup(t *testing.T) *Client {
	c.handler = &utiltesting.FakeHandler{
		StatusCode: c.Response.StatusCode,
	}
	if responseBody := body(t, c.Response.Body, c.Response.RawBody); responseBody != nil {
		c.handler.ResponseBody = *responseBody
	}
	c.server = httptest.NewServer(c.handler)
	if c.Client == nil {
		c.Client = client.NewOrDie(&client.Config{
			Host:          c.server.URL,
			ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()},
		})

		// TODO: caesarxuchao: hacky way to specify version of Experimental client.
		// We will fix this by supporting multiple group versions in Config.
		c.ExtensionsClient = client.NewExtensionsOrDie(&client.Config{
			Host:          c.server.URL,
			ContentConfig: client.ContentConfig{GroupVersion: testapi.Extensions.GroupVersion()},
		})

		c.Clientset = clientset.NewForConfigOrDie(&client.Config{Host: c.server.URL})
	}
	c.QueryValidator = map[string]func(string, string) bool{}
	return c
}

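// A minimal sketch of calling Setup from a test in the same package, assuming
// the package's Response type carries the StatusCode/Body/RawBody fields
// referenced above; the status value here is illustrative.
func exampleClientSetup(t *testing.T) {
	c := (&Client{Response: Response{StatusCode: 200}}).Setup(t)
	defer c.server.Close()
	// c.Client, c.ExtensionsClient, and c.Clientset all target the fake server.
	_ = c.Client
}
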
func TestPodReadOnlyFilesystem(t *testing.T) {
	_, s := framework.RunAMaster(nil)
	defer s.Close()

	isReadOnly := true
	ns := framework.CreateTestingNamespace("pod-readonly-root", s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)

	client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: "xxx",
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "fake-name",
					Image: "fakeimage",
					SecurityContext: &api.SecurityContext{
						ReadOnlyRootFilesystem: &isReadOnly,
					},
				},
			},
		},
	}

	if _, err := client.Pods(ns.Name).Create(pod); err != nil {
		t.Errorf("Failed to create pod: %v", err)
	}

	deletePodOrErrorf(t, client, ns.Name, pod.Name)
}

func TestUnschedulableNodes(t *testing.T) {
	framework.DeleteAllEtcdKeys()

	var m *master.Master
	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		m.Handler.ServeHTTP(w, req)
	}))
	defer s.Close()

	masterConfig := framework.NewIntegrationTestMasterConfig()
	m, err := master.New(masterConfig)
	if err != nil {
		t.Fatalf("Error in bringing up the master: %v", err)
	}

	restClient := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

	schedulerConfigFactory := factory.NewConfigFactory(restClient, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
	schedulerConfig, err := schedulerConfigFactory.Create()
	if err != nil {
		t.Fatalf("Couldn't create scheduler config: %v", err)
	}
	eventBroadcaster := record.NewBroadcaster()
	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: api.DefaultSchedulerName})
	eventBroadcaster.StartRecordingToSink(restClient.Events(""))
	scheduler.New(schedulerConfig).Run()

	defer close(schedulerConfig.StopEverything)

	DoTestUnschedulableNodes(t, restClient, schedulerConfigFactory.NodeLister.Store)
}

func TestMasterService(t *testing.T) {
	_, s := framework.RunAMaster(framework.NewIntegrationTestMasterConfig())
	defer s.Close()

	client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

	err := wait.Poll(time.Second, time.Minute, func() (bool, error) {
		svcList, err := client.Services(api.NamespaceDefault).List(api.ListOptions{})
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			return false, nil
		}
		found := false
		for i := range svcList.Items {
			if svcList.Items[i].Name == "kubernetes" {
				found = true
			}
		}
		if found {
			ep, err := client.Endpoints(api.NamespaceDefault).Get("kubernetes")
			if err != nil {
				return false, nil
			}
			if countEndpoints(ep) == 0 {
				return false, fmt.Errorf("no endpoints for kubernetes service: %v", ep)
			}
			return true, nil
		}
		return false, nil
	})
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
}

func TestSyncPastDeadlineJobFinished(t *testing.T) {
	client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	manager := NewJobController(client, controller.NoResyncPeriodFunc)
	fakePodControl := controller.FakePodControl{}
	manager.podControl = &fakePodControl
	manager.podStoreSynced = alwaysReady
	var actual *extensions.Job
	manager.updateHandler = func(job *extensions.Job) error {
		actual = job
		return nil
	}

	job := newJob(1, 1)
	activeDeadlineSeconds := int64(10)
	job.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
	start := unversioned.Unix(unversioned.Now().Time.Unix()-15, 0)
	job.Status.StartTime = &start
	job.Status.Conditions = append(job.Status.Conditions, newCondition(extensions.JobFailed, "DeadlineExceeded", "Job was active longer than specified deadline"))
	manager.jobStore.Store.Add(job)
	err := manager.syncJob(getKey(job, t))
	if err != nil {
		t.Errorf("Unexpected error when syncing jobs %v", err)
	}
	if len(fakePodControl.Templates) != 0 {
		t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", 0, len(fakePodControl.Templates))
	}
	if len(fakePodControl.DeletePodName) != 0 {
		t.Errorf("Unexpected number of deletes. Expected %d, saw %d\n", 0, len(fakePodControl.DeletePodName))
	}
	if actual != nil {
		t.Error("Unexpected job modification")
	}
}

func TestSyncJobUpdateRequeue(t *testing.T) {
	client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Default.GroupAndVersion()})
	manager := NewJobController(client, controller.NoResyncPeriodFunc)
	fakePodControl := controller.FakePodControl{}
	manager.podControl = &fakePodControl
	manager.podStoreSynced = alwaysReady
	manager.updateHandler = func(job *extensions.Job) error { return fmt.Errorf("Fake error") }

	job := newJob(2, 2)
	manager.jobStore.Store.Add(job)
	err := manager.syncJob(getKey(job, t))
	if err != nil {
		t.Errorf("Unexpected error when syncing jobs, got %v", err)
	}

	ch := make(chan interface{})
	go func() {
		item, _ := manager.queue.Get()
		ch <- item
	}()
	select {
	case key := <-ch:
		expectedKey := getKey(job, t)
		if key != expectedKey {
			t.Errorf("Expected requeue of job with key %s got %s", expectedKey, key)
		}
	case <-time.After(controllerTimeout):
		manager.queue.ShutDown()
		t.Errorf("Expected to find a job in the queue, found none.")
	}
}

func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) {
	ns := api.NamespaceDefault
	testServer, endpointsHandler := makeTestServer(t, ns,
		serverResponse{http.StatusOK, &api.Endpoints{
			ObjectMeta: api.ObjectMeta{
				Name:            "foo",
				Namespace:       ns,
				ResourceVersion: "1",
			},
			Subsets: []api.EndpointSubset{{
				Addresses: []api.EndpointAddress{{IP: "6.7.8.9"}},
				Ports:     []api.EndpointPort{{Port: 1000}},
			}},
		}})
	// TODO: Uncomment when fix #19254
	// defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, GroupVersion: testapi.Default.GroupVersion()})
	endpoints := NewEndpointController(client, controller.NoResyncPeriodFunc)
	endpoints.serviceStore.Store.Add(&api.Service{
		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns},
		Spec:       api.ServiceSpec{Ports: []api.ServicePort{{Port: 80}}},
	})
	endpoints.syncService(ns + "/foo")
	endpointsHandler.ValidateRequestCount(t, 0)
}

// TestSecrets tests apiserver-side behavior of creation of secret objects and their use by pods.
func TestSecrets(t *testing.T) {
	etcdStorage, err := framework.NewEtcdStorage()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	var m *master.Master
	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		m.Handler.ServeHTTP(w, req)
	}))
	defer s.Close()

	m = master.New(&master.Config{
		DatabaseStorage:       etcdStorage,
		KubeletClient:         client.FakeKubeletClient{},
		EnableCoreControllers: true,
		EnableLogsSupport:     false,
		EnableUISupport:       false,
		EnableIndex:           true,
		APIPrefix:             "/api",
		Authorizer:            apiserver.NewAlwaysAllowAuthorizer(),
		AdmissionControl:      admit.NewAlwaysAdmit(),
		StorageVersions:       map[string]string{"": testapi.Default.Version()},
	})

	framework.DeleteAllEtcdKeys()
	client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()})
	DoTestSecrets(t, client, testapi.Default.Version())
}

func createKubeClientOrDie(kubernetesUrl *url.URL) *kube_client.Client {
	kubeConfig, err := kube_config.GetKubeClientConfig(kubernetesUrl)
	if err != nil {
		glog.Fatalf("Failed to get client config: %v", err)
	}
	return kube_client.NewOrDie(kubeConfig)
}

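// A minimal usage sketch (the URL is illustrative, not from the original
// source): the helper is "or die", so a caller only needs to supply a parsed
// apiserver location.
func exampleCreateKubeClient() *kube_client.Client {
	kubernetesUrl, err := url.Parse("https://kubernetes.default.svc")
	if err != nil {
		glog.Fatalf("Failed to parse kubernetes URL: %v", err)
	}
	return createKubeClientOrDie(kubernetesUrl)
}
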
// mustSetupScheduler starts the following components:
//   - k8s api server (a.k.a. master)
//   - scheduler
// It returns the scheduler config factory and a destroyFunc which should be
// used to remove resources after the test has finished.
// Notes on rate limiter:
//   - The BindPodsRateLimiter is nil, meaning no rate limits.
//   - The client rate limit is set to 5000.
func mustSetupScheduler() (schedulerConfigFactory *factory.ConfigFactory, destroyFunc func()) {
	framework.DeleteAllEtcdKeys()

	var m *master.Master
	masterConfig := framework.NewIntegrationTestMasterConfig()
	m = master.New(masterConfig)
	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		m.Handler.ServeHTTP(w, req)
	}))

	c := client.NewOrDie(&client.Config{
		Host:         s.URL,
		GroupVersion: testapi.Default.GroupVersion(),
		QPS:          5000.0,
		Burst:        5000,
	})

	schedulerConfigFactory = factory.NewConfigFactory(c, nil, api.DefaultSchedulerName)
	schedulerConfig, err := schedulerConfigFactory.Create()
	if err != nil {
		panic("Couldn't create scheduler config")
	}
	eventBroadcaster := record.NewBroadcaster()
	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
	eventBroadcaster.StartRecordingToSink(c.Events(""))
	scheduler.New(schedulerConfig).Run()

	destroyFunc = func() {
		glog.Infof("destroying")
		close(schedulerConfig.StopEverything)
		s.Close()
		glog.Infof("destroyed")
	}
	return
}

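// A hedged usage sketch (benchmark name and body are illustrative): a perf
// test sets the scheduler up once and tears everything down through the
// returned destroyFunc.
func BenchmarkExampleScheduling(b *testing.B) {
	schedulerConfigFactory, destroyFunc := mustSetupScheduler()
	defer destroyFunc()
	_ = schedulerConfigFactory // create nodes and pods via the factory's client here
}
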
func TestSyncEndpointsItemsPreexistingIdentical(t *testing.T) {
	ns := api.NamespaceDefault
	te := api.TenantDefault
	testServer, endpointsHandler := makeTestServer(t, api.NamespaceDefault, te,
		serverResponse{http.StatusOK, &api.Endpoints{
			ObjectMeta: api.ObjectMeta{
				ResourceVersion: "1",
				Name:            "foo",
				Namespace:       ns,
				Tenant:          te,
			},
			Subsets: []api.EndpointSubset{{
				Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
				Ports:     []api.EndpointPort{{Port: 8080, Protocol: "TCP"}},
			}},
		}})
	defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Default.Version()})
	endpoints := NewEndpointController(client, controller.NoResyncPeriodFunc)
	addPods(endpoints.podStore.Store, api.NamespaceDefault, 1, 1, 0)
	endpoints.serviceStore.Store.Add(&api.Service{
		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: api.NamespaceDefault},
		Spec: api.ServiceSpec{
			Selector: map[string]string{"foo": "bar"},
			Ports:    []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: util.NewIntOrStringFromInt(8080)}},
		},
	})
	endpoints.syncService(ns + "/foo")
	endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", api.NamespaceDefault, "foo"), "GET", nil)
}

// TestRSSyncExpectations tests that a pod cannot sneak in between counting active pods
// and checking expectations.
func TestRSSyncExpectations(t *testing.T) {
	client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	fakePodControl := controller.FakePodControl{}
	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, 2)
	manager.podStoreSynced = alwaysReady
	manager.podControl = &fakePodControl

	labelMap := map[string]string{"foo": "bar"}
	rsSpec := newReplicaSet(2, labelMap)
	manager.rsStore.Store.Add(rsSpec)
	pods := newPodList(nil, 2, api.PodPending, labelMap, rsSpec)
	manager.podStore.Store.Add(&pods.Items[0])
	postExpectationsPod := pods.Items[1]

	manager.expectations = FakeRSExpectations{
		controller.NewControllerExpectations(), true, func() {
			// If we check active pods before checking expectations, the
			// ReplicaSet will create a new replica because it doesn't see
			// this pod, but has fulfilled its expectations.
			manager.podStore.Store.Add(&postExpectationsPod)
		},
	}
	manager.syncReplicaSet(getKey(rsSpec, t))
	validateSyncReplicaSet(t, &fakePodControl, 0, 0)
}

// NewMasterComponents creates, initializes and starts master components based on the given config.
func NewMasterComponents(c *Config) *MasterComponents {
	m, s, e := startMasterOrDie(c.MasterConfig)
	// TODO: Allow callers to pipe through a different master url and create a client/start components using it.
	glog.Infof("Master %+v", s.URL)
	if c.DeleteEtcdKeys {
		DeleteAllEtcdKeys()
	}
	restClient := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Version(), QPS: c.QPS, Burst: c.Burst})
	rcStopCh := make(chan struct{})
	controllerManager := replicationcontroller.NewReplicationManager(restClient, c.Burst)

	// TODO: Support events once we can cleanly shutdown an event recorder.
	controllerManager.SetEventRecorder(&record.FakeRecorder{})
	if c.StartReplicationManager {
		go controllerManager.Run(runtime.NumCPU(), rcStopCh)
	}
	var once sync.Once
	return &MasterComponents{
		ApiServer:         s,
		KubeMaster:        m,
		RestClient:        restClient,
		ControllerManager: controllerManager,
		rcStopCh:          rcStopCh,
		EtcdStorage:       e,
		once:              once,
	}
}

func TestSyncEndpointsProtocolUDP(t *testing.T) {
	ns := "other"
	te := "other"
	testServer, endpointsHandler := makeTestServer(t, ns, te,
		serverResponse{http.StatusOK, &api.Endpoints{
			ObjectMeta: api.ObjectMeta{
				Name:            "foo",
				Namespace:       ns,
				ResourceVersion: "1",
			},
			Subsets: []api.EndpointSubset{{
				Addresses: []api.EndpointAddress{{IP: "6.7.8.9"}},
				Ports:     []api.EndpointPort{{Port: 1000, Protocol: "UDP"}},
			}},
		}})
	defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Default.Version()})
	endpoints := NewEndpointController(client, controller.NoResyncPeriodFunc)
	endpoints.serviceStore.Store.Add(&api.Service{
		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns},
		Spec: api.ServiceSpec{
			Selector: map[string]string{},
			Ports:    []api.ServicePort{{Port: 80}},
		},
	})
	endpoints.syncService(ns + "/foo")
	endpointsHandler.ValidateRequestCount(t, 0)
}

func TestBind(t *testing.T) {
	table := []struct {
		binding *api.Binding
	}{
		{binding: &api.Binding{
			ObjectMeta: api.ObjectMeta{
				Namespace: api.NamespaceDefault,
				Name:      "foo",
			},
			Target: api.ObjectReference{
				Name: "foohost.kubernetes.mydomain.com",
			},
		}},
	}

	for _, item := range table {
		handler := util.FakeHandler{
			StatusCode:   200,
			ResponseBody: "",
			T:            t,
		}
		server := httptest.NewServer(&handler)
		defer server.Close()
		client := client.NewOrDie(&client.Config{Host: server.URL, Version: testapi.Default.Version()})
		b := binder{client}

		if err := b.Bind(item.binding); err != nil {
			t.Errorf("Unexpected error: %v", err)
			continue
		}
		expectedBody := runtime.EncodeOrDie(testapi.Default.Codec(), item.binding)
		handler.ValidateRequest(t, testapi.Default.ResourcePath("bindings", api.NamespaceDefault, ""), "POST", &expectedBody)
	}
}

// NewMasterComponents creates, initializes and starts master components based on the given config.
func NewMasterComponents(c *Config) *MasterComponents {
	m, s := startMasterOrDie(c.MasterConfig)
	// TODO: Allow callers to pipe through a different master url and create a client/start components using it.
	glog.Infof("Master %+v", s.URL)
	if c.DeleteEtcdKeys {
		DeleteAllEtcdKeys()
	}
	// TODO: caesarxuchao: remove this client when the refactoring of client library is done.
	restClient := client.NewOrDie(&client.Config{Host: s.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}, QPS: c.QPS, Burst: c.Burst})
	clientset := clientset.NewForConfigOrDie(&client.Config{Host: s.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}, QPS: c.QPS, Burst: c.Burst})
	rcStopCh := make(chan struct{})
	controllerManager := replicationcontroller.NewReplicationManager(clientset, controller.NoResyncPeriodFunc, c.Burst)

	// TODO: Support events once we can cleanly shutdown an event recorder.
	controllerManager.SetEventRecorder(&record.FakeRecorder{})
	if c.StartReplicationManager {
		go controllerManager.Run(goruntime.NumCPU(), rcStopCh)
	}
	var once sync.Once
	return &MasterComponents{
		ApiServer:         s,
		KubeMaster:        m,
		RestClient:        restClient,
		ControllerManager: controllerManager,
		rcStopCh:          rcStopCh,
		once:              once,
	}
}

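// A hedged usage sketch: the Config fields shown are exactly the ones the
// constructor above reads; teardown is left to whatever stop mechanism the
// surrounding framework provides.
func exampleMasterComponents() {
	mc := NewMasterComponents(&Config{
		DeleteEtcdKeys:          true,
		StartReplicationManager: true,
		QPS:                     100,
		Burst:                   100,
	})
	glog.Infof("apiserver listening at %v", mc.ApiServer.URL)
}
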
func TestOverlappingRCs(t *testing.T) {
	client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

	for i := 0; i < 5; i++ {
		manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, 10)
		manager.podStoreSynced = alwaysReady

		// Create several rcs, shuffle them randomly, and insert them into the rc manager's store.
		var controllers []*api.ReplicationController
		for j := 1; j < 10; j++ {
			controllerSpec := newReplicationController(1)
			controllerSpec.CreationTimestamp = unversioned.Date(2014, time.December, j, 0, 0, 0, 0, time.Local)
			controllerSpec.Name = string(util.NewUUID())
			controllers = append(controllers, controllerSpec)
		}
		shuffledControllers := shuffle(controllers)
		for j := range shuffledControllers {
			manager.rcStore.Store.Add(shuffledControllers[j])
		}
		// Add a pod and make sure only the oldest rc is synced.
		pods := newPodList(nil, 1, api.PodPending, controllers[0])
		rcKey := getKey(controllers[0], t)

		manager.addPod(&pods.Items[0])
		queueRC, _ := manager.queue.Get()
		if queueRC != rcKey {
			t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC)
		}
	}
}

func NewDefaultFramework(baseName string) *framework.Framework {
	client := client.NewOrDie(&restclient.Config{Host: *apiServerAddress})
	return framework.NewFramework(baseName, framework.FrameworkOptions{
		ClientQPS:   100,
		ClientBurst: 100,
	}, client)
}

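// A minimal usage sketch (names are illustrative): each e2e spec file
// typically builds one framework instance through the helper above.
func exampleFrameworkUsage() {
	f := NewDefaultFramework("example-e2e")
	_ = f // register BeforeEach/AfterEach hooks and specs against f here
}
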
func TestDeleteFinalStateUnknown(t *testing.T) {
	client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	fakePodControl := controller.FakePodControl{}
	manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady
	manager.podControl = &fakePodControl

	received := make(chan string)
	manager.syncHandler = func(key string) error {
		received <- key
		return nil
	}

	// The DeletedFinalStateUnknown object should cause the rc manager to insert
	// the controller matching the selectors of the deleted pod into the work queue.
	controllerSpec := newReplicationController(1)
	manager.rcStore.Store.Add(controllerSpec)
	pods := newPodList(nil, 1, api.PodRunning, controllerSpec)
	manager.deletePod(cache.DeletedFinalStateUnknown{Key: "foo", Obj: &pods.Items[0]})

	go manager.worker()

	expected := getKey(controllerSpec, t)
	select {
	case key := <-received:
		if key != expected {
			t.Errorf("Unexpected sync all for rc %v, expected %v", key, expected)
		}
	case <-time.After(util.ForeverTestTimeout):
		t.Errorf("Processing DeleteFinalStateUnknown took longer than expected")
	}
}

func TestRCManagerNotReady(t *testing.T) {
	client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	fakePodControl := controller.FakePodControl{}
	manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, 2)
	manager.podControl = &fakePodControl
	manager.podStoreSynced = func() bool { return false }

	// Simulates the rc reflector running before the pod reflector. We don't
	// want to end up creating replicas in this case until the pod reflector
	// has synced, so the rc manager should just requeue the rc.
	controllerSpec := newReplicationController(1)
	manager.rcStore.Store.Add(controllerSpec)
	rcKey := getKey(controllerSpec, t)
	manager.syncReplicationController(rcKey)
	validateSyncReplication(t, &fakePodControl, 0, 0)

	queueRC, _ := manager.queue.Get()
	if queueRC != rcKey {
		t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC)
	}

	manager.podStoreSynced = alwaysReady
	manager.syncReplicationController(rcKey)
	validateSyncReplication(t, &fakePodControl, 1, 0)
}

func TestUnschedulableNodes(t *testing.T) {
	framework.DeleteAllEtcdKeys()

	var m *master.Master
	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		m.Handler.ServeHTTP(w, req)
	}))
	defer s.Close()

	masterConfig := framework.NewIntegrationTestMasterConfig()
	m = master.New(masterConfig)

	restClient := client.NewOrDie(&client.Config{Host: s.URL, GroupVersion: testapi.Default.GroupVersion()})

	schedulerConfigFactory := factory.NewConfigFactory(restClient, nil)
	schedulerConfig, err := schedulerConfigFactory.Create()
	if err != nil {
		t.Fatalf("Couldn't create scheduler config: %v", err)
	}
	eventBroadcaster := record.NewBroadcaster()
	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
	eventBroadcaster.StartRecordingToSink(restClient.Events(""))
	scheduler.New(schedulerConfig).Run()

	defer close(schedulerConfig.StopEverything)

	DoTestUnschedulableNodes(t, restClient, schedulerConfigFactory.NodeLister.Store)
}

func TestCheckLeftoverEndpoints(t *testing.T) {
	ns := api.NamespaceDefault
	// Note that this requests *all* endpoints, therefore the NamespaceAll below.
	testServer, _ := makeTestServer(t, api.NamespaceAll,
		serverResponse{http.StatusOK, &api.EndpointsList{
			ListMeta: unversioned.ListMeta{
				ResourceVersion: "1",
			},
			Items: []api.Endpoints{{
				ObjectMeta: api.ObjectMeta{
					Name:            "foo",
					Namespace:       ns,
					ResourceVersion: "1",
				},
				Subsets: []api.EndpointSubset{{
					Addresses: []api.EndpointAddress{{IP: "6.7.8.9"}},
					Ports:     []api.EndpointPort{{Port: 1000}},
				}},
			}},
		}})
	defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Default.Version()})
	endpoints := NewEndpointController(client, controller.NoResyncPeriodFunc)
	endpoints.checkLeftoverEndpoints()

	if e, a := 1, endpoints.queue.Len(); e != a {
		t.Fatalf("Expected %v, got %v", e, a)
	}
	got, _ := endpoints.queue.Get()
	if e, a := ns+"/foo", got; e != a {
		t.Errorf("Expected %v, got %v", e, a)
	}
}

func TestDSManagerInit(t *testing.T) {
	// Insert a stable daemon set and make sure we don't create an extra pod
	// for the one node which already has a daemon after a simulated restart.
	ds := newDaemonSet("test")
	ds.Status = extensions.DaemonSetStatus{
		CurrentNumberScheduled: 1,
		NumberMisscheduled:     0,
		DesiredNumberScheduled: 1,
	}
	nodeName := "only-node"
	podList := &api.PodList{
		Items: []api.Pod{
			*newPod("podname", nodeName, simpleDaemonSetLabel),
		},
	}
	response := runtime.EncodeOrDie(testapi.Default.Codec(), podList)
	fakeHandler := utiltesting.FakeHandler{
		StatusCode:   200,
		ResponseBody: response,
	}
	testServer := httptest.NewServer(&fakeHandler)
	// TODO: Uncomment when fix #19254
	// defer testServer.Close()

	client := client.NewOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	manager := NewDaemonSetsController(client, controller.NoResyncPeriodFunc)
	manager.dsStore.Add(ds)
	manager.nodeStore.Add(newNode(nodeName, nil))
	manager.podStoreSynced = alwaysReady
	controller.SyncAllPodsWithStore(manager.kubeClient, manager.podStore.Store)

	fakePodControl := &controller.FakePodControl{}
	manager.podControl = fakePodControl
	manager.syncHandler(getKey(ds, t))
	validateSyncDaemonSets(t, fakePodControl, 0, 0)
}

func TestPodControllerLookup(t *testing.T) {
	manager := NewReplicationManager(client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}), controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady
	testCases := []struct {
		inRCs     []*api.ReplicationController
		pod       *api.Pod
		outRCName string
	}{
		// pods without labels don't match any rcs
		{
			inRCs: []*api.ReplicationController{
				{ObjectMeta: api.ObjectMeta{Name: "basic"}}},
			pod:       &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo1", Namespace: api.NamespaceAll}},
			outRCName: "",
		},
		// Matching labels, not namespace
		{
			inRCs: []*api.ReplicationController{
				{
					ObjectMeta: api.ObjectMeta{Name: "foo"},
					Spec: api.ReplicationControllerSpec{
						Selector: map[string]string{"foo": "bar"},
					},
				},
			},
			pod: &api.Pod{
				ObjectMeta: api.ObjectMeta{
					Name: "foo2", Namespace: "ns", Labels: map[string]string{"foo": "bar"}}},
			outRCName: "",
		},
		// Matching ns and labels returns the key to the rc, not the rc name
		{
			inRCs: []*api.ReplicationController{
				{
					ObjectMeta: api.ObjectMeta{Name: "bar", Namespace: "ns"},
					Spec: api.ReplicationControllerSpec{
						Selector: map[string]string{"foo": "bar"},
					},
				},
			},
			pod: &api.Pod{
				ObjectMeta: api.ObjectMeta{
					Name: "foo3", Namespace: "ns", Labels: map[string]string{"foo": "bar"}}},
			outRCName: "bar",
		},
	}
	for _, c := range testCases {
		for _, r := range c.inRCs {
			manager.rcStore.Add(r)
		}
		if rc := manager.getPodController(c.pod); rc != nil {
			if c.outRCName != rc.Name {
				t.Errorf("Got controller %+v expected %+v", rc.Name, c.outRCName)
			}
		} else if c.outRCName != "" {
			t.Errorf("Expected a controller %v pod %v, found none", c.outRCName, c.pod.Name)
		}
	}
}

func newTestController() (*DaemonSetsController, *controller.FakePodControl) {
	client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	manager := NewDaemonSetsController(client, controller.NoResyncPeriodFunc)
	manager.podStoreSynced = alwaysReady
	podControl := &controller.FakePodControl{}
	manager.podControl = podControl
	return manager, podControl
}