func TestUnschedulableNodes(t *testing.T) {
    etcdStorage, err := framework.NewEtcdStorage()
    if err != nil {
        t.Fatalf("Couldn't create etcd storage: %v", err)
    }
    expEtcdStorage, err := framework.NewExtensionsEtcdStorage(nil)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    storageDestinations := master.NewStorageDestinations()
    storageDestinations.AddAPIGroup("", etcdStorage)
    storageDestinations.AddAPIGroup("extensions", expEtcdStorage)

    storageVersions := make(map[string]string)
    storageVersions[""] = testapi.Default.Version()
    storageVersions["extensions"] = testapi.Extensions.GroupAndVersion()

    framework.DeleteAllEtcdKeys()

    // The handler closes over m, which is assigned below once the master exists;
    // m is only dereferenced when a request is actually served.
    var m *master.Master
    s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
        m.Handler.ServeHTTP(w, req)
    }))
    defer s.Close()

    m = master.New(&master.Config{
        StorageDestinations:   storageDestinations,
        KubeletClient:         client.FakeKubeletClient{},
        EnableCoreControllers: true,
        EnableLogsSupport:     false,
        EnableUISupport:       false,
        EnableIndex:           true,
        APIPrefix:             "/api",
        Authorizer:            apiserver.NewAlwaysAllowAuthorizer(),
        AdmissionControl:      admit.NewAlwaysAdmit(),
        StorageVersions:       storageVersions,
    })

    restClient := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()})

    // Build a scheduler against the test API server and start it.
    schedulerConfigFactory := factory.NewConfigFactory(restClient, nil)
    schedulerConfig, err := schedulerConfigFactory.Create()
    if err != nil {
        t.Fatalf("Couldn't create scheduler config: %v", err)
    }
    eventBroadcaster := record.NewBroadcaster()
    schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
    eventBroadcaster.StartRecordingToSink(restClient.Events(""))
    scheduler.New(schedulerConfig).Run()

    defer close(schedulerConfig.StopEverything)

    // DoTestUnschedulableNodes drives the actual scheduling assertions.
    DoTestUnschedulableNodes(t, restClient, schedulerConfigFactory.NodeLister.Store)
}
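// The test above can create the httptest server before the master exists because
// the handler closes over m and only dereferences it per request. A minimal,
// standalone sketch of that late-binding pattern using only the standard
// library; all names below are illustrative and not part of the test above.
package main

import (
    "fmt"
    "net/http"
    "net/http/httptest"
)

func main() {
    var delegate http.Handler // assigned after the server has been created

    s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
        delegate.ServeHTTP(w, req) // looked up at request time, so late binding is safe
    }))
    defer s.Close()

    // Bind the real handler only after the server (and its URL) exists.
    delegate = http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
        fmt.Fprintln(w, "hello from the late-bound handler")
    })

    resp, err := http.Get(s.URL)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    fmt.Println("status:", resp.Status)
}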
// Run runs the specified SchedulerServer. This should never exit.
func (s *SchedulerServer) Run(_ []string) error {
    if s.Kubeconfig == "" && s.Master == "" {
        glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
    }

    // This creates a client, first loading any specified kubeconfig
    // file, and then overriding the Master flag, if non-empty.
    kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
        &clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Kubeconfig},
        &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
    if err != nil {
        return err
    }
    kubeconfig.QPS = 50.0
    kubeconfig.Burst = 100

    kubeClient, err := client.New(kubeconfig)
    if err != nil {
        glog.Fatalf("Invalid API configuration: %v", err)
    }

    go func() {
        mux := http.NewServeMux()
        healthz.InstallHandler(mux)
        if s.EnableProfiling {
            mux.HandleFunc("/debug/pprof/", pprof.Index)
            mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
            mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
        }
        mux.Handle("/metrics", prometheus.Handler())

        server := &http.Server{
            Addr:    net.JoinHostPort(s.Address.String(), strconv.Itoa(s.Port)),
            Handler: mux,
        }
        glog.Fatal(server.ListenAndServe())
    }()

    configFactory := factory.NewConfigFactory(kubeClient, util.NewTokenBucketRateLimiter(s.BindPodsQPS, s.BindPodsBurst))
    config, err := s.createConfig(configFactory)
    if err != nil {
        glog.Fatalf("Failed to create scheduler configuration: %v", err)
    }

    eventBroadcaster := record.NewBroadcaster()
    config.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
    eventBroadcaster.StartLogging(glog.Infof)
    eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

    sched := scheduler.New(config)
    sched.Run()

    select {}
}
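// The goroutine above serves health checks, optional pprof profiling, and
// Prometheus metrics on a mux separate from http.DefaultServeMux. A minimal
// standalone sketch of the same wiring using only the standard library; the
// /healthz handler and the listen address are illustrative stand-ins for
// healthz.InstallHandler and prometheus.Handler, which are not shown here.
package main

import (
    "fmt"
    "net/http"
    "net/http/pprof"
)

func main() {
    mux := http.NewServeMux()

    // Liveness endpoint (stand-in for healthz.InstallHandler).
    mux.HandleFunc("/healthz", func(w http.ResponseWriter, req *http.Request) {
        fmt.Fprintln(w, "ok")
    })

    // With a custom mux, the pprof handlers must be registered explicitly;
    // importing net/http/pprof only wires them into http.DefaultServeMux.
    mux.HandleFunc("/debug/pprof/", pprof.Index)
    mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
    mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)

    server := &http.Server{Addr: "127.0.0.1:10254", Handler: mux} // illustrative address
    if err := server.ListenAndServe(); err != nil {
        fmt.Println("server exited:", err)
    }
}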
func startComponents(firstManifestURL, secondManifestURL string) (string, string) {
    // Setup
    servers := []string{}
    glog.Infof("Creating etcd client pointing to %v", servers)

    handler := delegateHandler{}
    apiServer := httptest.NewServer(&handler)

    // Clear out etcd, retrying with backoff if the cluster is not up yet.
    etcdClient := etcd.NewClient(servers)
    sleep := 4 * time.Second
    ok := false
    for i := 0; i < 3; i++ {
        keys, err := etcdClient.Get("/", false, false)
        if err != nil {
            glog.Warningf("Unable to list root etcd keys: %v", err)
            if i < 2 {
                time.Sleep(sleep)
                sleep *= 2 // double the wait before the next retry
            }
            continue
        }
        for _, node := range keys.Node.Nodes {
            if _, err := etcdClient.Delete(node.Key, true); err != nil {
                glog.Fatalf("Unable to delete key: %v", err)
            }
        }
        ok = true
        break
    }
    if !ok {
        glog.Fatalf("Failed to connect to etcd")
    }

    cl := client.NewOrDie(&client.Config{Host: apiServer.URL, Version: testapi.Default.GroupAndVersion()})
    // TODO: caesarxuchao: hacky way to specify version of Experimental client.
    // We will fix this by supporting multiple group versions in Config.
    cl.ExtensionsClient = client.NewExtensionsOrDie(&client.Config{Host: apiServer.URL, Version: testapi.Extensions.GroupAndVersion()})

    storageVersions := make(map[string]string)
    etcdStorage, err := master.NewEtcdStorage(etcdClient, latest.GroupOrDie("").InterfacesFor, testapi.Default.GroupAndVersion(), etcdtest.PathPrefix())
    storageVersions[""] = testapi.Default.GroupAndVersion()
    if err != nil {
        glog.Fatalf("Unable to get etcd storage: %v", err)
    }
    expEtcdStorage, err := master.NewEtcdStorage(etcdClient, latest.GroupOrDie("extensions").InterfacesFor, testapi.Extensions.GroupAndVersion(), etcdtest.PathPrefix())
    storageVersions["extensions"] = testapi.Extensions.GroupAndVersion()
    if err != nil {
        glog.Fatalf("Unable to get etcd storage for experimental: %v", err)
    }
    storageDestinations := master.NewStorageDestinations()
    storageDestinations.AddAPIGroup("", etcdStorage)
    storageDestinations.AddAPIGroup("extensions", expEtcdStorage)

    // Master
    host, port, err := net.SplitHostPort(strings.TrimPrefix(apiServer.URL, "http://"))
    if err != nil {
        glog.Fatalf("Unable to parse URL '%v': %v", apiServer.URL, err)
    }
    portNumber, err := strconv.Atoi(port)
    if err != nil {
        glog.Fatalf("Non-numeric port? %v", err)
    }
    publicAddress := net.ParseIP(host)
    if publicAddress == nil {
        glog.Fatalf("No public address for %s", host)
    }

    // Create a master and install handlers into mux.
    m := master.New(&master.Config{
        StorageDestinations:   storageDestinations,
        KubeletClient:         fakeKubeletClient{},
        EnableCoreControllers: true,
        EnableLogsSupport:     false,
        EnableProfiling:       true,
        APIPrefix:             "/api",
        APIGroupPrefix:        "/apis",
        Authorizer:            apiserver.NewAlwaysAllowAuthorizer(),
        AdmissionControl:      admit.NewAlwaysAdmit(),
        ReadWritePort:         portNumber,
        PublicAddress:         publicAddress,
        CacheTimeout:          2 * time.Second,
        StorageVersions:       storageVersions,
    })
    handler.delegate = m.Handler

    // Scheduler
    schedulerConfigFactory := factory.NewConfigFactory(cl, nil)
    schedulerConfig, err := schedulerConfigFactory.Create()
    if err != nil {
        glog.Fatalf("Couldn't create scheduler config: %v", err)
    }
    eventBroadcaster := record.NewBroadcaster()
    schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
    eventBroadcaster.StartLogging(glog.Infof)
    eventBroadcaster.StartRecordingToSink(cl.Events(""))
    scheduler.New(schedulerConfig).Run()

    // Ensure the service endpoints are synced several times within the window that the integration tests wait.
    go endpointcontroller.NewEndpointController(cl, controller.NoResyncPeriodFunc).
        Run(3, util.NeverStop)

    // TODO: Write an integration test for the replication controllers watch.
    go replicationControllerPkg.NewReplicationManager(cl, controller.NoResyncPeriodFunc, replicationControllerPkg.BurstReplicas).
        Run(3, util.NeverStop)

    nodeController := nodecontroller.NewNodeController(nil, cl, 5*time.Minute, util.NewFakeRateLimiter(),
        util.NewFakeRateLimiter(), 40*time.Second, 60*time.Second, 5*time.Second, nil, false)
    nodeController.Run(5 * time.Second)
    cadvisorInterface := new(cadvisor.Fake)

    // Kubelet (localhost)
    testRootDir := makeTempDirOrDie("kubelet_integ_1.", "")
    configFilePath := makeTempDirOrDie("config", testRootDir)
    glog.Infof("Using %s as root dir for kubelet #1", testRootDir)
    fakeDocker1.VersionInfo = docker.Env{"ApiVersion=1.20"}

    kcfg := kubeletapp.SimpleKubelet(
        cl,
        &fakeDocker1,
        "localhost",
        testRootDir,
        firstManifestURL,
        "127.0.0.1",
        10250, /* KubeletPort */
        0,     /* ReadOnlyPort */
        api.NamespaceDefault,
        empty_dir.ProbeVolumePlugins(),
        nil,
        cadvisorInterface,
        configFilePath,
        nil,
        kubecontainer.FakeOS{},
        1*time.Second,  /* FileCheckFrequency */
        1*time.Second,  /* HTTPCheckFrequency */
        10*time.Second, /* MinimumGCAge */
        3*time.Second,  /* NodeStatusUpdateFrequency */
        10*time.Second /* SyncFrequency */)
    kubeletapp.RunKubelet(kcfg, nil)

    // Kubelet (machine)
    // Create a second kubelet so that the guestbook example's two redis slaves both
    // have a place they can schedule.
    testRootDir = makeTempDirOrDie("kubelet_integ_2.", "")
    glog.Infof("Using %s as root dir for kubelet #2", testRootDir)
    fakeDocker2.VersionInfo = docker.Env{"ApiVersion=1.20"}

    kcfg = kubeletapp.SimpleKubelet(
        cl,
        &fakeDocker2,
        "127.0.0.1",
        testRootDir,
        secondManifestURL,
        "127.0.0.1",
        10251, /* KubeletPort */
        0,     /* ReadOnlyPort */
        api.NamespaceDefault,
        empty_dir.ProbeVolumePlugins(),
        nil,
        cadvisorInterface,
        "",
        nil,
        kubecontainer.FakeOS{},
        1*time.Second,  /* FileCheckFrequency */
        1*time.Second,  /* HTTPCheckFrequency */
        10*time.Second, /* MinimumGCAge */
        3*time.Second,  /* NodeStatusUpdateFrequency */
        10*time.Second /* SyncFrequency */)
    kubeletapp.RunKubelet(kcfg, nil)

    return apiServer.URL, configFilePath
}
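// makeTempDirOrDie is referenced above but not shown. A plausible minimal
// version, assuming it simply wraps ioutil.TempDir and aborts via glog on
// failure (requires the "io/ioutil" import); the real helper may differ.
func makeTempDirOrDie(prefix string, baseDir string) string {
    if baseDir == "" {
        baseDir = "/tmp"
    }
    tempDir, err := ioutil.TempDir(baseDir, prefix)
    if err != nil {
        glog.Fatalf("Couldn't create temp dir: %v", err)
    }
    return tempDir
}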