// Run begins watching and syncing.
func (c *RouterController) Run() {
	glog.V(4).Info("Running router controller")
	if c.Namespaces != nil {
		c.HandleNamespaces()
		go util.Forever(c.HandleNamespaces, c.NamespaceSyncInterval)
	}
	go util.Forever(c.HandleRoute, 0)
	go util.Forever(c.HandleEndpoints, 0)
}
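// For context: a minimal sketch of the util.Forever helper that every snippet in this
// collection hands its loop body to. The sketch is an assumption about the old pkg/util
// behavior (the real helper also routes panics through util.HandleCrash); it is not code
// taken from the snippets themselves. The idea: run f, sleep for period, repeat, and
// recover from panics so one bad iteration does not kill the goroutine.
package util

import (
	"log"
	"time"
)

// Forever calls f repeatedly, sleeping period between invocations. It never returns.
func Forever(f func(), period time.Duration) {
	Until(f, period, nil)
}

// Until calls f repeatedly until stopCh is closed; a nil stopCh means run forever.
func Until(f func(), period time.Duration, stopCh <-chan struct{}) {
	for {
		select {
		case <-stopCh:
			return
		default:
		}
		func() {
			// Recover so a panicking f does not take the goroutine (and process) down.
			defer func() {
				if r := recover(); r != nil {
					log.Printf("recovered from panic: %v", r)
				}
			}()
			f()
		}()
		time.Sleep(period)
	}
}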
// Run starts an asynchronous loop that monitors the status of cluster nodes.
func (nc *NodeController) Run(period time.Duration) {
	// Incorporate the results of node status pushed from kubelet to master.
	go util.Forever(func() {
		if err := nc.monitorNodeStatus(); err != nil {
			glog.Errorf("Error monitoring node status: %v", err)
		}
	}, nc.nodeMonitorPeriod)
	go util.Forever(func() {
		nc.podEvictor.TryEvict(func(nodeName string) { nc.deletePods(nodeName) })
	}, nodeEvictionPeriod)
}
func (s *Source) Run() error {
	// locate the oldest snapshot
	snapshotSize := uint64(1000)
	snapshotWindow := snapshotSize
	resp, err := s.client.Get("/", false, false)
	if err != nil {
		return err
	}
	recentIndex := uint64(1)
	if resp.EtcdIndex > snapshotSize {
		recentIndex = resp.EtcdIndex - snapshotWindow + 1
	}
	watches := make(chan chan *etcd.Response)
	go util.Forever(func() {
		ch := make(chan *etcd.Response)
		watches <- ch
		if _, err := s.client.Watch("/", recentIndex, true, ch, nil); err != nil {
			snapshotWindow = snapshotWindow * 9 / 10
			if etcdError, ok := err.(*etcd.EtcdError); ok {
				recentIndex = etcdError.Index - snapshotWindow
			}
			glog.Errorf("Unable to watch: %v", err)
			return
		}
		snapshotWindow = snapshotSize
	}, 1*time.Second)
	lowestIndex := uint64(0)
	go util.Forever(func() {
		glog.Infof("Ready to archive changes from etcd ...")
		for ch := range watches {
			glog.Infof("Watching ...")
			for resp := range ch {
				index, err := s.OnEvent(resp)
				if err != nil {
					glog.Errorf("error: %v", err)
					continue
				}
				if index == 0 {
					break
				}
				lowestIndex = index
			}
		}
	}, 10*time.Millisecond)
	return nil
}
func (rc *RouteController) Run(syncPeriod time.Duration) {
	go util.Forever(func() {
		if err := rc.reconcileNodeRoutes(); err != nil {
			glog.Errorf("Couldn't reconcile node routes: %v", err)
		}
	}, syncPeriod)
}
func (a *HorizontalPodAutoscalerController) Run(syncPeriod time.Duration) {
	go util.Forever(func() {
		if err := a.reconcileAutoscalers(); err != nil {
			glog.Errorf("Couldn't reconcile horizontal pod autoscalers: %v", err)
		}
	}, syncPeriod)
}
func startKubelet(k KubeletBootstrap, podCfg *config.PodConfig, kc *KubeletConfig) {
	// start the kubelet
	go util.Forever(func() { k.Run(podCfg.Updates()) }, 0)
	// start the kubelet server
	if kc.EnableServer {
		go util.Forever(func() {
			k.ListenAndServe(kc.Address, kc.Port, kc.TLSOptions, kc.EnableDebuggingHandlers)
		}, 0)
	}
	if kc.ReadOnlyPort > 0 {
		go util.Forever(func() {
			k.ListenAndServeReadOnly(kc.Address, kc.ReadOnlyPort)
		}, 0)
	}
}
// serve starts serving the provided http.Handler using security settings derived from the MasterConfig
func (c *MasterConfig) serve(handler http.Handler, extra []string) {
	timeout := c.Options.ServingInfo.RequestTimeoutSeconds
	if timeout == -1 {
		timeout = 0
	}
	server := &http.Server{
		Addr:           c.Options.ServingInfo.BindAddress,
		Handler:        handler,
		ReadTimeout:    time.Duration(timeout) * time.Second,
		WriteTimeout:   time.Duration(timeout) * time.Second,
		MaxHeaderBytes: 1 << 20,
	}
	go util.Forever(func() {
		for _, s := range extra {
			glog.Infof(s, c.Options.ServingInfo.BindAddress)
		}
		if c.TLS {
			server.TLSConfig = &tls.Config{
				// Change default from SSLv3 to TLSv1.0 (because of POODLE vulnerability)
				MinVersion: tls.VersionTLS10,
				// Populate PeerCertificates in requests, but don't reject connections without certificates
				// This allows certificates to be validated by authenticators, while still allowing other auth types
				ClientAuth: tls.RequestClientCert,
				ClientCAs:  c.ClientCAs,
			}
			glog.Fatal(cmdutil.ListenAndServeTLS(server, c.Options.ServingInfo.BindNetwork, c.Options.ServingInfo.ServerCert.CertFile, c.Options.ServingInfo.ServerCert.KeyFile))
		} else {
			glog.Fatal(server.ListenAndServe())
		}
	}, 0)
}
// RunProxy starts the proxy
func (c *NodeConfig) RunProxy() {
	// initialize kube proxy
	serviceConfig := pconfig.NewServiceConfig()
	endpointsConfig := pconfig.NewEndpointsConfig()
	loadBalancer := proxy.NewLoadBalancerRR()
	endpointsConfig.RegisterHandler(loadBalancer)
	host, _, err := net.SplitHostPort(c.BindAddress)
	if err != nil {
		glog.Fatalf("The provided value to bind to must be an ip:port %q", c.BindAddress)
	}
	ip := net.ParseIP(host)
	if ip == nil {
		glog.Fatalf("The provided value to bind to must be an ip:port: %q", c.BindAddress)
	}
	protocol := iptables.ProtocolIpv4
	if ip.To4() == nil {
		protocol = iptables.ProtocolIpv6
	}
	syncPeriod, err := time.ParseDuration(c.IPTablesSyncPeriod)
	if err != nil {
		glog.Fatalf("Cannot parse the provided ip-tables sync period (%s) : %v", c.IPTablesSyncPeriod, err)
	}
	go util.Forever(func() {
		proxier, err := proxy.NewProxier(loadBalancer, ip, iptables.New(kexec.New(), protocol), util.PortRange{}, syncPeriod)
		if err != nil {
			switch {
			// conflicting use of iptables, retry
			case proxy.IsProxyLocked(err):
				glog.Errorf("Unable to start proxy, will retry: %v", err)
				return
			// on a system without iptables
			case strings.Contains(err.Error(), "executable file not found in path"):
				glog.V(4).Infof("kube-proxy initialization error: %v", err)
				glog.Warningf("WARNING: Could not find the iptables command. The service proxy requires iptables and will be disabled.")
			case err == proxy.ErrProxyOnLocalhost:
				glog.Warningf("WARNING: The service proxy cannot bind to localhost and will be disabled.")
			case strings.Contains(err.Error(), "you must be root"):
				glog.Warningf("WARNING: Could not modify iptables. You must run this process as root to use the service proxy.")
			default:
				glog.Warningf("WARNING: Could not modify iptables. You must run this process as root to use the service proxy: %v", err)
			}
			select {}
		}
		pconfig.NewSourceAPI(
			c.Client,
			30*time.Second,
			serviceConfig.Channel("api"),
			endpointsConfig.Channel("api"))
		serviceConfig.RegisterHandler(proxier)
		glog.Infof("Started Kubernetes Proxy on %s", host)
		select {}
	}, 5*time.Second)
}
func New(ttl time.Duration) GCStore {
	store := &gcStore{
		data: make(map[interface{}]*dataItem),
		ttl:  ttl,
	}
	go util.Forever(store.garbageCollect, ttl/2)
	return store
}
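// A plausible shape for the garbageCollect sweep started above, run every ttl/2.
// Everything here except the data map, the ttl, and the half-ttl period is an
// assumption for illustration: the dataItem timestamp field and the mutex are guesses,
// not taken from the snippet.
func (s *gcStore) garbageCollect() {
	s.lock.Lock() // assumed sync.Mutex on gcStore
	defer s.lock.Unlock()
	now := time.Now()
	for key, item := range s.data {
		if now.Sub(item.createdAt) > s.ttl { // createdAt is an assumed field on dataItem
			delete(s.data, key)
		}
	}
}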
// Run starts an http server for the static assets listening on the configured
// bind address
func (c *AssetConfig) Run() {
	publicURL, err := url.Parse(c.Options.PublicURL)
	if err != nil {
		glog.Fatal(err)
	}
	mux := http.NewServeMux()
	err = c.addHandlers(mux)
	if err != nil {
		glog.Fatal(err)
	}
	if publicURL.Path != "/" {
		mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
			http.Redirect(w, req, publicURL.Path, http.StatusFound)
		})
	}
	timeout := c.Options.ServingInfo.RequestTimeoutSeconds
	if timeout == -1 {
		timeout = 0
	}
	server := &http.Server{
		Addr:           c.Options.ServingInfo.BindAddress,
		Handler:        mux,
		ReadTimeout:    time.Duration(timeout) * time.Second,
		WriteTimeout:   time.Duration(timeout) * time.Second,
		MaxHeaderBytes: 1 << 20,
	}
	isTLS := configapi.UseTLS(c.Options.ServingInfo.ServingInfo)
	go util.Forever(func() {
		if isTLS {
			extraCerts, err := configapi.GetNamedCertificateMap(c.Options.ServingInfo.NamedCertificates)
			if err != nil {
				glog.Fatal(err)
			}
			server.TLSConfig = &tls.Config{
				// Change default from SSLv3 to TLSv1.0 (because of POODLE vulnerability)
				MinVersion: tls.VersionTLS10,
				// Set SNI certificate func
				GetCertificate: cmdutil.GetCertificateFunc(extraCerts),
			}
			glog.Infof("Web console listening at https://%s", c.Options.ServingInfo.BindAddress)
			glog.Fatal(cmdutil.ListenAndServeTLS(server, c.Options.ServingInfo.BindNetwork, c.Options.ServingInfo.ServerCert.CertFile, c.Options.ServingInfo.ServerCert.KeyFile))
		} else {
			glog.Infof("Web console listening at http://%s", c.Options.ServingInfo.BindAddress)
			glog.Fatal(server.ListenAndServe())
		}
	}, 0)
	// Attempt to verify the server came up for 20 seconds (100 tries * 100ms, 100ms timeout per try)
	cmdutil.WaitForSuccessfulDial(isTLS, c.Options.ServingInfo.BindNetwork, c.Options.ServingInfo.BindAddress, 100*time.Millisecond, 100*time.Millisecond, 100)
	glog.Infof("Web console available at %s", c.Options.PublicURL)
}
func NewSourceFile(path string, nodeName string, period time.Duration, updates chan<- interface{}) {
	config := &sourceFile{
		path:     path,
		nodeName: nodeName,
		updates:  updates,
	}
	glog.V(1).Infof("Watching path %q", path)
	go util.Forever(config.run, period)
}
// Run begins watching and scheduling. It starts a goroutine and returns immediately.
func (s *Scheduler) Run() {
	if s.config.BindPodsRateLimiter != nil {
		go util.Forever(func() {
			sat := s.config.BindPodsRateLimiter.Saturation()
			metrics.BindingRateLimiterSaturation.Set(sat)
		}, metrics.BindingSaturationReportInterval)
	}
	go util.Until(s.scheduleOne, 0, s.config.StopEverything)
}
// experimental returns the resources and codec for the experimental api
func (m *Master) experimental(c *Config) *apiserver.APIGroupVersion {
	controllerStorage := expcontrolleretcd.NewStorage(c.DatabaseStorage)
	autoscalerStorage := horizontalpodautoscaleretcd.NewREST(c.ExpDatabaseStorage)
	thirdPartyResourceStorage := thirdpartyresourceetcd.NewREST(c.ExpDatabaseStorage)
	daemonSetStorage, daemonSetStatusStorage := daemonetcd.NewREST(c.ExpDatabaseStorage)
	deploymentStorage := deploymentetcd.NewStorage(c.ExpDatabaseStorage)
	jobStorage, jobStatusStorage := jobetcd.NewREST(c.ExpDatabaseStorage)
	thirdPartyControl := ThirdPartyController{
		master:                     m,
		thirdPartyResourceRegistry: thirdPartyResourceStorage,
	}
	go func() {
		util.Forever(func() {
			if err := thirdPartyControl.SyncResources(); err != nil {
				glog.Warningf("third party resource sync failed: %v", err)
			}
		}, 10*time.Second)
	}()
	storage := map[string]rest.Storage{
		strings.ToLower("replicationControllers"):       controllerStorage.ReplicationController,
		strings.ToLower("replicationControllers/scale"): controllerStorage.Scale,
		strings.ToLower("horizontalpodautoscalers"):     autoscalerStorage,
		strings.ToLower("thirdpartyresources"):          thirdPartyResourceStorage,
		strings.ToLower("daemonsets"):                   daemonSetStorage,
		strings.ToLower("daemonsets/status"):            daemonSetStatusStorage,
		strings.ToLower("deployments"):                  deploymentStorage.Deployment,
		strings.ToLower("deployments/scale"):            deploymentStorage.Scale,
		strings.ToLower("jobs"):                         jobStorage,
		strings.ToLower("jobs/status"):                  jobStatusStorage,
	}
	expMeta := latest.GroupOrDie("experimental")
	return &apiserver.APIGroupVersion{
		Root:                   m.apiGroupPrefix,
		APIRequestInfoResolver: m.newAPIRequestInfoResolver(),
		Creater:                api.Scheme,
		Convertor:              api.Scheme,
		Typer:                  api.Scheme,
		Mapper:                 expMeta.RESTMapper,
		Codec:                  expMeta.Codec,
		Linker:                 expMeta.SelfLinker,
		Storage:                storage,
		Version:                expMeta.GroupVersion,
		ServerVersion:          latest.GroupOrDie("").GroupVersion,
		Admit:                  m.admissionControl,
		Context:                m.requestContextMapper,
		ProxyDialerFn:          m.dialer,
		MinRequestTimeout:      m.minRequestTimeout,
	}
}
func NewSourceURL(url string, header http.Header, nodeName string, period time.Duration, updates chan<- interface{}) {
	config := &sourceURL{
		url:      url,
		header:   header,
		nodeName: nodeName,
		updates:  updates,
		data:     nil,
	}
	glog.V(1).Infof("Watching URL %s", url)
	go util.Forever(config.run, period)
}
// Run starts an asynchronous loop that monitors the status of cluster nodes.
func (nc *NodeController) Run(period time.Duration) {
	// Incorporate the results of node status pushed from kubelet to master.
	go util.Forever(func() {
		if err := nc.monitorNodeStatus(); err != nil {
			glog.Errorf("Error monitoring node status: %v", err)
		}
	}, nc.nodeMonitorPeriod)
	go util.Forever(func() {
		nc.podEvictor.Try(func(value TimedValue) (bool, time.Duration) {
			remaining, err := nc.deletePods(value.Value)
			if err != nil {
				util.HandleError(fmt.Errorf("unable to evict node %q: %v", value.Value, err))
				return false, 0
			}
			if remaining {
				glog.V(2).Infof("Pods terminating on %q", value.Value)
				nc.terminationEvictor.Add(value.Value)
			}
			return true, 0
		})
	}, nodeEvictionPeriod)
	// TODO: replace with a controller that ensures pods that are terminating complete
	// in a particular time period
	go util.Forever(func() {
		nc.terminationEvictor.Try(func(value TimedValue) (bool, time.Duration) {
			remaining, err := nc.terminatePods(value.Value, value.Added)
			if err != nil {
				util.HandleError(fmt.Errorf("unable to terminate pods on node %q: %v", value.Value, err))
				return false, 0
			}
			if remaining != 0 {
				glog.V(2).Infof("Pods still terminating on %q, estimated completion %s", value.Value, remaining)
				return false, remaining
			}
			return true, 0
		})
	}, nodeEvictionPeriod)
}
// Start eager background caching of volume stats.
func (s *fsResourceAnalyzer) Start() {
	if s.calcVolumePeriod <= 0 {
		glog.Info("Volume stats collection disabled.")
		return
	}
	glog.Info("Starting FS ResourceAnalyzer")
	go util.Forever(func() {
		startTime := time.Now()
		s.updateCachedPodVolumeStats()
		glog.V(3).Infof("Finished calculating volume stats in %v.", time.Now().Sub(startTime))
		metrics.MetricsVolumeCalcLatency.Observe(metrics.SinceInMicroseconds(startTime))
	}, s.calcVolumePeriod)
}
func (c *PodConfig) Wait(waitCh <-chan struct{}) {
	c.wait = waitCh
	ch := make(chan kubelet.PodUpdate)
	oldCh := c.updates
	go util.Forever(func() {
		<-waitCh
		for {
			update := <-oldCh
			ch <- update
		}
	}, 0)
	c.updates = ch
}
// Channel returns a channel where a configuration source
// can send updates of new configurations. Multiple calls with the same
// source will return the same channel. This allows change and state based sources
// to use the same channel. Different source names however will be treated as a
// union.
func (m *Mux) Channel(source string) chan interface{} {
	if len(source) == 0 {
		panic("Channel given an empty name")
	}
	m.sourceLock.Lock()
	defer m.sourceLock.Unlock()
	channel, exists := m.sources[source]
	if exists {
		return channel
	}
	newChannel := make(chan interface{})
	m.sources[source] = newChannel
	go util.Forever(func() { m.listen(source, newChannel) }, 0)
	return newChannel
}
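// A hypothetical caller of Mux.Channel, to show the intended flow (the pumpFileUpdates
// name, the "file" source name, and the fileUpdates channel are assumptions for
// illustration, not code from the snippets): each source asks for its channel once and
// then simply sends into it; the mux fans every named source into listen.
func pumpFileUpdates(m *Mux, fileUpdates <-chan interface{}) {
	ch := m.Channel("file") // repeated calls with "file" return the same channel
	for update := range fileUpdates {
		ch <- update
	}
}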
func (im *realImageManager) Start() error {
	// Initial detection makes the detected time "unknown" in the past.
	var zero time.Time
	err := im.detectImages(zero)
	if err != nil {
		return err
	}
	go util.Forever(func() {
		err := im.detectImages(time.Now())
		if err != nil {
			glog.Warningf("[ImageManager] Failed to monitor images: %v", err)
		}
	}, 5*time.Minute)
	return nil
}
func (s *statusManager) Start() {
	// Don't start the status manager if we don't have a client. This will happen
	// on the master, where the kubelet is responsible for bootstrapping the pods
	// of the master components.
	if s.kubeClient == nil {
		glog.Infof("Kubernetes client is nil, not starting status manager.")
		return
	}
	// syncBatch blocks when no updates are available, so we can run it in a tight loop.
	glog.Info("Starting to sync pod status with apiserver")
	go util.Forever(func() {
		err := s.syncBatch()
		if err != nil {
			glog.Warningf("Failed to update pod status: %v", err)
		}
	}, 0)
}
func (m *manager) Start() {
	// Don't start the status manager if we don't have a client. This will happen
	// on the master, where the kubelet is responsible for bootstrapping the pods
	// of the master components.
	if m.kubeClient == nil {
		glog.Infof("Kubernetes client is nil, not starting status manager.")
		return
	}
	glog.Info("Starting to sync pod status with apiserver")
	syncTicker := time.Tick(syncPeriod)
	// syncPod and syncBatch share the same go routine to avoid sync races.
	go util.Forever(func() {
		select {
		case syncRequest := <-m.podStatusChannel:
			m.syncPod(syncRequest.podUID, syncRequest.status)
		case <-syncTicker:
			m.syncBatch()
		}
	}, 0)
}
func NewManager(
	statusManager status.Manager,
	livenessManager results.Manager,
	runner kubecontainer.ContainerCommandRunner,
	refManager *kubecontainer.RefManager,
	recorder record.EventRecorder) Manager {
	prober := newProber(runner, refManager, recorder)
	readinessManager := results.NewManager()
	m := &manager{
		statusManager:    statusManager,
		prober:           prober,
		readinessManager: readinessManager,
		livenessManager:  livenessManager,
		workers:          make(map[probeKey]*worker),
	}
	// Start syncing readiness.
	go util.Forever(m.updateReadiness, 0)
	return m
}
// Run begins watching and synchronizing the cache
func (ac *AuthorizationCache) Run(period time.Duration) {
	namespaceReflector := cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
				return ac.namespaceInterface.List(options)
			},
			WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
				return ac.namespaceInterface.Watch(options)
			},
		},
		&kapi.Namespace{},
		ac.namespaceStore,
		2*time.Minute,
	)
	namespaceReflector.Run()
	ac.lastSyncResourceVersioner = namespaceReflector
	ac.skip = &statelessSkipSynchronizer{}
	go util.Forever(func() { ac.synchronize() }, period)
}
// serve starts serving the provided http.Handler using security settings derived from the MasterConfig
func (c *MasterConfig) serve(handler http.Handler, extra []string) {
	timeout := c.Options.ServingInfo.RequestTimeoutSeconds
	if timeout == -1 {
		timeout = 0
	}
	server := &http.Server{
		Addr:           c.Options.ServingInfo.BindAddress,
		Handler:        handler,
		ReadTimeout:    time.Duration(timeout) * time.Second,
		WriteTimeout:   time.Duration(timeout) * time.Second,
		MaxHeaderBytes: 1 << 20,
	}
	go util.Forever(func() {
		for _, s := range extra {
			glog.Infof(s, c.Options.ServingInfo.BindAddress)
		}
		if c.TLS {
			extraCerts, err := configapi.GetNamedCertificateMap(c.Options.ServingInfo.NamedCertificates)
			if err != nil {
				glog.Fatal(err)
			}
			server.TLSConfig = crypto.SecureTLSConfig(&tls.Config{
				// Populate PeerCertificates in requests, but don't reject connections without certificates
				// This allows certificates to be validated by authenticators, while still allowing other auth types
				ClientAuth: tls.RequestClientCert,
				ClientCAs:  c.ClientCAs,
				// Set SNI certificate func
				GetCertificate: cmdutil.GetCertificateFunc(extraCerts),
			})
			glog.Fatal(cmdutil.ListenAndServeTLS(server, c.Options.ServingInfo.BindNetwork, c.Options.ServingInfo.ServerCert.CertFile, c.Options.ServingInfo.ServerCert.KeyFile))
		} else {
			glog.Fatal(server.ListenAndServe())
		}
	}, 0)
}
// experimental returns the resources and codec for the experimental api
func (m *Master) experimental(c *Config) *apiserver.APIGroupVersion {
	// All resources except these are disabled by default.
	enabledResources := sets.NewString("jobs", "horizontalpodautoscalers", "ingresses")
	resourceOverrides := m.apiGroupVersionOverrides["extensions/v1beta1"].ResourceOverrides
	isEnabled := func(resource string) bool {
		// Check if the resource has been overridden.
		enabled, ok := resourceOverrides[resource]
		if !ok {
			return enabledResources.Has(resource)
		}
		return enabled
	}
	storageDecorator := c.storageDecorator()
	dbClient := func(resource string) storage.Interface {
		return c.StorageDestinations.get("extensions", resource)
	}
	storage := map[string]rest.Storage{}
	if isEnabled("horizontalpodautoscalers") {
		autoscalerStorage, autoscalerStatusStorage := horizontalpodautoscaleretcd.NewREST(dbClient("horizontalpodautoscalers"), storageDecorator)
		storage["horizontalpodautoscalers"] = autoscalerStorage
		storage["horizontalpodautoscalers/status"] = autoscalerStatusStorage
		controllerStorage := expcontrolleretcd.NewStorage(c.StorageDestinations.get("", "replicationControllers"), storageDecorator)
		storage["replicationcontrollers"] = controllerStorage.ReplicationController
		storage["replicationcontrollers/scale"] = controllerStorage.Scale
	}
	if isEnabled("thirdpartyresources") {
		thirdPartyResourceStorage := thirdpartyresourceetcd.NewREST(dbClient("thirdpartyresources"), storageDecorator)
		thirdPartyControl := ThirdPartyController{
			master:                     m,
			thirdPartyResourceRegistry: thirdPartyResourceStorage,
		}
		go func() {
			util.Forever(func() {
				if err := thirdPartyControl.SyncResources(); err != nil {
					glog.Warningf("third party resource sync failed: %v", err)
				}
			}, 10*time.Second)
		}()
		storage["thirdpartyresources"] = thirdPartyResourceStorage
	}
	if isEnabled("daemonsets") {
		daemonSetStorage, daemonSetStatusStorage := daemonetcd.NewREST(dbClient("daemonsets"), storageDecorator)
		storage["daemonsets"] = daemonSetStorage
		storage["daemonsets/status"] = daemonSetStatusStorage
	}
	if isEnabled("deployments") {
		deploymentStorage := deploymentetcd.NewStorage(dbClient("deployments"), storageDecorator)
		storage["deployments"] = deploymentStorage.Deployment
		storage["deployments/status"] = deploymentStorage.Status
		storage["deployments/scale"] = deploymentStorage.Scale
	}
	if isEnabled("jobs") {
		jobStorage, jobStatusStorage := jobetcd.NewREST(dbClient("jobs"), storageDecorator)
		storage["jobs"] = jobStorage
		storage["jobs/status"] = jobStatusStorage
	}
	if isEnabled("ingresses") {
		ingressStorage, ingressStatusStorage := ingressetcd.NewREST(dbClient("ingresses"), storageDecorator)
		storage["ingresses"] = ingressStorage
		storage["ingresses/status"] = ingressStatusStorage
	}
	extensionsGroup := latest.GroupOrDie("extensions")
	serverGroupVersion := unversioned.ParseGroupVersionOrDie(latest.GroupOrDie("").GroupVersion)
	return &apiserver.APIGroupVersion{
		Root:                m.apiGroupPrefix,
		RequestInfoResolver: m.newRequestInfoResolver(),
		Creater:             api.Scheme,
		Convertor:           api.Scheme,
		Typer:               api.Scheme,
		Mapper:              extensionsGroup.RESTMapper,
		Codec:               extensionsGroup.Codec,
		Linker:              extensionsGroup.SelfLinker,
		Storage:             storage,
		GroupVersion:        unversioned.ParseGroupVersionOrDie(extensionsGroup.GroupVersion),
		ServerGroupVersion:  &serverGroupVersion,
		Admit:               m.admissionControl,
		Context:             m.requestContextMapper,
		MinRequestTimeout:   m.minRequestTimeout,
	}
}
// Run launches the OpenShift master. It takes optional installers that may install additional endpoints into the server.
// All endpoints get configured CORS behavior
// Protected installers' endpoints are protected by API authentication and authorization.
// Unprotected installers' endpoints do not have any additional protection added.
func (c *MasterConfig) Run(protected []APIInstaller, unprotected []APIInstaller) {
	var extra []string
	safe := kmaster.NewHandlerContainer(http.NewServeMux())
	open := kmaster.NewHandlerContainer(http.NewServeMux())
	// enforce authentication on protected endpoints
	protected = append(protected, APIInstallFunc(c.InstallProtectedAPI))
	for _, i := range protected {
		extra = append(extra, i.InstallAPI(safe)...)
	}
	handler := c.authorizationFilter(safe)
	handler = authenticationHandlerFilter(handler, c.Authenticator, c.getRequestContextMapper())
	handler = namespacingFilter(handler, c.getRequestContextMapper())
	handler = cacheControlFilter(handler, "no-store") // protected endpoints should not be cached
	// unprotected resources
	unprotected = append(unprotected, APIInstallFunc(c.InstallUnprotectedAPI))
	for _, i := range unprotected {
		extra = append(extra, i.InstallAPI(open)...)
	}
	handler = indexAPIPaths(handler)
	open.Handle("/", handler)
	// install swagger
	swaggerConfig := swagger.Config{
		WebServicesUrl:   c.Options.MasterPublicURL,
		WebServices:      append(safe.RegisteredWebServices(), open.RegisteredWebServices()...),
		ApiPath:          swaggerAPIPrefix,
		PostBuildHandler: customizeSwaggerDefinition,
	}
	// log nothing from swagger
	swagger.LogInfo = func(format string, v ...interface{}) {}
	swagger.RegisterSwaggerService(swaggerConfig, open)
	extra = append(extra, fmt.Sprintf("Started Swagger Schema API at %%s%s", swaggerAPIPrefix))
	handler = open
	// add CORS support
	if origins := c.ensureCORSAllowedOrigins(); len(origins) != 0 {
		handler = apiserver.CORS(handler, origins, nil, nil, "true")
	}
	if c.WebConsoleEnabled() {
		handler = assetServerRedirect(handler, c.Options.AssetConfig.PublicURL)
	}
	// Make the outermost filter the requestContextMapper to ensure all components share the same context
	if contextHandler, err := kapi.NewRequestContextFilter(c.getRequestContextMapper(), handler); err != nil {
		glog.Fatalf("Error setting up request context filter: %v", err)
	} else {
		handler = contextHandler
	}
	// TODO: MaxRequestsInFlight should be subdivided by intent, type of behavior, and speed of
	// execution - updates vs reads, long reads vs short reads, fat reads vs skinny reads.
	if c.Options.ServingInfo.MaxRequestsInFlight > 0 {
		sem := make(chan bool, c.Options.ServingInfo.MaxRequestsInFlight)
		handler = apiserver.MaxInFlightLimit(sem, longRunningRE, handler)
	}
	timeout := c.Options.ServingInfo.RequestTimeoutSeconds
	if timeout == -1 {
		timeout = 0
	}
	server := &http.Server{
		Addr:           c.Options.ServingInfo.BindAddress,
		Handler:        handler,
		ReadTimeout:    time.Duration(timeout) * time.Second,
		WriteTimeout:   time.Duration(timeout) * time.Second,
		MaxHeaderBytes: 1 << 20,
	}
	go util.Forever(func() {
		for _, s := range extra {
			glog.Infof(s, c.Options.ServingInfo.BindAddress)
		}
		if c.TLS {
			server.TLSConfig = &tls.Config{
				// Change default from SSLv3 to TLSv1.0 (because of POODLE vulnerability)
				MinVersion: tls.VersionTLS10,
				// Populate PeerCertificates in requests, but don't reject connections without certificates
				// This allows certificates to be validated by authenticators, while still allowing other auth types
				ClientAuth: tls.RequestClientCert,
				ClientCAs:  c.ClientCAs,
			}
			glog.Fatal(cmdutil.ListenAndServeTLS(server, c.Options.ServingInfo.BindNetwork, c.Options.ServingInfo.ServerCert.CertFile, c.Options.ServingInfo.ServerCert.KeyFile))
		} else {
			glog.Fatal(server.ListenAndServe())
		}
	}, 0)
	// Attempt to verify the server came up for 20 seconds (100 tries * 100ms, 100ms timeout per try)
	cmdutil.WaitForSuccessfulDial(c.TLS, c.Options.ServingInfo.BindNetwork, c.Options.ServingInfo.BindAddress, 100*time.Millisecond, 100*time.Millisecond, 100)
	// Create required policy rules if needed
	c.ensureComponentAuthorizationRules()
	// Ensure the default SCCs are created
	c.ensureDefaultSecurityContextConstraints()
	// Bind default roles for service accounts in the default namespace if needed
	c.ensureDefaultNamespaceServiceAccountRoles()
	// Create the infra namespace
	c.ensureOpenShiftInfraNamespace()
	// Create the shared resource namespace
	c.ensureOpenShiftSharedResourcesNamespace()
}
// Run begins processing resources from Queue asynchronously.
func (c *RetryController) Run() {
	go kutil.Forever(func() { c.handleOne(c.Queue.Pop()) }, 0)
}
// Run runs the specified KubeletServer for the given KubeletConfig. This should never exit.
// The kcfg argument may be nil - if so, it is initialized from the settings on KubeletServer.
// Otherwise, the caller is assumed to have set up the KubeletConfig object and all defaults
// will be ignored.
func (s *KubeletServer) Run(kcfg *KubeletConfig) error {
	if kcfg == nil {
		cfg, err := s.KubeletConfig()
		if err != nil {
			return err
		}
		kcfg = cfg
		clientConfig, err := s.CreateAPIServerClientConfig()
		if err == nil {
			kcfg.KubeClient, err = client.New(clientConfig)
		}
		if err != nil && len(s.APIServerList) > 0 {
			glog.Warningf("No API client: %v", err)
		}
		cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
		if err != nil {
			return err
		}
		glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile)
		kcfg.Cloud = cloud
	}
	if kcfg.CadvisorInterface == nil {
		ca, err := cadvisor.New(s.CadvisorPort)
		if err != nil {
			return err
		}
		kcfg.CadvisorInterface = ca
	}
	util.ReallyCrash = s.ReallyCrashForTesting
	rand.Seed(time.Now().UTC().UnixNano())
	credentialprovider.SetPreferredDockercfgPath(s.RootDirectory)
	glog.V(2).Infof("Using root directory: %v", s.RootDirectory)
	// TODO(vmarmol): Do this through container config.
	oomAdjuster := oom.NewOomAdjuster()
	if err := oomAdjuster.ApplyOomScoreAdj(0, s.OOMScoreAdj); err != nil {
		glog.Warning(err)
	}
	if err := RunKubelet(kcfg, nil); err != nil {
		return err
	}
	if s.HealthzPort > 0 {
		healthz.DefaultHealthz()
		go util.Forever(func() {
			err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress.String(), strconv.Itoa(s.HealthzPort)), nil)
			if err != nil {
				glog.Errorf("Starting health server failed: %v", err)
			}
		}, 5*time.Second)
	}
	if s.RunOnce {
		return nil
	}
	// run forever
	select {}
}
// getExtensionResources returns the resources for the extensions api
func (m *Master) getExtensionResources(c *Config) map[string]rest.Storage {
	// All resources except these are disabled by default.
	enabledResources := sets.NewString("jobs", "horizontalpodautoscalers", "ingresses")
	resourceOverrides := m.ApiGroupVersionOverrides["extensions/v1beta1"].ResourceOverrides
	isEnabled := func(resource string) bool {
		// Check if the resource has been overridden.
		enabled, ok := resourceOverrides[resource]
		if !ok {
			return enabledResources.Has(resource)
		}
		return enabled
	}
	storageDecorator := m.StorageDecorator()
	dbClient := func(resource string) storage.Interface {
		return c.StorageDestinations.Get(extensions.GroupName, resource)
	}
	storage := map[string]rest.Storage{}
	if isEnabled("horizontalpodautoscalers") {
		autoscalerStorage, autoscalerStatusStorage := horizontalpodautoscaleretcd.NewREST(dbClient("horizontalpodautoscalers"), storageDecorator)
		storage["horizontalpodautoscalers"] = autoscalerStorage
		storage["horizontalpodautoscalers/status"] = autoscalerStatusStorage
		controllerStorage := expcontrolleretcd.NewStorage(c.StorageDestinations.Get("", "replicationControllers"), storageDecorator)
		storage["replicationcontrollers"] = controllerStorage.ReplicationController
		storage["replicationcontrollers/scale"] = controllerStorage.Scale
	}
	if isEnabled("thirdpartyresources") {
		thirdPartyResourceStorage := thirdpartyresourceetcd.NewREST(dbClient("thirdpartyresources"), storageDecorator)
		thirdPartyControl := ThirdPartyController{
			master:                     m,
			thirdPartyResourceRegistry: thirdPartyResourceStorage,
		}
		go func() {
			util.Forever(func() {
				if err := thirdPartyControl.SyncResources(); err != nil {
					glog.Warningf("third party resource sync failed: %v", err)
				}
			}, 10*time.Second)
		}()
		storage["thirdpartyresources"] = thirdPartyResourceStorage
	}
	if isEnabled("daemonsets") {
		daemonSetStorage, daemonSetStatusStorage := daemonetcd.NewREST(dbClient("daemonsets"), storageDecorator)
		storage["daemonsets"] = daemonSetStorage
		storage["daemonsets/status"] = daemonSetStatusStorage
	}
	if isEnabled("deployments") {
		deploymentStorage := deploymentetcd.NewStorage(dbClient("deployments"), storageDecorator)
		storage["deployments"] = deploymentStorage.Deployment
		storage["deployments/status"] = deploymentStorage.Status
		storage["deployments/scale"] = deploymentStorage.Scale
		storage["deployments/rollback"] = deploymentStorage.Rollback
	}
	if isEnabled("jobs") {
		jobStorage, jobStatusStorage := jobetcd.NewREST(dbClient("jobs"), storageDecorator)
		storage["jobs"] = jobStorage
		storage["jobs/status"] = jobStatusStorage
	}
	if isEnabled("ingresses") {
		ingressStorage, ingressStatusStorage := ingressetcd.NewREST(dbClient("ingresses"), storageDecorator)
		storage["ingresses"] = ingressStorage
		storage["ingresses/status"] = ingressStatusStorage
	}
	return storage
}
// Run runs the specified ProxyServer. This should never exit.
func (s *ProxyServer) Run(_ []string) error {
	// TODO(vmarmol): Use container config for this.
	oomAdjuster := oom.NewOomAdjuster()
	if err := oomAdjuster.ApplyOomScoreAdj(0, s.OOMScoreAdj); err != nil {
		glog.V(2).Info(err)
	}
	// Run in its own container.
	if err := util.RunInResourceContainer(s.ResourceContainer); err != nil {
		glog.Warningf("Failed to start in resource-only container %q: %v", s.ResourceContainer, err)
	} else {
		glog.V(2).Infof("Running in resource-only container %q", s.ResourceContainer)
	}
	// define api config source
	if s.Kubeconfig == "" && s.Master == "" {
		glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
	}
	// This creates a client, first loading any specified kubeconfig
	// file, and then overriding the Master flag, if non-empty.
	kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Kubeconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
	if err != nil {
		return err
	}
	client, err := client.New(kubeconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}
	// Add event recorder
	Hostname := nodeutil.GetHostname(s.HostnameOverride)
	eventBroadcaster := record.NewBroadcaster()
	s.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "kube-proxy", Host: Hostname})
	eventBroadcaster.StartRecordingToSink(client.Events(""))
	s.nodeRef = &api.ObjectReference{
		Kind:      "Node",
		Name:      Hostname,
		UID:       types.UID(Hostname),
		Namespace: "",
	}
	// Birth Cry
	s.birthCry()
	serviceConfig := config.NewServiceConfig()
	endpointsConfig := config.NewEndpointsConfig()
	protocol := utiliptables.ProtocolIpv4
	if s.BindAddress.To4() == nil {
		protocol = utiliptables.ProtocolIpv6
	}
	var proxier proxy.ProxyProvider
	var endpointsHandler config.EndpointsConfigHandler
	// guaranteed false on error, error only necessary for debugging
	shouldUseIptables, err := iptables.ShouldUseIptablesProxier()
	if err != nil {
		glog.Errorf("Can't determine whether to use iptables or userspace, using userspace proxier: %v", err)
	}
	if !s.ForceUserspaceProxy && shouldUseIptables {
		glog.V(2).Info("Using iptables Proxier.")
		proxierIptables, err := iptables.NewProxier(utiliptables.New(exec.New(), protocol))
		if err != nil {
			glog.Fatalf("Unable to create proxier: %v", err)
		}
		proxier = proxierIptables
		endpointsHandler = proxierIptables
	} else {
		glog.V(2).Info("Using userspace Proxier.")
		// This is a proxy.LoadBalancer which NewProxier needs but has methods we don't need for
		// our config.EndpointsConfigHandler.
		loadBalancer := userspace.NewLoadBalancerRR()
		// set EndpointsConfigHandler to our loadBalancer
		endpointsHandler = loadBalancer
		proxierUserspace, err := userspace.NewProxier(loadBalancer, s.BindAddress, utiliptables.New(exec.New(), protocol), s.PortRange)
		if err != nil {
			glog.Fatalf("Unable to create proxier: %v", err)
		}
		proxier = proxierUserspace
	}
	// Wire proxier to handle changes to services
	serviceConfig.RegisterHandler(proxier)
	// And wire endpointsHandler to handle changes to endpoints to services
	endpointsConfig.RegisterHandler(endpointsHandler)
	// Note: RegisterHandler() calls need to happen before creation of Sources because sources
	// only notify on changes, and the initial update (on process start) may be lost if no handlers
	// are registered yet.
	config.NewSourceAPI(
		client,
		30*time.Second,
		serviceConfig.Channel("api"),
		endpointsConfig.Channel("api"),
	)
	if s.HealthzPort > 0 {
		go util.Forever(func() {
			err := http.ListenAndServe(s.HealthzBindAddress.String()+":"+strconv.Itoa(s.HealthzPort), nil)
			if err != nil {
				glog.Errorf("Starting health server failed: %v", err)
			}
		}, 5*time.Second)
	}
	// Just loop forever for now...
	proxier.SyncLoop()
	return nil
}