func main() {
	var ingClient client.IngressInterface
	if kubeClient, err := client.NewInCluster(); err != nil {
		log.Fatalf("Failed to create client: %v.", err)
	} else {
		ingClient = kubeClient.Extensions().Ingress(api.NamespaceAll)
	}
	// template.Must panics on a bad template instead of silently ignoring the parse error.
	tmpl := template.Must(template.New("nginx").Parse(nginxConf))
	rateLimiter := util.NewTokenBucketRateLimiter(0.1, 1)
	known := &extensions.IngressList{}

	// Controller loop
	shellOut("nginx")
	for {
		rateLimiter.Accept()
		ingresses, err := ingClient.List(labels.Everything(), fields.Everything())
		if err != nil {
			log.Printf("Error retrieving ingresses: %v", err)
			continue
		}
		if reflect.DeepEqual(ingresses.Items, known.Items) {
			continue
		}
		known = ingresses
		if w, err := os.Create("/etc/nginx/nginx.conf"); err != nil {
			log.Fatalf("Failed to open /etc/nginx/nginx.conf: %v", err)
		} else if err := tmpl.Execute(w, ingresses); err != nil {
			log.Fatalf("Failed to write template: %v", err)
		}
		shellOut("nginx -s reload")
	}
}
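A minimal sketch of the pacing that Accept() provides above: a token bucket with 0.1 QPS and burst 1 hands out one token immediately and then one every ten seconds, so the controller loop cannot hammer the API server. This uses golang.org/x/time/rate as an assumed stand-in for the in-tree util.NewTokenBucketRateLimiter; it is an illustration, not the original implementation.

package main

import (
	"context"
	"log"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// 0.1 tokens/sec, burst of 1 — same parameters as the controller loop.
	limiter := rate.NewLimiter(rate.Limit(0.1), 1)
	for i := 0; i < 3; i++ {
		// Wait blocks until a token is available, analogous to RateLimiter.Accept().
		if err := limiter.Wait(context.Background()); err != nil {
			log.Fatal(err)
		}
		log.Printf("sync %d at %v", i, time.Now().Format(time.Stamp))
	}
}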
// newLoadBalancerController creates a new controller from the given config.
func newLoadBalancerController(cfg *loadBalancerConfig, kubeClient *unversioned.Client, namespace string) *loadBalancerController {
	lbc := loadBalancerController{
		cfg:    cfg,
		client: kubeClient,
		queue:  workqueue.New(),
		reloadRateLimiter: util.NewTokenBucketRateLimiter(
			reloadQPS, int(reloadQPS)),
		targetService:   *targetService,
		forwardServices: *forwardServices,
		httpPort:        *httpPort,
		tcpServices:     map[string]int{},
	}

	for _, service := range strings.Split(*tcpServices, ",") {
		portSplit := strings.Split(service, ":")
		if len(portSplit) != 2 {
			glog.Errorf("Ignoring misconfigured TCP service %v", service)
			continue
		}
		if port, err := strconv.Atoi(portSplit[1]); err != nil {
			glog.Errorf("Ignoring misconfigured TCP service %v: %v", service, err)
			continue
		} else {
			lbc.tcpServices[portSplit[0]] = port
		}
	}

	enqueue := func(obj interface{}) {
		key, err := keyFunc(obj)
		if err != nil {
			glog.Infof("Couldn't get key for object %+v: %v", obj, err)
			return
		}
		lbc.queue.Add(key)
	}

	eventHandlers := framework.ResourceEventHandlerFuncs{
		AddFunc:    enqueue,
		DeleteFunc: enqueue,
		UpdateFunc: func(old, cur interface{}) {
			if !reflect.DeepEqual(old, cur) {
				enqueue(cur)
			}
		},
	}

	lbc.svcLister.Store, lbc.svcController = framework.NewInformer(
		cache.NewListWatchFromClient(
			lbc.client, "services", namespace, fields.Everything()),
		&api.Service{}, resyncPeriod, eventHandlers)

	lbc.epLister.Store, lbc.epController = framework.NewInformer(
		cache.NewListWatchFromClient(
			lbc.client, "endpoints", namespace, fields.Everything()),
		&api.Endpoints{}, resyncPeriod, eventHandlers)

	return &lbc
}
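A standalone sketch of the "--tcp-services" parsing loop above, assuming the flag value has the form "service:port[,service:port...]" as the snippet implies; the helper name parseTCPServices is hypothetical.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseTCPServices builds a service-name-to-port map, skipping entries that
// are not "name:port" or whose port is not an integer.
func parseTCPServices(spec string) map[string]int {
	out := map[string]int{}
	for _, service := range strings.Split(spec, ",") {
		parts := strings.Split(service, ":")
		if len(parts) != 2 {
			fmt.Printf("ignoring misconfigured TCP service %q\n", service)
			continue
		}
		port, err := strconv.Atoi(parts[1])
		if err != nil {
			fmt.Printf("ignoring misconfigured TCP service %q: %v\n", service, err)
			continue
		}
		out[parts[0]] = port
	}
	return out
}

func main() {
	fmt.Println(parseTCPServices("mysql:3306,bad,redis:6379"))
	// map[mysql:3306 redis:6379]
}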
func newAltTokenSource(tokenURL, tokenBody string) oauth2.TokenSource {
	client := oauth2.NewClient(oauth2.NoContext, google.ComputeTokenSource(""))
	a := &altTokenSource{
		oauthClient: client,
		tokenURL:    tokenURL,
		tokenBody:   tokenBody,
		throttle:    util.NewTokenBucketRateLimiter(tokenURLQPS, tokenURLBurst),
	}
	return oauth2.ReuseTokenSource(nil, a)
}
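A sketch of why the wrapping above matters: oauth2.ReuseTokenSource caches the last valid token and only calls the wrapped source when it expires, so the rate limiter on the inner source throttles token refreshes rather than every caller. The throttledSource type below is illustrative, not the original altTokenSource.

package main

import (
	"fmt"
	"time"

	"golang.org/x/oauth2"
)

type throttledSource struct {
	calls int
}

func (s *throttledSource) Token() (*oauth2.Token, error) {
	// In the real code a token-bucket Accept() would block here before the
	// HTTP call to the token URL.
	s.calls++
	return &oauth2.Token{
		AccessToken: fmt.Sprintf("token-%d", s.calls),
		Expiry:      time.Now().Add(time.Hour),
	}, nil
}

func main() {
	inner := &throttledSource{}
	ts := oauth2.ReuseTokenSource(nil, inner)
	for i := 0; i < 3; i++ {
		tok, _ := ts.Token()
		fmt.Println(tok.AccessToken) // token-1 every time: the cached token is reused
	}
	fmt.Println("inner calls:", inner.calls) // 1
}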
// Run runs the specified SchedulerServer. This should never exit.
func (s *SchedulerServer) Run(_ []string) error {
	if s.Kubeconfig == "" && s.Master == "" {
		glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
	}

	// This creates a client, first loading any specified kubeconfig
	// file, and then overriding the Master flag, if non-empty.
	kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Kubeconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
	if err != nil {
		return err
	}
	kubeconfig.QPS = 50.0
	kubeconfig.Burst = 100

	kubeClient, err := client.New(kubeconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	go func() {
		mux := http.NewServeMux()
		healthz.InstallHandler(mux)
		if s.EnableProfiling {
			mux.HandleFunc("/debug/pprof/", pprof.Index)
			mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
			mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		}
		mux.Handle("/metrics", prometheus.Handler())
		server := &http.Server{
			Addr:    net.JoinHostPort(s.Address.String(), strconv.Itoa(s.Port)),
			Handler: mux,
		}
		glog.Fatal(server.ListenAndServe())
	}()

	configFactory := factory.NewConfigFactory(kubeClient, util.NewTokenBucketRateLimiter(s.BindPodsQPS, s.BindPodsBurst))
	config, err := s.createConfig(configFactory)
	if err != nil {
		glog.Fatalf("Failed to create scheduler configuration: %v", err)
	}

	eventBroadcaster := record.NewBroadcaster()
	config.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	sched := scheduler.New(config)
	sched.Run()

	select {}
}
// newDockerPuller creates a new instance of the default implementation of DockerPuller.
func newDockerPuller(client DockerInterface, qps float32, burst int) DockerPuller {
	dp := dockerPuller{
		client:  client,
		keyring: credentialprovider.NewDockerKeyring(),
	}

	if qps == 0.0 {
		return dp
	}
	return &throttledDockerPuller{
		puller:  dp,
		limiter: util.NewTokenBucketRateLimiter(qps, burst),
	}
}
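A sketch of the decorator pattern above: zero QPS returns the raw puller, any other value wraps it so each call must first get past the limiter. Puller, rawPuller, and throttledPuller below are illustrative stand-ins for the snippet's DockerPuller types, and golang.org/x/time/rate stands in for the util limiter.

package main

import (
	"errors"
	"fmt"

	"golang.org/x/time/rate"
)

type Puller interface {
	Pull(image string) error
}

type rawPuller struct{}

func (rawPuller) Pull(image string) error {
	fmt.Println("pulling", image)
	return nil
}

type throttledPuller struct {
	puller  Puller
	limiter *rate.Limiter
}

func (p throttledPuller) Pull(image string) error {
	// Non-blocking check, analogous to a TryAccept() on the token bucket.
	if !p.limiter.Allow() {
		return errors.New("pull QPS exceeded")
	}
	return p.puller.Pull(image)
}

func newPuller(qps float32, burst int) Puller {
	if qps == 0.0 {
		return rawPuller{} // zero QPS means "no throttling"
	}
	return throttledPuller{puller: rawPuller{}, limiter: rate.NewLimiter(rate.Limit(qps), burst)}
}

func main() {
	p := newPuller(1.0, 1)
	fmt.Println(p.Pull("nginx:latest")) // first pull passes the limiter
	fmt.Println(p.Pull("redis:latest")) // immediate second pull is rejected
}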
func TestSchedulerRateLimitsBinding(t *testing.T) {
	scheduledPodStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
	scheduledPodLister := &cache.StoreToPodLister{Store: scheduledPodStore}
	queuedPodStore := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	queuedPodLister := &cache.StoreToPodLister{Store: queuedPodStore}
	modeler := NewSimpleModeler(queuedPodLister, scheduledPodLister)

	algo := NewGenericScheduler(
		map[string]algorithm.FitPredicate{},
		[]algorithm.PriorityConfig{},
		modeler.PodLister(),
		rand.New(rand.NewSource(time.Now().UnixNano())))

	// Rate limit to 1 pod
	fr := FakeRateLimiter{util.NewTokenBucketRateLimiter(0.02, 1), []bool{}}
	c := &Config{
		Modeler: modeler,
		NodeLister: algorithm.FakeNodeLister(
			api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
		),
		Algorithm: algo,
		Binder:    fakeBinder{func(b *api.Binding) error { return nil }},
		NextPod: func() *api.Pod {
			return queuedPodStore.Pop().(*api.Pod)
		},
		Error: func(p *api.Pod, err error) {
			t.Errorf("Unexpected error when scheduling pod %+v: %v", p, err)
		},
		Recorder:            &record.FakeRecorder{},
		BindPodsRateLimiter: &fr,
	}

	s := New(c)
	firstPod := podWithID("foo", "")
	secondPod := podWithID("boo", "")
	queuedPodStore.Add(firstPod)
	queuedPodStore.Add(secondPod)

	for i, hitRateLimit := range []bool{true, false} {
		s.scheduleOne()
		if fr.acceptValues[i] != hitRateLimit {
			t.Errorf("Unexpected rate limiting, expect rate limit to be: %v but found it was %v", hitRateLimit, fr.acceptValues[i])
		}
	}
}
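A sketch of the timing this test relies on: a token bucket with burst 1 and 0.02 QPS grants one token immediately and the next only after 50 seconds, so two back-to-back attempts see true then false. golang.org/x/time/rate stands in for util.NewTokenBucketRateLimiter here.

package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	limiter := rate.NewLimiter(rate.Limit(0.02), 1) // one token per 50s, burst 1
	fmt.Println(limiter.Allow())                    // true: the burst token is available
	fmt.Println(limiter.Allow())                    // false: the bucket refills at 0.02/s
}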
// NewRESTClient creates a new RESTClient. This client performs generic REST functions
// such as Get, Put, Post, and Delete on specified paths. Codec controls encoding and
// decoding of responses from the server.
func NewRESTClient(baseURL *url.URL, apiVersion string, c runtime.Codec, maxQPS float32, maxBurst int) *RESTClient {
	base := *baseURL
	if !strings.HasSuffix(base.Path, "/") {
		base.Path += "/"
	}
	base.RawQuery = ""
	base.Fragment = ""

	var throttle util.RateLimiter
	if maxQPS > 0 {
		throttle = util.NewTokenBucketRateLimiter(maxQPS, maxBurst)
	}

	return &RESTClient{
		baseURL:    &base,
		apiVersion: apiVersion,
		Codec:      c,
		Throttle:   throttle,
	}
}
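A sketch of the nil-throttle convention above: the limiter stays nil when maxQPS is not positive, and the request path is assumed to guard on nil before calling Accept(), so "no limiter" means "no limit". The Limiter interface and doRequest below are illustrative, not the original RESTClient API.

package main

import "fmt"

type Limiter interface{ Accept() }

type restClient struct {
	throttle Limiter // nil means unthrottled
}

func (c *restClient) doRequest(path string) {
	if c.throttle != nil {
		c.throttle.Accept() // block until the bucket grants a token
	}
	fmt.Println("GET", path)
}

func main() {
	(&restClient{}).doRequest("/api/v1/pods") // nil throttle: runs immediately
}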
// Run runs the CMServer. This should never exit.
func (s *CMServer) Run(_ []string) error {
	if s.Kubeconfig == "" && s.Master == "" {
		glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
	}

	// This creates a client, first loading any specified kubeconfig
	// file, and then overriding the Master flag, if non-empty.
	kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Kubeconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
	if err != nil {
		return err
	}
	kubeconfig.QPS = 20.0
	kubeconfig.Burst = 30

	kubeClient, err := client.New(kubeconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	go func() {
		mux := http.NewServeMux()
		healthz.InstallHandler(mux)
		if s.EnableProfiling {
			mux.HandleFunc("/debug/pprof/", pprof.Index)
			mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
			mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		}
		mux.Handle("/metrics", prometheus.Handler())
		server := &http.Server{
			Addr:    net.JoinHostPort(s.Address.String(), strconv.Itoa(s.Port)),
			Handler: mux,
		}
		glog.Fatal(server.ListenAndServe())
	}()

	go endpointcontroller.NewEndpointController(kubeClient, s.resyncPeriod).
		Run(s.ConcurrentEndpointSyncs, util.NeverStop)

	go replicationControllerPkg.NewReplicationManager(kubeClient, s.resyncPeriod, replicationControllerPkg.BurstReplicas).
		Run(s.ConcurrentRCSyncs, util.NeverStop)

	if s.TerminatedPodGCThreshold > 0 {
		go gc.New(kubeClient, s.resyncPeriod, s.TerminatedPodGCThreshold).
			Run(util.NeverStop)
	}

	cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	if err != nil {
		glog.Fatalf("Cloud provider could not be initialized: %v", err)
	}

	nodeController := nodecontroller.NewNodeController(cloud, kubeClient,
		s.PodEvictionTimeout, util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
		util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
		s.NodeMonitorGracePeriod, s.NodeStartupGracePeriod, s.NodeMonitorPeriod, &s.ClusterCIDR, s.AllocateNodeCIDRs)
	nodeController.Run(s.NodeSyncPeriod)

	serviceController := servicecontroller.New(cloud, kubeClient, s.ClusterName)
	if err := serviceController.Run(s.ServiceSyncPeriod, s.NodeSyncPeriod); err != nil {
		glog.Errorf("Failed to start service controller: %v", err)
	}

	if s.AllocateNodeCIDRs {
		if cloud == nil {
			glog.Warning("allocate-node-cidrs is set, but no cloud provider specified. Will not manage routes.")
		} else if routes, ok := cloud.Routes(); !ok {
			glog.Warning("allocate-node-cidrs is set, but cloud provider does not support routes. Will not manage routes.")
		} else {
			routeController := routecontroller.New(routes, kubeClient, s.ClusterName, &s.ClusterCIDR)
			routeController.Run(s.NodeSyncPeriod)
		}
	}

	resourcequotacontroller.NewResourceQuotaController(kubeClient).Run(s.ResourceQuotaSyncPeriod)

	// If apiserver is not running we should wait for some time and fail only then. This is particularly
	// important when we start apiserver and controller manager at the same time.
	var versionStrings []string
	err = wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) {
		if versionStrings, err = client.ServerAPIVersions(kubeconfig); err == nil {
			return true, nil
		}
		glog.Errorf("Failed to get api versions from server: %v", err)
		return false, nil
	})
	if err != nil {
		glog.Fatalf("Failed to get api versions from server: %v", err)
	}
	versions := &unversioned.APIVersions{Versions: versionStrings}

	resourceMap, err := kubeClient.Discovery().ServerResources()
	if err != nil {
		glog.Fatalf("Failed to get supported resources from server: %v", err)
	}

	namespacecontroller.NewNamespaceController(kubeClient, versions, s.NamespaceSyncPeriod).Run()

	groupVersion := "extensions/v1beta1"
	resources, found := resourceMap[groupVersion]
	// TODO: this needs to be dynamic so users don't have to restart their controller manager if they change the apiserver
	if containsVersion(versions, groupVersion) && found {
		glog.Infof("Starting %s apis", groupVersion)
		if containsResource(resources, "horizontalpodautoscalers") {
			glog.Infof("Starting horizontal pod controller.")
			podautoscaler.NewHorizontalController(kubeClient, metrics.NewHeapsterMetricsClient(kubeClient)).
				Run(s.HorizontalPodAutoscalerSyncPeriod)
		}

		if containsResource(resources, "daemonsets") {
			glog.Infof("Starting daemon set controller")
			go daemon.NewDaemonSetsController(kubeClient, s.resyncPeriod).
				Run(s.ConcurrentDSCSyncs, util.NeverStop)
		}

		if containsResource(resources, "jobs") {
			glog.Infof("Starting job controller")
			go job.NewJobController(kubeClient, s.resyncPeriod).
				Run(s.ConcurrentJobSyncs, util.NeverStop)
		}

		if containsResource(resources, "deployments") {
			glog.Infof("Starting deployment controller")
			deployment.New(kubeClient).
				Run(s.DeploymentControllerSyncPeriod)
		}
	}

	pvclaimBinder := volumeclaimbinder.NewPersistentVolumeClaimBinder(kubeClient, s.PVClaimBinderSyncPeriod)
	pvclaimBinder.Run()

	pvRecycler, err := volumeclaimbinder.NewPersistentVolumeRecycler(kubeClient, s.PVClaimBinderSyncPeriod, ProbeRecyclableVolumePlugins(s.VolumeConfigFlags))
	if err != nil {
		glog.Fatalf("Failed to start persistent volume recycler: %+v", err)
	}
	pvRecycler.Run()

	var rootCA []byte

	if s.RootCAFile != "" {
		rootCA, err = ioutil.ReadFile(s.RootCAFile)
		if err != nil {
			return fmt.Errorf("error reading root-ca-file at %s: %v", s.RootCAFile, err)
		}
		if _, err := util.CertsFromPEM(rootCA); err != nil {
			return fmt.Errorf("error parsing root-ca-file at %s: %v", s.RootCAFile, err)
		}
	} else {
		rootCA = kubeconfig.CAData
	}

	if len(s.ServiceAccountKeyFile) > 0 {
		privateKey, err := serviceaccount.ReadPrivateKey(s.ServiceAccountKeyFile)
		if err != nil {
			glog.Errorf("Error reading key for service account token controller: %v", err)
		} else {
			serviceaccount.NewTokensController(
				kubeClient,
				serviceaccount.TokensControllerOptions{
					TokenGenerator: serviceaccount.JWTTokenGenerator(privateKey),
					RootCA:         rootCA,
				},
			).Run()
		}
	}

	serviceaccount.NewServiceAccountsController(
		kubeClient,
		serviceaccount.DefaultServiceAccountsControllerOptions(),
	).Run()

	select {}
}
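A sketch of the startup wait used above: check a condition immediately, then once per interval, until it succeeds or the timeout elapses. This is a self-contained stand-in written from the observed usage of wait.PollImmediate in the snippet, not a copy of the library's implementation.

package main

import (
	"errors"
	"fmt"
	"time"
)

// pollImmediate runs condition right away, then every interval, returning the
// condition's error, nil on success, or a timeout error.
func pollImmediate(interval, timeout time.Duration, condition func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for {
		done, err := condition()
		if err != nil || done {
			return err
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for the condition")
		}
		time.Sleep(interval)
	}
}

func main() {
	tries := 0
	err := pollImmediate(10*time.Millisecond, time.Second, func() (bool, error) {
		tries++
		return tries >= 3, nil // succeed on the third attempt
	})
	fmt.Println(tries, err) // 3 <nil>
}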
// RunKubelet is responsible for setting up and running a kubelet. It is used in three different applications:
//   1 Integration tests
//   2 Kubelet binary
//   3 Standalone 'kubernetes' binary
// Eventually, #2 will be replaced with instances of #3
func RunKubelet(kcfg *KubeletConfig, builder KubeletBuilder) error {
	kcfg.Hostname = nodeutil.GetHostname(kcfg.HostnameOverride)

	if len(kcfg.NodeName) == 0 {
		// Query the cloud provider for our node name, default to Hostname
		nodeName := kcfg.Hostname
		if kcfg.Cloud != nil {
			var err error
			instances, ok := kcfg.Cloud.Instances()
			if !ok {
				return fmt.Errorf("failed to get instances from cloud provider")
			}
			nodeName, err = instances.CurrentNodeName(kcfg.Hostname)
			if err != nil {
				return fmt.Errorf("error fetching current instance name from cloud provider: %v", err)
			}
			glog.V(2).Infof("cloud provider determined current node name to be %s", nodeName)
		}
		kcfg.NodeName = nodeName
	}

	eventBroadcaster := record.NewBroadcaster()
	kcfg.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "kubelet", Host: kcfg.NodeName})
	eventBroadcaster.StartLogging(glog.V(3).Infof)
	if kcfg.KubeClient != nil {
		glog.V(4).Infof("Sending events to api server.")
		if kcfg.EventRecordQPS == 0.0 {
			eventBroadcaster.StartRecordingToSink(kcfg.KubeClient.Events(""))
		} else {
			eventClient := *kcfg.KubeClient
			eventClient.Throttle = util.NewTokenBucketRateLimiter(kcfg.EventRecordQPS, kcfg.EventBurst)
			eventBroadcaster.StartRecordingToSink(eventClient.Events(""))
		}
	} else {
		glog.Warning("No api server defined - no events will be sent to API server.")
	}

	privilegedSources := capabilities.PrivilegedSources{
		HostNetworkSources: kcfg.HostNetworkSources,
		HostPIDSources:     kcfg.HostPIDSources,
		HostIPCSources:     kcfg.HostIPCSources,
	}
	capabilities.Setup(kcfg.AllowPrivileged, privilegedSources, 0)

	credentialprovider.SetPreferredDockercfgPath(kcfg.RootDirectory)

	if builder == nil {
		builder = createAndInitKubelet
	}
	if kcfg.OSInterface == nil {
		kcfg.OSInterface = kubecontainer.RealOS{}
	}
	k, podCfg, err := builder(kcfg)
	if err != nil {
		return fmt.Errorf("failed to create kubelet: %v", err)
	}

	util.ApplyRLimitForSelf(kcfg.MaxOpenFiles)

	// process pods and exit.
	if kcfg.Runonce {
		if _, err := k.RunOnce(podCfg.Updates()); err != nil {
			return fmt.Errorf("runonce failed: %v", err)
		}
		glog.Infof("Started kubelet as runonce")
	} else {
		startKubelet(k, podCfg, kcfg)
		glog.Infof("Started kubelet")
	}
	return nil
}
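A sketch of the copy-and-retune pattern above: the kubelet copies the client struct by value so the event path gets its own, more restrictive token bucket without changing the limiter on the main client. The Client type below is illustrative, not the real unversioned.Client, and golang.org/x/time/rate stands in for the util limiter.

package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

type Client struct {
	Throttle *rate.Limiter
}

func main() {
	apiClient := Client{Throttle: rate.NewLimiter(rate.Limit(5), 10)}

	eventClient := apiClient // struct copy; the real code shares the transport
	eventClient.Throttle = rate.NewLimiter(rate.Limit(0.5), 1) // events get their own budget

	fmt.Println(apiClient.Throttle.Limit(), eventClient.Throttle.Limit()) // 5 0.5
}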
func (s *CMServer) Run(_ []string) error {
	if s.Kubeconfig == "" && s.Master == "" {
		glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
	}

	// This creates a client, first loading any specified kubeconfig
	// file, and then overriding the Master flag, if non-empty.
	kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Kubeconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
	if err != nil {
		return err
	}
	kubeconfig.QPS = 20.0
	kubeconfig.Burst = 30

	kubeClient, err := client.New(kubeconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	go func() {
		mux := http.NewServeMux()
		healthz.InstallHandler(mux)
		if s.EnableProfiling {
			profile.InstallHandler(mux)
		}
		mux.Handle("/metrics", prometheus.Handler())
		server := &http.Server{
			Addr:    net.JoinHostPort(s.Address.String(), strconv.Itoa(s.Port)),
			Handler: mux,
		}
		glog.Fatal(server.ListenAndServe())
	}()

	endpoints := s.createEndpointController(kubeClient)
	go endpoints.Run(s.ConcurrentEndpointSyncs, util.NeverStop)

	go replicationcontroller.NewReplicationManager(kubeClient, s.resyncPeriod, replicationcontroller.BurstReplicas).
		Run(s.ConcurrentRCSyncs, util.NeverStop)

	go daemon.NewDaemonSetsController(kubeClient, s.resyncPeriod).
		Run(s.ConcurrentDSCSyncs, util.NeverStop)

	//TODO(jdef) should eventually support more cloud providers here
	if s.CloudProvider != mesos.ProviderName {
		glog.Fatalf("Only provider %v is supported, you specified %v", mesos.ProviderName, s.CloudProvider)
	}
	cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	if err != nil {
		glog.Fatalf("Cloud provider could not be initialized: %v", err)
	}

	nodeController := nodecontroller.NewNodeController(cloud, kubeClient,
		s.PodEvictionTimeout, util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
		util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
		s.NodeMonitorGracePeriod, s.NodeStartupGracePeriod, s.NodeMonitorPeriod, (*net.IPNet)(&s.ClusterCIDR), s.AllocateNodeCIDRs)
	nodeController.Run(s.NodeSyncPeriod)

	serviceController := servicecontroller.New(cloud, kubeClient, s.ClusterName)
	if err := serviceController.Run(s.ServiceSyncPeriod, s.NodeSyncPeriod); err != nil {
		glog.Errorf("Failed to start service controller: %v", err)
	}

	if s.AllocateNodeCIDRs {
		routes, ok := cloud.Routes()
		if !ok {
			glog.Fatal("Cloud provider must support routes if allocate-node-cidrs is set")
		}
		routeController := routecontroller.New(routes, kubeClient, s.ClusterName, (*net.IPNet)(&s.ClusterCIDR))
		routeController.Run(s.NodeSyncPeriod)
	}

	resourceQuotaController := resourcequotacontroller.NewResourceQuotaController(kubeClient)
	resourceQuotaController.Run(s.ResourceQuotaSyncPeriod)

	namespaceController := namespacecontroller.NewNamespaceController(kubeClient, &unversioned.APIVersions{}, s.NamespaceSyncPeriod)
	namespaceController.Run()

	pvclaimBinder := volumeclaimbinder.NewPersistentVolumeClaimBinder(kubeClient, s.PVClaimBinderSyncPeriod)
	pvclaimBinder.Run()

	pvRecycler, err := volumeclaimbinder.NewPersistentVolumeRecycler(kubeClient, s.PVClaimBinderSyncPeriod, app.ProbeRecyclableVolumePlugins(s.VolumeConfigFlags))
	if err != nil {
		glog.Fatalf("Failed to start persistent volume recycler: %+v", err)
	}
	pvRecycler.Run()

	var rootCA []byte

	if s.RootCAFile != "" {
		rootCA, err = ioutil.ReadFile(s.RootCAFile)
		if err != nil {
			return fmt.Errorf("error reading root-ca-file at %s: %v", s.RootCAFile, err)
		}
		if _, err := util.CertsFromPEM(rootCA); err != nil {
			return fmt.Errorf("error parsing root-ca-file at %s: %v", s.RootCAFile, err)
		}
	} else {
		rootCA = kubeconfig.CAData
	}

	if len(s.ServiceAccountKeyFile) > 0 {
		privateKey, err := serviceaccount.ReadPrivateKey(s.ServiceAccountKeyFile)
		if err != nil {
			glog.Errorf("Error reading key for service account token controller: %v", err)
		} else {
			serviceaccount.NewTokensController(
				kubeClient,
				serviceaccount.TokensControllerOptions{
					TokenGenerator: serviceaccount.JWTTokenGenerator(privateKey),
					RootCA:         rootCA,
				},
			).Run()
		}
	}

	serviceaccount.NewServiceAccountsController(
		kubeClient,
		serviceaccount.DefaultServiceAccountsControllerOptions(),
	).Run()

	select {}
}