// RunNodeController starts the node controller
// TODO: handle node CIDR and route allocation
func (c *MasterConfig) RunNodeController() {
	s := c.ControllerManager

	// this cidr has been validated already
	_, clusterCIDR, _ := net.ParseCIDR(s.ClusterCIDR)
	_, serviceCIDR, _ := net.ParseCIDR(s.ServiceCIDR)

	controller, err := nodecontroller.NewNodeController(
		c.CloudProvider,
		clientadapter.FromUnversionedClient(c.KubeClient),
		s.PodEvictionTimeout.Duration,
		// Two separate token buckets with identical QPS/burst settings:
		// one for pod deletion, one for pod eviction.
		flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, int(s.DeletingPodsBurst)),
		flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, int(s.DeletingPodsBurst)), // upstream uses the same ones too
		s.NodeMonitorGracePeriod.Duration,
		s.NodeStartupGracePeriod.Duration,
		s.NodeMonitorPeriod.Duration,
		clusterCIDR,
		serviceCIDR,
		int(s.NodeCIDRMaskSize),
		s.AllocateNodeCIDRs,
	)
	if err != nil {
		// A node controller is essential; abort the process on failure.
		glog.Fatalf("Unable to start node controller: %v", err)
	}

	controller.Run(s.NodeSyncPeriod.Duration)
}
// NewRESTClient creates a new RESTClient. This client performs generic REST functions // such as Get, Put, Post, and Delete on specified paths. Codec controls encoding and // decoding of responses from the server. func NewRESTClient(baseURL *url.URL, versionedAPIPath string, config ContentConfig, maxQPS float32, maxBurst int, client *http.Client) *RESTClient { base := *baseURL if !strings.HasSuffix(base.Path, "/") { base.Path += "/" } base.RawQuery = "" base.Fragment = "" if config.GroupVersion == nil { config.GroupVersion = &unversioned.GroupVersion{} } if len(config.ContentType) == 0 { config.ContentType = "application/json" } var throttle flowcontrol.RateLimiter if maxQPS > 0 { throttle = flowcontrol.NewTokenBucketRateLimiter(maxQPS, maxBurst) } return &RESTClient{ base: &base, versionedAPIPath: versionedAPIPath, contentConfig: config, Throttle: throttle, Client: client, } }
// NewManager creates an NGINX Manager: it seeds default configuration,
// prepares the SSL certificate directory, and loads the NGINX template,
// installing a watcher that hot-reloads the template when it changes on
// disk.
func NewManager(kubeClient *client.Client) *Manager {
	ngx := &Manager{
		ConfigFile:  "/etc/nginx/nginx.conf",
		defCfg:      config.NewDefault(),
		defResolver: strings.Join(getDNSServers(), " "),
		reloadLock:  &sync.Mutex{},
		// At most one nginx reload every 10 seconds (0.1 QPS, burst 1).
		reloadRateLimiter: flowcontrol.NewTokenBucketRateLimiter(0.1, 1),
	}

	ngx.createCertsDir(config.SSLDirectory)

	ngx.sslDHParam = ngx.SearchDHParamFile(config.SSLDirectory)

	// onChange is declared before assignment so the closure can pass
	// itself to NewTemplate as the on-change callback (self-reference).
	var onChange func()
	onChange = func() {
		template, err := ngx_template.NewTemplate(tmplPath, onChange)
		if err != nil {
			// A broken template after startup is non-fatal: keep serving
			// with the currently loaded one and log the problem.
			glog.Warningf("invalid NGINX template: %v", err)
			return
		}

		ngx.template.Close()
		ngx.template = template
		glog.Info("new NGINX template loaded")
	}

	template, err := ngx_template.NewTemplate(tmplPath, onChange)
	if err != nil {
		// The initial template must parse; abort the process otherwise.
		glog.Fatalf("invalid NGINX template: %v", err)
	}

	ngx.template = template
	return ngx
}
// NewForConfig creates a new Clientset for the given config.
//
// The config is shallow-copied so that installing a default rate limiter
// does not mutate the caller's config. Construction stops and returns
// (nil, err) at the first sub-client that fails.
func NewForConfig(c *restclient.Config) (*Clientset, error) {
	configShallowCopy := *c
	// Install a default token-bucket limiter only when the caller asked
	// for QPS limiting but did not supply a limiter of their own.
	if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
		configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
	}
	var cs Clientset
	var err error
	cs.CoreV1Client, err = v1core.NewForConfig(&configShallowCopy)
	if err != nil {
		return nil, err
	}
	cs.BatchV1Client, err = v1batch.NewForConfig(&configShallowCopy)
	if err != nil {
		return nil, err
	}
	cs.ExtensionsV1beta1Client, err = v1beta1extensions.NewForConfig(&configShallowCopy)
	if err != nil {
		return nil, err
	}
	cs.FederationV1beta1Client, err = v1beta1federation.NewForConfig(&configShallowCopy)
	if err != nil {
		return nil, err
	}

	cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
	if err != nil {
		glog.Errorf("failed to create the DiscoveryClient: %v", err)
		return nil, err
	}
	return &cs, nil
}
func main() { var ingClient client.IngressInterface if kubeClient, err := client.NewInCluster(); err != nil { log.Fatalf("Failed to create client: %v.", err) } else { ingClient = kubeClient.Extensions().Ingress(api.NamespaceAll) } tmpl, _ := template.New("nginx").Parse(nginxConf) rateLimiter := flowcontrol.NewTokenBucketRateLimiter(0.1, 1) known := &extensions.IngressList{} // Controller loop shellOut("nginx") for { rateLimiter.Accept() ingresses, err := ingClient.List(api.ListOptions{}) if err != nil { log.Printf("Error retrieving ingresses: %v", err) continue } if reflect.DeepEqual(ingresses.Items, known.Items) { continue } known = ingresses if w, err := os.Create("/etc/nginx/nginx.conf"); err != nil { log.Fatalf("Failed to open %v: %v", nginxConf, err) } else if err := tmpl.Execute(w, ingresses); err != nil { log.Fatalf("Failed to write template %v", err) } shellOut("nginx -s reload") } }
// CreateDeleteController constructs a BuildPodDeleteController
func (factory *BuildPodControllerFactory) CreateDeleteController() controller.RunnableController {
	client := ControllerClient{factory.KubeClient, factory.OSClient}
	// A DeltaFIFO is used (rather than a plain FIFO) so that deletion
	// events are visible to the handler as cache.Deleted deltas.
	queue := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, keyListerGetter{})
	cache.NewReflector(&buildPodDeleteLW{client, queue}, &kapi.Pod{}, queue, 5*time.Minute).RunUntil(factory.Stop)

	buildPodDeleteController := &buildcontroller.BuildPodDeleteController{
		BuildStore:   factory.buildStore,
		BuildUpdater: factory.BuildUpdater,
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			// Deletions are not retried on failure.
			controller.RetryNever,
			flowcontrol.NewTokenBucketRateLimiter(1, 10)),
		Handle: func(obj interface{}) error {
			// Handle only the first Deleted delta in the batch; other
			// delta types are ignored.
			deltas := obj.(cache.Deltas)
			for _, delta := range deltas {
				if delta.Type == cache.Deleted {
					return buildPodDeleteController.HandleBuildPodDeletion(delta.Object.(*kapi.Pod))
				}
			}
			return nil
		},
	}
}
// Create constructs a BuildPodController
func (factory *BuildPodControllerFactory) Create() controller.RunnableController {
	// Builds are mirrored into a store (for lookups by the controller);
	// pods feed the work queue.
	factory.buildStore = cache.NewStore(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&buildLW{client: factory.OSClient}, &buildapi.Build{}, factory.buildStore, 2*time.Minute).RunUntil(factory.Stop)

	queue := cache.NewResyncableFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&podLW{client: factory.KubeClient}, &kapi.Pod{}, queue, 2*time.Minute).RunUntil(factory.Stop)

	client := ControllerClient{factory.KubeClient, factory.OSClient}
	buildPodController := &buildcontroller.BuildPodController{
		BuildStore:   factory.buildStore,
		BuildUpdater: factory.BuildUpdater,
		SecretClient: factory.KubeClient,
		PodManager:   client,
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			retryFunc("BuildPod", nil),
			flowcontrol.NewTokenBucketRateLimiter(1, 10)),
		Handle: func(obj interface{}) error {
			pod := obj.(*kapi.Pod)
			return buildPodController.HandlePod(pod)
		},
	}
}
// Create creates a new ImageChangeController which is used to trigger builds when a new
// image is available
func (factory *ImageChangeControllerFactory) Create() controller.RunnableController {
	// Image streams feed the work queue; build configs are mirrored into
	// a store for lookups by the controller.
	queue := cache.NewResyncableFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&imageStreamLW{factory.Client}, &imageapi.ImageStream{}, queue, 2*time.Minute).RunUntil(factory.Stop)

	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&buildConfigLW{client: factory.Client}, &buildapi.BuildConfig{}, store, 2*time.Minute).RunUntil(factory.Stop)

	imageChangeController := &buildcontroller.ImageChangeController{
		BuildConfigStore:        store,
		BuildConfigInstantiator: factory.BuildConfigInstantiator,
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			// Fatal image-change errors are not retried.
			retryFunc("ImageStream update", func(err error) bool {
				_, isFatal := err.(buildcontroller.ImageChangeControllerFatalError)
				return isFatal
			}),
			flowcontrol.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			imageRepo := obj.(*imageapi.ImageStream)
			return imageChangeController.HandleImageRepo(imageRepo)
		},
	}
}
// SwapLimiter safely swaps current limiter for this queue with the passed one if capacities or qps's differ.
func (q *RateLimitedTimedQueue) SwapLimiter(newQPS float32) {
	q.limiterLock.Lock()
	defer q.limiterLock.Unlock()
	// No-op when the requested QPS already matches the current limiter.
	if q.limiter.QPS() == newQPS {
		return
	}
	var newLimiter flowcontrol.RateLimiter
	if newQPS <= 0 {
		// Non-positive QPS disables the queue entirely via a limiter
		// that never accepts.
		newLimiter = flowcontrol.NewFakeNeverRateLimiter()
	} else {
		newLimiter = flowcontrol.NewTokenBucketRateLimiter(newQPS, evictionRateLimiterBurst)
	}
	// If we're currently waiting on limiter, we drain the new one - this is a good approach when Burst value is 1
	// TODO: figure out if we need to support higher Burst values and decide on the drain logic, should we keep:
	// - saturation (percentage of used tokens)
	// - number of used tokens
	// - number of available tokens
	// - something else
	for q.limiter.Saturation() > newLimiter.Saturation() {
		// Check if we're not using fake limiter
		previousSaturation := newLimiter.Saturation()
		newLimiter.TryAccept()
		// It's a fake limiter
		// (fake limiters report constant saturation, so TryAccept not
		// changing it means draining is impossible — bail out).
		if newLimiter.Saturation() == previousSaturation {
			break
		}
	}
	q.limiter.Stop()
	q.limiter = newLimiter
}
// NewForConfig creates a new Clientset for the given config.
//
// NOTE(review): unlike sibling NewForConfig implementations in this tree,
// error paths here return the partially-constructed clientset (&clientset)
// rather than nil, and a DiscoveryClient failure is only logged before
// falling through to the final `return &clientset, err` — callers must
// check err, not just the pointer. Presumably intentional upstream
// behavior; confirm before changing.
func NewForConfig(c *restclient.Config) (*Clientset, error) {
	configShallowCopy := *c
	// Install a default token-bucket limiter only when QPS limiting was
	// requested and no limiter was supplied.
	if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
		configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
	}
	var clientset Clientset
	var err error
	clientset.CoreClient, err = unversionedcore.NewForConfig(&configShallowCopy)
	if err != nil {
		return &clientset, err
	}
	clientset.ExtensionsClient, err = unversionedextensions.NewForConfig(&configShallowCopy)
	if err != nil {
		return &clientset, err
	}
	clientset.AutoscalingClient, err = unversionedautoscaling.NewForConfig(&configShallowCopy)
	if err != nil {
		return &clientset, err
	}
	clientset.BatchClient, err = unversionedbatch.NewForConfig(&configShallowCopy)
	if err != nil {
		return &clientset, err
	}
	clientset.RbacClient, err = unversionedrbac.NewForConfig(&configShallowCopy)
	if err != nil {
		return &clientset, err
	}

	clientset.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
	if err != nil {
		glog.Errorf("failed to create the DiscoveryClient: %v", err)
	}
	return &clientset, err
}
// Create creates a new ConfigChangeController which is used to trigger builds on creation
func (factory *BuildConfigControllerFactory) Create() controller.RunnableController {
	queue := cache.NewResyncableFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&buildConfigLW{client: factory.Client}, &buildapi.BuildConfig{}, queue, 2*time.Minute).RunUntil(factory.Stop)

	// Events emitted by the controller are recorded against the
	// "build-config-controller" component.
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(factory.KubeClient.Events(""))

	bcController := &buildcontroller.BuildConfigController{
		BuildConfigInstantiator: factory.BuildConfigInstantiator,
		Recorder:                eventBroadcaster.NewRecorder(kapi.EventSource{Component: "build-config-controller"}),
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			// Fatal errors (as classified by the build controller
			// package) are not retried.
			retryFunc("BuildConfig", buildcontroller.IsFatal),
			flowcontrol.NewTokenBucketRateLimiter(1, 10)),
		Handle: func(obj interface{}) error {
			bc := obj.(*buildapi.BuildConfig)
			return bcController.HandleBuildConfig(bc)
		},
	}
}
// NewRESTClient creates a new RESTClient. This client performs generic REST functions
// such as Get, Put, Post, and Delete on specified paths. Codec controls encoding and
// decoding of responses from the server.
func NewRESTClient(baseURL *url.URL, versionedAPIPath string, config ContentConfig, maxQPS float32, maxBurst int, rateLimiter flowcontrol.RateLimiter, client *http.Client) (*RESTClient, error) {
	// Normalize the base URL: directory-style path, no query, no fragment.
	base := *baseURL
	if !strings.HasSuffix(base.Path, "/") {
		base.Path += "/"
	}
	base.RawQuery = ""
	base.Fragment = ""

	if config.GroupVersion == nil {
		config.GroupVersion = &unversioned.GroupVersion{}
	}
	if len(config.ContentType) == 0 {
		config.ContentType = "application/json"
	}
	serializers, err := createSerializers(config)
	if err != nil {
		return nil, err
	}

	// A caller-supplied rateLimiter takes precedence; otherwise a
	// token-bucket limiter is built from maxQPS/maxBurst when maxQPS > 0.
	var throttle flowcontrol.RateLimiter
	if maxQPS > 0 && rateLimiter == nil {
		throttle = flowcontrol.NewTokenBucketRateLimiter(maxQPS, maxBurst)
	} else if rateLimiter != nil {
		throttle = rateLimiter
	}
	return &RESTClient{
		base:             &base,
		versionedAPIPath: versionedAPIPath,
		contentConfig:    config,
		serializers:      *serializers,
		createBackoffMgr: readExpBackoffConfig,
		Throttle:         throttle,
		Client:           client,
	}, nil
}
// NewForConfig creates a new Clientset for the given config.
//
// The config is shallow-copied so that installing a default rate limiter
// does not mutate the caller's config; construction stops at the first
// sub-client that fails.
func NewForConfig(c *restclient.Config) (*Clientset, error) {
	configShallowCopy := *c
	// Install a default token-bucket limiter only when QPS limiting was
	// requested and no limiter was supplied.
	if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
		configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
	}
	var clientset Clientset
	var err error
	clientset.FederationClient, err = unversionedfederation.NewForConfig(&configShallowCopy)
	if err != nil {
		return nil, err
	}
	clientset.CoreClient, err = unversionedcore.NewForConfig(&configShallowCopy)
	if err != nil {
		return nil, err
	}
	clientset.ExtensionsClient, err = unversionedextensions.NewForConfig(&configShallowCopy)
	if err != nil {
		return nil, err
	}

	clientset.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
	if err != nil {
		glog.Errorf("failed to create the DiscoveryClient: %v", err)
		return nil, err
	}
	return &clientset, nil
}
// Create creates a new ImageChangeController which is used to trigger builds when a new
// image is available
func (factory *ImageChangeControllerFactory) Create() controller.RunnableController {
	queue := cache.NewResyncableFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&imageStreamLW{factory.Client}, &imageapi.ImageStream{}, queue, 2*time.Minute).RunUntil(factory.Stop)

	imageChangeController := &buildcontroller.ImageChangeController{
		BuildConfigIndex:        factory.BuildConfigIndex,
		BuildConfigInstantiator: factory.BuildConfigInstantiator,
	}

	// Wait for the bc store to sync before starting any work in this controller.
	factory.waitForSyncedStores()

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			retryFunc("ImageStream update", nil),
			flowcontrol.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			imageRepo := obj.(*imageapi.ImageStream)
			return imageChangeController.HandleImageStream(imageRepo)
		},
	}
}
// NewForConfig creates a new Clientset for the given config.
//
// The config is shallow-copied so that installing a default rate limiter
// does not mutate the caller's config. Each API-group client is built in
// turn; construction stops and returns (nil, err) at the first failure.
func NewForConfig(c *restclient.Config) (*Clientset, error) {
	configShallowCopy := *c
	// Install a default token-bucket limiter only when QPS limiting was
	// requested and no limiter was supplied.
	if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
		configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
	}
	var clientset Clientset
	var err error
	clientset.CoreClient, err = v1core.NewForConfig(&configShallowCopy)
	if err != nil {
		return nil, err
	}
	clientset.AppsClient, err = v1alpha1apps.NewForConfig(&configShallowCopy)
	if err != nil {
		return nil, err
	}
	clientset.AuthenticationClient, err = v1beta1authentication.NewForConfig(&configShallowCopy)
	if err != nil {
		return nil, err
	}
	clientset.AuthorizationClient, err = v1beta1authorization.NewForConfig(&configShallowCopy)
	if err != nil {
		return nil, err
	}
	clientset.AutoscalingClient, err = v1autoscaling.NewForConfig(&configShallowCopy)
	if err != nil {
		return nil, err
	}
	clientset.BatchClient, err = v1batch.NewForConfig(&configShallowCopy)
	if err != nil {
		return nil, err
	}
	clientset.CertificatesClient, err = v1alpha1certificates.NewForConfig(&configShallowCopy)
	if err != nil {
		return nil, err
	}
	clientset.ExtensionsClient, err = v1beta1extensions.NewForConfig(&configShallowCopy)
	if err != nil {
		return nil, err
	}
	clientset.PolicyClient, err = v1alpha1policy.NewForConfig(&configShallowCopy)
	if err != nil {
		return nil, err
	}
	clientset.RbacClient, err = v1alpha1rbac.NewForConfig(&configShallowCopy)
	if err != nil {
		return nil, err
	}
	clientset.StorageClient, err = v1beta1storage.NewForConfig(&configShallowCopy)
	if err != nil {
		return nil, err
	}

	clientset.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
	if err != nil {
		glog.Errorf("failed to create the DiscoveryClient: %v", err)
		return nil, err
	}
	return &clientset, nil
}
// Create creates an ImageChangeController.
func (factory *ImageChangeControllerFactory) Create() controller.RunnableController {
	// Image streams feed the work queue.
	imageStreamLW := &cache.ListWatch{
		ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
			return factory.Client.ImageStreams(kapi.NamespaceAll).List(options)
		},
		WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
			return factory.Client.ImageStreams(kapi.NamespaceAll).Watch(options)
		},
	}
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(imageStreamLW, &imageapi.ImageStream{}, queue, 2*time.Minute).Run()

	// Deployment configs are mirrored into a local store so the
	// controller can list them without hitting the API server.
	deploymentConfigLW := &cache.ListWatch{
		ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).List(options)
		},
		WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).Watch(options)
		},
	}
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(deploymentConfigLW, &deployapi.DeploymentConfig{}, store, 2*time.Minute).Run()

	changeController := &ImageChangeController{
		// listDeploymentConfigs snapshots the local store as a typed slice.
		listDeploymentConfigs: func() ([]*deployapi.DeploymentConfig, error) {
			configs := []*deployapi.DeploymentConfig{}
			objs := store.List()
			for _, obj := range objs {
				configs = append(configs, obj.(*deployapi.DeploymentConfig))
			}
			return configs, nil
		},
		client: factory.Client,
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			// Retry policy: never retry fatal errors; otherwise retry
			// exactly once (retries.Count > 0 stops the second retry).
			func(obj interface{}, err error, retries controller.Retry) bool {
				utilruntime.HandleError(err)
				if _, isFatal := err.(fatalError); isFatal {
					return false
				}
				if retries.Count > 0 {
					return false
				}
				return true
			},
			flowcontrol.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			repo := obj.(*imageapi.ImageStream)
			return changeController.Handle(repo)
		},
	}
}
// throttleImagePulling wraps kubecontainer.ImageService to throttle image // pulling based on the given QPS and burst limits. If QPS is zero, defaults // to no throttling. func throttleImagePulling(imageService kubecontainer.ImageService, qps float32, burst int) kubecontainer.ImageService { if qps == 0.0 { return imageService } return &throttledImageService{ ImageService: imageService, limiter: flowcontrol.NewTokenBucketRateLimiter(qps, burst), } }
// Create creates an ImportController.
func (f *ImportControllerFactory) Create() (controller.RunnableController, controller.StoppableController) {
	lw := &cache.ListWatch{
		ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
			return f.Client.ImageStreams(kapi.NamespaceAll).List(options)
		},
		WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
			return f.Client.ImageStreams(kapi.NamespaceAll).Watch(options)
		},
	}
	q := cache.NewResyncableFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(lw, &api.ImageStream{}, q, f.ResyncInterval).Run()

	// instantiate a scheduled importer using a number of buckets
	buckets := 4
	switch {
	case f.MinimumCheckInterval > time.Hour:
		buckets = 8
	case f.MinimumCheckInterval < 10*time.Minute:
		buckets = 2
	}
	// One full pass over all buckets per MinimumCheckInterval.
	// NOTE(review): if MinimumCheckInterval is under one second, `seconds`
	// is 0 and bucketQPS becomes +Inf — presumably the interval is always
	// minutes-scale; confirm against callers.
	seconds := f.MinimumCheckInterval / time.Second
	bucketQPS := 1.0 / float32(seconds) * float32(buckets)

	limiter := flowcontrol.NewTokenBucketRateLimiter(bucketQPS, 1)
	b := newScheduled(f.ScheduleEnabled, f.Client, buckets, limiter, f.ImportRateLimiter)

	// instantiate an importer for changes that happen to the image stream
	changed := &controller.RetryController{
		Queue: q,
		RetryManager: controller.NewQueueRetryManager(
			q,
			cache.MetaNamespaceKeyFunc,
			// Retry any failure up to 5 times.
			func(obj interface{}, err error, retries controller.Retry) bool {
				utilruntime.HandleError(err)
				return retries.Count < 5
			},
			flowcontrol.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: b.Handle,
	}

	return changed, b.scheduler
}
func NewAltTokenSource(tokenURL, tokenBody string) oauth2.TokenSource { client := oauth2.NewClient(oauth2.NoContext, google.ComputeTokenSource("")) a := &AltTokenSource{ oauthClient: client, tokenURL: tokenURL, tokenBody: tokenBody, throttle: flowcontrol.NewTokenBucketRateLimiter(tokenURLQPS, tokenURLBurst), } return oauth2.ReuseTokenSource(nil, a) }
// NewRateLimitedFunction creates a new rate limited function. func NewRateLimitedFunction(keyFunc kcache.KeyFunc, interval int, handlerFunc HandlerFunc) *RateLimitedFunction { fifo := kcache.NewFIFO(keyFunc) qps := float32(1000.0) // Call rate per second (SLA). if interval > 0 { qps = float32(1.0 / float32(interval)) } limiter := flowcontrol.NewTokenBucketRateLimiter(qps, 1) return &RateLimitedFunction{handlerFunc, fifo, limiter} }
// Create constructs a BuildController
func (factory *BuildControllerFactory) Create() controller.RunnableController {
	queue := cache.NewResyncableFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&buildLW{client: factory.OSClient}, &buildapi.Build{}, queue, 2*time.Minute).RunUntil(factory.Stop)

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(&kcoreclient.EventSinkImpl{Interface: factory.KubeClient.Core().Events("")})

	client := ControllerClient{factory.KubeClient, factory.OSClient}
	buildController := &buildcontroller.BuildController{
		BuildUpdater:      factory.BuildUpdater,
		BuildLister:       factory.BuildLister,
		ImageStreamClient: client,
		PodManager:        client,
		RunPolicies:       policy.GetAllRunPolicies(factory.BuildLister, factory.BuildUpdater),
		// The concrete build strategy is selected per-build by type
		// (docker/source/custom).
		BuildStrategy: &typeBasedFactoryStrategy{
			DockerBuildStrategy: factory.DockerBuildStrategy,
			SourceBuildStrategy: factory.SourceBuildStrategy,
			CustomBuildStrategy: factory.CustomBuildStrategy,
		},
		Recorder:       eventBroadcaster.NewRecorder(kapi.EventSource{Component: "build-controller"}),
		BuildDefaults:  factory.BuildDefaults,
		BuildOverrides: factory.BuildOverrides,
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			limitedLogAndRetry(factory.BuildUpdater, 30*time.Minute),
			flowcontrol.NewTokenBucketRateLimiter(1, 10)),
		Handle: func(obj interface{}) error {
			build := obj.(*buildapi.Build)
			err := buildController.HandleBuild(build)
			if err != nil {
				// Update the build status message only if it changed.
				if msg := errors.ErrorToSentence(err); build.Status.Message != msg {
					// Set default Reason.
					if len(build.Status.Reason) == 0 {
						build.Status.Reason = buildapi.StatusReasonError
					}
					build.Status.Message = msg
					// Best-effort status update: a failure here is logged
					// but does not mask the original handler error.
					if err := buildController.BuildUpdater.Update(build.Namespace, build); err != nil {
						glog.V(2).Infof("Failed to update status message of Build %s/%s: %v", build.Namespace, build.Name, err)
					}
					buildController.Recorder.Eventf(build, kapi.EventTypeWarning, "HandleBuildError", "Build has error: %v", err)
				}
			}
			// The original error is returned so the retry manager can act.
			return err
		},
	}
}
// TestQueueRetryManager_retries verifies that the retry manager requeues a
// failed object at most `retries` times and then discards its retry state.
func TestQueueRetryManager_retries(t *testing.T) {
	retries := 5
	requeued := map[string]int{}

	manager := &QueueRetryManager{
		queue: &testFifo{
			// Track re-queues
			AddIfNotPresentFunc: func(obj interface{}) error {
				id := obj.(testObj).id
				if _, exists := requeued[id]; !exists {
					requeued[id] = 0
				}
				requeued[id] = requeued[id] + 1
				return nil
			},
		},
		keyFunc: func(obj interface{}) (string, error) {
			return obj.(testObj).id, nil
		},
		// NOTE(review): the 5 here shadows the local `retries` variable's
		// value rather than referencing it — keep the two in sync.
		retryFunc: func(obj interface{}, err error, r Retry) bool {
			return r.Count < 5 && !r.StartTimestamp.IsZero()
		},
		retries: make(map[string]Retry),
		// High QPS/burst so the limiter never delays the test.
		limiter: flowcontrol.NewTokenBucketRateLimiter(1000, 1000),
	}

	objects := []testObj{
		{"a", 1},
		{"b", 2},
		{"c", 3},
	}

	// Retry one more than the max
	for _, obj := range objects {
		for i := 0; i < retries+1; i++ {
			manager.Retry(obj, nil)
		}
	}

	// Should only have re-queued up to the max retry setting
	for _, obj := range objects {
		if e, a := retries, requeued[obj.id]; e != a {
			t.Fatalf("expected requeue count %d for obj %s, got %d", e, obj.id, a)
		}
	}

	// Should have no more state since all objects were retried beyond max
	if e, a := 0, len(manager.retries); e != a {
		t.Fatalf("expected retry len %d, got %d", e, a)
	}
}
// Create creates a DeploymentConfigController.
func (factory *DeploymentConfigControllerFactory) Create() controller.RunnableController {
	deploymentConfigLW := &cache.ListWatch{
		ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).List(options)
		},
		WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).Watch(options)
		},
	}
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(deploymentConfigLW, &deployapi.DeploymentConfig{}, queue, 2*time.Minute).Run()

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(factory.KubeClient.Events(""))
	recorder := eventBroadcaster.NewRecorder(kapi.EventSource{Component: "deploymentconfig-controller"})

	configController := NewDeploymentConfigController(factory.KubeClient, factory.Client, factory.Codec, recorder)

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			// Retry policy: fatal errors are never retried, transient
			// errors are retried indefinitely, anything else once.
			func(obj interface{}, err error, retries controller.Retry) bool {
				config := obj.(*deployapi.DeploymentConfig)
				// no retries for a fatal error
				if _, isFatal := err.(fatalError); isFatal {
					glog.V(4).Infof("Will not retry fatal error for deploymentConfig %s/%s: %v", config.Namespace, config.Name, err)
					utilruntime.HandleError(err)
					return false
				}
				// infinite retries for a transient error
				if _, isTransient := err.(transientError); isTransient {
					glog.V(4).Infof("Retrying deploymentConfig %s/%s with error: %v", config.Namespace, config.Name, err)
					return true
				}
				utilruntime.HandleError(err)
				// no retries for anything else
				if retries.Count > 0 {
					return false
				}
				return true
			},
			flowcontrol.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			config := obj.(*deployapi.DeploymentConfig)
			return configController.Handle(config)
		},
	}
}
// newDockerPuller creates a new instance of the default implementation of DockerPuller. func newDockerPuller(client DockerInterface, qps float32, burst int) DockerPuller { dp := dockerPuller{ client: client, keyring: credentialprovider.NewDockerKeyring(), } if qps == 0.0 { return dp } return &throttledDockerPuller{ puller: dp, limiter: flowcontrol.NewTokenBucketRateLimiter(qps, burst), } }
// NewManager ... func NewManager(kubeClient *client.Client) *Manager { ngx := &Manager{ ConfigFile: "/etc/nginx/nginx.conf", defCfg: config.NewDefault(), defResolver: strings.Join(getDNSServers(), " "), reloadLock: &sync.Mutex{}, reloadRateLimiter: flowcontrol.NewTokenBucketRateLimiter(0.1, 1), } ngx.createCertsDir(config.SSLDirectory) ngx.sslDHParam = ngx.SearchDHParamFile(config.SSLDirectory) ngx.loadTemplate() return ngx }
// Create creates a DeploymentConfigChangeController.
func (factory *DeploymentConfigChangeControllerFactory) Create() controller.RunnableController {
	deploymentConfigLW := &cache.ListWatch{
		ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).List(options)
		},
		WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).Watch(options)
		},
	}
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(deploymentConfigLW, &deployapi.DeploymentConfig{}, queue, 2*time.Minute).Run()

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(factory.KubeClient.Events(""))

	changeController := &DeploymentConfigChangeController{
		client:  factory.Client,
		kClient: factory.KubeClient,
		// decodeConfig recovers the DeploymentConfig encoded on a
		// deployment's ReplicationController.
		decodeConfig: func(deployment *kapi.ReplicationController) (*deployapi.DeploymentConfig, error) {
			return deployutil.DecodeDeploymentConfig(deployment, factory.Codec)
		},
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			// Retry policy: never retry fatal errors; otherwise retry
			// exactly once.
			func(obj interface{}, err error, retries controller.Retry) bool {
				utilruntime.HandleError(err)
				if _, isFatal := err.(fatalError); isFatal {
					return false
				}
				if retries.Count > 0 {
					return false
				}
				return true
			},
			flowcontrol.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			config := obj.(*deployapi.DeploymentConfig)
			return changeController.Handle(config)
		},
	}
}
// NewForConfig creates a new Clientset for the given config.
//
// NOTE(review): like its sibling in this tree, error paths return the
// partially-constructed clientset (&clientset) rather than nil, and a
// DiscoveryClient failure is only logged before the final
// `return &clientset, err` — callers must check err, not the pointer.
func NewForConfig(c *restclient.Config) (*Clientset, error) {
	configShallowCopy := *c
	// Install a default token-bucket limiter only when QPS limiting was
	// requested and no limiter was supplied.
	if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
		configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
	}
	var clientset Clientset
	var err error
	clientset.TestgroupClient, err = unversionedtestgroup.NewForConfig(&configShallowCopy)
	if err != nil {
		return &clientset, err
	}

	clientset.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
	if err != nil {
		glog.Errorf("failed to create the DiscoveryClient: %v", err)
	}
	return &clientset, err
}
// ClientSetRateLimiterTest verifies that a RateLimiter supplied via
// restclient.Config is propagated unchanged to the generated clientset's
// per-group REST clients.
//
// NOTE(review): the name does not begin with "Test", so `go test` will not
// run it directly — presumably it is invoked from a Test* wrapper
// elsewhere; confirm before renaming.
func ClientSetRateLimiterTest(t *testing.T) {
	rateLimiter := flowcontrol.NewTokenBucketRateLimiter(1.0, 10)
	config := restclient.Config{
		RateLimiter: rateLimiter,
	}
	if err := restclient.SetKubernetesDefaults(&config); err != nil {
		t.Errorf("setting defaults failed for %#v: %v", config, err)
	}
	clientSet, err := NewForConfig(&config)
	if err != nil {
		t.Errorf("creating clientset for config %v failed: %v", config, err)
	}
	// The limiter must be the exact same instance, not an equivalent copy.
	testGroupThrottler := clientSet.Testgroup().GetRESTClient().GetRateLimiter()

	if rateLimiter != testGroupThrottler {
		t.Errorf("Clients in client set should use rateLimiter passed in config:\noriginal: %v\ntestGroup: %v", rateLimiter, testGroupThrottler)
	}
}
// RunImageImportController starts the image import trigger controller process.
func (c *MasterConfig) RunImageImportController() {
	osclient := c.ImageImportControllerClient()
	// Convert the configured per-minute budget into a per-second QPS, with
	// a burst of two minutes' worth of imports.
	importRate := float32(c.Options.ImagePolicyConfig.MaxScheduledImageImportsPerMinute) / float32(time.Minute/time.Second)
	importBurst := c.Options.ImagePolicyConfig.MaxScheduledImageImportsPerMinute * 2
	factory := imagecontroller.ImportControllerFactory{
		Client:               osclient,
		ResyncInterval:       10 * time.Minute,
		MinimumCheckInterval: time.Duration(c.Options.ImagePolicyConfig.ScheduledImageImportMinimumIntervalSeconds) * time.Second,
		ImportRateLimiter:    flowcontrol.NewTokenBucketRateLimiter(importRate, importBurst),
		ScheduleEnabled:      !c.Options.ImagePolicyConfig.DisableScheduledImport,
	}
	controller, scheduledController := factory.Create()
	controller.Run()
	if c.Options.ImagePolicyConfig.DisableScheduledImport {
		glog.V(2).Infof("Scheduled image import is disabled - the 'scheduled' flag on image streams will be ignored")
	} else {
		scheduledController.RunUntil(utilwait.NeverStop)
	}
}
// NewForConfig creates a new Clientset for the given config.
//
// The config is shallow-copied so that installing a default rate limiter
// does not mutate the caller's config; construction stops and returns
// (nil, err) at the first sub-client that fails.
func NewForConfig(c *restclient.Config) (*Clientset, error) {
	configShallowCopy := *c
	// Install a default token-bucket limiter only when QPS limiting was
	// requested and no limiter was supplied.
	if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
		configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
	}
	var cs Clientset
	var err error
	cs.ApiregistrationV1alpha1Client, err = v1alpha1apiregistration.NewForConfig(&configShallowCopy)
	if err != nil {
		return nil, err
	}

	cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
	if err != nil {
		glog.Errorf("failed to create the DiscoveryClient: %v", err)
		return nil, err
	}
	return &cs, nil
}