func New(kubeClient client.Interface, resyncPeriod controller.ResyncPeriodFunc, threshold int) *GCController {
    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartLogging(glog.Infof)
    eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

    gcc := &GCController{
        kubeClient: kubeClient,
        podControl: controller.RealPodControl{
            Recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "pod-garbage-collector"}),
            KubeClient: kubeClient,
        },
        threshold: threshold,
    }

    terminatedSelector := compileTerminatedPodSelector()

    gcc.podStore.Store, gcc.podStoreSyncer = framework.NewInformer(
        &cache.ListWatch{
            ListFunc: func() (runtime.Object, error) {
                return gcc.kubeClient.Pods(api.NamespaceAll).List(labels.Everything(), terminatedSelector)
            },
            WatchFunc: func(rv string) (watch.Interface, error) {
                return gcc.kubeClient.Pods(api.NamespaceAll).Watch(labels.Everything(), terminatedSelector, rv)
            },
        },
        &api.Pod{},
        resyncPeriod(),
        framework.ResourceEventHandlerFuncs{},
    )
    return gcc
}
// NewDeploymentConfigController creates a new DeploymentConfigController.
func NewDeploymentConfigController(dcInformer, rcInformer, podInformer framework.SharedIndexInformer, oc osclient.Interface, kc kclient.Interface, codec runtime.Codec) *DeploymentConfigController {
    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartRecordingToSink(kc.Events(""))
    recorder := eventBroadcaster.NewRecorder(kapi.EventSource{Component: "deploymentconfig-controller"})

    c := &DeploymentConfigController{
        dn: oc,
        rn: kc,

        queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),

        recorder: recorder,
        codec:    codec,
    }

    c.dcStore.Indexer = dcInformer.GetIndexer()
    dcInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
        AddFunc:    c.addDeploymentConfig,
        UpdateFunc: c.updateDeploymentConfig,
        DeleteFunc: c.deleteDeploymentConfig,
    })
    c.rcStore.Indexer = rcInformer.GetIndexer()
    rcInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
        AddFunc:    c.addReplicationController,
        UpdateFunc: c.updateReplicationController,
        DeleteFunc: c.deleteReplicationController,
    })
    c.podStore.Indexer = podInformer.GetIndexer()

    c.dcStoreSynced = dcInformer.HasSynced
    c.rcStoreSynced = rcInformer.HasSynced
    c.podStoreSynced = podInformer.HasSynced

    return c
}
func NewTurboScheduler(kubeClient *client.Client, vmturboMeta *vmtmeta.VMTMeta) *TurboScheduler {
    scheduledPodLister := &cache.StoreToPodLister{}
    podQueue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
    modeler := scheduler.NewSimpleModeler(&cache.StoreToPodLister{Store: podQueue}, scheduledPodLister)

    bindPodsQPS := float32(15.0)
    bindPodsBurst := 20
    rateLimiter := util.NewTokenBucketRateLimiter(bindPodsQPS, bindPodsBurst)

    config := &Config{
        Modeler:             modeler,
        Binder:              &binder{kubeClient},
        BindPodsRateLimiter: rateLimiter,
    }

    eventBroadcaster := record.NewBroadcaster()
    config.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "turboscheduler"})
    eventBroadcaster.StartLogging(glog.Infof)
    eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

    vmtSched := vmtscheduler.NewVMTScheduler(kubeClient, vmturboMeta)
    glog.V(3).Infof("VMTScheduler is set: %++v", vmtSched)

    defaultSched := defaultscheduler.NewDefaultScheduler(kubeClient)
    glog.V(3).Infof("DefaultScheduler is set: %++v", defaultSched)

    return &TurboScheduler{
        config:           config,
        vmtScheduler:     vmtSched,
        defaultScheduler: defaultSched,
    }
}
// NewDeploymentController creates a new DeploymentController.
func NewDeploymentController(rcInformer, podInformer framework.SharedIndexInformer, kc kclient.Interface, sa, image string, env []kapi.EnvVar, codec runtime.Codec) *DeploymentController {
    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartRecordingToSink(kc.Events(""))
    recorder := eventBroadcaster.NewRecorder(kapi.EventSource{Component: "deployments-controller"})

    c := &DeploymentController{
        rn: kc,
        pn: kc,

        queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),

        serviceAccount: sa,
        deployerImage:  image,
        environment:    env,
        recorder:       recorder,
        codec:          codec,
    }

    c.rcStore.Indexer = rcInformer.GetIndexer()
    rcInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
        AddFunc:    c.addReplicationController,
        UpdateFunc: c.updateReplicationController,
    })
    c.podStore.Indexer = podInformer.GetIndexer()
    podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
        UpdateFunc: c.updatePod,
        DeleteFunc: c.deletePod,
    })

    c.rcStoreSynced = rcInformer.HasSynced
    c.podStoreSynced = podInformer.HasSynced

    return c
}
func NewJobController(kubeClient client.Interface) *JobController {
    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartLogging(glog.Infof)
    eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

    jm := &JobController{
        kubeClient: kubeClient,
        podControl: controller.RealPodControl{
            KubeClient: kubeClient,
            Recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "job"}),
        },
        expectations: controller.NewControllerExpectations(),
        queue:        workqueue.New(),
    }

    jm.jobStore.Store, jm.jobController = framework.NewInformer(
        &cache.ListWatch{
            ListFunc: func() (runtime.Object, error) {
                return jm.kubeClient.Experimental().Jobs(api.NamespaceAll).List(labels.Everything(), fields.Everything())
            },
            WatchFunc: func(rv string) (watch.Interface, error) {
                return jm.kubeClient.Experimental().Jobs(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)
            },
        },
        &experimental.Job{},
        replicationcontroller.FullControllerResyncPeriod,
        framework.ResourceEventHandlerFuncs{
            AddFunc: jm.enqueueController,
            UpdateFunc: func(old, cur interface{}) {
                if job := cur.(*experimental.Job); !isJobFinished(job) {
                    jm.enqueueController(job)
                }
            },
            DeleteFunc: jm.enqueueController,
        },
    )

    jm.podStore.Store, jm.podController = framework.NewInformer(
        &cache.ListWatch{
            ListFunc: func() (runtime.Object, error) {
                return jm.kubeClient.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
            },
            WatchFunc: func(rv string) (watch.Interface, error) {
                return jm.kubeClient.Pods(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)
            },
        },
        &api.Pod{},
        replicationcontroller.PodRelistPeriod,
        framework.ResourceEventHandlerFuncs{
            AddFunc:    jm.addPod,
            UpdateFunc: jm.updatePod,
            DeleteFunc: jm.deletePod,
        },
    )

    jm.updateHandler = jm.updateJobStatus
    jm.syncHandler = jm.syncJob
    jm.podStoreSynced = jm.podController.HasSynced
    return jm
}
// mustSetupScheduler starts the following components:
// - k8s api server (a.k.a. master)
// - scheduler
// It returns the scheduler config factory and a destroyFunc which should be
// used to clean up resources after the test is finished.
// Notes on rate limiter:
// - The BindPodsRateLimiter is nil, meaning no rate limits.
// - The client rate limit is set to 5000.
func mustSetupScheduler() (schedulerConfigFactory *factory.ConfigFactory, destroyFunc func()) {
    framework.DeleteAllEtcdKeys()

    var m *master.Master
    masterConfig := framework.NewIntegrationTestMasterConfig()
    m = master.New(masterConfig)
    s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
        m.Handler.ServeHTTP(w, req)
    }))

    c := client.NewOrDie(&client.Config{
        Host:         s.URL,
        GroupVersion: testapi.Default.GroupVersion(),
        QPS:          5000.0,
        Burst:        5000,
    })

    schedulerConfigFactory = factory.NewConfigFactory(c, nil, api.DefaultSchedulerName)
    schedulerConfig, err := schedulerConfigFactory.Create()
    if err != nil {
        panic("Couldn't create scheduler config")
    }
    eventBroadcaster := record.NewBroadcaster()
    schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
    eventBroadcaster.StartRecordingToSink(c.Events(""))
    scheduler.New(schedulerConfig).Run()

    destroyFunc = func() {
        glog.Infof("destroying")
        close(schedulerConfig.StopEverything)
        s.Close()
        glog.Infof("destroyed")
    }
    return
}
func main() {
    config := HollowNodeConfig{}
    config.addFlags(pflag.CommandLine)
    flag.InitFlags()

    if !knownMorphs.Has(config.Morph) {
        glog.Fatalf("Unknown morph: %v. Allowed values: %v", config.Morph, knownMorphs.List())
    }

    // create a client to communicate with API server.
    clientConfig, err := config.createClientConfigFromFile()
    if err != nil {
        glog.Fatalf("Failed to create a ClientConfig: %v. Exiting.", err)
    }
    cl, err := client.New(clientConfig)
    if err != nil {
        glog.Fatalf("Failed to create a Client: %v. Exiting.", err)
    }
    clientset, err := internalclientset.NewForConfig(clientConfig)
    if err != nil {
        glog.Fatalf("Failed to create a ClientSet: %v. Exiting.", err)
    }

    if config.Morph == "kubelet" {
        cadvisorInterface := new(cadvisortest.Fake)
        containerManager := cm.NewStubContainerManager()

        fakeDockerClient := dockertools.NewFakeDockerClient()
        fakeDockerClient.EnableSleep = true

        hollowKubelet := kubemark.NewHollowKubelet(
            config.NodeName,
            clientset,
            cadvisorInterface,
            fakeDockerClient,
            config.KubeletPort,
            config.KubeletReadOnlyPort,
            containerManager,
            maxPods,
            podsPerCore,
        )
        hollowKubelet.Run()
    }

    if config.Morph == "proxy" {
        eventBroadcaster := record.NewBroadcaster()
        recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "kube-proxy", Host: config.NodeName})

        iptInterface := fakeiptables.NewFake()

        serviceConfig := proxyconfig.NewServiceConfig()
        serviceConfig.RegisterHandler(&kubemark.FakeProxyHandler{})

        endpointsConfig := proxyconfig.NewEndpointsConfig()
        endpointsConfig.RegisterHandler(&kubemark.FakeProxyHandler{})

        hollowProxy := kubemark.NewHollowProxyOrDie(config.NodeName, cl, endpointsConfig, serviceConfig, iptInterface, eventBroadcaster, recorder)
        hollowProxy.Run()
    }
}
func New(kubeClient client.Interface, resyncPeriod controller.ResyncPeriodFunc, threshold int) *GCController {
    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartLogging(glog.Infof)
    eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

    gcc := &GCController{
        kubeClient: kubeClient,
        threshold:  threshold,
        deletePod: func(namespace, name string) error {
            return kubeClient.Pods(namespace).Delete(name, api.NewDeleteOptions(0))
        },
    }

    terminatedSelector := fields.ParseSelectorOrDie("status.phase!=" + string(api.PodPending) + ",status.phase!=" + string(api.PodRunning) + ",status.phase!=" + string(api.PodUnknown))

    gcc.podStore.Store, gcc.podStoreSyncer = framework.NewInformer(
        &cache.ListWatch{
            ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                options.FieldSelector = terminatedSelector
                return gcc.kubeClient.Pods(api.NamespaceAll).List(options)
            },
            WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
                options.FieldSelector = terminatedSelector
                return gcc.kubeClient.Pods(api.NamespaceAll).Watch(options)
            },
        },
        &api.Pod{},
        resyncPeriod(),
        framework.ResourceEventHandlerFuncs{},
    )
    return gcc
}
func createPodConfigTester(mode PodConfigNotificationMode) (chan<- interface{}, <-chan kubelet.PodUpdate, *PodConfig) {
    eventBroadcaster := record.NewBroadcaster()
    config := NewPodConfig(mode, eventBroadcaster.NewRecorder(api.EventSource{Component: "kubelet"}))
    channel := config.Channel(TestSource)
    ch := config.Updates()
    return channel, ch, config
}
// NewCloudNodeController creates a CloudNodeController object
func NewCloudNodeController(
    nodeInformer informers.NodeInformer,
    kubeClient clientset.Interface,
    cloud cloudprovider.Interface,
    nodeMonitorPeriod time.Duration) (*CloudNodeController, error) {

    eventBroadcaster := record.NewBroadcaster()
    recorder := eventBroadcaster.NewRecorder(v1.EventSource{Component: "cloudcontrollermanager"})
    eventBroadcaster.StartLogging(glog.Infof)
    if kubeClient != nil {
        glog.V(0).Infof("Sending events to api server.")
        eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
    } else {
        glog.V(0).Infof("No api server defined - no events will be sent to API server.")
    }

    cnc := &CloudNodeController{
        nodeInformer:      nodeInformer,
        kubeClient:        kubeClient,
        recorder:          recorder,
        cloud:             cloud,
        nodeMonitorPeriod: nodeMonitorPeriod,
    }
    return cnc, nil
}
func TestUnschedulableNodes(t *testing.T) {
    framework.DeleteAllEtcdKeys()

    var m *master.Master
    s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
        m.Handler.ServeHTTP(w, req)
    }))
    defer s.Close()

    masterConfig := framework.NewIntegrationTestMasterConfig()
    m = master.New(masterConfig)

    restClient := client.NewOrDie(&client.Config{Host: s.URL, GroupVersion: testapi.Default.GroupVersion()})

    schedulerConfigFactory := factory.NewConfigFactory(restClient, nil)
    schedulerConfig, err := schedulerConfigFactory.Create()
    if err != nil {
        t.Fatalf("Couldn't create scheduler config: %v", err)
    }
    eventBroadcaster := record.NewBroadcaster()
    schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
    eventBroadcaster.StartRecordingToSink(restClient.Events(""))
    scheduler.New(schedulerConfig).Run()
    defer close(schedulerConfig.StopEverything)

    DoTestUnschedulableNodes(t, restClient, schedulerConfigFactory.NodeLister.Store)
}
// Create creates a new ConfigChangeController which is used to trigger builds on creation
func (factory *BuildConfigControllerFactory) Create() controller.RunnableController {
    queue := cache.NewResyncableFIFO(cache.MetaNamespaceKeyFunc)
    cache.NewReflector(&buildConfigLW{client: factory.Client}, &buildapi.BuildConfig{}, queue, 2*time.Minute).RunUntil(factory.Stop)

    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartRecordingToSink(factory.KubeClient.Events(""))

    bcController := &buildcontroller.BuildConfigController{
        BuildConfigInstantiator: factory.BuildConfigInstantiator,
        Recorder:                eventBroadcaster.NewRecorder(kapi.EventSource{Component: "build-config-controller"}),
    }

    return &controller.RetryController{
        Queue: queue,
        RetryManager: controller.NewQueueRetryManager(
            queue,
            cache.MetaNamespaceKeyFunc,
            retryFunc("BuildConfig", buildcontroller.IsFatal),
            flowcontrol.NewTokenBucketRateLimiter(1, 10)),
        Handle: func(obj interface{}) error {
            bc := obj.(*buildapi.BuildConfig)
            return bcController.HandleBuildConfig(bc)
        },
    }
}
func New(kubeClient client.Interface, resyncPeriod controller.ResyncPeriodFunc, threshold int) *GCController {
    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartLogging(glog.Infof)
    eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

    gcc := &GCController{
        kubeClient: kubeClient,
        threshold:  threshold,
        deletePod: func(namespace, name string) error {
            return kubeClient.Pods(namespace).Delete(name, api.NewDeleteOptions(0))
        },
    }

    terminatedSelector := compileTerminatedPodSelector()

    gcc.podStore.Store, gcc.podStoreSyncer = framework.NewInformer(
        &cache.ListWatch{
            ListFunc: func() (runtime.Object, error) {
                options := unversioned.ListOptions{FieldSelector: unversioned.FieldSelector{terminatedSelector}}
                return gcc.kubeClient.Pods(api.NamespaceAll).List(options)
            },
            WatchFunc: func(options unversioned.ListOptions) (watch.Interface, error) {
                options.FieldSelector.Selector = terminatedSelector
                return gcc.kubeClient.Pods(api.NamespaceAll).Watch(options)
            },
        },
        &api.Pod{},
        resyncPeriod(),
        framework.ResourceEventHandlerFuncs{},
    )
    return gcc
}
func createConfig(s *options.SchedulerServer, kubecli *clientset.Clientset) (*scheduler.Config, error) {
    configFactory := factory.NewConfigFactory(kubecli, s.SchedulerName, s.HardPodAffinitySymmetricWeight, s.FailureDomains)
    if _, err := os.Stat(s.PolicyConfigFile); err == nil {
        var (
            policy     schedulerapi.Policy
            configData []byte
        )
        configData, err := ioutil.ReadFile(s.PolicyConfigFile)
        if err != nil {
            return nil, fmt.Errorf("unable to read policy config: %v", err)
        }
        if err := runtime.DecodeInto(latestschedulerapi.Codec, configData, &policy); err != nil {
            return nil, fmt.Errorf("invalid configuration: %v", err)
        }
        return configFactory.CreateFromConfig(policy)
    }

    // if the config file isn't provided, use the specified (or default) provider
    config, err := configFactory.CreateFromProvider(s.AlgorithmProvider)
    if err != nil {
        return nil, err
    }
    eventBroadcaster := record.NewBroadcaster()
    config.Recorder = eventBroadcaster.NewRecorder(v1.EventSource{Component: s.SchedulerName})
    eventBroadcaster.StartLogging(glog.Infof)
    eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubecli.Core().Events("")})
    return config, nil
}
// Create constructs a BuildController
func (factory *BuildControllerFactory) Create() controller.RunnableController {
    queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
    cache.NewReflector(&buildLW{client: factory.OSClient}, &buildapi.Build{}, queue, 2*time.Minute).Run()

    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartRecordingToSink(factory.KubeClient.Events(""))

    client := ControllerClient{factory.KubeClient, factory.OSClient}
    buildController := &buildcontroller.BuildController{
        BuildUpdater:      factory.BuildUpdater,
        ImageStreamClient: client,
        PodManager:        client,
        BuildStrategy: &typeBasedFactoryStrategy{
            DockerBuildStrategy: factory.DockerBuildStrategy,
            SourceBuildStrategy: factory.SourceBuildStrategy,
            CustomBuildStrategy: factory.CustomBuildStrategy,
        },
        Recorder: eventBroadcaster.NewRecorder(kapi.EventSource{Component: "build-controller"}),
    }

    return &controller.RetryController{
        Queue: queue,
        RetryManager: controller.NewQueueRetryManager(
            queue,
            cache.MetaNamespaceKeyFunc,
            limitedLogAndRetry(factory.BuildUpdater, 30*time.Minute),
            kutil.NewTokenBucketRateLimiter(1, 10)),
        Handle: func(obj interface{}) error {
            build := obj.(*buildapi.Build)
            return buildController.HandleBuild(build)
        },
    }
}
func TestUnschedulableNodes(t *testing.T) {
    framework.DeleteAllEtcdKeys()

    var m *master.Master
    s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
        m.Handler.ServeHTTP(w, req)
    }))
    defer s.Close()

    masterConfig := framework.NewIntegrationTestMasterConfig()
    m, err := master.New(masterConfig)
    if err != nil {
        t.Fatalf("Error in bringing up the master: %v", err)
    }

    restClient := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

    schedulerConfigFactory := factory.NewConfigFactory(restClient, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
    schedulerConfig, err := schedulerConfigFactory.Create()
    if err != nil {
        t.Fatalf("Couldn't create scheduler config: %v", err)
    }
    eventBroadcaster := record.NewBroadcaster()
    schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: api.DefaultSchedulerName})
    eventBroadcaster.StartRecordingToSink(restClient.Events(""))
    scheduler.New(schedulerConfig).Run()
    defer close(schedulerConfig.StopEverything)

    DoTestUnschedulableNodes(t, restClient, schedulerConfigFactory.NodeLister.Store)
}
// RunKubelet is responsible for setting up and running a kubelet. It is used in three different applications:
//   1 Integration tests
//   2 Kubelet binary
//   3 Standalone 'kubernetes' binary
// Eventually, #2 will be replaced with instances of #3
func RunKubelet(kcfg *KubeletConfig, builder KubeletBuilder) error {
    kcfg.Hostname = nodeutil.GetHostname(kcfg.HostnameOverride)

    if len(kcfg.NodeName) == 0 {
        // Query the cloud provider for our node name, default to Hostname
        nodeName := kcfg.Hostname
        if kcfg.Cloud != nil {
            var err error
            instances, ok := kcfg.Cloud.Instances()
            if !ok {
                return fmt.Errorf("failed to get instances from cloud provider")
            }
            nodeName, err = instances.CurrentNodeName(kcfg.Hostname)
            if err != nil {
                return fmt.Errorf("error fetching current instance name from cloud provider: %v", err)
            }
            glog.V(2).Infof("cloud provider determined current node name to be %s", nodeName)
        }
        kcfg.NodeName = nodeName
    }

    eventBroadcaster := record.NewBroadcaster()
    kcfg.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "kubelet", Host: kcfg.NodeName})
    eventBroadcaster.StartLogging(glog.V(3).Infof)
    if kcfg.KubeClient != nil {
        glog.V(4).Infof("Sending events to api server.")
        eventBroadcaster.StartRecordingToSink(kcfg.KubeClient.Events(""))
    } else {
        glog.Warning("No api server defined - no events will be sent to API server.")
    }

    capabilities.Setup(kcfg.AllowPrivileged, kcfg.HostNetworkSources, 0)

    credentialprovider.SetPreferredDockercfgPath(kcfg.RootDirectory)

    if builder == nil {
        builder = createAndInitKubelet
    }
    if kcfg.OSInterface == nil {
        kcfg.OSInterface = kubecontainer.RealOS{}
    }
    k, podCfg, err := builder(kcfg)
    if err != nil {
        return fmt.Errorf("failed to create kubelet: %v", err)
    }

    // process pods and exit.
    if kcfg.Runonce {
        if _, err := k.RunOnce(podCfg.Updates()); err != nil {
            return fmt.Errorf("runonce failed: %v", err)
        }
        glog.Infof("Started kubelet as runonce")
    } else {
        startKubelet(k, podCfg, kcfg)
        glog.Infof("Started kubelet")
    }
    return nil
}
// RunProxy starts the proxy
func (c *NodeConfig) RunProxy() {
    // initialize kube proxy
    serviceConfig := pconfig.NewServiceConfig()
    endpointsConfig := pconfig.NewEndpointsConfig()

    host, _, err := net.SplitHostPort(c.BindAddress)
    if err != nil {
        glog.Fatalf("The provided value to bind to must be an ip:port %q", c.BindAddress)
    }
    ip := net.ParseIP(host)
    if ip == nil {
        glog.Fatalf("The provided value to bind to must be an ip:port: %q", c.BindAddress)
    }

    protocol := iptables.ProtocolIpv4
    if ip.To4() == nil {
        protocol = iptables.ProtocolIpv6
    }

    syncPeriod, err := time.ParseDuration(c.IPTablesSyncPeriod)
    if err != nil {
        glog.Fatalf("Cannot parse the provided ip-tables sync period (%s) : %v", c.IPTablesSyncPeriod, err)
    }

    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartRecordingToSink(c.Client.Events(""))
    recorder := eventBroadcaster.NewRecorder(kapi.EventSource{Component: "kube-proxy", Host: c.KubeletConfig.NodeName})

    nodeRef := &kapi.ObjectReference{
        Kind: "Node",
        Name: c.KubeletConfig.NodeName,
    }

    exec := kexec.New()
    dbus := utildbus.New()
    iptables := iptables.New(exec, dbus, protocol)

    proxier, err := proxy.NewProxier(iptables, exec, syncPeriod, false)
    if err != nil {
        // This should be fatal, but that would break the integration tests
        glog.Warningf("WARNING: Could not initialize Kubernetes Proxy. You must run this process as root to use the service proxy: %v", err)
        return
    }
    iptables.AddReloadFunc(proxier.Sync)

    pconfig.NewSourceAPI(
        c.Client,
        10*time.Minute,
        serviceConfig.Channel("api"),
        endpointsConfig.Channel("api"))

    serviceConfig.RegisterHandler(proxier)
    if c.FilteringEndpointsHandler == nil {
        endpointsConfig.RegisterHandler(proxier)
    } else {
        c.FilteringEndpointsHandler.SetBaseEndpointsHandler(proxier)
        endpointsConfig.RegisterHandler(c.FilteringEndpointsHandler)
    }

    recorder.Eventf(nodeRef, kapi.EventTypeNormal, "Starting", "Starting kube-proxy.")

    glog.Infof("Started Kubernetes Proxy on %s", host)
}
// NewReplicationManager creates a replication manager
func NewReplicationManager(podInformer cache.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicationManager {
    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartLogging(glog.Infof)
    eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})

    return newReplicationManager(
        eventBroadcaster.NewRecorder(api.EventSource{Component: "replication-controller"}),
        podInformer, kubeClient, resyncPeriod, burstReplicas, lookupCacheSize, garbageCollectorEnabled)
}
func NewJobController(podInformer framework.SharedIndexInformer, kubeClient clientset.Interface) *JobController {
    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartLogging(glog.Infof)
    // TODO: remove the wrapper when all clients have moved to use the clientset.
    eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})

    if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
        metrics.RegisterMetricAndTrackRateLimiterUsage("job_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
    }

    jm := &JobController{
        kubeClient: kubeClient,
        podControl: controller.RealPodControl{
            KubeClient: kubeClient,
            Recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "job-controller"}),
        },
        expectations: controller.NewControllerExpectations(),
        queue:        workqueue.New(),
        recorder:     eventBroadcaster.NewRecorder(api.EventSource{Component: "job-controller"}),
    }

    jm.jobStore.Store, jm.jobController = framework.NewInformer(
        &cache.ListWatch{
            ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                return jm.kubeClient.Batch().Jobs(api.NamespaceAll).List(options)
            },
            WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
                return jm.kubeClient.Batch().Jobs(api.NamespaceAll).Watch(options)
            },
        },
        &batch.Job{},
        // TODO: Can we have much longer period here?
        replicationcontroller.FullControllerResyncPeriod,
        framework.ResourceEventHandlerFuncs{
            AddFunc: jm.enqueueController,
            UpdateFunc: func(old, cur interface{}) {
                if job := cur.(*batch.Job); !IsJobFinished(job) {
                    jm.enqueueController(job)
                }
            },
            DeleteFunc: jm.enqueueController,
        },
    )

    podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
        AddFunc:    jm.addPod,
        UpdateFunc: jm.updatePod,
        DeleteFunc: jm.deletePod,
    })
    jm.podStore.Indexer = podInformer.GetIndexer()
    jm.podStoreSynced = podInformer.HasSynced

    jm.updateHandler = jm.updateJobStatus
    jm.syncHandler = jm.syncJob
    return jm
}
// NewCIDRRangeAllocator returns a CIDRAllocator that allocates CIDRs for nodes.
// The caller must ensure subNetMaskSize is not less than the cluster CIDR mask size.
// The caller must always pass in a list of existing nodes so the new allocator
// can initialize its CIDR map. NodeList is only nil in testing.
func NewCIDRRangeAllocator(client clientset.Interface, clusterCIDR *net.IPNet, serviceCIDR *net.IPNet, subNetMaskSize int, nodeList *v1.NodeList) (CIDRAllocator, error) {
    eventBroadcaster := record.NewBroadcaster()
    recorder := eventBroadcaster.NewRecorder(v1.EventSource{Component: "cidrAllocator"})
    eventBroadcaster.StartLogging(glog.Infof)

    ra := &rangeAllocator{
        client:                client,
        cidrs:                 newCIDRSet(clusterCIDR, subNetMaskSize),
        clusterCIDR:           clusterCIDR,
        nodeCIDRUpdateChannel: make(chan nodeAndCIDR, cidrUpdateQueueSize),
        recorder:              recorder,
        nodesInProcessing:     sets.NewString(),
    }

    if serviceCIDR != nil {
        ra.filterOutServiceRange(serviceCIDR)
    } else {
        glog.V(0).Info("No Service CIDR provided. Skipping filtering out service addresses.")
    }

    if nodeList != nil {
        for _, node := range nodeList.Items {
            if node.Spec.PodCIDR == "" {
                glog.Infof("Node %v has no CIDR, ignoring", node.Name)
                continue
            } else {
                glog.Infof("Node %v has CIDR %s, occupying it in CIDR map", node.Name, node.Spec.PodCIDR)
            }
            if err := ra.occupyCIDR(&node); err != nil {
                // This will happen if:
                // 1. We find garbage in the podCIDR field. Retrying is useless.
                // 2. CIDR out of range: This means a node CIDR has changed.
                // This error will keep crashing controller-manager.
                return nil, err
            }
        }
    }

    for i := 0; i < cidrUpdateWorkers; i++ {
        go func(stopChan <-chan struct{}) {
            for {
                select {
                case workItem, ok := <-ra.nodeCIDRUpdateChannel:
                    if !ok {
                        glog.Warning("NodeCIDRUpdateChannel read returned false.")
                        return
                    }
                    ra.updateCIDRAllocation(workItem)
                case <-stopChan:
                    return
                }
            }
        }(wait.NeverStop)
    }

    return ra, nil
}
// Create creates a DeploymentConfigChangeController.
func (factory *DeploymentConfigChangeControllerFactory) Create() controller.RunnableController {
    deploymentConfigLW := &deployutil.ListWatcherImpl{
        ListFunc: func() (runtime.Object, error) {
            return factory.Client.DeploymentConfigs(kapi.NamespaceAll).List(labels.Everything(), fields.Everything())
        },
        WatchFunc: func(resourceVersion string) (watch.Interface, error) {
            return factory.Client.DeploymentConfigs(kapi.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
        },
    }
    queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
    cache.NewReflector(deploymentConfigLW, &deployapi.DeploymentConfig{}, queue, 2*time.Minute).Run()

    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartRecordingToSink(factory.KubeClient.Events(""))

    changeController := &DeploymentConfigChangeController{
        changeStrategy: &changeStrategyImpl{
            getDeploymentFunc: func(namespace, name string) (*kapi.ReplicationController, error) {
                return factory.KubeClient.ReplicationControllers(namespace).Get(name)
            },
            generateDeploymentConfigFunc: func(namespace, name string) (*deployapi.DeploymentConfig, error) {
                return factory.Client.DeploymentConfigs(namespace).Generate(name)
            },
            updateDeploymentConfigFunc: func(namespace string, config *deployapi.DeploymentConfig) (*deployapi.DeploymentConfig, error) {
                return factory.Client.DeploymentConfigs(namespace).Update(config)
            },
        },
        decodeConfig: func(deployment *kapi.ReplicationController) (*deployapi.DeploymentConfig, error) {
            return deployutil.DecodeDeploymentConfig(deployment, factory.Codec)
        },
        recorder: eventBroadcaster.NewRecorder(kapi.EventSource{Component: "deployer"}),
    }

    return &controller.RetryController{
        Queue: queue,
        RetryManager: controller.NewQueueRetryManager(
            queue,
            cache.MetaNamespaceKeyFunc,
            func(obj interface{}, err error, retries controller.Retry) bool {
                kutil.HandleError(err)
                if _, isFatal := err.(fatalError); isFatal {
                    return false
                }
                if retries.Count > 0 {
                    return false
                }
                return true
            },
            kutil.NewTokenBucketRateLimiter(1, 10),
        ),
        Handle: func(obj interface{}) error {
            config := obj.(*deployapi.DeploymentConfig)
            return changeController.Handle(config)
        },
    }
}
// Run runs the specified SchedulerServer. This should never exit.
func (s *SchedulerServer) Run(_ []string) error {
    if s.Kubeconfig == "" && s.Master == "" {
        glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
    }

    // This creates a client, first loading any specified kubeconfig
    // file, and then overriding the Master flag, if non-empty.
    kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
        &clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Kubeconfig},
        &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
    if err != nil {
        return err
    }

    // Override kubeconfig qps/burst settings from flags
    kubeconfig.QPS = s.KubeAPIQPS
    kubeconfig.Burst = s.KubeAPIBurst

    kubeClient, err := client.New(kubeconfig)
    if err != nil {
        glog.Fatalf("Invalid API configuration: %v", err)
    }

    go func() {
        mux := http.NewServeMux()
        healthz.InstallHandler(mux)
        if s.EnableProfiling {
            mux.HandleFunc("/debug/pprof/", pprof.Index)
            mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
            mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
        }
        mux.Handle("/metrics", prometheus.Handler())

        server := &http.Server{
            Addr:    net.JoinHostPort(s.Address.String(), strconv.Itoa(s.Port)),
            Handler: mux,
        }
        glog.Fatal(server.ListenAndServe())
    }()

    configFactory := factory.NewConfigFactory(kubeClient, util.NewTokenBucketRateLimiter(s.BindPodsQPS, s.BindPodsBurst))
    config, err := s.createConfig(configFactory)
    if err != nil {
        glog.Fatalf("Failed to create scheduler configuration: %v", err)
    }

    eventBroadcaster := record.NewBroadcaster()
    config.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
    eventBroadcaster.StartLogging(glog.Infof)
    eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

    sched := scheduler.New(config)
    sched.Run()

    select {}
}
// NewPetSetController creates a new petset controller.
func NewPetSetController(podInformer framework.SharedIndexInformer, kubeClient *client.Client, resyncPeriod time.Duration) *PetSetController {
    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartLogging(glog.Infof)
    eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
    recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "petset"})
    pc := &apiServerPetClient{kubeClient, recorder, &defaultPetHealthChecker{}}

    psc := &PetSetController{
        kubeClient:       kubeClient,
        blockingPetStore: newUnHealthyPetTracker(pc),
        newSyncer: func(blockingPet *pcb) *petSyncer {
            return &petSyncer{pc, blockingPet}
        },
        queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "petset"),
    }

    podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
        // lookup the petset and enqueue
        AddFunc: psc.addPod,
        // lookup current and old petset if labels changed
        UpdateFunc: psc.updatePod,
        // lookup petset accounting for deletion tombstones
        DeleteFunc: psc.deletePod,
    })
    psc.podStore.Indexer = podInformer.GetIndexer()
    psc.podController = podInformer.GetController()

    psc.psStore.Store, psc.psController = framework.NewInformer(
        &cache.ListWatch{
            ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                return psc.kubeClient.Apps().PetSets(api.NamespaceAll).List(options)
            },
            WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
                return psc.kubeClient.Apps().PetSets(api.NamespaceAll).Watch(options)
            },
        },
        &apps.PetSet{},
        petSetResyncPeriod,
        framework.ResourceEventHandlerFuncs{
            AddFunc: psc.enqueuePetSet,
            UpdateFunc: func(old, cur interface{}) {
                oldPS := old.(*apps.PetSet)
                curPS := cur.(*apps.PetSet)
                if oldPS.Status.Replicas != curPS.Status.Replicas {
                    glog.V(4).Infof("Observed updated replica count for PetSet: %v, %d->%d", curPS.Name, oldPS.Status.Replicas, curPS.Status.Replicas)
                }
                psc.enqueuePetSet(cur)
            },
            DeleteFunc: psc.enqueuePetSet,
        },
    )
    // TODO: Watch volumes
    psc.podStoreSynced = psc.podController.HasSynced
    psc.syncHandler = psc.Sync
    return psc
}
func TestUnschedulableNodes(t *testing.T) {
    etcdStorage, err := framework.NewEtcdStorage()
    if err != nil {
        t.Fatalf("Couldn't create etcd storage: %v", err)
    }
    expEtcdStorage, err := framework.NewExtensionsEtcdStorage(nil)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }

    storageDestinations := master.NewStorageDestinations()
    storageDestinations.AddAPIGroup("", etcdStorage)
    storageDestinations.AddAPIGroup("extensions", expEtcdStorage)

    storageVersions := make(map[string]string)
    storageVersions[""] = testapi.Default.Version()
    storageVersions["extensions"] = testapi.Extensions.GroupAndVersion()

    framework.DeleteAllEtcdKeys()

    var m *master.Master
    s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
        m.Handler.ServeHTTP(w, req)
    }))
    defer s.Close()

    m = master.New(&master.Config{
        StorageDestinations:   storageDestinations,
        KubeletClient:         client.FakeKubeletClient{},
        EnableCoreControllers: true,
        EnableLogsSupport:     false,
        EnableUISupport:       false,
        EnableIndex:           true,
        APIPrefix:             "/api",
        Authorizer:            apiserver.NewAlwaysAllowAuthorizer(),
        AdmissionControl:      admit.NewAlwaysAdmit(),
        StorageVersions:       storageVersions,
        PublicAddress:         net.ParseIP("192.168.10.4"),
    })

    restClient := client.NewOrDie(&client.Config{Host: s.URL, GroupVersion: testapi.Default.GroupVersion()})

    schedulerConfigFactory := factory.NewConfigFactory(restClient, nil)
    schedulerConfig, err := schedulerConfigFactory.Create()
    if err != nil {
        t.Fatalf("Couldn't create scheduler config: %v", err)
    }
    eventBroadcaster := record.NewBroadcaster()
    schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
    eventBroadcaster.StartRecordingToSink(restClient.Events(""))
    scheduler.New(schedulerConfig).Run()
    defer close(schedulerConfig.StopEverything)

    DoTestUnschedulableNodes(t, restClient, schedulerConfigFactory.NodeLister.Store)
}
func NewCertificateController(kubeClient clientset.Interface, syncPeriod time.Duration, caCertFile, caKeyFile string, approveAllKubeletCSRsForGroup string) (*CertificateController, error) {
    // Send events to the apiserver
    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartLogging(glog.Infof)
    eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})

    // Configure cfssl signer
    // TODO: support non-default policy and remote/pkcs11 signing
    policy := &config.Signing{
        Default: config.DefaultConfig(),
    }
    ca, err := local.NewSignerFromFile(caCertFile, caKeyFile, policy)
    if err != nil {
        return nil, err
    }

    cc := &CertificateController{
        kubeClient: kubeClient,
        queue:      workqueue.New(),
        signer:     ca,
        approveAllKubeletCSRsForGroup: approveAllKubeletCSRsForGroup,
    }

    // Manage the addition/update of certificate requests
    cc.csrStore.Store, cc.csrController = framework.NewInformer(
        &cache.ListWatch{
            ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                return cc.kubeClient.Certificates().CertificateSigningRequests().List(options)
            },
            WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
                return cc.kubeClient.Certificates().CertificateSigningRequests().Watch(options)
            },
        },
        &certificates.CertificateSigningRequest{},
        syncPeriod,
        framework.ResourceEventHandlerFuncs{
            AddFunc: func(obj interface{}) {
                csr := obj.(*certificates.CertificateSigningRequest)
                glog.V(4).Infof("Adding certificate request %s", csr.Name)
                cc.enqueueCertificateRequest(obj)
            },
            UpdateFunc: func(old, new interface{}) {
                oldCSR := old.(*certificates.CertificateSigningRequest)
                glog.V(4).Infof("Updating certificate request %s", oldCSR.Name)
                cc.enqueueCertificateRequest(new)
            },
            DeleteFunc: func(obj interface{}) {
                csr := obj.(*certificates.CertificateSigningRequest)
                glog.V(4).Infof("Deleting certificate request %s", csr.Name)
                cc.enqueueCertificateRequest(obj)
            },
        },
    )
    cc.syncHandler = cc.maybeSignCertificate
    return cc, nil
}
func main() {
    runtime.GOMAXPROCS(runtime.NumCPU())

    config := HollowNodeConfig{}
    config.addFlags(pflag.CommandLine)
    util.InitFlags()

    if !knownMorphs.Has(config.Morph) {
        glog.Fatalf("Unknown morph: %v. Allowed values: %v", config.Morph, knownMorphs.List())
    }

    // create a client to communicate with API server.
    cl, err := createClientFromFile(config.KubeconfigPath)
    if err != nil {
        glog.Fatal("Failed to create a Client. Exiting.")
    }
    clientset := clientset.FromUnversionedClient(cl)

    if config.Morph == "kubelet" {
        cadvisorInterface := new(cadvisortest.Fake)
        containerManager := cm.NewStubContainerManager()

        fakeDockerClient := dockertools.NewFakeDockerClient()
        fakeDockerClient.VersionInfo = docker.Env{"Version=1.1.3", "ApiVersion=1.18"}
        fakeDockerClient.EnableSleep = true

        hollowKubelet := kubemark.NewHollowKubelet(
            config.NodeName,
            clientset,
            cadvisorInterface,
            fakeDockerClient,
            config.KubeletPort,
            config.KubeletReadOnlyPort,
            containerManager,
            maxPods,
        )
        hollowKubelet.Run()
    }

    if config.Morph == "proxy" {
        eventBroadcaster := record.NewBroadcaster()
        recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "kube-proxy", Host: config.NodeName})

        iptInterface := fakeiptables.NewFake()

        serviceConfig := proxyconfig.NewServiceConfig()
        serviceConfig.RegisterHandler(&kubemark.FakeProxyHandler{})

        endpointsConfig := proxyconfig.NewEndpointsConfig()
        endpointsConfig.RegisterHandler(&kubemark.FakeProxyHandler{})

        hollowProxy := kubemark.NewHollowProxyOrDie(config.NodeName, cl, endpointsConfig, serviceConfig, iptInterface, eventBroadcaster, recorder)
        hollowProxy.Run()
    }
}
func (k *KubernetesScheduler) NewPluginConfig(terminate <-chan struct{}, mux *http.ServeMux, podsWatcher *cache.ListWatch) *PluginConfig {
    // Watch and queue pods that need scheduling.
    updates := make(chan queue.Entry, k.schedcfg.UpdatesBacklog)
    podUpdates := &podStoreAdapter{queue.NewHistorical(updates)}
    reflector := cache.NewReflector(podsWatcher, &api.Pod{}, podUpdates, 0)

    // lock that guards critical sections that involve transferring pods from
    // the store (cache) to the scheduling queue; its purpose is to maintain
    // an ordering (vs interleaving) of operations that's easier to reason about.
    kapi := &k8smScheduler{internal: k}
    q := newQueuer(podUpdates)
    podDeleter := &deleter{
        api: kapi,
        qr:  q,
    }
    eh := &errorHandler{
        api:     kapi,
        backoff: backoff.New(k.schedcfg.InitialPodBackoff.Duration, k.schedcfg.MaxPodBackoff.Duration),
        qr:      q,
    }
    startLatch := make(chan struct{})
    eventBroadcaster := record.NewBroadcaster()
    runtime.On(startLatch, func() {
        eventBroadcaster.StartRecordingToSink(k.client.Events(""))
        reflector.Run() // TODO(jdef) should listen for termination
        podDeleter.Run(updates, terminate)
        q.Run(terminate)

        q.installDebugHandlers(mux)
        podtask.InstallDebugHandlers(k.taskRegistry, mux)
    })
    return &PluginConfig{
        Config: &plugin.Config{
            MinionLister: nil,
            Algorithm: &kubeScheduler{
                api:                      kapi,
                podUpdates:               podUpdates,
                defaultContainerCPULimit: k.defaultContainerCPULimit,
                defaultContainerMemLimit: k.defaultContainerMemLimit,
            },
            Binder:   &binder{api: kapi},
            NextPod:  q.yield,
            Error:    eh.handleSchedulingError,
            Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"}),
        },
        api:      kapi,
        client:   k.client,
        qr:       q,
        deleter:  podDeleter,
        starting: startLatch,
    }
}
func NewHorizontalController(client client.Interface, metricsClient metrics.MetricsClient) *HorizontalController {
    broadcaster := record.NewBroadcaster()
    broadcaster.StartRecordingToSink(client.Events(""))
    recorder := broadcaster.NewRecorder(api.EventSource{Component: "horizontal-pod-autoscaler"})

    return &HorizontalController{
        client:        client,
        metricsClient: metricsClient,
        eventRecorder: recorder,
    }
}
func New(client client.Interface) *DeploymentController {
    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartLogging(glog.Infof)
    eventBroadcaster.StartRecordingToSink(client.Events(""))

    return &DeploymentController{
        client:        client,
        expClient:     client.Extensions(),
        eventRecorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "deployment-controller"}),
    }
}