// TODO(jdef): hacked from kubelet/server/server.go
// TODO(k8s): replace this with clientcmd
func (s *SchedulerServer) createAPIServerClient() (*client.Client, error) {
	authInfo, err := clientauth.LoadFromFile(s.AuthPath)
	if err != nil {
		log.Warningf("Could not load kubernetes auth path: %v. Continuing with defaults.", err)
	}
	if authInfo == nil {
		// authInfo didn't load correctly - continue with defaults.
		authInfo = &clientauth.Info{}
	}
	clientConfig, err := authInfo.MergeWithConfig(client.Config{})
	if err != nil {
		return nil, err
	}
	if len(s.APIServerList) < 1 {
		return nil, fmt.Errorf("no api servers specified")
	}
	// TODO: adapt Kube client to support LB over several servers
	if len(s.APIServerList) > 1 {
		log.Infof("Multiple api servers specified. Picking first one")
	}
	clientConfig.Host = s.APIServerList[0]
	c, err := client.New(&clientConfig)
	if err != nil {
		return nil, err
	}
	return c, nil
}
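// The TODO above asks for this to be replaced with clientcmd. A minimal
// sketch of what that replacement could look like, mirroring the clientcmd
// pattern used by the scheduler and controller-manager Run() functions later
// in this collection. The Kubeconfig and Master fields on SchedulerServer are
// assumptions borrowed from those snippets, not part of the original type.
func (s *SchedulerServer) createAPIServerClientViaClientcmd() (*client.Client, error) {
	// Load any explicit kubeconfig file, then override the server address
	// with the --master flag if it was set.
	kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Kubeconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
	if err != nil {
		return nil, err
	}
	return client.New(kubeconfig)
}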
func main() {
	clientConfig := kubectl_util.DefaultClientConfig(flags)
	flags.Parse(os.Args)
	cfg := parseCfg(*config, *lbDefAlgorithm)
	if len(*tcpServices) == 0 {
		glog.Infof("All tcp/https services will be ignored.")
	}

	var kubeClient *unversioned.Client
	var err error

	defErrorPage := newStaticPageHandler(*errorPage, defaultErrorPage)
	if defErrorPage == nil {
		glog.Fatalf("Failed to load the default error page")
	}

	go registerHandlers(defErrorPage)

	proc.StartReaper()

	if *startSyslog {
		cfg.startSyslog = true
		_, err = newSyslogServer("/var/run/haproxy.log.socket")
		if err != nil {
			glog.Fatalf("Failed to start syslog server: %v", err)
		}
	}

	if *cluster {
		if kubeClient, err = unversioned.NewInCluster(); err != nil {
			glog.Fatalf("Failed to create client: %v", err)
		}
	} else {
		config, err := clientConfig.ClientConfig()
		if err != nil {
			glog.Fatalf("error connecting to the client: %v", err)
		}
		kubeClient, err = unversioned.New(config)
		if err != nil {
			glog.Fatalf("Failed to create client: %v", err)
		}
	}

	namespace, specified, err := clientConfig.Namespace()
	if err != nil {
		glog.Fatalf("unexpected error: %v", err)
	}
	if !specified {
		namespace = "default"
	}

	// TODO: Handle multiple namespaces
	lbc := newLoadBalancerController(cfg, kubeClient, namespace)

	go lbc.epController.Run(util.NeverStop)
	go lbc.svcController.Run(util.NeverStop)
	if *dry {
		dryRun(lbc)
	} else {
		lbc.cfg.reload()
		util.Until(lbc.worker, time.Second, util.NeverStop)
	}
}
// Run runs the specified SchedulerServer. This should never exit.
func (s *SchedulerServer) Run(_ []string) error {
	if s.Kubeconfig == "" && s.Master == "" {
		glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
	}

	// This creates a client, first loading any specified kubeconfig
	// file, and then overriding the Master flag, if non-empty.
	kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Kubeconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
	if err != nil {
		return err
	}
	kubeconfig.QPS = 50.0
	kubeconfig.Burst = 100

	kubeClient, err := client.New(kubeconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	go func() {
		mux := http.NewServeMux()
		healthz.InstallHandler(mux)
		if s.EnableProfiling {
			mux.HandleFunc("/debug/pprof/", pprof.Index)
			mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
			mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		}
		mux.Handle("/metrics", prometheus.Handler())

		server := &http.Server{
			Addr:    net.JoinHostPort(s.Address.String(), strconv.Itoa(s.Port)),
			Handler: mux,
		}
		glog.Fatal(server.ListenAndServe())
	}()

	configFactory := factory.NewConfigFactory(kubeClient, util.NewTokenBucketRateLimiter(s.BindPodsQPS, s.BindPodsBurst))
	config, err := s.createConfig(configFactory)
	if err != nil {
		glog.Fatalf("Failed to create scheduler configuration: %v", err)
	}

	eventBroadcaster := record.NewBroadcaster()
	config.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	sched := scheduler.New(config)
	sched.Run()

	select {}
}
func main() {
	flag.Usage = usage
	flag.Parse()

	var (
		cfg *kclient.Config
		err error
	)
	if *local {
		cfg = &kclient.Config{Host: fmt.Sprintf("http://localhost:%d", *localPort)}
	} else {
		cfg, err = kclient.InClusterConfig()
		if err != nil {
			glog.Errorf("failed to load config: %v", err)
			flag.Usage()
			os.Exit(1)
		}
	}
	client, err = kclient.New(cfg)
	if err != nil {
		glog.Fatalf("failed to create client: %v", err)
	}

	selector, err := labels.Parse(*userLabels)
	if err != nil {
		glog.Fatal(err)
	}
	tc, err := parseTimeCounts(*times, *counts)
	if err != nil {
		glog.Fatal(err)
	}
	if namespace == "" {
		glog.Fatal("POD_NAMESPACE is not set. Set to the namespace of the replication controller if running locally.")
	}
	scaler := scaler{timeCounts: tc, selector: selector}
	if err != nil {
		glog.Fatal(err)
	}

	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan,
		syscall.SIGHUP,
		syscall.SIGINT,
		syscall.SIGQUIT,
		syscall.SIGTERM)

	glog.Info("starting scaling")
	if err := scaler.Start(); err != nil {
		glog.Fatal(err)
	}
	<-sigChan
	glog.Info("stopping scaling")
	if err := scaler.Stop(); err != nil {
		glog.Fatal(err)
	}
}
// ClientForVersion initializes or reuses a client for the specified version, or returns an
// error if that is not possible
func (c *ClientCache) ClientForVersion(version string) (*client.Client, error) {
	if client, ok := c.clients[version]; ok {
		return client, nil
	}
	config, err := c.ClientConfigForVersion(version)
	if err != nil {
		return nil, err
	}

	client, err := client.New(config)
	if err != nil {
		return nil, err
	}

	c.clients[config.Version] = client
	return client, nil
}
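// For context, ClientForVersion above assumes a ClientCache shaped roughly
// like the sketch below: a lazily filled map from API version to client,
// with a ClientConfigForVersion helper defined on the same type. This is an
// illustrative assumption, not the verbatim upstream definition.
type ClientCache struct {
	clients map[string]*client.Client
	// ClientConfigForVersion(version string) (*client.Config, error) is
	// assumed to be implemented elsewhere on this type.
}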
// TODO: evaluate using pkg/client/clientcmd
func newKubeClient() (*kclient.Client, error) {
	var (
		config    *kclient.Config
		err       error
		masterURL string
	)

	// If the user specified --kube_master_url, expand env vars and verify it.
	if *argKubeMasterURL != "" {
		masterURL, err = expandKubeMasterURL()
		if err != nil {
			return nil, err
		}
	}
	if masterURL != "" && *argKubecfgFile == "" {
		// Only --kube_master_url was provided.
		config = &kclient.Config{
			Host:    masterURL,
			Version: "v1",
		}
	} else {
		// We either have:
		//  1) --kube_master_url and --kubecfg_file
		//  2) just --kubecfg_file
		//  3) neither flag
		// In any case, the logic is the same. If (3), this will automatically
		// fall back on the service account token.
		overrides := &kclientcmd.ConfigOverrides{}
		overrides.ClusterInfo.Server = masterURL                                     // might be "", but that is OK
		rules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: *argKubecfgFile} // might be "", but that is OK
		if config, err = kclientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides).ClientConfig(); err != nil {
			return nil, err
		}
	}

	glog.Infof("Using %s for kubernetes master", config.Host)
	glog.Infof("Using kubernetes API %s", config.Version)
	return kclient.New(config)
}
func loadData() {
	config := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(&clientcmd.ClientConfigLoadingRules{}, &clientcmd.ConfigOverrides{})
	clientConfig, err := config.ClientConfig()
	if err != nil {
		fmt.Printf("Error creating client config: %v", err)
		return
	}
	c, err := client.New(clientConfig)
	if err != nil {
		fmt.Printf("Error creating client: %v", err)
		return
	}
	pods, err := c.Pods("default").List(labels.Everything(), fields.Everything())
	if err != nil {
		fmt.Printf("Error getting pods: %v", err)
		return
	}
	loadbots := []*api.Pod{}
	for ix := range pods.Items {
		pod := &pods.Items[ix]
		if pod.Status.PodIP == "" {
			continue
		}
		loadbots = append(loadbots, pod)
	}

	parts := []vegeta.Metrics{}
	lock := sync.Mutex{}
	wg := sync.WaitGroup{}
	wg.Add(len(loadbots))
	for ix := range loadbots {
		go func(ix int) {
			defer wg.Done()
			pod := loadbots[ix]
			var data []byte
			if *useIP {
				url := "http://" + pod.Status.PodIP + ":8080/"
				resp, err := http.Get(url)
				if err != nil {
					fmt.Printf("Error getting: %v\n", err)
					return
				}
				defer resp.Body.Close()
				if data, err = ioutil.ReadAll(resp.Body); err != nil {
					fmt.Printf("Error reading: %v\n", err)
					return
				}
			} else {
				var err error
				data, err = c.RESTClient.Get().AbsPath("/api/v1/proxy/namespaces/default/pods/" + pod.Name + ":8080/").DoRaw()
				if err != nil {
					fmt.Printf("Error proxying to pod: %v\n", err)
					return
				}
			}
			var metrics vegeta.Metrics
			if err := json.Unmarshal(data, &metrics); err != nil {
				fmt.Printf("Error decoding: %v\n", err)
				return
			}
			lock.Lock()
			defer lock.Unlock()
			parts = append(parts, metrics)
		}(ix)
	}
	wg.Wait()

	data, err := json.Marshal(parts)
	if err != nil {
		fmt.Printf("Error marshaling: %v", err)
	}
	setData(data)
	fmt.Printf("Updated.\n")
}
// Run runs the CMServer. This should never exit.
func (s *CMServer) Run(_ []string) error {
	if s.Kubeconfig == "" && s.Master == "" {
		glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
	}

	// This creates a client, first loading any specified kubeconfig
	// file, and then overriding the Master flag, if non-empty.
	kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Kubeconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
	if err != nil {
		return err
	}
	kubeconfig.QPS = 20.0
	kubeconfig.Burst = 30

	kubeClient, err := client.New(kubeconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	go func() {
		mux := http.NewServeMux()
		healthz.InstallHandler(mux)
		if s.EnableProfiling {
			mux.HandleFunc("/debug/pprof/", pprof.Index)
			mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
			mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		}
		mux.Handle("/metrics", prometheus.Handler())

		server := &http.Server{
			Addr:    net.JoinHostPort(s.Address.String(), strconv.Itoa(s.Port)),
			Handler: mux,
		}
		glog.Fatal(server.ListenAndServe())
	}()

	go endpointcontroller.NewEndpointController(kubeClient, s.resyncPeriod).
		Run(s.ConcurrentEndpointSyncs, util.NeverStop)

	go replicationControllerPkg.NewReplicationManager(kubeClient, s.resyncPeriod, replicationControllerPkg.BurstReplicas).
		Run(s.ConcurrentRCSyncs, util.NeverStop)

	if s.TerminatedPodGCThreshold > 0 {
		go gc.New(kubeClient, s.resyncPeriod, s.TerminatedPodGCThreshold).
			Run(util.NeverStop)
	}

	cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	if err != nil {
		glog.Fatalf("Cloud provider could not be initialized: %v", err)
	}

	nodeController := nodecontroller.NewNodeController(cloud, kubeClient,
		s.PodEvictionTimeout, util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
		util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
		s.NodeMonitorGracePeriod, s.NodeStartupGracePeriod, s.NodeMonitorPeriod,
		&s.ClusterCIDR, s.AllocateNodeCIDRs)
	nodeController.Run(s.NodeSyncPeriod)

	serviceController := servicecontroller.New(cloud, kubeClient, s.ClusterName)
	if err := serviceController.Run(s.ServiceSyncPeriod, s.NodeSyncPeriod); err != nil {
		glog.Errorf("Failed to start service controller: %v", err)
	}

	if s.AllocateNodeCIDRs {
		if cloud == nil {
			glog.Warning("allocate-node-cidrs is set, but no cloud provider specified. Will not manage routes.")
		} else if routes, ok := cloud.Routes(); !ok {
			glog.Warning("allocate-node-cidrs is set, but cloud provider does not support routes. Will not manage routes.")
		} else {
			routeController := routecontroller.New(routes, kubeClient, s.ClusterName, &s.ClusterCIDR)
			routeController.Run(s.NodeSyncPeriod)
		}
	}

	resourcequotacontroller.NewResourceQuotaController(kubeClient).Run(s.ResourceQuotaSyncPeriod)

	// If apiserver is not running we should wait for some time and fail only then. This is particularly
	// important when we start apiserver and controller manager at the same time.
	var versionStrings []string
	err = wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) {
		if versionStrings, err = client.ServerAPIVersions(kubeconfig); err == nil {
			return true, nil
		}
		glog.Errorf("Failed to get api versions from server: %v", err)
		return false, nil
	})
	if err != nil {
		glog.Fatalf("Failed to get api versions from server: %v", err)
	}
	versions := &unversioned.APIVersions{Versions: versionStrings}

	resourceMap, err := kubeClient.Discovery().ServerResources()
	if err != nil {
		glog.Fatalf("Failed to get supported resources from server: %v", err)
	}

	namespacecontroller.NewNamespaceController(kubeClient, versions, s.NamespaceSyncPeriod).Run()

	groupVersion := "extensions/v1beta1"
	resources, found := resourceMap[groupVersion]
	// TODO: this needs to be dynamic so users don't have to restart their controller manager if they change the apiserver
	if containsVersion(versions, groupVersion) && found {
		glog.Infof("Starting %s apis", groupVersion)
		if containsResource(resources, "horizontalpodautoscalers") {
			glog.Infof("Starting horizontal pod controller.")
			podautoscaler.NewHorizontalController(kubeClient, metrics.NewHeapsterMetricsClient(kubeClient)).
				Run(s.HorizontalPodAutoscalerSyncPeriod)
		}

		if containsResource(resources, "daemonsets") {
			glog.Infof("Starting daemon set controller")
			go daemon.NewDaemonSetsController(kubeClient, s.resyncPeriod).
				Run(s.ConcurrentDSCSyncs, util.NeverStop)
		}

		if containsResource(resources, "jobs") {
			glog.Infof("Starting job controller")
			go job.NewJobController(kubeClient, s.resyncPeriod).
				Run(s.ConcurrentJobSyncs, util.NeverStop)
		}

		if containsResource(resources, "deployments") {
			glog.Infof("Starting deployment controller")
			deployment.New(kubeClient).
				Run(s.DeploymentControllerSyncPeriod)
		}
	}

	pvclaimBinder := volumeclaimbinder.NewPersistentVolumeClaimBinder(kubeClient, s.PVClaimBinderSyncPeriod)
	pvclaimBinder.Run()
	pvRecycler, err := volumeclaimbinder.NewPersistentVolumeRecycler(kubeClient, s.PVClaimBinderSyncPeriod, ProbeRecyclableVolumePlugins(s.VolumeConfigFlags))
	if err != nil {
		glog.Fatalf("Failed to start persistent volume recycler: %+v", err)
	}
	pvRecycler.Run()

	var rootCA []byte

	if s.RootCAFile != "" {
		rootCA, err = ioutil.ReadFile(s.RootCAFile)
		if err != nil {
			return fmt.Errorf("error reading root-ca-file at %s: %v", s.RootCAFile, err)
		}
		if _, err := util.CertsFromPEM(rootCA); err != nil {
			return fmt.Errorf("error parsing root-ca-file at %s: %v", s.RootCAFile, err)
		}
	} else {
		rootCA = kubeconfig.CAData
	}

	if len(s.ServiceAccountKeyFile) > 0 {
		privateKey, err := serviceaccount.ReadPrivateKey(s.ServiceAccountKeyFile)
		if err != nil {
			glog.Errorf("Error reading key for service account token controller: %v", err)
		} else {
			serviceaccount.NewTokensController(
				kubeClient,
				serviceaccount.TokensControllerOptions{
					TokenGenerator: serviceaccount.JWTTokenGenerator(privateKey),
					RootCA:         rootCA,
				},
			).Run()
		}
	}

	serviceaccount.NewServiceAccountsController(
		kubeClient,
		serviceaccount.DefaultServiceAccountsControllerOptions(),
	).Run()

	select {}
}
// main function for GLBC.
func main() {
	// TODO: Add a healthz endpoint
	var kubeClient *client.Client
	var err error
	var clusterManager *ClusterManager
	flags.Parse(os.Args)
	clientConfig := kubectl_util.DefaultClientConfig(flags)

	if *defaultSvc == "" {
		glog.Fatalf("Please specify --default-backend")
	}

	if *proxyUrl != "" {
		// Create proxy kubeclient
		kubeClient = client.NewOrDie(&client.Config{
			Host: *proxyUrl, Version: "v1"})
	} else {
		// Create kubeclient
		if *inCluster {
			if kubeClient, err = client.NewInCluster(); err != nil {
				glog.Fatalf("Failed to create client: %v.", err)
			}
		} else {
			config, err := clientConfig.ClientConfig()
			if err != nil {
				glog.Fatalf("error connecting to the client: %v", err)
			}
			kubeClient, err = client.New(config)
			if err != nil {
				glog.Fatalf("Failed to create client: %v.", err)
			}
		}
	}

	// Wait for the default backend Service. There's no pretty way to do this.
	parts := strings.Split(*defaultSvc, "/")
	if len(parts) != 2 {
		glog.Fatalf("Default backend should take the form namespace/name: %v", *defaultSvc)
	}
	defaultBackendNodePort, err := getNodePort(kubeClient, parts[0], parts[1])
	if err != nil {
		glog.Fatalf("Could not configure default backend %v: %v", *defaultSvc, err)
	}

	if *proxyUrl == "" && *inCluster {
		// Create cluster manager
		clusterManager, err = NewClusterManager(*clusterName, defaultBackendNodePort, *healthCheckPath)
		if err != nil {
			glog.Fatalf("%v", err)
		}
	} else {
		// Create fake cluster manager
		clusterManager = newFakeClusterManager(*clusterName).ClusterManager
	}

	// Start loadbalancer controller
	lbc, err := NewLoadBalancerController(kubeClient, clusterManager, *resyncPeriod, *watchNamespace)
	if err != nil {
		glog.Fatalf("%v", err)
	}
	glog.Infof("Created lbc %+v", clusterManager.ClusterName)
	go registerHandlers(lbc)
	go handleSigterm(lbc, *deleteAllOnQuit)

	lbc.Run()
	for {
		glog.Infof("Handled quit, awaiting pod deletion.")
		time.Sleep(30 * time.Second)
	}
}
// Run runs the specified KubeletServer for the given KubeletConfig. This should never exit.
// The kcfg argument may be nil - if so, it is initialized from the settings on KubeletServer.
// Otherwise, the caller is assumed to have set up the KubeletConfig object and all defaults
// will be ignored.
func (s *KubeletServer) Run(kcfg *KubeletConfig) error {
	if kcfg == nil {
		cfg, err := s.KubeletConfig()
		if err != nil {
			return err
		}
		kcfg = cfg

		clientConfig, err := s.CreateAPIServerClientConfig()
		if err == nil {
			kcfg.KubeClient, err = client.New(clientConfig)
		}
		if err != nil && len(s.APIServerList) > 0 {
			glog.Warningf("No API client: %v", err)
		}

		cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
		if err != nil {
			return err
		}
		glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile)
		kcfg.Cloud = cloud
	}

	if kcfg.CAdvisorInterface == nil {
		ca, err := cadvisor.New(s.CAdvisorPort)
		if err != nil {
			return err
		}
		kcfg.CAdvisorInterface = ca
	}

	util.ReallyCrash = s.ReallyCrashForTesting
	rand.Seed(time.Now().UTC().UnixNano())

	credentialprovider.SetPreferredDockercfgPath(s.RootDirectory)

	glog.V(2).Infof("Using root directory: %v", s.RootDirectory)

	// TODO(vmarmol): Do this through container config.
	oomAdjuster := oom.NewOomAdjuster()
	if err := oomAdjuster.ApplyOomScoreAdj(0, s.OOMScoreAdj); err != nil {
		glog.Warning(err)
	}

	if err := RunKubelet(kcfg, nil); err != nil {
		return err
	}

	if s.HealthzPort > 0 {
		healthz.DefaultHealthz()
		go util.Until(func() {
			err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress.String(), strconv.Itoa(s.HealthzPort)), nil)
			if err != nil {
				glog.Errorf("Starting health server failed: %v", err)
			}
		}, 5*time.Second, util.NeverStop)
	}

	if s.RunOnce {
		return nil
	}

	// run forever
	select {}
}
func (s *CMServer) Run(_ []string) error {
	if s.Kubeconfig == "" && s.Master == "" {
		glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
	}

	// This creates a client, first loading any specified kubeconfig
	// file, and then overriding the Master flag, if non-empty.
	kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Kubeconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
	if err != nil {
		return err
	}
	kubeconfig.QPS = 20.0
	kubeconfig.Burst = 30

	kubeClient, err := client.New(kubeconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	go func() {
		mux := http.NewServeMux()
		healthz.InstallHandler(mux)
		if s.EnableProfiling {
			profile.InstallHandler(mux)
		}
		mux.Handle("/metrics", prometheus.Handler())

		server := &http.Server{
			Addr:    net.JoinHostPort(s.Address.String(), strconv.Itoa(s.Port)),
			Handler: mux,
		}
		glog.Fatal(server.ListenAndServe())
	}()

	endpoints := s.createEndpointController(kubeClient)
	go endpoints.Run(s.ConcurrentEndpointSyncs, util.NeverStop)

	go replicationcontroller.NewReplicationManager(kubeClient, s.resyncPeriod, replicationcontroller.BurstReplicas).
		Run(s.ConcurrentRCSyncs, util.NeverStop)

	go daemon.NewDaemonSetsController(kubeClient, s.resyncPeriod).
		Run(s.ConcurrentDSCSyncs, util.NeverStop)

	//TODO(jdef) should eventually support more cloud providers here
	if s.CloudProvider != mesos.ProviderName {
		glog.Fatalf("Only provider %v is supported, you specified %v", mesos.ProviderName, s.CloudProvider)
	}
	cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	if err != nil {
		glog.Fatalf("Cloud provider could not be initialized: %v", err)
	}

	nodeController := nodecontroller.NewNodeController(cloud, kubeClient,
		s.PodEvictionTimeout, util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
		util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
		s.NodeMonitorGracePeriod, s.NodeStartupGracePeriod, s.NodeMonitorPeriod,
		(*net.IPNet)(&s.ClusterCIDR), s.AllocateNodeCIDRs)
	nodeController.Run(s.NodeSyncPeriod)

	serviceController := servicecontroller.New(cloud, kubeClient, s.ClusterName)
	if err := serviceController.Run(s.ServiceSyncPeriod, s.NodeSyncPeriod); err != nil {
		glog.Errorf("Failed to start service controller: %v", err)
	}

	if s.AllocateNodeCIDRs {
		routes, ok := cloud.Routes()
		if !ok {
			glog.Fatal("Cloud provider must support routes if allocate-node-cidrs is set")
		}
		routeController := routecontroller.New(routes, kubeClient, s.ClusterName, (*net.IPNet)(&s.ClusterCIDR))
		routeController.Run(s.NodeSyncPeriod)
	}

	resourceQuotaController := resourcequotacontroller.NewResourceQuotaController(kubeClient)
	resourceQuotaController.Run(s.ResourceQuotaSyncPeriod)

	namespaceController := namespacecontroller.NewNamespaceController(kubeClient, &unversioned.APIVersions{}, s.NamespaceSyncPeriod)
	namespaceController.Run()

	pvclaimBinder := volumeclaimbinder.NewPersistentVolumeClaimBinder(kubeClient, s.PVClaimBinderSyncPeriod)
	pvclaimBinder.Run()
	pvRecycler, err := volumeclaimbinder.NewPersistentVolumeRecycler(kubeClient, s.PVClaimBinderSyncPeriod, app.ProbeRecyclableVolumePlugins(s.VolumeConfigFlags))
	if err != nil {
		glog.Fatalf("Failed to start persistent volume recycler: %+v", err)
	}
	pvRecycler.Run()

	var rootCA []byte

	if s.RootCAFile != "" {
		rootCA, err = ioutil.ReadFile(s.RootCAFile)
		if err != nil {
			return fmt.Errorf("error reading root-ca-file at %s: %v", s.RootCAFile, err)
		}
		if _, err := util.CertsFromPEM(rootCA); err != nil {
			return fmt.Errorf("error parsing root-ca-file at %s: %v", s.RootCAFile, err)
		}
	} else {
		rootCA = kubeconfig.CAData
	}

	if len(s.ServiceAccountKeyFile) > 0 {
		privateKey, err := serviceaccount.ReadPrivateKey(s.ServiceAccountKeyFile)
		if err != nil {
			glog.Errorf("Error reading key for service account token controller: %v", err)
		} else {
			serviceaccount.NewTokensController(
				kubeClient,
				serviceaccount.TokensControllerOptions{
					TokenGenerator: serviceaccount.JWTTokenGenerator(privateKey),
					RootCA:         rootCA,
				},
			).Run()
		}
	}

	serviceaccount.NewServiceAccountsController(
		kubeClient,
		serviceaccount.DefaultServiceAccountsControllerOptions(),
	).Run()

	select {}
}
// Run runs the specified APIServer. This should never exit.
func (s *APIServer) Run(_ []string) error {
	s.verifyClusterIPFlags()

	// If advertise-address is not specified, use bind-address. If bind-address
	// is not usable (unset, 0.0.0.0, or loopback), setDefaults() in
	// pkg/master/master.go will do the right thing and use the host's default
	// interface.
	if s.AdvertiseAddress == nil || s.AdvertiseAddress.IsUnspecified() {
		s.AdvertiseAddress = s.BindAddress
	}

	if (s.EtcdConfigFile != "" && len(s.EtcdServerList) != 0) || (s.EtcdConfigFile == "" && len(s.EtcdServerList) == 0) {
		glog.Fatalf("specify either --etcd-servers or --etcd-config")
	}

	capabilities.Initialize(capabilities.Capabilities{
		AllowPrivileged: s.AllowPrivileged,
		// TODO(vmarmol): Implement support for HostNetworkSources.
		PrivilegedSources: capabilities.PrivilegedSources{
			HostNetworkSources: []string{},
			HostPIDSources:     []string{},
			HostIPCSources:     []string{},
		},
		PerConnectionBandwidthLimitBytesPerSec: s.MaxConnectionBytesPerSec,
	})

	cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	if err != nil {
		glog.Fatalf("Cloud provider could not be initialized: %v", err)
	}

	// Setup tunneler if needed
	var tunneler master.Tunneler
	var proxyDialerFn apiserver.ProxyDialerFunc
	if len(s.SSHUser) > 0 {
		// Get ssh key distribution func, if supported
		var installSSH master.InstallSSHKey
		if cloud != nil {
			if instances, supported := cloud.Instances(); supported {
				installSSH = instances.AddSSHKeyToAllInstances
			}
		}

		// Set up the tunneler
		tunneler = master.NewSSHTunneler(s.SSHUser, s.SSHKeyfile, installSSH)

		// Use the tunneler's dialer to connect to the kubelet
		s.KubeletConfig.Dial = tunneler.Dial
		// Use the tunneler's dialer when proxying to pods, services, and nodes
		proxyDialerFn = tunneler.Dial
	}

	// Proxying to pods and services is IP-based... don't expect to be able to verify the hostname
	proxyTLSClientConfig := &tls.Config{InsecureSkipVerify: true}

	kubeletClient, err := client.NewKubeletClient(&s.KubeletConfig)
	if err != nil {
		glog.Fatalf("Failure to start kubelet client: %v", err)
	}

	apiGroupVersionOverrides, err := s.parseRuntimeConfig()
	if err != nil {
		glog.Fatalf("error in parsing runtime-config: %s", err)
	}

	clientConfig := &client.Config{
		Host:    net.JoinHostPort(s.InsecureBindAddress.String(), strconv.Itoa(s.InsecurePort)),
		Version: s.DeprecatedStorageVersion,
	}
	client, err := client.New(clientConfig)
	if err != nil {
		glog.Fatalf("Invalid server address: %v", err)
	}

	legacyV1Group, err := latest.Group("")
	if err != nil {
		return err
	}

	storageDestinations := master.NewStorageDestinations()

	storageVersions := generateStorageVersionMap(s.DeprecatedStorageVersion, s.StorageVersions)
	if _, found := storageVersions[legacyV1Group.Group]; !found {
		glog.Fatalf("Couldn't find the storage version for group: %q in storageVersions: %v", legacyV1Group.Group, storageVersions)
	}
	etcdStorage, err := newEtcd(s.EtcdConfigFile, s.EtcdServerList, legacyV1Group.InterfacesFor, storageVersions[legacyV1Group.Group], s.EtcdPathPrefix)
	if err != nil {
		glog.Fatalf("Invalid storage version or misconfigured etcd: %v", err)
	}
	storageDestinations.AddAPIGroup("", etcdStorage)

	if !apiGroupVersionOverrides["extensions/v1beta1"].Disable {
		expGroup, err := latest.Group("extensions")
		if err != nil {
			glog.Fatalf("Extensions API is enabled in runtime config, but not enabled in the environment variable KUBE_API_VERSIONS. Error: %v", err)
		}
		if _, found := storageVersions[expGroup.Group]; !found {
			glog.Fatalf("Couldn't find the storage version for group: %q in storageVersions: %v", expGroup.Group, storageVersions)
		}
		expEtcdStorage, err := newEtcd(s.EtcdConfigFile, s.EtcdServerList, expGroup.InterfacesFor, storageVersions[expGroup.Group], s.EtcdPathPrefix)
		if err != nil {
			glog.Fatalf("Invalid extensions storage version or misconfigured etcd: %v", err)
		}
		storageDestinations.AddAPIGroup("extensions", expEtcdStorage)
	}

	updateEtcdOverrides(s.EtcdServersOverrides, storageVersions, s.EtcdPathPrefix, &storageDestinations, newEtcd)

	n := s.ServiceClusterIPRange

	// Default to the private server key for service account token signing
	if s.ServiceAccountKeyFile == "" && s.TLSPrivateKeyFile != "" {
		if apiserver.IsValidServiceAccountKeyFile(s.TLSPrivateKeyFile) {
			s.ServiceAccountKeyFile = s.TLSPrivateKeyFile
		} else {
			glog.Warning("no RSA key provided, service account token authentication disabled")
		}
	}
	authenticator, err := apiserver.NewAuthenticator(apiserver.AuthenticatorConfig{
		BasicAuthFile:         s.BasicAuthFile,
		ClientCAFile:          s.ClientCAFile,
		TokenAuthFile:         s.TokenAuthFile,
		OIDCIssuerURL:         s.OIDCIssuerURL,
		OIDCClientID:          s.OIDCClientID,
		OIDCCAFile:            s.OIDCCAFile,
		OIDCUsernameClaim:     s.OIDCUsernameClaim,
		ServiceAccountKeyFile: s.ServiceAccountKeyFile,
		ServiceAccountLookup:  s.ServiceAccountLookup,
		Storage:               etcdStorage,
		KeystoneURL:           s.KeystoneURL,
	})
	if err != nil {
		glog.Fatalf("Invalid Authentication Config: %v", err)
	}

	authorizationModeNames := strings.Split(s.AuthorizationMode, ",")
	authorizer, err := apiserver.NewAuthorizerFromAuthorizationConfig(authorizationModeNames, s.AuthorizationPolicyFile)
	if err != nil {
		glog.Fatalf("Invalid Authorization Config: %v", err)
	}

	admissionControlPluginNames := strings.Split(s.AdmissionControl, ",")
	admissionController := admission.NewFromPlugins(client, admissionControlPluginNames, s.AdmissionControlConfigFile)

	if len(s.ExternalHost) == 0 {
		// TODO: extend for other providers
		if s.CloudProvider == "gce" {
			instances, supported := cloud.Instances()
			if !supported {
				glog.Fatalf("gce cloud provider has no instances. this shouldn't happen. exiting.")
			}
			name, err := os.Hostname()
			if err != nil {
				glog.Fatalf("failed to get hostname: %v", err)
			}
			addrs, err := instances.NodeAddresses(name)
			if err != nil {
				glog.Warningf("unable to obtain external host address from cloud provider: %v", err)
			} else {
				for _, addr := range addrs {
					if addr.Type == api.NodeExternalIP {
						s.ExternalHost = addr.Address
					}
				}
			}
		}
	}

	config := &master.Config{
		StorageDestinations:      storageDestinations,
		StorageVersions:          storageVersions,
		EventTTL:                 s.EventTTL,
		KubeletClient:            kubeletClient,
		ServiceClusterIPRange:    &n,
		EnableCoreControllers:    true,
		EnableLogsSupport:        s.EnableLogsSupport,
		EnableUISupport:          true,
		EnableSwaggerSupport:     true,
		EnableProfiling:          s.EnableProfiling,
		EnableWatchCache:         s.EnableWatchCache,
		EnableIndex:              true,
		APIPrefix:                s.APIPrefix,
		APIGroupPrefix:           s.APIGroupPrefix,
		CorsAllowedOriginList:    s.CorsAllowedOriginList,
		ReadWritePort:            s.SecurePort,
		PublicAddress:            s.AdvertiseAddress,
		Authenticator:            authenticator,
		SupportsBasicAuth:        len(s.BasicAuthFile) > 0,
		Authorizer:               authorizer,
		AdmissionControl:         admissionController,
		APIGroupVersionOverrides: apiGroupVersionOverrides,
		MasterServiceNamespace:   s.MasterServiceNamespace,
		ClusterName:              s.ClusterName,
		ExternalHost:             s.ExternalHost,
		MinRequestTimeout:        s.MinRequestTimeout,
		ProxyDialer:              proxyDialerFn,
		ProxyTLSClientConfig:     proxyTLSClientConfig,
		Tunneler:                 tunneler,
		ServiceNodePortRange:     s.ServiceNodePortRange,
	}
	m := master.New(config)

	// We serve on 2 ports. See docs/accessing_the_api.md
	secureLocation := ""
	if s.SecurePort != 0 {
		secureLocation = net.JoinHostPort(s.BindAddress.String(), strconv.Itoa(s.SecurePort))
	}
	insecureLocation := net.JoinHostPort(s.InsecureBindAddress.String(), strconv.Itoa(s.InsecurePort))

	// See the flag commentary to understand our assumptions when opening the read-only and read-write ports.

	var sem chan bool
	if s.MaxRequestsInFlight > 0 {
		sem = make(chan bool, s.MaxRequestsInFlight)
	}

	longRunningRE := regexp.MustCompile(s.LongRunningRequestRE)
	longRunningTimeout := func(req *http.Request) (<-chan time.Time, string) {
		// TODO unify this with apiserver.MaxInFlightLimit
		if longRunningRE.MatchString(req.URL.Path) || req.URL.Query().Get("watch") == "true" {
			return nil, ""
		}
		return time.After(time.Minute), ""
	}

	if secureLocation != "" {
		handler := apiserver.TimeoutHandler(m.Handler, longRunningTimeout)
		secureServer := &http.Server{
			Addr:           secureLocation,
			Handler:        apiserver.MaxInFlightLimit(sem, longRunningRE, apiserver.RecoverPanics(handler)),
			MaxHeaderBytes: 1 << 20,
			TLSConfig: &tls.Config{
				// Change default from SSLv3 to TLSv1.0 (because of POODLE vulnerability)
				MinVersion: tls.VersionTLS10,
			},
		}

		if len(s.ClientCAFile) > 0 {
			clientCAs, err := util.CertPoolFromFile(s.ClientCAFile)
			if err != nil {
				glog.Fatalf("unable to load client CA file: %v", err)
			}
			// Populate PeerCertificates in requests, but don't reject connections without certificates
			// This allows certificates to be validated by authenticators, while still allowing other auth types
			secureServer.TLSConfig.ClientAuth = tls.RequestClientCert
			// Specify allowed CAs for client certificates
			secureServer.TLSConfig.ClientCAs = clientCAs
		}

		glog.Infof("Serving securely on %s", secureLocation)
		if s.TLSCertFile == "" && s.TLSPrivateKeyFile == "" {
			s.TLSCertFile = path.Join(s.CertDirectory, "apiserver.crt")
			s.TLSPrivateKeyFile = path.Join(s.CertDirectory, "apiserver.key")
			// TODO (cjcullen): Is PublicAddress the right address to sign a cert with?
			alternateIPs := []net.IP{config.ServiceReadWriteIP}
			alternateDNS := []string{"kubernetes.default.svc", "kubernetes.default", "kubernetes"}
			// It would be nice to set a fqdn subject alt name, but only the kubelets know, the apiserver is clueless
			// alternateDNS = append(alternateDNS, "kubernetes.default.svc.CLUSTER.DNS.NAME")
			if err := util.GenerateSelfSignedCert(config.PublicAddress.String(), s.TLSCertFile, s.TLSPrivateKeyFile, alternateIPs, alternateDNS); err != nil {
				glog.Errorf("Unable to generate self signed cert: %v", err)
			} else {
				glog.Infof("Using self-signed cert (%s, %s)", s.TLSCertFile, s.TLSPrivateKeyFile)
			}
		}

		go func() {
			defer util.HandleCrash()
			for {
				// err == systemd.SdNotifyNoSocket when not running on a systemd system
				if err := systemd.SdNotify("READY=1\n"); err != nil && err != systemd.SdNotifyNoSocket {
					glog.Errorf("Unable to send systemd daemon successful start message: %v\n", err)
				}
				if err := secureServer.ListenAndServeTLS(s.TLSCertFile, s.TLSPrivateKeyFile); err != nil {
					glog.Errorf("Unable to listen for secure (%v); will try again.", err)
				}
				time.Sleep(15 * time.Second)
			}
		}()
	}

	handler := apiserver.TimeoutHandler(m.InsecureHandler, longRunningTimeout)
	http := &http.Server{
		Addr:           insecureLocation,
		Handler:        apiserver.RecoverPanics(handler),
		MaxHeaderBytes: 1 << 20,
	}

	if secureLocation == "" {
		// err == systemd.SdNotifyNoSocket when not running on a systemd system
		if err := systemd.SdNotify("READY=1\n"); err != nil && err != systemd.SdNotifyNoSocket {
			glog.Errorf("Unable to send systemd daemon successful start message: %v\n", err)
		}
	}

	glog.Infof("Serving insecurely on %s", insecureLocation)
	glog.Fatal(http.ListenAndServe())
	return nil
}
// Run runs the specified KubeletExecutorServer.
func (s *KubeletExecutorServer) Run(hks hyperkube.Interface, _ []string) error {
	rand.Seed(time.Now().UTC().UnixNano())

	oomAdjuster := oom.NewOomAdjuster()
	if err := oomAdjuster.ApplyOomScoreAdj(0, s.OOMScoreAdj); err != nil {
		log.Info(err)
	}

	// empty string for the docker and system containers (= cgroup paths). This
	// stops the kubelet taking any control over other system processes.
	s.SystemContainer = ""
	s.DockerDaemonContainer = ""

	// create apiserver client
	var apiclient *client.Client
	clientConfig, err := s.CreateAPIServerClientConfig()
	if err == nil {
		apiclient, err = client.New(clientConfig)
	}
	if err != nil {
		// required for k8sm since we need to send api.Binding information
		// back to the apiserver
		log.Fatalf("No API client: %v", err)
	}

	log.Infof("Using root directory: %v", s.RootDirectory)
	credentialprovider.SetPreferredDockercfgPath(s.RootDirectory)

	shutdownCloser, err := s.syncExternalShutdownWatcher()
	if err != nil {
		return err
	}

	cAdvisorInterface, err := cadvisor.New(s.CAdvisorPort)
	if err != nil {
		return err
	}

	imageGCPolicy := kubelet.ImageGCPolicy{
		HighThresholdPercent: s.ImageGCHighThresholdPercent,
		LowThresholdPercent:  s.ImageGCLowThresholdPercent,
	}

	diskSpacePolicy := kubelet.DiskSpacePolicy{
		DockerFreeDiskMB: s.LowDiskSpaceThresholdMB,
		RootFreeDiskMB:   s.LowDiskSpaceThresholdMB,
	}

	//TODO(jdef) intentionally NOT initializing a cloud provider here since:
	//(a) the kubelet doesn't actually use it
	//(b) we don't need to create N-kubelet connections to zookeeper for no good reason
	//cloud := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	//log.Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile)

	hostNetworkSources, err := kubelet.GetValidatedSources(strings.Split(s.HostNetworkSources, ","))
	if err != nil {
		return err
	}

	tlsOptions, err := s.InitializeTLS()
	if err != nil {
		return err
	}
	mounter := mount.New()
	if s.Containerized {
		log.V(2).Info("Running kubelet in containerized mode (experimental)")
		mounter = &mount.NsenterMounter{}
	}

	var writer utilio.Writer = &utilio.StdWriter{}
	var dockerExecHandler dockertools.ExecHandler
	switch s.DockerExecHandlerName {
	case "native":
		dockerExecHandler = &dockertools.NativeExecHandler{}
	case "nsenter":
		writer = &utilio.NsenterWriter{}
		dockerExecHandler = &dockertools.NsenterExecHandler{}
	default:
		log.Warningf("Unknown Docker exec handler %q; defaulting to native", s.DockerExecHandlerName)
		dockerExecHandler = &dockertools.NativeExecHandler{}
	}

	kcfg := app.KubeletConfig{
		Address:            s.Address,
		AllowPrivileged:    s.AllowPrivileged,
		HostNetworkSources: hostNetworkSources,
		HostnameOverride:   s.HostnameOverride,
		RootDirectory:      s.RootDirectory,
		// ConfigFile: ""
		// ManifestURL: ""
		FileCheckFrequency: s.FileCheckFrequency,
		// HTTPCheckFrequency
		PodInfraContainerImage:  s.PodInfraContainerImage,
		SyncFrequency:           s.SyncFrequency,
		RegistryPullQPS:         s.RegistryPullQPS,
		RegistryBurst:           s.RegistryBurst,
		MinimumGCAge:            s.MinimumGCAge,
		MaxPerPodContainerCount: s.MaxPerPodContainerCount,
		MaxContainerCount:       s.MaxContainerCount,
		RegisterNode:            s.RegisterNode,
		// StandaloneMode: false
		ClusterDomain:                  s.ClusterDomain,
		ClusterDNS:                     s.ClusterDNS,
		Runonce:                        s.RunOnce,
		Port:                           s.Port,
		ReadOnlyPort:                   s.ReadOnlyPort,
		CAdvisorInterface:              cAdvisorInterface,
		EnableServer:                   s.EnableServer,
		EnableDebuggingHandlers:        s.EnableDebuggingHandlers,
		DockerClient:                   dockertools.ConnectToDockerOrDie(s.DockerEndpoint),
		KubeClient:                     apiclient,
		MasterServiceNamespace:         s.MasterServiceNamespace,
		VolumePlugins:                  app.ProbeVolumePlugins(),
		NetworkPlugins:                 app.ProbeNetworkPlugins(s.NetworkPluginDir),
		NetworkPluginName:              s.NetworkPluginName,
		StreamingConnectionIdleTimeout: s.StreamingConnectionIdleTimeout,
		TLSOptions:                     tlsOptions,
		ImageGCPolicy:                  imageGCPolicy,
		DiskSpacePolicy:                diskSpacePolicy,
		Cloud:                          nil, // TODO(jdef) Cloud, specifying null here because we don't want all kubelets polling mesos-master; need to account for this in the cloudprovider impl
		NodeStatusUpdateFrequency:      s.NodeStatusUpdateFrequency,
		ResourceContainer:              s.ResourceContainer,
		CgroupRoot:                     s.CgroupRoot,
		ContainerRuntime:               s.ContainerRuntime,
		Mounter:                        mounter,
		DockerDaemonContainer:          s.DockerDaemonContainer,
		SystemContainer:                s.SystemContainer,
		ConfigureCBR0:                  s.ConfigureCBR0,
		MaxPods:                        s.MaxPods,
		DockerExecHandler:              dockerExecHandler,
		ResolverConfig:                 s.ResolverConfig,
		CPUCFSQuota:                    s.CPUCFSQuota,
		Writer:                         writer,
		MaxOpenFiles:                   s.MaxOpenFiles,
	}

	kcfg.NodeName = kcfg.Hostname

	err = app.RunKubelet(&kcfg, app.KubeletBuilder(func(kc *app.KubeletConfig) (app.KubeletBootstrap, *kconfig.PodConfig, error) {
		return s.createAndInitKubelet(kc, hks, clientConfig, shutdownCloser)
	}))
	if err != nil {
		return err
	}

	if s.HealthzPort > 0 {
		healthz.DefaultHealthz()
		go util.Until(func() {
			err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress.String(), strconv.Itoa(s.HealthzPort)), nil)
			if err != nil {
				log.Errorf("Starting health server failed: %v", err)
			}
		}, 5*time.Second, util.NeverStop)
	}

	// block until executor is shut down or commits shutdown
	select {}
}
func main() {
	flag.Parse()

	glog.Infof("Starting serve_hostnames soak test with queries=%d and podsPerNode=%d upTo=%d",
		*queriesAverage, *podsPerNode, *upTo)

	var spec string
	if *gke != "" {
		spec = filepath.Join(os.Getenv("HOME"), ".config", "gcloud", "kubernetes", "kubeconfig")
	} else {
		spec = filepath.Join(os.Getenv("HOME"), ".kube", "config")
	}
	settings, err := clientcmd.LoadFromFile(spec)
	if err != nil {
		glog.Fatalf("Error loading configuration: %v", err.Error())
	}
	if *gke != "" {
		settings.CurrentContext = *gke
	}
	config, err := clientcmd.NewDefaultClientConfig(*settings, &clientcmd.ConfigOverrides{}).ClientConfig()
	if err != nil {
		glog.Fatalf("Failed to construct config: %v", err)
	}

	c, err := client.New(config)
	if err != nil {
		glog.Fatalf("Failed to make client: %v", err)
	}

	var nodes *api.NodeList
	for start := time.Now(); time.Since(start) < nodeListTimeout; time.Sleep(2 * time.Second) {
		nodes, err = c.Nodes().List(labels.Everything(), fields.Everything())
		if err == nil {
			break
		}
		glog.Warningf("Failed to list nodes: %v", err)
	}
	if err != nil {
		glog.Fatalf("Giving up trying to list nodes: %v", err)
	}

	if len(nodes.Items) == 0 {
		glog.Fatalf("Failed to find any nodes.")
	}

	glog.Infof("Found %d nodes on this cluster:", len(nodes.Items))
	for i, node := range nodes.Items {
		glog.Infof("%d: %s", i, node.Name)
	}

	queries := *queriesAverage * len(nodes.Items) * *podsPerNode

	// Create the namespace
	got, err := c.Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{GenerateName: "serve-hostnames-"}})
	if err != nil {
		glog.Fatalf("Failed to create namespace: %v", err)
	}
	ns := got.Name
	defer func(ns string) {
		if err := c.Namespaces().Delete(ns); err != nil {
			glog.Warningf("Failed to delete namespace %s: %v", ns, err)
		} else {
			// wait until the namespace disappears
			for i := 0; i < int(namespaceDeleteTimeout/time.Second); i++ {
				if _, err := c.Namespaces().Get(ns); err != nil {
					if errors.IsNotFound(err) {
						return
					}
				}
				time.Sleep(time.Second)
			}
		}
	}(ns)
	glog.Infof("Created namespace %s", ns)

	// Create a service for these pods.
	glog.Infof("Creating service %s/serve-hostnames", ns)
	// Make several attempts to create a service.
	var svc *api.Service
	for start := time.Now(); time.Since(start) < serviceCreateTimeout; time.Sleep(2 * time.Second) {
		t := time.Now()
		svc, err = c.Services(ns).Create(&api.Service{
			ObjectMeta: api.ObjectMeta{
				Name: "serve-hostnames",
				Labels: map[string]string{
					"name": "serve-hostname",
				},
			},
			Spec: api.ServiceSpec{
				Ports: []api.ServicePort{{
					Protocol:   "TCP",
					Port:       9376,
					TargetPort: util.NewIntOrStringFromInt(9376),
				}},
				Selector: map[string]string{
					"name": "serve-hostname",
				},
			},
		})
		glog.V(4).Infof("Service create %s/serve-hostnames took %v", ns, time.Since(t))
		if err == nil {
			break
		}
		glog.Warningf("After %v failed to create service %s/serve-hostnames: %v", time.Since(start), ns, err)
	}
	if err != nil {
		glog.Warningf("Unable to create service %s/%s: %v", ns, svc.Name, err)
		return
	}
	// Clean up service
	defer func() {
		glog.Infof("Cleaning up service %s/serve-hostnames", ns)
		// Make several attempts to delete the service.
		for start := time.Now(); time.Since(start) < deleteTimeout; time.Sleep(1 * time.Second) {
			if err := c.Services(ns).Delete(svc.Name); err == nil {
				return
			}
			glog.Warningf("After %v unable to delete service %s/%s: %v", time.Since(start), ns, svc.Name, err)
		}
	}()

	// Put serve-hostname pods on each node.
	podNames := []string{}
	for i, node := range nodes.Items {
		for j := 0; j < *podsPerNode; j++ {
			podName := fmt.Sprintf("serve-hostname-%d-%d", i, j)
			podNames = append(podNames, podName)
			// Make several attempts
			for start := time.Now(); time.Since(start) < podCreateTimeout; time.Sleep(2 * time.Second) {
				glog.Infof("Creating pod %s/%s on node %s", ns, podName, node.Name)
				t := time.Now()
				_, err = c.Pods(ns).Create(&api.Pod{
					ObjectMeta: api.ObjectMeta{
						Name: podName,
						Labels: map[string]string{
							"name": "serve-hostname",
						},
					},
					Spec: api.PodSpec{
						Containers: []api.Container{
							{
								Name:  "serve-hostname",
								Image: "gcr.io/google_containers/serve_hostname:1.1",
								Ports: []api.ContainerPort{{ContainerPort: 9376}},
							},
						},
						NodeName: node.Name,
					},
				})
				glog.V(4).Infof("Pod create %s/%s request took %v", ns, podName, time.Since(t))
				if err == nil {
					break
				}
				glog.Warningf("After %s failed to create pod %s/%s: %v", time.Since(start), ns, podName, err)
			}
			if err != nil {
				glog.Warningf("Failed to create pod %s/%s: %v", ns, podName, err)
				return
			}
		}
	}
	// Clean up the pods
	defer func() {
		glog.Info("Cleaning up pods")
		// Make several attempts to delete the pods.
		for _, podName := range podNames {
			for start := time.Now(); time.Since(start) < deleteTimeout; time.Sleep(1 * time.Second) {
				if err = c.Pods(ns).Delete(podName, nil); err == nil {
					break
				}
				glog.Warningf("After %v failed to delete pod %s/%s: %v", time.Since(start), ns, podName, err)
			}
		}
	}()

	glog.Info("Waiting for the serve-hostname pods to be ready")
	for _, podName := range podNames {
		var pod *api.Pod
		for start := time.Now(); time.Since(start) < podStartTimeout; time.Sleep(5 * time.Second) {
			pod, err = c.Pods(ns).Get(podName)
			if err != nil {
				glog.Warningf("Get pod %s/%s failed, ignoring for %v: %v", ns, podName, podStartTimeout, err)
				continue
			}
			if pod.Status.Phase == api.PodRunning {
				break
			}
		}
		if pod.Status.Phase != api.PodRunning {
			glog.Warningf("Gave up waiting on pod %s/%s to be running (saw %v)", ns, podName, pod.Status.Phase)
		} else {
			glog.Infof("%s/%s is running", ns, podName)
		}
	}

	// Wait for the endpoints to propagate.
	for start := time.Now(); time.Since(start) < endpointTimeout; time.Sleep(10 * time.Second) {
		hostname, err := c.Get().
			Namespace(ns).
			Prefix("proxy").
			Resource("services").
			Name("serve-hostnames").
			DoRaw()
		if err != nil {
			glog.Infof("After %v while making a proxy call got error %v", time.Since(start), err)
			continue
		}
		var r unversioned.Status
		if err := api.Scheme.DecodeInto(hostname, &r); err != nil {
			break
		}
		if r.Status == unversioned.StatusFailure {
			glog.Infof("After %v got status %v", time.Since(start), string(hostname))
			continue
		}
		break
	}

	// Repeatedly make requests.
	for iteration := 0; iteration != *upTo; iteration++ {
		responseChan := make(chan string, queries)
		// Use a channel of size *maxPar to throttle the number
		// of in-flight requests to avoid overloading the service.
		inFlight := make(chan struct{}, *maxPar)
		start := time.Now()
		for q := 0; q < queries; q++ {
			go func(i int, query int) {
				inFlight <- struct{}{}
				t := time.Now()
				hostname, err := c.Get().
					Namespace(ns).
					Prefix("proxy").
					Resource("services").
					Name("serve-hostnames").
					DoRaw()
				glog.V(4).Infof("Proxy call in namespace %s took %v", ns, time.Since(t))
				if err != nil {
					glog.Warningf("Call failed during iteration %d query %d : %v", i, query, err)
					// If the query failed return a string which starts with a character
					// that can't be part of a hostname.
					responseChan <- fmt.Sprintf("!failed in iteration %d to issue query %d: %v", i, query, err)
				} else {
					responseChan <- string(hostname)
				}
				<-inFlight
			}(iteration, q)
		}
		responses := make(map[string]int, *podsPerNode*len(nodes.Items))
		missing := 0
		for q := 0; q < queries; q++ {
			r := <-responseChan
			glog.V(4).Infof("Got response from %s", r)
			responses[r]++
			// If the returned hostname starts with '!' then it indicates
			// an error response.
			if len(r) > 0 && r[0] == '!' {
				glog.V(3).Infof("Got response %s", r)
				missing++
			}
		}
		if missing > 0 {
			glog.Warningf("Missing %d responses out of %d", missing, queries)
		}
		// Report any nodes that did not respond.
		for n, node := range nodes.Items {
			for i := 0; i < *podsPerNode; i++ {
				name := fmt.Sprintf("serve-hostname-%d-%d", n, i)
				if _, ok := responses[name]; !ok {
					glog.Warningf("No response from pod %s on node %s at iteration %d", name, node.Name, iteration)
				}
			}
		}
		glog.Infof("Iteration %d took %v for %d queries (%.2f QPS) with %d missing",
			iteration, time.Since(start), queries-missing,
			float64(queries-missing)/time.Since(start).Seconds(), missing)
	}
}
// NewProxyServerDefault creates a new ProxyServer object with default parameters.
func NewProxyServerDefault(config *ProxyServerConfig) (*ProxyServer, error) {
	protocol := utiliptables.ProtocolIpv4
	if config.BindAddress.To4() == nil {
		protocol = utiliptables.ProtocolIpv6
	}

	// We omit creation of pretty much everything if we run in cleanup mode
	if config.CleanupAndExit {
		execer := exec.New()
		dbus := utildbus.New()
		iptInterface := utiliptables.New(execer, dbus, protocol)
		return &ProxyServer{
			Config:       config,
			IptInterface: iptInterface,
		}, nil
	}

	// TODO(vmarmol): Use container config for this.
	var oomAdjuster *oom.OomAdjuster
	if config.OOMScoreAdj != 0 {
		oomAdjuster = oom.NewOomAdjuster()
		if err := oomAdjuster.ApplyOomScoreAdj(0, config.OOMScoreAdj); err != nil {
			glog.V(2).Info(err)
		}
	}

	if config.ResourceContainer != "" {
		// Run in its own container.
		if err := util.RunInResourceContainer(config.ResourceContainer); err != nil {
			glog.Warningf("Failed to start in resource-only container %q: %v", config.ResourceContainer, err)
		} else {
			glog.V(2).Infof("Running in resource-only container %q", config.ResourceContainer)
		}
	}

	// Create a Kube Client
	// define api config source
	if config.Kubeconfig == "" && config.Master == "" {
		glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
	}
	// This creates a client, first loading any specified kubeconfig
	// file, and then overriding the Master flag, if non-empty.
	kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: config.Kubeconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: config.Master}}).ClientConfig()
	if err != nil {
		return nil, err
	}

	client, err := kubeclient.New(kubeconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	// Create event recorder
	hostname := nodeutil.GetHostname(config.HostnameOverride)
	eventBroadcaster := record.NewBroadcaster()
	recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "kube-proxy", Host: hostname})
	eventBroadcaster.StartRecordingToSink(client.Events(""))

	// Create an iptables util.
	execer := exec.New()
	dbus := utildbus.New()
	iptInterface := utiliptables.New(execer, dbus, protocol)

	var proxier proxy.ProxyProvider
	var endpointsHandler proxyconfig.EndpointsConfigHandler

	useIptablesProxy := false
	if mayTryIptablesProxy(config.ProxyMode, client.Nodes(), hostname) {
		var err error
		// guaranteed false on error, error only necessary for debugging
		useIptablesProxy, err = iptables.ShouldUseIptablesProxier()
		if err != nil {
			glog.Errorf("Can't determine whether to use iptables proxy, using userspace proxier: %v", err)
		}
	}

	if useIptablesProxy {
		glog.V(2).Info("Using iptables Proxier.")
		execer := exec.New()
		proxierIptables, err := iptables.NewProxier(iptInterface, execer, config.SyncPeriod, config.MasqueradeAll)
		if err != nil {
			glog.Fatalf("Unable to create proxier: %v", err)
		}
		proxier = proxierIptables
		endpointsHandler = proxierIptables
		// No turning back. Remove artifacts that might still exist from the userspace Proxier.
		glog.V(2).Info("Tearing down userspace rules. Errors here are acceptable.")
		userspace.CleanupLeftovers(iptInterface)
	} else {
		glog.V(2).Info("Using userspace Proxier.")
		// This is a proxy.LoadBalancer which NewProxier needs but has methods we don't need for
		// our config.EndpointsConfigHandler.
		loadBalancer := userspace.NewLoadBalancerRR()
		// set EndpointsConfigHandler to our loadBalancer
		endpointsHandler = loadBalancer

		proxierUserspace, err := userspace.NewProxier(loadBalancer, config.BindAddress, iptInterface, config.PortRange, config.SyncPeriod, config.UDPIdleTimeout)
		if err != nil {
			glog.Fatalf("Unable to create proxier: %v", err)
		}
		proxier = proxierUserspace
		// Remove artifacts from the pure-iptables Proxier.
		glog.V(2).Info("Tearing down pure-iptables proxy rules. Errors here are acceptable.")
		iptables.CleanupLeftovers(iptInterface)
	}
	iptInterface.AddReloadFunc(proxier.Sync)

	// Create configs (i.e. Watches for Services and Endpoints)
	// Note: RegisterHandler() calls need to happen before creation of Sources because sources
	// only notify on changes, and the initial update (on process start) may be lost if no handlers
	// are registered yet.
	serviceConfig := proxyconfig.NewServiceConfig()
	serviceConfig.RegisterHandler(proxier)

	endpointsConfig := proxyconfig.NewEndpointsConfig()
	endpointsConfig.RegisterHandler(endpointsHandler)

	proxyconfig.NewSourceAPI(
		client,
		30*time.Second,
		serviceConfig.Channel("api"),
		endpointsConfig.Channel("api"),
	)

	config.nodeRef = &api.ObjectReference{
		Kind:      "Node",
		Name:      hostname,
		UID:       types.UID(hostname),
		Namespace: "",
	}

	return NewProxyServer(config, client, endpointsConfig, endpointsHandler, iptInterface, oomAdjuster, proxier, recorder, serviceConfig)
}