func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	kubeClient, err := client.New(clientConfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}
	record.StartRecording(kubeClient.Events(""), api.EventSource{Component: "scheduler"})

	go http.ListenAndServe(net.JoinHostPort(address.String(), strconv.Itoa(*port)), nil)

	configFactory := factory.NewConfigFactory(kubeClient)
	config, err := configFactory.Create()
	if err != nil {
		glog.Fatalf("Failed to create scheduler configuration: %v", err)
	}

	s := scheduler.New(config)
	s.Run()

	select {}
}
func main() {
	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if hadoopConfDir == nil || *hadoopConfDir == "" {
		glog.Fatalf("HADOOP_CONF_DIR not set!")
	}
	os.Setenv("HADOOP_CONF_DIR", *hadoopConfDir)

	kubeClient, err := client.New(clientConfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}
	record.StartRecording(kubeClient.Events(""), api.EventSource{Component: "scheduler"})

	go http.ListenAndServe(net.JoinHostPort(address.String(), strconv.Itoa(*port)), nil)

	configFactory := factory.NewConfigFactory(kubeClient)
	config, err := createConfig(configFactory)
	if err != nil {
		glog.Fatalf("Failed to create scheduler configuration: %v", err)
	}

	s := scheduler.New(config)
	s.Run()

	select {}
}
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()
	verifyMinionFlags()

	if len(etcdServerList) == 0 {
		glog.Fatalf("-etcd_servers flag is required.")
	}

	capabilities.Initialize(capabilities.Capabilities{
		AllowPrivileged: *allowPrivileged,
	})

	cloud := initCloudProvider(*cloudProvider, *cloudConfigFile)

	podInfoGetter := &client.HTTPPodInfoGetter{
		Client: http.DefaultClient,
		Port:   *minionPort,
	}

	client, err := client.New(net.JoinHostPort(*address, strconv.Itoa(int(*port))), nil)
	if err != nil {
		glog.Fatalf("Invalid server address: %v", err)
	}

	m := master.New(&master.Config{
		Client:             client,
		Cloud:              cloud,
		EtcdServers:        etcdServerList,
		HealthCheckMinions: *healthCheckMinions,
		Minions:            machineList,
		MinionCacheTTL:     *minionCacheTTL,
		MinionRegexp:       *minionRegexp,
		PodInfoGetter:      podInfoGetter,
	})

	storage, codec := m.API_v1beta1()
	handler := apiserver.Handle(storage, codec, *apiPrefix)
	if len(corsAllowedOriginList) > 0 {
		allowedOriginRegexps, err := util.CompileRegexps(corsAllowedOriginList)
		if err != nil {
			glog.Fatalf("Invalid CORS allowed origin, --cors_allowed_origins flag was set to %v - %v", strings.Join(corsAllowedOriginList, ","), err)
		}
		handler = apiserver.CORS(handler, allowedOriginRegexps, nil, nil, "true")
	}

	s := &http.Server{
		Addr:           net.JoinHostPort(*address, strconv.Itoa(int(*port))),
		Handler:        apiserver.RecoverPanics(handler),
		ReadTimeout:    5 * time.Minute,
		WriteTimeout:   5 * time.Minute,
		MaxHeaderBytes: 1 << 20,
	}
	glog.Fatal(s.ListenAndServe())
}
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()
	verifyMinionFlags()

	if len(etcdServerList) == 0 {
		glog.Fatalf("-etcd_servers flag is required.")
	}

	cloud, err := cloudprovider.GetCloudProvider(*cloudProvider)
	if err != nil {
		glog.Fatalf("Couldn't init cloud provider %q: %#v", *cloudProvider, err)
	}
	if cloud == nil {
		if len(*cloudProvider) > 0 {
			glog.Fatalf("Unknown cloud provider: %s", *cloudProvider)
		} else {
			glog.Info("No cloud provider specified.")
		}
	}

	podInfoGetter := &client.HTTPPodInfoGetter{
		Client: http.DefaultClient,
		Port:   *minionPort,
	}

	client, err := client.New(net.JoinHostPort(*address, strconv.Itoa(int(*port))), nil)
	if err != nil {
		glog.Fatalf("Invalid server address: %v", err)
	}

	m := master.New(&master.Config{
		Client:             client,
		Cloud:              cloud,
		EtcdServers:        etcdServerList,
		HealthCheckMinions: *healthCheckMinions,
		Minions:            machineList,
		MinionCacheTTL:     *minionCacheTTL,
		MinionRegexp:       *minionRegexp,
		PodInfoGetter:      podInfoGetter,
	})

	storage, codec := m.API_v1beta1()

	s := &http.Server{
		Addr:           net.JoinHostPort(*address, strconv.Itoa(int(*port))),
		Handler:        apiserver.Handle(storage, codec, *apiPrefix),
		ReadTimeout:    5 * time.Minute,
		WriteTimeout:   5 * time.Minute,
		MaxHeaderBytes: 1 << 20,
	}
	glog.Fatal(s.ListenAndServe())
}
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	serviceConfig := config.NewServiceConfig()
	endpointsConfig := config.NewEndpointsConfig()

	// define api config source
	if *master != "" {
		glog.Infof("Using api calls to get config %v", *master)
		//TODO: add auth info
		client, err := client.New(*master, nil)
		if err != nil {
			glog.Fatalf("Invalid -master: %v", err)
		}
		config.NewSourceAPI(
			client,
			30*time.Second,
			serviceConfig.Channel("api"),
			endpointsConfig.Channel("api"),
		)
	}

	// Create a configuration source that handles configuration from etcd.
	if len(etcdServerList) > 0 && *master == "" {
		glog.Infof("Using etcd servers %v", etcdServerList)

		// Set up logger for etcd client
		etcd.SetLogger(util.NewLogger("etcd "))
		etcdClient := etcd.NewClient(etcdServerList)
		config.NewConfigSourceEtcd(etcdClient, serviceConfig.Channel("etcd"), endpointsConfig.Channel("etcd"))
	}

	// And create a configuration source that reads from a local file
	config.NewConfigSourceFile(*configFile, serviceConfig.Channel("file"), endpointsConfig.Channel("file"))
	glog.Infof("Using configuration file %s", *configFile)

	loadBalancer := proxy.NewLoadBalancerRR()
	proxier := proxy.NewProxier(loadBalancer, *bindAddress)
	// Wire proxier to handle changes to services
	serviceConfig.RegisterHandler(proxier)
	// And wire loadBalancer to handle changes to endpoints to services
	endpointsConfig.RegisterHandler(loadBalancer)

	// Just loop forever for now...
	select {}
}
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	s := app.NewSchedulerServer()
	s.AddFlags(pflag.CommandLine)

	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	s.Run(pflag.CommandLine.Args())
}
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()
	rand.Seed(time.Now().UTC().UnixNano())

	verflag.PrintAndExitIfRequested()

	setupRunOnce()

	if err := util.ApplyOomScoreAdj(*oomScoreAdj); err != nil {
		glog.Info(err)
	}

	client, err := standalone.GetAPIServerClient(*authPath, apiServerList)
	if err != nil && len(apiServerList) > 0 {
		glog.Warningf("No API client: %v", err)
	}

	kcfg := standalone.KubeletConfig{
		Address:                 address,
		AllowPrivileged:         *allowPrivileged,
		HostnameOverride:        *hostnameOverride,
		RootDirectory:           *rootDirectory,
		ConfigFile:              *config,
		ManifestURL:             *manifestURL,
		FileCheckFrequency:      *fileCheckFrequency,
		HttpCheckFrequency:      *httpCheckFrequency,
		NetworkContainerImage:   *networkContainerImage,
		SyncFrequency:           *syncFrequency,
		RegistryPullQPS:         *registryPullQPS,
		RegistryBurst:           *registryBurst,
		MinimumGCAge:            *minimumGCAge,
		MaxContainerCount:       *maxContainerCount,
		ClusterDomain:           *clusterDomain,
		ClusterDNS:              clusterDNS,
		Runonce:                 *runonce,
		Port:                    *port,
		CAdvisorPort:            *cAdvisorPort,
		EnableServer:            *enableServer,
		EnableDebuggingHandlers: *enableDebuggingHandlers,
		DockerClient:            util.ConnectToDockerOrDie(*dockerEndpoint),
		KubeClient:              client,
		EtcdClient:              kubelet.EtcdClientOrDie(etcdServerList, *etcdConfigFile),
		MasterServiceNamespace:  *masterServiceNamespace,
	}

	standalone.RunKubelet(&kcfg)

	// runs forever
	select {}
}
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if len(clientConfig.Host) == 0 {
		glog.Fatal("usage: controller-manager -master <master>")
	}

	kubeClient, err := client.New(clientConfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	if int64(int(*nodeMilliCPU)) != *nodeMilliCPU {
		glog.Warningf("node_milli_cpu is too big for platform. Clamping: %d -> %d", *nodeMilliCPU, math.MaxInt32)
		*nodeMilliCPU = math.MaxInt32
	}
	if int64(int(*nodeMemory)) != *nodeMemory {
		glog.Warningf("node_memory is too big for platform. Clamping: %d -> %d", *nodeMemory, math.MaxInt32)
		*nodeMemory = math.MaxInt32
	}

	go http.ListenAndServe(net.JoinHostPort(address.String(), strconv.Itoa(*port)), nil)

	endpoints := service.NewEndpointController(kubeClient)
	go util.Forever(func() { endpoints.SyncServiceEndpoints() }, time.Second*10)

	controllerManager := replicationControllerPkg.NewReplicationManager(kubeClient)
	controllerManager.Run(10 * time.Second)

	cloud := cloudprovider.InitCloudProvider(*cloudProvider, *cloudConfigFile)
	nodeResources := &api.NodeResources{
		Capacity: api.ResourceList{
			resources.CPU:    util.NewIntOrStringFromInt(int(*nodeMilliCPU)),
			resources.Memory: util.NewIntOrStringFromInt(int(*nodeMemory)),
		},
	}
	minionController := minionControllerPkg.NewMinionController(cloud, *minionRegexp, machineList, nodeResources, kubeClient)
	minionController.Run(10 * time.Second)

	select {}
}
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	s := service.NewSchedulerServer()
	s.AddStandaloneFlags(pflag.CommandLine)

	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := s.Run(hyperkube.Nil(), pflag.CommandLine.Args()); err != nil {
		// Print the error verbatim; using it as a Printf format string would
		// misbehave if the message contained % verbs.
		fmt.Fprintln(os.Stderr, err.Error())
		os.Exit(1)
	}
}
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	s := app.NewCMServer()
	s.AddFlags(pflag.CommandLine)

	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := s.Run(pflag.CommandLine.Args()); err != nil {
		fmt.Fprint(os.Stderr, err.Error())
		os.Exit(1)
	}
}
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	rand.Seed(time.Now().UTC().UnixNano())

	s := app.NewAPIServer()
	s.AddFlags(pflag.CommandLine)

	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := s.Run(pflag.CommandLine.Args()); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if len(*master) == 0 {
		glog.Fatal("usage: controller-manager -master <master>")
	}

	kubeClient, err := client.New(*master, nil)
	if err != nil {
		glog.Fatalf("Invalid -master: %v", err)
	}

	controllerManager := controller.NewReplicationManager(kubeClient)
	controllerManager.Run(10 * time.Second)

	select {}
}
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	// TODO: security story for plugins!
	kubeClient, err := client.New(*master, nil)
	if err != nil {
		glog.Fatalf("Invalid -master: %v", err)
	}

	configFactory := &factory.ConfigFactory{Client: kubeClient}
	config := configFactory.Create()
	s := scheduler.New(config)
	s.Run()

	select {}
}
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if len(*master) == 0 {
		glog.Fatal("usage: controller-manager -master <master>")
	}

	kubeClient, err := client.New(*master, nil)
	if err != nil {
		glog.Fatalf("Invalid -master: %v", err)
	}

	go http.ListenAndServe(net.JoinHostPort(*address, strconv.Itoa(*port)), nil)

	controllerManager := controller.NewReplicationManager(kubeClient)
	controllerManager.Run(10 * time.Second)

	select {}
}
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	// TODO: security story for plugins!
	kubeClient, err := client.New(*master, nil)
	if err != nil {
		glog.Fatalf("Invalid -master: %v", err)
	}

	go http.ListenAndServe(net.JoinHostPort(*address, strconv.Itoa(*port)), nil)

	configFactory := &factory.ConfigFactory{Client: kubeClient}
	config := configFactory.Create()
	s := scheduler.New(config)
	s.Run()

	select {}
}
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()
	verifyMinionFlags()

	if len(clientConfig.Host) == 0 {
		glog.Fatal("usage: controller-manager -master <master>")
	}

	kubeClient, err := client.New(clientConfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	go http.ListenAndServe(net.JoinHostPort(address.String(), strconv.Itoa(*port)), nil)

	endpoints := service.NewEndpointController(kubeClient)
	go util.Forever(func() { endpoints.SyncServiceEndpoints() }, time.Second*10)

	controllerManager := replicationControllerPkg.NewReplicationManager(kubeClient)
	controllerManager.Run(10 * time.Second)

	cloud := cloudprovider.InitCloudProvider(*cloudProvider, *cloudConfigFile)
	nodeResources := &api.NodeResources{
		Capacity: api.ResourceList{
			api.ResourceCPU:    *resource.NewMilliQuantity(*nodeMilliCPU, resource.DecimalSI),
			api.ResourceMemory: *nodeMemory,
		},
	}
	nodeController := nodeControllerPkg.NewNodeController(cloud, *minionRegexp, machineList, nodeResources, kubeClient)
	nodeController.Run(10 * time.Second)

	select {}
}
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	kubeClient, err := client.New(clientConfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}
	record.StartRecording(kubeClient.Events(""), "scheduler")

	go http.ListenAndServe(net.JoinHostPort(address.String(), strconv.Itoa(*port)), nil)

	configFactory := &factory.ConfigFactory{Client: kubeClient}
	config := configFactory.Create()
	config.MaxRetryTimes = *maxRetryTimes
	s := scheduler.New(config)
	s.Run()

	select {}
}
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()
	verifyPortalFlags()

	if (*etcdConfigFile != "" && len(etcdServerList) != 0) || (*etcdConfigFile == "" && len(etcdServerList) == 0) {
		glog.Fatalf("specify either -etcd_servers or -etcd_config")
	}

	capabilities.Initialize(capabilities.Capabilities{
		AllowPrivileged: *allowPrivileged,
	})

	cloud := cloudprovider.InitCloudProvider(*cloudProvider, *cloudConfigFile)

	kubeletClient, err := client.NewKubeletClient(&kubeletConfig)
	if err != nil {
		glog.Fatalf("Failure to start kubelet client: %v", err)
	}

	_, v1beta3 := runtimeConfig["api/v1beta3"]

	// TODO: expose same flags as client.BindClientConfigFlags but for a server
	clientConfig := &client.Config{
		Host:    net.JoinHostPort(address.String(), strconv.Itoa(int(*port))),
		Version: *storageVersion,
	}
	client, err := client.New(clientConfig)
	if err != nil {
		glog.Fatalf("Invalid server address: %v", err)
	}

	helper, err := newEtcd(*etcdConfigFile, etcdServerList)
	if err != nil {
		glog.Fatalf("Invalid storage version or misconfigured etcd: %v", err)
	}

	n := net.IPNet(portalNet)

	authenticator, err := apiserver.NewAuthenticatorFromTokenFile(*tokenAuthFile)
	if err != nil {
		glog.Fatalf("Invalid Authentication Config: %v", err)
	}

	authorizer, err := apiserver.NewAuthorizerFromAuthorizationConfig(*authorizationMode, *authorizationPolicyFile)
	if err != nil {
		glog.Fatalf("Invalid Authorization Config: %v", err)
	}

	admissionControlPluginNames := strings.Split(*admissionControl, ",")
	admissionController := admission.NewFromPlugins(client, admissionControlPluginNames, *admissionControlConfigFile)

	config := &master.Config{
		Client:                 client,
		Cloud:                  cloud,
		EtcdHelper:             helper,
		HealthCheckMinions:     *healthCheckMinions,
		EventTTL:               *eventTTL,
		KubeletClient:          kubeletClient,
		PortalNet:              &n,
		EnableLogsSupport:      *enableLogsSupport,
		EnableUISupport:        true,
		EnableSwaggerSupport:   true,
		APIPrefix:              *apiPrefix,
		CorsAllowedOriginList:  corsAllowedOriginList,
		ReadOnlyPort:           *readOnlyPort,
		ReadWritePort:          *port,
		PublicAddress:          *publicAddressOverride,
		Authenticator:          authenticator,
		Authorizer:             authorizer,
		AdmissionControl:       admissionController,
		EnableV1Beta3:          v1beta3,
		MasterServiceNamespace: *masterServiceNamespace,
	}
	m := master.New(config)

	// We serve on 3 ports. See docs/reaching_the_api.md
	roLocation := ""
	if *readOnlyPort != 0 {
		roLocation = net.JoinHostPort(config.PublicAddress, strconv.Itoa(config.ReadOnlyPort))
	}
	secureLocation := ""
	if *securePort != 0 {
		secureLocation = net.JoinHostPort(config.PublicAddress, strconv.Itoa(*securePort))
	}
	rwLocation := net.JoinHostPort(address.String(), strconv.Itoa(int(*port)))

	// See the flag commentary to understand our assumptions when opening the read-only and read-write ports.

	if roLocation != "" {
		// Allow 1 read-only request per second, allow up to 20 in a burst before enforcing.
		rl := util.NewTokenBucketRateLimiter(1.0, 20)
		readOnlyServer := &http.Server{
			Addr:           roLocation,
			Handler:        apiserver.RecoverPanics(apiserver.ReadOnly(apiserver.RateLimit(rl, m.InsecureHandler))),
			ReadTimeout:    5 * time.Minute,
			WriteTimeout:   5 * time.Minute,
			MaxHeaderBytes: 1 << 20,
		}
		glog.Infof("Serving read-only insecurely on %s", roLocation)
		go func() {
			defer util.HandleCrash()
			for {
				if err := readOnlyServer.ListenAndServe(); err != nil {
					glog.Errorf("Unable to listen for read only traffic (%v); will try again.", err)
				}
				time.Sleep(15 * time.Second)
			}
		}()
	}

	if secureLocation != "" {
		secureServer := &http.Server{
			Addr:           secureLocation,
			Handler:        apiserver.RecoverPanics(m.Handler),
			ReadTimeout:    5 * time.Minute,
			WriteTimeout:   5 * time.Minute,
			MaxHeaderBytes: 1 << 20,
			TLSConfig: &tls.Config{
				// Change default from SSLv3 to TLSv1.0 (because of POODLE vulnerability)
				MinVersion: tls.VersionTLS10,
				// Populate PeerCertificates in requests, but don't reject connections without certificates
				// This allows certificates to be validated by authenticators, while still allowing other auth types
				ClientAuth: tls.RequestClientCert,
			},
		}
		glog.Infof("Serving securely on %s", secureLocation)
		go func() {
			defer util.HandleCrash()
			for {
				if *tlsCertFile == "" && *tlsPrivateKeyFile == "" {
					*tlsCertFile = "/var/run/kubernetes/apiserver.crt"
					*tlsPrivateKeyFile = "/var/run/kubernetes/apiserver.key"
					if err := util.GenerateSelfSignedCert(config.PublicAddress, *tlsCertFile, *tlsPrivateKeyFile); err != nil {
						glog.Errorf("Unable to generate self signed cert: %v", err)
					} else {
						glog.Infof("Using self-signed cert (%s, %s)", *tlsCertFile, *tlsPrivateKeyFile)
					}
				}
				if err := secureServer.ListenAndServeTLS(*tlsCertFile, *tlsPrivateKeyFile); err != nil {
					glog.Errorf("Unable to listen for secure (%v); will try again.", err)
				}
				time.Sleep(15 * time.Second)
			}
		}()
	}

	s := &http.Server{
		Addr:           rwLocation,
		Handler:        apiserver.RecoverPanics(m.InsecureHandler),
		ReadTimeout:    5 * time.Minute,
		WriteTimeout:   5 * time.Minute,
		MaxHeaderBytes: 1 << 20,
	}
	glog.Infof("Serving insecurely on %s", rwLocation)
	glog.Fatal(s.ListenAndServe())
}
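// The server above listens on up to three ports (see docs/reaching_the_api.md):
// a rate-limited read-only insecure port, a TLS-secured port, and a read-write
// insecure port. Illustrative probes, assuming the default /api prefix and the
// v1beta1 API; the bracketed values are placeholders for whatever the flags
// were set to, not asserted defaults:
//
//	curl http://<public-address>:<read_only_port>/api/v1beta1/pods
//	curl -k https://<public-address>:<secure_port>/api/v1beta1/pods
//	curl http://<address>:<port>/api/v1beta1/pods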
func main() {
	util.InitFlags()
	util.InitLogs()
	util.ReallyCrash = *reallyCrashForTesting
	defer util.FlushLogs()
	rand.Seed(time.Now().UTC().UnixNano())

	verflag.PrintAndExitIfRequested()

	// Cluster creation scripts support both Kubernetes versions that 1) support the kubelet
	// watching the apiserver for pods, and 2) ones that don't. So they can set both --etcd_servers
	// and --api_servers. The current code will ignore the --etcd_servers flag, while older kubelet
	// code will use the --etcd_servers flag for pods and --api_servers for event publishing.
	//
	// TODO(erictune): convert all cloud provider scripts and Google Container Engine to
	// use only --api_servers, then delete --etcd_servers flag and the resulting dead code.
	if len(etcdServerList) > 0 && len(apiServerList) > 0 {
		glog.Infof("Both --etcd_servers and --api_servers are set. Not using etcd source.")
		etcdServerList = util.StringList{}
	}

	setupRunOnce()

	if err := util.ApplyOomScoreAdj(*oomScoreAdj); err != nil {
		glog.Info(err)
	}

	client, err := standalone.GetAPIServerClient(*authPath, apiServerList)
	if err != nil && len(apiServerList) > 0 {
		glog.Warningf("No API client: %v", err)
	}

	kcfg := standalone.KubeletConfig{
		Address:                 address,
		AllowPrivileged:         *allowPrivileged,
		HostnameOverride:        *hostnameOverride,
		RootDirectory:           *rootDirectory,
		ConfigFile:              *config,
		ManifestURL:             *manifestURL,
		FileCheckFrequency:      *fileCheckFrequency,
		HttpCheckFrequency:      *httpCheckFrequency,
		NetworkContainerImage:   *networkContainerImage,
		SyncFrequency:           *syncFrequency,
		RegistryPullQPS:         *registryPullQPS,
		RegistryBurst:           *registryBurst,
		MinimumGCAge:            *minimumGCAge,
		MaxContainerCount:       *maxContainerCount,
		ClusterDomain:           *clusterDomain,
		ClusterDNS:              clusterDNS,
		Runonce:                 *runonce,
		Port:                    *port,
		CAdvisorPort:            *cAdvisorPort,
		EnableServer:            *enableServer,
		EnableDebuggingHandlers: *enableDebuggingHandlers,
		DockerClient:            util.ConnectToDockerOrDie(*dockerEndpoint),
		KubeClient:              client,
		EtcdClient:              kubelet.EtcdClientOrDie(etcdServerList, *etcdConfigFile),
		MasterServiceNamespace:  *masterServiceNamespace,
		VolumePlugins:           app.ProbeVolumePlugins(),
	}

	standalone.RunKubelet(&kcfg)

	// runs forever
	select {}
}
func main() {
	flag.Usage = func() {
		usage()
	}

	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	// Initialize the client
	if clientConfig.Host == "" {
		clientConfig.Host = os.Getenv("KUBERNETES_MASTER")
	}

	// Load namespace information for requests
	// Check if the namespace was overridden by the -ns argument
	ctx := api.NewDefaultContext()
	if len(*ns) > 0 {
		ctx = api.WithNamespace(ctx, *ns)
	} else {
		nsInfo, err := kubecfg.LoadNamespaceInfo(*nsFile)
		if err != nil {
			glog.Fatalf("Error loading current namespace: %v", err)
		}
		ctx = api.WithNamespace(ctx, nsInfo.Namespace)
	}

	if clientConfig.Host == "" {
		// TODO: eventually apiserver should start on 443 and be secure by default
		// TODO: don't specify http or https in Host, and infer that from auth options.
		clientConfig.Host = "http://localhost:8080"
	}
	if client.IsConfigTransportTLS(*clientConfig) {
		auth, err := kubecfg.LoadClientAuthInfoOrPrompt(*authConfig, os.Stdin)
		if err != nil {
			glog.Fatalf("Error loading auth: %v", err)
		}
		clientConfig.Username = auth.User
		clientConfig.Password = auth.Password
		if auth.CAFile != "" {
			clientConfig.CAFile = auth.CAFile
		}
		if auth.CertFile != "" {
			clientConfig.CertFile = auth.CertFile
		}
		if auth.KeyFile != "" {
			clientConfig.KeyFile = auth.KeyFile
		}
		if auth.BearerToken != "" {
			clientConfig.BearerToken = auth.BearerToken
		}
		if auth.Insecure != nil {
			clientConfig.Insecure = *auth.Insecure
		}
	}
	kubeClient, err := client.New(clientConfig)
	if err != nil {
		glog.Fatalf("Can't configure client: %v", err)
	}

	if *serverVersion != verflag.VersionFalse {
		got, err := kubeClient.ServerVersion()
		if err != nil {
			fmt.Printf("Couldn't read version from server: %v\n", err)
			os.Exit(1)
		}
		if *serverVersion == verflag.VersionRaw {
			fmt.Printf("%#v\n", *got)
			os.Exit(0)
		} else {
			fmt.Printf("Server: Kubernetes %s\n", got)
			os.Exit(0)
		}
	}

	if *preventSkew {
		got, err := kubeClient.ServerVersion()
		if err != nil {
			fmt.Printf("Couldn't read version from server: %v\n", err)
			os.Exit(1)
		}
		if c, s := version.Get(), *got; !reflect.DeepEqual(c, s) {
			fmt.Printf("Server version (%#v) differs from client version (%#v)!\n", s, c)
			os.Exit(1)
		}
	}

	if *proxy {
		glog.Info("Starting to serve on localhost:8001")
		if *openBrowser {
			go func() {
				time.Sleep(2 * time.Second)
				open.Start("http://localhost:8001/static/")
			}()
		}
		server, err := kubecfg.NewProxyServer(*www, clientConfig)
		if err != nil {
			glog.Fatalf("Error creating proxy server: %v", err)
		}
		glog.Fatal(server.Serve())
	}

	if len(flag.Args()) < 1 {
		usage()
		os.Exit(1)
	}
	method := flag.Arg(0)

	matchFound := executeAPIRequest(ctx, method, kubeClient) || executeControllerRequest(ctx, method, kubeClient) || executeNamespaceRequest(method, kubeClient)
	if !matchFound {
		glog.Fatalf("Unknown command %s", method)
	}
}
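// Illustrative kubecfg invocations exercising the dispatch paths above. These
// are hypothetical examples of the command shape, not an exhaustive or
// guaranteed verb set; the real verbs live in executeAPIRequest and friends:
//
//	kubecfg list pods                 # API request path
//	kubecfg -ns staging list services # namespace overridden via -ns
//	kubecfg -proxy -www=./www         # UI proxy on localhost:8001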
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	serviceConfig := config.NewServiceConfig()
	endpointsConfig := config.NewEndpointsConfig()

	// define api config source
	if clientConfig.Host != "" {
		glog.Infof("Using api calls to get config %v", clientConfig.Host)
		client, err := client.New(clientConfig)
		if err != nil {
			glog.Fatalf("Invalid API configuration: %v", err)
		}
		config.NewSourceAPI(
			client.Services(api.NamespaceAll),
			client.Endpoints(api.NamespaceAll),
			30*time.Second,
			serviceConfig.Channel("api"),
			endpointsConfig.Channel("api"),
		)
	} else {
		var etcdClient *etcd.Client

		// Set up etcd client
		if len(etcdServerList) > 0 {
			// Set up logger for etcd client
			etcd.SetLogger(util.NewLogger("etcd "))
			etcdClient = etcd.NewClient(etcdServerList)
		} else if *etcdConfigFile != "" {
			// Set up logger for etcd client
			etcd.SetLogger(util.NewLogger("etcd "))

			var err error
			etcdClient, err = etcd.NewClientFromFile(*etcdConfigFile)
			if err != nil {
				glog.Fatalf("Error with etcd config file: %v", err)
			}
		}

		// Create a configuration source that handles configuration from etcd.
		if etcdClient != nil {
			glog.Infof("Using etcd servers %v", etcdClient.GetCluster())
			config.NewConfigSourceEtcd(etcdClient, serviceConfig.Channel("etcd"), endpointsConfig.Channel("etcd"))
		}
	}

	loadBalancer := proxy.NewLoadBalancerRR()
	proxier := proxy.NewProxier(loadBalancer, net.IP(bindAddress), iptables.New(exec.New()))
	// Wire proxier to handle changes to services
	serviceConfig.RegisterHandler(proxier)
	// And wire loadBalancer to handle changes to endpoints to services
	endpointsConfig.RegisterHandler(loadBalancer)

	// Just loop forever for now...
	proxier.SyncLoop()
}
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()
	rand.Seed(time.Now().UTC().UnixNano())

	verflag.PrintAndExitIfRequested()

	etcd.SetLogger(util.NewLogger("etcd "))

	dockerClient, err := docker.NewClient(getDockerEndpoint())
	if err != nil {
		glog.Fatal("Couldn't connect to docker.")
	}

	cadvisorClient, err := cadvisor.NewClient("http://127.0.0.1:4194")
	if err != nil {
		glog.Errorf("Error on creating cadvisor client: %v", err)
	}

	hostname := getHostname()

	if *rootDirectory == "" {
		glog.Fatal("Invalid root directory path.")
	}
	*rootDirectory = path.Clean(*rootDirectory)
	os.MkdirAll(*rootDirectory, 0750)

	// source of all configuration
	cfg := kconfig.NewPodConfig(kconfig.PodConfigNotificationSnapshotAndUpdates)

	// define file config source
	if *config != "" {
		kconfig.NewSourceFile(*config, *fileCheckFrequency, cfg.Channel("file"))
	}

	// define url config source
	if *manifestURL != "" {
		kconfig.NewSourceURL(*manifestURL, *httpCheckFrequency, cfg.Channel("http"))
	}

	// define etcd config source and initialize etcd client
	var etcdClient tools.EtcdClient
	if len(etcdServerList) > 0 {
		glog.Infof("Watching for etcd configs at %v", etcdServerList)
		etcdClient = etcd.NewClient(etcdServerList)
		kconfig.NewSourceEtcd(kconfig.EtcdKeyForHost(hostname), etcdClient, cfg.Channel("etcd"))
	}

	// TODO: block until all sources have delivered at least one update to the channel, or break the sync loop
	// up into "per source" synchronizations

	k := kubelet.NewMainKubelet(
		getHostname(),
		dockerClient,
		cadvisorClient,
		etcdClient,
		*rootDirectory,
		*syncFrequency,
		*allowPrivileged)

	health.AddHealthChecker("exec", health.NewExecHealthChecker(k))
	health.AddHealthChecker("http", health.NewHTTPHealthChecker(&http.Client{}))
	health.AddHealthChecker("tcp", &health.TCPHealthChecker{})

	// start the kubelet
	go util.Forever(func() { k.Run(cfg.Updates()) }, 0)

	// start the kubelet server
	if *enableServer {
		go util.Forever(func() {
			kubelet.ListenAndServeKubeletServer(k, cfg.Channel("http"), *address, *port)
		}, 0)
	}

	// runs forever
	select {}
}
// Run the server. This will pick the appropriate server and run it.
func (hk *HyperKube) Run(args []string) error {
	// If we are called directly, parse all flags up to the first real
	// argument. That should be the server to run.
	baseCommand := path.Base(args[0])
	serverName := baseCommand
	if serverName == hk.Name {
		args = args[1:]

		baseFlags := hk.Flags()
		baseFlags.SetInterspersed(false) // Only parse flags up to the next real command
		err := baseFlags.Parse(args)
		if err != nil || hk.helpFlagVal {
			if err != nil {
				hk.Println("Error:", err)
			}
			hk.Usage()
			return err
		}

		verflag.PrintAndExitIfRequested()

		args = baseFlags.Args()
		if len(args) > 0 && len(args[0]) > 0 {
			serverName = args[0]
			baseCommand = baseCommand + " " + serverName
			args = args[1:]
		} else {
			err = errors.New("No server specified")
			hk.Printf("Error: %v\n\n", err)
			hk.Usage()
			return err
		}
	}

	s, err := hk.FindServer(serverName)
	if err != nil {
		hk.Printf("Error: %v\n\n", err)
		hk.Usage()
		return err
	}

	util.AddPFlagSetToPFlagSet(hk.Flags(), s.Flags())
	err = s.Flags().Parse(args)
	if err != nil || hk.helpFlagVal {
		if err != nil {
			hk.Printf("Error: %v\n\n", err)
		}
		s.Usage()
		return err
	}

	verflag.PrintAndExitIfRequested()

	util.InitLogs()
	defer util.FlushLogs()

	err = s.Run(s, s.Flags().Args())
	if err != nil {
		hk.Println("Error:", err)
	}
	return err
}
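// Illustrative invocations of the HyperKube dispatcher above. The binary and
// server names are placeholders; which servers are registered depends on the
// build:
//
//	hyperkube scheduler --master=127.0.0.1:8080   # server named as the first argument
//	ln -s hyperkube kube-scheduler
//	./kube-scheduler --master=127.0.0.1:8080      # server inferred from the basename
//
// In the first form, base flags are parsed non-interspersed up to "scheduler";
// in the second, path.Base(args[0]) differs from hk.Name, so Run skips straight
// to FindServer with the basename.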
func main() {
	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	if err := util.ApplyOomScoreAdj(*oomScoreAdj); err != nil {
		glog.Info(err)
	}

	verflag.PrintAndExitIfRequested()

	serviceConfig := config.NewServiceConfig()
	endpointsConfig := config.NewEndpointsConfig()

	protocol := iptables.ProtocolIpv4
	if net.IP(bindAddress).To4() == nil {
		protocol = iptables.ProtocolIpv6
	}
	loadBalancer := proxy.NewLoadBalancerRR()
	proxier := proxy.NewProxier(loadBalancer, net.IP(bindAddress), iptables.New(exec.New(), protocol))
	if proxier == nil {
		glog.Fatalf("failed to create proxier, aborting")
	}

	// Wire proxier to handle changes to services
	serviceConfig.RegisterHandler(proxier)
	// And wire loadBalancer to handle changes to endpoints to services
	endpointsConfig.RegisterHandler(loadBalancer)

	// Note: RegisterHandler() calls need to happen before creation of Sources because sources
	// only notify on changes, and the initial update (on process start) may be lost if no handlers
	// are registered yet.

	// define api config source
	if clientConfig.Host != "" {
		glog.Infof("Using api calls to get config %v", clientConfig.Host)
		client, err := client.New(clientConfig)
		if err != nil {
			glog.Fatalf("Invalid API configuration: %v", err)
		}
		config.NewSourceAPI(
			client.Services(api.NamespaceAll),
			client.Endpoints(api.NamespaceAll),
			30*time.Second,
			serviceConfig.Channel("api"),
			endpointsConfig.Channel("api"),
		)
	} else {
		var etcdClient *etcd.Client

		// Set up etcd client
		if len(etcdServerList) > 0 {
			// Set up logger for etcd client
			etcd.SetLogger(util.NewLogger("etcd "))
			etcdClient = etcd.NewClient(etcdServerList)
		} else if *etcdConfigFile != "" {
			// Set up logger for etcd client
			etcd.SetLogger(util.NewLogger("etcd "))

			var err error
			etcdClient, err = etcd.NewClientFromFile(*etcdConfigFile)
			if err != nil {
				glog.Fatalf("Error with etcd config file: %v", err)
			}
		}

		// Create a configuration source that handles configuration from etcd.
		if etcdClient != nil {
			glog.Infof("Using etcd servers %v", etcdClient.GetCluster())
			config.NewConfigSourceEtcd(etcdClient, serviceConfig.Channel("etcd"), endpointsConfig.Channel("etcd"))
		}
	}

	if *healthz_port > 0 {
		go util.Forever(func() {
			err := http.ListenAndServe(bindAddress.String()+":"+strconv.Itoa(*healthz_port), nil)
			if err != nil {
				glog.Errorf("Starting health server failed: %v", err)
			}
		}, 5*time.Second)
	}

	// Just loop forever for now...
	proxier.SyncLoop()
}
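// The "Note" in the function above (register handlers before creating sources)
// is worth making concrete. Below is a minimal self-contained sketch of the
// hazard, using toy stand-ins rather than the real pkg/proxy/config types: a
// source that notifies exactly once at creation drops that initial update if
// no handler is registered yet.
func registrationOrderSketch() {
	var handlers []func(update string)

	// A toy "source": delivers one initial notification to whichever handlers
	// exist at creation time, mirroring sources that only notify on change.
	newSource := func(update string) {
		for _, h := range handlers {
			h(update)
		}
	}

	// Wrong order: no handlers yet, so the initial snapshot is silently lost.
	newSource("initial services snapshot")

	// Right order, as in main above: register first, then create the source.
	handlers = append(handlers, func(u string) { fmt.Println("handled:", u) })
	newSource("initial endpoints snapshot") // delivered
}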
func main() {
	flag.Usage = func() {
		usage()
	}
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	var masterServer string
	if len(*httpServer) > 0 {
		masterServer = *httpServer
	} else if len(os.Getenv("KUBERNETES_MASTER")) > 0 {
		masterServer = os.Getenv("KUBERNETES_MASTER")
	} else {
		masterServer = "http://localhost:8080"
	}
	kubeClient, err := client.New(masterServer, nil)
	if err != nil {
		glog.Fatalf("Unable to parse %s as a URL: %v", masterServer, err)
	}

	// TODO: this won't work if TLS is enabled with client cert auth, but no
	// passwords are required. Refactor when we address client auth abstraction.
	if kubeClient.Secure() {
		auth, err := kubecfg.LoadAuthInfo(*authConfig, os.Stdin)
		if err != nil {
			glog.Fatalf("Error loading auth: %v", err)
		}
		kubeClient, err = client.New(masterServer, auth)
		if err != nil {
			glog.Fatalf("Unable to parse %s as a URL: %v", masterServer, err)
		}
	}

	if *serverVersion {
		got, err := kubeClient.ServerVersion()
		if err != nil {
			fmt.Printf("Couldn't read version from server: %v\n", err)
			os.Exit(1)
		}
		fmt.Printf("Server Version: %#v\n", got)
		os.Exit(0)
	}

	if *preventSkew {
		got, err := kubeClient.ServerVersion()
		if err != nil {
			fmt.Printf("Couldn't read version from server: %v\n", err)
			os.Exit(1)
		}
		if c, s := version.Get(), *got; !reflect.DeepEqual(c, s) {
			fmt.Printf("Server version (%#v) differs from client version (%#v)!\n", s, c)
			os.Exit(1)
		}
	}

	if *proxy {
		glog.Info("Starting to serve on localhost:8001")
		server := kubecfg.NewProxyServer(*www, kubeClient)
		glog.Fatal(server.Serve())
	}

	if len(flag.Args()) < 1 {
		usage()
		os.Exit(1)
	}
	method := flag.Arg(0)

	matchFound := executeAPIRequest(method, kubeClient) || executeControllerRequest(method, kubeClient)
	if !matchFound {
		glog.Fatalf("Unknown command %s", method)
	}
}
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()
	rand.Seed(time.Now().UTC().UnixNano())

	verflag.PrintAndExitIfRequested()

	if *runonce {
		exclusiveFlag := "invalid option: --runonce and %s are mutually exclusive"
		if len(etcdServerList) > 0 {
			glog.Fatalf(exclusiveFlag, "--etcd_servers")
		}
		if *enableServer {
			glog.Infof("--runonce is set, disabling server")
			*enableServer = false
		}
	}

	etcd.SetLogger(util.NewLogger("etcd "))

	// Log the events locally too.
	record.StartLogging(glog.Infof)

	capabilities.Initialize(capabilities.Capabilities{
		AllowPrivileged: *allowPrivileged,
	})

	dockerClient, err := docker.NewClient(getDockerEndpoint())
	if err != nil {
		glog.Fatal("Couldn't connect to docker.")
	}

	hostname := getHostname()

	if *rootDirectory == "" {
		glog.Fatal("Invalid root directory path.")
	}
	*rootDirectory = path.Clean(*rootDirectory)
	if err := os.MkdirAll(*rootDirectory, 0750); err != nil {
		glog.Fatalf("Error creating root directory: %v", err)
	}

	// source of all configuration
	cfg := kconfig.NewPodConfig(kconfig.PodConfigNotificationSnapshotAndUpdates)

	// define file config source
	if *config != "" {
		kconfig.NewSourceFile(*config, *fileCheckFrequency, cfg.Channel("file"))
	}

	// define url config source
	if *manifestURL != "" {
		kconfig.NewSourceURL(*manifestURL, *httpCheckFrequency, cfg.Channel("http"))
	}

	// define etcd config source and initialize etcd client
	var etcdClient *etcd.Client
	if len(etcdServerList) > 0 {
		etcdClient = etcd.NewClient(etcdServerList)
	} else if *etcdConfigFile != "" {
		var err error
		etcdClient, err = etcd.NewClientFromFile(*etcdConfigFile)
		if err != nil {
			glog.Fatalf("Error with etcd config file: %v", err)
		}
	}

	if etcdClient != nil {
		glog.Infof("Watching for etcd configs at %v", etcdClient.GetCluster())
		kconfig.NewSourceEtcd(kconfig.EtcdKeyForHost(hostname), etcdClient, cfg.Channel("etcd"))
	}

	// TODO: block until all sources have delivered at least one update to the channel, or break the sync loop
	// up into "per source" synchronizations

	k := kubelet.NewMainKubelet(
		getHostname(),
		dockerClient,
		etcdClient,
		*rootDirectory,
		*networkContainerImage,
		*syncFrequency,
		float32(*registryPullQPS),
		*registryBurst,
		*minimumGCAge,
		*maxContainerCount)

	k.BirthCry()

	go func() {
		util.Forever(func() {
			err := k.GarbageCollectContainers()
			if err != nil {
				glog.Errorf("Garbage collect failed: %v", err)
			}
		}, time.Minute*1)
	}()

	go func() {
		defer util.HandleCrash()
		// TODO: Monitor this connection, reconnect if needed?
		glog.V(1).Infof("Trying to create cadvisor client.")
		cadvisorClient, err := cadvisor.NewClient("http://127.0.0.1:4194")
		if err != nil {
			glog.Errorf("Error on creating cadvisor client: %v", err)
			return
		}
		glog.V(1).Infof("Successfully created cadvisor client.")
		k.SetCadvisorClient(cadvisorClient)
	}()

	// TODO: These should probably become more plugin-ish: register a factory func
	// in each checker's init(), iterate those here.
	health.AddHealthChecker(health.NewExecHealthChecker(k))
	health.AddHealthChecker(health.NewHTTPHealthChecker(&http.Client{}))
	health.AddHealthChecker(&health.TCPHealthChecker{})

	// process pods and exit.
	if *runonce {
		if _, err := k.RunOnce(cfg.Updates()); err != nil {
			glog.Fatalf("--runonce failed: %v", err)
		}
		return
	}

	// start the kubelet
	go util.Forever(func() { k.Run(cfg.Updates()) }, 0)

	// start the kubelet server
	if *enableServer {
		go util.Forever(func() {
			kubelet.ListenAndServeKubeletServer(k, cfg.Channel("http"), net.IP(address), *port, *enableDebuggingHandlers)
		}, 0)
	}

	// runs forever
	select {}
}
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()
	verifyPortalFlags()

	if (*etcdConfigFile != "" && len(etcdServerList) != 0) || (*etcdConfigFile == "" && len(etcdServerList) == 0) {
		glog.Fatalf("specify either -etcd_servers or -etcd_config")
	}

	capabilities.Initialize(capabilities.Capabilities{
		AllowPrivileged: *allowPrivileged,
	})

	cloud := cloudprovider.InitCloudProvider(*cloudProvider, *cloudConfigFile)

	kubeletClient, err := client.NewKubeletClient(&kubeletConfig)
	if err != nil {
		glog.Fatalf("Failure to start kubelet client: %v", err)
	}

	// TODO: expose same flags as client.BindClientConfigFlags but for a server
	clientConfig := &client.Config{
		Host:    net.JoinHostPort(address.String(), strconv.Itoa(int(*port))),
		Version: *storageVersion,
	}
	client, err := client.New(clientConfig)
	if err != nil {
		glog.Fatalf("Invalid server address: %v", err)
	}

	helper, err := newEtcd(*etcdConfigFile, etcdServerList)
	if err != nil {
		glog.Fatalf("Invalid storage version or misconfigured etcd: %v", err)
	}

	n := net.IPNet(portalNet)

	authenticator, err := apiserver.NewAuthenticatorFromTokenFile(*tokenAuthFile)
	if err != nil {
		glog.Fatalf("Invalid Authentication Config: %v", err)
	}

	authorizer, err := apiserver.NewAuthorizerFromAuthorizationConfig(*authorizationMode, *authorizationPolicyFile)
	if err != nil {
		glog.Fatalf("Invalid Authorization Config: %v", err)
	}

	config := &master.Config{
		Client:                client,
		Cloud:                 cloud,
		EtcdHelper:            helper,
		HealthCheckMinions:    *healthCheckMinions,
		EventTTL:              *eventTTL,
		KubeletClient:         kubeletClient,
		PortalNet:             &n,
		EnableLogsSupport:     *enableLogsSupport,
		EnableUISupport:       true,
		APIPrefix:             *apiPrefix,
		CorsAllowedOriginList: corsAllowedOriginList,
		ReadOnlyPort:          *readOnlyPort,
		ReadWritePort:         *port,
		PublicAddress:         *publicAddressOverride,
		Authenticator:         authenticator,
		Authorizer:            authorizer,
	}
	m := master.New(config)

	// We serve on 3 ports. See docs/reaching_the_api.md
	roLocation := ""
	if *readOnlyPort != 0 {
		roLocation = net.JoinHostPort(config.PublicAddress, strconv.Itoa(config.ReadOnlyPort))
	}
	secureLocation := ""
	if *securePort != 0 {
		secureLocation = net.JoinHostPort(config.PublicAddress, strconv.Itoa(*securePort))
	}
	rwLocation := net.JoinHostPort(address.String(), strconv.Itoa(int(*port)))

	// See the flag commentary to understand our assumptions when opening the read-only and read-write ports.

	if roLocation != "" {
		// Allow 1 read-only request per second, allow up to 20 in a burst before enforcing.
		rl := util.NewTokenBucketRateLimiter(1.0, 20)
		readOnlyServer := &http.Server{
			Addr:           roLocation,
			Handler:        apiserver.RecoverPanics(apiserver.ReadOnly(apiserver.RateLimit(rl, m.InsecureHandler))),
			ReadTimeout:    5 * time.Minute,
			WriteTimeout:   5 * time.Minute,
			MaxHeaderBytes: 1 << 20,
		}
		glog.Infof("Serving read-only insecurely on %s", roLocation)
		go func() {
			defer util.HandleCrash()
			for {
				if err := readOnlyServer.ListenAndServe(); err != nil {
					glog.Errorf("Unable to listen for read only traffic (%v); will try again.", err)
				}
				time.Sleep(15 * time.Second)
			}
		}()
	}

	if secureLocation != "" {
		secureServer := &http.Server{
			Addr:           secureLocation,
			Handler:        apiserver.RecoverPanics(m.Handler),
			ReadTimeout:    5 * time.Minute,
			WriteTimeout:   5 * time.Minute,
			MaxHeaderBytes: 1 << 20,
		}
		glog.Infof("Serving securely on %s", secureLocation)
		go func() {
			defer util.HandleCrash()
			for {
				if err := secureServer.ListenAndServeTLS(*tlsCertFile, *tlsPrivateKeyFile); err != nil {
					glog.Errorf("Unable to listen for secure (%v); will try again.", err)
				}
				time.Sleep(15 * time.Second)
			}
		}()
	}

	s := &http.Server{
		Addr:           rwLocation,
		Handler:        apiserver.RecoverPanics(m.InsecureHandler),
		ReadTimeout:    5 * time.Minute,
		WriteTimeout:   5 * time.Minute,
		MaxHeaderBytes: 1 << 20,
	}
	glog.Infof("Serving insecurely on %s", rwLocation)
	glog.Fatal(s.ListenAndServe())
}