func main() {
	util.InitFlags()
	goruntime.GOMAXPROCS(goruntime.NumCPU())
	if context.Provider == "" {
		// Missing provider means conformance mode, not a fatal error, so don't exit here.
		glog.Info("The --provider flag is not set. Treating as a conformance test. Some tests may not be run.")
	}
	if *times <= 0 {
		glog.Error("Invalid --times (negative or no testing requested)!")
		os.Exit(1)
	}
	if context.Provider == "aws" {
		awsConfig := "[Global]\n"
		if cloudConfig.Zone == "" {
			glog.Error("gce_zone must be specified for AWS")
			os.Exit(1)
		}
		awsConfig += fmt.Sprintf("Zone=%s\n", cloudConfig.Zone)
		var err error
		cloudConfig.Provider, err = cloudprovider.GetCloudProvider(context.Provider, strings.NewReader(awsConfig))
		if err != nil {
			glog.Error("Error building AWS provider: ", err)
			os.Exit(1)
		}
	}
	e2e.RunE2ETests(context, *orderseed, *times, *reportDir, testList)
}
func main() {
	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if hadoopConfDir == nil || *hadoopConfDir == "" {
		glog.Fatalf("HADOOP_CONF_DIR not set!")
	}
	os.Setenv("HADOOP_CONF_DIR", *hadoopConfDir)

	kubeClient, err := client.New(clientConfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}
	record.StartRecording(kubeClient.Events(""), api.EventSource{Component: "scheduler"})

	go http.ListenAndServe(net.JoinHostPort(address.String(), strconv.Itoa(*port)), nil)

	configFactory := factory.NewConfigFactory(kubeClient)
	config, err := createConfig(configFactory)
	if err != nil {
		glog.Fatalf("Failed to create scheduler configuration: %v", err)
	}

	s := scheduler.New(config)
	s.Run()

	select {}
}
func main() {
	util.InitFlags()
	goruntime.GOMAXPROCS(goruntime.NumCPU())
	if *provider == "" {
		glog.Error("e2e needs to have the --provider flag set")
		os.Exit(1)
	}
	if *times <= 0 {
		glog.Error("Invalid --times (negative or no testing requested)!")
		os.Exit(1)
	}
	e2e.RunE2ETests(*authConfig, *certDir, *host, *repoRoot, *provider, *orderseed, *times, testList)
}
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	s := app.NewSchedulerServer()
	s.AddFlags(pflag.CommandLine)

	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	s.Run(pflag.CommandLine.Args())
}
func main() {
	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	glog.Infof("Creating etcd client pointing to %v", *etcdServer)
	etcdClient, err := tools.NewEtcdClientStartServerIfNecessary(*etcdServer)
	if err != nil {
		glog.Fatalf("Failed to connect to etcd: %v", err)
	}

	startComponents(etcdClient, newApiClient(*addr, *port), *addr, *port)
	glog.Infof("Kubernetes API Server is up and running on http://%s:%d", *addr, *port)

	select {}
}
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	s := app.NewCMServer()
	s.AddFlags(pflag.CommandLine)

	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := s.Run(pflag.CommandLine.Args()); err != nil {
		// Note: was fmt.Fprint(os.Stderr, err.Error), which printed the
		// method value instead of calling it.
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	s := service.NewSchedulerServer()
	s.AddStandaloneFlags(pflag.CommandLine)

	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := s.Run(hyperkube.Nil(), pflag.CommandLine.Args()); err != nil {
		// Note: was fmt.Fprintf(os.Stderr, err.Error()), which used the error
		// text as the format string.
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	glog.Infof("Creating etcd client pointing to %v", *etcdServer)
	etcdClient, err := etcdstorage.NewEtcdClientStartServerIfNecessary(*etcdServer)
	if err != nil {
		glog.Fatalf("Failed to connect to etcd: %v", err)
	}

	address := net.ParseIP(*addr)
	startComponents(etcdClient, newApiClient(address, *port), address, *port)
	glog.Infof("Kubernetes API Server is up and running on http://%s:%d", *addr, *port)

	select {}
}
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	rand.Seed(time.Now().UTC().UnixNano())

	s := app.NewAPIServer()
	s.AddFlags(pflag.CommandLine)

	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := s.Run(pflag.CommandLine.Args()); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
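// The server mains above (scheduler, controller-manager, apiserver) share one
// bootstrap shape: build a server object, register its flags on
// pflag.CommandLine, parse and init logs, then hand the leftover positional
// arguments to Run. The sketch below is an illustrative, self-contained
// reconstruction of that pattern, not one of the actual app.* types; the
// server struct and its fields are hypothetical stand-ins.
package main

import (
	"fmt"
	"os"

	"github.com/spf13/pflag"
)

// server is a hypothetical stand-in for the real *Server types.
type server struct {
	address string
	port    int
}

// AddFlags registers the server's flags on the given FlagSet, mirroring the
// s.AddFlags(pflag.CommandLine) calls above.
func (s *server) AddFlags(fs *pflag.FlagSet) {
	fs.StringVar(&s.address, "address", "127.0.0.1", "IP address to serve on")
	fs.IntVar(&s.port, "port", 8080, "port to serve on")
}

// Run receives the positional (non-flag) arguments and starts serving.
func (s *server) Run(args []string) error {
	fmt.Printf("serving on %s:%d, extra args: %v\n", s.address, s.port, args)
	return nil
}

func main() {
	s := &server{}
	s.AddFlags(pflag.CommandLine)
	pflag.Parse()
	if err := s.Run(pflag.CommandLine.Args()); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}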
func main() {
	util.InitFlags()
	goruntime.GOMAXPROCS(goruntime.NumCPU())
	if *provider == "" {
		// Missing provider means conformance mode, not a fatal error, so don't exit here.
		glog.Info("The --provider flag is not set. Treating as a conformance test. Some tests may not be run.")
	}
	if *times <= 0 {
		glog.Error("Invalid --times (negative or no testing requested)!")
		os.Exit(1)
	}
	gceConfig := &e2e.GCEConfig{
		ProjectID:  *gceProject,
		Zone:       *gceZone,
		MasterName: *masterName,
	}
	e2e.RunE2ETests(*kubeConfig, *authConfig, *certDir, *host, *repoRoot, *provider, gceConfig, *orderseed, *times, *reportDir, testList)
}
func main() {
	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()
	verifyMinionFlags()

	if len(clientConfig.Host) == 0 {
		glog.Fatal("usage: controller-manager -master <master>")
	}

	kubeClient, err := client.New(clientConfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	go http.ListenAndServe(net.JoinHostPort(address.String(), strconv.Itoa(*port)), nil)

	endpoints := service.NewEndpointController(kubeClient)
	go util.Forever(func() { endpoints.SyncServiceEndpoints() }, time.Second*10)

	controllerManager := replicationControllerPkg.NewReplicationManager(kubeClient)
	controllerManager.Run(10 * time.Second)

	cloud := cloudprovider.InitCloudProvider(*cloudProvider, *cloudConfigFile)
	nodeResources := &api.NodeResources{
		Capacity: api.ResourceList{
			api.ResourceCPU:    *resource.NewMilliQuantity(*nodeMilliCPU, resource.DecimalSI),
			api.ResourceMemory: *nodeMemory,
		},
	}
	nodeController := nodeControllerPkg.NewNodeController(cloud, *minionRegexp, machineList, nodeResources, kubeClient)
	nodeController.Run(10 * time.Second)

	select {}
}
func main() {
	util.InitFlags()
	runtime.GOMAXPROCS(runtime.NumCPU())
	util.ReallyCrash = true
	util.InitLogs()
	defer util.FlushLogs()

	go func() {
		defer util.FlushLogs()
		time.Sleep(3 * time.Minute)
		glog.Fatalf("This test has timed out.")
	}()

	manifestURL := ServeCachedManifestFile()
	apiServerURL := startComponents(manifestURL)

	// Ok. we're good to go.
	glog.Infof("API Server started on %s", apiServerURL)
	// Wait for the synchronization threads to come up.
	time.Sleep(time.Second * 10)

	kubeClient := client.NewOrDie(&client.Config{Host: apiServerURL, Version: testapi.Version()})

	// Run tests in parallel
	testFuncs := []testFunc{
		runReplicationControllerTest,
		runAtomicPutTest,
		runServiceTest,
		runAPIVersionsTest,
		runMasterServiceTest,
		runSelfLinkTest,
	}
	var wg sync.WaitGroup
	wg.Add(len(testFuncs))
	for i := range testFuncs {
		f := testFuncs[i]
		go func() {
			f(kubeClient)
			wg.Done()
		}()
	}
	wg.Wait()

	// Check that kubelet tried to make the pods.
	// Using a set to list unique creation attempts. Our fake is
	// really stupid, so kubelet tries to create these multiple times.
	createdPods := util.StringSet{}
	for _, p := range fakeDocker1.Created {
		// The last 8 characters are random, so slice them off.
		if n := len(p); n > 8 {
			createdPods.Insert(p[:n-8])
		}
	}
	for _, p := range fakeDocker2.Created {
		// The last 8 characters are random, so slice them off.
		if n := len(p); n > 8 {
			createdPods.Insert(p[:n-8])
		}
	}
	// We expect 9: 2 net containers + 2 pods from the replication controller +
	// 1 net container + 2 pods from the URL +
	// 1 net container + 1 pod from the service test.
	if len(createdPods) != 9 {
		glog.Fatalf("Unexpected list of created pods:\n\n%#v\n\n%#v\n\n%#v\n\n", createdPods.List(), fakeDocker1.Created, fakeDocker2.Created)
	}
	glog.Infof("OK - found created pods: %#v", createdPods.List())
}
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	addFlags(pflag.CommandLine)

	util.InitFlags()
	util.ReallyCrash = true
	util.InitLogs()
	defer util.FlushLogs()

	go func() {
		defer util.FlushLogs()
		time.Sleep(3 * time.Minute)
		glog.Fatalf("This test has timed out.")
	}()

	glog.Infof("Running tests for APIVersion: %s", apiVersion)

	firstManifestURL := ServeCachedManifestFile(testPodSpecFile)
	secondManifestURL := ServeCachedManifestFile(testPodSpecFile)
	apiServerURL, _ := startComponents(firstManifestURL, secondManifestURL, apiVersion)

	// Ok. we're good to go.
	glog.Infof("API Server started on %s", apiServerURL)
	// Wait for the synchronization threads to come up.
	time.Sleep(time.Second * 10)

	kubeClient := client.NewOrDie(&client.Config{Host: apiServerURL, Version: apiVersion})

	// Run tests in parallel
	testFuncs := []testFunc{
		runReplicationControllerTest,
		runAtomicPutTest,
		runPatchTest,
		runServiceTest,
		runAPIVersionsTest,
		runMasterServiceTest,
		func(c *client.Client) {
			runSelfLinkTestOnNamespace(c, api.NamespaceDefault)
			runSelfLinkTestOnNamespace(c, "other")
		},
	}

	// Only run at most maxConcurrency tests in parallel.
	if maxConcurrency <= 0 {
		maxConcurrency = len(testFuncs)
	}
	glog.Infof("Running %d tests in parallel.", maxConcurrency)
	ch := make(chan struct{}, maxConcurrency)

	var wg sync.WaitGroup
	wg.Add(len(testFuncs))
	for i := range testFuncs {
		f := testFuncs[i]
		go func() {
			ch <- struct{}{}
			f(kubeClient)
			<-ch
			wg.Done()
		}()
	}
	wg.Wait()
	close(ch)

	// Check that kubelet tried to make the containers.
	// Using a set to list unique creation attempts. Our fake is
	// really stupid, so kubelet tries to create these multiple times.
	createdConts := util.StringSet{}
	for _, p := range fakeDocker1.Created {
		// The last 8 characters are random, so slice them off.
		if n := len(p); n > 8 {
			createdConts.Insert(p[:n-8])
		}
	}
	for _, p := range fakeDocker2.Created {
		// The last 8 characters are random, so slice them off.
		if n := len(p); n > 8 {
			createdConts.Insert(p[:n-8])
		}
	}
	// We expect 12: 2 pod infra containers + 2 containers from the replication controller +
	// 1 pod infra container + 2 containers from the URL on the first Kubelet +
	// 1 pod infra container + 2 containers from the URL on the second Kubelet +
	// 1 pod infra container + 1 container from the service test.
	if len(createdConts) != 12 {
		glog.Fatalf("Expected 12 containers; got %v\n\nlist of created containers:\n\n%#v\n\nDocker 1 Created:\n\n%#v\n\nDocker 2 Created:\n\n%#v\n\n", len(createdConts), createdConts.List(), fakeDocker1.Created, fakeDocker2.Created)
	}
	glog.Infof("OK - found created containers: %#v", createdConts.List())

	// This test doesn't run with the others because it can't run in
	// parallel and also it schedules extra pods which would change the
	// above pod counting logic.
	runSchedulerNoPhantomPodsTest(kubeClient)

	glog.Infof("\n\nLogging high latency metrics from the 10250 kubelet")
	e2e.HighLatencyKubeletOperations(nil, 1*time.Second, "localhost:10250")
	glog.Infof("\n\nLogging high latency metrics from the 10251 kubelet")
	e2e.HighLatencyKubeletOperations(nil, 1*time.Second, "localhost:10251")
}
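// Both integration mains above fan their test funcs out across goroutines and
// wait on a sync.WaitGroup; the second one additionally caps concurrency with
// a buffered channel used as a counting semaphore (send to acquire a slot,
// receive to release it). A minimal stdlib-only sketch of that pattern,
// detached from the test harness:
package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	// Stand-in workload: eight tasks that each take a little while.
	tasks := make([]func(), 8)
	for i := range tasks {
		i := i // capture the loop variable (pre-Go 1.22 semantics)
		tasks[i] = func() {
			time.Sleep(100 * time.Millisecond)
			fmt.Println("task", i, "done")
		}
	}

	const maxConcurrency = 3
	sem := make(chan struct{}, maxConcurrency) // counting semaphore

	var wg sync.WaitGroup
	wg.Add(len(tasks))
	for _, f := range tasks {
		f := f
		go func() {
			sem <- struct{}{} // acquire a slot (blocks when all slots are taken)
			f()
			<-sem // release the slot
			wg.Done()
		}()
	}
	wg.Wait()
}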
func main() {
	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	if err := util.ApplyOomScoreAdj(*oomScoreAdj); err != nil {
		glog.Info(err)
	}

	verflag.PrintAndExitIfRequested()

	serviceConfig := config.NewServiceConfig()
	endpointsConfig := config.NewEndpointsConfig()

	protocol := iptables.ProtocolIpv4
	if net.IP(bindAddress).To4() == nil {
		protocol = iptables.ProtocolIpv6
	}
	loadBalancer := proxy.NewLoadBalancerRR()
	proxier := proxy.NewProxier(loadBalancer, net.IP(bindAddress), iptables.New(exec.New(), protocol))
	if proxier == nil {
		glog.Fatalf("failed to create proxier, aborting")
	}

	// Wire proxier to handle changes to services.
	serviceConfig.RegisterHandler(proxier)
	// And wire loadBalancer to handle changes to endpoints to services.
	endpointsConfig.RegisterHandler(loadBalancer)

	// Note: RegisterHandler() calls need to happen before creation of Sources because sources
	// only notify on changes, and the initial update (on process start) may be lost if no handlers
	// are registered yet.

	// Define the api config source.
	if clientConfig.Host != "" {
		glog.Infof("Using api calls to get config %v", clientConfig.Host)
		client, err := client.New(clientConfig)
		if err != nil {
			glog.Fatalf("Invalid API configuration: %v", err)
		}
		config.NewSourceAPI(
			client.Services(api.NamespaceAll),
			client.Endpoints(api.NamespaceAll),
			30*time.Second,
			serviceConfig.Channel("api"),
			endpointsConfig.Channel("api"),
		)
	} else {
		var etcdClient *etcd.Client

		// Set up etcd client.
		if len(etcdServerList) > 0 {
			// Set up logger for etcd client.
			etcd.SetLogger(util.NewLogger("etcd "))
			etcdClient = etcd.NewClient(etcdServerList)
		} else if *etcdConfigFile != "" {
			// Set up logger for etcd client.
			etcd.SetLogger(util.NewLogger("etcd "))
			var err error
			etcdClient, err = etcd.NewClientFromFile(*etcdConfigFile)
			if err != nil {
				glog.Fatalf("Error with etcd config file: %v", err)
			}
		}

		// Create a configuration source that handles configuration from etcd.
		if etcdClient != nil {
			glog.Infof("Using etcd servers %v", etcdClient.GetCluster())
			config.NewConfigSourceEtcd(etcdClient, serviceConfig.Channel("etcd"), endpointsConfig.Channel("etcd"))
		}
	}

	if *healthz_port > 0 {
		go util.Forever(func() {
			err := http.ListenAndServe(bindAddress.String()+":"+strconv.Itoa(*healthz_port), nil)
			if err != nil {
				glog.Errorf("Starting health server failed: %v", err)
			}
		}, 5*time.Second)
	}

	// Just loop forever for now...
	proxier.SyncLoop()
}
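// util.Forever, used above to keep the healthz server alive (and in the
// controller-manager main to drive the endpoint sync), presumably re-invokes
// its function in an endless loop with a pause between runs, recovering from
// panics so one bad iteration doesn't kill the process. A stdlib-only
// approximation of that behavior (an assumption about util.Forever, not its
// actual source):
package main

import (
	"log"
	"time"
)

// forever runs f repeatedly, sleeping period between invocations and
// recovering any panic so the loop keeps going.
func forever(f func(), period time.Duration) {
	for {
		func() {
			defer func() {
				if r := recover(); r != nil {
					log.Printf("recovered from panic: %v", r)
				}
			}()
			f()
		}()
		time.Sleep(period)
	}
}

func main() {
	n := 0
	go forever(func() {
		n++
		if n == 2 {
			panic("transient failure") // the loop survives this
		}
		log.Printf("iteration %d", n)
	}, 200*time.Millisecond)

	time.Sleep(time.Second)
}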
func main() {
	util.InitFlags()
	util.InitLogs()
	util.ReallyCrash = *reallyCrashForTesting
	defer util.FlushLogs()
	rand.Seed(time.Now().UTC().UnixNano())

	verflag.PrintAndExitIfRequested()

	// Cluster creation scripts support both kubernetes versions that 1) support the kubelet
	// watching the apiserver for pods, and 2) ones that don't. So they can set both --etcd_servers
	// and --api_servers. The current code will ignore the --etcd_servers flag, while older kubelet
	// code will use the --etcd_servers flag for pods, and use --api_servers for event publishing.
	//
	// TODO(erictune): convert all cloud provider scripts and Google Container Engine to
	// use only --api_servers, then delete the --etcd_servers flag and the resulting dead code.
	if len(etcdServerList) > 0 && len(apiServerList) > 0 {
		glog.Infof("Both --etcd_servers and --api_servers are set. Not using etcd source.")
		etcdServerList = util.StringList{}
	}

	setupRunOnce()

	if err := util.ApplyOomScoreAdj(*oomScoreAdj); err != nil {
		glog.Info(err)
	}

	client, err := standalone.GetAPIServerClient(*authPath, apiServerList)
	if err != nil && len(apiServerList) > 0 {
		glog.Warningf("No API client: %v", err)
	}

	kcfg := standalone.KubeletConfig{
		Address:                 address,
		AllowPrivileged:         *allowPrivileged,
		HostnameOverride:        *hostnameOverride,
		RootDirectory:           *rootDirectory,
		ConfigFile:              *config,
		ManifestURL:             *manifestURL,
		FileCheckFrequency:      *fileCheckFrequency,
		HttpCheckFrequency:      *httpCheckFrequency,
		NetworkContainerImage:   *networkContainerImage,
		SyncFrequency:           *syncFrequency,
		RegistryPullQPS:         *registryPullQPS,
		RegistryBurst:           *registryBurst,
		MinimumGCAge:            *minimumGCAge,
		MaxContainerCount:       *maxContainerCount,
		ClusterDomain:           *clusterDomain,
		ClusterDNS:              clusterDNS,
		Runonce:                 *runonce,
		Port:                    *port,
		CAdvisorPort:            *cAdvisorPort,
		EnableServer:            *enableServer,
		EnableDebuggingHandlers: *enableDebuggingHandlers,
		DockerClient:            util.ConnectToDockerOrDie(*dockerEndpoint),
		KubeClient:              client,
		EtcdClient:              kubelet.EtcdClientOrDie(etcdServerList, *etcdConfigFile),
		MasterServiceNamespace:  *masterServiceNamespace,
		VolumePlugins:           app.ProbeVolumePlugins(),
	}

	standalone.RunKubelet(&kcfg)

	// Runs forever.
	select {}
}
func main() {
	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()
	verifyPortalFlags()

	if (*etcdConfigFile != "" && len(etcdServerList) != 0) || (*etcdConfigFile == "" && len(etcdServerList) == 0) {
		glog.Fatalf("specify either -etcd_servers or -etcd_config")
	}

	capabilities.Initialize(capabilities.Capabilities{
		AllowPrivileged: *allowPrivileged,
	})

	cloud := cloudprovider.InitCloudProvider(*cloudProvider, *cloudConfigFile)

	kubeletClient, err := client.NewKubeletClient(&kubeletConfig)
	if err != nil {
		glog.Fatalf("Failure to start kubelet client: %v", err)
	}

	_, v1beta3 := runtimeConfig["api/v1beta3"]

	// TODO: expose same flags as client.BindClientConfigFlags but for a server.
	clientConfig := &client.Config{
		Host:    net.JoinHostPort(address.String(), strconv.Itoa(int(*port))),
		Version: *storageVersion,
	}
	client, err := client.New(clientConfig)
	if err != nil {
		glog.Fatalf("Invalid server address: %v", err)
	}

	helper, err := newEtcd(*etcdConfigFile, etcdServerList)
	if err != nil {
		glog.Fatalf("Invalid storage version or misconfigured etcd: %v", err)
	}

	n := net.IPNet(portalNet)

	authenticator, err := apiserver.NewAuthenticatorFromTokenFile(*tokenAuthFile)
	if err != nil {
		glog.Fatalf("Invalid Authentication Config: %v", err)
	}

	authorizer, err := apiserver.NewAuthorizerFromAuthorizationConfig(*authorizationMode, *authorizationPolicyFile)
	if err != nil {
		glog.Fatalf("Invalid Authorization Config: %v", err)
	}

	admissionControlPluginNames := strings.Split(*admissionControl, ",")
	admissionController := admission.NewFromPlugins(client, admissionControlPluginNames, *admissionControlConfigFile)

	config := &master.Config{
		Client:                 client,
		Cloud:                  cloud,
		EtcdHelper:             helper,
		HealthCheckMinions:     *healthCheckMinions,
		EventTTL:               *eventTTL,
		KubeletClient:          kubeletClient,
		PortalNet:              &n,
		EnableLogsSupport:      *enableLogsSupport,
		EnableUISupport:        true,
		EnableSwaggerSupport:   true,
		APIPrefix:              *apiPrefix,
		CorsAllowedOriginList:  corsAllowedOriginList,
		ReadOnlyPort:           *readOnlyPort,
		ReadWritePort:          *port,
		PublicAddress:          *publicAddressOverride,
		Authenticator:          authenticator,
		Authorizer:             authorizer,
		AdmissionControl:       admissionController,
		EnableV1Beta3:          v1beta3,
		MasterServiceNamespace: *masterServiceNamespace,
	}
	m := master.New(config)

	// We serve on 3 ports. See docs/reaching_the_api.md
	roLocation := ""
	if *readOnlyPort != 0 {
		roLocation = net.JoinHostPort(config.PublicAddress, strconv.Itoa(config.ReadOnlyPort))
	}
	secureLocation := ""
	if *securePort != 0 {
		secureLocation = net.JoinHostPort(config.PublicAddress, strconv.Itoa(*securePort))
	}
	rwLocation := net.JoinHostPort(address.String(), strconv.Itoa(int(*port)))

	// See the flag commentary to understand our assumptions when opening the read-only and read-write ports.

	if roLocation != "" {
		// Allow 1 read-only request per second, allow up to 20 in a burst before enforcing.
		rl := util.NewTokenBucketRateLimiter(1.0, 20)
		readOnlyServer := &http.Server{
			Addr:           roLocation,
			Handler:        apiserver.RecoverPanics(apiserver.ReadOnly(apiserver.RateLimit(rl, m.InsecureHandler))),
			ReadTimeout:    5 * time.Minute,
			WriteTimeout:   5 * time.Minute,
			MaxHeaderBytes: 1 << 20,
		}
		glog.Infof("Serving read-only insecurely on %s", roLocation)
		go func() {
			defer util.HandleCrash()
			for {
				if err := readOnlyServer.ListenAndServe(); err != nil {
					glog.Errorf("Unable to listen for read only traffic (%v); will try again.", err)
				}
				time.Sleep(15 * time.Second)
			}
		}()
	}

	if secureLocation != "" {
		secureServer := &http.Server{
			Addr:           secureLocation,
			Handler:        apiserver.RecoverPanics(m.Handler),
			ReadTimeout:    5 * time.Minute,
			WriteTimeout:   5 * time.Minute,
			MaxHeaderBytes: 1 << 20,
			TLSConfig: &tls.Config{
				// Change default from SSLv3 to TLSv1.0 (because of POODLE vulnerability).
				MinVersion: tls.VersionTLS10,
				// Populate PeerCertificates in requests, but don't reject connections without certificates.
				// This allows certificates to be validated by authenticators, while still allowing other auth types.
				ClientAuth: tls.RequestClientCert,
			},
		}
		glog.Infof("Serving securely on %s", secureLocation)
		go func() {
			defer util.HandleCrash()
			for {
				if *tlsCertFile == "" && *tlsPrivateKeyFile == "" {
					*tlsCertFile = "/var/run/kubernetes/apiserver.crt"
					*tlsPrivateKeyFile = "/var/run/kubernetes/apiserver.key"
					if err := util.GenerateSelfSignedCert(config.PublicAddress, *tlsCertFile, *tlsPrivateKeyFile); err != nil {
						glog.Errorf("Unable to generate self signed cert: %v", err)
					} else {
						glog.Infof("Using self-signed cert (%s, %s)", *tlsCertFile, *tlsPrivateKeyFile)
					}
				}
				if err := secureServer.ListenAndServeTLS(*tlsCertFile, *tlsPrivateKeyFile); err != nil {
					glog.Errorf("Unable to listen for secure traffic (%v); will try again.", err)
				}
				time.Sleep(15 * time.Second)
			}
		}()
	}

	s := &http.Server{
		Addr:           rwLocation,
		Handler:        apiserver.RecoverPanics(m.InsecureHandler),
		ReadTimeout:    5 * time.Minute,
		WriteTimeout:   5 * time.Minute,
		MaxHeaderBytes: 1 << 20,
	}
	glog.Infof("Serving insecurely on %s", rwLocation)
	glog.Fatal(s.ListenAndServe())
}
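// The secure-serving block above leans on two crypto/tls knobs: MinVersion
// raised from the SSLv3 default because of POODLE, and ClientAuth set to
// RequestClientCert so client certificates are surfaced to authenticators
// without being required. A minimal stdlib sketch of that server shape; the
// port and the cert/key paths are placeholders:
package main

import (
	"crypto/tls"
	"log"
	"net/http"
	"time"
)

func main() {
	srv := &http.Server{
		Addr: ":8443",
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			// r.TLS.PeerCertificates is populated only if the client offered a
			// cert; the connection is accepted either way under RequestClientCert.
			w.Write([]byte("ok\n"))
		}),
		ReadTimeout:    5 * time.Minute,
		WriteTimeout:   5 * time.Minute,
		MaxHeaderBytes: 1 << 20,
		TLSConfig: &tls.Config{
			MinVersion: tls.VersionTLS10,      // refuse SSLv3 (POODLE)
			ClientAuth: tls.RequestClientCert, // ask for, but don't require, a client cert
		},
	}
	log.Fatal(srv.ListenAndServeTLS("server.crt", "server.key"))
}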