func main() {
	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if hadoopConfDir == nil || *hadoopConfDir == "" {
		glog.Fatalf("HADOOP_CONF_DIR not set!")
	}
	os.Setenv("HADOOP_CONF_DIR", *hadoopConfDir)

	kubeClient, err := client.New(clientConfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}
	record.StartRecording(kubeClient.Events(""), api.EventSource{Component: "scheduler"})

	go http.ListenAndServe(net.JoinHostPort(address.String(), strconv.Itoa(*port)), nil)

	configFactory := factory.NewConfigFactory(kubeClient)
	config, err := createConfig(configFactory)
	if err != nil {
		glog.Fatalf("Failed to create scheduler configuration: %v", err)
	}

	s := scheduler.New(config)
	s.Run()

	select {}
}
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()

	// Set up logger for etcd client
	etcd.SetLogger(util.NewLogger("etcd "))

	glog.Infof("Using configuration file %s and etcd_servers %s", *config_file, *etcd_servers)

	proxyConfig := config.NewServiceConfig()

	// Create a configuration source that handles configuration from etcd.
	etcdClient := etcd.NewClient([]string{*etcd_servers})
	config.NewConfigSourceEtcd(etcdClient,
		proxyConfig.GetServiceConfigurationChannel("etcd"),
		proxyConfig.GetEndpointsConfigurationChannel("etcd"))

	// And create a configuration source that reads from a local file
	config.NewConfigSourceFile(*config_file,
		proxyConfig.GetServiceConfigurationChannel("file"),
		proxyConfig.GetEndpointsConfigurationChannel("file"))

	loadBalancer := proxy.NewLoadBalancerRR()
	proxier := proxy.NewProxier(loadBalancer)
	// Wire proxier to handle changes to services
	proxyConfig.RegisterServiceHandler(proxier)
	// And wire loadBalancer to handle changes to endpoints to services
	proxyConfig.RegisterEndpointsHandler(loadBalancer)

	// Just loop forever for now...
	select {}
}
func main() { flag.Parse() util.InitLogs() defer util.FlushLogs() if len(machineList) == 0 { glog.Fatal("No machines specified!") } var cloud cloudprovider.Interface switch *cloudProvider { case "gce": var err error cloud, err = cloudprovider.NewGCECloud() if err != nil { glog.Fatal("Couldn't connect to GCE cloud: %#v", err) } default: if len(*cloudProvider) > 0 { glog.Infof("Unknown cloud provider: %s", *cloudProvider) } else { glog.Info("No cloud provider specified.") } } var m *master.Master if len(etcdServerList) > 0 { m = master.New(etcdServerList, machineList, cloud) } else { m = master.NewMemoryServer(machineList, cloud) } glog.Fatal(m.Run(net.JoinHostPort(*address, strconv.Itoa(int(*port))), *apiPrefix)) }
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	kubeClient, err := client.New(clientConfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}
	record.StartRecording(kubeClient.Events(""), api.EventSource{Component: "scheduler"})

	go http.ListenAndServe(net.JoinHostPort(address.String(), strconv.Itoa(*port)), nil)

	configFactory := factory.NewConfigFactory(kubeClient)
	config, err := configFactory.Create()
	if err != nil {
		glog.Fatalf("Failed to create scheduler configuration: %v", err)
	}

	s := scheduler.New(config)
	s.Run()

	select {}
}
func main() {
	flag.Parse()
	runtime.GOMAXPROCS(runtime.NumCPU())
	util.ReallyCrash = true
	util.InitLogs()
	defer util.FlushLogs()

	go func() {
		defer util.FlushLogs()
		time.Sleep(3 * time.Minute)
		glog.Fatalf("This test has timed out.")
	}()

	c := loadClientOrDie()

	tests := []func(c *client.Client) bool{
		TestKubernetesROService,
		// TODO(brendandburns): fix this test and re-add it: TestPodUpdate,
	}

	passed := true
	for _, test := range tests {
		testPassed := test(c)
		if !testPassed {
			passed = false
		}
	}
	if !passed {
		glog.Fatalf("Tests failed")
	}
}
func main() { flag.Parse() util.InitLogs() defer util.FlushLogs() rand.Seed(time.Now().UTC().UnixNano()) // Set up logger for etcd client etcd.SetLogger(util.NewLogger("etcd ")) endpoint := "unix:///var/run/docker.sock" dockerClient, err := docker.NewClient(endpoint) if err != nil { glog.Fatal("Couldn't connnect to docker.") } hostname := []byte(*hostnameOverride) if string(hostname) == "" { // Note: We use exec here instead of os.Hostname() because we // want the FQDN, and this is the easiest way to get it. hostname, err = exec.Command("hostname", "-f").Output() if err != nil { glog.Fatalf("Couldn't determine hostname: %v", err) } } my_kubelet := kubelet.Kubelet{ Hostname: string(hostname), DockerClient: dockerClient, FileCheckFrequency: *fileCheckFrequency, SyncFrequency: *syncFrequency, HTTPCheckFrequency: *httpCheckFrequency, } my_kubelet.RunKubelet(*config, *manifestUrl, *etcdServers, *address, *port) }
// NewSchedulerCommand provides a CLI handler for the 'scheduler' command
func NewSchedulerCommand(name, fullName string, out io.Writer) *cobra.Command {
	s := app.NewSchedulerServer()

	cmd := &cobra.Command{
		Use:   name,
		Short: "Launch Kubernetes scheduler (kube-scheduler)",
		Long:  controllersLong,
		Run: func(c *cobra.Command, args []string) {
			startProfiler()

			util.InitLogs()
			defer util.FlushLogs()

			if err := s.Run(pflag.CommandLine.Args()); err != nil {
				fmt.Fprintf(os.Stderr, "%v\n", err)
				os.Exit(1)
			}
		},
	}
	cmd.SetOutput(out)

	flags := cmd.Flags()
	// TODO: uncomment after picking up a newer cobra
	//pflag.AddFlagSetToPFlagSet(flag, flags)
	s.AddFlags(flags)

	return cmd
}
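// A hypothetical usage sketch (not from the source): mounting the scheduler
// command on a parent cobra command. The root command name "kube" and the
// fullName argument passed here are illustrative.
func newRootCommand(out io.Writer) *cobra.Command {
	root := &cobra.Command{Use: "kube"}
	root.AddCommand(NewSchedulerCommand("scheduler", "kube scheduler", out))
	return root
}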
func main() { flag.Parse() util.InitLogs() defer util.FlushLogs() verflag.PrintAndExitIfRequested() verifyMinionFlags() if len(etcdServerList) == 0 { glog.Fatalf("-etcd_servers flag is required.") } capabilities.Initialize(capabilities.Capabilities{ AllowPrivileged: *allowPrivileged, }) cloud := initCloudProvider(*cloudProvider, *cloudConfigFile) podInfoGetter := &client.HTTPPodInfoGetter{ Client: http.DefaultClient, Port: *minionPort, } client, err := client.New(net.JoinHostPort(*address, strconv.Itoa(int(*port))), nil) if err != nil { glog.Fatalf("Invalid server address: %v", err) } m := master.New(&master.Config{ Client: client, Cloud: cloud, EtcdServers: etcdServerList, HealthCheckMinions: *healthCheckMinions, Minions: machineList, MinionCacheTTL: *minionCacheTTL, MinionRegexp: *minionRegexp, PodInfoGetter: podInfoGetter, }) storage, codec := m.API_v1beta1() handler := apiserver.Handle(storage, codec, *apiPrefix) if len(corsAllowedOriginList) > 0 { allowedOriginRegexps, err := util.CompileRegexps(corsAllowedOriginList) if err != nil { glog.Fatalf("Invalid CORS allowed origin, --cors_allowed_origins flag was set to %v - %v", strings.Join(corsAllowedOriginList, ","), err) } handler = apiserver.CORS(handler, allowedOriginRegexps, nil, nil, "true") } s := &http.Server{ Addr: net.JoinHostPort(*address, strconv.Itoa(int(*port))), Handler: apiserver.RecoverPanics(handler), ReadTimeout: 5 * time.Minute, WriteTimeout: 5 * time.Minute, MaxHeaderBytes: 1 << 20, } glog.Fatal(s.ListenAndServe()) }
// CloudCfg command line tool. func main() { flag.Usage = func() { usage() } flag.Parse() // Scan the arguments list util.InitLogs() defer util.FlushLogs() if *versionFlag { fmt.Println("Version:", AppVersion) os.Exit(0) } secure := true var masterServer string if len(*httpServer) > 0 { masterServer = *httpServer } else if len(os.Getenv("KUBERNETES_MASTER")) > 0 { masterServer = os.Getenv("KUBERNETES_MASTER") } else { masterServer = "http://localhost:8080" } parsedURL, err := url.Parse(masterServer) if err != nil { glog.Fatalf("Unable to parse %v as a URL\n", err) } if parsedURL.Scheme != "" && parsedURL.Scheme != "https" { secure = false } var auth *kube_client.AuthInfo if secure { auth, err = kubecfg.LoadAuthInfo(*authConfig) if err != nil { glog.Fatalf("Error loading auth: %v", err) } } if *proxy { glog.Info("Starting to serve on localhost:8001") server := kubecfg.NewProxyServer(*www, masterServer, auth) glog.Fatal(server.Serve()) } if len(flag.Args()) < 1 { usage() os.Exit(1) } method := flag.Arg(0) client := kube_client.New(masterServer, auth) matchFound := executeAPIRequest(method, client) || executeControllerRequest(method, client) if matchFound == false { glog.Fatalf("Unknown command %s", method) } }
func TestE2E(t *testing.T) {
	util.ReallyCrash = true
	util.InitLogs()
	defer util.FlushLogs()

	// TODO: possibly clean up or refactor this functionality.
	if testContext.Provider == "" {
		glog.Fatal("The --provider flag is not set. Treating as a conformance test. Some tests may not be run.")
	}
	if testContext.Provider == "aws" {
		awsConfig := "[Global]\n"
		if cloudConfig.Zone == "" {
			glog.Fatal("--gce-zone must be specified for AWS")
		}
		awsConfig += fmt.Sprintf("Zone=%s\n", cloudConfig.Zone)
		if cloudConfig.ClusterTag == "" {
			glog.Fatal("--cluster-tag must be specified for AWS")
		}
		awsConfig += fmt.Sprintf("KubernetesClusterTag=%s\n", cloudConfig.ClusterTag)

		var err error
		cloudConfig.Provider, err = cloudprovider.GetCloudProvider(testContext.Provider, strings.NewReader(awsConfig))
		if err != nil {
			glog.Fatal("Error building AWS provider: ", err)
		}
	}

	// Disable density test unless it's explicitly requested.
	if config.GinkgoConfig.FocusString == "" && config.GinkgoConfig.SkipString == "" {
		config.GinkgoConfig.SkipString = "Skipped"
	}
	gomega.RegisterFailHandler(ginkgo.Fail)

	// Ensure all pods are running and ready before starting tests (otherwise,
	// cluster infrastructure pods that are being pulled or started can block
	// test pods from running, and tests that ensure all pods are running and
	// ready will fail).
	if err := waitForPodsRunningReady(api.NamespaceDefault, testContext.MinStartupPods, podStartupTimeout); err != nil {
		glog.Fatalf("Error waiting for all pods to be running and ready: %v", err)
	}

	// Run tests through the Ginkgo runner with output to console + JUnit for Jenkins
	var r []ginkgo.Reporter
	if *reportDir != "" {
		r = append(r, reporters.NewJUnitReporter(path.Join(*reportDir, fmt.Sprintf("junit_%02d.xml", config.GinkgoConfig.ParallelNode))))
		failReport := &failReporter{}
		r = append(r, failReport)
		defer func() {
			if failReport.failed {
				coreDump(*reportDir)
			}
		}()
	}
	ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "Kubernetes e2e suite", r)
}
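// A minimal sketch (not from the source) of the failReporter used above. It
// assumes Ginkgo v1's Reporter interface and the config/types packages from
// github.com/onsi/ginkgo; only SpecDidComplete does real work, recording
// whether any spec failed so the deferred coreDump call can fire.
type failReporter struct {
	failed bool
}

func (f *failReporter) SpecSuiteWillBegin(cfg config.GinkgoConfigType, summary *types.SuiteSummary) {}
func (f *failReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary)                          {}
func (f *failReporter) SpecWillRun(specSummary *types.SpecSummary)                                  {}

func (f *failReporter) SpecDidComplete(specSummary *types.SpecSummary) {
	if specSummary.Failed() {
		f.failed = true
	}
}

func (f *failReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {}
func (f *failReporter) SpecSuiteDidEnd(summary *types.SuiteSummary)       {}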
func main() { flag.Parse() util.InitLogs() defer util.FlushLogs() if len(machineList) == 0 { glog.Fatal("No machines specified!") } var cloud cloudprovider.Interface switch *cloudProvider { case "gce": var err error cloud, err = cloudprovider.NewGCECloud() if err != nil { glog.Fatalf("Couldn't connect to GCE cloud: %#v", err) } default: if len(*cloudProvider) > 0 { glog.Infof("Unknown cloud provider: %s", *cloudProvider) } else { glog.Info("No cloud provider specified.") } } podInfoGetter := &client.HTTPPodInfoGetter{ Client: http.DefaultClient, Port: *minionPort, } client := client.New("http://"+net.JoinHostPort(*address, strconv.Itoa(int(*port))), nil) var m *master.Master if len(etcdServerList) > 0 { m = master.New(&master.Config{ Client: client, Cloud: cloud, EtcdServers: etcdServerList, HealthCheckMinions: *healthCheckMinions, Minions: machineList, MinionCacheTTL: *minionCacheTTL, MinionRegexp: *minionRegexp, PodInfoGetter: podInfoGetter, }) } else { m = master.NewMemoryServer(&master.Config{ Client: client, Cloud: cloud, Minions: machineList, PodInfoGetter: podInfoGetter, }) } glog.Fatal(m.Run(net.JoinHostPort(*address, strconv.Itoa(int(*port))), *apiPrefix)) }
func main() { flag.Parse() util.InitLogs() defer util.FlushLogs() verflag.PrintAndExitIfRequested() verifyMinionFlags() if len(etcdServerList) == 0 { glog.Fatalf("-etcd_servers flag is required.") } cloud, err := cloudprovider.GetCloudProvider(*cloudProvider) if err != nil { glog.Fatalf("Couldn't init cloud provider %q: %#v", *cloudProvider, err) } if cloud == nil { if len(*cloudProvider) > 0 { glog.Fatalf("Unknown cloud provider: %s", *cloudProvider) } else { glog.Info("No cloud provider specified.") } } podInfoGetter := &client.HTTPPodInfoGetter{ Client: http.DefaultClient, Port: *minionPort, } client, err := client.New(net.JoinHostPort(*address, strconv.Itoa(int(*port))), nil) if err != nil { glog.Fatalf("Invalid server address: %v", err) } m := master.New(&master.Config{ Client: client, Cloud: cloud, EtcdServers: etcdServerList, HealthCheckMinions: *healthCheckMinions, Minions: machineList, MinionCacheTTL: *minionCacheTTL, MinionRegexp: *minionRegexp, PodInfoGetter: podInfoGetter, }) storage, codec := m.API_v1beta1() s := &http.Server{ Addr: net.JoinHostPort(*address, strconv.Itoa(int(*port))), Handler: apiserver.Handle(storage, codec, *apiPrefix), ReadTimeout: 5 * time.Minute, WriteTimeout: 5 * time.Minute, MaxHeaderBytes: 1 << 20, } glog.Fatal(s.ListenAndServe()) }
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	serviceConfig := config.NewServiceConfig()
	endpointsConfig := config.NewEndpointsConfig()

	// define api config source
	if *master != "" {
		glog.Infof("Using api calls to get config %v", *master)
		//TODO: add auth info
		client, err := client.New(*master, nil)
		if err != nil {
			glog.Fatalf("Invalid -master: %v", err)
		}
		config.NewSourceAPI(
			client,
			30*time.Second,
			serviceConfig.Channel("api"),
			endpointsConfig.Channel("api"),
		)
	}

	// Create a configuration source that handles configuration from etcd.
	if len(etcdServerList) > 0 && *master == "" {
		glog.Infof("Using etcd servers %v", etcdServerList)

		// Set up logger for etcd client
		etcd.SetLogger(util.NewLogger("etcd "))
		etcdClient := etcd.NewClient(etcdServerList)

		config.NewConfigSourceEtcd(etcdClient,
			serviceConfig.Channel("etcd"),
			endpointsConfig.Channel("etcd"))
	}

	// And create a configuration source that reads from a local file
	config.NewConfigSourceFile(*configFile,
		serviceConfig.Channel("file"),
		endpointsConfig.Channel("file"))
	glog.Infof("Using configuration file %s", *configFile)

	loadBalancer := proxy.NewLoadBalancerRR()
	proxier := proxy.NewProxier(loadBalancer)
	// Wire proxier to handle changes to services
	serviceConfig.RegisterHandler(proxier)
	// And wire loadBalancer to handle changes to endpoints to services
	endpointsConfig.RegisterHandler(loadBalancer)

	// Just loop forever for now...
	select {}
}
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())

	s := app.NewSchedulerServer()
	s.AddFlags(pflag.CommandLine)

	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	s.Run(pflag.CommandLine.Args())
}
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()
	rand.Seed(time.Now().UTC().UnixNano())

	verflag.PrintAndExitIfRequested()

	setupRunOnce()

	if err := util.ApplyOomScoreAdj(*oomScoreAdj); err != nil {
		glog.Info(err)
	}

	client, err := standalone.GetAPIServerClient(*authPath, apiServerList)
	if err != nil && len(apiServerList) > 0 {
		glog.Warningf("No API client: %v", err)
	}

	kcfg := standalone.KubeletConfig{
		Address:                 address,
		AllowPrivileged:         *allowPrivileged,
		HostnameOverride:        *hostnameOverride,
		RootDirectory:           *rootDirectory,
		ConfigFile:              *config,
		ManifestURL:             *manifestURL,
		FileCheckFrequency:      *fileCheckFrequency,
		HttpCheckFrequency:      *httpCheckFrequency,
		NetworkContainerImage:   *networkContainerImage,
		SyncFrequency:           *syncFrequency,
		RegistryPullQPS:         *registryPullQPS,
		RegistryBurst:           *registryBurst,
		MinimumGCAge:            *minimumGCAge,
		MaxContainerCount:       *maxContainerCount,
		ClusterDomain:           *clusterDomain,
		ClusterDNS:              clusterDNS,
		Runonce:                 *runonce,
		Port:                    *port,
		CAdvisorPort:            *cAdvisorPort,
		EnableServer:            *enableServer,
		EnableDebuggingHandlers: *enableDebuggingHandlers,
		DockerClient:            util.ConnectToDockerOrDie(*dockerEndpoint),
		KubeClient:              client,
		EtcdClient:              kubelet.EtcdClientOrDie(etcdServerList, *etcdConfigFile),
		MasterServiceNamespace:  *masterServiceNamespace,
	}

	standalone.RunKubelet(&kcfg)

	// runs forever
	select {}
}
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()
	verifyMinionFlags()

	if len(clientConfig.Host) == 0 {
		glog.Fatal("usage: controller-manager -master <master>")
	}

	kubeClient, err := client.New(clientConfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	if int64(int(*nodeMilliCPU)) != *nodeMilliCPU {
		glog.Warningf("node_milli_cpu is too big for platform. Clamping: %d -> %d",
			*nodeMilliCPU, math.MaxInt32)
		*nodeMilliCPU = math.MaxInt32
	}
	if int64(int(*nodeMemory)) != *nodeMemory {
		glog.Warningf("node_memory is too big for platform. Clamping: %d -> %d",
			*nodeMemory, math.MaxInt32)
		*nodeMemory = math.MaxInt32
	}

	go http.ListenAndServe(net.JoinHostPort(address.String(), strconv.Itoa(*port)), nil)

	endpoints := service.NewEndpointController(kubeClient)
	go util.Forever(func() { endpoints.SyncServiceEndpoints() }, time.Second*10)

	controllerManager := replicationControllerPkg.NewReplicationManager(kubeClient)
	controllerManager.Run(10 * time.Second)

	cloud := cloudprovider.InitCloudProvider(*cloudProvider, *cloudConfigFile)
	nodeResources := &api.NodeResources{
		Capacity: api.ResourceList{
			resources.CPU:    util.NewIntOrStringFromInt(int(*nodeMilliCPU)),
			resources.Memory: util.NewIntOrStringFromInt(int(*nodeMemory)),
		},
	}
	minionController := minionControllerPkg.NewMinionController(cloud, *minionRegexp, machineList, nodeResources, kubeClient)
	minionController.Run(10 * time.Second)

	select {}
}
func main() { flag.Parse() util.InitLogs() defer util.FlushLogs() glog.Infof("Creating etcd client pointing to %v", *etcdServer) etcdClient, err := tools.NewEtcdClientStartServerIfNecessary(*etcdServer) if err != nil { glog.Fatalf("Failed to connect to etcd: %v", err) } startComponents(etcdClient, newApiClient(*addr, *port), *addr, *port) glog.Infof("Kubernetes API Server is up and running on http://%s:%d", *addr, *port) select {} }
// CloudCfg command line tool. func main() { flag.Usage = func() { usage() } flag.Parse() // Scan the arguments list util.InitLogs() defer util.FlushLogs() if *versionFlag { fmt.Println("Version:", APP_VERSION) os.Exit(0) } secure := true parsedUrl, err := url.Parse(*httpServer) if err != nil { glog.Fatalf("Unable to parse %v as a URL\n", err) } if parsedUrl.Scheme != "" && parsedUrl.Scheme != "https" { secure = false } var auth *kube_client.AuthInfo if secure { auth, err = kubecfg.LoadAuthInfo(*authConfig) if err != nil { glog.Fatalf("Error loading auth: %#v", err) } } if *proxy { glog.Info("Starting to serve on localhost:8001") server := kubecfg.NewProxyServer(*www, *httpServer, auth) glog.Fatal(server.Serve()) } if len(flag.Args()) < 1 { usage() os.Exit(1) } method := flag.Arg(0) matchFound := executeAPIRequest(method, auth) || executeControllerRequest(method, auth) if matchFound == false { glog.Fatalf("Unknown command %s", method) } }
func main() {
	flag.Parse()
	goruntime.GOMAXPROCS(goruntime.NumCPU())
	util.ReallyCrash = true
	util.InitLogs()
	defer util.FlushLogs()

	go func() {
		defer util.FlushLogs()
		time.Sleep(5 * time.Minute)
		glog.Fatalf("This test has timed out. Cleanup not guaranteed.")
	}()

	c := loadClientOrDie()

	// Define the tests. Important: for a clean test grid, please keep ids for a test constant.
	tests := []TestSpec{
		{TestKubernetesROService, "TestKubernetesROService", 1},
		{TestKubeletSendsEvent, "TestKubeletSendsEvent", 2},
		{TestImportantURLs, "TestImportantURLs", 3},
		{TestPodUpdate, "TestPodUpdate", 4},
		{TestNetwork, "TestNetwork", 5},
		{TestClusterDNS, "TestClusterDNS", 6},
		{TestPodHasServiceEnvVars, "TestPodHasServiceEnvVars", 7},
	}

	info := []TestInfo{}
	passed := true
	for i, test := range tests {
		glog.Infof("Running test %d", i+1)
		testPassed := test.test(c)
		if !testPassed {
			glog.Infof("        test %d failed", i+1)
			passed = false
		} else {
			glog.Infof("        test %d passed", i+1)
		}
		// TODO: clean up objects created during a test after the test, so cases
		// are independent.
		info = append(info, TestInfo{testPassed, test})
	}
	outputTAPSummary(info)
	if !passed {
		glog.Fatalf("At least one test failed")
	} else {
		glog.Infof("All tests pass")
	}
}
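// A minimal sketch (not from the source) of the TestSpec and TestInfo types
// the loop above assumes: a test function, a stable name for the TAP summary,
// and a constant id for the test grid. Field names are illustrative.
type TestSpec struct {
	test func(c *client.Client) bool
	name string
	id   int
}

type TestInfo struct {
	passed bool
	spec   TestSpec
}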
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())

	s := service.NewSchedulerServer()
	s.AddStandaloneFlags(pflag.CommandLine)

	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := s.Run(hyperkube.Nil(), pflag.CommandLine.Args()); err != nil {
		// Don't pass the error string as a format string.
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())

	s := app.NewCMServer()
	s.AddFlags(pflag.CommandLine)

	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := s.Run(pflag.CommandLine.Args()); err != nil {
		// The original printed err.Error without calling it, emitting the
		// method value instead of the message.
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
func main() { flag.Parse() util.InitLogs() defer util.FlushLogs() verflag.PrintAndExitIfRequested() // TODO: security story for plugins! kubeClient := client.New("http://"+*master, nil) configFactory := &factory.ConfigFactory{Client: kubeClient} config := configFactory.Create() s := scheduler.New(config) s.Run() select {} }
func main() { flag.Parse() util.InitLogs() defer util.FlushLogs() verflag.PrintAndExitIfRequested() if len(*master) == 0 { glog.Fatal("usage: controller-manager -master <master>") } controllerManager := controller.NewReplicationManager( client.New("http://"+*master, nil)) controllerManager.Run(10 * time.Second) select {} }
func main() { runtime.GOMAXPROCS(runtime.NumCPU()) util.InitFlags() util.InitLogs() defer util.FlushLogs() glog.Infof("Creating etcd client pointing to %v", *etcdServer) etcdClient, err := etcdstorage.NewEtcdClientStartServerIfNecessary(*etcdServer) if err != nil { glog.Fatalf("Failed to connect to etcd: %v", err) } address := net.ParseIP(*addr) startComponents(etcdClient, newApiClient(address, *port), address, *port) glog.Infof("Kubernetes API Server is up and running on http://%s:%d", *addr, *port) select {} }
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	rand.Seed(time.Now().UTC().UnixNano())

	s := app.NewAPIServer()
	s.AddFlags(pflag.CommandLine)

	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := s.Run(pflag.CommandLine.Args()); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()

	if len(*etcdServers) == 0 || len(*master) == 0 {
		glog.Fatal("usage: controller-manager -etcd_servers <servers> -master <master>")
	}

	// Set up logger for etcd client
	etcd.SetLogger(util.NewLogger("etcd "))

	controllerManager := controller.MakeReplicationManager(
		etcd.NewClient([]string{*etcdServers}),
		client.New("http://"+*master, nil))
	controllerManager.Run(10 * time.Second)
	select {}
}
func main() { flag.Parse() util.InitLogs() defer util.FlushLogs() verflag.PrintAndExitIfRequested() if len(*master) == 0 { glog.Fatal("usage: controller-manager -master <master>") } kubeClient, err := client.New(*master, nil) if err != nil { glog.Fatalf("Invalid -master: %v", err) } controllerManager := controller.NewReplicationManager(kubeClient) controllerManager.Run(10 * time.Second) select {} }
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	// TODO: security story for plugins!
	kubeClient, err := client.New(*master, nil)
	if err != nil {
		glog.Fatalf("Invalid -master: %v", err)
	}

	configFactory := &factory.ConfigFactory{Client: kubeClient}
	config := configFactory.Create()
	s := scheduler.New(config)
	s.Run()

	select {}
}
// RunE2ETests runs each Go end-to-end test. This function assumes the
// creation of a test cluster.
func RunE2ETests(context *TestContextType, orderseed int64, times int, reportDir string, testList []string) {
	testContext = *context
	util.ReallyCrash = true
	util.InitLogs()
	defer util.FlushLogs()

	if len(testList) != 0 {
		if config.GinkgoConfig.FocusString != "" || config.GinkgoConfig.SkipString != "" {
			glog.Fatal("Either specify --test/-t or --ginkgo.focus/--ginkgo.skip but not both.")
		}
		var testRegexps []string
		for _, t := range testList {
			testRegexps = append(testRegexps, regexp.QuoteMeta(t))
		}
		config.GinkgoConfig.FocusString = `\b(` + strings.Join(testRegexps, "|") + `)\b`
	}

	// Disable density test unless it's explicitly requested.
	if config.GinkgoConfig.FocusString == "" && config.GinkgoConfig.SkipString == "" {
		config.GinkgoConfig.SkipString = "Skipped"
	}

	// TODO: Make orderseed work again.
	var passed testResult = true
	gomega.RegisterFailHandler(ginkgo.Fail)

	// Run the existing tests with output to console + JUnit for Jenkins
	for i := 0; i < times && passed; i++ {
		var r []ginkgo.Reporter
		if reportDir != "" {
			r = append(r, reporters.NewJUnitReporter(path.Join(reportDir, fmt.Sprintf("junit_%d.xml", i+1))))
		}
		ginkgo.RunSpecsWithDefaultAndCustomReporters(&passed, fmt.Sprintf("Kubernetes e2e Suite run %d of %d", i+1, times), r)
	}

	if !passed {
		glog.Fatalf("At least one test failed")
	} else {
		glog.Infof("All tests pass")
	}
}
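// A minimal sketch (not from the source) of the testResult type used above,
// assuming Ginkgo v1's GinkgoTestingT interface (a single Fail method): the
// bool flips to false on failure, so &passed can stand in for a *testing.T
// across repeated suite runs.
type testResult bool

func (t *testResult) Fail() { *t = false }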
func main() { flag.Parse() util.InitLogs() defer util.FlushLogs() verflag.PrintAndExitIfRequested() if len(*master) == 0 { glog.Fatal("usage: controller-manager -master <master>") } kubeClient, err := client.New(*master, nil) if err != nil { glog.Fatalf("Invalid -master: %v", err) } go http.ListenAndServe(net.JoinHostPort(*address, strconv.Itoa(*port)), nil) controllerManager := controller.NewReplicationManager(kubeClient) controllerManager.Run(10 * time.Second) select {} }