func main() {
	leaderElection := kube_leaderelection.DefaultLeaderElectionConfiguration()
	leaderElection.LeaderElect = true
	kube_leaderelection.BindFlags(&leaderElection, pflag.CommandLine)

	flag.Var(&nodeGroupsFlag, "nodes", "sets min,max size and other configuration data for a node group in a format accepted by cloud provider. "+
		"Can be used multiple times. Format: <min>:<max>:<other...>")
	kube_flag.InitFlags()

	glog.Infof("Cluster Autoscaler %s", ClusterAutoscalerVersion)

	correctEstimator := false
	for _, availableEstimator := range AvailableEstimators {
		if *estimatorFlag == availableEstimator {
			correctEstimator = true
		}
	}
	if !correctEstimator {
		glog.Fatalf("Unrecognized estimator: %v", *estimatorFlag)
	}

	go func() {
		http.Handle("/metrics", prometheus.Handler())
		err := http.ListenAndServe(*address, nil)
		glog.Fatalf("Failed to start metrics: %v", err)
	}()

	if !leaderElection.LeaderElect {
		run(nil)
	} else {
		id, err := os.Hostname()
		if err != nil {
			glog.Fatalf("Unable to get hostname: %v", err)
		}

		kubeClient := createKubeClient()
		kube_leaderelection.RunOrDie(kube_leaderelection.LeaderElectionConfig{
			Lock: &resourcelock.EndpointsLock{
				EndpointsMeta: apiv1.ObjectMeta{
					Namespace: "kube-system",
					Name:      "cluster-autoscaler",
				},
				Client: kubeClient,
				LockConfig: resourcelock.ResourceLockConfig{
					Identity:      id,
					EventRecorder: createEventRecorder(kubeClient),
				},
			},
			LeaseDuration: leaderElection.LeaseDuration.Duration,
			RenewDeadline: leaderElection.RenewDeadline.Duration,
			RetryPeriod:   leaderElection.RetryPeriod.Duration,
			Callbacks: kube_leaderelection.LeaderCallbacks{
				OnStartedLeading: run,
				OnStoppedLeading: func() {
					glog.Fatalf("lost master")
				},
			},
		})
	}
}
func main() {
	config := HollowNodeConfig{}
	config.addFlags(pflag.CommandLine)
	flag.InitFlags()

	if !knownMorphs.Has(config.Morph) {
		glog.Fatalf("Unknown morph: %v. Allowed values: %v", config.Morph, knownMorphs.List())
	}

	// create a client to communicate with API server.
	clientConfig, err := config.createClientConfigFromFile()
	if err != nil {
		glog.Fatalf("Failed to create a ClientConfig: %v. Exiting.", err)
	}
	cl, err := client.New(clientConfig)
	if err != nil {
		glog.Fatalf("Failed to create a Client: %v. Exiting.", err)
	}
	clientset, err := internalclientset.NewForConfig(clientConfig)
	if err != nil {
		glog.Fatalf("Failed to create a ClientSet: %v. Exiting.", err)
	}

	if config.Morph == "kubelet" {
		cadvisorInterface := new(cadvisortest.Fake)
		containerManager := cm.NewStubContainerManager()

		fakeDockerClient := dockertools.NewFakeDockerClient()
		fakeDockerClient.EnableSleep = true

		hollowKubelet := kubemark.NewHollowKubelet(
			config.NodeName,
			clientset,
			cadvisorInterface,
			fakeDockerClient,
			config.KubeletPort,
			config.KubeletReadOnlyPort,
			containerManager,
			maxPods,
			podsPerCore,
		)
		hollowKubelet.Run()
	}

	if config.Morph == "proxy" {
		eventBroadcaster := record.NewBroadcaster()
		recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "kube-proxy", Host: config.NodeName})

		iptInterface := fakeiptables.NewFake()

		serviceConfig := proxyconfig.NewServiceConfig()
		serviceConfig.RegisterHandler(&kubemark.FakeProxyHandler{})

		endpointsConfig := proxyconfig.NewEndpointsConfig()
		endpointsConfig.RegisterHandler(&kubemark.FakeProxyHandler{})

		hollowProxy := kubemark.NewHollowProxyOrDie(config.NodeName, cl, endpointsConfig, serviceConfig, iptInterface, eventBroadcaster, recorder)
		hollowProxy.Run()
	}
}
func main() {
	opt := options.NewHeapsterRunOptions()
	opt.AddFlags(pflag.CommandLine)

	flag.InitFlags()
	logs.InitLogs()
	defer logs.FlushLogs()
	verflag.PrintAndExitIfRequested()

	setMaxProcs(opt)
	glog.Infof(strings.Join(os.Args, " "))
	glog.Infof("Heapster version %v", version.HeapsterVersion)
	if err := validateFlags(opt); err != nil {
		glog.Fatal(err)
	}

	kubernetesUrl, err := getKubernetesAddress(opt.Sources)
	if err != nil {
		glog.Fatalf("Failed to get kubernetes address: %v", err)
	}
	sourceManager := createSourceManagerOrDie(opt.Sources)
	sinkManager, metricSink, historicalSource := createAndInitSinksOrDie(opt.Sinks, opt.HistoricalSource)

	podLister, nodeLister := getListersOrDie(kubernetesUrl)
	dataProcessors := createDataProcessorsOrDie(kubernetesUrl, podLister)

	man, err := manager.NewManager(sourceManager, dataProcessors, sinkManager,
		opt.MetricResolution, manager.DefaultScrapeOffset, manager.DefaultMaxParallelism)
	if err != nil {
		glog.Fatalf("Failed to create main manager: %v", err)
	}
	man.Start()

	if opt.EnableAPIServer {
		// Run API server in a separate goroutine
		createAndRunAPIServer(opt, metricSink, nodeLister, podLister)
	}

	mux := http.NewServeMux()
	promHandler := prometheus.Handler()
	handler := setupHandlers(metricSink, podLister, nodeLister, historicalSource)
	healthz.InstallHandler(mux, healthzChecker(metricSink))

	addr := fmt.Sprintf("%s:%d", opt.Ip, opt.Port)
	glog.Infof("Starting heapster on port %d", opt.Port)

	if len(opt.TLSCertFile) > 0 && len(opt.TLSKeyFile) > 0 {
		startSecureServing(opt, handler, promHandler, mux, addr)
	} else {
		mux.Handle("/", handler)
		mux.Handle("/metrics", promHandler)

		glog.Fatal(http.ListenAndServe(addr, mux))
	}
}
func main() {
	serverRunOptions := apiserver.NewServerRunOptions()

	// Parse command line flags.
	serverRunOptions.AddFlags(pflag.CommandLine)
	flag.InitFlags()

	if err := apiserver.Run(serverRunOptions); err != nil {
		glog.Fatalf("Error in bringing up the server: %v", err)
	}
}
func main() {
	serverRunOptions := apiserver.NewServerRunOptions()

	// Parse command line flags.
	serverRunOptions.AddUniversalFlags(pflag.CommandLine)
	serverRunOptions.AddEtcdStorageFlags(pflag.CommandLine)
	flag.InitFlags()

	if err := apiserver.Run(serverRunOptions, wait.NeverStop); err != nil {
		glog.Fatalf("Error in bringing up the server: %v", err)
	}
}
func main() {
	s := options.NewSchedulerServer()
	s.AddFlags(pflag.CommandLine)

	flag.InitFlags()
	logs.InitLogs()
	defer logs.FlushLogs()

	verflag.PrintAndExitIfRequested()

	app.Run(s)
}
func main() {
	config := options.NewKubeDNSConfig()
	config.AddFlags(pflag.CommandLine)

	flag.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	server := app.NewKubeDNSServerDefault(config)
	server.Run()
}
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	s := options.NewSchedulerServer()
	s.AddFlags(pflag.CommandLine)

	flag.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	app.Run(s)
}
func main() {
	s := options.NewSchedulerServer()
	s.AddFlags(pflag.CommandLine)

	flag.InitFlags()
	logs.InitLogs()
	defer logs.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := app.Run(s); err != nil {
		glog.Fatalf("scheduler app failed to run: %v", err)
	}
}
func main() {
	config := options.NewKubeDNSConfig()
	config.AddFlags(pflag.CommandLine)

	flag.InitFlags()
	logs.InitLogs()
	defer logs.FlushLogs()

	verflag.PrintAndExitIfRequested()

	glog.V(0).Infof("version: %+v", version.Get())

	server := app.NewKubeDNSServerDefault(config)
	server.Run()
}
func main() {
	options := server.NewOptions()
	configureFlags(options, pflag.CommandLine)
	flag.InitFlags()

	logs.InitLogs()
	defer logs.FlushLogs()

	glog.Infof("dnsmasq-metrics v%s", version.VERSION)

	verflag.PrintAndExitIfRequested()

	server := server.NewServer()
	server.Run(options)
}
func main() {
	s := options.NewCMServer()
	s.AddFlags(pflag.CommandLine, app.KnownControllers(), app.ControllersDisabledByDefault.List())

	flag.InitFlags()
	logs.InitLogs()
	defer logs.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := app.Run(s); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
func main() {
	s := options.NewCMServer()
	s.AddFlags(pflag.CommandLine)

	flag.InitFlags()
	logs.InitLogs()
	defer logs.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := app.Run(s); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
func main() {
	s := service.NewSchedulerServer()
	s.AddStandaloneFlags(pflag.CommandLine)

	flag.InitFlags()
	logs.InitLogs()
	defer logs.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := s.Run(hyperkube.Nil(), pflag.CommandLine.Args()); err != nil {
		fmt.Fprintf(os.Stderr, err.Error())
		os.Exit(1)
	}
}
func main() {
	s := controllermanager.NewCMServer()
	s.AddFlags(pflag.CommandLine)

	flag.InitFlags()
	logs.InitLogs()
	defer logs.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := s.Run(pflag.CommandLine.Args()); err != nil {
		fmt.Fprintf(os.Stderr, err.Error())
		os.Exit(1)
	}
}
func main() {
	serverRunOptions := apiserver.NewServerRunOptions()

	// Parse command line flags.
	serverRunOptions.GenericServerRunOptions.AddUniversalFlags(pflag.CommandLine)
	serverRunOptions.Etcd.AddFlags(pflag.CommandLine)
	serverRunOptions.SecureServing.AddFlags(pflag.CommandLine)
	serverRunOptions.SecureServing.AddDeprecatedFlags(pflag.CommandLine)
	serverRunOptions.InsecureServing.AddFlags(pflag.CommandLine)
	serverRunOptions.InsecureServing.AddDeprecatedFlags(pflag.CommandLine)
	flag.InitFlags()

	if err := serverRunOptions.Run(wait.NeverStop); err != nil {
		glog.Fatalf("Error in bringing up the server: %v", err)
	}
}
func main() {
	s := service.NewKubeletExecutorServer()
	s.AddFlags(pflag.CommandLine)

	flag.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := s.Run(hyperkube.Nil(), pflag.CommandLine.Args()); err != nil {
		fmt.Fprintf(os.Stderr, err.Error())
		os.Exit(1)
	}
}
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	s := options.NewCMServer()
	s.AddFlags(pflag.CommandLine)

	flag.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := app.Run(s); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
func main() {
	rand.Seed(time.Now().UTC().UnixNano())

	s := options.NewAPIServer()
	s.AddFlags(pflag.CommandLine)

	flag.InitFlags()
	logs.InitLogs()
	defer logs.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := app.Run(s); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	s := controllermanager.NewCMServer()
	s.AddFlags(pflag.CommandLine)

	flag.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := s.Run(pflag.CommandLine.Args()); err != nil {
		fmt.Fprintf(os.Stderr, err.Error())
		os.Exit(1)
	}
}
func main() {
	rand.Seed(time.Now().UTC().UnixNano())

	s := genericoptions.NewServerRunOptions().WithEtcdOptions()
	s.AddUniversalFlags(pflag.CommandLine)
	s.AddEtcdStorageFlags(pflag.CommandLine)

	flag.InitFlags()
	logs.InitLogs()
	defer logs.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := app.Run(s); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	rand.Seed(time.Now().UTC().UnixNano())

	s := genericoptions.NewServerRunOptions()
	s.AddFlags(pflag.CommandLine)

	flag.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := app.Run(s); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
func main() {
	s := options.NewCloudControllerManagerServer()
	s.AddFlags(pflag.CommandLine)

	flag.InitFlags()
	logs.InitLogs()
	defer logs.FlushLogs()

	verflag.PrintAndExitIfRequested()

	cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	if err != nil {
		glog.Fatalf("Cloud provider could not be initialized: %v", err)
	}

	if err := app.Run(s, cloud); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
func main() {
	config := options.NewProxyConfig()
	config.AddFlags(pflag.CommandLine)

	flag.InitFlags()
	logs.InitLogs()
	defer logs.FlushLogs()

	verflag.PrintAndExitIfRequested()

	s, err := app.NewProxyServerDefault(config)
	if err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}

	if err = s.Run(); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
// Execute implements main().
// If you don't need any non-default behavior, use as:
// args.Default().Execute(...)
func (g *GeneratorArgs) Execute(nameSystems namer.NameSystems, defaultSystem string, pkgs func(*generator.Context, *GeneratorArgs) generator.Packages) error {
	utilflag.InitFlags()
	util.InitLogs()

	b, err := g.NewBuilder()
	if err != nil {
		return fmt.Errorf("Failed making a parser: %v", err)
	}

	c, err := generator.NewContext(b, nameSystems, defaultSystem)
	if err != nil {
		return fmt.Errorf("Failed making a context: %v", err)
	}

	c.Verify = g.VerifyOnly
	packages := pkgs(c, g)
	if err := c.ExecutePackages(g.OutputBase, packages); err != nil {
		return fmt.Errorf("Failed executing generator: %v", err)
	}

	return nil
}
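// A minimal usage sketch for Execute, following the doc comment above ("use as:
// args.Default().Execute(...)"). The name-system wiring and the empty package
// list here are illustrative assumptions, not the wiring used by any real
// generator command.
func runGeneratorExample() {
	arguments := args.Default()
	err := arguments.Execute(
		// A single "public" namer is assumed here; real generators register
		// whatever name systems they actually need.
		namer.NameSystems{"public": namer.NewPublicNamer(0)},
		"public",
		func(c *generator.Context, a *args.GeneratorArgs) generator.Packages {
			// Return the packages to generate into; left empty in this sketch.
			return generator.Packages{}
		},
	)
	if err != nil {
		glog.Fatalf("Error: %v", err)
	}
}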
func main() {
	gruntime.GOMAXPROCS(gruntime.NumCPU())
	addFlags(pflag.CommandLine)

	flag.InitFlags()
	utilruntime.ReallyCrash = true
	util.InitLogs()
	defer util.FlushLogs()

	go func() {
		defer util.FlushLogs()
		time.Sleep(maxTestTimeout)
		glog.Fatalf("This test has timed out.")
	}()

	glog.Infof("Running tests for APIVersion: %s", os.Getenv("KUBE_TEST_API"))

	firstManifestURL := ServeCachedManifestFile(testPodSpecFile)
	secondManifestURL := ServeCachedManifestFile(testPodSpecFile)
	apiServerURL, _ := startComponents(firstManifestURL, secondManifestURL)

	// Ok. we're good to go.
	glog.Infof("API Server started on %s", apiServerURL)
	// Wait for the synchronization threads to come up.
	time.Sleep(time.Second * 10)

	kubeClient := client.NewOrDie(
		&restclient.Config{
			Host:          apiServerURL,
			ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()},
			QPS:           20,
			Burst:         50,
		})
	// TODO: caesarxuchao: hacky way to specify version of Experimental client.
	// We will fix this by supporting multiple group versions in Config
	kubeClient.ExtensionsClient = client.NewExtensionsOrDie(
		&restclient.Config{
			Host:          apiServerURL,
			ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Extensions.GroupVersion()},
			QPS:           20,
			Burst:         50,
		})

	// Run tests in parallel
	testFuncs := []testFunc{
		runReplicationControllerTest,
		runAtomicPutTest,
		runPatchTest,
		runServiceTest,
		runAPIVersionsTest,
		runMasterServiceTest,
		func(c *client.Client) {
			runSelfLinkTestOnNamespace(c, api.NamespaceDefault)
			runSelfLinkTestOnNamespace(c, "other")
		},
	}

	// Only run at most maxConcurrency tests in parallel.
	if maxConcurrency <= 0 {
		maxConcurrency = len(testFuncs)
	}
	glog.Infof("Running %d tests in parallel.", maxConcurrency)
	ch := make(chan struct{}, maxConcurrency)

	var wg sync.WaitGroup
	wg.Add(len(testFuncs))
	for i := range testFuncs {
		f := testFuncs[i]
		go func() {
			ch <- struct{}{}
			f(kubeClient)
			<-ch
			wg.Done()
		}()
	}
	wg.Wait()
	close(ch)

	// Check that kubelet tried to make the containers.
	// Using a set to list unique creation attempts. Our fake is
	// really stupid, so kubelet tries to create these multiple times.
	createdConts := sets.String{}
	for _, p := range fakeDocker1.Created {
		// The last 8 characters are random, so slice them off.
		if n := len(p); n > 8 {
			createdConts.Insert(p[:n-8])
		}
	}
	for _, p := range fakeDocker2.Created {
		// The last 8 characters are random, so slice them off.
		if n := len(p); n > 8 {
			createdConts.Insert(p[:n-8])
		}
	}
	// We expect 12 unique containers in total:
	//   2 pod infra containers + 2 containers from the replication controller +
	//   1 pod infra container + 2 containers from the URL on the first Kubelet +
	//   1 pod infra container + 2 containers from the URL on the second Kubelet +
	//   1 pod infra container + 1 container from the service test.
	if len(createdConts) != 12 {
		glog.Fatalf("Expected 12 containers; got %v\n\nlist of created containers:\n\n%#v\n\nDocker 1 Created:\n\n%#v\n\nDocker 2 Created:\n\n%#v\n\n", len(createdConts), createdConts.List(), fakeDocker1.Created, fakeDocker2.Created)
	}
	glog.Infof("OK - found created containers: %#v", createdConts.List())

	// This test doesn't run with the others because it can't run in
	// parallel and also it schedules extra pods which would change the
	// above pod counting logic.
	runSchedulerNoPhantomPodsTest(kubeClient)

	glog.Infof("\n\nLogging high latency metrics from the 10250 kubelet")
	e2e.HighLatencyKubeletOperations(nil, 1*time.Second, "localhost:10250")
	glog.Infof("\n\nLogging high latency metrics from the 10251 kubelet")
	e2e.HighLatencyKubeletOperations(nil, 1*time.Second, "localhost:10251")
}