// NewControllersCommand provides a CLI handler for the 'controller-manager' command
func NewControllersCommand(name, fullName string, out io.Writer) *cobra.Command {
	controllerOptions := controlleroptions.NewCMServer()

	cmd := &cobra.Command{
		Use:   name,
		Short: "Launch Kubernetes controller manager (kube-controller-manager)",
		Long:  controllersLong,
		Run: func(c *cobra.Command, args []string) {
			startProfiler()

			logs.InitLogs()
			defer logs.FlushLogs()

			if err := controllerapp.Run(controllerOptions); err != nil {
				fmt.Fprintf(os.Stderr, "%v\n", err)
				os.Exit(1)
			}
		},
	}
	cmd.SetOutput(out)

	flags := cmd.Flags()
	flags.SetNormalizeFunc(kflag.WordSepNormalizeFunc)
	controllerOptions.AddFlags(flags)

	return cmd
}
// NewProxyCommand provides a CLI handler for the 'proxy' command
func NewProxyCommand(name, fullName string, out io.Writer) *cobra.Command {
	proxyConfig := proxyoptions.NewProxyConfig()

	cmd := &cobra.Command{
		Use:   name,
		Short: "Launch Kubernetes proxy (kube-proxy)",
		Long:  proxyLong,
		Run: func(c *cobra.Command, args []string) {
			startProfiler()

			logs.InitLogs()
			defer logs.FlushLogs()

			s, err := proxyapp.NewProxyServerDefault(proxyConfig)
			kcmdutil.CheckErr(err)

			if err := s.Run(); err != nil {
				fmt.Fprintf(os.Stderr, "%v\n", err)
				os.Exit(1)
			}
		},
	}
	cmd.SetOutput(out)

	flags := cmd.Flags()
	flags.SetNormalizeFunc(kflag.WordSepNormalizeFunc)
	proxyConfig.AddFlags(flags)

	return cmd
}
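// The constructors above share a common shape: build an options struct, wrap
// the server's Run function in a cobra command, and normalize flag names.
// Below is a minimal sketch of how they might be wired under a parent
// command; the parent name "kube" and this wiring are illustrative
// assumptions, not taken from the original source.
func newKubeCommand(out io.Writer) *cobra.Command {
	root := &cobra.Command{
		Use:   "kube",
		Short: "Kubernetes server components",
		Run: func(c *cobra.Command, args []string) {
			// With no subcommand, print usage for the registered servers.
			c.Help()
		},
	}
	root.SetOutput(out)
	root.AddCommand(NewControllersCommand("controller-manager", "kube controller-manager", out))
	root.AddCommand(NewProxyCommand("proxy", "kube proxy", out))
	return root
}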
func main() {
	logs.InitLogs()
	defer logs.FlushLogs()
	defer serviceability.BehaviorOnPanic(os.Getenv("OPENSHIFT_ON_PANIC"))()
	defer serviceability.Profile(os.Getenv("OPENSHIFT_PROFILE")).Stop()
	startProfiler()

	rand.Seed(time.Now().UTC().UnixNano())
	runtime.GOMAXPROCS(runtime.NumCPU())

	flag.Parse()

	// TODO convert to flags instead of a config file?
	configurationPath := ""
	if flag.NArg() > 0 {
		configurationPath = flag.Arg(0)
	}
	if configurationPath == "" {
		configurationPath = os.Getenv("REGISTRY_CONFIGURATION_PATH")
	}

	if configurationPath == "" {
		fmt.Println("configuration path unspecified")
		os.Exit(1)
	}

	// Prevent a warning about an unrecognized environment variable
	os.Unsetenv("REGISTRY_CONFIGURATION_PATH")

	configFile, err := os.Open(configurationPath)
	if err != nil {
		log.Fatalf("Unable to open configuration file: %s", err)
	}

	dockerregistry.Execute(configFile)
}
// RunE2ETests checks configuration parameters (specified through flags) and then runs
// E2E tests using the Ginkgo runner.
// If a "report directory" is specified, one or more JUnit test reports will be
// generated in this directory, and cluster logs will also be saved.
// This function is called on each Ginkgo node in parallel mode.
func RunE2ETests(t *testing.T) {
	runtime.ReallyCrash = true
	logs.InitLogs()
	defer logs.FlushLogs()

	gomega.RegisterFailHandler(ginkgo.Fail)

	// Disable skipped tests unless they are explicitly requested.
	if config.GinkgoConfig.FocusString == "" && config.GinkgoConfig.SkipString == "" {
		config.GinkgoConfig.SkipString = `\[Flaky\]|\[Feature:.+\]`
	}

	// Run tests through the Ginkgo runner with output to console + JUnit for Jenkins
	var r []ginkgo.Reporter
	if framework.TestContext.ReportDir != "" {
		// TODO: we should probably only be trying to create this directory once
		// rather than once-per-Ginkgo-node.
		if err := os.MkdirAll(framework.TestContext.ReportDir, 0755); err != nil {
			glog.Errorf("Failed creating report directory: %v", err)
		} else {
			r = append(r, reporters.NewJUnitReporter(path.Join(framework.TestContext.ReportDir,
				fmt.Sprintf("junit_%v%02d.xml", framework.TestContext.ReportPrefix, config.GinkgoConfig.ParallelNode))))
		}
	}
	glog.Infof("Starting e2e run %q on Ginkgo node %d", framework.RunId, config.GinkgoConfig.ParallelNode)

	ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "Kubernetes e2e suite", r)
}
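// A hypothetical `go test` entry point invoking RunE2ETests above; the
// function name TestE2E and its placement in a _test.go file are assumptions
// about how the suite is launched, not part of the excerpt.
func TestE2E(t *testing.T) {
	RunE2ETests(t)
}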
// NewKubeletCommand provides a CLI handler for the 'kubelet' command
func NewKubeletCommand(name, fullName string, out io.Writer) *cobra.Command {
	kubeletOptions := kubeletoptions.NewKubeletServer()

	cmd := &cobra.Command{
		Use:   name,
		Short: "Launch the Kubelet (kubelet)",
		Long:  kubeletLog,
		Run: func(c *cobra.Command, args []string) {
			startProfiler()

			logs.InitLogs()
			defer logs.FlushLogs()

			if err := kubeletapp.Run(kubeletOptions, nil); err != nil {
				fmt.Fprintf(os.Stderr, "%v\n", err)
				os.Exit(1)
			}
		},
	}
	cmd.SetOutput(out)

	flags := cmd.Flags()
	flags.SetNormalizeFunc(kflag.WordSepNormalizeFunc)
	kubeletOptions.AddFlags(flags)

	return cmd
}
func Run() error {
	logs.InitLogs()
	defer logs.FlushLogs()

	cmd := kubefed.NewKubeFedCommand(cmdutil.NewFactory(nil), os.Stdin, os.Stdout, os.Stderr)
	return cmd.Execute()
}
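// A minimal sketch of a main function wrapping the Run pattern above,
// assuming the caller only needs an error printed and a non-zero exit code
// on failure; this wrapper is illustrative, not from the original source.
func main() {
	if err := Run(); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}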
func main() {
	opt := options.NewHeapsterRunOptions()
	opt.AddFlags(pflag.CommandLine)

	flag.InitFlags()
	logs.InitLogs()
	defer logs.FlushLogs()

	verflag.PrintAndExitIfRequested()

	setMaxProcs(opt)
	glog.Infof(strings.Join(os.Args, " "))
	glog.Infof("Heapster version %v", version.HeapsterVersion)

	if err := validateFlags(opt); err != nil {
		glog.Fatal(err)
	}

	kubernetesUrl, err := getKubernetesAddress(opt.Sources)
	if err != nil {
		glog.Fatalf("Failed to get kubernetes address: %v", err)
	}
	sourceManager := createSourceManagerOrDie(opt.Sources)
	sinkManager, metricSink, historicalSource := createAndInitSinksOrDie(opt.Sinks, opt.HistoricalSource)

	podLister, nodeLister := getListersOrDie(kubernetesUrl)
	dataProcessors := createDataProcessorsOrDie(kubernetesUrl, podLister)

	man, err := manager.NewManager(sourceManager, dataProcessors, sinkManager,
		opt.MetricResolution, manager.DefaultScrapeOffset, manager.DefaultMaxParallelism)
	if err != nil {
		glog.Fatalf("Failed to create main manager: %v", err)
	}
	man.Start()

	if opt.EnableAPIServer {
		// Run the API server in a separate goroutine
		createAndRunAPIServer(opt, metricSink, nodeLister, podLister)
	}

	mux := http.NewServeMux()
	promHandler := prometheus.Handler()
	handler := setupHandlers(metricSink, podLister, nodeLister, historicalSource)
	healthz.InstallHandler(mux, healthzChecker(metricSink))

	addr := fmt.Sprintf("%s:%d", opt.Ip, opt.Port)
	glog.Infof("Starting heapster on port %d", opt.Port)

	if len(opt.TLSCertFile) > 0 && len(opt.TLSKeyFile) > 0 {
		startSecureServing(opt, handler, promHandler, mux, addr)
	} else {
		mux.Handle("/", handler)
		mux.Handle("/metrics", promHandler)
		glog.Fatal(http.ListenAndServe(addr, mux))
	}
}
func Run() error {
	logs.InitLogs()
	defer logs.FlushLogs()

	// We do not want these flags to show up in --help
	pflag.CommandLine.MarkHidden("google-json-key")
	pflag.CommandLine.MarkHidden("log-flush-frequency")

	cmd := cmd.NewKubeadmCommand(cmdutil.NewFactory(nil), os.Stdin, os.Stdout, os.Stderr)
	return cmd.Execute()
}
func main() {
	config := options.NewKubeDNSConfig()
	config.AddFlags(pflag.CommandLine)

	flag.InitFlags()
	logs.InitLogs()
	defer logs.FlushLogs()

	verflag.PrintAndExitIfRequested()

	server := app.NewKubeDNSServerDefault(config)
	server.Run()
}
func main() {
	s := options.NewSchedulerServer()
	s.AddFlags(pflag.CommandLine)

	flag.InitFlags()
	logs.InitLogs()
	defer logs.FlushLogs()

	verflag.PrintAndExitIfRequested()

	app.Run(s)
}
func main() {
	logs.InitLogs()
	defer logs.FlushLogs()

	if len(os.Getenv("GOMAXPROCS")) == 0 {
		runtime.GOMAXPROCS(runtime.NumCPU())
	}

	cmd := server.NewCommandStartDiscoveryServer(os.Stdout, os.Stderr)
	if err := cmd.Execute(); err != nil {
		cmdutil.CheckErr(err)
	}
}
func main() {
	s := options.NewSchedulerServer()
	s.AddFlags(pflag.CommandLine)

	flag.InitFlags()
	logs.InitLogs()
	defer logs.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := app.Run(s); err != nil {
		glog.Fatalf("scheduler app failed to run: %v", err)
	}
}
func main() {
	options := server.NewOptions()
	configureFlags(options, pflag.CommandLine)
	flag.InitFlags()
	logs.InitLogs()
	defer logs.FlushLogs()

	glog.Infof("dnsmasq-metrics v%s", version.VERSION)

	verflag.PrintAndExitIfRequested()

	server := server.NewServer()
	server.Run(options)
}
func main() {
	s := options.NewCMServer()
	s.AddFlags(pflag.CommandLine)

	flag.InitFlags()
	logs.InitLogs()
	defer logs.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := app.Run(s); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
func main() {
	s := service.NewSchedulerServer()
	s.AddStandaloneFlags(pflag.CommandLine)

	flag.InitFlags()
	logs.InitLogs()
	defer logs.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := s.Run(hyperkube.Nil(), pflag.CommandLine.Args()); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
func main() {
	s := options.NewCMServer()
	s.AddFlags(pflag.CommandLine, app.KnownControllers(), app.ControllersDisabledByDefault.List())

	flag.InitFlags()
	logs.InitLogs()
	defer logs.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := app.Run(s); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
func main() {
	s := controllermanager.NewCMServer()
	s.AddFlags(pflag.CommandLine)

	flag.InitFlags()
	logs.InitLogs()
	defer logs.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := s.Run(pflag.CommandLine.Args()); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
func main() {
	logs.InitLogs()
	defer logs.FlushLogs()
	defer serviceability.BehaviorOnPanic(os.Getenv("OPENSHIFT_ON_PANIC"))()
	defer serviceability.Profile(os.Getenv("OPENSHIFT_PROFILE")).Stop()

	if len(os.Getenv("GOMAXPROCS")) == 0 {
		runtime.GOMAXPROCS(runtime.NumCPU())
	}

	basename := filepath.Base(os.Args[0])
	command := cli.CommandFor(basename)
	if err := command.Execute(); err != nil {
		os.Exit(1)
	}
}
func main() {
	rand.Seed(time.Now().UTC().UnixNano())

	s := options.NewAPIServer()
	s.AddFlags(pflag.CommandLine)

	flag.InitFlags()
	logs.InitLogs()
	defer logs.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := app.Run(s); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
func main() {
	rand.Seed(time.Now().UTC().UnixNano())

	s := genericoptions.NewServerRunOptions().WithEtcdOptions()
	s.AddUniversalFlags(pflag.CommandLine)
	s.AddEtcdStorageFlags(pflag.CommandLine)

	flag.InitFlags()
	logs.InitLogs()
	defer logs.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := app.Run(s); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
func main() {
	s := options.NewCloudControllerManagerServer()
	s.AddFlags(pflag.CommandLine)

	flag.InitFlags()
	logs.InitLogs()
	defer logs.FlushLogs()

	verflag.PrintAndExitIfRequested()

	cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	if err != nil {
		glog.Fatalf("Cloud provider could not be initialized: %v", err)
	}

	if err := app.Run(s, cloud); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
func main() {
	config := options.NewProxyConfig()
	config.AddFlags(pflag.CommandLine)

	flag.InitFlags()
	logs.InitLogs()
	defer logs.FlushLogs()

	verflag.PrintAndExitIfRequested()

	s, err := app.NewProxyServerDefault(config)
	if err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}

	if err = s.Run(); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
func main() {
	quitChannel := make(chan struct{})

	flag.Var(&argSources, "source", "source(s) to read events from")
	flag.Var(&argSinks, "sink", "external sink(s) that receive events")
	flag.BoolVar(&argVersion, "version", false, "print version info and exit")
	flag.Parse()

	if argVersion {
		fmt.Println(version.VersionInfo())
		os.Exit(0)
	}

	logs.InitLogs()
	defer logs.FlushLogs()

	setMaxProcs()
	glog.Infof(strings.Join(os.Args, " "))
	glog.Infof("Eventer version %v", version.HeapsterVersion)
	if err := validateFlags(); err != nil {
		glog.Fatal(err)
	}

	// sources
	if len(argSources) != 1 {
		glog.Fatal("Wrong number of sources specified")
	}
	sourceFactory := sources.NewSourceFactory()
	sources, err := sourceFactory.BuildAll(argSources)
	if err != nil {
		glog.Fatalf("Failed to create sources: %v", err)
	}
	if len(sources) != 1 {
		glog.Fatal("Requires exactly 1 source")
	}

	// sinks
	sinksFactory := sinks.NewSinkFactory()
	sinkList := sinksFactory.BuildAll(argSinks)
	if len([]flags.Uri(argSinks)) != 0 && len(sinkList) == 0 {
		glog.Fatal("No available sink to use")
	}

	for _, sink := range sinkList {
		glog.Infof("Starting with %s sink", sink.Name())
	}
	sinkManager, err := sinks.NewEventSinkManager(sinkList, sinks.DefaultSinkExportEventsTimeout, sinks.DefaultSinkStopTimeout)
	if err != nil {
		glog.Fatalf("Failed to create sink manager: %v", err)
	}

	// main manager
	manager, err := manager.NewManager(sources[0], sinkManager, *argFrequency)
	if err != nil {
		glog.Fatalf("Failed to create main manager: %v", err)
	}
	manager.Start()

	glog.Infof("Starting eventer")
	<-quitChannel
}
// Run the server. This will pick the appropriate server and run it.
func (hk *HyperKube) Run(args []string) error {
	// If we are called directly, parse all flags up to the first real
	// argument. That should be the server to run.
	baseCommand := path.Base(args[0])
	serverName := baseCommand
	if serverName == hk.Name {
		args = args[1:]

		baseFlags := hk.Flags()
		baseFlags.SetInterspersed(false) // Only parse flags up to the next real command
		err := baseFlags.Parse(args)
		if err != nil || hk.helpFlagVal {
			if err != nil {
				hk.Println("Error:", err)
			}
			hk.Usage()
			return err
		}

		verflag.PrintAndExitIfRequested()

		args = baseFlags.Args()
		if len(args) > 0 && len(args[0]) > 0 {
			serverName = args[0]
			baseCommand = baseCommand + " " + serverName
			args = args[1:]
		} else {
			err = errors.New("No server specified")
			hk.Printf("Error: %v\n\n", err)
			hk.Usage()
			return err
		}
	}

	s, err := hk.FindServer(serverName)
	if err != nil {
		hk.Printf("Error: %v\n\n", err)
		hk.Usage()
		return err
	}

	s.Flags().AddFlagSet(hk.Flags())
	err = s.Flags().Parse(args)
	if err != nil || hk.helpFlagVal {
		if err != nil {
			hk.Printf("Error: %v\n\n", err)
		}
		s.Usage()
		return err
	}

	verflag.PrintAndExitIfRequested()

	logs.InitLogs()
	defer logs.FlushLogs()

	err = s.Run(s, s.Flags().Args())
	if err != nil {
		hk.Println("Error:", err)
	}

	return err
}
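// A minimal sketch of a main function driving HyperKube.Run above, assuming
// the dispatch-by-basename behavior shown in Run; the HyperKube construction
// and the server registration step mentioned in the comment are hypothetical
// here, not taken from the excerpt.
func main() {
	hk := HyperKube{
		Name: "hyperkube",
		Long: "A single binary bundling several Kubernetes servers.",
	}
	// Individual servers would be registered on hk here before dispatch,
	// so that FindServer can resolve names like "apiserver" or "kubelet".
	if err := hk.Run(os.Args); err != nil {
		os.Exit(1)
	}
}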