Example #1
// NewSchedulerCommand provides a CLI handler for the 'scheduler' command
func NewSchedulerCommand(name, fullName string, out io.Writer) *cobra.Command {
	s := app.NewSchedulerServer()

	cmd := &cobra.Command{
		Use:   name,
		Short: "Launch Kubernetes scheduler (kube-scheduler)",
		Long:  controllersLong,
		Run: func(c *cobra.Command, args []string) {
			startProfiler()

			util.InitLogs()
			defer util.FlushLogs()

			if err := s.Run(pflag.CommandLine.Args()); err != nil {
				fmt.Fprintf(os.Stderr, "%v\n", err)
				os.Exit(1)
			}
		},
	}
	cmd.SetOutput(out)

	flags := cmd.Flags()
	flags.SetNormalizeFunc(util.WordSepNormalizeFunc)
	flags.AddGoFlagSet(flag.CommandLine)
	s.AddFlags(flags)

	return cmd
}
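The command returned here is meant to be executed by a parent binary. A minimal sketch of such an entry point, assuming the same imports as the example above; the command name, fullName string, and output writer are illustrative choices, not taken from the original project:

// Hypothetical entry point for the command built above.
func main() {
	cmd := NewSchedulerCommand("kube-scheduler", "kube-scheduler", os.Stdout)
	// Execute parses the flags registered via AddFlags and invokes the Run handler.
	if err := cmd.Execute(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}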
Example #2
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	s := app.NewSchedulerServer()
	s.AddFlags(pflag.CommandLine)

	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	//s.Run(pflag.CommandLine.Args())
}
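Note that this main() registers flags and initializes logging but never starts the scheduler, because the s.Run call is commented out. A sketch of the same entry point with the scheduler actually started follows; the error handling is an assumption, modeled on Example #1 rather than taken from this project:

func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	s := app.NewSchedulerServer()
	s.AddFlags(pflag.CommandLine)

	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	// Start the scheduler with any remaining command-line arguments.
	if err := s.Run(pflag.CommandLine.Args()); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}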
Example #3
// NewScheduler creates a new hyperkube Server object that includes the
// description and flags.
func NewScheduler() *Server {
	s := scheduler.NewSchedulerServer()

	hks := Server{
		SimpleUsage: "scheduler",
		Long:        "Implements a Kubernetes scheduler.  This will assign pods to kubelets based on capacity and constraints.",
		Run: func(_ *Server, args []string) error {
			return s.Run(args)
		},
	}
	s.AddFlags(hks.Flags())
	return &hks
}
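The returned Server is intended to be registered with a hyperkube-style dispatcher. A rough sketch of that wiring; the HyperKube type and its AddServer/RunToExit methods are assumptions about the surrounding framework and are not shown in the original example:

// Hypothetical all-in-one binary that dispatches to registered servers.
func main() {
	hk := HyperKube{
		Name: "hyperkube",
		Long: "An all-in-one binary for Kubernetes components.",
	}
	hk.AddServer(NewScheduler())
	hk.RunToExit(os.Args)
}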
Example #4
func main() {
	// embed kubectl
	if filepath.Base(os.Args[0]) == "kubectl" {
		cmd := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, os.Stdout, os.Stderr)
		if err := cmd.Execute(); err != nil {
			os.Exit(1)
		}
		return
	}

	usr, err := user.Current()
	if err != nil {
		log.Fatal(err)
	}

	mfs := pflag.NewFlagSet("main", pflag.ExitOnError)
	nodes := mfs.StringSlice("nodes", []string{}, "list of nodes to make part of cluster")
	sshKeyfile := mfs.String("ssh-keyfile", usr.HomeDir+"/.vagrant.d/insecure_private_key", "private ssh key to use for tunnels")
	sshUser := mfs.String("ssh-user", "core", "ssh user to use for tunnels")
	clusterIPRange := mfs.String("service-cluster-ip-range", "10.1.30.0/24", "A CIDR notation IP range from which to assign service cluster IPs. This must not overlap with any IP ranges assigned to nodes for pods.")
	mfs.Parse(os.Args)

	config := &ssh.ClientConfig{
		User: *sshUser,
		Auth: []ssh.AuthMethod{
			PublicKeyFile(*sshKeyfile),
		},
	}
	for _, remoteHost := range *nodes {
		// Dial your ssh server.
		go func(host string) {
			// Serve HTTP with your SSH server acting as a reverse proxy.
			go func() {
				b := &backoff.Backoff{
					//These are the defaults
					Min:    100 * time.Millisecond,
					Max:    10 * time.Second,
					Factor: 2,
					Jitter: false,
				}
				for {
					conn, err := ssh.Dial("tcp", host, config)
					if err != nil {
						log.Println("unable to connect, retrying:", err)
						time.Sleep(b.Duration())
						continue
					}
					defer conn.Close()

					// Request the remote side to open port 8080 on all interfaces.
					l, err := conn.Listen("tcp", remoteListen)
					if err != nil {
						log.Println("unable to register tcp forward, retrying:", err)
						time.Sleep(b.Duration())
						continue
					}
					defer l.Close()
					fwd, _ := forward.New()
					http.Serve(l, http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
						req.URL = testutils.ParseURI("http://localhost:8080")
						fwd.ServeHTTP(resp, req)
					}))
					log.Println("proxy connection broken, reconnecting....")
					time.Sleep(b.Duration())
				}
			}()
			go func() {
				// this will block, and the kubelet will stop once the connection is broken
				// loop for reconnection
				b := &backoff.Backoff{
					//These are the defaults
					Min:    100 * time.Millisecond,
					Max:    10 * time.Second,
					Factor: 2,
					Jitter: false,
				}

				for {
					ip, _, err := net.SplitHostPort(host)
					if err != nil {
						log.Fatalf("unable split host port: %v", err)
						return
					}
					cmd := fmt.Sprintf("sudo /usr/bin/kubelet --hostname-override=%s --api-servers=http://localhost:8080", ip)
					_, err = executeCmd(cmd, host, config)
					if err != nil {
						log.Println("unable to execute kubelet, retrying:", err)
					}
					// if we got here something went wrong
					dur := b.Duration()
					log.Println("kubelet connection broken, reconnecting in", dur)
					time.Sleep(dur)

				}
			}()
			<-make(chan interface{})
		}(remoteHost)
	}

	go func() {
		// etcd reads os.Args, so we have to mess with them
		os.Args = []string{"etcd"}
		etcdmain.Main()
	}()

	go func() {
		s := kubeapiserver.NewAPIServer()
		fs := pflag.NewFlagSet("apiserver", pflag.ContinueOnError)
		s.AddFlags(fs)
		fs.Parse([]string{
			"--service-cluster-ip-range=" + *clusterIPRange,
			"--etcd-servers=http://127.0.0.1:2379",
			"--ssh-keyfile=" + *sshKeyfile,
			"--ssh-user="******"controller", pflag.ContinueOnError)
		s.AddFlags(fs)
		fs.Parse([]string{})
		s.Run([]string{})
	}()

	go func() {
		s := scheduler.NewSchedulerServer()
		fs := pflag.NewFlagSet("scheduler", pflag.ContinueOnError)
		s.AddFlags(fs)
		fs.Parse([]string{})
		s.Run([]string{})
	}()
	<-make(chan interface{})
}
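This example calls two helpers, PublicKeyFile and executeCmd, whose definitions are not shown. Plausible sketches of both follow, assuming the golang.org/x/crypto/ssh and io/ioutil imports; these are reconstructions for illustration, not the project's actual code:

// PublicKeyFile loads a private key from disk and wraps it as an ssh.AuthMethod.
// Sketch only; the original implementation is not shown in the example.
func PublicKeyFile(file string) ssh.AuthMethod {
	buf, err := ioutil.ReadFile(file)
	if err != nil {
		log.Fatalf("unable to read ssh key %s: %v", file, err)
	}
	key, err := ssh.ParsePrivateKey(buf)
	if err != nil {
		log.Fatalf("unable to parse ssh key %s: %v", file, err)
	}
	return ssh.PublicKeys(key)
}

// executeCmd runs a single command over a new SSH session and returns its
// combined output. Sketch only; the original implementation is not shown.
func executeCmd(cmd, host string, config *ssh.ClientConfig) (string, error) {
	conn, err := ssh.Dial("tcp", host, config)
	if err != nil {
		return "", err
	}
	defer conn.Close()

	session, err := conn.NewSession()
	if err != nil {
		return "", err
	}
	defer session.Close()

	out, err := session.CombinedOutput(cmd)
	return string(out), err
}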