/* WARNING: this logic is duplicated, with minor changes, in cmd/hyperkube/kubectl.go Any salient changes here will need to be manually reflected in that file. */ func Run() error { logs.InitLogs() defer logs.FlushLogs() cmd := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, os.Stdout, os.Stderr) return cmd.Execute() }
func NewKubectlServer() *Server { cmd := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, os.Stdout, os.Stderr) localFlags := cmd.LocalFlags() localFlags.SetInterspersed(false) return &Server{ name: "kubectl", SimpleUsage: "Kubernetes command line client", Long: "Kubernetes command line client", Run: func(s *Server, args []string) error { cmd.SetArgs(args) return cmd.Execute() }, flags: localFlags, } }
func NewKubectlServer() *Server { // need to use term.StdStreams to get the right IO refs on Windows stdin, stdout, stderr := term.StdStreams() cmd := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), stdin, stdout, stderr) localFlags := cmd.LocalFlags() localFlags.SetInterspersed(false) return &Server{ name: "kubectl", SimpleUsage: "Kubernetes command line client", Long: "Kubernetes command line client", Run: func(s *Server, args []string) error { cmd.SetArgs(args) return cmd.Execute() }, flags: localFlags, } }
/* WARNING: this logic is duplicated, with minor changes, in cmd/hyperkube/kubectl.go Any salient changes here will need to be manually reflected in that file. */ func Run() error { cmd := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, os.Stdout, os.Stderr) return cmd.Execute() }
// main boots a single-binary Kubernetes demo cluster: it embeds kubectl
// (selected by argv[0] basename), then starts etcd, the apiserver, and the
// scheduler in-process, and for each --nodes entry maintains SSH tunnels
// that (a) reverse-proxy the remote's opened port back to the local
// apiserver on :8080 and (b) launch a kubelet on the remote host.
func main() {
	// embed kubectl
	// If the binary was invoked as "kubectl", behave exactly like kubectl
	// and exit; none of the cluster machinery below runs.
	if filepath.Base(os.Args[0]) == "kubectl" {
		cmd := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, os.Stdout, os.Stderr)
		if err := cmd.Execute(); err != nil {
			os.Exit(1)
		}
		return
	}
	// Current user is only needed to locate the default Vagrant ssh key.
	usr, err := user.Current()
	if err != nil {
		log.Fatal(err)
	}
	mfs := pflag.NewFlagSet("main", pflag.ExitOnError)
	nodes := mfs.StringSlice("nodes", []string{}, "list of nodes to make part of cluster")
	sshKeyfile := mfs.String("ssh-keyfile", usr.HomeDir+"/.vagrant.d/insecure_private_key", "private ssh key to use for tunnels")
	sshUser := mfs.String("ssh-user", "core", "ssh user to use for tunnels")
	clusterIPRange := mfs.String("service-cluster-ip-range", "10.1.30.0/24", "A CIDR notation IP range from which to assign service cluster IPs. This must not overlap with any IP ranges assigned to nodes for pods.")
	// NOTE(review): this parses os.Args including the program name at
	// index 0; pflag conventionally takes os.Args[1:] — confirm intended.
	mfs.Parse(os.Args)
	// Shared SSH client config for all tunnels; key-based auth only.
	config := &ssh.ClientConfig{
		User: *sshUser,
		Auth: []ssh.AuthMethod{
			PublicKeyFile(*sshKeyfile),
		},
	}
	for _, remoteHost := range *nodes {
		// Dial your ssh server.
		// One goroutine per node; host is passed as a parameter so the
		// closure does not capture the loop variable.
		go func(host string) {
			// Serve HTTP with your SSH server acting as a reverse proxy.
			go func() {
				// Exponential backoff between reconnect attempts.
				b := &backoff.Backoff{
					//These are the defaults
					Min:    100 * time.Millisecond,
					Max:    10 * time.Second,
					Factor: 2,
					Jitter: false,
				}
				for {
					conn, err := ssh.Dial("tcp", host, config)
					if err != nil {
						log.Println("unable to connect, retrying:", err)
						time.Sleep(b.Duration())
						continue
					}
					// NOTE(review): defer inside an infinite for loop never
					// fires until the goroutine exits (it never does), so
					// failed connections accumulate — close explicitly on the
					// retry paths instead.
					defer conn.Close()
					// Request the remote side to open port 8080 on all interfaces.
					l, err := conn.Listen("tcp", remoteListen)
					if err != nil {
						log.Println("unable to register tcp forward, retrying:", err)
						time.Sleep(b.Duration())
						continue
					}
					// NOTE(review): same defer-in-loop leak as conn.Close above.
					defer l.Close()
					// Forward every request arriving on the remote listener to
					// the local apiserver at localhost:8080.
					fwd, _ := forward.New()
					http.Serve(l, http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
						req.URL = testutils.ParseURI("http://localhost:8080")
						fwd.ServeHTTP(resp, req)
					}))
					// http.Serve only returns when the tunnel breaks; back off
					// and reconnect.
					log.Println("proxy connection broken, reconnecting....")
					time.Sleep(b.Duration())
				}
			}()
			go func() {
				// this will block, and the kubelet will stop once the connection is broken
				// loop for reconnection
				b := &backoff.Backoff{
					//These are the defaults
					Min:    100 * time.Millisecond,
					Max:    10 * time.Second,
					Factor: 2,
					Jitter: false,
				}
				for {
					ip, _, err := net.SplitHostPort(host)
					if err != nil {
						// NOTE(review): log.Fatalf exits the whole process;
						// the following return is unreachable.
						log.Fatalf("unable split host port: %v", err)
						return
					}
					// Start the kubelet on the remote node, pointed at the
					// tunneled local apiserver.
					cmd := fmt.Sprintf("sudo /usr/bin/kubelet --hostname-override=%s --api-servers=http://localhost:8080", ip)
					_, err = executeCmd(cmd, host, config)
					if err != nil {
						log.Println("unable to execute kubelet, retrying:", err)
					}
					// if we got here something went wrong
					dur := b.Duration()
					log.Println("kubelet connection broken, reconnecting in", dur)
					time.Sleep(dur)
				}
			}()
			// Block this per-node goroutine forever; the two child
			// goroutines above do the work.
			<-make(chan interface{})
		}(remoteHost)
	}
	go func() {
		// etcd reads os.Args so we have to use mess with them
		os.Args = []string{"etcd"}
		etcdmain.Main()
	}()
	go func() {
		// In-process apiserver, backed by the embedded etcd above.
		s := kubeapiserver.NewAPIServer()
		fs := pflag.NewFlagSet("apiserver", pflag.ContinueOnError)
		s.AddFlags(fs)
		// NOTE(review): the source is corrupted/redacted below — the
		// "******" span appears to have swallowed the tail of this flag
		// list plus the launch of the controller-manager goroutine
		// (presumably `"--ssh-user=" + *sshUser`, `s.Run(...)`, and a
		// `controllermanager.NewCMServer()` block). Restore from VCS
		// history before building; this does not compile as-is.
		fs.Parse([]string{
			"--service-cluster-ip-range=" + *clusterIPRange,
			"--etcd-servers=http://127.0.0.1:2379",
			"--ssh-keyfile=" + *sshKeyfile,
			"--ssh-user="******"controller", pflag.ContinueOnError)
		s.AddFlags(fs)
		fs.Parse([]string{})
		s.Run([]string{})
	}()
	go func() {
		// In-process scheduler with default flags.
		s := scheduler.NewSchedulerServer()
		fs := pflag.NewFlagSet("scheduler", pflag.ContinueOnError)
		s.AddFlags(fs)
		fs.Parse([]string{})
		s.Run([]string{})
	}()
	// Block main forever; all components run in goroutines.
	<-make(chan interface{})
}
/* WARNING: this logic is duplicated, with minor changes, in cmd/hyperkube/kubectl.go Any salient changes here will need to be manually reflected in that file. */ func Run() error { // need to use term.StdStreams to get the right IO refs on Windows stdin, stdout, stderr := term.StdStreams() cmd := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), stdin, stdout, stderr) return cmd.Execute() }