func (m *etcdService) StartServer() { exepath, err := exePath() if err != nil { log.Fatal(err) } if err := os.Chdir(filepath.Dir(exepath)); err != nil { log.Fatal(err) } etcdmain.Main() }
// init launches an in-process etcd with all-default settings and connects a
// package-level client to it. Assumes package vars `c` (client) and `Must`
// (panic-on-error helper) are declared elsewhere in this package.
func init() {
	// Truncate os.Args to just the program name so etcdmain.Main sees no
	// flags and parses all-default settings.
	os.Args = os.Args[0:1] // Make etcdmain.Main parse and get all-default settings.
	go etcdmain.Main()
	// NOTE(review): fixed sleep is a race — etcd may take longer than 3s on a
	// loaded machine, in which case New below connects to nothing. A readiness
	// poll (as done elsewhere in this codebase) would be more robust.
	time.Sleep(3 * time.Second) // NOTE: Give etcd 3 seconds to start before connecting to it.
	var e error
	// Connect to both the legacy (4001) and current (2379) default client ports.
	c, e = New("http://localhost:4001,http://localhost:2379")
	Must(e)
}
func main() { if str := os.Getenv("ETCD_ALLOW_LEGACY_MODE"); str != "" { v, err := strconv.ParseBool(str) if err != nil { log.Fatalf("failed to parse ETCD_ALLOW_LEGACY_MODE=%s as bool", str) } if v { starter.StartDesiredVersion(os.Args[1:]) } } else if coreos.IsCoreOS() { starter.StartDesiredVersion(os.Args[1:]) } etcdmain.Main() }
// Starts an etcd server.
//
// Start launches an in-process etcd on random local ports for tests, waits up
// to 6 seconds for it to answer a client Get, and records the client URLs in
// the package var `Urls`. Idempotent via the package var `started`; NOT safe
// for concurrent callers (see comment below). Returns nil once etcd responds,
// or an error on timeout.
func Start() error {
	// assuming that the tests won't try to start it concurrently,
	// fix this only when it turns out to be a wrong assumption
	if started {
		return nil
	}
	// Random ports so parallel test binaries on one host don't collide.
	Urls = makeLocalUrls(randPort(), randPort())
	clientUrlsString := strings.Join(Urls, ",")
	var args []string
	// Temporarily replace os.Args so etcdmain.Main picks up our flags;
	// the original args are restored once etcd is reachable.
	args, os.Args = os.Args, []string{
		"etcd",
		formatFlag("-listen-client-urls", clientUrlsString),
		formatFlag("-advertise-client-urls", clientUrlsString),
		formatFlag("-listen-peer-urls", strings.Join(makeLocalUrls(randPort(), randPort()), ","))}
	go func() {
		// best mock is the real thing
		etcdmain.Main()
	}()
	// wait for started:
	wait := make(chan int)
	// NOTE(review): this poller has no stop signal — on the timeout path below
	// it keeps polling forever, and would restore os.Args / close(wait) long
	// after Start has returned an error.
	go func() {
		for {
			c := etcd.NewClient(Urls)
			_, err := c.Get("/", false, false)
			if err == nil {
				// revert the args for the rest of the tests:
				os.Args = args
				close(wait)
				return
			}
			time.Sleep(30 * time.Millisecond)
		}
	}()
	select {
	case <-wait:
		started = true
		return nil
	case <-time.After(6 * time.Second):
		return errors.New("etcd timeout")
	}
}
func main() { isIntSess, err := svc.IsAnInteractiveSession() if err != nil { log.Fatalf("etcd: failed to determine if we are running in an interactive session: %v", err) } if !isIntSess { svcName := filepath.Base(os.Args[0]) if strings.HasSuffix(strings.ToLower(svcName), ".exe") { svcName = svcName[:len(svcName)-len(".exe")] } runAsService(svcName) return } etcdmain.Main() }
// main delegates straight to the embedded etcd entry point.
func main() {
	etcdmain.Main()
}
// main bootstraps a single-binary Kubernetes-over-SSH demo: when invoked as
// "kubectl" it embeds the kubectl CLI; otherwise it dials each --nodes host
// over SSH, reverse-proxies the host's :8080 to a local API server, starts a
// kubelet remotely, and runs etcd, the API server, controller manager, and
// scheduler in-process. Blocks forever.
//
// NOTE(review): a span of this function appears redacted/corrupted (the
// `"--ssh-user="******` segment below) — the tail of the apiserver flag list
// and the head of the controller-manager goroutine are missing. Left
// byte-identical; recover from version control before editing.
func main() { // embed kubectl
	if filepath.Base(os.Args[0]) == "kubectl" {
		cmd := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, os.Stdout, os.Stderr)
		if err := cmd.Execute(); err != nil {
			os.Exit(1)
		}
		return
	}
	usr, err := user.Current()
	if err != nil {
		log.Fatal(err)
	}
	// Flags for the cluster-bootstrap mode.
	mfs := pflag.NewFlagSet("main", pflag.ExitOnError)
	nodes := mfs.StringSlice("nodes", []string{}, "list of nodes to make part of cluster")
	sshKeyfile := mfs.String("ssh-keyfile", usr.HomeDir+"/.vagrant.d/insecure_private_key", "private ssh key to use for tunnels")
	sshUser := mfs.String("ssh-user", "core", "ssh user to use for tunnels")
	clusterIPRange := mfs.String("service-cluster-ip-range", "10.1.30.0/24", "A CIDR notation IP range from which to assign service cluster IPs. This must not overlap with any IP ranges assigned to nodes for pods.")
	mfs.Parse(os.Args)
	config := &ssh.ClientConfig{
		User: *sshUser,
		Auth: []ssh.AuthMethod{
			PublicKeyFile(*sshKeyfile),
		},
	}
	for _, remoteHost := range *nodes {
		// Dial your ssh server.
		// host is passed as an argument to avoid capturing the loop variable.
		go func(host string) {
			// Serve HTTP with your SSH server acting as a reverse proxy.
			go func() {
				b := &backoff.Backoff{
					//These are the defaults
					Min:    100 * time.Millisecond,
					Max:    10 * time.Second,
					Factor: 2,
					Jitter: false,
				}
				// Reconnect loop with exponential backoff.
				for {
					conn, err := ssh.Dial("tcp", host, config)
					if err != nil {
						log.Println("unable to connect, retrying:", err)
						time.Sleep(b.Duration())
						continue
					}
					// NOTE(review): defer inside an infinite for-loop never
					// fires (main never returns) — each reconnect leaks the
					// previous conn. Close explicitly per iteration instead.
					defer conn.Close()
					// Request the remote side to open port 8080 on all interfaces.
					l, err := conn.Listen("tcp", remoteListen)
					if err != nil {
						log.Println("unable to register tcp forward, retrying:", err)
						time.Sleep(b.Duration())
						continue
					}
					// NOTE(review): same defer-in-loop leak as conn above.
					defer l.Close()
					fwd, _ := forward.New()
					// Every request arriving on the remote listener is
					// forwarded to the local apiserver on :8080.
					http.Serve(l, http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
						req.URL = testutils.ParseURI("http://localhost:8080")
						fwd.ServeHTTP(resp, req)
					}))
					log.Println("proxy connection broken, reconnecting....")
					time.Sleep(b.Duration())
				}
			}()
			go func() { // this will block, and the kubelet will stop once the connection is broken
				// loop for reconnection
				b := &backoff.Backoff{
					//These are the defaults
					Min:    100 * time.Millisecond,
					Max:    10 * time.Second,
					Factor: 2,
					Jitter: false,
				}
				for {
					ip, _, err := net.SplitHostPort(host)
					if err != nil {
						// NOTE(review): log.Fatalf exits the whole process
						// from inside a goroutine; the return after it is
						// unreachable.
						log.Fatalf("unable split host port: %v", err)
						return
					}
					cmd := fmt.Sprintf("sudo /usr/bin/kubelet --hostname-override=%s --api-servers=http://localhost:8080", ip)
					_, err = executeCmd(cmd, host, config)
					if err != nil {
						log.Println("unable to execute kubelet, retrying:", err)
					}
					// if we got here something went wrong
					dur := b.Duration()
					log.Println("kubelet connection broken, reconnecting in", dur)
					time.Sleep(dur)
				}
			}()
			// Block this per-host goroutine forever.
			<-make(chan interface{})
		}(remoteHost)
	}
	go func() {
		// etcd reads os.Args so we have to use mess with them
		os.Args = []string{"etcd"}
		etcdmain.Main()
	}()
	go func() {
		// In-process API server wired to the local etcd.
		s := kubeapiserver.NewAPIServer()
		fs := pflag.NewFlagSet("apiserver", pflag.ContinueOnError)
		s.AddFlags(fs)
		fs.Parse([]string{
			"--service-cluster-ip-range=" + *clusterIPRange,
			"--etcd-servers=http://127.0.0.1:2379",
			"--ssh-keyfile=" + *sshKeyfile,
			// NOTE(review): corrupted/redacted span below — do not edit by hand.
			"--ssh-user="******"controller", pflag.ContinueOnError) s.AddFlags(fs) fs.Parse([]string{}) s.Run([]string{}) }()
	go func() {
		// In-process scheduler with default flags.
		s := scheduler.NewSchedulerServer()
		fs := pflag.NewFlagSet("scheduler", pflag.ContinueOnError)
		s.AddFlags(fs)
		fs.Parse([]string{})
		s.Run([]string{})
	}()
	// Block main forever; all work happens in the goroutines above.
	<-make(chan interface{})
}