Example #1
// rawhandle wraps the command function handlers and sets up the
// environment but performs no output formatting.
func rawhandle(c *cli.Context, fn handlerFunc) (*etcd.Response, error) {
	endpoints, err := getEndpoints(c)
	if err != nil {
		return nil, err
	}

	tr, err := getTransport(c)
	if err != nil {
		return nil, err
	}

	client := etcd.NewClient(endpoints)
	client.SetTransport(tr)

	if c.GlobalBool("debug") {
		go dumpCURL(client)
	}

	// Sync cluster.
	if !c.GlobalBool("no-sync") {
		if ok := client.SyncCluster(); !ok {
			handleError(FailedToConnectToHost, errors.New("cannot sync with the cluster using endpoints "+strings.Join(endpoints, ", ")))
		}
	}

	if c.GlobalBool("debug") {
		fmt.Fprintf(os.Stderr, "Cluster-Endpoints: %s\n", strings.Join(client.GetCluster(), ", "))
	}

	// Execute handler function.
	return fn(c, client)
}
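A minimal usage sketch, not taken from the source: a command action that feeds a one-off GET handler into rawhandle. The getAction wrapper and the handlerFunc shape, func(*cli.Context, *etcd.Client) (*etcd.Response, error), are assumptions inferred from how rawhandle is written and called.

func getAction(c *cli.Context) {
	// Hypothetical caller: wrap a single Get in a handlerFunc and let
	// rawhandle build the client, sync the cluster, and invoke it.
	resp, err := rawhandle(c, func(c *cli.Context, client *etcd.Client) (*etcd.Response, error) {
		return client.Get(c.Args().First(), false, false)
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(resp.Node.Value)
}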
Example #2
// rawhandle wraps the command function handlers and sets up the
// environment but performs no output formatting.
func rawhandle(c *cli.Context, fn handlerFunc) (*etcd.Response, error) {
	sync := !c.GlobalBool("no-sync")

	peerstr := c.GlobalString("peers")

	// Use an environment variable if nothing was supplied on the
	// command line
	if peerstr == "" {
		peerstr = os.Getenv("ETCDCTL_PEERS")
	}

	// If we still don't have peers, use a default
	if peerstr == "" {
		peerstr = "127.0.0.1:4001"
	}

	peers := strings.Split(peerstr, ",")

	// If no sync, create http path for each peer address
	if !sync {
		revisedPeers := make([]string, 0)
		for _, peer := range peers {
			if revisedPeer, err := createHttpPath(peer); err != nil {
				fmt.Fprintf(os.Stderr, "Unsupported url %v: %v\n", peer, err)
			} else {
				revisedPeers = append(revisedPeers, revisedPeer)
			}
		}
		peers = revisedPeers
	}

	client := etcd.NewClient(peers)

	if c.GlobalBool("debug") {
		go dumpCURL(client)
	}

	// Sync cluster.
	if sync {
		if ok := client.SyncCluster(); !ok {
			handleError(FailedToConnectToHost, errors.New("cannot sync with the cluster using peers "+strings.Join(peers, ", ")))
		}
	}

	if c.GlobalBool("debug") {
		fmt.Fprintf(os.Stderr, "Cluster-Peers: %s\n",
			strings.Join(client.GetCluster(), " "))
	}

	// Execute handler function.
	return fn(c, client)
}
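Example #2 calls createHttpPath, which is not shown. A plausible sketch, assuming the helper only has to turn a bare host:port into a usable HTTP URL and reject anything it cannot parse (uses net/url in addition to the packages already referenced above):

// Hypothetical helper assumed by Example #2: addresses without a scheme,
// like the 127.0.0.1:4001 default above, get http:// prepended; anything
// else must parse cleanly as a URL.
func createHttpPath(addr string) (string, error) {
	if !strings.Contains(addr, "://") {
		return "http://" + addr, nil
	}
	u, err := url.Parse(addr)
	if err != nil {
		return "", err
	}
	return u.String(), nil
}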
Example #3
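// handleImportSnap reads a v2 snapshot file named by --snap, recovers it
// into an in-memory store, and replays every key (plus any --hidden
// prefixes) into the cluster through a pool of concurrent clients.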
func handleImportSnap(c *cli.Context) {
	d, err := ioutil.ReadFile(c.String("snap"))
	if err != nil {
		if c.String("snap") == "" {
			fmt.Printf("no snapshot file provided (use --snap)\n")
		} else {
			fmt.Printf("cannot read snapshot file %s\n", c.String("snap"))
		}
		os.Exit(1)
	}

	st := store.New()
	err = st.Recovery(d)
	if err != nil {
		fmt.Printf("cannot recover the snapshot file: %v\n", err)
		os.Exit(1)
	}

	endpoints, err := getEndpoints(c)
	if err != nil {
		handleError(ExitServerError, err)
	}
	tr, err := getTransport(c)
	if err != nil {
		handleError(ExitServerError, err)
	}

	wg := &sync.WaitGroup{}
	setc := make(chan set)
	concurrent := c.Int("c")
	fmt.Printf("starting to import snapshot %s with %d clients\n", c.String("snap"), concurrent)
	for i := 0; i < concurrent; i++ {
		client := etcd.NewClient(endpoints)
		client.SetTransport(tr)

		if c.GlobalBool("debug") {
			go dumpCURL(client)
		}

		if ok := client.SyncCluster(); !ok {
			handleError(ExitBadConnection, errors.New("cannot sync with the cluster using endpoints "+strings.Join(endpoints, ", ")))
		}
		wg.Add(1)
		go runSet(client, setc, wg)
	}

	all, err := st.Get("/", true, true)
	if err != nil {
		handleError(ExitServerError, err)
	}
	n := copyKeys(all.Node, setc)

	hiddens := c.StringSlice("hidden")
	for _, h := range hiddens {
		allh, err := st.Get(h, true, true)
		if err != nil {
			handleError(ExitServerError, err)
		}
		n += copyKeys(allh.Node, setc)
	}
	close(setc)
	wg.Wait()
	fmt.Printf("finished importing %d keys\n", n)
}
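Example #3 also leans on a set type and a runSet worker that are not shown. A sketch of what they might look like, assuming each set carries one key/value pair with a TTL and that runSet drains the channel until it is closed:

// Hypothetical shapes assumed by Example #3.
type set struct {
	key   string
	value string
	ttl   int64
}

// runSet writes each queued pair with its own client, then signals the
// WaitGroup once the channel is closed and drained.
func runSet(client *etcd.Client, setc <-chan set, wg *sync.WaitGroup) {
	defer wg.Done()
	for s := range setc {
		if _, err := client.Set(s.key, s.value, uint64(s.ttl)); err != nil {
			fmt.Fprintf(os.Stderr, "failed to set %s: %v\n", s.key, err)
		}
	}
}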
Example #4
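// handleClusterHealth reports whether the cluster has a reachable leader,
// whether the raft term stays stable and the raft index advances over a
// one-second window, and whether every follower keeps receiving
// successful appends from the leader.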
func handleClusterHealth(c *cli.Context) {
	endpoints, err := getEndpoints(c)
	if err != nil {
		handleError(ExitServerError, err)
	}
	tr, err := getTransport(c)
	if err != nil {
		handleError(ExitServerError, err)
	}

	client := etcd.NewClient(endpoints)
	client.SetTransport(tr)

	if c.GlobalBool("debug") {
		go dumpCURL(client)
	}

	if ok := client.SyncCluster(); !ok {
		handleError(ExitBadConnection, errors.New("cannot sync with the cluster using endpoints "+strings.Join(endpoints, ", ")))
	}

	// do we have a leader?
	cl := client.GetCluster()
	ep, ls0, err := getLeaderStats(tr, cl)
	if err != nil {
		fmt.Println("cluster may be unhealthy: failed to connect", cl)
		os.Exit(1)
	}

	// is raft stable and making progress?
	client = etcd.NewClient([]string{ep})
	client.SetTransport(tr)
	resp, err := client.Get("/", false, false)
	if err != nil {
		fmt.Println("cluster is unhealthy")
		os.Exit(1)
	}
	rt0, ri0 := resp.RaftTerm, resp.RaftIndex
	time.Sleep(time.Second)

	resp, err = client.Get("/", false, false)
	if err != nil {
		fmt.Println("cluster is unhealthy")
		os.Exit(1)
	}
	rt1, ri1 := resp.RaftTerm, resp.RaftIndex

	if rt0 != rt1 {
		fmt.Println("cluster is unhealthy")
		os.Exit(1)
	}

	if ri1 == ri0 {
		fmt.Println("cluster is unhealthy")
		os.Exit(1)
	}

	// are all the members making progress?
	_, ls1, err := getLeaderStats(tr, []string{ep})
	if err != nil {
		fmt.Println("cluster is unhealthy")
		os.Exit(1)
	}

	fmt.Println("cluster is healthy")
	// self is healthy
	var prints []string

	prints = append(prints, fmt.Sprintf("member %s is healthy\n", ls1.Leader))
	for name, fs0 := range ls0.Followers {
		fs1, ok := ls1.Followers[name]
		if !ok {
			fmt.Println("Cluster configuration changed during health checking. Please retry.")
			os.Exit(1)
		}
		if fs1.Counts.Success <= fs0.Counts.Success {
			prints = append(prints, fmt.Sprintf("member %s is unhealthy\n", name))
		} else {
			prints = append(prints, fmt.Sprintf("member %s is healthy\n", name))
		}
	}

	sort.Strings(prints)
	for _, p := range prints {
		fmt.Print(p)
	}
	os.Exit(0)
}
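getLeaderStats is not shown in Example #4. A sketch under two assumptions: that getTransport yields an *http.Transport, and that the helper probes each endpoint's /v2/stats/leader route, returning the first endpoint whose response decodes. The leaderStats and followerStats shapes are inferred from how ls0 and ls1 are used above (uses encoding/json and net/http):

// Hypothetical stats types inferred from Example #4.
type followerStats struct {
	Counts struct {
		Fail    uint64 `json:"fail"`
		Success uint64 `json:"success"`
	} `json:"counts"`
}

type leaderStats struct {
	Leader    string                   `json:"leader"`
	Followers map[string]followerStats `json:"followers"`
}

// getLeaderStats tries each endpoint in turn and returns the first one
// whose leader-stats response decodes, along with the decoded stats.
func getLeaderStats(tr *http.Transport, endpoints []string) (string, *leaderStats, error) {
	httpclient := http.Client{Transport: tr}
	for _, ep := range endpoints {
		resp, err := httpclient.Get(ep + "/v2/stats/leader")
		if err != nil {
			continue
		}
		ls := &leaderStats{}
		derr := json.NewDecoder(resp.Body).Decode(ls)
		resp.Body.Close()
		if derr != nil {
			continue
		}
		return ep, ls, nil
	}
	return "", nil, errors.New("no leader endpoint is reachable")
}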