// mkCommandFunc executes the "mk" command. func mkCommandFunc(c *cli.Context, ki client.KeysAPI) { if len(c.Args()) == 0 { handleError(ExitBadArgs, errors.New("key required")) } key := c.Args()[0] value, err := argOrStdin(c.Args(), os.Stdin, 1) if err != nil { handleError(ExitBadArgs, errors.New("value required")) } ttl := c.Int("ttl") ctx, cancel := contextWithTotalTimeout(c) // Since PrevNoExist means that the Node must not exist previously, // this Set method always creates a new key. Therefore, mk command // succeeds only if the key did not previously exist, and the command // prevents one from overwriting values accidentally. resp, err := ki.Set(ctx, key, value, &client.SetOptions{TTL: time.Duration(ttl) * time.Second, PrevExist: client.PrevNoExist}) cancel() if err != nil { handleError(ExitServerError, err) } printResponseKey(resp, c.GlobalString("output")) }
func actionMemberList(c *cli.Context) { if len(c.Args()) != 0 { fmt.Fprintln(os.Stderr, "No arguments accepted") os.Exit(1) } mAPI := mustNewMembersAPI(c) ctx, cancel := contextWithTotalTimeout(c) defer cancel() members, err := mAPI.List(ctx) if err != nil { fmt.Fprintln(os.Stderr, err.Error()) os.Exit(1) } leader, err := mAPI.Leader(ctx) if err != nil { fmt.Fprintln(os.Stderr, "Failed to get leader: ", err) os.Exit(1) } for _, m := range members { isLeader := false if m.ID == leader.ID { isLeader = true } if len(m.Name) == 0 { fmt.Printf("%s[unstarted]: peerURLs=%s\n", m.ID, strings.Join(m.PeerURLs, ",")) } else { fmt.Printf("%s: name=%s peerURLs=%s clientURLs=%s isLeader=%v\n", m.ID, m.Name, strings.Join(m.PeerURLs, ","), strings.Join(m.ClientURLs, ","), isLeader) } } }
// deleteRangeCommandFunc executes the "delegeRange" command. func deleteRangeCommandFunc(c *cli.Context) { if len(c.Args()) == 0 { panic("bad arg") } var rangeEnd []byte key := []byte(c.Args()[0]) if len(c.Args()) > 1 { rangeEnd = []byte(c.Args()[1]) } conn, err := grpc.Dial(c.GlobalString("endpoint")) if err != nil { panic(err) } etcd := pb.NewEtcdClient(conn) req := &pb.DeleteRangeRequest{Key: key, RangeEnd: rangeEnd} etcd.DeleteRange(context.Background(), req) if rangeEnd != nil { fmt.Printf("range [%s, %s) is deleted\n", string(key), string(rangeEnd)) } else { fmt.Printf("key %s is deleted\n", string(key)) } }
func newClient(c *cli.Context) (client.Client, error) { eps, err := getEndpoints(c) if err != nil { return nil, err } tr, err := getTransport(c) if err != nil { return nil, err } cfg := client.Config{ Transport: tr, Endpoints: eps, HeaderTimeoutPerRequest: c.GlobalDuration("timeout"), } uFlag := c.GlobalString("username") if uFlag != "" { username, password, err := getUsernamePasswordFromFlag(uFlag) if err != nil { return nil, err } cfg.Username = username cfg.Password = password } return client.New(cfg) }
func mustNewMembersAPI(c *cli.Context) client.MembersAPI { eps, err := getEndpoints(c) if err != nil { fmt.Fprintln(os.Stderr, err.Error()) os.Exit(1) } tr, err := getTransport(c) if err != nil { fmt.Fprintln(os.Stderr, err.Error()) os.Exit(1) } hc, err := client.NewHTTPClient(tr, eps) if err != nil { fmt.Fprintln(os.Stderr, err.Error()) os.Exit(1) } if !c.GlobalBool("no-sync") { ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout) err := hc.Sync(ctx) cancel() if err != nil { fmt.Fprintln(os.Stderr, err.Error()) os.Exit(1) } } if c.GlobalBool("debug") { fmt.Fprintf(os.Stderr, "Cluster-Endpoints: %s\n", strings.Join(hc.Endpoints(), ", ")) } return client.NewMembersAPI(hc) }
// txnCommandFunc executes the "txn" command. func txnCommandFunc(c *cli.Context) { if len(c.Args()) != 0 { panic("unexpected args") } reader := bufio.NewReader(os.Stdin) next := compareState txn := &pb.TxnRequest{} for next != nil { next = next(txn, reader) } conn, err := grpc.Dial("127.0.0.1:12379") if err != nil { panic(err) } etcd := pb.NewEtcdClient(conn) resp, err := etcd.Txn(context.Background(), txn) if err != nil { fmt.Println(err) } if resp.Succeeded { fmt.Println("executed success request list") } else { fmt.Println("executed failure request list") } }
// removeDirCommandFunc executes the "rmdir" command. func removeDirCommandFunc(c *cli.Context, client *etcd.Client) (*etcd.Response, error) { if len(c.Args()) == 0 { return nil, errors.New("Key required") } key := c.Args()[0] return client.DeleteDir(key) }
func mustNewAuthRoleAPI(c *cli.Context) client.AuthRoleAPI { hc := mustNewClient(c) if c.GlobalBool("debug") { fmt.Fprintf(os.Stderr, "Cluster-Endpoints: %s\n", strings.Join(hc.Endpoints(), ", ")) } return client.NewAuthRoleAPI(hc) }
// mustNewClient creates a v2 API client from the CLI flags and exits the
// process on any failure. Unless --no-sync is set it also syncs the
// endpoint list from the cluster, falling back to the legacy peer API so
// etcdctl keeps working against etcd 0.4.x servers.
func mustNewClient(c *cli.Context) client.Client {
	hc, err := newClient(c)
	if err != nil {
		fmt.Fprintln(os.Stderr, err.Error())
		os.Exit(1)
	}

	debug := c.GlobalBool("debug")
	if debug {
		client.EnablecURLDebug()
	}

	if !c.GlobalBool("no-sync") {
		if debug {
			fmt.Fprintf(os.Stderr, "start to sync cluster using endpoints(%s)\n", strings.Join(hc.Endpoints(), ","))
		}
		ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
		err := hc.Sync(ctx)
		cancel()
		if err != nil {
			// The cluster has not published client endpoints yet; suggest
			// --no-sync and exit via handleError.
			if err == client.ErrNoEndpoints {
				fmt.Fprintf(os.Stderr, "etcd cluster has no published client endpoints.\n")
				fmt.Fprintf(os.Stderr, "Try '--no-sync' if you want to access non-published client endpoints(%s).\n", strings.Join(hc.Endpoints(), ","))
				handleError(ExitServerError, err)
			}
			if isConnectionError(err) {
				handleError(ExitBadConnection, err)
			}
			// fail-back to try sync cluster with peer API. this is for making etcdctl work with etcd 0.4.x.
			// TODO: remove this when we deprecate the support for etcd 0.4.
			// NOTE(review): ctx was already cancelled above before being
			// reused here — confirm syncWithPeerAPI tolerates a cancelled
			// context or derives its own.
			eps, serr := syncWithPeerAPI(c, ctx, hc.Endpoints())
			if serr != nil {
				if isConnectionError(serr) {
					handleError(ExitBadConnection, serr)
				} else {
					handleError(ExitServerError, serr)
				}
			}
			// Adopt whatever endpoints the peer API reported.
			err = hc.SetEndpoints(eps)
			if err != nil {
				handleError(ExitServerError, err)
			}
		}
		if debug {
			fmt.Fprintf(os.Stderr, "got endpoints(%s) after sync\n", strings.Join(hc.Endpoints(), ","))
		}
	}

	if debug {
		fmt.Fprintf(os.Stderr, "Cluster-Endpoints: %s\n", strings.Join(hc.Endpoints(), ", "))
	}

	return hc
}
// lsCommandFunc executes the "ls" command. func lsCommandFunc(c *cli.Context, client *etcd.Client) (*etcd.Response, error) { key := "/" if len(c.Args()) != 0 { key = c.Args()[0] } recursive := c.Bool("recursive") // Retrieve the value from the server. return client.Get(key, false, recursive) }
// rPrint recursively prints out the nodes in the node structure. func rPrint(c *cli.Context, n *client.Node) { if n.Dir && c.Bool("p") { fmt.Println(fmt.Sprintf("%v/", n.Key)) } else { fmt.Println(n.Key) } for _, node := range n.Nodes { rPrint(c, node) } }
func mustRoleAPIAndName(c *cli.Context) (client.AuthRoleAPI, string) { args := c.Args() if len(args) != 1 { fmt.Fprintln(os.Stderr, "Please provide a role name") os.Exit(1) } name := args[0] api := mustNewAuthRoleAPI(c) return api, name }
func mustUserAPIAndName(c *cli.Context) (client.AuthUserAPI, string) { args := c.Args() if len(args) != 1 { fmt.Fprintln(os.Stderr, "Please provide a username") os.Exit(1) } api := mustNewAuthUserAPI(c) username := args[0] return api, username }
// Just like handlePrint but also passed the context of the command func handleContextualPrint(c *cli.Context, fn handlerFunc, pFn contextualPrintFunc) { resp, err := rawhandle(c, fn) if err != nil { handleError(ErrorFromEtcd, err) } if resp != nil && pFn != nil { pFn(c, resp, c.GlobalString("output")) } }
// handlePrint wraps the command function handlers to parse global flags // into a client and to properly format the response objects. func handlePrint(c *cli.Context, fn handlerFunc, pFn printFunc) { resp, err := rawhandle(c, fn) // Print error and exit, if necessary. if err != nil { handleError(ErrorFromEtcd, err) } if resp != nil && pFn != nil { pFn(resp, c.GlobalString("output")) } }
func mustNewClientNoSync(c *cli.Context) client.Client { hc, err := newClient(c) if err != nil { fmt.Fprintln(os.Stderr, err.Error()) os.Exit(1) } if c.GlobalBool("debug") { fmt.Fprintf(os.Stderr, "Cluster-Endpoints: %s\n", strings.Join(hc.Endpoints(), ", ")) client.EnablecURLDebug() } return hc }
// watchCommandFunc executes the "watch" command. func watchCommandFunc(c *cli.Context) { conn, err := grpc.Dial(c.GlobalString("endpoint")) if err != nil { panic(err) } wAPI := pb.NewWatchClient(conn) wStream, err := wAPI.Watch(context.TODO()) if err != nil { panic(err) } go recvLoop(wStream) reader := bufio.NewReader(os.Stdin) for { l, err := reader.ReadString('\n') if err != nil { fmt.Fprintf(os.Stderr, "Error reading watch request line: %v", err) os.Exit(1) } l = strings.TrimSuffix(l, "\n") // TODO: support start and end revision segs := strings.Split(l, " ") if len(segs) != 2 { fmt.Fprintf(os.Stderr, "Invalid watch request format: use watch key or watchprefix prefix\n") continue } var r *pb.WatchRequest switch segs[0] { case "watch": r = &pb.WatchRequest{Key: []byte(segs[1])} case "watchprefix": r = &pb.WatchRequest{Prefix: []byte(segs[1])} default: fmt.Fprintf(os.Stderr, "Invalid watch request format: use watch key or watchprefix prefix\n") continue } err = wStream.Send(r) if err != nil { fmt.Fprintf(os.Stderr, "Error sending request to server: %v\n", err) } } }
func getPeersFlagValue(c *cli.Context) []string { peerstr := c.GlobalString("peers") // Use an environment variable if nothing was supplied on the // command line if peerstr == "" { peerstr = os.Getenv("ETCDCTL_PEERS") } // If we still don't have peers, use a default if peerstr == "" { peerstr = "127.0.0.1:4001,127.0.0.1:2379" } return strings.Split(peerstr, ",") }
func actionMemberAdd(c *cli.Context) { args := c.Args() if len(args) != 2 { fmt.Fprintln(os.Stderr, "Provide a name and a single member peerURL") os.Exit(1) } mAPI := mustNewMembersAPI(c) url := args[1] ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout) m, err := mAPI.Add(ctx, url) cancel() if err != nil { fmt.Fprintln(os.Stderr, err.Error()) os.Exit(1) } newID := m.ID newName := args[0] fmt.Printf("Added member named %s with ID %s to cluster\n", newName, newID) ctx, cancel = context.WithTimeout(context.Background(), client.DefaultRequestTimeout) members, err := mAPI.List(ctx) cancel() if err != nil { fmt.Fprintln(os.Stderr, err.Error()) os.Exit(1) } conf := []string{} for _, memb := range members { for _, u := range memb.PeerURLs { n := memb.Name if memb.ID == newID { n = newName } conf = append(conf, fmt.Sprintf("%s=%s", n, u)) } } fmt.Print("\n") fmt.Printf("ETCD_NAME=%q\n", newName) fmt.Printf("ETCD_INITIAL_CLUSTER=%q\n", strings.Join(conf, ",")) fmt.Printf("ETCD_INITIAL_CLUSTER_STATE=\"existing\"\n") }
func actionMemberRemove(c *cli.Context) { args := c.Args() if len(args) != 1 { fmt.Fprintln(os.Stderr, "Provide a single member ID") os.Exit(1) } removalID := args[0] mAPI := mustNewMembersAPI(c) // Get the list of members. listctx, listCancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout) members, err := mAPI.List(listctx) listCancel() if err != nil { fmt.Fprintln(os.Stderr, "Error while verifying ID against known members:", err.Error()) os.Exit(1) } // Sanity check the input. foundID := false for _, m := range members { if m.ID == removalID { foundID = true } if m.Name == removalID { // Note that, so long as it's not ambiguous, we *could* do the right thing by name here. fmt.Fprintf(os.Stderr, "Found a member named %s; if this is correct, please use its ID, eg:\n\tetcdctl member remove %s\n", m.Name, m.ID) fmt.Fprintf(os.Stderr, "For more details, read the documentation at https://github.com/coreos/etcd/blob/master/Documentation/runtime-configuration.md#remove-a-member\n\n") } } if !foundID { fmt.Fprintf(os.Stderr, "Couldn't find a member in the cluster with an ID of %s.\n", removalID) os.Exit(1) } // Actually attempt to remove the member. ctx, removeCancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout) err = mAPI.Remove(ctx, removalID) removeCancel() if err != nil { fmt.Fprintf(os.Stderr, "Recieved an error trying to remove member %s: %s", removalID, err.Error()) os.Exit(1) } fmt.Printf("Removed member %s from cluster\n", removalID) }
// handleBackup handles a request that intends to do a backup.
// It copies the latest snapshot (if any) from data-dir into backup-dir and
// rewrites the WAL metadata with freshly generated node and cluster IDs.
func handleBackup(c *cli.Context) {
	// Source and destination mirror the etcd data-directory layout:
	// <dir>/member/snap and <dir>/member/wal.
	srcSnap := path.Join(c.String("data-dir"), "member", "snap")
	destSnap := path.Join(c.String("backup-dir"), "member", "snap")
	srcWAL := path.Join(c.String("data-dir"), "member", "wal")
	destWAL := path.Join(c.String("backup-dir"), "member", "wal")

	if err := os.MkdirAll(destSnap, 0700); err != nil {
		log.Fatalf("failed creating backup snapshot dir %v: %v", destSnap, err)
	}
	ss := snap.New(srcSnap)
	snapshot, err := ss.Load()
	// ErrNoSnapshot just means there is no snapshot to copy yet.
	if err != nil && err != snap.ErrNoSnapshot {
		log.Fatal(err)
	}
	var walsnap walpb.Snapshot
	if snapshot != nil {
		// Record the snapshot position so the WAL is read from that point,
		// and copy the snapshot itself into the backup.
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
		newss := snap.New(destSnap)
		if err = newss.SaveSnap(*snapshot); err != nil {
			log.Fatal(err)
		}
	}

	// Read all WAL records at or after the snapshot position.
	w, err := wal.OpenForRead(srcWAL, walsnap)
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()
	wmetadata, state, ents, err := w.ReadAll()
	switch err {
	case nil:
	case wal.ErrSnapshotNotFound:
		// Missing snapshot record in the WAL is tolerated; it is re-added
		// via SaveSnapshot below.
		fmt.Printf("Failed to find the match snapshot record %+v in wal %v.", walsnap, srcWAL)
		fmt.Printf("etcdctl will add it back. Start auto fixing...")
	default:
		log.Fatal(err)
	}
	var metadata etcdserverpb.Metadata
	pbutil.MustUnmarshal(&metadata, wmetadata)
	// Regenerate the node and cluster IDs for the backed-up WAL.
	idgen := idutil.NewGenerator(0, time.Now())
	metadata.NodeID = idgen.Next()
	metadata.ClusterID = idgen.Next()

	neww, err := wal.Create(destWAL, pbutil.MustMarshal(&metadata))
	if err != nil {
		log.Fatal(err)
	}
	defer neww.Close()
	if err := neww.Save(state, ents); err != nil {
		log.Fatal(err)
	}
	if err := neww.SaveSnapshot(walsnap); err != nil {
		log.Fatal(err)
	}
}
func actionMemberList(c *cli.Context) { if len(c.Args()) != 0 { fmt.Fprintln(os.Stderr, "No arguments accepted") os.Exit(1) } mAPI := mustNewMembersAPI(c) ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout) members, err := mAPI.List(ctx) cancel() if err != nil { fmt.Fprintln(os.Stderr, err.Error()) os.Exit(1) } for _, m := range members { fmt.Printf("%s: name=%s peerURLs=%s clientURLs=%s\n", m.ID, m.Name, strings.Join(m.PeerURLs, ","), strings.Join(m.ClientURLs, ",")) } }
func actionUserList(c *cli.Context) { if len(c.Args()) != 0 { fmt.Fprintln(os.Stderr, "No arguments accepted") os.Exit(1) } u := mustNewAuthUserAPI(c) ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout) users, err := u.ListUsers(ctx) cancel() if err != nil { fmt.Fprintln(os.Stderr, err.Error()) os.Exit(1) } for _, user := range users { fmt.Printf("%s\n", user) } }
func actionRoleList(c *cli.Context) { if len(c.Args()) != 0 { fmt.Fprintln(os.Stderr, "No arguments accepted") os.Exit(1) } r := mustNewAuthRoleAPI(c) ctx, cancel := contextWithTotalTimeout(c) roles, err := r.ListRoles(ctx) cancel() if err != nil { fmt.Fprintln(os.Stderr, err.Error()) os.Exit(1) } for _, role := range roles { fmt.Printf("%s\n", role) } }
// updateCommandFunc executes the "update" command. func updateCommandFunc(c *cli.Context, ki client.KeysAPI) { if len(c.Args()) == 0 { handleError(ExitBadArgs, errors.New("key required")) } key := c.Args()[0] value, err := argOrStdin(c.Args(), os.Stdin, 1) if err != nil { handleError(ExitBadArgs, errors.New("value required")) } ttl := c.Int("ttl") resp, err := ki.Set(context.TODO(), key, value, &client.SetOptions{TTL: time.Duration(ttl) * time.Second, PrevExist: client.PrevExist}) if err != nil { handleError(ExitServerError, err) } printResponseKey(resp, c.GlobalString("output")) }
func actionMemberRemove(c *cli.Context) { args := c.Args() if len(args) != 1 { fmt.Fprintln(os.Stderr, "Provide a single member ID") os.Exit(1) } mAPI := mustNewMembersAPI(c) mID := args[0] ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout) err := mAPI.Remove(ctx, mID) cancel() if err != nil { fmt.Fprintln(os.Stderr, err.Error()) os.Exit(1) } fmt.Printf("Removed member %s from cluster\n", mID) }
func mustNewClient(c *cli.Context) client.Client { eps, err := getEndpoints(c) if err != nil { fmt.Fprintln(os.Stderr, err.Error()) os.Exit(1) } tr, err := getTransport(c) if err != nil { fmt.Fprintln(os.Stderr, err.Error()) os.Exit(1) } cfg := client.Config{ Transport: tr, Endpoints: eps, HeaderTimeoutPerRequest: c.GlobalDuration("timeout"), } uFlag := c.GlobalString("username") if uFlag != "" { username, password, err := getUsernamePasswordFromFlag(uFlag) if err != nil { fmt.Fprintln(os.Stderr, err.Error()) os.Exit(1) } cfg.Username = username cfg.Password = password } hc, err := client.New(cfg) if err != nil { fmt.Fprintln(os.Stderr, err.Error()) os.Exit(1) } if !c.GlobalBool("no-sync") { ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout) err := hc.Sync(ctx) cancel() if err != nil { handleError(ExitServerError, err) os.Exit(1) } } if c.GlobalBool("debug") { fmt.Fprintf(os.Stderr, "Cluster-Endpoints: %s\n", strings.Join(hc.Endpoints(), ", ")) client.EnablecURLDebug() } return hc }
// rawhandle wraps the command function handlers and sets up the
// environment but performs no output formatting.
// It resolves peers from the flag/env/default chain, builds a legacy etcd
// client, optionally syncs the cluster, then invokes fn.
func rawhandle(c *cli.Context, fn handlerFunc) (*etcd.Response, error) {
	sync := !c.GlobalBool("no-sync")

	peerstr := c.GlobalString("peers")

	// Use an environment variable if nothing was supplied on the
	// command line
	if peerstr == "" {
		peerstr = os.Getenv("ETCDCTL_PEERS")
	}

	// If we still don't have peers, use a default
	if peerstr == "" {
		peerstr = "127.0.0.1:4001"
	}

	peers := strings.Split(peerstr, ",")

	// If no sync, create http path for each peer address
	// (peers that fail createHttpPath are warned about and dropped).
	if !sync {
		revisedPeers := make([]string, 0)
		for _, peer := range peers {
			if revisedPeer, err := createHttpPath(peer); err != nil {
				fmt.Fprintf(os.Stderr, "Unsupported url %v: %v\n", peer, err)
			} else {
				revisedPeers = append(revisedPeers, revisedPeer)
			}
		}
		peers = revisedPeers
	}

	client := etcd.NewClient(peers)

	// NOTE(review): dumpCURL presumably streams the client's request trace
	// to stderr for --debug; confirm against its definition.
	if c.GlobalBool("debug") {
		go dumpCURL(client)
	}

	// Sync cluster.
	if sync {
		if ok := client.SyncCluster(); !ok {
			handleError(FailedToConnectToHost, errors.New("Cannot sync with the cluster using peers "+strings.Join(peers, ", ")))
		}
	}

	if c.GlobalBool("debug") {
		fmt.Fprintf(os.Stderr, "Cluster-Peers: %s\n", strings.Join(client.GetCluster(), " "))
	}

	// Execute handler function.
	return fn(c, client)
}
func actionMemberUpdate(c *cli.Context) { args := c.Args() if len(args) != 2 { fmt.Fprintln(os.Stderr, "Provide an ID and a list of comma separated peerURL (0xabcd http://example.com,http://example1.com)") os.Exit(1) } mAPI := mustNewMembersAPI(c) mid := args[0] urls := args[1] ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout) err := mAPI.Update(ctx, mid, strings.Split(urls, ",")) cancel() if err != nil { fmt.Fprintln(os.Stderr, err.Error()) os.Exit(1) } fmt.Printf("Updated member with ID %s in cluster\n", mid) }
// makeCommandFunc executes the "make" command. func makeCommandFunc(c *cli.Context, client *etcd.Client) (*etcd.Response, error) { if len(c.Args()) == 0 { return nil, errors.New("Key required") } key := c.Args()[0] value, err := argOrStdin(c.Args(), os.Stdin, 1) if err != nil { return nil, errors.New("Value required") } ttl := c.Int("ttl") return client.Create(key, value, uint64(ttl)) }