Example #1
func actionUserPasswd(c *cli.Context) {
	api, user := mustUserAPIAndName(c)
	ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
	currentUser, err := api.GetUser(ctx, user)
	cancel()
	if currentUser == nil {
		fmt.Fprintln(os.Stderr, err.Error())
		os.Exit(1)
	}
	pass, err := speakeasy.Ask("New password: ")
	if err != nil {
		fmt.Fprintln(os.Stderr, "Error reading password:", err)
		os.Exit(1)
	}

	ctx, cancel = context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
	_, err = api.ChangePassword(ctx, user, pass)
	cancel()
	if err != nil {
		fmt.Fprintln(os.Stderr, err.Error())
		os.Exit(1)
	}

	fmt.Printf("Password updated\n")
}
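All of the examples on this page share one idiom: derive a context with context.WithTimeout, make the call, then invoke cancel to release the deadline timer. A minimal self-contained sketch of that pattern (doRequest and both durations are illustrative, not from etcd):

package main

import (
	"context"
	"fmt"
	"time"
)

// doRequest stands in for any client call that honors context cancellation.
func doRequest(ctx context.Context) error {
	select {
	case <-time.After(50 * time.Millisecond): // simulated work
		return nil
	case <-ctx.Done():
		return ctx.Err() // context.DeadlineExceeded once the timeout fires
	}
}

func main() {
	// Bound the call to one second; cancel releases the timer even on success.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if err := doRequest(ctx); err != nil {
		fmt.Println("request failed:", err)
		return
	}
	fmt.Println("request succeeded")
}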
Example #2
// Leases returns a map of all assigned leases, keyed by each lease's IP address string
func (p *LeasePool) Leases() (map[string]Lease, error) {
	p.dataLock.Lock()
	defer p.dataLock.Unlock()
	leases := make(map[string]Lease, 10)

	ctxGet, cancelGet := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancelGet()
	response, err := p.dataSource.Get(ctxGet, path.Join(p.etcdDir, "/leases"), &etcd.GetOptions{Recursive: true})

	if err != nil {
		etcdError, found := err.(etcd.Error)
		if found && etcdError.Code == etcd.ErrorCodeKeyNotFound {
			// handle key not found
			ctxSet, cancelSet := context.WithTimeout(context.Background(), 2*time.Second)
			defer cancelSet()
			_, err := p.dataSource.Set(ctxSet, path.Join(p.etcdDir, "/leases"), "", &etcd.SetOptions{Dir: true})
			if err != nil {
				return nil, err
			}
			return leases, nil
		}
		return nil, err
	}
	for i := range response.Node.Nodes {
		var lease Lease
		err := json.Unmarshal([]byte(response.Node.Nodes[i].Value), &lease)
		if err == nil {
			leases[lease.IP.String()] = lease
		} else {
			return nil, ErrFoundInvalidLease
		}
	}
	return leases, nil
}
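The Lease type and ErrFoundInvalidLease come from elsewhere in the project. A hypothetical minimal shape, inferred only from how the snippet uses them: the map is keyed by lease.IP.String() and each node value is JSON-unmarshaled into a Lease (assumes the net, time, and errors imports; the real field set may differ):

// Hypothetical Lease shape inferred from the snippet above.
type Lease struct {
	IP     net.IP    `json:"ip"`
	MAC    string    `json:"mac"`
	Expire time.Time `json:"expire"`
}

// ErrFoundInvalidLease is returned when a stored value fails to unmarshal.
var ErrFoundInvalidLease = errors.New("lease pool: found invalid lease")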
Example #3
func ExampleKV_compact() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	kvc := clientv3.NewKV(cli)

	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	resp, err := kvc.Get(ctx, "foo")
	cancel()
	if err != nil {
		log.Fatal(err)
	}
	compRev := resp.Header.Revision // specify compact revision of your choice

	ctx, cancel = context.WithTimeout(context.Background(), requestTimeout)
	err = kvc.Compact(ctx, compRev)
	cancel()
	if err != nil {
		log.Fatal(err)
	}
}
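Once a revision is compacted, reads at or below it fail. A hedged follow-up sketch under the same assumptions as the example above (requestTimeout, the "foo" key, and demoCompactedRead itself are illustrative):

// demoCompactedRead shows that a Get pinned to a compacted revision errors out.
func demoCompactedRead(kvc clientv3.KV, compRev int64) {
	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	defer cancel()
	_, err := kvc.Get(ctx, "foo", clientv3.WithRev(compRev-1))
	if err != nil {
		// typically "etcdserver: mvcc: required revision has been compacted"
		fmt.Println(err)
	}
}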
Example #4
func ExampleKV_getSortedPrefix() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	kvc := clientv3.NewKV(cli)

	for i := range make([]int, 3) {
		ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
		_, err = kvc.Put(ctx, fmt.Sprintf("key_%d", i), "value")
		cancel()
		if err != nil {
			log.Fatal(err)
		}
	}

	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	resp, err := kvc.Get(ctx, "key", clientv3.WithPrefix(), clientv3.WithSort(clientv3.SortByKey, clientv3.SortDescend))
	cancel()
	if err != nil {
		log.Fatal(err)
	}
	for _, ev := range resp.Kvs {
		fmt.Printf("%s : %s\n", ev.Key, ev.Value)
	}
	// key_2 : value
	// key_1 : value
	// key_0 : value
}
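clientv3.WithSort accepts other targets and orders as well. A hedged variant of the same prefix query, ascending by modification revision (reuses the kvc and requestTimeout assumptions above; getByModRevision is illustrative):

// getByModRevision fetches all keys under "key", oldest modification first.
func getByModRevision(kvc clientv3.KV) (*clientv3.GetResponse, error) {
	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	defer cancel()
	return kvc.Get(ctx, "key",
		clientv3.WithPrefix(),
		clientv3.WithSort(clientv3.SortByModRevision, clientv3.SortAscend))
}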
Example #5
func (d *discovery) checkCluster() ([]*client.Node, int, uint64, error) {
	configKey := path.Join("/", d.cluster, "_config")
	ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
	// find cluster size
	resp, err := d.c.Get(ctx, path.Join(configKey, "size"), nil)
	cancel()
	if err != nil {
		if eerr, ok := err.(*client.Error); ok && eerr.Code == client.ErrorCodeKeyNotFound {
			return nil, 0, 0, ErrSizeNotFound
		}
		if err == client.ErrInvalidJSON {
			return nil, 0, 0, ErrBadDiscoveryEndpoint
		}
		if ce, ok := err.(*client.ClusterError); ok {
			plog.Error(ce.Detail())
			return d.checkClusterRetry()
		}
		return nil, 0, 0, err
	}
	size, err := strconv.Atoi(resp.Node.Value)
	if err != nil {
		return nil, 0, 0, ErrBadSizeKey
	}

	ctx, cancel = context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
	resp, err = d.c.Get(ctx, d.cluster, nil)
	cancel()
	if err != nil {
		if ce, ok := err.(*client.ClusterError); ok {
			plog.Error(ce.Detail())
			return d.checkClusterRetry()
		}
		return nil, 0, 0, err
	}
	nodes := make([]*client.Node, 0)
	// append non-config keys to nodes
	for _, n := range resp.Node.Nodes {
		if path.Base(n.Key) != path.Base(configKey) {
			nodes = append(nodes, n)
		}
	}

	snodes := sortableNodes{nodes}
	sort.Sort(snodes)

	// find self position
	for i := range nodes {
		if path.Base(nodes[i].Key) == path.Base(d.selfKey()) {
			break
		}
		if i >= size-1 {
			return nodes[:size], size, resp.Index, ErrFullCluster
		}
	}
	return nodes, size, resp.Index, nil
}
Example #6
func (d *discovery) checkCluster() (client.Nodes, int, error) {
	configKey := path.Join("/", d.cluster, "_config")
	ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
	// find cluster size
	resp, err := d.c.Get(ctx, path.Join(configKey, "size"))
	cancel()
	if err != nil {
		if err == client.ErrKeyNoExist {
			return nil, 0, ErrSizeNotFound
		}
		if err == client.ErrTimeout {
			return d.checkClusterRetry()
		}
		return nil, 0, err
	}
	size, err := strconv.Atoi(resp.Node.Value)
	if err != nil {
		return nil, 0, ErrBadSizeKey
	}

	ctx, cancel = context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
	resp, err = d.c.Get(ctx, d.cluster)
	cancel()
	if err != nil {
		if err == client.ErrTimeout {
			return d.checkClusterRetry()
		}
		return nil, 0, err
	}
	nodes := make(client.Nodes, 0)
	// append non-config keys to nodes
	for _, n := range resp.Node.Nodes {
		if path.Base(n.Key) != path.Base(configKey) {
			nodes = append(nodes, n)
		}
	}

	snodes := sortableNodes{nodes}
	sort.Sort(snodes)

	// find self position
	for i := range nodes {
		if path.Base(nodes[i].Key) == path.Base(d.selfKey()) {
			break
		}
		if i >= size-1 {
			return nodes[:size], size, ErrFullCluster
		}
	}
	return nodes, size, nil
}
Example #7
// checkConsistency stops the cluster for a moment and gets the hashes of the KV storages.
func (c *cluster) checkConsistency() error {
	hashes := make(map[string]uint32)
	for _, u := range c.GRPCURLs {
		conn, err := grpc.Dial(u, grpc.WithInsecure(), grpc.WithTimeout(5*time.Second))
		if err != nil {
			return err
		}
		kvc := pb.NewKVClient(conn)

		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		resp, err := kvc.Hash(ctx, &pb.HashRequest{})
		cancel()
		if err != nil {
			return err
		}

		hashes[u] = resp.Hash
	}

	if !checkConsistency(hashes) {
		return fmt.Errorf("check consistency fails: %v", hashes)
	}
	return nil
}
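The package-level checkConsistency helper called at the end is not shown. A hypothetical implementation consistent with its use above, where every member must report the same KV hash:

// Hypothetical helper: true only if every member reported an identical hash.
func checkConsistency(hashes map[string]uint32) bool {
	var first uint32
	seen := false
	for _, h := range hashes {
		if !seen {
			first, seen = h, true
		} else if h != first {
			return false
		}
	}
	return true
}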
Example #8
func TestEmptyAddrs(t *testing.T) {
	servers, r := startServers(t, 1, 0, math.MaxUint32)
	cc, err := Dial("foo.bar.com", WithPicker(NewUnicastNamingPicker(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{}))
	if err != nil {
		t.Fatalf("Failed to create ClientConn: %v", err)
	}
	var reply string
	if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc); err != nil || reply != expectedResponse {
		t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want <nil>", err)
	}
	// Inject name resolution change to remove the server address so that there is no address
	// available after that.
	var updates []*naming.Update
	updates = append(updates, &naming.Update{
		Op:   naming.Delete,
		Addr: "127.0.0.1:" + servers[0].port,
	})
	r.w.inject(updates)
	// Loop until the above updates apply.
	for {
		time.Sleep(10 * time.Millisecond)
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
		err := Invoke(ctx, "/foo/bar", &expectedRequest, &reply, cc)
		cancel() // release the timer each iteration instead of leaking it
		if err != nil {
			break
		}
	}
	cc.Close()
	servers[0].stop()
}
Example #9
func mustNewMembersAPI(c *cli.Context) client.MembersAPI {
	eps, err := getEndpoints(c)
	if err != nil {
		fmt.Fprintln(os.Stderr, err.Error())
		os.Exit(1)
	}

	tr, err := getTransport(c)
	if err != nil {
		fmt.Fprintln(os.Stderr, err.Error())
		os.Exit(1)
	}

	hc, err := client.NewHTTPClient(tr, eps)
	if err != nil {
		fmt.Fprintln(os.Stderr, err.Error())
		os.Exit(1)
	}

	if !c.GlobalBool("no-sync") {
		ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
		err := hc.Sync(ctx)
		cancel()
		if err != nil {
			fmt.Fprintln(os.Stderr, err.Error())
			os.Exit(1)
		}
	}

	if c.GlobalBool("debug") {
		fmt.Fprintf(os.Stderr, "Cluster-Endpoints: %s\n", strings.Join(hc.Endpoints(), ", "))
	}

	return client.NewMembersAPI(hc)
}
Example #10
func (c *cluster) RemoveMember(t *testing.T, id uint64) {
	// send remove request to the cluster
	cc := mustNewHTTPClient(t, []string{c.URL(0)})
	ma := client.NewMembersAPI(cc)
	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	if err := ma.Remove(ctx, types.ID(id).String()); err != nil {
		t.Fatalf("unexpected remove error %v", err)
	}
	cancel()
	newMembers := make([]*member, 0)
	for _, m := range c.Members {
		if uint64(m.s.ID()) != id {
			newMembers = append(newMembers, m)
		} else {
			select {
			case <-m.s.StopNotify():
				m.Terminate(t)
			// stop delay / election timeout + 1s disk and network delay
			case <-time.After(time.Duration(electionTicks)*tickDuration + time.Second):
				t.Fatalf("failed to remove member %s in time", m.s.ID())
			}
		}
	}
	c.Members = newMembers
	c.waitMembersMatch(t, c.HTTPMembers())
}
Example #11
func (c *cluster) AddMember(t *testing.T) {
	clusterStr := c.Members[0].Cluster.String()
	idx := len(c.Members)
	m := mustNewMember(t, c.name(idx))

	// send add request to the cluster
	cc := mustNewHTTPClient(t, []string{c.URL(0)})
	ma := client.NewMembersAPI(cc)
	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	peerURL := "http://" + m.PeerListeners[0].Addr().String()
	if _, err := ma.Add(ctx, peerURL); err != nil {
		t.Fatalf("add member on %s error: %v", c.URL(0), err)
	}
	cancel()

	// wait for the add-node entry to be applied in the cluster
	members := append(c.HTTPMembers(), httptypes.Member{PeerURLs: []string{peerURL}, ClientURLs: []string{}})
	c.waitMembersMatch(t, members)

	for _, ln := range m.PeerListeners {
		clusterStr += fmt.Sprintf(",%s=http://%s", m.Name, ln.Addr().String())
	}
	var err error
	m.Cluster, err = etcdserver.NewClusterFromString(clusterName, clusterStr)
	if err != nil {
		t.Fatal(err)
	}
	m.NewCluster = false
	if err := m.Launch(); err != nil {
		t.Fatal(err)
	}
	c.Members = append(c.Members, m)
	// wait for the cluster to be stable before receiving future client requests
	c.waitMembersMatch(t, c.HTTPMembers())
}
Example #12
// TestV3WatchInvalidFutureRevision ensures that a Watch request with an invalid
// future revision returns a WatchResponse with Created and Canceled both true.
func TestV3WatchInvalidFutureRevision(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	wStream, wErr := toGRPC(clus.RandClient()).Watch.Watch(ctx)
	if wErr != nil {
		t.Fatalf("wAPI.Watch error: %v", wErr)
	}

	wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
		CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: 100}}}
	if err := wStream.Send(wreq); err != nil {
		t.Fatalf("watch request failed (%v)", err)
	}

	resp, err := wStream.Recv()
	if err != nil {
		t.Errorf("wStream.Recv error: %v", err)
	}
	if resp.WatchId != -1 || !resp.Created || !resp.Canceled || len(resp.Events) != 0 {
		t.Errorf("invalid start-rev expected -1, true, true, 0, but got %d, %v, %v, %d",
			resp.WatchId, resp.Created, resp.Canceled, len(resp.Events))
	}
}
Example #13
func TestHTTPClusterClientDoDeadlineExceedContext(t *testing.T) {
	fakeURL := url.URL{}
	tr := newFakeTransport()
	tr.finishCancel <- struct{}{}
	c := &httpClusterClient{
		clientFactory: newHTTPClientFactory(tr, DefaultCheckRedirect, 0),
		endpoints:     []url.URL{fakeURL},
	}

	errc := make(chan error)
	go func() {
		ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
		defer cancel()
		_, _, err := c.Do(ctx, &fakeAction{})
		errc <- err
	}()

	select {
	case err := <-errc:
		if err != context.DeadlineExceeded {
			t.Errorf("err = %+v, want %+v", err, context.DeadlineExceeded)
		}
	case <-time.After(time.Second):
		t.Fatalf("unexpected timeout when waitting for request to deadline exceed")
	}
}
Example #14
// TODO(zhaoq): Have a better test coverage of timeout and cancellation mechanism.
func testRPCTimeout(t *testing.T, e env) {
	s, addr := serverSetUp(t, nil, math.MaxUint32, nil, nil, e)
	cc := clientSetUp(t, addr, nil, nil, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	argSize := 2718
	respSize := 314

	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize))
	if err != nil {
		t.Fatal(err)
	}

	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      payload,
	}
	for i := -1; i <= 10; i++ {
		ctx, _ := context.WithTimeout(context.Background(), time.Duration(i)*time.Millisecond)
		reply, err := tc.UnaryCall(ctx, req)
		if grpc.Code(err) != codes.DeadlineExceeded {
			t.Fatalf(`TestService/UnaryCallv(_, _) = %v, %v; want <nil>, error code: %d`, reply, err, codes.DeadlineExceeded)
		}
	}
}
Example #15
func getPeersFromDiscoveryURL(discoverURL string) ([]string, error) {
	if discoverURL == "" {
		return nil, nil
	}

	u, err := url.Parse(discoverURL)
	if err != nil {
		return nil, err
	}
	token := u.Path
	u.Path = ""
	c, err := client.NewHTTPClient(&http.Transport{}, []string{u.String()})
	if err != nil {
		return nil, err
	}
	dc := client.NewDiscoveryKeysAPI(c)

	ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
	resp, err := dc.Get(ctx, token)
	cancel()
	if err != nil {
		return nil, err
	}
	peers := make([]string, 0)
	// append non-config keys to peers
	for _, n := range resp.Node.Nodes {
		if g := path.Base(n.Key); g == "_config" || g == "_state" {
			continue
		}
		peers = append(peers, n.Value)
	}
	return peers, nil
}
Example #16
// publish registers server information into the cluster. The information
// is the JSON representation of this server's member struct, updated with the
// static clientURLs of the server.
// The function keeps attempting to register until it succeeds,
// or its server is stopped.
func (s *EtcdServer) publish(timeout time.Duration) {
	b, err := json.Marshal(s.attributes)
	if err != nil {
		plog.Panicf("json marshal error: %v", err)
		return
	}
	req := pb.Request{
		Method: "PUT",
		Path:   MemberAttributesStorePath(s.id),
		Val:    string(b),
	}

	for {
		ctx, cancel := context.WithTimeout(context.Background(), timeout)
		_, err := s.Do(ctx, req)
		cancel()
		switch err {
		case nil:
			plog.Infof("published %+v to cluster %s", s.attributes, s.cluster.ID())
			return
		case ErrStopped:
			plog.Infof("aborting publish because server is stopped")
			return
		default:
			plog.Errorf("publish error: %v", err)
		}
	}
}
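The publish loop above is an instance of a reusable shape: a fresh deadline per attempt, retrying transient failures until success or a terminal error. A standalone sketch (retryWithTimeout and this ErrStopped are local stand-ins, not the etcdserver symbols):

var ErrStopped = errors.New("server stopped") // stand-in terminal error

// retryWithTimeout runs op under a fresh per-attempt deadline until it
// succeeds or fails terminally; any other error triggers another attempt.
func retryWithTimeout(op func(context.Context) error, timeout time.Duration) error {
	for {
		ctx, cancel := context.WithTimeout(context.Background(), timeout)
		err := op(ctx)
		cancel()
		switch err {
		case nil, ErrStopped:
			return err
		default:
			// transient failure: loop again with a new deadline
		}
	}
}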
Example #17
func (c *cluster) RemoveMember(t *testing.T, id uint64) {
	// send remove request to the cluster
	cc := mustNewHTTPClient(t, c.URLs(), c.cfg.ClientTLS)
	ma := client.NewMembersAPI(cc)
	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	if err := ma.Remove(ctx, types.ID(id).String()); err != nil {
		t.Fatalf("unexpected remove error %v", err)
	}
	cancel()
	newMembers := make([]*member, 0)
	for _, m := range c.Members {
		if uint64(m.s.ID()) != id {
			newMembers = append(newMembers, m)
		} else {
			select {
			case <-m.s.StopNotify():
				m.Terminate(t)
			// 1s stop delay + election timeout + 1s disk and network delay + connection write timeout
			// TODO: remove connection write timeout by selecting on http response closeNotifier
			// blocking on https://github.com/golang/go/issues/9524
			case <-time.After(time.Second + time.Duration(electionTicks)*tickDuration + time.Second + rafthttp.ConnWriteTimeout):
				t.Fatalf("failed to remove member %s in time", m.s.ID())
			}
		}
	}
	c.Members = newMembers
	c.waitMembersMatch(t, c.HTTPMembers())
}
Example #18
// publish registers server information into the cluster. The information
// is the JSON representation of this server's member struct, updated with the
// static clientURLs of the server.
// The function keeps attempting to register until it succeeds,
// or its server is stopped.
func (s *EtcdServer) publish(retryInterval time.Duration) {
	b, err := json.Marshal(s.attributes)
	if err != nil {
		log.Printf("etcdserver: json marshal error: %v", err)
		return
	}
	req := pb.Request{
		ID:     GenID(),
		Method: "PUT",
		Path:   MemberAttributesStorePath(s.id),
		Val:    string(b),
	}

	for {
		ctx, cancel := context.WithTimeout(context.Background(), retryInterval)
		_, err := s.Do(ctx, req)
		cancel()
		switch err {
		case nil:
			log.Printf("etcdserver: published %+v to cluster %s", s.attributes, s.Cluster.ID())
			return
		case ErrStopped:
			log.Printf("etcdserver: aborting publish because server is stopped")
			return
		default:
			log.Printf("etcdserver: publish error: %v", err)
		}
	}
}
Example #19
// Ensure etcd will not panic when removing a just started member.
func TestIssue2904(t *testing.T) {
	defer testutil.AfterTest(t)
	// start 1-member cluster to ensure member 0 is the leader of the cluster.
	c := NewCluster(t, 1)
	c.Launch(t)
	defer c.Terminate(t)

	c.AddMember(t)
	c.Members[1].Stop(t)

	// send remove member-1 request to the cluster.
	cc := mustNewHTTPClient(t, c.URLs(), nil)
	ma := client.NewMembersAPI(cc)
	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	// the proposal is not committed because member 1 is stopped, but the
	// proposal is appended to the leader's raft log.
	ma.Remove(ctx, c.Members[1].s.ID().String())
	cancel()

	// restart the member, and expect it to send an UpdateAttributes request.
	// the log in the leader is like this:
	// [..., remove 1, ..., update attr 1, ...]
	c.Members[1].Restart(t)
	// when the member comes back, it acks the proposal to remove itself,
	// and applies it.
	<-c.Members[1].s.StopNotify()

	// terminate removed member
	c.Members[1].Terminate(t)
	c.Members = c.Members[:1]
	// wait for the member to be removed.
	c.waitMembersMatch(t, c.HTTPMembers())
}
Example #20
func (c *cluster) addMember(t *testing.T) {
	m := c.mustNewMember(t)

	scheme := "http"
	if c.cfg.PeerTLS != nil {
		scheme = "https"
	}

	// send add request to the cluster
	cc := mustNewHTTPClient(t, []string{c.URL(0)}, c.cfg.ClientTLS)
	ma := client.NewMembersAPI(cc)
	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	peerURL := scheme + "://" + m.PeerListeners[0].Addr().String()
	if _, err := ma.Add(ctx, peerURL); err != nil {
		t.Fatalf("add member on %s error: %v", c.URL(0), err)
	}
	cancel()

	// wait for the add-node entry to be applied in the cluster
	members := append(c.HTTPMembers(), client.Member{PeerURLs: []string{peerURL}, ClientURLs: []string{}})
	c.waitMembersMatch(t, members)

	m.InitialPeerURLsMap = types.URLsMap{}
	for _, mm := range c.Members {
		m.InitialPeerURLsMap[mm.Name] = mm.PeerURLs
	}
	m.InitialPeerURLsMap[m.Name] = m.PeerURLs
	m.NewCluster = false
	if err := m.Launch(); err != nil {
		t.Fatal(err)
	}
	c.Members = append(c.Members, m)
	// wait for the cluster to be stable before receiving future client requests
	c.waitMembersMatch(t, c.HTTPMembers())
}
Example #21
func ExampleKV_get() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	kvc := clientv3.NewKV(cli)

	_, err = kvc.Put(context.TODO(), "foo", "bar")
	if err != nil {
		log.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	resp, err := kvc.Get(ctx, "foo")
	cancel()
	if err != nil {
		log.Fatal(err)
	}
	for _, ev := range resp.Kvs {
		fmt.Printf("%s : %s\n", ev.Key, ev.Value)
	}
	// foo : bar
}
Example #22
func (s *store) ensureAuthDirectories() error {
	if s.ensuredOnce {
		return nil
	}
	for _, res := range []string{StorePermsPrefix, StorePermsPrefix + "/users/", StorePermsPrefix + "/roles/"} {
		ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
		pe := false
		rr := etcdserverpb.Request{
			Method:    "PUT",
			Path:      res,
			Dir:       true,
			PrevExist: &pe,
		}
		_, err := s.server.Do(ctx, rr)
		cancel() // cancel per iteration rather than deferring inside the loop
		if err != nil {
			if e, ok := err.(*etcderr.Error); ok {
				if e.ErrorCode == etcderr.EcodeNodeExist {
					continue
				}
			}
			plog.Errorf("failed to create auth directories in the store (%v)", err)
			return err
		}
	}
	ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
	defer cancel()
	pe := false
	rr := etcdserverpb.Request{
		Method:    "PUT",
		Path:      StorePermsPrefix + "/enabled",
		Val:       "false",
		PrevExist: &pe,
	}
	_, err := s.server.Do(ctx, rr)
	if err != nil {
		if e, ok := err.(*etcderr.Error); ok {
			if e.ErrorCode == etcderr.EcodeNodeExist {
				s.ensuredOnce = true
				return nil
			}
		}
		return err
	}
	s.ensuredOnce = true
	return nil
}
Example #23
// TestV3WatchFutureRevision tests Watch APIs from a future revision.
func TestV3WatchFutureRevision(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	wAPI := toGRPC(clus.RandClient()).Watch
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	wStream, err := wAPI.Watch(ctx)
	if err != nil {
		t.Fatalf("wAPI.Watch error: %v", err)
	}

	wkey := []byte("foo")
	wrev := int64(10)
	req := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
		CreateRequest: &pb.WatchCreateRequest{Key: wkey, StartRevision: wrev}}}
	err = wStream.Send(req)
	if err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}

	// ensure watcher request created a new watcher
	cresp, err := wStream.Recv()
	if err != nil {
		t.Fatalf("wStream.Recv error: %v", err)
	}
	if !cresp.Created {
		t.Fatalf("created = %v, want %v", cresp.Created, true)
	}

	kvc := toGRPC(clus.RandClient()).KV
	for {
		req := &pb.PutRequest{Key: wkey, Value: []byte("bar")}
		resp, rerr := kvc.Put(context.TODO(), req)
		if rerr != nil {
			t.Fatalf("couldn't put key (%v)", rerr)
		}
		if resp.Header.Revision == wrev {
			break
		}
	}

	// ensure the watcher receives the event at the watch revision
	cresp, err = wStream.Recv()
	if err != nil {
		t.Fatalf("wStream.Recv error: %v", err)
	}
	if cresp.Header.Revision != wrev {
		t.Fatalf("revision = %d, want %d", cresp.Header.Revision, wrev)
	}
	if len(cresp.Events) != 1 {
		t.Fatalf("failed to receive events")
	}
	if cresp.Events[0].Kv.ModRevision != wrev {
		t.Errorf("mod revision = %d, want %d", cresp.Events[0].Kv.ModRevision, wrev)
	}
}
Example #24
func actionMemberAdd(c *cli.Context) {
	args := c.Args()
	if len(args) != 2 {
		fmt.Fprintln(os.Stderr, "Provide a name and a single member peerURL")
		os.Exit(1)
	}

	mAPI := mustNewMembersAPI(c)

	url := args[1]
	ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
	m, err := mAPI.Add(ctx, url)
	cancel()
	if err != nil {
		fmt.Fprintln(os.Stderr, err.Error())
		os.Exit(1)
	}

	newID := m.ID
	newName := args[0]
	fmt.Printf("Added member named %s with ID %s to cluster\n", newName, newID)

	ctx, cancel = context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
	members, err := mAPI.List(ctx)
	cancel()
	if err != nil {
		fmt.Fprintln(os.Stderr, err.Error())
		os.Exit(1)
	}

	conf := []string{}
	for _, memb := range members {
		for _, u := range memb.PeerURLs {
			n := memb.Name
			if memb.ID == newID {
				n = newName
			}
			conf = append(conf, fmt.Sprintf("%s=%s", n, u))
		}
	}

	fmt.Print("\n")
	fmt.Printf("ETCD_NAME=%q\n", newName)
	fmt.Printf("ETCD_INITIAL_CLUSTER=%q\n", strings.Join(conf, ","))
	fmt.Printf("ETCD_INITIAL_CLUSTER_STATE=\"existing\"\n")
}
Example #25
func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if !allowMethod(w, r.Method, "HEAD", "GET", "PUT", "POST", "DELETE") {
		return
	}

	w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String())

	ctx, cancel := context.WithTimeout(context.Background(), h.timeout)
	defer cancel()
	clock := clockwork.NewRealClock()
	startTime := clock.Now()
	rr, err := parseKeyRequest(r, clock)
	if err != nil {
		writeKeyError(w, err)
		return
	}
	// The path must be valid at this point (we've parsed the request successfully).
	if !hasKeyPrefixAccess(h.sec, r, r.URL.Path[len(keysPrefix):], rr.Recursive) {
		writeKeyNoAuth(w)
		return
	}
	if !rr.Wait {
		reportRequestReceived(rr)
	}
	resp, err := h.server.Do(ctx, rr)
	if err != nil {
		err = trimErrorPrefix(err, etcdserver.StoreKeysPrefix)
		writeKeyError(w, err)
		reportRequestFailed(rr, err)
		return
	}
	switch {
	case resp.Event != nil:
		if err := writeKeyEvent(w, resp.Event, h.timer); err != nil {
			// Should never be reached
			plog.Errorf("error writing event (%v)", err)
		}
		reportRequestCompleted(rr, resp, startTime)
	case resp.Watcher != nil:
		ctx, cancel := context.WithTimeout(context.Background(), defaultWatchTimeout)
		defer cancel()
		handleKeyWatch(ctx, w, resp.Watcher, rr.Stream, h.timer)
	default:
		writeKeyError(w, errors.New("received response with no Event/Watcher!"))
	}
}
Example #26
func healthCheck(t time.Duration, cc *grpc.ClientConn, serviceName string) (*healthpb.HealthCheckResponse, error) {
	ctx, cancel := context.WithTimeout(context.Background(), t)
	defer cancel()
	hc := healthpb.NewHealthClient(cc)
	req := &healthpb.HealthCheckRequest{
		Service: serviceName,
	}
	return hc.Check(ctx, req)
}
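A hedged usage sketch for healthCheck above (the address is a placeholder; an empty service name queries the server's overall health; assumes the grpc and healthpb imports used above plus fmt, log, and time):

func checkServerHealth() {
	conn, err := grpc.Dial("127.0.0.1:2379", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	resp, err := healthCheck(time.Second, conn, "")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("health status:", resp.Status)
}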
Example #27
func actionMemberRemove(c *cli.Context) {
	args := c.Args()
	if len(args) != 1 {
		fmt.Fprintln(os.Stderr, "Provide a single member ID")
		os.Exit(1)
	}
	removalID := args[0]

	mAPI := mustNewMembersAPI(c)
	// Get the list of members.
	listctx, listCancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
	members, err := mAPI.List(listctx)
	listCancel()
	if err != nil {
		fmt.Fprintln(os.Stderr, "Error while verifying ID against known members:", err.Error())
		os.Exit(1)
	}
	// Sanity check the input.
	foundID := false
	for _, m := range members {
		if m.ID == removalID {
			foundID = true
		}
		if m.Name == removalID {
			// Note that, so long as it's not ambiguous, we *could* do the right thing by name here.
			fmt.Fprintf(os.Stderr, "Found a member named %s; if this is correct, please use its ID, eg:\n\tetcdctl member remove %s\n", m.Name, m.ID)
			fmt.Fprintf(os.Stderr, "For more details, read the documentation at https://github.com/coreos/etcd/blob/master/Documentation/runtime-configuration.md#remove-a-member\n\n")
		}
	}
	if !foundID {
		fmt.Fprintf(os.Stderr, "Couldn't find a member in the cluster with an ID of %s.\n", removalID)
		os.Exit(1)
	}

	// Actually attempt to remove the member.
	ctx, removeCancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
	err = mAPI.Remove(ctx, removalID)
	removeCancel()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Recieved an error trying to remove member %s: %s", removalID, err.Error())
		os.Exit(1)
	}

	fmt.Printf("Removed member %s from cluster\n", removalID)
}
Example #28
File: util.go Project: lrita/etcd
func mustNewClient(c *cli.Context) client.Client {
	hc, err := newClient(c)
	if err != nil {
		fmt.Fprintln(os.Stderr, err.Error())
		os.Exit(1)
	}

	debug := c.GlobalBool("debug")
	if debug {
		client.EnablecURLDebug()
	}

	if !c.GlobalBool("no-sync") {
		if debug {
			fmt.Fprintf(os.Stderr, "start to sync cluster using endpoints(%s)\n", strings.Join(hc.Endpoints(), ","))
		}
		ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
		err := hc.Sync(ctx)
		cancel()
		if err != nil {
			if err == client.ErrNoEndpoints {
				fmt.Fprintf(os.Stderr, "etcd cluster has no published client endpoints.\n")
				fmt.Fprintf(os.Stderr, "Try '--no-sync' if you want to access non-published client endpoints(%s).\n", strings.Join(hc.Endpoints(), ","))
				handleError(ExitServerError, err)
			}

			if isConnectionError(err) {
				handleError(ExitBadConnection, err)
			}

			// fall back to syncing the cluster via the peer API; this keeps etcdctl working with etcd 0.4.x.
			// TODO: remove this when we deprecate the support for etcd 0.4.
			eps, serr := syncWithPeerAPI(c, ctx, hc.Endpoints())
			if serr != nil {
				if isConnectionError(serr) {
					handleError(ExitBadConnection, serr)
				} else {
					handleError(ExitServerError, serr)
				}
			}
			err = hc.SetEndpoints(eps)
			if err != nil {
				handleError(ExitServerError, err)
			}
		}
		if debug {
			fmt.Fprintf(os.Stderr, "got endpoints(%s) after sync\n", strings.Join(hc.Endpoints(), ","))
		}
	}

	if debug {
		fmt.Fprintf(os.Stderr, "Cluster-Endpoints: %s\n", strings.Join(hc.Endpoints(), ", "))
	}

	return hc
}
Example #29
// GetValue normalizes the key parameter, retrieves the value from etcd, and returns it
func (rc *RuntimeConfiguration) GetValue(confCtx *cloudconfig.ConfigContext, key string) (string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	response, err := rc.DataSource.Get(ctx, rc.parseKey(key), nil)
	if err != nil {
		return "", err
	}
	return response.Node.Value, nil
}
Example #30
func (f *Flags) Set(confCtx *cloudconfig.ConfigContext, key string, value string) error {
	key, err := f.parseKey(confCtx, key)
	if err != nil {
		return err
	}
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	_, err = f.kapi.Set(ctx, key, value, nil)
	return err
}
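A hedged read-side counterpart to Set, under the same assumptions (f.kapi is an etcd client.KeysAPI and parseKey normalizes the key exactly as above); this Get is a sketch, not part of the original project:

// Get reads a flag back under the same 3-second bound used by Set.
func (f *Flags) Get(confCtx *cloudconfig.ConfigContext, key string) (string, error) {
	key, err := f.parseKey(confCtx, key)
	if err != nil {
		return "", err
	}
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	resp, err := f.kapi.Get(ctx, key, nil)
	if err != nil {
		return "", err
	}
	return resp.Node.Value, nil
}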