Example #1
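// TestKVDelete ensures that deleting a key bumps the store revision and that the key is no longer returned by Get.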
func TestKVDelete(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	kv := clientv3.NewKV(clus.RandClient())
	ctx := context.TODO()

	presp, err := kv.Put(ctx, "foo", "")
	if err != nil {
		t.Fatalf("couldn't put 'foo' (%v)", err)
	}
	if presp.Header.Revision != 2 {
		t.Fatalf("presp.Header.Revision got %d, want %d", presp.Header.Revision, 2)
	}
	resp, err := kv.Delete(ctx, "foo")
	if err != nil {
		t.Fatalf("couldn't delete key (%v)", err)
	}
	if resp.Header.Revision != 3 {
		t.Fatalf("resp.Header.Revision got %d, want %d", resp.Header.Revision, 3)
	}
	gresp, err := kv.Get(ctx, "foo")
	if err != nil {
		t.Fatalf("couldn't get key (%v)", err)
	}
	if len(gresp.Kvs) > 0 {
		t.Fatalf("gresp.Kvs got %+v, want none", gresp.Kvs)
	}
}
Example #2
// TestDialSetEndpoints ensures SetEndpoints can replace unavailable endpoints with available ones.
func TestDialSetEndpoints(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// get endpoint list
	eps := make([]string, 3)
	for i := range eps {
		eps[i] = clus.Members[i].GRPCAddr()
	}
	toKill := rand.Intn(len(eps))

	cfg := clientv3.Config{Endpoints: []string{eps[toKill]}, DialTimeout: 1 * time.Second}
	cli, err := clientv3.New(cfg)
	if err != nil {
		t.Fatal(err)
	}
	defer cli.Close()

	// make a dead node
	clus.Members[toKill].Stop(t)
	clus.WaitLeader(t)

	// update client with available endpoints
	cli.SetEndpoints(eps[(toKill+1)%3])

	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	if _, err = cli.Get(ctx, "foo", clientv3.WithSerializable()); err != nil {
		t.Fatal(err)
	}
	cancel()
}
Example #3
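// TestTxnWriteFail ensures a txn write against a stopped member returns an error and that the put does not take effect.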
func TestTxnWriteFail(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	kv := clientv3.NewKV(clus.Client(0))
	ctx := context.TODO()

	clus.Members[0].Stop(t)
	<-clus.Members[0].StopNotify()

	resp, err := kv.Txn(ctx).Then(clientv3.OpPut("foo", "bar")).Commit()
	if err == nil {
		t.Fatalf("expected error, got response %v", resp)
	}

	// reconnect so cluster terminate doesn't complain about double-close
	clus.Members[0].Restart(t)

	// and ensure the put didn't take
	gresp, gerr := kv.Get(ctx, "foo")
	if gerr != nil {
		t.Fatal(gerr)
	}
	if len(gresp.Kvs) != 0 {
		t.Fatalf("expected no keys, got %v", gresp.Kvs)
	}
}
Example #4
// TestWatchWithCreatedNotificationDropConn ensures that
// a watcher with created notify does not post duplicate
// created events from disconnect.
func TestWatchWithCreatedNotificationDropConn(t *testing.T) {
	cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer cluster.Terminate(t)

	client := cluster.RandClient()

	wch := client.Watch(context.Background(), "a", clientv3.WithCreatedNotify())

	resp := <-wch

	if !resp.Created {
		t.Fatalf("expected created event, got %v", resp)
	}

	cluster.Members[0].DropConnections()

	// try to receive from watch channel again
	// ensure it doesn't post another createNotify
	select {
	case wresp := <-wch:
		t.Fatalf("got unexpected watch response: %+v\n", wresp)
	case <-time.After(time.Second):
		// watcher may not reconnect by the time it hits the select,
		// so it wouldn't have a chance to filter out the second create event
	}
}
Example #5
// TestKVPutStoppedServerAndClose ensures closing after a failed Put works.
func TestKVPutStoppedServerAndClose(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	cli := clus.Client(0)
	clus.Members[0].Stop(t)

	ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
	// get retries on all errors.
	// so here we use it to eat the potential broken pipe error for the next put.
	// grpc client might see a broken pipe error when we issue the get request before
	// grpc finds out the original connection is down due to the member shutdown.
	_, err := cli.Get(ctx, "abc")
	cancel()
	if !strings.Contains(err.Error(), "context deadline") {
		t.Fatal(err)
	}

	// this Put fails and triggers an asynchronous connection retry
	_, err = cli.Put(ctx, "abc", "123")
	cancel()
	if !strings.Contains(err.Error(), "context deadline") {
		t.Fatal(err)
	}
}
Example #6
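// TestTLSConnection ensures the etcd3 storage backend can create an object against a TLS-enabled etcd cluster.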
func TestTLSConnection(t *testing.T) {
	certFile, keyFile, caFile := configureTLSCerts(t)

	tlsInfo := &transport.TLSInfo{
		CertFile: certFile,
		KeyFile:  keyFile,
		CAFile:   caFile,
	}

	cluster := integration.NewClusterV3(t, &integration.ClusterConfig{
		Size:      1,
		ClientTLS: tlsInfo,
	})
	defer cluster.Terminate(t)

	cfg := storagebackend.Config{
		Type:       storagebackend.StorageTypeETCD3,
		ServerList: []string{cluster.Members[0].GRPCAddr()},
		CertFile:   certFile,
		KeyFile:    keyFile,
		CAFile:     caFile,
		Codec:      testapi.Default.Codec(),
	}
	storage, err := newETCD3Storage(cfg)
	if err != nil {
		t.Fatal(err)
	}
	err = storage.Create(context.TODO(), "/abc", &api.Pod{}, nil, 0)
	if err != nil {
		t.Fatalf("Create failed: %v", err)
	}
}
Example #7
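// TestKVPutWithRequireLeader ensures a Put with WithRequireLeader fails with ErrNoLeader once quorum is lost.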
func TestKVPutWithRequireLeader(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	clus.Members[1].Stop(t)
	clus.Members[2].Stop(t)

	// wait for election timeout, then member[0] will not have a leader.
	var (
		electionTicks = 10
		tickDuration  = 10 * time.Millisecond
	)
	time.Sleep(time.Duration(3*electionTicks) * tickDuration)

	kv := clientv3.NewKV(clus.Client(0))
	_, err := kv.Put(clientv3.WithRequireLeader(context.Background()), "foo", "bar")
	if err != rpctypes.ErrNoLeader {
		t.Fatal(err)
	}

	// clients may give timeout errors since the members are stopped; take
	// the clients so that terminating the cluster won't complain
	clus.Client(1).Close()
	clus.Client(2).Close()
	clus.TakeClient(1)
	clus.TakeClient(2)
}
Example #8
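// TestKVPutError ensures Put surfaces the expected errors for empty keys, oversized requests, and exhausted backend quota.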
func TestKVPutError(t *testing.T) {
	defer testutil.AfterTest(t)

	var (
		maxReqBytes = 1.5 * 1024 * 1024
		quota       = int64(maxReqBytes * 1.2)
	)
	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, QuotaBackendBytes: quota})
	defer clus.Terminate(t)

	kv := clientv3.NewKV(clus.RandClient())
	ctx := context.TODO()

	_, err := kv.Put(ctx, "", "bar")
	if err != rpctypes.ErrEmptyKey {
		t.Fatalf("expected %v, got %v", rpctypes.ErrEmptyKey, err)
	}

	_, err = kv.Put(ctx, "key", strings.Repeat("a", int(maxReqBytes+100))) // 1.5MB
	if err != rpctypes.ErrRequestTooLarge {
		t.Fatalf("expected %v, got %v", rpctypes.ErrRequestTooLarge, err)
	}

	_, err = kv.Put(ctx, "foo1", strings.Repeat("a", int(maxReqBytes-50)))
	if err != nil { // below quota
		t.Fatal(err)
	}

	time.Sleep(500 * time.Millisecond) // give enough time for commit

	_, err = kv.Put(ctx, "foo2", strings.Repeat("a", int(maxReqBytes-50)))
	if err != rpctypes.ErrNoSpace { // over quota
		t.Fatalf("expected %v, got %v", rpctypes.ErrNoSpace, err)
	}
}
Example #9
// TestCompactConflict tests that two compactors (Let's use C1, C2) are trying to compact etcd cluster with the same
// logical time.
// - C1 compacts first. It will succeed.
// - C2 compacts after. It will fail, but it will get the latest logical time, which should match C1's result (one larger than the stale time C2 supplied).
func TestCompactConflict(t *testing.T) {
	cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer cluster.Terminate(t)
	client := cluster.RandClient()
	ctx := context.Background()

	putResp, err := client.Put(ctx, "/somekey", "data")
	if err != nil {
		t.Fatalf("Put failed: %v", err)
	}

	// Compact first. It would do the compaction and return compact time which is incremented by 1.
	curTime, _, err := compact(ctx, client, 0, putResp.Header.Revision)
	if err != nil {
		t.Fatalf("compact failed: %v", err)
	}
	if curTime != 1 {
		t.Errorf("Expect current logical time = 1, get = %v", curTime)
	}

	// Compact again with the same parameters. It won't do compaction but return the latest compact time.
	curTime2, _, err := compact(ctx, client, 0, putResp.Header.Revision)
	if err != nil {
		t.Fatalf("compact failed: %v", err)
	}
	if curTime != curTime2 {
		t.Errorf("Unexpected curTime (%v) != curTime2 (%v)", curTime, curTime2)
	}
}
Example #10
// TestKVPutOneEndpointDown ensures a client can connect and get if one endpoint is down.
func TestKVPutOneEndpointDown(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// get endpoint list
	eps := make([]string, 3)
	for i := range eps {
		eps[i] = clus.Members[i].GRPCAddr()
	}

	// make a dead node
	clus.Members[rand.Intn(len(eps))].Stop(t)

	// try to connect with dead node in the endpoint list
	cfg := clientv3.Config{Endpoints: eps, DialTimeout: 1 * time.Second}
	cli, err := clientv3.New(cfg)
	if err != nil {
		t.Fatal(err)
	}
	defer cli.Close()
	ctx, cancel := context.WithTimeout(context.TODO(), 3*time.Second)
	if _, err := cli.Get(ctx, "abc", clientv3.WithSerializable()); err != nil {
		t.Fatal(err)
	}
	cancel()
}
Example #11
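// TestLeaseRevoke ensures that a Put using a revoked lease fails with ErrLeaseNotFound.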
func TestLeaseRevoke(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	lapi := clientv3.NewLease(clus.RandClient())
	defer lapi.Close()

	kv := clientv3.NewKV(clus.RandClient())

	resp, err := lapi.Create(context.Background(), 10)
	if err != nil {
		t.Errorf("failed to create lease %v", err)
	}

	_, err = lapi.Revoke(context.Background(), lease.LeaseID(resp.ID))
	if err != nil {
		t.Errorf("failed to revoke lease %v", err)
	}

	_, err = kv.Put(context.TODO(), "foo", "bar", clientv3.WithLease(lease.LeaseID(resp.ID)))
	if err != v3rpc.ErrLeaseNotFound {
		t.Fatalf("err = %v, want %v", err, v3rpc.ErrLeaseNotFound)
	}
}
Example #12
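// TestCompact ensures that reading a key at a revision older than the compacted revision returns ErrCompacted.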
func TestCompact(t *testing.T) {
	cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer cluster.Terminate(t)
	client := cluster.RandClient()
	ctx := context.Background()

	putResp, err := client.Put(ctx, "/somekey", "data")
	if err != nil {
		t.Fatalf("Put failed: %v", err)
	}

	putResp1, err := client.Put(ctx, "/somekey", "data2")
	if err != nil {
		t.Fatalf("Put failed: %v", err)
	}

	_, _, err = compact(ctx, client, 0, putResp1.Header.Revision)
	if err != nil {
		t.Fatalf("compact failed: %v", err)
	}

	obj, err := client.Get(ctx, "/somekey", clientv3.WithRev(putResp.Header.Revision))
	if err != etcdrpc.ErrCompacted {
		t.Errorf("Expecting ErrCompacted, but get=%v err=%v", obj, err)
	}
}
Example #13
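// TestMemberRemove ensures a removed member no longer appears in the member list.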
func TestMemberRemove(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	capi := clientv3.NewCluster(clus.Client(1))
	resp, err := capi.MemberList(context.Background())
	if err != nil {
		t.Fatalf("failed to list member %v", err)
	}

	_, err = capi.MemberRemove(context.Background(), resp.Members[0].ID)
	if err != nil {
		t.Fatalf("failed to remove member %v", err)
	}

	resp, err = capi.MemberList(context.Background())
	if err != nil {
		t.Fatalf("failed to list member %v", err)
	}

	if len(resp.Members) != 2 {
		t.Errorf("number of members = %d, want %d", len(resp.Members), 2)
	}
}
Example #14
// TestWatchCancelAndCloseClient ensures that canceling a watcher then immediately
// closing the client does not return a client closing error.
func TestWatchCancelAndCloseClient(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)
	cli := clus.Client(0)
	ctx, cancel := context.WithCancel(context.Background())
	wch := cli.Watch(ctx, "abc")
	donec := make(chan struct{})
	go func() {
		defer close(donec)
		select {
		case wr, ok := <-wch:
			if ok {
				t.Fatalf("expected closed watch after cancel(), got resp=%+v err=%v", wr, wr.Err())
			}
		case <-time.After(5 * time.Second):
			t.Fatal("timed out waiting for closed channel")
		}
	}()
	cancel()
	if err := cli.Close(); err != nil {
		t.Fatal(err)
	}
	<-donec
	clus.TakeClient(0)
}
Example #15
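// TestLeaseKeepAlive ensures keepalive responses are delivered for a created lease and that closing the lease client closes the keepalive channel.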
func TestLeaseKeepAlive(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	lapi := clientv3.NewLease(clus.RandClient())

	resp, err := lapi.Create(context.Background(), 10)
	if err != nil {
		t.Errorf("failed to create lease %v", err)
	}

	rc, kerr := lapi.KeepAlive(context.Background(), lease.LeaseID(resp.ID))
	if kerr != nil {
		t.Errorf("failed to keepalive lease %v", kerr)
	}

	kresp, ok := <-rc
	if !ok {
		t.Errorf("chan is closed, want not closed")
	}

	if kresp.ID != resp.ID {
		t.Errorf("ID = %x, want %x", kresp.ID, resp.ID)
	}

	lapi.Close()

	_, ok = <-rc
	if ok {
		t.Errorf("chan is not closed, want lease Close() closes chan")
	}
}
Example #16
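// TestLeaseRevokeNewAfterClose ensures Revoke on a closed client fails fast with ErrClientConnClosing instead of blocking.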
func TestLeaseRevokeNewAfterClose(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	cli := clus.Client(0)
	le := clientv3.NewLease(cli)
	resp, err := le.Grant(context.TODO(), 5)
	if err != nil {
		t.Fatal(err)
	}
	leaseID := resp.ID

	clus.TakeClient(0)
	if err := cli.Close(); err != nil {
		t.Fatal(err)
	}

	donec := make(chan struct{})
	go func() {
		if _, err := le.Revoke(context.TODO(), leaseID); err != grpc.ErrClientConnClosing {
			t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, err)
		}
		close(donec)
	}()
	select {
	case <-time.After(3 * time.Second):
		t.Fatal("le.Revoke took too long")
	case <-donec:
	}
}
Example #17
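// TestWatchErrConnClosed ensures a watcher does not block once its client connection is closed.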
func TestWatchErrConnClosed(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	cli := clus.Client(0)
	wc := clientv3.NewWatcher(cli)

	donec := make(chan struct{})
	go func() {
		defer close(donec)
		wc.Watch(context.TODO(), "foo")
		if err := wc.Close(); err != nil && err != grpc.ErrClientConnClosing {
			t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, err)
		}
	}()

	if err := cli.Close(); err != nil {
		t.Fatal(err)
	}
	clus.TakeClient(0)

	select {
	case <-time.After(3 * time.Second):
		t.Fatal("wc.Watch took too long")
	case <-donec:
	}
}
Example #18
// TestLeaseKeepAliveTTLTimeout ensures the keep alive channel closes if
// a keep alive request after the first never gets a response.
func TestLeaseKeepAliveTTLTimeout(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	cli := clus.Client(0)

	// setup lease and do a keepalive
	resp, err := cli.Grant(context.Background(), 5)
	if err != nil {
		t.Fatal(err)
	}
	rc, kerr := cli.KeepAlive(context.Background(), resp.ID)
	if kerr != nil {
		t.Fatal(kerr)
	}
	if kresp := <-rc; kresp.ID != resp.ID {
		t.Fatalf("ID = %x, want %x", kresp.ID, resp.ID)
	}

	// keep client disconnected
	clus.Members[0].Stop(t)
	select {
	case ka, ok := <-rc:
		if ok {
			t.Fatalf("unexpected keepalive %v, expected closed channel", ka)
		}
	case <-time.After(10 * time.Second):
		t.Fatalf("keepalive channel did not close")
	}

	clus.Members[0].Restart(t)
}
Example #19
// TestWatchWithFilter checks that watch filtering works.
func TestWatchWithFilter(t *testing.T) {
	cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer cluster.Terminate(t)

	client := cluster.RandClient()
	ctx := context.Background()

	wcNoPut := client.Watch(ctx, "a", clientv3.WithFilterPut())
	wcNoDel := client.Watch(ctx, "a", clientv3.WithFilterDelete())

	if _, err := client.Put(ctx, "a", "abc"); err != nil {
		t.Fatal(err)
	}
	if _, err := client.Delete(ctx, "a"); err != nil {
		t.Fatal(err)
	}

	npResp := <-wcNoPut
	if len(npResp.Events) != 1 || npResp.Events[0].Type != clientv3.EventTypeDelete {
		t.Fatalf("expected delete event, got %+v", npResp.Events)
	}
	ndResp := <-wcNoDel
	if len(ndResp.Events) != 1 || ndResp.Events[0].Type != clientv3.EventTypePut {
		t.Fatalf("expected put event, got %+v", ndResp.Events)
	}

	select {
	case resp := <-wcNoPut:
		t.Fatalf("unexpected event on filtered put (%+v)", resp)
	case resp := <-wcNoDel:
		t.Fatalf("unexpected event on filtered delete (%+v)", resp)
	case <-time.After(100 * time.Millisecond):
	}
}
Example #20
// TestMain sets up an etcd cluster if running the examples.
func TestMain(m *testing.M) {
	useCluster := true // default to running all tests
	for _, arg := range os.Args {
		if strings.HasPrefix(arg, "-test.run=") {
			exp := strings.Split(arg, "=")[1]
			match, err := regexp.MatchString(exp, "Example")
			useCluster = (err == nil && match) || strings.Contains(exp, "Example")
			break
		}
	}

	v := 0
	if useCluster {
		cfg := integration.ClusterConfig{Size: 3}
		clus := integration.NewClusterV3(nil, &cfg)
		endpoints = make([]string, 3)
		for i := range endpoints {
			endpoints[i] = clus.Client(i).Endpoints()[0]
		}
		v = m.Run()
		clus.Terminate(nil)
	} else {
		v = m.Run()
	}

	if v == 0 && testutil.CheckLeakedGoroutine() {
		os.Exit(1)
	}
	os.Exit(v)
}
Example #21
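// TestMemberUpdate ensures an updated peer URL is reflected in the member list.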
func TestMemberUpdate(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	capi := clientv3.NewCluster(clus.RandClient())
	resp, err := capi.MemberList(context.Background())
	if err != nil {
		t.Fatalf("failed to list member %v", err)
	}

	urls := []string{"http://127.0.0.1:1234"}
	_, err = capi.MemberUpdate(context.Background(), resp.Members[0].ID, urls)
	if err != nil {
		t.Fatalf("failed to update member %v", err)
	}

	resp, err = capi.MemberList(context.Background())
	if err != nil {
		t.Fatalf("failed to list member %v", err)
	}

	if !reflect.DeepEqual(resp.Members[0].PeerURLs, urls) {
		t.Errorf("urls = %v, want %v", urls, resp.Members[0].PeerURLs)
	}
}
Example #22
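// TestKVNewAfterClose ensures a KV created from a closed client fails fast with ErrClientConnClosing.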
func TestKVNewAfterClose(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	cli := clus.Client(0)
	clus.TakeClient(0)
	if err := cli.Close(); err != nil {
		t.Fatal(err)
	}

	donec := make(chan struct{})
	go func() {
		kv := clientv3.NewKV(cli)
		if _, err := kv.Get(context.TODO(), "foo"); err != grpc.ErrClientConnClosing {
			t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, err)
		}
		close(donec)
	}()
	select {
	case <-time.After(3 * time.Second):
		t.Fatal("kv.Get took too long")
	case <-donec:
	}
}
Example #23
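// TestUserError ensures the auth API returns the expected errors for duplicate users, missing users, and missing roles.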
func TestUserError(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	authapi := clientv3.NewAuth(clus.RandClient())

	_, err := authapi.UserAdd(context.TODO(), "foo", "bar")
	if err != nil {
		t.Fatal(err)
	}

	_, err = authapi.UserAdd(context.TODO(), "foo", "bar")
	if err != rpctypes.ErrUserAlreadyExist {
		t.Fatalf("expected %v, got %v", rpctypes.ErrUserAlreadyExist, err)
	}

	_, err = authapi.UserDelete(context.TODO(), "not-exist-user")
	if err != rpctypes.ErrUserNotFound {
		t.Fatalf("expected %v, got %v", rpctypes.ErrUserNotFound, err)
	}

	_, err = authapi.UserGrant(context.TODO(), "foo", "test-role-does-not-exist")
	if err != rpctypes.ErrRoleNotFound {
		t.Fatalf("expected %v, got %v", rpctypes.ErrRoleNotFound, err)
	}
}
Example #24
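// TestKVCompactError ensures compacting an already-compacted revision returns ErrCompacted and compacting a future revision returns ErrFutureRev.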
func TestKVCompactError(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	kv := clientv3.NewKV(clus.RandClient())
	ctx := context.TODO()

	for i := 0; i < 5; i++ {
		if _, err := kv.Put(ctx, "foo", "bar"); err != nil {
			t.Fatalf("couldn't put 'foo' (%v)", err)
		}
	}
	_, err := kv.Compact(ctx, 6)
	if err != nil {
		t.Fatalf("couldn't compact 6 (%v)", err)
	}

	_, err = kv.Compact(ctx, 6)
	if err != rpctypes.ErrCompacted {
		t.Fatalf("expected %v, got %v", rpctypes.ErrCompacted, err)
	}

	_, err = kv.Compact(ctx, 100)
	if err != rpctypes.ErrFutureRev {
		t.Fatalf("expected %v, got %v", rpctypes.ErrFutureRev, err)
	}
}
Example #25
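// TestTxnReadRetry ensures a read-only txn issued while its member is down completes once the member restarts.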
func TestTxnReadRetry(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	kv := clientv3.NewKV(clus.Client(0))
	clus.Members[0].Stop(t)
	<-clus.Members[0].StopNotify()

	donec := make(chan struct{})
	go func() {
		ctx := context.TODO()
		_, err := kv.Txn(ctx).Then(clientv3.OpGet("foo")).Commit()
		if err != nil {
			t.Fatalf("expected response, got error %v", err)
		}
		donec <- struct{}{}
	}()
	// wait for txn to fail on disconnect
	time.Sleep(100 * time.Millisecond)

	// restart node; client should resume
	clus.Members[0].Restart(t)
	select {
	case <-donec:
	case <-time.After(5 * time.Second):
		t.Fatalf("waited too long")
	}
}
Example #26
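// TestTxnWriteFail ensures a txn write against a stopped member errors out within the dial timeout and that the put does not take effect after restart.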
func TestTxnWriteFail(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	kv := clientv3.NewKV(clus.Client(0))
	ctx := context.TODO()

	clus.Members[0].Stop(t)
	<-clus.Members[0].StopNotify()

	donec := make(chan struct{})
	go func() {
		resp, err := kv.Txn(ctx).Then(clientv3.OpPut("foo", "bar")).Commit()
		if err == nil {
			t.Fatalf("expected error, got response %v", resp)
		}
		donec <- struct{}{}
	}()

	dialTimeout := 5 * time.Second
	select {
	case <-time.After(2*dialTimeout + time.Second):
		t.Fatalf("timed out waiting for txn to fail")
	case <-donec:
		// don't restart cluster until txn errors out
	}

	go func() {
		// reconnect so terminate doesn't complain about double-close
		clus.Members[0].Restart(t)
		// wait for etcdserver to get established (CI races and get req times out)
		time.Sleep(2 * time.Second)
		donec <- struct{}{}

		// and ensure the put didn't take
		gresp, gerr := kv.Get(ctx, "foo")
		if gerr != nil {
			t.Fatal(gerr)
		}
		if len(gresp.Kvs) != 0 {
			t.Fatalf("expected no keys, got %v", gresp.Kvs)
		}
		donec <- struct{}{}
	}()

	select {
	case <-time.After(5 * time.Second):
		t.Fatalf("timed out waiting for restart")
	case <-donec:
	}

	select {
	case <-time.After(5 * time.Second):
		t.Fatalf("timed out waiting for get")
	case <-donec:
	}
}
Example #27
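// testWatchOverlapContextCancel stresses concurrent watches that share a small set of contexts
// (and hence gRPC streams), randomizing how cancellation overlaps with watch creation; f runs
// after each watch is established.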
func testWatchOverlapContextCancel(t *testing.T, f func(*integration.ClusterV3)) {
	defer testutil.AfterTest(t)
	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	// each unique context "%v" has a unique grpc stream
	n := 100
	ctxs, ctxc := make([]context.Context, 5), make([]chan struct{}, 5)
	for i := range ctxs {
		// make "%v" unique
		ctxs[i] = context.WithValue(context.TODO(), "key", i)
		// limits the maximum number of outstanding watchers per stream
		ctxc[i] = make(chan struct{}, 2)
	}

	// issue concurrent watches on "abc" with cancel
	cli := clus.RandClient()
	if _, err := cli.Put(context.TODO(), "abc", "def"); err != nil {
		t.Fatal(err)
	}
	ch := make(chan struct{}, n)
	for i := 0; i < n; i++ {
		go func() {
			defer func() { ch <- struct{}{} }()
			idx := rand.Intn(len(ctxs))
			ctx, cancel := context.WithCancel(ctxs[idx])
			ctxc[idx] <- struct{}{}
			wch := cli.Watch(ctx, "abc", clientv3.WithRev(1))
			f(clus)
			select {
			case _, ok := <-wch:
				if !ok {
					t.Fatalf("unexpected closed channel %p", wch)
				}
			// may take a second or two to reestablish a watcher because of
			// grpc backoff policies for disconnects
			case <-time.After(5 * time.Second):
				t.Errorf("timed out waiting for watch on %p", wch)
			}
			// randomize how cancel overlaps with watch creation
			if rand.Intn(2) == 0 {
				<-ctxc[idx]
				cancel()
			} else {
				cancel()
				<-ctxc[idx]
			}
		}()
	}
	// join on watches
	for i := 0; i < n; i++ {
		select {
		case <-ch:
		case <-time.After(5 * time.Second):
			t.Fatalf("timed out waiting for completed watch")
		}
	}
}
Example #28
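// TestGRPCResolver ensures Add and Delete updates written through the resolver are observed by its watcher.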
func TestGRPCResolver(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	r := GRPCResolver{
		Client: clus.RandClient(),
	}

	w, err := r.Resolve("foo")
	if err != nil {
		t.Fatal("failed to resolve foo", err)
	}
	defer w.Close()

	addOp := naming.Update{Op: naming.Add, Addr: "127.0.0.1", Metadata: "metadata"}
	err = r.Update(context.TODO(), "foo", addOp)
	if err != nil {
		t.Fatal("failed to add foo", err)
	}

	us, err := w.Next()
	if err != nil {
		t.Fatal("failed to get udpate", err)
	}

	wu := &naming.Update{
		Op:       naming.Add,
		Addr:     "127.0.0.1",
		Metadata: "metadata",
	}

	if !reflect.DeepEqual(us[0], wu) {
		t.Fatalf("up = %#v, want %#v", us[0], wu)
	}

	delOp := naming.Update{Op: naming.Delete, Addr: "127.0.0.1"}
	if err = r.Update(context.TODO(), "foo", delOp); err != nil {
		t.Fatal("failed to delete foo", err)
	}

	us, err = w.Next()
	if err != nil {
		t.Fatal("failed to get udpate", err)
	}

	wu = &naming.Update{
		Op:       naming.Delete,
		Addr:     "127.0.0.1",
		Metadata: "metadata",
	}

	if !reflect.DeepEqual(us[0], wu) {
		t.Fatalf("up = %#v, want %#v", us[0], wu)
	}
}
Example #29
// TestWatchWithRequireLeader checks the watch channel closes when no leader.
func TestWatchWithRequireLeader(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// Put a key for the non-require leader watch to read as an event.
	// The watchers will be on member[0]; put key through member[0] to
	// ensure that it receives the update so watching after killing quorum
	// is guaranteed to have the key.
	liveClient := clus.Client(0)
	if _, err := liveClient.Put(context.TODO(), "foo", "bar"); err != nil {
		t.Fatal(err)
	}

	clus.Members[1].Stop(t)
	clus.Members[2].Stop(t)
	clus.Client(1).Close()
	clus.Client(2).Close()
	clus.TakeClient(1)
	clus.TakeClient(2)

	// wait for election timeout, then member[0] will not have a leader.
	tickDuration := 10 * time.Millisecond
	time.Sleep(time.Duration(3*clus.Members[0].ElectionTicks) * tickDuration)

	chLeader := liveClient.Watch(clientv3.WithRequireLeader(context.TODO()), "foo", clientv3.WithRev(1))
	chNoLeader := liveClient.Watch(context.TODO(), "foo", clientv3.WithRev(1))

	select {
	case resp, ok := <-chLeader:
		if !ok {
			t.Fatalf("expected %v watch channel, got closed channel", rpctypes.ErrNoLeader)
		}
		if resp.Err() != rpctypes.ErrNoLeader {
			t.Fatalf("expected %v watch response error, got %+v", rpctypes.ErrNoLeader, resp)
		}
	case <-time.After(3 * time.Second):
		t.Fatal("watch without leader took too long to close")
	}

	select {
	case resp, ok := <-chLeader:
		if ok {
			t.Fatalf("expected closed channel, got response %v", resp)
		}
	case <-time.After(3 * time.Second):
		t.Fatal("waited too long for channel to close")
	}

	if _, ok := <-chNoLeader; !ok {
		t.Fatalf("expected response, got closed channel")
	}
}
Example #30
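// testWatchWithProgressNotify ensures an empty progress notification arrives within the
// report interval and, when watchOnPut is set, that a subsequent put is delivered as an event.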
func testWatchWithProgressNotify(t *testing.T, watchOnPut bool) {
	defer testutil.AfterTest(t)

	// accelerate report interval so test terminates quickly
	oldpi := v3rpc.GetProgressReportInterval()
	// using atomics to avoid race warnings
	v3rpc.SetProgressReportInterval(3 * time.Second)
	pi := 3 * time.Second
	defer func() { v3rpc.SetProgressReportInterval(oldpi) }()

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	wc := clientv3.NewWatcher(clus.RandClient())
	defer wc.Close()

	opts := []clientv3.OpOption{clientv3.WithProgressNotify()}
	if watchOnPut {
		opts = append(opts, clientv3.WithPrefix())
	}
	rch := wc.Watch(context.Background(), "foo", opts...)

	select {
	case resp := <-rch: // wait for notification
		if len(resp.Events) != 0 {
			t.Fatalf("resp.Events expected none, got %+v", resp.Events)
		}
	case <-time.After(2 * pi):
		t.Fatalf("watch response expected in %v, but timed out", pi)
	}

	kvc := clientv3.NewKV(clus.RandClient())
	if _, err := kvc.Put(context.TODO(), "foox", "bar"); err != nil {
		t.Fatal(err)
	}

	select {
	case resp := <-rch:
		if resp.Header.Revision != 2 {
			t.Fatalf("resp.Header.Revision expected 2, got %d", resp.Header.Revision)
		}
		if watchOnPut { // wait for put if watch on the put key
			ev := []*clientv3.Event{{Type: clientv3.EventTypePut,
				Kv: &mvccpb.KeyValue{Key: []byte("foox"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1}}}
			if !reflect.DeepEqual(ev, resp.Events) {
				t.Fatalf("expected %+v, got %+v", ev, resp.Events)
			}
		} else if len(resp.Events) != 0 { // wait for notification otherwise
			t.Fatalf("expected no events, but got %+v", resp.Events)
		}
	case <-time.After(2 * pi):
		t.Fatalf("watch response expected in %v, but timed out", pi)
	}
}