Example #1
File: lease_test.go Project: mgurevin/etcd
// TestLeaseKeepAliveTTLTimeout ensures the keep alive channel closes if
// a keep alive request after the first never gets a response.
func TestLeaseKeepAliveTTLTimeout(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	cli := clus.Client(0)

	// setup lease and do a keepalive
	resp, err := cli.Grant(context.Background(), 5)
	if err != nil {
		t.Fatal(err)
	}
	rc, kerr := cli.KeepAlive(context.Background(), resp.ID)
	if kerr != nil {
		t.Fatal(kerr)
	}
	if kresp := <-rc; kresp.ID != resp.ID {
		t.Fatalf("ID = %x, want %x", kresp.ID, resp.ID)
	}

	// keep client disconnected
	clus.Members[0].Stop(t)
	select {
	case ka, ok := <-rc:
		if ok {
			t.Fatalf("unexpected keepalive %v, expected closed channel", ka)
		}
	case <-time.After(10 * time.Second):
		t.Fatalf("keepalive channel did not close")
	}

	clus.Members[0].Restart(t)
}
Example #2
File: kv_test.go Project: ringtail/etcd
// TestKVGetOneEndpointDown ensures a client can connect and get if one endpoint is down
func TestKVGetOneEndpointDown(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// get endpoint list
	eps := make([]string, 3)
	for i := range eps {
		eps[i] = clus.Members[i].GRPCAddr()
	}

	// make a dead node
	clus.Members[rand.Intn(len(eps))].Stop(t)

	// try to connect with dead node in the endpoint list
	cfg := clientv3.Config{Endpoints: eps, DialTimeout: 1 * time.Second}
	cli, err := clientv3.New(cfg)
	if err != nil {
		t.Fatal(err)
	}
	defer cli.Close()
	ctx, cancel := context.WithTimeout(context.TODO(), 3*time.Second)
	if _, err := cli.Get(ctx, "abc", clientv3.WithSerializable()); err != nil {
		t.Fatal(err)
	}
	cancel()
}
Example #3
File: lease_test.go Project: lrita/etcd
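// TestLeaseKeepAlive ensures a keep alive channel receives responses for a
// created lease and closes once the lease API client is closed.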
func TestLeaseKeepAlive(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	lapi := clientv3.NewLease(clus.RandClient())

	resp, err := lapi.Create(context.Background(), 10)
	if err != nil {
		t.Errorf("failed to create lease %v", err)
	}

	rc, kerr := lapi.KeepAlive(context.Background(), lease.LeaseID(resp.ID))
	if kerr != nil {
		t.Errorf("failed to keepalive lease %v", kerr)
	}

	kresp, ok := <-rc
	if !ok {
		t.Errorf("chan is closed, want not closed")
	}

	if kresp.ID != resp.ID {
		t.Errorf("ID = %x, want %x", kresp.ID, resp.ID)
	}

	lapi.Close()

	_, ok = <-rc
	if ok {
		t.Errorf("chan is not closed, want lease Close() closes chan")
	}
}
Example #4
File: kv_test.go Project: ringtail/etcd
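// TestKVNewAfterClose ensures a KV created from an already closed client
// fails its requests with ErrClientConnClosing instead of hanging.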
func TestKVNewAfterClose(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	cli := clus.Client(0)
	clus.TakeClient(0)
	if err := cli.Close(); err != nil {
		t.Fatal(err)
	}

	donec := make(chan struct{})
	go func() {
		kv := clientv3.NewKV(cli)
		if _, err := kv.Get(context.TODO(), "foo"); err != grpc.ErrClientConnClosing {
			t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, err)
		}
		close(donec)
	}()
	select {
	case <-time.After(3 * time.Second):
		t.Fatal("kv.Get took too long")
	case <-donec:
	}
}
Example #5
File: kv_test.go Project: ringtail/etcd
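// TestKVCompactError ensures compacting an already compacted revision returns
// ErrCompacted and compacting a future revision returns ErrFutureRev.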
func TestKVCompactError(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	kv := clientv3.NewKV(clus.RandClient())
	ctx := context.TODO()

	for i := 0; i < 5; i++ {
		if _, err := kv.Put(ctx, "foo", "bar"); err != nil {
			t.Fatalf("couldn't put 'foo' (%v)", err)
		}
	}
	_, err := kv.Compact(ctx, 6)
	if err != nil {
		t.Fatalf("couldn't compact 6 (%v)", err)
	}

	_, err = kv.Compact(ctx, 6)
	if err != rpctypes.ErrCompacted {
		t.Fatalf("expected %v, got %v", rpctypes.ErrCompacted, err)
	}

	_, err = kv.Compact(ctx, 100)
	if err != rpctypes.ErrFutureRev {
		t.Fatalf("expected %v, got %v", rpctypes.ErrFutureRev, err)
	}
}
Example #6
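// testCurlPutGet puts a key via the v2 HTTP API with curl and expects to read
// the same value back, optionally over TLS.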
func testCurlPutGet(t *testing.T, cfg *etcdProcessClusterConfig) {
	defer testutil.AfterTest(t)

	// test doesn't use quorum gets, so ensure there are no followers to avoid
	// stale reads that will break the test
	cfg = configStandalone(*cfg)

	epc, err := newEtcdProcessCluster(cfg)
	if err != nil {
		t.Fatalf("could not start etcd process cluster (%v)", err)
	}
	defer func() {
		if err := epc.Close(); err != nil {
			t.Fatalf("error closing etcd processes (%v)", err)
		}
	}()

	var (
		expectPut = `{"action":"set","node":{"key":"/foo","value":"bar","`
		expectGet = `{"action":"get","node":{"key":"/foo","value":"bar","`
	)
	if err := cURLPut(epc, cURLReq{endpoint: "/v2/keys/foo", value: "bar", expected: expectPut}); err != nil {
		t.Fatalf("failed put with curl (%v)", err)
	}
	if err := cURLGet(epc, cURLReq{endpoint: "/v2/keys/foo", expected: expectGet}); err != nil {
		t.Fatalf("failed get with curl (%v)", err)
	}
	if cfg.clientTLS == clientTLSAndNonTLS {
		if err := cURLGet(epc, cURLReq{endpoint: "/v2/keys/foo", expected: expectGet, isTLS: true}); err != nil {
			t.Fatalf("failed get with curl (%v)", err)
		}
	}
}
Example #7
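// testCtlV2Watch sets a key with etcdctl and expects a concurrently started
// watch to observe the new value before timing out.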
func testCtlV2Watch(t *testing.T, cfg *etcdProcessClusterConfig, noSync bool) {
	defer testutil.AfterTest(t)

	epc := setupEtcdctlTest(t, cfg, true)
	defer func() {
		if errC := epc.Close(); errC != nil {
			t.Fatalf("error closing etcd processes (%v)", errC)
		}
	}()

	key, value := "foo", "bar"
	errc := etcdctlWatch(epc, key, value, noSync)
	if err := etcdctlSet(epc, key, value); err != nil {
		t.Fatalf("failed set (%v)", err)
	}

	select {
	case err := <-errc:
		if err != nil {
			t.Fatalf("failed watch (%v)", err)
		}
	case <-time.After(5 * time.Second):
		t.Fatalf("watch timed out")
	}
}
Example #8
// TestV3PutRestart checks if a put after an unrelated member restart succeeds
func TestV3PutRestart(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	kvIdx := rand.Intn(3)
	kvc := toGRPC(clus.Client(kvIdx)).KV

	stopIdx := kvIdx
	for stopIdx == kvIdx {
		stopIdx = rand.Intn(3)
	}

	clus.clients[stopIdx].Close()
	clus.Members[stopIdx].Stop(t)
	clus.Members[stopIdx].Restart(t)
	c, cerr := NewClientV3(clus.Members[stopIdx])
	if cerr != nil {
		t.Fatalf("cannot create client: %v", cerr)
	}
	clus.clients[stopIdx] = c

	ctx, cancel := context.WithTimeout(context.TODO(), 10*time.Second)
	defer cancel()
	reqput := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
	_, err := kvc.Put(ctx, reqput)
	if err != nil && err == ctx.Err() {
		t.Fatalf("expected grpc error, got local ctx error (%v)", err)
	}
}
Example #9
// TestTLSGRPCRejectInsecureClient checks that a connection is rejected if the server uses TLS but the client does not.
func TestTLSGRPCRejectInsecureClient(t *testing.T) {
	defer testutil.AfterTest(t)

	cfg := ClusterConfig{Size: 3, ClientTLS: &testTLSInfo}
	clus := newClusterV3NoClients(t, &cfg)
	defer clus.Terminate(t)

	// nil out TLS field so client will use an insecure connection
	clus.Members[0].ClientTLSInfo = nil
	client, err := NewClientV3(clus.Members[0])
	if err != nil && err != grpc.ErrClientConnTimeout {
		t.Fatalf("unexpected error (%v)", err)
	} else if client == nil {
		// Ideally, no client would be returned. However, grpc will
		// return a connection without trying to handshake first so
		// the connection appears OK.
		return
	}
	defer client.Close()

	donec := make(chan error, 1)
	go func() {
		ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second)
		reqput := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
		_, perr := toGRPC(client).KV.Put(ctx, reqput)
		cancel()
		donec <- perr
	}()

	if perr := <-donec; perr == nil {
		t.Fatalf("expected client error on put")
	}
}
Example #10
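// testCtlV2GetRoleUser adds a role and a user with etcdctl, grants the role
// to the user, and then gets the user.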
func testCtlV2GetRoleUser(t *testing.T, cfg *etcdProcessClusterConfig) {
	defer testutil.AfterTest(t)

	epc := setupEtcdctlTest(t, cfg, true)
	defer func() {
		if err := epc.Close(); err != nil {
			t.Fatalf("error closing etcd processes (%v)", err)
		}
	}()

	// wait for the server capabilities to be updated based on the version;
	// the update loop has a delay of 500ms, so 1s should be enough wait time
	time.Sleep(time.Second)

	if err := etcdctlRoleAdd(epc, "foo"); err != nil {
		t.Fatalf("failed to add role (%v)", err)
	}
	if err := etcdctlUserAdd(epc, "username", "password"); err != nil {
		t.Fatalf("failed to add user (%v)", err)
	}
	if err := etcdctlUserGrant(epc, "username", "foo"); err != nil {
		t.Fatalf("failed to grant role (%v)", err)
	}
	if err := etcdctlUserGet(epc, "username"); err != nil {
		t.Fatalf("failed to get user (%v)", err)
	}
}
Example #11
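// TestGRPCRequireLeader ensures a Put sent with require-leader metadata fails
// with ErrNoLeader once quorum is lost.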
func TestGRPCRequireLeader(t *testing.T) {
	defer testutil.AfterTest(t)

	cfg := ClusterConfig{Size: 3}
	clus := newClusterV3NoClients(t, &cfg)
	defer clus.Terminate(t)

	clus.Members[1].Stop(t)
	clus.Members[2].Stop(t)

	client, err := NewClientV3(clus.Members[0])
	if err != nil {
		t.Fatalf("cannot create client: %v", err)
	}
	defer client.Close()

	// wait for election timeout, then member[0] will not have a leader.
	time.Sleep(time.Duration(3*electionTicks) * tickDuration)

	md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
	ctx := metadata.NewContext(context.Background(), md)
	reqput := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
	if _, err := toGRPC(client).KV.Put(ctx, reqput); grpc.ErrorDesc(err) != rpctypes.ErrNoLeader.Error() {
		t.Errorf("err = %v, want %v", err, rpctypes.ErrNoLeader)
	}
}
Example #12
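// testBasicOpsPutGet puts a key via the v2 HTTP API with curl and expects to
// read the same value back.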
func testBasicOpsPutGet(t *testing.T, cfg *etcdProcessClusterConfig) {
	defer testutil.AfterTest(t)

	// test doesn't use quorum gets, so ensure there are no followers to avoid
	// stale reads that will break the test
	cfg = configStandalone(*cfg)

	epc, err := newEtcdProcessCluster(cfg)
	if err != nil {
		t.Fatalf("could not start etcd process cluster (%v)", err)
	}
	defer func() {
		if err := epc.Close(); err != nil {
			t.Fatalf("error closing etcd processes (%v)", err)
		}
	}()

	expectPut := `{"action":"set","node":{"key":"/testKey","value":"foo","`
	if err := cURLPut(epc, "testKey", "foo", expectPut); err != nil {
		t.Fatalf("failed put with curl (%v)", err)
	}

	expectGet := `{"action":"get","node":{"key":"/testKey","value":"foo","`
	if err := cURLGet(epc, "testKey", expectGet); err != nil {
		t.Fatalf("failed get with curl (%v)", err)
	}
}
Example #13
// TestV2NoRetryNoLeader tests that destructive API calls won't retry if given an error code.
func TestV2NoRetryNoLeader(t *testing.T) {
	defer testutil.AfterTest(t)

	lHttp := integration.NewListenerWithAddr(t, fmt.Sprintf("errHttp:123.%d.sock", os.Getpid()))
	eh := &errHandler{errCode: http.StatusServiceUnavailable}
	srv := httptest.NewUnstartedServer(eh)
	defer lHttp.Close()
	defer srv.Close()
	srv.Listener = lHttp
	go srv.Start()
	lHttpURL := integration.UrlScheme + "://" + lHttp.Addr().String()

	cli := integration.MustNewHTTPClient(t, []string{lHttpURL, lHttpURL}, nil)
	kapi := client.NewKeysAPI(cli)
	// test error code
	for i, f := range noRetryList(kapi) {
		reqs := eh.reqs
		if err := f(); err == nil || !strings.Contains(err.Error(), "no leader") {
			t.Errorf("#%d: expected \"no leader\", got %v", i, err)
		}
		if eh.reqs != reqs+1 {
			t.Errorf("#%d: expected 1 request, got %d", i, eh.reqs-reqs)
		}
	}
}
Example #14
// TestV2NoRetryEOF tests that destructive API calls won't retry on a disconnection.
func TestV2NoRetryEOF(t *testing.T) {
	defer testutil.AfterTest(t)
	// generate an EOF response; specify address so appears first in sorted ep list
	lEOF := integration.NewListenerWithAddr(t, fmt.Sprintf("eof:123.%d.sock", os.Getpid()))
	defer lEOF.Close()
	tries := uint32(0)
	go func() {
		for {
			conn, err := lEOF.Accept()
			if err != nil {
				return
			}
			atomic.AddUint32(&tries, 1)
			conn.Close()
		}
	}()
	eofURL := integration.UrlScheme + "://" + lEOF.Addr().String()
	cli := integration.MustNewHTTPClient(t, []string{eofURL, eofURL}, nil)
	kapi := client.NewKeysAPI(cli)
	for i, f := range noRetryList(kapi) {
		startTries := atomic.LoadUint32(&tries)
		if err := f(); err == nil {
			t.Errorf("#%d: expected EOF error, got nil", i)
		}
		endTries := atomic.LoadUint32(&tries)
		if startTries+1 != endTries {
			t.Errorf("#%d: expected 1 try, got %d", i, endTries-startTries)
		}
	}
}
Example #15
// TestReleaseUpgrade ensures that changes to the master branch do not affect
// upgrades from the latest etcd releases.
func TestReleaseUpgrade(t *testing.T) {
	lastReleaseBinary := binDir + "/etcd-last-release"
	if !fileutil.Exist(lastReleaseBinary) {
		t.Skipf("%q does not exist", lastReleaseBinary)
	}

	defer testutil.AfterTest(t)

	copiedCfg := configNoTLS
	copiedCfg.execPath = lastReleaseBinary
	copiedCfg.snapCount = 3
	copiedCfg.baseScheme = "unix" // to avoid port conflict

	epc, err := newEtcdProcessCluster(&copiedCfg)
	if err != nil {
		t.Fatalf("could not start etcd process cluster (%v)", err)
	}
	defer func() {
		if errC := epc.Close(); errC != nil {
			t.Fatalf("error closing etcd processes (%v)", errC)
		}
	}()

	os.Setenv("ETCDCTL_API", "3")
	defer os.Unsetenv("ETCDCTL_API")
	cx := ctlCtx{
		t:           t,
		cfg:         configNoTLS,
		dialTimeout: 7 * time.Second,
		quorum:      true,
		epc:         epc,
	}
	var kvs []kv
	for i := 0; i < 5; i++ {
		kvs = append(kvs, kv{key: fmt.Sprintf("foo%d", i), val: "bar"})
	}
	for i := range kvs {
		if err := ctlV3Put(cx, kvs[i].key, kvs[i].val, ""); err != nil {
			cx.t.Fatalf("#%d: ctlV3Put error (%v)", i, err)
		}
	}

	for i := range epc.procs {
		if err := epc.procs[i].Stop(); err != nil {
			t.Fatalf("#%d: error closing etcd process (%v)", i, err)
		}
		epc.procs[i].cfg.execPath = binDir + "/etcd"
		epc.procs[i].cfg.keepDataDir = true

		if err := epc.procs[i].Restart(); err != nil {
			t.Fatalf("error restarting etcd process (%v)", err)
		}

		for j := range kvs {
			if err := ctlV3Get(cx, []string{kvs[j].key}, []kv{kvs[j]}...); err != nil {
				cx.t.Fatalf("#%d-%d: ctlV3Get error (%v)", i, j, err)
			}
		}
	}
}
Example #16
// TestV3LeaseKeepAlive ensures keepalive keeps the lease alive.
func TestV3LeaseKeepAlive(t *testing.T) {
	defer testutil.AfterTest(t)
	testLeaseRemoveLeasedKey(t, func(clus *ClusterV3, leaseID int64) error {
		lc := toGRPC(clus.RandClient()).Lease
		lreq := &pb.LeaseKeepAliveRequest{ID: leaseID}
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		lac, err := lc.LeaseKeepAlive(ctx)
		if err != nil {
			return err
		}
		defer lac.CloseSend()

		// renew long enough so lease would've expired otherwise
		for i := 0; i < 3; i++ {
			if err = lac.Send(lreq); err != nil {
				return err
			}
			lresp, rxerr := lac.Recv()
			if rxerr != nil {
				return rxerr
			}
			if lresp.ID != leaseID {
				return fmt.Errorf("expected lease ID %v, got %v", leaseID, lresp.ID)
			}
			time.Sleep(time.Duration(lresp.TTL/2) * time.Second)
		}
		_, err = lc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: leaseID})
		return err
	})
}
Example #17
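// TestV2WatchKeyInDir ensures a watch on a key inside an expiring directory
// receives the directory's expire event.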
func TestV2WatchKeyInDir(t *testing.T) {
	defer testutil.AfterTest(t)
	cl := NewCluster(t, 1)
	cl.Launch(t)
	defer cl.Terminate(t)

	u := cl.URL(0)
	tc := NewTestClient()

	var body map[string]interface{}
	c := make(chan bool)

	// Create an expiring directory
	v := url.Values{}
	v.Set("dir", "true")
	v.Set("ttl", "1")
	resp, err := tc.PutForm(fmt.Sprintf("%s%s", u, "/v2/keys/keyindir"), v)
	if err != nil {
		t.Fatalf("put err = %v, want nil", err)
	}
	resp.Body.Close()

	// Create a permanent node within the directory
	v = url.Values{}
	v.Set("value", "XXX")
	resp, err = tc.PutForm(fmt.Sprintf("%s%s", u, "/v2/keys/keyindir/bar"), v)
	if err != nil {
		t.Fatalf("put err = %v, want nil", err)
	}
	resp.Body.Close()

	go func() {
		// Expect a notification when watching the node
		resp, err := tc.Get(fmt.Sprintf("%s%s", u, "/v2/keys/keyindir/bar?wait=true"))
		if err != nil {
			t.Fatalf("watch err = %v, want nil", err)
		}
		body = tc.ReadBodyJSON(resp)
		c <- true
	}()

	select {
	case <-c:
	// 1s ttl + 0.5s sync delay + 1.5s disk and network delay.
	// We allow this long a disk and network delay because Travis may be
	// slow when doing system calls.
	case <-time.After(3 * time.Second):
		t.Fatal("timed out waiting for watch result")
	}

	w := map[string]interface{}{
		"node": map[string]interface{}{
			"key": "/keyindir",
		},
		"action": "expire",
	}
	if err := checkBody(body, w); err != nil {
		t.Error(err)
	}
}
Example #18
// TestV3LeaseGrantByID ensures leases may be created with a given ID.
func TestV3LeaseGrantByID(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// create fixed lease
	lresp, err := toGRPC(clus.RandClient()).Lease.LeaseGrant(
		context.TODO(),
		&pb.LeaseGrantRequest{ID: 1, TTL: 1})
	if err != nil {
		t.Errorf("could not create lease 1 (%v)", err)
	}
	if lresp.ID != 1 {
		t.Errorf("got id %v, wanted id %v", lresp.ID, 1)
	}

	// create duplicate fixed lease
	lresp, err = toGRPC(clus.RandClient()).Lease.LeaseGrant(
		context.TODO(),
		&pb.LeaseGrantRequest{ID: 1, TTL: 1})
	if err != rpctypes.ErrLeaseExist {
		t.Error(err)
	}

	// create fresh fixed lease
	lresp, err = toGRPC(clus.RandClient()).Lease.LeaseGrant(
		context.TODO(),
		&pb.LeaseGrantRequest{ID: 2, TTL: 1})
	if err != nil {
		t.Errorf("could not create lease 2 (%v)", err)
	}
	if lresp.ID != 2 {
		t.Errorf("got id %v, wanted id %v", lresp.ID, 2)
	}
}
Example #19
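// testCtlV2GetRoleUser adds a role and a user with etcdctl, grants the role
// to the user, gets the user, and ensures a duplicate grant returns an error.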
func testCtlV2GetRoleUser(t *testing.T, cfg *etcdProcessClusterConfig) {
	defer testutil.AfterTest(t)

	epc := setupEtcdctlTest(t, cfg, true)
	defer func() {
		if err := epc.Close(); err != nil {
			t.Fatalf("error closing etcd processes (%v)", err)
		}
	}()

	if err := etcdctlRoleAdd(epc, "foo"); err != nil {
		t.Fatalf("failed to add role (%v)", err)
	}
	if err := etcdctlUserAdd(epc, "username", "password"); err != nil {
		t.Fatalf("failed to add user (%v)", err)
	}
	if err := etcdctlUserGrant(epc, "username", "foo"); err != nil {
		t.Fatalf("failed to grant role (%v)", err)
	}
	if err := etcdctlUserGet(epc, "username"); err != nil {
		t.Fatalf("failed to get user (%v)", err)
	}

	// ensure double grant gives an error; was crashing in 2.3.1
	regrantArgs := etcdctlPrefixArgs(epc)
	regrantArgs = append(regrantArgs, "user", "grant", "--roles", "foo", "username")
	if err := spawnWithExpect(regrantArgs, "duplicate"); err != nil {
		t.Fatalf("missing duplicate error on double grant role (%v)", err)
	}
}
Example #20
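// TestCtlV2AuthWithCommonName ensures a client authenticated by its TLS
// certificate common name can still write once auth is enabled.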
func TestCtlV2AuthWithCommonName(t *testing.T) {
	defer testutil.AfterTest(t)

	copiedCfg := configClientTLS
	copiedCfg.clientCertAuthEnabled = true

	epc := setupEtcdctlTest(t, &copiedCfg, false)
	defer func() {
		if err := epc.Close(); err != nil {
			t.Fatalf("error closing etcd processes (%v)", err)
		}
	}()

	if err := etcdctlRoleAdd(epc, "testrole"); err != nil {
		t.Fatalf("failed to add role (%v)", err)
	}
	if err := etcdctlRoleGrant(epc, "testrole", "--rw", "--path=/foo"); err != nil {
		t.Fatalf("failed to grant role (%v)", err)
	}
	if err := etcdctlUserAdd(epc, "root", "123"); err != nil {
		t.Fatalf("failed to add user (%v)", err)
	}
	if err := etcdctlUserAdd(epc, "Autogenerated CA", "123"); err != nil {
		t.Fatalf("failed to add user (%v)", err)
	}
	if err := etcdctlUserGrant(epc, "Autogenerated CA", "testrole"); err != nil {
		t.Fatalf("failed to grant role (%v)", err)
	}
	if err := etcdctlAuthEnable(epc); err != nil {
		t.Fatalf("failed to enable auth (%v)", err)
	}
	if err := etcdctlSet(epc, "foo", "bar"); err != nil {
		t.Fatalf("failed to write (%v)", err)
	}
}
Example #21
File: kv_test.go Project: ringtail/etcd
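// TestKVPutWithRequireLeader ensures a Put issued with a require-leader
// context fails with ErrNoLeader once quorum is lost.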
func TestKVPutWithRequireLeader(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	clus.Members[1].Stop(t)
	clus.Members[2].Stop(t)

	// wait for election timeout, then member[0] will not have a leader.
	var (
		electionTicks = 10
		tickDuration  = 10 * time.Millisecond
	)
	time.Sleep(time.Duration(3*electionTicks) * tickDuration)

	kv := clientv3.NewKV(clus.Client(0))
	_, err := kv.Put(clientv3.WithRequireLeader(context.Background()), "foo", "bar")
	if err != rpctypes.ErrNoLeader {
		t.Fatal(err)
	}

	// clients may give timeout errors since the members are stopped; take
	// the clients so that terminating the cluster won't complain
	clus.Client(1).Close()
	clus.Client(2).Close()
	clus.TakeClient(1)
	clus.TakeClient(2)
}
Example #22
// Ensure etcd will not panic when removing a just started member.
func TestIssue2904(t *testing.T) {
	defer testutil.AfterTest(t)
	// start 1-member cluster to ensure member 0 is the leader of the cluster.
	c := NewCluster(t, 1)
	c.Launch(t)
	defer c.Terminate(t)

	c.AddMember(t)
	c.Members[1].Stop(t)

	// send remove member-1 request to the cluster.
	cc := MustNewHTTPClient(t, c.URLs(), nil)
	ma := client.NewMembersAPI(cc)
	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	// the proposal is not committed because member 1 is stopped, but the
	// proposal is appended to leader's raft log.
	ma.Remove(ctx, c.Members[1].s.ID().String())
	cancel()

	// restart member, and expect it to send UpdateAttributes request.
	// the log in the leader is like this:
	// [..., remove 1, ..., update attr 1, ...]
	c.Members[1].Restart(t)
	// when the member comes back, it acks the proposal to remove itself
	// and applies it.
	<-c.Members[1].s.StopNotify()

	// terminate removed member
	c.Members[1].Terminate(t)
	c.Members = c.Members[:1]
	// wait for the member to be removed.
	c.waitMembersMatch(t, c.HTTPMembers())
}
Example #23
File: kv_test.go Project: ringtail/etcd
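// TestKVPutError ensures Put returns ErrEmptyKey for an empty key,
// ErrRequestTooLarge for an oversized request, and ErrNoSpace once the
// backend quota is exceeded.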
func TestKVPutError(t *testing.T) {
	defer testutil.AfterTest(t)

	var (
		maxReqBytes = 1.5 * 1024 * 1024
		quota       = int64(maxReqBytes * 1.2)
	)
	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, QuotaBackendBytes: quota})
	defer clus.Terminate(t)

	kv := clientv3.NewKV(clus.RandClient())
	ctx := context.TODO()

	_, err := kv.Put(ctx, "", "bar")
	if err != rpctypes.ErrEmptyKey {
		t.Fatalf("expected %v, got %v", rpctypes.ErrEmptyKey, err)
	}

	_, err = kv.Put(ctx, "key", strings.Repeat("a", int(maxReqBytes+100))) // 1.5MB
	if err != rpctypes.ErrRequestTooLarge {
		t.Fatalf("expected %v, got %v", rpctypes.ErrRequestTooLarge, err)
	}

	_, err = kv.Put(ctx, "foo1", strings.Repeat("a", int(maxReqBytes-50)))
	if err != nil { // below quota
		t.Fatal(err)
	}

	time.Sleep(500 * time.Millisecond) // give enough time for commit

	_, err = kv.Put(ctx, "foo2", strings.Repeat("a", int(maxReqBytes-50)))
	if err != rpctypes.ErrNoSpace { // over quota
		t.Fatalf("expected %v, got %v", rpctypes.ErrNoSpace, err)
	}
}
Example #24
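// testCluster launches a cluster of the given size and ensures it can make
// progress.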
func testCluster(t *testing.T, size int) {
	defer testutil.AfterTest(t)
	c := NewCluster(t, size)
	c.Launch(t)
	defer c.Terminate(t)
	clusterMustProgress(t, c.Members)
}
Example #25
File: kv_test.go Project: ringtail/etcd
// TestKVPutStoppedServerAndClose ensures closing after a failed Put works.
func TestKVPutStoppedServerAndClose(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	cli := clus.Client(0)
	clus.Members[0].Stop(t)

	ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
	// Get retries on all errors, so here we use it to eat the potential broken
	// pipe error for the next Put. The gRPC client might see a broken pipe
	// error when we issue the Get request before gRPC finds out the original
	// connection is down due to the member shutdown.
	_, err := cli.Get(ctx, "abc")
	cancel()
	if !strings.Contains(err.Error(), "context deadline") {
		t.Fatal(err)
	}

	// this Put fails and triggers an asynchronous connection retry;
	// use a fresh timeout context so the failure comes from the stopped
	// member rather than the already-expired context above
	ctx, cancel = context.WithTimeout(context.TODO(), time.Second)
	_, err = cli.Put(ctx, "abc", "123")
	cancel()
	if !strings.Contains(err.Error(), "context deadline") {
		t.Fatal(err)
	}
}
Example #26
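// TestTLSClusterOf3 ensures a three-member cluster with peer TLS can make
// progress.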
func TestTLSClusterOf3(t *testing.T) {
	defer testutil.AfterTest(t)
	c := NewClusterByConfig(t, &ClusterConfig{Size: 3, PeerTLS: &testTLSInfo})
	c.Launch(t)
	defer c.Terminate(t)
	clusterMustProgress(t, c.Members)
}
Example #27
File: lease_test.go Project: lrita/etcd
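// TestLeaseRevoke ensures a Put using a revoked lease fails with
// ErrLeaseNotFound.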
func TestLeaseRevoke(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	lapi := clientv3.NewLease(clus.RandClient())
	defer lapi.Close()

	kv := clientv3.NewKV(clus.RandClient())

	resp, err := lapi.Create(context.Background(), 10)
	if err != nil {
		t.Errorf("failed to create lease %v", err)
	}

	_, err = lapi.Revoke(context.Background(), lease.LeaseID(resp.ID))
	if err != nil {
		t.Errorf("failed to revoke lease %v", err)
	}

	_, err = kv.Put(context.TODO(), "foo", "bar", clientv3.WithLease(lease.LeaseID(resp.ID)))
	if err != v3rpc.ErrLeaseNotFound {
		t.Fatalf("err = %v, want %v", err, v3rpc.ErrLeaseNotFound)
	}
}
Example #28
// TestStreamReaderStopOnDial tests that a stream reader closes the connection on stop.
func TestStreamReaderStopOnDial(t *testing.T) {
	defer testutil.AfterTest(t)
	h := http.Header{}
	h.Add("X-Server-Version", version.Version)
	tr := &respWaitRoundTripper{rrt: &respRoundTripper{code: http.StatusOK, header: h}}
	sr := &streamReader{
		peerID: types.ID(2),
		tr:     &Transport{streamRt: tr, ClusterID: types.ID(1)},
		picker: mustNewURLPicker(t, []string{"http://localhost:2380"}),
		errorc: make(chan error, 1),
		typ:    streamTypeMessage,
		status: newPeerStatus(types.ID(2)),
	}
	tr.onResp = func() {
		// stop() waits for the run() goroutine to exit, but that exit
		// needs a response from RoundTrip() first; use goroutine
		go sr.stop()
		// wait so that stop() is blocked on run() exiting
		time.Sleep(10 * time.Millisecond)
		// sr.run() completes dialing then begins decoding while stopped
	}
	sr.start()
	select {
	case <-sr.done:
	case <-time.After(time.Second):
		t.Fatal("streamReader did not stop in time")
	}
}
Example #29
File: kv_test.go Project: veteranlu/etcd
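// TestKVDelete ensures a deleted key is no longer returned by Get.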
func TestKVDelete(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	kv := clientv3.NewKV(clus.RandClient())
	ctx := context.TODO()

	presp, err := kv.Put(ctx, "foo", "")
	if err != nil {
		t.Fatalf("couldn't put 'foo' (%v)", err)
	}
	if presp.Header.Revision != 2 {
		t.Fatalf("presp.Header.Revision got %d, want %d", presp.Header.Revision, 2)
	}
	resp, err := kv.Delete(ctx, "foo")
	if err != nil {
		t.Fatalf("couldn't delete key (%v)", err)
	}
	if resp.Header.Revision != 3 {
		t.Fatalf("resp.Header.Revision got %d, want %d", resp.Header.Revision, 3)
	}
	gresp, err := kv.Get(ctx, "foo")
	if err != nil {
		t.Fatalf("couldn't get key (%v)", err)
	}
	if len(gresp.Kvs) > 0 {
		t.Fatalf("gresp.Kvs got %+v, want none", gresp.Kvs)
	}
}
Example #30
File: lease_test.go Project: mgurevin/etcd
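// TestLeaseRevokeNewAfterClose ensures revoking a lease through a closed
// client fails promptly with ErrClientConnClosing.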
func TestLeaseRevokeNewAfterClose(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	cli := clus.Client(0)
	le := clientv3.NewLease(cli)
	resp, err := le.Grant(context.TODO(), 5)
	if err != nil {
		t.Fatal(err)
	}
	leaseID := resp.ID

	clus.TakeClient(0)
	if err := cli.Close(); err != nil {
		t.Fatal(err)
	}

	donec := make(chan struct{})
	go func() {
		if _, err := le.Revoke(context.TODO(), leaseID); err != grpc.ErrClientConnClosing {
			t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, err)
		}
		close(donec)
	}()
	select {
	case <-time.After(3 * time.Second):
		t.Fatal("le.Revoke took too long")
	case <-donec:
	}
}