Example #1
// TestV3LeaseExists creates a lease on a random client, then sends a keepalive on another
// client to confirm it's visible to the whole cluster.
func TestV3LeaseExists(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// create lease
	ctx0, cancel0 := context.WithCancel(context.Background())
	defer cancel0()
	lresp, err := clus.RandClient().Lease.LeaseCreate(
		ctx0,
		&pb.LeaseCreateRequest{TTL: 30})
	if err != nil {
		t.Fatal(err)
	}
	if lresp.Error != "" {
		t.Fatal(lresp.Error)
	}

	// confirm keepalive
	ctx1, cancel1 := context.WithCancel(context.Background())
	defer cancel1()
	lac, err := clus.RandClient().Lease.LeaseKeepAlive(ctx1)
	if err != nil {
		t.Fatal(err)
	}
	defer lac.CloseSend()
	if err = lac.Send(&pb.LeaseKeepAliveRequest{ID: lresp.ID}); err != nil {
		t.Fatal(err)
	}
	if _, err = lac.Recv(); err != nil {
		t.Fatal(err)
	}
}
Example #2
func newWatcher(c *EtcdClient, key string, rev int64, isPrefix bool) (*Watcher, error) {
	ctx, cancel := context.WithCancel(context.Background())
	w, err := c.Watch.Watch(ctx)
	if err != nil {
		cancel()
		return nil, err
	}

	req := &pb.WatchCreateRequest{StartRevision: rev}
	if isPrefix {
		req.Prefix = []byte(key)
	} else {
		req.Key = []byte(key)
	}

	if err := w.Send(&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{CreateRequest: req}}); err != nil {
		cancel()
		return nil, err
	}

	wresp, err := w.Recv()
	if err != nil {
		cancel()
		return nil, err
	}
	if len(wresp.Events) != 0 || !wresp.Created {
		cancel()
		return nil, ErrWaitMismatch
	}
	ret := &Watcher{
		wstream: w,
		cancel:  cancel,
		donec:   make(chan struct{}),
		id:      storage.WatchID(wresp.WatchId),
		recvc:   make(chan *storagepb.Event),
	}
	go ret.recvLoop()
	return ret, nil
}
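newWatcher hands the stream off to a recvLoop goroutine that isn't shown above. A plausible sketch, assuming only the Watcher fields from the struct literal (wstream, donec, recvc) and no other state:

// recvLoop (sketch): pump events from the gRPC watch stream into recvc
// until the stream fails, then signal shutdown by closing donec.
func (w *Watcher) recvLoop() {
	defer close(w.donec)
	for {
		wresp, err := w.wstream.Recv()
		if err != nil {
			return
		}
		for _, ev := range wresp.Events {
			w.recvc <- ev
		}
	}
}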
Example #3
func TestNodeAdvance(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	storage := NewMemoryStorage()
	c := &Config{
		ID:              1,
		ElectionTick:    10,
		HeartbeatTick:   1,
		Storage:         storage,
		MaxSizePerMsg:   noLimit,
		MaxInflightMsgs: 256,
	}
	n := StartNode(c, []Peer{{ID: 1}})
	n.Campaign(ctx)
	<-n.Ready()
	n.Propose(ctx, []byte("foo"))
	var rd Ready
	select {
	case rd = <-n.Ready():
		t.Fatalf("unexpected Ready before Advance: %+v", rd)
	case <-time.After(time.Millisecond):
	}
	storage.Append(rd.Entries)
	n.Advance()
	select {
	case <-n.Ready():
	case <-time.After(100 * time.Millisecond):
		t.Errorf("expect Ready after Advance, but there is no Ready available")
	}
}
Example #4
func testWatchCancelRunning(t *testing.T, wctx *watchctx) {
	ctx, cancel := context.WithCancel(context.Background())
	if wctx.ch = wctx.w.Watch(ctx, "a"); wctx.ch == nil {
		t.Fatalf("expected non-nil watcher channel")
	}
	if _, err := wctx.kv.Put(ctx, "a", "a"); err != nil {
		t.Fatal(err)
	}
	cancel()
	select {
	case <-time.After(time.Second):
		t.Fatalf("took too long to cancel")
	case v, ok := <-wctx.ch:
		if !ok {
			// closed before getting put; OK
			break
		}
		// got the PUT; should close next
		select {
		case <-time.After(time.Second):
			t.Fatalf("took too long to close")
		case v, ok = <-wctx.ch:
			if ok {
				t.Fatalf("expected watcher channel to close, got %v", v)
			}
		}
	}
}
Example #5
func TestSimpleHTTPClientDoCancelContextWaitForRoundTrip(t *testing.T) {
	tr := newFakeTransport()
	c := &simpleHTTPClient{transport: tr}

	donechan := make(chan struct{})
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		c.Do(ctx, &fakeAction{})
		close(donechan)
	}()

	// This should call CancelRequest and begin the cancellation process
	cancel()

	select {
	case <-donechan:
		t.Fatalf("simpleHTTPClient.Do should not have exited yet")
	default:
	}

	tr.finishCancel <- struct{}{}

	select {
	case <-donechan:
		// expected behavior
		return
	case <-time.After(time.Second):
		t.Fatalf("simpleHTTPClient.Do did not exit within 1s")
	}
}
Example #6
func TestSimpleHTTPClientDoCancelContextResponseBodyClosed(t *testing.T) {
	tr := newFakeTransport()
	c := &simpleHTTPClient{transport: tr}

	// create an already-cancelled context
	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	body := &checkableReadCloser{ReadCloser: ioutil.NopCloser(strings.NewReader("foo"))}
	go func() {
		// wait until simpleHTTPClient sees that the context is already
		// canceled and calls CancelRequest
		testutil.WaitSchedule()

		// the response is returned before the cancellation takes effect
		tr.respchan <- &http.Response{Body: body}
	}()

	_, _, err := c.Do(ctx, &fakeAction{})
	if err == nil {
		t.Fatalf("expected non-nil error, got nil")
	}

	if !body.closed {
		t.Fatalf("expected closed body")
	}
}
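Examples #5 and #6 both depend on newFakeTransport, which isn't shown. A plausible reconstruction, inferred from how the tests use respchan and finishCancel (channel names beyond those two, and the buffering, are assumptions):

// fakeTransport (sketch): a Transport whose RoundTrip blocks until the
// test injects a response, an error, or a completed cancellation.
type fakeTransport struct {
	respchan     chan *http.Response
	errchan      chan error
	startCancel  chan struct{}
	finishCancel chan struct{}
}

func newFakeTransport() *fakeTransport {
	return &fakeTransport{
		respchan:     make(chan *http.Response, 1),
		errchan:      make(chan error, 1),
		startCancel:  make(chan struct{}, 1),
		finishCancel: make(chan struct{}, 1),
	}
}

func (t *fakeTransport) RoundTrip(*http.Request) (*http.Response, error) {
	select {
	case resp := <-t.respchan:
		return resp, nil
	case err := <-t.errchan:
		return nil, err
	case <-t.startCancel:
		select {
		// a response may still win the race against cancellation,
		// which is exactly what Example #6 exercises
		case resp := <-t.respchan:
			return resp, nil
		case <-t.finishCancel:
			return nil, errors.New("cancelled")
		}
	}
}

func (t *fakeTransport) CancelRequest(*http.Request) {
	t.startCancel <- struct{}{}
}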
Example #7
File: client.go Project: lrita/etcd
func newClient(cfg *Config) (*Client, error) {
	if cfg == nil {
		cfg = &Config{RetryDialer: dialEndpointList}
	}
	var creds *credentials.TransportAuthenticator
	if cfg.TLS != nil {
		c := credentials.NewTLS(cfg.TLS)
		creds = &c
	}
	// use a temporary skeleton client to bootstrap first connection
	ctx, cancel := context.WithCancel(context.TODO())
	conn, err := cfg.RetryDialer(&Client{cfg: *cfg, creds: creds, ctx: ctx})
	if err != nil {
		return nil, err
	}
	client := &Client{
		conn:   conn,
		cfg:    *cfg,
		creds:  creds,
		ctx:    ctx,
		cancel: cancel,
	}
	client.Cluster = NewCluster(client)
	client.KV = NewKV(client)
	client.Lease = NewLease(client)
	client.Watcher = NewWatcher(client)
	client.Auth = NewAuth(client)
	client.Maintenance = &maintenance{c: client}

	return client, nil
}
Example #8
// DoCancelAfterFirstResponse cancels the RPC after receiving the first message from the server.
func DoCancelAfterFirstResponse(tc testpb.TestServiceClient) {
	ctx, cancel := context.WithCancel(context.Background())
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err)
	}
	respParam := []*testpb.ResponseParameters{
		{
			Size: proto.Int32(31415),
		},
	}
	pl := clientNewPayload(testpb.PayloadType_COMPRESSABLE, 27182)
	req := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseParameters: respParam,
		Payload:            pl,
	}
	if err := stream.Send(req); err != nil {
		grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err)
	}
	if _, err := stream.Recv(); err != nil {
		grpclog.Fatalf("%v.Recv() = %v", stream, err)
	}
	cancel()
	if _, err := stream.Recv(); grpc.Code(err) != codes.Canceled {
		grpclog.Fatalf("%v compleled with error code %d, want %d", stream, grpc.Code(err), codes.Canceled)
	}
	grpclog.Println("CancelAfterFirstResponse done")
}
Example #9
// Register registers the service with the configured etcd instance.
// Once Register is called, the client also starts a goroutine that
// periodically refreshes the registration.
func (e *EtcdReigistryClient) Register() {
	e.etcdKey = buildKey(e.ServiceName, e.InstanceName)
	value := registerDTO{
		e.BaseURL,
	}

	val, _ := json.Marshal(value)
	e.keepAliveTicker = time.NewTicker(KeepAlivePeriod)
	ctx, c := context.WithCancel(context.TODO())
	e.cancel = c

	insertFunc := func() {
		e.etcdKApi.Set(context.Background(), e.etcdKey, string(val), &client.SetOptions{
			TTL: TTL,
		})
	}
	insertFunc()

	// run the keep-alive goroutine
	go func() {
		for {
			select {
			case <-e.keepAliveTicker.C:
				insertFunc()
				log.Printf("Keep alive routine for %s", e.ServiceName)
			case <-ctx.Done():
				log.Printf("Shutdown keep alive routine for %s", e.ServiceName)
				return
			}
		}
	}()
}
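Register stores the cancel function but no shutdown path appears here. A natural companion sketch; the method name Deregister and the Delete call are assumptions, not part of the original client:

// Deregister (hypothetical): stop the keep-alive goroutine via the stored
// cancel func, stop the ticker, and remove the registration key.
func (e *EtcdReigistryClient) Deregister() error {
	e.cancel()
	e.keepAliveTicker.Stop()
	_, err := e.etcdKApi.Delete(context.Background(), e.etcdKey, nil)
	return err
}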
Example #10
// TestV3LeaseKeepAlive ensures keepalive keeps the lease alive.
func TestV3LeaseKeepAlive(t *testing.T) {
	defer testutil.AfterTest(t)
	testLeaseRemoveLeasedKey(t, func(clus *ClusterV3, leaseID int64) error {
		lc := toGRPC(clus.RandClient()).Lease
		lreq := &pb.LeaseKeepAliveRequest{ID: leaseID}
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		lac, err := lc.LeaseKeepAlive(ctx)
		if err != nil {
			return err
		}
		defer lac.CloseSend()

		// renew long enough so lease would've expired otherwise
		for i := 0; i < 3; i++ {
			if err = lac.Send(lreq); err != nil {
				return err
			}
			lresp, rxerr := lac.Recv()
			if rxerr != nil {
				return rxerr
			}
			if lresp.ID != leaseID {
				return fmt.Errorf("expected lease ID %v, got %v", leaseID, lresp.ID)
			}
			time.Sleep(time.Duration(lresp.TTL/2) * time.Second)
		}
		_, err = lc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: leaseID})
		return err
	})
}
Example #11
func BenchmarkOneNode(b *testing.B) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	n := newNode()
	s := NewMemoryStorage()
	r := newTestRaft(1, []uint64{1}, 10, 1, s)
	go n.run(r)

	defer n.Stop()

	n.Campaign(ctx)
	go func() {
		for i := 0; i < b.N; i++ {
			n.Propose(ctx, []byte("foo"))
		}
	}()

	for {
		rd := <-n.Ready()
		s.Append(rd.Entries)
		// a reasonable disk sync latency
		time.Sleep(1 * time.Millisecond)
		n.Advance()
		if rd.HardState.Commit == uint64(b.N+1) {
			return
		}
	}
}
Example #12
func observe(c *clientv3.Client, election string) error {
	e := concurrency.NewElection(context.TODO(), c, election)
	ctx, cancel := context.WithCancel(context.TODO())

	donec := make(chan struct{})
	sigc := make(chan os.Signal, 1)
	signal.Notify(sigc, os.Interrupt, os.Kill)
	go func() {
		<-sigc
		cancel()
	}()

	go func() {
		for resp := range e.Observe(ctx) {
			display.Get(resp)
		}
		close(donec)
	}()

	<-donec

	select {
	case <-ctx.Done():
	default:
		return errors.New("elect: observer lost")
	}

	return nil
}
Example #13
func testCancel(t *testing.T, e env) {
	s, addr := serverSetUp(t, nil, math.MaxUint32, nil, nil, e)
	cc := clientSetUp(t, addr, nil, nil, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	argSize := 2718
	respSize := 314

	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize))
	if err != nil {
		t.Fatal(err)
	}

	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      payload,
	}
	ctx, cancel := context.WithCancel(context.Background())
	time.AfterFunc(1*time.Millisecond, cancel)
	reply, err := tc.UnaryCall(ctx, req)
	if grpc.Code(err) != codes.Canceled {
		t.Fatalf(`TestService/UnaryCall(_, _) = %v, %v; want <nil>, error code: %d`, reply, err, codes.Canceled)
	}
}
Example #14
func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
	fc := &inFlow{
		limit: initialWindowSize,
		conn:  t.fc,
	}
	// TODO(zhaoq): Handle uint32 overflow of Stream.id.
	s := &Stream{
		id:            t.nextID,
		method:        callHdr.Method,
		buf:           newRecvBuffer(),
		fc:            fc,
		sendQuotaPool: newQuotaPool(int(t.streamSendQuota)),
		headerChan:    make(chan struct{}),
	}
	t.nextID += 2
	s.windowHandler = func(n int) {
		t.updateWindow(s, uint32(n))
	}
	// Make a stream be able to cancel the pending operations by itself.
	s.ctx, s.cancel = context.WithCancel(ctx)
	s.dec = &recvBufferReader{
		ctx:  s.ctx,
		recv: s.buf,
	}
	return s
}
Example #15
// TestV3LeaseSwitch tests that a key can be switched from one lease to another.
func TestV3LeaseSwitch(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	key := "foo"

	// create lease
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	lresp1, err1 := toGRPC(clus.RandClient()).Lease.LeaseCreate(ctx, &pb.LeaseCreateRequest{TTL: 30})
	if err1 != nil {
		t.Fatal(err1)
	}
	lresp2, err2 := toGRPC(clus.RandClient()).Lease.LeaseCreate(ctx, &pb.LeaseCreateRequest{TTL: 30})
	if err2 != nil {
		t.Fatal(err2)
	}

	// attach key on lease1 then switch it to lease2
	put1 := &pb.PutRequest{Key: []byte(key), Lease: lresp1.ID}
	_, err := toGRPC(clus.RandClient()).KV.Put(ctx, put1)
	if err != nil {
		t.Fatal(err)
	}
	put2 := &pb.PutRequest{Key: []byte(key), Lease: lresp2.ID}
	_, err = toGRPC(clus.RandClient()).KV.Put(ctx, put2)
	if err != nil {
		t.Fatal(err)
	}

	// revoke lease1 should not remove key
	_, err = toGRPC(clus.RandClient()).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lresp1.ID})
	if err != nil {
		t.Fatal(err)
	}
	rreq := &pb.RangeRequest{Key: []byte("foo")}
	rresp, err := toGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq)
	if err != nil {
		t.Fatal(err)
	}
	if len(rresp.Kvs) != 1 {
		t.Fatalf("unexpect removal of key")
	}

	// revoke lease2 should remove key
	_, err = toGRPC(clus.RandClient()).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lresp2.ID})
	if err != nil {
		t.Fatal(err)
	}
	rresp, err = toGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq)
	if err != nil {
		t.Fatal(err)
	}
	if len(rresp.Kvs) != 0 {
		t.Fatalf("lease removed but key remains")
	}
}
Example #16
func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
	defer close(ch)
	for {
		resp, err := e.client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
		if err != nil {
			return
		}

		var kv *storagepb.KeyValue

		cctx, cancel := context.WithCancel(ctx)
		if len(resp.Kvs) == 0 {
			// wait for first key put on prefix
			opts := []v3.OpOption{v3.WithRev(resp.Header.Revision), v3.WithPrefix()}
			wch := e.client.Watch(cctx, e.keyPrefix, opts...)

			for kv == nil {
				wr, ok := <-wch
				if !ok || wr.Err() != nil {
					cancel()
					return
				}
				// only accept PUTs; a DELETE will make observe() spin
				for _, ev := range wr.Events {
					if ev.Type == storagepb.PUT {
						kv = ev.Kv
						break
					}
				}
			}
		} else {
			kv = resp.Kvs[0]
		}

		wch := e.client.Watch(cctx, string(kv.Key), v3.WithRev(kv.ModRevision))
		keyDeleted := false
		for !keyDeleted {
			wr, ok := <-wch
			if !ok {
				cancel()
				return
			}
			for _, ev := range wr.Events {
				if ev.Type == storagepb.DELETE {
					keyDeleted = true
					break
				}
				resp.Header = &wr.Header
				resp.Kvs = []*storagepb.KeyValue{ev.Kv}
				select {
				case ch <- *resp:
				case <-cctx.Done():
					cancel()
					return
				}
			}
		}
		cancel()
	}
}
Example #17
func (le *LeaderElector) Start() <-chan ElectionResponse {
	var waitIndex uint64 = 0

	out := make(chan ElectionResponse, 2)

	await := make(chan bool, 2)
	renew := make(chan bool, 2)

	le.ctx, le.cancel = context.WithCancel(context.Background())
	if le.ctx == nil || le.cancel == nil {
		out <- ElectionResponse{
			err: errors.New("Couldn't instantiate context/cancel objects"),
		}
		close(out)
		return out
	}

	go func() {
		for {
			select {
			case <-le.ctx.Done():
				le.obsvr.Stop()
				close(out)
				return
			case <-await:
				resp, err := le.waitForLeadership(waitIndex)
				if err != nil {
					time.Sleep(LE_SLEEP_INTERVAL)
				} else if resp != nil {
					waitIndex = resp.Node.ModifiedIndex
				}
			case <-renew:
				errChan := le.ec.RenewTTL(le.ctx, le.keyPath, le.value, TTL_VAL, TTL_REFRESH_TIMEOUT)
				for e := range errChan {
					out <- ElectionResponse{err: e}
					time.Sleep(LE_SLEEP_INTERVAL)
					break
				}
			default:
				_, err := le.acquireLeadership()
				if err != nil {
					errObj, ok := err.(client.Error)
					if ok && errObj.Code == client.ErrorCodeNodeExist {
						await <- true
					} else {
						time.Sleep(LE_SLEEP_INTERVAL)
					}
				} else {
					renew <- true
					out <- ElectionResponse{err: nil}
				}
			}
		}
	}()

	return out
}
Example #18
// TestElectionFailover tests that an election will fail over to a new leader
// when the current leader's session is closed.
func TestElectionFailover(t *testing.T) {
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	defer dropSessionLease(clus)

	cctx, cancel := context.WithCancel(context.TODO())
	defer cancel()

	// first leader (elected)
	e := concurrency.NewElection(clus.clients[0], "test-election")
	if err := e.Campaign(context.TODO(), "foo"); err != nil {
		t.Fatalf("failed volunteer (%v)", err)
	}

	// check first leader
	resp, ok := <-e.Observe(cctx)
	if !ok {
		t.Fatalf("could not wait for first election; channel closed")
	}
	s := string(resp.Kvs[0].Value)
	if s != "foo" {
		t.Fatalf("wrong election result. got %s, wanted foo", s)
	}

	// next leader
	electedc := make(chan struct{})
	go func() {
		ee := concurrency.NewElection(clus.clients[1], "test-election")
		if eer := ee.Campaign(context.TODO(), "bar"); eer != nil {
			t.Fatal(eer)
		}
		electedc <- struct{}{}
	}()

	// invoke leader failover
	session, serr := concurrency.NewSession(clus.clients[0])
	if serr != nil {
		t.Fatal(serr)
	}
	if err := session.Close(); err != nil {
		t.Fatal(err)
	}

	// check new leader
	e = concurrency.NewElection(clus.clients[2], "test-election")
	resp, ok = <-e.Observe(cctx)
	if !ok {
		t.Fatalf("could not wait for second election; channel closed")
	}
	s = string(resp.Kvs[0].Value)
	if s != "bar" {
		t.Fatalf("wrong election result. got %s, wanted bar", s)
	}

	// leader must ack election (otherwise, Campaign may see closed conn)
	<-electedc
}
Example #19
func waitUpdate(ctx context.Context, client *v3.Client, key string, opts ...v3.OpOption) error {
	cctx, cancel := context.WithCancel(ctx)
	defer cancel()
	wresp, ok := <-client.Watch(cctx, key, opts...)
	if !ok {
		return ctx.Err()
	}
	return wresp.Err()
}
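A minimal usage sketch for waitUpdate, assuming an already-connected *v3.Client; the key name, the caller name, and the revision handling are illustrative only:

// waitForChange (hypothetical caller): block until "mykey" is updated at
// or after rev+1, or until ctx is canceled.
func waitForChange(ctx context.Context, client *v3.Client, rev int64) error {
	return waitUpdate(ctx, client, "mykey", v3.WithRev(rev+1))
}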
Example #20
// NewFIFOScheduler returns a Scheduler that schedules jobs in FIFO
// order sequentially
func NewFIFOScheduler() Scheduler {
	f := &fifo{
		resume: make(chan struct{}, 1),
		donec:  make(chan struct{}, 1),
	}
	f.finishCond = sync.NewCond(&f.mu)
	f.ctx, f.cancel = context.WithCancel(context.Background())
	go f.run()
	return f
}
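The fifo type itself isn't shown. A struct shape consistent with the constructor above; only the fields the constructor touches are certain, the rest (such as pendings) are assumed internals:

// fifo (sketch): the scheduler state implied by NewFIFOScheduler.
type fifo struct {
	mu sync.Mutex

	resume     chan struct{} // kicks run() when new jobs arrive
	donec      chan struct{} // closed when run() exits
	finishCond *sync.Cond    // broadcasts job completion
	pendings   []Job         // assumed: queued jobs, run in FIFO order

	ctx    context.Context
	cancel context.CancelFunc
}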
Example #21
// TestMultiNodeStart ensures that a node can be started correctly. The node should
// start with correct configuration change entries, and can accept and commit
// proposals.
func TestMultiNodeStart(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	cc := raftpb.ConfChange{Type: raftpb.ConfChangeAddNode, NodeID: 1}
	ccdata, err := cc.Marshal()
	if err != nil {
		t.Fatalf("unexpected marshal error: %v", err)
	}
	wants := []Ready{
		{
			SoftState: &SoftState{Lead: 1, RaftState: StateLeader},
			HardState: raftpb.HardState{Term: 2, Commit: 2, Vote: 1},
			Entries: []raftpb.Entry{
				{Type: raftpb.EntryConfChange, Term: 1, Index: 1, Data: ccdata},
				{Term: 2, Index: 2},
			},
			CommittedEntries: []raftpb.Entry{
				{Type: raftpb.EntryConfChange, Term: 1, Index: 1, Data: ccdata},
				{Term: 2, Index: 2},
			},
		},
		{
			HardState:        raftpb.HardState{Term: 2, Commit: 3, Vote: 1},
			Entries:          []raftpb.Entry{{Term: 2, Index: 3, Data: []byte("foo")}},
			CommittedEntries: []raftpb.Entry{{Term: 2, Index: 3, Data: []byte("foo")}},
		},
	}
	mn := StartMultiNode(1)
	storage := NewMemoryStorage()
	mn.CreateGroup(1, newTestConfig(1, nil, 10, 1, storage), []Peer{{ID: 1}})
	mn.Campaign(ctx, 1)
	gs := <-mn.Ready()
	g := gs[1]
	if !reflect.DeepEqual(g, wants[0]) {
		t.Fatalf("#%d: g = %+v,\n             w   %+v", 1, g, wants[0])
	} else {
		storage.Append(g.Entries)
		mn.Advance(gs)
	}

	mn.Propose(ctx, 1, []byte("foo"))
	if gs2 := <-mn.Ready(); !reflect.DeepEqual(gs2[1], wants[1]) {
		t.Errorf("#%d: g = %+v,\n             w   %+v", 2, gs2[1], wants[1])
	} else {
		storage.Append(gs2[1].Entries)
		mn.Advance(gs2)
	}

	select {
	case rd := <-mn.Ready():
		t.Errorf("unexpected Ready: %+v", rd)
	case <-time.After(time.Millisecond):
	}
}
Example #22
// TestV3LeaseFailover ensures the old leader drops lease keepalive requests within
// the election timeout after it loses its quorum, and that the new leader extends
// the lease TTL to at least TTL + election timeout.
func TestV3LeaseFailover(t *testing.T) {
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	toIsolate := clus.waitLeader(t, clus.Members)

	lc := toGRPC(clus.Client(toIsolate)).Lease

	// create lease
	lresp, err := lc.LeaseCreate(context.TODO(), &pb.LeaseCreateRequest{TTL: 5})
	if err != nil {
		t.Fatal(err)
	}
	if lresp.Error != "" {
		t.Fatal(lresp.Error)
	}

	// isolate the current leader from its followers.
	clus.Members[toIsolate].Pause()

	lreq := &pb.LeaseKeepAliveRequest{ID: lresp.ID}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	lac, err := lc.LeaseKeepAlive(ctx)
	if err != nil {
		t.Fatal(err)
	}
	defer lac.CloseSend()

	// send keepalives to the old leader until it starts
	// to drop lease requests.
	var expectedExp time.Time
	for {
		if err = lac.Send(lreq); err != nil {
			break
		}
		lkresp, rxerr := lac.Recv()
		if rxerr != nil {
			break
		}
		expectedExp = time.Now().Add(time.Duration(lkresp.TTL) * time.Second)
		time.Sleep(time.Duration(lkresp.TTL/2) * time.Second)
	}

	clus.Members[toIsolate].Resume()
	clus.waitLeader(t, clus.Members)

	// the lease should not expire before the last received expiry deadline.
	time.Sleep(expectedExp.Sub(time.Now()) - 500*time.Millisecond)

	if !leaseExist(t, clus, lresp.ID) {
		t.Error("unexpected lease not exists")
	}
}
Example #23
func testV3WatchCancel(t *testing.T, startRev int64) {
	clus := newClusterGRPC(t, &clusterConfig{size: 3})
	wAPI := pb.NewWatchClient(clus.RandConn())

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	wStream, errW := wAPI.Watch(ctx)
	if errW != nil {
		t.Fatalf("wAPI.Watch error: %v", errW)
	}

	wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
		CreateRequest: &pb.WatchCreateRequest{
			Key: []byte("foo"), StartRevision: startRev}}}
	if err := wStream.Send(wreq); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}

	wresp, errR := wStream.Recv()
	if errR != nil {
		t.Errorf("wStream.Recv error: %v", errR)
	}
	if !wresp.Created {
		t.Errorf("wresp.Created got = %v, want = true", wresp.Created)
	}

	creq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CancelRequest{
		CancelRequest: &pb.WatchCancelRequest{
			WatchId: wresp.WatchId}}}
	if err := wStream.Send(creq); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}

	cresp, err := wStream.Recv()
	if err != nil {
		t.Errorf("wStream.Recv error: %v", err)
	}
	if !cresp.Canceled {
		t.Errorf("cresp.Canceled got = %v, want = true", cresp.Canceled)
	}

	kvc := pb.NewKVClient(clus.RandConn())
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		t.Errorf("couldn't put key (%v)", err)
	}

	// the watch was canceled, so no further responses should arrive
	rok, nr := WaitResponse(wStream, 1*time.Second)
	if !rok {
		t.Errorf("unexpected pb.WatchResponse received: %+v", nr)
	}

	clus.Terminate(t)
}
Example #24
func (s *stresser) Stress() error {
	conn, err := grpc.Dial(s.Endpoint, grpc.WithInsecure(), grpc.WithTimeout(5*time.Second))
	if err != nil {
		return fmt.Errorf("%v (%s)", err, s.Endpoint)
	}
	defer conn.Close()
	ctx, cancel := context.WithCancel(context.Background())

	wg := &sync.WaitGroup{}
	wg.Add(s.N)

	s.mu.Lock()
	s.conn = conn
	s.cancel = cancel
	s.wg = wg
	s.mu.Unlock()

	kvc := pb.NewKVClient(conn)

	for i := 0; i < s.N; i++ {
		go func(i int) {
			defer wg.Done()
			for {
				// TODO: a 10-second timeout is enough to cover leader failure
				// and immediate re-election. Find out in what other cases this
				// could time out.
				putctx, putcancel := context.WithTimeout(ctx, 10*time.Second)
				_, err := kvc.Put(putctx, &pb.PutRequest{
					Key:   []byte(fmt.Sprintf("foo%d", rand.Intn(s.KeySuffixRange))),
					Value: []byte(randStr(s.KeySize)),
				})
				putcancel()
				if err != nil {
					if grpc.ErrorDesc(err) == context.DeadlineExceeded.Error() {
						// This retries when the request fires at the same time as a
						// leader failure. When we terminate the leader, requests to
						// it cannot be processed and time out; requests to followers
						// cannot be forwarded to the old leader, so they time out
						// as well. Keep stressing until the cluster elects a new
						// leader and starts processing requests again.
						continue
					}
					return
				}
				s.mu.Lock()
				s.success++
				s.mu.Unlock()
			}
		}(i)
	}

	<-ctx.Done()
	return nil
}
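Stress stashes conn, cancel, and wg under the mutex for later shutdown; a plausible matching teardown (the method name Cancel is an assumption):

// Cancel (sketch): cancel the root context so every in-flight Put aborts,
// wait for the worker goroutines, then close the shared connection.
func (s *stresser) Cancel() {
	s.mu.Lock()
	cancel, conn, wg := s.cancel, s.conn, s.wg
	s.mu.Unlock()
	cancel()
	wg.Wait()
	conn.Close()
}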
Example #25
func (clm *clientLeaseMgr) sessionLease(client *clientv3.Client, ttl int64) (lease.LeaseID, error) {
	clm.mu.Lock()
	defer clm.mu.Unlock()
	if lka, ok := clm.leases[client]; ok {
		return lka.id, nil
	}

	resp, err := client.Lease.LeaseCreate(context.TODO(), &pb.LeaseCreateRequest{TTL: ttl})
	if err != nil {
		return lease.NoLease, err
	}
	id := lease.LeaseID(resp.ID)

	ctx, cancel := context.WithCancel(context.Background())
	keepAlive, err := client.Lease.LeaseKeepAlive(ctx)
	if err != nil || keepAlive == nil {
		cancel()
		return lease.NoLease, err
	}

	lka := &leaseKeepAlive{id: id, donec: make(chan struct{})}
	clm.leases[client] = lka

	// keep the lease alive until a connection error
	go func() {
		defer func() {
			keepAlive.CloseSend()
			clm.mu.Lock()
			delete(clm.leases, client)
			clm.mu.Unlock()
			cancel()
			close(lka.donec)
		}()

		ttl := resp.TTL
		for {
			lreq := &pb.LeaseKeepAliveRequest{ID: int64(id)}
			select {
			case <-lka.donec:
				return
			case <-time.After(time.Duration(ttl/2) * time.Second):
			}
			if err := keepAlive.Send(lreq); err != nil {
				break
			}
			resp, err := keepAlive.Recv()
			if err != nil {
				break
			}
			ttl = resp.TTL
		}
	}()

	return id, nil
}
Example #26
func (e *etcd) WatchRouteChanges(filter string) (<-chan Event, <-chan error, context.CancelFunc) {
	events := make(chan Event)
	errors := make(chan error)

	cxt, cancel := context.WithCancel(e.ctx)

	go e.dispatchWatchEvents(cxt, filter, events, errors)

	time.Sleep(100 * time.Millisecond) // give the watcher a chance to connect

	return events, errors, cancel
}
Example #27
func testWatchCancelImmediate(t *testing.T, wctx *watchctx) {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	wch := wctx.w.Watch(ctx, "a")
	select {
	case wresp, ok := <-wch:
		if ok {
			t.Fatalf("read wch got %v; expected closed channel", wresp)
		}
	default:
		t.Fatalf("closed watcher channel should not block")
	}
}
Example #28
func setupFakeEtcd(keys client.KeysAPI) db.DB {
	nodeURLs := []string{"127.0.0.1:5000"}

	cfg := client.Config{
		Endpoints: nodeURLs,
		Transport: client.DefaultTransport,
	}

	etcdClient, err := client.New(cfg)
	Expect(err).NotTo(HaveOccurred())
	ctx, cancel := context.WithCancel(context.Background())
	return db.New(etcdClient, keys, ctx, cancel)
}
Example #29
// DoCancelAfterBegin cancels the RPC after metadata has been sent but before payloads are sent.
func DoCancelAfterBegin(tc testpb.TestServiceClient) {
	ctx, cancel := context.WithCancel(metadata.NewContext(context.Background(), testMetadata))
	stream, err := tc.StreamingInputCall(ctx)
	if err != nil {
		grpclog.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err)
	}
	cancel()
	_, err = stream.CloseAndRecv()
	if grpc.Code(err) != codes.Canceled {
		grpclog.Fatalf("%v.CloseAndRecv() got error code %d, want %d", stream, grpc.Code(err), codes.Canceled)
	}
	grpclog.Println("CancelAfterBegin done")
}
Example #30
func TestIsHalted(t *testing.T) {
	if !isHalted(nil, fmt.Errorf("etcdserver: some etcdserver error")) {
		t.Errorf(`error prefixed with "etcdserver: " should be Halted`)
	}
	ctx, cancel := context.WithCancel(context.TODO())
	if isHalted(ctx, nil) {
		t.Errorf("no error and active context should not be Halted")
	}
	cancel()
	if !isHalted(ctx, nil) {
		t.Errorf("cancel on context should be Halted")
	}
}
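isHalted itself isn't shown. A minimal implementation consistent with the three assertions above; the exact prefix test is an assumption:

// isHalted (sketch): a canceled context halts, as does any error carrying
// the "etcdserver: " prefix.
func isHalted(ctx context.Context, err error) bool {
	if ctx != nil && ctx.Err() != nil {
		return true
	}
	return err != nil && strings.HasPrefix(err.Error(), "etcdserver: ")
}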