Example #1
func NewWatchProxy(c *clientv3.Client) pb.WatchServer {
	wp := &watchProxy{
		cw:           c.Watcher,
		ctx:          clientv3.WithRequireLeader(c.Ctx()),
		retryLimiter: rate.NewLimiter(rate.Limit(retryPerSecond), retryPerSecond),
		leaderc:      make(chan struct{}),
	}
	wp.ranges = newWatchRanges(wp)
	go func() {
		// a new stream without opening any watchers won't catch
		// a lost leader event, so have a special watch to monitor it
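		// the revision just below MaxInt64 means no real events ever arrive;
		// the drain loop below ends only when the server cancels the stream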
		rev := int64((uint64(1) << 63) - 2)
		for wp.ctx.Err() == nil {
			wch := wp.cw.Watch(wp.ctx, lostLeaderKey, clientv3.WithRev(rev))
			for range wch {
			}
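			// notify leaderc listeners of the lost leader by closing the
			// channel, then recreate it for the next cycle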
			wp.mu.Lock()
			close(wp.leaderc)
			wp.leaderc = make(chan struct{})
			wp.mu.Unlock()
			wp.retryLimiter.Wait(wp.ctx)
		}
		wp.mu.Lock()
		<-wp.ctx.Done()
		wp.mu.Unlock()
		wp.wg.Wait()
		wp.ranges.stop()
	}()
	return wp
}
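Example #1 is the etcd gRPC watch proxy constructor. The interesting part is the background goroutine: it opens a require-leader watch on a sentinel key at a revision just below MaxInt64, so no real events ever arrive and the drain loop ends only when the server cancels the stream, i.e. when the member loses its leader. The same monitoring idea can be sketched with nothing but the public clientv3 API; the probe key, endpoint, and import path below are illustrative assumptions, not taken from the proxy code.

package main

import (
	"context"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

// monitorLeaderLoss blocks until the require-leader watch is cancelled by the
// server, which happens when the contacted member loses its raft leader, or
// until ctx is done. Sketch only, not part of etcd.
func monitorLeaderLoss(ctx context.Context, c *clientv3.Client) {
	wctx := clientv3.WithRequireLeader(ctx)
	// Revision just below MaxInt64: no real events will ever be delivered,
	// so the drain loop exits only when the server closes the stream.
	rev := int64((uint64(1) << 63) - 2)
	wch := c.Watch(wctx, "lost-leader-probe", clientv3.WithRev(rev))
	for range wch {
	}
	if ctx.Err() == nil {
		log.Println("require-leader watch closed: member has no leader")
	}
}

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
	monitorLeaderLoss(context.Background(), cli)
}

The proxy additionally rate-limits retries and rebroadcasts each leader loss by closing leaderc; the sketch stops after the first loss to stay short.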
Example #2
func TestKVPutWithRequireLeader(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	clus.Members[1].Stop(t)
	clus.Members[2].Stop(t)

	// wait for election timeout, then member[0] will not have a leader.
	var (
		electionTicks = 10
		tickDuration  = 10 * time.Millisecond
	)
	time.Sleep(time.Duration(3*electionTicks) * tickDuration)

	kv := clientv3.NewKV(clus.Client(0))
	_, err := kv.Put(clientv3.WithRequireLeader(context.Background()), "foo", "bar")
	if err != rpctypes.ErrNoLeader {
		t.Fatal(err)
	}

	// clients may give timeout errors since the members are stopped; take
	// the clients so that terminating the cluster won't complain
	clus.Client(1).Close()
	clus.Client(2).Close()
	clus.TakeClient(1)
	clus.TakeClient(2)
}
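Example #2 is an etcd integration test: with two of the three members stopped, a Put issued through a require-leader context on the surviving member fails with rpctypes.ErrNoLeader rather than blocking until quorum returns. Outside the test harness the pattern looks roughly like the sketch below; the endpoint and the import paths are assumptions for this etcd release line.

package main

import (
	"context"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Fail the Put quickly if the member we reach has no raft leader.
	ctx := clientv3.WithRequireLeader(context.Background())
	if _, err := cli.Put(ctx, "foo", "bar"); err != nil {
		if err == rpctypes.ErrNoLeader {
			log.Println("cluster has no leader; retry later")
		} else {
			log.Fatal(err)
		}
	}
}

Without WithRequireLeader the same Put would typically block until a leader is elected or the context expires.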
Example #3
// TestWatchWithRequireLeader checks that the watch channel closes when there is no leader.
func TestWatchWithRequireLeader(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// Put a key for the non-require-leader watch to read as an event.
	// The watchers will be on member[0]; put the key through member[0] to
	// ensure it receives the update, so a watch started after killing
	// quorum is guaranteed to see the key.
	liveClient := clus.Client(0)
	if _, err := liveClient.Put(context.TODO(), "foo", "bar"); err != nil {
		t.Fatal(err)
	}

	clus.Members[1].Stop(t)
	clus.Members[2].Stop(t)
	clus.Client(1).Close()
	clus.Client(2).Close()
	clus.TakeClient(1)
	clus.TakeClient(2)

	// wait for election timeout, then member[0] will not have a leader.
	tickDuration := 10 * time.Millisecond
	time.Sleep(time.Duration(3*clus.Members[0].ElectionTicks) * tickDuration)

	chLeader := liveClient.Watch(clientv3.WithRequireLeader(context.TODO()), "foo", clientv3.WithRev(1))
	chNoLeader := liveClient.Watch(context.TODO(), "foo", clientv3.WithRev(1))

	select {
	case resp, ok := <-chLeader:
		if !ok {
			t.Fatalf("expected %v watch channel, got closed channel", rpctypes.ErrNoLeader)
		}
		if resp.Err() != rpctypes.ErrNoLeader {
			t.Fatalf("expected %v watch response error, got %+v", rpctypes.ErrNoLeader, resp)
		}
	case <-time.After(3 * time.Second):
		t.Fatal("watch without leader took too long to close")
	}

	select {
	case resp, ok := <-chLeader:
		if ok {
			t.Fatalf("expected closed channel, got response %v", resp)
		}
	case <-time.After(3 * time.Second):
		t.Fatal("waited too long for channel to close")
	}

	if _, ok := <-chNoLeader; !ok {
		t.Fatalf("expected response, got closed channel")
	}
}
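Example #3 exercises the watch-side behaviour that Example #1 relies on: once quorum is lost, the require-leader watch first delivers a response whose Err() is rpctypes.ErrNoLeader and then its channel closes, while a plain watch on the same member keeps serving events from the local store. A hedged sketch of how application code might consume such a watch follows; the helper name, key, and handling policy are illustrative, not from etcd.

package main

import (
	"context"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

// watchUntilLeaderLoss streams events for key and returns the first watch
// error (rpctypes.ErrNoLeader on leader loss), or ctx.Err() once ctx ends.
// Illustrative helper, not part of etcd.
func watchUntilLeaderLoss(ctx context.Context, c *clientv3.Client, key string) error {
	wch := c.Watch(clientv3.WithRequireLeader(ctx), key, clientv3.WithRev(1))
	for resp := range wch {
		if err := resp.Err(); err != nil {
			// On leader loss this is rpctypes.ErrNoLeader; the channel
			// closes shortly afterwards, ending the range loop.
			return err
		}
		for _, ev := range resp.Events {
			log.Printf("%s %q -> %q", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
	return ctx.Err()
}

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
	if err := watchUntilLeaderLoss(context.Background(), cli, "foo"); err != nil {
		log.Println("watch ended:", err)
	}
}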