Example #1
// TestWatchWithCreatedNotificationDropConn ensures that a watcher
// opened with a created notification does not post a duplicate
// created event after a disconnect.
func TestWatchWithCreatedNotificationDropConn(t *testing.T) {
	cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer cluster.Terminate(t)

	client := cluster.RandClient()

	wch := client.Watch(context.Background(), "a", clientv3.WithCreatedNotify())

	resp := <-wch

	if !resp.Created {
		t.Fatalf("expected created event, got %v", resp)
	}

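	// drop the member's connections to force the client's watch stream to reconnect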
	cluster.Members[0].DropConnections()

	// try to receive from the watch channel again to make sure
	// it doesn't post another create notification
	select {
	case wresp := <-wch:
		t.Fatalf("got unexpected watch response: %+v\n", wresp)
	case <-time.After(time.Second):
		// the watcher may not have reconnected by the time it hits the
		// select, so it wouldn't yet have had a chance to filter out a
		// second create event
	}
}
Example #2
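// newWatchBroadcast opens a single upstream watch and broadcasts its
// responses to all attached receivers. WithCreatedNotify is requested on
// every (re)connect here, so a retry after leader loss may post a second
// created event; Example #7 shows a variant that guards against this.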
func newWatchBroadcast(wp *watchProxy, w *watcher, update func(*watchBroadcast)) *watchBroadcast {
	cctx, cancel := context.WithCancel(wp.ctx)
	wb := &watchBroadcast{
		cancel:    cancel,
		nextrev:   w.nextrev,
		receivers: make(map[*watcher]struct{}),
		donec:     make(chan struct{}),
	}
	wb.add(w)
	go func() {
		defer close(wb.donec)
		// loop because leader loss will close the watch channel
		for cctx.Err() == nil {
			wch := wp.cw.Watch(cctx, w.wr.key,
				clientv3.WithRange(w.wr.end),
				clientv3.WithProgressNotify(),
				clientv3.WithCreatedNotify(),
				clientv3.WithRev(wb.nextrev),
				clientv3.WithPrevKV(),
			)
			for wr := range wch {
				wb.bcast(wr)
				update(wb)
			}
			wp.retryLimiter.Wait(cctx)
		}
	}()
	return wb
}
Example #3
// TestWatchWithCreatedNotification checks that a watch opened with
// WithCreatedNotify receives a created notification as its first response.
func TestWatchWithCreatedNotification(t *testing.T) {
	cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer cluster.Terminate(t)

	client := cluster.RandClient()

	ctx := context.Background()

	createC := client.Watch(ctx, "a", clientv3.WithCreatedNotify())

	resp := <-createC

	if !resp.Created {
		t.Fatalf("expected created event, got %v", resp)
	}
}
Example #4
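// addWatcher coalesces watchers on the same key range into a shared
// watcher group. A watcher joining a group whose create event has already
// been delivered is sent a synthesized created response; otherwise the
// group relays the create event from the etcd server.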
func (wgs *watchergroups) addWatcher(rid receiverID, w watcher) {
	wgs.mu.Lock()
	defer wgs.mu.Unlock()

	groups := wgs.groups

	if wg, ok := groups[w.wr]; ok {
		rev := wg.add(rid, w)
		wgs.idToGroup[rid] = wg

		if rev == 0 {
			// The group was just created and its create event has not been
			// delivered yet; rely on the etcd server to deliver it, since
			// also sending one here could post the created event twice.
			return
		}

		resp := &pb.WatchResponse{
			Header: &pb.ResponseHeader{
				// TODO: fill in ClusterId
				// TODO: fill in MemberId
				Revision: rev,
				// TODO: fill in RaftTerm
			},
			WatchId: rid.watcherID,
			Created: true,
		}
		w.ch <- resp

		return
	}

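	// no group watches this key range yet; open a shared upstream watch,
	// requesting a created notification so the new group can deliver the
	// create event to its members itself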
	ctx, cancel := context.WithCancel(wgs.proxyCtx)

	wch := wgs.cw.Watch(ctx, w.wr.key,
		clientv3.WithRange(w.wr.end),
		clientv3.WithProgressNotify(),
		clientv3.WithCreatedNotify(),
	)

	watchg := newWatchergroup(wch, cancel)
	watchg.add(rid, w)
	go watchg.run()
	groups[w.wr] = watchg
	wgs.idToGroup[rid] = watchg
}
Example #5
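// addDedicatedWatcher opens a separate upstream watch for a watcher that
// starts at a specific revision rather than joining a shared group.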
func (sws *serverWatchStream) addDedicatedWatcher(w watcher, rev int64) {
	sws.mu.Lock()
	defer sws.mu.Unlock()

	ctx, cancel := context.WithCancel(sws.proxyCtx)

	wch := sws.cw.Watch(ctx,
		w.wr.key, clientv3.WithRange(w.wr.end),
		clientv3.WithRev(rev),
		clientv3.WithProgressNotify(),
		clientv3.WithCreatedNotify(),
	)

	ws := newWatcherSingle(wch, cancel, w, sws)
	sws.singles[w.id] = ws
	go ws.run()
}
Example #6
// TestWatchCancelOnServer ensures that client-side watch cancellations propagate back to the server.
func TestWatchCancelOnServer(t *testing.T) {
	cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer cluster.Terminate(t)

	client := cluster.RandClient()

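	// open a watch and cancel it several times; every cancellation should reach the server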
	for i := 0; i < 10; i++ {
		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
		client.Watch(ctx, "a", clientv3.WithCreatedNotify())
		cancel()
	}
	// wait for cancels to propagate
	time.Sleep(time.Second)

	watchers, err := cluster.Members[0].Metric("etcd_debugging_mvcc_watcher_total")
	if err != nil {
		t.Fatal(err)
	}
	if watchers != "0" {
		t.Fatalf("expected 0 watchers, got %q", watchers)
	}
}
Example #7
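// newWatchBroadcast opens a single upstream watch shared by all receivers.
// Unlike Example #2, it requests a created notification only until the
// broadcast has seen a response, so a watch recreated after leader loss
// does not post a duplicate created event to the client.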
func newWatchBroadcast(wp *watchProxy, w *watcher, update func(*watchBroadcast)) *watchBroadcast {
	cctx, cancel := context.WithCancel(wp.ctx)
	wb := &watchBroadcast{
		cancel:    cancel,
		nextrev:   w.nextrev,
		receivers: make(map[*watcher]struct{}),
		donec:     make(chan struct{}),
	}
	wb.add(w)
	go func() {
		defer close(wb.donec)
		// loop because leader loss will close the watch channel
		for cctx.Err() == nil {
			opts := []clientv3.OpOption{
				clientv3.WithRange(w.wr.end),
				clientv3.WithProgressNotify(),
				clientv3.WithRev(wb.nextrev),
				clientv3.WithPrevKV(),
			}
			// The create notification should be the first response;
			// if the watch is recreated following leader loss, it
			// shouldn't post a second create response to the client.
			if wb.responses == 0 {
				opts = append(opts, clientv3.WithCreatedNotify())
			}
			wch := wp.cw.Watch(cctx, w.wr.key, opts...)

			for wr := range wch {
				wb.bcast(wr)
				update(wb)
			}
			wp.retryLimiter.Wait(cctx)
		}
	}()
	return wb
}