Example 1
func newWatchBroadcast(wp *watchProxy, w *watcher, update func(*watchBroadcast)) *watchBroadcast {
	cctx, cancel := context.WithCancel(wp.ctx)
	wb := &watchBroadcast{
		cancel:    cancel,
		nextrev:   w.nextrev,
		receivers: make(map[*watcher]struct{}),
		donec:     make(chan struct{}),
	}
	wb.add(w)
	go func() {
		defer close(wb.donec)
		// loop because leader loss will close channel
		for cctx.Err() == nil {
			wch := wp.cw.Watch(cctx, w.wr.key,
				clientv3.WithRange(w.wr.end),
				clientv3.WithProgressNotify(),
				clientv3.WithCreatedNotify(),
				clientv3.WithRev(wb.nextrev),
				clientv3.WithPrevKV(),
			)
			for wr := range wch {
				wb.bcast(wr)
				update(wb)
			}
			wp.retryLimiter.Wait(cctx)
		}
	}()
	return wb
}
Example 2
func testWatchWithProgressNotify(t *testing.T, watchOnPut bool) {
	defer testutil.AfterTest(t)

	// accelerate report interval so test terminates quickly
	oldpi := v3rpc.GetProgressReportInterval()
	// using atomics to avoid race warnings
	pi := 3 * time.Second
	v3rpc.SetProgressReportInterval(pi)
	defer func() { v3rpc.SetProgressReportInterval(oldpi) }()

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	wc := clientv3.NewWatcher(clus.RandClient())
	defer wc.Close()

	opts := []clientv3.OpOption{clientv3.WithProgressNotify()}
	if watchOnPut {
		opts = append(opts, clientv3.WithPrefix())
	}
	rch := wc.Watch(context.Background(), "foo", opts...)

	select {
	case resp := <-rch: // wait for notification
		if len(resp.Events) != 0 {
			t.Fatalf("resp.Events expected none, got %+v", resp.Events)
		}
	case <-time.After(2 * pi):
		t.Fatalf("watch response expected in %v, but timed out", pi)
	}

	kvc := clientv3.NewKV(clus.RandClient())
	if _, err := kvc.Put(context.TODO(), "foox", "bar"); err != nil {
		t.Fatal(err)
	}

	select {
	case resp := <-rch:
		if resp.Header.Revision != 2 {
			t.Fatalf("resp.Header.Revision expected 2, got %d", resp.Header.Revision)
		}
		if watchOnPut { // wait for put if watch on the put key
			ev := []*clientv3.Event{{Type: clientv3.EventTypePut,
				Kv: &mvccpb.KeyValue{Key: []byte("foox"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1}}}
			if !reflect.DeepEqual(ev, resp.Events) {
				t.Fatalf("expected %+v, got %+v", ev, resp.Events)
			}
		} else if len(resp.Events) != 0 { // wait for notification otherwise
			t.Fatalf("expected no events, but got %+v", resp.Events)
		}
	case <-time.After(2 * pi):
		t.Fatalf("watch response expected in %v, but timed out", pi)
	}
}
Example 3
func ExampleWatcher_watchWithProgressNotify() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	rch := cli.Watch(context.Background(), "foo", clientv3.WithProgressNotify())
	wresp := <-rch
	fmt.Printf("wresp.Header.Revision: %d\n", wresp.Header.Revision)
	fmt.Println("wresp.IsProgressNotify:", wresp.IsProgressNotify())
	// wresp.Header.Revision: 0
	// wresp.IsProgressNotify: true
}
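
The example above reads only the first response. In a longer-lived consumer, progress notifications arrive on the same channel as real events; the sketch below is an illustrative addition, not etcd source (the key "foo" and the logging are assumptions), separating the two cases with IsProgressNotify.

// watchLoop drains a watch channel opened with WithProgressNotify,
// handling empty progress notifications separately from real events.
// Illustrative sketch: assumes cli is a connected *clientv3.Client.
func watchLoop(cli *clientv3.Client) {
	rch := cli.Watch(context.Background(), "foo", clientv3.WithProgressNotify())
	for wresp := range rch {
		if wresp.IsProgressNotify() {
			// No keys changed; the header still carries the current
			// store revision, which can be checkpointed to resume later.
			log.Printf("progress notify at revision %d", wresp.Header.Revision)
			continue
		}
		for _, ev := range wresp.Events {
			log.Printf("%s %q -> %q", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
}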
Example 4
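// addDedicatedWatcher gives the watcher its own underlying watch, created at
// the requested revision with progress notifications enabled, and runs it in
// its own goroutine.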
func (sws *serverWatchStream) addDedicatedWatcher(w watcher, rev int64) {
	sws.mu.Lock()
	defer sws.mu.Unlock()

	ctx, cancel := context.WithCancel(context.Background())

	wch := sws.c.Watch(ctx,
		w.wr.key, clientv3.WithRange(w.wr.end),
		clientv3.WithRev(rev),
		clientv3.WithProgressNotify(),
	)

	ws := newWatcherSingle(wch, cancel, w, sws)
	sws.singles[w.id] = ws
	go ws.run()
}
Example 5
func (wgs *watchergroups) addWatcher(rid receiverID, w watcher) {
	wgs.mu.Lock()
	defer wgs.mu.Unlock()

	groups := wgs.groups

	if wg, ok := groups[w.wr]; ok {
		rev := wg.add(rid, w)
		wgs.idToGroup[rid] = wg

		if rev == 0 {
			// The group is newly created and the create event has not
			// been delivered to it yet. Rely on the etcd server to
			// deliver the create event; sending one here too could
			// result in a created event being delivered twice.
			return
		}

		resp := &pb.WatchResponse{
			Header: &pb.ResponseHeader{
				// todo: fill in ClusterId
				// todo: fill in MemberId:
				Revision: rev,
				// todo: fill in RaftTerm:
			},
			WatchId: rid.watcherID,
			Created: true,
		}
		w.ch <- resp

		return
	}

	ctx, cancel := context.WithCancel(wgs.proxyCtx)

	wch := wgs.cw.Watch(ctx, w.wr.key,
		clientv3.WithRange(w.wr.end),
		clientv3.WithProgressNotify(),
		clientv3.WithCreatedNotify(),
	)

	watchg := newWatchergroup(wch, cancel)
	watchg.add(rid, w)
	go watchg.run()
	groups[w.wr] = watchg
	wgs.idToGroup[rid] = watchg
}
Example 6
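// addWatcher joins the watcher to an existing group for its key range, or
// starts a new shared watch (with progress notifications) when no group
// exists yet.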
func (wgs *watchergroups) addWatcher(rid receiverID, w watcher) {
	wgs.mu.Lock()
	defer wgs.mu.Unlock()

	groups := wgs.groups

	if wg, ok := groups[w.wr]; ok {
		wg.add(rid, w)
		return
	}

	ctx, cancel := context.WithCancel(context.Background())

	wch := wgs.cw.Watch(ctx, w.wr.key, clientv3.WithRange(w.wr.end), clientv3.WithProgressNotify())
	watchg := newWatchergroup(wch, cancel)
	watchg.add(rid, w)
	go watchg.run()
	groups[w.wr] = watchg
}
Example 7
func newWatchBroadcast(wp *watchProxy, w *watcher, update func(*watchBroadcast)) *watchBroadcast {
	cctx, cancel := context.WithCancel(wp.ctx)
	wb := &watchBroadcast{
		cancel:    cancel,
		nextrev:   w.nextrev,
		receivers: make(map[*watcher]struct{}),
		donec:     make(chan struct{}),
	}
	wb.add(w)
	go func() {
		defer close(wb.donec)
		// loop because leader loss will close channel
		for cctx.Err() == nil {
			opts := []clientv3.OpOption{
				clientv3.WithRange(w.wr.end),
				clientv3.WithProgressNotify(),
				clientv3.WithRev(wb.nextrev),
				clientv3.WithPrevKV(),
			}
			// The create notification should be the first response;
			// if the watch is recreated following leader loss, it
			// shouldn't post a second create response to the client.
			if wb.responses == 0 {
				opts = append(opts, clientv3.WithCreatedNotify())
			}
			wch := wp.cw.Watch(cctx, w.wr.key, opts...)

			for wr := range wch {
				wb.bcast(wr)
				update(wb)
			}
			wp.retryLimiter.Wait(cctx)
		}
	}()
	return wb
}
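
Examples 1 and 7 rebuild the server-side watch from wb.nextrev whenever leader loss closes the channel, and Example 7 additionally requests a created notification only on the first attempt so clients never see a second create response. A client of WithProgressNotify can apply the same retry idea; the sketch below is an illustrative counterpart, not etcd source (the key "foo" and the revision bookkeeping are assumptions).

// retryWatch reopens a watch from the next unseen revision whenever the
// server closes the channel, mirroring the proxy's retry loop above.
// Illustrative sketch; not etcd source.
func retryWatch(ctx context.Context, cli *clientv3.Client) {
	var rev int64 // 0 starts at the current revision on the first attempt
	for ctx.Err() == nil {
		opts := []clientv3.OpOption{clientv3.WithProgressNotify()}
		if rev > 0 {
			opts = append(opts, clientv3.WithRev(rev))
		}
		for wresp := range cli.Watch(ctx, "foo", opts...) {
			// Progress notifications also advance Header.Revision, so
			// the resume point moves forward even without events.
			if wresp.Header.Revision >= rev {
				rev = wresp.Header.Revision + 1
			}
		}
		// The channel closed (e.g. leader loss); loop and re-watch from rev.
	}
}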