// send relays a client watch response to this watcher's proxy channel.
// Events at or below the watcher's last seen revision (w.rev) are dropped,
// as are events rejected by the watcher's filters. If the receiver cannot
// drain w.ch within 50ms, the channel is closed to signal the stream sender.
func (w *watcher) send(wr clientv3.WatchResponse) {
	// Progress notifications are only forwarded when the watcher asked for them.
	if wr.IsProgressNotify() && !w.progress {
		return
	}

	events := make([]*mvccpb.Event, 0, len(wr.Events))

	var lastRev int64
	for i := range wr.Events {
		ev := (*mvccpb.Event)(wr.Events[i])
		if ev.Kv.ModRevision <= w.rev {
			// already delivered to this watcher
			continue
		} else {
			// We cannot update w.rev here.
			// txn can have multiple events with the same rev.
			// If we update w.rev here, we would skip some events in the same txn.
			lastRev = ev.Kv.ModRevision
		}

		filtered := false
		if len(w.filters) != 0 {
			for _, filter := range w.filters {
				if filter(*ev) {
					filtered = true
					break
				}
			}
		}

		if !filtered {
			events = append(events, ev)
		}
	}

	// Advance the watcher's revision only after the whole response is scanned
	// (see the same-txn note above).
	if lastRev > w.rev {
		w.rev = lastRev
	}

	// all events are filtered out?
	// Created/progress responses are still forwarded even with no events.
	if !wr.IsProgressNotify() && !wr.Created && len(events) == 0 {
		return
	}

	pbwr := &pb.WatchResponse{
		Header:  &wr.Header,
		Created: wr.Created,
		WatchId: w.id,
		Events:  events,
	}
	select {
	case w.ch <- pbwr:
	case <-time.After(50 * time.Millisecond):
		// close the watch chan will notify the stream sender.
		// the stream will gc all its watchers.
		// NOTE(review): a later send on this watcher after the close would
		// panic on the closed channel — presumably the stream gc removes the
		// watcher first; confirm against the stream sender's teardown order.
		close(w.ch)
	}
}
func (w *watcher) send(wr clientv3.WatchResponse) { if wr.IsProgressNotify() && !w.progress { return } events := make([]*mvccpb.Event, 0, len(wr.Events)) for i := range wr.Events { ev := (*mvccpb.Event)(wr.Events[i]) if ev.Kv.ModRevision <= w.rev { continue } else { w.rev = ev.Kv.ModRevision } filtered := false if len(w.filters) != 0 { for _, filter := range w.filters { if filter(*ev) { filtered = true break } } } if !filtered { events = append(events, ev) } } // all events are filtered out? if !wr.IsProgressNotify() && len(events) == 0 { return } pbwr := &pb.WatchResponse{ Header: &wr.Header, WatchId: w.id, Events: events, } select { case w.ch <- pbwr: default: panic("handle this") } }
func (w *watcher) send(wr clientv3.WatchResponse) { if wr.IsProgressNotify() && !w.progress { return } // todo: filter out the events that this watcher already seen. evs := wr.Events events := make([]*mvccpb.Event, len(evs)) for i := range evs { events[i] = (*mvccpb.Event)(evs[i]) } pbwr := &pb.WatchResponse{ Header: &wr.Header, WatchId: w.id, Events: events, } select { case w.ch <- pbwr: default: panic("handle this") } }
// send filters out repeated events by discarding revisions older // than the last one sent over the watch channel. func (w *watcher) send(wr clientv3.WatchResponse) { if wr.IsProgressNotify() && !w.progress { return } if w.nextrev > wr.Header.Revision && len(wr.Events) > 0 { return } if w.nextrev == 0 { // current watch; expect updates following this revision w.nextrev = wr.Header.Revision + 1 } events := make([]*mvccpb.Event, 0, len(wr.Events)) var lastRev int64 for i := range wr.Events { ev := (*mvccpb.Event)(wr.Events[i]) if ev.Kv.ModRevision < w.nextrev { continue } else { // We cannot update w.rev here. // txn can have multiple events with the same rev. // If w.nextrev updates here, it would skip events in the same txn. lastRev = ev.Kv.ModRevision } filtered := false for _, filter := range w.filters { if filter(*ev) { filtered = true break } } if filtered { continue } if !w.prevKV { evCopy := *ev evCopy.PrevKv = nil ev = &evCopy } events = append(events, ev) } if lastRev >= w.nextrev { w.nextrev = lastRev + 1 } // all events are filtered out? if !wr.IsProgressNotify() && !wr.Created && len(events) == 0 { return } w.lastHeader = wr.Header w.post(&pb.WatchResponse{ Header: &wr.Header, Created: wr.Created, WatchId: w.id, Events: events, }) }
// TestWatchResumeComapcted checks that the watcher gracefully closes in case // that it tries to resume to a revision that's been compacted out of the store. // Since the watcher's server restarts with stale data, the watcher will receive // either a compaction error or all keys by staying in sync before the compaction // is finally applied. func TestWatchResumeCompacted(t *testing.T) { defer testutil.AfterTest(t) clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) // create a waiting watcher at rev 1 w := clientv3.NewWatcher(clus.Client(0)) defer w.Close() wch := w.Watch(context.Background(), "foo", clientv3.WithRev(1)) select { case w := <-wch: t.Errorf("unexpected message from wch %v", w) default: } clus.Members[0].Stop(t) ticker := time.After(time.Second * 10) for clus.WaitLeader(t) <= 0 { select { case <-ticker: t.Fatalf("failed to wait for new leader") default: time.Sleep(10 * time.Millisecond) } } // put some data and compact away numPuts := 5 kv := clientv3.NewKV(clus.Client(1)) for i := 0; i < numPuts; i++ { if _, err := kv.Put(context.TODO(), "foo", "bar"); err != nil { t.Fatal(err) } } if _, err := kv.Compact(context.TODO(), 3); err != nil { t.Fatal(err) } clus.Members[0].Restart(t) // since watch's server isn't guaranteed to be synced with the cluster when // the watch resumes, there is a window where the watch can stay synced and // read off all events; if the watcher misses the window, it will go out of // sync and get a compaction error. 
wRev := int64(2) for int(wRev) <= numPuts+1 { var wresp clientv3.WatchResponse var ok bool select { case wresp, ok = <-wch: if !ok { t.Fatalf("expected wresp, but got closed channel") } case <-time.After(5 * time.Second): t.Fatalf("compacted watch timed out") } for _, ev := range wresp.Events { if ev.Kv.ModRevision != wRev { t.Fatalf("expected modRev %v, got %+v", wRev, ev) } wRev++ } if wresp.Err() == nil { continue } if wresp.Err() != rpctypes.ErrCompacted { t.Fatalf("wresp.Err() expected %v, but got %v %+v", rpctypes.ErrCompacted, wresp.Err()) } break } if int(wRev) > numPuts+1 { // got data faster than the compaction return } // received compaction error; ensure the channel closes select { case wresp, ok := <-wch: if ok { t.Fatalf("expected closed channel, but got %v", wresp) } case <-time.After(5 * time.Second): t.Fatalf("timed out waiting for channel close") } }