Example #1
// watchCommandFunc executes the "watch" command.
func watchCommandFunc(cmd *cobra.Command, args []string) {
	if watchInteractive {
		watchInteractiveFunc(cmd, args)
		return
	}

	if len(args) != 1 {
		ExitWithError(ExitBadArgs, fmt.Errorf("watch in non-interactive mode requires an argument as key or prefix"))
	}

	c := mustClientFromCmd(cmd)
	w := clientv3.NewWatcher(c)

	opts := []clientv3.OpOption{clientv3.WithRev(watchRev)}
	if watchPrefix {
		opts = append(opts, clientv3.WithPrefix())
	}
	wc := w.Watch(context.TODO(), args[0], opts...)
	printWatchCh(wc)
	err := w.Close()
	if err == nil {
		ExitWithError(ExitInterrupted, fmt.Errorf("watch is canceled by the server"))
	}
	ExitWithError(ExitBadConnection, err)
}
Example #2
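// TestWatchErrConnClosed checks that a watch opened on a client whose
// underlying gRPC connection has been closed fails with ErrClientConnClosing
// instead of blocking.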
func TestWatchErrConnClosed(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	cli := clus.Client(0)
	defer cli.Close()
	wc := clientv3.NewWatcher(cli)

	donec := make(chan struct{})
	go func() {
		defer close(donec)
		ch := wc.Watch(context.TODO(), "foo")
		if wr := <-ch; grpc.ErrorDesc(wr.Err()) != grpc.ErrClientConnClosing.Error() {
			t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, grpc.ErrorDesc(wr.Err()))
		}
	}()

	if err := cli.ActiveConnection().Close(); err != nil {
		t.Fatal(err)
	}
	clus.TakeClient(0)

	select {
	case <-time.After(3 * time.Second):
		t.Fatal("wc.Watch took too long")
	case <-donec:
	}
}
Example #3
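// TestWatchAfterClose checks that creating a watcher from an already-closed
// client does not hang and that closing the watcher reports nothing worse
// than ErrClientConnClosing.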
func TestWatchAfterClose(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	cli := clus.Client(0)
	clus.TakeClient(0)
	if err := cli.Close(); err != nil {
		t.Fatal(err)
	}

	donec := make(chan struct{})
	go func() {
		wc := clientv3.NewWatcher(cli)
		wc.Watch(context.TODO(), "foo")
		if err := wc.Close(); err != nil && err != grpc.ErrClientConnClosing {
			t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, err)
		}
		close(donec)
	}()
	select {
	case <-time.After(3 * time.Second):
		t.Fatal("wc.Watch took too long")
	case <-donec:
	}
}
Example #4
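// testWatchWithProgressNotify checks that a watch opened with
// WithProgressNotify receives an empty progress notification within the
// report interval; with watchOnPut, the prefix watch also delivers the put on
// "foox" as a regular event rather than another empty notification.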
func testWatchWithProgressNotify(t *testing.T, watchOnPut bool) {
	defer testutil.AfterTest(t)

	// accelerate report interval so test terminates quickly
	oldpi := v3rpc.GetProgressReportInterval()
	// using atomics to avoid race warnings
	v3rpc.SetProgressReportInterval(3 * time.Second)
	pi := 3 * time.Second
	defer func() { v3rpc.SetProgressReportInterval(oldpi) }()

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	wc := clientv3.NewWatcher(clus.RandClient())
	defer wc.Close()

	opts := []clientv3.OpOption{clientv3.WithProgressNotify()}
	if watchOnPut {
		opts = append(opts, clientv3.WithPrefix())
	}
	rch := wc.Watch(context.Background(), "foo", opts...)

	select {
	case resp := <-rch: // wait for notification
		if len(resp.Events) != 0 {
			t.Fatalf("resp.Events expected none, got %+v", resp.Events)
		}
	case <-time.After(2 * pi):
		t.Fatalf("watch response expected in %v, but timed out", pi)
	}

	kvc := clientv3.NewKV(clus.RandClient())
	if _, err := kvc.Put(context.TODO(), "foox", "bar"); err != nil {
		t.Fatal(err)
	}

	select {
	case resp := <-rch:
		if resp.Header.Revision != 2 {
			t.Fatalf("resp.Header.Revision expected 2, got %d", resp.Header.Revision)
		}
		if watchOnPut { // wait for put if watch on the put key
			ev := []*clientv3.Event{{Type: clientv3.EventTypePut,
				Kv: &mvccpb.KeyValue{Key: []byte("foox"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1}}}
			if !reflect.DeepEqual(ev, resp.Events) {
				t.Fatalf("expected %+v, got %+v", ev, resp.Events)
			}
		} else if len(resp.Events) != 0 { // wait for notification otherwise
			t.Fatalf("expected no events, but got %+v", resp.Events)
		}
	case <-time.After(2 * pi):
		t.Fatalf("watch response expected in %v, but timed out", pi)
	}
}
Example #5
// TestWatchResumeCompacted checks that the watcher gracefully closes when it
// tries to resume to a revision that has been compacted out of the store.
func TestWatchResumeCompacted(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// create a waiting watcher at rev 1
	w := clientv3.NewWatcher(clus.Client(0))
	defer w.Close()
	wch := w.Watch(context.Background(), "foo", clientv3.WithRev(1))
	select {
	case w := <-wch:
		t.Errorf("unexpected message from wch %v", w)
	default:
	}
	clus.Members[0].Stop(t)

	ticker := time.After(time.Second * 10)
	for clus.WaitLeader(t) <= 0 {
		select {
		case <-ticker:
			t.Fatalf("failed to wait for new leader")
		default:
			time.Sleep(10 * time.Millisecond)
		}
	}

	// put some data and compact away
	kv := clientv3.NewKV(clus.Client(1))
	for i := 0; i < 5; i++ {
		if _, err := kv.Put(context.TODO(), "foo", "bar"); err != nil {
			t.Fatal(err)
		}
	}
	if _, err := kv.Compact(context.TODO(), 3); err != nil {
		t.Fatal(err)
	}

	clus.Members[0].Restart(t)

	// get compacted error message
	wresp, ok := <-wch
	if !ok {
		t.Fatalf("expected wresp, but got closed channel")
	}
	if wresp.Err() != rpctypes.ErrCompacted {
		t.Fatalf("wresp.Err() expected %v, but got %v", rpctypes.ErrCompacted, wresp.Err())
	}
	// ensure the channel is closed
	if wresp, ok = <-wch; ok {
		t.Fatalf("expected closed channel, but got %v", wresp)
	}
}
Example #6
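// watchGetFunc benchmarks serializable Gets issued while unsynced watchers
// replay events on "watchkey" from an earlier revision.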
func watchGetFunc(cmd *cobra.Command, args []string) {
	clients := mustCreateClients(totalClients, totalConns)
	getClient := mustCreateClients(1, 1)

	// setup keys for watchers
	watchRev := int64(0)
	for i := 0; i < watchEvents; i++ {
		v := fmt.Sprintf("%d", i)
		resp, err := clients[0].Put(context.TODO(), "watchkey", v)
		if err != nil {
			panic(err)
		}
		if i == 0 {
			watchRev = resp.Header.Revision
		}
	}

	streams := make([]v3.Watcher, watchGetTotalStreams)
	for i := range streams {
		streams[i] = v3.NewWatcher(clients[i%len(clients)])
	}

	bar = pb.New(watchGetTotalWatchers * watchEvents)
	bar.Format("Bom !")
	bar.Start()

	// report from trying to do serialized gets with concurrent watchers
	r := newReport()
	ctx, cancel := context.WithCancel(context.TODO())
	f := func() {
		defer close(r.Results())
		for {
			st := time.Now()
			_, err := getClient[0].Get(ctx, "abc", v3.WithSerializable())
			if ctx.Err() != nil {
				break
			}
			r.Results() <- report.Result{Err: err, Start: st, End: time.Now()}
		}
	}

	wg.Add(watchGetTotalWatchers)
	for i := 0; i < watchGetTotalWatchers; i++ {
		go doUnsyncWatch(streams[i%len(streams)], watchRev, f)
	}

	rc := r.Run()
	wg.Wait()
	cancel()
	bar.Finish()
	fmt.Printf("Get during watch summary:\n%s", <-rc)
}
Example #7
// snapshotToStdout streams a snapshot over stdout
func snapshotToStdout(c *clientv3.Client) {
	// must explicitly fetch first revision since no retry on stdout
	wapi := clientv3.NewWatcher(c)
	defer wapi.Close()
	wr := <-wapi.Watch(context.TODO(), "", clientv3.WithPrefix(), clientv3.WithRev(1))
	if len(wr.Events) > 0 {
		wr.CompactRevision = 1
	}
	if rev := snapshot(os.Stdout, c, wr.CompactRevision+1); rev != 0 {
		err := fmt.Errorf("snapshot interrupted by compaction %v", rev)
		ExitWithError(ExitInterrupted, err)
	}
	os.Stdout.Sync()
}
Example #8
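// watchInteractiveFunc reads watch requests from stdin line by line, parses
// each one as a "watch" command, and starts a watcher that prints its events.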
func watchInteractiveFunc(cmd *cobra.Command, args []string) {
	c := mustClientFromCmd(cmd)
	w := clientv3.NewWatcher(c)

	reader := bufio.NewReader(os.Stdin)

	for {
		l, err := reader.ReadString('\n')
		if err != nil {
			ExitWithError(ExitInvalidInput, fmt.Errorf("Error reading watch request line: %v", err))
		}
		l = strings.TrimSuffix(l, "\n")

		args := argify(l)
		if len(args) < 2 {
			fmt.Fprintf(os.Stderr, "Invalid command %s (command type or key is not provided)\n", l)
			continue
		}

		if args[0] != "watch" {
			fmt.Fprintf(os.Stderr, "Invalid command %s (only support watch)\n", l)
			continue
		}

		flagset := NewWatchCommand().Flags()
		err = flagset.Parse(args[1:])
		if err != nil {
			fmt.Fprintf(os.Stderr, "Invalid command %s (%v)\n", l, err)
			continue
		}
		moreargs := flagset.Args()
		if len(moreargs) != 1 {
			fmt.Fprintf(os.Stderr, "Invalid command %s (Too many arguments)\n", l)
			continue
		}
		var key string
		_, err = fmt.Sscanf(moreargs[0], "%q", &key)
		if err != nil {
			key = moreargs[0]
		}
		opts := []clientv3.OpOption{clientv3.WithRev(watchRev)}
		if watchPrefix {
			opts = append(opts, clientv3.WithPrefix())
		}
		ch := w.Watch(context.TODO(), key, opts...)
		go printWatchCh(ch)
	}
}
Example #9
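// waitUpdate blocks until key receives an update. A canceled context is
// reported as ctx.Err(); a response without events (e.g. the watch was
// canceled at a compacted revision) is reported as ErrCompacted.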
func waitUpdate(ctx context.Context, client *v3.Client, key string, opts ...v3.OpOption) error {
	w := v3.NewWatcher(client)
	defer w.Close()
	wc := w.Watch(ctx, key, opts...)
	if wc == nil {
		return ctx.Err()
	}
	wresp, ok := <-wc
	if !ok {
		return ctx.Err()
	}
	if len(wresp.Events) == 0 {
		return v3rpc.ErrCompacted
	}
	return nil
}
Example #10
func watchGetFunc(cmd *cobra.Command, args []string) {
	clients := mustCreateClients(totalClients, totalConns)
	getClient := mustCreateClients(1, 1)

	// setup keys for watchers
	watchRev := int64(0)
	for i := 0; i < watchEvents; i++ {
		v := fmt.Sprintf("%d", i)
		resp, err := clients[0].Put(context.TODO(), "watchkey", v)
		if err != nil {
			panic(err)
		}
		if i == 0 {
			watchRev = resp.Header.Revision
		}
	}

	streams := make([]v3.Watcher, watchGetTotalStreams)
	for i := range streams {
		streams[i] = v3.NewWatcher(clients[i%len(clients)])
	}

	// results from trying to do serialized gets with concurrent watchers
	results = make(chan result)

	bar = pb.New(watchGetTotalWatchers * watchEvents)
	bar.Format("Bom !")
	bar.Start()

	pdoneC := printReport(results)
	wg.Add(watchGetTotalWatchers)
	ctx, cancel := context.WithCancel(context.TODO())
	f := func() {
		doSerializedGet(ctx, getClient[0], results)
	}
	for i := 0; i < watchGetTotalWatchers; i++ {
		go doUnsyncWatch(streams[i%len(streams)], watchRev, f)
	}
	wg.Wait()
	cancel()
	bar.Finish()
	fmt.Printf("Get during watch summary:\n")
	<-pdoneC
}
Example #11
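// TestKVCompact checks that compacting an already-compacted or future
// revision fails with the expected errors, and that a watch starting below
// the compact revision reports the CompactRevision and then closes.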
func TestKVCompact(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	kv := clientv3.NewKV(clus.RandClient())
	ctx := context.TODO()

	for i := 0; i < 10; i++ {
		if _, err := kv.Put(ctx, "foo", "bar"); err != nil {
			t.Fatalf("couldn't put 'foo' (%v)", err)
		}
	}

	_, err := kv.Compact(ctx, 7)
	if err != nil {
		t.Fatalf("couldn't compact kv space (%v)", err)
	}
	_, err = kv.Compact(ctx, 7)
	if err == nil || err != rpctypes.ErrCompacted {
		t.Fatalf("error got %v, want %v", err, rpctypes.ErrCompacted)
	}

	wcli := clus.RandClient()
	// new watcher could precede receiving the compaction without quorum first
	wcli.Get(ctx, "quorum-get")

	wc := clientv3.NewWatcher(wcli)
	defer wc.Close()
	wchan := wc.Watch(ctx, "foo", clientv3.WithRev(3))

	if wr := <-wchan; wr.CompactRevision != 7 {
		t.Fatalf("wchan CompactRevision got %v, want 7", wr.CompactRevision)
	}
	if wr, ok := <-wchan; ok {
		t.Fatalf("wchan got %v, expected closed", wr)
	}

	_, err = kv.Compact(ctx, 1000)
	if err == nil || err != rpctypes.ErrFutureRev {
		t.Fatalf("error got %v, want %v", err, rpctypes.ErrFutureRev)
	}
}
Example #12
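// runWatchTest runs f against a fresh three-member cluster, giving the watch
// and KV clients different members so puts keep working even if f disrupts
// the watcher's connection.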
func runWatchTest(t *testing.T, f watcherTest) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	wclient := clus.RandClient()
	w := clientv3.NewWatcher(wclient)
	defer w.Close()
	// select a different client from wclient so puts succeed if
	// a test knocks out the watcher client
	kvclient := clus.RandClient()
	for kvclient == wclient {
		kvclient = clus.RandClient()
	}
	kv := clientv3.NewKV(kvclient)

	wctx := &watchctx{clus, w, wclient, kv, nil}
	f(t, wctx)
}
Example #13
func ExampleWatcher_watchPrefix() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	wc := clientv3.NewWatcher(cli)
	defer wc.Close()

	rch := wc.Watch(context.Background(), "foo", clientv3.WithPrefix())
	for wresp := range rch {
		for _, ev := range wresp.Events {
			fmt.Printf("%s %q : %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
	// PUT "foo1" : "bar"
}
Example #14
func TestKVCompact(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	kv := clientv3.NewKV(clus.RandClient())
	ctx := context.TODO()

	for i := 0; i < 10; i++ {
		if _, err := kv.Put(ctx, "foo", "bar"); err != nil {
			t.Fatalf("couldn't put 'foo' (%v)", err)
		}
	}

	err := kv.Compact(ctx, 7)
	if err != nil {
		t.Fatalf("couldn't compact kv space (%v)", err)
	}
	err = kv.Compact(ctx, 7)
	if err == nil || err != v3rpc.ErrCompacted {
		t.Fatalf("error got %v, want %v", err, v3rpc.ErrFutureRev)
	}

	wc := clientv3.NewWatcher(clus.RandClient())
	defer wc.Close()
	wchan := wc.Watch(ctx, "foo", 3)

	if wr := <-wchan; wr.CompactRevision != 7 {
		t.Fatalf("wchan CompactRevision got %v, want 7", wr.CompactRevision)
	}
	if wr, ok := <-wchan; ok {
		t.Fatalf("wchan got %v, expected closed", wr)
	}

	err = kv.Compact(ctx, 1000)
	if err == nil || err != v3rpc.ErrFutureRev {
		t.Fatalf("error got %v, want %v", err, v3rpc.ErrFutureRev)
	}
}
Example #15
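// SyncUpdates streams watch responses for the syncer's prefix starting at the
// revision recorded by SyncBase, forwarding them on the returned channel
// until the watch ends.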
func (s *syncer) SyncUpdates(ctx context.Context) clientv3.WatchChan {
	if s.rev == 0 {
		panic("unexpected revision = 0. Calling SyncUpdates before SyncBase finishes?")
	}

	respchan := make(chan clientv3.WatchResponse, 1024)

	go func() {
		wapi := clientv3.NewWatcher(s.c)
		defer wapi.Close()
		defer close(respchan)

		// get all events since revision (or get non-compacted revision, if
		// rev is too far behind)
		wch := wapi.WatchPrefix(ctx, s.prefix, s.rev)
		for wr := range wch {
			respchan <- wr
		}
	}()

	return respchan
}
Example #16
func TestKVCompact(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	kv := clientv3.NewKV(clus.RandClient())

	for i := 0; i < 10; i++ {
		if _, err := kv.Put("foo", "bar", lease.NoLease); err != nil {
			t.Fatalf("couldn't put 'foo' (%v)", err)
		}
	}

	err := kv.Compact(7)
	if err != nil {
		t.Fatalf("couldn't compact kv space (%v)", err)
	}
	err = kv.Compact(7)
	if err == nil || err != v3rpc.ErrCompacted {
		t.Fatalf("error got %v, want %v", err, v3rpc.ErrFutureRev)
	}

	wc := clientv3.NewWatcher(clus.RandClient())
	defer wc.Close()
	wchan := wc.Watch(context.TODO(), "foo", 3)

	_, ok := <-wchan
	if ok {
		t.Fatalf("wchan ok got %v, want false", ok)
	}

	err = kv.Compact(1000)
	if err == nil || err != v3rpc.ErrFutureRev {
		t.Fatalf("error got %v, want %v", err, v3rpc.ErrFutureRev)
	}
}
Example #17
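// watchLatencyFunc measures the latency between a rate-limited Put on a
// watched key and the arrival of the corresponding watch event.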
func watchLatencyFunc(cmd *cobra.Command, args []string) {
	key := string(mustRandBytes(watchLKeySize))
	value := string(mustRandBytes(watchLValueSize))

	client := mustCreateConn()
	stream := v3.NewWatcher(client)
	wch := stream.Watch(context.TODO(), key)

	bar = pb.New(watchLTotal)
	bar.Format("Bom !")
	bar.Start()

	limiter := rate.NewLimiter(rate.Limit(watchLPutRate), watchLPutRate)
	r := newReport()
	rc := r.Run()

	for i := 0; i < watchLTotal; i++ {
		// limit key put as per reqRate
		if err := limiter.Wait(context.TODO()); err != nil {
			break
		}
		_, err := client.Put(context.TODO(), key, value)

		if err != nil {
			fmt.Fprintf(os.Stderr, "Failed to Put for watch latency benchmark: %v\n", err)
			os.Exit(1)
		}
		st := time.Now()
		<-wch
		r.Results() <- report.Result{Err: err, Start: st, End: time.Now()}
		bar.Increment()
	}

	close(r.Results())
	bar.Finish()
	fmt.Printf("%s", <-rc)
}
Example #18
// Subscribe watches etcd changes and generates structured events telling vulcand to add or delete frontends, hosts etc.
// It is a blocking function.
func (n *ng) Subscribe(changes chan interface{}, afterIdx uint64, cancelC chan bool) error {
	watcher := etcd.NewWatcher(n.client)
	defer watcher.Close()

	log.Infof("Begin watching: etcd revision %d", afterIdx)
	watchChan := watcher.Watch(n.context, n.etcdKey, etcd.WithRev(int64(afterIdx)), etcd.WithPrefix())

	for response := range watchChan {
		if response.Canceled {
			log.Infof("Stop watching: graceful shutdown")
			return nil
		}
		if err := response.Err(); err != nil {
			log.Errorf("Stop watching: error: %v", err)
			return err
		}

		for _, event := range response.Events {
			log.Infof("%s", eventToString(event))
			change, err := n.parseChange(event)
			if err != nil {
				log.Warningf("Ignore '%s', error: %s", eventToString(event), err)
				continue
			}
			if change != nil {
				log.Infof("%v", change)
				select {
				case changes <- change:
				case <-cancelC:
					return nil
				}
			}
		}
	}

	return nil
}
Example #19
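// TestWatchInvalidFutureRevision checks that watching from a revision beyond
// the current store revision yields a canceled response followed by a closed
// channel.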
func TestWatchInvalidFutureRevision(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	w := clientv3.NewWatcher(clus.RandClient())
	defer w.Close()

	rch := w.Watch(context.Background(), "foo", clientv3.WithRev(100))

	wresp, ok := <-rch // WatchResponse from canceled one
	if !ok {
		t.Fatalf("expected wresp 'open'(ok true), but got ok %v", ok)
	}
	if !wresp.Canceled {
		t.Fatalf("wresp.Canceled expected 'true', but got %v", wresp.Canceled)
	}

	_, ok = <-rch // ensure the channel is closed
	if ok {
		t.Fatalf("expected wresp 'closed'(ok false), but got ok %v", ok)
	}
}
Example #20
// TestWatchCompactRevision ensures the CompactRevision error is given on a
// compaction event ahead of a watcher.
func TestWatchCompactRevision(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// set some keys
	kv := clientv3.NewKV(clus.RandClient())
	for i := 0; i < 5; i++ {
		if _, err := kv.Put(context.TODO(), "foo", "bar"); err != nil {
			t.Fatal(err)
		}
	}

	w := clientv3.NewWatcher(clus.RandClient())
	defer w.Close()

	if err := kv.Compact(context.TODO(), 4); err != nil {
		t.Fatal(err)
	}
	wch := w.Watch(context.Background(), "foo", clientv3.WithRev(2))

	// get compacted error message
	wresp, ok := <-wch
	if !ok {
		t.Fatalf("expected wresp, but got closed channel")
	}
	if wresp.Err() != rpctypes.ErrCompacted {
		t.Fatalf("wresp.Err() expected ErrCompacteed, but got %v", wresp.Err())
	}

	// ensure the channel is closed
	if wresp, ok = <-wch; ok {
		t.Fatalf("expected closed channel, but got %v", wresp)
	}
}
Example #21
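// watchFunc benchmarks watch creation across the configured streams, then
// measures how quickly the watchers receive events while puts are issued
// against the watched keys.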
func watchFunc(cmd *cobra.Command, args []string) {
	watched := make([]string, watchedKeyTotal)
	for i := range watched {
		watched[i] = string(mustRandBytes(32))
	}

	requests := make(chan string, totalClients)

	clients := mustCreateClients(totalClients, totalConns)

	streams := make([]v3.Watcher, watchTotalStreams)
	for i := range streams {
		streams[i] = v3.NewWatcher(clients[i%len(clients)])
	}

	putStartNotifier = make(chan struct{})

	// watching phase
	results = make(chan result)
	bar = pb.New(watchTotal)

	bar.Format("Bom !")
	bar.Start()

	pdoneC := printRate(results)

	atomic.StoreInt32(&nrWatchCompleted, int32(0))
	watchCompletedNotifier = make(chan struct{})
	for i := range streams {
		go doWatch(streams[i], requests)
	}

	go func() {
		for i := 0; i < watchTotal; i++ {
			requests <- watched[i%len(watched)]
		}
		close(requests)
	}()

	<-watchCompletedNotifier
	bar.Finish()

	fmt.Printf("Watch creation summary:\n")
	close(results)
	<-pdoneC

	// put phase
	// total number of puts * number of watchers on each key
	eventsTotal = watchPutTotal * (watchTotal / watchedKeyTotal)
	results = make(chan result)
	bar = pb.New(eventsTotal)

	bar.Format("Bom !")
	bar.Start()

	atomic.StoreInt32(&nrRecvCompleted, 0)
	recvCompletedNotifier = make(chan struct{})
	close(putStartNotifier)

	putreqc := make(chan v3.Op)

	for i := 0; i < watchPutTotal; i++ {
		go doPutForWatch(context.TODO(), clients[i%len(clients)].KV, putreqc)
	}

	pdoneC = printRate(results)

	go func() {
		for i := 0; i < eventsTotal; i++ {
			putreqc <- v3.OpPut(watched[i%(len(watched))], "data")
			// TODO: use a real rate-limiter instead of sleep.
			time.Sleep(time.Second / time.Duration(watchPutRate))
		}
		close(putreqc)
	}()

	<-recvCompletedNotifier
	bar.Finish()
	fmt.Printf("Watch events received summary:\n")
	close(results)
	<-pdoneC
}
Example #22
func watchFunc(cmd *cobra.Command, args []string) {
	if watchKeySpaceSize <= 0 {
		fmt.Fprintf(os.Stderr, "expected positive --key-space-size, got (%v)", watchKeySpaceSize)
		os.Exit(1)
	}

	watched := make([]string, watchedKeyTotal)
	numWatchers := make(map[string]int)
	for i := range watched {
		k := make([]byte, watchKeySize)
		if watchSeqKeys {
			binary.PutVarint(k, int64(i%watchKeySpaceSize))
		} else {
			binary.PutVarint(k, int64(rand.Intn(watchKeySpaceSize)))
		}
		watched[i] = string(k)
	}

	requests := make(chan string, totalClients)

	clients := mustCreateClients(totalClients, totalConns)

	streams := make([]v3.Watcher, watchTotalStreams)
	for i := range streams {
		streams[i] = v3.NewWatcher(clients[i%len(clients)])
	}

	// watching phase
	results = make(chan result)
	bar = pb.New(watchTotal)

	bar.Format("Bom !")
	bar.Start()

	pdoneC := printRate(results)

	atomic.StoreInt32(&nrWatchCompleted, int32(0))
	watchCompletedNotifier = make(chan struct{})
	for i := range streams {
		go doWatch(streams[i], requests)
	}

	go func() {
		for i := 0; i < watchTotal; i++ {
			key := watched[i%len(watched)]
			requests <- key
			numWatchers[key]++
		}
		close(requests)
	}()

	<-watchCompletedNotifier
	bar.Finish()

	fmt.Printf("Watch creation summary:\n")
	close(results)
	<-pdoneC

	// put phase
	eventsTotal = 0
	for i := 0; i < watchPutTotal; i++ {
		eventsTotal += numWatchers[watched[i%len(watched)]]
	}
	results = make(chan result)
	bar = pb.New(eventsTotal)

	bar.Format("Bom !")
	bar.Start()

	atomic.StoreInt32(&nrRecvCompleted, 0)
	recvCompletedNotifier = make(chan struct{})

	putreqc := make(chan v3.Op)

	for i := 0; i < watchPutTotal; i++ {
		go doPutForWatch(context.TODO(), clients[i%len(clients)].KV, putreqc)
	}

	pdoneC = printRate(results)

	go func() {
		for i := 0; i < watchPutTotal; i++ {
			putreqc <- v3.OpPut(watched[i%(len(watched))], "data")
			// TODO: use a real rate-limiter instead of sleep.
			time.Sleep(time.Second / time.Duration(watchPutRate))
		}
		close(putreqc)
	}()

	<-recvCompletedNotifier
	bar.Finish()
	fmt.Printf("Watch events received summary:\n")
	close(results)
	<-pdoneC
}
Example #23
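// TestV3ClientMetrics serves the client's Prometheus metrics on a unix
// listener and checks that a Put and a received watch event each increment
// the corresponding gRPC client counters.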
func TestV3ClientMetrics(t *testing.T) {
	defer testutil.AfterTest(t)

	var (
		addr string = "localhost:27989"
		ln   net.Listener
		err  error
	)

	// listen for all prometheus metrics
	donec := make(chan struct{})
	go func() {
		defer close(donec)

		srv := &http.Server{Handler: prometheus.Handler()}
		srv.SetKeepAlivesEnabled(false)

		ln, err = transport.NewUnixListener(addr)
		if err != nil {
			t.Fatalf("Error: %v occurred while listening on addr: %v", err, addr)
		}

		err = srv.Serve(ln)
		if err != nil && !strings.Contains(err.Error(), "use of closed network connection") {
			t.Fatalf("Err serving http requests: %v", err)
		}
	}()

	url := "unix://" + addr + "/metrics"

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	client := clus.Client(0)

	w := clientv3.NewWatcher(client)
	defer w.Close()

	kv := clientv3.NewKV(client)

	wc := w.Watch(context.Background(), "foo")

	wBefore := sumCountersForMetricAndLabels(t, url, "grpc_client_msg_received_total", "Watch", "bidi_stream")

	pBefore := sumCountersForMetricAndLabels(t, url, "grpc_client_started_total", "Put", "unary")

	_, err = kv.Put(context.Background(), "foo", "bar")
	if err != nil {
		t.Errorf("Error putting value in key store")
	}

	pAfter := sumCountersForMetricAndLabels(t, url, "grpc_client_started_total", "Put", "unary")
	if pBefore+1 != pAfter {
		t.Errorf("grpc_client_started_total expected %d, got %d", 1, pAfter-pBefore)
	}

	// consume watch response
	select {
	case <-wc:
	case <-time.After(10 * time.Second):
		t.Error("Timeout occurred for getting watch response")
	}

	wAfter := sumCountersForMetricAndLabels(t, url, "grpc_client_msg_received_total", "Watch", "bidi_stream")
	if wBefore+1 != wAfter {
		t.Errorf("grpc_client_msg_received_total expected %d, got %d", 1, wAfter-wBefore)
	}

	ln.Close()
	<-donec
}
Example #24
// TestWatchResumeCompacted checks that the watcher gracefully closes when it
// tries to resume to a revision that has been compacted out of the store.
// Since the watcher's server restarts with stale data, the watcher will receive
// either a compaction error or all keys by staying in sync before the compaction
// is finally applied.
func TestWatchResumeCompacted(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// create a waiting watcher at rev 1
	w := clientv3.NewWatcher(clus.Client(0))
	defer w.Close()
	wch := w.Watch(context.Background(), "foo", clientv3.WithRev(1))
	select {
	case w := <-wch:
		t.Errorf("unexpected message from wch %v", w)
	default:
	}
	clus.Members[0].Stop(t)

	ticker := time.After(time.Second * 10)
	for clus.WaitLeader(t) <= 0 {
		select {
		case <-ticker:
			t.Fatalf("failed to wait for new leader")
		default:
			time.Sleep(10 * time.Millisecond)
		}
	}

	// put some data and compact away
	numPuts := 5
	kv := clientv3.NewKV(clus.Client(1))
	for i := 0; i < numPuts; i++ {
		if _, err := kv.Put(context.TODO(), "foo", "bar"); err != nil {
			t.Fatal(err)
		}
	}
	if _, err := kv.Compact(context.TODO(), 3); err != nil {
		t.Fatal(err)
	}

	clus.Members[0].Restart(t)

	// since watch's server isn't guaranteed to be synced with the cluster when
	// the watch resumes, there is a window where the watch can stay synced and
	// read off all events; if the watcher misses the window, it will go out of
	// sync and get a compaction error.
	wRev := int64(2)
	for int(wRev) <= numPuts+1 {
		var wresp clientv3.WatchResponse
		var ok bool
		select {
		case wresp, ok = <-wch:
			if !ok {
				t.Fatalf("expected wresp, but got closed channel")
			}
		case <-time.After(5 * time.Second):
			t.Fatalf("compacted watch timed out")
		}
		for _, ev := range wresp.Events {
			if ev.Kv.ModRevision != wRev {
				t.Fatalf("expected modRev %v, got %+v", wRev, ev)
			}
			wRev++
		}
		if wresp.Err() == nil {
			continue
		}
		if wresp.Err() != rpctypes.ErrCompacted {
			t.Fatalf("wresp.Err() expected %v, but got %v %+v", rpctypes.ErrCompacted, wresp.Err())
		}
		break
	}
	if int(wRev) > numPuts+1 {
		// got data faster than the compaction
		return
	}
	// received compaction error; ensure the channel closes
	select {
	case wresp, ok := <-wch:
		if ok {
			t.Fatalf("expected closed channel, but got %v", wresp)
		}
	case <-time.After(5 * time.Second):
		t.Fatalf("timed out waiting for channel close")
	}
}