Example #1
// newClient bootstraps a first connection through the configured retry dialer
// and returns a Client bound to it.
func newClient(cfg *Config) (*Client, error) {
	if cfg == nil {
		cfg = &Config{RetryDialer: dialEndpointList}
	}
	var creds *credentials.TransportAuthenticator
	if cfg.TLS != nil {
		tlscfg, err := cfg.TLS.ClientConfig()
		if err != nil {
			return nil, err
		}
		c := credentials.NewTLS(tlscfg)
		creds = &c
	}
	// use a temporary skeleton client to bootstrap first connection
	conn, err := cfg.RetryDialer(&Client{cfg: *cfg, creds: creds})
	if err != nil {
		return nil, err
	}
	return &Client{
		KV:      pb.NewKVClient(conn),
		Lease:   pb.NewLeaseClient(conn),
		Watch:   pb.NewWatchClient(conn),
		Cluster: pb.NewClusterClient(conn),
		conn:    conn,
		cfg:     *cfg,
		creds:   creds,
	}, nil
}
Example #2
// toGRPC bundles the raw etcdserverpb gRPC clients that share the clientv3
// client's active connection.
func toGRPC(c *clientv3.Client) grpcAPI {
	return grpcAPI{
		pb.NewClusterClient(c.ActiveConnection()),
		pb.NewKVClient(c.ActiveConnection()),
		pb.NewLeaseClient(c.ActiveConnection()),
		pb.NewWatchClient(c.ActiveConnection()),
	}
}
Example #3
// watchCommandFunc executes the "watch" command.
func watchCommandFunc(cmd *cobra.Command, args []string) {
	endpoint, err := cmd.Flags().GetString("endpoint")
	if err != nil {
		ExitWithError(ExitInvalidInput, err)
	}
	conn, err := grpc.Dial(endpoint)
	if err != nil {
		ExitWithError(ExitBadConnection, err)
	}

	wAPI := pb.NewWatchClient(conn)
	wStream, err := wAPI.Watch(context.TODO())
	if err != nil {
		ExitWithError(ExitBadConnection, err)
	}

	go recvLoop(wStream)

	reader := bufio.NewReader(os.Stdin)

	for {
		l, err := reader.ReadString('\n')
		if err != nil {
			ExitWithError(ExitInvalidInput, fmt.Errorf("Error reading watch request line: %v", err))
		}
		l = strings.TrimSuffix(l, "\n")

		// TODO: support start and end revision
		segs := strings.Split(l, " ")
		if len(segs) != 2 {
			fmt.Fprintf(os.Stderr, "Invalid watch request format: use \"watch [key]\", \"watchprefix [prefix]\" or \"cancel [watcher ID]\"\n")
			continue
		}

		var r *pb.WatchRequest
		switch segs[0] {
		case "watch":
			r = &pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte(segs[1])}}
		case "watchprefix":
			r = &pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte(segs[1])}}
		case "cancel":
			id, perr := strconv.ParseInt(segs[1], 10, 64)
			if perr != nil {
				fmt.Fprintf(os.Stderr, "Invalid cancel ID (%v)\n", perr)
				continue
			}
			r = &pb.WatchRequest{CancelRequest: &pb.WatchCancelRequest{WatchId: id}}
		default:
			fmt.Fprintf(os.Stderr, "Invalid watch request type: use watch, watchprefix or cancel\n")
			continue
		}

		err = wStream.Send(r)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error sending request to server: %v\n", err)
		}
	}
}
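The recvLoop helper started above is not part of this snippet. A minimal sketch of such a receive loop, assuming the same pb package, the etcdctl ExitWithError/ExitError helpers, and the io, os, and fmt imports; the exact output format is an assumption:

// recvLoop is a hedged sketch, not the original helper: it drains the watch
// stream and prints each response until the stream ends or errors out.
func recvLoop(wStream pb.Watch_WatchClient) {
	for {
		resp, err := wStream.Recv()
		if err == io.EOF {
			os.Exit(0)
		}
		if err != nil {
			ExitWithError(ExitError, err)
		}
		fmt.Printf("watch_id: %d, revision: %d\n", resp.WatchId, resp.Header.Revision)
		for _, ev := range resp.Events {
			fmt.Printf("%s: %s %s\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
}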
Example #4
// TestV3WatchCancel tests Watch API cancellation.
func TestV3WatchCancel(t *testing.T) {
	clus := newClusterGRPC(t, &clusterConfig{size: 3})
	wAPI := pb.NewWatchClient(clus.RandConn())

	wStream, errW := wAPI.Watch(context.TODO())
	if errW != nil {
		t.Fatalf("wAPI.Watch error: %v", errW)
	}

	if err := wStream.Send(&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo")}}); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}

	wresp, errR := wStream.Recv()
	if errR != nil {
		t.Errorf("wStream.Recv error: %v", errR)
	}
	if !wresp.Created {
		t.Errorf("wresp.Created got = %v, want = true", wresp.Created)
	}

	if err := wStream.Send(&pb.WatchRequest{CancelRequest: &pb.WatchCancelRequest{WatchId: wresp.WatchId}}); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}

	cresp, err := wStream.Recv()
	if err != nil {
		t.Errorf("wStream.Recv error: %v", err)
	}
	if !cresp.Canceled {
		t.Errorf("cresp.Canceled got = %v, want = true", cresp.Canceled)
	}

	kvc := pb.NewKVClient(clus.RandConn())
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		t.Errorf("couldn't put key (%v)", err)
	}

	// watch got canceled, so this should block
	rCh := make(chan *pb.WatchResponse)
	go func() {
		resp, _ := wStream.Recv()
		rCh <- resp
	}()
	select {
	case nr := <-rCh:
		t.Errorf("unexpected response is received %+v", nr)
	case <-time.After(2 * time.Second):
	}
	wStream.CloseSend()
	rv, ok := <-rCh
	if rv != nil || !ok {
		t.Errorf("rv, ok got = %v %v, want = nil true", rv, ok)
	}

	clus.Terminate(t)
}
Example #5
func testV3WatchCancel(t *testing.T, startRev int64) {
	clus := newClusterGRPC(t, &clusterConfig{size: 3})
	wAPI := pb.NewWatchClient(clus.RandConn())

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	wStream, errW := wAPI.Watch(ctx)
	if errW != nil {
		t.Fatalf("wAPI.Watch error: %v", errW)
	}

	wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
		CreateRequest: &pb.WatchCreateRequest{
			Key: []byte("foo"), StartRevision: startRev}}}
	if err := wStream.Send(wreq); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}

	wresp, errR := wStream.Recv()
	if errR != nil {
		t.Errorf("wStream.Recv error: %v", errR)
	}
	if !wresp.Created {
		t.Errorf("wresp.Created got = %v, want = true", wresp.Created)
	}

	creq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CancelRequest{
		CancelRequest: &pb.WatchCancelRequest{
			WatchId: wresp.WatchId}}}
	if err := wStream.Send(creq); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}

	cresp, err := wStream.Recv()
	if err != nil {
		t.Errorf("wStream.Recv error: %v", err)
	}
	if !cresp.Canceled {
		t.Errorf("cresp.Canceled got = %v, want = true", cresp.Canceled)
	}

	kvc := pb.NewKVClient(clus.RandConn())
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		t.Errorf("couldn't put key (%v)", err)
	}

	// watch got canceled, so this should block
	rok, nr := WaitResponse(wStream, 1*time.Second)
	if !rok {
		t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
	}

	clus.Terminate(t)
}
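Several of the test examples in this listing call a WaitResponse helper that is not shown. A hedged sketch, assuming it returns true when Recv stays blocked for the whole timeout, and false together with whatever response arrived early (the same pattern appears inline in Example #4):

// WaitResponse (sketch): wait up to d for a response on the stream; report
// (false, resp) if one arrives, or (true, nil) if Recv stays blocked and the
// stream closes cleanly after CloseSend.
func WaitResponse(wStream pb.Watch_WatchClient, d time.Duration) (bool, *pb.WatchResponse) {
	rCh := make(chan *pb.WatchResponse)
	go func() {
		resp, _ := wStream.Recv()
		rCh <- resp
	}()
	select {
	case nr := <-rCh:
		return false, nr
	case <-time.After(d):
	}
	wStream.CloseSend()
	rv, ok := <-rCh
	if rv != nil || !ok {
		return false, rv
	}
	return true, nil
}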
Example #6
// newClient wraps an existing gRPC connection in a Client, falling back to a
// default Config when none is given.
func newClient(conn *grpc.ClientConn, cfg *Config) *Client {
	if cfg == nil {
		cfg = &Config{RetryDialer: dialEndpointList}
	}
	return &Client{
		KV:      pb.NewKVClient(conn),
		Lease:   pb.NewLeaseClient(conn),
		Watch:   pb.NewWatchClient(conn),
		Cluster: pb.NewClusterClient(conn),
		conn:    conn,
		cfg:     *cfg,
	}
}
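A hypothetical usage sketch for this constructor; the endpoint address is made up, and the dial call mirrors the style of the other examples in this listing:

// Hypothetical usage: dial one endpoint, wrap the connection, and issue a Put
// through the raw KV client. The address below is an assumption.
conn, err := grpc.Dial("127.0.0.1:2378")
if err != nil {
	panic(err)
}
c := newClient(conn, nil)
if _, err := c.KV.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
	panic(err)
}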
Example #7
// watchCommandFunc executes the "watch" command.
func watchCommandFunc(cmd *cobra.Command, args []string) {
	endpoint, err := cmd.Flags().GetString("endpoint")
	if err != nil {
		ExitWithError(ExitInvalidInput, err)
	}
	conn, err := grpc.Dial(endpoint)
	if err != nil {
		ExitWithError(ExitBadConnection, err)
	}

	wAPI := pb.NewWatchClient(conn)
	wStream, err := wAPI.Watch(context.TODO())
	if err != nil {
		ExitWithError(ExitBadConnection, err)
	}

	go recvLoop(wStream)

	reader := bufio.NewReader(os.Stdin)

	for {
		l, err := reader.ReadString('\n')
		if err != nil {
			ExitWithError(ExitInvalidInput, fmt.Errorf("Error reading watch request line: %v", err))
		}
		l = strings.TrimSuffix(l, "\n")

		// TODO: support start and end revision
		segs := strings.Split(l, " ")
		if len(segs) != 2 {
			fmt.Fprintf(os.Stderr, "Invalid watch request format: use watch key or watchprefix prefix\n")
			continue
		}

		var r *pb.WatchRequest
		switch segs[0] {
		case "watch":
			r = &pb.WatchRequest{Key: []byte(segs[1])}
		case "watchprefix":
			r = &pb.WatchRequest{Prefix: []byte(segs[1])}
		default:
			fmt.Fprintf(os.Stderr, "Invalid watch request format: use watch key or watchprefix prefix\n")
			continue
		}

		err = wStream.Send(r)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error sending request to server: %v\n", err)
		}
	}
}
Example #8
// TestV3LeaseExpire ensures a key is deleted once a key expires.
func TestV3LeaseExpire(t *testing.T) {
	defer testutil.AfterTest(t)
	testLeaseRemoveLeasedKey(t, func(clus *clusterV3, leaseID int64) error {
		// let lease lapse; wait for deleted key

		wAPI := pb.NewWatchClient(clus.RandConn())
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		wStream, err := wAPI.Watch(ctx)
		if err != nil {
			return err
		}

		wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
			CreateRequest: &pb.WatchCreateRequest{
				Key: []byte("foo"), StartRevision: 1}}}
		if err := wStream.Send(wreq); err != nil {
			return err
		}
		if _, err := wStream.Recv(); err != nil {
			// the 'created' message
			return err
		}
		if _, err := wStream.Recv(); err != nil {
			// the 'put' message
			return err
		}

		errc := make(chan error, 1)
		go func() {
			resp, err := wStream.Recv()
			switch {
			case err != nil:
				errc <- err
			case len(resp.Events) != 1:
				fallthrough
			case resp.Events[0].Type != storagepb.DELETE:
				errc <- fmt.Errorf("expected key delete, got %v", resp)
			default:
				errc <- nil
			}
		}()

		select {
		case <-time.After(15 * time.Second):
			return fmt.Errorf("lease expiration too slow")
		case err := <-errc:
			return err
		}
	})
}
Example #9
// watchCommandFunc executes the "watch" command.
func watchCommandFunc(c *cli.Context) {
	conn, err := grpc.Dial(c.GlobalString("endpoint"))
	if err != nil {
		panic(err)
	}

	wAPI := pb.NewWatchClient(conn)
	wStream, err := wAPI.Watch(context.TODO())
	if err != nil {
		panic(err)
	}

	go recvLoop(wStream)

	reader := bufio.NewReader(os.Stdin)

	for {
		l, err := reader.ReadString('\n')
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error reading watch request line: %v", err)
			os.Exit(1)
		}
		l = strings.TrimSuffix(l, "\n")

		// TODO: support start and end revision
		segs := strings.Split(l, " ")
		if len(segs) != 2 {
			fmt.Fprintf(os.Stderr, "Invalid watch request format: use watch key or watchprefix prefix\n")
			continue
		}

		var r *pb.WatchRequest
		switch segs[0] {
		case "watch":
			r = &pb.WatchRequest{Key: []byte(segs[1])}
		case "watchprefix":
			r = &pb.WatchRequest{Prefix: []byte(segs[1])}
		default:
			fmt.Fprintf(os.Stderr, "Invalid watch request format: use watch key or watchprefix prefix\n")
			continue
		}

		err = wStream.Send(r)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error sending request to server: %v\n", err)
		}
	}
}
Example #10
// openWatchClient retries opening a watch client until retryConnection fails
func (w *watcher) openWatchClient() (ws pb.Watch_WatchClient, err error) {
	for {
		if ws, err = w.remote.Watch(w.ctx); ws != nil {
			break
		} else if isHalted(w.ctx, err) {
			return nil, err
		}
		newConn, nerr := w.c.retryConnection(w.conn, nil)
		if nerr != nil {
			return nil, nerr
		}
		w.conn = newConn
		w.remote = pb.NewWatchClient(w.conn)
	}
	return ws, nil
}
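isHalted is referenced above but not included. A minimal sketch, assuming it only treats a finished context or a context error as a permanent stop condition:

// Hedged sketch of isHalted: report true when retrying cannot help, i.e. the
// watcher's context is done or the error came from that context.
func isHalted(ctx context.Context, err error) bool {
	if ctx.Err() != nil {
		return true
	}
	return err == context.Canceled || err == context.DeadlineExceeded
}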
Example #11
// NewWatcher creates a Watcher, lets a remote client helper bind its
// WatchClient to the current connection, and starts the run loop.
func NewWatcher(c *Client) Watcher {
	ctx, cancel := context.WithCancel(context.Background())
	w := &watcher{
		ctx:     ctx,
		cancel:  cancel,
		streams: make(map[int64]*watcherStream),

		respc: make(chan *pb.WatchResponse),
		reqc:  make(chan *watchRequest),
		stopc: make(chan struct{}),
		donec: make(chan struct{}),
		errc:  make(chan error, 1),
	}

	f := func(conn *grpc.ClientConn) { w.remote = pb.NewWatchClient(conn) }
	w.rc = newRemoteClient(c, f)

	go w.run()
	return w
}
Example #12
// NewWatcher creates a Watcher over the client's active connection and starts
// its run loop.
func NewWatcher(c *Client) Watcher {
	ctx, cancel := context.WithCancel(context.Background())
	conn := c.ActiveConnection()

	w := &watcher{
		c:      c,
		conn:   conn,
		remote: pb.NewWatchClient(conn),

		ctx:     ctx,
		cancel:  cancel,
		streams: make(map[int64]*watcherStream),

		respc: make(chan *pb.WatchResponse),
		reqc:  make(chan *watchRequest),
		stopc: make(chan struct{}),
		donec: make(chan struct{}),
		errc:  make(chan error, 1),
	}
	go w.run()
	return w
}
Example #13
// newClient builds a Client around conn, constructing TLS transport
// credentials from cfg.TLS when configured.
func newClient(conn *grpc.ClientConn, cfg *Config) (*Client, error) {
	if cfg == nil {
		cfg = &Config{RetryDialer: dialEndpointList}
	}
	var creds *credentials.TransportAuthenticator
	if cfg.TLS != nil {
		tlscfg, err := cfg.TLS.ClientConfig()
		if err != nil {
			return nil, err
		}
		c := credentials.NewTLS(tlscfg)
		creds = &c
	}
	return &Client{
		KV:      pb.NewKVClient(conn),
		Lease:   pb.NewLeaseClient(conn),
		Watch:   pb.NewWatchClient(conn),
		Cluster: pb.NewClusterClient(conn),
		conn:    conn,
		cfg:     *cfg,
		creds:   creds,
	}, nil
}
Example #14
// testV3WatchMultipleStreams tests multiple watchers on the same key on multiple streams.
func testV3WatchMultipleStreams(t *testing.T, startRev int64) {
	clus := newClusterGRPC(t, &clusterConfig{size: 3})
	wAPI := pb.NewWatchClient(clus.RandConn())
	kvc := pb.NewKVClient(clus.RandConn())

	streams := make([]pb.Watch_WatchClient, 5)
	for i := range streams {
		wStream, errW := wAPI.Watch(context.TODO())
		if errW != nil {
			t.Fatalf("wAPI.Watch error: %v", errW)
		}
		if err := wStream.Send(&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: startRev}}); err != nil {
			t.Fatalf("wStream.Send error: %v", err)
		}
		streams[i] = wStream
	}

	for _, wStream := range streams {
		wresp, err := wStream.Recv()
		if err != nil {
			t.Fatalf("wStream.Recv error: %v", err)
		}
		if !wresp.Created {
			t.Fatalf("wresp.Created got = %v, want = true", wresp.Created)
		}
	}

	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}

	var wg sync.WaitGroup
	wg.Add(len(streams))
	wevents := []*storagepb.Event{
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
	}
	for i := range streams {
		go func(i int) {
			defer wg.Done()
			wStream := streams[i]
			wresp, err := wStream.Recv()
			if err != nil {
				t.Fatalf("wStream.Recv error: %v", err)
			}
			if wresp.WatchId != 0 {
				t.Errorf("watchId got = %d, want = 0", wresp.WatchId)
			}
			if !reflect.DeepEqual(wresp.Events, wevents) {
				t.Errorf("wresp.Events got = %+v, want = %+v", wresp.Events, wevents)
			}
			// now Recv should block because there are no more events coming
			rok, nr := WaitResponse(wStream, 1*time.Second)
			if !rok {
				t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
			}
		}(i)
	}
	wg.Wait()

	clus.Terminate(t)
}
Example #15
func TestV3WatchMultipleEventsPutUnsynced(t *testing.T) {
	clus := newClusterGRPC(t, &clusterConfig{size: 3})
	defer clus.Terminate(t)

	kvc := pb.NewKVClient(clus.RandConn())

	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo0"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo1"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}

	wAPI := pb.NewWatchClient(clus.RandConn())
	wStream, wErr := wAPI.Watch(context.TODO())
	if wErr != nil {
		t.Fatalf("wAPI.Watch error: %v", wErr)
	}

	if err := wStream.Send(&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("foo"), StartRevision: 1}}); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}

	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo0"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo1"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}

	allWevents := []*storagepb.Event{
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo0"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo1"), Value: []byte("bar"), CreateRevision: 3, ModRevision: 3, Version: 1},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo0"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 4, Version: 2},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo1"), Value: []byte("bar"), CreateRevision: 3, ModRevision: 5, Version: 2},
		},
	}

	events := []*storagepb.Event{}
	for len(events) < 4 {
		resp, err := wStream.Recv()
		if err != nil {
			t.Errorf("wStream.Recv error: %v", err)
		}
		if resp.Created {
			continue
		}
		events = append(events, resp.Events...)
		// if the PUT requests are committed by now, the first receive returns
		// multiple events; if not, it returns a single event. On a fast disk
		// (e.g. an SSD) it should return all 4 events at once.
	}

	if !reflect.DeepEqual(events, allWevents) {
		t.Errorf("events got = %+v, want = %+v", events, allWevents)
	}

	rok, nr := WaitResponse(wStream, 1*time.Second)
	if !rok {
		t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
	}
}
Example #16
// testV3WatchMultipleEventsTxn tests that the Watch API delivers multiple events committed by a single Txn.
func testV3WatchMultipleEventsTxn(t *testing.T, startRev int64) {
	clus := newClusterGRPC(t, &clusterConfig{size: 3})

	wAPI := pb.NewWatchClient(clus.RandConn())
	wStream, wErr := wAPI.Watch(context.TODO())
	if wErr != nil {
		t.Fatalf("wAPI.Watch error: %v", wErr)
	}

	if err := wStream.Send(&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("foo"), StartRevision: startRev}}); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}

	kvc := pb.NewKVClient(clus.RandConn())
	txn := pb.TxnRequest{}
	for i := 0; i < 3; i++ {
		ru := &pb.RequestUnion{}
		ru.RequestPut = &pb.PutRequest{Key: []byte(fmt.Sprintf("foo%d", i)), Value: []byte("bar")}
		txn.Success = append(txn.Success, ru)
	}

	tresp, err := kvc.Txn(context.Background(), &txn)
	if err != nil {
		t.Fatalf("kvc.Txn error: %v", err)
	}
	if !tresp.Succeeded {
		t.Fatalf("kvc.Txn failed: %+v", tresp)
	}

	events := []*storagepb.Event{}
	for len(events) < 3 {
		resp, err := wStream.Recv()
		if err != nil {
			t.Errorf("wStream.Recv error: %v", err)
		}
		if resp.Created {
			continue
		}
		events = append(events, resp.Events...)
	}
	sort.Sort(eventsSortByKey(events))

	wevents := []*storagepb.Event{
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo0"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo1"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo2"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
	}

	if !reflect.DeepEqual(events, wevents) {
		t.Errorf("events got = %+v, want = %+v", events, wevents)
	}

	rok, nr := WaitResponse(wStream, 1*time.Second)
	if !rok {
		t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
	}

	// can't defer because tcp ports will be in use
	clus.Terminate(t)
}
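The eventsSortByKey type passed to sort.Sort above is not shown. A hedged sketch implementing sort.Interface over the event slice, ordered by key bytes (requires the bytes and sort packages):

// eventsSortByKey (sketch): orders storagepb events by their key so the test
// can compare against a deterministic expected slice.
type eventsSortByKey []*storagepb.Event

func (evs eventsSortByKey) Len() int      { return len(evs) }
func (evs eventsSortByKey) Swap(i, j int) { evs[i], evs[j] = evs[j], evs[i] }
func (evs eventsSortByKey) Less(i, j int) bool {
	return bytes.Compare(evs[i].Kv.Key, evs[j].Kv.Key) < 0
}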
Example #17
// testV3WatchMultipleWatchers tests multiple watchers on the same key
// and one watcher with matching prefix. It first puts the key
// that matches all watchers, and another key that matches only
// one watcher to test if it receives expected events.
func testV3WatchMultipleWatchers(t *testing.T, startRev int64) {
	clus := newClusterGRPC(t, &clusterConfig{size: 3})
	wAPI := pb.NewWatchClient(clus.RandConn())
	kvc := pb.NewKVClient(clus.RandConn())

	wStream, errW := wAPI.Watch(context.TODO())
	if errW != nil {
		t.Fatalf("wAPI.Watch error: %v", errW)
	}

	watchKeyN := 4
	for i := 0; i < watchKeyN+1; i++ {
		var wreq *pb.WatchRequest
		if i < watchKeyN {
			wreq = &pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: startRev}}
		} else {
			wreq = &pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("fo"), StartRevision: startRev}}
		}
		if err := wStream.Send(wreq); err != nil {
			t.Fatalf("wStream.Send error: %v", err)
		}
	}

	ids := make(map[int64]struct{})
	for i := 0; i < watchKeyN+1; i++ {
		wresp, err := wStream.Recv()
		if err != nil {
			t.Fatalf("wStream.Recv error: %v", err)
		}
		if !wresp.Created {
			t.Fatalf("wresp.Created got = %v, want = true", wresp.Created)
		}
		ids[wresp.WatchId] = struct{}{}
	}

	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}

	for i := 0; i < watchKeyN+1; i++ {
		wresp, err := wStream.Recv()
		if err != nil {
			t.Fatalf("wStream.Recv error: %v", err)
		}
		if _, ok := ids[wresp.WatchId]; !ok {
			t.Errorf("watchId %d is not created!", wresp.WatchId)
		} else {
			delete(ids, wresp.WatchId)
		}
		if len(wresp.Events) == 0 {
			t.Errorf("#%d: no events received", i)
		}
		for _, ev := range wresp.Events {
			if string(ev.Kv.Key) != "foo" {
				t.Errorf("ev.Kv.Key got = %s, want = foo", ev.Kv.Key)
			}
			if string(ev.Kv.Value) != "bar" {
				t.Errorf("ev.Kv.Value got = %s, want = bar", ev.Kv.Value)
			}
		}
	}

	// now put one key that has only one matching watcher
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("fo"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	wresp, err := wStream.Recv()
	if err != nil {
		t.Errorf("wStream.Recv error: %v", err)
	}
	if len(wresp.Events) != 1 {
		t.Fatalf("len(wresp.Events) got = %d, want = 1", len(wresp.Events))
	}
	if string(wresp.Events[0].Kv.Key) != "fo" {
		t.Errorf("wresp.Events[0].Kv.Key got = %s, want = fo", wresp.Events[0].Kv.Key)
	}

	// now Recv should block because there are no more events coming
	rok, nr := WaitResponse(wStream, 1*time.Second)
	if !rok {
		t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
	}

	clus.Terminate(t)
}
Example #18
// TestV3WatchFromCurrentRevision tests the Watch API starting from the current revision.
func TestV3WatchFromCurrentRevision(t *testing.T) {
	tests := []struct {
		putKeys      []string
		watchRequest *pb.WatchRequest

		wresps []*pb.WatchResponse
	}{
		// watch the key, matching
		{
			[]string{"foo"},
			&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo")}},

			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: true,
				},
				{
					Header:  &pb.ResponseHeader{Revision: 2},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
						},
					},
				},
			},
		},
		// watch the key, non-matching
		{
			[]string{"foo"},
			&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("helloworld")}},

			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: true,
				},
			},
		},
		// watch the prefix, matching
		{
			[]string{"fooLong"},
			&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("foo")}},

			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: true,
				},
				{
					Header:  &pb.ResponseHeader{Revision: 2},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("fooLong"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
						},
					},
				},
			},
		},
		// watch the prefix, non-matching
		{
			[]string{"foo"},
			&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("helloworld")}},

			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: true,
				},
			},
		},
		// multiple puts, one watcher with matching key
		{
			[]string{"foo", "foo", "foo"},
			&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo")}},

			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: true,
				},
				{
					Header:  &pb.ResponseHeader{Revision: 2},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
						},
					},
				},
				{
					Header:  &pb.ResponseHeader{Revision: 3},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 3, Version: 2},
						},
					},
				},
				{
					Header:  &pb.ResponseHeader{Revision: 4},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 4, Version: 3},
						},
					},
				},
			},
		},
		// multiple puts, one watcher with matching prefix
		{
			[]string{"foo", "foo", "foo"},
			&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("foo")}},

			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: true,
				},
				{
					Header:  &pb.ResponseHeader{Revision: 2},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
						},
					},
				},
				{
					Header:  &pb.ResponseHeader{Revision: 3},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 3, Version: 2},
						},
					},
				},
				{
					Header:  &pb.ResponseHeader{Revision: 4},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 4, Version: 3},
						},
					},
				},
			},
		},
	}

	for i, tt := range tests {
		clus := newClusterGRPC(t, &clusterConfig{size: 3})

		wAPI := pb.NewWatchClient(clus.RandConn())
		wStream, err := wAPI.Watch(context.TODO())
		if err != nil {
			t.Fatalf("#%d: wAPI.Watch error: %v", i, err)
		}

		if err := wStream.Send(tt.watchRequest); err != nil {
			t.Fatalf("#%d: wStream.Send error: %v", i, err)
		}

		go func() {
			for _, k := range tt.putKeys {
				kvc := pb.NewKVClient(clus.RandConn())
				req := &pb.PutRequest{Key: []byte(k), Value: []byte("bar")}
				if _, err := kvc.Put(context.TODO(), req); err != nil {
					t.Fatalf("#%d: couldn't put key (%v)", i, err)
				}
			}
		}()

		var createdWatchId int64
		for j, wresp := range tt.wresps {
			resp, err := wStream.Recv()
			if err != nil {
				t.Errorf("#%d.%d: wStream.Recv error: %v", i, j, err)
			}

			if resp.Header == nil {
				t.Fatalf("#%d.%d: unexpected nil resp.Header", i, j)
			}
			if resp.Header.Revision != wresp.Header.Revision {
				t.Errorf("#%d.%d: resp.Header.Revision got = %d, want = %d", i, j, resp.Header.Revision, wresp.Header.Revision)
			}

			if wresp.Created != resp.Created {
				t.Errorf("#%d.%d: resp.Created got = %v, want = %v", i, j, resp.Created, wresp.Created)
			}
			if resp.Created {
				createdWatchId = resp.WatchId
			}
			if resp.WatchId != createdWatchId {
				t.Errorf("#%d.%d: resp.WatchId got = %d, want = %d", i, j, resp.WatchId, createdWatchId)
			}

			if !reflect.DeepEqual(resp.Events, wresp.Events) {
				t.Errorf("#%d.%d: resp.Events got = %+v, want = %+v", i, j, resp.Events, wresp.Events)
			}
		}

		rok, nr := WaitResponse(wStream, 1*time.Second)
		if !rok {
			t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
		}

		// can't defer because tcp ports will be in use
		clus.Terminate(t)
	}
}
Example #19
// NewEtcdClient bundles the KV, Lease and Watch gRPC clients for conn.
func NewEtcdClient(conn *grpc.ClientConn) *EtcdClient {
	kv := pb.NewKVClient(conn)
	lease := pb.NewLeaseClient(conn)
	watch := pb.NewWatchClient(conn)
	return &EtcdClient{conn, kv, lease, watch}
}
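A hypothetical usage sketch for NewEtcdClient; the endpoint is an assumption and error handling is kept minimal:

// Hypothetical usage: dial an endpoint and build the bundled client.
conn, err := grpc.Dial("127.0.0.1:2378")
if err != nil {
	panic(err)
}
client := NewEtcdClient(conn)
_ = client // the KV, Lease and Watch stubs are now ready to use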
Example #20
// NewWatcher creates a Watcher from the client's raw gRPC connection.
func NewWatcher(c *Client) Watcher {
	return NewWatchFromWatchClient(pb.NewWatchClient(c.conn))
}
Example #21
// WatchAndPut watches a key and later puts that key
// so that the watchers can return.
func (c *Cluster) WatchAndPut(connsN, streamsN, watchersN int) error {
	endpoint := ""
	var nd *Node
	for _, node := range c.NameToNode {
		if node.Flags.ExperimentalgRPCAddr != "" && !node.Terminated {
			endpoint = node.Flags.ExperimentalgRPCAddr
			nd = node
			break
		}
	}
	if endpoint == "" || nd == nil {
		return fmt.Errorf("no experimental-gRPC found")
	}

	switch nd.outputOption {
	case ToTerminal:
		fmt.Fprintln(nd.w, "[WatchAndPut] Started!")
	case ToHTML:
		nd.BufferStream <- "[WatchAndPut] Started!"
		if f, ok := nd.w.(http.Flusher); ok {
			if f != nil {
				f.Flush()
			}
		}
	}

	conns := make([]*grpc.ClientConn, connsN)
	for i := range conns {
		conns[i] = mustCreateConn(endpoint)
	}

	streams := make([]pb.Watch_WatchClient, streamsN)
	for i := range streams {
		watchClient := pb.NewWatchClient(conns[i%int(connsN)])
		wStream, err := watchClient.Watch(context.Background())
		if err != nil {
			return err
		}
		streams[i] = wStream
	}

	switch nd.outputOption {
	case ToTerminal:
		fmt.Fprintf(nd.w, fmt.Sprintf("[WatchAndPut] Launching all watchers! (name: %s, endpoint: %s)\n", nd.Flags.Name, endpoint))
	case ToHTML:
		nd.BufferStream <- fmt.Sprintf("[WatchAndPut] Launching all watchers! (name: %s, endpoint: %s)", nd.Flags.Name, endpoint)
		if f, ok := nd.w.(http.Flusher); ok {
			if f != nil {
				f.Flush()
			}
		}
	}

	keyToWatch := []byte("fo")
	for i := 0; i < watchersN; i++ {
		go func(i int) {
			wStream := streams[i%int(streamsN)]
			wr := &pb.WatchRequest{
				CreateRequest: &pb.WatchCreateRequest{Prefix: keyToWatch},
			}
			if err := wStream.Send(wr); err != nil {
				switch nd.outputOption {
				case ToTerminal:
					fmt.Fprintf(nd.w, fmt.Sprintf("[wStream.Send] error (%v)\n", err))
				case ToHTML:
					nd.BufferStream <- fmt.Sprintf("[wStream.Send] error (%v)", err)
					if f, ok := nd.w.(http.Flusher); ok {
						if f != nil {
							f.Flush()
						}
					}
				}
			}
		}(i)
	}

	streamsToWatchId := make(map[pb.Watch_WatchClient]map[int64]struct{})
	for i := 0; i < watchersN; i++ {
		wStream := streams[i%int(streamsN)]
		wresp, err := wStream.Recv()
		if err != nil {
			return err
		}
		if !wresp.Created {
			switch nd.outputOption {
			case ToTerminal:
				fmt.Fprintln(nd.w, "[WatchResponse] wresp.Created is supposed to be true! Something wrong!")
			case ToHTML:
				nd.BufferStream <- "[WatchResponse] wresp.Created is supposed to be true! Something wrong!"
				if f, ok := nd.w.(http.Flusher); ok {
					if f != nil {
						f.Flush()
					}
				}
			}
		}
		if _, ok := streamsToWatchId[wStream]; !ok {
			streamsToWatchId[wStream] = make(map[int64]struct{})
		}
		streamsToWatchId[wStream][wresp.WatchId] = struct{}{}
	}

	switch nd.outputOption {
	case ToTerminal:
		fmt.Fprintln(nd.w, "[PutRequest] Triggering notifications with PUT!")
	case ToHTML:
		nd.BufferStream <- "[PutRequest] Triggering notifications with PUT!"
		if f, ok := nd.w.(http.Flusher); ok {
			if f != nil {
				f.Flush()
			}
		}
	}

	kvc := pb.NewKVClient(conns[0])
	if _, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		return err
	}

	switch nd.outputOption {
	case ToTerminal:
		fmt.Fprintln(nd.w, "[Watch] Started!")
	case ToHTML:
		nd.BufferStream <- "[Watch] Started!"
		if f, ok := nd.w.(http.Flusher); ok {
			if f != nil {
				f.Flush()
			}
		}
	}

	st := time.Now()
	var wg sync.WaitGroup
	wg.Add(watchersN)

	for i := 0; i < watchersN; i++ {

		go func(i int) {

			defer wg.Done()

			wStream := streams[i%int(streamsN)]
			wresp, err := wStream.Recv()
			if err != nil {
				switch nd.outputOption {
				case ToTerminal:
					fmt.Fprintf(nd.w, fmt.Sprintf("[Watch] send error (%v)\n", err))
				case ToHTML:
					nd.BufferStream <- fmt.Sprintf("[Watch] send error (%v)", err)
					if f, ok := nd.w.(http.Flusher); ok {
						if f != nil {
							f.Flush()
						}
					}
				}
				return
			}

			switch {

			case wresp.Created:
				switch nd.outputOption {
				case ToTerminal:
					fmt.Fprintf(nd.w, fmt.Sprintf("[Watch revision] %d / watcher created %08x\n", wresp.Header.Revision, wresp.WatchId))
				case ToHTML:
					nd.BufferStream <- fmt.Sprintf("[Watch revision] %d / watcher created %08x", wresp.Header.Revision, wresp.WatchId)
					if f, ok := nd.w.(http.Flusher); ok {
						if f != nil {
							f.Flush()
						}
					}
				}

			case wresp.Canceled:
				switch nd.outputOption {
				case ToTerminal:
					fmt.Fprintf(nd.w, fmt.Sprintf("[Watch revision] %d / watcher canceled %08x\n", wresp.Header.Revision, wresp.WatchId))
				case ToHTML:
					nd.BufferStream <- fmt.Sprintf("[Watch revision] %d / watcher canceled %08x", wresp.Header.Revision, wresp.WatchId)
					if f, ok := nd.w.(http.Flusher); ok {
						if f != nil {
							f.Flush()
						}
					}
				}

			default:
				switch nd.outputOption {
				case ToTerminal:
					fmt.Fprintf(nd.w, fmt.Sprintf("[Watch revision] %d\n", wresp.Header.Revision))
				case ToHTML:
					nd.BufferStream <- fmt.Sprintf("[Watch revision] %d", wresp.Header.Revision)
					if f, ok := nd.w.(http.Flusher); ok {
						if f != nil {
							f.Flush()
						}
					}
				}
				for _, ev := range wresp.Events {
					switch nd.outputOption {
					case ToTerminal:
						fmt.Fprintf(nd.w, fmt.Sprintf("[%s] %s : %s\n", ev.Type, ev.Kv.Key, ev.Kv.Value))
					case ToHTML:
						nd.BufferStream <- fmt.Sprintf("[%s] %s : %s\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
						if f, ok := nd.w.(http.Flusher); ok {
							if f != nil {
								f.Flush()
							}
						}
					}
				}
			}

		}(i)

	}

	wg.Wait()

	switch nd.outputOption {
	case ToTerminal:
		fmt.Fprintf(nd.w, fmt.Sprintf("[Watch] Done! Took %v!\n", time.Since(st)))
	case ToHTML:
		nd.BufferStream <- fmt.Sprintf("[Watch] Done! Took %v!", time.Since(st))
		if f, ok := nd.w.(http.Flusher); ok {
			if f != nil {
				f.Flush()
			}
		}
	}

	return nil
}
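mustCreateConn is used here (and, without an argument, in the benchmark examples below) but is not part of the snippet. A minimal sketch of the variant used in this example, assuming it simply dials and aborts the process on failure:

// Hedged sketch of mustCreateConn as called in this example: dial the gRPC
// endpoint and give up on any error.
func mustCreateConn(endpoint string) *grpc.ClientConn {
	conn, err := grpc.Dial(endpoint)
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to dial %s (%v)\n", endpoint, err)
		os.Exit(1)
	}
	return conn
}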
Example #22
// NewWatcher returns a watcher that manages gRPC watch streams over the
// client's connection.
func NewWatcher(c *Client) Watcher {
	return &watcher{
		remote:  pb.NewWatchClient(c.conn),
		streams: make(map[string]*watchGrpcStream),
	}
}
Example #23
// TestV3WatchFromCurrentRevision tests the Watch API starting from the current revision.
func TestV3WatchFromCurrentRevision(t *testing.T) {
	tests := []struct {
		putKeys      []string
		watchRequest *pb.WatchRequest

		wresps []*pb.WatchResponse
	}{
		// watch the key, matching
		{
			[]string{"foo"},
			&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo")}},

			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: true,
				},
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 1, ModRevision: 1, Version: 1},
						},
					},
				},
			},
		},
		// watch the key, non-matching
		{
			[]string{"foo"},
			&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("helloworld")}},

			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: true,
				},
			},
		},
		// watch the prefix, matching
		{
			[]string{"fooLong"},
			&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("foo")}},

			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: true,
				},
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("fooLong"), Value: []byte("bar"), CreateRevision: 1, ModRevision: 1, Version: 1},
						},
					},
				},
			},
		},
		// watch the prefix, non-matching
		{
			[]string{"foo"},
			&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("helloworld")}},

			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: true,
				},
			},
		},
		// multiple puts, one watcher with matching key
		{
			[]string{"foo", "foo", "foo"},
			&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo")}},

			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: true,
				},
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 1, ModRevision: 1, Version: 1},
						},
					},
				},
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 1, ModRevision: 2, Version: 2},
						},
					},
				},
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 1, ModRevision: 3, Version: 3},
						},
					},
				},
			},
		},
		// multiple puts, one watcher with matching prefix
		{
			[]string{"foo", "foo", "foo"},
			&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("foo")}},

			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: true,
				},
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 1, ModRevision: 1, Version: 1},
						},
					},
				},
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 1, ModRevision: 2, Version: 2},
						},
					},
				},
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 1, ModRevision: 3, Version: 3},
						},
					},
				},
			},
		},

		// TODO: watch and receive multiple-events from synced (need Txn)
	}

	for i, tt := range tests {
		clus := newClusterGRPC(t, &clusterConfig{size: 3})

		wAPI := pb.NewWatchClient(clus.RandConn())
		wStream, err := wAPI.Watch(context.TODO())
		if err != nil {
			t.Fatalf("#%d: wAPI.Watch error: %v", i, err)
		}

		if err := wStream.Send(tt.watchRequest); err != nil {
			t.Fatalf("#%d: wStream.Send error: %v", i, err)
		}

		kvc := pb.NewKVClient(clus.RandConn())
		for _, k := range tt.putKeys {
			if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte(k), Value: []byte("bar")}); err != nil {
				t.Fatalf("#%d: couldn't put key (%v)", i, err)
			}
		}

		var createdWatchId int64
		for j, wresp := range tt.wresps {
			resp, err := wStream.Recv()
			if err != nil {
				t.Errorf("#%d.%d: wStream.Recv error: %v", i, j, err)
			}

			if resp.Header == nil {
				t.Fatalf("#%d.%d: unexpected nil resp.Header", i, j)
			}
			if resp.Header.Revision != wresp.Header.Revision {
				t.Logf("[TODO - skip for now] #%d.%d: resp.Header.Revision got = %d, want = %d", i, j, resp.Header.Revision, wresp.Header.Revision)
			}

			if wresp.Created != resp.Created {
				t.Errorf("#%d.%d: resp.Created got = %v, want = %v", i, j, resp.Created, wresp.Created)
			}
			if resp.Created {
				createdWatchId = resp.WatchId
			}
			if resp.WatchId != createdWatchId {
				t.Errorf("#%d.%d: resp.WatchId got = %d, want = %d", i, j, resp.WatchId, createdWatchId)
			}

			if !reflect.DeepEqual(resp.Events, wresp.Events) {
				t.Errorf("#%d.%d: resp.Events got = %+v, want = %+v", i, j, resp.Events, wresp.Events)
			}
		}

		rCh := make(chan *pb.WatchResponse)
		go func() {
			resp, _ := wStream.Recv()
			rCh <- resp
		}()
		select {
		case nr := <-rCh:
			t.Errorf("#%d: unexpected response is received %+v", i, nr)
		case <-time.After(2 * time.Second):
		}
		wStream.CloseSend()
		rv, ok := <-rCh
		if rv != nil || !ok {
			t.Errorf("#%d: rv, ok got = %v %v, want = nil true", i, rv, ok)
		}

		// can't defer because tcp ports will be in use
		clus.Terminate(t)
	}
}
Example #24
func watchFunc(cmd *cobra.Command, args []string) {
	watched := make([][]byte, watchedKeyTotal)
	for i := range watched {
		watched[i] = mustRandBytes(32)
	}

	requests := make(chan etcdserverpb.WatchRequest, totalClients)

	conns := make([]*grpc.ClientConn, totalConns)
	for i := range conns {
		conns[i] = mustCreateConn()
	}

	clients := make([]etcdserverpb.WatchClient, totalClients)
	for i := range clients {
		clients[i] = etcdserverpb.NewWatchClient(conns[i%int(totalConns)])
	}

	streams := make([]etcdserverpb.Watch_WatchClient, watchTotalStreams)
	var err error
	for i := range streams {
		streams[i], err = clients[i%int(totalClients)].Watch(context.TODO())
		if err != nil {
			fmt.Fprintln(os.Stderr, "Failed to create watch stream:", err)
			os.Exit(1)
		}
	}

	for i := range streams {
		wg.Add(1)
		go doWatch(streams[i], requests)
	}

	// watching phase
	results = make(chan result)
	bar = pb.New(watchTotal)

	bar.Format("Bom !")
	bar.Start()

	pdoneC := printRate(results)

	go func() {
		for i := 0; i < watchTotal; i++ {
			requests <- etcdserverpb.WatchRequest{
				CreateRequest: &etcdserverpb.WatchCreateRequest{Key: watched[i%(len(watched))]},
			}
		}
		close(requests)
	}()

	wg.Wait()
	bar.Finish()

	fmt.Printf("Watch creation summary:\n")
	close(results)
	<-pdoneC

	// put phase
	kv := etcdserverpb.NewKVClient(conns[0])
	// total number of puts * number of watchers on each key
	eventsTotal := watchPutTotal * (watchTotal / watchedKeyTotal)

	results = make(chan result)
	bar = pb.New(eventsTotal)

	bar.Format("Bom !")
	bar.Start()

	putreqc := make(chan etcdserverpb.PutRequest)

	for i := 0; i < watchPutTotal; i++ {
		wg.Add(1)
		go doPut(context.TODO(), kv, putreqc)
	}

	pdoneC = printRate(results)

	go func() {
		for i := 0; i < eventsTotal; i++ {
			putreqc <- etcdserverpb.PutRequest{
				Key:   watched[i%(len(watched))],
				Value: []byte("data"),
			}
			// TODO: use a real rate-limiter instead of sleep.
			time.Sleep(time.Second / time.Duration(watchPutRate))
		}
		close(putreqc)
	}()

	wg.Wait()
	bar.Finish()
	fmt.Printf("Watch events received summary:\n")
	close(results)
	<-pdoneC
}
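The doWatch worker started above is not included in this listing. A heavily simplified sketch matching this example's channel types (value-typed requests, package-level wg); it only forwards the create requests onto its stream and omits the benchmark's result accounting, whose type is not visible here:

// doWatch (sketch): forward each watch-create request from the shared channel
// onto this worker's stream, then signal completion. Result and latency
// reporting from the real benchmark is intentionally omitted.
func doWatch(stream etcdserverpb.Watch_WatchClient, requests <-chan etcdserverpb.WatchRequest) {
	defer wg.Done()
	for r := range requests {
		req := r
		if err := stream.Send(&req); err != nil {
			fmt.Fprintln(os.Stderr, "Failed to send watch request:", err)
		}
	}
}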
Example #25
func watchFunc(cmd *cobra.Command, args []string) {
	watched := make([][]byte, watchedKeyTotal)
	for i := range watched {
		watched[i] = mustRandBytes(32)
	}

	requests := make(chan *etcdserverpb.WatchRequest, watchTotal)

	conns := make([]*grpc.ClientConn, totalConns)
	for i := range conns {
		conns[i] = mustCreateConn()
	}

	clients := make([]etcdserverpb.WatchClient, totalClients)
	for i := range clients {
		clients[i] = etcdserverpb.NewWatchClient(conns[i%int(totalConns)])
	}

	streams := make([]etcdserverpb.Watch_WatchClient, watchTotalStreams)
	var err error
	for i := range streams {
		streams[i], err = clients[i%int(totalClients)].Watch(context.TODO())
		if err != nil {
			fmt.Fprintln(os.Stderr, "Failed to create watch stream:", err)
			os.Exit(1)
		}
	}

	for i := range streams {
		wg.Add(1)
		go doWatch(streams[i], requests)
	}

	// watching phase
	results = make(chan *result, watchTotal)
	bar = pb.New(watchTotal)

	bar.Format("Bom !")
	bar.Start()

	start := time.Now()
	for i := 0; i < watchTotal; i++ {
		r := &etcdserverpb.WatchRequest{
			Key: watched[i%(len(watched))],
		}
		requests <- r
	}
	close(requests)

	wg.Wait()
	bar.Finish()
	fmt.Printf("Watch creation summary:\n")
	printRate(watchTotal, results, time.Now().Sub(start))

	// put phase
	kv := etcdserverpb.NewKVClient(conns[0])
	// total number of puts * number of watchers on each key
	eventsTotal := watchPutTotal * (watchTotal / watchedKeyTotal)

	results = make(chan *result, eventsTotal)
	bar = pb.New(eventsTotal)

	bar.Format("Bom !")
	bar.Start()

	start = time.Now()

	// TODO: create multiple clients to do put to increase throughput
	// TODO: use a real rate-limiter instead of sleep.
	for i := 0; i < watchPutTotal; i++ {
		r := &etcdserverpb.PutRequest{
			Key:   watched[i%(len(watched))],
			Value: []byte("data"),
		}
		_, err := kv.Put(context.TODO(), r)
		if err != nil {
			fmt.Fprintln(os.Stderr, "Failed to put:", err)
		}
		time.Sleep(time.Second / time.Duration(watchPutRate))
	}

	for {
		if len(results) == eventsTotal {
			break
		}
		time.Sleep(50 * time.Millisecond)
	}

	bar.Finish()
	fmt.Printf("Watch events received summary:\n")
	printRate(eventsTotal, results, time.Now().Sub(start))
}