// TestRetryReadRows verifies that ReadRows retries after retryable stream
// errors and resumes the scan from the key just past the last row already
// delivered, so the caller sees every row exactly once.
func TestRetryReadRows(t *testing.T) {
	ctx := context.Background()

	// Intercept requests and delegate to an interceptor defined by the test case.
	// f is swapped out by the test to inject per-attempt behavior; errCount
	// tracks which attempt the fake server is currently handling.
	errCount := 0
	var f func(grpc.ServerStream) error
	errInjector := func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
		if strings.HasSuffix(info.FullMethod, "ReadRows") {
			return f(ss)
		}
		return handler(ctx, ss)
	}
	tbl, cleanup, err := setupFakeServer(grpc.StreamInterceptor(errInjector))
	defer cleanup()
	if err != nil {
		t.Fatalf("fake server setup: %v", err)
	}

	errCount = 0
	// Test overall request failure and retries
	f = func(ss grpc.ServerStream) error {
		var err error
		req := new(btpb.ReadRowsRequest)
		// NOTE(review): RecvMsg error deliberately ignored — test helper.
		ss.RecvMsg(req)
		switch errCount {
		case 0:
			// Retryable request failure
			err = grpc.Errorf(codes.Unavailable, "")
		case 1:
			// Write two rows then error
			writeReadRowsResponse(ss, "a", "b")
			err = grpc.Errorf(codes.Unavailable, "")
		case 2:
			// Retryable request failure.
			// The retried request must resume at "b\x00", i.e. immediately
			// after the last row ("b") that was already returned in case 1.
			if want, got := "b\x00", string(req.Rows.RowRanges[0].GetStartKeyClosed()); want != got {
				t.Errorf("2 range retries: got %q, want %q", got, want)
			}
			err = grpc.Errorf(codes.Unavailable, "")
		case 3:
			// Write two more rows
			writeReadRowsResponse(ss, "c", "d")
			err = nil
		}
		errCount++
		return err
	}

	// Collect the keys the client observes; across all retries the client
	// should see each of "a".."d" exactly once, in order.
	var got []string
	tbl.ReadRows(ctx, NewRange("a", "z"), func(r Row) bool {
		got = append(got, r.Key())
		return true
	})
	want := []string{"a", "b", "c", "d"}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("retry range integration: got %v, want %v", got, want)
	}
}
func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server { var opts []grpc.ServerOption opts = append(opts, grpc.CustomCodec(&codec{})) if tls != nil { opts = append(opts, grpc.Creds(credentials.NewTLS(tls))) } opts = append(opts, grpc.UnaryInterceptor(newUnaryInterceptor(s))) opts = append(opts, grpc.StreamInterceptor(newStreamInterceptor(s))) grpcServer := grpc.NewServer(opts...) pb.RegisterKVServer(grpcServer, NewQuotaKVServer(s)) pb.RegisterWatchServer(grpcServer, NewWatchServer(s)) pb.RegisterLeaseServer(grpcServer, NewQuotaLeaseServer(s)) pb.RegisterClusterServer(grpcServer, NewClusterServer(s)) pb.RegisterAuthServer(grpcServer, NewAuthServer(s)) pb.RegisterMaintenanceServer(grpcServer, NewMaintenanceServer(s)) return grpcServer }
func (c *Component) ServerOptions() []grpc.ServerOption { unary := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { var peerAddr string peer, ok := peer.FromContext(ctx) if ok { peerAddr = peer.Addr.String() } var peerID string meta, ok := metadata.FromContext(ctx) if ok { id, ok := meta["id"] if ok && len(id) > 0 { peerID = id[0] } } logCtx := c.Ctx.WithFields(log.Fields{ "CallerID": peerID, "CallerIP": peerAddr, "Method": info.FullMethod, }) t := time.Now() iface, err := handler(ctx, req) err = errors.BuildGRPCError(err) logCtx = logCtx.WithField("Duration", time.Now().Sub(t)) if grpc.Code(err) == codes.OK || grpc.Code(err) == codes.Canceled { logCtx.Debug("Handled request") } else { logCtx.WithField("ErrCode", grpc.Code(err)).WithError(err).Debug("Handled request with error") } return iface, err } stream := func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { var peerAddr string peer, ok := peer.FromContext(stream.Context()) if ok { peerAddr = peer.Addr.String() } var peerID string meta, ok := metadata.FromContext(stream.Context()) if ok { id, ok := meta["id"] if ok && len(id) > 0 { peerID = id[0] } } logCtx := c.Ctx.WithFields(log.Fields{ "CallerID": peerID, "CallerIP": peerAddr, "Method": info.FullMethod, }) t := time.Now() logCtx.Debug("Start stream") err := handler(srv, stream) err = errors.BuildGRPCError(err) logCtx = logCtx.WithField("Duration", time.Now().Sub(t)) if grpc.Code(err) == codes.OK || grpc.Code(err) == codes.Canceled { logCtx.Debug("End stream") } else { logCtx.WithField("ErrCode", grpc.Code(err)).WithError(err).Debug("End stream with error") } return err } opts := []grpc.ServerOption{ grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(unary)), grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(stream)), } if c.tlsConfig != nil { opts = append(opts, grpc.Creds(credentials.NewTLS(c.tlsConfig))) } return 
opts }
func startGRPCProxy(cmd *cobra.Command, args []string) { l, err := net.Listen("tcp", grpcProxyListenAddr) if err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) } if l, err = transport.NewKeepAliveListener(l, "tcp", nil); err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) } plog.Infof("listening for grpc-proxy client requests on %s", grpcProxyListenAddr) defer func() { l.Close() plog.Infof("stopping listening for grpc-proxy client requests on %s", grpcProxyListenAddr) }() m := cmux.New(l) cfg, err := newClientCfg() if err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) } client, err := clientv3.New(*cfg) if err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) } kvp := grpcproxy.NewKvProxy(client) watchp := grpcproxy.NewWatchProxy(client) clusterp := grpcproxy.NewClusterProxy(client) leasep := grpcproxy.NewLeaseProxy(client) mainp := grpcproxy.NewMaintenanceProxy(client) authp := grpcproxy.NewAuthProxy(client) server := grpc.NewServer( grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor), grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor), ) pb.RegisterKVServer(server, kvp) pb.RegisterWatchServer(server, watchp) pb.RegisterClusterServer(server, clusterp) pb.RegisterLeaseServer(server, leasep) pb.RegisterMaintenanceServer(server, mainp) pb.RegisterAuthServer(server, authp) errc := make(chan error) grpcl := m.Match(cmux.HTTP2()) go func() { errc <- server.Serve(grpcl) }() httpmux := http.NewServeMux() httpmux.HandleFunc("/", http.NotFound) httpmux.Handle("/metrics", prometheus.Handler()) srvhttp := &http.Server{ Handler: httpmux, } var httpl net.Listener if cfg.TLS != nil { srvhttp.TLSConfig = cfg.TLS httpl = tls.NewListener(m.Match(cmux.Any()), cfg.TLS) } else { httpl = m.Match(cmux.HTTP1()) } go func() { errc <- srvhttp.Serve(httpl) }() go func() { errc <- m.Serve() }() fmt.Fprintln(os.Stderr, <-errc) os.Exit(1) }
func TestRetryApplyBulk(t *testing.T) { ctx := context.Background() // Intercept requests and delegate to an interceptor defined by the test case errCount := 0 var f func(grpc.ServerStream) error errInjector := func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { if strings.HasSuffix(info.FullMethod, "MutateRows") { return f(ss) } return handler(ctx, ss) } tbl, cleanup, err := setupFakeServer(grpc.StreamInterceptor(errInjector)) defer cleanup() if err != nil { t.Fatalf("fake server setup: %v", err) } errCount = 0 // Test overall request failure and retries f = func(ss grpc.ServerStream) error { if errCount < 3 { errCount++ return grpc.Errorf(codes.Aborted, "") } return nil } mut := NewMutation() mut.Set("cf", "col", 1, []byte{}) errors, err := tbl.ApplyBulk(ctx, []string{"row2"}, []*Mutation{mut}) if errors != nil || err != nil { t.Errorf("bulk with request failure: got: %v, %v, want: nil", errors, err) } // Test failures and retries in one request errCount = 0 m1 := NewMutation() m1.Set("cf", "col", 1, []byte{}) m2 := NewMutation() m2.Set("cf", "col2", 1, []byte{}) m3 := NewMutation() m3.Set("cf", "col3", 1, []byte{}) f = func(ss grpc.ServerStream) error { var err error req := new(btpb.MutateRowsRequest) ss.RecvMsg(req) switch errCount { case 0: // Retryable request failure err = grpc.Errorf(codes.Unavailable, "") case 1: // Two mutations fail writeMutateRowsResponse(ss, codes.Unavailable, codes.OK, codes.Aborted) err = nil case 2: // Two failures were retried. One will succeed. if want, got := 2, len(req.Entries); want != got { t.Errorf("2 bulk retries, got: %d, want %d", got, want) } writeMutateRowsResponse(ss, codes.OK, codes.Aborted) err = nil case 3: // One failure was retried and will succeed. 
if want, got := 1, len(req.Entries); want != got { t.Errorf("1 bulk retry, got: %d, want %d", got, want) } writeMutateRowsResponse(ss, codes.OK) err = nil } errCount++ return err } errors, err = tbl.ApplyBulk(ctx, []string{"row1", "row2", "row3"}, []*Mutation{m1, m2, m3}) if errors != nil || err != nil { t.Errorf("bulk with retries: got: %v, %v, want: nil", errors, err) } // Test unretryable errors niMut := NewMutation() niMut.Set("cf", "col", ServerTime, []byte{}) // Non-idempotent errCount = 0 f = func(ss grpc.ServerStream) error { var err error req := new(btpb.MutateRowsRequest) ss.RecvMsg(req) switch errCount { case 0: // Give non-idempotent mutation a retryable error code. // Nothing should be retried. writeMutateRowsResponse(ss, codes.FailedPrecondition, codes.Aborted) err = nil case 1: t.Errorf("unretryable errors: got one retry, want no retries") } errCount++ return err } errors, err = tbl.ApplyBulk(ctx, []string{"row1", "row2"}, []*Mutation{m1, niMut}) if err != nil { t.Errorf("unretryable errors: request failed %v") } want := []error{ grpc.Errorf(codes.FailedPrecondition, ""), grpc.Errorf(codes.Aborted, ""), } if !reflect.DeepEqual(want, errors) { t.Errorf("unretryable errors: got: %v, want: %v", errors, want) } // Test individual errors and a deadline exceeded f = func(ss grpc.ServerStream) error { writeMutateRowsResponse(ss, codes.FailedPrecondition, codes.OK, codes.Aborted) return nil } ctx, _ = context.WithTimeout(ctx, 100*time.Millisecond) errors, err = tbl.ApplyBulk(ctx, []string{"row1", "row2", "row3"}, []*Mutation{m1, m2, m3}) wantErr := context.DeadlineExceeded if wantErr != err { t.Errorf("deadline exceeded error: got: %v, want: %v", err, wantErr) } if errors != nil { t.Errorf("deadline exceeded errors: got: %v, want: nil", err) } }