func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) {
	resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), grpc.FailFast(false))
	if err != nil {
		return nil, toErr(ctx, err)
	}
	return (*CompactResponse)(resp), err
}
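// Usage sketch (not part of the source above): compacting the etcd key space
// through the public clientv3 API, which wraps the Compact method shown
// above. The endpoint and revision values are illustrative assumptions.
func exampleCompact() error {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		return err
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	// Discard history older than revision 100; WithCompactPhysical waits
	// until the compaction is physically applied to the backend.
	_, err = cli.Compact(ctx, 100, clientv3.WithCompactPhysical())
	return err
}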
func (cli *grpcClient) OnStart() error {
	cli.QuitService.OnStart()

RETRY_LOOP:
	for {
		conn, err := grpc.Dial(cli.addr, grpc.WithInsecure(), grpc.WithDialer(dialerFunc))
		if err != nil {
			if cli.mustConnect {
				return err
			}
			log.Warn(Fmt("tmsp.grpcClient failed to connect to %v. Retrying...\n", cli.addr))
			time.Sleep(time.Second * 3)
			continue RETRY_LOOP
		}
		client := types.NewTMSPApplicationClient(conn)

	ENSURE_CONNECTED:
		for {
			_, err := client.Echo(context.Background(), &types.RequestEcho{"hello"}, grpc.FailFast(true))
			if err == nil {
				break ENSURE_CONNECTED
			}
			time.Sleep(time.Second)
		}

		cli.client = client
		return nil
	}
}
func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
	cctx, cancel := context.WithCancel(ctx)
	defer cancel()

	stream, err := l.remote.LeaseKeepAlive(cctx, grpc.FailFast(false))
	if err != nil {
		return nil, toErr(ctx, err)
	}

	err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)})
	if err != nil {
		return nil, toErr(ctx, err)
	}

	resp, rerr := stream.Recv()
	if rerr != nil {
		return nil, toErr(ctx, rerr)
	}

	karesp := &LeaseKeepAliveResponse{
		ResponseHeader: resp.GetHeader(),
		ID:             LeaseID(resp.ID),
		TTL:            resp.TTL,
	}
	return karesp, nil
}
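// Usage sketch (an assumption, not taken from the source above): the exported
// clientv3 Lease API that sits on top of keepAliveOnce. It grants a lease,
// attaches a key to it, and refreshes the TTL once. The key and TTL values
// are illustrative; cli is assumed to be a connected *clientv3.Client.
func exampleLeaseKeepAlive(cli *clientv3.Client) error {
	ctx := context.Background()

	// Grant a 10-second lease.
	grant, err := cli.Grant(ctx, 10)
	if err != nil {
		return err
	}
	// Attach a key to the lease so the key expires with it.
	if _, err := cli.Put(ctx, "foo", "bar", clientv3.WithLease(grant.ID)); err != nil {
		return err
	}
	// Refresh the lease TTL a single time (wraps keepAliveOnce above).
	_, err = cli.KeepAliveOnce(ctx, grant.ID)
	return err
}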
// intercept fulfils the grpc.UnaryClientInterceptor interface. Note that while
// this API is currently experimental, the metrics it reports should be kept as
// stable as can be, *within reason*.
func (ci *clientInterceptor) intercept(
	ctx context.Context,
	method string,
	req, reply interface{},
	cc *grpc.ClientConn,
	invoker grpc.UnaryInvoker,
	opts ...grpc.CallOption) error {
	localCtx, cancel := context.WithTimeout(ctx, ci.timeout)
	defer cancel()
	s := ci.clk.Now()
	methodScope := ci.stats.NewScope(cleanMethod(method, false))
	methodScope.Inc("Calls", 1)
	methodScope.GaugeDelta("InProgress", 1)
	// Disable fail-fast so RPCs will retry until deadline, even if all
	// backends are down.
	opts = append(opts, grpc.FailFast(false))
	err := grpc_prometheus.UnaryClientInterceptor(localCtx, method, req, reply, cc, invoker, opts...)
	methodScope.TimingDuration("Latency", ci.clk.Since(s))
	methodScope.GaugeDelta("InProgress", -1)
	if err != nil {
		methodScope.Inc("Failed", 1)
	}
	return err
}
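// Wiring sketch (an assumption, not part of the source above): how a unary
// client interceptor like intercept is typically installed on a connection.
// Only grpc.WithUnaryInterceptor and grpc.WithInsecure are real gRPC API;
// the helper name is hypothetical.
func dialWithInterceptor(addr string, ci *clientInterceptor) (*grpc.ClientConn, error) {
	return grpc.Dial(addr,
		grpc.WithInsecure(),
		// Every unary RPC on this connection now flows through ci.intercept,
		// which records metrics and appends grpc.FailFast(false).
		grpc.WithUnaryInterceptor(ci.intercept),
	)
}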
func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) {
	var err error
	switch op.t {
	// TODO: handle other ops
	case tRange:
		var resp *pb.RangeResponse
		resp, err = kv.remote.Range(ctx, op.toRangeRequest(), grpc.FailFast(false))
		if err == nil {
			return OpResponse{get: (*GetResponse)(resp)}, nil
		}
	case tPut:
		var resp *pb.PutResponse
		r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV}
		resp, err = kv.remote.Put(ctx, r)
		if err == nil {
			return OpResponse{put: (*PutResponse)(resp)}, nil
		}
	case tDeleteRange:
		var resp *pb.DeleteRangeResponse
		r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
		resp, err = kv.remote.DeleteRange(ctx, r)
		if err == nil {
			return OpResponse{del: (*DeleteResponse)(resp)}, nil
		}
	default:
		panic("Unknown op")
	}
	return OpResponse{}, err
}
func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) {
	req := &pb.AlarmRequest{
		Action:   pb.AlarmRequest_DEACTIVATE,
		MemberID: am.MemberID,
		Alarm:    am.Alarm,
	}

	if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE {
		ar, err := m.AlarmList(ctx)
		if err != nil {
			return nil, toErr(ctx, err)
		}
		ret := AlarmResponse{}
		for _, am := range ar.Alarms {
			dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am))
			if derr != nil {
				return nil, toErr(ctx, derr)
			}
			ret.Alarms = append(ret.Alarms, dresp.Alarms...)
		}
		return &ret, nil
	}

	resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false))
	if err == nil {
		return (*AlarmResponse)(resp), nil
	}
	return nil, toErr(ctx, err)
}
func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) {
	cctx, cancel := context.WithCancel(ctx)
	done := cancelWhenStop(cancel, l.stopCtx.Done())
	defer close(done)

	for {
		r := &pb.LeaseGrantRequest{TTL: ttl}
		resp, err := l.remote.LeaseGrant(cctx, r, grpc.FailFast(false))
		if err == nil {
			gresp := &LeaseGrantResponse{
				ResponseHeader: resp.GetHeader(),
				ID:             LeaseID(resp.ID),
				TTL:            resp.TTL,
				Error:          resp.Error,
			}
			return gresp, nil
		}
		if isHaltErr(cctx, err) {
			return nil, toErr(ctx, err)
		}
		if nerr := l.newStream(); nerr != nil {
			return nil, nerr
		}
	}
}
func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
	ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, grpc.FailFast(false))
	if err != nil {
		return nil, toErr(ctx, err)
	}

	pr, pw := io.Pipe()
	go func() {
		for {
			resp, err := ss.Recv()
			if err != nil {
				pw.CloseWithError(err)
				return
			}
			if resp == nil && err == nil {
				break
			}
			if _, werr := pw.Write(resp.Blob); werr != nil {
				pw.CloseWithError(werr)
				return
			}
		}
		pw.Close()
	}()
	return pr, nil
}
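// Usage sketch (an assumption, not part of the source above): streaming the
// snapshot reader returned above into a local file via the public clientv3
// API. Assumes the standard io and os packages are imported; the path is
// illustrative.
func saveSnapshot(ctx context.Context, cli *clientv3.Client, path string) error {
	rc, err := cli.Snapshot(ctx)
	if err != nil {
		return err
	}
	defer rc.Close()

	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()
	// Copy the snapshot stream; any Recv error surfaces here through the
	// pipe's CloseWithError in the goroutine above.
	_, err = io.Copy(f, rc)
	return err
}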
func TestDropRequestFailedNonFailFast(t *testing.T) {
	// Start a backend.
	beLis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Failed to listen %v", err)
	}
	beAddr := strings.Split(beLis.Addr().String(), ":")
	bePort, err := strconv.Atoi(beAddr[1])
	if err != nil {
		t.Fatalf("Failed to generate the port number %v", err)
	}
	backends := startBackends(t, besn, beLis)
	defer stopBackends(backends)

	// Start a load balancer.
	lbLis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Failed to create the listener for the load balancer %v", err)
	}
	lbCreds := &serverNameCheckCreds{
		sn: lbsn,
	}
	lb := grpc.NewServer(grpc.Creds(lbCreds))
	be := &lbpb.Server{
		IpAddress:        []byte(beAddr[0]),
		Port:             int32(bePort),
		LoadBalanceToken: lbToken,
		DropRequest:      true,
	}
	var bes []*lbpb.Server
	bes = append(bes, be)
	sl := &lbpb.ServerList{
		Servers: bes,
	}
	ls := newRemoteBalancer(sl)
	lbpb.RegisterLoadBalancerServer(lb, ls)
	go func() {
		lb.Serve(lbLis)
	}()
	defer func() {
		ls.stop()
		lb.Stop()
	}()

	creds := serverNameCheckCreds{
		expected: besn,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	cc, err := grpc.DialContext(ctx, besn,
		grpc.WithBalancer(Balancer(&testNameResolver{
			addr: lbLis.Addr().String(),
		})),
		grpc.WithBlock(), grpc.WithTransportCredentials(&creds))
	if err != nil {
		t.Fatalf("Failed to dial to the backend %v", err)
	}
	helloC := hwpb.NewGreeterClient(cc)
	ctx, cancel = context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	// With fail-fast disabled, the RPC retries past the dropped request until
	// its deadline expires.
	if _, err := helloC.SayHello(ctx, &hwpb.HelloRequest{Name: "grpc"}, grpc.FailFast(false)); grpc.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("%v.SayHello(_, _) = _, %v, want _, %s", helloC, err, codes.DeadlineExceeded)
	}
	cc.Close()
}
func (c *cluster) compactKV(rev int64, timeout time.Duration) (err error) {
	if rev <= 0 {
		return nil
	}

	for i, m := range c.Members {
		u := m.ClientURL
		conn, derr := m.dialGRPC()
		if derr != nil {
			plog.Printf("[compact kv #%d] dial error %v (endpoint %s)", i, derr, u)
			err = derr
			continue
		}
		kvc := pb.NewKVClient(conn)
		ctx, cancel := context.WithTimeout(context.Background(), timeout)
		plog.Printf("[compact kv #%d] starting (endpoint %s)", i, u)
		_, cerr := kvc.Compact(ctx, &pb.CompactionRequest{Revision: rev, Physical: true}, grpc.FailFast(false))
		cancel()
		conn.Close()
		succeed := true
		if cerr != nil {
			if strings.Contains(cerr.Error(), "required revision has been compacted") && i > 0 {
				plog.Printf("[compact kv #%d] already compacted (endpoint %s)", i, u)
			} else {
				plog.Warningf("[compact kv #%d] error %v (endpoint %s)", i, cerr, u)
				err = cerr
				succeed = false
			}
		}
		if succeed {
			plog.Printf("[compact kv #%d] done (endpoint %s)", i, u)
		}
	}
	return err
}
func (b *balancer) callRemoteBalancer(lbc lbpb.LoadBalancerClient, seq int) (retry bool) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	stream, err := lbc.BalanceLoad(ctx, grpc.FailFast(false))
	if err != nil {
		grpclog.Printf("Failed to perform RPC to the remote balancer %v", err)
		return
	}
	b.mu.Lock()
	if b.done {
		b.mu.Unlock()
		return
	}
	b.mu.Unlock()
	initReq := &lbpb.LoadBalanceRequest{
		LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{
			InitialRequest: new(lbpb.InitialLoadBalanceRequest),
		},
	}
	if err := stream.Send(initReq); err != nil {
		// TODO: backoff on retry?
		return true
	}
	reply, err := stream.Recv()
	if err != nil {
		// TODO: backoff on retry?
		return true
	}
	initResp := reply.GetInitialResponse()
	if initResp == nil {
		grpclog.Println("Failed to receive the initial response from the remote balancer.")
		return
	}
	// TODO: Support delegation.
	if initResp.LoadBalancerDelegate != "" {
		// delegation
		grpclog.Println("TODO: Delegation is not supported yet.")
		return
	}

	// Retrieve the server list.
	for {
		reply, err := stream.Recv()
		if err != nil {
			break
		}
		b.mu.Lock()
		if b.done || seq < b.seq {
			b.mu.Unlock()
			return
		}
		b.seq++ // tick when receiving a new list of servers.
		seq = b.seq
		b.mu.Unlock()
		if serverList := reply.GetServerList(); serverList != nil {
			b.processServerList(serverList, seq)
		}
	}
	return true
}
func newStressDelete(kvc pb.KVClient, keySuffixRange int) stressFunc {
	return func(ctx context.Context) (error, int64) {
		_, err := kvc.DeleteRange(ctx, &pb.DeleteRangeRequest{
			Key: []byte(fmt.Sprintf("foo%016x", rand.Intn(keySuffixRange))),
		}, grpc.FailFast(false))
		return err, 1
	}
}
func newStressRange(kvc pb.KVClient, keySuffixRange int) stressFunc {
	return func(ctx context.Context) (error, int64) {
		_, err := kvc.Range(ctx, &pb.RangeRequest{
			Key: []byte(fmt.Sprintf("foo%016x", rand.Intn(keySuffixRange))),
		}, grpc.FailFast(false))
		// reads do not count toward the modification total
		return err, 0
	}
}
func (txn *txn) commit() (*TxnResponse, error) {
	r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas}
	resp, err := txn.kv.remote.Txn(txn.ctx, r, grpc.FailFast(false))
	if err != nil {
		return nil, err
	}
	return (*TxnResponse)(resp), nil
}
func (cli *grpcClient) InitChainAsync(validators []*types.Validator) *ReqRes {
	req := types.ToRequestInitChain(validators)
	res, err := cli.client.InitChain(context.Background(), req.GetInitChain(), grpc.FailFast(true))
	if err != nil {
		cli.StopForError(err)
	}
	return cli.finishAsyncCall(req, &types.Response{&types.Response_InitChain{res}})
}

func (cli *grpcClient) EndBlockAsync(height uint64) *ReqRes {
	req := types.ToRequestEndBlock(height)
	res, err := cli.client.EndBlock(context.Background(), req.GetEndBlock(), grpc.FailFast(true))
	if err != nil {
		cli.StopForError(err)
	}
	return cli.finishAsyncCall(req, &types.Response{&types.Response_EndBlock{res}})
}

func (cli *grpcClient) QueryAsync(query []byte) *ReqRes {
	req := types.ToRequestQuery(query)
	res, err := cli.client.Query(context.Background(), req.GetQuery(), grpc.FailFast(true))
	if err != nil {
		cli.StopForError(err)
	}
	return cli.finishAsyncCall(req, &types.Response{&types.Response_Query{res}})
}

func (cli *grpcClient) CommitAsync() *ReqRes {
	req := types.ToRequestCommit()
	res, err := cli.client.Commit(context.Background(), req.GetCommit(), grpc.FailFast(true))
	if err != nil {
		cli.StopForError(err)
	}
	return cli.finishAsyncCall(req, &types.Response{&types.Response_Commit{res}})
}

func (cli *grpcClient) SetOptionAsync(key string, value string) *ReqRes {
	req := types.ToRequestSetOption(key, value)
	res, err := cli.client.SetOption(context.Background(), req.GetSetOption(), grpc.FailFast(true))
	if err != nil {
		cli.StopForError(err)
	}
	return cli.finishAsyncCall(req, &types.Response{&types.Response_SetOption{res}})
}

func (cli *grpcClient) CheckTxAsync(tx []byte) *ReqRes {
	req := types.ToRequestCheckTx(tx)
	res, err := cli.client.CheckTx(context.Background(), req.GetCheckTx(), grpc.FailFast(true))
	if err != nil {
		cli.StopForError(err)
	}
	return cli.finishAsyncCall(req, &types.Response{&types.Response_CheckTx{res}})
}

func (cli *grpcClient) EchoAsync(msg string) *ReqRes {
	req := types.ToRequestEcho(msg)
	res, err := cli.client.Echo(context.Background(), req.GetEcho(), grpc.FailFast(true))
	if err != nil {
		cli.StopForError(err)
	}
	return cli.finishAsyncCall(req, &types.Response{&types.Response_Echo{res}})
}
func newStressPut(kvc pb.KVClient, keySuffixRange, keySize int) stressFunc {
	return func(ctx context.Context) (error, int64) {
		_, err := kvc.Put(ctx, &pb.PutRequest{
			Key:   []byte(fmt.Sprintf("foo%016x", rand.Intn(keySuffixRange))),
			Value: randBytes(keySize),
		}, grpc.FailFast(false))
		return err, 1
	}
}
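// Usage sketch (an assumption, not part of the source above): driving one of
// the stressFunc closures above in a loop until the context is canceled,
// summing the modification counts it reports. The accounting is illustrative
// and assumes the func(context.Context) (error, int64) form used above.
func runStresser(ctx context.Context, f stressFunc) (modified int64) {
	for ctx.Err() == nil {
		err, n := f(ctx)
		if err != nil {
			continue // transient failures are expected under stress
		}
		modified += n
	}
	return modified
}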
// Send implements the Sender interface.
func (s sender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	br, err := s.Batch(ctx, &ba, grpc.FailFast(false))
	if err != nil {
		return nil, roachpb.NewError(errors.Wrap(err, "roachpb.Batch RPC failed"))
	}
	pErr := br.Error
	br.Error = nil
	return br, pErr
}
func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) {
	var err error
	switch op.t {
	// TODO: handle other ops
	case tRange:
		var resp *pb.RangeResponse
		r := &pb.RangeRequest{
			Key:          op.key,
			RangeEnd:     op.end,
			Limit:        op.limit,
			Revision:     op.rev,
			Serializable: op.serializable,
			KeysOnly:     op.keysOnly,
			CountOnly:    op.countOnly,
		}
		if op.sort != nil {
			r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
			r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
		}
		resp, err = kv.remote.Range(ctx, r, grpc.FailFast(false))
		if err == nil {
			return OpResponse{get: (*GetResponse)(resp)}, nil
		}
	case tPut:
		var resp *pb.PutResponse
		r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID)}
		resp, err = kv.remote.Put(ctx, r, grpc.FailFast(false))
		if err == nil {
			return OpResponse{put: (*PutResponse)(resp)}, nil
		}
	case tDeleteRange:
		var resp *pb.DeleteRangeResponse
		r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end}
		resp, err = kv.remote.DeleteRange(ctx, r, grpc.FailFast(false))
		if err == nil {
			return OpResponse{del: (*DeleteResponse)(resp)}, nil
		}
	default:
		panic("Unknown op")
	}
	return OpResponse{}, err
}
func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) {
	r := &pb.MemberRemoveRequest{ID: id}
	resp, err := c.remote.MemberRemove(ctx, r, grpc.FailFast(false))
	if err == nil {
		return (*MemberRemoveResponse)(resp), nil
	}
	// Halting or not, the removal is not retried; both paths return the error.
	return nil, toErr(ctx, err)
}
func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
	r := &pb.MemberAddRequest{PeerURLs: peerAddrs}
	resp, err := c.remote.MemberAdd(ctx, r, grpc.FailFast(false))
	if err == nil {
		return (*MemberAddResponse)(resp), nil
	}
	// Halting or not, the add is not retried; both paths return the error.
	return nil, toErr(ctx, err)
}
func newStressRangeInterval(kvc pb.KVClient, keySuffixRange int) stressFunc {
	return func(ctx context.Context) (error, int64) {
		start := rand.Intn(keySuffixRange)
		end := start + 500
		_, err := kvc.Range(ctx, &pb.RangeRequest{
			Key:      []byte(fmt.Sprintf("foo%016x", start)),
			RangeEnd: []byte(fmt.Sprintf("foo%016x", end)),
		}, grpc.FailFast(false))
		return err, 0
	}
}
// The keys attached to a lease have the format "<leaseID>_<idx>", where idx
// orders key creation. Because every such key starts with the leaseID, a
// range read over the "<leaseID>" prefix determines whether the keys attached
// to a given leaseID have been deleted.
func (lc *leaseChecker) hasKeysAttachedToLeaseExpired(ctx context.Context, leaseID int64) (bool, error) {
	resp, err := lc.kvc.Range(ctx, &pb.RangeRequest{
		Key:      []byte(fmt.Sprintf("%d", leaseID)),
		RangeEnd: []byte(clientv3.GetPrefixRangeEnd(fmt.Sprintf("%d", leaseID))),
	}, grpc.FailFast(false))
	plog.Debugf("hasKeysAttachedToLeaseExpired %v resp %v error (%v)", leaseID, resp, err)
	if err != nil {
		plog.Errorf("retrieving keys attached to lease %v error: (%v)", leaseID, err)
		return false, err
	}
	return len(resp.Kvs) == 0, nil
}
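// Illustration (an assumption, not part of the source above): what
// clientv3.GetPrefixRangeEnd computes for the range read above. It returns
// the smallest key greater than every key carrying the given prefix, so the
// half-open interval [key, end) covers exactly the keys attached to the lease.
func leaseKeyRange(leaseID int64) (key, end string) {
	key = fmt.Sprintf("%d", leaseID)
	end = clientv3.GetPrefixRangeEnd(key) // e.g. "123" -> "124"
	return key, end
}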
func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
	// it is safe to retry on list.
	for {
		resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, grpc.FailFast(false))
		if err == nil {
			return (*MemberListResponse)(resp), nil
		}
		if isHaltErr(ctx, err) {
			return nil, toErr(ctx, err)
		}
	}
}
func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) {
	// it is safe to retry on update.
	for {
		r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
		resp, err := c.remote.MemberUpdate(ctx, r, grpc.FailFast(false))
		if err == nil {
			return (*MemberUpdateResponse)(resp), nil
		}
		if isHaltErr(ctx, err) {
			return nil, toErr(ctx, err)
		}
	}
}
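// Usage sketch (an assumption, not part of the source above): the exported
// clientv3 Cluster API built on the MemberAdd / MemberUpdate / MemberRemove
// methods above. The peer URL is illustrative.
func rotateMember(ctx context.Context, cli *clientv3.Client, oldID uint64) error {
	// Add the replacement member first so quorum is preserved.
	added, err := cli.MemberAdd(ctx, []string{"http://localhost:12380"})
	if err != nil {
		return err
	}
	_ = added.Member.ID // ID assigned to the newly added member

	// Then drop the old member.
	_, err = cli.MemberRemove(ctx, oldID)
	return err
}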