Example no. 1
// When wait returns, either the new transport is up or ClientConn is
// closing. Used to avoid working on a dying transport. It updates and
// returns the transport and its version when there is no error.
func (cc *ClientConn) wait(ctx context.Context, ts int) (transport.ClientTransport, int, error) {
	for {
		cc.mu.Lock()
		switch {
		case cc.closing:
			cc.mu.Unlock()
			return nil, 0, ErrClientConnClosing
		case ts < cc.transportSeq:
			// Worked on a dying transport. Try the new one immediately.
			defer cc.mu.Unlock()
			return cc.transport, cc.transportSeq, nil
		default:
			ready := cc.ready
			if ready == nil {
				ready = make(chan struct{})
				cc.ready = ready
			}
			cc.mu.Unlock()
			select {
			case <-ctx.Done():
				return nil, 0, transport.ContextErr(ctx.Err())
			// Wait until the new transport is ready or failed.
			case <-ready:
			}
		}
	}
}
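All of the wait variants in this section share one synchronization pattern: state guarded by a mutex, plus a lazily created ready channel that is closed (never sent on) so that every blocked waiter wakes at once and re-checks the state. Below is a self-contained toy re-creation of that pattern; the conn type and its fields are stand-ins invented for illustration, not gRPC types.

package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"time"
)

var errClosing = errors.New("conn is closing")

// conn is a toy stand-in for the connection types above: state guarded
// by a mutex plus a lazily created ready channel that is closed to wake
// every waiter at once when the state changes.
type conn struct {
	mu     sync.Mutex
	ready  chan struct{}
	value  string // stands in for the transport
	closed bool
}

func (c *conn) wait(ctx context.Context) (string, error) {
	for {
		c.mu.Lock()
		switch {
		case c.closed:
			c.mu.Unlock()
			return "", errClosing
		case c.value != "":
			v := c.value
			c.mu.Unlock()
			return v, nil
		default:
			ready := c.ready
			if ready == nil {
				ready = make(chan struct{})
				c.ready = ready
			}
			c.mu.Unlock()
			select {
			case <-ctx.Done():
				return "", ctx.Err()
			case <-ready:
				// Re-check the state under the lock on the next iteration.
			}
		}
	}
}

// setReady publishes a value and broadcasts to all waiters by closing
// the ready channel, which is what the real code does when a new
// transport comes up.
func (c *conn) setReady(v string) {
	c.mu.Lock()
	c.value = v
	if c.ready != nil {
		close(c.ready)
		c.ready = nil
	}
	c.mu.Unlock()
}

func main() {
	c := &conn{}
	go func() {
		time.Sleep(50 * time.Millisecond)
		c.setReady("transport-1")
	}()
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println(c.wait(ctx))
}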
Example no. 2
// wait blocks until i) the new transport is up or ii) ctx is done or iii) ac is closed.
func (ac *addrConn) wait(ctx context.Context) (transport.ClientTransport, error) {
	for {
		ac.mu.Lock()
		switch {
		case ac.state == Shutdown:
			ac.mu.Unlock()
			return nil, errConnClosing
		case ac.state == Ready:
			ct := ac.transport
			ac.mu.Unlock()
			return ct, nil
		default:
			ready := ac.ready
			if ready == nil {
				ready = make(chan struct{})
				ac.ready = ready
			}
			ac.mu.Unlock()
			select {
			case <-ctx.Done():
				return nil, transport.ContextErr(ctx.Err())
			// Wait until the new transport is ready or failed.
			case <-ready:
			}
		}
	}
}
Example no. 3
// Wait blocks until i) the new transport is up or ii) ctx is done or iii) cc is closed.
func (cc *Conn) Wait(ctx context.Context) (transport.ClientTransport, error) {
	for {
		cc.mu.Lock()
		switch {
		case cc.state == Shutdown:
			cc.mu.Unlock()
			return nil, ErrClientConnClosing
		case cc.state == Ready:
			ct := cc.transport
			cc.mu.Unlock()
			return ct, nil
		case cc.state == TransientFailure:
			cc.mu.Unlock()
			// Return so that the caller gets a chance to pick another transport
			// to perform the RPC instead of sticking to this one.
			return nil, ErrTransientFailure
		default:
			ready := cc.ready
			if ready == nil {
				ready = make(chan struct{})
				cc.ready = ready
			}
			cc.mu.Unlock()
			select {
			case <-ctx.Done():
				return nil, transport.ContextErr(ctx.Err())
			// Wait until the new transport is ready or failed.
			case <-ready:
			}
		}
	}
}
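Unlike the other variants, this Wait can return ErrTransientFailure without blocking, which is what makes fail-fast behavior possible. A hypothetical caller built on the Wait above; pickFailFast is an invented name, not part of the snippets:

// Hypothetical fail-fast wrapper around the Wait above: surface
// ErrTransientFailure to the caller instead of waiting for this
// connection to recover, so the RPC can be attempted elsewhere.
func pickFailFast(ctx context.Context, cc *Conn) (transport.ClientTransport, error) {
	t, err := cc.Wait(ctx)
	if err == ErrTransientFailure {
		// Do not block on a connection that is known to be failing.
		return nil, err
	}
	return t, err
}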
Example no. 4
// Wait blocks until i) the new transport is up or ii) ctx is done or iii) cc is closed.
func (cc *Conn) Wait(ctx context.Context) (transport.ClientTransport, error) {
	// Wait? Why do we wait here?
	for {
		cc.mu.Lock()
		switch {
		// 1. If the Conn has been shut down, fail immediately.
		case cc.state == Shutdown:
			cc.mu.Unlock()
			return nil, ErrClientConnClosing
		// 2. If the state is Ready, return the transport.
		case cc.state == Ready:
			ct := cc.transport
			cc.mu.Unlock()
			return ct, nil
		default:
			// 3. Otherwise, wait until the transport is ready or the context is done.
			ready := cc.ready
			if ready == nil {
				ready = make(chan struct{})
				cc.ready = ready
			}
			cc.mu.Unlock()
			select {
			case <-ctx.Done():
				return nil, transport.ContextErr(ctx.Err())
			// Wait until the new transport is ready or failed.
			case <-ready:
			}
		}
	}
}
Example no. 5
// NewClientStream creates a new Stream for the client side. This is called
// by generated code.
func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
	var (
		t   transport.ClientTransport
		err error
	)
	t, err = cc.dopts.picker.Pick(ctx)
	if err != nil {
		return nil, toRPCErr(err)
	}
	// TODO(zhaoq): CallOption is omitted. Add support when it is needed.
	callHdr := &transport.CallHdr{
		Host:   cc.authority,
		Method: method,
		Flush:  desc.ServerStreams && desc.ClientStreams,
	}
	if cc.dopts.cp != nil {
		callHdr.SendCompress = cc.dopts.cp.Type()
	}
	cs := &clientStream{
		desc:    desc,
		codec:   cc.dopts.codec,
		cp:      cc.dopts.cp,
		dc:      cc.dopts.dc,
		tracing: EnableTracing,
	}
	if cc.dopts.cp != nil {
		cs.cbuf = new(bytes.Buffer)
	}
	if cs.tracing {
		cs.trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
		cs.trInfo.firstLine.client = true
		if deadline, ok := ctx.Deadline(); ok {
			cs.trInfo.firstLine.deadline = deadline.Sub(time.Now())
		}
		cs.trInfo.tr.LazyLog(&cs.trInfo.firstLine, false)
		ctx = trace.NewContext(ctx, cs.trInfo.tr)
	}
	s, err := t.NewStream(ctx, callHdr)
	if err != nil {
		cs.finish(err)
		return nil, toRPCErr(err)
	}
	cs.t = t
	cs.s = s
	cs.p = &parser{r: s}
	// Listen on ctx.Done() to detect cancellation when there are no pending
	// I/O operations on this stream.
	go func() {
		select {
		case <-t.Error():
			// Transport error encountered; simply exit.
		case <-s.Context().Done():
			err := s.Context().Err()
			cs.finish(err)
			cs.closeTransportStream(transport.ContextErr(err))
		}
	}()
	return cs, nil
}
Example no. 6
// Get returns the next addr in the rotation.
func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) {
	var ch chan struct{}
	rr.mu.Lock()
	if rr.done {
		rr.mu.Unlock()
		err = ErrClientConnClosing
		return
	}
	if rr.next >= len(rr.connected) {
		rr.next = 0
	}
	if len(rr.connected) > 0 {
		addr = rr.connected[rr.next]
		rr.next++
		rr.mu.Unlock()
		return
	}
	// There is no address available. Wait on rr.waitCh.
	// TODO(zhaoq): Handle the case when opts.BlockingWait is false.
	if rr.waitCh == nil {
		ch = make(chan struct{})
		rr.waitCh = ch
	} else {
		ch = rr.waitCh
	}
	rr.mu.Unlock()
	for {
		select {
		case <-ctx.Done():
			err = transport.ContextErr(ctx.Err())
			return
		case <-ch:
			rr.mu.Lock()
			if rr.done {
				rr.mu.Unlock()
				err = ErrClientConnClosing
				return
			}
			if len(rr.connected) == 0 {
				// The newly added addr got removed by Down() again.
				if rr.waitCh == nil {
					ch = make(chan struct{})
					rr.waitCh = ch
				} else {
					ch = rr.waitCh
				}
				rr.mu.Unlock()
				continue
			}
			if rr.next >= len(rr.connected) {
				rr.next = 0
			}
			addr = rr.connected[rr.next]
			rr.next++
			rr.mu.Unlock()
			return
		}
	}
}
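Get only blocks on rr.waitCh, so something else must close that channel once an address becomes usable. A sketch of the companion Up method, reconstructed from what Get requires; the exact bookkeeping and the down helper are assumptions, not code from this balancer:

// Sketch of the Up side: register a newly connected address and, if it
// is the first one, wake all blocked Get() callers by closing waitCh.
func (rr *roundRobin) Up(addr Address) func(error) {
	rr.mu.Lock()
	defer rr.mu.Unlock()
	for _, a := range rr.connected {
		if a == addr {
			return nil
		}
	}
	rr.connected = append(rr.connected, addr)
	if len(rr.connected) == 1 && rr.waitCh != nil {
		// Broadcast: close wakes every waiter, where a send would wake one.
		close(rr.waitCh)
		rr.waitCh = nil
	}
	return func(err error) {
		rr.down(addr, err) // assumed helper that removes addr again
	}
}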
Example no. 7
func TestContextErr(t *testing.T) {
	for _, test := range []struct {
		// input
		errIn error
		// outputs
		errOut transport.StreamError
	}{
		{context.DeadlineExceeded, transport.StreamErrorf(codes.DeadlineExceeded, "%v", context.DeadlineExceeded)},
		{context.Canceled, transport.StreamErrorf(codes.Canceled, "%v", context.Canceled)},
	} {
		err := transport.ContextErr(test.errIn)
		if err != test.errOut {
			t.Fatalf("ContextErr(%v) = %v, want %v", test.errIn, err, test.errOut)
		}
	}
}
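The test fully pins down the mapping transport.ContextErr must implement for the two context errors. A minimal sketch consistent with the table above; how inputs other than these two are handled is an assumption, not something the test specifies:

// contextErr sketches the mapping the test asserts: each context error
// becomes a StreamError carrying the matching gRPC code.
func contextErr(err error) transport.StreamError {
	switch err {
	case context.DeadlineExceeded:
		return transport.StreamErrorf(codes.DeadlineExceeded, "%v", err)
	case context.Canceled:
		return transport.StreamErrorf(codes.Canceled, "%v", err)
	}
	// Assumed fallback for unexpected inputs; not covered by the test.
	return transport.StreamErrorf(codes.Internal, "unexpected context error: %v", err)
}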
Example no. 8
// NewClientStream creates a new Stream for the client side. This is called
// by generated code.
func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
	var (
		t   transport.ClientTransport
		err error
	)
	t, err = cc.dopts.picker.Pick(ctx)
	if err != nil {
		return nil, toRPCErr(err)
	}
	// TODO(zhaoq): CallOption is omitted. Add support when it is needed.
	callHdr := &transport.CallHdr{
		Host:   cc.authority,
		Method: method,
	}
	cs := &clientStream{
		desc:    desc,
		codec:   cc.dopts.codec,
		tracing: EnableTracing,
	}
	if cs.tracing {
		cs.trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
		cs.trInfo.firstLine.client = true
		if deadline, ok := ctx.Deadline(); ok {
			cs.trInfo.firstLine.deadline = deadline.Sub(time.Now())
		}
		cs.trInfo.tr.LazyLog(&cs.trInfo.firstLine, false)
		ctx = trace.NewContext(ctx, cs.trInfo.tr)
	}
	s, err := t.NewStream(ctx, callHdr)
	if err != nil {
		return nil, toRPCErr(err)
	}
	cs.t = t
	cs.s = s
	cs.p = &parser{s: s}
	// Listen on ctx.Done() to detect cancellation when there are no pending
	// I/O operations on this stream.
	go func() {
		<-s.Context().Done()
		cs.closeTransportStream(transport.ContextErr(s.Context().Err()))
	}()
	return cs, nil
}
Example no. 9
// Get returns the next addr in the rotation.
func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) {
	var ch chan struct{}
	rr.mu.Lock()
	if rr.done {
		rr.mu.Unlock()
		err = ErrClientConnClosing
		return
	}

	if len(rr.addrs) > 0 {
		if rr.next >= len(rr.addrs) {
			rr.next = 0
		}
		next := rr.next
		for {
			a := rr.addrs[next]
			next = (next + 1) % len(rr.addrs)
			if a.connected {
				addr = a.addr
				rr.next = next
				rr.mu.Unlock()
				return
			}
			if next == rr.next {
				// Iterated over all possible addresses, but none is connected.
				break
			}
		}
	}
	// There is no address available. Wait on rr.waitCh.
	// TODO(zhaoq): Handle the case when opts.BlockingWait is false.
	if rr.waitCh == nil {
		ch = make(chan struct{})
		rr.waitCh = ch
	} else {
		ch = rr.waitCh
	}
	rr.mu.Unlock()
	for {
		select {
		case <-ctx.Done():
			err = transport.ContextErr(ctx.Err())
			return
		case <-ch:
			rr.mu.Lock()
			if rr.done {
				rr.mu.Unlock()
				err = ErrClientConnClosing
				return
			}

			if len(rr.addrs) > 0 {
				if rr.next >= len(rr.addrs) {
					rr.next = 0
				}
				next := rr.next
				for {
					a := rr.addrs[next]
					next = (next + 1) % len(rr.addrs)
					if a.connected {
						addr = a.addr
						rr.next = next
						rr.mu.Unlock()
						return
					}
					if next == rr.next {
						// Iterated over all possible addresses, but none is connected.
						break
					}
				}
			}
			// The newly added addr got removed by Down() again.
			if rr.waitCh == nil {
				ch = make(chan struct{})
				rr.waitCh = ch
			} else {
				ch = rr.waitCh
			}
			rr.mu.Unlock()
		}
	}
}
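The scan in this version checks the slot at next before advancing and stops once the advanced index wraps back to the starting position, so each slot is examined exactly once per lap. The same wraparound logic in isolation, on a plain bool slice:

// firstConnected walks the ring once starting at next and returns the
// index of the first connected slot, or false after a full fruitless lap.
func firstConnected(connected []bool, next int) (int, bool) {
	if len(connected) == 0 {
		return 0, false
	}
	i := next
	for {
		if connected[i] {
			return i, true
		}
		i = (i + 1) % len(connected)
		if i == next {
			return 0, false
		}
	}
}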
Example no. 10
func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
	var (
		t   transport.ClientTransport
		s   *transport.Stream
		put func()
	)
	c := defaultCallInfo
	for _, o := range opts {
		if err := o.before(&c); err != nil {
			return nil, toRPCErr(err)
		}
	}
	callHdr := &transport.CallHdr{
		Host:   cc.authority,
		Method: method,
		Flush:  desc.ServerStreams && desc.ClientStreams,
	}
	if cc.dopts.cp != nil {
		callHdr.SendCompress = cc.dopts.cp.Type()
	}
	var trInfo traceInfo
	if EnableTracing {
		trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
		trInfo.firstLine.client = true
		if deadline, ok := ctx.Deadline(); ok {
			trInfo.firstLine.deadline = deadline.Sub(time.Now())
		}
		trInfo.tr.LazyLog(&trInfo.firstLine, false)
		ctx = trace.NewContext(ctx, trInfo.tr)
		defer func() {
			if err != nil {
				// Finish tr here: when an error is returned, tr is
				// never handed back to the caller.
				trInfo.tr.LazyPrintf("RPC: [%v]", err)
				trInfo.tr.SetError()
				trInfo.tr.Finish()
			}
		}()
	}
	if stats.On() {
		ctx = stats.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method})
		begin := &stats.Begin{
			Client:    true,
			BeginTime: time.Now(),
			FailFast:  c.failFast,
		}
		stats.HandleRPC(ctx, begin)
	}
	defer func() {
		if err != nil && stats.On() {
			// Only handle end stats if err != nil.
			end := &stats.End{
				Client: true,
				Error:  err,
			}
			stats.HandleRPC(ctx, end)
		}
	}()
	gopts := BalancerGetOptions{
		BlockingWait: !c.failFast,
	}
	for {
		t, put, err = cc.getTransport(ctx, gopts)
		if err != nil {
			// TODO(zhaoq): Probably revisit the error handling.
			if _, ok := err.(*rpcError); ok {
				return nil, err
			}
			if err == errConnClosing || err == errConnUnavailable {
				if c.failFast {
					return nil, Errorf(codes.Unavailable, "%v", err)
				}
				continue
			}
			// All the other errors are treated as Internal errors.
			return nil, Errorf(codes.Internal, "%v", err)
		}

		s, err = t.NewStream(ctx, callHdr)
		if err != nil {
			if put != nil {
				put()
				put = nil
			}
			if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain {
				if c.failFast {
					return nil, toRPCErr(err)
				}
				continue
			}
			return nil, toRPCErr(err)
		}
		break
	}
	cs := &clientStream{
		opts:  opts,
		c:     c,
		desc:  desc,
		codec: cc.dopts.codec,
		cp:    cc.dopts.cp,
		dc:    cc.dopts.dc,

		put: put,
		t:   t,
		s:   s,
		p:   &parser{r: s},

		tracing: EnableTracing,
		trInfo:  trInfo,

		statsCtx: ctx,
	}
	if cc.dopts.cp != nil {
		cs.cbuf = new(bytes.Buffer)
	}
	// Listen on ctx.Done() to detect cancellation and s.Done() to detect normal termination
	// when there are no pending I/O operations on this stream.
	go func() {
		select {
		case <-t.Error():
			// Transport error encountered; simply exit.
		case <-s.Done():
			// TODO: The trace of the RPC is terminated here when there is no pending
			// I/O, which is probably not the optimal solution.
			if s.StatusCode() == codes.OK {
				cs.finish(nil)
			} else {
				cs.finish(Errorf(s.StatusCode(), "%s", s.StatusDesc()))
			}
			cs.closeTransportStream(nil)
		case <-s.GoAway():
			cs.finish(errConnDrain)
			cs.closeTransportStream(errConnDrain)
		case <-s.Context().Done():
			err := s.Context().Err()
			cs.finish(err)
			cs.closeTransportStream(transport.ContextErr(err))
		}
	}()
	return cs, nil
}
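Stripped of the gRPC types, the monitoring goroutine at the end of this variant is a small reusable pattern: block in a select over every termination signal and run the cleanup for whichever fires first. A toy rendering with invented names, not gRPC APIs:

// watch launches the watchdog: whichever signal arrives first decides
// how the stream is finished. All channel and function names here are
// stand-ins for the transport error, done, go-away, and cancellation
// channels used above.
func watch(transportErr, done, goAway, canceled <-chan struct{}, finish func(reason string)) {
	go func() {
		select {
		case <-transportErr:
			// Transport-level failure; the transport's own teardown cleans up.
		case <-done:
			finish("stream completed")
		case <-goAway:
			finish("server is draining this connection")
		case <-canceled:
			finish("context canceled or deadline exceeded")
		}
	}()
}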
Example no. 11
// NewClientStream creates a new Stream for the client side. This is called
// by generated code.
func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
	var (
		t   transport.ClientTransport
		s   *transport.Stream
		err error
		put func()
	)
	c := defaultCallInfo
	for _, o := range opts {
		if err := o.before(&c); err != nil {
			return nil, toRPCErr(err)
		}
	}
	callHdr := &transport.CallHdr{
		Host:   cc.authority,
		Method: method,
		Flush:  desc.ServerStreams && desc.ClientStreams,
	}
	if cc.dopts.cp != nil {
		callHdr.SendCompress = cc.dopts.cp.Type()
	}
	cs := &clientStream{
		opts:    opts,
		c:       c,
		desc:    desc,
		codec:   cc.dopts.codec,
		cp:      cc.dopts.cp,
		dc:      cc.dopts.dc,
		tracing: EnableTracing,
	}
	if cc.dopts.cp != nil {
		cs.cbuf = new(bytes.Buffer)
	}
	if cs.tracing {
		cs.trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
		cs.trInfo.firstLine.client = true
		if deadline, ok := ctx.Deadline(); ok {
			cs.trInfo.firstLine.deadline = deadline.Sub(time.Now())
		}
		cs.trInfo.tr.LazyLog(&cs.trInfo.firstLine, false)
		ctx = trace.NewContext(ctx, cs.trInfo.tr)
	}
	gopts := BalancerGetOptions{
		BlockingWait: !c.failFast,
	}
	for {
		t, put, err = cc.getTransport(ctx, gopts)
		if err != nil {
			// TODO(zhaoq): Probably revisit the error handling.
			if _, ok := err.(*rpcError); ok {
				return nil, err
			}
			if err == errConnClosing {
				if c.failFast {
					return nil, Errorf(codes.Unavailable, "%v", errConnClosing)
				}
				continue
			}
			// All the other errors are treated as Internal errors.
			return nil, Errorf(codes.Internal, "%v", err)
		}

		s, err = t.NewStream(ctx, callHdr)
		if err != nil {
			if put != nil {
				put()
				put = nil
			}
			if _, ok := err.(transport.ConnectionError); ok {
				if c.failFast {
					cs.finish(err)
					return nil, toRPCErr(err)
				}
				continue
			}
			return nil, toRPCErr(err)
		}
		break
	}
	cs.put = put
	cs.t = t
	cs.s = s
	cs.p = &parser{r: s}
	// Listen on ctx.Done() to detect cancellation and s.Done() to detect normal termination
	// when there are no pending I/O operations on this stream.
	go func() {
		select {
		case <-t.Error():
			// Transport error encountered; simply exit.
		case <-s.Done():
			// TODO: The trace of the RPC is terminated here when there is no pending
			// I/O, which is probably not the optimal solution.
			if s.StatusCode() == codes.OK {
				cs.finish(nil)
			} else {
				cs.finish(Errorf(s.StatusCode(), "%s", s.StatusDesc()))
			}
			cs.closeTransportStream(nil)
		case <-s.Context().Done():
			err := s.Context().Err()
			cs.finish(err)
			cs.closeTransportStream(transport.ContextErr(err))
		}
	}()
	return cs, nil
}