// When wait returns, either the new transport is up or ClientConn is
// closing. Used to avoid working on a dying transport. It updates and
// returns the transport and its version when there is no error.
func (cc *ClientConn) wait(ctx context.Context, ts int) (transport.ClientTransport, int, error) {
	for {
		cc.mu.Lock()
		switch {
		case cc.state == Shutdown:
			cc.mu.Unlock()
			return nil, 0, ErrClientConnClosing
		case ts < cc.transportSeq:
			// Worked on a dying transport. Try the new one immediately.
			defer cc.mu.Unlock()
			return cc.transport, cc.transportSeq, nil
		default:
			ready := cc.ready
			if ready == nil {
				ready = make(chan struct{})
				cc.ready = ready
			}
			cc.mu.Unlock()
			select {
			case <-ctx.Done():
				return nil, 0, transport.ContextErr(ctx.Err())
			// Wait until the new transport is ready or failed.
			case <-ready:
			}
		}
	}
}
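// The pattern above (a version-stamped resource guarded by a mutex, with a
// lazily created ready channel that is closed when a replacement is
// published) can be shown in isolation. The sketch below is a simplified,
// hypothetical holder type, not part of grpc-go; it assumes the standard
// "context", "errors", and "sync" imports.
type holder struct {
	mu     sync.Mutex
	closed bool
	seq    int           // incremented every time a new value is published
	value  string        // stands in for the transport
	ready  chan struct{} // non-nil while waiters are blocked on a replacement
}

var errHolderClosing = errors.New("holder is closing")

// wait mirrors ClientConn.wait: it returns immediately if the caller's seq is
// stale, otherwise it blocks until a new value is published or ctx expires.
func (h *holder) wait(ctx context.Context, seq int) (string, int, error) {
	for {
		h.mu.Lock()
		switch {
		case h.closed:
			h.mu.Unlock()
			return "", 0, errHolderClosing
		case seq < h.seq:
			// Caller was holding a stale value; hand out the new one.
			v, s := h.value, h.seq
			h.mu.Unlock()
			return v, s, nil
		default:
			ready := h.ready
			if ready == nil {
				ready = make(chan struct{})
				h.ready = ready
			}
			h.mu.Unlock()
			select {
			case <-ctx.Done():
				return "", 0, ctx.Err()
			// Wake up when a replacement (or shutdown) is published.
			case <-ready:
			}
		}
	}
}

// publish installs a new value, bumps the version, and wakes all waiters.
func (h *holder) publish(v string) {
	h.mu.Lock()
	h.value = v
	h.seq++
	if h.ready != nil {
		close(h.ready)
		h.ready = nil
	}
	h.mu.Unlock()
}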
func TestContextErr(t *testing.T) {
	for _, test := range []struct {
		// input
		errIn error
		// outputs
		errOut transport.StreamError
	}{
		{context.DeadlineExceeded, transport.StreamErrorf(codes.DeadlineExceeded, "%v", context.DeadlineExceeded)},
		{context.Canceled, transport.StreamErrorf(codes.Canceled, "%v", context.Canceled)},
	} {
		err := transport.ContextErr(test.errIn)
		if err != test.errOut {
			t.Fatalf("ContextErr(%v) = %v\nwant %v", test.errIn, err, test.errOut)
		}
	}
}
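// For context, a minimal sketch of the mapping this test exercises, written
// under the assumption that transport.ContextErr translates the two context
// errors into StreamError values via StreamErrorf. The fallback branch and
// its message are assumptions for illustration, not the actual
// implementation.
func contextErrSketch(err error) transport.StreamError {
	switch err {
	case context.DeadlineExceeded:
		return transport.StreamErrorf(codes.DeadlineExceeded, "%v", err)
	case context.Canceled:
		return transport.StreamErrorf(codes.Canceled, "%v", err)
	default:
		// Assumption: any other error surfaces as an internal stream error.
		return transport.StreamErrorf(codes.Internal, "unexpected error from context: %v", err)
	}
}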