Example 1
func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) {
	if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok {
		f(level, format, args...)
		return
	}
	logf(fromContext(ctx), level, format, args...)
}
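The override consulted above is presumably installed by a companion helper along these lines (a hedged sketch mirroring WithCallOverride in Example 6; the helper name and the logOverrideFunc signature are assumptions):

func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context {
	// Hypothetical installer: later Logf calls on this context hit f
	// instead of the default logf path.
	return netcontext.WithValue(ctx, &logOverrideKey, f)
}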
Example 2
// NewClientStream creates a new Stream for the client side. This is called
// by generated code.
func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
	var (
		t   transport.ClientTransport
		err error
	)
	t, err = cc.dopts.picker.Pick(ctx)
	if err != nil {
		return nil, toRPCErr(err)
	}
	// TODO(zhaoq): CallOption is omitted. Add support when it is needed.
	callHdr := &transport.CallHdr{
		Host:   cc.authority,
		Method: method,
		Flush:  desc.ServerStreams && desc.ClientStreams,
	}
	if cc.dopts.cp != nil {
		callHdr.SendCompress = cc.dopts.cp.Type()
	}
	cs := &clientStream{
		desc:    desc,
		codec:   cc.dopts.codec,
		cp:      cc.dopts.cp,
		dc:      cc.dopts.dc,
		tracing: EnableTracing,
	}
	if cc.dopts.cp != nil {
		cs.cbuf = new(bytes.Buffer)
	}
	if cs.tracing {
		cs.trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
		cs.trInfo.firstLine.client = true
		if deadline, ok := ctx.Deadline(); ok {
			cs.trInfo.firstLine.deadline = deadline.Sub(time.Now())
		}
		cs.trInfo.tr.LazyLog(&cs.trInfo.firstLine, false)
		ctx = trace.NewContext(ctx, cs.trInfo.tr)
	}
	s, err := t.NewStream(ctx, callHdr)
	if err != nil {
		cs.finish(err)
		return nil, toRPCErr(err)
	}
	cs.t = t
	cs.s = s
	cs.p = &parser{s: s}
	// Listen on ctx.Done() to detect cancellation when there are no pending
	// I/O operations on this stream.
	go func() {
		select {
		case <-t.Error():
			// On a transport error, simply exit.
		case <-s.Context().Done():
			err := s.Context().Err()
			cs.finish(err)
			cs.closeTransportStream(transport.ContextErr(err))
		}
	}()
	return cs, nil
}
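A minimal sketch of how generated streaming code might call this; the descriptor, method path, and wrapper are placeholders, not the real generated output:

var watchDesc = StreamDesc{
	StreamName:    "Watch",
	ServerStreams: true,
}

func watch(ctx context.Context, cc *ClientConn) (ClientStream, error) {
	// The returned ClientStream is then wrapped in a typed stub that
	// calls SendMsg/RecvMsg with concrete proto messages.
	return NewClientStream(ctx, &watchDesc, cc, "/foo.Foo/Watch")
}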
Example 3
// WaitForStateChange blocks until the state changes to something other than the sourceState.
func (cc *Conn) WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) {
	cc.mu.Lock()
	defer cc.mu.Unlock()
	if sourceState != cc.state {
		return cc.state, nil
	}
	done := make(chan struct{})
	var err error
	go func() {
		select {
		case <-ctx.Done():
			cc.mu.Lock()
			err = ctx.Err()
			cc.stateCV.Broadcast()
			cc.mu.Unlock()
		case <-done:
		}
	}()
	defer close(done)
	for sourceState == cc.state {
		cc.stateCV.Wait()
		if err != nil {
			return cc.state, err
		}
	}
	return cc.state, nil
}
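A caller can chain these returns to block until a particular state is reached; a sketch, assuming the ConnectivityState values used elsewhere in this package and a hypothetical accessor for the current state:

func waitForReady(ctx context.Context, cc *Conn) error {
	state := cc.State() // hypothetical accessor for the current state
	for state != Ready {
		var err error
		state, err = cc.WaitForStateChange(ctx, state)
		if err != nil {
			return err // ctx.Err(), surfaced via the waiter goroutine above
		}
	}
	return nil
}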
Example 4
// Wait blocks until i) the new transport is up or ii) ctx is done or iii) cc is closed.
func (cc *Conn) Wait(ctx context.Context) (transport.ClientTransport, error) {
	for {
		cc.mu.Lock()
		switch {
		case cc.state == Shutdown:
			cc.mu.Unlock()
			return nil, ErrClientConnClosing
		case cc.state == Ready:
			cc.mu.Unlock()
			return cc.transport, nil
		default:
			ready := cc.ready
			if ready == nil {
				ready = make(chan struct{})
				cc.ready = ready
			}
			cc.mu.Unlock()
			select {
			case <-ctx.Done():
				return nil, transport.ContextErr(ctx.Err())
			// Wait until the new transport is ready or failed.
			case <-ready:
			}
		}
	}
}
Example 5
func contextDone(ctx context.Context) bool {
	select {
	case <-ctx.Done():
		return true
	default:
		return false
	}
}
Example 6
func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context {
	// We avoid appending to any existing call override
	// so we don't risk overwriting a popped stack below.
	var cofs []CallOverrideFunc
	if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok {
		cofs = append(cofs, uf...)
	}
	cofs = append(cofs, f)
	return netcontext.WithValue(ctx, &callOverrideKey, cofs)
}
Example 7
// wait blocks until it can receive from ctx.Done, closing, or proceed.
// If it receives from ctx.Done, it returns 0, the StreamError for ctx.Err.
// If it receives from closing, it returns 0, ErrConnClosing.
// If it receives from proceed, it returns the received integer, nil.
func wait(ctx context.Context, closing <-chan struct{}, proceed <-chan int) (int, error) {
	select {
	case <-ctx.Done():
		return 0, ContextErr(ctx.Err())
	case <-closing:
		return 0, ErrConnClosing
	case i := <-proceed:
		return i, nil
	}
}
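For instance, the transport in Example 23 uses this helper to acquire one unit of stream quota; a condensed sketch of that call site, where shutdown and quota stand in for t.shutdownChan and t.streamsQuota.acquire():

func acquireOne(ctx context.Context, shutdown <-chan struct{}, quota <-chan int) error {
	// Block for quota, a shutdown signal, or context cancellation,
	// whichever comes first.
	sq, err := wait(ctx, shutdown, quota)
	if err != nil {
		return err // ContextErr(ctx.Err()) or ErrConnClosing
	}
	if sq > 1 {
		// A real quota pool returns the unused balance here (see Example 23).
		_ = sq - 1
	}
	return nil
}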
Example 8
func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) {
	cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc)
	if len(cofs) == 0 {
		return nil, nil, false
	}
	// We found a list of overrides; grab the last, and reconstitute a
	// context that will hide it.
	f := cofs[len(cofs)-1]
	ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1])
	return f, ctx, true
}
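Together with WithCallOverride (Example 6) this forms a LIFO stack: each push copies the slice, and each pop returns a context that hides the popped entry from the next lookup. A sketch of that behavior (the helper and message values are placeholders):

func overrideStackDemo(in, out proto.Message) {
	named := func(name string) CallOverrideFunc {
		return func(ctx netcontext.Context, service, method string, in, out proto.Message) error {
			fmt.Println("handled by", name)
			return nil
		}
	}
	ctx := netcontext.Background()
	ctx = WithCallOverride(ctx, named("outer"))
	ctx = WithCallOverride(ctx, named("inner"))

	f, ctx, _ := callOverrideFromContext(ctx) // pops "inner"
	f(ctx, "svc", "Method", in, out)
	g, ctx, _ := callOverrideFromContext(ctx) // pops "outer"
	g(ctx, "svc", "Method", in, out)
}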
Example 9
func (c *ProtoClient) Call(ctx context.Context, method string, req, resp proto.Message) error {
	payload, err := proto.Marshal(req)
	if err != nil {
		return err
	}

	httpReq, err := http.NewRequest("POST", c.endpoint+method, bytes.NewReader(payload))
	if err != nil {
		return err
	}
	httpReq.Header.Set("Content-Type", "application/x-protobuf")
	if ua := c.userAgent; ua != "" {
		httpReq.Header.Set("User-Agent", ua)
	}

	errc := make(chan error, 1)
	cancel := makeReqCancel(httpReq)

	go func() {
		r, err := c.client.Do(httpReq)
		if err != nil {
			errc <- err
			return
		}
		defer r.Body.Close()

		body, err := ioutil.ReadAll(r.Body)
		if r.StatusCode != http.StatusOK {
			err = &ErrHTTP{
				StatusCode: r.StatusCode,
				Body:       body,
				err:        err,
			}
		}
		if err != nil {
			errc <- err
			return
		}
		errc <- proto.Unmarshal(body, resp)
	}()

	select {
	case <-ctx.Done():
		cancel(c.client.Transport) // Cancel the HTTP request.
		return ctx.Err()
	case err := <-errc:
		return err
	}
}
Example 10
func ContextClient(ctx context.Context) (*http.Client, error) {
	if ctx != nil {
		if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok {
			return hc, nil
		}
	}
	for _, fn := range contextClientFuncs {
		c, err := fn(ctx)
		if err != nil {
			return nil, err
		}
		if c != nil {
			return c, nil
		}
	}
	return http.DefaultClient, nil
}
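Callers inject a client by storing it under the HTTPClient key before any lookup; a short sketch:

func clientWithTimeout() (*http.Client, error) {
	// Store a custom client under the exported HTTPClient key; without
	// this value (and with no contextClientFuncs match), ContextClient
	// falls back to http.DefaultClient.
	ctx := context.WithValue(context.Background(), HTTPClient, &http.Client{
		Timeout: 10 * time.Second,
	})
	return ContextClient(ctx)
}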
Example 11
// Do sends an HTTP request with the provided http.Client and returns an HTTP response.
// If the client is nil, http.DefaultClient is used.
// If the context is canceled or times out, ctx.Err() will be returned.
func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
	if client == nil {
		client = http.DefaultClient
	}

	// Request cancellation changed in Go 1.5, see cancelreq.go and cancelreq_go14.go.
	cancel := canceler(client, req)

	type responseAndError struct {
		resp *http.Response
		err  error
	}
	result := make(chan responseAndError, 1)

	go func() {
		resp, err := client.Do(req)
		testHookDoReturned()
		result <- responseAndError{resp, err}
	}()

	var resp *http.Response

	select {
	case <-ctx.Done():
		testHookContextDoneBeforeHeaders()
		cancel()
		// Clean up after the goroutine calling client.Do:
		go func() {
			if r := <-result; r.resp != nil {
				testHookDidBodyClose()
				r.resp.Body.Close()
			}
		}()
		return nil, ctx.Err()
	case r := <-result:
		var err error
		resp, err = r.resp, r.err
		if err != nil {
			return resp, err
		}
	}

	c := make(chan struct{})
	go func() {
		select {
		case <-ctx.Done():
			cancel()
		case <-c:
			// The response's Body is closed.
		}
	}()
	resp.Body = &notifyingReader{resp.Body, c}

	return resp, nil
}
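Typical use pairs Do with a deadline-carrying context; a minimal sketch (the URL is a placeholder):

func fetch(parent context.Context) error {
	ctx, cancel := context.WithTimeout(parent, 5*time.Second)
	defer cancel()

	req, err := http.NewRequest("GET", "https://example.com/", nil)
	if err != nil {
		return err
	}
	resp, err := Do(ctx, nil, req) // nil selects http.DefaultClient
	if err != nil {
		return err // ctx.Err() if the deadline expired first
	}
	defer resp.Body.Close()
	_, err = ioutil.ReadAll(resp.Body)
	return err
}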
Example 12
func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
	if f, ctx, ok := callOverrideFromContext(ctx); ok {
		return f(ctx, service, method, in, out)
	}

	// Handle already-done contexts quickly.
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}

	c := fromContext(ctx)
	if c == nil {
		// Give a good error message rather than a panic lower down.
		return errors.New("not an App Engine context")
	}

	// Apply transaction modifications if we're in a transaction.
	if t := transactionFromContext(ctx); t != nil {
		if t.finished {
			return errors.New("transaction context has expired")
		}
		applyTransaction(in, &t.transaction)
	}

	var opts *appengine_internal.CallOptions
	if d, ok := ctx.Deadline(); ok {
		opts = &appengine_internal.CallOptions{
			Timeout: d.Sub(time.Now()),
		}
	}

	err := c.Call(service, method, in, out, opts)
	switch v := err.(type) {
	case *appengine_internal.APIError:
		return &APIError{
			Service: v.Service,
			Detail:  v.Detail,
			Code:    v.Code,
		}
	case *appengine_internal.CallError:
		return &CallError{
			Detail:  v.Detail,
			Code:    v.Code,
			Timeout: v.Timeout,
		}
	}
	return err
}
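Service packages reach this through typed wrappers; calling it directly looks roughly like this (the service and method names are illustrative):

func callWithDeadline(parent netcontext.Context, in, out proto.Message) error {
	// The deadline below becomes CallOptions.Timeout inside Call.
	ctx, cancel := netcontext.WithTimeout(parent, 5*time.Second)
	defer cancel()
	return Call(ctx, "memcache", "Get", in, out)
}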
Example 13
// Upload starts the process of a resumable upload with a cancellable context.
// It retries using the provided backoff strategy until cancelled or until the
// strategy indicates to stop retrying.
// It is called from the auto-generated API code and is not visible to the user;
// rx is private to that code.
// Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close.
func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) {
	var pause time.Duration
	backoff := rx.Backoff
	if backoff == nil {
		backoff = DefaultBackoffStrategy()
	}

	for {
		// Ensure that we return in the case of cancelled context, even if pause is 0.
		if contextDone(ctx) {
			return nil, ctx.Err()
		}
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-time.After(pause):
		}

		resp, err = rx.transferChunk(ctx)
		// It's possible for err and resp to both be non-nil here, but we expose a simpler
		// contract to our callers: exactly one of resp and err will be non-nil.  This means
		// that any response body must be closed here before returning a non-nil error.
		if err != nil {
			if resp != nil && resp.Body != nil {
				resp.Body.Close()
			}
			return nil, err
		}
		if resp.StatusCode == http.StatusCreated || resp.StatusCode == http.StatusOK {
			return resp, nil
		}

		if resp.StatusCode == statusResumeIncomplete {
			pause = 0
			backoff.Reset()
		} else {
			var retry bool
			pause, retry = backoff.Pause()
			if !retry {
				// Return the last response with its failing HTTP code.
				return resp, nil
			}
		}
		resp.Body.Close()
	}
}
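The backoff strategy above is only exercised through Pause and Reset, so it presumably satisfies an interface like the one sketched here (hypothetical; the real definitions may differ):

type backoffStrategy interface {
	Pause() (time.Duration, bool) // next delay, and whether to keep retrying
	Reset()                       // called after a successful incomplete chunk
}

// exponentialBackoff doubles the delay up to a cap, then tells Upload to
// return the last failing response.
type exponentialBackoff struct{ next time.Duration }

func (b *exponentialBackoff) Pause() (time.Duration, bool) {
	if b.next == 0 {
		b.next = time.Second
	}
	if b.next > 16*time.Second {
		return 0, false
	}
	d := b.next
	b.next *= 2
	return d, true
}

func (b *exponentialBackoff) Reset() { b.next = time.Second }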
Example 14
func transactionFromContext(ctx netcontext.Context) *transaction {
	t, _ := ctx.Value(&transactionKey).(*transaction)
	return t
}
Example 15
func fromContext(ctx netcontext.Context) appengine.Context {
	c, _ := ctx.Value(&contextKey).(appengine.Context)
	return c
}
Example 16
// ctxNamespace returns the active namespace for a context.
// It defaults to "" if no namespace was specified.
func ctxNamespace(ctx context.Context) string {
	v, _ := ctx.Value(nsKey{}).(string)
	return v
}
Example 17
// FromContext returns the peer information in ctx if it exists.
func FromContext(ctx context.Context) (p *Peer, ok bool) {
	p, ok = ctx.Value(peerKey{}).(*Peer)
	return
}
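A server handler typically uses this to recover the remote address; a short sketch:

func logPeer(ctx context.Context) {
	if p, ok := FromContext(ctx); ok {
		log.Printf("request from %v", p.Addr)
	}
}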
Example 18
func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
	if f, ctx, ok := callOverrideFromContext(ctx); ok {
		return f(ctx, service, method, in, out)
	}

	// Handle already-done contexts quickly.
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}

	c := fromContext(ctx)
	if c == nil {
		// Give a good error message rather than a panic lower down.
		return errors.New("not an App Engine context")
	}

	// Apply transaction modifications if we're in a transaction.
	if t := transactionFromContext(ctx); t != nil {
		if t.finished {
			return errors.New("transaction context has expired")
		}
		applyTransaction(in, &t.transaction)
	}

	// Default RPC timeout is 60s.
	timeout := 60 * time.Second
	if deadline, ok := ctx.Deadline(); ok {
		timeout = deadline.Sub(time.Now())
	}

	data, err := proto.Marshal(in)
	if err != nil {
		return err
	}

	ticket := c.req.Header.Get(ticketHeader)
	req := &remotepb.Request{
		ServiceName: &service,
		Method:      &method,
		Request:     data,
		RequestId:   &ticket,
	}
	hreqBody, err := proto.Marshal(req)
	if err != nil {
		return err
	}

	hrespBody, err := c.post(hreqBody, timeout)
	if err != nil {
		return err
	}

	res := &remotepb.Response{}
	if err := proto.Unmarshal(hrespBody, res); err != nil {
		return err
	}
	if res.RpcError != nil {
		ce := &CallError{
			Detail: res.RpcError.GetDetail(),
			Code:   *res.RpcError.Code,
		}
		switch remotepb.RpcError_ErrorCode(ce.Code) {
		case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED:
			ce.Timeout = true
		}
		return ce
	}
	if res.ApplicationError != nil {
		return &APIError{
			Service: *req.ServiceName,
			Detail:  res.ApplicationError.GetDetail(),
			Code:    *res.ApplicationError.Code,
		}
	}
	if res.Exception != nil || res.JavaException != nil {
		// This shouldn't happen, but let's be defensive.
		return &CallError{
			Detail: "service bridge returned exception",
			Code:   int32(remotepb.RpcError_UNKNOWN),
		}
	}
	return proto.Unmarshal(res.Response, out)
}
Example 19
// FromContext returns the Trace bound to the context, if any.
func FromContext(ctx context.Context) (tr Trace, ok bool) {
	tr, ok = ctx.Value(contextKey).(Trace)
	return
}
Example 20
// cc returns the internal *cloudContext (cc) state for a context.Context.
// It panics if the context was not created with cloud.NewContext.
func cc(ctx context.Context) *cloudContext {
	if c, ok := ctx.Value(contextKey{}).(*cloudContext); ok {
		return c
	}
	panic("invalid context.Context type; it should be created with cloud.NewContext")
}
Example 21
func NamespaceFromContext(ctx netcontext.Context) string {
	// If there's no namespace, return the empty string.
	ns, _ := ctx.Value(&namespaceKey).(string)
	return ns
}
Example 22
// Invoke is called by the generated code. It sends the RPC request on the
// wire and returns after the response is received.
func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (err error) {
	var c callInfo
	for _, o := range opts {
		if err := o.before(&c); err != nil {
			return toRPCErr(err)
		}
	}
	defer func() {
		for _, o := range opts {
			o.after(&c)
		}
	}()
	if EnableTracing {
		c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
		defer c.traceInfo.tr.Finish()
		c.traceInfo.firstLine.client = true
		if deadline, ok := ctx.Deadline(); ok {
			c.traceInfo.firstLine.deadline = deadline.Sub(time.Now())
		}
		c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false)
		// TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set.
		defer func() {
			if err != nil {
				c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
				c.traceInfo.tr.SetError()
			}
		}()
	}
	topts := &transport.Options{
		Last:  true,
		Delay: false,
	}
	var (
		lastErr error // record the error that happened
	)
	for {
		var (
			err    error
			t      transport.ClientTransport
			stream *transport.Stream
		)
		// TODO(zhaoq): Need a formal spec of retry strategy for non-failfast rpcs.
		if lastErr != nil && c.failFast {
			return toRPCErr(lastErr)
		}
		callHdr := &transport.CallHdr{
			Host:   cc.authority,
			Method: method,
		}
		if cc.dopts.cp != nil {
			callHdr.SendCompress = cc.dopts.cp.Type()
		}
		t, err = cc.dopts.picker.Pick(ctx)
		if err != nil {
			if lastErr != nil {
				// This was a retry; return the error from the last attempt.
				return toRPCErr(lastErr)
			}
			return toRPCErr(err)
		}
		if c.traceInfo.tr != nil {
			c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
		}
		stream, err = sendRequest(ctx, cc.dopts.codec, cc.dopts.cp, callHdr, t, args, topts)
		if err != nil {
			if _, ok := err.(transport.ConnectionError); ok {
				lastErr = err
				continue
			}
			if lastErr != nil {
				return toRPCErr(lastErr)
			}
			return toRPCErr(err)
		}
		// Receive the response
		lastErr = recvResponse(cc.dopts, t, &c, stream, reply)
		if _, ok := lastErr.(transport.ConnectionError); ok {
			continue
		}
		if c.traceInfo.tr != nil {
			c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true)
		}
		t.CloseStream(stream, lastErr)
		if lastErr != nil {
			return toRPCErr(lastErr)
		}
		return Errorf(stream.StatusCode(), stream.StatusDesc())
	}
}
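Generated unary stubs wrap Invoke roughly as follows (a sketch; the client type, service path, and message types are placeholders):

func (c *fooClient) Bar(ctx context.Context, in *BarRequest, opts ...CallOption) (*BarReply, error) {
	out := new(BarReply)
	if err := Invoke(ctx, "/foo.Foo/Bar", in, out, c.cc, opts...); err != nil {
		return nil, err
	}
	return out, nil
}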
Example 23
// NewStream creates a stream and registers it with the transport as an
// "active" stream.
func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {
	// Record the timeout value on the context.
	var timeout time.Duration
	if dl, ok := ctx.Deadline(); ok {
		timeout = dl.Sub(time.Now())
		if timeout <= 0 {
			return nil, ContextErr(context.DeadlineExceeded)
		}
	}
	pr := &peer.Peer{
		Addr: t.conn.RemoteAddr(),
	}
	// Attach Auth info if there is any.
	if t.authInfo != nil {
		pr.AuthInfo = t.authInfo
	}
	ctx = peer.NewContext(ctx, pr)
	authData := make(map[string]string)
	for _, c := range t.authCreds {
		// Construct URI required to get auth request metadata.
		var port string
		if pos := strings.LastIndex(t.target, ":"); pos != -1 {
			// Omit port if it is the default one.
			if t.target[pos+1:] != "443" {
				port = ":" + t.target[pos+1:]
			}
		}
		pos := strings.LastIndex(callHdr.Method, "/")
		if pos == -1 {
			return nil, StreamErrorf(codes.InvalidArgument, "transport: malformed method name: %q", callHdr.Method)
		}
		audience := "https://" + callHdr.Host + port + callHdr.Method[:pos]
		data, err := c.GetRequestMetadata(ctx, audience)
		if err != nil {
			return nil, StreamErrorf(codes.InvalidArgument, "transport: %v", err)
		}
		for k, v := range data {
			authData[k] = v
		}
	}
	t.mu.Lock()
	if t.state != reachable {
		t.mu.Unlock()
		return nil, ErrConnClosing
	}
	checkStreamsQuota := t.streamsQuota != nil
	t.mu.Unlock()
	if checkStreamsQuota {
		sq, err := wait(ctx, t.shutdownChan, t.streamsQuota.acquire())
		if err != nil {
			return nil, err
		}
		// Return the unused quota balance.
		if sq > 1 {
			t.streamsQuota.add(sq - 1)
		}
	}
	if _, err := wait(ctx, t.shutdownChan, t.writableChan); err != nil {
		// t.streamsQuota will be updated when t.CloseStream is invoked.
		return nil, err
	}
	t.mu.Lock()
	if t.state != reachable {
		t.mu.Unlock()
		return nil, ErrConnClosing
	}
	s := t.newStream(ctx, callHdr)
	t.activeStreams[s.id] = s

	// This stream is not counted when applySettings(...) initializes
	// t.streamsQuota. Reset t.streamsQuota to the correct value.
	var reset bool
	if !checkStreamsQuota && t.streamsQuota != nil {
		reset = true
	}
	t.mu.Unlock()
	if reset {
		t.streamsQuota.reset(-1)
	}

	// HPACK encodes various headers. Note that once WriteField(...) is
	// called, the corresponding headers/continuation frame has to be sent
	// because hpack.Encoder is stateful.
	t.hBuf.Reset()
	t.hEnc.WriteField(hpack.HeaderField{Name: ":method", Value: "POST"})
	t.hEnc.WriteField(hpack.HeaderField{Name: ":scheme", Value: t.scheme})
	t.hEnc.WriteField(hpack.HeaderField{Name: ":path", Value: callHdr.Method})
	t.hEnc.WriteField(hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
	t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
	t.hEnc.WriteField(hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
	t.hEnc.WriteField(hpack.HeaderField{Name: "te", Value: "trailers"})

	if callHdr.SendCompress != "" {
		t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
	}
	if timeout > 0 {
		t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-timeout", Value: timeoutEncode(timeout)})
	}
	for k, v := range authData {
		t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v})
	}
	var (
		hasMD      bool
		endHeaders bool
	)
	if md, ok := metadata.FromContext(ctx); ok {
		hasMD = true
		for k, v := range md {
			for _, entry := range v {
				t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
			}
		}
	}
	first := true
	// Sends the headers in a single batch even when they span multiple frames.
	for !endHeaders {
		size := t.hBuf.Len()
		if size > http2MaxFrameLen {
			size = http2MaxFrameLen
		} else {
			endHeaders = true
		}
		var flush bool
		if endHeaders && (hasMD || callHdr.Flush) {
			flush = true
		}
		if first {
			// Sends a HeadersFrame to server to start a new stream.
			p := http2.HeadersFrameParam{
				StreamID:      s.id,
				BlockFragment: t.hBuf.Next(size),
				EndStream:     false,
				EndHeaders:    endHeaders,
			}
			// Do a force flush for the buffered frames iff it is the last headers frame
			// and there is header metadata to be sent. Otherwise, there is no flushing
			// until the corresponding data frame is written.
			err = t.framer.writeHeaders(flush, p)
			first = false
		} else {
			// Sends Continuation frames for the leftover headers.
			err = t.framer.writeContinuation(flush, s.id, endHeaders, t.hBuf.Next(size))
		}
		if err != nil {
			t.notifyError(err)
			return nil, ConnectionErrorf("transport: %v", err)
		}
	}
	t.writableChan <- 0
	return s, nil
}
Example 24
// FullyQualifiedAppID returns the fully-qualified application ID.
// This may contain a partition prefix (e.g. "s~" for High Replication apps),
// or a domain prefix (e.g. "example.com:").
func FullyQualifiedAppID(ctx netcontext.Context) string {
	if id, ok := ctx.Value(&appIDOverrideKey).(string); ok {
		return id
	}
	return fullyQualifiedAppID(ctx)
}
Example 25
// FromContext returns the MD in ctx if it exists.
func FromContext(ctx context.Context) (md MD, ok bool) {
	md, ok = ctx.Value(mdKey{}).(MD)
	return
}
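With MD defined as a map from keys to value lists, a handler reads incoming metadata like this (the header name is only an example):

func authToken(ctx context.Context) (string, bool) {
	md, ok := FromContext(ctx)
	if !ok {
		return "", false
	}
	if vals := md["authorization"]; len(vals) > 0 {
		return vals[0], true
	}
	return "", false
}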
Example 26
func fromContext(ctx netcontext.Context) *context {
	c, _ := ctx.Value(&contextKey).(*context)
	return c
}
Example 27
// StreamFromContext returns the stream saved in ctx.
func StreamFromContext(ctx context.Context) (s *Stream, ok bool) {
	s, ok = ctx.Value(streamKey).(*Stream)
	return
}