// WithDeadline is a clock library implementation of context.WithDeadline that
// uses the clock library's time features instead of the Go time library.
//
// For more information, see context.WithDeadline.
func WithDeadline(parent context.Context, deadline time.Time) (context.Context, context.CancelFunc) {
    if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
        // The current deadline is already sooner than the new one.
        return context.WithCancel(parent)
    }

    parent, cancelFunc := context.WithCancel(parent)
    c := &clockContext{
        Context:  parent,
        deadline: deadline,
    }

    d := deadline.Sub(Now(c))
    if d <= 0 {
        // Deadline has already passed.
        c.setError(context.DeadlineExceeded)
        cancelFunc()
        return c, cancelFunc
    }

    // Invoke our cancelFunc after the specified time.
    go func() {
        select {
        case <-c.Done():
        case <-After(c, d):
            c.setError(context.DeadlineExceeded)
            cancelFunc()
        }
    }()

    return c, cancelFunc
}
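A minimal caller sketch, not from the source: derive a child context whose deadline is computed from the clock-aware Now, then wait for it to fire. The helper name waitForDeadline and the 100ms budget are illustrative.

func waitForDeadline(parent context.Context) error {
    ctx, cancel := WithDeadline(parent, Now(parent).Add(100*time.Millisecond))
    defer cancel()
    <-ctx.Done() // closes once the clock library's time passes the deadline
    return ctx.Err()
}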
// dialContext connects to the address on the named network.
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
    var dialer net.Dialer
    if deadline, ok := ctx.Deadline(); ok {
        dialer.Timeout = deadline.Sub(time.Now())
    }
    return dialer.Dial(network, address)
}
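A hedged usage sketch (the address, network, and 3-second budget are illustrative, not from the source): attach a timeout to the context and let dialContext translate the remaining time into dialer.Timeout.

func dialWithBudget() (net.Conn, error) {
    ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
    defer cancel()
    // dialContext computes deadline.Sub(time.Now()) from this context's deadline.
    return dialContext(ctx, "tcp", "example.com:80")
}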
// PromoteSlaveWhenCaughtUp waits for this slave to be caught up on
// replication up to the provided point, and then makes the slave the
// shard master.
func (agent *ActionAgent) PromoteSlaveWhenCaughtUp(ctx context.Context, position string) (string, error) {
    pos, err := replication.DecodePosition(position)
    if err != nil {
        return "", err
    }

    // TODO(alainjobart) change the flavor API to take the context directly
    // For now, extract the timeout from the context, or wait forever
    var waitTimeout time.Duration
    if deadline, ok := ctx.Deadline(); ok {
        waitTimeout = deadline.Sub(time.Now())
        if waitTimeout <= 0 {
            waitTimeout = time.Millisecond
        }
    }
    if err := agent.MysqlDaemon.WaitMasterPos(pos, waitTimeout); err != nil {
        return "", err
    }

    pos, err = agent.MysqlDaemon.PromoteSlave(agent.hookExtraEnv())
    if err != nil {
        return "", err
    }

    if err := agent.MysqlDaemon.SetReadOnly(false); err != nil {
        return "", err
    }

    if _, err := topotools.ChangeType(ctx, agent.TopoServer, agent.TabletAlias, topodatapb.TabletType_MASTER, topotools.ClearHealthMap); err != nil {
        return "", err
    }

    return replication.EncodePosition(pos), nil
}
// MountLabel performs a mount with the label and target being absolute paths
func (t *BaseOperations) MountLabel(ctx context.Context, label, target string) error {
    defer trace.End(trace.Begin(fmt.Sprintf("Mounting %s on %s", label, target)))

    if err := os.MkdirAll(target, 0600); err != nil {
        return fmt.Errorf("unable to create mount point %s: %s", target, err)
    }

    // convert the label to a filesystem path
    label = "/dev/disk/by-label/" + label

    // do..while !timedout
    var timeout bool
    for timeout = false; !timeout; {
        _, err := os.Stat(label)
        if err == nil || !os.IsNotExist(err) {
            break
        }

        deadline, ok := ctx.Deadline()
        timeout = ok && time.Now().After(deadline)
    }

    if timeout {
        detail := fmt.Sprintf("timed out waiting for %s to appear", label)
        return errors.New(detail)
    }

    if err := syscall.Mount(label, target, "ext4", syscall.MS_NOATIME, ""); err != nil {
        detail := fmt.Sprintf("mounting %s on %s failed: %s", label, target, err)
        return errors.New(detail)
    }

    return nil
}
// NewClientStream creates a new Stream for the client side. This is called
// by generated code.
func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
    var (
        t   transport.ClientTransport
        err error
    )
    t, err = cc.dopts.picker.Pick(ctx)
    if err != nil {
        return nil, toRPCErr(err)
    }
    // TODO(zhaoq): CallOption is omitted. Add support when it is needed.
    callHdr := &transport.CallHdr{
        Host:   cc.authority,
        Method: method,
        Flush:  desc.ServerStreams && desc.ClientStreams,
    }
    cs := &clientStream{
        desc:    desc,
        codec:   cc.dopts.codec,
        cp:      cc.dopts.cp,
        dc:      cc.dopts.dc,
        tracing: EnableTracing,
    }
    if cc.dopts.cp != nil {
        callHdr.SendCompress = cc.dopts.cp.Type()
        cs.cbuf = new(bytes.Buffer)
    }
    if cs.tracing {
        cs.trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
        cs.trInfo.firstLine.client = true
        if deadline, ok := ctx.Deadline(); ok {
            cs.trInfo.firstLine.deadline = deadline.Sub(time.Now())
        }
        cs.trInfo.tr.LazyLog(&cs.trInfo.firstLine, false)
        ctx = trace.NewContext(ctx, cs.trInfo.tr)
    }
    s, err := t.NewStream(ctx, callHdr)
    if err != nil {
        cs.finish(err)
        return nil, toRPCErr(err)
    }
    cs.t = t
    cs.s = s
    cs.p = &parser{r: s}
    // Listen on ctx.Done() to detect cancellation when there are no pending
    // I/O operations on this stream.
    go func() {
        select {
        case <-t.Error():
            // Incur transport error, simply exit.
        case <-s.Context().Done():
            err := s.Context().Err()
            cs.finish(err)
            cs.closeTransportStream(transport.ContextErr(err))
        }
    }()
    return cs, nil
}
// Begin begins a transaction, and returns the associated transaction id.
// Subsequent statements can access the connection through the transaction id.
func (axp *TxPool) Begin(ctx context.Context) int64 {
    poolCtx := ctx
    if deadline, ok := ctx.Deadline(); ok {
        var cancel func()
        poolCtx, cancel = context.WithDeadline(ctx, deadline.Add(-10*time.Millisecond))
        defer cancel()
    }
    conn, err := axp.pool.Get(poolCtx)
    if err != nil {
        switch err {
        case ErrConnPoolClosed:
            panic(err)
        case pools.ErrTimeout:
            axp.LogActive()
            panic(NewTabletError(ErrTxPoolFull, vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED, "Transaction pool connection limit exceeded"))
        }
        panic(NewTabletErrorSQL(ErrFatal, vtrpcpb.ErrorCode_INTERNAL_ERROR, err))
    }
    if _, err := conn.Exec(ctx, "begin", 1, false); err != nil {
        conn.Recycle()
        panic(NewTabletErrorSQL(ErrFail, vtrpcpb.ErrorCode_UNKNOWN_ERROR, err))
    }
    transactionID := axp.lastID.Add(1)
    axp.activePool.Register(
        transactionID,
        newTxConnection(
            conn,
            transactionID,
            axp,
            callerid.ImmediateCallerIDFromContext(ctx),
            callerid.EffectiveCallerIDFromContext(ctx),
        ),
    )
    return transactionID
}
// WaitMasterPos implements MysqlFlavor.WaitMasterPos().
//
// Note: Unlike MASTER_POS_WAIT(), MASTER_GTID_WAIT() will continue waiting even
// if the slave thread stops. If that is a problem, we'll have to change this.
func (*mariaDB10) WaitMasterPos(ctx context.Context, mysqld *Mysqld, targetPos replication.Position) error {
    var query string
    if deadline, ok := ctx.Deadline(); ok {
        timeout := deadline.Sub(time.Now())
        if timeout <= 0 {
            return fmt.Errorf("timed out waiting for position %v", targetPos)
        }
        query = fmt.Sprintf("SELECT MASTER_GTID_WAIT('%s', %.6f)", targetPos, timeout.Seconds())
    } else {
        // Omit the timeout to wait indefinitely. In MariaDB, a timeout of 0 means
        // return immediately.
        query = fmt.Sprintf("SELECT MASTER_GTID_WAIT('%s')", targetPos)
    }

    log.Infof("Waiting for minimum replication position with query: %v", query)
    qr, err := mysqld.FetchSuperQuery(ctx, query)
    if err != nil {
        return fmt.Errorf("MASTER_GTID_WAIT() failed: %v", err)
    }
    if len(qr.Rows) != 1 || len(qr.Rows[0]) != 1 {
        return fmt.Errorf("unexpected result format from MASTER_GTID_WAIT(): %#v", qr)
    }
    result := qr.Rows[0][0].String()
    if result == "-1" {
        return fmt.Errorf("timed out waiting for position %v", targetPos)
    }
    return nil
}
func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
    if f, ok := ctx.Value(&callOverrideKey).(callOverrideFunc); ok {
        return f(ctx, service, method, in, out)
    }

    c := fromContext(ctx)
    if c == nil {
        // Give a good error message rather than a panic lower down.
        return errors.New("not an App Engine context")
    }

    // Apply transaction modifications if we're in a transaction.
    if t := transactionFromContext(ctx); t != nil {
        if t.finished {
            return errors.New("transaction context has expired")
        }
        applyTransaction(in, &t.transaction)
    }

    var opts *appengine_internal.CallOptions
    if d, ok := ctx.Deadline(); ok {
        opts = &appengine_internal.CallOptions{
            Timeout: d.Sub(time.Now()),
        }
    }

    return c.Call(service, method, in, out, opts)
}
// rpcCallTablet will execute the RPC on the remote server.
func (client *GoRPCTabletManagerClient) rpcCallTablet(ctx context.Context, tablet *topo.TabletInfo, name string, args, reply interface{}) error {
    // create the RPC client, using ctx.Deadline if set, or no timeout.
    var connectTimeout time.Duration
    deadline, ok := ctx.Deadline()
    if ok {
        connectTimeout = deadline.Sub(time.Now())
        if connectTimeout < 0 {
            return timeoutError{fmt.Errorf("timeout connecting to TabletManager.%v on %v", name, tablet.Alias)}
        }
    }
    rpcClient, err := bsonrpc.DialHTTP("tcp", tablet.Addr(), connectTimeout)
    if err != nil {
        return fmt.Errorf("RPC error for %v: %v", tablet.Alias, err.Error())
    }
    defer rpcClient.Close()

    // use the context Done() channel. Will handle context timeout.
    call := rpcClient.Go(ctx, "TabletManager."+name, args, reply, nil)
    select {
    case <-ctx.Done():
        if ctx.Err() == context.DeadlineExceeded {
            return timeoutError{fmt.Errorf("timeout waiting for TabletManager.%v to %v", name, tablet.Alias)}
        }
        return fmt.Errorf("interrupted waiting for TabletManager.%v to %v", name, tablet.Alias)
    case <-call.Done:
        if call.Error != nil {
            return fmt.Errorf("remote error for %v: %v", tablet.Alias, call.Error.Error())
        }
        return nil
    }
}
func timeout(ctx context.Context) (time.Duration, error) {
    deadline, ok := ctx.Deadline()
    if !ok {
        // no deadline set
        return RPC_TIMEOUT, nil
    }

    if now := time.Now(); now.Before(deadline) {
        d := deadline.Sub(now)
        if d > RPC_TIMEOUT {
            // deadline is too far out, use our built-in
            return RPC_TIMEOUT, nil
        }
        return d, nil
    }

    // deadline has expired.
    select {
    case <-ctx.Done():
        return 0, ctx.Err()
    default:
        // this should never happen because Done() should be closed
        // according to the contract of context. but we have this here
        // just in case.
        return 0, context.DeadlineExceeded
    }
}
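An illustrative caller for the helper above (doRPC is a hypothetical wrapper, not from the source): clamp the outgoing call to whatever budget the incoming context allows, falling back to the built-in RPC_TIMEOUT.

func doRPC(ctx context.Context) error {
    d, err := timeout(ctx)
    if err != nil {
        return err // the deadline already expired
    }
    callCtx, cancel := context.WithTimeout(ctx, d)
    defer cancel()
    _ = callCtx // issue the RPC with callCtx here
    return nil
}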
// Chat implementation of the Chat bidi streaming RPC function
func (p *PeerImpl) handleChat(ctx context.Context, stream ChatStream, initiatedStream bool) error {
    deadline, ok := ctx.Deadline()
    peerLogger.Debugf("Current context deadline = %s, ok = %v", deadline, ok)
    handler, err := p.handlerFactory(p, stream, initiatedStream, nil)
    if err != nil {
        return fmt.Errorf("Error creating handler during handleChat initiation: %s", err)
    }
    defer handler.Stop()
    for {
        in, err := stream.Recv()
        if err == io.EOF {
            peerLogger.Debug("Received EOF, ending Chat")
            return nil
        }
        if err != nil {
            e := fmt.Errorf("Error during Chat, stopping handler: %s", err)
            peerLogger.Error(e.Error())
            return e
        }
        err = handler.HandleMessage(in)
        if err != nil {
            peerLogger.Errorf("Error handling message: %s", err)
            //return err
        }
    }
}
func (h *testHandler) Handle(ctx context.Context, args *raw.Args) (*raw.Res, error) {
    h.mut.Lock()
    h.format = args.Format
    h.caller = args.Caller
    h.mut.Unlock()

    assert.Equal(h.t, args.Caller, CurrentCall(ctx).CallerName())

    switch args.Operation {
    case "timeout":
        deadline, _ := ctx.Deadline()
        time.Sleep(deadline.Add(time.Second * 1).Sub(time.Now()))
        h.t.FailNow()
    case "echo":
        return &raw.Res{
            Arg2: args.Arg2,
            Arg3: args.Arg3,
        }, nil
    case "busy":
        return &raw.Res{
            SystemErr: ErrServerBusy,
        }, nil
    case "app-error":
        return &raw.Res{
            IsErr: true,
        }, nil
    }
    return nil, errors.New("unknown operation")
}
// WaitN blocks until lim permits n events to happen.
// It returns an error if n exceeds the Limiter's burst size, the Context is
// canceled, or the expected wait time exceeds the Context's Deadline.
func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) {
    if n > lim.burst {
        return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, lim.burst)
    }
    // Check if ctx is already cancelled
    select {
    case <-ctx.Done():
        return ctx.Err()
    default:
    }
    // Determine wait limit
    now := time.Now()
    waitLimit := InfDuration
    if deadline, ok := ctx.Deadline(); ok {
        waitLimit = deadline.Sub(now)
    }
    // Reserve
    r := lim.reserveN(now, n, waitLimit)
    if !r.ok {
        return fmt.Errorf("rate: Wait(n=%d) would exceed context deadline", n)
    }
    // Wait
    t := time.NewTimer(r.DelayFrom(now))
    defer t.Stop()
    select {
    case <-t.C:
        // We can proceed.
        return nil
    case <-ctx.Done():
        // Context was canceled before we could proceed. Cancel the
        // reservation, which may permit other events to proceed sooner.
        r.Cancel()
        return ctx.Err()
    }
}
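A sketch of a typical caller (the processItems helper is hypothetical): rate-limit a loop, stopping cleanly when the context is canceled or its deadline would be hit before a token becomes available.

func processItems(ctx context.Context, lim *Limiter, items []string) error {
    for _, item := range items {
        if err := lim.WaitN(ctx, 1); err != nil {
            return err // canceled, or the wait would exceed the deadline
        }
        _ = item // handle the item here
    }
    return nil
}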
// Begin begins a transaction, and returns the associated transaction id.
// Subsequent statements can access the connection through the transaction id.
func (axp *TxPool) Begin(ctx context.Context) int64 {
    poolCtx := ctx
    if deadline, ok := ctx.Deadline(); ok {
        var cancel func()
        poolCtx, cancel = context.WithDeadline(ctx, deadline.Add(-10*time.Millisecond))
        defer cancel()
    }
    conn, err := axp.pool.Get(poolCtx)
    if err != nil {
        switch err {
        case ErrConnPoolClosed:
            panic(err)
        case pools.ErrTimeout:
            axp.LogActive()
            panic(NewTabletError(ErrTxPoolFull, "Transaction pool connection limit exceeded"))
        }
        panic(NewTabletErrorSql(ErrFatal, err))
    }
    if _, err := conn.Exec(ctx, "begin", 1, false); err != nil {
        conn.Recycle()
        panic(NewTabletErrorSql(ErrFail, err))
    }
    transactionID := axp.lastID.Add(1)
    axp.activePool.Register(transactionID, newTxConnection(conn, transactionID, axp))
    return transactionID
}
// PromoteSlaveWhenCaughtUp waits for this slave to be caught up on
// replication up to the provided point, and then makes the slave the
// shard master.
func (agent *ActionAgent) PromoteSlaveWhenCaughtUp(ctx context.Context, pos myproto.ReplicationPosition) (myproto.ReplicationPosition, error) {
    tablet, err := agent.TopoServer.GetTablet(ctx, agent.TabletAlias)
    if err != nil {
        return myproto.ReplicationPosition{}, err
    }

    // TODO(alainjobart) change the flavor API to take the context directly
    // For now, extract the timeout from the context, or wait forever
    var waitTimeout time.Duration
    if deadline, ok := ctx.Deadline(); ok {
        waitTimeout = deadline.Sub(time.Now())
        if waitTimeout <= 0 {
            waitTimeout = time.Millisecond
        }
    }
    if err := agent.MysqlDaemon.WaitMasterPos(pos, waitTimeout); err != nil {
        return myproto.ReplicationPosition{}, err
    }

    rp, err := agent.MysqlDaemon.PromoteSlave(agent.hookExtraEnv())
    if err != nil {
        return myproto.ReplicationPosition{}, err
    }

    if err := agent.MysqlDaemon.SetReadOnly(false); err != nil {
        return myproto.ReplicationPosition{}, err
    }

    return rp, agent.updateReplicationGraphForPromotedSlave(ctx, tablet)
}
// NewClientStream creates a new Stream for the client side. This is called
// by generated code.
func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
    // TODO(zhaoq): CallOption is omitted. Add support when it is needed.
    callHdr := &transport.CallHdr{
        Host:   cc.authority,
        Method: method,
    }
    cs := &clientStream{
        desc:    desc,
        codec:   cc.dopts.codec,
        tracing: EnableTracing,
    }
    if cs.tracing {
        cs.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
        cs.traceInfo.firstLine.client = true
        if deadline, ok := ctx.Deadline(); ok {
            cs.traceInfo.firstLine.deadline = deadline.Sub(time.Now())
        }
        cs.traceInfo.tr.LazyLog(&cs.traceInfo.firstLine, false)
    }
    t, err := cc.wait(ctx)
    if err != nil {
        return nil, toRPCErr(err)
    }
    s, err := t.NewStream(ctx, callHdr)
    if err != nil {
        return nil, toRPCErr(err)
    }
    cs.t = t
    cs.s = s
    cs.p = &parser{s: s}
    return cs, nil
}
// ReceivePacket listens for incoming OSC packets and returns the packet and
// client address if one is received.
func (s *Server) ReceivePacket(ctx context.Context, c net.PacketConn) (Packet, net.Addr, error) {
    if deadline, ok := ctx.Deadline(); ok {
        if err := c.SetReadDeadline(deadline); err != nil {
            return nil, nil, err
        }
    }

    go func() {
        select {
        // case <-time.After(200 * time.Millisecond):
        //     log.Println("Overslept.")
        case <-ctx.Done():
            log.Println(ctx.Err())
        }
    }()

    data := make([]byte, 65535)
    var n, start int
    n, addr, err := c.ReadFrom(data)
    if err != nil {
        return nil, nil, err
    }
    pkt, err := readPacket(bufio.NewReader(bytes.NewBuffer(data)), &start, n)
    return pkt, addr, err
}
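An illustrative caller (the UDP address and one-second budget are assumptions, not from the source): the context deadline above becomes the connection's read deadline, so ReadFrom fails once the budget runs out.

func receiveOne(s *Server) (Packet, net.Addr, error) {
    conn, err := net.ListenPacket("udp", "127.0.0.1:0")
    if err != nil {
        return nil, nil, err
    }
    defer conn.Close()

    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel()
    return s.ReceivePacket(ctx, conn)
}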
func timeout(ctx context.Context, call *tchannel.InboundCall) {
    deadline, _ := ctx.Deadline()
    log.Infof("Client requested timeout in %dms", int(deadline.Sub(time.Now()).Seconds()*1000))

    pastDeadline := deadline.Add(time.Second * 2)
    time.Sleep(pastDeadline.Sub(time.Now()))
    echo(ctx, call)
}
func (netDialer) Dial(n, addr string, ctx context.Context) (net.Conn, error) {
    deadline, _ := ctx.Deadline()
    d := net.Dialer{
        Deadline: deadline,
    }
    return d.Dial(n, addr)
}
func getTimeout(ctx context.Context) time.Duration {
    deadline, ok := ctx.Deadline()
    if !ok {
        return DefaultConnectTimeout
    }
    return deadline.Sub(time.Now())
}
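A minimal usage sketch (connect is a hypothetical helper): translate the context's remaining budget into a plain timeout for an API that does not accept contexts.

func connect(ctx context.Context, addr string) (net.Conn, error) {
    // net.DialTimeout takes a duration, not a context, so convert first.
    return net.DialTimeout("tcp", addr, getTimeout(ctx))
}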
func readFromTwitter(ctx context.Context, votes chan<- string) {
    options, err := loadOptions()
    if err != nil {
        log.Println("failed to load options:", err)
        return
    }
    query := make(url.Values)
    query.Set("track", strings.Join(options, ","))
    req, err := makeRequest(query)
    if err != nil {
        log.Println("failed to build the search request:", err)
        return
    }
    client := &http.Client{}
    if deadline, ok := ctx.Deadline(); ok {
        client.Timeout = deadline.Sub(time.Now())
    }
    resp, err := client.Do(req)
    if err != nil {
        log.Println("search request failed:", err)
        return
    }
    done := make(chan struct{})
    defer func() { <-done }()
    defer resp.Body.Close()
    go func() {
        defer close(done)
        log.Println("resp:", resp.StatusCode)
        if resp.StatusCode != 200 {
            var buf bytes.Buffer
            io.Copy(&buf, resp.Body)
            log.Printf("resp body: %s", buf.String())
            return
        }
        decoder := json.NewDecoder(resp.Body)
        for {
            var tweet tweet
            if err := decoder.Decode(&tweet); err != nil {
                break
            }
            log.Println("tweet:", tweet)
            for _, option := range options {
                if strings.Contains(strings.ToLower(tweet.Text), strings.ToLower(option)) {
                    log.Println("vote:", option)
                    votes <- option
                }
            }
        }
    }()
    select {
    case <-ctx.Done():
    case <-done:
    }
}
func (g *GraphiteExporter) Send(ctx context.Context, r metrics.Registry) error {
    d := net.Dialer{
        DualStack: true,
        Cancel:    ctx.Done(),
    }
    conn, err := d.Dial("tcp", g.addr)
    if err != nil {
        return err
    }
    defer conn.Close()
    if deadline, ok := ctx.Deadline(); ok {
        conn.SetWriteDeadline(deadline)
    }

    w := bufio.NewWriter(conn)
    now := time.Now().Unix()
    r.Each(func(name string, value interface{}) {
        switch metric := value.(type) {
        case metrics.Counter:
            fmt.Fprintf(w, "%s.%s %d %d\n", g.prefix, name, metric.Count(), now)
        case metrics.Gauge:
            fmt.Fprintf(w, "%s.%s %d %d\n", g.prefix, name, metric.Value(), now)
        case metrics.Meter:
            m := metric.Snapshot()
            fmt.Fprintf(w, "%s.%s.count %d %d\n", g.prefix, name, m.Count(), now)
            fmt.Fprintf(w, "%s.%s.rate1m %.2f %d\n", g.prefix, name, m.Rate1(), now)
            fmt.Fprintf(w, "%s.%s.rate5m %.2f %d\n", g.prefix, name, m.Rate5(), now)
            fmt.Fprintf(w, "%s.%s.rate15m %.2f %d\n", g.prefix, name, m.Rate15(), now)
            fmt.Fprintf(w, "%s.%s.ratemean %.2f %d\n", g.prefix, name, m.RateMean(), now)
        case metrics.Timer:
            t := metric.Snapshot()
            ps := t.Percentiles(g.percentiles)
            fmt.Fprintf(w, "%s.%s.count %d %d\n", g.prefix, name, t.Count(), now)
            fmt.Fprintf(w, "%s.%s.min_%s %d %d\n", g.prefix, name, g.duStr, t.Min()/int64(g.du), now)
            fmt.Fprintf(w, "%s.%s.max_%s %d %d\n", g.prefix, name, g.duStr, t.Max()/int64(g.du), now)
            fmt.Fprintf(w, "%s.%s.mean_%s %.2f %d\n", g.prefix, name, g.duStr, t.Mean()/float64(g.du), now)
            // fmt.Fprintf(w, "%s.%s.std-dev_%s %.2f %d\n", g.prefix, name, g.duStr, t.StdDev()/float64(g.du), now)
            for psIdx, psKey := range g.percentiles {
                key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1)
                fmt.Fprintf(w, "%s.%s.%s_%s %.2f %d\n", g.prefix, name, key, g.duStr, ps[psIdx]/float64(g.du), now)
            }
            fmt.Fprintf(w, "%s.%s.rate1m %.2f %d\n", g.prefix, name, t.Rate1(), now)
            fmt.Fprintf(w, "%s.%s.rate5m %.2f %d\n", g.prefix, name, t.Rate5(), now)
            fmt.Fprintf(w, "%s.%s.rate15m %.2f %d\n", g.prefix, name, t.Rate15(), now)
            fmt.Fprintf(w, "%s.%s.ratemean %.2f %d\n", g.prefix, name, t.RateMean(), now)
        case metrics.Healthcheck:
            // pass
        default:
            apexctx.GetLogger(ctx).Warnf("Graphite: skip metric `%s` of unknown type %T", name, value)
        }
        w.Flush()
    })
    return nil
}
// Start is part of the MysqlctlClient interface.
func (c *goRPCMysqlctlClient) Start(ctx context.Context) error {
    var timeout time.Duration
    if deadline, ok := ctx.Deadline(); ok {
        timeout = deadline.Sub(time.Now())
        if timeout <= 0 {
            return fmt.Errorf("deadline exceeded")
        }
    }
    return c.rpcClient.Call(ctx, "MysqlctlServer.Start", &timeout, &rpc.Unused{})
}
// AEContext retrieves the raw "google.golang.org/appengine" compatible Context.
//
// It also transfers the deadline of `c` to the AE context, since the deadline
// is used for RPCs. It doesn't transfer cancelation ability though (since
// that's ignored by GAE anyway).
func AEContext(c context.Context) context.Context {
    aeCtx, _ := c.Value(prodContextKey).(context.Context)
    if aeCtx == nil {
        return nil
    }
    if deadline, ok := c.Deadline(); ok {
        aeCtx, _ = context.WithDeadline(aeCtx, deadline)
    }
    return aeCtx
}
func TestHandlerGetContext(t *testing.T) {
    var c context.Context
    var err *Error
    h, _ := NewHandler(resource.NewIndex())
    w := newRecorder()
    defer w.Close()
    c, err = h.getContext(w, &http.Request{URL: &url.URL{}})
    assert.Nil(t, err)
    _, ok := c.Deadline()
    assert.False(t, ok)
}
func (h handler) ServeHTTPC(ctx context.Context, w http.ResponseWriter, r *http.Request) {
    // Leave other goroutines a chance to run
    time.Sleep(time.Nanosecond)
    value, _ := fromContext(ctx)
    if _, ok := ctx.Deadline(); ok {
        value += " with deadline"
    }
    if ctx.Err() == context.Canceled {
        value += " canceled"
    }
    w.Write([]byte(value))
}
func (h handler) ServeHTTPContext(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
    time.Sleep(time.Millisecond) // wait for other goroutines
    val, _ := fromContext(ctx)
    if _, ok := ctx.Deadline(); ok {
        val += " with deadline"
    }
    if ctx.Err() == context.Canceled {
        val += " canceled"
    }
    _, err := w.Write([]byte(val))
    return err
}
func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
    if f, ctx, ok := callOverrideFromContext(ctx); ok {
        return f(ctx, service, method, in, out)
    }

    // Handle already-done contexts quickly.
    select {
    case <-ctx.Done():
        return ctx.Err()
    default:
    }

    c := fromContext(ctx)
    if c == nil {
        // Give a good error message rather than a panic lower down.
        return errors.New("not an App Engine context")
    }

    // Apply transaction modifications if we're in a transaction.
    if t := transactionFromContext(ctx); t != nil {
        if t.finished {
            return errors.New("transaction context has expired")
        }
        applyTransaction(in, &t.transaction)
    }

    var opts *appengine_internal.CallOptions
    if d, ok := ctx.Deadline(); ok {
        opts = &appengine_internal.CallOptions{
            Timeout: d.Sub(time.Now()),
        }
    }

    err := c.Call(service, method, in, out, opts)
    switch v := err.(type) {
    case *appengine_internal.APIError:
        return &APIError{
            Service: v.Service,
            Detail:  v.Detail,
            Code:    v.Code,
        }
    case *appengine_internal.CallError:
        return &CallError{
            Detail:  v.Detail,
            Code:    v.Code,
            Timeout: v.Timeout,
        }
    }
    return err
}
// AEContextNoTxn retrieves the raw "google.golang.org/appengine" compatible
// Context that's not part of a transaction.
func AEContextNoTxn(c context.Context) context.Context {
    aeCtx, _ := c.Value(prodContextNoTxnKey).(context.Context)
    if aeCtx == nil {
        return nil
    }
    aeCtx, err := appengine.Namespace(aeCtx, info.Get(c).GetNamespace())
    if err != nil {
        panic(err)
    }
    if deadline, ok := c.Deadline(); ok {
        aeCtx, _ = context.WithDeadline(aeCtx, deadline)
    }
    return aeCtx
}
func validateCall(ctx context.Context, serviceName, methodName string, callOpts *CallOptions) error {
    if serviceName == "" {
        return ErrNoServiceName
    }

    if len(methodName) > maxMethodSize {
        return ErrMethodTooLarge
    }

    if _, ok := ctx.Deadline(); !ok {
        return ErrTimeoutRequired
    }

    return nil
}
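An illustrative caller (the function name and one-second timeout are assumptions): since the validator returns ErrTimeoutRequired for deadline-less contexts, callers must attach a deadline before validating.

func callWithDeadline(serviceName, methodName string) error {
    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel()
    return validateCall(ctx, serviceName, methodName, nil)
}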