// exchange sends a query on the connection and hopes for a response.
func exchange(ctx context.Context, server, name string, qtype uint16) (*dnsMsg, error) {
	d := testHookDNSDialer()
	out := dnsMsg{
		dnsMsgHdr: dnsMsgHdr{
			recursion_desired: true,
		},
		question: []dnsQuestion{
			{name, qtype, dnsClassINET},
		},
	}
	for _, network := range []string{"udp", "tcp"} {
		c, err := d.dialDNS(ctx, network, server)
		if err != nil {
			return nil, err
		}
		defer c.Close()
		if d, ok := ctx.Deadline(); ok && !d.IsZero() {
			c.SetDeadline(d)
		}
		out.id = uint16(rand.Int()) ^ uint16(time.Now().UnixNano())
		in, err := c.dnsRoundTrip(&out)
		if err != nil {
			return nil, mapErr(err)
		}
		if in.truncated { // see RFC 5966
			continue
		}
		return in, nil
	}
	return nil, errors.New("no answer from DNS server")
}
// Ping will check to see if the server is up with an optional timeout on waiting for leader.
// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred.
func (c *HTTPClient) Ping(ctx context.Context) (time.Duration, string, error) {
	now := time.Now()
	u := c.url()
	u.Path = "ping"
	if ctx != nil {
		if dl, ok := ctx.Deadline(); ok {
			v := url.Values{}
			// wait_for_leader is the time remaining until the context deadline.
			v.Set("wait_for_leader", fmt.Sprintf("%.0fs", dl.Sub(now).Seconds()))
			u.RawQuery = v.Encode()
		}
	}
	req, err := http.NewRequest("GET", u.String(), nil)
	if err != nil {
		return 0, "", err
	}
	if ctx != nil {
		req = req.WithContext(ctx)
	}
	resp, err := c.do(req, nil, http.StatusNoContent)
	if err != nil {
		return 0, "", err
	}
	version := resp.Header.Get("X-Influxdb-Version")
	return time.Since(now), version, nil
}
func msgToStream(ctx context.Context, s inet.Stream, msg bsmsg.BitSwapMessage) error {
	deadline := time.Now().Add(sendMessageTimeout)
	if dl, ok := ctx.Deadline(); ok {
		deadline = dl
	}
	if err := s.SetWriteDeadline(deadline); err != nil {
		log.Warningf("error setting deadline: %s", err)
	}

	switch s.Protocol() {
	case ProtocolBitswap:
		if err := msg.ToNetV1(s); err != nil {
			log.Debugf("error: %s", err)
			return err
		}
	case ProtocolBitswapOne, ProtocolBitswapNoVers:
		if err := msg.ToNetV0(s); err != nil {
			log.Debugf("error: %s", err)
			return err
		}
	default:
		return fmt.Errorf("unrecognized protocol on remote: %s", s.Protocol())
	}

	if err := s.SetWriteDeadline(time.Time{}); err != nil {
		log.Warningf("error resetting deadline: %s", err)
	}
	return nil
}
// MountLabel performs a mount with the label and target being absolute paths
func (t *BaseOperations) MountLabel(ctx context.Context, label, target string) error {
	defer trace.End(trace.Begin(fmt.Sprintf("Mounting %s on %s", label, target)))

	if err := os.MkdirAll(target, 0600); err != nil {
		return fmt.Errorf("unable to create mount point %s: %s", target, err)
	}

	// convert the label to a filesystem path
	label = "/dev/disk/by-label/" + label

	// do..while ! timedout
	var timeout bool
	for timeout = false; !timeout; {
		_, err := os.Stat(label)
		if err == nil || !os.IsNotExist(err) {
			break
		}

		deadline, ok := ctx.Deadline()
		timeout = ok && time.Now().After(deadline)
	}

	if timeout {
		detail := fmt.Sprintf("timed out waiting for %s to appear", label)
		return errors.New(detail)
	}

	if err := Sys.Syscall.Mount(label, target, "ext4", syscall.MS_NOATIME, ""); err != nil {
		detail := fmt.Sprintf("mounting %s on %s failed: %s", label, target, err)
		return errors.New(detail)
	}

	return nil
}
// Connect to a Venue console.
func (v *VNC) Connect(ctx context.Context) error {
	if v.conn != nil {
		return fmt.Errorf("already connected")
	}

	// TODO(kward:20161122) Add check for a reasonably sufficient deadline.
	deadline, ok := ctx.Deadline()
	if !ok {
		return fmt.Errorf("context missing deadline")
	}

	log.Println("Connecting to VENUE VNC server...")
	addr := fmt.Sprintf("%s:%d", v.opts.host, v.opts.port)
	// Dial with the time remaining until the context deadline.
	nc, err := net.DialTimeout("tcp", addr, time.Until(deadline))
	if err != nil {
		return err
	}

	log.Println("Establishing session...")
	v.cfg = vnclib.NewClientConfig(v.opts.passwd)
	conn, err := vnclib.Connect(ctx, nc, v.cfg)
	if err != nil {
		return err
	}
	v.conn = conn

	// Initialize a framebuffer for updates.
	v.fb = NewFramebuffer(int(v.conn.FramebufferWidth()), int(v.conn.FramebufferHeight()))

	// Set up a channel to listen to server messages.
	v.cfg.ServerMessageCh = make(chan vnclib.ServerMessage)

	return nil
}
// WaitForTimeoutRaw waits after the context deadline then returns the context
// error. yarpc should interpret this as a handler timeout, which in turn
// should be forwarded to the yarpc client as a remote handler timeout.
func WaitForTimeoutRaw(ctx context.Context, reqMeta yarpc.ReqMeta, body []byte) ([]byte, yarpc.ResMeta, error) {
	if _, ok := ctx.Deadline(); !ok {
		return nil, nil, fmt.Errorf("no deadline set in context")
	}
	select {
	case <-ctx.Done():
		return nil, nil, ctx.Err()
	}
}
// deadline returns the earliest of:
//   - now+Timeout
//   - d.Deadline
//   - the context's deadline
// Or zero, if none of Timeout, Deadline, or context's deadline is set.
func (d *Dialer) deadline(ctx context.Context, now time.Time) (earliest time.Time) {
	if d.Timeout != 0 { // including negative, for historical reasons
		earliest = now.Add(d.Timeout)
	}
	if d, ok := ctx.Deadline(); ok {
		earliest = minNonzeroTime(earliest, d)
	}
	return minNonzeroTime(earliest, d.Deadline)
}
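// For reference, a minimal sketch (not the stdlib source) of the minNonzeroTime
// helper that deadline relies on above: it returns the earlier of two times,
// treating the zero time as "unset" rather than as the minimum.
func minNonzeroTimeSketch(a, b time.Time) time.Time {
	if a.IsZero() {
		return b
	}
	if b.IsZero() || a.Before(b) {
		return a
	}
	return b
}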
func (h handler) callHandler(ctx context.Context, call inboundCall, start time.Time) error {
	_, ok := ctx.Deadline()
	if !ok {
		return tchannel.ErrTimeoutRequired
	}

	treq := &transport.Request{
		Caller:    call.CallerName(),
		Service:   call.ServiceName(),
		Encoding:  transport.Encoding(call.Format()),
		Procedure: call.MethodString(),
	}

	ctx, headers, err := readRequestHeaders(ctx, call.Format(), call.Arg2Reader)
	if err != nil {
		return encoding.RequestHeadersDecodeError(treq, err)
	}
	treq.Headers = headers

	if tcall, ok := call.(tchannelCall); ok {
		tracer := h.deps.Tracer()
		ctx = tchannel.ExtractInboundSpan(ctx, tcall.InboundCall, headers.Items(), tracer)
	}

	body, err := call.Arg3Reader()
	if err != nil {
		return err
	}
	defer body.Close()
	treq.Body = body

	rw := newResponseWriter(treq, call)
	defer rw.Close() // TODO(abg): log if this errors

	treq, err = request.Validate(ctx, treq)
	if err != nil {
		return err
	}

	spec, err := h.Registry.GetHandlerSpec(treq.Service, treq.Procedure)
	if err != nil {
		return err
	}

	switch spec.Type() {
	case transport.Unary:
		treq, err = request.ValidateUnary(ctx, treq)
		if err == nil {
			err = internal.SafelyCallUnaryHandler(ctx, spec.Unary(), start, treq, rw)
		}
	default:
		err = errors.UnsupportedTypeError{Transport: "TChannel", Type: string(spec.Type())}
	}
	return err
}
func (s *Session) handleTread(cx context.Context, msg styxproto.Tread, file file) bool {
	var n int
	var err error

	if file.rwc == nil {
		s.conn.clearTag(msg.Tag())
		s.conn.Rerror(msg.Tag(), "file %s is not open for reading", file.name)
		s.conn.Flush()
		return true
	}

	go func() {
		// TODO(droyo) allocations could hurt here, come up with a better
		// way to do this (after measuring the impact, of course). The tricky bit
		// here is inherent to the 9P protocol; rather than using sentinel values,
		// each message is prefixed with its length. While this is generally a Good
		// Thing, this means we can't write directly to the connection, because
		// we don't know how much we are going to write until it's too late.
		buf := make([]byte, int(msg.Count()))

		if t, ok := cx.Deadline(); ok {
			styxfile.SetDeadline(file.rwc, t)
		}

		done := make(chan struct{})
		go func() {
			n, err = file.rwc.ReadAt(buf, msg.Offset())
			close(done)
		}()

		select {
		case <-cx.Done():
			// NOTE(droyo) deciding what to do here is somewhat
			// difficult. Many (but not all) Read/Write calls in Go can
			// be interrupted by calling Close. Obviously, calling Close
			// on a file will disrupt any current and future reads on the
			// same fid. However, that is preferable to leaking goroutines.
			file.rwc.Close()
			s.conn.clearTag(msg.Tag())
			return
		case <-done:
		}
		s.conn.clearTag(msg.Tag())

		if n > 0 {
			s.conn.Rread(msg.Tag(), buf[:n])
		} else if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
			s.conn.Rerror(msg.Tag(), "%v", err)
		} else {
			s.conn.Rread(msg.Tag(), buf[:n])
		}
		s.conn.Flush()
	}()
	return true
}
// ValidateUnary validates a unary request. This should be used after a successful v.Validate()
func (v *Validator) ValidateUnary(ctx context.Context) (*transport.Request, error) {
	if v.errTTL != nil {
		return nil, v.errTTL
	}

	_, hasDeadline := ctx.Deadline()
	if !hasDeadline {
		return nil, missingParametersError{Parameters: []string{"TTL"}}
	}

	return v.Request, nil
}
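// Hypothetical caller-side sketch (not from the yarpc source): the "TTL" that
// ValidateUnary requires is carried as a context deadline, so a caller would
// typically derive the context with context.WithTimeout before validating.
// The function name validateWithTTL is illustrative only.
func validateWithTTL(v *Validator, ttl time.Duration) (*transport.Request, error) {
	ctx, cancel := context.WithTimeout(context.Background(), ttl)
	defer cancel()
	return v.ValidateUnary(ctx)
}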
func backchannel(ctx context.Context, conn net.Conn) error {
	defer trace.End(trace.Begin("establish tether backchannel"))

	// HACK: currently RawConn doesn't implement timeout so throttle the spinning.
	// It does implement the Timeout methods so the intermediary code can be written
	// to support it, but they are stub implementations in the rawconn impl.

	// This needs to tick *faster* than the ticker in connection.go on the
	// portlayer side. The PL sends the first syn and if this isn't waiting,
	// alignment will take a few rounds (or it may never happen).
	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()

	// We run this in a separate goroutine because HandshakeServer
	// calls a Read on rawconn which is a blocking call which causes
	// the caller to block as well so this is the only way to cancel.
	// Calling Close() will unblock us and on the next tick we will
	// return ctx.Err()
	go func() {
		select {
		case <-ctx.Done():
			conn.Close()
		}
	}()

	for {
		select {
		case <-ticker.C:
			if ctx.Err() != nil {
				return ctx.Err()
			}

			deadline, ok := ctx.Deadline()
			if ok {
				conn.SetReadDeadline(deadline)
			}

			err := serial.HandshakeServer(conn)
			if err == nil {
				conn.SetReadDeadline(time.Time{})
				return nil
			}

			switch et := err.(type) {
			case *serial.HandshakeError:
				log.Debugf("HandshakeServer: %v", et)
			default:
				log.Errorf("HandshakeServer: %v", err)
			}
		}
	}
}
// Do a lookup for a single name, which must be rooted
// (otherwise answer will not find the answers).
func tryOneName(ctx context.Context, cfg *dnsConfig, name string, qtype uint16) (string, []dnsRR, error) {
	if len(cfg.servers) == 0 {
		return "", nil, &DNSError{Err: "no DNS servers", Name: name}
	}

	deadline := time.Now().Add(cfg.timeout)
	if old, ok := ctx.Deadline(); !ok || deadline.Before(old) {
		var cancel context.CancelFunc
		ctx, cancel = context.WithDeadline(ctx, deadline)
		defer cancel()
	}

	var lastErr error
	for i := 0; i < cfg.attempts; i++ {
		for _, server := range cfg.servers {
			msg, err := exchange(ctx, server, name, qtype)
			if err != nil {
				lastErr = &DNSError{
					Err:    err.Error(),
					Name:   name,
					Server: server,
				}
				if nerr, ok := err.(Error); ok && nerr.Timeout() {
					lastErr.(*DNSError).IsTimeout = true
				}
				continue
			}
			// libresolv continues to the next server when it receives
			// an invalid referral response. See golang.org/issue/15434.
			if msg.rcode == dnsRcodeSuccess && !msg.authoritative && !msg.recursion_available &&
				len(msg.answer) == 0 && len(msg.extra) == 0 {
				lastErr = &DNSError{Err: "lame referral", Name: name, Server: server}
				continue
			}

			cname, rrs, err := answer(name, server, msg, qtype)
			// If answer errored for rcodes dnsRcodeSuccess or dnsRcodeNameError,
			// it means the response in msg was not useful and trying another
			// server probably won't help. Return now in those cases.
			// TODO: indicate this in a more obvious way, such as a field on DNSError?
			if err == nil || msg.rcode == dnsRcodeSuccess || msg.rcode == dnsRcodeNameError {
				return cname, rrs, err
			}
			lastErr = err
		}
	}
	return "", nil, lastErr
}
// Find items from the mongo collection matching the provided lookup
func (m *Handler) Find(ctx context.Context, lookup *resource.Lookup, page, perPage int) (*resource.ItemList, error) {
	q, err := getQuery(lookup)
	if err != nil {
		return nil, err
	}
	s := getSort(lookup)

	c, err := m.c(ctx)
	if err != nil {
		return nil, err
	}
	defer m.close(c)

	var mItem mongoItem
	query := c.Find(q).Sort(s...)
	if perPage >= 0 {
		query.Skip((page - 1) * perPage).Limit(perPage)
	}

	// Apply context deadline if any
	if dl, ok := ctx.Deadline(); ok {
		dur := dl.Sub(time.Now())
		if dur < 0 {
			dur = 0
		}
		query.SetMaxTime(dur)
	}

	// Perform request
	iter := query.Iter()
	// Total is set to -1 because we have no easy way with MongoDB to compute this value
	// without performing two requests.
	list := &resource.ItemList{Page: page, Total: -1, Items: []*resource.Item{}}
	for iter.Next(&mItem) {
		// Check if the context is still ok before continuing
		if err = ctx.Err(); err != nil {
			// TODO bench this as net/context is using mutex under the hood
			iter.Close()
			return nil, err
		}
		list.Items = append(list.Items, newItem(&mItem))
	}
	if err := iter.Close(); err != nil {
		return nil, err
	}
	return list, err
}
// c returns the mongo collection managed by this storage handler
// from a Copy() of the mgo session.
func (m *Handler) c(ctx context.Context) (*mgo.Collection, error) {
	if err := ctx.Err(); err != nil {
		return nil, err
	}
	// With mgo, session.Copy() pulls a connection from the connection pool
	s := m.session.Copy()
	// Ensure safe mode is enabled in order to get errors
	s.EnsureSafe(&mgo.Safe{})
	// Set a timeout to match the context deadline if any
	if deadline, ok := ctx.Deadline(); ok {
		timeout := deadline.Sub(time.Now())
		if timeout <= 0 {
			timeout = 0
		}
		s.SetSocketTimeout(timeout)
		s.SetSyncTimeout(timeout)
	}
	return s.DB(m.dbName).C(m.colName), nil
}
// dialSerial connects to a list of addresses in sequence, returning
// either the first successful connection, or the first error.
func dialSerial(ctx context.Context, dp *dialParam, ras addrList) (Conn, error) {
	var firstErr error // The error from the first address is most relevant.

	for i, ra := range ras {
		select {
		case <-ctx.Done():
			return nil, &OpError{Op: "dial", Net: dp.network, Source: dp.LocalAddr, Addr: ra, Err: mapErr(ctx.Err())}
		default:
		}

		deadline, _ := ctx.Deadline()
		partialDeadline, err := partialDeadline(time.Now(), deadline, len(ras)-i)
		if err != nil {
			// Ran out of time.
			if firstErr == nil {
				firstErr = &OpError{Op: "dial", Net: dp.network, Source: dp.LocalAddr, Addr: ra, Err: err}
			}
			break
		}
		dialCtx := ctx
		if partialDeadline.Before(deadline) {
			var cancel context.CancelFunc
			dialCtx, cancel = context.WithDeadline(ctx, partialDeadline)
			defer cancel()
		}

		c, err := dialSingle(dialCtx, dp, ra)
		if err == nil {
			return c, nil
		}
		if firstErr == nil {
			firstErr = err
		}
	}

	if firstErr == nil {
		firstErr = &OpError{Op: "dial", Net: dp.network, Source: nil, Addr: nil, Err: errMissingAddress}
	}
	return nil, firstErr
}
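// A rough sketch (not the stdlib source) of the behavior of the partialDeadline
// helper that dialSerial calls above: it splits the time remaining before the
// overall deadline evenly across the addresses not yet tried, while keeping a
// sane minimum per attempt. The name partialDeadlineSketch and the use of
// errTimeout as the package's timeout error are illustrative assumptions.
func partialDeadlineSketch(now, deadline time.Time, addrsRemaining int) (time.Time, error) {
	if deadline.IsZero() {
		return deadline, nil // no overall deadline; each attempt is unbounded
	}
	timeRemaining := deadline.Sub(now)
	if timeRemaining <= 0 {
		return time.Time{}, errTimeout
	}
	// Tentatively allocate an equal share of the budget to each remaining address.
	timeout := timeRemaining / time.Duration(addrsRemaining)
	// Don't give a single attempt an unreasonably small slice of the budget.
	const saneMinimum = 2 * time.Second
	if timeout < saneMinimum {
		if timeRemaining < saneMinimum {
			timeout = timeRemaining
		} else {
			timeout = saneMinimum
		}
	}
	return now.Add(timeout), nil
}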
// exchange sends a query on the connection and hopes for a response.
func exchange(ctx context.Context, server, name string, qtype uint16, timeout time.Duration) (*dnsMsg, error) {
	d := testHookDNSDialer()
	out := dnsMsg{
		dnsMsgHdr: dnsMsgHdr{
			recursion_desired: true,
		},
		question: []dnsQuestion{
			{name, qtype, dnsClassINET},
		},
	}
	for _, network := range []string{"udp", "tcp"} {
		// TODO(mdempsky): Refactor so defers from UDP-based
		// exchanges happen before TCP-based exchange.

		ctx, cancel := context.WithDeadline(ctx, time.Now().Add(timeout))
		defer cancel()

		c, err := d.dialDNS(ctx, network, server)
		if err != nil {
			return nil, err
		}
		defer c.Close()
		if d, ok := ctx.Deadline(); ok && !d.IsZero() {
			c.SetDeadline(d)
		}
		out.id = uint16(rand.Int()) ^ uint16(time.Now().UnixNano())
		in, err := c.dnsRoundTrip(&out)
		if err != nil {
			return nil, mapErr(err)
		}
		if in.truncated { // see RFC 5966
			continue
		}
		return in, nil
	}
	return nil, errors.New("no answer from DNS server")
}
// SafelyCallUnaryHandler calls the handler h, recovering panics and timeout errors,
// converting them to yarpc errors. All other errors are passed through.
func SafelyCallUnaryHandler(
	ctx context.Context,
	h transport.UnaryHandler,
	start time.Time,
	req *transport.Request,
	resq transport.ResponseWriter,
) (err error) {
	// We recover panics from now on.
	defer func() {
		if r := recover(); r != nil {
			log.Printf("Unary handler panicked: %v\n%s", r, debug.Stack())
			err = fmt.Errorf("panic: %v", r)
		}
	}()

	err = h.Handle(ctx, req, resq)

	// The handler stopped work on context deadline.
	if err == context.DeadlineExceeded && err == ctx.Err() {
		deadline, _ := ctx.Deadline()
		err = errors.HandlerTimeoutError(req.Caller, req.Service, req.Procedure, deadline.Sub(start))
	}
	return err
}
func (fd *netFD) connect(ctx context.Context, la, ra syscall.Sockaddr) error {
	// Do not need to call fd.writeLock here,
	// because fd is not yet accessible to user,
	// so no concurrent operations are possible.
	if err := fd.init(); err != nil {
		return err
	}
	if deadline, ok := ctx.Deadline(); ok && !deadline.IsZero() {
		fd.setWriteDeadline(deadline)
		defer fd.setWriteDeadline(noDeadline)
	}
	if !canUseConnectEx(fd.net) {
		err := connectFunc(fd.sysfd, ra)
		return os.NewSyscallError("connect", err)
	}
	// ConnectEx windows API requires an unconnected, previously bound socket.
	if la == nil {
		switch ra.(type) {
		case *syscall.SockaddrInet4:
			la = &syscall.SockaddrInet4{}
		case *syscall.SockaddrInet6:
			la = &syscall.SockaddrInet6{}
		default:
			panic("unexpected type in connect")
		}
		if err := syscall.Bind(fd.sysfd, la); err != nil {
			return os.NewSyscallError("bind", err)
		}
	}
	// Call ConnectEx API.
	o := &fd.wop
	o.sa = ra

	// Wait for the goroutine converting context.Done into a write timeout
	// to exist, otherwise our caller might cancel the context and
	// cause fd.setWriteDeadline(aLongTimeAgo) to cancel a successful dial.
	done := make(chan bool) // must be unbuffered
	defer func() { done <- true }()
	go func() {
		select {
		case <-ctx.Done():
			// Force the runtime's poller to immediately give
			// up waiting for writability.
			fd.setWriteDeadline(aLongTimeAgo)
			<-done
		case <-done:
		}
	}()

	_, err := wsrv.ExecIO(o, "ConnectEx", func(o *operation) error {
		return connectExFunc(o.fd.sysfd, o.sa, nil, 0, nil, &o.o)
	})
	if err != nil {
		select {
		case <-ctx.Done():
			return mapErr(ctx.Err())
		default:
			if _, ok := err.(syscall.Errno); ok {
				err = os.NewSyscallError("connectex", err)
			}
			return err
		}
	}
	// Refresh socket properties.
	return os.NewSyscallError("setsockopt", syscall.Setsockopt(fd.sysfd, syscall.SOL_SOCKET, syscall.SO_UPDATE_CONNECT_CONTEXT, (*byte)(unsafe.Pointer(&fd.sysfd)), int32(unsafe.Sizeof(fd.sysfd))))
}
// DialContext connects to the address on the named network using
// the provided context.
//
// The provided Context must be non-nil. If the context expires before
// the connection is complete, an error is returned. Once successfully
// connected, any expiration of the context will not affect the
// connection.
//
// See func Dial for a description of the network and address
// parameters.
func (d *Dialer) DialContext(ctx context.Context, network, address string) (Conn, error) {
	if ctx == nil {
		panic("nil context")
	}
	deadline := d.deadline(ctx, time.Now())
	if !deadline.IsZero() {
		if d, ok := ctx.Deadline(); !ok || deadline.Before(d) {
			subCtx, cancel := context.WithDeadline(ctx, deadline)
			defer cancel()
			ctx = subCtx
		}
	}
	if oldCancel := d.Cancel; oldCancel != nil {
		subCtx, cancel := context.WithCancel(ctx)
		defer cancel()
		go func() {
			select {
			case <-oldCancel:
				cancel()
			case <-subCtx.Done():
			}
		}()
		ctx = subCtx
	}

	// Shadow the nettrace (if any) during resolve so Connect events don't fire for DNS lookups.
	resolveCtx := ctx
	if trace, _ := ctx.Value(nettrace.TraceKey{}).(*nettrace.Trace); trace != nil {
		shadow := *trace
		shadow.ConnectStart = nil
		shadow.ConnectDone = nil
		resolveCtx = context.WithValue(resolveCtx, nettrace.TraceKey{}, &shadow)
	}

	addrs, err := d.resolver().resolveAddrList(resolveCtx, "dial", network, address, d.LocalAddr)
	if err != nil {
		return nil, &OpError{Op: "dial", Net: network, Source: nil, Addr: nil, Err: err}
	}

	dp := &dialParam{
		Dialer:  *d,
		network: network,
		address: address,
	}

	var primaries, fallbacks addrList
	if d.DualStack && network == "tcp" {
		primaries, fallbacks = addrs.partition(isIPv4)
	} else {
		primaries = addrs
	}

	var c Conn
	if len(fallbacks) > 0 {
		c, err = dialParallel(ctx, dp, primaries, fallbacks)
	} else {
		c, err = dialSerial(ctx, dp, primaries)
	}
	if err != nil {
		return nil, err
	}

	if tc, ok := c.(*TCPConn); ok && d.KeepAlive > 0 {
		setKeepAlive(tc.fd, true)
		setKeepAlivePeriod(tc.fd, d.KeepAlive)
		testHookSetKeepAlive()
	}
	return c, nil
}
// Call makes an HTTP request
func (o *Outbound) Call(ctx context.Context, treq *transport.Request) (*transport.Response, error) {
	if !o.started.Load() {
		// panic because there's no recovery from this
		panic(errOutboundNotStarted)
	}

	start := time.Now()
	deadline, _ := ctx.Deadline()
	ttl := deadline.Sub(start)

	peer, err := o.getPeerForRequest(ctx, treq)
	if err != nil {
		return nil, err
	}

	endRequest := peer.StartRequest()
	defer endRequest()

	req, err := o.createRequest(peer, treq)
	if err != nil {
		return nil, err
	}

	req.Header = applicationHeaders.ToHTTPHeaders(treq.Headers, nil)
	ctx, req, span := o.withOpentracingSpan(ctx, req, treq, start)
	defer span.Finish()
	req = o.withCoreHeaders(req, treq, ttl)

	client, err := o.getHTTPClient(peer)
	if err != nil {
		return nil, err
	}

	response, err := client.Do(req.WithContext(ctx))

	if err != nil {
		// Workaround borrowed from ctxhttp until
		// https://github.com/golang/go/issues/17711 is resolved.
		select {
		case <-ctx.Done():
			err = ctx.Err()
		default:
		}

		span.SetTag("error", true)
		span.LogEvent(err.Error())
		if err == context.DeadlineExceeded {
			end := time.Now()
			return nil, errors.ClientTimeoutError(treq.Service, treq.Procedure, end.Sub(start))
		}
		return nil, err
	}

	span.SetTag("http.status_code", response.StatusCode)
	if response.StatusCode >= 200 && response.StatusCode < 300 {
		appHeaders := applicationHeaders.FromHTTPHeaders(
			response.Header, transport.NewHeaders())
		return &transport.Response{
			Headers: appHeaders,
			Body:    response.Body,
		}, nil
	}

	return nil, getErrFromResponse(response)
}
func (fd *netFD) connect(ctx context.Context, la, ra syscall.Sockaddr) error {
	// Do not need to call fd.writeLock here,
	// because fd is not yet accessible to user,
	// so no concurrent operations are possible.
	switch err := connectFunc(fd.sysfd, ra); err {
	case syscall.EINPROGRESS, syscall.EALREADY, syscall.EINTR:
	case nil, syscall.EISCONN:
		select {
		case <-ctx.Done():
			return mapErr(ctx.Err())
		default:
		}
		if err := fd.init(); err != nil {
			return err
		}
		return nil
	case syscall.EINVAL:
		// On Solaris we can see EINVAL if the socket has
		// already been accepted and closed by the server.
		// Treat this as a successful connection--writes to
		// the socket will see EOF. For details and a test
		// case in C see https://golang.org/issue/6828.
		if runtime.GOOS == "solaris" {
			return nil
		}
		fallthrough
	default:
		return os.NewSyscallError("connect", err)
	}
	if err := fd.init(); err != nil {
		return err
	}
	if deadline, _ := ctx.Deadline(); !deadline.IsZero() {
		fd.setWriteDeadline(deadline)
		defer fd.setWriteDeadline(noDeadline)
	}

	// Wait for the goroutine converting context.Done into a write timeout
	// to exist, otherwise our caller might cancel the context and
	// cause fd.setWriteDeadline(aLongTimeAgo) to cancel a successful dial.
	done := make(chan bool) // must be unbuffered
	defer func() { done <- true }()
	go func() {
		select {
		case <-ctx.Done():
			// Force the runtime's poller to immediately give
			// up waiting for writability.
			fd.setWriteDeadline(aLongTimeAgo)
			<-done
		case <-done:
		}
	}()

	for {
		// Performing multiple connect system calls on a
		// non-blocking socket under Unix variants does not
		// necessarily result in earlier errors being
		// returned. Instead, once runtime-integrated network
		// poller tells us that the socket is ready, get the
		// SO_ERROR socket option to see if the connection
		// succeeded or failed. See issue 7474 for further
		// details.
		if err := fd.pd.waitWrite(); err != nil {
			select {
			case <-ctx.Done():
				return mapErr(ctx.Err())
			default:
			}
			return err
		}
		nerr, err := getsockoptIntFunc(fd.sysfd, syscall.SOL_SOCKET, syscall.SO_ERROR)
		if err != nil {
			return os.NewSyscallError("getsockopt", err)
		}
		switch err := syscall.Errno(nerr); err {
		case syscall.EINPROGRESS, syscall.EALREADY, syscall.EINTR:
		case syscall.Errno(0), syscall.EISCONN:
			if runtime.GOOS != "darwin" {
				return nil
			}
			// See golang.org/issue/14548.
			// On Darwin, multiple connect system calls on
			// a non-blocking socket never harm SO_ERROR.
			switch err := connectFunc(fd.sysfd, ra); err {
			case nil, syscall.EISCONN:
				return nil
			}
		default:
			return os.NewSyscallError("getsockopt", err)
		}
	}
}
func (fd *netFD) connect(ctx context.Context, la, ra syscall.Sockaddr) (ret error) {
	// Do not need to call fd.writeLock here,
	// because fd is not yet accessible to user,
	// so no concurrent operations are possible.
	switch err := connectFunc(fd.sysfd, ra); err {
	case syscall.EINPROGRESS, syscall.EALREADY, syscall.EINTR:
	case nil, syscall.EISCONN:
		select {
		case <-ctx.Done():
			return mapErr(ctx.Err())
		default:
		}
		if err := fd.init(); err != nil {
			return err
		}
		return nil
	case syscall.EINVAL:
		// On Solaris we can see EINVAL if the socket has
		// already been accepted and closed by the server.
		// Treat this as a successful connection--writes to
		// the socket will see EOF. For details and a test
		// case in C see https://golang.org/issue/6828.
		if runtime.GOOS == "solaris" {
			return nil
		}
		fallthrough
	default:
		return os.NewSyscallError("connect", err)
	}
	if err := fd.init(); err != nil {
		return err
	}
	if deadline, _ := ctx.Deadline(); !deadline.IsZero() {
		fd.setWriteDeadline(deadline)
		defer fd.setWriteDeadline(noDeadline)
	}

	// Start the "interrupter" goroutine, if this context might be canceled.
	// (The background context cannot)
	//
	// The interrupter goroutine waits for the context to be done and
	// interrupts the dial (by altering the fd's write deadline, which
	// wakes up waitWrite).
	if ctx != context.Background() {
		// Wait for the interrupter goroutine to exit before returning
		// from connect.
		done := make(chan struct{})
		interruptRes := make(chan error)
		defer func() {
			close(done)
			if ctxErr := <-interruptRes; ctxErr != nil && ret == nil {
				// The interrupter goroutine called setWriteDeadline,
				// but the connect code below had returned from
				// waitWrite already and did a successful connect (ret
				// == nil). Because we've now poisoned the connection
				// by making it unwritable, don't return a successful
				// dial. This was issue 16523.
				ret = ctxErr
				fd.Close() // prevent a leak
			}
		}()
		go func() {
			select {
			case <-ctx.Done():
				// Force the runtime's poller to immediately give up
				// waiting for writability, unblocking waitWrite
				// below.
				fd.setWriteDeadline(aLongTimeAgo)
				testHookCanceledDial()
				interruptRes <- ctx.Err()
			case <-done:
				interruptRes <- nil
			}
		}()
	}

	for {
		// Performing multiple connect system calls on a
		// non-blocking socket under Unix variants does not
		// necessarily result in earlier errors being
		// returned. Instead, once runtime-integrated network
		// poller tells us that the socket is ready, get the
		// SO_ERROR socket option to see if the connection
		// succeeded or failed. See issue 7474 for further
		// details.
		if err := fd.pd.waitWrite(); err != nil {
			select {
			case <-ctx.Done():
				return mapErr(ctx.Err())
			default:
			}
			return err
		}
		nerr, err := getsockoptIntFunc(fd.sysfd, syscall.SOL_SOCKET, syscall.SO_ERROR)
		if err != nil {
			return os.NewSyscallError("getsockopt", err)
		}
		switch err := syscall.Errno(nerr); err {
		case syscall.EINPROGRESS, syscall.EALREADY, syscall.EINTR:
		case syscall.Errno(0), syscall.EISCONN:
			if runtime.GOOS != "darwin" {
				return nil
			}
			// See golang.org/issue/14548.
			// On Darwin, multiple connect system calls on
			// a non-blocking socket never harm SO_ERROR.
			switch err := connectFunc(fd.sysfd, ra); err {
			case nil, syscall.EISCONN:
				return nil
			}
		default:
			return os.NewSyscallError("getsockopt", err)
		}
	}
}