func TCProxy(srvConn, cliConn *net.TCPConn) {
    // Channels to wait on the close event for each connection.
    serverClosed := make(chan struct{}, 1)
    clientClosed := make(chan struct{}, 1)

    go broker(srvConn, cliConn, clientClosed)
    go broker(cliConn, srvConn, serverClosed)

    // Wait for one half of the proxy to exit, then trigger a shutdown of the
    // other half by calling CloseRead(). This will break the read loop in the
    // broker and allow us to fully close the connection cleanly without a
    // "use of closed network connection" error.
    var waitFor chan struct{}
    select {
    case <-clientClosed:
        // The client closed first, and any more packets from the server
        // aren't useful, so we can optionally SetLinger(0) here to recycle
        // the port faster.
        srvConn.SetLinger(0)
        srvConn.CloseRead()
        waitFor = serverClosed
    case <-serverClosed:
        cliConn.CloseRead()
        waitFor = clientClosed
    }

    // Wait for the other connection to close.
    // This "waitFor" pattern isn't required, but gives us a way to track the
    // connection and ensure all copies terminate correctly; we can trigger
    // stats on entry and deferred exit of this function.
    <-waitFor
}
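// TCProxy relies on a broker helper that isn't shown above. A minimal
// sketch, assuming broker(dst, src, srcClosed) copies src into dst and
// signals srcClosed when src's read loop ends (the logger is also an
// assumption; the original may use a different one):
func broker(dst, src net.Conn, srcClosed chan struct{}) {
    // io.Copy returns when src reaches EOF or either side errors out;
    // the CloseRead() call in TCProxy unblocks this read loop.
    if _, err := io.Copy(dst, src); err != nil {
        log.Printf("copy error: %v", err)
    }
    if err := src.Close(); err != nil {
        log.Printf("close error: %v", err)
    }
    srcClosed <- struct{}{}
}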
func handleClient(source *net.TCPConn) {
    atomic.AddInt32(&daemon.status.actives, 1)
    defer func() {
        atomic.AddInt32(&daemon.status.actives, -1)
        daemon.wg.Done()
    }()

    // Read the request.
    err, req := ReadReq(source)
    if err != nil {
        source.Close()
        Error("conn:%v, read req failed:%v", source.RemoteAddr(), err)
        return
    }

    source.SetKeepAlive(true)
    source.SetKeepAlivePeriod(time.Second * 60)
    source.SetLinger(-1)

    // Judge: new conn or reuse conn.
    switch req := req.(type) {
    case *NewConnReq:
        Info("new conn request:%v", req)
        onNewConn(source, req)
    case *ReuseConnReq:
        Info("reuse conn request:%v", req)
        onReuseConn(source, req)
    default:
        Info("unknown request:%v", req)
        source.Close()
        return
    }
}
func handleClient(pf *PF, source *net.TCPConn) {
    atomic.AddInt32(&pf.status.actives, 1)
    defer func() {
        atomic.AddInt32(&pf.status.actives, -1)
        pf.wg.Done()
    }()

    settings := pf.settings
    host := chooseHost(settings.weight, settings.Hosts)
    if host == nil {
        source.Close()
        logger.Println("choose host failed")
        return
    }

    dest, err := net.DialTCP("tcp", nil, host.addr)
    if err != nil {
        source.Close()
        logger.Printf("connect to %s failed: %s", host.addr, err.Error())
        return
    }

    source.SetKeepAlive(true)
    source.SetKeepAlivePeriod(time.Second * 60)
    source.SetLinger(-1)
    dest.SetLinger(-1)

    go forward(source, dest)
    forward(dest, source)
    //logger.Printf("forward finished, %v -> %v", source.RemoteAddr(), host)
}
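// The forward and chooseHost helpers aren't shown. A minimal sketch of
// forward, assuming forward(src, dest) drains src into dest and then
// half-closes both sides so the opposite copy loop unblocks:
func forward(src, dest *net.TCPConn) {
    defer dest.CloseWrite() // propagate EOF to the peer once src is drained
    defer src.CloseRead()
    io.Copy(dest, src)
}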
func (server *TcpServer) Start() (err error) {
    for {
        var conn *net.TCPConn
        if conn, err = server.TCPListener.AcceptTCP(); err != nil {
            return err
        }
        if server.keepAlive != nil {
            if err := conn.SetKeepAlive(server.keepAlive.(bool)); err != nil {
                return err
            }
        }
        if server.keepAlivePeriod != nil {
            if kap, ok := (net.Conn(conn)).(iKeepAlivePeriod); ok {
                if err := kap.SetKeepAlivePeriod(server.keepAlivePeriod.(time.Duration)); err != nil {
                    return err
                }
            }
        }
        if server.linger != nil {
            if err := conn.SetLinger(server.linger.(int)); err != nil {
                return err
            }
        }
        if server.noDelay != nil {
            if err := conn.SetNoDelay(server.noDelay.(bool)); err != nil {
                return err
            }
        }
        if server.readBuffer != nil {
            if err := conn.SetReadBuffer(server.readBuffer.(int)); err != nil {
                return err
            }
        }
        if server.writerBuffer != nil {
            if err := conn.SetWriteBuffer(server.writerBuffer.(int)); err != nil {
                return err
            }
        }
        if server.deadline != nil {
            if err := conn.SetDeadline(server.deadline.(time.Time)); err != nil {
                return err
            }
        }
        if server.readDeadline != nil {
            if err := conn.SetReadDeadline(server.readDeadline.(time.Time)); err != nil {
                return err
            }
        }
        if server.writerDeadline != nil {
            if err := conn.SetWriteDeadline(server.writerDeadline.(time.Time)); err != nil {
                return err
            }
        }
        if server.config != nil {
            // An accepted server-side connection must be wrapped with
            // tls.Server; the original called tls.Client here, which would
            // run the client half of the handshake against a connecting client.
            server.ServeTCP(tls.Server(conn, server.config))
        } else {
            server.ServeTCP(conn)
        }
    }
}
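// The iKeepAlivePeriod assertion above implies an interface like the
// following; a minimal sketch of what the snippet assumes (used to probe
// for SetKeepAlivePeriod on platforms and Go versions that support it):
type iKeepAlivePeriod interface {
    SetKeepAlivePeriod(d time.Duration) error
}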
// Set TCP socket options on a new SMTP connection.
func (s *SMTPService) SetClientOptions(conn *net.TCPConn) error {
    if err := conn.SetKeepAlive(false); err != nil {
        log.Error("%s: SetKeepAlive: %v", conn.RemoteAddr(), err)
        return err
    }
    if err := conn.SetLinger(-1); err != nil {
        log.Error("%s: SetLinger: %v", conn.RemoteAddr(), err)
        return err
    }
    return nil
}
func configureConn(conn *net.TCPConn, spec *ConnectionSpec) {
    // These two -- the most important -- are causing problems on my osx/64,
    // where a "service unavailable" pops up in the async reads,
    // but we absolutely need to be able to use timeouts.
    //      conn.SetReadTimeout(spec.rTimeout)
    //      conn.SetWriteTimeout(spec.wTimeout)
    conn.SetLinger(spec.lingerspec)
    conn.SetKeepAlive(spec.keepalive)
    conn.SetReadBuffer(spec.rBufSize)
    conn.SetWriteBuffer(spec.wBufSize)
}
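// SetReadTimeout and SetWriteTimeout were removed from the standard library
// in Go 1; current Go expresses per-operation timeouts as absolute deadlines.
// A minimal sketch of the modern equivalent (the helper name is an
// assumption, and rTimeout/wTimeout are taken to be time.Duration values):
func setConnTimeouts(conn *net.TCPConn, rTimeout, wTimeout time.Duration) {
    // Deadlines are absolute points in time, so they must be re-armed
    // before each Read/Write rather than set once per connection.
    conn.SetReadDeadline(time.Now().Add(rTimeout))
    conn.SetWriteDeadline(time.Now().Add(wTimeout))
}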
func loop(conn *net.TCPConn) {
    defer conn.Close()
    conn.SetLinger(5)
    conn.SetKeepAlive(false)
    conn.SetNoDelay(true)
    now := time.Now()
    conn.SetReadDeadline(now.Add(readWriteLimit))
    if _, err := io.ReadFull(conn, buf); err == nil {
        conn.Write(response)
    }
}
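// loop relies on package-level state that isn't shown. A plausible sketch
// with assumed names and values; note that a single shared buf is only safe
// if loop never runs concurrently -- concurrent handlers would each need
// their own buffer:
var (
    readWriteLimit = 10 * time.Second // per-connection read budget (assumed value)
    buf            = make([]byte, 64) // fixed-size request (assumed size)
    response       = []byte("OK\r\n") // canned reply (assumed contents)
)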
func (self *Link) Pump(conn *net.TCPConn) {
    conn.SetKeepAlive(true)
    conn.SetLinger(-1)
    self.conn = conn

    self.wg.Add(1)
    go self.pumpIn()
    self.wg.Add(1)
    go self.pumpOut()
    self.wg.Wait()

    Info("link(%d) closed", self.id)
}
func setTCPOptions(conn *net.TCPConn) {
    var err error
    if err = conn.SetLinger(0); err != nil {
        l.Infoln(err)
    }
    if err = conn.SetNoDelay(false); err != nil {
        l.Infoln(err)
    }
    if err = conn.SetKeepAlivePeriod(60 * time.Second); err != nil {
        l.Infoln(err)
    }
    if err = conn.SetKeepAlive(true); err != nil {
        l.Infoln(err)
    }
}
// SetTCPOptions sets syncthing's default TCP options on a TCP connection.
func SetTCPOptions(conn *net.TCPConn) error {
    var err error
    if err = conn.SetLinger(0); err != nil {
        return err
    }
    if err = conn.SetNoDelay(false); err != nil {
        return err
    }
    if err = conn.SetKeepAlivePeriod(60 * time.Second); err != nil {
        return err
    }
    if err = conn.SetKeepAlive(true); err != nil {
        return err
    }
    return nil
}
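// A usage sketch: applying SetTCPOptions to freshly accepted connections.
// The surrounding accept loop and the handleConn handler are assumptions
// added for illustration, not part of the original:
func acceptLoop(listener *net.TCPListener) {
    for {
        conn, err := listener.AcceptTCP()
        if err != nil {
            return
        }
        if err := SetTCPOptions(conn); err != nil {
            conn.Close()
            continue
        }
        go handleConn(conn) // handleConn is a hypothetical handler
    }
}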
func newTunnel(conn *net.TCPConn) *Tunnel {
    conn.SetKeepAlive(true)
    conn.SetKeepAlivePeriod(time.Second * 60)
    conn.SetLinger(-1)
    // conn.SetWriteBuffer(64 * 1024)
    // conn.SetReadBuffer(64 * 1024)
    desc := fmt.Sprintf("tunnel[%s <-> %s]", conn.LocalAddr(), conn.RemoteAddr())
    return &Tunnel{
        wlock:  new(sync.Mutex),
        writer: NewRC4Writer(conn, options.RC4Key),
        rlock:  new(sync.Mutex),
        reader: NewRC4Reader(bufio.NewReaderSize(conn, 8192), options.RC4Key),
        conn:   conn,
        desc:   desc,
    }
}
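// NewRC4Writer and NewRC4Reader aren't shown. A minimal sketch of what they
// could look like on top of crypto/rc4 and crypto/cipher, assuming the key
// is a []byte; each direction needs its own cipher state, and RC4 itself is
// long deprecated for real use:
func NewRC4Writer(w io.Writer, key []byte) io.Writer {
    c, err := rc4.NewCipher(key) // key must be 1..256 bytes
    if err != nil {
        panic(err) // assumed validated upstream
    }
    return cipher.StreamWriter{S: c, W: w}
}

func NewRC4Reader(r io.Reader, key []byte) io.Reader {
    c, err := rc4.NewCipher(key)
    if err != nil {
        panic(err)
    }
    return cipher.StreamReader{S: c, R: r}
}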
func (server *Server) setupSocket(conn *net.TCPConn) (err error) {
    if err = conn.SetLinger(0); err != nil {
        return
    }
    if server.ConfigReadBuffer != 0 {
        if err = conn.SetReadBuffer(int(server.ConfigReadBuffer)); err != nil {
            return
        }
    }
    if err = conn.SetKeepAlive(true); err != nil {
        return
    }
    if err = conn.SetReadDeadline(time.Now().Add(server.ConfigIdleTimeout)); err != nil {
        return
    }
    return
}
func handleClient(source *net.TCPConn) {
    atomic.AddInt32(&daemon.status.actives, 1)
    defer func() {
        atomic.AddInt32(&daemon.status.actives, -1)
        daemon.wg.Done()
    }()

    Info("accept new connection: %v", source.RemoteAddr())
    source.SetKeepAlive(true)
    source.SetKeepAlivePeriod(time.Second * 60)
    source.SetLinger(-1)

    // Read the request, under a 30-second read timeout.
    source.SetReadDeadline(time.Now().Add(time.Second * 30))
    err, req := ReadReq(source)
    if err != nil {
        source.Close()
        Error("conn:%v, read req failed: %v", source.RemoteAddr(), err)
        return
    }
    // Cancel the read timeout (the zero time means no deadline).
    var t time.Time
    source.SetReadDeadline(t)

    // Judge: new conn or reuse conn.
    switch req := req.(type) {
    case *NewConnReq:
        Info("new conn request:%v", req)
        onNewConn(source, req)
    case *ReuseConnReq:
        Info("reuse conn request:%v", req)
        onReuseConn(source, req)
    default:
        Info("unknown request:%v", req)
        source.Close()
        return
    }

    Info("connection close: %v", source.RemoteAddr())
}
// ServeTCP ...
func (service *TcpService) ServeTCP(conn *net.TCPConn) (err error) {
    if service.keepAlive != nil {
        if err = conn.SetKeepAlive(service.keepAlive.(bool)); err != nil {
            return err
        }
    }
    if service.keepAlivePeriod != nil {
        if kap, ok := (net.Conn(conn)).(iKeepAlivePeriod); ok {
            if err = kap.SetKeepAlivePeriod(service.keepAlivePeriod.(time.Duration)); err != nil {
                return err
            }
        }
    }
    if service.linger != nil {
        if err = conn.SetLinger(service.linger.(int)); err != nil {
            return err
        }
    }
    if service.noDelay != nil {
        if err = conn.SetNoDelay(service.noDelay.(bool)); err != nil {
            return err
        }
    }
    if service.readBuffer != nil {
        if err = conn.SetReadBuffer(service.readBuffer.(int)); err != nil {
            return err
        }
    }
    if service.writeBuffer != nil {
        if err = conn.SetWriteBuffer(service.writeBuffer.(int)); err != nil {
            return err
        }
    }
    if service.config != nil {
        tlsConn := tls.Server(conn, service.config)
        // Surface handshake failures instead of silently ignoring them,
        // as the original did.
        if err = tlsConn.Handshake(); err != nil {
            return err
        }
        return service.Serve(tlsConn)
    }
    return service.Serve(conn)
}
// proxy brokers a connection from src to dst
func proxy(dst, src *net.TCPConn) error {
    // Channels to wait on the close event for each connection.
    serverClosed := make(chan struct{}, 1)
    clientClosed := make(chan struct{}, 1)
    errors := make(chan error, 2)

    go broker(dst, src, clientClosed, errors)
    go broker(src, dst, serverClosed, errors)

    // Wait for one half of the proxy to exit, then trigger a shutdown of the
    // other half by closing it. This will break the read loop in the broker
    // and allow us to fully close the connection cleanly without a
    // "use of closed network connection" error.
    var waitFor chan struct{}
    select {
    case <-clientClosed:
        // The client closed first, and any more packets from the server
        // aren't useful, so we can optionally SetLinger(0) here to recycle
        // the port faster.
        dst.SetLinger(0)
        dst.Close()
        waitFor = serverClosed
    case <-serverClosed:
        src.Close()
        waitFor = clientClosed
    case err := <-errors:
        src.Close()
        dst.SetLinger(0)
        dst.Close()
        return err
    }

    // Wait for the other connection to close.
    <-waitFor
    return nil
}
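// This variant's broker reports copy failures on a shared error channel.
// The original isn't shown; a minimal sketch consistent with how proxy
// consumes the channels above:
func broker(dst, src net.Conn, srcClosed chan struct{}, errors chan error) {
    if _, err := io.Copy(dst, src); err != nil {
        errors <- err // buffered (capacity 2), so this never blocks
    }
    srcClosed <- struct{}{}
}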
func NewTunnel(conn *net.TCPConn) *Tunnel {
    conn.SetKeepAlive(true)
    conn.SetKeepAlivePeriod(time.Second * 60)
    conn.SetLinger(-1)
    return &Tunnel{
        make(chan *TunnelPayload, 65535),
        make(chan *TunnelPayload, 65535),
        conn,
    }
}
func (server *TcpServer) handle() (err error) {
    defer func() {
        if e := recover(); e != nil && err == nil {
            err = fmt.Errorf("%v", e)
        }
    }()
    if server.listener == nil {
        return nil
    }
    var conn *net.TCPConn
    if conn, err = server.listener.AcceptTCP(); err != nil {
        return err
    }
    if server.keepAlive != nil {
        if err = conn.SetKeepAlive(server.keepAlive.(bool)); err != nil {
            return err
        }
    }
    if server.keepAlivePeriod != nil {
        if kap, ok := (net.Conn(conn)).(iKeepAlivePeriod); ok {
            if err = kap.SetKeepAlivePeriod(server.keepAlivePeriod.(time.Duration)); err != nil {
                return err
            }
        }
    }
    if server.linger != nil {
        if err = conn.SetLinger(server.linger.(int)); err != nil {
            return err
        }
    }
    if server.noDelay != nil {
        if err = conn.SetNoDelay(server.noDelay.(bool)); err != nil {
            return err
        }
    }
    if server.readBuffer != nil {
        if err = conn.SetReadBuffer(server.readBuffer.(int)); err != nil {
            return err
        }
    }
    if server.writerBuffer != nil {
        if err = conn.SetWriteBuffer(server.writerBuffer.(int)); err != nil {
            return err
        }
    }
    if server.deadline != nil {
        if err = conn.SetDeadline(server.deadline.(time.Time)); err != nil {
            return err
        }
    }
    if server.readDeadline != nil {
        if err = conn.SetReadDeadline(server.readDeadline.(time.Time)); err != nil {
            return err
        }
    }
    if server.writerDeadline != nil {
        if err = conn.SetWriteDeadline(server.writerDeadline.(time.Time)); err != nil {
            return err
        }
    }
    if server.config != nil {
        // As in Start() above, an accepted server-side connection must be
        // wrapped with tls.Server rather than tls.Client.
        server.ServeTCP(tls.Server(conn, server.config))
    } else {
        server.ServeTCP(conn)
    }
    return nil
}
func SetConnParam(conn *net.TCPConn) {
    conn.SetNoDelay(false)
    conn.SetKeepAlive(true)
    conn.SetLinger(-1)
}
func (service *TcpService) ServeTCP(conn *net.TCPConn) (err error) {
    if service.keepAlive != nil {
        if err = conn.SetKeepAlive(service.keepAlive.(bool)); err != nil {
            return err
        }
    }
    if service.keepAlivePeriod != nil {
        if kap, ok := (net.Conn(conn)).(iKeepAlivePeriod); ok {
            if err = kap.SetKeepAlivePeriod(service.keepAlivePeriod.(time.Duration)); err != nil {
                return err
            }
        }
    }
    if service.linger != nil {
        if err = conn.SetLinger(service.linger.(int)); err != nil {
            return err
        }
    }
    if service.noDelay != nil {
        if err = conn.SetNoDelay(service.noDelay.(bool)); err != nil {
            return err
        }
    }
    if service.readBuffer != nil {
        if err = conn.SetReadBuffer(service.readBuffer.(int)); err != nil {
            return err
        }
    }
    if service.writeBuffer != nil {
        if err = conn.SetWriteBuffer(service.writeBuffer.(int)); err != nil {
            return err
        }
    }
    if service.timeout != nil {
        if err = conn.SetDeadline(time.Now().Add(service.timeout.(time.Duration))); err != nil {
            return err
        }
    }
    go func(conn net.Conn) {
        if service.config != nil {
            tlsConn := tls.Server(conn, service.config)
            // Close the connection if the TLS handshake fails rather than
            // proceeding with a half-initialized session (the original
            // ignored the Handshake error).
            if err := tlsConn.Handshake(); err != nil {
                conn.Close()
                return
            }
            conn = tlsConn
        }
        var data []byte
        var err error
        for {
            if service.readTimeout != nil {
                err = conn.SetReadDeadline(time.Now().Add(service.readTimeout.(time.Duration)))
            }
            if err == nil {
                data, err = receiveDataOverTcp(conn)
            }
            if err == nil {
                data = service.Handle(data, conn)
                if service.writeTimeout != nil {
                    err = conn.SetWriteDeadline(time.Now().Add(service.writeTimeout.(time.Duration)))
                }
                if err == nil {
                    err = sendDataOverTcp(conn, data)
                }
            }
            if err != nil {
                conn.Close()
                break
            }
        }
    }(conn)
    return nil
}
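// receiveDataOverTcp and sendDataOverTcp aren't shown. A minimal sketch of
// one common framing they could implement -- a 4-byte big-endian length
// prefix -- offered purely as an assumption about the wire format:
func receiveDataOverTcp(conn net.Conn) ([]byte, error) {
    var header [4]byte
    if _, err := io.ReadFull(conn, header[:]); err != nil {
        return nil, err
    }
    data := make([]byte, binary.BigEndian.Uint32(header[:]))
    _, err := io.ReadFull(conn, data)
    return data, err
}

func sendDataOverTcp(conn net.Conn, data []byte) error {
    var header [4]byte
    binary.BigEndian.PutUint32(header[:], uint32(len(data)))
    if _, err := conn.Write(header[:]); err != nil {
        return err
    }
    _, err := conn.Write(data)
    return err
}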
// proxyTCP is responsible for handling a new TCP connection.
func (p *Proxy) proxyTCP(conn *net.TCPConn) {
    // We can abort the connection immediately, in case of an Abort action.
    // FIXME: Need to have a way to abort in the middle of a connection too.
    rule := p.getRule(Request, "", nil)
    t := time.Now()

    // FIXME: Add proper delay support for TCP channels.
    if (rule.DelayProbability > 0.0) && drawAndDecide(rule.DelayDistribution, rule.DelayProbability) {
        proxylog.WithFields(logrus.Fields{
            "dest":     p.name,
            "source":   config.ProxyFor,
            "protocol": "tcp",
            "action":   "delay",
            "rule":     rule.ToConfig(),
            "testid":   p.getmyID(),
            "ts":       t.Format("2006-01-02T15:04:05.999999"),
        }).Info("Stream")
        time.Sleep(rule.DelayTime)
    }

    if (rule.AbortProbability > 0.0) && drawAndDecide(rule.AbortDistribution, rule.AbortProbability) {
        proxylog.WithFields(logrus.Fields{
            "dest":     p.name,
            "source":   config.ProxyFor,
            "protocol": "tcp",
            "action":   "abort",
            "rule":     rule.ToConfig(),
            "testid":   p.getmyID(),
            "ts":       t.Format("2006-01-02T15:04:05.999999"),
        }).Info("Stream")
        // SetLinger(0) makes Close send a RST, so the abort is visible to
        // the client immediately.
        conn.SetLinger(0)
        conn.Close()
        return
    }

    remotehost := p.lb.GetHost()
    rAddr, err := net.ResolveTCPAddr("tcp", remotehost)
    if err != nil {
        globallog.Error("Could not resolve remote address: " + err.Error())
        conn.Close()
        return
    }

    rConn, err := net.DialTCP("tcp", nil, rAddr)
    if err != nil {
        globallog.WithField("errmsg", err.Error()).Error("Could not connect to remote destination")
        conn.Close()
        return
    }

    // Make sure to copy data in both directions, in separate goroutines.
    var wg sync.WaitGroup
    wg.Add(2)
    // go p.tcpReadWrite(conn, rConn, Request, &wg)
    // go p.tcpReadWrite(rConn, conn, Response, &wg)

    // From the proxier.go code in Kubernetes.
    go copyBytes(conn, rConn, &wg)
    go copyBytes(rConn, conn, &wg)
    wg.Wait()
    conn.Close()
    rConn.Close()
}
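// copyBytes follows the pattern from Kubernetes' proxier.go, as the comment
// above notes; the original helper isn't shown. A minimal sketch consistent
// with the call sites (signal convention and logging are assumptions):
func copyBytes(dst, src *net.TCPConn, wg *sync.WaitGroup) {
    defer wg.Done()
    if _, err := io.Copy(dst, src); err != nil {
        globallog.WithField("errmsg", err.Error()).Error("io.Copy failed")
    }
    // Half-close both directions so the opposite copy loop unblocks;
    // proxyTCP fully closes both connections after wg.Wait().
    dst.CloseWrite()
    src.CloseRead()
}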
func connectionHandler(conn *net.TCPConn, out chan *oakmole.Record) {
    defer conn.Close()
    timeBegin := time.Now()
    conn.SetKeepAlive(false)
    conn.SetLinger(0)
    conn.SetDeadline(timeBegin.Add(IOTimeout))
    addrLocal := conn.LocalAddr().(*net.TCPAddr)
    addrRemote := conn.RemoteAddr().(*net.TCPAddr)
    log.Println("New connection from", addrRemote)

    // TODO: reuse existing buffers
    buffer := make([]byte, ReadBufferSize)
    var httpRequest *http.Request
    totalSize := 0
    for i := 1; i <= 3 && totalSize < ReadBufferSize; i++ {
        size, err := conn.Read(buffer[totalSize:])
        totalSize += size
        if err == io.EOF {
            break
        }
        if err != nil {
            // Only log an IO timeout on the first Read();
            // later it just means the client has already sent everything.
            if netErr, ok := err.(net.Error); i == 1 || !ok || !netErr.Timeout() {
                log.Println("Read: try:", i, "local:", addrLocal, "remote:", addrRemote, "error:", err)
            }
            if i == 1 {
                return
            }
            break
        }

        // Try to parse the HTTP request.
        // This allows us to stop reading from the socket early.
        // TODO: reuse existing bufio.Reader
        bufReader := bufio.NewReader(bytes.NewReader(buffer))
        httpRequest, err = http.ReadRequest(bufReader)
        if err == nil {
            // Close Body only on success; on error httpRequest is nil and
            // dereferencing it (as the original did) would panic.
            httpRequest.Body.Close()
            break
        }
        httpRequest = nil
    }
    buffer = buffer[:totalSize]

    record := &oakmole.Record{
        Timestamp: uint64(timeBegin.UnixNano() / 1000),
        LocalIP:   addrLocal.IP,
        RemoteIP:  addrRemote.IP,
        Body:      buffer,
    }
    if httpRequest != nil {
        record.HttpHost = []byte(httpRequest.Host)
    } else {
        record.HttpHost = readHost(buffer)
    }
    // log.Println("Read: success local:", addrLocal, "remote:", addrRemote, "size:", totalSize, "first bytes:", string(buffer[:20]))
    // t1 := time.Now()
    out <- record
    // outSendTime := time.Now().Sub(t1)
    // log.Println("connectionHandler: out<- time:", outSendTime)

    if httpRequest != nil && httpRequest.Method == "GET" && httpRequest.RequestURI == "/robots.txt" {
        conn.Write(robotsDisallowBytes)
    }
}