func handleClient(source *net.TCPConn) {
	atomic.AddInt32(&daemon.status.actives, 1)
	defer func() {
		atomic.AddInt32(&daemon.status.actives, -1)
		daemon.wg.Done()
	}()

	// read req
	err, req := ReadReq(source)
	if err != nil {
		source.Close()
		Error("conn:%v, read req failed:%v", source.RemoteAddr(), err)
		return
	}

	source.SetKeepAlive(true)
	source.SetKeepAlivePeriod(time.Second * 60)
	source.SetLinger(-1)

	// judge: new conn or reuse conn
	switch req := req.(type) {
	case *NewConnReq:
		Info("new conn request:%v", req)
		onNewConn(source, req)
	case *ReuseConnReq:
		Info("reuse conn request:%v", req)
		onReuseConn(source, req)
	default:
		Info("unknown request:%v", req)
		source.Close()
		return
	}
}
func (h *Hub) startLink(l *link, conn *net.TCPConn) {
	conn.SetKeepAlive(true)
	conn.SetKeepAlivePeriod(time.Second * 60)
	l.setConn(conn)

	Info("link(%d) start: %v", l.id, conn.RemoteAddr())

	var wg sync.WaitGroup

	// reader goroutine: pull data from the link and hand it to the hub
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			data, err := l.read()
			if err != nil {
				if err != errPeerClosed {
					h.SendCmd(l.id, LINK_CLOSE_SEND)
				}
				break
			}
			h.Send(l.id, data)
		}
	}()

	// writer goroutine: pump queued data into the connection
	wg.Add(1)
	go func() {
		defer wg.Done()
		err := l._write()
		if err != errPeerClosed {
			h.SendCmd(l.id, LINK_CLOSE_RECV)
		}
	}()

	wg.Wait()
	Info("link(%d) close", l.id)
}
func handleClient(pf *PF, source *net.TCPConn) {
	atomic.AddInt32(&pf.status.actives, 1)
	defer func() {
		atomic.AddInt32(&pf.status.actives, -1)
		pf.wg.Done()
	}()

	settings := pf.settings
	host := chooseHost(settings.weight, settings.Hosts)
	if host == nil {
		source.Close()
		logger.Println("choose host failed")
		return
	}

	dest, err := net.DialTCP("tcp", nil, host.addr)
	if err != nil {
		source.Close()
		logger.Printf("connect to %s failed: %s", host.addr, err.Error())
		return
	}

	source.SetKeepAlive(true)
	source.SetKeepAlivePeriod(time.Second * 60)
	source.SetLinger(-1)
	dest.SetLinger(-1)

	go forward(source, dest)
	forward(dest, source)
	//logger.Printf("forward finished, %v -> %v", source.RemoteAddr(), host)
}
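// Several snippets here call a forward helper that is never shown. A minimal
// sketch of one plausible implementation; the argument order and the
// half-close behavior are assumptions, not the original code. Uses io from
// the standard library.
func forward(from, to *net.TCPConn) {
	defer to.CloseWrite() // signal EOF to the peer once this direction drains
	defer from.CloseRead()
	io.Copy(to, from)
}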
func (this *GoServer) SetDefaultOptions(conn *net.TCPConn) {
	conn.SetKeepAlive(true)
	interval, err := time.ParseDuration("45s")
	if err == nil { // only apply the period if parsing succeeded
		conn.SetKeepAlivePeriod(interval)
	}
	conn.SetNoDelay(true)
}
func (server *TcpServer) Start() (err error) {
	for {
		var conn *net.TCPConn
		if conn, err = server.TCPListener.AcceptTCP(); err != nil {
			return err
		}
		if server.keepAlive != nil {
			if err := conn.SetKeepAlive(server.keepAlive.(bool)); err != nil {
				return err
			}
		}
		if server.keepAlivePeriod != nil {
			if err := conn.SetKeepAlivePeriod(server.keepAlivePeriod.(time.Duration)); err != nil {
				return err
			}
		}
		if server.linger != nil {
			if err := conn.SetLinger(server.linger.(int)); err != nil {
				return err
			}
		}
		if server.noDelay != nil {
			if err := conn.SetNoDelay(server.noDelay.(bool)); err != nil {
				return err
			}
		}
		if server.readBuffer != nil {
			if err := conn.SetReadBuffer(server.readBuffer.(int)); err != nil {
				return err
			}
		}
		if server.writerBuffer != nil {
			if err := conn.SetWriteBuffer(server.writerBuffer.(int)); err != nil {
				return err
			}
		}
		if server.deadline != nil {
			if err := conn.SetDeadline(server.deadline.(time.Time)); err != nil {
				return err
			}
		}
		if server.readDeadline != nil {
			if err := conn.SetReadDeadline(server.readDeadline.(time.Time)); err != nil {
				return err
			}
		}
		if server.writerDeadline != nil {
			if err := conn.SetWriteDeadline(server.writerDeadline.(time.Time)); err != nil {
				return err
			}
		}
		if server.config != nil {
			server.ServeTCP(tls.Client(conn, server.config))
		} else {
			server.ServeTCP(conn)
		}
	}
}
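// The nil-checked interface{} fields above encode "option was never set" vs.
// "option was set to a value". A minimal sketch of that pattern in isolation;
// the tcpOptions type and its method names are assumptions for illustration,
// not the server's actual API.
type tcpOptions struct {
	keepAlivePeriod interface{} // nil means "leave the OS default alone"
}

func (o *tcpOptions) SetKeepAlivePeriod(d time.Duration) {
	o.keepAlivePeriod = d
}

func (o *tcpOptions) apply(conn *net.TCPConn) error {
	if o.keepAlivePeriod != nil {
		// the type assertion is safe because only the setter stores the value
		return conn.SetKeepAlivePeriod(o.keepAlivePeriod.(time.Duration))
	}
	return nil
}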
func newTunnel(conn *net.TCPConn) *Tunnel {
	conn.SetKeepAlive(true)
	conn.SetKeepAlivePeriod(time.Second * 10) // we deliberately keep the period short
	var tun Tunnel
	tun.Conn = &Conn{conn, bufio.NewReaderSize(conn, PacketSize), bufio.NewWriterSize(conn, PacketSize), nil, nil}
	Info("new tunnel:%s", tun)
	return &tun
}
func newTunnel(conn *net.TCPConn) *Tunnel {
	conn.SetKeepAlive(true)
	conn.SetKeepAlivePeriod(time.Second * 180)
	var tun Tunnel
	tun.Conn = &Conn{conn, bufio.NewReaderSize(conn, 64*1024), bufio.NewWriterSize(conn, 64*1024), nil, nil}
	Info("new tunnel:%s", tun)
	return &tun
}
func NewConnectionFromTCPConn(socket *net.TCPConn, cm *ConnectionManager, count uint32) *Connection {
	socket.SetKeepAlive(true)
	socket.SetKeepAlivePeriod(time.Second)
	conn := &Connection{
		socket:            socket,
		connectionManager: cm,
		ConnectionNumber:  count,
	}
	conn.start()
	return conn
}
func setTCPOptions(conn *net.TCPConn) {
	var err error
	if err = conn.SetLinger(0); err != nil {
		l.Infoln(err)
	}
	if err = conn.SetNoDelay(false); err != nil {
		l.Infoln(err)
	}
	if err = conn.SetKeepAlivePeriod(60 * time.Second); err != nil {
		l.Infoln(err)
	}
	if err = conn.SetKeepAlive(true); err != nil {
		l.Infoln(err)
	}
}
func (link *Link) Pump(conn *net.TCPConn) {
	conn.SetKeepAlive(true)
	conn.SetKeepAlivePeriod(time.Second * 60)
	link.conn = conn

	link.wg.Add(1)
	go link.pumpIn()

	link.wg.Add(1)
	go link.pumpOut()

	link.wg.Wait()
	Info("link(%d) closed", link.id)
	link.hub.deleteLink(link.id)
}
func newTunnel(conn *net.TCPConn) *Tunnel {
	conn.SetKeepAlive(true)
	conn.SetKeepAlivePeriod(time.Second * 60)
	conn.SetLinger(-1)
	// conn.SetWriteBuffer(64 * 1024)
	// conn.SetReadBuffer(64 * 1024)
	desc := fmt.Sprintf("tunnel[%s <-> %s]", conn.LocalAddr(), conn.RemoteAddr())
	return &Tunnel{
		wlock:  new(sync.Mutex),
		writer: NewRC4Writer(conn, options.RC4Key),
		rlock:  new(sync.Mutex),
		reader: NewRC4Reader(bufio.NewReaderSize(conn, 8192), options.RC4Key),
		conn:   conn,
		desc:   desc,
	}
}
// SetTCPOptions sets syncthing's default TCP options on a TCP connection
func SetTCPOptions(conn *net.TCPConn) error {
	var err error
	if err = conn.SetLinger(0); err != nil {
		return err
	}
	if err = conn.SetNoDelay(false); err != nil {
		return err
	}
	if err = conn.SetKeepAlivePeriod(60 * time.Second); err != nil {
		return err
	}
	if err = conn.SetKeepAlive(true); err != nil {
		return err
	}
	return nil
}
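// Usage sketch (illustrative, not part of the original sources): applying a
// helper like SetTCPOptions above to every accepted connection. The
// acceptWithOptions name and the injected handler are assumptions; only the
// standard net package is used.
func acceptWithOptions(ln *net.TCPListener, handle func(*net.TCPConn)) error {
	for {
		conn, err := ln.AcceptTCP()
		if err != nil {
			return err
		}
		if err := SetTCPOptions(conn); err != nil {
			// treat option failures as non-fatal: drop this connection only
			conn.Close()
			continue
		}
		go handle(conn)
	}
}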
func (p *Proxy) proxyTCPStream(ctx context.Context, src *net.TCPConn) {
	srcRemoteAddr := src.RemoteAddr().(*net.TCPAddr)
	srcLocalAddr := src.LocalAddr().(*net.TCPAddr)
	route := p.routes.GetTable().Lookup(protocols.TCP,
		srcRemoteAddr.IP, srcLocalAddr.IP,
		uint16(srcRemoteAddr.Port), uint16(srcLocalAddr.Port))
	if route == nil {
		src.Close()
		return
	}

	go func() {
		dstAddr := net.TCPAddr{
			IP:   route.Outbound.DstIP,
			Port: int(route.Outbound.DstPort),
		}
		dst, err := net.DialTCP("tcp", nil, &dstAddr)
		if err != nil {
			src.Close()
			return
		}

		dst.SetKeepAlivePeriod(10 * time.Second)
		src.SetKeepAlivePeriod(10 * time.Second)

		// tear down both ends when the context is cancelled
		go func() {
			<-ctx.Done()
			src.Close()
			dst.Close()
		}()

		// copy in both directions, half-closing each side when its copy ends
		go func() {
			defer dst.CloseWrite()
			defer src.CloseRead()
			io.Copy(dst, src)
		}()
		go func() {
			defer src.CloseWrite()
			defer dst.CloseRead()
			io.Copy(src, dst)
		}()
	}()
}
// readDiamondMetrics reads from the connection
func (d *Diamond) readDiamondMetrics(conn *net.TCPConn) {
	defer conn.Close()
	conn.SetKeepAlive(true)
	conn.SetKeepAlivePeriod(time.Second)
	reader := bufio.NewReader(conn)
	d.log.Info("Connection started: ", conn.RemoteAddr())
	for {
		line, err := reader.ReadBytes('\n')
		if err != nil {
			d.log.Warn("Error while reading diamond metrics", err)
			break
		}
		d.log.Debug("Read: ", string(line))
		d.incoming <- line
	}
	d.log.Info("Connection closed: ", conn.RemoteAddr())
}
// readDiamondMetrics reads from the connection
func (d Diamond) readDiamondMetrics(conn *net.TCPConn) {
	defer conn.Close()
	conn.SetKeepAlive(true)
	conn.SetKeepAlivePeriod(time.Second)
	reader := bufio.NewReader(conn)
	log.Info("Diamond collector connection started: ", conn.RemoteAddr())
	for {
		// TODO: verify that timeout is actually working.
		conn.SetDeadline(time.Now().Add(time.Second)) // was the bare constant 1e9 ns
		line, err := reader.ReadBytes('\n')
		if err != nil {
			break
		}
		log.Debug("Read from Diamond collector: ", string(line))
		d.incoming <- line
	}
	log.Info("Diamond collector connection closed: ", conn.RemoteAddr())
}
func handleConn(source *net.TCPConn) {
	var destList []*DestList

	source.SetKeepAlive(true)
	source.SetKeepAlivePeriod(time.Second * 60)

	for _, host := range options.hostList.HostList {
		dest, err := net.Dial("tcp", host.Addr)
		if err != nil {
			source.Close()
			// close the destinations dialed so far instead of leaking them
			for _, d := range destList {
				d.Conn.Close()
			}
			log.Printf("connect to %s failed: %s", host.Addr, err.Error())
			return
		}
		tmp := new(DestList)
		tmp.Conn = dest.(*net.TCPConn)
		tmp.Discard = host.Discard
		destList = append(destList, tmp)
	}

	go sendDest(source, destList)
	sendSource(destList, source)
}
func handleClient(source *net.TCPConn) {
	atomic.AddInt32(&daemon.status.actives, 1)
	defer func() {
		atomic.AddInt32(&daemon.status.actives, -1)
		daemon.wg.Done()
	}()

	Info("accept new connection: %v", source.RemoteAddr())
	source.SetKeepAlive(true)
	source.SetKeepAlivePeriod(time.Second * 60)
	source.SetLinger(-1)

	// read req, with a read request timeout
	source.SetReadDeadline(time.Now().Add(time.Second * 30))
	err, req := ReadReq(source)
	if err != nil {
		source.Close()
		Error("conn:%v, read req failed: %v", source.RemoteAddr(), err)
		return
	}

	// cancel read timeout
	var t time.Time
	source.SetReadDeadline(t)

	// judge: new conn or reuse conn
	switch req := req.(type) {
	case *NewConnReq:
		Info("new conn request:%v", req)
		onNewConn(source, req)
	case *ReuseConnReq:
		Info("reuse conn request:%v", req)
		onReuseConn(source, req)
	default:
		Info("unknown request:%v", req)
		source.Close()
		return
	}

	Info("connection close: %v", source.RemoteAddr())
}
func NewSession(conn *net.TCPConn, rc *turbo.RemotingConfig) *Session {
	conn.SetKeepAlive(true)
	conn.SetKeepAlivePeriod(rc.IdleTime * 2)
	// disable Nagle's algorithm
	conn.SetNoDelay(true)
	conn.SetReadBuffer(rc.ReadBufferSize)
	conn.SetWriteBuffer(rc.WriteBufferSize)

	session := &Session{
		conn:         conn,
		br:           bufio.NewReaderSize(conn, rc.ReadBufferSize),
		bw:           bufio.NewWriterSize(conn, rc.WriteBufferSize),
		ReadChannel:  make(chan *packet.Packet, rc.ReadChannelSize),
		WriteChannel: make(chan *packet.Packet, rc.WriteChannelSize),
		isClose:      false,
		remoteAddr:   conn.RemoteAddr().String(),
		rc:           rc,
	}
	return session
}
func handleConn(source *net.TCPConn) {
	host := chooseHost(options.backend.weight, options.backend.Hosts)
	if host == nil {
		source.Close()
		log.Println("choose host failed")
		return
	}

	dest, err := net.Dial("tcp", host.Addr)
	if err != nil {
		source.Close()
		log.Printf("connect to %s failed: %s", host.Addr, err.Error())
		return
	}

	source.SetKeepAlive(true)
	source.SetKeepAlivePeriod(time.Second * 60)

	go forward(source, dest.(*net.TCPConn))
	forward(dest.(*net.TCPConn), source)
}
func (this *DeviceGatewayServer) deviceRoutine(waitGroup *sync.WaitGroup, socket *net.TCPConn, maxLen int) {
	defer waitGroup.Done()

	conn := NewConnection(socket, maxLen, this.devManager)
	defer conn.Close()

	// step 1. shake hands with the device connection
	deviceGid, err := conn.DeviceHandShake()
	if err != nil {
		if deviceGid != nil {
			log.Errorf("device handshake failed exit:addr[%s], gid[%s]", socket.RemoteAddr(), deviceGid.String())
			return
		}
		log.Errorf("device handshake failed exit:addr[%s]", socket.RemoteAddr())
		return
	}

	// step 2. enable keep-alive on the socket
	socket.SetKeepAlive(true)
	socket.SetKeepAlivePeriod(time.Second * 30)

	// step 3. record the connection for the forward manager
	err = this.connManager.Insert(deviceGid, conn)
	if err != nil {
		log.Errorf("insert the device connection Failed:addr[%s], gid[%s], err[%v]", socket.RemoteAddr(), deviceGid.String(), err)
		return
	}
	// for debug info
	log.Infof("device connection created:addr[%s], gid[%s]", socket.RemoteAddr(), deviceGid.String())

	// step 4. loop to forward all requests and receive all responses
	conn.Loop(waitGroup)

	// step 5. remove the conn from the device connection manager
	_, find := this.connManager.Delete(deviceGid)
	if !find {
		log.Errorf("delete device connection before close:addr[%s], gid[%s]", socket.RemoteAddr(), deviceGid.String())
	}
	// for debug info
	log.Infof("device connection closed:addr[%s], gid[%s]", socket.RemoteAddr(), deviceGid.String())
}
func handle_sync_client(conn *net.TCPConn) {
	conn.SetKeepAlive(true)
	conn.SetKeepAlivePeriod(10 * time.Minute) // the time.Duration conversion was redundant
	client := NewSyncClient(conn)
	client.Run()
}
func NewTunnel(conn *net.TCPConn) *Tunnel {
	conn.SetKeepAlive(true)
	conn.SetKeepAlivePeriod(time.Second * 60)
	conn.SetLinger(-1)
	return &Tunnel{
		make(chan *TunnelPayload, 65535),
		make(chan *TunnelPayload, 65535),
		conn,
	}
}
func (c *conn) command(s string) error {
	c.Lock()
	defer c.Unlock()

	s = strings.TrimSpace(s)
	args := strings.Split(s, " ")
	cmd := args[0]
	args = args[1:]

	switch cmd {
	case "connect":
		addr := strings.Split(args[0], "!")
		if len(args) == 2 {
			return errors.New("manual local port assignment not supported")
		}
		if len(addr) != 2 {
			return errors.New("invalid address")
		}
		host := addr[0]
		port, err := strconv.ParseUint(addr[1], 10, 64)
		if err != nil {
			return err
		}
		if port > 65535 {
			return fmt.Errorf("invalid port: %s", addr[1])
		}
		// TODO(kl): Should the old connection disconnect?
		c.address = fmt.Sprintf("%s:%d", host, port)
		log.Printf("-> Connect: %s", c.address)
		c.connectPending = true
	case "announce":
		if args[0] == "*" {
			args[0] = "0"
		}
		port, err := strconv.ParseUint(args[0], 10, 64)
		if err != nil {
			return err
		}
		if port > 65535 {
			return fmt.Errorf("invalid port: %s", args[0])
		}
		c.address = fmt.Sprintf(":%d", port)
		log.Printf("-> Announce: %s", c.address)
		c.listener, err = net.Listen(c.network, c.address)
		if err != nil {
			// previously this error was silently dropped
			return err
		}
		c.state = stateAnnouncing
	case "accept":
		// Do nothing...
	case "keepalive":
		tcp, ok := c.conn.(*net.TCPConn)
		if !ok {
			return fmt.Errorf("keepalive only valid on tcp connections")
		}
		var period time.Duration
		switch len(args) {
		case 0:
			period = 30 * time.Second
		case 1:
			d, err := strconv.ParseUint(args[0], 10, 64)
			if err != nil {
				return err
			}
			period = time.Duration(d) * time.Millisecond
		default:
			return fmt.Errorf("invalid arguments")
		}
		log.Printf("-> Keepalive: %v", period)
		tcp.SetKeepAlivePeriod(period)
		tcp.SetKeepAlive(true)
	case "hangup", "reject":
		c.hangup()
	case "bind", "ttl", "tos", "ignoreadvice", "addmulti", "remmulti":
		return fmt.Errorf("Unimplemented command received: %v", s)
	case "checksum", "tcpporthogdefence":
		return fmt.Errorf("Unimplemented command received: %v", s)
	default:
		return fmt.Errorf("Unimplemented command received: %v", s)
	}
	return nil
}
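// Standalone sketch (not from any of the sources above): the same keep-alive
// settings the snippets apply on the accept side, applied when dialing out.
// The function name, address, and period are placeholders; only the net and
// time packages are used.
func dialWithKeepAlive(addr string, period time.Duration) (*net.TCPConn, error) {
	raddr, err := net.ResolveTCPAddr("tcp", addr)
	if err != nil {
		return nil, err
	}
	conn, err := net.DialTCP("tcp", nil, raddr)
	if err != nil {
		return nil, err
	}
	if err := conn.SetKeepAlive(true); err != nil {
		conn.Close()
		return nil, err
	}
	if err := conn.SetKeepAlivePeriod(period); err != nil {
		conn.Close()
		return nil, err
	}
	return conn, nil
}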