Example #1
func (h *Hub) startLink(l *link, conn *net.TCPConn) {
	conn.SetKeepAlive(true)
	conn.SetKeepAlivePeriod(time.Second * 60)
	l.setConn(conn)

	Info("link(%d) start: %v", l.id, conn.RemoteAddr())
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			data, err := l.read()
			if err != nil {
				if err != errPeerClosed {
					h.SendCmd(l.id, LINK_CLOSE_SEND)
				}
				break
			}

			h.Send(l.id, data)
		}
	}()

	wg.Add(1)
	go func() {
		defer wg.Done()
		err := l._write()
		if err != errPeerClosed {
			h.SendCmd(l.id, LINK_CLOSE_RECV)
		}
	}()
	wg.Wait()
	Info("link(%d) close", l.id)
}
Example #2
// acceptTCP accepts connections on the listener and serves requests
// for each incoming connection. It blocks, so the caller typically
// invokes it in a go statement.
func acceptTCP(server *Server, lis *net.TCPListener) {
	var (
		conn *net.TCPConn
		err  error
		r    int
	)
	for {
		if conn, err = lis.AcceptTCP(); err != nil {
			// if the listener has been closed, return
			log.Error("listener.Accept(\"%s\") error(%v)", lis.Addr().String(), err)
			return
		}
		if err = conn.SetKeepAlive(Conf.TCPKeepalive); err != nil {
			log.Error("conn.SetKeepAlive() error(%v)", err)
			return
		}
		if err = conn.SetReadBuffer(Conf.TCPSndbuf); err != nil {
			log.Error("conn.SetReadBuffer() error(%v)", err)
			return
		}
		if err = conn.SetWriteBuffer(Conf.TCPRcvbuf); err != nil {
			log.Error("conn.SetWriteBuffer() error(%v)", err)
			return
		}
		go serveTCP(server, conn, r)
		if r++; r == maxInt {
			r = 0
		}
	}
}
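The doc comment notes that this accept loop blocks and is typically started in a go statement. A minimal wiring sketch under that reading (the listen address, Server value, and helper name are assumptions, not part of the original project):

// startAcceptLoop is a hypothetical caller: it resolves a listen address,
// creates the TCP listener, and runs acceptTCP concurrently.
func startAcceptLoop(server *Server) error {
	addr, err := net.ResolveTCPAddr("tcp", ":8080") // address is an assumption
	if err != nil {
		return err
	}
	lis, err := net.ListenTCP("tcp", addr)
	if err != nil {
		return err
	}
	go acceptTCP(server, lis) // acceptTCP blocks, so run it in its own goroutine
	return nil
}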
Example #3
func (server *TcpServer) Start() (err error) {
	for {
		var conn *net.TCPConn
		if conn, err = server.TCPListener.AcceptTCP(); err != nil {
			return err
		}
		if server.keepAlive != nil {
			if err := conn.SetKeepAlive(server.keepAlive.(bool)); err != nil {
				return err
			}
		}
		if server.keepAlivePeriod != nil {
			if kap, ok := (net.Conn(conn)).(iKeepAlivePeriod); ok {
				if err := kap.SetKeepAlivePeriod(server.keepAlivePeriod.(time.Duration)); err != nil {
					return err
				}
			}
		}
		if server.linger != nil {
			if err := conn.SetLinger(server.linger.(int)); err != nil {
				return err
			}
		}
		if server.noDelay != nil {
			if err := conn.SetNoDelay(server.noDelay.(bool)); err != nil {
				return err
			}
		}
		if server.readBuffer != nil {
			if err := conn.SetReadBuffer(server.readBuffer.(int)); err != nil {
				return err
			}
		}
		if server.writerBuffer != nil {
			if err := conn.SetWriteBuffer(server.writerBuffer.(int)); err != nil {
				return err
			}
		}
		if server.deadline != nil {
			if err := conn.SetDeadline(server.deadline.(time.Time)); err != nil {
				return err
			}
		}
		if server.readDeadline != nil {
			if err := conn.SetReadDeadline(server.readDeadline.(time.Time)); err != nil {
				return err
			}
		}
		if server.writerDeadline != nil {
			if err := conn.SetWriteDeadline(server.writerDeadline.(time.Time)); err != nil {
				return err
			}
		}
		if server.config != nil {
			server.ServeTCP(tls.Server(conn, server.config))
		} else {
			server.ServeTCP(conn)
		}
	}
}
Example #4
File: gopf.go Project: no2key/gopf
func handleClient(pf *PF, source *net.TCPConn) {
	atomic.AddInt32(&pf.status.actives, 1)
	defer func() {
		atomic.AddInt32(&pf.status.actives, -1)
		pf.wg.Done()
	}()

	settings := pf.settings
	host := chooseHost(settings.weight, settings.Hosts)
	if host == nil {
		source.Close()
		logger.Println("choose host failed")
		return
	}

	dest, err := net.DialTCP("tcp", nil, host.addr)
	if err != nil {
		source.Close()
		logger.Printf("connect to %s failed: %s", host.addr, err.Error())
		return
	}

	source.SetKeepAlive(true)
	source.SetKeepAlivePeriod(time.Second * 60)
	source.SetLinger(-1)
	dest.SetLinger(-1)

	go forward(source, dest)
	forward(dest, source)
	//logger.Printf("forward finished, %v -> %v", source.RemoteAddr(), host)
}
Example #5
// handlerConnection handles an incoming connection request
func (this *server) handlerConnection(conn *net.TCPConn) {

	defer logger.CatchException()

	logger.Infof("New connection coming ... IP=%s ", conn.RemoteAddr())

	conn.SetNoDelay(true)                                        // disable Nagle's algorithm
	conn.SetKeepAlive(true)                                      // enable keepalive
	conn.SetReadBuffer(64 * 1024)                                // set the read buffer size
	conn.SetWriteBuffer(64 * 1024)                               // set the write buffer size
	conn.SetReadDeadline(time.Now().Add(30000000 * time.Second)) // set the read deadline

	session := network.NewSession(conn)
	defer session.Close()

	for {
		msg, err := session.RecvMSG()
		if err != nil {
			logger.Infof("RecvMsgs IP=%s err=%v", conn.RemoteAddr(), err.Error())
			return
		}

		ret := this.ProcessMessage(session, msg)
		if ret == false {
			return
		}
	}
}
Example #6
func handleClient(source *net.TCPConn) {
	atomic.AddInt32(&daemon.status.actives, 1)
	defer func() {
		atomic.AddInt32(&daemon.status.actives, -1)
		daemon.wg.Done()
	}()

	// read req
	err, req := ReadReq(source)
	if err != nil {
		source.Close()
		Error("conn:%v, read req failed:%v", source.RemoteAddr(), err)
		return
	}

	source.SetKeepAlive(true)
	source.SetKeepAlivePeriod(time.Second * 60)
	source.SetLinger(-1)

	// judge: new conn or reuse conn
	switch req := req.(type) {
	case *NewConnReq:
		Info("new conn request:%v", req)
		onNewConn(source, req)
	case *ReuseConnReq:
		Info("reuse conn request:%v", req)
		onReuseConn(source, req)
	default:
		Info("unknown request:%v", req)
		source.Close()
		return
	}
}
Example #7
func newClientConn(ls *LeaseServer, raw *net.TCPConn) (*clientConn, error) {
	err := raw.SetKeepAlive(true)
	if err != nil {
		return nil, err
	}
	err = raw.SetNoDelay(true)
	if err != nil {
		return nil, err
	}
	ret := &clientConn{
		id:          incrementAndGet(&ls.clientIdCounter, 1),
		c:           raw,
		d:           gob.NewDecoder(raw),
		e:           gob.NewEncoder(raw),
		req:         ls.req,
		resp:        make(chan response, 20),
		ackLock:     new(sync.Mutex),
		pendingAcks: make(map[uint64]chan bool),
	}
	// send client id
	fmt.Printf("sending id %d\n", ret.id)
	idBuff := make([]byte, 8, 8)
	binary.LittleEndian.PutUint64(idBuff, ret.id)
	ret.c.Write(idBuff)
	fmt.Println("sent")
	return ret, nil
}
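The constructor writes an 8-byte little-endian client id before any gob traffic starts. A hedged sketch of the matching read on the client side (the function name is hypothetical; it assumes imports of io, net, and encoding/binary):

// readClientID is a hypothetical client-side counterpart: it reads the
// 8-byte id the server sends before gob-encoded messages begin.
func readClientID(c net.Conn) (uint64, error) {
	idBuff := make([]byte, 8)
	if _, err := io.ReadFull(c, idBuff); err != nil {
		return 0, err
	}
	return binary.LittleEndian.Uint64(idBuff), nil
}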
Example #8
func SetKeepAlive(c *net.TCPConn, cfg *KeepAliveConfig) error {
	if err := c.SetKeepAlive(cfg.KeepAlive); err != nil {
		return err
	}

	file, err := c.File()
	if err != nil {
		return err
	}

	fd := int(file.Fd())

	if cfg.KeepAliveIdle != 0 {
		if err := setIdle(fd, secs(cfg.KeepAliveIdle)); err != nil {
			return err
		}
	}

	if cfg.KeepAliveCount != 0 {
		if err := setCount(fd, cfg.KeepAliveCount); err != nil {
			return err
		}
	}

	if cfg.KeepAliveInterval != 0 {
		if err := setInterval(fd, secs(cfg.KeepAliveInterval)); err != nil {
			return err
		}
	}

	return nil
}
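setIdle, setCount, setInterval, and secs are platform-specific helpers not shown in this example. A Linux-only sketch of what they might look like, assuming the config fields are time.Duration values and using golang.org/x/sys/unix:

// Sketch only: these helpers are assumptions about the missing code, wrapping
// the Linux TCP keepalive socket options via golang.org/x/sys/unix.
func setIdle(fd, seconds int) error {
	return unix.SetsockoptInt(fd, unix.IPPROTO_TCP, unix.TCP_KEEPIDLE, seconds)
}

func setCount(fd, count int) error {
	return unix.SetsockoptInt(fd, unix.IPPROTO_TCP, unix.TCP_KEEPCNT, count)
}

func setInterval(fd, seconds int) error {
	return unix.SetsockoptInt(fd, unix.IPPROTO_TCP, unix.TCP_KEEPINTVL, seconds)
}

// secs converts a duration to whole seconds for the socket options.
func secs(d time.Duration) int {
	return int(d / time.Second)
}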
Example #9
// AcceptTCP accepts connections on the listener and serves requests
// for each incoming connection. It blocks, so the caller typically
// invokes it in a go statement.
func (server *Server) AcceptTCP(lis *net.TCPListener, i int) {
	var (
		conn *net.TCPConn
		err  error
	)
	for {
		log.Debug("server: accept round: %d", i)
		if conn, err = lis.AcceptTCP(); err != nil {
			// if the listener has been closed, return
			log.Error("listener.Accept(\"%s\") error(%v)", lis.Addr().String(), err)
			return
		}
		if err = conn.SetKeepAlive(Conf.TCPKeepalive); err != nil {
			log.Error("conn.SetKeepAlive() error(%v)", err)
			return
		}
		if err = conn.SetReadBuffer(Conf.TCPSndbuf); err != nil {
			log.Error("conn.SetReadBuffer() error(%v)", err)
			return
		}
		if err = conn.SetWriteBuffer(Conf.TCPRcvbuf); err != nil {
			log.Error("conn.SetWriteBuffer() error(%v)", err)
			return
		}
		go server.serveConn(conn, i)
		if i++; i == maxInt {
			i = 0
		}
	}
}
Example #10
func (this *GoServer) SetDefaultOptions(conn *net.TCPConn) {
	conn.SetKeepAlive(true)
	interval, err := time.ParseDuration("45s")
	if err == nil { // only set the period if the duration parsed successfully
		conn.SetKeepAlivePeriod(interval)
	}
	conn.SetNoDelay(true)
}
Example #11
func newTunnel(conn *net.TCPConn) *Tunnel {
	conn.SetKeepAlive(true)
	conn.SetKeepAlivePeriod(time.Second * 180)

	var tun Tunnel
	tun.Conn = &Conn{conn, bufio.NewReaderSize(conn, 64*1024), bufio.NewWriterSize(conn, 64*1024), nil, nil}
	Info("new tunnel:%s", tun)
	return &tun
}
Example #12
func newTunnel(conn *net.TCPConn) *Tunnel {
	conn.SetKeepAlive(true)
	conn.SetKeepAlivePeriod(time.Second * 10) //we do not keep so long

	var tun Tunnel
	tun.Conn = &Conn{conn, bufio.NewReaderSize(conn, PacketSize), bufio.NewWriterSize(conn, PacketSize), nil, nil}
	Info("new tunnel:%s", tun)
	return &tun
}
Example #13
func NewConnectionFromTCPConn(socket *net.TCPConn, cm *ConnectionManager, count uint32) *Connection {
	socket.SetKeepAlive(true)
	socket.SetKeepAlivePeriod(time.Second)
	conn := &Connection{
		socket:            socket,
		connectionManager: cm,
		ConnectionNumber:  count,
	}
	conn.start()
	return conn
}
Example #14
// Set TCP socket options on a new SMTP connection.
func (s *SMTPService) SetClientOptions(conn *net.TCPConn) error {
	if err := conn.SetKeepAlive(false); err != nil {
		log.Error("%s: SetKeepAlive: %v", conn.RemoteAddr(), err)
		return err
	}
	if err := conn.SetLinger(-1); err != nil {
		log.Error("%s: SetLinger: %v", conn.RemoteAddr(), err)
		return err
	}
	return nil
}
Example #15
func configureConn(conn *net.TCPConn, spec *ConnectionSpec) {
	// these two -- the most important -- are causing problems on my osx/64
	// where a "service unavailable" pops up in the async reads
	// but we absolutely need to be able to use timeouts.
	//	conn.SetReadTimeout(spec.rTimeout)
	//	conn.SetWriteTimeout(spec.wTimeout)
	conn.SetLinger(spec.lingerspec)
	conn.SetKeepAlive(spec.keepalive)
	conn.SetReadBuffer(spec.rBufSize)
	conn.SetWriteBuffer(spec.wBufSize)
}
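The commented-out SetReadTimeout/SetWriteTimeout calls refer to an API that was removed from net.TCPConn; the current way to get per-operation timeouts is a deadline set before each read or write. A small sketch under the assumption that spec.rTimeout is a time.Duration (the helper name is hypothetical):

// readWithTimeout is a hypothetical helper showing the deadline-based
// replacement for the removed SetReadTimeout call above.
func readWithTimeout(conn *net.TCPConn, spec *ConnectionSpec, buf []byte) (int, error) {
	if err := conn.SetReadDeadline(time.Now().Add(spec.rTimeout)); err != nil {
		return 0, err
	}
	return conn.Read(buf)
}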
Example #16
func faiConnection(conn *net.TCPConn) {
	defer conn.Close()
	var err error

	err = conn.SetKeepAlive(true)
	if err != nil {
		util.Log(0, "ERROR! SetKeepAlive: %v", err)
	}

	var buf bytes.Buffer
	defer buf.Reset()
	readbuf := make([]byte, 4096)
	n := 1
	for n != 0 {
		n, err = conn.Read(readbuf)
		if err != nil && err != io.EOF {
			util.Log(0, "ERROR! Read: %v", err)
		}
		if n == 0 && err == nil {
			util.Log(0, "ERROR! Read 0 bytes but no error reported")
		}

		// Find complete lines terminated by '\n' and process them.
		for start := 0; ; {
			eol := start
			for ; eol < n; eol++ {
				if readbuf[eol] == '\n' {
					break
				}
			}

			// no \n found, append to buf and continue reading
			if eol == n {
				buf.Write(readbuf[start:n])
				break
			}

			// append the rest of the line to the buffered contents
			buf.Write(readbuf[start:eol])
			start = eol + 1

			// bytes.Buffer has no TrimSpace method; trim when converting to string
			msg := strings.TrimSpace(buf.String())

			util.Log(2, "DEBUG! FAI monitor message from %v: %v", conn.RemoteAddr(), msg)
			buf.Reset()
		}
	}

	if buf.Len() != 0 {
		util.Log(2, "DEBUG! Incomplete FAI monitor message (i.e. not terminated by \"\\n\") from %v: %v", conn.RemoteAddr(), buf.String())
	}
}
Example #17
func (l *ConnListener) Accept() (c net.Conn, err error) {
	var tcp *net.TCPConn
	tcp, err = l.listener.AcceptTCP()
	if err != nil {
		return
	}
	// TCP settings.
	if err = tcp.SetKeepAlive(true); err != nil {
		glog.Errorf("conn.SetKeepAlive() error(%v)", err)
		return
	}
	return tcp, nil
}
Example #18
func (o options) configTCP(conn *net.TCPConn) error {
	if v, ok := o[mangos.OptionNoDelay]; ok {
		if err := conn.SetNoDelay(v.(bool)); err != nil {
			return err
		}
	}
	if v, ok := o[mangos.OptionKeepAlive]; ok {
		if err := conn.SetKeepAlive(v.(bool)); err != nil {
			return err
		}
	}
	return nil
}
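configTCP only consults keys that are present, so callers can pass as few or as many options as they like. A usage sketch, assuming the options type is the string-keyed map that the indexing above implies (the function name and dialing logic are assumptions):

// dialAndConfig is a hypothetical caller: it dials the peer and applies the
// mangos NoDelay/KeepAlive options through configTCP.
func dialAndConfig(raddr *net.TCPAddr) (*net.TCPConn, error) {
	conn, err := net.DialTCP("tcp", nil, raddr)
	if err != nil {
		return nil, err
	}
	o := options{
		mangos.OptionNoDelay:   true,
		mangos.OptionKeepAlive: true,
	}
	if err := o.configTCP(conn); err != nil {
		conn.Close()
		return nil, err
	}
	return conn, nil
}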
Example #19
func newCodecConn(f trace.Frame, tcp *net.TCPConn, key []byte) (*codecConn, error) {
	if err := tcp.SetKeepAlive(true); err != nil {
		panic(err)
	}
	c := &codecConn{
		Frame: f,
		tcp:   tcp,
		key:   key,
	}
	if err := c.auth(); err != nil {
		return nil, err
	}
	return c, nil
}
Example #20
func (self *Link) Pump(conn *net.TCPConn) {
	conn.SetKeepAlive(true)
	conn.SetLinger(-1)
	self.conn = conn

	self.wg.Add(1)
	go self.pumpIn()

	self.wg.Add(1)
	go self.pumpOut()

	self.wg.Wait()
	Info("link(%d) closed", self.id)
}
Example #21
func loop(conn *net.TCPConn) {
	defer conn.Close()

	conn.SetLinger(5)
	conn.SetKeepAlive(false)
	conn.SetNoDelay(true)
	now := time.Now()

	conn.SetReadDeadline(now.Add(readWriteLimit))

	if _, err := io.ReadFull(conn, buf); err == nil {
		conn.Write(response)
	}
}
Example #22
func setTCPOptions(conn *net.TCPConn) {
	var err error
	if err = conn.SetLinger(0); err != nil {
		l.Infoln(err)
	}
	if err = conn.SetNoDelay(false); err != nil {
		l.Infoln(err)
	}
	if err = conn.SetKeepAlivePeriod(60 * time.Second); err != nil {
		l.Infoln(err)
	}
	if err = conn.SetKeepAlive(true); err != nil {
		l.Infoln(err)
	}
}
Example #23
func (link *Link) Pump(conn *net.TCPConn) {
	conn.SetKeepAlive(true)
	conn.SetKeepAlivePeriod(time.Second * 60)
	link.conn = conn

	link.wg.Add(1)
	go link.pumpIn()

	link.wg.Add(1)
	go link.pumpOut()

	link.wg.Wait()
	Info("link(%d) closed", link.id)
	link.hub.deleteLink(link.id)
}
Example #24
func handleConnection(conn *net.TCPConn) {
	conn.SetKeepAlive(true)

	userid, err := handleAuth(conn)
	if err != nil {
		logger.Errorf("Authenticate Information Fail. Userid: %d. Error: %s", userid, err)
		conn.Close()
		return
	}

	shouldQuit := make(chan bool)

	go handleReceivingMsg(conn, userid, shouldQuit)
	go handleSendingMsg(conn, userid, shouldQuit)
}
Example #25
// SetTCPOptions sets Syncthing's default TCP options on a TCP connection
func SetTCPOptions(conn *net.TCPConn) error {
	var err error
	if err = conn.SetLinger(0); err != nil {
		return err
	}
	if err = conn.SetNoDelay(false); err != nil {
		return err
	}
	if err = conn.SetKeepAlivePeriod(60 * time.Second); err != nil {
		return err
	}
	if err = conn.SetKeepAlive(true); err != nil {
		return err
	}
	return nil
}
Example #26
func newTunnel(conn *net.TCPConn) *Tunnel {
	conn.SetKeepAlive(true)
	conn.SetKeepAlivePeriod(time.Second * 60)
	conn.SetLinger(-1)
	// conn.SetWriteBuffer(64 * 1024)
	// conn.SetReadBuffer(64 * 1024)
	desc := fmt.Sprintf("tunnel[%s <-> %s]", conn.LocalAddr(), conn.RemoteAddr())
	return &Tunnel{
		wlock:  new(sync.Mutex),
		writer: NewRC4Writer(conn, options.RC4Key),
		rlock:  new(sync.Mutex),
		reader: NewRC4Reader(bufio.NewReaderSize(conn, 8192), options.RC4Key),
		conn:   conn,
		desc:   desc,
	}
}
Example #27
// readDiamondMetrics reads from the connection
func (d *Diamond) readDiamondMetrics(conn *net.TCPConn) {
	defer conn.Close()
	conn.SetKeepAlive(true)
	conn.SetKeepAlivePeriod(time.Second)
	reader := bufio.NewReader(conn)
	d.log.Info("Connection started: ", conn.RemoteAddr())
	for {
		line, err := reader.ReadBytes('\n')
		if err != nil {
			d.log.Warn("Error while reading diamond metrics", err)
			break
		}
		d.log.Debug("Read: ", string(line))
		d.incoming <- line
	}
	d.log.Info("Connection closed: ", conn.RemoteAddr())
}
Example #28
func (server *Server) setupSocket(conn *net.TCPConn) (err error) {
	if err = conn.SetLinger(0); err != nil {
		return
	}
	if server.ConfigReadBuffer != 0 {
		if err = conn.SetReadBuffer(int(server.ConfigReadBuffer)); err != nil {
			return
		}
	}
	if err = conn.SetKeepAlive(true); err != nil {
		return
	}
	if err = conn.SetReadDeadline(time.Now().Add(server.ConfigIdleTimeout)); err != nil {
		return
	}
	return
}
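setupSocket arms a single read deadline from ConfigIdleTimeout; for it to behave as an idle timeout, the caller has to push the deadline forward after every successful read. A sketch of that pattern (the method name and buffer handling are assumptions):

// readLoop is a hypothetical caller illustrating how the deadline set in
// setupSocket would be refreshed so that only idle connections time out.
func (server *Server) readLoop(conn *net.TCPConn) error {
	buf := make([]byte, 4096)
	for {
		n, err := conn.Read(buf)
		if err != nil {
			return err
		}
		_ = buf[:n] // hand the n bytes read to the real protocol handler here
		// refresh the idle deadline after each successful read
		if err = conn.SetReadDeadline(time.Now().Add(server.ConfigIdleTimeout)); err != nil {
			return err
		}
	}
}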
Example #29
// readDiamondMetrics reads from the connection
func (d Diamond) readDiamondMetrics(conn *net.TCPConn) {
	defer conn.Close()
	conn.SetKeepAlive(true)
	conn.SetKeepAlivePeriod(time.Second)
	reader := bufio.NewReader(conn)
	log.Info("Diamond collector connection started: ", conn.RemoteAddr())
	for {
		// TODO: verify that timeout is actually working.
		conn.SetDeadline(time.Now().Add(time.Second))
		line, err := reader.ReadBytes('\n')
		if err != nil {
			break
		}
		log.Debug("Read from Diamond collector: ", string(line))
		d.incoming <- line
	}
	log.Info("Diamond collector connection closed: ", conn.RemoteAddr())
}
Example #30
// https://github.com/docker/docker/blob/18c7c67308bd4a24a41028e63c2603bb74eac85e/pkg/proxy/tcp_proxy.go#L34
func (p *proxy) proxyLoop(client, backend *net.TCPConn) {
	if err := client.SetKeepAlive(true); err != nil {
		p.log(fmt.Sprintf("failed to enable keepalive for client %s: %s", client.RemoteAddr(), err))
	}

	if err := backend.SetKeepAlive(true); err != nil {
		p.log(fmt.Sprintf("failed to enable keepalive for backend %s: %s", backend.RemoteAddr(), err))
	}

	event := make(chan struct{})
	var broker = func(to, from *net.TCPConn, c prometheus.Counter) {
		for {
			n, err := io.CopyN(to, from, copySize)
			c.Add(float64(n))
			if err != nil {
				// If the socket we are writing to is shutdown with
				// SHUT_WR, forward it to the other end of the pipe:
				if err, ok := err.(*net.OpError); ok && err.Err == syscall.EPIPE {
					_ = from.CloseWrite()
				}

				break
			}
		}

		_ = to.CloseRead()
		event <- struct{}{}
	}

	backendAddr := backend.RemoteAddr().String()
	labels := prometheus.Labels{"app": p.app, "upstream": backendAddr}

	go broker(client, backend, bytesSent.With(labels))
	go broker(backend, client, bytesReceived.With(labels))

	for i := 0; i < 2; i++ {
		<-event
	}

	_ = client.Close()
	_ = backend.Close()

	p.log(fmt.Sprintf("closed connection from %s to %s", client.RemoteAddr(), backend.RemoteAddr()))
}
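proxyLoop expects both TCP connections to already exist. A hedged sketch of the surrounding wiring (the listener, backend address, and method name are assumptions):

// serve is a hypothetical accept-and-dial loop that feeds proxyLoop.
func (p *proxy) serve(lis *net.TCPListener, backendAddr *net.TCPAddr) {
	for {
		client, err := lis.AcceptTCP()
		if err != nil {
			return // listener closed
		}
		backend, err := net.DialTCP("tcp", nil, backendAddr)
		if err != nil {
			p.log(fmt.Sprintf("failed to dial backend %s: %s", backendAddr, err))
			client.Close()
			continue
		}
		go p.proxyLoop(client, backend)
	}
}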