// handler handles a single TCP Connection func (t *TcpListener) handler(conn *net.TCPConn, id string) { // connection cleanup function defer func() { t.wg.Done() conn.Close() log.Printf("Closed TCP Connection from %s", conn.RemoteAddr()) // Add one connection potential back to channel when this one closes t.accept <- true t.forget(id) }() scanner := bufio.NewScanner(conn) for { select { case <-t.done: return default: if !scanner.Scan() { return } buf := scanner.Bytes() select { case t.in <- buf: default: log.Printf(dropwarn, string(buf)) } } } }
func (h *Hub) startLink(l *link, conn *net.TCPConn) { conn.SetKeepAlive(true) conn.SetKeepAlivePeriod(time.Second * 60) l.setConn(conn) Info("link(%d) start: %v", l.id, conn.RemoteAddr()) var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() for { data, err := l.read() if err != nil { if err != errPeerClosed { h.SendCmd(l.id, LINK_CLOSE_SEND) } break } h.Send(l.id, data) } }() wg.Add(1) go func() { defer wg.Done() err := l._write() if err != errPeerClosed { h.SendCmd(l.id, LINK_CLOSE_RECV) } }() wg.Wait() Info("link(%d) close", l.id) }
// InitClient registers a new Client for devid in DevMap and starts a send
// goroutine that serializes packs from MsgOut onto conn until ctrl fires.
func InitClient(conn *net.TCPConn, devid string) *Client {
	client := &Client{
		devId:           devid,
		ctrl:            make(chan bool),
		MsgOut:          make(chan *Pack, 100),
		WaitingChannels: make(map[uint32]chan *Message),
		NextSeqId:       1,
		LastAlive:       time.Now(),
	}
	DevMap.Set(devid, client)

	go func() {
		log.Tracef("start send routine for %s", conn.RemoteAddr().String())
		for {
			select {
			case pack := <-client.MsgOut:
				// Stamp the outgoing message with the pack owner's next
				// sequence id.
				seqid := pack.client.NextSeqId
				pack.msg.Header.Seq = seqid
				// NOTE(review): Serialize and Write errors are silently
				// ignored — a failed write drops the message without any
				// signal to the caller; confirm intentional.
				b, _ := pack.msg.Header.Serialize()
				conn.Write(b)
				conn.Write(pack.msg.Data)
				log.Infof("send msg ok, (%s)", string(pack.msg.Data))
				pack.client.NextSeqId += 1
				// add reply channel
				// NOTE(review): WaitingChannels and NextSeqId are mutated
				// here without a lock; verify no other goroutine touches
				// them concurrently (e.g. the reply dispatcher).
				if pack.reply != nil {
					pack.client.WaitingChannels[seqid] = pack.reply
				}
			case <-client.ctrl:
				log.Tracef("leave send routine for %s", conn.RemoteAddr().String())
				return
			}
		}
	}()
	return client
}
func handleConnection(clientConn *net.TCPConn) { if clientConn == nil { log.Debugf("handleConnection(): oops, clientConn is nil") return } // test if the underlying fd is nil remoteAddr := clientConn.RemoteAddr() if remoteAddr == nil { log.Debugf("handleConnection(): oops, clientConn.fd is nil!") return } ipv4, port, clientConn, err := getOriginalDst(clientConn) if err != nil { log.Infof("handleConnection(): can not handle this connection, error occurred in getting original destination ip address/port: %+v\n", err) return } // If no upstream proxies were provided on the command line, assume all traffic should be sent directly if gProxyServerSpec == "" { handleDirectConnection(clientConn, ipv4, port) return } // Evaluate for direct connection ip := net.ParseIP(ipv4) if ok, _ := director(&ip); ok { handleDirectConnection(clientConn, ipv4, port) return } handleProxyConnection(clientConn, ipv4, port) }
func (router *Router) acceptTCP(tcpConn *net.TCPConn) { // someone else is dialing us, so our udp sender is the conn // on Port and we wait for them to send us something on UDP to // start. connRemote := NewRemoteConnection(router.Ourself, nil, tcpConn.RemoteAddr().String()) NewLocalConnection(connRemote, UnknownPeerName, tcpConn, nil, router) }
//处理连接请求 func (this *server) handlerConnection(conn *net.TCPConn) { defer logger.CatchException() logger.Infof("New connection coming ... IP=%s ", conn.RemoteAddr()) conn.SetNoDelay(true) //无延迟 conn.SetKeepAlive(true) //保持激活 conn.SetReadBuffer(64 * 1024) //设置读缓冲区大小 conn.SetWriteBuffer(64 * 1024) //设置写缓冲区大小 conn.SetReadDeadline(time.Now().Add(30000000 * time.Second)) //设置读超时 session := network.NewSession(conn) defer session.Close() for { msg, err := session.RecvMSG() if err != nil { logger.Infof("RecvMsgs IP=%s err=%v", conn.RemoteAddr(), err.Error()) return } ret := this.ProcessMessage(session, msg) if ret == false { return } } }
// serve serves a single accepted connection func (p *proxy) serve(client *net.TCPConn) { connected := connectedClients.With(p.labels) connected.Inc() defer func() { connected.Dec() _ = client.Close() }() p.mutex.Lock() upstreams := make([]Upstream, len(p.upstreams)) copy(upstreams, p.upstreams) p.mutex.Unlock() // TODO: proper iterator accounting weights and current # of connections for i := range upstreams { j := rand.Intn(i + 1) upstreams[i], upstreams[j] = upstreams[j], upstreams[i] } for _, upstream := range upstreams { p.log(fmt.Sprintf("connecting from %s to %s", client.RemoteAddr(), upstream)) backend, err := net.Dial("tcp", upstream.Addr()) if err != nil { p.log(fmt.Sprintf("error connecting from %s to %s: %s", client.RemoteAddr(), upstream.Addr(), err)) connectionErrors.With(prometheus.Labels{"app": p.app, "upstream": upstream.Addr()}).Inc() continue } p.proxyLoop(client, backend.(*net.TCPConn)) break } }
// NewConn creates a new connection for the sio. It generates the session id and // prepares the internal structure for usage. func newConn(serv *Server, fd uint32, nc *net.TCPConn) (c *Conn, err error) { host, _, err := net.SplitHostPort(nc.RemoteAddr().String()) if err != nil { serv.Log("mudoo/newConn: GetRemoteAddr:", err) return } c = &Conn{ serv: serv, fd: fd, nc: nc, raddr: host, online: true, lastConnected: time.Now().UnixNano(), wakeupFlusher: make(chan byte), wakeupReader: make(chan byte), numConns: 0, numHeartbeats: 0, decBuf: new(Buffer), } nc.SetReadBuffer(serv.config.ReadBufferSize) nc.SetWriteBuffer(serv.config.WriteBufferSize) go c.keepalive() // go c.flusher() go c.reader() return }
func (cl *SrvClient) Init(conn *net.TCPConn) { cl.Conn = conn cl.Connected = true WriteToLogFile("Server", fmt.Sprintf("Client:%v connected\n", conn.RemoteAddr())) cl.Conn.SetReadDeadline(time.Now().Add(time.Duration(envConfig.Configs.Server.Timeout) * time.Second)) cl.Read() }
func serve(c *net.TCPConn) { ret := c.RemoteAddr().(*net.TCPAddr).IP.String() _, err := c.Write([]byte(ret)) logError(err) err = c.Close() logError(err) }
// handleConn handles a single incoming TCP connection
func (m *Memberlist) handleConn(conn *net.TCPConn) {
	m.logger.Printf("[DEBUG] memberlist: Responding to push/pull sync with: %s", conn.RemoteAddr())
	defer conn.Close()
	metrics.IncrCounter([]string{"memberlist", "tcp", "accept"}, 1)

	// Read the remote node's full state first.
	join, remoteNodes, userState, err := m.readRemoteState(conn)
	if err != nil {
		m.logger.Printf("[ERR] memberlist: Failed to receive remote state: %s", err)
		return
	}

	// Push our state back. NOTE(review): a failed push is only logged and
	// processing continues to the merge below — confirm this asymmetry with
	// the other error paths is intentional.
	if err := m.sendLocalState(conn, join); err != nil {
		m.logger.Printf("[ERR] memberlist: Failed to push local state: %s", err)
	}

	// Refuse to merge with an incompatible protocol version.
	if err := m.verifyProtocol(remoteNodes); err != nil {
		m.logger.Printf("[ERR] memberlist: Push/pull verification failed: %s", err)
		return
	}

	// Merge the membership state
	m.mergeState(remoteNodes)

	// Invoke the delegate for user state
	if m.config.Delegate != nil {
		m.config.Delegate.MergeRemoteState(userState, join)
	}
}
// NewTcpTask currently only logs the remote and local address of the
// accepted connection; the protobuf read loop below is kept for reference
// but disabled.
func (netservice *NetService) NewTcpTask(conn *net.TCPConn) {
	fmt.Println(conn.RemoteAddr())
	fmt.Println(conn.LocalAddr())
	/* go func (conn *net.TCPConn) { readdata := make([]byte,1000) for true { _,err := conn.Read(readdata) if err == nil { fmt.Println("========",readdata) msg := &MsgDefine.BaseMsg{} proto.Unmarshal(readdata[4:],msg) basemsg := msg.String() fmt.Println("basemsg",basemsg) fmt.Printf("%s===\n",basemsg) } else { fmt.Println("close================ error") conn.Close() break } } }(conn) */
}
func TCPReceive(connection *net.TCPConn, channelRecieve <-chan config.NetworkMessage) { for { message, _ := bufio.NewReader(connection).ReadByte(byte('\x00')) recievedMessage := config.NetworkMessage{recievedAddress: connection.RemoteAddr(), data: message, length: len(message)} channelRecieve <- receivedMessage } }
func handleClient(source *net.TCPConn) { atomic.AddInt32(&daemon.status.actives, 1) defer func() { atomic.AddInt32(&daemon.status.actives, -1) daemon.wg.Done() }() // read req err, req := ReadReq(source) if err != nil { source.Close() Error("conn:%v, read req failed:%v", source.RemoteAddr(), err) return } source.SetKeepAlive(true) source.SetKeepAlivePeriod(time.Second * 60) source.SetLinger(-1) // judge: new conn or reuse conn switch req := req.(type) { case *NewConnReq: Info("new conn request:%v", req) onNewConn(source, req) case *ReuseConnReq: Info("reuse conn request:%v", req) onReuseConn(source, req) default: Info("unknown request:%v", req) source.Close() return } }
func onNewConn(source *net.TCPConn, req *NewConnReq) { settings := daemon.settings host := chooseHost(settings.weight, settings.Hosts) if host == nil { source.Close() Error("choose host failed:%v", source.RemoteAddr()) return } dest, err := net.DialTCP("tcp", nil, host.addr) if err != nil { source.Close() Error("connect to %s failed: %s", host.addr, err.Error()) return } dest.SetKeepAlive(true) dest.SetKeepAlivePeriod(time.Second * 60) dest.SetLinger(-1) id := <-daemon.nextidCh link := NewStableLink(id, source, dest, req.key) daemon.eventCh <- link link.Run() daemon.eventCh <- link link.Wait() }
func readloop(conn *net.TCPConn, clist *[]ChanPair, controlc chan chan string) { output := make(chan string, 2048) input := make(chan string, 2048) controlc <- output controlc <- input address := conn.RemoteAddr() player := parsing.NewPlayer(address.String()) for { b := make([]byte, 4096) n, err := conn.Read(b[:]) data := b[:n] if err != nil { fmt.Println(err) } select { case str := <-input: conn.Write([]uint8(str)) default: } if len(string(data)) == 0 { fmt.Println("PARTING:", address.String()) conn.Close() return } conn.Write(parsing.Parse(player, string(data), output)) } }
func (s *Service) handleConnection(conn *net.TCPConn) { defer conn.Close() defer s.wg.Done() controller := controller.NewSession(conn, s.repo) defer controller.FinishSession() for { select { case <-s.ch: log.Println("disconnecting", conn.RemoteAddr()) return default: } err := controller.Dispatch() if opErr, ok := err.(*net.OpError); ok && opErr.Timeout() { continue } if err != nil { if err.Error() != "EOF" { log.Println(conn.RemoteAddr(), err) } return } } }
// TunnelServe negotiates an incoming tunnel connection and, depending on the
// negotiation outcome, promotes it to either a data tunnel or a signal
// tunnel, or closes it as abnormal.
func (t *Server) TunnelServe(conn *net.TCPConn) {
	fconn := NewConnWithHash(conn)
	defer func() {
		fconn.FreeHash()
		ex.CatchException(recover())
	}()

	nego := &d5SNegotiation{Server: t}
	session, err := nego.negotiate(fconn)
	if session != nil { // unique identifier: "<uid>@<remote address>"
		fconn.identifier = fmt.Sprintf("%s@%s", session.uid, fconn.RemoteAddr())
	}

	if err != nil {
		// DATATUN_SESSION is a sentinel, not a failure: it signals that this
		// connection should become the data tunnel.
		if err == DATATUN_SESSION { // dataTunnel
			go session.DataTunServe(fconn.Conn, nego.tokenBuf)
		} else {
			log.Warningln("Close abnormal connection from", conn.RemoteAddr(), err)
			SafeClose(conn)
			if session != nil {
				t.sessionMgr.clearTokens(session)
			}
		}
	} else if session != nil { // signalTunnel
		atomic.AddInt32(&t.stCnt, 1)
		log.Infof("Client(%s)-ST is established\n", fconn.identifier)
		var st = NewSignalTunnel(session.tun, 0)
		session.svr = t
		session.sigTun = st
		go st.start(session.eventHandler)
	}
}
func (t *Listener) accept(c *net.TCPConn) { g := newGobConn(c) /* XXX: Maybe this handshake should be in auto, where the other side of it is dmsg_, err := g.Read() if err != nil { g.Close() return } dmsg, ok := dmsg_.(*autoDialMsg) if !ok { g.Close() return } if err := g.Write(&autoAcceptMsg{}); err != nil { g.Close() return } */ addr := x.Addr(c.RemoteAddr().String()) t.olk.Lock() defer t.olk.Unlock() l := t.open[dmsg.ID] if l == nil { l = newAcceptLink(addr, dmsg.ID, g, listenerBroker{t}) t.open[dmsg.ID] = l } else { l.AcceptRedial(g) } }
// handleConn handles a single incoming TCP connection
func (m *Memberlist) handleConn(conn *net.TCPConn) {
	m.logger.Printf("[INFO] Responding to push/pull sync with: %s", conn.RemoteAddr())
	defer conn.Close()

	// Read the remote node's full state first.
	remoteNodes, userState, err := m.readRemoteState(conn)
	if err != nil {
		m.logger.Printf("[ERR] Failed to receive remote state: %s", err)
		return
	}

	// Push our state back. NOTE(review): a failed push is only logged and
	// processing continues to the merge below — confirm intentional.
	if err := m.sendLocalState(conn); err != nil {
		m.logger.Printf("[ERR] Failed to push local state: %s", err)
	}

	// Refuse to merge with an incompatible protocol version.
	if err := m.verifyProtocol(remoteNodes); err != nil {
		m.logger.Printf("[ERR] Push/pull verification failed: %s", err)
		return
	}

	// Merge the membership state
	m.mergeState(remoteNodes)

	// Invoke the delegate for user state
	if m.config.Delegate != nil {
		m.config.Delegate.MergeRemoteState(userState)
	}
}
// Read TCP socket and return complete ByteMessage to caller's channel func ReadTCPChannel(conn *net.TCPConn, delimiter []byte, fromSocket chan ByteMessage) error { var message []byte buffer := make([]byte, SocketBuffer) for { if n, err := conn.Read(buffer); err != nil || n == 0 { if n == 0 && err == nil { err = errors.New("No bytes") } Logger.Println("Closing read:", conn.RemoteAddr(), err) conn.Close() return err } else { message = append(message, buffer[0:n]...) m := bytes.Split(message, delimiter) for i, entry := range m { if i < len(m)-1 { fromSocket <- ByteMessage{Msg: entry, RemoteAddr: conn.RemoteAddr()} } else { // overflow message = entry } } } } return nil }
func ReadTCPChannel(conn *net.TCPConn, delimiter string, fromSocket chan Message) error { var message string buffer := make([]byte, socketBuffer) for { if n, err := conn.Read(buffer); err != nil || n == 0 { if n == 0 && err == nil { err = errors.New("No bytes") } myLog.Printf("Closing:%v %v\n", conn.RemoteAddr(), err) conn.Close() return err } else { message += string(buffer[0:n]) m := strings.Split(message, delimiter) for i, entry := range m { if i < len(m)-1 { fromSocket <- Message{Msg: entry, RemoteAddr: conn.RemoteAddr()} } else { // overflow message = entry } } } } return nil }
func readRemoteLoop(client, remote *net.TCPConn, stopChan chan<- bool) { defer func() { stopChan <- true }() addr := client.RemoteAddr() for { var buf [4096]byte nr, err := remote.Read(buf[:]) if err != nil && err != os.EOF { log.Printf("%v: Failed to read from the remote: %v", addr, err) return } start := 0 for start < nr { nw, err := client.Write(buf[start:nr]) if err != nil && err != os.EOF { log.Printf("%v: Failed to write to the client: %v", addr, err) return } start += nw } } }
func serve(con *net.TCPConn) { defer con.Close() if *verbose { fmt.Fprintf(os.Stdout, "serving %s\n", con.RemoteAddr().String()) } for { line, err := readUntilCrLf(con) if err != nil { // TODO : pass error message con.Write([]byte("\"internal error\"\r\n")) continue } tokens := strings.Split(string(line), " ", -1) command := tokens[0] if command == "quit" { writeJson(con, "bye.") break } f, ok := commands[command] if ok { f(con, tokens[1:]) } else { writeJson(con, fmt.Sprintf("unknown command '%s'", command)) } } }
// newRCConn establishes a reliable-connected (RC) queue pair over the TCP
// connection c: it creates the QP, exchanges QP parameters with the peer
// in-band over c, and transitions the QP to a connected state.
func newRCConn(c *net.TCPConn, iface *Interface) (*RCConn, error) {
	// Leave enough room in the completion queue for any operation,
	// including inline sends, to return an error. CQ overruns
	// sometimes cause internal errors in the HCA, which can make the
	// kernel very unhappy.
	qp, err := iface.NewQueuePair(10)
	if err != nil {
		return nil, err
	}

	// Bound the in-band parameter exchange so a stuck peer cannot hang us.
	if err := c.SetDeadline(ioDeadline()); err != nil {
		checkClose(qp)
		return nil, err
	}

	// Swap lid/qpn/psn with the peer; the QP is cleaned up on any failure.
	destLid, destQpn, destPsn, err := writeReadQPParams(c, iface.Lid(), qp.Qpn(), qp.Psn())
	if err != nil {
		checkClose(qp)
		return nil, err
	}

	messages, meta := CreateBuffers()
	if err := qp.Setup(destLid, destQpn, destPsn, messages); err != nil {
		checkClose(qp)
		return nil, err
	}

	laddr, raddr := c.LocalAddr(), c.RemoteAddr()
	// NOTE(review): positional initialization — field order must match the
	// RCConn declaration; math.MaxInt64 presumably seeds a deadline field.
	rcc := &RCConn{iface, laddr, raddr, qp, math.MaxInt64, true, messages, meta, false}
	return rcc, nil
}
func ConnectedHandler(server *sev.Nexus, conn *net.TCPConn) { ipStr := conn.RemoteAddr().String() str := "A new connection :" + ipStr chatMsg := &protocol.ChatMsg{ChatContext: &str} byt, _ := proto.Marshal(chatMsg) broBack(server, byt, int32(protocol.MessageType_MSG_TYPE_CHAT_MESSAGE_RES)) }
func send(tcpconn *net.TCPConn, rsp *Response) (err error) { Len := uint32(PkgLenSize) + uint32(len(rsp.Head)) + uint32(len(rsp.Body)) Hlen := uint16(Uint16Size) + uint16(len(rsp.Head)) data := make([]byte, 0, int(Len)) // len:0, cap:Len; TODO(zog): cache buf := bytes.NewBuffer(data) // TODO(zog): 复用 binary.Write(buf, binary.BigEndian, Len) binary.Write(buf, binary.BigEndian, Hlen) buf.Write(rsp.Head) buf.Write(rsp.Body) if debug { glog.Infof("sent bytes to %s, len: %d", tcpconn.RemoteAddr().String(), len(buf.Bytes())) glog.Flush() } tcpconn.SetDeadline(time.Now().Add(100 * time.Millisecond)) if _, err = tcpconn.Write(buf.Bytes()); err != nil { return err } if debug { glog.Infof("sent data(len:%d): %v", buf.Len(), buf.Bytes()) glog.Flush() } return nil }
func receiveTcpConn(conn *net.TCPConn, rc TcpReceiveCaller) { for { buf := bufio.NewReader(conn) line, err := buf.ReadBytes('\n') if err != nil { // remove client addr := conn.RemoteAddr().String() if v, ok := clients[addr]; ok { uid := v.UserId delete(clients, addr) addr2 := users[uid] if strings.Index(addr2, "$") == -1 { delete(users, uid) } else { users[uid] = strings.Replace(strings.Replace(addr2, addr, "", 1), "$$", "$", -1) } } printf(true, "[ CLIENT][ DISCONN] - IP : %s disconnect!active clients : %d", conn.RemoteAddr().String(), len(clients)) break } if d, err := rc(conn, line[:len(line)-1]); err != nil { // remove '\n' conn.Write([]byte("error$" + err.Error())) } else if d != nil { conn.Write(d) } conn.Write([]byte("\n")) conn.SetReadDeadline(time.Now().Add(ReadDeadLine)) // discount after 5m } }
// 分配连接处理 func (self *AgentSvr) handleConnection(conn *net.TCPConn) { defer conn.Close() defer self.wg.Done() defer func() { if err := recover(); err != nil { Error("handle agent connection:%v failed:%v", conn.RemoteAddr(), err) } }() Info("new agent connection:%v", conn.RemoteAddr()) for { var sz uint32 err := binary.Read(conn, binary.BigEndian, &sz) if err != nil { Error("read conn failed:%v, err:%v", conn.RemoteAddr(), err) break } buf := make([]byte, sz) _, err = io.ReadFull(conn, buf) if err != nil { Error("read conn failed:%v, err:%v", conn.RemoteAddr(), err) break } var req Request if err = json.Unmarshal(buf, &req); err != nil { Error("parse request failed:%v, err:%v", conn.RemoteAddr(), err) } go self.dispatchRequst(conn, &req) } }
// handleConn handles a single incoming TCP connection
func (m *Memberlist) handleConn(conn *net.TCPConn) {
	m.logger.Printf("[DEBUG] memberlist: TCP connection from: %s", conn.RemoteAddr())
	defer conn.Close()
	metrics.IncrCounter([]string{"memberlist", "tcp", "accept"}, 1)

	// Peek at the message type so user messages and push/pull syncs can be
	// routed differently.
	msgType, bufConn, dec, err := m.readTCP(conn)
	if err != nil {
		m.logger.Printf("[ERR] memberlist: failed to receive: %s", err)
		return
	}

	if msgType == userMsg {
		if err := m.readUserMsg(bufConn, dec); err != nil {
			m.logger.Printf("[ERR] memberlist: Failed to receive user message: %s", err)
		}
	} else if msgType == pushPullMsg {
		// Full anti-entropy exchange: read their state, push ours, merge.
		join, remoteNodes, userState, err := m.readRemoteState(bufConn, dec)
		if err != nil {
			m.logger.Printf("[ERR] memberlist: Failed to read remote state: %s", err)
			return
		}

		if err := m.sendLocalState(conn, join); err != nil {
			m.logger.Printf("[ERR] memberlist: Failed to push local state: %s", err)
			return
		}

		// NOTE(review): merge errors are dropped silently here — confirm
		// whether they should be logged like the other failure paths.
		if err := m.mergeRemoteState(join, remoteNodes, userState); err != nil {
			return
		}
	} else {
		m.logger.Printf("[ERR] memberlist: Received invalid msgType (%d)", msgType)
	}
}