// Relaunch re-execs this image without dropping the listener passed to
// this function.
func Relaunch(l *net.TCPListener) error {
	f, err := l.File()
	if err != nil {
		return err
	}
	argv0, err := exec.LookPath(os.Args[0])
	if err != nil {
		return err
	}
	wd, err := os.Getwd()
	if err != nil {
		return err
	}
	if err := os.Setenv("GOAGAIN_FD", fmt.Sprint(f.Fd())); err != nil {
		return err
	}
	if err := os.Setenv("GOAGAIN_PPID", strconv.Itoa(syscall.Getpid())); err != nil {
		return err
	}
	p, err := os.StartProcess(argv0, os.Args, &os.ProcAttr{
		Dir:   wd,
		Env:   os.Environ(),
		Files: []*os.File{os.Stdin, os.Stdout, os.Stderr, f},
		Sys:   &syscall.SysProcAttr{},
	})
	if err != nil {
		return err
	}
	log.Printf("spawned child %d\n", p.Pid)
	return nil
}
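// A companion sketch (not part of the original): how a child process
// started by Relaunch might recover the inherited listener. Only the
// GOAGAIN_FD variable and the descriptor's position (3, after
// stdin/stdout/stderr) follow from Relaunch above; the function name and
// error handling here are assumptions.
func recoverListener() (*net.TCPListener, error) {
	fdStr := os.Getenv("GOAGAIN_FD")
	if fdStr == "" {
		return nil, errors.New("GOAGAIN_FD not set; not a relaunched child")
	}
	fd, err := strconv.Atoi(fdStr)
	if err != nil {
		return nil, err
	}
	f := os.NewFile(uintptr(fd), "listener")
	// net.FileListener dups the descriptor, so the wrapper can be closed.
	defer f.Close()
	l, err := net.FileListener(f)
	if err != nil {
		return nil, err
	}
	tl, ok := l.(*net.TCPListener)
	if !ok {
		return nil, fmt.Errorf("unexpected listener type %T", l)
	}
	return tl, nil
}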
func newServer(s *state.State, lis *net.TCPListener, cfg ServerConfig) (_ *Server, err error) {
	logger.Infof("listening on %q", lis.Addr())
	srv := &Server{
		state:     s,
		statePool: state.NewStatePool(s),
		addr:      lis.Addr().(*net.TCPAddr), // cannot fail
		tag:       cfg.Tag,
		dataDir:   cfg.DataDir,
		logDir:    cfg.LogDir,
		limiter:   utils.NewLimiter(loginRateLimit),
		validator: cfg.Validator,
		adminApiFactories: map[int]adminApiFactory{
			0: newAdminApiV0,
			1: newAdminApiV1,
			2: newAdminApiV2,
		},
	}
	srv.authCtxt = newAuthContext(srv)
	tlsCert, err := tls.X509KeyPair(cfg.Cert, cfg.Key)
	if err != nil {
		return nil, err
	}
	// TODO(rog) check that *srvRoot is a valid type for using
	// as an RPC server.
	tlsConfig := tls.Config{
		Certificates: []tls.Certificate{tlsCert},
	}
	changeCertListener := newChangeCertListener(lis, cfg.CertChanged, tlsConfig)
	go srv.run(changeCertListener)
	return srv, nil
}
// serveTCP starts a TCP listener for the server.
// Each request is handled in a separate goroutine.
func (srv *Server) serveTCP(l *net.TCPListener) error {
	defer l.Close()
	if srv.NotifyStartedFunc != nil {
		srv.NotifyStartedFunc()
	}
	handler := srv.Handler
	if handler == nil {
		handler = DefaultServeMux
	}
	rtimeout := srv.getReadTimeout() // deadline is not used here
	for {
		rw, e := l.AcceptTCP()
		if e != nil {
			continue
		}
		m, e := srv.readTCP(rw, rtimeout)
		select {
		case <-srv.stopTCP:
			return nil
		default:
		}
		if e != nil {
			continue
		}
		srv.wgTCP.Add(1)
		go srv.serve(rw.RemoteAddr(), handler, m, nil, nil, rw)
	}
	panic("dns: not reached")
}
func (this *Server) Start(listener *net.TCPListener) {
	log.Printf("Start listening on %v", listener.Addr())
	this.waitGroup.Add(1)
	defer func() {
		listener.Close()
		this.waitGroup.Done()
	}()
	for {
		select {
		case <-this.exitCh:
			log.Printf("Stop listening on %v", listener.Addr())
			return
		default:
		}
		// A bounded accept deadline lets the loop re-check exitCh
		// periodically instead of blocking forever in AcceptTCP.
		listener.SetDeadline(time.Now().Add(this.acceptTimeout))
		conn, err := listener.AcceptTCP()
		if err != nil {
			if opErr, ok := err.(*net.OpError); ok && opErr.Timeout() {
				// log.Printf("Accept timeout: %v", opErr)
				continue
			}
			log.Printf("Accept error: %v", err)
			continue
		}
		log.Printf("Accept: %v", conn.RemoteAddr())
		go this.handleClientConn(conn)
	}
}
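// A hypothetical Stop counterpart for Start above (a sketch; only the
// exitCh and waitGroup fields come from the original). Closing exitCh,
// rather than sending one value per listener, wakes every accept loop at
// once; the WaitGroup then reports when each loop has closed its listener.
func (this *Server) Stop() {
	close(this.exitCh)
	this.waitGroup.Wait()
}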
func Server(listen *net.TCPListener) {
	for {
		conn, err := listen.AcceptTCP()
		if err != nil {
			log.Printf("error accepting client connection: %v", err)
			continue
		}
		log.Printf("client connected from: %s", conn.RemoteAddr().String())
		go func() {
			// Close when this client's goroutine exits; a defer in the
			// accept loop would never run (Server loops forever) and
			// would leak connections.
			defer conn.Close()
			data := make([]byte, 2)
			fmt.Printf("data len: %v\n", len(data))
			for {
				// io.ReadFull blocks until exactly len(data) bytes arrive.
				i, err := io.ReadFull(conn, data)
				if err != nil {
					log.Printf("error reading client data: %v", err)
					break
				}
				log.Printf("client %s sent: %s", conn.RemoteAddr().String(), string(data[0:i]))
				conn.Write([]byte("finish"))
			}
		}()
	}
}
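// A minimal client sketch for the echo server above (illustrative only;
// the address is an assumption). It sends exactly two bytes to match the
// server's io.ReadFull buffer, then prints the reply.
func exampleClient() error {
	conn, err := net.Dial("tcp", "127.0.0.1:8000")
	if err != nil {
		return err
	}
	defer conn.Close()
	if _, err := conn.Write([]byte("hi")); err != nil {
		return err
	}
	reply := make([]byte, 64)
	n, err := conn.Read(reply)
	if err != nil {
		return err
	}
	fmt.Printf("server replied: %s\n", reply[:n])
	return nil
}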
func (server *GoTelnet) acceptConnection(listener *net.TCPListener) {
	conn, err := listener.Accept()
	if err != nil {
		fmt.Fprintf(os.Stderr, "accept failed: %s\n", err)
		return
	}
	server.chanForNextAccept <- true
	// Only one debug session at a time: drop any previous connection.
	if server.preConnection != nil {
		(*server.preConnection).Close()
	}
	server.preConnection = &conn
	conn.Write([]byte("welcome to telnet debug server"))
	defer conn.Close()
	for {
		buf := make([]byte, 1024)
		readlen, err := conn.Read(buf)
		if err != nil {
			fmt.Fprintf(os.Stderr, "close connection when reading from socket: %s\n", err)
			return
		}
		if readlen == 0 {
			fmt.Printf("Connection closed by remote host\n")
			return
		}
		if buf[0] == 13 { // bare carriage return: re-display the prompt
			conn.Write([]byte(server.Promote + ">"))
		}
		if string(buf[0:3]) == "bye" {
			conn.Write(buf)
			break
		}
		server.commandHandler.Handle(string(buf))
	}
}
// listenTcp listens for new public tcp connections from the internet.
func (t *Tunnel) listenTcp(listener *net.TCPListener) {
	// Register the recover handler once, before the loop: a defer inside
	// the loop would pile up one deferred call per iteration and none of
	// them would run until the function returned.
	defer func() {
		if r := recover(); r != nil {
			log.Warn("listenTcp failed with error %v", r)
		}
	}()
	for {
		// accept public connections
		tcpConn, err := listener.AcceptTCP()
		if err != nil {
			// not an error, we're shutting down this tunnel
			if atomic.LoadInt32(&t.closing) == 1 {
				return
			}
			t.Error("Failed to accept new TCP connection: %v", err)
			continue
		}
		conn := conn.Wrap(tcpConn, "pub")
		conn.AddLogPrefix(t.Id())
		conn.Info("New connection from %v", conn.RemoteAddr())
		go t.HandlePublicConnection(conn)
	}
}
// serve accepts connections from the given TCP listener and dispatches each
// connection to the RPC server. Connections are only accepted from localhost
// and the seesaw node that we are configured to peer with.
func (s *syncServer) serve(l *net.TCPListener) error {
	defer l.Close()

	s.server = rpc.NewServer()
	s.server.Register(&SeesawSync{s})

	for {
		c, err := l.AcceptTCP()
		if err != nil {
			if ne, ok := err.(net.Error); ok && ne.Temporary() {
				time.Sleep(100 * time.Millisecond)
				continue
			}
			return err
		}
		raddr := c.RemoteAddr().String()
		host, _, err := net.SplitHostPort(raddr)
		if err != nil {
			log.Errorf("Failed to parse remote address %q: %v", raddr, err)
			c.Close()
			continue
		}
		rip := net.ParseIP(host)
		if rip == nil || (!rip.IsLoopback() && !rip.Equal(s.engine.config.Peer.IPv4Addr) && !rip.Equal(s.engine.config.Peer.IPv6Addr)) {
			log.Warningf("Rejecting connection from non-peer (%s)...", rip)
			c.Close()
			continue
		}
		log.Infof("Sync connection established from %s", rip)
		go s.server.ServeConn(c)
	}
}
func startLocalProxyServer(proxy ProxyConfig) (*net.TCPListener, error) {
	tcpaddr, err := net.ResolveTCPAddr("tcp", proxy.Local)
	if err != nil {
		// log.Fatalf would call os.Exit and make the error return
		// unreachable, so log and return instead.
		log.Printf("[ERROR]Local server address:%s error:%v", proxy.Local, err)
		return nil, err
	}
	lp, err := net.ListenTCP("tcp", tcpaddr)
	if err != nil {
		log.Printf("[ERROR]Can NOT listen on address:%s", proxy.Local)
		return nil, err
	}
	log.Printf("Listen on address %s", proxy.Local)
	go func() {
		for proxyServerRunning {
			conn, err := lp.AcceptTCP()
			if err != nil {
				continue
			}
			go serveProxyConn(conn, proxy)
		}
		lp.Close()
	}()
	return lp, nil
}
func (ctx *bootContext) startServer() {
	defer func() {
		sigChan <- Bye
	}()
	var (
		conn *net.TCPConn
		ln   *net.TCPListener
		err  error
	)
	server := NewServer(ctx.cman)
	addr := ctx.cman.ListenAddr(SR_SERVER)
	ln, err = net.ListenTCP("tcp", addr)
	fatalError(err)
	defer ln.Close()
	ctx.register(server, ln)
	log.Infoln(versionString())
	log.Infoln("Server is listening on", addr)
	for {
		conn, err = ln.AcceptTCP()
		if err == nil {
			go server.TunnelServe(conn)
		} else {
			SafeClose(conn)
		}
	}
}
// FreePorts returns at most n tcp ports available to use.
func FreePorts(n int, matchFunc func(int) bool) (ports []int, err error) {
	if n > 50000 {
		return nil, fmt.Errorf("too many ports requested (%d)", n)
	}
	for len(ports) < n {
		var addr *net.TCPAddr
		addr, err = net.ResolveTCPAddr("tcp", "localhost:0")
		if err != nil {
			break
		}
		var l *net.TCPListener
		l, err = net.ListenTCP("tcp", addr)
		if err != nil {
			break
		}
		// Listening on port 0 makes the kernel pick a free port.
		port := l.Addr().(*net.TCPAddr).Port
		l.Close()
		if matchFunc == nil {
			ports = append(ports, port)
			continue
		}
		if matchFunc(port) {
			ports = append(ports, port)
		}
	}
	return
}
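// A usage sketch for FreePorts (not from the original): request three
// even-numbered ports. Note that each candidate listener is closed before
// FreePorts returns, so another process may grab a returned port before the
// caller binds it.
func exampleFreePorts() {
	ports, err := FreePorts(3, func(p int) bool { return p%2 == 0 })
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("free ports:", ports)
}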
func (ctx *bootContext) startClient() {
	defer func() {
		sigChan <- Bye
	}()
	var (
		conn *net.TCPConn
		ln   *net.TCPListener
		err  error
	)
	client := NewClient(ctx.cman)
	addr := ctx.cman.ListenAddr(SR_CLIENT)
	ln, err = net.ListenTCP("tcp", addr)
	fatalError(err)
	defer ln.Close()
	ctx.register(client, ln)
	log.Infoln(versionString())
	log.Infoln("Proxy(SOCKS5/HTTP) is listening on", addr)
	// connect to server
	go client.StartTun(true)
	for {
		conn, err = ln.AcceptTCP()
		if err == nil {
			go client.ClientServe(conn)
		} else {
			SafeClose(conn)
		}
	}
}
// acceptConnections accepts TCP connections on listener and sends them on
// the channel tcp_connections.
func acceptConnections(listener *net.TCPListener, tcp_connections chan<- *net.TCPConn) {
	for {
		message := true
		for {
			// If we've reached the maximum number of connections, wait.
			if atomic.AddInt32(&ActiveConnections, 1) <= config.MaxConnections {
				break
			}
			atomic.AddInt32(&ActiveConnections, -1)
			if message {
				util.Log(0, "WARNING! Maximum number of %v active connections reached => Throttling", config.MaxConnections)
				message = false
			}
			time.Sleep(100 * time.Millisecond)
		}
		tcpConn, err := listener.AcceptTCP()
		if err != nil {
			// Release the slot reserved above; otherwise every failed
			// accept would leak a count in ActiveConnections.
			atomic.AddInt32(&ActiveConnections, -1)
			if Shutdown {
				return
			}
			util.Log(0, "ERROR! AcceptTCP: %v", err)
		} else {
			tcp_connections <- tcpConn
		}
	}
}
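// A hypothetical consumer for the channel above: whoever handles a
// connection releases its slot in ActiveConnections when done. Only the
// channel, the counter, and the decrement obligation follow from
// acceptConnections; handleConnection is an assumed name.
func consumeConnections(tcp_connections <-chan *net.TCPConn) {
	for conn := range tcp_connections {
		go func(c *net.TCPConn) {
			defer atomic.AddInt32(&ActiveConnections, -1)
			defer c.Close()
			handleConnection(c)
		}(conn)
	}
}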
func Server(listen *net.TCPListener) {
	for {
		conn, err := listen.AcceptTCP()
		if err != nil {
			fmt.Println("error accepting client connection:", err.Error())
			continue
		}
		fmt.Println("client connection from:", conn.RemoteAddr().String())
		go func() {
			// Close per-connection, not via defer in the accept loop
			// (such defers would never run while Server loops forever).
			defer conn.Close()
			data := make([]byte, 128)
			for {
				i, err := conn.Read(data)
				if err != nil {
					fmt.Println("error reading client data:", err.Error())
					break
				}
				fmt.Println("client", conn.RemoteAddr().String(), "sent:", string(data[0:i]))
				sms := make([]byte, 128)
				fmt.Print("enter a message to send: ")
				fmt.Scan(&sms)
				conn.Write(sms)
			}
		}()
	}
}
func fakeReadingAMI(t *testing.T, l *net.TCPListener, expect [][]byte) {
	lconn, err := l.Accept()
	if err != nil {
		t.Fail()
		t.Log("error accepting", err)
		return
	}
	defer lconn.Close()
	err = lconn.SetDeadline(time.Now().Add(5 * time.Second))
	if err != nil {
		t.Fail()
		t.Log("error setting deadline", err)
		return
	}
	_, err = lconn.Write([]byte("Asterisk Call Manager/1.3\r\n"))
	if err != nil {
		t.Fail()
		t.Log("error writing", err)
		return
	}
	for _, a := range expect {
		b := make([]byte, 1024)
		n, err := lconn.Read(b)
		if err != nil {
			t.Fail()
			t.Log("error reading:", err)
		}
		b = b[:n]
		if !bytes.Equal(a, b) {
			t.Fail()
			t.Logf("read expected %#v, got %#v", string(a), string(b))
		}
	}
}
func Server(listen *net.TCPListener) {
	for {
		conn, err := listen.AcceptTCP()
		if err != nil {
			fmt.Println("error accepting client connection:", err.Error())
			continue
		}
		fmt.Println("client connection from:", conn.RemoteAddr().String())
		go func() {
			// Close when this client's goroutine exits; a defer in the
			// accept loop would never run.
			defer conn.Close()
			data := make([]byte, 128)
			for {
				i, err := conn.Read(data)
				if err != nil {
					fmt.Println("error reading client data:", err.Error())
					break
				}
				fmt.Println("client sent:", string(data[0:i]))
				outstr := "finish_hahah"
				// conn.Write([]byte{'f', 'i', 'n', 'i', 's', 'h'})
				conn.Write([]byte(outstr))
			}
		}()
	}
}
func (s *TcpServer) accept(lis *net.TCPListener) {
	defer lis.Close()
	defer s.waitGroup.Done()
	for {
		select {
		case <-s.ch: // stop goroutine
			//log.Println("close tcp listener")
			return
		default:
		}
		conn, err := lis.AcceptTCP()
		if err != nil {
			if opErr, ok := err.(*net.OpError); ok && opErr.Timeout() {
				//log.Println("Stop accepting connections")
				continue
			}
			log.Println(err)
			// conn is nil on error; skip it instead of dereferencing it.
			continue
		}
		conn.SetLinger(-1)
		var trans Transport = NewTCPTransport(conn, s.rTimeout, s.wTimeout)
		s.connections[trans.Id()] = trans
		if s.transHandler != nil {
			s.transHandler.OnConnect(trans)
		}
		s.waitGroup.Add(1)
		go s.run(trans)
	}
}
// AcceptTCP accepts connections on the listener and serves requests
// for each incoming connection. AcceptTCP blocks; the caller typically
// invokes it in a go statement.
func (server *Server) AcceptTCP(lis *net.TCPListener, i int) {
	var (
		conn *net.TCPConn
		err  error
	)
	for {
		log.Debug("server: accept round: %d", i)
		if conn, err = lis.AcceptTCP(); err != nil {
			// if listener close then return
			log.Error("listener.Accept(\"%s\") error(%v)", lis.Addr().String(), err)
			return
		}
		if err = conn.SetKeepAlive(Conf.TCPKeepalive); err != nil {
			log.Error("conn.SetKeepAlive() error(%v)", err)
			return
		}
		// Size the read buffer from the receive setting and the write
		// buffer from the send setting.
		if err = conn.SetReadBuffer(Conf.TCPRcvbuf); err != nil {
			log.Error("conn.SetReadBuffer() error(%v)", err)
			return
		}
		if err = conn.SetWriteBuffer(Conf.TCPSndbuf); err != nil {
			log.Error("conn.SetWriteBuffer() error(%v)", err)
			return
		}
		go server.serveConn(conn, i)
		if i++; i == maxInt {
			i = 0
		}
	}
}
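// A hedged usage sketch for AcceptTCP above: its doc comment says it blocks
// and is typically invoked in a go statement. Conf.TCPBind (assumed here to
// be a []string of listen addresses) and the one-goroutine-per-address
// layout are assumptions, not part of the original.
func startTCPAccepts(server *Server) error {
	for i, bind := range Conf.TCPBind {
		addr, err := net.ResolveTCPAddr("tcp", bind)
		if err != nil {
			return err
		}
		lis, err := net.ListenTCP("tcp", addr)
		if err != nil {
			return err
		}
		go server.AcceptTCP(lis, i)
	}
	return nil
}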
func New(id circuit.WorkerID, bindAddr string, host string) *Transport {
	// Bind
	if !strings.Contains(bindAddr, ":") {
		bindAddr = bindAddr + ":0"
	}
	l_, err := net.Listen("tcp", bindAddr)
	if err != nil {
		panic(err)
	}
	l := l_.(*net.TCPListener)

	// Build transport structure
	t := &Transport{
		listener:   l,
		addrtabl:   makeAddrTabl(),
		pipelining: DefaultPipelining,
		remote:     make(map[circuit.WorkerID]*link),
		ach:        make(chan *conn),
	}

	// Resolve self address
	laddr := l.Addr().(*net.TCPAddr)
	t.self, err = NewAddr(id, os.Getpid(), fmt.Sprintf("%s:%d", host, laddr.Port))
	if err != nil {
		panic(err)
	}

	// This LocalAddr might be useless for connect purposes (e.g. 0.0.0.0).
	// Consider self instead.
	t.bind = t.addrtabl.Normalize(&Addr{ID: id, PID: os.Getpid(), Addr: laddr})

	go t.loop()
	return t
}
func accept_clients(listener *net.TCPListener, wg *sync.WaitGroup) {
	defer wg.Done()
	for {
		if server_state == STOPPED {
			return
		}
		// A short accept deadline lets the loop notice a stop request.
		listener.SetDeadline(time.Now().Add(time.Second))
		conn, err := listener.Accept()
		if err != nil {
			if netErr, ok := err.(net.Error); ok && netErr.Timeout() && netErr.Temporary() {
				continue
			}
			logger.Printf("accept client error: %v\n", err)
			server_state = STOPPED
			return
		}
		client := tsp_task_manager.ClientInfo{new_client_id, &conn}
		new_client_id++
		tsp_task_manager.AddNewClient(client)
		logger.Println("accepted client #", client.ID)
		go listen_client(client)
	}
}
func tokenFromWeb(g_config *oauth.Config) *oauth.Token {
	ch := make(chan string)
	randState := fmt.Sprintf("st%d", time.Now().UnixNano())
	// The callback listener must actually be created: passing a nil
	// *net.TCPListener to serveCallback and deferring Close on it would
	// both fail.
	addr, err := net.ResolveTCPAddr("tcp", "localhost:8080")
	if err != nil {
		log.Fatalf("resolve callback address: %v", err)
	}
	listener, err := net.ListenTCP("tcp", addr)
	if err != nil {
		log.Fatalf("listen for OAuth callback: %v", err)
	}
	defer listener.Close()
	go serveCallback(listener, randState, ch)
	g_config.RedirectURL = "http://localhost:8080/callback"
	authUrl := g_config.AuthCodeURL(randState)
	go openUrl(authUrl)
	log.Printf("Authorize this app at: %s", authUrl)
	code := <-ch
	log.Printf("Got code: %s", code)
	t := &oauth.Transport{
		Config:    g_config,
		Transport: condDebugTransport(http.DefaultTransport),
	}
	_, err = t.Exchange(code)
	if err != nil {
		log.Fatalf("Token exchange error: %v", err)
	}
	return t.Token
}
func accept_workers(listener *net.TCPListener, wg *sync.WaitGroup) {
	defer wg.Done()
	for {
		if server_state == STOPPED {
			return
		}
		// A short accept deadline lets the loop notice a stop request.
		listener.SetDeadline(time.Now().Add(time.Second))
		conn, err := listener.Accept()
		if err != nil {
			if netErr, ok := err.(net.Error); ok && netErr.Timeout() && netErr.Temporary() {
				continue
			}
			logger.Printf("accept worker error: %v\n", err)
			server_state = STOPPED
			return
		}
		worker := &tsp_task_manager.WorkerInfo{new_worker_id, &conn, -1}
		tsp_task_manager.AddNewWorker(worker)
		logger.Println("accepted worker #", new_worker_id)
		go listen_worker(worker)
		new_worker_id++
	}
}
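// A hypothetical stop helper for the two accept loops above (a sketch, not
// from the original). It reuses their unsynchronized server_state flag and
// assumes wg is the same WaitGroup both loops were registered with; each
// loop then exits at its next one-second deadline check.
func stop_accept_loops(wg *sync.WaitGroup) {
	server_state = STOPPED
	wg.Wait()
}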
// Start serves debug TCP clients. If listener is non-nil, then it's used;
// otherwise listen on TCP using the given port.
func (s *Server) Start(port int, listener *net.TCPListener) error {
	if listener == nil {
		ip, err := service.GetLocalIp()
		if err != nil {
			return err
		}
		addr := fmt.Sprintf("%s:%d", ip, port)
		tcpAddr, err := net.ResolveTCPAddr("tcp4", addr)
		if err != nil {
			return err
		}
		s.l.Println("Listening for debug TCP clients on", addr)
		listener, err = net.ListenTCP("tcp", tcpAddr)
		if err != nil {
			return err
		}
	}
	go func() {
		for {
			c, err := listener.AcceptTCP()
			if err != nil {
				continue
			}
			c.SetWriteDeadline(time.Now().Add(10 * time.Millisecond))
			c.SetKeepAlive(true)
			c.SetKeepAlivePeriod(tcpKeepAlivePeriod)
			c.SetNoDelay(true)
			go s.tcpClientServer(c)
		}
	}()
	return nil
}
func (srv *Server) socketListen() error {
	var la *net.TCPAddr
	var err error
	if la, err = net.ResolveTCPAddr("tcp", srv.Addr); err != nil {
		return err
	}
	var l *net.TCPListener
	if l, err = net.ListenTCP("tcp", la); err != nil {
		return err
	}
	srv.listener = l
	// Set up the listener to be non-blocking if we're not on Windows.
	// This is required for hot restart to work.
	if runtime.GOOS != "windows" {
		if srv.listenerFile, err = l.File(); err != nil {
			return err
		}
		fd := int(srv.listenerFile.Fd())
		if e := setupFDNonblock(fd); e != nil {
			return e
		}
		if srv.sendfile {
			if e := syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, srv.sockOpt, 1); e != nil {
				return e
			}
		}
	}
	return nil
}
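// A minimal sketch of the setupFDNonblock helper referenced above, assuming
// it only marks the descriptor non-blocking; the real implementation may do
// more.
func setupFDNonblock(fd int) error {
	return syscall.SetNonblock(fd, true)
}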
func main() {
	var config *yaml.File
	var listenPortStr string
	var listenHost string
	var service string
	var err error
	var tcpAddr *net.TCPAddr
	var listener *net.TCPListener
	var conn net.Conn

	// Check the config read; ignoring the error would leave config nil
	// and panic on the Get calls below.
	config, err = yaml.ReadFile("conf/khutter.yaml")
	if err != nil {
		log.Printf("Could not read config: %s", err.Error())
		os.Exit(1)
	}
	listenHost, _ = config.Get("network.listen")
	listenPortStr, _ = config.Get("network.port")
	service = strings.Join([]string{listenHost, listenPortStr}, ":")

	tcpAddr, err = net.ResolveTCPAddr("tcp4", service)
	if err != nil {
		log.Printf("Could not resolve TCP Address/Port: %s", err.Error())
		os.Exit(1)
	}
	listener, err = net.ListenTCP("tcp4", tcpAddr)
	if err != nil {
		log.Printf("Could not listen on host/port: %s", err.Error())
		os.Exit(1)
	}
	for {
		conn, err = listener.AcceptTCP()
		if err != nil {
			log.Printf("Could not accept TCP Connection: %s", err.Error())
			continue
		}
		go handleClient(conn)
	}
}
func Server(listener *net.TCPListener) {
	for {
		con, err := listener.AcceptTCP()
		if err != nil {
			p("accept error")
			continue
		}
		p("get connection from ", con.RemoteAddr().String())
		go func() {
			// Close when this connection's goroutine exits; a defer in
			// the accept loop would never run.
			defer con.Close()
			data := make([]byte, 1024)
			for {
				i, err := con.Read(data)
				if err != nil {
					p("read data failed")
					break
				}
				p("got data from ", con.RemoteAddr().String(), ", values:", string(data[0:i]))
				ret, err := con.Write(data[0:i])
				p("write back~~~~~", ret, err)
			}
		}()
	}
}
func (r *SrsServer) Serve() error {
	// Too many open files will throw a panic.
	addr, err := net.ResolveTCPAddr("tcp", r.addr)
	if err != nil {
		glog.Errorf("resolve listen address failed, err=%v", err)
		return fmt.Errorf("resolve listen address failed, err=%v", err)
	}
	var listener *net.TCPListener
	listener, err = net.ListenTCP("tcp", addr)
	if err != nil {
		glog.Errorf("listen failed, err=%v", err)
		return fmt.Errorf("listen failed, err=%v", err)
	}
	defer listener.Close()

	for {
		glog.Info("listener ready to accept client")
		conn, err := listener.AcceptTCP()
		if err != nil {
			glog.Errorf("accept client failed, err=%v", err)
			return fmt.Errorf("accept client failed, err=%v", err)
		}
		glog.Info("TCP Connected")
		go r.serve(conn)
	}
}
func NewTCPSink(laddr string) (*Sink, error) {
	c := new(Connection)
	s := new(Sink)
	s.packets = make(chan *Packet)
	s.Connection = c
	localAddr := c.getTCPAddr(laddr)
	// Propagate an address-resolution failure; otherwise the accept
	// goroutine below would spin on a nil listener.
	s.err = c.err
	var listener *net.TCPListener
	if s.err == nil {
		listener, s.err = net.ListenTCP("tcp", localAddr)
	}
	if s.err == nil {
		go func() {
			for {
				conn, err := listener.Accept()
				if err == nil {
					go s.receivePackets(conn, s.newCloseChannel())
				} else {
					logger.Warningf("Failed to accept new connection: %v", err)
				}
			}
		}()
	}
	return s, s.err
}
// acceptTCP accepts connections on the listener and serves requests
// for each incoming connection. acceptTCP blocks; the caller typically
// invokes it in a go statement.
func acceptTCP(server *Server, lis *net.TCPListener) {
	var (
		conn *net.TCPConn
		err  error
		r    int
	)
	for {
		if conn, err = lis.AcceptTCP(); err != nil {
			// if listener close then return
			log.Error("listener.Accept(\"%s\") error(%v)", lis.Addr().String(), err)
			return
		}
		if err = conn.SetKeepAlive(Conf.TCPKeepalive); err != nil {
			log.Error("conn.SetKeepAlive() error(%v)", err)
			return
		}
		// Size the read buffer from the receive setting and the write
		// buffer from the send setting.
		if err = conn.SetReadBuffer(Conf.TCPRcvbuf); err != nil {
			log.Error("conn.SetReadBuffer() error(%v)", err)
			return
		}
		if err = conn.SetWriteBuffer(Conf.TCPSndbuf); err != nil {
			log.Error("conn.SetWriteBuffer() error(%v)", err)
			return
		}
		go serveTCP(server, conn, r)
		if r++; r == maxInt {
			r = 0
		}
	}
}
// listen starts listening for a video connection on a socket for the given
// player. This video will be streamed to the partner.
func listen(ln *net.TCPListener, p *player, partner *player, pairs *[]playerPair) {
	var err error
	// Wait for a TCP connection.
	for {
		p.Lock()
		ln.SetDeadline(time.Now().Add(time.Second * 5))
		p.conn, err = ln.AcceptTCP()
		if err == nil {
			break
		}
		p.Unlock()
	}
	log.Println("connected to player", p.id)
	p.conn.SetKeepAlive(true)
	p.conn.SetKeepAlivePeriod(time.Second / 2)
	p.Unlock()

	streamVideo(p, partner)
	removePlayer(pairs, p)

	p.Lock()
	log.Println("lost connection to player", p.id)
	p.active = false
	p.Unlock()
}