// fakeReadingAMI accepts one connection on l, writes the Asterisk AMI
// banner, then performs one read per expected payload and fails the test
// on any error or mismatch. It acts as a test-side fake AMI server.
func fakeReadingAMI(t *testing.T, l *net.TCPListener, expect [][]byte) {
	lconn, err := l.Accept()
	if err != nil {
		t.Fail()
		t.Log("error accepting", err)
		return
	}
	// Bound the whole exchange so a stuck peer cannot hang the test.
	// BUG FIX: this error was previously ignored (marked TODO).
	if err = lconn.SetDeadline(time.Now().Add(5 * time.Second)); err != nil {
		t.Fail()
		t.Log("error setting deadline", err)
		return
	}
	_, err = lconn.Write([]byte("Asterisk Call Manager/1.3\r\n"))
	if err != nil {
		t.Fail()
		t.Log("error writing", err)
		return
	}
	for _, want := range expect {
		buf := make([]byte, 1024)
		n, err := lconn.Read(buf)
		if err != nil {
			t.Fail()
			t.Log("error reading:", err)
		}
		buf = buf[:n]
		// NOTE(review): a single Read may return a partial message; this
		// assumes each expected payload arrives in one read — confirm with
		// the caller's write pattern.
		if !bytes.Equal(want, buf) {
			t.Fail()
			t.Logf("read expected %#v, got %#v", string(want), string(buf))
		}
	}
}
func accept_clients(listener *net.TCPListener, wg *sync.WaitGroup) { defer wg.Done() for { if server_state == STOPPED { return } listener.SetDeadline(time.Now().Add(time.Duration(time.Second))) conn, err := listener.Accept() if err != nil { netErr, ok := err.(net.Error) if ok && netErr.Timeout() && netErr.Temporary() { continue } else { logger.Printf("accept client error: %v\n", err) server_state = STOPPED return } } client := tsp_task_manager.ClientInfo{new_client_id, &conn} new_client_id++ tsp_task_manager.AddNewClient(client) logger.Println("I'm accept client #", client.ID) go listen_client(client) } }
func NewTCPSink(laddr string) (*Sink, error) { c := new(Connection) s := new(Sink) s.packets = make(chan *Packet) s.Connection = c localAddr := c.getTCPAddr(laddr) var listener *net.TCPListener if c.err == nil { listener, s.err = net.ListenTCP("tcp", localAddr) } if s.err == nil { go func() { for { conn, err := listener.Accept() if err == nil { go s.receivePackets(conn, s.newCloseChannel()) } else { logger.Warningf("Failed to accept new connection: %v", err) } } }() } return s, s.err }
func accept_workers(listener *net.TCPListener, wg *sync.WaitGroup) { defer wg.Done() for { if server_state == STOPPED { return } listener.SetDeadline(time.Now().Add(time.Duration(time.Second))) conn, err := listener.Accept() if err != nil { netErr, ok := err.(net.Error) if ok && netErr.Timeout() && netErr.Temporary() { continue } else { logger.Printf("accept worker error: %v\n", err) server_state = STOPPED return } } worker := &tsp_task_manager.WorkerInfo{new_worker_id, &conn, -1} tsp_task_manager.AddNewWorker(worker) logger.Println("I'm accept worker #", new_worker_id) go listen_worker(worker) new_worker_id++ } }
func (server *GoTelnet) acceptConnection(listener *net.TCPListener) { conn, _ := listener.Accept() server.chanForNextAccept <- true if server.preConnection != nil { (*server.preConnection).Close() } server.preConnection = &conn conn.Write([]byte("welcome to telnet debug server")) defer conn.Close() for { buf := make([]byte, 1024) readlen, ok := conn.Read(buf) if ok != nil { fmt.Fprintf(os.Stderr, "close connection when reading from socket: %s\n", ok.String()) return } if readlen == 0 { fmt.Printf("Connection closed by remote host\n") return } if buf[0] == 13 { conn.Write([]byte(server.Promote + ">")) } if string(buf[0:3]) == "bye" { conn.Write(buf) break } server.commandHandler.Handle(string(buf)) } }
// connListener forwards every connection accepted on listener into the conn
// channel. An accept failure is treated as fatal and panics.
func connListener(conn chan net.Conn, listener *net.TCPListener) {
	for {
		c, err := listener.Accept()
		if err != nil {
			// BUG FIX: panic takes a single argument, and err.String() is
			// pre-Go1 os.Error API — the original did not compile.
			panic("Accept: " + err.Error())
		}
		conn <- c
	}
}
func echoListen(listener *net.TCPListener) { for { c, err := listener.Accept() if err == nil { fmt.Println("accepeted") go echo(c) } } }
func pingListen(listener *net.TCPListener, peer *peer.Peer) { for { c, err := listener.Accept() if err == nil { fmt.Println("accepted") go pong(c, peer) } } }
func (server *Server) acceptConnections(listener *net.TCPListener) { for { conn, err := listener.Accept() if err != nil { fmt.Printf("Failed accept TCP connection: %v", err) continue } go server.handleConnection(conn) } }
func connectionListener(listener *net.TCPListener) { for { conn, err := listener.Accept() if err != nil { core.Log(core.LogInfo, "Closed network connection") done <- true return } go handleConnection(conn) } }
func (svr *Server) handler(listener *net.TCPListener) { defer listener.Close() for { conn, err := listener.Accept() if err != nil { time.Sleep(1 * time.Second) svr.StartServer() return } go svr.handleClient(conn) } }
func (tcp *Tcp) serve(listeningPoint *net.TCPListener) { log.Info("Begin serving TCP on address " + listeningPoint.Addr().String()) for { baseConn, err := listeningPoint.Accept() if err != nil { log.Severe("Failed to accept TCP conn on address " + listeningPoint.Addr().String() + "; " + err.Error()) continue } conn := NewConn(baseConn, tcp.output) log.Debug("Accepted new TCP conn %p from %s on address %s", &conn, conn.baseConn.RemoteAddr(), conn.baseConn.LocalAddr()) tcp.connTable.Notify(baseConn.RemoteAddr().String(), conn) } }
func (srv *Server) serve(ln *net.TCPListener) error { defer ln.Close() for { if cn, err := ln.Accept(); err != nil { if ne, ok := err.(net.Error); ok && ne.Temporary() { continue } else { return err } } else { conn := newConn(srv, cn) srv.ConnPool.Add(conn) go conn.serve() } } }
func (tc *Consumer) listen(listener *net.TCPListener) { for { if log.V(2) { log.Info("Accepting client connection") } connection, err := listener.Accept() if err != nil { log.Warning("%s", err) return } if log.V(2) { log.Info("Accepted client connection") } go tc.handleConnection(connection) } }
func (srv *Server) Serve(listener *net.TCPListener) error { defer listener.Close() for { conn, err := listener.Accept() fmt.Println("accept :", conn.RemoteAddr()) if err != nil { return err } serveConn, err := srv.newConn(conn) if err != nil { continue } go serveConn.ServeConn() } panic("not reached") }
func handleCmd(ch chan string, l *net.TCPListener, reply chan string) { for { file := <-ch log.Printf("handCmd %s\n", file) reply <- "file ok" dc, err := l.Accept() if err != nil { log.Fatalf("net accept error: %s\n", err) } log.Printf("get data connection\n") go handleFile(file, dc, reply) } }
// Accepts inbound connections till the service is terminated. For each one it
// starts a new handler and hands the socket over.
func (r *Relay) acceptor(listener *net.TCPListener) {
	// Accept connections until termination request. errc, once received from
	// r.quit, is the channel on which the final close error must be reported.
	var errc chan error
	for errc == nil {
		select {
		case errc = <-r.quit:
			// Termination requested. This `break` only leaves the select;
			// the loop then exits because its condition (errc == nil) fails.
			break
		case client := <-r.done:
			// A client terminated, remove from active list
			delete(r.clients, client)
			if err := client.report(); err != nil {
				log.Printf("relay: closing client error: %v.", err)
			}
		default:
			// Accept an incoming connection but without blocking for too long:
			// the deadline makes Accept return a timeout error periodically so
			// the select above is re-evaluated.
			listener.SetDeadline(time.Now().Add(acceptPollRate))
			if sock, err := listener.Accept(); err == nil {
				if rel, err := r.acceptRelay(sock); err != nil {
					log.Printf("relay: accept failed: %v.", err)
				} else {
					// Track the live relay so it can be torn down on shutdown.
					r.clients[rel] = struct{}{}
				}
			} else if !err.(net.Error).Timeout() {
				// Timeouts are just the poll deadline firing; anything else is
				// a genuine accept failure and is logged (the loop keeps polling).
				log.Printf("relay: accept failed: %v, terminating.", err)
			}
		}
	}
	// In case of failure, wait for termination request.
	// NOTE(review): the loop above only exits once errc != nil, so this branch
	// appears unreachable as written — confirm intent.
	if errc == nil {
		errc = <-r.quit
	}
	// Forcefully close all active client connections, waiting for each one's
	// termination notification before moving on.
	for rel, _ := range r.clients {
		rel.drop()
		<-r.done
	}
	for rel, _ := range r.clients {
		rel.report()
	}
	// Clean up and report the listener close result to the terminator.
	errc <- listener.Close()
}
func (s *Server) goAcceptConnection(listener *net.TCPListener) { s.wait.Add(1) go func(listener *net.TCPListener) { loop: for { select { case <-s.doneTcp: break loop default: } connection, err := listener.Accept() if err != nil { continue } s.goScanConnection(connection) } s.wait.Done() }(listener) }
func (r *ZookoServer) serverLoop(listener *net.TCPListener) { connections := make(chan net.Conn) // Loop forever, waiting for connections go func() { for { // Accept a Connection conn, err := listener.Accept() if err != nil { fmt.Println("Error getting Accept", err) continue } connections <- conn } }() for { select { case conn := <-connections: go r.handleClient(conn) } } }
// Creates and runs a new mock instance // The path is the path to the mock jar. // nodes is the total number of cluster nodes (and thus the number of mock threads) // replicas is the number of replica nodes (subset of the number of nodes) for each couchbase bucket. // vbuckets is the number of vbuckets to use for each couchbase bucket // specs should be a list of specifications of buckets to use.. func NewMock(path string, nodes uint, replicas uint, vbuckets uint, specs ...BucketSpec) (m *Mock, err error) { var lsn *net.TCPListener = nil chAccept := make(chan bool) m = &Mock{} defer func() { close(chAccept) if lsn != nil { lsn.Close() } exc := recover() if exc == nil { // No errors, everything is OK return } // Close mock on error, destroying resources m.Close() if mExc, ok := exc.(mockError); !ok { panic(mExc) } else { m = nil err = mExc } }() if lsn, err = net.ListenTCP("tcp", &net.TCPAddr{Port: 0}); err != nil { throwMockError("Couldn't set up listening socket", err) } _, ctlPort, _ := net.SplitHostPort(lsn.Addr().String()) log.Printf("Listening for control connection at %s\n", ctlPort) go func() { defer func() { chAccept <- false }() if m.conn, err = lsn.Accept(); err != nil { throwMockError("Couldn't accept incoming control connection from mock", err) return } }() if len(specs) == 0 { specs = []BucketSpec{BucketSpec{Name: "default", Type: BCouchbase}} } options := []string{ "-jar", path, "--harakiri-monitor", "localhost:" + ctlPort, "--port", "0", "--replicas", strconv.Itoa(int(replicas)), "--vbuckets", strconv.Itoa(int(vbuckets)), "--nodes", strconv.Itoa(int(nodes)), "--buckets", m.buildSpecStrings(specs), } log.Printf("Invoking java %s", strings.Join(options, " ")) m.cmd = exec.Command("java", options...) 
m.cmd.Stdout = os.Stdout m.cmd.Stderr = os.Stderr if err = m.cmd.Start(); err != nil { m.cmd = nil throwMockError("Couldn't start command", err) } select { case <-chAccept: break case <-time.After(mockInitTimeout): throwMockError("Timed out waiting for initalization", errors.New("timeout")) } m.rw = bufio.NewReadWriter(bufio.NewReader(m.conn), bufio.NewWriter(m.conn)) // Read the port buffer, which is delimited by a NUL byte if portBytes, err := m.rw.ReadBytes(0); err != nil { throwMockError("Couldn't get port information", err) } else { portBytes = portBytes[:len(portBytes)-1] if entryPort, err := strconv.Atoi(string(portBytes)); err != nil { throwMockError("Incorrectly formatted port from mock", err) } else { m.EntryPort = uint16(entryPort) } } log.Printf("Mock HTTP port at %d\n", m.EntryPort) return }
// Start creates the accept routine and begins to accept connections.
func (t *TCP) Start(context interface{}) error {
	log.Dev(context, "Start", "IPAddress[ %s ]", join(t.ipAddress, t.port))

	t.listenerMu.Lock()
	{
		// If the listener has been started already, return an error.
		if t.listener != nil {
			err := errors.New("This TCP has already been started")
			log.Error(context, "Start", err, "Completed")
			t.listenerMu.Unlock()
			return err
		}
	}
	t.listenerMu.Unlock()

	t.wg.Add(1)

	// We need to wait for the goroutine to initialize itself.
	var waitStart sync.WaitGroup
	waitStart.Add(1)

	// Start the connection accept routine.
	go func() {
		var listener *net.TCPListener

		for {
			t.listenerMu.Lock()
			{
				// Start a listener for the specified addr and port is one
				// does not exist. (The listener is re-created here after a
				// permanent accept error clears t.listener below.)
				if t.listener == nil {
					var err error
					listener, err = net.ListenTCP(t.NetType, t.tcpAddr)
					if err != nil {
						log.Error(context, "Start", err, "Completed")
						panic(err)
					}

					t.listener = listener
					// Unblock the Start caller (and any re-Add below) now
					// that a listener exists.
					waitStart.Done()

					log.Dev(context, "accept-routine", "Waiting For Connections : IPAddress[ %s ]", join(t.ipAddress, t.port))
				}
			}
			t.listenerMu.Unlock()

			// Listen for new connections.
			conn, err := listener.Accept()
			if err != nil {
				shutdown := atomic.LoadInt32(&t.shuttingDown)
				if shutdown == 0 {
					log.Error(context, "accept-routine", err, "Completed")
				} else {
					// Shutdown was requested: clear the listener reference and
					// leave the accept loop.
					t.listenerMu.Lock()
					{
						t.listener = nil
					}
					t.listenerMu.Unlock()
					break
				}

				// temporary is declared to test for the existance of
				// the method coming from the net package.
				type temporary interface {
					Temporary() bool
				}

				if e, ok := err.(temporary); ok && !e.Temporary() {
					// Permanent error: drop the listener so the top of the
					// loop re-establishes it.
					t.listenerMu.Lock()
					{
						t.listener.Close()
						t.listener = nil
					}
					t.listenerMu.Unlock()

					// Don't want to add a flag. So setting this back to
					// 1 so when the listener is re-established, the call
					// to Done does not fail.
					waitStart.Add(1)
				}

				continue
			}

			// Check if we are being asked to drop all new connections.
			if drop := atomic.LoadInt32(&t.dropConns); drop == 1 {
				log.Dev(context, "accept-routine", "*******> DROPPING CONNECTION")
				conn.Close()
				continue
			}

			// Check if rate limit is enabled.
			if t.RateLimit != nil {
				now := time.Now()

				// We will only accept 1 connection per duration. Anything
				// connection above that must be dropped.
				if t.lastAcceptedConnection.Add(t.RateLimit()).After(now) {
					log.Dev(context, "accept-routine", "*******> DROPPING CONNECTION Local[ %v ] Remote[ %v ] DUE TO RATE LIMIT %v", conn.LocalAddr(), conn.RemoteAddr(), t.RateLimit())
					conn.Close()
					continue
				}

				// Since we accepted connection, mark the time.
				// NOTE(review): lastAcceptedConnection is only touched by this
				// goroutine here — confirm no other writer exists.
				t.lastAcceptedConnection = now
			}

			// Add this new connection to the manager map.
			t.join(context, conn)
		}

		// Shutting down the routine.
		t.wg.Done()
		log.Dev(context, "accept-routine", "Shutdown : IPAddress[ %s ]", join(t.ipAddress, t.port))
	}()

	// Wait for the goroutine to initialize itself.
	waitStart.Wait()

	log.Dev(context, "Start", "Completed")
	return nil
}