// Remove a proxy connection from the pool and return it
// If no proxy connections are in the pool, request one
// and wait until it is available
// Returns an error if we couldn't get a proxy because it took too long
// or the tunnel is closing
func (c *Control) GetProxy() (proxyConn conn.Conn, err error) {
	// initial timeout is zero to try to get a proxy connection without asking for one
	timeout := time.NewTimer(0)

	// get a proxy connection. if we timeout, request one over the control channel
	for proxyConn == nil {
		var ok bool
		select {
		case proxyConn, ok = <-c.proxies:
			if !ok {
				err = fmt.Errorf("No proxy connections available, control is closing")
				return
			}
			continue
		case <-timeout.C:
			c.conn.Debug("Requesting new proxy connection")

			// request a proxy connection
			if err = util.PanicToError(func() { c.out <- &msg.ReqProxy{} }); err != nil {
				return
			}

			// timeout after 1 second if we don't get one
			timeout.Reset(1 * time.Second)
		}
	}

	// To try to reduce latency handling tunnel connections, we employ
	// the following crude heuristic:
	// Whenever we take a proxy connection from the pool, replace it with a new one
	err = util.PanicToError(func() { c.out <- &msg.ReqProxy{} })

	return
}
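// The sends to c.out above are wrapped in util.PanicToError because sending on a
// closed channel panics; converting that panic into an error lets GetProxy fail
// cleanly while the control connection is shutting down. A minimal sketch of such
// a helper, assuming this is roughly what util.PanicToError does (not copied from
// the util package itself):
func panicToError(fn func()) (err error) {
	defer func() {
		if r := recover(); r != nil {
			// turn the recovered panic value into an ordinary error
			err = fmt.Errorf("panic: %v", r)
		}
	}()
	fn()
	return
}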
// Remove a proxy connection from the pool and return it
// If no proxy connections are in the pool, request one
// and wait until it is available
// Returns an error if we couldn't get a proxy because it took too long
// or the tunnel is closing
func (c *Control) GetProxy() (proxyConn conn.Conn, err error) {
	var ok bool

	// get a proxy connection from the pool
	select {
	case proxyConn, ok = <-c.proxies:
		if !ok {
			err = fmt.Errorf("No proxy connections available, control is closing")
			return
		}
	default:
		// no proxy available in the pool, ask for one over the control channel
		c.conn.Debug("No proxy in pool, requesting proxy from control . . .")
		if err = util.PanicToError(func() { c.out <- &msg.ReqProxy{} }); err != nil {
			return
		}

		select {
		case proxyConn, ok = <-c.proxies:
			if !ok {
				err = fmt.Errorf("No proxy connections available, control is closing")
				return
			}
		case <-time.After(pingTimeoutInterval):
			err = fmt.Errorf("Timeout trying to get proxy connection")
			return
		}
	}

	return
}
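// For context, the other half of the pool protocol: when the client dials back with
// a fresh proxy connection, the control puts it into c.proxies for GetProxy to take.
// A minimal sketch, assuming a non-blocking send so an already-full pool discards
// the surplus connection (the method name and details here are an assumption, not
// taken from this file):
func (c *Control) registerProxy(pxyConn conn.Conn) {
	select {
	case c.proxies <- pxyConn:
		pxyConn.Info("Registered proxy connection")
	default:
		// pool is full; close the extra connection rather than block the client
		pxyConn.Warn("Proxy pool is full, discarding proxy connection")
		pxyConn.Close()
	}
}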
func (t *Tunnel) HandlePublicConnection(publicConn conn.Conn) {
	defer publicConn.Close()
	defer func() {
		if r := recover(); r != nil {
			publicConn.Warn("HandlePublicConnection failed with error %v", r)
		}
	}()

	startTime := time.Now()
	metrics.OpenConnection(t, publicConn)

	var proxyConn conn.Conn
	var err error
	for i := 0; i < (2 * proxyMaxPoolSize); i++ {
		// get a proxy connection
		if proxyConn, err = t.ctl.GetProxy(); err != nil {
			t.Warn("Failed to get proxy connection: %v", err)
			return
		}
		defer proxyConn.Close()
		t.Info("Got proxy connection %s", proxyConn.Id())
		proxyConn.AddLogPrefix(t.Id())

		// tell the client we're going to start using this proxy connection
		startPxyMsg := &msg.StartProxy{
			Url:        t.url,
			ClientAddr: publicConn.RemoteAddr().String(),
		}

		if err = msg.WriteMsg(proxyConn, startPxyMsg); err != nil {
			proxyConn.Warn("Failed to write StartProxyMessage: %v, attempt %d", err, i)
			proxyConn.Close()
		} else {
			// success
			break
		}
	}

	if err != nil {
		// give up
		publicConn.Error("Too many failures starting proxy connection")
		return
	}

	// To reduce latency handling tunnel connections, we employ the following crude heuristic:
	// Whenever we take a proxy connection from the pool, replace it with a new one
	util.PanicToError(func() { t.ctl.out <- &msg.ReqProxy{} })

	// no timeouts while connections are joined
	proxyConn.SetDeadline(time.Time{})

	// join the public and proxy connections
	bytesIn, bytesOut := conn.Join(publicConn, proxyConn)
	metrics.CloseConnection(t, publicConn, startTime, bytesIn, bytesOut)

	//log.Info("Proxy authId=%s bytesIn=%d, bytesOut=%d\n", t.ctl.userInfo.Uc.UserId, bytesIn, bytesOut)
	atomic.AddInt32(&t.ctl.userInfo.TransPerDay, int32(bytesIn+bytesOut))
	atomic.AddInt32(&t.ctl.userInfo.TransAll, int32(bytesIn+bytesOut))
}
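// conn.Join is the piece that actually shuttles traffic: it copies bytes in both
// directions between the public and proxy connections until either side closes,
// then reports how many bytes flowed each way. A minimal sketch, assuming Join is
// essentially a bidirectional io.Copy over plain net.Conn values (the real
// conn.Conn wraps net.Conn with logging; requires the io, net, and sync packages):
func join(a, b net.Conn) (aToB, bToA int64) {
	var wg sync.WaitGroup
	pipe := func(dst, src net.Conn, copied *int64) {
		defer wg.Done()
		defer dst.Close()
		defer src.Close()
		// io.Copy returns once src reaches EOF or either side errors out
		*copied, _ = io.Copy(dst, src)
	}
	wg.Add(2)
	go pipe(b, a, &aToB)
	go pipe(a, b, &bToA)
	wg.Wait()
	return
}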