// Catch returns true if [re] != nil or [err] != nil,
// and sets [err] to [re] when [re] != nil.
func Catch(re interface{}, err *error) bool {
	var ex error
	if re != nil {
		switch rex := re.(type) {
		case error:
			ex = rex
		default:
			ex = fmt.Errorf("%v", re)
		}
		// print the recovered error with a stack trace
		if DEBUG || bool(log.V(log.LV_ERR_STACK)) {
			buf := make([]byte, 1600)
			n := runtime.Stack(buf, false)
			errStack := ex.Error() + "\n"
			errStack += string(buf[:n])
			log.DirectPrintln(errStack)
		}
	}
	if ex != nil {
		if err != nil {
			*err = ex
		}
		return true
	}
	return err != nil && *err != nil
}
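// Usage sketch (illustrative, not part of the original source): Catch is
// typically paired with recover() in a deferred closure, as connectToDest
// does below, so a panic is converted into the function's returned error.
// The wrapper name and the work callback are hypothetical.
func doWorkSafely(work func()) (err error) {
	defer func() {
		// recover() is nil on the normal path, so err is left untouched;
		// on panic the recovered value is stored into err
		Catch(recover(), &err)
	}()
	work()
	return
}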
func TestSingleRequest(t *testing.T) {
	startEmulation()
	conn, e := net.Dial("tcp", cltAddr)
	ThrowErr(e)
	rest(1)
	assertLength(t, "client.registry", client.router.registry, 1)
	buf0 := make([]byte, 0xffff)
	buf1 := make([]byte, 0xffff)
	for i := 0; i < 10; i++ {
		n := randomBuffer(buf0)
		nw, e := conn.Write(buf0[:n])
		ThrowErr(e)
		nr, e := io.ReadFull(conn, buf1[:n-2])
		ThrowErr(e)
		if log.V(3) {
			fmt.Printf("\tsend=%d recv=%d\n", nw, nr)
		}
		if !bytes.Equal(buf0[2:n], buf1[:nr]) {
			t.Errorf("sent is inconsistent with recv. nw=%d nr=%d\n", nw, nr)
		}
	}
	conn.Close()
	rest(2)
	checkFinishedLength(t)
}
// _close shuts the queue down once its queued tasks have ended.
func (q *equeue) _close(force bool, close_code uint) {
	q.lock.Lock()
	defer q.lock.Unlock()
	e := q.edge
	if log.V(log.LV_ACT_FRM) {
		switch close_code {
		case CLOSED_BY_ERR:
			log.Infoln("Terminate", e.dest)
		case CLOSED_FORCE:
			log.Infoln("Close", e.dest)
		case CLOSED_WRITE:
			log.Infof("CloseWrite %s by peer\n", e.dest)
		}
	}
	// free every frame left in the buffer
	for i, item := q.buffer.Len(), q.buffer.Front(); i > 0; i, item = i-1, item.Next() {
		f := item.Value.(*frame)
		if f != nil {
			f.free()
		}
	}
	q.buffer = nil
	if force {
		atomic.StoreUint32(&e.closed, TCP_CLOSED)
		SafeClose(e.conn)
	} else {
		closeW(e.conn)
	}
}
// sendFrame writes the frame to the tunnel and returns true if the write failed.
func sendFrame(frm *frame) bool {
	dst := frm.conn.conn
	if log.V(log.LV_DAT_FRM) {
		log.Infoln("SEND queue", frm)
	}
	dst.SetWriteDeadline(time.Now().Add(GENERAL_SO_TIMEOUT))
	nw, ew := dst.Write(frm.data)
	if nw == int(frm.length) && ew == nil {
		return false
	}
	// an error occurred
	if log.V(log.LV_WARN_EDGE) {
		log.Warningf("Write edge (%s) error (%v) %s\n", frm.conn.dest, ew, frm)
	}
	return true
}
func (n *d5sman) authenticate(conn *Conn, session *Session) error {
	var err error
	setRTimeout(conn)
	hashSRand, err := ReadFullByLen(1, conn)
	if err != nil {
		// client aborted
		if IsClosedError(err) {
			return ABORTED_ERROR.Apply(err)
		} else {
			return exception.Spawn(&err, "srand: read connection")
		}
	}
	myHashSRand := hash256(n.sRand)
	if !bytes.Equal(hashSRand, myHashSRand) {
		// hash mismatch: MITM?
		return INCONSISTENT_HASH
	}
	// client identity
	setRTimeout(conn)
	idBuf, err := ReadFullByLen(1, conn)
	if err != nil {
		return exception.Spawn(&err, "auth: read connection")
	}
	user, passwd, err := n.deserializeIdentity(idBuf)
	if err != nil {
		return err
	}
	if log.V(log.LV_LOGIN) {
		log.Infoln("Login request:", user)
	}
	pass, err := n.AuthSys.Authenticate(user, passwd)
	if !pass {
		// authSys denied
		log.Warningf("Auth %s:%s failed: %v\n", user, passwd, err)
		// reply with the failure message
		conn.Write([]byte{1, 0})
		return VALIDATION_FAILED
	}
	session.indentifySession(user, conn)
	w := newMsgWriter()
	w.WriteL1Msg([]byte{AUTH_PASS})
	w.WriteL2Msg(n.tunParams.serialize())
	// send tokens
	num := maxInt(GENERATE_TOKEN_NUM, n.Parallels+2)
	tokens := n.sessionMgr.createTokens(session, num)
	w.WriteL2Msg(tokens[1:]) // skip index=0
	setWTimeout(conn)
	err = w.WriteTo(conn)
	return exception.Spawn(&err, "setting: write connection")
}
// asyncRequestTokens asynchronously requests new tokens from the server.
func (c *Client) asyncRequestTokens() {
	// don't request while shutting down
	if atomic.LoadInt32(&c.state) >= CLT_WORKING {
		go c.mux.bestSend([]byte{FRAME_ACTION_TOKEN_REQUEST}, "asyncRequestTokens")
		if log.V(log.LV_TOKEN) {
			log.Infof("Request new tokens, current pool=%d\n", len(c.token)/TKSZ)
		}
	}
}
func TestConcurrency(t *testing.T) {
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(j int) {
			defer wg.Done()
			conn, e := net.Dial("tcp", cltAddr)
			ThrowErr(e)
			if log.V(2) {
				fmt.Printf("\tthread=%d/ start\n", j)
			}
			defer conn.Close()
			buf0 := make([]byte, 0xffff)
			buf1 := make([]byte, 0xffff)
			for k := 0; k < 99; k++ {
				n := randomBuffer(buf0)
				nw, e := conn.Write(buf0[:n])
				ThrowErr(e)
				ThrowIf(nw != int(n), fmt.Sprintf("nr=%d nw=%d", n, nw))
				conn.SetReadDeadline(time.Now().Add(time.Second * 4))
				nr, e := io.ReadFull(conn, buf1[:n-2])
				if e != nil {
					if ne, y := e.(net.Error); y && ne.Timeout() {
						continue
					} else {
						ThrowErr(e)
					}
				}
				if log.V(2) {
					fmt.Printf("\tthread=%d/%d send=%d recv=%d\n", j, k, nw, nr)
				}
				if !bytes.Equal(buf0[2:n], buf1[:nr]) {
					t.Errorf("thread=%d/ sent != recv. nw=%d nr=%d\n", j, nw, nr)
				}
			}
			if log.V(2) {
				fmt.Printf("\tthread=%d/ done\n", j)
			}
		}(i)
	}
	wg.Wait()
	rest(3)
	checkFinishedLength(t)
}
// Detail renders the error type and message when detailed error logging is enabled.
func Detail(err error) string {
	if err != nil && (DEBUG || bool(log.V(log.LV_ERR_DETAIL))) {
		var ori = err
		if ex, y := err.(*Exception); y && ex.Origin != nil {
			ori = ex.Origin
		}
		return fmt.Sprintf("(Error:%T::%s)", ori, err)
	}
	return ""
}
func (t *Session) DataTunServe(tun *Conn, isNewSession bool) {
	defer func() {
		if atomic.AddInt32(&t.activeCnt, -1) <= 0 {
			t.destroy()
			log.Infof("Client %s was offline", t.cid)
		}
	}()
	if isNewSession {
		log.Infof("Client %s is online", t.cid)
	}
	if log.V(log.LV_SVR_CONNECT) {
		log.Infof("Tun %s is established", tun.identifier)
	}
	cnt := atomic.AddInt32(&t.activeCnt, 1)
	// mux will output the error log
	err := t.mux.Listen(tun, t.eventHandler, DT_PING_INTERVAL+int(cnt))
	if log.V(log.LV_SVR_CONNECT) {
		log.Infof("Tun %s was disconnected%s", tun.identifier, ex.Detail(err))
	}
}
// Spawn wraps the error pointed to by ePtr into a new Exception described by
// format/args, and returns nil if *ePtr is nil.
func Spawn(ePtr *error, format string, args ...interface{}) error {
	var err error
	if err = *ePtr; err == nil {
		return nil
	}
	var e Exception
	e.msg = fmt.Sprintf(format, args...)
	if log.V(log.LV_ERR_DETAIL) {
		e.msg += " " + err.Error()
	}
	*ePtr = &e
	return &e
}
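// Usage sketch (illustrative, not part of the original source): because Spawn
// is a no-op when *ePtr is nil, callers can wrap-and-return in a single
// statement, as the "auth: read connection" call sites do elsewhere. The
// wrapper name and the step callback here are hypothetical.
func doStep(step func() error) (err error) {
	err = step()
	// nil passes through unchanged; a non-nil err is replaced by an
	// Exception whose message is prefixed with the description
	return Spawn(&err, "step: execute")
}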
func (h *ConnPool) Select() *Conn {
	h.lock.Lock()
	defer h.lock.Unlock()
	if h.pool.Len() < 1 {
		return nil
	}
	sort.Sort(h.pool)
	if log.V(log.LV_TUN_SELECT) {
		log.Infoln("Selected tun", h.pool[0].LocalAddr())
	}
	selected := h.pool[0]
	atomic.AddInt64(&selected.priority.rank, SELECT_DECREASE)
	return selected
}
func (p *multiplexer) HandleRequest(prot string, client net.Conn, target string) {
	if tun := p.pool.Select(); tun != nil {
		sid := next_sid()
		if log.V(log.LV_REQ) {
			log.Infof("%s->[%s] from=%s sid=%d\n", prot, target, ipAddr(client.RemoteAddr()), sid)
		}
		key := sessionKey(tun, sid)
		edge := p.router.register(key, target, tun, client, true) // write edge
		p.relay(edge, tun, sid)                                   // read edge
	} else {
		log.Warningln(ERR_TUN_NA)
		time.Sleep(time.Second)
		SafeClose(client)
	}
}
// authThenFinishSetting reports the hash of sRand, then requests authentication
// and receives the tunnel params and tokens.
func (n *d5cman) authThenFinishSetting(conn *Conn, t *tunParams) error {
	var err error
	w := newMsgWriter()
	// hash of sRand
	w.WriteL1Msg(hash256(n.sRand))
	// identity
	w.WriteL1Msg(n.serializeIdentity())
	setWTimeout(conn)
	err = w.WriteTo(conn)
	if err != nil {
		return exception.Spawn(&err, "auth: write connection")
	}
	setRTimeout(conn)
	var buf, params []byte
	buf, err = ReadFullByLen(1, conn)
	if err != nil {
		return exception.Spawn(&err, "auth: read connection")
	}
	// auth result
	switch buf[0] {
	case AUTH_PASS:
	default:
		return auth.AUTH_FAILED
	}
	// parse params
	params, err = ReadFullByLen(2, conn)
	if err != nil {
		return exception.Spawn(&err, "param: read connection")
	}
	t.deserialize(params)
	t.token, err = ReadFullByLen(2, conn)
	if err != nil {
		return exception.Spawn(&err, "token: read connection")
	}
	if len(t.token) < TKSZ || len(t.token)%TKSZ != 0 {
		return ILLEGAL_STATE.Apply("incorrect token")
	}
	if log.V(log.LV_TOKEN) {
		log.Infof("Received tokens size=%d\n", len(t.token)/TKSZ)
	}
	return nil
}
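// Handshake summary (derived from authenticate and authThenFinishSetting above,
// not stated in the original source): the exchange appears to be
//   client -> server: L1(hash256(sRand)), L1(identity)
//   server -> client: L1([AUTH_PASS]), L2(tunParams), L2(tokens[1:])
// where L1/L2 seem to denote messages with a 1-byte or 2-byte length prefix,
// matching ReadFullByLen(1, ...) and ReadFullByLen(2, ...).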
func (c *Client) saveTokens(data []byte) {
	var tokens []byte
	switch data[0] {
	case FRAME_ACTION_TOKEN_REQUEST:
		log.Warningf("Unexpected token request")
		return
	case FRAME_ACTION_TOKEN_REPLY:
		tokens = data[1:]
	}
	c.lock.Lock()
	c.token = append(c.token, tokens...)
	c.lock.Unlock()
	// wake up waiters
	c.pendingTK.notifyAll()
	if log.V(log.LV_TOKEN) {
		log.Infof("Received tokens=%d pool=%d\n", len(tokens)/TKSZ, len(c.token)/TKSZ)
	}
}
func (p *multiplexer) connectToDest(frm *frame, key string, tun *Conn) {
	defer func() {
		p.wg.Done()
		ex.Catch(recover(), nil)
	}()
	var (
		dstConn net.Conn
		err     error
		target  = string(frm.data)
		denied  = false
	)
	if p.filter != nil {
		denied = p.filter.Filter(target)
	}
	if !denied {
		dstConn, err = dialer.Dial("tcp", target)
	}
	if err != nil || denied {
		p.router.removePreRegistered(key)
		if denied {
			frm.action = FRAME_ACTION_OPEN_DENIED
			log.Warningf("Denied request [%s] for %s\n", target, key)
		} else {
			frm.action = FRAME_ACTION_OPEN_N
			log.Warningf("Cannot connect to [%s] for %s error: %s\n", target, key, err)
		}
		frameWriteHead(tun, frm)
	} else {
		edge := p.router.register(key, target, tun, dstConn, false) // write edge
		if log.V(log.LV_SVR_OPEN) {
			log.Infoln("OPEN", target, "for", key)
		}
		dstConn.SetReadDeadline(ZERO_TIME)
		frm.action = FRAME_ACTION_OPEN_Y
		if frameWriteHead(tun, frm) == nil {
			p.relay(edge, tun, frm.sid) // read edge
		} else {
			// sending open_y failed
			SafeClose(tun)
		}
	}
}
// returns header=1 + TKSZ*many bytes
func (s *SessionMgr) createTokens(session *Session, many int) []byte {
	s.lock.Lock()
	defer s.lock.Unlock()
	// issue #35
	// clearTokens() invoked prior to createTokens()
	if session == nil || session.tokens == nil {
		return nil
	}
	var (
		tokens  = make([]byte, 1+many*TKSZ)
		i64buf  = make([]byte, 8)
		_tokens = tokens[1:]
		sha     = sha1.New()
	)
	rand.Seed(time.Now().UnixNano())
	sha.Write([]byte(session.uid))
	for i := 0; i < many; i++ {
		binary.BigEndian.PutUint64(i64buf, uint64(rand.Int63()))
		sha.Write(i64buf)
		binary.BigEndian.PutUint64(i64buf, uint64(time.Now().UnixNano()))
		sha.Write(i64buf)
		pos := i * TKSZ
		sha.Sum(_tokens[pos:pos])
		token := _tokens[pos : pos+TKSZ]
		key := fmt.Sprintf("%x", token)
		if _, y := s.container[key]; y {
			i--
			continue
		}
		s.container[key] = session
		session.tokens[key] = true
	}
	if log.V(log.LV_SESSION) {
		log.Errorf("SessionMap created=%d len=%d\n", many, len(s.container))
	}
	return tokens
}
func (p *multiplexer) relay(edge *edgeConn, tun *Conn, sid uint16) {
	var (
		buf  = bytePool.Get(FRAME_MAX_LEN)
		code byte
		src  = edge.conn
	)
	defer func() {
		// if we closed actively, notify the peer
		if edge.bitwiseCompareAndSet(TCP_CLOSE_R) && code != FRAME_ACTION_OPEN_DENIED {
			pack(buf, FRAME_ACTION_CLOSE_W, sid, nil)
			go func() {
				// tell peer to closeW
				frameWriteBuffer(tun, buf[:FRAME_HEADER_LEN])
				bytePool.Put(buf)
			}()
		} else {
			bytePool.Put(buf)
		}
		if code == FRAME_ACTION_OPEN_Y {
			closeR(src)
		} else {
			// remote open failed
			SafeClose(src)
			if log.V(log.LV_REQ) {
				switch code {
				case FRAME_ACTION_OPEN_N:
					log.Infof("Remote open %s failed", edge.dest)
				case FRAME_ACTION_OPEN_DENIED:
					log.Infof("Request %s was denied by remote", edge.dest)
				}
			}
		}
	}()
	if edge.active {
		// client side: send OPEN with the target destination
		_len := pack(buf, FRAME_ACTION_OPEN, sid, []byte(edge.dest[2:])) // dest has a leading mark
		if frameWriteBuffer(tun, buf[:_len]) != nil {
			SafeClose(tun)
			return
		}
	}
	var (
		tn         int // total
		nr         int
		er         error
		_fast_open = p.isClient
	)
	for {
		if _fast_open {
			// during fast-open, the read timeout triggers a recheck of the fast-open state
			src.SetReadDeadline(time.Now().Add(READ_TMO_IN_FASTOPEN))
			received := false
			select {
			case code = <-edge.ready:
				received = true
			default:
			}
			if received {
				if code == FRAME_ACTION_OPEN_Y {
					_fast_open = false // fast-open finished
				} else {
					return
				}
			} else {
				// ready-chan was not ready
				if tn >= FAST_OPEN_BUF_MAX_SIZE {
					// must wait for the open signal
					select {
					case code = <-edge.ready:
					case <-time.After(WAITING_OPEN_TIMEOUT):
						log.Errorf("Waiting open-signal sid=%d timeout for %s\n", sid, edge.dest)
					}
					// timeout or open-signal received
					if code == FRAME_ACTION_OPEN_Y {
						_fast_open = false // fast-open finished
					} else {
						return
					}
				}
			}
			// received OPEN_Y, so fast-open is finished
			if !_fast_open {
				// read forever
				src.SetReadDeadline(ZERO_TIME)
			}
		}
		nr, er = src.Read(buf[FRAME_HEADER_LEN:])
		if nr > 0 {
			tn += nr
			pack(buf, FRAME_ACTION_DATA, sid, uint16(nr))
			if frameWriteBuffer(tun, buf[:nr+FRAME_HEADER_LEN]) != nil {
				SafeClose(tun)
				return
			}
		}
		// a timeout during fast-open only triggers rechecking the open signal
		if er != nil && !(_fast_open && IsTimeout(er)) {
			if er != io.EOF && DEBUG {
				log.Infof("Read to the end of edge total=%d err=(%v)", tn, er)
			}
			return
		}
	}
}
// TODO notify peer to slow down when the queue grows too fast
func (p *multiplexer) Listen(tun *Conn, handler event_handler, interval int) error {
	tun.priority = &TSPriority{0, 1e9}
	p.pool.Push(tun)
	defer p.onTunDisconnected(tun, handler)
	tun.SetSockOpt(1, 0, 1)
	var (
		header = make([]byte, FRAME_HEADER_LEN)
		idle   = NewIdler(interval, p.isClient)
		router = p.router
		nr     int
		er     error
		frm    *frame
		key    string
	)
	if !p.isClient {
		// server pings the client first,
		// making the client aware that its token is valid.
		idle.ping(tun)
	}
	for {
		idle.newRound(tun)
		nr, er = io.ReadFull(tun, header)
		if nr == FRAME_HEADER_LEN {
			frm, er = parse_frame(header)
			if er == nil && len(frm.data) > 0 {
				// read all, then discard the random tail
				nr, er = io.ReadFull(tun, frm.data)
				frm.data = frm.data[:frm.length]
			}
		}
		if er != nil {
			// shutdown
			if atomic.LoadInt32(&p.status) < 0 {
				time.Sleep(time.Second)
				return nil
			}
			switch idle.consumeError(er) {
			case ERR_NEW_PING:
				if er = idle.ping(tun); er == nil {
					continue
				}
			case ERR_PING_TIMEOUT:
				er = ex.New("Peer was unresponsive then close")
			}
			// abandon this connection
			return er
		}
		// key is prefixed with tun.identifier
		key = sessionKey(tun, frm.sid)
		switch frm.action {
		case FRAME_ACTION_CLOSE_W:
			if edge, _ := router.getRegistered(key); edge != nil {
				edge.bitwiseCompareAndSet(TCP_CLOSE_W)
				edge.deliver(frm)
			}
		case FRAME_ACTION_CLOSE_R:
			if edge, _ := router.getRegistered(key); edge != nil {
				edge.bitwiseCompareAndSet(TCP_CLOSE_R)
				closeR(edge.conn)
			}
		case FRAME_ACTION_DATA:
			edge, pre := router.getRegistered(key)
			if edge != nil {
				edge.deliver(frm)
			} else if pre {
				router.preDeliver(key, frm)
			} else {
				if log.V(log.LV_WARN) {
					log.Warningln("Peer sent data to a nonexistent socket.", key, frm)
				}
				// trigger sending close to notify the peer
				pack(header, FRAME_ACTION_CLOSE_R, frm.sid, nil)
				if er = frameWriteBuffer(tun, header); er != nil {
					return er
				}
			}
		case FRAME_ACTION_OPEN:
			router.preRegister(key)
			p.wg.Add(1)
			go p.connectToDest(frm, key, tun)
		case FRAME_ACTION_OPEN_N, FRAME_ACTION_OPEN_Y, FRAME_ACTION_OPEN_DENIED:
			edge, _ := router.getRegistered(key)
			if edge != nil {
				if log.V(log.LV_ACT_FRM) {
					log.Infoln(p.role, "received OPEN_x", frm)
				}
				edge.ready <- frm.action
				close(edge.ready)
			} else {
				if log.V(log.LV_WARN) {
					log.Warningln("Peer sent OPEN_x to a nonexistent socket.", key, frm)
				}
			}
		case FRAME_ACTION_PING:
			if er = idle.pong(tun); er == nil {
				atomic.AddInt32(&p.pingCnt, 1)
			} else {
				// replying with pong failed
				return er
			}
		case FRAME_ACTION_PONG:
			if idle.verify() {
				if p.isClient && idle.lastPing > 0 {
					sRtt, devRtt := idle.updateRtt()
					atomic.StoreInt32(&p.sRtt, sRtt)
					if DEBUG {
						log.Infof("sRtt=%d devRtt=%d", sRtt, devRtt)
						if devRtt+(sRtt>>2) > sRtt {
							// restart ???
							log.Warningf("Network jitter sRtt=%d devRtt=%d", sRtt, devRtt)
						}
					}
				}
			} else {
				log.Warningln("Incorrect action_pong received")
			}
		case FRAME_ACTION_TOKENS:
			handler(evt_tokens, frm.data)
		default: // impossible
			return fmt.Errorf("Unrecognized %s", frm)
		}
		tun.Update()
	}
}
func (c *Client) StartTun(mustRestart bool) {
	var (
		tun  *Conn
		wait bool
		rn   = atomic.LoadInt32(&c.round)
	)
	for {
		if wait {
			time.Sleep(RETRY_INTERVAL)
		}
		if rn < atomic.LoadInt32(&c.round) {
			return
		}
		if mustRestart {
			// clear mustRestart
			mustRestart = false
			// prevent concurrent restarts
			if atomic.CompareAndSwapInt32(&c.state, CLT_WORKING, CLT_PENDING) {
				tun, rn = c.restart()
			} else {
				return
			}
		}
		if atomic.LoadInt32(&c.state) == CLT_WORKING {
			var dtcnt int32
			var err error
			// not restarting: create an ordinary data tun
			if tun == nil {
				tun, err = c.createDataTun()
				if err != nil {
					log.Errorf("Connection failed %s Reconnect after %s", ex.Detail(err), RETRY_INTERVAL)
					wait = true
					continue
				}
			}
			if log.V(log.LV_CLT_CONNECT) {
				log.Infof("Tun %s is established\n", tun.identifier)
			}
			dtcnt = atomic.AddInt32(&c.dtCnt, 1)
			err = c.mux.Listen(tun, c.eventHandler, c.params.pingInterval+int(dtcnt))
			dtcnt = atomic.AddInt32(&c.dtCnt, -1)
			if log.V(log.LV_CLT_CONNECT) {
				log.Errorf("Tun %s was disconnected %s Reconnect after %s\n", tun.identifier, ex.Detail(err), RETRY_INTERVAL)
			}
			// reset
			tun, wait = nil, true
			// no ping was ever received on this tun
			if atomic.LoadInt32(&c.mux.pingCnt) <= 0 {
				// dirty tokens: abandoned tokens were used, so clear them
				c.clearTokens()
			}
			// restart when all connections have been disconnected
			if dtcnt <= 0 {
				log.Errorf("Currently offline, all connections %s were lost\n", c.connInfo.RemoteName())
				go c.StartTun(true)
				return
			}
		} else {
			// currently restarting, so exit
			return
		}
	}
}