func (t *Session) DataTunServe(fconn *Conn, buf []byte) {
	defer func() {
		var offline bool
		if atomic.AddInt32(&t.activeCnt, -1) <= 0 {
			offline = true
			t.mgr.clearTokens(t)
			t.mux.destroy()
		}
		var err = recover()
		if log.V(1) {
			log.Infof("Tun=%s was disconnected. %v\n", fconn.identifier, nvl(err, NULL))
			if offline {
				log.Infof("Client=%s was offline\n", t.cid)
			}
		}
		if DEBUG {
			ex.CatchException(err)
		}
	}()
	atomic.AddInt32(&t.activeCnt, 1)
	if buf != nil {
		token := buf[:TKSZ]
		fconn.cipher = t.cipherFactory.NewCipher(token)
		buf = nil
	} else {
		// the first negotiation already initialized the cipher, so buf is nil here
		log.Infof("Client=%s is online\n", t.cid)
	}
	if log.V(1) {
		log.Infof("Tun=%s is established\n", fconn.identifier)
	}
	t.mux.Listen(fconn, t.eventHandler, DT_PING_INTERVAL)
}

func (p *multiplexer) bestSend(data []byte, action_desc string) bool {
	var buf = make([]byte, FRAME_HEADER_LEN+len(data))
	_frame(buf, FRAME_ACTION_TOKENS, 0, data)
	var tun *Conn
	for i := 1; i <= 3; i++ {
		if p.status < 0 /* MUX_CLOSED */ || p.pool == nil {
			if log.V(4) {
				log.Warningln("abandon sending data of", action_desc)
			}
			break
		}
		tun = p.pool.Select()
		if tun != nil {
			if tunWrite1(tun, buf) == nil {
				return true
			}
		} else {
			time.Sleep(time.Millisecond * 200 * time.Duration(i))
		}
	}
	if log.V(3) {
		log.Warningln("failed to send data of", action_desc)
	}
	return false
}

func acquire(s *semaphore, id int, t *testing.T) {
	if s.acquire(tmo) {
		atomic.AddInt32(&cnt1, 1)
		if log.V(3) {
			t.Log("\tacquired", id)
		}
	} else {
		atomic.AddInt32(&cnt2, 1)
		if log.V(3) {
			t.Log("\tacquire timed out", id)
		}
	}
}

func (c *Client) getToken() []byte {
	c.lock.Lock()
	defer func() {
		c.lock.Unlock()
		tlen := len(c.token) / TKSZ
		if tlen <= TOKENS_FLOOR && atomic.LoadInt32(&c.State) == 0 {
			atomic.AddInt32(&c.State, 1)
			if log.V(4) {
				log.Infof("Request new tokens, pool=%d\n", tlen)
			}
			c.sigTun.postCommand(TOKEN_REQUEST, nil)
		}
	}()
	for len(c.token) < TKSZ {
		log.Warningln("waiting for token. Maybe the requests are coming too fast.")
		c.waitTK.Wait()
		if atomic.LoadInt32(&c.State) < 0 {
			panic("Abandon the request because the tunSession was lost.")
		}
	}
	token := c.token[:TKSZ]
	c.token = c.token[TKSZ:]
	return token
}

// returns a buffer of 1 header byte followed by TKSZ*many token bytes
func (s *SessionMgr) createTokens(session *Session, many int) []byte {
	s.lock.Lock()
	defer s.lock.Unlock()
	var (
		tokens  = make([]byte, 1+many*TKSZ)
		i64buf  = make([]byte, 8)
		_tokens = tokens[1:]
		sha     = sha1.New()
	)
	rand.Seed(time.Now().UnixNano())
	sha.Write([]byte(session.uid))
	for i := 0; i < many; i++ {
		binary.BigEndian.PutUint64(i64buf, uint64(rand.Int63()))
		sha.Write(i64buf)
		binary.BigEndian.PutUint64(i64buf, uint64(time.Now().UnixNano()))
		sha.Write(i64buf)
		pos := i * TKSZ
		// Sum appends the digest in place, into the backing array of tokens
		sha.Sum(_tokens[pos:pos])
		token := _tokens[pos : pos+TKSZ]
		key := fmt.Sprintf("%x", token)
		if _, y := s.container[key]; y {
			i--
			continue
		}
		s.container[key] = session
		session.tokens[key] = true
	}
	if log.V(4) {
		log.Errorf("sessionMap created=%d len=%d\n", many, len(s.container))
	}
	return tokens
}

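// Hedged sketch, not the project's actual API: createTokens above keys s.container by the
// hex form of each TKSZ-byte token, so a token presented later could be resolved back to
// its Session with a lookup like this hypothetical helper.
func (s *SessionMgr) lookupSessionExample(token []byte) *Session {
	s.lock.Lock()
	defer s.lock.Unlock()
	key := fmt.Sprintf("%x", token) // same key derivation as createTokens
	return s.container[key]
}
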
func TestSingleRequest(t *testing.T) {
	startEmulation()
	conn, e := net.Dial("tcp", cltAddr)
	ThrowErr(e)
	rest(1)
	assertLength(t, "client.registry", client.router.registry, 1)
	buf0 := make([]byte, 0xffff)
	buf1 := make([]byte, 0xffff)
	for i := 0; i < 10; i++ {
		n := randomBuffer(buf0)
		binary.BigEndian.PutUint16(buf0, n-2)
		nw, e := conn.Write(buf0[:n])
		ThrowErr(e)
		nr, e := io.ReadFull(conn, buf1[:n-2])
		ThrowErr(e)
		if log.V(3) {
			fmt.Printf("\tsend=%d recv=%d\n", nw, nr)
		}
		if !bytes.Equal(buf0[2:n], buf1[:nr]) {
			t.Errorf("sent is inconsistent with recv. nw=%d nr=%d\n", nw, nr)
		}
	}
	conn.Close()
	rest(2)
	checkFinishedLength(t)
}

func (t *signalTunnel) acknowledged() {
	if log.V(4) {
		log.Infoln("Ping/acknowledged by", t.remoteAddr)
	}
	t.tun.SetReadDeadline(ZERO_TIME)
	t.active(-2) // so slow down the tempo
}

// NewD5Params parses a d5:// connection URI (without the sPub field).
func NewD5Params(uri string) (*D5Params, error) {
	re := regexp.MustCompile("d5://(\\w+):(\\w+)@([.:a-zA-Z0-9-]+)#(\\w+)")
	ma := re.FindStringSubmatch(uri)
	if len(ma) != 5 {
		return nil, INVALID_D5PARAMS
	}
	_, y := availableCiphers[ma[4]]
	if !y {
		return nil, UNSUPPORTED_CIPHER.Apply(ma[4])
	}
	d5sAddr, e := net.ResolveTCPAddr("tcp", ma[3])
	if e != nil {
		return nil, D5SER_UNREACHABLE.Apply(e)
	}
	if log.V(2) {
		log.Infof("D5Params: %q\n", ma[1:])
	}
	return &D5Params{
		d5sAddrStr: ma[3],
		d5sAddr:    d5sAddr,
		algo:       ma[4],
		user:       ma[1],
		pass:       ma[2],
	}, nil
}

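// Usage sketch for NewD5Params: the regexp above implies a URI of the form
// d5://user:pass@host:port#cipher. The address and cipher name below are illustrative
// placeholders; the cipher part must be a key of availableCiphers.
func exampleNewD5Params() {
	params, err := NewD5Params("d5://alice:secret@192.168.1.2:9008#AES128CFB")
	if err != nil {
		log.Errorln("bad d5 uri:", err)
		return
	}
	log.Infof("connect %s as user=%s algo=%s\n", params.d5sAddrStr, params.user, params.algo)
}
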
func TestConcurrency(t *testing.T) {
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(j int) {
			defer wg.Done()
			conn, e := net.Dial("tcp", cltAddr)
			ThrowErr(e)
			buf0 := make([]byte, 0xffff)
			buf1 := make([]byte, 0xffff)
			for i := 0; i < 99; i++ {
				n := randomBuffer(buf0)
				binary.BigEndian.PutUint16(buf0, n-2)
				nw, e := conn.Write(buf0[:n])
				ThrowErr(e)
				nr, e := io.ReadFull(conn, buf1[:n-2])
				ThrowErr(e)
				if log.V(3) {
					fmt.Printf("\tthread=%d send=%d recv=%d\n", j, nw, nr)
				}
				if !bytes.Equal(buf0[2:n], buf1[:nr]) {
					t.Errorf("thread=%d sent != recv. nw=%d nr=%d\n", j, nw, nr)
				}
			}
			conn.Close()
		}(i)
	}
	wg.Wait()
	rest(3)
	checkFinishedLength(t)
}

func (nego *d5SNegotiation) verifyThenDHExchange(conn net.Conn, credBuf []byte) (key []byte, err error) {
	userIdentity, err := RSADecrypt(credBuf, nego.RSAKeys.priv)
	ThrowErr(err)
	clientIdentity := string(userIdentity)
	if log.V(2) {
		log.Infoln("Auth clientIdentity:", clientIdentity)
	}
	allow, ex := nego.AuthSys.Authenticate(userIdentity)
	cDHPub, err := ReadFullByLen(2, conn)
	if !allow {
		log.Warningf("Auth %s failed: %v\n", clientIdentity, ex)
		conn.Write([]byte{0, 1, 0xff})
		return nil, ex
	}
	nego.clientIdentity = clientIdentity
	key = takeSharedKey(nego.dhKeys, cDHPub)
	// if log.V(5) {
	// 	dumpHex("Sharedkey", key)
	// }
	buf := new(bytes.Buffer)
	buf.Write(nego.dhKeys.pubLen)
	buf.Write(nego.dhKeys.pub)
	_, err = buf.WriteTo(conn)
	return
}

func (p *multiplexer) connectToDest(frm *frame, key string, tun *Conn) {
	defer func() {
		ex.CatchException(recover())
	}()
	var (
		dstConn net.Conn
		err     error
		target  = string(frm.data)
	)
	dstConn, err = net.DialTimeout("tcp", target, GENERAL_SO_TIMEOUT)
	frm.length = 0
	if err != nil {
		p.router.removePreRegistered(key)
		log.Errorf("Cannot connect to [%s] for %s, error: %s\n", target, key, err)
		frm.action = FRAME_ACTION_OPEN_N
		tunWrite2(tun, frm)
	} else {
		if log.V(1) {
			log.Infoln("OPEN", target, "for", key)
		}
		dstConn.SetReadDeadline(ZERO_TIME)
		edge := p.router.register(key, target, tun, dstConn, false) // write edge
		frm.action = FRAME_ACTION_OPEN_Y
		if tunWrite2(tun, frm) == nil {
			p.relay(edge, tun, frm.sid) // read edge
		} else {
			// sending open_y failed
			SafeClose(tun)
		}
	}
}

func (c *Client) startDataTun(again bool) {
	var connected bool
	defer func() {
		if connected {
			atomic.AddInt32(&c.dtCnt, -1)
		}
		if err := recover(); err != nil {
			log.Errorf("DTun failed to connect(%s). Retry after %s\n", err, RETRY_INTERVAL)
			c.eventHandler(evt_dt_closed, true)
			if DEBUG {
				ex.CatchException(err)
			}
		}
	}()
	if again {
		time.Sleep(RETRY_INTERVAL)
	}
	for {
		if atomic.LoadInt32(&c.State) == 0 {
			conn := c.createDataTun()
			connected = true
			if log.V(1) {
				log.Infof("DTun(%s) is established\n", conn.sign())
			}
			atomic.AddInt32(&c.dtCnt, 1)
			c.mux.Listen(conn, c.eventHandler, c.tp.dtInterval)
			log.Errorf("DTun(%s) was disconnected. Reconnect after %s\n", conn.sign(), RETRY_INTERVAL)
			break
		} else {
			c.pendingSema.acquire(RETRY_INTERVAL)
		}
	}
}

// async request
func (c *Client) requireTokens() {
	// in a non-working state, nothing can be requested
	if atomic.CompareAndSwapInt32(&c.State, CLT_WORKING, CLT_PENDING) {
		if log.V(3) {
			log.Infof("Request new tokens, pool=%d\n", len(c.token)/TKSZ)
		}
		go c.mux.bestSend([]byte{FRAME_ACTION_TOKEN_REQUEST}, "requireTokens")
	}
}

func (c *Client) putTokens(tokens []byte) {
	c.lock.Lock()
	defer c.lock.Unlock()
	c.token = append(c.token, tokens...)
	atomic.StoreInt32(&c.State, 0)
	c.waitTK.Broadcast()
	if log.V(4) {
		log.Infof("Recv tokens=%d pool=%d\n", len(tokens)/TKSZ, len(c.token)/TKSZ)
	}
}

func (t *Session) onSTDisconnected() {
	tid := t.tun.identifier
	SafeClose(t.tun)
	atomic.AddInt32(&t.svr.stCnt, -1)
	log.Warningf("Client(%s)-ST was disconnected\n", tid)
	i := t.svr.sessionMgr.clearTokens(t)
	if log.V(4) {
		log.Infof("Cleared %d tokens of %s\n", i, tid)
	}
}

func (p *multiplexer) HandleRequest(prot string, client net.Conn, target string) {
	sid := _nextSID()
	if log.V(1) {
		log.Infof("%s->[%s] from=%s sid=%d\n", prot, target, ipAddr(client.RemoteAddr()), sid)
	}
	tun := p.pool.Select()
	ThrowIf(tun == nil, "No tunnel available to deliver the request")
	key := sessionKey(tun, sid)
	edge := p.router.register(key, target, tun, client, true) // write edge
	p.relay(edge, tun, sid)                                   // read edge
}

func (t *signalTunnel) imAlive() {
	if log.V(4) {
		log.Infoln("Ping/responded to", t.remoteAddr)
	}
	t.active(-1) // speed up the tempo to become a sender
	_, err := t.postCommand(CTL_PONG, nil)
	if err != nil {
		SafeClose(t.tun)
		log.Warningln("Replying to ping failed, closed", t.remoteAddr, err)
	}
}

func sendFrame(frm *frame) (werr bool) {
	dst := frm.conn.conn
	if log.V(5) {
		log.Infoln("SEND queue", frm)
	}
	dst.SetWriteDeadline(time.Now().Add(GENERAL_SO_TIMEOUT))
	nw, ew := dst.Write(frm.data)
	if nw == int(frm.length) && ew == nil {
		return
	}
	werr = true // an error occurred
	log.Warningf("Write edge(%s) error(%v). %s\n", frm.conn.dest, ew, frm)
	return
}

func (t *signalTunnel) areYouAlive() {
	if log.V(4) {
		log.Infoln("Ping/launched to", t.remoteAddr)
	}
	_, err := t.postCommand(CTL_PING, nil)
	// either waiting for the pong timed out or sending the ping failed
	if err != nil {
		SafeClose(t.tun)
		log.Warningln("Pinging remote failed, closed", t.remoteAddr, err)
	} else {
		t.tun.SetReadDeadline(time.Now().Add(GENERAL_SO_TIMEOUT * 2))
		// won't be triggered again by the timer; reset by acknowledged() or a read timeout
		t.active(-1)
	}
}

func (t *signalTunnel) postCommand(cmd byte, args []byte) (n int, err error) {
	t.lock.Lock()
	defer t.lock.Unlock()
	buf := randArray(CMD_HEADER_LEN, CMD_HEADER_LEN)
	buf[0] = cmd
	binary.BigEndian.PutUint16(buf[2:], uint16(len(args)))
	if args != nil {
		buf = append(buf, args...)
	}
	if log.V(4) {
		log.Infof("send command packet=[% x]\n", buf)
	}
	t.tun.SetWriteDeadline(time.Now().Add(GENERAL_SO_TIMEOUT * 2))
	n, err = t.tun.Write(buf)
	return
}

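// Hedged sketch of the matching read side, inferred from postCommand above: the header is
// CMD_HEADER_LEN random bytes with the command at [0] and the big-endian args length at
// [2:4]; the remaining header bytes are padding. Illustrative only, not the project's
// actual parser.
func readCommandExample(tun net.Conn) (cmd byte, args []byte, err error) {
	header := make([]byte, CMD_HEADER_LEN)
	if _, err = io.ReadFull(tun, header); err != nil {
		return
	}
	cmd = header[0]
	if n := binary.BigEndian.Uint16(header[2:]); n > 0 {
		args = make([]byte, n)
		_, err = io.ReadFull(tun, args)
	}
	return
}
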
func (c *Client) saveTokens(data []byte) {
	var tokens []byte
	switch data[0] {
	case FRAME_ACTION_TOKEN_REQUEST:
		log.Warningf("unexpected token request")
		return
	case FRAME_ACTION_TOKEN_REPLY:
		tokens = data[1:]
	}
	c.lock.Lock()
	defer c.lock.Unlock()
	c.token = append(c.token, tokens...)
	atomic.CompareAndSwapInt32(&c.State, CLT_PENDING, CLT_WORKING)
	c.pendingTK.notifyAll()
	if log.V(3) {
		log.Infof("Recv tokens=%d pool=%d\n", len(tokens)/TKSZ, len(c.token)/TKSZ)
	}
}

func (t *signalTunnel) active(times int64) {
	t.lock.Lock()
	defer t.lock.Unlock()
	if times > 0 { // the link is actively transferring
		var d = (times - t.lastResetTime) << 1
		// allow a reset only after at least half of the interval has elapsed
		if d > int64(t.interval/time.Second) {
			if log.V(4) {
				log.Infoln("suppress the next ping task")
			}
			t.lastResetTime = times
			t.lived.Reset(t.interval)
		}
	} else if times < 0 { // scheduled ping
		// e.g. active(-1) keeps the base interval, active(-2) doubles it
		t.interval = t.baseInterval * time.Duration(-times)
		t.lastResetTime = time.Now().Unix()
		t.lived.Reset(t.interval)
	}
}

func (nego *d5CNegotiation) validateAndGetTokens(sconn *hashedConn, t *tunParams) (err error) {
	buf, err := ReadFullByLen(2, sconn)
	ThrowErr(err)
	tVer := VERSION
	oVer := binary.BigEndian.Uint32(buf)
	if oVer > tVer {
		oVerStr := fmt.Sprintf("%d.%d.%04d", oVer>>24, (oVer>>16)&0xFF, oVer&0xFFFF)
		tVer >>= 16
		oVer >>= 16
		if tVer == oVer {
			log.Warningf("Caution!!! Please upgrade to the new version, remote is v%s\n", oVerStr)
		} else {
			return INCOMPATIBLE_VERSION.Apply(oVerStr)
		}
	}
	ofs := 4
	t.stInterval = int(binary.BigEndian.Uint16(buf[ofs:]))
	ofs += 2
	t.dtInterval = int(binary.BigEndian.Uint16(buf[ofs:]))
	ofs += 2
	t.tunQty = int(buf[ofs])
	t.token = buf[TUN_PARAMS_LEN:]
	if log.V(2) {
		n := len(buf) - TUN_PARAMS_LEN
		log.Infof("Got tokens length=%d\n", n/TKSZ)
	}
	rHash := sconn.RHashSum()
	wHash := sconn.WHashSum()
	_, err = sconn.Write(rHash)
	ThrowErr(err)
	oHash := make([]byte, TKSZ)
	// read the peer's hash in full to avoid comparing against a short read
	_, err = io.ReadFull(sconn, oHash)
	if !bytes.Equal(wHash, oHash) {
		log.Errorln("Server hash/r is inconsistent with client hash/w")
		log.Errorf("rHash: [% x] wHash: [% x]\n", rHash, wHash)
		log.Errorf("oHash: [% x]\n", oHash)
		return INCONSISTENT_HASH
	}
	return
}

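// Hedged sketch of the version word layout inferred from the formatting in
// validateAndGetTokens above: bits 31..24 major, 23..16 minor, 15..0 patch; compatibility
// is judged on the top 16 bits. These helpers are illustrative; the project may build
// VERSION differently.
func packVersionExample(major, minor, patch uint32) uint32 {
	return major<<24 | minor<<16 | patch
}

func formatVersionExample(v uint32) string {
	return fmt.Sprintf("%d.%d.%04d", v>>24, (v>>16)&0xFF, v&0xFFFF)
}
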
// close the edge when the queued task ends
func (q *equeue) _close(force bool, close_code uint) {
	q.lock.Lock()
	defer q.lock.Unlock()
	e := q.edge
	if log.V(4) {
		switch close_code {
		case CLOSED_BY_ERR:
			log.Infoln("terminate", e.dest)
		case CLOSED_FORCE:
			log.Infoln("close", e.dest)
		case CLOSED_WRITE:
			log.Infof("closeW %s by peer\n", e.dest)
		}
	}
	q.buffer.Init()
	q.buffer = nil
	if force {
		e.closed = TCP_CLOSE_R | TCP_CLOSE_W
		SafeClose(e.conn)
	} else {
		closeW(e.conn)
	}
}

func (c *Client) StartTun(mustRestart bool) {
	var (
		wait bool
		tun  *Conn
		rn   = atomic.LoadInt32(&c.round)
	)
	for {
		if wait {
			time.Sleep(RETRY_INTERVAL)
		}
		if rn < atomic.LoadInt32(&c.round) {
			return
		}
		if mustRestart {
			mustRestart = false
			if atomic.SwapInt32(&c.State, CLT_PENDING) >= CLT_WORKING {
				tun, rn = c.restart()
				if tun == nil {
					return
				}
			} else {
				return
			}
		}
		if atomic.LoadInt32(&c.State) == CLT_WORKING {
			// on the first pass the conn created during negotiation is not nil;
			// afterwards it must be nil, so a new one is created here
			if tun == nil {
				var err error
				tun, err = c.createDataTun()
				if err != nil {
					if DEBUG {
						ex.CatchException(err)
					}
					log.Errorf("Failed to connect %s. Reconnect after %s\n", err, RETRY_INTERVAL)
					wait = true
					continue
				}
			}
			if log.V(1) {
				log.Infof("Tun=%s is established\n", tun.sign())
			}
			atomic.AddInt32(&c.dtCnt, 1)
			c.mux.Listen(tun, c.eventHandler, c.tp.dtInterval)
			dtcnt := atomic.AddInt32(&c.dtCnt, -1)
			log.Errorf("Tun=%s was disconnected. Reconnect after %s\n", tun.sign(), RETRY_INTERVAL)
			if atomic.LoadInt32(&c.mux.pingCnt) <= 0 {
				// dirty tokens: abandoned tokens may have been used
				c.clearTokens()
			}
			if dtcnt <= 0 {
				// restart: all connections were disconnected
				log.Errorf("Connections %s were lost\n", tun.identifier)
				go c.StartTun(true)
				return
			} else {
				// reconnect: wait, and don't reuse the old tun
				wait = true
				tun = nil
			}
		} else {
			// can't create a tun now; wait for release
			if !c.pendingConn.acquire(RETRY_INTERVAL) {
				return
			}
		}
	}
}

// TODO notify peer to slow down when the queue grows too fast
func (p *multiplexer) Listen(tun *Conn, handler event_handler, interval int) {
	if p.isClient {
		tun.priority = &TSPriority{0, 1e9}
		p.pool.Push(tun)
	}
	defer p.onTunDisconnected(tun, handler)
	tun.SetSockOpt(1, 1, 0)
	var (
		header     = make([]byte, FRAME_HEADER_LEN)
		router     = p.router
		idle       = NewIdler(interval, p.isClient)
		lastActive = time.Now().Unix()
		nr         int
		er         error
		now        int64
		frm        *frame
		key        string
	)
	for {
		idle.newRound(tun)
		nr, er = io.ReadFull(tun, header)
		if nr == FRAME_HEADER_LEN {
			frm = _parseFrameHeader(header)
			if frm.length > 0 {
				nr, er = io.ReadFull(tun, frm.data)
			}
		}
		if er != nil {
			switch idle.consumeError(er) {
			case ERR_NEW_PING:
				if idle.ping(tun) == nil {
					continue
				}
			case ERR_PING_TIMEOUT:
				log.Errorln("Peer was unresponsive, closing", tun.identifier)
			default:
				log.Errorln("Read tunnel", tun.identifier, er)
			}
			return // error, abandon the tunnel
		}
		key = sessionKey(tun, frm.sid)
		switch frm.action {
		case FRAME_ACTION_CLOSE_W:
			if edge := router.getRegistered(key); edge != nil {
				edge.closed |= TCP_CLOSE_W
				edge.deliver(frm)
			}
		case FRAME_ACTION_CLOSE_R:
			if edge := router.getRegistered(key); edge != nil {
				edge.closed |= TCP_CLOSE_R
				closeR(edge.conn)
			}
		case FRAME_ACTION_DATA:
			edge := router.getRegistered(key)
			if edge == nil {
				if log.V(2) {
					log.Warningln("peer sent data to a nonexistent socket.", key, frm)
				}
				// send close to notify the peer
				_frame(header, FRAME_ACTION_CLOSE_R, frm.sid, nil)
				if tunWrite1(tun, header) != nil {
					return
				}
			} else {
				edge.deliver(frm)
			}
		case FRAME_ACTION_OPEN:
			go p.connectToDest(frm, key, tun)
		case FRAME_ACTION_OPEN_N, FRAME_ACTION_OPEN_Y:
			edge := router.getRegistered(key)
			if edge == nil {
				if log.V(2) {
					log.Warningln("peer sent OPENx to a nonexistent socket.", key, frm)
				}
			} else {
				if log.V(4) {
					if frm.action == FRAME_ACTION_OPEN_Y {
						log.Infoln(p.mode, "recv OPEN_Y", frm)
					} else {
						log.Infoln(p.mode, "recv OPEN_N", frm)
					}
				}
				edge.ready <- frm.action
				close(edge.ready)
			}
		case FRAME_ACTION_PING:
			if idle.pong(tun) != nil {
				return
			}
		case FRAME_ACTION_PONG:
			if !idle.verify() {
				log.Warningln("Incorrect action_pong received")
			}
		default:
			log.Errorln(p.mode, "Unrecognized", frm)
		}
		// avoid calling the handler too frequently, especially during high-speed transfers
		if now = time.Now().Unix(); (now-lastActive) > 2 && handler != nil {
			lastActive = now
			handler(evt_st_active, now)
		}
		if p.isClient {
			tun.Update()
		}
	}
}

// TODO notify peer to slow down when the queue grows too fast
func (p *multiplexer) Listen(tun *Conn, handler event_handler, interval int) {
	tun.priority = &TSPriority{0, 1e9}
	p.pool.Push(tun)
	defer p.onTunDisconnected(tun, handler)
	tun.SetSockOpt(1, 0, 1)
	var (
		header = make([]byte, FRAME_HEADER_LEN)
		router = p.router
		idle   = NewIdler(interval, p.isClient)
		nr     int
		er     error
		frm    *frame
		key    string
	)
	if !p.isClient {
		// the first ping frame lets the client know it used a valid token
		idle.ping(tun)
	}
	for {
		idle.newRound(tun)
		nr, er = io.ReadFull(tun, header)
		if nr == FRAME_HEADER_LEN {
			frm = _parseFrameHeader(header)
			if frm.length > 0 {
				nr, er = io.ReadFull(tun, frm.data)
			}
		}
		if er != nil {
			switch idle.consumeError(er) {
			case ERR_NEW_PING:
				if idle.ping(tun) == nil {
					continue
				}
			case ERR_PING_TIMEOUT:
				if log.V(2) {
					log.Warningln("Peer was unresponsive, closing", tun.identifier)
				}
			default:
				if log.V(2) {
					log.Warningln("Read tunnel", tun.identifier, er)
				}
			}
			return // error, abandon the tunnel
		}
		key = sessionKey(tun, frm.sid)
		switch frm.action {
		case FRAME_ACTION_CLOSE_W:
			if edge, _ := router.getRegistered(key); edge != nil {
				edge.closed |= TCP_CLOSE_W
				edge.deliver(frm)
			}
		case FRAME_ACTION_CLOSE_R:
			if edge, _ := router.getRegistered(key); edge != nil {
				edge.closed |= TCP_CLOSE_R
				closeR(edge.conn)
			}
		case FRAME_ACTION_DATA:
			edge, pre := router.getRegistered(key)
			if edge != nil {
				edge.deliver(frm)
			} else if pre {
				router.preDeliver(key, frm)
			} else {
				if log.V(2) {
					log.Warningln("peer sent data to a nonexistent socket.", key, frm)
				}
				// send close to notify the peer
				_frame(header, FRAME_ACTION_CLOSE_R, frm.sid, nil)
				if tunWrite1(tun, header) != nil {
					return
				}
			}
		case FRAME_ACTION_OPEN:
			router.preRegister(key)
			go p.connectToDest(frm, key, tun)
		case FRAME_ACTION_OPEN_N, FRAME_ACTION_OPEN_Y:
			edge, _ := router.getRegistered(key)
			if edge != nil {
				if log.V(4) {
					if frm.action == FRAME_ACTION_OPEN_Y {
						log.Infoln(p.role, "recv OPEN_Y", frm)
					} else {
						log.Infoln(p.role, "recv OPEN_N", frm)
					}
				}
				edge.ready <- frm.action
				close(edge.ready)
			} else {
				if log.V(2) {
					log.Warningln("peer sent OPENx to a nonexistent socket.", key, frm)
				}
			}
		case FRAME_ACTION_PING:
			if idle.pong(tun) == nil {
				atomic.AddInt32(&p.pingCnt, 1)
			} else {
				// replying with pong failed
				return
			}
		case FRAME_ACTION_PONG:
			if !idle.verify() {
				log.Warningln("Incorrect action_pong received")
			}
		case FRAME_ACTION_TOKENS:
			handler(evt_tokens, frm.data)
		default:
			log.Errorln(p.role, "Unrecognized", frm)
		}
		tun.Update()
	}
}

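// Hedged sketch of reading a single frame, using the same helpers (_parseFrameHeader,
// FRAME_HEADER_LEN) that both Listen variants above rely on. Illustrative only.
func readFrameExample(tun *Conn) (*frame, error) {
	header := make([]byte, FRAME_HEADER_LEN)
	if _, err := io.ReadFull(tun, header); err != nil {
		return nil, err
	}
	frm := _parseFrameHeader(header)
	if frm.length > 0 {
		// _parseFrameHeader is expected to allocate frm.data of frm.length bytes,
		// matching how the Listen loops fill it.
		if _, err := io.ReadFull(tun, frm.data); err != nil {
			return nil, err
		}
	}
	return frm, nil
}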