Example 1
func (t *Session) DataTunServe(fconn *Conn, isNewSession bool) {
	atomic.AddInt32(&t.activeCnt, 1)
	defer func() {
		var (
			offline bool
			err     = recover()
		)
		if atomic.AddInt32(&t.activeCnt, -1) <= 0 {
			offline = true
			t.mgr.clearTokens(t)
			t.mux.destroy()
		}
		if log.V(1) {
			log.Infof("Tun=%s was disconnected. %v\n", fconn.identifier, nvl(err, NULL))
			if offline {
				log.Infof("Client=%s was offline\n", t.cid)
			}
		}
		if DEBUG {
			ex.CatchException(err)
		}
	}()
	if isNewSession {
		log.Infof("Client=%s is online\n", t.cid)
	}

	if log.V(1) {
		log.Infof("Tun=%s is established\n", fconn.identifier)
	}
	t.mux.Listen(fconn, t.eventHandler, DT_PING_INTERVAL)
}
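The accounting pattern above (bump activeCnt when a tunnel attaches, decrement it in the deferred cleanup, tear the session down once the last tunnel leaves) can be shown in isolation. Below is a minimal sketch with stand-in types; the sync.Once is an addition of this sketch to make the one-time teardown explicit, not something the original relies on.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// session is a stand-in for the real Session: it only tracks how many
// tunnels are attached and runs teardown once, when the last one detaches.
type session struct {
	activeCnt int32
	teardown  sync.Once
}

func (s *session) serveTunnel(id int) {
	atomic.AddInt32(&s.activeCnt, 1)
	defer func() {
		if atomic.AddInt32(&s.activeCnt, -1) <= 0 {
			// last tunnel gone: release session-wide resources exactly once
			s.teardown.Do(func() { fmt.Println("session offline") })
		}
	}()
	fmt.Println("tunnel", id, "established")
	// a blocking mux.Listen-style loop would run here
}

func main() {
	var s session
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			s.serveTunnel(id)
		}(i)
	}
	wg.Wait()
}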
Example 2
func (t *Session) DataTunServe(fconn *Conn, buf []byte) {
	defer func() {
		var offline bool
		if atomic.AddInt32(&t.activeCnt, -1) <= 0 {
			offline = true
			t.mgr.clearTokens(t)
			t.mux.destroy()
		}
		var err = recover()
		if log.V(1) {
			log.Infof("Tun=%s was disconnected. %v\n", fconn.identifier, nvl(err, NULL))
			if offline {
				log.Infof("Client=%s was offline\n", t.cid)
			}
		}
		if DEBUG {
			ex.CatchException(err)
		}
	}()
	atomic.AddInt32(&t.activeCnt, 1)

	if buf != nil {
		token := buf[:TKSZ]
		fconn.cipher = t.cipherFactory.NewCipher(token)
		buf = nil
	} else { // the first negotiation already initialized the cipher, so buf is nil
		log.Infof("Client=%s is online\n", t.cid)
	}

	if log.V(1) {
		log.Infof("Tun=%s is established\n", fconn.identifier)
	}
	t.mux.Listen(fconn, t.eventHandler, DT_PING_INTERVAL)
}
Example 3
func (c *Client) initialNegotiation() (tun *Conn) {
	var newParams = new(tunParams)
	var err error
	tun, err = c.nego.negotiate(newParams)
	if err != nil {
		if log.V(1) || DEBUG {
			log.Errorf("Connection failed %s, Error: %s. Retry after %s",
				c.nego.RemoteName(), err, RETRY_INTERVAL)
		} else {
			log.Errorf("Connection failed %s. Retry after %s",
				c.nego.RemoteName(), RETRY_INTERVAL)
		}
		if strings.Contains(err.Error(), "closed") {
			log.Warningln(string(bytes.Repeat([]byte{'+'}, 30)))
			log.Warningln("Maybe your clock is inaccurate, or your client credential is invalid.")
			log.Warningln(string(bytes.Repeat([]byte{'+'}, 30)))
			os.Exit(2)
		}
		return nil
	}
	c.params = newParams
	c.token = newParams.token

	tun.identifier = c.nego.RemoteName()
	log.Infof("Login to the gateway %s successfully", tun.identifier)
	return
}
Example 4
// _close ends the queued task: free buffered frames and close the edge connection
func (q *equeue) _close(force bool, close_code uint) {
	q.lock.Lock()
	defer q.lock.Unlock()
	e := q.edge
	if log.V(4) {
		switch close_code {
		case CLOSED_BY_ERR:
			log.Infoln("terminate", e.dest)
		case CLOSED_FORCE:
			log.Infoln("close", e.dest)
		case CLOSED_WRITE:
			log.Infof("closeW %s by peer\n", e.dest)
		}
	}

	// iterate with a distinct variable to avoid shadowing e (the edge) above
	for i, el := q.buffer.Len(), q.buffer.Front(); i > 0; i, el = i-1, el.Next() {
		f := el.Value.(*frame)
		if f != nil {
			f.free()
		}
	}

	q.buffer = nil
	if force {
		atomic.StoreUint32(&e.closed, TCP_CLOSED)
		SafeClose(e.conn)
	} else {
		closeW(e.conn)
	}
}
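The force flag above separates a full close (SafeClose) from a write-side half-close (closeW). Below is a minimal sketch of that distinction on a plain *net.TCPConn, assuming closeW/SafeClose are thin wrappers over CloseWrite/Close; the real helpers are not shown in this excerpt.

package main

import (
	"fmt"
	"io"
	"net"
)

// closeEdge mirrors the force branch above on a raw *net.TCPConn:
// a forced close tears down both directions at once, while the normal
// path only shuts the write side (sends FIN) so the peer can still
// deliver whatever it has left, which is the role closeW plays here.
func closeEdge(conn *net.TCPConn, force bool) error {
	if force {
		return conn.Close() // full close, analogous to SafeClose
	}
	return conn.CloseWrite() // half close, analogous to closeW
}

func main() {
	ln, _ := net.Listen("tcp", "127.0.0.1:0")
	defer ln.Close()
	go func() {
		c, err := ln.Accept()
		if err != nil {
			return
		}
		io.Copy(io.Discard, c) // read until the client's FIN arrives
		c.Write([]byte("bye")) // the read side is still open after closeW
		c.Close()
	}()

	conn, _ := net.Dial("tcp", ln.Addr().String())
	closeEdge(conn.(*net.TCPConn), false) // half close: we stop writing
	reply, _ := io.ReadAll(conn)          // ...but can still read the reply
	fmt.Printf("got %q after half-close\n", reply)
}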
Example 5
// async request
func (c *Client) asyncRequestTokens() {
	// don't request tokens if the client has been shut down
	if atomic.LoadInt32(&c.state) >= CLT_WORKING {
		go c.mux.bestSend([]byte{FRAME_ACTION_TOKEN_REQUEST}, "asyncRequestTokens")
		if log.V(3) {
			log.Infof("Request new tokens, pool=%d\n", len(c.token)/TKSZ)
		}
	}
}
Example 6
// async request
func (c *Client) requireTokens() {
	// a non-working state must not request anything
	if atomic.CompareAndSwapInt32(&c.State, CLT_WORKING, CLT_PENDING) {
		if log.V(3) {
			log.Infof("Request new tokens, pool=%d\n", len(c.token)/TKSZ)
		}
		go c.mux.bestSend([]byte{FRAME_ACTION_TOKEN_REQUEST}, "requireTokens")
	}
}
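requireTokens uses a compare-and-swap so that only one token request can be in flight: the winner moves the client from working to pending, and the reply path (the saveTokens variant in Example 10) swaps it back. Below is a self-contained sketch of that gate with hypothetical state names; the original fires the actual send asynchronously via mux.bestSend.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// Hypothetical states standing in for CLT_PENDING / CLT_WORKING.
const (
	statePending int32 = iota
	stateWorking
)

type client struct{ state int32 }

// requestOnce issues at most one request: concurrent callers race on the
// CAS, exactly one wins and flips the client into the pending state.
func (c *client) requestOnce(send func()) {
	if atomic.CompareAndSwapInt32(&c.state, stateWorking, statePending) {
		send()
	}
}

// onReply flips the state back so a later request can be issued again,
// mirroring the CAS in the saveTokens variant of Example 10.
func (c *client) onReply() {
	atomic.CompareAndSwapInt32(&c.state, statePending, stateWorking)
}

func main() {
	c := &client{state: stateWorking}
	var sent int32
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.requestOnce(func() { atomic.AddInt32(&sent, 1) })
		}()
	}
	wg.Wait()
	c.onReply()
	fmt.Println("requests actually sent:", atomic.LoadInt32(&sent)) // 1
}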
Example 7
func (p *multiplexer) HandleRequest(prot string, client net.Conn, target string) {
	sid := next_sid()
	if log.V(1) {
		log.Infof("%s->[%s] from=%s sid=%d\n", prot, target, ipAddr(client.RemoteAddr()), sid)
	}
	tun := p.pool.Select()
	ThrowIf(tun == nil, "No tun to deliver the request")
	key := sessionKey(tun, sid)
	edge := p.router.register(key, target, tun, client, true) // write edge
	p.relay(edge, tun, sid)                                   // read edge
}
Example 8
func (t *Session) DataTunServe(tun *Conn, isNewSession bool) {
	defer func() {
		tun.cipher.Cleanup()
		if atomic.AddInt32(&t.activeCnt, -1) <= 0 {
			t.destroy()
			log.Infof("Client %s was offline", t.cid)
		}
	}()

	if isNewSession {
		log.Infof("Client %s is online", t.cid)
	}
	if log.V(1) {
		log.Infof("Tun %s is established", tun.identifier)
	}
	cnt := atomic.AddInt32(&t.activeCnt, 1)
	// mux will output error log
	t.mux.Listen(tun, t.eventHandler, DT_PING_INTERVAL+int(cnt))
	if log.V(1) {
		log.Infof("Tun %s was disconnected", tun.identifier)
	}
}
Example 9
func (c *Client) saveTokens(data []byte) {
	var tokens []byte
	switch data[0] {
	case FRAME_ACTION_TOKEN_REQUEST:
		log.Warningf("unexpected token request")
		return
	case FRAME_ACTION_TOKEN_REPLY:
		tokens = data[1:]
	}
	c.lock.Lock()
	c.token = append(c.token, tokens...)
	c.lock.Unlock()
	// wakeup waiting
	c.pendingTK.notifyAll()
	if log.V(3) {
		log.Infof("Recv tokens=%d pool=%d\n", len(tokens)/TKSZ, len(c.token)/TKSZ)
	}
}
Example 10
func (c *Client) saveTokens(data []byte) {
	var tokens []byte
	switch data[0] {
	case FRAME_ACTION_TOKEN_REQUEST:
		log.Warningf("unexpected token request")
		return
	case FRAME_ACTION_TOKEN_REPLY:
		tokens = data[1:]
	}
	c.lock.Lock()
	defer c.lock.Unlock()
	c.token = append(c.token, tokens...)
	atomic.CompareAndSwapInt32(&c.State, CLT_PENDING, CLT_WORKING)
	c.pendingTK.notifyAll()
	if log.V(3) {
		log.Infof("Recv tokens=%d pool=%d\n", len(tokens)/TKSZ, len(c.token)/TKSZ)
	}
}
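saveTokens appends the new tokens under the lock and then wakes everything blocked on pendingTK.notifyAll(). The type behind pendingTK isn't shown in this excerpt, so the sketch below rebuilds the same wake-all behaviour on a plain sync.Cond with a hypothetical tokenPool type.

package main

import (
	"fmt"
	"sync"
)

// tokenPool is a hypothetical stand-in for the client's token storage plus
// its pendingTK waiter: takers block until saveTokens-style appends arrive.
type tokenPool struct {
	mu     sync.Mutex
	cond   *sync.Cond
	tokens []byte
}

func newTokenPool() *tokenPool {
	p := &tokenPool{}
	p.cond = sync.NewCond(&p.mu)
	return p
}

// save appends freshly received tokens and wakes every waiter,
// mirroring c.token = append(...) followed by pendingTK.notifyAll().
func (p *tokenPool) save(tokens []byte) {
	p.mu.Lock()
	p.tokens = append(p.tokens, tokens...)
	p.mu.Unlock()
	p.cond.Broadcast()
}

// take blocks until at least n bytes of tokens are available.
func (p *tokenPool) take(n int) []byte {
	p.mu.Lock()
	defer p.mu.Unlock()
	for len(p.tokens) < n {
		p.cond.Wait()
	}
	out := p.tokens[:n]
	p.tokens = p.tokens[n:]
	return out
}

func main() {
	p := newTokenPool()
	done := make(chan []byte)
	go func() { done <- p.take(8) }()
	p.save(make([]byte, 16))
	fmt.Println("got", len(<-done), "token bytes")
}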
Example 11
func (nego *d5CNegotiation) validateAndGetTokens(sconn *hashedConn, t *tunParams) {
	buf, err := ReadFullByLen(2, sconn)
	ThrowErr(err)
	tVer := VERSION
	oVer := binary.BigEndian.Uint32(buf)
	if oVer > tVer {
		oVerStr := fmt.Sprintf("%d.%d.%04d", oVer>>24, (oVer>>16)&0xFF, oVer&0xFFFF)
		tVer >>= 16
		oVer >>= 16
		if tVer == oVer {
			log.Warningf("Caution !!! Please upgrade to new version, remote is v%s\n", oVerStr)
		} else {
			err = INCOMPATIBLE_VERSION.Apply(oVerStr)
		}
		ThrowErr(err)
	}
	ofs := 4
	ofs += 2
	t.dtInterval = int(binary.BigEndian.Uint16(buf[ofs:]))
	ofs += 2
	t.tunQty = int(buf[ofs])
	t.token = buf[TUN_PARAMS_LEN:]
	if log.V(3) {
		n := len(buf) - TUN_PARAMS_LEN
		log.Infof("Received tokens count=%d\n", n/TKSZ)
	}
	rHash := sconn.RHashSum()
	wHash := sconn.WHashSum()
	_, err = sconn.Write(rHash)
	ThrowErr(err)
	oHash := make([]byte, TKSZ)
	_, err = io.ReadFull(sconn, oHash)
	ThrowErr(err)
	if !bytes.Equal(wHash, oHash) {
		log.Errorln("Server hash/r is inconsistence with the client/w")
		log.Errorf("rHash: [% x] wHash: [% x]\n", rHash, wHash)
		log.Errorf("oHash: [% x]\n", oHash)
		ThrowErr(INCONSISTENT_HASH)
	}
}
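Judging from the %d.%d.%04d formatting and the >>16 comparison above, the version is packed as major (8 bits) | minor (8 bits) | patch (16 bits), and two builds count as compatible when major.minor match; a newer remote then only produces the upgrade warning rather than INCOMPATIBLE_VERSION. A standalone sketch of that check:

package main

import "fmt"

// packed version layout inferred from the negotiation code:
// bits 31..24 = major, 23..16 = minor, 15..0 = patch.
func versionString(v uint32) string {
	return fmt.Sprintf("%d.%d.%04d", v>>24, (v>>16)&0xFF, v&0xFFFF)
}

// compatible reports whether two packed versions share major.minor,
// the criterion validateAndGetTokens uses before it merely warns
// (upgrade hint) instead of failing with INCOMPATIBLE_VERSION.
func compatible(a, b uint32) bool {
	return a>>16 == b>>16
}

func main() {
	local := uint32(1<<24 | 2<<16 | 30)  // 1.2.0030
	remote := uint32(1<<24 | 2<<16 | 45) // 1.2.0045
	fmt.Println(versionString(remote), "compatible:", compatible(local, remote)) // true
	newer := uint32(1<<24 | 3<<16 | 1) // 1.3.0001
	fmt.Println(versionString(newer), "compatible:", compatible(local, newer)) // false
}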
Example 12
func (n *dbcCltNego) validateAndGetTokens(hConn *hashedConn, t *tunParams) {
	setRTimeout(hConn)
	buf, err := ReadFullByLen(2, hConn)
	ThrowErr(err)

	// compare version with remote
	myVer := VERSION
	rVer := binary.BigEndian.Uint32(buf)
	ofs := 4
	if rVer > myVer {
		rVerStr := fmt.Sprintf("%d.%d.%04d", rVer>>24, (rVer>>16)&0xFF, rVer&0xFFFF)
		myVer >>= 16
		rVer >>= 16
		if myVer == rVer {
			log.Warningf("Caution !!! Please upgrade to new version, remote is v%s\n", rVerStr)
		} else {
			ThrowErr(INCOMPATIBLE_VERSION.Apply(rVerStr))
			// return
		}
	}

	// check ibHash
	_ibHash := buf[ofs : ofs+20]
	ofs += 20
	if !bytes.Equal(n.ibHash, _ibHash) {
		// S->C is polluted.
		ThrowErr(INCONSISTENT_HASH.Apply("MitM attack"))
	}

	// parse params
	t.deserialize(buf, ofs)

	if log.V(3) {
		log.Infof("Received tokens size=%d\n", len(t.token)/TKSZ)
	}

	// validated or throws
	verifyHash(hConn, false)
}
Example 13
// _close ends the queued task: free buffered frames and close the edge connection
func (q *equeue) _close(force bool, close_code uint) {
	q.lock.Lock()
	defer q.lock.Unlock()
	e := q.edge
	if log.V(4) {
		switch close_code {
		case CLOSED_BY_ERR:
			log.Infoln("terminate", e.dest)
		case CLOSED_FORCE:
			log.Infoln("close", e.dest)
		case CLOSED_WRITE:
			log.Infof("closeW %s by peer\n", e.dest)
		}
	}
	q.buffer.Init()
	q.buffer = nil
	if force {
		atomic.StoreUint32(&e.closed, TCP_CLOSED)
		SafeClose(e.conn)
	} else {
		closeW(e.conn)
	}
}
Example 14
func (p *multiplexer) relay(edge *edgeConn, tun *Conn, sid uint16) {
	var (
		buf  = bytePool.Get(FRAME_MAX_LEN)
		code byte
		src  = edge.conn
	)
	defer func() {
		// this side closed first (actively), so notify the peer
		if edge.bitwiseCompareAndSet(TCP_CLOSE_R) && code != FRAME_ACTION_OPEN_DENIED {
			pack(buf, FRAME_ACTION_CLOSE_W, sid, nil)
			go func() {
				// tell peer to closeW
				frameWriteBuffer(tun, buf[:FRAME_HEADER_LEN])
				bytePool.Put(buf)
			}()
		} else {
			bytePool.Put(buf)
		}
		if code == FRAME_ACTION_OPEN_Y {
			closeR(src)
		} else { // remote open failed
			SafeClose(src)
			if log.V(1) {
				switch code {
				case FRAME_ACTION_OPEN_N:
					log.Infof("Remote open %s failed", edge.dest)
				case FRAME_ACTION_OPEN_DENIED:
					log.Infof("Request %s was denied by remote", edge.dest)
				}
			}
		}
	}()
	if edge.positive { // for client
		_len := pack(buf, FRAME_ACTION_OPEN, sid, []byte(edge.dest[2:])) // strip the 2-byte leading mark from dest
		if frameWriteBuffer(tun, buf[:_len]) != nil {
			SafeClose(tun)
			return
		}
	}

	var (
		tn         int // total
		nr         int
		er         error
		_fast_open = p.isClient
	)
	for {
		if _fast_open {
			// while fast-opening, the read deadline forces a periodic recheck of the fast-open state
			src.SetReadDeadline(time.Now().Add(READ_TMO_IN_FASTOPEN))
			received := false
			select {
			case code = <-edge.ready:
				received = true
			default:
			}
			if received {
				if code == FRAME_ACTION_OPEN_Y {
					_fast_open = false // fastopen finished
				} else {
					return
				}
			} else { // ready-chan was not ready
				if tn >= FAST_OPEN_BUF_MAX_SIZE { // must wait for the open signal
					select {
					case code = <-edge.ready:
					case <-time.After(WAITING_OPEN_TIMEOUT):
						log.Errorf("waiting open-signal sid=%d timeout for %s\n", sid, edge.dest)
					}
					// timeout or open-signal received
					if code == FRAME_ACTION_OPEN_Y {
						_fast_open = false // fastopen finished
					} else {
						return
					}
				}
			}
			// Received signal-y then finish fastopen
			if !_fast_open {
				// read forever
				src.SetReadDeadline(ZERO_TIME)
			}
		}

		nr, er = src.Read(buf[FRAME_HEADER_LEN:])
		if nr > 0 {
			tn += nr
			pack(buf, FRAME_ACTION_DATA, sid, uint16(nr))
			if frameWriteBuffer(tun, buf[:nr+FRAME_HEADER_LEN]) != nil {
				SafeClose(tun)
				return
			}
		}
		// timeout to recheck open signal
		if er != nil && !(_fast_open && IsTimeout(er)) {
			if er != io.EOF && DEBUG {
				log.Infof("read to the end of edge total=%d err=(%v)", tn, er)
			}
			return
		}
	}
}
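The loop above leans on IsTimeout(er) to tell a read-deadline expiry (keep looping and recheck the open signal) apart from a real error (bail out). With only the standard library, that check can be made through net.Error; isTimeout below is a local stand-in for illustration, not the project's helper.

package main

import (
	"errors"
	"fmt"
	"net"
	"time"
)

// isTimeout reports whether err is a deadline expiry rather than a hard
// failure; a relay loop can keep going on timeouts while in fast-open mode.
func isTimeout(err error) bool {
	var ne net.Error
	return errors.As(err, &ne) && ne.Timeout()
}

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()
	go func() {
		c, err := ln.Accept()
		if err != nil {
			return
		}
		defer c.Close()
		time.Sleep(200 * time.Millisecond) // never writes, so the client read must time out
	}()

	conn, err := net.Dial("tcp", ln.Addr().String())
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	conn.SetReadDeadline(time.Now().Add(50 * time.Millisecond))
	buf := make([]byte, 16)
	_, err = conn.Read(buf)
	fmt.Println("timeout:", isTimeout(err)) // true: deadline expired, not a fatal error
}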
Example 15
// TODO notify peer to slow down when the queue grows too fast
func (p *multiplexer) Listen(tun *Conn, handler event_handler, interval int) {
	tun.priority = &TSPriority{0, 1e9}
	p.pool.Push(tun)
	defer p.onTunDisconnected(tun, handler)
	tun.SetSockOpt(1, 0, 1)
	var (
		header = make([]byte, FRAME_HEADER_LEN)
		idle   = NewIdler(interval, p.isClient)
		router = p.router
		nr     int
		er     error
		frm    *frame
		key    string
	)
	if !p.isClient {
		// the server pings the client first,
		// so the client knows the token it used was valid.
		idle.ping(tun)
	}
	for {
		idle.newRound(tun)
		nr, er = io.ReadFull(tun, header)
		if nr == FRAME_HEADER_LEN {
			frm, er = parse_frame(header)
			if er == nil && len(frm.data) > 0 {
				// read the full body, then drop the random tail padding
				nr, er = io.ReadFull(tun, frm.data)
				frm.data = frm.data[:frm.length]
			}
		}
		if er != nil {
			// shutdown
			if atomic.LoadInt32(&p.status) < 0 {
				time.Sleep(time.Second)
				return
			}
			switch idle.consumeError(er) {
			case ERR_NEW_PING:
				if idle.ping(tun) == nil {
					continue
				}
			case ERR_PING_TIMEOUT:
				if log.V(1) {
					log.Errorln("Peer was unresponsive then close", tun.identifier)
				}
			default:
				if log.V(1) {
					log.Errorln("Error", tun.identifier, er)
				}
			}
			// abandon this connection
			return
		}
		key = sessionKey(tun, frm.sid)

		switch frm.action {
		case FRAME_ACTION_CLOSE_W:
			if edge, _ := router.getRegistered(key); edge != nil {
				edge.bitwiseCompareAndSet(TCP_CLOSE_W)
				edge.deliver(frm)
			}
		case FRAME_ACTION_CLOSE_R:
			if edge, _ := router.getRegistered(key); edge != nil {
				edge.bitwiseCompareAndSet(TCP_CLOSE_R)
				closeR(edge.conn)
			}
		case FRAME_ACTION_DATA:
			edge, pre := router.getRegistered(key)
			if edge != nil {
				edge.deliver(frm)
			} else if pre {
				router.preDeliver(key, frm)
			} else {
				if log.V(2) {
					log.Warningln("peer send data to an unexisted socket.", key, frm)
				}
				// send a close back to notify the peer.
				pack(header, FRAME_ACTION_CLOSE_R, frm.sid, nil)
				if frameWriteBuffer(tun, header) != nil {
					return
				}
			}
		case FRAME_ACTION_OPEN:
			router.preRegister(key)
			go p.connectToDest(frm, key, tun)
		case FRAME_ACTION_OPEN_N, FRAME_ACTION_OPEN_Y, FRAME_ACTION_OPEN_DENIED:
			edge, _ := router.getRegistered(key)
			if edge != nil {
				if log.V(4) {
					log.Infoln(p.role, "recv OPEN_x", frm)
				}
				edge.ready <- frm.action
				close(edge.ready)
			} else {
				if log.V(2) {
					log.Warningln("peer send OPEN_x to an unexisted socket.", key, frm)
				}
			}
		case FRAME_ACTION_PING:
			if idle.pong(tun) == nil {
				atomic.AddInt32(&p.pingCnt, 1)
			} else { // reply pong failed
				return
			}
		case FRAME_ACTION_PONG:
			if idle.verify() {
				if p.isClient && idle.lastPing > 0 {
					sRtt, devRtt := idle.updateRtt()
					if DEBUG {
						log.Infof("sRtt=%d devRtt=%d", sRtt, devRtt)
					}
					if devRtt+(sRtt>>2) > sRtt {
						// restart ???
						log.Warningf("Unstable network sRtt=%d devRtt=%d", sRtt, devRtt)
					}
				}
			} else {
				log.Warningln("Incorrect action_pong received")
			}
		case FRAME_ACTION_TOKENS:
			handler(evt_tokens, frm.data)
		default:
			log.Errorln(p.role, "Unrecognized", frm)
		}
		tun.Update()
	}
}
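Listen reads a fixed FRAME_HEADER_LEN header, parses it with parse_frame, and then fills frm.data up to frm.length. The actual wire layout is not visible in this excerpt, so the sketch below assumes a hypothetical 5-byte header of action(1) | sid(2) | length(2), big-endian, purely to illustrate how such a pack/parse pair fits together.

package main

import (
	"encoding/binary"
	"fmt"
)

// Hypothetical header layout used only in this sketch:
// action(1) | sid(2, big-endian) | length(2, big-endian), then payload.
const frameHeaderLen = 5

type frame struct {
	action byte
	sid    uint16
	length uint16
	data   []byte
}

// packFrame writes a header (and optional payload) into buf and returns
// the number of bytes ready to send, loosely mirroring pack/parse_frame.
func packFrame(buf []byte, action byte, sid uint16, body []byte) int {
	buf[0] = action
	binary.BigEndian.PutUint16(buf[1:], sid)
	binary.BigEndian.PutUint16(buf[3:], uint16(len(body)))
	copy(buf[frameHeaderLen:], body)
	return frameHeaderLen + len(body)
}

func parseFrame(header []byte) *frame {
	f := &frame{
		action: header[0],
		sid:    binary.BigEndian.Uint16(header[1:]),
		length: binary.BigEndian.Uint16(header[3:]),
	}
	if f.length > 0 {
		f.data = make([]byte, f.length) // the caller then io.ReadFull's into it
	}
	return f
}

func main() {
	buf := make([]byte, 64)
	n := packFrame(buf, 0x11, 42, []byte("hello"))
	f := parseFrame(buf[:frameHeaderLen])
	copy(f.data, buf[frameHeaderLen:n])
	fmt.Printf("action=%#x sid=%d payload=%q\n", f.action, f.sid, f.data)
}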
Example 16
func (c *Client) StartTun(mustRestart bool) {
	var (
		wait bool
		tun  *Conn
		rn   = atomic.LoadInt32(&c.round)
	)
	for {
		if wait {
			time.Sleep(RETRY_INTERVAL)
		}
		if rn < atomic.LoadInt32(&c.round) {
			return
		}
		if mustRestart {
			// clear mustRestart
			mustRestart = false
			// prevent concurrent restarts
			if atomic.CompareAndSwapInt32(&c.state, CLT_WORKING, CLT_PENDING) {
				tun, rn = c.restart()
			} else {
				return
			}
		}
		if atomic.LoadInt32(&c.state) == CLT_WORKING {
			// on the first pass the tun produced by negotiation is non-nil;
			// on later passes it is nil and a new one must be created.
			if tun == nil {
				var err error
				tun, err = c.createDataTun()
				if err != nil {
				if log.V(1) || DEBUG {
						log.Errorf("Connection failed %s, Error: %s. Reconnect after %s",
							c.nego.RemoteName(), err, RETRY_INTERVAL)
					} else {
						log.Errorf("Connection failed %s. Reconnect after %s",
							c.nego.RemoteName(), RETRY_INTERVAL)
					}
					wait = true
					continue
				}
			}

			if log.V(1) {
				log.Infof("Tun %s is established\n", tun.sign())
			}

			cnt := atomic.AddInt32(&c.dtCnt, 1)
			c.mux.Listen(tun, c.eventHandler, c.params.pingInterval+int(cnt))
			dtcnt := atomic.AddInt32(&c.dtCnt, -1)

			log.Errorf("Tun %s was disconnected, Reconnect after %s\n", tun.sign(), RETRY_INTERVAL)

			tun.cipher.Cleanup()
			if atomic.LoadInt32(&c.mux.pingCnt) <= 0 {
				// dirty tokens: stale (abandoned) tokens were likely used
				c.clearTokens()
			}

			if dtcnt <= 0 { // restart: all connections were disconnected
				log.Errorf("Connections %s were lost\n", tun.identifier)
				go c.StartTun(true)
				return

			} else { // reconnect
				// don't use old tun
				wait, tun = true, nil
			}
		} else { // can't create a tun yet; wait until released
			if !c.pendingConn.acquire(RETRY_INTERVAL) {
				return
			}
		}
	}
}
Example 17
func (c *Client) StartTun(mustRestart bool) {
	var (
		wait bool
		tun  *Conn
		rn   = atomic.LoadInt32(&c.round)
	)
	for {
		if wait {
			time.Sleep(RETRY_INTERVAL)
		}
		if rn < atomic.LoadInt32(&c.round) {
			return
		}
		if mustRestart {
			mustRestart = false
			if atomic.SwapInt32(&c.State, CLT_PENDING) >= CLT_WORKING {
				tun, rn = c.restart()
				if tun == nil {
					return
				}
			} else {
				return
			}
		}
		if atomic.LoadInt32(&c.State) == CLT_WORKING {
			// on the first pass the tun produced by negotiation is non-nil;
			// on later passes it is nil and a new one must be created.
			if tun == nil {
				var err error
				tun, err = c.createDataTun()
				if err != nil {
					if DEBUG {
						ex.CatchException(err)
					}
					log.Errorf("Failed to connect %s. Reconnect after %s\n", err, RETRY_INTERVAL)
					wait = true
					continue
				}
			}

			if log.V(1) {
				log.Infof("Tun=%s is established\n", tun.sign())
			}
			atomic.AddInt32(&c.dtCnt, 1)
			c.mux.Listen(tun, c.eventHandler, c.tp.pingInterval)
			dtcnt := atomic.AddInt32(&c.dtCnt, -1)

			log.Errorf("Tun=%s was disconnected, Reconnect after %s\n", tun.sign(), RETRY_INTERVAL)

			if atomic.LoadInt32(&c.mux.pingCnt) <= 0 { // dirty tokens: used abandoned tokens
				c.clearTokens()
			}

			if dtcnt <= 0 { // restart: all connections were disconnected
				log.Errorf("Connections %s were lost\n", tun.identifier)
				go c.StartTun(true)
				return
			} else { // reconnect
				// wait, and don't reuse the old tun
				wait = true
				tun = nil
			}
		} else { // can't create a tun yet; wait until released
			if !c.pendingConn.acquire(RETRY_INTERVAL) {
				return
			}
		}
	}
}
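Both StartTun variants guard against overlapping generations with an atomic round counter: a loop captures rn when it starts and quietly exits once a restart has advanced c.round past it. Below is a stripped-down sketch of that guard with hypothetical names.

package main

import (
	"fmt"
	"sync/atomic"
)

// roundGuard is a hypothetical stand-in for c.round: every restart bumps
// the generation, and loops started under an older generation bail out.
type roundGuard struct{ round int32 }

func (g *roundGuard) restart() int32 { return atomic.AddInt32(&g.round, 1) }

func (g *roundGuard) current() int32 { return atomic.LoadInt32(&g.round) }

// loop has the shape of StartTun's outer for: it keeps reconnecting only
// while its captured generation is still the latest one.
func (g *roundGuard) loop(rn int32, work func() bool) {
	for {
		if rn < g.current() {
			return // a newer generation took over
		}
		if !work() {
			return
		}
	}
}

func main() {
	g := &roundGuard{}
	rn := g.current()
	steps := 0
	g.loop(rn, func() bool {
		steps++
		if steps == 3 {
			g.restart() // simulate a full restart triggered elsewhere
		}
		return true
	})
	fmt.Println("old loop retired after", steps, "steps")
}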
Example 18
func (p *multiplexer) relay(edge *edgeConn, tun *Conn, sid uint16) {
	var (
		buf  = make([]byte, FRAME_MAX_LEN)
		code byte
		src  = edge.conn
	)
	defer func() {
		// this side closed first (actively), so notify the peer
		if edge.bitwiseCompareAndSet(TCP_CLOSE_R) && code != FRAME_ACTION_OPEN_DENIED {
			_frame(buf, FRAME_ACTION_CLOSE_W, sid, nil)
			go tunWrite1(tun, buf[:FRAME_HEADER_LEN]) // tell peer to closeW
		}
		if code == FRAME_ACTION_OPEN_Y {
			closeR(src)
		} else { // remote open failed
			SafeClose(src)
			if log.V(1) {
				switch code {
				case FRAME_ACTION_OPEN_N:
					log.Infof("Remote open %s failed", edge.dest)
				case FRAME_ACTION_OPEN_DENIED:
					log.Infof("Request %s was denied by remote", edge.dest)
				}
			}
		}
	}()
	if edge.positive { // for client
		_len := _frame(buf, FRAME_ACTION_OPEN, sid, []byte(edge.dest))
		if tunWrite1(tun, buf[:_len]) != nil {
			SafeClose(tun)
			return
		}
	}

	var (
		tn         int // total
		nr         int
		er         error
		_fast_open = /* FAST_OPEN && */ p.isClient
	)
	for {
		if _fast_open {
			src.SetReadDeadline(time.Now().Add(READ_TMO_IN_FASTOPEN))
			v, y := reflect.ValueOf(edge.ready).TryRecv()
			if y {
				code = v.Interface().(byte)
				if code == FRAME_ACTION_OPEN_Y {
					_fast_open = false // fastopen finished
				} else {
					return
				}
			} else { // ready-chan was not ready
				if tn >= FAST_OPEN_BUF_MAX_SIZE { // must wait for the open signal
					select {
					case code = <-edge.ready:
					case <-time.After(WAITING_OPEN_TIMEOUT):
						log.Errorf("waiting open-signal sid=%d timeout for %s\n", sid, edge.dest)
					}
					// timeout or open-signal received
					if code == FRAME_ACTION_OPEN_Y {
						_fast_open = false // fastopen finished
					} else {
						return
					}
				}
			}
			if !_fast_open { // fastopen finished
				// read forever
				src.SetReadDeadline(ZERO_TIME)
			}
		}

		nr, er = src.Read(buf[FRAME_HEADER_LEN:])
		if nr > 0 {
			tn += nr
			_frame(buf, FRAME_ACTION_DATA, sid, uint16(nr))
			if tunWrite1(tun, buf[:nr+FRAME_HEADER_LEN]) != nil {
				SafeClose(tun)
				return
			}
		}
		// timeout to recheck open signal
		if er != nil && !(_fast_open && IsTimeout(er)) {
			return
		}
	}
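This older relay polls edge.ready through reflect.ValueOf(...).TryRecv(), while the later version in Example 14 does the same non-blocking receive with a select/default. A small sketch showing the two equivalent forms:

package main

import (
	"fmt"
	"reflect"
)

// tryRecvReflect is the reflection form used above: ok reports whether a
// value was actually waiting on the channel.
func tryRecvReflect(ch chan byte) (byte, bool) {
	v, ok := reflect.ValueOf(ch).TryRecv()
	if !ok {
		return 0, false
	}
	return v.Interface().(byte), true
}

// tryRecvSelect is the idiomatic equivalent used by the newer relay.
func tryRecvSelect(ch chan byte) (byte, bool) {
	select {
	case b := <-ch:
		return b, true
	default:
		return 0, false
	}
}

func main() {
	ch := make(chan byte, 1)
	ch <- 0x21
	fmt.Println(tryRecvReflect(ch)) // 33 true
	fmt.Println(tryRecvSelect(ch))  // 0 false: the channel is empty now
}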
}