func runWithCustomMethod(user user.User) { // port, password string, Cipher *ss.Cipher port := strconv.Itoa(user.GetPort()) password := user.GetPasswd() ln, err := net.Listen("tcp", ":"+port) if err != nil { Log.Error(fmt.Sprintf("error listening port %v: %v\n", port, err)) os.Exit(1) } passwdManager.add(port, password, ln) cipher, err := user.GetCipher() if err != nil { return } Log.Info(fmt.Sprintf("server listening port %v ...\n", port)) for { conn, err := ln.Accept() if err != nil { // listener maybe closed to update password Log.Debug(fmt.Sprintf("accept error: %v\n", err)) return } // Creating cipher upon first connection. if cipher == nil { Log.Debug("creating cipher for port:", port) cipher, err = ss.NewCipher(user.GetMethod(), password) if err != nil { Log.Error(fmt.Sprintf("Error generating cipher for port: %s %v\n", port, err)) conn.Close() continue } } go handleConnection(user, ss.NewConn(conn, cipher.Copy())) } }
func (r *RedisClient) IsUserOnline(u user.User) bool { key := genUserOnlineKey(u.GetUserInfo()) isExits, err := r.client.Exists(key).Result() if err != nil { return false } return isExits }
func (r *RedisClient) GetUserInfo(u user.User) (user.UserInfo, error) { var user user.UserInfo val, err := r.client.Get(genUserInfoKey(u.GetUserInfo())).Result() if err != nil { return user, err } err = json.Unmarshal([]byte(val), &user) return user, err }
func (r *RedisClient) GetSize(u user.User) (int64, error) { key := genUserFlowKey(u.GetUserInfo()) isExits, err := r.client.Exists(key).Result() if err != nil { return 0, err } if !isExits { return 0, nil } return r.client.Get(key).Int64() }
// traffic func (r *RedisClient) IncrSize(u user.User, size int) error { key := genUserFlowKey(u.GetUserInfo()) incrSize := int(float32(size)) isExits, err := r.client.Exists(key).Result() if err != nil { return err } if !isExits { return r.client.Set(key, incrSize, DefaultExpireTime).Err() } return r.client.IncrBy(key, int64(incrSize)).Err() }
func PipeThenClose(src, dst net.Conn, is_res bool, host string, user user.User) { var pipeBuf = leakybuf.NewLeakyBuf(nBuf, bufSize) defer dst.Close() buf := pipeBuf.Get() // defer pipeBuf.Put(buf) var size int for { SetReadTimeout(src) n, err := src.Read(buf) // read may return EOF with n > 0 // should always process n > 0 bytes before handling error if n > 0 { size, err = dst.Write(buf[0:n]) if is_res { err = storage.IncrSize(user, size) if err != nil { Log.Error(err) } Log.Debug(fmt.Sprintf("[port-%d] store size: %d", user.GetPort(), size)) } if err != nil { Log.Debug("write:", err) break } } if err != nil || n == 0 { // Always "use of closed network connection", but no easy way to // identify this specific error. So just leave the error along for now. // More info here: https://code.google.com/p/go/issues/detail?id=4373 break } } return }
func PipeThenClose(src, dst net.Conn, is_http bool, is_res bool, host string, user user.User) (total int, raw_header []byte) { var pipeBuf = leakybuf.NewLeakyBuf(nBuf, bufSize) defer dst.Close() buf := pipeBuf.Get() // defer pipeBuf.Put(buf) var buffer = bytes.NewBuffer(nil) var is_end = false var size int for { SetReadTimeout(src) n, err := src.Read(buf) // read may return EOF with n > 0 // should always process n > 0 bytes before handling error if n > 0 { if is_http && !is_end { buffer.Write(buf) raw_header = buffer.Bytes() lines := bytes.SplitN(raw_header, []byte("\r\n\r\n"), 2) if len(lines) == 2 { is_end = true } } size, err = dst.Write(buf[0:n]) if is_res { err = storage.IncrSize(user, size) if err != nil { Log.Error(err) } Log.Debug(fmt.Sprintf("[port-%d] store size: %d", user.GetPort(), size)) } total += size if err != nil { Log.Debug("write:", err) break } } if err != nil || n == 0 { // Always "use of closed network connection", but no easy way to // identify this specific error. So just leave the error along for now. // More info here: https://code.google.com/p/go/issues/detail?id=4373 break } } return }
func showConn(raw_req_header, raw_res_header []byte, host string, user user.User, size int, is_http bool) { if size == 0 { Log.Error(fmt.Sprintf("[port-%d] Error: request %s cancel", user.GetPort(), host)) return } if is_http { req, _ := http.ReadRequest(bufio.NewReader(bytes.NewReader(raw_req_header))) if req == nil { lines := bytes.SplitN(raw_req_header, []byte(" "), 2) Log.Debug(fmt.Sprintf("%s http://%s/ \"Unknow\" HTTP/1.1 unknow user-port: %d size: %d\n", lines[0], host, user.GetPort(), size)) return } res, _ := http.ReadResponse(bufio.NewReader(bytes.NewReader(raw_res_header)), req) statusCode := 200 if res != nil { statusCode = res.StatusCode } Log.Debug(fmt.Sprintf("%s http://%s%s \"%s\" %s %d user-port: %d size: %d\n", req.Method, req.Host, req.URL.String(), req.Header.Get("user-agent"), req.Proto, statusCode, user.GetPort(), size)) } else { Log.Debug(fmt.Sprintf("CONNECT %s \"NONE\" NONE NONE user-port: %d size: %d\n", host, user.GetPort(), size)) } }
func (r *RedisClient) MarkUserOnline(u user.User) error { key := genUserOnlineKey(u.GetUserInfo()) return r.client.Set(key, "1", DefaultOnlineKeyExpireTime).Err() }
func (r *RedisClient) SetSize(u user.User, size int) error { key := genUserFlowKey(u.GetUserInfo()) return r.client.Set(key, size, DefaultExpireTime).Err() }
// Del removes u's stored user-info key from redis.
func (r *RedisClient) Del(u user.User) error {
	key := genUserInfoKey(u.GetUserInfo())
	return r.client.Del(key).Err()
}
// Exists reports whether a user-info key is stored in redis for u.
func (r *RedisClient) Exists(u user.User) (bool, error) {
	key := genUserInfoKey(u.GetUserInfo())
	return r.client.Exists(key).Result()
}
func PipeThenCloseOta(src *ss.Conn, dst net.Conn, is_res bool, host string, user user.User) { const ( dataLenLen = 2 hmacSha1Len = 10 idxData0 = dataLenLen + hmacSha1Len ) defer func() { dst.Close() }() var pipeBuf = leakybuf.NewLeakyBuf(nBuf, bufSize) buf := pipeBuf.Get() // sometimes it have to fill large block for i := 1; ; i += 1 { SetReadTimeout(src) n, err := io.ReadFull(src, buf[:dataLenLen+hmacSha1Len]) if err != nil { if err == io.EOF { break } Log.Debug(fmt.Sprintf("conn=%p #%v read header error n=%v: %v", src, i, n, err)) break } dataLen := binary.BigEndian.Uint16(buf[:dataLenLen]) expectedHmacSha1 := buf[dataLenLen:idxData0] var dataBuf []byte if len(buf) < int(idxData0+dataLen) { dataBuf = make([]byte, dataLen) } else { dataBuf = buf[idxData0 : idxData0+dataLen] } if n, err := io.ReadFull(src, dataBuf); err != nil { if err == io.EOF { break } Log.Debug(fmt.Sprintf("conn=%p #%v read data error n=%v: %v", src, i, n, err)) break } chunkIdBytes := make([]byte, 4) chunkId := src.GetAndIncrChunkId() binary.BigEndian.PutUint32(chunkIdBytes, chunkId) actualHmacSha1 := ss.HmacSha1(append(src.GetIv(), chunkIdBytes...), dataBuf) if !bytes.Equal(expectedHmacSha1, actualHmacSha1) { Log.Debug(fmt.Sprintf("conn=%p #%v read data hmac-sha1 mismatch, iv=%v chunkId=%v src=%v dst=%v len=%v expeced=%v actual=%v", src, i, src.GetIv(), chunkId, src.RemoteAddr(), dst.RemoteAddr(), dataLen, expectedHmacSha1, actualHmacSha1)) break } if n, err := dst.Write(dataBuf); err != nil { Log.Debug(fmt.Sprintf("conn=%p #%v write data error n=%v: %v", dst, i, n, err)) break } if is_res { err := storage.IncrSize(user, n) if err != nil { Log.Error(err) } Log.Debug(fmt.Sprintf("[port-%d] store size: %d", user.GetPort(), n)) } } return }
// handleConnection serves one client connection: parses the shadowsocks
// request (optionally with one-time-auth), dials the target host, records
// traffic/online state, then pipes data in both directions until one side
// closes. The connection and remote are closed by the deferred cleanups
// unless the pipes already closed them (tracked via `closed`).
func handleConnection(user user.User, conn *ss.Conn, auth bool) {
	var host string

	// NOTE(review): connCnt/nextLogConnCnt are plain package-level ints
	// updated without synchronization — racy under concurrent handlers,
	// acknowledged as approximate by the original comment.
	connCnt++ // this maybe not accurate, but should be enough
	if connCnt-nextLogConnCnt >= 0 {
		// XXX There's no xadd in the atomic package, so it's difficult to log
		// the message only once with low cost. Also note nextLogConnCnt maybe
		// added twice for current peak connection number level.
		Log.Debug("Number of client connections reaches %d\n", nextLogConnCnt)
		nextLogConnCnt += logCntDelta
	}

	// function arguments are always evaluated, so surround debug statement
	// with if statement
	Log.Debug(fmt.Sprintf("new client %s->%s\n", conn.RemoteAddr().String(), conn.LocalAddr()))
	closed := false
	defer func() {
		if ssdebug {
			Log.Debug(fmt.Sprintf("closed pipe %s<->%s\n", conn.RemoteAddr(), host))
		}
		connCnt--
		// Only close if the pipe stage below did not take ownership.
		if !closed {
			conn.Close()
		}
	}()

	// Parse target address (and, presumably, the size of the request
	// header consumed — TODO confirm getRequest's res_size semantics).
	host, res_size, ota, err := getRequest(conn, auth)
	if err != nil {
		Log.Error("error getting request", conn.RemoteAddr(), conn.LocalAddr(), err)
		return
	}
	Log.Info(fmt.Sprintf("[port-%d]connecting %s ", user.GetPort(), host))
	remote, err := net.Dial("tcp", host)
	if err != nil {
		if ne, ok := err.(*net.OpError); ok && (ne.Err == syscall.EMFILE || ne.Err == syscall.ENFILE) {
			// log too many open file error
			// EMFILE is process reaches open file limits, ENFILE is system limit
			Log.Error("dial error:", err)
		} else {
			Log.Error("error connecting to:", host, err)
		}
		return
	}
	defer func() {
		if !closed {
			remote.Close()
		}
	}()
	// debug conn info
	Log.Debug(fmt.Sprintf("%d conn debug: local addr: %s | remote addr: %s network: %s ", user.GetPort(), conn.LocalAddr().String(), conn.RemoteAddr().String(), conn.RemoteAddr().Network()))
	// Charge the request-header bytes and refresh the user's online marker
	// before starting the pipes; failures abort the connection.
	err = storage.IncrSize(user, res_size)
	if err != nil {
		Log.Error(err)
		return
	}
	err = storage.MarkUserOnline(user)
	if err != nil {
		Log.Error(err)
		return
	}
	Log.Debug(fmt.Sprintf("[port-%d] store size: %d", user.GetPort(), res_size))

	Log.Info(fmt.Sprintf("piping %s<->%s ota=%v connOta=%v", conn.RemoteAddr(), host, ota, conn.IsOta()))
	// Upstream (client->remote) runs in a goroutine; downstream
	// (remote->client) runs inline and is the accounted (is_res) side.
	if ota {
		go PipeThenCloseOta(conn, remote, false, host, user)
	} else {
		go PipeThenClose(conn, remote, false, host, user)
	}
	PipeThenClose(remote, conn, true, host, user)
	// Pipes close both ends themselves; suppress the deferred closes.
	closed = true
	return
}
// handleConnection serves one client connection: parses the shadowsocks
// request, dials the target, optionally sniffs HTTP traffic to reassemble
// the request header for logging, then pipes both directions. The deferred
// block logs a per-connection summary via showConn once both pipes finish.
func handleConnection(user user.User, conn *ss.Conn) {
	var host string
	var size = 0
	var raw_req_header, raw_res_header []byte
	var is_http = false
	var res_size = 0
	// req_chan carries the HTTP request header captured by the upstream
	// pipe goroutine back to this function's deferred summary logger.
	var req_chan = make(chan []byte)
	// NOTE(review): connCnt/nextLogConnCnt are unsynchronized package
	// globals — racy under concurrent handlers, acknowledged as
	// approximate by the original comment.
	connCnt++ // this maybe not accurate, but should be enough
	if connCnt-nextLogConnCnt >= 0 {
		// XXX There's no xadd in the atomic package, so it's difficult to log
		// the message only once with low cost. Also note nextLogConnCnt maybe
		// added twice for current peak connection number level.
		Log.Debug("Number of client connections reaches %d\n", nextLogConnCnt)
		nextLogConnCnt += logCntDelta
	}

	// function arguments are always evaluated, so surround debug statement
	// with if statement
	Log.Debug(fmt.Sprintf("new client %s->%s\n", conn.RemoteAddr().String(), conn.LocalAddr()))
	closed := false
	defer func() {
		if ssdebug {
			Log.Debug(fmt.Sprintf("closed pipe %s<->%s\n", conn.RemoteAddr(), host))
		}
		connCnt--
		if !closed {
			conn.Close()
		}
	}()

	host, extra, err := getRequest(conn)
	if err != nil {
		Log.Error("error getting request", conn.RemoteAddr(), conn.LocalAddr(), err)
		return
	}
	Log.Info(fmt.Sprintf("[port-%d]connecting %s ", user.GetPort(), host))
	remote, err := net.Dial("tcp", host)
	if err != nil {
		if ne, ok := err.(*net.OpError); ok && (ne.Err == syscall.EMFILE || ne.Err == syscall.ENFILE) {
			// log too many open file error
			// EMFILE is process reaches open file limits, ENFILE is system limit
			Log.Error("dial error:", err)
		} else {
			Log.Error("error connecting to:", host, err)
		}
		return
	}
	defer func() {
		if !closed {
			remote.Close()
		}
	}()
	defer func() {
		// NOTE(review): if is_http is true but the function returns before
		// the pipe goroutine below is started (e.g. the "write request
		// extra error" path), this receive blocks forever because nothing
		// ever sends on req_chan — latent deadlock; verify and fix
		// together with the goroutine's send.
		if is_http {
			// Prepend the header bytes already captured from `extra` to
			// the remainder captured by the upstream pipe.
			tmp_req_header := <-req_chan
			buffer := bytes.NewBuffer(raw_req_header)
			buffer.Write(tmp_req_header)
			raw_req_header = buffer.Bytes()
		}
		showConn(raw_req_header, raw_res_header, host, user, size, is_http)
		close(req_chan)
		if !closed {
			remote.Close()
		}
	}()
	// write extra bytes read from
	if extra != nil {
		// debug.Println("getRequest read extra data, writing to remote, len", len(extra))
		is_http, extra, _ = checkHttp(extra, conn)
		// Port 80 traffic is assumed to be HTTP even if sniffing failed.
		if strings.HasSuffix(host, ":80") {
			is_http = true
		}
		raw_req_header = extra
		res_size, err = remote.Write(extra)
		// size, err := remote.Write(extra)
		if err != nil {
			Log.Error("write request extra error:", err)
			return
		}
		// Charge the forwarded request bytes to the user's counter.
		err = storage.IncrSize(user, res_size)
		if err != nil {
			Log.Error(err)
			return
		}
		Log.Debug(fmt.Sprintf("[port-%d] store size: %d", user.GetPort(), res_size))
	}
	Log.Debug(fmt.Sprintf("piping %s<->%s", conn.RemoteAddr(), host))
	/**
	go ss.PipeThenClose(conn, remote)
	ss.PipeThenClose(remote, conn)
	closed = true
	return
	**/
	// Upstream pipe (client->remote) in a goroutine; hands the captured
	// HTTP header back through req_chan when sniffing is enabled.
	go func() {
		_, raw_header := PipeThenClose(conn, remote, is_http, false, host, user)
		if is_http {
			req_chan <- raw_header
		}
	}()
	// Downstream pipe (remote->client) runs inline and is the accounted side.
	res_size, raw_res_header = PipeThenClose(remote, conn, is_http, true, host, user)
	size += res_size
	closed = true
	return
}