Example #1
// IsUserOnline reports whether the user's online key currently exists in Redis.
func (r *RedisClient) IsUserOnline(u user.User) bool {
	key := genUserOnlineKey(u.GetUserInfo())
	isExists, err := r.client.Exists(key).Result()
	if err != nil {
		return false
	}
	return isExists
}
Example #2
// GetUserInfo loads and unmarshals the stored user info for u.
func (r *RedisClient) GetUserInfo(u user.User) (user.UserInfo, error) {
	var info user.UserInfo
	val, err := r.client.Get(genUserInfoKey(u.GetUserInfo())).Result()
	if err != nil {
		return info, err
	}
	err = json.Unmarshal([]byte(val), &info)
	return info, err
}
Example #3
// GetSize returns the user's accumulated traffic size, or 0 if the flow key
// does not exist yet.
func (r *RedisClient) GetSize(u user.User) (int64, error) {
	key := genUserFlowKey(u.GetUserInfo())
	isExists, err := r.client.Exists(key).Result()
	if err != nil {
		return 0, err
	}
	if !isExists {
		return 0, nil
	}
	return r.client.Get(key).Int64()
}
Example #4
// IncrSize adds size bytes to the user's traffic counter, creating the key
// with the default expiry on first use.
func (r *RedisClient) IncrSize(u user.User, size int) error {
	key := genUserFlowKey(u.GetUserInfo())
	isExists, err := r.client.Exists(key).Result()
	if err != nil {
		return err
	}
	if !isExists {
		return r.client.Set(key, size, DefaultExpireTime).Err()
	}
	return r.client.IncrBy(key, int64(size)).Err()
}
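The EXISTS check and the SET/INCRBY that follows are two separate commands, so two concurrent callers can both see the key as missing and one SET can overwrite the other's count. A minimal sketch of an atomic alternative, assuming the same go-redis style client; incrSizeAtomic is an illustrative variant, not the project's method:

// incrSizeAtomic increments first and only attaches the default expiry when
// this call created the key (the new value equals the increment).
func (r *RedisClient) incrSizeAtomic(u user.User, size int) error {
	key := genUserFlowKey(u.GetUserInfo())
	newVal, err := r.client.IncrBy(key, int64(size)).Result()
	if err != nil {
		return err
	}
	if newVal == int64(size) {
		return r.client.Expire(key, DefaultExpireTime).Err()
	}
	return nil
}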
Example #5
// runWithCustomMethod starts a TCP listener on the user's port with the
// user's own cipher settings and serves connections until the listener closes.
func runWithCustomMethod(user user.User) {
	port := strconv.Itoa(user.GetPort())
	// Skip the port if a listener for it is already registered.
	if _, ok := passwdManager.get(port); ok {
		return
	}
	password := user.GetPasswd()
	ln, err := net.Listen("tcp", ":"+port)
	if err != nil {
		Log.Error(fmt.Sprintf("error listening port %v: %v\n", port, err))
		return
	}
	passwdManager.add(port, password, ln)
	cipher, err, auth := user.GetCipher()
	if err != nil {
		return
	}
	Log.Info(fmt.Sprintf("server listening port %v ...\n", port))
	for {
		conn, err := ln.Accept()
		if err != nil {
			// listener maybe closed to update password
			Log.Debug(fmt.Sprintf("accept error: %v\n", err))
			return
		}
		// Creating cipher upon first connection.
		if cipher == nil {
			Log.Debug("creating cipher for port:", port)
			method := user.GetMethod()

			if strings.HasSuffix(method, "-auth") {
				method = strings.TrimSuffix(method, "-auth")
				auth = true
			} else {
				auth = false
			}

			cipher, err = ss.NewCipher(method, password)
			if err != nil {
				Log.Error(fmt.Sprintf("Error generating cipher for port: %s %v\n", port, err))
				conn.Close()
				continue
			}
		}
		go handleConnection(user, ss.NewConn(conn, cipher.Copy()), auth)
	}
}
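passwdManager is not defined in these examples; from its use above it only needs to map a port to the password and listener bound to it, safely across goroutines. A minimal sketch of such a registry (the type and field names here are assumptions, not the project's actual definitions; it needs only the standard net and sync packages):

// PortListener pairs a port's password with its open listener.
type PortListener struct {
	password string
	listener net.Listener
}

// PasswdManager is a hypothetical sketch of the registry used above: a
// mutex-guarded map from port to the password and listener registered for it.
type PasswdManager struct {
	sync.Mutex
	portListener map[string]*PortListener
}

var passwdManager = PasswdManager{portListener: map[string]*PortListener{}}

func (pm *PasswdManager) get(port string) (*PortListener, bool) {
	pm.Lock()
	defer pm.Unlock()
	pl, ok := pm.portListener[port]
	return pl, ok
}

func (pm *PasswdManager) add(port, password string, ln net.Listener) {
	pm.Lock()
	defer pm.Unlock()
	pm.portListener[port] = &PortListener{password: password, listener: ln}
}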
Example #6
// PipeThenClose copies data from src to dst, closing dst on return. When
// is_res is true, the bytes relayed back to the client are also recorded as
// the user's traffic via storage.IncrSize.
func PipeThenClose(src, dst net.Conn, is_res bool, host string, user user.User) {
	var pipeBuf = leakybuf.NewLeakyBuf(nBuf, bufSize)
	defer dst.Close()
	buf := pipeBuf.Get()
	// defer pipeBuf.Put(buf)
	var size int

	for {
		SetReadTimeout(src)
		n, err := src.Read(buf)
		// read may return EOF with n > 0
		// should always process n > 0 bytes before handling error
		if n > 0 {
			size, err = dst.Write(buf[0:n])
			if is_res {
				// Use a separate variable for the storage error so the write
				// error is not overwritten before it is checked below.
				if serr := storage.IncrSize(user, size); serr != nil {
					Log.Error(serr)
				}
				Log.Debug(fmt.Sprintf("[port-%d] store size: %d", user.GetPort(), size))
			}
			if err != nil {
				Log.Debug("write:", err)
				break
			}
		}
		if err != nil || n == 0 {
			// Always "use of closed network connection", but no easy way to
			// identify this specific error. So just leave the error alone for now.
			// More info here: https://code.google.com/p/go/issues/detail?id=4373
			break
		}
	}
}
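leakybuf here is a free-list of reusable buffers, so the copy loop does not allocate a fresh buffer on every call. A minimal sketch of the idea, assuming nothing about the actual leakybuf package beyond the NewLeakyBuf/Get/Put calls used above: a buffered channel holds spare buffers, Get reuses one when available, and Put drops the buffer when the pool is full (hence "leaky").

// LeakyBuf is a sketch of a leaky buffer pool backed by a buffered channel.
type LeakyBuf struct {
	bufSize  int
	freeList chan []byte
}

// NewLeakyBuf creates a pool holding at most n spare buffers of bufSize bytes.
func NewLeakyBuf(n, bufSize int) *LeakyBuf {
	return &LeakyBuf{bufSize: bufSize, freeList: make(chan []byte, n)}
}

// Get returns a spare buffer if one is available, otherwise allocates a new one.
func (lb *LeakyBuf) Get() (b []byte) {
	select {
	case b = <-lb.freeList:
	default:
		b = make([]byte, lb.bufSize)
	}
	return
}

// Put returns a buffer to the pool, discarding it when the pool is full.
func (lb *LeakyBuf) Put(b []byte) {
	if len(b) != lb.bufSize {
		panic("leaky buffer: invalid buffer size")
	}
	select {
	case lb.freeList <- b:
	default:
	}
}

Note that because pipeBuf above is created inside PipeThenClose, every call gets its own pool and nothing is actually reused; the pooling only pays off when the leaky buffer is shared, for example declared once at package level.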
Example #7
// showConn logs a one-line summary of the proxied request for debugging.
func showConn(raw_req_header, raw_res_header []byte, host string, user user.User, size int, is_http bool) {
	if size == 0 {
		Log.Error(fmt.Sprintf("[port-%d]  Error: request %s canceled", user.GetPort(), host))
		return
	}
	if is_http {
		req, _ := http.ReadRequest(bufio.NewReader(bytes.NewReader(raw_req_header)))
		if req == nil {
			// The request line could not be parsed; log whatever method token is available.
			lines := bytes.SplitN(raw_req_header, []byte(" "), 2)
			Log.Debug(fmt.Sprintf("%s http://%s/ \"Unknown\" HTTP/1.1 unknown user-port: %d size: %d\n", lines[0], host, user.GetPort(), size))
			return
		}
		res, _ := http.ReadResponse(bufio.NewReader(bytes.NewReader(raw_res_header)), req)
		statusCode := 200
		if res != nil {
			statusCode = res.StatusCode
		}
		Log.Debug(fmt.Sprintf("%s http://%s%s \"%s\" %s %d  user-port: %d  size: %d\n", req.Method, req.Host, req.URL.String(), req.Header.Get("user-agent"), req.Proto, statusCode, user.GetPort(), size))
	} else {
		Log.Debug(fmt.Sprintf("CONNECT %s \"NONE\" NONE NONE user-port: %d  size: %d\n", host, user.GetPort(), size))
	}
}
Example #8
// MarkUserOnline marks the user as online by setting the online key with its default expiry.
func (r *RedisClient) MarkUserOnline(u user.User) error {
	key := genUserOnlineKey(u.GetUserInfo())
	return r.client.Set(key, "1", DefaultOnlineKeyExpireTime).Err()
}
Example #9
// SetSize overwrites the user's traffic counter with size and resets its expiry.
func (r *RedisClient) SetSize(u user.User, size int) error {
	key := genUserFlowKey(u.GetUserInfo())
	return r.client.Set(key, size, DefaultExpireTime).Err()
}
Example #10
// Del removes the user's stored info from Redis.
func (r *RedisClient) Del(u user.User) error {
	return r.client.Del(genUserInfoKey(u.GetUserInfo())).Err()
}
Example #11
// Exists reports whether info for the user is stored in Redis.
func (r *RedisClient) Exists(u user.User) (bool, error) {
	return r.client.Exists(genUserInfoKey(u.GetUserInfo())).Result()
}
Example #12
// PipeThenCloseOta verifies and relays OTA-framed chunks from src to dst, closing dst on return.
func PipeThenCloseOta(src *ss.Conn, dst net.Conn, is_res bool, host string, user user.User) {
	const (
		dataLenLen  = 2
		hmacSha1Len = 10
		idxData0    = dataLenLen + hmacSha1Len
	)

	defer dst.Close()
	var pipeBuf = leakybuf.NewLeakyBuf(nBuf, bufSize)
	buf := pipeBuf.Get()
	// A chunk may be larger than the pooled buffer; in that case a dedicated
	// block is allocated for it below.
	for i := 1; ; i++ {
		SetReadTimeout(src)
		n, err := io.ReadFull(src, buf[:dataLenLen+hmacSha1Len])
		if err != nil {
			if err == io.EOF {
				break
			}
			Log.Debug(fmt.Sprintf("conn=%p #%v read header error n=%v: %v", src, i, n, err))
			break
		}
		dataLen := binary.BigEndian.Uint16(buf[:dataLenLen])
		expectedHmacSha1 := buf[dataLenLen:idxData0]

		var dataBuf []byte
		if len(buf) < int(idxData0+dataLen) {
			dataBuf = make([]byte, dataLen)
		} else {
			dataBuf = buf[idxData0 : idxData0+dataLen]
		}
		if n, err := io.ReadFull(src, dataBuf); err != nil {
			if err == io.EOF {
				break
			}
			Log.Debug(fmt.Sprintf("conn=%p #%v read data error n=%v: %v", src, i, n, err))
			break
		}
		chunkIdBytes := make([]byte, 4)
		chunkId := src.GetAndIncrChunkId()
		binary.BigEndian.PutUint32(chunkIdBytes, chunkId)
		actualHmacSha1 := ss.HmacSha1(append(src.GetIv(), chunkIdBytes...), dataBuf)
		if !bytes.Equal(expectedHmacSha1, actualHmacSha1) {
			Log.Debug(fmt.Sprintf("conn=%p #%v read data hmac-sha1 mismatch, iv=%v chunkId=%v src=%v dst=%v len=%v expeced=%v actual=%v", src, i, src.GetIv(), chunkId, src.RemoteAddr(), dst.RemoteAddr(), dataLen, expectedHmacSha1, actualHmacSha1))
			break
		}

		// Assign to the outer n so the traffic accounting below sees the
		// number of payload bytes written, not the header read above.
		n, err = dst.Write(dataBuf)
		if err != nil {
			Log.Debug(fmt.Sprintf("conn=%p #%v write data error n=%v: %v", dst, i, n, err))
			break
		}
		if is_res {
			if err := storage.IncrSize(user, n); err != nil {
				Log.Error(err)
			}
			Log.Debug(fmt.Sprintf("[port-%d] store size: %d", user.GetPort(), n))
		}
	}
}
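For reference, the chunk layout this loop consumes is: a 2-byte big-endian payload length, a 10-byte HMAC-SHA1 tag computed over the payload with key iv||chunkId, then the payload itself. A minimal sketch of the sending side under those assumptions, using only the standard library (crypto/hmac, crypto/sha1, encoding/binary, io); writeOtaChunk is a hypothetical helper, not part of the project:

// writeOtaChunk frames one OTA chunk as consumed by PipeThenCloseOta:
// [2-byte big-endian length][10-byte HMAC-SHA1(iv||chunkId, payload)][payload].
func writeOtaChunk(dst io.Writer, iv []byte, chunkId uint32, payload []byte) error {
	chunkIdBytes := make([]byte, 4)
	binary.BigEndian.PutUint32(chunkIdBytes, chunkId)

	mac := hmac.New(sha1.New, append(iv, chunkIdBytes...))
	mac.Write(payload)
	tag := mac.Sum(nil)[:10] // the protocol keeps only the first 10 bytes

	header := make([]byte, 2, 12)
	binary.BigEndian.PutUint16(header, uint16(len(payload)))
	header = append(header, tag...)

	if _, err := dst.Write(header); err != nil {
		return err
	}
	_, err := dst.Write(payload)
	return err
}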
Example #13
// handleConnection serves one client connection: it parses the request,
// dials the target host, records the user's traffic and online status, and
// pipes data in both directions until either side closes.
func handleConnection(user user.User, conn *ss.Conn, auth bool) {
	var host string

	connCnt++ // this may not be accurate, but it is close enough
	if connCnt-nextLogConnCnt >= 0 {
		// XXX There's no xadd in the atomic package, so it's difficult to log
		// the message only once with low cost. Also note nextLogConnCnt may be
		// added twice for the current peak connection number level.
		Log.Debug(fmt.Sprintf("Number of client connections reaches %d\n", nextLogConnCnt))
		nextLogConnCnt += logCntDelta
	}

	// Note that arguments to Log.Debug are always evaluated, so the Sprintf
	// below runs even when debug logging is disabled.
	Log.Debug(fmt.Sprintf("new client %s->%s\n", conn.RemoteAddr().String(), conn.LocalAddr()))
	closed := false
	defer func() {
		if ssdebug {
			Log.Debug(fmt.Sprintf("closed pipe %s<->%s\n", conn.RemoteAddr(), host))
		}
		connCnt--
		if !closed {
			conn.Close()
		}
	}()

	host, res_size, ota, err := getRequest(conn, auth)
	if err != nil {
		Log.Error("error getting request", conn.RemoteAddr(), conn.LocalAddr(), err)
		return
	}
	Log.Info(fmt.Sprintf("[port-%d]connecting %s ", user.GetPort(), host))
	remote, err := net.Dial("tcp", host)
	if err != nil {
		if ne, ok := err.(*net.OpError); ok && (ne.Err == syscall.EMFILE || ne.Err == syscall.ENFILE) {
			// Log "too many open files" errors explicitly: EMFILE means the
			// process hit its open-file limit, ENFILE the system-wide limit.
			Log.Error("dial error:", err)
		} else {
			Log.Error("error connecting to:", host, err)
		}
		return
	}
	defer func() {
		if !closed {
			remote.Close()
		}
	}()

	// debug conn info
	Log.Debug(fmt.Sprintf("%d conn debug:  local addr: %s | remote addr: %s network: %s ", user.GetPort(),
		conn.LocalAddr().String(), conn.RemoteAddr().String(), conn.RemoteAddr().Network()))
	err = storage.IncrSize(user, res_size)
	if err != nil {
		Log.Error(err)
		return
	}
	err = storage.MarkUserOnline(user)
	if err != nil {
		Log.Error(err)
		return
	}
	Log.Debug(fmt.Sprintf("[port-%d] store size: %d", user.GetPort(), res_size))

	Log.Info(fmt.Sprintf("piping %s<->%s ota=%v connOta=%v", conn.RemoteAddr(), host, ota, conn.IsOta()))

	if ota {
		go PipeThenCloseOta(conn, remote, false, host, user)
	} else {
		go PipeThenClose(conn, remote, false, host, user)
	}

	PipeThenClose(remote, conn, true, host, user)
	closed = true
}
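The connCnt++ and connCnt-- above are plain, racy updates to a shared counter, which the code itself notes may be inaccurate. If accuracy mattered, the counter could use sync/atomic; a minimal sketch of that variant (not the project's actual code):

// Hypothetical atomic variant of the connection counter used in handleConnection.
var connCnt int64

func onConnOpen()  { atomic.AddInt64(&connCnt, 1) }
func onConnClose() { atomic.AddInt64(&connCnt, -1) }

// currentConns returns a consistent snapshot of the counter.
func currentConns() int64 { return atomic.LoadInt64(&connCnt) }

handleConnection would call onConnOpen on entry and defer onConnClose, keeping the count exact under concurrency; the "log every logCntDelta connections" threshold logic would still need its own synchronization.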