Example #1
func PipeThenClose(src, dst net.Conn, is_res bool, host string, user user.User) {
	var pipeBuf = leakybuf.NewLeakyBuf(nBuf, bufSize)
	defer dst.Close()
	buf := pipeBuf.Get()
	// defer pipeBuf.Put(buf)
	var size int

	for {
		SetReadTimeout(src)
		n, err := src.Read(buf)
		// read may return EOF with n > 0
		// should always process n > 0 bytes before handling error
		if n > 0 {
			size, err = dst.Write(buf[0:n])
			if err != nil {
				Log.Debug("write:", err)
				break
			}
			if is_res {
				// use a shadowed err so an accounting failure does not
				// mask or clobber the write error handled above
				if err := storage.IncrSize(user, size); err != nil {
					Log.Error(err)
				}
				Log.Debug(fmt.Sprintf("[port-%d] store size: %d", user.GetPort(), size))
			}
		}
		if err != nil || n == 0 {
			// Always "use of closed network connection", but no easy way to
		// identify this specific error. So just leave the error alone for now.
			// More info here: https://code.google.com/p/go/issues/detail?id=4373
			break
		}
	}
	return
}
Example #2
func PipeThenClose(src, dst net.Conn, timeoutOpt int, is_http bool, is_res bool, host string, user User) (total int, raw_header []byte) {
	var pipeBuf = leakybuf.NewLeakyBuf(nBuf, bufSize)
	defer dst.Close()
	buf := pipeBuf.Get()
	// defer pipeBuf.Put(buf)
	var buffer = bytes.NewBuffer(nil)
	var is_end = false
	var size int

	for {
		if timeoutOpt == ss.SET_TIMEOUT {
			SetReadTimeout(src)
		}
		n, err := src.Read(buf)
		// read may return EOF with n > 0
		// should always process n > 0 bytes before handling error
		if n > 0 {
			if is_http && !is_end {
				buffer.Write(buf[:n]) // only append the bytes actually read
				raw_header = buffer.Bytes()
				lines := bytes.SplitN(raw_header, []byte("\r\n\r\n"), 2)
				if len(lines) == 2 {
					is_end = true
				}
			}

			size, err = dst.Write(buf[0:n])
			if is_res {
				total_size, _ := storage.IncrSize("flow:"+user.Name, size)
				storage.ZincrbySize("flow:"+user.Name, host, size)
				if total_size > user.Limit {
					return
				}
			}
			total += size
			if err != nil {
				ss.Debug.Println("write:", err)
				break
			}
		}
		if err != nil || n == 0 {
			// Always "use of closed network connection", but no easy way to
			// identify this specific error. So just leave the error alone for now.
			// More info here: https://code.google.com/p/go/issues/detail?id=4373
			break
		}
	}
	return
}
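
Examples #2 and #3 capture the raw HTTP response header by accumulating the bytes read so far and checking for the blank line that terminates the header block. A minimal, standalone sketch of that check (the function name and sample data here are illustrative, not taken from the repositories above):

package main

import (
	"bytes"
	"fmt"
)

// headerComplete reports whether raw contains a full HTTP header block,
// i.e. whether the "\r\n\r\n" separator has appeared. This is the same
// check that drives the is_end flag in the examples above.
func headerComplete(raw []byte) bool {
	return len(bytes.SplitN(raw, []byte("\r\n\r\n"), 2)) == 2
}

func main() {
	var buffered bytes.Buffer
	chunks := [][]byte{
		[]byte("HTTP/1.1 200 OK\r\nContent-Le"),
		[]byte("ngth: 5\r\n\r\nhello"),
	}
	for _, chunk := range chunks {
		buffered.Write(chunk) // append only the bytes actually read
		if headerComplete(buffered.Bytes()) {
			fmt.Printf("raw header captured after %d bytes\n", buffered.Len())
			break
		}
	}
}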
Example #3
func PipeThenClose(src, dst net.Conn, is_http bool, is_res bool, host string, user user.User) (total int, raw_header []byte) {
	var pipeBuf = leakybuf.NewLeakyBuf(nBuf, bufSize)
	defer dst.Close()
	buf := pipeBuf.Get()
	// defer pipeBuf.Put(buf)
	var buffer = bytes.NewBuffer(nil)
	var is_end = false
	var size int

	for {
		SetReadTimeout(src)
		n, err := src.Read(buf)
		// read may return EOF with n > 0
		// should always process n > 0 bytes before handling error
		if n > 0 {
			if is_http && !is_end {
				buffer.Write(buf[:n]) // only append the bytes actually read
				raw_header = buffer.Bytes()
				lines := bytes.SplitN(raw_header, []byte("\r\n\r\n"), 2)
				if len(lines) == 2 {
					is_end = true
				}
			}

			size, err = dst.Write(buf[0:n])
			if err != nil {
				Log.Debug("write:", err)
				break
			}
			total += size
			if is_res {
				// use a shadowed err so an accounting failure does not
				// mask or clobber the write error handled above
				if err := storage.IncrSize(user, size); err != nil {
					Log.Error(err)
				}
				Log.Debug(fmt.Sprintf("[port-%d] store size: %d", user.GetPort(), size))
			}
		}
		if err != nil || n == 0 {
			// Always "use of closed network connection", but no easy way to
			// identify this specific error. So just leave the error alone for now.
			// More info here: https://code.google.com/p/go/issues/detail?id=4373
			break
		}
	}
	return
}
Example #4
// The HTTP buffer is used with bufio to read request/response lines; a
// buffer that is too small can hit bufio.ErrBufferFull while reading a
// line, so set it to a large value to prevent such problems.
//
// For limits about URL and HTTP header size, refer to:
// http://stackoverflow.com/questions/417142/what-is-the-maximum-length-of-a-url
// "de facto limit of 2000 characters"
// http://www.mnot.net/blog/2011/07/11/what_proxies_must_do
// "URIs should be allowed at least 8000 octets, and HTTP headers should have
// 4000 as an absolute minimum".
// In practice, there are sites using cookies larger than 4096 bytes,
// e.g. www.fitbit.com. So set http buffer size to 8192 to be safe.
const httpBufSize = 8192

// Hold at most 4MB memory as buffer for parsing http request/response and
// holding post data.
var httpBuf = leakybuf.NewLeakyBuf(512, httpBufSize)

// If no keep-alive header in response, use this as the keep-alive value.
const defaultServerConnTimeout = 5 * time.Second

// Close client connection if no new requests received in some time.
// (On OS X, the default soft limit of open file descriptor is 256, which is
// very conservative and easy to cause problem if we are not careful to limit
// open fds.)
const clientConnTimeout = 5 * time.Second
const fullKeepAliveHeader = "Keep-Alive: timeout=5\r\n"

// Some of this code is adapted from the http package

var zeroTime time.Time
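
The httpBuf pool above caps the memory held for HTTP parsing at 512 buffers of 8192 bytes, i.e. the 4MB mentioned in the comment. A leaky buffer of this kind is conventionally a buffered channel of byte slices: Get reuses a free buffer or allocates a new one, and Put returns a buffer to the pool or lets it "leak" to the garbage collector when the pool is already full. A minimal sketch of that design, using only the standard library (modeled on, but not copied from, the leakybuf package referenced above):

// LeakyBuf retains at most n buffers of bufSize bytes and drops any
// surplus for the GC to reclaim.
type LeakyBuf struct {
	bufSize  int
	freeList chan []byte
}

func NewLeakyBuf(n, bufSize int) *LeakyBuf {
	return &LeakyBuf{bufSize: bufSize, freeList: make(chan []byte, n)}
}

// Get returns a recycled buffer if one is available, otherwise a new one.
func (lb *LeakyBuf) Get() (b []byte) {
	select {
	case b = <-lb.freeList:
	default:
		b = make([]byte, lb.bufSize)
	}
	return
}

// Put returns a buffer to the pool, or discards it if the pool is full.
func (lb *LeakyBuf) Put(b []byte) {
	if len(b) != lb.bufSize {
		panic("leaky buffer: put a buffer of the wrong size")
	}
	select {
	case lb.freeList <- b:
	default: // pool full, let the buffer be garbage collected
	}
}

Callers follow the buf := pool.Get() ... pool.Put(buf) pattern that appears (partly commented out) in the examples above.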
Example #5
const (
	NO_TIMEOUT = iota
	SET_TIMEOUT
)

func SetReadTimeout(c net.Conn) {
	if readTimeout != 0 {
		c.SetReadDeadline(time.Now().Add(readTimeout))
	}
}

const bufSize = 4096
const nBuf = 2048

var pipeBuf = leakybuf.NewLeakyBuf(nBuf, bufSize)

// PipeThenClose copies data from src to dst, closes dst when done.
func PipeThenClose(src, dst net.Conn, timeoutOpt int) {
	defer dst.Close()
	buf := pipeBuf.Get()
	defer pipeBuf.Put(buf)
	for {
		if timeoutOpt == SET_TIMEOUT {
			SetReadTimeout(src)
		}
		n, err := src.Read(buf)
		// read may return EOF with n > 0
		// should always process n > 0 bytes before handling error
		if n > 0 {
			if _, err = dst.Write(buf[0:n]); err != nil {
				Debug.Println("write:", err)
				break
			}
		}
		if err != nil || n == 0 {
			// Always "use of closed network connection", but no easy way to
			// identify this specific error. So just leave the error alone for now.
			// More info here: https://code.google.com/p/go/issues/detail?id=4373
			break
		}
	}
	return
}
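
In the upstream library this helper is launched once per direction of a proxied connection: each call closes its destination when its source stops delivering data, which in turn unblocks the opposite copy (the "use of closed network connection" case noted in the comment above). A sketch of that wiring, assuming conn and remote are already-established connections in the same package as the example above; which timeout option each direction gets varies between forks:

// relay copies both directions and tears the pair down when either side
// finishes; the deferred dst.Close() in PipeThenClose unblocks the peer copy.
func relay(conn, remote net.Conn) {
	go PipeThenClose(conn, remote, SET_TIMEOUT) // client -> remote
	PipeThenClose(remote, conn, NO_TIMEOUT)     // remote -> client
}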
Example #6
func PipeThenCloseOta(src *ss.Conn, dst net.Conn, is_res bool, host string, user user.User) {
	const (
		dataLenLen  = 2
		hmacSha1Len = 10
		idxData0    = dataLenLen + hmacSha1Len
	)

	defer dst.Close()
	var pipeBuf = leakybuf.NewLeakyBuf(nBuf, bufSize)
	buf := pipeBuf.Get()
	// sometimes it has to fill a large block
	for i := 1; ; i++ {
		SetReadTimeout(src)
		n, err := io.ReadFull(src, buf[:dataLenLen+hmacSha1Len])
		if err != nil {
			if err == io.EOF {
				break
			}
			Log.Debug(fmt.Sprintf("conn=%p #%v read header error n=%v: %v", src, i, n, err))
			break
		}
		dataLen := binary.BigEndian.Uint16(buf[:dataLenLen])
		expectedHmacSha1 := buf[dataLenLen:idxData0]

		var dataBuf []byte
		if len(buf) < int(idxData0+dataLen) {
			dataBuf = make([]byte, dataLen)
		} else {
			dataBuf = buf[idxData0 : idxData0+dataLen]
		}
		if n, err := io.ReadFull(src, dataBuf); err != nil {
			if err == io.EOF {
				break
			}
			Log.Debug(fmt.Sprintf("conn=%p #%v read data error n=%v: %v", src, i, n, err))
			break
		}
		chunkIdBytes := make([]byte, 4)
		chunkId := src.GetAndIncrChunkId()
		binary.BigEndian.PutUint32(chunkIdBytes, chunkId)
		actualHmacSha1 := ss.HmacSha1(append(src.GetIv(), chunkIdBytes...), dataBuf)
		if !bytes.Equal(expectedHmacSha1, actualHmacSha1) {
			Log.Debug(fmt.Sprintf("conn=%p #%v read data hmac-sha1 mismatch, iv=%v chunkId=%v src=%v dst=%v len=%v expeced=%v actual=%v", src, i, src.GetIv(), chunkId, src.RemoteAddr(), dst.RemoteAddr(), dataLen, expectedHmacSha1, actualHmacSha1))
			break
		}

		// reuse the outer n so the accounting below sees the number of data
		// bytes written, not the header length read at the top of the loop
		n, err = dst.Write(dataBuf)
		if err != nil {
			Log.Debug(fmt.Sprintf("conn=%p #%v write data error n=%v: %v", dst, i, n, err))
			break
		}
		if is_res {
			if err := storage.IncrSize(user, n); err != nil {
				Log.Error(err)
			}
			Log.Debug(fmt.Sprintf("[port-%d] store size: %d", user.GetPort(), n))
		}
	}
	return
}
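
PipeThenCloseOta reads the shadowsocks one-time-auth chunk framing: a 2-byte big-endian data length, a 10-byte HMAC-SHA1 keyed with IV||chunkId (hmacSha1Len above), then the data itself. For clarity, here is a standalone sketch of the matching writer side, using only the standard library; the helper name is illustrative, and the 10-byte truncation mirrors the hmacSha1Len the reader checks:

package main

import (
	"bytes"
	"crypto/hmac"
	"crypto/sha1"
	"encoding/binary"
	"fmt"
)

// writeOtaChunk frames one OTA chunk the way PipeThenCloseOta expects to
// read it: 2-byte big-endian length, 10-byte truncated HMAC-SHA1 keyed
// with iv||chunkId, then the data.
func writeOtaChunk(w *bytes.Buffer, iv []byte, chunkId uint32, data []byte) {
	lenBytes := make([]byte, 2)
	binary.BigEndian.PutUint16(lenBytes, uint16(len(data)))

	chunkIdBytes := make([]byte, 4)
	binary.BigEndian.PutUint32(chunkIdBytes, chunkId)
	key := append(append([]byte{}, iv...), chunkIdBytes...)

	mac := hmac.New(sha1.New, key)
	mac.Write(data)
	sum := mac.Sum(nil)[:10] // truncated to hmacSha1Len bytes

	w.Write(lenBytes)
	w.Write(sum)
	w.Write(data)
}

func main() {
	var out bytes.Buffer
	writeOtaChunk(&out, []byte{1, 2, 3, 4}, 0, []byte("hello"))
	fmt.Printf("framed chunk: % x\n", out.Bytes())
}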