func (gae *GAEHttpConnection) doRangeFetch(req *http.Request, firstChunkRes *http.Response) { task := new(rangeFetchTask) task.FetchLimit = int(gae_cfg.FetchLimitSize) task.FetchWorkerNum = int(gae_cfg.ConcurrentRangeFetcher) task.SessionID = gae.sess.SessionID // task.TaskValidation = func() bool { // return !util.IsDeadConnection(gae.sess.LocalRawConn) // } gae.rangeWorker = task fetch := func(preq *http.Request) (*http.Response, error) { ev := new(event.HTTPRequestEvent) ev.FromRequest(preq) ev.SetHash(gae.sess.SessionID) err, xres := gae.requestEvent(gaeHttpClient, gae.sess, ev) if nil != err { //try again err, xres = gae.requestEvent(gaeHttpClient, gae.sess, ev) } if nil == err { httpresev := xres.(*event.HTTPResponseEvent) httpres := httpresev.ToResponse() return httpres, err } return nil, err } pres, err := task.SyncGet(req, firstChunkRes, fetch) if nil == err { err = pres.Write(gae.sess.LocalRawConn) if nil != err { task.Close() gae.rangeWorker = nil } if nil != pres.Body { pres.Body.Close() } } if nil != err { log.Printf("Session[%d]Range task failed for reason:%v\n", gae.sess.SessionID, err) } if nil != err || !util.IsResponseKeepAlive(pres) || !util.IsRequestKeepAlive(req) { gae.sess.LocalRawConn.Close() gae.sess.State = STATE_SESSION_CLOSE gae.Close() } }
func (session *SessionConnection) processHttpEvent(ev *event.HTTPRequestEvent) error { ev.SetHash(session.SessionID) //proxies, attrs := SelectProxy(ev.RawReq, session.LocalRawConn, session.Type == HTTPS_TUNNEL) proxies, attrs := SelectProxy(ev.RawReq, session) if nil == proxies { session.State = STATE_SESSION_CLOSE return nil } var err error if nil == session.RemoteConn { err = session.tryProxy(proxies, attrs, ev) } else { rmanager := session.RemoteConn.GetConnectionManager() matched := false for _, proxy := range proxies { proxyName := adjustProxyName(proxy.GetName(), session.Type == HTTPS_TUNNEL) if rmanager.GetName() == proxyName { matched = true break } } if !matched { session.RemoteConn.Close() err = session.tryProxy(proxies, attrs, ev) } else { err, _ = session.RemoteConn.Request(session, ev) } } if nil != err { log.Printf("Session[%d]Process error:%v for host:%s", session.SessionID, err, ev.RawReq.Host) session.LocalRawConn.Write([]byte("HTTP/1.1 500 Internal Server Error\r\n\r\n")) session.LocalRawConn.Close() } return nil }
func (c4 *C4RemoteSession) writeHttpRequest(preq *http.Request) error { ev := new(event.HTTPRequestEvent) ev.FromRequest(preq) ev.SetHash(c4.sess.SessionID) if strings.Contains(ev.Url, "http://") { ev.Url = ev.Url[7+len(preq.Host):] } //log.Printf("Session[%d]Range Request %s\n", c4.sess.SessionID, ev.Url) c4.offerRequestEvent(ev) return nil }
func Fetch(context appengine.Context, ev *event.HTTPRequestEvent) event.Event { errorResponse := new(event.HTTPResponseEvent) if Cfg.IsMaster == 1 { fillErrorResponse(errorResponse, "Proxy service is no enable in snova master node.") return errorResponse } if isInBlacklist(ev.GetHeader("Host")) { fillErrorResponse(errorResponse, "Current site is in blacklist.") return errorResponse } req := buildHTTPRequest(ev) if req == nil { errorResponse.Status = 400 fillErrorResponse(errorResponse, "Invalid fetch url:"+ev.Url) return errorResponse } var t urlfetch.Transport t.Context = context t.Deadline, _ = time.ParseDuration("10s") t.AllowInvalidServerCertificate = true //t := &transport //t := &urlfetch.Transport{context, 0, true} retryCount := Cfg.RetryFetchCount for retryCount > 0 { resp, err := t.RoundTrip(req) if err == nil { res := buildHTTPResponseEvent(resp) if res.Status == 302 { rangeHeader := req.Header.Get("Range") if len(rangeHeader) > 0 { res.AddHeader("X-Range", rangeHeader) } } return res } context.Errorf("Failed to fetch URL[%s] for reason:%v", ev.Url, err) retryCount-- if strings.EqualFold(req.Method, "GET") && strings.Contains(err.Error(), "RESPONSE_TOO_LARGE") { rangeLimit := Cfg.RangeFetchLimit rangestart := 0 rangeheader := req.Header.Get("Range") if len(rangeheader) > 0 { rangestart, _ = util.ParseRangeHeaderValue(rangeheader) } req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", rangestart, rangeLimit-1)) } if strings.Contains(err.Error(), "RESPONSE_TOO_LARGE") { time.Sleep(1 * time.Second) return Fetch(context, ev) } } errorResponse.Status = 408 fillErrorResponse(errorResponse, "Fetch timeout for url:"+ev.Url) rangeHeader := req.Header.Get("Range") if len(rangeHeader) > 0 { errorResponse.SetHeader("X-Range", rangeHeader) } return errorResponse }
// process runs one step of the session state machine: reads the next HTTP
// request (or raw chunk) from the local browser connection and dispatches it.
// Returns io.EOF when the session has been torn down, nil otherwise.
func (session *SessionConnection) process() error {
	// close_session tears down both sides of the session and marks it closed.
	close_session := func() {
		session.LocalRawConn.Close()
		if nil != session.RemoteConn {
			session.RemoteConn.Close()
		}
		session.State = STATE_SESSION_CLOSE
	}
	// readRequest blocks until a full HTTP request arrives from the browser.
	// When the remote side is a ForwardConnection, it polls with a 1s read
	// deadline so a closed remote connection is noticed promptly instead of
	// blocking forever on the local read.
	readRequest := func() (*http.Request, error) {
		for {
			needCheckRemote := false
			if nil != session.RemoteConn {
				forward, ok := session.RemoteConn.(*ForwardConnection)
				if ok {
					if forward.IsClosed() {
						return nil, fmt.Errorf("Remote conn closed.")
					}
					needCheckRemote = true
				}
			}
			if needCheckRemote {
				// Short deadline so the loop re-checks forward.IsClosed().
				session.LocalRawConn.SetReadDeadline(time.Now().Add(1 * time.Second))
			}
			req, e := http.ReadRequest(session.LocalBufferConn)
			if nil != req {
				// Hop-by-hop header from the browser; must not be forwarded.
				req.Header.Del("Proxy-Connection")
			}
			if needCheckRemote {
				// Zero time clears the read deadline.
				var zero time.Time
				session.LocalRawConn.SetReadDeadline(zero)
			}
			if nil != e && util.IsTimeoutError(e) {
				// Deadline expired with no data: loop to re-check the remote.
				continue
			}
			return req, e
		}
		// NOTE(review): unreachable — the for loop above always returns.
		return nil, nil
	}
	switch session.State {
	case STATE_RECV_HTTP:
		req, rerr := readRequest()
		if nil == rerr {
			var rev event.HTTPRequestEvent
			rev.FromRequest(req)
			rev.SetHash(session.SessionID)
			err := session.processHttpEvent(&rev)
			if err != nil {
				// NOTE(review): message says "read" but this is a processing
				// error; also processHttpEvent currently always returns nil,
				// so this branch appears dead — confirm before relying on it.
				log.Printf("Session[%d]Failed to read http request:%v\n", session.SessionID, err)
				close_session()
				return io.EOF
			}
		}
		if nil != rerr {
			log.Printf("Session[%d]Browser close connection:%v\n", session.SessionID, rerr)
			close_session()
			return io.EOF
		}
	case STATE_RECV_HTTP_CHUNK:
		// Tunnel mode: relay raw bytes from the browser as chunk events.
		buf := make([]byte, 8192)
		n, err := session.LocalBufferConn.Read(buf)
		if nil == err {
			rev := new(event.HTTPChunkEvent)
			rev.Content = buf[0:n]
			err = session.processHttpChunkEvent(rev)
		}
		if nil != err {
			close_session()
			return io.EOF
		}
	case STATE_RECV_TCP:
		// No local-side work in this state.
	}
	return nil
}