// DumpResponse is like DumpRequest but dumps a response.
func DumpResponse(resp *http.Response, body bool) (dump []byte, err error) {
	var b bytes.Buffer
	save := resp.Body
	savecl := resp.ContentLength

	if !body {
		// For content length of zero. Make sure the body is an empty
		// reader, instead of returning error through failureToReadBody{}.
		if resp.ContentLength == 0 {
			resp.Body = emptyBody
		} else {
			resp.Body = failureToReadBody{}
		}
	} else if resp.Body == nil {
		resp.Body = emptyBody
	} else {
		save, resp.Body, err = drainBody(resp.Body)
		if err != nil {
			return nil, err
		}
	}
	err = resp.Write(&b)
	if err == errNoBody {
		err = nil
	}
	resp.Body = save
	resp.ContentLength = savecl
	if err != nil {
		return nil, err
	}
	return b.Bytes(), nil
}
// Code to stream the result in lock-step to the client and to a cache file.
// If conn terminates, continue streaming the response into the cache.
func streamAndCache(res *http.Response, conn net.Conn, cache_path string) int64 {
	response_reader, cache_writer := io.Pipe()
	defer func() {
		err := cache_writer.Close()
		if err != nil {
			panic(err)
		}
	}()

	n_recvd := make(chan int64)
	go func() {
		n_recvd <- CacheResponse(cache_path, response_reader)
	}()

	// Swallow write errors to `conn` since we want it to keep writing to the
	// cache even if conn goes away
	w := io.MultiWriter(cache_writer, writeErrorSwallower{conn})
	err := res.Write(w)
	if err != nil {
		panic(err)
	}

	// Wait until CacheResponse is done copying the response to a file.
	n := <-n_recvd
	return n
}
func (srv *Server) handlerWriteResponse(request *Request, res *http.Response, c net.Conn, bw *bufio.Writer) error {
	// Setup write stage
	request.startPipelineStage("server.ResponseWrite")
	request.CurrentStage.Type = PipelineStageTypeOverhead

	// cleanup
	defer func() {
		request.finishPipelineStage()
		request.finishRequest()
		srv.requestFinished(request, res)
		if res.Body != nil {
			res.Body.Close()
		}
	}()

	// Cycle nodelay flag on socket
	// Note: defers run FILO, so this will happen before the write
	// stage is marked finished, which is what we want.
	if nodelay := srv.setNoDelay(c, false); nodelay {
		defer srv.setNoDelay(c, true)
	}

	var err error

	// Write response
	if err = res.Write(bw); err != nil {
		return err
	}

	// Flush any remaining buffer
	err = bw.Flush()
	return err
}
// DumpResponse is like DumpRequest but dumps a response.
func DumpResponse(resp *http.Response, body bool) (dump []byte, err error) {
	// Dump the response contents.
	var b bytes.Buffer
	save := resp.Body
	savecl := resp.ContentLength
	if !body {
		resp.Body = failureToReadBody{}
	} else if resp.Body == nil {
		resp.Body = emptyBody
	} else {
		save, resp.Body, err = drainBody(resp.Body)
		if err != nil {
			return
		}
	}
	err = resp.Write(&b)
	if err == errNoBody {
		err = nil
	}
	resp.Body = save
	resp.ContentLength = savecl
	if err != nil {
		return nil, err
	}
	return b.Bytes(), nil
}
// putCache puts the supplied http.Response into the cache.
func putCache(c *Context, req *http.Request, resp *http.Response) error {
	defer resp.Body.Close()
	filename := cacheEntryFilename(c, req.URL.String())
	f, err := os.Create(filename)
	if err != nil {
		return err
	}
	if err := resp.Write(f); err != nil {
		f.Close()
		return err
	}
	f.Close()
	if log.V(1) {
		log.Infof("wrote %q to response cache", req.URL.String())
	}
	// TODO(spencer): this sucks, but we must re-read the response as
	// the body is closed during the call to resp.Write().
	if readResp, err := readCachedResponse(filename, req); err != nil {
		log.Errorf("failed reading cached response: %s", err)
		return err
	} else {
		resp.Body = readResp.Body
	}
	return nil
}
func (c *Client) logResponse(resp *http.Response) error {
	if c.logHTTP {
		var err error
		save := resp.Body
		savecl := resp.ContentLength
		body := true
		if !body {
			resp.Body = failureToReadBody{}
		} else if resp.Body == nil {
			resp.Body = emptyBody
		} else {
			save, resp.Body, err = drainBody(resp.Body)
			if err != nil {
				return err
			}
		}
		fmt.Println("----------- response start -----------")
		err = resp.Write(os.Stderr)
		if err == errNoBody {
			err = nil
		}
		resp.Body = save
		resp.ContentLength = savecl
		if err != nil {
			return err
		}
		fmt.Println("----------- response end -----------")
	}
	return nil
}
// handleRequest runs the request and response modifiers and performs the roundtrip to the destination server.
func (p *Proxy) handleRequest(ctx *Context, rw *bufio.ReadWriter, req *http.Request) (closing bool) {
	if err := proxyutil.FixBadFraming(req.Header); err != nil {
		Errorf("proxyutil.FixBadFraming(): %v", err)
		proxyutil.NewErrorResponse(400, err, req).Write(rw)
		return
	}

	proxyutil.SetForwardedHeaders(req)
	proxyutil.SetViaHeader(req.Header, "1.1 martian")

	if err := p.ModifyRequest(ctx, req); err != nil {
		Errorf("martian.ModifyRequest(): %v", err)
		proxyutil.NewErrorResponse(400, err, req).Write(rw)
		return
	}

	if shouldCloseAfterReply(req.Header) {
		Debugf("closing after reply")
		closing = true
	}

	proxyutil.RemoveHopByHopHeaders(req.Header)

	var res *http.Response
	var err error
	if !ctx.SkipRoundTrip {
		Debugf("proceed to round trip for %s", req.URL)
		res, err = p.RoundTripper.RoundTrip(req)
		if err != nil {
			Errorf("RoundTripper.RoundTrip(%s): %v", req.URL, err)
			proxyutil.NewErrorResponse(502, err, req).Write(rw)
			return
		}
	} else {
		Debugf("skipped round trip for %s", req.URL)
		res = proxyutil.NewResponse(200, nil, req)
	}

	proxyutil.RemoveHopByHopHeaders(res.Header)

	if err := p.ModifyResponse(ctx, res); err != nil {
		Errorf("martian.ModifyResponse(): %v", err)
		proxyutil.NewErrorResponse(400, err, req).Write(rw)
		return
	}

	if closing {
		res.Header.Set("Connection", "close")
		res.Close = true
	}

	if err := res.Write(rw); err != nil {
		Errorf("res.Write(): %v", err)
	}

	return
}
func TestResponse(t *testing.T) {
	var resp http.Response
	resp.StatusCode = http.StatusOK
	resp.Header = make(http.Header)
	resp.Header["SID"] = []string{"uuid:1337"}
	var buf bytes.Buffer
	resp.Write(&buf)
	t.Logf("%q", buf.String())
}
func (srv *Server) handler(c net.Conn) {
	startTime := time.Now()
	defer srv.connectionFinished(c)
	bpe := srv.bufferPool.take(c)
	defer srv.bufferPool.give(bpe)
	var err error
	var req *http.Request
	// no keepalive (for now)
	reqCount := 0
	keepAlive := true
	for err == nil && keepAlive {
		if req, err = http.ReadRequest(bpe.br); err == nil {
			if req.Header.Get("Connection") != "Keep-Alive" {
				keepAlive = false
			}
			request := newRequest(req, c, startTime)
			reqCount++
			var res *http.Response

			pssInit := new(PipelineStageStat)
			pssInit.Name = "server.Init"
			pssInit.StartTime = startTime
			pssInit.EndTime = time.Now()
			request.appendPipelineStage(pssInit)
			// execute the pipeline
			if res = srv.Pipeline.execute(request); res == nil {
				res = SimpleResponse(req, 404, nil, "Not Found")
			}
			// cleanup
			request.startPipelineStage("server.ResponseWrite")
			req.Body.Close()
			if srv.sendfile {
				res.Write(c)
			} else {
				wbuf := bufio.NewWriter(c)
				res.Write(wbuf)
				wbuf.Flush()
			}
			if res.Body != nil {
				res.Body.Close()
			}
			request.finishPipelineStage()
			request.finishRequest()
			srv.requestFinished(request)
		} else {
			// EOF is socket closed
			if err != io.ErrUnexpectedEOF {
				Error("%s %v ERROR reading request: %v", srv.serverLogPrefix(), c.RemoteAddr(), err)
			}
		}
	}
	//Debug("%s Processed %v requests on connection %v", srv.serverLogPrefix(), reqCount, c.RemoteAddr())
}
// Write writes resp in response to req. To close the connection gracefully, set the
// Response.Close field to true. Write should be considered operational until
// it returns an error, regardless of any errors returned on the Read side.
func (sc *ServerConn) Write(req *http.Request, resp *http.Response) error {
	// Retrieve the pipeline ID of this request/response pair
	sc.lk.Lock()
	id, ok := sc.pipereq[req]
	delete(sc.pipereq, req)
	if !ok {
		sc.lk.Unlock()
		return ErrPipeline
	}
	sc.lk.Unlock()

	// Ensure pipeline order
	sc.pipe.StartResponse(id)
	defer sc.pipe.EndResponse(id)

	sc.lk.Lock()
	if sc.we != nil {
		defer sc.lk.Unlock()
		return sc.we
	}
	if sc.c == nil { // connection closed by user in the meantime
		defer sc.lk.Unlock()
		return os.EBADF
	}
	c := sc.c
	if sc.nread <= sc.nwritten {
		defer sc.lk.Unlock()
		return errors.New("persist server pipe count")
	}
	if resp.Close {
		// After signaling a keep-alive close, any pipelined unread
		// requests will be lost. It is up to the user to drain them
		// before signaling.
		sc.re = ErrPersistEOF
	}
	sc.lk.Unlock()

	err := resp.Write(c)

	sc.lk.Lock()
	defer sc.lk.Unlock()
	if err != nil {
		sc.we = err
		return err
	}
	sc.nwritten++

	return nil
}
func (c *Cli) CmdWatch(issue string, watcher string, remove bool) error {
	log.Debug("watch called: watcher: %q, remove: %t", watcher, remove)

	var uri string
	json, err := jsonEncode(watcher)
	if err != nil {
		return err
	}

	if c.getOptBool("dryrun", false) {
		if !remove {
			log.Debug("POST: %s", json)
			log.Debug("Dryrun mode, skipping POST")
		} else {
			log.Debug("DELETE: %s", watcher)
			log.Debug("Dryrun mode, skipping DELETE")
		}
		return nil
	}

	var resp *http.Response
	if !remove {
		uri = fmt.Sprintf("%s/rest/api/2/issue/%s/watchers", c.endpoint, issue)
		resp, err = c.post(uri, json)
	} else {
		uri = fmt.Sprintf("%s/rest/api/2/issue/%s/watchers?username=%s", c.endpoint, issue, watcher)
		resp, err = c.delete(uri)
	}
	if err != nil {
		return err
	}

	if resp.StatusCode == 204 {
		c.Browse(issue)
		if !c.opts["quiet"].(bool) {
			fmt.Printf("OK %s %s/browse/%s\n", issue, c.endpoint, issue)
		}
	} else {
		logBuffer := bytes.NewBuffer(make([]byte, 0))
		resp.Write(logBuffer)
		if !remove {
			err = fmt.Errorf("Unexpected Response From POST")
		} else {
			err = fmt.Errorf("Unexpected Response From DELETE")
		}
		log.Error("%s:\n%s", err, logBuffer)
		return err
	}
	return nil
}
func makeServe(connectionClose bool, processTime time.Duration, bodyLength int, slow *SlowReaderWriter) ConnectionHandler {
	return func(t *testing.T, conn net.Conn) {
		defer conn.Close()
		var br *bufio.Reader = bufio.NewReader(conn)
		var w io.Writer = conn
		body := strings.Repeat("x", bodyLength)
		if slow != nil {
			slow.R = conn
			slow.W = conn
			br = bufio.NewReader(slow)
			w = io.Writer(slow)
		}
		for i := 1; i < 10; i++ {
			request, err := http.ReadRequest(br)
			if err != nil {
				t.Error("Read:", err.Error())
			}
			response := http.Response{
				Status:     "200 OK",
				StatusCode: 200,
				Proto:      "HTTP/1.1",
				ProtoMajor: 1,
				ProtoMinor: 1,
				// No RDelay, used only to provide Close method to strings.Reader.
				Body:          &SlowReaderWriter{R: strings.NewReader(body), Size: bodyLength},
				ContentLength: int64(bodyLength),
				Close:         connectionClose,
				Header:        make(http.Header),
				Request:       request,
			}
			response.Header.Set("Content-Type", "text/plain")
			response.Header.Set("Content-Length", fmt.Sprintf("%d", bodyLength))
			// dw := bytes.NewBuffer(make([]byte, 100000))
			// response.Write(dw)
			// println("Write:", dw.String())
			err = response.Write(w)
			if err != nil {
				t.Error("Write:", err.Error())
			}
		}
		t.Fatal("Too many requests on one connection")
	}
}
func updateRateHandler(w http.ResponseWriter, r *http.Request) {
	if DEBUG_WEB_SERVER {
		fmt.Println("GET AN UPDATE RATE REQUEST")
	}
	rateValueStr := r.FormValue("rate")
	rateValue, err := strconv.ParseInt(rateValueStr, 0, 0)
	var response http.Response
	if err == nil && rateValue > 0 {
		updateRate(int(rateValue))
		if DEBUG_WEB_SERVER {
			fmt.Println("Update lambda, new value =", rate)
		}
		response.StatusCode = 200
	} else {
		response.StatusCode = 400
	}
	response.Write(w)
}
func (c *Cli) CmdVote(issue string, up bool) error {
	log.Debug("vote called, with up: %t", up)

	uri := fmt.Sprintf("%s/rest/api/2/issue/%s/votes", c.endpoint, issue)
	if c.getOptBool("dryrun", false) {
		if up {
			log.Debug("POST: %s", "")
			log.Debug("Dryrun mode, skipping POST")
		} else {
			log.Debug("DELETE: %s", "")
			log.Debug("Dryrun mode, skipping DELETE")
		}
		return nil
	}

	var resp *http.Response
	var err error
	if up {
		resp, err = c.post(uri, "")
	} else {
		resp, err = c.delete(uri)
	}
	if err != nil {
		return err
	}

	if resp.StatusCode == 204 {
		c.Browse(issue)
		if !c.opts["quiet"].(bool) {
			fmt.Printf("OK %s %s/browse/%s\n", issue, c.endpoint, issue)
		}
	} else {
		logBuffer := bytes.NewBuffer(make([]byte, 0))
		resp.Write(logBuffer)
		if up {
			err = fmt.Errorf("Unexpected Response From POST")
		} else {
			err = fmt.Errorf("Unexpected Response From DELETE")
		}
		log.Error("%s:\n%s", err, logBuffer)
		return err
	}
	return nil
}
func (srv *Server) handlerWriteResponse(request *Request, res *http.Response, c net.Conn, bw *bufio.Writer) {
	request.startPipelineStage("server.ResponseWrite")
	request.CurrentStage.Type = PipelineStageTypeOverhead
	var nodelay = srv.setNoDelay(c, false)
	if nodelay {
		res.Write(bw)
		bw.Flush()
		srv.setNoDelay(c, true)
	} else {
		res.Write(bw)
		bw.Flush()
	}
	if res.Body != nil {
		res.Body.Close()
	}
	request.finishPipelineStage()
	request.finishRequest()
	srv.requestFinished(request, res)
}
func (p proxyHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	r.Body.Close()

	conn, _, err := w.(http.Hijacker).Hijack()
	if err != nil {
		log.Println("Failed to hijack connection in ProxyConnections.", err)
		return
	}
	defer conn.Close()

	if _, ok := conn.(*tls.Conn); !ok {
		log.Println("Received a non-TLS connection in ProxyConnections.")
		return
	}

	// Send the connection accepted response.
	res := new(http.Response)
	res.Status = "200 Connection Established"
	res.StatusCode = http.StatusOK
	res.Proto = "HTTP/1.1"
	res.ProtoMajor = 1
	res.ProtoMinor = 1
	if err = res.Write(conn); err != nil {
		log.Println("Failed to send connection established message in ProxyConnections.", err)
		return
	}

	client, err := NewClientConn(conn, nil, 3, 1)
	if err != nil {
		log.Println("Error creating SPDY connection in ProxyConnections.", err)
		return
	}
	go client.Run()

	// Call user code.
	p.ProxyConnHandle(client)

	client.Close()
}
// DumpResponse is like DumpRequest but dumps a response.
func DumpResponse(resp *http.Response, body bool) (dump []byte, err error) {
	var b bytes.Buffer
	save := resp.Body
	savecl := resp.ContentLength
	if !body || resp.Body == nil {
		resp.Body = nil
		resp.ContentLength = 0
	} else {
		save, resp.Body, err = drainBody(resp.Body)
		if err != nil {
			return
		}
	}
	err = resp.Write(&b)
	resp.Body = save
	resp.ContentLength = savecl
	if err != nil {
		return
	}
	dump = b.Bytes()
	return
}
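// Sketch (not the httputil internals above): the drainBody helper used by the
// DumpResponse variants buffers the body so it can be both written out and
// restored afterwards. A simple stand-in using only the standard library might
// look like this; the name and signature below are illustrative, not the real
// unexported helper.
func drainBodySketch(b io.ReadCloser) (saved, replacement io.ReadCloser, err error) {
	data, err := io.ReadAll(b)
	if err != nil {
		return nil, nil, err
	}
	if err := b.Close(); err != nil {
		return nil, nil, err
	}
	// Two independent readers over the same bytes: one to keep on the
	// Response, one for Write to consume.
	return io.NopCloser(bytes.NewReader(data)), io.NopCloser(bytes.NewReader(data)), nil
}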
// Write the response message to the websocket
func writeResponseMessage(t *WSTunnelClient, id int16, resp *http.Response) {
	// Get writer's lock
	wsWriterMutex.Lock()
	defer wsWriterMutex.Unlock()
	// Write response into the tunnel
	t.ws.SetWriteDeadline(time.Now().Add(time.Minute))
	w, err := t.ws.NextWriter(websocket.BinaryMessage)
	// got an error, reply with a "hey, retry" to the request handler
	if err != nil {
		t.Log.Warn("WS NextWriter", "err", err.Error())
		t.ws.Close()
		return
	}
	// write the request Id
	_, err = fmt.Fprintf(w, "%04x", id)
	if err != nil {
		t.Log.Warn("WS cannot write request Id", "err", err.Error())
		t.ws.Close()
		return
	}
	// write the response itself
	err = resp.Write(w)
	if err != nil {
		t.Log.Warn("WS cannot write response", "err", err.Error())
		t.ws.Close()
		return
	}
	// done
	err = w.Close()
	if err != nil {
		t.Log.Warn("WS write-close failed", "err", err.Error())
		t.ws.Close()
		return
	}
}
func serveConn(c net.Conn, cfg *oauth2.Config) (string, error) {
	defer c.Close()
	req, err := http.ReadRequest(bufio.NewReader(c))
	if err != nil {
		return "", err
	}
	var res http.Response
	res.Header = http.Header(map[string][]string{})
	code := req.FormValue("code")
	if code == "" {
		url := cfg.AuthCodeURL("")
		res.StatusCode = http.StatusTemporaryRedirect
		res.Header.Set("Location", url)
		if err := res.Write(c); err != nil {
			return "", err
		}
		if _, err := fmt.Fprintf(c, "%s\n", url); err != nil {
			return "", err
		}
	} else {
		res.StatusCode = http.StatusOK
		res.ContentLength = int64(len(allDonePage))
		res.Header.Set("Content-Type", "text/html;charset=utf-8")
		if err := res.Write(c); err != nil {
			return "", err
		}
		if _, err := fmt.Fprintln(c, allDonePage); err != nil {
			return "", err
		}
	}
	return code, nil
}
// ProcessRequest will run any checks on the request on the way through the system, return an error to have the chain fail
func (m *RedisCacheMiddleware) ProcessRequest(w http.ResponseWriter, r *http.Request, configuration interface{}) (error, int) {
	// Allow global cache disable
	if !m.Spec.APIDefinition.CacheOptions.EnableCache {
		return nil, 200
	}

	var stat RequestStatus
	var isVirtual bool
	// Only allow idempotent (safe) methods
	if r.Method == "GET" || r.Method == "OPTIONS" || r.Method == "HEAD" {
		// Let's see if we can throw a sledgehammer at this
		if m.Spec.APIDefinition.CacheOptions.CacheAllSafeRequests {
			stat = StatusCached
		} else {
			// New request checker, more targeted, less likely to fail
			_, versionPaths, _, _ := m.TykMiddleware.Spec.GetVersionData(r)
			found, _ := m.TykMiddleware.Spec.CheckSpecMatchesStatus(r.URL.Path, r.Method, versionPaths, Cached)
			isVirtual, _ = m.TykMiddleware.Spec.CheckSpecMatchesStatus(r.URL.Path, r.Method, versionPaths, VirtualPath)
			if found {
				stat = StatusCached
			}
		}

		// Cached route matched, let go
		if stat == StatusCached {
			var authHeaderValue string
			var ipErr error
			authVal := context.Get(r, AuthHeaderValue)

			// No authentication data? use the IP.
			if authVal == nil {
				authHeaderValue, ipErr = GetIP(r.RemoteAddr)
				if ipErr != nil {
					log.Error(ipErr)
					return nil, 200
				}
			} else {
				authHeaderValue = authVal.(string)
			}

			var copiedRequest *http.Request
			if config.AnalyticsConfig.EnableDetailedRecording {
				copiedRequest = CopyHttpRequest(r)
			}

			thisKey := m.CreateCheckSum(r, authHeaderValue)
			retBlob, found := m.CacheStore.GetKey(thisKey)
			if found != nil {
				log.Debug("Cache enabled, but record not found")
				// Pass through to proxy AND CACHE RESULT
				reqVal := new(http.Response)
				if isVirtual {
					log.Debug("This is a virtual function")
					thisVP := VirtualEndpoint{TykMiddleware: m.TykMiddleware}
					thisVP.New()
					reqVal = thisVP.ServeHTTPForCache(w, r)
				} else {
					// This passes through and will write the value to the writer, but spit out a copy for the cache
					log.Debug("Not virtual, passing")
					reqVal = m.sh.ServeHTTPWithCache(w, r)
				}

				cacheThisRequest := true
				cacheTTL := m.Spec.APIDefinition.CacheOptions.CacheTimeout

				// Are we using upstream cache control?
				if m.Spec.APIDefinition.CacheOptions.EnableUpstreamCacheControl {
					log.Debug("Upstream control enabled")
					// Do we cache?
					if reqVal.Header.Get(UPSTREAM_CACHE_HEADER_NAME) == "" {
						log.Warning("Upstream cache action not found, not caching")
						cacheThisRequest = false
					}
					// Do we override TTL?
					ttl := reqVal.Header.Get(UPSTREAM_CACHE_TTL_HEADER_NAME)
					if ttl != "" {
						log.Debug("TTL Set upstream")
						cacheAsInt, valErr := strconv.Atoi(ttl)
						if valErr != nil {
							log.Error("Failed to decode TTL cache value: ", valErr)
							cacheTTL = m.Spec.APIDefinition.CacheOptions.CacheTimeout
						} else {
							cacheTTL = int64(cacheAsInt)
						}
					}
				}

				if cacheThisRequest {
					log.Debug("Caching request to redis")
					var wireFormatReq bytes.Buffer
					reqVal.Write(&wireFormatReq)
					log.Debug("Cache TTL is:", cacheTTL)
					go m.CacheStore.SetKey(thisKey, wireFormatReq.String(), cacheTTL)
				}
				return nil, 666
			}

			retObj := bytes.NewReader([]byte(retBlob))
			log.Debug("Cache got: ", retBlob)

			asBufioReader := bufio.NewReader(retObj)
			newRes, resErr := http.ReadResponse(asBufioReader, r)
			if resErr != nil {
				log.Error("Could not create response object: ", resErr)
			}

			defer newRes.Body.Close()
			for _, h := range hopHeaders {
				newRes.Header.Del(h)
			}

			copyHeader(w.Header(), newRes.Header)
			sessObj := context.Get(r, SessionData)
			var thisSessionState SessionState

			// Only add ratelimit data to keyed sessions
			if sessObj != nil {
				thisSessionState = sessObj.(SessionState)
				w.Header().Set("X-RateLimit-Limit", strconv.Itoa(int(thisSessionState.QuotaMax)))
				w.Header().Set("X-RateLimit-Remaining", strconv.Itoa(int(thisSessionState.QuotaRemaining)))
				w.Header().Set("X-RateLimit-Reset", strconv.Itoa(int(thisSessionState.QuotaRenews)))
			}
			w.Header().Add("x-tyk-cached-response", "1")
			w.WriteHeader(newRes.StatusCode)
			m.Proxy.copyResponse(w, newRes.Body)

			// Record analytics
			if m.Spec.DoNotTrack == false {
				go m.sh.RecordHit(w, r, 0, newRes.StatusCode, copiedRequest, nil)
			}

			// Stop any further execution
			return nil, 666
		}
	}

	return nil, 200
}
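// A minimal, self-contained sketch of the wire-format round trip the cache
// middleware above depends on: Response.Write serializes the response into
// bytes that can be stored, and http.ReadResponse rebuilds it later. Standard
// library only; the function and variable names are illustrative and not part
// of the middleware.
func cacheRoundTripSketch() error {
	orig := &http.Response{
		Status:        "200 OK",
		StatusCode:    http.StatusOK,
		Proto:         "HTTP/1.1",
		ProtoMajor:    1,
		ProtoMinor:    1,
		Header:        http.Header{"Content-Type": {"text/plain"}},
		Body:          io.NopCloser(strings.NewReader("cached body")),
		ContentLength: int64(len("cached body")),
	}

	// Serialize to wire format (this is what would be stored in the cache).
	var wire bytes.Buffer
	if err := orig.Write(&wire); err != nil {
		return err
	}

	// Rebuild the response from the stored bytes.
	restored, err := http.ReadResponse(bufio.NewReader(&wire), nil)
	if err != nil {
		return err
	}
	defer restored.Body.Close()

	body, err := io.ReadAll(restored.Body)
	if err != nil {
		return err
	}
	fmt.Println(restored.StatusCode, string(body))
	return nil
}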
func (srv *Server) handler(c net.Conn) {
	startTime := time.Now()
	bpe := srv.bufferPool.take(c)
	defer srv.bufferPool.give(bpe)
	var closeSentinelChan = make(chan int)
	go srv.sentinel(c, closeSentinelChan)
	defer srv.connectionFinished(c, closeSentinelChan)
	var err error
	var req *http.Request
	// no keepalive (for now)
	reqCount := 0
	keepAlive := true
	for err == nil && keepAlive {
		if req, err = http.ReadRequest(bpe.br); err == nil {
			if req.Header.Get("Connection") != "Keep-Alive" {
				keepAlive = false
			}
			request := newRequest(req, c, startTime)
			reqCount++
			var res *http.Response

			pssInit := new(PipelineStageStat)
			pssInit.Name = "server.Init"
			pssInit.StartTime = startTime
			pssInit.EndTime = time.Now()
			request.appendPipelineStage(pssInit)
			// execute the pipeline
			if res = srv.Pipeline.execute(request); res == nil {
				res = SimpleResponse(req, 404, nil, "Not Found")
			}
			// cleanup
			request.startPipelineStage("server.ResponseWrite")
			req.Body.Close()
			// shutting down?
			select {
			case <-srv.stopAccepting:
				keepAlive = false
				res.Close = true
			default:
			}
			// write response
			if srv.sendfile {
				res.Write(c)
			} else {
				wbuf := bufio.NewWriter(c)
				res.Write(wbuf)
				wbuf.Flush()
			}
			if res.Body != nil {
				res.Body.Close()
			}
			request.finishPipelineStage()
			request.finishRequest()
			srv.requestFinished(request)
			// Reset the startTime
			// this isn't great since there may be lag between requests; but it's the best we've got
			startTime = time.Now()
		} else {
			// EOF is socket closed
			if nerr, ok := err.(net.Error); err != io.EOF && !(ok && nerr.Timeout()) {
				Error("%s %v ERROR reading request: <%T %v>", srv.serverLogPrefix(), c.RemoteAddr(), err, err)
			}
		}
	}
	//Debug("%s Processed %v requests on connection %v", srv.serverLogPrefix(), reqCount, c.RemoteAddr())
}
func (s *HttpSrever) Error(w io.Writer, StatusCode int, body string) {
	r := http.Response{StatusCode: StatusCode, Body: Body{strings.NewReader(body)}}
	r.Write(w)
}
func (google *GoogleConnection) Request(conn *SessionConnection, ev event.Event) (err error, res event.Event) {
	f := func(local, remote net.Conn, ch chan int) {
		io.Copy(remote, local)
		ch <- 1
		local.Close()
		remote.Close()
	}
	//L:
	switch ev.GetType() {
	case event.HTTP_REQUEST_EVENT_TYPE:
		req := ev.(*event.HTTPRequestEvent)
		if conn.Type == HTTPS_TUNNEL {
			var proxyConn net.Conn
			if len(googleLocalProxy) > 0 {
				proxyURL, _ := url.Parse(googleLocalProxy)
				proxyConn, err = net.Dial("tcp", proxyURL.Host)
				addr, _ := getLocalHostMapping(GOOGLE_HTTPS)
				connreq := req.RawReq
				connreq.Host = addr
				if nil == err {
					connreq.Write(proxyConn)
				}
			} else {
				addr := getGoogleHostport(true)
				proxyConn, err = net.DialTimeout("tcp", addr, connTimeoutSecs)
				if nil != err {
					//try again
					addr = getGoogleHostport(true)
					proxyConn, err = net.DialTimeout("tcp", addr, connTimeoutSecs)
				}
			}
			log.Printf("Session[%d]Request %s\n", req.GetHash(), util.GetURLString(req.RawReq, true))
			if nil == err {
				if len(googleLocalProxy) > 0 {
				} else {
					conn.LocalRawConn.Write([]byte("HTTP/1.1 200 Connection established\r\n\r\n"))
				}
			} else {
				return fmt.Errorf("No google proxy reachable:%v", err), nil
			}
			ch := make(chan int)
			go f(conn.LocalRawConn, proxyConn, ch)
			go f(proxyConn, conn.LocalRawConn, ch)
			atomic.AddInt32(&total_google_routine_num, 2)
			<-ch
			<-ch
			atomic.AddInt32(&total_google_routine_num, -2)
			proxyConn.Close()
			google.Close()
			conn.State = STATE_SESSION_CLOSE
		} else {
			google.proxyAddr = req.RawReq.Host
			log.Printf("Session[%d]Request %s\n", req.GetHash(), util.GetURLString(req.RawReq, true))
			req.RawReq.URL.Scheme = "http"
			req.RawReq.RequestURI = ""
			var resp *http.Response
			tryProxy := func() (*http.Response, error) {
				if google.manager == httpGoogleManager {
					return httpGoogleClient.Do(req.RawReq)
				}
				return httpsGoogleClient.Do(req.RawReq)
			}
			resp, err = tryProxy()
			if nil != err && strings.EqualFold(req.Method, "GET") {
				//try proxy again
				resp, err = tryProxy()
			}
			if nil != err {
				var tmp bytes.Buffer
				req.RawReq.Write(&tmp)
				log.Printf("Session[%d]Request error:%v\n%s\n", req.GetHash(), err, tmp.String())
				return err, nil
			}
			err = resp.Write(conn.LocalRawConn)
			if nil != err || !util.IsResponseKeepAlive(resp) || !util.IsRequestKeepAlive(req.RawReq) {
				conn.LocalRawConn.Close()
				conn.State = STATE_SESSION_CLOSE
			} else {
				log.Printf("Session[%d]Res %d %v\n", req.GetHash(), resp.StatusCode, resp.Header)
				conn.State = STATE_RECV_HTTP
			}
		}
	default:
	}
	return nil, nil
}
func (x *httpConn) WriteResponse(resp *http.Response) {
	err := resp.Write(x.writer)
	x.c.Assert(err, IsNil)
	x.writer.Flush()
}
func (s SuccessHandler) RecordHit(w http.ResponseWriter, r *http.Request, timing int64, code int, requestCopy *http.Request, responseCopy *http.Response) {
	if s.Spec.DoNotTrack {
		return
	}

	if config.StoreAnalytics(r) {
		t := time.Now()

		// Track the key ID if it exists
		authHeaderValue := context.Get(r, AuthHeaderValue)
		keyName := ""
		if authHeaderValue != nil {
			keyName = authHeaderValue.(string)
		}

		// Track version data
		version := s.Spec.getVersionFromRequest(r)
		if version == "" {
			version = "Non Versioned"
		}

		// If OAuth, we need to grab it from the session, which may or may not exist
		OauthClientID := ""
		tags := make([]string, 0)
		thisSessionState := context.Get(r, SessionData)
		if thisSessionState != nil {
			OauthClientID = thisSessionState.(SessionState).OauthClientID
			tags = thisSessionState.(SessionState).Tags
		}

		rawRequest := ""
		rawResponse := ""
		if config.AnalyticsConfig.EnableDetailedRecording {
			if requestCopy != nil {
				// Get the wire format representation
				var wireFormatReq bytes.Buffer
				requestCopy.Write(&wireFormatReq)
				rawRequest = b64.StdEncoding.EncodeToString(wireFormatReq.Bytes())
			}
			if responseCopy != nil {
				// Get the wire format representation
				var wireFormatRes bytes.Buffer
				responseCopy.Write(&wireFormatRes)
				rawResponse = b64.StdEncoding.EncodeToString(wireFormatRes.Bytes())
			}
		}

		thisRecord := AnalyticsRecord{
			r.Method,
			r.URL.Path,
			r.ContentLength,
			r.Header.Get("User-Agent"),
			t.Day(),
			t.Month(),
			t.Year(),
			t.Hour(),
			code,
			keyName,
			t,
			version,
			s.Spec.APIDefinition.Name,
			s.Spec.APIDefinition.APIID,
			s.Spec.APIDefinition.OrgID,
			OauthClientID,
			timing,
			rawRequest,
			rawResponse,
			tags,
			time.Now(),
		}

		expiresAfter := s.Spec.ExpireAnalyticsAfter
		if config.EnforceOrgDataAge {
			thisOrg := s.Spec.OrgID
			orgSessionState, found := s.GetOrgSession(thisOrg)
			if found {
				if orgSessionState.DataExpires > 0 {
					expiresAfter = orgSessionState.DataExpires
				}
			}
		}

		thisRecord.SetExpiry(expiresAfter)
		go analytics.RecordHit(thisRecord)
	}

	// Report in health check
	ReportHealthCheckValue(s.Spec.Health, RequestLog, strconv.FormatInt(int64(timing), 10))

	if doMemoryProfile {
		pprof.WriteHeapProfile(profileFile)
	}

	context.Clear(r)
}
func (x *conn) WriteResponse(resp *http.Response) {
	err := resp.Write(x.bw)
	x.c.Assert(err, IsNil)
	x.bw.Flush()
}
func (srv *Server) handler(c net.Conn) {
	startTime := time.Now()
	bpe := srv.bufferPool.take(c)
	defer srv.bufferPool.give(bpe)
	var closeSentinelChan = make(chan int)
	go srv.sentinel(c, closeSentinelChan)
	defer srv.connectionFinished(c, closeSentinelChan)
	var err error
	var req *http.Request
	// no keepalive (for now)
	reqCount := 0
	keepAlive := true
	for err == nil && keepAlive {
		if req, err = http.ReadRequest(bpe.br); err == nil {
			if req.Header.Get("Connection") != "Keep-Alive" {
				keepAlive = false
			}
			request := newRequest(req, c, startTime)
			reqCount++
			var res *http.Response

			pssInit := new(PipelineStageStat)
			pssInit.Name = "server.Init"
			pssInit.StartTime = startTime
			pssInit.EndTime = time.Now()
			request.appendPipelineStage(pssInit)
			// execute the pipeline
			if res = srv.Pipeline.execute(request); res == nil {
				res = SimpleResponse(req, 404, nil, "Not Found")
			}
			// cleanup
			request.startPipelineStage("server.ResponseWrite")
			req.Body.Close()
			// shutting down?
			select {
			case <-srv.stopAccepting:
				keepAlive = false
				res.Close = true
			default:
			}
			// The res.Write omits Content-length on 0 length bodies, and by spec,
			// it SHOULD. While this is not MUST, it's kinda broken. See sec 4.4
			// of rfc2616 and a 200 with a zero length does not satisfy any of the
			// 5 conditions if Connection: keep-alive is set :(
			// I'm forcing chunked which seems to work because I couldn't get the
			// content length to write if it was 0.
			// Specifically, the android http client waits forever if there's no
			// content-length instead of assuming zero at the end of headers. der.
			if res.ContentLength == 0 && len(res.TransferEncoding) == 0 && !((res.StatusCode-100 < 100) || res.StatusCode == 204 || res.StatusCode == 304) {
				res.TransferEncoding = []string{"identity"}
			}
			if res.ContentLength < 0 {
				res.TransferEncoding = []string{"chunked"}
			}
			// For HTTP/1.0 and Keep-Alive, sending the Connection: Keep-Alive response header is required
			// because close is default (opposite of 1.1)
			if keepAlive && !req.ProtoAtLeast(1, 1) {
				res.Header.Add("Connection", "Keep-Alive")
			}
			// write response
			if srv.sendfile {
				res.Write(c)
				srv.cycleNonBlock(c)
			} else {
				wbuf := bufio.NewWriter(c)
				res.Write(wbuf)
				wbuf.Flush()
			}
			if res.Body != nil {
				res.Body.Close()
			}
			request.finishPipelineStage()
			request.finishRequest()
			srv.requestFinished(request)
			if res.Close {
				keepAlive = false
			}
			// Reset the startTime
			// this isn't great since there may be lag between requests; but it's the best we've got
			startTime = time.Now()
		} else {
			// EOF is socket closed
			if nerr, ok := err.(net.Error); err != io.EOF && !(ok && nerr.Timeout()) {
				Error("%s %v ERROR reading request: <%T %v>", srv.serverLogPrefix(), c.RemoteAddr(), err, err)
			}
		}
	}
	//Debug("%s Processed %v requests on connection %v", srv.serverLogPrefix(), reqCount, c.RemoteAddr())
}
func (x *HttpConn) WriteResponse(resp *http.Response) {
	err := resp.Write(x.Writer)
	ExpectWithOffset(1, err).NotTo(HaveOccurred())
	x.Writer.Flush()
}