// GetBody reads the request body into the provided variable. The body is
// decoded as JSON unless "v" is a []byte or an io.Writer.
func (self Context) GetBody(v interface{}, max int64) (err error) {
	switch v.(type) {
	case []byte:
		_, err = http.MaxBytesReader(self.Res, self.Req.Body, max).Read(v.([]byte))
	case io.Writer:
		_, err = io.Copy(v.(io.Writer), http.MaxBytesReader(self.Res, self.Req.Body, max))
	default:
		err = json.NewDecoder(http.MaxBytesReader(self.Res, self.Req.Body, max)).Decode(v)
	}
	return err
}
func handleBuild(w http.ResponseWriter, r *http.Request) {
	r.ParseForm()
	var insecure bool
	rawInsecure := r.FormValue("insecure")
	if rawInsecure != "" && rawInsecure == "true" {
		insecure = true
	}
	j := &job{
		insecure: insecure,
		pkg:      r.URL.Path,
		tar:      http.MaxBytesReader(w, r.Body, MaxTarSize),
		done:     make(chan struct{}),
	}
	Q <- j
	<-j.done

	const httpTooLarge = "http: request body too large"
	if j.err != nil && j.err.Error() == httpTooLarge {
		http.Error(w, httpTooLarge, http.StatusRequestEntityTooLarge)
		return
	}
	if j.err != nil {
		log.Println(j.err)
		http.Error(w, "unprocessable entity", 422)
		w.Write(j.out)
		return
	}
	defer j.bin.Close()
	http.ServeContent(w, r, "", time.Time{}, j.bin)
}
func main() {
	var handlerInstance *HandlerObject = NewHandlerObject(123)

	var httpMux *http.ServeMux = http.NewServeMux()
	httpServer := &http.Server{
		Addr:           ":8080",
		Handler:        httpMux,
		ReadTimeout:    2 * time.Second,
		WriteTimeout:   2 * time.Second,
		MaxHeaderBytes: 1 << 20,
	}

	var httpsMux *http.ServeMux = http.NewServeMux()
	httpsServer := &http.Server{
		Addr:           ":8443",
		Handler:        httpsMux,
		ReadTimeout:    10 * time.Second,
		WriteTimeout:   10 * time.Second,
		MaxHeaderBytes: 1 << 20,
	}

	httpMux.Handle("/foo", handlerInstance)
	httpMux.HandleFunc("/bar", func(w http.ResponseWriter, r *http.Request) {
		log.Printf("%+v", r)
		r.Body = http.MaxBytesReader(w, nopCloser{r.Body}, 1024)
		fmt.Fprintf(w, "HandlerFunc, %q", html.EscapeString(r.URL.Path))
	})

	httpsMux.Handle("/foo", handlerInstance)
	httpsMux.HandleFunc("/bar", func(w http.ResponseWriter, r *http.Request) {
		log.Printf("%+v", r)
		r.Body = http.MaxBytesReader(w, nopCloser{r.Body}, 65536)
		fmt.Fprintf(w, "HandlerFunc, %q", html.EscapeString(r.URL.Path))
	})

	go func() {
		log.Println("Before starting HTTPS listener...")
		err := httpsServer.ListenAndServeTLS("server.crt", "server.key.insecure")
		if err != nil {
			log.Fatal("HTTPS listener couldn't start")
		}
	}()

	log.Println("Before starting HTTP listener...")
	err := httpServer.ListenAndServe()
	if err != nil {
		log.Fatal("HTTP listener couldn't start")
	}
}
func UploadFile(path string, maxMemory int64, w http.ResponseWriter, r *http.Request) (string, error) {
	r.Body = http.MaxBytesReader(w, r.Body, maxMemory)
	file, _, err := r.FormFile("file")
	if err != nil {
		return "", err
	}
	defer file.Close()

	randName, err := str.Rand(32)
	if err != nil {
		return "", err
	}

	filePath := filepath.Join(path, randName)
	f, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE, 0666)
	if err != nil {
		return "", err
	}
	defer f.Close()

	_, err = io.Copy(f, file)
	return randName, err
}
// convertHTTPRequestToAPIRequest converts the HTTP request query
// parameters and request body to the JSON object import format
// expected by the API request handlers.
func convertHTTPRequestToAPIRequest(
	w http.ResponseWriter,
	r *http.Request,
	requestBodyName string) (requestJSONObject, error) {

	params := make(requestJSONObject)

	for name, values := range r.URL.Query() {
		for _, value := range values {
			// Note: multiple values per name are ignored
			params[name] = value
			break
		}
	}

	if requestBodyName != "" {
		r.Body = http.MaxBytesReader(w, r.Body, MAX_API_PARAMS_SIZE)
		body, err := ioutil.ReadAll(r.Body)
		if err != nil {
			return nil, psiphon.ContextError(err)
		}
		var bodyParams requestJSONObject
		err = json.Unmarshal(body, &bodyParams)
		if err != nil {
			return nil, psiphon.ContextError(err)
		}
		params[requestBodyName] = bodyParams
	}

	return params, nil
}
func dataHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method != "POST" {
		w.Header().Set("Allow", "POST")
		w.WriteHeader(http.StatusMethodNotAllowed)
		w.Write([]byte("You must send a POST request to get data."))
		return
	}

	r.Body = http.MaxBytesReader(w, r.Body, MAX_REQSIZE)
	payload, err := ioutil.ReadAll(r.Body)
	if err != nil {
		w.Write([]byte(fmt.Sprintf("Could not read received POST payload: %v", err)))
		return
	}

	var wrapper RespWrapper = RespWrapper{w}

	uuidBytes, startTime, endTime, pw, token, _, success := parseDataRequest(string(payload), wrapper)

	if success {
		var loginsession *LoginSession
		if token != "" {
			loginsession = validateToken(token)
			if loginsession == nil {
				w.Write([]byte(ERROR_INVALID_TOKEN))
				return
			}
		}
		if hasPermission(loginsession, uuidBytes) {
			dr.MakeDataRequest(uuidBytes, startTime, endTime, uint8(pw), wrapper)
		} else {
			wrapper.GetWriter().Write([]byte("[]"))
		}
	}
}
func Urlresolve(conn *irc.Connection, resp string, message string) {
	a := strings.Split(message, " ")
	for _, b := range a {
		if strings.HasPrefix(b, "http://") || strings.HasPrefix(b, "https://") {
			response, err := http.Get(b)
			if err != nil {
				fmt.Println(err)
				return
			}
			defer response.Body.Close()
			c := http.MaxBytesReader(nil, response.Body, 10000)
			d := new(bytes.Buffer)
			d.ReadFrom(c)
			e := d.String()
			f := strings.Split(e, "<")
			for _, g := range f {
				if strings.Contains(g, "title>") || strings.Contains(g, "Title>") || strings.Contains(g, "TITLE>") {
					h := strings.TrimSpace(g)
					i := strings.Split(h, ">")
					j := i[1]
					k := strings.Replace(j, "\n", "", -1)
					l := strings.TrimSpace(k)
					conn.Privmsgf(resp, l)
					break
				}
			}
		}
	}
}
func newContext(w http.ResponseWriter, r *http.Request) (*Context, error) {
	var err error
	c := new(Context)
	c.Request = r
	c.RequestId = atomic.AddInt64(&globalReqId, 1)
	if w != nil {
		c.ResponseHeader = w.Header()
	}
	c.ResponseWriter = w
	c.Values = make(map[string]interface{})
	if r.Body != nil {
		mr := http.MaxBytesReader(w, r.Body, MaxBodyLength)
		c.RawPostData, err = ioutil.ReadAll(mr)
		r.Body.Close()
		if err != nil {
			return c, NewError(err.Error(), StatusBadRequest)
		}
	}
	return c, nil
}
func (h *httpHandler) handlePost(w http.ResponseWriter, r *http.Request) {
	r.Body = http.MaxBytesReader(w, r.Body, int64(maxSize))
	content, err := getContentFromForm(r)
	size := int64(len(content))
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	if err := h.stats.MakeSpaceFor(size); err != nil {
		http.Error(w, err.Error(), http.StatusServiceUnavailable)
		return
	}
	id, err := h.store.Put(content)
	if err != nil {
		log.Printf("Unknown error on POST: %v", err)
		h.stats.FreeSpace(size)
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	storage.SetupPasteDeletion(h.store, h.stats, id, size, *lifeTime)
	url := fmt.Sprintf("%s/%s", *siteURL, id)
	switch r.URL.Path {
	case "/redirect":
		http.Redirect(w, r, url, 302)
	default:
		fmt.Fprintln(w, url)
	}
}
func readReqJSON(w http.ResponseWriter, r *http.Request, n int64, v interface{}) bool {
	err := json.NewDecoder(http.MaxBytesReader(w, r.Body, n)).Decode(v)
	if err != nil {
		http.Error(w, "unprocessable entity", 422)
	}
	return err == nil
}
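// A minimal usage sketch for the readReqJSON helper above. The
// createNoteRequest type and createNoteHandler are hypothetical, not part
// of the original code; they only show that the helper writes the 422
// response itself, so a caller just checks the returned bool.
type createNoteRequest struct {
	Title string `json:"title"`
	Body  string `json:"body"`
}

func createNoteHandler(w http.ResponseWriter, r *http.Request) {
	var req createNoteRequest
	// Cap the request body at 1 MiB while decoding.
	if !readReqJSON(w, r, 1<<20, &req) {
		return
	}
	fmt.Fprintf(w, "created note %q\n", req.Title)
}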
func runHandler(resp http.ResponseWriter, req *http.Request,
	fn func(resp http.ResponseWriter, req *http.Request) error, errfn httputil.Error) {
	defer func() {
		if rv := recover(); rv != nil {
			err := errors.New("handler panic")
			logError(req, err, rv)
			errfn(resp, req, http.StatusInternalServerError, err)
		}
	}()

	if s := req.Header.Get("X-Real-Ip"); s != "" && httputil.StripPort(req.RemoteAddr) == "127.0.0.1" {
		req.RemoteAddr = s
	}

	req.Body = http.MaxBytesReader(resp, req.Body, 2048)
	req.ParseForm()
	var rb httputil.ResponseBuffer
	err := fn(&rb, req)
	if err == nil {
		rb.WriteTo(resp)
	} else if e, ok := err.(*httpError); ok {
		if e.status >= 500 {
			logError(req, err, nil)
		}
		errfn(resp, req, e.status, e.err)
	} else {
		logError(req, err, nil)
		errfn(resp, req, http.StatusInternalServerError, err)
	}
}
// Feed the body of req into the OR port, and write any data read from the OR
// port back to w.
func transact(session *Session, w http.ResponseWriter, req *http.Request) error {
	body := http.MaxBytesReader(w, req.Body, maxPayloadLength+1)
	_, err := io.Copy(session.Or, body)
	if err != nil {
		return fmt.Errorf("error copying body to ORPort: %s", scrubError(err))
	}

	buf := make([]byte, maxPayloadLength)
	session.Or.SetReadDeadline(time.Now().Add(turnaroundTimeout))
	n, err := session.Or.Read(buf)
	if err != nil {
		if e, ok := err.(net.Error); !ok || !e.Timeout() {
			httpInternalServerError(w)
			// Don't scrub err here because it always refers to localhost.
			return fmt.Errorf("reading from ORPort: %s", err)
		}
	}
	// log.Printf("read %d bytes from ORPort: %q", n, buf[:n])
	// Set a Content-Type to prevent Go and the CDN from trying to guess.
	w.Header().Set("Content-Type", "application/octet-stream")
	n, err = w.Write(buf[:n])
	if err != nil {
		return fmt.Errorf("error writing to response: %s", scrubError(err))
	}
	// log.Printf("wrote %d bytes to response", n)
	return nil
}
// This method handles all requests. It dispatches to handleInternal after
// handling / adapting websocket connections.
func handle(w http.ResponseWriter, r *http.Request) {
	if maxRequestSize := int64(Config.IntDefault("http.maxrequestsize", 0)); maxRequestSize > 0 {
		r.Body = http.MaxBytesReader(w, r.Body, maxRequestSize)
	}

	upgrade := r.Header.Get("Upgrade")
	if upgrade == "websocket" || upgrade == "Websocket" {
		websocket.Handler(func(ws *websocket.Conn) {
			// Override default Read/Write timeout with sane value for a web socket request
			ws.SetDeadline(time.Now().Add(time.Hour * 24))
			r.Method = "WS"
			handleInternal(w, r, ws)
		}).ServeHTTP(w, r)
	} else {
		w.Header().Set("Access-Control-Allow-Origin", "*")
		w.Header().Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE")
		w.Header().Set("Access-Control-Allow-Headers", "Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization, X-session, X-adm-token")
		// Stop here for a Preflighted OPTIONS request.
		if r.Method == "OPTIONS" {
			return
		}
		handleInternal(w, r, nil)
	}
}
func TestRequestBodyLimit(t *testing.T) {
	const limit = 1 << 20
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		r.Body = http.MaxBytesReader(w, r.Body, limit)
		n, err := io.Copy(ioutil.Discard, r.Body)
		if err == nil {
			t.Errorf("expected error from io.Copy")
		}
		if n != limit {
			t.Errorf("io.Copy = %d, want %d", n, limit)
		}
	}))
	defer ts.Close()

	nWritten := int64(0)
	req, _ := http.NewRequest("POST", ts.URL, io.LimitReader(countReader{neverEnding('a'), &nWritten}, limit*200))

	// Send the POST, but don't care it succeeds or not. The
	// remote side is going to reply and then close the TCP
	// connection, and HTTP doesn't really define if that's
	// allowed or not. Some HTTP clients will get the response
	// and some (like ours, currently) will complain that the
	// request write failed, without reading the response.
	//
	// But that's okay, since what we're really testing is that
	// the remote side hung up on us before we wrote too much.
	_, _ = http.DefaultClient.Do(req)

	if nWritten > limit*100 {
		t.Errorf("handler restricted the request body to %d bytes, but client managed to write %d",
			limit, nWritten)
	}
}
// CreateC handles the multipart form upload and creates an encrypted file
func CreateC(c *gin.Context) {
	var err error
	var duration time.Duration
	var once bool

	c.Request.Body = http.MaxBytesReader(c.Writer, c.Request.Body, conf.C.SizeLimit*utils.MegaByte)
	once = c.PostForm("once") != ""
	d := c.DefaultPostForm("duration", "1d")
	if val, ok := models.DurationMap[d]; ok {
		duration = val
	} else {
		logger.ErrC(c, "server", "Invalid duration", d)
		c.String(http.StatusBadRequest, "Invalid duration\n")
		c.AbortWithStatus(http.StatusBadRequest)
		return
	}
	fd, h, err := c.Request.FormFile("file")
	if err != nil {
		logger.ErrC(c, "server", "Couldn't read file", err)
		c.String(http.StatusRequestEntityTooLarge, "Entity is too large (Max : %v MB)\n", conf.C.SizeLimit)
		c.AbortWithStatus(http.StatusRequestEntityTooLarge)
		return
	}
	defer fd.Close()
	res := models.NewResourceFromForm(h, once, duration)
	k, err := res.WriteEncrypted(fd)
	if err != nil {
		logger.ErrC(c, "server", "Couldn't write file", err)
		c.String(http.StatusInternalServerError, "Something went wrong on the server. Try again later.")
		c.AbortWithStatus(http.StatusInternalServerError)
		return
	}
	if conf.C.DiskQuota > 0 {
		if models.S.CurrentSize+uint64(res.Size) > uint64(conf.C.DiskQuota*utils.GigaByte) {
			logger.ErrC(c, "server", "Quota exceeded")
			c.String(http.StatusBadRequest, "Insufficient disk space. Try again later.")
			c.AbortWithStatus(http.StatusBadRequest)
			os.Remove(path.Join(conf.C.UploadDir, res.Key))
			return
		}
	}
	if err = res.Save(); err != nil {
		logger.ErrC(c, "server", "Couldn't save in the database", err)
		c.String(http.StatusInternalServerError, "Something went wrong on the server. Try again later.")
		c.AbortWithStatus(http.StatusInternalServerError)
		return
	}
	res.LogCreated(c)
	ns := conf.C.NameServer
	if conf.C.AppendPort {
		ns = fmt.Sprintf("%s:%d", conf.C.NameServer, conf.C.Port)
	}
	c.String(http.StatusCreated, "%v://%s/v/%s/%s\n", utils.DetectScheme(c), ns, res.Key, k)
}
func (h *JQHandler) handleJqSharePost(c *gin.Context) {
	if size, err := h.checkReqSize(c); err != nil {
		h.logger(c).WithField("size", size).WithError(err).Info("req too large")
		c.String(http.StatusExpectationFailed, err.Error())
		return
	}

	c.Request.Body = http.MaxBytesReader(c.Writer, c.Request.Body, JSONPayloadLimit)

	var jq *jq.JQ
	if err := c.BindJSON(&jq); err != nil {
		err = fmt.Errorf("error parsing JSON: %s", err)
		h.logger(c).WithError(err).Info("error parsing JSON")
		c.String(http.StatusUnprocessableEntity, err.Error())
		return
	}

	if err := jq.Validate(); err != nil {
		c.String(http.StatusUnprocessableEntity, err.Error())
		return
	}

	id, err := h.DB.UpsertSnippet(FromJQ(jq))
	if err != nil {
		h.logger(c).WithError(err).Info("error upserting snippet")
		c.String(http.StatusUnprocessableEntity, "error sharing snippet")
		return
	}

	c.String(http.StatusCreated, id)
}
func (c *Control) volumeMount(w http.ResponseWriter, req *http.Request) {
	const reqMaxSize = 4096
	buf, err := ioutil.ReadAll(http.MaxBytesReader(w, req.Body, reqMaxSize))
	if err != nil {
		// they really should export that error
		if err.Error() == "http: request body too large" {
			http.Error(w, err.Error(), http.StatusRequestEntityTooLarge)
			return
		}
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	var msg wire.VolumeMountRequest
	err = msg.Unmarshal(buf)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	_, err = c.app.Mount(msg.VolumeName, msg.Mountpoint)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
}
// Process implements the Middleware interface.
func (m *FormMiddleware) Process(app *Application, c *Context, next func() error) error {
	c.Request.Body = http.MaxBytesReader(c.Response, c.Request.Body, app.Config.MaxClientBodySize)
	if err := c.Request.ParseMultipartForm(app.Config.MaxClientBodySize); err != nil && err != http.ErrNotMultipart {
		return err
	}
	c.Params = c.newParams()
	return next()
}
func (srv *server) transactUDP(s *session, w http.ResponseWriter, req *http.Request) (err error) {
	body := http.MaxBytesReader(w, req.Body, maxPayloadLength+1)
	udpHeader := req.Header.Get(headerUDPPkts)
	// log.Printf("c -> s : %s", s)
	if udpHeader != "" {
		for _, v := range strings.Split(udpHeader, ",") {
			n, e := strconv.Atoi(v)
			if e != nil {
				err = fmt.Errorf("error to split UDP packets: %s", e)
				return
			}
			p := make([]byte, n)
			_, e = io.ReadFull(body, p)
			if e != nil {
				err = fmt.Errorf("error to split UDP packets: %s", e)
				return
			}
			s.uc.Write(p)
		}
	}

	start := time.Now()
	var data [][]byte
	var pkts []string
	total := 0
loop:
	for {
		var buf [maxPayloadLength]byte
		s.uc.SetReadDeadline(time.Now().Add(turnaroundTimeout))
		n, e := s.uc.Read(buf[:])
		if e != nil {
			e, ok := e.(net.Error)
			if !ok || !e.Timeout() {
				err = e
			}
			break loop
		}
		data = append(data, buf[:n])
		pkts = append(pkts, strconv.Itoa(n))
		total += n
		if total > socksReadBufSize {
			break loop
		}
		if time.Now().After(start.Add(maxTurnaroundTimeout)) {
			return
		}
	}

	w.Header().Set("Content-Type", "application/octet-stream")
	w.Header().Set("Content-Length", strconv.Itoa(total))
	if total > 0 {
		w.Header().Set(headerUDPPkts, strings.Join(pkts, ","))
	}
	// log.Printf("s -> c : %s", strings.Join(pkts, ","))
	w.Write(bytes.Join(data, []byte("")))
	return
}
func Data(ctx dingo.Context) ([]byte, error) {
	if ctx.Method != "POST" && ctx.Method != "PUT" {
		return nil, nil
	}
	defer ctx.Body.Close()
	reader := http.MaxBytesReader(ctx.Response, ctx.Body, MaxMsgSize)
	return ioutil.ReadAll(reader)
}
// Limit limits the body of a post, compresses the response, and formats eventual errors.
func Limit(ctx maze.IContext) (err error) {
	r := ctx.GetRequest()

	// https only -- redirect in openshift
	if HttpsOnly && !isHttps(r) {
		url := "https://" + r.Host + r.RequestURI
		logger.Debugf("redirecting to %s", url)
		http.Redirect(ctx.GetResponse(), r, url, http.StatusMovedPermanently)
		return
	}

	/*
		Very Important: Before compressing the response, the "Content-Type" header must be properly set!
	*/

	// encodes only text files
	var zip bool
	var ext = filepath.Ext(r.URL.Path)
	for _, v := range zipexts {
		if v == ext {
			zip = true
			break
		}
	}
	// TODO gzip encoding should occur only after a size threshold
	if zip && strings.Contains(fmt.Sprint(r.Header["Accept-Encoding"]), "gzip") {
		appCtx := ctx.(*AppCtx)
		w := appCtx.Response
		w.Header().Set("Content-Encoding", "gzip")
		// Get a Writer from the Pool
		gz := zippers.Get().(*gzip.Writer)
		// When done, put the Writer back in to the Pool
		defer zippers.Put(gz)
		// We use Reset to set the writer we want to use.
		gz.Reset(w)
		defer gz.Close()
		appCtx.Response = gzipResponseWriter{Writer: gz, ResponseWriter: w}
	}

	defer func() {
		if r := recover(); r != nil {
			if e, ok := r.(runtime.Error); ok {
				logger.Errorf("%s\n========== Begin Stack Trace ==========\n%s\n========== End Stack Trace ==========\n",
					e, debug.Stack())
			}
			err = formatError(ctx.GetResponse(), r.(error))
		}
	}()

	logger.Debugf("requesting %s", r.URL.Path)
	r.Body = http.MaxBytesReader(ctx.GetResponse(), r.Body, postLimit)
	err = ctx.Proceed()
	if err != nil {
		err = formatError(ctx.GetResponse(), err)
	}
	return err
}
func TestInputRAWLargePayload(t *testing.T) {
	wg := new(sync.WaitGroup)
	quit := make(chan int)

	// Generate a 100 KB file
	dd := exec.Command("dd", "if=/dev/urandom", "of=/tmp/large", "bs=1KB", "count=100")
	err := dd.Run()
	if err != nil {
		log.Fatal("dd error:", err)
	}

	origin := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		defer req.Body.Close()
		body, _ := ioutil.ReadAll(req.Body)

		if len(body) != 100*1000 {
			t.Error("File size should be 100000 bytes:", len(body))
		}

		wg.Done()
	}))
	originAddr := strings.Replace(origin.Listener.Addr().String(), "[::]", "127.0.0.1", -1)

	input := NewRAWInput(originAddr, time.Second)
	defer input.Close()

	replay := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		req.Body = http.MaxBytesReader(w, req.Body, 1*1024*1024)
		buf := make([]byte, 1*1024*1024)
		n, _ := req.Body.Read(buf)
		body := buf[0:n]

		if len(body) != 100*1000 {
			t.Error("File size should be 100000 bytes:", len(body))
		}

		wg.Done()
	}))
	defer replay.Close()

	httpOutput := NewHTTPOutput(replay.URL, &HTTPOutputConfig{Debug: false})

	Plugins.Inputs = []io.Reader{input}
	Plugins.Outputs = []io.Writer{httpOutput}

	go Start(quit)

	wg.Add(2)
	curl := exec.Command("curl", "http://"+originAddr, "--header", "Transfer-Encoding: chunked", "--data-binary", "@/tmp/large")
	err = curl.Run()
	if err != nil {
		log.Fatal("curl error:", err)
	}

	wg.Wait()
	close(quit)
}
// listMetrics retrieves a list of metrics on the localhost and sends
// it to the client.
func listMetrics(w http.ResponseWriter, r *http.Request) {
	logRequest(r)

	// Check our methods. We handle GET/POST.
	if r.Method != "GET" && r.Method != "POST" {
		http.Error(w, "Bad request method.", http.StatusBadRequest)
		return
	}

	// Do we need to init the metricsCache?
	if metricsCache == nil {
		metricsCache = NewMetricsCache()
	}

	// XXX: Calling r.FormValue will set a safety limit on the size of
	// the body of 10MiB which may be small for the amount of JSON data
	// included in a list command. Set the limit higher here. How
	// can we do this better? This is 160MiB.
	r.Body = http.MaxBytesReader(w, r.Body, 10<<24)

	// Handle case when we are currently building the cache
	if r.FormValue("force") != "" && metricsCache.IsAvailable() {
		metricsCache.RefreshCache()
	}
	metrics, ok := metricsCache.GetMetrics()
	if !ok {
		http.Error(w, "Cache update in progress.", http.StatusAccepted)
		return
	}

	// Options
	if r.FormValue("regex") != "" {
		m, err := FilterRegex(r.FormValue("regex"), metrics)
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		metrics = m
	}
	if r.FormValue("list") != "" {
		filter, err := unmarshalList(r.FormValue("list"))
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		metrics = FilterList(filter, metrics)
	}

	// Marshal the data back as a JSON list blob
	blob, err := json.Marshal(metrics)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		log.Printf("Error marshaling data: %s", err)
	} else {
		w.Header().Set("Content-Type", "application/json")
		w.Write(blob)
	}
}
// MuxHandler wraps a request handler into a MuxHandler. The MuxHandler initializes the request
// context by loading the request state, invokes the handler and in case of error invokes the
// controller (if there is one) or Service error handler.
// This function is intended for the controller generated code. User code should not need to call
// it directly.
func (ctrl *Controller) MuxHandler(name string, hdlr Handler, unm Unmarshaler) MuxHandler {
	// Make sure middleware doesn't get mounted later
	ctrl.Service.finalize()

	// Setup middleware outside of closure
	middleware := func(ctx context.Context, rw http.ResponseWriter, req *http.Request) error {
		if !ContextResponse(ctx).Written() {
			return hdlr(ctx, rw, req)
		}
		return nil
	}
	chain := append(ctrl.Service.middleware, ctrl.middleware...)
	ml := len(chain)
	for i := range chain {
		middleware = chain[ml-i-1](middleware)
	}

	return func(rw http.ResponseWriter, req *http.Request, params url.Values) {
		// Build context
		ctx := NewContext(WithAction(ctrl.Context, name), rw, req, params)

		// Protect against request bodies with unreasonable length
		if MaxRequestBodyLength > 0 {
			req.Body = http.MaxBytesReader(rw, req.Body, MaxRequestBodyLength)
		}

		// Load body if any
		var err error
		if req.ContentLength > 0 && unm != nil {
			err = unm(ctx, ctrl.Service, req)
		}

		// Handle invalid payload
		handler := middleware
		if err != nil {
			handler = func(ctx context.Context, rw http.ResponseWriter, req *http.Request) error {
				rw.Header().Set("Content-Type", ErrorMediaIdentifier)
				status := 400
				body := ErrInvalidEncoding(err)
				if err.Error() == "http: request body too large" {
					status = 413
					body = ErrRequestBodyTooLarge("body length exceeds %d bytes", MaxRequestBodyLength)
				}
				return ctrl.Service.Send(ctx, status, body)
			}
			for i := range chain {
				handler = chain[ml-i-1](handler)
			}
		}

		// Invoke middleware chain, errors should be caught earlier, e.g. by ErrorHandler middleware
		if err := handler(ctx, ContextResponse(ctx), req); err != nil {
			LogError(ctx, "uncaught error", "err", err)
			respBody := fmt.Sprintf("Internal error: %s", err) // Sprintf catches panics
			ctrl.Service.Send(ctx, 500, respBody)
		}
	}
}
func upload(w http.ResponseWriter, r *http.Request) {
	const MaxRequestSize = 10 << 20
	const MaxInMemory = 5 << 20

	if r.Method == "POST" {
		r.Body = http.MaxBytesReader(w, r.Body, MaxRequestSize)
		r.ParseMultipartForm(MaxInMemory)
		file, handler, err := r.FormFile("file")
		if err != nil {
			fmt.Println(err)
			fmt.Fprintf(w, err.Error())
			return
		}
		defer file.Close()

		// Validate image type and determine extension
		var ext string
		switch handler.Header.Get("Content-Type") {
		case "image/jpeg":
			ext = ".jpg"
		case "image/png":
			ext = ".png"
		case "image/gif":
			ext = ".gif"
		default:
			fmt.Fprintf(w, "Image must be JPEG, PNG, or GIF.")
			return
		}

		// Create unique, human-friendly filename
		h := sha256.New()
		h.Write([]byte(handler.Filename + time.Now().String()))
		name := strings.Replace(base64.StdEncoding.EncodeToString(h.Sum(nil)), "+", "", -1)
		name = strings.Replace(name, "/", "", -1)[:12] + ext

		// Write image to local directory
		f, err := os.OpenFile(UploadDir+name, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0666)
		if err != nil {
			fmt.Println(err)
			fmt.Fprintf(w, "Something went wrong on the backend.")
			return
		}
		defer f.Close()
		if ext == ".jpg" && ReencodeJPEG {
			// Re-encode jpeg to remove metadata
			img, _, _ := image.Decode(file)
			jpeg.Encode(f, img, nil)
		} else {
			io.Copy(f, file)
		}

		http.Redirect(w, r, ImageURL+name, 302)
	}
}
func loginHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method != "POST" {
		w.Header().Set("Allow", "POST")
		w.WriteHeader(http.StatusMethodNotAllowed)
		w.Write([]byte("To log in, make a POST request with JSON containing a username and password."))
		return
	}

	var err error
	var jsonLogin map[string]interface{}
	var usernameint interface{}
	var username string
	var passwordint interface{}
	var password string
	var ok bool

	r.Body = http.MaxBytesReader(w, r.Body, MAX_REQSIZE)
	var loginDecoder *json.Decoder = json.NewDecoder(r.Body)
	err = loginDecoder.Decode(&jsonLogin)
	if err != nil {
		w.Write([]byte(fmt.Sprintf("Error: received invalid JSON: %v", err)))
		return
	}

	usernameint, ok = jsonLogin["username"]
	if !ok {
		w.Write([]byte("Error: JSON must contain field 'username'"))
		return
	}

	passwordint, ok = jsonLogin["password"]
	if !ok {
		w.Write([]byte("Error: JSON must contain field 'password'"))
		return
	}

	username, ok = usernameint.(string)
	if !ok {
		w.Write([]byte("Error: field 'username' must be a string"))
		return
	}

	password, ok = passwordint.(string)
	if !ok {
		w.Write([]byte("Error: field 'password' must be a string"))
		return
	}

	tokenarr := userlogin(accountConn, username, []byte(password))
	if tokenarr != nil {
		token64buf := make([]byte, token64len)
		base64.StdEncoding.Encode(token64buf, tokenarr)
		w.Write(token64buf)
	}
}
func parseRequest(r *http.Request, w http.ResponseWriter, out interface{}) error {
	// Limit the maximum number of bytes to MaxRequestSize to protect
	// against an indefinite amount of data being read.
	limit := http.MaxBytesReader(w, r.Body, MaxRequestSize)
	err := jsonutil.DecodeJSONFromReader(limit, out)
	if err != nil && err != io.EOF {
		return errwrap.Wrapf("failed to parse JSON input: {{err}}", err)
	}
	return err
}
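// A hedged usage sketch for the parseRequest helper above. The echoRequest
// type and echoHandler are hypothetical, not from the original source; they
// only illustrate that parseRequest returns io.EOF unwrapped for an empty
// body, so a caller can treat that case as "no payload".
type echoRequest struct {
	Message string `json:"message"`
}

func echoHandler(w http.ResponseWriter, r *http.Request) {
	var req echoRequest
	if err := parseRequest(r, w, &req); err != nil && err != io.EOF {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	fmt.Fprintln(w, req.Message)
}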
func (dispatcher *Dispatcher) relayPayload(sessionCookie *http.Cookie, session *Session, responseWriter http.ResponseWriter, request *http.Request) error {
	body := http.MaxBytesReader(responseWriter, request.Body, MAX_PAYLOAD_LENGTH+1)
	requestBodySize, err := io.Copy(session.psiConn, body)
	if err != nil {
		return errors.New(fmt.Sprintf("writing payload to psiConn: %s", err))
	}
	session.BytesTransferred += requestBodySize

	throttle := dispatcher.config.ThrottleThresholdBytes > 0 &&
		session.IsThrottled &&
		session.BytesTransferred >= dispatcher.config.ThrottleThresholdBytes

	if session.meekProtocolVersion >= MEEK_PROTOCOL_VERSION_2 && session.meekSessionKeySent == false {
		http.SetCookie(responseWriter, sessionCookie)
		session.meekSessionKeySent = true
	}

	if !throttle && session.meekProtocolVersion >= MEEK_PROTOCOL_VERSION_1 {
		responseSize, err := copyWithTimeout(responseWriter, session.psiConn)
		if err != nil {
			return errors.New(fmt.Sprintf("reading payload from psiConn: %s", err))
		}
		session.BytesTransferred += responseSize
	} else {
		responseMaxPayloadLength := MAX_PAYLOAD_LENGTH
		if throttle {
			time.Sleep(
				time.Duration(dispatcher.config.ThrottleSleepMilliseconds) * time.Millisecond)
			responseMaxPayloadLength = int(float64(responseMaxPayloadLength) * dispatcher.config.ThrottleMaxPayloadSizeMultiple)
		}
		buf := make([]byte, responseMaxPayloadLength)
		session.psiConn.SetReadDeadline(time.Now().Add(TURN_AROUND_TIMEOUT))
		responseSize, err := session.psiConn.Read(buf)
		if err != nil {
			if e, ok := err.(net.Error); !ok || !e.Timeout() {
				return errors.New(fmt.Sprintf("reading from psiConn: %s", err))
			}
		}
		responseSize, err = responseWriter.Write(buf[:responseSize])
		if err != nil {
			return errors.New(fmt.Sprintf("writing to response: %s", err))
		}
		session.BytesTransferred += int64(responseSize)
	}
	return nil
}
// convertHTTPRequestToAPIRequest converts the HTTP request query
// parameters and request body to the JSON object import format
// expected by the API request handlers.
func convertHTTPRequestToAPIRequest(
	w http.ResponseWriter,
	r *http.Request,
	requestBodyName string) (requestJSONObject, error) {

	params := make(requestJSONObject)

	for name, values := range r.URL.Query() {
		for _, value := range values {
			// Note: multiple values per name are ignored

			// TODO: faster lookup?
			isArray := false
			for _, paramSpec := range baseRequestParams {
				if paramSpec.name == name {
					isArray = (paramSpec.flags&requestParamArray != 0)
					break
				}
			}

			if isArray {
				// Special case: a JSON encoded array
				var arrayValue []interface{}
				err := json.Unmarshal([]byte(value), &arrayValue)
				if err != nil {
					return nil, common.ContextError(err)
				}
				params[name] = arrayValue
			} else {
				// All other query parameters are simple strings
				params[name] = value
			}
			break
		}
	}

	if requestBodyName != "" {
		r.Body = http.MaxBytesReader(w, r.Body, MAX_API_PARAMS_SIZE)
		body, err := ioutil.ReadAll(r.Body)
		if err != nil {
			return nil, common.ContextError(err)
		}
		var bodyParams map[string]interface{}
		if len(body) != 0 {
			err = json.Unmarshal(body, &bodyParams)
			if err != nil {
				return nil, common.ContextError(err)
			}
			params[requestBodyName] = bodyParams
		}
	}

	return params, nil
}
func fileStoreHandler(w http.ResponseWriter, r *http.Request) {
	/* We *don't* always set the content-type to application/json here,
	 * for obvious reasons. Still do for the PUT/POST though. */
	chksum := r.URL.Path[12:]

	/* Eventually, both local storage (in-memory or on disk, depending) or
	 * uploading to s3 or a similar cloud storage provider needs to be
	 * supported. */
	switch r.Method {
	case "GET":
		w.Header().Set("Content-Type", "application/x-binary")
		fileStore, err := filestore.Get(chksum)
		if err != nil {
			http.Error(w, err.Error(), http.StatusNotFound)
			return
		}
		w.Write(*fileStore.Data)
	case "PUT", "POST":
		/* Seems like for file uploads we ought to support POST too. */
		w.Header().Set("Content-Type", "application/json")
		/* Need to distinguish file already existing and some
		 * sort of error with uploading the file. */
		if fileStore, _ := filestore.Get(chksum); fileStore != nil {
			fileErr := fmt.Errorf("File with checksum %s already exists.", chksum)
			/* Send status OK. It seems chef-pedant at least
			 * tries to upload files twice for some reason. */
			jsonErrorReport(w, r, fileErr.Error(), http.StatusOK)
			return
		}
		r.Body = http.MaxBytesReader(w, r.Body, config.Config.ObjMaxSize)
		fileStore, err := filestore.New(chksum, r.Body, r.ContentLength)
		if err != nil {
			jsonErrorReport(w, r, err.Error(), http.StatusInternalServerError)
			return
		}
		err = fileStore.Save()
		if err != nil {
			jsonErrorReport(w, r, err.Error(), http.StatusInternalServerError)
			return
		}
		fileResponse := make(map[string]string)
		fileResponse[fileStore.Chksum] = fmt.Sprintf("File with checksum %s uploaded.", fileStore.Chksum)
		enc := json.NewEncoder(w)
		if err := enc.Encode(&fileResponse); err != nil {
			jsonErrorReport(w, r, err.Error(), http.StatusInternalServerError)
		}
	/* Add DELETE later? */
	default:
		jsonErrorReport(w, r, "Unrecognized method!", http.StatusMethodNotAllowed)
	}
}