// Execute runs the decode command
func (c *DecodeCommand) Execute(args []string) error {
    if opts.HmacKey == "" {
        return errors.New("Empty HMAC")
    }

    if len(args) == 0 {
        return errors.New("No url argument provided")
    }

    oURL := args[0]
    if oURL == "" {
        return errors.New("No url argument provided")
    }

    hmacKeyBytes := []byte(opts.HmacKey)

    u, err := url.Parse(oURL)
    if err != nil {
        return err
    }

    comp := strings.SplitN(u.Path, "/", 3)
    // guard against malformed paths so we don't index past the split
    if len(comp) < 3 {
        return errors.New("url path is not properly formatted")
    }

    decURL, valid := encoding.DecodeURL(hmacKeyBytes, comp[1], comp[2])
    if !valid {
        return errors.New("hmac is invalid")
    }
    fmt.Println(decURL)
    return nil
}
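// Illustrative sketch, not part of the original source: one way a caller might
// produce the "/<sigHash>/<encodedURL>" path that Execute above splits apart.
// It assumes the classic camo signing scheme (hex-encoded HMAC-SHA1 digest of
// the target URL, followed by the hex-encoded URL itself); the real encoding
// package may use a different digest or encoding. The function name is
// hypothetical. Requires crypto/hmac, crypto/sha1, and encoding/hex.
func encodeURLSketch(hmacKey []byte, targetURL string) string {
    // sign the target URL with the shared HMAC key
    mac := hmac.New(sha1.New, hmacKey)
    mac.Write([]byte(targetURL))
    sigHash := hex.EncodeToString(mac.Sum(nil))
    // hex-encode the URL itself so it is path-safe
    encodedURL := hex.EncodeToString([]byte(targetURL))
    return "/" + sigHash + "/" + encodedURL
}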
// ServeHTTP handles the client request, validates that the request is
// properly HMAC signed, filters based on the Allow list, and then proxies
// valid requests to the desired endpoint. Responses are filtered for
// proper image content types.
func (p *Proxy) ServeHTTP(w http.ResponseWriter, req *http.Request) {
    if p.metrics != nil {
        p.metrics.AddServed()
    }

    if p.config.DisableKeepAlivesFE {
        w.Header().Set("Connection", "close")
    }

    if req.Header.Get("Via") == p.config.ServerName {
        http.Error(w, "Request loop failure", http.StatusNotFound)
        return
    }

    // split path and get components
    components := strings.Split(req.URL.Path, "/")
    if len(components) < 3 {
        http.Error(w, "Malformed request path", http.StatusNotFound)
        return
    }
    sigHash, encodedURL := components[1], components[2]

    mlog.Debugm("client request", mlog.Map{"req": req})

    sURL, ok := encoding.DecodeURL(p.config.HMACKey, sigHash, encodedURL)
    if !ok {
        http.Error(w, "Bad Signature", http.StatusForbidden)
        return
    }

    mlog.Debugm("signed client url", mlog.Map{"url": sURL})

    u, err := url.Parse(sURL)
    if err != nil {
        mlog.Debugm("url parse error", mlog.Map{"err": err})
        http.Error(w, "Bad url", http.StatusBadRequest)
        return
    }

    u.Host = strings.ToLower(u.Host)
    if u.Host == "" || localhostRegex.MatchString(u.Host) {
        http.Error(w, "Bad url host", http.StatusNotFound)
        return
    }

    // if allowList is set, require match
    matchFound := true
    if len(p.allowList) > 0 {
        matchFound = false
        for _, rgx := range p.allowList {
            if rgx.MatchString(u.Host) {
                matchFound = true
            }
        }
    }
    if !matchFound {
        http.Error(w, "Allowlist host failure", http.StatusNotFound)
        return
    }

    // filter out rfc1918 hosts
    if ip := net.ParseIP(u.Host); ip != nil {
        if addr1918PrefixRegex.MatchString(ip.String()) {
            http.Error(w, "Denylist host failure", http.StatusNotFound)
            return
        }
    }

    nreq, err := http.NewRequest(req.Method, sURL, nil)
    if err != nil {
        mlog.Debugm("could not create NewRequest", mlog.Map{"err": err})
        http.Error(w, "Error Fetching Resource", http.StatusBadGateway)
        return
    }

    // filter headers
    p.copyHeader(&nreq.Header, &req.Header, &ValidReqHeaders)
    if req.Header.Get("X-Forwarded-For") == "" {
        host, _, err := net.SplitHostPort(req.RemoteAddr)
        if err == nil && !addr1918PrefixRegex.MatchString(host) {
            nreq.Header.Add("X-Forwarded-For", host)
        }
    }

    // add an accept header if the client didn't send one
    if nreq.Header.Get("Accept") == "" {
        nreq.Header.Add("Accept", "image/*")
    }

    nreq.Header.Add("User-Agent", p.config.ServerName)
    nreq.Header.Add("Via", p.config.ServerName)

    mlog.Debugm("built outgoing request", mlog.Map{"req": nreq})

    resp, err := p.client.Do(nreq)
    if err != nil {
        mlog.Debugm("could not connect to endpoint", mlog.Map{"err": err})
        // this is a bit janky, but better than peeling off the
        // 3 layers of wrapped errors and trying to get to net.OpError and
        // still having to rely on string comparison to find out if it is
        // a net.errClosing or not.
        errString := err.Error()
        // go 1.5 changes this to http.httpError
        // go 1.4 has this as net.OpError
        // and the error strings are different depending on which version too.
        if strings.Contains(errString, "timeout") || strings.Contains(errString, "Client.Timeout") {
            http.Error(w, "Error Fetching Resource", http.StatusGatewayTimeout)
        } else if strings.Contains(errString, "use of closed") {
            http.Error(w, "Error Fetching Resource", http.StatusBadGateway)
        } else {
            // some other error. call it a not found (camo compliant)
            http.Error(w, "Error Fetching Resource", http.StatusNotFound)
        }
        return
    }
    defer resp.Body.Close()

    mlog.Debugm("response from upstream", mlog.Map{"resp": resp})

    // check for too large a response
    if resp.ContentLength > p.config.MaxSize {
        mlog.Debugm("content length exceeded", mlog.Map{"url": sURL})
        http.Error(w, "Content length exceeded", http.StatusNotFound)
        return
    }

    switch resp.StatusCode {
    case 200:
        // check content type
        if !strings.HasPrefix(resp.Header.Get("Content-Type"), "image/") {
            mlog.Debugm("Non-Image content-type returned", mlog.Map{"type": u})
            http.Error(w, "Non-Image content-type returned", http.StatusBadRequest)
            return
        }
    case 300:
        http.Error(w, "Multiple choices not supported", http.StatusNotFound)
        return
    case 301, 302, 303, 307:
        // if we get a redirect here, we either disabled following,
        // or followed until max depth and still got one (redirect loop)
        http.Error(w, "Not Found", http.StatusNotFound)
        return
    case 304:
        h := w.Header()
        p.copyHeader(&h, &resp.Header, &ValidRespHeaders)
        w.WriteHeader(304)
        return
    case 404:
        http.Error(w, "Not Found", http.StatusNotFound)
        return
    case 500, 502, 503, 504:
        // upstream errors should probably just 502. client can try later.
        http.Error(w, "Error Fetching Resource", http.StatusBadGateway)
        return
    default:
        http.Error(w, "Not Found", http.StatusNotFound)
        return
    }

    h := w.Header()
    p.copyHeader(&h, &resp.Header, &ValidRespHeaders)
    w.WriteHeader(resp.StatusCode)

    // since this uses io.Copy on resp.Body, the upstream response is streamed
    // straight through to the client. This means it will nearly always end up
    // as a chunked response.
    bW, err := io.Copy(w, resp.Body)
    if err != nil {
        // only log broken pipe errors at debug level
        if isBrokenPipe(err) {
            mlog.Debugm("error writing response", mlog.Map{"err": err})
        } else {
            // unknown error and not a broken pipe
            mlog.Printm("error writing response", mlog.Map{"err": err})
        }

        // we may have written some bytes before the error
        if p.metrics != nil && bW != 0 {
            p.metrics.AddBytes(bW)
        }
        return
    }

    if p.metrics != nil {
        p.metrics.AddBytes(bW)
    }

    mlog.Debugm("response to client", mlog.Map{"resp": w})
}
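// Illustrative sketch, not part of the original source: isBrokenPipe is
// referenced above but not defined in this excerpt. A plausible implementation
// unwraps a *net.OpError and checks for EPIPE or ECONNRESET; the real helper
// may differ (and on newer Go versions the syscall error may be wrapped in an
// *os.SyscallError, which this direct comparison would miss). The function
// name is hypothetical. Requires net and syscall.
func isBrokenPipeSketch(err error) bool {
    if opErr, ok := err.(*net.OpError); ok {
        // a client that disconnected mid-transfer usually surfaces as one of these
        switch opErr.Err {
        case syscall.EPIPE, syscall.ECONNRESET:
            return true
        }
    }
    return false
}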