// ClientHead is a retry wrapper for http://golang.org/pkg/net/http/#Client.Head,
// where attempts is the number of HTTP calls made (one plus the number of retries).
func (httpRetryClient *Client) ClientHead(c *http.Client, url string) (resp *http.Response, attempts int, err error) {
	return httpRetryClient.Retry(func() (*http.Response, error, error) {
		resp, err := c.Head(url)
		// assume all errors should result in a retry
		return resp, err, nil
	})
}
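// A minimal usage sketch for ClientHead. Constructing the retry client as a
// zero value is an assumption; only the Retry-based signature above is taken
// from the code (requires "net/http" and "log"):

func exampleClientHead() {
	retryClient := &Client{} // assumed: zero value uses a default retry policy
	resp, attempts, err := retryClient.ClientHead(http.DefaultClient, "https://example.com")
	if err != nil {
		log.Fatalf("HEAD failed after %d attempts: %v", attempts, err)
	}
	defer resp.Body.Close()
	log.Printf("got %s in %d attempt(s)", resp.Status, attempts)
}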
// isValid checks whether the raft algorithm is making progress on the leader;
// if not, the leader is isolated from the cluster.
func isValid(tr *http.Transport, ep string) bool {
	httpclient := http.Client{
		Transport: tr,
	}

	// we only need the headers of the response
	resp0, err := httpclient.Head(ep + "/v2/keys")
	if err != nil {
		return false
	}
	rt0, err1 := strconv.ParseUint(resp0.Header.Get("X-Raft-Term"), 10, 64)
	ri0, err2 := strconv.ParseUint(resp0.Header.Get("X-Raft-Index"), 10, 64)
	if err1 != nil || err2 != nil {
		return false
	}

	time.Sleep(time.Second)

	// we only need the headers of the response
	resp1, err := httpclient.Head(ep + "/v2/keys")
	if err != nil {
		return false
	}
	rt1, err1 := strconv.ParseUint(resp1.Header.Get("X-Raft-Term"), 10, 64)
	ri1, err2 := strconv.ParseUint(resp1.Header.Get("X-Raft-Index"), 10, 64)
	if err1 != nil || err2 != nil {
		return false
	}

	// if the term changed or the index did not advance, the raft algorithm is
	// not making progress and the leader is invalid
	if rt0 != rt1 || ri0 == ri1 {
		return false
	}
	return true
}
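// isValid polls the etcd v2 keys endpoint twice, one second apart, and compares
// the X-Raft-Term and X-Raft-Index response headers. A usage sketch with a
// plain transport; the endpoint below is hypothetical:

func exampleIsValid() {
	tr := &http.Transport{}
	if !isValid(tr, "http://127.0.0.1:2379") {
		log.Println("leader appears isolated: raft made no progress within 1s")
	}
}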
// startupHealthcheck is used at startup to check if the server is available
// at all.
func (c *Client) startupHealthcheck(timeout time.Duration) error {
	c.mu.Lock()
	urls := c.urls
	c.mu.Unlock()

	// If we don't get a connection after "timeout", we bail.
	start := time.Now()
	for {
		// Make a copy of the HTTP client provided via options to respect
		// settings like Basic Auth or a user-specified http.Transport.
		cl := new(http.Client)
		*cl = *c.c
		cl.Timeout = timeout
		for _, url := range urls {
			res, err := cl.Head(url)
			if err == nil && res != nil && res.StatusCode >= 200 && res.StatusCode < 300 {
				return nil
			}
		}
		time.Sleep(1 * time.Second)
		if time.Since(start) > timeout {
			break
		}
	}
	return ErrNoClient
}
func getCsrfToken(client *http.Client, url string) string {
	// CSRF support
	jar := &cookieJar{}
	jar.jar = make(map[string][]*http.Cookie)
	client.Jar = jar
	headResponse, err := client.Head(url)
	checkError(err)
	token := headResponse.Header.Get(csrfTokenHeader)
	return token
}
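// The cookieJar type and csrfTokenHeader constant are referenced above but not
// shown. A minimal sketch of what they might look like, assuming a simple
// in-memory jar keyed by host; the header name and both definitions are
// assumptions, not the original code (requires "net/http" and "net/url"):

const csrfTokenHeader = "X-Csrf-Token" // assumed header name

type cookieJar struct {
	jar map[string][]*http.Cookie
}

// SetCookies stores cookies for the request host.
func (j *cookieJar) SetCookies(u *url.URL, cookies []*http.Cookie) {
	j.jar[u.Host] = cookies
}

// Cookies returns the cookies previously stored for the request host.
func (j *cookieJar) Cookies(u *url.URL) []*http.Cookie {
	return j.jar[u.Host]
}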
func fetchNonce(client *http.Client, url string) (string, error) {
	resp, err := client.Head(url)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	enc := resp.Header.Get("replay-nonce")
	if enc == "" {
		return "", errors.New("acme: nonce not found")
	}
	return enc, nil
}
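// A minimal usage sketch for fetchNonce. The endpoint below is hypothetical;
// ACME servers typically expose a dedicated nonce URL (newNonce in RFC 8555):

func exampleFetchNonce() {
	client := &http.Client{Timeout: 10 * time.Second}
	nonce, err := fetchNonce(client, "https://acme.example.com/new-nonce")
	if err != nil {
		log.Fatalf("could not fetch nonce: %v", err)
	}
	log.Printf("replay-nonce: %s", nonce)
}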
func GetFileSize(url string, client *http.Client) int64 {
	resp, err := client.Head(url)
	if err != nil {
		log.Debug(err.Error())
		return -1
	}
	defer resp.Body.Close()
	if c := resp.StatusCode; c == 200 || (c > 300 && c <= 308) {
		return resp.ContentLength
	}
	return -1
}
func GetSize(urls string) (int64, error) {
	cl := http.Client{}
	resp, err := cl.Head(urls)
	if err != nil {
		log.Printf("error: failed to get file size: %v\n", err)
		return 0, err
	}
	if resp.StatusCode != 200 {
		log.Printf("error: file not found or moved, status: %d\n", resp.StatusCode)
		return 0, errors.New("error: file not found or moved")
	}
	log.Printf("info: file size is %d bytes\n", resp.ContentLength)
	return resp.ContentLength, nil
}
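// A usage sketch for GetSize. Note that resp.ContentLength is -1 when the
// server sends no Content-Length header, so callers may want to treat negative
// values as "unknown size". The URL below is hypothetical:

func exampleGetSize() {
	size, err := GetSize("https://example.com/archive.tar.gz")
	if err != nil || size < 0 {
		log.Println("size unknown; falling back to streaming download")
		return
	}
	log.Printf("will download %d bytes", size)
}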
func (i *BulkEveIndexer) CheckForRedirect() {
	httpClient := http.Client{
		CheckRedirect: func(request *http.Request, via []*http.Request) error {
			if request.Response != nil {
				location, err := request.Response.Location()
				if err == nil {
					log.Info("Redirection to %s detected, updating Elastic Search base URL.",
						location.String())
					i.baseUrl = location.String()
				}
			}
			return nil
		},
	}
	httpClient.Head(i.es.baseUrl)
}
// RequestedReturnCodeIsOK makes a HEAD or GET request. If the return code is
// 2XX it returns true.
func RequestedReturnCodeIsOK(client http.Client, url, function string) bool {
	var resp *http.Response
	var err error
	switch function {
	case "HEAD":
		resp, err = client.Head(url)
	case "GET":
		resp, err = client.Get(url)
	default:
		err = errors.New("Unknown Function")
	}
	return err == nil && isReturnCodeOK(resp)
}
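// isReturnCodeOK is referenced above but not shown. Given the doc comment, a
// minimal sketch might be (an assumption, not the original helper):

func isReturnCodeOK(resp *http.Response) bool {
	// any 2XX status code counts as OK
	return resp.StatusCode >= 200 && resp.StatusCode < 300
}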
func newWalkFunc(invalidLink *bool, client *http.Client) filepath.WalkFunc {
	return func(filePath string, info os.FileInfo, err error) error {
		if err != nil {
			// info may be nil when the walk itself failed
			return err
		}
		hasSuffix := false
		for _, suffix := range *fileSuffix {
			hasSuffix = hasSuffix || strings.HasSuffix(info.Name(), suffix)
		}
		if !hasSuffix {
			return nil
		}

		fileBytes, err := ioutil.ReadFile(filePath)
		if err != nil {
			return err
		}

		foundInvalid := false
		allURLs := xurls.Strict.FindAll(fileBytes, -1)
		fmt.Fprintf(os.Stdout, "\nChecking file %s\n", filePath)

	URL:
		for _, URL := range allURLs {
			// Don't check non-http/https URLs.
			if !httpOrhttpsReg.Match(URL) {
				continue
			}
			for _, whiteURL := range regWhiteList {
				if whiteURL.Match(URL) {
					continue URL
				}
			}
			if _, found := fullURLWhiteList[string(URL)]; found {
				continue
			}
			// Remove the htmlpreview prefix.
			processedURL := htmlpreviewReg.ReplaceAll(URL, []byte{})
			// Check if we have already visited the URL.
			if _, found := visitedURLs[string(processedURL)]; found {
				continue
			}
			visitedURLs[string(processedURL)] = struct{}{}

			retry := 0
			const maxRetry int = 3
			backoff := 100
			for retry < maxRetry {
				fmt.Fprintf(os.Stdout, "Visiting %s\n", string(processedURL))
				// Use verb HEAD to increase efficiency. However, some servers
				// do not handle HEAD well, so we need to try a GET to avoid
				// false alerts.
				resp, err := client.Head(string(processedURL))
				// URLs with a mock host or mock port will cause an error. If we
				// reported the error here, people would need to add the mock URL
				// to the whitelist every time they add one, which would be a
				// maintenance nightmare. Hence, we decide to only report 404 to
				// catch the cases where host and port are legit but the path is
				// not, which is the most common mistake in our docs.
				if err != nil {
					break
				}
				if resp.StatusCode == http.StatusTooManyRequests {
					retryAfter := resp.Header.Get("Retry-After")
					if seconds, err := strconv.Atoi(retryAfter); err == nil {
						backoff = seconds + 10
					}
					fmt.Fprintf(os.Stderr, "Got %d visiting %s, retry after %d seconds.\n", resp.StatusCode, string(URL), backoff)
					time.Sleep(time.Duration(backoff) * time.Second)
					backoff *= 2
					retry++
				} else if resp.StatusCode == http.StatusNotFound {
					// We only check for 404 errors for now; 401 and 403 errors
					// are hard to handle. Try a GET to avoid a false alert.
					resp, err = client.Get(string(processedURL))
					if err != nil {
						break
					}
					if resp.StatusCode != http.StatusNotFound {
						continue URL
					}
					foundInvalid = true
					fmt.Fprintf(os.Stderr, "Failed: in file %s, Got %d visiting %s\n", filePath, resp.StatusCode, string(URL))
					break
				} else {
					break
				}
			}
			if retry == maxRetry {
				foundInvalid = true
				fmt.Fprintf(os.Stderr, "Failed: in file %s, still got 429 visiting %s after %d retries\n", filePath, string(URL), maxRetry)
			}
		}
		if foundInvalid {
			*invalidLink = true
		}
		return nil
	}
}
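// newWalkFunc depends on package-level state (fileSuffix, the regex whitelists,
// visitedURLs) that must be initialized elsewhere. A minimal usage sketch under
// that assumption; the docs directory is hypothetical:

func exampleCheckLinks() {
	invalidLink := false
	client := &http.Client{Timeout: 30 * time.Second}
	if err := filepath.Walk("docs/", newWalkFunc(&invalidLink, client)); err != nil {
		log.Fatalf("walk failed: %v", err)
	}
	if invalidLink {
		os.Exit(1) // fail the run when a broken link was found
	}
}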
// IsReachable checks whether a URL is reachable on the internet.
func IsReachable(url string, reachTimeoutMs int) bool {
	client := http.Client{
		Timeout: time.Duration(reachTimeoutMs) * time.Millisecond,
	}
	_, err := client.Head(url)
	return err == nil
}
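// Note that IsReachable treats any HTTP response, including 4xx and 5xx, as
// reachable; only transport-level failures (DNS, connect, timeout) return
// false. A usage sketch:

func exampleIsReachable() {
	if !IsReachable("https://example.com", 2000) {
		log.Println("example.com is unreachable within 2s")
	}
}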
// pester provides all the logic of retries, concurrency, backoff, and logging
func (c *Client) pester(p params) (*http.Response, error) {
	resultCh := make(chan result)
	finishCh := make(chan struct{})

	// GET calls should be idempotent and can make use
	// of concurrency. Other verbs can mutate and should not
	// make use of the concurrency feature
	concurrency := c.Concurrency
	if p.verb != "GET" {
		concurrency = 1
	}

	// re-create the http client so we can leverage the std lib
	httpClient := http.Client{
		Transport:     c.Transport,
		CheckRedirect: c.CheckRedirect,
		Jar:           c.Jar,
		Timeout:       c.Timeout,
	}

	// if we have a request body, we need to save it for later
	var originalRequestBody []byte
	var originalBody []byte
	var err error
	if p.req != nil && p.req.Body != nil {
		originalRequestBody, err = ioutil.ReadAll(p.req.Body)
		if err != nil {
			return &http.Response{}, errors.New("error reading request body")
		}
		p.req.Body.Close()
	}
	if p.body != nil {
		originalBody, err = ioutil.ReadAll(p.body)
		if err != nil {
			return &http.Response{}, errors.New("error reading body")
		}
	}

	for req := 0; req < concurrency; req++ {
		go func(n int, p params) {
			resp := &http.Response{}
			var err error
			for i := 0; i < c.MaxRetries; i++ {
				select {
				case <-finishCh:
					return
				default:
				}

				// rehydrate the body (it is drained each read)
				if len(originalRequestBody) > 0 {
					p.req.Body = ioutil.NopCloser(bytes.NewBuffer(originalRequestBody))
				}
				if len(originalBody) > 0 {
					p.body = bytes.NewBuffer(originalBody)
				}

				// route the calls
				switch p.method {
				case "Do":
					resp, err = httpClient.Do(p.req)
				case "Get":
					resp, err = httpClient.Get(p.url)
				case "Head":
					resp, err = httpClient.Head(p.url)
				case "Post":
					resp, err = httpClient.Post(p.url, p.bodyType, p.body)
				case "PostForm":
					resp, err = httpClient.PostForm(p.url, p.data)
				}

				// 200 and 300 level responses are considered success and we are done
				if err == nil {
					if resp.StatusCode < 400 {
						resultCh <- result{resp: resp, err: err, req: n, retry: i}
						return
					}
					resp.Body.Close()
				}

				c.log(ErrEntry{
					Time:    time.Now(),
					Method:  p.method,
					Verb:    p.verb,
					URL:     p.url,
					Request: n,
					Retry:   i,
					Err:     err,
				})

				// prevent a 0 from causing the tick to block, pass additional microsecond
				<-time.Tick(c.Backoff(i) + 1*time.Microsecond)
			}
			resultCh <- result{resp: resp, err: err}
		}(req, p)
	}

	for {
		select {
		case res := <-resultCh:
			close(finishCh)
			c.SuccessReqNum = res.req
			c.SuccessRetryNum = res.retry
			return res.resp, res.err
		}
	}
}
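// c.Backoff is called above but not defined here. A minimal sketch of an
// exponential backoff strategy it could plug into (an assumption, not the
// library's actual default):

// exponentialBackoff returns 2^retry seconds: 1s, 2s, 4s, 8s, ...
func exponentialBackoff(retry int) time.Duration {
	return time.Duration(1<<uint(retry)) * time.Second
}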
func TestContentLength(t *testing.T) {
	// Start up a basic server and get the port
	pipeline := NewPipeline()
	srv := NewServer(0, pipeline)
	pipeline.Upstream.PushBack(NewRequestFilter(func(req *Request) *http.Response {
		for _, entry := range contentLengthTestData {
			if entry.method == req.HttpRequest.Method && entry.path == req.HttpRequest.URL.Path {
				var body io.Reader
				if entry.body != nil {
					body = bytes.NewBuffer(entry.body)
				}
				return SimpleResponse(req.HttpRequest, 200, nil, entry.resContentLength, body)
			}
		}
		panic("Thing not found")
	}))
	go func() {
		srv.ListenAndServe()
	}()
	<-srv.AcceptReady
	serverPort := srv.Port()

	// Connect and make some requests
	c := new(http.Client)
	for _, test := range contentLengthTestData {
		var res *http.Response
		var err error
		if test.method == "GET" {
			res, err = c.Get(fmt.Sprintf("http://localhost:%v%v", serverPort, test.path))
		} else {
			res, err = c.Head(fmt.Sprintf("http://localhost:%v%v", serverPort, test.path))
		}
		if err != nil || res == nil {
			t.Fatalf("Couldn't get req: %v", err)
		}

		isChunked := len(res.TransferEncoding) > 0 && res.TransferEncoding[0] == "chunked"
		if test.chunked {
			if !isChunked {
				t.Errorf("%s %s Expected a chunked response. Didn't get one. Content-Length: %v",
					test.method, test.path, res.ContentLength)
			}
		} else {
			if isChunked {
				t.Errorf("%s %s Response is chunked. Expected a content length", test.method, test.path)
			}
			if res.ContentLength != test.expectedContentLength {
				t.Errorf("%s %s Incorrect content length. Expected: %v Got: %v",
					test.method, test.path, test.expectedContentLength, res.ContentLength)
			}
		}

		if test.method == "GET" {
			bodyBuf := new(bytes.Buffer)
			io.Copy(bodyBuf, res.Body)
			body := bodyBuf.Bytes()
			if !bytes.Equal(body, test.body) {
				t.Errorf("%v Body mismatch.\n\tExpecting:\n\t%v\n\tGot:\n\t%v", test.path, test.body, body)
			}
		}
		res.Body.Close()
	}

	// Clean up
	srv.StopAccepting()
}
// pester provides all the logic of retries, concurrency, backoff, and logging
func (c *Client) pester(p params) (*http.Response, error) {
	resultCh := make(chan result)
	multiplexCh := make(chan result)
	finishCh := make(chan struct{})

	// track all requests that go out so we can close the late listener routine
	// that closes late incoming response bodies
	totalSentRequests := &sync.WaitGroup{}
	totalSentRequests.Add(1)
	defer totalSentRequests.Done()
	allRequestsBackCh := make(chan struct{})
	go func() {
		totalSentRequests.Wait()
		close(allRequestsBackCh)
	}()

	// GET calls should be idempotent and can make use
	// of concurrency. Other verbs can mutate and should not
	// make use of the concurrency feature
	concurrency := c.Concurrency
	if p.verb != "GET" {
		concurrency = 1
	}

	c.Lock()
	if c.hc == nil {
		c.hc = &http.Client{}
		c.hc.Transport = c.Transport
		c.hc.CheckRedirect = c.CheckRedirect
		c.hc.Jar = c.Jar
		c.hc.Timeout = c.Timeout
	}
	c.Unlock()

	// re-create the http client so we can leverage the std lib
	httpClient := http.Client{
		Transport:     c.hc.Transport,
		CheckRedirect: c.hc.CheckRedirect,
		Jar:           c.hc.Jar,
		Timeout:       c.hc.Timeout,
	}

	// if we have a request body, we need to save it for later
	var originalRequestBody []byte
	var originalBody []byte
	var err error
	if p.req != nil && p.req.Body != nil {
		originalRequestBody, err = ioutil.ReadAll(p.req.Body)
		if err != nil {
			return &http.Response{}, errors.New("error reading request body")
		}
		p.req.Body.Close()
	}
	if p.body != nil {
		originalBody, err = ioutil.ReadAll(p.body)
		if err != nil {
			return &http.Response{}, errors.New("error reading body")
		}
	}

	AttemptLimit := c.MaxRetries
	if AttemptLimit <= 0 {
		AttemptLimit = 1
	}

	for req := 0; req < concurrency; req++ {
		c.wg.Add(1)
		totalSentRequests.Add(1)
		go func(n int, p params) {
			defer c.wg.Done()
			defer totalSentRequests.Done()

			var err error
			for i := 1; i <= AttemptLimit; i++ {
				c.wg.Add(1)
				defer c.wg.Done()

				select {
				case <-finishCh:
					return
				default:
				}
				resp := &http.Response{}

				// rehydrate the body (it is drained each read)
				if len(originalRequestBody) > 0 {
					p.req.Body = ioutil.NopCloser(bytes.NewBuffer(originalRequestBody))
				}
				if len(originalBody) > 0 {
					p.body = bytes.NewBuffer(originalBody)
				}

				// route the calls
				switch p.method {
				case "Do":
					resp, err = httpClient.Do(p.req)
				case "Get":
					resp, err = httpClient.Get(p.url)
				case "Head":
					resp, err = httpClient.Head(p.url)
				case "Post":
					resp, err = httpClient.Post(p.url, p.bodyType, p.body)
				case "PostForm":
					resp, err = httpClient.PostForm(p.url, p.data)
				}

				// Early return if we have a valid result
				// Only retry (ie, continue the loop) on 5xx status codes
				if err == nil && resp.StatusCode < 500 {
					multiplexCh <- result{resp: resp, err: err, req: n, retry: i}
					return
				}

				c.log(ErrEntry{
					Time:    time.Now(),
					Method:  p.method,
					Verb:    p.verb,
					URL:     p.url,
					Request: n,
					Retry:   i + 1, // would remove, but would break backward compatibility
					Attempt: i,
					Err:     err,
				})

				// if it is the last iteration, grab the result (which is an error at this point)
				if i == AttemptLimit {
					multiplexCh <- result{resp: resp, err: err}
					return
				}

				// if we are retrying, we should close this response body to free the fd
				if resp != nil {
					resp.Body.Close()
				}

				// prevent a 0 from causing the tick to block, pass additional microsecond
				<-time.Tick(c.Backoff(i) + 1*time.Microsecond)
			}
		}(req, p)
	}

	// spin off the go routine so it can continually listen in on late results
	// and close the response bodies
	go func() {
		gotFirstResult := false
		for {
			select {
			case res := <-multiplexCh:
				if !gotFirstResult {
					gotFirstResult = true
					close(finishCh)
					resultCh <- res
				} else if res.resp != nil {
					// we only return one result to the caller; close all other
					// response bodies that come back. drain the body before close
					// so as to not prevent keepalive.
					// see https://gist.github.com/mholt/eba0f2cc96658be0f717
					io.Copy(ioutil.Discard, res.resp.Body)
					res.resp.Body.Close()
				}
			case <-allRequestsBackCh:
				// don't leave this goroutine running
				return
			}
		}
	}()

	select {
	case res := <-resultCh:
		c.Lock()
		defer c.Unlock()
		c.SuccessReqNum = res.req
		c.SuccessRetryNum = res.retry
		return res.resp, res.err
	}
}
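// A minimal usage sketch for the client above, assuming exported wrappers
// (Head, Get, etc.) route into pester as the method switch suggests. The field
// names Concurrency, MaxRetries, and Backoff come from the snippet; everything
// else, including the Head wrapper and the health URL, is an assumption:

func examplePester() {
	c := &Client{
		Concurrency: 3,                  // fan out idempotent GET/HEAD calls
		MaxRetries:  5,                  // attempts per request
		Backoff:     exponentialBackoff, // see the sketch earlier in this section
	}
	resp, err := c.Head("https://example.com/health")
	if err != nil {
		log.Fatalf("all retries failed: %v", err)
	}
	defer resp.Body.Close()
	log.Printf("status: %s", resp.Status)
}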
func checkServer(conf *ClientConfig, client *http.Client) {
	_, err := client.Head(conf.JobsResourceURL.String())
	if err != nil {
		log.Fatalf("ERROR: schedula server is unavailable: %v", err)
	}
}