Example #1
0
// ServeHTTP is the muxer for the whole operation; every proxied API
// request enters here. It short-circuits /stats to the stats handler,
// looks the cleaned, lower-cased URL path up in validPages, runs the
// matching handler (nil entries fall back to defaultHandler), and
// writes the handler's XML payload. Unknown pages receive a
// synthesized, cacheable 404 API error document. Requests are logged
// per config or on any non-200 response, and requests slower than ten
// seconds are dumped to the debug log when debugging is on.
func (a APIMux) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	var resp *apicache.Response
	startTime := time.Now()

	// Best effort: a malformed form body simply yields empty params,
	// so the parse error is deliberately discarded.
	_ = req.ParseForm()

	url := path.Clean(req.URL.Path)
	if url == "/stats" {
		statsHandler(w, req)
		return
	}

	params := makeParams(req)

	debugLog.Printf("Starting request for %s...", url)

	w.Header().Add("Content-Type", "text/xml")
	if handler, valid := validPages[strings.ToLower(url)]; valid {
		if handler == nil {
			handler = defaultHandler
		}

		resp = handler(url, params)

		w.WriteHeader(resp.HTTPCode)
		w.Write(resp.Data)
	} else {
		// Unknown API page: emit a synthesized error document that
		// downstream caches may keep for a day.
		w.WriteHeader(http.StatusNotFound)
		w.Write(apicache.SynthesizeAPIError(http.StatusNotFound, "Invalid API page.", 24*time.Hour))
	}

	// resp stays nil on the 404 path, hence the nil guard.
	if conf.Logging.LogRequests || (resp != nil && resp.HTTPCode != http.StatusOK) {
		logRequest(req, url, params, resp, startTime)
	}

	// Compute the elapsed time once instead of calling time.Since twice.
	if elapsed := time.Since(startTime); debug && elapsed.Seconds() > 10 {
		debugLog.Printf("Slow Request took %.2f seconds:", elapsed.Seconds())
		debugLog.Printf("%+v", req)
	}
}
Example #2
0
// worker consumes apiReq jobs from reqChan until the channel is closed,
// acquiring both rate limiters before performing each upstream request
// and sending the completed apiReq back on its respChan.  workerID tags
// responses and indexes the per-worker workCount counter.  The global
// workerCount / activeWorkerCount gauges are maintained with atomics.
func worker(reqChan chan apiReq, workerID int) {
	atomic.AddInt32(&workerCount, 1)

	for req := range reqChan {
		var err, eErr, rErr error
		var errStr string

		atomic.AddInt32(&activeWorkerCount, 1)

		// Run both of the error limiters simultaneously rather than in
		// sequence. Still need both before we continue.
		errorLimiter := make(chan error)
		rpsLimiter := make(chan error)
		go func() {
			err := errorRateLimiter.Start(30 * time.Second)
			errorLimiter <- err
		}()
		go func() {
			err := rateLimiter.Start(30 * time.Second)
			rpsLimiter <- err
		}()
		// Both goroutines send exactly once, so both receives complete.
		eErr = <-errorLimiter
		rErr = <-rpsLimiter

		// Check the error limiter for timeouts
		if eErr != nil {
			err = eErr
			errStr = "error throttling"

			// If the rate limiter didn't timeout be sure to signal it that we
			// didn't do anything.
			// NOTE(review): Finish(true) appears to mean "release without
			// recording an event" — confirm against the limiter's contract.
			if rErr == nil {
				rateLimiter.Finish(true)
			}
		}
		if rErr != nil {
			err = rErr
			// Build a combined description when both limiters timed out.
			if errStr == "" {
				errStr = "rate limiting"
			} else {
				errStr += " and rate limiting"
			}

			// If the error limiter didn't also timeout be sure to signal it that we
			// didn't do anything.
			if eErr == nil {
				errorRateLimiter.Finish(true)
			}
		}
		// We're left with a single err and errStr for returning an error to the client.
		if err != nil {
			log.Printf("Rate Limit Error: %s - %s", errStr, err)
			log.Printf("RPS Events: %d Outstanding: %d", rateLimiter.Count(), rateLimiter.Outstanding())
			log.Printf("Errors Events: %d Outstanding: %d", errorRateLimiter.Count(), errorRateLimiter.Outstanding())

			// Synthesize a 504 response so the client gets a well-formed
			// API error document; cached/expiring in five minutes.
			req.apiResp = &apicache.Response{
				Data: apicache.SynthesizeAPIError(500,
					fmt.Sprintf("APIProxy Error: Proxy timeout due to %s.", errStr),
					5*time.Minute),
				Expires: time.Now().Add(5 * time.Minute),
				Error: apicache.APIError{500,
					fmt.Sprintf("APIProxy Error: Proxy timeout due to %s.", errStr)},
				HTTPCode: 504,
			}
			req.err = err
		} else {
			// Both limiter slots acquired: perform the actual upstream call.
			// NOTE(review): resp is dereferenced below without a nil check —
			// this assumes Do() always returns a non-nil response, even on
			// error; verify against the apicache package.
			resp, err := req.apiReq.Do()
			req.apiResp = resp
			req.err = err
			if resp.Error.ErrorCode == 0 || resp.HTTPCode == 504 || resp.HTTPCode == 418 {
				// 418 means we are currently tempbanned from the API.
				// 504 means the API proxy had some kind of internal or network error.
				//
				// We do not treat these as an error for rate limiting because
				// the apicache library handles it for us, these requests are
				// not actually making it to the CCP API.

				// Finish, but skip recording the event in the rate limiter
				// when there is no error.
				errorRateLimiter.Finish(true)
			} else {
				errorRateLimiter.Finish(false)
			}
			// The RPS limiter always records the request once it ran.
			rateLimiter.Finish(false)
		}

		// Hand the finished request back to whoever is waiting on it.
		req.worker = workerID
		req.respChan <- req
		atomic.AddInt32(&workCount[workerID], 1)
		atomic.AddInt32(&activeWorkerCount, -1)
	}
	atomic.AddInt32(&workerCount, -1)
}