func (l *looper) tick() {
	tickStart := l.clk.Now()
	err := l.tickFunc(l.batchSize)
	l.stats.TimingDuration(fmt.Sprintf("OCSP.%s.TickDuration", l.name), time.Since(tickStart), 1.0)
	l.stats.Inc(fmt.Sprintf("OCSP.%s.Ticks", l.name), 1, 1.0)
	tickEnd := tickStart.Add(time.Since(tickStart))
	expectedTickEnd := tickStart.Add(l.tickDur)
	if tickEnd.After(expectedTickEnd) {
		l.stats.Inc(fmt.Sprintf("OCSP.%s.LongTicks", l.name), 1, 1.0)
	}

	// Now that the stats are out of the way, check whether the tick function
	// failed. If it failed because the HSM is unavailable, lengthen sleepDur
	// using the exponentially increasing duration returned by core.RetryBackoff.
	sleepDur := expectedTickEnd.Sub(tickEnd)
	if err != nil {
		l.stats.Inc(fmt.Sprintf("OCSP.%s.FailedTicks", l.name), 1, 1.0)
		if _, ok := err.(core.ServiceUnavailableError); ok && l.failureBackoffFactor > 0 && l.failureBackoffMax > 0 {
			l.failures++
			sleepDur = core.RetryBackoff(l.failures, l.tickDur, l.failureBackoffMax, l.failureBackoffFactor)
		}
	} else if l.failures > 0 {
		// The tick succeeded after previous failures, so reset the counter.
		l.failures = 0
	}

	// Sleep for the remaining tick period or for the backoff time.
	l.clk.Sleep(sleepDur)
}
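Every snippet in this section feeds a failure or attempt count into core.RetryBackoff and sleeps for the duration it returns. The helper itself isn't reproduced here, so the following is a minimal sketch of what such a function could look like, assuming only the signature implied by the call sites (attempt count, base duration, maximum duration, growth factor); the jitter amount and clamping order are illustrative guesses, not Boulder's actual implementation.

import (
	"math/rand"
	"time"
)

// retryBackoff is a hypothetical stand-in for core.RetryBackoff, based only on
// how it is called in the surrounding snippets. It returns zero for the first
// attempt, grows the delay geometrically by factor for each subsequent attempt,
// adds jitter so independent callers don't retry in lockstep, and clamps the
// result to max.
func retryBackoff(retries int, base, max time.Duration, factor float64) time.Duration {
	if retries == 0 {
		return 0
	}
	backoff := float64(base)
	fMax := float64(max)
	for i := 1; i < retries && backoff < fMax; i++ {
		backoff *= factor
	}
	// +/-20% jitter (an assumed amount), then clamp to the configured maximum.
	backoff *= 0.8 + 0.4*rand.Float64()
	if backoff > fMax {
		backoff = fMax
	}
	return time.Duration(backoff)
}

Keying the sleep off an attempt counter rather than a fixed interval is what lets the loops below back off gently instead of hammering a dead HSM, Akamai endpoint, AMQP broker, or SMTP server.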
// Purge attempts to send a purge request to the Akamai CCU API cpc.retries number
// of times before giving up and returning ErrAllRetriesFailed.
func (cpc *CachePurgeClient) Purge(urls []string) error {
	successful := false
	for i := 0; i <= cpc.retries; i++ {
		// Wait before each attempt; the backoff returned by core.RetryBackoff
		// grows with the attempt number.
		cpc.clk.Sleep(core.RetryBackoff(i, cpc.retryBackoff, time.Minute, 1.3))

		err := cpc.purge(urls)
		if err != nil {
			if _, ok := err.(errFatal); ok {
				cpc.log.AuditErr(err)
				cpc.stats.Inc("CCU.FatalFailures", 1, 1.0)
				return err
			}
			cpc.stats.Inc("CCU.RetryableFailures", 1, 1.0)
			continue
		}
		successful = true
		break
	}

	if !successful {
		cpc.stats.Inc("CCU.FatalFailures", 1, 1.0)
		return ErrAllRetriesFailed
	}

	cpc.stats.Inc("CCU.SuccessfulPurges", 1, 1.0)
	return nil
}
func (l *looper) tick() {
	tickStart := l.clk.Now()
	ctx := context.TODO()
	err := l.tickFunc(ctx, l.batchSize)
	l.stats.TimingDuration("TickDuration", time.Since(tickStart))
	l.stats.Inc("Ticks", 1)
	tickEnd := tickStart.Add(time.Since(tickStart))
	expectedTickEnd := tickStart.Add(l.tickDur)
	if tickEnd.After(expectedTickEnd) {
		l.stats.Inc("LongTicks", 1)
	}

	// Now that the stats are out of the way, check whether the tick function
	// failed. If it did, lengthen sleepDur using the exponentially increasing
	// duration returned by core.RetryBackoff.
	sleepDur := expectedTickEnd.Sub(tickEnd)
	if err != nil {
		l.stats.Inc("FailedTicks", 1)
		l.failures++
		sleepDur = core.RetryBackoff(l.failures, l.tickDur, l.failureBackoffMax, l.failureBackoffFactor)
	} else if l.failures > 0 {
		// The tick succeeded after previous failures, so reset the counter.
		l.failures = 0
	}

	// Sleep for the remaining tick period or for the backoff time.
	l.clk.Sleep(sleepDur)
}
// reconnect attempts repeatedly to connect and subscribe to the named queue. It
// will loop forever until it succeeds. This is used for a running server, where
// we don't want to shut down because we lost our AMQP connection.
func (ac *amqpConnector) reconnect(config cmd.Config, log blog.SyslogWriter) {
	for i := 0; ; i++ {
		ac.clk.Sleep(core.RetryBackoff(i, ac.retryTimeoutBase, ac.retryTimeoutMax, 2))
		log.Info(fmt.Sprintf(" [!] attempting reconnect for %s", ac.queueName))
		err := ac.connect(config)
		if err != nil {
			log.Warning(fmt.Sprintf(" [!] %s", err))
			continue
		}
		break
	}
	log.Info(fmt.Sprintf(" [!] reconnect success for %s", ac.queueName))
	return
}
func (m *MailerImpl) reconnect() {
	for i := 0; ; i++ {
		sleepDuration := core.RetryBackoff(i, m.reconnectBase, m.reconnectMax, 2)
		m.log.Info(fmt.Sprintf("sleeping for %s before reconnecting mailer", sleepDuration))
		m.clk.Sleep(sleepDuration)
		m.log.Info("attempting to reconnect mailer")
		err := m.Connect()
		if err != nil {
			m.log.Warning(fmt.Sprintf("reconnect error: %s", err))
			continue
		}
		break
	}
	m.log.Info("reconnected successfully")
}
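To get a feel for the sleep schedule these loops produce, here is a small, hypothetical driver for the retryBackoff sketch above. With a one-second base, a doubling factor, and a ten-second cap, the nominal delays before jitter would be 0s, 1s, 2s, 4s, 8s, 10s, 10s, and so on.

import (
	"fmt"
	"time"
)

func main() {
	for attempt := 0; attempt < 7; attempt++ {
		// Hypothetical parameters: 1s base, 10s cap, doubling per attempt.
		d := retryBackoff(attempt, time.Second, 10*time.Second, 2)
		fmt.Printf("attempt %d: sleep ~%s\n", attempt, d)
	}
}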