// nextSleep returns how long to wait before the next attempt, given the
// strategy's delay and the time of the last attempt.
func (a *Attempt) nextSleep(now time.Time) time.Duration {
	sleep := a.strategy.Delay - now.Sub(a.last)
	if sleep < 0 {
		return 0
	}
	return sleep
}
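// A minimal usage sketch for nextSleep. The strategy and Attempt definitions
// below are assumptions inferred from the method above (a strategy carrying a
// fixed Delay between attempts, and the time of the last attempt); the real
// types may differ.
type strategy struct {
	Delay time.Duration // minimum gap between attempts
}

type Attempt struct {
	strategy strategy
	last     time.Time // when the previous attempt started
}

// doWithDelay waits out whatever remains of the configured delay, records the
// attempt time, and runs fn.
func (a *Attempt) doWithDelay(fn func() error) error {
	time.Sleep(a.nextSleep(time.Now()))
	a.last = time.Now()
	return fn()
}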
// loopFetchOnly is a version of loop that includes only the logic
// that calls Fetch.
func (s *sub) loopFetchOnly() {
	// STARTFETCHONLY OMIT
	var pending []Item // appended by fetch; consumed by send
	var next time.Time // initially January 1, year 0
	var err error
	for {
		var fetchDelay time.Duration // initially 0 (no delay)
		if now := time.Now(); next.After(now) {
			fetchDelay = next.Sub(now)
		}
		startFetch := time.After(fetchDelay)
		select {
		case <-startFetch:
			var fetched []Item
			fetched, next, err = s.fetcher.Fetch()
			if err != nil {
				next = time.Now().Add(10 * time.Second)
				break
			}
			pending = append(pending, fetched...)
		}
	}
	// STOPFETCHONLY OMIT
}
// compareTime checks if a and b are roughly equal
func compareTime(a time.Time, b time.Time) bool {
	diff := a.Sub(b)
	if diff < 0 {
		diff = -diff
	}
	return diff < precision
}
// minExpiry returns a minimal expiry. A minimal expiry is the larger of
// now + minLeaseTerm and the given expectedExpiry.
func minExpiry(now time.Time, expectedExpiry time.Time) time.Time {
	minExpiry := now.Add(minLeaseTerm)
	if expectedExpiry.Sub(minExpiry) < 0 {
		expectedExpiry = minExpiry
	}
	return expectedExpiry
}
// delta returns the elapsed time since the last event or the trace start,
// and whether it spans midnight.
// L >= tr.mu
func (tr *trace) delta(t time.Time) (time.Duration, bool) {
	if len(tr.events) == 0 {
		return t.Sub(tr.Start), false
	}
	prev := tr.events[len(tr.events)-1].When
	return t.Sub(prev), prev.Day() != t.Day()
}
// TestAccuracy measures the accuracy of time.Sleep by recording the elapsed
// time of count one-second sleeps and reporting the mean, variance, and
// standard deviation of the samples.
func TestAccuracy(t *testing.T) {
	var count int64
	var i int64
	count = 100
	var tstart time.Time
	var tend time.Time
	var td int64
	var tsum int64
	tdarr := make([]int64, count)
	for i = 0; i < count; i++ {
		tstart = time.Now()
		time.Sleep(1 * time.Second)
		tend = time.Now()
		td = tend.Sub(tstart).Nanoseconds()
		//fmt.Printf("%d ", td)
		tdarr[i] = td
		tsum += td
	}
	var mean float64 = float64(tsum) / float64(count)
	vari := float64(0)
	for i = 0; i < count; i++ {
		x := float64(tdarr[i]) - mean
		vari += x * x
	}
	vari = vari / float64(count-1)
	fmt.Printf("mean: %v\n", mean)
	fmt.Printf("variance: %v\n", vari)
	fmt.Printf("stddev: %v\n", math.Sqrt(vari))
	//fmt.Println(tdarr)
}
// PrintSummary prints a formatted summary table of the build run results.
func PrintSummary(buildRunResults models.BuildRunResultsModel) {
	iconBoxWidth := len(" ")
	timeBoxWidth := len(" time (s) ")
	titleBoxWidth := stepRunSummaryBoxWidthInChars - 4 - iconBoxWidth - timeBoxWidth
	fmt.Println()
	fmt.Println()
	log.Infof("+%s+", strings.Repeat("-", stepRunSummaryBoxWidthInChars-2))
	whitespaceWidth := (stepRunSummaryBoxWidthInChars - 2 - len("bitrise summary")) / 2
	log.Infof("|%sbitrise summary%s|", strings.Repeat(" ", whitespaceWidth), strings.Repeat(" ", whitespaceWidth))
	log.Infof("+%s+%s+%s+", strings.Repeat("-", iconBoxWidth), strings.Repeat("-", titleBoxWidth), strings.Repeat("-", timeBoxWidth))
	whitespaceWidth = stepRunSummaryBoxWidthInChars - len("| | title") - len("| time (s) |")
	log.Infof("| | title%s| time (s) |", strings.Repeat(" ", whitespaceWidth))
	log.Infof("+%s+%s+%s+", strings.Repeat("-", iconBoxWidth), strings.Repeat("-", titleBoxWidth), strings.Repeat("-", timeBoxWidth))
	orderedResults := buildRunResults.OrderedResults()
	tmpTime := time.Time{}
	for _, stepRunResult := range orderedResults {
		tmpTime = tmpTime.Add(stepRunResult.RunTime)
		log.Info(stepResultCell(stepRunResult))
	}
	runtime := tmpTime.Sub(time.Time{})
	log.Infof("+%s+", strings.Repeat("-", stepRunSummaryBoxWidthInChars-2))
	runtimeStr := TimeToFormattedSeconds(runtime, " sec")
	whitespaceWidth = stepRunSummaryBoxWidthInChars - len(fmt.Sprintf("| Total runtime: %s|", runtimeStr))
	log.Infof("| Total runtime: %s%s|", runtimeStr, strings.Repeat(" ", whitespaceWidth))
	log.Infof("+%s+", strings.Repeat("-", stepRunSummaryBoxWidthInChars-2))
	fmt.Println()
}
// validTokenAtTime reports whether a token is valid at the given time.
func validTokenAtTime(token, key, userID, actionID string, now time.Time) bool {
	// Extract the issue time of the token.
	sep := strings.LastIndex(token, ":")
	if sep < 0 {
		return false
	}
	millis, err := strconv.ParseInt(token[sep+1:], 10, 64)
	if err != nil {
		return false
	}
	issueTime := time.Unix(0, millis*1e6)

	// Check that the token is not expired.
	if now.Sub(issueTime) >= Timeout {
		return false
	}

	// Check that the token is not from the future.
	// Allow 1 minute grace period in case the token is being verified on a
	// machine whose clock is behind the machine that issued the token.
	if issueTime.After(now.Add(1 * time.Minute)) {
		return false
	}

	expected := generateTokenAtTime(key, userID, actionID, issueTime)

	// Check that the token matches the expected value.
	// Use constant time comparison to avoid timing attacks.
	return subtle.ConstantTimeCompare([]byte(token), []byte(expected)) == 1
}
// lookupIPDeadline looks up a hostname with a deadline.
func lookupIPDeadline(host string, deadline time.Time) (addrs []IPAddr, err error) {
	if deadline.IsZero() {
		return lookupIPMerge(host)
	}

	// We could push the deadline down into the name resolution
	// functions. However, the most commonly used implementation
	// calls getaddrinfo, which has no timeout.
	timeout := deadline.Sub(time.Now())
	if timeout <= 0 {
		return nil, errTimeout
	}
	t := time.NewTimer(timeout)
	defer t.Stop()

	ch := lookupGroup.DoChan(host, func() (interface{}, error) {
		return testHookLookupIP(lookupIP, host)
	})

	select {
	case <-t.C:
		// The DNS lookup timed out for some reason. Force
		// future requests to start the DNS lookup again
		// rather than waiting for the current lookup to
		// complete. See issue 8602.
		lookupGroup.Forget(host)
		return nil, errTimeout
	case r := <-ch:
		return lookupIPReturn(r.Val, r.Err, r.Shared)
	}
}
// toNtpTime converts the time value t into an ntpTime representation.
func toNtpTime(t time.Time) ntpTime {
	nsec := uint64(t.Sub(ntpEpoch))
	return ntpTime{
		Seconds:  uint32(nsec / nanoPerSec),
		Fraction: uint32((nsec % nanoPerSec) << 32 / nanoPerSec),
	}
}
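// A minimal usage sketch for toNtpTime. ntpEpoch and nanoPerSec are not
// defined in the snippet above; the values here are assumptions consistent
// with their names: the NTP era 0 epoch (1 January 1900 UTC) and the number
// of nanoseconds in a second.
const nanoPerSec = 1000000000

var ntpEpoch = time.Date(1900, time.January, 1, 0, 0, 0, 0, time.UTC)

func exampleToNtpTime() {
	t := time.Date(2020, time.January, 1, 0, 0, 0, 500000000, time.UTC)
	nt := toNtpTime(t)
	// Seconds holds whole seconds since the NTP epoch; Fraction holds the
	// sub-second remainder in units of 1/2^32 of a second.
	fmt.Println(nt.Seconds, nt.Fraction)
}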
// Latest returns the latest local time after which we can be confident that
// the remote writer will agree the supplied time is in the past.
func (skew Skew) Latest(remote time.Time) (local time.Time) {
	if skew.isZero() {
		return remote
	}
	delta := remote.Sub(skew.LastWrite)
	return skew.End.Add(delta)
}
// Earliest returns the earliest local time after which we can be confident
// that the remote writer will agree the supplied time is in the past.
func (skew Skew) Earliest(remote time.Time) (local time.Time) {
	if skew.isZero() {
		return remote
	}
	delta := remote.Sub(skew.LastWrite)
	return skew.Beginning.Add(delta)
}
// NextExpected returns duration (relative to now) until the next expected event
// given the state of a, the fact that no new event happened until
// now, and assuming the event source is a Poisson process with a mean
// interval time much smaller than our Tau.
func (a Counter) NextExpected(now time.Time) time.Duration {
	if a.Value <= 0 {
		return time.Duration(math.MaxInt64)
	}
	delta := float64(now.Sub(a.Timestamp)) / float64(a.Tau)         // discount activity till now
	return time.Duration(math.Exp(delta) * float64(a.Tau) / a.Value) // tau/(value * exp(-d))
}
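// A minimal usage sketch for NextExpected. The Counter definition below is an
// assumption inferred from the method above (an exponentially decayed event
// count with decay constant Tau and the time of the last update); the real
// type may carry more state.
type Counter struct {
	Value     float64       // decayed count of observed events
	Timestamp time.Time     // time of the last recorded event
	Tau       time.Duration // decay time constant
}

func exampleNextExpected() {
	c := Counter{Value: 4, Timestamp: time.Now().Add(-time.Minute), Tau: time.Hour}
	// With a decayed count of 4, the expected wait is roughly Tau/Value,
	// inflated slightly by the event-free minute observed so far.
	fmt.Printf("next event expected in about %v\n", c.NextExpected(time.Now()))
}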
// Writer reads rows from ch and writes them to
// the underlying Collection
func (b *Benchmark) Writer(ch chan []*Row) {
	n := int64(0)        // row set counter
	ns := int64(0)       // elapsed time in nanoseconds
	var t0, t1 time.Time // time between arrivals
	for rows := range ch {
		n, t1 = n+1, time.Now()
		if n > 1 {
			ns += t1.Sub(t0).Nanoseconds()
		}
		t0 = t1
		if b.mu != nil {
			b.mu.Lock()
		}
		err := b.c.Set(rows)
		if b.mu != nil {
			b.mu.Unlock()
		}
		if err != nil {
			log.Println(err)
			return
		}
	}
	b.done <- true
	if n > 0 {
		log.Printf("%d row sets arrived at an average inter-arrival rate of %s", n, time.Duration(ns/n))
	}
}
// estimateTps estimates the average transactions per second of
// the simulation.
func (com *Communication) estimateTps(tpsChan chan<- float64, txCurve map[int32]*Row) {
	defer com.wg.Done()

	var first, last time.Time
	var diff, curveDiff time.Duration
	var txnCount, curveCount, block int
	firstTx := true
	for {
		select {
		case last = <-com.timeReceived:
			if firstTx {
				first = last
				firstTx = false
			}
			txnCount++
			diff = last.Sub(first)
			curveCount++
			if c, ok := txCurve[int32(block)]; ok && curveCount == c.txCount {
				// A block has been mined; reset necessary variables
				curveCount = 0
				firstTx = true
				curveDiff += diff
				diff = curveDiff
				// Use next block's desired transaction count
				block++
			}
		case <-com.exit:
			tpsChan <- float64(txnCount) / diff.Seconds()
			return
		}
	}
}
// compareTime checks if a and b are roughly equal (1s precision)
func compareTime(a time.Time, b time.Time) bool {
	diff := a.Sub(b)
	if diff < 0 {
		diff = -diff
	}
	return diff < time.Second
}
// timerLoop periodically logs the number of messages sent and the message and
// bit rates since the previous tick.
func timerLoop(count, bytes *uint64, ticker *time.Ticker) {
	lastTime := time.Now().UTC()
	lastCount := *count
	lastBytes := *bytes
	zeroes := int8(0)
	var (
		msgsSent, newCount, bytesSent, newBytes uint64
		elapsedTime                             time.Duration
		now                                     time.Time
		msgRate, bitRate                        float64
	)
	for {
		_ = <-ticker.C
		newCount = *count
		newBytes = *bytes
		now = time.Now()
		msgsSent = newCount - lastCount
		lastCount = newCount
		bytesSent = newBytes - lastBytes
		lastBytes = newBytes
		elapsedTime = now.Sub(lastTime)
		lastTime = now
		msgRate = float64(msgsSent) / elapsedTime.Seconds()
		bitRate = float64(bytesSent*8.0) / 1e6 / elapsedTime.Seconds()
		if msgsSent == 0 {
			if newCount == 0 || zeroes == 3 {
				continue
			}
			zeroes++
		} else {
			zeroes = 0
		}
		client.LogInfo.Printf("Sent %d messages. %0.2f msg/sec %0.2f Mbit/sec\n", newCount, msgRate, bitRate)
	}
}
// checkTokenWithTime checks token using the given time.
func checkTokenWithTime(c context.Context, token, user, action string, now time.Time) error {
	if token == "" {
		return fmt.Errorf("token is not given")
	}
	d, err := base64.URLEncoding.DecodeString(token)
	if err != nil {
		return err
	}
	sig := &sigData{}
	if err = json.Unmarshal(d, sig); err != nil {
		return err
	}
	issueTime := time.Unix(0, sig.IssueTime)
	if now.Sub(issueTime) >= Timeout {
		return fmt.Errorf("signature has already expired")
	}
	if issueTime.After(now.Add(validFuture)) {
		return fmt.Errorf("token comes from the future")
	}
	toVerify := toData(user, action, sig.IssueTime)
	certs, err := signature.PublicCerts(c)
	if err != nil {
		return err
	}
	cert := signature.X509CertByName(certs, sig.Key)
	if cert == nil {
		return fmt.Errorf("cannot find cert")
	}
	return signature.Check(toVerify, cert, sig.Signature)
}
// CheckTimeEqualWithPrecision checks the times are equal within the
// precision, returns the delta and a flag
func CheckTimeEqualWithPrecision(t0, t1 time.Time, precision time.Duration) (time.Duration, bool) {
	dt := t0.Sub(t1)
	if dt >= precision || dt <= -precision {
		return dt, false
	}
	return dt, true
}
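// A minimal usage sketch for CheckTimeEqualWithPrecision, e.g. asserting in a
// test that two timestamps agree to within 10ms; it relies only on the helper
// above and the time package.
func exampleCheckTimeEqual() {
	want := time.Now()
	got := want.Add(3 * time.Millisecond)
	if dt, ok := CheckTimeEqualWithPrecision(got, want, 10*time.Millisecond); ok {
		fmt.Printf("times agree within precision (delta %v)\n", dt)
	}
}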
// dialChannel is the simple pure-Go implementation of dial, still
// used on operating systems where the deadline hasn't been pushed
// down into the pollserver. (Plan 9 and some old versions of Windows)
func dialChannel(net string, ra Addr, dialer func(time.Time) (Conn, error), deadline time.Time) (Conn, error) {
	var timeout time.Duration
	if !deadline.IsZero() {
		timeout = deadline.Sub(time.Now())
	}
	if timeout <= 0 {
		return dialer(noDeadline)
	}
	t := time.NewTimer(timeout)
	defer t.Stop()
	type racer struct {
		Conn
		error
	}
	ch := make(chan racer, 1)
	go func() {
		if testingIssue5349 {
			time.Sleep(time.Millisecond)
		}
		c, err := dialer(noDeadline)
		ch <- racer{c, err}
	}()
	select {
	case <-t.C:
		return nil, &OpError{Op: "dial", Net: net, Addr: ra, Err: errTimeout}
	case racer := <-ch:
		return racer.Conn, racer.error
	}
}
// TestResolution estimates the resolution of time.Now by taking count
// back-to-back timestamps and reporting the mean, variance, and standard
// deviation of the deltas.
func TestResolution(t *testing.T) {
	var count int64
	var i int64
	count = 10000
	var tstart time.Time
	var tend time.Time
	var td int64
	var tsum int64
	var sumsqr int64
	tdarr := make([]int64, count)
	for i = 0; i < count; i++ {
		tstart = time.Now()
		tend = time.Now()
		td = tend.Sub(tstart).Nanoseconds()
		//fmt.Printf("%d ", td)
		tdarr[i] = td
		tsum += td
		sumsqr += td * td
	}
	mean := tsum / count
	variance := sumsqr/count - mean*mean
	vari := float64(0)
	for i = 0; i < count; i++ {
		x := float64(tdarr[i]) - float64(mean)
		vari += x * x
	}
	fmt.Printf("mean: %v\n", mean)
	fmt.Printf("variance: %v\n", variance)
	fmt.Printf("stddev: %v\n", math.Sqrt(float64(variance)))
	//fmt.Println(tdarr)
}
// purgeLogsOnce removes logfiles for program in dir, if their age
// relative to now is greater than keep.
func purgeLogsOnce(now time.Time, dir, program string, keep time.Duration) {
	current := make(map[string]bool)
	for _, level := range levels {
		c, err := os.Readlink(path.Join(dir, fmt.Sprintf("%s.%s", program, level)))
		if err != nil {
			continue
		}
		current[c] = true
	}
	files, err := filepath.Glob(path.Join(dir, fmt.Sprintf("%s.*", program)))
	if err != nil {
		return
	}
	for _, file := range files {
		if current[file] {
			continue
		}
		created, err := parseTimestamp(file)
		if err != nil {
			continue
		}
		if now.Sub(created) > keep {
			os.Remove(file)
		}
	}
}
// newWakeSignal creates a wakeSignal that sends wakeTime on dst when wakeTime passes.
// This function should be used to create wakeSignals;
// the zero value wakeSignal is not valid.
func newWakeSignal(dst chan time.Time, wakeTime time.Time) *wakeSignal {
	return &wakeSignal{
		dst:  dst,
		src:  time.After(wakeTime.Sub(time.Now())),
		stop: make(chan struct{}),
	}
}
// completeRequest looks at iodata to see if a request is pending. If so, it waits for it to either complete or to
// abort due to hitting the specified deadline. Deadline may be set to nil to wait forever. If no request is pending,
// the content of iodata is returned.
func (c *PipeConn) completeRequest(data iodata, deadline *time.Time, overlapped *syscall.Overlapped) (int, error) {
	if data.err == error_io_incomplete || data.err == syscall.ERROR_IO_PENDING {
		var timer <-chan time.Time
		if deadline != nil {
			if timeDiff := deadline.Sub(time.Now()); timeDiff > 0 {
				timer = time.After(timeDiff)
			}
		}
		done := make(chan iodata)
		go func() {
			n, err := waitForCompletion(c.handle, overlapped)
			done <- iodata{n, err}
		}()
		select {
		case data = <-done:
		case <-timer:
			syscall.CancelIoEx(c.handle, overlapped)
			data = iodata{0, timeout(c.addr.String())}
		}
	}
	// Windows will produce ERROR_BROKEN_PIPE upon closing
	// a handle on the other end of a connection. Go RPC
	// expects an io.EOF error in this case.
	if data.err == syscall.ERROR_BROKEN_PIPE {
		data.err = io.EOF
	}
	return int(data.n), data.err
}
// showSince writes the elapsed time between since and now, or an empty table
// cell if either time is unset.
func showSince(writer io.Writer, now time.Time, since time.Time) {
	if now.IsZero() || since.IsZero() {
		fmt.Fprintln(writer, " <td></td>")
	} else {
		showDuration(writer, now.Sub(since), false)
	}
}
// In some environments, the slow IPs may be explicitly unreachable, and fail
// more quickly than expected. This test hook prevents dialTCP from returning
// before the deadline.
func slowDialTCP(net string, laddr, raddr *TCPAddr, deadline time.Time) (*TCPConn, error) {
	c, err := dialTCP(net, laddr, raddr, deadline)
	if ParseIP(slowDst4).Equal(raddr.IP) || ParseIP(slowDst6).Equal(raddr.IP) {
		time.Sleep(deadline.Sub(time.Now()))
	}
	return c, err
}
// timerLoop periodically logs how many messages have been sent and the
// average send rate since the previous tick.
func timerLoop(count *uint64, ticker *time.Ticker) {
	lastTime := time.Now().UTC()
	lastCount := *count
	zeroes := int8(0)
	var (
		msgsSent, newCount uint64
		elapsedTime        time.Duration
		now                time.Time
		rate               float64
	)
	for {
		_ = <-ticker.C
		newCount = *count
		now = time.Now()
		msgsSent = newCount - lastCount
		lastCount = newCount
		elapsedTime = now.Sub(lastTime)
		lastTime = now
		rate = float64(msgsSent) / elapsedTime.Seconds()
		if msgsSent == 0 {
			if newCount == 0 || zeroes == 3 {
				continue
			}
			zeroes++
		} else {
			zeroes = 0
		}
		log.Printf("Sent %d messages. %0.2f msg/sec\n", newCount, rate)
	}
}
// compactFilter evicts entries whose age relative to now exceeds the TTL and,
// when the filter is full, at least the eldest entry.
func (f *ReplayFilter) compactFilter(now time.Time) {
	e := f.fifo.Front()
	for e != nil {
		ent, _ := e.Value.(*entry)

		// If the filter is not full, only purge entries that exceed the TTL,
		// otherwise purge at least one entry, then revert to TTL based
		// compaction.
		if f.fifo.Len() < maxFilterSize && f.ttl > 0 {
			deltaT := now.Sub(ent.firstSeen)
			if deltaT < 0 {
				// Aeeeeeee, the system time jumped backwards, potentially by
				// a lot. This will eventually self-correct, but "eventually"
				// could be a long time. As much as this sucks, jettison the
				// entire filter.
				f.reset()
				return
			} else if deltaT < f.ttl {
				return
			}
		}

		// Remove the eldest entry.
		eNext := e.Next()
		delete(f.filter, ent.digest)
		f.fifo.Remove(ent.element)
		ent.element = nil
		e = eNext
	}
}
// validAtTime reports whether token is a valid, unexpired token returned by
// Generate at the given time.
func validAtTime(token, key, userID string, now time.Time) bool {
	// Decode the token.
	data, err := base64.URLEncoding.DecodeString(token)
	if err != nil {
		return false
	}
	// Extract the issue time of the token.
	sep := bytes.LastIndex(data, []byte{':'})
	if sep < 0 {
		return false
	}
	nanos, err := strconv.ParseInt(string(data[sep+1:]), 10, 64)
	if err != nil {
		return false
	}
	issueTime := time.Unix(0, nanos)

	// Check that the token is not expired.
	if now.Sub(issueTime) >= timeout {
		return false
	}

	// Check that the token is not from the future.
	// Allow 1 minute grace period in case the token is being verified on a
	// machine whose clock is behind the machine that issued the token.
	if issueTime.After(now.Add(1 * time.Minute)) {
		return false
	}

	// Check that the token matches the expected value.
	expected := generateAtTime(key, userID, issueTime)
	return token == expected
}
// CostForDuration returns the cost of running a host between the given start and end times
func (cloudManager *EC2Manager) CostForDuration(h *host.Host, start, end time.Time) (float64, error) {
	// sanity check
	if end.Before(start) || util.IsZeroTime(start) || util.IsZeroTime(end) {
		return 0, fmt.Errorf("task timing data is malformed")
	}
	// grab instance details from EC2
	ec2Handle := getUSEast(*cloudManager.awsCredentials)
	instance, err := getInstanceInfo(ec2Handle, h.Id)
	if err != nil {
		return 0, err
	}
	os := osLinux
	if strings.Contains(h.Distro.Arch, "windows") {
		os = osWindows
	}
	dur := end.Sub(start)
	region := azToRegion(instance.AvailabilityZone)
	iType := instance.InstanceType
	ebsCost, err := blockDeviceCosts(ec2Handle, instance.BlockDevices, dur)
	if err != nil {
		return 0, fmt.Errorf("calculating block device costs: %v", err)
	}
	hostCost, err := onDemandCost(&pkgOnDemandPriceFetcher, os, iType, region, dur)
	if err != nil {
		return 0, err
	}
	return hostCost + ebsCost, nil
}