// CreateShardGroup creates a shard group on a database and policy for a given timestamp.
func (data *Data) CreateShardGroup(database, policy string, timestamp time.Time) error {
	// Find retention policy.
	rpi, err := data.RetentionPolicy(database, policy)
	if err != nil {
		return err
	} else if rpi == nil {
		return influxdb.ErrRetentionPolicyNotFound(policy)
	}

	// Verify that shard group doesn't already exist for this timestamp.
	if rpi.ShardGroupByTimestamp(timestamp) != nil {
		return nil
	}

	// Create the shard group.
	data.MaxShardGroupID++
	sgi := ShardGroupInfo{}
	sgi.ID = data.MaxShardGroupID
	sgi.StartTime = timestamp.Truncate(rpi.ShardGroupDuration).UTC()
	sgi.EndTime = sgi.StartTime.Add(rpi.ShardGroupDuration).UTC()

	data.MaxShardID++
	sgi.Shards = []ShardInfo{
		{ID: data.MaxShardID},
	}

	// Retention policy has a new shard group, so update the policy. Shard
	// groups must be stored in sorted order, as other parts of the system
	// assume this to be the case.
	rpi.ShardGroups = append(rpi.ShardGroups, sgi)
	sort.Sort(ShardGroupInfos(rpi.ShardGroups))

	return nil
}
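// Illustrative sketch, not from the source: how the shard-group bounds above
// fall out of Truncate for a hypothetical 1h ShardGroupDuration. Every
// timestamp inside the same hour maps to the same [StartTime, EndTime) window,
// which is why an existing group can be looked up by timestamp first.
func exampleShardGroupBounds() {
	d := time.Hour // stand-in for rpi.ShardGroupDuration
	ts := time.Date(2016, 3, 1, 10, 42, 7, 0, time.UTC)
	start := ts.Truncate(d).UTC() // 2016-03-01 10:00:00 +0000 UTC
	end := start.Add(d).UTC()     // 2016-03-01 11:00:00 +0000 UTC
	fmt.Println(start, end)
}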
// PurgeOlderThan removes leading segments whose last modification time is
// older than the given time, truncated to the second.
func (l *queue) PurgeOlderThan(when time.Time) error {
	l.mu.Lock()
	defer l.mu.Unlock()

	if len(l.segments) == 0 {
		return nil
	}

	cutoff := when.Truncate(time.Second)
	for {
		mod, err := l.head.lastModified()
		if err != nil {
			return err
		}

		if mod.After(cutoff) || mod.Equal(cutoff) {
			return nil
		}

		// If this is the last segment, append a new one first so that
		// trimming can proceed.
		if len(l.segments) == 1 {
			_, err := l.addSegment()
			if err != nil {
				return err
			}
		}

		if err := l.trimHead(); err != nil {
			return err
		}
	}
}
// processTimeRange calls gs.GetLatestGSDirs to get a list of Google Storage
// directories covering the given time range, then processes every JSON object
// found under each of them.
func (xformer *pdfXformer) processTimeRange(start time.Time, end time.Time) {
	glog.Infof("Processing time range: (%s, %s)", start.Truncate(time.Second), end.Truncate(time.Second))
	for _, dir := range gs.GetLatestGSDirs(start.Unix(), end.Unix(), *storageJsonDirectory) {
		glog.Infof("> Reading gs://%s/%s\n", *storageBucket, dir)
		requestedObjects := xformer.client.storageService.Objects.List(*storageBucket).Prefix(dir).Fields(
			"nextPageToken", "items/updated", "items/md5Hash", "items/mediaLink", "items/name", "items/metadata")
		for requestedObjects != nil {
			responseObjects, err := requestedObjects.Do()
			if err != nil {
				glog.Errorf("request %#v failed: %s", requestedObjects, err)
			} else {
				for _, jsonObject := range responseObjects.Items {
					xformer.counter++
					glog.Infof("> > Processing object: gs://%s/%s {%d}", *storageBucket, jsonObject.Name, xformer.counter)
					xformer.processJsonFile(jsonObject)
				}
			}
			// Guard against a nil response after a failed request; stop paging on error.
			if responseObjects != nil && len(responseObjects.NextPageToken) > 0 {
				requestedObjects.PageToken(responseObjects.NextPageToken)
			} else {
				requestedObjects = nil
			}
		}
	}
	glog.Infof("Finished time range.")
}
// Begins returns the timestamp of the beginning of this RRA assuming
// that the argument "now" is within it. This will be a time
// approximately, but not exactly, the RRA length ago, because it is
// aligned on the RRA step boundary.
func (rra *RoundRobinArchive) Begins(now time.Time) time.Time {
	rraStart := now.Add(rra.step * time.Duration(rra.size) * -1).Truncate(rra.step)
	if now.Equal(now.Truncate(rra.step)) {
		rraStart = rraStart.Add(rra.step)
	}
	return rraStart
}
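// A worked check of the boundary rule above, with assumed values (10s step,
// size 6, so the RRA spans 60s). Off a boundary, the start is simply the
// truncated point one RRA length back; exactly on a boundary, Begins shifts
// the start forward one step so the window still holds exactly size steps.
func exampleBegins() {
	step := 10 * time.Second
	size := time.Duration(6)
	now := time.Date(2016, 1, 1, 0, 0, 33, 0, time.UTC)             // off-boundary
	fmt.Println(now.Add(step * size * -1).Truncate(step))           // 2015-12-31 23:59:30 +0000 UTC
	now = time.Date(2016, 1, 1, 0, 0, 30, 0, time.UTC)              // on-boundary
	fmt.Println(now.Add(step * size * -1).Truncate(step).Add(step)) // 2015-12-31 23:59:40 +0000 UTC
}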
// ServiceEvents returns an array of events for the specified service id. If start and/or end is
// not nil, the list is filtered to include only the events between the start and end times,
// inclusive.
//
// Note that only the date part of the start and end times is considered; parts with finer
// granularity are ignored.
func ServiceEvents(serviceId string, start *time.Time, end *time.Time) ([]ApiServiceEvent, error) {
	v := struct {
		ClientError
		Events []ApiServiceEvent `json:"events"`
	}{}

	u, err := url.Parse(fmt.Sprintf("/v1/services/%s/events", serviceId))
	if err != nil {
		return nil, err
	}

	q := u.Query()
	if start != nil {
		q.Set("start", start.Truncate(24*time.Hour).Format(time.RFC1123))
	}
	if end != nil {
		q.Set("end", end.Truncate(24*time.Hour).Format(time.RFC1123))
	}
	u.RawQuery = q.Encode()

	if err = getStatus(u.String(), &v); err != nil {
		return nil, err
	}
	if v.IsError {
		return nil, &v.ClientError
	}
	return v.Events, nil
}
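// One caveat worth knowing about the date-only truncation above: Truncate
// measures from Go's zero time, so Truncate(24*time.Hour) yields midnight UTC,
// not local midnight. A minimal demonstration with an assumed zone:
func exampleDayTruncate() {
	loc, _ := time.LoadLocation("America/New_York")
	t := time.Date(2016, 6, 1, 22, 0, 0, 0, loc)  // = 2016-06-02 02:00 UTC
	fmt.Println(t.Truncate(24 * time.Hour).UTC()) // 2016-06-02 00:00:00 +0000 UTC — "tomorrow" locally
}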
// CannedSignedURL creates a signed URL using RSA with SHA-1 as specified by
// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-canned-policy.html#private-content-canned-policy-creating-signature
func (cf *CloudFront) CannedSignedURL(path, queryString string, expires time.Time) (string, error) {
	// The canned policy must be built over the full URL, including any query string.
	resource := cf.BaseURL + path
	if queryString != "" {
		resource = cf.BaseURL + path + "?" + queryString
	}

	policy, err := buildPolicy(resource, expires)
	if err != nil {
		return "", err
	}

	signature, err := cf.generateSignature(policy)
	if err != nil {
		return "", err
	}

	// TODO: Do this once.
	uri, err := url.Parse(cf.BaseURL)
	if err != nil {
		return "", err
	}

	uri.RawQuery = queryString
	if queryString != "" {
		uri.RawQuery += "&"
	}

	expireTime := expires.Truncate(time.Millisecond).Unix()

	uri.Path = path
	uri.RawQuery += fmt.Sprintf("Expires=%d&Signature=%s&Key-Pair-Id=%s", expireTime, signature, cf.keyPairId)

	return uri.String(), nil
}
func calcDailySummary(now time.Time, config StartupConfig, runningConfig RunningConfig) {
	log.Infof("lastSummaryTime is %v", runningConfig.LastSummaryTime)
	if runningConfig.LastSummaryTime.Day() != now.Day() {
		startTime := now.Truncate(24 * time.Hour).Add(-24 * time.Hour)
		endTime := startTime.Add(24 * time.Hour)
		log.Info("Summarizing from ", startTime, " (", startTime.Unix(), ") to ", endTime, " (", endTime.Unix(), ")")

		// influx connection
		influxClient, err := influxConnect(config, runningConfig)
		if err != nil {
			log.Error("Could not connect to InfluxDB to get daily summary stats!")
			errHndlr(err, ERROR)
			return
		}

		bp, err := influx.NewBatchPoints(influx.BatchPointsConfig{
			Database:        "daily_stats",
			Precision:       "s",
			RetentionPolicy: config.DailySummaryRetentionPolicy,
		})
		if err != nil {
			errHndlr(err, ERROR)
			return
		}

		calcDailyMaxGbps(influxClient, bp, startTime, endTime, config)
		calcDailyBytesServed(influxClient, bp, startTime, endTime, config)
		log.Info("Collected daily stats @ ", now)
	}
}
func TestParse(t *testing.T) {
	var (
		s    string
		p, x time.Time
		err  error
	)

	// Local time
	now := time.Now()
	times := map[string]time.Time{
		"5m":     now.Add(time.Duration(time.Minute * 5)),
		"-0h":    now.Add(-time.Duration(time.Hour * 0)),
		"-48h5m": now.Add(-time.Duration(time.Hour*48 + time.Minute*5)),

		// UTC
		"2013-04-10":        time.Date(2013, 4, 10, 0, 0, 0, 0, time.UTC),
		"April 4, 2013":     time.Date(2013, 4, 4, 0, 0, 0, 0, time.UTC),
		"Apr 04, 2013":      time.Date(2013, 4, 4, 0, 0, 0, 0, time.UTC),
		"47065363200000000": time.Date(1492, 6, 11, 0, 0, 0, 0, time.UTC),

		"02-01-2006":                time.Date(2006, 1, 2, 0, 0, 0, 0, time.UTC),
		"02-01-2006 2:04 PM":        time.Date(2006, 1, 2, 14, 4, 0, 0, time.UTC),
		"02-01-2006 2:04 PM -0700":  time.Date(2006, 1, 2, 21, 4, 0, 0, time.UTC),
		"02-01-2006 2:04 PM -07:00": time.Date(2006, 1, 2, 21, 4, 0, 0, time.UTC),

		"2 January 2006":                time.Date(2006, 1, 2, 0, 0, 0, 0, time.UTC),
		"2 January 2006 3:04 PM":        time.Date(2006, 1, 2, 15, 4, 0, 0, time.UTC),
		"2 January 2006 3:04 PM -0700":  time.Date(2006, 1, 2, 22, 4, 0, 0, time.UTC),
		"2 January 2006 3:04 PM -07:00": time.Date(2006, 1, 2, 22, 4, 0, 0, time.UTC),

		"2006-01-02":                time.Date(2006, 1, 2, 0, 0, 0, 0, time.UTC),
		"2006-01-02 3:04 PM":        time.Date(2006, 1, 2, 15, 4, 0, 0, time.UTC),
		"2006-01-02 3:04 PM -0700":  time.Date(2006, 1, 2, 22, 4, 0, 0, time.UTC),
		"2006-01-02 3:04 PM -07:00": time.Date(2006, 1, 2, 22, 4, 0, 0, time.UTC),

		"January 2, 2006":                time.Date(2006, 1, 2, 0, 0, 0, 0, time.UTC),
		"January 2, 2006 3:04 PM":        time.Date(2006, 1, 2, 15, 4, 0, 0, time.UTC),
		"January 2, 2006 3:04 PM -0700":  time.Date(2006, 1, 2, 22, 4, 0, 0, time.UTC),
		"January 2, 2006 3:04 PM -07:00": time.Date(2006, 1, 2, 22, 4, 0, 0, time.UTC),

		"Jan 2, 2006":                time.Date(2006, 1, 2, 0, 0, 0, 0, time.UTC),
		"Jan 2, 2006, 3:04 PM":       time.Date(2006, 1, 2, 15, 4, 0, 0, time.UTC),
		"Jan 2, 2006 3:04 PM -0700":  time.Date(2006, 1, 2, 22, 4, 0, 0, time.UTC),
		"Jan 2, 2006 3:04 PM -07:00": time.Date(2006, 1, 2, 22, 4, 0, 0, time.UTC),
	}

	// Duration to truncate for comparison.
	td := time.Second

	for s, x = range times {
		p, err = Parse(s)
		if err != nil {
			t.Errorf("time: failed to parse %s as time", s)
		} else {
			x = x.Truncate(td)
			p = p.Truncate(td)
			if !p.Equal(x) {
				t.Errorf("time: expected %s, got %s", x, p)
			}
		}
	}
}
// NewDateTime creates a new CalDAV datetime representation; t must be in UTC.
func NewDateTime(name string, t time.Time) (*DateTime, error) {
	if t.Location() != time.UTC {
		return nil, errors.New("CalDAV datetime must be in UTC")
	}
	return &DateTime{name: name, t: t.Truncate(time.Second)}, nil
}
// surroundingStep returns begin and end of a PDP which either
// includes or ends on a given time mark.
func surroundingStep(mark time.Time, step time.Duration) (time.Time, time.Time) {
	begin := mark.Truncate(step)
	if mark.Equal(begin) { // We are exactly at the end, need to move one step back.
		begin = begin.Add(step * -1)
	}
	return begin, begin.Add(step)
}
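// Quick illustration of the boundary case handled above: a mark exactly on a
// step boundary belongs to the PDP that ends at the mark, not the one that
// begins there.
func exampleSurroundingStep() {
	step := time.Minute
	b, e := surroundingStep(time.Date(2016, 1, 1, 12, 5, 0, 0, time.UTC), step)
	fmt.Println(b, e) // 12:04:00, 12:05:00 — the step ending at the mark
	b, e = surroundingStep(time.Date(2016, 1, 1, 12, 5, 30, 0, time.UTC), step)
	fmt.Println(b, e) // 12:05:00, 12:06:00 — the step containing the mark
}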
// NextSchedule returns the next time after now that is aligned to a multiple
// of d, shifted by offset.
func NextSchedule(now time.Time, offset time.Duration, d time.Duration) time.Time {
	t := now.Truncate(d).Add(offset)
	if t.After(now) {
		return t
	}
	return t.Add(d)
}
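// Usage sketch with assumed values: the next 5-minute tick shifted by 30s.
// Truncate aligns down to the most recent multiple of d, so the shifted tick
// in the current period may already have passed, in which case we advance by d.
func exampleNextSchedule() {
	now := time.Date(2016, 1, 1, 10, 3, 0, 0, time.UTC)
	fmt.Println(NextSchedule(now, 30*time.Second, 5*time.Minute)) // 10:05:30 (10:00:30 already passed)
}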
func (t *threadThrottler) throttle(now time.Time) time.Duration {
	// Initialize or advance the current second interval when necessary.
	nowSecond := now.Truncate(time.Second)
	if !t.initialized {
		t.resetSecond(nowSecond)
		t.initialized = true
	}
	if !t.currentSecond.Equal(nowSecond) {
		t.resetSecond(nowSecond)
	}

	maxRate := t.maxRateSecond
	if maxRate == ZeroRateNoProgess {
		// Throughput is effectively paused. Do not let anything through until
		// the max rate changes.
		return t.currentSecond.Add(1 * time.Second).Sub(now)
	}

	// Check if we have already received too many requests within this second.
	if t.currentRate >= maxRate {
		return t.currentSecond.Add(1 * time.Second).Sub(now)
	}

	// The next request isn't expected earlier than nextRequestInterval.
	// With this check we ensure there's at most one request per request interval.
	if now.Before(t.nextRequestInterval) {
		return t.nextRequestInterval.Sub(now)
	}

	// Check if we have to pace the user.
	// NOTE: Pacing won't work if maxRate > 1e9 (since 1e9ns = 1s) and therefore
	// the returned backoff will always be zero.

	// Minimum time between two requests.
	requestIntervalNs := (1 * time.Second).Nanoseconds() / maxRate
	// End of the previous request is the earliest allowed time of this request.
	earliestArrivalOffsetNs := t.currentRate * requestIntervalNs
	earliestArrival := t.currentSecond.Add(time.Duration(earliestArrivalOffsetNs) * time.Nanosecond)
	// TODO(mberlin): Most likely we overshoot here since we don't take into
	// account our and the user's processing time. Due to too long backoffs, they
	// might not be able to fully use their capacity/maximum rate.
	backoff := earliestArrival.Sub(now)
	if backoff > 0 {
		return backoff
	}

	// Calculate the earliest time the next request can pass.
	requestInterval := time.Duration(requestIntervalNs) * time.Nanosecond
	currentRequestInterval := now.Truncate(requestInterval)
	t.nextRequestInterval = currentRequestInterval.Add(requestInterval)
	// QPS rates >= 10k are prone to skipping their next request interval.
	// We have to be more relaxed in this case.
	if requestInterval <= 100*time.Microsecond {
		t.nextRequestInterval = t.nextRequestInterval.Add(-requestInterval)
	}

	t.currentRate++
	return NotThrottled
}
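// Worked numbers for the pacing math above (values assumed, not from the
// source): at maxRate=4 QPS the per-request interval is 250ms, so the request
// after currentRate=2 may not arrive earlier than 500ms into the current second.
func examplePacingMath() {
	var maxRate, currentRate int64 = 4, 2
	requestIntervalNs := (1 * time.Second).Nanoseconds() / maxRate                 // 250,000,000
	earliestOffset := time.Duration(currentRate*requestIntervalNs) * time.Nanosecond
	fmt.Println(earliestOffset) // 500ms
}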
// testBucket builds a bucket.Bucket for tests, truncating t to the resolution.
func testBucket(name, source, user, pass string, t time.Time, res time.Duration, vals []float64) *bucket.Bucket {
	id := new(bucket.Id)
	id.Name = name
	id.Source = source
	id.User = user
	id.Pass = pass
	id.Time = t.Truncate(res)
	id.Resolution = res
	return &bucket.Bucket{Id: id, Vals: vals}
}
// On filters journal entries by creation date.
func (journal Journal) On(on time.Time) *Journal {
	var results []entry.Entry
	for _, e := range journal.Query() {
		if e.CreatedAt.Truncate(24 * time.Hour).Equal(on.Truncate(24 * time.Hour)) {
			results = append(results, e)
		}
	}
	return &Journal{results}
}
// IfModifiedSince returns true if lastModified exceeds the 'If-Modified-Since'
// value from the request header.
//
// The function also returns true if the 'If-Modified-Since' request header is missing.
func (ctx *RequestCtx) IfModifiedSince(lastModified time.Time) bool {
	ifModStr := ctx.Request.Header.peek(strIfModifiedSince)
	if len(ifModStr) == 0 {
		return true
	}
	ifMod, err := ParseHTTPDate(ifModStr)
	if err != nil {
		return true
	}
	lastModified = lastModified.Truncate(time.Second)
	return ifMod.Before(lastModified)
}
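// The truncation above matters because HTTP dates only carry second precision:
// a lastModified with sub-second nanos would otherwise always look newer than
// an equal header value. Minimal illustration with assumed times:
func exampleSecondPrecision() {
	ifMod := time.Date(2016, 1, 1, 12, 0, 0, 0, time.UTC)         // parsed If-Modified-Since
	lastModified := ifMod.Add(500 * time.Millisecond)             // same second on the server
	fmt.Println(ifMod.Before(lastModified))                       // true — spurious "modified"
	fmt.Println(ifMod.Before(lastModified.Truncate(time.Second))) // false — correctly unmodified
}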
// SetLeapSecond schedules a leap second. second must be exactly midnight on
// the first day of the month: this is the first time that is always valid
// after the leap has occurred, for both adding and removing a second, and it
// is the same way leap seconds are officially listed.
// https://www.ietf.org/timezones/data/leap-seconds.list
func (s *Server) SetLeapSecond(second time.Time, direction LeapIndicator) {
	second = second.UTC()
	if (second.IsZero() && direction != LEAP_NONE) ||
		!second.Truncate(24*time.Hour).Equal(second) ||
		second.Day() != 1 {
		panic("Invalid leap second.")
	}

	s.mu.Lock()
	defer s.mu.Unlock()
	s.leapTime = second
	s.leapType = direction
}
// shouldKeep reports whether a point at time t for the given group should be
// sampled, either by time alignment or by count.
func (s *SampleNode) shouldKeep(group models.GroupID, t time.Time) bool {
	if s.duration != 0 {
		keepTime := t.Truncate(s.duration)
		return t.Equal(keepTime)
	}
	count := s.counts[group]
	keep := count%s.s.Count == 0
	count++
	s.counts[group] = count
	return keep
}
func (r *WUHourlyResponse) stale(targetTime time.Time) bool {
	var max time.Time
	for _, forecast := range r.HourlyForecast {
		curTime := epochToTime(forecast.Fcttime.Epoch)
		if curTime.After(max) {
			max = curTime
		}
	}
	lastHour := targetTime.Truncate(time.Hour)
	isStale := max.After(lastHour)
	log.Printf("env: sensor: weather: cache stale=%t lastHour=%v max=%v", isStale, lastHour, max)
	return isStale
}
// Covers reports whether time t is included within the span's timeframe.
func (s *Span) Covers(t time.Time) bool {
	if len(*s) == 0 {
		return false
	}
	if !sort.IsSorted(s) {
		sort.Sort(s)
	}
	// Compare times truncated to whole hours; anything finer can get messy.
	compareTime := t.Truncate(time.Hour)
	firstDate, lastDate := (*s)[0].Time.Truncate(time.Hour), (*s)[len(*s)-1].Time.Truncate(time.Hour)
	return (firstDate.Before(compareTime) || firstDate.Equal(compareTime)) &&
		(lastDate.After(compareTime) || lastDate.Equal(compareTime))
}
// checkTimeElapsed produces a test error if the given time string does not
// fall within the specified duration of the given start time.
func checkTimeElapsed(t *testing.T, timeStr string, elapsed time.Duration, start time.Time) {
	// Truncate the start time, because the CLI currently outputs times with
	// second-level granularity.
	start = start.Truncate(time.Second)
	tm, err := time.ParseInLocation(localTimeFormat, timeStr, start.Location())
	if err != nil {
		t.Errorf("couldn't parse time '%s': %s", timeStr, err)
		return
	}
	end := start.Add(elapsed)
	if tm.Before(start) || tm.After(end) {
		t.Errorf("time (%s) not within range [%s,%s]", tm, start, end)
	}
}
// MarkDownload upserts a per-hour download counter row for the given file,
// user, and IP, truncating t to the hour bucket.
func (db *DownloadHourDb) MarkDownload(fileId, userId, ip string, t time.Time) error {
	t = t.Truncate(time.Hour)
	sql := `
		INSERT INTO download_hour (file_id, user_id, ip, hour, downloads)
		VALUES ($1, $2, $3, $4, 1)
		ON CONFLICT ON CONSTRAINT download_hour_file_id_hour_ip_key
		DO UPDATE SET downloads = download_hour.downloads + 1
		RETURNING *
	`
	_, err := db.DB.Exec(sql, fileId, userId, ip, t)
	return err
}
// buildPolicy returns the canned policy JSON for the given resource and expiry.
func buildPolicy(resource string, expireTime time.Time) ([]byte, error) {
	p := &policy{
		Statement: []statement{
			{
				Resource: resource,
				Condition: condition{
					DateLessThan: epochTime{
						EpochTime: expireTime.Truncate(time.Millisecond).Unix(),
					},
				},
			},
		},
	}
	return json.Marshal(p)
}
// ShouldRun reports whether any of the emissary's cron schedules fires at the
// minute containing t.
func (e *Emissary) ShouldRun(t time.Time) (bool, error) {
	t = t.Truncate(time.Minute)
	compare := t.Add(-1 * time.Nanosecond)

	for _, s := range e.Schedules {
		parsed, err := cronexpr.Parse(s)
		if err != nil {
			return false, err
		}
		next := parsed.Next(compare)
		if next.Equal(t) {
			return true, nil
		}
	}
	return false, nil
}
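// Why the 1ns step back above works (illustrative): cronexpr's Next returns
// the closest instant strictly after its argument, so Next(t-1ns) equals t
// exactly when t is a scheduled minute; truncating t to the minute first makes
// that equality achievable.
func exampleShouldRunTrick() {
	t := time.Date(2016, 1, 1, 10, 5, 0, 0, time.UTC)
	compare := t.Add(-1 * time.Nanosecond)
	parsed, _ := cronexpr.Parse("*/5 * * * *")
	fmt.Println(parsed.Next(compare).Equal(t)) // true — 10:05 matches the schedule
}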
// measure records the time elapsed since t as a receiver measurement bucket.
func (r *Receiver) measure(name string, t time.Time) {
	if !conf.OutletMeasurements {
		return
	}
	b := &bucket.Bucket{
		Id: &bucket.Id{
			Name:       conf.AppName + ".receiver." + name,
			Source:     "",
			User:       conf.OutletUser,
			Pass:       conf.OutletPass,
			Time:       t.Truncate(time.Minute),
			Resolution: time.Minute,
		},
		Vals: []float64{float64(time.Since(t) / time.Millisecond)},
	}
	r.addRegister(b)
}
func newWindowByTime(
	now time.Time,
	period, every time.Duration,
	name string,
	group models.GroupID,
	align, byName, fillPeriod bool,
	tags models.Tags,
	logger *log.Logger,
) *windowByTime {
	// Determine first nextEmit time.
	var nextEmit time.Time
	if fillPeriod {
		nextEmit = now.Add(period)
		if align {
			firstPeriod := nextEmit
			// Needs to be aligned with Every and be greater than now+Period.
			nextEmit = nextEmit.Truncate(every)
			if !nextEmit.After(firstPeriod) {
				// This means we will drop the first few points.
				nextEmit = nextEmit.Add(every)
			}
		}
	} else {
		nextEmit = now.Add(every)
		if align {
			nextEmit = nextEmit.Truncate(every)
		}
	}
	return &windowByTime{
		buf:      &windowTimeBuffer{logger: logger},
		nextEmit: nextEmit,
		align:    align,
		period:   period,
		every:    every,
		name:     name,
		group:    group,
		byName:   byName,
		tags:     tags,
		logger:   logger,
	}
}
// average returns the average value across all observations which span
// the range [from, to).
// Partially included observations are accounted for by their included fraction.
// Missing observations are assumed to have the value zero.
func (h *intervalHistory) average(from, to time.Time) float64 {
	// Search only entries whose time of observation is in [start, end).
	// Example: [from, to) = [1.5s, 2.5s) => [start, end) = [1s, 2s)
	start := from.Truncate(h.interval)
	end := to.Truncate(h.interval)

	sum := 0.0
	count := 0.0
	var nextIntervalStart time.Time
	for i := len(h.records) - 1; i >= 0; i-- {
		t := h.records[i].time
		if t.After(end) {
			continue
		}
		if t.Before(start) {
			break
		}

		// Account for intervals which were not recorded.
		if !nextIntervalStart.IsZero() {
			uncoveredRange := nextIntervalStart.Sub(t)
			count += float64(uncoveredRange / h.interval)
		}

		// If an interval is only partially included, count only that fraction.
		durationAfterTo := t.Add(h.interval).Sub(to)
		if durationAfterTo < 0 {
			durationAfterTo = 0
		}
		durationBeforeFrom := from.Sub(t)
		if durationBeforeFrom < 0 {
			durationBeforeFrom = 0
		}
		weight := float64((h.interval - durationBeforeFrom - durationAfterTo).Nanoseconds()) / float64(h.interval.Nanoseconds())
		sum += weight * float64(h.records[i].value)
		count += weight

		nextIntervalStart = t.Add(-1 * h.interval)
	}
	return sum / count
}
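// Worked numbers for the fractional weighting above (interval assumed to be
// 1s): averaging over [1.5s, 2.5s) gives the observations recorded at t=1s and
// t=2s a weight of 0.5 each, so the weights sum to exactly 1.0.
func exampleFractionalWeight() {
	interval := time.Second
	from, to := time.Unix(1, 5e8), time.Unix(2, 5e8)
	t := time.Unix(2, 0)                       // observation covering [2s, 3s)
	durationAfterTo := t.Add(interval).Sub(to) // 500ms extends past `to`
	durationBeforeFrom := from.Sub(t)          // negative → clamped to 0
	if durationBeforeFrom < 0 {
		durationBeforeFrom = 0
	}
	weight := float64((interval - durationBeforeFrom - durationAfterTo).Nanoseconds()) /
		float64(interval.Nanoseconds())
	fmt.Println(weight) // 0.5
}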
// ago is available in templates via fm["ago"]; it renders a relative,
// human-readable Russian timestamp ("N minutes ago", "yesterday", and so on).
func ago(t time.Time) string {
	now := time.Now()
	diff := now.Sub(t)
	hours := int(diff.Hours())
	days := int(now.Truncate(24*time.Hour).Sub(t.Truncate(24*time.Hour)).Hours() / 24)
	switch {
	case hours == 0:
		m := diff.Minutes()
		return fmt.Sprintf("%d %s назад", int(m), rplural(int(m), "минуту", "минуты", "минут")) // "N minutes ago"
	case days == 0 && hours < 24:
		h := diff.Hours()
		return fmt.Sprintf("%d %s назад", int(h), rplural(int(h), "час", "часа", "часов")) // "N hours ago"
	case days == 1:
		return "вчера" // "yesterday"
	case days == 2:
		return "позавчера" // "the day before yesterday"
	default:
		return t.Format("02.01.2006")
	}
}
// CalcIndex returns the number of whole intervals between snapshotTime and now.
func (interval Interval) CalcIndex(now time.Time, snapshotTime time.Time) int {
	firstMonday := time.Date(1970, 1, 5, 0, 0, 0, 0, time.UTC)
	switch interval {
	case Hourly:
		now = now.Truncate(time.Hour)
		snapshotTime = snapshotTime.Truncate(time.Hour)
		return int(now.Sub(snapshotTime).Hours())
	case Daily:
		nowDays := int(now.Sub(firstMonday).Hours() / 24)
		snapshotDays := int(snapshotTime.Sub(firstMonday).Hours() / 24)
		return nowDays - snapshotDays
	case Weekly:
		nowWeeks := int(now.Sub(firstMonday).Hours() / 24 / 7)
		snapshotWeeks := int(snapshotTime.Sub(firstMonday).Hours() / 24 / 7)
		return nowWeeks - snapshotWeeks
	case Monthly:
		return int(now.Month()) - int(snapshotTime.Month()) + 12*(now.Year()-snapshotTime.Year())
	}
	return 0
}
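// Illustrative check of the weekly anchor above: counting weeks from
// 1970-01-05 (the first Monday after the epoch) makes the index roll over on
// Mondays, whereas counting from the epoch itself would roll over on Thursdays.
func exampleWeekIndex() {
	firstMonday := time.Date(1970, 1, 5, 0, 0, 0, 0, time.UTC)
	sun := time.Date(2016, 1, 3, 0, 0, 0, 0, time.UTC)      // Sunday
	mon := time.Date(2016, 1, 4, 0, 0, 0, 0, time.UTC)      // Monday
	fmt.Println(int(sun.Sub(firstMonday).Hours() / 24 / 7)) // 2399
	fmt.Println(int(mon.Sub(firstMonday).Hours() / 24 / 7)) // 2400 — new week begins
}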
// advanceTimeWithFill moves the timeseries forward to time t and fills in any
// slots that get skipped in the process with the given value. Values older than
// the timeseries period are lost.
func (ts *timeseries) advanceTimeWithFill(t time.Time, value int64) {
	advanceTo := t.Truncate(ts.resolution)
	if !advanceTo.After(ts.time) {
		// This is a shortcut for the most common case of a busy counter
		// where updates come in many times per ts.resolution.
		ts.time = advanceTo
		return
	}

	steps := int(advanceTo.Sub(ts.time).Nanoseconds() / ts.resolution.Nanoseconds())
	ts.stepCount += int64(steps)
	if steps > ts.size {
		steps = ts.size
	}
	for steps > 0 {
		ts.head = (ts.head + 1) % ts.size
		ts.slots[ts.head] = value
		steps--
	}
	ts.time = advanceTo
}
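// A small trace of the step arithmetic above (resolution and times assumed):
// truncating to the resolution keeps in-slot updates cheap, and the step count
// is capped at the ring size so a long gap overwrites each slot at most once.
func exampleStepCount() {
	resolution := time.Second
	tsTime := time.Unix(100, 0).Truncate(resolution)
	advanceTo := time.Unix(103, 7e8).Truncate(resolution) // 103s
	steps := int(advanceTo.Sub(tsTime).Nanoseconds() / resolution.Nanoseconds())
	fmt.Println(steps) // 3 slots to fill
}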
// calculateNextRollTime returns the instant at which the log should next roll,
// given a time and a roll frequency.
func calculateNextRollTime(t time.Time, freq rollFrequency) time.Time {
	if freq == RollMinutely {
		t = t.Truncate(time.Minute)
		t = t.Add(time.Minute)
	} else if freq == RollHourly {
		t = t.Truncate(time.Hour)
		t = t.Add(time.Hour)
	} else {
		t = t.Truncate(time.Hour)
		// The easiest way to beat DST bugs is to just iterate.
		for t.Hour() > 0 {
			t = t.Add(-time.Hour)
		}
		if freq == RollDaily {
			t = t.AddDate(0, 0, 1)
		} else {
			if t.Weekday() == time.Sunday {
				t = t.AddDate(0, 0, 7)
			} else {
				for t.Weekday() != time.Sunday {
					t = t.AddDate(0, 0, 1)
				}
			}
		}
	}
	return t
}
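// Why the hour-by-hour walk above beats Truncate for daily rolls, shown with
// an assumed US zone: Truncate(24*time.Hour) aligns to UTC days, while on a
// DST transition the local day is 23 or 25 hours long, so local midnight has
// to be found by stepping back whole hours in local time.
func exampleLocalMidnight() {
	loc, _ := time.LoadLocation("America/New_York")
	t := time.Date(2016, 11, 6, 12, 30, 0, 0, loc) // fall-back day, 25h long
	t = t.Truncate(time.Hour)
	for t.Hour() > 0 {
		t = t.Add(-time.Hour)
	}
	fmt.Println(t) // 2016-11-06 00:00:00 -0400 EDT — local midnight despite the extra hour
}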