// If paths is empty, all targets will be removed.
func (r *Repo) RemoveTargetsWithExpires(paths []string, expires time.Time) error {
	if !validExpires(expires) {
		return ErrInvalidExpires{expires}
	}
	t, err := r.targets()
	if err != nil {
		return err
	}
	if len(paths) == 0 {
		t.Targets = make(data.Files)
	} else {
		removed := false
		for _, path := range paths {
			path = util.NormalizeTarget(path)
			if _, ok := t.Targets[path]; !ok {
				continue
			}
			removed = true
			delete(t.Targets, path)
		}
		if !removed {
			return nil
		}
	}
	t.Expires = expires.Round(time.Second)
	t.Version++
	return r.setMeta("targets.json", t)
}
func writeTimestamp(w io.Writer, t time.Time) (err error) {
	if t.IsZero() {
		return writeLong(w, math.MinInt64)
	}
	nanoSeconds := t.Round(time.Microsecond).UnixNano()
	return writeLong(w, nanoSeconds/int64(time.Microsecond))
}
// RoundTimeUp rounds the time up to the next second/minute/hour depending on the duration.
func RoundTimeUp(realTime time.Time, duration time.Duration) time.Time {
	tmpTime := realTime.Round(duration)
	if tmpTime.Before(realTime) {
		return tmpTime.Add(duration)
	}
	return tmpTime
}
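// A minimal sketch of why the Before check above is needed (hypothetical
// values, assuming RoundTimeUp is in the same package and fmt/time are
// imported): time.Round goes to the *nearest* multiple, so below the halfway
// point it lands before the input and RoundTimeUp bumps it forward.
func ExampleRoundTimeUp() {
	t := time.Date(2020, 1, 1, 10, 20, 0, 0, time.UTC)
	fmt.Println(t.Round(time.Hour))        // 2020-01-01 10:00:00 +0000 UTC -- rounded down
	fmt.Println(RoundTimeUp(t, time.Hour)) // 2020-01-01 11:00:00 +0000 UTC -- next boundary
}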
func (r *Repo) GenKeyWithExpires(keyRole string, expires time.Time) (string, error) {
	if !keys.ValidRole(keyRole) {
		return "", ErrInvalidRole{keyRole}
	}
	if !validExpires(expires) {
		return "", ErrInvalidExpires{expires}
	}
	root, err := r.root()
	if err != nil {
		return "", err
	}
	key, err := keys.NewKey()
	if err != nil {
		return "", err
	}
	if err := r.local.SaveKey(keyRole, key.SerializePrivate()); err != nil {
		return "", err
	}
	role, ok := root.Roles[keyRole]
	if !ok {
		role = &data.Role{KeyIDs: []string{}, Threshold: 1}
		root.Roles[keyRole] = role
	}
	role.KeyIDs = append(role.KeyIDs, key.ID)
	root.Keys[key.ID] = key.Serialize()
	root.Expires = expires.Round(time.Second)
	root.Version++
	return key.ID, r.setMeta("root.json", root)
}
// cronHandler is called by app engine cron to check for work
// and also called by task queue invocations to run the work for
// a specific registered function.
func cronHandler(ctxt appengine.Context, w http.ResponseWriter, req *http.Request) {
	cron.RLock()
	list := cron.list
	cron.RUnlock()

	force := req.FormValue("force") == "1"

	// We're being called by app engine master cron,
	// so look for new work to queue in tasks.
	now := time.Now()
	var old time.Time
	err := Transaction(ctxt, func(ctxt appengine.Context) error {
		if err := ReadMeta(ctxt, "app.cron.time", &old); err != nil && err != datastore.ErrNoSuchEntity {
			return err
		}
		if !old.Before(now) {
			return nil
		}
		return WriteMeta(ctxt, "app.cron.time", now)
	})
	if err != nil {
		// already logged
		return
	}

	ctxt.Infof("cron %v -> %v", old, now)
	for _, cr := range list {
		if now.Round(cr.dt) != old.Round(cr.dt) || force {
			ctxt.Infof("start cron %s", cr.name)
			Task(ctxt, "app.cron."+cr.name, "cron", cr.name)
		}
	}
}
// RoundFrac rounds the fractional seconds to the new fsp precision and returns a new time.
// We use the “round half up” rule, e.g., >= 0.5 -> 1, < 0.5 -> 0,
// so 2011:11:11 10:10:10.888888 round 0 -> 2011:11:11 10:10:11
// and 2011:11:11 10:10:10.111111 round 0 -> 2011:11:11 10:10:10
func RoundFrac(t gotime.Time, fsp int) (gotime.Time, error) {
	_, err := checkFsp(fsp)
	if err != nil {
		return t, errors.Trace(err)
	}
	return t.Round(gotime.Duration(math.Pow10(9-fsp)) * gotime.Nanosecond), nil
}
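// A standalone sketch of the fsp step arithmetic above (hypothetical values,
// needs only the standard fmt, math, and time packages): fsp digits of
// fractional seconds correspond to a rounding unit of 10^(9-fsp) nanoseconds.
func ExampleRoundFracStep() {
	t := time.Date(2011, 11, 11, 10, 10, 10, 888888000, time.UTC)
	for _, fsp := range []int{0, 3, 6} {
		step := time.Duration(math.Pow10(9-fsp)) * time.Nanosecond
		fmt.Println(fsp, t.Round(step))
	}
	// fsp=0 rounds .888888 half up to 10:10:11; fsp=3 keeps .889; fsp=6 keeps .888888
}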
func (r *Repo) SnapshotWithExpires(t CompressionType, expires time.Time) error {
	if !validExpires(expires) {
		return ErrInvalidExpires{expires}
	}
	snapshot, err := r.snapshot()
	if err != nil {
		return err
	}
	db, err := r.db()
	if err != nil {
		return err
	}
	// TODO: generate compressed manifests
	for _, name := range snapshotManifests {
		if err := r.verifySignature(name, db); err != nil {
			return err
		}
		var err error
		snapshot.Meta[name], err = r.fileMeta(name)
		if err != nil {
			return err
		}
	}
	snapshot.Expires = expires.Round(time.Second)
	snapshot.Version++
	return r.setMeta("snapshot.json", snapshot)
}
func (self *defaultDecoder) getContainerMetrics(container *source_api.Container, labels map[string]string) []sinksV1Api.Timeseries {
	if container == nil {
		return nil
	}
	labels[sinksV1Api.LabelContainerName.Key] = container.Name
	// One metric value per data point.
	var result []sinksV1Api.Timeseries
	labelsAsString := util.LabelsToString(labels, ",")
	for _, stat := range container.Stats {
		if stat == nil {
			continue
		}
		// Add all supported metrics that have values.
		for index, supported := range self.supportedStatMetrics {
			// Finest allowed granularity is seconds.
			stat.Timestamp = stat.Timestamp.Round(time.Second)
			key := timeseriesKey{
				Name:   supported.Name,
				Labels: labelsAsString,
			}
			// TODO: remove this once the heapster source is tested to not provide duplicate stats.
			if data, ok := self.lastExported[key]; ok && data.After(stat.Timestamp) {
				continue
			}

			if supported.HasValue(&container.Spec) {
				// Cumulative stats have container creation time as their start time.
				var startTime time.Time
				if supported.Type == sinksV1Api.MetricCumulative {
					startTime = container.Spec.CreationTime
				} else {
					startTime = stat.Timestamp
				}
				points := supported.GetValue(&container.Spec, stat)
				for _, point := range points {
					labels := util.CopyLabels(labels)
					for name, value := range point.Labels {
						labels[name] = value
					}
					timeseries := sinksV1Api.Timeseries{
						MetricDescriptor: &self.supportedStatMetrics[index].MetricDescriptor,
						Point: &sinksV1Api.Point{
							Name:   supported.Name,
							Labels: labels,
							Start:  startTime.Round(time.Second),
							End:    stat.Timestamp,
							Value:  point.Value,
						},
					}
					result = append(result, timeseries)
				}
			}
			self.lastExported[key] = stat.Timestamp
		}
	}
	return result
}
// nextTimeBoundary returns the time when the currently open time window closes.
func nextTimeBoundary(baseTime time.Time, windowSize time.Duration) time.Time {
	// This will round down before the halfway point.
	b := baseTime.Round(windowSize)
	if b.Before(baseTime) {
		// It was rounded down, adjust up to next boundary.
		b = b.Add(windowSize)
	}
	return b
}
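// A minimal sketch of both branches above with a hypothetical 5-minute
// window (assuming nextTimeBoundary is in the same package):
func ExampleNextTimeBoundary() {
	early := time.Date(2020, 1, 1, 10, 2, 0, 0, time.UTC)
	late := time.Date(2020, 1, 1, 10, 4, 0, 0, time.UTC)
	fmt.Println(nextTimeBoundary(early, 5*time.Minute)) // 10:05 -- Round went down, bumped up
	fmt.Println(nextTimeBoundary(late, 5*time.Minute))  // 10:05 -- Round already landed on the boundary
}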
func (ac accumulator) getTime(t []time.Time) time.Time {
	var timestamp time.Time
	if len(t) > 0 {
		timestamp = t[0]
	} else {
		timestamp = time.Now()
	}
	return timestamp.Round(ac.precision)
}
// Get the closest time before or equal to <t> that is an integer multiple of
// <period>.
func stratificationBoundary(t time.Time, period time.Duration) time.Time {
	// time.Round picks the *nearest* multiple, which can land after <t>;
	// Truncate always rounds down, which is what the doc comment promises.
	return t.Truncate(period)
	/*
		secsSinceEpoch := t.Unix()
		// TODO: Is it safe to use Unix for this? Will leap seconds screw us up?
		periodSecs := int64(period) / int64(time.Second)
		// Round down to nearest stratification boundary.
		boundarySecsSinceEpoch := secsSinceEpoch - (secsSinceEpoch % periodSecs)
		return time.Unix(boundarySecsSinceEpoch, 0)
	*/
}
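// A standalone comparison of the two stdlib options (hypothetical values):
// Round picks the nearest multiple and can overshoot the input, while
// Truncate always stays at or before it.
func ExampleRoundVsTruncate() {
	t := time.Date(2020, 1, 1, 0, 0, 45, 0, time.UTC)
	fmt.Println(t.Round(time.Minute))    // 00:01:00 -- after t
	fmt.Println(t.Truncate(time.Minute)) // 00:00:00 -- at or before t
}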
// The number of time units returned represents
// the processing time accumulated within l2met.
// E.g. if the resolution of the bucket/id is 60s
// and the delay is 2, then it took 120s for l2met
// to process the bucket.
func (id *Id) Delay(t time.Time) int64 {
	t0 := id.Time.Round(id.Resolution).Unix()
	t1 := t.Round(id.Resolution).Unix()
	base := id.Resolution / time.Second
	if base != 0 {
		return (t1 - t0) / int64(base)
	}
	return 0
}
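// A worked sketch of the Delay arithmetic with hypothetical values (standard
// library only): a bucket at 60s resolution created at 10:00:00 and processed
// at 10:02:00 yields a delay of two resolution units, i.e. 120s of processing.
func ExampleDelayArithmetic() {
	res := time.Minute
	created := time.Date(2020, 1, 1, 10, 0, 0, 0, time.UTC)
	processed := time.Date(2020, 1, 1, 10, 2, 0, 0, time.UTC)
	t0 := created.Round(res).Unix()
	t1 := processed.Round(res).Unix()
	fmt.Println((t1 - t0) / int64(res/time.Second)) // 2
}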
func stateIdx(t time.Time) int {
	t = t.Round(durPrTick)
	a := time.Duration(t.Hour()) * time.Hour
	a += time.Duration(t.Minute()) * time.Minute
	a += time.Duration(t.Second()) * time.Second
	return int(a / durPrTick)
}
func insertNum(t *testing.T, conn *sql.Tx,
	small int, bigint string, notint float64, bigreal string, text string, date time.Time,
) bool {
	date = date.Round(time.Second)
	qry := fmt.Sprintf(`INSERT INTO `+tbl+`
		(F_int, F_bigint, F_real, F_bigreal, F_text, F_date)
		VALUES (%d, %s, %3.3f, %s, '%s', TO_DATE('%s', 'YYYY-MM-DD HH24:MI:SS'))`,
		small, bigint, notint, bigreal, text, date.Format("2006-01-02 15:04:05"))
	if _, err := conn.Exec(qry); err != nil {
		t.Errorf("cannot insert into "+tbl+" (%q): %v", qry, err)
		return false
	}

	row := conn.QueryRow("SELECT F_int, F_bigint, F_real, F_bigreal, F_text, F_date FROM "+tbl+" WHERE F_int = :1", small)
	var (
		smallO             int
		bigintO            big.Int
		notintO            float64
		bigrealF, bigrealO big.Rat
		bigintS, bigrealS  string
		textO              string
		dateO              time.Time
	)
	if err := row.Scan(&smallO, &bigintS, &notintO, &bigrealS, &textO, &dateO); err != nil {
		t.Errorf("error scanning row[%d]: %v", small, errgo.Details(err))
		return false
	}
	t.Logf("row: small=%d big=%s notint=%f bigreal=%s text=%q date=%s", smallO, bigintS, notintO, bigrealS, textO, dateO)

	if smallO != small {
		t.Errorf("small mismatch: got %d, awaited %d.", smallO, small)
	}
	(&bigintO).SetString(bigintS, 10)
	if bigintO.String() != bigint {
		t.Errorf("bigint mismatch: got %s, awaited %s.", &bigintO, bigint)
	}
	if notintO != notint {
		t.Errorf("notint mismatch: got %f, awaited %f.", notintO, notint)
	}
	(&bigrealF).SetString(bigreal)
	(&bigrealO).SetString(bigrealS)
	if (&bigrealO).Cmp(&bigrealF) != 0 {
		t.Errorf("bigreal mismatch: got %s, awaited %s.", (&bigrealO), (&bigrealF))
	}
	if textO != text {
		t.Errorf("text mismatch: got %q, awaited %q.", textO, text)
	}
	if !dateO.Equal(date) {
		t.Errorf("date mismatch: got %s, awaited %s.", dateO, date.Round(time.Second))
	}
	return true
}
// SetStartEnd sets the start date and the end date of a Usage.
func (u *Usage) SetStartEnd(start, end time.Time) error {
	roundedStart := start.Round(u.Object.UsageGranularity)
	if roundedStart.After(start) {
		roundedStart = roundedStart.Add(-u.Object.UsageGranularity)
	}
	roundedEnd := end.Round(u.Object.UsageGranularity)
	if roundedEnd.Before(end) {
		roundedEnd = roundedEnd.Add(u.Object.UsageGranularity)
	}
	return u.SetDuration(roundedEnd.Sub(roundedStart))
}
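// A worked sketch of the outward rounding above with hypothetical values and
// a one-hour granularity (standard library only): the start is pulled back to
// the previous boundary and the end pushed out to the next, so the rounded
// window always covers [start, end].
func ExampleOutwardRounding() {
	g := time.Hour
	start := time.Date(2020, 1, 1, 10, 40, 0, 0, time.UTC)
	end := time.Date(2020, 1, 1, 12, 10, 0, 0, time.UTC)
	rs := start.Round(g)
	if rs.After(start) {
		rs = rs.Add(-g) // 10:40 rounded up to 11:00; pull back to 10:00
	}
	re := end.Round(g)
	if re.Before(end) {
		re = re.Add(g) // 12:10 rounded down to 12:00; push out to 13:00
	}
	fmt.Println(re.Sub(rs)) // 3h0m0s
}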
// SetPrecision will round a time to the specified precision.
func SetPrecision(t time.Time, precision string) time.Time {
	switch precision {
	case "n":
		// Nanosecond precision: nothing to round.
	case "u":
		return t.Round(time.Microsecond)
	case "ms":
		return t.Round(time.Millisecond)
	case "s":
		return t.Round(time.Second)
	case "m":
		return t.Round(time.Minute)
	case "h":
		return t.Round(time.Hour)
	}
	return t
}
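// A minimal usage sketch (hypothetical values, assuming SetPrecision above is
// in the same package):
func ExampleSetPrecision() {
	t := time.Date(2020, 1, 1, 10, 30, 30, 123456789, time.UTC)
	fmt.Println(SetPrecision(t, "ms")) // 2020-01-01 10:30:30.123 +0000 UTC
	fmt.Println(SetPrecision(t, "n"))  // unchanged: "n" skips rounding entirely
}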
func (r *Repo) RevokeKeyWithExpires(keyRole, id string, expires time.Time) error {
	if !keys.ValidRole(keyRole) {
		return ErrInvalidRole{keyRole}
	}
	if !validExpires(expires) {
		return ErrInvalidExpires{expires}
	}
	root, err := r.root()
	if err != nil {
		return err
	}
	if _, ok := root.Keys[id]; !ok {
		return ErrKeyNotFound{keyRole, id}
	}
	role, ok := root.Roles[keyRole]
	if !ok {
		return ErrKeyNotFound{keyRole, id}
	}
	keyIDs := make([]string, 0, len(role.KeyIDs))
	for _, keyID := range role.KeyIDs {
		if keyID == id {
			continue
		}
		keyIDs = append(keyIDs, keyID)
	}
	if len(keyIDs) == len(role.KeyIDs) {
		return ErrKeyNotFound{keyRole, id}
	}
	role.KeyIDs = keyIDs
	delete(root.Keys, id)
	root.Roles[keyRole] = role
	root.Expires = expires.Round(time.Second)
	root.Version++
	return r.setMeta("root.json", root)
}
func (r *Repo) AddTargetsWithExpires(paths []string, custom json.RawMessage, expires time.Time) error {
	if !validExpires(expires) {
		return ErrInvalidExpires{expires}
	}
	t, err := r.targets()
	if err != nil {
		return err
	}
	normalizedPaths := make([]string, len(paths))
	for i, path := range paths {
		normalizedPaths[i] = util.NormalizeTarget(path)
	}
	if err := r.local.WalkStagedTargets(normalizedPaths, func(path string, target io.Reader) (err error) {
		meta, err := util.GenerateFileMeta(target, r.hashAlgorithms...)
		if err != nil {
			return err
		}
		path = util.NormalizeTarget(path)

		// If we have custom metadata, set it; otherwise maintain
		// existing metadata if present.
		if len(custom) > 0 {
			meta.Custom = &custom
		} else if t, ok := t.Targets[path]; ok {
			meta.Custom = t.Custom
		}

		t.Targets[path] = meta
		return nil
	}); err != nil {
		return err
	}
	t.Expires = expires.Round(time.Second)
	t.Version++
	return r.setMeta("targets.json", t)
}
func (r *Repo) TimestampWithExpires(expires time.Time) error {
	if !validExpires(expires) {
		return ErrInvalidExpires{expires}
	}
	db, err := r.db()
	if err != nil {
		return err
	}
	if err := r.verifySignature("snapshot.json", db); err != nil {
		return err
	}
	timestamp, err := r.timestamp()
	if err != nil {
		return err
	}
	timestamp.Meta["snapshot.json"], err = r.fileMeta("snapshot.json")
	if err != nil {
		return err
	}
	timestamp.Expires = expires.Round(time.Second)
	timestamp.Version++
	return r.setMeta("timestamp.json", timestamp)
}
// pt formats a time as MM:SS, rounded to the nearest 200ms.
func pt(t time.Time) string {
	r := t.Round(200 * time.Millisecond)
	// The original passed r.Minute() twice, printing MM:MM.
	return fmt.Sprintf("%02d:%02d", r.Minute(), r.Second())
}
func (self *decoder) getContainerMetrics(container *cache.ContainerElement, labels map[string]string) []Timeseries {
	if container == nil {
		return nil
	}
	labels[LabelContainerName.Key] = container.Name
	labels[LabelContainerBaseImage.Key] = container.Image
	// Add container specific labels along with existing labels.
	containerLabels := util.LabelsToString(container.Labels, ",")
	if labels[LabelLabels.Key] != "" {
		containerLabels = fmt.Sprintf("%s,%s", labels[LabelLabels.Key], containerLabels)
	}
	labels[LabelLabels.Key] = containerLabels

	if _, exists := labels[LabelHostID.Key]; !exists {
		labels[LabelHostID.Key] = container.ExternalID
	}

	// One metric value per data point.
	var result []Timeseries
	labelsAsString := util.LabelsToString(labels, ",")
	for _, metric := range container.Metrics {
		if metric == nil || metric.Spec == nil || metric.Stats == nil {
			continue
		}
		// Add all supported metrics that have values.
		for index, supported := range self.supportedStatMetrics {
			// Finest allowed granularity is seconds.
			metric.Stats.Timestamp = metric.Stats.Timestamp.Round(time.Second)
			key := timeseriesKey{
				Name:   supported.Name,
				Labels: labelsAsString,
			}
			// TODO: remove this once the heapster source is tested to not provide duplicate stats.
			if data, ok := self.lastExported[key]; ok && data.After(metric.Stats.Timestamp) {
				continue
			}

			if supported.HasValue(metric.Spec) {
				// Cumulative stats have container creation time as their start time.
				var startTime time.Time
				if supported.Type == MetricCumulative {
					startTime = metric.Spec.CreationTime
				} else {
					startTime = metric.Stats.Timestamp
				}
				points := supported.GetValue(metric.Spec, metric.Stats)
				for _, point := range points {
					labels := util.CopyLabels(labels)
					for name, value := range point.Labels {
						labels[name] = value
					}
					timeseries := Timeseries{
						MetricDescriptor: &self.supportedStatMetrics[index].MetricDescriptor,
						Point: &Point{
							Name:   supported.Name,
							Labels: labels,
							Start:  startTime.Round(time.Second),
							End:    metric.Stats.Timestamp,
							Value:  point.Value,
						},
					}
					result = append(result, timeseries)
				}
			}
			self.lastExported[key] = metric.Stats.Timestamp
		}
	}
	return result
}
func updateAnalyticsTick(t time.Time) {
	if os.Getenv("ABOT_ENV") == "test" {
		return
	}
	log.Info("updating analytics")
	createdAt := t.Round(24 * time.Hour)

	// User count
	var count int
	q := `SELECT COUNT(*) FROM (
		SELECT DISTINCT (flexid, flexidtype) FROM userflexids
	) AS t`
	if err := db.Get(&count, q); err != nil {
		log.Info("failed to retrieve user count.", err)
		return
	}
	aq := `INSERT INTO analytics (label, value, createdat)
	       VALUES ($1, $2, $3)
	       ON CONFLICT (label, createdat) DO UPDATE SET value=$2`
	_, err := db.Exec(aq, keyUserCount, count, createdAt)
	if err != nil {
		log.Info("failed to update analytics (user count).", err)
		return
	}

	// Message count
	q = `SELECT COUNT(*) FROM messages`
	if err = db.Get(&count, q); err != nil {
		log.Info("failed to retrieve message count.", err)
		return
	}
	_, err = db.Exec(aq, keyMsgCount, count, createdAt)
	if err != nil {
		log.Info("failed to update analytics (msg count).", err)
		return
	}

	// Messages needing training
	q = `SELECT COUNT(*) FROM messages WHERE needstraining=TRUE AND abotsent=FALSE`
	if err = db.Get(&count, q); err != nil {
		log.Info("failed to retrieve training count.", err)
		return
	}
	_, err = db.Exec(aq, keyTrainCount, count, createdAt)
	if err != nil {
		log.Info("failed to update analytics (train count).", err)
		return
	}

	// Version number
	client := &http.Client{Timeout: 15 * time.Minute}
	u := "https://raw.githubusercontent.com/itsabot/abot/master/base/plugins.json"
	req, err := http.NewRequest("GET", u, nil)
	if err != nil {
		log.Info("failed to retrieve version number.", err)
		return
	}
	reqResp, err := client.Do(req)
	if err != nil {
		log.Info("failed to retrieve version number.", err)
		return
	}
	defer func() {
		if err = reqResp.Body.Close(); err != nil {
			log.Info("failed to close body.", err)
		}
	}()
	var remoteConf PluginJSON
	if err = json.NewDecoder(reqResp.Body).Decode(&remoteConf); err != nil {
		log.Info("failed to retrieve version number.", err)
		return
	}
	_, err = db.Exec(aq, keyVersion, remoteConf.Version, createdAt)
	if err != nil {
		log.Info("failed to update analytics (version number).", err)
		return
	}
}
// MakeDTimestampTZ creates a DTimestampTZ with specified precision.
func MakeDTimestampTZ(t time.Time, precision time.Duration) *DTimestampTZ {
	return &DTimestampTZ{Time: t.Round(precision)}
}
func (s *status) summary(now time.Time) string {
	ts := s.start.Round(defaultTimeOutputScale)
	took := now.Round(defaultTimeOutputScale).Sub(ts)
	return fmt.Sprintf("[merged %d repeated lines in %s]", s.count, took)
}
// ModBySecond rounds to the nearest second, dropping milliseconds and finer.
func ModBySecond(t1 time.Time) time.Time {
	return t1.Round(time.Second)
}
// RoundTime rounds a time.Time to microseconds, which is the (undocumented)
// way that the AppEngine SDK stores it.
func RoundTime(t time.Time) time.Time {
	return t.Round(time.Microsecond)
}
// timeSince rounds both endpoints to tenths of a second so the printed
// duration stays short and stable.
func timeSince(t time.Time) string {
	return time.Now().Round(time.Second / 10).Sub(t.Round(time.Second / 10)).String()
}
// makeMetric either returns a metric, or returns nil if the metric doesn't
// need to be created (because of filtering, an error, etc.)
func (ac *accumulator) makeMetric(
	measurement string,
	fields map[string]interface{},
	tags map[string]string,
	mType telegraf.ValueType,
	t ...time.Time,
) telegraf.Metric {
	if len(fields) == 0 || len(measurement) == 0 {
		return nil
	}
	if tags == nil {
		tags = make(map[string]string)
	}

	// Override measurement name if set
	if len(ac.inputConfig.NameOverride) != 0 {
		measurement = ac.inputConfig.NameOverride
	}
	// Apply measurement prefix and suffix if set
	if len(ac.inputConfig.MeasurementPrefix) != 0 {
		measurement = ac.inputConfig.MeasurementPrefix + measurement
	}
	if len(ac.inputConfig.MeasurementSuffix) != 0 {
		measurement = measurement + ac.inputConfig.MeasurementSuffix
	}

	// Apply plugin-wide tags if set
	for k, v := range ac.inputConfig.Tags {
		if _, ok := tags[k]; !ok {
			tags[k] = v
		}
	}
	// Apply daemon-wide tags if set
	for k, v := range ac.defaultTags {
		if _, ok := tags[k]; !ok {
			tags[k] = v
		}
	}

	// Apply the metric filter(s)
	if ok := ac.inputConfig.Filter.Apply(measurement, fields, tags); !ok {
		return nil
	}

	for k, v := range fields {
		// Validate uint64 and float64 fields
		switch val := v.(type) {
		case uint64:
			// InfluxDB does not support writing uint64
			if val < uint64(9223372036854775808) {
				fields[k] = int64(val)
			} else {
				fields[k] = int64(9223372036854775807)
			}
			continue
		case float64:
			// NaNs are invalid values in influxdb, skip measurement
			if math.IsNaN(val) || math.IsInf(val, 0) {
				if ac.debug {
					log.Printf("Measurement [%s] field [%s] has a NaN or Inf "+
						"field, skipping", measurement, k)
				}
				delete(fields, k)
				continue
			}
		}
		fields[k] = v
	}

	var timestamp time.Time
	if len(t) > 0 {
		timestamp = t[0]
	} else {
		timestamp = time.Now()
	}
	timestamp = timestamp.Round(ac.precision)

	var m telegraf.Metric
	var err error
	switch mType {
	case telegraf.Counter:
		m, err = telegraf.NewCounterMetric(measurement, tags, fields, timestamp)
	case telegraf.Gauge:
		m, err = telegraf.NewGaugeMetric(measurement, tags, fields, timestamp)
	default:
		m, err = telegraf.NewMetric(measurement, tags, fields, timestamp)
	}
	if err != nil {
		log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
		return nil
	}

	if ac.trace {
		fmt.Println("> " + m.String())
	}

	return m
}
func (ac *accumulator) AddFields(
	measurement string,
	fields map[string]interface{},
	tags map[string]string,
	t ...time.Time,
) {
	if len(fields) == 0 || len(measurement) == 0 {
		return
	}
	if !ac.inputConfig.Filter.ShouldNamePass(measurement) {
		return
	}
	if !ac.inputConfig.Filter.ShouldTagsPass(tags) {
		return
	}

	// Override measurement name if set
	if len(ac.inputConfig.NameOverride) != 0 {
		measurement = ac.inputConfig.NameOverride
	}
	// Apply measurement prefix and suffix if set
	if len(ac.inputConfig.MeasurementPrefix) != 0 {
		measurement = ac.inputConfig.MeasurementPrefix + measurement
	}
	if len(ac.inputConfig.MeasurementSuffix) != 0 {
		measurement = measurement + ac.inputConfig.MeasurementSuffix
	}

	if tags == nil {
		tags = make(map[string]string)
	}
	// Apply plugin-wide tags if set
	for k, v := range ac.inputConfig.Tags {
		if _, ok := tags[k]; !ok {
			tags[k] = v
		}
	}
	// Apply daemon-wide tags if set
	for k, v := range ac.defaultTags {
		if _, ok := tags[k]; !ok {
			tags[k] = v
		}
	}
	ac.inputConfig.Filter.FilterTags(tags)

	result := make(map[string]interface{})
	for k, v := range fields {
		// Filter out any filtered fields
		if ac.inputConfig != nil {
			if !ac.inputConfig.Filter.ShouldFieldsPass(k) {
				continue
			}
		}

		// Validate uint64 and float64 fields
		switch val := v.(type) {
		case uint64:
			// InfluxDB does not support writing uint64
			if val < uint64(9223372036854775808) {
				result[k] = int64(val)
			} else {
				result[k] = int64(9223372036854775807)
			}
			continue
		case float64:
			// NaNs are invalid values in influxdb, skip measurement
			if math.IsNaN(val) || math.IsInf(val, 0) {
				if ac.debug {
					log.Printf("Measurement [%s] field [%s] has a NaN or Inf "+
						"field, skipping", measurement, k)
				}
				continue
			}
		}

		result[k] = v
	}
	fields = nil
	if len(result) == 0 {
		return
	}

	var timestamp time.Time
	if len(t) > 0 {
		timestamp = t[0]
	} else {
		timestamp = time.Now()
	}
	timestamp = timestamp.Round(ac.precision)

	m, err := telegraf.NewMetric(measurement, tags, result, timestamp)
	if err != nil {
		log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
		return
	}
	if ac.trace {
		fmt.Println("> " + m.String())
	}
	ac.metrics <- m
}
// ExecuteContinuousQuery executes a single CQ.
func (s *Service) ExecuteContinuousQuery(dbi *meta.DatabaseInfo, cqi *meta.ContinuousQueryInfo, now time.Time) error {
	// TODO: re-enable stats
	//s.stats.Inc("continuousQueryExecuted")

	// Local wrapper / helper.
	cq, err := NewContinuousQuery(dbi.Name, cqi)
	if err != nil {
		return err
	}

	// Get the last time this CQ was run from the service's cache.
	s.mu.Lock()
	defer s.mu.Unlock()
	cq.LastRun = s.lastRuns[cqi.Name]

	// Set the retention policy to default if it wasn't specified in the query.
	if cq.intoRP() == "" {
		cq.setIntoRP(dbi.DefaultRetentionPolicy)
	}

	// See if this query needs to be run.
	computeNoMoreThan := time.Duration(s.Config.ComputeNoMoreThan)
	run, err := cq.shouldRunContinuousQuery(s.Config.ComputeRunsPerInterval, computeNoMoreThan)
	if err != nil {
		return err
	} else if !run {
		return nil
	}

	// We're about to run the query so store the time.
	lastRun := time.Now()
	cq.LastRun = lastRun
	s.lastRuns[cqi.Name] = lastRun

	// Get the group by interval.
	interval, err := cq.q.GroupByInterval()
	if err != nil {
		return err
	} else if interval == 0 {
		return nil
	}

	// Calculate and set the time range for the query.
	startTime := now.Round(interval)
	if startTime.UnixNano() > now.UnixNano() {
		startTime = startTime.Add(-interval)
	}
	if err := cq.q.SetTimeRange(startTime, startTime.Add(interval)); err != nil {
		s.Logger.Printf("error setting time range: %s\n", err)
	}

	if s.loggingEnabled {
		s.Logger.Printf("executing continuous query %s", cq.Info.Name)
	}

	// Do the actual processing of the query & writing of results.
	if err := s.runContinuousQueryAndWriteResult(cq); err != nil {
		s.Logger.Printf("error: %s. running: %s\n", err, cq.q.String())
		return err
	}

	recomputeNoOlderThan := time.Duration(s.Config.RecomputeNoOlderThan)
	for i := 0; i < s.Config.RecomputePreviousN; i++ {
		// If we're already more time past the previous window than we're going to look back, stop.
		if now.Sub(startTime) > recomputeNoOlderThan {
			return nil
		}
		newStartTime := startTime.Add(-interval)

		if err := cq.q.SetTimeRange(newStartTime, startTime); err != nil {
			s.Logger.Printf("error setting time range: %s\n", err)
			return err
		}
		if err := s.runContinuousQueryAndWriteResult(cq); err != nil {
			s.Logger.Printf("error during recompute previous: %s. running: %s\n", err, cq.q.String())
			return err
		}

		startTime = newStartTime
	}
	return nil
}