// ImportNodes reads nodes.json-compatible data into the database.
func (db *NodeDB) ImportNodes(r io.Reader, persistent bool) error {
	nodes, err := readNodesJSON(r)
	if err != nil {
		return err
	}
	if nodes.Version != 1 {
		return ErrUnknownVersion
	}
	for _, node := range nodes.Nodes {
		var addr alfred.HardwareAddr
		if err := addr.Parse(node.NodeInfo.NodeID); err != nil {
			log.Printf("Import: error parsing NodeID %s: %v, skipping", node.NodeInfo.NodeID, err)
			continue
		}
		n := &NodeInfo{NodeInfo: gluon.NodeInfo{Source: addr, Data: &node.NodeInfo}}
		m := store.NewMeta(n)
		m.Updated = time.Time(node.LastSeen).Local()
		m.Created = time.Time(node.FirstSeen).Local()
		if !persistent {
			m.InvalidateIn(db.validTimeGluon)
		}
		err := db.Main.Batch(func(tx *bolt.Tx) error {
			return db.Main.UpdateMeta(tx, store.NewMeta(&NodeInfo{}), m)
		})
		if err != nil {
			log.Printf("Import: error on node %v: %v", node.NodeInfo.NodeID, err)
		}
	}
	// Per-node batch errors are shadowed above and only logged, so a
	// completed loop reports success.
	return nil
}
// get_latest uses the arch, software, and version to find the most recently
// modified matching file.
func get_latest(arch string, software string, version string) (dropbox.Entry, bool) {
	target_path := get_target_path(arch, version)
	search := software + "-"
	mTime := time.Time(dropbox.DBTime{})
	var latest_file dropbox.Entry
	files := get_files(cache_instance, db, target_path)
	for _, file := range files {
		if strings.Contains(file.Path, search) && time.Time(file.Modified).After(mTime) {
			mTime = time.Time(file.Modified)
			latest_file = file
		}
	}
	return latest_file, latest_file.Path != ""
}
func (r *RRD) ToLogdata(out chan<- map[string]Logitem) error {
	names := make([]string, 0, 5)
	for _, ds := range r.DS {
		if ds.Type != "GAUGE" {
			return errors.New("unknown dataset type")
		}
		names = append(names, string(ds.Name))
	}
	for _, rra := range r.RRA {
		step := time.Duration(rra.PDPPerRow) * time.Duration(r.Step)
		lastItem := time.Time(r.LastUpdate).Round(step)
		if time.Time(r.LastUpdate).Before(lastItem) {
			// Correction for Round's round-up behaviour.
			lastItem = lastItem.Add(-step)
		}
		firstItem := lastItem.Add(-step * time.Duration(len(rra.Database.Row)-1))
		for row, items := range rra.Database.Row {
			data := make(map[string]Logitem)
			for nr, v := range items.V {
				data[names[nr]] = Logitem{Timestamp: firstItem.Add(step * time.Duration(row)), Value: v}
			}
			out <- data
		}
	}
	return nil
}
// computeHistogramInterval returns the Timerange interval as both a float32
// and a string that can be passed to the Elasticsearch date_histogram field.
// For example, 613.234s is a valid interval. The interval is computed so that
// the histogram will contain approximately the given number of points.
func computeHistogramInterval(tr *datetime.Timerange, points int) (float32, string) {
	// The bucket interval in seconds (can be fractional): the total range in
	// nanoseconds is divided by the number of points, truncated to
	// milliseconds, then converted to seconds.
	total_interval := time.Time(tr.To).Sub(time.Time(tr.From))
	interval_secs := float32(int64(total_interval)/int64(points)/int64(1e6)) / 1000
	return interval_secs, fmt.Sprintf("%.3fs", interval_secs)
}
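// A minimal standalone sketch (not part of the original source) of the same
// interval arithmetic, using plain time.Time values in place of the assumed
// datetime.Timerange type: a one-hour range bucketed into 100 points yields
// an interval of 36.000s.
func exampleHistogramInterval() {
	from := time.Date(2015, 1, 1, 0, 0, 0, 0, time.UTC)
	to := from.Add(time.Hour)
	points := 100
	total := to.Sub(from) // 3.6e12 ns
	secs := float32(int64(total)/int64(points)/int64(1e6)) / 1000
	fmt.Printf("%.3fs\n", secs) // prints "36.000s"
}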
func testCreatedJsonTime(engine *xorm.Engine, t *testing.T) {
	di5 := new(MyJsonTime)
	err := engine.Sync2(di5)
	if err != nil {
		t.Fatal(err)
	}
	ci5 := &MyJsonTime{}
	_, err = engine.Insert(ci5)
	if err != nil {
		t.Fatal(err)
	}
	has, err := engine.Desc("(id)").Get(di5)
	if err != nil {
		t.Fatal(err)
	}
	if !has {
		t.Fatal(xorm.ErrNotExist)
	}
	if time.Time(ci5.Created).Unix() != time.Time(di5.Created).Unix() {
		t.Fatal("should equal:", time.Time(ci5.Created).Unix(), time.Time(di5.Created).Unix())
	}
	fmt.Println("ci5:", ci5, "di5:", di5)

	var dis = make([]MyJsonTime, 0)
	err = engine.Find(&dis)
	if err != nil {
		t.Fatal(err)
	}
}
func (task *Task) String() string {
	var fields = []string{
		"Type: Task",
		task.Object.String(),
		"Closed: " + strconv.FormatBool(task.Closed),
		"Planned start: " + time.Time(task.PlannedStart).String(),
		"Planned end: " + time.Time(task.PlannedEnd).String(),
		"Planned duration: " + time.Duration(task.PlannedDuration).String(),
		"Estimated start: " + time.Time(task.EstimatedStart).String(),
		"Estimated end: " + time.Time(task.EstimatedEnd).String(),
		"Estimated duration: " + time.Duration(task.EstimatedDuration).String(),
		"Planned effort: " + time.Duration(task.PlannedEffort).String(),
		"Estimated effort: " + time.Duration(task.EstimatedEffort).String(),
		"Current effort: " + time.Duration(task.CurrentEffort).String(),
		"Estimated progress: " + strconv.FormatFloat(float64(task.EstimatedProgress), 'f', 2, 64),
		"Planned progress: " + strconv.FormatFloat(float64(task.PlannedProgress), 'f', 2, 64),
	}
	var dependencies = []string{"Tasks:"}
	var resources = []string{"Assigned resources:"}
	var efforts = []string{"Efforts:"}
	for _, value := range task.OutgoingDependencies {
		dependencies = append(dependencies, "\t"+value.String())
	}
	for _, value := range task.Resources {
		resources = append(resources, "\t"+value.String())
	}
	for _, value := range task.Efforts {
		efforts = append(efforts, "\t"+value.String())
	}
	return strings.Join(append(append(append(fields, dependencies...), resources...), efforts...), "\n")
}
// Equal reports whether v is a Time representing the same instant as s.
func (s Time) Equal(v Value) bool {
	t, ok := v.(Time)
	if !ok {
		return false
	}
	return time.Time(s).Equal(time.Time(t))
}
// MarshalText implements the encoding.TextMarshaler interface.
func (t JSONTime) MarshalText() ([]byte, error) {
	if time.Time(t).IsZero() || time.Time(t).Unix() == 0 {
		return []byte{}, nil
	}
	stamp := time.Time(t).UTC().Format(time.RFC3339Nano)
	return []byte(stamp), nil
}
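// A hedged usage sketch (not part of the original source), assuming JSONTime
// is declared as `type JSONTime time.Time` (the conversions above require an
// underlying time.Time): zero and Unix-epoch values marshal to an empty
// string, everything else to RFC 3339 (with nanoseconds when present) in UTC.
func exampleJSONTimeMarshalText() {
	b, _ := JSONTime(time.Time{}).MarshalText()
	fmt.Printf("%q\n", b) // ""
	b, _ = JSONTime(time.Date(2016, 5, 4, 3, 2, 1, 0, time.UTC)).MarshalText()
	fmt.Printf("%q\n", b) // "2016-05-04T03:02:01Z"
}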
func ConvertToActivity(rkActivity *runkeeper.FitnessActivity) *dm.Activity {
	returnActivity := dm.CreateActivity()
	if rkActivity.Type == "Other" {
		returnActivity.Type = "Activity"
	} else {
		returnActivity.Type = rkActivity.Type
	}
	// RunKeeper time is 'Local'; apply the UTC offset to correct it.
	correctedTime := time.Time(rkActivity.StartTime).Add(time.Duration(rkActivity.UtcOffset) * time.Hour)
	log.Printf("RK Local date: %s, start date: %s, unix: %d, offset: %d",
		time.Time(rkActivity.StartTime), correctedTime, time.Time(rkActivity.StartTime).Unix(), rkActivity.UtcOffset)
	returnActivity.StartTime = int(correctedTime.Unix())
	returnActivity.UtcOffSet = rkActivity.UtcOffset
	returnActivity.Duration = int(rkActivity.Duration)
	returnActivity.Name = rkActivity.Notes
	returnActivity.Notes = "" // rkActivity.Comment — mapping unclear
	returnActivity.Private = false
	returnActivity.Stationary = rkActivity.HasMap
	returnActivity.AverageHeartRate = 0 // rkActivity.AverageHeartRate
	returnActivity.Calories = rkActivity.TotalCalories
	returnActivity.Distance = rkActivity.TotalDistance
	returnActivity.GPS = convertFromPath(rkActivity.Path)
	returnActivity.HeartRate = convertFromHR(rkActivity.HeartRate)
	//log.Printf("INPUT: %s, OUTPUT: %s", rkActivity, returnActivity)
	return returnActivity
}
// fetchLatestTarVersions reads the Atlassian download feed and fetches the
// latest tar.gz entry for each version.
func fetchLatestTarVersions(url string) (versions map[string]Package, err error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	// The feed is JSONP; extract the JSON payload between the outermost parentheses.
	start := bytes.Index(data, []byte("("))
	end := bytes.LastIndex(data, []byte(")"))
	if !(end > start && start > -1) {
		return nil, errors.New("error in jsonp content")
	}
	var archives []Package
	err = json.Unmarshal(data[start+1:end], &archives)
	if err != nil {
		return nil, err
	}
	versions = map[string]Package{}
	for _, archive := range archives {
		filename := path.Base(archive.ZipURL)
		if !strings.Contains(filename, ".tar.gz") {
			continue
		}
		majmin := archive.Version.MajorMinor()
		v, ok := versions[majmin]
		if !ok || time.Time(archive.Released).After(time.Time(v.Released)) {
			versions[majmin] = archive
		}
	}
	return versions, nil
}
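// A minimal standalone sketch (not part of the original source) of the JSONP
// unwrapping step above: the JSON payload is whatever sits between the first
// "(" and the last ")".
func exampleUnwrapJSONP() {
	data := []byte(`downloads([{"version":"1.2.3"}])`)
	start := bytes.Index(data, []byte("("))
	end := bytes.LastIndex(data, []byte(")"))
	if end > start && start > -1 {
		fmt.Printf("%s\n", data[start+1:end]) // [{"version":"1.2.3"}]
	}
}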
func (r *CopyHeader) UnmarshalJSON(b []byte) error {
	type tmp CopyHeader
	var s struct {
		tmp
		ContentLength          string                  `json:"Content-Length"`
		CopiedFromLastModified gophercloud.JSONRFC1123 `json:"X-Copied-From-Last-Modified"`
		Date                   gophercloud.JSONRFC1123 `json:"Date"`
		LastModified           gophercloud.JSONRFC1123 `json:"Last-Modified"`
	}
	err := json.Unmarshal(b, &s)
	if err != nil {
		return err
	}
	*r = CopyHeader(s.tmp)
	switch s.ContentLength {
	case "":
		r.ContentLength = 0
	default:
		r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64)
		if err != nil {
			return err
		}
	}
	r.Date = time.Time(s.Date)
	r.CopiedFromLastModified = time.Time(s.CopiedFromLastModified)
	r.LastModified = time.Time(s.LastModified)
	return nil
}
// PermanodeTimeLocked returns the time of the content in permanode.
func (c *Corpus) PermanodeTimeLocked(pn blob.Ref) (t time.Time, ok bool) {
	// TODO(bradfitz): keep this time property cached on the permanode / files
	// TODO(bradfitz): finish implementing all these
	// Priorities:
	// -- Permanode explicit "camliTime" property
	// -- EXIF GPS time
	// -- EXIF camera time
	// -- File time
	// -- File modtime
	// -- camliContent claim set time
	ccRef, ccTime, ok := c.pnCamliContentLocked(pn)
	if !ok {
		return
	}
	fi, ok := c.files[ccRef]
	if ok {
		if fi.Time != nil {
			return time.Time(*fi.Time), true
		}
		if fi.ModTime != nil {
			return time.Time(*fi.ModTime), true
		}
	}
	return ccTime, true
}
// UpdateCommitInfo finds all the new commits since the last time we ran and
// adds them to the tiles, creating new tiles if necessary.
func (i *Ingester) UpdateCommitInfo(pull bool) error {
	glog.Infof("Ingest %s: Starting UpdateCommitInfo", i.datasetName)
	if err := i.git.Update(pull, false); err != nil {
		return fmt.Errorf("Ingest %s: Failed git pull during UpdateCommitInfo: %s", i.datasetName, err)
	}

	// Compute the commit number for each Git hash.
	allHashes := i.git.From(time.Time(BEGINNING_OF_TIME))
	hashToNumber := map[string]int{}
	for i, h := range allHashes {
		hashToNumber[h] = i
	}
	i.hashToNumber = hashToNumber

	// Find the time of the last commit seen.
	ts := time.Time(BEGINNING_OF_TIME)
	lastTile, err := i.tileStore.Get(0, -1)
	if err == nil && lastTile != nil {
		ts = i.lastCommitTimeInTile(lastTile)
	} else {
		// Boundary condition: we just started making tiles and none exist.
		newTile := tiling.NewTile()
		newTile.Scale = 0
		newTile.TileIndex = 0
		if err := i.tileStore.Put(0, 0, newTile); err != nil {
			return fmt.Errorf("Ingest %s: UpdateCommitInfo: Failed to write new tile: %s", i.datasetName, err)
		}
	}
	glog.Infof("Ingest %s: UpdateCommitInfo: Last commit timestamp: %s", i.datasetName, ts)

	// Find all the Git hashes that are new to us.
	newHashes := i.git.From(ts)
	glog.Infof("Ingest %s: len(newHashes): %d", i.datasetName, len(newHashes))

	// Add commit info to the tiles for each new hash.
	tt := NewTileTracker(i.tileStore, i.hashToNumber)
	for _, hash := range newHashes {
		if err := tt.Move(hash); err != nil {
			glog.Errorf("UpdateCommitInfo Move(%s) failed with: %s", hash, err)
			continue
		}
		details, err := i.git.Details(hash, true)
		if err != nil {
			glog.Errorf("Failed to get details for hash: %s: %s", hash, err)
			continue
		}
		tt.Tile().Commits[tt.Offset(hash)] = &tiling.Commit{
			CommitTime: details.Timestamp.Unix(),
			Hash:       hash,
			Author:     details.Author,
		}
	}
	glog.Infof("Ingest %s: Starting to flush tile.", i.datasetName)
	tt.Flush()

	glog.Infof("Ingest %s: Finished UpdateCommitInfo", i.datasetName)
	return nil
}
func (dates *ProjectSummaryDates) String() string {
	var fields = []string{
		time.Time(dates.Planned).String() + " (Planned)",
		time.Time(dates.Estimated).String() + " (Estimated)",
	}
	return strings.Join(fields, " ")
}
// nextAfter returns the date itself if t falls before it; a single date has
// no further occurrences after that.
func (self Date) nextAfter(t time.Time) (time.Time, error) {
	if t.Before(time.Time(self)) {
		return time.Time(self), nil
	}
	var zeroTime time.Time
	return zeroTime, fmt.Errorf("no more occurrences after %s", t)
}
func (ss *SiteStat) store(statPath string) (err error) {
	now := time.Now()
	var savedSS *SiteStat
	if ss.Update == Date(zeroTime) {
		ss.Update = Date(time.Now())
	}
	if now.Sub(time.Time(ss.Update)) > siteStaleThreshold {
		// Not updated for a long time; don't drop any record.
		savedSS = ss
		// Changing the update time too fast would also drop useful records.
		savedSS.Update = Date(time.Time(ss.Update).Add(siteStaleThreshold / 2))
		if time.Time(savedSS.Update).After(now) {
			savedSS.Update = Date(now)
		}
	} else {
		savedSS = newSiteStat()
		savedSS.Update = Date(now)
		ss.vcLock.RLock()
		for site, vcnt := range ss.Vcnt {
			if vcnt.shouldNotSave() {
				continue
			}
			savedSS.Vcnt[site] = vcnt
		}
		ss.vcLock.RUnlock()
	}

	b, err := json.MarshalIndent(savedSS, "", "\t")
	if err != nil {
		errl.Println("Error marshalling site stat:", err)
		panic("internal error: error marshalling site")
	}

	// Store the stat into a temp file first and then rename, which makes the
	// update of the stat file atomic and avoids damaging it. Create the tmp
	// file inside the config directory to avoid a cross-filesystem rename.
	f, err := ioutil.TempFile(config.dir, "stat")
	if err != nil {
		errl.Println("create tmp file to store stat", err)
		return
	}
	if _, err = f.Write(b); err != nil {
		errl.Println("Error writing stat file:", err)
		f.Close()
		return
	}
	f.Close()
	// Windows doesn't allow renaming to an existing file.
	os.Remove(statPath + ".bak")
	os.Rename(statPath, statPath+".bak")
	if err = os.Rename(f.Name(), statPath); err != nil {
		errl.Println("rename new stat file", err)
		return
	}
	return
}
func (s memorySeriesStorage) GetValueAtTime(fp model.Fingerprint, t time.Time, p StalenessPolicy) (sample *model.Sample, err error) {
	series, ok := s.fingerprintToSeries[fp]
	if !ok {
		return
	}

	iterator := series.values.Seek(skipListTime(t))
	if iterator == nil {
		return
	}

	foundTime := time.Time(iterator.Key().(skipListTime))
	if foundTime.Equal(t) {
		value := iterator.Value().(value)
		sample = &model.Sample{
			Metric:    series.metric,
			Value:     value.get(),
			Timestamp: t,
		}
		return
	}

	if t.Sub(foundTime) > p.DeltaAllowance {
		return
	}

	secondTime := foundTime
	secondValue := iterator.Value().(value).get()

	if !iterator.Previous() {
		sample = &model.Sample{
			Metric:    series.metric,
			Value:     iterator.Value().(value).get(),
			Timestamp: t,
		}
		return
	}

	firstTime := time.Time(iterator.Key().(skipListTime))
	if t.Sub(firstTime) > p.DeltaAllowance {
		return
	}
	if firstTime.Sub(secondTime) > p.DeltaAllowance {
		return
	}

	firstValue := iterator.Value().(value).get()

	sample = &model.Sample{
		Metric:    series.metric,
		Value:     interpolateSample(firstTime, secondTime, float32(firstValue), float32(secondValue), t),
		Timestamp: t,
	}
	return
}
func (app *positionsApp) updateHandler(w http.ResponseWriter, req *http.Request) {
	parts := strings.Split(req.URL.Path, "/")
	positionID, _ := strconv.Atoi(parts[len(parts)-1])
	var p, err = app.scanPosition(app.Db.QueryRow(selectPositionSQL, positionID))
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	dec := json.NewDecoder(req.Body)
	if err := dec.Decode(&p); err != nil && err != io.EOF {
		log.Fatal("decode error", err)
	}
	if errors, ok := p.IsValid(); !ok {
		log.Printf("INFO: unable to update position due to validation errors: %v", errors)
		w.WriteHeader(http.StatusBadRequest)
		p.Errors = errors
		if b, err := json.Marshal(p); err == nil {
			io.WriteString(w, string(b))
		}
		return
	}
	// TODO(rr): rewrite with an evented approach. We might also need a flag to
	// persist whether the lookup was fine.
	if err := app.setTotalAmountCentsInEur(&p); err != nil {
		fmt.Printf("currency lookup error %v\n", err)
		w.WriteHeader(http.StatusBadRequest)
		io.WriteString(w, "{}")
		return
	}
	_, updateError := app.Db.Exec(updatePositionSQL,
		p.AccountCodeFrom, p.AccountCodeTo, p.PositionType,
		time.Time(p.InvoiceDate), time.Time(p.BookingDate),
		p.InvoiceNumber, p.TotalAmountCents, p.TotalAmountCentsInEur,
		p.Currency, p.Tax, p.FiscalPeriodID, p.Description,
		p.AttachmentPath, positionID)
	enc := json.NewEncoder(w)
	if err := enc.Encode(p); err != nil || updateError != nil {
		fmt.Printf("Error updating position: %v, %v\n", err, updateError)
		w.WriteHeader(http.StatusBadRequest)
		io.WriteString(w, "{}")
	}
}
// Timeout returns the timeout based on the current time. If the computed
// timeout is 10ms or less, it returns TimeoutErr. If the deadline is a zero
// value, it returns a timeout of 0.
func (dl Deadline) Timeout() (time.Duration, error) {
	if time.Time(dl).IsZero() {
		return 0, nil
	}
	timeout := time.Time(dl).Sub(time.Now())
	if timeout <= 10*time.Millisecond {
		return 0, TimeoutErr
	}
	return timeout, nil
}
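// A hedged usage sketch (not part of the original source), assuming Deadline
// is declared as `type Deadline time.Time`: a zero deadline means "no
// timeout", and a deadline within 10ms (or already past) yields TimeoutErr.
func exampleDeadlineTimeout() {
	var none Deadline // zero value: no timeout
	d, err := none.Timeout()
	fmt.Println(d, err) // 0s <nil>

	soon := Deadline(time.Now().Add(5 * time.Millisecond))
	if _, err := soon.Timeout(); err != nil {
		fmt.Println("deadline too close:", err) // TimeoutErr
	}
}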
// setMetaData sets the metadata from info.
func (o *Object) setMetaData(info *api.Item) {
	o.hasMetaData = true
	o.size = info.Size
	if info.FileSystemInfo != nil {
		o.modTime = time.Time(info.FileSystemInfo.LastModifiedDateTime)
	} else {
		o.modTime = time.Time(info.LastModifiedDateTime)
	}
	o.id = info.ID
}
// Parse extracts time from string-based info, with some constraints.
//
// The described time cannot be in the future, or more than 1000 years in the past.
//
// Note that month is 0-indexed, unlike time.Month.
func Parse(year, month, day, hourMinute string, loc *time.Location) (time.Time, error) {
	now := time.Now().In(loc)

	y64, err := strconv.ParseInt(year, 10, 0)
	y := int(y64)
	if err != nil {
		return time.Time{}, err
	}
	if y < now.Year()-1000 {
		return time.Time{}, fmt.Errorf("bad year; %d is too far in the past", y)
	}

	m, err := strconv.ParseInt(month, 10, 0)
	if err != nil {
		return time.Time{}, err
	}
	if m < 0 || m > 11 {
		return time.Time{}, fmt.Errorf("bad month: %d is not within [0, 11]", m)
	}
	// Month +1 since time.Month is [1, 12].
	m = m + 1

	d64, err := strconv.ParseInt(day, 10, 0)
	d := int(d64)
	if err != nil {
		return time.Time{}, err
	}
	if d < 1 {
		return time.Time{}, fmt.Errorf("bad day: %d; must be at least 1", d)
	} else if d > daysIn(time.Month(m), y) {
		return time.Time{}, fmt.Errorf("bad day: %d; only %d days in %v, %d", d, daysIn(time.Month(m), y), time.Month(m), y)
	}

	parts := strings.Split(hourMinute, ":")
	if len(parts) != 2 {
		return time.Time{}, fmt.Errorf("bad hour/minute: %s", hourMinute)
	}
	h, err := strconv.ParseInt(parts[0], 10, 0)
	if err != nil {
		return time.Time{}, err
	}
	if h < 0 || h > 23 {
		return time.Time{}, fmt.Errorf("bad hour: %d", h)
	}
	min, err := strconv.ParseInt(parts[1], 10, 0)
	if err != nil {
		return time.Time{}, err
	}
	if min < 0 || min > 59 {
		return time.Time{}, fmt.Errorf("bad minute: %d", min)
	}

	t := time.Date(y, time.Month(m), d, int(h), int(min), 0, 0, loc)
	if t.After(now) {
		return time.Time{}, fmt.Errorf("bad time; %v is in the future", t)
	}
	return t, nil
}
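// A hedged usage sketch (not part of the original source) highlighting the
// 0-indexed month convention documented above: "3" parses as April.
func exampleParse() {
	t, err := Parse("2015", "3", "14", "09:26", time.UTC)
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(t) // 2015-04-14 09:26:00 +0000 UTC
}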
func TestIntersectionOccurrences(t *testing.T) {
	tr := TimeRange{time.Time(NewDate("2006-01-01")), time.Time(NewDate("2009-12-31"))}
	expectations := map[int]Schedule{
		8: Intersection{Friday, Day(13)},
		4: Intersection{November, Thursday, Week(4)},
		3: Intersection{Day(Last), Day(28)},
	}
	assertOccurrenceGeneration2(t, tr, expectations)
}
func mainHandler(w http.ResponseWriter, r *http.Request) {
	fmt.Printf("%+v\n", r)
	pathList := strings.Split(r.URL.Path, "/")
	endPath := pathList[len(pathList)-1]
	// strings.Split always returns at least one element, so test for an empty
	// final component (e.g. the root path "/") rather than an empty slice.
	if endPath == "" {
		openScoreSheet(w, r)
	} else {
		switch filepath.Ext(endPath) {
		case ".js", ".html", ".ico", ".css":
			// Open the input file.
			fi, err := os.Open(*hostPath + endPath)
			if err != nil {
				fmt.Print("error reading ", *hostPath+endPath, "\n")
				fmt.Fprint(w, "")
				return
			}
			// Close fi on exit and check for its returned error.
			defer func() {
				if err := fi.Close(); err != nil {
					fmt.Print("error closing ", *hostPath+endPath, "\n")
				}
			}()
			// Make a read buffer.
			fr := bufio.NewReader(fi)
			io.Copy(w, fr)
		case ".json":
			fmt.Fprint(w, "TODO")
		default:
			if contest.Name != "" {
				if time.Time(contest.StartTime).Before(time.Now()) {
					if len(pathList) >= 3 && pathList[1] == "problem" {
						openProblem(w, r, pathList[2])
					} else if strings.Contains(r.URL.Path, "judg") {
						openJudge(w, r)
					} else {
						openScoreSheet(w, r)
					}
				} else {
					if strings.Contains(r.URL.Path, "judg") {
						openJudge(w, r)
					} else {
						fmt.Fprintf(w, "Contest will begin in: %v", time.Time(contest.StartTime).Sub(time.Now()))
					}
				}
			} else {
				if strings.Contains(r.URL.Path, "judg") {
					openJudge(w, r)
				} else {
					fmt.Fprint(w, "No Competition set up")
				}
			}
		}
	}
}
func TestUnionOccurrences(t *testing.T) {
	tr := TimeRange{time.Time(NewDate("2006-01-01")), time.Time(NewDate("2009-12-31"))}
	expectations := map[int]Schedule{
		368: Union{June, July, August},
		626: Union{Monday, Wednesday, Friday},
		209: Union{Monday, Monday}, // Shouldn't duplicate days
	}
	assertOccurrenceGeneration2(t, tr, expectations)
}
func (s *sandbox) uploadArtifacts() error {
	folder := filepath.Join(s.folder.Path(), artifactFolder)
	return filepath.Walk(folder, func(p string, info os.FileInfo, err error) error {
		// Abort if there is an error.
		if err != nil {
			return err
		}
		// Skip folders.
		if info.IsDir() {
			return nil
		}

		// Guess the mime type.
		mimeType := mime.TypeByExtension(filepath.Ext(p))
		if mimeType == "" {
			// application/octet-stream is the mime type for "unknown".
			mimeType = "application/octet-stream"
		}

		// Open the file.
		f, err := os.Open(p)
		if err != nil {
			return err
		}

		// Find the filename relative to the artifact folder.
		name, _ := filepath.Rel(folder, p)

		// Ensure expiration is no later than task.expires.
		expires := time.Now().Add(time.Duration(s.engine.config.Expiration) * 24 * time.Hour)
		if time.Time(s.context.Expires).Before(expires) {
			expires = time.Time(s.context.Expires)
		}

		// Upload the artifact.
		err = runtime.UploadS3Artifact(runtime.S3Artifact{
			Name:     filepath.ToSlash(name),
			Mimetype: mimeType,
			Expires:  tcclient.Time(expires),
			Stream:   f,
		}, s.context)

		// Ensure that we close the file.
		cerr := f.Close()

		// Return the first error, if any. (The original overwrote a non-nil
		// upload error with the close error, masking upload failures.)
		if err == nil {
			err = cerr
		}
		return err
	})
}
// Countdown returns the number of seconds between the movement's timestamp
// and its departure time, preferring the expected departure time and falling
// back to the actual one when no expected time is set.
func (m Movement) Countdown() int {
	var t time.Time
	if time.Time(m.ExpectedDepartureTime).IsZero() {
		t = time.Time(m.ActualDepartureTime)
	} else {
		t = time.Time(m.ExpectedDepartureTime)
	}
	d := t.Sub(time.Time(m.TimeStamp))
	return int(d.Seconds())
}
func TestDateOccurrences(t *testing.T) {
	tr := TimeRange{time.Time(NewDate("2006-01-01")), time.Time(NewDate("2009-12-31"))}
	expectations := map[Schedule]int{
		NewDate("2005-12-31"): 0,
		NewDate("2006-01-01"): 1,
		NewDate("2007-04-08"): 1,
		NewDate("2009-12-31"): 1,
		NewDate("2010-01-01"): 0,
	}
	assertOccurrenceGeneration(t, tr, expectations)
}
func TestYearOccurrences(t *testing.T) {
	tr := NewTimeRange("2000-01-01", "3000-01-01")
	expectations := map[Schedule]int{
		Year(2525): 365,
	}
	assertOccurrenceGeneration(t, tr, expectations)

	tr = TimeRange{time.Time(NewDate("2525-12-31")), time.Time(NewDate("3000-01-01"))}
	expectations = map[Schedule]int{
		Year(2525): 1,
	}
	assertOccurrenceGeneration(t, tr, expectations)
}
func (ss *SiteStat) store(file string) (err error) {
	if err = mkConfigDir(); err != nil {
		return
	}

	now := time.Now()
	var s *SiteStat
	if ss.Update == Date(zeroTime) {
		ss.Update = Date(time.Now())
	}
	if now.Sub(time.Time(ss.Update)) > siteStaleThreshold {
		// Not updated for a long time; don't drop any record.
		s = ss
		// Changing the update time too fast would also drop useful records.
		s.Update = Date(time.Time(ss.Update).Add(siteStaleThreshold / 2))
		if time.Time(s.Update).After(now) {
			s.Update = Date(now)
		}
	} else {
		s = newSiteStat()
		s.Update = Date(now)
		ss.vcLock.RLock()
		for site, vcnt := range ss.Vcnt {
			// User-specified sites may change, so always filter them out.
			dmcnt := ss.get(host2Domain(site))
			if (dmcnt != nil && dmcnt.userSpecified()) || vcnt.shouldDrop() {
				continue
			}
			s.Vcnt[site] = vcnt
		}
		ss.vcLock.RUnlock()
	}

	b, err := json.MarshalIndent(s, "", "\t")
	if err != nil {
		errl.Println("Error marshalling site stat:", err)
		panic("internal error: error marshalling site")
	}

	f, err := os.Create(file)
	if err != nil {
		errl.Println("Can't create stat file:", err)
		return
	}
	defer f.Close()
	if _, err = f.Write(b); err != nil {
		errl.Println("Error writing stat file:", err)
		return
	}
	return
}
func TestWeekOccurrences(t *testing.T) {
	tr := TimeRange{time.Time(NewDate("2006-01-01")), time.Time(NewDate("2006-12-31"))}
	expectations := map[Schedule]int{
		Week(1):    84,
		Week(2):    84,
		Week(3):    84,
		Week(4):    84,
		Week(5):    29,
		Week(Last): 84,
	}
	assertOccurrenceGeneration(t, tr, expectations)
}