// UpdateFeeds (cron entry point) queries every feed whose next-update time
// ("n") has passed and streams an update-feed POST task for each one to a
// background taskSender goroutine, which performs the actual enqueuing.
func UpdateFeeds(c mpg.Context, w http.ResponseWriter, r *http.Request) {
	q := datastore.NewQuery("F").KeysOnly().Filter("n <=", time.Now())
	q = q.Limit(10 * 60 * 2) // 10/s queue, 2 min cron
	// The iterator gets its own 1-minute context so a slow scan cannot run
	// past the cron window.
	it := q.Run(appengine.Timeout(c, time.Minute))
	tc := make(chan *taskqueue.Task)
	done := make(chan bool)
	i := 0
	u := routeUrl("update-feed")
	// taskSender drains tc and signals done once tc is closed and flushed.
	go taskSender(c, "update-feed", tc, done)
	for {
		k, err := it.Next(nil)
		if err == datastore.Done {
			break
		} else if err != nil {
			c.Errorf("next error: %v", err.Error())
			break
		}
		tc <- taskqueue.NewPOSTTask(u, url.Values{
			"feed": {k.StringID()},
		})
		i++
	}
	close(tc) // no more tasks; lets taskSender finish
	<-done    // wait until every task has been handed off
	c.Infof("updating %d feeds", i)
}
// UpdateFeed fetches and refreshes a single feed, identified by its URL in
// the "feed" form value. A summary of the outcome is accumulated in s and
// written to a Log entity on exit (via defer). The "last" form value marks
// a manual/last-viewed triggered update, passed through to updateFeed.
func UpdateFeed(c mpg.Context, w http.ResponseWriter, r *http.Request) {
	gn := goon.FromContext(appengine.Timeout(c, time.Minute))
	url := r.FormValue("feed")
	if url == "" {
		c.Errorf("empty update feed")
		return
	}
	c.Debugf("update feed %s", url)
	last := len(r.FormValue("last")) > 0
	f := Feed{Url: url}
	s := ""
	// Always record an audit Log entry describing what happened, whatever
	// path this handler exits through.
	defer func() {
		gn.Put(&Log{
			Parent: gn.Key(&f),
			Id:     time.Now().UnixNano(),
			Text:   "UpdateFeed - " + s,
		})
	}()
	if err := gn.Get(&f); err == datastore.ErrNoSuchEntity {
		c.Errorf("no such entity - " + url)
		s += "NSE"
		return
	} else if err != nil {
		s += "err - " + err.Error()
		return
	} else if last {
		// noop
	}
	// Not yet due for an update; bail out.
	if time.Now().Before(f.NextUpdate) {
		c.Errorf("feed %v already updated: %v", url, f.NextUpdate)
		s += "already updated"
		return
	}

	// feedError applies exponential-ish backoff: the error count (capped at
	// 24*7 hours) becomes the delay until the next attempt; the very first
	// error retries immediately (v = 0).
	feedError := func(err error) {
		s += "feed err - " + err.Error()
		f.Errors++
		v := f.Errors + 1
		const max = 24 * 7
		if v > max {
			v = max
		} else if f.Errors == 1 {
			v = 0
		}
		f.NextUpdate = time.Now().Add(time.Hour * time.Duration(v))
		gn.Put(&f)
		c.Warningf("error with %v (%v), bump next update to %v, %v", url, f.Errors, f.NextUpdate, err)
	}

	if feed, stories, err := fetchFeed(c, f.Url, f.Url); err == nil {
		if err := updateFeed(c, f.Url, feed, stories, false, false, last); err != nil {
			feedError(err)
		} else {
			s += "success"
		}
	} else {
		feedError(err)
	}
	f.Subscribe(c)
}
func reportHandler(w http.ResponseWriter, r *http.Request) { if r.FormValue("date") == "" { var params = map[string]interface{}{ "Yesterday": date.NowInPdt().AddDate(0, 0, -1), } if err := templates.ExecuteTemplate(w, "report-form", params); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) } return } c := appengine.Timeout(appengine.NewContext(r), 60*time.Second) // Default has a 5s timeout s, e, _ := widget.FormValueDateRange(r) opt := ReportOptions{ ClassB_OnePerFlight: widget.FormValueCheckbox(r, "classb_oneperflight"), ClassB_LocalDataOnly: widget.FormValueCheckbox(r, "classb_localdataonly"), Skimmer_AltitudeTolerance: widget.FormValueFloat64(w, r, "skimmer_altitude_tolerance"), Skimmer_MinDurationNM: widget.FormValueFloat64(w, r, "skimmer_min_duration_nm"), } if fix := strings.ToUpper(r.FormValue("waypoint")); fix != "" { if _, exists := sfo.KFixes[fix]; !exists { http.Error(w, fmt.Sprintf("Waypoint '%s' not known", fix), http.StatusInternalServerError) return } opt.Waypoint = fix } reportWriter(c, w, r, s, e, opt, r.FormValue("reportname"), r.FormValue("resultformat")) }
// Upgrade the set of complaints for each user.
// upgradeUserHandler migrates one user's complaints (the user arrives as a
// base64-encoded profile in the "profile" form value): consecutive
// equivalent complaints are deleted as duplicates, and complaints missing an
// embedded profile get the user's profile attached.
func upgradeUserHandler(w http.ResponseWriter, r *http.Request) {
	c := appengine.Timeout(appengine.NewContext(r), 30*time.Second)
	cdb := ComplaintDB{C: c, Memcache: false}
	p := types.ComplainerProfile{}
	if err := p.Base64Decode(r.FormValue("profile")); err != nil {
		c.Errorf("upgradeUserHandler: decode failed: %v", err)
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	// Get *all* the complaints for this person, unfiltered.
	var data = []types.Complaint{}
	q := datastore.
		NewQuery(kComplaintKind).
		Ancestor(cdb.emailToRootKey(p.EmailAddress)).
		Order("Timestamp")
	keys, err := q.GetAll(c, &data)
	if err != nil {
		c.Errorf("upgradeUserHandler/%s: GetAll failed: %v", p.EmailAddress, err)
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	nDeleted, nUpdated := 0, 0
	str := ""
	for i, complaint := range data {
		FixupComplaint(&complaint, keys[i]) // Put the key where cdb.* expect to find it
		// The Timestamp ordering makes duplicates adjacent, so a complaint
		// equivalent to its successor can be deleted.
		deleteMe := i < len(data)-1 && ComplaintsAreEquivalent(data[i], data[i+1])
		noProfile := complaint.Profile.EmailAddress == ""
		//str += fmt.Sprintf(" [%03d] [DEL=%5v,PRO=%5v] %s\n", i, deleteMe, noProfile, complaint)
		if true {
			if deleteMe {
				if err := cdb.DeleteComplaints([]string{complaint.DatastoreKey}, p.EmailAddress); err != nil {
					c.Errorf("upgradeUserHandler/%s: deletecomplaints failed: %v", p.EmailAddress, err)
					http.Error(w, err.Error(), http.StatusInternalServerError)
					return
				}
				nDeleted++
			} else if noProfile {
				complaint.Profile = p
				if err := cdb.UpdateComplaint(complaint, p.EmailAddress); err != nil {
					c.Errorf("upgradeUserHandler/%s: updatecomplaints failed: %v", p.EmailAddress, err)
					http.Error(w, err.Error(), http.StatusInternalServerError)
					return
				}
				nUpdated++
			}
		}
	}
	str += fmt.Sprintf(" *** upgraded {%s} [tot=%d, del=%d, upd=%d]\n",
		p.EmailAddress, len(data), nDeleted, nUpdated)
	c.Infof(" -- Upgrade for %s --\n%s", p.EmailAddress, str)
	w.Write([]byte(fmt.Sprintf("OK, upgraded %s\n", p.EmailAddress)))
}
func downloadHandler(w http.ResponseWriter, r *http.Request) { session := sessions.Get(r) if session.Values["email"] == nil { http.Error(w, "session was empty; no cookie ? is this browser in privacy mode ?", http.StatusInternalServerError) return } c := appengine.Timeout(appengine.NewContext(r), 60*time.Second) cdb := complaintdb.ComplaintDB{C: c} cap, err := cdb.GetAllByEmailAddress(session.Values["email"].(string), true) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } _ = cap filename := date.NowInPdt().Format("complaints-20060102.csv") w.Header().Set("Content-Type", "application/csv") w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filename)) cols := []string{ "Date", "Time(PDT)", "Notes", "Speedbrakes", "Loudness", "Activity", "Flightnumber", "Origin", "Destination", "Speed(Knots)", "Altitude(Feet)", "Lat", "Long", "Registration", "Callsign", "VerticalSpeed(FeetPerMin)", "Dist2(km)", "Dist3(km)", } csvWriter := csv.NewWriter(w) csvWriter.Write(cols) for _, c := range cap.Complaints { a := c.AircraftOverhead speedbrakes := "" if c.HeardSpeedbreaks { speedbrakes = "y" } r := []string{ c.Timestamp.Format("2006/01/02"), c.Timestamp.Format("15:04:05"), c.Description, speedbrakes, fmt.Sprintf("%d", c.Loudness), c.Activity, a.FlightNumber, a.Origin, a.Destination, fmt.Sprintf("%.0f", a.Speed), fmt.Sprintf("%.0f", a.Altitude), fmt.Sprintf("%.5f", a.Lat), fmt.Sprintf("%.5f", a.Long), a.Registration, a.Callsign, fmt.Sprintf("%.0f", a.VerticalSpeed), fmt.Sprintf("%.1f", c.Dist2KM), fmt.Sprintf("%.1f", c.Dist3KM), } if err := csvWriter.Write(r); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) } } csvWriter.Flush() }
// NewDriver builds a datastore Driver for the given entity kind, wrapping
// the supplied context with the package's DefaultTimeout.
func NewDriver(ctx appengine.Context, kind string, logger wcg.Logger) *Driver {
	timedCtx := appengine.Timeout(ctx, DefaultTimeout)
	driver := &Driver{
		ctx:       timedCtx,
		kind:      kind,
		Logger:    logger,
		Timeout:   DefaultTimeout,
		namespace: "",
		EnableOperationLogging: DataStoreConfig.EnableOperationLogging,
		PutSplitThreshold:      DefaultPutSplitTreshold,
	}
	return driver
}
func cannedSerfr1ComplaintsHandler(w http.ResponseWriter, r *http.Request) { s, e := date.WindowForYesterday() e = e.Add(-1 * time.Second) format := "list" if r.FormValue("csv") != "" { format = "csv" } c := appengine.Timeout(appengine.NewContext(r), 60*time.Second) // Default has a 5s timeout reportWriter(c, w, r, s, e, ReportOptions{}, "serfr1complaints", format) }
func cannedAdsbClassBHandler(w http.ResponseWriter, r *http.Request) { s, e := date.WindowForYesterday() e = e.Add(-1 * time.Second) opt := ReportOptions{} format := "list" if r.FormValue("csv") != "" { format = "csv" } c := appengine.Timeout(appengine.NewContext(r), 60*time.Second) // Default has a 5s timeout reportWriter(c, w, r, s, e, opt, "adsbclassb", format) }
func CronFetchHandler(w http.ResponseWriter, r *http.Request) { c := appengine.Timeout(appengine.NewContext(r), 30*time.Minute) q := datastore.NewQuery("Queue").KeysOnly() keys, _ := q.GetAll(c, nil) db := DB{c} start := time.Now() for _, v := range keys { db.FetchUser(strconv.FormatInt(v.IntID(), 10)) } finish := time.Since(start) c.Infof("Finished cron fetch, took %s", finish) fmt.Fprint(w, "fetching users") }
func UpdateFeeds(c mpg.Context, w http.ResponseWriter, r *http.Request) { q := datastore.NewQuery("F").KeysOnly().Filter("n <=", time.Now()) q = q.Limit(10 * 60 * 20) // 10/s queue, 20 min cron var keys []*datastore.Key it := q.Run(appengine.Timeout(c, time.Second*60)) for { k, err := it.Next(nil) if err == datastore.Done { break } else if err != nil { c.Errorf("next error: %v", err.Error()) break } keys = append(keys, k) } if len(keys) == 0 { c.Errorf("no results") return } c.Infof("updating %d feeds", len(keys)) var tasks []*taskqueue.Task for _, k := range keys { tasks = append(tasks, taskqueue.NewPOSTTask(routeUrl("update-feed"), url.Values{ "feed": {k.StringID()}, })) } var ts []*taskqueue.Task const taskLimit = 100 for len(tasks) > 0 { if len(tasks) > taskLimit { ts = tasks[:taskLimit] tasks = tasks[taskLimit:] } else { ts = tasks tasks = tasks[0:0] } if _, err := taskqueue.AddMulti(c, ts, "update-feed"); err != nil { c.Errorf("taskqueue error: %v", err.Error()) } } }
func runTestTQ( c appengine.Context, r *http.Request, ) { //u.Debugf(c, "payload: %q", len(payload)) c = appengine.Timeout(c, deadline) _, err := taskqueueAdd(c, &taskqueue.Task{ Payload: payload, Method: "PULL", }, "pull") if err != nil { u.Errorf(c, "%v", err) return } delayTestTQ.Call(c, "") return }
// putMemcache serializes srcs and writes them to memcache (and, in
// parallel, to the local in-memory cache) in a single SetMulti batch.
// exists[i] == 0 marks an entity known to be absent; it is serialized as
// nil so negative lookups also hit the cache. A memcache timeout is treated
// as non-fatal and returns nil.
func (g *Goon) putMemcache(srcs []interface{}, exists []byte) error {
	items := make([]*memcache.Item, len(srcs))
	payloadSize := 0
	for i, src := range srcs {
		toSerialize := src
		if exists[i] == 0 {
			toSerialize = nil
		}
		data, err := serializeStruct(toSerialize)
		if err != nil {
			g.error(err)
			return err
		}
		key, _, err := g.getStructKey(src)
		if err != nil {
			return err
		}
		// payloadSize will overflow if we push 2+ gigs on a 32bit machine
		payloadSize += len(data)
		items[i] = &memcache.Item{
			Key:   memkey(key),
			Value: data,
		}
	}
	// Larger payloads get a more generous RPC deadline.
	memcacheTimeout := MemcachePutTimeoutSmall
	if payloadSize >= MemcachePutTimeoutThreshold {
		memcacheTimeout = MemcachePutTimeoutLarge
	}
	// Run the memcache write concurrently with the local-cache update and
	// join on the result.
	errc := make(chan error)
	go func() {
		errc <- memcache.SetMulti(appengine.Timeout(g.Context, memcacheTimeout), items)
	}()
	g.putMemoryMulti(srcs, exists)
	err := <-errc
	if appengine.IsTimeoutError(err) {
		// Timing out isn't something to fail over — the datastore remains
		// authoritative — but record it.
		g.timeoutError(err)
		err = nil
	} else if err != nil {
		g.error(err)
	}
	return err
}
// DeleteOldFeeds scans feeds parked at timeMax (no longer updating) in
// cursor-resumable chunks, enqueues a delete-old-feed task per feed, and
// re-enqueues itself with the iterator's cursor until the scan completes.
func DeleteOldFeeds(c mpg.Context, w http.ResponseWriter, r *http.Request) {
	ctx := appengine.Timeout(c, time.Minute)
	gn := goon.FromContext(c)
	q := datastore.NewQuery(gn.Kind(&Feed{})).Filter("n=", timeMax).KeysOnly()
	// Resume from the cursor passed by the previous invocation, if any.
	if cur, err := datastore.DecodeCursor(r.FormValue("c")); err == nil {
		q = q.Start(cur)
	}
	it := q.Run(ctx)
	done := false
	var tasks []*taskqueue.Task
	// Cap each pass at 100 tasks or 10000 scanned keys, whichever first.
	for i := 0; i < 10000 && len(tasks) < 100; i++ {
		k, err := it.Next(nil)
		if err == datastore.Done {
			c.Criticalf("done")
			done = true
			break
		} else if err != nil {
			c.Errorf("err: %v", err)
			continue
		}
		values := make(url.Values)
		values.Add("f", k.StringID())
		tasks = append(tasks, taskqueue.NewPOSTTask("/tasks/delete-old-feed", values))
	}
	if len(tasks) > 0 {
		c.Errorf("deleting %v feeds", len(tasks))
		if _, err := taskqueue.AddMulti(c, tasks, ""); err != nil {
			c.Errorf("err: %v", err)
		}
	}
	// Not finished: chain the next chunk through our own task URL with the
	// current cursor.
	if !done {
		if cur, err := it.Cursor(); err == nil {
			values := make(url.Values)
			values.Add("c", cur.String())
			taskqueue.Add(c, taskqueue.NewPOSTTask("/tasks/delete-old-feeds", values), "")
		} else {
			c.Errorf("err: %v", err)
		}
	}
}
// DeleteOldFeed deletes one feed's Story and StoryContent entities if the
// feed (named by the "f" form value) hasn't been viewed in ~90 days. The
// Feed entity itself is kept, but its NextUpdate is parked past timeMax so
// the updater ignores it.
func DeleteOldFeed(c mpg.Context, w http.ResponseWriter, r *http.Request) {
	ctx := appengine.Timeout(c, time.Minute)
	g := goon.FromContext(ctx)
	oldDate := time.Now().Add(-time.Hour * 24 * 90)
	feed := Feed{Url: r.FormValue("f")}
	if err := g.Get(&feed); err != nil {
		c.Criticalf("err: %v", err)
		return
	}
	// Viewed recently enough: leave it alone.
	if feed.LastViewed.After(oldDate) {
		return
	}
	q := datastore.NewQuery(g.Kind(&Story{})).Ancestor(g.Key(&feed)).KeysOnly()
	keys, err := q.GetAll(ctx, nil)
	if err != nil {
		c.Criticalf("err: %v", err)
		return
	}
	q = datastore.NewQuery(g.Kind(&StoryContent{})).Ancestor(g.Key(&feed)).KeysOnly()
	sckeys, err := q.GetAll(ctx, nil)
	if err != nil {
		c.Criticalf("err: %v", err)
		return
	}
	keys = append(keys, sckeys...)
	c.Infof("delete: %v - %v", feed.Url, len(keys))
	// Park the feed past timeMax so update queries skip it from now on.
	feed.NextUpdate = timeMax.Add(time.Hour)
	if _, err := g.Put(&feed); err != nil {
		c.Criticalf("put err: %v", err)
	}
	if len(keys) == 0 {
		return
	}
	err = g.DeleteMulti(keys)
	if err != nil {
		c.Criticalf("err: %v", err)
	}
}
func DeleteBlobs(c mpg.Context, w http.ResponseWriter, r *http.Request) { ctx := appengine.Timeout(c, time.Minute) q := datastore.NewQuery("__BlobInfo__").KeysOnly() it := q.Run(ctx) wg := sync.WaitGroup{} something := false for _i := 0; _i < 20; _i++ { var bk []appengine.BlobKey for i := 0; i < 1000; i++ { k, err := it.Next(nil) if err == datastore.Done { break } else if err != nil { c.Errorf("err: %v", err) continue } bk = append(bk, appengine.BlobKey(k.StringID())) } if len(bk) == 0 { break } go func(bk []appengine.BlobKey) { something = true c.Errorf("deleteing %v blobs", len(bk)) err := blobstore.DeleteMulti(ctx, bk) if err != nil { c.Errorf("blobstore delete err: %v", err) } wg.Done() }(bk) wg.Add(1) } wg.Wait() if something { taskqueue.Add(c, taskqueue.NewPOSTTask("/tasks/delete-blobs", nil), "") } }
func nukeDatastore(w http.ResponseWriter, r *http.Request) { c := appengine.Timeout(appengine.NewContext(r), 1*time.Hour) kind := r.FormValue("kind") c.Debugf("starting query") q := datastore.NewQuery(kind).KeysOnly().Limit(-1) keys, err := q.GetAll(c, nil) if err != nil { c.Errorf("query failed: %s", err) return } c.Debugf("query finished %d", len(keys)) var wg sync.WaitGroup count := len(keys) batchSize := 500 for i := 0; i < count; i += batchSize { wg.Add(1) j := i + batchSize if j > count { j = count } c.Infof("will delete keys [%d:%d]", i, j) go func(i, j int) { defer wg.Done() c.Infof("starting delete [%d:%d]", i, j) if err := datastore.DeleteMulti(c, keys[i:j]); err != nil { c.Errorf("failed deleting keys [%d:%d]: %s", i, j, err) } else { c.Infof("deleted keys [%d:%d]", i, j) } }(i, j) } c.Debugf("waiting...") wg.Wait() w.Write([]byte(fmt.Sprintf("nuked %s", kind))) }
// zipHandler renders a per-zipcode complaint report over the submitted date
// range: complaint counts and unique complainers bucketed by date and by
// hour of day. With no "date" form value it serves the form instead.
func zipHandler(w http.ResponseWriter, r *http.Request) {
	if r.FormValue("date") == "" {
		var params = map[string]interface{}{
			"Yesterday": date.NowInPdt().AddDate(0, 0, -1),
		}
		if err := templates.ExecuteTemplate(w, "zip-report-form", params); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
		return
	}
	ctx := appengine.Timeout(appengine.NewContext(r), 60*time.Second) // Default has a 5s timeout
	cdb := complaintdb.ComplaintDB{C: ctx, Memcache: false}
	zip := r.FormValue("zip")
	s, e, _ := widget.FormValueDateRange(r)
	var countsByHour [24]int
	countsByDate := map[string]int{}
	var uniquesByHour [24]map[string]int
	uniquesByDate := map[string]map[string]int{}
	uniquesAll := map[string]int{}
	iter := cdb.NewIter(cdb.QueryInSpanInZip(s, e, zip))
	for {
		c, err := iter.NextWithErr()
		if err != nil {
			http.Error(w, fmt.Sprintf("Zip iterator failed: %v", err),
				http.StatusInternalServerError)
			return
		} else if c == nil {
			break // We've hit EOF
		}
		// Bucket this complaint by hour of day ...
		h := c.Timestamp.Hour()
		countsByHour[h]++
		if uniquesByHour[h] == nil {
			uniquesByHour[h] = map[string]int{}
		}
		uniquesByHour[h][c.Profile.EmailAddress]++
		// ... and by calendar date; uniques are tracked per-bucket by email.
		d := c.Timestamp.Format("2006.01.02")
		countsByDate[d]++
		if uniquesByDate[d] == nil {
			uniquesByDate[d] = map[string]int{}
		}
		uniquesByDate[d][c.Profile.EmailAddress]++
		uniquesAll[c.Profile.EmailAddress]++
	}
	// Sort dates so the report rows come out in chronological order.
	dateKeys := []string{}
	for k, _ := range countsByDate {
		dateKeys = append(dateKeys, k)
	}
	sort.Strings(dateKeys)
	// Assemble the tabular data handed to the "report" template.
	data := [][]string{}
	data = append(data, []string{"Date", "NumComplaints", "UniqueComplainers"})
	for _, k := range dateKeys {
		data = append(data, []string{
			k,
			fmt.Sprintf("%d", countsByDate[k]),
			fmt.Sprintf("%d", len(uniquesByDate[k])),
		})
	}
	data = append(data, []string{"------"})
	data = append(data, []string{"HourAcrossAllDays", "NumComplaints", "UniqueComplainers"})
	for i, v := range countsByHour {
		data = append(data, []string{
			fmt.Sprintf("%02d:00", i),
			fmt.Sprintf("%d", v),
			fmt.Sprintf("%d", len(uniquesByHour[i])),
		})
	}
	data = append(data, []string{"------"})
	data = append(data, []string{"UniqueComplainersAcrossAllDays", fmt.Sprintf("%d", len(uniquesAll))})
	var params = map[string]interface{}{"Data": data}
	if err := templates.ExecuteTemplate(w, "report", params); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
// personalReportHandler writes a plaintext disturbance report for the
// logged-in user (identified by the session's "email") over the submitted
// date range: totals, per-airline, per-date, and per-hour counts, followed
// by a full dump of each complaint. With no "date" it serves the form.
func personalReportHandler(w http.ResponseWriter, r *http.Request) {
	session := sessions.Get(r)
	if session.Values["email"] == nil {
		http.Error(w, "session was empty; no cookie ?", http.StatusInternalServerError)
		return
	}
	email := session.Values["email"].(string)
	if r.FormValue("date") == "" {
		var params = map[string]interface{}{
			"Yesterday": date.NowInPdt().AddDate(0, 0, -1),
		}
		if err := templates.ExecuteTemplate(w, "personal-report-form", params); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
		return
	}
	start, end, _ := widget.FormValueDateRange(r)
	ctx := appengine.Timeout(appengine.NewContext(r), 60*time.Second)
	cdb := complaintdb.ComplaintDB{C: ctx}
	w.Header().Set("Content-Type", "text/plain")
	// w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", "sc.txt"))
	fmt.Fprintf(w, "Personal disturbances report for <%s>:\n From [%s]\n To [%s]\n",
		email, start, end)
	complaintStrings := []string{}
	var countsByHour [24]int
	countsByDate := map[string]int{}
	countsByAirline := map[string]int{}
	// Single pass over the user's complaints, accumulating all the buckets.
	iter := cdb.NewIter(cdb.QueryInSpanByEmailAddress(start, end, email))
	n := 0
	for {
		c := iter.Next()
		if c == nil {
			break
		}
		str := fmt.Sprintf("Time: %s, Loudness:%d, Speedbrakes:%v, Flight:%6.6s, Notes:%s",
			c.Timestamp.Format("2006.01.02 15:04:05"), c.Loudness, c.HeardSpeedbreaks,
			c.AircraftOverhead.FlightNumber, c.Description)
		n++
		complaintStrings = append(complaintStrings, str)
		countsByHour[c.Timestamp.Hour()]++
		countsByDate[c.Timestamp.Format("2006.01.02")]++
		if airline := c.AircraftOverhead.IATAAirlineCode(); airline != "" {
			countsByAirline[airline]++
		}
	}
	fmt.Fprintf(w, "\nTotal number of disturbance reports, over %d days: %d\n", len(countsByDate), n)
	fmt.Fprintf(w, "\nDisturbance reports, counted by Airline (where known):\n")
	for _, k := range keysByIntValDesc(countsByAirline) {
		fmt.Fprintf(w, " %s: % 4d\n", k, countsByAirline[k])
	}
	fmt.Fprintf(w, "\nDisturbance reports, counted by date:\n")
	for _, k := range keysByKeyAsc(countsByDate) {
		fmt.Fprintf(w, " %s: % 4d\n", k, countsByDate[k])
	}
	fmt.Fprintf(w, "\nDisturbance reports, counted by hour of day (across all dates):\n")
	for i, n := range countsByHour {
		fmt.Fprintf(w, " %02d: % 4d\n", i, n)
	}
	fmt.Fprintf(w, "\nFull dump of all disturbance reports:\n\n")
	for _, s := range complaintStrings {
		fmt.Fprint(w, s+"\n")
	}
}
// monthTaskHandler generates a month's complaints as a CSV file and writes
// it to the "serfr0-reports" GCS bucket. Requires integer form values
// "year" and "month"; iterates the month one day at a time.
func monthTaskHandler(w http.ResponseWriter, r *http.Request) {
	//ctx,_ := context.WithTimeout(appengine.NewContext(r), 599*time.Second)
	ctx := appengine.NewContext(r)
	cdb := complaintdb.ComplaintDB{
		//C: oldappengine.NewContext(r),
		C: oldappengine.Timeout(oldappengine.NewContext(r), 599*time.Second),
	}
	year, err := strconv.ParseInt(r.FormValue("year"), 10, 64)
	if err != nil {
		http.Error(w, "need arg 'year' (2015)", http.StatusInternalServerError)
		return
	}
	month, err := strconv.ParseInt(r.FormValue("month"), 10, 64)
	if err != nil {
		http.Error(w, "need arg 'month' (1-12)", http.StatusInternalServerError)
		return
	}
	// [s, e] spans the whole month in the local (PDT) zone.
	now := date.NowInPdt()
	s := time.Date(int(year), time.Month(month), 1, 0, 0, 0, 0, now.Location())
	e := s.AddDate(0, 1, 0).Add(-1 * time.Second)
	log.Infof(ctx, "Starting /be/month: %s", s)
	// One time, at 00:00, for each day of the given month
	days := date.IntermediateMidnights(s.Add(-1*time.Second), e)
	filename := s.Format("complaints-20060102") + e.Format("-20060102.csv")
	gcsHandle, err := gcs.OpenRW(ctx, "serfr0-reports", filename, "text/plain")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	csvWriter := csv.NewWriter(gcsHandle.IOWriter())
	cols := []string{
		"CallerCode", "Name", "Address", "Zip", "Email", "HomeLat", "HomeLong",
		"UnixEpoch", "Date", "Time(PDT)", "Notes", "ActivityDisturbed", "Flightnumber",
		"Notes",
		// Column names above are incorrect, but BKSV are used to them.
		//
		//"CallerCode", "Name", "Address", "Zip", "Email", "HomeLat", "HomeLong",
		//"UnixEpoch", "Date", "Time(PDT)", "Notes", "Flightnumber",
		//"ActivityDisturbed", "CcSFO",
	}
	csvWriter.Write(cols)
	// One query per day keeps each datastore span small.
	for _, dayStart := range days {
		dayEnd := dayStart.AddDate(0, 0, 1).Add(-1 * time.Second)
		log.Infof(ctx, " /be/month: %s - %s", dayStart, dayEnd)
		iter := cdb.NewIter(cdb.QueryInSpan(dayStart, dayEnd))
		for {
			c, err := iter.NextWithErr()
			if err != nil {
				http.Error(w, fmt.Sprintf("iterator failed: %v", err),
					http.StatusInternalServerError)
				return
			}
			if c == nil {
				break
			}
			r := []string{
				c.Profile.CallerCode, c.Profile.FullName, c.Profile.Address,
				c.Profile.StructuredAddress.Zip, c.Profile.EmailAddress,
				fmt.Sprintf("%.4f", c.Profile.Lat), fmt.Sprintf("%.4f", c.Profile.Long),
				fmt.Sprintf("%d", c.Timestamp.UTC().Unix()),
				c.Timestamp.Format("2006/01/02"),
				c.Timestamp.Format("15:04:05"),
				c.Description,
				c.AircraftOverhead.FlightNumber,
				c.Activity,
				fmt.Sprintf("%v", c.Profile.CcSfo),
			}
			if err := csvWriter.Write(r); err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
		}
	}
	csvWriter.Flush()
	if err := gcsHandle.Close(); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	log.Infof(ctx, "GCS report '%s' successfully written", filename)
	w.Header().Set("Content-Type", "text/plain")
	w.Write([]byte(fmt.Sprintf("OK!\nGCS file '%s' written to bucket", filename)))
}
// ListFeeds is the main reader view: it loads the current user's OPML and
// read-state, fans out over a 20-goroutine worker pool to fetch unread
// stories for every subscribed feed (enqueueing manual-update tasks for
// stale feeds along the way), prunes/normalizes the read-state, and writes
// the whole result as JSON.
func ListFeeds(c mpg.Context, w http.ResponseWriter, r *http.Request) {
	cu := user.Current(c)
	gn := goon.FromContext(c)
	u := &User{Id: cu.ID}
	ud := &UserData{Id: "data", Parent: gn.Key(u)}
	// ud may legitimately be missing (index 1); any other error is fatal.
	if err := gn.GetMulti([]interface{}{u, ud}); err != nil && !goon.NotFound(err, 1) {
		serveError(w, err)
		return
	}
	// l accumulates a breadcrumb trail of what this request did.
	l := &Log{
		Parent: ud.Parent,
		Id:     time.Now().UnixNano(),
		Text:   "list feeds",
	}
	l.Text += fmt.Sprintf(", len opml %v", len(ud.Opml))
	putU := false
	putUD := false
	fixRead := false
	// Clamp the read marker so we never scan further back than oldDuration.
	if time.Since(u.Read) > oldDuration {
		u.Read = time.Now().Add(-oldDuration)
		putU = true
		fixRead = true
		l.Text += ", u.Read"
	}
	// Free-trial accounting (only when billing is configured).
	trialRemaining := 0
	if STRIPE_KEY != "" && ud.Opml != nil {
		if u.Created.IsZero() {
			u.Created = time.Now()
			putU = true
		} else if time.Since(u.Created) > accountFreeDuration {
			// Trial expired: tell the client and stop.
			b, _ := json.Marshal(struct {
				ErrorSubscription bool
			}{
				true,
			})
			w.Write(b)
			return
		}
		trialRemaining = int((accountFreeDuration-time.Since(u.Created))/time.Hour/24) + 1
	}
	read := make(Read)
	var uf Opml
	c.Step("unmarshal user data", func(c mpg.Context) {
		gob.NewDecoder(bytes.NewReader(ud.Read)).Decode(&read)
		json.Unmarshal(ud.Opml, &uf)
	})
	// Flatten the (possibly one-level-nested) OPML outline into a feed list
	// plus a URL -> outline map for link fixups later.
	var feeds []*Feed
	opmlMap := make(map[string]*OpmlOutline)
	var merr error
	c.Step("fetch feeds", func(c mpg.Context) {
		gn := goon.FromContext(appengine.Timeout(c, time.Minute))
		for _, outline := range uf.Outline {
			if outline.XmlUrl == "" {
				for _, so := range outline.Outline {
					feeds = append(feeds, &Feed{Url: so.XmlUrl})
					opmlMap[so.XmlUrl] = so
				}
			} else {
				feeds = append(feeds, &Feed{Url: outline.XmlUrl})
				opmlMap[outline.XmlUrl] = outline
			}
		}
		merr = gn.GetMulti(feeds)
	})
	lock := sync.Mutex{}
	fl := make(map[string][]*Story)
	// Base query for unread stories, re-anchored per feed via Ancestor below.
	q := datastore.NewQuery(gn.Key(&Story{}).Kind()).
		Filter(IDX_COL+" >=", u.Read).
		KeysOnly().
		Order("-" + IDX_COL).
		Limit(250)
	updatedLinks := false
	now := time.Now()
	numStories := 0

	c.Step(fmt.Sprintf("feed unreads: %v", u.Read), func(c mpg.Context) {
		queue := make(chan *Feed)
		tc := make(chan *taskqueue.Task)
		done := make(chan bool)
		wg := sync.WaitGroup{}
		// feedProc is the worker body: pull feeds off queue until it closes.
		feedProc := func() {
			for f := range queue {
				c.Step(f.Title, func(c mpg.Context) {
					defer wg.Done()
					var stories []*Story
					gn := goon.FromContext(appengine.Timeout(c, time.Minute))
					// Only query feeds that may have stories newer than the
					// user's read marker.
					if !f.Date.Before(u.Read) {
						fk := gn.Key(f)
						sq := q.Ancestor(fk)
						keys, _ := gn.GetAll(sq, nil)
						stories = make([]*Story, len(keys))
						for j, key := range keys {
							stories[j] = &Story{
								Id:     key.StringID(),
								Parent: fk,
							}
						}
						gn.GetMulti(stories)
					}
					// Keep the stored OPML's HtmlUrl in sync with the feed.
					if f.Link != opmlMap[f.Url].HtmlUrl {
						l.Text += fmt.Sprintf(", link: %v -> %v", opmlMap[f.Url].HtmlUrl, f.Link)
						updatedLinks = true
						opmlMap[f.Url].HtmlUrl = f.Link
					}
					// Enqueue manual/last-viewed update tasks for stale feeds.
					manualDone := false
					if time.Since(f.LastViewed) > time.Hour*24*2 {
						if f.NextUpdate.Equal(timeMax) {
							tc <- taskqueue.NewPOSTTask(routeUrl("update-feed-manual"), url.Values{
								"feed": {f.Url},
								"last": {"1"},
							})
							manualDone = true
						} else {
							tc <- taskqueue.NewPOSTTask(routeUrl("update-feed-last"), url.Values{
								"feed": {f.Url},
							})
						}
					}
					if !manualDone && now.Sub(f.NextUpdate) >= 0 {
						tc <- taskqueue.NewPOSTTask(routeUrl("update-feed-manual"), url.Values{
							"feed": {f.Url},
						})
					}
					// fl and numStories are shared across workers.
					lock.Lock()
					fl[f.Url] = stories
					numStories += len(stories)
					lock.Unlock()
				})
			}
		}
		go taskSender(c, "update-manual", tc, done)
		for i := 0; i < 20; i++ {
			go feedProc()
		}
		for i, f := range feeds {
			// Skip feeds that failed the earlier GetMulti.
			if goon.NotFound(merr, i) {
				continue
			}
			wg.Add(1)
			queue <- f
		}
		close(queue)
		// wait for feeds to complete so there are no more tasks to queue
		wg.Wait()
		// then finish enqueuing tasks
		close(tc)
		<-done
	})

	// Trim to the newest numStoriesLimit stories and advance the read marker
	// to the oldest story we kept.
	if numStories > 0 {
		c.Step("numStories", func(c mpg.Context) {
			stories := make([]*Story, 0, numStories)
			for _, v := range fl {
				stories = append(stories, v...)
			}
			sort.Sort(sort.Reverse(Stories(stories)))
			if len(stories) > numStoriesLimit {
				stories = stories[:numStoriesLimit]
				fl = make(map[string][]*Story)
				for _, s := range stories {
					fk := s.Parent.StringID()
					p := fl[fk]
					fl[fk] = append(p, s)
				}
			}
			last := stories[len(stories)-1].Created
			if u.Read.Before(last) {
				u.Read = last
				putU = true
				fixRead = true
			}
		})
	}
	// Drop read-state entries for stories no longer in the visible window.
	if fixRead {
		c.Step("fix read", func(c mpg.Context) {
			nread := make(Read)
			for k, v := range fl {
				for _, s := range v {
					rs := readStory{Feed: k, Story: s.Id}
					if read[rs] {
						nread[rs] = true
					}
				}
			}
			if len(nread) != len(read) {
				read = nread
				var b bytes.Buffer
				gob.NewEncoder(&b).Encode(&read)
				ud.Read = b.Bytes()
				putUD = true
				l.Text += ", fix read"
			}
		})
	}
	// Filter out stories the user has already read.
	numStories = 0
	for k, v := range fl {
		newStories := make([]*Story, 0, len(v))
		for _, s := range v {
			if !read[readStory{Feed: k, Story: s.Id}] {
				newStories = append(newStories, s)
			}
		}
		numStories += len(newStories)
		fl[k] = newStories
	}
	// Nothing unread at all: clear the read-state blob and fast-forward the
	// read marker to the newest feed date.
	if numStories == 0 {
		l.Text += ", clear read"
		fixRead = false
		if ud.Read != nil {
			putUD = true
			ud.Read = nil
		}
		last := u.Read
		for _, v := range feeds {
			if last.Before(v.Date) {
				last = v.Date
			}
		}
		c.Infof("nothing here, move up: %v -> %v", u.Read, last)
		if u.Read.Before(last) {
			putU = true
			u.Read = last
		}
	}
	// Persist OPML link fixups (with a backup first).
	if updatedLinks {
		backupOPML(c)
		if o, err := json.Marshal(&uf); err == nil {
			ud.Opml = o
			putUD = true
			l.Text += ", update links"
		} else {
			c.Errorf("json UL err: %v, %v", err, uf)
		}
	}
	if putU {
		gn.Put(u)
		l.Text += ", putU"
	}
	if putUD {
		gn.Put(ud)
		l.Text += ", putUD"
	}
	l.Text += fmt.Sprintf(", len opml %v", len(ud.Opml))
	gn.Put(l)
	c.Step("json marshal", func(c mpg.Context) {
		gn := goon.FromContext(c)
		o := struct {
			Opml           []*OpmlOutline
			Stories        map[string][]*Story
			Options        string
			TrialRemaining int
			Feeds          []*Feed
		}{
			Opml:           uf.Outline,
			Stories:        fl,
			Options:        u.Options,
			TrialRemaining: trialRemaining,
			Feeds:          feeds,
		}
		b, err := json.Marshal(o)
		if err != nil {
			// Marshal failure is assumed to be bad UTF-8 in summaries:
			// clean them, persist the fixes, and retry once.
			c.Errorf("cleaning")
			for _, v := range fl {
				for _, s := range v {
					n := sanitizer.CleanNonUTF8(s.Summary)
					if n != s.Summary {
						s.Summary = n
						c.Errorf("cleaned %v", s.Id)
						gn.Put(s)
					}
				}
			}
			b, _ = json.Marshal(o)
		}
		w.Write(b)
	})
}
func (d *Driver) SetTimeout(to time.Duration) *Driver { d.ctx = appengine.Timeout(d.ctx, to) return d }
// report3Handler drives the v3 report flow: serves the form when "rep" is
// absent, otherwise sets up the report, fetches METARs for altitude
// correction, iterates matching flights from the flight DB, and renders
// matched/unmatched idspec lists plus POST buttons for follow-on views.
func report3Handler(w http.ResponseWriter, r *http.Request) {
	if r.FormValue("rep") == "" {
		var params = map[string]interface{}{
			"Yesterday": date.NowInPdt().AddDate(0, 0, -1),
			"Reports":   report.ListReports(),
			"FormUrl":   "/report3/",
			"Waypoints": sfo.ListWaypoints(),
		}
		if err := templates.ExecuteTemplate(w, "report3-form", params); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
		return
	}
	rep, err := report.SetupReport(r)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// Debug mode: dump the parsed options as plain text and stop.
	if r.FormValue("debug") != "" {
		str := fmt.Sprintf("Report Options\n\n%s\n", rep.Options)
		w.Header().Set("Content-Type", "text/plain")
		w.Write([]byte(fmt.Sprintf("OK\n\n%s\n", str)))
		return
	}
	//airframes := ref.NewAirframeCache(c)
	// METARs one day either side of the range, for indicated-altitude calc.
	client := newurlfetch.Client(newappengine.NewContext(r))
	metars, err := metar.FetchFromNOAA(client, "KSFO",
		rep.Start.AddDate(0, 0, -1), rep.End.AddDate(0, 0, 1))
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	v1idspecs := []string{}
	// NOTE(review): v2idspecs is collected below but never used afterwards.
	v2idspecs := []string{}
	v1idspecComplement := []string{}
	// reportFunc is invoked per flight: upgrade v1 -> v2, run it through the
	// report, and record its idspec in the matched or unmatched bucket.
	reportFunc := func(oldF *oldfdb.Flight) {
		newF, err := oldF.V2()
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		} else {
			newF.ComputeIndicatedAltitudes(metars)
			if included, err := rep.Process(newF); err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
			} else if included {
				v1idspecs = append(v1idspecs, oldF.UniqueIdentifier())
				v2idspecs = append(v2idspecs, newF.IdSpec())
			} else {
				v1idspecComplement = append(v1idspecComplement, oldF.UniqueIdentifier())
			}
		}
	}
	tags := tagList(rep.Options)
	db := oldfgae.FlightDB{C: oldappengine.Timeout(oldappengine.NewContext(r), 600*time.Second)}
	s, e := rep.Start, rep.End
	if err := db.LongIterWith(db.QueryTimeRangeByTags(tags, s, e), reportFunc); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	postButtons := ButtonPOST(fmt.Sprintf("%d Matches as a VectorMap", len(v1idspecs)),
		fmt.Sprintf("/fdb/trackset2?%s", rep.ToCGIArgs()), v1idspecs)
	postButtons += ButtonPOST(fmt.Sprintf("%d Non-matches as a VectorMap", len(v1idspecComplement)),
		fmt.Sprintf("/fdb/trackset2?%s", rep.ToCGIArgs()), v1idspecComplement)
	if rep.Name == "sfoclassb" {
		postButtons += ButtonPOST(fmt.Sprintf("%d Matches as ClassBApproaches", len(v1idspecs)),
			fmt.Sprintf("/fdb/approach2?%s", rep.ToCGIArgs()), v1idspecs)
		// This is kinda useless, as it isn't limited to SERFR1 things
		postButtons += ButtonPOST(fmt.Sprintf("%d Non-matches as ClassBApproaches", len(v1idspecComplement)),
			fmt.Sprintf("/fdb/approach2?%s", rep.ToCGIArgs()), v1idspecComplement)
		postButtons += ButtonPOST(fmt.Sprintf("%d Matches as ClassBApproaches (delta)", len(v1idspecs)),
			fmt.Sprintf("/fdb/approach2?%s&colorby=delta", rep.ToCGIArgs()), v1idspecs)
		// This is kinda useless, as it isn't limited to SERFR1 things
		postButtons += ButtonPOST(fmt.Sprintf("%d Non-matches as ClassBApproaches (delta)", len(v1idspecComplement)),
			fmt.Sprintf("/fdb/approach2?%s&colorby=delta", rep.ToCGIArgs()), v1idspecComplement)
	}
	var params = map[string]interface{}{
		"R":           rep,
		"Metadata":    rep.MetadataTable(),
		"PostButtons": template.HTML(postButtons),
		"OptStr":      template.HTML(fmt.Sprintf("<pre>%s</pre>\n", rep.Options)),
		"IdSpecs":     template.HTML(strings.Join(v1idspecs, ",")),
	}
	if err := templates.ExecuteTemplate(w, "report3-results", params); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
// CFixer walks all feed keys in batches of 1000 (resuming from an optional
// cursor in form value "c"), re-enqueues itself with the next cursor, and
// enqueues a cfix task per feed found in this batch.
func CFixer(c mpg.Context, w http.ResponseWriter, r *http.Request) {
	q := datastore.NewQuery("F").KeysOnly()
	q = q.Limit(1000)
	cs := r.FormValue("c")
	// Resume from the cursor passed by the previous invocation, if any.
	if len(cs) > 0 {
		if cur, err := datastore.DecodeCursor(cs); err == nil {
			q = q.Start(cur)
			c.Infof("starting at %v", cur)
		} else {
			c.Errorf("cursor error %v", err.Error())
		}
	}
	var keys []*datastore.Key
	it := q.Run(appengine.Timeout(c, time.Second*15))
	for {
		k, err := it.Next(nil)
		if err == datastore.Done {
			break
		} else if err != nil {
			c.Errorf("next error: %v", err.Error())
			break
		}
		keys = append(keys, k)
	}
	if len(keys) == 0 {
		c.Errorf("no results")
		return
	} else {
		// More work may remain: chain the next batch before fixing this one.
		cur, err := it.Cursor()
		if err != nil {
			c.Errorf("to cur error %v", err.Error())
		} else {
			c.Infof("add with cur %v", cur)
			t := taskqueue.NewPOSTTask("/tasks/cfixer", url.Values{
				"c": {cur.String()},
			})
			taskqueue.Add(c, t, "cfixer")
		}
	}
	c.Infof("fixing %d feeds", len(keys))
	var tasks []*taskqueue.Task
	for _, k := range keys {
		c.Infof("f: %v", k.StringID())
		tasks = append(tasks, taskqueue.NewPOSTTask("/tasks/cfix", url.Values{
			"feed": {k.StringID()},
		}))
	}
	// AddMulti accepts a bounded number of tasks per call; push in chunks.
	var ts []*taskqueue.Task
	const taskLimit = 100
	for len(tasks) > 0 {
		if len(tasks) > taskLimit {
			ts = tasks[:taskLimit]
			tasks = tasks[taskLimit:]
		} else {
			ts = tasks
			tasks = tasks[0:0]
		}
		if _, err := taskqueue.AddMulti(c, ts, "cfixer"); err != nil {
			c.Errorf("taskqueue error: %v", err.Error())
		}
	}
}
func UpdateFeeds(c mpg.Context, w http.ResponseWriter, r *http.Request) { q := datastore.NewQuery("F").KeysOnly().Filter("n <=", time.Now()) q = q.Limit(1000) cs := r.FormValue("c") hasCursor := false if len(cs) > 0 { if cur, err := datastore.DecodeCursor(cs); err == nil { q = q.Start(cur) hasCursor = true c.Infof("starting at %v", cur) } else { c.Errorf("cursor error %v", err.Error()) } } if !hasCursor && !isDevServer { qs, err := taskqueue.QueueStats(c, []string{"update-feed"}, 0) if err != nil || qs[0].Tasks > 0 || qs[0].Executed1Minute > 0 { c.Infof("already %v (%v) tasks", qs[0].Tasks, qs[0].Executed1Minute) return } } var keys []*datastore.Key it := q.Run(appengine.Timeout(c, time.Second*60)) for { k, err := it.Next(nil) if err == datastore.Done { break } else if err != nil { c.Errorf("next error: %v", err.Error()) break } keys = append(keys, k) } if len(keys) == 0 { c.Errorf("no results") return } else { } c.Infof("updating %d feeds", len(keys)) var tasks []*taskqueue.Task for _, k := range keys { tasks = append(tasks, taskqueue.NewPOSTTask(routeUrl("update-feed"), url.Values{ "feed": {k.StringID()}, })) } cur, err := it.Cursor() if err != nil { c.Errorf("to cur error %v", err.Error()) } else { c.Infof("add with cur %v", cur) tasks = append(tasks, taskqueue.NewPOSTTask(routeUrl("update-feeds"), url.Values{ "c": {cur.String()}, })) } var ts []*taskqueue.Task const taskLimit = 100 for len(tasks) > 0 { if len(tasks) > taskLimit { ts = tasks[:taskLimit] tasks = tasks[taskLimit:] } else { ts = tasks tasks = tasks[0:0] } if _, err := taskqueue.AddMulti(c, ts, "update-feed"); err != nil { c.Errorf("taskqueue error: %v", err.Error()) } } }
func Timeout(req *wcg.Request, t time.Duration) appengine.Context { ctx := NewContext(req) return appengine.Timeout(ctx, t) }
// GetMulti is a batch version of Get.
//
// dst must be a *[]S, *[]*S, *[]I, []S, []*S, or []I, for some struct type S,
// or some interface type I. If *[]I or []I, each element must be a struct pointer.
//
// Lookup order per key: in-process cache (g.cache), then memcache, then the
// datastore (in parallel batches of getMultiLimit). Entities fetched from the
// datastore are written back to both caches via putMemcache/putMemory.
func (g *Goon) GetMulti(dst interface{}) error {
	keys, err := g.extractKeys(dst, false) // don't allow incomplete keys on a Get request
	if err != nil {
		return err
	}
	v := reflect.Indirect(reflect.ValueOf(dst))
	// Transactions bypass both caches and hit the datastore directly.
	if g.inTransaction {
		// todo: support getMultiLimit in transactions
		return datastore.GetMulti(g.Context, keys, v.Interface())
	}
	// Bookkeeping: dskeys/dsdst/dixs describe the datastore fetch list
	// (key, destination, index into dst); memkeys/mixs the memcache list.
	var dskeys []*datastore.Key
	var dsdst []interface{}
	var dixs []int
	var memkeys []string
	var mixs []int
	g.cacheLock.RLock()
	for i, key := range keys {
		m := memkey(key)
		vi := v.Index(i)
		if vi.Kind() == reflect.Struct {
			vi = vi.Addr()
		}
		if s, present := g.cache[m]; present {
			// Local-cache hit: copy the cached struct into the caller's slot.
			if vi.Kind() == reflect.Interface {
				vi = vi.Elem()
			}
			reflect.Indirect(vi).Set(reflect.Indirect(reflect.ValueOf(s)))
		} else {
			// Miss: queue the key for both memcache and (provisionally)
			// the datastore; the datastore list is rebuilt below if the
			// memcache fetch succeeds.
			memkeys = append(memkeys, m)
			mixs = append(mixs, i)
			dskeys = append(dskeys, key)
			dsdst = append(dsdst, vi.Interface())
			dixs = append(dixs, i)
		}
	}
	g.cacheLock.RUnlock()
	// Everything was served from the local cache.
	if len(memkeys) == 0 {
		return nil
	}
	// multiErr is indexed by position in dst; `any` records whether any
	// per-key error was set so it can be returned after all fetches.
	multiErr, any := make(appengine.MultiError, len(keys)), false
	memvalues, err := memcache.GetMulti(appengine.Timeout(g.Context, MemcacheGetTimeout), memkeys)
	if appengine.IsTimeoutError(err) {
		g.timeoutError(err)
		err = nil
	} else if err != nil {
		g.error(err) // timing out or another error from memcache isn't something to fail over, but do log it
		// No memvalues found, prepare the datastore fetch list already prepared above
	} else if len(memvalues) > 0 {
		// since memcache fetch was successful, reset the datastore fetch list and repopulate it
		dskeys = dskeys[:0]
		dsdst = dsdst[:0]
		dixs = dixs[:0]
		// we only want to check the returned map if there weren't any errors
		// unlike the datastore, memcache will return a smaller map with no error if some of the keys were missed
		for i, m := range memkeys {
			d := v.Index(mixs[i]).Interface()
			if v.Index(mixs[i]).Kind() == reflect.Struct {
				d = v.Index(mixs[i]).Addr().Interface()
			}
			if s, present := memvalues[m]; present {
				// A cached "does not exist" marker deserializes to
				// ErrNoSuchEntity; record it per key rather than failing.
				err := deserializeStruct(d, s.Value)
				if err == datastore.ErrNoSuchEntity {
					any = true // this flag tells GetMulti to return multiErr later
					multiErr[mixs[i]] = err
				} else if err != nil {
					g.error(err)
					return err
				} else {
					g.putMemory(d)
				}
			} else {
				// Memcache miss: fall through to the datastore for this key.
				dskeys = append(dskeys, keys[mixs[i]])
				dsdst = append(dsdst, d)
				dixs = append(dixs, mixs[i])
			}
		}
		if len(dskeys) == 0 {
			if any {
				return realError(multiErr)
			}
			return nil
		}
	}
	// Fetch the remaining keys from the datastore in parallel slices of
	// getMultiLimit keys each.
	goroutines := (len(dskeys)-1)/getMultiLimit + 1
	var wg sync.WaitGroup
	wg.Add(goroutines)
	for i := 0; i < goroutines; i++ {
		go func(i int) {
			defer wg.Done()
			var toCache []interface{}
			var exists []byte
			lo := i * getMultiLimit
			hi := (i + 1) * getMultiLimit
			if hi > len(dskeys) {
				hi = len(dskeys)
			}
			gmerr := datastore.GetMulti(g.Context, dskeys[lo:hi], dsdst[lo:hi])
			if gmerr != nil {
				// NOTE(review): `any` is written concurrently by multiple
				// goroutines without synchronization (same value, read only
				// after wg.Wait); the race detector would flag it — confirm
				// whether this should be an atomic/mutex.
				any = true // this flag tells GetMulti to return multiErr later
				merr, ok := gmerr.(appengine.MultiError)
				if !ok {
					g.error(gmerr)
					// NOTE(review): multiErr is indexed by dst position
					// elsewhere (via dixs/mixs), but here j is a dskeys
					// position — looks suspect when memcache pruned the
					// fetch list; confirm intended indexing.
					for j := lo; j < hi; j++ {
						multiErr[j] = gmerr
					}
					return
				}
				for i, idx := range dixs[lo:hi] {
					if merr[i] == nil {
						toCache = append(toCache, dsdst[lo+i])
						exists = append(exists, 1)
					} else {
						if merr[i] == datastore.ErrNoSuchEntity {
							// Cache the negative result too (exists=0).
							toCache = append(toCache, dsdst[lo+i])
							exists = append(exists, 0)
						}
						multiErr[idx] = merr[i]
					}
				}
			} else {
				// Whole batch succeeded: cache every entity as existing.
				toCache = append(toCache, dsdst[lo:hi]...)
				exists = append(exists, bytes.Repeat([]byte{1}, hi-lo)...)
			}
			if len(toCache) > 0 {
				if err := g.putMemcache(toCache, exists); err != nil {
					g.error(err)
					// since putMemcache() gives no guarantee it will actually store the data in memcache
					// we log and swallow this error
				}
			}
		}(i)
	}
	wg.Wait()
	if any {
		return realError(multiErr)
	}
	return nil
}