// pingDevicesAsync schedules len(endpoints) tasks of /ping-device.
// d specifies the duration the tasker must wait before executing the task.
// If scheduling fails for some endpoints, those will be in the returned values
// along with a non-nil error.
func pingDevicesAsync(c context.Context, uid string, endpoints []string, d time.Duration) ([]string, error) {
	if len(endpoints) == 0 {
		return nil, nil
	}

	p := path.Join(config.Prefix, "/task/ping-device")
	jobs := make([]*taskqueue.Task, 0, len(endpoints))
	for _, endpoint := range endpoints {
		t := taskqueue.NewPOSTTask(p, url.Values{
			"uid":      {uid},
			"endpoint": {endpoint},
		})
		t.Delay = d
		jobs = append(jobs, t)
	}

	_, err := taskqueue.AddMulti(c, jobs, "")
	merr, mok := err.(appengine.MultiError)
	if !mok {
		return nil, err
	}

	errEndpoints := make([]string, 0)
	for i, e := range merr {
		if e == nil {
			continue
		}
		errEndpoints = append(errEndpoints, endpoints[i])
	}
	if len(errEndpoints) == 0 {
		return nil, nil
	}
	return errEndpoints, fmt.Errorf("pingDevicesAsync: %v", err)
}
// Grab all users, and enqueue them for batch processing
func upgradeHandler(w http.ResponseWriter, r *http.Request) {
	ctx := req2ctx(r)
	cdb := complaintdb.NewDB(ctx)

	var cps = []types.ComplainerProfile{}
	cps, err := cdb.GetAllProfiles()
	if err != nil {
		cdb.Errorf("upgradeHandler: getallprofiles: %v", err)
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	for _, cp := range cps {
		t := taskqueue.NewPOSTTask("/backend/cdb-batch-user", map[string][]string{
			"email": {cp.EmailAddress},
		})
		if _, err := taskqueue.Add(cdb.Ctx(), t, "batch"); err != nil {
			cdb.Errorf("upgradeHandler: enqueue: %v", err)
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
	}

	cdb.Infof("enqueued %d batch", len(cps))
	w.Write([]byte(fmt.Sprintf("OK, enqueued %d", len(cps))))
}
func TestTaskQueue(t *testing.T) {
	// Only run the test if APPENGINE_DEV_APPSERVER is explicitly set.
	if os.Getenv("APPENGINE_DEV_APPSERVER") == "" {
		t.Skip("APPENGINE_DEV_APPSERVER not set")
	}

	queueNames := []string{
		"taskQueueName",
	}
	ctx, done, err := NewContextOptions(&Options{
		TaskQueues: queueNames,
	})
	if err != nil {
		t.Fatalf("NewContext: %v", err)
	}
	defer done()

	// The "default" queue always exists, even though it is not passed in Options.
	queueNames = append(queueNames, "default")
	for _, queueName := range queueNames {
		_, err = taskqueue.Add(ctx, taskqueue.NewPOSTTask("/worker", url.Values{
			"key": {"value"},
		}), queueName)
		if err != nil {
			t.Errorf("Unable to add task to queue - %v", err)
		}

		if stats, err := taskqueue.QueueStats(ctx, []string{queueName}); err != nil {
			t.Errorf("Unable to fetch queue statistics - %v", err)
		} else if len(stats) == 0 {
			t.Errorf("No stats found for queue %q", queueName)
		} else if stats[0].Tasks != 1 {
			t.Errorf("Wrong number of tasks found in queue, wanted 1, got %d", stats[0].Tasks)
		}
	}
}
// Enqueues one publish-complaints task per day in the range, onto the batch queue
func publishAllComplaintsHandler(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	str := ""

	s, e, _ := widget.FormValueDateRange(r)
	days := date.IntermediateMidnights(s.Add(-1*time.Second), e) // decrement start, to include it
	url := "/backend/publish-complaints"

	for i, day := range days {
		dayStr := day.Format("2006.01.02")

		thisUrl := fmt.Sprintf("%s?datestring=%s", url, dayStr)
		if r.FormValue("skipload") != "" {
			thisUrl += "&skipload=" + r.FormValue("skipload")
		}

		t := taskqueue.NewPOSTTask(thisUrl, map[string][]string{})

		// Give ourselves time to get all these tasks posted, and stagger them out a bit
		t.Delay = time.Minute + time.Duration(i)*15*time.Second

		if _, err := taskqueue.Add(ctx, t, "batch"); err != nil {
			log.Errorf(ctx, "publishAllComplaintsHandler: enqueue: %v", err)
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		str += " * posting for " + thisUrl + "\n"
	}

	w.Header().Set("Content-Type", "text/plain")
	w.Write([]byte(fmt.Sprintf("OK, enqueued %d\n--\n%s", len(days), str)))
}
// notifyShardAsync enqueues a /task/notify-shard task for the given shard.
func notifyShardAsync(c context.Context, shard, changes string, all bool) error {
	p := path.Join(config.Prefix, "/task/notify-shard")
	t := taskqueue.NewPOSTTask(p, url.Values{
		"shard":   {shard},
		"changes": {changes},
		"all":     {fmt.Sprintf("%v", all)},
	})
	_, err := taskqueue.Add(c, t, "")
	return err
}
// pingUserAsync creates an async job to send a push notification to user devices.
// sessions are session IDs used to compare against user bookmarks.
// TODO: add ioext support
func pingUserAsync(c context.Context, uid string, sessions []string, all bool) error {
	p := path.Join(config.Prefix, "/task/ping-user")
	t := taskqueue.NewPOSTTask(p, url.Values{
		"uid":      {uid},
		"sessions": {strings.Join(sessions, " ")},
		"all":      {fmt.Sprintf("%v", all)},
	})
	_, err := taskqueue.Add(c, t, "")
	return err
}
// pingExtPartyAsync notifies extra parties at config.ExtPingURL about data updates.
func pingExtPartyAsync(c context.Context, key string) error {
	if key == "" || config.ExtPingURL == "" {
		return nil
	}
	p := path.Join(config.Prefix, "/task/ping-ext")
	t := taskqueue.NewPOSTTask(p, url.Values{
		"key": {key},
	})
	_, err := taskqueue.Add(c, t, "")
	return err
}
// Enqueues one task per flight, for each day in the date range
func batchFlightScanHandler(w http.ResponseWriter, r *http.Request) {
	c := appengine.NewContext(r)

	tags := []string{} //"ADSB"} // Maybe make this configurable ...
	n := 0
	str := ""
	s, e, _ := widget.FormValueDateRange(r)
	job := r.FormValue("job")
	if job == "" {
		http.Error(w, "Missing argument: &job=foo", http.StatusInternalServerError)
		return
	}

	days := date.IntermediateMidnights(s.Add(-1*time.Second), e) // decrement start, to include it
	for _, day := range days {
		// Get the keys for all the flights on this day.
		fdb := oldfgae.FlightDB{C: oldappengine.NewContext(r)}
		dStart, dEnd := date.WindowForTime(day)
		dEnd = dEnd.Add(-1 * time.Second)
		keys, err := fdb.KeysInTimeRangeByTags(tags, dStart, dEnd)
		if err != nil {
			log.Errorf(c, "batchFlightScanHandler: KeysInTimeRangeByTags: %v", err)
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}

		singleFlightUrl := "/backend/fdb-batch/flight"
		for _, key := range keys {
			str += fmt.Sprintf("Enqueueing day=%s: %s?job=%s&key=%s\n",
				day.Format("2006.01.02"), singleFlightUrl, job, key.Encode())

			t := taskqueue.NewPOSTTask(singleFlightUrl, map[string][]string{
				"date": {day.Format("2006.01.02")},
				"key":  {key.Encode()},
				"job":  {job},
			})
			if _, err := taskqueue.Add(c, t, "batch"); err != nil {
				log.Errorf(c, "batchFlightScanHandler: enqueue: %v", err)
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
			n++
		}
	}

	log.Infof(c, "enqueued %d batch items for '%s'", n, job)
	w.Header().Set("Content-Type", "text/plain")
	w.Write([]byte(fmt.Sprintf("OK, batch, enqueued %d tasks for %s\n%s", n, job, str)))
}
// Dequeue a single day, and enqueue a job for each flight on that day
func batchFlightDayHandler(w http.ResponseWriter, r *http.Request) {
	ctx := req2ctx(r)

	tags := []string{} //"ADSB"} // Maybe make this configurable ...
	n := 0
	str := ""

	job := r.FormValue("job")
	if job == "" {
		http.Error(w, "Missing argument: &job=foo", http.StatusInternalServerError)
		return
	}

	day := date.ArbitraryDatestring2MidnightPdt(r.FormValue("day"), "2006/01/02")
	fdb := oldfgae.NewDB(r)
	dStart, dEnd := date.WindowForTime(day)
	dEnd = dEnd.Add(-1 * time.Second)

	keys, err := fdb.KeysInTimeRangeByTags(tags, dStart, dEnd)
	if err != nil {
		log.Errorf(ctx, "batchFlightDayHandler: KeysInTimeRangeByTags: %v", err)
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	singleFlightUrl := "/backend/fdb-batch/flight"
	for _, key := range keys {
		str += fmt.Sprintf("Enqueueing day=%s: %s?job=%s&key=%s\n",
			day.Format("2006.01.02"), singleFlightUrl, job, key.Encode())

		if r.FormValue("dryrun") == "" {
			t := taskqueue.NewPOSTTask(singleFlightUrl, map[string][]string{
				// "date": {day.Format("2006.01.02")},
				"key": {key.Encode()},
				"job": {job},
			})
			if _, err := taskqueue.Add(ctx, t, "batch"); err != nil {
				log.Errorf(ctx, "batchFlightDayHandler: enqueue: %v", err)
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
		}
		n++
	}

	log.Infof(ctx, "enqueued %d batch items for '%s'", n, job)
	w.Header().Set("Content-Type", "text/plain")
	w.Write([]byte(fmt.Sprintf("OK, batch, enqueued %d tasks for %s\n%s", n, job, str)))
}
// PushTask pushes a task into the queue.
func (queue *PushQueue) PushTask(req *wcg.Request, urlPath string, form url.Values) error {
	var queueName string
	if req.IsTest() || lib.IsOnLocalGAE() {
		queueName = "default"
	} else {
		queueName = queue.Name
	}
	if _, err := taskqueue.Add(gae.NewContext(req), taskqueue.NewPOSTTask(urlPath, form), queueName); err != nil {
		req.Logger.Errorf("[Queue] Error adding a task (%s) into the queue (%q): %v", urlPath, queueName, err)
		return err
	}
	return nil
}
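PushTask only reads queue.Name from its receiver, and the type itself is not shown above. A minimal sketch of what it presumably looks like follows; this is an assumption for illustration, and the real definition may carry more configuration:

// PushQueue is a hypothetical sketch of the receiver type used by PushTask.
// Only the Name field is implied by the method above.
type PushQueue struct {
	Name string // name of a push queue declared in queue.yaml
}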
// notifySubscribersAsync creates an async job to begin notifying subscribers.
func notifySubscribersAsync(c context.Context, d *dataChanges, all bool) error {
	changes, err := json.Marshal(d)
	if err != nil {
		return err
	}
	p := path.Join(config.Prefix, "/task/notify-subscribers")
	t := taskqueue.NewPOSTTask(p, url.Values{
		"changes": {string(changes)},
		"all":     {fmt.Sprintf("%v", all)},
	})
	_, err = taskqueue.Add(c, t, "")
	return err
}
// notifyUserAsync enqueues a /task/notify-user task carrying the JSON-encoded push message.
func notifyUserAsync(c context.Context, uid, shard string, m *pushMessage) error {
	p := path.Join(config.Prefix, "/task/notify-user")
	msg, err := json.Marshal(m)
	if err != nil {
		return err
	}
	t := taskqueue.NewPOSTTask(p, url.Values{
		"uid":     {uid},
		"shard":   {shard},
		"message": {string(msg)},
	})
	_, err = taskqueue.Add(c, t, "")
	return err
}
// notifySubscribersAsync creates an async job to begin notifying subscribers.
func notifySubscribersAsync(c context.Context, d *dataChanges, all bool) error {
	skeys := make([]string, 0, len(d.Sessions))
	for id := range d.Sessions {
		skeys = append(skeys, id)
	}
	p := path.Join(config.Prefix, "/task/notify-subscribers")
	// TODO: add ioext to the payload
	t := taskqueue.NewPOSTTask(p, url.Values{
		"sessions": {strings.Join(skeys, " ")},
		"all":      {fmt.Sprintf("%v", all)},
	})
	_, err := taskqueue.Add(c, t, "")
	return err
}
func CallMinecraftTQ(c context.Context, minecraftKey *datastore.Key, operationID string) (*taskqueue.Task, error) {
	log.Infof(c, "Call Minecraft TQ, key = %v, operationID = %s", minecraftKey, operationID)
	if minecraftKey == nil {
		return nil, errors.New("key is required")
	}
	if len(operationID) < 1 {
		return nil, errors.New("operationID is required")
	}

	t := taskqueue.NewPOSTTask("/tq/1/minecraft", url.Values{
		"keyStr":      {minecraftKey.Encode()},
		"operationID": {operationID},
	})
	t.Delay = time.Second * 30

	return taskqueue.Add(c, t, "minecraft")
}
func (a *ServerTQApi) CallDeleteInstance(c context.Context, minecraftKey *datastore.Key, operationID string, latestSnapshot string) (*taskqueue.Task, error) {
	log.Infof(c, "Call Minecraft TQ, key = %v, operationID = %s", minecraftKey, operationID)
	if minecraftKey == nil {
		return nil, errors.New("key is required")
	}
	if len(operationID) < 1 {
		return nil, errors.New("operationID is required")
	}

	t := taskqueue.NewPOSTTask("/tq/1/server/instance/delete", url.Values{
		"keyStr":         {minecraftKey.Encode()},
		"operationID":    {operationID},
		"latestSnapshot": {latestSnapshot},
	})
	t.Delay = time.Second * 30

	return taskqueue.Add(c, t, "minecraft")
}
func callbackHandler(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)

	chi, err := strconv.ParseInt(os.Getenv("CHANNEL_ID"), 10, 64)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	chs := os.Getenv("CHANNEL_SECRET")
	mid := os.Getenv("MID")
	bot, err := linebot.NewClient(chi, chs, mid)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	received, err := bot.ParseRequest(r)
	if err != nil {
		if err == linebot.ErrInvalidSignature {
			w.WriteHeader(http.StatusBadRequest)
		} else {
			w.WriteHeader(http.StatusInternalServerError)
		}
		return
	}

	for _, result := range received.Results {
		content := result.Content()
		if content != nil && content.IsMessage && content.ContentType == linebot.ContentTypeText {
			text, _ := content.TextContent()
			log.Debugf(ctx, "id: %s, text: %s, from: %s, to: %v", content.ID, text.Text, text.From, text.To)

			values := url.Values{}
			values.Set("to", text.From)
			values.Set("text", text.Text)
			t := taskqueue.NewPOSTTask(TEXT_URI, values)
			_, err = taskqueue.Add(ctx, t, TEXT_MESSAGE_QUEUE)
			if err != nil {
				w.WriteHeader(http.StatusInternalServerError)
				return
			}
		}
	}
	w.WriteHeader(http.StatusOK)
}
func f(ctx context.Context) {
	err := datastore.RunInTransaction(ctx, func(ctx context.Context) error {
		t := taskqueue.NewPOSTTask("/worker", url.Values{
			// ...
		})
		// Use the transaction's context when invoking taskqueue.Add.
		_, err := taskqueue.Add(ctx, t, "")
		if err != nil {
			// Handle error
		}
		// ...
		return nil
	}, nil)
	if err != nil {
		// Handle error
	}
	// ...
}
// AddToSearchIndex adds an Item to the search index.
// The current implementation uses task queues, so this operation will
// be executed in the background.
func AddToSearchIndex(con *data.Context, i data.Item) {
	// We'll update the search index next.

	// FIRST: Store the HTML of the item in the memcache.
	// We do that because it is often larger than the maximum
	// task size allowed at the GAE.
	memI := &memcache.Item{
		Key:   i.DSKey,
		Value: []byte(i.HTMLforSearch),
	}
	if err := memcache.Set(con.C, memI); err != nil {
		con.Log.Infof("Error while storing the search HTML in the memcache for URL %v", i.URL)
	}

	// SECOND: Put the search index update task in the queue.
	task := taskqueue.NewPOSTTask("/t/search/add_to_index", itemToSearchIndexTask(i))
	if _, err := taskqueue.Add(con.C, task, "search-index"); err != nil {
		con.Log.Errorf("Error while triggering the add to index: %v", err)
	}
}
// Enqueues one 'day' task per day in the range
func batchFlightDateRangeHandler(w http.ResponseWriter, r *http.Request) {
	ctx := req2ctx(r)
	n := 0
	str := ""

	s, e, _ := widget.FormValueDateRange(r)
	job := r.FormValue("job")
	if job == "" {
		http.Error(w, "Missing argument: &job=foo", http.StatusInternalServerError)
		return
	}

	str += fmt.Sprintf("** s: %s\n** e: %s\n", s, e)

	days := date.IntermediateMidnights(s.Add(-1*time.Second), e) // decrement start, to include it
	for _, day := range days {
		dayUrl := "/backend/fdb-batch/day"
		dayStr := day.Format("2006/01/02")

		str += fmt.Sprintf(" * adding %s, %s via %s\n", job, dayStr, dayUrl)

		if r.FormValue("dryrun") == "" {
			t := taskqueue.NewPOSTTask(dayUrl, map[string][]string{
				"day": {dayStr},
				"job": {job},
			})
			if _, err := taskqueue.Add(ctx, t, "batch"); err != nil {
				log.Errorf(ctx, "batchFlightDateRangeHandler: enqueue: %v", err)
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
		}
		n++
	}

	log.Infof(ctx, "enqueued %d batch items for '%s'", n, job)
	w.Header().Set("Content-Type", "text/plain")
	w.Write([]byte(fmt.Sprintf("OK, batch, enqueued %d tasks for %s\n%s", n, job, str)))
}
func handler(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	if name := r.FormValue("name"); name != "" {
		t := taskqueue.NewPOSTTask("/worker", map[string][]string{"name": {name}})
		if _, err := taskqueue.Add(ctx, t, ""); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
	}
	q := datastore.NewQuery("Counter")
	var counters []Counter
	if _, err := q.GetAll(ctx, &counters); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if err := handlerTemplate.Execute(w, counters); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// OK
}
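handler above queries a Counter kind and posts a "name" form value to /worker, but neither the entity nor the worker is shown. Here is a minimal sketch of what they might look like, assuming the worker simply increments a per-name counter; the field names and logic are assumptions, and imports are elided to match the surrounding snippets (google.golang.org/appengine, .../datastore, net/http):

// Counter is a sketch of the datastore entity queried by handler above.
type Counter struct {
	Name  string
	Count int
}

// worker is a hypothetical handler for the "/worker" push tasks enqueued above.
// Push tasks arrive as ordinary HTTP POST requests, so the payload is read
// with r.FormValue.
func worker(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	name := r.FormValue("name")
	key := datastore.NewKey(ctx, "Counter", name, 0, nil)
	err := datastore.RunInTransaction(ctx, func(ctx context.Context) error {
		var counter Counter
		if err := datastore.Get(ctx, key, &counter); err != nil && err != datastore.ErrNoSuchEntity {
			return err
		}
		counter.Name = name
		counter.Count++
		_, err := datastore.Put(ctx, key, &counter)
		return err
	}, nil)
	if err != nil {
		// Returning a non-2xx status causes the task queue to retry the task.
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}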
// Examine all users. If they had any complaints, throw them in the queue.
func bksvScanYesterdayHandler(w http.ResponseWriter, r *http.Request) {
	ctx := req2ctx(r)
	cdb := complaintdb.NewDB(ctx)

	var cps = []types.ComplainerProfile{}
	cps, err := cdb.GetAllProfiles()
	if err != nil {
		cdb.Errorf(" /bksv/scan-yesterday: getallprofiles: %v", err)
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	start, end := date.WindowForYesterday()
	bksv_ok := 0

	for _, cp := range cps {
		// if cp.CcSfo == false { continue }  // We do not care about this value.

		var complaints = []types.Complaint{}
		complaints, err = cdb.GetComplaintsInSpanByEmailAddress(cp.EmailAddress, start, end)
		if err != nil {
			cdb.Errorf(" /bksv/scan-yesterday: getbyemail(%s): %v", cp.EmailAddress, err)
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}

		if len(complaints) > 0 {
			t := taskqueue.NewPOSTTask("/bksv/submit-user", map[string][]string{
				"user": {cp.EmailAddress},
			})
			if _, err := taskqueue.Add(cdb.Ctx(), t, "submitreports"); err != nil {
				cdb.Errorf(" /bksv/scan-yesterday: enqueue: %v", err)
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
			bksv_ok++
		}
	}

	cdb.Infof("enqueued %d bksv", bksv_ok)
	w.Write([]byte(fmt.Sprintf("OK, enqueued %d", bksv_ok)))
}
// NewTask creates a new taskqueue.Task for the entity with the correct
// headers set to match those on the entity
func (l *Locker) NewTask(key *datastore.Key, entity Lockable, path string, params url.Values) *taskqueue.Task {
	// prepare the lock entries
	lock := entity.getLock()
	lock.Timestamp = getTime()
	lock.RequestID = ""
	lock.Retries = 0
	lock.Sequence++

	json, _ := key.MarshalJSON()

	// set task headers so that we can retrieve the matching entity
	// and check that the executing task is the one we're expecting
	task := taskqueue.NewPOSTTask(path, params)
	task.Header.Set("X-Lock-Seq", strconv.Itoa(lock.Sequence))
	task.Header.Set("X-Lock-Key", string(json))
	if l.Host != "" {
		task.Header.Set("Host", l.Host)
	}

	return task
}
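NewTask only builds the task; the caller still has to enqueue it. A hedged usage sketch follows, assuming it lives in the same package as Locker; persisting the entity's updated lock fields (Sequence etc.) is presumably handled elsewhere, for example by the library's Schedule helper seen in counterHandler below:

// scheduleProcess is a hypothetical caller: build the locked task for the
// entity, then enqueue it on the default queue.
func scheduleProcess(ctx context.Context, l *Locker, key *datastore.Key, entity Lockable) error {
	t := l.NewTask(key, entity, "/process", nil)
	_, err := taskqueue.Add(ctx, t, "")
	return err
}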
func pushHandler(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)

	key := r.FormValue("key")

	// Create a task pointed at a backend.
	t := taskqueue.NewPOSTTask("/path/to/my/worker/", url.Values{
		"key": {key},
	})
	host, err := appengine.ModuleHostname(ctx, "backend1", "", "")
	if err != nil {
		// Handle err
	}
	t.Header = http.Header{
		"Host": {host},
	}

	// Add the task to the default queue.
	if _, err := taskqueue.Add(ctx, t, ""); err != nil {
		// Handle err
	}
}
func someRequest(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	// Perform asynchronous requests to update counter.
	// (missing error handling here.)
	t := taskqueue.NewPOSTTask("/_ah/counter", map[string][]string{
		"counter_name": {"someRequest"},
	})
	taskqueue.Add(ctx, t, "")

	// temporarily use a new namespace
	{
		ctx, err := appengine.Namespace(ctx, "-global-")
		if err != nil {
			// ... handle err
		}
		taskqueue.Add(ctx, t, "")
	}

	io.WriteString(w, "Counters will be updated.\n")
}
func counterHandler(c context.Context, r *http.Request, key *datastore.Key, entity locker.Lockable) error {
	counter := entity.(*Counter)
	log.Debugf(c, "process: %d", counter.Sequence)

	// simulate some processing work
	time.Sleep(time.Duration(1) * time.Second)

	if counter.Sequence == 5 {
		// simulate a duplicate task execution by creating one ourselves
		// needless to say, you wouldn't want to be doing this in practice
		// but it should demonstrate that the locker prevents spurious
		// task execution and guarantees the correct sequencing happens
		json, _ := key.MarshalJSON()
		t := taskqueue.NewPOSTTask("/process", nil)
		t.Header.Set("X-Lock-Seq", "6")
		t.Header.Set("X-Lock-Key", string(json))
		taskqueue.Add(c, t, "")
	}

	if counter.Sequence < counter.Limit {
		return l.Schedule(c, key, counter, "/process", nil)
	}
	return l.Complete(c, key, counter)
}
// PostStatus enqueues a parameterless POST task for taskUrl on the status queue.
func (q AppengineTaskQueue) PostStatus(c context.Context, taskUrl string) error {
	task := taskqueue.NewPOSTTask(taskUrl, url.Values{})
	_, err := taskqueue.Add(c, task, q.StatusQueueName)
	return err
}
// PostTask enqueues a POST task for taskUrl, carrying its parameters as a single "json" form value.
func (q AppengineTaskQueue) PostTask(c context.Context, taskUrl string, jsonParameters string) error {
	task := taskqueue.NewPOSTTask(taskUrl, url.Values{"json": []string{jsonParameters}})
	_, err := taskqueue.Add(c, task, q.TaskQueueName)
	return err
}
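Neither method shows the receiver's definition. A sketch of what AppengineTaskQueue presumably looks like, inferred only from the two queue-name fields used above (an assumption, not the actual definition):

// AppengineTaskQueue, as implied by PostStatus and PostTask: two queue names,
// presumably set at construction time. Sketch only; the real type may differ.
type AppengineTaskQueue struct {
	StatusQueueName string // queue used by PostStatus
	TaskQueueName   string // queue used by PostTask
}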
// Look for new flights that we should add to our database. Invoked by cron.
func scanHandler(w http.ResponseWriter, r *http.Request) {
	ctx := req2ctx(r)
	client := req2client(r)

	if db, err1 := fdb24.NewFlightDBFr24(client); err1 != nil {
		log.Errorf(ctx, " /mdb/scan: newdb: %v", err1)
		http.Error(w, err1.Error(), http.StatusInternalServerError)

	} else {
		if flights, err2 := db.LookupList(sfo.KBoxSnarfingCatchment); err2 != nil {
			log.Errorf(ctx, " /mdb/scan: lookup: %v", err2)
			http.Error(w, err2.Error(), http.StatusInternalServerError)

		} else {
			set := ftype.FIFOSet{}
			if err3 := loadFIFOSet(ctx, &set); err3 != nil {
				log.Errorf(ctx, " /mdb/scan: loadcache: %v", err3)
				http.Error(w, err3.Error(), http.StatusInternalServerError)
			}
			new := set.FindNew(flights)
			if err4 := saveFIFOSet(ctx, set); err4 != nil {
				log.Errorf(ctx, " /mdb/scan: savecache: %v", err4)
				http.Error(w, err4.Error(), http.StatusInternalServerError)
			}

			// Enqueue the new flights
			n := 1000
			for i, fs := range new {
				if i >= n {
					break
				}
				if fsStr, err5 := fs.Base64Encode(); err5 != nil {
					http.Error(w, err5.Error(), http.StatusInternalServerError)
					return
				} else {
					url := fmt.Sprintf("/fdb/addflight?deb=%s", fs.F.UniqueIdentifier())
					t := taskqueue.NewPOSTTask(url, map[string][]string{
						"flightsnapshot": {fsStr},
					})
					// We could be smarter about this.
					t.Delay = time.Minute * 60

					if _, err6 := taskqueue.Add(ctx, t, "addflight"); err6 != nil {
						log.Errorf(ctx, " /mdb/scan: enqueue: %v", err6)
						http.Error(w, err6.Error(), http.StatusInternalServerError)
						return
					}
				}
			}

			var params = map[string]interface{}{
				"New":     new,
				"Flights": flights,
			}
			if err7 := templates.ExecuteTemplate(w, "fdb-scan", params); err7 != nil {
				http.Error(w, err7.Error(), http.StatusInternalServerError)
			}
		}
	}
}
// Where is the version of this that does GCS via batch ?
func monthHandler(w http.ResponseWriter, r *http.Request) {
	ctx := req2ctx(r)

	year, err := strconv.ParseInt(r.FormValue("year"), 10, 64)
	if err != nil {
		http.Error(w, "need arg 'year' (2015)", http.StatusInternalServerError)
		return
	}
	month, err := strconv.ParseInt(r.FormValue("month"), 10, 64)
	if err != nil {
		http.Error(w, "need arg 'month' (1-12)", http.StatusInternalServerError)
		return
	}

	day, err := strconv.ParseInt(r.FormValue("day"), 10, 64)
	if err != nil {
		// Presume we should enqueue this for batch
		taskUrl := fmt.Sprintf("/backend/monthdump?year=%d&month=%d", year, month)
		t := taskqueue.NewPOSTTask(taskUrl, map[string][]string{
			"year":  {r.FormValue("year")},
			"month": {r.FormValue("month")},
		})
		if _, err := taskqueue.Add(ctx, t, "batch"); err != nil {
			log.Errorf(ctx, "monthHandler: enqueue: %v", err)
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "text/plain")
		w.Write([]byte(fmt.Sprintf("OK\nHave enqueued for batch {%s}\n", taskUrl)))
		return
	}

	num, err := strconv.ParseInt(r.FormValue("num"), 10, 64)
	if err != nil {
		http.Error(w, "need arg 'num' (31 - 'day')", http.StatusInternalServerError)
		return
	}

	now := date.NowInPdt()
	firstOfMonth := time.Date(int(year), time.Month(month), 1, 0, 0, 0, 0, now.Location())
	s := firstOfMonth.AddDate(0, 0, int(day-1))
	e := s.AddDate(0, 0, int(num)).Add(-1 * time.Second)

	log.Infof(ctx, "Yow: START : %s", s)
	log.Infof(ctx, "Yow: END   : %s", e)

	cdb := complaintdb.NewDB(ctx)

	filename := s.Format("complaints-20060102") + e.Format("-20060102.csv")
	w.Header().Set("Content-Type", "application/csv")
	w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filename))

	cols := []string{
		"CallerCode", "Name", "Address", "Zip", "Email", "HomeLat", "HomeLong",
		"UnixEpoch", "Date", "Time(PDT)", "Notes", "Flightnumber", "ActivityDisturbed", "AutoSubmit",
	}
	csvWriter := csv.NewWriter(w)
	csvWriter.Write(cols)

	iter := cdb.NewIter(cdb.QueryInSpan(s, e))
	for {
		c, err := iter.NextWithErr()
		if err != nil {
			http.Error(w, fmt.Sprintf("Zip iterator failed: %v", err), http.StatusInternalServerError)
			return
		} else if c == nil {
			break // We've hit EOF
		}

		r := []string{
			c.Profile.CallerCode,
			c.Profile.FullName,
			c.Profile.Address,
			c.Profile.StructuredAddress.Zip,
			c.Profile.EmailAddress,
			fmt.Sprintf("%.4f", c.Profile.Lat),
			fmt.Sprintf("%.4f", c.Profile.Long),
			fmt.Sprintf("%d", c.Timestamp.UTC().Unix()),
			c.Timestamp.Format("2006/01/02"),
			c.Timestamp.Format("15:04:05"),
			c.Description,
			c.AircraftOverhead.FlightNumber,
			c.Activity,
			fmt.Sprintf("%v", c.Profile.CcSfo),
		}
		//r = []string{c.Timestamp.Format("15:04:05")}

		if err := csvWriter.Write(r); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
	}

	csvWriter.Flush()
}