Example #1
func handlerOCNReceiver(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)

	for k, v := range r.Header {
		log.Infof(ctx, "%s:%s", k, v)
	}

	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		log.Errorf(ctx, "ERROR request body read: %s", err)
		log.Errorf(ctx, "ERROR task queue add: %s", err)
		w.WriteHeader(500)
		return
	}
	log.Infof(ctx, "%s", body)

	if r.Header.Get("X-Goog-Resource-State") == "sync" {
		w.WriteHeader(200)
		return
	}

	var m OCNMessage
	err = json.NewDecoder(bytes.NewReader(body)).Decode(&m)
	if err != nil {
		log.Errorf(ctx, "ERROR json decode: %s", err)
		log.Errorf(ctx, "ERROR task queue add: %s", err)
		w.WriteHeader(500)
		return
	}

	if r.Header.Get("X-Goog-Resource-State") == "exists" {
		t := &taskqueue.Task{
			Payload: body,
			Method:  "PULL",
		}
		_, err = taskqueue.Add(ctx, t, "pull-queue")
		if err != nil {
			log.Errorf(ctx, "ERROR pull-queue task add: %s", err)
			w.WriteHeader(500)
			return
		}

		_, err = taskqueue.Add(ctx,
			&taskqueue.Task{
				Path: "/api/1/gcemanager",
			},
			"gce-manager")
		if err != nil {
			log.Errorf(ctx, "ERROR gce-manager task add: %s", err)
			w.WriteHeader(500)
			return
		}
	}

	w.WriteHeader(200)
	w.Write([]byte("done!"))
}
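The handler above only pushes payloads onto pull-queue; a separate worker has to lease and delete them. A minimal consumer sketch, assuming the same imports as the example (appengine, appengine/taskqueue, appengine/log, encoding/json); the handler name, batch size, and lease time are illustrative, not part of the original:
func handlerPullQueueWorker(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)

	// Lease up to 100 tasks from "pull-queue" for 60 seconds.
	tasks, err := taskqueue.Lease(ctx, 100, "pull-queue", 60)
	if err != nil {
		log.Errorf(ctx, "ERROR lease: %s", err)
		w.WriteHeader(500)
		return
	}

	for _, t := range tasks {
		var m OCNMessage
		if err := json.Unmarshal(t.Payload, &m); err != nil {
			log.Errorf(ctx, "ERROR json decode: %s", err)
			continue
		}
		// ... process m here ...
	}

	// Delete the leased tasks so they are not re-leased once the lease expires.
	if err := taskqueue.DeleteMulti(ctx, tasks, "pull-queue"); err != nil {
		log.Errorf(ctx, "ERROR task delete: %s", err)
	}

	w.WriteHeader(200)
}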
Example #2
File: tada.go Project: catamorphism/tada
// Adds a reminder with the given text and due date to the pull queue.
// A reminder will be sent half an hour before the due date
func addReminder(ctx context.Context, item TodoItem) *MaybeError {
	maybeBlob := itemToJson(item)
	switch (*maybeBlob).(type) {
	case Blob:
		{
			item1 := ([]byte)((*maybeBlob).(Blob))
			t := &taskqueue.Task{
				Payload: item1,
				Method:  "PULL",
			}
			_, err := taskqueue.Add(ctx, t, "reminders")
			if err != nil {
				var result = new(MaybeError)
				*result = E(err.Error())
				return result
			}
		}
	case E:
		{
			return maybeBlob
		}
	case TodoItem, Matches, TodoID:
		{
			var result = new(MaybeError)
			*result = E("strange result from JSON encoder")
			return result
		}
	}
	var result = new(MaybeError)
	*result = Ok{}
	return result
}
Example #3
// Writes them all into a batch queue
func publishAllComplaintsHandler(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	str := ""

	s, e, _ := widget.FormValueDateRange(r)
	days := date.IntermediateMidnights(s.Add(-1*time.Second), e) // decrement start, to include it
	url := "/backend/publish-complaints"

	for i, day := range days {
		dayStr := day.Format("2006.01.02")

		thisUrl := fmt.Sprintf("%s?datestring=%s", url, dayStr)
		if r.FormValue("skipload") != "" {
			thisUrl += "&skipload=" + r.FormValue("skipload")
		}

		t := taskqueue.NewPOSTTask(thisUrl, map[string][]string{})
		// Give ourselves time to get all these tasks posted, and stagger them out a bit
		t.Delay = time.Minute + time.Duration(i)*15*time.Second

		if _, err := taskqueue.Add(ctx, t, "batch"); err != nil {
			log.Errorf(ctx, "publishAllComplaintsHandler: enqueue: %v", err)
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}

		str += " * posting for " + thisUrl + "\n"
	}

	w.Header().Set("Content-Type", "text/plain")
	w.Write([]byte(fmt.Sprintf("OK, enqueued %d\n--\n%s", len(days), str)))
}
Example #4
// Grab all users, and enqueue them for batch processing
func upgradeHandler(w http.ResponseWriter, r *http.Request) {
	ctx := req2ctx(r)
	cdb := complaintdb.NewDB(ctx)

	var cps = []types.ComplainerProfile{}
	cps, err := cdb.GetAllProfiles()
	if err != nil {
		cdb.Errorf("upgradeHandler: getallprofiles: %v", err)
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	for _, cp := range cps {
		t := taskqueue.NewPOSTTask("/backend/cdb-batch-user", map[string][]string{
			"email": {cp.EmailAddress},
		})
		if _, err := taskqueue.Add(cdb.Ctx(), t, "batch"); err != nil {
			cdb.Errorf("upgradeHandler: enqueue: %v", err)
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
	}
	cdb.Infof("enqueued %d batch", len(cps))
	w.Write([]byte(fmt.Sprintf("OK, enqueued %d", len(cps))))
}
Example #5
// Schedule schedules a task with lock
func (l *Locker) Schedule(c context.Context, key *datastore.Key, entity Lockable, path string, params url.Values) error {
	task := l.NewTask(key, entity, path, params)

	// Use same queue that we started on if defined, otherwise use configured default
	queue, ok := QueueFromContext(c)
	if !ok {
		queue = l.DefaultQueue
	}

	// Write the datastore entity and schedule the task within a
	// transaction to guarantee that both happen: the entity will be
	// committed to the datastore by the time the task executes, and
	// the task won't be scheduled if the entity update fails.
	err := storage.RunInTransaction(c, func(tc context.Context) error {
		// TODO: check if entity already exists and handle accordingly
		// don't overwrite if already locked for processing
		if _, err := storage.Put(tc, key, entity); err != nil {
			return err
		}
		if _, err := taskqueue.Add(tc, task, queue); err != nil {
			return err
		}
		return nil
	}, &datastore.TransactionOptions{XG: false, Attempts: 3})

	return err
}
Example #6
func (n *namespace) update(c context.Context, mapper *mapper, key *datastore.Key) error {
	queue, ok := locker.QueueFromContext(c)
	if !ok {
		queue = mapper.config.DefaultQueue
	}

	// update namespace status within a transaction
	return storage.RunInTransaction(c, func(tc context.Context) error {
		fresh := new(namespace)
		if err := storage.Get(tc, key, fresh); err != nil {
			return err
		}

		// shards can already be processing ahead of this total being written
		fresh.ShardsTotal = n.ShardsTotal

		// if all shards have completed, schedule namespace/completed to update job
		if fresh.ShardsSuccessful == fresh.ShardsTotal {
			t := mapper.locker.NewTask(key, fresh, mapper.config.Path+namespaceCompleteURL, nil)
			if _, err := taskqueue.Add(tc, t, queue); err != nil {
				log.Errorf(c, "add task %s", err.Error())
				return err
			}
		}

		if _, err := storage.Put(tc, key, fresh); err != nil {
			return err
		}

		return nil
	}, &datastore.TransactionOptions{XG: true})
}
Example #7
func TestTaskQueue(t *testing.T) {
	// Only run the test if APPENGINE_DEV_APPSERVER is explicitly set.
	if os.Getenv("APPENGINE_DEV_APPSERVER") == "" {
		t.Skip("APPENGINE_DEV_APPSERVER not set")
	}
	queueNames := []string{
		"taskQueueName",
	}
	ctx, done, err := NewContextOptions(&Options{
		TaskQueues: queueNames,
	})
	queueNames = append(queueNames, "default")
	if err != nil {
		t.Fatalf("NewContext: %v", err)
	}
	defer done()

	for _, queueName := range queueNames {
		_, err = taskqueue.Add(ctx, taskqueue.NewPOSTTask("/worker", url.Values{
			"key": {"value"},
		}), queueName)
		if err != nil {
			t.Errorf("Unable to add task to queue - %v", err)
		}
		if stats, err := taskqueue.QueueStats(ctx, []string{queueName}); err != nil {
			t.Errorf("Unable to fetch queue statistics - %v", err)
		} else if len(stats) == 0 {
			t.Errorf("No stats found for the default taskqueue!")
		} else if stats[0].Tasks != 1 {
			t.Errorf("Wrong number of tasks found in queue, wanted 1, got %d", stats[0].Tasks)
		}
	}
}
Example #8
func notifyShardAsync(c context.Context, shard, changes string, all bool) error {
	p := path.Join(config.Prefix, "/task/notify-shard")
	t := taskqueue.NewPOSTTask(p, url.Values{
		"shard":   {shard},
		"changes": {changes},
		"all":     {fmt.Sprintf("%v", all)},
	})
	_, err := taskqueue.Add(c, t, "")
	return err
}
Example #9
// pingUserAsync creates an async job to send a push notification to user devices.
// sessions are session IDs used to compare against user bookmarks.
// TODO: add ioext support
func pingUserAsync(c context.Context, uid string, sessions []string, all bool) error {
	p := path.Join(config.Prefix, "/task/ping-user")
	t := taskqueue.NewPOSTTask(p, url.Values{
		"uid":      {uid},
		"sessions": {strings.Join(sessions, " ")},
		"all":      {fmt.Sprintf("%v", all)},
	})
	_, err := taskqueue.Add(c, t, "")
	return err
}
Example #10
// pingExtPartyAsync notifies extra parties at config.ExtPingURL about data updates.
func pingExtPartyAsync(c context.Context, key string) error {
	if key == "" || config.ExtPingURL == "" {
		return nil
	}
	p := path.Join(config.Prefix, "/task/ping-ext")
	t := taskqueue.NewPOSTTask(p, url.Values{
		"key": {key},
	})
	_, err := taskqueue.Add(c, t, "")
	return err
}
Example #11
// This enqueues tasks for each individual day, or flight
func batchFlightScanHandler(w http.ResponseWriter, r *http.Request) {
	c := appengine.NewContext(r)

	tags := []string{} //"ADSB"} // Maybe make this configurable ...

	n := 0
	str := ""
	s, e, _ := widget.FormValueDateRange(r)
	job := r.FormValue("job")
	if job == "" {
		http.Error(w, "Missing argument: &job=foo", http.StatusInternalServerError)
	}

	days := date.IntermediateMidnights(s.Add(-1*time.Second), e) // decrement start, to include it
	for _, day := range days {
		// Get the keys for all the flights on this day.
		fdb := oldfgae.FlightDB{C: oldappengine.NewContext(r)}

		dStart, dEnd := date.WindowForTime(day)
		dEnd = dEnd.Add(-1 * time.Second)
		keys, err := fdb.KeysInTimeRangeByTags(tags, dStart, dEnd)
		if err != nil {
			log.Errorf(c, "upgradeHandler: enqueue: %v", err)
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}

		singleFlightUrl := "/backend/fdb-batch/flight"
		for _, key := range keys {
			str += fmt.Sprintf("Enqueing day=%s: %s?job=%s&key=%s\n",
				day.Format("2006.01.02"), singleFlightUrl, job, key.Encode())

			t := taskqueue.NewPOSTTask(singleFlightUrl, map[string][]string{
				"date": {day.Format("2006.01.02")},
				"key":  {key.Encode()},
				"job":  {job},
			})

			if _, err := taskqueue.Add(c, t, "batch"); err != nil {
				log.Errorf(c, "upgradeHandler: enqueue: %v", err)
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}

			n++
		}
	}

	log.Infof(c, "enqueued %d batch items for '%s'", n, job)

	w.Header().Set("Content-Type", "text/plain")
	w.Write([]byte(fmt.Sprintf("OK, batch, enqueued %d tasks for %s\n%s", n, job, str)))
}
Example #12
func addTaskHandler(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)

	// [START adding_tasks_to_a_pull_queue]
	t := &taskqueue.Task{
		Payload: []byte("hello world"),
		Method:  "PULL",
	}
	_, err := taskqueue.Add(ctx, t, "pull-queue")
	// [END adding_tasks_to_a_pull_queue]
	_ = err

	// [START leasing_tasks_1]
	tasks, err := taskqueue.Lease(ctx, 100, "pull-queue", 3600)
	// [END leasing_tasks_1]

	// [START leasing_tasks_2]
	_, err = taskqueue.Add(ctx, &taskqueue.Task{
		Payload: []byte("parse"), Method: "PULL", Tag: "parse",
	}, "pull-queue")
	_, err = taskqueue.Add(ctx, &taskqueue.Task{
		Payload: []byte("render"), Method: "PULL", Tag: "render",
	}, "pull-queue")

	// leases render tasks, but not parse
	tasks, err = taskqueue.LeaseByTag(ctx, 100, "pull-queue", 3600, "render")

	// Leases up to 100 tasks that have same tag.
	// Tag is that of "oldest" task by ETA.
	tasks, err = taskqueue.LeaseByTag(ctx, 100, "pull-queue", 3600, "")
	// [END leasing_tasks_2]

	// [START deleting_tasks_1]
	tasks, err = taskqueue.Lease(ctx, 100, "pull-queue", 3600)
	// Perform some work with the tasks here

	taskqueue.DeleteMulti(ctx, tasks, "pull-queue")
	// [END deleting_tasks_1]

}
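The leasing snippet above never extends a lease. If processing can outlast the lease, taskqueue.ModifyLease renews it so the task is not handed to another worker in the meantime. A minimal sketch, assuming tasks obtained from a taskqueue.Lease call like the one above; the function name and the one-hour renewal are assumptions:
func processLeasedTasks(ctx context.Context) error {
	tasks, err := taskqueue.Lease(ctx, 100, "pull-queue", 60)
	if err != nil {
		return err
	}
	for _, task := range tasks {
		// ... start long-running work on task.Payload ...

		// Extend this task's lease by another hour if more time is needed;
		// otherwise it becomes leasable again when the original lease expires.
		if err := taskqueue.ModifyLease(ctx, task, "pull-queue", 3600); err != nil {
			continue
		}

		// ... finish the work, then delete the task individually ...
		if err := taskqueue.Delete(ctx, task, "pull-queue"); err != nil {
			return err
		}
	}
	return nil
}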
Example #13
// Dequeue a single day, and enqueue a job for each flight on that day
func batchFlightDayHandler(w http.ResponseWriter, r *http.Request) {
	ctx := req2ctx(r)

	tags := []string{} //"ADSB"} // Maybe make this configurable ...

	n := 0
	str := ""
	job := r.FormValue("job")
	if job == "" {
		http.Error(w, "Missing argument: &job=foo", http.StatusInternalServerError)
	}

	day := date.ArbitraryDatestring2MidnightPdt(r.FormValue("day"), "2006/01/02")

	fdb := oldfgae.NewDB(r)

	dStart, dEnd := date.WindowForTime(day)
	dEnd = dEnd.Add(-1 * time.Second)
	keys, err := fdb.KeysInTimeRangeByTags(tags, dStart, dEnd)
	if err != nil {
		log.Errorf(ctx, "upgradeHandler: enqueue: %v", err)
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	singleFlightUrl := "/backend/fdb-batch/flight"
	for _, key := range keys {
		str += fmt.Sprintf("Enqueing day=%s: %s?job=%s&key=%s\n",
			day.Format("2006.01.02"), singleFlightUrl, job, key.Encode())

		if r.FormValue("dryrun") == "" {
			t := taskqueue.NewPOSTTask(singleFlightUrl, map[string][]string{
				// "date": {day.Format("2006.01.02")},
				"key": {key.Encode()},
				"job": {job},
			})

			if _, err := taskqueue.Add(ctx, t, "batch"); err != nil {
				log.Errorf(ctx, "upgradeHandler: enqueue: %v", err)
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
		}

		n++
	}

	log.Infof(ctx, "enqueued %d batch items for '%s'", n, job)

	w.Header().Set("Content-Type", "text/plain")
	w.Write([]byte(fmt.Sprintf("OK, batch, enqueued %d tasks for %s\n%s", n, job, str)))
}
Example #14
// notifySubscribersAsync creates an async job to begin notifying subscribers.
func notifySubscribersAsync(c context.Context, d *dataChanges, all bool) error {
	changes, err := json.Marshal(d)
	if err != nil {
		return err
	}
	p := path.Join(config.Prefix, "/task/notify-subscribers")
	t := taskqueue.NewPOSTTask(p, url.Values{
		"changes": {string(changes)},
		"all":     {fmt.Sprintf("%v", all)},
	})
	_, err = taskqueue.Add(c, t, "")
	return err
}
Example #15
func someRequest(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)

	// Perform asynchronous requests to update counter.
	// (missing error handling here.)
	t := taskqueue.NewPOSTTask("/_ah/counter", map[string][]string{
		"counter_name": {"someRequest"},
	})

	taskqueue.Add(ctx, t, "")

	// temporarily use a new namespace
	{
		ctx, err := appengine.Namespace(ctx, "-global-")
		if err != nil {
			// ... handle err
		}
		taskqueue.Add(ctx, t, "")
	}

	io.WriteString(w, "Counters will be updated.\n")
}
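The push task above is delivered by the task queue service as an HTTP POST to /_ah/counter, so a handler must be registered for that path. A hedged sketch of what that worker could look like; the handler body and its registration are assumptions, only the path and the counter_name form field come from the example:
// counterWorker is a hypothetical handler for the "/_ah/counter" push task.
// The task queue POSTs the form values passed to NewPOSTTask, so they can be
// read back with r.FormValue.
func counterWorker(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	name := r.FormValue("counter_name")
	log.Infof(ctx, "incrementing counter %q", name)

	// ... update the counter in the datastore or memcache here ...

	// Any non-2xx status makes the task queue retry the task later.
	w.WriteHeader(http.StatusOK)
}

func init() {
	http.HandleFunc("/_ah/counter", counterWorker)
}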
Example #16
// PushTask to push a task into the queue.
func (queue *PushQueue) PushTask(req *wcg.Request, urlPath string, form url.Values) error {
	var queueName string
	if req.IsTest() || lib.IsOnLocalGAE() {
		queueName = "default"
	} else {
		queueName = queue.Name
	}
	if _, err := taskqueue.Add(gae.NewContext(req), taskqueue.NewPOSTTask(urlPath, form), queueName); err != nil {
		req.Logger.Errorf("[Queue] Error adding a task (%s) into the queue (%q): %v", urlPath, queueName, err)
		return err
	}
	return nil
}
Example #17
func example() {
	var ctx context.Context
	// [START transactional_task_enqueuing]
	datastore.RunInTransaction(ctx, func(ctx context.Context) error {
		t := &taskqueue.Task{Path: "/path/to/worker"}
		if _, err := taskqueue.Add(ctx, t, ""); err != nil {
			return err
		}
		// ...
		return nil
	}, nil)
	// [END transactional_task_enqueuing]
}
Example #18
// notifySubscribersAsync creates an async job to begin notifying subscribers.
func notifySubscribersAsync(c context.Context, d *dataChanges, all bool) error {
	skeys := make([]string, 0, len(d.Sessions))
	for id := range d.Sessions {
		skeys = append(skeys, id)
	}
	p := path.Join(config.Prefix, "/task/notify-subscribers")
	// TODO: add ioext to the payload
	t := taskqueue.NewPOSTTask(p, url.Values{
		"sessions": {strings.Join(skeys, " ")},
		"all":      {fmt.Sprintf("%v", all)},
	})
	_, err := taskqueue.Add(c, t, "")
	return err
}
Example #19
// submitSurveyAsync schedules an async job to submit feedback survey s for session sid.
func submitSurveyAsync(c context.Context, sid string, s *sessionSurvey) error {
	payload, err := json.Marshal(s)
	if err != nil {
		return err
	}
	t := &taskqueue.Task{
		Path:    path.Join(config.Prefix, "/task/survey", sid),
		Payload: payload,
		Header:  http.Header{"Content-Type": {"application/json"}},
		Method:  "POST",
	}
	_, err = taskqueue.Add(c, t, "")
	return err
}
Example #20
func notifyUserAsync(c context.Context, uid, shard string, m *pushMessage) error {
	p := path.Join(config.Prefix, "/task/notify-user")
	msg, err := json.Marshal(m)
	if err != nil {
		return err
	}
	t := taskqueue.NewPOSTTask(p, url.Values{
		"uid":     {uid},
		"shard":   {shard},
		"message": {string(msg)},
	})
	_, err = taskqueue.Add(c, t, "")
	return err
}
Example #21
// scheduleRangeSync schedules a new run of a key range sync using appengine/taskqueue.
func scheduleRangeSync(c context.Context, w http.ResponseWriter, start, end *datastore.Key, proj, dataset, exclude, queue string) error {
	queue = strings.Trim(queue, " \n\t")
	path := "/bq/sync/range?startKey=%s&endKey=%s&project=%s&dataset=%s&exclude=%s&queue=%s"
	url := fmt.Sprintf(path, encodeKey(start), encodeKey(end), proj, dataset, exclude, queue)
	t := &taskqueue.Task{
		Path:   url,
		Method: "GET",
	}
	t, err := taskqueue.Add(c, t, queue)
	if err != nil {
		return err
	}
	infof(c, w, "Schedule range [%s,%s]\n", start, end)
	return nil
}
Example #22
func (it *iterator) completed(c context.Context, mapper *mapper, key *datastore.Key) error {
	// mark iterator as complete
	it.complete()

	// update iterator status and job within a transaction
	queue, ok := locker.QueueFromContext(c)
	if !ok {
		queue = mapper.config.DefaultQueue
	}
	fresh := new(iterator)
	jobKey := it.jobKey(c, *mapper.config)
	job := new(job)

	return storage.RunInTransaction(c, func(tc context.Context) error {
		keys := []*datastore.Key{key, jobKey}
		vals := []interface{}{fresh, job}
		if err := storage.GetMulti(tc, keys, vals); err != nil {
			return err
		}

		if job.Abort {
			return nil
		}

		fresh.copyFrom(*it)
		fresh.Lock.Complete()

		job.NamespacesTotal += int(it.Count)
		job.Iterating = false

		// only the iterator walltime is rolled up into the job counts
		job.WallTime += it.WallTime

		// it's unlikely (but possible) that the shards and namespaces completed
		// before this task, so handle the case where the job is now also fully complete
		if job.NamespacesSuccessful == job.NamespacesTotal && !job.Iterating {
			t := mapper.locker.NewTask(jobKey, job, mapper.config.Path+jobCompleteURL, nil)
			if _, err := taskqueue.Add(tc, t, queue); err != nil {
				return err
			}
		}

		if _, err := storage.PutMulti(tc, keys, vals); err != nil {
			return err
		}
		return nil
	}, &datastore.TransactionOptions{XG: true})
}
Example #23
func CallMinecraftTQ(c context.Context, minecraftKey *datastore.Key, operationID string) (*taskqueue.Task, error) {
	log.Infof(c, "Call Minecraft TQ, key = %v, operationID = %s", minecraftKey, operationID)
	if minecraftKey == nil {
		return nil, errors.New("key is required")
	}
	if len(operationID) < 1 {
		return nil, errors.New("operationID is required")
	}

	t := taskqueue.NewPOSTTask("/tq/1/minecraft", url.Values{
		"keyStr":      {minecraftKey.Encode()},
		"operationID": {operationID},
	})
	t.Delay = time.Second * 30
	return taskqueue.Add(c, t, "minecraft")
}
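The task above relies only on Delay. A Task can also be given a Name, and adding the same name twice makes taskqueue.Add return taskqueue.ErrTaskAlreadyAdded, which gives cheap deduplication. A hedged sketch of a variant of CallMinecraftTQ; the function name and the naming scheme are assumptions:
// CallMinecraftTQOnce is a hypothetical variant of CallMinecraftTQ that also
// names the task, so a second call with the same operationID is deduplicated
// instead of being enqueued twice.
func CallMinecraftTQOnce(c context.Context, minecraftKey *datastore.Key, operationID string) (*taskqueue.Task, error) {
	t := taskqueue.NewPOSTTask("/tq/1/minecraft", url.Values{
		"keyStr":      {minecraftKey.Encode()},
		"operationID": {operationID},
	})
	t.Name = "minecraft-" + operationID // task names must match [0-9A-Za-z_-]+
	t.Delay = time.Second * 30

	task, err := taskqueue.Add(c, t, "minecraft")
	if err == taskqueue.ErrTaskAlreadyAdded {
		// The same operation is already enqueued; treat this as success.
		return t, nil
	}
	return task, err
}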
Example #24
func (a *ServerTQApi) CallDeleteInstance(c context.Context, minecraftKey *datastore.Key, operationID string, latestSnapshot string) (*taskqueue.Task, error) {
	log.Infof(c, "Call Minecraft TQ, key = %v, operationID = %s", minecraftKey, operationID)
	if minecraftKey == nil {
		return nil, errors.New("key is required")
	}
	if len(operationID) < 1 {
		return nil, errors.New("operationID is required")
	}

	t := taskqueue.NewPOSTTask("/tq/1/server/instance/delete", url.Values{
		"keyStr":         {minecraftKey.Encode()},
		"operationID":    {operationID},
		"latestSnapshot": {latestSnapshot},
	})
	t.Delay = time.Second * 30
	return taskqueue.Add(c, t, "minecraft")
}
Example #25
func callbackHandler(w http.ResponseWriter, r *http.Request) {

	ctx := appengine.NewContext(r)

	chi, err := strconv.ParseInt(os.Getenv("CHANNEL_ID"), 10, 64)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	chs := os.Getenv("CHANNEL_SECRET")
	mid := os.Getenv("MID")

	bot, err := linebot.NewClient(chi, chs, mid)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	received, err := bot.ParseRequest(r)
	if err != nil {
		if err == linebot.ErrInvalidSignature {
			w.WriteHeader(http.StatusBadRequest)
		} else {
			w.WriteHeader(http.StatusInternalServerError)
		}
		return
	}
	for _, result := range received.Results {
		content := result.Content()
		if content != nil && content.IsMessage && content.ContentType == linebot.ContentTypeText {
			text, _ := content.TextContent()
			log.Debugf(ctx, "id: %s, text: %s, from: %s, to: %v", content.ID, text.Text, text.From, text.To)

			values := url.Values{}
			values.Set("to", text.From)
			values.Set("text", text.Text)
			t := taskqueue.NewPOSTTask(TEXT_URI, values)
			_, err = taskqueue.Add(ctx, t, TEXT_MESSAGE_QUEUE)
			if err != nil {
				w.WriteHeader(http.StatusInternalServerError)
				return
			}
		}
	}
	w.WriteHeader(http.StatusOK)
}
Example #26
func f(ctx context.Context) {
	err := datastore.RunInTransaction(ctx, func(ctx context.Context) error {
		t := taskqueue.NewPOSTTask("/worker", url.Values{
		// ...
		})
		// Use the transaction's context when invoking taskqueue.Add.
		_, err := taskqueue.Add(ctx, t, "")
		if err != nil {
			// Handle error
		}
		// ...
		return nil
	}, nil)
	if err != nil {
		// Handle error
	}
	// ...
}
Example #27
// Enqueues one 'day' task per day in the range
func batchFlightDateRangeHandler(w http.ResponseWriter, r *http.Request) {
	ctx := req2ctx(r)

	n := 0
	str := ""
	s, e, _ := widget.FormValueDateRange(r)
	job := r.FormValue("job")
	if job == "" {
		http.Error(w, "Missing argument: &job=foo", http.StatusInternalServerError)
		return
	}

	str += fmt.Sprintf("** s: %s\n** e: %s\n", s, e)

	days := date.IntermediateMidnights(s.Add(-1*time.Second), e) // decrement start, to include it
	for _, day := range days {

		dayUrl := "/backend/fdb-batch/day"
		dayStr := day.Format("2006/01/02")

		str += fmt.Sprintf(" * adding %s, %s via %s\n", job, dayStr, dayUrl)

		if r.FormValue("dryrun") == "" {
			t := taskqueue.NewPOSTTask(dayUrl, map[string][]string{
				"day": {dayStr},
				"job": {job},
			})

			if _, err := taskqueue.Add(ctx, t, "batch"); err != nil {
				log.Errorf(ctx, "upgradeHandler: enqueue: %v", err)
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
		}

		n++
	}

	log.Infof(ctx, "enqueued %d batch items for '%s'", n, job)

	w.Header().Set("Content-Type", "text/plain")
	w.Write([]byte(fmt.Sprintf("OK, batch, enqueued %d tasks for %s\n%s", n, job, str)))
}
Example #28
// AddToSearchIndex adds an Item to the search index.
// The current implementation uses task queues, so this operation will
// be executed in the background.
func AddToSearchIndex(con *data.Context, i data.Item) {
	// We'll update the search index next
	// FIRST: Store the HTML of the item in the memcache.
	//        We do that because it is often larger than the maximum
	//        task size allowed at the GAE.
	memI := &memcache.Item{
		Key:   i.DSKey,
		Value: []byte(i.HTMLforSearch),
	}
	if err := memcache.Set(con.C, memI); err != nil {
		con.Log.Infof("Error while storing the search HTML in the memcache for URL %v", i.URL)
	}

	// SECOND: Put the search index update task in the queue
	task := taskqueue.NewPOSTTask("/t/search/add_to_index", itemToSearchIndexTask(i))
	if _, err := taskqueue.Add(con.C, task, "search-index"); err != nil {
		con.Log.Errorf("Error while triggering the add to index: %v", err)
	}
}
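AddToSearchIndex only enqueues the indexing work; a handler registered at /t/search/add_to_index has to read the HTML back out of memcache and update the index. A rough sketch under stated assumptions: the worker name, the "key" form field, and the indexing step are hypothetical, since itemToSearchIndexTask is not shown above.
// addToIndexWorker is a hypothetical handler for "/t/search/add_to_index".
// It assumes the task form carries the item's datastore key under "key"; the
// real field name comes from itemToSearchIndexTask, which isn't shown above.
func addToIndexWorker(w http.ResponseWriter, r *http.Request) {
	c := appengine.NewContext(r)

	key := r.FormValue("key")
	memI, err := memcache.Get(c, key)
	if err != nil {
		// A non-2xx status makes the task queue retry the task later.
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	// ... build the search document from memI.Value and the remaining form
	// values, then write it to the search index here ...
	_ = memI

	w.WriteHeader(http.StatusOK)
}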
Example #29
func (n *namespace) completed(c context.Context, mapper *mapper, key *datastore.Key) error {
	n.complete()

	// update namespace status and job within a transaction
	queue, ok := locker.QueueFromContext(c)
	if !ok {
		queue = mapper.config.DefaultQueue
	}
	fresh := new(namespace)
	jobKey := n.jobKey(c, *mapper.config)
	job := new(job)

	return storage.RunInTransaction(c, func(tc context.Context) error {
		keys := []*datastore.Key{key, jobKey}
		vals := []interface{}{fresh, job}
		if err := storage.GetMulti(tc, keys, vals); err != nil {
			return err
		}

		if job.Abort {
			return nil
		}

		fresh.copyFrom(*n)
		fresh.Lock.Complete()

		job.NamespacesSuccessful++
		job.common.rollup(n.common)

		if job.NamespacesSuccessful == job.NamespacesTotal && !job.Iterating {
			t := mapper.locker.NewTask(jobKey, job, mapper.config.Path+jobCompleteURL, nil)
			if _, err := taskqueue.Add(tc, t, queue); err != nil {
				return err
			}
		}

		if _, err := storage.PutMulti(tc, keys, vals); err != nil {
			return err
		}
		return nil
	}, &datastore.TransactionOptions{XG: true})
}
Example #30
func (s *shard) completed(c context.Context, mapper *mapper, key *datastore.Key) error {
	s.complete()
	s.Cursor = ""

	// update shard status and owning namespace within a transaction
	queue, ok := locker.QueueFromContext(c)
	if !ok {
		queue = mapper.config.DefaultQueue
	}
	fresh := new(shard)
	nsKey := s.namespaceKey(c, *mapper.config)
	ns := new(namespace)

	return storage.RunInTransaction(c, func(tc context.Context) error {
		keys := []*datastore.Key{key, nsKey}
		vals := []interface{}{fresh, ns}
		if err := storage.GetMulti(tc, keys, vals); err != nil {
			return err
		}

		fresh.copyFrom(*s)
		fresh.Lock.Complete()

		ns.ShardsSuccessful++
		ns.common.rollup(s.common)

		// if all shards have completed, schedule namespace/completed to update job
		if ns.ShardsSuccessful == ns.ShardsTotal {
			t := mapper.locker.NewTask(nsKey, ns, mapper.config.Path+namespaceCompleteURL, nil)
			if _, err := taskqueue.Add(tc, t, queue); err != nil {
				return err
			}
		}

		if _, err := storage.PutMulti(tc, keys, vals); err != nil {
			return err
		}

		return nil
	}, &datastore.TransactionOptions{XG: true})
}