Example #1
func TestTasks(t *testing.T) {

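	// Create a test context configured with a single task queue named "testQueue".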
	c, err := NewContext(&Options{TaskQueues: []string{"testQueue"}})
	if err != nil {
		t.Fatalf("NewContext: %v", err)
	}
	defer c.Close()

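	// Add one POST task and verify that QueueStats reports it.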
	task := taskqueue.NewPOSTTask("/post", map[string][]string{})
	_, err = taskqueue.Add(c, task, "testQueue")
	if err != nil {
		t.Fatalf("Could not add task to queue")
	}
	stats, err := taskqueue.QueueStats(c, []string{"testQueue"}, 0) // fetch all of them
	if err != nil {
		t.Fatalf("Could not get taskqueue statistics")
	}
	t.Logf("TaskStatistics = %#v", stats)
	if len(stats) == 0 {
		t.Fatalf("Queue statistics are empty")
	} else if stats[0].Tasks != 1 {
		t.Fatalf("Could not find the task we just added")
	}

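	// Purge the queue and verify the reported task count drops back to zero.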
	err = taskqueue.Purge(c, "testQueue")
	if err != nil {
		t.Fatalf("Could not purge the queue")
	}
	stats, err = taskqueue.QueueStats(c, []string{"testQueue"}, 0) // fetch all of them
	if err != nil {
		t.Fatalf("Could not get taskqueue statistics")
	}
	if len(stats) == 0 {
		t.Fatalf("Queue statistics are empty")
	}
	if stats[0].Tasks != 0 {
		t.Fatalf("Purge command not successful")
	}

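	// Add two tasks in a single AddMulti call and verify both show up in the stats.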
	tasks := []*taskqueue.Task{
		taskqueue.NewPOSTTask("/post1", map[string][]string{}),
		taskqueue.NewPOSTTask("/post2", map[string][]string{}),
	}
	_, err = taskqueue.AddMulti(c, tasks, "testQueue")
	if err != nil {
		t.Fatalf("Could not add bulk tasklist to queue")
	}
	stats, err = taskqueue.QueueStats(c, []string{"testQueue"}, 0) // fetch all of them
	if err != nil {
		t.Fatalf("Could not get taskqueue statistics")
	}
	if len(stats) == 0 {
		t.Fatalf("Could not find the tasks we just added")
	} else if stats[0].Tasks != 2 {
		t.Fatalf("Could not find the tasks we just added")
	}

}
Example #2
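// queueMore reports whether all four work queues are below their backlog
// thresholds, i.e. whether it is safe to enqueue more work.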
func queueMore(c appengine.Context) bool {
	st, err := taskqueue.QueueStats(c, []string{mapStage2, "couchit",
		"updateunique", "updateUniquePull"}, 0)
	if err != nil {
		c.Errorf("Error getting queue stats: %v", err)
		return false
	}
	c.Infof("map2 queue stats: %+v", st[0])
	c.Infof("couchit queue stats: %+v", st[1])
	c.Infof("updateunique queue stats: %+v", st[2])
	c.Infof("updateUniquePull stats: %+v", st[3])

	return st[0].Tasks < resubmitThreshold &&
		st[1].Tasks < couchitThreshold &&
		st[2].Tasks < updateUniqueThresh &&
		st[3].Tasks < updateUniquePullThresh
}
Example #3
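// UpdateFeeds queries for feeds that are due for an update (n <= now), enqueues
// an update task for each one, and re-enqueues itself with a datastore cursor so
// the scan continues in batches of 100.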
func UpdateFeeds(c mpg.Context, w http.ResponseWriter, r *http.Request) {
	q := datastore.NewQuery("F").KeysOnly().Filter("n <=", time.Now())
	q = q.Limit(100)
	cs := r.FormValue("c")
	hasCursor := false
	if len(cs) > 0 {
		if cur, err := datastore.DecodeCursor(cs); err == nil {
			q = q.Start(cur)
			hasCursor = true
			c.Infof("starting at %v", cur)
		} else {
			c.Errorf("cursor error %v", err.Error())
		}
	}
	if !hasCursor {
		qs, err := taskqueue.QueueStats(c, []string{"update-feed"}, 0)
		if err != nil {
			c.Errorf("queue stats error: %v", err.Error())
			return
		}
		if !qs[0].OldestETA.IsZero() {
			c.Errorf("already %v (%v) tasks", qs[0].Tasks, qs[0].Executed1Minute)
			return
		}
	}
	var keys []*datastore.Key
	it := q.Run(Timeout(c, time.Second*60))
	for {
		k, err := it.Next(nil)
		if err == datastore.Done {
			break
		} else if err != nil {
			c.Errorf("next error: %v", err.Error())
			break
		}
		keys = append(keys, k)
	}

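	// When the query returned results, re-enqueue this handler with the cursor so the next batch is picked up.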
	if len(keys) == 0 {
		c.Errorf("no results")
		return
	}
	cur, err := it.Cursor()
	if err != nil {
		c.Errorf("to cur error %v", err.Error())
	} else {
		c.Infof("add with cur %v", cur)
		t := taskqueue.NewPOSTTask(routeUrl("update-feeds"), url.Values{
			"c": {cur.String()},
		})
		if _, err := taskqueue.Add(c, t, "update-feed"); err != nil {
			c.Errorf("taskqueue error: %v", err.Error())
		}
	}
	c.Infof("updating %d feeds", len(keys))

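	// Build one update task per feed key and add them in chunks of taskLimit tasks per AddMulti call.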
	var tasks []*taskqueue.Task
	for _, k := range keys {
		tasks = append(tasks, taskqueue.NewPOSTTask(routeUrl("update-feed"), url.Values{
			"feed": {k.StringID()},
		}))
	}
	var ts []*taskqueue.Task
	const taskLimit = 100
	for len(tasks) > 0 {
		if len(tasks) > taskLimit {
			ts = tasks[:taskLimit]
			tasks = tasks[taskLimit:]
		} else {
			ts = tasks
			tasks = tasks[0:0]
		}
		if _, err := taskqueue.AddMulti(c, ts, "update-feed"); err != nil {
			c.Errorf("taskqueue error: %v", err.Error())
		}
	}
}
Example #4
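// batchBulkUpdateUnique leases a batch of pull-queue tasks that share a tag (a
// Unique ID), applies the Stats entities they reference to that Unique, and runs
// the follow-up work (counter increment, datastore write, couch update, task
// deletion) concurrently.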
func batchBulkUpdateUnique(c appengine.Context, w http.ResponseWriter, r *http.Request) {
	tasks, err := taskqueue.LeaseByTag(c, 100, "updateUniquePull", 3600, "")
	if err != nil {
		c.Errorf("Error leasing: %v", err)
		http.Error(w, err.Error(), 500)
		return
	}
	if len(tasks) == 0 {
		c.Infof("No tasks found")
		w.WriteHeader(204)
		return
	}
	uuid := tasks[0].Tag

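	// Take a short-lived memcache lock so only one handler works on this Unique at a time.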
	mckey := "ulock." + uuid
	item := memcache.Item{
		Key:        mckey,
		Expiration: time.Minute,
	}

	if memcache.Add(c, &item) == memcache.ErrNotStored {
		c.Errorf("Already processing %v, skipping", uuid)
		http.Error(w, "Already processing item", 503)
		return
	}

	c.Infof("Processing %v things for %v", len(tasks), uuid)
	uk := datastore.NewKey(c, "Unique", uuid, 0, nil)

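	// Load the Unique entity in the background while the task payloads are decoded and their Stats fetched.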
	chu := make(chan uniqueFetch)

	go func() {
		rv := uniqueFetch{}
		rv.u, rv.err = loadUnique(c, uk)
		chu <- rv
	}()

	keys := []*datastore.Key{}
	obs := make([]oldLoader, len(tasks))
	for i, t := range tasks {
		key, err := datastore.DecodeKey(string(t.Payload))
		if err != nil {
			c.Errorf("Error decoding key: %s: %v", t.Payload, err)
			http.Error(w, err.Error(), 500)
			return
		}
		keys = append(keys, key)
		obs[i] = oldLoader{c: c, into: &Stats{}}
	}
	err = datastore.GetMulti(c, keys, obs)
	if err != nil {
		c.Errorf("Error grabbing all the stats: %v", err)
		http.Error(w, err.Error(), 500)
		return
	}

	uf := <-chu
	if uf.err != nil {
		c.Errorf("Error fetching unique: %v", err)
		http.Error(w, err.Error(), 500)
		return
	}

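	// Fan the remaining work out across goroutines; their results are collected from cherr below.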
	cherr := make(chan error, 5)

	// We're probably safe enough to count these now
	go func() {
		cherr <- sharded_counter.IncrementBy(c, "unique_"+uuid, len(tasks))
	}()

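	// Fold each fetched Stats into the Unique.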
	for _, wst := range obs {
		st := wst.into.(*Stats)
		st.uncompress()
		err := uf.u.Update(*st)
		if err != nil {
			c.Warningf("Problem updating a unique: %v (continuing)", err)
		}
	}

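	// Re-compress the updated Unique and write it back to the datastore.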
	go func() {
		cherr <- uf.u.compress()
		_, err := datastore.Put(c, uk, &uf.u)
		if err != nil {
			c.Errorf("Error storing unique: %v", err)
		}
		cherr <- err
	}()

	go func() {
		memcache.Delete(c, mckey)
		cherr <- couchit(c, uk, url.Values{
			"isnew": []string{fmt.Sprintf("%t", uf.u.isNew)}})
	}()

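	// Delete the leased tasks now that their payloads have been applied.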
	go func() {
		cherr <- taskqueue.DeleteMulti(c, tasks, "updateUniquePull")
	}()

	go func() {
		st, err := taskqueue.QueueStats(c, []string{"updateUniquePull", "bulkupdateunique"}, 0)
		if err != nil {
			c.Errorf("Error getting queue stats: %v", err)
			cherr <- err
			return
		}
		if st[0].Tasks > 5000 && st[1].Tasks < 1000 {
			c.Infof("There's more to go, resubmitting: %v", err)
			taskqueue.Add(c, taskqueue.NewPOSTTask("/submit/bulkUpdateUnique",
				url.Values{"times": []string{"100"}}),
				"default")
		}
		cherr <- nil
	}()

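	// Collect the goroutines' results and fail the request if any of them errored.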
	err = anyErr(<-cherr, <-cherr, <-cherr, <-cherr, <-cherr)
	if err != nil {
		c.Errorf("Error in batch processing: %v", err)
		http.Error(w, err.Error(), 500)
		return
	}
	w.WriteHeader(204)
}