// NewTask creates a new taskqueue.Task for the entity with the correct
// headers set to match those on the entity
func (l *Locker) NewTask(key *datastore.Key, entity Lockable, path string, params url.Values) *taskqueue.Task {
	// advance the lock state for the task we are about to create
	lock := entity.getLock()
	lock.Timestamp = getTime()
	lock.RequestID = ""
	lock.Retries = 0
	lock.Sequence++

	// error deliberately ignored: a nil result only produces an empty
	// header value, which the executing side will fail to match anyway
	keyJSON, _ := key.MarshalJSON()

	// embed the sequence number and entity key as headers so the
	// executing task can load the matching entity and verify that it
	// is the execution we scheduled
	t := taskqueue.NewPOSTTask(path, params)
	t.Header.Set("X-Lock-Seq", strconv.Itoa(lock.Sequence))
	t.Header.Set("X-Lock-Key", string(keyJSON))

	// route the task to a specific host / module version when configured
	if host := l.Host; host != "" {
		t.Header.Set("Host", host)
	}

	return t
}
// counterHandler handles one sequenced task execution for a Counter
// entity: it does some simulated work, then either schedules the next
// task in the sequence or completes (releases) the lock once the
// counter reaches its limit. It is registered as the locker's handler,
// so entity is always the *Counter loaded for the task's key.
func counterHandler(c context.Context, r *http.Request, key *datastore.Key, entity locker.Lockable) error {
	counter := entity.(*Counter)
	log.Debugf(c, "process: %d", counter.Sequence)

	// simulate some processing work
	time.Sleep(time.Second)

	if counter.Sequence == 5 {
		// simulate a duplicate task execution by creating one ourselves
		// needless to say, you wouldn't want to be doing this in practice
		// but it should demonstrate that the locker prevents spurious
		// task execution and guarantees the correct sequencing happens
		if json, err := key.MarshalJSON(); err == nil {
			t := taskqueue.NewPOSTTask("/process", nil)
			t.Header.Set("X-Lock-Seq", "6")
			t.Header.Set("X-Lock-Key", string(json))
			// best-effort: failing to enqueue the duplicate only means
			// the demonstration is skipped, so log rather than abort
			if _, err := taskqueue.Add(c, t, ""); err != nil {
				log.Errorf(c, "add duplicate task: %v", err)
			}
		}
	}

	// keep the sequence going until the configured limit is reached
	if counter.Sequence < counter.Limit {
		return l.Schedule(c, key, counter, "/process", nil)
	}

	// done: release the lock on the entity
	return l.Complete(c, key, counter)
}