Example 1
func FixupComplaint(c *types.Complaint, key *datastore.Key) {
	// 0. Snag the key, so we can refer to this object later
	c.DatastoreKey = key.Encode()

	// 1. GAE datastore helpfully converts timezones to UTC upon storage; fix that
	c.Timestamp = date.InPdt(c.Timestamp)

	// 2. Compute the flight details URL, if within 24 hours
	age := date.NowInPdt().Sub(c.Timestamp)
	if age < time.Hour*24 {
		// c.AircraftOverhead.Fr24Url = c.AircraftOverhead.PlaybackUrl()

		c.AircraftOverhead.Fr24Url = "http://flightaware.com/live/flight/" +
			c.AircraftOverhead.FlightNumber
		// Or: http://flightaware.com/live/flight/UAL337/history/20151215/ [0655Z/KLAX/KSFO]
		// date is UTC of departure time; might be tricky to guess :/
	}

	// 3. Compute distances, if we have an aircraft
	if c.AircraftOverhead.FlightNumber != "" {
		a := c.AircraftOverhead
		aircraftPos := geo.Latlong{a.Lat, a.Long}
		observerPos := geo.Latlong{c.Profile.Lat, c.Profile.Long}
		c.Dist2KM = observerPos.Dist(aircraftPos)
		c.Dist3KM = observerPos.Dist3(aircraftPos, a.Altitude)
	}
}
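Dist2KM is the ground distance; Dist3 presumably folds the aircraft's altitude into a slant range. A minimal, self-contained sketch of that arithmetic (the feet-to-kilometre conversion and the helper name slantRangeKM are assumptions for illustration, not the geo package's actual code):

package main

import (
	"fmt"
	"math"
)

const feetPerKM = 3280.84 // assumption: altitudes arrive in feet

// slantRangeKM combines ground distance and altitude by Pythagoras,
// which is what a 3D distance like Dist3 presumably reduces to.
func slantRangeKM(groundKM, altitudeFeet float64) float64 {
	altKM := altitudeFeet / feetPerKM
	return math.Sqrt(groundKM*groundKM + altKM*altKM)
}

func main() {
	// An aircraft 5 km away horizontally at 10,000 ft is ~5.86 km away in 3D.
	fmt.Printf("%.2f km\n", slantRangeKM(5.0, 10000))
}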
Example 2
func createMemcacheKey(key *datastore.Key) string {
	memcacheKey := memcachePrefix + key.Encode()
	if len(memcacheKey) > memcacheMaxKeySize {
		hash := sha1.Sum([]byte(memcacheKey))
		memcacheKey = hex.EncodeToString(hash[:])
	}
	return memcacheKey
}
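App Engine memcache keys may be at most 250 bytes, while an encoded datastore key for a deeply nested entity can easily exceed that. A self-contained sketch of the SHA-1 fallback above (the constant values are assumptions consistent with that limit):

package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
)

const (
	memcachePrefix     = "cache:" // assumed prefix
	memcacheMaxKeySize = 250      // App Engine memcache key limit in bytes
)

func memcacheKeyFor(encodedKey string) string {
	k := memcachePrefix + encodedKey
	if len(k) > memcacheMaxKeySize {
		// A SHA-1 digest hex-encodes to 40 characters, always within the limit.
		sum := sha1.Sum([]byte(k))
		k = hex.EncodeToString(sum[:])
	}
	return k
}

func main() {
	long := make([]byte, 300)
	fmt.Println(len(memcacheKeyFor(string(long)))) // 40
}

Hashing sacrifices key readability, but the mapping stays deterministic, so cache lookups still hit.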
Example 3
func mapTask(c context.Context, ds appwrap.Datastore, baseUrl string, mr MapReducePipeline, taskKey *datastore.Key, w http.ResponseWriter, r *http.Request) {
	var finalErr error
	var shardNames map[string]int
	var task JobTask

	start := time.Now()

	// we do this before starting the task below so that the parameters are set before
	// the task status callback is invoked
	jsonParameters := r.FormValue("json")
	mr.SetMapParameters(jsonParameters)
	mr.SetShardParameters(jsonParameters)

	if t, err, retry := startTask(c, ds, mr, taskKey); err != nil && retry {
		logCritical(c, "failed updating task to running: %s", err)
		http.Error(w, err.Error(), 500) // this will run us again
		return
	} else if err != nil {
		logCritical(c, "(fatal) failed updating task to running: %s", err)
		http.Error(w, err.Error(), 200) // a 2xx status stops the task queue from running us again
		return
	} else {
		task = t
	}

	defer func() {
		if r := recover(); r != nil {
			stack := make([]byte, 16384)
			bytes := runtime.Stack(stack, false)
			logCritical(c, "panic inside of map task %s: %s\n%s\n", taskKey.Encode(), r, stack[0:bytes])

			if err := retryTask(c, ds, mr, task.Job, taskKey); err != nil {
				panic(fmt.Errorf("failed to retry task after panic: %s", err))
			}
		}
	}()

	if readerName := r.FormValue("reader"); readerName == "" {
		finalErr = fmt.Errorf("reader parameter required")
	} else if shardStr := r.FormValue("shards"); shardStr == "" {
		finalErr = fmt.Errorf("shards parameter required")
	} else if shardCount, err := strconv.ParseInt(shardStr, 10, 32); err != nil {
		finalErr = fmt.Errorf("error parsing shard count: %s", err.Error())
	} else if reader, err := mr.ReaderFromName(c, readerName); err != nil {
		finalErr = fmt.Errorf("error making reader: %s", err)
	} else {
		shardNames, finalErr = mapperFunc(c, mr, reader, int(shardCount),
			makeStatusUpdateFunc(c, ds, mr, fmt.Sprintf("%s/mapstatus", baseUrl), taskKey.Encode()))
	}

	if err := endTask(c, ds, mr, task.Job, taskKey, finalErr, shardNames); err != nil {
		logCritical(c, "Could not finish task: %s", err)
		http.Error(w, err.Error(), 500)
		return
	}

	logInfo(c, "mapper done after %s", time.Since(start))
}
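The 500-versus-200 split above leans on App Engine push queue semantics: any non-2xx response makes the queue redeliver the task, while a 2xx drops it, even if the body carries an error message. A minimal sketch of that contract in a bare handler (processTask and the route are illustrative, not part of the source):

package main

import (
	"log"
	"net/http"
)

// processTask is a hypothetical unit of work; retryable reports whether
// a failure is transient.
func processTask(r *http.Request) (retryable bool, err error) { return false, nil }

func taskHandler(w http.ResponseWriter, r *http.Request) {
	if retryable, err := processTask(r); err != nil && retryable {
		// Non-2xx: the push queue will redeliver this task later.
		http.Error(w, err.Error(), http.StatusInternalServerError)
	} else if err != nil {
		// 2xx: the failure is permanent, so tell the queue to drop the task.
		log.Printf("dropping task permanently: %v", err)
		w.WriteHeader(http.StatusOK)
	} else {
		w.WriteHeader(http.StatusOK)
	}
}

func main() {
	http.HandleFunc("/tq/task", taskHandler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}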
Example 4
func fromCache(ctx context.Context, key *datastore.Key) (p *Passenger, err error) {
	item, err := memcache.Get(ctx, key.Encode())
	if err != nil {
		return nil, err
	}
	p = new(Passenger)
	err = gob.NewDecoder(bytes.NewReader(item.Value)).Decode(p)
	return
}
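fromCache only covers the read path. A plausible write-side counterpart (toCache is an invented name, not shown in the source; it assumes the same appengine memcache and datastore packages as above) gob-encodes the Passenger under the same encoded-key scheme:

// toCache is a hypothetical counterpart to fromCache: it gob-encodes p
// and stores it in memcache under the encoded datastore key.
func toCache(ctx context.Context, key *datastore.Key, p *Passenger) error {
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(p); err != nil {
		return err
	}
	return memcache.Set(ctx, &memcache.Item{
		Key:   key.Encode(),
		Value: buf.Bytes(),
	})
}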
Example 5
// Write marshals the Submission to JSON and writes it to w, keyed by the encoded datastore key.
func (ƨ Submission) Write(w http.ResponseWriter, key *datastore.Key) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.Write([]byte(`{"`))
	//w.Write([]byte(strconv.FormatInt(key.IntID(), 10)))
	w.Write([]byte(key.Encode()))
	w.Write([]byte(`":`))
	e := json.NewEncoder(w)
	e.Encode(ƨ)
	w.Write([]byte(`}`))
}
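This writes a one-entry JSON object of the shape {"<encodedKey>": <submission>}; the json.Marshal variant in the next example builds the same shape more robustly, since the encoder escapes the key. A sketch of consuming that shape on the client side (only the map structure is assumed; the helper name is illustrative):

// decodeSubmissionResponse unpacks the {"<encodedKey>": <submission>}
// object written by Submission.Write above.
func decodeSubmissionResponse(body []byte) (string, Submission, error) {
	var wrapped map[string]Submission
	if err := json.Unmarshal(body, &wrapped); err != nil {
		return "", Submission{}, err
	}
	for encodedKey, sub := range wrapped {
		return encodedKey, sub, nil // the object holds exactly one entry
	}
	return "", Submission{}, fmt.Errorf("empty response object")
}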
Example 6
// Write marshals the AccessToken to JSON and writes it to w, keyed by the encoded datastore key.
func (x AccessToken) Write(w http.ResponseWriter, key *datastore.Key) {
	body, err := json.Marshal(map[string]AccessToken{
		key.Encode(): x,
	})

	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.Write(body)
}
Example 7
func indexIdiomFullText(c context.Context, idiom *Idiom, idiomKey *datastore.Key) error {
	index, err := gaesearch.Open("idioms")
	if err != nil {
		return err
	}
	// Using the idiom's numeric ID directly as the docID
	// lets us leverage faster ID-only search later.
	docID := strconv.Itoa(idiom.Id)
	w, wTitle, wLead := idiom.ExtractIndexableWords()
	doc := &searchableIdiomDoc{
		IdiomKeyString: gaesearch.Atom(idiomKey.Encode()),
		IdiomID:        gaesearch.Atom(strconv.Itoa(idiom.Id)),
		Bulk:           strings.Join(w, " "),
		Langs:          implementedLanguagesConcat(idiom),
		TitleWords:     strings.Join(wTitle, " "),
		LeadWords:      strings.Join(wLead, " "),
	}
	doc.TitleOrLeadWords = doc.TitleWords + " " + doc.LeadWords
	_, err = index.Put(c, docID, doc)
	if err != nil {
		return err
	}

	// Also index each impl individually,
	// so we know what to highlight.
	indexImpl, err := gaesearch.Open("impls")
	if err != nil {
		return err
	}
	for _, impl := range idiom.Implementations {
		implDocID := fmt.Sprintf("%d_%d", idiom.Id, impl.Id)
		w := impl.ExtractIndexableWords()
		implDoc := &searchableImplDoc{
			Lang:    impl.LanguageName,
			IdiomID: gaesearch.Atom(strconv.Itoa(idiom.Id)),
			Bulk:    strings.Join(w, " "),
		}
		// Odd that the search API doesn't offer batch puts.
		// TODO: index each impl concurrently?
		// TODO: index only last edited impl?
		_, err = indexImpl.Put(c, implDocID, implDoc)
		if err != nil {
			return err
		}
	}

	return nil
}
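The comment about "faster ID-only search" refers to the search API's IDsOnly option: because the docID is the idiom ID itself, a query can return matching IDs without loading any documents. A sketch of that read path, assuming gaesearch aliases google.golang.org/appengine/search (the function name is illustrative):

// idiomIDsMatching runs an ID-only search against the "idioms" index;
// since each docID is the idiom's numeric ID, the iterator's IDs are
// all we need.
func idiomIDsMatching(c context.Context, query string) ([]int, error) {
	index, err := gaesearch.Open("idioms")
	if err != nil {
		return nil, err
	}
	var ids []int
	it := index.Search(c, query, &gaesearch.SearchOptions{IDsOnly: true})
	for {
		docID, err := it.Next(nil) // dst is ignored when IDsOnly is set
		if err == gaesearch.Done {
			break
		}
		if err != nil {
			return nil, err
		}
		id, err := strconv.Atoi(docID)
		if err != nil {
			return nil, err
		}
		ids = append(ids, id)
	}
	return ids, nil
}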
Example 8
func CallMinecraftTQ(c context.Context, minecraftKey *datastore.Key, operationID string) (*taskqueue.Task, error) {
	log.Infof(c, "Call Minecraft TQ, key = %v, operationID = %s", minecraftKey, operationID)
	if minecraftKey == nil {
		return nil, errors.New("key is required")
	}
	if len(operationID) < 1 {
		return nil, errors.New("operationID is required")
	}

	t := taskqueue.NewPOSTTask("/tq/1/minecraft", url.Values{
		"keyStr":      {minecraftKey.Encode()},
		"operationID": {operationID},
	})
	t.Delay = time.Second * 30
	return taskqueue.Add(c, t, "minecraft")
}
Example 9
func (a *ServerTQApi) CallDeleteInstance(c context.Context, minecraftKey *datastore.Key, operationID string, latestSnapshot string) (*taskqueue.Task, error) {
	log.Infof(c, "Call Delete Instance TQ, key = %v, operationID = %s", minecraftKey, operationID)
	if minecraftKey == nil {
		return nil, errors.New("key is required")
	}
	if len(operationID) < 1 {
		return nil, errors.New("operationID is required")
	}

	t := taskqueue.NewPOSTTask("/tq/1/server/instance/delete", url.Values{
		"keyStr":         {minecraftKey.Encode()},
		"operationID":    {operationID},
		"latestSnapshot": {latestSnapshot},
	})
	t.Delay = time.Second * 30
	return taskqueue.Add(c, t, "minecraft")
}
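Examples 8 and 9 differ only in the task path and one extra form value. A hedged refactor sketch that factors out the shared validation, key encoding, and 30-second delay (newMinecraftTask is an invented name, not part of the source):

// newMinecraftTask is a hypothetical shared helper for the two functions
// above: it validates the common arguments, encodes the key, merges any
// extra form values, and applies the same 30-second delay before enqueueing.
func newMinecraftTask(c context.Context, path string, key *datastore.Key, operationID string, extra url.Values) (*taskqueue.Task, error) {
	if key == nil {
		return nil, errors.New("key is required")
	}
	if operationID == "" {
		return nil, errors.New("operationID is required")
	}
	v := url.Values{
		"keyStr":      {key.Encode()},
		"operationID": {operationID},
	}
	for name, values := range extra {
		v[name] = values
	}
	t := taskqueue.NewPOSTTask(path, v)
	t.Delay = 30 * time.Second
	return taskqueue.Add(c, t, "minecraft")
}

CallMinecraftTQ would then reduce to newMinecraftTask(c, "/tq/1/minecraft", minecraftKey, operationID, nil), and CallDeleteInstance adds only the latestSnapshot value.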
Example 10
func EncodeDatastoreKey(e *Encoder, key *ds.Key) error {
	if key == nil {
		return e.EncodeNil()
	}
	return e.EncodeString(key.Encode())
}
Example 11
func memkey(k *datastore.Key) string {
	// Versioning, so that incompatible changes to the cache system won't cause problems
	return "g2:" + k.Encode()
}
Example 12
// encodeKey safely encodes k as a string, returning empty string
// if k is nil.
func encodeKey(k *datastore.Key) string {
	if k == nil {
		return ""
	}
	return k.Encode()
}
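The natural counterpart reverses the mapping with datastore.DecodeKey, treating the empty string as a nil key so the two functions round-trip (decodeKey is an assumed name, not shown in the source):

// decodeKey is the hypothetical inverse of encodeKey: an empty string
// maps back to a nil key, anything else goes through datastore.DecodeKey.
func decodeKey(s string) (*datastore.Key, error) {
	if s == "" {
		return nil, nil
	}
	return datastore.DecodeKey(s)
}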
Example 13
func reduceTask(c context.Context, ds appwrap.Datastore, baseUrl string, mr MapReducePipeline, taskKey *datastore.Key, w http.ResponseWriter, r *http.Request) {
	var writer SingleOutputWriter
	var task JobTask
	var err error
	var retry bool

	start := time.Now()

	// we do this before starting the task below so that the parameters are set before
	// the task status callback is invoked
	mr.SetReduceParameters(r.FormValue("json"))

	if task, err, retry = startTask(c, ds, mr, taskKey); err != nil && retry {
		logCritical(c, "failed updating task to running: %s", err)
		http.Error(w, err.Error(), 500) // this will run us again
		return
	} else if err != nil {
		logCritical(c, "(fatal) failed updating task to running: %s", err)
		http.Error(w, err.Error(), 200) // a 2xx status stops the task queue from running us again
		return
	}

	defer func() {
		if r := recover(); r != nil {
			stack := make([]byte, 16384)
			bytes := runtime.Stack(stack, false)
			logCritical(c, "panic inside of reduce task %s: %s\n%s\n", taskKey.Encode(), r, stack[0:bytes])

			if err := retryTask(c, ds, mr, task.Job, taskKey); err != nil {
				panic(fmt.Errorf("failed to retry task after panic: %s", err))
			}
		}
	}()

	var finalErr error
	if writerName := r.FormValue("writer"); writerName == "" {
		finalErr = fmt.Errorf("writer parameter required")
	} else if writer, err = mr.WriterFromName(c, writerName); err != nil {
		finalErr = fmt.Errorf("error getting writer: %s", err.Error())
	} else if len(task.ReadFrom) == 0 {
		// nothing to read
	} else if shardReader, err := zlib.NewReader(bytes.NewBuffer(task.ReadFrom)); err != nil {
		finalErr = fmt.Errorf("error decompressing shard names: %s", err)
	} else if shardJson, err := ioutil.ReadAll(shardReader); err != nil {
		finalErr = fmt.Errorf("error reading shard names: %s", err)
	} else {
		var shards []string
		if err := json.Unmarshal(shardJson, &shards); err != nil {
			finalErr = fmt.Errorf("error unmarshaling shard names: %s", err)
		} else {
			finalErr = ReduceFunc(c, mr, writer, shards, task.SeparateReduceItems,
				makeStatusUpdateFunc(c, ds, mr, fmt.Sprintf("%s/reducestatus", baseUrl), taskKey.Encode()))
		}
	}

	// writer is nil when the writer parameter was missing or invalid;
	// guard against calling methods on a nil interface
	var resultName string
	if writer != nil {
		writer.Close(c)
		resultName = writer.ToName()
	}

	if err := endTask(c, ds, mr, task.Job, taskKey, finalErr, resultName); err != nil {
		logCritical(c, "Could not finish task: %s", err)
		http.Error(w, err.Error(), 500)
		return
	}

	logInfo(c, "reducer done after %s", time.Since(start))
}
Example 14
func mapMonitorTask(c context.Context, ds appwrap.Datastore, pipeline MapReducePipeline, jobKey *datastore.Key, r *http.Request, timeout time.Duration) int {
	start := time.Now()

	job, err := waitForStageCompletion(c, ds, pipeline, jobKey, StageMapping, StageReducing, timeout)
	if err != nil {
		logCritical(c, "waitForStageCompletion() failed: %s", err)
		return 200
	} else if job.Stage == StageMapping {
		logInfo(c, "wait timed out -- returning an error and letting us automatically restart")
		return 500
	}

	logInfo(c, "map stage completed -- stage is now %s", job.Stage)

	// NOTE: jobStageComplete already gathered these tasks; doing it again here is redundant
	mapTasks, err := gatherTasks(ds, job)
	if err != nil {
		logError(c, "failed loading tasks: %s", err)
		jobFailed(c, ds, pipeline, jobKey, fmt.Errorf("error loading tasks after map complete: %s", err.Error()))
		return 200
	}

	// we have one set for each reducer task
	storageNames := make([][]string, len(job.WriterNames))

	for i := range mapTasks {
		var shardNames map[string]int
		if err = json.Unmarshal([]byte(mapTasks[i].Result), &shardNames); err != nil {
			logError(c, `unmarshal error for result from map %d: '%+v' (%s)`, job.FirstTaskId+int64(i), mapTasks[i].Result, err)
			jobFailed(c, ds, pipeline, jobKey, fmt.Errorf("cannot unmarshal map shard names: %s", err.Error()))
			return 200
		} else {
			for name, shard := range shardNames {
				storageNames[shard] = append(storageNames[shard], name)
			}
		}
	}

	firstId, _, err := datastore.AllocateIDs(c, TaskEntity, nil, len(job.WriterNames))
	if err != nil {
		jobFailed(c, ds, pipeline, jobKey, fmt.Errorf("failed to allocate ids for reduce tasks: %s", err.Error()))
		return 200
	}
	taskKeys := makeTaskKeys(ds, firstId, len(job.WriterNames))
	tasks := make([]JobTask, 0, len(job.WriterNames))

	for shard := range job.WriterNames {
		if shards := storageNames[shard]; len(shards) > 0 {
			url := fmt.Sprintf("%s/reduce?taskKey=%s;shard=%d;writer=%s",
				job.UrlPrefix, taskKeys[len(tasks)].Encode(), shard, url.QueryEscape(job.WriterNames[shard]))

			firstId++

			shardJson, _ := json.Marshal(shards)
			shardZ := &bytes.Buffer{}
			w := zlib.NewWriter(shardZ)
			w.Write(shardJson)
			w.Close()

			tasks = append(tasks, JobTask{
				Status:              TaskStatusPending,
				Url:                 url,
				ReadFrom:            shardZ.Bytes(),
				SeparateReduceItems: job.SeparateReduceItems,
				Type:                TaskTypeReduce,
			})
		}
	}

	// the maps produced nothing, so there is no result. an empty result is hard to
	// communicate, so we just start a single reduce task with no inputs
	if len(tasks) == 0 {
		logInfo(c, "no results from maps -- starting noop reduce task")
		url := fmt.Sprintf("%s/reduce?taskKey=%s;shard=%d;writer=%s",
			job.UrlPrefix, taskKeys[len(tasks)].Encode(), 0, url.QueryEscape(job.WriterNames[0]))

		tasks = append(tasks, JobTask{
			Status:              TaskStatusPending,
			Url:                 url,
			ReadFrom:            []byte(``),
			SeparateReduceItems: job.SeparateReduceItems,
			Type:                TaskTypeReduce,
		})
	}

	taskKeys = taskKeys[0:len(tasks)]

	if err := createTasks(ds, jobKey, taskKeys, tasks, StageReducing); err != nil {
		jobFailed(c, ds, pipeline, jobKey, fmt.Errorf("failed to create reduce tasks: %s", err.Error()))
		return 200
	}

	for i := range tasks {
		if err := pipeline.PostTask(c, tasks[i].Url, job.JsonParameters); err != nil {
			jobFailed(c, ds, pipeline, jobKey, fmt.Errorf("failed to post reduce task: %s", err.Error()))
			return 200
		}
	}

	if err := pipeline.PostStatus(c, fmt.Sprintf("%s/reduce-monitor?jobKey=%s", job.UrlPrefix, jobKey.Encode())); err != nil {
		jobFailed(c, ds, pipeline, jobKey, fmt.Errorf("failed to start reduce monitor: %s", err.Error()))
		return 200
	}

	logInfo(c, "mapping complete after %s of monitoring", time.Since(start))
	return 200
}
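The reduce URLs above separate query parameters with semicolons; Go's net/http (since Go 1.17) and most servers treat only '&' as a separator, so building the query through url.Values is safer and also handles escaping. A sketch (buildReduceUrl is an invented helper, not part of the source):

// buildReduceUrl is a hypothetical replacement for the fmt.Sprintf calls
// above: url.Values escapes every value and emits standard '&' separators.
func buildReduceUrl(urlPrefix string, taskKey *datastore.Key, shard int, writerName string) string {
	q := url.Values{
		"taskKey": {taskKey.Encode()},
		"shard":   {strconv.Itoa(shard)},
		"writer":  {writerName},
	}
	return fmt.Sprintf("%s/reduce?%s", urlPrefix, q.Encode())
}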