func (c *Cache) Set(key *datastore.Key, value interface{}) {
	if c == nil {
		return
	}
	c.objs[key.String()] = value
}
func createFinalResult(ctx context.Context, w http.ResponseWriter, resultKey *datastore.Key, result model.Result) (int, error) {
	go computeFinalScore(ctx, result)

	var challenge model.Challenge
	if err := datastore.Get(ctx, result.Challenge, &challenge); err != nil {
		return http.StatusInternalServerError, err
	}

	for _, taskKey := range challenge.Tasks {
		switch taskKey.Kind() {
		case model.CodeTaskKind:
			var submissions model.CodeSubmissions
			keys, err := doQuery(ctx, model.NewQueryForCodeSubmission(), resultKey, taskKey, submissions)
			if err != nil {
				return http.StatusInternalServerError, err
			}
			if len(keys) == 0 {
				// Most likely the authenticated user called this endpoint
				// before finishing the challenge.
				return http.StatusUnauthorized, nil
			}
			result.FinalSubmissions = append(result.FinalSubmissions, keys[0])
		// TODO(pbochis, vbalan, flowlo): Add more cases when more task kinds are added.
		default:
			return http.StatusBadRequest, errors.New("unknown task kind: " + taskKey.Kind())
		}
	}

	key, err := result.Save(ctx, resultKey)
	if err != nil {
		return http.StatusInternalServerError, err
	}

	json.NewEncoder(w).Encode(result.Key(key))
	return http.StatusOK, nil
}
func endTask(c context.Context, ds appwrap.Datastore, taskIntf startTopIntf, jobKey *datastore.Key, taskKey *datastore.Key, resultErr error, result interface{}) error {
	if resultErr == nil {
		task, err := updateTask(ds, taskKey, TaskStatusDone, 0, "", result)
		if err != nil {
			return fmt.Errorf("Could not update task: %s", err)
		}
		taskIntf.Status(jobKey.IntID(), task)
		return nil
	}

	if _, ok := resultErr.(tryAgainError); ok {
		// wasn't fatal, go for it
		if retryErr := retryTask(c, ds, taskIntf, jobKey, taskKey); retryErr != nil {
			return fmt.Errorf("error retrying: %s (task failed due to: %s)", retryErr, resultErr)
		}
		logInfo(c, "retrying task due to %s", resultErr)
		return nil
	}

	// fatal error
	if _, err := updateTask(ds, taskKey, TaskStatusFailed, 0, resultErr.Error(), nil); err != nil {
		return fmt.Errorf("Could not update task with failure: %s", err)
	}
	return nil
}
func (c *Cache) Get(key *datastore.Key) interface{} {
	if c == nil {
		return nil
	}
	return c.objs[key.String()]
}
func FixupComplaint(c *types.Complaint, key *datastore.Key) {
	// 0. Snag the key, so we can refer to this object later.
	c.DatastoreKey = key.Encode()

	// 1. GAE datastore helpfully converts timezones to UTC upon storage; fix that.
	c.Timestamp = date.InPdt(c.Timestamp)

	// 2. Compute the flight details URL, if the complaint is less than 24 hours old.
	age := date.NowInPdt().Sub(c.Timestamp)
	if age < time.Hour*24 {
		// c.AircraftOverhead.Fr24Url = c.AircraftOverhead.PlaybackUrl()
		c.AircraftOverhead.Fr24Url = "http://flightaware.com/live/flight/" + c.AircraftOverhead.FlightNumber
		// Or: http://flightaware.com/live/flight/UAL337/history/20151215/ [0655Z/KLAX/KSFO]
		// date is UTC of departure time; might be tricky to guess :/
	}

	// 3. Compute distances, if we have an aircraft.
	if c.AircraftOverhead.FlightNumber != "" {
		a := c.AircraftOverhead
		aircraftPos := geo.Latlong{a.Lat, a.Long}
		observerPos := geo.Latlong{c.Profile.Lat, c.Profile.Long}
		c.Dist2KM = observerPos.Dist(aircraftPos)
		c.Dist3KM = observerPos.Dist3(aircraftPos, a.Altitude)
	}
}
// Next processes the next item.
func (x *example6) Next(c context.Context, counters mapper.Counters, key *datastore.Key) error {
	// We need to load the entity ourselves.
	photo := new(Photo)
	if err := nds.Get(c, key, photo); err != nil {
		return err
	}
	photo.ID = key.IntID()

	suffix := photo.Taken.Format("20060102")
	_, err := x.bq.Tabledata.InsertAll(x.appID, "datasetName", "tableName", &bigquery.TableDataInsertAllRequest{
		TemplateSuffix: suffix,
		Rows: []*bigquery.TableDataInsertAllRequestRows{
			{
				Json: map[string]bigquery.JsonValue{
					"id":    photo.ID,
					"taken": photo.Taken,
					"photographer": map[string]bigquery.JsonValue{
						"id":   photo.Photographer.ID,
						"name": photo.Photographer.Name,
					},
				},
			},
		},
	}).Context(c).Do()

	return err
}
func createMemcacheKey(key *datastore.Key) string {
	memcacheKey := memcachePrefix + key.Encode()
	if len(memcacheKey) > memcacheMaxKeySize {
		hash := sha1.Sum([]byte(memcacheKey))
		memcacheKey = hex.EncodeToString(hash[:])
	}
	return memcacheKey
}
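// A hypothetical usage sketch for createMemcacheKey. It assumes the package
// defines memcachePrefix and memcacheMaxKeySize (App Engine memcache caps
// keys at 250 bytes): short encoded keys pass through with the prefix, while
// oversized ones collapse to a 40-character hex-encoded SHA-1 digest.
func exampleMemcacheKey(ctx context.Context) string {
	key := datastore.NewKey(ctx, "Photo", "", 42, nil)
	return createMemcacheKey(key) // never longer than memcacheMaxKeySize
}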
func mapTask(c context.Context, ds appwrap.Datastore, baseUrl string, mr MapReducePipeline, taskKey *datastore.Key, w http.ResponseWriter, r *http.Request) {
	var finalErr error
	var shardNames map[string]int
	var task JobTask

	start := time.Now()

	// We do this before starting the task below so that the parameters are set
	// before the task status callback is invoked.
	jsonParameters := r.FormValue("json")
	mr.SetMapParameters(jsonParameters)
	mr.SetShardParameters(jsonParameters)

	if t, err, retry := startTask(c, ds, mr, taskKey); err != nil && retry {
		logCritical(c, "failed updating task to running: %s", err)
		http.Error(w, err.Error(), 500) // this will run us again
		return
	} else if err != nil {
		logCritical(c, "(fatal) failed updating task to running: %s", err)
		http.Error(w, err.Error(), 200) // the 200 means we will *not* be run again
		return
	} else {
		task = t
	}

	defer func() {
		if r := recover(); r != nil {
			stack := make([]byte, 16384)
			bytes := runtime.Stack(stack, false)
			logCritical(c, "panic inside of map task %s: %s\n%s\n", taskKey.Encode(), r, stack[0:bytes])

			if err := retryTask(c, ds, mr, task.Job, taskKey); err != nil {
				panic(fmt.Errorf("failed to retry task after panic: %s", err))
			}
		}
	}()

	if readerName := r.FormValue("reader"); readerName == "" {
		finalErr = fmt.Errorf("reader parameter required")
	} else if shardStr := r.FormValue("shards"); shardStr == "" {
		finalErr = fmt.Errorf("shards parameter required")
	} else if shardCount, err := strconv.ParseInt(shardStr, 10, 32); err != nil {
		finalErr = fmt.Errorf("error parsing shard count: %s", err.Error())
	} else if reader, err := mr.ReaderFromName(c, readerName); err != nil {
		finalErr = fmt.Errorf("error making reader: %s", err)
	} else {
		shardNames, finalErr = mapperFunc(c, mr, reader, int(shardCount),
			makeStatusUpdateFunc(c, ds, mr, fmt.Sprintf("%s/mapstatus", baseUrl), taskKey.Encode()))
	}

	if err := endTask(c, ds, mr, task.Job, taskKey, finalErr, shardNames); err != nil {
		logCritical(c, "Could not finish task: %s", err)
		http.Error(w, err.Error(), 500)
		return
	}

	logInfo(c, "mapper done after %s", time.Since(start))
}
func ktoi(key *datastore.Key) id {
	return id{
		kind:      key.Kind(),
		stringID:  key.StringID(),
		intID:     key.IntID(),
		appID:     key.AppID(),
		namespace: key.Namespace(),
	}
}
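// ktoi's composite literal implies a comparable value type along these lines
// (field types inferred from *datastore.Key's accessors); a sketch, since the
// real declaration lives elsewhere. Unlike *datastore.Key pointers, id values
// compare structurally, which is what makes them usable as map keys.
type id struct {
	kind      string
	stringID  string
	intID     int64
	appID     string
	namespace string
}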
func fromCache(ctx context.Context, key *datastore.Key) (p *Passenger, err error) {
	item, err := memcache.Get(ctx, key.Encode())
	if err != nil {
		return nil, err
	}
	p = new(Passenger)
	err = gob.NewDecoder(bytes.NewReader(item.Value)).Decode(p)
	return
}
// HasParent returns true if key or any of its parents equals
// parent (recursively), otherwise false.
func HasParent(parent, key *datastore.Key) bool {
	for key != nil {
		if key.Equal(parent) {
			return true
		}
		key = key.Parent()
	}
	return false
}
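// A small usage sketch for HasParent, assuming the classic App Engine
// datastore package: grandparent sits two levels above child, so walking the
// Parent() chain finds it.
func exampleHasParent(ctx context.Context) bool {
	grandparent := datastore.NewKey(ctx, "User", "alice", 0, nil)
	parent := datastore.NewKey(ctx, "Album", "", 7, grandparent)
	child := datastore.NewKey(ctx, "Photo", "", 42, parent)
	return HasParent(grandparent, child) // true
}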
// Write takes a key and the corresponding submission and writes them out to w
// after marshaling to JSON.
func (ƨ Submission) Write(w http.ResponseWriter, key *datastore.Key) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.Write([]byte(`{"`))
	//w.Write([]byte(strconv.FormatInt(key.IntID(), 10)))
	w.Write([]byte(key.Encode()))
	w.Write([]byte(`":`))
	e := json.NewEncoder(w)
	e.Encode(ƨ)
	w.Write([]byte(`}`))
}
func fromDatastore(ctx context.Context, key *datastore.Key) (p *Passenger, err error) {
	p = new(Passenger)
	// datastore.Get needs a pointer to the destination field.
	if err = datastore.Get(ctx, key, &p.AccessToken); err != nil {
		return
	}
	if p.UserKey = key.Parent(); p.UserKey == nil {
		return nil, ErrTokenNotAssociated
	}
	return
}
func lookupCache(ctx context.Context, key datastore.Key) *MaybeError {
	maybeItem, err := memcache.Get(ctx, key.String())
	result := new(MaybeError)
	if err != nil {
		// Treat all errors as a cache miss.
		*result = CacheMiss{}
	} else {
		result = jsonToTodoItem(maybeItem.Value)
	}
	return result
}
func alertAdmins(c context.Context, key *datastore.Key, entity Lockable, reason string) error {
	sender := "locker@" + appengine.AppID(c) + ".appspot.com"
	msg := &mail.Message{
		Sender:  sender,
		Subject: reason,
		Body:    fmt.Sprintf("key: %s, entity: %#v", key.String(), entity),
	}
	return mail.SendToAdmins(c, msg)
}
// Parse returns the datastore.Key and sequence number extracted from a task
// request's X-Lock-Key and X-Lock-Seq headers.
func (l *Locker) Parse(c context.Context, r *http.Request) (*datastore.Key, int, error) {
	key := new(datastore.Key)
	if err := key.UnmarshalJSON([]byte(r.Header.Get("X-Lock-Key"))); err != nil {
		return nil, 0, err
	}
	seq, err := strconv.Atoi(r.Header.Get("X-Lock-Seq"))
	if err != nil {
		return nil, 0, err
	}
	return key, seq, nil
}
// Next processes the next item.
func (x *example3) Next(c context.Context, counters mapper.Counters, key *datastore.Key) error {
	photo := new(Photo)
	if err := nds.Get(c, key, photo); err != nil {
		log.Errorf(c, err.Error())
		return err
	}
	photo.ID = key.IntID()
	counters.Increment(photo.Photographer.Name, 1)
	return nil
}
func jobFailed(c context.Context, ds appwrap.Datastore, taskIntf TaskInterface, jobKey *datastore.Key, err error) {
	logError(c, "jobFailed: %s", err)
	prevJob, _ := markJobFailed(c, ds, jobKey) // this might mark it failed again. whatever.

	if prevJob.OnCompleteUrl != "" {
		taskIntf.PostStatus(c, fmt.Sprintf("%s?status=error;error=%s;id=%d", prevJob.OnCompleteUrl,
			url.QueryEscape(err.Error()), jobKey.IntID()))
	}
}
func invalidateCache(ctx context.Context, key datastore.Key) *MaybeError {
	// Delete key from memcache.
	err := memcache.Delete(ctx, key.String())
	result := new(MaybeError)
	if err != nil {
		*result = E(err.Error())
	} else {
		*result = Ok{}
	}
	return result
}
// Load takes the provided datastore.Key and loads it into the current Ticket object.
func (t *Ticket) Load(ctx context.Context, k datastore.Key) error {
	if err := datastore.Get(ctx, &k, t); err != nil {
		return err
	}
	t.DatastoreKey = k
	t.EventKey = k.Parent()
	return nil
}
func fromDatastore(ctx context.Context, key *datastore.Key) (*Passenger, error) {
	p := &Passenger{
		Token: &model.Token{},
	}
	if err := datastore.Get(ctx, key, p.Token); err != nil {
		return nil, err
	}
	if p.User = key.Parent(); p.User == nil {
		return nil, ErrTokenNotAssociated
	}
	return p, nil
}
// Write takes a key and the corresponding access token and writes them out to w
// after marshaling to JSON.
func (x AccessToken) Write(w http.ResponseWriter, key *datastore.Key) {
	body, err := json.Marshal(map[string]AccessToken{
		key.Encode(): x,
	})
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.Write(body)
}
// Next processes the next item.
func (x *example5) Next(c context.Context, counters mapper.Counters, key *datastore.Key) error {
	photo := x.photo
	photo.ID = key.IntID()

	out := &photoOutput{
		Photo:     photo,
		Namespace: key.Namespace(),
	}
	// Propagate the encode error instead of silently dropping it.
	return x.encoder.Encode(out)
}
func updateCache(ctx context.Context, key datastore.Key, item TodoItem) {
	result := itemToJson(item)
	switch blob := (*result).(type) {
	case Blob:
		cacheItem := memcache.Item{
			Key:   key.String(),
			Value: []byte(blob),
		}
		memcache.Set(ctx, &cacheItem) // ignore errors... worst that can happen is we get a cache miss later
	}
}
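// The cache helpers above treat MaybeError as a tagged union. A minimal
// sketch of the variants they imply (the real declarations may differ):
// Ok and CacheMiss are empty markers, E carries an error message, and Blob
// carries raw JSON bytes.
type MaybeError interface{}

type (
	Ok        struct{}
	CacheMiss struct{}
	E         string
	Blob      []byte
)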
func lookup(key *datastore.Key) *websocket.Conn {
	conns.RLock()
	defer conns.RUnlock()

	for key != nil {
		if conn, ok := conns.m[ktoi(key)]; ok {
			return conn
		}
		key = key.Parent()
	}
	return nil
}
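// lookup walks up the key hierarchy, so a connection registered for a parent
// entity also serves its descendants. It reads a package-level registry
// pairing ids with open connections; a minimal sketch of what conns could
// look like, assuming the real declaration lives elsewhere in the package:
var conns = struct {
	sync.RWMutex
	m map[id]*websocket.Conn
}{m: make(map[id]*websocket.Conn)}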
func getJob(ds appwrap.Datastore, jobKey *datastore.Key) (JobInfo, error) {
	var job JobInfo
	err := backoff.Retry(func() error {
		return ds.Get(jobKey, &job)
	}, mrBackOff())
	job.Id = jobKey.IntID()
	return job, err
}
func indexIdiomFullText(c context.Context, idiom *Idiom, idiomKey *datastore.Key) error {
	index, err := gaesearch.Open("idioms")
	if err != nil {
		return err
	}
	// By using the idiom Key directly as docID,
	// we can leverage faster ID-only search later.
	docID := strconv.Itoa(idiom.Id)
	w, wTitle, wLead := idiom.ExtractIndexableWords()
	doc := &searchableIdiomDoc{
		IdiomKeyString: gaesearch.Atom(idiomKey.Encode()),
		IdiomID:        gaesearch.Atom(strconv.Itoa(idiom.Id)),
		Bulk:           strings.Join(w, " "),
		Langs:          implementedLanguagesConcat(idiom),
		TitleWords:     strings.Join(wTitle, " "),
		LeadWords:      strings.Join(wLead, " "),
	}
	doc.TitleOrLeadWords = doc.TitleWords + " " + doc.LeadWords
	_, err = index.Put(c, docID, doc)
	if err != nil {
		return err
	}

	// Also index each impl individually,
	// so we know what to highlight.
	indexImpl, err := gaesearch.Open("impls")
	if err != nil {
		return err
	}
	for _, impl := range idiom.Implementations {
		implDocID := fmt.Sprintf("%d_%d", idiom.Id, impl.Id)
		w := impl.ExtractIndexableWords()
		implDoc := &searchableImplDoc{
			Lang:    impl.LanguageName,
			IdiomID: gaesearch.Atom(strconv.Itoa(idiom.Id)),
			Bulk:    strings.Join(w, " "),
		}
		// Weird that the search API doesn't have batch operations.
		// TODO: index each impl concurrently? (see the sketch below)
		// TODO: index only the last edited impl?
		_, err = indexImpl.Put(c, implDocID, implDoc)
		if err != nil {
			return err
		}
	}
	return nil
}
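// One hedged way to act on the concurrency TODO above, using
// golang.org/x/sync/errgroup to Put each impl document in parallel. This is
// a sketch, not the original code; indexImplsConcurrently is a hypothetical
// helper name.
func indexImplsConcurrently(c context.Context, idiom *Idiom, indexImpl *gaesearch.Index) error {
	g, gctx := errgroup.WithContext(c)
	for _, impl := range idiom.Implementations {
		impl := impl // capture the loop variable for the goroutine
		g.Go(func() error {
			implDocID := fmt.Sprintf("%d_%d", idiom.Id, impl.Id)
			w := impl.ExtractIndexableWords()
			implDoc := &searchableImplDoc{
				Lang:    impl.LanguageName,
				IdiomID: gaesearch.Atom(strconv.Itoa(idiom.Id)),
				Bulk:    strings.Join(w, " "),
			}
			_, err := indexImpl.Put(gctx, implDocID, implDoc)
			return err
		})
	}
	// Wait returns the first non-nil error from any Put, if one occurred.
	return g.Wait()
}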
func CallMinecraftTQ(c context.Context, minecraftKey *datastore.Key, operationID string) (*taskqueue.Task, error) {
	log.Infof(c, "Call Minecraft TQ, key = %v, operationID = %s", minecraftKey, operationID)
	if minecraftKey == nil {
		return nil, errors.New("key is required")
	}
	if len(operationID) < 1 {
		return nil, errors.New("operationID is required")
	}
	t := taskqueue.NewPOSTTask("/tq/1/minecraft", url.Values{
		"keyStr":      {minecraftKey.Encode()},
		"operationID": {operationID},
	})
	t.Delay = time.Second * 30
	return taskqueue.Add(c, t, "minecraft")
}
func (a *ServerTQApi) CallDeleteInstance(c context.Context, minecraftKey *datastore.Key, operationID string, latestSnapshot string) (*taskqueue.Task, error) {
	log.Infof(c, "Call Delete Instance TQ, key = %v, operationID = %s", minecraftKey, operationID)
	if minecraftKey == nil {
		return nil, errors.New("key is required")
	}
	if len(operationID) < 1 {
		return nil, errors.New("operationID is required")
	}
	t := taskqueue.NewPOSTTask("/tq/1/server/instance/delete", url.Values{
		"keyStr":         {minecraftKey.Encode()},
		"operationID":    {operationID},
		"latestSnapshot": {latestSnapshot},
	})
	t.Delay = time.Second * 30
	return taskqueue.Add(c, t, "minecraft")
}
// encodeToken translates the key and raw secret of a newly generated token to
// a form suitable for the client.
func encodeToken(key *datastore.Key, raw *[tokenLength]byte) (string, error) {
	// Buffer size is 8 (the size of an int64) times the number of keys
	// in the hierarchy plus the length of the raw token itself.
	var b [len(kinds)*8 + tokenLength]byte
	for i := range kinds {
		// A varint would write fewer than 8 bytes for small IDs, so use a
		// fixed-width encoding to keep each level at exactly 8 bytes, and
		// guard against walking past the top of the key hierarchy.
		var id int64
		if key != nil {
			id = key.IntID()
			key = key.Parent()
		}
		binary.BigEndian.PutUint64(b[i*8:(i+1)*8], uint64(id))
	}
	copy(b[len(kinds)*8:], raw[:])
	return hex.EncodeToString(b[:]), nil
}
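// A hypothetical decoder sketch for the fixed-width layout produced by
// encodeToken above: hex-decode, read one big-endian int64 per entry in
// kinds, and take the trailing tokenLength bytes as the raw secret. Not part
// of the original source.
func decodeToken(s string) ([]int64, [tokenLength]byte, error) {
	var raw [tokenLength]byte
	b, err := hex.DecodeString(s)
	if err != nil {
		return nil, raw, err
	}
	if len(b) != len(kinds)*8+tokenLength {
		return nil, raw, errors.New("token has wrong length")
	}
	ids := make([]int64, len(kinds))
	for i := range ids {
		ids[i] = int64(binary.BigEndian.Uint64(b[i*8 : (i+1)*8]))
	}
	copy(raw[:], b[len(kinds)*8:])
	return ids, raw, nil
}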