// ByIDs filters out Tasks that are not visible to the current User. func (s *TaskService) ByIDs(ctx context.Context, ids []string) ([]*Task, error) { span := trace.FromContext(ctx).NewChild("trythings.task.ByIDs") defer span.Finish() rootKey := datastore.NewKey(ctx, "Root", "root", 0, nil) ks := []*datastore.Key{} for _, id := range ids { ks = append(ks, datastore.NewKey(ctx, "Task", id, 0, rootKey)) } var allTasks = make([]*Task, len(ks)) err := datastore.GetMulti(ctx, ks, allTasks) if err != nil { return nil, err } ts := []*Task{} for _, t := range allTasks { // TODO#Perf: Batch the isVisible check. ok, err := s.IsVisible(ctx, t) if err != nil { return nil, err } if !ok { continue } ts = append(ts, t) } return ts, nil }
// 2015-11-06 to force field EditSummary (even if empty) on every IdiomHistory persisted entity. func resaveAllIdiomHistory(c context.Context) error { defer memcache.Flush(c) saved := 0 keys, err := datastore.NewQuery("IdiomHistory").KeysOnly().GetAll(c, nil) if err != nil { return err } nbEntities := len(keys) defer func() { log.Infof(c, "Resaved %d IdiomHistory entities out of %d.", saved, nbEntities) }() for len(keys) > 0 { bunch := 100 if len(keys) < bunch { bunch = len(keys) } histories := make([]*IdiomHistory, bunch) err := datastore.GetMulti(c, keys[:bunch], histories) if err != nil { return err } _, err = datastore.PutMulti(c, keys[:bunch], histories) if err != nil { return err } saved += bunch // Remove processed keys keys = keys[bunch:] } return nil }
// GetMulti wraps datastore.GetMulti func (d *Driver) GetMulti(key []*datastore.Key, dst interface{}) error { var keyLen = len(key) var fromIdx, toIdx int var v = reflect.ValueOf(dst) // TODO: split multiple goroutine for { fromIdx = toIdx toIdx = fromIdx + 999 if toIdx > keyLen { toIdx = keyLen } _keys := key[fromIdx:toIdx] _data := v.Slice(fromIdx, toIdx).Interface() d.logOps(opRead, len(_keys), "GetMulti") if err := datastore.GetMulti(d.ctx, _keys, _data); err != nil { return err } v1 := reflect.ValueOf(_data) for i := 0; i < toIdx-fromIdx; i++ { v.Index(fromIdx + i).Set(v1.Index(i)) } if toIdx == keyLen { break } } return nil }
// filterNextSessions queries kindNext entities and returns a subset of items
// containing only the elements not present in the datastore, previously saved with
// storeNextSessions().
func filterNextSessions(c context.Context, items []*eventSession) ([]*eventSession, error) {
	pkey := nextSessionParent(c)
	// Key IDs encode session ID plus its update token, so a session counts
	// as "already seen" only for that specific update.
	keys := make([]*datastore.Key, len(items))
	for i, s := range items {
		id := s.ID + ":" + s.Update
		keys[i] = datastore.NewKey(c, kindNext, id, 0, pkey)
	}
	// Existence check only: entity contents are irrelevant.
	zeros := make([]struct{}, len(keys))
	err := datastore.GetMulti(c, keys, zeros)
	merr, ok := err.(appengine.MultiError)
	if !ok && err != nil {
		// A non-MultiError failure means the whole batch failed.
		return nil, err
	}
	// If err was nil, merr is nil and the loop never runs: every item
	// already exists, so the filtered result stays empty.
	res := make([]*eventSession, 0, len(keys))
	for i, e := range merr {
		if e == nil {
			// Entity exists: item was previously stored, drop it.
			continue
		}
		if e != datastore.ErrNoSuchEntity {
			// Unexpected per-item failure; abort with the batch error.
			return nil, err
		}
		// Missing from the datastore: keep this item.
		res = append(res, items[i])
	}
	return res, nil
}
// averageResulter computes the unweighted average of the Skills produced by
// each task's tasker for one result, then persists the updated result.
func averageResulter(ctx context.Context, result model.KeyedResult, challenge model.Challenge) error {
	tasks := make([]model.Task, len(challenge.Tasks))
	if err := datastore.GetMulti(ctx, challenge.Tasks, tasks); err != nil {
		return err
	}
	// NOTE(review): assumes the key layout Result -> parent -> parent == User;
	// confirm against how result keys are constructed by callers.
	var user model.User
	if err := datastore.Get(ctx, result.Key.Parent().Parent(), &user); err != nil {
		return err
	}
	var nrOfComputations float64
	average := model.Skills{}
	for i, task := range tasks {
		taskResult, err := Tasker(task.Tasker).Call(ctx, result, *task.Key(challenge.Tasks[i]), user, result.StartTimes[getTaskIndex(challenge, challenge.Tasks[i])])
		if err != nil {
			// TODO: ignore error for now. We`ll treat it after we have all the taskers available
			//return err
		} else {
			average = average.Add(taskResult)
			nrOfComputations++
		}
	}
	// NOTE(review): if every tasker errored, nrOfComputations is 0 here —
	// verify model.Skills.DivBy tolerates a zero divisor.
	result.Skills = average.DivBy(nrOfComputations)
	result.Computed = time.Now()
	_, err := result.Put(ctx, result.Key)
	return err
}
func (s *Store) LoadMagazines(keys []*datastore.Key, a []*Magazine) error { if err := datastore.GetMulti(s.c, keys, a); err != nil { return err } for _, m := range a { m.Init(s.c) } return nil }
func (s *Store) LoadUsers(keys []*datastore.Key, a []*User) error { if err := datastore.GetMulti(s.c, keys, a); err != nil { return err } for _, u := range a { u.Init(s.c) } return nil }
// GetMulti implements the raw-datastore GetMulti interface: it fetches the
// properties for each key and reports every per-key outcome through cb.
func (d rdsImpl) GetMulti(keys []ds.Key, _meta ds.MultiMetaGetter, cb ds.GetMultiCB) error {
	rkeys := dsMF2R(keys)
	vals := make([]datastore.PropertyLoadSaver, len(keys))
	for i := range keys {
		// typeFilter adapts the SDK's PropertyLoadSaver to our PropertyMap.
		vals[i] = &typeFilter{ds.PropertyMap{}}
	}
	err := datastore.GetMulti(d, rkeys, vals)
	// idxCallbacker unpacks a possible MultiError and invokes cb once per
	// key, pairing each loaded PropertyMap with its per-key error (if any).
	return idxCallbacker(err, len(keys), func(idx int, err error) {
		cb(vals[idx].(*typeFilter).pm, err)
	})
}
// example is a documentation snippet showing batch datastore operations;
// the [START]/[END] markers delimit the published region. ctx, k1..k3 and
// e1..e3 are assumed to be declared elsewhere in the sample.
func example() {
	// [START batch]
	// A batch put.
	_, err = datastore.PutMulti(ctx, []*datastore.Key{k1, k2, k3}, []interface{}{e1, e2, e3})

	// A batch get.
	var entities = make([]*T, 3)
	err = datastore.GetMulti(ctx, []*datastore.Key{k1, k2, k3}, entities)

	// A batch delete.
	err = datastore.DeleteMulti(ctx, []*datastore.Key{k1, k2, k3})
	// [END batch]
	_ = err
}
// GetChallengesForProfile handles GET requests listing the challenges
// referenced by a profile's results. Optional query parameters: finished=true
// (only finished results), order (sort field), limit (max results).
// It writes the challenges as JSON and returns an HTTP status plus error.
func GetChallengesForProfile(ctx context.Context, w http.ResponseWriter, r *http.Request) (status int, err error) {
	if r.Method != "GET" {
		return http.StatusMethodNotAllowed, nil
	}
	// Require an authenticated passenger; identity itself is unused here.
	_, ok := passenger.FromContext(ctx)
	if !ok {
		return http.StatusUnauthorized, nil
	}
	var profileKey *datastore.Key
	if profileKey, err = datastore.DecodeKey(mux.Vars(r)["key"]); err != nil {
		return http.StatusInternalServerError, err
	}
	q := model.NewQueryForResult().
		Ancestor(profileKey)
	if finished := r.URL.Query()["finished"]; len(finished) > 0 && finished[0] == "true" {
		// A zero Finished time means "not finished"; filter on > zero value.
		q = q.Filter("Finished >", time.Time{})
	}
	if order := r.URL.Query()["order"]; len(order) > 0 && order[0] != "" {
		q = q.Order(order[0])
	}
	if limitQuery := r.URL.Query()["limit"]; len(limitQuery) > 0 {
		if limit, err := strconv.Atoi(limitQuery[0]); err != nil {
			return http.StatusInternalServerError, err
		} else {
			q = q.Limit(limit)
		}
	}
	var results model.Results
	if _, err = q.GetAll(ctx, &results); err != nil {
		return http.StatusInternalServerError, err
	}
	// Batch-fetch the challenge entity behind each result.
	challengeKeys := make([]*datastore.Key, len(results))
	for i, val := range results {
		challengeKeys[i] = val.Challenge
	}
	challenges := make(model.Challenges, len(challengeKeys))
	if err = datastore.GetMulti(ctx, challengeKeys, challenges); err != nil {
		return http.StatusInternalServerError, err
	}
	// NOTE(review): the Encode error is ignored and the naked return yields
	// status 0 on success — presumably the caller treats err == nil as
	// "response already written"; confirm against the handler wrapper.
	json.NewEncoder(w).Encode(challenges.Key(challengeKeys))
	return
}
// GetMulti implements the raw-datastore GetMulti interface: it converts the
// abstract keys to SDK keys, fetches their properties, and reports every
// per-key outcome through cb.
func (d rdsImpl) GetMulti(keys []*ds.Key, _meta ds.MultiMetaGetter, cb ds.GetMultiCB) error {
	vals := make([]datastore.PropertyLoadSaver, len(keys))
	rkeys, err := dsMF2R(d.aeCtx, keys)
	if err == nil {
		for i := range keys {
			// typeFilter adapts the SDK's PropertyLoadSaver to our PropertyMap.
			vals[i] = &typeFilter{d.aeCtx, ds.PropertyMap{}}
		}
		err = datastore.GetMulti(d.aeCtx, rkeys, vals)
	}
	// idxCallbacker unpacks a possible MultiError and invokes cb once per key.
	// If key conversion failed, vals entries are nil and cb gets (nil, err).
	return idxCallbacker(err, len(keys), func(idx int, err error) {
		if pls := vals[idx]; pls != nil {
			cb(pls.(*typeFilter).pm, err)
		} else {
			cb(nil, err)
		}
	})
}
// Load reads the JSON representation of entities from the io.Reader "r",
// and stores them in the Datastore using the given context.Context.
// The Options parameter allows you to configure how the dump will work.
// If there are any parsing errors, improper format, or datastore failures
// during the process, that error is returned and processing stops. The
// error may be returned after some entities were loaded: there is no
// parsing cache.
func Load(c context.Context, r io.Reader, o *Options) error {
	entities, err := DecodeEntities(c, r)
	if err != nil {
		return err
	}
	if len(entities) == 0 {
		log.Infof(c, "Skipping load of 0 entities")
		return nil
	}
	// Default batch size when the caller did not set a positive one.
	batchSize := o.BatchSize
	if batchSize <= 0 {
		batchSize = 50
	}
	// Write entities in [start, end) windows of at most batchSize.
	for start, end := 0, 0; start < len(entities); {
		end += batchSize
		if end > len(entities) {
			end = len(entities)
		}
		keys := make([]*datastore.Key, 0, end-start)
		values := make([]datastore.PropertyList, 0, cap(keys))
		for _, e := range entities[start:end] {
			keys = append(keys, e.Key)
			values = append(values, e.Properties)
		}
		// PutMulti returns the completed keys (IDs assigned where needed).
		keys, err = datastore.PutMulti(c, keys, values)
		if err != nil {
			return err
		}
		log.Infof(c, "Loaded %d entities ...", len(keys))
		if o.GetAfterPut {
			// Optional read-back of the just-written keys to force the
			// datastore to apply the writes before continuing.
			log.Infof(c, "Making a read to force consistency ...")
			l := make([]Entity, len(keys))
			err := datastore.GetMulti(c, keys, l)
			if err != nil {
				return err
			}
		}
		start = end
	}
	return nil
}
// adminMarkPaid handles the admin form: the selected "pay" checkboxes carry
// encoded LoggedTask keys, and "action" selects marking them paid or
// deleting them. Failures panic, relying on the App Engine runtime to
// convert panics into 500 responses.
func adminMarkPaid(w http.ResponseWriter, r *http.Request) {
	c := appengine.NewContext(r)
	action := r.FormValue("action")
	keys := make([]*datastore.Key, 0, len(r.Form["pay"]))
	for _, s := range r.Form["pay"] {
		k, err := datastore.DecodeKey(s)
		if err != nil {
			panic(err)
		}
		keys = append(keys, k)
	}
	if action == "Mark Paid" {
		// Read-modify-write: stamp every selected task as paid now (UTC).
		tasks := make([]LoggedTask, len(keys))
		err := datastore.GetMulti(c, keys, tasks)
		if err != nil {
			panic(err)
		}
		now := time.Now().UTC()
		for i := range tasks {
			tasks[i].Paid = true
			tasks[i].PaidTime = now
		}
		_, err = datastore.PutMulti(c, keys, tasks)
		if err != nil {
			panic(err)
		}
	} else if action == "Delete" {
		err := datastore.DeleteMulti(c, keys)
		if err != nil {
			panic(err)
		}
	} else {
		panic("Unhandled action: " + action)
	}
	// 303 See Other: redirect back to the admin page after the POST.
	http.Redirect(w, r, "/admin/", 303)
}
// executeIdiomTextSearchQuery runs an IDs-only full-text search against the
// "idioms" index and fetches the matching Idiom entities from the datastore,
// returning at most limit results.
func executeIdiomTextSearchQuery(c context.Context, query string, limit int) ([]*Idiom, error) {
	// log.Infof(c, query)
	index, err := gaesearch.Open("idioms")
	if err != nil {
		return nil, err
	}
	if limit == 0 {
		// Limit is not optional. 0 means zero result.
		return nil, nil
	}
	idiomKeys := make([]*datastore.Key, 0, limit)
	// This is an *IDsOnly* search, where docID == Idiom.Id
	it := index.Search(c, query, &gaesearch.SearchOptions{
		Limit:   limit,
		IDsOnly: true,
	})
	for {
		docID, err := it.Next(nil)
		if err == gaesearch.Done {
			break
		}
		if err != nil {
			return nil, err
		}
		// Document IDs are numeric idiom IDs; turn each into a datastore key.
		idiomID, err := strconv.Atoi(docID)
		if err != nil {
			return nil, err
		}
		key := newIdiomKey(c, idiomID)
		idiomKeys = append(idiomKeys, key)
	}
	// Fetch Idioms in a []Idiom
	buffer := make([]Idiom, len(idiomKeys))
	err = datastore.GetMulti(c, idiomKeys, buffer)
	// Convert []Idiom to []*Idiom
	idioms := make([]*Idiom, len(buffer))
	for i := range buffer {
		// Do not take the address of the 2nd range variable, it would make a copy.
		// Better take the address in the existing buffer.
		idioms[i] = &buffer[i]
	}
	return idioms, err
}
func (iter *LongBatchingIterator) NextWithErr() (*types.Complaint, error) { if iter.err != nil { return nil, iter.err } if len(iter.vals) == 0 && len(iter.keys) == 0 { return nil, nil // We're all done ! } // No new vals left in the cache; fetch some if len(iter.vals) == 0 { var keysForThisBatch []*datastore.Key if len(iter.keys) < iter.BatchSize { // Remaining keys not enough for a full page; grab all of 'em keysForThisBatch = iter.keys iter.keys = []*datastore.Key{} } else { keysForThisBatch = iter.keys[0:iter.BatchSize] iter.keys = iter.keys[iter.BatchSize:] } // Fetch the complaints for the keys in this batch complaints := make([]types.Complaint, len(keysForThisBatch)) if err := datastore.GetMulti(iter.Ctx, keysForThisBatch, complaints); err != nil { iter.err = err return nil, err } iter.vals = make([]*types.Complaint, len(keysForThisBatch)) for i, _ := range complaints { FixupComplaint(&complaints[i], keysForThisBatch[i]) iter.vals[i] = &complaints[i] } } // We have unreturned results in the cache; shift & return it complaint := iter.vals[0] iter.vals = iter.vals[1:] return complaint, nil }
func init() { RegisterResulter(Average, func(ctx context.Context, resultKey *datastore.Key) error { var result model.Result if err := datastore.Get(ctx, resultKey, &result); err != nil { return err } var challenge model.Challenge if err := datastore.Get(ctx, result.Challenge, &challenge); err != nil { return err } var tasks model.Tasks if err := datastore.GetMulti(ctx, challenge.Tasks, &tasks); err != nil { return err } weightSum := model.Skills{} // this could be SkillWeights, but would need more conversions average := model.Skills{} for i, task := range tasks { taskResult, err := Tasker(task.Tasker).Call(ctx, challenge.Tasks[i], resultKey.Parent().Parent()) if err != nil { return err } average = average.Add(taskResult.Mul(model.Skills(task.SkillWeights))) weightSum = weightSum.Add(model.Skills(task.SkillWeights)) } result.Skills = average.Div(weightSum) result.Computed = time.Now() _, err := result.Put(ctx, resultKey) return err }) }
// createSampleEntities writes `size` synthetic "User" entities filled with
// lorem-ipsum properties, persisting them in batches of 10 and issuing a
// read-back after each put.
func createSampleEntities(c context.Context, size int) error {
	buff := make([]Entity, 0, 10)
	keys := make([]*datastore.Key, 0, 10)
	for i := 1; i <= size; i++ {
		k := datastore.NewKey(c, "User", "", int64(i), nil)
		e := Entity{Key: k}
		e.Add(datastore.Property{Name: "Title", Value: lorem.Sentence(5, 10)})
		e.Add(datastore.Property{
			Name:    "SubTitle",
			Value:   lorem.Sentence(3, 5),
			NoIndex: true,
		})
		e.Add(datastore.Property{
			Name:    "Description",
			Value:   lorem.Paragraph(3, 5),
			NoIndex: true,
		})
		e.Add(datastore.Property{Name: "Size", Value: int64(32)})
		// Repeated properties (Multiple: true) model multi-valued fields.
		for j := 0; j < 5; j++ {
			e.Add(datastore.Property{
				Name:     "Tags",
				Value:    lorem.Word(5, 10),
				Multiple: true,
			})
		}
		e.Add(datastore.Property{Name: "Price", Value: float64(123.45)})
		for j := 0; j < 10; j++ {
			e.Add(datastore.Property{
				Name:     "PriceHistory",
				Value:    float64(123.45) - float64(j),
				Multiple: true,
			})
		}
		// icon and blobKey are package-level fixtures defined elsewhere.
		e.Add(datastore.Property{Name: "Favicon", Value: icon, NoIndex: true})
		e.Add(datastore.Property{Name: "FaviconSource", Value: blobKey})
		for j := 1; j <= 3; j++ {
			e.Add(datastore.Property{
				Name:     "Friends",
				Value:    datastore.NewKey(c, "Friend", "", int64(j), k),
				Multiple: true,
			})
		}
		buff = append(buff, e)
		keys = append(keys, k)
		// Flush a full batch of 10, then start a fresh one.
		if len(buff) == 10 {
			_, err := datastore.PutMulti(c, keys, buff)
			if err != nil {
				return err
			}
			// Read-back error deliberately ignored — presumably this is only
			// a best-effort consistency nudge; confirm intent.
			_ = datastore.GetMulti(c, keys, buff)
			buff = make([]Entity, 0, 10)
			keys = make([]*datastore.Key, 0, 10)
		}
	}
	// Flush any final partial batch.
	if len(buff) > 0 {
		k, err := datastore.PutMulti(c, keys, buff)
		if err != nil {
			return err
		}
		_ = datastore.GetMulti(c, k, buff)
	}
	return nil
}
// getMulti performs one raw datastore.GetMulti call, recording the read in
// the driver's op log first.
//
// NOTE(review): the datastore caps GetMulti at roughly 1000 keys per call,
// and this helper does NOT split the key slice itself — the previous comment
// claiming it did was misleading. Chunking presumably happens in the
// exported GetMulti wrapper; confirm every call site respects the limit.
func (d *Driver) getMulti(key []*datastore.Key, dst interface{}) error {
	d.logOps(opRead, len(key), "GetMulti")
	return datastore.GetMulti(d.ctx, key, dst)
}
// GetMulti is a batch version of Get.
//
// dst must be a *[]S, *[]*S, *[]I, []S, []*S, or []I, for some struct type S,
// or some interface type I. If *[]I or []I, each element must be a struct pointer.
//
// Lookup order per entity: in-memory cache, then memcache, then the
// datastore (in getMultiLimit-sized chunks, fetched concurrently).
func (g *Goon) GetMulti(dst interface{}) error {
	keys, err := g.extractKeys(dst, false) // don't allow incomplete keys on a Get request
	if err != nil {
		return err
	}
	v := reflect.Indirect(reflect.ValueOf(dst))
	if g.inTransaction {
		// todo: support getMultiLimit in transactions
		return datastore.GetMulti(g.Context, keys, v.Interface())
	}
	// dskeys/dsdst/dixs track entities still needing a datastore fetch;
	// memkeys/mixs track entities to try in memcache first. The *ixs slices
	// map back to positions in the caller's dst slice.
	var dskeys []*datastore.Key
	var dsdst []interface{}
	var dixs []int
	var memkeys []string
	var mixs []int
	g.cacheLock.RLock()
	for i, key := range keys {
		m := memkey(key)
		vi := v.Index(i)
		if vi.Kind() == reflect.Struct {
			vi = vi.Addr()
		}
		if s, present := g.cache[m]; present {
			// Local cache hit: copy the cached struct into the caller's slot.
			if vi.Kind() == reflect.Interface {
				vi = vi.Elem()
			}
			reflect.Indirect(vi).Set(reflect.Indirect(reflect.ValueOf(s)))
		} else {
			memkeys = append(memkeys, m)
			mixs = append(mixs, i)
			dskeys = append(dskeys, key)
			dsdst = append(dsdst, vi.Interface())
			dixs = append(dixs, i)
		}
	}
	g.cacheLock.RUnlock()
	if len(memkeys) == 0 {
		// Everything came from the local cache.
		return nil
	}
	multiErr, any := make(appengine.MultiError, len(keys)), false
	tc, cf := context.WithTimeout(g.Context, MemcacheGetTimeout)
	memvalues, err := memcache.GetMulti(tc, memkeys)
	cf()
	if appengine.IsTimeoutError(err) {
		g.timeoutError(err)
		err = nil
	} else if err != nil {
		g.error(err) // timing out or another error from memcache isn't something to fail over, but do log it
		// No memvalues found, prepare the datastore fetch list already prepared above
	} else if len(memvalues) > 0 {
		// since memcache fetch was successful, reset the datastore fetch list and repopulate it
		dskeys = dskeys[:0]
		dsdst = dsdst[:0]
		dixs = dixs[:0]
		// we only want to check the returned map if there weren't any errors
		// unlike the datastore, memcache will return a smaller map with no error if some of the keys were missed
		for i, m := range memkeys {
			d := v.Index(mixs[i]).Interface()
			if v.Index(mixs[i]).Kind() == reflect.Struct {
				d = v.Index(mixs[i]).Addr().Interface()
			}
			if s, present := memvalues[m]; present {
				// Memcache hit: deserialize straight into the caller's slot.
				err := deserializeStruct(d, s.Value)
				if err == datastore.ErrNoSuchEntity {
					any = true // this flag tells GetMulti to return multiErr later
					multiErr[mixs[i]] = err
				} else if err != nil {
					g.error(err)
					return err
				} else {
					g.putMemory(d)
				}
			} else {
				// Memcache miss: fall through to a datastore fetch.
				dskeys = append(dskeys, keys[mixs[i]])
				dsdst = append(dsdst, d)
				dixs = append(dixs, mixs[i])
			}
		}
		if len(dskeys) == 0 {
			if any {
				return realError(multiErr)
			}
			return nil
		}
	}
	// Fetch the remaining entities from the datastore, one goroutine per
	// getMultiLimit-sized chunk.
	// NOTE(review): `any` is written from multiple goroutines without
	// synchronization, and the non-MultiError branch below indexes multiErr
	// by the dskeys position j rather than the original index dixs[j] —
	// verify both against upstream goon before relying on exact error slots.
	goroutines := (len(dskeys)-1)/getMultiLimit + 1
	var wg sync.WaitGroup
	wg.Add(goroutines)
	for i := 0; i < goroutines; i++ {
		go func(i int) {
			defer wg.Done()
			var toCache []interface{}
			var exists []byte
			lo := i * getMultiLimit
			hi := (i + 1) * getMultiLimit
			if hi > len(dskeys) {
				hi = len(dskeys)
			}
			gmerr := datastore.GetMulti(g.Context, dskeys[lo:hi], dsdst[lo:hi])
			if gmerr != nil {
				any = true // this flag tells GetMulti to return multiErr later
				merr, ok := gmerr.(appengine.MultiError)
				if !ok {
					g.error(gmerr)
					for j := lo; j < hi; j++ {
						multiErr[j] = gmerr
					}
					return
				}
				for i, idx := range dixs[lo:hi] {
					if merr[i] == nil {
						toCache = append(toCache, dsdst[lo+i])
						exists = append(exists, 1)
					} else {
						if merr[i] == datastore.ErrNoSuchEntity {
							// Cache the miss too, so the next Get can answer
							// "no such entity" without a datastore trip.
							toCache = append(toCache, dsdst[lo+i])
							exists = append(exists, 0)
						}
						multiErr[idx] = merr[i]
					}
				}
			} else {
				toCache = append(toCache, dsdst[lo:hi]...)
				exists = append(exists, bytes.Repeat([]byte{1}, hi-lo)...)
			}
			if len(toCache) > 0 {
				if err := g.putMemcache(toCache, exists); err != nil {
					g.error(err)
					// since putMemcache() gives no guarantee it will actually store the data in memcache
					// we log and swallow this error
				}
			}
		}(i)
	}
	wg.Wait()
	if any {
		return realError(multiErr)
	}
	return nil
}
// searchIdiomsByWordsWithFavorites must return idioms that contain *all* the searched words.
// If seeNonFavorite==false, it must only return idioms that have at least 1 implementation in 1 of the user favoriteLangs.
// If seeNonFavorite==true, it must return the same list but extended with idioms that contain all the searched words but no implementation in a user favoriteLang.
func (a *GaeDatastoreAccessor) searchIdiomsByWordsWithFavorites(c context.Context, typedWords, typedLangs []string, favoriteLangs []string, seeNonFavorite bool, limit int) ([]*Idiom, error) {
	terms := append(append([]string(nil), typedWords...), typedLangs...)
	var retrievers []retriever
	idiomKeyStrings := make([]string, 0, limit)
	seenIdiomKeyStrings := make(map[string]bool, limit)
	// idiomQueryRetriever wraps a full-text query into a deferred fetch.
	var idiomQueryRetriever = func(q string) retriever {
		return func() ([]string, error) {
			return executeIdiomKeyTextSearchQuery(c, q, limit)
		}
	}
	if len(typedLangs) == 1 {
		// Exactly 1 term is a lang: assume user really wants this lang
		lang := typedLangs[0]
		log.Debugf(c, "User is looking for results in [%v]", lang)
		// 1) Impls in lang, containing all words
		implRetriever := func() ([]string, error) {
			var keystrings []string
			implQuery := "Bulk:(~" + strings.Join(terms, " AND ~") + ") AND Lang:" + lang
			implIdiomIDs, _, err := executeImplTextSearchQuery(c, implQuery, limit)
			if err != nil {
				return nil, err
			}
			for _, idiomID := range implIdiomIDs {
				idiomKey := newIdiomKey(c, idiomID)
				idiomKeyString := idiomKey.Encode()
				keystrings = append(keystrings, idiomKeyString)
			}
			return keystrings, nil
		}
		// Retrievers are ordered from most to least relevant; harvesting
		// below preserves this order.
		retrievers = []retriever{
			// 1) Idioms with words in title, having an impl in lang
			idiomQueryRetriever("TitleWords:(~" + strings.Join(typedWords, " AND ~") + ") AND Langs:(" + lang + ")"),
			// 2) Implementations in lang, containing all terms
			implRetriever,
			// 3) Idioms with words in lead paragraph (or title), having an impl in lang
			idiomQueryRetriever("TitleOrLeadWords:(~" + strings.Join(typedWords, " AND ~") + ") AND Langs:(" + lang + ")"),
			// 4) Just all the terms
			idiomQueryRetriever("Bulk:(~" + strings.Join(terms, " AND ~") + ")"),
		}
	} else {
		// Either 0 or many langs. Just make sure all terms are respected.
		retrievers = append(retrievers,
			// 1) Words in idiom title, having all the langs implemented
			idiomQueryRetriever("TitleWords:(~"+strings.Join(typedWords, " AND ~")+") AND Bulk:(~"+strings.Join(terms, " AND ~")+")"),
			// 2) Words in idiom lead paragraph (or title), having all the langs implemented
			idiomQueryRetriever("TitleOrLeadWords:(~"+strings.Join(typedWords, " AND ~")+") AND Bulk:(~"+strings.Join(terms, " AND ~")+")"),
			// 3) Terms (words and langs) somewhere in idiom
			idiomQueryRetriever("Bulk:(~"+strings.Join(terms, " AND ~")+")"),
		)
	}
	// Each retriever will send 1 slice in 1 channel. So we can harvest them in right order.
	// Channels are buffered (cap 1), so goroutines outliving an early break
	// below can still complete their send and exit.
	promises := make([]chan []string, len(retrievers))
	for i := range retrievers {
		retriever := retrievers[i]
		promises[i] = make(chan []string, 1)
		ch := promises[i]
		go func() {
			keyStrings, err := retriever()
			if err != nil {
				log.Errorf(c, "problem fetching search results: %v", err)
				ch <- nil
			} else {
				ch <- keyStrings
			}
			close(ch)
		}()
	}
harvestloop:
	for _, promise := range promises {
		kstrChunk := <-promise
		m := 0
		dupes := 0
		for _, kstr := range kstrChunk {
			if seenIdiomKeyStrings[kstr] {
				dupes++
			} else {
				m++
				idiomKeyStrings = append(idiomKeyStrings, kstr)
				seenIdiomKeyStrings[kstr] = true
				if len(idiomKeyStrings) == limit {
					log.Debugf(c, "%d new results, %d dupes, stopping here.", m, dupes)
					break harvestloop
				}
			}
		}
		log.Debugf(c, "%d new results, %d dupes.", m, dupes)
	}
	// TODO use favoriteLangs
	// TODO use seeNonFavorite (or not)
	var err error
	idiomKeys := make([]*datastore.Key, len(idiomKeyStrings))
	for i, kstr := range idiomKeyStrings {
		idiomKeys[i], err = datastore.DecodeKey(kstr)
		if err != nil {
			return nil, err
		}
	}
	// Fetch Idioms in a []Idiom
	buffer := make([]Idiom, len(idiomKeys))
	err = datastore.GetMulti(c, idiomKeys, buffer)
	// Convert []Idiom to []*Idiom
	idioms := make([]*Idiom, len(buffer))
	for i := range buffer {
		// Do not take the address of the 2nd range variable, it would make a copy.
		// Better take the address in the existing buffer.
		idioms[i] = &buffer[i]
	}
	return idioms, err
}
// serveComplete marks the tasks selected in the "task" form fields as
// completed by the current user: each due task gets its schedule advanced
// and a LoggedTask audit record, all persisted in a single batch put.
func serveComplete(w http.ResponseWriter, r *http.Request) {
	c := appengine.NewContext(r)
	u := user.Current(c)
	su, err := getUser(c, u)
	if err != nil {
		w.WriteHeader(400)
		fmt.Fprintf(w, "You are not permitted, %v", u)
		return
	}
	if err := r.ParseForm(); err != nil {
		http.Error(w, err.Error(), 400)
		fmt.Fprintf(w, "Can't parse form from %v", u)
		return
	}
	// Decode the selected task keys from the form.
	taskIds := []*datastore.Key{}
	for _, s := range r.Form["task"] {
		k, err := datastore.DecodeKey(s)
		if err != nil {
			panic(err)
		}
		taskIds = append(taskIds, k)
	}
	log.Infof(c, "Doing tasks for %v: %v", su, taskIds)
	tasks := make([]Task, len(taskIds))
	err = datastore.GetMulti(c, taskIds, tasks)
	if err != nil {
		panic(err)
	}
	now := time.Now()
	// Each completed task produces two writes: the updated Task and a new
	// LoggedTask record, hence capacity 2*len(taskIds).
	storeKeys := make([]*datastore.Key, 0, 2*len(taskIds))
	vals := []interface{}{}
	for i := range tasks {
		// Only tasks actually due (Next in the past) are completed; others
		// submitted in the form are silently skipped.
		if tasks[i].Next.Before(now) {
			tasks[i].updateTime()
			tasks[i].Prev = now
			storeKeys = append(storeKeys, taskIds[i])
			vals = append(vals, &tasks[i])
			storeKeys = append(storeKeys, datastore.NewIncompleteKey(c, "LoggedTask", nil))
			vals = append(vals, &LoggedTask{
				Task:      taskIds[i],
				User:      su.Key,
				Completed: now,
				Who:       su.Name,
				Name:      tasks[i].Name,
				Amount:    tasks[i].Value,
			})
		}
	}
	log.Infof(c, "Putting %#v in %v", vals, storeKeys)
	_, err = datastore.PutMulti(c, storeKeys, vals)
	if err != nil {
		http.Error(w, err.Error(), 500)
		log.Errorf(c, "Error saving stuff: %v", err)
		return
	}
	http.Redirect(w, r, "/", http.StatusTemporaryRedirect)
}