// ktoi converts a datastore key into its flattened id representation,
// capturing kind, identifiers, app ID and namespace.
func ktoi(key *datastore.Key) id {
	converted := id{
		kind:      key.Kind(),
		stringID:  key.StringID(),
		intID:     key.IntID(),
		appID:     key.AppID(),
		namespace: key.Namespace(),
	}
	return converted
}
func buildKey(req *wcg.Request, key *datastore.Key) *datastore.Key { kind := entities.FindKind(key.Kind(), key.Namespace()) if kind == nil { return nil } if key.Parent() != nil { return kind.NewKey(req, key.StringID(), buildKey(req, key.Parent())) } return kind.NewKey(req, key.StringID(), nil) }
func buildDatastoreKey(key *datastore.Key) (map[string]bigquery.JsonValue, error) { if key == nil { return map[string]bigquery.JsonValue{ "namespace": "", "app": "", "path": "", "kind": "", "name": "", "id": 0, }, nil } var workKey = key var keys []*datastore.Key keys = append(keys, key) for { if workKey.Parent() == nil { break } keys = append(keys, workKey.Parent()) workKey = workKey.Parent() } var buf bytes.Buffer for i := len(keys) - 1; i >= 0; i-- { if buf.Len() > 0 { _, err := buf.WriteString(", ") if err != nil { return map[string]bigquery.JsonValue{}, nil } } key := keys[i] if len(key.StringID()) < 1 { _, err := buf.WriteString(fmt.Sprintf(`"%s", "%s"`, keys[i].Kind(), keys[i].IntID())) if err != nil { return map[string]bigquery.JsonValue{}, nil } } else { _, err := buf.WriteString(fmt.Sprintf(`"%s", "%s"`, keys[i].Kind(), keys[i].StringID())) if err != nil { return map[string]bigquery.JsonValue{}, nil } } } return map[string]bigquery.JsonValue{ "namespace": key.Namespace(), "app": key.AppID(), "path": buf.String(), "kind": key.Kind(), "name": key.StringID(), "id": key.IntID(), }, nil }
func nameObject(key *datastore.Key) string { name := "" for key != nil { id := key.StringID() if id == "" { id = strconv.FormatInt(key.IntID(), 10) } name = "/" + key.Kind() + "/" + id + name key = key.Parent() } // NOTE: The name of a GCS object must not be prefixed "/", // this will give you a major headache. return name[1:] }
func syncCrawlerSettings(req *wcg.Request, key *datastore.Key, artistPageInfo *crawlers.ArtistPageInfo) { if len(artistPageInfo.CrawlableURLs) == 0 { return } var urlKeys = make([]string, len(artistPageInfo.CrawlableURLs)) for i, u := range artistPageInfo.CrawlableURLs { urlKeys[i] = u.Link } _, _settingsList := entities.CrawlerSettings.GetMulti().Keys(urlKeys...).UseDefaultIfNil(true).MustList(req) settingsList := _settingsList.([]*hplink.CrawlerSettings) for i, u := range artistPageInfo.CrawlableURLs { settingsList[i].ArtistKey = key.StringID() settingsList[i].Type = u.Type } _, err := entities.CrawlerSettings.PutMulti().Keys(urlKeys...).Update(req, settingsList) if err != nil { req.Logger.Errorf("Could not update CrawlerSettings: %v", err) } }
// cmpKey compares k and other, returning -1, 0 or 1 if k is
// less than, equal or greater than other. The algorithm doesn't
// take into account any ancestors in the two keys. The order
// of comparison is AppID, Kind, IntID and StringID. Keys with
// integer identifiers are smaller than string identifiers.
func cmpKey(k, other *datastore.Key) int {
	// Identical pointers (or both nil) are trivially equal.
	if k == other {
		return 0
	}
	if r := cmpStr(k.AppID(), other.AppID()); r != 0 {
		return r
	}
	if r := cmpStr(k.Kind(), other.Kind()); r != 0 {
		return r
	}
	// A key has either an integer or a string identifier. When k has an
	// integer ID it sorts before any string-identified key; two integer
	// IDs compare numerically.
	if k.IntID() != 0 {
		if other.IntID() == 0 {
			return -1
		}
		return cmpInt(k.IntID(), other.IntID())
	}
	// k is string-identified: it sorts after any integer-identified key.
	if other.IntID() != 0 {
		return 1
	}
	return cmpStr(k.StringID(), other.StringID())
}
// dsR2F (DS real-to-fake) converts an SDK Key to a ds.Key func dsR2F(k *datastore.Key) *ds.Key { if k == nil { return nil } aid := k.AppID() ns := k.Namespace() count := 0 for nk := k; nk != nil; nk = nk.Parent() { count++ } toks := make([]ds.KeyTok, count) for ; k != nil; k = k.Parent() { count-- toks[count].Kind = k.Kind() toks[count].StringID = k.StringID() toks[count].IntID = k.IntID() } return ds.NewKeyToks(aid, ns, toks) }
func (g *Goon) setStructKey(src interface{}, key *datastore.Key) error { v := reflect.ValueOf(src) t := v.Type() k := t.Kind() if k != reflect.Ptr { return fmt.Errorf("goon: Expected pointer to struct, got instead: %v", k) } v = reflect.Indirect(v) t = v.Type() k = t.Kind() if k != reflect.Struct { return fmt.Errorf(fmt.Sprintf("goon: Expected struct, got instead: %v", k)) } idSet := false kindSet := false parentSet := false for i := 0; i < v.NumField(); i++ { tf := t.Field(i) vf := v.Field(i) if !vf.CanSet() { continue } tag := tf.Tag.Get("goon") tagValues := strings.Split(tag, ",") if len(tagValues) > 0 { tagValue := tagValues[0] if tagValue == "id" { if idSet { return fmt.Errorf("goon: Only one field may be marked id") } switch vf.Kind() { case reflect.Int64: vf.SetInt(key.IntID()) idSet = true case reflect.String: vf.SetString(key.StringID()) idSet = true } } else if tagValue == "kind" { if kindSet { return fmt.Errorf("goon: Only one field may be marked kind") } if vf.Kind() == reflect.String { if (len(tagValues) <= 1 || key.Kind() != tagValues[1]) && g.KindNameResolver(src) != key.Kind() { vf.Set(reflect.ValueOf(key.Kind())) } kindSet = true } } else if tagValue == "parent" { if parentSet { return fmt.Errorf("goon: Only one field may be marked parent") } dskeyType := reflect.TypeOf(&datastore.Key{}) vfType := vf.Type() if vfType.ConvertibleTo(dskeyType) { vf.Set(reflect.ValueOf(key.Parent()).Convert(vfType)) parentSet = true } } } } if !idSet { return fmt.Errorf("goon: Could not set id field") } return nil }
// _syncMemberProfiles crawls the artist page and every member page it lists,
// then persists the resulting Member and MemberPublicProfile entities.
// Crawler settings are synced concurrently as a best-effort side task.
// Returns the datastore keys and entities of the stored members, or an
// error if any crawl fails.
func _syncMemberProfiles(req *wcg.Request, key *datastore.Key, artist *hplink.Artist) ([]*datastore.Key, []*hplink.Member, error) {
	//
	// Crawling pages.
	//
	req.Logger.Infof("Importing the member list from the artist page ...")
	artistPageInfo, err := crawlers.NewArtist(urlfetch.NewHTTPClient(req), artist.Key).Run()
	if err != nil {
		return nil, nil, err
	}
	// One entity key and one crawled detail page per listed member.
	keys := make([]string, len(artistPageInfo.Members))
	memberPageInfoList := make([]*crawlers.MemberPageInfo, len(artistPageInfo.Members))
	for i, member := range artistPageInfo.Members {
		req.Logger.Infof("Importing member details")
		result, err := crawlers.NewMember(urlfetch.NewHTTPClient(req), artist.Key, member.Key).Run()
		if err != nil {
			return nil, nil, err
		}
		// Member entity keys are "<artist>.<member>".
		keys[i] = fmt.Sprintf("%s.%s", artist.Key, member.Key)
		memberPageInfoList[i] = result
	}
	// Optional task to update crawler settings; runs concurrently and is
	// waited on (via the deferred Wait) before this function returns.
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		syncCrawlerSettings(req, key, artistPageInfo)
	}()
	defer wg.Wait()
	//
	// Update Datastore
	//
	// Check existing Member entities to merge with the crawling result.
	memberKeys, _members := entities.Member.GetMulti().Keys(keys...).UseDefaultIfNil(true).MustList(req)
	members := _members.([]*hplink.Member)
	// MemberPublicProfile can always be overwritten, so start from fresh
	// zero-valued entities rather than fetching existing ones.
	profiles := make([]*hplink.MemberPublicProfile, len(members))
	for i := range profiles {
		profiles[i] = &hplink.MemberPublicProfile{}
	}
	// Merge the crawled data onto the (existing or default) entities.
	for i, member := range artistPageInfo.Members {
		members[i].Key = keys[i]
		members[i].ShortKey = member.Key
		members[i].ArtistKey = artist.Key
		members[i].Name = member.Name
		members[i].Index = member.Index
		members[i].Birthday = memberPageInfoList[i].Birthday
		members[i].Joinday = memberPageInfoList[i].Joinday
		members[i].Images = make([]models.Image, len(memberPageInfoList[i].ImageURLs))
		for j, u := range memberPageInfoList[i].ImageURLs {
			// TODO: integrate blob service
			members[i].Images[j] = models.Image{
				URL: u,
			}
		}
		profiles[i].Key = keys[i]
		profiles[i].Nicknames = memberPageInfoList[i].Nicknames
		profiles[i].BloodType = memberPageInfoList[i].BloodType
		profiles[i].Hometown = memberPageInfoList[i].Hometown
		profiles[i].Skills = memberPageInfoList[i].Skills
		profiles[i].Hobbies = memberPageInfoList[i].Hobbies
		profiles[i].MusicGenres = memberPageInfoList[i].MusicGenres
		profiles[i].Sports = memberPageInfoList[i].Sports
		profiles[i].Motto = memberPageInfoList[i].Motto
	}
	// Cache images; failures are logged and the original URL is kept.
	if cacher, err := cacheutil.NewImageCacher(req, entities.ImageCache); err == nil {
		for _, m := range members {
			for i := range m.Images {
				cache, err := cacher.Cache(m.Images[i].URL)
				if err != nil {
					req.Logger.Warnf("Image cache failed: %v", err)
				} else {
					m.Images[i] = *cache.ToImage()
				}
			}
		}
	}
	entities.Member.PutMulti().DatastoreKeys(memberKeys...).Cache(
		fmt.Sprintf(ckAllMembersTemplate, artist.Key),
	).MustUpdate(req, members)
	// Profile keys mirror the member keys (same string IDs, no parent).
	profileKeys := make([]*datastore.Key, len(memberKeys))
	for i, key := range memberKeys {
		profileKeys[i] = entities.MemberPublicProfile.NewKey(req, key.StringID(), nil)
	}
	entities.MemberPublicProfile.PutMulti().DatastoreKeys(profileKeys...).Cache(
		fmt.Sprintf(ckAllMembersTemplate, artist.Key),
	).MustUpdate(req, profiles)
	return memberKeys, members, nil
}
// Next advances the iterator, buffering up to bufferSize results from the
// datastore at a time and resuming from the saved cursor position. It
// returns true when it.result holds the next token, false when the
// iteration is exhausted or a query error occurred (stored in it.err).
func (it *Iterator) Next() bool {
	// Serve from the in-memory buffer while results remain.
	if it.offset+1 < len(it.buffer) {
		it.offset++
		it.result = &Token{Kind: it.kind, Hash: it.buffer[it.offset]}
		return true
	}
	if it.done {
		return false
	}
	// Reset buffer and offset
	it.offset = 0
	it.buffer = make([]string, 0, bufferSize)
	// Create query
	// TODO (panamafrancis) Keys only query?
	q := datastore.NewQuery(it.kind).Limit(bufferSize)
	if !it.isAll {
		// Filter on the direction {subject,objekt...}
		q = q.Filter(it.dir.String()+" =", it.name)
	}
	// Get last cursor position; an undecodable cursor (e.g. first run)
	// silently starts the query from the beginning.
	cursor, err := datastore.DecodeCursor(it.last)
	if err == nil {
		q = q.Start(cursor)
	}
	// Buffer the keys of the next 50 matches
	t := q.Run(it.qs.context)
	for {
		// Quirk of the datastore, you cannot pass a nil value to Next()
		// even if you just want the keys
		var k *datastore.Key
		skip := false
		if it.kind == quadKind {
			temp := new(QuadEntry)
			k, err = t.Next(temp)
			// Skip if quad has been deleted
			if len(temp.Added) <= len(temp.Deleted) {
				skip = true
			}
		} else {
			temp := new(NodeEntry)
			k, err = t.Next(temp)
			// Skip if node has been deleted
			if temp.Size == 0 {
				skip = true
			}
		}
		if err == datastore.Done {
			it.done = true
			break
		}
		if err != nil {
			clog.Errorf("Error fetching next entry %v", err)
			it.err = err
			return false
		}
		if !skip {
			it.buffer = append(it.buffer, k.StringID())
		}
	}
	// Save cursor position
	cursor, err = t.Cursor()
	if err == nil {
		it.last = cursor.String()
	}
	// Protect against bad queries
	if it.done && len(it.buffer) == 0 {
		clog.Warningf("Query did not return any results")
		return false
	}
	// First result
	it.result = &Token{Kind: it.kind, Hash: it.buffer[it.offset]}
	return true
}
// populateDeviceId copies the key's string identifier onto the device's
// DeviceId field.
func populateDeviceId(k *datastore.Key, d *Device) {
	d.DeviceId = k.StringID()
}