// ProfileMetrics builds metrics about profiles func ProfileMetrics(start time.Time) (created int, edited int, total int, err error) { db, err := h.GetConnection() if err != nil { glog.Errorf("ProfileMetrics: %s", err.Error()) return } err = db.QueryRow(`SELECT COUNT(*) FROM profiles`).Scan(&total) if err != nil { return } err = db.QueryRow(`SELECT COUNT(*) FROM profiles WHERE created >= $1`, start, ).Scan( &created, ) if err != nil { return } err = db.QueryRow(` SELECT COUNT(*) FROM profiles WHERE profile_name !~ 'user*' AND avatar_id IS NOT NULL`, ).Scan( &edited, ) return }
// GetAttendeeID returns the attendee id of a profile func GetAttendeeID(eventID int64, profileID int64) (int64, int, error) { // Open db connection and retrieve resource db, err := h.GetConnection() if err != nil { glog.Errorf("h.GetConnection() %+v", err) return 0, http.StatusInternalServerError, err } var attendeeID int64 err = db.QueryRow(` SELECT attendee_id FROM attendees WHERE event_id = $1 AND profile_id = $2`, eventID, profileID, ).Scan( &attendeeID, ) if err == sql.ErrNoRows { return 0, http.StatusNotFound, fmt.Errorf("attendee not found") } else if err != nil { glog.Errorf("db.QueryRow(%d, %d) %+v", eventID, profileID, err) return 0, http.StatusInternalServerError, fmt.Errorf("Database query failed") } return attendeeID, http.StatusOK, nil }
// GetHuddleTitle returns the title of the huddle func GetHuddleTitle(id int64) string { // Get from cache if it's available mcKey := fmt.Sprintf(mcHuddleKeys[c.CacheTitle], id) if val, ok := c.GetString(mcKey); ok { return val } // Retrieve resource db, err := h.GetConnection() if err != nil { glog.Errorf("h.GetConmection() %+v", err) return "" } var title string err = db.QueryRow(` SELECT title FROM huddles WHERE huddle_id = $1`, id, ).Scan( &title, ) if err != nil { glog.Errorf("row.Scan() %+v", err) return "" } // Update cache c.SetString(mcKey, title, mcTTL) return title }
// GetMicrocosmIDForItem provides a cheap way to fetch an id for an item func GetMicrocosmIDForItem(itemTypeID int64, itemID int64) int64 { db, err := h.GetConnection() if err != nil { glog.Error(err) return 0 } var microcosmID int64 err = db.QueryRow(`--GetMicrocosmIdForItem SELECT microcosm_id FROM flags WHERE item_type_id = $1 AND item_id = $2`, itemTypeID, itemID, ).Scan( µcosmID, ) if err != nil { glog.Error(err) return 0 } return microcosmID }
// GetMenu returns a menu for a site func GetMenu(siteID int64) ([]h.LinkType, int, error) { db, err := h.GetConnection() if err != nil { return []h.LinkType{}, http.StatusInternalServerError, err } rows, err := db.Query(` SELECT href ,title ,"text" FROM menus WHERE site_id = $1 ORDER BY sequence ASC`, siteID, ) if err != nil { glog.Errorf("tx.Query(%d) %+v", siteID, err) return []h.LinkType{}, http.StatusInternalServerError, fmt.Errorf("Database query failed") } defer rows.Close() ems := []h.LinkType{} for rows.Next() { m := h.LinkType{} s := sql.NullString{} err = rows.Scan( &m.Href, &s, &m.Text, ) if err != nil { glog.Errorf("rows.Scan() %+v", err) return []h.LinkType{}, http.StatusInternalServerError, fmt.Errorf("Row parsing error") } if s.Valid { m.Title = s.String } ems = append(ems, m) } err = rows.Err() if err != nil { glog.Errorf("rows.Err() %+v", err) return []h.LinkType{}, http.StatusInternalServerError, fmt.Errorf("Error fetching rows") } rows.Close() return ems, http.StatusOK, nil }
// GetProfileID fetches a profileID given a userID func GetProfileID(siteID int64, userID int64) (int64, int, error) { if siteID == 0 || userID == 0 { return 0, http.StatusOK, nil } // Get from cache if it's available // // This map of siteId+userId = profileId is never expected to change, so // this cache key is unique and does not conform to the cache flushing // mechanism mcKey := fmt.Sprintf("s%d_u%d", siteID, userID) if val, ok := c.GetInt64(mcKey); ok { return val, http.StatusOK, nil } var profileID int64 db, err := h.GetConnection() if err != nil { glog.Error(err) return profileID, http.StatusInternalServerError, err } err = db.QueryRow(`--GetProfileId SELECT profile_id FROM profiles WHERE site_id = $1 AND user_id = $2`, siteID, userID, ).Scan( &profileID, ) if err == sql.ErrNoRows { glog.Warning(err) return profileID, http.StatusNotFound, fmt.Errorf( "Profile for site (%d) and user (%d) not found.", siteID, userID, ) } else if err != nil { glog.Error(err) return profileID, http.StatusInternalServerError, fmt.Errorf("Database query failed: %v", err.Error()) } c.SetInt64(mcKey, profileID, mcTTL) return profileID, http.StatusOK, nil }
// GetProfileOptionsDefaults returns the default options for this site func GetProfileOptionsDefaults(siteID int64) (ProfileOptionType, int, error) { db, err := h.GetConnection() if err != nil { return ProfileOptionType{}, http.StatusInternalServerError, err } rows, err := db.Query(` SELECT COALESCE(s.send_email, p.send_email) AS send_email ,COALESCE(s.send_sms, p.send_sms) AS send_sms FROM platform_options p LEFT JOIN ( SELECT send_email ,send_sms FROM site_options WHERE site_id = $1 ) s ON 1=1`, siteID, ) if err != nil { return ProfileOptionType{}, http.StatusInternalServerError, fmt.Errorf("Database query failed: %v", err.Error()) } defer rows.Close() var m ProfileOptionType for rows.Next() { m = ProfileOptionType{} err = rows.Scan( &m.SendEMail, &m.SendSMS, ) if err != nil { return ProfileOptionType{}, http.StatusInternalServerError, fmt.Errorf("Row parsing error: %v", err.Error()) } } err = rows.Err() if err != nil { return ProfileOptionType{}, http.StatusInternalServerError, fmt.Errorf("Error fetching rows: %v", err.Error()) } rows.Close() m.IsDiscouraged = false m.ShowDOB = false m.ShowDOBYear = false return m, http.StatusOK, nil }
// GetProfileOptions returns the options for a profile func GetProfileOptions(profileID int64) (ProfileOptionType, int, error) { // Get from cache if it's available mcKey := fmt.Sprintf(mcProfileKeys[c.CacheOptions], profileID) if val, ok := c.Get(mcKey, ProfileOptionType{}); ok { m := val.(ProfileOptionType) return m, http.StatusOK, nil } db, err := h.GetConnection() if err != nil { return ProfileOptionType{}, http.StatusInternalServerError, err } var m ProfileOptionType err = db.QueryRow(` SELECT profile_id ,show_dob_date ,show_dob_year ,send_email ,send_sms ,is_discouraged FROM profile_options WHERE profile_id = $1`, profileID, ).Scan( &m.ProfileID, &m.ShowDOB, &m.ShowDOBYear, &m.SendEMail, &m.SendSMS, &m.IsDiscouraged, ) if err == sql.ErrNoRows { return ProfileOptionType{}, http.StatusNotFound, fmt.Errorf("Resource with profile ID %d not found", profileID) } else if err != nil { return ProfileOptionType{}, http.StatusInternalServerError, fmt.Errorf("Database query failed: %v", err.Error()) } // Update cache c.Set(mcKey, m, mcTTL) return m, http.StatusOK, nil }
// GetUpdateOptionByUpdateType returns the notification settings (email, sms) // for a given user and update type. func GetUpdateOptionByUpdateType( profileID int64, updateTypeID int64, ) ( UpdateOptionType, int, error, ) { db, err := h.GetConnection() if err != nil { return UpdateOptionType{}, http.StatusInternalServerError, err } var m UpdateOptionType err = db.QueryRow(` SELECT uo.profile_id ,uo.update_type_id ,ut.description ,uo.send_email ,uo.send_sms FROM update_options uo LEFT JOIN update_types ut ON uo.update_type_id = ut.update_type_id WHERE uo.profile_id = $1 AND uo.update_type_id = $2`, profileID, updateTypeID, ).Scan( &m.ProfileID, &m.UpdateTypeID, &m.Description, &m.SendEmail, &m.SendSMS, ) if err == sql.ErrNoRows { return UpdateOptionType{}, http.StatusNotFound, fmt.Errorf("Update options for profile ID %d not found", profileID) } else if err != nil { return UpdateOptionType{}, http.StatusInternalServerError, fmt.Errorf("Database query failed: %v", err.Error()) } return m, http.StatusOK, nil }
// IsAttending indicates whether the given profile is attending the event func IsAttending(profileID int64, eventID int64) (bool, error) { if profileID == 0 || eventID == 0 { return false, nil } var attendeeIDs []int64 key := fmt.Sprintf(mcEventKeys[c.CacheProfileIds], eventID) attendeeIDs, ok := c.GetInt64Slice(key) if !ok { db, err := h.GetConnection() if err != nil { return false, err } rows, err := db.Query(` SELECT profile_id FROM attendees WHERE event_id = $1 AND state_id = 1`, eventID, ) if err != nil { return false, err } for rows.Next() { var attendeeID int64 err = rows.Scan(&attendeeID) attendeeIDs = append(attendeeIDs, attendeeID) } c.SetInt64Slice(key, attendeeIDs, mcTTL) } for _, id := range attendeeIDs { if profileID == id { return true, nil } } return false, nil }
// FetchSummaries populates a partially populated microcosm struct func (m *MicrocosmType) FetchSummaries( siteID int64, profileID int64, ) ( int, error, ) { profile, status, err := GetProfileSummary(siteID, m.Meta.CreatedByID) if err != nil { return status, err } m.Meta.CreatedBy = profile if m.Meta.EditedByNullable.Valid { profile, status, err := GetProfileSummary(siteID, m.Meta.EditedByNullable.Int64) if err != nil { return status, err } m.Meta.EditedBy = profile } db, err := h.GetConnection() if err != nil { return http.StatusInternalServerError, err } var unread bool err = db.QueryRow( `SELECT has_unread(2, $1, $2)`, m.ID, profileID, ).Scan( &unread, ) if err != nil { glog.Errorf("db.QueryRow() %+v", err) return http.StatusInternalServerError, err } m.Meta.Flags.Unread = unread return http.StatusOK, nil }
// IsBanned returns true if the user is banned for the given site func IsBanned(siteID int64, userID int64) bool { if siteID == 0 || userID == 0 { return false } // Get from cache if it's available // // This map of siteID+userID = profileId is never expected to change, so // this cache key is unique and does not conform to the cache flushing // mechanism mcKey := fmt.Sprintf(banCacheKey, siteID, userID) if val, ok := c.GetBool(mcKey); ok { return val } var isBanned bool db, err := h.GetConnection() if err != nil { return false } err = db.QueryRow(`--IsBanned SELECT EXISTS( SELECT 1 FROM bans WHERE site_id = $1 AND user_id = $2 )`, siteID, userID, ).Scan( &isBanned, ) if err == sql.ErrNoRows { return false } else if err != nil { return false } c.SetBool(mcKey, isBanned, mcTTL) return isBanned }
// UserGenMetrics builds metrics about user activity func UserGenMetrics( start time.Time, ) ( signins int, comments int, convs int, err error, ) { db, err := h.GetConnection() if err != nil { glog.Errorf("UserGenMetrics: %s", err.Error()) return } err = db.QueryRow( `SELECT COUNT(*) FROM profiles where profiles.last_active > $1`, start, ).Scan( &signins, ) if err != nil { return } err = db.QueryRow( `SELECT COUNT(*) FROM comments WHERE created >= $1`, start, ).Scan( &comments, ) if err != nil { return } err = db.QueryRow( `SELECT COUNT(*) FROM conversations WHERE created >= $1`, start, ).Scan( &convs, ) return }
// UpdateLastActive marks a profile as being active func UpdateLastActive(profileID int64, lastActive time.Time) (int, error) { db, err := h.GetConnection() if err != nil { glog.Errorf("h.GetConnection() %+v", err) return http.StatusInternalServerError, fmt.Errorf("Could not get a database connection: %v", err.Error()) } tx, err := db.Begin() if err != nil { return http.StatusInternalServerError, fmt.Errorf("Could not start transaction: %v", err.Error()) } defer tx.Rollback() _, err = tx.Exec(`--UpdateLastActive UPDATE profiles SET last_active = $2 WHERE profile_id = $1;`, profileID, lastActive, ) if err != nil { nerr := tx.Rollback() if nerr != nil { glog.Errorf("Cannot rollback: %+v", nerr) } return http.StatusInternalServerError, fmt.Errorf("Update of last active failed: %v", err.Error()) } err = tx.Commit() if err != nil { return http.StatusInternalServerError, fmt.Errorf("Transaction failed: %v", err.Error()) } PurgeCacheByScope(c.CacheDetail, h.ItemTypes[h.ItemTypeProfile], profileID) return http.StatusOK, nil }
// GetUnreadHuddleCount fetches the current unread huddle count func (m *ProfileType) GetUnreadHuddleCount() (int, error) { // Get from cache if it's available mcKey := fmt.Sprintf(mcProfileKeys[c.CacheCounts], m.ID) if i, ok := c.GetInt64(mcKey); ok { m.Meta.Stats = append( m.Meta.Stats, h.StatType{Metric: "unreadHuddles", Value: i}, ) return http.StatusOK, nil } db, err := h.GetConnection() if err != nil { return http.StatusInternalServerError, err } var unreadHuddles int64 err = db.QueryRow(`--GetUnreadHuddleCount SELECT unread_huddles FROM profiles WHERE profile_id = $1`, m.ID, ).Scan( &unreadHuddles, ) if err != nil { return http.StatusInternalServerError, fmt.Errorf("Error fetching row: %v", err.Error()) } m.Meta.Stats = append( m.Meta.Stats, h.StatType{Metric: "unreadHuddles", Value: unreadHuddles}, ) c.SetInt64(mcKey, unreadHuddles, mcTTL) return http.StatusOK, nil }
// GetUserByEmailAddress performs a case-insensitive search for any matching // user and returns it. func GetUserByEmailAddress(email string) (UserType, int, error) { if strings.Trim(email, " ") == "" { return UserType{}, http.StatusBadRequest, fmt.Errorf("You must specify an email address") } db, err := h.GetConnection() if err != nil { return UserType{}, http.StatusInternalServerError, err } // Note that we match emails based on full case-insensitivity. // The design decision behind this is that there are no major email // providers out there that honour case sensitivity on the local part // (before the @) of an email address, and that the benefits to the // end user that incorrectly enters their email address (either with // CAPS LOCK on, or using a mobile device that upper-cased the first // char) far outweighs the risk to security. // // This scenario is far more likely when the users email has been // provided and the user stubbed rather than created as a by-product // of logging into the system var m UserType err = db.QueryRow(` SELECT user_id FROM users WHERE LOWER(email) = LOWER($1)`, email, ).Scan( &m.ID, ) if err == sql.ErrNoRows { return UserType{}, http.StatusNotFound, fmt.Errorf("Resource with email %v not found", email) } else if err != nil { return UserType{}, http.StatusInternalServerError, fmt.Errorf("Database query failed: %+v", err) } return GetUser(m.ID) }
// UserIsOnSite returns true if the given userId exists as a profile on the // given site. func UserIsOnSite(userID int64, siteID int64) bool { db, err := h.GetConnection() if err != nil { return false } var val bool err = db.QueryRow(`--UserIsOnSite SELECT COUNT(*) > 0 FROM profiles WHERE site_id = $1 AND user_id = $2`, siteID, userID, ).Scan(&val) if err != nil { return false } return val }
// DecrementProfileCommentCount decrements the comment count func DecrementProfileCommentCount(profileID int64) { db, err := h.GetConnection() if err != nil { glog.Error(err) return } _, err = db.Exec(`--Update Profile Comment Count UPDATE profiles SET comment_count = comment_count - 1 WHERE profile_id = $1`, profileID, ) if err != nil { glog.Error(err) return } PurgeCacheByScope(c.CacheDetail, h.ItemTypes[h.ItemTypeProfile], profileID) }
// ForumMetrics builds metrics about sites func ForumMetrics() (total int, engaged int, err error) { db, err := h.GetConnection() if err != nil { glog.Errorf("ForumMetrics: %s", err.Error()) return } err = db.QueryRow(`SELECT COUNT(*) FROM sites`).Scan(&total) if err != nil { return } err = db.QueryRow(` SELECT COUNT(*) FROM ( SELECT s.site_id FROM sites s JOIN microcosms m ON s.site_id = m.site_id JOIN conversations c ON m.microcosm_id = c.microcosm_id JOIN comments cm ON c.conversation_id = cm.item_id UNION SELECT s.site_id FROM sites s JOIN microcosms m ON s.site_id = m.site_id JOIN events e ON m.microcosm_id = e.microcosm_id JOIN comments cm ON e.event_id = cm.item_id WHERE logo_url !~ $1 AND ( SELECT COUNT(*) FROM profiles p WHERE p.site_id = s.site_id ) > 1 GROUP BY s.site_id ) AS t`, defaultLogoURL, ).Scan( &engaged, ) return }
// IncrementViewCount increments the views of an item func IncrementViewCount(itemTypeID int64, itemID int64) { // No transaction as we don't care for accuracy on these updates // Note: This function doesn't even return errors, we don't even care // if the occasional INSERT fails. db, err := h.GetConnection() if err != nil { glog.Error(err) return } // Integrity insert, gets rolled up and updated on cron _, err = db.Exec( `INSERT INTO views(item_type_id, item_id) VALUES ($1, $2)`, itemTypeID, itemID, ) if err != nil { glog.Error(err) return } }
// GetAttributeID fetches the id of an attribute func GetAttributeID( itemTypeID int64, itemID int64, key string, ) ( int64, int, error, ) { db, err := h.GetConnection() if err != nil { return 0, http.StatusInternalServerError, err } var attrID int64 err = db.QueryRow(` SELECT attribute_id FROM attribute_keys WHERE item_type_id = $1 AND item_id = $2 AND key = $3 `, itemTypeID, itemID, key, ).Scan( &attrID, ) if err == sql.ErrNoRows { return attrID, http.StatusNotFound, fmt.Errorf("Attribute not found.") } else if err != nil { return attrID, http.StatusInternalServerError, fmt.Errorf("Database query failed: %v", err.Error()) } return attrID, http.StatusOK, nil }
// GetRoleProfiles fetches multiple profiles belonging to a role func GetRoleProfiles( siteID int64, roleID int64, limit int64, offset int64, ) ( []ProfileSummaryType, int64, int64, int, error, ) { db, err := h.GetConnection() if err != nil { return []ProfileSummaryType{}, 0, 0, http.StatusInternalServerError, err } rows, err := db.Query(` SELECT COUNT(*) OVER() AS total ,rp.profile_id FROM role_profiles rp, roles r WHERE r.role_id = rp.role_id AND r.role_id = $1 ORDER BY rp.profile_id ASC LIMIT $2 OFFSET $3`, roleID, limit, offset, ) if err != nil { return []ProfileSummaryType{}, 0, 0, http.StatusInternalServerError, fmt.Errorf("Database query failed: %v", err.Error()) } defer rows.Close() // Get a list of the identifiers of the items to return var total int64 ids := []int64{} for rows.Next() { var id int64 err = rows.Scan( &total, &id, ) if err != nil { return []ProfileSummaryType{}, 0, 0, http.StatusInternalServerError, fmt.Errorf("Row parsing error: %v", err.Error()) } ids = append(ids, id) } err = rows.Err() if err != nil { return []ProfileSummaryType{}, 0, 0, http.StatusInternalServerError, fmt.Errorf("Error fetching rows: %v", err.Error()) } rows.Close() // Make a request for each identifier var wg1 sync.WaitGroup req := make(chan ProfileSummaryRequest) defer close(req) for seq, id := range ids { go HandleProfileSummaryRequest(siteID, id, seq, req) wg1.Add(1) } // Receive the responses and check for errors resps := []ProfileSummaryRequest{} for i := 0; i < len(ids); i++ { resp := <-req wg1.Done() resps = append(resps, resp) } wg1.Wait() for _, resp := range resps { if resp.Err != nil { return []ProfileSummaryType{}, 0, 0, resp.Status, resp.Err } } // Sort them sort.Sort(ProfileSummaryRequestBySeq(resps)) // Extract the values ems := []ProfileSummaryType{} for _, resp := range resps { ems = append(ems, resp.Item) } pages := h.GetPageCount(total, limit) maxOffset := h.GetMaxOffset(total, limit) if offset > maxOffset { return []ProfileSummaryType{}, 0, 0, 
http.StatusBadRequest, fmt.Errorf( "not enough records, offset (%d) would return an empty page", offset, ) } return ems, total, pages, http.StatusOK, nil }
// GetRoleProfile returns a single profile for a role func GetRoleProfile( siteID int64, roleID int64, profileID int64, ) ( ProfileSummaryType, int, error, ) { // Retrieve resources db, err := h.GetConnection() if err != nil { return ProfileSummaryType{}, http.StatusInternalServerError, err } rows, err := db.Query(` SELECT profile_id FROM role_profiles WHERE role_id = $1 AND profile_id = $2`, roleID, profileID, ) if err != nil { return ProfileSummaryType{}, http.StatusInternalServerError, fmt.Errorf("Database query failed: %v", err.Error()) } defer rows.Close() m := ProfileSummaryType{} for rows.Next() { err = rows.Scan( &m.ID, ) if err != nil { return ProfileSummaryType{}, http.StatusInternalServerError, fmt.Errorf("Error fetching row: %v", err.Error()) } // Make a request the profile summary req := make(chan ProfileSummaryRequest) defer close(req) go HandleProfileSummaryRequest(siteID, m.ID, 0, req) // Receive the response resp := <-req if resp.Err != nil { return ProfileSummaryType{}, resp.Status, resp.Err } m = resp.Item } err = rows.Err() if err != nil { return ProfileSummaryType{}, http.StatusInternalServerError, fmt.Errorf("Error fetching rows: %v", err.Error()) } rows.Close() return m, http.StatusOK, nil }
// GetCommunicationOptions returns a user's update options if present, // otherwise it returns the default preference for the given update type. func GetCommunicationOptions( siteID int64, profileID int64, updateTypeID int64, itemTypeID int64, itemID int64, ) ( UpdateOptionType, int, error, ) { _, status, err := GetProfileOptions(profileID) if err != nil { glog.Errorf("GetProfileOptions(%d) %+v", profileID, err) // Can't do anything here as the profile_id fkey constraint will fail return UpdateOptionType{}, status, fmt.Errorf("Insert of update options failed") } db, err := h.GetConnection() if err != nil { glog.Errorf("h.GetConnection() %+v", err) return UpdateOptionType{}, http.StatusInternalServerError, err } rows, err := db.Query(` SELECT send_email ,send_sms ,description FROM get_communication_options($1, $2, $3, $4, $5) LEFT JOIN update_types a ON update_type_id = $5`, siteID, itemID, itemTypeID, profileID, updateTypeID, ) if err != nil { glog.Errorf( "db.Query(%d, %d, %d, %d, %d) %+v", siteID, itemID, itemTypeID, profileID, updateTypeID, err, ) return UpdateOptionType{}, http.StatusInternalServerError, fmt.Errorf("Database query failed") } defer rows.Close() var m UpdateOptionType for rows.Next() { m = UpdateOptionType{} err = rows.Scan( &m.SendEmail, &m.SendSMS, &m.Description, ) if err != nil { glog.Errorf("rows.Scan() %+v", err) return UpdateOptionType{}, http.StatusInternalServerError, fmt.Errorf("Row parsing error") } } err = rows.Err() if err != nil { glog.Errorf("rows.Err() %+v", err) return UpdateOptionType{}, http.StatusInternalServerError, fmt.Errorf("Error fetching rows") } rows.Close() m.ProfileID = int64(profileID) m.UpdateTypeID = int64(updateTypeID) return m, http.StatusOK, nil }
// resolveVbulletinURL maps a legacy vBulletin URL (query-string and path
// forms) onto the equivalent internal item type, item id, offset and API
// href. It mutates and returns the supplied redirect; any unresolvable
// input sets redirect.Status to http.StatusNotFound and returns early. A
// fully resolved redirect leaves with http.StatusMovedPermanently.
func resolveVbulletinURL(redirect Redirect, profileID int64) Redirect {
	// Query string checks are cheap, so we do those first
	qs := redirect.ParsedURL.Query()
	// vbqs maps known vBulletin query-string keys to item types; the first
	// key present with a positive id wins, and the old id is translated to
	// the imported item's new id via getNewID.
	for _, q := range vbqs {
		i := atoi64(qs.Get(q.key))
		if i > 0 {
			i = getNewID(redirect.Origin.OriginID, q.itemTypeID, i)
			if i == 0 {
				redirect.Status = http.StatusNotFound
				return redirect
			}
			redirect.ItemTypeID = q.itemTypeID
			redirect.ItemID = i
			break
		}
	}

	// Convert a vBulletin "page" number into an offset using the per-type
	// page size.
	i := atoi64(qs.Get("page"))
	if i > 0 {
		switch redirect.ItemTypeID {
		case h.ItemTypes[h.ItemTypeConversation]:
			redirect.Offset = pageToOffset(i, vbPostsPerThreadPage)
		case h.ItemTypes[h.ItemTypeMicrocosm]:
			redirect.Offset = pageToOffset(i, vbThreadsPerForumPage)
		default:
			redirect.Offset = pageToOffset(i, vbPostsPerThreadPage)
		}
	}

	// NOTE(review): `action` is assigned here but never read afterwards —
	// redirect.Action is not set from it, so a goto=newpost query string
	// currently has no effect. Looks like a latent bug; confirm whether
	// this was meant to be redirect.Action = ActionNewComment.
	action := qs.Get("goto")
	if action != "" {
		switch action {
		case "newpost":
			action = ActionNewComment
		default:
		}
	}

	// Move on to path based searches, but only if we haven't found anything.
	// These are potentially expensive, so these are ordered by the most likely
	// to the least likely... threads first, posts next, forums after, and then
	// the rest.

	// Look at the URL itself
	path := redirect.ParsedURL.Path

	// Thread redirects
	//
	// Each path pattern below follows the same shape: if nothing has been
	// resolved yet and the regex matches, translate the captured old id via
	// getNewID (0 means the item was not migrated → 404) and record the
	// item type/id, plus any action or offset the URL form implies.
	if redirect.ItemTypeID == 0 {
		matches := vbLastPostInThread.FindStringSubmatch(path)
		if len(matches) > 0 {
			redirect.ItemTypeID = h.ItemTypes[h.ItemTypeConversation]
			i = getNewID(
				redirect.Origin.OriginID,
				redirect.ItemTypeID,
				atoi64(matches[1]),
			)
			if i == 0 {
				redirect.Status = http.StatusNotFound
				return redirect
			}
			redirect.ItemID = i
			redirect.Action = ActionNewComment
		}
	}

	if redirect.ItemTypeID == 0 {
		matches := vbNewPostInThread.FindStringSubmatch(path)
		if len(matches) > 0 {
			redirect.ItemTypeID = h.ItemTypes[h.ItemTypeConversation]
			i = getNewID(
				redirect.Origin.OriginID,
				redirect.ItemTypeID,
				atoi64(matches[1]),
			)
			if i == 0 {
				redirect.Status = http.StatusNotFound
				return redirect
			}
			redirect.ItemID = i
			redirect.Action = ActionNewComment
		}
	}

	if redirect.ItemTypeID == 0 {
		matches := vbThreadPrintPage.FindStringSubmatch(path)
		if len(matches) > 0 {
			redirect.ItemTypeID = h.ItemTypes[h.ItemTypeConversation]
			i = getNewID(
				redirect.Origin.OriginID,
				redirect.ItemTypeID,
				atoi64(matches[1]),
			)
			if i == 0 {
				redirect.Status = http.StatusNotFound
				return redirect
			}
			redirect.ItemID = i
			// The second capture is the print view's page number
			redirect.Offset = pageToOffset(atoi64(matches[2]), vbPostsPerThreadPage)
		}
	}

	if redirect.ItemTypeID == 0 {
		matches := vbThreadPrint.FindStringSubmatch(path)
		if len(matches) > 0 {
			redirect.ItemTypeID = h.ItemTypes[h.ItemTypeConversation]
			i = getNewID(
				redirect.Origin.OriginID,
				redirect.ItemTypeID,
				atoi64(matches[1]),
			)
			if i == 0 {
				redirect.Status = http.StatusNotFound
				return redirect
			}
			redirect.ItemID = i
		}
	}

	if redirect.ItemTypeID == 0 {
		matches := vbThreadPage.FindStringSubmatch(path)
		if len(matches) > 0 {
			redirect.ItemTypeID = h.ItemTypes[h.ItemTypeConversation]
			i = getNewID(
				redirect.Origin.OriginID,
				redirect.ItemTypeID,
				atoi64(matches[1]),
			)
			if i == 0 {
				redirect.Status = http.StatusNotFound
				return redirect
			}
			redirect.ItemID = i
			// This is no longer done, we just send people to the first page
			// This is due to a massive increase in errors as it turned out there
			// were a lot of bad links in people's posts that vBulletin was more
			// forgiving about (it would auto-send people to the last page)
			//redirect.Offset = pageToOffset(atoi64(matches[2]), vbPostsPerThreadPage)
		}
	}

	if redirect.ItemTypeID == 0 {
		matches := vbThread.FindStringSubmatch(path)
		if len(matches) > 0 {
			redirect.ItemTypeID = h.ItemTypes[h.ItemTypeConversation]
			i = getNewID(
				redirect.Origin.OriginID,
				redirect.ItemTypeID,
				atoi64(matches[1]),
			)
			if i == 0 {
				redirect.Status = http.StatusNotFound
				return redirect
			}
			redirect.ItemID = i
		}
	}

	// Comment redirects
	if redirect.ItemTypeID == 0 {
		matches := vbPostPosition.FindStringSubmatch(path)
		if len(matches) > 0 {
			redirect.ItemTypeID = h.ItemTypes[h.ItemTypeComment]
			i = getNewID(
				redirect.Origin.OriginID,
				redirect.ItemTypeID,
				atoi64(matches[1]),
			)
			if i == 0 {
				redirect.Status = http.StatusNotFound
				return redirect
			}
			redirect.ItemID = i
			redirect.Action = ActionCommentInContext
		}
	}

	if redirect.ItemTypeID == 0 {
		matches := vbPost.FindStringSubmatch(path)
		if len(matches) > 0 {
			redirect.ItemTypeID = h.ItemTypes[h.ItemTypeComment]
			i = getNewID(
				redirect.Origin.OriginID,
				redirect.ItemTypeID,
				atoi64(matches[1]),
			)
			if i == 0 {
				redirect.Status = http.StatusNotFound
				return redirect
			}
			redirect.ItemID = i
			redirect.Action = ActionCommentInContext
		}
	}

	// Microcosm redirects
	if redirect.ItemTypeID == 0 {
		matches := vbForumPage.FindStringSubmatch(path)
		if len(matches) > 0 {
			redirect.ItemTypeID = h.ItemTypes[h.ItemTypeMicrocosm]
			i = getNewID(
				redirect.Origin.OriginID,
				redirect.ItemTypeID,
				atoi64(matches[1]),
			)
			if i == 0 {
				redirect.Status = http.StatusNotFound
				return redirect
			}
			redirect.ItemID = i
			redirect.Offset = pageToOffset(atoi64(matches[2]), vbThreadsPerForumPage)
		}
	}

	if redirect.ItemTypeID == 0 {
		matches := vbForum.FindStringSubmatch(path)
		if len(matches) > 0 {
			redirect.ItemTypeID = h.ItemTypes[h.ItemTypeMicrocosm]
			i = getNewID(
				redirect.Origin.OriginID,
				redirect.ItemTypeID,
				atoi64(matches[1]),
			)
			if i == 0 {
				redirect.Status = http.StatusNotFound
				return redirect
			}
			redirect.ItemID = i
		}
	}

	if redirect.ItemTypeID == 0 {
		matches := vbAnnouncement.FindStringSubmatch(path)
		if len(matches) > 0 {
			redirect.ItemTypeID = h.ItemTypes[h.ItemTypeMicrocosm]
			i = getNewID(
				redirect.Origin.OriginID,
				redirect.ItemTypeID,
				atoi64(matches[1]),
			)
			if i == 0 {
				redirect.Status = http.StatusNotFound
				return redirect
			}
			redirect.ItemID = i
		}
	}

	// Profile redirects
	if redirect.ItemTypeID == 0 {
		matches := vbMemberListLetter.FindStringSubmatch(path)
		if len(matches) > 0 {
			// Member list filtered by initial letter becomes a profile search
			redirect.ItemTypeID = h.ItemTypes[h.ItemTypeProfile]
			redirect.Action = ActionSearch
			redirect.Search = matches[1]
		}
	}

	if redirect.ItemTypeID == 0 {
		matches := vbMember.FindStringSubmatch(path)
		if len(matches) > 0 {
			redirect.ItemTypeID = h.ItemTypes[h.ItemTypeProfile]
			i = getNewID(
				redirect.Origin.OriginID,
				redirect.ItemTypeID,
				atoi64(matches[1]),
			)
			if i == 0 {
				redirect.Status = http.StatusNotFound
				return redirect
			}
			redirect.ItemID = i
		}
	}

	// Attachment redirects
	if redirect.ItemTypeID == 0 {
		matches := vbAttachment.FindStringSubmatch(path)
		if len(matches) > 0 {
			redirect.ItemTypeID = h.ItemTypes[h.ItemTypeAttachment]
			i = getNewID(
				redirect.Origin.OriginID,
				redirect.ItemTypeID,
				atoi64(matches[1]),
			)
			if i == 0 {
				redirect.Status = http.StatusNotFound
				return redirect
			}
			redirect.ItemID = i
		}
	}

	// Random URLs — whole-path matches with no captured id
	if redirect.ItemTypeID == 0 && vbMemberList.MatchString(path) {
		redirect.ItemTypeID = h.ItemTypes[h.ItemTypeProfile]
	}

	if redirect.ItemTypeID == 0 && vbOnline.MatchString(path) {
		redirect.ItemTypeID = h.ItemTypes[h.ItemTypeProfile]
		redirect.Action = ActionWhoIsOnline
	}

	if redirect.ItemTypeID == 0 && vbPMs.MatchString(path) {
		redirect.ItemTypeID = h.ItemTypes[h.ItemTypeHuddle]
	}

	if redirect.ItemTypeID == 0 && vbSubscription.MatchString(path) {
		redirect.ItemTypeID = h.ItemTypes[h.ItemTypeUpdate]
	}

	if redirect.ItemTypeID == 0 && vbUserCP.MatchString(path) {
		redirect.ItemTypeID = h.ItemTypes[h.ItemTypeUpdate]
	}

	// Construct the actual URLs to send people to
	canPaginate := false
	switch redirect.ItemTypeID {
	case h.ItemTypes[h.ItemTypeMicrocosm]:
		redirect.ItemType = h.ItemTypeMicrocosm
		if redirect.ItemID > 0 {
			redirect.URL.Href = fmt.Sprintf(
				"%s/%d",
				h.APITypeMicrocosm,
				redirect.ItemID,
			)
		} else {
			redirect.URL.Href = h.APITypeMicrocosm
		}
		redirect.URL.Rel = redirect.ItemType
		canPaginate = true
	case h.ItemTypes[h.ItemTypeConversation]:
		redirect.ItemType = h.ItemTypeConversation
		switch redirect.Action {
		case ActionNewComment:
			// "Jump to new post" links: resolve the viewer's last read time
			// and redirect to the next (or last) comment after it.
			if redirect.ItemID > 0 {
				t, _, err := models.GetLastReadTime(
					h.ItemTypes[h.ItemTypeConversation],
					redirect.ItemID,
					profileID,
				)
				if err != nil {
					redirect.Status = http.StatusNotFound
					return redirect
				}
				commentID, _, err := models.GetNextOrLastCommentID(
					h.ItemTypes[h.ItemTypeConversation],
					redirect.ItemID,
					t,
					profileID,
				)
				if err != nil {
					redirect.Status = http.StatusNotFound
					return redirect
				}
				// The redirect is re-targeted from the conversation to the
				// specific comment.
				redirect.ItemTypeID = h.ItemTypes[h.ItemTypeComment]
				redirect.ItemType = h.ItemTypeComment
				redirect.ItemID = commentID
				redirect.URL.Href = fmt.Sprintf(
					"%s/%d",
					h.APITypeComment,
					redirect.ItemID,
				)
				redirect.URL.Rel = redirect.ItemType
				redirect.Action = ActionCommentInContext
			} else {
				redirect.Status = http.StatusNotFound
				return redirect
			}
		default:
			if redirect.ItemID > 0 {
				redirect.URL.Href = fmt.Sprintf(
					"%s/%d",
					h.APITypeConversation,
					redirect.ItemID,
				)
				redirect.URL.Rel = redirect.ItemType
				canPaginate = true
			} else {
				redirect.Status = http.StatusNotFound
				return redirect
			}
		}
	case h.ItemTypes[h.ItemTypeComment]:
		redirect.ItemType = h.ItemTypeComment
		if redirect.ItemID > 0 {
			redirect.URL.Href = fmt.Sprintf(
				"%s/%d",
				h.APITypeComment,
				redirect.ItemID,
			)
			redirect.URL.Rel = redirect.ItemType
		} else {
			redirect.Status = http.StatusNotFound
			return redirect
		}
	case h.ItemTypes[h.ItemTypeAttachment]:
		// Attachments are served by file hash, so look up the SHA1 and
		// build an absolute URL on the site's subdomain.
		redirect.ItemType = h.ItemTypeAttachment
		db, err := h.GetConnection()
		if err != nil {
			redirect.Status = http.StatusNotFound
			return redirect
		}
		var fileSha1 string
		err = db.QueryRow(`--Get Attachment ID
SELECT file_sha1
  FROM attachments
 WHERE attachment_meta_id = $1`,
			redirect.ItemID,
		).Scan(&fileSha1)
		if err != nil {
			redirect.Status = http.StatusNotFound
			return redirect
		}
		site, _, err := models.GetSite(redirect.Origin.SiteID)
		if err != nil {
			redirect.Status = http.StatusNotFound
			return redirect
		}
		redirect.URL.Href = fmt.Sprintf(
			"https://%s.microco.sm%s/%s",
			site.SubdomainKey,
			h.APITypeFile,
			fileSha1,
		)
	case h.ItemTypes[h.ItemTypeHuddle]:
		redirect.ItemType = h.ItemTypeHuddle
		if redirect.ItemID > 0 {
			redirect.URL.Href = fmt.Sprintf(
				"%s/%d",
				h.APITypeHuddle,
				redirect.ItemID,
			)
		} else {
			redirect.URL.Href = h.APITypeHuddle
		}
		redirect.URL.Rel = redirect.ItemType
	case h.ItemTypes[h.ItemTypeProfile]:
		redirect.ItemType = h.ItemTypeProfile
		if redirect.ItemID > 0 {
			redirect.URL.Href = fmt.Sprintf(
				"%s/%d",
				h.APITypeProfile,
				redirect.ItemID,
			)
		} else {
			// No specific profile: list views (search / who's online / all)
			switch redirect.Action {
			case ActionSearch:
				redirect.URL.Href = h.APITypeProfile + "?q=" + redirect.Search
			case ActionWhoIsOnline:
				redirect.URL.Href = h.APITypeProfile + "?online=true"
			default:
				redirect.URL.Href = h.APITypeProfile
			}
			canPaginate = true
		}
		redirect.URL.Rel = redirect.ItemType
	case h.ItemTypes[h.ItemTypeUpdate]:
		redirect.ItemType = h.ItemTypeUpdate
		redirect.URL.Href = h.APITypeUpdate
		redirect.URL.Rel = redirect.ItemType
	default:
		redirect.Status = http.StatusNotFound
		return redirect
	}

	// Append offset if applicable (note: `url` here shadows the url package
	// for the rest of this block)
	if canPaginate && redirect.Offset > 0 {
		url, err := url.Parse(redirect.URL.Href)
		if err != nil {
			redirect.Status = http.StatusNotFound
			return redirect
		}
		q := url.Query()
		q.Add("offset", strconv.FormatInt(redirect.Offset, 10))
		url.RawQuery = q.Encode()
		redirect.URL.Href = url.String()
	} else {
		redirect.Offset = 0
	}

	redirect.Status = http.StatusMovedPermanently
	return redirect
}
// EmbedAllMedia is called after a revision has been created to perform media // embedding and gets all links in the revision and processes them all func EmbedAllMedia(revisionID int64) (int, error) { db, err := h.GetConnection() if err != nil { return http.StatusInternalServerError, err } // Are there any external links in this revision? rows, err := db.Query(`--EmbedAllMedia SELECT l.link_id ,l.short_url ,l.domain ,l.url ,l.inner_text ,l.created ,l.resolved_url ,l.resolved ,l.hits FROM revision_links r JOIN links l ON l.link_id = r.link_id WHERE r.revision_id = $1 GROUP BY l.link_id ,l.short_url ,l.domain ,l.url ,l.inner_text ,l.created ,l.resolved_url ,l.resolved ,l.hits `, revisionID, ) if err != nil { return http.StatusInternalServerError, fmt.Errorf("Get links failed: %v", err.Error()) } defer rows.Close() links := []Link{} for rows.Next() { link := Link{} err = rows.Scan( &link.ID, &link.ShortURL, &link.Domain, &link.URL, &link.Text, &link.Created, &link.ResolvedURL, &link.Resolved, &link.Hits, ) if err != nil { return http.StatusInternalServerError, fmt.Errorf("Row parsing error: %v", err.Error()) } links = append(links, link) } err = rows.Err() if err != nil { return http.StatusInternalServerError, fmt.Errorf("Error fetching rows: %v", err.Error()) } rows.Close() // Now process each one for _, link := range links { embedMediaForLink(link, revisionID) } return http.StatusOK, nil }
func (m *Link) fetchRewriteRule() (RewriteRule, int, error) { rewriteRule := RewriteRule{} if !m.rewriteRuleMayExist() { return rewriteRule, http.StatusOK, nil } db, err := h.GetConnection() if err != nil { return rewriteRule, http.StatusInternalServerError, err } rows, err := db.Query(` SELECT r.rule_id ,r.name As title ,r.match_regex ,r.replace_regex ,r.is_enabled ,r.sequence FROM rewrite_domains d JOIN rewrite_domain_rules dr ON dr.domain_id = d.domain_id JOIN rewrite_rules r ON r.rule_id = dr.rule_id WHERE r.is_enabled IS NOT FALSE AND $1 ~ d.domain_regex ORDER BY r.sequence`, m.Domain, ) if err != nil { return rewriteRule, http.StatusInternalServerError, fmt.Errorf("Get links failed: %+v", err) } defer rows.Close() rules := []RewriteRule{} for rows.Next() { rule := RewriteRule{} err = rows.Scan( &rule.ID, &rule.Title, &rule.RegexMatch, &rule.RegexReplace, &rule.Enabled, &rule.Sequence, ) if err != nil { return rewriteRule, http.StatusInternalServerError, fmt.Errorf("Row parsing error: %+v", err) } rules = append(rules, rule) } err = rows.Err() if err != nil { return rewriteRule, http.StatusInternalServerError, fmt.Errorf("Error fetching rows: %+v", err) } rows.Close() for _, rule := range rules { matched, err := regexp.Match(`(?i)`+rule.RegexMatch, []byte(m.URL)) if err != nil { glog.Errorf("%s %+v", "regexp.Compile(rule.RegexMatch)", err) continue } if matched { rule.Valid = true return rule, http.StatusOK, nil } } return rewriteRule, http.StatusOK, nil }
// GetUpdates retieves the list of updates for the given profile func GetUpdates( siteID int64, profileID int64, limit int64, offset int64, ) ( []UpdateType, int64, int64, int, error, ) { db, err := h.GetConnection() if err != nil { glog.Errorf("h.GetConnection() %+v", err) return []UpdateType{}, 0, 0, http.StatusInternalServerError, err } sqlQuery := `--GetUpdates WITH m AS ( SELECT m.microcosm_id FROM microcosms m LEFT JOIN permissions_cache p ON p.site_id = m.site_id AND p.item_type_id = 2 AND p.item_id = m.microcosm_id AND p.profile_id = $2 LEFT JOIN ignores i ON i.profile_id = $2 AND i.item_type_id = 2 AND i.item_id = m.microcosm_id WHERE m.site_id = $1 AND m.is_deleted IS NOT TRUE AND m.is_moderated IS NOT TRUE AND i.profile_id IS NULL AND ( (p.can_read IS NOT NULL AND p.can_read IS TRUE) OR (get_effective_permissions($1,m.microcosm_id,2,m.microcosm_id,$2)).can_read IS TRUE ) ) SELECT total ,update_id ,for_profile_id ,update_type_id ,item_type_id ,item_id ,created_by ,created ,site_id ,has_unread(COALESCE(parent_item_type_id, item_type_id), COALESCE(parent_item_id, item_id), $2) FROM ( SELECT COUNT(*) OVER() AS total ,rollup.update_id ,rollup.for_profile_id ,rollup.update_type_id ,rollup.item_type_id ,rollup.item_id ,rollup.created_by ,rollup.created ,rollup.site_id ,f.parent_item_type_id ,f.parent_item_id FROM flags f JOIN ( -- 1;'new_comment';'When a comment has been posted in an item you are watching' -- 4;'new_comment_in_huddle';'When you receive a new comment in a private message' SELECT u.update_id ,u.for_profile_id ,u.update_type_id ,u.item_type_id ,u.item_id ,u.created_by ,u.created ,$1 AS site_id FROM updates u JOIN ( SELECT MAX(u.update_id) AS update_id ,f.parent_item_type_id AS item_type_id ,f.parent_item_id AS item_id FROM updates u JOIN flags f ON f.item_type_id = u.item_type_id AND f.item_id = u.item_id LEFT JOIN ignores i ON i.profile_id = $2 AND ( (i.item_type_id = 3 AND i.item_id = u.created_by) OR (i.item_type_id = f.parent_item_type_id AND 
i.item_id = f.parent_item_id) ) LEFT JOIN huddle_profiles hp ON hp.huddle_id = f.parent_item_id AND hp.profile_id = u.for_profile_id AND f.parent_item_type_id = 5 WHERE u.for_profile_id = $2 AND i.profile_id IS NULL AND u.update_type_id IN (1, 4) AND f.microcosm_is_deleted IS NOT TRUE AND f.microcosm_is_moderated IS NOT TRUE AND f.item_is_deleted IS NOT TRUE AND f.item_is_moderated IS NOT TRUE AND f.parent_is_deleted IS NOT TRUE AND f.parent_is_moderated IS NOT TRUE AND ( f.microcosm_id IN (SELECT microcosm_id FROM m) OR hp.profile_id = u.for_profile_id ) GROUP BY f.parent_item_type_id ,f.parent_item_id ,f.site_id ) r ON r.update_id = u.update_id JOIN watchers w ON w.profile_id = $2 AND w.item_type_id = r.item_type_id AND w.item_id = r.item_id UNION -- 2;'reply_to_comment';'When a comment of yours is replied to' -- 3;'mentioned';'When you are @mentioned in a comment' SELECT u.update_id ,u.for_profile_id ,u.update_type_id ,u.item_type_id ,u.item_id ,u.created_by ,u.created ,$1 AS site_id FROM updates u WHERE update_id IN ( SELECT MAX(u.update_id) FROM updates u JOIN flags f ON f.item_type_id = u.item_type_id AND f.item_id = u.item_id LEFT JOIN huddle_profiles hp ON hp.huddle_id = f.parent_item_id AND hp.profile_id = u.for_profile_id AND f.parent_item_type_id = 5 LEFT JOIN ignores i ON i.profile_id = $2 AND ( (i.item_type_id = 3 AND i.item_id = u.created_by) OR (i.item_type_id = f.parent_item_type_id AND i.item_id = f.parent_item_id) ) WHERE u.for_profile_id = $2 AND i.profile_id IS NULL AND (u.update_type_id = 2 OR u.update_type_id = 3) -- replies (2) & mentions (3) AND f.site_id = $1 AND f.microcosm_is_deleted IS NOT TRUE AND f.microcosm_is_moderated IS NOT TRUE AND f.item_is_deleted IS NOT TRUE AND f.item_is_moderated IS NOT TRUE AND f.parent_is_deleted IS NOT TRUE AND f.parent_is_moderated IS NOT TRUE AND ( f.microcosm_id IN (SELECT microcosm_id FROM m) OR hp.profile_id = u.for_profile_id ) GROUP BY u.update_type_id ,u.item_type_id ,u.item_id ) UNION -- 
8;'new_item';'When a new item is created in a microcosm you are watching' SELECT u.update_id ,u.for_profile_id ,u.update_type_id ,u.item_type_id ,u.item_id ,u.created_by ,u.created ,$1 AS site_id FROM updates u WHERE update_id IN ( SELECT MAX(u.update_id) FROM updates u JOIN flags f ON f.item_type_id = u.item_type_id AND f.item_id = u.item_id AND f.microcosm_id IN (SELECT microcosm_id FROM m) JOIN watchers w ON w.profile_id = $2 AND w.item_type_id = 2 AND w.item_id = f.microcosm_id LEFT JOIN ignores i ON i.profile_id = $2 AND i.item_type_id = 3 AND i.item_id = u.created_by WHERE u.for_profile_id = $2 AND i.profile_id IS NULL AND u.update_type_id = 8 AND f.microcosm_is_deleted IS NOT TRUE AND f.microcosm_is_moderated IS NOT TRUE AND f.item_is_deleted IS NOT TRUE AND f.item_is_moderated IS NOT TRUE AND f.parent_is_deleted IS NOT TRUE AND f.parent_is_moderated IS NOT TRUE GROUP BY u.item_type_id, u.item_id ) ) AS rollup ON rollup.item_type_id = f.item_type_id AND rollup.item_id = f.item_id ORDER BY created DESC LIMIT $3 OFFSET $4 ) final_rollup` rows, err := db.Query(sqlQuery, siteID, profileID, limit, offset) if err != nil { glog.Errorf( "db.Query(%d, %d, %d, %d) %+v", profileID, siteID, limit, offset, err, ) return []UpdateType{}, 0, 0, http.StatusInternalServerError, fmt.Errorf("Database query failed") } defer rows.Close() var total int64 ems := []UpdateType{} for rows.Next() { var unread bool m := UpdateType{} err = rows.Scan( &total, &m.ID, &m.ForProfileID, &m.UpdateTypeID, &m.ItemTypeID, &m.ItemID, &m.Meta.CreatedByID, &m.Meta.Created, &m.SiteID, &unread, ) if err != nil { glog.Errorf("rows.Scan() %+v", err) return []UpdateType{}, 0, 0, http.StatusInternalServerError, fmt.Errorf("Row parsing error") } itemType, err := h.GetItemTypeFromInt(m.ItemTypeID) if err != nil { glog.Errorf("h.GetItemTypeFromInt(%d) %+v", m.ItemTypeID, err) return []UpdateType{}, 0, 0, http.StatusInternalServerError, err } m.ItemType = itemType m.Meta.Flags.Unread = unread ems = 
append(ems, m) } err = rows.Err() if err != nil { glog.Errorf("rows.Err() %+v", err) return []UpdateType{}, 0, 0, http.StatusInternalServerError, fmt.Errorf("Error fetching rows") } rows.Close() pages := h.GetPageCount(total, limit) maxOffset := h.GetMaxOffset(total, limit) if offset > maxOffset { glog.Infoln("offset > maxOffset") return []UpdateType{}, 0, 0, http.StatusBadRequest, fmt.Errorf("not enough records, "+ "offset (%d) would return an empty page.", offset) } // Get the first round of summaries var wg1 sync.WaitGroup chan1 := make(chan SummaryContainerRequest) defer close(chan1) seq := 0 for i := 0; i < len(ems); i++ { go HandleSummaryContainerRequest( siteID, h.ItemTypes[h.ItemTypeProfile], ems[i].Meta.CreatedByID, ems[i].ForProfileID, seq, chan1, ) wg1.Add(1) seq++ go HandleSummaryContainerRequest( siteID, ems[i].ItemTypeID, ems[i].ItemID, ems[i].ForProfileID, seq, chan1, ) wg1.Add(1) seq++ updateType, status, err := GetUpdateType(ems[i].UpdateTypeID) if err != nil { return []UpdateType{}, 0, 0, status, err } ems[i].UpdateType = updateType.Title } resps := []SummaryContainerRequest{} for i := 0; i < seq; i++ { resp := <-chan1 wg1.Done() resps = append(resps, resp) } wg1.Wait() for _, resp := range resps { if resp.Err != nil { return []UpdateType{}, 0, 0, resp.Status, resp.Err } } sort.Sort(SummaryContainerRequestsBySeq(resps)) // Insert the first round of summaries, and get the summaries for the // comments var wg2 sync.WaitGroup chan2 := make(chan SummaryContainerRequest) defer close(chan2) seq = 0 parentSeq := 0 for i := 0; i < len(ems); i++ { ems[i].Meta.CreatedBy = resps[seq].Item.Summary seq++ ems[i].Item = resps[seq].Item.Summary seq++ if ems[i].ItemTypeID == h.ItemTypes[h.ItemTypeComment] { comment := ems[i].Item.(CommentSummaryType) go HandleSummaryContainerRequest( siteID, comment.ItemTypeID, comment.ItemID, ems[i].ForProfileID, seq, chan2, ) parentSeq++ wg2.Add(1) } } parentResps := []SummaryContainerRequest{} for i := 0; i < parentSeq; i++ { 
resp := <-chan2 wg2.Done() parentResps = append(parentResps, resp) } wg2.Wait() for _, resp := range parentResps { if resp.Err != nil { return []UpdateType{}, 0, 0, resp.Status, resp.Err } } sort.Sort(SummaryContainerRequestsBySeq(parentResps)) // Insert the comment summaries, and get the summaries of the items the // comments are attached to var wg3 sync.WaitGroup chan3 := make(chan SummaryContainerRequest) defer close(chan3) parentSeq = 0 commentItemSeq := 0 for i := 0; i < len(ems); i++ { if ems[i].ItemTypeID == h.ItemTypes[h.ItemTypeComment] { comment := ems[i].Item.(CommentSummaryType) go HandleSummaryContainerRequest( siteID, comment.ItemTypeID, comment.ItemID, ems[i].ForProfileID, commentItemSeq, chan3, ) parentSeq++ commentItemSeq++ wg3.Add(1) ems[i].ParentItemTypeID = comment.ItemTypeID parentItemType, err := h.GetMapStringFromInt( h.ItemTypes, comment.ItemTypeID, ) if err != nil { return []UpdateType{}, 0, 0, http.StatusInternalServerError, err } ems[i].ParentItemType = parentItemType ems[i].ParentItemID = comment.ItemID } } commentResps := []SummaryContainerRequest{} for i := 0; i < commentItemSeq; i++ { resp := <-chan3 wg3.Done() commentResps = append(commentResps, resp) } wg3.Wait() for _, resp := range commentResps { if resp.Err != nil { return []UpdateType{}, 0, 0, resp.Status, resp.Err } } sort.Sort(SummaryContainerRequestsBySeq(commentResps)) commentItemSeq = 0 for i := 0; i < len(ems); i++ { if ems[i].ItemTypeID == h.ItemTypes[h.ItemTypeComment] { ems[i].ParentItem = commentResps[commentItemSeq].Item.Summary commentItemSeq++ } } return ems, total, pages, http.StatusOK, nil }
// GetUpdate fetches the detail of a single update for a profile func GetUpdate( siteID int64, updateID int64, profileID int64, ) ( UpdateType, int, error, ) { // Try fetching from cache mcKey := fmt.Sprintf(mcUpdateKeys[c.CacheDetail], updateID) if val, ok := c.Get(mcKey, UpdateType{}); ok { m := val.(UpdateType) m.FetchSummaries(siteID) return m, http.StatusOK, nil } db, err := h.GetConnection() if err != nil { return UpdateType{}, http.StatusInternalServerError, err } var m UpdateType err = db.QueryRow(` SELECT update_id ,for_profile_id ,update_type_id ,item_type_id ,item_id ,created_by ,created ,site_id FROM updates WHERE site_id = $1 AND update_id = $2 AND for_profile_id = $3`, siteID, updateID, profileID, ).Scan( &m.ID, &m.ForProfileID, &m.UpdateTypeID, &m.ItemTypeID, &m.ItemID, &m.Meta.CreatedByID, &m.Meta.Created, &m.SiteID, ) if err == sql.ErrNoRows { return UpdateType{}, http.StatusNotFound, fmt.Errorf("Update not found: %v", err.Error()) } else if err != nil { return UpdateType{}, http.StatusInternalServerError, fmt.Errorf("Error fetching update: %v", err.Error()) } itemType, err := h.GetItemTypeFromInt(m.ItemTypeID) if err != nil { return UpdateType{}, http.StatusInternalServerError, err } m.ItemType = itemType m.FetchSummaries(siteID) c.Set(mcKey, m, mcTTL) return m, http.StatusOK, nil }
func searchMetaData( siteID int64, searchURL url.URL, profileID int64, m SearchResults, ) ( SearchResults, int, error, ) { limit, offset, status, err := h.GetLimitAndOffset(searchURL.Query()) if err != nil { glog.Errorf("h.GetLimitAndOffset(searchUrl.Query()) %+v", err) return m, status, err } start := time.Now() // The goal is to produce a piece of SQL that looks at just the flags table // and fetches a list of the items that we care about. // // Our target SQL should look roughly like this (fetches all viewable comments): // // WITH m AS ( // SELECT microcosm_id // FROM microcosms // WHERE site_id = 2 // AND (get_effective_permissions(2,microcosm_id,2,microcosm_id,7)).can_read IS TRUE // ), h AS ( // SELECT huddle_id // FROM huddle_profiles // WHERE profile_id = 7 // ) // SELECT item_type_id // ,item_id // FROM flags // WHERE item_type_id = 4 // AND ( // site_is_deleted // AND microcosm_is_deleted // AND parent_is_deleted // AND item_is_deleted // ) IS NOT TRUE // AND ( // (-- Things that are public by default and low in quantity // item_type_id IN (1,3) // OR parent_item_type_id IN (3) // ) // OR (-- Things directly in microcosms // item_type_id IN (2,6,7,9) // AND COALESCE(microcosm_id, item_id) IN (SELECT microcosm_id FROM m) // ) // OR (-- Comments on things in microcosms // item_type_id = 4 // AND parent_item_type_id IN (6,7,9) // AND microcosm_id IN (SELECT microcosm_id FROM m) // ) // OR (-- Huddles // item_type_id = 5 // AND item_id IN (SELECT huddle_id FROM h) // ) // OR (-- Comments on things in huddles // item_type_id = 4 // AND parent_item_type_id = 5 // AND parent_item_id IN (SELECT huddle_id FROM h) // ) // ) // ORDER BY last_modified DESC // LIMIT 25; // // The key is to only put into the query the bits that will definitely be // used. 
// Process search options var filterFollowing string var filterItemTypes string var filterItems string var includeHuddles bool var includeComments bool var joinEvents bool orderBy := `rank DESC ,f.last_modified DESC` switch m.Query.Sort { case "date": orderBy = `f.last_modified DESC` case "oldest": joinEvents = true orderBy = `e."when" ASC` case "newest": joinEvents = true orderBy = `e."when" DESC` } if m.Query.Following { filterFollowing = ` JOIN watchers w ON w.item_type_id = f.item_type_id AND w.item_id = f.item_id AND w.profile_id = $2` } if len(m.Query.ItemTypeIDs) > 0 { var inList string // Take care of the item types for i, v := range m.Query.ItemTypeIDs { switch v { case h.ItemTypes[h.ItemTypeComment]: includeComments = true case h.ItemTypes[h.ItemTypeHuddle]: includeHuddles = true } inList += strconv.FormatInt(v, 10) if i < len(m.Query.ItemTypeIDs)-1 { inList += `,` } } if len(m.Query.ItemTypeIDs) == 1 { filterItemTypes = fmt.Sprintf(` AND f.item_type_id = %d`, m.Query.ItemTypeIDs[0], ) } else { if includeComments { filterItemTypes = ` AND ( (f.item_type_id <> 4 AND f.item_type_id IN (` + inList + `)) OR (f.item_type_id = 4 AND f.parent_item_type_id IN (` + inList + `)) )` } else { filterItemTypes = ` AND f.item_type_id IN (` + inList + `)` } } // Take care of the item ids, which are only valid when we have item // types if len(m.Query.ItemIDs) > 0 { if len(m.Query.ItemIDs) == 1 { if includeComments { filterItems = fmt.Sprintf(` AND ( (f.item_type_id <> 4 AND f.item_id = %d) OR (f.item_type_id = 4 AND f.parent_item_id = %d) )`, m.Query.ItemIDs[0], m.Query.ItemIDs[0], ) } else { filterItems = fmt.Sprintf(` AND f.item_id = %d`, m.Query.ItemIDs[0], ) } } else { var inList = `` for i, v := range m.Query.ItemIDs { inList += strconv.FormatInt(v, 10) if i < len(m.Query.ItemIDs)-1 { inList += `,` } } if includeComments { filterItems = ` AND ( (f.item_type_id <> 4 AND f.item_id IN (` + inList + `)) OR (f.item_type_id = 4 AND f.parent_item_id IN (` + inList + `)) )` 
} else { filterItems = ` AND f.item_id IN (` + inList + `)` } } } } var filterProfileID string if m.Query.ProfileID > 0 { filterProfileID = fmt.Sprintf(` AND f.created_by = %d`, m.Query.ProfileID) } var filterMicrocosmIDs string if len(m.Query.MicrocosmIDs) > 0 { if len(m.Query.MicrocosmIDs) == 1 { filterMicrocosmIDs = fmt.Sprintf(` AND f.microcosm_id = %d`, m.Query.MicrocosmIDs[0]) includeHuddles = false } else { var inList = `` for i, v := range m.Query.MicrocosmIDs { inList += strconv.FormatInt(v, 10) if i < len(m.Query.MicrocosmIDs)-1 { inList += `,` } } filterMicrocosmIDs = ` AND f.microcosm_id IN (` + inList + `)` } } var filterModified string if !m.Query.SinceTime.IsZero() || !m.Query.UntilTime.IsZero() { if m.Query.UntilTime.IsZero() { filterModified = fmt.Sprintf(` AND f.last_modified > to_timestamp(%d)`, m.Query.SinceTime.Unix(), ) } else if m.Query.SinceTime.IsZero() { filterModified = fmt.Sprintf(` AND f.last_modified < to_timestamp(%d)`, m.Query.UntilTime.Unix(), ) } else { filterModified = fmt.Sprintf(` AND f.last_modified BETWEEN to_timestamp(%d) AND to_timestamp(%d)`, m.Query.SinceTime.Unix(), m.Query.UntilTime.Unix(), ) } } var ( filterEventsJoin string filterEventsWhere string ) if !m.Query.EventAfterTime.IsZero() || !m.Query.EventBeforeTime.IsZero() { joinEvents = true if m.Query.EventBeforeTime.IsZero() { filterModified = fmt.Sprintf(` AND e."when" > to_timestamp(%d)`, m.Query.EventAfterTime.Unix(), ) } else if m.Query.EventAfterTime.IsZero() { filterModified = fmt.Sprintf(` AND e."when" < to_timestamp(%d)`, m.Query.EventBeforeTime.Unix(), ) } else { filterModified = fmt.Sprintf(` AND e."when" BETWEEN to_timestamp(%d) AND to_timestamp(%d)`, m.Query.EventAfterTime.Unix(), m.Query.EventBeforeTime.Unix(), ) } } if joinEvents || m.Query.Attendee { filterEventsJoin = ` JOIN events e ON e.event_id = f.item_id` if m.Query.Attendee { filterEventsJoin += ` JOIN attendees a ON a.event_id = e.event_id AND a.profile_id = ` + strconv.FormatInt(profileID, 10) 
+ ` AND a.state_id = 1` } } // These make up our SQL query sqlSelect := ` SELECT 0,0,0,NULL,NULL,NOW(),0,''` sqlFromWhere := ` FROM flags WHERE 1=2` // Query with only meta data sqlWith := ` WITH m AS ( SELECT m.microcosm_id FROM microcosms m LEFT JOIN permissions_cache p ON p.site_id = m.site_id AND p.item_type_id = 2 AND p.item_id = m.microcosm_id AND p.profile_id = $2 LEFT JOIN ignores i ON i.profile_id = $2 AND i.item_type_id = 2 AND i.item_id = m.microcosm_id WHERE m.site_id = $1 AND m.is_deleted IS NOT TRUE AND m.is_moderated IS NOT TRUE AND i.profile_id IS NULL AND ( (p.can_read IS NOT NULL AND p.can_read IS TRUE) OR (get_effective_permissions($1,m.microcosm_id,2,m.microcosm_id,$2)).can_read IS TRUE ) )` if includeHuddles || includeComments { if filterModified != "" { sqlWith += `, h AS ( SELECT hp.huddle_id FROM huddle_profiles hp JOIN flags f ON f.item_type_id = 5 AND f.item_id = hp.huddle_id WHERE hp.profile_id = $2` + filterModified + ` )` } else { sqlWith += `, h AS ( SELECT huddle_id FROM huddle_profiles WHERE profile_id = $2 )` } } sqlSelect = ` SELECT f.item_type_id ,f.item_id ,f.parent_item_type_id ,f.parent_item_id ,f.last_modified ,0.5 AS rank ,'' AS highlight` sqlFromWhere = ` FROM flags f LEFT JOIN ignores i ON i.profile_id = $2 AND i.item_type_id = f.item_type_id AND i.item_id = f.item_id` + filterFollowing + filterEventsJoin + ` WHERE f.site_id = $1 AND i.profile_id IS NULL` + filterModified + filterMicrocosmIDs + filterItemTypes + filterItems + filterProfileID + filterEventsWhere + ` AND f.microcosm_is_deleted IS NOT TRUE AND f.microcosm_is_moderated IS NOT TRUE AND f.parent_is_deleted IS NOT TRUE AND f.parent_is_moderated IS NOT TRUE AND f.item_is_deleted IS NOT TRUE AND f.item_is_moderated IS NOT TRUE AND ( (-- Things that are public by default and low in quantity f.item_type_id IN (1,3) OR f.parent_item_type_id IN (3) ) OR (-- Things directly in microcosms f.item_type_id IN (2,6,7,9) AND COALESCE(f.microcosm_id, f.item_id) IN (SELECT 
microcosm_id FROM m) )` if includeComments { sqlFromWhere += ` OR (-- Comments on things in microcosms f.item_type_id = 4 AND f.parent_item_type_id IN (6,7,9) AND f.microcosm_id IN (SELECT microcosm_id FROM m) ) OR (-- Comments on things in huddles f.item_type_id = 4 AND f.parent_item_type_id = 5 AND f.parent_item_id IN (SELECT huddle_id FROM h) )` } if includeHuddles { sqlFromWhere += ` OR (-- Huddles f.item_type_id = 5 AND f.item_id IN (SELECT huddle_id FROM h) )` } sqlFromWhere += ` )` sqlOrderLimit := ` ORDER BY ` + orderBy + ` LIMIT $3 OFFSET $4` db, err := h.GetConnection() if err != nil { glog.Errorf("h.GetConnection() %+v", err) return m, http.StatusInternalServerError, err } var total int64 err = db.QueryRow( sqlWith+`SELECT COUNT(*)`+sqlFromWhere, siteID, profileID, ).Scan(&total) if err != nil { glog.Error(err) return m, http.StatusInternalServerError, err } // This nested query is used to run the `has_unread` query on only the rows // that are returned, rather than on all rows in the underlying query before // limit has been applied. 
rows, err := db.Query(` SELECT item_type_id ,item_id ,parent_item_type_id ,parent_item_id ,last_modified ,rank ,highlight ,has_unread(item_type_id, item_id, $2) FROM (`+ sqlWith+ sqlSelect+ sqlFromWhere+ sqlOrderLimit+ `) r`, siteID, profileID, limit, offset, ) if err != nil { glog.Errorf( "stmt.Query(%d, %s, %d, %d, %d) %+v", siteID, m.Query.Query, profileID, limit, offset, err, ) return m, http.StatusInternalServerError, fmt.Errorf("Database query failed") } defer rows.Close() rs := []SearchResult{} for rows.Next() { var r SearchResult err = rows.Scan( &r.ItemTypeID, &r.ItemID, &r.ParentItemTypeID, &r.ParentItemID, &r.LastModified, &r.Rank, &r.Highlight, &r.Unread, ) if err != nil { glog.Errorf("rows.Scan() %+v", err) return m, http.StatusInternalServerError, fmt.Errorf("Row parsing error") } itemType, err := h.GetMapStringFromInt(h.ItemTypes, r.ItemTypeID) if err != nil { glog.Errorf( "h.GetMapStringFromInt(h.ItemTypes, %d) %+v", r.ItemTypeID, err, ) return m, http.StatusInternalServerError, err } r.ItemType = itemType if r.ParentItemTypeID.Valid { parentItemType, err := h.GetMapStringFromInt(h.ItemTypes, r.ParentItemTypeID.Int64) if err != nil { glog.Errorf( "h.GetMapStringFromInt(h.ItemTypes, %d) %+v", r.ParentItemTypeID.Int64, err, ) return m, http.StatusInternalServerError, err } r.ParentItemType = parentItemType } rs = append(rs, r) } err = rows.Err() if err != nil { glog.Errorf("rows.Err() %+v", err) return m, http.StatusInternalServerError, fmt.Errorf("Error fetching rows") } rows.Close() pages := h.GetPageCount(total, limit) maxOffset := h.GetMaxOffset(total, limit) if offset > maxOffset { glog.Infoln("offset > maxOffset") return m, http.StatusBadRequest, fmt.Errorf("not enough records, "+ "offset (%d) would return an empty page.", offset) } // Extract the summaries var wg1 sync.WaitGroup req := make(chan SummaryContainerRequest) defer close(req) seq := 0 for i := 0; i < len(rs); i++ { go HandleSummaryContainerRequest( siteID, rs[i].ItemTypeID, 
rs[i].ItemID, profileID, seq, req, ) seq++ wg1.Add(1) if rs[i].ParentItemID.Valid && rs[i].ParentItemID.Int64 > 0 { go HandleSummaryContainerRequest( siteID, rs[i].ParentItemTypeID.Int64, rs[i].ParentItemID.Int64, profileID, seq, req, ) seq++ wg1.Add(1) } } resps := []SummaryContainerRequest{} for i := 0; i < seq; i++ { resp := <-req wg1.Done() resps = append(resps, resp) } wg1.Wait() for _, resp := range resps { if resp.Err != nil { return m, resp.Status, resp.Err } } sort.Sort(SummaryContainerRequestsBySeq(resps)) seq = 0 for i := 0; i < len(rs); i++ { rs[i].Item = resps[seq].Item.Summary seq++ if rs[i].ParentItemID.Valid && rs[i].ParentItemID.Int64 > 0 { rs[i].ParentItem = resps[seq].Item.Summary seq++ } } m.Results = h.ConstructArray( rs, "result", total, limit, offset, pages, &searchURL, ) // return milliseconds m.TimeTaken = time.Now().Sub(start).Nanoseconds() / 1000000 return m, http.StatusOK, nil }