// GetSummaryContainer wraps GetSummary func GetSummaryContainer( siteID int64, itemTypeID int64, itemID int64, profileID int64, ) ( SummaryContainer, int, error, ) { summary, status, err := GetSummary(siteID, itemTypeID, itemID, profileID) if err != nil { return SummaryContainer{}, status, err } item := SummaryContainer{} item.ItemTypeID = itemTypeID itemType, _ := h.GetMapStringFromInt(h.ItemTypes, itemTypeID) item.ItemType = itemType item.ItemID = itemID item.Summary = summary item.Valid = true return item, http.StatusOK, nil }
// FetchSummaries fetches profile/item summary for a update entry. // Called post SELECT or post-GetFromCache func (m *UpdateType) FetchSummaries(siteID int64) (int, error) { profile, status, err := GetSummary( siteID, h.ItemTypes[h.ItemTypeProfile], m.Meta.CreatedByID, m.ForProfileID, ) if err != nil { return status, err } m.Meta.CreatedBy = profile itemSummary, status, err := GetSummary( siteID, m.ItemTypeID, m.ItemID, m.ForProfileID, ) if err != nil { return status, err } m.Item = itemSummary if m.ItemTypeID == h.ItemTypes[h.ItemTypeComment] { comment := itemSummary.(CommentSummaryType) parent, status, err := GetSummary( siteID, comment.ItemTypeID, comment.ItemID, m.ForProfileID, ) if err != nil { return status, err } m.ParentItem = parent m.ParentItemTypeID = comment.ItemTypeID parentItemType, err := h.GetMapStringFromInt( h.ItemTypes, comment.ItemTypeID, ) if err != nil { return http.StatusInternalServerError, err } m.ParentItemType = parentItemType m.ParentItemID = comment.ItemID } updateType, status, err := GetUpdateType(m.UpdateTypeID) if err != nil { return status, err } m.UpdateType = updateType.Title return http.StatusOK, nil }
// GetUpdates retrieves the list of updates for the given profile.
//
// Returns one page of updates (limit/offset), the total number of matching
// updates, the page count, an HTTP status code and any error encountered.
//
// The query rolls up three categories of update, keeping only the newest
// update per item: new comments on watched items and huddles (update types
// 1 and 4), replies and @mentions (types 2 and 3), and new items in watched
// microcosms (type 8). The CTE `m` restricts everything to microcosms the
// viewer can read (permission cache first, falling back to
// get_effective_permissions), excluding deleted/moderated/ignored ones.
func GetUpdates(
	siteID int64,
	profileID int64,
	limit int64,
	offset int64,
) (
	[]UpdateType,
	int64,
	int64,
	int,
	error,
) {
	db, err := h.GetConnection()
	if err != nil {
		glog.Errorf("h.GetConnection() %+v", err)
		return []UpdateType{}, 0, 0, http.StatusInternalServerError, err
	}

	sqlQuery := `--GetUpdates
WITH m AS (
    SELECT m.microcosm_id
      FROM microcosms m
      LEFT JOIN permissions_cache p ON p.site_id = m.site_id
                                   AND p.item_type_id = 2
                                   AND p.item_id = m.microcosm_id
                                   AND p.profile_id = $2
      LEFT JOIN ignores i ON i.profile_id = $2
                         AND i.item_type_id = 2
                         AND i.item_id = m.microcosm_id
     WHERE m.site_id = $1
       AND m.is_deleted IS NOT TRUE
       AND m.is_moderated IS NOT TRUE
       AND i.profile_id IS NULL
       AND (
               (p.can_read IS NOT NULL AND p.can_read IS TRUE)
            OR (get_effective_permissions($1,m.microcosm_id,2,m.microcosm_id,$2)).can_read IS TRUE
           )
)
SELECT total
      ,update_id
      ,for_profile_id
      ,update_type_id
      ,item_type_id
      ,item_id
      ,created_by
      ,created
      ,site_id
      ,has_unread(COALESCE(parent_item_type_id, item_type_id), COALESCE(parent_item_id, item_id), $2)
  FROM (
        SELECT COUNT(*) OVER() AS total
              ,rollup.update_id
              ,rollup.for_profile_id
              ,rollup.update_type_id
              ,rollup.item_type_id
              ,rollup.item_id
              ,rollup.created_by
              ,rollup.created
              ,rollup.site_id
              ,f.parent_item_type_id
              ,f.parent_item_id
          FROM flags f
          JOIN (
                -- 1;'new_comment';'When a comment has been posted in an item you are watching'
                -- 4;'new_comment_in_huddle';'When you receive a new comment in a private message'
                SELECT u.update_id
                      ,u.for_profile_id
                      ,u.update_type_id
                      ,u.item_type_id
                      ,u.item_id
                      ,u.created_by
                      ,u.created
                      ,$1 AS site_id
                  FROM updates u
                  JOIN (
                        SELECT MAX(u.update_id) AS update_id
                              ,f.parent_item_type_id AS item_type_id
                              ,f.parent_item_id AS item_id
                          FROM updates u
                          JOIN flags f ON f.item_type_id = u.item_type_id
                                      AND f.item_id = u.item_id
                          LEFT JOIN ignores i ON i.profile_id = $2
                                             AND (
                                                     (i.item_type_id = 3 AND i.item_id = u.created_by)
                                                  OR (i.item_type_id = f.parent_item_type_id AND i.item_id = f.parent_item_id)
                                                 )
                          LEFT JOIN huddle_profiles hp ON hp.huddle_id = f.parent_item_id
                                                      AND hp.profile_id = u.for_profile_id
                                                      AND f.parent_item_type_id = 5
                         WHERE u.for_profile_id = $2
                           AND i.profile_id IS NULL
                           AND u.update_type_id IN (1, 4)
                           AND f.microcosm_is_deleted IS NOT TRUE
                           AND f.microcosm_is_moderated IS NOT TRUE
                           AND f.item_is_deleted IS NOT TRUE
                           AND f.item_is_moderated IS NOT TRUE
                           AND f.parent_is_deleted IS NOT TRUE
                           AND f.parent_is_moderated IS NOT TRUE
                           AND (
                                   f.microcosm_id IN (SELECT microcosm_id FROM m)
                                OR hp.profile_id = u.for_profile_id
                               )
                         GROUP BY f.parent_item_type_id
                                 ,f.parent_item_id
                                 ,f.site_id
                       ) r ON r.update_id = u.update_id
                  JOIN watchers w ON w.profile_id = $2
                                 AND w.item_type_id = r.item_type_id
                                 AND w.item_id = r.item_id
                 UNION
                -- 2;'reply_to_comment';'When a comment of yours is replied to'
                -- 3;'mentioned';'When you are @mentioned in a comment'
                SELECT u.update_id
                      ,u.for_profile_id
                      ,u.update_type_id
                      ,u.item_type_id
                      ,u.item_id
                      ,u.created_by
                      ,u.created
                      ,$1 AS site_id
                  FROM updates u
                 WHERE update_id IN (
                           SELECT MAX(u.update_id)
                             FROM updates u
                             JOIN flags f ON f.item_type_id = u.item_type_id
                                         AND f.item_id = u.item_id
                             LEFT JOIN huddle_profiles hp ON hp.huddle_id = f.parent_item_id
                                                         AND hp.profile_id = u.for_profile_id
                                                         AND f.parent_item_type_id = 5
                             LEFT JOIN ignores i ON i.profile_id = $2
                                                AND (
                                                        (i.item_type_id = 3 AND i.item_id = u.created_by)
                                                     OR (i.item_type_id = f.parent_item_type_id AND i.item_id = f.parent_item_id)
                                                    )
                            WHERE u.for_profile_id = $2
                              AND i.profile_id IS NULL
                              AND (u.update_type_id = 2 OR u.update_type_id = 3) -- replies (2) & mentions (3)
                              AND f.site_id = $1
                              AND f.microcosm_is_deleted IS NOT TRUE
                              AND f.microcosm_is_moderated IS NOT TRUE
                              AND f.item_is_deleted IS NOT TRUE
                              AND f.item_is_moderated IS NOT TRUE
                              AND f.parent_is_deleted IS NOT TRUE
                              AND f.parent_is_moderated IS NOT TRUE
                              AND (
                                      f.microcosm_id IN (SELECT microcosm_id FROM m)
                                   OR hp.profile_id = u.for_profile_id
                                  )
                            GROUP BY u.update_type_id
                                    ,u.item_type_id
                                    ,u.item_id
                       )
                 UNION
                -- 8;'new_item';'When a new item is created in a microcosm you are watching'
                SELECT u.update_id
                      ,u.for_profile_id
                      ,u.update_type_id
                      ,u.item_type_id
                      ,u.item_id
                      ,u.created_by
                      ,u.created
                      ,$1 AS site_id
                  FROM updates u
                 WHERE update_id IN (
                           SELECT MAX(u.update_id)
                             FROM updates u
                             JOIN flags f ON f.item_type_id = u.item_type_id
                                         AND f.item_id = u.item_id
                                         AND f.microcosm_id IN (SELECT microcosm_id FROM m)
                             JOIN watchers w ON w.profile_id = $2
                                            AND w.item_type_id = 2
                                            AND w.item_id = f.microcosm_id
                             LEFT JOIN ignores i ON i.profile_id = $2
                                                AND i.item_type_id = 3
                                                AND i.item_id = u.created_by
                            WHERE u.for_profile_id = $2
                              AND i.profile_id IS NULL
                              AND u.update_type_id = 8
                              AND f.microcosm_is_deleted IS NOT TRUE
                              AND f.microcosm_is_moderated IS NOT TRUE
                              AND f.item_is_deleted IS NOT TRUE
                              AND f.item_is_moderated IS NOT TRUE
                              AND f.parent_is_deleted IS NOT TRUE
                              AND f.parent_is_moderated IS NOT TRUE
                            GROUP BY u.item_type_id, u.item_id
                       )
               ) AS rollup ON rollup.item_type_id = f.item_type_id
                          AND rollup.item_id = f.item_id
         ORDER BY created DESC
         LIMIT $3
        OFFSET $4
       ) final_rollup`

	rows, err := db.Query(sqlQuery, siteID, profileID, limit, offset)
	if err != nil {
		// NOTE(review): the log prints profileID before siteID, but the
		// query above was called with siteID first — the logged values are
		// swapped relative to their labels.
		glog.Errorf(
			"db.Query(%d, %d, %d, %d) %+v",
			profileID,
			siteID,
			limit,
			offset,
			err,
		)
		return []UpdateType{}, 0, 0, http.StatusInternalServerError,
			fmt.Errorf("Database query failed")
	}
	defer rows.Close()

	var total int64
	ems := []UpdateType{}
	for rows.Next() {
		var unread bool
		m := UpdateType{}
		err = rows.Scan(
			&total,
			&m.ID,
			&m.ForProfileID,
			&m.UpdateTypeID,
			&m.ItemTypeID,
			&m.ItemID,
			&m.Meta.CreatedByID,
			&m.Meta.Created,
			&m.SiteID,
			&unread,
		)
		if err != nil {
			glog.Errorf("rows.Scan() %+v", err)
			return []UpdateType{}, 0, 0, http.StatusInternalServerError,
				fmt.Errorf("Row parsing error")
		}

		itemType, err := h.GetItemTypeFromInt(m.ItemTypeID)
		if err != nil {
			glog.Errorf("h.GetItemTypeFromInt(%d) %+v", m.ItemTypeID, err)
			return []UpdateType{}, 0, 0, http.StatusInternalServerError, err
		}
		m.ItemType = itemType

		m.Meta.Flags.Unread = unread

		ems = append(ems, m)
	}
	err = rows.Err()
	if err != nil {
		glog.Errorf("rows.Err() %+v", err)
		return []UpdateType{}, 0, 0, http.StatusInternalServerError,
			fmt.Errorf("Error fetching rows")
	}
	rows.Close()

	pages := h.GetPageCount(total, limit)
	maxOffset := h.GetMaxOffset(total, limit)

	if offset > maxOffset {
		glog.Infoln("offset > maxOffset")
		return []UpdateType{}, 0, 0, http.StatusBadRequest,
			fmt.Errorf("not enough records, "+
				"offset (%d) would return an empty page.", offset)
	}

	// Round 1: concurrently fetch, per update, the author's profile summary
	// and the item's summary. seq numbers the requests (two per update) so
	// responses can be re-ordered after the unordered channel receive.
	var wg1 sync.WaitGroup
	chan1 := make(chan SummaryContainerRequest)
	defer close(chan1)

	seq := 0
	for i := 0; i < len(ems); i++ {
		go HandleSummaryContainerRequest(
			siteID,
			h.ItemTypes[h.ItemTypeProfile],
			ems[i].Meta.CreatedByID,
			ems[i].ForProfileID,
			seq,
			chan1,
		)
		wg1.Add(1)
		seq++

		go HandleSummaryContainerRequest(
			siteID,
			ems[i].ItemTypeID,
			ems[i].ItemID,
			ems[i].ForProfileID,
			seq,
			chan1,
		)
		wg1.Add(1)
		seq++

		// Resolve the human-readable update type while the goroutines run.
		updateType, status, err := GetUpdateType(ems[i].UpdateTypeID)
		if err != nil {
			return []UpdateType{}, 0, 0, status, err
		}
		ems[i].UpdateType = updateType.Title
	}

	resps := []SummaryContainerRequest{}
	for i := 0; i < seq; i++ {
		resp := <-chan1
		wg1.Done()
		resps = append(resps, resp)
	}
	wg1.Wait()

	for _, resp := range resps {
		if resp.Err != nil {
			return []UpdateType{}, 0, 0, resp.Status, resp.Err
		}
	}

	// Restore request order: resps[2i] is the profile, resps[2i+1] the item.
	sort.Sort(SummaryContainerRequestsBySeq(resps))

	// Round 2: insert the first round of summaries, and for comments fetch
	// the summary of the parent item the comment was posted on.
	var wg2 sync.WaitGroup
	chan2 := make(chan SummaryContainerRequest)
	defer close(chan2)

	seq = 0
	parentSeq := 0
	for i := 0; i < len(ems); i++ {
		ems[i].Meta.CreatedBy = resps[seq].Item.Summary
		seq++

		ems[i].Item = resps[seq].Item.Summary
		seq++

		if ems[i].ItemTypeID == h.ItemTypes[h.ItemTypeComment] {
			comment := ems[i].Item.(CommentSummaryType)

			go HandleSummaryContainerRequest(
				siteID,
				comment.ItemTypeID,
				comment.ItemID,
				ems[i].ForProfileID,
				seq,
				chan2,
			)
			parentSeq++
			wg2.Add(1)
		}
	}

	parentResps := []SummaryContainerRequest{}
	for i := 0; i < parentSeq; i++ {
		resp := <-chan2
		wg2.Done()
		parentResps = append(parentResps, resp)
	}
	wg2.Wait()

	for _, resp := range parentResps {
		if resp.Err != nil {
			return []UpdateType{}, 0, 0, resp.Status, resp.Err
		}
	}
	// NOTE(review): parentResps is validated and sorted here but never
	// inserted into ems — round 3 below issues the identical requests
	// (comment.ItemTypeID/comment.ItemID) again and uses those results
	// instead. The two rounds look like duplicate work that could be
	// consolidated; confirm against callers before removing either.
	sort.Sort(SummaryContainerRequestsBySeq(parentResps))

	// Round 3: fetch (again) the summaries of the items the comments are
	// attached to, and record the parent item type/ID metadata.
	var wg3 sync.WaitGroup
	chan3 := make(chan SummaryContainerRequest)
	defer close(chan3)

	parentSeq = 0
	commentItemSeq := 0
	for i := 0; i < len(ems); i++ {
		if ems[i].ItemTypeID == h.ItemTypes[h.ItemTypeComment] {
			comment := ems[i].Item.(CommentSummaryType)

			go HandleSummaryContainerRequest(
				siteID,
				comment.ItemTypeID,
				comment.ItemID,
				ems[i].ForProfileID,
				commentItemSeq,
				chan3,
			)
			parentSeq++
			commentItemSeq++
			wg3.Add(1)

			ems[i].ParentItemTypeID = comment.ItemTypeID
			parentItemType, err := h.GetMapStringFromInt(
				h.ItemTypes,
				comment.ItemTypeID,
			)
			if err != nil {
				return []UpdateType{}, 0, 0,
					http.StatusInternalServerError, err
			}
			ems[i].ParentItemType = parentItemType
			ems[i].ParentItemID = comment.ItemID
		}
	}

	commentResps := []SummaryContainerRequest{}
	for i := 0; i < commentItemSeq; i++ {
		resp := <-chan3
		wg3.Done()
		commentResps = append(commentResps, resp)
	}
	wg3.Wait()

	for _, resp := range commentResps {
		if resp.Err != nil {
			return []UpdateType{}, 0, 0, resp.Status, resp.Err
		}
	}
	sort.Sort(SummaryContainerRequestsBySeq(commentResps))

	// Insert the parent item summaries in comment order.
	commentItemSeq = 0
	for i := 0; i < len(ems); i++ {
		if ems[i].ItemTypeID == h.ItemTypes[h.ItemTypeComment] {
			ems[i].ParentItem = commentResps[commentItemSeq].Item.Summary
			commentItemSeq++
		}
	}

	return ems, total, pages, http.StatusOK, nil
}
func searchMetaData( siteID int64, searchURL url.URL, profileID int64, m SearchResults, ) ( SearchResults, int, error, ) { limit, offset, status, err := h.GetLimitAndOffset(searchURL.Query()) if err != nil { glog.Errorf("h.GetLimitAndOffset(searchUrl.Query()) %+v", err) return m, status, err } start := time.Now() // The goal is to produce a piece of SQL that looks at just the flags table // and fetches a list of the items that we care about. // // Our target SQL should look roughly like this (fetches all viewable comments): // // WITH m AS ( // SELECT microcosm_id // FROM microcosms // WHERE site_id = 2 // AND (get_effective_permissions(2,microcosm_id,2,microcosm_id,7)).can_read IS TRUE // ), h AS ( // SELECT huddle_id // FROM huddle_profiles // WHERE profile_id = 7 // ) // SELECT item_type_id // ,item_id // FROM flags // WHERE item_type_id = 4 // AND ( // site_is_deleted // AND microcosm_is_deleted // AND parent_is_deleted // AND item_is_deleted // ) IS NOT TRUE // AND ( // (-- Things that are public by default and low in quantity // item_type_id IN (1,3) // OR parent_item_type_id IN (3) // ) // OR (-- Things directly in microcosms // item_type_id IN (2,6,7,9) // AND COALESCE(microcosm_id, item_id) IN (SELECT microcosm_id FROM m) // ) // OR (-- Comments on things in microcosms // item_type_id = 4 // AND parent_item_type_id IN (6,7,9) // AND microcosm_id IN (SELECT microcosm_id FROM m) // ) // OR (-- Huddles // item_type_id = 5 // AND item_id IN (SELECT huddle_id FROM h) // ) // OR (-- Comments on things in huddles // item_type_id = 4 // AND parent_item_type_id = 5 // AND parent_item_id IN (SELECT huddle_id FROM h) // ) // ) // ORDER BY last_modified DESC // LIMIT 25; // // The key is to only put into the query the bits that will definitely be // used. 
// Process search options var filterFollowing string var filterItemTypes string var filterItems string var includeHuddles bool var includeComments bool var joinEvents bool orderBy := `rank DESC ,f.last_modified DESC` switch m.Query.Sort { case "date": orderBy = `f.last_modified DESC` case "oldest": joinEvents = true orderBy = `e."when" ASC` case "newest": joinEvents = true orderBy = `e."when" DESC` } if m.Query.Following { filterFollowing = ` JOIN watchers w ON w.item_type_id = f.item_type_id AND w.item_id = f.item_id AND w.profile_id = $2` } if len(m.Query.ItemTypeIDs) > 0 { var inList string // Take care of the item types for i, v := range m.Query.ItemTypeIDs { switch v { case h.ItemTypes[h.ItemTypeComment]: includeComments = true case h.ItemTypes[h.ItemTypeHuddle]: includeHuddles = true } inList += strconv.FormatInt(v, 10) if i < len(m.Query.ItemTypeIDs)-1 { inList += `,` } } if len(m.Query.ItemTypeIDs) == 1 { filterItemTypes = fmt.Sprintf(` AND f.item_type_id = %d`, m.Query.ItemTypeIDs[0], ) } else { if includeComments { filterItemTypes = ` AND ( (f.item_type_id <> 4 AND f.item_type_id IN (` + inList + `)) OR (f.item_type_id = 4 AND f.parent_item_type_id IN (` + inList + `)) )` } else { filterItemTypes = ` AND f.item_type_id IN (` + inList + `)` } } // Take care of the item ids, which are only valid when we have item // types if len(m.Query.ItemIDs) > 0 { if len(m.Query.ItemIDs) == 1 { if includeComments { filterItems = fmt.Sprintf(` AND ( (f.item_type_id <> 4 AND f.item_id = %d) OR (f.item_type_id = 4 AND f.parent_item_id = %d) )`, m.Query.ItemIDs[0], m.Query.ItemIDs[0], ) } else { filterItems = fmt.Sprintf(` AND f.item_id = %d`, m.Query.ItemIDs[0], ) } } else { var inList = `` for i, v := range m.Query.ItemIDs { inList += strconv.FormatInt(v, 10) if i < len(m.Query.ItemIDs)-1 { inList += `,` } } if includeComments { filterItems = ` AND ( (f.item_type_id <> 4 AND f.item_id IN (` + inList + `)) OR (f.item_type_id = 4 AND f.parent_item_id IN (` + inList + `)) )` 
} else { filterItems = ` AND f.item_id IN (` + inList + `)` } } } } var filterProfileID string if m.Query.ProfileID > 0 { filterProfileID = fmt.Sprintf(` AND f.created_by = %d`, m.Query.ProfileID) } var filterMicrocosmIDs string if len(m.Query.MicrocosmIDs) > 0 { if len(m.Query.MicrocosmIDs) == 1 { filterMicrocosmIDs = fmt.Sprintf(` AND f.microcosm_id = %d`, m.Query.MicrocosmIDs[0]) includeHuddles = false } else { var inList = `` for i, v := range m.Query.MicrocosmIDs { inList += strconv.FormatInt(v, 10) if i < len(m.Query.MicrocosmIDs)-1 { inList += `,` } } filterMicrocosmIDs = ` AND f.microcosm_id IN (` + inList + `)` } } var filterModified string if !m.Query.SinceTime.IsZero() || !m.Query.UntilTime.IsZero() { if m.Query.UntilTime.IsZero() { filterModified = fmt.Sprintf(` AND f.last_modified > to_timestamp(%d)`, m.Query.SinceTime.Unix(), ) } else if m.Query.SinceTime.IsZero() { filterModified = fmt.Sprintf(` AND f.last_modified < to_timestamp(%d)`, m.Query.UntilTime.Unix(), ) } else { filterModified = fmt.Sprintf(` AND f.last_modified BETWEEN to_timestamp(%d) AND to_timestamp(%d)`, m.Query.SinceTime.Unix(), m.Query.UntilTime.Unix(), ) } } var ( filterEventsJoin string filterEventsWhere string ) if !m.Query.EventAfterTime.IsZero() || !m.Query.EventBeforeTime.IsZero() { joinEvents = true if m.Query.EventBeforeTime.IsZero() { filterModified = fmt.Sprintf(` AND e."when" > to_timestamp(%d)`, m.Query.EventAfterTime.Unix(), ) } else if m.Query.EventAfterTime.IsZero() { filterModified = fmt.Sprintf(` AND e."when" < to_timestamp(%d)`, m.Query.EventBeforeTime.Unix(), ) } else { filterModified = fmt.Sprintf(` AND e."when" BETWEEN to_timestamp(%d) AND to_timestamp(%d)`, m.Query.EventAfterTime.Unix(), m.Query.EventBeforeTime.Unix(), ) } } if joinEvents || m.Query.Attendee { filterEventsJoin = ` JOIN events e ON e.event_id = f.item_id` if m.Query.Attendee { filterEventsJoin += ` JOIN attendees a ON a.event_id = e.event_id AND a.profile_id = ` + strconv.FormatInt(profileID, 10) 
+ ` AND a.state_id = 1` } } // These make up our SQL query sqlSelect := ` SELECT 0,0,0,NULL,NULL,NOW(),0,''` sqlFromWhere := ` FROM flags WHERE 1=2` // Query with only meta data sqlWith := ` WITH m AS ( SELECT m.microcosm_id FROM microcosms m LEFT JOIN permissions_cache p ON p.site_id = m.site_id AND p.item_type_id = 2 AND p.item_id = m.microcosm_id AND p.profile_id = $2 LEFT JOIN ignores i ON i.profile_id = $2 AND i.item_type_id = 2 AND i.item_id = m.microcosm_id WHERE m.site_id = $1 AND m.is_deleted IS NOT TRUE AND m.is_moderated IS NOT TRUE AND i.profile_id IS NULL AND ( (p.can_read IS NOT NULL AND p.can_read IS TRUE) OR (get_effective_permissions($1,m.microcosm_id,2,m.microcosm_id,$2)).can_read IS TRUE ) )` if includeHuddles || includeComments { if filterModified != "" { sqlWith += `, h AS ( SELECT hp.huddle_id FROM huddle_profiles hp JOIN flags f ON f.item_type_id = 5 AND f.item_id = hp.huddle_id WHERE hp.profile_id = $2` + filterModified + ` )` } else { sqlWith += `, h AS ( SELECT huddle_id FROM huddle_profiles WHERE profile_id = $2 )` } } sqlSelect = ` SELECT f.item_type_id ,f.item_id ,f.parent_item_type_id ,f.parent_item_id ,f.last_modified ,0.5 AS rank ,'' AS highlight` sqlFromWhere = ` FROM flags f LEFT JOIN ignores i ON i.profile_id = $2 AND i.item_type_id = f.item_type_id AND i.item_id = f.item_id` + filterFollowing + filterEventsJoin + ` WHERE f.site_id = $1 AND i.profile_id IS NULL` + filterModified + filterMicrocosmIDs + filterItemTypes + filterItems + filterProfileID + filterEventsWhere + ` AND f.microcosm_is_deleted IS NOT TRUE AND f.microcosm_is_moderated IS NOT TRUE AND f.parent_is_deleted IS NOT TRUE AND f.parent_is_moderated IS NOT TRUE AND f.item_is_deleted IS NOT TRUE AND f.item_is_moderated IS NOT TRUE AND ( (-- Things that are public by default and low in quantity f.item_type_id IN (1,3) OR f.parent_item_type_id IN (3) ) OR (-- Things directly in microcosms f.item_type_id IN (2,6,7,9) AND COALESCE(f.microcosm_id, f.item_id) IN (SELECT 
microcosm_id FROM m) )` if includeComments { sqlFromWhere += ` OR (-- Comments on things in microcosms f.item_type_id = 4 AND f.parent_item_type_id IN (6,7,9) AND f.microcosm_id IN (SELECT microcosm_id FROM m) ) OR (-- Comments on things in huddles f.item_type_id = 4 AND f.parent_item_type_id = 5 AND f.parent_item_id IN (SELECT huddle_id FROM h) )` } if includeHuddles { sqlFromWhere += ` OR (-- Huddles f.item_type_id = 5 AND f.item_id IN (SELECT huddle_id FROM h) )` } sqlFromWhere += ` )` sqlOrderLimit := ` ORDER BY ` + orderBy + ` LIMIT $3 OFFSET $4` db, err := h.GetConnection() if err != nil { glog.Errorf("h.GetConnection() %+v", err) return m, http.StatusInternalServerError, err } var total int64 err = db.QueryRow( sqlWith+`SELECT COUNT(*)`+sqlFromWhere, siteID, profileID, ).Scan(&total) if err != nil { glog.Error(err) return m, http.StatusInternalServerError, err } // This nested query is used to run the `has_unread` query on only the rows // that are returned, rather than on all rows in the underlying query before // limit has been applied. 
rows, err := db.Query(` SELECT item_type_id ,item_id ,parent_item_type_id ,parent_item_id ,last_modified ,rank ,highlight ,has_unread(item_type_id, item_id, $2) FROM (`+ sqlWith+ sqlSelect+ sqlFromWhere+ sqlOrderLimit+ `) r`, siteID, profileID, limit, offset, ) if err != nil { glog.Errorf( "stmt.Query(%d, %s, %d, %d, %d) %+v", siteID, m.Query.Query, profileID, limit, offset, err, ) return m, http.StatusInternalServerError, fmt.Errorf("Database query failed") } defer rows.Close() rs := []SearchResult{} for rows.Next() { var r SearchResult err = rows.Scan( &r.ItemTypeID, &r.ItemID, &r.ParentItemTypeID, &r.ParentItemID, &r.LastModified, &r.Rank, &r.Highlight, &r.Unread, ) if err != nil { glog.Errorf("rows.Scan() %+v", err) return m, http.StatusInternalServerError, fmt.Errorf("Row parsing error") } itemType, err := h.GetMapStringFromInt(h.ItemTypes, r.ItemTypeID) if err != nil { glog.Errorf( "h.GetMapStringFromInt(h.ItemTypes, %d) %+v", r.ItemTypeID, err, ) return m, http.StatusInternalServerError, err } r.ItemType = itemType if r.ParentItemTypeID.Valid { parentItemType, err := h.GetMapStringFromInt(h.ItemTypes, r.ParentItemTypeID.Int64) if err != nil { glog.Errorf( "h.GetMapStringFromInt(h.ItemTypes, %d) %+v", r.ParentItemTypeID.Int64, err, ) return m, http.StatusInternalServerError, err } r.ParentItemType = parentItemType } rs = append(rs, r) } err = rows.Err() if err != nil { glog.Errorf("rows.Err() %+v", err) return m, http.StatusInternalServerError, fmt.Errorf("Error fetching rows") } rows.Close() pages := h.GetPageCount(total, limit) maxOffset := h.GetMaxOffset(total, limit) if offset > maxOffset { glog.Infoln("offset > maxOffset") return m, http.StatusBadRequest, fmt.Errorf("not enough records, "+ "offset (%d) would return an empty page.", offset) } // Extract the summaries var wg1 sync.WaitGroup req := make(chan SummaryContainerRequest) defer close(req) seq := 0 for i := 0; i < len(rs); i++ { go HandleSummaryContainerRequest( siteID, rs[i].ItemTypeID, 
rs[i].ItemID, profileID, seq, req, ) seq++ wg1.Add(1) if rs[i].ParentItemID.Valid && rs[i].ParentItemID.Int64 > 0 { go HandleSummaryContainerRequest( siteID, rs[i].ParentItemTypeID.Int64, rs[i].ParentItemID.Int64, profileID, seq, req, ) seq++ wg1.Add(1) } } resps := []SummaryContainerRequest{} for i := 0; i < seq; i++ { resp := <-req wg1.Done() resps = append(resps, resp) } wg1.Wait() for _, resp := range resps { if resp.Err != nil { return m, resp.Status, resp.Err } } sort.Sort(SummaryContainerRequestsBySeq(resps)) seq = 0 for i := 0; i < len(rs); i++ { rs[i].Item = resps[seq].Item.Summary seq++ if rs[i].ParentItemID.Valid && rs[i].ParentItemID.Int64 > 0 { rs[i].ParentItem = resps[seq].Item.Summary seq++ } } m.Results = h.ConstructArray( rs, "result", total, limit, offset, pages, &searchURL, ) // return milliseconds m.TimeTaken = time.Now().Sub(start).Nanoseconds() / 1000000 return m, http.StatusOK, nil }
func searchFullText( siteID int64, searchURL url.URL, profileID int64, m SearchResults, ) ( SearchResults, int, error, ) { limit, offset, status, err := h.GetLimitAndOffset(searchURL.Query()) if err != nil { glog.Errorf("h.GetLimitAndOffset(searchURL.Query()) %+v", err) return m, status, err } start := time.Now() // Search options var joinEvents bool orderBy := `rank DESC ,f.last_modified DESC` switch m.Query.Sort { case "date": orderBy = `f.last_modified DESC` case "oldest": joinEvents = true orderBy = `e."when" ASC` case "newest": joinEvents = true orderBy = `e."when" DESC` } var filterFollowing string if m.Query.Following { filterFollowing = ` JOIN watchers w ON w.item_type_id = f.item_type_id AND w.item_id = f.item_id AND w.profile_id = $2` } fullTextScope := `document` var filterTitle string if m.Query.InTitle { fullTextScope = `title` filterTitle = ` AND f.item_type_id <> 4` } var filterItemTypes string var filterItems string var includeComments bool if !m.Query.InTitle { includeComments = true } if len(m.Query.ItemTypeIDs) > 0 { var itemTypeInList []string var itemTypeSansCommentsInList []string // Take care of the item types for _, v := range m.Query.ItemTypeIDs { switch v { case h.ItemTypes[h.ItemTypeComment]: includeComments = true itemTypeInList = append(itemTypeInList, strconv.FormatInt(v, 10)) default: itemTypeInList = append(itemTypeInList, strconv.FormatInt(v, 10)) itemTypeSansCommentsInList = append(itemTypeSansCommentsInList, strconv.FormatInt(v, 10)) } } if len(m.Query.ItemIDs) == 0 { if len(m.Query.ItemTypeIDs) == 1 { filterItemTypes = fmt.Sprintf(` AND f.item_type_id = %d`, m.Query.ItemTypeIDs[0], ) } else { if includeComments { filterItemTypes = ` AND ( (f.item_type_id IN (` + strings.Join(itemTypeSansCommentsInList, `,`) + `)) OR (f.item_type_id = 4 AND f.parent_item_type_id IN (` + strings.Join(itemTypeSansCommentsInList, `,`) + `)) )` } else { filterItemTypes = ` AND f.item_type_id IN (` + strings.Join(itemTypeInList, `,`) + `)` } } } // 
Take care of the item ids, which are only valid when we have item // types if len(m.Query.ItemIDs) > 0 { var itemIdsInList []string for _, v := range m.Query.ItemIDs { itemIdsInList = append(itemIdsInList, strconv.FormatInt(v, 10)) } if len(m.Query.ItemIDs) == 1 { if includeComments { filterItems = fmt.Sprintf(` AND ( (si.item_type_id IN (`+strings.Join(itemTypeSansCommentsInList, `,`)+`) AND si.item_id = %d) OR (si.item_type_id = 4 AND si.parent_item_id = %d AND si.parent_item_type_id IN (`+strings.Join(itemTypeSansCommentsInList, `,`)+`)) )`, m.Query.ItemIDs[0], m.Query.ItemIDs[0], ) } else { filterItems = fmt.Sprintf(` AND si.item_id = %d`, m.Query.ItemIDs[0], ) } } else { if includeComments { filterItems = ` AND ( (si.item_type_id IN (` + strings.Join(itemTypeSansCommentsInList, `,`) + `) AND si.item_id IN (` + strings.Join(itemIdsInList, `,`) + `)) OR (si.item_type_id = 4 AND si.parent_item_type_id IN (` + strings.Join(itemTypeSansCommentsInList, `,`) + `) AND si.parent_item_id IN (` + strings.Join(itemIdsInList, `,`) + `)) )` } else { filterItems = ` AND si.item_type_id IN (` + strings.Join(itemTypeInList, `,`) + `) AND si.item_id IN (` + strings.Join(itemIdsInList, `,`) + `)` } } } } // Note: hashtags being inserted into the query this way may appear // initially to be a vector for a SQL injection attack. However the // source of these hashtags is a regexp in hashtags.go which only // matches contiguous alphanum strings and does not permit spaces, // quotes, semicolons or any other escapable sequence that can be // utilised to create an attack. 
var filterHashTag string for _, hashtag := range m.Query.Hashtags { filterHashTag += ` AND si.` + fullTextScope + `_text ~* '\W` + hashtag + `\W'` } var filterProfileID string if m.Query.ProfileID > 0 { filterProfileID = fmt.Sprintf(` AND si.profile_id = %d`, m.Query.ProfileID) } var filterMicrocosmIDs string if len(m.Query.MicrocosmIDs) > 0 { if len(m.Query.MicrocosmIDs) == 1 { filterMicrocosmIDs = fmt.Sprintf(` AND f.microcosm_id = %d`, m.Query.MicrocosmIDs[0]) } else { var inList = `` for i, v := range m.Query.MicrocosmIDs { inList += strconv.FormatInt(v, 10) if i < len(m.Query.MicrocosmIDs)-1 { inList += `,` } } filterMicrocosmIDs = ` AND f.microcosm_id IN (` + inList + `)` } } var filterModified string if !m.Query.SinceTime.IsZero() || !m.Query.UntilTime.IsZero() { if m.Query.UntilTime.IsZero() { filterModified = fmt.Sprintf(` AND f.last_modified > to_timestamp(%d)`, m.Query.SinceTime.Unix(), ) } else if m.Query.SinceTime.IsZero() { filterModified = fmt.Sprintf(` AND f.last_modified < to_timestamp(%d)`, m.Query.UntilTime.Unix(), ) } else { filterModified = fmt.Sprintf(` AND f.last_modified BETWEEN to_timestamp(%d) AND to_timestamp(%d)`, m.Query.SinceTime.Unix(), m.Query.UntilTime.Unix(), ) } } var ( filterEventsJoin string filterEventsWhere string ) if !m.Query.EventAfterTime.IsZero() || !m.Query.EventBeforeTime.IsZero() { joinEvents = true if m.Query.EventBeforeTime.IsZero() { filterModified = fmt.Sprintf(` AND e."when" > to_timestamp(%d)`, m.Query.EventAfterTime.Unix(), ) } else if m.Query.EventAfterTime.IsZero() { filterModified = fmt.Sprintf(` AND e."when" < to_timestamp(%d)`, m.Query.EventBeforeTime.Unix(), ) } else { filterModified = fmt.Sprintf(` AND e."when" BETWEEN to_timestamp(%d) AND to_timestamp(%d)`, m.Query.EventAfterTime.Unix(), m.Query.EventBeforeTime.Unix(), ) } } if joinEvents || m.Query.Attendee { filterEventsJoin = ` JOIN events e ON e.event_id = f.item_id` if m.Query.Attendee { filterEventsJoin += ` JOIN attendees a ON a.event_id = 
e.event_id AND a.profile_id = ` + strconv.FormatInt(profileID, 10) + ` AND a.state_id = 1` } } sqlQuery := ` WITH m AS ( SELECT m.microcosm_id FROM microcosms m LEFT JOIN permissions_cache p ON p.site_id = m.site_id AND p.item_type_id = 2 AND p.item_id = m.microcosm_id AND p.profile_id = $2 LEFT JOIN ignores i ON i.profile_id = $2 AND i.item_type_id = 2 AND i.item_id = m.microcosm_id WHERE m.site_id = $1 AND m.is_deleted IS NOT TRUE AND m.is_moderated IS NOT TRUE AND i.profile_id IS NULL AND ( (p.can_read IS NOT NULL AND p.can_read IS TRUE) OR (get_effective_permissions($1,m.microcosm_id,2,m.microcosm_id,$2)).can_read IS TRUE ) ) SELECT total ,item_type_id ,item_id ,parent_item_type_id ,parent_item_id ,last_modified ,rank ,ts_headline(` + fullTextScope + `_text, query) AS highlight ,has_unread(item_type_id, item_id, $2) FROM ( SELECT COUNT(*) OVER() AS total ,f.item_type_id ,f.item_id ,f.parent_item_type_id ,f.parent_item_id ,f.last_modified ,ts_rank_cd(si.` + fullTextScope + `_vector, query, 8) AS rank ,si.` + fullTextScope + `_text ,query.query FROM search_index si JOIN flags f ON f.item_type_id = si.item_type_id AND f.item_id = si.item_id LEFT JOIN ignores i ON i.profile_id = $2 AND i.item_type_id = f.item_type_id AND i.item_id = f.item_id` + filterEventsJoin + filterFollowing + ` LEFT JOIN huddle_profiles h ON (f.parent_item_type_id = 5 OR f.item_type_id = 5) AND h.huddle_id = COALESCE(f.parent_item_id, f.item_id) AND h.profile_id = $2 ,plainto_tsquery($3) AS query WHERE f.site_id = $1 AND i.profile_id IS NULL` + filterModified + filterMicrocosmIDs + filterTitle + filterItemTypes + filterItems + filterHashTag + filterEventsWhere + filterProfileID + ` AND f.microcosm_is_deleted IS NOT TRUE AND f.microcosm_is_moderated IS NOT TRUE AND f.parent_is_deleted IS NOT TRUE AND f.parent_is_moderated IS NOT TRUE AND f.item_is_deleted IS NOT TRUE AND f.item_is_moderated IS NOT TRUE AND si.` + fullTextScope + `_vector @@ query` + ` AND ( -- Things that are public by default 
COALESCE(f.parent_item_type_id, f.item_type_id) = 3 OR -- Things in microcosms COALESCE(f.microcosm_id, f.item_id) IN (SELECT microcosm_id FROM m) OR -- Things in huddles COALESCE(f.parent_item_id, f.item_id) = h.huddle_id ) ORDER BY ` + orderBy + ` LIMIT $4 OFFSET $5 ) r ` db, err := h.GetConnection() if err != nil { glog.Errorf("h.GetConnection() %+v", err) return m, http.StatusInternalServerError, err } queryID := `Search` + randomString() queryTimer := time.NewTimer(searchTimeout) go func() { <-queryTimer.C db.Exec(`SELECT pg_cancel_backend(pid) FROM pg_stat_activity WHERE state = 'active' AND query LIKE '--` + queryID + `%'`) }() // This nested query is used to run the `has_unread` query on only the rows // that are returned, rather than on all rows in the underlying query before // limit has been applied. rows, err := db.Query( `--`+queryID+ sqlQuery, siteID, profileID, m.Query.Query, limit, offset, ) queryTimer.Stop() if err != nil { e, ok := err.(*pq.Error) if !ok { glog.Errorf( "stmt.Query(%d, %s, %d, %d, %d) %+v", siteID, m.Query.Query, profileID, limit, offset, err, ) return m, http.StatusInternalServerError, fmt.Errorf("Database query failed") } switch e.Code.Name() { case "query_canceled": glog.Errorf( "Query for '%s' took too long", m.Query.Query, ) return m, http.StatusInternalServerError, merrors.MicrocosmError{ ErrorCode: 24, ErrorMessage: "The search query took too long and has been cancelled", } default: glog.Errorf( "stmt.Query(%d, %s, %d, %d, %d) %+v", siteID, m.Query.Query, profileID, limit, offset, err, ) return m, http.StatusInternalServerError, fmt.Errorf("Database query failed") } } defer rows.Close() var total int64 rs := []SearchResult{} for rows.Next() { var r SearchResult err = rows.Scan( &total, &r.ItemTypeID, &r.ItemID, &r.ParentItemTypeID, &r.ParentItemID, &r.LastModified, &r.Rank, &r.Highlight, &r.Unread, ) if err != nil { glog.Errorf("rows.Scan() %+v", err) return m, http.StatusInternalServerError, fmt.Errorf("Row parsing error") 
} itemType, err := h.GetMapStringFromInt(h.ItemTypes, r.ItemTypeID) if err != nil { glog.Errorf( "h.GetMapStringFromInt(h.ItemTypes, %d) %+v", r.ItemTypeID, err, ) return m, http.StatusInternalServerError, err } r.ItemType = itemType if r.ParentItemTypeID.Valid { parentItemType, err := h.GetMapStringFromInt(h.ItemTypes, r.ParentItemTypeID.Int64) if err != nil { glog.Errorf( "h.GetMapStringFromInt(h.ItemTypes, %d) %+v", r.ParentItemTypeID.Int64, err, ) return m, http.StatusInternalServerError, err } r.ParentItemType = parentItemType } rs = append(rs, r) } err = rows.Err() if err != nil { glog.Errorf("rows.Err() %+v", err) return m, http.StatusInternalServerError, fmt.Errorf("Error fetching rows") } rows.Close() pages := h.GetPageCount(total, limit) maxOffset := h.GetMaxOffset(total, limit) if offset > maxOffset { glog.Infoln("offset > maxOffset") return m, http.StatusBadRequest, fmt.Errorf("not enough records, "+ "offset (%d) would return an empty page.", offset) } // Extract the summaries var wg1 sync.WaitGroup req := make(chan SummaryContainerRequest) defer close(req) seq := 0 for i := 0; i < len(rs); i++ { go HandleSummaryContainerRequest( siteID, rs[i].ItemTypeID, rs[i].ItemID, profileID, seq, req, ) seq++ wg1.Add(1) if rs[i].ParentItemID.Valid && rs[i].ParentItemID.Int64 > 0 { go HandleSummaryContainerRequest( siteID, rs[i].ParentItemTypeID.Int64, rs[i].ParentItemID.Int64, profileID, seq, req, ) seq++ wg1.Add(1) } } resps := []SummaryContainerRequest{} for i := 0; i < seq; i++ { resp := <-req wg1.Done() resps = append(resps, resp) } wg1.Wait() for _, resp := range resps { if resp.Err != nil { return m, resp.Status, resp.Err } } sort.Sort(SummaryContainerRequestsBySeq(resps)) seq = 0 for i := 0; i < len(rs); i++ { rs[i].Item = resps[seq].Item.Summary seq++ if rs[i].ParentItemID.Valid && rs[i].ParentItemID.Int64 > 0 { rs[i].ParentItem = resps[seq].Item.Summary seq++ } } m.Results = h.ConstructArray( rs, "result", total, limit, offset, pages, &searchURL, ) // 
return milliseconds m.TimeTaken = time.Now().Sub(start).Nanoseconds() / 1000000 return m, http.StatusOK, nil }
// GetAttendee returns an attendee func GetAttendee(siteID int64, id int64) (AttendeeType, int, error) { // Get from cache if it's available mcKey := fmt.Sprintf(mcAttendeeKeys[c.CacheDetail], id) if val, ok := c.Get(mcKey, AttendeeType{}); ok { m := val.(AttendeeType) m.FetchProfileSummaries(siteID) return m, 0, nil } // Open db connection and retrieve resource db, err := h.GetConnection() if err != nil { glog.Errorf("h.GetConnection() %+v", err) return AttendeeType{}, http.StatusInternalServerError, err } var m AttendeeType err = db.QueryRow(` SELECT attendee_id ,event_id ,profile_id ,created ,created_by ,edited ,edited_by ,edit_reason ,state_id ,state_date FROM attendees WHERE attendee_id = $1`, id, ).Scan( &m.ID, &m.EventID, &m.ProfileID, &m.Meta.Created, &m.Meta.CreatedByID, &m.Meta.EditedNullable, &m.Meta.EditedByNullable, &m.Meta.EditReasonNullable, &m.RSVPID, &m.RSVPd, ) if err == sql.ErrNoRows { return AttendeeType{}, http.StatusNotFound, fmt.Errorf("Resource with ID %d not found", id) } else if err != nil { glog.Errorf("db.QueryRow(%d) %+v", id, err) return AttendeeType{}, http.StatusInternalServerError, fmt.Errorf("Database query failed") } if m.Meta.EditReasonNullable.Valid { m.Meta.EditReason = m.Meta.EditReasonNullable.String } if m.Meta.EditedNullable.Valid { m.Meta.Edited = m.Meta.EditedNullable.Time.Format(time.RFC3339Nano) } if m.RSVPd.Valid { m.RSVPdOn = m.RSVPd.Time.Format(time.RFC3339Nano) } m.RSVP, err = h.GetMapStringFromInt(RSVPStates, m.RSVPID) if err != nil { return AttendeeType{}, http.StatusInternalServerError, err } m.Meta.Links = []h.LinkType{ h.GetExtendedLink( "self", "", h.ItemTypeAttendee, m.EventID, m.ProfileID, ), h.GetLink("profile", "", h.ItemTypeProfile, m.ProfileID), h.GetLink("event", "", h.ItemTypeEvent, m.EventID), } // Update cache c.Set(mcKey, m, mcTTL) m.FetchProfileSummaries(siteID) return m, http.StatusOK, nil }
// Validate normalises the search query in place and sets sq.Valid when at
// least one actionable criterion remains.
//
// Criteria that cannot be honoured (partial geo input, event filters without
// a single event item type, an author name that cannot be resolved, etc.) are
// removed from the query and recorded in sq.IgnoredArr / sq.Ignored; the
// criteria actually used are summarised into sq.Searched.
func (sq *SearchQuery) Validate() {
	// Flipped to true by any criterion that makes the search actionable;
	// copied into sq.Valid at the end.
	var valid bool

	// lat/lon/radius: accept either "none supplied" or "lat and lon both
	// supplied" — anything partial is dropped and reported as ignored.
	// NOTE(review): values <= 0 are treated as unset, which excludes valid
	// southern/western coordinates — confirm whether that is intentional.
	if (sq.Lat == 0 && sq.Lon == 0 && sq.Radius == 0) ||
		(sq.Lat > 0 && sq.Lon > 0) {

		if sq.Lat > 0 && sq.Radius == 0 {
			sq.Radius = 5000 // Value is in meters, 5KM
		}
		// TODO: Implement geo search
		// valid = true
	} else {
		// Partial input: zero each supplied component and log it as ignored.
		if sq.Lat > 0 {
			sq.IgnoredArr = append(
				sq.IgnoredArr,
				fmt.Sprintf("lat:%f", sq.Lat),
			)
			sq.Lat = 0
		}
		if sq.Lon > 0 {
			sq.IgnoredArr = append(
				sq.IgnoredArr,
				fmt.Sprintf("lon:%f", sq.Lon),
			)
			sq.Lon = 0
		}
		if sq.Radius > 0 {
			sq.IgnoredArr = append(
				sq.IgnoredArr,
				fmt.Sprintf("radius:%d", sq.Radius),
			)
			sq.Radius = 0
		}
	}

	// Bounding box: either all four of N/E/S/W or none. A complete box
	// supersedes any surviving lat/lon/radius values.
	if (sq.North == 0 && sq.East == 0 && sq.South == 0 && sq.West == 0) ||
		(sq.North > 0 && sq.East > 0 && sq.South > 0 && sq.West > 0) {

		// NESW overrides lat,lon + radius
		if sq.Lat > 0 {
			sq.IgnoredArr = append(
				sq.IgnoredArr,
				fmt.Sprintf("lat:%f", sq.Lat),
			)
			sq.Lat = 0
		}
		if sq.Lon > 0 {
			sq.IgnoredArr = append(
				sq.IgnoredArr,
				fmt.Sprintf("lon:%f", sq.Lon),
			)
			sq.Lon = 0
		}
		if sq.Radius > 0 {
			sq.IgnoredArr = append(
				sq.IgnoredArr,
				fmt.Sprintf("radius:%d", sq.Radius),
			)
			sq.Radius = 0
		}
		// TODO: Implement geo search
		// valid = true
	} else {
		// Incomplete box: drop each supplied edge and record it as ignored.
		if sq.North > 0 {
			sq.IgnoredArr = append(
				sq.IgnoredArr,
				fmt.Sprintf("north:%f", sq.North),
			)
			sq.North = 0
		}
		if sq.East > 0 {
			sq.IgnoredArr = append(
				sq.IgnoredArr,
				fmt.Sprintf("east:%f", sq.East),
			)
			sq.East = 0
		}
		if sq.South > 0 {
			sq.IgnoredArr = append(
				sq.IgnoredArr,
				fmt.Sprintf("south:%f", sq.South),
			)
			sq.South = 0
		}
		if sq.West > 0 {
			sq.IgnoredArr = append(
				sq.IgnoredArr,
				fmt.Sprintf("west:%f", sq.West),
			)
			sq.West = 0
		}
	}

	// A non-empty free-text query is sufficient on its own.
	sq.Query = strings.TrimSpace(sq.Query)
	if sq.Query != "" {
		valid = true
	} else {
		// Already empty after TrimSpace; assignment kept for symmetry.
		sq.Query = ""
	}

	// TODO: Implement geo search
	// if sq.Lat > 0 {
	// 	searched = append(searched, fmt.Sprintf("lat:%f", sq.Lat))
	// }
	// if sq.Lon > 0 {
	// 	searched = append(searched, fmt.Sprintf("lon:%f", sq.Lon))
	// }
	// if sq.Radius > 0 {
	// 	searched = append(searched, fmt.Sprintf("radius:%f", sq.Radius))
	// }
	// if sq.North > 0 {
	// 	searched = append(searched, fmt.Sprintf("north:%f", sq.North))
	// }
	// if sq.East > 0 {
	// 	searched = append(searched, fmt.Sprintf("east:%f", sq.East))
	// }
	// if sq.South > 0 {
	// 	searched = append(searched, fmt.Sprintf("south:%f", sq.South))
	// }
	// if sq.West > 0 {
	// 	searched = append(searched, fmt.Sprintf("west:%f", sq.West))
	// }

	// Filtering by item type is sufficient on its own.
	if len(sq.ItemTypeIDs) > 0 {
		valid = true
	}

	// eventAfter only makes sense when the search is restricted to exactly
	// the event item type; otherwise drop it.
	if !sq.EventAfterTime.IsZero() {
		if len(sq.ItemTypeIDs) != 1 ||
			sq.ItemTypeIDs[0] != h.ItemTypes[h.ItemTypeEvent] {

			sq.IgnoredArr = append(
				sq.IgnoredArr,
				fmt.Sprintf("eventAfter:%s", sq.EventAfter),
			)
			sq.EventAfterTime = time.Time{}
		}
	}

	// Same restriction for eventBefore.
	if !sq.EventBeforeTime.IsZero() {
		if len(sq.ItemTypeIDs) != 1 ||
			sq.ItemTypeIDs[0] != h.ItemTypes[h.ItemTypeEvent] {

			sq.IgnoredArr = append(
				sq.IgnoredArr,
				fmt.Sprintf("eventBefore:%s", sq.EventBefore),
			)
			sq.EventBeforeTime = time.Time{}
		}
	}

	// An author name without a resolved profile ID cannot be used yet, so it
	// is always ignored (the inner sq.ProfileID > 0 branch is unreachable
	// until the lookup below is implemented).
	if strings.TrimSpace(sq.ProfileName) != "" {
		if sq.ProfileID == 0 {
			// TODO: get profile ID by search for profiles that exact match a username

			if sq.ProfileID > 0 {
				// valid = true
			} else {
				sq.IgnoredArr = append(
					sq.IgnoredArr,
					fmt.Sprintf("author:%s", sq.ProfileName),
				)
				sq.ProfileName = ""
			}
		}
	}

	// Filtering by author profile ID is sufficient on its own.
	if sq.ProfileID > 0 {
		valid = true
	}

	// attendee:true is only meaningful when searching exactly events.
	if sq.Attendee {
		// Events can be sorted by the date of the event
		if !(len(sq.ItemTypeIDs) == 1 &&
			sq.ItemTypeIDs[0] == h.ItemTypes[h.ItemTypeEvent]) {

			sq.IgnoredArr = append(sq.IgnoredArr, "attendee:true")
			sq.Attendee = false
		}
	}

	// Restricting to specific microcosms is sufficient on its own.
	if len(sq.MicrocosmIDs) > 0 {
		// Implement Microcosm search, which means having a really cheap way
		// of looking up a Microcosm Id even when given a comment ID
		valid = true
	}

	// Build up our knowledge of what we're ignoring and what we are searching
	sq.Ignored = strings.Join(sq.IgnoredArr, " ")

	searched := []string{}
	if sq.Query != "" {
		searched = append(searched, sq.Query)
	}

	// Resolve each item type ID to its name for both the query plan and the
	// human-readable "searched" summary. Lookup errors are deliberately
	// ignored: an unknown ID yields an empty name.
	if len(sq.ItemTypeIDs) > 0 {
		for _, v := range sq.ItemTypeIDs {
			itemType, _ := h.GetMapStringFromInt(h.ItemTypes, v)
			sq.ItemTypesQuery = append(sq.ItemTypesQuery, itemType)
			searched = append(searched, fmt.Sprintf("type:%s", itemType))
		}
	}

	if len(sq.ItemIDs) > 0 {
		for _, v := range sq.ItemIDs {
			sq.ItemIDsQuery = append(sq.ItemIDsQuery, v)
			searched = append(searched, fmt.Sprintf("id:%d", v))
		}
	}

	if sq.InTitle {
		searched = append(searched, fmt.Sprintf("inTitle:%t", sq.InTitle))
	}

	if sq.Following {
		searched = append(searched, fmt.Sprintf("following:%t", sq.Following))
	}

	if !sq.SinceTime.IsZero() {
		searched = append(searched, fmt.Sprintf("since:%s", sq.Since))
	}

	if !sq.UntilTime.IsZero() {
		searched = append(searched, fmt.Sprintf("until:%s", sq.Until))
	}

	if !sq.EventAfterTime.IsZero() {
		searched = append(searched, fmt.Sprintf("eventAfter:%s", sq.EventAfter))
	}

	if !sq.EventBeforeTime.IsZero() {
		searched = append(searched, fmt.Sprintf("eventBefore:%s", sq.EventBefore))
	}

	if sq.Attendee {
		searched = append(searched, fmt.Sprintf("attendee:%t", sq.Attendee))
	}

	if len(sq.MicrocosmIDs) > 0 {
		for _, v := range sq.MicrocosmIDs {
			sq.MicrocosmIDsQuery = append(sq.MicrocosmIDsQuery, v)
			searched = append(searched, fmt.Sprintf("forumId:%d", v))
		}
	}

	if sq.ProfileID > 0 {
		searched = append(searched, fmt.Sprintf("authorId:%d", sq.ProfileID))
	}

	if sq.Sort != "" {
		searched = append(searched, fmt.Sprintf("sort:%s", sq.Sort))
	}

	sq.Searched = strings.Join(searched, " ")

	if valid {
		sq.Valid = true
	}

	return
}
// GetAttribute returns an attribute func GetAttribute(id int64) (AttributeType, int, error) { db, err := h.GetConnection() if err != nil { return AttributeType{}, http.StatusInternalServerError, err } var typeID int64 m := AttributeType{ID: id} err = db.QueryRow(` SELECT k.key ,v.value_type_id ,v.string ,v.date ,v."number" ,v."boolean" FROM attribute_keys k, attribute_values v WHERE k.attribute_id = v.attribute_id AND k.attribute_id = $1`, id, ).Scan( &m.Key, &typeID, &m.String, &m.Date, &m.Number, &m.Boolean, ) if err == sql.ErrNoRows { return AttributeType{}, http.StatusNotFound, fmt.Errorf("Attribute not found: %v", err.Error()) } else if err != nil { return AttributeType{}, http.StatusInternalServerError, fmt.Errorf("Database query failed: %v", err.Error()) } typeStr, err := h.GetMapStringFromInt(AttributeTypes, typeID) if err != nil { return AttributeType{}, http.StatusInternalServerError, fmt.Errorf("Type is not a valid attribute type: %v", err.Error()) } m.Type = typeStr switch m.Type { case tString: if m.String.Valid { m.Value = m.String.String } else { return AttributeType{}, http.StatusInternalServerError, fmt.Errorf("Type is string, but value is invalid") } case tDate: if m.Date.Valid { m.Value = m.Date.Time.Format("2006-01-02") } else { return AttributeType{}, http.StatusInternalServerError, fmt.Errorf("Type is date, but value is invalid") } case tNumber: if m.Number.Valid { m.Value = m.Number.Float64 } else { return AttributeType{}, http.StatusInternalServerError, fmt.Errorf("Type is number, but value is invalid") } case tBoolean: if m.Boolean.Valid { m.Value = m.Boolean.Bool } else { return AttributeType{}, http.StatusInternalServerError, fmt.Errorf("Type is boolean, but value is invalid") } default: return AttributeType{}, http.StatusInternalServerError, fmt.Errorf("Type was not one of string|date|number|boolean") } return m, http.StatusOK, nil }