Example #1
// GetRoleProfiles fetches multiple profiles belonging to a role
func GetRoleProfiles(
	siteID int64,
	roleID int64,
	limit int64,
	offset int64,
) (
	[]ProfileSummaryType,
	int64,
	int64,
	int,
	error,
) {

	db, err := h.GetConnection()
	if err != nil {
		return []ProfileSummaryType{}, 0, 0, http.StatusInternalServerError, err
	}

	rows, err := db.Query(`
SELECT COUNT(*) OVER() AS total
      ,rp.profile_id
  FROM role_profiles rp,
       roles r
 WHERE r.role_id = rp.role_id
   AND r.role_id = $1
 ORDER BY rp.profile_id ASC
 LIMIT $2
OFFSET $3`,
		roleID,
		limit,
		offset,
	)
	if err != nil {
		return []ProfileSummaryType{}, 0, 0, http.StatusInternalServerError,
			fmt.Errorf("Database query failed: %v", err.Error())
	}
	defer rows.Close()

	// Get a list of the identifiers of the items to return
	var total int64
	ids := []int64{}
	for rows.Next() {
		var id int64
		err = rows.Scan(
			&total,
			&id,
		)
		if err != nil {
			return []ProfileSummaryType{}, 0, 0, http.StatusInternalServerError,
				fmt.Errorf("Row parsing error: %v", err.Error())
		}

		ids = append(ids, id)
	}
	err = rows.Err()
	if err != nil {
		return []ProfileSummaryType{}, 0, 0, http.StatusInternalServerError,
			fmt.Errorf("Error fetching rows: %v", err.Error())
	}
	rows.Close()

	// Make a request for each identifier
	var wg1 sync.WaitGroup
	req := make(chan ProfileSummaryRequest)
	defer close(req)

	for seq, id := range ids {
		go HandleProfileSummaryRequest(siteID, id, seq, req)
		wg1.Add(1)
	}

	// Receive the responses and check for errors
	resps := []ProfileSummaryRequest{}
	for i := 0; i < len(ids); i++ {
		resp := <-req
		wg1.Done()
		resps = append(resps, resp)
	}
	wg1.Wait()

	for _, resp := range resps {
		if resp.Err != nil {
			return []ProfileSummaryType{}, 0, 0, resp.Status, resp.Err
		}
	}

	// Sort them
	sort.Sort(ProfileSummaryRequestBySeq(resps))

	// Extract the values
	ems := []ProfileSummaryType{}
	for _, resp := range resps {
		ems = append(ems, resp.Item)
	}

	pages := h.GetPageCount(total, limit)
	maxOffset := h.GetMaxOffset(total, limit)

	if offset > maxOffset {
		return []ProfileSummaryType{}, 0, 0, http.StatusBadRequest,
			fmt.Errorf(
				"not enough records, offset (%d) would return an empty page",
				offset,
			)
	}

	return ems, total, pages, http.StatusOK, nil

}
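Note on the pattern above: GetRoleProfiles fans out one goroutine per identifier, collects the responses over a single channel, and then restores request order by sorting on a sequence number. Below is a minimal, self-contained sketch of that fan-out/fan-in shape using hypothetical names (result, fetch); the real HandleProfileSummaryRequest and ProfileSummaryRequestBySeq helpers are assumed to behave analogously. The channel receives already provide the synchronisation, so the WaitGroup in the example above is belt-and-braces rather than required.

package main

import (
	"fmt"
	"sort"
)

// result stands in for ProfileSummaryRequest: a payload plus the sequence
// number of the request that produced it.
type result struct {
	Seq  int
	Item string
	Err  error
}

// fetch stands in for HandleProfileSummaryRequest: do the work, send one result.
func fetch(id int64, seq int, out chan<- result) {
	out <- result{Seq: seq, Item: fmt.Sprintf("profile %d", id)}
}

func main() {
	ids := []int64{10, 20, 30}
	out := make(chan result)
	defer close(out)

	// Fan out: one goroutine per identifier.
	for seq, id := range ids {
		go fetch(id, seq, out)
	}

	// Fan in: exactly one receive per goroutine started.
	results := make([]result, 0, len(ids))
	for range ids {
		results = append(results, <-out)
	}

	// Responses arrive in completion order; restore request order by sequence.
	sort.Slice(results, func(i, j int) bool { return results[i].Seq < results[j].Seq })
	for _, r := range results {
		fmt.Println(r.Seq, r.Item, r.Err)
	}
}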
Example #2
func searchMetaData(
	siteID int64,
	searchURL url.URL,
	profileID int64,
	m SearchResults,
) (
	SearchResults,
	int,
	error,
) {

	limit, offset, status, err := h.GetLimitAndOffset(searchURL.Query())
	if err != nil {
		glog.Errorf("h.GetLimitAndOffset(searchUrl.Query()) %+v", err)
		return m, status, err
	}

	start := time.Now()

	// The goal is to produce a piece of SQL that looks at just the flags table
	// and fetches a list of the items that we care about.
	//
	// Our target SQL should look roughly like this (fetches all viewable comments):
	//
	// WITH m AS (
	//     SELECT microcosm_id
	//       FROM microcosms
	//      WHERE site_id = 2
	//        AND (get_effective_permissions(2,microcosm_id,2,microcosm_id,7)).can_read IS TRUE
	// ), h AS (
	//     SELECT huddle_id
	//       FROM huddle_profiles
	//      WHERE profile_id = 7
	// )
	// SELECT item_type_id
	//       ,item_id
	//   FROM flags
	//  WHERE item_type_id = 4
	//    AND (
	//            site_is_deleted
	//        AND microcosm_is_deleted
	//        AND parent_is_deleted
	//        AND item_is_deleted
	//        ) IS NOT TRUE
	//    AND (
	//        (-- Things that are public by default and low in quantity
	//            item_type_id IN (1,3)
	//         OR parent_item_type_id IN (3)
	//        )
	//     OR (-- Things directly in microcosms
	//            item_type_id IN (2,6,7,9)
	//        AND COALESCE(microcosm_id, item_id) IN (SELECT microcosm_id FROM m)
	//        )
	//     OR (-- Comments on things in microcosms
	//            item_type_id = 4
	//        AND parent_item_type_id IN (6,7,9)
	//        AND microcosm_id IN (SELECT microcosm_id FROM m)
	//        )
	//     OR (-- Huddles
	//            item_type_id = 5
	//        AND item_id IN (SELECT huddle_id FROM h)
	//        )
	//     OR (-- Comments on things in huddles
	//            item_type_id = 4
	//        AND parent_item_type_id = 5
	//        AND parent_item_id IN (SELECT huddle_id FROM h)
	//        )
	//        )
	//  ORDER BY last_modified DESC
	//  LIMIT 25;
	//
	// The key is to only put into the query the bits that will definitely be
	// used.

	// Process search options

	var filterFollowing string
	var filterItemTypes string
	var filterItems string
	var includeHuddles bool
	var includeComments bool
	var joinEvents bool

	orderBy := `rank DESC
         ,f.last_modified DESC`

	switch m.Query.Sort {
	case "date":
		orderBy = `f.last_modified DESC`
	case "oldest":
		joinEvents = true
		orderBy = `e."when" ASC`
	case "newest":
		joinEvents = true
		orderBy = `e."when" DESC`
	}

	if m.Query.Following {
		filterFollowing = `
       JOIN watchers w ON w.item_type_id = f.item_type_id
                      AND w.item_id = f.item_id
                      AND w.profile_id = $2`
	}

	if len(m.Query.ItemTypeIDs) > 0 {
		var inList string

		// Take care of the item types
		for i, v := range m.Query.ItemTypeIDs {

			switch v {
			case h.ItemTypes[h.ItemTypeComment]:
				includeComments = true
			case h.ItemTypes[h.ItemTypeHuddle]:
				includeHuddles = true
			}

			inList += strconv.FormatInt(v, 10)
			if i < len(m.Query.ItemTypeIDs)-1 {
				inList += `,`
			}
		}

		if len(m.Query.ItemTypeIDs) == 1 {
			filterItemTypes = fmt.Sprintf(`
   AND f.item_type_id = %d`,
				m.Query.ItemTypeIDs[0],
			)
		} else {
			if includeComments {
				filterItemTypes = `
   AND (   (f.item_type_id <> 4 AND f.item_type_id IN (` + inList + `))
        OR (f.item_type_id = 4 AND f.parent_item_type_id IN (` + inList + `))
       )`
			} else {
				filterItemTypes = `
   AND f.item_type_id IN (` + inList + `)`
			}
		}

		// Take care of the item ids, which are only valid when we have item
		// types
		if len(m.Query.ItemIDs) > 0 {

			if len(m.Query.ItemIDs) == 1 {
				if includeComments {
					filterItems = fmt.Sprintf(`
   AND (   (f.item_type_id <> 4 AND f.item_id = %d)
        OR (f.item_type_id = 4 AND f.parent_item_id = %d)
       )`,
						m.Query.ItemIDs[0],
						m.Query.ItemIDs[0],
					)
				} else {
					filterItems = fmt.Sprintf(`
   AND f.item_id = %d`,
						m.Query.ItemIDs[0],
					)
				}
			} else {
				var inList = ``
				for i, v := range m.Query.ItemIDs {
					inList += strconv.FormatInt(v, 10)
					if i < len(m.Query.ItemIDs)-1 {
						inList += `,`
					}
				}

				if includeComments {
					filterItems = `
   AND (   (f.item_type_id <> 4 AND f.item_id IN (` + inList + `))
        OR (f.item_type_id = 4 AND f.parent_item_id IN (` + inList + `))
       )`
				} else {
					filterItems = `
   AND f.item_id IN (` + inList + `)`
				}
			}
		}
	}

	var filterProfileID string
	if m.Query.ProfileID > 0 {
		filterProfileID = fmt.Sprintf(`
   AND f.created_by = %d`, m.Query.ProfileID)
	}

	var filterMicrocosmIDs string
	if len(m.Query.MicrocosmIDs) > 0 {
		if len(m.Query.MicrocosmIDs) == 1 {
			filterMicrocosmIDs = fmt.Sprintf(`
   AND f.microcosm_id = %d`, m.Query.MicrocosmIDs[0])
			includeHuddles = false
		} else {
			var inList = ``

			for i, v := range m.Query.MicrocosmIDs {
				inList += strconv.FormatInt(v, 10)
				if i < len(m.Query.MicrocosmIDs)-1 {
					inList += `,`
				}
			}
			filterMicrocosmIDs = `
   AND f.microcosm_id IN (` + inList + `)`
		}
	}

	var filterModified string
	if !m.Query.SinceTime.IsZero() || !m.Query.UntilTime.IsZero() {
		if m.Query.UntilTime.IsZero() {
			filterModified = fmt.Sprintf(`
   AND f.last_modified > to_timestamp(%d)`,
				m.Query.SinceTime.Unix(),
			)

		} else if m.Query.SinceTime.IsZero() {
			filterModified = fmt.Sprintf(`
   AND f.last_modified < to_timestamp(%d)`,
				m.Query.UntilTime.Unix(),
			)
		} else {
			filterModified = fmt.Sprintf(`
   AND f.last_modified BETWEEN to_timestamp(%d) AND to_timestamp(%d)`,
				m.Query.SinceTime.Unix(),
				m.Query.UntilTime.Unix(),
			)
		}
	}

	var (
		filterEventsJoin  string
		filterEventsWhere string
	)
	if !m.Query.EventAfterTime.IsZero() || !m.Query.EventBeforeTime.IsZero() {
		joinEvents = true

		if m.Query.EventBeforeTime.IsZero() {
			filterEventsWhere = fmt.Sprintf(`
   AND e."when" > to_timestamp(%d)`,
				m.Query.EventAfterTime.Unix(),
			)

		} else if m.Query.EventAfterTime.IsZero() {
			filterEventsWhere = fmt.Sprintf(`
   AND e."when" < to_timestamp(%d)`,
				m.Query.EventBeforeTime.Unix(),
			)
		} else {
			filterEventsWhere = fmt.Sprintf(`
   AND e."when" BETWEEN to_timestamp(%d) AND to_timestamp(%d)`,
				m.Query.EventAfterTime.Unix(),
				m.Query.EventBeforeTime.Unix(),
			)
		}
	}

	if joinEvents || m.Query.Attendee {
		filterEventsJoin = `
       JOIN events e ON e.event_id = f.item_id`

		if m.Query.Attendee {
			filterEventsJoin += `
       JOIN attendees a ON a.event_id = e.event_id
                       AND a.profile_id = ` + strconv.FormatInt(profileID, 10) + `
                       AND a.state_id = 1`
		}
	}

	// These make up our SQL query
	sqlSelect := `
SELECT 0,0,0,NULL,NULL,NOW(),0,''`
	sqlFromWhere := `
  FROM flags WHERE 1=2`

	// Query with only meta data
	sqlWith := `
WITH m AS (
    SELECT m.microcosm_id
      FROM microcosms m
      LEFT JOIN permissions_cache p ON p.site_id = m.site_id
                                   AND p.item_type_id = 2
                                   AND p.item_id = m.microcosm_id
                                   AND p.profile_id = $2
           LEFT JOIN ignores i ON i.profile_id = $2
                              AND i.item_type_id = 2
                              AND i.item_id = m.microcosm_id
     WHERE m.site_id = $1
       AND m.is_deleted IS NOT TRUE
       AND m.is_moderated IS NOT TRUE
       AND i.profile_id IS NULL
       AND (
               (p.can_read IS NOT NULL AND p.can_read IS TRUE)
            OR (get_effective_permissions($1,m.microcosm_id,2,m.microcosm_id,$2)).can_read IS TRUE
           )
)`
	if includeHuddles || includeComments {
		if filterModified != "" {
			sqlWith += `, h AS (
    SELECT hp.huddle_id
      FROM huddle_profiles hp
      JOIN flags f ON f.item_type_id = 5
                  AND f.item_id = hp.huddle_id
     WHERE hp.profile_id = $2` + filterModified + `
)`
		} else {
			sqlWith += `, h AS (
    SELECT huddle_id
      FROM huddle_profiles
     WHERE profile_id = $2
)`
		}

	}

	sqlSelect = `
SELECT f.item_type_id
      ,f.item_id
      ,f.parent_item_type_id
      ,f.parent_item_id
      ,f.last_modified
      ,0.5 AS rank
      ,'' AS highlight`

	sqlFromWhere = `
  FROM flags f
  LEFT JOIN ignores i ON i.profile_id = $2
                     AND i.item_type_id = f.item_type_id
                     AND i.item_id = f.item_id` +
		filterFollowing +
		filterEventsJoin + `
 WHERE f.site_id = $1
   AND i.profile_id IS NULL` +
		filterModified +
		filterMicrocosmIDs +
		filterItemTypes +
		filterItems +
		filterProfileID +
		filterEventsWhere + `
   AND f.microcosm_is_deleted IS NOT TRUE
   AND f.microcosm_is_moderated IS NOT TRUE
   AND f.parent_is_deleted IS NOT TRUE
   AND f.parent_is_moderated IS NOT TRUE
   AND f.item_is_deleted IS NOT TRUE
   AND f.item_is_moderated IS NOT TRUE
   AND (
       (-- Things that are public by default and low in quantity
           f.item_type_id IN (1,3)
        OR f.parent_item_type_id IN (3)
       )
    OR (-- Things directly in microcosms
           f.item_type_id IN (2,6,7,9)
       AND COALESCE(f.microcosm_id, f.item_id) IN (SELECT microcosm_id FROM m)
       )`

	if includeComments {
		sqlFromWhere += `
    OR (-- Comments on things in microcosms
           f.item_type_id = 4
       AND f.parent_item_type_id IN (6,7,9)
       AND f.microcosm_id IN (SELECT microcosm_id FROM m)
       )
    OR (-- Comments on things in huddles
           f.item_type_id = 4
       AND f.parent_item_type_id = 5
       AND f.parent_item_id IN (SELECT huddle_id FROM h)
       )`
	}

	if includeHuddles {
		sqlFromWhere += `
    OR (-- Huddles
           f.item_type_id = 5
       AND f.item_id IN (SELECT huddle_id FROM h)
       )`
	}

	sqlFromWhere += `
       )`

	sqlOrderLimit := `
 ORDER BY ` + orderBy + `
 LIMIT $3
OFFSET $4`

	db, err := h.GetConnection()
	if err != nil {
		glog.Errorf("h.GetConnection() %+v", err)
		return m, http.StatusInternalServerError, err
	}

	var total int64
	err = db.QueryRow(
		sqlWith+`SELECT COUNT(*)`+sqlFromWhere,
		siteID,
		profileID,
	).Scan(&total)
	if err != nil {
		glog.Error(err)
		return m, http.StatusInternalServerError, err
	}

	// This nested query is used to run the `has_unread` query on only the rows
	// that are returned, rather than on all rows in the underlying query before
	// limit has been applied.
	rows, err := db.Query(`
SELECT item_type_id
      ,item_id
      ,parent_item_type_id
      ,parent_item_id
      ,last_modified
      ,rank
      ,highlight
      ,has_unread(item_type_id, item_id, $2)
  FROM (`+
		sqlWith+
		sqlSelect+
		sqlFromWhere+
		sqlOrderLimit+
		`) r`,
		siteID,
		profileID,
		limit,
		offset,
	)
	if err != nil {
		glog.Errorf(
			"db.Query(%d, %d, %d, %d) %+v",
			siteID,
			profileID,
			limit,
			offset,
			err,
		)
		return m, http.StatusInternalServerError,
			fmt.Errorf("Database query failed")
	}
	defer rows.Close()

	rs := []SearchResult{}
	for rows.Next() {
		var r SearchResult
		err = rows.Scan(
			&r.ItemTypeID,
			&r.ItemID,
			&r.ParentItemTypeID,
			&r.ParentItemID,
			&r.LastModified,
			&r.Rank,
			&r.Highlight,
			&r.Unread,
		)
		if err != nil {
			glog.Errorf("rows.Scan() %+v", err)
			return m, http.StatusInternalServerError,
				fmt.Errorf("Row parsing error")
		}

		itemType, err := h.GetMapStringFromInt(h.ItemTypes, r.ItemTypeID)
		if err != nil {
			glog.Errorf(
				"h.GetMapStringFromInt(h.ItemTypes, %d) %+v",
				r.ItemTypeID,
				err,
			)
			return m, http.StatusInternalServerError, err
		}
		r.ItemType = itemType

		if r.ParentItemTypeID.Valid {
			parentItemType, err :=
				h.GetMapStringFromInt(h.ItemTypes, r.ParentItemTypeID.Int64)
			if err != nil {
				glog.Errorf(
					"h.GetMapStringFromInt(h.ItemTypes, %d) %+v",
					r.ParentItemTypeID.Int64,
					err,
				)
				return m, http.StatusInternalServerError, err
			}
			r.ParentItemType = parentItemType
		}

		rs = append(rs, r)
	}
	err = rows.Err()
	if err != nil {
		glog.Errorf("rows.Err() %+v", err)
		return m, http.StatusInternalServerError,
			fmt.Errorf("Error fetching rows")
	}
	rows.Close()

	pages := h.GetPageCount(total, limit)
	maxOffset := h.GetMaxOffset(total, limit)

	if offset > maxOffset {
		glog.Infoln("offset > maxOffset")
		return m, http.StatusBadRequest,
			fmt.Errorf("not enough records, "+
				"offset (%d) would return an empty page.", offset)
	}

	// Extract the summaries
	var wg1 sync.WaitGroup
	req := make(chan SummaryContainerRequest)
	defer close(req)

	seq := 0
	for i := 0; i < len(rs); i++ {
		go HandleSummaryContainerRequest(
			siteID,
			rs[i].ItemTypeID,
			rs[i].ItemID,
			profileID,
			seq,
			req,
		)
		seq++
		wg1.Add(1)

		if rs[i].ParentItemID.Valid && rs[i].ParentItemID.Int64 > 0 {
			go HandleSummaryContainerRequest(
				siteID,
				rs[i].ParentItemTypeID.Int64,
				rs[i].ParentItemID.Int64,
				profileID,
				seq,
				req,
			)
			seq++
			wg1.Add(1)
		}
	}

	resps := []SummaryContainerRequest{}
	for i := 0; i < seq; i++ {
		resp := <-req
		wg1.Done()
		resps = append(resps, resp)
	}
	wg1.Wait()

	for _, resp := range resps {
		if resp.Err != nil {
			return m, resp.Status, resp.Err
		}
	}

	sort.Sort(SummaryContainerRequestsBySeq(resps))

	seq = 0
	for i := 0; i < len(rs); i++ {

		rs[i].Item = resps[seq].Item.Summary
		seq++

		if rs[i].ParentItemID.Valid && rs[i].ParentItemID.Int64 > 0 {
			rs[i].ParentItem = resps[seq].Item.Summary
			seq++
		}
	}

	m.Results = h.ConstructArray(
		rs,
		"result",
		total,
		limit,
		offset,
		pages,
		&searchURL,
	)

	// return milliseconds
	m.TimeTaken = time.Since(start).Nanoseconds() / 1000000

	return m, http.StatusOK, nil

}
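searchMetaData, like the other examples here, derives pages and maxOffset from the total row count via h.GetPageCount and h.GetMaxOffset before rejecting offsets that would land on an empty page. Those helpers are not shown on this page; the sketch below assumes they are simple ceiling arithmetic and is illustrative only, not the actual microcosm implementation.

// Assumed semantics of the pagination helpers used throughout these examples.
func getPageCount(total, limit int64) int64 {
	if limit <= 0 {
		return 0
	}
	// Ceiling division: total=101, limit=25 -> 5 pages.
	return (total + limit - 1) / limit
}

func getMaxOffset(total, limit int64) int64 {
	pages := getPageCount(total, limit)
	if pages == 0 {
		return 0
	}
	// Offset of the first row of the last non-empty page: total=101, limit=25 -> 100.
	return (pages - 1) * limit
}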
Example #3
// GetUpdates retrieves the list of updates for the given profile
func GetUpdates(
	siteID int64,
	profileID int64,
	limit int64,
	offset int64,
) (
	[]UpdateType,
	int64,
	int64,
	int,
	error,
) {

	db, err := h.GetConnection()
	if err != nil {
		glog.Errorf("h.GetConnection() %+v", err)
		return []UpdateType{}, 0, 0, http.StatusInternalServerError, err
	}

	sqlQuery := `--GetUpdates
WITH m AS (
    SELECT m.microcosm_id
      FROM microcosms m
      LEFT JOIN permissions_cache p ON p.site_id = m.site_id
                                   AND p.item_type_id = 2
                                   AND p.item_id = m.microcosm_id
                                   AND p.profile_id = $2
           LEFT JOIN ignores i ON i.profile_id = $2
                              AND i.item_type_id = 2
                              AND i.item_id = m.microcosm_id
     WHERE m.site_id = $1
       AND m.is_deleted IS NOT TRUE
       AND m.is_moderated IS NOT TRUE
       AND i.profile_id IS NULL
       AND (
               (p.can_read IS NOT NULL AND p.can_read IS TRUE)
            OR (get_effective_permissions($1,m.microcosm_id,2,m.microcosm_id,$2)).can_read IS TRUE
           )
)
SELECT total
      ,update_id
      ,for_profile_id
      ,update_type_id
      ,item_type_id
      ,item_id
      ,created_by
      ,created
      ,site_id
      ,has_unread(COALESCE(parent_item_type_id, item_type_id), COALESCE(parent_item_id, item_id), $2)
  FROM (
          SELECT COUNT(*) OVER() AS total
                ,rollup.update_id
                ,rollup.for_profile_id
                ,rollup.update_type_id
                ,rollup.item_type_id
                ,rollup.item_id
                ,rollup.created_by
                ,rollup.created
                ,rollup.site_id
                ,f.parent_item_type_id
                ,f.parent_item_id
            FROM flags f
                 JOIN (
                          -- 1;'new_comment';'When a comment has been posted in an item you are watching'
                          -- 4;'new_comment_in_huddle';'When you receive a new comment in a private message'
                          SELECT u.update_id
                                ,u.for_profile_id
                                ,u.update_type_id
                                ,u.item_type_id
                                ,u.item_id
                                ,u.created_by
                                ,u.created
                                ,$1 AS site_id
                            FROM updates u
                                 JOIN (
                                          SELECT MAX(u.update_id) AS update_id
                                                ,f.parent_item_type_id AS item_type_id
                                                ,f.parent_item_id AS item_id
                                            FROM updates u
                                                 JOIN flags f ON f.item_type_id = u.item_type_id
                                                             AND f.item_id = u.item_id
                                            LEFT JOIN ignores i ON i.profile_id = $2
                                                               AND (
                                                                       (i.item_type_id = 3 AND i.item_id = u.created_by)
                                                                    OR (i.item_type_id = f.parent_item_type_id AND i.item_id = f.parent_item_id)
                                                                   )
                                            LEFT JOIN huddle_profiles hp ON hp.huddle_id = f.parent_item_id
                                                                        AND hp.profile_id = u.for_profile_id
                                                                        AND f.parent_item_type_id = 5
                                           WHERE u.for_profile_id = $2
                                             AND i.profile_id IS NULL
                                             AND u.update_type_id IN (1, 4)
                                             AND f.microcosm_is_deleted IS NOT TRUE
                                             AND f.microcosm_is_moderated IS NOT TRUE
                                             AND f.item_is_deleted IS NOT TRUE
                                             AND f.item_is_moderated IS NOT TRUE
                                             AND f.parent_is_deleted IS NOT TRUE
                                             AND f.parent_is_moderated IS NOT TRUE
                                             AND (
                                                     f.microcosm_id IN (SELECT microcosm_id FROM m)
                                                  OR hp.profile_id = u.for_profile_id
                                                 )
                                           GROUP BY f.parent_item_type_id
                                                   ,f.parent_item_id
                                                   ,f.site_id
                                      ) r ON r.update_id = u.update_id
                                 JOIN watchers w ON w.profile_id = $2
                                                AND w.item_type_id = r.item_type_id
                                                AND w.item_id = r.item_id
                           UNION
                          -- 2;'reply_to_comment';'When a comment of yours is replied to'
                          -- 3;'mentioned';'When you are @mentioned in a comment'
                          SELECT u.update_id
                                ,u.for_profile_id
                                ,u.update_type_id
                                ,u.item_type_id
                                ,u.item_id
                                ,u.created_by
                                ,u.created
                                ,$1 AS site_id
                            FROM updates u
                           WHERE update_id IN (
                                     SELECT MAX(u.update_id)
                                       FROM updates u
                                            JOIN flags f ON f.item_type_id = u.item_type_id
                                                        AND f.item_id = u.item_id
                                            LEFT JOIN huddle_profiles hp ON hp.huddle_id = f.parent_item_id
                                                                        AND hp.profile_id = u.for_profile_id
                                                                        AND f.parent_item_type_id = 5
                                            LEFT JOIN ignores i ON i.profile_id = $2
                                                               AND (
                                                                       (i.item_type_id = 3 AND i.item_id = u.created_by)
                                                                    OR (i.item_type_id = f.parent_item_type_id AND i.item_id = f.parent_item_id)
                                                                   )
                                      WHERE u.for_profile_id = $2
                                        AND i.profile_id IS NULL
                                        AND (u.update_type_id = 2 OR u.update_type_id = 3) -- replies (2) & mentions (3)
                                        AND f.site_id = $1
                                        AND f.microcosm_is_deleted IS NOT TRUE
                                        AND f.microcosm_is_moderated IS NOT TRUE
                                        AND f.item_is_deleted IS NOT TRUE
                                        AND f.item_is_moderated IS NOT TRUE
                                        AND f.parent_is_deleted IS NOT TRUE
                                        AND f.parent_is_moderated IS NOT TRUE
                                        AND (
                                                f.microcosm_id IN (SELECT microcosm_id FROM m)
                                             OR hp.profile_id = u.for_profile_id
                                            )
                                      GROUP BY u.update_type_id
                                              ,u.item_type_id
                                              ,u.item_id
                                     )
                           UNION
                          -- 8;'new_item';'When a new item is created in a microcosm you are watching'
                          SELECT u.update_id
                                ,u.for_profile_id
                                ,u.update_type_id
                                ,u.item_type_id
                                ,u.item_id
                                ,u.created_by
                                ,u.created
                                ,$1 AS site_id
                            FROM updates u
                           WHERE update_id IN (
                                     SELECT MAX(u.update_id)
                                       FROM updates u
                                            JOIN flags f ON f.item_type_id = u.item_type_id
                                                        AND f.item_id = u.item_id
                                                        AND f.microcosm_id IN (SELECT microcosm_id FROM m)
                                            JOIN watchers w ON w.profile_id = $2
                                                           AND w.item_type_id = 2
                                                           AND w.item_id = f.microcosm_id
                                            LEFT JOIN ignores i ON i.profile_id = $2
                                                               AND i.item_type_id = 3
                                                               AND i.item_id = u.created_by
                                      WHERE u.for_profile_id = $2
                                        AND i.profile_id IS NULL
                                        AND u.update_type_id = 8
                                        AND f.microcosm_is_deleted IS NOT TRUE
                                        AND f.microcosm_is_moderated IS NOT TRUE
                                        AND f.item_is_deleted IS NOT TRUE
                                        AND f.item_is_moderated IS NOT TRUE
                                        AND f.parent_is_deleted IS NOT TRUE
                                        AND f.parent_is_moderated IS NOT TRUE
                                      GROUP BY u.item_type_id, u.item_id
                                 )
                          ) AS rollup ON rollup.item_type_id = f.item_type_id
                                     AND rollup.item_id = f.item_id
           ORDER BY created DESC
           LIMIT $3
          OFFSET $4
          ) final_rollup`

	rows, err := db.Query(sqlQuery, siteID, profileID, limit, offset)
	if err != nil {
		glog.Errorf(
			"db.Query(%d, %d, %d, %d) %+v",
			siteID,
			profileID,
			limit,
			offset,
			err,
		)
		return []UpdateType{}, 0, 0, http.StatusInternalServerError,
			fmt.Errorf("Database query failed")
	}
	defer rows.Close()

	var total int64
	ems := []UpdateType{}
	for rows.Next() {
		var unread bool
		m := UpdateType{}
		err = rows.Scan(
			&total,
			&m.ID,
			&m.ForProfileID,
			&m.UpdateTypeID,
			&m.ItemTypeID,
			&m.ItemID,
			&m.Meta.CreatedByID,
			&m.Meta.Created,
			&m.SiteID,
			&unread,
		)
		if err != nil {
			glog.Errorf("rows.Scan() %+v", err)
			return []UpdateType{}, 0, 0, http.StatusInternalServerError,
				fmt.Errorf("Row parsing error")
		}

		itemType, err := h.GetItemTypeFromInt(m.ItemTypeID)
		if err != nil {
			glog.Errorf("h.GetItemTypeFromInt(%d) %+v", m.ItemTypeID, err)
			return []UpdateType{}, 0, 0, http.StatusInternalServerError, err
		}
		m.ItemType = itemType

		m.Meta.Flags.Unread = unread

		ems = append(ems, m)
	}
	err = rows.Err()
	if err != nil {
		glog.Errorf("rows.Err() %+v", err)
		return []UpdateType{}, 0, 0, http.StatusInternalServerError,
			fmt.Errorf("Error fetching rows")
	}
	rows.Close()

	pages := h.GetPageCount(total, limit)
	maxOffset := h.GetMaxOffset(total, limit)

	if offset > maxOffset {
		glog.Infoln("offset > maxOffset")
		return []UpdateType{}, 0, 0, http.StatusBadRequest,
			fmt.Errorf("not enough records, "+
				"offset (%d) would return an empty page.", offset)
	}

	// Get the first round of summaries
	var wg1 sync.WaitGroup
	chan1 := make(chan SummaryContainerRequest)
	defer close(chan1)

	seq := 0
	for i := 0; i < len(ems); i++ {
		go HandleSummaryContainerRequest(
			siteID,
			h.ItemTypes[h.ItemTypeProfile],
			ems[i].Meta.CreatedByID,
			ems[i].ForProfileID,
			seq,
			chan1,
		)
		wg1.Add(1)
		seq++

		go HandleSummaryContainerRequest(
			siteID,
			ems[i].ItemTypeID,
			ems[i].ItemID,
			ems[i].ForProfileID,
			seq,
			chan1,
		)
		wg1.Add(1)
		seq++

		updateType, status, err := GetUpdateType(ems[i].UpdateTypeID)
		if err != nil {
			return []UpdateType{}, 0, 0, status, err
		}
		ems[i].UpdateType = updateType.Title
	}

	resps := []SummaryContainerRequest{}
	for i := 0; i < seq; i++ {
		resp := <-chan1
		wg1.Done()

		resps = append(resps, resp)
	}
	wg1.Wait()

	for _, resp := range resps {
		if resp.Err != nil {
			return []UpdateType{}, 0, 0, resp.Status, resp.Err
		}
	}

	sort.Sort(SummaryContainerRequestsBySeq(resps))

	// Insert the first round of summaries, and get the summaries for the
	// comments
	var wg2 sync.WaitGroup
	chan2 := make(chan SummaryContainerRequest)
	defer close(chan2)

	seq = 0
	parentSeq := 0
	for i := 0; i < len(ems); i++ {

		ems[i].Meta.CreatedBy = resps[seq].Item.Summary
		seq++

		ems[i].Item = resps[seq].Item.Summary
		seq++

		if ems[i].ItemTypeID == h.ItemTypes[h.ItemTypeComment] {
			comment := ems[i].Item.(CommentSummaryType)

			go HandleSummaryContainerRequest(
				siteID,
				comment.ItemTypeID,
				comment.ItemID,
				ems[i].ForProfileID,
				seq,
				chan2,
			)
			parentSeq++
			wg2.Add(1)
		}
	}

	parentResps := []SummaryContainerRequest{}
	for i := 0; i < parentSeq; i++ {
		resp := <-chan2
		wg2.Done()
		parentResps = append(parentResps, resp)
	}
	wg2.Wait()

	for _, resp := range parentResps {
		if resp.Err != nil {
			return []UpdateType{}, 0, 0, resp.Status, resp.Err
		}
	}

	sort.Sort(SummaryContainerRequestsBySeq(parentResps))

	// Insert the comment summaries, and get the summaries of the items the
	// comments are attached to
	var wg3 sync.WaitGroup
	chan3 := make(chan SummaryContainerRequest)
	defer close(chan3)

	parentSeq = 0
	commentItemSeq := 0
	for i := 0; i < len(ems); i++ {

		if ems[i].ItemTypeID == h.ItemTypes[h.ItemTypeComment] {
			comment := ems[i].Item.(CommentSummaryType)

			go HandleSummaryContainerRequest(
				siteID,
				comment.ItemTypeID,
				comment.ItemID,
				ems[i].ForProfileID,
				commentItemSeq,
				chan3,
			)
			parentSeq++
			commentItemSeq++
			wg3.Add(1)

			ems[i].ParentItemTypeID = comment.ItemTypeID
			parentItemType, err := h.GetMapStringFromInt(
				h.ItemTypes,
				comment.ItemTypeID,
			)
			if err != nil {
				return []UpdateType{}, 0, 0, http.StatusInternalServerError, err
			}
			ems[i].ParentItemType = parentItemType
			ems[i].ParentItemID = comment.ItemID
		}
	}

	commentResps := []SummaryContainerRequest{}
	for i := 0; i < commentItemSeq; i++ {
		resp := <-chan3
		wg3.Done()
		commentResps = append(commentResps, resp)
	}
	wg3.Wait()

	for _, resp := range commentResps {
		if resp.Err != nil {
			return []UpdateType{}, 0, 0, resp.Status, resp.Err
		}
	}

	sort.Sort(SummaryContainerRequestsBySeq(commentResps))

	commentItemSeq = 0
	for i := 0; i < len(ems); i++ {
		if ems[i].ItemTypeID == h.ItemTypes[h.ItemTypeComment] {
			ems[i].ParentItem = commentResps[commentItemSeq].Item.Summary
			commentItemSeq++
		}
	}

	return ems, total, pages, http.StatusOK, nil
}
Example #4
// GetHuddles returns a collection of huddles
func GetHuddles(
	siteID int64,
	profileID int64,
	limit int64,
	offset int64,
) (
	[]HuddleSummaryType,
	int64,
	int64,
	int,
	error,
) {
	// Retrieve resources
	db, err := h.GetConnection()
	if err != nil {
		glog.Errorf("h.GetConnection() %+v", err)
		return []HuddleSummaryType{}, 0, 0, http.StatusInternalServerError, err
	}

	var total int64
	err = db.QueryRow(`
SELECT COUNT(*) AS total
  FROM huddles h
  JOIN huddle_profiles hp ON hp.huddle_id = h.huddle_id
  LEFT JOIN ignores i ON i.profile_id = $1
                     AND i.item_type_id = 3
                     AND i.item_id = h.created_by
 WHERE hp.profile_id = $1
   AND i.profile_id IS NULL`,
		profileID,
	).Scan(&total)
	if err != nil {
		glog.Error(err)
		return []HuddleSummaryType{}, 0, 0, http.StatusInternalServerError,
			fmt.Errorf("Database query failed")
	}

	rows, err := db.Query(`
SELECT huddle_id
      ,has_unread(5, huddle_id, $1)
  FROM (
            SELECT h.huddle_id
              FROM huddles h
              JOIN huddle_profiles hp ON hp.huddle_id = h.huddle_id
              JOIN flags f ON f.item_type_id = 5
                          AND f.item_id = h.huddle_id
              LEFT JOIN ignores i ON i.profile_id = $1
                                 AND i.item_type_id = 3
                                 AND i.item_id = h.created_by
             WHERE hp.profile_id = $1
               AND i.profile_id IS NULL
             ORDER BY f.last_modified DESC
             LIMIT $2
            OFFSET $3
       ) r`,
		profileID,
		limit,
		offset,
	)
	if err != nil {
		glog.Errorf(
			"db.Query(%d, %d, %d, %d) %+v",
			siteID,
			profileID,
			limit,
			offset,
			err,
		)
		return []HuddleSummaryType{}, 0, 0, http.StatusInternalServerError,
			fmt.Errorf("Database query failed")
	}
	defer rows.Close()

	var ems []HuddleSummaryType

	for rows.Next() {
		var (
			id        int64
			hasUnread bool
		)
		err = rows.Scan(
			&id,
			&hasUnread,
		)
		if err != nil {
			glog.Errorf("rows.Scan() %+v", err)
			return []HuddleSummaryType{}, 0, 0, http.StatusInternalServerError,
				fmt.Errorf("Row parsing error")
		}

		m, status, err := GetHuddleSummary(siteID, profileID, id)
		if err != nil {
			glog.Errorf(
				"GetHuddleSummary(%d, %d, %d) %+v",
				siteID,
				profileID,
				id,
				err,
			)
			return []HuddleSummaryType{}, 0, 0, status, err
		}

		m.Meta.Flags.Unread = hasUnread

		ems = append(ems, m)
	}
	err = rows.Err()
	if err != nil {
		glog.Errorf("rows.Err() %+v", err)
		return []HuddleSummaryType{}, 0, 0, http.StatusInternalServerError,
			fmt.Errorf("Error fetching rows")
	}
	rows.Close()

	pages := h.GetPageCount(total, limit)
	maxOffset := h.GetMaxOffset(total, limit)

	if offset > maxOffset {
		glog.Warningln("offset > maxOffset")
		return []HuddleSummaryType{}, 0, 0, http.StatusBadRequest,
			fmt.Errorf("not enough records, "+
				"offset (%d) would return an empty page.", offset)
	}

	return ems, total, pages, http.StatusOK, nil
}
Example #5
// GetAttachments returns a collection of attachments
func GetAttachments(
	itemTypeID int64,
	itemID int64,
	limit int64,
	offset int64,
) (
	[]AttachmentType,
	int64,
	int64,
	int,
	error,
) {
	// Retrieve resources
	db, err := h.GetConnection()
	if err != nil {
		glog.Errorf("h.GetConnection() %+v", err)
		return []AttachmentType{}, 0, 0, http.StatusInternalServerError, err
	}

	rows, err := db.Query(`
SELECT COUNT(*) OVER() as total
      ,profile_id
      ,attachment_meta_id
      ,item_type_id
      ,file_sha1
      ,item_id
      ,created
      ,view_count
      ,file_name
      ,file_ext
  FROM attachments
 WHERE item_type_id = $1
   AND item_id = $2
 ORDER BY attachment_id
 LIMIT $3
OFFSET $4`,
		itemTypeID,
		itemID,
		limit,
		offset,
	)
	if err != nil {
		glog.Errorf(
			"db.Query(%d, %d, %d, %d) %+v",
			itemTypeID,
			itemID,
			limit,
			offset,
			err,
		)
		return []AttachmentType{}, 0, 0, http.StatusInternalServerError,
			fmt.Errorf("Database query failed")
	}
	defer rows.Close()

	var total int64
	attachments := []AttachmentType{}
	for rows.Next() {
		m := AttachmentType{}
		err = rows.Scan(
			&total,
			&m.ProfileID,
			&m.AttachmentMetaID,
			&m.ItemTypeID,
			&m.FileHash,
			&m.ItemID,
			&m.Created,
			&m.ViewCount,
			&m.FileName,
			&m.FileExt,
		)
		if err != nil {
			glog.Errorf("rows.Scan() %+v", err)
			return []AttachmentType{}, 0, 0, http.StatusInternalServerError,
				fmt.Errorf("Row parsing error")
		}

		// TODO: add link to the file metadata and describe the
		// content-type of the file
		filePath := m.FileHash
		if m.FileExt != "" {
			filePath += `.` + m.FileExt
		}
		link := h.LinkType{
			Rel:   "related",
			Href:  fmt.Sprintf("%s/%s", h.APITypeFile, filePath),
			Title: "File resource",
		}
		m.Meta.Links = append(m.Meta.Links, link)
		attachments = append(attachments, m)
	}
	err = rows.Err()
	if err != nil {
		glog.Errorf("rows.Err() %+v", err)
		return []AttachmentType{}, 0, 0, http.StatusInternalServerError,
			fmt.Errorf("Error fetching rows")
	}
	rows.Close()

	pages := h.GetPageCount(total, limit)
	maxOffset := h.GetMaxOffset(total, limit)

	if offset > maxOffset {
		return []AttachmentType{}, 0, 0, http.StatusBadRequest,
			fmt.Errorf(
				"not enough records, offset (%d) would return an empty page",
				offset,
			)
	}

	return attachments, total, pages, http.StatusOK, nil
}
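GetAttachments uses COUNT(*) OVER() so that the pre-LIMIT total arrives on every returned row, avoiding a second counting query. Below is a stripped-down sketch of the same idiom with an illustrative table and columns rather than the real schema; db is a *sql.DB from database/sql.

// listItemIDs shows the COUNT(*) OVER() + LIMIT/OFFSET idiom in isolation.
// Table and column names are illustrative only.
func listItemIDs(db *sql.DB, ownerID, limit, offset int64) ([]int64, int64, error) {
	rows, err := db.Query(`
SELECT COUNT(*) OVER() AS total -- total matching rows, repeated on every row returned
      ,id
  FROM items
 WHERE owner_id = $1
 ORDER BY id
 LIMIT $2
OFFSET $3`,
		ownerID, limit, offset,
	)
	if err != nil {
		return nil, 0, err
	}
	defer rows.Close()

	var (
		total int64
		ids   []int64
	)
	for rows.Next() {
		var id int64
		if err := rows.Scan(&total, &id); err != nil {
			return nil, 0, err
		}
		ids = append(ids, id)
	}
	return ids, total, rows.Err()
}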
Example #6
func searchFullText(
	siteID int64,
	searchURL url.URL,
	profileID int64,
	m SearchResults,
) (
	SearchResults,
	int,
	error,
) {

	limit, offset, status, err := h.GetLimitAndOffset(searchURL.Query())
	if err != nil {
		glog.Errorf("h.GetLimitAndOffset(searchURL.Query()) %+v", err)
		return m, status, err
	}

	start := time.Now()

	// Search options
	var joinEvents bool

	orderBy := `rank DESC
         ,f.last_modified DESC`

	switch m.Query.Sort {
	case "date":
		orderBy = `f.last_modified DESC`
	case "oldest":
		joinEvents = true
		orderBy = `e."when" ASC`
	case "newest":
		joinEvents = true
		orderBy = `e."when" DESC`
	}

	var filterFollowing string
	if m.Query.Following {
		filterFollowing = `
                  JOIN watchers w ON w.item_type_id = f.item_type_id
                                 AND w.item_id = f.item_id
                                 AND w.profile_id = $2`
	}

	fullTextScope := `document`
	var filterTitle string
	if m.Query.InTitle {
		fullTextScope = `title`
		filterTitle = `
              AND f.item_type_id <> 4`
	}

	var filterItemTypes string
	var filterItems string
	var includeComments bool
	if !m.Query.InTitle {
		includeComments = true
	}

	if len(m.Query.ItemTypeIDs) > 0 {
		var itemTypeInList []string
		var itemTypeSansCommentsInList []string

		// Take care of the item types
		for _, v := range m.Query.ItemTypeIDs {
			switch v {
			case h.ItemTypes[h.ItemTypeComment]:
				includeComments = true
				itemTypeInList = append(itemTypeInList, strconv.FormatInt(v, 10))
			default:
				itemTypeInList = append(itemTypeInList, strconv.FormatInt(v, 10))
				itemTypeSansCommentsInList = append(itemTypeSansCommentsInList, strconv.FormatInt(v, 10))
			}
		}

		if len(m.Query.ItemIDs) == 0 {
			if len(m.Query.ItemTypeIDs) == 1 {
				filterItemTypes = fmt.Sprintf(`
              AND f.item_type_id = %d`,
					m.Query.ItemTypeIDs[0],
				)
			} else {
				if includeComments {
					filterItemTypes = `
              AND (   (f.item_type_id IN (` + strings.Join(itemTypeSansCommentsInList, `,`) + `))
                   OR (f.item_type_id = 4 AND f.parent_item_type_id IN (` + strings.Join(itemTypeSansCommentsInList, `,`) + `))
                 )`
				} else {
					filterItemTypes = `
              AND f.item_type_id IN (` + strings.Join(itemTypeInList, `,`) + `)`
				}
			}
		}

		// Take care of the item ids, which are only valid when we have item
		// types
		if len(m.Query.ItemIDs) > 0 {
			var itemIdsInList []string
			for _, v := range m.Query.ItemIDs {
				itemIdsInList = append(itemIdsInList, strconv.FormatInt(v, 10))
			}

			if len(m.Query.ItemIDs) == 1 {
				if includeComments {
					filterItems = fmt.Sprintf(`
              AND (   (si.item_type_id IN (`+strings.Join(itemTypeSansCommentsInList, `,`)+`) AND si.item_id = %d)
                   OR (si.item_type_id = 4 AND si.parent_item_id = %d AND si.parent_item_type_id IN (`+strings.Join(itemTypeSansCommentsInList, `,`)+`))
                  )`,
						m.Query.ItemIDs[0],
						m.Query.ItemIDs[0],
					)
				} else {
					filterItems = fmt.Sprintf(`
              AND si.item_id = %d`,
						m.Query.ItemIDs[0],
					)
				}
			} else {
				if includeComments {
					filterItems = `
              AND (   (si.item_type_id IN (` + strings.Join(itemTypeSansCommentsInList, `,`) + `) AND si.item_id IN (` + strings.Join(itemIdsInList, `,`) + `))
                   OR (si.item_type_id = 4 AND si.parent_item_type_id IN (` + strings.Join(itemTypeSansCommentsInList, `,`) + `) AND si.parent_item_id IN (` + strings.Join(itemIdsInList, `,`) + `))
                  )`
				} else {
					filterItems = `
              AND si.item_type_id IN (` + strings.Join(itemTypeInList, `,`) + `)
              AND si.item_id IN (` + strings.Join(itemIdsInList, `,`) + `)`
				}
			}
		}
	}

	// Note: hashtags being inserted into the query this way may appear
	// initially to be a vector for a SQL injection attack. However the
	// source of these hashtags is a regexp in hashtags.go which only
	// matches contiguous alphanum strings and does not permit spaces,
	// quotes, semicolons or any other escapable sequence that can be
	// utilised to create an attack.
	var filterHashTag string
	for _, hashtag := range m.Query.Hashtags {
		filterHashTag += `
              AND si.` + fullTextScope + `_text ~* '\W` + hashtag + `\W'`
	}

	var filterProfileID string
	if m.Query.ProfileID > 0 {
		filterProfileID = fmt.Sprintf(`
              AND si.profile_id = %d`, m.Query.ProfileID)
	}

	var filterMicrocosmIDs string
	if len(m.Query.MicrocosmIDs) > 0 {
		if len(m.Query.MicrocosmIDs) == 1 {
			filterMicrocosmIDs = fmt.Sprintf(`
   AND f.microcosm_id = %d`, m.Query.MicrocosmIDs[0])
		} else {
			var inList = ``

			for i, v := range m.Query.MicrocosmIDs {
				inList += strconv.FormatInt(v, 10)
				if i < len(m.Query.MicrocosmIDs)-1 {
					inList += `,`
				}
			}
			filterMicrocosmIDs = `
   AND f.microcosm_id IN (` + inList + `)`
		}
	}

	var filterModified string
	if !m.Query.SinceTime.IsZero() || !m.Query.UntilTime.IsZero() {

		if m.Query.UntilTime.IsZero() {
			filterModified = fmt.Sprintf(`
   AND f.last_modified > to_timestamp(%d)`,
				m.Query.SinceTime.Unix(),
			)

		} else if m.Query.SinceTime.IsZero() {
			filterModified = fmt.Sprintf(`
   AND f.last_modified < to_timestamp(%d)`,
				m.Query.UntilTime.Unix(),
			)
		} else {
			filterModified = fmt.Sprintf(`
   AND f.last_modified BETWEEN to_timestamp(%d) AND to_timestamp(%d)`,
				m.Query.SinceTime.Unix(),
				m.Query.UntilTime.Unix(),
			)
		}
	}

	var (
		filterEventsJoin  string
		filterEventsWhere string
	)
	if !m.Query.EventAfterTime.IsZero() || !m.Query.EventBeforeTime.IsZero() {
		joinEvents = true

		if m.Query.EventBeforeTime.IsZero() {
			filterEventsWhere = fmt.Sprintf(`
   AND e."when" > to_timestamp(%d)`,
				m.Query.EventAfterTime.Unix(),
			)

		} else if m.Query.EventAfterTime.IsZero() {
			filterEventsWhere = fmt.Sprintf(`
   AND e."when" < to_timestamp(%d)`,
				m.Query.EventBeforeTime.Unix(),
			)
		} else {
			filterEventsWhere = fmt.Sprintf(`
   AND e."when" BETWEEN to_timestamp(%d) AND to_timestamp(%d)`,
				m.Query.EventAfterTime.Unix(),
				m.Query.EventBeforeTime.Unix(),
			)
		}
	}

	if joinEvents || m.Query.Attendee {
		filterEventsJoin = `
       JOIN events e ON e.event_id = f.item_id`

		if m.Query.Attendee {
			filterEventsJoin += `
       JOIN attendees a ON a.event_id = e.event_id
                       AND a.profile_id = ` + strconv.FormatInt(profileID, 10) + `
                       AND a.state_id = 1`
		}
	}

	sqlQuery := `
WITH m AS (
    SELECT m.microcosm_id
      FROM microcosms m
      LEFT JOIN permissions_cache p ON p.site_id = m.site_id
                                   AND p.item_type_id = 2
                                   AND p.item_id = m.microcosm_id
                                   AND p.profile_id = $2
           LEFT JOIN ignores i ON i.profile_id = $2
                              AND i.item_type_id = 2
                              AND i.item_id = m.microcosm_id
     WHERE m.site_id = $1
       AND m.is_deleted IS NOT TRUE
       AND m.is_moderated IS NOT TRUE
       AND i.profile_id IS NULL
       AND (
               (p.can_read IS NOT NULL AND p.can_read IS TRUE)
            OR (get_effective_permissions($1,m.microcosm_id,2,m.microcosm_id,$2)).can_read IS TRUE
           )
)
SELECT total
      ,item_type_id
      ,item_id
      ,parent_item_type_id
      ,parent_item_id
      ,last_modified
      ,rank
      ,ts_headline(` + fullTextScope + `_text, query) AS highlight
      ,has_unread(item_type_id, item_id, $2)
  FROM (
           SELECT COUNT(*) OVER() AS total
                 ,f.item_type_id
                 ,f.item_id
                 ,f.parent_item_type_id
                 ,f.parent_item_id
                 ,f.last_modified
                 ,ts_rank_cd(si.` + fullTextScope + `_vector, query, 8) AS rank
                 ,si.` + fullTextScope + `_text
                 ,query.query
             FROM search_index si
                  JOIN flags f ON f.item_type_id = si.item_type_id
                              AND f.item_id = si.item_id
             LEFT JOIN ignores i ON i.profile_id = $2
                                AND i.item_type_id = f.item_type_id
                                AND i.item_id = f.item_id` +
		filterEventsJoin +
		filterFollowing + `
             LEFT JOIN huddle_profiles h ON (f.parent_item_type_id = 5 OR f.item_type_id = 5)
                                        AND h.huddle_id = COALESCE(f.parent_item_id, f.item_id)
                                        AND h.profile_id = $2
                 ,plainto_tsquery($3) AS query
            WHERE f.site_id = $1
              AND i.profile_id IS NULL` +
		filterModified +
		filterMicrocosmIDs +
		filterTitle +
		filterItemTypes +
		filterItems +
		filterHashTag +
		filterEventsWhere +
		filterProfileID + `
              AND f.microcosm_is_deleted IS NOT TRUE
              AND f.microcosm_is_moderated IS NOT TRUE
              AND f.parent_is_deleted IS NOT TRUE
              AND f.parent_is_moderated IS NOT TRUE
              AND f.item_is_deleted IS NOT TRUE
              AND f.item_is_moderated IS NOT TRUE
              AND si.` + fullTextScope + `_vector @@ query` + `
              AND (
                      -- Things that are public by default
                      COALESCE(f.parent_item_type_id, f.item_type_id) = 3
                   OR -- Things in microcosms
                      COALESCE(f.microcosm_id, f.item_id) IN (SELECT microcosm_id FROM m)
                   OR -- Things in huddles
                      COALESCE(f.parent_item_id, f.item_id) = h.huddle_id
                  )
            ORDER BY ` + orderBy + `
            LIMIT $4
           OFFSET $5
       ) r
`

	db, err := h.GetConnection()
	if err != nil {
		glog.Errorf("h.GetConnection() %+v", err)
		return m, http.StatusInternalServerError, err
	}

	queryID := `Search` + randomString()
	queryTimer := time.NewTimer(searchTimeout)
	go func() {
		<-queryTimer.C
		db.Exec(`SELECT pg_cancel_backend(pid)
  FROM pg_stat_activity
 WHERE state = 'active'
   AND query LIKE '--` + queryID + `%'`)
	}()
	// This nested query is used to run the `has_unread` query on only the rows
	// that are returned, rather than on all rows in the underlying query before
	// limit has been applied.
	rows, err := db.Query(
		`--`+queryID+
			sqlQuery,
		siteID,
		profileID,
		m.Query.Query,
		limit,
		offset,
	)
	queryTimer.Stop()
	if err != nil {
		e, ok := err.(*pq.Error)

		if !ok {
			glog.Errorf(
				"stmt.Query(%d, %s, %d, %d, %d) %+v",
				siteID,
				m.Query.Query,
				profileID,
				limit,
				offset,
				err,
			)
			return m, http.StatusInternalServerError,
				fmt.Errorf("Database query failed")
		}

		switch e.Code.Name() {
		case "query_canceled":
			glog.Errorf(
				"Query for '%s' took too long",
				m.Query.Query,
			)
			return m, http.StatusInternalServerError,
				merrors.MicrocosmError{
					ErrorCode:    24,
					ErrorMessage: "The search query took too long and has been cancelled",
				}
		default:
			glog.Errorf(
				"stmt.Query(%d, %s, %d, %d, %d) %+v",
				siteID,
				m.Query.Query,
				profileID,
				limit,
				offset,
				err,
			)
			return m, http.StatusInternalServerError,
				fmt.Errorf("Database query failed")
		}
	}
	defer rows.Close()

	var total int64
	rs := []SearchResult{}
	for rows.Next() {
		var r SearchResult
		err = rows.Scan(
			&total,
			&r.ItemTypeID,
			&r.ItemID,
			&r.ParentItemTypeID,
			&r.ParentItemID,
			&r.LastModified,
			&r.Rank,
			&r.Highlight,
			&r.Unread,
		)
		if err != nil {
			glog.Errorf("rows.Scan() %+v", err)
			return m, http.StatusInternalServerError,
				fmt.Errorf("Row parsing error")
		}

		itemType, err := h.GetMapStringFromInt(h.ItemTypes, r.ItemTypeID)
		if err != nil {
			glog.Errorf(
				"h.GetMapStringFromInt(h.ItemTypes, %d) %+v",
				r.ItemTypeID,
				err,
			)
			return m, http.StatusInternalServerError, err
		}
		r.ItemType = itemType

		if r.ParentItemTypeID.Valid {
			parentItemType, err :=
				h.GetMapStringFromInt(h.ItemTypes, r.ParentItemTypeID.Int64)
			if err != nil {
				glog.Errorf(
					"h.GetMapStringFromInt(h.ItemTypes, %d) %+v",
					r.ParentItemTypeID.Int64,
					err,
				)
				return m, http.StatusInternalServerError, err
			}
			r.ParentItemType = parentItemType
		}

		rs = append(rs, r)
	}
	err = rows.Err()
	if err != nil {
		glog.Errorf("rows.Err() %+v", err)
		return m, http.StatusInternalServerError,
			fmt.Errorf("Error fetching rows")
	}
	rows.Close()

	pages := h.GetPageCount(total, limit)
	maxOffset := h.GetMaxOffset(total, limit)

	if offset > maxOffset {
		glog.Infoln("offset > maxOffset")
		return m, http.StatusBadRequest,
			fmt.Errorf("not enough records, "+
				"offset (%d) would return an empty page.", offset)
	}

	// Extract the summaries
	var wg1 sync.WaitGroup
	req := make(chan SummaryContainerRequest)
	defer close(req)

	seq := 0
	for i := 0; i < len(rs); i++ {
		go HandleSummaryContainerRequest(
			siteID,
			rs[i].ItemTypeID,
			rs[i].ItemID,
			profileID,
			seq,
			req,
		)
		seq++
		wg1.Add(1)

		if rs[i].ParentItemID.Valid && rs[i].ParentItemID.Int64 > 0 {
			go HandleSummaryContainerRequest(
				siteID,
				rs[i].ParentItemTypeID.Int64,
				rs[i].ParentItemID.Int64,
				profileID,
				seq,
				req,
			)
			seq++
			wg1.Add(1)
		}
	}

	resps := []SummaryContainerRequest{}
	for i := 0; i < seq; i++ {
		resp := <-req
		wg1.Done()
		resps = append(resps, resp)
	}
	wg1.Wait()

	for _, resp := range resps {
		if resp.Err != nil {
			return m, resp.Status, resp.Err
		}
	}

	sort.Sort(SummaryContainerRequestsBySeq(resps))

	seq = 0
	for i := 0; i < len(rs); i++ {

		rs[i].Item = resps[seq].Item.Summary
		seq++

		if rs[i].ParentItemID.Valid && rs[i].ParentItemID.Int64 > 0 {
			rs[i].ParentItem = resps[seq].Item.Summary
			seq++
		}
	}

	m.Results = h.ConstructArray(
		rs,
		"result",
		total,
		limit,
		offset,
		pages,
		&searchURL,
	)

	// return milliseconds
	m.TimeTaken = time.Since(start).Nanoseconds() / 1000000

	return m, http.StatusOK, nil

}
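searchFullText bounds the full-text query by tagging it with a random comment and, on a timer, asking PostgreSQL to pg_cancel_backend any active backend whose query starts with that tag; a query_canceled error is then translated into a friendlier message. A condensed sketch of that tag-and-cancel pattern follows; randomString and searchTimeout are assumed to exist in the surrounding package, and a modern alternative would be context.WithTimeout with db.QueryContext.

// queryWithTimeout is an illustrative wrapper around the tag-and-cancel
// pattern used above; it is not part of the original code.
func queryWithTimeout(db *sql.DB, sqlQuery string, args ...interface{}) (*sql.Rows, error) {
	queryID := "Search" + randomString() // unique tag embedded as a leading SQL comment

	timer := time.NewTimer(searchTimeout)
	go func() {
		<-timer.C
		// If the timer fires first, cancel any active backend still running our tagged query.
		db.Exec(`SELECT pg_cancel_backend(pid)
  FROM pg_stat_activity
 WHERE state = 'active'
   AND query LIKE '--` + queryID + `%'`)
	}()

	rows, err := db.Query("--"+queryID+"\n"+sqlQuery, args...)
	// Note: as in the example above, if Stop prevents the timer from ever firing
	// the helper goroutine blocks on the channel indefinitely; context
	// cancellation avoids that.
	timer.Stop()
	return rows, err
}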
Example #7
// GetIgnored returns a collection of ignored items
func GetIgnored(
	siteID int64,
	profileID int64,
	limit int64,
	offset int64,
) (
	[]IgnoreType,
	int64,
	int64,
	int,
	error,
) {
	db, err := h.GetConnection()
	if err != nil {
		glog.Errorf("h.GetConnection() %+v", err)
		return []IgnoreType{}, 0, 0, http.StatusInternalServerError, err
	}

	// This query intentionally does not provide has_unread() status. This is
	// to pacify angry people ignoring things, then unignoring on updates and
	// subsequently getting into fights.
	sqlQuery := `--Get Ignores
WITH m AS (
    SELECT m.microcosm_id
      FROM microcosms m
      LEFT JOIN permissions_cache p ON p.site_id = m.site_id
                                   AND p.item_type_id = 2
                                   AND p.item_id = m.microcosm_id
                                   AND p.profile_id = $2
     WHERE m.site_id = $1
       AND m.is_deleted IS NOT TRUE
       AND m.is_moderated IS NOT TRUE
       AND (
               (p.can_read IS NOT NULL AND p.can_read IS TRUE)
            OR (get_effective_permissions($1,m.microcosm_id,2,m.microcosm_id,$2)).can_read IS TRUE
           )
)
SELECT COUNT(*) OVER() AS total
      ,a.profile_id
      ,a.item_type_id
      ,a.item_id
  FROM (
           SELECT i.profile_id
                 ,i.item_type_id
                 ,i.item_id
             FROM ignores i
            INNER JOIN flags f ON f.item_type_id = i.item_type_id
                              AND f.item_id = i.item_id
            WHERE i.profile_id = $2
              AND f.site_id = $1
              AND (
                      f.microcosm_id IS NULL
                   OR f.microcosm_id IN (SELECT microcosm_id FROM m)
                  )
              AND f.microcosm_is_deleted IS NOT TRUE
              AND f.microcosm_is_moderated IS NOT TRUE
              AND f.parent_is_deleted IS NOT TRUE
              AND f.parent_is_moderated IS NOT TRUE
              AND f.item_is_deleted IS NOT TRUE
              AND f.item_is_moderated IS NOT TRUE
       ) a
 INNER JOIN search_index si ON si.item_type_id = a.item_type_id
                           AND si.item_id = a.item_id
 ORDER BY a.item_type_id ASC
         ,si.title_text ASC
 LIMIT $3
OFFSET $4`

	rows, err := db.Query(sqlQuery, siteID, profileID, limit, offset)
	if err != nil {
		glog.Errorf(
			"db.Query(%d, %d, %d, %d) %+v",
			siteID,
			profileID,
			limit,
			offset,
			err,
		)
		return []IgnoreType{}, 0, 0, http.StatusInternalServerError,
			fmt.Errorf("Database query failed")
	}
	defer rows.Close()

	var total int64
	ems := []IgnoreType{}
	for rows.Next() {
		m := IgnoreType{}
		err = rows.Scan(
			&total,
			&m.ProfileID,
			&m.ItemTypeID,
			&m.ItemID,
		)
		if err != nil {
			glog.Errorf("rows.Scan() %+v", err)
			return []IgnoreType{}, 0, 0, http.StatusInternalServerError,
				fmt.Errorf("Row parsing error")
		}

		itemType, err := h.GetItemTypeFromInt(m.ItemTypeID)
		if err != nil {
			glog.Errorf("h.GetItemTypeFromInt(%d) %+v", m.ItemTypeID, err)
			return []IgnoreType{}, 0, 0, http.StatusInternalServerError, err
		}
		m.ItemType = itemType

		ems = append(ems, m)
	}
	err = rows.Err()
	if err != nil {
		glog.Errorf("rows.Err() %+v", err)
		return []IgnoreType{}, 0, 0, http.StatusInternalServerError,
			fmt.Errorf("Error fetching rows")
	}
	rows.Close()

	pages := h.GetPageCount(total, limit)
	maxOffset := h.GetMaxOffset(total, limit)

	if offset > maxOffset {
		glog.Infoln("offset > maxOffset")
		return []IgnoreType{}, 0, 0, http.StatusBadRequest,
			fmt.Errorf("not enough records, "+
				"offset (%d) would return an empty page", offset)
	}

	// Get the first round of summaries
	var wg1 sync.WaitGroup
	chan1 := make(chan SummaryContainerRequest)
	defer close(chan1)

	seq := 0
	for i := 0; i < len(ems); i++ {
		go HandleSummaryContainerRequest(
			siteID,
			ems[i].ItemTypeID,
			ems[i].ItemID,
			ems[i].ProfileID,
			seq,
			chan1,
		)
		wg1.Add(1)
		seq++
	}

	resps := []SummaryContainerRequest{}
	for i := 0; i < seq; i++ {
		resp := <-chan1
		wg1.Done()

		resps = append(resps, resp)
	}
	wg1.Wait()

	for _, resp := range resps {
		if resp.Err != nil {
			return []IgnoreType{}, 0, 0, resp.Status, resp.Err
		}
	}

	sort.Sort(SummaryContainerRequestsBySeq(resps))

	seq = 0
	for i := 0; i < len(ems); i++ {
		ems[i].Item = resps[seq].Item.Summary
		seq++
	}

	return ems, total, pages, http.StatusOK, nil
}
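GetIgnored and most of the other queries on this page exclude ignored rows with the LEFT JOIN ... IS NULL anti-join idiom: try to join the exclusion table and keep only the rows where no match was found. The same idea in isolation, with the join conditions reduced to the essentials:

// Illustrative anti-join: return flags rows that the given profile has NOT ignored.
const excludeIgnoredSQL = `
SELECT f.item_type_id
      ,f.item_id
  FROM flags f
  LEFT JOIN ignores i ON i.profile_id = $2          -- attempt to find a matching ignore
                     AND i.item_type_id = f.item_type_id
                     AND i.item_id = f.item_id
 WHERE f.site_id = $1
   AND i.profile_id IS NULL                         -- keep only rows with no ignore match`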
Example #8
// GetEvents returns a collection of events
func GetEvents(
	siteID int64,
	profileID int64,
	attending bool,
	limit int64,
	offset int64,
) (
	[]EventSummaryType,
	int64,
	int64,
	int,
	error,
) {
	db, err := h.GetConnection()
	if err != nil {
		return []EventSummaryType{}, 0, 0, http.StatusInternalServerError, err
	}

	var whereAttending string
	if attending {
		whereAttending = `
   AND is_attending(f.item_id, $3)`
	}

	rows, err := db.Query(`--GetEvents
WITH m AS (
    SELECT m.microcosm_id
      FROM microcosms m
      LEFT JOIN ignores i ON i.profile_id = $3
                         AND i.item_type_id = 2
                         AND i.item_id = m.microcosm_id
     WHERE i.profile_id IS NULL
       AND (get_effective_permissions(m.site_id, m.microcosm_id, 2, m.microcosm_id, $3)).can_read IS TRUE
)
SELECT COUNT(*) OVER() AS total
      ,f.item_id
      ,is_attending(f.item_id, $3)
  FROM flags f
  LEFT JOIN ignores i ON i.profile_id = $3
                     AND i.item_type_id = f.item_type_id
                     AND i.item_id = f.item_id
 WHERE f.site_id = $1
   AND i.profile_id IS NULL
   AND f.item_type_id = $2
   AND f.microcosm_is_deleted IS NOT TRUE
   AND f.microcosm_is_moderated IS NOT TRUE
   AND f.parent_is_deleted IS NOT TRUE
   AND f.parent_is_moderated IS NOT TRUE
   AND f.item_is_deleted IS NOT TRUE
   AND f.item_is_moderated IS NOT TRUE`+whereAttending+`
   AND f.microcosm_id IN (SELECT * FROM m)
 ORDER BY f.item_is_sticky DESC
         ,f.last_modified DESC
 LIMIT $4
OFFSET $5`,
		siteID,
		h.ItemTypes[h.ItemTypeEvent],
		profileID,
		limit,
		offset,
	)
	if err != nil {
		return []EventSummaryType{}, 0, 0, http.StatusInternalServerError,
			fmt.Errorf("Database query failed: %v", err.Error())
	}
	defer rows.Close()

	var ems []EventSummaryType

	var total int64
	for rows.Next() {
		var (
			id          int64
			isAttending bool
		)
		err = rows.Scan(
			&total,
			&id,
			&isAttending,
		)
		if err != nil {
			return []EventSummaryType{}, 0, 0, http.StatusInternalServerError,
				fmt.Errorf("Row parsing error: %v", err.Error())
		}

		m, status, err := GetEventSummary(siteID, id, profileID)
		if err != nil {
			return []EventSummaryType{}, 0, 0, status, err
		}

		m.Meta.Flags.Attending = isAttending
		ems = append(ems, m)
	}
	err = rows.Err()
	if err != nil {
		return []EventSummaryType{}, 0, 0, http.StatusInternalServerError,
			fmt.Errorf("Error fetching rows: %v", err.Error())
	}
	rows.Close()

	pages := h.GetPageCount(total, limit)
	maxOffset := h.GetMaxOffset(total, limit)

	if offset > maxOffset {
		return []EventSummaryType{}, 0, 0, http.StatusBadRequest,
			fmt.Errorf(
				"not enough records, offset (%d) would return an empty page",
				offset,
			)
	}

	return ems, total, pages, http.StatusOK, nil
}
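A minimal call sketch for GetEvents, assuming the caller already holds a siteID and profileID from the request context (the function name and the 25/0 paging values are illustrative only):

// Hypothetical caller, not part of the original listing.
func attendingEventsFirstPage(siteID, profileID int64) ([]EventSummaryType, error) {
	events, _, _, status, err := GetEvents(siteID, profileID, true, 25, 0)
	if err != nil {
		return nil, fmt.Errorf("GetEvents failed with HTTP status %d: %v", status, err)
	}
	return events, nil
}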
Example No. 9
// GetAttendees returns a collection of attendees
func GetAttendees(
	siteID int64,
	eventID int64,
	limit int64,
	offset int64,
	attending bool,
) (
	[]AttendeeType,
	int64,
	int64,
	int,
	error,
) {
	// Retrieve resources
	db, err := h.GetConnection()
	if err != nil {
		return []AttendeeType{}, 0, 0, http.StatusInternalServerError, err
	}

	var where string
	if attending {
		where += `
		 AND state_id = 1`
	}

	rows, err := db.Query(
		fmt.Sprintf(`
SELECT COUNT(*) OVER() AS total
      ,attendee_id
  FROM attendees
 WHERE event_id = $1%s
 ORDER BY state_id ASC, state_date ASC
 LIMIT $2
OFFSET $3`,
			where,
		),
		eventID,
		limit,
		offset,
	)
	if err != nil {
		return []AttendeeType{}, 0, 0, http.StatusInternalServerError,
			fmt.Errorf("Database query failed: %v", err.Error())
	}
	defer rows.Close()

	// Get a list of the identifiers of the items to return
	var total int64
	ids := []int64{}
	for rows.Next() {
		var id int64
		err = rows.Scan(
			&total,
			&id,
		)
		if err != nil {
			return []AttendeeType{}, 0, 0, http.StatusInternalServerError,
				fmt.Errorf("Row parsing error: %v", err.Error())
		}

		ids = append(ids, id)
	}
	err = rows.Err()
	if err != nil {
		return []AttendeeType{}, 0, 0, http.StatusInternalServerError,
			fmt.Errorf("Error fetching rows: %v", err.Error())
	}
	rows.Close()

	// Make a request for each identifier
	var wg1 sync.WaitGroup
	req := make(chan AttendeeRequest)
	defer close(req)

	for seq, id := range ids {
		go HandleAttendeeRequest(siteID, id, seq, req)
		wg1.Add(1)
	}

	// Receive the responses and check for errors
	resps := []AttendeeRequest{}
	for i := 0; i < len(ids); i++ {
		resp := <-req
		wg1.Done()
		resps = append(resps, resp)
	}
	wg1.Wait()

	for _, resp := range resps {
		if resp.Err != nil {
			return []AttendeeType{}, 0, 0, resp.Status, resp.Err
		}
	}

	// Sort them
	sort.Sort(AttendeeRequestBySeq(resps))

	// Extract the values
	ems := []AttendeeType{}
	for _, resp := range resps {
		ems = append(ems, resp.Item)
	}

	pages := h.GetPageCount(total, limit)
	maxOffset := h.GetMaxOffset(total, limit)

	if offset > maxOffset {
		return []AttendeeType{}, 0, 0, http.StatusBadRequest,
			fmt.Errorf(
				"not enough records, offset (%d) would return an empty page",
				offset,
			)
	}

	return ems, total, pages, http.StatusOK, nil
}
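Because GetAttendees returns the overall total alongside each page, a caller can walk every page with a simple offset loop. A sketch (the page size and helper name are illustrative):

// Hypothetical helper that collects every attendee marked as attending.
func allAttendingAttendees(siteID, eventID int64) ([]AttendeeType, error) {
	const limit = int64(50)
	var all []AttendeeType
	for offset := int64(0); ; offset += limit {
		page, total, _, _, err := GetAttendees(siteID, eventID, limit, offset, true)
		if err != nil {
			return nil, err
		}
		all = append(all, page...)
		if offset+limit >= total {
			return all, nil
		}
	}
}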
Example No. 10
// GetAllItems fetches items within a microcosm
func GetAllItems(
	siteID int64,
	microcosmID int64,
	profileID int64,
	limit int64,
	offset int64,
) (
	[]SummaryContainer,
	int64,
	int64,
	int,
	error,
) {
	// Retrieve resources
	db, err := h.GetConnection()
	if err != nil {
		return []SummaryContainer{}, 0, 0, http.StatusInternalServerError, err
	}

	sqlFromWhere := `
          FROM flags f
          LEFT JOIN ignores i ON i.profile_id = $3
                             AND i.item_type_id = f.item_type_id
                             AND i.item_id = f.item_id
         WHERE f.microcosm_id = (
                   SELECT $2::bigint AS microcosm_id
                    WHERE (get_effective_permissions($1, $2, 2, $2, $3)).can_read IS TRUE
               )
           AND (f.item_type_id = 6 OR f.item_type_id = 9)
           AND f.site_id = $1
           AND i.profile_id IS NULL
           AND f.microcosm_is_deleted IS NOT TRUE
           AND f.microcosm_is_moderated IS NOT TRUE
           AND f.parent_is_deleted IS NOT TRUE
           AND f.parent_is_moderated IS NOT TRUE
           AND f.item_is_deleted IS NOT TRUE
           AND f.item_is_moderated IS NOT TRUE`

	var total int64
	err = db.QueryRow(`
SELECT COUNT(*) AS total`+sqlFromWhere,
		siteID,
		microcosmID,
		profileID,
	).Scan(
		&total,
	)
	if err != nil {
		glog.Error(err)
		return []SummaryContainer{}, 0, 0, http.StatusInternalServerError,
			fmt.Errorf("Database query failed: %v", err.Error())
	}

	rows, err := db.Query(`
SELECT item_type_id
      ,item_id
      ,has_unread(item_type_id, item_id, $3)
      ,CASE WHEN item_type_id = 9
           THEN is_attending(item_id, $3)
           ELSE FALSE
       END AS is_attending
  FROM (
        SELECT f.item_type_id
              ,f.item_id`+sqlFromWhere+`
         ORDER BY f.item_is_sticky DESC
                 ,f.last_modified DESC
         LIMIT $4
        OFFSET $5
       ) r`,
		siteID,
		microcosmID,
		profileID,
		limit,
		offset,
	)
	if err != nil {
		glog.Error(err)
		return []SummaryContainer{}, 0, 0, http.StatusInternalServerError,
			fmt.Errorf("Database query failed: %v", err.Error())
	}
	defer rows.Close()

	var wg1 sync.WaitGroup
	req := make(chan SummaryContainerRequest)
	defer close(req)

	// [itemTypeId_itemId] = hasUnread
	unread := map[string]bool{}
	attending := map[int64]bool{}

	seq := 0
	for rows.Next() {
		var (
			itemTypeID  int64
			itemID      int64
			hasUnread   bool
			isAttending bool
		)

		err = rows.Scan(
			&itemTypeID,
			&itemID,
			&hasUnread,
			&isAttending,
		)
		if err != nil {
			return []SummaryContainer{}, 0, 0, http.StatusInternalServerError,
				fmt.Errorf("Row parsing error: %v", err.Error())
		}

		unread[strconv.FormatInt(itemTypeID, 10)+`_`+
			strconv.FormatInt(itemID, 10)] = hasUnread

		if itemTypeID == h.ItemTypes[h.ItemTypeEvent] {
			attending[itemID] = isAttending
		}

		go HandleSummaryContainerRequest(
			siteID,
			itemTypeID,
			itemID,
			profileID,
			seq,
			req,
		)
		seq++
		wg1.Add(1)
	}
	err = rows.Err()
	if err != nil {
		return []SummaryContainer{}, 0, 0, http.StatusInternalServerError,
			fmt.Errorf("Error fetching rows: %v", err.Error())
	}
	rows.Close()

	resps := []SummaryContainerRequest{}
	for i := 0; i < seq; i++ {
		resp := <-req
		wg1.Done()
		resps = append(resps, resp)
	}
	wg1.Wait()

	for _, resp := range resps {
		if resp.Err != nil {
			return []SummaryContainer{}, 0, 0, resp.Status, resp.Err
		}
	}

	sort.Sort(SummaryContainerRequestsBySeq(resps))

	ems := []SummaryContainer{}
	for _, resp := range resps {
		m := resp.Item

		key := strconv.FormatInt(m.ItemTypeID, 10) + `_` +
			strconv.FormatInt(m.ItemID, 10)

		switch summary := m.Summary.(type) {
		case ConversationSummaryType:
			summary.Meta.Flags.Unread = unread[key]
			m.Summary = summary

		case EventSummaryType:
			summary.Meta.Flags.Attending = attending[m.ItemID]
			summary.Meta.Flags.Unread = unread[key]
			m.Summary = summary

		case PollSummaryType:
			summary.Meta.Flags.Unread = unread[key]
			m.Summary = summary
		}

		ems = append(ems, m)
	}

	pages := h.GetPageCount(total, limit)
	maxOffset := h.GetMaxOffset(total, limit)

	if offset > maxOffset {
		return []SummaryContainer{}, 0, 0, http.StatusBadRequest,
			fmt.Errorf("Not enough records, "+
				"offset (%d) would return an empty page",
				offset,
			)
	}

	return ems, total, pages, http.StatusOK, nil
}
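Unlike the other examples, GetAllItems issues two queries: a COUNT(*) for the total, then the paged select. The same total could be fetched in one round trip with the COUNT(*) OVER() window function used elsewhere in this file. A sketch under that assumption (same sqlFromWhere fragment and bind order as above; db is assumed to be a *sql.DB from database/sql):

// Sketch only: single-pass variant of the GetAllItems listing query.
func getAllItemIDsOnePass(
	db *sql.DB,
	sqlFromWhere string,
	siteID int64,
	microcosmID int64,
	profileID int64,
	limit int64,
	offset int64,
) (int64, [][2]int64, error) {
	rows, err := db.Query(`
SELECT COUNT(*) OVER() AS total
      ,f.item_type_id
      ,f.item_id`+sqlFromWhere+`
 ORDER BY f.item_is_sticky DESC
         ,f.last_modified DESC
 LIMIT $4
OFFSET $5`,
		siteID, microcosmID, profileID, limit, offset)
	if err != nil {
		return 0, nil, err
	}
	defer rows.Close()

	var total int64
	var ids [][2]int64 // pairs of (item_type_id, item_id)
	for rows.Next() {
		var itemTypeID, itemID int64
		if err := rows.Scan(&total, &itemTypeID, &itemID); err != nil {
			return 0, nil, err
		}
		ids = append(ids, [2]int64{itemTypeID, itemID})
	}
	return total, ids, rows.Err()
}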
Example No. 11
// GetPolls returns a collection of polls
func GetPolls(
	siteID int64,
	profileID int64,
	limit int64,
	offset int64,
) (
	[]PollSummaryType,
	int64,
	int64,
	int,
	error,
) {

	// Retrieve resources
	db, err := h.GetConnection()
	if err != nil {
		return []PollSummaryType{}, 0, 0, http.StatusInternalServerError, err
	}

	rows, err := db.Query(`
SELECT COUNT(*) OVER() AS total
      ,p.poll_id
  FROM polls p
      ,microcosms m
 WHERE p.microcosm_id = m.microcosm_id
   AND m.site_id = $1
   AND m.is_deleted IS NOT TRUE
   AND m.is_moderated IS NOT TRUE
   AND p.is_deleted IS NOT TRUE
   AND p.is_moderated IS NOT TRUE
 ORDER BY p.created ASC
 LIMIT $2
OFFSET $3`,
		siteID,
		limit,
		offset,
	)
	if err != nil {
		return []PollSummaryType{}, 0, 0, http.StatusInternalServerError,
			fmt.Errorf("Database query failed: %v", err.Error())
	}
	defer rows.Close()

	var ems []PollSummaryType

	var total int64
	for rows.Next() {
		var id int64
		err = rows.Scan(
			&total,
			&id,
		)
		if err != nil {
			return []PollSummaryType{}, 0, 0, http.StatusInternalServerError,
				fmt.Errorf("Row parsing error: %v", err.Error())
		}

		m, status, err := GetPollSummary(siteID, id, profileID)
		if err != nil {
			return []PollSummaryType{}, 0, 0, status, err
		}

		ems = append(ems, m)
	}
	err = rows.Err()
	if err != nil {
		return []PollSummaryType{}, 0, 0, http.StatusInternalServerError,
			fmt.Errorf("Error fetching rows: %v", err.Error())
	}
	rows.Close()

	pages := h.GetPageCount(total, limit)
	maxOffset := h.GetMaxOffset(total, limit)

	if offset > maxOffset {
		return []PollSummaryType{}, 0, 0, http.StatusBadRequest,
			fmt.Errorf("not enough records, "+
				"offset (%d) would return an empty page.", offset)
	}

	return ems, total, pages, http.StatusOK, nil
}
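Each function in this file returns an HTTP status alongside the error, so a handler can pass it straight through to the response. A sketch of how GetPolls might be surfaced (handler name and paging values are illustrative):

// Hypothetical handler-style caller for GetPolls.
func writePollsPage(w http.ResponseWriter, siteID, profileID int64) {
	polls, total, pages, status, err := GetPolls(siteID, profileID, 25, 0)
	if err != nil {
		http.Error(w, err.Error(), status)
		return
	}
	fmt.Fprintf(w, "%d polls on this page, %d total across %d pages\n",
		len(polls), total, pages)
}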
Example No. 12
// GetMicrocosms fetches a collection of microcosms
func GetMicrocosms(
	siteID int64,
	profileID int64,
	limit int64,
	offset int64,
) (
	[]MicrocosmSummaryType,
	int64,
	int64,
	int,
	error,
) {

	// Retrieve resources
	db, err := h.GetConnection()
	if err != nil {
		glog.Errorf("h.GetConnection() %+v", err)
		return []MicrocosmSummaryType{}, 0, 0,
			http.StatusInternalServerError, err
	}

	rows, err := db.Query(`--GetMicrocosms
WITH m AS (
    SELECT m.microcosm_id
      FROM microcosms m
      LEFT JOIN permissions_cache p ON p.site_id = m.site_id
                                   AND p.item_type_id = 2
                                   AND p.item_id = m.microcosm_id
                                   AND p.profile_id = $2
      LEFT JOIN ignores i ON i.profile_id = $2
                         AND i.item_type_id = 2
                         AND i.item_id = m.microcosm_id
     WHERE m.site_id = $1
       AND m.is_deleted IS NOT TRUE
       AND m.is_moderated IS NOT TRUE
       AND i.profile_id IS NULL
       AND (
               (p.can_read IS NOT NULL AND p.can_read IS TRUE)
            OR (get_effective_permissions($1,m.microcosm_id,2,m.microcosm_id,$2)).can_read IS TRUE
           )
)
SELECT (SELECT COUNT(*) FROM m) AS total
      ,microcosm_id
      ,has_unread(2, microcosm_id, $2)
  FROM (
           SELECT microcosm_id
             FROM microcosms
            WHERE microcosm_id IN (SELECT microcosm_id FROM m)
            ORDER BY is_sticky DESC
                    ,comment_count DESC
                    ,item_count DESC
                    ,created ASC
            LIMIT $3
           OFFSET $4
       ) r`,
		siteID,
		profileID,
		limit,
		offset,
	)

	if err != nil {
		glog.Errorf(
			"db.Query(%d, %d, %d, %d) %+v",
			siteID,
			profileID,
			limit,
			offset,
			err,
		)
		return []MicrocosmSummaryType{}, 0, 0,
			http.StatusInternalServerError,
			fmt.Errorf("Database query failed")
	}
	defer rows.Close()

	// Get a list of the identifiers of the items to return
	var total int64
	ids := []int64{}
	unread := map[int64]bool{}
	for rows.Next() {
		var (
			id        int64
			hasUnread bool
		)
		err = rows.Scan(
			&total,
			&id,
			&hasUnread,
		)
		if err != nil {
			glog.Errorf("rows.Scan() %+v", err)
			return []MicrocosmSummaryType{}, 0, 0,
				http.StatusInternalServerError,
				fmt.Errorf("Row parsing error")
		}

		unread[id] = hasUnread
		ids = append(ids, id)
	}
	err = rows.Err()
	if err != nil {
		glog.Errorf("rows.Err() %+v", err)
		return []MicrocosmSummaryType{}, 0, 0,
			http.StatusInternalServerError,
			fmt.Errorf("Error fetching rows")
	}
	rows.Close()

	// Make a request for each identifier
	var wg1 sync.WaitGroup
	req := make(chan MicrocosmSummaryRequest)
	defer close(req)

	for seq, id := range ids {
		go HandleMicrocosmSummaryRequest(siteID, id, profileID, seq, req)
		wg1.Add(1)
	}

	// Receive the responses and check for errors
	resps := []MicrocosmSummaryRequest{}
	for i := 0; i < len(ids); i++ {
		resp := <-req
		wg1.Done()
		resps = append(resps, resp)
	}
	wg1.Wait()

	for _, resp := range resps {
		if resp.Err != nil {
			return []MicrocosmSummaryType{}, 0, 0,
				http.StatusInternalServerError, resp.Err
		}
	}

	// Sort them
	sort.Sort(MicrocosmSummaryRequestBySeq(resps))

	// Extract the values
	ems := []MicrocosmSummaryType{}
	for _, resp := range resps {
		m := resp.Item
		m.Meta.Flags.Unread = unread[m.ID]
		ems = append(ems, m)
	}

	pages := h.GetPageCount(total, limit)
	maxOffset := h.GetMaxOffset(total, limit)

	if offset > maxOffset {
		return []MicrocosmSummaryType{}, 0, 0,
			http.StatusBadRequest,
			fmt.Errorf(fmt.Sprintf("not enough records, "+
				"offset (%d) would return an empty page.", offset))
	}

	return ems, total, pages, http.StatusOK, nil
}
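GetMicrocosms fills Meta.Flags.Unread from the has_unread() call in its query, so a caller can derive an unread count without further database work. A sketch (first page only, name and page size illustrative):

// Hypothetical caller: counts unread microcosms on the first page of results.
func countUnreadMicrocosms(siteID, profileID int64) (int, error) {
	microcosms, _, _, _, err := GetMicrocosms(siteID, profileID, 25, 0)
	if err != nil {
		return 0, err
	}
	unreadCount := 0
	for _, m := range microcosms {
		if m.Meta.Flags.Unread {
			unreadCount++
		}
	}
	return unreadCount, nil
}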
Example No. 13
// GetProfiles returns a collection of profiles
func GetProfiles(
	siteID int64,
	so ProfileSearchOptions,
	limit int64,
	offset int64,
) (
	[]ProfileSummaryType,
	int64,
	int64,
	int,
	error,
) {

	// Retrieve resources
	db, err := h.GetConnection()
	if err != nil {
		glog.Errorf("h.GetConnection() %+v", err)
		return []ProfileSummaryType{}, 0, 0, http.StatusInternalServerError, err
	}

	var following string
	if so.IsFollowing {
		following = `
       JOIN watchers w ON w.profile_id = ` + strconv.FormatInt(so.ProfileID, 10) + `
                      AND w.item_type_id = 3
                      AND p.profile_id = w.item_id`
	}

	var online string
	if so.IsOnline {
		online = `
   AND p.last_active > NOW() - interval '90 minute'`
	}

	var selectCountArgs []interface{}
	var selectArgs []interface{}
	//                                        $1      $2            $3     $4
	selectCountArgs = append(selectCountArgs, siteID, so.ProfileID, limit, offset)
	//                              $1      $2            $3     $4
	selectArgs = append(selectArgs, siteID, so.ProfileID, limit, offset)

	var startsWith string
	var startsWithOrderBy string
	if so.StartsWith != "" {
		//                                        $5
		selectCountArgs = append(selectCountArgs, so.StartsWith+`%`)
		//                              $5                 $6
		selectArgs = append(selectArgs, so.StartsWith+`%`, so.StartsWith)
		startsWith = `
   AND p.profile_name ILIKE $5`
		startsWithOrderBy = `p.profile_name ILIKE $6 DESC
         ,`
	}

	// Construct the query
	sqlSelect := `--GetProfiles
SELECT p.profile_id`

	sqlFromWhere := `
  FROM profiles p
  LEFT JOIN ignores i ON i.profile_id = $2
                     AND i.item_type_id = 3
                     AND i.item_id = p.profile_id` + following + `
 WHERE p.site_id = $1
   AND i.profile_id IS NULL
   AND p.profile_name <> 'deleted'` + online + startsWith

	var sqlOrderLimit string
	if so.OrderByCommentCount {
		sqlOrderLimit = `
 ORDER BY ` + startsWithOrderBy + `p.comment_count DESC
         ,p.profile_name ASC
 LIMIT $3
OFFSET $4`
	} else {
		sqlOrderLimit = `
 ORDER BY ` + startsWithOrderBy + `p.profile_name ASC
 LIMIT $3
OFFSET $4`
	}

	var total int64
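	// The "$3 > 0 AND $4 >= 0" predicates appended to the count query below
	// look redundant; presumably they exist only so the statement references
	// the limit/offset bind parameters carried in selectCountArgs, since
	// PostgreSQL rejects bind parameters the statement never uses.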
	err = db.QueryRow(
		`SELECT COUNT(*)`+sqlFromWhere+`
   AND $3 > 0
   AND $4 >= 0`,
		selectCountArgs...,
	).Scan(
		&total,
	)
	if err != nil {
		glog.Error(err)
		return []ProfileSummaryType{}, 0, 0, http.StatusInternalServerError,
			fmt.Errorf("Database query failed")
	}

	rows, err := db.Query(
		sqlSelect+sqlFromWhere+sqlOrderLimit,
		selectArgs...,
	)
	if err != nil {
		glog.Errorf(
			"stmt.Query(%d, `%s`, %d, %d) %+v",
			siteID,
			so.StartsWith+`%`,
			limit,
			offset,
			err,
		)
		return []ProfileSummaryType{}, 0, 0, http.StatusInternalServerError,
			fmt.Errorf("Database query failed")
	}
	defer rows.Close()

	ids := []int64{}

	for rows.Next() {
		var id int64
		err = rows.Scan(&id)
		if err != nil {
			glog.Errorf("rows.Scan() %+v", err)
			return []ProfileSummaryType{}, 0, 0, http.StatusInternalServerError,
				fmt.Errorf("Row parsing error")
		}

		ids = append(ids, id)
	}
	err = rows.Err()
	if err != nil {
		glog.Errorf("rows.Err() %+v", err)
		return []ProfileSummaryType{}, 0, 0, http.StatusInternalServerError,
			fmt.Errorf("Error fetching rows")
	}
	rows.Close()

	var wg1 sync.WaitGroup
	req := make(chan ProfileSummaryRequest)
	defer close(req)

	for seq, id := range ids {
		go HandleProfileSummaryRequest(siteID, id, seq, req)
		wg1.Add(1)
	}

	resps := []ProfileSummaryRequest{}
	for i := 0; i < len(ids); i++ {
		resp := <-req
		wg1.Done()
		resps = append(resps, resp)
	}
	wg1.Wait()

	for _, resp := range resps {
		if resp.Err != nil {
			glog.Errorf("resp.Err != nil %+v", resp.Err)
			return []ProfileSummaryType{}, 0, 0, resp.Status, resp.Err
		}
	}

	sort.Sort(ProfileSummaryRequestBySeq(resps))

	ems := []ProfileSummaryType{}
	for _, resp := range resps {
		ems = append(ems, resp.Item)
	}

	pages := h.GetPageCount(total, limit)
	maxOffset := h.GetMaxOffset(total, limit)

	if offset > maxOffset {
		glog.Infoln("offset > maxOffset")
		return []ProfileSummaryType{}, 0, 0, http.StatusBadRequest,
			fmt.Errorf("not enough records, "+
				"offset (%d) would return an empty page.", offset)
	}

	return ems, total, pages, http.StatusOK, nil
}
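A minimal call sketch for GetProfiles showing the ProfileSearchOptions fields this function reads (the values are placeholders, and the struct may carry other fields not used in this listing):

// Hypothetical caller, not part of the original listing.
func searchProfilesExample(siteID, profileID int64) error {
	so := ProfileSearchOptions{
		ProfileID:           profileID,
		StartsWith:          "ali",
		IsOnline:            true,
		IsFollowing:         false,
		OrderByCommentCount: true,
	}
	profiles, total, pages, status, err := GetProfiles(siteID, so, 25, 0)
	if err != nil {
		return fmt.Errorf("GetProfiles failed with HTTP status %d: %v", status, err)
	}
	fmt.Printf("%d of %d matching profiles across %d pages\n",
		len(profiles), total, pages)
	return nil
}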
Example No. 14
// GetProfileWatchers fetches all watchers registered to a particular profile.
// This is mainly used for showing a list of watchers to the user.
func GetProfileWatchers(
	profileID int64,
	siteID int64,
	limit int64,
	offset int64,
) (
	[]WatcherType,
	int64,
	int64,
	int,
	error,
) {

	db, err := h.GetConnection()
	if err != nil {
		glog.Error(err)
		return []WatcherType{}, 0, 0, http.StatusInternalServerError, err
	}

	rows, err := db.Query(`
SELECT COUNT(*) OVER() AS total
      ,watcher_id
  FROM watchers
 WHERE profile_id = $1
 ORDER BY last_notified DESC
         ,item_type_id ASC
         ,item_id DESC
 LIMIT $2
OFFSET $3`,
		profileID,
		limit,
		offset,
	)
	if err != nil {
		glog.Error(err)
		return []WatcherType{}, 0, 0, http.StatusInternalServerError,
			fmt.Errorf("Database query failed: %v", err.Error())
	}
	defer rows.Close()

	var ems []WatcherType

	var total int64
	for rows.Next() {
		var id int64
		err = rows.Scan(
			&total,
			&id,
		)
		if err != nil {
			glog.Error(err)
			return []WatcherType{}, 0, 0, http.StatusInternalServerError,
				fmt.Errorf("Row parsing error: %v", err.Error())
		}

		m, status, err := GetWatcher(id, siteID)
		if err != nil {
			glog.Error(err)
			return []WatcherType{}, 0, 0, status, err
		}

		ems = append(ems, m)
	}
	err = rows.Err()
	if err != nil {
		glog.Error(err)
		return []WatcherType{}, 0, 0, http.StatusInternalServerError,
			fmt.Errorf("Error fetching rows: %v", err.Error())
	}
	rows.Close()

	pages := h.GetPageCount(total, limit)
	maxOffset := h.GetMaxOffset(total, limit)

	if offset > maxOffset {
		return []WatcherType{}, 0, 0, http.StatusBadRequest,
			fmt.Errorf("Not enough records, "+
				"offset (%d) would return an empty page.", offset)
	}

	return ems, total, pages, http.StatusOK, nil
}
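Every example above ends with the same h.GetPageCount / h.GetMaxOffset pair. Their implementations are not shown in this file; a reading consistent with how the offset check behaves would be roughly the following (an assumption, not the project's helpers):

// Assumed semantics only: pages is the ceiling of total/limit and maxOffset
// is the offset of the final page. The real helpers may differ.
func assumedPageCount(total, limit int64) int64 {
	if limit <= 0 {
		return 0
	}
	return (total + limit - 1) / limit
}

func assumedMaxOffset(total, limit int64) int64 {
	pages := assumedPageCount(total, limit)
	if pages == 0 {
		return 0
	}
	return (pages - 1) * limit
}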