Example #1
// GetBoardPostsByID returns the filtered posts for a board by the ID of the board
func (ps *PostService) GetBoardPostsByID(boardID, page int) (posts []*model.Post, err error) {
	var rows *sqlx.Rows
	rows, err = ps.db.Queryx(queries.Get("get_board_posts_by_id"), boardID, page*50)
	if err != nil {
		return
	}
	defer rows.Close()

	for rows.Next() {
		var post model.Post
		err = rows.StructScan(&post)
		if err != nil {
			return
		}

		posts = append(posts, &post)
	}
	err = rows.Err()
	return
}
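
Every service method in these examples follows the same Queryx/StructScan loop. Below is a minimal, self-contained sketch of that pattern with the cleanup steps it needs (defer rows.Close() and a final rows.Err() check); the posts table, its columns, and the Postgres DSN are assumptions made for the sketch, not part of the original project.

package main

import (
	"fmt"
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"
)

// post mirrors the columns selected below; the db tags tell StructScan which
// column maps to which field.
type post struct {
	ID    int    `db:"id"`
	Title string `db:"title"`
}

func main() {
	// Hypothetical DSN; adjust for your environment.
	db, err := sqlx.Connect("postgres", "dbname=example sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Hypothetical query mirroring the boardID/offset arguments used above.
	rows, err := db.Queryx("SELECT id, title FROM posts WHERE board_id = $1 LIMIT 50 OFFSET $2", 1, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close() // release the connection even on early return

	var posts []*post
	for rows.Next() {
		var p post
		if err := rows.StructScan(&p); err != nil {
			log.Fatal(err)
		}
		posts = append(posts, &p)
	}
	if err := rows.Err(); err != nil { // errors hit during iteration
		log.Fatal(err)
	}
	fmt.Println(len(posts), "posts loaded")
}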
Example #2
// ScanOne scans the row, if any, returned by the SQL query into instance
func ScanOne(instance interface{}, rows *sqlx.Rows) error {
	defer rows.Close()
	for rows.Next() {
		if err := rows.StructScan(instance); err != nil {
			return err
		}
	}
	return rows.Err()
}
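
A sketch of how ScanOne might be called; the user struct, table, and query are assumptions for illustration. Note that when the query matches no rows, ScanOne leaves the destination at its zero value rather than returning an error, so callers that need "not found" semantics must check for that themselves.

package example

import "github.com/jmoiron/sqlx"

// user is a hypothetical destination struct for this sketch.
type user struct {
	ID   int    `db:"id"`
	Name string `db:"name"`
}

// getUserByID is a hypothetical caller of ScanOne.
func getUserByID(db *sqlx.DB, id int) (user, error) {
	var u user
	rows, err := db.Queryx("SELECT id, name FROM users WHERE id = $1", id)
	if err != nil {
		return u, err
	}
	// ScanOne closes rows itself, so no defer rows.Close() is needed here.
	return u, ScanOne(&u, rows)
}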
Example #3
// EncodeStructCustom scans each row into a Custom value and writes the slice as JSON
func EncodeStructCustom(rows *sqlx.Rows, w http.ResponseWriter) {
	defer rows.Close()

	sa := make([]Custom, 0)
	t := Custom{}

	for rows.Next() {
		if err := rows.StructScan(&t); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		sa = append(sa, t)
	}

	enc := json.NewEncoder(w)
	if err := enc.Encode(sa); err != nil {
		log.Println(err)
	}
}
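
A sketch of how EncodeStructCustom might be wired into an HTTP handler; the database handle, table name, and route are assumptions, and Custom is taken to be a struct with db tags matching the query.

package example

import (
	"net/http"

	"github.com/jmoiron/sqlx"
)

// customHandler is a hypothetical handler that produces the rows consumed by
// EncodeStructCustom.
func customHandler(db *sqlx.DB) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		rows, err := db.Queryx("SELECT * FROM customs")
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		EncodeStructCustom(rows, w)
	}
}

Registering it with http.Handle("/customs", customHandler(db)) would then serve the JSON-encoded slice.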
Example #4
func loadResult(rows *sqlx.Rows) ([]TestEntity, error) {
	result := []TestEntity{}
	for rows.Next() {
		row := TestEntity{}
		err := rows.StructScan(&row)
		if err != nil {
			return nil, err
		}
		result = append(result, row)
	}
	return result, rows.Err()
}
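
When the entire result set is wanted as a slice, sqlx can also run the loop itself: db.Select executes the query, scans every row into the destination slice, and closes the rows. A sketch assuming TestEntity carries db tags matching a hypothetical test_entities table:

package example

import "github.com/jmoiron/sqlx"

// loadAll is a hypothetical one-call equivalent of Queryx followed by loadResult.
func loadAll(db *sqlx.DB) ([]TestEntity, error) {
	result := []TestEntity{}
	// Select queries, scans all rows into result, and closes the rows for us.
	err := db.Select(&result, "SELECT * FROM test_entities")
	return result, err
}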
Example #5
// GetUserPosts returns all posts owned by a certain user
func (ps *PostService) GetUserPosts(userID, page int) (posts []*model.Post, err error) {
	var rows *sqlx.Rows
	rows, err = ps.db.Queryx(queries.Get("get_user_posts"), userID, page*50)
	if err != nil {
		return
	}
	defer rows.Close()

	for rows.Next() {
		var post model.Post
		err = rows.StructScan(&post)
		if err != nil {
			return
		}

		posts = append(posts, &post)
	}
	err = rows.Err()
	return
}
Example #6
// GetInactiveUserInfo returns a list of users who have not been active for the specified time interval
func (db *dbw) GetInactiveUserInfo(fid int, interval time.Duration) (users []peerInfo, err error) {
	query := `SELECT user_id, ip FROM files_users
		WHERE time < (UNIX_TIMESTAMP() - ?)
		AND active = 1
		AND file_id = ?;`

	result := peerInfo{}
	checkInterval := int(interval / time.Second)

	var rows *sqlx.Rows
	if rows, err = db.Queryx(query, checkInterval, fid); err != nil {
		return
	}
	defer rows.Close()

	for rows.Next() {
		if err = rows.StructScan(&result); err != nil {
			return
		}
		users = append(users, result)
	}
	err = rows.Err()

	return
}
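
The checkInterval value bound to the first ? placeholder is simply the duration divided by time.Second; a small sketch with an arbitrary 30-minute interval:

package main

import (
	"fmt"
	"time"
)

func main() {
	interval := 30 * time.Minute
	checkInterval := int(interval / time.Second)
	fmt.Println(checkInterval) // 1800, compared against UNIX_TIMESTAMP() - ? in the query
}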
Example #7
// GetPostComments returns all root level comments along with their children for a Post
func (cs *CommentService) GetPostComments(postID int) (comments []*model.Comment, err error) {
	var rows *sqlx.Rows
	rows, err = cs.db.Queryx(queries.Get("get_post_comments"), postID)
	if err != nil {
		return
	}
	defer rows.Close()

	var pgcr []types.PGComment
	for rows.Next() {
		var pgc types.PGComment
		err = rows.StructScan(&pgc)
		if err != nil {
			return
		}
		pgcr = append(pgcr, pgc)
	}

	if err = rows.Err(); err != nil {
		return
	}

	comments = types.AssembleCommentTree(pgcr)

	return
}
Example #8
// GetUserComments returns all of a user's past comments
func (cs *CommentService) GetUserComments(userID int) (comments []*model.Comment, err error) {
	var rows *sqlx.Rows
	rows, err = cs.db.Queryx(queries.Get("get_user_comments"), userID)
	if err != nil {
		return
	}
	defer rows.Close()

	var pgcr []types.PGComment
	for rows.Next() {
		var pgc types.PGComment
		err = rows.StructScan(&pgc)
		if err != nil {
			return
		}
		log.Printf("%+v", pgc)
		pgcr = append(pgcr, pgc)
	}

	if err = rows.Err(); err != nil {
		return
	}

	comments = types.AssembleCommentTree(pgcr)

	return
}
Example #9
File: rower.go Project: in3pid/ant
// scanRow allocates a new value of the rower's struct type via reflection and
// scans the current row into it, returning the pointer.
func (r structRower) scanRow(rows *sqlx.Rows) (Value, error) {
	p := reflect.New(r.t).Interface()
	e := rows.StructScan(p)
	return p, e
}
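
A sketch of how a reflection-driven rower like this might be defined and drained; the Value alias, the structRower field, and the helper below are assumed shapes standing in for the project's real definitions.

package example

import (
	"reflect"

	"github.com/jmoiron/sqlx"
)

// Value and structRower are assumed shapes for this sketch.
type Value interface{}

type structRower struct {
	t reflect.Type // concrete struct type to allocate for each row
}

func (r structRower) scanRow(rows *sqlx.Rows) (Value, error) {
	// Allocate a new *T and let StructScan fill it from the current row.
	p := reflect.New(r.t).Interface()
	e := rows.StructScan(p)
	return p, e
}

// scanAll is a hypothetical helper that drains rows through the rower,
// returning one freshly allocated value per row.
func scanAll(r structRower, rows *sqlx.Rows) ([]Value, error) {
	defer rows.Close()
	var out []Value
	for rows.Next() {
		v, err := r.scanRow(rows)
		if err != nil {
			return nil, err
		}
		out = append(out, v)
	}
	return out, rows.Err()
}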
Example #10
// Messages allows the messages series to be queried in some general ways.
func (database *SocialHarvestDB) Messages(queryParams CommonQueryParams, conds BasicConditions) ([]config.SocialHarvestMessage, uint64, uint64, uint64) {
	sanitizedQueryParams := SanitizeCommonQueryParams(queryParams)
	var results = []config.SocialHarvestMessage{}

	var err error

	// Must have a territory (for now)
	if sanitizedQueryParams.Territory == "" {
		return results, 0, sanitizedQueryParams.Skip, sanitizedQueryParams.Limit
	}

	var buffer bytes.Buffer
	var bufferCount bytes.Buffer
	var bufferQuery bytes.Buffer
	bufferCount.WriteString("SELECT COUNT(*)")
	bufferQuery.WriteString("SELECT *")

	buffer.WriteString(" FROM messages WHERE territory = '")
	buffer.WriteString(sanitizedQueryParams.Territory)
	buffer.WriteString("'")

	// optional date range (can have either or both)
	if sanitizedQueryParams.From != "" {
		buffer.WriteString(" AND time >= ")
		buffer.WriteString(sanitizedQueryParams.From)
	}
	if sanitizedQueryParams.To != "" {
		buffer.WriteString(" AND time <= ")
		buffer.WriteString(sanitizedQueryParams.To)
	}
	if sanitizedQueryParams.Network != "" {
		buffer.WriteString(" AND network = '")
		buffer.WriteString(sanitizedQueryParams.Network)
		buffer.WriteString("'")
	}

	// BasicConditions (various basic query conditions to be used explicitly, not in a loop, because not all fields will be available depending on the series)
	if conds.Lang != "" {
		buffer.WriteString(" AND contributor_lang = '")
		buffer.WriteString(conds.Lang)
		buffer.WriteString("'")
	}
	if conds.Country != "" {
		buffer.WriteString(" AND contributor_country = '")
		buffer.WriteString(conds.Country)
		buffer.WriteString("'")
	}
	if conds.Geohash != "" {
		// Ensure the geohash is alphanumeric.
		// TODO: Pass these conditions through a sanitizer too, though the ORM should use prepared statements and take care of SQL injection....right? TODO: Check that too.
		pattern := `^[A-Za-z0-9]+$`
		r, _ := regexp.Compile(pattern)
		if r.MatchString(conds.Geohash) {
			buffer.WriteString(" AND contributor_geohash LIKE '")
			buffer.WriteString(conds.Geohash)
			buffer.WriteString("%'")
		}
	}
	if conds.Gender != "" {
		switch conds.Gender {
		case "-1", "f", "female":
			buffer.WriteString(" AND contributor_gender = -1")
		case "1", "m", "male":
			buffer.WriteString(" AND contributor_gender = 1")
		case "0", "u", "unknown":
			buffer.WriteString(" AND contributor_gender = 0")
		}
	}
	if conds.IsQuestion != 0 {
		buffer.WriteString(" AND is_question = 1")
	}

	// Count here (before limit and order)
	bufferCount.WriteString(buffer.String())

	// Continue with query returning results
	// TODO: Allow other sorting options? I'm not sure it matters because people likely want timely data. More important would be a search.
	buffer.WriteString(" ORDER BY time DESC")

	buffer.WriteString(" LIMIT ")
	buffer.WriteString(strconv.FormatUint(sanitizedQueryParams.Limit, 10))

	if sanitizedQueryParams.Skip > 0 {
		buffer.WriteString(" OFFSET ")
		buffer.WriteString(strconv.FormatUint(sanitizedQueryParams.Skip, 10))
	}

	bufferQuery.WriteString(buffer.String())
	buffer.Reset()

	query := bufferQuery.String()
	bufferQuery.Reset()

	countQuery := bufferCount.String()
	bufferCount.Reset()

	total := uint64(0)

	if db.Postgres != nil {
		var rows *sqlx.Rows
		rows, err = db.Postgres.Queryx(query)
		if err != nil {
			log.Println(err)
			return results, 0, sanitizedQueryParams.Skip, sanitizedQueryParams.Limit
		}
		defer rows.Close()
		// Map rows to array of struct
		// TODO: Make slice of fixed size given we know limit?
		var msg config.SocialHarvestMessage
		for rows.Next() {
			err = rows.StructScan(&msg)
			if err != nil {
				log.Println(err)
				return results, 0, sanitizedQueryParams.Skip, sanitizedQueryParams.Limit
			}
			results = append(results, msg)
		}
		if err = rows.Err(); err != nil {
			log.Println(err)
		}

		err = db.Postgres.Get(&total, countQuery)
		if err != nil {
			log.Println(err)
		}
	}

	return results, total, sanitizedQueryParams.Skip, sanitizedQueryParams.Limit
}
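
The TODO about SQL injection could be addressed by binding the filter values instead of concatenating them into the SQL text. Below is a sketch of the territory, date, and network filters built with ? placeholders and an args slice, rebound for the Postgres driver with db.Rebind; messageFilter is a simplified stand-in for the sanitized query params, not the project's real type.

package example

import (
	"strings"

	"github.com/jmoiron/sqlx"
)

// messageFilter is a hypothetical, simplified stand-in for CommonQueryParams.
type messageFilter struct {
	Territory string
	From      string
	To        string
	Network   string
	Limit     uint64
	Skip      uint64
}

// buildMessagesQuery assembles a parameterized WHERE clause; the values travel
// in args, never in the SQL string itself.
func buildMessagesQuery(f messageFilter) (string, []interface{}) {
	var sb strings.Builder
	args := []interface{}{}

	sb.WriteString("SELECT * FROM messages WHERE territory = ?")
	args = append(args, f.Territory)

	if f.From != "" {
		sb.WriteString(" AND time >= ?")
		args = append(args, f.From)
	}
	if f.To != "" {
		sb.WriteString(" AND time <= ?")
		args = append(args, f.To)
	}
	if f.Network != "" {
		sb.WriteString(" AND network = ?")
		args = append(args, f.Network)
	}

	sb.WriteString(" ORDER BY time DESC LIMIT ? OFFSET ?")
	args = append(args, f.Limit, f.Skip)

	return sb.String(), args
}

// queryMessages rebinds the ? placeholders for the driver in use (e.g. $1, $2
// for Postgres) and lets db.Select scan and close the rows.
func queryMessages(db *sqlx.DB, f messageFilter, dest interface{}) error {
	query, args := buildMessagesQuery(f)
	return db.Select(dest, db.Rebind(query), args...)
}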