Example #1
// GetBoardPostsByID returns the filtered posts for a board by the ID of the board
func (ps *PostService) GetBoardPostsByID(boardID, page int) (posts []*model.Post, err error) {
	var rows *sqlx.Rows
	rows, err = ps.db.Queryx(queries.Get("get_board_posts_by_id"), boardID, page*50)
	if err != nil {
		return
	}
	defer rows.Close()

	for rows.Next() {
		var post model.Post
		err = rows.StructScan(&post)
		if err != nil {
			return
		}

		posts = append(posts, &post)
	}
	return
}
Example #2
// ResultsToCSV takes sqlx.Rows without scanning into a struct
// and returns their contents as a CSV string.
func ResultsToCSV(results *sqlx.Rows) (string, error) {

	b := &bytes.Buffer{} // buffer serves as the io.Writer for the CSV output
	csvWriter := csv.NewWriter(b)
	//csvWriter.Comma = 0x0009  // tab sep

	firstLine := true
	for results.Next() {
		row, err := results.SliceScan() // or do a results.StructScan(struct)
		if err != nil {
			return "", err
		}
		if firstLine {
			firstLine = false
			cols, err := results.Columns()
			if err != nil {
				return "", err
			}
			csvWriter.Write(cols)
		}

		rowStrings := make([]string, len(row))
		for i, col := range row {
			//log.Print(reflect.TypeOf(col))
			switch v := col.(type) {
			case float64:
				rowStrings[i] = strconv.FormatFloat(v, 'f', 6, 64)
			case int64:
				rowStrings[i] = strconv.FormatInt(v, 10)
			case bool:
				rowStrings[i] = strconv.FormatBool(v)
			case []byte:
				rowStrings[i] = string(v)
			case string:
				rowStrings[i] = strings.Trim(v, " ")
			// case sql.NullString: // strings.Trim expects a string, so v.String would be needed here
			// 	rowStrings[i] = strings.Trim(v.String, " ")
			case time.Time:
				rowStrings[i] = v.String()
			case nil:
				rowStrings[i] = "NULL"
			default:
				log.Print(v)
			}
		}
		csvWriter.Write(rowStrings)
	}

	csvWriter.Flush()
	return b.String(), nil

}
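A minimal sketch of how ResultsToCSV might be invoked, assuming an already-open *sqlx.DB named db and a hypothetical products table (both assumptions, not part of the original project); the caller owns the rows and closes them:

// Hypothetical caller; db and the products table are illustrative assumptions.
func exportProductsCSV(db *sqlx.DB) (string, error) {
	rows, err := db.Queryx("SELECT id, name, price FROM products")
	if err != nil {
		return "", err
	}
	defer rows.Close()
	return ResultsToCSV(rows)
}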
Example #3
// DropAndCreateTable drops the table if it already exists and creates it again
func DropAndCreateTable(schema string, tableName string, db *sqlx.DB) (bool, error) {
	var err error
	var rows *sqlx.Rows
	// Check whether the table already exists in the database
	rows, err = db.Queryx("SELECT to_regclass('" + tableName + "');")
	if err != nil {
		//fmt.Println("Error on check table '"+tableName+"':", err)
		return false, err
	}
	defer rows.Close()

	// If it does, drop it
	rowsCount := 0
	for rows.Next() {
		rowsCount++
	}

	if rowsCount > 0 {
		_, err = db.Exec("DROP TABLE IF EXISTS " + tableName + ";")
		if err != nil {
			//fmt.Println("Error on drop table '"+tableName+"':", err)
			return false, err
		}
	}

	// Create the table
	_, err = db.Exec(schema)
	if err != nil {
		//fmt.Println("Error on create table '"+tableName+"':", err)
		return false, err
	}

	return true, nil
}
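A minimal usage sketch, assuming a connected *sqlx.DB and a hypothetical users table; the schema string and table name are illustrative assumptions:

// Hypothetical schema and caller; the users table and its columns are assumptions.
const usersSchema = `CREATE TABLE users (
	id   SERIAL PRIMARY KEY,
	name TEXT NOT NULL
);`

func recreateUsers(db *sqlx.DB) error {
	_, err := DropAndCreateTable(usersSchema, "users", db)
	return err
}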
Example #4
func sqlGetRow(rows *sqlx.Rows, bytesToStrings, idTo_id bool) (*map[string]interface{}, error) {
	results := make(map[string]interface{})
	err := rows.MapScan(results)
	if err != nil {
		log.Println(err)
	} else {

		if idTo_id {
			// Convert "id" column to "_id" for mongo
			if _, ok := results["id"]; ok {
				// Mangle id, because mongo is persnickity
				results["_id"] = results["id"]
				delete(results, "id")
			}
		}

		if bytesToStrings {
			// Convert byte arrays to strings
			for k, v := range results {
				if _, ok := v.([]byte); ok {
					// Damn. Byte. Arrays. Sqlx.
					results[k] = string(v.([]byte))
				}
			}
		}
	}
	return &results, err
}
Example #5
func flattenUuidRows(rows *sqlx.Rows) (uuids []string, err error) {
	for rows.Next() {
		var uuid string
		err = rows.Scan(&uuid)
		if err != nil {
			return
		}
		uuids = append(uuids, uuid)
	}
	return
}
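Since flattenUuidRows receives the rows rather than creating them, the caller is responsible for closing them. A minimal sketch, assuming a *sqlx.DB named db and a hypothetical sessions table with a uuid column (both assumptions):

// Hypothetical caller; the sessions table is an assumption.
func sessionUuids(db *sqlx.DB) ([]string, error) {
	rows, err := db.Queryx("SELECT uuid FROM sessions")
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	return flattenUuidRows(rows)
}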
Example #6
func loadResult(rows *sqlx.Rows) ([]TestEntity, error) {
	result := []TestEntity{}
	for rows.Next() {
		row := TestEntity{}
		err := rows.StructScan(&row)
		if err != nil {
			return nil, err
		}
		result = append(result, row)
	}
	return result, nil
}
Example #7
func EncodeStructCustom(rows *sqlx.Rows, w http.ResponseWriter) {
	sa := make([]Custom, 0)
	t := Custom{}

	for rows.Next() {
		if err := rows.StructScan(&t); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		sa = append(sa, t)
	}

	enc := json.NewEncoder(w)
	enc.Encode(sa)
}
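A minimal sketch of wiring EncodeStructCustom into an HTTP handler, assuming a package-level *sqlx.DB named db and a hypothetical customs table that maps onto the Custom struct (both assumptions):

// Hypothetical handler; db and the customs table are assumptions.
func customsHandler(w http.ResponseWriter, r *http.Request) {
	rows, err := db.Queryx("SELECT * FROM customs")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer rows.Close()
	EncodeStructCustom(rows, w)
}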
Example #8
File: sql.go  Project: vozhyk-/gohan
func (tx *Transaction) decodeRows(s *schema.Schema, rows *sqlx.Rows, list []*schema.Resource) ([]*schema.Resource, error) {
	for rows.Next() {
		resourceData := map[string]interface{}{}
		data := map[string]interface{}{}
		if err := rows.MapScan(data); err != nil {
			return nil, err
		}

		var resource *schema.Resource
		tx.decode(s, s.GetDbTableName(), data, resourceData)
		resource, err := schema.NewResource(s, resourceData)
		if err != nil {
			return nil, fmt.Errorf("failed to decode rows: %s", err)
		}
		list = append(list, resource)
	}
	return list, nil
}
Example #9
// Orders need to be joined to extract useful info, therefore they have to be parsed by hand
func (d Datastore) parseOrders(model interface{}, id interface{}) error {
	var rows *sqlx.Rows
	var e error
	query := "select o.id, o.user_id, o.place_id, o.start, o.end_time, p.name as place_name, u.username, u.firstname, u.lastname from orders o left join users u on u.id = o.user_id left join places p on p.id = o.place_id"
	/*	query := "select o.id, o.user_id, o.place_id, o.date + o.order_start as start, o.date + o.order_end as end, p.name as place_name, u.username, u.firstname, u.lastname from orders o left join users u on u.id = o.user_id left join places p on p.id = o.place_id"
	 */
	if id != nil {
		query += " where o.id = $1"
		rows, e = d.Db.Queryx(query, id)
	} else {
		rows, e = d.Db.Queryx(query)
	}
	/*query = "SELECT * FROM orders o, users u, places p where u.id = o.user_id and p.id = o.place_id"*/
	if e != nil {
		log.Println(e)
		return e
	}
	defer rows.Close()
	for rows.Next() {
		var id int64
		var place_id, user_id sql.NullInt64
		var username, firstname, lastname, place_name sql.NullString
		var start, end pq.NullTime
		if err := rows.Scan(&id, &user_id, &place_id, &start, &end, &place_name, &username, &firstname, &lastname); err != nil {
			log.Println(err)
		}
		order := Order{Id: id, UserId: user_id.Int64, PlaceId: place_id.Int64, Start: start.Time, End: end.Time, PlaceName: place_name.String, Username: username.String, Firstname: firstname.String, Lastname: lastname.String}
		if user_id.Valid == false || (lastname.String == "" && firstname.String == "") {
			order.Title = fmt.Sprintf("%v", place_name.String)
		} else {
			order.Title = fmt.Sprintf("%v: %v, %v", place_name.String, lastname.String, firstname.String)
		}
		*model.(*[]Order) = append(*model.(*[]Order), order)
	}
	e = rows.Err()
	return e
}
Example #10
// GetUserPosts returns all posts owned by a certain user
func (ps *PostService) GetUserPosts(userID, page int) (posts []*model.Post, err error) {
	var rows *sqlx.Rows
	rows, err = ps.db.Queryx(queries.Get("get_user_posts"), userID, page*50)
	if err != nil {
		return
	}
	defer rows.Close()

	for rows.Next() {
		var post model.Post
		err = rows.StructScan(&post)
		if err != nil {
			return
		}

		posts = append(posts, &post)
	}
	return
}
Example #11
// GetInactiveUserInfo returns a list of users who have not been active for the specified time interval
func (db *dbw) GetInactiveUserInfo(fid int, interval time.Duration) (users []peerInfo, err error) {
	query := `SELECT user_id, ip FROM files_users
		WHERE time < (UNIX_TIMESTAMP() - ?)
		AND active = 1
		AND file_id = ?;`

	result := peerInfo{}
	checkInterval := int(interval / time.Second)

	var rows *sqlx.Rows
	if rows, err = db.Queryx(query, checkInterval, fid); err == nil {
		defer rows.Close()
		for rows.Next() {
			if err = rows.StructScan(&result); err == nil {
				users = append(users, result)
			}
		}
	}

	return
}
Example #12
// GetPostComments returns all root-level comments along with their children for a Post
func (cs *CommentService) GetPostComments(postID int) (comments []*model.Comment, err error) {
	var rows *sqlx.Rows
	rows, err = cs.db.Queryx(queries.Get("get_post_comments"), postID)
	if err != nil {
		return
	}
	defer rows.Close()

	var pgcr []types.PGComment
	for rows.Next() {
		var pgc types.PGComment
		err = rows.StructScan(&pgc)
		if err != nil {
			return
		}
		pgcr = append(pgcr, pgc)
	}

	comments = types.AssembleCommentTree(pgcr)

	return
}
Example #13
func mapEvents(r *sqlx.Rows) ([]event, error) {
	events := make([]event, 0)
	for r.Next() {
		var temp string
		e := event{}
		err := r.Scan(&e.ID, &temp, &e.Title, &e.Description, &e.Font)
		if err != nil {
			return nil, err
		}
		e.Date, err = stringToTime(temp)
		if err != nil {
			return nil, err
		}
		events = append(events, e)
	}
	/*doesn't work
	e := event{}
	err := e.mapRow(r)
	if err != nil {
		return nil, err
	}
	events = append(events, e)
	*/
	return events, nil
}
Example #14
// GetUserComments returns all of a users past comments
func (cs *CommentService) GetUserComments(userID int) (comments []*model.Comment, err error) {
	var rows *sqlx.Rows
	rows, err = cs.db.Queryx(queries.Get("get_user_comments"), userID)
	if err != nil {
		return
	}
	defer rows.Close()

	var pgcr []types.PGComment
	for rows.Next() {
		var pgc types.PGComment
		err = rows.StructScan(&pgc)
		if err != nil {
			return
		}
		log.Printf("%+v", pgc)
		pgcr = append(pgcr, pgc)
	}

	comments = types.AssembleCommentTree(pgcr)

	return
}
Example #15
File: exec.go  Project: PlanitarInc/dat
// queryScalar executes the query in builder and loads the resulting data into
// one or more destinations.
//
// Returns ErrNotFound if no value was found, and it was therefore not set.
func queryScalar(execer *Execer, destinations ...interface{}) error {
	fullSQL, args, blob, err := cacheOrSQL(execer)
	if err != nil {
		return err
	}
	if blob != nil {
		err = json.Unmarshal(blob, &destinations)
		if err == nil {
			return nil
		}
		// log it and fallthrough to let the query continue
		logger.Warn("queryScalar.2: Could not unmarshal cache data. Continuing with query")
	}

	defer logExecutionTime(time.Now(), fullSQL, args)
	// Run the query:
	var rows *sqlx.Rows
	if args == nil {
		rows, err = execer.database.Queryx(fullSQL)
	} else {
		rows, err = execer.database.Queryx(fullSQL, args...)
	}
	if err != nil {
		return logSQLError(err, "QueryScalar.load_value.query", fullSQL, args)
	}

	defer rows.Close()
	if rows.Next() {
		err = rows.Scan(destinations...)
		if err != nil {
			return logSQLError(err, "QueryScalar.load_value.scan", fullSQL, args)
		}

		setCache(execer, destinations, dtStruct)

		return nil
	}
	if err := rows.Err(); err != nil {
		return logSQLError(err, "QueryScalar.load_value.rows_err", fullSQL, args)
	}

	return dat.ErrNotFound
}
Example #16
// ScanOne scans the row, if any, returned from a SQL query into instance, closing the rows when done
func ScanOne(instance interface{}, rows *sqlx.Rows) error {
	defer rows.Close()
	for rows.Next() {
		if err := rows.StructScan(instance); err != nil {
			return err
		}
	}
	return nil
}
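A minimal sketch of using ScanOne to load a single record, assuming a *sqlx.DB named db, a hypothetical users table, a matching User struct, and Postgres-style placeholders (all assumptions). ScanOne closes the rows itself, so the caller does not need to:

// Hypothetical caller; User, db, and the users table are assumptions.
type User struct {
	ID   int    `db:"id"`
	Name string `db:"name"`
}

func getUser(db *sqlx.DB, id int) (User, error) {
	var u User
	rows, err := db.Queryx("SELECT id, name FROM users WHERE id = $1", id)
	if err != nil {
		return u, err
	}
	err = ScanOne(&u, rows)
	return u, err
}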
Example #17
File: tasks.go  Project: enr/goexpose
/*
Run postgres task
*/
func (p *PostgresTask) Run(r *http.Request, data map[string]interface{}) (response *Response) {

	response = NewResponse(http.StatusOK)
	queryresults := []*Response{}

	for _, query := range p.config.Queries {

		qresponse := NewResponse(http.StatusOK).StripStatusData()

		var (
			args []interface{}
			db   *sqlx.DB
			err  error
			url  string
			rows *sqlx.Rows
			Rows []map[string]interface{}

			errq error
		)
		if url, err = p.Interpolate(query.URL, data); err != nil {
			qresponse.Error(err)
			goto Append
		}

		// interpolate all args
		args = []interface{}{}
		for _, arg := range query.Args {
			interpolated, e := p.Interpolate(arg, data)
			if e != nil {
				qresponse.Error(e)
				goto Append
			}
			args = append(args, interpolated)
		}

		// add query with args to response?
		if p.config.ReturnQueries {
			qresponse.AddValue("query", query.Query).AddValue("args", args)
		}

		if db, err = sqlx.Connect("postgres", url); err != nil {

			if err, ok := err.(*pq.Error); ok {
				qresponse.AddValue("error_code", err.Code.Name())
			}
			qresponse.Error(err)
			goto Append
		}

		// run query
		rows, errq = db.Queryx(query.Query, args...)
		if errq != nil {
			if errq, ok := errq.(*pq.Error); ok {
				qresponse.AddValue("error_code", errq.Code.Name())
			}
			qresponse.Error(errq)
			goto Append
		}

		Rows = []map[string]interface{}{}

		for rows.Next() {
			results := make(map[string]interface{})
			err = rows.MapScan(results)
			if err != nil {
				if err, ok := err.(*pq.Error); ok {
					qresponse.AddValue("error_code", err.Code.Name())
				}
				qresponse.Error(err)
				goto Append
			}

			Rows = append(Rows, results)
		}
		qresponse.Result(Rows)

	Append:
		queryresults = append(queryresults, qresponse)
	}

	// single result
	if p.config.singleResultIndex != -1 {
		response.Result(queryresults[p.config.singleResultIndex])
	} else {
		response.Result(queryresults)
	}

	return
}
Example #18
File: exec.go  Project: PlanitarInc/dat
// queryJSONBlob executes the query in builder and loads the resulting data
// into a blob. If a single item is to be returned, set single to true.
//
// Returns ErrNotFound if nothing was found
func queryJSONBlob(execer *Execer, single bool) ([]byte, error) {
	fullSQL, args, blob, err := cacheOrSQL(execer)
	if err != nil {
		return nil, err
	}
	if blob != nil {
		return blob, nil
	}

	defer logExecutionTime(time.Now(), fullSQL, args)
	var rows *sqlx.Rows
	// Run the query:
	if args == nil {
		rows, err = execer.database.Queryx(fullSQL)
	} else {
		rows, err = execer.database.Queryx(fullSQL, args...)
	}
	if err != nil {
		return nil, logSQLError(err, "queryJSONStructs", fullSQL, args)
	}

	// TODO optimize this later, may be better to
	var buf bytes.Buffer
	i := 0
	if single {
		defer rows.Close()
		for rows.Next() {
			if i == 1 {
				if dat.Strict {
					logSQLError(errors.New("Multiple results returned"), "Expected single result", fullSQL, args)
					logger.Fatal("Expected single result, got many")
				} else {
					break
				}
			}
			i++

			err = rows.Scan(&blob)
			if err != nil {
				return nil, err
			}
			buf.Write(blob)
		}
	} else {
		defer rows.Close()
		for rows.Next() {
			if i == 0 {
				buf.WriteRune('[')
			} else {
				buf.WriteRune(',')
			}
			i++

			err = rows.Scan(&blob)
			if err != nil {
				return nil, err
			}
			buf.Write(blob)
		}
		if i > 0 {
			buf.WriteRune(']')
		}
	}

	if i == 0 {
		return nil, sql.ErrNoRows
	}

	blob = buf.Bytes()
	setCache(execer, blob, dtBytes)
	return blob, nil
}
Example #19
File: exec.go  Project: PlanitarInc/dat
// querySlice executes the query in builder and loads the resulting data into a
// slice of primitive values
//
// Returns ErrNotFound if no value was found, and it was therefore not set.
func querySlice(execer *Execer, dest interface{}) error {
	// Validate the dest and reflection values we need

	// This must be a pointer to a slice
	valueOfDest := reflect.ValueOf(dest)
	kindOfDest := valueOfDest.Kind()

	if kindOfDest != reflect.Ptr {
		panic("invalid type passed to LoadValues. Need a pointer to a slice")
	}

	// This must be a slice
	valueOfDest = reflect.Indirect(valueOfDest)
	kindOfDest = valueOfDest.Kind()

	if kindOfDest != reflect.Slice {
		panic("invalid type passed to LoadValues. Need a pointer to a slice")
	}

	recordType := valueOfDest.Type().Elem()

	recordTypeIsPtr := recordType.Kind() == reflect.Ptr
	if recordTypeIsPtr {
		// NOTE: this branch has no effect; the result of reflect.ValueOf is discarded.
		reflect.ValueOf(dest)
	}

	fullSQL, args, blob, err := cacheOrSQL(execer)
	if err != nil {
		return err
	}
	if blob != nil {
		err = json.Unmarshal(blob, &dest)
		if err == nil {
			return nil
		}
		// log it and fallthrough to let the query continue
		logger.Warn("querySlice.2: Could not unmarshal cache data. Continuing with query")
	}

	defer logExecutionTime(time.Now(), fullSQL, args)
	var rows *sqlx.Rows
	if args == nil {
		rows, err = execer.database.Queryx(fullSQL)
	} else {
		rows, err = execer.database.Queryx(fullSQL, args...)
	}
	if err != nil {
		return logSQLError(err, "querySlice.load_all_values.query", fullSQL, args)
	}

	sliceValue := valueOfDest
	defer rows.Close()
	for rows.Next() {
		// Create a new value to store our row:
		pointerToNewValue := reflect.New(recordType)
		newValue := reflect.Indirect(pointerToNewValue)

		err = rows.Scan(pointerToNewValue.Interface())
		if err != nil {
			return logSQLError(err, "querySlice.load_all_values.scan", fullSQL, args)
		}

		// Append our new value to the slice:
		sliceValue = reflect.Append(sliceValue, newValue)
	}
	valueOfDest.Set(sliceValue)

	if err := rows.Err(); err != nil {
		return logSQLError(err, "querySlice.load_all_values.rows_err", fullSQL, args)
	}

	setCache(execer, dest, dtStruct)

	return nil
}
Example #20
File: rower.go  Project: in3pid/ant
func (r mapRower) scanRow(rows *sqlx.Rows) (Value, error) {
	m := make(map[string]interface{})
	e := rows.MapScan(m)
	return m, e
}
Example #21
File: rower.go  Project: in3pid/ant
func (r structRower) scanRow(rows *sqlx.Rows) (Value, error) {
	p := reflect.New(r.t).Interface()
	e := rows.StructScan(p)
	return p, e
}
Example #22
func main() {

	var (
		debug      bool
		confFile   string
		configTest bool
		jobName    string
		listJobs   bool
		dontByte   bool
		dont_id    bool
	)

	flag.BoolVar(&debug, "debug", false, "Enable Debug output")
	flag.StringVar(&confFile, "config", "my2mo.json", "Config file to read")
	flag.BoolVar(&configTest, "configtest", false, "Load and parse configs, and exit")
	flag.StringVar(&jobName, "job", "", "Name of the job to run")
	flag.BoolVar(&dontByte, "dontconvertbytes", false, "We automatically convert byte arrays to strings. Set this to prevent it.")
	flag.BoolVar(&dont_id, "dontconvertid", false, "We automatically convert any SQL column 'id' to mongo element '_id'. Set this to prevent it.")
	flag.BoolVar(&listJobs, "list", false, "List the jobs available")
	flag.Parse()

	if debug {
		debugOut = log.New(os.Stdout, "[DEBUG]", log.Lshortfile)
	}

	// Because of my silly logic, the config params are logically
	// inverse from the function parameters. Dealing with that here.
	var (
		convertBytes bool = true
		convertId    bool = true
	)
	if dontByte {
		convertBytes = false
	}
	if dont_id {
		convertId = false
	}

	// Read config
	conf := loadFile(confFile)

	if configTest {
		// Just kicking the tires...
		fmt.Println("Config loaded and bootstrapped successfully...")
		os.Exit(0)
	}

	if listJobs {
		for _, job := range conf.Jobs {
			if job.Enabled == true {
				fmt.Printf("%s: %s\n", job.Name, job.Description)
			}
		}
		os.Exit(0)
	} else if jobName == "" {
		log.Fatal("--job is required")
	}

	// Grab the job, and let us get this party started
	job, ok := conf.GetJob(jobName)
	if ok == false {
		log.Fatalf("Job name %s is not valid, or disabled. Perhaps --list?\n", jobName)

	} else {

		// ensure writeoperation is one of insert, update, upsert
		if job.WriteOperation != "insert" && job.WriteOperation != "update" && job.WriteOperation != "upsert" {
			log.Fatalf("WriteOperation of %s for job %s is invalid!\n", job.WriteOperation, job.Name)
		}

		// Mongo handle, right to the collection
		mongod, mongoc := mongoOpen(&job.Mongo)

		// SQL can vary
		var sqlc *sqlx.Rows
		var sqld *sqlx.DB

		// Test the SqlType
		found := false
		for k, f := range sqlList {
			if k == job.SqlType {
				found = true
				debugOut.Printf("Found SQL type %s\n", k)
				sqld, sqlc = f(&job.SQL, job.SqlColumns)
				break
			}
		}
		if found == false {
			log.Fatalf("SQL type of %s is not currently valid. Pick one of: "+joinKeys(sqlList)+"\n", job.SqlType)
		}

		writeCount := 0
		errorCount := 0
		// Let's do this
		for sqlc.Next() {
			res, err := sqlGetRow(sqlc, convertBytes, convertId)
			// we already outputted any error in sqlGetRow
			// so we'll just skip this write, and move along
			if err == nil {
				ok := mongoWriteRow(mongoc, job.WriteOperation, res)
				if ok {
					writeCount++
				} else {
					errorCount++
				}
			} else {
				errorCount++
			}
		}

		// Clean up, explicitly
		mongod.Close()
		sqld.Close()

		totalCount := writeCount + errorCount
		log.Printf("Load completed with %d/%d rows written", writeCount, totalCount)
	}
}
Example #23
// Allows the messages series to be queried in some general ways.
func (database *SocialHarvestDB) Messages(queryParams CommonQueryParams, conds BasicConditions) ([]config.SocialHarvestMessage, uint64, uint64, uint64) {
	sanitizedQueryParams := SanitizeCommonQueryParams(queryParams)
	var results = []config.SocialHarvestMessage{}

	var err error

	// Must have a territory (for now)
	if sanitizedQueryParams.Territory == "" {
		return results, 0, sanitizedQueryParams.Skip, sanitizedQueryParams.Limit
	}

	var buffer bytes.Buffer
	var bufferCount bytes.Buffer
	var bufferQuery bytes.Buffer
	bufferCount.WriteString("SELECT COUNT(*)")
	bufferQuery.WriteString("SELECT *")

	buffer.WriteString(" FROM messages WHERE territory = '")
	buffer.WriteString(sanitizedQueryParams.Territory)
	buffer.WriteString("'")

	// optional date range (can have either or both)
	if sanitizedQueryParams.From != "" {
		buffer.WriteString(" AND time >= ")
		buffer.WriteString(sanitizedQueryParams.From)
	}
	if sanitizedQueryParams.To != "" {
		buffer.WriteString(" AND time <= ")
		buffer.WriteString(sanitizedQueryParams.To)
	}
	if sanitizedQueryParams.Network != "" {
		buffer.WriteString(" AND network = ")
		buffer.WriteString(sanitizedQueryParams.Network)
	}

	// BasicConditions (various basic query conditions to be used explicitly, not in a loop, because not all fields will be available depending on the series)
	if conds.Lang != "" {
		buffer.WriteString(" AND contributor_lang = ")
		buffer.WriteString(conds.Lang)
	}
	if conds.Country != "" {
		buffer.WriteString(" AND contributor_country = ")
		buffer.WriteString(conds.Country)
	}
	if conds.Geohash != "" {
		// Ensure the geohash is alphanumeric.
		// TODO: Pass these conditions through a sanitizer too, though the ORM should use prepared statements and take care of SQL injection....right? TODO: Check that too.
		pattern := `^(?i)[a-z0-9]+$`
		r, _ := regexp.Compile(pattern)
		if r.MatchString(conds.Geohash) {
			buffer.WriteString(" AND contributor_geohash LIKE ")
			buffer.WriteString(conds.Geohash)
			buffer.WriteString("%")
		}
	}
	if conds.Gender != "" {
		switch conds.Gender {
		case "-1", "f", "female":
			buffer.WriteString(" AND contributor_gender = -1")
		case "1", "m", "male":
			buffer.WriteString(" AND contributor_gender = 1")
		case "0", "u", "unknown":
			buffer.WriteString(" AND contributor_gender = 0")
		}
	}
	if conds.IsQuestion != 0 {
		buffer.WriteString(" AND is_question = 1")
	}

	// Count here (before limit and order)
	bufferCount.WriteString(buffer.String())

	// Continue with query returning results
	// TODO: Allow other sorting options? I'm not sure it matters because people likely want timely data. More important would be a search.
	buffer.WriteString(" ORDER BY time DESC")

	buffer.WriteString(" LIMIT ")
	buffer.WriteString(strconv.FormatUint(sanitizedQueryParams.Limit, 10))

	if (sanitizedQueryParams.Skip) > 0 {
		buffer.WriteString(" OFFSET ")
		buffer.WriteString(strconv.FormatUint(sanitizedQueryParams.Skip, 10))
	}

	bufferQuery.WriteString(buffer.String())
	buffer.Reset()

	query := bufferQuery.String()
	bufferQuery.Reset()

	countQuery := bufferCount.String()
	bufferCount.Reset()

	total := uint64(0)

	if db.Postgres != nil {
		var rows *sqlx.Rows
		rows, err = db.Postgres.Queryx(query)
		if err != nil {
			log.Println(err)
			return results, 0, sanitizedQueryParams.Skip, sanitizedQueryParams.Limit
		}
		defer rows.Close()
		// Map rows to array of struct
		// TODO: Make slice of fixed size given we know limit?
		var msg config.SocialHarvestMessage
		for rows.Next() {
			err = rows.StructScan(&msg)
			if err != nil {
				log.Println(err)
				return results, 0, sanitizedQueryParams.Skip, sanitizedQueryParams.Limit
			}
			results = append(results, msg)
		}

		err = db.Postgres.Get(&total, countQuery)
		if err != nil {
			log.Println(err)
		}
	}

	return results, total, sanitizedQueryParams.Skip, sanitizedQueryParams.Limit
}
Example #24
File: tasks.go  Project: enr/goexpose
/*
Run mysql task.
*/
func (m *MySQLTask) Run(r *http.Request, data map[string]interface{}) (response *Response) {

	response = NewResponse(http.StatusOK)

	queries := []*Response{}

	var (
		db   *sqlx.DB
		rows *sqlx.Rows
		err  error
	)

	for _, query := range m.config.Queries {

		var (
			Rows []map[string]interface{}
		)

		args := []interface{}{}

		qr := NewResponse(http.StatusOK).StripStatusData()

		var url string
		if url, err = m.Interpolate(query.URL, data); err != nil {
			qr.Error(err)
			goto Append
		}

		if m.config.ReturnQueries {
			qr.AddValue("query", query.Query)
		}

		if db, err = sqlx.Open("mysql", url); err != nil {
			qr.Error(err.Error())
			if err, ok := err.(*mysql.MySQLError); ok {
				qr.AddValue("error_code", err.Number)
			}
			goto Append
		}

		for _, arg := range query.Args {
			var a string

			if a, err = m.Interpolate(arg, data); err != nil {
				qr.Error(err)
				goto Append
			}

			args = append(args, a)
		}

		if m.config.ReturnQueries {
			qr.AddValue("args", args)
		}

		// run query
		rows, err = db.Queryx(query.Query, args...)
		if err != nil {
			qr.Error(err)
			if err, ok := err.(*mysql.MySQLError); ok {
				qr.AddValue("error_code", err.Number)
			}
			goto Append
		}

		Rows = []map[string]interface{}{}
		for rows.Next() {
			results := make(map[string]interface{})
			err = rows.MapScan(results)
			if err != nil {
				qr.Error(err)
				goto Append
			}
			Rows = append(Rows, results)
		}
		qr.Result(Rows)

	Append:
		queries = append(queries, qr)
	}

	// single result
	if m.config.singleResultIndex != -1 {
		response.Result(queries[m.config.singleResultIndex])
	} else {
		response.Result(queries)
	}

	return
}