Example #1
// Orders need to be joined with users and places to extract useful info, so they have to be parsed by hand.
func (d Datastore) parseOrders(model interface{}, id interface{}) error {
	var rows *sqlx.Rows
	var e error
	query := "select o.id, o.user_id, o.place_id, o.start, o.end_time, p.name as place_name, u.username, u.firstname, u.lastname from orders o left join users u on u.id = o.user_id left join places p on p.id = o.place_id"
	/*	query := "select o.id, o.user_id, o.place_id, o.date + o.order_start as start, o.date + o.order_end as end, p.name as place_name, u.username, u.firstname, u.lastname from orders o left join users u on u.id = o.user_id left join places p on p.id = o.place_id"
	 */if id != nil {
		query += " where o.id = $1"
		rows, e = d.Db.Queryx(query, id)
	} else {
		rows, e = d.Db.Queryx(query)
	}
	/*query = "SELECT * FROM orders o, users u, places p where u.id = o.user_id and p.id = o.place_id"*/
	if e != nil {
		log.Println(e)
	}
	for rows.Next() {
		var id int64
		var place_id, user_id sql.NullInt64
		var username, firstname, lastname, place_name sql.NullString
		var start, end pq.NullTime
		if err := rows.Scan(&id, &user_id, &place_id, &start, &end, &place_name, &username, &firstname, &lastname); err != nil {
			log.Println(err)
		}
		order := Order{Id: id, UserId: user_id.Int64, PlaceId: place_id.Int64, Start: start.Time, End: end.Time, PlaceName: place_name.String, Username: username.String, Firstname: firstname.String, Lastname: lastname.String}
		// Build a display title; fall back to the place name alone when no user is attached.
		if !user_id.Valid || (lastname.String == "" && firstname.String == "") {
			order.Title = place_name.String
		} else {
			order.Title = fmt.Sprintf("%v: %v, %v", place_name.String, lastname.String, firstname.String)
		}
		*model.(*[]Order) = append(*model.(*[]Order), order)
	}
	e = rows.Err()
	return e
}
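parseOrders writes into whatever *[]Order is passed as model, so a caller only needs to hand over the address of a slice; passing nil for id loads every order. A minimal usage sketch, assuming the Datastore value d has been initialised elsewhere with a working sqlx connection in its Db field:

	var orders []Order
	if err := d.parseOrders(&orders, nil); err != nil { // nil id: load every order
		log.Println(err)
	}
	for _, o := range orders {
		fmt.Println(o.Title)
	}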
Example #2
// queryScalar executes the query in the builder and loads the resulting data into
// one or more scalar destinations.
//
// Returns dat.ErrNotFound if no row was found; the destinations are then left unset.
func queryScalar(execer *Execer, destinations ...interface{}) error {
	fullSQL, args, blob, err := cacheOrSQL(execer)
	if err != nil {
		return err
	}
	if blob != nil {
		err = json.Unmarshal(blob, &destinations)
		if err == nil {
			return nil
		}
		// log it and fallthrough to let the query continue
		logger.Warn("queryScalar.2: Could not unmarshal cache data. Continuing with query")
	}

	defer logExecutionTime(time.Now(), fullSQL, args)
	// Run the query:
	var rows *sqlx.Rows
	if args == nil {
		rows, err = execer.database.Queryx(fullSQL)
	} else {
		rows, err = execer.database.Queryx(fullSQL, args...)
	}
	if err != nil {
		return logSQLError(err, "QueryScalar.load_value.query", fullSQL, args)
	}

	defer rows.Close()
	if rows.Next() {
		err = rows.Scan(destinations...)
		if err != nil {
			return logSQLError(err, "QueryScalar.load_value.scan", fullSQL, args)
		}

		setCache(execer, destinations, dtStruct)

		return nil
	}
	if err := rows.Err(); err != nil {
		return logSQLError(err, "QueryScalar.load_value.rows_err", fullSQL, args)
	}

	return dat.ErrNotFound
}
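The core of queryScalar is scanning a single row into a variadic list of destinations and mapping an empty result set to a sentinel error. Stripped of the caching and logging layers, the same pattern against plain sqlx looks roughly like the sketch below (scanOne and its signature are made up for illustration; db is an *sqlx.DB, and ErrNoRows comes from database/sql):

	func scanOne(db *sqlx.DB, query string, args []interface{}, dest ...interface{}) error {
		rows, err := db.Queryx(query, args...)
		if err != nil {
			return err
		}
		defer rows.Close()
		if rows.Next() {
			// Scan the first row's columns into the supplied destinations.
			return rows.Scan(dest...)
		}
		if err := rows.Err(); err != nil {
			return err
		}
		return sql.ErrNoRows // queryScalar returns dat.ErrNotFound at this point
	}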
Example #3
// querySlice executes the query in the builder and loads the resulting data into
// a slice of primitive values.
//
// dest must be a pointer to a slice; if the query matches no rows, the slice is left empty.
func querySlice(execer *Execer, dest interface{}) error {
	// Validate the dest and reflection values we need

	// This must be a pointer to a slice
	valueOfDest := reflect.ValueOf(dest)
	kindOfDest := valueOfDest.Kind()

	if kindOfDest != reflect.Ptr {
		panic("invalid type passed to LoadValues. Need a pointer to a slice")
	}

	// This must be a slice
	valueOfDest = reflect.Indirect(valueOfDest)
	kindOfDest = valueOfDest.Kind()

	if kindOfDest != reflect.Slice {
		panic("invalid type passed to LoadValues. Need a pointer to a slice")
	}

	// recordType is the slice's element type (e.g. int64 for a []int64); a new
	// value of this type is allocated for each scanned row below.
	recordType := valueOfDest.Type().Elem()

	fullSQL, args, blob, err := cacheOrSQL(execer)
	if err != nil {
		return err
	}
	if blob != nil {
		err = json.Unmarshal(blob, &dest)
		if err == nil {
			return nil
		}
		// log it and fallthrough to let the query continue
		logger.Warn("querySlice.2: Could not unmarshal cache data. Continuing with query")
	}

	defer logExecutionTime(time.Now(), fullSQL, args)
	var rows *sqlx.Rows
	if args == nil {
		rows, err = execer.database.Queryx(fullSQL)
	} else {
		rows, err = execer.database.Queryx(fullSQL, args...)
	}
	if err != nil {
		return logSQLError(err, "querySlice.load_all_values.query", fullSQL, args)
	}

	sliceValue := valueOfDest
	defer rows.Close()
	for rows.Next() {
		// Create a new value to store our row:
		pointerToNewValue := reflect.New(recordType)
		newValue := reflect.Indirect(pointerToNewValue)

		err = rows.Scan(pointerToNewValue.Interface())
		if err != nil {
			return logSQLError(err, "querySlice.load_all_values.scan", fullSQL, args)
		}

		// Append our new value to the slice:
		sliceValue = reflect.Append(sliceValue, newValue)
	}
	valueOfDest.Set(sliceValue)

	if err := rows.Err(); err != nil {
		return logSQLError(err, "querySlice.load_all_values.rows_err", fullSQL, args)
	}

	setCache(execer, dest, dtStruct)

	return nil
}
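The reflection steps above (check for a pointer to a slice, take the element type, allocate one value per row with reflect.New, append it, then Set the result back) are what let one function fill a []int64, []string, or any other primitive slice. A reduced sketch of just that mechanism, without the caching layer (loadSlice is a made-up name; db is an *sqlx.DB):

	// loadSlice scans each row's single column into *dest, which must be a
	// pointer to a slice of a primitive type (e.g. *[]int64, *[]string).
	func loadSlice(db *sqlx.DB, dest interface{}, query string, args ...interface{}) error {
		v := reflect.ValueOf(dest)
		if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Slice {
			return fmt.Errorf("loadSlice: dest must be a pointer to a slice")
		}
		slice := v.Elem()
		elemType := slice.Type().Elem()

		rows, err := db.Queryx(query, args...)
		if err != nil {
			return err
		}
		defer rows.Close()
		for rows.Next() {
			p := reflect.New(elemType) // new element for this row
			if err := rows.Scan(p.Interface()); err != nil {
				return err
			}
			slice = reflect.Append(slice, p.Elem())
		}
		if err := rows.Err(); err != nil {
			return err
		}
		v.Elem().Set(slice)
		return nil
	}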