// Orders need to be joined to extract usefull info, therefore they have to be parsed by hand func (d Datastore) parseOrders(model interface{}, id interface{}) error { var rows *sqlx.Rows var e error query := "select o.id, o.user_id, o.place_id, o.start, o.end_time, p.name as place_name, u.username, u.firstname, u.lastname from orders o left join users u on u.id = o.user_id left join places p on p.id = o.place_id" /* query := "select o.id, o.user_id, o.place_id, o.date + o.order_start as start, o.date + o.order_end as end, p.name as place_name, u.username, u.firstname, u.lastname from orders o left join users u on u.id = o.user_id left join places p on p.id = o.place_id" */if id != nil { query += " where o.id = $1" rows, e = d.Db.Queryx(query, id) } else { rows, e = d.Db.Queryx(query) } /*query = "SELECT * FROM orders o, users u, places p where u.id = o.user_id and p.id = o.place_id"*/ if e != nil { log.Println(e) } for rows.Next() { var id int64 var place_id, user_id sql.NullInt64 var username, firstname, lastname, place_name sql.NullString var start, end pq.NullTime if err := rows.Scan(&id, &user_id, &place_id, &start, &end, &place_name, &username, &firstname, &lastname); err != nil { log.Println(err) } order := Order{Id: id, UserId: user_id.Int64, PlaceId: place_id.Int64, Start: start.Time, End: end.Time, PlaceName: place_name.String, Username: username.String, Firstname: firstname.String, Lastname: lastname.String} if user_id.Valid == false || (lastname.String == "" && firstname.String == "") { order.Title = fmt.Sprintf("%v", place_name.String) } else { order.Title = fmt.Sprintf("%v: %v, %v", place_name.String, lastname.String, firstname.String) } *model.(*[]Order) = append(*model.(*[]Order), order) } e = rows.Err() return e }
func flattenUuidRows(rows *sqlx.Rows) (uuids []string, err error) { for rows.Next() { var uuid string err = rows.Scan(&uuid) if err != nil { return } uuids = append(uuids, uuid) } return }
// queryScalar executes the query in builder and loads the resulting data into
// one or more destinations.
//
// Returns ErrNotFound if no value was found, and it was therefore not set.
func queryScalar(execer *Execer, destinations ...interface{}) error {
	// Resolve the SQL, its args, and a possible cached JSON blob in one call.
	fullSQL, args, blob, err := cacheOrSQL(execer)
	if err != nil {
		return err
	}
	// Cache hit: try to decode the cached blob straight into destinations.
	if blob != nil {
		err = json.Unmarshal(blob, &destinations)
		if err == nil {
			return nil
		}
		// log it and fallthrough to let the query continue
		logger.Warn("queryScalar.2: Could not unmarshal cache data. Continuing with query")
	}
	defer logExecutionTime(time.Now(), fullSQL, args)
	// Run the query:
	var rows *sqlx.Rows
	if args == nil {
		rows, err = execer.database.Queryx(fullSQL)
	} else {
		rows, err = execer.database.Queryx(fullSQL, args...)
	}
	if err != nil {
		return logSQLError(err, "QueryScalar.load_value.query", fullSQL, args)
	}
	defer rows.Close()
	// Only the first row is consumed; any additional rows are ignored.
	if rows.Next() {
		err = rows.Scan(destinations...)
		if err != nil {
			return logSQLError(err, "QueryScalar.load_value.scan", fullSQL, args)
		}
		// Cache the scanned values for future calls with the same query.
		setCache(execer, destinations, dtStruct)
		return nil
	}
	// Distinguish "no rows" from an iteration error before reporting NotFound.
	if err := rows.Err(); err != nil {
		return logSQLError(err, "QueryScalar.load_value.rows_err", fullSQL, args)
	}
	return dat.ErrNotFound
}
func mapEvents(r *sqlx.Rows) ([]event, error) { events := make([]event, 0) for r.Next() { var temp string = "" e := event{} err := r.Scan(&e.ID, &temp, &e.Title, &e.Description, &e.Font) if err != nil { return nil, err } e.Date, err = stringToTime(temp) events = append(events, e) } /*doesn't work e := event{} err := e.mapRow(r) if err != nil { return nil, err } events = append(events, e) */ return events, nil }
// queryJSONBlob executes the query in builder and loads the resulting data
// into a blob. If a single item is to be returned, set single to true.
//
// Returns ErrNotFound if nothing was found
//
// NOTE(review): the empty-result path below actually returns sql.ErrNoRows,
// not dat.ErrNotFound as this comment (and queryScalar) suggest — confirm
// which sentinel callers check before unifying.
func queryJSONBlob(execer *Execer, single bool) ([]byte, error) {
	// Resolve the SQL, its args, and a possible cached blob in one call.
	fullSQL, args, blob, err := cacheOrSQL(execer)
	if err != nil {
		return nil, err
	}
	// Cache hit: the cached bytes are returned verbatim.
	if blob != nil {
		return blob, nil
	}
	defer logExecutionTime(time.Now(), fullSQL, args)
	var rows *sqlx.Rows
	// Run the query:
	if args == nil {
		rows, err = execer.database.Queryx(fullSQL)
	} else {
		rows, err = execer.database.Queryx(fullSQL, args...)
	}
	if err != nil {
		return nil, logSQLError(err, "queryJSONStructs", fullSQL, args)
	}
	// TODO optimize this later, may be better to
	var buf bytes.Buffer
	i := 0
	if single {
		defer rows.Close()
		for rows.Next() {
			// Guard against multiple rows when only one was expected.
			if i == 1 {
				if dat.Strict {
					logSQLError(errors.New("Multiple results returned"), "Expected single result", fullSQL, args)
					logger.Fatal("Expected single result, got many")
				} else {
					// Non-strict mode: keep the first row, ignore the rest.
					break
				}
			}
			i++
			err = rows.Scan(&blob)
			if err != nil {
				return nil, err
			}
			buf.Write(blob)
		}
	} else {
		defer rows.Close()
		// Multiple rows: stitch the per-row JSON blobs into a JSON array.
		for rows.Next() {
			if i == 0 {
				buf.WriteRune('[')
			} else {
				buf.WriteRune(',')
			}
			i++
			err = rows.Scan(&blob)
			if err != nil {
				return nil, err
			}
			buf.Write(blob)
		}
		if i > 0 {
			buf.WriteRune(']')
		}
	}
	// No rows at all: report not-found (see NOTE above re: sentinel choice).
	if i == 0 {
		return nil, sql.ErrNoRows
	}
	blob = buf.Bytes()
	// Cache the assembled blob for future calls with the same query.
	setCache(execer, blob, dtBytes)
	return blob, nil
}
// QuerySlice executes the query in builder and loads the resulting data into a // slice of primitive values // // Returns ErrNotFound if no value was found, and it was therefore not set. func querySlice(execer *Execer, dest interface{}) error { // Validate the dest and reflection values we need // This must be a pointer to a slice valueOfDest := reflect.ValueOf(dest) kindOfDest := valueOfDest.Kind() if kindOfDest != reflect.Ptr { panic("invalid type passed to LoadValues. Need a pointer to a slice") } // This must a slice valueOfDest = reflect.Indirect(valueOfDest) kindOfDest = valueOfDest.Kind() if kindOfDest != reflect.Slice { panic("invalid type passed to LoadValues. Need a pointer to a slice") } recordType := valueOfDest.Type().Elem() recordTypeIsPtr := recordType.Kind() == reflect.Ptr if recordTypeIsPtr { reflect.ValueOf(dest) } fullSQL, args, blob, err := cacheOrSQL(execer) if err != nil { return err } if blob != nil { err = json.Unmarshal(blob, &dest) if err == nil { return nil } // log it and fallthrough to let the query continue logger.Warn("querySlice.2: Could not unmarshal cache data. Continuing with query") } defer logExecutionTime(time.Now(), fullSQL, args) var rows *sqlx.Rows if args == nil { rows, err = execer.database.Queryx(fullSQL) } else { rows, err = execer.database.Queryx(fullSQL, args...) 
} if err != nil { return logSQLError(err, "querySlice.load_all_values.query", fullSQL, args) } sliceValue := valueOfDest defer rows.Close() for rows.Next() { // Create a new value to store our row: pointerToNewValue := reflect.New(recordType) newValue := reflect.Indirect(pointerToNewValue) err = rows.Scan(pointerToNewValue.Interface()) if err != nil { return logSQLError(err, "querySlice.load_all_values.scan", fullSQL, args) } // Append our new value to the slice: sliceValue = reflect.Append(sliceValue, newValue) } valueOfDest.Set(sliceValue) if err := rows.Err(); err != nil { return logSQLError(err, "querySlice.load_all_values.rows_err", fullSQL, args) } setCache(execer, dest, dtStruct) return nil }
func (r scanRower) scanRow(rows *sqlx.Rows) (Value, error) { p := reflect.New(r.t).Interface() e := rows.Scan(p) return p, e }