Example #1
// DropAndCreateTable drops the table if it already exists and recreates it
func DropAndCreateTable(schema string, tableName string, db *sqlx.DB) (bool, error) {
	var err error
	var rows *sqlx.Rows
	// Check whether the table already exists in the database
	rows, err = db.Queryx("SELECT to_regclass('" + tableName + "');")
	if err != nil {
		//fmt.Println("Error on check table '"+tableName+"':", err)
		return false, err
	}
	defer rows.Close()

	// If it exists, drop it
	rowsCount := 0
	for rows.Next() {
		rowsCount++
	}

	if rowsCount > 0 {
		_, err = db.Exec("DROP TABLE IF EXISTS " + tableName + ";")
		if err != nil {
			//fmt.Println("Error on drop table '"+tableName+"':", err)
			return false, err
		}
	}

	// Create the table
	_, err = db.Exec(schema)
	if err != nil {
		//fmt.Println("Error on create table '"+tableName+"':", err)
		return false, err
	}

	return true, nil
}
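A minimal call sketch for the helper above, assuming DropAndCreateTable lives in the same package and Postgres is reached through github.com/lib/pq; the DSN, schema, and table name are placeholders, not part of the original example.

package main

import (
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"
)

func main() {
	// Placeholder DSN for illustration only.
	db, err := sqlx.Connect("postgres", "postgres://user:pass@localhost/mydb?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Hypothetical schema and table name.
	schema := `CREATE TABLE users (id SERIAL PRIMARY KEY, name TEXT NOT NULL);`
	if _, err := DropAndCreateTable(schema, "users", db); err != nil {
		log.Fatal(err)
	}
	log.Println("table users recreated")
}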
Example #2
// ScanOne scans the row, if any, returned from the SQL query into instance and closes rows
func ScanOne(instance interface{}, rows *sqlx.Rows) error {
	defer rows.Close()
	for rows.Next() {
		if err := rows.StructScan(instance); err != nil {
			return err
		}
	}
	// Surface any error encountered during iteration.
	return rows.Err()
}
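A hypothetical caller for ScanOne; the User struct, table, and query below are illustrative and not part of the original example.

// User is a hypothetical struct used only to illustrate the call.
type User struct {
	ID   int    `db:"id"`
	Name string `db:"name"`
}

func getUser(db *sqlx.DB, id int) (*User, error) {
	rows, err := db.Queryx("SELECT id, name FROM users WHERE id = $1", id)
	if err != nil {
		return nil, err
	}
	// ScanOne closes rows itself, so no deferred Close is needed here.
	u := &User{}
	if err := ScanOne(u, rows); err != nil {
		return nil, err
	}
	return u, nil
}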
Example #3
// queryScalar executes the query in builder and loads the resulting data into
// one or more destinations.
//
// Returns ErrNotFound if no value was found, and it was therefore not set.
func queryScalar(execer *Execer, destinations ...interface{}) error {
	fullSQL, args, blob, err := cacheOrSQL(execer)
	if err != nil {
		return err
	}
	if blob != nil {
		err = json.Unmarshal(blob, &destinations)
		if err == nil {
			return nil
		}
		// log it and fallthrough to let the query continue
		logger.Warn("queryScalar.2: Could not unmarshal cache data. Continuing with query")
	}

	defer logExecutionTime(time.Now(), fullSQL, args)
	// Run the query:
	var rows *sqlx.Rows
	if args == nil {
		rows, err = execer.database.Queryx(fullSQL)
	} else {
		rows, err = execer.database.Queryx(fullSQL, args...)
	}
	if err != nil {
		return logSQLError(err, "QueryScalar.load_value.query", fullSQL, args)
	}

	defer rows.Close()
	if rows.Next() {
		err = rows.Scan(destinations...)
		if err != nil {
			return logSQLError(err, "QueryScalar.load_value.scan", fullSQL, args)
		}

		setCache(execer, destinations, dtStruct)

		return nil
	}
	if err := rows.Err(); err != nil {
		return logSQLError(err, "QueryScalar.load_value.rows_err", fullSQL, args)
	}

	return dat.ErrNotFound
}
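A hypothetical caller for queryScalar showing the variadic destinations; the Execer is assumed to have been built by the surrounding package so that its SQL yields a two-column result, and the names below are illustrative.

// loadStats is illustrative only: it assumes execer carries a query such as
// "SELECT COUNT(*), MAX(id) FROM users", so both destinations get scanned.
func loadStats(execer *Execer) (count int64, maxID int64, err error) {
	err = queryScalar(execer, &count, &maxID)
	return count, maxID, err
}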
Example #4
// queryJSONBlob executes the query in builder and loads the resulting data
// into a blob. If a single item is to be returned, set single to true.
//
// Returns sql.ErrNoRows if nothing was found
func queryJSONBlob(execer *Execer, single bool) ([]byte, error) {
	fullSQL, args, blob, err := cacheOrSQL(execer)
	if err != nil {
		return nil, err
	}
	if blob != nil {
		return blob, nil
	}

	defer logExecutionTime(time.Now(), fullSQL, args)
	var rows *sqlx.Rows
	// Run the query:
	if args == nil {
		rows, err = execer.database.Queryx(fullSQL)
	} else {
		rows, err = execer.database.Queryx(fullSQL, args...)
	}
	if err != nil {
		return nil, logSQLError(err, "queryJSONBlob", fullSQL, args)
	}

	// TODO optimize this later, may be better to
	var buf bytes.Buffer
	i := 0
	if single {
		defer rows.Close()
		for rows.Next() {
			if i == 1 {
				if dat.Strict {
					logSQLError(errors.New("Multiple results returned"), "Expected single result", fullSQL, args)
					logger.Fatal("Expected single result, got many")
				} else {
					break
				}
			}
			i++

			err = rows.Scan(&blob)
			if err != nil {
				return nil, err
			}
			buf.Write(blob)
		}
	} else {
		defer rows.Close()
		for rows.Next() {
			if i == 0 {
				buf.WriteRune('[')
			} else {
				buf.WriteRune(',')
			}
			i++

			err = rows.Scan(&blob)
			if err != nil {
				return nil, err
			}
			buf.Write(blob)
		}
		if i > 0 {
			buf.WriteRune(']')
		}
	}

	if i == 0 {
		return nil, sql.ErrNoRows
	}

	blob = buf.Bytes()
	setCache(execer, blob, dtBytes)
	return blob, nil
}
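A hypothetical caller for queryJSONBlob with single set to false, in which case the helper concatenates the per-row blobs into a JSON array that can be unmarshalled into a slice; the loadUsersJSON name and the User type (as sketched earlier) are assumptions, not part of the library.

// loadUsersJSON is illustrative only: with single=false the returned blob is a
// JSON array built from one JSON document per row, decoded here into a slice.
func loadUsersJSON(execer *Execer) ([]User, error) {
	blob, err := queryJSONBlob(execer, false)
	if err != nil {
		return nil, err
	}
	var users []User
	if err := json.Unmarshal(blob, &users); err != nil {
		return nil, err
	}
	return users, nil
}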
Example #5
// querySlice executes the query in builder and loads the resulting data into a
// slice of primitive values
//
// Returns ErrNotFound if no value was found, and it was therefore not set.
func querySlice(execer *Execer, dest interface{}) error {
	// Validate the dest and reflection values we need

	// This must be a pointer to a slice
	valueOfDest := reflect.ValueOf(dest)
	kindOfDest := valueOfDest.Kind()

	if kindOfDest != reflect.Ptr {
		panic("invalid type passed to LoadValues. Need a pointer to a slice")
	}

	// This must be a slice
	valueOfDest = reflect.Indirect(valueOfDest)
	kindOfDest = valueOfDest.Kind()

	if kindOfDest != reflect.Slice {
		panic("invalid type passed to LoadValues. Need a pointer to a slice")
	}

	recordType := valueOfDest.Type().Elem()

	// Note: a pointer element type (e.g. []*int64) is not handled specially;
	// each row below is scanned into a freshly allocated value of recordType.

	fullSQL, args, blob, err := cacheOrSQL(execer)
	if err != nil {
		return err
	}
	if blob != nil {
		err = json.Unmarshal(blob, &dest)
		if err == nil {
			return nil
		}
		// log it and fallthrough to let the query continue
		logger.Warn("querySlice.2: Could not unmarshal cache data. Continuing with query")
	}

	defer logExecutionTime(time.Now(), fullSQL, args)
	var rows *sqlx.Rows
	if args == nil {
		rows, err = execer.database.Queryx(fullSQL)
	} else {
		rows, err = execer.database.Queryx(fullSQL, args...)
	}
	if err != nil {
		return logSQLError(err, "querySlice.load_all_values.query", fullSQL, args)
	}

	sliceValue := valueOfDest
	defer rows.Close()
	for rows.Next() {
		// Create a new value to store our row:
		pointerToNewValue := reflect.New(recordType)
		newValue := reflect.Indirect(pointerToNewValue)

		err = rows.Scan(pointerToNewValue.Interface())
		if err != nil {
			return logSQLError(err, "querySlice.load_all_values.scan", fullSQL, args)
		}

		// Append our new value to the slice:
		sliceValue = reflect.Append(sliceValue, newValue)
	}
	valueOfDest.Set(sliceValue)

	if err := rows.Err(); err != nil {
		return logSQLError(err, "querySlice.load_all_values.rows_err", fullSQL, args)
	}

	setCache(execer, dest, dtStruct)

	return nil
}
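A hypothetical caller for querySlice loading a single column into a slice of primitives; the Execer is assumed to carry a one-column query, and the names below are illustrative.

// loadUserIDs is illustrative only: it assumes execer carries a one-column
// query such as "SELECT id FROM users", scanned row by row into the slice.
func loadUserIDs(execer *Execer) ([]int64, error) {
	var ids []int64
	if err := querySlice(execer, &ids); err != nil {
		return nil, err
	}
	return ids, nil
}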