func GetHistoryPlayers(db *sql.DB, days int) ([]HistoryPlayers, error) { var data []HistoryPlayers var query string switch days { case 1: query = "SELECT history_slots_used, history_date FROM history WHERE history_date > NOW() - INTERVAL 1 DAY" case 7: query = "SELECT a.history_slots_used, a.history_date FROM (SELECT history_slots_used, history_date, DATE_FORMAT(history_date, '%p%d%m%y') AS g FROM history WHERE history_date > NOW() - INTERVAL 1 WEEK GROUP BY g) AS a" case 30: query = "SELECT a.history_slots_used, a.history_date FROM (SELECT history_slots_used, history_date, DATE_FORMAT(history_date, '%d%m%y') AS g FROM history WHERE history_date > NOW() - INTERVAL 1 MONTH GROUP BY g) AS a" case 365: query = "SELECT a.history_slots_used, a.history_date FROM (SELECT history_slots_used, history_date, DATE_FORMAT(history_date, '%u%y') AS g FROM history WHERE history_date > NOW() - INTERVAL 1 YEAR GROUP BY g) AS a" } rows, err := db.Query(query) if err != nil { return data, err } defer rows.Close() for rows.Next() { var tmp HistoryPlayers if err := rows.Scan(&tmp.Total, &tmp.Date); err != nil { return data, err } data = append(data, tmp) } return data, nil }
func getApplicationDeployments(db *sql.DB, a *models.Application, limit int) ([]*models.Deployment, error) { deployments := []*models.Deployment{} rows, err := db.Query(applicationDeploymentsStmt, a.Name, limit) if err != nil { return deployments, err } defer rows.Close() for rows.Next() { var state string d := &models.Deployment{} err = rows.Scan(&d.Id, &d.UserId, &d.TargetName, &d.CommitSha, &d.Branch, &d.Comment, &state, &d.CreatedAt) if err != nil { return deployments, err } d.State = models.DeploymentState(state) deployments = append(deployments, d) } if err := rows.Err(); err != nil { return deployments, err } return deployments, nil }
func DeployCumulative(db *sql.DB, simid []byte, proto string) (xys []XY, err error) { sql := `SELECT Time, IFNULL(Count, 0) FROM TimeList LEFT JOIN (SELECT ti.Time AS Timestep,COUNT(*) AS Count FROM TimeList AS ti JOIN Agents AS ag ON (ti.Time >= ag.EnterTime) AND (ag.ExitTime >= ti.Time OR ag.ExitTime IS NULL) WHERE ti.SimId = ag.SimId AND ag.SimId = ? AND ag.Prototype = ? GROUP BY ti.Time ORDER BY ti.Time) AS foo ON foo.Timestep = TimeList.Time;` rows, err := db.Query(sql, simid, proto) if err != nil { return nil, err } for rows.Next() { xy := XY{} if err := rows.Scan(&xy.X, &xy.Y); err != nil { return nil, err } xys = append(xys, xy) } if err := rows.Err(); err != nil { return nil, err } return xys, nil }
func GetOptions(db *sql.DB) (map[string][]Option, error) { options := make(map[string][]Option) rows, err := db.Query(`SELECT name, code, option_code, option_value , option_name FROM options`) if err != nil { return nil, err } defer rows.Close() for rows.Next() { var opt Option if err := rows.Scan(&opt.Name, &opt.Code, &opt.OptionCode, &opt.OptionValue, &opt.OptionName); err != nil { log.Println("scan failed:", err) return nil, err } if sel, ok := options[opt.Code]; ok { sel = append(sel, opt) options[opt.Code] = sel } else { sel = []Option{} sel = append(sel, opt) options[opt.Code] = sel } } return options, nil }
func (store *MysqlStore) SearchObservations(db *sql.DB, tok string, trollId int, amis []int) ([]*Observation, error) { sql := "select auteur, num, date, type, nom, x, y, z from observation where" if num, _ := strconv.Atoi(tok); num != 0 { sql += " num=" + tok } else { sql += " nom like '%" + tok + "%'" } sql += " and auteur in (" + strconv.Itoa(trollId) for _, id := range amis { sql += "," + strconv.Itoa(id) } sql += ") order by num, date desc limit 100" rows, err := db.Query(sql) observations := make([]*Observation, 0, 20) for rows.Next() { r := new(Observation) err = rows.Scan(&r.Auteur, &r.Num, &r.Date, &r.Type, &r.Nom, &r.X, &r.Y, &r.Z) if err != nil { return nil, err } if len(observations) > 0 && r.Num == observations[len(observations)-1].Num { // dédoublonnage continue } observations = append(observations, r) } rows.Close() return observations, nil }
// DataCheck reports whether qry returns at least one row.
func DataCheck(qry string, conn *sql.DB) bool {
	rows, err := conn.Query(qry)
	if err != nil {
		log.Printf(`Error with "%s": %s`, qry, err)
		// Previously execution continued and iterated nil rows, panicking.
		return false
	}
	defer rows.Close() // previously never closed
	// One Next call is enough; no need to count every row
	// (the old code also shadowed the builtin `len`).
	return rows.Next()
}
func TableList(db *sql.DB) (map[string]Table, error) { rows, err := db.Query("SELECT id, rev, doc FROM divan WHERE doc->>'type' = 'table'") if err != nil && err.Error() == `pq: relation "divan" does not exist` { return nil, fmt.Errorf("divan table missing! Trying running with -b flag to bootstrap the environment or create the table manually.") } if err != nil { return nil, err } tables := make(map[string]Table) for rows.Next() { doc := new(Doc) if err := rows.Scan(&doc.Id, &doc.Rev, &doc.Doc); err != nil { return nil, err } t := new(Table) t.Db = db err := doc.JSON(t) if err != nil { return nil, err } tables[t.Name] = *t } return tables, nil }
// altSet emulates an upsert for Set operations: Postgres has no
// REPLACE INTO, so it invokes the custom replaceinto() SQL function.
func altSet(db *sql.DB, key, value string) error {
	rows, err := db.Query("SELECT replaceinto($1, $2)", key, value)
	if err != nil {
		return err
	}
	// Closing releases the result; any close error is reported to the caller.
	return rows.Close()
}
// SelectBodys fetches the body text for each review id, returned as a map
// from id to body. An empty ids slice yields an empty map without touching
// the database.
func SelectBodys(db *sql.DB, ids []int) (map[int]string, error) {
	m := map[int]string{}
	if len(ids) == 0 {
		// "IN ()" is invalid SQL; previously this produced a query error.
		return m, nil
	}
	strs := make([]string, len(ids))
	for i, id := range ids {
		strs[i] = fmt.Sprintf("%d", id)
	}
	// ids are ints formatted by us, so interpolation is injection-safe here.
	clause := strings.Join(strs, ",")
	rows, err := db.Query(
		fmt.Sprintf("SELECT id, body FROM reviews WHERE id IN (%s)", clause),
	)
	if err != nil {
		return m, err
	}
	defer rows.Close() // previously leaked
	for rows.Next() {
		var id int
		var body string
		if err := rows.Scan(&id, &body); err != nil {
			return m, fmt.Errorf("SELECT error: %s", err)
		}
		m[id] = body
	}
	return m, rows.Err()
}
// gatherPerfEventWaits can be used to get total time and number of event waits func (m *Mysql) gatherPerfEventWaits(db *sql.DB, serv string, acc telegraf.Accumulator) error { rows, err := db.Query(perfEventWaitsQuery) if err != nil { return err } defer rows.Close() var ( event string starCount, timeWait float64 ) servtag, err := parseDSN(serv) if err != nil { servtag = "localhost" } tags := map[string]string{ "server": servtag, } for rows.Next() { if err := rows.Scan(&event, &starCount, &timeWait); err != nil { return err } tags["event_name"] = event fields := map[string]interface{}{ "events_waits_total": starCount, "events_waits_seconds_total": timeWait / picoSeconds, } acc.AddFields("mysql_perf_schema", fields, tags) } return nil }
func getSummary(db *sql.DB) (summary, error) { s := newSummary() rows, err := db.Query(`SELECT Day, Version, Count FROM VersionSummary WHERE Day > now() - '1 year'::INTERVAL;`) if err != nil { return summary{}, err } defer rows.Close() for rows.Next() { var day time.Time var ver string var num int err := rows.Scan(&day, &ver, &num) if err != nil { return summary{}, err } if ver == "v0.0" { // ? continue } // SUPER UGLY HACK to avoid having to do sorting properly if len(ver) == 4 { // v0.x ver = ver[:3] + "0" + ver[3:] // now v0.0x } s.setCount(day.Format("2006-01-02"), ver, num) } return s, nil }
// ScrapeEngineTokudbStatus scrapes from `SHOW ENGINE TOKUDB STATUS`. func ScrapeEngineTokudbStatus(db *sql.DB, ch chan<- prometheus.Metric) error { tokudbRows, err := db.Query(engineTokudbStatusQuery) if err != nil { return err } defer tokudbRows.Close() var temp, key string var val sql.RawBytes for tokudbRows.Next() { if err := tokudbRows.Scan(&temp, &key, &val); err != nil { return err } key = strings.ToLower(key) if floatVal, ok := parseStatus(val); ok { ch <- prometheus.MustNewConstMetric( newDesc(tokudb, sanitizeTokudbMetric(key), "Generic metric from SHOW ENGINE TOKUDB STATUS."), prometheus.UntypedValue, floatVal, ) } } return nil }
// gatherInfoSchemaAutoIncStatuses can be used to get auto incremented values of the column func (m *Mysql) gatherInfoSchemaAutoIncStatuses(db *sql.DB, serv string, acc telegraf.Accumulator) error { rows, err := db.Query(infoSchemaAutoIncQuery) if err != nil { return err } defer rows.Close() var ( schema, table, column string incValue, maxInt uint64 ) servtag, err := parseDSN(serv) if err != nil { servtag = "localhost" } for rows.Next() { if err := rows.Scan(&schema, &table, &column, &incValue, &maxInt); err != nil { return err } tags := map[string]string{ "server": servtag, "schema": schema, "table": table, "column": column, } fields := make(map[string]interface{}) fields["auto_increment_column"] = incValue fields["auto_increment_column_max"] = maxInt acc.AddFields("mysql_info_schema", fields, tags) } return nil }
// LockPartition blocks until it acquires a Postgres advisory lock on one of
// the max partitions of namespace ns, then returns that partition index.
// The advisory-lock key is the IEEE CRC32 checksum of "<ns>.<p>".
func LockPartition(pg *sql.DB, ns string, max uint64) (uint64, error) {
	tab := crc32.MakeTable(crc32.IEEE)
	for {
		for p := uint64(0); p < max; p++ {
			pId := fmt.Sprintf("%s.%d", ns, p)
			check := crc32.Checksum([]byte(pId), tab)
			rows, err := pg.Query("select pg_try_advisory_lock($1)", check)
			if err != nil {
				// Transient query failure: try the next partition.
				continue
			}
			acquired := false
			for rows.Next() {
				var result sql.NullBool
				// Previously the Scan error was silently ignored.
				if err := rows.Scan(&result); err != nil {
					break
				}
				if result.Valid && result.Bool {
					acquired = true
				}
			}
			rows.Close()
			if acquired {
				fmt.Printf("at=%q partition=%d max=%d\n", "acquired-lock", p, max)
				return p, nil
			}
		}
		fmt.Printf("at=%q\n", "waiting-for-partition-lock")
		time.Sleep(time.Second * 10)
	}
	// NOTE: the old trailing `return 0, errors.New(...)` was unreachable
	// (the loop above never exits except by returning) and has been removed.
}
// mustQuery runs sql against db and returns the rows, panicking on any
// query error — for callers that treat a failed query as fatal.
func mustQuery(db *sql.DB, sql string, args ...interface{}) *sql.Rows {
	rows, err := db.Query(sql, args...)
	if err != nil {
		panic(err)
	}
	return rows
}
// getData prints up to two task_total/task_condition pairs from
// globe_task_list to stdout.
func getData(db *sql.DB) {
	qStr := "select task_total,task_condition from globe_task_list limit 2"
	rows, err := db.Query(qStr)
	if err != nil {
		// Previously execution continued with nil rows and panicked on Next.
		fmt.Println(err)
		return
	}
	defer rows.Close() // previously never closed
	var (
		taskTotal int
		con       []byte
	)
	for rows.Next() {
		// Previously the Scan error was overwritten without being checked.
		if err := rows.Scan(&taskTotal, &con); err != nil {
			fmt.Println(err)
			return
		}
		fmt.Println("taskTotal:", taskTotal, "con:", string(con))
	}
	if err := rows.Err(); err != nil {
		fmt.Println(err)
	}
}
func InvSeries(db *sql.DB, simid []byte, agent int, iso int) (xys []XY, err error) { sql := `SELECT ti.Time,SUM(cmp.MassFrac * inv.Quantity) FROM ( Compositions AS cmp INNER JOIN Inventories AS inv ON inv.QualId = cmp.QualId INNER JOIN TimeList AS ti ON (ti.Time >= inv.StartTime AND ti.Time < inv.EndTime) ) WHERE ( inv.SimId = ? AND inv.SimId = cmp.SimId AND ti.SimId = inv.SimId AND inv.AgentId = ? AND cmp.NucId = ? ) GROUP BY ti.Time,cmp.NucId;` rows, err := db.Query(sql, simid, agent, iso) if err != nil { return nil, err } for rows.Next() { xy := XY{} if err := rows.Scan(&xy.X, &xy.Y); err != nil { return nil, err } xys = append(xys, xy) } if err := rows.Err(); err != nil { return nil, err } return xys, nil }
// getMovement returns a table (header row first) of daily user movement for
// the last year: joined, left (negated for charting), and bounced counts.
// Zero left/bounced values become nil so charts render gaps instead of zeros.
func getMovement(db *sql.DB) ([][]interface{}, error) {
	rows, err := db.Query(`SELECT Day, Added, Removed, Bounced FROM UserMovement WHERE Day > now() - '1 year'::INTERVAL ORDER BY Day`)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	res := [][]interface{}{
		{"Day", "Joined", "Left", "Bounced"},
	}
	for rows.Next() {
		var day time.Time
		var added, removed, bounced int
		if err := rows.Scan(&day, &added, &removed, &bounced); err != nil {
			return nil, err
		}
		row := []interface{}{day.Format("2006-01-02"), added, -removed, bounced}
		if removed == 0 {
			row[2] = nil
		}
		if bounced == 0 {
			row[3] = nil
		}
		res = append(res, row)
	}
	// Previously iteration errors were silently dropped.
	return res, rows.Err()
}
func AllAgents(db *sql.DB, simid []byte, proto string) (ags []AgentInfo, err error) { s := `SELECT AgentId,Kind,Spec,Prototype,ParentId,EnterTime,ExitTime,Lifetime FROM Agents WHERE Agents.SimId = ?` var rows *sql.Rows if proto != "" { s += ` AND Agents.Prototype = ?` rows, err = db.Query(s, simid, proto) } else { rows, err = db.Query(s, simid) } if err != nil { return nil, err } for rows.Next() { ai := AgentInfo{} var exit sql.NullInt64 if err := rows.Scan(&ai.Id, &ai.Kind, &ai.Impl, &ai.Proto, &ai.Parent, &ai.Enter, &exit, &ai.Lifetime); err != nil { return nil, err } if !exit.Valid { exit.Int64 = -1 } ai.Exit = int(exit.Int64) ags = append(ags, ai) } if err := rows.Err(); err != nil { return nil, err } return ags, nil }
func FirstComic(db *sql.DB) (c *Comic, err error) { rows, err := db.Query( `select id, title, image_url, description, date from comic order by id limit 1`, ) if checkError(err) { return nil, err } for rows.Next() { var id int64 var title string var imageURL string var description string var date time.Time err = rows.Scan(&id, &title, &imageURL, &description, &date) if checkError(err) { return nil, err } c = &Comic{ ID: id, Title: title, ImageURL: imageURL, Description: description, Date: date.Format("2006-01-02"), db: db, } return } return }
// QuerySql runs sqlStmt with params against db and returns the resulting
// rows. It returns nil when db is nil or when the query fails.
//
// NOTE(review): the query error is deliberately discarded, so callers
// cannot distinguish a failed query from a nil db — confirm this
// best-effort contract is intended before relying on it.
func QuerySql(db *sql.DB, sqlStmt string, params ...interface{}) (rows *sql.Rows) {
	if db == nil {
		return
	}
	// Error intentionally ignored; rows stays nil on failure.
	rows, _ = db.Query(sqlStmt, params...)
	return
}
// runQuery executes query, retrying once per second on failure, until it
// succeeds or ctx is done. On timeout/cancellation it returns an error that
// includes the last query error observed.
func runQuery(ctx context.Context, db *sql.DB, query string) (*sql.Rows, error) {
	// Buffered so the worker never blocks on send after the caller has
	// already returned (the old unbuffered channel leaked the goroutine
	// and the *sql.Rows on timeout).
	done := make(chan *sql.Rows, 1)
	var errMsg error
	go func() {
		for {
			rs, err := db.Query(query)
			if err == nil {
				done <- rs
				return
			}
			errMsg = err
			select {
			case <-ctx.Done():
				// Caller gave up; stop retrying so the goroutine doesn't
				// spin forever (the old loop retried unconditionally).
				return
			case <-time.After(time.Second):
			}
		}
	}()
	select {
	case rows := <-done:
		return rows, nil
	case <-ctx.Done():
		return nil, fmt.Errorf("runQuery %s timed out with %v / %v", query, ctx.Err(), errMsg)
	}
}
func getUsers(db *sql.DB) []User { q := ` SELECT ID, Name, Phone, Created FROM users ORDER BY datetime(Created) DESC ` rows, err := db.Query(q) if err != nil { log.Fatalln(err) } defer rows.Close() var users []User for rows.Next() { user := User{} err = rows.Scan(&user.ID, &user.Name, &user.Phone, &user.Created) if err != nil { log.Fatalln(err) } users = append(users, user) } return users }
func getDeploymentLogEntries(db *sql.DB, d *models.Deployment) ([]*deploy.LogEntry, error) { entries := []*deploy.LogEntry{} rows, err := db.Query(deploymentLogEntriesStmt, d.Id) if err != nil { return entries, err } defer rows.Close() for rows.Next() { var entryType string e := &deploy.LogEntry{} err = rows.Scan(&e.Id, &e.DeploymentId, &entryType, &e.Origin, &e.Message, &e.Timestamp) if err != nil { return entries, err } e.EntryType = deploy.LogEntryType(entryType) entries = append(entries, e) } if err := rows.Err(); err != nil { return entries, err } return entries, nil }
func GetFriendsListInternal(userId string, db *sql.DB) FriendsListOutbound { rows, err := db.Query("select users.user_id, users.user_name, users.first_name, users.last_name from friends right join users on friends.friend_id = users.user_id where friends.user_id = " + userId) if err != nil { log.Fatal(err) } var ( id int username string first string last string ) friendsList := FriendsListOutbound{Friends: make([]*FriendInfoOutbound, 0)} for rows.Next() { err = rows.Scan(&id, &username, &first, &last) if err != nil { panic(err) } friendInfoOutbound := FriendInfoOutbound{User_id: id, User_name: username, First_name: first, Last_name: last} friendsList.Friends = append(friendsList.Friends, &friendInfoOutbound) } return friendsList }
// Do a simple query to ensure the connection is still usable
func ensureConnValid(t *testing.T, db *sql.DB) {
	var sum, rowCount int32
	rows, err := db.Query("select generate_series(1,$1)", 10)
	if err != nil {
		// Was missing a format verb, a go vet error.
		t.Fatalf("db.Query failed: %v", err)
	}
	defer rows.Close()
	for rows.Next() {
		var n int32
		// Previously the Scan error was ignored.
		if err := rows.Scan(&n); err != nil {
			t.Fatalf("rows.Scan failed: %v", err)
		}
		sum += n
		rowCount++
	}
	// Previously this branch printed the stale (nil) err from Query
	// instead of the actual iteration error.
	if err := rows.Err(); err != nil {
		t.Fatalf("rows.Err failed: %v", err)
	}
	if rowCount != 10 {
		t.Error("Select called onDataRow wrong number of times")
	}
	if sum != 55 {
		t.Error("Wrong values returned")
	}
}
// CreateDeductionsTable takes the string value of "deductions" for every person // in the people table, parses it, and creates an entry in the deductions table // for each deduction func CreateDeductionsTable(db *sql.DB) { // OK, we're connected to the database. On with the work. First thing to do is // to put all the deductions into a separate multivalued Deductions table organized // by employee id rows, err := db.Query("select uid, deductions from people") errcheck(err) defer rows.Close() InsrtDeduct, err := db.Prepare("INSERT INTO deductions (uid,deduction) VALUES(?,?)") errcheck(err) var ( uid int deductions string ) for rows.Next() { errcheck(rows.Scan(&uid, &deductions)) if len(deductions) > 0 { da := strings.Split(deductions, ",") for i := 0; i < len(da); i++ { d := deductionStringToInt(strings.Trim(da[i], " \n\r")) _, err := InsrtDeduct.Exec(uid, d) errcheck(err) } } } errcheck(rows.Err()) RemovePeopleDeductCol, err := db.Prepare("alter table people drop column deductions") errcheck(err) _, err = RemovePeopleDeductCol.Exec() errcheck(err) createDeductionsList(db) }
func fetchLanguages(db *sql.DB) (myDataStruct []*myData, err error) { rs, err := db.Query("select metadata, payload FROM val_store") if err != nil { return nil, err } defer rs.Close() fmt.Println("Fetch Query run \n") myDataStruct = make([]*myData, 0) fmt.Println("Starting DB fetch /n") for rs.Next() { fmt.Println("Fetch row . . . . /n") tmpMyData := new(myData) err = rs.Scan(&tmpMyData.Metadata, &tmpMyData.Payload) myDataStruct = append(myDataStruct, tmpMyData) if err != nil { return nil, err } } err = rs.Err() if err != nil { return nil, err } return }
func (store *MysqlStore) ObservationsAutour(db *sql.DB, x int, y int, z int, dist int, trollId int, amis []int, withTresors bool) ([]*Observation, error) { sql := "select auteur, num, date, type, nom, x, y, z from observation where" sql += " x>" + strconv.Itoa(x-dist-1) + " and x<" + strconv.Itoa(x+dist+1) sql += " and y>" + strconv.Itoa(y-dist-1) + " and y<" + strconv.Itoa(y+dist+1) sql += " and z>" + strconv.Itoa(z-dist/2-1) + " and z<" + strconv.Itoa(z+dist/2+1) if !withTresors { sql += " and type<>'tresor'" } sql += " and auteur in (" + strconv.Itoa(trollId) for _, id := range amis { sql += "," + strconv.Itoa(id) } sql += ") order by type, num, date desc" rows, err := db.Query(sql) observations := make([]*Observation, 0, 20) for rows.Next() { r := new(Observation) err = rows.Scan(&r.Auteur, &r.Num, &r.Date, &r.Type, &r.Nom, &r.X, &r.Y, &r.Z) if err != nil { return nil, err } if len(observations) > 0 && r.Num == observations[len(observations)-1].Num { // dédoublonnage continue } observations = append(observations, r) } rows.Close() return observations, nil }
// getLatestMigration retrives latest migration with status 'update' func getLatestMigration(db *sql.DB, goal string) (file string, createdAt int64) { sql := "SELECT name FROM migrations where status = 'update' ORDER BY id_migration DESC LIMIT 1" if rows, err := db.Query(sql); err != nil { ColorLog("[ERRO] Could not retrieve migrations: %s\n", err) os.Exit(2) } else { if rows.Next() { if err := rows.Scan(&file); err != nil { ColorLog("[ERRO] Could not read migrations in database: %s\n", err) os.Exit(2) } createdAtStr := file[len(file)-15:] if t, err := time.Parse("20060102_150405", createdAtStr); err != nil { ColorLog("[ERRO] Could not parse time: %s\n", err) os.Exit(2) } else { createdAt = t.Unix() } } else { // migration table has no 'update' record, no point rolling back if goal == "rollback" { ColorLog("[ERRO] There is nothing to rollback\n") os.Exit(2) } file, createdAt = "", 0 } } return }