// select the rows into table func selectRows(dbh *sql.DB) Rows { var t Rows logger.Println("events_stages_summary_global_by_event_name.selectRows()") sql := "SELECT EVENT_NAME, COUNT_STAR, SUM_TIMER_WAIT FROM events_stages_summary_global_by_event_name WHERE SUM_TIMER_WAIT > 0" rows, err := dbh.Query(sql) if err != nil { log.Fatal(err) } defer rows.Close() for rows.Next() { var r Row if err := rows.Scan( &r.eventName, &r.countStar, &r.sumTimerWait); err != nil { log.Fatal(err) } t = append(t, r) } if err := rows.Err(); err != nil { log.Fatal(err) } logger.Println("recovered", len(t), "row(s):") logger.Println(t) return t }
// SetByName sets the view based on its name. // - If we provide an empty name then use the default. // - If we don't provide a valid name then give an error func (v *View) SetByName(name string) { logger.Println("View.SetByName(" + name + ")") if name == "" { logger.Println("View.SetByName(): name is empty so setting to:", ViewLatency.String()) v.Set(ViewLatency) return } for i := range names { if name == names[i] { v.code = Code(i) logger.Println("View.SetByName(", name, ")") return } } // suggest what should be used allViews := "" for i := range names { allViews = allViews + " " + names[i] } // no need for now to strip off leading space from allViews. log.Fatal("Asked for a view name, '", name, "' which doesn't exist. Try one of:", allViews) }
// ValidateViews check which views are readable. If none are we give a fatal error func ValidateViews(dbh *sql.DB) error { var count int var status string logger.Println("Validating access to views...") for v := range names { ta := tables[v] if ta.CheckSelectable(dbh) { status = "is" count++ } else { status = "IS NOT" } tables[v] = ta logger.Println(v.String() + ": " + ta.Name() + " " + status + " SELECTable") } if count == 0 { return errors.New("None of the required tables are SELECTable. Giving up") } logger.Println(count, "of", len(names), "view(s) are SELECTable, continuing") setPrevAndNextViews() return nil }
// Connect makes a connection to the database using the previously defined settings func (c *Connector) Connect() { var err error switch { case c.connectMethod == ConnectByComponents: logger.Println("ConnectByComponents() Connecting...") newDsn := mysql_defaults_file.BuildDSN(c.components, db) c.dbh, err = sql.Open(sqlDriver, newDsn) case c.connectMethod == ConnectByDefaultsFile: logger.Println("ConnectByDefaults_file() Connecting...") c.dbh, err = mysql_defaults_file.OpenUsingDefaultsFile(sqlDriver, c.defaultsFile, db) case c.connectMethod == ConnectByEnvironment: /*************************************************************************** ** * * WARNING This functionality may be removed. WARNING * * * * While I've implemented this it may not be good/safe to actually use it. * * See: http://dev.mysql.com/doc/refman/5.6/en/password-security-user.html * * Store your password in the MYSQL_PWD environment variable. See Section * * 2.12, “Environment Variables”. * ****************************************************************************/ logger.Println("ConnectByEnvironment() Connecting...") c.dbh, err = mysql_defaults_file.OpenUsingEnvironment(sqlDriver) default: log.Fatal("Connector.Connect() c.connectMethod not ConnectByDefaultsFile/ConnectByComponents/ConnectByEnvironment") } // we catch Open...() errors here if err != nil { log.Fatal(err) } c.postConnectAction() }
// Convert the imported rows to a merged one with merged data. // - Combine all entries with the same "name" by adding their values. func (rows Rows) mergeByName(globalVariables *global.Variables) Rows { start := time.Now() rowsByName := make(map[string]Row) var newName string for i := range rows { var newRow Row if rows[i].sumTimerWait > 0 { newName = rows[i].simplifyName(globalVariables) // check if we have an entry in the map if _, found := rowsByName[newName]; found { newRow = rowsByName[newName] } else { newRow = Row{name: newName} // empty row with new name } newRow = add(newRow, rows[i]) rowsByName[newName] = newRow // update the map with the new summed value } } // add the map contents back into the table var mergedRows Rows for _, row := range rowsByName { mergedRows = append(mergedRows, row) } if !mergedRows.Valid() { logger.Println("WARNING: mergeByName(): mergedRows is invalid") } logger.Println("mergeByName() took:", time.Duration(time.Since(start)).String(), "and returned", len(rowsByName), "rows") return mergedRows }
// ValidateViews check which views are readable. If none are we give a fatal error func ValidateViews(dbh *sql.DB) error { var count int var status string logger.Println("Validating access to views...") // determine which of the defined views is valid because the underlying table access works for v := range names { ta := tables[v] e := ta.CheckSelectError(dbh) suffix := "" if e == nil { status = "is" count++ } else { status = "IS NOT" suffix = " " + e.Error() } tables[v] = ta logger.Println(v.String() + ": " + ta.Name() + " " + status + " SELECTable" + suffix) } if count == 0 { return errors.New("None of the required tables are SELECTable. Giving up") } logger.Println(count, "of", len(names), "view(s) are SELECTable, continuing") setPrevAndNextViews() return nil }
// EnableMutexMonitoring changes settings to monitor wait/synch/mutex/% func (si *SetupInstruments) EnableMutexMonitoring() { logger.Println("EnableMutexMonitoring") sqlMatch := "wait/synch/mutex/%" sqlSelect := "SELECT NAME, ENABLED, TIMED FROM setup_instruments WHERE NAME LIKE '" + sqlMatch + "' AND 'YES' NOT IN (ENABLED,TIMED)" collecting := "Collecting setup_instruments wait/synch/mutex configuration settings" updating := "Updating setup_instruments configuration for: wait/synch/mutex" si.Configure(sqlSelect, collecting, updating) logger.Println("EnableMutexMonitoring finishes") }
// CollectAll collects all the stats together in one go
//
// collectAll refreshes every statistics collector from the single shared
// database handle. Each Collect() call re-reads that collector's source
// table(s); the order here is fixed but the collectors are independent.
func (app *App) collectAll() {
	logger.Println("app.collectAll() start")
	app.fsbi.Collect(app.dbh)    // file summary by instance
	app.tlwsbt.Collect(app.dbh)  // presumably table lock waits — confirm against collector package
	app.tiwsbt.Collect(app.dbh)  // presumably table I/O waits — confirm against collector package
	app.users.Collect(app.dbh)   // per-user data (processlist-based)
	app.essgben.Collect(app.dbh) // events_stages_summary_global_by_event_name
	app.ewsgben.Collect(app.dbh) // presumably events_waits_summary_global_by_event_name
	app.memory.Collect(app.dbh)  // memory usage
	logger.Println("app.collectAll() finished")
}
// Load the ~/.pstoprc regexp expressions in section [munge] func loadRegexps() { if loadedRegexps { return } loadedRegexps = true logger.Println("rc.loadRegexps()") haveRegexps = false filename := convertFilename(pstoprc) // Is the file is there? f, err := os.Open(filename) if err != nil { logger.Println("- unable to open " + filename + ", nothing to munge") return // can't open file. This is not fatal. We just can't do anything useful. } // If we get here the file is readable, so close it again. err = f.Close() if err != nil { // Do nothing. What can we do? Do we care? } // Load and process the ini file. i, err := go_ini.LoadFile(filename) if err != nil { log.Fatal("Could not load ~/.pstoprc", filename, ":", err) } // Note: This is wrong if I want to have an _ordered_ list of regexps // as go-ini provides me a hash so I lose the ordering. This may not // be desirable but as a first step accept this is broken. section := i.Section("munge") regexps = make(mungeRegexps, 0, len(section)) // now look for regexps and load them in... for k, v := range section { var m mungeRegexp var err error m.pattern, m.replace = k, v m.re, err = regexp.Compile(m.pattern) if err == nil { m.valid = true } regexps = append(regexps, m) } if len(regexps) > 0 { haveRegexps = true } logger.Println("- found", len(regexps), "regexps to use to munge output") }
// Collect collects data from the db, updating initial // values if needed, and then subtracting initial values if we want // relative values, after which it stores totals. func (t *Object) Collect(dbh *sql.DB) { logger.Println("Object.Collect() - starting collection of data") start := time.Now() t.current = selectRows(dbh) logger.Println("t.current collected", len(t.current), "row(s) from SELECT") t.processlist2byUser() logger.Println("Object.Collect() END, took:", time.Duration(time.Since(start)).String()) }
// subtract the countable values in one row from another func (row *Row) subtract(other Row) { // check for issues here (we have a bug) and log it // - this situation should not happen so there's a logic bug somewhere else if row.sumTimerWait >= other.sumTimerWait { row.sumTimerWait -= other.sumTimerWait row.countStar -= other.countStar } else { logger.Println("WARNING: Row.subtract() - subtraction problem! (not subtracting)") logger.Println("row=", row) logger.Println("other=", other) } }
// NewApp sets up the application given various parameters.
//
// It wires the shared context into the display, validates the p_s views,
// selects the starting view, enables instrument monitoring, creates every
// statistics collector and seeds them with relative-stats settings, then
// records hostname and MySQL version for the header. Any view-validation
// failure is fatal. The returned *App owns the database handle passed in
// via conn.
func NewApp(conn *connector.Connector, interval int, count int, stdout bool, defaultView string, disp display.Display) *App {
	logger.Println("app.NewApp()")
	app := new(App)
	app.ctx = new(context.Context)
	app.count = count
	app.dbh = conn.Handle()
	app.finished = false
	app.stdout = stdout
	app.display = disp
	app.display.SetContext(app.ctx)
	app.SetHelp(false)

	// We must be able to SELECT from at least one view or there is
	// nothing to show, so bail out early.
	if err := view.ValidateViews(app.dbh); err != nil {
		log.Fatal(err)
	}

	logger.Println("app.Setup() Setting the default view to:", defaultView)
	app.view.SetByName(defaultView) // if empty will use the default

	app.setupInstruments = setup_instruments.NewSetupInstruments(app.dbh)
	app.setupInstruments.EnableMonitoring()

	// Poll interval comes from the command line (seconds).
	app.wi.SetWaitInterval(time.Second * time.Duration(interval))

	variables, _ := lib.SelectAllGlobalVariablesByVariableName(app.dbh)
	// setup to their initial types/values
	app.fsbi = fsbi.NewFileSummaryByInstance(variables)
	app.tlwsbt = new(tlwsbt.Object)
	app.ewsgben = new(ewsgben.Object)
	app.essgben = new(essgben.Object)

	app.ctx.SetWantRelativeStats(true) // we show info from the point we start collecting data
	app.fsbi.SetWantRelativeStats(app.ctx.WantRelativeStats())
	app.tlwsbt.SetWantRelativeStats(app.ctx.WantRelativeStats())
	app.tiwsbt.SetWantRelativeStats(app.ctx.WantRelativeStats())
	app.users.SetWantRelativeStats(app.ctx.WantRelativeStats()) // ignored
	app.essgben.SetWantRelativeStats(app.ctx.WantRelativeStats())
	app.ewsgben.SetWantRelativeStats(app.ctx.WantRelativeStats()) // ignored

	app.fixLatencySetting() // adjust to see ops/latency

	app.resetDBStatistics()

	// get short name (to save space)
	hostname, _ := lib.SelectGlobalVariableByVariableName(app.dbh, "HOSTNAME")
	app.ctx.SetHostname(hostname)
	// get the MySQL version
	mysqlVersion, _ := lib.SelectGlobalVariableByVariableName(app.dbh, "VERSION")
	app.ctx.SetMySQLVersion(mysqlVersion)

	return app
}
// return true if the error is not in the expected list func errorInExpectedList(actualError string, expectedErrors []string) bool { logger.Println("checking if", actualError, "is in", expectedErrors) e := actualError[0:11] expectedError := false for i := range expectedErrors { if e == expectedErrors[i] { logger.Println("found an expected error", expectedErrors[i]) expectedError = true break } } logger.Println("returning", expectedError) return expectedError }
// catch a SELECT error - specifically this one. // Error 1146: Table 'performance_schema.memory_summary_global_by_event_name' doesn't exist func sqlErrorHandler(err error) bool { var ignore bool logger.Println("- SELECT gave an error:", err.Error()) if err.Error()[0:11] != "Error 1146:" { fmt.Println(fmt.Sprintf("XXX'%s'XXX", err.Error()[0:11])) log.Fatal("Unexpected error", fmt.Sprintf("XXX'%s'XXX", err.Error()[0:11])) // log.Fatal("Unexpected error:", err.Error()) } else { logger.Println("- expected error, so ignoring") ignore = true } return ignore }
// new connector returns a connected Connector given the different parameters func NewConnector(flags Flags) *Connector { var defaultsFile string connector := new(Connector) if *flags.UseEnvironment { connector.ConnectByEnvironment() } else { if *flags.Host != "" || *flags.Socket != "" { logger.Println("--host= or --socket= defined") var components = make(map[string]string) if *flags.Host != "" && *flags.Socket != "" { fmt.Println(lib.MyName() + ": Do not specify --host and --socket together") os.Exit(1) } if *flags.Host != "" { components["host"] = *flags.Host } if *flags.Port != 0 { if *flags.Socket == "" { components["port"] = fmt.Sprintf("%d", *flags.Port) } else { fmt.Println(lib.MyName() + ": Do not specify --socket and --port together") os.Exit(1) } } if *flags.Socket != "" { components["socket"] = *flags.Socket } if *flags.User != "" { components["user"] = *flags.User } if *flags.Password != "" { components["password"] = *flags.Password } connector.ConnectByComponents(components) } else { if flags.DefaultsFile != nil && *flags.DefaultsFile != "" { logger.Println("--defaults-file defined") defaultsFile = *flags.DefaultsFile } else { logger.Println("connecting by implicit defaults file") } connector.ConnectByDefaultsFile(defaultsFile) } } return connector }
// Convert the imported "table" to a merged one with merged data. // Combine all entries with the same "fileName" by adding their values. func mergeByTableName(orig Rows, globalVariables map[string]string) Rows { start := time.Now() t := make(Rows, 0, len(orig)) m := make(map[string]Row) // iterate over source table for i := range orig { var filename string var newRow Row origRow := orig[i] if origRow.countStar > 0 { filename = origRow.simplifyName(globalVariables) // check if we have an entry in the map if _, found := m[filename]; found { newRow = m[filename] } else { newRow.fileName = filename } newRow.add(origRow) m[filename] = newRow // update the map with the new value } } // add the map contents back into the table for _, row := range m { t = append(t, row) } logger.Println("mergeByTableName() took:", time.Duration(time.Since(start)).String()) return t }
// Select the raw data from the database into Rows // - filter out empty values // - merge rows with the same name into a single row // - change fileName into a more descriptive value. func selectRows(dbh *sql.DB) Rows { var t Rows start := time.Now() sql := "SELECT FILE_NAME, COUNT_STAR, SUM_TIMER_WAIT, COUNT_READ, SUM_TIMER_READ, SUM_NUMBER_OF_BYTES_READ, COUNT_WRITE, SUM_TIMER_WRITE, SUM_NUMBER_OF_BYTES_WRITE, COUNT_MISC, SUM_TIMER_MISC FROM file_summary_by_instance" rows, err := dbh.Query(sql) if err != nil { log.Fatal(err) } defer rows.Close() for rows.Next() { var r Row if err := rows.Scan(&r.fileName, &r.countStar, &r.sumTimerWait, &r.countRead, &r.sumTimerRead, &r.sumNumberOfBytesRead, &r.countWrite, &r.sumTimerWrite, &r.sumNumberOfBytesWrite, &r.countMisc, &r.sumTimerMisc); err != nil { log.Fatal(err) } t = append(t, r) } if err := rows.Err(); err != nil { log.Fatal(err) } logger.Println("selectRows() took:", time.Duration(time.Since(start)).String()) return t }
func NewMemoryUsage(ctx *context.Context) *Object { logger.Println("NewMemoryUsage()") o := new(Object) o.SetContext(ctx) return o }
func (t *Object) makeResults() { logger.Println("table_io_latency.makeResults()") logger.Println("- HaveRelativeStats()", t.HaveRelativeStats()) logger.Println("- WantRelativeStats()", t.WantRelativeStats()) t.results = make(Rows, len(t.current)) copy(t.results, t.current) if t.WantRelativeStats() { logger.Println("- subtracting t.initial from t.results as WantRelativeStats()") t.results.subtract(t.initial) } // logger.Println( "- sorting t.results" ) t.results.sort(t.wantLatency) // logger.Println( "- collecting t.totals from t.results" ) t.totals = t.results.totals() }
// Run runs the application in a loop until we're ready to finish
//
// The loop multiplexes three event sources: OS signals (SIGINT/SIGTERM),
// the periodic collection timer, and keyboard/terminal events from the
// display. Each iteration handles exactly one event; the --count
// countdown (stdout mode only) is checked after every event.
func (app *App) Run() {
	logger.Println("app.Run()")

	app.sigChan = make(chan os.Signal, 10) // 10 entries
	signal.Notify(app.sigChan, syscall.SIGINT, syscall.SIGTERM)

	eventChan := app.display.EventChan()

	for !app.Finished() {
		select {
		// A termination signal stops the loop cleanly.
		case sig := <-app.sigChan:
			fmt.Println("Caught signal: ", sig)
			app.finished = true

		// Time for the next collection/display cycle.
		case <-app.wi.WaitNextPeriod():
			app.Collect()
			app.Display()
			if app.stdout {
				app.setInitialFromCurrent()
			}

		// A user/terminal event from the display layer.
		case inputEvent := <-eventChan:
			switch inputEvent.Type {
			case event.EventAnonymise:
				anonymiser.Enable(!anonymiser.Enabled()) // toggle current behaviour
			case event.EventFinished:
				app.finished = true
			case event.EventViewNext:
				app.displayNext()
			case event.EventViewPrev:
				app.displayPrevious()
			case event.EventDecreasePollTime:
				// never go below a 1-second poll interval
				if app.wi.WaitInterval() > time.Second {
					app.wi.SetWaitInterval(app.wi.WaitInterval() - time.Second)
				}
			case event.EventIncreasePollTime:
				app.wi.SetWaitInterval(app.wi.WaitInterval() + time.Second)
			case event.EventHelp:
				app.SetHelp(!app.Help())
			case event.EventToggleWantRelative:
				app.ctx.SetWantRelativeStats(!app.ctx.WantRelativeStats())
				app.Display()
			case event.EventResetStatistics:
				app.resetDBStatistics()
				app.Display()
			case event.EventResizeScreen:
				width, height := inputEvent.Width, inputEvent.Height
				app.display.Resize(width, height)
				app.Display()
			case event.EventError:
				log.Fatalf("Quitting because of EventError error")
			}
		}
		// provide a hook to stop the application if the counter goes down to zero
		if app.stdout && app.count > 0 {
			app.count--
			if app.count == 0 {
				app.finished = true
			}
		}
	}
}
// NewFileSummaryByInstance creates a new structure and include various variable values: // - datadir, relay_log // There's no checking that these are actually provided! func NewFileSummaryByInstance(ctx *context.Context) *Object { logger.Println("NewFileSummaryByInstance()") n := new(Object) n.SetContext(ctx) return n }
// Collect data from the db, then merge it in. func (t *Object) Collect(dbh *sql.DB) { start := time.Now() // UPDATE current from db handle t.current = mergeByTableName(selectRows(dbh), t.globalVariables) t.SetNow() // copy in initial data if it was not there if len(t.initial) == 0 && len(t.current) > 0 { t.initial = make(Rows, len(t.current)) copy(t.initial, t.current) } // check for reload initial characteristics if t.initial.needsRefresh(t.current) { t.initial = make(Rows, len(t.current)) copy(t.initial, t.current) } // update results to current value t.results = make(Rows, len(t.current)) copy(t.results, t.current) // make relative if need be if t.WantRelativeStats() { t.results.subtract(t.initial) } // sort the results t.results.sort() // setup the totals t.totals = t.results.totals() logger.Println("Object.Collect() took:", time.Duration(time.Since(start)).String()) }
func NewUserLatency(ctx *context.Context) *Object { logger.Println("NewUserLatency()") o := new(Object) o.SetContext(ctx) return o }
// get the output of I_S.PROCESSLIST func selectRows(dbh *sql.DB) Rows { var t Rows var id sql.NullInt64 var user sql.NullString var host sql.NullString var db sql.NullString var command sql.NullString var time sql.NullInt64 var state sql.NullString var info sql.NullString // we collect all information even if it's mainly empty as we may reference it later sql := "SELECT ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO FROM INFORMATION_SCHEMA.PROCESSLIST" rows, err := dbh.Query(sql) if err != nil { log.Fatal(err) } defer rows.Close() for rows.Next() { var r Row if err := rows.Scan( &id, &user, &host, &db, &command, &time, &state, &info); err != nil { log.Fatal(err) } r.ID = uint64(id.Int64) // be verbose for debugging. u := user.String a := anonymiser.Anonymise("user", user.String) logger.Println("user:"******", anonymised:", a) r.user = a r.host = host.String if db.Valid { r.db = db.String } r.command = command.String r.time = uint64(time.Int64) if state.Valid { r.state = state.String } r.info = info.String t = append(t, r) } if err := rows.Err(); err != nil { log.Fatal(err) } return t }
// TimeToWait returns the amount of time to wait before doing the next collection func (wi WaitInfo) TimeToWait() time.Duration { now := time.Now() logger.Println("WaitInfo.TimeToWait() now: ", now) nextTime := wi.lastCollected.Add(wi.collectInterval) logger.Println("WaitInfo.TimeToWait() nextTime: ", nextTime) if nextTime.Before(now) { logger.Println("WaitInfo.TimeToWait() nextTime scheduled time in the past, so schedule", extraDelay, "after", now) nextTime = now nextTime.Add(extraDelay) // add a deliberate tiny delay logger.Println("WaitInfo.TimeToWait() nextTime: ", nextTime, "(corrected)") } waitTime := nextTime.Sub(now) logger.Println("WaitInfo.TimeToWait() returning waitTime:", waitTime) return waitTime }
// Cleanup prepares the application prior to shutting down
//
// Close the display first (restores the terminal), then restore the
// p_s.setup_instruments configuration and drop the database connection
// if we have one.
func (app *App) Cleanup() {
	app.display.Close()
	if app.dbh != nil {
		app.setupInstruments.RestoreConfiguration()
		_ = app.dbh.Close() // best-effort: we are shutting down anyway
	}
	logger.Println("App.Cleanup completed")
}
// RestoreConfiguration restores setup_instruments rows to their previous settings (if changed previously). func (si *SetupInstruments) RestoreConfiguration() { logger.Println("RestoreConfiguration()") // If the previous update didn't work then don't try to restore if !si.updateSucceeded { logger.Println("Not restoring p_s.setup_instruments to original settings as initial configuration attempt failed") return } logger.Println("Restoring p_s.setup_instruments to its original settings") // update the rows which need to be set - do multiple updates but I don't care updateSQL := "UPDATE setup_instruments SET enabled = ?, TIMED = ? WHERE NAME = ?" logger.Println("dbh.Prepare(", updateSQL, ")") stmt, err := si.dbh.Prepare(updateSQL) if err != nil { log.Fatal(err) } count := 0 for i := range si.rows { logger.Println("stmt.Exec(", si.rows[i].enabled, si.rows[i].timed, si.rows[i].name, ")") if _, err := stmt.Exec(si.rows[i].enabled, si.rows[i].timed, si.rows[i].name); err != nil { log.Fatal(err) } count++ } logger.Println("stmt.Close()") stmt.Close() logger.Println(count, "rows changed in p_s.setup_instruments") }
// used for testing // usage: match(r.name, "demodb.table") func match(text string, searchFor string) bool { re := regexp.MustCompile(searchFor) result := re.MatchString(text) logger.Println("match(", text, ",", searchFor, ")", result) return result }
func NewMutexLatency(ctx *context.Context) *Object { logger.Println("NewMutexLatency()") if ctx == nil { log.Println("NewMutexLatency() ctx == nil!") } o := new(Object) o.SetContext(ctx) return o }
// Valid checks if the row is valid and if asked to do so logs the problem
//
// NOTE(review): despite the name, this method returns true when a
// problem IS found (i.e. the row is inconsistent) and false when the
// row is fine. Callers may depend on this inverted sense, so it is
// documented rather than changed — confirm against call sites before
// renaming or inverting.
func (row Row) Valid(logProblem bool) bool {
	var problem bool
	// A per-operation count should never exceed the overall count.
	if (row.countStar < row.countRead) || (row.countStar < row.countWrite) || (row.countStar < row.countMisc) {
		problem = true
		if logProblem {
			logger.Println("Row.Valid() FAILED (count)", row)
		}
	}
	// Likewise a per-operation timer sum should never exceed the overall sum.
	if (row.sumTimerWait < row.sumTimerRead) || (row.sumTimerWait < row.sumTimerWrite) || (row.sumTimerWait < row.sumTimerMisc) {
		problem = true
		if logProblem {
			logger.Println("Row.Valid() FAILED (sumTimer)", row)
		}
	}
	return problem
}