func NewQueryEngine(config Config) *QueryEngine { qe := &QueryEngine{} qe.cachePool = NewCachePool("CachePool", config.RowCache, time.Duration(config.QueryTimeout*1e9), time.Duration(config.IdleTimeout*1e9)) qe.schemaInfo = NewSchemaInfo(config.QueryCacheSize, time.Duration(config.SchemaReloadTime*1e9), time.Duration(config.IdleTimeout*1e9)) qe.connPool = NewConnectionPool("ConnPool", config.PoolSize, time.Duration(config.IdleTimeout*1e9)) qe.streamConnPool = NewConnectionPool("StreamConnPool", config.StreamPoolSize, time.Duration(config.IdleTimeout*1e9)) qe.streamTokens = sync2.NewSemaphore(config.StreamExecThrottle, time.Duration(config.StreamWaitTimeout*1e9)) qe.reservedPool = NewReservedPool("ReservedPool") qe.txPool = NewConnectionPool("TxPool", config.TransactionCap, time.Duration(config.IdleTimeout*1e9)) // connections in pool has to be > transactionCap qe.activeTxPool = NewActiveTxPool("ActiveTxPool", time.Duration(config.TransactionTimeout*1e9)) qe.activePool = NewActivePool("ActivePool", time.Duration(config.QueryTimeout*1e9), time.Duration(config.IdleTimeout*1e9)) qe.consolidator = NewConsolidator() qe.spotCheckFreq = sync2.AtomicInt64(config.SpotCheckRatio * SPOT_CHECK_MULTIPLIER) qe.maxResultSize = sync2.AtomicInt64(config.MaxResultSize) qe.streamBufferSize = sync2.AtomicInt64(config.StreamBufferSize) stats.Publish("MaxResultSize", stats.IntFunc(qe.maxResultSize.Get)) stats.Publish("StreamBufferSize", stats.IntFunc(qe.streamBufferSize.Get)) queryStats = stats.NewTimings("Queries") stats.NewRates("QPS", queryStats, 15, 60e9) waitStats = stats.NewTimings("Waits") killStats = stats.NewCounters("Kills") errorStats = stats.NewCounters("Errors") resultStats = stats.NewHistogram("Results", resultBuckets) stats.Publish("SpotCheckRatio", stats.FloatFunc(func() float64 { return float64(qe.spotCheckFreq.Get()) / SPOT_CHECK_MULTIPLIER })) spotCheckCount = stats.NewInt("SpotCheckCount") return qe }
// NewQueryServiceStats returns a new QueryServiceStats instance. func NewQueryServiceStats(statsPrefix string, enablePublishStats bool) *QueryServiceStats { mysqlStatsName := "" queryStatsName := "" qpsRateName := "" waitStatsName := "" killStatsName := "" infoErrorsName := "" errorStatsName := "" internalErrorsName := "" resultStatsName := "" spotCheckCountName := "" userTableQueryCountName := "" userTableQueryTimesNsName := "" userTransactionCountName := "" userTransactionTimesNsName := "" if enablePublishStats { mysqlStatsName = statsPrefix + "Mysql" queryStatsName = statsPrefix + "Queries" qpsRateName = statsPrefix + "QPS" waitStatsName = statsPrefix + "Waits" killStatsName = statsPrefix + "Kills" infoErrorsName = statsPrefix + "InfoErrors" errorStatsName = statsPrefix + "Errors" internalErrorsName = statsPrefix + "InternalErrors" resultStatsName = statsPrefix + "Results" spotCheckCountName = statsPrefix + "RowcacheSpotCheckCount" userTableQueryCountName = statsPrefix + "UserTableQueryCount" userTableQueryTimesNsName = statsPrefix + "UserTableQueryTimesNs" userTransactionCountName = statsPrefix + "UserTransactionCount" userTransactionTimesNsName = statsPrefix + "UserTransactionTimesNs" } resultBuckets := []int64{0, 1, 5, 10, 50, 100, 500, 1000, 5000, 10000} queryStats := stats.NewTimings(queryStatsName) return &QueryServiceStats{ MySQLStats: stats.NewTimings(mysqlStatsName), QueryStats: queryStats, WaitStats: stats.NewTimings(waitStatsName), KillStats: stats.NewCounters(killStatsName, "Transactions", "Queries"), InfoErrors: stats.NewCounters(infoErrorsName, "Retry", "Fatal", "DupKey"), ErrorStats: stats.NewCounters(errorStatsName, "Fail", "TxPoolFull", "NotInTx", "Deadlock"), InternalErrors: stats.NewCounters(internalErrorsName, "Task", "MemcacheStats", "Mismatch", "StrayTransactions", "Invalidation", "Panic", "HungQuery", "Schema"), UserTableQueryCount: stats.NewMultiCounters( userTableQueryCountName, []string{"TableName", "CallerID", "Type"}), 
UserTableQueryTimesNs: stats.NewMultiCounters( userTableQueryTimesNsName, []string{"TableName", "CallerID", "Type"}), UserTransactionCount: stats.NewMultiCounters( userTransactionCountName, []string{"CallerID", "Conclusion"}), UserTransactionTimesNs: stats.NewMultiCounters( userTransactionTimesNsName, []string{"CallerID", "Conclusion"}), // Sample every 5 seconds and keep samples for up to 15 minutes. QPSRates: stats.NewRates(qpsRateName, queryStats, 15*60/5, 5*time.Second), ResultStats: stats.NewHistogram(resultStatsName, resultBuckets), SpotCheckCount: stats.NewInt(spotCheckCountName), } }
func TestHistogram(t *testing.T) { v := stats.NewHistogram("", []int64{1, 3, 5, 7}) load := func() { for i := int64(0); i < 10; i++ { v.Add(i) } } testMetric(t, v, load, `Desc{fqName: "test_name", help: "test_help", constLabels: {}, variableLabels: []}`, `histogram:<sample_count:10 sample_sum:45 bucket:<cumulative_count:2 upper_bound:1 > bucket:<cumulative_count:4 upper_bound:3 > bucket:<cumulative_count:6 upper_bound:5 > bucket:<cumulative_count:8 upper_bound:7 > > `, ) }
// NewQueryEngine creates a new QueryEngine. // This is a singleton class. // You must call this only once. func NewQueryEngine(config Config) *QueryEngine { qe := &QueryEngine{} qe.schemaInfo = NewSchemaInfo(config.QueryCacheSize, time.Duration(config.SchemaReloadTime*1e9), time.Duration(config.IdleTimeout*1e9)) mysqlStats = stats.NewTimings("Mysql") // Pools qe.cachePool = NewCachePool("Rowcache", config.RowCache, time.Duration(config.QueryTimeout*1e9), time.Duration(config.IdleTimeout*1e9)) qe.connPool = dbconnpool.NewConnectionPool("ConnPool", config.PoolSize, time.Duration(config.IdleTimeout*1e9)) qe.streamConnPool = dbconnpool.NewConnectionPool("StreamConnPool", config.StreamPoolSize, time.Duration(config.IdleTimeout*1e9)) qe.txPool = dbconnpool.NewConnectionPool("TransactionPool", config.TransactionCap, time.Duration(config.IdleTimeout*1e9)) // connections in pool has to be > transactionCap // Services qe.activeTxPool = NewActiveTxPool("ActiveTransactionPool", time.Duration(config.TransactionTimeout*1e9)) qe.connKiller = NewConnectionKiller(1, time.Duration(config.IdleTimeout*1e9)) qe.activePool = NewActivePool("ActivePool", time.Duration(config.QueryTimeout*1e9), qe.connKiller) qe.consolidator = NewConsolidator() qe.invalidator = NewRowcacheInvalidator(qe) qe.streamQList = NewQueryList(qe.connKiller) // Vars qe.spotCheckFreq = sync2.AtomicInt64(config.SpotCheckRatio * SPOT_CHECK_MULTIPLIER) if config.StrictMode { qe.strictMode.Set(1) } qe.strictTableAcl = config.StrictTableAcl qe.maxResultSize = sync2.AtomicInt64(config.MaxResultSize) qe.streamBufferSize = sync2.AtomicInt64(config.StreamBufferSize) // loggers qe.accessCheckerLogger = logutil.NewThrottledLogger("accessChecker", 1*time.Second) // Stats stats.Publish("MaxResultSize", stats.IntFunc(qe.maxResultSize.Get)) stats.Publish("StreamBufferSize", stats.IntFunc(qe.streamBufferSize.Get)) queryStats = stats.NewTimings("Queries") QPSRates = stats.NewRates("QPS", queryStats, 15, 60*time.Second) waitStats = 
stats.NewTimings("Waits") killStats = stats.NewCounters("Kills") infoErrors = stats.NewCounters("InfoErrors") errorStats = stats.NewCounters("Errors") internalErrors = stats.NewCounters("InternalErrors") resultStats = stats.NewHistogram("Results", resultBuckets) stats.Publish("RowcacheSpotCheckRatio", stats.FloatFunc(func() float64 { return float64(qe.spotCheckFreq.Get()) / SPOT_CHECK_MULTIPLIER })) spotCheckCount = stats.NewInt("RowcacheSpotCheckCount") return qe }
func NewQueryEngine(config Config) *QueryEngine { qe := &QueryEngine{} qe.cachePool = NewCachePool(config.RowCache, time.Duration(config.QueryTimeout*1e9), time.Duration(config.IdleTimeout*1e9)) qe.schemaInfo = NewSchemaInfo(config.QueryCacheSize, time.Duration(config.SchemaReloadTime*1e9), time.Duration(config.IdleTimeout*1e9)) qe.connPool = NewConnectionPool(config.PoolSize, time.Duration(config.IdleTimeout*1e9)) qe.streamConnPool = NewConnectionPool(config.StreamPoolSize, time.Duration(config.IdleTimeout*1e9)) qe.reservedPool = NewReservedPool() qe.txPool = NewConnectionPool(config.TransactionCap, time.Duration(config.IdleTimeout*1e9)) // connections in pool has to be > transactionCap qe.activeTxPool = NewActiveTxPool(time.Duration(config.TransactionTimeout * 1e9)) qe.activePool = NewActivePool(time.Duration(config.QueryTimeout*1e9), time.Duration(config.IdleTimeout*1e9)) qe.consolidator = NewConsolidator() qe.maxResultSize = sync2.AtomicInt32(config.MaxResultSize) qe.streamBufferSize = sync2.AtomicInt32(config.StreamBufferSize) queryStats = stats.NewTimings("Queries") stats.NewRates("QPS", queryStats, 15, 60e9) waitStats = stats.NewTimings("Waits") killStats = stats.NewCounters("Kills") errorStats = stats.NewCounters("Errors") resultStats = stats.NewHistogram("Results", resultBuckets) return qe }
// NewQueryEngine creates a new QueryEngine. // This is a singleton class. // You must call this only once. func NewQueryEngine(config Config) *QueryEngine { qe := &QueryEngine{} // services qe.cachePool = NewCachePool("RowcachePool", config.RowCache, time.Duration(config.QueryTimeout*1e9), time.Duration(config.IdleTimeout*1e9)) qe.schemaInfo = NewSchemaInfo(config.QueryCacheSize, time.Duration(config.SchemaReloadTime*1e9), time.Duration(config.IdleTimeout*1e9), config.SensitiveMode) qe.connPool = NewConnectionPool("ConnPool", config.PoolSize, time.Duration(config.IdleTimeout*1e9)) qe.streamConnPool = NewConnectionPool("StreamConnPool", config.StreamPoolSize, time.Duration(config.IdleTimeout*1e9)) qe.txPool = NewConnectionPool("TransactionPool", config.TransactionCap, time.Duration(config.IdleTimeout*1e9)) // connections in pool has to be > transactionCap qe.activeTxPool = NewActiveTxPool("ActiveTransactionPool", time.Duration(config.TransactionTimeout*1e9)) qe.activePool = NewActivePool("ActivePool", time.Duration(config.QueryTimeout*1e9), time.Duration(config.IdleTimeout*1e9)) qe.consolidator = NewConsolidator() // vars qe.spotCheckFreq = sync2.AtomicInt64(config.SpotCheckRatio * SPOT_CHECK_MULTIPLIER) if config.StrictMode { qe.strictMode.Set(1) } qe.maxResultSize = sync2.AtomicInt64(config.MaxResultSize) qe.streamBufferSize = sync2.AtomicInt64(config.StreamBufferSize) // stats stats.Publish("MaxResultSize", stats.IntFunc(qe.maxResultSize.Get)) stats.Publish("StreamBufferSize", stats.IntFunc(qe.streamBufferSize.Get)) queryStats = stats.NewTimings("Queries") QPSRates = stats.NewRates("QPS", queryStats, 15, 60*time.Second) waitStats = stats.NewTimings("Waits") killStats = stats.NewCounters("Kills") infoErrors = stats.NewCounters("InfoErrors") errorStats = stats.NewCounters("Errors") internalErrors = stats.NewCounters("InternalErrors") resultStats = stats.NewHistogram("Results", resultBuckets) stats.Publish("SpotCheckRatio", stats.FloatFunc(func() float64 { return 
float64(qe.spotCheckFreq.Get()) / SPOT_CHECK_MULTIPLIER })) spotCheckCount = stats.NewInt("SpotCheckCount") return qe }
// NewQueryServiceStats returns a new QueryServiceStats instance. func NewQueryServiceStats(statsPrefix string, enablePublishStats bool) *QueryServiceStats { mysqlStatsName := "" queryStatsName := "" qpsRateName := "" waitStatsName := "" killStatsName := "" infoErrorsName := "" errorStatsName := "" internalErrorsName := "" resultStatsName := "" spotCheckCountName := "" if enablePublishStats { mysqlStatsName = statsPrefix + "Mysql" queryStatsName = statsPrefix + "Queries" qpsRateName = statsPrefix + "QPS" waitStatsName = statsPrefix + "Waits" killStatsName = statsPrefix + "Kills" infoErrorsName = statsPrefix + "InfoErrors" errorStatsName = statsPrefix + "Errors" internalErrorsName = statsPrefix + "InternalErrors" resultStatsName = statsPrefix + "Results" spotCheckCountName = statsPrefix + "RowcacheSpotCheckCount" } resultBuckets := []int64{0, 1, 5, 10, 50, 100, 500, 1000, 5000, 10000} queryStats := stats.NewTimings(queryStatsName) return &QueryServiceStats{ MySQLStats: stats.NewTimings(mysqlStatsName), QueryStats: queryStats, WaitStats: stats.NewTimings(waitStatsName), KillStats: stats.NewCounters(killStatsName), InfoErrors: stats.NewCounters(infoErrorsName), ErrorStats: stats.NewCounters(errorStatsName), InternalErrors: stats.NewCounters(internalErrorsName), QPSRates: stats.NewRates(qpsRateName, queryStats, 15, 60*time.Second), ResultStats: stats.NewHistogram(resultStatsName, resultBuckets), SpotCheckCount: stats.NewInt(spotCheckCountName), } }
"time"
)

// init marks the metrics package as active and sets the trace-buffer cap.
func init() {
	//ML = &MemoryLoaderImpl{}
	ME = &MetricsExposerImpl{}
	BUFFER_MAX_NUM_TRACES = 1000
	working = true
	fmt.Println("Metrics package initiated.")
}

// --> http://{mdh server}:{mdh port}/debug/vars

// metricsChannel carries trace metrics; presumably consumed elsewhere in
// this package — TODO confirm against the consumer.
var metricsChannel chan *traceMetric

// mtm is the response-time histogram; bucket bounds are nanosecond values
// (10e6 ns = 10ms up through 10e9 ns = 10s).
var mtm = stats.NewHistogram("TimeResponses", []int64{10000000, 50000000, 100000000, 500000000, 1000000000, 1500000000, 2000000000, 2500000000, 3000000000, 3500000000, 4000000000, 4500000000, 5000000000, 5500000000, 6000000000, 10000000000})

// working is set true by init once the package is ready.
var working bool

// ME is the package-wide metrics exposer, assigned in init.
var ME MetricsExposer

// BUFFER_MAX_NUM_TRACES caps the number of buffered traces (set in init).
var BUFFER_MAX_NUM_TRACES int

// Two metrics, these are exposed by "magic" :)
// Number of calls to our server.
var numCalls = expvar.NewInt("num_calls")
var numError = expvar.NewInt("num_errs")
var numPanic = expvar.NewInt("num_panics")
var numTimeout = expvar.NewInt("num_timeouts")

// ME Status
// MEStatus represents a metrics-exposer status code.
type MEStatus int