func NewBlpStats() *blpStats {
	bs := &blpStats{}
	bs.parseStats = stats.NewCounters("ParseEvent")
	bs.txnCount = stats.NewCounters("TxnCount")
	bs.dmlCount = stats.NewCounters("DmlCount")
	bs.queriesPerSec = stats.NewRates("QueriesPerSec", bs.dmlCount, 15, 60e9)
	bs.txnsPerSec = stats.NewRates("TxnPerSec", bs.txnCount, 15, 60e9)
	return bs
}
func newBlsStats() *blsStats {
	bs := &blsStats{}
	bs.parseStats = estats.NewCounters("")
	bs.txnCount = estats.NewCounters("")
	bs.dmlCount = estats.NewCounters("")
	bs.queriesPerSec = estats.NewRates("", bs.dmlCount, 15, 60e9)
	bs.txnsPerSec = estats.NewRates("", bs.txnCount, 15, 60e9)
	return bs
}
func newBlsStats() *blsStats {
	bs := &blsStats{}
	bs.parseStats = stats.NewCounters("BinlogServerParseEvent")
	bs.txnCount = stats.NewCounters("BinlogServerTxnCount")
	bs.dmlCount = stats.NewCounters("BinlogServerDmlCount")
	bs.queriesPerSec = stats.NewRates("BinlogServerQPS", bs.dmlCount, 15, 60e9)
	bs.txnsPerSec = stats.NewRates("BinlogServerTPS", bs.txnCount, 15, 60e9)
	return bs
}
func NewBlplStats() *blplStats {
	bs := &blplStats{}
	bs.txnCount = estats.NewCounters("")
	bs.queryCount = estats.NewCounters("")
	bs.queriesPerSec = estats.NewRates("", bs.queryCount, 15, 60e9)
	bs.txnsPerSec = estats.NewRates("", bs.txnCount, 15, 60e9)
	bs.txnTime = estats.NewTimings("")
	bs.queryTime = estats.NewTimings("")
	return bs
}
func NewBlpStats() *blpStats {
	bs := &blpStats{}
	bs.txnCount = stats.NewCounters("TxnCount")
	bs.queryCount = stats.NewCounters("QueryCount")
	bs.queriesPerSec = stats.NewRates("QueriesPerSec", bs.queryCount, 15, 60e9)
	bs.txnsPerSec = stats.NewRates("TxnPerSec", bs.txnCount, 15, 60e9)
	bs.txnTime = stats.NewTimings("TxnTime")
	bs.queryTime = stats.NewTimings("QueryTime")
	bs.lookupTxn = stats.NewTimings("LookupTxn")
	return bs
}
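// Hedged sketch, not from the original source: the struct below is inferred from
// the NewBlpStats constructor directly above (the other variants in this section
// use slightly different field sets), and recordTxn is a hypothetical helper that
// shows how these counters and timings are typically fed. Assumed imports: "time"
// and the Vitess stats package referenced by the constructors.
type blpStats struct {
	txnCount      *stats.Counters
	queryCount    *stats.Counters
	queriesPerSec *stats.Rates
	txnsPerSec    *stats.Rates
	txnTime       *stats.Timings
	queryTime     *stats.Timings
	lookupTxn     *stats.Timings
}

func (bs *blpStats) recordTxn(queries int64, start time.Time) {
	bs.txnCount.Add("TxnCount", 1)           // Rates derive TxnPerSec from this counter
	bs.queryCount.Add("QueryCount", queries) // and QueriesPerSec from this one
	bs.txnTime.Record("Apply", start)        // Timings.Record computes the elapsed time since start
}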
func NewQueryEngine(config Config) *QueryEngine {
	qe := &QueryEngine{}
	qe.cachePool = NewCachePool("CachePool", config.RowCache, time.Duration(config.QueryTimeout*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.schemaInfo = NewSchemaInfo(config.QueryCacheSize, time.Duration(config.SchemaReloadTime*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.connPool = NewConnectionPool("ConnPool", config.PoolSize, time.Duration(config.IdleTimeout*1e9))
	qe.streamConnPool = NewConnectionPool("StreamConnPool", config.StreamPoolSize, time.Duration(config.IdleTimeout*1e9))
	qe.streamTokens = sync2.NewSemaphore(config.StreamExecThrottle, time.Duration(config.StreamWaitTimeout*1e9))
	qe.reservedPool = NewReservedPool("ReservedPool")
	qe.txPool = NewConnectionPool("TxPool", config.TransactionCap, time.Duration(config.IdleTimeout*1e9)) // the number of connections in the pool has to be greater than TransactionCap
	qe.activeTxPool = NewActiveTxPool("ActiveTxPool", time.Duration(config.TransactionTimeout*1e9))
	qe.activePool = NewActivePool("ActivePool", time.Duration(config.QueryTimeout*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.consolidator = NewConsolidator()
	qe.spotCheckFreq = sync2.AtomicInt64(config.SpotCheckRatio * SPOT_CHECK_MULTIPLIER)
	qe.maxResultSize = sync2.AtomicInt64(config.MaxResultSize)
	qe.streamBufferSize = sync2.AtomicInt64(config.StreamBufferSize)
	stats.Publish("MaxResultSize", stats.IntFunc(qe.maxResultSize.Get))
	stats.Publish("StreamBufferSize", stats.IntFunc(qe.streamBufferSize.Get))
	queryStats = stats.NewTimings("Queries")
	stats.NewRates("QPS", queryStats, 15, 60e9)
	waitStats = stats.NewTimings("Waits")
	killStats = stats.NewCounters("Kills")
	errorStats = stats.NewCounters("Errors")
	resultStats = stats.NewHistogram("Results", resultBuckets)
	stats.Publish("SpotCheckRatio", stats.FloatFunc(func() float64 {
		return float64(qe.spotCheckFreq.Get()) / SPOT_CHECK_MULTIPLIER
	}))
	spotCheckCount = stats.NewInt("SpotCheckCount")
	return qe
}
// NewQueryServiceStats returns a new QueryServiceStats instance.
func NewQueryServiceStats(statsPrefix string, enablePublishStats bool) *QueryServiceStats {
	mysqlStatsName := ""
	queryStatsName := ""
	qpsRateName := ""
	waitStatsName := ""
	killStatsName := ""
	infoErrorsName := ""
	errorStatsName := ""
	internalErrorsName := ""
	resultStatsName := ""
	spotCheckCountName := ""
	userTableQueryCountName := ""
	userTableQueryTimesNsName := ""
	userTransactionCountName := ""
	userTransactionTimesNsName := ""
	if enablePublishStats {
		mysqlStatsName = statsPrefix + "Mysql"
		queryStatsName = statsPrefix + "Queries"
		qpsRateName = statsPrefix + "QPS"
		waitStatsName = statsPrefix + "Waits"
		killStatsName = statsPrefix + "Kills"
		infoErrorsName = statsPrefix + "InfoErrors"
		errorStatsName = statsPrefix + "Errors"
		internalErrorsName = statsPrefix + "InternalErrors"
		resultStatsName = statsPrefix + "Results"
		spotCheckCountName = statsPrefix + "RowcacheSpotCheckCount"
		userTableQueryCountName = statsPrefix + "UserTableQueryCount"
		userTableQueryTimesNsName = statsPrefix + "UserTableQueryTimesNs"
		userTransactionCountName = statsPrefix + "UserTransactionCount"
		userTransactionTimesNsName = statsPrefix + "UserTransactionTimesNs"
	}
	resultBuckets := []int64{0, 1, 5, 10, 50, 100, 500, 1000, 5000, 10000}
	queryStats := stats.NewTimings(queryStatsName)
	return &QueryServiceStats{
		MySQLStats: stats.NewTimings(mysqlStatsName),
		QueryStats: queryStats,
		WaitStats:  stats.NewTimings(waitStatsName),
		KillStats:  stats.NewCounters(killStatsName, "Transactions", "Queries"),
		InfoErrors: stats.NewCounters(infoErrorsName, "Retry", "Fatal", "DupKey"),
		ErrorStats: stats.NewCounters(errorStatsName, "Fail", "TxPoolFull", "NotInTx", "Deadlock"),
		InternalErrors: stats.NewCounters(internalErrorsName, "Task", "MemcacheStats",
			"Mismatch", "StrayTransactions", "Invalidation", "Panic", "HungQuery", "Schema"),
		UserTableQueryCount: stats.NewMultiCounters(
			userTableQueryCountName, []string{"TableName", "CallerID", "Type"}),
		UserTableQueryTimesNs: stats.NewMultiCounters(
			userTableQueryTimesNsName, []string{"TableName", "CallerID", "Type"}),
		UserTransactionCount: stats.NewMultiCounters(
			userTransactionCountName, []string{"CallerID", "Conclusion"}),
		UserTransactionTimesNs: stats.NewMultiCounters(
			userTransactionTimesNsName, []string{"CallerID", "Conclusion"}),
		// Sample every 5 seconds and keep samples for up to 15 minutes.
		QPSRates:       stats.NewRates(qpsRateName, queryStats, 15*60/5, 5*time.Second),
		ResultStats:    stats.NewHistogram(resultStatsName, resultBuckets),
		SpotCheckCount: stats.NewInt(spotCheckCountName),
	}
}
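// Hedged usage sketch, not from the original source: in the stats package used
// here, an empty variable name skips expvar registration, which is what
// enablePublishStats=false relies on, while the prefix keeps exported names
// distinct when several query services share a process. Category names below
// are illustrative.
func exampleQueryServiceStats() {
	published := NewQueryServiceStats("Tablet0", true) // exports Tablet0Queries, Tablet0QPS, ...
	private := NewQueryServiceStats("", false)         // nothing appears on /debug/vars

	start := time.Now()
	// ... run a query ...
	published.QueryStats.Record("PASS_SELECT", start) // per-plan-type timing
	private.KillStats.Add("Queries", 1)               // one of the categories declared in the constructor
}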
func Init(serv SrvTopoServer, cell string, retryDelay time.Duration, retryCount int, timeout time.Duration) {
	if RpcVTGate != nil {
		log.Fatalf("VTGate already initialized")
	}
	RpcVTGate = &VTGate{
		resolver:                    NewResolver(serv, "VttabletCall", cell, retryDelay, retryCount, timeout),
		timings:                     stats.NewMultiTimings("VtgateApi", []string{"Operation", "Keyspace", "DbType"}),
		errors:                      stats.NewMultiCounters("VtgateApiErrorCounts", []string{"Operation", "Keyspace", "DbType"}),
		infoErrors:                  stats.NewCounters("VtgateInfoErrorCounts"),
		logExecuteShard:             logutil.NewThrottledLogger("ExecuteShard", 5*time.Second),
		logExecuteKeyspaceIds:       logutil.NewThrottledLogger("ExecuteKeyspaceIds", 5*time.Second),
		logExecuteKeyRanges:         logutil.NewThrottledLogger("ExecuteKeyRanges", 5*time.Second),
		logExecuteEntityIds:         logutil.NewThrottledLogger("ExecuteEntityIds", 5*time.Second),
		logExecuteBatchShard:        logutil.NewThrottledLogger("ExecuteBatchShard", 5*time.Second),
		logExecuteBatchKeyspaceIds:  logutil.NewThrottledLogger("ExecuteBatchKeyspaceIds", 5*time.Second),
		logStreamExecuteKeyspaceIds: logutil.NewThrottledLogger("StreamExecuteKeyspaceIds", 5*time.Second),
		logStreamExecuteKeyRanges:   logutil.NewThrottledLogger("StreamExecuteKeyRanges", 5*time.Second),
		logStreamExecuteShard:       logutil.NewThrottledLogger("StreamExecuteShard", 5*time.Second),
	}
	QPSByOperation = stats.NewRates("QPSByOperation", stats.CounterForDimension(RpcVTGate.timings, "Operation"), 15, 1*time.Minute)
	QPSByKeyspace = stats.NewRates("QPSByKeyspace", stats.CounterForDimension(RpcVTGate.timings, "Keyspace"), 15, 1*time.Minute)
	QPSByDbType = stats.NewRates("QPSByDbType", stats.CounterForDimension(RpcVTGate.timings, "DbType"), 15, 1*time.Minute)
	ErrorsByOperation = stats.NewRates("ErrorsByOperation", stats.CounterForDimension(RpcVTGate.errors, "Operation"), 15, 1*time.Minute)
	ErrorsByKeyspace = stats.NewRates("ErrorsByKeyspace", stats.CounterForDimension(RpcVTGate.errors, "Keyspace"), 15, 1*time.Minute)
	ErrorsByDbType = stats.NewRates("ErrorsByDbType", stats.CounterForDimension(RpcVTGate.errors, "DbType"), 15, 1*time.Minute)
	for _, f := range RegisterVTGates {
		f(RpcVTGate)
	}
}
// NewQueryEngine creates a new QueryEngine.
// This is a singleton class.
// You must call this only once.
func NewQueryEngine(config Config) *QueryEngine {
	qe := &QueryEngine{}
	qe.schemaInfo = NewSchemaInfo(config.QueryCacheSize, time.Duration(config.SchemaReloadTime*1e9), time.Duration(config.IdleTimeout*1e9))
	mysqlStats = stats.NewTimings("Mysql")

	// Pools
	qe.cachePool = NewCachePool("Rowcache", config.RowCache, time.Duration(config.QueryTimeout*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.connPool = dbconnpool.NewConnectionPool("ConnPool", config.PoolSize, time.Duration(config.IdleTimeout*1e9))
	qe.streamConnPool = dbconnpool.NewConnectionPool("StreamConnPool", config.StreamPoolSize, time.Duration(config.IdleTimeout*1e9))
	qe.txPool = dbconnpool.NewConnectionPool("TransactionPool", config.TransactionCap, time.Duration(config.IdleTimeout*1e9)) // the number of connections in the pool has to be greater than TransactionCap

	// Services
	qe.activeTxPool = NewActiveTxPool("ActiveTransactionPool", time.Duration(config.TransactionTimeout*1e9))
	qe.connKiller = NewConnectionKiller(1, time.Duration(config.IdleTimeout*1e9))
	qe.activePool = NewActivePool("ActivePool", time.Duration(config.QueryTimeout*1e9), qe.connKiller)
	qe.consolidator = NewConsolidator()
	qe.invalidator = NewRowcacheInvalidator(qe)
	qe.streamQList = NewQueryList(qe.connKiller)

	// Vars
	qe.spotCheckFreq = sync2.AtomicInt64(config.SpotCheckRatio * SPOT_CHECK_MULTIPLIER)
	if config.StrictMode {
		qe.strictMode.Set(1)
	}
	qe.strictTableAcl = config.StrictTableAcl
	qe.maxResultSize = sync2.AtomicInt64(config.MaxResultSize)
	qe.streamBufferSize = sync2.AtomicInt64(config.StreamBufferSize)

	// Loggers
	qe.accessCheckerLogger = logutil.NewThrottledLogger("accessChecker", 1*time.Second)

	// Stats
	stats.Publish("MaxResultSize", stats.IntFunc(qe.maxResultSize.Get))
	stats.Publish("StreamBufferSize", stats.IntFunc(qe.streamBufferSize.Get))
	queryStats = stats.NewTimings("Queries")
	QPSRates = stats.NewRates("QPS", queryStats, 15, 60*time.Second)
	waitStats = stats.NewTimings("Waits")
	killStats = stats.NewCounters("Kills")
	infoErrors = stats.NewCounters("InfoErrors")
	errorStats = stats.NewCounters("Errors")
	internalErrors = stats.NewCounters("InternalErrors")
	resultStats = stats.NewHistogram("Results", resultBuckets)
	stats.Publish("RowcacheSpotCheckRatio", stats.FloatFunc(func() float64 {
		return float64(qe.spotCheckFreq.Get()) / SPOT_CHECK_MULTIPLIER
	}))
	spotCheckCount = stats.NewInt("RowcacheSpotCheckCount")

	return qe
}
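// The time.Duration(config.X*1e9) pattern used throughout these constructors
// assumes the config fields hold float64 seconds (1e9 nanoseconds per second).
// The helper below is an illustration, not part of the original code; it states
// the same conversion more explicitly.
func secondsToDuration(seconds float64) time.Duration {
	return time.Duration(seconds * float64(time.Second))
}

// e.g. NewActiveTxPool("ActiveTransactionPool", secondsToDuration(config.TransactionTimeout))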
// Init initializes VTGate server.
func Init(ctx context.Context, hc discovery.HealthCheck, topoServer topo.Server, serv topo.SrvTopoServer, cell string, retryCount int, tabletTypesToWait []topodatapb.TabletType) *VTGate {
	if rpcVTGate != nil {
		log.Fatalf("VTGate already initialized")
	}
	rpcVTGate = &VTGate{
		resolver:     NewResolver(hc, topoServer, serv, "VttabletCall", cell, retryCount, tabletTypesToWait),
		timings:      stats.NewMultiTimings("VtgateApi", []string{"Operation", "Keyspace", "DbType"}),
		rowsReturned: stats.NewMultiCounters("VtgateApiRowsReturned", []string{"Operation", "Keyspace", "DbType"}),

		logExecute:                  logutil.NewThrottledLogger("Execute", 5*time.Second),
		logExecuteShards:            logutil.NewThrottledLogger("ExecuteShards", 5*time.Second),
		logExecuteKeyspaceIds:       logutil.NewThrottledLogger("ExecuteKeyspaceIds", 5*time.Second),
		logExecuteKeyRanges:         logutil.NewThrottledLogger("ExecuteKeyRanges", 5*time.Second),
		logExecuteEntityIds:         logutil.NewThrottledLogger("ExecuteEntityIds", 5*time.Second),
		logExecuteBatchShards:       logutil.NewThrottledLogger("ExecuteBatchShards", 5*time.Second),
		logExecuteBatchKeyspaceIds:  logutil.NewThrottledLogger("ExecuteBatchKeyspaceIds", 5*time.Second),
		logStreamExecute:            logutil.NewThrottledLogger("StreamExecute", 5*time.Second),
		logStreamExecuteKeyspaceIds: logutil.NewThrottledLogger("StreamExecuteKeyspaceIds", 5*time.Second),
		logStreamExecuteKeyRanges:   logutil.NewThrottledLogger("StreamExecuteKeyRanges", 5*time.Second),
		logStreamExecuteShards:      logutil.NewThrottledLogger("StreamExecuteShards", 5*time.Second),
		logUpdateStream:             logutil.NewThrottledLogger("UpdateStream", 5*time.Second),
	}

	// vschemaCounters needs to be initialized before the planner to
	// catch the initial load stats.
	vschemaCounters = stats.NewCounters("VtgateVSchemaCounts")

	// Reuse resolver's scatterConn.
	rpcVTGate.router = NewRouter(ctx, serv, cell, "VTGateRouter", rpcVTGate.resolver.scatterConn)
	normalErrors = stats.NewMultiCounters("VtgateApiErrorCounts", []string{"Operation", "Keyspace", "DbType"})
	infoErrors = stats.NewCounters("VtgateInfoErrorCounts")
	internalErrors = stats.NewCounters("VtgateInternalErrorCounts")

	qpsByOperation = stats.NewRates("QPSByOperation", stats.CounterForDimension(rpcVTGate.timings, "Operation"), 15, 1*time.Minute)
	qpsByKeyspace = stats.NewRates("QPSByKeyspace", stats.CounterForDimension(rpcVTGate.timings, "Keyspace"), 15, 1*time.Minute)
	qpsByDbType = stats.NewRates("QPSByDbType", stats.CounterForDimension(rpcVTGate.timings, "DbType"), 15, 1*time.Minute)

	errorsByOperation = stats.NewRates("ErrorsByOperation", stats.CounterForDimension(normalErrors, "Operation"), 15, 1*time.Minute)
	errorsByKeyspace = stats.NewRates("ErrorsByKeyspace", stats.CounterForDimension(normalErrors, "Keyspace"), 15, 1*time.Minute)
	errorsByDbType = stats.NewRates("ErrorsByDbType", stats.CounterForDimension(normalErrors, "DbType"), 15, 1*time.Minute)

	servenv.OnRun(func() {
		for _, f := range RegisterVTGates {
			f(rpcVTGate)
		}
	})
	vtgateOnce.Do(rpcVTGate.registerDebugHealthHandler)
	return rpcVTGate
}
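// Hedged sketch, not the actual Vitess registration code: RegisterVTGates is a
// list of hooks that server transports append to so each one receives the VTGate
// instance once Init has built it. The service interface name below is an
// assumption based on the call sites above.
func init() {
	RegisterVTGates = append(RegisterVTGates, func(vtGate vtgateservice.VTGateService) {
		// register an RPC or HTTP server that serves this VTGate instance
	})
}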
func NewQueryEngine(config Config) *QueryEngine {
	qe := &QueryEngine{}
	qe.cachePool = NewCachePool(config.RowCache, time.Duration(config.QueryTimeout*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.schemaInfo = NewSchemaInfo(config.QueryCacheSize, time.Duration(config.SchemaReloadTime*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.connPool = NewConnectionPool(config.PoolSize, time.Duration(config.IdleTimeout*1e9))
	qe.streamConnPool = NewConnectionPool(config.StreamPoolSize, time.Duration(config.IdleTimeout*1e9))
	qe.reservedPool = NewReservedPool()
	qe.txPool = NewConnectionPool(config.TransactionCap, time.Duration(config.IdleTimeout*1e9)) // the number of connections in the pool has to be greater than TransactionCap
	qe.activeTxPool = NewActiveTxPool(time.Duration(config.TransactionTimeout * 1e9))
	qe.activePool = NewActivePool(time.Duration(config.QueryTimeout*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.consolidator = NewConsolidator()
	qe.maxResultSize = sync2.AtomicInt32(config.MaxResultSize)
	qe.streamBufferSize = sync2.AtomicInt32(config.StreamBufferSize)
	queryStats = stats.NewTimings("Queries")
	stats.NewRates("QPS", queryStats, 15, 60e9)
	waitStats = stats.NewTimings("Waits")
	killStats = stats.NewCounters("Kills")
	errorStats = stats.NewCounters("Errors")
	resultStats = stats.NewHistogram("Results", resultBuckets)
	return qe
}
// NewQueryEngine creates a new QueryEngine.
// This is a singleton class.
// You must call this only once.
func NewQueryEngine(config Config) *QueryEngine {
	qe := &QueryEngine{}

	// services
	qe.cachePool = NewCachePool("RowcachePool", config.RowCache, time.Duration(config.QueryTimeout*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.schemaInfo = NewSchemaInfo(config.QueryCacheSize, time.Duration(config.SchemaReloadTime*1e9), time.Duration(config.IdleTimeout*1e9), config.SensitiveMode)
	qe.connPool = NewConnectionPool("ConnPool", config.PoolSize, time.Duration(config.IdleTimeout*1e9))
	qe.streamConnPool = NewConnectionPool("StreamConnPool", config.StreamPoolSize, time.Duration(config.IdleTimeout*1e9))
	qe.txPool = NewConnectionPool("TransactionPool", config.TransactionCap, time.Duration(config.IdleTimeout*1e9)) // the number of connections in the pool has to be greater than TransactionCap
	qe.activeTxPool = NewActiveTxPool("ActiveTransactionPool", time.Duration(config.TransactionTimeout*1e9))
	qe.activePool = NewActivePool("ActivePool", time.Duration(config.QueryTimeout*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.consolidator = NewConsolidator()

	// vars
	qe.spotCheckFreq = sync2.AtomicInt64(config.SpotCheckRatio * SPOT_CHECK_MULTIPLIER)
	if config.StrictMode {
		qe.strictMode.Set(1)
	}
	qe.maxResultSize = sync2.AtomicInt64(config.MaxResultSize)
	qe.streamBufferSize = sync2.AtomicInt64(config.StreamBufferSize)

	// stats
	stats.Publish("MaxResultSize", stats.IntFunc(qe.maxResultSize.Get))
	stats.Publish("StreamBufferSize", stats.IntFunc(qe.streamBufferSize.Get))
	queryStats = stats.NewTimings("Queries")
	QPSRates = stats.NewRates("QPS", queryStats, 15, 60*time.Second)
	waitStats = stats.NewTimings("Waits")
	killStats = stats.NewCounters("Kills")
	infoErrors = stats.NewCounters("InfoErrors")
	errorStats = stats.NewCounters("Errors")
	internalErrors = stats.NewCounters("InternalErrors")
	resultStats = stats.NewHistogram("Results", resultBuckets)
	stats.Publish("SpotCheckRatio", stats.FloatFunc(func() float64 {
		return float64(qe.spotCheckFreq.Get()) / SPOT_CHECK_MULTIPLIER
	}))
	spotCheckCount = stats.NewInt("SpotCheckCount")

	return qe
}
// NewQueryServiceStats returns a new QueryServiceStats instance.
func NewQueryServiceStats(statsPrefix string, enablePublishStats bool) *QueryServiceStats {
	mysqlStatsName := ""
	queryStatsName := ""
	qpsRateName := ""
	waitStatsName := ""
	killStatsName := ""
	infoErrorsName := ""
	errorStatsName := ""
	internalErrorsName := ""
	resultStatsName := ""
	spotCheckCountName := ""
	if enablePublishStats {
		mysqlStatsName = statsPrefix + "Mysql"
		queryStatsName = statsPrefix + "Queries"
		qpsRateName = statsPrefix + "QPS"
		waitStatsName = statsPrefix + "Waits"
		killStatsName = statsPrefix + "Kills"
		infoErrorsName = statsPrefix + "InfoErrors"
		errorStatsName = statsPrefix + "Errors"
		internalErrorsName = statsPrefix + "InternalErrors"
		resultStatsName = statsPrefix + "Results"
		spotCheckCountName = statsPrefix + "RowcacheSpotCheckCount"
	}
	resultBuckets := []int64{0, 1, 5, 10, 50, 100, 500, 1000, 5000, 10000}
	queryStats := stats.NewTimings(queryStatsName)
	return &QueryServiceStats{
		MySQLStats:     stats.NewTimings(mysqlStatsName),
		QueryStats:     queryStats,
		WaitStats:      stats.NewTimings(waitStatsName),
		KillStats:      stats.NewCounters(killStatsName),
		InfoErrors:     stats.NewCounters(infoErrorsName),
		ErrorStats:     stats.NewCounters(errorStatsName),
		InternalErrors: stats.NewCounters(internalErrorsName),
		QPSRates:       stats.NewRates(qpsRateName, queryStats, 15, 60*time.Second),
		ResultStats:    stats.NewHistogram(resultStatsName, resultBuckets),
		SpotCheckCount: stats.NewInt(spotCheckCountName),
	}
}
// Init initializes VTGate server.
func Init(ctx context.Context, hc discovery.HealthCheck, topoServer topo.Server, serv topo.SrvTopoServer, cell string, retryDelay time.Duration, retryCount int, connTimeoutTotal, connTimeoutPerConn, connLife time.Duration, tabletTypesToWait []topodatapb.TabletType, maxInFlight int, testGateway string) *VTGate {
	if rpcVTGate != nil {
		log.Fatalf("VTGate already initialized")
	}
	rpcVTGate = &VTGate{
		resolver:     NewResolver(hc, topoServer, serv, "VttabletCall", cell, retryDelay, retryCount, connTimeoutTotal, connTimeoutPerConn, connLife, tabletTypesToWait, testGateway),
		timings:      stats.NewMultiTimings("VtgateApi", []string{"Operation", "Keyspace", "DbType"}),
		rowsReturned: stats.NewMultiCounters("VtgateApiRowsReturned", []string{"Operation", "Keyspace", "DbType"}),

		maxInFlight: int64(maxInFlight),
		inFlight:    sync2.NewAtomicInt64(0),

		logExecute:                  logutil.NewThrottledLogger("Execute", 5*time.Second),
		logExecuteShards:            logutil.NewThrottledLogger("ExecuteShards", 5*time.Second),
		logExecuteKeyspaceIds:       logutil.NewThrottledLogger("ExecuteKeyspaceIds", 5*time.Second),
		logExecuteKeyRanges:         logutil.NewThrottledLogger("ExecuteKeyRanges", 5*time.Second),
		logExecuteEntityIds:         logutil.NewThrottledLogger("ExecuteEntityIds", 5*time.Second),
		logExecuteBatchShards:       logutil.NewThrottledLogger("ExecuteBatchShards", 5*time.Second),
		logExecuteBatchKeyspaceIds:  logutil.NewThrottledLogger("ExecuteBatchKeyspaceIds", 5*time.Second),
		logStreamExecute:            logutil.NewThrottledLogger("StreamExecute", 5*time.Second),
		logStreamExecuteKeyspaceIds: logutil.NewThrottledLogger("StreamExecuteKeyspaceIds", 5*time.Second),
		logStreamExecuteKeyRanges:   logutil.NewThrottledLogger("StreamExecuteKeyRanges", 5*time.Second),
		logStreamExecuteShards:      logutil.NewThrottledLogger("StreamExecuteShards", 5*time.Second),
	}
	// Reuse resolver's scatterConn.
	rpcVTGate.router = NewRouter(ctx, serv, cell, "VTGateRouter", rpcVTGate.resolver.scatterConn)
	normalErrors = stats.NewMultiCounters("VtgateApiErrorCounts", []string{"Operation", "Keyspace", "DbType"})
	infoErrors = stats.NewCounters("VtgateInfoErrorCounts")
	internalErrors = stats.NewCounters("VtgateInternalErrorCounts")

	qpsByOperation = stats.NewRates("QPSByOperation", stats.CounterForDimension(rpcVTGate.timings, "Operation"), 15, 1*time.Minute)
	qpsByKeyspace = stats.NewRates("QPSByKeyspace", stats.CounterForDimension(rpcVTGate.timings, "Keyspace"), 15, 1*time.Minute)
	qpsByDbType = stats.NewRates("QPSByDbType", stats.CounterForDimension(rpcVTGate.timings, "DbType"), 15, 1*time.Minute)

	errorsByOperation = stats.NewRates("ErrorsByOperation", stats.CounterForDimension(normalErrors, "Operation"), 15, 1*time.Minute)
	errorsByKeyspace = stats.NewRates("ErrorsByKeyspace", stats.CounterForDimension(normalErrors, "Keyspace"), 15, 1*time.Minute)
	errorsByDbType = stats.NewRates("ErrorsByDbType", stats.CounterForDimension(normalErrors, "DbType"), 15, 1*time.Minute)

	for _, f := range RegisterVTGates {
		f(rpcVTGate)
	}
	return rpcVTGate
}
// Init initializes VTGate server.
func Init(serv SrvTopoServer, schema *planbuilder.Schema, cell string, retryDelay time.Duration, retryCount int, connTimeoutTotal, connTimeoutPerConn, connLife time.Duration, maxInFlight int) {
	if rpcVTGate != nil {
		log.Fatalf("VTGate already initialized")
	}
	rpcVTGate = &VTGate{
		resolver:     NewResolver(serv, "VttabletCall", cell, retryDelay, retryCount, connTimeoutTotal, connTimeoutPerConn, connLife),
		timings:      stats.NewMultiTimings("VtgateApi", []string{"Operation", "Keyspace", "DbType"}),
		rowsReturned: stats.NewMultiCounters("VtgateApiRowsReturned", []string{"Operation", "Keyspace", "DbType"}),

		maxInFlight: int64(maxInFlight),
		inFlight:    0,

		logExecute:                  logutil.NewThrottledLogger("Execute", 5*time.Second),
		logExecuteShard:             logutil.NewThrottledLogger("ExecuteShard", 5*time.Second),
		logExecuteKeyspaceIds:       logutil.NewThrottledLogger("ExecuteKeyspaceIds", 5*time.Second),
		logExecuteKeyRanges:         logutil.NewThrottledLogger("ExecuteKeyRanges", 5*time.Second),
		logExecuteEntityIds:         logutil.NewThrottledLogger("ExecuteEntityIds", 5*time.Second),
		logExecuteBatchShard:        logutil.NewThrottledLogger("ExecuteBatchShard", 5*time.Second),
		logExecuteBatchKeyspaceIds:  logutil.NewThrottledLogger("ExecuteBatchKeyspaceIds", 5*time.Second),
		logStreamExecute:            logutil.NewThrottledLogger("StreamExecute", 5*time.Second),
		logStreamExecuteKeyspaceIds: logutil.NewThrottledLogger("StreamExecuteKeyspaceIds", 5*time.Second),
		logStreamExecuteKeyRanges:   logutil.NewThrottledLogger("StreamExecuteKeyRanges", 5*time.Second),
		logStreamExecuteShard:       logutil.NewThrottledLogger("StreamExecuteShard", 5*time.Second),
	}
	// Reuse resolver's scatterConn.
	rpcVTGate.router = NewRouter(serv, cell, schema, "VTGateRouter", rpcVTGate.resolver.scatterConn)
	normalErrors = stats.NewMultiCounters("VtgateApiErrorCounts", []string{"Operation", "Keyspace", "DbType"})
	infoErrors = stats.NewCounters("VtgateInfoErrorCounts")
	internalErrors = stats.NewCounters("VtgateInternalErrorCounts")

	qpsByOperation = stats.NewRates("QPSByOperation", stats.CounterForDimension(rpcVTGate.timings, "Operation"), 15, 1*time.Minute)
	qpsByKeyspace = stats.NewRates("QPSByKeyspace", stats.CounterForDimension(rpcVTGate.timings, "Keyspace"), 15, 1*time.Minute)
	qpsByDbType = stats.NewRates("QPSByDbType", stats.CounterForDimension(rpcVTGate.timings, "DbType"), 15, 1*time.Minute)

	errorsByOperation = stats.NewRates("ErrorsByOperation", stats.CounterForDimension(normalErrors, "Operation"), 15, 1*time.Minute)
	errorsByKeyspace = stats.NewRates("ErrorsByKeyspace", stats.CounterForDimension(normalErrors, "Keyspace"), 15, 1*time.Minute)
	errorsByDbType = stats.NewRates("ErrorsByDbType", stats.CounterForDimension(normalErrors, "DbType"), 15, 1*time.Minute)

	for _, f := range RegisterVTGates {
		f(rpcVTGate)
	}
}
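// Hedged sketch, not from the original source: a single Record into the
// three-dimensional timings above feeds QPSByOperation, QPSByKeyspace and
// QPSByDbType at once, because CounterForDimension projects the timing counts
// onto one label at a time before NewRates samples them.
func recordVTGateCall(keyspace, dbType string) {
	startTime := time.Now()
	defer rpcVTGate.timings.Record([]string{"Execute", keyspace, dbType}, startTime)
	// ... perform the Execute call ...
}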
// NewBinlogPlayerStats creates a new BinlogPlayerStats structure.
func NewBinlogPlayerStats() *BinlogPlayerStats {
	bps := &BinlogPlayerStats{}
	bps.Timings = stats.NewTimings("")
	bps.Rates = stats.NewRates("", bps.Timings, 15, 60e9)
	return bps
}
// NewStats creates a new Stats structure.
func NewStats() *Stats {
	bps := &Stats{}
	bps.Timings = stats.NewTimings("")
	bps.Rates = stats.NewRates("", bps.Timings, 15, 60e9)
	return bps
}
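// Hedged sketch, not from the original source: because NewStats passes empty
// names, nothing is auto-registered with expvar. A caller that wants these
// numbers visible can record into them and publish them under a name of its own
// choosing; this assumes Timings satisfies expvar.Var in the stats API version
// used above, and the exported name is hypothetical.
func exampleStatsUsage() {
	s := NewStats()
	start := time.Now()
	// ... apply one replicated transaction ...
	s.Timings.Record("ApplyBinlogTransaction", start)
	stats.Publish("BinlogPlayerTimings", s.Timings)
}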