func NewQueryEngine(config Config) *QueryEngine {
	qe := &QueryEngine{}
	qe.cachePool = NewCachePool("CachePool", config.RowCache, time.Duration(config.QueryTimeout*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.schemaInfo = NewSchemaInfo(config.QueryCacheSize, time.Duration(config.SchemaReloadTime*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.connPool = NewConnectionPool("ConnPool", config.PoolSize, time.Duration(config.IdleTimeout*1e9))
	qe.streamConnPool = NewConnectionPool("StreamConnPool", config.StreamPoolSize, time.Duration(config.IdleTimeout*1e9))
	qe.streamTokens = sync2.NewSemaphore(config.StreamExecThrottle, time.Duration(config.StreamWaitTimeout*1e9))
	qe.reservedPool = NewReservedPool("ReservedPool")
	// The number of connections in the pool has to be greater than TransactionCap.
	qe.txPool = NewConnectionPool("TxPool", config.TransactionCap, time.Duration(config.IdleTimeout*1e9))
	qe.activeTxPool = NewActiveTxPool("ActiveTxPool", time.Duration(config.TransactionTimeout*1e9))
	qe.activePool = NewActivePool("ActivePool", time.Duration(config.QueryTimeout*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.consolidator = NewConsolidator()
	qe.spotCheckFreq = sync2.AtomicInt64(config.SpotCheckRatio * SPOT_CHECK_MULTIPLIER)
	qe.maxResultSize = sync2.AtomicInt64(config.MaxResultSize)
	qe.streamBufferSize = sync2.AtomicInt64(config.StreamBufferSize)
	stats.Publish("MaxResultSize", stats.IntFunc(qe.maxResultSize.Get))
	stats.Publish("StreamBufferSize", stats.IntFunc(qe.streamBufferSize.Get))
	queryStats = stats.NewTimings("Queries")
	stats.NewRates("QPS", queryStats, 15, 60e9)
	waitStats = stats.NewTimings("Waits")
	killStats = stats.NewCounters("Kills")
	errorStats = stats.NewCounters("Errors")
	resultStats = stats.NewHistogram("Results", resultBuckets)
	stats.Publish("SpotCheckRatio", stats.FloatFunc(func() float64 {
		return float64(qe.spotCheckFreq.Get()) / SPOT_CHECK_MULTIPLIER
	}))
	spotCheckCount = stats.NewInt("SpotCheckCount")
	return qe
}
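// The constructors in this section repeatedly convert float64 config values,
// given in seconds, into time.Duration via time.Duration(x*1e9). A minimal
// sketch of the idiom (toDuration is a hypothetical helper, not part of the
// original code): a float64 variable cannot be multiplied by the typed
// constant time.Second directly, so the value is scaled to nanoseconds
// (1e9 per second) first and then converted.
func toDuration(seconds float64) time.Duration {
	// Truncates any sub-nanosecond remainder.
	return time.Duration(seconds * 1e9)
}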
func NewTopoReader(ts vtgate.SrvTopoServer) *TopoReader {
	return &TopoReader{
		ts:         ts,
		queryCount: stats.NewCounters("TopoReaderRpcQueryCount"),
		errorCount: stats.NewCounters("TopoReaderRpcErrorCount"),
	}
}
func NewTopoReader(ts topo.Server) *TopoReader {
	return &TopoReader{
		ts:         ts,
		queryCount: stats.NewCounters("VtTopoQueryCount"),
		errorCount: stats.NewCounters("VtTopoErrorCount"),
	}
}
func newZkrStats() *zkrStats {
	zs := &zkrStats{}
	zs.zkReads = stats.NewCounters("ZkReaderZkReads")
	zs.cacheReads = stats.NewCounters("ZkReaderCacheReads")
	zs.staleReads = stats.NewCounters("ZkReaderStaleReads")
	zs.nodeNotFoundErrors = stats.NewCounters("ZkReaderNodeNotFoundErrors")
	zs.otherErrors = stats.NewCounters("ZkReaderOtherErrors")
	return zs
}
func NewBlpStats() *blpStats {
	bs := &blpStats{}
	bs.parseStats = stats.NewCounters("ParseEvent")
	bs.txnCount = stats.NewCounters("TxnCount")
	bs.dmlCount = stats.NewCounters("DmlCount")
	bs.queriesPerSec = stats.NewRates("QueriesPerSec", bs.dmlCount, 15, 60e9)
	bs.txnsPerSec = stats.NewRates("TxnPerSec", bs.txnCount, 15, 60e9)
	return bs
}
func newBlsStats() *blsStats {
	bs := &blsStats{}
	bs.parseStats = estats.NewCounters("")
	bs.txnCount = estats.NewCounters("")
	bs.dmlCount = estats.NewCounters("")
	bs.queriesPerSec = estats.NewRates("", bs.dmlCount, 15, 60e9)
	bs.txnsPerSec = estats.NewRates("", bs.txnCount, 15, 60e9)
	return bs
}
func newBlsStats() *blsStats {
	bs := &blsStats{}
	bs.parseStats = stats.NewCounters("BinlogServerParseEvent")
	bs.txnCount = stats.NewCounters("BinlogServerTxnCount")
	bs.dmlCount = stats.NewCounters("BinlogServerDmlCount")
	bs.queriesPerSec = stats.NewRates("BinlogServerQPS", bs.dmlCount, 15, 60e9)
	bs.txnsPerSec = stats.NewRates("BinlogServerTPS", bs.txnCount, 15, 60e9)
	return bs
}
// NewQueryServiceStats returns a new QueryServiceStats instance.
func NewQueryServiceStats(statsPrefix string, enablePublishStats bool) *QueryServiceStats {
	mysqlStatsName := ""
	queryStatsName := ""
	qpsRateName := ""
	waitStatsName := ""
	killStatsName := ""
	infoErrorsName := ""
	errorStatsName := ""
	internalErrorsName := ""
	resultStatsName := ""
	spotCheckCountName := ""
	userTableQueryCountName := ""
	userTableQueryTimesNsName := ""
	userTransactionCountName := ""
	userTransactionTimesNsName := ""
	if enablePublishStats {
		mysqlStatsName = statsPrefix + "Mysql"
		queryStatsName = statsPrefix + "Queries"
		qpsRateName = statsPrefix + "QPS"
		waitStatsName = statsPrefix + "Waits"
		killStatsName = statsPrefix + "Kills"
		infoErrorsName = statsPrefix + "InfoErrors"
		errorStatsName = statsPrefix + "Errors"
		internalErrorsName = statsPrefix + "InternalErrors"
		resultStatsName = statsPrefix + "Results"
		spotCheckCountName = statsPrefix + "RowcacheSpotCheckCount"
		userTableQueryCountName = statsPrefix + "UserTableQueryCount"
		userTableQueryTimesNsName = statsPrefix + "UserTableQueryTimesNs"
		userTransactionCountName = statsPrefix + "UserTransactionCount"
		userTransactionTimesNsName = statsPrefix + "UserTransactionTimesNs"
	}
	resultBuckets := []int64{0, 1, 5, 10, 50, 100, 500, 1000, 5000, 10000}
	queryStats := stats.NewTimings(queryStatsName)
	return &QueryServiceStats{
		MySQLStats: stats.NewTimings(mysqlStatsName),
		QueryStats: queryStats,
		WaitStats:  stats.NewTimings(waitStatsName),
		KillStats:  stats.NewCounters(killStatsName, "Transactions", "Queries"),
		InfoErrors: stats.NewCounters(infoErrorsName, "Retry", "Fatal", "DupKey"),
		ErrorStats: stats.NewCounters(errorStatsName, "Fail", "TxPoolFull", "NotInTx", "Deadlock"),
		InternalErrors: stats.NewCounters(internalErrorsName, "Task", "MemcacheStats",
			"Mismatch", "StrayTransactions", "Invalidation", "Panic", "HungQuery", "Schema"),
		UserTableQueryCount: stats.NewMultiCounters(
			userTableQueryCountName, []string{"TableName", "CallerID", "Type"}),
		UserTableQueryTimesNs: stats.NewMultiCounters(
			userTableQueryTimesNsName, []string{"TableName", "CallerID", "Type"}),
		UserTransactionCount: stats.NewMultiCounters(
			userTransactionCountName, []string{"CallerID", "Conclusion"}),
		UserTransactionTimesNs: stats.NewMultiCounters(
			userTransactionTimesNsName, []string{"CallerID", "Conclusion"}),
		// Sample every 5 seconds and keep samples for up to 15 minutes.
		QPSRates:       stats.NewRates(qpsRateName, queryStats, 15*60/5, 5*time.Second),
		ResultStats:    stats.NewHistogram(resultStatsName, resultBuckets),
		SpotCheckCount: stats.NewInt(spotCheckCountName),
	}
}
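// A hedged usage sketch of the constructor above (exampleQueryServiceStats
// and the "VTTablet" prefix are assumptions for illustration). When
// enablePublishStats is false every name stays "", and a vitess stats var
// created with an empty name is, as far as I can tell, kept internal rather
// than exported through the stats registry.
func exampleQueryServiceStats() {
	qss := NewQueryServiceStats("VTTablet", true)
	// Counted under the "Queries" tag of the published VTTabletKills var.
	qss.KillStats.Add("Queries", 1)
}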
func NewBlplStats() *blplStats {
	bs := &blplStats{}
	bs.txnCount = estats.NewCounters("")
	bs.queryCount = estats.NewCounters("")
	bs.queriesPerSec = estats.NewRates("", bs.queryCount, 15, 60e9)
	bs.txnsPerSec = estats.NewRates("", bs.txnCount, 15, 60e9)
	bs.txnTime = estats.NewTimings("")
	bs.queryTime = estats.NewTimings("")
	return bs
}
func NewBlpStats() *blpStats {
	bs := &blpStats{}
	bs.txnCount = stats.NewCounters("TxnCount")
	bs.queryCount = stats.NewCounters("QueryCount")
	bs.queriesPerSec = stats.NewRates("QueriesPerSec", bs.queryCount, 15, 60e9)
	bs.txnsPerSec = stats.NewRates("TxnPerSec", bs.txnCount, 15, 60e9)
	bs.txnTime = stats.NewTimings("TxnTime")
	bs.queryTime = stats.NewTimings("QueryTime")
	bs.lookupTxn = stats.NewTimings("LookupTxn")
	return bs
}
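// A minimal sketch of how the rates above behave (exampleBlpStats and the
// "Commits" category are assumptions for illustration): callers only
// increment the counters; stats.NewRates derives per-second rates by
// sampling the counter, here 15 samples at a 60e9ns (one-minute) interval.
func exampleBlpStats() {
	bs := NewBlpStats()
	bs.txnCount.Add("Commits", 1) // "TxnPerSec" picks this up on its next sample
}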
// NewQueryEngine creates a new QueryEngine.
// This is a singleton class.
// You must call this only once.
func NewQueryEngine(config Config) *QueryEngine {
	qe := &QueryEngine{}
	qe.schemaInfo = NewSchemaInfo(config.QueryCacheSize, time.Duration(config.SchemaReloadTime*1e9), time.Duration(config.IdleTimeout*1e9))
	mysqlStats = stats.NewTimings("Mysql")

	// Pools
	qe.cachePool = NewCachePool("Rowcache", config.RowCache, time.Duration(config.QueryTimeout*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.connPool = dbconnpool.NewConnectionPool("ConnPool", config.PoolSize, time.Duration(config.IdleTimeout*1e9))
	qe.streamConnPool = dbconnpool.NewConnectionPool("StreamConnPool", config.StreamPoolSize, time.Duration(config.IdleTimeout*1e9))
	// The number of connections in the pool has to be greater than TransactionCap.
	qe.txPool = dbconnpool.NewConnectionPool("TransactionPool", config.TransactionCap, time.Duration(config.IdleTimeout*1e9))

	// Services
	qe.activeTxPool = NewActiveTxPool("ActiveTransactionPool", time.Duration(config.TransactionTimeout*1e9))
	qe.connKiller = NewConnectionKiller(1, time.Duration(config.IdleTimeout*1e9))
	qe.activePool = NewActivePool("ActivePool", time.Duration(config.QueryTimeout*1e9), qe.connKiller)
	qe.consolidator = NewConsolidator()
	qe.invalidator = NewRowcacheInvalidator(qe)
	qe.streamQList = NewQueryList(qe.connKiller)

	// Vars
	qe.spotCheckFreq = sync2.AtomicInt64(config.SpotCheckRatio * SPOT_CHECK_MULTIPLIER)
	if config.StrictMode {
		qe.strictMode.Set(1)
	}
	qe.strictTableAcl = config.StrictTableAcl
	qe.maxResultSize = sync2.AtomicInt64(config.MaxResultSize)
	qe.streamBufferSize = sync2.AtomicInt64(config.StreamBufferSize)

	// Loggers
	qe.accessCheckerLogger = logutil.NewThrottledLogger("accessChecker", 1*time.Second)

	// Stats
	stats.Publish("MaxResultSize", stats.IntFunc(qe.maxResultSize.Get))
	stats.Publish("StreamBufferSize", stats.IntFunc(qe.streamBufferSize.Get))
	queryStats = stats.NewTimings("Queries")
	QPSRates = stats.NewRates("QPS", queryStats, 15, 60*time.Second)
	waitStats = stats.NewTimings("Waits")
	killStats = stats.NewCounters("Kills")
	infoErrors = stats.NewCounters("InfoErrors")
	errorStats = stats.NewCounters("Errors")
	internalErrors = stats.NewCounters("InternalErrors")
	resultStats = stats.NewHistogram("Results", resultBuckets)
	stats.Publish("RowcacheSpotCheckRatio", stats.FloatFunc(func() float64 {
		return float64(qe.spotCheckFreq.Get()) / SPOT_CHECK_MULTIPLIER
	}))
	spotCheckCount = stats.NewInt("RowcacheSpotCheckCount")

	return qe
}
// Init initializes the VTGate server.
func Init(ctx context.Context, hc discovery.HealthCheck, topoServer topo.Server, serv topo.SrvTopoServer, cell string, retryCount int, tabletTypesToWait []topodatapb.TabletType) *VTGate {
	if rpcVTGate != nil {
		log.Fatalf("VTGate already initialized")
	}
	rpcVTGate = &VTGate{
		resolver:     NewResolver(hc, topoServer, serv, "VttabletCall", cell, retryCount, tabletTypesToWait),
		timings:      stats.NewMultiTimings("VtgateApi", []string{"Operation", "Keyspace", "DbType"}),
		rowsReturned: stats.NewMultiCounters("VtgateApiRowsReturned", []string{"Operation", "Keyspace", "DbType"}),

		logExecute:                  logutil.NewThrottledLogger("Execute", 5*time.Second),
		logExecuteShards:            logutil.NewThrottledLogger("ExecuteShards", 5*time.Second),
		logExecuteKeyspaceIds:       logutil.NewThrottledLogger("ExecuteKeyspaceIds", 5*time.Second),
		logExecuteKeyRanges:         logutil.NewThrottledLogger("ExecuteKeyRanges", 5*time.Second),
		logExecuteEntityIds:         logutil.NewThrottledLogger("ExecuteEntityIds", 5*time.Second),
		logExecuteBatchShards:       logutil.NewThrottledLogger("ExecuteBatchShards", 5*time.Second),
		logExecuteBatchKeyspaceIds:  logutil.NewThrottledLogger("ExecuteBatchKeyspaceIds", 5*time.Second),
		logStreamExecute:            logutil.NewThrottledLogger("StreamExecute", 5*time.Second),
		logStreamExecuteKeyspaceIds: logutil.NewThrottledLogger("StreamExecuteKeyspaceIds", 5*time.Second),
		logStreamExecuteKeyRanges:   logutil.NewThrottledLogger("StreamExecuteKeyRanges", 5*time.Second),
		logStreamExecuteShards:      logutil.NewThrottledLogger("StreamExecuteShards", 5*time.Second),
		logUpdateStream:             logutil.NewThrottledLogger("UpdateStream", 5*time.Second),
	}
	// vschemaCounters needs to be initialized before the planner to
	// catch the initial load stats.
	vschemaCounters = stats.NewCounters("VtgateVSchemaCounts")
	// Reuse the resolver's scatterConn.
	rpcVTGate.router = NewRouter(ctx, serv, cell, "VTGateRouter", rpcVTGate.resolver.scatterConn)
	normalErrors = stats.NewMultiCounters("VtgateApiErrorCounts", []string{"Operation", "Keyspace", "DbType"})
	infoErrors = stats.NewCounters("VtgateInfoErrorCounts")
	internalErrors = stats.NewCounters("VtgateInternalErrorCounts")
	qpsByOperation = stats.NewRates("QPSByOperation", stats.CounterForDimension(rpcVTGate.timings, "Operation"), 15, 1*time.Minute)
	qpsByKeyspace = stats.NewRates("QPSByKeyspace", stats.CounterForDimension(rpcVTGate.timings, "Keyspace"), 15, 1*time.Minute)
	qpsByDbType = stats.NewRates("QPSByDbType", stats.CounterForDimension(rpcVTGate.timings, "DbType"), 15, 1*time.Minute)
	errorsByOperation = stats.NewRates("ErrorsByOperation", stats.CounterForDimension(normalErrors, "Operation"), 15, 1*time.Minute)
	errorsByKeyspace = stats.NewRates("ErrorsByKeyspace", stats.CounterForDimension(normalErrors, "Keyspace"), 15, 1*time.Minute)
	errorsByDbType = stats.NewRates("ErrorsByDbType", stats.CounterForDimension(normalErrors, "DbType"), 15, 1*time.Minute)
	servenv.OnRun(func() {
		for _, f := range RegisterVTGates {
			f(rpcVTGate)
		}
	})
	vtgateOnce.Do(rpcVTGate.registerDebugHealthHandler)
	return rpcVTGate
}
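// In the Init above, stats.CounterForDimension projects one label of the
// "VtgateApi" MultiTimings into a counter view that stats.NewRates can
// sample, which is what yields the per-Operation, per-Keyspace, and
// per-DbType QPS vars. A hedged sketch of the effect (the label values and
// the startTime variable are assumptions for illustration):
//
//	// Recording one timed call under ("Execute", "ks1", "master") feeds
//	// all three rate vars through their respective dimensions.
//	rpcVTGate.timings.Record([]string{"Execute", "ks1", "master"}, startTime)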
func Init(serv SrvTopoServer, cell string, retryDelay time.Duration, retryCount int, timeout time.Duration) {
	if RpcVTGate != nil {
		log.Fatalf("VTGate already initialized")
	}
	RpcVTGate = &VTGate{
		resolver:   NewResolver(serv, "VttabletCall", cell, retryDelay, retryCount, timeout),
		timings:    stats.NewMultiTimings("VtgateApi", []string{"Operation", "Keyspace", "DbType"}),
		errors:     stats.NewMultiCounters("VtgateApiErrorCounts", []string{"Operation", "Keyspace", "DbType"}),
		infoErrors: stats.NewCounters("VtgateInfoErrorCounts"),

		logExecuteShard:             logutil.NewThrottledLogger("ExecuteShard", 5*time.Second),
		logExecuteKeyspaceIds:       logutil.NewThrottledLogger("ExecuteKeyspaceIds", 5*time.Second),
		logExecuteKeyRanges:         logutil.NewThrottledLogger("ExecuteKeyRanges", 5*time.Second),
		logExecuteEntityIds:         logutil.NewThrottledLogger("ExecuteEntityIds", 5*time.Second),
		logExecuteBatchShard:        logutil.NewThrottledLogger("ExecuteBatchShard", 5*time.Second),
		logExecuteBatchKeyspaceIds:  logutil.NewThrottledLogger("ExecuteBatchKeyspaceIds", 5*time.Second),
		logStreamExecuteKeyspaceIds: logutil.NewThrottledLogger("StreamExecuteKeyspaceIds", 5*time.Second),
		logStreamExecuteKeyRanges:   logutil.NewThrottledLogger("StreamExecuteKeyRanges", 5*time.Second),
		logStreamExecuteShard:       logutil.NewThrottledLogger("StreamExecuteShard", 5*time.Second),
	}
	QPSByOperation = stats.NewRates("QPSByOperation", stats.CounterForDimension(RpcVTGate.timings, "Operation"), 15, 1*time.Minute)
	QPSByKeyspace = stats.NewRates("QPSByKeyspace", stats.CounterForDimension(RpcVTGate.timings, "Keyspace"), 15, 1*time.Minute)
	QPSByDbType = stats.NewRates("QPSByDbType", stats.CounterForDimension(RpcVTGate.timings, "DbType"), 15, 1*time.Minute)
	ErrorsByOperation = stats.NewRates("ErrorsByOperation", stats.CounterForDimension(RpcVTGate.errors, "Operation"), 15, 1*time.Minute)
	ErrorsByKeyspace = stats.NewRates("ErrorsByKeyspace", stats.CounterForDimension(RpcVTGate.errors, "Keyspace"), 15, 1*time.Minute)
	ErrorsByDbType = stats.NewRates("ErrorsByDbType", stats.CounterForDimension(RpcVTGate.errors, "DbType"), 15, 1*time.Minute)
	for _, f := range RegisterVTGates {
		f(RpcVTGate)
	}
}
// NewResilientSrvTopoServer creates a new ResilientSrvTopoServer
// based on the provided topo.Server.
func NewResilientSrvTopoServer(base topo.Server, counterPrefix string) *ResilientSrvTopoServer {
	return &ResilientSrvTopoServer{
		topoServer: base,
		cacheTTL:   *srvTopoCacheTTL,
		counts:     stats.NewCounters(counterPrefix + "Counts"),

		srvKeyspaceNamesCache: make(map[string]*srvKeyspaceNamesEntry),
		srvKeyspaceCache:      make(map[string]*srvKeyspaceEntry),
	}
}
// NewResilientSrvTopoServer creates a new ResilientSrvTopoServer
// based on the provided SrvTopoServer.
func NewResilientSrvTopoServer(base SrvTopoServer) *ResilientSrvTopoServer {
	return &ResilientSrvTopoServer{
		topoServer: base,
		counts:     stats.NewCounters("ResilientSrvTopoServerCounts"),

		srvKeyspaceNamesCache: make(map[string]*srvKeyspaceNamesEntry),
		srvKeyspaceCache:      make(map[string]*srvKeyspaceEntry),
		endPointsCache:        make(map[string]*endPointsEntry),
	}
}
// Init initializes the VTGate server.
func Init(ctx context.Context, hc discovery.HealthCheck, topoServer topo.Server, serv topo.SrvTopoServer, cell string, retryDelay time.Duration, retryCount int, connTimeoutTotal, connTimeoutPerConn, connLife time.Duration, tabletTypesToWait []topodatapb.TabletType, maxInFlight int, testGateway string) *VTGate {
	if rpcVTGate != nil {
		log.Fatalf("VTGate already initialized")
	}
	rpcVTGate = &VTGate{
		resolver:     NewResolver(hc, topoServer, serv, "VttabletCall", cell, retryDelay, retryCount, connTimeoutTotal, connTimeoutPerConn, connLife, tabletTypesToWait, testGateway),
		timings:      stats.NewMultiTimings("VtgateApi", []string{"Operation", "Keyspace", "DbType"}),
		rowsReturned: stats.NewMultiCounters("VtgateApiRowsReturned", []string{"Operation", "Keyspace", "DbType"}),
		maxInFlight:  int64(maxInFlight),
		inFlight:     sync2.NewAtomicInt64(0),

		logExecute:                  logutil.NewThrottledLogger("Execute", 5*time.Second),
		logExecuteShards:            logutil.NewThrottledLogger("ExecuteShards", 5*time.Second),
		logExecuteKeyspaceIds:       logutil.NewThrottledLogger("ExecuteKeyspaceIds", 5*time.Second),
		logExecuteKeyRanges:         logutil.NewThrottledLogger("ExecuteKeyRanges", 5*time.Second),
		logExecuteEntityIds:         logutil.NewThrottledLogger("ExecuteEntityIds", 5*time.Second),
		logExecuteBatchShards:       logutil.NewThrottledLogger("ExecuteBatchShards", 5*time.Second),
		logExecuteBatchKeyspaceIds:  logutil.NewThrottledLogger("ExecuteBatchKeyspaceIds", 5*time.Second),
		logStreamExecute:            logutil.NewThrottledLogger("StreamExecute", 5*time.Second),
		logStreamExecuteKeyspaceIds: logutil.NewThrottledLogger("StreamExecuteKeyspaceIds", 5*time.Second),
		logStreamExecuteKeyRanges:   logutil.NewThrottledLogger("StreamExecuteKeyRanges", 5*time.Second),
		logStreamExecuteShards:      logutil.NewThrottledLogger("StreamExecuteShards", 5*time.Second),
	}
	// Reuse the resolver's scatterConn.
	rpcVTGate.router = NewRouter(ctx, serv, cell, "VTGateRouter", rpcVTGate.resolver.scatterConn)
	normalErrors = stats.NewMultiCounters("VtgateApiErrorCounts", []string{"Operation", "Keyspace", "DbType"})
	infoErrors = stats.NewCounters("VtgateInfoErrorCounts")
	internalErrors = stats.NewCounters("VtgateInternalErrorCounts")
	qpsByOperation = stats.NewRates("QPSByOperation", stats.CounterForDimension(rpcVTGate.timings, "Operation"), 15, 1*time.Minute)
	qpsByKeyspace = stats.NewRates("QPSByKeyspace", stats.CounterForDimension(rpcVTGate.timings, "Keyspace"), 15, 1*time.Minute)
	qpsByDbType = stats.NewRates("QPSByDbType", stats.CounterForDimension(rpcVTGate.timings, "DbType"), 15, 1*time.Minute)
	errorsByOperation = stats.NewRates("ErrorsByOperation", stats.CounterForDimension(normalErrors, "Operation"), 15, 1*time.Minute)
	errorsByKeyspace = stats.NewRates("ErrorsByKeyspace", stats.CounterForDimension(normalErrors, "Keyspace"), 15, 1*time.Minute)
	errorsByDbType = stats.NewRates("ErrorsByDbType", stats.CounterForDimension(normalErrors, "DbType"), 15, 1*time.Minute)
	for _, f := range RegisterVTGates {
		f(rpcVTGate)
	}
	return rpcVTGate
}
func TestCounters(t *testing.T) {
	v := stats.NewCounters("")
	load := func() {
		v.Add("a", 1)
	}
	testMetric(t, v, load,
		`Desc{fqName: "test_name", help: "test_help", constLabels: {}, variableLabels: [tag]}`,
		`label:<name:"tag" value:"a" > gauge:<value:1 > `,
	)
}
// Init initializes the VTGate server.
func Init(serv SrvTopoServer, schema *planbuilder.Schema, cell string, retryDelay time.Duration, retryCount int, connTimeoutTotal, connTimeoutPerConn, connLife time.Duration, maxInFlight int) {
	if rpcVTGate != nil {
		log.Fatalf("VTGate already initialized")
	}
	rpcVTGate = &VTGate{
		resolver:     NewResolver(serv, "VttabletCall", cell, retryDelay, retryCount, connTimeoutTotal, connTimeoutPerConn, connLife),
		timings:      stats.NewMultiTimings("VtgateApi", []string{"Operation", "Keyspace", "DbType"}),
		rowsReturned: stats.NewMultiCounters("VtgateApiRowsReturned", []string{"Operation", "Keyspace", "DbType"}),
		maxInFlight:  int64(maxInFlight),
		inFlight:     0,

		logExecute:                  logutil.NewThrottledLogger("Execute", 5*time.Second),
		logExecuteShard:             logutil.NewThrottledLogger("ExecuteShard", 5*time.Second),
		logExecuteKeyspaceIds:       logutil.NewThrottledLogger("ExecuteKeyspaceIds", 5*time.Second),
		logExecuteKeyRanges:         logutil.NewThrottledLogger("ExecuteKeyRanges", 5*time.Second),
		logExecuteEntityIds:         logutil.NewThrottledLogger("ExecuteEntityIds", 5*time.Second),
		logExecuteBatchShard:        logutil.NewThrottledLogger("ExecuteBatchShard", 5*time.Second),
		logExecuteBatchKeyspaceIds:  logutil.NewThrottledLogger("ExecuteBatchKeyspaceIds", 5*time.Second),
		logStreamExecute:            logutil.NewThrottledLogger("StreamExecute", 5*time.Second),
		logStreamExecuteKeyspaceIds: logutil.NewThrottledLogger("StreamExecuteKeyspaceIds", 5*time.Second),
		logStreamExecuteKeyRanges:   logutil.NewThrottledLogger("StreamExecuteKeyRanges", 5*time.Second),
		logStreamExecuteShard:       logutil.NewThrottledLogger("StreamExecuteShard", 5*time.Second),
	}
	// Reuse the resolver's scatterConn.
	rpcVTGate.router = NewRouter(serv, cell, schema, "VTGateRouter", rpcVTGate.resolver.scatterConn)
	normalErrors = stats.NewMultiCounters("VtgateApiErrorCounts", []string{"Operation", "Keyspace", "DbType"})
	infoErrors = stats.NewCounters("VtgateInfoErrorCounts")
	internalErrors = stats.NewCounters("VtgateInternalErrorCounts")
	qpsByOperation = stats.NewRates("QPSByOperation", stats.CounterForDimension(rpcVTGate.timings, "Operation"), 15, 1*time.Minute)
	qpsByKeyspace = stats.NewRates("QPSByKeyspace", stats.CounterForDimension(rpcVTGate.timings, "Keyspace"), 15, 1*time.Minute)
	qpsByDbType = stats.NewRates("QPSByDbType", stats.CounterForDimension(rpcVTGate.timings, "DbType"), 15, 1*time.Minute)
	errorsByOperation = stats.NewRates("ErrorsByOperation", stats.CounterForDimension(normalErrors, "Operation"), 15, 1*time.Minute)
	errorsByKeyspace = stats.NewRates("ErrorsByKeyspace", stats.CounterForDimension(normalErrors, "Keyspace"), 15, 1*time.Minute)
	errorsByDbType = stats.NewRates("ErrorsByDbType", stats.CounterForDimension(normalErrors, "DbType"), 15, 1*time.Minute)
	for _, f := range RegisterVTGates {
		f(rpcVTGate)
	}
}
func NewQueryEngine(config Config) *QueryEngine {
	qe := &QueryEngine{}
	qe.cachePool = NewCachePool(config.RowCache, time.Duration(config.QueryTimeout*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.schemaInfo = NewSchemaInfo(config.QueryCacheSize, time.Duration(config.SchemaReloadTime*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.connPool = NewConnectionPool(config.PoolSize, time.Duration(config.IdleTimeout*1e9))
	qe.streamConnPool = NewConnectionPool(config.StreamPoolSize, time.Duration(config.IdleTimeout*1e9))
	qe.reservedPool = NewReservedPool()
	// The number of connections in the pool has to be greater than TransactionCap.
	qe.txPool = NewConnectionPool(config.TransactionCap, time.Duration(config.IdleTimeout*1e9))
	qe.activeTxPool = NewActiveTxPool(time.Duration(config.TransactionTimeout * 1e9))
	qe.activePool = NewActivePool(time.Duration(config.QueryTimeout*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.consolidator = NewConsolidator()
	qe.maxResultSize = sync2.AtomicInt32(config.MaxResultSize)
	qe.streamBufferSize = sync2.AtomicInt32(config.StreamBufferSize)
	queryStats = stats.NewTimings("Queries")
	stats.NewRates("QPS", queryStats, 15, 60e9)
	waitStats = stats.NewTimings("Waits")
	killStats = stats.NewCounters("Kills")
	errorStats = stats.NewCounters("Errors")
	resultStats = stats.NewHistogram("Results", resultBuckets)
	return qe
}
// NewResilientSrvTopoServer creates a new ResilientSrvTopoServer
// based on the provided topo.Server.
func NewResilientSrvTopoServer(base topo.Server, counterName string) *ResilientSrvTopoServer {
	return &ResilientSrvTopoServer{
		topoServer:         base,
		cacheTTL:           *srvTopoCacheTTL,
		enableRemoteMaster: *enableRemoteMaster,
		counts:             stats.NewCounters(counterName),

		srvKeyspaceNamesCache: make(map[string]*srvKeyspaceNamesEntry),
		srvKeyspaceCache:      make(map[string]*srvKeyspaceEntry),
		endPointsCache:        make(map[string]*endPointsEntry),
	}
}
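// A hedged usage sketch for the constructor above (recordTopoLookup and the
// "CacheHit"/"Query" categories are assumptions for illustration, not
// necessarily the categories the real cache paths use):
func recordTopoLookup(rsts *ResilientSrvTopoServer, fromCache bool) {
	if fromCache {
		rsts.counts.Add("CacheHit", 1)
	} else {
		rsts.counts.Add("Query", 1)
	}
}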
// NewQueryEngine creates a new QueryEngine.
// This is a singleton class.
// You must call this only once.
func NewQueryEngine(config Config) *QueryEngine {
	qe := &QueryEngine{}

	// services
	qe.cachePool = NewCachePool("RowcachePool", config.RowCache, time.Duration(config.QueryTimeout*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.schemaInfo = NewSchemaInfo(config.QueryCacheSize, time.Duration(config.SchemaReloadTime*1e9), time.Duration(config.IdleTimeout*1e9), config.SensitiveMode)
	qe.connPool = NewConnectionPool("ConnPool", config.PoolSize, time.Duration(config.IdleTimeout*1e9))
	qe.streamConnPool = NewConnectionPool("StreamConnPool", config.StreamPoolSize, time.Duration(config.IdleTimeout*1e9))
	// The number of connections in the pool has to be greater than TransactionCap.
	qe.txPool = NewConnectionPool("TransactionPool", config.TransactionCap, time.Duration(config.IdleTimeout*1e9))
	qe.activeTxPool = NewActiveTxPool("ActiveTransactionPool", time.Duration(config.TransactionTimeout*1e9))
	qe.activePool = NewActivePool("ActivePool", time.Duration(config.QueryTimeout*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.consolidator = NewConsolidator()

	// vars
	qe.spotCheckFreq = sync2.AtomicInt64(config.SpotCheckRatio * SPOT_CHECK_MULTIPLIER)
	if config.StrictMode {
		qe.strictMode.Set(1)
	}
	qe.maxResultSize = sync2.AtomicInt64(config.MaxResultSize)
	qe.streamBufferSize = sync2.AtomicInt64(config.StreamBufferSize)

	// stats
	stats.Publish("MaxResultSize", stats.IntFunc(qe.maxResultSize.Get))
	stats.Publish("StreamBufferSize", stats.IntFunc(qe.streamBufferSize.Get))
	queryStats = stats.NewTimings("Queries")
	QPSRates = stats.NewRates("QPS", queryStats, 15, 60*time.Second)
	waitStats = stats.NewTimings("Waits")
	killStats = stats.NewCounters("Kills")
	infoErrors = stats.NewCounters("InfoErrors")
	errorStats = stats.NewCounters("Errors")
	internalErrors = stats.NewCounters("InternalErrors")
	resultStats = stats.NewHistogram("Results", resultBuckets)
	stats.Publish("SpotCheckRatio", stats.FloatFunc(func() float64 {
		return float64(qe.spotCheckFreq.Get()) / SPOT_CHECK_MULTIPLIER
	}))
	spotCheckCount = stats.NewInt("SpotCheckCount")

	return qe
}
// NewQueryServiceStats returns a new QueryServiceStats instance.
func NewQueryServiceStats(statsPrefix string, enablePublishStats bool) *QueryServiceStats {
	mysqlStatsName := ""
	queryStatsName := ""
	qpsRateName := ""
	waitStatsName := ""
	killStatsName := ""
	infoErrorsName := ""
	errorStatsName := ""
	internalErrorsName := ""
	resultStatsName := ""
	spotCheckCountName := ""
	if enablePublishStats {
		mysqlStatsName = statsPrefix + "Mysql"
		queryStatsName = statsPrefix + "Queries"
		qpsRateName = statsPrefix + "QPS"
		waitStatsName = statsPrefix + "Waits"
		killStatsName = statsPrefix + "Kills"
		infoErrorsName = statsPrefix + "InfoErrors"
		errorStatsName = statsPrefix + "Errors"
		internalErrorsName = statsPrefix + "InternalErrors"
		resultStatsName = statsPrefix + "Results"
		spotCheckCountName = statsPrefix + "RowcacheSpotCheckCount"
	}
	resultBuckets := []int64{0, 1, 5, 10, 50, 100, 500, 1000, 5000, 10000}
	queryStats := stats.NewTimings(queryStatsName)
	return &QueryServiceStats{
		MySQLStats:     stats.NewTimings(mysqlStatsName),
		QueryStats:     queryStats,
		WaitStats:      stats.NewTimings(waitStatsName),
		KillStats:      stats.NewCounters(killStatsName),
		InfoErrors:     stats.NewCounters(infoErrorsName),
		ErrorStats:     stats.NewCounters(errorStatsName),
		InternalErrors: stats.NewCounters(internalErrorsName),
		QPSRates:       stats.NewRates(qpsRateName, queryStats, 15, 60*time.Second),
		ResultStats:    stats.NewHistogram(resultStatsName, resultBuckets),
		SpotCheckCount: stats.NewInt(spotCheckCountName),
	}
}
// license that can be found in the LICENSE file.

package streamlog

import (
	"io"
	"net/http"
	"net/url"
	"sync"

	log "github.com/golang/glog"

	"github.com/youtube/vitess/go/stats"
	"github.com/youtube/vitess/go/sync2"
)

var droppedMessages = stats.NewCounters("StreamlogDroppedMessages")

// StreamLogger is a non-blocking broadcaster of messages.
// Subscribers can use channels or HTTP.
type StreamLogger struct {
	name       string
	dataQueue  chan Formatter
	mu         sync.Mutex
	subscribed map[chan string]url.Values
	// size is used to check if there are any subscriptions. Keep
	// it atomically in sync with the size of subscribed.
	size sync2.AtomicUint32
}

// Formatter defines the interface that messages have to satisfy
// to be broadcast through StreamLogger.
package zk

import (
	"bytes"
	"fmt"
	"log"
	"math/rand"
	"strings"
	"sync"
	"time"

	"github.com/youtube/vitess/go/stats"
	"launchpad.net/gozk/zookeeper"
)

var cachedConnStates = stats.NewCounters("CachedConn")
var cachedConnStatesMutex sync.Mutex

func init() {
	rand.Seed(time.Now().UnixNano())
}

/*
When you need to talk to multiple zk cells, you need a simple
abstraction so you aren't caching clients all over the place.

ConnCache guarantees that you have at most one zookeeper connection
per cell.
*/

const (
	DISCONNECTED = 0
	CONNECTING   = 1
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package tabletserver

import (
	"encoding/binary"
	"time"

	"github.com/youtube/vitess/go/sqltypes"
	"github.com/youtube/vitess/go/stats"
	"github.com/youtube/vitess/go/vt/schema"
)

var cacheStats = stats.NewTimings("Cache")
var cacheCounters = stats.NewCounters("CacheCounters")

var pack = binary.BigEndian

const (
	RC_DELETED = 1
)

type RowCache struct {
	tableInfo *TableInfo
	prefix    string
	cachePool *CachePool
}

type RCResult struct {
	Row []sqltypes.Value
)

/*
API and config for UpdateStream Service
*/

const (
	DISABLED int64 = iota
	ENABLED
)

var usStateNames = map[int64]string{
	ENABLED:  "Enabled",
	DISABLED: "Disabled",
}

var (
	streamCount          = stats.NewCounters("UpdateStreamStreamCount")
	updateStreamErrors   = stats.NewCounters("UpdateStreamErrors")
	updateStreamEvents   = stats.NewCounters("UpdateStreamEvents")
	keyrangeStatements   = stats.NewInt("UpdateStreamKeyRangeStatements")
	keyrangeTransactions = stats.NewInt("UpdateStreamKeyRangeTransactions")
	tablesStatements     = stats.NewInt("UpdateStreamTablesStatements")
	tablesTransactions   = stats.NewInt("UpdateStreamTablesTransactions")
)

type UpdateStream struct {
	mycnf          *mysqlctl.Mycnf
	actionLock     sync.Mutex
	state          sync2.AtomicInt64
	mysqld         *mysqlctl.Mysqld
	stateWaitGroup sync.WaitGroup
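// A small sketch of how usStateNames can back a human-readable status
// (stateName is a hypothetical helper; it assumes the sync2.AtomicInt64
// state field declared on UpdateStream above):
func (updateStream *UpdateStream) stateName() string {
	return usStateNames[updateStream.state.Get()]
}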
var (
	retryDuration         = flag.Duration("retry_duration", 2*time.Hour, "Amount of time we wait before giving up on a retryable action (e.g. write to destination, waiting for healthy tablets)")
	executeFetchRetryTime = flag.Duration("executefetch_retry_time", 30*time.Second, "Amount of time we should wait before retrying ExecuteFetch calls")
	remoteActionsTimeout  = flag.Duration("remote_actions_timeout", time.Minute, "Amount of time to wait for remote actions (like replication stop, ...)")
	useV3ReshardingMode   = flag.Bool("use_v3_resharding_mode", false, "True iff the workers should use V3-style resharding, which doesn't require a preset sharding key column.")

	healthCheckTopologyRefresh = flag.Duration("worker_healthcheck_topology_refresh", 30*time.Second, "refresh interval for re-reading the topology")
	healthcheckRetryDelay      = flag.Duration("worker_healthcheck_retry_delay", 5*time.Second, "delay before retrying a failed healthcheck")
	healthCheckTimeout         = flag.Duration("worker_healthcheck_timeout", time.Minute, "the health check timeout period")

	statsState = stats.NewString("WorkerState")
	// statsRetryCount is the total number of times a query to vttablet had to be retried.
	statsRetryCount = stats.NewInt("WorkerRetryCount")
	// statsRetryCounters groups the number of retries by category, e.g. "TimeoutError" or "Readonly".
	statsRetryCounters = stats.NewCounters("WorkerRetryCounters")
	// statsThrottledCounters is the number of times a write has been throttled,
	// grouped by (keyspace, shard, threadID). Mainly used for testing.
	// If throttling is enabled, this should always be non-zero for all threads.
	statsThrottledCounters = stats.NewMultiCounters("WorkerThrottledCounters", []string{"keyspace", "shardname", "thread_id"})
	// statsStateDurationsNs tracks for each state how much time was spent in it. Mainly used for testing.
	statsStateDurationsNs = stats.NewCounters("WorkerStateDurations")
	// statsOnlineInsertsCounters tracks for every table how many rows were
	// inserted during the online clone (reconciliation) phase.
	statsOnlineInsertsCounters = stats.NewCounters("WorkerOnlineInsertsCounters")
	// statsOnlineUpdatesCounters tracks for every table how many rows were updated.
	statsOnlineUpdatesCounters = stats.NewCounters("WorkerOnlineUpdatesCounters")
	// statsOnlineDeletesCounters tracks for every table how many rows were deleted.
	statsOnlineDeletesCounters = stats.NewCounters("WorkerOnlineDeletesCounters")
	// statsOnlineEqualRowsCounters tracks for every table how many rows were equal.
// Package streamlog provides a non-blocking message broadcaster.
package streamlog

import (
	"io"
	"net/http"
	"net/url"
	"sync"

	log "github.com/golang/glog"

	"github.com/youtube/vitess/go/acl"
	"github.com/youtube/vitess/go/stats"
)

var (
	sendCount         = stats.NewCounters("StreamlogSend")
	deliveredCount    = stats.NewMultiCounters("StreamlogDelivered", []string{"Log", "Subscriber"})
	deliveryDropCount = stats.NewMultiCounters("StreamlogDeliveryDroppedMessages", []string{"Log", "Subscriber"})
)

// StreamLogger is a non-blocking broadcaster of messages.
// Subscribers can use channels or HTTP.
type StreamLogger struct {
	name       string
	size       int
	mu         sync.Mutex
	subscribed map[chan interface{}]string
}

// New returns a new StreamLogger that can stream events to subscribers.
// The size parameter defines the channel size for the subscribers.
"strings" log "github.com/golang/glog" "golang.org/x/net/context" "github.com/youtube/vitess/go/sqldb" "github.com/youtube/vitess/go/stats" "github.com/youtube/vitess/go/vt/mysqlctl" "github.com/youtube/vitess/go/vt/mysqlctl/replication" binlogdatapb "github.com/youtube/vitess/go/vt/proto/binlogdata" querypb "github.com/youtube/vitess/go/vt/proto/query" ) var ( binlogStreamerErrors = stats.NewCounters("BinlogStreamerErrors") // ErrClientEOF is returned by Streamer if the stream ended because the // consumer of the stream indicated it doesn't want any more events. ErrClientEOF = fmt.Errorf("binlog stream consumer ended the reply stream") // ErrServerEOF is returned by Streamer if the stream ended because the // connection to the mysqld server was lost, or the stream was terminated by // mysqld. ErrServerEOF = fmt.Errorf("binlog stream connection was closed by mysqld") // statementPrefixes are normal sql statement prefixes. statementPrefixes = map[string]binlogdatapb.BinlogTransaction_Statement_Category{ "begin": binlogdatapb.BinlogTransaction_Statement_BL_BEGIN, "commit": binlogdatapb.BinlogTransaction_Statement_BL_COMMIT, "rollback": binlogdatapb.BinlogTransaction_Statement_BL_ROLLBACK, "insert": binlogdatapb.BinlogTransaction_Statement_BL_DML,
	// GetDestinationMaster returns the most recently resolved destination master for a particular shard.
	GetDestinationMaster(shardName string) (*topo.TabletInfo, error)
}

var (
	resolveTTL            = flag.Duration("resolve_ttl", 15*time.Second, "Amount of time that a topo resolution can be cached for")
	executeFetchRetryTime = flag.Duration("executefetch_retry_time", 30*time.Second, "Amount of time we should wait before retrying ExecuteFetch calls")
	remoteActionsTimeout  = flag.Duration("remote_actions_timeout", time.Minute, "Amount of time to wait for remote actions (like replication stop, ...)")

	statsState = stats.NewString("WorkerState")
	// statsDestinationAttemptedResolves is the number of times the worker attempts to re-resolve the masters.
	statsDestinationAttemptedResolves = stats.NewInt("WorkerDestinationAttemptedResolves")
	// statsDestinationActualResolves is the number of times the worker actually hits
	// the topo server, i.e. it doesn't use a cached topology.
	statsDestinationActualResolves = stats.NewInt("WorkerDestinationActualResolves")
	statsRetryCounters             = stats.NewCounters("WorkerRetryCount")
)

// resetVars resets the debug variables that are meant to provide information on a
// per-run basis. This should be called at the beginning of each worker run.
func resetVars() {
	statsState.Set("")
	statsDestinationAttemptedResolves.Set(0)
	statsDestinationActualResolves.Set(0)
	statsRetryCounters.Reset()
}

// checkDone returns ctx.Err() iff ctx.Done() is closed.
func checkDone(ctx context.Context) error {
	select {
	case <-ctx.Done():