// NewQueryEngine creates a new QueryEngine.
func NewQueryEngine(config Config) *QueryEngine {
	qe := &QueryEngine{}
	qe.cachePool = NewCachePool("CachePool", config.RowCache, time.Duration(config.QueryTimeout*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.schemaInfo = NewSchemaInfo(config.QueryCacheSize, time.Duration(config.SchemaReloadTime*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.connPool = NewConnectionPool("ConnPool", config.PoolSize, time.Duration(config.IdleTimeout*1e9))
	qe.streamConnPool = NewConnectionPool("StreamConnPool", config.StreamPoolSize, time.Duration(config.IdleTimeout*1e9))
	qe.streamTokens = sync2.NewSemaphore(config.StreamExecThrottle, time.Duration(config.StreamWaitTimeout*1e9))
	qe.reservedPool = NewReservedPool("ReservedPool")
	qe.txPool = NewConnectionPool("TxPool", config.TransactionCap, time.Duration(config.IdleTimeout*1e9)) // connections in pool have to be > TransactionCap
	qe.activeTxPool = NewActiveTxPool("ActiveTxPool", time.Duration(config.TransactionTimeout*1e9))
	qe.activePool = NewActivePool("ActivePool", time.Duration(config.QueryTimeout*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.consolidator = NewConsolidator()
	qe.spotCheckFreq = sync2.AtomicInt64(config.SpotCheckRatio * SPOT_CHECK_MULTIPLIER)
	qe.maxResultSize = sync2.AtomicInt64(config.MaxResultSize)
	qe.streamBufferSize = sync2.AtomicInt64(config.StreamBufferSize)
	stats.Publish("MaxResultSize", stats.IntFunc(qe.maxResultSize.Get))
	stats.Publish("StreamBufferSize", stats.IntFunc(qe.streamBufferSize.Get))
	queryStats = stats.NewTimings("Queries")
	stats.NewRates("QPS", queryStats, 15, 60e9)
	waitStats = stats.NewTimings("Waits")
	killStats = stats.NewCounters("Kills")
	errorStats = stats.NewCounters("Errors")
	resultStats = stats.NewHistogram("Results", resultBuckets)
	stats.Publish("SpotCheckRatio", stats.FloatFunc(func() float64 {
		return float64(qe.spotCheckFreq.Get()) / SPOT_CHECK_MULTIPLIER
	}))
	spotCheckCount = stats.NewInt("SpotCheckCount")
	return qe
}
// NewQueryEngine creates a new QueryEngine.
// This is a singleton class.
// You must call this only once.
func NewQueryEngine(config Config) *QueryEngine {
	qe := &QueryEngine{}
	qe.schemaInfo = NewSchemaInfo(config.QueryCacheSize, time.Duration(config.SchemaReloadTime*1e9), time.Duration(config.IdleTimeout*1e9))
	mysqlStats = stats.NewTimings("Mysql")

	// Pools
	qe.cachePool = NewCachePool("Rowcache", config.RowCache, time.Duration(config.QueryTimeout*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.connPool = dbconnpool.NewConnectionPool("ConnPool", config.PoolSize, time.Duration(config.IdleTimeout*1e9))
	qe.streamConnPool = dbconnpool.NewConnectionPool("StreamConnPool", config.StreamPoolSize, time.Duration(config.IdleTimeout*1e9))
	qe.txPool = dbconnpool.NewConnectionPool("TransactionPool", config.TransactionCap, time.Duration(config.IdleTimeout*1e9)) // connections in pool have to be > TransactionCap

	// Services
	qe.activeTxPool = NewActiveTxPool("ActiveTransactionPool", time.Duration(config.TransactionTimeout*1e9))
	qe.connKiller = NewConnectionKiller(1, time.Duration(config.IdleTimeout*1e9))
	qe.activePool = NewActivePool("ActivePool", time.Duration(config.QueryTimeout*1e9), qe.connKiller)
	qe.consolidator = NewConsolidator()
	qe.invalidator = NewRowcacheInvalidator(qe)
	qe.streamQList = NewQueryList(qe.connKiller)

	// Vars
	qe.spotCheckFreq = sync2.AtomicInt64(config.SpotCheckRatio * SPOT_CHECK_MULTIPLIER)
	if config.StrictMode {
		qe.strictMode.Set(1)
	}
	qe.strictTableAcl = config.StrictTableAcl
	qe.maxResultSize = sync2.AtomicInt64(config.MaxResultSize)
	qe.streamBufferSize = sync2.AtomicInt64(config.StreamBufferSize)

	// Loggers
	qe.accessCheckerLogger = logutil.NewThrottledLogger("accessChecker", 1*time.Second)

	// Stats
	stats.Publish("MaxResultSize", stats.IntFunc(qe.maxResultSize.Get))
	stats.Publish("StreamBufferSize", stats.IntFunc(qe.streamBufferSize.Get))
	queryStats = stats.NewTimings("Queries")
	QPSRates = stats.NewRates("QPS", queryStats, 15, 60*time.Second)
	waitStats = stats.NewTimings("Waits")
	killStats = stats.NewCounters("Kills")
	infoErrors = stats.NewCounters("InfoErrors")
	errorStats = stats.NewCounters("Errors")
	internalErrors = stats.NewCounters("InternalErrors")
	resultStats = stats.NewHistogram("Results", resultBuckets)
	stats.Publish("RowcacheSpotCheckRatio", stats.FloatFunc(func() float64 {
		return float64(qe.spotCheckFreq.Get()) / SPOT_CHECK_MULTIPLIER
	}))
	spotCheckCount = stats.NewInt("RowcacheSpotCheckCount")
	return qe
}
// NewTxPool creates a new TxPool. It's not operational until it's Open'd.
func NewTxPool(
	name string,
	txStatsPrefix string,
	capacity int,
	timeout time.Duration,
	poolTimeout time.Duration,
	idleTimeout time.Duration,
	enablePublishStats bool,
	qStats *QueryServiceStats) *TxPool {
	txStatsName := ""
	if enablePublishStats {
		txStatsName = txStatsPrefix + "Transactions"
	}
	axp := &TxPool{
		pool:              NewConnPool(name, capacity, idleTimeout, enablePublishStats, qStats),
		activePool:        pools.NewNumbered(),
		lastID:            sync2.AtomicInt64(time.Now().UnixNano()),
		timeout:           sync2.AtomicDuration(timeout),
		poolTimeout:       sync2.AtomicDuration(poolTimeout),
		ticks:             timer.NewTimer(timeout / 10),
		txStats:           stats.NewTimings(txStatsName),
		queryServiceStats: qStats,
	}
	// Careful: pool also exports name+"xxx" vars,
	// but we know it doesn't export Timeout.
	if enablePublishStats {
		stats.Publish(name+"Timeout", stats.DurationFunc(axp.timeout.Get))
		stats.Publish(name+"PoolTimeout", stats.DurationFunc(axp.poolTimeout.Get))
	}
	return axp
}
// NewActiveTxPool creates a new ActiveTxPool.
func NewActiveTxPool(timeout time.Duration) *ActiveTxPool {
	return &ActiveTxPool{
		pool:    pools.NewNumbered(),
		lastId:  sync2.AtomicInt64(time.Now().UnixNano()),
		timeout: sync2.AtomicDuration(timeout),
		ticks:   timer.NewTimer(timeout / 10),
		txStats: stats.NewTimings("Transactions"),
	}
}
// NewQueryEngine creates a new QueryEngine.
// This is a singleton class.
// You must call this only once.
func NewQueryEngine(config Config) *QueryEngine {
	qe := &QueryEngine{}

	// services
	qe.cachePool = NewCachePool("RowcachePool", config.RowCache, time.Duration(config.QueryTimeout*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.schemaInfo = NewSchemaInfo(config.QueryCacheSize, time.Duration(config.SchemaReloadTime*1e9), time.Duration(config.IdleTimeout*1e9), config.SensitiveMode)
	qe.connPool = NewConnectionPool("ConnPool", config.PoolSize, time.Duration(config.IdleTimeout*1e9))
	qe.streamConnPool = NewConnectionPool("StreamConnPool", config.StreamPoolSize, time.Duration(config.IdleTimeout*1e9))
	qe.txPool = NewConnectionPool("TransactionPool", config.TransactionCap, time.Duration(config.IdleTimeout*1e9)) // connections in pool have to be > TransactionCap
	qe.activeTxPool = NewActiveTxPool("ActiveTransactionPool", time.Duration(config.TransactionTimeout*1e9))
	qe.activePool = NewActivePool("ActivePool", time.Duration(config.QueryTimeout*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.consolidator = NewConsolidator()

	// vars
	qe.spotCheckFreq = sync2.AtomicInt64(config.SpotCheckRatio * SPOT_CHECK_MULTIPLIER)
	if config.StrictMode {
		qe.strictMode.Set(1)
	}
	qe.maxResultSize = sync2.AtomicInt64(config.MaxResultSize)
	qe.streamBufferSize = sync2.AtomicInt64(config.StreamBufferSize)

	// stats
	stats.Publish("MaxResultSize", stats.IntFunc(qe.maxResultSize.Get))
	stats.Publish("StreamBufferSize", stats.IntFunc(qe.streamBufferSize.Get))
	queryStats = stats.NewTimings("Queries")
	QPSRates = stats.NewRates("QPS", queryStats, 15, 60*time.Second)
	waitStats = stats.NewTimings("Waits")
	killStats = stats.NewCounters("Kills")
	infoErrors = stats.NewCounters("InfoErrors")
	errorStats = stats.NewCounters("Errors")
	internalErrors = stats.NewCounters("InternalErrors")
	resultStats = stats.NewHistogram("Results", resultBuckets)
	stats.Publish("SpotCheckRatio", stats.FloatFunc(func() float64 {
		return float64(qe.spotCheckFreq.Get()) / SPOT_CHECK_MULTIPLIER
	}))
	spotCheckCount = stats.NewInt("SpotCheckCount")
	return qe
}
// NewActiveTxPool creates a new ActiveTxPool and publishes its size and
// timeout as stats vars under the given name.
func NewActiveTxPool(name string, timeout time.Duration) *ActiveTxPool {
	axp := &ActiveTxPool{
		pool:    pools.NewNumbered(),
		lastId:  sync2.AtomicInt64(time.Now().UnixNano()),
		timeout: sync2.AtomicDuration(timeout),
		ticks:   timer.NewTimer(timeout / 10),
		txStats: stats.NewTimings("Transactions"),
	}
	stats.Publish(name+"Size", stats.IntFunc(axp.pool.Size))
	stats.Publish(
		name+"Timeout",
		stats.DurationFunc(func() time.Duration { return axp.timeout.Get() }),
	)
	return axp
}
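// Illustrative sketch (not part of the original source): ticks is created with
// an interval of timeout/10, so its housekeeping callback fires roughly ten
// times per timeout window and an expired transaction is noticed within about
// 10% of its deadline. Only the timer's Start method is assumed here;
// startTxSweeperSketch and the sweep body are hypothetical.
func startTxSweeperSketch(axp *ActiveTxPool) {
	axp.ticks.Start(func() {
		// Hypothetical sweep: scan axp.pool for transactions that have been
		// open longer than axp.timeout.Get() and terminate them.
	})
}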
// NewResourcePool creates a new ResourcePool.
// capacity is the initial capacity of the pool.
// maxCap is the maximum capacity.
// If a resource is unused beyond idleTimeout, it's discarded.
// An idleTimeout of 0 means that there is no timeout.
func NewResourcePool(factory Factory, capacity, maxCap int, idleTimeout time.Duration) *ResourcePool {
	if capacity <= 0 || maxCap <= 0 || capacity > maxCap {
		panic(errors.New("invalid/out of range capacity"))
	}
	rp := &ResourcePool{
		resources:   make(chan resourceWrapper, maxCap),
		factory:     factory,
		capacity:    sync2.AtomicInt64(capacity),
		idleTimeout: sync2.AtomicDuration(idleTimeout),
	}
	for i := 0; i < capacity; i++ {
		rp.resources <- resourceWrapper{}
	}
	return rp
}
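// Illustrative usage sketch (not part of the original source): it shows a
// caller creating a pool with an initial capacity of 5, a maximum of 10, and a
// one-minute idle timeout. The fakeConn type and exampleResourcePoolUsage are
// assumptions for the example only; the Factory signature, a Resource with a
// Close method, and the Get/Put pair are assumed from this version of the
// pools package (later versions pass a context to Get).
type fakeConn struct{}

func (fakeConn) Close() {}

func exampleResourcePoolUsage() error {
	pool := NewResourcePool(func() (Resource, error) {
		return fakeConn{}, nil
	}, 5, 10, time.Minute)
	r, err := pool.Get()
	if err != nil {
		return err
	}
	defer pool.Put(r) // return the resource so it can be reused
	return nil
}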
// NewTxPool creates a new TxPool.
func NewTxPool(name string, capacity int, timeout, poolTimeout, idleTimeout time.Duration) *TxPool {
	axp := &TxPool{
		pool:        dbconnpool.NewConnectionPool(name, capacity, idleTimeout),
		activePool:  pools.NewNumbered(),
		lastId:      sync2.AtomicInt64(time.Now().UnixNano()),
		timeout:     sync2.AtomicDuration(timeout),
		poolTimeout: sync2.AtomicDuration(poolTimeout),
		ticks:       timer.NewTimer(timeout / 10),
		txStats:     stats.NewTimings("Transactions"),
	}
	// Careful: pool also exports name+"xxx" vars,
	// but we know it doesn't export Timeout.
	stats.Publish(name+"Timeout", stats.DurationFunc(axp.timeout.Get))
	stats.Publish(name+"PoolTimeout", stats.DurationFunc(axp.poolTimeout.Get))
	return axp
}
// NewQueryEngine creates a new QueryEngine.
// This is a singleton class.
// You must call this only once.
func NewQueryEngine(config Config) *QueryEngine {
	qe := &QueryEngine{enableAutoCommit: config.EnableAutoCommit}
	qe.queryServiceStats = NewQueryServiceStats(config.StatsPrefix, config.EnablePublishStats)
	qe.schemaInfo = NewSchemaInfo(
		config.QueryCacheSize,
		config.StatsPrefix,
		map[string]string{
			debugQueryPlansKey: config.DebugURLPrefix + "/query_plans",
			debugQueryStatsKey: config.DebugURLPrefix + "/query_stats",
			debugTableStatsKey: config.DebugURLPrefix + "/table_stats",
			debugSchemaKey:     config.DebugURLPrefix + "/schema",
		},
		time.Duration(config.SchemaReloadTime*1e9),
		time.Duration(config.IdleTimeout*1e9),
		config.EnablePublishStats,
		qe.queryServiceStats,
	)

	// Pools
	qe.cachePool = NewCachePool(
		config.PoolNamePrefix+"Rowcache",
		config.RowCache,
		time.Duration(config.IdleTimeout*1e9),
		config.DebugURLPrefix+"/memcache/",
		config.EnablePublishStats,
		qe.queryServiceStats,
	)
	qe.connPool = NewConnPool(
		config.PoolNamePrefix+"ConnPool",
		config.PoolSize,
		time.Duration(config.IdleTimeout*1e9),
		config.EnablePublishStats,
		qe.queryServiceStats,
	)
	qe.streamConnPool = NewConnPool(
		config.PoolNamePrefix+"StreamConnPool",
		config.StreamPoolSize,
		time.Duration(config.IdleTimeout*1e9),
		config.EnablePublishStats,
		qe.queryServiceStats,
	)

	// Services
	qe.txPool = NewTxPool(
		config.PoolNamePrefix+"TransactionPool",
		config.StatsPrefix,
		config.TransactionCap,
		time.Duration(config.TransactionTimeout*1e9),
		time.Duration(config.TxPoolTimeout*1e9),
		time.Duration(config.IdleTimeout*1e9),
		config.EnablePublishStats,
		qe.queryServiceStats,
	)
	qe.consolidator = sync2.NewConsolidator()
	http.Handle(config.DebugURLPrefix+"/consolidations", qe.consolidator)
	qe.invalidator = NewRowcacheInvalidator(config.StatsPrefix, qe, config.EnablePublishStats)
	qe.streamQList = NewQueryList()

	// Vars
	qe.queryTimeout.Set(time.Duration(config.QueryTimeout * 1e9))
	qe.spotCheckFreq = sync2.AtomicInt64(config.SpotCheckRatio * spotCheckMultiplier)
	if config.StrictMode {
		qe.strictMode.Set(1)
	}
	qe.strictTableAcl = config.StrictTableAcl
	qe.enableTableAclDryRun = config.EnableTableAclDryRun
	qe.exemptACL = config.TableAclExemptACL
	qe.maxResultSize = sync2.AtomicInt64(config.MaxResultSize)
	qe.maxDMLRows = sync2.AtomicInt64(config.MaxDMLRows)
	qe.streamBufferSize = sync2.AtomicInt64(config.StreamBufferSize)

	// Loggers
	qe.accessCheckerLogger = logutil.NewThrottledLogger("accessChecker", 1*time.Second)

	var tableACLAllowedName string
	var tableACLDeniedName string
	var tableACLPseudoDeniedName string

	// Stats
	if config.EnablePublishStats {
		stats.Publish(config.StatsPrefix+"MaxResultSize", stats.IntFunc(qe.maxResultSize.Get))
		stats.Publish(config.StatsPrefix+"MaxDMLRows", stats.IntFunc(qe.maxDMLRows.Get))
		stats.Publish(config.StatsPrefix+"StreamBufferSize", stats.IntFunc(qe.streamBufferSize.Get))
		stats.Publish(config.StatsPrefix+"QueryTimeout", stats.DurationFunc(qe.queryTimeout.Get))
		stats.Publish(config.StatsPrefix+"RowcacheSpotCheckRatio", stats.FloatFunc(func() float64 {
			return float64(qe.spotCheckFreq.Get()) / spotCheckMultiplier
		}))
		stats.Publish(config.StatsPrefix+"TableACLExemptCount", stats.IntFunc(qe.tableaclExemptCount.Get))
		tableACLAllowedName = "TableACLAllowed"
		tableACLDeniedName = "TableACLDenied"
		tableACLPseudoDeniedName = "TableACLPseudoDenied"
	}
	qe.tableaclAllowed = stats.NewMultiCounters(tableACLAllowedName, []string{"TableName", "TableGroup", "PlanID", "Username"})
	qe.tableaclDenied = stats.NewMultiCounters(tableACLDeniedName, []string{"TableName", "TableGroup", "PlanID", "Username"})
	qe.tableaclPseudoDenied = stats.NewMultiCounters(tableACLPseudoDeniedName, []string{"TableName", "TableGroup", "PlanID", "Username"})
	return qe
}