Example No. 1
func NewQueryEngine(config Config) *QueryEngine {
	qe := &QueryEngine{}
	qe.cachePool = NewCachePool("CachePool", config.RowCache, time.Duration(config.QueryTimeout*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.schemaInfo = NewSchemaInfo(config.QueryCacheSize, time.Duration(config.SchemaReloadTime*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.connPool = NewConnectionPool("ConnPool", config.PoolSize, time.Duration(config.IdleTimeout*1e9))
	qe.streamConnPool = NewConnectionPool("StreamConnPool", config.StreamPoolSize, time.Duration(config.IdleTimeout*1e9))
	qe.streamTokens = sync2.NewSemaphore(config.StreamExecThrottle, time.Duration(config.StreamWaitTimeout*1e9))
	qe.reservedPool = NewReservedPool("ReservedPool")
	qe.txPool = NewConnectionPool("TxPool", config.TransactionCap, time.Duration(config.IdleTimeout*1e9)) // connections in the pool have to be > transactionCap
	qe.activeTxPool = NewActiveTxPool("ActiveTxPool", time.Duration(config.TransactionTimeout*1e9))
	qe.activePool = NewActivePool("ActivePool", time.Duration(config.QueryTimeout*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.consolidator = NewConsolidator()
	qe.spotCheckFreq = sync2.AtomicInt64(config.SpotCheckRatio * SPOT_CHECK_MULTIPLIER)
	qe.maxResultSize = sync2.AtomicInt64(config.MaxResultSize)
	qe.streamBufferSize = sync2.AtomicInt64(config.StreamBufferSize)
	stats.Publish("MaxResultSize", stats.IntFunc(qe.maxResultSize.Get))
	stats.Publish("StreamBufferSize", stats.IntFunc(qe.streamBufferSize.Get))
	queryStats = stats.NewTimings("Queries")
	stats.NewRates("QPS", queryStats, 15, 60e9)
	waitStats = stats.NewTimings("Waits")
	killStats = stats.NewCounters("Kills")
	errorStats = stats.NewCounters("Errors")
	resultStats = stats.NewHistogram("Results", resultBuckets)
	stats.Publish("SpotCheckRatio", stats.FloatFunc(func() float64 {
		return float64(qe.spotCheckFreq.Get()) / SPOT_CHECK_MULTIPLIER
	}))
	spotCheckCount = stats.NewInt("SpotCheckCount")
	return qe
}
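
Several of these constructors build time.Duration values by multiplying a config field by 1e9 (e.g. time.Duration(config.QueryTimeout*1e9)); the Config fields are presumably fractional seconds being converted to nanoseconds. A minimal standalone sketch of that conversion, with a hypothetical secs value:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Hypothetical config value: a timeout in (fractional) seconds.
	secs := 30.5

	// The pattern used in the examples: seconds * 1e9 yields nanoseconds,
	// which is exactly the unit time.Duration counts.
	d1 := time.Duration(secs * 1e9)

	// An equivalent, arguably clearer spelling.
	d2 := time.Duration(secs * float64(time.Second))

	fmt.Println(d1, d2) // 30.5s 30.5s
}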
Example No. 2
// NewQueryServiceStats returns a new QueryServiceStats instance.
func NewQueryServiceStats(statsPrefix string, enablePublishStats bool) *QueryServiceStats {
	mysqlStatsName := ""
	queryStatsName := ""
	qpsRateName := ""
	waitStatsName := ""
	killStatsName := ""
	infoErrorsName := ""
	errorStatsName := ""
	internalErrorsName := ""
	resultStatsName := ""
	spotCheckCountName := ""
	userTableQueryCountName := ""
	userTableQueryTimesNsName := ""
	userTransactionCountName := ""
	userTransactionTimesNsName := ""
	if enablePublishStats {
		mysqlStatsName = statsPrefix + "Mysql"
		queryStatsName = statsPrefix + "Queries"
		qpsRateName = statsPrefix + "QPS"
		waitStatsName = statsPrefix + "Waits"
		killStatsName = statsPrefix + "Kills"
		infoErrorsName = statsPrefix + "InfoErrors"
		errorStatsName = statsPrefix + "Errors"
		internalErrorsName = statsPrefix + "InternalErrors"
		resultStatsName = statsPrefix + "Results"
		spotCheckCountName = statsPrefix + "RowcacheSpotCheckCount"
		userTableQueryCountName = statsPrefix + "UserTableQueryCount"
		userTableQueryTimesNsName = statsPrefix + "UserTableQueryTimesNs"
		userTransactionCountName = statsPrefix + "UserTransactionCount"
		userTransactionTimesNsName = statsPrefix + "UserTransactionTimesNs"
	}
	resultBuckets := []int64{0, 1, 5, 10, 50, 100, 500, 1000, 5000, 10000}
	queryStats := stats.NewTimings(queryStatsName)
	return &QueryServiceStats{
		MySQLStats: stats.NewTimings(mysqlStatsName),
		QueryStats: queryStats,
		WaitStats:  stats.NewTimings(waitStatsName),
		KillStats:  stats.NewCounters(killStatsName, "Transactions", "Queries"),
		InfoErrors: stats.NewCounters(infoErrorsName, "Retry", "Fatal", "DupKey"),
		ErrorStats: stats.NewCounters(errorStatsName, "Fail", "TxPoolFull", "NotInTx", "Deadlock"),
		InternalErrors: stats.NewCounters(internalErrorsName, "Task", "MemcacheStats",
			"Mismatch", "StrayTransactions", "Invalidation", "Panic", "HungQuery", "Schema"),
		UserTableQueryCount: stats.NewMultiCounters(
			userTableQueryCountName, []string{"TableName", "CallerID", "Type"}),
		UserTableQueryTimesNs: stats.NewMultiCounters(
			userTableQueryTimesNsName, []string{"TableName", "CallerID", "Type"}),
		UserTransactionCount: stats.NewMultiCounters(
			userTransactionCountName, []string{"CallerID", "Conclusion"}),
		UserTransactionTimesNs: stats.NewMultiCounters(
			userTransactionTimesNsName, []string{"CallerID", "Conclusion"}),
		// Sample every 5 seconds and keep samples for up to 15 minutes.
		QPSRates:       stats.NewRates(qpsRateName, queryStats, 15*60/5, 5*time.Second),
		ResultStats:    stats.NewHistogram(resultStatsName, resultBuckets),
		SpotCheckCount: stats.NewInt(spotCheckCountName),
	}
}
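
The enablePublishStats branch above leans on a convention visible throughout these examples: a stats object created with an empty name still accumulates values but, by the package's apparent convention, is not exported, while a non-empty name publishes it. A minimal sketch of that pattern, assuming the single-argument stats.NewTimings signature and import path used elsewhere in these examples:

package main

import (
	"time"

	"github.com/youtube/vitess/go/stats"
)

// newTimings mirrors the naming scheme in NewQueryServiceStats: when
// publishing is disabled the name stays empty, so the Timings object is
// created but not exported.
func newTimings(prefix, suffix string, publish bool) *stats.Timings {
	name := ""
	if publish {
		name = prefix + suffix
	}
	return stats.NewTimings(name)
}

func main() {
	exported := newTimings("Test", "Queries", true) // exported as "TestQueries"
	private := newTimings("Test", "Queries", false) // anonymous, not exported

	exported.Add("select", 3*time.Millisecond)
	private.Add("select", 3*time.Millisecond)
}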
Example No. 3
func NewBlplStats() *blplStats {
	bs := &blplStats{}
	bs.txnCount = estats.NewCounters("")
	bs.queryCount = estats.NewCounters("")
	bs.queriesPerSec = estats.NewRates("", bs.queryCount, 15, 60e9)
	bs.txnsPerSec = estats.NewRates("", bs.txnCount, 15, 60e9)
	bs.txnTime = estats.NewTimings("")
	bs.queryTime = estats.NewTimings("")
	return bs
}
Example No. 4
func NewBlpStats() *blpStats {
	bs := &blpStats{}
	bs.txnCount = stats.NewCounters("TxnCount")
	bs.queryCount = stats.NewCounters("QueryCount")
	bs.queriesPerSec = stats.NewRates("QueriesPerSec", bs.queryCount, 15, 60e9)
	bs.txnsPerSec = stats.NewRates("TxnPerSec", bs.txnCount, 15, 60e9)
	bs.txnTime = stats.NewTimings("TxnTime")
	bs.queryTime = stats.NewTimings("QueryTime")
	bs.lookupTxn = stats.NewTimings("LookupTxn")
	return bs
}
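
The 60e9 passed to stats.NewRates here is an untyped constant that converts to a time.Duration of 60 seconds (the parameter is a Duration, as the 5*time.Second and 60*time.Second calls in other examples show), so each rate keeps 15 samples at one-minute intervals. An equivalent, slightly more readable sketch using the counter name from this example:

package main

import (
	"time"

	"github.com/youtube/vitess/go/stats"
)

func main() {
	queryCount := stats.NewCounters("QueryCount")

	// Same as stats.NewRates("QueriesPerSec", queryCount, 15, 60e9):
	// 60e9 nanoseconds is just 60 * time.Second.
	_ = stats.NewRates("QueriesPerSec", queryCount, 15, 60*time.Second)
}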
Example No. 5
// NewQueryEngine creates a new QueryEngine.
// This is a singleton class.
// You must call this only once.
func NewQueryEngine(config Config) *QueryEngine {
	qe := &QueryEngine{}
	qe.schemaInfo = NewSchemaInfo(config.QueryCacheSize, time.Duration(config.SchemaReloadTime*1e9), time.Duration(config.IdleTimeout*1e9))

	mysqlStats = stats.NewTimings("Mysql")

	// Pools
	qe.cachePool = NewCachePool("Rowcache", config.RowCache, time.Duration(config.QueryTimeout*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.connPool = dbconnpool.NewConnectionPool("ConnPool", config.PoolSize, time.Duration(config.IdleTimeout*1e9))
	qe.streamConnPool = dbconnpool.NewConnectionPool("StreamConnPool", config.StreamPoolSize, time.Duration(config.IdleTimeout*1e9))
	qe.txPool = dbconnpool.NewConnectionPool("TransactionPool", config.TransactionCap, time.Duration(config.IdleTimeout*1e9)) // connections in the pool have to be > transactionCap

	// Services
	qe.activeTxPool = NewActiveTxPool("ActiveTransactionPool", time.Duration(config.TransactionTimeout*1e9))
	qe.connKiller = NewConnectionKiller(1, time.Duration(config.IdleTimeout*1e9))
	qe.activePool = NewActivePool("ActivePool", time.Duration(config.QueryTimeout*1e9), qe.connKiller)
	qe.consolidator = NewConsolidator()
	qe.invalidator = NewRowcacheInvalidator(qe)
	qe.streamQList = NewQueryList(qe.connKiller)

	// Vars
	qe.spotCheckFreq = sync2.AtomicInt64(config.SpotCheckRatio * SPOT_CHECK_MULTIPLIER)
	if config.StrictMode {
		qe.strictMode.Set(1)
	}
	qe.strictTableAcl = config.StrictTableAcl
	qe.maxResultSize = sync2.AtomicInt64(config.MaxResultSize)
	qe.streamBufferSize = sync2.AtomicInt64(config.StreamBufferSize)

	// loggers
	qe.accessCheckerLogger = logutil.NewThrottledLogger("accessChecker", 1*time.Second)

	// Stats
	stats.Publish("MaxResultSize", stats.IntFunc(qe.maxResultSize.Get))
	stats.Publish("StreamBufferSize", stats.IntFunc(qe.streamBufferSize.Get))
	queryStats = stats.NewTimings("Queries")
	QPSRates = stats.NewRates("QPS", queryStats, 15, 60*time.Second)
	waitStats = stats.NewTimings("Waits")
	killStats = stats.NewCounters("Kills")
	infoErrors = stats.NewCounters("InfoErrors")
	errorStats = stats.NewCounters("Errors")
	internalErrors = stats.NewCounters("InternalErrors")
	resultStats = stats.NewHistogram("Results", resultBuckets)
	stats.Publish("RowcacheSpotCheckRatio", stats.FloatFunc(func() float64 {
		return float64(qe.spotCheckFreq.Get()) / SPOT_CHECK_MULTIPLIER
	}))
	spotCheckCount = stats.NewInt("RowcacheSpotCheckCount")

	return qe
}
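
The spotCheckFreq handling in this constructor (also in Examples 1 and 13) is a small fixed-point trick: the configured SpotCheckRatio, a float, is scaled by SPOT_CHECK_MULTIPLIER and stored in an atomic int64 so it can be read and updated without locks, and the published gauge divides by the multiplier to recover the ratio. A minimal standalone sketch of the same idea using only sync/atomic; the multiplier value below is illustrative, not taken from the source:

package main

import (
	"fmt"
	"sync/atomic"
)

// spotCheckMultiplier scales a [0,1] ratio into an int64 (illustrative value).
const spotCheckMultiplier = 1e6

func main() {
	var spotCheckFreq int64

	// Store: ratio -> fixed-point int64, as the constructor does.
	ratio := 0.25
	atomic.StoreInt64(&spotCheckFreq, int64(ratio*spotCheckMultiplier))

	// Load: fixed-point int64 -> ratio, as the published FloatFunc does.
	got := float64(atomic.LoadInt64(&spotCheckFreq)) / spotCheckMultiplier
	fmt.Println(got) // 0.25
}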
Example No. 6
// NewMysqld creates a Mysqld object based on the provided configuration
// and connection parameters.
func NewMysqld(config *Mycnf, dba, allprivs, app, repl *sqldb.ConnParams, enablePublishStats bool) *Mysqld {
	noParams := sqldb.ConnParams{}
	if *dba == noParams {
		dba.UnixSocket = config.SocketFile
	}

	// create and open the connection pool for dba access
	dbaMysqlStatsName := ""
	dbaPoolName := ""
	if enablePublishStats {
		dbaMysqlStatsName = "MysqlDba"
		dbaPoolName = "DbaConnPool"
	}
	dbaMysqlStats := stats.NewTimings(dbaMysqlStatsName)
	dbaPool := dbconnpool.NewConnectionPool(dbaPoolName, *dbaPoolSize, *dbaIdleTimeout)
	dbaPool.Open(dbconnpool.DBConnectionCreator(dba, dbaMysqlStats))

	// create and open the connection pool for allprivs access
	allprivsMysqlStatsName := ""
	if enablePublishStats {
		allprivsMysqlStatsName = "MysqlAllPrivs"
	}
	allprivsMysqlStats := stats.NewTimings(allprivsMysqlStatsName)

	// create and open the connection pool for app access
	appMysqlStatsName := ""
	appPoolName := ""
	if enablePublishStats {
		appMysqlStatsName = "MysqlApp"
		appPoolName = "AppConnPool"
	}
	appMysqlStats := stats.NewTimings(appMysqlStatsName)
	appPool := dbconnpool.NewConnectionPool(appPoolName, *appPoolSize, *appIdleTimeout)
	appPool.Open(dbconnpool.DBConnectionCreator(app, appMysqlStats))

	return &Mysqld{
		config:             config,
		dba:                dba,
		allprivs:           allprivs,
		dbApp:              app,
		dbaPool:            dbaPool,
		appPool:            appPool,
		replParams:         repl,
		dbaMysqlStats:      dbaMysqlStats,
		allprivsMysqlStats: allprivsMysqlStats,
		tabletDir:          path.Dir(config.DataDir),
	}
}
Example No. 7
// NewTxPool creates a new TxPool. It's not operational until it's Open'd.
func NewTxPool(
	name string,
	txStatsPrefix string,
	capacity int,
	timeout time.Duration,
	idleTimeout time.Duration,
	enablePublishStats bool,
	qStats *QueryServiceStats,
	checker MySQLChecker) *TxPool {

	txStatsName := ""
	if enablePublishStats {
		txStatsName = txStatsPrefix + "Transactions"
	}

	axp := &TxPool{
		pool:              NewConnPool(name, capacity, idleTimeout, enablePublishStats, qStats, checker),
		activePool:        pools.NewNumbered(),
		lastID:            sync2.NewAtomicInt64(time.Now().UnixNano()),
		timeout:           sync2.NewAtomicDuration(timeout),
		ticks:             timer.NewTimer(timeout / 10),
		txStats:           stats.NewTimings(txStatsName),
		checker:           checker,
		queryServiceStats: qStats,
	}
	// Careful: pool also exports name+"xxx" vars,
	// but we know it doesn't export Timeout.
	if enablePublishStats {
		stats.Publish(name+"Timeout", stats.DurationFunc(axp.timeout.Get))
	}
	return axp
}
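
Both TxPool constructors publish derived values rather than raw counters: stats.Publish takes a function-typed wrapper (DurationFunc here, IntFunc and FloatFunc in other examples) that is re-evaluated whenever the exported variable is read, so later updates to the underlying value stay visible. A minimal sketch of that pattern with a hypothetical variable name, assuming the go/sync2 package path alongside go/stats and the usual Set method on AtomicDuration:

package main

import (
	"time"

	"github.com/youtube/vitess/go/stats"
	"github.com/youtube/vitess/go/sync2"
)

func main() {
	// An atomically updatable timeout, as in TxPool.
	timeout := sync2.NewAtomicDuration(30 * time.Second)

	// Publish under a hypothetical name; the Get closure runs on every
	// read of the exported variable, so the Set below is reflected.
	stats.Publish("ExampleTxPoolTimeout", stats.DurationFunc(timeout.Get))

	timeout.Set(10 * time.Second)
}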
Example No. 8
func NewActiveTxPool(name string, timeout time.Duration) *ActiveTxPool {
	axp := &ActiveTxPool{
		pool:            pools.NewNumbered(),
		lastId:          sync2.AtomicInt64(time.Now().UnixNano()),
		timeout:         sync2.AtomicDuration(timeout),
		ticks:           timer.NewTimer(timeout / 10),
		txStats:         stats.NewTimings("Transactions"),
		completionStats: stats.NewTimings("TransactionCompletion"),
	}
	stats.Publish(name+"Size", stats.IntFunc(axp.pool.Size))
	stats.Publish(
		name+"Timeout",
		stats.DurationFunc(func() time.Duration { return axp.timeout.Get() }),
	)
	return axp
}
Example No. 9
// Open starts the 2PC MM service. If the metadata database or tables
// are not present, they are created.
func (tpc *TwoPC) Open(sidecarDBName string, dbaparams *sqldb.ConnParams) {
	conn, err := dbconnpool.NewDBConnection(dbaparams, stats.NewTimings(""))
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	statements := []string{
		sqlTurnoffBinlog,
		fmt.Sprintf(sqlCreateSidecarDB, sidecarDBName),
		fmt.Sprintf(sqlCreateTableRedoLogTransaction, sidecarDBName),
		fmt.Sprintf(sqlCreateTableRedoLogStatement, sidecarDBName),
		fmt.Sprintf(sqlCreateTableTransaction, sidecarDBName),
		fmt.Sprintf(sqlCreateTableParticipant, sidecarDBName),
	}
	for _, s := range statements {
		if _, err := conn.ExecuteFetch(s, 0, false); err != nil {
			panic(NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, err.Error()))
		}
	}
	tpc.insertRedoTx = buildParsedQuery(
		"insert into `%s`.redo_log_transaction(dtid, state, time_created) values (%a, 'Prepared', %a)",
		sidecarDBName, ":dtid", ":time_created")
	tpc.insertRedoStmt = buildParsedQuery(
		"insert into `%s`.redo_log_statement(dtid, id, statement) values %a",
		sidecarDBName, ":vals")
	tpc.deleteRedoTx = buildParsedQuery(
		"delete from `%s`.redo_log_transaction where dtid = %a",
		sidecarDBName, ":dtid")
	tpc.deleteRedoStmt = buildParsedQuery(
		"delete from `%s`.redo_log_statement where dtid = %a",
		sidecarDBName, ":dtid")
	tpc.readPrepared = fmt.Sprintf(
		"select s.dtid, s.id, s.statement from `%s`.redo_log_transaction t "+
			"join `%s`.redo_log_statement s on t.dtid = s.dtid "+
			"where t.state = 'Prepared' order by s.dtid, s.id",
		sidecarDBName, sidecarDBName)

	tpc.insertTransaction = buildParsedQuery(
		"insert into `%s`.transaction(dtid, state, time_created, time_updated) values (%a, 'Prepare', %a, %a)",
		sidecarDBName, ":dtid", ":cur_time", ":cur_time")
	tpc.insertParticipants = buildParsedQuery(
		"insert into `%s`.participant(dtid, id, keyspace, shard) values %a",
		sidecarDBName, ":vals")
	tpc.transition = buildParsedQuery(
		"update `%s`.transaction set state = %a where dtid = %a and state = 'Prepare'",
		sidecarDBName, ":state", ":dtid")
	tpc.deleteTransaction = buildParsedQuery(
		"delete from `%s`.transaction where dtid = %a",
		sidecarDBName, ":dtid")
	tpc.deleteParticipants = buildParsedQuery(
		"delete from `%s`.participant where dtid = %a",
		sidecarDBName, ":dtid")
	tpc.readTransaction = buildParsedQuery(
		"select dtid, state, time_created, time_updated from `%s`.transaction where dtid = %a",
		sidecarDBName, ":dtid")
	tpc.readParticipants = buildParsedQuery(
		"select keyspace, shard from `%s`.participant where dtid = %a",
		sidecarDBName, ":dtid")
}
Example No. 10
func NewActiveTxPool(timeout time.Duration) *ActiveTxPool {
	return &ActiveTxPool{
		pool:    pools.NewNumbered(),
		lastId:  sync2.AtomicInt64(time.Now().UnixNano()),
		timeout: sync2.AtomicDuration(timeout),
		ticks:   timer.NewTimer(timeout / 10),
		txStats: stats.NewTimings("Transactions"),
	}
}
Example No. 11
func NewQueryEngine(config Config) *QueryEngine {
	qe := &QueryEngine{}
	qe.cachePool = NewCachePool(config.RowCache, time.Duration(config.QueryTimeout*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.schemaInfo = NewSchemaInfo(config.QueryCacheSize, time.Duration(config.SchemaReloadTime*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.connPool = NewConnectionPool(config.PoolSize, time.Duration(config.IdleTimeout*1e9))
	qe.streamConnPool = NewConnectionPool(config.StreamPoolSize, time.Duration(config.IdleTimeout*1e9))
	qe.reservedPool = NewReservedPool()
	qe.txPool = NewConnectionPool(config.TransactionCap, time.Duration(config.IdleTimeout*1e9)) // connections in the pool have to be > transactionCap
	qe.activeTxPool = NewActiveTxPool(time.Duration(config.TransactionTimeout * 1e9))
	qe.activePool = NewActivePool(time.Duration(config.QueryTimeout*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.consolidator = NewConsolidator()
	qe.maxResultSize = sync2.AtomicInt32(config.MaxResultSize)
	qe.streamBufferSize = sync2.AtomicInt32(config.StreamBufferSize)
	queryStats = stats.NewTimings("Queries")
	stats.NewRates("QPS", queryStats, 15, 60e9)
	waitStats = stats.NewTimings("Waits")
	killStats = stats.NewCounters("Kills")
	errorStats = stats.NewCounters("Errors")
	resultStats = stats.NewHistogram("Results", resultBuckets)
	return qe
}
Example No. 12
func TestTimings(t *testing.T) {
	v := stats.NewTimings("")
	load := func() {
		for i := 100 * time.Microsecond; i < time.Second; i *= 2 {
			v.Add("a", i)
		}
	}
	testMetric(t, v, load,
		`Desc{fqName: "test_name", help: "test_help", constLabels: {}, variableLabels: [category]}`,
		`label:<name:"category" value:"a" > histogram:<sample_count:14 sample_sum:1.6383 bucket:<cumulative_count:3 upper_bound:0.0005 > bucket:<cumulative_count:4 upper_bound:0.001 > bucket:<cumulative_count:6 upper_bound:0.005 > bucket:<cumulative_count:7 upper_bound:0.01 > bucket:<cumulative_count:9 upper_bound:0.05 > bucket:<cumulative_count:10 upper_bound:0.1 > bucket:<cumulative_count:13 upper_bound:0.5 > bucket:<cumulative_count:14 upper_bound:1 > bucket:<cumulative_count:14 upper_bound:5 > bucket:<cumulative_count:14 upper_bound:10 > > `,
	)
}
Example No. 13
// NewQueryEngine creates a new QueryEngine.
// This is a singleton class.
// You must call this only once.
func NewQueryEngine(config Config) *QueryEngine {
	qe := &QueryEngine{}

	// services
	qe.cachePool = NewCachePool("RowcachePool", config.RowCache, time.Duration(config.QueryTimeout*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.schemaInfo = NewSchemaInfo(config.QueryCacheSize, time.Duration(config.SchemaReloadTime*1e9), time.Duration(config.IdleTimeout*1e9), config.SensitiveMode)
	qe.connPool = NewConnectionPool("ConnPool", config.PoolSize, time.Duration(config.IdleTimeout*1e9))
	qe.streamConnPool = NewConnectionPool("StreamConnPool", config.StreamPoolSize, time.Duration(config.IdleTimeout*1e9))
	qe.txPool = NewConnectionPool("TransactionPool", config.TransactionCap, time.Duration(config.IdleTimeout*1e9)) // connections in the pool have to be > transactionCap
	qe.activeTxPool = NewActiveTxPool("ActiveTransactionPool", time.Duration(config.TransactionTimeout*1e9))
	qe.activePool = NewActivePool("ActivePool", time.Duration(config.QueryTimeout*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.consolidator = NewConsolidator()

	// vars
	qe.spotCheckFreq = sync2.AtomicInt64(config.SpotCheckRatio * SPOT_CHECK_MULTIPLIER)
	if config.StrictMode {
		qe.strictMode.Set(1)
	}
	qe.maxResultSize = sync2.AtomicInt64(config.MaxResultSize)
	qe.streamBufferSize = sync2.AtomicInt64(config.StreamBufferSize)

	// stats
	stats.Publish("MaxResultSize", stats.IntFunc(qe.maxResultSize.Get))
	stats.Publish("StreamBufferSize", stats.IntFunc(qe.streamBufferSize.Get))
	queryStats = stats.NewTimings("Queries")
	QPSRates = stats.NewRates("QPS", queryStats, 15, 60*time.Second)
	waitStats = stats.NewTimings("Waits")
	killStats = stats.NewCounters("Kills")
	infoErrors = stats.NewCounters("InfoErrors")
	errorStats = stats.NewCounters("Errors")
	internalErrors = stats.NewCounters("InternalErrors")
	resultStats = stats.NewHistogram("Results", resultBuckets)
	stats.Publish("SpotCheckRatio", stats.FloatFunc(func() float64 {
		return float64(qe.spotCheckFreq.Get()) / SPOT_CHECK_MULTIPLIER
	}))
	spotCheckCount = stats.NewInt("SpotCheckCount")

	return qe
}
Example No. 14
// NewMysqld creates a Mysqld object based on the provided configuration
// and connection parameters.
// dbaName and appName are the base for stats exports, use 'Dba' and 'App', except in tests
func NewMysqld(dbaName, appName string, config *Mycnf, dba, app, repl *sqldb.ConnParams) *Mysqld {
	if *dba == dbconfigs.DefaultDBConfigs.Dba {
		dba.UnixSocket = config.SocketFile
	}

	// create and open the connection pool for dba access
	dbaMysqlStatsName := ""
	dbaPoolName := ""
	if dbaName != "" {
		dbaMysqlStatsName = "Mysql" + dbaName
		dbaPoolName = dbaName + "ConnPool"
	}
	dbaMysqlStats := stats.NewTimings(dbaMysqlStatsName)
	dbaPool := dbconnpool.NewConnectionPool(dbaPoolName, *dbaPoolSize, *dbaIdleTimeout)
	dbaPool.Open(dbconnpool.DBConnectionCreator(dba, dbaMysqlStats))

	// create and open the connection pool for app access
	appMysqlStatsName := ""
	appPoolName := ""
	if appName != "" {
		appMysqlStatsName = "Mysql" + appName
		appPoolName = appName + "ConnPool"
	}
	appMysqlStats := stats.NewTimings(appMysqlStatsName)
	appPool := dbconnpool.NewConnectionPool(appPoolName, *appPoolSize, *appIdleTimeout)
	appPool.Open(dbconnpool.DBConnectionCreator(app, appMysqlStats))

	return &Mysqld{
		config:        config,
		dba:           dba,
		dbApp:         app,
		dbaPool:       dbaPool,
		appPool:       appPool,
		replParams:    repl,
		dbaMysqlStats: dbaMysqlStats,
		TabletDir:     TabletDir(config.ServerID),
		SnapshotDir:   SnapshotDir(config.ServerID),
	}
}
Example No. 15
// NewQueryServiceStats returns a new QueryServiceStats instance.
func NewQueryServiceStats(statsPrefix string, enablePublishStats bool) *QueryServiceStats {
	mysqlStatsName := ""
	queryStatsName := ""
	qpsRateName := ""
	waitStatsName := ""
	killStatsName := ""
	infoErrorsName := ""
	errorStatsName := ""
	internalErrorsName := ""
	resultStatsName := ""
	spotCheckCountName := ""
	if enablePublishStats {
		mysqlStatsName = statsPrefix + "Mysql"
		queryStatsName = statsPrefix + "Queries"
		qpsRateName = statsPrefix + "QPS"
		waitStatsName = statsPrefix + "Waits"
		killStatsName = statsPrefix + "Kills"
		infoErrorsName = statsPrefix + "InfoErrors"
		errorStatsName = statsPrefix + "Errors"
		internalErrorsName = statsPrefix + "InternalErrors"
		resultStatsName = statsPrefix + "Results"
		spotCheckCountName = statsPrefix + "RowcacheSpotCheckCount"
	}
	resultBuckets := []int64{0, 1, 5, 10, 50, 100, 500, 1000, 5000, 10000}
	queryStats := stats.NewTimings(queryStatsName)
	return &QueryServiceStats{
		MySQLStats:     stats.NewTimings(mysqlStatsName),
		QueryStats:     queryStats,
		WaitStats:      stats.NewTimings(waitStatsName),
		KillStats:      stats.NewCounters(killStatsName),
		InfoErrors:     stats.NewCounters(infoErrorsName),
		ErrorStats:     stats.NewCounters(errorStatsName),
		InternalErrors: stats.NewCounters(internalErrorsName),
		QPSRates:       stats.NewRates(qpsRateName, queryStats, 15, 60*time.Second),
		ResultStats:    stats.NewHistogram(resultStatsName, resultBuckets),
		SpotCheckCount: stats.NewInt(spotCheckCountName),
	}
}
Example No. 16
func NewTxPool(name string, capacity int, timeout, poolTimeout, idleTimeout time.Duration) *TxPool {
	axp := &TxPool{
		pool:        dbconnpool.NewConnectionPool(name, capacity, idleTimeout),
		activePool:  pools.NewNumbered(),
		lastId:      sync2.AtomicInt64(time.Now().UnixNano()),
		timeout:     sync2.AtomicDuration(timeout),
		poolTimeout: sync2.AtomicDuration(poolTimeout),
		ticks:       timer.NewTimer(timeout / 10),
		txStats:     stats.NewTimings("Transactions"),
	}
	// Careful: pool also exports name+"xxx" vars,
	// but we know it doesn't export Timeout.
	stats.Publish(name+"Timeout", stats.DurationFunc(axp.timeout.Get))
	stats.Publish(name+"PoolTimeout", stats.DurationFunc(axp.poolTimeout.Get))
	return axp
}
Example No. 17
// NewMysqld creates a Mysqld object based on the provided configuration
// and connection parameters.
// name is the base for stats exports, use 'Dba', except in tests
func NewMysqld(name string, config *Mycnf, dba, repl *mysql.ConnectionParams) *Mysqld {
	if *dba == dbconfigs.DefaultDBConfigs.Dba {
		dba.UnixSocket = config.SocketFile
	}

	// create and open the connection pool for dba access
	mysqlStats := stats.NewTimings("Mysql" + name)
	dbaPool := dbconnpool.NewConnectionPool(name+"ConnPool", *dbaPoolSize, *dbaIdleTimeout)
	dbaPool.Open(dbconnpool.DBConnectionCreator(dba, mysqlStats))

	return &Mysqld{
		config:      config,
		dba:         dba,
		dbaPool:     dbaPool,
		replParams:  repl,
		TabletDir:   TabletDir(config.ServerId),
		SnapshotDir: SnapshotDir(config.ServerId),
	}
}
Example No. 18
// GetDbaConnection is part of the MysqlDaemon interface.
func (fmd *FakeMysqlDaemon) GetDbaConnection() (*dbconnpool.DBConnection, error) {
	return dbconnpool.NewDBConnection(&sqldb.ConnParams{}, stats.NewTimings(""))
}
Example No. 19
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package tabletserver

import (
	"encoding/binary"
	"strconv"
	"time"

	"github.com/youtube/vitess/go/sqltypes"
	"github.com/youtube/vitess/go/stats"
	"github.com/youtube/vitess/go/vt/schema"
)

var cacheStats = stats.NewTimings("Rowcache")

var pack = binary.BigEndian

const (
	RC_DELETED = 1

	// MAX_KEY_LEN is a value less than memcache's limit of 250.
	MAX_KEY_LEN = 200

	// MAX_DATA_LEN prevents large rows from being inserted in rowcache.
	MAX_DATA_LEN = 8000
)

type RowCache struct {
	tableInfo *TableInfo
Example No. 20
// NewStats creates a new Stats structure
func NewStats() *Stats {
	bps := &Stats{}
	bps.Timings = stats.NewTimings("")
	bps.Rates = stats.NewRates("", bps.Timings, 15, 60e9)
	return bps
}
Example No. 21
// Copyright 2012, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package tabletserver

import (
	"encoding/binary"
	"time"

	"github.com/youtube/vitess/go/sqltypes"
	"github.com/youtube/vitess/go/stats"
	"github.com/youtube/vitess/go/vt/schema"
)

var cacheStats = stats.NewTimings("Cache")
var cacheCounters = stats.NewCounters("CacheCounters")

var pack = binary.BigEndian

const (
	RC_DELETED = 1
)

type RowCache struct {
	tableInfo *TableInfo
	prefix    string
	cachePool *CachePool
}

type RCResult struct {
Example No. 22
// NewBinlogPlayerStats creates a new BinlogPlayerStats structure
func NewBinlogPlayerStats() *BinlogPlayerStats {
	bps := &BinlogPlayerStats{}
	bps.Timings = stats.NewTimings("")
	bps.Rates = stats.NewRates("", bps.Timings, 15, 60e9)
	return bps
}
Example No. 23
// GetAllPrivsConnection is part of the MysqlDaemon interface.
func (fmd *FakeMysqlDaemon) GetAllPrivsConnection() (*dbconnpool.DBConnection, error) {
	return dbconnpool.NewDBConnection(&sqldb.ConnParams{Engine: fmd.db.Name}, stats.NewTimings(""))
}
Example No. 24
func init() {
	mysqlStats = stats.NewTimings("Mysql")
}
Example No. 25
	"github.com/youtube/vitess/go/trace"
	"github.com/youtube/vitess/go/vt/concurrency"
	"github.com/youtube/vitess/go/vt/tabletmanager/tmclient"
	"github.com/youtube/vitess/go/vt/topo"
	"github.com/youtube/vitess/go/vt/topo/topoproto"
	"github.com/youtube/vitess/go/vt/topotools"
	"github.com/youtube/vitess/go/vt/topotools/events"
	"golang.org/x/net/context"

	topodatapb "github.com/youtube/vitess/go/vt/proto/topodata"
)

var (
	finalizeReparentTimeout = flag.Duration("finalize_external_reparent_timeout", 10*time.Second, "Timeout for the finalize stage of a fast external reparent reconciliation.")

	externalReparentStats = stats.NewTimings("ExternalReparents", "NewMasterVisible", "FullRebuild")
)

// SetReparentFlags changes flag values. It should only be used in tests.
func SetReparentFlags(timeout time.Duration) {
	*finalizeReparentTimeout = timeout
}

// TabletExternallyReparented updates all topo records so the current
// tablet is the new master for this shard.
// Should be called under RPCWrapLock.
func (agent *ActionAgent) TabletExternallyReparented(ctx context.Context, externalID string) error {
	startTime := time.Now()

	// If there is a finalize step running, wait for it to finish or time out
	// before checking the global shard record again.