Code example #1
File: tabletserver.go Project: aaijazi/vitess
// NewTabletServer creates an instance of TabletServer. Only one instance
// of TabletServer can be created per process.
func NewTabletServer(config Config) *TabletServer {
	tsv := &TabletServer{
		config:              config,
		QueryTimeout:        sync2.NewAtomicDuration(time.Duration(config.QueryTimeout * 1e9)),
		BeginTimeout:        sync2.NewAtomicDuration(time.Duration(config.TxPoolTimeout * 1e9)),
		checkMySQLThrottler: sync2.NewSemaphore(1, 0),
		streamHealthMap:     make(map[int]chan<- *querypb.StreamHealthResponse),
		sessionID:           Rand(),
		history:             history.New(10),
	}
	tsv.qe = NewQueryEngine(tsv, config)
	tsv.invalidator = NewRowcacheInvalidator(config.StatsPrefix, tsv, tsv.qe, config.EnablePublishStats)
	if config.EnablePublishStats {
		stats.Publish(config.StatsPrefix+"TabletState", stats.IntFunc(func() int64 {
			tsv.mu.Lock()
			state := tsv.state
			tsv.mu.Unlock()
			return state
		}))
		stats.Publish(config.StatsPrefix+"QueryTimeout", stats.DurationFunc(tsv.QueryTimeout.Get))
		stats.Publish(config.StatsPrefix+"BeginTimeout", stats.DurationFunc(tsv.BeginTimeout.Get))
		stats.Publish(config.StatsPrefix+"TabletStateName", stats.StringFunc(tsv.GetState))
	}
	return tsv
}
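Every example on this page follows the same pattern: a func() time.Duration getter is wrapped in stats.DurationFunc and registered under a metric name with stats.Publish, so the exported value is re-evaluated on every read rather than captured once. Below is a minimal, self-contained sketch of that pattern; it assumes the vitess go/stats and go/sync2 packages imported by the files above, and the metric name and timeout values are illustrative only.

package main

import (
	"time"

	"github.com/youtube/vitess/go/stats"
	"github.com/youtube/vitess/go/sync2"
)

func main() {
	// An atomically updatable duration, like the timeouts held by the pools above.
	queryTimeout := sync2.NewAtomicDuration(30 * time.Second)

	// DurationFunc wraps the getter; Publish exports it under the given name.
	// The getter runs each time the variable is read, so later updates are visible.
	stats.Publish("ExampleQueryTimeout", stats.DurationFunc(queryTimeout.Get))

	queryTimeout.Set(45 * time.Second)
}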
Code example #2
File: connpool.go Project: littleyang/vitess
// NewConnPool creates a new ConnPool. The name is used
// to publish stats only.
func NewConnPool(
	name string,
	capacity int,
	idleTimeout time.Duration,
	enablePublishStats bool,
	queryServiceStats *QueryServiceStats,
	checker MySQLChecker) *ConnPool {
	cp := &ConnPool{
		capacity:          capacity,
		idleTimeout:       idleTimeout,
		dbaPool:           dbconnpool.NewConnectionPool("", 1, idleTimeout),
		queryServiceStats: queryServiceStats,
		checker:           checker,
	}
	if name == "" {
		return cp
	}
	if enablePublishStats {
		stats.Publish(name+"Capacity", stats.IntFunc(cp.Capacity))
		stats.Publish(name+"Available", stats.IntFunc(cp.Available))
		stats.Publish(name+"MaxCap", stats.IntFunc(cp.MaxCap))
		stats.Publish(name+"WaitCount", stats.IntFunc(cp.WaitCount))
		stats.Publish(name+"WaitTime", stats.DurationFunc(cp.WaitTime))
		stats.Publish(name+"IdleTimeout", stats.DurationFunc(cp.IdleTimeout))
	}
	return cp
}
Code example #3
File: tx_pool.go Project: pranjal5215/vitess
// NewTxPool creates a new TxPool. It's not operational until it's Open'd.
func NewTxPool(
	name string,
	txStatsPrefix string,
	capacity int,
	timeout time.Duration,
	poolTimeout time.Duration,
	idleTimeout time.Duration,
	enablePublishStats bool,
	qStats *QueryServiceStats) *TxPool {

	txStatsName := ""
	if enablePublishStats {
		txStatsName = txStatsPrefix + "Transactions"
	}

	axp := &TxPool{
		pool:              NewConnPool(name, capacity, idleTimeout, enablePublishStats, qStats),
		activePool:        pools.NewNumbered(),
		lastID:            sync2.AtomicInt64(time.Now().UnixNano()),
		timeout:           sync2.AtomicDuration(timeout),
		poolTimeout:       sync2.AtomicDuration(poolTimeout),
		ticks:             timer.NewTimer(timeout / 10),
		txStats:           stats.NewTimings(txStatsName),
		queryServiceStats: qStats,
	}
	// Careful: pool also exports name+"xxx" vars,
	// but we know it doesn't export Timeout.
	if enablePublishStats {
		stats.Publish(name+"Timeout", stats.DurationFunc(axp.timeout.Get))
		stats.Publish(name+"PoolTimeout", stats.DurationFunc(axp.poolTimeout.Get))
	}
	return axp
}
Code example #4
File: cache_pool.go Project: fengshao0907/vitess
// NewCachePool creates a new pool for rowcache connections.
func NewCachePool(
	name string,
	rowCacheConfig RowCacheConfig,
	idleTimeout time.Duration,
	statsURL string,
	enablePublishStats bool,
	queryServiceStats *QueryServiceStats) *CachePool {
	cp := &CachePool{
		name:              name,
		idleTimeout:       idleTimeout,
		statsURL:          statsURL,
		queryServiceStats: queryServiceStats,
	}
	if name != "" && enablePublishStats {
		cp.memcacheStats = NewMemcacheStats(
			rowCacheConfig.StatsPrefix+name, 10*time.Second, enableMain,
			queryServiceStats,
			func(key string) string {
				conn := cp.Get(context.Background())
				// This is not the same as defer cachePool.Put(conn)
				defer func() { cp.Put(conn) }()
				stats, err := conn.Stats(key)
				if err != nil {
					conn.Close()
					conn = nil
					log.Errorf("Cannot export memcache %v stats: %v", key, err)
					queryServiceStats.InternalErrors.Add("MemcacheStats", 1)
					return ""
				}
				return string(stats)
			})
		stats.Publish(name+"ConnPoolCapacity", stats.IntFunc(cp.Capacity))
		stats.Publish(name+"ConnPoolAvailable", stats.IntFunc(cp.Available))
		stats.Publish(name+"ConnPoolMaxCap", stats.IntFunc(cp.MaxCap))
		stats.Publish(name+"ConnPoolWaitCount", stats.IntFunc(cp.WaitCount))
		stats.Publish(name+"ConnPoolWaitTime", stats.DurationFunc(cp.WaitTime))
		stats.Publish(name+"ConnPoolIdleTimeout", stats.DurationFunc(cp.IdleTimeout))
	}
	http.Handle(statsURL, cp)

	if rowCacheConfig.Binary == "" {
		return cp
	}
	cp.rowCacheConfig = rowCacheConfig

	// Start with memcached defaults
	cp.capacity = 1024 - 50
	if rowCacheConfig.Connections > 0 {
		if rowCacheConfig.Connections <= 50 {
			log.Fatalf("insufficient capacity: %d", rowCacheConfig.Connections)
		}
		cp.capacity = rowCacheConfig.Connections - 50
	}
	return cp
}
Code example #5
File: cache_pool.go Project: ZhuoRoger/vitess
func NewCachePool(name string, commandLine []string, queryTimeout time.Duration, idleTimeout time.Duration) *CachePool {
	cp := &CachePool{name: name, idleTimeout: idleTimeout}
	if name != "" {
		cp.memcacheStats = NewMemcacheStats(cp)
		stats.Publish(name+"Capacity", stats.IntFunc(cp.Capacity))
		stats.Publish(name+"Available", stats.IntFunc(cp.Available))
		stats.Publish(name+"MaxCap", stats.IntFunc(cp.MaxCap))
		stats.Publish(name+"WaitCount", stats.IntFunc(cp.WaitCount))
		stats.Publish(name+"WaitTime", stats.DurationFunc(cp.WaitTime))
		stats.Publish(name+"IdleTimeout", stats.DurationFunc(cp.IdleTimeout))
	}
	http.Handle(statsURL, cp)

	if len(commandLine) == 0 {
		return cp
	}
	cp.commandLine = commandLine

	// Start with memcached defaults
	cp.capacity = 1024 - 50
	cp.port = "11211"
	for i := 0; i < len(commandLine); i++ {
		switch commandLine[i] {
		case "-p", "-s":
			i++
			if i == len(commandLine) {
				log.Fatalf("expecting value after -p")
			}
			cp.port = commandLine[i]
		case "-c":
			i++
			if i == len(commandLine) {
				log.Fatalf("expecting value after -c")
			}
			capacity, err := strconv.Atoi(commandLine[i])
			if err != nil {
				log.Fatalf("%v", err)
			}
			if capacity <= 50 {
				log.Fatalf("insufficient capacity: %d", capacity)
			}
			cp.capacity = capacity - 50
		}
	}

	seconds := uint64(queryTimeout / time.Second)
	// Add an additional grace period for
	// memcache expiry of deleted items
	if seconds != 0 {
		cp.DeleteExpiry = 2*seconds + 15
	}
	return cp
}
Code example #6
File: conn_pool.go Project: kingpro/vitess
func NewConnectionPool(name string, capacity int, idleTimeout time.Duration) *ConnectionPool {
	cp := &ConnectionPool{capacity: capacity, idleTimeout: idleTimeout}
	if name == "" {
		return cp
	}
	stats.Publish(name+"Capacity", stats.IntFunc(cp.Capacity))
	stats.Publish(name+"Available", stats.IntFunc(cp.Available))
	stats.Publish(name+"MaxCap", stats.IntFunc(cp.MaxCap))
	stats.Publish(name+"WaitCount", stats.IntFunc(cp.WaitCount))
	stats.Publish(name+"WaitTime", stats.DurationFunc(cp.WaitTime))
	stats.Publish(name+"IdleTimeout", stats.DurationFunc(cp.IdleTimeout))
	return cp
}
Code example #7
File: schema_info.go Project: qman1989/vitess
func NewSchemaInfo(queryCacheSize int, reloadTime time.Duration, idleTimeout time.Duration, sensitiveMode bool) *SchemaInfo {
	si := &SchemaInfo{
		queryCacheSize: queryCacheSize,
		queries:        cache.NewLRUCache(int64(queryCacheSize)),
		rules:          NewQueryRules(),
		connPool:       NewConnectionPool("", 2, idleTimeout),
		reloadTime:     reloadTime,
		ticks:          timer.NewTimer(reloadTime),
		sensitiveMode:  sensitiveMode,
	}
	stats.Publish("QueryCacheLength", stats.IntFunc(si.queries.Length))
	stats.Publish("QueryCacheSize", stats.IntFunc(si.queries.Size))
	stats.Publish("QueryCacheCapacity", stats.IntFunc(si.queries.Capacity))
	stats.Publish("QueryCacheOldest", stats.StringFunc(func() string {
		return fmt.Sprintf("%v", si.queries.Oldest())
	}))
	stats.Publish("SchemaReloadTime", stats.DurationFunc(func() time.Duration {
		return si.reloadTime
	}))
	stats.Publish("TableStats", stats.NewMatrixFunc("Table", "Stats", si.getTableStats))
	stats.Publish("TableInvalidations", stats.CountersFunc(si.getTableInvalidations))
	stats.Publish("QueryCounts", stats.NewMatrixFunc("Table", "Plan", si.getQueryCount))
	stats.Publish("QueryTimesNs", stats.NewMatrixFunc("Table", "Plan", si.getQueryTime))
	stats.Publish("QueryRowCounts", stats.NewMatrixFunc("Table", "Plan", si.getQueryRowCount))
	stats.Publish("QueryErrorCounts", stats.NewMatrixFunc("Table", "Plan", si.getQueryErrorCount))
	// query_plans cannot be shown in sensitive mode
	if !si.sensitiveMode {
		http.Handle("/debug/query_plans", si)
	}
	http.Handle("/debug/query_stats", si)
	http.Handle("/debug/table_stats", si)
	http.Handle("/debug/schema", si)
	return si
}
Code example #8
File: schema_info.go Project: fengshao0907/vitess
// NewSchemaInfo creates a new SchemaInfo.
func NewSchemaInfo(
	queryCacheSize int,
	statsPrefix string,
	endpoints map[string]string,
	reloadTime time.Duration,
	idleTimeout time.Duration,
	enablePublishStats bool,
	queryServiceStats *QueryServiceStats) *SchemaInfo {
	si := &SchemaInfo{
		queries:    cache.NewLRUCache(int64(queryCacheSize)),
		connPool:   NewConnPool("", 2, idleTimeout, enablePublishStats, queryServiceStats),
		ticks:      timer.NewTimer(reloadTime),
		endpoints:  endpoints,
		reloadTime: reloadTime,
	}
	if enablePublishStats {
		stats.Publish(statsPrefix+"QueryCacheLength", stats.IntFunc(si.queries.Length))
		stats.Publish(statsPrefix+"QueryCacheSize", stats.IntFunc(si.queries.Size))
		stats.Publish(statsPrefix+"QueryCacheCapacity", stats.IntFunc(si.queries.Capacity))
		stats.Publish(statsPrefix+"QueryCacheOldest", stats.StringFunc(func() string {
			return fmt.Sprintf("%v", si.queries.Oldest())
		}))
		stats.Publish(statsPrefix+"SchemaReloadTime", stats.DurationFunc(si.ticks.Interval))
		_ = stats.NewMultiCountersFunc(statsPrefix+"RowcacheStats", []string{"Table", "Stats"}, si.getRowcacheStats)
		_ = stats.NewMultiCountersFunc(statsPrefix+"RowcacheInvalidations", []string{"Table"}, si.getRowcacheInvalidations)
		_ = stats.NewMultiCountersFunc(statsPrefix+"QueryCounts", []string{"Table", "Plan"}, si.getQueryCount)
		_ = stats.NewMultiCountersFunc(statsPrefix+"QueryTimesNs", []string{"Table", "Plan"}, si.getQueryTime)
		_ = stats.NewMultiCountersFunc(statsPrefix+"QueryRowCounts", []string{"Table", "Plan"}, si.getQueryRowCount)
		_ = stats.NewMultiCountersFunc(statsPrefix+"QueryErrorCounts", []string{"Table", "Plan"}, si.getQueryErrorCount)
	}
	for _, ep := range endpoints {
		http.Handle(ep, si)
	}
	return si
}
Code example #9
File: schema_info.go Project: chinna1986/vitess
func NewSchemaInfo(queryCacheSize int, reloadTime time.Duration, idleTimeout time.Duration) *SchemaInfo {
	si := &SchemaInfo{
		queries:  cache.NewLRUCache(int64(queryCacheSize)),
		rules:    NewQueryRules(),
		connPool: dbconnpool.NewConnectionPool("", 2, idleTimeout),
		ticks:    timer.NewTimer(reloadTime),
	}
	stats.Publish("QueryCacheLength", stats.IntFunc(si.queries.Length))
	stats.Publish("QueryCacheSize", stats.IntFunc(si.queries.Size))
	stats.Publish("QueryCacheCapacity", stats.IntFunc(si.queries.Capacity))
	stats.Publish("QueryCacheOldest", stats.StringFunc(func() string {
		return fmt.Sprintf("%v", si.queries.Oldest())
	}))
	stats.Publish("SchemaReloadTime", stats.DurationFunc(si.ticks.Interval))
	_ = stats.NewMultiCountersFunc("TableStats", []string{"Table", "Stats"}, si.getTableStats)
	_ = stats.NewMultiCountersFunc("TableInvalidations", []string{"Table"}, si.getTableInvalidations)
	_ = stats.NewMultiCountersFunc("QueryCounts", []string{"Table", "Plan"}, si.getQueryCount)
	_ = stats.NewMultiCountersFunc("QueryTimesNs", []string{"Table", "Plan"}, si.getQueryTime)
	_ = stats.NewMultiCountersFunc("QueryRowCounts", []string{"Table", "Plan"}, si.getQueryRowCount)
	_ = stats.NewMultiCountersFunc("QueryErrorCounts", []string{"Table", "Plan"}, si.getQueryErrorCount)
	http.Handle("/debug/query_plans", si)
	http.Handle("/debug/query_stats", si)
	http.Handle("/debug/table_stats", si)
	http.Handle("/debug/schema", si)
	return si
}
Code example #10
File: tx_pool.go Project: miffa/vitess
func NewTxPool(name string, capacity int, timeout, poolTimeout, idleTimeout time.Duration) *TxPool {
	axp := &TxPool{
		pool:        dbconnpool.NewConnectionPool(name, capacity, idleTimeout),
		activePool:  pools.NewNumbered(),
		lastId:      sync2.AtomicInt64(time.Now().UnixNano()),
		timeout:     sync2.AtomicDuration(timeout),
		poolTimeout: sync2.AtomicDuration(poolTimeout),
		ticks:       timer.NewTimer(timeout / 10),
		txStats:     stats.NewTimings("Transactions"),
	}
	// Careful: pool also exports name+"xxx" vars,
	// but we know it doesn't export Timeout.
	stats.Publish(name+"Timeout", stats.DurationFunc(axp.timeout.Get))
	stats.Publish(name+"PoolTimeout", stats.DurationFunc(axp.poolTimeout.Get))
	return axp
}
Code example #11
File: collector_test.go Project: CowLeo/vitess
func TestDurationFunc(t *testing.T) {
	f := func() time.Duration {
		return 42 * time.Minute
	}
	v := stats.DurationFunc(f)
	testMetric(t, v, nil,
		`Desc{fqName: "test_name", help: "test_help", constLabels: {}, variableLabels: []}`,
		`gauge:<value:2520 > `,
	)
}
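For reference, the expected gauge value of 2520 in this test is simply the 42-minute duration converted to seconds (42 × 60 = 2520); the Desc and gauge lines indicate that the collector under test exports a DurationFunc as a seconds-valued gauge.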
Code example #12
File: cache_pool.go Project: nangong92t/go_src
func NewCachePool(name string, rowCacheConfig RowCacheConfig, queryTimeout time.Duration, idleTimeout time.Duration) *CachePool {
	cp := &CachePool{name: name, idleTimeout: idleTimeout}
	if name != "" {
		cp.memcacheStats = NewMemcacheStats(cp, true, false, false)
		stats.Publish(name+"ConnPoolCapacity", stats.IntFunc(cp.Capacity))
		stats.Publish(name+"ConnPoolAvailable", stats.IntFunc(cp.Available))
		stats.Publish(name+"ConnPoolMaxCap", stats.IntFunc(cp.MaxCap))
		stats.Publish(name+"ConnPoolWaitCount", stats.IntFunc(cp.WaitCount))
		stats.Publish(name+"ConnPoolWaitTime", stats.DurationFunc(cp.WaitTime))
		stats.Publish(name+"ConnPoolIdleTimeout", stats.DurationFunc(cp.IdleTimeout))
	}
	http.Handle(statsURL, cp)

	if rowCacheConfig.Binary == "" {
		return cp
	}
	cp.rowCacheConfig = rowCacheConfig

	// Start with memcached defaults
	cp.capacity = 1024 - 50
	cp.port = "11211"
	if rowCacheConfig.Socket != "" {
		cp.port = rowCacheConfig.Socket
	}
	if rowCacheConfig.TcpPort > 0 {
		cp.port = strconv.Itoa(rowCacheConfig.TcpPort)
	}
	if rowCacheConfig.Connections > 0 {
		if rowCacheConfig.Connections <= 50 {
			log.Fatalf("insufficient capacity: %d", rowCacheConfig.Connections)
		}
		cp.capacity = rowCacheConfig.Connections - 50
	}

	seconds := uint64(queryTimeout / time.Second)
	// Add an additional grace period for
	// memcache expiry of deleted items
	if seconds != 0 {
		cp.DeleteExpiry = 2*seconds + 15
	}
	return cp
}
Code example #13
File: active_pool.go Project: chinna1986/vitess
func NewActivePool(name string, queryTimeout time.Duration, connKiller *ConnectionKiller) *ActivePool {
	ap := &ActivePool{
		pool:       pools.NewNumbered(),
		timeout:    sync2.AtomicDuration(queryTimeout),
		ticks:      timer.NewTimer(queryTimeout / 10),
		connKiller: connKiller,
	}
	stats.Publish(name+"Size", stats.IntFunc(ap.pool.Size))
	stats.Publish(
		name+"Timeout",
		stats.DurationFunc(func() time.Duration { return ap.timeout.Get() }),
	)
	return ap
}
Code example #14
File: active_tx_pool.go Project: qman1989/vitess
func NewActiveTxPool(name string, timeout time.Duration) *ActiveTxPool {
	axp := &ActiveTxPool{
		pool:    pools.NewNumbered(),
		lastId:  sync2.AtomicInt64(time.Now().UnixNano()),
		timeout: sync2.AtomicDuration(timeout),
		ticks:   timer.NewTimer(timeout / 10),
		txStats: stats.NewTimings("Transactions"),
	}
	stats.Publish(name+"Size", stats.IntFunc(axp.pool.Size))
	stats.Publish(
		name+"Timeout",
		stats.DurationFunc(func() time.Duration { return axp.timeout.Get() }),
	)
	return axp
}
Code example #15
File: query_engine.go Project: fengshao0907/vitess
// NewQueryEngine creates a new QueryEngine.
// This is a singleton class.
// You must call this only once.
func NewQueryEngine(config Config) *QueryEngine {
	qe := &QueryEngine{enableAutoCommit: config.EnableAutoCommit}
	qe.queryServiceStats = NewQueryServiceStats(config.StatsPrefix, config.EnablePublishStats)
	qe.schemaInfo = NewSchemaInfo(
		config.QueryCacheSize,
		config.StatsPrefix,
		map[string]string{
			debugQueryPlansKey: config.DebugURLPrefix + "/query_plans",
			debugQueryStatsKey: config.DebugURLPrefix + "/query_stats",
			debugTableStatsKey: config.DebugURLPrefix + "/table_stats",
			debugSchemaKey:     config.DebugURLPrefix + "/schema",
		},
		time.Duration(config.SchemaReloadTime*1e9),
		time.Duration(config.IdleTimeout*1e9),
		config.EnablePublishStats,
		qe.queryServiceStats,
	)

	// Pools
	qe.cachePool = NewCachePool(
		config.PoolNamePrefix+"Rowcache",
		config.RowCache,
		time.Duration(config.IdleTimeout*1e9),
		config.DebugURLPrefix+"/memcache/",
		config.EnablePublishStats,
		qe.queryServiceStats,
	)
	qe.connPool = NewConnPool(
		config.PoolNamePrefix+"ConnPool",
		config.PoolSize,
		time.Duration(config.IdleTimeout*1e9),
		config.EnablePublishStats,
		qe.queryServiceStats,
	)
	qe.streamConnPool = NewConnPool(
		config.PoolNamePrefix+"StreamConnPool",
		config.StreamPoolSize,
		time.Duration(config.IdleTimeout*1e9),
		config.EnablePublishStats,
		qe.queryServiceStats,
	)

	// Services
	qe.txPool = NewTxPool(
		config.PoolNamePrefix+"TransactionPool",
		config.StatsPrefix,
		config.TransactionCap,
		time.Duration(config.TransactionTimeout*1e9),
		time.Duration(config.TxPoolTimeout*1e9),
		time.Duration(config.IdleTimeout*1e9),
		config.EnablePublishStats,
		qe.queryServiceStats,
	)
	qe.consolidator = sync2.NewConsolidator()
	http.Handle(config.DebugURLPrefix+"/consolidations", qe.consolidator)
	qe.streamQList = NewQueryList()

	// Vars
	qe.queryTimeout.Set(time.Duration(config.QueryTimeout * 1e9))
	qe.spotCheckFreq = sync2.NewAtomicInt64(int64(config.SpotCheckRatio * spotCheckMultiplier))
	if config.StrictMode {
		qe.strictMode.Set(1)
	}
	qe.strictTableAcl = config.StrictTableAcl
	qe.enableTableAclDryRun = config.EnableTableAclDryRun
	qe.exemptACL = config.TableAclExemptACL
	qe.maxResultSize = sync2.NewAtomicInt64(int64(config.MaxResultSize))
	qe.maxDMLRows = sync2.NewAtomicInt64(int64(config.MaxDMLRows))
	qe.streamBufferSize = sync2.NewAtomicInt64(int64(config.StreamBufferSize))

	// Loggers
	qe.accessCheckerLogger = logutil.NewThrottledLogger("accessChecker", 1*time.Second)

	var tableACLAllowedName string
	var tableACLDeniedName string
	var tableACLPseudoDeniedName string
	// Stats
	if config.EnablePublishStats {
		stats.Publish(config.StatsPrefix+"MaxResultSize", stats.IntFunc(qe.maxResultSize.Get))
		stats.Publish(config.StatsPrefix+"MaxDMLRows", stats.IntFunc(qe.maxDMLRows.Get))
		stats.Publish(config.StatsPrefix+"StreamBufferSize", stats.IntFunc(qe.streamBufferSize.Get))
		stats.Publish(config.StatsPrefix+"QueryTimeout", stats.DurationFunc(qe.queryTimeout.Get))
		stats.Publish(config.StatsPrefix+"RowcacheSpotCheckRatio", stats.FloatFunc(func() float64 {
			return float64(qe.spotCheckFreq.Get()) / spotCheckMultiplier
		}))
		stats.Publish(config.StatsPrefix+"TableACLExemptCount", stats.IntFunc(qe.tableaclExemptCount.Get))
		tableACLAllowedName = "TableACLAllowed"
		tableACLDeniedName = "TableACLDenied"
		tableACLPseudoDeniedName = "TableACLPseudoDenied"
	}

	qe.tableaclAllowed = stats.NewMultiCounters(tableACLAllowedName, []string{"TableName", "TableGroup", "PlanID", "Username"})
	qe.tableaclDenied = stats.NewMultiCounters(tableACLDeniedName, []string{"TableName", "TableGroup", "PlanID", "Username"})
	qe.tableaclPseudoDenied = stats.NewMultiCounters(tableACLPseudoDeniedName, []string{"TableName", "TableGroup", "PlanID", "Username"})

	return qe
}
Code example #16
File: query_engine.go Project: miffa/vitess
// NewQueryEngine creates a new QueryEngine.
// This is a singleton class.
// You must call this only once.
func NewQueryEngine(config Config) *QueryEngine {
	qe := &QueryEngine{}
	qe.schemaInfo = NewSchemaInfo(
		config.QueryCacheSize,
		time.Duration(config.SchemaReloadTime*1e9),
		time.Duration(config.IdleTimeout*1e9),
	)

	mysqlStats = stats.NewTimings("Mysql")

	// Pools
	qe.cachePool = NewCachePool(
		"Rowcache",
		config.RowCache,
		time.Duration(config.QueryTimeout*1e9),
		time.Duration(config.IdleTimeout*1e9),
	)
	qe.connPool = dbconnpool.NewConnectionPool(
		"ConnPool",
		config.PoolSize,
		time.Duration(config.IdleTimeout*1e9),
	)
	qe.streamConnPool = dbconnpool.NewConnectionPool(
		"StreamConnPool",
		config.StreamPoolSize,
		time.Duration(config.IdleTimeout*1e9),
	)

	// Services
	qe.txPool = NewTxPool(
		"TransactionPool",
		config.TransactionCap,
		time.Duration(config.TransactionTimeout*1e9),
		time.Duration(config.TxPoolTimeout*1e9),
		time.Duration(config.IdleTimeout*1e9),
	)
	qe.connKiller = NewConnectionKiller(1, time.Duration(config.IdleTimeout*1e9))
	qe.consolidator = NewConsolidator()
	qe.invalidator = NewRowcacheInvalidator(qe)
	qe.streamQList = NewQueryList(qe.connKiller)

	// Vars
	qe.queryTimeout.Set(time.Duration(config.QueryTimeout * 1e9))
	qe.spotCheckFreq = sync2.AtomicInt64(config.SpotCheckRatio * spotCheckMultiplier)
	if config.StrictMode {
		qe.strictMode.Set(1)
	}
	qe.strictTableAcl = config.StrictTableAcl
	qe.maxResultSize = sync2.AtomicInt64(config.MaxResultSize)
	qe.streamBufferSize = sync2.AtomicInt64(config.StreamBufferSize)

	// loggers
	qe.accessCheckerLogger = logutil.NewThrottledLogger("accessChecker", 1*time.Second)

	// Stats
	stats.Publish("MaxResultSize", stats.IntFunc(qe.maxResultSize.Get))
	stats.Publish("StreamBufferSize", stats.IntFunc(qe.streamBufferSize.Get))
	stats.Publish("QueryTimeout", stats.DurationFunc(qe.queryTimeout.Get))
	queryStats = stats.NewTimings("Queries")
	QPSRates = stats.NewRates("QPS", queryStats, 15, 60*time.Second)
	waitStats = stats.NewTimings("Waits")
	killStats = stats.NewCounters("Kills")
	infoErrors = stats.NewCounters("InfoErrors")
	errorStats = stats.NewCounters("Errors")
	internalErrors = stats.NewCounters("InternalErrors")
	resultStats = stats.NewHistogram("Results", resultBuckets)
	stats.Publish("RowcacheSpotCheckRatio", stats.FloatFunc(func() float64 {
		return float64(qe.spotCheckFreq.Get()) / spotCheckMultiplier
	}))
	spotCheckCount = stats.NewInt("RowcacheSpotCheckCount")

	return qe
}
Code example #17
File: query_engine.go Project: pranjal5215/vitess
// NewQueryEngine creates a new QueryEngine.
// This is a singleton class.
// You must call this only once.
func NewQueryEngine(config Config) *QueryEngine {
	qe := &QueryEngine{enableAutoCommit: config.EnableAutoCommit}
	qe.queryServiceStats = NewQueryServiceStats(config.StatsPrefix, config.EnablePublishStats)
	qe.schemaInfo = NewSchemaInfo(
		config.QueryCacheSize,
		config.StatsPrefix,
		map[string]string{
			debugQueryPlansKey: config.DebugURLPrefix + "/query_plans",
			debugQueryStatsKey: config.DebugURLPrefix + "/query_stats",
			debugTableStatsKey: config.DebugURLPrefix + "/table_stats",
			debugSchemaKey:     config.DebugURLPrefix + "/schema",
		},
		time.Duration(config.SchemaReloadTime*1e9),
		time.Duration(config.IdleTimeout*1e9),
		config.EnablePublishStats,
		qe.queryServiceStats,
	)

	// Pools
	qe.cachePool = NewCachePool(
		config.PoolNamePrefix+"Rowcache",
		config.RowCache,
		time.Duration(config.IdleTimeout*1e9),
		config.DebugURLPrefix+"/memcache/",
		config.EnablePublishStats,
		qe.queryServiceStats,
	)
	qe.connPool = NewConnPool(
		config.PoolNamePrefix+"ConnPool",
		config.PoolSize,
		time.Duration(config.IdleTimeout*1e9),
		config.EnablePublishStats,
		qe.queryServiceStats,
	)
	qe.streamConnPool = NewConnPool(
		config.PoolNamePrefix+"StreamConnPool",
		config.StreamPoolSize,
		time.Duration(config.IdleTimeout*1e9),
		config.EnablePublishStats,
		qe.queryServiceStats,
	)

	// Services
	qe.txPool = NewTxPool(
		config.PoolNamePrefix+"TransactionPool",
		config.StatsPrefix,
		config.TransactionCap,
		time.Duration(config.TransactionTimeout*1e9),
		time.Duration(config.TxPoolTimeout*1e9),
		time.Duration(config.IdleTimeout*1e9),
		config.EnablePublishStats,
		qe.queryServiceStats,
	)
	qe.consolidator = sync2.NewConsolidator()
	http.Handle(config.DebugURLPrefix+"/consolidations", qe.consolidator)
	qe.invalidator = NewRowcacheInvalidator(config.StatsPrefix, qe, config.EnablePublishStats)
	qe.streamQList = NewQueryList()

	// Vars
	qe.queryTimeout.Set(time.Duration(config.QueryTimeout * 1e9))
	qe.spotCheckFreq = sync2.AtomicInt64(config.SpotCheckRatio * spotCheckMultiplier)
	if config.StrictMode {
		qe.strictMode.Set(1)
	}
	qe.strictTableAcl = config.StrictTableAcl
	qe.maxResultSize = sync2.AtomicInt64(config.MaxResultSize)
	qe.maxDMLRows = sync2.AtomicInt64(config.MaxDMLRows)
	qe.streamBufferSize = sync2.AtomicInt64(config.StreamBufferSize)

	// Loggers
	qe.accessCheckerLogger = logutil.NewThrottledLogger("accessChecker", 1*time.Second)

	// Stats
	if config.EnablePublishStats {
		stats.Publish(config.StatsPrefix+"MaxResultSize", stats.IntFunc(qe.maxResultSize.Get))
		stats.Publish(config.StatsPrefix+"MaxDMLRows", stats.IntFunc(qe.maxDMLRows.Get))
		stats.Publish(config.StatsPrefix+"StreamBufferSize", stats.IntFunc(qe.streamBufferSize.Get))
		stats.Publish(config.StatsPrefix+"QueryTimeout", stats.DurationFunc(qe.queryTimeout.Get))
		stats.Publish(config.StatsPrefix+"RowcacheSpotCheckRatio", stats.FloatFunc(func() float64 {
			return float64(qe.spotCheckFreq.Get()) / spotCheckMultiplier
		}))
	}
	return qe
}