Example No. 1
// NewRowcacheInvalidator creates a new RowcacheInvalidator.
// Just like QueryEngine, this is a singleton class.
// You must call this only once.
func NewRowcacheInvalidator(qe *QueryEngine) *RowcacheInvalidator {
	rci := &RowcacheInvalidator{qe: qe}
	stats.Publish("RowcacheInvalidatorState", stats.StringFunc(rci.svm.StateName))
	stats.Publish("RowcacheInvalidatorPosition", stats.StringFunc(rci.PositionString))
	stats.Publish("RowcacheInvalidatorLagSeconds", stats.IntFunc(rci.lagSeconds.Get))
	return rci
}
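A note on the pattern shared by all of these examples: stats.Publish registers a named variable whose value is computed by the supplied closure each time the stats page is scraped, so the export always reflects current state. Below is a minimal, self-contained sketch of the same idea using only the standard library's expvar package; it is an analogue for illustration, not the stats helpers used above, and lagSeconds is a hypothetical stand-in for a field like rci.lagSeconds.

package main

import (
	"expvar"
	"fmt"
	"sync/atomic"
)

// lagSeconds stands in for a field like rci.lagSeconds above; it is a
// hypothetical value for illustration, not part of the original code.
var lagSeconds int64

func main() {
	// expvar.Func is re-evaluated on every /debug/vars scrape, so the
	// published value always reflects the current state.
	expvar.Publish("RowcacheInvalidatorLagSeconds", expvar.Func(func() interface{} {
		return atomic.LoadInt64(&lagSeconds)
	}))

	atomic.StoreInt64(&lagSeconds, 3)
	fmt.Println(expvar.Get("RowcacheInvalidatorLagSeconds").String()) // prints 3
}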
Example No. 2
func (memstats *MemcacheStats) publishMainStats() {
	memstats.mu.Lock()
	defer memstats.mu.Unlock()
	for k, isstr := range mainStringMetrics {
		key := k
		if isstr {
			memstats.main[key] = ""
			stats.Publish(memstats.statsPrefix+"Memcache"+formatKey(key), stats.StringFunc(func() string {
				memstats.mu.Lock()
				defer memstats.mu.Unlock()
				return memstats.main[key]
			}))
		} else {
			memstats.main[key] = "0"
			stats.Publish(memstats.statsPrefix+"Memcache"+formatKey(key), stats.IntFunc(func() int64 {
				memstats.mu.Lock()
				defer memstats.mu.Unlock()
				ival, err := strconv.ParseInt(memstats.main[key], 10, 64)
				if err != nil {
					log.Errorf("value '%v' for key %v is not an int", memstats.main[key], key)
					memstats.queryServiceStats.InternalErrors.Add("MemcacheStats", 1)
					return -1
				}
				return ival
			}))
		}
	}
}
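The key := k line matters: it creates a per-iteration copy so that each published closure captures its own key rather than the shared loop variable (which, before Go 1.22, is reused across iterations). A small illustrative sketch of the difference, with hypothetical metric names:

package main

import "fmt"

func main() {
	metrics := []string{"hits", "misses", "evictions"}

	var wrong, right []func() string
	for _, m := range metrics {
		wrong = append(wrong, func() string { return m }) // all share the loop variable (pre-Go 1.22)
		m := m                                            // per-iteration copy, as in `key := k` above
		right = append(right, func() string { return m })
	}

	for i := range metrics {
		fmt.Println(wrong[i](), right[i]())
	}
}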
Example No. 3
// NewTabletServer creates an instance of TabletServer. Only one instance
// of TabletServer can be created per process.
func NewTabletServer(config Config) *TabletServer {
	tsv := &TabletServer{
		config:              config,
		QueryTimeout:        sync2.NewAtomicDuration(time.Duration(config.QueryTimeout * 1e9)),
		BeginTimeout:        sync2.NewAtomicDuration(time.Duration(config.TxPoolTimeout * 1e9)),
		checkMySQLThrottler: sync2.NewSemaphore(1, 0),
		streamHealthMap:     make(map[int]chan<- *querypb.StreamHealthResponse),
		sessionID:           Rand(),
		history:             history.New(10),
	}
	tsv.qe = NewQueryEngine(tsv, config)
	tsv.invalidator = NewRowcacheInvalidator(config.StatsPrefix, tsv, tsv.qe, config.EnablePublishStats)
	if config.EnablePublishStats {
		stats.Publish(config.StatsPrefix+"TabletState", stats.IntFunc(func() int64 {
			tsv.mu.Lock()
			state := tsv.state
			tsv.mu.Unlock()
			return state
		}))
		stats.Publish(config.StatsPrefix+"QueryTimeout", stats.DurationFunc(tsv.QueryTimeout.Get))
		stats.Publish(config.StatsPrefix+"BeginTimeout", stats.DurationFunc(tsv.BeginTimeout.Get))
		stats.Publish(config.StatsPrefix+"TabletStateName", stats.StringFunc(tsv.GetState))
	}
	return tsv
}
Example No. 4
// NewRowcacheInvalidator creates a new RowcacheInvalidator.
// Just like QueryEngine, this is a singleton class.
// You must call this only once.
func NewRowcacheInvalidator(qe *QueryEngine) *RowcacheInvalidator {
	rci := &RowcacheInvalidator{qe: qe}
	stats.Publish("RowcacheInvalidatorState", stats.StringFunc(rci.svm.StateName))
	stats.Publish("RowcacheInvalidatorPosition", stats.StringFunc(rci.GetGTIDString))
	stats.Publish("RowcacheInvalidatorTimestamp", stats.IntFunc(rci.Timestamp.Get))
	return rci
}
Example No. 5
func (s *MemcacheStats) publishSlabsStats() {
	s.slabs.mu.Lock()
	defer s.slabs.mu.Unlock()
	for key, isSingle := range slabsSingleMetrics {
		key := key
		s.slabs.stats[key] = make(map[string]int64)
		if isSingle {
			f := func() int64 {
				s.slabs.mu.Lock()
				defer s.slabs.mu.Unlock()
				s.updateSlabsStats()
				return s.slabs.stats[key][""]
			}
			stats.Publish(s.cachePool.name+"MemcacheSlabs"+formatKey(key), stats.IntFunc(f))
			continue
		}
		f := func() map[string]int64 {
			s.slabs.mu.Lock()
			defer s.slabs.mu.Unlock()
			s.updateSlabsStats()
			return copyMap(s.slabs.stats[key])
		}
		stats.Publish(s.cachePool.name+"MemcacheSlabs"+formatKey(key), stats.CountersFunc(f))
	}
}
Example No. 6
// NewTxPool creates a new TxPool. It's not operational until it's Open'd.
func NewTxPool(
	name string,
	txStatsPrefix string,
	capacity int,
	timeout time.Duration,
	poolTimeout time.Duration,
	idleTimeout time.Duration,
	enablePublishStats bool,
	qStats *QueryServiceStats) *TxPool {

	txStatsName := ""
	if enablePublishStats {
		txStatsName = txStatsPrefix + "Transactions"
	}

	axp := &TxPool{
		pool:              NewConnPool(name, capacity, idleTimeout, enablePublishStats, qStats),
		activePool:        pools.NewNumbered(),
		lastID:            sync2.AtomicInt64(time.Now().UnixNano()),
		timeout:           sync2.AtomicDuration(timeout),
		poolTimeout:       sync2.AtomicDuration(poolTimeout),
		ticks:             timer.NewTimer(timeout / 10),
		txStats:           stats.NewTimings(txStatsName),
		queryServiceStats: qStats,
	}
	// Careful: pool also exports name+"xxx" vars,
	// but we know it doesn't export Timeout.
	if enablePublishStats {
		stats.Publish(name+"Timeout", stats.DurationFunc(axp.timeout.Get))
		stats.Publish(name+"PoolTimeout", stats.DurationFunc(axp.poolTimeout.Get))
	}
	return axp
}
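Note how the empty txStatsName opts the transaction timings out of export when publication is disabled, and how the Timeout gauges are registered only inside the enablePublishStats guard. A minimal standard-library sketch of that publish-only-when-asked guard; the Config struct, field names, and metric name here are hypothetical:

package main

import (
	"expvar"
	"fmt"
	"time"
)

// Config is a hypothetical stand-in for the real tabletserver Config.
type Config struct {
	StatsPrefix        string
	EnablePublishStats bool
}

func newPool(cfg Config, timeout time.Duration) {
	if cfg.EnablePublishStats {
		// Register the gauge only when publication is enabled, mirroring
		// the enablePublishStats guard in NewTxPool above.
		expvar.Publish(cfg.StatsPrefix+"Timeout", expvar.Func(func() interface{} {
			return int64(timeout)
		}))
	}
}

func main() {
	newPool(Config{StatsPrefix: "Test", EnablePublishStats: true}, time.Second)
	fmt.Println(expvar.Get("TestTimeout").String())
}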
Example No. 7
func NewSchemaInfo(queryCacheSize int, reloadTime time.Duration, idleTimeout time.Duration) *SchemaInfo {
	si := &SchemaInfo{
		queries:  cache.NewLRUCache(int64(queryCacheSize)),
		rules:    NewQueryRules(),
		connPool: dbconnpool.NewConnectionPool("", 2, idleTimeout),
		ticks:    timer.NewTimer(reloadTime),
	}
	stats.Publish("QueryCacheLength", stats.IntFunc(si.queries.Length))
	stats.Publish("QueryCacheSize", stats.IntFunc(si.queries.Size))
	stats.Publish("QueryCacheCapacity", stats.IntFunc(si.queries.Capacity))
	stats.Publish("QueryCacheOldest", stats.StringFunc(func() string {
		return fmt.Sprintf("%v", si.queries.Oldest())
	}))
	stats.Publish("SchemaReloadTime", stats.DurationFunc(si.ticks.Interval))
	_ = stats.NewMultiCountersFunc("TableStats", []string{"Table", "Stats"}, si.getTableStats)
	_ = stats.NewMultiCountersFunc("TableInvalidations", []string{"Table"}, si.getTableInvalidations)
	_ = stats.NewMultiCountersFunc("QueryCounts", []string{"Table", "Plan"}, si.getQueryCount)
	_ = stats.NewMultiCountersFunc("QueryTimesNs", []string{"Table", "Plan"}, si.getQueryTime)
	_ = stats.NewMultiCountersFunc("QueryRowCounts", []string{"Table", "Plan"}, si.getQueryRowCount)
	_ = stats.NewMultiCountersFunc("QueryErrorCounts", []string{"Table", "Plan"}, si.getQueryErrorCount)
	http.Handle("/debug/query_plans", si)
	http.Handle("/debug/query_stats", si)
	http.Handle("/debug/table_stats", si)
	http.Handle("/debug/schema", si)
	return si
}
Example No. 8
func (s *MemcacheStats) publishMainStats() {
	s.main.mu.Lock()
	defer s.main.mu.Unlock()
	for key, isstr := range mainStringMetrics {
		key := key
		if isstr {
			s.main.stats[key] = ""
			f := func() string {
				s.main.mu.Lock()
				defer s.main.mu.Unlock()
				s.updateMainStats()
				return s.main.stats[key]
			}
			stats.Publish(s.cachePool.name+"Memcache"+formatKey(key), stats.StringFunc(f))
			continue
		}
		s.main.stats[key] = "0"
		f := func() int64 {
			s.main.mu.Lock()
			defer s.main.mu.Unlock()
			s.updateMainStats()
			ival, err := strconv.ParseInt(s.main.stats[key], 10, 64)
			if err != nil {
				log.Errorf("value '%v' for key %v is not an int", s.main.stats[key], key)
				return -1
			}
			return ival
		}
		stats.Publish(s.cachePool.name+"Memcache"+formatKey(key), stats.IntFunc(f))
	}
}
Example No. 9
// NewSchemaInfo creates a new SchemaInfo.
func NewSchemaInfo(
	queryCacheSize int,
	statsPrefix string,
	endpoints map[string]string,
	reloadTime time.Duration,
	idleTimeout time.Duration,
	enablePublishStats bool,
	queryServiceStats *QueryServiceStats) *SchemaInfo {
	si := &SchemaInfo{
		queries:    cache.NewLRUCache(int64(queryCacheSize)),
		connPool:   NewConnPool("", 2, idleTimeout, enablePublishStats, queryServiceStats),
		ticks:      timer.NewTimer(reloadTime),
		endpoints:  endpoints,
		reloadTime: reloadTime,
	}
	if enablePublishStats {
		stats.Publish(statsPrefix+"QueryCacheLength", stats.IntFunc(si.queries.Length))
		stats.Publish(statsPrefix+"QueryCacheSize", stats.IntFunc(si.queries.Size))
		stats.Publish(statsPrefix+"QueryCacheCapacity", stats.IntFunc(si.queries.Capacity))
		stats.Publish(statsPrefix+"QueryCacheOldest", stats.StringFunc(func() string {
			return fmt.Sprintf("%v", si.queries.Oldest())
		}))
		stats.Publish(statsPrefix+"SchemaReloadTime", stats.DurationFunc(si.ticks.Interval))
		_ = stats.NewMultiCountersFunc(statsPrefix+"RowcacheStats", []string{"Table", "Stats"}, si.getRowcacheStats)
		_ = stats.NewMultiCountersFunc(statsPrefix+"RowcacheInvalidations", []string{"Table"}, si.getRowcacheInvalidations)
		_ = stats.NewMultiCountersFunc(statsPrefix+"QueryCounts", []string{"Table", "Plan"}, si.getQueryCount)
		_ = stats.NewMultiCountersFunc(statsPrefix+"QueryTimesNs", []string{"Table", "Plan"}, si.getQueryTime)
		_ = stats.NewMultiCountersFunc(statsPrefix+"QueryRowCounts", []string{"Table", "Plan"}, si.getQueryRowCount)
		_ = stats.NewMultiCountersFunc(statsPrefix+"QueryErrorCounts", []string{"Table", "Plan"}, si.getQueryErrorCount)
	}
	for _, ep := range endpoints {
		http.Handle(ep, si)
	}
	return si
}
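The http.Handle(ep, si) loop works because SchemaInfo implements http.Handler, so every configured endpoint is served by the same ServeHTTP method. A rough sketch of that shape, with a hypothetical debugHandler type standing in for SchemaInfo:

package main

import (
	"fmt"
	"log"
	"net/http"
)

// debugHandler is a hypothetical stand-in for SchemaInfo's http.Handler side;
// it is not the real implementation.
type debugHandler struct{}

func (h *debugHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// SchemaInfo's real ServeHTTP dispatches on r.URL.Path; this just echoes it.
	fmt.Fprintf(w, "debug output for %s\n", r.URL.Path)
}

func main() {
	h := &debugHandler{}
	for _, ep := range []string{"/debug/query_plans", "/debug/query_stats"} {
		http.Handle(ep, h)
	}
	log.Fatal(http.ListenAndServe(":8080", nil))
}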
Example No. 10
// NewSqlQuery creates an instance of SqlQuery. Only one instance
// of SqlQuery can be created per process.
func NewSqlQuery(config Config) *SqlQuery {
	sq := &SqlQuery{}
	sq.qe = NewQueryEngine(config)
	stats.Publish("TabletState", stats.IntFunc(sq.state.Get))
	stats.Publish("TabletStateName", stats.StringFunc(sq.GetState))
	return sq
}
Example No. 11
func NewQueryEngine(config Config) *QueryEngine {
	qe := &QueryEngine{}
	qe.cachePool = NewCachePool("CachePool", config.RowCache, time.Duration(config.QueryTimeout*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.schemaInfo = NewSchemaInfo(config.QueryCacheSize, time.Duration(config.SchemaReloadTime*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.connPool = NewConnectionPool("ConnPool", config.PoolSize, time.Duration(config.IdleTimeout*1e9))
	qe.streamConnPool = NewConnectionPool("StreamConnPool", config.StreamPoolSize, time.Duration(config.IdleTimeout*1e9))
	qe.streamTokens = sync2.NewSemaphore(config.StreamExecThrottle, time.Duration(config.StreamWaitTimeout*1e9))
	qe.reservedPool = NewReservedPool("ReservedPool")
	qe.txPool = NewConnectionPool("TxPool", config.TransactionCap, time.Duration(config.IdleTimeout*1e9)) // connections in pool has to be > transactionCap
	qe.activeTxPool = NewActiveTxPool("ActiveTxPool", time.Duration(config.TransactionTimeout*1e9))
	qe.activePool = NewActivePool("ActivePool", time.Duration(config.QueryTimeout*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.consolidator = NewConsolidator()
	qe.spotCheckFreq = sync2.AtomicInt64(config.SpotCheckRatio * SPOT_CHECK_MULTIPLIER)
	qe.maxResultSize = sync2.AtomicInt64(config.MaxResultSize)
	qe.streamBufferSize = sync2.AtomicInt64(config.StreamBufferSize)
	stats.Publish("MaxResultSize", stats.IntFunc(qe.maxResultSize.Get))
	stats.Publish("StreamBufferSize", stats.IntFunc(qe.streamBufferSize.Get))
	queryStats = stats.NewTimings("Queries")
	stats.NewRates("QPS", queryStats, 15, 60e9)
	waitStats = stats.NewTimings("Waits")
	killStats = stats.NewCounters("Kills")
	errorStats = stats.NewCounters("Errors")
	resultStats = stats.NewHistogram("Results", resultBuckets)
	stats.Publish("SpotCheckRatio", stats.FloatFunc(func() float64 {
		return float64(qe.spotCheckFreq.Get()) / SPOT_CHECK_MULTIPLIER
	}))
	spotCheckCount = stats.NewInt("SpotCheckCount")
	return qe
}
Example No. 12
func (s *MemcacheStats) publishMainStats() {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.main = make(map[string]string)
	for key, isstr := range mainStringMetrics {
		key := key
		if isstr {
			s.main[key] = ""
			stats.Publish(s.cachePool.name+"Memcache"+formatKey(key), stats.StringFunc(func() string {
				s.mu.Lock()
				defer s.mu.Unlock()
				return s.main[key]
			}))
		} else {
			s.main[key] = "0"
			stats.Publish(s.cachePool.name+"Memcache"+formatKey(key), stats.IntFunc(func() int64 {
				s.mu.Lock()
				defer s.mu.Unlock()
				ival, err := strconv.ParseInt(s.main[key], 10, 64)
				if err != nil {
					log.Errorf("value '%v' for key %v is not an int", s.main[key], key)
					internalErrors.Add("MemcacheStats", 1)
					return -1
				}
				return ival
			}))
		}
	}
}
Example No. 13
func init() {
	CacheInvalidationProcessor = new(InvalidationProcessor)
	stats.Publish("RowcacheInvalidationState", stats.StringFunc(func() string {
		return rcinvStateNames[CacheInvalidationProcessor.state.Get()]
	}))
	stats.Publish("RowcacheInvalidationCheckPoint", stats.IntFunc(func() int64 {
		return CacheInvalidationProcessor.GroupId.Get()
	}))
}
Example No. 14
func NewSqlQuery(config Config) *SqlQuery {
	sq := &SqlQuery{}
	sq.qe = NewQueryEngine(config)
	sq.rci = NewRowcacheInvalidator(sq.qe)
	stats.PublishJSONFunc("Voltron", sq.statsJSON)
	stats.Publish("TabletState", stats.IntFunc(sq.state.Get))
	stats.Publish("TabletStateName", stats.StringFunc(sq.GetState))
	return sq
}
Example No. 15
// NewRowcacheInvalidator creates a new RowcacheInvalidator.
// Just like QueryEngine, this is a singleton class.
// You must call this only once.
func NewRowcacheInvalidator(statsPrefix string, checker MySQLChecker, qe *QueryEngine, enablePublishStats bool) *RowcacheInvalidator {
	rci := &RowcacheInvalidator{checker: checker, qe: qe}
	if enablePublishStats {
		stats.Publish(statsPrefix+"RowcacheInvalidatorState", stats.StringFunc(rci.svm.StateName))
		stats.Publish(statsPrefix+"RowcacheInvalidatorPosition", stats.StringFunc(rci.PositionString))
		stats.Publish(statsPrefix+"RowcacheInvalidatorLagSeconds", stats.IntFunc(rci.lagSeconds.Get))
	}
	return rci
}
Example No. 16
// NewQueryEngine creates a new QueryEngine.
// This is a singleton class.
// You must call this only once.
func NewQueryEngine(config Config) *QueryEngine {
	qe := &QueryEngine{}
	qe.schemaInfo = NewSchemaInfo(config.QueryCacheSize, time.Duration(config.SchemaReloadTime*1e9), time.Duration(config.IdleTimeout*1e9))

	mysqlStats = stats.NewTimings("Mysql")

	// Pools
	qe.cachePool = NewCachePool("Rowcache", config.RowCache, time.Duration(config.QueryTimeout*1e9), time.Duration(config.IdleTimeout*1e9))
	qe.connPool = dbconnpool.NewConnectionPool("ConnPool", config.PoolSize, time.Duration(config.IdleTimeout*1e9))
	qe.streamConnPool = dbconnpool.NewConnectionPool("StreamConnPool", config.StreamPoolSize, time.Duration(config.IdleTimeout*1e9))
	qe.txPool = dbconnpool.NewConnectionPool("TransactionPool", config.TransactionCap, time.Duration(config.IdleTimeout*1e9)) // connections in pool has to be > transactionCap

	// Services
	qe.activeTxPool = NewActiveTxPool("ActiveTransactionPool", time.Duration(config.TransactionTimeout*1e9))
	qe.connKiller = NewConnectionKiller(1, time.Duration(config.IdleTimeout*1e9))
	qe.activePool = NewActivePool("ActivePool", time.Duration(config.QueryTimeout*1e9), qe.connKiller)
	qe.consolidator = NewConsolidator()
	qe.invalidator = NewRowcacheInvalidator(qe)
	qe.streamQList = NewQueryList(qe.connKiller)

	// Vars
	qe.spotCheckFreq = sync2.AtomicInt64(config.SpotCheckRatio * SPOT_CHECK_MULTIPLIER)
	if config.StrictMode {
		qe.strictMode.Set(1)
	}
	qe.strictTableAcl = config.StrictTableAcl
	qe.maxResultSize = sync2.AtomicInt64(config.MaxResultSize)
	qe.streamBufferSize = sync2.AtomicInt64(config.StreamBufferSize)

	// loggers
	qe.accessCheckerLogger = logutil.NewThrottledLogger("accessChecker", 1*time.Second)

	// Stats
	stats.Publish("MaxResultSize", stats.IntFunc(qe.maxResultSize.Get))
	stats.Publish("StreamBufferSize", stats.IntFunc(qe.streamBufferSize.Get))
	queryStats = stats.NewTimings("Queries")
	QPSRates = stats.NewRates("QPS", queryStats, 15, 60*time.Second)
	waitStats = stats.NewTimings("Waits")
	killStats = stats.NewCounters("Kills")
	infoErrors = stats.NewCounters("InfoErrors")
	errorStats = stats.NewCounters("Errors")
	internalErrors = stats.NewCounters("InternalErrors")
	resultStats = stats.NewHistogram("Results", resultBuckets)
	stats.Publish("RowcacheSpotCheckRatio", stats.FloatFunc(func() float64 {
		return float64(qe.spotCheckFreq.Get()) / SPOT_CHECK_MULTIPLIER
	}))
	spotCheckCount = stats.NewInt("RowcacheSpotCheckCount")

	return qe
}
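The recurring time.Duration(config.X*1e9) conversions assume the config fields hold seconds as float64 while time.Duration counts nanoseconds, so multiplying by 1e9 does the unit conversion. A quick check of that arithmetic with a hypothetical value:

package main

import (
	"fmt"
	"time"
)

func main() {
	// The config fields in these examples hold seconds as float64, while
	// time.Duration counts nanoseconds, so seconds * 1e9 is the conversion.
	idleTimeoutSeconds := 1.5 // hypothetical value
	d := time.Duration(idleTimeoutSeconds * 1e9)
	fmt.Println(d)                          // 1.5s
	fmt.Println(d == 1500*time.Millisecond) // true
}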
Example No. 17
func NewActivePool(name string, queryTimeout time.Duration, connKiller *ConnectionKiller) *ActivePool {
	ap := &ActivePool{
		pool:       pools.NewNumbered(),
		timeout:    sync2.AtomicDuration(queryTimeout),
		ticks:      timer.NewTimer(queryTimeout / 10),
		connKiller: connKiller,
	}
	stats.Publish(name+"Size", stats.IntFunc(ap.pool.Size))
	stats.Publish(
		name+"Timeout",
		stats.DurationFunc(func() time.Duration { return ap.timeout.Get() }),
	)
	return ap
}
Example No. 18
func NewActiveTxPool(name string, timeout time.Duration) *ActiveTxPool {
	axp := &ActiveTxPool{
		pool:    pools.NewNumbered(),
		lastId:  sync2.AtomicInt64(time.Now().UnixNano()),
		timeout: sync2.AtomicDuration(timeout),
		ticks:   timer.NewTimer(timeout / 10),
		txStats: stats.NewTimings("Transactions"),
	}
	stats.Publish(name+"Size", stats.IntFunc(axp.pool.Size))
	stats.Publish(
		name+"Timeout",
		stats.DurationFunc(func() time.Duration { return axp.timeout.Get() }),
	)
	return axp
}
Example No. 19
func RegisterBinlogPlayerMap(blm *BinlogPlayerMap) {
	stats.Publish("BinlogPlayerMapSize", stats.IntFunc(blm.size))
	stats.Publish("BinlogPlayerSecondsBehindMaster", stats.IntFunc(func() int64 {
		sbm := int64(0)
		blm.mu.Lock()
		for _, bpc := range blm.players {
			psbm := bpc.binlogPlayerStats.SecondsBehindMaster.Get()
			if psbm > sbm {
				sbm = psbm
			}
		}
		blm.mu.Unlock()
		return sbm
	}))
}
Example No. 20
// NewSqlQuery creates an instance of SqlQuery. Only one instance
// of SqlQuery can be created per process.
func NewSqlQuery(config Config) *SqlQuery {
	sq := &SqlQuery{
		config: config,
	}
	sq.qe = NewQueryEngine(config)
	if config.EnablePublishStats {
		stats.Publish(config.StatsPrefix+"TabletState", stats.IntFunc(func() int64 {
			sq.mu.Lock()
			state := sq.state
			sq.mu.Unlock()
			return state
		}))
		stats.Publish(config.StatsPrefix+"TabletStateName", stats.StringFunc(sq.GetState))
	}
	return sq
}
Example No. 21
func NewTxPool(name string, capacity int, timeout, poolTimeout, idleTimeout time.Duration) *TxPool {
	axp := &TxPool{
		pool:        dbconnpool.NewConnectionPool(name, capacity, idleTimeout),
		activePool:  pools.NewNumbered(),
		lastId:      sync2.AtomicInt64(time.Now().UnixNano()),
		timeout:     sync2.AtomicDuration(timeout),
		poolTimeout: sync2.AtomicDuration(poolTimeout),
		ticks:       timer.NewTimer(timeout / 10),
		txStats:     stats.NewTimings("Transactions"),
	}
	// Careful: pool also exports name+"xxx" vars,
	// but we know it doesn't export Timeout.
	stats.Publish(name+"Timeout", stats.DurationFunc(axp.timeout.Get))
	stats.Publish(name+"PoolTimeout", stats.DurationFunc(axp.poolTimeout.Get))
	return axp
}
Example No. 22
// NewSqlQuery creates an instance of SqlQuery. Only one instance
// of SqlQuery can be created per process.
func NewSqlQuery(config Config) *SqlQuery {
	sq := &SqlQuery{
		config:          config,
		streamHealthMap: make(map[int]chan<- *pb.StreamHealthResponse),
	}
	sq.qe = NewQueryEngine(config)
	if config.EnablePublishStats {
		stats.Publish(config.StatsPrefix+"TabletState", stats.IntFunc(func() int64 {
			sq.mu.Lock()
			state := sq.state
			sq.mu.Unlock()
			return state
		}))
		stats.Publish(config.StatsPrefix+"TabletStateName", stats.StringFunc(sq.GetState))
	}
	return sq
}
Example No. 23
// RegisterBinlogPlayerMap registers the varz for the players.
func RegisterBinlogPlayerMap(blm *BinlogPlayerMap) {
	stats.Publish("BinlogPlayerMapSize", stats.IntFunc(blm.size))
	stats.Publish("BinlogPlayerSecondsBehindMaster", stats.IntFunc(func() int64 {
		sbm := int64(0)
		blm.mu.Lock()
		for _, bpc := range blm.players {
			psbm := bpc.binlogPlayerStats.SecondsBehindMaster.Get()
			if psbm > sbm {
				sbm = psbm
			}
		}
		blm.mu.Unlock()
		return sbm
	}))
	stats.Publish("BinlogPlayerSecondsBehindMasterMap", stats.CountersFunc(func() map[string]int64 {
		blm.mu.Lock()
		result := make(map[string]int64, len(blm.players))
		for i, bpc := range blm.players {
			sbm := bpc.binlogPlayerStats.SecondsBehindMaster.Get()
			result[fmt.Sprintf("%v", i)] = sbm
		}
		blm.mu.Unlock()
		return result
	}))
	stats.Publish("BinlogPlayerSourceShardNameMap", stats.StringMapFunc(func() map[string]string {
		blm.mu.Lock()
		result := make(map[string]string, len(blm.players))
		for i, bpc := range blm.players {
			name := bpc.sourceShard.Keyspace + "/" + bpc.sourceShard.Shard
			result[fmt.Sprintf("%v", i)] = name
		}
		blm.mu.Unlock()
		return result
	}))
	stats.Publish("BinlogPlayerSourceTabletAliasMap", stats.StringMapFunc(func() map[string]string {
		blm.mu.Lock()
		result := make(map[string]string, len(blm.players))
		for i, bpc := range blm.players {
			bpc.playerMutex.Lock()
			result[fmt.Sprintf("%v", i)] = bpc.sourceTablet.String()
			bpc.playerMutex.Unlock()
		}
		blm.mu.Unlock()
		return result
	}))
}
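Both map-valued stats above copy their data into a fresh map while holding blm.mu, so the exporter never hands out a reference that could be read while the player map is mutated. A minimal expvar-based sketch of that copy-under-lock pattern; the players map and metric name are hypothetical:

package main

import (
	"expvar"
	"fmt"
	"sync"
)

var (
	mu      sync.Mutex
	players = map[string]int64{} // hypothetical per-player lag, keyed by shard
)

func main() {
	expvar.Publish("SecondsBehindMasterMap", expvar.Func(func() interface{} {
		mu.Lock()
		defer mu.Unlock()
		// Copy so callers never observe concurrent mutation of the live map.
		out := make(map[string]int64, len(players))
		for k, v := range players {
			out[k] = v
		}
		return out
	}))

	mu.Lock()
	players["keyspace/-80"] = 2
	mu.Unlock()
	fmt.Println(expvar.Get("SecondsBehindMasterMap").String())
}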
Example No. 24
// NewTabletServer creates an instance of TabletServer. Only one instance
// of TabletServer can be created per process.
func NewTabletServer(config Config) *TabletServer {
	tsv := &TabletServer{
		config:          config,
		streamHealthMap: make(map[int]chan<- *pb.StreamHealthResponse),
		sessionID:       Rand(),
	}
	tsv.qe = NewQueryEngine(config)
	tsv.invalidator = NewRowcacheInvalidator(config.StatsPrefix, tsv.qe, config.EnablePublishStats)
	if config.EnablePublishStats {
		stats.Publish(config.StatsPrefix+"TabletState", stats.IntFunc(func() int64 {
			tsv.mu.Lock()
			state := tsv.state
			tsv.mu.Unlock()
			return state
		}))
		stats.Publish(config.StatsPrefix+"TabletStateName", stats.StringFunc(tsv.GetState))
	}
	return tsv
}
Example No. 25
// RegisterService needs to be called to publish stats, and to start listening
// to clients. Only one instance can call this in a process.
func (updateStream *UpdateStreamImpl) RegisterService() {
	// publish the stats
	stats.Publish("UpdateStreamState", stats.StringFunc(func() string {
		return usStateNames[updateStream.state.Get()]
	}))

	// and register all the RPC protocols
	for _, f := range RegisterUpdateStreamServices {
		f(updateStream)
	}
}
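Both UpdateStream variants export the state as a human-readable name by indexing a name table with the current integer state. An illustrative sketch of that lookup with a bounds check added; the state values and names here are made up, not the real usStateNames:

package main

import (
	"fmt"
	"sync/atomic"
)

// Illustrative state values and name table, mirroring the role of usStateNames.
const (
	disabled int64 = iota
	enabled
)

var stateNames = []string{"Disabled", "Enabled"}

var state int64

func stateName() string {
	s := atomic.LoadInt64(&state)
	if s < 0 || int(s) >= len(stateNames) {
		return "Unknown"
	}
	return stateNames[s]
}

func main() {
	atomic.StoreInt64(&state, enabled)
	fmt.Println(stateName()) // Enabled
}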
Example No. 26
func RegisterUpdateStreamService(mycnf *Mycnf) {
	if UpdateStreamRpcService != nil {
		panic("Update Stream service already initialized")
	}

	UpdateStreamRpcService = &UpdateStream{mycnf: mycnf}
	stats.Publish("UpdateStreamState", stats.StringFunc(func() string {
		return usStateNames[UpdateStreamRpcService.state.Get()]
	}))
	proto.RegisterAuthenticated(UpdateStreamRpcService)
}
Example No. 27
func main() {
	flag.Parse()
	servenv.Init()

	// For the initial phase, vtgate exposes the
	// topoReader API. This will be subsumed by
	// vtgate once vtgate's client functions become active.
	ts := topo.GetServer()
	defer topo.CloseServers()

	rts := vtgate.NewResilientSrvTopoServer(ts)

	stats.Publish("EndpointCount", stats.CountersFunc(rts.HealthyEndpointCount))
	stats.Publish("DegradedEndpointCount", stats.CountersFunc(rts.DegradedEndpointCount))

	topoReader = NewTopoReader(rts)
	topo.RegisterTopoReader(topoReader)

	vtgate.Init(rts, *cell, *retryDelay, *retryCount, *timeout)
	servenv.Run()
}
Example No. 28
func (memstats *MemcacheStats) publishSlabsStats() {
	memstats.mu.Lock()
	defer memstats.mu.Unlock()
	for key, isSingle := range slabsSingleMetrics {
		key := key
		memstats.slabs[key] = make(map[string]int64)
		if isSingle {
			stats.Publish(memstats.statsPrefix+"MemcacheSlabs"+formatKey(key), stats.IntFunc(func() int64 {
				memstats.mu.Lock()
				defer memstats.mu.Unlock()
				return memstats.slabs[key][""]
			}))
		} else {
			stats.Publish(memstats.statsPrefix+"MemcacheSlabs"+formatKey(key), stats.CountersFunc(func() map[string]int64 {
				memstats.mu.Lock()
				defer memstats.mu.Unlock()
				return copyMap(memstats.slabs[key])
			}))
		}
	}
}
Example No. 29
func newZkCell(name, zkaddr string, zkrstats *zkrStats) *zkCell {
	result := &zkCell{cellName: name, zkAddr: zkaddr, zcache: newZkCache(), zkrStats: zkrstats}
	result.ready = sync.NewCond(&result.mutex)
	stats.Publish("Zcell"+name, stats.StringFunc(func() string {

		result.mutex.Lock()
		defer result.mutex.Unlock()
		return stateNames[result.state]
	}))
	go result.backgroundRefresher()
	return result
}
Example No. 30
func NewSchemaInfo(queryCacheSize int, reloadTime time.Duration, idleTimeout time.Duration, sensitiveMode bool) *SchemaInfo {
	si := &SchemaInfo{
		queryCacheSize: queryCacheSize,
		queries:        cache.NewLRUCache(int64(queryCacheSize)),
		rules:          NewQueryRules(),
		connPool:       NewConnectionPool("", 2, idleTimeout),
		reloadTime:     reloadTime,
		ticks:          timer.NewTimer(reloadTime),
		sensitiveMode:  sensitiveMode,
	}
	stats.Publish("QueryCacheLength", stats.IntFunc(si.queries.Length))
	stats.Publish("QueryCacheSize", stats.IntFunc(si.queries.Size))
	stats.Publish("QueryCacheCapacity", stats.IntFunc(si.queries.Capacity))
	stats.Publish("QueryCacheOldest", stats.StringFunc(func() string {
		return fmt.Sprintf("%v", si.queries.Oldest())
	}))
	stats.Publish("SchemaReloadTime", stats.DurationFunc(func() time.Duration {
		return si.reloadTime
	}))
	_ = stats.NewMapCountersFunc("TableStats", []string{"Table", "Stats"}, si.getTableStats)
	stats.Publish("TableInvalidations", stats.CountersFunc(si.getTableInvalidations))
	_ = stats.NewMapCountersFunc("QueryCounts", []string{"Table", "Plan"}, si.getQueryCount)
	_ = stats.NewMapCountersFunc("QueryTimesNs", []string{"Table", "Plan"}, si.getQueryTime)
	_ = stats.NewMapCountersFunc("QueryRowCounts", []string{"Table", "Plan"}, si.getQueryRowCount)
	_ = stats.NewMapCountersFunc("QueryErrorCounts", []string{"Table", "Plan"}, si.getQueryErrorCount)
	// query_plans cannot be shown in sensitive mode
	if !si.sensitiveMode {
		http.Handle("/debug/query_plans", si)
	}
	http.Handle("/debug/query_stats", si)
	http.Handle("/debug/table_stats", si)
	http.Handle("/debug/schema", si)
	return si
}