Example #1
func NewSchemaInfo(queryCacheSize int, reloadTime time.Duration, idleTimeout time.Duration) *SchemaInfo {
	si := &SchemaInfo{
		queries:  cache.NewLRUCache(int64(queryCacheSize)),
		connPool: dbconnpool.NewConnectionPool("", 2, idleTimeout),
		ticks:    timer.NewTimer(reloadTime),
	}
	stats.Publish("QueryCacheLength", stats.IntFunc(si.queries.Length))
	stats.Publish("QueryCacheSize", stats.IntFunc(si.queries.Size))
	stats.Publish("QueryCacheCapacity", stats.IntFunc(si.queries.Capacity))
	stats.Publish("QueryCacheOldest", stats.StringFunc(func() string {
		return fmt.Sprintf("%v", si.queries.Oldest())
	}))
	stats.Publish("SchemaReloadTime", stats.DurationFunc(si.ticks.Interval))
	_ = stats.NewMultiCountersFunc("TableStats", []string{"Table", "Stats"}, si.getTableStats)
	_ = stats.NewMultiCountersFunc("TableInvalidations", []string{"Table"}, si.getTableInvalidations)
	_ = stats.NewMultiCountersFunc("QueryCounts", []string{"Table", "Plan"}, si.getQueryCount)
	_ = stats.NewMultiCountersFunc("QueryTimesNs", []string{"Table", "Plan"}, si.getQueryTime)
	_ = stats.NewMultiCountersFunc("QueryRowCounts", []string{"Table", "Plan"}, si.getQueryRowCount)
	_ = stats.NewMultiCountersFunc("QueryErrorCounts", []string{"Table", "Plan"}, si.getQueryErrorCount)
	http.Handle("/debug/query_plans", si)
	http.Handle("/debug/query_stats", si)
	http.Handle("/debug/table_stats", si)
	http.Handle("/debug/schema", si)
	return si
}
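
The constructor publishes closures rather than snapshot values: each gauge is re-evaluated every time the stats page is scraped, so it is always current. The Vitess stats helpers (IntFunc, StringFunc, DurationFunc) wrap such closures; the standard library's expvar package exposes the same publish-a-function pattern. A minimal standalone sketch of that pattern (the cache type and names below are illustrative, not from Vitess):

package main

import (
	"expvar"
	"fmt"
	"sync/atomic"
)

// queryCache is a stand-in for the LRU cache above; only the gauge matters here.
type queryCache struct{ length int64 }

func (c *queryCache) Length() int64 { return atomic.LoadInt64(&c.length) }

func main() {
	c := &queryCache{}
	// Publish a function, not a value: the exporter calls it on every scrape.
	expvar.Publish("QueryCacheLength", expvar.Func(func() interface{} {
		return c.Length()
	}))

	atomic.AddInt64(&c.length, 42)
	fmt.Println(expvar.Get("QueryCacheLength").String()) // prints 42
}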
Example #2
// NewConnectionPool creates a new ConnectionPool. The name is used
// only for publishing stats; an empty name disables stats publication.
func NewConnectionPool(name string, capacity int, idleTimeout time.Duration) *ConnectionPool {
	cp := &ConnectionPool{capacity: capacity, idleTimeout: idleTimeout}
	if name == "" {
		return cp
	}
	stats.Publish(name+"Capacity", stats.IntFunc(cp.Capacity))
	stats.Publish(name+"Available", stats.IntFunc(cp.Available))
	stats.Publish(name+"MaxCap", stats.IntFunc(cp.MaxCap))
	stats.Publish(name+"WaitCount", stats.IntFunc(cp.WaitCount))
	stats.Publish(name+"WaitTime", stats.DurationFunc(cp.WaitTime))
	stats.Publish(name+"IdleTimeout", stats.DurationFunc(cp.IdleTimeout))
	return cp
}
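
Because published stats names are process-global, the empty-name escape hatch matters: Example #1 creates the small SchemaInfo connection pool with name "" precisely so it stays out of the stats namespace. A hedged usage sketch (the import path is an assumption about the package location):

package main

import (
	"time"

	"github.com/youtube/vitess/go/vt/dbconnpool" // import path is an assumption
)

func main() {
	// An empty name keeps the pool out of the global stats namespace,
	// like the 2-connection pool inside SchemaInfo above.
	anon := dbconnpool.NewConnectionPool("", 2, time.Minute)

	// A non-empty name publishes "<name>Capacity", "<name>Available", etc.,
	// so each name must be unique within the process.
	named := dbconnpool.NewConnectionPool("ConnPool", 100, time.Minute)

	_, _ = anon, named
}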
Example #3
// NewSqlQuery creates an instance of SqlQuery. Only one instance
// of SqlQuery can be created per process.
func NewSqlQuery(config Config) *SqlQuery {
	sq := &SqlQuery{}
	sq.qe = NewQueryEngine(config)
	stats.Publish("TabletState", stats.IntFunc(sq.state.Get))
	stats.Publish("TabletStateName", stats.StringFunc(sq.GetState))
	return sq
}
Example #4
// NewRowcacheInvalidator creates a new RowcacheInvalidator.
// Just like QueryEngine, this is a singleton class.
// You must call this only once.
func NewRowcacheInvalidator(qe *QueryEngine) *RowcacheInvalidator {
	rci := &RowcacheInvalidator{qe: qe}
	stats.Publish("RowcacheInvalidatorState", stats.StringFunc(rci.svm.StateName))
	stats.Publish("RowcacheInvalidatorPosition", stats.StringFunc(rci.PositionString))
	stats.Publish("RowcacheInvalidatorLagSeconds", stats.IntFunc(rci.lagSeconds.Get))
	return rci
}
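
Both constructors above are documented as call-once singletons. A plausible reason, assuming the Vitess stats package behaves like the standard library's expvar it resembles: Publish panics if a name is registered twice, so a second NewSqlQuery in the same process would crash on "TabletState". A standalone sketch of that failure mode using expvar:

package main

import "expvar"

func main() {
	expvar.Publish("TabletState", expvar.Func(func() interface{} { return 0 }))
	// The second registration of the same name panics
	// ("Reuse of exported var name"), which is why these
	// constructors must run at most once per process.
	expvar.Publish("TabletState", expvar.Func(func() interface{} { return 0 }))
}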
Example #5
func (s *MemcacheStats) publishMainStats() {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.main = make(map[string]string)
	for key, isstr := range mainStringMetrics {
		key := key // capture a per-iteration copy of the loop variable for the closure below
		if isstr {
			s.main[key] = ""
			stats.Publish(s.cachePool.name+"Memcache"+formatKey(key), stats.StringFunc(func() string {
				s.mu.Lock()
				defer s.mu.Unlock()
				return s.main[key]
			}))
		} else {
			s.main[key] = "0"
			stats.Publish(s.cachePool.name+"Memcache"+formatKey(key), stats.IntFunc(func() int64 {
				s.mu.Lock()
				defer s.mu.Unlock()
				ival, err := strconv.ParseInt(s.main[key], 10, 64)
				if err != nil {
					log.Errorf("value '%v' for key %v is not an int", s.main[key], key)
					internalErrors.Add("MemcacheStats", 1)
					return -1
				}
				return ival
			}))
		}
	}
}
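
Every closure published here follows the same shape: capture a per-iteration copy of the key, then read the shared map under the same mutex the background refresher uses, falling back to -1 when the stored string is not numeric. A trimmed, standalone sketch of that shape using expvar (all names here are hypothetical):

package main

import (
	"expvar"
	"fmt"
	"strconv"
	"sync"
)

// statSet mimics MemcacheStats: a refresher writes string values into a map,
// and published closures parse them under the same mutex.
type statSet struct {
	mu   sync.Mutex
	vals map[string]string
}

func (s *statSet) publish(keys []string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.vals = make(map[string]string)
	for _, key := range keys {
		key := key // per-iteration copy for the closure
		s.vals[key] = "0"
		expvar.Publish("Demo"+key, expvar.Func(func() interface{} {
			s.mu.Lock()
			defer s.mu.Unlock()
			n, err := strconv.ParseInt(s.vals[key], 10, 64)
			if err != nil {
				return int64(-1) // mirror the -1 sentinel used above
			}
			return n
		}))
	}
}

func main() {
	s := &statSet{}
	s.publish([]string{"BytesRead", "BytesWritten"})
	s.mu.Lock()
	s.vals["BytesRead"] = "1024"
	s.mu.Unlock()
	fmt.Println(expvar.Get("DemoBytesRead").String()) // prints 1024
}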
Example #6
// RegisterBinlogPlayerMap registers the exported stats (varz) for the players.
func RegisterBinlogPlayerMap(blm *BinlogPlayerMap) {
	stats.Publish("BinlogPlayerMapSize", stats.IntFunc(blm.size))
	stats.Publish("BinlogPlayerSecondsBehindMaster", stats.IntFunc(func() int64 {
		sbm := int64(0)
		blm.mu.Lock()
		for _, bpc := range blm.players {
			psbm := bpc.binlogPlayerStats.SecondsBehindMaster.Get()
			if psbm > sbm {
				sbm = psbm
			}
		}
		blm.mu.Unlock()
		return sbm
	}))
	stats.Publish("BinlogPlayerSecondsBehindMasterMap", stats.CountersFunc(func() map[string]int64 {
		blm.mu.Lock()
		result := make(map[string]int64, len(blm.players))
		for i, bpc := range blm.players {
			sbm := bpc.binlogPlayerStats.SecondsBehindMaster.Get()
			result[fmt.Sprintf("%v", i)] = sbm
		}
		blm.mu.Unlock()
		return result
	}))
	stats.Publish("BinlogPlayerSourceShardNameMap", stats.StringMapFunc(func() map[string]string {
		blm.mu.Lock()
		result := make(map[string]string, len(blm.players))
		for i, bpc := range blm.players {
			name := bpc.sourceShard.Keyspace + "/" + bpc.sourceShard.Shard
			result[fmt.Sprintf("%v", i)] = name
		}
		blm.mu.Unlock()
		return result
	}))
	stats.Publish("BinlogPlayerSourceTabletAliasMap", stats.StringMapFunc(func() map[string]string {
		blm.mu.Lock()
		result := make(map[string]string, len(blm.players))
		for i, bpc := range blm.players {
			bpc.playerMutex.Lock()
			result[fmt.Sprintf("%v", i)] = bpc.sourceTablet.String()
			bpc.playerMutex.Unlock()
		}
		blm.mu.Unlock()
		return result
	}))
}
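
Note the design choice in the map-valued stats above: each closure builds a fresh result map while holding blm.mu, rather than returning the internal players map, so the stats exporter can never iterate internal state while AddPlayer/RemovePlayer mutate it. A small standalone sketch of that copy-under-lock shape (the helper and types are hypothetical):

package main

import (
	"fmt"
	"sync"
)

// snapshotLag shows the shape of the closures above: take the registry lock,
// copy into a fresh map keyed by a printable id, and return the copy.
func snapshotLag(mu *sync.Mutex, players map[uint32]int64) map[string]int64 {
	mu.Lock()
	defer mu.Unlock()
	out := make(map[string]int64, len(players))
	for id, lag := range players {
		out[fmt.Sprintf("%v", id)] = lag
	}
	return out
}

func main() {
	var mu sync.Mutex
	players := map[uint32]int64{0: 3, 1: 7}
	fmt.Println(snapshotLag(&mu, players)) // map[0:3 1:7]
}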
Example #7
func NewCachePool(name string, rowCacheConfig RowCacheConfig, queryTimeout time.Duration, idleTimeout time.Duration) *CachePool {
	cp := &CachePool{name: name, idleTimeout: idleTimeout}
	if name != "" {
		cp.memcacheStats = NewMemcacheStats(cp, true, false, false)
		stats.Publish(name+"ConnPoolCapacity", stats.IntFunc(cp.Capacity))
		stats.Publish(name+"ConnPoolAvailable", stats.IntFunc(cp.Available))
		stats.Publish(name+"ConnPoolMaxCap", stats.IntFunc(cp.MaxCap))
		stats.Publish(name+"ConnPoolWaitCount", stats.IntFunc(cp.WaitCount))
		stats.Publish(name+"ConnPoolWaitTime", stats.DurationFunc(cp.WaitTime))
		stats.Publish(name+"ConnPoolIdleTimeout", stats.DurationFunc(cp.IdleTimeout))
	}
	http.Handle(statsURL, cp)

	if rowCacheConfig.Binary == "" {
		return cp
	}
	cp.rowCacheConfig = rowCacheConfig

	// Start with memcached defaults: 1024 connections, 50 of which are
	// reserved for internal use.
	cp.capacity = 1024 - 50
	// cp.port doubles as the memcache address: a unix socket path or a
	// ":port" TCP address such as ":11211".
	cp.port = "11211"
	if rowCacheConfig.Socket != "" {
		cp.port = rowCacheConfig.Socket
	}
	if rowCacheConfig.TcpPort > 0 {
		cp.port = ":" + strconv.Itoa(rowCacheConfig.TcpPort)
	}
	if rowCacheConfig.Connections > 0 {
		if rowCacheConfig.Connections <= 50 {
			log.Fatalf("insufficient capacity: %d", rowCacheConfig.Connections)
		}
		cp.capacity = rowCacheConfig.Connections - 50
	}

	seconds := uint64(queryTimeout / time.Second)
	// Add an additional grace period for
	// memcache expiry of deleted items
	if seconds != 0 {
		cp.DeleteExpiry = 2*seconds + 15
	}
	return cp
}
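
For example, with Connections set to 1024 and a 30-second query timeout (hypothetical values), the derived settings work out as below; the 50-connection reservation and the 2*seconds+15 grace period match the constructor above:

package main

import (
	"fmt"
	"time"
)

func main() {
	connections := 1024              // hypothetical memcached connection limit
	queryTimeout := 30 * time.Second // hypothetical query timeout

	capacity := connections - 50 // 50 connections reserved, as in NewCachePool
	seconds := uint64(queryTimeout / time.Second)
	deleteExpiry := 2*seconds + 15 // grace period for memcache expiry of deleted items

	fmt.Println(capacity, deleteExpiry) // prints 974 75
}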
Example #8
func (s *MemcacheStats) publishSlabsStats() {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.slabs = make(map[string]map[string]int64)
	for key, isSingle := range slabsSingleMetrics {
		key := key // capture a per-iteration copy of the loop variable for the closures below
		s.slabs[key] = make(map[string]int64)
		if isSingle {
			stats.Publish(s.cachePool.name+"MemcacheSlabs"+formatKey(key), stats.IntFunc(func() int64 {
				s.mu.Lock()
				defer s.mu.Unlock()
				return s.slabs[key][""]
			}))
		} else {
			stats.Publish(s.cachePool.name+"MemcacheSlabs"+formatKey(key), stats.CountersFunc(func() map[string]int64 {
				s.mu.Lock()
				defer s.mu.Unlock()
				return copyMap(s.slabs[key])
			}))
		}
	}
}
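
copyMap is referenced but not shown in this excerpt. A plausible implementation consistent with its use here (the body below is an assumption): it returns a fresh map so the published CountersFunc never hands the caller the internal slab map.

func copyMap(src map[string]int64) map[string]int64 {
	if src == nil {
		return nil
	}
	dst := make(map[string]int64, len(src))
	for k, v := range src {
		dst[k] = v
	}
	return dst
}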
Example #9
// NewQueryEngine creates a new QueryEngine.
// This is a singleton class.
// You must call this only once.
func NewQueryEngine(config Config) *QueryEngine {
	qe := &QueryEngine{}
	qe.schemaInfo = NewSchemaInfo(
		config.QueryCacheSize,
		time.Duration(config.SchemaReloadTime*1e9),
		time.Duration(config.IdleTimeout*1e9),
	)
	qe.queryRuleInfo = NewQueryRuleInfo()

	mysqlStats = stats.NewTimings("Mysql")

	// Pools
	qe.cachePool = NewCachePool(
		"Rowcache",
		config.RowCache,
		time.Duration(config.QueryTimeout*1e9),
		time.Duration(config.IdleTimeout*1e9),
	)
	qe.connPool = dbconnpool.NewConnectionPool(
		"ConnPool",
		config.PoolSize,
		time.Duration(config.IdleTimeout*1e9),
	)
	qe.streamConnPool = dbconnpool.NewConnectionPool(
		"StreamConnPool",
		config.StreamPoolSize,
		time.Duration(config.IdleTimeout*1e9),
	)

	// Services
	qe.txPool = NewTxPool(
		"TransactionPool",
		config.TransactionCap,
		time.Duration(config.TransactionTimeout*1e9),
		time.Duration(config.TxPoolTimeout*1e9),
		time.Duration(config.IdleTimeout*1e9),
	)
	qe.connKiller = NewConnectionKiller(1, time.Duration(config.IdleTimeout*1e9))
	qe.consolidator = NewConsolidator()
	qe.invalidator = NewRowcacheInvalidator(qe)
	qe.streamQList = NewQueryList(qe.connKiller)

	// Vars
	qe.queryTimeout.Set(time.Duration(config.QueryTimeout * 1e9))
	qe.spotCheckFreq = sync2.AtomicInt64(config.SpotCheckRatio * spotCheckMultiplier)
	if config.StrictMode {
		qe.strictMode.Set(1)
	}
	qe.strictTableAcl = config.StrictTableAcl
	qe.maxResultSize = sync2.AtomicInt64(config.MaxResultSize)
	qe.maxDMLRows = sync2.AtomicInt64(config.MaxDMLRows)
	qe.streamBufferSize = sync2.AtomicInt64(config.StreamBufferSize)

	// loggers
	qe.accessCheckerLogger = logutil.NewThrottledLogger("accessChecker", 1*time.Second)

	// Stats
	stats.Publish("MaxResultSize", stats.IntFunc(qe.maxResultSize.Get))
	stats.Publish("MaxDMLRows", stats.IntFunc(qe.maxDMLRows.Get))
	stats.Publish("StreamBufferSize", stats.IntFunc(qe.streamBufferSize.Get))
	stats.Publish("QueryTimeout", stats.DurationFunc(qe.queryTimeout.Get))
	queryStats = stats.NewTimings("Queries")
	QPSRates = stats.NewRates("QPS", queryStats, 15, 60*time.Second)
	waitStats = stats.NewTimings("Waits")
	killStats = stats.NewCounters("Kills")
	infoErrors = stats.NewCounters("InfoErrors")
	errorStats = stats.NewCounters("Errors")
	internalErrors = stats.NewCounters("InternalErrors")
	resultStats = stats.NewHistogram("Results", resultBuckets)
	stats.Publish("RowcacheSpotCheckRatio", stats.FloatFunc(func() float64 {
		return float64(qe.spotCheckFreq.Get()) / spotCheckMultiplier
	}))
	spotCheckCount = stats.NewInt("RowcacheSpotCheckCount")

	return qe
}
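
The repeated *1e9 multiplications suggest the config fields are durations expressed in (possibly fractional) seconds; time.Duration counts nanoseconds, so multiplying by 1e9 converts seconds to a Duration. A quick standalone check (the 1800-second value is hypothetical):

package main

import (
	"fmt"
	"time"
)

func main() {
	idleTimeout := 1800.0 // hypothetical config value, in seconds
	d := time.Duration(idleTimeout * 1e9)
	fmt.Println(d)                   // 30m0s
	fmt.Println(d == 30*time.Minute) // true
}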