// NewMaxReplicationLagModule will create a new module instance and set the
// initial max replication lag limit to config.MaxReplicationLagSec.
func NewMaxReplicationLagModule(config MaxReplicationLagModuleConfig, actualRatesHistory *aggregatedIntervalHistory, nowFunc func() time.Time) (*MaxReplicationLagModule, error) {
    if err := config.Verify(); err != nil {
        return nil, fmt.Errorf("invalid NewMaxReplicationLagModuleConfig: %v", err)
    }
    rate := int64(ReplicationLagModuleDisabled)
    if config.MaxReplicationLagSec != ReplicationLagModuleDisabled {
        rate = config.InitialRate
    }
    m := &MaxReplicationLagModule{
        initialMaxReplicationLagSec: config.MaxReplicationLagSec,
        // Register "config" for a future config update.
        mutableConfig:      config,
        applyMutableConfig: true,
        // Always start off with a non-zero rate because zero means all requests
        // get throttled.
        rate:           sync2.NewAtomicInt64(rate),
        currentState:   stateIncreaseRate,
        lastRateChange: nowFunc(),
        memory:         newMemory(memoryGranularity, config.AgeBadRateAfter(), config.BadRateIncrease),
        nowFunc:        nowFunc,
        lagRecords:     make(chan replicationLagRecord, 10),
        // Prevent an immediate increase of the initial rate.
        nextAllowedIncrease: nowFunc().Add(config.MaxDurationBetweenIncreases()),
        actualRatesHistory:  actualRatesHistory,
        lagCache:            newReplicationLagCache(1000),
        results:             newResultRing(1000),
    }

    // Enforce a config update.
    m.applyLatestConfig()

    return m, nil
}
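A minimal, hypothetical sketch of the construction pattern above, using only the standard library: verify the config first, start with a non-zero rate (zero would throttle everything), and record a timestamp that blocks an immediate rate increase. All names below are illustrative, not the throttler's real API.

package main

import (
    "errors"
    "fmt"
    "sync/atomic"
    "time"
)

type lagModuleConfig struct {
    InitialRate                 int64
    MaxDurationBetweenIncreases time.Duration
}

func (c lagModuleConfig) verify() error {
    if c.InitialRate < 1 {
        return errors.New("InitialRate must be >= 1")
    }
    return nil
}

type lagModule struct {
    rate                int64 // read and written atomically
    nextAllowedIncrease time.Time
    nowFunc             func() time.Time
}

func newLagModule(c lagModuleConfig, nowFunc func() time.Time) (*lagModule, error) {
    if err := c.verify(); err != nil {
        return nil, fmt.Errorf("invalid config: %v", err)
    }
    return &lagModule{
        // A rate of zero would throttle every request, so always start non-zero.
        rate: c.InitialRate,
        // Block a rate increase right after startup, as nextAllowedIncrease does above.
        nextAllowedIncrease: nowFunc().Add(c.MaxDurationBetweenIncreases),
        nowFunc:             nowFunc,
    }, nil
}

func (m *lagModule) Rate() int64 { return atomic.LoadInt64(&m.rate) }

// increaseAllowed reports whether enough time has passed for a rate increase.
func (m *lagModule) increaseAllowed() bool { return m.nowFunc().After(m.nextAllowedIncrease) }

func main() {
    m, err := newLagModule(lagModuleConfig{InitialRate: 100, MaxDurationBetweenIncreases: time.Minute}, time.Now)
    if err != nil {
        panic(err)
    }
    fmt.Println("rate:", m.Rate(), "increase allowed:", m.increaseAllowed())
}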
// NewTxPool creates a new TxPool. It's not operational until it's Open'd.
func NewTxPool(
    name string,
    txStatsPrefix string,
    capacity int,
    timeout time.Duration,
    idleTimeout time.Duration,
    enablePublishStats bool,
    qStats *QueryServiceStats,
    checker MySQLChecker) *TxPool {
    txStatsName := ""
    if enablePublishStats {
        txStatsName = txStatsPrefix + "Transactions"
    }
    axp := &TxPool{
        pool:              NewConnPool(name, capacity, idleTimeout, enablePublishStats, qStats, checker),
        activePool:        pools.NewNumbered(),
        lastID:            sync2.NewAtomicInt64(time.Now().UnixNano()),
        timeout:           sync2.NewAtomicDuration(timeout),
        ticks:             timer.NewTimer(timeout / 10),
        txStats:           stats.NewTimings(txStatsName),
        checker:           checker,
        queryServiceStats: qStats,
    }
    // Careful: pool also exports name+"xxx" vars,
    // but we know it doesn't export Timeout.
    if enablePublishStats {
        stats.Publish(name+"Timeout", stats.DurationFunc(axp.timeout.Get))
    }
    return axp
}
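A standalone sketch (hypothetical names, standard library only) of two details of the constructor above: the transaction-ID counter is seeded with time.Now().UnixNano(), presumably so IDs stay distinct across process restarts, and a ticker runs at timeout/10, giving housekeeping such as expiring stale transactions several chances per timeout window.

package main

import (
    "fmt"
    "sync/atomic"
    "time"
)

type txPool struct {
    lastID  int64 // accessed atomically
    timeout time.Duration
}

func newTxPool(timeout time.Duration) *txPool {
    return &txPool{
        // Seeding with UnixNano keeps IDs distinct across restarts.
        lastID:  time.Now().UnixNano(),
        timeout: timeout,
    }
}

// nextID hands out increasing transaction IDs.
func (p *txPool) nextID() int64 { return atomic.AddInt64(&p.lastID, 1) }

func main() {
    p := newTxPool(30 * time.Second)
    // Tick roughly ten times per timeout window, like timeout/10 above.
    ticker := time.NewTicker(p.timeout / 10)
    defer ticker.Stop()
    fmt.Println("first id:", p.nextID())
}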
// NewResourcePool creates a new ResourcePool.
// capacity is the number of active resources in the pool:
// there can be up to 'capacity' of these at a given time.
// maxCap specifies the extent to which the pool can be resized
// in the future through the SetCapacity function.
// You cannot resize the pool beyond maxCap.
// If a resource is unused beyond idleTimeout, it's discarded.
// An idleTimeout of 0 means that there is no timeout.
func NewResourcePool(factory Factory, capacity, maxCap int, idleTimeout time.Duration) *ResourcePool {
    if capacity <= 0 || maxCap <= 0 || capacity > maxCap {
        panic(errors.New("invalid/out of range capacity"))
    }
    rp := &ResourcePool{
        resources:   make(chan resourceWrapper, maxCap),
        factory:     factory,
        capacity:    sync2.NewAtomicInt64(int64(capacity)),
        idleTimeout: sync2.NewAtomicDuration(idleTimeout),
    }
    for i := 0; i < capacity; i++ {
        rp.resources <- resourceWrapper{}
    }
    return rp
}
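A compact, hypothetical illustration of the channel-based layout used above: the channel is sized for maxCap so the pool can grow later, but only `capacity` empty slots are put in at construction time, and the real resource is created lazily the first time a slot is handed out. Everything below is a sketch with made-up names, not the pools package API.

package main

import (
    "errors"
    "fmt"
    "time"
)

type resource interface{ Close() }

type stubConn struct{ id int }

func (stubConn) Close() {}

type wrapper struct {
    res  resource
    used time.Time
}

type pool struct {
    resources chan wrapper
    factory   func() (resource, error)
}

func newPool(factory func() (resource, error), capacity, maxCap int) *pool {
    if capacity <= 0 || maxCap <= 0 || capacity > maxCap {
        panic(errors.New("invalid/out of range capacity"))
    }
    p := &pool{
        resources: make(chan wrapper, maxCap),
        factory:   factory,
    }
    // Pre-fill with empty wrappers: the slot exists, the resource does not yet.
    for i := 0; i < capacity; i++ {
        p.resources <- wrapper{}
    }
    return p
}

// get takes a slot and lazily creates the underlying resource if the slot is empty.
func (p *pool) get() (resource, error) {
    w := <-p.resources
    if w.res != nil {
        return w.res, nil
    }
    return p.factory()
}

// put returns a resource to a free slot and records when it was last used.
func (p *pool) put(r resource) {
    p.resources <- wrapper{res: r, used: time.Now()}
}

func main() {
    created := 0
    factory := func() (resource, error) {
        created++
        return stubConn{id: created}, nil
    }
    p := newPool(factory, 2, 4)
    r, err := p.get() // first get triggers lazy creation
    if err != nil {
        panic(err)
    }
    p.put(r)
    fmt.Println("created:", created, "free slots:", len(p.resources))
}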
// Init initializes VTGate server.
func Init(ctx context.Context, hc discovery.HealthCheck, topoServer topo.Server, serv topo.SrvTopoServer, cell string, retryDelay time.Duration, retryCount int, connTimeoutTotal, connTimeoutPerConn, connLife time.Duration, tabletTypesToWait []topodatapb.TabletType, maxInFlight int, testGateway string) *VTGate {
    if rpcVTGate != nil {
        log.Fatalf("VTGate already initialized")
    }
    rpcVTGate = &VTGate{
        resolver:     NewResolver(hc, topoServer, serv, "VttabletCall", cell, retryDelay, retryCount, connTimeoutTotal, connTimeoutPerConn, connLife, tabletTypesToWait, testGateway),
        timings:      stats.NewMultiTimings("VtgateApi", []string{"Operation", "Keyspace", "DbType"}),
        rowsReturned: stats.NewMultiCounters("VtgateApiRowsReturned", []string{"Operation", "Keyspace", "DbType"}),

        maxInFlight: int64(maxInFlight),
        inFlight:    sync2.NewAtomicInt64(0),

        logExecute:                  logutil.NewThrottledLogger("Execute", 5*time.Second),
        logExecuteShards:            logutil.NewThrottledLogger("ExecuteShards", 5*time.Second),
        logExecuteKeyspaceIds:       logutil.NewThrottledLogger("ExecuteKeyspaceIds", 5*time.Second),
        logExecuteKeyRanges:         logutil.NewThrottledLogger("ExecuteKeyRanges", 5*time.Second),
        logExecuteEntityIds:         logutil.NewThrottledLogger("ExecuteEntityIds", 5*time.Second),
        logExecuteBatchShards:       logutil.NewThrottledLogger("ExecuteBatchShards", 5*time.Second),
        logExecuteBatchKeyspaceIds:  logutil.NewThrottledLogger("ExecuteBatchKeyspaceIds", 5*time.Second),
        logStreamExecute:            logutil.NewThrottledLogger("StreamExecute", 5*time.Second),
        logStreamExecuteKeyspaceIds: logutil.NewThrottledLogger("StreamExecuteKeyspaceIds", 5*time.Second),
        logStreamExecuteKeyRanges:   logutil.NewThrottledLogger("StreamExecuteKeyRanges", 5*time.Second),
        logStreamExecuteShards:      logutil.NewThrottledLogger("StreamExecuteShards", 5*time.Second),
    }
    // Reuse resolver's scatterConn.
    rpcVTGate.router = NewRouter(ctx, serv, cell, "VTGateRouter", rpcVTGate.resolver.scatterConn)

    normalErrors = stats.NewMultiCounters("VtgateApiErrorCounts", []string{"Operation", "Keyspace", "DbType"})
    infoErrors = stats.NewCounters("VtgateInfoErrorCounts")
    internalErrors = stats.NewCounters("VtgateInternalErrorCounts")

    qpsByOperation = stats.NewRates("QPSByOperation", stats.CounterForDimension(rpcVTGate.timings, "Operation"), 15, 1*time.Minute)
    qpsByKeyspace = stats.NewRates("QPSByKeyspace", stats.CounterForDimension(rpcVTGate.timings, "Keyspace"), 15, 1*time.Minute)
    qpsByDbType = stats.NewRates("QPSByDbType", stats.CounterForDimension(rpcVTGate.timings, "DbType"), 15, 1*time.Minute)

    errorsByOperation = stats.NewRates("ErrorsByOperation", stats.CounterForDimension(normalErrors, "Operation"), 15, 1*time.Minute)
    errorsByKeyspace = stats.NewRates("ErrorsByKeyspace", stats.CounterForDimension(normalErrors, "Keyspace"), 15, 1*time.Minute)
    errorsByDbType = stats.NewRates("ErrorsByDbType", stats.CounterForDimension(normalErrors, "DbType"), 15, 1*time.Minute)

    for _, f := range RegisterVTGates {
        f(rpcVTGate)
    }
    return rpcVTGate
}
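A minimal sketch of the package-level singleton guard that Init relies on: the process holds exactly one instance, and a second call is an error. The real code calls log.Fatalf; this hypothetical version returns an error instead, and all names are illustrative.

package main

import (
    "errors"
    "fmt"
)

type gate struct{ cell string }

var instance *gate

// initGate refuses to run twice: registration hooks run once against the
// single instance that lives for the lifetime of the process.
func initGate(cell string) (*gate, error) {
    if instance != nil {
        return nil, errors.New("gate already initialized")
    }
    instance = &gate{cell: cell}
    return instance, nil
}

func main() {
    if _, err := initGate("test_cell"); err != nil {
        panic(err)
    }
    _, err := initGate("test_cell")
    fmt.Println("second call:", err)
}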
// NewQueryEngine creates a new QueryEngine.
// This is a singleton class.
// You must call this only once.
func NewQueryEngine(config Config) *QueryEngine {
    qe := &QueryEngine{enableAutoCommit: config.EnableAutoCommit}
    qe.queryServiceStats = NewQueryServiceStats(config.StatsPrefix, config.EnablePublishStats)
    qe.schemaInfo = NewSchemaInfo(
        config.QueryCacheSize,
        config.StatsPrefix,
        map[string]string{
            debugQueryPlansKey: config.DebugURLPrefix + "/query_plans",
            debugQueryStatsKey: config.DebugURLPrefix + "/query_stats",
            debugTableStatsKey: config.DebugURLPrefix + "/table_stats",
            debugSchemaKey:     config.DebugURLPrefix + "/schema",
        },
        time.Duration(config.SchemaReloadTime*1e9),
        time.Duration(config.IdleTimeout*1e9),
        config.EnablePublishStats,
        qe.queryServiceStats,
    )

    // Pools
    qe.cachePool = NewCachePool(
        config.PoolNamePrefix+"Rowcache",
        config.RowCache,
        time.Duration(config.IdleTimeout*1e9),
        config.DebugURLPrefix+"/memcache/",
        config.EnablePublishStats,
        qe.queryServiceStats,
    )
    qe.connPool = NewConnPool(
        config.PoolNamePrefix+"ConnPool",
        config.PoolSize,
        time.Duration(config.IdleTimeout*1e9),
        config.EnablePublishStats,
        qe.queryServiceStats,
    )
    qe.streamConnPool = NewConnPool(
        config.PoolNamePrefix+"StreamConnPool",
        config.StreamPoolSize,
        time.Duration(config.IdleTimeout*1e9),
        config.EnablePublishStats,
        qe.queryServiceStats,
    )

    // Services
    qe.txPool = NewTxPool(
        config.PoolNamePrefix+"TransactionPool",
        config.StatsPrefix,
        config.TransactionCap,
        time.Duration(config.TransactionTimeout*1e9),
        time.Duration(config.TxPoolTimeout*1e9),
        time.Duration(config.IdleTimeout*1e9),
        config.EnablePublishStats,
        qe.queryServiceStats,
    )
    qe.consolidator = sync2.NewConsolidator()
    http.Handle(config.DebugURLPrefix+"/consolidations", qe.consolidator)
    qe.streamQList = NewQueryList()

    // Vars
    qe.queryTimeout.Set(time.Duration(config.QueryTimeout * 1e9))
    qe.spotCheckFreq = sync2.NewAtomicInt64(int64(config.SpotCheckRatio * spotCheckMultiplier))
    if config.StrictMode {
        qe.strictMode.Set(1)
    }
    qe.strictTableAcl = config.StrictTableAcl
    qe.enableTableAclDryRun = config.EnableTableAclDryRun
    qe.exemptACL = config.TableAclExemptACL
    qe.maxResultSize = sync2.NewAtomicInt64(int64(config.MaxResultSize))
    qe.maxDMLRows = sync2.NewAtomicInt64(int64(config.MaxDMLRows))
    qe.streamBufferSize = sync2.NewAtomicInt64(int64(config.StreamBufferSize))

    // Loggers
    qe.accessCheckerLogger = logutil.NewThrottledLogger("accessChecker", 1*time.Second)

    var tableACLAllowedName string
    var tableACLDeniedName string
    var tableACLPseudoDeniedName string
    // Stats
    if config.EnablePublishStats {
        stats.Publish(config.StatsPrefix+"MaxResultSize", stats.IntFunc(qe.maxResultSize.Get))
        stats.Publish(config.StatsPrefix+"MaxDMLRows", stats.IntFunc(qe.maxDMLRows.Get))
        stats.Publish(config.StatsPrefix+"StreamBufferSize", stats.IntFunc(qe.streamBufferSize.Get))
        stats.Publish(config.StatsPrefix+"QueryTimeout", stats.DurationFunc(qe.queryTimeout.Get))
        stats.Publish(config.StatsPrefix+"RowcacheSpotCheckRatio", stats.FloatFunc(func() float64 {
            return float64(qe.spotCheckFreq.Get()) / spotCheckMultiplier
        }))
        stats.Publish(config.StatsPrefix+"TableACLExemptCount", stats.IntFunc(qe.tableaclExemptCount.Get))
        tableACLAllowedName = "TableACLAllowed"
        tableACLDeniedName = "TableACLDenied"
        tableACLPseudoDeniedName = "TableACLPseudoDenied"
    }
    qe.tableaclAllowed = stats.NewMultiCounters(tableACLAllowedName, []string{"TableName", "TableGroup", "PlanID", "Username"})
    qe.tableaclDenied = stats.NewMultiCounters(tableACLDeniedName, []string{"TableName", "TableGroup", "PlanID", "Username"})
    qe.tableaclPseudoDenied = stats.NewMultiCounters(tableACLPseudoDeniedName, []string{"TableName", "TableGroup", "PlanID", "Username"})

    return qe
}
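Throughout these constructors, config values that hold seconds (apparently as float64) are turned into time.Duration with expressions like time.Duration(config.IdleTimeout*1e9), relying on time.Duration being a nanosecond count. A tiny standalone illustration, with a hypothetical field value:

package main

import (
    "fmt"
    "time"
)

// secondsToDuration converts a seconds value to time.Duration.
// time.Duration counts nanoseconds, hence the factor of 1e9.
func secondsToDuration(seconds float64) time.Duration {
    return time.Duration(seconds * 1e9)
}

func main() {
    idleTimeout := 1800.0 // e.g. an IdleTimeout config value in seconds
    fmt.Println(secondsToDuration(idleTimeout)) // prints 30m0s
}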
// NewQueryEngine creates a new QueryEngine.
// This is a singleton class.
// You must call this only once.
func NewQueryEngine(checker MySQLChecker, config Config) *QueryEngine {
    qe := &QueryEngine{config: config}
    qe.queryServiceStats = NewQueryServiceStats(config.StatsPrefix, config.EnablePublishStats)
    qe.schemaInfo = NewSchemaInfo(
        config.StatsPrefix,
        checker,
        config.QueryCacheSize,
        time.Duration(config.SchemaReloadTime*1e9),
        time.Duration(config.IdleTimeout*1e9),
        map[string]string{
            debugQueryPlansKey: config.DebugURLPrefix + "/query_plans",
            debugQueryStatsKey: config.DebugURLPrefix + "/query_stats",
            debugSchemaKey:     config.DebugURLPrefix + "/schema",
            debugQueryRulesKey: config.DebugURLPrefix + "/query_rules",
        },
        config.EnablePublishStats,
        qe.queryServiceStats,
    )
    qe.connPool = NewConnPool(
        config.PoolNamePrefix+"ConnPool",
        config.PoolSize,
        time.Duration(config.IdleTimeout*1e9),
        config.EnablePublishStats,
        qe.queryServiceStats,
        checker,
    )
    qe.streamConnPool = NewConnPool(
        config.PoolNamePrefix+"StreamConnPool",
        config.StreamPoolSize,
        time.Duration(config.IdleTimeout*1e9),
        config.EnablePublishStats,
        qe.queryServiceStats,
        checker,
    )
    qe.txPool = NewTxPool(
        config.PoolNamePrefix+"TransactionPool",
        config.StatsPrefix,
        config.TransactionCap,
        time.Duration(config.TransactionTimeout*1e9),
        time.Duration(config.IdleTimeout*1e9),
        config.EnablePublishStats,
        qe.queryServiceStats,
        checker,
    )

    // Set the prepared pool capacity to something lower than
    // tx pool capacity. Those spare connections are needed to
    // perform metadata state change operations. Without this,
    // the system can deadlock if all connections get moved to
    // the TxPreparedPool.
    prepCap := config.TransactionCap - 2
    if prepCap < 0 {
        // A capacity of 0 means that Prepare will always fail.
        prepCap = 0
    }
    qe.preparedPool = NewTxPreparedPool(prepCap)
    qe.twoPC = NewTwoPC()

    qe.consolidator = sync2.NewConsolidator()
    http.Handle(config.DebugURLPrefix+"/consolidations", qe.consolidator)
    qe.streamQList = NewQueryList()

    if config.StrictMode {
        qe.strictMode.Set(1)
    }
    if config.EnableAutoCommit {
        qe.autoCommit.Set(1)
    }
    qe.strictTableAcl = config.StrictTableAcl
    qe.enableTableAclDryRun = config.EnableTableAclDryRun

    if config.TableAclExemptACL != "" {
        if f, err := tableacl.GetCurrentAclFactory(); err == nil {
            if exemptACL, err := f.New([]string{config.TableAclExemptACL}); err == nil {
                log.Infof("Setting Table ACL exempt rule for %v", config.TableAclExemptACL)
                qe.exemptACL = exemptACL
            } else {
                log.Infof("Cannot build exempt ACL for table ACL: %v", err)
            }
        } else {
            log.Infof("Cannot get current ACL Factory: %v", err)
        }
    }

    qe.maxResultSize = sync2.NewAtomicInt64(int64(config.MaxResultSize))
    qe.maxDMLRows = sync2.NewAtomicInt64(int64(config.MaxDMLRows))
    qe.streamBufferSize = sync2.NewAtomicInt64(int64(config.StreamBufferSize))

    qe.accessCheckerLogger = logutil.NewThrottledLogger("accessChecker", 1*time.Second)

    var tableACLAllowedName string
    var tableACLDeniedName string
    var tableACLPseudoDeniedName string
    if config.EnablePublishStats {
        stats.Publish(config.StatsPrefix+"MaxResultSize", stats.IntFunc(qe.maxResultSize.Get))
        stats.Publish(config.StatsPrefix+"MaxDMLRows", stats.IntFunc(qe.maxDMLRows.Get))
        stats.Publish(config.StatsPrefix+"StreamBufferSize", stats.IntFunc(qe.streamBufferSize.Get))
        stats.Publish(config.StatsPrefix+"TableACLExemptCount", stats.IntFunc(qe.tableaclExemptCount.Get))
        tableACLAllowedName = "TableACLAllowed"
        tableACLDeniedName = "TableACLDenied"
        tableACLPseudoDeniedName = "TableACLPseudoDenied"
    }
    qe.tableaclAllowed = stats.NewMultiCounters(tableACLAllowedName, []string{"TableName", "TableGroup", "PlanID", "Username"})
    qe.tableaclDenied = stats.NewMultiCounters(tableACLDeniedName, []string{"TableName", "TableGroup", "PlanID", "Username"})
    qe.tableaclPseudoDenied = stats.NewMultiCounters(tableACLPseudoDeniedName, []string{"TableName", "TableGroup", "PlanID", "Username"})

    return qe
}
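The prepared-pool sizing rule explained in the comment above (reserve two connections for metadata state changes, clamp at zero) in a small standalone form; the helper name is hypothetical:

package main

import "fmt"

// preparedCapacity keeps two transaction-pool connections out of the prepared
// pool so metadata state-change operations can always get a connection.
// A result of 0 means Prepare will always fail.
func preparedCapacity(transactionCap int) int {
    prepCap := transactionCap - 2
    if prepCap < 0 {
        prepCap = 0
    }
    return prepCap
}

func main() {
    for _, c := range []int{20, 2, 1} {
        fmt.Printf("tx cap %d -> prepared cap %d\n", c, preparedCapacity(c))
    }
}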
// NewQueryEngine creates a new QueryEngine.
// This is a singleton class.
// You must call this only once.
func NewQueryEngine(checker MySQLChecker, config Config) *QueryEngine {
    qe := &QueryEngine{config: config}
    qe.queryServiceStats = NewQueryServiceStats(config.StatsPrefix, config.EnablePublishStats)
    qe.schemaInfo = NewSchemaInfo(
        config.StatsPrefix,
        checker,
        config.QueryCacheSize,
        time.Duration(config.SchemaReloadTime*1e9),
        time.Duration(config.IdleTimeout*1e9),
        map[string]string{
            debugQueryPlansKey: config.DebugURLPrefix + "/query_plans",
            debugQueryStatsKey: config.DebugURLPrefix + "/query_stats",
            debugSchemaKey:     config.DebugURLPrefix + "/schema",
            debugQueryRulesKey: config.DebugURLPrefix + "/query_rules",
        },
        config.EnablePublishStats,
        qe.queryServiceStats,
    )
    qe.connPool = NewConnPool(
        config.PoolNamePrefix+"ConnPool",
        config.PoolSize,
        time.Duration(config.IdleTimeout*1e9),
        config.EnablePublishStats,
        qe.queryServiceStats,
        checker,
    )
    qe.streamConnPool = NewConnPool(
        config.PoolNamePrefix+"StreamConnPool",
        config.StreamPoolSize,
        time.Duration(config.IdleTimeout*1e9),
        config.EnablePublishStats,
        qe.queryServiceStats,
        checker,
    )
    qe.txPool = NewTxPool(
        config.PoolNamePrefix+"TransactionPool",
        config.StatsPrefix,
        config.TransactionCap,
        time.Duration(config.TransactionTimeout*1e9),
        time.Duration(config.IdleTimeout*1e9),
        config.EnablePublishStats,
        qe.queryServiceStats,
        checker,
    )
    qe.consolidator = sync2.NewConsolidator()
    http.Handle(config.DebugURLPrefix+"/consolidations", qe.consolidator)
    qe.streamQList = NewQueryList()

    if config.StrictMode {
        qe.strictMode.Set(1)
    }
    if config.EnableAutoCommit {
        qe.autoCommit.Set(1)
    }
    qe.strictTableAcl = config.StrictTableAcl
    qe.enableTableAclDryRun = config.EnableTableAclDryRun

    if config.TableAclExemptACL != "" {
        if f, err := tableacl.GetCurrentAclFactory(); err == nil {
            if exemptACL, err := f.New([]string{config.TableAclExemptACL}); err == nil {
                log.Infof("Setting Table ACL exempt rule for %v", config.TableAclExemptACL)
                qe.exemptACL = exemptACL
            } else {
                log.Infof("Cannot build exempt ACL for table ACL: %v", err)
            }
        } else {
            log.Infof("Cannot get current ACL Factory: %v", err)
        }
    }

    qe.maxResultSize = sync2.NewAtomicInt64(int64(config.MaxResultSize))
    qe.maxDMLRows = sync2.NewAtomicInt64(int64(config.MaxDMLRows))
    qe.streamBufferSize = sync2.NewAtomicInt64(int64(config.StreamBufferSize))

    qe.accessCheckerLogger = logutil.NewThrottledLogger("accessChecker", 1*time.Second)

    var tableACLAllowedName string
    var tableACLDeniedName string
    var tableACLPseudoDeniedName string
    if config.EnablePublishStats {
        stats.Publish(config.StatsPrefix+"MaxResultSize", stats.IntFunc(qe.maxResultSize.Get))
        stats.Publish(config.StatsPrefix+"MaxDMLRows", stats.IntFunc(qe.maxDMLRows.Get))
        stats.Publish(config.StatsPrefix+"StreamBufferSize", stats.IntFunc(qe.streamBufferSize.Get))
        stats.Publish(config.StatsPrefix+"TableACLExemptCount", stats.IntFunc(qe.tableaclExemptCount.Get))
        tableACLAllowedName = "TableACLAllowed"
        tableACLDeniedName = "TableACLDenied"
        tableACLPseudoDeniedName = "TableACLPseudoDenied"
    }
    qe.tableaclAllowed = stats.NewMultiCounters(tableACLAllowedName, []string{"TableName", "TableGroup", "PlanID", "Username"})
    qe.tableaclDenied = stats.NewMultiCounters(tableACLDeniedName, []string{"TableName", "TableGroup", "PlanID", "Username"})
    qe.tableaclPseudoDenied = stats.NewMultiCounters(tableACLPseudoDeniedName, []string{"TableName", "TableGroup", "PlanID", "Username"})

    return qe
}
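All three NewQueryEngine variants above leave the table-ACL counter names as "" unless EnablePublishStats is set; the stats constructors appear to treat an empty name as "create the counter but do not export it". A hypothetical, standard-library-only sketch of that convention:

package main

import "fmt"

var published = map[string]*counter{}

type counter struct{ n int64 }

// newCounter registers the counter in the global export map only when a
// non-empty name is given.
func newCounter(name string) *counter {
    c := &counter{}
    if name != "" {
        published[name] = c
    }
    return c
}

func main() {
    enablePublishStats := false
    name := ""
    if enablePublishStats {
        name = "TableACLAllowed"
    }
    _ = newCounter(name)
    fmt.Println("published counters:", len(published)) // 0: nothing exported
}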
// NewMaxRateModule will create a new module instance and set the initial
// rate limit to maxRate.
func NewMaxRateModule(maxRate int64) *MaxRateModule {
    return &MaxRateModule{
        maxRate: sync2.NewAtomicInt64(maxRate),
    }
}
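For comparison, a hypothetical sketch of the same shape built directly on sync/atomic instead of sync2.AtomicInt64; the getter and setter names are illustrative:

package main

import (
    "fmt"
    "sync/atomic"
)

type maxRateModule struct {
    maxRate int64 // accessed atomically
}

func newMaxRateModule(maxRate int64) *maxRateModule {
    return &maxRateModule{maxRate: maxRate}
}

// MaxRate returns the current limit; SetMaxRate replaces it atomically.
func (m *maxRateModule) MaxRate() int64        { return atomic.LoadInt64(&m.maxRate) }
func (m *maxRateModule) SetMaxRate(rate int64) { atomic.StoreInt64(&m.maxRate, rate) }

func main() {
    m := newMaxRateModule(1000)
    m.SetMaxRate(500)
    fmt.Println("max rate:", m.MaxRate())
}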