Code example #1
// Published creates a wrapper for net.Listener that
// publishes connection stats.
func Published(l net.Listener, countTag, acceptTag string) net.Listener {
	return &CountingListener{
		Listener:   l,
		ConnCount:  stats.NewInt(countTag),
		ConnAccept: stats.NewInt(acceptTag),
	}
}
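The constructor above only wires up the counters; a minimal usage sketch (not taken from the vitess sources; the helper name and tag strings are illustrative, and it assumes the code sits in the same package as Published with net and net/http imported) could look like this:

// listenAndServePublished wraps a plain TCP listener with Published so that
// connection stats are exported under the given tags, then serves HTTP on it.
func listenAndServePublished(addr string, handler http.Handler) error {
	l, err := net.Listen("tcp", addr)
	if err != nil {
		return err
	}
	// Published returns a net.Listener, so it drops in wherever one is expected.
	counted := Published(l, "ConnCount", "ConnAccepted")
	return http.Serve(counted, handler)
}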
Code example #2
File: servenv.go Project: henryanand/vitess
func Init() {
	mu.Lock()
	defer mu.Unlock()
	if inited {
		log.Fatal("servenv.Init called second time")
	}
	inited = true

	// Once you run as root, you pretty much destroy the chances of a
	// non-privileged user starting the program correctly.
	if uid := os.Getuid(); uid == 0 {
		log.Fatalf("servenv.Init: running this as root makes no sense")
	}

	runtime.MemProfileRate = *memProfileRate
	gomaxprocs := os.Getenv("GOMAXPROCS")
	if gomaxprocs == "" {
		gomaxprocs = "1"
	}

	// We used to set this limit directly, but you pretty much have to
	// use a root account to allow increasing a limit reliably. Dropping
	// privileges is also tricky. The best strategy is to make a shell
	// script set up the limits as root and switch users before starting
	// the server.
	fdLimit := &syscall.Rlimit{}
	if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, fdLimit); err != nil {
		log.Errorf("max-open-fds failed: %v", err)
	}
	fdl := stats.NewInt("MaxFds")
	fdl.Set(int64(fdLimit.Cur))

	onInitHooks.Fire()
}
Code example #3
File: buildinfo.go Project: henryanand/vitess
func init() {
	t, err := time.Parse(time.UnixDate, buildTime)
	if buildTime != "" && err != nil {
		panic(fmt.Sprintf("Couldn't parse build timestamp %q: %v", buildTime, err))
	}
	stats.NewString("BuildHost").Set(buildHost)
	stats.NewString("BuildUser").Set(buildUser)
	stats.NewInt("BuildTimestamp").Set(t.Unix())
	stats.NewString("BuildGitRev").Set(buildGitRev)
}
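buildTime and the other build variables are ordinary package-level strings; the sketch below shows how such values are typically injected at link time (an assumption for illustration: the -ldflags command and the main.* paths are placeholders, not the project's actual build setup).

// Build metadata variables, overridden at link time, e.g.:
//
//	go build -ldflags "-X 'main.buildTime=Mon Jan  2 15:04:05 UTC 2006' -X 'main.buildGitRev=abc123'"
//
// The buildTime value must match time.UnixDate, which the init function parses above.
var (
	buildHost   string
	buildUser   string
	buildTime   string
	buildGitRev string
)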
Code example #4
File: agent.go Project: henryanand/vitess
// NewActionAgent creates a new ActionAgent and registers all the
// associated services
func NewActionAgent(
	tabletAlias topo.TabletAlias,
	dbcfgs *dbconfigs.DBConfigs,
	mycnf *mysqlctl.Mycnf,
	port, securePort int,
	overridesFile string,
	lockTimeout time.Duration,
) (agent *ActionAgent, err error) {
	schemaOverrides := loadSchemaOverrides(overridesFile)

	topoServer := topo.GetServer()
	mysqld := mysqlctl.NewMysqld("Dba", mycnf, &dbcfgs.Dba, &dbcfgs.Repl)

	agent = &ActionAgent{
		TopoServer:         topoServer,
		TabletAlias:        tabletAlias,
		Mysqld:             mysqld,
		MysqlDaemon:        mysqld,
		DBConfigs:          dbcfgs,
		SchemaOverrides:    schemaOverrides,
		LockTimeout:        lockTimeout,
		History:            history.New(historyLength),
		lastHealthMapCount: stats.NewInt("LastHealthMapCount"),
		_healthy:           fmt.Errorf("healthcheck not run yet"),
	}

	// Start the binlog player services, not playing at start.
	agent.BinlogPlayerMap = NewBinlogPlayerMap(topoServer, &dbcfgs.Filtered, mysqld)
	RegisterBinlogPlayerMap(agent.BinlogPlayerMap)

	// try to figure out the mysql port
	mysqlPort := mycnf.MysqlPort
	if mysqlPort == 0 {
		// we don't know the port, try to get it from mysqld
		var err error
		mysqlPort, err = mysqld.GetMysqlPort()
		if err != nil {
			log.Warningf("Cannot get current mysql port, will use 0 for now: %v", err)
		}
	}

	if err := agent.Start(mysqlPort, port, securePort); err != nil {
		return nil, err
	}

	// register the RPC services from the agent
	agent.registerQueryService()

	// start health check if needed
	agent.initHeathCheck()

	return agent, nil
}
Code example #5
File: rpc.go Project: henryanand/vitess
func NewZkReader(resolveLocal bool, preload []string) *ZkReader {
	zkr := &ZkReader{zcell: make(map[string]*zkCell), resolveLocal: resolveLocal}
	if resolveLocal {
		zkr.localCell = zk.GuessLocalCell()
	}
	zkr.rpcCalls = stats.NewInt("ZkReaderRpcCalls")
	zkr.unknownCellErrors = stats.NewInt("ZkReaderUnknownCellErrors")
	zkr.zkrStats = newZkrStats()

	stats.PublishJSONFunc("ZkReader", zkr.statsJSON)

	// start some cells
	for _, cellName := range preload {
		_, path, err := zkr.getCell("/zk/" + cellName)
		if err != nil {
			log.Errorf("Cell " + cellName + " could not be preloaded: " + err.Error())
		} else {
			log.Infof("Cell " + cellName + " preloaded for: " + path)
		}
	}
	return zkr
}
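stats.PublishJSONFunc, as called above, takes a name and a func() string callback (zkr.statsJSON must have that shape for the call to compile). A hypothetical stand-alone example of the same call, with an invented stat name and encoding/json used for rendering:

// publishPreloadedCells exports the preloaded cell names as a JSON array
// under an illustrative stat name, mirroring the statsJSON callback above.
func publishPreloadedCells(preloaded []string) {
	stats.PublishJSONFunc("ZkReaderPreloadedCells", func() string {
		b, err := json.Marshal(preloaded)
		if err != nil {
			return "[]"
		}
		return string(b)
	})
}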
Code example #6
File: rpcwrap.go Project: henryanand/vitess
	"code.google.com/p/go.net/context"

	log "github.com/golang/glog"
	rpc "github.com/henryanand/vitess/go/rpcplus"
	"github.com/henryanand/vitess/go/rpcwrap/auth"
	"github.com/henryanand/vitess/go/rpcwrap/proto"
	"github.com/henryanand/vitess/go/stats"
)

const (
	connected = "200 Connected to Go RPC"
)

var (
	connCount    = stats.NewInt("connection-count")
	connAccepted = stats.NewInt("connection-accepted")
)

type ClientCodecFactory func(conn io.ReadWriteCloser) rpc.ClientCodec

type BufferedConnection struct {
	isClosed bool
	*bufio.Reader
	io.WriteCloser
}

func NewBufferedConnection(conn io.ReadWriteCloser) *BufferedConnection {
	connCount.Add(1)
	connAccepted.Add(1)
	return &BufferedConnection{false, bufio.NewReader(conn), conn}
}
Code example #7
File: pdns.go Project: henryanand/vitess
	if len(lines) == 0 {
		emptyCount.Add(1)
		log.Warningf("no results for %v %v", req.qtype, req.qname)
	}
	return lines, nil
}

func write(w io.Writer, line string) {
	_, err := io.WriteString(w, line)
	if err != nil {
		log.Errorf("write failed: %v", err)
	}
}

var (
	requestCount = stats.NewInt("PdnsRequestCount")
	errorCount   = stats.NewInt("PdnsErrorCount")
	emptyCount   = stats.NewInt("PdnsEmptyCount")
)

// Serve runs the PDNS server protocol on the given reader/writer.
func (pd *pdns) Serve(r io.Reader, w io.Writer) {
	log.Infof("starting zkns resolver")
	bufr := bufio.NewReader(r)
	needHandshake := true
	for {
		line, isPrefix, err := bufr.ReadLine()
		if err == nil && isPrefix {
			err = errLongLine
		}
		if err == io.EOF {
Code example #8
File: updatestreamctl.go Project: henryanand/vitess
const (
	DISABLED int64 = iota
	ENABLED
)

var usStateNames = map[int64]string{
	ENABLED:  "Enabled",
	DISABLED: "Disabled",
}

var (
	streamCount          = stats.NewCounters("UpdateStreamStreamCount")
	updateStreamErrors   = stats.NewCounters("UpdateStreamErrors")
	updateStreamEvents   = stats.NewCounters("UpdateStreamEvents")
	keyrangeStatements   = stats.NewInt("UpdateStreamKeyRangeStatements")
	keyrangeTransactions = stats.NewInt("UpdateStreamKeyRangeTransactions")
	tablesStatements     = stats.NewInt("UpdateStreamTablesStatements")
	tablesTransactions   = stats.NewInt("UpdateStreamTablesTransactions")
)

type UpdateStream struct {
	mycnf *mysqlctl.Mycnf

	actionLock     sync.Mutex
	state          sync2.AtomicInt64
	mysqld         *mysqlctl.Mysqld
	stateWaitGroup sync.WaitGroup
	dbname         string
	streams        streamList
}
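This file mixes stats.NewCounters (grouped, labelled counters) with stats.NewInt (single counters). The sketch below shows how they might typically be bumped; the helper and the "KeyRange" label are invented, and it assumes Counters exposes an Add(label, delta) method alongside the Int.Add(delta) call seen in example #6.

// recordKeyRangeEvent bumps the plain Int counter for every transaction and
// the labelled error counter only on failure (names here are illustrative).
func recordKeyRangeEvent(err error) {
	keyrangeTransactions.Add(1)
	if err != nil {
		updateStreamErrors.Add("KeyRange", 1)
	}
}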
Code example #9
File: query_engine.go Project: henryanand/vitess
// NewQueryEngine creates a new QueryEngine.
// This is a singleton class.
// You must call this only once.
func NewQueryEngine(config Config) *QueryEngine {
	qe := &QueryEngine{}
	qe.schemaInfo = NewSchemaInfo(
		config.QueryCacheSize,
		time.Duration(config.SchemaReloadTime*1e9),
		time.Duration(config.IdleTimeout*1e9),
	)
	qe.queryRuleInfo = NewQueryRuleInfo()

	mysqlStats = stats.NewTimings("Mysql")

	// Pools
	qe.cachePool = NewCachePool(
		"Rowcache",
		config.RowCache,
		time.Duration(config.QueryTimeout*1e9),
		time.Duration(config.IdleTimeout*1e9),
	)
	qe.connPool = dbconnpool.NewConnectionPool(
		"ConnPool",
		config.PoolSize,
		time.Duration(config.IdleTimeout*1e9),
	)
	qe.streamConnPool = dbconnpool.NewConnectionPool(
		"StreamConnPool",
		config.StreamPoolSize,
		time.Duration(config.IdleTimeout*1e9),
	)

	// Services
	qe.txPool = NewTxPool(
		"TransactionPool",
		config.TransactionCap,
		time.Duration(config.TransactionTimeout*1e9),
		time.Duration(config.TxPoolTimeout*1e9),
		time.Duration(config.IdleTimeout*1e9),
	)
	qe.connKiller = NewConnectionKiller(1, time.Duration(config.IdleTimeout*1e9))
	qe.consolidator = NewConsolidator()
	qe.invalidator = NewRowcacheInvalidator(qe)
	qe.streamQList = NewQueryList(qe.connKiller)

	// Vars
	qe.queryTimeout.Set(time.Duration(config.QueryTimeout * 1e9))
	qe.spotCheckFreq = sync2.AtomicInt64(config.SpotCheckRatio * spotCheckMultiplier)
	if config.StrictMode {
		qe.strictMode.Set(1)
	}
	qe.strictTableAcl = config.StrictTableAcl
	qe.maxResultSize = sync2.AtomicInt64(config.MaxResultSize)
	qe.maxDMLRows = sync2.AtomicInt64(config.MaxDMLRows)
	qe.streamBufferSize = sync2.AtomicInt64(config.StreamBufferSize)

	// loggers
	qe.accessCheckerLogger = logutil.NewThrottledLogger("accessChecker", 1*time.Second)

	// Stats
	stats.Publish("MaxResultSize", stats.IntFunc(qe.maxResultSize.Get))
	stats.Publish("MaxDMLRows", stats.IntFunc(qe.maxDMLRows.Get))
	stats.Publish("StreamBufferSize", stats.IntFunc(qe.streamBufferSize.Get))
	stats.Publish("QueryTimeout", stats.DurationFunc(qe.queryTimeout.Get))
	queryStats = stats.NewTimings("Queries")
	QPSRates = stats.NewRates("QPS", queryStats, 15, 60*time.Second)
	waitStats = stats.NewTimings("Waits")
	killStats = stats.NewCounters("Kills")
	infoErrors = stats.NewCounters("InfoErrors")
	errorStats = stats.NewCounters("Errors")
	internalErrors = stats.NewCounters("InternalErrors")
	resultStats = stats.NewHistogram("Results", resultBuckets)
	stats.Publish("RowcacheSpotCheckRatio", stats.FloatFunc(func() float64 {
		return float64(qe.spotCheckFreq.Get()) / spotCheckMultiplier
	}))
	spotCheckCount = stats.NewInt("RowcacheSpotCheckCount")

	return qe
}
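All nine examples follow the same pattern: create a named stat once at startup, then update it from the serving path. Below is a self-contained sketch of that pattern (assuming the stats package is backed by the standard expvar machinery, so values become readable as JSON over HTTP at /debug/vars; the handler and stat name are illustrative).

package main

import (
	"net/http"

	"github.com/henryanand/vitess/go/stats"
)

// Created once at package scope, like the counters in the examples above.
var requestCount = stats.NewInt("RequestCount")

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// Updated on the hot path, same Add call as connCount.Add(1) in example #6.
		requestCount.Add(1)
		w.Write([]byte("ok"))
	})
	// Assumption: stats publishes through expvar, so the counter shows up
	// at http://localhost:8080/debug/vars once the server is running.
	http.ListenAndServe(":8080", nil)
}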