Example #1
func (s *Server) handleConn(c net.Conn) {
	log.Info("new connection", c.RemoteAddr())

	s.counter.Add("connections", 1)
	client := &session{
		Conn:     c,
		r:        bufio.NewReader(c),
		CreateAt: time.Now(),
	}

	var err error

	defer func() {
		if err != nil { // TODO: clean up this error check
			if GetOriginError(err.(*errors.Err)).Error() != io.EOF.Error() {
				log.Warningf("close connection %v, %+v, %v", c.RemoteAddr(), client, errors.ErrorStack(err))
			} else {
				log.Infof("close connection %v, %+v", c.RemoteAddr(), client)
			}
		} else {
			log.Infof("close connection %v, %+v", c.RemoteAddr(), client)
		}

		c.Close()
		s.counter.Add("connections", -1)
	}()

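	// keep serving requests on this connection until an error occurs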
	for {
		err = s.redisTunnel(client)
		if err != nil {
			return
		}
		client.Ops++
	}
}
Example #2
func (p *Proxy) Run() {
	tcpAddr, err := net.ResolveTCPAddr("tcp", p.addr)
	if err != nil {
		log.Fatal(err)
	}

	listener, err := net.ListenTCP("tcp", tcpAddr)
	if err != nil {
		log.Fatal(err)
	}
	log.Infof("proxy listens on %s", p.addr)
	defer listener.Close()

	go p.dispatcher.Run()

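	// accept loop: serve each client connection on its own goroutine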
	for {
		conn, err := listener.AcceptTCP()
		if err != nil {
			log.Error(err)
			continue
		}
		log.Infof("accept client: %s", conn.RemoteAddr())
		go p.handleConnection(conn)
	}
}
Example #3
// slotsReloadLoop waits on the slot reload channel and reloads the cluster
// topology at most once every slotReloadInterval; it also reloads the
// topology at a longer fixed interval.
func (d *Dispatcher) slotsReloadLoop() {
	periodicReloadInterval := 60 * time.Second
	for {
		// rate-limit reloads: wait out the minimum interval first
		<-time.After(d.slotReloadInterval)
		select {
		case _, ok := <-d.slotReloadChan:
			if !ok {
				log.Infof("exit reload slot table loop")
				return
			}
			log.Infof("request reload triggered")
			if slotInfos, err := d.reloadTopology(); err != nil {
				log.Errorf("reload slot table failed: %v", err)
			} else {
				d.slotInfoChan <- slotInfos
			}
		case <-time.After(periodicReloadInterval):
			log.Infof("periodic reload triggered")
			if slotInfos, err := d.reloadTopology(); err != nil {
				log.Errorf("reload slot table failed: %v", err)
			} else {
				d.slotInfoChan <- slotInfos
			}
		}
	}
}
Example #4
func (s *Server) loadSchemaInfo() error {
	if err := s.parseShards(); err != nil {
		return errors.Trace(err)
	}

	if err := s.parseSchemas(); err != nil {
		return errors.Trace(err)
	}

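	// build one SchemaOverride per table rule, carrying that table's
	// sharding-key columns and row-cache settings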
	for _, v := range s.cfg.Schemas {
		rc := v.RouterConifg
		var overrides []tabletserver.SchemaOverride
		for _, tr := range rc.TableRule {
			or := tabletserver.SchemaOverride{Name: tr.Table}
			pks := strings.Split(tr.ShardingKey, ",")
			for _, pk := range pks {
				or.PKColumns = append(or.PKColumns, strings.TrimSpace(pk))
			}
			log.Infof("table rule:%+v", tr)
			or.Cache = &tabletserver.OverrideCacheDesc{Type: tr.RowCacheType, Prefix: or.Name, Table: or.Name}
			overrides = append(overrides, or)
		}

		// TODO: the backing node is hard-coded to the first shard
		sc := s.cfg.Shards[0]
		si := tabletserver.NewSchemaInfo(s.cfg.RowCacheConf, sc.Master, sc.User, sc.Password, v.DB, overrides)

		log.Infof("%+v", si)
		s.autoSchamas[v.DB] = si
	}

	return nil
}
Example #5
func (ti *TableInfo) initRowCache(tableType string, createTime sqltypes.Value, comment string, cachePool *CachePool) {
	if cachePool.IsClosed() {
		return
	}

	if strings.Contains(comment, "vtocc_nocache") {
		log.Infof("%s commented as vtocc_nocache. Will not be cached.", ti.Name)
		return
	}

	if tableType == "VIEW" {
		log.Infof("%s is a view. Will not be cached.", ti.Name)
		return
	}

	if ti.PKColumns == nil {
		log.Infof("Table %s has no primary key. Will not be cached.", ti.Name)
		return
	}
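	// a primary key containing an uncacheable column type disqualifies the table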
	for _, col := range ti.PKColumns {
		if ti.Columns[col].SqlType == mysql.MYSQL_TYPE_NO_CACHE {
			log.Infof("Table %s pk has unsupported column types. Will not be cached.", ti.Name)
			return
		}
	}

	ti.CacheType = schema.CACHE_RW
	ti.Cache = NewRowCache(ti, cachePool)
}
Example #6
func NewServer(addr string, debugVarAddr string, conf *Conf) *Server {
	log.Infof("%+v", conf)
	s := &Server{
		evtbus:            make(chan interface{}, 100),
		top:               topo.NewTopo(conf.productName, conf.zkAddr, conf.f),
		net_timeout:       conf.net_timeout,
		counter:           stats.NewCounters("router"),
		lastActionSeq:     -1,
		startAt:           time.Now(),
		addr:              addr,
		concurrentLimiter: tokenlimiter.NewTokenLimiter(100),
		moper:             NewMultiOperator(addr),
		pools:             cachepool.NewCachePool(),
	}

	s.broker = conf.broker

	slot_num = conf.slot_num

	s.mu.Lock()
	s.pi.Id = conf.proxyId
	s.pi.State = models.PROXY_STATE_OFFLINE
	hname, err := os.Hostname()
	if err != nil {
		log.Fatal("get host name failed", err)
	}
	s.pi.Addr = hname + ":" + strings.Split(addr, ":")[1]
	s.pi.DebugVarAddr = hname + ":" + strings.Split(debugVarAddr, ":")[1]
	log.Infof("proxy_info:%+v", s.pi)
	s.mu.Unlock()
	// TODO: fill in more fields

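	// publish a few runtime gauges on the stats endpoint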
	stats.Publish("evtbus", stats.StringFunc(func() string {
		return strconv.Itoa(len(s.evtbus))
	}))
	stats.Publish("startAt", stats.StringFunc(func() string {
		return s.startAt.String()
	}))

	s.RegisterAndWait()

	_, err = s.top.WatchChildren(models.GetWatchActionPath(conf.productName), s.evtbus)
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}

	s.FillSlots()

	// start the topology event handler
	go s.handleTopoEvent()

	log.Info("proxy start ok")

	return s
}
Example #7
func LoadConf(configFile string) (*Conf, error) {
	srvConf := &Conf{}
	conf, err := utils.InitConfigFromFile(configFile)
	if err != nil {
		return nil, err
	}

	srvConf.productName, _ = conf.ReadString("product", "test")
	if len(srvConf.productName) == 0 {
		log.Fatalf("invalid config: product entry is missing in %s", configFile)
	}
	srvConf.zkAddr, _ = conf.ReadString("zk", "")
	if len(srvConf.zkAddr) == 0 {
		log.Fatalf("invalid config: need zk entry is missing in %s", configFile)
	}
	srvConf.zkAddr = strings.TrimSpace(srvConf.zkAddr)

	srvConf.proxyId, _ = conf.ReadString("proxy_id", "")
	if len(srvConf.proxyId) == 0 {
		log.Fatalf("invalid config: need proxy_id entry is missing in %s", configFile)
	}

	srvConf.netTimeout, _ = conf.ReadInt("net_timeout", 5)
	srvConf.proto, _ = conf.ReadString("proto", "tcp")
	srvConf.provider, _ = conf.ReadString("coordinator", "zookeeper")
	log.Infof("%+v", srvConf)

	return srvConf, nil
}
Example #8
func (c *Cluster) GetConn(key []byte, slave bool) (Conn, error) {
	id := c.topo.GetNodeID(key, slave)
	log.Infof("GetConn %s for key: %s", id, string(key))

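	// connection pools are created lazily, one per node ID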
	pool, ok := c.pools[id]
	if !ok {
		// the Options entry should always exist; guard against a missing one
		opt := c.opts[id]
		if opt == nil {
			n := c.topo.GetNode(id)
			if n == nil {
				return nil, fmt.Errorf("Cluster GetConn: node ID %s does not exist", id)
			}

			// note: assign, don't redeclare, so the outer opt is set
			opt = &Options{
				Network:      "tcp",
				Addr:         fmt.Sprintf("%s:%d", n.host, n.port),
				Dialer:       RedisConnDialer(n.host, n.port, n.id, c.pc),
				DialTimeout:  c.pc.dialTimeout,
				ReadTimeout:  c.pc.readTimeout,
				WriteTimeout: c.pc.writeTimeout,
				PoolSize:     c.pc.poolSize,
				IdleTimeout:  c.pc.idleTimeout,
			}
			c.opts[id] = opt
		}
		pool = NewConnPool(opt)
		c.pools[id] = pool
	}

	return pool.Get()
}
Example #9
func handleCrashedServer(s *models.Server) error {
	switch s.Type {
	case models.SERVER_TYPE_MASTER:
		// get a slave and promote it
		slave, err := getSlave(s)
		if err != nil {
			log.Warning(errors.ErrorStack(err))
			return err
		}

		log.Infof("try promote %+v", slave)
		err = callHttp(nil, genUrl(*apiServer, "/api/server_group/", slave.GroupId, "/promote"), "POST", slave)
		if err != nil {
			log.Errorf("do promote %v failed %v", slave, errors.ErrorStack(err))
			return err
		}
		refreshSlave(s) // refresh the slave list
	case models.SERVER_TYPE_SLAVE:
		log.Errorf("slave is down: %+v", s)
	case models.SERVER_TYPE_OFFLINE:
		//no need to handle it
	default:
		log.Fatalf("unkonwn type %+v", s)
	}

	return nil
}
Example #10
func NewSchemaInfo(rowCacheConf RowCacheConfig, dbAddr string, user, pwd, dbName string, overrides []SchemaOverride) *SchemaInfo {
	si := &SchemaInfo{
		queries:   cache.NewLRUCache(128 * 1024 * 1024),
		tables:    make(map[string]*TableInfo),
		cachePool: NewCachePool(dbName, rowCacheConf, 3*time.Second, 3*time.Second),
	}

	var err error
	si.connPool, err = mysql.Open(dbAddr, user, pwd, dbName)
	if err != nil { // TODO: return the error instead of exiting
		log.Fatal(err)
	}

	si.overrides = overrides
	si.connPool.SetMaxIdleConnNum(100)
	log.Infof("%+v", si.overrides)
	si.cachePool.Open()

	for _, or := range si.overrides {
		si.CreateOrUpdateTable(or.Name)
	}

	si.override()

	return si
}
Example #11
func (s *Session) Close() {
	log.Infof("close session %p", s)
	if !s.closed {
		s.closed = true
		s.Conn.Close()
	}
}
Example #12
func (si *SchemaInfo) override() {
	for _, override := range si.overrides {
		table, ok := si.tables[override.Name]
		if !ok {
			log.Warningf("Table not found for override: %v, %v", override, si.tables)
			continue
		}
		if override.PKColumns != nil {
			log.Infof("SetPK Table name %s, pk %v", override.Name, override.PKColumns)
			if err := table.SetPK(override.PKColumns); err != nil {
				log.Errorf("%s: %v", errors.ErrorStack(err), override)
				continue
			}
		}
		if si.cachePool.IsClosed() || override.Cache == nil {
			log.Infof("%+v", override)
			continue
		}

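		// "RW" gives the table its own read/write row cache; "W" makes the
		// table share the cache of another table named in the override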
		switch override.Cache.Type {
		case "RW":
			table.CacheType = schema.CACHE_RW
			table.Cache = NewRowCache(table, si.cachePool)
		case "W":
			table.CacheType = schema.CACHE_W
			if len(override.Cache.Table) == 0 {
				log.Warningf("Incomplete cache specs: %v", override)
				continue
			}

			totable, ok := si.tables[override.Cache.Table]
			if !ok {
				log.Warningf("Table not found: %v", override)
				continue
			}

			if totable.Cache == nil {
				log.Warningf("Table has no cache: %v", override)
				continue
			}

			table.Cache = totable.Cache
		default:
			log.Warningf("Ignoring cache override: %+v", override)
		}
	}
}
Example #13
// Lius: main event handler; requests arriving on reqCh have already been read and parsed
func (s *Server) handleTopoEvent() {
	// the listener is started in Server.Run, which is invoked from main
	for {
		select {
		case r := <-s.reqCh: // Lius: send to backend
			if s.slots[r.slotIdx].slotInfo.State.Status == models.SLOT_STATUS_PRE_MIGRATE {
				s.bufferedReq.PushBack(r)
				continue
			}

			// Lius: flush requests that were buffered while keys were migrating (or for another strategy)
			for e := s.bufferedReq.Front(); e != nil; {
				next := e.Next()
				s.dispatch(e.Value.(*PipelineRequest))
				s.bufferedReq.Remove(e)
				e = next
			}

			// Lius: send current request
			s.dispatch(r)
		case e := <-s.evtbus:
			switch evt := e.(type) {
			case *killEvent:
				s.handleMarkOffline()
				evt.done <- nil
			default:
				evtPath := GetEventPath(e)
				log.Infof("got event %s, %v, lastActionSeq %d", s.pi.Id, e, s.lastActionSeq)
				if strings.HasPrefix(evtPath, models.GetActionResponsePath(s.conf.productName)) {
					seq, err := strconv.Atoi(path.Base(evtPath))
					if err != nil {
						log.Warning(err)
					} else if seq < s.lastActionSeq {
						log.Info("ignore", seq)
						continue
					}
				}

				s.processAction(e)
			}
		}
	}
}
Example #14
func (s *Server) getActionObject(seq int, target interface{}) {
	act := &models.Action{Target: target}
	log.Infof("%+v", act)
	err := s.top.GetActionWithSeqObject(int64(seq), act)
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}
}
Example #15
func main() {
	autoflags.Define(&config)
	flag.Parse()
	log.SetLevelByString(config.LogLevel)
	// reference pprof so the import is not dropped as unused
	log.Debug(pprof.Handler("profile"))
	if len(config.LogFile) != 0 {
		log.SetOutputByName(config.LogFile)
		log.SetRotateByDay()
	}
	if config.LogEveryN <= 0 {
		proxy.LogEveryN = 1
	} else {
		proxy.LogEveryN = config.LogEveryN
	}
	log.Infof("%#v", config)
	sigChan := make(chan os.Signal, 1) // signal.Notify needs a buffered channel
	signal.Notify(sigChan, os.Interrupt, os.Kill)

	log.Infof("pid %d", os.Getpid())
	if len(config.DebugAddr) != 0 {
		http.HandleFunc("/setloglevel", handleSetLogLevel)
		go func() {
			log.Fatal(http.ListenAndServe(config.DebugAddr, nil))
		}()
		log.Infof("debug service listens on %s", config.DebugAddr)
	}

	// shuffle startup nodes
	startupNodes := strings.Split(config.StartupNodes, ",")
	rand.Shuffle(len(startupNodes), func(i, j int) {
		startupNodes[i], startupNodes[j] = startupNodes[j], startupNodes[i]
	})
	connPool := proxy.NewConnPool(config.BackendIdleConnections, config.ConnectTimeout, config.ReadPrefer != proxy.READ_PREFER_MASTER)
	dispatcher := proxy.NewDispatcher(startupNodes, config.SlotsReloadInterval, connPool, config.ReadPrefer)
	if err := dispatcher.InitSlotTable(); err != nil {
		log.Fatal(err)
	}
	p := proxy.NewProxy(config.Addr, dispatcher, connPool)
	go p.Run()
	sig := <-sigChan
	log.Infof("terminated by %#v", sig)
	p.Exit()
}
Example #16
func (s *Server) handleTopoEvent() {
	for {
		e := <-s.evtbus
		log.Infof("got event %s, %v", s.pi.Id, e)
		s.processAction(e)
	}
}
Example #17
func (s *Server) handleConn(c net.Conn) {
	log.Info("new connection", c.RemoteAddr())

	s.counter.Add("connections", 1)
	client := &session{
		Conn:        c,
		r:           bufio.NewReaderSize(c, 32*1024),
		w:           bufio.NewWriterSize(c, 32*1024),
		CreateAt:    time.Now(),
		backQ:       make(chan *PipelineResponse, 1000),
		closeSignal: &sync.WaitGroup{},
	}
	client.closeSignal.Add(1)

	// Lius: writer goroutine that sends data returned from the backend redis to the client
	go client.WritingLoop()

	var err error
	defer func() {
		client.closeSignal.Wait() //waiting for writer goroutine

		if err != nil { // TODO: clean up this error check
			if GetOriginError(err.(*errors.Err)).Error() != io.EOF.Error() {
				log.Warningf("close connection %v, %v", client, errors.ErrorStack(err))
			} else {
				log.Infof("close connection  %v", client)
			}
		} else {
			log.Infof("close connection %v", client)
		}

		s.counter.Add("connections", -1)
	}()

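	// request loop; closing backQ tells the writer goroutine to drain and exit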
	for {
		err = s.redisTunnel(client)
		if err != nil {
			close(client.backQ)
			return
		}
		client.Ops++
	}
}
Example #18
func (c *Conn) getPlanAndTableInfo(stmt sqlparser.Statement) (*planbuilder.ExecPlan, *tabletserver.TableInfo, error) {
	plan, err := planbuilder.GetStmtExecPlan(stmt, c.getTableSchema, c.alloc)
	if err != nil {
		return nil, nil, errors.Trace(err)
	}

	log.Infof("%+v", plan)

	ti := c.getTableInfo(plan.TableName)

	return plan, ti, nil
}
Example #19
func (c *Conn) handleSelect(stmt *sqlparser.Select, sql string, args []interface{}) error {
	// handle cache
	plan, ti, err := c.getPlanAndTableInfo(stmt)
	if err != nil {
		return errors.Trace(err)
	}

	log.Debugf("handleSelect %s, %+v", sql, plan.PKValues)

	c.server.IncCounter(plan.PlanId.String())

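	// try the row cache first when the plan pins down primary-key values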
	if ti != nil && len(plan.PKValues) > 0 && ti.CacheType != schema.CACHE_NONE {
		pks := pkValuesToStrings(ti.PKColumns, plan.PKValues)
		items := ti.Cache.Get(pks, ti.Columns)
		count := 0
		for _, item := range items {
			if item.Row != nil {
				count++
			}
		}

		if count == len(pks) { // all rows were cache hits
			c.server.IncCounter("hit")
			log.Info("hit cache!", sql, pks)
			return c.writeCacheResults(plan, ti, pks, items)
		}

		c.server.IncCounter("miss")

		if plan.PlanId == planbuilder.PLAN_PK_IN && len(pks) == 1 {
			log.Infof("%s, %+v, %+v", sql, plan, stmt)
			return c.fillCacheAndReturnResults(plan, ti, pks)
		}
	}

	bindVars := makeBindVars(args)
	conns, err := c.getShardConns(true, stmt, bindVars)
	if err != nil {
		return errors.Trace(err)
	} else if len(conns) == 0 { // TODO: handle this as an error
		r := c.newEmptyResultset(stmt)
		return c.writeResultset(c.status, r)
	}

	var rs []*mysql.Result
	rs, err = c.executeInShard(conns, sql, args)
	c.closeShardConns(conns)
	if err == nil {
		err = c.mergeSelectResult(rs, stmt)
	}

	return errors.Trace(err)
}
Example #20
// the crontab expression must be validated before use
func (cj *CronJob) IsValid() bool {
	log.Info("check crontab if avalid :", cj.Id, cj.Name)
	if _, err := cronexpr.Parse(cj.Schedule); err != nil {
		log.Warning("cron job parse crontab format failed: ", err)
		return false
	}
	if cj.Runner == "root" {
		log.Warning("cron job must run under non-root user, current is: ", cj.Runner)
		return false
	}

	now := time.Now().Unix()

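	// no time window configured: validity depends only on the Disabled flag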
	if cj.StartAt == 0 && cj.EndAt == 0 {
		return !cj.Disabled
	}

	if cj.StartAt == 1 && cj.EndAt == 2 {
		// sentinel values: the job was deleted by the user
		return false
	}

	if cj.StartAt > now && (cj.EndAt > cj.StartAt || cj.EndAt == 0) {
		log.Infof("crontab %d %s unstart, start: %d, end: %d", cj.Id, cj.Name, cj.StartAt, cj.EndAt)
		return false
	}

	if cj.StartAt < cj.EndAt && cj.EndAt < now && !cj.Disabled {
		log.Infof("crontab %d %s exprie, start: %d, end: %d", cj.Id, cj.Name, cj.StartAt, cj.EndAt)
		return false
	}

	if cj.StartAt < now && (cj.EndAt > now || cj.EndAt == 0) {
		log.Infof("crontab %d %s start, start: %d, end: %d", cj.Id, cj.Name, cj.StartAt, cj.EndAt)
		return !cj.Disabled
	}

	return !cj.Disabled
}
Example #21
// resolve paths like:
// /zk/nyc/vt/tablets/*/action
// /zk/global/vt/keyspaces/*/shards/*/action
// /zk/*/vt/tablets/*/action
// into real existing paths
//
// If you send paths that don't contain any wildcard and
// don't exist, this function will return an empty array.
func ResolveWildcards(zconn zookeeper.Conn, zkPaths []string) ([]string, error) {
	// We used to check that all paths start with /zk/ before doing anything
	// time-consuming. This is relaxed here, in case we are not talking to a
	// metaconn and just want to talk to a specific instance:
	// for _, zkPath := range zkPaths {
	// 	if _, err := ZkCellFromZkPath(zkPath); err != nil {
	// 		return nil, err
	// 	}
	// }

	results := make([][]string, len(zkPaths))
	wg := &sync.WaitGroup{}
	mu := &sync.Mutex{}
	var firstError error

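	// resolve every path concurrently; only the first error is reported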
	for i, zkPath := range zkPaths {
		wg.Add(1)
		parts := strings.Split(zkPath, "/")
		go func(i int) {
			defer wg.Done()
			subResult, err := resolveRecursive(zconn, parts, true)
			if err != nil {
				mu.Lock()
				if firstError != nil {
					log.Infof("Multiple error: %v", err)
				} else {
					firstError = err
				}
				mu.Unlock()
			} else {
				results[i] = subResult
			}
		}(i)
	}

	wg.Wait()
	if firstError != nil {
		return nil, firstError
	}

	result := make([]string, 0, 32)
	for i := 0; i < len(zkPaths); i++ {
		subResult := results[i]
		if subResult != nil {
			result = append(result, subResult...)
		}
	}

	return result, nil
}
Example #22
func (s *Server) Run() {
	log.Infof("listening %s on %s", s.conf.proto, s.addr)
	listener, err := net.Listen(s.conf.proto, s.addr)
	if err != nil {
		log.Fatal(err)
	}

	for {
		conn, err := listener.Accept()
		if err != nil {
			log.Warning(errors.ErrorStack(err))
			continue
		}
		go s.handleConn(conn)
	}
}
Example #23
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())

	flag.Parse()

	if len(*configFile) == 0 {
		log.Error("must use a config file")
		return
	}

	cfg, err := config.ParseConfigFile(*configFile)
	if err != nil {
		log.Error(err.Error())
		return
	}

	log.SetLevelByString(cfg.LogLevel)

	log.CrashLog("./cm-proxy.dump")

	var svr *proxy.Server
	svr, err = proxy.NewServer(*configFile)
	if err != nil {
		log.Error(err.Error())
		return
	}

	sc := make(chan os.Signal, 1)
	signal.Notify(sc,
		syscall.SIGHUP,
		syscall.SIGINT,
		syscall.SIGTERM,
		syscall.SIGQUIT)

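	// exit cleanly on the first signal received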
	go func() {
		sig := <-sc
		log.Infof("Got signal [%d] to exit.", sig)
		svr.Close()
		os.Exit(0)
	}()

	go svr.Run()

	http.HandleFunc("/api/reload", svr.HandleReload)
	// TODO: take the address from the configuration
	log.Fatal(http.ListenAndServe(":8888", nil))
}
Example #24
func (sm *SessMana) CheckIdleLoop() {
	ticker := time.NewTicker(time.Minute)
	defer ticker.Stop()
	for range ticker.C {
		// hold the lock for the whole scan: the pool map may be mutated
		// concurrently by other goroutines
		sm.l.Lock()
		for id, s := range sm.pool {
			if sm.idle > 0 && time.Since(s.lastUsed) > sm.idle {
				delete(sm.pool, id)
				log.Infof("client %s idle timeout quit", id)
				s.Close()
			}
		}
		sm.l.Unlock()
	}
}
Example #25
func (top *Topology) doWatch(evtch <-chan topo.Event, evtbus chan interface{}) {
	e := <-evtch
	log.Infof("topo event %+v", e)
	if e.State == topo.StateExpired {
		log.Fatalf("session expired: %+v", e)
	}

	switch e.Type {
	//case topo.EventNodeCreated:
	//case topo.EventNodeDataChanged:
	case topo.EventNodeChildrenChanged: // only children-changed events matter here
		// TODO: get the changed node and decode the event
	default:
		log.Warningf("%+v", e)
	}

	evtbus <- e
}
Example #26
// remove unused task runner
func (d *Dispatcher) handleSlotInfoChanged(slotInfos []*SlotInfo) {
	newServers := make(map[string]bool)
	for _, si := range slotInfos {
		d.slotTable.SetSlotInfo(si)
		newServers[si.write] = true
		for _, read := range si.read {
			newServers[read] = true
		}
	}

	for server, tr := range d.taskRunners {
		if _, ok := newServers[server]; !ok {
			log.Infof("exit unused task runner %s", server)
			tr.Exit()
			delete(d.taskRunners, server)
		}
	}
}
Example #27
func (cp *CachePool) Open() {
	cp.mu.Lock()
	defer cp.mu.Unlock()
	if cp.pool != nil {
		panic("rowcache is already open")
	}
	if cp.rowCacheConfig.Binary == "" {
		panic("rowcache binary not specified")
	}
	cp.startMemcache()
	log.Infof("rowcache is enabled")
	f := func() (pools.Resource, error) {
		return memcache.Connect(cp.port, 10*time.Second)
	}
	cp.pool = pools.NewResourcePool(f, cp.capacity, cp.capacity, cp.idleTimeout)
	if cp.memcacheStats != nil {
		cp.memcacheStats.Open()
	}
}
Example #28
func NewServer(configFile string) (*Server, error) {
	s := makeServer(configFile)
	err := s.loadSchemaInfo()
	if err != nil {
		return nil, errors.Trace(err)
	}

	// a "/" in the address means a unix socket path rather than host:port
	netProto := "tcp"
	if strings.Contains(s.addr, "/") {
		netProto = "unix"
	}

	s.listener, err = net.Listen(netProto, s.addr)
	if err != nil {
		return nil, errors.Trace(err)
	}

	log.Infof("Server run MySql Protocol Listen(%s) at [%s]", netProto, s.addr)
	return s, nil
}
Example #29
func (s *Server) fillSlot(i int, force bool) {
	if !validSlot(i) {
		return
	}

	if !force && s.slots[i] != nil { // refuse to overwrite an already-filled slot
		log.Fatalf("slot %d already filled, slot: %+v", i, s.slots[i])
		return
	}

	s.clearSlot(i)

	slotInfo, groupInfo, err := s.top.GetSlotByIndex(i)
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}

	slot := &Slot{
		slotInfo:  slotInfo,
		dst:       group.NewGroup(*groupInfo),
		groupInfo: groupInfo,
	}

	log.Infof("fill slot %d, force %v, %+v", i, force, slot.dst)

	s.pools.AddPool(slot.dst.Master())

	if slot.slotInfo.State.Status == models.SLOT_STATUS_MIGRATE {
		// get the migration source group and fill it in
		from, err := s.top.GetGroup(slot.slotInfo.State.MigrateStatus.From)
		if err != nil { // TODO: retry?
			log.Fatal(err)
		}
		slot.migrateFrom = group.NewGroup(*from)
		s.pools.AddPool(slot.migrateFrom.Master())
	}

	s.slots[i] = slot
	s.counter.Add("FillSlot", 1)
}
Example #30
func (s *Server) onConn(c net.Conn) {
	conn := s.newConn(c)
	if err := conn.Handshake(); err != nil {
		log.Errorf("handshake error %s", errors.ErrorStack(err))
		c.Close()
		return
	}

	const key = "connections"

	s.IncCounter(key)
	defer func() {
		s.DecCounter(key)
		log.Infof("close %s", conn)
	}()

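	// register the connection in the server's client table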
	s.rwlock.Lock()
	s.clients[conn.connectionId] = conn
	s.rwlock.Unlock()

	conn.Run()
}