// NewMySQLStorage initializes the MySQL connection pool and the consistent hash ring.
func NewMySQLStorage() *MySQLStorage {
	var (
		err error
		w   int
		nw  []string
		db  *sql.DB
	)
	dbPool := make(map[string]*sql.DB)
	ring := ketama.NewRing(ketamaBase)
	for n, source := range Conf.MySQLSource {
		// parse the "nodeN:W" key into node name and weight
		nw = strings.Split(n, ":")
		if len(nw) != 2 {
			err = errors.New("node config error, expected \"nodeN:W\"")
			log.Error("strings.Split(\"%s\", \":\") failed (%v)", n, err)
			panic(err)
		}
		w, err = strconv.Atoi(nw[1])
		if err != nil {
			log.Error("strconv.Atoi(\"%s\") failed (%v)", nw[1], err)
			panic(err)
		}
		db, err = sql.Open("mysql", source)
		if err != nil {
			log.Error("sql.Open(\"mysql\", %s) failed (%v)", source, err)
			panic(err)
		}
		// register the connection and the weighted node on the ring
		dbPool[nw[0]] = db
		ring.AddNode(nw[0], w)
	}
	ring.Bake()
	s := &MySQLStorage{pool: dbPool, ring: ring}
	go s.clean()
	return s
}
// NewMySQLStorage initializes the MySQL connection pool and the consistent hash ring.
func NewMySQLStorage() *MySQLStorage {
	dbPool := make(map[string]*sql.DB)
	ring := ketama.NewRing(Conf.MySQLKetamaBase)
	for n, source := range Conf.MySQLSource {
		// get node weight
		nw := strings.Split(n, mysqlNodeWeightSpliter)
		if len(nw) != 2 {
			glog.Errorf("strings.Split(\"%s\", \"%s\") length error", n, mysqlNodeWeightSpliter)
			panic(fmt.Sprintf("config mysql.source weight:\"%s\" format error", n))
		}
		w, err := strconv.Atoi(nw[1])
		if err != nil {
			glog.Errorf("strconv.Atoi(\"%s\") failed (%v)", nw[1], err)
			panic(err)
		}
		// open mysql db
		db, err := sql.Open("mysql", source)
		if err != nil {
			glog.Errorf("sql.Open(\"mysql\", %s) failed (%v)", source, err)
			panic(err)
		}
		// add to mysql pool and register the node on the ketama hash
		dbPool[nw[0]] = db
		ring.AddNode(nw[0], w)
	}
	ring.Bake()
	s := &MySQLStorage{pool: dbPool, ring: ring}
	go s.clean()
	return s
}
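// Usage sketch (not in the source): once the storage is built, a key is routed
// to a shard by hashing it on the ring and looking the node name up in the
// pool. getDBByKey is a hypothetical helper; the real storage methods are
// assumed to do something equivalent inline.
func (s *MySQLStorage) getDBByKey(key string) *sql.DB {
	node := s.ring.Hash(key) // node name registered via ring.AddNode above
	return s.pool[node]
}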
// NewRedisStorage initializes the redis pool and the consistent hash ring.
func NewRedisStorage() *RedisStorage {
	redisPool := map[string]*redis.Pool{}
	ring := ketama.NewRing(ketamaBase)
	// sources look like "proto@addr#auth" or "proto@addr"
	reg := regexp.MustCompile("(.+)@(.+)#(.+)|(.+)@(.+)")
	for n, addr := range Conf.RedisSource {
		nw := strings.Split(n, ":")
		if len(nw) != 2 {
			err := errors.New("node config error, expected \"nodeN:W\"")
			log.Error("strings.Split(\"%s\", \":\") failed (%v)", n, err)
			panic(err)
		}
		w, err := strconv.Atoi(nw[1])
		if err != nil {
			log.Error("strconv.Atoi(\"%s\") failed (%v)", nw[1], err)
			panic(err)
		}
		// get protocol and addr
		pw := reg.FindStringSubmatch(addr)
		if len(pw) < 6 {
			log.Error("regexp.FindStringSubmatch(\"%s\") failed (%v)", addr, pw)
			panic(fmt.Sprintf("config redis.source node:\"%s\" format error", addr))
		}
		tmpProto := ""
		tmpAddr := ""
		if pw[1] != "" {
			tmpProto = pw[1]
		} else {
			tmpProto = pw[4]
		}
		if pw[2] != "" {
			tmpAddr = pw[2]
		} else {
			tmpAddr = pw[5]
		}
		// WARN: tmpProto/tmpAddr are per-iteration copies; the Dial closure
		// below captures them, so each pool dials its own node.
		redisPool[nw[0]] = &redis.Pool{
			MaxIdle:     Conf.RedisMaxIdle,
			MaxActive:   Conf.RedisMaxActive,
			IdleTimeout: Conf.RedisIdleTimeout,
			Dial: func() (redis.Conn, error) {
				conn, err := redis.Dial(tmpProto, tmpAddr)
				if err != nil {
					log.Error("redis.Dial(\"%s\", \"%s\") error(%v)", tmpProto, tmpAddr, err)
					return nil, err
				}
				if pw[3] != "" {
					if _, err = conn.Do("AUTH", pw[3]); err != nil {
						conn.Close()
						return nil, err
					}
				}
				return conn, err
			},
		}
		// add node to ketama hash
		ring.AddNode(nw[0], w)
	}
	ring.Bake()
	s := &RedisStorage{pool: redisPool, ring: ring, delCH: make(chan *RedisDelMessage, 10240)}
	go s.clean()
	return s
}
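// Format sketch (assumption inferred from the regexp above): each redis source
// is "proto@addr#auth" or "proto@addr". parseRedisSource is a hypothetical
// helper that mirrors the inline submatch handling; for
// "tcp@127.0.0.1:6379#secret" it returns ("tcp", "127.0.0.1:6379", "secret").
func parseRedisSource(addr string) (proto, host, auth string, ok bool) {
	reg := regexp.MustCompile("(.+)@(.+)#(.+)|(.+)@(.+)")
	pw := reg.FindStringSubmatch(addr)
	if len(pw) < 6 {
		return "", "", "", false
	}
	if pw[1] != "" {
		// first alternative matched: proto@addr#auth
		return pw[1], pw[2], pw[3], true
	}
	// second alternative matched: proto@addr, no auth
	return pw[4], pw[5], "", true
}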
func InitRouter() (err error) {
	var (
		network, addr string
	)
	routerRing = ketama.NewRing(ketama.Base)
	for serverId, addrs := range Conf.RouterRPCAddrs {
		// WARN: r must be re-declared on every iteration so each Reconnect
		// goroutine gets its own pointer to update after a reconnect.
		var (
			r          *rpc.Client
			routerQuit = make(chan struct{}, 1)
		)
		if network, addr, err = inet.ParseNetwork(addrs); err != nil {
			log.Error("inet.ParseNetwork() error(%v)", err)
			return
		}
		r, err = rpc.Dial(network, addr)
		if err != nil {
			// dial failure is tolerated: Reconnect below keeps retrying;
			// clear err so it does not leak through the named return.
			log.Error("rpc.Dial(\"%s\", \"%s\") error(%s)", network, addr, err)
			err = nil
		}
		go rpc.Reconnect(&r, routerQuit, network, addr)
		log.Debug("router rpc addr:%s connect", addr)
		routerServiceMap[serverId] = &r
		routerRing.AddNode(serverId, 1)
	}
	routerRing.Bake()
	return
}
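// Lookup sketch (assumption): the assignment routerServiceMap[serverId] = &r
// above implies the map holds **rpc.Client, so a reader dereferences the
// pointer on each call to see whichever client Reconnect last installed.
// getRouterByKey is a hypothetical helper.
func getRouterByKey(key string) *rpc.Client {
	serverId := routerRing.Hash(key)
	if rp, ok := routerServiceMap[serverId]; ok && rp != nil {
		return *rp
	}
	return nil
}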
// Migrate migrates the portion of connections that no longer belongs to this comet.
func (l *ChannelList) Migrate(nw map[string]int) (err error) {
	migrate := false
	// check new/updated nodes
	for k, v := range nw {
		weight, ok := nodeWeightMap[k]
		// not found or weight changed
		if !ok || weight != v {
			migrate = true
			break
		}
	}
	// check deleted nodes
	if !migrate {
		for k := range nodeWeightMap {
			// node deleted
			if _, ok := nw[k]; !ok {
				migrate = true
				break
			}
		}
	}
	if !migrate {
		return
	}
	// init ketama
	ring := ketama.NewRing(ketama.Base)
	for node, weight := range nw {
		ring.AddNode(node, weight)
	}
	ring.Bake()
	// atomic update
	nodeWeightMap = nw
	CometRing = ring
	// collect channels that hash to another node, holding each bucket lock
	channels := []Channel{}
	for i, c := range l.Channels {
		c.Lock()
		for k, v := range c.Data {
			hn := ring.Hash(k)
			if hn != Conf.ZookeeperCometNode {
				channels = append(channels, v)
				delete(c.Data, k)
				log.Debug("migrate delete channel key \"%s\"", k)
			}
		}
		c.Unlock()
		log.Debug("migrate channel bucket:%d finished", i)
	}
	// close all the migrated channels
	log.Info("close all the migrate channels")
	for _, channel := range channels {
		if err := channel.Close(); err != nil {
			log.Error("channel.Close() error(%v)", err)
			continue
		}
	}
	log.Info("close all the migrate channels finished")
	return
}
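// Sketch (illustration only): the migrate decision above reduces to "did the
// node/weight set change?". Extracted as a hypothetical predicate:
func nodesChanged(oldW, newW map[string]int) bool {
	for k, v := range newW {
		if w, ok := oldW[k]; !ok || w != v {
			return true // node added or weight changed
		}
	}
	for k := range oldW {
		if _, ok := newW[k]; !ok {
			return true // node deleted
		}
	}
	return false
}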
// Migrate migrates the connections that no longer belong to this comet node.
func (c *CometRPC) Migrate(args *myrpc.CometMigrateArgs, ret *int) error {
	if len(args.Nodes) == 0 {
		return myrpc.ErrParam
	}
	// the current node must exist in the new node set
	has := false
	for str := range args.Nodes {
		if str == Conf.ZookeeperCometNode {
			has = true
		}
	}
	if !has {
		glog.Errorf("make sure your migrate nodes are right, there is no %s in nodes, this will cause all the node hit miss", Conf.ZookeeperCometNode)
		return ErrMigrate
	}
	// init ketama
	ring := ketama.NewRing(args.Vnode)
	for k, v := range args.Nodes {
		ring.AddNode(k, v)
	}
	ring.Bake()
	channels := []Channel{}
	// collect and delete the keys that hash elsewhere, one locked bucket at a time
	for i, c := range UserChannel.Channels {
		keys := []string{} // reset per bucket so earlier buckets' keys are not re-deleted
		c.Lock()
		for k, v := range c.Data {
			hn := ring.Hash(k)
			if hn != Conf.ZookeeperCometNode {
				channels = append(channels, v)
				keys = append(keys, k)
				glog.V(1).Infof("migrate key:\"%s\" hit node:\"%s\"", k, hn)
			}
		}
		for _, k := range keys {
			delete(c.Data, k)
			glog.Infof("migrate delete channel key \"%s\"", k)
		}
		c.Unlock()
		glog.Infof("migrate channel bucket:%d finished", i)
	}
	// close all the migrated channels
	glog.Info("close all the migrate channels")
	for _, channel := range channels {
		if err := channel.Close(); err != nil {
			glog.Errorf("channel.Close() error(%v)", err)
			continue
		}
	}
	glog.Info("close all the migrate channels finished")
	return nil
}
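// Caller sketch (assumption): a web node could drive this method through
// net/rpc roughly as below; "CometRPC.Migrate" is the assumed registered
// service method name and callMigrate is a hypothetical helper.
func callMigrate(c *rpc.Client, nodes map[string]int, vnode int) error {
	args := &myrpc.CometMigrateArgs{Nodes: nodes, Vnode: vnode}
	var ret int
	return c.Call("CometRPC.Migrate", args, &ret)
}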
// NewRandLB creates a new random load-balancing object.
func NewRandLB(clients map[string]*RPCClient, service string, retry, ping time.Duration, vnode int, check bool) (*RandLB, error) {
	ring := ketama.NewRing(vnode)
	for _, client := range clients {
		ring.AddNode(client.Addr, client.Weight)
	}
	ring.Bake()
	length := len(clients)
	r := &RandLB{Clients: clients, ring: ring, length: length}
	if check && length > 0 {
		glog.Info("rpc ping start")
		r.ping(service, retry, ping)
	}
	return r, nil
}
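// Usage sketch (assumption): build the client set, then let NewRandLB bake the
// ring and start the optional ping loop. Addr/Weight are the fields read by
// the constructor above; the service name, address, and durations here are
// purely illustrative, and any connection fields of RPCClient are omitted.
func newExampleLB() (*RandLB, error) {
	clients := map[string]*RPCClient{
		"192.168.1.10:8070": {Addr: "192.168.1.10:8070", Weight: 1},
	}
	return NewRandLB(clients, "MessageRPC", 3*time.Second, time.Second, ketama.Base, true)
}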
// NewRedisStorage initializes the redis pool and the consistent hash ring.
func NewRedisStorage() *RedisStorage {
	redisPool := map[string]*redis.Pool{}
	ring := ketama.NewRing(Conf.RedisKetamaBase)
	for n, addr := range Conf.RedisSource {
		// get node weight
		nw := strings.Split(n, redisNodeWeightSpliter)
		if len(nw) != 2 {
			glog.Errorf("strings.Split(\"%s\", \"%s\") length error", n, redisNodeWeightSpliter)
			panic(fmt.Sprintf("config redis.source weight:\"%s\" format error", n))
		}
		w, err := strconv.Atoi(nw[1])
		if err != nil {
			glog.Errorf("strconv.Atoi(\"%s\") failed (%v)", nw[1], err)
			panic(err)
		}
		// get protocol and addr
		pw := strings.Split(addr, redisProtocolSpliter)
		if len(pw) != 2 {
			glog.Errorf("strings.Split(\"%s\", \"%s\") length error", addr, redisProtocolSpliter)
			panic(fmt.Sprintf("config redis.source node:\"%s\" format error", addr))
		}
		tmpProto := pw[0]
		tmpAddr := pw[1]
		// init redis pool
		// WARN: tmpProto/tmpAddr are per-iteration copies captured by the Dial closure
		redisPool[nw[0]] = &redis.Pool{
			MaxIdle:     Conf.RedisMaxIdle,
			MaxActive:   Conf.RedisMaxActive,
			IdleTimeout: Conf.RedisIdleTimeout,
			Dial: func() (redis.Conn, error) {
				conn, err := redis.Dial(tmpProto, tmpAddr)
				if err != nil {
					glog.Errorf("redis.Dial(\"%s\", \"%s\") error(%v)", tmpProto, tmpAddr, err)
					return nil, err
				}
				return conn, err
			},
		}
		// add node to ketama hash
		ring.AddNode(nw[0], w)
	}
	ring.Bake()
	s := &RedisStorage{pool: redisPool, ring: ring, delCH: make(chan *RedisDelMessage, 10240)}
	go s.clean()
	return s
}
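// Config sketch (assumption): reading the two Splits above back to front, with
// redisNodeWeightSpliter assumed to be ":" and redisProtocolSpliter assumed to
// be "@", a source map would look like this (values illustrative):
func exampleRedisSource() map[string]string {
	return map[string]string{
		"node1:2": "tcp@127.0.0.1:6379",   // node "node1", weight 2, TCP
		"node2:1": "unix@/tmp/redis.sock", // node "node2", weight 1, unix socket
	}
}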
func InitRouterRpc(addrs []string) (err error) {
	routerRing = ketama.NewRing(ketama.Base)
	for _, addr := range addrs {
		// r is re-declared per iteration so every Reconnect goroutine
		// updates its own pointer.
		r, err := protorpc.Dial("tcp", addr)
		if err != nil {
			log.Error("protorpc.Dial(\"%s\") error(%s)", addr, err)
			return err
		}
		// quit stays nil: the reconnect loop is never asked to stop.
		var quit chan struct{}
		go protorpc.Reconnect(&r, quit, "tcp", addr)
		log.Debug("router protorpc addr:%s connect", addr)
		routerServiceMap[addr] = r
		routerRing.AddNode(addr, 1)
	}
	routerRing.Bake()
	return
}
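// Semantics sketch: quit above stays nil, and a receive from a nil channel
// blocks forever, so a select case on it never fires; the reconnect loop can
// never be stopped this way (assuming Reconnect selects on quit). Minimal
// demonstration of the language rule:
func nilChanNeverFires() bool {
	var quit chan struct{} // nil, like quit in InitRouterRpc
	select {
	case <-quit:
		return true // unreachable: receive on a nil channel blocks forever
	default:
		return false // always taken in this non-blocking select
	}
}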
// handleCometNodeEvent adds and removes CometNodeInfo: it copies the src map to a new map, then replaces the variable.
func handleCometNodeEvent(conn *zk.Conn, fpath string, retry, ping time.Duration, vnode int, ch chan *CometNodeEvent) {
	for {
		ev := <-ch
		// copy map from src
		tmpMap := make(map[string]*CometNodeInfo, len(cometNodeInfoMap))
		for k, v := range cometNodeInfoMap {
			tmpMap[k] = v
		}
		// handle event
		if ev.Event == eventNodeAdd {
			glog.Infof("add node: \"%s\"", ev.Key)
			tmpMap[ev.Key] = nil
			go watchCometNode(conn, ev.Key, fpath, retry, ping, vnode, ch)
		} else if ev.Event == eventNodeDel {
			glog.Infof("del node: \"%s\"", ev.Key)
			delete(tmpMap, ev.Key)
		} else if ev.Event == eventNodeUpdate {
			glog.Infof("update node: \"%s\"", ev.Key)
			tmpMap[ev.Key] = ev.Value
		} else {
			glog.Errorf("unknown node event: %d", ev.Event)
			panic("unknown node event")
		}
		// if an old node info exists, destroy it
		if info, ok := cometNodeInfoMap[ev.Key]; ok {
			if info != nil {
				info.CometRPC.Destroy()
			}
		}
		// rebuild the comet hash, because the node set has changed
		tempRing := ketama.NewRing(vnode)
		for k, v := range tmpMap {
			if v != nil {
				tempRing.AddNode(k, v.weight)
			}
		}
		tempRing.Bake()
		// replace the global cometNodeInfoMap with tmpMap in one assignment
		cometNodeInfoMap = tmpMap
		cometRing = tempRing
		glog.V(1).Infof("cometNodeInfoMap len: %d", len(cometNodeInfoMap))
	}
}
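// Race note sketch (suggestion, not the source's mechanism): the bare
// assignments to cometNodeInfoMap/cometRing above are unsynchronized writes;
// one way to publish the rebuilt map safely to reader goroutines is
// sync/atomic's Value, as below.
var cometNodeInfoVal atomic.Value // holds map[string]*CometNodeInfo

func publishCometNodes(m map[string]*CometNodeInfo) {
	cometNodeInfoVal.Store(m)
}

func loadCometNodes() map[string]*CometNodeInfo {
	m, _ := cometNodeInfoVal.Load().(map[string]*CometNodeInfo)
	return m
}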
// NewRedisStorage initializes the redis pool and the consistent hash ring.
func NewRedisStorage() *RedisStorage {
	var (
		err error
		w   int
		nw  []string
	)
	redisPool := map[string]*redis.Pool{}
	ring := ketama.NewRing(Conf.RedisKetamaBase)
	for n, addr := range Conf.RedisSource {
		nw = strings.Split(n, ":")
		if len(nw) != 2 {
			err = errors.New("node config error, expected \"nodeN:W\"")
			glog.Errorf("strings.Split(\"%s\", \":\") failed (%v)", n, err)
			panic(err)
		}
		w, err = strconv.Atoi(nw[1])
		if err != nil {
			glog.Errorf("strconv.Atoi(\"%s\") failed (%v)", nw[1], err)
			panic(err)
		}
		// WARN: tmp is a per-iteration copy of addr so the Dial closure
		// below does not capture the shared range variable.
		tmp := addr
		redisPool[nw[0]] = &redis.Pool{
			MaxIdle:     Conf.RedisMaxIdle,
			MaxActive:   Conf.RedisMaxActive,
			IdleTimeout: Conf.RedisIdleTimeout,
			Dial: func() (redis.Conn, error) {
				conn, err := redis.Dial("tcp", tmp)
				if err != nil {
					glog.Errorf("redis.Dial(\"tcp\", \"%s\") error(%v)", tmp, err)
					return nil, err
				}
				return conn, err
			},
		}
		ring.AddNode(nw[0], w)
	}
	ring.Bake()
	s := &RedisStorage{pool: redisPool, ring: ring, delCH: make(chan *RedisDelMessage, 10240)}
	go s.clean()
	return s
}
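// Pitfall sketch: the tmp := addr copy above is what keeps every Dial closure
// bound to its own address; in Go versions before 1.22 the range variable is
// reused across iterations, so capturing it directly would make all closures
// see the final value. Self-contained illustration:
func capturePitfall(addrs []string) []func() string {
	fns := make([]func() string, 0, len(addrs))
	for _, a := range addrs {
		a := a // per-iteration copy, same trick as tmp := addr
		fns = append(fns, func() string { return a })
	}
	return fns
}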
func InitRouter() (err error) {
	var i = 0
	routerRing = ketama.NewRing(ketama.Base)
	// WARN: map iteration order is randomized, so pairing Conf.RouterRPCAddrs
	// (a map) with Conf.RouterRPCNetworks (a slice) by counter is fragile.
	for serverId, addr := range Conf.RouterRPCAddrs {
		// r is re-declared per iteration so each Reconnect goroutine gets its own pointer
		r, err := rpc.Dial(Conf.RouterRPCNetworks[i], addr)
		if err != nil {
			log.Error("rpc.Dial(\"%s\", \"%s\") error(%s)", Conf.RouterRPCNetworks[i], addr, err)
			return err
		}
		go rpc.Reconnect(&r, routerQuit, Conf.RouterRPCNetworks[i], addr)
		log.Debug("router rpc addr:%s connect", addr)
		routerServiceMap[serverId] = r
		routerRing.AddNode(serverId, 1)
		i++
	}
	routerRing.Bake()
	return
}
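// Hazard sketch: InitRouter above pairs a map (Conf.RouterRPCAddrs) with a
// slice (Conf.RouterRPCNetworks) through the counter i, but Go randomizes map
// iteration order between runs, so that pairing is unreliable. Demonstration:
func mapOrderIsRandom() {
	m := map[string]string{"a": "1", "b": "2", "c": "3"}
	i := 0
	for k := range m {
		fmt.Println(i, k) // order differs between runs; never pair i with k
		i++
	}
}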
// handleCometNodeEvent adds and removes CometNodeInfo: it copies the src map to a new map, then replaces the variable.
func handleCometNodeEvent(conn *zk.Conn, migrateLockPath, fpath string, retry, ping time.Duration, ch chan *CometNodeEvent) {
	for {
		ev := <-ch
		var (
			update = false
			znode  = path.Join(fpath, ev.Key)
		)
		// copy map from src
		tmpMap := make(map[string]*CometNodeInfo, len(cometNodeInfoMap))
		for k, v := range cometNodeInfoMap {
			tmpMap[k] = v
		}
		// handle event
		if ev.Event == eventNodeAdd {
			log.Info("add node: \"%s\"", ev.Key)
			tmpMap[ev.Key] = &CometNodeInfo{Weight: 1}
			go watchCometNode(conn, ev.Key, fpath, retry, ping, ch)
		} else if ev.Event == eventNodeDel {
			log.Info("del node: \"%s\"", ev.Key)
			delete(tmpMap, ev.Key)
		} else if ev.Event == eventNodeUpdate {
			log.Info("update node: \"%s\"", ev.Key)
			// a node newly added to watchCometNode triggers a node update
			tmpMap[ev.Key] = ev.Value
			update = true
		} else {
			log.Error("unknown node event: %d", ev.Event)
			panic("unknown node event")
		}
		// if an old node info exists, destroy it:
		// on node add this may not happen,
		// on node del this cleans up the resource,
		// on node update (after reusing the rpc connection) this cleans up the resource.
		if info, ok := cometNodeInfoMap[ev.Key]; ok {
			if info != nil && info.Rpc != nil {
				info.Rpc.Destroy()
			}
		}
		// rebuild the comet hash, because the node set has changed
		tempRing := ketama.NewRing(ketama.Base)
		nodeWeightMap := map[string]int{}
		for k, v := range tmpMap {
			log.Debug("AddNode node:%s weight:%d", k, v.Weight)
			tempRing.AddNode(k, v.Weight)
			nodeWeightMap[k] = v.Weight
		}
		tempRing.Bake()
		// replace the global cometNodeInfoMap with tmpMap in one assignment
		cometNodeInfoMap = tmpMap
		cometRing = tempRing
		// migrate
		if ev.Event != eventNodeAdd {
			if err := notifyMigrate(conn, migrateLockPath, znode, ev.Key, update, nodeWeightMap); err != nil {
				// zk.ErrNodeExists means someone else is already going through:
				// hopefully only one web node notifies the comets to migrate,
				// and each comet also judges for itself whether it must migrate.
				if err == zk.ErrNodeExists {
					log.Info("ignore notify migrate")
					continue
				} else {
					log.Error("notifyMigrate(conn, \"%v\") error(%v)", nodeWeightMap, err)
					continue
				}
			}
		}
		log.Debug("cometNodeInfoMap len: %d", len(cometNodeInfoMap))
	}
}
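// Lock sketch (assumption about notifyMigrate's internals, inferred from the
// zk.ErrNodeExists handling above): creating an ephemeral znode acts as a
// try-lock, and whoever loses the create race skips the migrate notification.
// tryMigrateLock is a hypothetical helper.
func tryMigrateLock(conn *zk.Conn, lockPath string) (bool, error) {
	_, err := conn.Create(lockPath, []byte{}, zk.FlagEphemeral, zk.WorldACL(zk.PermAll))
	if err == zk.ErrNodeExists {
		return false, nil // another notifier holds the lock
	}
	if err != nil {
		return false, err
	}
	return true, nil
}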