// NewMySQLStorage initializes the mysql connection pool and the consistency
// hash ring.
func NewMySQLStorage() *MySQLStorage {
	dbPool := make(map[string]*sql.DB)
	ring := ketama.NewRing(ketamaBase)
	for n, source := range Conf.MySQLSource {
		nw := strings.Split(n, mysqlSourceSpliter)
		if len(nw) != 2 {
			err := errors.New("node config error, must be \"nodeN:W\" format")
			log.Error("strings.Split(\"%s\", \"%s\") failed (%v)", n, mysqlSourceSpliter, err)
			panic(err)
		}
		// parse the node weight
		w, err := strconv.Atoi(nw[1])
		if err != nil {
			log.Error("strconv.Atoi(\"%s\") failed (%v)", nw[1], err)
			panic(err)
		}
		db, err := sql.Open("mysql", source)
		if err != nil {
			log.Error("sql.Open(\"mysql\", %s) failed (%v)", source, err)
			panic(err)
		}
		dbPool[nw[0]] = db
		ring.AddNode(nw[0], w)
	}
	ring.Bake()
	s := &MySQLStorage{pool: dbPool, ring: ring}
	go s.clean()
	return s
}
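// Sketch (not in the original source): how a caller might select the backing
// *sql.DB for a message key through the baked ring. The method name getDB is
// an assumption; it only relies on ring.Hash returning a node name that was
// registered via AddNode above.
func (s *MySQLStorage) getDB(key string) *sql.DB {
	node := s.ring.Hash(key)
	db, ok := s.pool[node]
	if !ok {
		log.Error("no mysql node: \"%s\" for key: \"%s\"", node, key)
		return nil
	}
	return db
}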
// Migrate migrates the portion of connections which no longer belongs to
// this comet node after the node weight map changed.
func (l *ChannelList) Migrate(nw map[string]int) (err error) {
	migrate := false
	// check added/updated nodes
	for k, v := range nw {
		weight, ok := nodeWeightMap[k]
		// not found or weight changed
		if !ok || weight != v {
			migrate = true
			break
		}
	}
	// check deleted nodes
	if !migrate {
		for k := range nodeWeightMap {
			// node deleted
			if _, ok := nw[k]; !ok {
				migrate = true
				break
			}
		}
	}
	if !migrate {
		return
	}
	// init ketama
	ring := ketama.NewRing(ketama.Base)
	for node, weight := range nw {
		ring.AddNode(node, weight)
	}
	ring.Bake()
	// atomic update
	nodeWeightMap = nw
	CometRing = ring
	// under each bucket lock, collect the channels that now hash to another node
	channels := []Channel{}
	for i, c := range l.Channels {
		c.Lock()
		for k, v := range c.Data {
			hn := ring.Hash(k)
			if hn != Conf.ZookeeperCometNode {
				channels = append(channels, v)
				delete(c.Data, k)
				log.Debug("migrate delete channel key \"%s\"", k)
			}
		}
		c.Unlock()
		log.Debug("migrate channel bucket:%d finished", i)
	}
	// close all the migrated channels
	log.Info("close all the migrate channels")
	for _, channel := range channels {
		if err := channel.Close(); err != nil {
			log.Error("channel.Close() error(%v)", err)
			continue
		}
	}
	log.Info("close all the migrate channels finished")
	return
}
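// Sketch (not in the original source): how a migration might be triggered
// when a new weight map arrives, e.g. from the zookeeper watcher. The map
// keys are node names and the values their ketama weights; UserChannel as
// the global *ChannelList is an assumption here.
func migrateExample() {
	nw := map[string]int{
		"node1": 1, // unchanged
		"node2": 2, // weight changed: forces a migration
		// "node3" removed from the map: also forces a migration
	}
	if err := UserChannel.Migrate(nw); err != nil {
		log.Error("UserChannel.Migrate(%v) error(%v)", nw, err)
	}
}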
// NewRedisStorage initializes the redis pool and the consistency hash ring.
func NewRedisStorage() *RedisStorage {
	redisPool := map[string]*redis.Pool{}
	ring := ketama.NewRing(ketamaBase)
	for n, addr := range Conf.RedisSource {
		nw := strings.Split(n, ":")
		if len(nw) != 2 {
			err := errors.New("node config error, must be \"nodeN:W\" format")
			log.Error("strings.Split(\"%s\", \":\") failed (%v)", n, err)
			panic(err)
		}
		// parse the node weight
		w, err := strconv.Atoi(nw[1])
		if err != nil {
			log.Error("strconv.Atoi(\"%s\") failed (%v)", nw[1], err)
			panic(err)
		}
		// get protocol and addr
		pw := strings.Split(addr, redisProtocolSpliter)
		if len(pw) != 2 {
			log.Error("strings.Split(\"%s\", \"%s\") failed: invalid format", addr, redisProtocolSpliter)
			panic(fmt.Sprintf("config redis.source node:\"%s\" format error", addr))
		}
		// copy to local variables so that the Dial closure below captures
		// this node's protocol and address, not the loop variables
		tmpProto := pw[0]
		tmpAddr := pw[1]
		redisPool[nw[0]] = &redis.Pool{
			MaxIdle:     Conf.RedisMaxIdle,
			MaxActive:   Conf.RedisMaxActive,
			IdleTimeout: Conf.RedisIdleTimeout,
			Dial: func() (redis.Conn, error) {
				conn, err := redis.Dial(tmpProto, tmpAddr)
				if err != nil {
					log.Error("redis.Dial(\"%s\", \"%s\") error(%v)", tmpProto, tmpAddr, err)
					return nil, err
				}
				return conn, err
			},
		}
		// add node to the ketama hash ring
		ring.AddNode(nw[0], w)
	}
	ring.Bake()
	s := &RedisStorage{pool: redisPool, ring: ring, delCH: make(chan *RedisDelMessage, 10240)}
	go s.clean()
	return s
}
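// Sketch (not in the original source): the redis.source entries the loop
// above expects, assuming redisProtocolSpliter is "@". Keys are "nodeN:W"
// (node name, ketama weight); values are "protocol@address" pairs that the
// Dial closure passes straight to redis.Dial.
var exampleRedisSource = map[string]string{
	"node1:1": "tcp@localhost:6379",
	"node2:2": "tcp@192.168.1.2:6379",
}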
// handleCometNodeEvent adds and removes CometNodeInfo: it copies the src map
// to a new map, then atomically replaces the global variable.
func handleCometNodeEvent(conn *zk.Conn, migrateLockPath, fpath string, retry, ping time.Duration, ch chan *CometNodeEvent) {
	for {
		ev := <-ch
		var (
			update = false
			znode  = path.Join(fpath, ev.Key)
		)
		// copy map from src
		tmpMap := make(map[string]*CometNodeInfo, len(cometNodeInfoMap))
		for k, v := range cometNodeInfoMap {
			tmpMap[k] = v
		}
		// handle event
		if ev.Event == eventNodeAdd {
			log.Info("add node: \"%s\"", ev.Key)
			tmpMap[ev.Key] = &CometNodeInfo{Weight: 1}
			go watchCometNode(conn, ev.Key, fpath, retry, ping, ch)
		} else if ev.Event == eventNodeDel {
			log.Info("del node: \"%s\"", ev.Key)
			delete(tmpMap, ev.Key)
		} else if ev.Event == eventNodeUpdate {
			log.Info("update node: \"%s\"", ev.Key)
			// when a new node is added to watchCometNode, a node update is triggered
			tmpMap[ev.Key] = ev.Value
			update = true
		} else {
			log.Error("unknown node event: %d", ev.Event)
			panic("unknown node event")
		}
		// if old node info exists, destroy it:
		// on node add this should not happen,
		// on node del this cleans up the resource,
		// on node update this cleans up the resource after the rpc connection is reused
		if info, ok := cometNodeInfoMap[ev.Key]; ok {
			if info != nil && info.Rpc != nil {
				info.Rpc.Destroy()
			}
		}
		// rebuild the comet hash ring, because the node set has changed
		tempRing := ketama.NewRing(ketama.Base)
		nodeWeightMap := map[string]int{}
		for k, v := range tmpMap {
			log.Debug("AddNode node:%s weight:%d", k, v.Weight)
			tempRing.AddNode(k, v.Weight)
			nodeWeightMap[k] = v.Weight
		}
		tempRing.Bake()
		// atomically replace the global cometNodeInfoMap with tmpMap
		cometNodeInfoMap = tmpMap
		cometRing = tempRing
		// migrate
		if ev.Event != eventNodeAdd {
			if err := notifyMigrate(conn, migrateLockPath, znode, ev.Key, update, nodeWeightMap); err != nil {
				// zk.ErrNodeExists means another web node holds the migrate lock;
				// only one web node should notify comet to migrate, and comet
				// itself judges whether a migration is actually needed.
				if err == zk.ErrNodeExists {
					log.Info("ignore notify migrate")
					continue
				} else {
					log.Error("notifyMigrate(conn, \"%v\") error(%v)", nodeWeightMap, err)
					continue
				}
			}
		}
		log.Debug("cometNodeInfoMap len: %d", len(cometNodeInfoMap))
	}
}
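// Sketch (not in the original source): the event shape the loop above
// consumes, reconstructed from its usage (ev.Event compared against
// eventNodeAdd/Del/Update, ev.Key as the znode name, ev.Value stored as a
// *CometNodeInfo). Field names and comments beyond that usage are assumptions.
type CometNodeEvent struct {
	// Event is one of eventNodeAdd, eventNodeDel, eventNodeUpdate.
	Event int
	// Key is the znode name of the comet node under fpath.
	Key string
	// Value carries the parsed node info on eventNodeUpdate, else nil.
	Value *CometNodeInfo
}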