func main() {
	flag.Parse()
	// init config
	if err := initConfig(); err != nil {
		log.Error("init config error(%v)", err)
		return
	}
	// init log
	log.LoadConfiguration(conf.Log)
	defer log.Close()
	// init zk
	zkConn = zkDial()
	if zkConn == nil {
		log.Error("zk dial error")
		return
	}
	go watchPath()
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGHUP, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT, syscall.SIGSTOP)
	for {
		s := <-c
		log.Info("twemproxy agent get a signal %s", s.String())
		switch s {
		case syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGSTOP, syscall.SIGINT:
			return
		case syscall.SIGHUP:
		default:
			return
		}
	}
}
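// The conf value and initConfig are defined elsewhere in the project and are not
// part of this snippet. The fields referenced throughout this file (conf.Log,
// conf.ZK, conf.ZKPath, conf.TpYml, conf.TpServer, conf.TpRestart) suggest a
// configuration shape roughly like the sketch below; the struct name, comments,
// and any details beyond those field names are assumptions, not the project's
// actual definition.
type Config struct {
	Log       string   // log configuration file passed to log.LoadConfiguration
	ZK        []string // zookeeper server addresses passed to zk.Connect
	ZKPath    string   // zookeeper node watched by watchPath
	TpYml     string   // twemproxy yml config path rewritten by dealTwemproxy
	TpServer  string   // twemproxy pool name looked up in the yml
	TpRestart string   // command executed to restart twemproxy
}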
// unmarshal parses the JSON-encoded zk node data into a new nodeInfo, returning nil on error.
func unmarshal(bs []byte) *nodeInfo {
	ni := &nodeInfo{}
	// TODO compress
	if err := json.Unmarshal(bs, ni); err != nil {
		log.Error("json.Unmarshal(%s) error(%v)", string(bs), err)
		return nil
	}
	return ni
}
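// The nodeInfo type itself is not shown in this snippet. A minimal sketch that is
// consistent with the fields accessed here and in dealTwemproxy (Servers, and each
// server's Name, Addr, Weight) might look like the following; the json tags and the
// serverInfo name are assumptions, and the real definition may differ.
type nodeInfo struct {
	Servers []*serverInfo `json:"servers"`
}

type serverInfo struct {
	Name   string `json:"name"`
	Addr   string `json:"addr"`
	Weight int    `json:"weight"`
}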
// AtomId atomically gets a new id for the client's workerId via rpc.
func (c *Client) AtomId() (id int64, err error) {
	client, err := c.client()
	if err != nil {
		return
	}
	if err = client.Call(RPCAtomId, c.workerId, &id); err != nil {
		log.Error("rpc.Call(\"%s\", %d, &id) error(%v)", RPCAtomId, c.workerId, err)
	}
	return
}
// Ids generates num snowflake ids in a single rpc call.
func (c *Client) Ids(num int) (ids []int64, err error) {
	client, err := c.client()
	if err != nil {
		return
	}
	if err = client.Call(RPCNextIds, &myrpc.NextIdsArgs{WorkerId: c.workerId, Num: num}, &ids); err != nil {
		log.Error("rpc.Call(\"%s\", %d, &ids) error(%v)", RPCNextIds, c.workerId, err)
	}
	return
}
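// A minimal usage sketch for the client API above. The NewClient constructor used
// here is hypothetical (the real constructor is not part of this snippet); AtomId
// and Ids are the methods defined above.
func exampleUse() {
	c := NewClient(1) // hypothetical constructor for workerId 1
	if id, err := c.AtomId(); err == nil {
		log.Info("got id: %d", id)
	}
	if ids, err := c.Ids(10); err == nil {
		log.Info("got %d ids", len(ids))
	}
}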
// pingAndRetry pings the rpc connection periodically and reconnects when an error occurs.
func (c *Client) pingAndRetry(stop <-chan bool, client *rpc.Client, addr string) {
	defer func() {
		if err := client.Close(); err != nil {
			log.Error("client.Close() error(%v)", err)
		}
	}()
	var (
		failed bool
		status int
		err    error
		tmp    *rpc.Client
	)
	for {
		select {
		case <-stop:
			log.Info("addr: \"%s\" pingAndRetry goroutine exit", addr)
			return
		default:
		}
		if !failed {
			if err = client.Call(RPCPing, 0, &status); err != nil {
				log.Error("client.Call(%s) error(%v)", RPCPing, err)
				failed = true
				continue
			} else {
				failed = false
				time.Sleep(rpcClientPingSleep)
				continue
			}
		}
		if tmp, err = rpc.Dial("tcp", addr); err != nil {
			log.Error("rpc.Dial(tcp, %s) error(%v)", addr, err)
			time.Sleep(rpcClientRetrySleep)
			continue
		}
		client = tmp
		failed = false
		log.Info("client reconnect %s ok", addr)
	}
}
// zkDial gets a zookeeper connection, returning nil on error.
func zkDial() *zk.Conn {
	conn, session, err := zk.Connect(conf.ZK, zkTimeout)
	if err != nil {
		log.Error("zk.Connect(\"%v\", %d) error(%v)", conf.ZK, zkTimeout, err)
		return nil
	}
	go func() {
		for {
			event := <-session
			log.Info("zookeeper get an event: %s", event.State.String())
		}
	}()
	return conn
}
// closeRpc closes the rpc clients and stops their ping&retry goroutines.
func closeRpc(clients []*rpc.Client, stop chan bool) {
	// rpc
	for _, client := range clients {
		if client != nil {
			if err := client.Close(); err != nil {
				log.Error("client.Close() error(%v)", err)
			}
		}
	}
	// ping&retry goroutine
	if stop != nil {
		close(stop)
	}
}
// watchPath watches the configured zk path and reloads twemproxy on changes.
func watchPath() {
	for {
		bs, _, watch, err := zkConn.GetW(conf.ZKPath)
		if err != nil {
			log.Error("zkConn.GetW(%s) error(%v)", conf.ZKPath, err)
			time.Sleep(zkErrorDelay)
			continue
		}
		ni := unmarshal(bs)
		if ni == nil {
			log.Error("unmarshal zk data error")
			time.Sleep(zkErrorDelay)
			continue
		}
		if err = dealTwemproxy(ni); err != nil {
			log.Error("deal twemproxy error(%v)", err)
			time.Sleep(zkErrorDelay)
			continue
		}
		// block until a new zk event arrives
		event := <-watch
		log.Error("zk node(\"%s\") changed %s", conf.ZKPath, event.Type.String())
	}
}
// dealTwemproxy rewrites the twemproxy yml config from the zk node info and restarts twemproxy.
func dealTwemproxy(ni *nodeInfo) error {
	bs, err := ioutil.ReadFile(conf.TpYml)
	if err != nil {
		log.Error("ioutil.ReadFile(%s) error(%v)", conf.TpYml, err)
		return err
	}
	twemproxyConfig := map[string]*TwemproxyConfig{}
	if err = yaml.Unmarshal(bs, &twemproxyConfig); err != nil {
		log.Error("yaml.Unmarshal error(%v)", err)
		return err
	}
	tc, ok := twemproxyConfig[conf.TpServer]
	if !ok {
		log.Error("twemproxy server name \"%s\" not exist", conf.TpServer)
		return fmt.Errorf("twemproxy server name \"%s\" not exist", conf.TpServer)
	}
	for i, tcs := range tc.Servers {
		// a server entry looks like "addr:weight name"
		sdata := strings.Split(tcs, " ")
		if len(sdata) != 2 {
			continue
		}
		for _, nis := range ni.Servers {
			if sdata[1] != nis.Name {
				continue
			}
			temp := fmt.Sprintf("%s:%d", nis.Addr, nis.Weight)
			if sdata[0] != temp {
				tc.Servers[i] = fmt.Sprintf("%s %s", temp, sdata[1])
			}
		}
	}
	bs, err = yaml.Marshal(twemproxyConfig)
	if err != nil {
		log.Error("yaml.Marshal error(%v)", err)
		return err
	}
	f, err := os.OpenFile(conf.TpYml, os.O_WRONLY|os.O_TRUNC, 0644)
	if err != nil {
		log.Error("os.OpenFile(%s) error(%v)", conf.TpYml, err)
		return err
	}
	if _, err = f.Write(bs); err != nil {
		log.Error("f.Write() error(%v)", err)
		f.Close()
		return err
	}
	f.Close()
	cmd := exec.Command(conf.TpRestart)
	if err = cmd.Run(); err != nil {
		log.Error("restart command error(%v)", err)
		return err
	}
	return nil
}
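// TwemproxyConfig is referenced above but not defined in this snippet. A sketch
// consistent with the yaml handling (pool name mapped to a config carrying a
// Servers list of "addr:weight name" strings) could be the following. Note that a
// real definition would need to include every twemproxy pool option (listen, hash,
// distribution, and so on), otherwise the yaml.Marshal round-trip above would drop
// them from the rewritten file; only Servers is required by this code, the rest is
// assumed.
type TwemproxyConfig struct {
	Listen  string   `yaml:"listen,omitempty"`
	Servers []string `yaml:"servers"`
}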
// Init initializes the gosnowflake client's zookeeper connection.
func Init(zservers []string, zpath string, ztimeout time.Duration) (err error) {
	mutex.Lock()
	defer mutex.Unlock()
	if zkConn != nil {
		return
	}
	zkPath = zpath
	zkServers = zservers
	zkTimeout = ztimeout
	conn, session, err := zk.Connect(zkServers, zkTimeout)
	if err != nil {
		log.Error("zk.Connect(\"%v\", %d) error(%v)", zkServers, zkTimeout, err)
		return
	}
	zkConn = conn
	go func() {
		for {
			event := <-session
			log.Info("zk connect get an event: %s", event.Type.String())
		}
	}()
	return
}
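// Example of wiring Init at startup. The addresses, path, and timeout below are
// placeholders for illustration only, not values taken from the project.
func exampleInit() {
	if err := Init([]string{"127.0.0.1:2181"}, "/gosnowflake-servers", 30*time.Second); err != nil {
		log.Error("Init error(%v)", err)
	}
}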
// watchWorkerId watches the worker's zk node for changes and re-selects the leader.
func (c *Client) watchWorkerId(workerId int64, workerIdStr string) {
	workerIdPath := path.Join(zkPath, workerIdStr)
	log.Debug("workerIdPath: %s", workerIdPath)
	for {
		rpcs, _, watch, err := zkConn.ChildrenW(workerIdPath)
		if err != nil {
			log.Error("zkConn.ChildrenW(%s) error(%v)", workerIdPath, err)
			time.Sleep(zkNodeDelaySleep)
			continue
		}
		if len(rpcs) == 0 {
			log.Error("zkConn.ChildrenW(%s) no nodes", workerIdPath)
			time.Sleep(zkNodeDelaySleep)
			continue
		}
		// leader selection
		sort.Strings(rpcs)
		newLeader := rpcs[0]
		if c.leader == newLeader {
			log.Info("workerId: %s added a new standby gosnowflake node", workerIdStr)
		} else {
			log.Info("workerId: %s oldLeader: \"%s\", newLeader: \"%s\" not equal, continue leader selection", workerIdStr, c.leader, newLeader)
			// get new leader info
			workerNodePath := path.Join(zkPath, workerIdStr, newLeader)
			bs, _, err := zkConn.Get(workerNodePath)
			if err != nil {
				log.Error("zkConn.Get(%s) error(%v)", workerNodePath, err)
				time.Sleep(zkNodeDelaySleep)
				continue
			}
			peer := &Peer{}
			if err = json.Unmarshal(bs, peer); err != nil {
				log.Error("json.Unmarshal(%s, peer) error(%v)", string(bs), err)
				time.Sleep(zkNodeDelaySleep)
				continue
			}
			// init rpc
			tmpClients := make([]*rpc.Client, len(peer.RPC))
			tmpStop := make(chan bool, 1)
			for i, addr := range peer.RPC {
				clt, err := rpc.Dial("tcp", addr)
				if err != nil {
					log.Error("rpc.Dial(tcp, \"%s\") error(%v)", addr, err)
					continue
				}
				tmpClients[i] = clt
				go c.pingAndRetry(tmpStop, clt, addr)
			}
			// old rpc clients
			oldClients := c.clients
			oldStop := c.stop
			// atomic replace variables
			c.leader = newLeader
			c.clients = tmpClients
			c.stop = tmpStop
			// if exist, free resources
			if oldClients != nil {
				closeRpc(oldClients, oldStop)
			}
		}
		// block until a new zk event arrives
		event := <-watch
		log.Error("zk node(\"%s\") changed %s", workerIdPath, event.Type.String())
	}
}
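// The Peer type unmarshalled above is not defined in this snippet. Only the RPC
// field is required by this code; a minimal sketch would be the following, with
// the json tag assumed rather than taken from the real source.
type Peer struct {
	RPC []string `json:"rpc"` // tcp addresses of the leader's rpc listeners
}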