// rewatchNodes re-registers the watch on the action path and returns its
// current children; any watch failure is treated as fatal.
func (s *Server) rewatchNodes() []string {
	nodes, err := s.topo.WatchChildren(models.GetWatchActionPath(s.topo.ProductName), s.evtbus)
	if err != nil {
		log.PanicErrorf(err, "watch children failed")
	}
	return nodes
}
func (s *Server) processAction(e interface{}) {
	if strings.HasPrefix(getEventPath(e), models.GetProxyPath(s.topo.ProductName)) {
		// proxy event: if we were marked offline, shut ourselves down
		info, err := s.topo.GetProxyInfo(s.info.Id)
		if err != nil {
			log.PanicErrorf(err, "get proxy info failed: %s", s.info.Id)
		}
		if info.State == models.PROXY_STATE_MARK_OFFLINE {
			s.handleMarkOffline()
		}
		return
	}

	// re-watch
	nodes, err := s.topo.WatchChildren(models.GetWatchActionPath(s.topo.ProductName), s.evtbus)
	if err != nil {
		log.PanicErrorf(err, "rewatch children failed")
	}

	seqs, err := models.ExtraSeqList(nodes)
	if err != nil {
		log.PanicErrorf(err, "get seq list failed")
	}

	if len(seqs) == 0 || !s.topo.IsChildrenChangedEvent(e) {
		return
	}

	// find the first action newer than the last one we processed
	index := -1
	for i, seq := range seqs {
		if s.lastActionSeq < seq {
			index = i
			break
		}
	}
	if index < 0 {
		return
	}

	actions := seqs[index:]
	for _, seq := range actions {
		exist, err := s.topo.Exist(path.Join(s.topo.GetActionResponsePath(seq), s.info.Id))
		if err != nil {
			log.PanicErrorf(err, "get action failed")
		}
		if exist {
			continue
		}
		if s.checkAndDoTopoChange(seq) {
			s.responseAction(int64(seq))
		}
	}
	s.lastActionSeq = seqs[len(seqs)-1]
}
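// A standalone, runnable sketch (not part of the proxy; names illustrative)
// of the "find the first unprocessed action" scan used by processAction:
// seqs is the ascending sequence list read from zookeeper, and everything
// newer than lastActionSeq still needs a response from this proxy.
package main

import "fmt"

func firstUnprocessed(seqs []int, lastActionSeq int) []int {
	for i, seq := range seqs {
		if lastActionSeq < seq {
			return seqs[i:] // first newer action and everything after it
		}
	}
	return nil // all listed actions were already handled
}

func main() {
	fmt.Println(firstUnprocessed([]int{3, 4, 5, 6}, 4)) // [5 6]
	fmt.Println(firstUnprocessed([]int{3, 4}, 4))       // []
}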
func (s *Server) processAction(e interface{}) {
	if strings.HasPrefix(GetEventPath(e), models.GetProxyPath(s.top.ProductName)) {
		// proxy event: should be an order for this proxy to take itself offline
		s.handleProxyCommand()
		return
	}

	// re-watch
	nodes, err := s.top.WatchChildren(models.GetWatchActionPath(s.top.ProductName), s.evtbus)
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}

	seqs, err := models.ExtraSeqList(nodes)
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}

	if len(seqs) == 0 || !s.top.IsChildrenChangedEvent(e) {
		return
	}

	// find the first action newer than the last one we processed
	index := -1
	for i, seq := range seqs {
		if s.lastActionSeq < seq {
			index = i
			break
		}
	}
	if index < 0 {
		return
	}

	actions := seqs[index:]
	for _, seq := range actions {
		exist, err := s.top.Exist(path.Join(s.top.GetActionResponsePath(seq), s.pi.Id))
		if err != nil {
			log.Fatal(errors.ErrorStack(err))
		}
		if exist {
			continue
		}
		if s.checkAndDoTopoChange(seq) {
			s.responseAction(int64(seq))
		}
	}
	s.lastActionSeq = seqs[len(seqs)-1]
}
func NewServer(addr string, debugVarAddr string, conf *Conf) *Server {
	log.Infof("%+v", conf)
	s := &Server{
		evtbus:            make(chan interface{}, 100),
		top:               topo.NewTopo(conf.productName, conf.zkAddr, conf.f),
		counter:           stats.NewCounters("router"),
		lastActionSeq:     -1,
		startAt:           time.Now(),
		addr:              addr,
		concurrentLimiter: utils.NewTokenLimiter(100),
		moper:             NewMultiOperator("localhost:" + strings.Split(addr, ":")[1]),
		pools:             cachepool.NewCachePool(),
	}

	s.mu.Lock()
	s.pi.Id = conf.proxyId
	s.pi.State = models.PROXY_STATE_OFFLINE
	hname, err := os.Hostname()
	if err != nil {
		log.Fatal("get host name failed", err)
	}
	s.pi.Addr = hname + ":" + strings.Split(addr, ":")[1]
	s.pi.DebugVarAddr = hname + ":" + strings.Split(debugVarAddr, ":")[1]
	log.Infof("proxy_info:%+v", s.pi)
	s.mu.Unlock()
	// TODO: fill more fields

	stats.Publish("evtbus", stats.StringFunc(func() string {
		return strconv.Itoa(len(s.evtbus))
	}))
	stats.Publish("startAt", stats.StringFunc(func() string {
		return s.startAt.String()
	}))

	s.RegisterAndWait()

	_, err = s.top.WatchChildren(models.GetWatchActionPath(conf.productName), s.evtbus)
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}

	s.FillSlots()

	// start event handler
	go s.handleTopoEvent()

	log.Info("proxy start ok")
	return s
}
func init() {
	conn = zkhelper.NewConn()
	conf = &Config{
		proxyId:     "proxy_test",
		productName: "test",
		zkAddr:      "localhost:2181",
		fact: func(string, int) (zkhelper.Conn, error) {
			return conn, nil
		},
		proto: "tcp4",
	}

	// init action path
	prefix := models.GetWatchActionPath(conf.productName)
	err := models.CreateActionRootPath(conn, prefix)
	assert.MustNoError(err)

	// init slot
	err = models.InitSlotSet(conn, conf.productName, 1024)
	assert.MustNoError(err)

	// init server group
	g1 := models.NewServerGroup(conf.productName, 1)
	g1.Create(conn)
	g2 := models.NewServerGroup(conf.productName, 2)
	g2.Create(conn)

	redis1, _ = miniredis.Run()
	redis2, _ = miniredis.Run()
	s1 := models.NewServer(models.SERVER_TYPE_MASTER, redis1.Addr())
	s2 := models.NewServer(models.SERVER_TYPE_MASTER, redis2.Addr())
	g1.AddServer(conn, s1, "")
	g2.AddServer(conn, s2, "")

	// set slot range
	err = models.SetSlotRange(conn, conf.productName, 0, 511, 1, models.SLOT_STATUS_ONLINE)
	assert.MustNoError(err)
	err = models.SetSlotRange(conn, conf.productName, 512, 1023, 2, models.SLOT_STATUS_ONLINE)
	assert.MustNoError(err)

	s = New(":19000", ":11000", conf)
	err = models.SetProxyStatus(conn, conf.productName, conf.proxyId, models.PROXY_STATE_ONLINE)
	assert.MustNoError(err)
}
// rewatchNodes re-registers the watch on the action path, retrying transient
// errors every 5 seconds; it gives up (returning nil) only when the error is
// fatal.
func (s *Server) rewatchNodes() []string {
	var nodes []string
	var err error
	for {
		nodes, err = s.topo.WatchChildren(models.GetWatchActionPath(s.topo.ProductName), s.evtbus)
		if err != nil {
			log.ErrorErrorf(err, "watch children failed")
			if s.topo.IsFatalErr(err) {
				break
			}
			time.Sleep(5 * time.Second)
		} else {
			break
		}
	}
	return nodes
}
func InitEnv() {
	go once.Do(func() {
		conn = zkhelper.NewConn()
		conf = &Config{
			proxyId:     "proxy_test",
			productName: "test",
			zkAddr:      "localhost:2181",
			fact: func(string) (zkhelper.Conn, error) {
				return conn, nil
			},
			proto: "tcp4",
		}

		// init action path
		prefix := models.GetWatchActionPath(conf.productName)
		err := models.CreateActionRootPath(conn, prefix)
		assert.MustNoError(err)

		// init slot
		err = models.InitSlotSet(conn, conf.productName, 1024)
		assert.MustNoError(err)

		// init server group
		g1 := models.NewServerGroup(conf.productName, 1)
		g1.Create(conn)
		g2 := models.NewServerGroup(conf.productName, 2)
		g2.Create(conn)

		redis1, _ = miniredis.Run()
		redis2, _ = miniredis.Run()
		s1 := models.NewServer(models.SERVER_TYPE_MASTER, redis1.Addr())
		s2 := models.NewServer(models.SERVER_TYPE_MASTER, redis2.Addr())
		g1.AddServer(conn, s1, "")
		g2.AddServer(conn, s2, "")

		// set slot range
		err = models.SetSlotRange(conn, conf.productName, 0, 511, 1, models.SLOT_STATUS_ONLINE)
		assert.MustNoError(err)
		err = models.SetSlotRange(conn, conf.productName, 512, 1023, 2, models.SLOT_STATUS_ONLINE)
		assert.MustNoError(err)

		go func() {
			// set proxy online
			time.Sleep(3 * time.Second)
			err := models.SetProxyStatus(conn, conf.productName, conf.proxyId, models.PROXY_STATE_ONLINE)
			assert.MustNoError(err)

			time.Sleep(2 * time.Second)
			proxyMutex.Lock()
			defer proxyMutex.Unlock()
			assert.Must(s.info.State == models.PROXY_STATE_ONLINE)
		}()

		proxyMutex.Lock()
		s, err = NewServer(":19000", ":11000", conf)
		assert.MustNoError(err)
		proxyMutex.Unlock()

		s.Serve()
	})

	waitonce.Do(func() {
		time.Sleep(10 * time.Second)
	})
}
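// Hypothetical test (not in the source) showing how InitEnv is intended to be
// used: every test calls it first, and the sync.Once guarantees the fake
// zookeeper, the two miniredis masters, and the proxy are only built once.
// Assumes a redigo-style client ("github.com/garyburd/redigo/redis") imported
// as redis; the PING round-trip and test name are illustrative.
func TestProxyPing(t *testing.T) {
	InitEnv()
	c, err := redis.Dial("tcp", "localhost:19000") // the proxy started by InitEnv
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()
	if _, err := c.Do("PING"); err != nil {
		t.Fatal(err)
	}
}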
func NewServer(addr string, debugVarAddr string, conf *Conf) *Server {
	log.Infof("start with configuration: %+v", conf)
	s := &Server{
		conf:          conf,
		evtbus:        make(chan interface{}, 1000),
		top:           topo.NewTopo(conf.productName, conf.zkAddr, conf.f, conf.provider),
		counter:       stats.NewCounters("router"),
		lastActionSeq: -1,
		startAt:       time.Now(),
		addr:          addr,
		moper:         NewMultiOperator(addr),
		reqCh:         make(chan *PipelineRequest, 1000),
		pools:         cachepool.NewCachePool(),
		pipeConns:     make(map[string]*taskRunner),
		bufferedReq:   list.New(),
	}

	s.pi.Id = conf.proxyId
	s.pi.State = models.PROXY_STATE_OFFLINE

	host := strings.Split(addr, ":")[0]
	debugHost := strings.Split(debugVarAddr, ":")[0]
	hname, err := os.Hostname()
	if err != nil {
		log.Fatal("get host name failed", err)
	}
	// replace wildcard/loopback addresses with the real host name so that
	// other cluster members can reach this proxy
	if host == "0.0.0.0" || strings.HasPrefix(host, "127.0.0.") {
		host = hname
	}
	if debugHost == "0.0.0.0" || strings.HasPrefix(debugHost, "127.0.0.") {
		debugHost = hname
	}

	s.pi.Addr = host + ":" + strings.Split(addr, ":")[1]
	s.pi.DebugVarAddr = debugHost + ":" + strings.Split(debugVarAddr, ":")[1]
	s.pi.Pid = os.Getpid()
	s.pi.StartAt = time.Now().String()
	log.Infof("proxy_info:%+v", s.pi)

	stats.Publish("evtbus", stats.StringFunc(func() string {
		return strconv.Itoa(len(s.evtbus))
	}))
	stats.Publish("startAt", stats.StringFunc(func() string {
		return s.startAt.String()
	}))

	s.RegisterAndWait()

	_, err = s.top.WatchChildren(models.GetWatchActionPath(conf.productName), s.evtbus)
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}

	s.FillSlots()

	// start event handler
	go s.handleTopoEvent()

	log.Info("proxy start ok")
	return s
}
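// A standalone sketch of the advertised-address fixup in NewServer above: a
// proxy bound to the wildcard or a loopback address advertises the machine's
// hostname instead. The helper name is illustrative, not part of the proxy.
package main

import (
	"fmt"
	"strings"
)

func advertisedHost(bindHost, hostname string) string {
	if bindHost == "0.0.0.0" || strings.HasPrefix(bindHost, "127.0.0.") {
		return hostname // unreachable from other machines; use the host name
	}
	return bindHost
}

func main() {
	fmt.Println(advertisedHost("0.0.0.0", "proxy-1"))  // proxy-1
	fmt.Println(advertisedHost("10.0.0.5", "proxy-1")) // 10.0.0.5
}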
func InitEnv() {
	go once.Do(func() {
		conn = zkhelper.NewConn()
		conf = &Conf{
			proxyId:     "proxy_test",
			productName: "test",
			zkAddr:      "localhost:2181",
			netTimeout:  5,
			f: func(string) (zkhelper.Conn, error) {
				return conn, nil
			},
			proto: "tcp4",
		}

		// init action path
		prefix := models.GetWatchActionPath(conf.productName)
		err := models.CreateActionRootPath(conn, prefix)
		if err != nil {
			log.Fatal(err)
		}

		// init slot
		err = models.InitSlotSet(conn, conf.productName, 1024)
		if err != nil {
			log.Fatal(err)
		}

		// init server group
		g1 := models.NewServerGroup(conf.productName, 1)
		g1.Create(conn)
		g2 := models.NewServerGroup(conf.productName, 2)
		g2.Create(conn)

		redis1, _ = miniredis.Run()
		redis2, _ = miniredis.Run()
		s1 := models.NewServer(models.SERVER_TYPE_MASTER, redis1.Addr())
		s2 := models.NewServer(models.SERVER_TYPE_MASTER, redis2.Addr())
		g1.AddServer(conn, s1)
		g2.AddServer(conn, s2)

		// set slot range
		err = models.SetSlotRange(conn, conf.productName, 0, 511, 1, models.SLOT_STATUS_ONLINE)
		if err != nil {
			log.Fatal(err)
		}
		err = models.SetSlotRange(conn, conf.productName, 512, 1023, 2, models.SLOT_STATUS_ONLINE)
		if err != nil {
			log.Fatal(err)
		}

		go func() {
			// set proxy online
			time.Sleep(3 * time.Second)
			err := models.SetProxyStatus(conn, conf.productName, conf.proxyId, models.PROXY_STATE_ONLINE)
			if err != nil {
				log.Fatal(errors.ErrorStack(err))
			}

			time.Sleep(2 * time.Second)
			proxyMutex.Lock()
			defer proxyMutex.Unlock()
			pi := s.getProxyInfo()
			if pi.State != models.PROXY_STATE_ONLINE {
				log.Fatalf("should be online, we got %s", pi.State)
			}
		}()

		proxyMutex.Lock()
		s = NewServer(":19000", ":11000", conf)
		proxyMutex.Unlock()

		s.Run()
	})

	waitonce.Do(func() {
		time.Sleep(10 * time.Second)
	})
}
func NewServer(addr string, debugVarAddr string, conf *Config) (*Server, error) {
	log.Infof("start proxy with config: %+v", conf)
	s := &Server{
		evtbus:        make(chan interface{}, 1000),
		conf:          conf,
		topo:          topology.NewTopo(conf.productName, conf.zkAddr, conf.fact, conf.provider),
		pool:          make(map[string]*SharedBackendConn),
		lastActionSeq: -1,
	}
	for i := 0; i < MaxSlotNum; i++ {
		s.slots[i] = &Slot{Id: i}
	}

	proxyHost := strings.Split(addr, ":")[0]
	debugHost := strings.Split(debugVarAddr, ":")[0]

	hostname, err := os.Hostname()
	if err != nil {
		log.PanicErrorf(err, "get host name failed")
	}
	// replace wildcard/loopback addresses with the real host name so that
	// other cluster members can reach this proxy
	if proxyHost == "0.0.0.0" || strings.HasPrefix(proxyHost, "127.0.0.") {
		proxyHost = hostname
	}
	if debugHost == "0.0.0.0" || strings.HasPrefix(debugHost, "127.0.0.") {
		debugHost = hostname
	}

	s.info.Id = conf.proxyId
	s.info.State = models.PROXY_STATE_OFFLINE
	s.info.Addr = proxyHost + ":" + strings.Split(addr, ":")[1]
	s.info.DebugVarAddr = debugHost + ":" + strings.Split(debugVarAddr, ":")[1]
	s.info.Pid = os.Getpid()
	s.info.StartAt = time.Now().String()
	log.Infof("proxy info = %+v", s.info)

	if l, err := net.Listen(conf.proto, addr); err != nil {
		return nil, errors.Trace(err)
	} else {
		s.Listener = l
	}

	stats.Publish("evtbus", stats.StringFunc(func() string {
		return strconv.Itoa(len(s.evtbus))
	}))
	stats.PublishJSONFunc("router", func() string {
		var m = make(map[string]interface{})
		m["ops"] = cmdstats.requests.Get()
		m["cmds"] = getAllOpStats()
		m["info"] = s.info
		m["build"] = map[string]interface{}{
			"version": utils.Version,
			"compile": utils.Compile,
		}
		b, _ := json.Marshal(m)
		return string(b)
	})

	s.RegisterAndWait()

	_, err = s.topo.WatchChildren(models.GetWatchActionPath(conf.productName), s.evtbus)
	if err != nil {
		log.PanicErrorf(err, "watch children failed")
	}

	for i := 0; i < MaxSlotNum; i++ {
		s.fillSlot(i, false)
	}

	// start event handler
	go s.handleTopoEvent()

	log.Info("proxy start ok")
	return s, nil
}
// GetActionResponsePath returns the zookeeper node that holds responses for
// the given action; the sequence number is zero-padded to ten digits so the
// action children sort lexicographically in sequence order.
func (top *Topology) GetActionResponsePath(seq int) string {
	return path.Join(models.GetWatchActionPath(top.ProductName), "action_"+fmt.Sprintf("%0.10d", seq))
}
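// A runnable sketch (paths illustrative) of why the sequence number above is
// rendered with "%0.10d": zero-padding to a fixed width makes zookeeper's
// lexicographic ordering of the action children agree with numeric ordering.
package main

import (
	"fmt"
	"path"
	"sort"
)

func main() {
	var nodes []string
	for _, seq := range []int{2, 10, 1} {
		// mirrors GetActionResponsePath: "action_" + zero-padded sequence
		nodes = append(nodes, path.Join("/actions", "action_"+fmt.Sprintf("%0.10d", seq)))
	}
	sort.Strings(nodes) // lexicographic sort now equals numeric sort
	fmt.Println(nodes)
	// Output: [/actions/action_0000000001 /actions/action_0000000002 /actions/action_0000000010]
}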
func (s *Server) processAction(e interface{}) {
	start := time.Now()
	s.mu.Lock()
	defer s.mu.Unlock()
	if time.Since(start).Seconds() > 10 {
		log.Warning("take too long to get lock")
	}

	actPath := GetEventPath(e)
	if strings.HasPrefix(actPath, models.GetProxyPath(s.top.ProductName)) {
		// proxy event: should be an order for this proxy to take itself offline
		s.handleProxyCommand()
		return
	}

	// re-watch
	nodes, err := s.top.WatchChildren(models.GetWatchActionPath(s.top.ProductName), s.evtbus)
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}

	seqs, err := models.ExtraSeqList(nodes)
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}

	if len(seqs) == 0 || !s.top.IsChildrenChangedEvent(e) {
		return
	}

	// find the first action newer than the last one we processed
	index := -1
	for i, seq := range seqs {
		if s.lastActionSeq < seq {
			index = i
			break
		}
	}

	if index < 0 {
		log.Warningf("zookeeper restarted or actions were deleted ? lastActionSeq: %d", s.lastActionSeq)
		if s.lastActionSeq > seqs[len(seqs)-1] {
			log.Fatalf("unknown error, zookeeper restarted or actions were deleted ? lastActionSeq: %d, %v", s.lastActionSeq, nodes)
		}
		if s.lastActionSeq == seqs[len(seqs)-1] {
			// children-changed or delete event with nothing new to do
			return
		}
		// the action nodes were removed by someone; replay from the start
		index = 0
	}

	actions := seqs[index:]
	for _, seq := range actions {
		exist, err := s.top.Exist(path.Join(s.top.GetActionResponsePath(seq), s.pi.Id))
		if err != nil {
			log.Fatal(errors.ErrorStack(err))
		}
		if exist {
			continue
		}
		if s.checkAndDoTopoChange(seq) {
			s.responseAction(int64(seq))
		}
	}
	s.lastActionSeq = seqs[len(seqs)-1]
}
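// A minimal sketch of the lock-wait check at the top of processAction, pulled
// out as a reusable helper. The helper name and threshold are illustrative,
// and it uses the standard library logger rather than the proxy's log package.
package main

import (
	"log"
	"sync"
	"time"
)

func lockWithWarning(mu *sync.Mutex, threshold time.Duration) {
	start := time.Now()
	mu.Lock() // blocks until the lock is acquired
	if d := time.Since(start); d > threshold {
		log.Printf("took too long to get lock: %v (threshold %v)", d, threshold)
	}
}

func main() {
	var mu sync.Mutex
	lockWithWarning(&mu, 10*time.Second)
	defer mu.Unlock()
}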