func NewServer(conf *Conf) *Server {
	log.Infof("start with configuration: %+v", conf)

	// factory used by the connection pool to dial backend store servers
	f := func(addr string) (*redisconn.Conn, error) {
		return newRedisConn(addr, conf.NetTimeout, RedisConnReaderSize, RedisConnWiterSize, conf.StoreAuth)
	}

	s := &Server{
		conf:          conf,
		evtbus:        make(chan interface{}, EventBusNum),
		top:           topo.NewTopo(conf.ProductName, conf.CoordinatorAddr, conf.f, conf.Coordinator),
		counter:       stats.NewCounters("router"),
		lastActionSeq: -1,
		startAt:       time.Now(),
		moper:         newMultiOperator(conf.Addr, conf.ProxyAuth),
		reqCh:         make(chan *PipelineRequest, PipelineRequestNum),
		pools:         redisconn.NewPools(PoolCapability, f),
		pipeConns:     make(map[string]*taskRunner),
		bufferedReq:   list.New(),
	}

	// build the proxy info that will be registered with the coordinator
	s.pi.ID = conf.ProxyID
	s.pi.State = models.PROXY_STATE_OFFLINE

	addr := conf.Addr
	addrs := strings.Split(addr, ":")
	if len(addrs) != 2 {
		log.Fatalf("bad addr %s", addr)
	}

	hname, err := os.Hostname()
	if err != nil {
		log.Fatal("get host name failed", err)
	}
	s.pi.Addr = hname + ":" + addrs[1]

	debugVarAddr := conf.HTTPAddr
	debugVarAddrs := strings.Split(debugVarAddr, ":")
	if len(debugVarAddrs) != 2 {
		log.Fatalf("bad debugVarAddr %s", debugVarAddr)
	}
	s.pi.DebugVarAddr = hname + ":" + debugVarAddrs[1]

	s.pi.Pid = os.Getpid()
	s.pi.StartAt = time.Now().String()
	log.Infof("proxy_info:%+v", s.pi)

	stats.Publish("evtbus", stats.StringFunc(func() string {
		return strconv.Itoa(len(s.evtbus))
	}))
	stats.Publish("startAt", stats.StringFunc(func() string {
		return s.startAt.String()
	}))

	s.RegisterAndWait(true)
	s.registerSignal()

	_, err = s.top.WatchChildren(models.GetWatchActionPath(conf.ProductName), s.evtbus)
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}

	s.FillSlots()

	// start event handler
	go s.handleTopoEvent()
	go s.dumpCounter()

	log.Info("proxy start ok")
	return s
}
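The connection factory f above is simply a closure over the proxy configuration; redisconn.NewPools holds on to that closure, which is why the migration test below can close the pool and rebuild it with the same factory to drop broken connections. The following is a minimal, self-contained sketch of that pattern; the Conn, Pool, and NewPools names here are hypothetical stand-ins, not the real redisconn implementation.

// Sketch only: a dial closure captures the configuration, and the pool keeps
// the closure so connections can be re-dialed after the pool is rebuilt.
package main

import "fmt"

type Conn struct{ addr string } // stand-in for *redisconn.Conn

type Pool struct {
	capacity int
	factory  func(addr string) (*Conn, error)
}

// NewPools mirrors the shape of redisconn.NewPools: a capacity plus a dial factory.
func NewPools(capacity int, f func(addr string) (*Conn, error)) *Pool {
	return &Pool{capacity: capacity, factory: f}
}

func (p *Pool) Get(addr string) (*Conn, error) { return p.factory(addr) }

func main() {
	netTimeout := 5 // seconds, captured by the closure like conf.NetTimeout above
	f := func(addr string) (*Conn, error) {
		fmt.Printf("dial %s with timeout %ds\n", addr, netTimeout)
		return &Conn{addr: addr}, nil
	}

	pools := NewPools(16, f)
	if c, err := pools.Get("127.0.0.1:6379"); err == nil {
		fmt.Println("got connection to", c.addr)
	}
}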
func (s *testProxyRouterSuite) TestMigrate(c *C) {
	proxyConn := s.testDialConn(c, proxyAddr, proxyAuth)
	defer proxyConn.Close()

	// first generate 100 keys in slot 0
	keys := s.testGenKeysInSlot(c, 0, 100)
	for i := 0; i < len(keys); i++ {
		_, err := proxyConn.Do("SET", keys[i], keys[i])
		c.Assert(err, IsNil)
	}

	// set slot 0 migrating from group 1 to group 2
	slot0 := s.testSetSlotMigrate(c, 0, 1, 2)

	var err error
	for i := 0; i < len(keys); i++ {
		// migrate some keys, then close the source server
		mustErr := false
		if i == len(keys)/2 {
			s.s1.Close()
			mustErr = true
		}

		_, err = proxyConn.Do("GET", keys[i])
		if !mustErr {
			c.Assert(err, IsNil)
		} else {
			c.Assert(err, NotNil)
			break
		}
	}

	// restart the source server
	s.s1 = s.testCreateServer(c, 16381, storeAuth)
	c.Assert(s.s1, NotNil)

	time.Sleep(1 * time.Second)

	// the proxy should have closed our connection
	_, err = proxyConn.Do("SET", keys[0], keys[0])
	c.Assert(err, NotNil)
	proxyConn.Close()

	// the proxy keeps a connection pool to the store servers, so we must
	// close the pool and recreate it to drop all old broken connections
	ss.pools.Close()
	f := func(addr string) (*redisconn.Conn, error) {
		return newRedisConn(addr, conf.NetTimeout, RedisConnReaderSize, RedisConnWiterSize, conf.StoreAuth)
	}
	ss.pools = redisconn.NewPools(PoolCapability, f)

	s1Conn := s.testDialConn(c, s.s1.addr, storeAuth)
	defer s1Conn.Close()

	// the second half of the data is still in server1
	for i := len(keys) / 2; i < len(keys); i++ {
		value, err := redis.String(s1Conn.Do("GET", keys[i]))
		c.Assert(err, IsNil)
		c.Assert(value, Equals, keys[i])
	}

	// reconnect
	proxyConn = s.testDialConn(c, proxyAddr, proxyAuth)

	// drive the migration again through the proxy
	for i := 0; i < len(keys); i++ {
		value, err := redis.String(proxyConn.Do("GET", keys[i]))
		c.Assert(err, IsNil)
		c.Assert(value, Equals, keys[i])
	}

	s2Conn := s.testDialConn(c, s.s2.addr, storeAuth)
	defer s2Conn.Close()

	// now all data is in server2
	for i := 0; i < len(keys); i++ {
		value, err := redis.String(s2Conn.Do("GET", keys[i]))
		c.Assert(err, IsNil)
		c.Assert(value, Equals, keys[i])
	}

	// migration done
	s.testSetSlotOnline(c, slot0)

	s.s1.store.Reset()
	s.s2.store.Reset()
}