Example #1
func (cp *CachePool) startMemcache() {
	// A port containing "/" is a unix socket path; remove any stale socket file.
	if strings.Contains(cp.port, "/") {
		_ = os.Remove(cp.port)
	}
	commandLine := cp.rowCacheConfig.GetSubprocessFlags()
	cp.cmd = exec.Command(commandLine[0], commandLine[1:]...)
	if err := cp.cmd.Start(); err != nil {
		log.Fatalf("can't start memcache: %v", err)
	}
	attempts := 0
	for {
		time.Sleep(100 * time.Millisecond)
		c, err := memcache.Connect(cp.port, 30*time.Millisecond)
		if err != nil {
			attempts++
			if attempts >= 50 {
				cp.cmd.Process.Kill()
				// Avoid zombies
				go cp.cmd.Wait()
				// FIXME(sougou): Throw proper error if we can recover
				log.Fatal("Can't connect to memcache")
			}
			continue
		}
		if _, err = c.Set("health", 0, 0, []byte("ok")); err != nil {
			log.Fatalf("can't communicate with memcache: %v", err)
		}
		c.Close()
		break
	}
}
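With 50 attempts at 100ms intervals, the retry loop above gives memcache roughly five seconds to come up before the process exits.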
Example #2
func LoadConf(configFile string) (*Conf, error) {
	srvConf := &Conf{}
	conf, err := utils.InitConfigFromFile(configFile)
	if err != nil {
		log.Fatal(err)
	}

	srvConf.productName, _ = conf.ReadString("product", "test")
	if len(srvConf.productName) == 0 {
		log.Fatalf("invalid config: product entry is missing in %s", configFile)
	}
	srvConf.zkAddr, _ = conf.ReadString("zk", "")
	if len(srvConf.zkAddr) == 0 {
		log.Fatalf("invalid config: need zk entry is missing in %s", configFile)
	}
	srvConf.zkAddr = strings.TrimSpace(srvConf.zkAddr)

	srvConf.proxyId, _ = conf.ReadString("proxy_id", "")
	if len(srvConf.proxyId) == 0 {
		log.Fatalf("invalid config: need proxy_id entry is missing in %s", configFile)
	}

	srvConf.netTimeout, _ = conf.ReadInt("net_timeout", 5)
	srvConf.proto, _ = conf.ReadString("proto", "tcp")
	srvConf.provider, _ = conf.ReadString("coordinator", "zookeeper")
	log.Infof("%+v", srvConf)

	return srvConf, nil
}
Example #3
func LoadConf(configFile string) (*Conf, error) {
	srvConf := &Conf{}
	conf, err := utils.InitConfigFromFile(configFile)
	if err != nil {
		log.Fatal(err)
	}

	srvConf.productName, _ = conf.ReadString("product", "test")
	if len(srvConf.productName) == 0 {
		log.Fatalf("invalid config: product entry is missing in %s", configFile)
	}
	srvConf.zkAddr, _ = conf.ReadString("zk", "")
	if len(srvConf.zkAddr) == 0 {
		log.Fatalf("invalid config: need zk entry is missing in %s", configFile)
	}
	srvConf.proxyId, _ = conf.ReadString("proxy_id", "")
	if len(srvConf.proxyId) == 0 {
		log.Fatalf("invalid config: need proxy_id entry is missing in %s", configFile)
	}

	srvConf.broker, _ = conf.ReadString("broker", "ledisdb")
	if len(srvConf.broker) == 0 {
		log.Fatalf("invalid config: need broker entry is missing in %s", configFile)
	}

	srvConf.slot_num, _ = conf.ReadInt("slot_num", 16)

	srvConf.net_timeout, _ = conf.ReadInt("net_timeout", 5)

	return srvConf, nil
}
Example #4
func (s *Server) handleMigrateState(slotIndex int, key []byte) error {
	shd := s.slots[slotIndex]
	if shd.slotInfo.State.Status != models.SLOT_STATUS_MIGRATE {
		return nil
	}

	if shd.migrateFrom == nil {
		log.Fatalf("migrateFrom not exist %+v", shd)
	}

	if shd.dst.Master() == shd.migrateFrom.Master() {
		log.Fatalf("the same migrate src and dst, %+v", shd)
	}

	redisConn, err := s.pools.GetConn(shd.migrateFrom.Master())
	if err != nil {
		return errors.Trace(err)
	}

	defer s.pools.ReleaseConn(redisConn)

	redisReader := redisConn.(*redispool.PooledConn).BufioReader()

	err = WriteMigrateKeyCmd(redisConn.(*redispool.PooledConn), shd.dst.Master(), 30*1000, key)
	if err != nil {
		redisConn.Close()
		log.Warningf("migrate key %s error, from %s to %s",
			string(key), shd.migrateFrom.Master(), shd.dst.Master())
		return errors.Trace(err)
	}

	// handle the migrate result
	resp, err := parser.Parse(redisReader)
	if err != nil {
		redisConn.Close()
		return errors.Trace(err)
	}

	result, _ := resp.Bytes() // result is only used for the debug log below

	log.Debug("migrate", string(key), "from", shd.migrateFrom.Master(), "to", shd.dst.Master(),
		string(result))

	if resp.Type == parser.ErrorResp {
		redisConn.Close()
		log.Error(string(key), string(resp.Raw), "migrateFrom", shd.migrateFrom.Master())
		return errors.New(string(resp.Raw))
	}

	s.counter.Add("Migrate", 1)
	return nil
}
Example #5
// ParseRedirectInfo parses slot redirect information from a MOVED or ASK error.
func ParseRedirectInfo(msg string) (slot int, server string) {
	var err error
	parts := strings.Fields(msg)
	if len(parts) != 3 {
		log.Fatalf("invalid redirect message: %s", msg)
	}
	slot, err = strconv.Atoi(parts[1])
	if err != nil {
		log.Fatalf("invalid redirect message: %s", msg)
	}
	server = parts[2]
	return
}
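A redirect error from Redis Cluster has the form "MOVED 3999 127.0.0.1:6381" (ASK looks the same), so a call with a hypothetical message behaves like this:

slot, server := ParseRedirectInfo("MOVED 3999 127.0.0.1:6381")
// slot == 3999, server == "127.0.0.1:6381"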
Example #6
func (s *Server) checkAndDoTopoChange(seq int) (needResponse bool) {
	act, err := s.top.GetActionWithSeq(int64(seq))
	if err != nil {
		log.Fatal(errors.ErrorStack(err), "action seq", seq)
	}

	if !StringsContain(act.Receivers, s.pi.Id) { // no need to respond
		return false
	}

	switch act.Type {
	case models.ACTION_TYPE_SLOT_MIGRATE, models.ACTION_TYPE_SLOT_CHANGED,
		models.ACTION_TYPE_SLOT_PREMIGRATE:
		slot := &models.Slot{}
		s.getActionObject(seq, slot)
		s.fillSlot(slot.Id, true)
	case models.ACTION_TYPE_SERVER_GROUP_CHANGED:
		serverGroup := &models.ServerGroup{}
		s.getActionObject(seq, serverGroup)
		s.OnGroupChange(serverGroup.Id)
	case models.ACTION_TYPE_SERVER_GROUP_REMOVE:
		// nothing to do
	case models.ACTION_TYPE_MULTI_SLOT_CHANGED:
		param := &models.SlotMultiSetParam{}
		s.getActionObject(seq, param)
		s.OnSlotRangeChange(param)
	default:
		log.Fatalf("unknown action %+v", act)
	}

	return true
}
Example #7
func handleCrashedServer(s *models.Server) error {
	switch s.Type {
	case models.SERVER_TYPE_MASTER:
		// get a slave and promote it
		slave, err := getSlave(s)
		if err != nil {
			log.Warning(errors.ErrorStack(err))
			return err
		}

		log.Infof("try promote %+v", slave)
		err = callHttp(nil, genUrl(*apiServer, "/api/server_group/", slave.GroupId, "/promote"), "POST", slave)
		if err != nil {
			log.Errorf("do promote %v failed %v", slave, errors.ErrorStack(err))
			return err
		}
		refreshSlave(s) // refresh
	case models.SERVER_TYPE_SLAVE:
		log.Errorf("slave is down: %+v", s)
	case models.SERVER_TYPE_OFFLINE:
		// no need to handle it
	default:
		log.Fatalf("unkonwn type %+v", s)
	}

	return nil
}
Example #8
func NewCachePool(name string, rowCacheConfig RowCacheConfig, queryTimeout time.Duration, idleTimeout time.Duration) *CachePool {
	cp := &CachePool{name: name, idleTimeout: idleTimeout}
	if rowCacheConfig.Binary == "" {
		return cp
	}
	cp.rowCacheConfig = rowCacheConfig

	// Start with memcached defaults
	cp.capacity = 1024 - 50
	cp.port = "11211"
	if rowCacheConfig.Socket != "" {
		cp.port = rowCacheConfig.Socket
	}

	if rowCacheConfig.TcpPort > 0 {
		// liuqi: a ":" was missing in the original code
		cp.port = ":" + strconv.Itoa(rowCacheConfig.TcpPort)
	}

	if rowCacheConfig.Connections > 0 {
		if rowCacheConfig.Connections <= 50 {
			log.Fatalf("insufficient capacity: %d", rowCacheConfig.Connections)
		}
		cp.capacity = rowCacheConfig.Connections - 50
	}

	seconds := uint64(queryTimeout / time.Second)
	// Add an additional grace period for
	// memcache expiry of deleted items
	if seconds != 0 {
		cp.DeleteExpiry = 2*seconds + 15
	}
	return cp
}
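For example, a queryTimeout of 30s yields DeleteExpiry = 2*30 + 15 = 75 seconds.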
Example #9
func str2mysqlType(columnType string) byte {
	b, ok := typesMap[columnType]
	if !ok {
		log.Fatalf("%s not exist", columnType)
	}

	return b
}
Example #10
func (pc *ProxyConfig) apply() {
	log.SetLevelByString(pc.logLevel)

	if pc.logFile != "" {
		err := log.SetOutputByName(pc.logFile)
		if err != nil {
			log.Fatalf("ProxyConfig SetOutputByName %s failed %s ", pc.logFile, err.Error())
		}
		log.SetRotateByDay()
	}

	if pc.name == "" {
		log.Fatal("ProxyConfig name must not empty")
	}

	if pc.port == 0 {
		log.Fatal("ProxyConfig port  must not 0")
	}

	if pc.cpu > runtime.NumCPU() {
		log.Warningf("ProxyConfig cpu  %d exceed %d, adjust to %d ", pc.cpu, runtime.NumCPU(), runtime.NumCPU())
		pc.cpu = runtime.NumCPU()
	}

	if pc.maxConn > 10000 {
		log.Warningf("ProxyConfig maxconn %d exceed 10000, adjust to 10000", pc.maxConn)
		pc.maxConn = 10000
	}

	runtime.GOMAXPROCS(pc.cpu)

	if pc.poolSize <= 0 || pc.poolSize > 30 {
		log.Warning("ProxyConfig poolSize %d , adjust to 10 ", pc.poolSize)
		pc.poolSize = 10
	}

	if pc.cpuFile != "" {
		f, err := os.Create(pc.cpuFile)
		if err != nil {
			log.Fatal(err)
		}
		log.Warning("Archer start CPUProfile ", pc.cpuFile)
		pprof.StartCPUProfile(f)
		// NB: deferred inside apply(), so profiling stops as soon as this function returns
		defer pprof.StopCPUProfile()
	}

	if pc.memFile != "" {
		f, err := os.Create(pc.memFile)
		if err == nil {
			log.Warning("Archer start HeapProfile ", pc.memFile)
			pprof.WriteHeapProfile(f)
		}
	}

	go func() {
		log.Warning(http.ListenAndServe(":6061", nil))
	}()
}
Example #11
func NewRedisConn(host string, port int) (*RedisConn, error) {
	conn := &RedisConn{}
	c, err := net.Dial("tcp4", fmt.Sprintf("%s:%d", host, port))
	if err != nil {
		log.Fatalf("Backend Dial  %s:%d failed %s", host, port, err)
	}

	conn.w = bufio.NewWriter(c)
	conn.r = bufio.NewReader(c)
	return conn, nil
}
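Note that the error return value is effectively dead code here: a failed Dial hits log.Fatalf, so callers never observe a non-nil error.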
Example #12
func (self *ResMan) handleMesosStatusUpdate(t *cmdMesosStatusUpdate) {
	status := t.status

	defer func() {
		t.wait <- struct{}{}
	}()

	taskId := status.TaskId.GetValue()
	log.Debugf("Received task %+v status: %+v", taskId, status)
	currentTask := self.running.Get(taskId)
	if currentTask == nil {
		task, err := scheduler.GetTaskByTaskId(taskId)
		if err != nil {
			return
		}
		job, err := scheduler.GetJobByName(task.JobName)
		if err != nil {
			return
		}
		currentTask = &Task{Tid: task.TaskId, job: job, SlaveId: status.SlaveId.GetValue(), state: taskRuning}
		self.running.Add(currentTask.Tid, currentTask) // add this orphan task to the running queue
	}

	pwd := string(status.Data)
	if len(pwd) > 0 && len(currentTask.Pwd) == 0 {
		currentTask.Pwd = pwd
	}

	currentTask.LastUpdate = time.Now()

	switch *status.State {
	case mesos.TaskState_TASK_FINISHED:
		currentTask.job.LastSuccessTs = time.Now().Unix()
		self.removeRunningTask(taskId)
	case mesos.TaskState_TASK_FAILED, mesos.TaskState_TASK_KILLED, mesos.TaskState_TASK_LOST:
		currentTask.job.LastErrTs = time.Now().Unix()
		self.removeRunningTask(taskId)
	case mesos.TaskState_TASK_STAGING:
		// TODO: update something
	case mesos.TaskState_TASK_STARTING:
		// TODO: update something
	case mesos.TaskState_TASK_RUNNING:
		// TODO: update something
	default:
		log.Fatalf("should never happend %+v", status.State)
	}

	persistentTask, err := scheduler.GetTaskByTaskId(taskId)
	if err != nil {
		log.Error(err)
	}

	self.saveTaskStatus(persistentTask, status, currentTask)
}
Example #13
func (s *Server) handleMarkOffline() {
	s.top.Close(s.pi.Id)
	if s.OnSuicide == nil {
		s.OnSuicide = func() error {
			log.Fatalf("suicide %+v", s.pi)
			return nil
		}
	}

	s.OnSuicide()
}
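The default OnSuicide calls log.Fatalf, which never returns; its return nil exists only to satisfy the func() error signature.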
Example #14
func CheckUlimit(min int) {
	ulimitN, err := exec.Command("/bin/sh", "-c", "ulimit -n").Output()
	if err != nil {
		log.Warning("get ulimit failed", err)
	}

	n, err := strconv.Atoi(strings.TrimSpace(string(ulimitN)))
	if err != nil || n < min {
		log.Fatalf("ulimit too small: %d, should be at least %d", n, min)
	}
}
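If the ulimit -n command itself fails, ulimitN stays empty, so the subsequent Atoi error triggers the same Fatalf (with n reported as 0).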
Example #15
func (ps *ProxyServer) Init() {
	log.Info("Proxy Server Init ....")

	l, err := net.Listen("tcp4", "0.0.0.0:"+ps.Conf.Port)
	if err != nil {
		log.Fatalf("Proxy Server Listen on port %s failed: %v", ps.Conf.Port, err)
	}
	log.Info("Proxy Server Listen on port ", ps.Conf.Port)
	ps.Listen = l
}
Example #16
func NewGroup(groupInfo models.ServerGroup) *Group {
	g := &Group{
		redisServers: make(map[string]models.Server),
	}

	for _, server := range groupInfo.Servers {
		if server.Type == models.SERVER_TYPE_MASTER {
			if len(g.master) > 0 {
				log.Fatalf("two master not allowed: %+v", groupInfo)
			}

			g.master = server.Addr
		}
		g.redisServers[server.Addr] = server
	}

	if len(g.master) == 0 {
		log.Fatalf("master not found: %+v", groupInfo)
	}

	return g
}
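A minimal call, assuming models.Server exposes the Type and Addr fields used above (hypothetical addresses):

g := NewGroup(models.ServerGroup{
	Servers: []models.Server{
		{Type: models.SERVER_TYPE_MASTER, Addr: "127.0.0.1:6379"},
		{Type: models.SERVER_TYPE_SLAVE, Addr: "127.0.0.1:6380"},
	},
})
// g.Master() == "127.0.0.1:6379" (Master() as used in Example #4)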
Example #17
func LoadConf(configFile string) (*Conf, error) {
	srvConf := &Conf{}
	conf, err := utils.InitConfigFromFile(configFile)
	if err != nil {
		log.Fatal(err)
	}

	srvConf.productName, _ = conf.ReadString("product", "test")
	if len(srvConf.productName) == 0 {
		log.Fatalf("invalid config: product entry is missing in %s", configFile)
	}
	srvConf.zkAddr, _ = conf.ReadString("zk", "")
	if len(srvConf.zkAddr) == 0 {
		log.Fatalf("invalid config: need zk entry is missing in %s", configFile)
	}
	srvConf.proxyId, _ = conf.ReadString("proxy_id", "")
	if len(srvConf.proxyId) == 0 {
		log.Fatalf("invalid config: need proxy_id entry is missing in %s", configFile)
	}

	return srvConf, nil
}
Example #18
func (rc *RowCache) Get(keys []string, tcs []schema.TableColumn) (results map[string]RCResult) {
	mkeys := make([]string, 0, len(keys))
	for _, key := range keys {
		if len(key) > MAX_KEY_LEN {
			continue
		}
		mkeys = append(mkeys, rc.prefix+key)
	}

	prefixlen := len(rc.prefix)
	conn := rc.cachePool.Get(0)
	// Not the same as `defer rc.cachePool.Put(conn)`: the closure evaluates
	// conn at return time, so a conn set to nil below is put back as nil.
	defer func() { rc.cachePool.Put(conn) }()

	mcresults, err := conn.Gets(mkeys...)
	if err != nil {
		conn.Close()
		conn = nil
		log.Fatalf("%s", err)
	}
	results = make(map[string]RCResult, len(mkeys))
	for _, mcresult := range mcresults {
		if mcresult.Flags == RC_DELETED {
			// The row was recently invalidated.
			// If the caller reads the row from db, they can update it
			// back as long as it's not updated again.
			results[mcresult.Key[prefixlen:]] = RCResult{Cas: mcresult.Cas}
			continue
		}
		row := rc.decodeRow(mcresult.Value, tcs)
		if row == nil {
			log.Fatalf("Corrupt data for %s", mcresult.Key)
		}
		results[mcresult.Key[prefixlen:]] = RCResult{Row: row, Cas: mcresult.Cas}
	}
	return
}
Example #19
func (rc *RowCache) Delete(key string) {
	if len(key) > MAX_KEY_LEN {
		return
	}
	conn := rc.cachePool.Get(0)
	defer func() { rc.cachePool.Put(conn) }()
	mkey := rc.prefix + key

	_, err := conn.Set(mkey, RC_DELETED, rc.cachePool.DeleteExpiry, nil)
	if err != nil {
		conn.Close()
		conn = nil
		log.Fatalf("%s", err)
	}
}
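Note that Delete writes an RC_DELETED tombstone with DeleteExpiry rather than removing the key; that is what lets Get (Example #18) hand back a Cas so callers can safely re-populate the row.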
Example #20
func NewProxy(pc *ProxyConfig) *Proxy {
	p := &Proxy{
		sm:      newSessMana(pc.idleTimeout),
		cluster: NewCluster(pc),
		filter:  &StrFilter{},
		pc:      pc,
	}

	// set up the listener last
	l, err := net.Listen("tcp4", fmt.Sprintf(":%d", pc.port))
	if err != nil {
		log.Fatalf("Proxy Listen %d failed: %s", pc.port, err.Error())
	}
	p.l = l
	return p
}
Example #21
func (top *Topology) doWatch(evtch <-chan topo.Event, evtbus chan interface{}) {
	e := <-evtch
	log.Infof("topo event %+v", e)
	if e.State == topo.StateExpired {
		log.Fatalf("session expired: %+v", e)
	}

	switch e.Type {
	//case topo.EventNodeCreated:
	//case topo.EventNodeDataChanged:
	case topo.EventNodeChildrenChanged: // only children-changed events matter
		// TODO: get the changed node and decode the event
	default:
		log.Warningf("%+v", e)
	}

	evtbus <- e
}
Example #22
// initializePool initializes the connection pools before Serve runs.
func (c *Cluster) initializePool() {
	log.Info("Cluster start initializePool ", len(c.pc.nodes))
	nodes := make(map[string]*Node, len(c.pc.nodes))
	for _, s := range c.topo.slots {
		if s.master != nil {
			nodes[s.master.id] = s.master
		}
		for _, n := range s.slaves {
			nodes[n.id] = n
		}
	}

	log.Info("initializePool nodes len ", len(nodes))
	for _, n := range nodes {
		log.Info("Cluster nodes ", n.id)
		_, ok := c.pools[n.id]
		if ok {
			log.Fatalf("Cluster initializePool duplicate %s %s:%d", n.id, n.host, n.port)
		}

		opt := &Options{
			Network:      "tcp",
			Addr:         fmt.Sprintf("%s:%d", n.host, n.port),
			Dialer:       RedisConnDialer(n.host, n.port, n.id, c.pc),
			DialTimeout:  c.pc.dialTimeout,
			ReadTimeout:  c.pc.readTimeout,
			WriteTimeout: c.pc.writeTimeout,
			PoolSize:     c.pc.poolSize,
			IdleTimeout:  c.pc.idleTimeout,
		}

		c.pools[n.id] = NewConnPool(opt)
		c.opts[n.id] = opt
		// sanity-check the new pool with a Get/Put round trip
		testConn, err := c.pools[n.id].Get()
		if err != nil {
			log.Warning("test pool failed ", err)
			continue
		}
		c.pools[n.id].Put(testConn)
	}
	log.Info("Cluster initializePool done")
}
Example #23
func (s *Server) fillSlot(i int, force bool) {
	if !validSlot(i) {
		return
	}

	if !force && s.slots[i] != nil { //check
		log.Fatalf("slot %d already filled, slot: %+v", i, s.slots[i])
		return
	}

	s.clearSlot(i)

	slotInfo, groupInfo, err := s.top.GetSlotByIndex(i)
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}

	slot := &Slot{
		slotInfo:  slotInfo,
		dst:       group.NewGroup(*groupInfo),
		groupInfo: groupInfo,
	}

	log.Infof("fill slot %d, force %v, %+v", i, force, slot.dst)

	s.pools.AddPool(slot.dst.Master())

	if slot.slotInfo.State.Status == models.SLOT_STATUS_MIGRATE {
		// get the migration source group and fill it in
		from, err := s.top.GetGroup(slot.slotInfo.State.MigrateStatus.From)
		if err != nil { // TODO: retry?
			log.Fatal(err)
		}
		slot.migrateFrom = group.NewGroup(*from)
		s.pools.AddPool(slot.migrateFrom.Master())
	}

	s.slots[i] = slot
	s.counter.Add("FillSlot", 1)
}
Example #24
func (s *Server) checkAndDoTopoChange(seq int) bool {
	act, err := s.top.GetActionWithSeq(int64(seq))
	if err != nil { // TODO: the error is not "not exist"
		log.Fatal(errors.ErrorStack(err), "action seq", seq)
	}

	if !needResponse(act.Receivers, s.pi) { // no need to respond
		return false
	}

	log.Warningf("action %v receivers %v", seq, act.Receivers)

	s.stopTaskRunners()

	switch act.Type {
	case models.ACTION_TYPE_SLOT_MIGRATE, models.ACTION_TYPE_SLOT_CHANGED,
		models.ACTION_TYPE_SLOT_PREMIGRATE:
		slot := &models.Slot{}
		s.getActionObject(seq, slot)
		s.fillSlot(slot.Id, true)
	case models.ACTION_TYPE_SERVER_GROUP_CHANGED:
		serverGroup := &models.ServerGroup{}
		s.getActionObject(seq, serverGroup)
		s.OnGroupChange(serverGroup.Id)
	case models.ACTION_TYPE_SERVER_GROUP_REMOVE:
		// nothing to do
	case models.ACTION_TYPE_MULTI_SLOT_CHANGED:
		param := &models.SlotMultiSetParam{}
		s.getActionObject(seq, param)
		s.OnSlotRangeChange(param)
	default:
		log.Fatalf("unknown action %+v", act)
	}

	s.createTaskRunners()

	return true
}
Example #25
func (s *Session) SpecCommandProcess(req *redis.Request) {
	// log.Info("Spec command Process ", req)

	switch req.Name() {
	case "SINTERSTORE":
		s.SINTERSTORE(req)
	case "SMOVE":
		s.SMOVE(req)
	case "DEL":
		s.DEL(req)
	case "RPOPLPUSH":
		s.RPOPLPUSH(req)
	case "SDIFFSTORE":
		s.SDIFFSTORE(req)
	case "SINTER":
		s.SINTER(req)
	case "SDIFF":
		s.SDIFF(req)
	case "MGET":
		s.MGET(req)
	case "ZINTERSTORE":
		s.ZINTERSTORE(req)
	case "ZUNIONSTORE":
		s.ZUNIONSTORE(req)
	case "RENAME":
		s.RENAME(req)
	case "RENAMENX":
		s.RENAMENX(req)
	case "MSET":
		s.MSET(req)
	case "MSETNX":
		s.MSETNX(req)
	case "PROXY":
		s.PROXY(req)
	default:
		log.Fatalf("Unknown Spec Command: %s, we won't expect this happen ", req.Name())
	}
}
Example #26
func (rc *RowCache) Set(key string, row []byte, cas uint64) {
	if len(key) > MAX_KEY_LEN {
		return
	}

	conn := rc.cachePool.Get(0)
	defer func() { rc.cachePool.Put(conn) }()
	mkey := rc.prefix + key

	var err error
	if cas == 0 {
		// Either caller didn't find the value at all
		// or they didn't look for it in the first place.
		_, err = conn.Add(mkey, 0, 0, row)
	} else {
		// Caller is trying to update a row that recently changed.
		_, err = conn.Cas(mkey, 0, 0, row, cas)
	}
	if err != nil {
		conn.Close()
		conn = nil
		log.Fatalf("%s", err)
	}
}
Example #27
func (self *Server) handleCloseSession(e *event) error {
	sessionId := e.fromSessionId
	if w, ok := self.worker[sessionId]; ok {
		if sessionId != w.SessionId {
			log.Fatalf("sessionId not match %d-%d, bug found", sessionId, w.SessionId)
		}
		self.removeWorkerBySessionId(w.SessionId)

		// reschedule these jobs so other workers can handle them
		for handle, j := range w.runningJobs {
			if handle != j.Handle {
				log.Fatal("handle not match %d-%d", handle, j.Handle)
			}
			self.doAddJob(j)
		}
	}
	if c, ok := self.client[sessionId]; ok {
		log.Debug("removeClient sessionId", sessionId)
		delete(self.client, c.SessionId)
	}
	e.result <- true //notify close finish

	return nil
}
Example #28
func (s *Server) processAction(e interface{}) {
	start := time.Now()
	s.mu.Lock()
	defer s.mu.Unlock()

	if time.Since(start).Seconds() > 10 {
		log.Warning("take too long to get lock")
	}

	actPath := GetEventPath(e)
	if strings.HasPrefix(actPath, models.GetProxyPath(s.top.ProductName)) {
		// proxy event; should be an order for me to suicide
		s.handleProxyCommand()
		return
	}

	// re-watch
	nodes, err := s.top.WatchChildren(models.GetWatchActionPath(s.top.ProductName), s.evtbus)
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}

	seqs, err := models.ExtraSeqList(nodes)
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}

	if len(seqs) == 0 || !s.top.IsChildrenChangedEvent(e) {
		return
	}

	// find the first action newer than lastActionSeq
	index := -1
	for i, seq := range seqs {
		if s.lastActionSeq < seq {
			index = i
			break
		}
	}

	if index < 0 {
		log.Warningf("zookeeper restarted or actions were deleted ? lastActionSeq: %d", s.lastActionSeq)
		if s.lastActionSeq > seqs[len(seqs)-1] {
			log.Fatalf("unknown error, zookeeper restarted or actions were deleted ? lastActionSeq: %d, %v", s.lastActionSeq, nodes)
		}

		if s.lastActionSeq == seqs[len(seqs)-1] { // children-changed or delete event
			return
		}

		// the actions node was removed by someone; it seems we can handle that
		index = 0
	}

	actions := seqs[index:]
	for _, seq := range actions {
		exist, err := s.top.Exist(path.Join(s.top.GetActionResponsePath(seq), s.pi.Id))
		if err != nil {
			log.Fatal(errors.ErrorStack(err))
		}

		if exist {
			continue
		}

		if s.checkAndDoTopoChange(seq) {
			s.responseAction(int64(seq))
		}
	}

	s.lastActionSeq = seqs[len(seqs)-1]
}
Example #29
func InitEnv() {
	go once.Do(func() {
		conn = zkhelper.NewConn()
		conf = &Conf{
			proxyId:     "proxy_test",
			productName: "test",
			zkAddr:      "localhost:2181",
			netTimeout:  5,
			f:           func(string) (zkhelper.Conn, error) { return conn, nil },
			proto:       "tcp4",
		}

		//init action path
		prefix := models.GetWatchActionPath(conf.productName)
		err := models.CreateActionRootPath(conn, prefix)
		if err != nil {
			log.Fatal(err)
		}

		//init slot
		err = models.InitSlotSet(conn, conf.productName, 1024)
		if err != nil {
			log.Fatal(err)
		}

		// init server group
		g1 := models.NewServerGroup(conf.productName, 1)
		g1.Create(conn)
		g2 := models.NewServerGroup(conf.productName, 2)
		g2.Create(conn)

		redis1, _ = miniredis.Run()
		redis2, _ = miniredis.Run()

		s1 := models.NewServer(models.SERVER_TYPE_MASTER, redis1.Addr())
		s2 := models.NewServer(models.SERVER_TYPE_MASTER, redis2.Addr())

		g1.AddServer(conn, s1)
		g2.AddServer(conn, s2)

		//set slot range
		err = models.SetSlotRange(conn, conf.productName, 0, 511, 1, models.SLOT_STATUS_ONLINE)
		if err != nil {
			log.Fatal(err)
		}

		err = models.SetSlotRange(conn, conf.productName, 512, 1023, 2, models.SLOT_STATUS_ONLINE)
		if err != nil {
			log.Fatal(err)
		}

		go func() { //set proxy online
			time.Sleep(3 * time.Second)
			err := models.SetProxyStatus(conn, conf.productName, conf.proxyId, models.PROXY_STATE_ONLINE)
			if err != nil {
				log.Fatal(errors.ErrorStack(err))
			}
			time.Sleep(2 * time.Second)
			proxyMutex.Lock()
			defer proxyMutex.Unlock()
			pi := s.getProxyInfo()
			if pi.State != models.PROXY_STATE_ONLINE {
				log.Fatalf("should be online, we got %s", pi.State)
			}
		}()

		proxyMutex.Lock()
		s = NewServer(":19000", ":11000",
			conf,
		)
		proxyMutex.Unlock()
		s.Run()
	})

	waitonce.Do(func() {
		time.Sleep(10 * time.Second)
	})
}
Example #30
func (s *Server) handleMigrateState(slotIndex int, op string, group string, keys [][]byte) error {
	shd := s.slots[slotIndex]
	if shd.slotInfo.State.Status != models.SLOT_STATUS_MIGRATE {
		return nil
	}

	if shd.migrateFrom == nil {
		log.Fatalf("migrateFrom not exist %+v", shd)
	}

	if shd.dst.Master() == shd.migrateFrom.Master() {
		log.Fatalf("the same migrate src and dst, %+v", shd)
	}

	redisConn, err := s.pools.GetConn(shd.migrateFrom.Master())
	if err != nil {
		return errors.Trace(err)
	}

	defer s.pools.ReleaseConn(redisConn)

	if redisConn.(*redispool.PooledConn).DB != slotIndex {
		if err := selectDB(redisConn.(*redispool.PooledConn), slotIndex, s.net_timeout); err != nil {
			redisConn.Close()
			return errors.Trace(err)
		}
		redisConn.(*redispool.PooledConn).DB = slotIndex
	}

	redisReader := redisConn.(*redispool.PooledConn).BufioReader()

	// migrate multiple keys
	for _, key := range keys {
		if s.broker == LedisBroker {
			err = ledisWriteMigrateKeyCmd(redisConn.(*redispool.PooledConn), shd.dst.Master(), 30*1000, group, key, slotIndex)
		} else {
			err = writeMigrateKeyCmd(redisConn.(*redispool.PooledConn), shd.dst.Master(), 30*1000, key, slotIndex)
		}

		if err != nil {
			redisConn.Close()
			log.Warningf("migrate key %s error", string(key))
			return errors.Trace(err)
		}

		// handle the migrate result
		resp, err := parser.Parse(redisReader)
		if err != nil {
			redisConn.Close()
			return errors.Trace(err)
		}

		result, _ := resp.Bytes() // result is only used for the debug log below

		log.Debug("migrate", string(key), "from", shd.migrateFrom.Master(), "to", shd.dst.Master(),
			string(result))

		if resp.Type == parser.ErrorResp {
			redisConn.Close()
			log.Error(string(key), string(resp.Raw), "migrateFrom", shd.migrateFrom.Master())
			return errors.New(string(resp.Raw))
		}

		s.counter.Add("Migrate", 1)
	}

	return nil
}