Example #1
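// apiSlotRangeSet takes the per-product ZooKeeper lock and assigns the slot range
// [FromSlot, ToSlot] to NewGroupId, defaulting an empty status to online.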
func apiSlotRangeSet(task RangeSetTask) (int, string) {
	conn := CreateZkConn()
	defer conn.Close()

	lock := utils.GetZkLock(conn, globalEnv.ProductName())
	lock.Lock(fmt.Sprintf("set slot range, %+v", task))
	defer func() {
		err := lock.Unlock()
		if err != nil {
			log.Warning(err)
		}
	}()

	// default set online
	if len(task.Status) == 0 {
		task.Status = string(models.SLOT_STATUS_ONLINE)
	}

	err := models.SetSlotRange(conn, globalEnv.ProductName(), task.FromSlot, task.ToSlot, task.NewGroupId, models.SlotStatus(task.Status))
	if err != nil {
		log.Warning(err)
		return 500, err.Error()
	}

	return jsonRetSucc()
}
Example #2
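// SaveConfigToFile runs until Quit is closed; once an hour it checks the backend address
// list and, when its size changes and is non-empty, persists the new nodes to the config file.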
func (ps *ProxyServer) SaveConfigToFile() {
	ticker := time.NewTicker(time.Hour)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			newaddr := ps.Backend.GetAddrs()
			oldaddr := ps.Conf.Nodes
			if (len(newaddr) != len(oldaddr)) && (len(newaddr) != 0) {

				ps.Conf.Nodes = newaddr
				// persistent nodes info
				nodes := strings.Join(newaddr, ",")
				log.Warning("addr changed to ", nodes)
				ps.Conf.Config.Set("proxy::nodes", nodes)
				err := ps.Conf.Config.SaveConfigFile(ps.Conf.FileName)
				if err != nil {
					log.Warning("persistent config failed ", err)
				}
			}
		case <-ps.Quit:
			goto quit
		}

	}
quit:
	log.Warning("quit SaveConfigToFile...")
}
Example #3
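// apiPromoteServer takes the per-product ZooKeeper lock, loads the server's group,
// and promotes the given server address inside it.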
func apiPromoteServer(server models.Server, param martini.Params) (int, string) {
	conn := CreateZkConn()
	defer conn.Close()

	lock := utils.GetZkLock(conn, globalEnv.ProductName())
	lock.Lock(fmt.Sprintf("promote server %+v", server))
	defer func() {
		err := lock.Unlock()
		if err != nil {
			log.Warning(err)
		}
	}()

	group, err := models.GetGroup(conn, globalEnv.ProductName(), server.GroupId)
	if err != nil {
		log.Warning(err)
		return 500, err.Error()
	}
	err = group.Promote(conn, server.Addr)
	if err != nil {
		log.Warning(errors.ErrorStack(err))
		log.Warning(err)
		return 500, err.Error()
	}

	return jsonRetSucc()
}
Example #4
// Check the task's output file; if its content matches the job's msg_filter, flip the status to Failed.
func (agent *Agent) HandleStatusSuccess(s *TaskStatus) {
	agent.Lock.Lock()
	defer agent.Lock.Unlock()
	if !util.HitFilter(s.TaskPtr.LogFilename, s.TaskPtr.Job.MsgFilter) {
		s.TaskPtr.Job.LastSuccessAt = s.CreateAt
		s.TaskPtr.Job.LastTaskId = s.TaskPtr.TaskId
		if agent.Running[s.TaskPtr.JobId].Status == StatusTimeout {
			s.TaskPtr.Job.LastStatus = JobTimeout
		} else {
			s.TaskPtr.Job.LastStatus = JobSuccess
		}
		delete(agent.Process, s.TaskPtr.TaskId)
		delete(agent.Running, s.TaskPtr.JobId)
		s.TaskPtr.Job.SuccessCnt += 1

		log.Warning("Task success : ", s.TaskPtr.TaskId, s.TaskPtr.Job.Name, s.TaskPtr.ExecDuration)
	} else {
		s.TaskPtr.Job.LastErrAt = s.CreateAt
		s.TaskPtr.Job.LastTaskId = s.TaskPtr.TaskId
		s.TaskPtr.Job.LastStatus = JobFail
		s.Status = StatusFailed
		delete(agent.Process, s.TaskPtr.TaskId)
		delete(agent.Running, s.TaskPtr.JobId)
		s.TaskPtr.Job.ErrCnt += 1
		log.Warningf("Task failed : hit msg_filter error", s.TaskPtr.TaskId, s.TaskPtr.Job.Name, s.TaskPtr.ExecDuration)
		s.Err = errors.New(fmt.Sprintf("Task: %s  Job: %s failed.  hit msg_filter error", s.TaskPtr.TaskId, s.TaskPtr.Job.Name))
	}
	s.Message = util.GetFileContent(s.TaskPtr.LogFilename, 65535, 1)
	if ok := agent.store.UpdateTaskStatus(s); !ok {
		log.Warning("Task status Store Or Update failed ", s)
	}
	agent.PostTaskStatus(s)
}
Example #5
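// WritingLoop drains the back queue and writes each response to the client, flushing only
// when the queue is empty; it closes the session on write errors and signals closeSignal
// once backQ is closed.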
func (s *session) WritingLoop() {
	s.lastUnsentResponseSeq = 1
	for {
		select {
		case resp, ok := <-s.backQ:
			if !ok {
				s.Close()
				s.closeSignal.Done()
				return
			}

			flush, err := s.handleResponse(resp)
			if err != nil {
				log.Warning(s.RemoteAddr(), resp.ctx, errors.ErrorStack(err))
				s.Close() //notify reader to exit
				continue
			}

			if flush && len(s.backQ) == 0 {
				err := s.w.Flush()
				if err != nil {
					s.Close() //notify reader to exit
					log.Warning(s.RemoteAddr(), resp.ctx, errors.ErrorStack(err))
					continue
				}
			}
		}
	}
}
Example #6
// direction: 0 = head, 1 = tail
// at most retain bytes are returned to the caller
func GetFileContent(filename string, retain int64, direction int) string {
	log.Debug("GetFileContent ", filename, retain)
	f, err := os.Open(filename)
	if err != nil {
		log.Warning("GetOutPut open failed ", filename, err)
		return ""
	}
	defer f.Close()

	fs, err := f.Stat()
	if err != nil {
		log.Warning("GetOutPut get Stat failed ", filename, err)
		return ""
	}
	var buf []byte
	seek_at := int64(0)
	if fs.Size() > retain {
		if direction == 1 {
			// tail: skip ahead so only the last retain bytes are read
			seek_at = fs.Size() - retain
		}
		buf = make([]byte, retain)
	} else {
		buf = make([]byte, fs.Size())
	}

	f.Seek(seek_at, 0)

	if _, err := f.Read(buf); err != nil && err != io.EOF {
		log.Warning("GetOutPut read buf failed ", err)
		return ""
	}
	return string(buf)
}
Example #7
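// run consumes task notifications and, for jobs with a webhook URL, POSTs the job and task
// as JSON to that URL; it returns after handling the last message.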
func (self *taskNotify) run() {
	for msg := range self.ch {
		//call webhook
		log.Debug("Send Notify for Job", msg.job, msg.task)
		if len(msg.job.WebHookUrl) == 0 {
			continue
		}
		buf, err := json.Marshal(struct {
			Job  *scheduler.Job  `json:"job"`
			Task *scheduler.Task `json:"task"`
		}{msg.job, msg.task})
		if err != nil {
			log.Warning(err.Error(), msg.job, msg.task)
		}
		body := bytes.NewBuffer(buf)
		_, err = http.Post(msg.job.WebHookUrl, "application/json", body)
		if err != nil {
			log.Warning(err.Error(), msg.job, msg.task)
		}

		if msg.isLast { //no more message
			return
		}
	}
}
Example #8
// SaveCronMetaData persists the cron metadata periodically (currently every 5 minutes).
func (agent *Agent) SaveCronMetaData() {
	meta_file := fmt.Sprintf("%s/dcms_agent.metadata", agent.Conf.WorkDir)
	cronSlice := make([]string, 0)

	agent.Lock.Lock()
	defer agent.Lock.Unlock()
	for k, v := range agent.Jobs {
		if data, err := json.Marshal(v); err == nil {
			cronSlice = append(cronSlice, string(data))
		} else {
			log.Warningf("marshal task: %d failed: %s", k, err)
			return
		}
	}

	if data, err := json.Marshal(cronSlice); err != nil {
		log.Warning("json marshal cronslice failed, ", err)
	} else {
		if len(cronSlice) == 0 {
			log.Warning("cronSlice json empty, just skip write MetaData")
			return
		}
		log.Debug("len of cronSlice:", len(data), data)
		log.Debugf("cronSlice length:%d content:%s", len(cronSlice), cronSlice)
		if e := ioutil.WriteFile(meta_file, data, os.ModePerm); e != nil {
			log.Warning("ioutil write meta_file failed,", e)
		}
	}
}
Example #9
// process task status
// todo: send msg to queue
func (agent *Agent) HandleStatusLoop() {
	defer func() {
		if e := recover(); e != nil {
			log.Warning("HandleStatusLoop  fatal, we will reboot this goroutine", e)
			go agent.HandleStatusLoop()
		}
	}()
	for {
		select {
		case s := <-agent.JobStatusChan:
			s.TaskPtr.ExecDuration = s.CreateAt - s.TaskPtr.ExecAt
			if s.Err == nil {
				s.Err = errors.New("")
			}
			if s.Status == StatusRunning && s.Command != nil {
				agent.HandleStatusRunning(s)
			} else if s.Status == StatusSuccess && s.Command != nil {
				agent.HandleStatusSuccess(s)
			} else if s.Status == StatusTimeout {
				agent.HandleStatusTimeout(s)
			} else if s.Status == StatusKilled {
				agent.HandleStatusKilled(s)
			} else {
				agent.HandleStatusFailed(s)
			}

		case <-agent.StatusLoopQuitChan:
			goto quit
		}
	}
quit:
	log.Warning("receive StatusLoopQuitChan chan, quit HandleStatusLoop")
}
Example #10
// create new server group
func apiAddServerGroup(newGroup models.ServerGroup) (int, string) {
	conn := CreateZkConn()
	defer conn.Close()

	lock := utils.GetZkLock(conn, productName)
	lock.Lock(fmt.Sprintf("add group %+v", newGroup))

	defer func() {
		err := lock.Unlock()
		if err != nil {
			log.Warning(err)
		}
	}()

	newGroup.ProductName = productName

	exists, err := newGroup.Exists(conn)
	if err != nil {
		log.Warning(err)
		return 500, err.Error()
	}
	if exists {
		return jsonRet(map[string]interface{}{
			"ret": 0,
			"msg": "group already exists",
		})
	}
	err = newGroup.Create(conn)
	if err != nil {
		log.Warning(err)
		return 500, err.Error()
	}
	return jsonRetSucc()
}
Example #11
// add a redis server to an existing server group
func apiAddServerToGroup(server models.Server, param martini.Params) (int, string) {
	groupId, _ := strconv.Atoi(param["id"])
	lock := utils.GetZkLock(safeZkConn, globalEnv.ProductName())
	lock.Lock(fmt.Sprintf("add server to group,  %+v", server))
	defer func() {
		err := lock.Unlock()
		if err != nil {
			log.Warning(err)
		}
	}()
	// check group exists first
	serverGroup := models.NewServerGroup(globalEnv.ProductName(), groupId)

	exists, err := serverGroup.Exists(safeZkConn)
	if err != nil {
		log.Warning(err)
		return 500, err.Error()
	}

	// create new group if not exists
	if !exists {
		if err := serverGroup.Create(safeZkConn); err != nil {
			return 500, err.Error()
		}
	}

	if err := serverGroup.AddServer(safeZkConn, &server); err != nil {
		log.Warning(errors.ErrorStack(err))
		return 500, err.Error()
	}

	return jsonRetSucc()
}
Example #12
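// Clean waits for running tasks to finish; once QuitTime has elapsed it sends SIGUSR1 to the
// remaining subprocesses, then shuts down the status loop.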
func (agent *Agent) Clean() {
	// wait for all running tasks to finish,
	// but after quit_time send SIGUSR1 to the remaining subprocesses
	start_quit := time.Now().Unix()
	for l := len(agent.Process); l > 0; {
		log.Warning("process still running, we should quit after all TASK FINISHED, please wait")
		log.Warning("running task is:")
		for task := range agent.Process {
			log.Warningf("%s ", task)
		}
		time.Sleep(5 * time.Second)
		l = len(agent.Process)
		if now := time.Now().Unix(); now-start_quit > agent.Conf.QuitTime {
			log.Warning("quit_time timeout, we will kill subprocess by SIGUSR1")
			for task_id, p := range agent.Process {
				if err := p.Signal(syscall.SIGUSR1); err != nil {
					log.Warningf("SIGUSR1 task:%s failed...", task_id)
					continue
				}
				log.Warningf("SIGUSR1 task:%s OK...wait subprocess quit", task_id)
			}
			goto quit
		}

	}
quit:
	time.Sleep(2 * time.Second)
	close(agent.StatusLoopQuitChan)
	log.Warning("all process DONE, we quit success.")
}
Example #13
// create new server group
func apiAddServerGroup(newGroup models.ServerGroup) (int, string) {
	lock := utils.GetZkLock(safeZkConn, globalEnv.ProductName())
	lock.Lock(fmt.Sprintf("add group %+v", newGroup))

	defer func() {
		err := lock.Unlock()
		if err != nil {
			log.Warning(err)
		}
	}()

	newGroup.ProductName = globalEnv.ProductName()

	exists, err := newGroup.Exists(safeZkConn)
	if err != nil {
		log.Warning(err)
		return 500, err.Error()
	}
	if exists {
		return 500, "group already exists"
	}
	err = newGroup.Create(safeZkConn)
	if err != nil {
		log.Warning(err)
		return 500, err.Error()
	}
	return jsonRetSucc()
}
Example #14
// add a redis server to an existing server group
func apiAddServerToGroup(server models.Server, param martini.Params) (int, string) {
	groupId, _ := strconv.Atoi(param["id"])

	conn := CreateZkConn()
	defer conn.Close()

	lock := utils.GetZkLock(conn, productName)
	lock.Lock(fmt.Sprintf("add server to group,  %+v", server))
	defer func() {
		err := lock.Unlock()
		if err != nil {
			log.Warning(err)
		}
	}()
	// check group exists first
	serverGroup := models.NewServerGroup(productName, groupId)

	exists, err := serverGroup.Exists(conn)
	if err != nil {
		log.Warning(err)
		return 500, err.Error()
	}
	if !exists {
		return jsonRetFail(-1, "group not exists")
	}

	if err := serverGroup.AddServer(conn, &server); err != nil {
		log.Warning(err)
		return 500, err.Error()
	}

	return jsonRetSucc()
}
Example #15
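// genLogFile creates the per-day log directory under the work dir and opens (creating and
// truncating) the task's log file; on failure the task is left without a log file.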
func (t *Task) genLogFile() {
	defer func() {
		if e := recover(); e != nil {
			log.Warning("genLogFile fatal:", e)
		}
	}()
	d := time.Now().Format("20060102")
	filename := fmt.Sprintf("%s/DCMS-%s/%d-%s-%s.log",
		t.Job.Dcms.Conf.WorkDir,
		d,
		t.Job.Id,
		t.Job.Name,
		t.TaskId)
	log.Info("generate logfile :", filename)

	logdir := fmt.Sprintf("%s/DCMS-%s", t.Job.Dcms.Conf.WorkDir, d)

	if err := os.MkdirAll(logdir, os.ModePerm); err != nil {
		log.Warningf("in genLogFile, mkdir logdir %s failed: %v", logdir, err)
	}

	if f, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, os.ModePerm); err != nil {
		log.Warning("in genLogFile os.OpenFile create failed: ", f)
		t.logfile = nil
		t.LogFilename = ""
	} else {
		t.logfile = f
		t.LogFilename = filename
	}
}
Example #16
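// runSlotMigrate builds a MigrateTask for the given slot range and target group, runs the
// pre-migration check, and executes the migration.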
func runSlotMigrate(fromSlotId, toSlotId int, newGroupId int, delay int) error {
	t := &MigrateTask{}
	t.Delay = delay
	t.FromSlot = fromSlotId
	t.ToSlot = toSlotId
	t.NewGroupId = newGroupId
	t.Status = "migrating"
	t.CreateAt = strconv.FormatInt(time.Now().Unix(), 10)
	u, err := uuid.NewV4()
	if err != nil {
		log.Warning(err)
		return errors.Trace(err)
	}
	t.Id = u.String()
	t.stopChan = make(chan struct{})

	// run migrate
	if ok, err := preMigrateCheck(t); ok {
		err = RunMigrateTask(t)
		if err != nil {
			log.Warning(err)
			return errors.Trace(err)
		}
	} else {
		log.Warning(err)
		return errors.Trace(err)
	}
	return nil
}
Example #17
func main() {
	log.SetLevelByString("info")

	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	signal.Notify(c, syscall.SIGTERM)
	go func() {
		<-c
		if createdDashboardNode {
			releaseDashboardNode()
		}
		Fatal("ctrl-c or SIGTERM found, exit")
	}()

	args, err := docopt.Parse(usage, nil, true, "codis config v0.1", true)
	if err != nil {
		log.Error(err)
	}

	// set config file
	var configFile string
	var config *cfg.Cfg
	if args["-c"] != nil {
		configFile = args["-c"].(string)
		config, err = utils.InitConfigFromFile(configFile)
		if err != nil {
			log.Warning("load config file error")
			Fatal(err)
		}
	} else {
		config, err = utils.InitConfig()
		if err != nil {
			log.Warning("load config file error")
			Fatal(err)
		}
	}

	// load global vars
	globalEnv = env.LoadCodisEnv(config)

	// set output log file
	if args["-L"] != nil {
		log.SetOutputByName(args["-L"].(string))
	}

	// set log level
	if args["--log-level"] != nil {
		log.SetLevelByString(args["--log-level"].(string))
	}

	cmd := args["<command>"].(string)
	cmdArgs := args["<args>"].([]string)

	go http.ListenAndServe(":10086", nil)
	err = runCommand(cmd, cmdArgs)
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}
}
Example #18
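// apply validates and normalizes the proxy configuration (name, port, cpu, maxConn, poolSize),
// sets up logging and optional CPU/heap profiling, and starts the debug HTTP listener on :6061.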
func (pc *ProxyConfig) apply() {
	log.SetLevelByString(pc.logLevel)

	if pc.logFile != "" {
		err := log.SetOutputByName(pc.logFile)
		if err != nil {
			log.Fatalf("ProxyConfig SetOutputByName %s failed %s ", pc.logFile, err.Error())
		}
		log.SetRotateByDay()
	}

	if pc.name == "" {
		log.Fatal("ProxyConfig name must not empty")
	}

	if pc.port == 0 {
		log.Fatal("ProxyConfig port  must not 0")
	}

	if pc.cpu > runtime.NumCPU() {
		log.Warningf("ProxyConfig cpu  %d exceed %d, adjust to %d ", pc.cpu, runtime.NumCPU(), runtime.NumCPU())
		pc.cpu = runtime.NumCPU()
	}

	if pc.maxConn > 10000 {
		log.Warningf("ProxyConfig maxconn %d exceed 10000, adjust to 10000", pc.maxConn)
		pc.maxConn = 10000
	}

	runtime.GOMAXPROCS(pc.cpu)

	if pc.poolSize <= 0 || pc.poolSize > 30 {
		log.Warning("ProxyConfig poolSize %d , adjust to 10 ", pc.poolSize)
		pc.poolSize = 10
	}

	if pc.cpuFile != "" {
		f, err := os.Create(pc.cpuFile)
		if err != nil {
			log.Fatal(err)
		}
		log.Warning("Archer start CPUProfile ", pc.cpuFile)
		pprof.StartCPUProfile(f)
		defer pprof.StopCPUProfile()
	}

	if pc.memFile != "" {
		f, err := os.Create(pc.memFile)
		if err == nil {
			log.Warning("Archer start HeapProfile ", pc.memFile)
			pprof.WriteHeapProfile(f)
		}
	}

	go func() {
		log.Warning(http.ListenAndServe(":6061", nil))
	}()
}
Example #19
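// Close shuts down the listener, signals Quit, and waits for all proxy goroutines to finish.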
func (ps *ProxyServer) Close() {

	err := ps.Listen.Close()
	if err != nil {
		log.Warning("Close Listener err ", err)
	}
	log.Info("Proxy Server Close Listener ")
	close(ps.Quit)
	ps.Wg.Wait()
	log.Warning("Proxy Server Close ....")
}
Example #20
// experimental simple auto rebalance :)
func Rebalance(zkConn zkhelper.Conn, delay int) error {
	targetQuota, err := getQuotaMap(zkConn)
	if err != nil {
		return errors.Trace(err)
	}
	livingNodes, err := getLivingNodeInfos(zkConn)
	if err != nil {
		return errors.Trace(err)
	}
	log.Info("start rebalance")
	for _, node := range livingNodes {
		for len(node.CurSlots) > targetQuota[node.GroupId] {
			for _, dest := range livingNodes {
				if dest.GroupId != node.GroupId && len(dest.CurSlots) < targetQuota[dest.GroupId] {
					slot := node.CurSlots[len(node.CurSlots)-1]
					// create a migration task
					t := NewMigrateTask(MigrateTaskInfo{
						Delay:      delay,
						FromSlot:   slot,
						ToSlot:     slot,
						NewGroupId: dest.GroupId,
						Status:     MIGRATE_TASK_MIGRATING,
						CreateAt:   strconv.FormatInt(time.Now().Unix(), 10),
					})
					u, err := uuid.NewV4()
					if err != nil {
						return errors.Trace(err)
					}
					t.Id = u.String()

					if ok, err := preMigrateCheck(t); ok {
						// do migrate
						err := t.run()
						if err != nil {
							log.Warning(err)
							return errors.Trace(err)
						}
					} else {
						log.Warning(err)
						return errors.Trace(err)
					}
					node.CurSlots = node.CurSlots[0 : len(node.CurSlots)-1]
					dest.CurSlots = append(dest.CurSlots, slot)
				}
			}
		}
	}
	log.Info("rebalance finish")
	return nil
}
Example #21
// experimental simple auto rebalance :)
func Rebalance(zkConn zkhelper.Conn, delay int) error {
	targetQuota, err := getQuotaMap(zkConn)
	if err != nil {
		return errors.Trace(err)
	}
	livingNodes, err := getLivingNodeInfos(zkConn)
	if err != nil {
		return errors.Trace(err)
	}
	for _, node := range livingNodes {
		for len(node.CurSlots) > targetQuota[node.GroupId] {
			for _, dest := range livingNodes {
				if dest.GroupId != node.GroupId && len(dest.CurSlots) < targetQuota[dest.GroupId] {
					slot := node.CurSlots[len(node.CurSlots)-1]
					// create a migration task
					t := &MigrateTask{
						MigrateTaskForm: MigrateTaskForm{
							Delay:      delay,
							FromSlot:   slot,
							ToSlot:     slot,
							NewGroupId: dest.GroupId,
							Status:     "migrating",
							CreateAt:   strconv.FormatInt(time.Now().Unix(), 10),
						},
						stopChan: make(chan struct{}),
					}
					u, err := uuid.NewV4()
					if err != nil {
						return errors.Trace(err)
					}
					t.Id = u.String()

					if ok, err := preMigrateCheck(t); ok {
						err = RunMigrateTask(t)
						if err != nil {
							log.Warning(err)
							return errors.Trace(err)
						}
					} else {
						log.Warning(err)
						return errors.Trace(err)
					}
					node.CurSlots = node.CurSlots[0 : len(node.CurSlots)-1]
					dest.CurSlots = append(dest.CurSlots, slot)
				}
			}
		}
	}
	return nil
}
Example #22
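// handleQuery parses the incoming SQL statement, bumps the per-statement counter, and
// dispatches to the matching handler; statements that fail to parse fall through to handleShow.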
func (c *Conn) handleQuery(sql string) (err error) {
	sql = strings.TrimRight(sql, ";")
	stmt, err := sqlparser.Parse(sql, c.alloc)
	if err != nil {
		log.Warning(c.connectionId, sql, err)
		return c.handleShow(stmt, sql, nil)
	}

	log.Debugf("connectionId: %d, statement %T , %s", c.connectionId, stmt, sql)

	switch v := stmt.(type) {
	case *sqlparser.Select:
		c.server.IncCounter("select")
		return c.handleSelect(v, sql, nil)
	case *sqlparser.Insert:
		c.server.IncCounter("insert")
		return c.handleExec(stmt, sql, nil, true)
	case *sqlparser.Replace:
		c.server.IncCounter("replace")
		return c.handleExec(stmt, sql, nil, false)
	case *sqlparser.Update:
		c.server.IncCounter("update")
		return c.handleExec(stmt, sql, nil, false)
	case *sqlparser.Delete:
		c.server.IncCounter("delete")
		return c.handleExec(stmt, sql, nil, false)
	case *sqlparser.Set:
		c.server.IncCounter("set")
		return c.handleSet(v, sql)
	case *sqlparser.SimpleSelect:
		c.server.IncCounter("simple_select")
		return c.handleSimpleSelect(sql, v)
	case *sqlparser.Begin:
		c.server.IncCounter("begin")
		return c.handleBegin()
	case *sqlparser.Commit:
		c.server.IncCounter("commit")
		return c.handleCommit()
	case *sqlparser.Rollback:
		c.server.IncCounter("rollback")
		return c.handleRollback()
	case *sqlparser.Other:
		c.server.IncCounter("other")
		log.Warning(sql)
		return c.handleShow(stmt, sql, nil)
	default:
		return errors.Errorf("statement %T not support now, %+v, %s", stmt, stmt, sql)
	}
}
Example #23
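// MSET splits the multi-set request into individual SET commands and executes each with
// redirect handling; if any SET fails, an error response is queued for the whole call.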
func (s *Session) MSET(req *ArrayResp, seq int64) {
	defer func() {
		s.conCurrency <- 1
	}()

	if req.Length()%2 != 0 {
		s.resps <- WrappedErrorResp([]byte("MSET args count must Even"), seq)
		return
	}

	var failed bool
	for i := 0; i < req.Length(); i += 2 {
		ar := &ArrayResp{}
		ar.Rtype = ArrayType
		br := &BulkResp{}
		br.Rtype = BulkType
		br.Args = [][]byte{[]byte("SET")}
		ar.Args = append(ar.Args, br)

		br1 := &BulkResp{}
		br1.Rtype = BulkType
		br1.Args = [][]byte{req.Args[i+1].Args[0]}
		ar.Args = append(ar.Args, br1)

		br2 := &BulkResp{}
		br2.Rtype = BulkType
		br2.Args = [][]byte{req.Args[i+2].Args[0]}
		ar.Args = append(ar.Args, br2)

		resp, err := s.ExecWithRedirect(ar, true)
		if err != nil {
			log.Warning("Session MSET ExecWithRedirect wrong ", ar.String())
			failed = true
			break
		}
		_, ok := resp.(*SimpleResp)
		if !ok {
			log.Warning("Session MSET ExecWithRedirect  wrong must get SimpleResp ")
			failed = true
			break
		}
	}

	if failed {
		s.resps <- WrappedErrorResp([]byte("MGET partitial failed"), seq)
		return
	}
	s.resps <- WrappedOKResp(seq)
}
Example #24
// CompareAndChange reconciles the agent's cron jobs with the jobs loaded from the store:
// 1. if a cron job no longer exists in cjs, we assume it has been deleted and disable it
// 2. if a cron job's create_at changed, we refill the old cj with the new cj
// 3. we never delete a disabled cron job
func (agent *Agent) CompareAndChange(cjs []*CronJob) {
	agent.Lock.Lock()
	defer agent.Lock.Unlock()
	for _, oldcj := range agent.Jobs {
		find := false
		for _, newcj := range cjs {
			if oldcj.Id == newcj.Id {
				find = true
				break
			}
		}
		if !find {
			// we just disabled cronJob
			log.Warning("cron job disabled|removed for id: ", oldcj.Id)
			oldcj.Disabled = true
		}
	}

	for _, newcj := range cjs {
		if oldcj, ok := agent.Jobs[newcj.Id]; ok {
			//find job , compare CreateAt
			log.Debug("cron job may changed for id: ", newcj.Id)
			log.Debug("oldcj:", oldcj)
			log.Debug("newcj:", newcj)
			oldcj.Name = newcj.Name
			oldcj.CreateUser = newcj.CreateUser
			oldcj.ExecutorFlags = newcj.ExecutorFlags
			oldcj.Executor = newcj.Executor
			oldcj.Runner = newcj.Runner
			oldcj.Timeout = newcj.Timeout
			oldcj.OnTimeoutTrigger = newcj.OnTimeoutTrigger
			oldcj.Disabled = newcj.Disabled
			oldcj.Schedule = newcj.Schedule
			oldcj.WebHookUrl = newcj.WebHookUrl
			oldcj.MsgFilter = newcj.MsgFilter
			oldcj.Signature = newcj.Signature
			oldcj.CreateAt = newcj.CreateAt
			oldcj.StartAt = newcj.StartAt
			oldcj.EndAt = newcj.EndAt

		} else {
			// not find, just append newcj to Jobs map
			newcj.Dcms = agent

			log.Warning("cron job Added for id: ", newcj.Id)
			agent.Jobs[newcj.Id] = newcj
		}
	}
}
Example #25
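// apiGetServerGroup looks up a server group by the id route parameter and returns it as indented JSON.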
func apiGetServerGroup(param martini.Params) (int, string) {
	id := param["id"]
	groupId, err := strconv.Atoi(id)
	if err != nil {
		log.Warning(err)
		return 500, err.Error()
	}
	group, err := models.GetGroup(safeZkConn, globalEnv.ProductName(), groupId)
	if err != nil {
		log.Warning(err)
		return 500, err.Error()
	}
	b, err := json.MarshalIndent(group, " ", "  ")
	if err != nil {
		log.Warning(err)
		return 500, err.Error()
	}
	return 200, string(b)
}
Example #26
func cmdAction(argv []string) (err error) {
	usage := `usage: cconfig action (gc [-n <num> | -s <seconds>] | remove-lock)

options:
	gc:
	gc -n N		keep last N actions;
	gc -s Sec	keep last Sec seconds actions;

	remove-lock	force remove zookeeper lock;
`
	args, err := docopt.Parse(usage, argv, true, "", false)
	if err != nil {
		log.Error(err)
		return errors.Trace(err)
	}

	if args["remove-lock"].(bool) {
		return errors.Trace(runRemoveLock())
	}

	zkLock.Lock(fmt.Sprintf("action, %+v", argv))
	defer func() {
		err := zkLock.Unlock()
		if err != nil {
			log.Info(err)
		}
	}()

	if args["gc"].(bool) {
		if args["-n"].(bool) {
			n, err := strconv.Atoi(args["<num>"].(string))
			if err != nil {
				log.Warning(err)
				return err
			}
			return runGCKeepN(n)
		} else if args["-s"].(bool) {
			sec, err := strconv.Atoi(args["<seconds>"].(string))
			if err != nil {
				log.Warning(err)
				return errors.Trace(err)
			}
			return runGCKeepNSec(sec)
		}
	}

	return nil
}
Example #27
func runSetProxyStatus(proxyName, status string) error {
	if err := models.SetProxyStatus(zkConn, productName, proxyName, status); err != nil {
		log.Warning(err)
		return err
	}
	return nil
}
Example #28
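// handleCrashedServer reacts to a crashed server by type: for a master it promotes a slave via
// the HTTP API and refreshes it, for a slave it only logs the failure, and offline servers are ignored.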
func handleCrashedServer(s *models.Server) error {
	switch s.Type {
	case models.SERVER_TYPE_MASTER:
		//get slave and do promote
		slave, err := getSlave(s)
		if err != nil {
			log.Warning(errors.ErrorStack(err))
			return err
		}

		log.Infof("try promote %+v", slave)
		err = callHttp(nil, genUrl(*apiServer, "/api/server_group/", slave.GroupId, "/promote"), "POST", slave)
		if err != nil {
			log.Errorf("do promote %v failed %v", slave, errors.ErrorStack(err))
			return err
		}
		refreshSlave(s) // refresh the slave's state
	case models.SERVER_TYPE_SLAVE:
		log.Errorf("slave is down: %+v", s)
	case models.SERVER_TYPE_OFFLINE:
		//no need to handle it
	default:
		log.Fatalf("unkonwn type %+v", s)
	}

	return nil
}
Example #29
func jsonRet(output map[string]interface{}) (int, string) {
	b, err := json.Marshal(output)
	if err != nil {
		log.Warning(err)
	}
	return 200, string(b)
}
Example #30
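// HandleConn wraps the accepted connection in a session, registers it with the session manager,
// and serves it until the client disconnects, then closes the connection.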
func HandleConn(p *Proxy, c net.Conn) {
	s := NewSession(p, c)
	p.sm.Put(c.RemoteAddr().String(), s)
	s.Serve()
	log.Warning("Close client ", c.RemoteAddr().String())
	c.Close()
}