Example #1
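// taskKill looks up the task by id, checks (when LDAP is enabled) that the caller owns the task's job, and asks the registered notifier to kill the task.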
func taskKill(params martini.Params, user User) string {
	id := params["id"]
	task, err := GetTaskByTaskId(id)
	if err != nil {
		log.Debug(err)
		return err.Error()
	}

	if task != nil {
		j, err := GetJobByName(task.JobName)
		if err != nil {
			log.Debug(err)
			return err.Error()
		}
		if ldapEnable && j != nil && j.Owner != string(user) {
			return "user not allowed"
		}
	}

	if s.notifier != nil {
		err := s.notifier.OnKillTask(id)
		if err != nil {
			return err.Error()
		}
		return "OK"
	}

	return "error:notifier not registered"
}
Example #2
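// WebhookHandler builds a job for the "repo" form parameter posted to the webhook, creates it, and starts a run.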
func (s *ApiServer) WebhookHandler(w http.ResponseWriter, r *http.Request) {
	//todo: post a tyrant job and start task
	err := r.ParseForm()
	if err != nil {
		http.Error(w, errors.ErrorStack(err), http.StatusInternalServerError)
		return
	}

	vars := mux.Vars(r)
	log.Debug(vars)

	repo := r.Form.Get("repo")
	log.Debug(r.Form, "repo", repo)
	h := &task.JobHelper{Server: s.Server, ExecutorUrls: s.ExecutorUrls}
	job := h.BuildRepoJob(repo)
	if err := h.CreateJob(job); err != nil {
		http.Error(w, errors.ErrorStack(err), http.StatusInternalServerError)
		return
	}

	log.Debugf("%+v", job)

	if err := h.RunJob(job); err != nil {
		http.Error(w, errors.ErrorStack(err), http.StatusInternalServerError)
		return
	}
}
Example #3
// LoadCronMetaData will only be called from the GenJobs function
// when the store is unavailable. It must be called only once, so we needn't take the lock.
func (agent *Agent) LoadCronMetaData() {
	cronSlice := make([]string, 0)
	meta_file := fmt.Sprintf("%s/dcms_agent.metadata", agent.Conf.WorkDir)
	f, err := os.Open(meta_file)
	if err != nil {
		log.Warningf("reading metadata file: %s failed %s", meta_file, err)
		return
	}
	defer f.Close()
	if data, err := ioutil.ReadAll(f); err != nil {
		log.Warningf("ioutil metadata file read all failed %s", err)
	} else {
		if err = json.Unmarshal(data, &cronSlice); err != nil {
			log.Warningf("json unmarshal meta data failed: %s", string(data))
			return
		}
		for _, v := range cronSlice {
			log.Debug("receive cron from metadata file:", v)
			var cj *CronJob
			if err = json.Unmarshal([]byte(v), &cj); err != nil {
				log.Warningf("json unmarshal failed for:", v)
				continue
			}

			cj.Dcms = agent
			agent.Jobs[cj.Id] = cj
		}
		for id, job := range agent.Jobs {
			log.Debug("now Agent has jobs:", id, job)
		}
	}
}
Example #4
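// CheckAutoRunJobs loops forever, checking every 10 seconds for scheduled jobs that need to auto-start and triggering them through the notifier.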
func CheckAutoRunJobs(n Notifier) {
	for {
		log.Debug("start check auto run job...")
		jobs := GetScheduledJobList()
		for _, j := range jobs {
			if j.NeedAutoStart() {
				log.Debug("Auto Run Job Found: ", j)
				n.OnRunJob(strconv.FormatInt(j.Id, 10))
			}
		}
		time.Sleep(10 * time.Second)
	}
}
Example #5
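// CheckAutoRunJobs (variant of the previous example) passes a pointer to the job to OnRunJob instead of its id string.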
func CheckAutoRunJobs(n Notifier) {
	for {
		log.Debug("start check auto run job...")
		jobs := GetScheduledJobList()
		for i, j := range jobs {
			if j.NeedAutoStart() {
				log.Debug("Auto Run Job Found: ", j)
				n.OnRunJob(&jobs[i])
			}
		}
		time.Sleep(10 * time.Second)
	}
}
Example #6
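// dispatch routes one client packet to its handler based on the leading MySQL command byte, updating counters and the last-command record along the way.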
func (c *Conn) dispatch(data []byte) error {
	cmd := data[0]
	data = data[1:]

	log.Debug(c.connectionId, cmd, hack.String(data))
	c.lastCmd = hack.String(data)

	token := c.server.GetToken()

	c.server.GetRWlock().RLock()
	defer func() {
		c.server.GetRWlock().RUnlock()
		c.server.ReleaseToken(token)
	}()

	c.server.IncCounter(mysql.MYSQL_COMMAND(cmd).String())

	switch mysql.MYSQL_COMMAND(cmd) {
	case mysql.COM_QUIT:
		c.Close()
		return nil
	case mysql.COM_QUERY:
		return c.handleQuery(hack.String(data))
	case mysql.COM_PING:
		return c.writeOkFlush(nil)
	case mysql.COM_INIT_DB:
		log.Debug(cmd, hack.String(data))
		if err := c.useDB(hack.String(data)); err != nil {
			return errors.Trace(err)
		}

		return c.writeOkFlush(nil)
	case mysql.COM_FIELD_LIST:
		return c.handleFieldList(data)
	case mysql.COM_STMT_PREPARE:
		// server-side prepare is not supported yet
	case mysql.COM_STMT_EXECUTE:
		log.Fatal("not support", data)
	case mysql.COM_STMT_CLOSE:
		return c.handleStmtClose(data)
	case mysql.COM_STMT_SEND_LONG_DATA:
		log.Fatal("not support", data)
	case mysql.COM_STMT_RESET:
		log.Fatal("not support", data)
	default:
		msg := fmt.Sprintf("command %d not supported now", cmd)
		return mysql.NewError(mysql.ER_UNKNOWN_ERROR, msg)
	}

	return nil
}
Example #7
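// fillCacheAndReturnResults re-reads the row from the backend shards, returns it to the client, and fills the row cache when exactly one row and one key are involved.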
func (c *Conn) fillCacheAndReturnResults(plan *planbuilder.ExecPlan, ti *tabletserver.TableInfo, keys []string) error {
	rowsql, err := generateSelectSql(ti, plan)
	if err != nil {
		return errors.Trace(err)
	}
	log.Info(rowsql)

	ti.Lock.Lock(hack.Slice(keys[0]))
	defer ti.Lock.Unlock(hack.Slice(keys[0]))

	conns, err := c.getShardConns(true, nil, nil)
	if err != nil {
		return errors.Trace(err)
	} else if len(conns) == 0 {
		return errors.Errorf("not enough connection for %s", rowsql)
	}

	rs, err := c.executeInShard(conns, rowsql, nil)
	defer c.closeShardConns(conns)
	if err != nil {
		return errors.Trace(err)
	}

	//todo:fix hard code
	result := rs[0]

	if len(result.Values) == 0 {
		log.Debug("empty set")
		return c.writeResultset(result.Status, result.Resultset)
	}

	//log.Debugf("%+v", result.Values[0])

	retValues := applyFilter(plan.ColumnNumbers, result.Values[0])
	//log.Debug(len(retValues), len(keys))

	r, err := c.buildResultset(getFieldNames(plan, ti), []mysql.RowValue{retValues})
	if err != nil {
		log.Error(err)
		return errors.Trace(err)
	}

	//just do simple cache now
	if len(result.Values) == 1 && len(keys) == 1 && ti.CacheType != schema.CACHE_NONE {
		pks := pkValuesToStrings(ti.PKColumns, plan.PKValues)
		log.Debug("fill cache", pks)
		c.server.IncCounter("fill")
		ti.Cache.Set(pks[0], result.RowDatas[0], 0)
	}

	return c.writeResultset(c.status, r)
}
Example #8
// CompareAndChange compares the agent's cron jobs with those loaded from the store:
// 1. if a cron job no longer exists in cjs, we assume it has been deleted and disable it
// 2. if a cron job's create_at changed, we overwrite the old cj's fields with the new cj's
// 3. we never delete a disabled cron job
func (agent *Agent) CompareAndChange(cjs []*CronJob) {
	agent.Lock.Lock()
	defer agent.Lock.Unlock()
	for _, oldcj := range agent.Jobs {
		find := false
		for _, newcj := range cjs {
			if oldcj.Id == newcj.Id {
				find = true
				break
			}
		}
		if !find {
			// just mark the cronJob as disabled
			log.Warning("cron job disabled|removed for id: ", oldcj.Id)
			oldcj.Disabled = true
		}
	}

	for _, newcj := range cjs {
		if oldcj, ok := agent.Jobs[newcj.Id]; ok {
			// found the job; overwrite the old cj's fields with the new cj's
			log.Debug("cron job may changed for id: ", newcj.Id)
			log.Debug("oldcj:", oldcj)
			log.Debug("newcj:", newcj)
			oldcj.Name = newcj.Name
			oldcj.CreateUser = newcj.CreateUser
			oldcj.ExecutorFlags = newcj.ExecutorFlags
			oldcj.Executor = newcj.Executor
			oldcj.Runner = newcj.Runner
			oldcj.Timeout = newcj.Timeout
			oldcj.OnTimeoutTrigger = newcj.OnTimeoutTrigger
			oldcj.Disabled = newcj.Disabled
			oldcj.Schedule = newcj.Schedule
			oldcj.WebHookUrl = newcj.WebHookUrl
			oldcj.MsgFilter = newcj.MsgFilter
			oldcj.Signature = newcj.Signature
			oldcj.CreateAt = newcj.CreateAt
			oldcj.StartAt = newcj.StartAt
			oldcj.EndAt = newcj.EndAt

		} else {
			// not found, just add newcj to the Jobs map
			newcj.Dcms = agent

			log.Warning("cron job Added for id: ", newcj.Id)
			agent.Jobs[newcj.Id] = newcj
		}
	}
}
Example #9
// filter must split by '|', for example "fatal|error|fail|failed"
func HitFilter(filename string, filter string) bool {
	log.Debug("HitFilter run:", filename, filter)
	filterExp, err := regexp.Compile(fmt.Sprintf(`(?i:(%s))`, filter))
	if err != nil {
		log.Warningf("HitFilter regexp.Compile for %s failed:%s", filter, err)
		return false
	}

	if f, err := os.Open(filename); err != nil {
		log.Warning("HitFilter open file failed ", filename, err)
		return false
	} else {
		defer f.Close()
		freader := bufio.NewReader(f)
		for {
			var str string
			str, err = freader.ReadString('\n')
			s := filterExp.FindStringSubmatch(str)
			if len(s) > 0 {
				log.Debugf("HitFilter hit msg_filter ", s, str)
				return true
			}
			if err != nil {
				if err != io.EOF {
					log.Warning("HitFilter read file failed ", filename, err)
				}
				break
			}
		}
	}
	return false
}
Example #10
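// Start listens on addr, starts the event loop and the web handler, loads persisted jobs from storage, and then serves each accepted connection in its own session goroutine.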
func (self *Server) Start(addr string) {
	ln, err := net.Listen("tcp", addr)
	if err != nil {
		log.Fatal(err)
	}

	go self.EvtLoop()

	log.Debug("listening on", addr)

	go registerWebHandler(self)

	//load background jobs from storage
	err = self.store.Init()
	if err != nil {
		log.Error(err)
		self.store = nil
	} else {
		self.getAllJobs()
	}

	for {
		conn, err := ln.Accept()
		if err != nil { // ignore the failed accept and keep serving
			continue
		}

		session := &session{}
		go session.handleConnection(self, conn)
	}
}
Example #11
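// cmdProxy parses the proxy subcommand (list/online/offline) and dispatches it while holding zkLock.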
func cmdProxy(argv []string) (err error) {
	usage := `usage:
	cconfig proxy list
	cconfig proxy offline <proxy_name>
	cconfig proxy online <proxy_name>
`
	args, err := docopt.Parse(usage, argv, true, "", false)
	if err != nil {
		log.Error(err)
		return err
	}
	log.Debug(args)

	zkLock.Lock(fmt.Sprintf("proxy, %+v", argv))
	defer func() {
		err := zkLock.Unlock()
		if err != nil {
			log.Error(err)
		}
	}()

	if args["list"].(bool) {
		log.Warning(err)
		return runProxyList()
	}

	proxyName := args["<proxy_name>"].(string)
	if args["online"].(bool) {
		return runSetProxyStatus(proxyName, models.PROXY_STATE_ONLINE)
	}
	if args["offline"].(bool) {
		return runSetProxyStatus(proxyName, models.PROXY_STATE_MARK_OFFLINE)
	}
	return nil
}
Example #12
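// sendHeartbeat reports TASK_RUNNING for every task this executor is currently tracking.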
func (self *ShellExecutor) sendHeartbeat() {
	for taskId := range self.process {
		tid := taskId
		log.Debug("send heartbeat, taskId", tid)
		self.sendStatusUpdate(tid, mesos.TaskState_TASK_RUNNING, "")
	}
}
Example #13
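// run consumes notify messages from self.ch and POSTs the job/task pair as JSON to the job's webhook URL, returning after the last message.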
func (self *taskNotify) run() {
	for msg := range self.ch {
		//call webhook
		log.Debug("Send Notify for Job", msg.job, msg.task)
		if len(msg.job.WebHookUrl) == 0 {
			continue
		}
		buf, err := json.Marshal(struct {
			Job  *scheduler.Job  `json:"job"`
			Task *scheduler.Task `json:"task"`
		}{msg.job, msg.task})
		if err != nil {
			log.Warning(err.Error(), msg.job, msg.task)
		}
		body := bytes.NewBuffer(buf)
		resp, err := http.Post(msg.job.WebHookUrl, "application/json", body)
		if err != nil {
			log.Warning(err.Error(), msg.job, msg.task)
		} else {
			// close the response body so HTTP connections are not leaked
			resp.Body.Close()
		}

		if msg.isLast { //no more message
			return
		}
	}
}
Example #14
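// handleMesosOffers launches as many ready tasks as the received offers allow and declines the offers that are left over.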
func (self *ResMan) handleMesosOffers(t *cmdMesosOffers) {
	driver := t.driver
	offers := t.offers

	defer func() {
		t.wait <- struct{}{}
	}()

	log.Debug("ResourceOffers")
	ts := self.getReadyTasks()
	log.Debugf("ready tasks:%+v", ts)
	var idx, left int

	for idx = 0; idx < len(offers); idx++ {
		n := self.runTaskUsingOffer(driver, offers[idx], ts[left:])
		if n == 0 {
			break
		}
		left += n
	}

	// decline the remaining offers
	for i := idx; i < len(offers); i++ {
		driver.DeclineOffer(offers[i].Id)
	}
}
Example #15
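// Run wires the ResMan callbacks into a Mesos scheduler driver, starts the event loop, and blocks until the exit signal arrives.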
func (self *ResMan) Run() {
	master := flag.String("master", "localhost:5050", "Location of leading Mesos master")
	failoverTimeout := flag.Float64("failoverTimeout", 60, "failover timeout")
	// register all flags before Parse, otherwise failoverTimeout could never be set from the command line
	flag.Parse()
	frameworkIdStr := "tyrant"
	frameworkId := &mesos.FrameworkID{Value: &frameworkIdStr}

	driver := mesos.SchedulerDriver{
		Master: *master,
		Framework: mesos.FrameworkInfo{
			Name:            proto.String("GoFramework"),
			User:            proto.String(""),
			FailoverTimeout: failoverTimeout,
			Id:              frameworkId,
		},

		Scheduler: &mesos.Scheduler{
			ResourceOffers: self.OnResourceOffers,
			StatusUpdate:   self.OnStatusUpdate,
			Error:          self.OnError,
			Disconnected:   self.OnDisconnected,
			Registered:     self.OnRegister,
			Reregistered:   self.OnReregister,
		},
	}

	driver.Init()
	defer driver.Destroy()
	go self.EventLoop()

	driver.Start()
	<-self.exit
	log.Debug("exit")
	driver.Stop(false)
}
Example #16
// we will save cron metadata periodically, currently every 5 minutes
func (agent *Agent) SaveCronMetaData() {
	meta_file := fmt.Sprintf("%s/dcms_agent.metadata", agent.Conf.WorkDir)
	cronSlice := make([]string, 0)

	agent.Lock.Lock()
	defer agent.Lock.Unlock()
	for k, v := range agent.Jobs {
		if data, err := json.Marshal(v); err == nil {
			cronSlice = append(cronSlice, string(data))
		} else {
			log.Warningf("marshal task: %d failed: %s", k, err)
			return
		}
	}

	if data, err := json.Marshal(cronSlice); err != nil {
		log.Warning("json marshal cronslice failed, ", err)
	} else {
		if len(cronSlice) == 0 {
			log.Warning("cronSlice json empty, just skip write MetaData")
			return
		}
		log.Debug("len of cronSlice:", len(data), data)
		log.Debugf("cronSlice length:%d content:%s", len(cronSlice), cronSlice)
		if e := ioutil.WriteFile(meta_file, data, os.ModePerm); e != nil {
			log.Warning("ioutil write meta_file failed,", e)
		}
	}
}
Example #17
func (self *ResMan) Run(master string) {
	frameworkIdStr := FRAMEWORK_ID
	frameworkId := &mesos.FrameworkID{Value: &frameworkIdStr}
	driver := mesos.SchedulerDriver{
		Master: master,
		Framework: mesos.FrameworkInfo{
			Name:            proto.String("TyrantFramework"),
			User:            proto.String(""),
			FailoverTimeout: failoverTimeout,
			Id:              frameworkId,
		},

		Scheduler: &mesos.Scheduler{
			ResourceOffers: self.OnResourceOffers,
			StatusUpdate:   self.OnStatusUpdate,
			Error:          self.OnError,
			Disconnected:   self.OnDisconnected,
			Registered:     self.OnRegister,
			Reregistered:   self.OnReregister,
		},
	}

	driver.Init()
	defer driver.Destroy()
	go self.EventLoop()

	driver.Start()
	<-self.exit
	log.Debug("exit")
	driver.Stop(false)
}
Example #18
func cmdProxy(argv []string) (err error) {
	usage := `usage:
	codis-config proxy list
	codis-config proxy offline <proxy_name>
	codis-config proxy online <proxy_name>
`
	args, err := docopt.Parse(usage, argv, true, "", false)
	if err != nil {
		log.Error(err)
		return err
	}
	log.Debug(args)

	if args["list"].(bool) {
		return runProxyList()
	}

	proxyName := args["<proxy_name>"].(string)
	if args["online"].(bool) {
		return runSetProxyStatus(proxyName, models.PROXY_STATE_ONLINE)
	}
	if args["offline"].(bool) {
		return runSetProxyStatus(proxyName, models.PROXY_STATE_MARK_OFFLINE)
	}
	return nil
}
Example #19
// DeleteTaskById kills the subprocess of the given task
func (s *Server) DeleteTaskById(p martini.Params) (int, string) {
	log.Debug("Server dcms http_api DeleteJobById")
	taskid, ok := p["taskid"]
	if !ok {
		return responseError(500, "GetTaskById without taskid")
	}

	// we will kill the subprocess asynchronously, in a separate goroutine
	go func(taskid string) {
		defer func() {
			if err := recover(); err != nil {
				log.Warningf("Delete Task By Id:%s panic: %s", taskid, err)
			}
		}()
		s.DCMS.Lock.Lock()
		defer s.DCMS.Lock.Unlock()
		for _, task := range s.DCMS.Running {
			if task.TaskId != taskid {
				continue
			}
			s.DCMS.KillTask(task)
			return
		}
		log.Warningf("Delete Task By Id:%s not exists or may be done", taskid)
	}(taskid)

	return responseSuccess(fmt.Sprintf("Task:%s will be killed async, or may be done normal", taskid))

}
Example #20
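// cmdDashboard parses the optional --addr and --http-log arguments and starts the dashboard HTTP server.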
func cmdDashboard(argv []string) (err error) {
	usage := `usage: codis-config dashboard [--addr=<address>] [--http-log=<log_file>]

options:
	--addr	listen ip:port, e.g. localhost:12345, :8086, [default: :8086]
	--http-log	http request log [default: request.log ]
`

	args, err := docopt.Parse(usage, argv, true, "", false)
	if err != nil {
		log.Error(err)
		return err
	}
	log.Debug(args)

	logFileName := "request.log"
	if args["--http-log"] != nil {
		logFileName = args["--http-log"].(string)
	}

	addr := ":8086"
	if args["--addr"] != nil {
		addr = args["--addr"].(string)
	}

	runDashboard(addr, logFileName)
	return nil
}
Example #21
// when agent start, GenJobs() will be called once
func (agent *Agent) GenJobs() {
	agent.Lock.Lock()
	defer agent.Lock.Unlock()
	cjs, err := agent.store.GetMyJobs()
	log.Debug("GenJobs receive cjs: ", cjs)
	if err != nil {
		log.Warningf("get CronJob error: %s ", err)
		log.Warning("we will load cronjob metadata from localhost")
		agent.LoadCronMetaData()
		return
	}
	for _, cj := range cjs {
		cj.Dcms = agent
		agent.Jobs[cj.Id] = cj
		log.Debug("GenJobs receive job: ", cj)
	}
}
Example #22
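// GetAllJobs returns every cron job currently known to the agent through responseSuccess.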
func (s *Server) GetAllJobs() (int, string) {
	log.Debug("Server dcms http_api GetAllJobs")
	cronSlice := make([]*CronJob, 0)
	for _, job := range s.DCMS.Jobs {
		cronSlice = append(cronSlice, job)
	}
	return responseSuccess(cronSlice)
}
Example #23
func (s *Server) GetAllTasks() (int, string) {
	log.Debug("Server dcms http_api GetAllTasks")
	taskSlice := make([]*Task, 0)
	for _, task := range s.DCMS.Running {
		taskSlice = append(taskSlice, task)
	}
	return responseSuccess(taskSlice)
}
Example #24
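// jobRun triggers a run of the job with the given id through the notifier and returns the resulting task id via responseSuccess.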
func jobRun(ctx *web.Context, id string) string {
	j, err := GetJobById(id)
	if err != nil {
		return responseError(ctx, -1, err.Error())
	}

	if s.notifier != nil && j != nil {
		taskId, err := s.notifier.OnRunJob(id)
		if err != nil {
			log.Debug(err.Error())
			return responseError(ctx, -2, err.Error())
		}

		return responseSuccess(ctx, taskId)
	}
	log.Debug("Notifier not found")
	return responseError(ctx, -3, "notifier not found")
}
Example #25
func GetTaskList() []Task {
	var tasks []Task
	_, err := sharedDbMap.Select(&tasks, "select * from tasks order by start_ts desc")
	if err != nil {
		log.Debug(err.Error())
		return nil
	}
	return tasks
}
Example #26
func GetTaskByTaskId(id string) (*Task, error) {
	var task Task
	err := sharedDbMap.SelectOne(&task, "select * from tasks where id=?", id)
	if err != nil {
		log.Debug(err.Error())
		return nil, err
	}
	return &task, nil
}
Example #27
func GetScheduledJobList() []Job {
	var jobs []Job
	_, err := sharedDbMap.Select(&jobs, "select * from jobs where schedule <> ''")
	if err != nil {
		log.Debug(err.Error())
		return nil
	}
	return jobs
}
Example #28
// DoneJob removes a finished job, identified by its handle, from storage.
func (self *MYSQLStorage) DoneJob(j *Job) error {
	log.Debug("DoneJob:", j.Handle)
	_, err := self.db.Exec(delJobSQL, j.Handle)
	if err != nil {
		log.Error(err, j.Handle)
		return err
	}

	return nil
}
Example #29
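// cmdServer parses the server subcommand and dispatches to the matching server-group operation while holding zkLock.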
func cmdServer(argv []string) (err error) {
	usage := `usage:
	cconfig server list
	cconfig server add <group_id> <redis_addr> <role>
	cconfig server remove <group_id> <redis_addr>
	cconfig server promote <group_id> <redis_addr>
	cconfig server add-group <group_id>
	cconfig server remove-group <group_id>
`
	args, err := docopt.Parse(usage, argv, true, "", false)
	if err != nil {
		log.Error(err)
		return err
	}
	log.Debug(args)

	zkLock.Lock(fmt.Sprintf("server, %+v", argv))
	defer func() {
		err := zkLock.Unlock()
		if err != nil {
			log.Error(err)
		}
	}()

	if args["list"].(bool) {
		return runListServerGroup()
	}

	groupId, err := strconv.Atoi(args["<group_id>"].(string))
	if err != nil {
		log.Warning(err)
		return err
	}

	if args["remove-group"].(bool) {
		return runRemoveServerGroup(groupId)
	}
	if args["add-group"].(bool) {
		return runAddServerGroup(groupId)
	}

	serverAddr := args["<redis_addr>"].(string)
	if args["add"].(bool) {
		role := args["<role>"].(string)
		return runAddServerToGroup(groupId, serverAddr, role)
	}
	if args["remove"].(bool) {
		return runRemoveServerFromGroup(groupId, serverAddr)
	}
	if args["promote"].(bool) {
		return runPromoteServerToMaster(groupId, serverAddr)
	}

	return nil
}
Example #30
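// OnLaunchTask writes the shell script passed in os.Args[1] to a file named after the task id, runs it in a goroutine, and reports RUNNING and FINISHED status updates back to the driver.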
func (self *ShellExecutor) OnLaunchTask(driver *mesos.ExecutorDriver, taskInfo mesos.TaskInfo) {
	fmt.Println("Launch task:", taskInfo.TaskId.GetValue())
	log.Debug("send running state")
	self.sendStatusUpdate(taskInfo.TaskId.GetValue(), mesos.TaskState_TASK_RUNNING, "task is running!")

	log.Debugf("%+v", os.Args)
	startch := make(chan struct{}, 1)
	if len(os.Args) == 2 {
		fname := taskInfo.TaskId.GetValue()
		ioutil.WriteFile(fname, []byte(os.Args[1]), 0644)
		cmd := exec.Command("/bin/sh", fname)
		go func() {
			defer func() {
				self.finish <- taskInfo.TaskId.GetValue()
				log.Debug("send finish state")
				self.sendStatusUpdate(taskInfo.TaskId.GetValue(), mesos.TaskState_TASK_FINISHED, "Go task is done!")
				time.Sleep(10 * time.Second)
				driver.Stop()
			}()

			self.lock.Lock()
			self.process[taskInfo.TaskId.GetValue()] = cmd
			self.lock.Unlock()
			startch <- struct{}{}
			out, err := cmd.Output()

			if err != nil {
				log.Error(err.Error())
			} else {
				fmt.Println(string(out))
				//	log.Debug(string(out))
			}
		}()
	} else {
		log.Debug("send finish state")
		self.sendStatusUpdate(taskInfo.TaskId.GetValue(), mesos.TaskState_TASK_FINISHED, "Go task is done!")
		time.Sleep(10 * time.Second)
		driver.Stop()
		// nothing was launched, so don't block on startch below
		return
	}
	<-startch
}