Example 1
func (self *ResMan) addReadyTask(id string) (string, error) {
	if self.ready.Exist(id) {
		return "", fmt.Errorf("%s already exist: %+v", id, self.ready.Get(id))
	}

	job, err := scheduler.GetJobById(id)
	if err != nil {
		return "", errors.Trace(err)
	}

	persistentTask := &scheduler.Task{TaskId: self.genTaskId(), Status: scheduler.STATUS_READY,
		StartTs: time.Now().Unix(), JobName: job.Name}
	log.Debugf("%+v", persistentTask)
	err = persistentTask.Save()
	if err != nil {
		return "", errors.Trace(err)
	}

	job.LastTaskId = persistentTask.TaskId
	job.Save()

	t := &Task{Tid: persistentTask.TaskId, job: job, state: taskReady}
	self.ready.Add(t.Tid, t)
	log.Debugf("ready task %+v, total count:%d", t, self.ready.Length())

	return persistentTask.TaskId, nil
}
Example 2
func (self *ResMan) handleMesosOffers(t *cmdMesosOffers) {
	driver := t.driver
	offers := t.offers

	defer func() {
		t.wait <- struct{}{}
	}()

	log.Debugf("ResourceOffers %+v", offers)
	ts := self.getReadyTasks()
	log.Debugf("ready tasks:%+v", ts)
	var idx, left int

	for idx = 0; idx < len(offers); idx++ {
		n := self.runTaskUsingOffer(driver, offers[idx], ts[left:])
		if n == 0 {
			break
		}
		left += n
	}

	//decline the remaining offers
	for i := idx; i < len(offers); i++ {
		driver.DeclineOffer(offers[i].Id)
	}
}
Example 3
func (self *ResMan) getReadyTasks() []*Task {
	var rts []*Task
	self.ready.Each(func(key string, t *Task) bool {
		log.Debugf("ready task:%+v", t)
		rts = append(rts, t)
		return true
	})

	log.Debugf("ready tasks: %+v", rts)

	return rts
}
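In Examples 1 and 3, self.ready exposes Exist, Get, Add, Each, and Length. A minimal sketch of such a task registry, assuming it is only touched from the ResMan event loop (the names are guesses; the real type may be a synchronized map):

type TaskMap struct {
	m map[string]*Task
}

func NewTaskMap() *TaskMap { return &TaskMap{m: make(map[string]*Task)} }

func (tm *TaskMap) Exist(id string) bool   { _, ok := tm.m[id]; return ok }
func (tm *TaskMap) Get(id string) *Task    { return tm.m[id] }
func (tm *TaskMap) Add(id string, t *Task) { tm.m[id] = t }
func (tm *TaskMap) Length() int            { return len(tm.m) }

// Each visits every task until the callback returns false.
func (tm *TaskMap) Each(f func(key string, t *Task) bool) {
	for k, v := range tm.m {
		if !f(k, v) {
			break
		}
	}
}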
Example 4
func (self *ResMan) saveTaskStatus(persistentTask *scheduler.Task, status mesos.TaskStatus, currentTask *Task) {
	if persistentTask == nil {
		return
	}

	var url string
	if len(currentTask.Pwd) > 0 {
		url = fmt.Sprintf("http://%v:%v/#/slaves/%s/browse?path=%s",
			Inet_itoa(self.masterInfo.GetIp()), self.masterInfo.GetPort(), currentTask.SlaveId, currentTask.Pwd)
	} else {
		url = fmt.Sprintf("http://%v:%v/#/frameworks/%s", Inet_itoa(self.masterInfo.GetIp()),
			self.masterInfo.GetPort(), self.frameworkId)
	}
	persistentTask.Status = (*status.State).String()
	if len(status.GetMessage()) > 0 {
		persistentTask.Message = status.GetMessage()
	}
	persistentTask.Url = url
	currentTask.job.LastStatus = persistentTask.Status
	currentTask.job.Save()
	persistentTask.UpdateTs = time.Now().Unix()
	persistentTask.Save()
	switch *status.State {
	case mesos.TaskState_TASK_FINISHED, mesos.TaskState_TASK_FAILED,
		mesos.TaskState_TASK_KILLED, mesos.TaskState_TASK_LOST:
		currentTask.job.SendNotify(persistentTask)
	}

	log.Debugf("persistentTask:%+v", persistentTask)
}
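Inet_itoa above renders the Mesos master's packed IPv4 address as a dotted quad. A possible implementation; the byte order here is an assumption, so swap the shifts if addresses come out reversed:

func Inet_itoa(ip uint32) string {
	return fmt.Sprintf("%d.%d.%d.%d",
		byte(ip), byte(ip>>8), byte(ip>>16), byte(ip>>24))
}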
Example 5
func readHeader(r io.Reader) (magic uint32, tp uint32, size uint32, err error) {
	magic, err = readUint32(r)
	if err != nil {
		return
	}

	if magic != common.Req && magic != common.Res {
		log.Debugf("magic not match 0x%x", magic)
		err = invalidMagic
		return
	}

	tp, err = readUint32(r)
	if err != nil {
		return
	}

	if !validCmd(tp) {
		//gearman bug: per the protocol this should be treated as an error, but gearman allows it
		if tp == 39 { //the benchmark worker sends this, yet it does not appear in the protocol description
			tp = common.GRAB_JOB_UNIQ
			size, err = readUint32(r)
			return
		}
		err = invalidArg
		return
	}

	size, err = readUint32(r)

	return
}
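readUint32 is not shown in this example; a minimal sketch using encoding/binary and io, assuming the big-endian framing of the gearman wire protocol:

func readUint32(r io.Reader) (uint32, error) {
	var b [4]byte
	if _, err := io.ReadFull(r, b[:]); err != nil {
		return 0, err
	}
	return binary.BigEndian.Uint32(b[:]), nil
}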
Example 6
// direction: 0 reads from the head, 1 from the tail
// we return at most retain bytes to the caller
func GetFileContent(filename string, retain int64, direction int) string {
	log.Debugf("GetFileContent ", filename, retain)
	f, err := os.Open(filename)
	if err != nil {
		log.Warning("GetOutPut open failed ", filename, err)
		return ""
	}
	defer f.Close()

	fs, err := f.Stat()
	if err != nil {
		log.Warning("GetOutPut get Stat failed ", filename, err)
		return ""
	}
	var buf []byte
	seek_at := int64(0)
	if fs.Size() > retain {
		//cap the read at retain bytes; for tail we also seek so the last retain bytes are read
		if direction == 1 {
			seek_at = fs.Size() - retain
		}
		buf = make([]byte, retain)
	} else {
		buf = make([]byte, fs.Size())
	}

	f.Seek(seek_at, 0)

	if _, err := f.Read(buf); err != nil && err != io.EOF {
		log.Warning("GetOutPut read buf failed ", err)
		return ""
	}
	return string(buf)
}
Example 7
// filter is a '|'-separated list, for example "fatal|error|fail|failed"
func HitFilter(filename string, filter string) bool {
	log.Debug("HitFilter run:", filename, filter)
	filterExp, err := regexp.Compile(fmt.Sprintf(`(?i:(%s))`, filter))
	if err != nil {
		log.Warningf("HitFilter regexp.Compile for %s failed:%s", filter, err)
		return false
	}

	if f, err := os.Open(filename); err != nil {
		log.Warning("HitFilter open file failed ", filename, err)
		return false
	} else {
		defer f.Close()
		freader := bufio.NewReader(f)
		for {
			var str string
			str, err = freader.ReadString('\n')
			s := filterExp.FindStringSubmatch(str)
			if len(s) > 0 {
				log.Debugf("HitFilter hit msg_filter ", s, str)
				return true
			}
			if err != nil {
				if err != io.EOF {
					log.Warning("HitFilter read line failed ", err)
				}
				break
			}
		}
	}
	return false
}
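A possible call site, with a hypothetical log path and the filter from the comment above:

if HitFilter("/var/log/dcms/job.stderr", "fatal|error|fail|failed") {
	// the job output matched the filter; notify whoever owns the job
}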
Example 8
// we save the cron metadata periodically, currently every 5 minutes
func (agent *Agent) SaveCronMetaData() {
	meta_file := fmt.Sprintf("%s/dcms_agent.metadata", agent.Conf.WorkDir)
	cronSlice := make([]string, 0)

	agent.Lock.Lock()
	defer agent.Lock.Unlock()
	for k, v := range agent.Jobs {
		if data, err := json.Marshal(v); err == nil {
			cronSlice = append(cronSlice, string(data))
		} else {
			log.Warningf("marshal task: %d failed: %s", k, err)
			return
		}
	}

	if data, err := json.Marshal(cronSlice); err != nil {
		log.Warning("json marshal cronslice failed, ", err)
	} else {
		if len(cronSlice) == 0 {
			log.Warning("cronSlice json empty, just skip write MetaData")
			return
		}
		log.Debug("len of cronSlice:", len(data), data)
		log.Debugf("cronSlice length:%d content:%s", len(cronSlice), cronSlice)
		if e := ioutil.WriteFile(meta_file, data, os.ModePerm); e != nil {
			log.Warning("ioutil write meta_file failed,", e)
		}
	}
}
Example 9
func (ti *TableInfo) fetchColumns(conn *mysql.MySqlConn) error {
	columns, err := conn.Execute(fmt.Sprintf("show full columns from `%s`", ti.Name))
	if err != nil {
		return errors.Trace(err)
	}

	for _, row := range columns.Values {
		v, err := sqltypes.BuildValue(row[5])
		if err != nil {
			return errors.Trace(err)
		}

		var collation string
		if row[2] != nil {
			collation = string(row[2].([]byte))
		}
		extra := string(row[6].([]byte))
		columnType := string(row[1].([]byte))
		columnName := string(row[0].([]byte))
		ti.AddColumn(columnName, columnType, collation,
			v, extra)
	}

	log.Debugf("%s %+v", ti.Name, ti.Columns)

	return nil
}
Example 10
func (s *ApiServer) WebhookHandler(w http.ResponseWriter, r *http.Request) {
	//todo: post a tyrant job and start task
	err := r.ParseForm()
	if err != nil {
		http.Error(w, errors.ErrorStack(err), http.StatusInternalServerError)
		return
	}

	vars := mux.Vars(r)
	log.Debug(vars)

	repo := r.Form.Get("repo")
	log.Debug(r.Form, "repo", repo)
	h := &task.JobHelper{Server: s.Server, ExecutorUrls: s.ExecutorUrls}
	job := h.BuildRepoJob(repo)
	if err := h.CreateJob(job); err != nil {
		http.Error(w, errors.ErrorStack(err), http.StatusInternalServerError)
		return
	}

	log.Debugf("%+v", job)

	if err := h.RunJob(job); err != nil {
		http.Error(w, errors.ErrorStack(err), http.StatusInternalServerError)
		return
	}
}
Example 11
func (ti *TableInfo) SetPK(colnames []string) error {
	log.Debugf("table %s SetPK %s", ti.Name, colnames)
	pkIndex := schema.NewIndex("PRIMARY")
	colnums := make([]int, len(colnames))
	for i, colname := range colnames {
		colnums[i] = ti.FindColumn(strings.ToLower(colname))
		if colnums[i] == -1 {
			return errors.Errorf("column %s not found, %+v", colname, ti.Columns)
		}
		pkIndex.AddColumn(strings.ToLower(colname), 1)
	}

	for _, col := range ti.Columns {
		pkIndex.DataColumns = append(pkIndex.DataColumns, strings.ToLower(col.Name))
	}

	if len(ti.Indexes) == 0 {
		ti.Indexes = make([]*schema.Index, 1)
	} else if ti.Indexes[0].Name != "PRIMARY" {
		ti.Indexes = append(ti.Indexes, nil)
		copy(ti.Indexes[1:], ti.Indexes[:len(ti.Indexes)-1])
	} // else we replace the current primary key

	ti.Indexes[0] = pkIndex
	ti.PKColumns = colnums
	return nil
}
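The append/copy pair in SetPK is the standard slice insert-at-front idiom; in isolation (s and newHead are generic placeholders):

s = append(s, nil)        // grow the slice by one
copy(s[1:], s[:len(s)-1]) // shift the existing elements right
s[0] = newHead            // write the new first element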
Example 12
func (self *Server) removeWorker(l *list.List, sessionId int64) {
	for it := l.Front(); it != nil; it = it.Next() {
		if it.Value.(*Worker).SessionId == sessionId {
			log.Debugf("removeWorker sessionId %d", sessionId)
			l.Remove(it)
			return
		}
	}
}
Example 13
func PingServer(checker AliveChecker, errCtx interface{}, errCh chan<- interface{}) {
	err := checker.CheckAlive()
	log.Debugf("check %+v, result:%v, errCtx:%+v", checker, err, errCtx)
	if err != nil {
		errCh <- errCtx
		return
	}
	errCh <- nil
}
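PingServer is written to be fanned out; a sketch of checking several backends concurrently, where backends and its AliveChecker implementation are assumptions:

errCh := make(chan interface{}, len(backends))
for _, b := range backends {
	go PingServer(b, b.Addr, errCh)
}
for range backends {
	if ctx := <-errCh; ctx != nil {
		log.Warningf("backend not alive: %+v", ctx)
	}
}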
Example 14
func (c *MySqlConn) readInitialHandshake() error {
	data, err := c.readPacket()
	if err != nil {
		return err
	}

	if data[0] == ERR_HEADER {
		//err is nil here; the server sent an error packet, so build a real error
		return errors.Errorf("read initial handshake: got an error packet")
	}

	if data[0] < MinProtocolVersion {
		return errors.Errorf("invalid protocol version %d, must >= 10", data[0])
	}

	//skip mysql version and connection id
	//mysql version end with 0x00
	//connection id length is 4
	pos := 1 + bytes.IndexByte(data[1:], 0x00) + 1 + 4

	c.salt = append(c.salt, data[pos:pos+8]...)

	//skip filler
	pos += 8 + 1

	//capability lower 2 bytes
	c.capability = uint32(binary.LittleEndian.Uint16(data[pos : pos+2]))

	pos += 2

	if len(data) > pos {
		//skip server charset
		//c.charset = data[pos : pos+1]
		log.Debugf("charset %+v", data[pos:pos+1])
		pos++

		c.status = binary.LittleEndian.Uint16(data[pos : pos+2])
		pos += 2

		c.capability = uint32(binary.LittleEndian.Uint16(data[pos:pos+2]))<<16 | c.capability

		pos += 2

		//skip auth data len or [00]
		//skip reserved (all [00])
		pos += 10 + 1

		// The documentation is ambiguous about the length.
		// The official Python library uses the fixed length 12
		// mysql-proxy also use 12
		// which is not documented but seems to work.
		c.salt = append(c.salt, data[pos:pos+12]...)
	}

	return nil
}
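For reference, the offsets above follow the MySQL initial handshake (protocol version 10) layout: 1 byte protocol version, a NUL-terminated server version string, 4 bytes connection id, 8 bytes of auth-plugin data (the first half of the salt), 1 filler byte, 2 bytes capability flags (lower half), then optionally 1 byte server charset, 2 bytes status flags, 2 bytes capability flags (upper half), 1 byte auth data length, 10 reserved bytes, and the remaining salt bytes (12 in practice).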
Example 15
func (self *ResMan) handleMesosStatusUpdate(t *cmdMesosStatusUpdate) {
	status := t.status

	defer func() {
		t.wait <- struct{}{}
	}()

	taskId := status.TaskId.GetValue()
	log.Debugf("Received task %+v status: %+v", taskId, status)
	currentTask := self.running.Get(taskId)
	if currentTask == nil {
		task, err := scheduler.GetTaskByTaskId(taskId)
		if err != nil {
			return
		}
		job, err := scheduler.GetJobByName(task.JobName)
		if err != nil {
			return
		}
		currentTask = &Task{Tid: task.TaskId, job: job, SlaveId: status.SlaveId.GetValue(), state: taskRuning}
		self.running.Add(currentTask.Tid, currentTask) //add this orphan task to the running queue
	}

	pwd := string(status.Data)
	if len(pwd) > 0 && len(currentTask.Pwd) == 0 {
		currentTask.Pwd = pwd
	}

	currentTask.LastUpdate = time.Now()

	switch *status.State {
	case mesos.TaskState_TASK_FINISHED:
		currentTask.job.LastSuccessTs = time.Now().Unix()
		self.removeRunningTask(taskId)
	case mesos.TaskState_TASK_FAILED, mesos.TaskState_TASK_KILLED, mesos.TaskState_TASK_LOST:
		currentTask.job.LastErrTs = time.Now().Unix()
		self.removeRunningTask(taskId)
	case mesos.TaskState_TASK_STAGING:
		//todo: update something
	case mesos.TaskState_TASK_STARTING:
		//todo:update something
	case mesos.TaskState_TASK_RUNNING:
		//todo:update something
	default:
		log.Fatalf("should never happend %+v", status.State)
	}

	persistentTask, err := scheduler.GetTaskByTaskId(taskId)
	if err != nil {
		log.Error(err)
	}

	self.saveTaskStatus(persistentTask, status, currentTask)
}
Example 16
func (c *Conn) handleSelect(stmt *sqlparser.Select, sql string, args []interface{}) error {
	// handle cache
	plan, ti, err := c.getPlanAndTableInfo(stmt)
	if err != nil {
		return errors.Trace(err)
	}

	log.Debugf("handleSelect %s, %+v", sql, plan.PKValues)

	c.server.IncCounter(plan.PlanId.String())

	if ti != nil && len(plan.PKValues) > 0 && ti.CacheType != schema.CACHE_NONE {
		pks := pkValuesToStrings(ti.PKColumns, plan.PKValues)
		items := ti.Cache.Get(pks, ti.Columns)
		count := 0
		for _, item := range items {
			if item.Row != nil {
				count++
			}
		}

		if count == len(pks) { //all rows hit the cache
			c.server.IncCounter("hit")
			log.Info("hit cache!", sql, pks)
			return c.writeCacheResults(plan, ti, pks, items)
		}

		c.server.IncCounter("miss")

		if plan.PlanId == planbuilder.PLAN_PK_IN && len(pks) == 1 {
			log.Infof("%s, %+v, %+v", sql, plan, stmt)
			return c.fillCacheAndReturnResults(plan, ti, pks)
		}
	}

	bindVars := makeBindVars(args)
	conns, err := c.getShardConns(true, stmt, bindVars)
	if err != nil {
		return errors.Trace(err)
	} else if len(conns) == 0 { //todo:handle error
		r := c.newEmptyResultset(stmt)
		return c.writeResultset(c.status, r)
	}

	var rs []*mysql.Result
	rs, err = c.executeInShard(conns, sql, args)
	c.closeShardConns(conns)
	if err == nil {
		err = c.mergeSelectResult(rs, stmt)
	}

	return errors.Trace(err)
}
Example 17
func (self *Server) removeJob(j *Job) {
	delete(self.jobs, j.Handle)
	delete(self.worker[j.ProcessBy].runningJobs, j.Handle)
	if j.IsBackGround {
		log.Debugf("done job: %v", j.Handle)
		if self.store != nil {
			if err := self.store.DoneJob(j); err != nil {
				log.Warning(err)
			}
		}
	}
}
Example 18
func (c *Conn) handleExec(stmt sqlparser.Statement, sql string, args []interface{}, skipCache bool) error {
	if !skipCache {
		// handle cache
		plan, ti, err := c.getPlanAndTableInfo(stmt)
		if err != nil {
			return errors.Trace(err)
		}

		if ti == nil {
			return errors.Errorf("sql: %s not support", sql)
		}

		c.server.IncCounter(plan.PlanId.String())

		if ti.CacheType != schema.CACHE_NONE {
			if len(ti.PKColumns) != len(plan.PKValues) {
				return errors.Errorf("updated/delete/replace without primary key not allowed %+v", plan.PKValues)
			}

			if len(plan.PKValues) == 0 {
				return errors.Errorf("pk not exist, sql: %s", sql)
			}

			log.Debugf("%s %+v, %+v", sql, plan, plan.PKValues)
			pks := pkValuesToStrings(ti.PKColumns, plan.PKValues)

			ti.Lock.Lock(hack.Slice(pks[0]))
			defer ti.Lock.Unlock(hack.Slice(pks[0]))

			invalidCache(ti, pks)
		}
	}

	bindVars := makeBindVars(args)
	conns, err := c.getShardConns(false, stmt, bindVars)
	if err != nil {
		return errors.Trace(err)
	} else if len(conns) == 0 { //todo:handle error
		return errors.Errorf("not server found %s", sql)
	}

	var rs []*mysql.Result
	rs, err = c.executeInShard(conns, sql, args)

	c.closeShardConns(conns)

	if err == nil {
		err = c.mergeExecResult(rs)
	}

	return errors.Trace(err)
}
Example 19
func (c *Conn) handleQuery(sql string) (err error) {
	sql = strings.TrimRight(sql, ";")
	stmt, err := sqlparser.Parse(sql, c.alloc)
	if err != nil {
		log.Warning(c.connectionId, sql, err)
		//fall back to handleShow (with a nil statement) for SQL the parser cannot handle
		return c.handleShow(stmt, sql, nil)
	}

	log.Debugf("connectionId: %d, statement %T , %s", c.connectionId, stmt, sql)

	switch v := stmt.(type) {
	case *sqlparser.Select:
		c.server.IncCounter("select")
		return c.handleSelect(v, sql, nil)
	case *sqlparser.Insert:
		c.server.IncCounter("insert")
		return c.handleExec(stmt, sql, nil, true)
	case *sqlparser.Replace:
		c.server.IncCounter("replace")
		return c.handleExec(stmt, sql, nil, false)
	case *sqlparser.Update:
		c.server.IncCounter("update")
		return c.handleExec(stmt, sql, nil, false)
	case *sqlparser.Delete:
		c.server.IncCounter("delete")
		return c.handleExec(stmt, sql, nil, false)
	case *sqlparser.Set:
		c.server.IncCounter("set")
		return c.handleSet(v, sql)
	case *sqlparser.SimpleSelect:
		c.server.IncCounter("simple_select")
		return c.handleSimpleSelect(sql, v)
	case *sqlparser.Begin:
		c.server.IncCounter("begin")
		return c.handleBegin()
	case *sqlparser.Commit:
		c.server.IncCounter("commit")
		return c.handleCommit()
	case *sqlparser.Rollback:
		c.server.IncCounter("rollback")
		return c.handleRollback()
	case *sqlparser.Other:
		c.server.IncCounter("other")
		log.Warning(sql)
		return c.handleShow(stmt, sql, nil)
	default:
		return errors.Errorf("statement %T not support now, %+v, %s", stmt, stmt, sql)
	}
}
Example 20
func (self *Server) handleWorkReport(e *event) {
	args := e.args
	slice := args.t0.([][]byte)
	jobhandle := bytes2str(slice[0])
	sessionId := e.fromSessionId
	j, ok := self.worker[sessionId].runningJobs[jobhandle]

	log.Debugf("%v job handle %v", CmdDescription(e.tp), jobhandle)
	if !ok {
		log.Warningf("job information lost, %v job handle %v, %+v",
			CmdDescription(e.tp), jobhandle, self.jobs)
		return
	}

	if j.Handle != jobhandle {
		log.Fatal("job handle not match")
	}

	if WORK_STATUS == e.tp {
		j.Percent, _ = strconv.Atoi(string(slice[1]))
		j.Denominator, _ = strconv.Atoi(string(slice[2]))
	}

	self.checkAndRemoveJob(e.tp, j)

	//the client is not updated with status or notified when the job has completed (it is detached)
	if j.IsBackGround {
		return
	}

	//broadcast to all clients, which is a really bad idea
	//for _, c := range self.client {
	//	reply := constructReply(e.tp, slice)
	//	c.Send(reply)
	//}

	//send only to the original client, which is not ideal either.
	//if you need work status notifications, create a co-worker
	//and have the worker send status to it.
	c, ok := self.client[j.CreateBy]
	if !ok {
		log.Debug(j.Handle, "sessionId", j.CreateBy, "missing")
		return
	}

	reply := constructReply(e.tp, slice)
	c.Send(reply)
	self.forwardReport++
}
Example 21
func (self *Server) getAllJobs() {
	jobs, err := self.store.GetJobs()
	if err != nil {
		log.Error(err)
		return
	}

	log.Debugf("%+v", jobs)

	for _, j := range jobs {
		j.ProcessBy = 0 //nobody is handling it now
		j.CreateBy = 0  //clear the original creator
		self.doAddJob(j)
	}
}
Example 22
func (top *Topology) GetSlotByIndex(i int) (*models.Slot, *models.ServerGroup, error) {
	slot, err := models.GetSlot(top.zkConn, top.ProductName, i)
	if err != nil {
		return nil, nil, errors.Trace(err)
	}

	log.Debugf("get slot %d : %+v", i, slot)
	if slot.State.Status != models.SLOT_STATUS_ONLINE && slot.State.Status != models.SLOT_STATUS_MIGRATE {
		log.Errorf("slot not online, %+v", slot)
	}

	groupServer, err := models.GetGroup(top.zkConn, top.ProductName, slot.GroupId)
	if err != nil {
		return nil, nil, errors.Trace(err)
	}

	return slot, groupServer, nil
}
Example 23
func (self *ShellExecutor) OnLaunchTask(driver *mesos.ExecutorDriver, taskInfo mesos.TaskInfo) {
	fmt.Println("Launch task:", taskInfo.TaskId.GetValue())
	log.Debug("send running state")
	self.sendStatusUpdate(taskInfo.TaskId.GetValue(), mesos.TaskState_TASK_RUNNING, "task is running!")

	log.Debugf("%+v", os.Args)
	startch := make(chan struct{}, 1)
	if len(os.Args) == 2 {
		fname := taskInfo.TaskId.GetValue()
		ioutil.WriteFile(fname, []byte(os.Args[1]), 0644)
		cmd := exec.Command("/bin/sh", fname)
		go func() {
			defer func() {
				self.finish <- taskInfo.TaskId.GetValue()
				log.Debug("send finish state")
				self.sendStatusUpdate(taskInfo.TaskId.GetValue(), mesos.TaskState_TASK_FINISHED, "Go task is done!")
				time.Sleep(10 * time.Second)
				driver.Stop()
			}()

			self.lock.Lock()
			self.process[taskInfo.TaskId.GetValue()] = cmd
			self.lock.Unlock()
			startch <- struct{}{}
			out, err := cmd.Output()

			if err != nil {
				log.Error(err.Error())
			} else {
				fmt.Println(string(out))
				//	log.Debug(string(out))
			}
		}()
	} else {
		log.Debug("send finish state")
		self.sendStatusUpdate(taskInfo.TaskId.GetValue(), mesos.TaskState_TASK_FINISHED, "Go task is done!")
		time.Sleep(10 * time.Second)
		driver.Stop()
		return //nothing was started, so do not block on startch below
	}
	<-startch
}
Example 24
File: main.go Project: ngaut/tyrant
func (self *ShellExecutor) OnKillTask(driver *mesos.ExecutorDriver, tid mesos.TaskID) {
	taskId := tid.GetValue()
	log.Warningf("OnKillTask %s", taskId)
	self.lock.Lock()
	defer self.lock.Unlock()
	if contex, ok := self.process[taskId]; ok {
		ret, _ := exec.Command("pgrep", "-P", strconv.Itoa(contex.cmd.Process.Pid)).Output()
		log.Debug("children process", string(ret))
		log.Debug("pid", contex.cmd.Process.Pid)
		ret, err := exec.Command("pkill", "-P", strconv.Itoa(contex.cmd.Process.Pid)).Output()
		if err != nil {
			log.Errorf("kill taskId %s failed, err:%v", taskId, err)
		}
		log.Debugf("kill taskId %s result %v", taskId, ret)
		contex.statusFile.Stop()
	}

	//log.Error("send kill state")
	//self.sendStatusUpdate(tid.GetValue(), mesos.TaskState_TASK_KILLED, "")
}
Example 25
func (c *Conn) handleShow(stmt sqlparser.Statement /*Other*/, sql string, args []interface{}) error {
	log.Debug(sql)
	bindVars := makeBindVars(args)
	conns, err := c.getShardConns(true, stmt, bindVars)
	if err != nil {
		return errors.Trace(err)
	} else if len(conns) == 0 {
		return errors.Errorf("not enough connection for %s", sql)
	}

	var rs []*mysql.Result
	rs, err = c.executeInShard(conns, sql, args)
	defer c.closeShardConns(conns)
	if err != nil {
		return errors.Trace(err)
	}

	r := rs[0].Resultset
	status := c.status | rs[0].Status

	log.Debugf("%+v", rs[0])

	//todo: handle set command when sharding
	if stmt == nil {
		log.Warning(sql)
		err := c.writeOkFlush(rs[0])
		return errors.Trace(err)
	}

	for i := 1; i < len(rs); i++ {
		status |= rs[i].Status
		for j := range rs[i].Values {
			r.Values = append(r.Values, rs[i].Values[j])
			r.RowDatas = append(r.RowDatas, rs[i].RowDatas[j])
		}
	}

	return errors.Trace(c.writeResultset(status, r))
}
Example 26
func (self *Server) handleGetWorker(e *event) (err error) {
	var buf []byte
	defer func() {
		e.result <- string(buf)
	}()
	cando := e.args.t0.(string)
	log.Debug("get worker", cando)
	if len(cando) == 0 {
		workers := make([]*Worker, 0, len(self.worker))
		for _, v := range self.worker {
			workers = append(workers, v)
		}
		buf, err = json.Marshal(workers)
		if err != nil {
			log.Error(err)
			return err
		}
		return nil
	}

	log.Debugf("%+v", self.funcWorker)

	if jw, ok := self.funcWorker[cando]; ok {
		log.Debug(cando, jw.workers.Len())
		workers := make([]*Worker, 0, jw.workers.Len())
		for it := jw.workers.Front(); it != nil; it = it.Next() {
			workers = append(workers, it.Value.(*Worker))
		}
		buf, err = json.Marshal(workers)
		if err != nil {
			log.Error(err)
			return err
		}
		return nil
	}

	return
}
Example 27
func (self *Server) handleSubmitJob(e *event) {
	args := e.args
	c := args.t0.(*Client)
	self.client[c.SessionId] = c
	funcName := bytes2str(args.t1)
	j := &Job{Id: bytes2str(args.t2), Data: args.t3.([]byte),
		Handle: allocJobId(), CreateAt: time.Now(), CreateBy: c.SessionId,
		FuncName: funcName, Priority: PRIORITY_LOW}

	j.IsBackGround = isBackGround(e.tp)
	// persistent job
	log.Debugf("add job %+v", j)
	if self.store != nil {
		if err := self.store.AddJob(j); err != nil {
			log.Warning(err)
		}
	}

	j.Priority = cmd2Priority(e.tp)

	//log.Debugf("%v, job handle %v, %s", CmdDescription(e.tp), j.Handle, string(j.Data))
	e.result <- j.Handle
	self.doAddJob(j)
}
Example 28
func (c *Conn) getConn(n *Shard, isSelect bool) (co *mysql.SqlConn, err error) {
	if !c.needBeginTx() {
		co, err = n.getMasterConn()
		if err != nil {
			return nil, errors.Trace(err)
		}
	} else {
		log.Info("needBeginTx", c.status)
		var ok bool
		co, ok = c.txConns[n.cfg.Id]

		if !ok {
			if co, err = n.getMasterConn(); err != nil {
				return nil, errors.Trace(err)
			}

			log.Debugf("%+v", co)

			if err = co.Begin(); err != nil {
				return nil, errors.Trace(err)
			}

			c.txConns[n.cfg.Id] = co
		}
	}

	if err = co.UseDB(c.db); err != nil {
		return nil, errors.Trace(err)
	}

	if err = co.SetCharset(c.charset); err != nil {
		return nil, errors.Trace(err)
	}

	return
}
Example 29
func main() {
	log.SetLevelByString("info")

	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	signal.Notify(c, syscall.SIGTERM)
	go func() {
		<-c
		Fatal("ctrl-c or SIGTERM found, exit")
	}()

	//	productName, _ = config.ReadString("product", "test")
	args, err := docopt.Parse(usage, nil, true, "codis config v0.1", true)
	if err != nil {
		log.Error(err)
	}

	// set config file
	if args["-c"] != nil {
		configFile = args["-c"].(string)
		config, err = utils.InitConfigFromFile(configFile)
		if err != nil {
			Fatal(err)
		}
	} else {
		config, err = utils.InitConfig()
		if err != nil {
			Fatal(err)
		}
	}

	// set output log file
	if args["-L"] != nil {
		log.SetOutputByName(args["-L"].(string))
	}

	// set log level
	if args["--log-level"] != nil {
		log.SetLevelByString(args["--log-level"].(string))
	}

	productName, _ = config.ReadString("product", "test")
	zkAddr, _ = config.ReadString("zk", "localhost:2181")
	zkConn, _ = zkhelper.ConnectToZk(zkAddr)
	zkLock = utils.GetZkLock(zkConn, productName)

	broker, _ = config.ReadString("broker", "ledisdb")
	slot_num, _ = config.ReadInt("slot_num", 16)

	log.Debugf("product: %s", productName)
	log.Debugf("zk: %s", zkAddr)
	log.Debugf("broker: %s", broker)

	if err := registerConfigNode(); err != nil {
		log.Fatal(errors.ErrorStack(err))
	}
	defer unRegisterConfigNode()

	if err := removeOrphanLocks(); err != nil {
		log.Fatal(errors.ErrorStack(err))
	}

	cmd := args["<command>"].(string)
	cmdArgs := args["<args>"].([]string)

	go http.ListenAndServe(":10086", nil)
	err = runCommand(cmd, cmdArgs)
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}
}
Example 30
func (t *Task) IsTimeout() bool {
	log.Debugf("ExecTimeAt: %d, Duration: %d Timeout: %d", t.ExecAt, time.Now().Unix()-t.ExecAt, t.Job.Timeout)
	return t.ExecAt > 0 && time.Now().Unix()-t.ExecAt > t.Job.Timeout
	// return false
}
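A sketch of how a scheduler sweep might use IsTimeout; runningTasks and killTask are assumed names:

for _, t := range runningTasks {
	if t.IsTimeout() {
		log.Warningf("task timed out: %+v", t)
		killTask(t) // hypothetical kill hook
	}
}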