func (this *WatchActord) dueJobsWithin(topic string, timeSpan int64, now time.Time) (backlog int64, archive int64) { jobTable := jm.JobTable(topic) appid := manager.Default.TopicAppid(topic) aid := jm.App_id(appid) sql := fmt.Sprintf("SELECT count(job_id) FROM %s WHERE due_time<=?", jobTable) rows, err := this.mc.Query(jm.AppPool, jobTable, aid, sql, now.Unix()+timeSpan) if err != nil { log.Error("%s: %s", this.ident(), err) return } var n int for rows.Next() { rows.Scan(&n) } rows.Close() backlog += int64(n) archiveTable := jm.HistoryTable(topic) sql = fmt.Sprintf("SELECT count(job_id) FROM %s WHERE due_time>=?", archiveTable) rows, err = this.mc.Query(jm.AppPool, archiveTable, aid, sql, now.Unix()-timeSpan) if err != nil { log.Error("%s: %s", this.ident(), err) return } for rows.Next() { rows.Scan(&n) } rows.Close() archive += int64(n) return }
// TODO batch DELETE/INSERT for better performance.
//
// handleDueJobs consumes due jobs from this.dueJobs until this.stopper is
// closed, and for each job: deletes it from the realtime table, publishes its
// payload, then records it in the history table. Runs as a goroutine;
// signals wg when it exits.
func (this *JobExecutor) handleDueJobs(wg *sync.WaitGroup) {
	defer wg.Done()
	var (
		// zabbix maintains a in-memory delete queue
		// delete from history_uint where itemid=? and clock<min_clock
		sqlDeleteJob     = fmt.Sprintf("DELETE FROM %s WHERE job_id=?", this.table)
		sqlInsertArchive = fmt.Sprintf("INSERT INTO %s(job_id,payload,ctime,due_time,etime,actor_id) VALUES(?,?,?,?,?,?)", jm.HistoryTable(this.topic))
		sqlReinject      = fmt.Sprintf("INSERT INTO %s(job_id, payload, ctime, due_time) VALUES(?,?,?,?)", this.table)
	)
	for {
		select {
		case <-this.stopper:
			return

		case item := <-this.dueJobs:
			now := time.Now()
			// Delete BEFORE publishing: the row acts as an ownership token,
			// so a job can only be fired by whoever wins the delete.
			affectedRows, _, err := this.mc.Exec(jm.AppPool, this.table, this.aid, sqlDeleteJob, item.JobId)
			if err != nil {
				log.Error("%s: %s", this.ident, err)
				continue
			}
			if affectedRows == 0 {
				// 2 possibilities:
				// - client Cancel job wins
				// - this handler is too slow and the job fetched twice in tick
				continue
			}

			log.Debug("%s land %s", this.ident, item)
			// Publish the due job; on failure fall back to hinted handoff.
			_, _, err = store.DefaultPubStore.SyncPub(this.cluster, this.topic, nil, item.Payload)
			if err != nil {
				err = hh.Default.Append(this.cluster, this.topic, nil, item.Payload)
			}
			if err != nil {
				// pub fails and hinted handoff also fails: reinject job back to mysql
				// NOTE(review): the reinject Exec's error is not checked — if it
				// also fails the job is lost; confirm whether that is acceptable.
				log.Error("%s: %s", this.ident, err)
				this.mc.Exec(jm.AppPool, this.table, this.aid, sqlReinject,
					item.JobId, item.Payload, item.Ctime, item.DueTime)
				continue
			}

			log.Debug("%s fired %s", this.ident, item)
			this.auditor.Trace(item.String())

			// mv job to archive table
			// etime is the fire time captured above; a failed archive insert is
			// logged only — the job was already published successfully.
			_, _, err = this.mc.Exec(jm.AppPool, this.table, this.aid, sqlInsertArchive,
				item.JobId, item.Payload, item.Ctime, item.DueTime, now.Unix(), this.parentId)
			if err != nil {
				log.Error("%s: %s", this.ident, err)
			} else {
				log.Debug("%s archived %s", this.ident, item)
			}
		}
	}
}
// TODO diagnose all app's jobs status func (this *Job) displayAppJobs(appid string) { aid := this.connectMysqlCluster(appid) lines := make([]string, 0) header := "Topic|Type|JobId|Due|Payload" lines = append(lines, header) // FIXME does not respect appid, show all now this.forSortedJobQueues(func(topic string) { table := jm.JobTable(topic) archiveTable := jm.HistoryTable(topic) sqlRealTime := fmt.Sprintf("SELECT job_id,payload,due_time FROM %s ORDER BY due_time DESC", table) if this.due > 0 { sqlRealTime = fmt.Sprintf("SELECT job_id,payload,due_time FROM %s WHERE due_time<=? ORDER BY due_time DESC", table, time.Now().Unix()+int64(this.due)) } rows, err := this.mc.Query(jm.AppPool, table, aid, sqlRealTime) swallow(err) var item job.JobItem for rows.Next() { err = rows.Scan(&item.JobId, &item.Payload, &item.DueTime) swallow(err) lines = append(lines, fmt.Sprintf("%s|RT|%d|%d|%s", topic, item.JobId, item.DueTime, item.PayloadString(50))) } rows.Close() sqlArchive := fmt.Sprintf("SELECT job_id,payload,due_time FROM %s ORDER BY due_time ASC LIMIT 100", archiveTable) rows, err = this.mc.Query(jm.AppPool, table, aid, sqlArchive) swallow(err) for rows.Next() { err = rows.Scan(&item.JobId, &item.Payload, &item.DueTime) swallow(err) lines = append(lines, fmt.Sprintf("%s|AR|%d|%d|%s", topic, item.JobId, item.DueTime, item.PayloadString(50))) } rows.Close() }) if len(lines) > 1 { this.Ui.Output(columnize.SimpleFormat(lines)) } }