func deleteItemsAccordingToHeartbeat(body []byte) { log.Debug("deleteItemsAccordingToHeartbeat() BEGIN:", string(body)) result := ds.Result{} itemEvent := &Event{} result.Data = itemEvent itemsdelete := []ItemDel{} itemEvent.Data = &itemsdelete if err := json.Unmarshal(body, &result); err == nil { log.Debug("items delete:", itemsdelete) for _, v := range itemsdelete { log.Debugf("delete item according to heartbeat: %v/%v\n", v.Repname, v.Itemname) err := delTagsForDelItem(v.Repname, v.Itemname) if err != nil { log.Error(err) return } err = delItem(v.Repname, v.Itemname) if err != nil { log.Error(err) return } log.Infof("Delete data item %v/%v according to heartbeat successfully.\n", v.Repname, v.Itemname) } } else { log.Warn("Unmarshal error:", err) } }
func delTagsForDelItem(reponame, itemname string) error { log.Println("Begin to remove tags for remove item from db") sqlrpdmid := fmt.Sprintf(`SELECT RPDMID FROM DH_DP_RPDM_MAP WHERE REPOSITORY='%s' AND DATAITEM='%s' AND STATUS='A';`, reponame, itemname) row, err := g_ds.QueryRow(sqlrpdmid) if err != nil { l := log.Error("select rpdmid from DH_DP_RPDM_MAP error:", err) logq.LogPutqueue(l) return err } var rpdmId int row.Scan(&rpdmId) if rpdmId == 0 { log.Debug(reponame, itemname, "not exist.") return nil } sqldeltag := fmt.Sprintf(`UPDATE DH_RPDM_TAG_MAP SET STATUS='N' WHERE RPDMID=%d`, rpdmId) _, err = g_ds.Update(sqldeltag) log.Info("sqldeltag", sqldeltag) if err != nil { l := log.Error("delete tag error:", err) logq.LogPutqueue(l) return err } return nil }
func GetTagComment(repo, item, tag string) string { path := "/api/repositories/" + repo + "/" + item + "/" + tag resp, err := commToServerGetRsp("get", path, nil) if err != nil { log.Error(err) return "" } defer resp.Body.Close() if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusBadRequest { err = errors.New("unkown error") log.Error("GET", path, resp.StatusCode) return "" } result := ds.Response{} struComment := &struct { Comment string `json:"comment"` }{} result.Data = struComment respbody, _ := ioutil.ReadAll(resp.Body) log.Println(string(respbody)) unmarshalerr := json.Unmarshal(respbody, &result) if unmarshalerr != nil { log.Error(unmarshalerr) return "" } log.Println(result) return struComment.Comment }
func saveEntryPoint(ep string) { log.Println("TODO save ep to db") count := `SELECT COUNT(*) FROM DH_DAEMON;` row, err := g_ds.QueryRow(count) if err != nil { l := log.Error(count, "error.", err) logq.LogPutqueue(l) } var c int row.Scan(&c) if c > 0 { Update := fmt.Sprintf(`UPDATE DH_DAEMON SET ENTRYPOINT='%s';`, ep) log.Debug(Update) if _, e := g_ds.Update(Update); e != nil { l := log.Error(Update, "error.", e) logq.LogPutqueue(l) } } else { Insert := fmt.Sprintf(`INSERT INTO DH_DAEMON (ENTRYPOINT) VALUES ('%s');`, ep) log.Debug(c, Insert) if _, e := g_ds.Insert(Insert); e != nil { l := log.Error(Insert, "error.", e) logq.LogPutqueue(l) } } }
func getBatchDelTagsName(reponame, itemname, tagname string) ([]string, error) { log.Println("Batch delete tags from db") sqlrpdmid := fmt.Sprintf(`SELECT RPDMID FROM DH_DP_RPDM_MAP WHERE REPOSITORY='%s' AND DATAITEM='%s' AND STATUS='A';`, reponame, itemname) var rpdmId int row, err := g_ds.QueryRow(sqlrpdmid) if err != nil { l := log.Error("select rpdmid from DH_DP_RPDM_MAP error:", err) logq.LogPutqueue(l) return nil, err } row.Scan(&rpdmId) tagname = strings.Replace(tagname, "*", "%", -1) log.Println(tagname) sql := fmt.Sprintf(`SELECT TAGNAME FROM DH_RPDM_TAG_MAP WHERE TAGNAME LIKE '%s' AND RPDMID=%d AND STATUS='A';`, tagname, rpdmId) //var tagnames []string tagsname := make([]string, 0) rows, err := g_ds.QueryRows(sql) if err != nil { l := log.Error("batch delete tag from DH_RPDM_TAG_MAP error:", err) logq.LogPutqueue(l) return nil, err } for rows.Next() { rows.Scan(&tagname) tagsname = append(tagsname, tagname) } log.Println(tagsname) /*if len(tagsname) == 0 { return nil, errors.New("没有匹配的tag") }*/ return tagsname, nil }
func (hdfs *hdfsdriver) StoreFile(status, filename, dpconn, dp, itemlocation, destfile string) string { log.Infof("Begin to upload %v to %v\n", filename, dp) client, err := getClient(dpconn) if err != nil { log.Error("Failed to get a client", err) status = "put to hdfs err" return status } defer client.Close() err = client.MkdirAll("/"+itemlocation, 1777) if err != nil { log.Error("Failed to mkdirall in hdfs", err) status = "put to hdfs err" return status } hdfsfile := "/" + itemlocation + "/" + destfile err = client.CopyToRemote(filename, hdfsfile) if err != nil { log.Error("Failed to CopyToRemote", err) status = "put to hdfs err" return status } status = "put to hdfs ok" log.Info("Successfully uploaded to", itemlocation, "in hdfs") return status }
func commToServer(method, path string, buffer []byte, w http.ResponseWriter) (body []byte, err error) { //Trace() s := log.Info("daemon: connecting to", DefaultServer+path) logq.LogPutqueue(s) req, err := http.NewRequest(strings.ToUpper(method), DefaultServer+path, bytes.NewBuffer(buffer)) if len(loginAuthStr) > 0 { req.Header.Set("Authorization", loginAuthStr) } //req.Header.Set("User", "admin") resp, err := http.DefaultClient.Do(req) if err != nil { log.Error(err) d := ds.Result{Code: cmd.ErrorServiceUnavailable, Msg: err.Error()} body, e := json.Marshal(d) if e != nil { log.Error(e) return body, e } w.WriteHeader(http.StatusServiceUnavailable) w.Write(body) return body, err } defer resp.Body.Close() w.WriteHeader(resp.StatusCode) body, err = ioutil.ReadAll(resp.Body) w.Write(body) log.Info(resp.StatusCode, string(body)) return }
func saveDaemonID(id string) { log.Println("TODO save daemonid to db when srv returns code 0.") count := `SELECT COUNT(*) FROM DH_DAEMON;` row, err := g_ds.QueryRow(count) if err != nil { l := log.Error(count, "error.", err) logq.LogPutqueue(l) } var c int row.Scan(&c) if c > 0 { Update := fmt.Sprintf(`UPDATE DH_DAEMON SET DAEMONID='%s';`, id) log.Debug(Update) if _, e := g_ds.Update(Update); e != nil { l := log.Error(Update, "error.", e) logq.LogPutqueue(l) } } else { Insert := fmt.Sprintf(`INSERT INTO DH_DAEMON (DAEMONID) VALUES ('%s');`, id) log.Debug(c, Insert) if _, e := g_ds.Insert(Insert); e != nil { l := log.Error(Insert, "error.", e) logq.LogPutqueue(l) } } }
func delTag(reponame, itemname, tagname string) (int, error) { log.Println("TODO delete tag from db") sqlrpdmid := fmt.Sprintf(`SELECT RPDMID FROM DH_DP_RPDM_MAP WHERE REPOSITORY='%s' AND DATAITEM='%s' AND STATUS='A';`, reponame, itemname) var rpdmId int row, err := g_ds.QueryRow(sqlrpdmid) if err != nil { l := log.Error("select rpdmid from DH_DP_RPDM_MAP error:", err) logq.LogPutqueue(l) return 0, err } row.Scan(&rpdmId) sql := fmt.Sprintf(`SELECT TAGID FROM DH_RPDM_TAG_MAP WHERE STATUS='A' AND TAGNAME='%s' AND RPDMID=%d`, tagname, rpdmId) var tagid int row, err = g_ds.QueryRow(sql) if err != nil { l := log.Error("select tagid from DH_DP_RPDM_MAP error:", err) logq.LogPutqueue(l) return 0, err } row.Scan(&tagid) sql = fmt.Sprintf(`UPDATE DH_RPDM_TAG_MAP SET STATUS='N' WHERE TAGNAME='%s' AND RPDMID=%d`, tagname, rpdmId) _, err = g_ds.Update(sql) if err != nil { l := log.Error("delete tag from DH_RPDM_TAG_MAP error:", err) logq.LogPutqueue(l) return 0, err } return tagid, nil }
func GetRepoInfo(dpName, status string, offset int64, limit int) ([]ds.RepoInfo, error) { if status == "published" { status = "Y" } else { status = "N" } sql := fmt.Sprintf(`SELECT DPID FROM DH_DP WHERE DPNAME = '%s' AND STATUS = 'A';`, dpName) row, err := g_ds.QueryRow(sql) if err != nil { l := log.Error(err) logq.LogPutqueue(l) return nil, err } var dpid int row.Scan(&dpid) if dpid == 0 { return nil, errors.New(fmt.Sprintf("No datapool named %s.", dpName)) } sql = fmt.Sprintf(`SELECT DISTINCT REPOSITORY FROM DH_DP_RPDM_MAP WHERE DPID = %d AND PUBLISH = '%s' AND STATUS = 'A' ORDER BY RPDMID LIMIT %v OFFSET %v;`, dpid, status, limit, offset) rows, err := g_ds.QueryRows(sql) if err != nil { l := log.Error(err) logq.LogPutqueue(l) return nil, err } var repository string var itemCount int repoinfo := ds.RepoInfo{} repoInfos := make([]ds.RepoInfo, 0) for rows.Next() { rows.Scan(&repository) repoinfo.RepositoryName = repository sql = fmt.Sprintf(`SELECT COUNT(*) FROM DH_DP_RPDM_MAP WHERE REPOSITORY = '%s' AND PUBLISH = '%s' AND STATUS = 'A';`, repository, status) row, err := g_ds.QueryRow(sql) if err != nil { l := log.Error(err) logq.LogPutqueue(l) return nil, err } row.Scan(&itemCount) repoinfo.ItemCount = itemCount repoInfos = append(repoInfos, repoinfo) } return repoInfos, err }
func judgeTagExistHandler(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { if len(loginAuthStr) == 0 { HttpNoData(w, http.StatusUnauthorized, cmd.ErrorServiceUnavailable, " ") return } repository := ps.ByName("repo") dataitem := ps.ByName("item") tag := ps.ByName("tag") if strings.ContainsAny(tag, "*") { exist, msg, err := judgeRepoOrItemExist(repository, dataitem) if err != nil { log.Error(err) HttpNoData(w, http.StatusInternalServerError, cmd.ErrorServiceUnavailable, err.Error()) return } if exist == false { HttpNoData(w, http.StatusBadRequest, cmd.RepoOrItemNotExist, msg) return } else { HttpNoData(w, http.StatusOK, cmd.TagExist, msg) return } } else { exist, msg, err := judgeRepoOrItemExist(repository, dataitem) if err != nil { log.Error(err) HttpNoData(w, http.StatusInternalServerError, cmd.ErrorServiceUnavailable, err.Error()) return } if exist == false { HttpNoData(w, http.StatusBadRequest, cmd.RepoOrItemNotExist, msg) return } else { exist, msg, err = judgeTagExist(repository, dataitem, tag) if err != nil { log.Error(err) HttpNoData(w, http.StatusInternalServerError, cmd.ErrorServiceUnavailable, err.Error()) return } if exist == false { HttpNoData(w, http.StatusBadRequest, cmd.TagNotExist, msg) return } else { HttpNoData(w, http.StatusOK, cmd.TagExist, msg) return } } } return }
func GetPulledTagsOfItemInfo(dpname, repo, item string, offset int64, limit int) ([]ds.PulledTagsOfItem, error) { pulledTagOfItem := ds.PulledTagsOfItem{} pulledTagsOfItem := make([]ds.PulledTagsOfItem, 0) sql := fmt.Sprintf(`SELECT DPID FROM DH_DP WHERE DPNAME = '%s' AND STATUS = 'A';`, dpname) row, err := g_ds.QueryRow(sql) if err != nil { l := log.Error(err) logq.LogPutqueue(l) return nil, err } var dpid int row.Scan(&dpid) if dpid == 0 { return pulledTagsOfItem, errors.New(fmt.Sprintf("No datapool named %s.", dpname)) } sql = fmt.Sprintf(`SELECT RPDMID FROM DH_DP_RPDM_MAP WHERE REPOSITORY = '%s' AND DATAITEM = '%s' AND DPID = %d AND PUBLISH = 'N' AND STATUS = 'A';`, repo, item, dpid) row, err = g_ds.QueryRow(sql) if err != nil { l := log.Error(err) logq.LogPutqueue(l) return nil, err } var rpdmid int row.Scan(&rpdmid) sql = fmt.Sprintf(`SELECT TAGNAME, CREATE_TIME, COMMENT FROM DH_RPDM_TAG_MAP WHERE RPDMID = %d AND STATUS = 'A' LIMIT %v OFFSET %v;`, rpdmid, limit, offset) rows, err := g_ds.QueryRows(sql) if err != nil { l := log.Error(err) logq.LogPutqueue(l) return nil, err } for rows.Next() { rows.Scan(&pulledTagOfItem.TagName, &pulledTagOfItem.DownloadTime, &pulledTagOfItem.Content) pulledTagsOfItem = append(pulledTagsOfItem, pulledTagOfItem) } return pulledTagsOfItem, err }
func dbinit() { DB_TYPE := os.Getenv("DB_TYPE") if strings.ToUpper(DB_TYPE) == "MYSQL" { for i := 0; i < 3; i++ { connectMysql() if g_ds.Db == nil { select { case <-time.After(time.Second * 5): continue } } else { break } } if g_ds.Db == nil { return } } else { log.Println("connect to db sqlite3") db, err := sql.Open("sqlite3", g_dbfile) //defer db.Close() chk(err) g_ds.Db = db g_ds.DbType = "sqlite" } var RetDhRpdmTagMap string row, err := g_ds.QueryRow(ds.SQLIsExistRpdmTagMap) if err != nil { l := log.Error("Get Dh_Rpdm_Tag_Map error!") logq.LogPutqueue(l) return } row.Scan(&RetDhRpdmTagMap) if len(RetDhRpdmTagMap) > 1 { if false == strings.Contains(RetDhRpdmTagMap, "COMMENT") { // UpdateSql04To05() UpdateSql16To17() } } //if err := UpgradeSql07To08(); err != nil { // panic(err) //} if err := CreateTable(); err != nil { l := log.Error("Get CreateTable error!", err) logq.LogPutqueue(l) panic(err) } }
func GetItemandTagFromServer() { path1 := "/api/repositories?myRelease=1&size=-1" resp1, err := commToServerGetRsp("get", path1, nil) if err != nil { log.Error(err) return } repo := ds.Repositories{} respbody1, _ := ioutil.ReadAll(resp1.Body) json.Unmarshal(respbody1, &repo) RepoSlice := []string{repo.RepositoryName} defer resp1.Body.Close() a := len(RepoSlice) for i := 0; i < a; i++ { path2 := "/api/repositories/" + RepoSlice[i] + "?myRelease=1&size=-1" resp2, err := commToServerGetRsp("get", path2, nil) if err != nil { log.Error(err) return } item := ds.Repository{} respbody2, _ := ioutil.ReadAll(resp2.Body) json.Unmarshal(respbody2, &item) ItemSliceA = item.DataItems defer resp2.Body.Close() b := len(ItemSliceA) for j := 0; j < b; j++ { path3 := "/api/repositories/" + RepoSlice[i] + "/" + ItemSliceA[j] + "?size=-1" resp3, err := commToServerGetRsp("get", path3, nil) if err != nil { log.Error(err) return } tag := ds.Tag{} respbody3, _ := ioutil.ReadAll(resp3.Body) json.Unmarshal(respbody3, &tag) TagSliceA = []string{tag.Tag} defer resp3.Body.Close() } } }
func ErrLogAndResp(c chan int, w http.ResponseWriter, httpcode, errorcode int, err error) (int64, error) { l := log.Error(err) logq.LogPutqueue(l) c <- -1 HttpNoData(w, http.StatusBadRequest, cmd.ErrorNoRecord, err.Error()) return 0, err }
func GetLocalfilePath() (localfilepath []string) { sql := `SELECT DISTINCT DPCONN, ITEMDESC FROM DH_DP A, DH_DP_RPDM_MAP B WHERE A.DPID=B.DPID AND A.DPTYPE='file' AND A.STATUS='A' AND B.PUBLISH='Y' AND B.STATUS='A';` //dpci = make(map[string] string) var conn string var desc string localfilepath = make([]string, 0) rows, err := g_ds.QueryRows(sql) if err != nil { l := log.Error("QueryRow error:", err) logq.LogPutqueue(l) return } else { for rows.Next() { rows.Scan(&conn, &desc) path := conn + "/" + desc localfilepath = append(localfilepath, path) } return } }
func getPulledTagCount(datapool, repo, item string) (int64, error) { sql := fmt.Sprintf(`SELECT COUNT(*) FROM DH_RPDM_TAG_MAP WHERE RPDMID = (SELECT RPDMID FROM DH_DP_RPDM_MAP WHERE REPOSITORY = '%s' AND DATAITEM = '%s' AND PUBLISH = 'N' AND STATUS = 'A' AND DPID = (SELECT DPID FROM DH_DP WHERE DPNAME = '%s' AND STATUS='A')) AND STATUS = 'A';`, repo, item, datapool) row, err := g_ds.QueryRow(sql) if err != nil { l := log.Error(err) logq.LogPutqueue(l) return 0, err } var count int64 row.Scan(&count) log.Debug("Published repository count:", count) return count, err }
func getRepoCountByDp(datapool, status string) int64 { if status == "published" { status = "Y" } else { status = "N" } sql := fmt.Sprintf(`SELECT COUNT(DISTINCT REPOSITORY) FROM DH_DP_RPDM_MAP WHERE DPID IN (SELECT DPID FROM DH_DP WHERE DPNAME = '%s' AND STATUS='A') AND PUBLISH= '%s' AND STATUS = 'A';`, datapool, status) row, err := g_ds.QueryRow(sql) if err != nil { l := log.Error(err) logq.LogPutqueue(l) return 0 } var count int64 row.Scan(&count) log.Debug("Published repository count:", count) return count }
func pulledOfDatapoolHandler(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { log.Debug(r.URL.Path, "pulled of a datapool") r.ParseForm() dpName := ps.ByName("dpname") status := "pulled" count := getRepoCountByDp(dpName, status) offset, limit := optionalOffsetAndSize(r, 10, 1, 100) validateOffsetAndLimit(count, &offset, &limit) repoInfos, err := GetRepoInfo(dpName, status, offset, limit) if err != nil { log.Error(err) JsonResult(w, http.StatusInternalServerError, cmd.InternalError, err.Error(), nil) return } if len(repoInfos) == 0 { msg := fmt.Sprintf("No pulled dataitem in %s.", dpName) JsonResult(w, http.StatusOK, cmd.ErrorPublishedItemEmpty, msg, nil) } else { msg := fmt.Sprintf("Dataitems have been pulled into %s.", dpName) JsonResult(w, http.StatusOK, cmd.ResultOK, msg, newQueryListResult(count, &repoInfos)) } }
// CheckDataAndGetSize checks whether fileName exists under
// bucket/itemlocation in S3 and, if so, reports its size in bytes.
func (s3 *s3driver) CheckDataAndGetSize(dpconn, itemlocation, fileName string) (exist bool, size int64, err error) {
	bucket := getAwsInfoFromDpconn(dpconn)
	destFullPathFileName := bucket + "/" + itemlocation + "/" + fileName
	log.Info(destFullPathFileName)

	AWS_REGION = Env("AWS_REGION", false)

	// List every object whose key starts with "itemlocation/fileName".
	svc := s3aws.New(session.New(&aws.Config{Region: aws.String(AWS_REGION)}))
	result, err := svc.ListObjects(&s3aws.ListObjectsInput{Bucket: aws.String(bucket), Prefix: aws.String(itemlocation + "/" + fileName)})
	if err != nil {
		log.Error("Failed to list objects", err)
		return exist, size, err
	}

	exist = false
	for _, v := range result.Contents {
		log.Infof("Tag:%s, key:%s, size:%v\n", aws.StringValue(v.ETag), aws.StringValue(v.Key), aws.Int64Value(v.Size))
		// NOTE(review): keys returned under this prefix are full paths
		// ("itemlocation/fileName"), so comparing against the bare
		// fileName looks like it can only match when itemlocation is
		// empty — confirm the intended key layout.
		if aws.StringValue(v.Key) == fileName {
			size = aws.Int64Value(v.Size)
			exist = true
		}
	}
	return
}
func ScanLocalFile(path string) []string { localfiles := make([]string, 0) err := filepath.Walk(path, func(path string, f os.FileInfo, err error) error { if f == nil { return err } if f.IsDir() { return nil } localfiles = append(localfiles, path) return nil }) if err != nil { log.Error("filepath.Walk() returned %v\n", err) } //for _, localfile := range localfiles { // //fmt.Println(localfile) // log.Info("--------------------------------------------------------->localfile:", localfile) //} return localfiles }
func (hdfs *hdfsdriver) CheckDataAndGetSize(dpconn, itemlocation, fileName string) (exist bool, size int64, err error) { destFullPathFileName := "/" + itemlocation + "/" + fileName log.Info(destFullPathFileName) exist = false client, err := getClient(dpconn) if err != nil { log.Error("Failed to get a client", err) return } defer client.Close() fileinfo, _ := client.Stat(destFullPathFileName) if fileinfo != nil { exist = true cs, _ := client.GetContentSummary(destFullPathFileName) size = cs.Size() } else { err = errors.New("文件不存在") return } return }
func (hdfs *hdfsdriver) CheckItemLocation(datapoolname, dpconn, itemlocation string) (err error) { client, err := getClient(dpconn) if err != nil { log.Error("Failed to get a client", err) return } defer client.Close() err = client.MkdirAll("/"+itemlocation, 1777) if err != nil { log.Error(err) } return }
// StoreFile uploads filename to S3 as itemlocation/destfile.gz inside
// the bucket derived from dpconn, gzip-compressing the content on the
// fly through an io.Pipe. Returns a short status string.
func (s3 *s3driver) StoreFile(status, filename, dpconn, dp, itemlocation, destfile string) string {
	bucket := getAwsInfoFromDpconn(dpconn)
	//AWS_SECRET_ACCESS_KEY = Env("AWS_SECRET_ACCESS_KEY", false)
	//AWS_ACCESS_KEY_ID = Env("AWS_ACCESS_KEY_ID", false)
	AWS_REGION = Env("AWS_REGION", false)
	file, err := os.Open(filename)
	if err != nil {
		l := log.Error("Failed to open file", err)
		logq.LogPutqueue(l)
		status = "put to s3 err"
		return status
	}
	log.Infof("Begin to upload %v to %v\n", filename, dp)
	// Not required, but you could zip the file before uploading it
	// using io.Pipe read/writer to stream gzip'd file contents.
	// The goroutine feeds the pipe's writer; Upload below consumes the
	// reader end until writer.Close() signals EOF.
	reader, writer := io.Pipe()
	go func() {
		gw := gzip.NewWriter(writer)
		// NOTE(review): the io.Copy / gw.Close errors are discarded;
		// a read failure here would silently truncate the uploaded
		// object. Consider writer.CloseWithError — confirm intent.
		io.Copy(gw, file)
		file.Close()
		gw.Close()
		writer.Close()
		//updateJobQueueStatus(jobid, "puttos3ok")
	}()
	uploader := s3manager.NewUploader(session.New(&aws.Config{Region: aws.String(AWS_REGION)}))
	//uploader := s3manager.NewUploader(session.New(aws.NewConfig()))
	result, err := uploader.Upload(&s3manager.UploadInput{
		Body:   reader,
		Bucket: aws.String(bucket),
		Key:    aws.String( /*dp + "/" + */ itemlocation + "/" + destfile + ".gz"),
	})
	if err != nil {
		log.Error("Failed to upload", err)
		status = "put to s3 err"
		return status
	}
	status = "put to s3 ok"
	log.Info("Successfully uploaded to", result.Location)
	return status
}
func (fs *fsdriver) CheckItemLocation(datapoolname, dpconn, itemlocation string) error { log.Println(dpconn + "/" + itemlocation) err := os.MkdirAll(dpconn+"/"+itemlocation, 0777) if err != nil { log.Error(err) } return err }
func delEntryPoint() { log.Println("TODO remove ep from db.") d := `UPDATE DH_DAEMON SET ENTRYPOINT = '';` if _, e := g_ds.Update(d); e != nil { l := log.Error(d, "error.", e) logq.LogPutqueue(l) } }
/*AlterDhJob() Temporarily not use*/ func AlterDhJob() (err error) { sqltoken := `ALTER TABLE DH_JOB ADD ACCESSTOKEN VARCHAR(20);` _, err = g_ds.Exec(sqltoken) if err != nil { l := log.Error(err) logq.LogPutqueue(l) return err } sqlep := `ALTER TABLE DH_JOB ADD ENTRYPOINT VARCHAR(128);` _, err = g_ds.Exec(sqlep) if err != nil { l := log.Error(err) logq.LogPutqueue(l) return err } return nil }
func removeAllJobDB() (e error) { log.Debug("TODO remove all jobs from db") sRmJobs := `DELETE FROM DH_JOB;` _, e = g_ds.Delete(sRmJobs) if e != nil { l := log.Error(e) logq.LogPutqueue(l) } return }
func UpdateSql16To17() (err error) { sqlm := `ALTER TABLE DH_RPDM_TAG_MAP ADD COMMENT VARCHAR(256);` _, err = g_ds.Exec(sqlm) if err != nil { l := log.Error(err) logq.LogPutqueue(l) return err } return nil }
func removeJobDB(job *ds.JobInfo) (e error) { log.Debug("TODO remove jobid from db") sRmJob := fmt.Sprintf(`DELETE FROM DH_JOB WHERE JOBID=%d;`, job.ID) _, e = g_ds.Delete(sRmJob) if e != nil { l := log.Error(e) logq.LogPutqueue(l) } return }