func saveEntryPoint(ep string) { log.Println("TODO save ep to db") count := `SELECT COUNT(*) FROM DH_DAEMON;` row, err := g_ds.QueryRow(count) if err != nil { l := log.Error(count, "error.", err) logq.LogPutqueue(l) } var c int row.Scan(&c) if c > 0 { Update := fmt.Sprintf(`UPDATE DH_DAEMON SET ENTRYPOINT='%s';`, ep) log.Debug(Update) if _, e := g_ds.Update(Update); e != nil { l := log.Error(Update, "error.", e) logq.LogPutqueue(l) } } else { Insert := fmt.Sprintf(`INSERT INTO DH_DAEMON (ENTRYPOINT) VALUES ('%s');`, ep) log.Debug(c, Insert) if _, e := g_ds.Insert(Insert); e != nil { l := log.Error(Insert, "error.", e) logq.LogPutqueue(l) } } }
func saveDaemonID(id string) { log.Println("TODO save daemonid to db when srv returns code 0.") count := `SELECT COUNT(*) FROM DH_DAEMON;` row, err := g_ds.QueryRow(count) if err != nil { l := log.Error(count, "error.", err) logq.LogPutqueue(l) } var c int row.Scan(&c) if c > 0 { Update := fmt.Sprintf(`UPDATE DH_DAEMON SET DAEMONID='%s';`, id) log.Debug(Update) if _, e := g_ds.Update(Update); e != nil { l := log.Error(Update, "error.", e) logq.LogPutqueue(l) } } else { Insert := fmt.Sprintf(`INSERT INTO DH_DAEMON (DAEMONID) VALUES ('%s');`, id) log.Debug(c, Insert) if _, e := g_ds.Insert(Insert); e != nil { l := log.Error(Insert, "error.", e) logq.LogPutqueue(l) } } }
func publishedOfDatapoolHandler(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { log.Debug(r.URL.Path, "published of a datapool", r) r.ParseForm() datapool := ps.ByName("dpname") status := "published" count := getRepoCountByDp(datapool, status) offset, limit := optionalOffsetAndSize(r, 10, 1, 100) log.Debug("offset, limit", offset, limit) validateOffsetAndLimit(count, &offset, &limit) repoInfos, err := GetRepoInfo(datapool, status, offset, limit) log.Debug(repoInfos, offset, limit) if err != nil { log.Error(err) JsonResult(w, http.StatusInternalServerError, cmd.InternalError, err.Error(), nil) return } if len(repoInfos) == 0 { msg := fmt.Sprintf("No published dataitem in %s.", datapool) JsonResult(w, http.StatusOK, cmd.ErrorPublishedItemEmpty, msg, nil) } else { msg := fmt.Sprintf("Dataitems have been published into %s.", datapool) JsonResult(w, http.StatusOK, cmd.ResultOK, msg, newQueryListResult(count, &repoInfos)) } }
func pulledOfRepoHandler(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { log.Debug(r.URL.Path, "item pulled of a repository") r.ParseForm() dpName := ps.ByName("dpname") repoName := ps.ByName("repo") isPublished := "N" count := getItemCountByDpRepo(dpName, repoName, isPublished) offset, limit := optionalOffsetAndSize(r, 10, 1, 100) validateOffsetAndLimit(count, &offset, &limit) pulledRepoItems, err := GetPulledRepoInfo(dpName, repoName, offset, limit) if err != nil { log.Debug(err) JsonResult(w, http.StatusInternalServerError, cmd.InternalError, err.Error(), nil) return } if len(pulledRepoItems) == 0 { msg := fmt.Sprintf("Pulled DataItem of %s is empty.", repoName) JsonResult(w, http.StatusOK, cmd.ErrorPublishedItemEmpty, msg, nil) } else { msg := fmt.Sprintf("All DataItems have been pulled of %s.", repoName) JsonResult(w, http.StatusOK, cmd.ResultOK, msg, newQueryListResult(count, pulledRepoItems)) } }
func deleteItemsAccordingToHeartbeat(body []byte) { log.Debug("deleteItemsAccordingToHeartbeat() BEGIN:", string(body)) result := ds.Result{} itemEvent := &Event{} result.Data = itemEvent itemsdelete := []ItemDel{} itemEvent.Data = &itemsdelete if err := json.Unmarshal(body, &result); err == nil { log.Debug("items delete:", itemsdelete) for _, v := range itemsdelete { log.Debugf("delete item according to heartbeat: %v/%v\n", v.Repname, v.Itemname) err := delTagsForDelItem(v.Repname, v.Itemname) if err != nil { log.Error(err) return } err = delItem(v.Repname, v.Itemname) if err != nil { log.Error(err) return } log.Infof("Delete data item %v/%v according to heartbeat successfully.\n", v.Repname, v.Itemname) } } else { log.Warn("Unmarshal error:", err) } }
func publishedTagOfItemHandler(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { log.Debug(r.URL.Path, "tags published of dataitem") r.ParseForm() dpname := ps.ByName("dpname") repo := ps.ByName("repo") item := ps.ByName("item") count, err := getPublishedTagCount(dpname, repo, item) if err != nil { log.Debug(err) JsonResult(w, http.StatusInternalServerError, cmd.InternalError, err.Error(), nil) return } offset, limit := optionalOffsetAndSize(r, 10, 1, 100) log.Debug("offset, limit", offset, limit) validateOffsetAndLimit(count, &offset, &limit) publishedTagsOfItem, err := GetPublishedTagsOfItemInfo(dpname, repo, item, offset, limit) if err != nil { log.Debug(err) JsonResult(w, http.StatusInternalServerError, cmd.InternalError, err.Error(), nil) return } if len(publishedTagsOfItem) == 0 { msg := fmt.Sprintf("Published tags of %s/%s is empty.", repo, item) JsonResult(w, http.StatusOK, cmd.ErrorPulledTagEmpty, msg, nil) } else { msg := fmt.Sprintf("All tags have been published of %s/%s", repo, item) JsonResult(w, http.StatusOK, cmd.ResultOK, msg, newQueryListResult(count, &publishedTagsOfItem)) } }
func optionalIntParamInQuery(r *http.Request, paramName string, defaultInt int64) int64 { if r.Form.Get(paramName) == "" { log.Debug("paramName nil", paramName, r.Form) return defaultInt } i, err := strconv.ParseInt(r.Form.Get(paramName), 10, 64) if err != nil { log.Debug("ParseInt", err) return defaultInt } else { return i } }
func getRepoCountByDp(datapool, status string) int64 { if status == "published" { status = "Y" } else { status = "N" } sql := fmt.Sprintf(`SELECT COUNT(DISTINCT REPOSITORY) FROM DH_DP_RPDM_MAP WHERE DPID IN (SELECT DPID FROM DH_DP WHERE DPNAME = '%s' AND STATUS='A') AND PUBLISH= '%s' AND STATUS = 'A';`, datapool, status) row, err := g_ds.QueryRow(sql) if err != nil { l := log.Error(err) logq.LogPutqueue(l) return 0 } var count int64 row.Scan(&count) log.Debug("Published repository count:", count) return count }
func getPulledTagCount(datapool, repo, item string) (int64, error) { sql := fmt.Sprintf(`SELECT COUNT(*) FROM DH_RPDM_TAG_MAP WHERE RPDMID = (SELECT RPDMID FROM DH_DP_RPDM_MAP WHERE REPOSITORY = '%s' AND DATAITEM = '%s' AND PUBLISH = 'N' AND STATUS = 'A' AND DPID = (SELECT DPID FROM DH_DP WHERE DPNAME = '%s' AND STATUS='A')) AND STATUS = 'A';`, repo, item, datapool) row, err := g_ds.QueryRow(sql) if err != nil { l := log.Error(err) logq.LogPutqueue(l) return 0, err } var count int64 row.Scan(&count) log.Debug("Published repository count:", count) return count, err }
func GetDaemonRoleByPubRecord() (role int) { sql := `SELECT COUNT(*) FROM DH_DP_RPDM_MAP WHERE PUBLISH='Y' AND STATUS='A' AND DPID IN (SELECT DPID FROM DH_DP WHERE STATUS='A');` row := g_ds.Db.QueryRow(sql) var count int row.Scan(&count) if count > 0 { role = PUBLISHER log.Debug("This datahub daemon is a publisher.") } else { role = PULLER log.Debug("This datahub daemon is a puller.") } return }
func delTagsForDelItem(reponame, itemname string) error { log.Println("Begin to remove tags for remove item from db") sqlrpdmid := fmt.Sprintf(`SELECT RPDMID FROM DH_DP_RPDM_MAP WHERE REPOSITORY='%s' AND DATAITEM='%s' AND STATUS='A';`, reponame, itemname) row, err := g_ds.QueryRow(sqlrpdmid) if err != nil { l := log.Error("select rpdmid from DH_DP_RPDM_MAP error:", err) logq.LogPutqueue(l) return err } var rpdmId int row.Scan(&rpdmId) if rpdmId == 0 { log.Debug(reponame, itemname, "not exist.") return nil } sqldeltag := fmt.Sprintf(`UPDATE DH_RPDM_TAG_MAP SET STATUS='N' WHERE RPDMID=%d`, rpdmId) _, err = g_ds.Update(sqldeltag) log.Info("sqldeltag", sqldeltag) if err != nil { l := log.Error("delete tag error:", err) logq.LogPutqueue(l) return err } return nil }
func CheckHealthClock() { log.Debug("--------->BEGIN") checkHealth(&Errortagsmap) timer := time.NewTicker(10 * time.Minute) for { select { case <-timer.C: now := time.Now() if now.Hour()%6 == 0 { log.Info("Time:", now) checkHealth(&Errortagsmap) } } } log.Debug("---------->END") }
func removeAllJobDB() (e error) { log.Debug("TODO remove all jobs from db") sRmJobs := `DELETE FROM DH_JOB;` _, e = g_ds.Delete(sRmJobs) if e != nil { l := log.Error(e) logq.LogPutqueue(l) } return }
func removeJobDB(job *ds.JobInfo) (e error) { log.Debug("TODO remove jobid from db") sRmJob := fmt.Sprintf(`DELETE FROM DH_JOB WHERE JOBID=%d;`, job.ID) _, e = g_ds.Delete(sRmJob) if e != nil { l := log.Error(e) logq.LogPutqueue(l) } return }
func GetItemslocationInDatapool(itemslocation map[string]string, dpname string, dpid int, dpconn string) error { sql := fmt.Sprintf("SELECT DISTINCT ITEMDESC, REPOSITORY, DATAITEM FROM DH_DP_RPDM_MAP WHERE DPID=%v AND STATUS='A';", dpid) log.Debug(sql) rows, err := g_ds.QueryRows(sql) if err != nil { l := log.Errorf("datapool name %s, dpid %v, dpconn %v, error:%v", dpname, dpid, dpconn, err) logq.LogPutqueue(l) return err } var location, repo, item string for rows.Next() { rows.Scan(&location, &repo, &item) log.Debug(location, repo, item) itemslocation[location] = repo + "/" + item } log.Trace(itemslocation) return err }
func updateJobStatus(job *ds.JobInfo) (e error) { log.Debug("TODO updata job stat to db.") sUpdateJob := fmt.Sprintf(`UPDATE DH_JOB SET STATUS='%s', STAT_TIME=datetime('now'), DOWNSIZE=%d WHERE JOBID='%s';`, job.Stat, job.Dlsize, job.ID) _, e = g_ds.Update(sUpdateJob) if e != nil { l := log.Error(e) logq.LogPutqueue(l) } return }
func New(name string) (*Datapool, error) { datapooldriver, ok := datapooldrivers[name] for k, _ := range datapooldrivers { log.Debug(k, datapooldrivers[k], reflect.TypeOf(datapooldrivers[k])) } if !ok { s := fmt.Sprintf("Can't find datapooldriver %v", name) log.Error(s) return nil, errors.New(s) } return &Datapool{driver: datapooldriver}, nil }
func saveJobDB(job *ds.JobInfo) (e error) { log.Debug("TODO save job info to db.") sInsertJob := fmt.Sprintf(`INSERT INTO DH_JOB (JOBID, TAG, FILEPATH, STATUS, CREATE_TIME, STAT_TIME, DOWNSIZE, SRCSIZE) VALUES ('%s','%s','%s','%s', datetime('now'), datetime('now'),%d, %d);`, job.ID, job.Tag, job.Path, job.Stat, job.Dlsize, job.Srcsize) _, e = g_ds.Insert(sInsertJob) if e != nil { l := log.Error(e) logq.LogPutqueue(l) } return }
func GetPricePlan(path string) (plans []PricePlan) { config := path + "/" + PriceFile if isFileExists(config) == true { bytes, err := ioutil.ReadFile(config) if err != nil { log.Error(err) return } log.Debug(string(bytes)) type LPrices struct { PricePlans []PricePlan `json:"price,omitempty"` } struPrices := LPrices{} if err = json.Unmarshal(bytes, &struPrices); err != nil { log.Error(err) return } log.Debug(struPrices) plans = struPrices.PricePlans } return }
func itemPulledHandler(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { log.Debug(r.URL.Path, "item pulled or not") repo := ps.ByName("repo") item := ps.ByName("item") itemInfo := ItemInDatapool{} itemInfo.Dpname, itemInfo.Dpconn, itemInfo.Dptype, itemInfo.ItemLocation = GetDpnameDpconnItemdesc(repo, item) if len(itemInfo.ItemLocation) == 0 { JsonResult(w, http.StatusOK, cmd.ErrorItemNotExist, "The DataItem hasn't been pulled.", nil) } else { JsonResult(w, http.StatusOK, cmd.ResultOK, "The DataItem has been pulled.", &itemInfo) } }
func Synchronization() { log.Debug("BEGIN") timer := time.NewTicker(1 * time.Minute) for { select { case <-timer.C: now := time.Now() if now.Hour() == 1 { log.Debug("Time:", now) GetItemandTagFromServer() GetItemandTagFromSqlite() CompareItemSlice(ItemSliceA, ItemSliceB) CompareTagSlice(TagSliceA, TagSliceB) AlterItemStatus(diffItemSlice) AlterTagStatus(diffTagSlice) time.Sleep(time.Hour * 24) } } } log.Debug("END") }
func GetSampleData(itempath string) (sample string) { var filename string for _, v := range SampleFiles { filename = itempath + "/" + v if isFileExists(filename) == true { if bytes, err := ioutil.ReadFile(filename); err == nil { sample = string(bytes) return sample } else { l := log.Error(err) logq.LogPutqueue(l) } } } d, err := os.Open(itempath) //ppen dir if err != nil { log.Println(err) return "" } defer d.Close() ff, _ := d.Readdir(10) // return []fileinfo for i, fi := range ff { log.Printf("sample filename %d: %+v\n", i, fi.Name()) filename = strings.ToLower(fi.Name()) if filename != "sample.md" && filename != "meta.md" && filename != PriceFile { f, err := os.Open(itempath + "/" + fi.Name()) log.Println("filename:", itempath+"/"+fi.Name()) if err != nil { continue } defer f.Close() scanner := bufio.NewScanner(f) scanner.Split(bufio.ScanLines) var i = 0 for scanner.Scan() { if i > 9 { break } i++ sample += scanner.Text() + " \n" //md " \n" is a new line //log.Println(scanner.Text()) } break } } log.Debug("sample data:", sample) //need lenth check return sample }
func authDaemon(w http.ResponseWriter, r *http.Request) bool { log.Println(r.URL, "|", r.RequestURI, "|", r.RemoteAddr, "|", r.URL.RequestURI(), "|", r.Host) if r.Host == "127.0.0.1:35600" { return true } auth, ok := r.Header["X-Daemon-Auth"] log.Debug("DaemonAuthrization:", DaemonAuthrization) if !ok || auth[0] != DaemonAuthrization { JsonResult(w, http.StatusUnauthorized, cmd.ErrorUnAuthorization, "", nil) log.Error("connect daemon refused!", auth, ok, r.Header) return false } return true }
func GetMetaAndSampleAndPricePlan(dpname, itemdesc string) (meta, sample string, plans []PricePlan) { dpconn := GetDataPoolDpconn(dpname) if len(dpconn) == 0 || len(itemdesc) == 0 { l := log.Errorf("dpconn:%s or itemdesc:%s is empty", dpconn, itemdesc) logq.LogPutqueue(l) return } path := dpconn + "/" + itemdesc meta = GetMetaData(path) sample = GetSampleData(path) plans = GetPricePlan(path) log.Debug(plans) return }
func GetMessages() { log.Info("start GetMessages from messages server") var sleepInterval int var srtInterval string var e error url := DefaultServerAPI + "/notifications?forclient=1&type=item_event&status=0" for AutoPull == true { if srtInterval = os.Getenv("DATAHUB_MSG_INTERVAL"); len(srtInterval) > 0 { sleepInterval, e = strconv.Atoi(srtInterval) if e != nil { l := log.Error(e) logq.LogPutqueue(l) } } else { sleepInterval = 600 } time.Sleep(time.Duration(sleepInterval) * time.Second) log.Debug("connecting to", url) req, err := http.NewRequest("GET", url, nil) if len(loginAuthStr) > 0 { req.Header.Set("Authorization", loginAuthStr) } resp, err := http.DefaultClient.Do(req) if err != nil { l := log.Error(err) logq.LogPutqueue(l) continue } defer resp.Body.Close() body, _ := ioutil.ReadAll(resp.Body) if resp.StatusCode == http.StatusOK { log.Debugf("HeartBeat http statuscode:%v, http body:%s", resp.StatusCode, body) result := ds.Result{} Pages := ds.ResultPages{} MessagesSlice := []Messages{} Pages.Results = &MessagesSlice result.Data = &Pages if err := json.Unmarshal(body, &result); err == nil { if result.Code == 0 { log.Debug(result) for _, v := range MessagesSlice { if v.Type == "item_event" && v.Data.Event == TAGADDED { InsertToTagadded(v.Data.EventTime, v.Data.Repname, v.Data.Itemname, v.Data.Tag, NOTREAD) } } } else { l := log.Error("Get Messages errror:", result.Code) logq.LogPutqueue(l) } } else { log.Error(err) } } else if resp.StatusCode == http.StatusUnauthorized { log.Debug("not login", http.StatusUnauthorized) urllogin := DefaultServerAPI + "/" reql, err := http.NewRequest("GET", urllogin, nil) if len(loginBasicAuthStr) > 0 { reql.Header.Set("Authorization", loginBasicAuthStr) log.Info("user name:", gstrUsername) } else { log.Warn("not login") continue } respl, err := http.DefaultClient.Do(reql) if err != nil { log.Error(err) continue } defer respl.Body.Close() result := &ds.Result{} log.Println("login return", respl.StatusCode) if respl.StatusCode == 
200 { body, _ := ioutil.ReadAll(respl.Body) log.Println(string(body)) result.Data = &tk{} if err = json.Unmarshal(body, result); err != nil { log.Error(err) log.Println(respl.StatusCode, string(body)) continue } else { loginAuthStr = "Token " + result.Data.(*tk).Token loginLogged = true log.Println(loginAuthStr) } } } } }
/*download routine, supports resuming broken downloads.*/
// download fetches uri into the datapool destination described by p,
// resuming from the current size of the temp file via an HTTP Range header.
// It signals the caller over c (1 = transfer started, -1 = failed) and
// writes the HTTP outcome to w; it returns the number of bytes copied.
func download(uri string, p ds.DsPull, w http.ResponseWriter, c chan int) (int64, error) {
	log.Printf("we are going to download %s, save to dp=%s,name=%s\n", uri, p.Datapool, p.DestName)
	var out *os.File
	var err error
	var destfilename, tmpdestfilename, tmpdir, dpconn, dptype string
	dpconn, dptype = GetDataPoolDpconnAndDptype(p.Datapool)
	if len(dpconn) == 0 {
		err = fmt.Errorf("dpconn is null! datapool:%s ", p.Datapool)
		return ErrLogAndResp(c, w, http.StatusBadRequest, cmd.ErrorNoRecord, err)
	}
	//New a datapool object
	datapool, err := dpdriver.New(dptype)
	if err != nil {
		return ErrLogAndResp(c, w, http.StatusInternalServerError, cmd.ErrorNoDatapoolDriver, err)
	}
	destfilename, tmpdir, tmpdestfilename = datapool.GetDestFileName(dpconn, p.ItemDesc, p.DestName)
	os.MkdirAll(tmpdir, 0777)
	log.Info("open tmp destfile name:", tmpdestfilename)
	// Open (or create) the temp file; its current size is the resume point.
	out, err = os.OpenFile(tmpdestfilename, os.O_RDWR|os.O_CREATE, 0644)
	if err != nil {
		return ErrLogAndResp(c, w, http.StatusInternalServerError, cmd.ErrorOpenFile, err)
	}
	stat, err := out.Stat()
	if err != nil {
		return ErrLogAndResp(c, w, http.StatusInternalServerError, cmd.ErrorStatFile, err)
	}
	out.Seek(stat.Size(), 0)
	// NOTE(review): the error from NewRequest is not checked before req is
	// used — a malformed uri would panic here; confirm and fix upstream.
	req, err := http.NewRequest("GET", uri, nil)
	req.Header.Set("User-Agent", "go-downloader")
	/* Set download starting position with 'Range' in HTTP header*/
	req.Header.Set("Range", "bytes="+strconv.FormatInt(stat.Size(), 10)+"-")
	log.Printf("%v bytes had already been downloaded.\n", stat.Size())
	log.Debug(EnvDebug("http_proxy", false))
	resp, err := http.DefaultClient.Do(req)
	/*Save response body to file only when HTTP 2xx received. TODO*/
	if err != nil || (resp != nil && resp.StatusCode/100 != 2) {
		log.Error("http error", err)
		if resp != nil {
			body, _ := ioutil.ReadAll(resp.Body)
			l := log.Error("http status code:", resp.StatusCode, "response Body:", string(body), err)
			logq.LogPutqueue(l)
			struMsg := &ds.MsgResp{}
			json.Unmarshal(body, struMsg)
			msg := struMsg.Msg
			// 416 = requested range not satisfiable: file already complete.
			if resp.StatusCode == 416 {
				msg = tmpdestfilename + " has already been downloaded."
			}
			r, _ := buildResp(resp.StatusCode, msg, nil)
			w.WriteHeader(resp.StatusCode)
			w.Write(r)
		} else {
			HttpNoData(w, http.StatusInternalServerError, cmd.ErrorOtherError, err.Error())
		}
		filesize := stat.Size()
		out.Close()
		// Drop a zero-length temp file so the next attempt starts clean.
		if filesize == 0 {
			os.Remove(tmpdestfilename)
		}
		c <- -1
		return 0, err
	}
	defer resp.Body.Close()
	HttpNoData(w, http.StatusOK, cmd.ResultOK, strret)
	//write channel
	c <- 1
	jobtag := p.Repository + "/" + p.Dataitem + ":" + p.Tag
	// Source size and md5 are announced by the server in custom headers.
	srcsize, err := strconv.ParseInt(resp.Header.Get("X-Source-FileSize"), DECIMAL_BASE, INT_SIZE_64)
	md5str := resp.Header.Get("X-Source-MD5")
	status := "downloading"
	log.Info("pull tag:", jobtag, tmpdestfilename, status, srcsize)
	jobid := putToJobQueue(jobtag, tmpdestfilename, status, srcsize)
	n, err := io.Copy(out, resp.Body)
	if err != nil {
		out.Close()
		bl := log.Error(err)
		logq.LogPutqueue(bl)
		dlsize, e := GetFileSize(tmpdestfilename)
		if e != nil {
			l := log.Error(e)
			logq.LogPutqueue(l)
		}
		status = "failed"
		updateJobQueue(jobid, status, dlsize)
		return 0, err
	}
	out.Close()
	status = "downloaded"
	// Verify the md5 of the whole temp file when the server supplied one.
	if len(md5str) > 0 {
		bmd5, err := ComputeMd5(tmpdestfilename)
		bmd5str := fmt.Sprintf("%x", bmd5)
		log.Debug("md5", md5str, tmpdestfilename, bmd5str)
		if err != nil {
			log.Error(tmpdestfilename, err, bmd5)
		} else if md5str != bmd5str {
			l := log.Errorf("check md5 code error! src md5:%v, local md5:%v", md5str, bmd5str)
			logq.LogPutqueue(l)
			status = "md5 error"
			updateJobQueue(jobid, status, 0)
			return n, nil
		}
	}
	log.Printf("%d bytes downloaded.", n)
	if err := MoveFromTmp(tmpdestfilename, destfilename); err != nil {
		status = "MoveFromTmp error"
	}
	dlsize, e := GetFileSize(destfilename)
	if e != nil {
		l := log.Error(e)
		logq.LogPutqueue(l)
	}
	status = datapool.StoreFile(status, destfilename, dpconn, p.Datapool, p.ItemDesc, p.DestName)
	updateJobQueue(jobid, status, dlsize)
	tagComment := GetTagComment(p.Repository, p.Dataitem, p.Tag)
	InsertTagToDb(true, p, tagComment)
	return n, nil
}
/*pull parses filename and target IP from HTTP GET method, and start downloading routine. */
// p2p_pull serves a tag's file to a peer: it resolves the tag to a file in
// the local datapool, validates access, and streams the file with
// X-Source-MD5 / X-Source-FileSize headers while recording a job entry.
func p2p_pull(rw http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	l := log.Info("P2P PULL FROM", r.RemoteAddr, r.Method, r.URL.RequestURI(), r.Proto)
	logq.LogPutqueue(l)
	r.ParseForm()
	// Route parameters identify the tag to serve.
	sRepoName := ps.ByName("repo")
	sDataItem := ps.ByName("dataitem")
	sTag := ps.ByName("tag")
	log.Info(sRepoName, sDataItem, sTag)
	jobtag := fmt.Sprintf("%s/%s:%s", sRepoName, sDataItem, sTag)
	var irpdmid, idpid int
	var stagdetail, itemdesc, dpconn, dpname, dptype string
	msg := &ds.MsgResp{}
	msg.Msg = "OK."
	irpdmid, idpid, itemdesc = GetRpdmidDpidItemdesc(sRepoName, sDataItem)
	// Fall back to a conventional directory name when no mapping exists.
	if len(itemdesc) == 0 {
		itemdesc = sRepoName + "_" + sDataItem
	}
	log.Debug("dpid:", idpid, "rpdmid:", irpdmid, "itemdesc:", itemdesc)
	stagdetail = GetTagDetail(irpdmid, sTag)
	log.Debug("tagdetail", stagdetail)
	if len(stagdetail) == 0 {
		l := log.Warnf("%s(tag:%s) not found", stagdetail, sTag)
		logq.LogPutqueue(l)
		http.Error(rw, sTag+" not found", http.StatusNotFound)
		return
	}
	dpconn, dpname, dptype = GetDpconnDpnameDptypeByDpid(idpid)
	log.Debug("dpconn:", dpconn, "dpname:", dpname, "dptype:", dptype)
	datapool, err := dpdriver.New(dptype)
	if err != nil {
		WriteErrLogAndResp(rw, http.StatusInternalServerError, cmd.ErrorNoDatapoolDriver, err)
		return
	}
	filepathname := datapool.GetFileTobeSend(dpconn, dpname, itemdesc, stagdetail)
	//filepathname := dpconn + "/" + itemdesc + "/" + stagdetail
	log.Println("filename:", filepathname)
	if exists := isFileExists(filepathname); !exists {
		l := log.Error(filepathname, "not found")
		logq.LogPutqueue(l)
		putToJobQueue(jobtag, filepathname, "N/A", -1)
		msg.Msg = fmt.Sprintf("Tag:%s not found", sTag)
		resp, _ := json.Marshal(msg)
		respStr := string(resp)
		rw.WriteHeader(http.StatusNotFound)
		fmt.Fprintln(rw, respStr)
		return
	}
	tokenValid := false
	retmsg := ""
	token := r.Form.Get("token")
	username := r.Form.Get("username")
	log.Debug(r.URL, "----", r.FormValue("username"), "----", r.Form.Get("username"))
	// NOTE(review): the span below is redacted/corrupted in the source
	// ("******" placeholders). It appears to validate the access token
	// against a /transaction/ server endpoint (using tokenValid/retmsg)
	// and to obtain the file size into `size`, but it cannot be
	// reconstructed from this view and does not compile as-is — recover it
	// from version control.
	if len(token) > 0 && len(username) > 0 { log.Println(r.URL.Path, "token:", token, "username:"******"/transaction/" + sRepoName + "/" + sDataItem + "/" + sTag + "?cypt_accesstoken=" + token + "&username="******"Get %s size error, %v", filepathname, err) logq.LogPutqueue(l) }
	log.Printf("Tag file full path name :%v, size:%v", filepathname, size)
	//rw.Header().Set("Source-FileName", stagdetail)
	bmd5, err := ComputeMd5(filepathname)
	strmd5 := fmt.Sprintf("%x", bmd5)
	if err != nil {
		log.Error(filepathname, err, bmd5, strmd5)
	} else {
		rw.Header().Set("X-Source-MD5", strmd5)
	}
	rw.Header().Set("X-Source-FileSize", strconv.FormatInt(size, 10))
	l = log.Info("transfering", filepathname, bmd5, strmd5)
	logq.LogPutqueue(l)
	jobid := putToJobQueue(jobtag, filepathname, "transfering", size)
	http.ServeFile(rw, r, filepathname)
	updateJobQueue(jobid, "transfered", 0)
	return
}
func GetPulledRepoInfo(dpName, repoName string, offset int64, limit int) ([]ds.PulledItemInfo, error) { var pulledItemInfo ds.PulledItemInfo pulledItemInfos := make([]ds.PulledItemInfo, 0) sql := fmt.Sprintf(`SELECT DPID FROM DH_DP WHERE DPNAME = '%s' AND STATUS = 'A';`, dpName) row, err := g_ds.QueryRow(sql) if err != nil { l := log.Error(err) logq.LogPutqueue(l) return nil, err } var dpid int row.Scan(&dpid) sql = fmt.Sprintf(`SELECT DATAITEM ,ITEMDESC FROM DH_DP_RPDM_MAP WHERE DPID = %d AND REPOSITORY = '%s' AND PUBLISH = 'N' AND STATUS = 'A' ORDER BY RPDMID LIMIT %v OFFSET %v;`, dpid, repoName, limit, offset) rows, err := g_ds.QueryRows(sql) if err != nil { l := log.Error(err) logq.LogPutqueue(l) return nil, err } result := ds.Result{} pages := ds.ResultPages{} orderInfoSlice := []ds.OrderInfo{} pages.Results = &orderInfoSlice result.Data = &pages for rows.Next() { rows.Scan(&pulledItemInfo.ItemName, &pulledItemInfo.Location) path := "/api/subscriptions/pull/" + repoName + "/" + pulledItemInfo.ItemName resp, err := commToServerGetRsp("get", path, nil) if err != nil { l := log.Error(err) logq.LogPutqueue(l) return nil, err } defer resp.Body.Close() if resp.StatusCode == http.StatusUnauthorized { pulledItemInfo.SignTime = nil log.Debug("resp.StatusCode == http.StatusUnauthorized") } else if resp.StatusCode != http.StatusOK { err = errors.New("request subscriptions api failed.") l := log.Error(err) logq.LogPutqueue(l) return nil, err } else { respbody, err := ioutil.ReadAll(resp.Body) if err != nil { l := log.Error(err) logq.LogPutqueue(l) return nil, err } else { err = json.Unmarshal(respbody, &result) if err != nil { err = errors.New("unmarshal failed.") l := log.Error(err) logq.LogPutqueue(l) return nil, err } if len(orderInfoSlice) > 0 { //Already order by signtime desc. Get the first member. 
pulledItemInfo.SignTime = &orderInfoSlice[0].Signtime log.Debug("pulledItemInfo.SignTime:", pulledItemInfo.SignTime) } } } pulledItemInfos = append(pulledItemInfos, pulledItemInfo) } log.Debug(pulledItemInfos) return pulledItemInfos, err }