func QiniuUpload(cmd string, params ...string) { if len(params) == 1 || len(params) == 2 { var uploadConfigFile string var threadCount int64 var err error if len(params) == 2 { threadCount, err = strconv.ParseInt(params[0], 10, 64) if err != nil { log.Error("Invalid <ThreadCount> value,", params[0]) return } uploadConfigFile = params[1] } else { uploadConfigFile = params[0] } //read upload config fp, err := os.Open(uploadConfigFile) if err != nil { log.Errorf("Open upload config file `%s` error due to `%s`", uploadConfigFile, err) return } defer fp.Close() configData, err := ioutil.ReadAll(fp) if err != nil { log.Errorf("Read upload config file `%s` error due to `%s`", uploadConfigFile, err) return } var uploadConfig qshell.UploadConfig err = json.Unmarshal(configData, &uploadConfig) if err != nil { log.Errorf("Parse upload config file `%s` errror due to `%s`", uploadConfigFile, err) return } if _, err := os.Stat(uploadConfig.SrcDir); err != nil { log.Error("Upload config error for parameter `SrcDir`,", err) return } //upload if threadCount < qshell.MIN_UPLOAD_THREAD_COUNT || threadCount > qshell.MAX_UPLOAD_THREAD_COUNT { fmt.Printf("Tip: you can set <ThreadCount> value between %d and %d to improve speed\n", qshell.MIN_UPLOAD_THREAD_COUNT, qshell.MAX_UPLOAD_THREAD_COUNT) if threadCount < qshell.MIN_UPLOAD_THREAD_COUNT { threadCount = qshell.MIN_UPLOAD_THREAD_COUNT } else if threadCount > qshell.MAX_UPLOAD_THREAD_COUNT { threadCount = qshell.MAX_UPLOAD_THREAD_COUNT } } qshell.QiniuUpload(int(threadCount), &uploadConfig) } else { CmdHelp(cmd) } }
// Cache walks cacheRootPath recursively and writes one line per regular
// file to cacheResultFile, in the tab-separated format:
// "<relPath>\t<size>\t<modTime>", where modTime is in units of 100ns.
// It returns the number of files visited.
// NOTE(review): fileCount is incremented even when writing a file's line
// fails, so the count may exceed the number of lines actually written.
func (this *DirCache) Cache(cacheRootPath string, cacheResultFile string) (fileCount int64) {
	cacheResultFileH, err := os.Create(cacheResultFile)
	if err != nil {
		log.Errorf("Failed to open cache file `%s'", cacheResultFile)
		return
	}
	defer cacheResultFileH.Close()
	bWriter := bufio.NewWriter(cacheResultFileH)
	walkStart := time.Now()
	log.Debug(fmt.Sprintf("Walk `%s' start from `%s'", cacheRootPath, walkStart.String()))
	filepath.Walk(cacheRootPath, func(path string, fi os.FileInfo, err error) error {
		var retErr error
		//log.Debug(fmt.Sprintf("Walking through `%s'", cacheRootPath))
		if err != nil {
			//propagate the walk error; returning non-nil aborts the walk
			retErr = err
		} else {
			if !fi.IsDir() {
				//path relative to the cache root, without a leading separator
				relPath := strings.TrimPrefix(strings.TrimPrefix(path, cacheRootPath), string(os.PathSeparator))
				fsize := fi.Size()
				//Unit is 100ns
				flmd := fi.ModTime().UnixNano() / 100
				//log.Debug(fmt.Sprintf("Hit file `%s' size: `%d' mode time: `%d`", relPath, fsize, flmd))
				fmeta := fmt.Sprintln(fmt.Sprintf("%s\t%d\t%d", relPath, fsize, flmd))
				if _, err := bWriter.WriteString(fmeta); err != nil {
					log.Errorf("Failed to write data `%s' to cache file", fmeta)
					retErr = err
				}
				fileCount += 1
			}
		}
		return retErr
	})
	//flush buffered lines before reporting completion
	if err := bWriter.Flush(); err != nil {
		log.Errorf("Failed to flush to cache file `%s'", cacheResultFile)
	}
	walkEnd := time.Now()
	log.Debug(fmt.Sprintf("Walk `%s' end at `%s'", cacheRootPath, walkEnd.String()))
	log.Debug(fmt.Sprintf("Walk `%s' last for `%s'", cacheRootPath, time.Since(walkStart)))
	return
}
func batchRename(client rs.Client, entries []qshell.RenameEntryPath) { ret, err := qshell.BatchRename(client, entries) if err != nil { fmt.Println("Batch rename error", err) } if len(ret) > 0 { for i, entry := range entries { item := ret[i] if item.Data.Error != "" { log.Errorf("Rename '%s' => '%s' Failed, Code :%d", entry.OldKey, entry.NewKey, item.Code) } else { log.Debug(fmt.Sprintf("Rename '%s' => '%s' Success, Code :%d", entry.OldKey, entry.NewKey, item.Code)) } } } }
func batchChgm(client rs.Client, entries []qshell.ChgmEntryPath) { ret, err := qshell.BatchChgm(client, entries) if err != nil { fmt.Println("Batch chgm error", err) } if len(ret) > 0 { for i, entry := range entries { item := ret[i] if item.Data.Error != "" { log.Errorf("Chgm '%s' => '%s' Failed, Code :%d", entry.Key, entry.MimeType, item.Code) } else { log.Debug(fmt.Sprintf("Chgm '%s' => '%s' Success, Code :%d", entry.Key, entry.MimeType, item.Code)) } } } }
func batchDelete(client rs.Client, entries []rs.EntryPath) { ret, err := qshell.BatchDelete(client, entries) if err != nil { fmt.Println("Batch delete error", err) } if len(ret) > 0 { for i, entry := range entries { item := ret[i] if item.Data.Error != "" { log.Errorf("Delete '%s' => '%s' Failed, Code: %d", entry.Bucket, entry.Key, item.Code) } else { log.Debug(fmt.Sprintf("Delete '%s' => '%s' Success, Code: %d", entry.Bucket, entry.Key, item.Code)) } } } }
func batchCopy(client rs.Client, entries []qshell.CopyEntryPath) { ret, err := qshell.BatchCopy(client, entries) if err != nil { fmt.Println("Batch move error", err) } if len(ret) > 0 { for i, entry := range entries { item := ret[i] if item.Data.Error != "" { log.Errorf("Copy '%s:%s' => '%s:%s' Failed, Code :%d", entry.SrcBucket, entry.SrcKey, entry.DestBucket, entry.DestKey, item.Code) } else { log.Debug(fmt.Sprintf("Copy '%s:%s' => '%s:%s' Success, Code :%d", entry.SrcBucket, entry.SrcKey, entry.DestBucket, entry.DestKey, item.Code)) } } } }
func CheckQrsync(cmd string, params ...string) { if len(params) == 3 || len(params) == 4 { dirCacheResultFile := params[0] listBucketResultFile := params[1] ignoreLocalDir, err := strconv.ParseBool(params[2]) if err != nil { log.Errorf("Invalid value `%s' for argument <IgnoreLocalDir>", params[2]) return } prefix := "" if len(params) == 4 { prefix = params[3] } qshell.CheckQrsync(dirCacheResultFile, listBucketResultFile, ignoreLocalDir, prefix) } else { CmdHelp(cmd) } }
func batchDelete(client rs.Client, entries []rs.EntryPath) { ret, err := qshell.BatchDelete(client, entries) if err != nil { if _, ok := err.(*rpc.ErrorInfo); !ok { fmt.Println("Batch delete error", err) return } } if len(ret) > 0 { for i, entry := range entries { item := ret[i] if item.Data.Error != "" { log.Errorf("Delete '%s' => '%s' failed, Err: %s", entry.Bucket, entry.Key, item.Data.Error) } else { log.Debugf("Delete '%s' => '%s' success", entry.Bucket, entry.Key) } } } }
// M3u8FileList resolves every slice referenced by the m3u8 playlist
// `m3u8Key' in `bucket' into bucket entries, so callers can delete the
// playlist together with all of its slices. The returned list contains
// each slice entry followed by the m3u8 file itself as the last entry.
// isPrivate signs the playlist download link (valid for 3600 seconds).
func M3u8FileList(mac *digest.Mac, bucket string, m3u8Key string, isPrivate bool) (slicesToDelete []rs.EntryPath, err error) {
	client := rs.NewMac(mac)
	//check m3u8 file exists
	_, sErr := client.Stat(nil, bucket, m3u8Key)
	if sErr != nil {
		err = fmt.Errorf("stat m3u8 file error, %s", sErr.Error())
		return
	}
	//get domain list of bucket
	bucketDomainUrl := fmt.Sprintf("%s/v6/domain/list", DEFAULT_API_HOST)
	bucketDomainData := map[string][]string{
		"tbl": []string{bucket},
	}
	bucketDomains := BucketDomain{}
	bErr := client.Conn.CallWithForm(nil, &bucketDomains, bucketDomainUrl, bucketDomainData)
	if bErr != nil {
		err = fmt.Errorf("get domain of bucket failed due to, %s", bErr.Error())
		return
	}
	if len(bucketDomains) == 0 {
		err = errors.New("no domain found for the bucket")
		return
	}
	//prefer a qiniu-hosted domain when one is present
	var domain string
	for _, d := range bucketDomains {
		if strings.HasSuffix(d, "qiniudn.com") ||
			strings.HasSuffix(d, "clouddn.com") ||
			strings.HasSuffix(d, "qiniucdn.com") {
			domain = d
			break
		}
	}
	//get first
	if domain == "" {
		domain = bucketDomains[0]
	}
	if domain == "" {
		err = errors.New("no valid domain found for the bucket")
		return
	}
	//create download link
	dnLink := fmt.Sprintf("http://%s/%s", domain, m3u8Key)
	if isPrivate {
		dnLink = PrivateUrl(mac, dnLink, time.Now().Add(time.Second*3600).Unix())
	}
	//get m3u8 file content
	m3u8Resp, m3u8Err := http.Get(dnLink)
	if m3u8Err != nil {
		err = fmt.Errorf("open url %s error due to, %s", dnLink, m3u8Err)
		return
	}
	defer m3u8Resp.Body.Close()
	if m3u8Resp.StatusCode != 200 {
		err = fmt.Errorf("download file error due to, %s", m3u8Resp.Status)
		return
	}
	m3u8Bytes, readErr := ioutil.ReadAll(m3u8Resp.Body)
	if readErr != nil {
		err = fmt.Errorf("read m3u8 file content error due to, %s", readErr.Error())
		return
	}
	//check content: a valid playlist starts with the #EXTM3U header
	if !strings.HasPrefix(string(m3u8Bytes), "#EXTM3U") {
		err = errors.New("invalid m3u8 file")
		return
	}
	slicesToDelete = make([]rs.EntryPath, 0)
	bReader := bufio.NewScanner(bytes.NewReader(m3u8Bytes))
	bReader.Split(bufio.ScanLines)
	for bReader.Scan() {
		line := strings.TrimSpace(bReader.Text())
		//non-comment lines reference slices, either as absolute URLs or
		//as paths; the bucket key is the path without the leading '/'
		if !strings.HasPrefix(line, "#") {
			var sliceKey string
			if strings.HasPrefix(line, "http://") || strings.HasPrefix(line, "https://") {
				uri, pErr := url.Parse(line)
				if pErr != nil {
					log.Errorf("invalid url, %s", line)
					continue
				}
				sliceKey = strings.TrimPrefix(uri.Path, "/")
			} else {
				sliceKey = strings.TrimPrefix(line, "/")
			}
			//append to delete list
			slicesToDelete = append(slicesToDelete, rs.EntryPath{bucket, sliceKey})
		}
	}
	//finally include the playlist file itself
	slicesToDelete = append(slicesToDelete, rs.EntryPath{bucket, m3u8Key})
	return
}
func QiniuUpload(threadCount int, uploadConfig *UploadConfig) { timeStart := time.Now() //make SrcDir the full path uploadConfig.SrcDir, _ = filepath.Abs(uploadConfig.SrcDir) dirCache := DirCache{} pathSep := string(os.PathSeparator) //create job id jobId := Md5Hex(fmt.Sprintf("%s:%s", uploadConfig.SrcDir, uploadConfig.Bucket)) //local storage path storePath := filepath.Join(".qshell", "qupload", jobId) if err := os.MkdirAll(storePath, 0775); err != nil { log.Errorf("Failed to mkdir `%s' due to `%s'", storePath, err) return } //cache file rescanLocalDir := false cacheResultName := filepath.Join(storePath, jobId+".cache") cacheTempName := filepath.Join(storePath, jobId+".cache.temp") cacheCountName := filepath.Join(storePath, jobId+".count") if _, statErr := os.Stat(cacheResultName); statErr == nil { //file already exists rescanLocalDir = uploadConfig.RescanLocal } else { rescanLocalDir = true } var totalFileCount int64 if rescanLocalDir { fmt.Println("Listing local sync dir, this can take a long time, please wait patiently...") totalFileCount = dirCache.Cache(uploadConfig.SrcDir, cacheTempName) if rErr := os.Remove(cacheResultName); rErr != nil { log.Debug("Remove the old cached file error", rErr) } if rErr := os.Rename(cacheTempName, cacheResultName); rErr != nil { fmt.Println("Rename the temp cached file error", rErr) return } //write the total count to local file if cFp, cErr := os.Create(cacheCountName); cErr == nil { func() { defer cFp.Close() uploadInfo := UploadInfo{ TotalFileCount: totalFileCount, } uploadInfoBytes, mErr := json.Marshal(&uploadInfo) if mErr == nil { if _, wErr := cFp.Write(uploadInfoBytes); wErr != nil { log.Errorf("Write local cached count file error %s", cErr) } else { cFp.Close() } } }() } else { log.Errorf("Open local cached count file error %s", cErr) } } else { fmt.Println("Use the last cached local sync dir file list ...") //read from local cache if rFp, rErr := os.Open(cacheCountName); rErr == nil { func() { defer rFp.Close() 
uploadInfo := UploadInfo{} decoder := json.NewDecoder(rFp) if dErr := decoder.Decode(&uploadInfo); dErr == nil { totalFileCount = uploadInfo.TotalFileCount } }() } else { log.Warnf("Open local cached count file error %s", rErr) } } //leveldb folder leveldbFileName := filepath.Join(storePath, jobId+".ldb") ldb, err := leveldb.OpenFile(leveldbFileName, nil) if err != nil { log.Errorf("Open leveldb `%s' failed due to `%s'", leveldbFileName, err) return } defer ldb.Close() //sync ufp, err := os.Open(cacheResultName) if err != nil { log.Errorf("Open cache file `%s' failed due to `%s'", cacheResultName, err) return } defer ufp.Close() bScanner := bufio.NewScanner(ufp) bScanner.Split(bufio.ScanLines) var currentFileCount int64 = 0 var successFileCount int64 = 0 var failureFileCount int64 = 0 var skippedFileCount int64 = 0 ldbWOpt := opt.WriteOptions{ Sync: true, } upWorkGroup := sync.WaitGroup{} upCounter := 0 threadThreshold := threadCount + 1 //chunk upload threshold putThreshold := DEFAULT_PUT_THRESHOLD if uploadConfig.PutThreshold > 0 { putThreshold = uploadConfig.PutThreshold } //check zone, default nb switch uploadConfig.Zone { case ZoneAWS: SetZone(ZoneAWSConfig) case ZoneBC: SetZone(ZoneBCConfig) default: SetZone(ZoneNBConfig) } //use host if not empty, overwrite the default config if uploadConfig.UpHost != "" { conf.UP_HOST = uploadConfig.UpHost } //set resume upload settings rio.SetSettings(&upSettings) mac := digest.Mac{uploadConfig.AccessKey, []byte(uploadConfig.SecretKey)} //check bind net interface card var transport *http.Transport var rsClient rs.Client if uploadConfig.BindNicIp != "" { transport = &http.Transport{ Dial: (&net.Dialer{ LocalAddr: &net.TCPAddr{ IP: net.ParseIP(uploadConfig.BindNicIp), }, }).Dial, } } if transport != nil { rsClient = rs.NewMacEx(&mac, transport, "") } else { rsClient = rs.NewMac(&mac) } //check remote rs ip bind if uploadConfig.BindRsIp != "" { rsClient.Conn.BindRemoteIp = uploadConfig.BindRsIp } //scan lines and upload for 
bScanner.Scan() { line := strings.TrimSpace(bScanner.Text()) items := strings.Split(line, "\t") if len(items) != 3 { log.Errorf("Invalid cache line `%s'", line) continue } localFpath := items[0] currentFileCount += 1 skip := false //check skip local file or folder if uploadConfig.SkipPathPrefixes != "" { //unpack skip prefix skipPathPrefixes := strings.Split(uploadConfig.SkipPathPrefixes, ",") for _, prefix := range skipPathPrefixes { if strings.HasPrefix(localFpath, strings.TrimSpace(prefix)) { log.Debug(fmt.Sprintf("Skip by path prefix '%s' for local file %s", strings.TrimSpace(prefix), localFpath)) skip = true skippedFileCount += 1 break } } if skip { continue } } if uploadConfig.SkipFilePrefixes != "" { //unpack skip prefix skipFilePrefixes := strings.Split(uploadConfig.SkipFilePrefixes, ",") for _, prefix := range skipFilePrefixes { localFname := filepath.Base(localFpath) if strings.HasPrefix(localFname, strings.TrimSpace(prefix)) { log.Debug(fmt.Sprintf("Skip by file prefix '%s' for local file %s", strings.TrimSpace(prefix), localFpath)) skip = true skippedFileCount += 1 break } } if skip { continue } } if uploadConfig.SkipSuffixes != "" { skipSuffixes := strings.Split(uploadConfig.SkipSuffixes, ",") for _, suffix := range skipSuffixes { if strings.HasSuffix(localFpath, strings.TrimSpace(suffix)) { log.Debug(fmt.Sprintf("Skip by suffix '%s' for local file %s", strings.TrimSpace(suffix), localFpath)) skip = true skippedFileCount += 1 break } } if skip { continue } } //pack the upload file key localFlmd, _ := strconv.ParseInt(items[2], 10, 64) uploadFileKey := localFpath if uploadConfig.IgnoreDir { if i := strings.LastIndex(uploadFileKey, pathSep); i != -1 { uploadFileKey = uploadFileKey[i+1:] } } if uploadConfig.KeyPrefix != "" { uploadFileKey = strings.Join([]string{uploadConfig.KeyPrefix, uploadFileKey}, "") } //convert \ to / under windows if runtime.GOOS == "windows" { uploadFileKey = strings.Replace(uploadFileKey, "\\", "/", -1) } localFilePath := 
filepath.Join(uploadConfig.SrcDir, localFpath) fstat, err := os.Stat(localFilePath) if err != nil { log.Errorf("Error stat local file `%s' due to `%s'", localFilePath, err) continue } fsize := fstat.Size() ldbKey := fmt.Sprintf("%s => %s", localFilePath, uploadFileKey) if totalFileCount != 0 { fmt.Println(fmt.Sprintf("Uploading %s [%d/%d, %.1f%%] ...", ldbKey, currentFileCount, totalFileCount, float32(currentFileCount)*100/float32(totalFileCount))) } else { fmt.Println(fmt.Sprintf("Uploading %s ...", ldbKey)) } //check exists if uploadConfig.CheckExists { rsEntry, checkErr := rsClient.Stat(nil, uploadConfig.Bucket, uploadFileKey) if checkErr == nil { //compare hash localEtag, cErr := GetEtag(localFilePath) if cErr != nil { atomic.AddInt64(&failureFileCount, 1) log.Error("Calc local file hash failed,", cErr) continue } if rsEntry.Hash == localEtag { atomic.AddInt64(&skippedFileCount, 1) log.Debug(fmt.Sprintf("File %s already exists in bucket, ignore this upload", uploadFileKey)) continue } } else { if _, ok := checkErr.(*rpc.ErrorInfo); !ok { //not logic error, should be network error atomic.AddInt64(&failureFileCount, 1) continue } } } else { //check leveldb ldbFlmd, err := ldb.Get([]byte(ldbKey), nil) flmd, _ := strconv.ParseInt(string(ldbFlmd), 10, 64) //not exist, return ErrNotFound //check last modified if err == nil && localFlmd == flmd { log.Debug("Skip by local log for file", localFpath) atomic.AddInt64(&skippedFileCount, 1) continue } } //worker upCounter += 1 if upCounter%threadThreshold == 0 { upWorkGroup.Wait() } upWorkGroup.Add(1) //start to upload go func() { defer upWorkGroup.Done() policy := rs.PutPolicy{} policy.Scope = uploadConfig.Bucket if uploadConfig.Overwrite { policy.Scope = fmt.Sprintf("%s:%s", uploadConfig.Bucket, uploadFileKey) policy.InsertOnly = 0 } policy.Expires = 30 * 24 * 3600 uptoken := policy.Token(&mac) if fsize > putThreshold { var putClient rpc.Client if transport != nil { putClient = rio.NewClientEx(uptoken, transport, 
uploadConfig.BindUpIp) } else { putClient = rio.NewClient(uptoken, uploadConfig.BindUpIp) } putRet := rio.PutRet{} putExtra := rio.PutExtra{} progressFkey := Md5Hex(fmt.Sprintf("%s:%s|%s:%s", uploadConfig.SrcDir, uploadConfig.Bucket, localFpath, uploadFileKey)) progressFname := fmt.Sprintf("%s.progress", progressFkey) progressFpath := filepath.Join(storePath, progressFname) putExtra.ProgressFile = progressFpath err := rio.PutFile(putClient, nil, &putRet, uploadFileKey, localFilePath, &putExtra) if err != nil { atomic.AddInt64(&failureFileCount, 1) if pErr, ok := err.(*rpc.ErrorInfo); ok { log.Errorf("Put file `%s' => `%s' failed due to `%s'", localFilePath, uploadFileKey, pErr.Err) } else { log.Errorf("Put file `%s' => `%s' failed due to `%s'", localFilePath, uploadFileKey, err) } } else { os.Remove(progressFpath) atomic.AddInt64(&successFileCount, 1) perr := ldb.Put([]byte(ldbKey), []byte(fmt.Sprintf("%d", localFlmd)), &ldbWOpt) if perr != nil { log.Errorf("Put key `%s' into leveldb error due to `%s'", ldbKey, perr) } } } else { var putClient rpc.Client if transport != nil { putClient = rpc.NewClientEx(transport, uploadConfig.BindUpIp) } else { putClient = rpc.NewClient(uploadConfig.BindUpIp) } putRet := fio.PutRet{} err := fio.PutFile(putClient, nil, &putRet, uptoken, uploadFileKey, localFilePath, nil) if err != nil { atomic.AddInt64(&failureFileCount, 1) if pErr, ok := err.(*rpc.ErrorInfo); ok { log.Errorf("Put file `%s' => `%s' failed due to `%s'", localFilePath, uploadFileKey, pErr.Err) } else { log.Errorf("Put file `%s' => `%s' failed due to `%s'", localFilePath, uploadFileKey, err) } } else { atomic.AddInt64(&successFileCount, 1) perr := ldb.Put([]byte(ldbKey), []byte(fmt.Sprintf("%d", localFlmd)), &ldbWOpt) if perr != nil { log.Errorf("Put key `%s' into leveldb error due to `%s'", ldbKey, perr) } } } }() } upWorkGroup.Wait() fmt.Println() fmt.Println("-------Upload Result-------") fmt.Println("Total: \t", currentFileCount) fmt.Println("Success: \t", 
successFileCount) fmt.Println("Failure: \t", failureFileCount) fmt.Println("Skipped: \t", skippedFileCount) fmt.Println("Duration:\t", time.Since(timeStart)) fmt.Println("-------------------------") }
func (this *ListBucket) List(bucket string, prefix string, listResultFile string) (retErr error) { var fp *os.File if listResultFile == "stdout" { fp = os.Stdout } else { var openErr error fp, openErr = os.Create(listResultFile) if openErr != nil { retErr = openErr log.Errorf("Failed to open list result file `%s'", listResultFile) return } } defer fp.Close() bw := bufio.NewWriter(fp) mac := digest.Mac{this.AccessKey, []byte(this.SecretKey)} client := rsf.New(&mac) marker := "" limit := 1000 run := true maxRetryTimes := 5 retryTimes := 1 for run { entries, markerOut, err := client.ListPrefix(nil, bucket, prefix, marker, limit) if err != nil { if err == io.EOF { run = false } else { log.Errorf("List error for marker `%s', %s", marker, err) if retryTimes <= maxRetryTimes { log.Debug(fmt.Sprintf("Retry list for marker `%s' for `%d' time", marker, retryTimes)) retryTimes += 1 continue } else { log.Errorf("List failed too many times for `%s'", marker) break } } } else { retryTimes = 1 if markerOut == "" { run = false } else { marker = markerOut } } //append entries for _, entry := range entries { lineData := fmt.Sprintf("%s\t%d\t%s\t%d\t%s\t%s\r\n", entry.Key, entry.Fsize, entry.Hash, entry.PutTime, entry.MimeType, entry.EndUser) _, wErr := bw.WriteString(lineData) if wErr != nil { log.Errorf("Write line data `%s' to list result file failed.", lineData) } } fErr := bw.Flush() if fErr != nil { log.Error("Flush data to list result file error", err) } } return }