func Sync(cmd string, params ...string) {
	if len(params) == 3 || len(params) == 4 {
		srcResUrl := params[0]
		bucket := params[1]
		key := params[2]
		upHostIp := ""
		if len(params) == 4 {
			upHostIp = params[3]
		}
		gErr := accountS.Get()
		if gErr != nil {
			log.Error(gErr)
			return
		}
		mac := digest.Mac{accountS.AccessKey, []byte(accountS.SecretKey)}
		//sync
		tStart := time.Now()
		hash, sErr := qshell.Sync(&mac, srcResUrl, bucket, key, upHostIp)
		if sErr != nil {
			log.Error(sErr)
			return
		}
		log.Info(fmt.Sprintf("Sync %s => %s:%s (%s) Success, Duration: %s!",
			srcResUrl, bucket, key, hash, time.Since(tStart)))
	} else {
		CmdHelp(cmd)
	}
}
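// A minimal usage sketch (an illustration, not part of the original source):
// invoking the command handler above from a CLI dispatcher. The URL, bucket,
// key, and host IP values are placeholders.
func exampleSyncCmd() {
	//three params: source URL, destination bucket, destination key
	Sync("sync", "http://example.com/data.zip", "my-bucket", "data.zip")
	//four params: the same, plus an explicit upload host IP
	Sync("sync", "http://example.com/data.zip", "my-bucket", "data.zip", "10.30.1.2")
}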
func (this *AliListBucket) ListBucket(listResultFile string) (err error) {
	//open result file
	fp, openErr := os.Create(listResultFile)
	if openErr != nil {
		err = openErr
		return
	}
	defer fp.Close()
	bw := bufio.NewWriter(fp)
	//list bucket by prefix
	marker := ""
	prefixLen := len(this.Prefix)
	ossClient := oss.NewClient(this.DataCenter, this.AccessKeyId, this.AccessKeySecret, 0)
	maxRetryTimes := 5
	retryTimes := 1
	log.Info("Listing the oss bucket...")
	for {
		lbr, lbrErr := ossClient.GetBucket(this.Bucket, this.Prefix, marker, "", "")
		if lbrErr != nil {
			err = lbrErr
			log.Error("List bucket error, marker=[", marker, "]", lbrErr)
			if retryTimes <= maxRetryTimes {
				log.Debug("Retry marker=[", marker, "] for", retryTimes, "time(s)...")
				retryTimes += 1
				continue
			} else {
				break
			}
		} else {
			//clear the error recorded by a failed attempt, otherwise a
			//successful retry would still return the stale error
			err = nil
			retryTimes = 1
		}
		for _, object := range lbr.Contents {
			lmdTime, lmdPErr := time.Parse("2006-01-02T15:04:05.999Z", object.LastModified)
			if lmdPErr != nil {
				log.Error("Parse object last modified error, ", lmdPErr)
				lmdTime = time.Now()
			}
			//line format: key<TAB>size<TAB>putTime (100-nanosecond units)
			bw.WriteString(fmt.Sprintln(fmt.Sprintf("%s\t%d\t%d",
				object.Key[prefixLen:], object.Size, lmdTime.UnixNano()/100)))
		}
		if !lbr.IsTruncated {
			break
		}
		marker = lbr.NextMarker
	}
	fErr := bw.Flush()
	if fErr != nil {
		log.Error("Write data to buffer writer failed", fErr)
		err = fErr
		return
	}
	return err
}
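// A sketch of consuming the list file written by ListBucket above (an
// illustration, not part of the original source). Each line is
// "key<TAB>size<TAB>putTime", with putTime in 100-nanosecond units as
// produced by lmdTime.UnixNano()/100.
func exampleReadListFile(listResultFile string) error {
	fp, openErr := os.Open(listResultFile)
	if openErr != nil {
		return openErr
	}
	defer fp.Close()
	scanner := bufio.NewScanner(fp)
	for scanner.Scan() {
		items := strings.Split(scanner.Text(), "\t")
		if len(items) < 3 {
			continue
		}
		fsize, _ := strconv.ParseInt(items[1], 10, 64)
		fmt.Println("key:", items[0], "size:", fsize)
	}
	return scanner.Err()
}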
func downloadFile(downConfig DownloadConfig, fileKey string) (err error) {
	localFilePath := filepath.Join(downConfig.DestDir, fileKey)
	ldx := strings.LastIndex(localFilePath, string(os.PathSeparator))
	if ldx != -1 {
		localFileDir := localFilePath[:ldx]
		mkdirErr := os.MkdirAll(localFileDir, 0775)
		if mkdirErr != nil {
			err = mkdirErr
			log.Error("MkdirAll failed for", localFileDir, mkdirErr.Error())
			return
		}
	}
	log.Info("Downloading", fileKey, "=>", localFilePath, "...")
	downUrl := strings.Join([]string{downConfig.Domain, fileKey}, "/")
	if downConfig.IsPrivate {
		//sign the url with a 24-hour deadline: <url>?e=<expiry>&token=<sign>
		now := time.Now().Add(time.Hour * 24)
		downUrl = fmt.Sprintf("%s?e=%d", downUrl, now.Unix())
		mac := digest.Mac{downConfig.AccessKey, []byte(downConfig.SecretKey)}
		token := digest.Sign(&mac, []byte(downUrl))
		downUrl = fmt.Sprintf("%s&token=%s", downUrl, token)
	}
	resp, respErr := rpc.DefaultClient.Get(nil, downUrl)
	if respErr != nil {
		err = respErr
		log.Error("Download", fileKey, "failed from url", downUrl, respErr.Error())
		return
	}
	defer resp.Body.Close()
	if resp.StatusCode == 200 {
		localFp, openErr := os.Create(localFilePath)
		if openErr != nil {
			err = openErr
			log.Error("Open local file", localFilePath, "failed", openErr.Error())
			return
		}
		defer localFp.Close()
		_, cpErr := io.Copy(localFp, resp.Body)
		if cpErr != nil {
			err = cpErr
			log.Error("Download", fileKey, "failed", cpErr.Error())
			return
		}
	} else {
		err = errors.New("download failed")
		log.Error("Download", fileKey, "failed from url", downUrl, resp.Status)
		return
	}
	return
}
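// A sketch of calling downloadFile directly (assumption: DownloadConfig can
// be built as a literal; the field names below are taken from the usages
// above, the values are placeholders).
func exampleDownloadOne() {
	cfg := DownloadConfig{
		AccessKey: "<access-key>",
		SecretKey: "<secret-key>",
		Domain:    "http://cdn.example.com",
		DestDir:   "/data/backup",
		IsPrivate: true, //download URLs signed as <url>?e=<expiry>&token=<sign>
	}
	if dErr := downloadFile(cfg, "images/logo.png"); dErr != nil {
		log.Error("Download failed,", dErr)
	}
}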
func put(c rpc.Client, l rpc.Logger, ret interface{}, key string, hasKey bool,
	f io.ReaderAt, fsize int64, extra *PutExtra) error {
	once.Do(initWorkers)

	blockCnt := BlockCount(fsize)
	if extra == nil {
		extra = new(PutExtra)
	}

	//load the progress file
	var progressWLock sync.RWMutex
	if extra.ProgressFile != "" {
		progressRecord := ProgressRecord{}
		if _, pStatErr := os.Stat(extra.ProgressFile); pStatErr == nil {
			progressFp, openErr := os.Open(extra.ProgressFile)
			if openErr == nil {
				func() {
					defer progressFp.Close()
					decoder := json.NewDecoder(progressFp)
					decodeErr := decoder.Decode(&progressRecord)
					if decodeErr != nil {
						log.Debug(fmt.Sprintf("resumable.Put decode progress record error, %s", decodeErr.Error()))
					}
				}()
			} else {
				log.Debug(fmt.Sprintf("resumable.Put open progress record error, %s", openErr.Error()))
			}
		}

		//load in progresses
		if progressRecord.Progresses != nil && len(progressRecord.Progresses) > 0 {
			//check the expire date of the first progress
			now := time.Now()
			first := progressRecord.Progresses[0]
			if now.Add(time.Hour*24).Unix() <= first.ExpiredAt {
				//not expired, go ahead
				extra.Progresses = progressRecord.Progresses
			}
		}
	}

	if extra.Progresses == nil {
		extra.Progresses = make([]BlkputRet, blockCnt)
	} else if len(extra.Progresses) != blockCnt {
		return ErrInvalidPutProgress
	}

	if extra.ChunkSize == 0 {
		extra.ChunkSize = settings.ChunkSize
	}
	if extra.TryTimes == 0 {
		extra.TryTimes = settings.TryTimes
	}
	if extra.Notify == nil {
		extra.Notify = notifyNil
	}
	if extra.NotifyErr == nil {
		extra.NotifyErr = notifyErrNil
	}

	var wg sync.WaitGroup
	wg.Add(blockCnt)
	last := blockCnt - 1
	blkSize := 1 << blockBits
	//tasks run on worker goroutines, so the failure counter must be atomic
	var nfails int32

	for i := 0; i < blockCnt; i++ {
		blkIdx := i
		blkSize1 := blkSize
		if i == last {
			offbase := int64(blkIdx) << blockBits
			blkSize1 = int(fsize - offbase)
		}
		task := func() {
			defer wg.Done()
			tryTimes := extra.TryTimes
		lzRetry:
			err := ResumableBlockput(c, l, &extra.Progresses[blkIdx], f, blkIdx, blkSize1, extra)
			if err != nil {
				if tryTimes > 1 {
					tryTimes--
					log.Info("resumable.Put retrying ...")
					goto lzRetry
				}
				log.Warn("resumable.Put", blkIdx, "failed:", err)
				extra.NotifyErr(blkIdx, blkSize1, err)
				atomic.AddInt32(&nfails, 1)
			} else {
				//record block progress
				if extra.ProgressFile != "" {
					progressWLock.Lock()
					func() {
						defer progressWLock.Unlock()
						progressRecord := ProgressRecord{
							Progresses: extra.Progresses,
						}
						mData, mErr := json.Marshal(progressRecord)
						if mErr == nil {
							wErr := ioutil.WriteFile(extra.ProgressFile, mData, 0644)
							if wErr != nil {
								log.Warn(fmt.Sprintf("resumable.Put record progress error, %s", wErr.Error()))
							}
						} else {
							log.Info(fmt.Sprintf("resumable.Put marshal progress record error, %s", mErr.Error()))
						}
					}()
				}
			}
		}
		tasks <- task
	}

	wg.Wait()
	if nfails != 0 {
		return ErrPutFailed
	}

	return Mkfile(c, l, ret, key, hasKey, fsize, extra)
}
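// A sketch of a caller inside this package enabling resumable progress via
// PutExtra.ProgressFile (assumption: PutRet is the result type consumed by
// Mkfile; the key and path are placeholders). If the process dies mid-upload,
// rerunning with the same progress file resumes from the recorded contexts.
func examplePutWithProgress(c rpc.Client, f *os.File, fsize int64) error {
	ret := PutRet{}
	extra := &PutExtra{
		ProgressFile: "/tmp/archive.progress", //block contexts persisted here
	}
	return put(c, nil, &ret, "backup/archive.tar", true, f, fsize, extra)
}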
func ResumableBlockput(
	c rpc.Client, l rpc.Logger, ret *BlkputRet, f io.ReaderAt,
	blkIdx, blkSize int, extra *PutExtra) (err error) {

	h := crc32.NewIEEE()
	offbase := int64(blkIdx) << blockBits
	chunkSize := extra.ChunkSize

	var bodyLength int

	//no context yet: create the block with its first chunk
	if ret.Ctx == "" {
		if chunkSize < blkSize {
			bodyLength = chunkSize
		} else {
			bodyLength = blkSize
		}

		body1 := io.NewSectionReader(f, offbase, int64(bodyLength))
		body := io.TeeReader(body1, h)

		err = Mkblock(c, l, ret, blkSize, body, bodyLength)
		if err != nil {
			return
		}
		if ret.Crc32 != h.Sum32() || int(ret.Offset) != bodyLength {
			err = ErrUnmatchedChecksum
			return
		}
		extra.Notify(blkIdx, blkSize, ret)
	}

	//upload the remaining chunks of this block
	for int(ret.Offset) < blkSize {
		if chunkSize < blkSize-int(ret.Offset) {
			bodyLength = chunkSize
		} else {
			bodyLength = blkSize - int(ret.Offset)
		}

		tryTimes := extra.TryTimes

	lzRetry:
		h.Reset()
		body1 := io.NewSectionReader(f, offbase+int64(ret.Offset), int64(bodyLength))
		body := io.TeeReader(body1, h)

		err = Blockput(c, l, ret, body, bodyLength)
		if err == nil {
			if ret.Crc32 == h.Sum32() {
				extra.Notify(blkIdx, blkSize, ret)
				continue
			}
			log.Warn("ResumableBlockput: invalid checksum, retry")
			err = ErrUnmatchedChecksum
		} else {
			if ei, ok := err.(*rpc.ErrorInfo); ok && ei.Code == InvalidCtx {
				ret.Ctx = "" // reset
				log.Warn("ResumableBlockput: invalid ctx, please retry")
				return
			}
			log.Warn("ResumableBlockput: bput failed -", err)
		}
		if tryTimes > 1 {
			tryTimes--
			log.Info("ResumableBlockput retrying ...")
			goto lzRetry
		}
		break
	}
	return
}
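// A worked sketch of the block arithmetic used above (assumption:
// blockBits = 22, i.e. 4 MB blocks, Qiniu's standard block size). For a
// 10 MB file this yields 3 blocks, the last one only 2 MB long.
func exampleBlockMath() {
	const blockBits = 22
	fsize := int64(10 << 20)                         //10 MB
	blkSize := int64(1) << blockBits                 //4 MB
	blockCnt := int((fsize + blkSize - 1) / blkSize) //ceil division => 3
	for blkIdx := 0; blkIdx < blockCnt; blkIdx++ {
		offbase := int64(blkIdx) << blockBits //byte offset of this block
		size := blkSize
		if blkIdx == blockCnt-1 {
			size = fsize - offbase //the tail block may be short: 2 MB here
		}
		fmt.Println("block", blkIdx, "offset", offbase, "size", size)
	}
}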
func QiniuDownload(threadCount int, downloadConfigFile string) {
	timeStart := time.Now()
	cnfFp, openErr := os.Open(downloadConfigFile)
	if openErr != nil {
		log.Error("Open download config file", downloadConfigFile, "failed,", openErr)
		return
	}
	defer cnfFp.Close()
	cnfData, rErr := ioutil.ReadAll(cnfFp)
	if rErr != nil {
		log.Error("Read download config file error", rErr)
		return
	}
	downConfig := DownloadConfig{}
	cnfErr := json.Unmarshal(cnfData, &downConfig)
	if cnfErr != nil {
		log.Error("Parse download config error", cnfErr)
		return
	}
	//job id derived from the config, so reruns reuse the same list file
	cnfJson, _ := json.Marshal(&downConfig)
	jobId := fmt.Sprintf("%x", md5.Sum(cnfJson))
	jobListName := fmt.Sprintf("%s.list.txt", jobId)
	acct := Account{
		AccessKey: downConfig.AccessKey,
		SecretKey: downConfig.SecretKey,
	}
	bLister := ListBucket{
		Account: acct,
	}
	log.Info("List bucket...")
	listErr := bLister.List(downConfig.Bucket, downConfig.Prefix, jobListName)
	if listErr != nil {
		log.Error("List bucket error", listErr)
		return
	}
	listFp, openErr := os.Open(jobListName)
	if openErr != nil {
		log.Error("Open list file error", openErr)
		return
	}
	defer listFp.Close()
	listScanner := bufio.NewScanner(listFp)
	listScanner.Split(bufio.ScanLines)
	downWorkGroup := sync.WaitGroup{}
	totalCount := 0
	existsCount := 0
	var successCount int32 = 0
	var failCount int32 = 0
	threadThreshold := threadCount + 1
	for listScanner.Scan() {
		totalCount += 1
		//wait for the current batch of workers before launching the next one
		if totalCount%threadThreshold == 0 {
			downWorkGroup.Wait()
		}
		line := strings.TrimSpace(listScanner.Text())
		items := strings.Split(line, "\t")
		if len(items) > 2 {
			fileKey := items[0]
			//check suffix
			if downConfig.Suffix != "" && !strings.HasSuffix(fileKey, downConfig.Suffix) {
				continue
			}
			fileSize, _ := strconv.ParseInt(items[1], 10, 64)
			//not backed up yet
			if !checkLocalDuplicate(downConfig.DestDir, fileKey, fileSize) {
				downWorkGroup.Add(1)
				go func() {
					defer downWorkGroup.Done()
					downErr := downloadFile(downConfig, fileKey)
					if downErr != nil {
						atomic.AddInt32(&failCount, 1)
					} else {
						atomic.AddInt32(&successCount, 1)
					}
				}()
			} else {
				existsCount += 1
			}
		}
	}
	downWorkGroup.Wait()
	log.Info("-------Download Result-------")
	log.Info("Total:\t", totalCount)
	log.Info("Local:\t", existsCount)
	log.Info("Success:\t", successCount)
	log.Info("Failure:\t", failCount)
	log.Info("Duration:\t", time.Since(timeStart))
	log.Info("-----------------------------")
}
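// A sketch of driving QiniuDownload (the JSON tag names below are an
// assumption; the field names match the DownloadConfig usages above):
//
//	{
//	    "access_key": "<access-key>",
//	    "secret_key": "<secret-key>",
//	    "bucket":     "my-bucket",
//	    "prefix":     "images/",
//	    "suffix":     ".png",
//	    "dest_dir":   "/data/backup",
//	    "domain":     "http://cdn.example.com",
//	    "is_private": true
//	}
func exampleQiniuDownload() {
	//10 concurrent download workers; the config path is a placeholder
	QiniuDownload(10, "qdisk_down.conf")
}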
func Sync(mac *digest.Mac, srcResUrl, bucket, key, upHostIp string) (hash string, err error) {
	if exists, cErr := checkExists(mac, bucket, key); cErr != nil {
		err = cErr
		return
	} else if exists {
		err = errors.New("File with same key already exists in bucket")
		return
	}

	syncProgress := SyncProgress{}
	//read from the local progress file if it exists, file named by md5(bucket+":"+key)
	progressFile := createProgressFileName(bucket, key)
	if statInfo, statErr := os.Stat(progressFile); statErr == nil {
		//check file last modified time, if older than five days, ignore
		if statInfo.ModTime().Add(time.Hour * 24 * 5).After(time.Now()) {
			//try to read the old progress
			progressFh, openErr := os.Open(progressFile)
			if openErr == nil {
				decoder := json.NewDecoder(progressFh)
				decoder.Decode(&syncProgress)
				progressFh.Close()
			}
		}
	}

	//check offset valid or not
	if syncProgress.Offset%BLOCK_SIZE != 0 {
		log.Info("Invalid offset from progress file,", syncProgress.Offset)
		syncProgress.Offset = 0
		syncProgress.TotalSize = 0
		syncProgress.BlkCtxs = make([]rio.BlkputRet, 0)
	}
	//check offset and blk ctxs
	if syncProgress.Offset != 0 && syncProgress.BlkCtxs != nil {
		if int(syncProgress.Offset/BLOCK_SIZE) != len(syncProgress.BlkCtxs) {
			log.Info("Invalid offset and block contexts")
			syncProgress.Offset = 0
			syncProgress.TotalSize = 0
			syncProgress.BlkCtxs = make([]rio.BlkputRet, 0)
		}
	}
	//check blk ctxs, when no progress found
	if syncProgress.Offset == 0 || syncProgress.BlkCtxs == nil {
		syncProgress.Offset = 0
		syncProgress.TotalSize = 0
		syncProgress.BlkCtxs = make([]rio.BlkputRet, 0)
	}

	//get total size
	totalSize, hErr := getRemoteFileLength(srcResUrl)
	if hErr != nil {
		err = hErr
		return
	}
	if totalSize != syncProgress.TotalSize {
		if syncProgress.TotalSize != 0 {
			log.Info("Remote file length changed, progress file out of date")
		}
		syncProgress.Offset = 0
		syncProgress.TotalSize = totalSize
		syncProgress.BlkCtxs = make([]rio.BlkputRet, 0)
	}

	//get total block count
	totalBlkCnt := 0
	if totalSize%BLOCK_SIZE == 0 {
		totalBlkCnt = int(totalSize / BLOCK_SIZE)
	} else {
		totalBlkCnt = int(totalSize/BLOCK_SIZE) + 1
	}

	//init the range offset
	rangeStartOffset := syncProgress.Offset
	fromBlkIndex := int(rangeStartOffset / BLOCK_SIZE)
	lastBlock := false

	//create upload token
	policy := rs.PutPolicy{Scope: bucket}
	//token is valid for one year
	policy.Expires = 3600 * 24 * 365
	uptoken := policy.Token(mac)
	putClient := rio.NewClient(uptoken, upHostIp)

	//range get and mkblk upload
	for blkIndex := fromBlkIndex; blkIndex < totalBlkCnt; blkIndex++ {
		if blkIndex == totalBlkCnt-1 {
			lastBlock = true
		}
		syncPercent := fmt.Sprintf("%.2f", float64(blkIndex+1)*100.0/float64(totalBlkCnt))
		log.Info(fmt.Sprintf("Syncing block %d [%s%%] ...", blkIndex, syncPercent))
		blkCtx, pErr := rangeMkblkPipe(srcResUrl, rangeStartOffset, BLOCK_SIZE, lastBlock, putClient)
		if pErr != nil {
			log.Error(pErr.Error())
			time.Sleep(RETRY_INTERVAL)
			for retryTimes := 1; retryTimes <= RETRY_MAX_TIMES; retryTimes++ {
				log.Info(fmt.Sprintf("Retrying %d time(s) range & mkblk block [%d]", retryTimes, blkIndex))
				blkCtx, pErr = rangeMkblkPipe(srcResUrl, rangeStartOffset, BLOCK_SIZE, lastBlock, putClient)
				if pErr != nil {
					log.Error(pErr)
					//wait an interval and retry
					time.Sleep(RETRY_INTERVAL)
					continue
				} else {
					break
				}
			}
		}
		if pErr != nil {
			err = errors.New("Max retry reached and range & mkblk still failed, check your network")
			return
		}

		//advance range offset
		rangeStartOffset += BLOCK_SIZE

		syncProgress.BlkCtxs = append(syncProgress.BlkCtxs, blkCtx)
		syncProgress.Offset = rangeStartOffset

		rErr := recordProgress(progressFile, syncProgress)
		if rErr != nil {
			log.Info(rErr.Error())
		}
	}

	//make file
	putRet := rio.PutRet{}
	putExtra := rio.PutExtra{
		Progresses: syncProgress.BlkCtxs,
	}
	mkErr := rio.Mkfile(putClient, nil, &putRet, key, true, totalSize, &putExtra)
	if mkErr != nil {
		err = fmt.Errorf("Mkfile error, %s", mkErr.Error())
		return
	}
	hash = putRet.Hash

	//delete progress file
	os.Remove(progressFile)

	return
}
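// A sketch of calling Sync directly, mirroring the command handler at the
// top of this section (the credentials and URL are placeholders; an empty
// upHostIp means the default upload host is used).
func exampleSyncApi() {
	mac := digest.Mac{"<access-key>", []byte("<secret-key>")}
	hash, sErr := Sync(&mac, "http://example.com/big.iso", "my-bucket", "big.iso", "")
	if sErr != nil {
		log.Error(sErr)
		return
	}
	log.Info("Sync done, file hash:", hash)
}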