func QiniuDownload(cmd string, params ...string) {
	if len(params) == 1 || len(params) == 2 {
		var threadCount int64 = 5
		var downConfig string
		var err error
		if len(params) == 1 {
			downConfig = params[0]
		} else {
			threadCount, err = strconv.ParseInt(params[0], 10, 64)
			if err != nil {
				log.Error("Invalid value for <ThreadCount>", params[0])
				return
			}
			downConfig = params[1]
		}
		//clamp the thread count to the allowed range
		if threadCount < qshell.MIN_DOWNLOAD_THREAD_COUNT ||
			threadCount > qshell.MAX_DOWNLOAD_THREAD_COUNT {
			log.Warn("<ThreadCount> can only be between",
				qshell.MIN_DOWNLOAD_THREAD_COUNT, "and", qshell.MAX_DOWNLOAD_THREAD_COUNT)
			threadCount = qshell.MIN_DOWNLOAD_THREAD_COUNT
		}
		qshell.QiniuDownload(int(threadCount), downConfig)
	} else {
		CmdHelp(cmd)
	}
}
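// A minimal sketch of how a CLI entry point might route to QiniuDownload;
// the cmds table and dispatch helper below are assumptions for illustration,
// not the actual qshell dispatch code.
var cmds = map[string]func(cmd string, params ...string){
	"qdownload": QiniuDownload,
}

func dispatch(args []string) {
	if len(args) == 0 {
		return
	}
	if handler, ok := cmds[args[0]]; ok {
		//e.g. args = ["qdownload", "10", "qdisk_down.conf"] calls
		//QiniuDownload("qdownload", "10", "qdisk_down.conf")
		handler(args[0], args[1:]...)
	}
}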
func put(c rpc.Client, l rpc.Logger, ret interface{}, key string, hasKey bool,
	f io.ReaderAt, fsize int64, extra *PutExtra) error {

	once.Do(initWorkers)

	blockCnt := BlockCount(fsize)
	if extra == nil {
		extra = new(PutExtra)
	}

	//load the progress file
	var progressWLock sync.RWMutex
	if extra.ProgressFile != "" {
		progressRecord := ProgressRecord{}
		if _, pStatErr := os.Stat(extra.ProgressFile); pStatErr == nil {
			progressFp, openErr := os.Open(extra.ProgressFile)
			if openErr == nil {
				func() {
					defer progressFp.Close()
					decoder := json.NewDecoder(progressFp)
					decodeErr := decoder.Decode(&progressRecord)
					if decodeErr != nil {
						log.Debug(fmt.Sprintf("resumable.Put decode progress record error, %s", decodeErr.Error()))
					}
				}()
			} else {
				log.Debug(fmt.Sprintf("resumable.Put open progress record error, %s", openErr.Error()))
			}
		}

		//load the saved progresses
		if len(progressRecord.Progresses) > 0 {
			//check the expiry of the first progress; resume only if the saved
			//block ctx remains valid for at least another 24 hours
			now := time.Now()
			first := progressRecord.Progresses[0]
			if now.Add(time.Hour*24).Unix() <= first.ExpiredAt {
				//not expired, go ahead
				extra.Progresses = progressRecord.Progresses
			}
		}
	}

	if extra.Progresses == nil {
		extra.Progresses = make([]BlkputRet, blockCnt)
	} else if len(extra.Progresses) != blockCnt {
		return ErrInvalidPutProgress
	}

	if extra.ChunkSize == 0 {
		extra.ChunkSize = settings.ChunkSize
	}
	if extra.TryTimes == 0 {
		extra.TryTimes = settings.TryTimes
	}
	if extra.Notify == nil {
		extra.Notify = notifyNil
	}
	if extra.NotifyErr == nil {
		extra.NotifyErr = notifyErrNil
	}

	var wg sync.WaitGroup
	wg.Add(blockCnt)
	last := blockCnt - 1
	blkSize := 1 << blockBits
	//nfails is updated from multiple worker goroutines, so it must be
	//incremented atomically (requires the sync/atomic import)
	var nfails int32

	for i := 0; i < blockCnt; i++ {
		blkIdx := i
		blkSize1 := blkSize
		if i == last {
			offbase := int64(blkIdx) << blockBits
			blkSize1 = int(fsize - offbase)
		}
		task := func() {
			defer wg.Done()
			tryTimes := extra.TryTimes
		lzRetry:
			err := ResumableBlockput(c, l, &extra.Progresses[blkIdx], f, blkIdx, blkSize1, extra)
			if err != nil {
				if tryTimes > 1 {
					tryTimes--
					log.Info("resumable.Put retrying ...")
					goto lzRetry
				}
				log.Warn("resumable.Put", blkIdx, "failed:", err)
				extra.NotifyErr(blkIdx, blkSize1, err)
				atomic.AddInt32(&nfails, 1)
			} else {
				//record block progress
				if extra.ProgressFile != "" {
					func() {
						progressWLock.Lock()
						defer progressWLock.Unlock()
						progressRecord := ProgressRecord{
							Progresses: extra.Progresses,
						}
						mData, mErr := json.Marshal(progressRecord)
						if mErr == nil {
							wErr := ioutil.WriteFile(extra.ProgressFile, mData, 0644)
							if wErr != nil {
								log.Warn(fmt.Sprintf("resumable.Put record progress error, %s", wErr.Error()))
							}
						} else {
							log.Info(fmt.Sprintf("resumable.Put marshal progress record error, %s", mErr.Error()))
						}
					}()
				}
			}
		}
		tasks <- task
	}

	wg.Wait()
	if atomic.LoadInt32(&nfails) != 0 {
		return ErrPutFailed
	}

	return Mkfile(c, l, ret, key, hasKey, fsize, extra)
}
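// put calls once.Do(initWorkers) and pushes closures into a tasks channel,
// neither of which is shown in this excerpt. A minimal sketch of what that
// shared worker pool might look like; the worker count and queue size below
// are assumed values for illustration, not the SDK's actual settings.
var (
	once  sync.Once
	tasks chan func()
)

func initWorkers() {
	const workers, queueSize = 4, 16 //assumed values
	tasks = make(chan func(), queueSize)
	for i := 0; i < workers; i++ {
		go func() {
			//each worker drains the shared task queue until it is closed
			for task := range tasks {
				task()
			}
		}()
	}
}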
func ResumableBlockput(
	c rpc.Client, l rpc.Logger, ret *BlkputRet, f io.ReaderAt,
	blkIdx, blkSize int, extra *PutExtra) (err error) {

	h := crc32.NewIEEE()
	offbase := int64(blkIdx) << blockBits
	chunkSize := extra.ChunkSize

	var bodyLength int

	//no ctx yet: create the block with its first chunk
	if ret.Ctx == "" {
		if chunkSize < blkSize {
			bodyLength = chunkSize
		} else {
			bodyLength = blkSize
		}

		body1 := io.NewSectionReader(f, offbase, int64(bodyLength))
		body := io.TeeReader(body1, h)

		err = Mkblock(c, l, ret, blkSize, body, bodyLength)
		if err != nil {
			return
		}
		if ret.Crc32 != h.Sum32() || int(ret.Offset) != bodyLength {
			err = ErrUnmatchedChecksum
			return
		}
		extra.Notify(blkIdx, blkSize, ret)
	}

	//upload the remaining chunks of the block
	for int(ret.Offset) < blkSize {

		if chunkSize < blkSize-int(ret.Offset) {
			bodyLength = chunkSize
		} else {
			bodyLength = blkSize - int(ret.Offset)
		}

		tryTimes := extra.TryTimes

	lzRetry:
		h.Reset()
		body1 := io.NewSectionReader(f, offbase+int64(ret.Offset), int64(bodyLength))
		body := io.TeeReader(body1, h)

		err = Blockput(c, l, ret, body, bodyLength)
		if err == nil {
			if ret.Crc32 == h.Sum32() {
				extra.Notify(blkIdx, blkSize, ret)
				continue
			}
			log.Warn("ResumableBlockput: invalid checksum, retry")
			err = ErrUnmatchedChecksum
		} else {
			if ei, ok := err.(*rpc.ErrorInfo); ok && ei.Code == InvalidCtx {
				ret.Ctx = "" // reset the ctx so the caller restarts this block
				log.Warn("ResumableBlockput: invalid ctx, please retry")
				return
			}
			log.Warn("ResumableBlockput: bput failed -", err)
		}
		if tryTimes > 1 {
			tryTimes--
			log.Info("ResumableBlockput retrying ...")
			goto lzRetry
		}
		break
	}
	return
}
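// The chunk-upload path above streams each chunk through io.TeeReader so the
// local crc32 is computed while the body is being sent, then compared with
// the server-reported Crc32. A self-contained sketch of that pattern, with a
// bytes.Reader standing in for the file and draining the reader standing in
// for the HTTP upload:
package main

import (
	"bytes"
	"fmt"
	"hash/crc32"
	"io"
	"io/ioutil"
)

func main() {
	data := bytes.NewReader([]byte("example chunk payload"))
	h := crc32.NewIEEE()

	//every byte read by the consumer is also written into the hash
	body := io.TeeReader(io.NewSectionReader(data, 0, int64(data.Len())), h)

	//stand-in for the HTTP upload: just drain the reader
	sent, _ := ioutil.ReadAll(body)

	fmt.Printf("sent %d bytes, local crc32 %08x\n", len(sent), h.Sum32())
	//a real client would now compare h.Sum32() with the server's Crc32
}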
func Sync(mac *digest.Mac, srcResUrl, bucket, key, upHostIp string) (hash string, err error) {
	if exists, cErr := checkExists(mac, bucket, key); cErr != nil {
		err = cErr
		return
	} else if exists {
		err = errors.New("File with the same key already exists in bucket")
		return
	}

	syncProgress := SyncProgress{}
	//read from the local progress file if it exists, named by md5(bucket+":"+key)
	progressFile := createProgressFileName(bucket, key)
	if statInfo, statErr := os.Stat(progressFile); statErr == nil {
		//check the file's last modified time; if older than five days, ignore it
		if statInfo.ModTime().Add(time.Hour * 24 * 5).After(time.Now()) {
			//try to read the old progress
			progressFh, openErr := os.Open(progressFile)
			if openErr == nil {
				decoder := json.NewDecoder(progressFh)
				decoder.Decode(&syncProgress)
				progressFh.Close()
			}
		}
	}

	//check whether the offset is valid
	if syncProgress.Offset%BLOCK_SIZE != 0 {
		log.Info("Invalid offset from progress file,", syncProgress.Offset)
		syncProgress.Offset = 0
		syncProgress.TotalSize = 0
		syncProgress.BlkCtxs = make([]rio.BlkputRet, 0)
	}
	//check that the offset and the block ctxs agree
	if syncProgress.Offset != 0 && syncProgress.BlkCtxs != nil {
		if int(syncProgress.Offset/BLOCK_SIZE) != len(syncProgress.BlkCtxs) {
			log.Info("Invalid offset and block contexts")
			syncProgress.Offset = 0
			syncProgress.TotalSize = 0
			syncProgress.BlkCtxs = make([]rio.BlkputRet, 0)
		}
	}
	//reset the block ctxs when no progress was found
	if syncProgress.Offset == 0 || syncProgress.BlkCtxs == nil {
		syncProgress.Offset = 0
		syncProgress.TotalSize = 0
		syncProgress.BlkCtxs = make([]rio.BlkputRet, 0)
	}

	//get the total size
	totalSize, hErr := getRemoteFileLength(srcResUrl)
	if hErr != nil {
		err = hErr
		return
	}

	if totalSize != syncProgress.TotalSize {
		if syncProgress.TotalSize != 0 {
			log.Warn("Remote file length changed, progress file out of date")
		}
		syncProgress.Offset = 0
		syncProgress.TotalSize = totalSize
		syncProgress.BlkCtxs = make([]rio.BlkputRet, 0)
	}

	//get the total block count
	totalBlkCnt := 0
	if totalSize%BLOCK_SIZE == 0 {
		totalBlkCnt = int(totalSize / BLOCK_SIZE)
	} else {
		totalBlkCnt = int(totalSize/BLOCK_SIZE) + 1
	}

	//init the range offset
	rangeStartOffset := syncProgress.Offset
	fromBlkIndex := int(rangeStartOffset / BLOCK_SIZE)

	lastBlock := false

	//create the upload token
	policy := rs.PutPolicy{Scope: bucket}
	//token is valid for one year
	policy.Expires = 3600 * 24 * 365
	uptoken := policy.Token(mac)
	putClient := rio.NewClient(uptoken, upHostIp)

	//range get and mkblk upload
	for blkIndex := fromBlkIndex; blkIndex < totalBlkCnt; blkIndex++ {
		if blkIndex == totalBlkCnt-1 {
			lastBlock = true
		}

		syncPercent := fmt.Sprintf("%.2f", float64(blkIndex+1)*100.0/float64(totalBlkCnt))
		log.Info(fmt.Sprintf("Syncing block %d [%s%%] ...", blkIndex, syncPercent))

		blkCtx, pErr := rangeMkblkPipe(srcResUrl, rangeStartOffset, BLOCK_SIZE, lastBlock, putClient)
		if pErr != nil {
			log.Error(pErr.Error())
			time.Sleep(RETRY_INTERVAL)

			for retryTimes := 1; retryTimes <= RETRY_MAX_TIMES; retryTimes++ {
				log.Info(fmt.Sprintf("Retrying %d time range & mkblk block [%d]", retryTimes, blkIndex))
				blkCtx, pErr = rangeMkblkPipe(srcResUrl, rangeStartOffset, BLOCK_SIZE, lastBlock, putClient)
				if pErr != nil {
					log.Error(pErr)
					//wait an interval and retry
					time.Sleep(RETRY_INTERVAL)
					continue
				} else {
					break
				}
			}
		}

		if pErr != nil {
			err = errors.New("Max retry reached and range & mkblk still failed, check your network")
			return
		}

		//advance the range offset
		rangeStartOffset += BLOCK_SIZE

		syncProgress.BlkCtxs = append(syncProgress.BlkCtxs, blkCtx)
		syncProgress.Offset = rangeStartOffset

		rErr := recordProgress(progressFile, syncProgress)
		if rErr != nil {
			log.Info(rErr.Error())
		}
	}

	//make the file
	putRet := rio.PutRet{}
	putExtra := rio.PutExtra{
		Progresses: syncProgress.BlkCtxs,
	}
	mkErr := rio.Mkfile(putClient, nil, &putRet, key, true, totalSize, &putExtra)
	if mkErr != nil {
		err = fmt.Errorf("Mkfile error, %s", mkErr.Error())
		return
	}

	hash = putRet.Hash

	//delete the progress file
	os.Remove(progressFile)

	return
}
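// The comment in Sync says the progress file is named by md5(bucket+":"+key).
// A plausible sketch of createProgressFileName under that assumption (using
// crypto/md5 and encoding/hex); the actual helper and the directory it
// writes to are not shown in this excerpt.
func createProgressFileName(bucket, key string) string {
	h := md5.New()
	h.Write([]byte(bucket + ":" + key))
	//hypothetical location: a dotfile in the working directory
	return fmt.Sprintf(".%s.progress", hex.EncodeToString(h.Sum(nil)))
}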