Example #1
func GenerateToken(cmd string, params ...string) {
	if len(params) >= 2 {
		bucket := params[0]
		key := params[1]
		expires := 360000 // default token lifetime in seconds
		if len(params) == 3 {
			if v, pErr := strconv.Atoi(params[2]); pErr == nil {
				expires = v
			}
		}

		gErr := accountS.Get()
		if gErr != nil {
			fmt.Println(gErr)
			return
		}

		mac := digest.Mac{accountS.AccessKey, []byte(accountS.SecretKey)}

		policy := rs.PutPolicy{
			Scope:   fmt.Sprintf("%s:%s", bucket, key),
			Expires: uint32(expires),
		}
		uptoken := policy.Token(&mac)
		fmt.Printf("Token:: %s\n", uptoken)
	} else {
		CmdHelp(cmd)
	}
}
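A hypothetical invocation of this handler, with placeholder bucket and key names; the trailing argument overrides the default lifetime, and the first argument is only echoed back by CmdHelp on bad input:

GenerateToken("token", "my-bucket", "data/report.csv")         // default lifetime
GenerateToken("token", "my-bucket", "data/report.csv", "7200") // 7200-second lifetime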
Example #2
func TestPrivateImageView(t *testing.T) {

	//first upload an image to test against
	policy := rs.PutPolicy{
		Scope: bucket + ":" + key,
	}
	err := io.PutFile(nil, nil, policy.Token(nil), key, localFile, nil)
	if err != nil {
		t.Errorf("TestPrivateImageView failed: %v", err)
		return
	}

	rawUrl := makeUrl(key)

	iv := ImageView{
		Mode:    2,
		Height:  250,
		Quality: 80,
	}
	imageViewUrl := iv.MakeRequest(rawUrl)
	p := rs.GetPolicy{}
	imageViewUrlWithToken := p.MakeRequest(imageViewUrl, nil)
	resp, err := http.DefaultClient.Get(imageViewUrlWithToken)
	if err != nil {
		t.Errorf("TestPrivateImageView failed: %v", err)
		return
	}
	defer resp.Body.Close()

	if (resp.StatusCode / 100) != 2 {
		t.Errorf("TestPrivateImageView failed: resp.StatusCode = %v", resp.StatusCode)
		return
	}
}
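The GetPolicy signing step at the end of this test is reusable on its own. A minimal sketch built from the same two calls (the helper name is illustrative; passing a nil mac falls back to the SDK's globally configured keys, as the test itself does):

// privateUrl signs a base URL so it can be fetched from a private bucket.
func privateUrl(mac *digest.Mac, baseUrl string) string {
	p := rs.GetPolicy{}
	return p.MakeRequest(baseUrl, mac)
}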
Example #3
func upFile(localFile, bucketName, key string) error {

	policy := rs.PutPolicy{
		Scope: bucketName + ":" + key,
	}
	return qio.PutFile(nil, nil, policy.Token(nil), key, localFile, nil)
}
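A hypothetical call site for this wrapper (paths and names are placeholders):

if err := upFile("./backup/db.sql", "my-bucket", "backup/db.sql"); err != nil {
	fmt.Println("upload failed:", err)
}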
Example #4
// @gist uptoken
func uptoken(bucketName string) string {
	putPolicy := rs.PutPolicy{
		Scope: bucketName,
		// CallbackUrl:  callbackUrl,
		// CallbackBody: callbackBody,
		// ReturnUrl:    returnUrl,
		// ReturnBody:   returnBody,
		// AsyncOps:     asyncOps,
		// EndUser:      endUser,
		// Expires:      expires,
	}
	return putPolicy.Token(nil)
}
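The token this returns is what an upload call consumes. A sketch mirroring Example #3's PutFile pattern (file names are placeholders; qio is the io package alias used there):

token := uptoken("my-bucket")
if err := qio.PutFile(nil, nil, token, "docs/readme.txt", "./readme.txt", nil); err != nil {
	fmt.Println("upload failed:", err)
}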
Example #5
func TestAll(t *testing.T) {

	policy := rs.PutPolicy{
		Scope: bucket,
	}
	token := policy.Token(nil)
	params := map[string]string{"x:1": "1"}
	extra := &PutExtra{
		ChunkSize: 128,
		MimeType:  "text/plain",
		Notify:    blockNotify,
		Params:    params,
	}

	testPut(t, token, nil)
	testPutWithoutKey(t, token, extra)
	testPutFile(t, token, extra)
	testPutFileWithoutKey(t, token, extra)
	testXVar(t, token, extra)
}
Example #6
func FormPut(cmd string, params ...string) {
	if len(params) >= 3 && len(params) <= 6 {
		bucket := params[0]
		key := params[1]
		localFile := params[2]
		mimeType := ""
		upHost := ""
		overwrite := false

		optionalParams := params[3:]
		for _, param := range optionalParams {
			if val, pErr := strconv.ParseBool(param); pErr == nil {
				overwrite = val
				continue
			}

			if strings.HasPrefix(param, "http://") || strings.HasPrefix(param, "https://") {
				upHost = param
				continue
			}

			mimeType = param
		}

		gErr := accountS.Get()
		if gErr != nil {
			fmt.Println(gErr)
			return
		}

		mac := digest.Mac{accountS.AccessKey, []byte(accountS.SecretKey)}
		policy := rs.PutPolicy{}
		if overwrite {
			policy.Scope = fmt.Sprintf("%s:%s", bucket, key)
		} else {
			policy.Scope = bucket
		}

		putExtra := fio.PutExtra{}
		if mimeType != "" {
			putExtra.MimeType = mimeType
		}
		if upHost != "" {
			conf.UP_HOST = upHost
		}
		uptoken := policy.Token(&mac)
		putRet := fio.PutRet{}
		startTime := time.Now()
		fStat, statErr := os.Stat(localFile)
		if statErr != nil {
			fmt.Println("Local file error", statErr)
			return
		}
		fsize := fStat.Size()
		putClient := rpc.NewClient("")
		fmt.Println(fmt.Sprintf("Uploading %s => %s : %s ...", localFile, bucket, key))
		doneSignal := make(chan bool)
		go func(ch chan bool) {
			progressSigns := []string{"|", "/", "-", "\\"}
			for {
				for _, p := range progressSigns {
					// stop spinning once the upload signals completion;
					// the non-blocking receive keeps the spinner animated
					select {
					case <-ch:
						return
					default:
					}
					fmt.Print("\rProgress: ", p)
					os.Stdout.Sync()
					<-time.After(time.Millisecond * 200)
				}
			}
		}(doneSignal)

		err := fio.PutFile(putClient, nil, &putRet, uptoken, key, localFile, &putExtra)
		doneSignal <- true
		fmt.Print("\rProgress: 100%")
		os.Stdout.Sync()
		fmt.Println()

		if err != nil {
			if v, ok := err.(*rpc.ErrorInfo); ok {
				fmt.Println(fmt.Sprintf("Put file error, %d %s, Reqid: %s", v.Code, v.Err, v.Reqid))
			} else {
				fmt.Println("Put file error,", err)
			}
		} else {
			fmt.Println("Put file", localFile, "=>", bucket, ":", putRet.Key, "(", putRet.Hash, ")", "success!")
		}
		lastNano := time.Now().UnixNano() - startTime.UnixNano()
		lastTime := fmt.Sprintf("%.2f", float32(lastNano)/1e9)
		avgSpeed := fmt.Sprintf("%.1f", float32(fsize)*1e6/float32(lastNano))
		fmt.Println("Last time:", lastTime, "s, Average Speed:", avgSpeed, "KB/s")
	} else {
		CmdHelp(cmd)
	}
}
Example #7
func ResumablePut(cmd string, params ...string) {
	if len(params) >= 3 && len(params) <= 6 {
		bucket := params[0]
		key := params[1]
		localFile := params[2]
		mimeType := ""
		upHost := ""
		overwrite := false

		optionalParams := params[3:]
		for _, param := range optionalParams {
			if val, pErr := strconv.ParseBool(param); pErr == nil {
				overwrite = val
				continue
			}

			if strings.HasPrefix(param, "http://") || strings.HasPrefix(param, "https://") {
				upHost = param
				continue
			}

			mimeType = param
		}

		gErr := accountS.Get()
		if gErr != nil {
			fmt.Println(gErr)
			return
		}

		fStat, statErr := os.Stat(localFile)
		if statErr != nil {
			fmt.Println("Local file error", statErr)
			return
		}
		fsize := fStat.Size()

		mac := digest.Mac{accountS.AccessKey, []byte(accountS.SecretKey)}
		policy := rs.PutPolicy{}

		if overwrite {
			policy.Scope = fmt.Sprintf("%s:%s", bucket, key)
		} else {
			policy.Scope = bucket
		}

		putExtra := rio.PutExtra{}
		if mimeType != "" {
			putExtra.MimeType = mimeType
		}

		if upHost != "" {
			conf.UP_HOST = upHost
		}

		progressHandler := ProgressHandler{
			rwLock:  &sync.RWMutex{},
			fsize:   fsize,
			offsets: make(map[int]int64, 0),
		}

		putExtra.Notify = progressHandler.Notify
		putExtra.NotifyErr = progressHandler.NotifyErr
		uptoken := policy.Token(&mac)
		putRet := rio.PutRet{}
		startTime := time.Now()

		rio.SetSettings(&upSettings)
		putClient := rio.NewClient(uptoken, "")
		fmt.Println(fmt.Sprintf("Uploading %s => %s : %s ...", localFile, bucket, key))
		err := rio.PutFile(putClient, nil, &putRet, key, localFile, &putExtra)
		fmt.Println()
		if err != nil {
			if v, ok := err.(*rpc.ErrorInfo); ok {
				fmt.Println(fmt.Sprintf("Put file error, %d %s, Reqid: %s", v.Code, v.Err, v.Reqid))
			} else {
				fmt.Println("Put file error,", err)
			}
		} else {
			fmt.Println("Put file", localFile, "=>", bucket, ":", putRet.Key, "(", putRet.Hash, ")", "success!")
		}
		lastNano := time.Now().UnixNano() - startTime.UnixNano()
		lastTime := fmt.Sprintf("%.2f", float32(lastNano)/1e9)
		avgSpeed := fmt.Sprintf("%.1f", float32(fsize)*1e6/float32(lastNano))
		fmt.Println("Last time:", lastTime, "s, Average Speed:", avgSpeed, "KB/s")
	} else {
		CmdHelp(cmd)
	}
}
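The ProgressHandler wired into putExtra above is project code not shown on this page. A rough sketch of what it plausibly looks like, assuming the api.v6 resumable-io callback signatures and the same imports as the example (fmt, os, sync, rio); this is an assumption, not the project's verified code:

type ProgressHandler struct {
	rwLock  *sync.RWMutex
	fsize   int64
	offsets map[int]int64 // uploaded offset per block index (assumed layout)
}

// Notify is invoked after each chunk succeeds; it records the block's offset
// and reprints an aggregate percentage.
func (p *ProgressHandler) Notify(blkIdx int, blkSize int, ret *rio.BlkputRet) {
	p.rwLock.Lock()
	defer p.rwLock.Unlock()
	p.offsets[blkIdx] = int64(ret.Offset)
	var uploaded int64
	for _, off := range p.offsets {
		uploaded += off
	}
	fmt.Printf("\rProgress: %.2f%%", float64(uploaded)*100/float64(p.fsize))
	os.Stdout.Sync()
}

// NotifyErr is invoked when a chunk upload fails.
func (p *ProgressHandler) NotifyErr(blkIdx int, blkSize int, err error) {
	fmt.Printf("\rBlock %d upload error: %v\n", blkIdx, err)
}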
Example #8
func QiniuUpload(threadCount int, uploadConfig *UploadConfig) {
	timeStart := time.Now()

	//make SrcDir the full path
	uploadConfig.SrcDir, _ = filepath.Abs(uploadConfig.SrcDir)

	dirCache := DirCache{}

	pathSep := string(os.PathSeparator)
	//create job id
	jobId := Md5Hex(fmt.Sprintf("%s:%s", uploadConfig.SrcDir, uploadConfig.Bucket))

	//local storage path
	storePath := filepath.Join(".qshell", "qupload", jobId)
	if err := os.MkdirAll(storePath, 0775); err != nil {
		log.Errorf("Failed to mkdir `%s' due to `%s'", storePath, err)
		return
	}

	//cache file
	rescanLocalDir := false
	cacheResultName := filepath.Join(storePath, jobId+".cache")
	cacheTempName := filepath.Join(storePath, jobId+".cache.temp")
	cacheCountName := filepath.Join(storePath, jobId+".count")

	if _, statErr := os.Stat(cacheResultName); statErr == nil {
		//file already exists
		rescanLocalDir = uploadConfig.RescanLocal
	} else {
		rescanLocalDir = true
	}

	var totalFileCount int64
	if rescanLocalDir {
		fmt.Println("Listing local sync dir, this can take a long time, please wait patiently...")
		totalFileCount = dirCache.Cache(uploadConfig.SrcDir, cacheTempName)
		if rErr := os.Remove(cacheResultName); rErr != nil {
			log.Debug("Remove the old cached file error", rErr)
		}
		if rErr := os.Rename(cacheTempName, cacheResultName); rErr != nil {
			fmt.Println("Rename the temp cached file error", rErr)
			return
		}
		//write the total count to local file
		if cFp, cErr := os.Create(cacheCountName); cErr == nil {
			func() {
				defer cFp.Close()
				uploadInfo := UploadInfo{
					TotalFileCount: totalFileCount,
				}
				uploadInfoBytes, mErr := json.Marshal(&uploadInfo)
				if mErr == nil {
					if _, wErr := cFp.Write(uploadInfoBytes); wErr != nil {
						log.Errorf("Write local cached count file error %s", wErr)
					}
				}
			}()
		} else {
			log.Errorf("Open local cached count file error %s", cErr)
		}
	} else {
		fmt.Println("Use the last cached local sync dir file list ...")
		//read from local cache
		if rFp, rErr := os.Open(cacheCountName); rErr == nil {
			func() {
				defer rFp.Close()
				uploadInfo := UploadInfo{}
				decoder := json.NewDecoder(rFp)
				if dErr := decoder.Decode(&uploadInfo); dErr == nil {
					totalFileCount = uploadInfo.TotalFileCount
				}
			}()
		} else {
			log.Warnf("Open local cached count file error %s", rErr)
		}
	}

	//leveldb folder
	leveldbFileName := filepath.Join(storePath, jobId+".ldb")
	ldb, err := leveldb.OpenFile(leveldbFileName, nil)
	if err != nil {
		log.Errorf("Open leveldb `%s' failed due to `%s'", leveldbFileName, err)
		return
	}
	defer ldb.Close()
	//sync
	ufp, err := os.Open(cacheResultName)
	if err != nil {
		log.Errorf("Open cache file `%s' failed due to `%s'", cacheResultName, err)
		return
	}
	defer ufp.Close()
	bScanner := bufio.NewScanner(ufp)
	bScanner.Split(bufio.ScanLines)

	var currentFileCount int64 = 0
	var successFileCount int64 = 0
	var failureFileCount int64 = 0
	var skippedFileCount int64 = 0

	ldbWOpt := opt.WriteOptions{
		Sync: true,
	}

	upWorkGroup := sync.WaitGroup{}
	upCounter := 0
	threadThreshold := threadCount + 1

	//chunk upload threshold
	putThreshold := DEFAULT_PUT_THRESHOLD
	if uploadConfig.PutThreshold > 0 {
		putThreshold = uploadConfig.PutThreshold
	}

	//check zone, default nb
	switch uploadConfig.Zone {
	case ZoneAWS:
		SetZone(ZoneAWSConfig)
	case ZoneBC:
		SetZone(ZoneBCConfig)
	default:
		SetZone(ZoneNBConfig)
	}

	//use host if not empty, overwrite the default config
	if uploadConfig.UpHost != "" {
		conf.UP_HOST = uploadConfig.UpHost
	}
	//set resume upload settings
	rio.SetSettings(&upSettings)
	mac := digest.Mac{uploadConfig.AccessKey, []byte(uploadConfig.SecretKey)}

	//check bind net interface card
	var transport *http.Transport
	var rsClient rs.Client
	if uploadConfig.BindNicIp != "" {
		transport = &http.Transport{
			Dial: (&net.Dialer{
				LocalAddr: &net.TCPAddr{
					IP: net.ParseIP(uploadConfig.BindNicIp),
				},
			}).Dial,
		}
	}

	if transport != nil {
		rsClient = rs.NewMacEx(&mac, transport, "")
	} else {
		rsClient = rs.NewMac(&mac)
	}

	//check remote rs ip bind
	if uploadConfig.BindRsIp != "" {
		rsClient.Conn.BindRemoteIp = uploadConfig.BindRsIp
	}

	//scan lines and upload
	for bScanner.Scan() {
		line := strings.TrimSpace(bScanner.Text())
		items := strings.Split(line, "\t")
		if len(items) != 3 {
			log.Errorf("Invalid cache line `%s'", line)
			continue
		}

		localFpath := items[0]
		currentFileCount += 1

		skip := false
		//check skip local file or folder
		if uploadConfig.SkipPathPrefixes != "" {
			//unpack skip prefix
			skipPathPrefixes := strings.Split(uploadConfig.SkipPathPrefixes, ",")
			for _, prefix := range skipPathPrefixes {
				if strings.HasPrefix(localFpath, strings.TrimSpace(prefix)) {
					log.Debug(fmt.Sprintf("Skip by path prefix '%s' for local file %s",
						strings.TrimSpace(prefix), localFpath))
					skip = true
					skippedFileCount += 1
					break
				}
			}

			if skip {
				continue
			}
		}

		if uploadConfig.SkipFilePrefixes != "" {
			//unpack skip prefix
			skipFilePrefixes := strings.Split(uploadConfig.SkipFilePrefixes, ",")
			for _, prefix := range skipFilePrefixes {
				localFname := filepath.Base(localFpath)
				if strings.HasPrefix(localFname, strings.TrimSpace(prefix)) {
					log.Debug(fmt.Sprintf("Skip by file prefix '%s' for local file %s",
						strings.TrimSpace(prefix), localFpath))
					skip = true
					skippedFileCount += 1
					break
				}
			}

			if skip {
				continue
			}
		}

		if uploadConfig.SkipSuffixes != "" {
			skipSuffixes := strings.Split(uploadConfig.SkipSuffixes, ",")
			for _, suffix := range skipSuffixes {
				if strings.HasSuffix(localFpath, strings.TrimSpace(suffix)) {
					log.Debug(fmt.Sprintf("Skip by suffix '%s' for local file %s",
						strings.TrimSpace(suffix), localFpath))
					skip = true
					skippedFileCount += 1
					break
				}
			}

			if skip {
				continue
			}
		}

		//pack the upload file key
		localFlmd, _ := strconv.ParseInt(items[2], 10, 64)
		uploadFileKey := localFpath

		if uploadConfig.IgnoreDir {
			if i := strings.LastIndex(uploadFileKey, pathSep); i != -1 {
				uploadFileKey = uploadFileKey[i+1:]
			}
		}
		if uploadConfig.KeyPrefix != "" {
			uploadFileKey = strings.Join([]string{uploadConfig.KeyPrefix, uploadFileKey}, "")
		}
		//convert \ to / under windows
		if runtime.GOOS == "windows" {
			uploadFileKey = strings.Replace(uploadFileKey, "\\", "/", -1)
		}

		localFilePath := filepath.Join(uploadConfig.SrcDir, localFpath)
		fstat, err := os.Stat(localFilePath)
		if err != nil {
			log.Errorf("Error stat local file `%s' due to `%s'", localFilePath, err)
			continue
		}

		fsize := fstat.Size()
		ldbKey := fmt.Sprintf("%s => %s", localFilePath, uploadFileKey)

		if totalFileCount != 0 {
			fmt.Printf("Uploading %s [%d/%d, %.1f%%] ...\n", ldbKey, currentFileCount, totalFileCount,
				float32(currentFileCount)*100/float32(totalFileCount))
		} else {
			fmt.Printf("Uploading %s ...\n", ldbKey)
		}

		//check exists
		if uploadConfig.CheckExists {
			rsEntry, checkErr := rsClient.Stat(nil, uploadConfig.Bucket, uploadFileKey)
			if checkErr == nil {
				//compare hash
				localEtag, cErr := GetEtag(localFilePath)
				if cErr != nil {
					atomic.AddInt64(&failureFileCount, 1)
					log.Error("Calc local file hash failed,", cErr)
					continue
				}
				if rsEntry.Hash == localEtag {
					atomic.AddInt64(&skippedFileCount, 1)
					log.Debug(fmt.Sprintf("File %s already exists in bucket, ignore this upload", uploadFileKey))
					continue
				}
			} else {
				if _, ok := checkErr.(*rpc.ErrorInfo); !ok {
					//not logic error, should be network error
					atomic.AddInt64(&failureFileCount, 1)
					continue
				}
			}
		} else {
			//check leveldb
			ldbFlmd, err := ldb.Get([]byte(ldbKey), nil)
			flmd, _ := strconv.ParseInt(string(ldbFlmd), 10, 64)
			//not exist, return ErrNotFound
			//check last modified

			if err == nil && localFlmd == flmd {
				log.Debug("Skip by local log for file", localFpath)
				atomic.AddInt64(&skippedFileCount, 1)
				continue
			}
		}

		//worker
		upCounter += 1
		if upCounter%threadThreshold == 0 {
			upWorkGroup.Wait()
		}
		upWorkGroup.Add(1)

		//start to upload
		go func() {
			defer upWorkGroup.Done()

			policy := rs.PutPolicy{}
			policy.Scope = uploadConfig.Bucket
			if uploadConfig.Overwrite {
				policy.Scope = fmt.Sprintf("%s:%s", uploadConfig.Bucket, uploadFileKey)
				policy.InsertOnly = 0
			}
			policy.Expires = 30 * 24 * 3600
			uptoken := policy.Token(&mac)
			if fsize > putThreshold {
				var putClient rpc.Client
				if transport != nil {
					putClient = rio.NewClientEx(uptoken, transport, uploadConfig.BindUpIp)
				} else {
					putClient = rio.NewClient(uptoken, uploadConfig.BindUpIp)
				}

				putRet := rio.PutRet{}
				putExtra := rio.PutExtra{}
				progressFkey := Md5Hex(fmt.Sprintf("%s:%s|%s:%s", uploadConfig.SrcDir, uploadConfig.Bucket, localFpath, uploadFileKey))
				progressFname := fmt.Sprintf("%s.progress", progressFkey)
				progressFpath := filepath.Join(storePath, progressFname)
				putExtra.ProgressFile = progressFpath

				err := rio.PutFile(putClient, nil, &putRet, uploadFileKey, localFilePath, &putExtra)
				if err != nil {
					atomic.AddInt64(&failureFileCount, 1)
					if pErr, ok := err.(*rpc.ErrorInfo); ok {
						log.Errorf("Put file `%s' => `%s' failed due to `%s'", localFilePath, uploadFileKey, pErr.Err)
					} else {
						log.Errorf("Put file `%s' => `%s' failed due to `%s'", localFilePath, uploadFileKey, err)
					}
				} else {
					os.Remove(progressFpath)
					atomic.AddInt64(&successFileCount, 1)
					perr := ldb.Put([]byte(ldbKey), []byte(fmt.Sprintf("%d", localFlmd)), &ldbWOpt)
					if perr != nil {
						log.Errorf("Put key `%s' into leveldb error due to `%s'", ldbKey, perr)
					}
				}
			} else {
				var putClient rpc.Client
				if transport != nil {
					putClient = rpc.NewClientEx(transport, uploadConfig.BindUpIp)
				} else {
					putClient = rpc.NewClient(uploadConfig.BindUpIp)
				}

				putRet := fio.PutRet{}
				err := fio.PutFile(putClient, nil, &putRet, uptoken, uploadFileKey, localFilePath, nil)
				if err != nil {
					atomic.AddInt64(&failureFileCount, 1)
					if pErr, ok := err.(*rpc.ErrorInfo); ok {
						log.Errorf("Put file `%s' => `%s' failed due to `%s'", localFilePath, uploadFileKey, pErr.Err)
					} else {
						log.Errorf("Put file `%s' => `%s' failed due to `%s'", localFilePath, uploadFileKey, err)
					}
				} else {
					atomic.AddInt64(&successFileCount, 1)
					perr := ldb.Put([]byte(ldbKey), []byte(fmt.Sprintf("%d", localFlmd)), &ldbWOpt)
					if perr != nil {
						log.Errorf("Put key `%s' into leveldb error due to `%s'", ldbKey, perr)
					}
				}
			}
		}()

	}
	upWorkGroup.Wait()

	fmt.Println()
	fmt.Println("-------Upload Result-------")
	fmt.Println("Total:   \t", currentFileCount)
	fmt.Println("Success: \t", successFileCount)
	fmt.Println("Failure: \t", failureFileCount)
	fmt.Println("Skipped: \t", skippedFileCount)
	fmt.Println("Duration:\t", time.Since(timeStart))
	fmt.Println("-------------------------")

}
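Md5Hex, used above to derive the job id and the progress-file names, is another helper defined elsewhere in the project. Given its name and call sites, a plausible implementation (an assumption, not the project's verified code):

import (
	"crypto/md5"
	"encoding/hex"
)

// Md5Hex returns the hex-encoded MD5 digest of s (assumed implementation).
func Md5Hex(s string) string {
	sum := md5.Sum([]byte(s))
	return hex.EncodeToString(sum[:])
}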
Example #9
File: sync.go Project: auqf/qshell
func Sync(mac *digest.Mac, srcResUrl, bucket, key, upHostIp string) (hash string, err error) {
	if exists, cErr := checkExists(mac, bucket, key); cErr != nil {
		err = cErr
		return
	} else if exists {
		err = errors.New("File with same key already exists in bucket")
		return
	}

	syncProgress := SyncProgress{}
	//read from the local progress file if exists, file name by md5(bucket+":"+key)
	progressFile := createProgressFileName(bucket, key)
	if statInfo, statErr := os.Stat(progressFile); statErr == nil {
		//check file last modified time; if older than five days, ignore it
		if statInfo.ModTime().Add(time.Hour * 24 * 5).After(time.Now()) {
			//try read old progress
			progressFh, openErr := os.Open(progressFile)
			if openErr == nil {
				decoder := json.NewDecoder(progressFh)
				decoder.Decode(&syncProgress)
				progressFh.Close()
			}
		}
	}

	//check offset valid or not
	if syncProgress.Offset%BLOCK_SIZE != 0 {
		log.Info("Invalid offset from progress file,", syncProgress.Offset)
		syncProgress.Offset = 0
		syncProgress.TotalSize = 0
		syncProgress.BlkCtxs = make([]rio.BlkputRet, 0)
	}

	//check offset and blk ctxs
	if syncProgress.Offset != 0 && syncProgress.BlkCtxs != nil {
		if int(syncProgress.Offset/BLOCK_SIZE) != len(syncProgress.BlkCtxs) {
			log.Info("Invalid offset and block contexts")
			syncProgress.Offset = 0
			syncProgress.TotalSize = 0
			syncProgress.BlkCtxs = make([]rio.BlkputRet, 0)
		}
	}

	//check blk ctxs, when no progress found
	if syncProgress.Offset == 0 || syncProgress.BlkCtxs == nil {
		syncProgress.Offset = 0
		syncProgress.TotalSize = 0
		syncProgress.BlkCtxs = make([]rio.BlkputRet, 0)
	}

	//get total size
	totalSize, hErr := getRemoteFileLength(srcResUrl)
	if hErr != nil {
		err = hErr
		return
	}

	if totalSize != syncProgress.TotalSize {
		if syncProgress.TotalSize != 0 {
			log.Info("Remote file length changed, progress file out of date")
		}
		syncProgress.Offset = 0
		syncProgress.TotalSize = totalSize
		syncProgress.BlkCtxs = make([]rio.BlkputRet, 0)
	}

	//get total block count
	totalBlkCnt := 0
	if totalSize%BLOCK_SIZE == 0 {
		totalBlkCnt = int(totalSize / BLOCK_SIZE)
	} else {
		totalBlkCnt = int(totalSize/BLOCK_SIZE) + 1
	}

	//init the range offset
	rangeStartOffset := syncProgress.Offset
	fromBlkIndex := int(rangeStartOffset / BLOCK_SIZE)

	lastBlock := false

	//create upload token
	policy := rs.PutPolicy{Scope: bucket}
	//token is valid for one year
	policy.Expires = 3600 * 24 * 365
	uptoken := policy.Token(mac)
	putClient := rio.NewClient(uptoken, upHostIp)

	//range get and mkblk upload
	for blkIndex := fromBlkIndex; blkIndex < totalBlkCnt; blkIndex++ {
		if blkIndex == totalBlkCnt-1 {
			lastBlock = true
		}

		syncPercent := fmt.Sprintf("%.2f", float64(blkIndex+1)*100.0/float64(totalBlkCnt))
		log.Info(fmt.Sprintf("Syncing block %d [%s%%] ...", blkIndex, syncPercent))
		blkCtx, pErr := rangeMkblkPipe(srcResUrl, rangeStartOffset, BLOCK_SIZE, lastBlock, putClient)
		if pErr != nil {
			log.Error(pErr.Error())
			time.Sleep(RETRY_INTERVAL)

			for retryTimes := 1; retryTimes <= RETRY_MAX_TIMES; retryTimes++ {
				log.Info(fmt.Sprintf("Retrying %d time range & mkblk block [%d]", retryTimes, blkIndex))
				blkCtx, pErr = rangeMkblkPipe(srcResUrl, rangeStartOffset, BLOCK_SIZE, lastBlock, putClient)
				if pErr != nil {
					log.Error(pErr)
					//wait an interval and retry
					time.Sleep(RETRY_INTERVAL)
					continue
				} else {
					break
				}
			}
		}

		if pErr != nil {
			err = errors.New("Max retry reached and range & mkblk still failed, check your network")
			return
		}

		//advance range offset
		rangeStartOffset += BLOCK_SIZE

		syncProgress.BlkCtxs = append(syncProgress.BlkCtxs, blkCtx)
		syncProgress.Offset = rangeStartOffset

		rErr := recordProgress(progressFile, syncProgress)
		if rErr != nil {
			log.Info(rErr.Error())
		}
	}

	//make file
	putRet := rio.PutRet{}
	putExtra := rio.PutExtra{
		Progresses: syncProgress.BlkCtxs,
	}
	mkErr := rio.Mkfile(putClient, nil, &putRet, key, true, totalSize, &putExtra)
	if mkErr != nil {
		err = errors.New(fmt.Sprintf("Mkfile error, %s", mkErr.Error()))
		return
	}

	hash = putRet.Hash

	//delete progress file
	os.Remove(progressFile)

	return
}
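The block-count calculation in this function spells out ceiling division with an explicit remainder branch; a compact equivalent, for reference:

// ceil(totalSize / BLOCK_SIZE) without the remainder check
totalBlkCnt := int((totalSize + BLOCK_SIZE - 1) / BLOCK_SIZE)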