Example #1
File: utils.go Project: auqf/qshell
func ReqId(cmd string, params ...string) {
	if len(params) == 1 {
		reqId := params[0]
		decodedBytes, err := base64.URLEncoding.DecodeString(reqId)
		if err != nil || len(decodedBytes) < 4 {
			log.Error("Invalid reqid", reqId, err)
			return
		}

		newBytes := decodedBytes[4:]
		newBytesLen := len(newBytes)
		newStr := ""
		for i := newBytesLen - 1; i >= 0; i-- {
			newStr += fmt.Sprintf("%02X", newBytes[i])
		}
		unixNano, err := strconv.ParseInt(newStr, 16, 64)
		if err != nil {
			log.Error("Invalid reqid", reqId, err)
			return
		}
		dstDate := time.Unix(0, unixNano)
	fmt.Printf("%04d-%02d-%02d/%02d-%02d\n", dstDate.Year(), dstDate.Month(), dstDate.Day(),
		dstDate.Hour(), dstDate.Minute())
	} else {
		CmdHelp(cmd)
	}
}
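For reference, a self-contained sketch of the same decoding; the fixed 12-byte layout (4 prefix bytes + 8 bytes of little-endian unix nanoseconds) is an assumption for illustration, since the loop above handles reqids of any length by hex-encoding the bytes in reverse:

package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
	"time"
)

// decodeReqIdTime mirrors the decoding above for the assumed 12-byte case:
// 4 prefix bytes, then 8 bytes of little-endian unix nanoseconds. For an
// 8-byte payload this is equivalent to the reverse hex loop above.
func decodeReqIdTime(reqId string) (time.Time, error) {
	raw, err := base64.URLEncoding.DecodeString(reqId)
	if err != nil {
		return time.Time{}, err
	}
	if len(raw) < 12 {
		return time.Time{}, fmt.Errorf("reqid too short: %d bytes", len(raw))
	}
	nano := int64(binary.LittleEndian.Uint64(raw[4:12]))
	return time.Unix(0, nano), nil
}

func main() {
	//build a fake reqid for the demo: 4 prefix bytes plus the current time
	buf := make([]byte, 12)
	binary.LittleEndian.PutUint64(buf[4:], uint64(time.Now().UnixNano()))
	reqId := base64.URLEncoding.EncodeToString(buf)

	if t, err := decodeReqIdTime(reqId); err == nil {
		fmt.Println(t.Format("2006-01-02/15-04"))
	}
}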
Example #2
File: utils.go Project: auqf/qshell
func Base64Decode(cmd string, params ...string) {
	if len(params) == 1 || len(params) == 2 {
		urlSafe := true
		var err error
		var dataToDecode string
		if len(params) == 2 {
			urlSafe, err = strconv.ParseBool(params[0])
			if err != nil {
				log.Error("Invalid bool value or <UrlSafe>, must true or false")
				return
			}
			dataToDecode = params[1]
		} else {
			dataToDecode = params[0]
		}
		var dataDecoded []byte
		if urlSafe {
			dataDecoded, err = base64.URLEncoding.DecodeString(dataToDecode)
			if err != nil {
				log.Error("Failed to decode `", dataToDecode, "' in url safe mode.")
				return
			}
		} else {
			dataDecoded, err = base64.StdEncoding.DecodeString(dataToDecode)
			if err != nil {
				log.Error("Failed to decode `", dataToDecode, "' in standard mode.")
				return
			}
		}
		fmt.Println(string(dataDecoded))
	} else {
		CmdHelp(cmd)
	}
}
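A tiny runnable illustration of why the <UrlSafe> switch matters: the two base64 alphabets disagree exactly on '+'/'/' versus '-'/'_':

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	//bytes chosen so the encoding differs between the two alphabets:
	//standard base64 uses '+' and '/', the URL-safe variant '-' and '_'
	data := []byte{0xfb, 0xff}
	fmt.Println(base64.StdEncoding.EncodeToString(data)) //prints "+/8="
	fmt.Println(base64.URLEncoding.EncodeToString(data)) //prints "-_8="
}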
Example #3
File: sync.go Project: auqf/qshell
func Sync(cmd string, params ...string) {
	if len(params) == 3 || len(params) == 4 {
		srcResUrl := params[0]
		bucket := params[1]
		key := params[2]
		upHostIp := ""
		if len(params) == 4 {
			upHostIp = params[3]
		}

		gErr := accountS.Get()
		if gErr != nil {
			log.Error(gErr)
			return
		}

		mac := digest.Mac{
			AccessKey: accountS.AccessKey,
			SecretKey: []byte(accountS.SecretKey),
		}

		//sync
		tStart := time.Now()
		hash, sErr := qshell.Sync(&mac, srcResUrl, bucket, key, upHostIp)
		if sErr != nil {
			log.Error(sErr)
			return
		}

		log.Info(fmt.Sprintf("Sync %s => %s:%s (%s) Success, Duration: %s!",
			srcResUrl, bucket, key, hash, time.Since(tStart)))
	} else {
		CmdHelp(cmd)
	}
}
Example #4
func QiniuUpload(cmd string, params ...string) {
	if len(params) == 1 || len(params) == 2 {
		var uploadConfigFile string
		var threadCount int64
		var err error
		if len(params) == 2 {
			threadCount, err = strconv.ParseInt(params[0], 10, 64)
			if err != nil {
				log.Error("Invalid <ThreadCount> value,", params[0])
				return
			}
			uploadConfigFile = params[1]
		} else {
			uploadConfigFile = params[0]
		}

		//read upload config
		fp, err := os.Open(uploadConfigFile)
		if err != nil {
			log.Errorf("Open upload config file `%s` error due to `%s`", uploadConfigFile, err)
			return
		}
		defer fp.Close()
		configData, err := ioutil.ReadAll(fp)
		if err != nil {
			log.Errorf("Read upload config file `%s` error due to `%s`", uploadConfigFile, err)
			return
		}
		var uploadConfig qshell.UploadConfig
		err = json.Unmarshal(configData, &uploadConfig)
		if err != nil {
			log.Errorf("Parse upload config file `%s` errror due to `%s`", uploadConfigFile, err)
			return
		}
		if _, err := os.Stat(uploadConfig.SrcDir); err != nil {
			log.Error("Upload config error for parameter `SrcDir`,", err)
			return
		}
		//upload

		if threadCount < qshell.MIN_UPLOAD_THREAD_COUNT || threadCount > qshell.MAX_UPLOAD_THREAD_COUNT {
			fmt.Printf("Tip: you can set <ThreadCount> value between %d and %d to improve speed\n",
				qshell.MIN_UPLOAD_THREAD_COUNT, qshell.MAX_UPLOAD_THREAD_COUNT)

			if threadCount < qshell.MIN_UPLOAD_THREAD_COUNT {
				threadCount = qshell.MIN_UPLOAD_THREAD_COUNT
			} else if threadCount > qshell.MAX_UPLOAD_THREAD_COUNT {
				threadCount = qshell.MAX_UPLOAD_THREAD_COUNT
			}
		}

		qshell.QiniuUpload(int(threadCount), &uploadConfig)
	} else {
		CmdHelp(cmd)
	}
}
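Since QiniuUpload only shows the parsing side, here is a minimal, self-contained sketch of what a matching config file could look like. The UploadConfig mirror below copies a subset of the fields visible in Example #25; the real struct lives in the qshell package and may carry JSON tags that change the key names:

package main

import (
	"encoding/json"
	"fmt"
)

// UploadConfig mirrors a subset of the fields the qshell struct is shown to
// carry in Example #25; this local copy exists only to keep the demo
// self-contained.
type UploadConfig struct {
	SrcDir    string
	AccessKey string
	SecretKey string
	Bucket    string
	KeyPrefix string
	Overwrite bool
}

func main() {
	//encoding/json matches untagged exported fields case-insensitively,
	//so a config file written like this unmarshals as-is
	raw := []byte(`{
		"SrcDir":    "/data/to/upload",
		"AccessKey": "<access-key>",
		"SecretKey": "<secret-key>",
		"Bucket":    "my-bucket",
		"KeyPrefix": "backup/",
		"Overwrite": true
	}`)
	var cfg UploadConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg)
}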
Example #5
func (this *AliListBucket) ListBucket(listResultFile string) (err error) {
	//open result file
	fp, openErr := os.Create(listResultFile)
	if openErr != nil {
		err = openErr
		return
	}
	defer fp.Close()
	bw := bufio.NewWriter(fp)
	//list bucket by prefix
	marker := ""
	prefixLen := len(this.Prefix)
	ossClient := oss.NewClient(this.DataCenter, this.AccessKeyId, this.AccessKeySecret, 0)
	maxRetryTimes := 5
	retryTimes := 1

	log.Info("Listing the oss bucket...")
	for {
		lbr, lbrErr := ossClient.GetBucket(this.Bucket, this.Prefix, marker, "", "")
		if lbrErr != nil {
			log.Error("List bucket error, ", "marker=[", marker, "]", lbrErr)
			if retryTimes <= maxRetryTimes {
				log.Debug("Retry list for marker=[", marker, "] for the ", retryTimes, " time...")
				retryTimes += 1
				continue
			} else {
				//only report the error once all retries are exhausted
				err = lbrErr
				break
			}
		} else {
			retryTimes = 1
		}
		for _, object := range lbr.Contents {
			lmdTime, lmdPErr := time.Parse("2006-01-02T15:04:05.999Z", object.LastModified)
			if lmdPErr != nil {
				log.Error("Parse object last modified error, ", lmdPErr)
				lmdTime = time.Now()
			}
			bw.WriteString(fmt.Sprintf("%s\t%d\t%d\n", object.Key[prefixLen:], object.Size, lmdTime.UnixNano()/100))
		}
		if !lbr.IsTruncated {
			break
		}
		marker = lbr.NextMarker
	}
	fErr := bw.Flush()
	if fErr != nil {
		log.Error("Write data to buffer writer failed", fErr)
		err = fErr
		return
	}
	return err
}
Example #6
func (this *DirCache) Cache(cacheRootPath string, cacheResultFile string) (fileCount int64) {
	if _, err := os.Stat(cacheResultFile); err != nil {
		log.Debug(fmt.Sprintf("No cache file `%s' found, will create one", cacheResultFile))
	} else {
		os.Remove(cacheResultFile + ".old")
		if rErr := os.Rename(cacheResultFile, cacheResultFile+".old"); rErr != nil {
			log.Error(fmt.Sprintf("Unable to rename cache file, plz manually delete `%s' and `%s.old'",
				cacheResultFile, cacheResultFile))
			log.Error(rErr)
			return
		}
	}
	cacheResultFileH, err := os.OpenFile(cacheResultFile, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0666)
	if err != nil {
		log.Error(fmt.Sprintf("Failed to open cache file `%s'", cacheResultFile))
		return
	}
	defer cacheResultFileH.Close()
	bWriter := bufio.NewWriter(cacheResultFileH)
	walkStart := time.Now()
	log.Debug(fmt.Sprintf("Walk `%s' start from `%s'", cacheRootPath, walkStart.String()))
	filepath.Walk(cacheRootPath, func(path string, fi os.FileInfo, err error) error {
		var retErr error
		//log.Debug(fmt.Sprintf("Walking through `%s'", cacheRootPath))
		if err != nil {
			retErr = err
		} else {
			if !fi.IsDir() {
				relPath := strings.TrimPrefix(strings.TrimPrefix(path, cacheRootPath), string(os.PathSeparator))
				fsize := fi.Size()
				//Unit is 100ns
				flmd := fi.ModTime().UnixNano() / 100
				//log.Debug(fmt.Sprintf("Hit file `%s' size: `%d' mode time: `%d`", relPath, fsize, flmd))
				fmeta := fmt.Sprintf("%s\t%d\t%d\n", relPath, fsize, flmd)
				if _, err := bWriter.WriteString(fmeta); err != nil {
					log.Error(fmt.Sprintf("Failed to write data `%s' to cache file", fmeta))
					retErr = err
				}
				fileCount += 1
			}
		}
		return retErr
	})
	if err := bWriter.Flush(); err != nil {
		log.Error(fmt.Sprintf("Failed to flush to cache file `%s'", cacheResultFile))
	}

	walkEnd := time.Now()
	log.Debug(fmt.Sprintf("Walk `%s' end at `%s'", cacheRootPath, walkEnd.String()))
	log.Debug(fmt.Sprintf("Walk `%s' last for `%s'", cacheRootPath, time.Since(walkStart)))
	return
}
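Note that the third column stores ModTime in units of 100ns (UnixNano()/100), which is why TimestampNano2Date in Example #13 multiplies by 100 again. A minimal round-trip sketch:

package main

import (
	"fmt"
	"time"
)

func main() {
	now := time.Now()
	flmd := now.UnixNano() / 100 //what the cache file stores (100ns units)
	back := time.Unix(0, flmd*100)
	fmt.Println(now, "->", flmd, "->", back) //equal up to 100ns
}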
Example #7
func downloadFile(downConfig DownloadConfig, fileKey string) (err error) {
	localFilePath := filepath.Join(downConfig.DestDir, fileKey)
	ldx := strings.LastIndex(localFilePath, string(os.PathSeparator))
	if ldx != -1 {
		localFileDir := localFilePath[:ldx]
		mkdirErr := os.MkdirAll(localFileDir, 0775)
		if mkdirErr != nil {
			err = mkdirErr
			log.Error("MkdirAll failed for", localFileDir, mkdirErr.Error())
			return
		}
	}
	log.Info("Downloading", fileKey, "=>", localFilePath, "...")
	downUrl := strings.Join([]string{downConfig.Domain, fileKey}, "/")
	if downConfig.IsPrivate {
		//sign the URL so the private link stays valid for 24 hours
		now := time.Now().Add(time.Second * 3600 * 24)
		downUrl = fmt.Sprintf("%s?e=%d", downUrl, now.Unix())
		mac := digest.Mac{AccessKey: downConfig.AccessKey, SecretKey: []byte(downConfig.SecretKey)}
		token := digest.Sign(&mac, []byte(downUrl))
		downUrl = fmt.Sprintf("%s&token=%s", downUrl, token)
	}
	resp, respErr := rpc.DefaultClient.Get(nil, downUrl)
	if respErr != nil {
		err = respErr
		log.Error("Download", fileKey, "failed by url", downUrl, respErr.Error())
		return
	}
	defer resp.Body.Close()
	if resp.StatusCode == 200 {
		localFp, openErr := os.Create(localFilePath)
		if openErr != nil {
			err = openErr
			log.Error("Open local file", localFilePath, "failed", openErr.Error())
			return
		}
		defer localFp.Close()
		_, cpErr := io.Copy(localFp, resp.Body)
		if cpErr != nil {
			err = cpErr
			log.Error("Download", fileKey, "failed", cpErr.Error())
			return
		}
	} else {
		err = errors.New("download failed")
		log.Error("Download", fileKey, "failed by url", downUrl, resp.Status)
		return
	}
	return
}
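A self-contained sketch of what the digest.Sign step produces for a private bucket, assuming Qiniu's documented download-token scheme (HMAC-SHA1 over the URL with the secret key, URL-safe base64, prefixed with the access key); the keys and URL below are placeholders:

package main

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/base64"
	"fmt"
	"time"
)

// signDownloadURL sketches the signing step above under the assumed scheme:
// token = AK + ":" + urlsafe_base64(hmac_sha1(SK, url-with-expiry)).
func signDownloadURL(rawURL, accessKey, secretKey string) string {
	withExpiry := fmt.Sprintf("%s?e=%d", rawURL, time.Now().Add(24*time.Hour).Unix())
	h := hmac.New(sha1.New, []byte(secretKey))
	h.Write([]byte(withExpiry))
	sign := base64.URLEncoding.EncodeToString(h.Sum(nil))
	return fmt.Sprintf("%s&token=%s:%s", withExpiry, accessKey, sign)
}

func main() {
	//placeholder keys; real ones come from the download config
	fmt.Println(signDownloadURL("http://example.com/key.jpg", "<ak>", "<sk>"))
}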
Example #8
func AliListBucket(cmd string, params ...string) {
	if len(params) == 5 || len(params) == 6 {
		dataCenter := params[0]
		bucket := params[1]
		accessKeyId := params[2]
		accessKeySecret := params[3]
		listBucketResultFile := ""
		prefix := ""
		if len(params) == 6 {
			prefix = params[4]
			listBucketResultFile = params[5]
		} else {
			listBucketResultFile = params[4]
		}
		aliListBucket := qshell.AliListBucket{
			DataCenter:      dataCenter,
			Bucket:          bucket,
			AccessKeyId:     accessKeyId,
			AccessKeySecret: accessKeySecret,
			Prefix:          prefix,
		}
		err := aliListBucket.ListBucket(listBucketResultFile)
		if err != nil {
			log.Error("List bucket error,", err)
		}
	} else {
		CmdHelp(cmd)
	}
}
Example #9
func QiniuDownload(cmd string, params ...string) {
	if len(params) == 1 || len(params) == 2 {
		var threadCount int64 = 5
		var downConfig string
		var err error
		if len(params) == 1 {
			downConfig = params[0]
		} else {
			threadCount, err = strconv.ParseInt(params[0], 10, 64)
			if err != nil {
				log.Error("Invalid value for <ThreadCount>", params[0])
				return
			}
			downConfig = params[1]
		}
		if threadCount < qshell.MIN_DOWNLOAD_THREAD_COUNT ||
			threadCount > qshell.MAX_DOWNLOAD_THREAD_COUNT {
			log.Warn("<ThreadCount> can only between 1 and 100")
			threadCount = qshell.MIN_DOWNLOAD_THREAD_COUNT
		}
		qshell.QiniuDownload(int(threadCount), downConfig)
	} else {
		CmdHelp(cmd)
	}
}
Example #10
File: utils.go Project: auqf/qshell
func Base64Encode(cmd string, params ...string) {
	if len(params) == 1 || len(params) == 2 {
		urlSafe := true
		var err error
		var dataToEncode string
		if len(params) == 2 {
			urlSafe, err = strconv.ParseBool(params[0])
			if err != nil {
				log.Error("Invalid bool value or <UrlSafe>, must true or false")
				return
			}
			dataToEncode = params[1]
		} else {
			dataToEncode = params[0]
		}
		dataEncoded := ""
		if urlSafe {
			dataEncoded = base64.URLEncoding.EncodeToString([]byte(dataToEncode))
		} else {
			dataEncoded = base64.StdEncoding.EncodeToString([]byte(dataToEncode))
		}
		fmt.Println(dataEncoded)
	} else {
		CmdHelp(cmd)
	}
}
Example #11
func QiniuUpload(cmd string, params ...string) {
	if len(params) == 1 || len(params) == 2 {
		var uploadConfigFile string
		var threadCount int64
		var err error
		if len(params) == 2 {
			threadCount, err = strconv.ParseInt(params[0], 10, 64)
			if err != nil {
				log.Error("Invalid <ThreadCount> value,", params[0])
				return
			}
			uploadConfigFile = params[1]
		} else {
			uploadConfigFile = params[0]
		}
		if threadCount < qshell.MIN_UPLOAD_THREAD_COUNT ||
			threadCount > qshell.MAX_UPLOAD_THREAD_COUNT {
			fmt.Println("You can set <ThreadCount> value between 1 and 100 to improve speed")
			threadCount = qshell.MIN_UPLOAD_THREAD_COUNT
		}
		qshell.QiniuUpload(int(threadCount), uploadConfigFile)
	} else {
		CmdHelp(cmd)
	}
}
Example #12
File: utils.go Project: auqf/qshell
func Urldecode(cmd string, params ...string) {
	if len(params) == 1 {
		dataToDecode := params[0]
		dataDecoded, err := url.QueryUnescape(dataToDecode)
		if err != nil {
			log.Error("Failed to unescape data `", dataToDecode, "'")
		} else {
			fmt.Println(dataDecoded)
		}
	} else {
		CmdHelp(cmd)
	}
}
Example #13
File: utils.go Project: auqf/qshell
func TimestampNano2Date(cmd string, params ...string) {
	if len(params) == 1 {
		tns, err := strconv.ParseInt(params[0], 10, 64)
		if err != nil {
			log.Error("Invalid nano timestamp value,", params[0])
			return
		}
		//the stored timestamps are in units of 100ns (see the cache file format), hence *100
		t := time.Unix(0, tns*100)
		fmt.Println(t.String())
	} else {
		CmdHelp(cmd)
	}
}
Example #14
File: utils.go Project: auqf/qshell
func TimestampMilli2Date(cmd string, params ...string) {
	if len(params) == 1 {
		tms, err := strconv.ParseInt(params[0], 10, 64)
		if err != nil {
			log.Error("Invalid milli timestamp value,", params[0])
			return
		}
		//keep the sub-second part instead of truncating it
		t := time.Unix(tms/1000, (tms%1000)*1000000)
		fmt.Println(t.String())
	} else {
		CmdHelp(cmd)
	}
}
Example #15
File: utils.go Project: auqf/qshell
func Unzip(cmd string, params ...string) {
	if len(params) == 1 || len(params) == 2 {
		zipFilePath := params[0]
		unzipToDir, err := os.Getwd()
		if err != nil {
			log.Error("Get current work directory failed due to error", err)
			return
		}
		if len(params) == 2 {
			unzipToDir = params[1]
			if _, statErr := os.Stat(unzipToDir); statErr != nil {
				log.Error("Specified <UnzipToDir> is not a valid directory")
				return
			}
		}
		unzipErr := qshell.Unzip(zipFilePath, unzipToDir)
		if unzipErr != nil {
			log.Error("Unzip file failed due to error", unzipErr)
		}
	} else {
		CmdHelp(cmd)
	}
}
Example #16
File: utils.go Project: auqf/qshell
func Date2Timestamp(cmd string, params ...string) {
	if len(params) == 1 {
		secs, err := strconv.ParseInt(params[0], 10, 64)
		if err != nil {
			log.Error("Invalid seconds to now,", params[0])
			return
		}
		t := time.Now()
		t = t.Add(time.Second * time.Duration(secs))
		fmt.Println(t.Unix())
	} else {
		CmdHelp(cmd)
	}
}
Example #17
File: rs.go Project: auqf/qshell
func batchRename(client rs.Client, entries []qshell.RenameEntryPath) {
	ret, err := qshell.BatchRename(client, entries)
	if err != nil {
		fmt.Println("Batch rename error", err)
	}
	if len(ret) > 0 {
		for i, entry := range entries {
			item := ret[i]
			if item.Data.Error != "" {
				log.Error(fmt.Sprintf("Rename '%s' => '%s' Failed, Code :%d", entry.OldKey, entry.NewKey, item.Code))
			} else {
				log.Debug(fmt.Sprintf("Rename '%s' => '%s' Success, Code :%d", entry.OldKey, entry.NewKey, item.Code))
			}
		}
	}
}
Example #18
File: rs.go Project: auqf/qshell
func batchDelete(client rs.Client, entries []rs.EntryPath) {
	ret, err := qshell.BatchDelete(client, entries)
	if err != nil {
		fmt.Println("Batch delete error", err)
	}
	if len(ret) > 0 {
		for i, entry := range entries {
			item := ret[i]
			if item.Data.Error != "" {
				log.Error(fmt.Sprintf("Delete '%s' => '%s' Failed, Code: %d", entry.Bucket, entry.Key, item.Code))
			} else {
				log.Debug(fmt.Sprintf("Delete '%s' => '%s' Success, Code: %d", entry.Bucket, entry.Key, item.Code))
			}
		}
	}
}
Example #19
File: rs.go Project: auqf/qshell
func batchChgm(client rs.Client, entries []qshell.ChgmEntryPath) {
	ret, err := qshell.BatchChgm(client, entries)
	if err != nil {
		fmt.Println("Batch chgm error", err)
	}
	if len(ret) > 0 {
		for i, entry := range entries {
			item := ret[i]
			if item.Data.Error != "" {
				log.Error(fmt.Sprintf("Chgm '%s' => '%s' Failed, Code :%d", entry.Key, entry.MimeType, item.Code))
			} else {
				log.Debug(fmt.Sprintf("Chgm '%s' => '%s' Success, Code :%d", entry.Key, entry.MimeType, item.Code))
			}
		}
	}
}
Example #20
func CheckQrsync(cmd string, params ...string) {
	if len(params) == 3 || len(params) == 4 {
		dirCacheResultFile := params[0]
		listBucketResultFile := params[1]
		ignoreLocalDir, err := strconv.ParseBool(params[2])
		if err != nil {
			log.Error(fmt.Sprintf("Invalid value `%s' for argument <IgnoreLocalDir>", params[2]))
			return
		}
		prefix := ""
		if len(params) == 4 {
			prefix = params[3]
		}
		qshell.CheckQrsync(dirCacheResultFile, listBucketResultFile, ignoreLocalDir, prefix)
	} else {
		CmdHelp(cmd)
	}
}
Example #21
File: ip.go Project: arkfang/qshell
func IpQuery(cmd string, params ...string) {
	if len(params) > 0 {
		client := rs.NewEx(nil)
		for _, ip := range params {
			url := fmt.Sprintf("%s?ip=%s", TAOBAO_IP_QUERY, ip)
			var ipInfo IpInfo
			err := client.Conn.Call(nil, &ipInfo, url)
			if err != nil {
				log.Error("Query ip info failed for", ip, "due to", err)
			} else {
				fmt.Println(fmt.Sprintf("Ip: %-20s => %s", ip, ipInfo))
			}
			<-time.After(time.Second * 1)
		}
	} else {
		CmdHelp(cmd)
	}
}
Example #22
File: rs.go Project: auqf/qshell
func batchCopy(client rs.Client, entries []qshell.CopyEntryPath) {
	ret, err := qshell.BatchCopy(client, entries)
	if err != nil {
		fmt.Println("Batch move error", err)
	}
	if len(ret) > 0 {
		for i, entry := range entries {
			item := ret[i]
			if item.Data.Error != "" {
				log.Error(fmt.Sprintf("Copy '%s:%s' => '%s:%s' Failed, Code :%d",
					entry.SrcBucket, entry.SrcKey, entry.DestBucket, entry.DestKey, item.Code))
			} else {
				log.Debug(fmt.Sprintf("Copy '%s:%s' => '%s:%s' Success, Code :%d",
					entry.SrcBucket, entry.SrcKey, entry.DestBucket, entry.DestKey, item.Code))
			}
		}
	}
}
Example #23
func GetBuckets(cmd string, params ...string) {
	if len(params) == 0 {
		gErr := accountS.Get()
		if gErr != nil {
			fmt.Println(gErr)
			return
		}
		mac := digest.Mac{
			AccessKey: accountS.AccessKey,
			SecretKey: []byte(accountS.SecretKey),
		}
		buckets, err := qshell.GetBuckets(&mac)
		if err != nil {
			log.Error("Get buckets error,", err)
		} else {
			for _, bucket := range buckets {
				fmt.Println(bucket)
			}
		}
	} else {
		CmdHelp(cmd)
	}
}
Example #24
func GetDomainsOfBucket(cmd string, params ...string) {
	if len(params) == 1 {
		bucket := params[0]
		gErr := accountS.Get()
		if gErr != nil {
			fmt.Println(gErr)
			return
		}
		mac := digest.Mac{
			AccessKey: accountS.AccessKey,
			SecretKey: []byte(accountS.SecretKey),
		}
		domains, err := qshell.GetDomainsOfBucket(&mac, bucket)
		if err != nil {
			log.Error("Get domains error,", err)
		} else {
			for _, domain := range domains {
				fmt.Println(domain)
			}
		}
	} else {
		CmdHelp(cmd)
	}
}
Example #25
func QiniuUpload2(cmd string, params ...string) {
	flagSet := flag.NewFlagSet("qupload2", flag.ExitOnError)

	var threadCount int64
	var srcDir string
	var accessKey string
	var secretKey string
	var bucket string
	var putThreshold int64
	var keyPrefix string
	var ignoreDir bool
	var overwrite bool
	var checkExists bool
	var skipFilePrefixes string
	var skipPathPrefixes string
	var skipSuffixes string
	var upHost string
	var zone string
	var bindUpIp string
	var bindRsIp string
	var bindNicIp string
	var rescanLocal bool

	flagSet.Int64Var(&threadCount, "thread-count", 0, "multiple thread count")
	flagSet.StringVar(&srcDir, "src-dir", "", "src dir to upload")
	flagSet.StringVar(&accessKey, "access-key", "", "access key")
	flagSet.StringVar(&secretKey, "secret-key", "", "secret key")
	flagSet.StringVar(&bucket, "bucket", "", "bucket")
	flagSet.Int64Var(&putThreshold, "put-threshold", 0, "chunk upload threshold")
	flagSet.StringVar(&keyPrefix, "key-prefix", "", "key prefix prepended to dest file key")
	flagSet.BoolVar(&ignoreDir, "ignore-dir", false, "ignore the dir in the dest file key")
	flagSet.BoolVar(&overwrite, "overwrite", false, "overwrite the file of same key in bucket")
	flagSet.BoolVar(&checkExists, "check-exists", false, "check file key whether in bucket before upload")
	flagSet.StringVar(&skipFilePrefixes, "skip-file-prefixes", "", "skip files with these file prefixes")
	flagSet.StringVar(&skipPathPrefixes, "skip-path-prefixes", "", "skip files with these relative path prefixes")
	flagSet.StringVar(&skipSuffixes, "skip-suffixes", "", "skip files with these suffixes")
	flagSet.StringVar(&upHost, "up-host", "", "upload host")
	flagSet.StringVar(&zone, "zone", "", "zone of the bucket")
	flagSet.StringVar(&bindUpIp, "bind-up-ip", "", "upload host ip to bind")
	flagSet.StringVar(&bindRsIp, "bind-rs-ip", "", "rs host ip to bind")
	flagSet.StringVar(&bindNicIp, "bind-nic-ip", "", "local network interface card to bind")
	flagSet.BoolVar(&rescanLocal, "rescan-local", false, "rescan local dir to upload newly add files")

	flagSet.Parse(params)

	uploadConfig := qshell.UploadConfig{
		SrcDir:           srcDir,
		AccessKey:        accessKey,
		SecretKey:        secretKey,
		Bucket:           bucket,
		PutThreshold:     putThreshold,
		KeyPrefix:        keyPrefix,
		IgnoreDir:        ignoreDir,
		Overwrite:        overwrite,
		CheckExists:      checkExists,
		SkipFilePrefixes: skipFilePrefixes,
		SkipPathPrefixes: skipPathPrefixes,
		SkipSuffixes:     skipSuffixes,
		RescanLocal:      rescanLocal,
		Zone:             zone,
		UpHost:           upHost,
		BindUpIp:         bindUpIp,
		BindRsIp:         bindRsIp,
		BindNicIp:        bindNicIp,
	}

	//check params
	if uploadConfig.SrcDir == "" {
		fmt.Println("No `--src-dir` specified in upload config")
		return
	}

	if uploadConfig.AccessKey == "" {
		fmt.Println("No `--access-key` specified in upload config")
		return
	}

	if uploadConfig.SecretKey == "" {
		fmt.Println("No `--secret-key` specified in upload config")
		return
	}

	if uploadConfig.Bucket == "" {
		fmt.Println("No `--bucket` specified in upload config")
		return
	}

	if _, err := os.Stat(uploadConfig.SrcDir); err != nil {
		log.Error("Upload config `SrcDir` not exist error,", err)
		return
	}

	if threadCount < qshell.MIN_UPLOAD_THREAD_COUNT ||
		threadCount > qshell.MAX_UPLOAD_THREAD_COUNT {
		fmt.Println("You can set `--thread-count` value between 1 and 100 to improve speed")
		threadCount = qshell.MIN_UPLOAD_THREAD_COUNT
	}

	qshell.QiniuUpload(int(threadCount), &uploadConfig)
}
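For reference, the flag set above registers as `qupload2`, so a typical invocation (all values hypothetical) might look like: qshell qupload2 -src-dir=/data/to/upload -access-key=<ak> -secret-key=<sk> -bucket=my-bucket -thread-count=10. Go's flag package accepts both -flag and --flag forms, and every field of UploadConfig that the file-driven variant in Example #4 reads from JSON is exposed here as a flag instead.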
Example #26
File: sync.go Project: auqf/qshell
func Sync(mac *digest.Mac, srcResUrl, bucket, key, upHostIp string) (hash string, err error) {
	if exists, cErr := checkExists(mac, bucket, key); cErr != nil {
		err = cErr
		return
	} else if exists {
		err = errors.New("File with same key already exists in bucket")
		return
	}

	syncProgress := SyncProgress{}
	//read from the local progress file if exists, file name by md5(bucket+":"+key)
	progressFile := createProgressFileName(bucket, key)
	if statInfo, statErr := os.Stat(progressFile); statErr == nil {
		//check file last modified time, if older than five days, ignore
		if statInfo.ModTime().Add(time.Hour * 24 * 5).After(time.Now()) {
			//try read old progress
			progressFh, openErr := os.Open(progressFile)
			if openErr == nil {
				decoder := json.NewDecoder(progressFh)
				decoder.Decode(&syncProgress)
				progressFh.Close()
			}
		}
	}

	//check offset valid or not
	if syncProgress.Offset%BLOCK_SIZE != 0 {
		log.Info("Invalid offset from progress file,", syncProgress.Offset)
		syncProgress.Offset = 0
		syncProgress.TotalSize = 0
		syncProgress.BlkCtxs = make([]rio.BlkputRet, 0)
	}

	//check offset and blk ctxs
	if syncProgress.Offset != 0 && syncProgress.BlkCtxs != nil {
		if int(syncProgress.Offset/BLOCK_SIZE) != len(syncProgress.BlkCtxs) {
			log.Info("Invalid offset and block contexts")
			syncProgress.Offset = 0
			syncProgress.TotalSize = 0
			syncProgress.BlkCtxs = make([]rio.BlkputRet, 0)
		}
	}

	//check blk ctxs, when no progress found
	if syncProgress.Offset == 0 || syncProgress.BlkCtxs == nil {
		syncProgress.Offset = 0
		syncProgress.TotalSize = 0
		syncProgress.BlkCtxs = make([]rio.BlkputRet, 0)
	}

	//get total size
	totalSize, hErr := getRemoteFileLength(srcResUrl)
	if hErr != nil {
		err = hErr
		return
	}

	if totalSize != syncProgress.TotalSize {
		if syncProgress.TotalSize != 0 {
			log.Info("Remote file length changed, progress file out of date")
		}
		syncProgress.Offset = 0
		syncProgress.TotalSize = totalSize
		syncProgress.BlkCtxs = make([]rio.BlkputRet, 0)
	}

	//get total block count
	totalBlkCnt := 0
	if totalSize%BLOCK_SIZE == 0 {
		totalBlkCnt = int(totalSize / BLOCK_SIZE)
	} else {
		totalBlkCnt = int(totalSize/BLOCK_SIZE) + 1
	}

	//init the range offset
	rangeStartOffset := syncProgress.Offset
	fromBlkIndex := int(rangeStartOffset / BLOCK_SIZE)

	lastBlock := false

	//create upload token
	policy := rs.PutPolicy{Scope: bucket}
	//token is valid for one year
	policy.Expires = 3600 * 24 * 365
	uptoken := policy.Token(mac)
	putClient := rio.NewClient(uptoken, upHostIp)

	//range get and mkblk upload
	for blkIndex := fromBlkIndex; blkIndex < totalBlkCnt; blkIndex++ {
		if blkIndex == totalBlkCnt-1 {
			lastBlock = true
		}

		syncPercent := fmt.Sprintf("%.2f", float64(blkIndex+1)*100.0/float64(totalBlkCnt))
		log.Info(fmt.Sprintf("Syncing block %d [%s%%] ...", blkIndex, syncPercent))
		blkCtx, pErr := rangeMkblkPipe(srcResUrl, rangeStartOffset, BLOCK_SIZE, lastBlock, putClient)
		if pErr != nil {
			log.Error(pErr.Error())
			time.Sleep(RETRY_INTERVAL)

			for retryTimes := 1; retryTimes <= RETRY_MAX_TIMES; retryTimes++ {
				log.Info(fmt.Sprintf("Retrying %d time range & mkblk block [%d]", retryTimes, blkIndex))
				blkCtx, pErr = rangeMkblkPipe(srcResUrl, rangeStartOffset, BLOCK_SIZE, lastBlock, putClient)
				if pErr != nil {
					log.Error(pErr)
					//wait an interval and retry
					time.Sleep(RETRY_INTERVAL)
					continue
				} else {
					break
				}
			}
		}

		if pErr != nil {
			err = errors.New("Max retry reached and range & mkblk still failed, check your network")
			return
		}

		//advance range offset
		rangeStartOffset += BLOCK_SIZE

		syncProgress.BlkCtxs = append(syncProgress.BlkCtxs, blkCtx)
		syncProgress.Offset = rangeStartOffset

		rErr := recordProgress(progressFile, syncProgress)
		if rErr != nil {
			log.Info(rErr.Error())
		}
	}

	//make file
	putRet := rio.PutRet{}
	putExtra := rio.PutExtra{
		Progresses: syncProgress.BlkCtxs,
	}
	mkErr := rio.Mkfile(putClient, nil, &putRet, key, true, totalSize, &putExtra)
	if mkErr != nil {
		err = fmt.Errorf("Mkfile error, %s", mkErr.Error())
		return
	}

	hash = putRet.Hash

	//delete progress file
	os.Remove(progressFile)

	return
}
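The branchy total-block computation above is plain ceiling division; a tiny runnable check (BLOCK_SIZE is assumed here to be Qiniu's 4MB mkblk block size, since the constant itself is not shown in this listing):

package main

import "fmt"

// BLOCK_SIZE is an assumption for the demo; the identity below holds for
// any positive block size.
const BLOCK_SIZE = 4 * 1024 * 1024

func main() {
	for _, totalSize := range []int64{0, 1, BLOCK_SIZE, BLOCK_SIZE + 1} {
		//equivalent to the if/else computation above
		totalBlkCnt := (totalSize + BLOCK_SIZE - 1) / BLOCK_SIZE
		fmt.Println(totalSize, "->", totalBlkCnt, "block(s)")
	}
}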
Example #27
func QiniuDownload(threadCount int, downloadConfigFile string) {
	timeStart := time.Now()
	cnfFp, openErr := os.Open(downloadConfigFile)
	if openErr != nil {
		log.Error("Open download config file", downloadConfigFile, "failed,", openErr)
		return
	}
	defer cnfFp.Close()
	cnfData, rErr := ioutil.ReadAll(cnfFp)
	if rErr != nil {
		log.Error("Read download config file error", rErr)
		return
	}
	downConfig := DownloadConfig{}
	cnfErr := json.Unmarshal(cnfData, &downConfig)
	if cnfErr != nil {
		log.Error("Parse download config error", cnfErr)
		return
	}
	cnfJson, _ := json.Marshal(&downConfig)
	jobId := fmt.Sprintf("%x", md5.Sum(cnfJson))
	jobListName := fmt.Sprintf("%s.list.txt", jobId)
	acct := Account{
		AccessKey: downConfig.AccessKey,
		SecretKey: downConfig.SecretKey,
	}
	bLister := ListBucket{
		Account: acct,
	}
	log.Info("List bucket...")
	listErr := bLister.List(downConfig.Bucket, downConfig.Prefix, jobListName)
	if listErr != nil {
		log.Error("List bucket error", listErr)
		return
	}
	listFp, openErr := os.Open(jobListName)
	if openErr != nil {
		log.Error("Open list file error", openErr)
		return
	}
	defer listFp.Close()
	listScanner := bufio.NewScanner(listFp)
	listScanner.Split(bufio.ScanLines)
	downWorkGroup := sync.WaitGroup{}

	totalCount := 0
	existsCount := 0

	var successCount int32 = 0
	var failCount int32 = 0

	threadThreshold := threadCount + 1
	for listScanner.Scan() {
		totalCount += 1
		if totalCount%threadThreshold == 0 {
			downWorkGroup.Wait()
		}
		line := strings.TrimSpace(listScanner.Text())
		items := strings.Split(line, "\t")
		if len(items) > 2 {
			fileKey := items[0]
			//check suffix
			if downConfig.Suffix != "" && !strings.HasSuffix(fileKey, downConfig.Suffix) {
				continue
			}
			fileSize, _ := strconv.ParseInt(items[1], 10, 64)
			//not backup yet
			if !checkLocalDuplicate(downConfig.DestDir, fileKey, fileSize) {
				downWorkGroup.Add(1)
				go func() {
					defer downWorkGroup.Done()
					downErr := downloadFile(downConfig, fileKey)
					if downErr != nil {
						atomic.AddInt32(&failCount, 1)
					} else {
						atomic.AddInt32(&successCount, 1)
					}
				}()
			} else {
				existsCount += 1
			}
		}
	}
	downWorkGroup.Wait()

	log.Info("-------Download Result-------")
	log.Info("Total:\t", totalCount)
	log.Info("Local:\t", existsCount)
	log.Info("Success:\t", successCount)
	log.Info("Failure:\t", failCount)
	log.Info("Duration:\t", time.Since(timeStart))
	log.Info("-----------------------------")
}
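The totalCount%threadThreshold batching above waits for a whole batch to finish before starting the next one. A common alternative, sketched below without assuming anything about the qshell code, is a buffered channel used as a semaphore, which keeps up to threadCount downloads in flight at all times (the sleep stands in for downloadFile):

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	threadCount := 3
	sem := make(chan struct{}, threadCount)
	var wg sync.WaitGroup

	for i := 0; i < 10; i++ {
		wg.Add(1)
		sem <- struct{}{} //blocks while threadCount workers are busy
		go func(n int) {
			defer wg.Done()
			defer func() { <-sem }() //release the slot when done
			time.Sleep(50 * time.Millisecond) //pretend download
			fmt.Println("done", n)
		}(i)
	}
	wg.Wait()
}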
Example #28
func CheckQrsync(dirCacheResultFile string, listBucketResultFile string, ignoreLocalDir bool, prefix string) {
	dirCacheFp, err := os.Open(dirCacheResultFile)
	if err != nil {
		log.Error("Open DirCacheResultFile failed,", err)
		return
	}
	defer dirCacheFp.Close()
	listBucketFp, err := os.Open(listBucketResultFile)
	if err != nil {
		log.Error("Open ListBucketResultFile failed,", err)
		return
	}
	defer listBucketFp.Close()
	//read all list result
	listResultDataMap := make(map[string]int64)
	lbfScanner := bufio.NewScanner(listBucketFp)
	lbfScanner.Split(bufio.ScanLines)
	for lbfScanner.Scan() {
		line := strings.TrimSpace(lbfScanner.Text())
		items := strings.Split(line, "\t")
		if len(items) >= 2 {
			fname := items[0]
			fsize, err := strconv.ParseInt(items[1], 10, 64)
			if err != nil {
				continue
			} else {
				listResultDataMap[fname] = fsize
			}
		} else {
			continue
		}
	}
	allUploaded := true
	//iter each local file and compare
	dcrScanner := bufio.NewScanner(dirCacheFp)
	dcrScanner.Split(bufio.ScanLines)
	for dcrScanner.Scan() {
		line := strings.TrimSpace(dcrScanner.Text())
		items := strings.Split(line, "\t")
		if len(items) >= 2 {
			localFname := items[0]
			remoteFname := localFname
			if ignoreLocalDir {
				ldx := strings.LastIndex(remoteFname, string(os.PathSeparator))
				remoteFname = remoteFname[ldx+1:]
			}
			if prefix != "" {
				remoteFname = prefix + remoteFname
			}
			//convert \ to / under windows
			if runtime.GOOS == "windows" {
				remoteFname = strings.Replace(remoteFname, "\\", "/", -1)
			}
			fsize, err := strconv.ParseInt(items[1], 10, 64)
			if err != nil {
				continue
			}
			if rFsize, ok := listResultDataMap[remoteFname]; ok {
				if rFsize != fsize {
					allUploaded = false
					log.Error("Uploaded but size not ok for file ", localFname, "=>", remoteFname)
				}
			} else {
				allUploaded = false
				log.Error("Not uploaded for file ", localFname, "=>", remoteFname)
			}
		} else {
			continue
		}
	}
	if allUploaded {
		fmt.Println("All Uploaded!")
	}
}
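The local-name to bucket-key mapping above (drop the directory part when ignoreLocalDir is set, prepend the prefix, normalize Windows separators) is easy to isolate; a small sketch, with the path separator passed in so the demo runs the same on any OS:

package main

import (
	"fmt"
	"strings"
)

// remoteKeyFor mirrors the mapping used above; pathSep is a parameter here
// only so the demo is portable (the real code uses os.PathSeparator and
// replaces backslashes only on Windows).
func remoteKeyFor(localName, prefix, pathSep string, ignoreLocalDir bool) string {
	key := localName
	if ignoreLocalDir {
		if ldx := strings.LastIndex(key, pathSep); ldx != -1 {
			key = key[ldx+1:]
		}
	}
	key = prefix + key
	return strings.Replace(key, "\\", "/", -1)
}

func main() {
	fmt.Println(remoteKeyFor(`photos\2016\cat.jpg`, "backup/", `\`, false))
	//-> backup/photos/2016/cat.jpg
	fmt.Println(remoteKeyFor(`photos\2016\cat.jpg`, "backup/", `\`, true))
	//-> backup/cat.jpg
}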
Example #29
func (this *ListBucket) List(bucket string, prefix string, listResultFile string) (retErr error) {
	var fp *os.File
	if listResultFile == "stdout" {
		fp = os.Stdout
	} else {
		var openErr error
		fp, openErr = os.Create(listResultFile)
		if openErr != nil {
			retErr = openErr
			log.Errorf("Failed to open list result file `%s'", listResultFile)
			return
		}
	}
	defer fp.Close()
	bw := bufio.NewWriter(fp)

	mac := digest.Mac{this.AccessKey, []byte(this.SecretKey)}
	client := rsf.New(&mac)
	marker := ""
	limit := 1000
	run := true
	maxRetryTimes := 5
	retryTimes := 1
	for run {
		entries, markerOut, err := client.ListPrefix(nil, bucket, prefix, marker, limit)
		if err != nil {
			if err == io.EOF {
				run = false
			} else {
				log.Errorf("List error for marker `%s', %s", marker, err)
				if retryTimes <= maxRetryTimes {
					log.Debug(fmt.Sprintf("Retry list for marker `%s' for `%d' time", marker, retryTimes))
					retryTimes += 1
					continue
				} else {
					log.Errorf("List failed too many times for `%s'", marker)
					retErr = err
					break
				}
			}
		} else {
			retryTimes = 1
			if markerOut == "" {
				run = false
			} else {
				marker = markerOut
			}
		}
		//append entries
		for _, entry := range entries {
			lineData := fmt.Sprintf("%s\t%d\t%s\t%d\t%s\t%s\r\n", entry.Key, entry.Fsize, entry.Hash, entry.PutTime, entry.MimeType, entry.EndUser)
			_, wErr := bw.WriteString(lineData)
			if wErr != nil {
				log.Errorf("Write line data `%s' to list result file failed.", lineData)
			}
		}
		fErr := bw.Flush()
		if fErr != nil {
			log.Error("Flush data to list result file error", err)
		}
	}
	return
}
Example #30
func QiniuUpload(threadCount int, uploadConfig *UploadConfig) {
	timeStart := time.Now()

	//make SrcDir the full path
	uploadConfig.SrcDir, _ = filepath.Abs(uploadConfig.SrcDir)

	dirCache := DirCache{}

	pathSep := string(os.PathSeparator)
	//create job id
	jobId := Md5Hex(fmt.Sprintf("%s:%s", uploadConfig.SrcDir, uploadConfig.Bucket))

	//local storage path
	storePath := filepath.Join(".qshell", "qupload", jobId)
	if err := os.MkdirAll(storePath, 0775); err != nil {
		log.Errorf("Failed to mkdir `%s' due to `%s'", storePath, err)
		return
	}

	//cache file
	rescanLocalDir := false
	cacheResultName := filepath.Join(storePath, jobId+".cache")
	cacheTempName := filepath.Join(storePath, jobId+".cache.temp")
	cacheCountName := filepath.Join(storePath, jobId+".count")

	if _, statErr := os.Stat(cacheResultName); statErr == nil {
		//file already exists
		rescanLocalDir = uploadConfig.RescanLocal
	} else {
		rescanLocalDir = true
	}

	var totalFileCount int64
	if rescanLocalDir {
		fmt.Println("Listing local sync dir, this can take a long time, please wait patiently...")
		totalFileCount = dirCache.Cache(uploadConfig.SrcDir, cacheTempName)
		if rErr := os.Remove(cacheResultName); rErr != nil {
			log.Debug("Remove the old cached file error", rErr)
		}
		if rErr := os.Rename(cacheTempName, cacheResultName); rErr != nil {
			fmt.Println("Rename the temp cached file error", rErr)
			return
		}
		//write the total count to local file
		if cFp, cErr := os.Create(cacheCountName); cErr == nil {
			func() {
				defer cFp.Close()
				uploadInfo := UploadInfo{
					TotalFileCount: totalFileCount,
				}
				uploadInfoBytes, mErr := json.Marshal(&uploadInfo)
				if mErr == nil {
					if _, wErr := cFp.Write(uploadInfoBytes); wErr != nil {
						//log the write error (cErr is nil on this path)
						log.Errorf("Write local cached count file error %s", wErr)
					}
				}
			}()
		} else {
			log.Errorf("Open local cached count file error %s", cErr)
		}
	} else {
		fmt.Println("Use the last cached local sync dir file list ...")
		//read from local cache
		if rFp, rErr := os.Open(cacheCountName); rErr == nil {
			func() {
				defer rFp.Close()
				uploadInfo := UploadInfo{}
				decoder := json.NewDecoder(rFp)
				if dErr := decoder.Decode(&uploadInfo); dErr == nil {
					totalFileCount = uploadInfo.TotalFileCount
				}
			}()
		} else {
			log.Warnf("Open local cached count file error %s", rErr)
		}
	}

	//leveldb folder
	leveldbFileName := filepath.Join(storePath, jobId+".ldb")
	ldb, err := leveldb.OpenFile(leveldbFileName, nil)
	if err != nil {
		log.Errorf("Open leveldb `%s' failed due to `%s'", leveldbFileName, err)
		return
	}
	defer ldb.Close()
	//sync
	ufp, err := os.Open(cacheResultName)
	if err != nil {
		log.Errorf("Open cache file `%s' failed due to `%s'", cacheResultName, err)
		return
	}
	defer ufp.Close()
	bScanner := bufio.NewScanner(ufp)
	bScanner.Split(bufio.ScanLines)

	var currentFileCount int64 = 0
	var successFileCount int64 = 0
	var failureFileCount int64 = 0
	var skippedFileCount int64 = 0

	ldbWOpt := opt.WriteOptions{
		Sync: true,
	}

	upWorkGroup := sync.WaitGroup{}
	upCounter := 0
	threadThreshold := threadCount + 1

	//chunk upload threshold
	putThreshold := DEFAULT_PUT_THRESHOLD
	if uploadConfig.PutThreshold > 0 {
		putThreshold = uploadConfig.PutThreshold
	}

	//check zone, default nb
	switch uploadConfig.Zone {
	case ZoneAWS:
		SetZone(ZoneAWSConfig)
	case ZoneBC:
		SetZone(ZoneBCConfig)
	default:
		SetZone(ZoneNBConfig)
	}

	//use host if not empty, overwrite the default config
	if uploadConfig.UpHost != "" {
		conf.UP_HOST = uploadConfig.UpHost
	}
	//set resume upload settings
	rio.SetSettings(&upSettings)
	mac := digest.Mac{uploadConfig.AccessKey, []byte(uploadConfig.SecretKey)}

	//check bind net interface card
	var transport *http.Transport
	var rsClient rs.Client
	if uploadConfig.BindNicIp != "" {
		transport = &http.Transport{
			Dial: (&net.Dialer{
				LocalAddr: &net.TCPAddr{
					IP: net.ParseIP(uploadConfig.BindNicIp),
				},
			}).Dial,
		}
	}

	if transport != nil {
		rsClient = rs.NewMacEx(&mac, transport, "")
	} else {
		rsClient = rs.NewMac(&mac)
	}

	//check remote rs ip bind
	if uploadConfig.BindRsIp != "" {
		rsClient.Conn.BindRemoteIp = uploadConfig.BindRsIp
	}

	//scan lines and upload
	for bScanner.Scan() {
		line := strings.TrimSpace(bScanner.Text())
		items := strings.Split(line, "\t")
		if len(items) != 3 {
			log.Errorf("Invalid cache line `%s'", line)
			continue
		}

		localFpath := items[0]
		currentFileCount += 1

		skip := false
		//check skip local file or folder
		if uploadConfig.SkipPathPrefixes != "" {
			//unpack skip prefix
			skipPathPrefixes := strings.Split(uploadConfig.SkipPathPrefixes, ",")
			for _, prefix := range skipPathPrefixes {
				if strings.HasPrefix(localFpath, strings.TrimSpace(prefix)) {
					log.Debug(fmt.Sprintf("Skip by path prefix '%s' for local file %s",
						strings.TrimSpace(prefix), localFpath))
					skip = true
					skippedFileCount += 1
					break
				}
			}

			if skip {
				continue
			}
		}

		if uploadConfig.SkipFilePrefixes != "" {
			//unpack skip prefix
			skipFilePrefixes := strings.Split(uploadConfig.SkipFilePrefixes, ",")
			for _, prefix := range skipFilePrefixes {
				localFname := filepath.Base(localFpath)
				if strings.HasPrefix(localFname, strings.TrimSpace(prefix)) {
					log.Debug(fmt.Sprintf("Skip by file prefix '%s' for local file %s",
						strings.TrimSpace(prefix), localFpath))
					skip = true
					skippedFileCount += 1
					break
				}
			}

			if skip {
				continue
			}
		}

		if uploadConfig.SkipSuffixes != "" {
			skipSuffixes := strings.Split(uploadConfig.SkipSuffixes, ",")
			for _, suffix := range skipSuffixes {
				if strings.HasSuffix(localFpath, strings.TrimSpace(suffix)) {
					log.Debug(fmt.Sprintf("Skip by suffix '%s' for local file %s",
						strings.TrimSpace(suffix), localFpath))
					skip = true
					skippedFileCount += 1
					break
				}
			}

			if skip {
				continue
			}
		}

		//pack the upload file key
		localFlmd, _ := strconv.ParseInt(items[2], 10, 64)
		uploadFileKey := localFpath

		if uploadConfig.IgnoreDir {
			if i := strings.LastIndex(uploadFileKey, pathSep); i != -1 {
				uploadFileKey = uploadFileKey[i+1:]
			}
		}
		if uploadConfig.KeyPrefix != "" {
			uploadFileKey = strings.Join([]string{uploadConfig.KeyPrefix, uploadFileKey}, "")
		}
		//convert \ to / under windows
		if runtime.GOOS == "windows" {
			uploadFileKey = strings.Replace(uploadFileKey, "\\", "/", -1)
		}

		localFilePath := filepath.Join(uploadConfig.SrcDir, localFpath)
		fstat, err := os.Stat(localFilePath)
		if err != nil {
			log.Errorf("Error stat local file `%s' due to `%s'", localFilePath, err)
			continue
		}

		fsize := fstat.Size()
		ldbKey := fmt.Sprintf("%s => %s", localFilePath, uploadFileKey)

		if totalFileCount != 0 {
			fmt.Printf("Uploading %s [%d/%d, %.1f%%] ...\n", ldbKey, currentFileCount, totalFileCount,
				float32(currentFileCount)*100/float32(totalFileCount))
		} else {
			fmt.Printf("Uploading %s ...\n", ldbKey)
		}

		//check exists
		if uploadConfig.CheckExists {
			rsEntry, checkErr := rsClient.Stat(nil, uploadConfig.Bucket, uploadFileKey)
			if checkErr == nil {
				//compare hash
				localEtag, cErr := GetEtag(localFilePath)
				if cErr != nil {
					atomic.AddInt64(&failureFileCount, 1)
					log.Error("Calc local file hash failed,", cErr)
					continue
				}
				if rsEntry.Hash == localEtag {
					atomic.AddInt64(&skippedFileCount, 1)
					log.Debug(fmt.Sprintf("File %s already exists in bucket, ignore this upload", uploadFileKey))
					continue
				}
			} else {
				if _, ok := checkErr.(*rpc.ErrorInfo); !ok {
					//not logic error, should be network error
					atomic.AddInt64(&failureFileCount, 1)
					continue
				}
			}
		} else {
			//check leveldb
			ldbFlmd, err := ldb.Get([]byte(ldbKey), nil)
			flmd, _ := strconv.ParseInt(string(ldbFlmd), 10, 64)
			//not exist, return ErrNotFound
			//check last modified

			if err == nil && localFlmd == flmd {
				log.Debug("Skip by local log for file", localFpath)
				atomic.AddInt64(&skippedFileCount, 1)
				continue
			}
		}

		//worker
		upCounter += 1
		if upCounter%threadThreshold == 0 {
			upWorkGroup.Wait()
		}
		upWorkGroup.Add(1)

		//start to upload
		go func() {
			defer upWorkGroup.Done()

			policy := rs.PutPolicy{}
			policy.Scope = uploadConfig.Bucket
			if uploadConfig.Overwrite {
				policy.Scope = fmt.Sprintf("%s:%s", uploadConfig.Bucket, uploadFileKey)
				policy.InsertOnly = 0
			}
			policy.Expires = 30 * 24 * 3600
			uptoken := policy.Token(&mac)
			if fsize > putThreshold {
				var putClient rpc.Client
				if transport != nil {
					putClient = rio.NewClientEx(uptoken, transport, uploadConfig.BindUpIp)
				} else {
					putClient = rio.NewClient(uptoken, uploadConfig.BindUpIp)
				}

				putRet := rio.PutRet{}
				putExtra := rio.PutExtra{}
				progressFkey := Md5Hex(fmt.Sprintf("%s:%s|%s:%s", uploadConfig.SrcDir, uploadConfig.Bucket, localFpath, uploadFileKey))
				progressFname := fmt.Sprintf("%s.progress", progressFkey)
				progressFpath := filepath.Join(storePath, progressFname)
				putExtra.ProgressFile = progressFpath

				err := rio.PutFile(putClient, nil, &putRet, uploadFileKey, localFilePath, &putExtra)
				if err != nil {
					atomic.AddInt64(&failureFileCount, 1)
					if pErr, ok := err.(*rpc.ErrorInfo); ok {
						log.Errorf("Put file `%s' => `%s' failed due to `%s'", localFilePath, uploadFileKey, pErr.Err)
					} else {
						log.Errorf("Put file `%s' => `%s' failed due to `%s'", localFilePath, uploadFileKey, err)
					}
				} else {
					os.Remove(progressFpath)
					atomic.AddInt64(&successFileCount, 1)
					perr := ldb.Put([]byte(ldbKey), []byte(fmt.Sprintf("%d", localFlmd)), &ldbWOpt)
					if perr != nil {
						log.Errorf("Put key `%s' into leveldb error due to `%s'", ldbKey, perr)
					}
				}
			} else {
				var putClient rpc.Client
				if transport != nil {
					putClient = rpc.NewClientEx(transport, uploadConfig.BindUpIp)
				} else {
					putClient = rpc.NewClient(uploadConfig.BindUpIp)
				}

				putRet := fio.PutRet{}
				err := fio.PutFile(putClient, nil, &putRet, uptoken, uploadFileKey, localFilePath, nil)
				if err != nil {
					atomic.AddInt64(&failureFileCount, 1)
					if pErr, ok := err.(*rpc.ErrorInfo); ok {
						log.Errorf("Put file `%s' => `%s' failed due to `%s'", localFilePath, uploadFileKey, pErr.Err)
					} else {
						log.Errorf("Put file `%s' => `%s' failed due to `%s'", localFilePath, uploadFileKey, err)
					}
				} else {
					atomic.AddInt64(&successFileCount, 1)
					perr := ldb.Put([]byte(ldbKey), []byte(fmt.Sprintf("%d", localFlmd)), &ldbWOpt)
					if perr != nil {
						log.Errorf("Put key `%s' into leveldb error due to `%s'", ldbKey, perr)
					}
				}
			}
		}()

	}
	upWorkGroup.Wait()

	fmt.Println()
	fmt.Println("-------Upload Result-------")
	fmt.Println("Total:   \t", currentFileCount)
	fmt.Println("Success: \t", successFileCount)
	fmt.Println("Failure: \t", failureFileCount)
	fmt.Println("Skipped: \t", skippedFileCount)
	fmt.Println("Duration:\t", time.Since(timeStart))
	fmt.Println("-------------------------")

}
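The leveldb bookkeeping above skips a file when its recorded last-modified time is unchanged. A minimal sketch of that incremental logic, with a plain map standing in for leveldb (the file name in the demo is arbitrary):

package main

import (
	"fmt"
	"os"
)

func main() {
	//"localPath => uploadKey" -> last modified time in nanoseconds,
	//playing the role of the leveldb records above
	uploaded := map[string]int64{}

	shouldUpload := func(ldbKey string, mtime int64) bool {
		last, seen := uploaded[ldbKey]
		return !seen || last != mtime
	}

	fi, err := os.Stat("main.go") //any local file works for the demo
	if err != nil {
		fmt.Println(err)
		return
	}
	key := "main.go => demo/main.go"
	fmt.Println("first pass, upload?", shouldUpload(key, fi.ModTime().UnixNano()))
	uploaded[key] = fi.ModTime().UnixNano()
	fmt.Println("second pass, upload?", shouldUpload(key, fi.ModTime().UnixNano()))
}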