Example #1
func QiniuUpload(threadCount int, uploadConfigFile string) {
	fp, err := os.Open(uploadConfigFile)
	if err != nil {
		log.Error(fmt.Sprintf("Open upload config file `%s' error due to `%s'", uploadConfigFile, err))
		return
	}
	defer fp.Close()
	configData, err := ioutil.ReadAll(fp)
	if err != nil {
		log.Error(fmt.Sprintf("Read upload config file `%s' error due to `%s'", uploadConfigFile, err))
		return
	}
	var uploadConfig UploadConfig
	err = json.Unmarshal(configData, &uploadConfig)
	if err != nil {
		log.Error(fmt.Sprintf("Parse upload config file `%s' errror due to `%s'", uploadConfigFile, err))
		return
	}
	if _, err := os.Stat(uploadConfig.SrcDir); err != nil {
		log.Error("Upload config error for parameter `SrcDir`,", err)
		return
	}
	dirCache := DirCache{}
	currentUser, err := user.Current()
	if err != nil {
		log.Error("Failed to get current user", err)
		return
	}
	pathSep := string(os.PathSeparator)
	//create job id
	md5Hasher := md5.New()
	md5Hasher.Write([]byte(uploadConfig.SrcDir + ":" + uploadConfig.Bucket))
	jobId := fmt.Sprintf("%x", md5Hasher.Sum(nil))

	//local storage path
	storePath := fmt.Sprintf("%s%s.qshell%squpload%s%s", currentUser.HomeDir, pathSep, pathSep, pathSep, jobId)
	err = os.MkdirAll(storePath, 0775)
	if err != nil {
		log.Error(fmt.Sprintf("Failed to mkdir `%s' due to `%s'", storePath, err))
		return
	}

	//cache file
	cacheFileName := fmt.Sprintf("%s%s%s.cache", storePath, pathSep, jobId)
	//leveldb folder
	leveldbFileName := fmt.Sprintf("%s%s%s.ldb", storePath, pathSep, jobId)

	totalFileCount := dirCache.Cache(uploadConfig.SrcDir, cacheFileName)
	ldb, err := leveldb.OpenFile(leveldbFileName, nil)
	if err != nil {
		log.Error(fmt.Sprintf("Open leveldb `%s' failed due to `%s'", leveldbFileName, err))
		return
	}
	defer ldb.Close()
	//sync
	ufp, err := os.Open(cacheFileName)
	if err != nil {
		log.Error(fmt.Sprintf("Open cache file `%s' failed due to `%s'", cacheFileName, err))
		return
	}
	defer ufp.Close()
	bScanner := bufio.NewScanner(ufp)
	bScanner.Split(bufio.ScanLines)
	currentFileCount := 0
	ldbWOpt := opt.WriteOptions{
		Sync: true,
	}

	upWorkGroup := sync.WaitGroup{}
	upCounter := 0
	threadThreshold := threadCount + 1

	//use host if not empty
	if uploadConfig.UpHost != "" {
		conf.UP_HOST = uploadConfig.UpHost
	}
	//set settings
	rio.SetSettings(&upSettings)
	mac := digest.Mac{AccessKey: uploadConfig.AccessKey, SecretKey: []byte(uploadConfig.SecretKey)}
	//iterate the cache file and sync each entry
	for bScanner.Scan() {
		line := strings.TrimSpace(bScanner.Text())
		items := strings.Split(line, "\t")
		if len(items) > 2 { //need path, size and last-modified fields; items[2] is read below
			cacheFname := items[0]
			cacheFlmd, _ := strconv.Atoi(items[2])
			uploadFileKey := cacheFname
			if uploadConfig.IgnoreDir {
				if i := strings.LastIndex(uploadFileKey, pathSep); i != -1 {
					uploadFileKey = uploadFileKey[i+1:]
				}
			}
			if uploadConfig.KeyPrefix != "" {
				uploadFileKey = strings.Join([]string{uploadConfig.KeyPrefix, uploadFileKey}, "")
			}
			//convert \ to / under windows
			if runtime.GOOS == "windows" {
				uploadFileKey = strings.Replace(uploadFileKey, "\\", "/", -1)
			}
			cacheFilePath := strings.Join([]string{uploadConfig.SrcDir, cacheFname}, pathSep)
			fstat, err := os.Stat(cacheFilePath)
			if err != nil {
				log.Error(fmt.Sprintf("Error stat local file `%s' due to `%s'", cacheFilePath, err))
				return
			}
			fsize := fstat.Size()

			//check leveldb
			currentFileCount += 1
			ldbKey := fmt.Sprintf("%s => %s", cacheFilePath, uploadFileKey)
			log.Debug(fmt.Sprintf("Checking %s ...", ldbKey))
			//check last modified: skip if a leveldb record exists (Get returns
			//ErrNotFound otherwise) and the timestamp is unchanged
			ldbFlmd, err := ldb.Get([]byte(ldbKey), nil)
			flmd, _ := strconv.Atoi(string(ldbFlmd))
			if err == nil && cacheFlmd == flmd {
				continue
			}

			fmt.Print("\033[2K\r")
			fmt.Printf("Uploading %s (%d/%d, %.1f%%) ...", ldbKey, currentFileCount, totalFileCount,
				float32(currentFileCount)*100/float32(totalFileCount))
			os.Stdout.Sync()
			rsClient := rs.New(&mac)
			//worker
			upCounter += 1
			if upCounter%threadThreshold == 0 {
				upWorkGroup.Wait()
			}
			upWorkGroup.Add(1)
			go func() {
				defer upWorkGroup.Done()
				//check exists
				if uploadConfig.CheckExists {
					rsEntry, checkErr := rsClient.Stat(nil, uploadConfig.Bucket, uploadFileKey)
					if checkErr == nil {
						//compare hash
						localEtag, cErr := GetEtag(cacheFilePath)
						if cErr != nil {
							log.Error("Calc local file hash failed,", cErr)
							return
						}
						if rsEntry.Hash == localEtag {
							log.Info("File already exists in bucket, ignore this upload")
							return
						}
					} else {
						if _, ok := checkErr.(*rpc.ErrorInfo); !ok {
							//not logic error, should be network error
							return
						}
					}
				}
				//upload
				policy := rs.PutPolicy{}
				policy.Scope = uploadConfig.Bucket
				if uploadConfig.Overwrite {
					policy.Scope = uploadConfig.Bucket + ":" + uploadFileKey
					policy.InsertOnly = 0
				}
				policy.Expires = 24 * 3600
				uptoken := policy.Token(&mac)
				if fsize > PUT_THRESHOLD {
					putRet := rio.PutRet{}
					err := rio.PutFile(nil, &putRet, uptoken, uploadFileKey, cacheFilePath, nil)
					if err != nil {
						log.Error(fmt.Sprintf("Put file `%s' => `%s' failed due to `%s'", cacheFilePath, uploadFileKey, err))
					} else {
						//store the last-modified time so the skip check above can match
						//(a non-numeric value would never equal cacheFlmd)
						perr := ldb.Put([]byte(ldbKey), []byte(strconv.Itoa(cacheFlmd)), &ldbWOpt)
						if perr != nil {
							log.Error(fmt.Sprintf("Put key `%s' into leveldb error due to `%s'", ldbKey, perr))
						}
					}
				} else {
					putRet := fio.PutRet{}
					err := fio.PutFile(nil, &putRet, uptoken, uploadFileKey, cacheFilePath, nil)
					if err != nil {
						log.Error(fmt.Sprintf("Put file `%s' => `%s' failed due to `%s'", cacheFilePath, uploadFileKey, err))
					} else {
						perr := ldb.Put([]byte(ldbKey), []byte(strconv.Itoa(cacheFlmd)), &ldbWOpt)
						if perr != nil {
							log.Error(fmt.Sprintf("Put key `%s' into leveldb error due to `%s'", ldbKey, perr))
						}
					}
				}
			}()
		} else {
			log.Error(fmt.Sprintf("Error cache line `%s'", line))
		}
	}
	upWorkGroup.Wait()
	fmt.Println()
	fmt.Println("Upload done!")
}
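
The UploadConfig struct is defined elsewhere in qshell and not shown here. Below is a minimal sketch inferred from the field usages in both examples; the JSON tags and exact types are assumptions. Each cache line produced by DirCache.Cache is tab-separated, with the relative file path in field 0 and the last-modified timestamp in field 2 (field 1 is presumably the file size, but DirCache is not shown either).

//Sketch only: field names are taken from the usages above; JSON tags are guesses.
type UploadConfig struct {
	SrcDir       string `json:"src_dir"`
	AccessKey    string `json:"access_key"`
	SecretKey    string `json:"secret_key"`
	Bucket       string `json:"bucket"`
	UpHost       string `json:"up_host,omitempty"`
	KeyPrefix    string `json:"key_prefix,omitempty"`
	IgnoreDir    bool   `json:"ignore_dir,omitempty"`
	Overwrite    bool   `json:"overwrite,omitempty"`
	CheckExists  bool   `json:"check_exists,omitempty"`
	SkipPrefixes string `json:"skip_prefixes,omitempty"` //used by Example #2 only
	SkipSuffixes string `json:"skip_suffixes,omitempty"` //used by Example #2 only
}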
Example #2
File: qupload.go  Project: micooz/qshell
func QiniuUpload(threadCount int, uploadConfigFile string) {
	fp, err := os.Open(uploadConfigFile)
	if err != nil {
		log.Error(fmt.Sprintf("Open upload config file `%s' error due to `%s'", uploadConfigFile, err))
		return
	}
	defer fp.Close()
	configData, err := ioutil.ReadAll(fp)
	if err != nil {
		log.Error(fmt.Sprintf("Read upload config file `%s' error due to `%s'", uploadConfigFile, err))
		return
	}
	var uploadConfig UploadConfig
	err = json.Unmarshal(configData, &uploadConfig)
	if err != nil {
		log.Error(fmt.Sprintf("Parse upload config file `%s' errror due to `%s'", uploadConfigFile, err))
		return
	}
	if _, err := os.Stat(uploadConfig.SrcDir); err != nil {
		log.Error("Upload config error for parameter `SrcDir`,", err)
		return
	}
	dirCache := DirCache{}
	currentUser, err := user.Current()
	if err != nil {
		log.Error("Failed to get current user", err)
		return
	}

	pathSep := string(os.PathSeparator)
	//create job id
	md5Hasher := md5.New()
	md5Hasher.Write([]byte(strings.TrimSuffix(uploadConfig.SrcDir, pathSep) + ":" + uploadConfig.Bucket))
	jobId := fmt.Sprintf("%x", md5Hasher.Sum(nil))

	//local storage path
	storePath := filepath.Join(currentUser.HomeDir, ".qshell", "qupload", jobId)
	err = os.MkdirAll(storePath, 0775)
	if err != nil {
		log.Error(fmt.Sprintf("Failed to mkdir `%s' due to `%s'", storePath, err))
		return
	}

	//cache file
	cacheFileName := filepath.Join(storePath, jobId+".cache")
	//leveldb folder
	leveldbFileName := filepath.Join(storePath, jobId+".ldb")

	totalFileCount := dirCache.Cache(uploadConfig.SrcDir, cacheFileName)
	ldb, err := leveldb.OpenFile(leveldbFileName, nil)
	if err != nil {
		log.Error(fmt.Sprintf("Open leveldb `%s' failed due to `%s'", leveldbFileName, err))
		return
	}
	defer ldb.Close()
	//sync
	ufp, err := os.Open(cacheFileName)
	if err != nil {
		log.Error(fmt.Sprintf("Open cache file `%s' failed due to `%s'", cacheFileName, err))
		return
	}
	defer ufp.Close()
	bScanner := bufio.NewScanner(ufp)
	bScanner.Split(bufio.ScanLines)

	var currentFileCount int64 = 0
	var successFileCount int64 = 0
	var failureFileCount int64 = 0
	var skippedFileCount int64 = 0

	ldbWOpt := opt.WriteOptions{
		Sync: true,
	}

	upWorkGroup := sync.WaitGroup{}
	upCounter := 0
	threadThreshold := threadCount + 1

	//use host if not empty
	if uploadConfig.UpHost != "" {
		conf.UP_HOST = uploadConfig.UpHost
	}
	//set resume upload settings
	rio.SetSettings(&upSettings)
	mac := digest.Mac{AccessKey: uploadConfig.AccessKey, SecretKey: []byte(uploadConfig.SecretKey)}

	for bScanner.Scan() {
		line := strings.TrimSpace(bScanner.Text())
		items := strings.Split(line, "\t")
		if len(items) != 3 {
			log.Error(fmt.Sprintf("Invalid cache line `%s'", line))
			continue
		}

		localFname := items[0]
		currentFileCount += 1

		skip := false
		//check skip local file or folder
		if uploadConfig.SkipPrefixes != "" {
			//unpack skip prefix
			skipPrefixes := strings.Split(uploadConfig.SkipPrefixes, ",")
			for _, prefix := range skipPrefixes {
				if strings.HasPrefix(localFname, strings.TrimSpace(prefix)) {
					log.Debug(fmt.Sprintf("Skip by prefix '%s' for local file %s",
						strings.TrimSpace(prefix), localFname))
					skip = true
					skippedFileCount += 1
					break
				}
			}

			if skip {
				continue
			}
		}

		if uploadConfig.SkipSuffixes != "" {
			skipSuffixes := strings.Split(uploadConfig.SkipSuffixes, ",")
			for _, suffix := range skipSuffixes {
				if strings.HasSuffix(localFname, strings.TrimSpace(suffix)) {
					log.Debug(fmt.Sprintf("Skip by suffix '%s' for local file %s",
						strings.TrimSpace(suffix), localFname))
					skip = true
					skippedFileCount += 1
					break
				}
			}

			if skip {
				continue
			}
		}

		//pack the upload file key
		localFlmd, _ := strconv.Atoi(items[2])
		uploadFileKey := localFname

		if uploadConfig.IgnoreDir {
			if i := strings.LastIndex(uploadFileKey, pathSep); i != -1 {
				uploadFileKey = uploadFileKey[i+1:]
			}
		}
		if uploadConfig.KeyPrefix != "" {
			uploadFileKey = strings.Join([]string{uploadConfig.KeyPrefix, uploadFileKey}, "")
		}
		//convert \ to / under windows
		if runtime.GOOS == "windows" {
			uploadFileKey = strings.Replace(uploadFileKey, "\\", "/", -1)
		}

		localFilePath := filepath.Join(uploadConfig.SrcDir, localFname)
		fstat, err := os.Stat(localFilePath)
		if err != nil {
			log.Error(fmt.Sprintf("Error stat local file `%s' due to `%s'", localFilePath, err))
			return
		}

		fsize := fstat.Size()
		ldbKey := fmt.Sprintf("%s => %s", localFilePath, uploadFileKey)

		log.Info(fmt.Sprintf("Uploading %s (%d/%d, %.1f%%) ...", ldbKey, currentFileCount, totalFileCount,
			float32(currentFileCount)*100/float32(totalFileCount)))

		rsClient := rs.New(&mac)

		//check exists
		if uploadConfig.CheckExists {
			rsEntry, checkErr := rsClient.Stat(nil, uploadConfig.Bucket, uploadFileKey)
			if checkErr == nil {
				//compare hash
				localEtag, cErr := GetEtag(localFilePath)
				if cErr != nil {
					atomic.AddInt64(&failureFileCount, 1)
					log.Error("Calc local file hash failed,", cErr)
					continue
				}
				if rsEntry.Hash == localEtag {
					atomic.AddInt64(&skippedFileCount, 1)
					log.Debug(fmt.Sprintf("File %s already exists in bucket, ignore this upload", uploadFileKey))
					continue
				}
			} else {
				if _, ok := checkErr.(*rpc.ErrorInfo); !ok {
					//not logic error, should be network error
					atomic.AddInt64(&failureFileCount, 1)
					continue
				}
			}
		} else {
			//check local log: skip if a leveldb record exists (Get returns
			//ErrNotFound otherwise) and the last-modified time is unchanged
			ldbFlmd, err := ldb.Get([]byte(ldbKey), nil)
			flmd, _ := strconv.Atoi(string(ldbFlmd))
			if err == nil && localFlmd == flmd {
				log.Debug("Skip by local log for file", localFname)
				atomic.AddInt64(&skippedFileCount, 1)
				continue
			}
		}

		//worker
		upCounter += 1
		if upCounter%threadThreshold == 0 {
			upWorkGroup.Wait()
		}
		upWorkGroup.Add(1)

		//start to upload
		go func() {
			defer upWorkGroup.Done()

			policy := rs.PutPolicy{}
			policy.Scope = uploadConfig.Bucket
			if uploadConfig.Overwrite {
				policy.Scope = uploadConfig.Bucket + ":" + uploadFileKey
				policy.InsertOnly = 0
			}
			policy.Expires = 24 * 3600
			uptoken := policy.Token(&mac)
			if fsize > PUT_THRESHOLD {
				putRet := rio.PutRet{}
				err := rio.PutFile(nil, &putRet, uptoken, uploadFileKey, localFilePath, nil)
				if err != nil {
					atomic.AddInt64(&failureFileCount, 1)
					if pErr, ok := err.(*rpc.ErrorInfo); ok {
						log.Error(fmt.Sprintf("Put file `%s' => `%s' failed due to `%s'", localFilePath, uploadFileKey, pErr.Err))
					} else {
						log.Error(fmt.Sprintf("Put file `%s' => `%s' failed due to `%s'", localFilePath, uploadFileKey, err))
					}
				} else {
					atomic.AddInt64(&successFileCount, 1)
					//store the last-modified time so the local-log skip check can match
					//(a non-numeric value would never equal localFlmd)
					perr := ldb.Put([]byte(ldbKey), []byte(strconv.Itoa(localFlmd)), &ldbWOpt)
					if perr != nil {
						log.Error(fmt.Sprintf("Put key `%s' into leveldb error due to `%s'", ldbKey, perr))
					}
				}
			} else {
				putRet := fio.PutRet{}
				err := fio.PutFile(nil, &putRet, uptoken, uploadFileKey, localFilePath, nil)
				if err != nil {
					atomic.AddInt64(&failureFileCount, 1)
					if pErr, ok := err.(*rpc.ErrorInfo); ok {
						log.Error(fmt.Sprintf("Put file `%s' => `%s' failed due to `%s'", localFilePath, uploadFileKey, pErr.Err))
					} else {
						log.Error(fmt.Sprintf("Put file `%s' => `%s' failed due to `%s'", localFilePath, uploadFileKey, err))
					}
				} else {
					atomic.AddInt64(&successFileCount, 1)
					perr := ldb.Put([]byte(ldbKey), []byte(strconv.Itoa(localFlmd)), &ldbWOpt)
					if perr != nil {
						log.Error(fmt.Sprintf("Put key `%s' into leveldb error due to `%s'", ldbKey, perr))
					}
				}
			}
		}()

	}
	upWorkGroup.Wait()

	log.Info("-------Upload Done-------")
	log.Info("Total:\t", currentFileCount)
	log.Info("Success:\t", successFileCount)
	log.Info("Failure:\t", failureFileCount)
	log.Info("Skipped:\t", skippedFileCount)
	log.Info("-------------------------")

}
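
Both examples throttle uploads by calling upWorkGroup.Wait() whenever upCounter reaches a multiple of threadThreshold, which drains a whole batch of goroutines before the next batch may start. A buffered-channel semaphore keeps exactly threadCount uploads in flight instead, so a finished worker immediately frees a slot. A self-contained sketch of that pattern (not the qshell code; all names here are illustrative):

package main

import (
	"fmt"
	"sync"
)

//uploadAll runs upload for every key with at most threadCount goroutines
//in flight at once; the buffered channel acts as a counting semaphore.
func uploadAll(keys []string, threadCount int, upload func(key string)) {
	sem := make(chan struct{}, threadCount)
	wg := sync.WaitGroup{}
	for _, key := range keys {
		key := key        //capture a per-iteration copy (needed before Go 1.22)
		sem <- struct{}{} //blocks while threadCount uploads are already running
		wg.Add(1)
		go func() {
			defer wg.Done()
			defer func() { <-sem }()
			upload(key)
		}()
	}
	wg.Wait()
}

func main() {
	uploadAll([]string{"a.txt", "b.txt", "c.txt"}, 2, func(key string) {
		fmt.Println("uploading", key)
	})
}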
Example #3
func (this *Unzipper) Do(req ufop.UfopRequest) (result interface{}, resultType int, contentType string, err error) {
	//parse command
	bucket, prefix, overwrite, pErr := this.parse(req.Cmd)
	if pErr != nil {
		err = pErr
		return
	}

	//check mimetype
	if req.Src.MimeType != "application/zip" {
		err = errors.New("unsupported mimetype to unzip")
		return
	}
	//check zip file length
	if req.Src.Fsize > this.maxZipFileLength {
		err = errors.New("src zip file length exceeds the limit")
		return
	}

	//get resource
	resUrl := req.Src.Url
	resResp, respErr := http.Get(resUrl)
	if respErr != nil || resResp.StatusCode != 200 {
		if respErr != nil {
			err = fmt.Errorf("retrieve resource data failed, %s", respErr.Error())
		} else {
			err = fmt.Errorf("retrieve resource data failed, %s", resResp.Status)
			if resResp.Body != nil {
				resResp.Body.Close()
			}
		}
		return
	}
	defer resResp.Body.Close()

	respData, readErr := ioutil.ReadAll(resResp.Body)
	if readErr != nil {
		err = fmt.Errorf("read resource data failed, %s", readErr.Error())
		return
	}

	//read zip
	respReader := bytes.NewReader(respData)
	zipReader, zipErr := zip.NewReader(respReader, int64(respReader.Len()))
	if zipErr != nil {
		err = fmt.Errorf("invalid zip file, %s", zipErr.Error())
		return
	}
	zipFiles := zipReader.File
	//check file count
	zipFileCount := len(zipFiles)
	if zipFileCount > this.maxFileCount {
		err = errors.New("zip files count exceeds the limit")
		return
	}
	//check file size
	for _, zipFile := range zipFiles {
		fileSize := zipFile.UncompressedSize64
		//check file size
		if fileSize > this.maxFileLength {
			err = errors.New("zip file length exceeds the limit")
			return
		}
	}

	//set up host
	conf.UP_HOST = "http://up.qiniu.com"
	rputSettings := rio.Settings{
		ChunkSize: 4 * 1024 * 1024,
		Workers:   1,
	}
	rio.SetSettings(&rputSettings)
	var rputThreshold uint64 = 100 * 1024 * 1024
	policy := rs.PutPolicy{
		Scope: bucket,
	}
	var unzipResult UnzipResult
	unzipResult.Files = make([]UnzipFile, 0)
	var tErr error
	//iterate the zip file
	for _, zipFile := range zipFiles {
		fileInfo := zipFile.FileHeader.FileInfo()
		fileName := zipFile.FileHeader.Name
		fileSize := zipFile.UncompressedSize64

		if !utf8.ValidString(fileName) {
			fileName, tErr = utils.Gbk2Utf8(fileName)
			if tErr != nil {
				err = fmt.Errorf("unsupported file name encoding, %s", tErr.Error())
				return
			}
		}

		if fileInfo.IsDir() {
			continue
		}

		zipFileReader, zipErr := zipFile.Open()
		if zipErr != nil {
			err = fmt.Errorf("open zip file content failed, %s", zipErr.Error())
			return
		}

		unzipData, unzipErr := ioutil.ReadAll(zipFileReader)
		//close right away: a defer inside this loop would keep every entry
		//open until the whole function returns
		zipFileReader.Close()
		if unzipErr != nil {
			err = fmt.Errorf("unzip the file content failed, %s", unzipErr.Error())
			return
		}
		unzipReader := bytes.NewReader(unzipData)

		//save file to bucket
		fileName = prefix + fileName
		if overwrite {
			policy.Scope = bucket + ":" + fileName
		}
		uptoken := policy.Token(this.mac)
		var unzipFile UnzipFile
		unzipFile.Key = fileName
		if fileSize <= rputThreshold {
			var fputRet fio.PutRet
			fErr := fio.Put(nil, &fputRet, uptoken, fileName, unzipReader, nil)
			if fErr != nil {
				unzipFile.Error = fmt.Sprintf("save unzip file to bucket error, %s", fErr.Error())
			} else {
				unzipFile.Hash = fputRet.Hash
			}

		} else {
			var rputRet rio.PutRet
			rErr := rio.Put(nil, &rputRet, uptoken, fileName, unzipReader, int64(fileSize), nil)
			if rErr != nil {
				unzipFile.Error = fmt.Sprintf("save unzip file to bucket error, %s", rErr.Error())
			} else {
				unzipFile.Hash = rputRet.Hash
			}
		}
		unzipResult.Files = append(unzipResult.Files, unzipFile)
	}

	//write result
	result = unzipResult
	resultType = ufop.RESULT_TYPE_JSON
	contentType = ufop.CONTENT_TYPE_JSON

	return
}
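
One caveat in the example above: the per-entry size check trusts zipFile.UncompressedSize64, which comes from the archive headers and can be understated in a crafted zip. Below is a minimal hardening sketch that enforces the limit on the bytes actually read; readCapped is a hypothetical helper, not part of the ufop code.

package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
)

//readCapped reads at most max bytes from r and rejects the stream if it
//continues past that, so an understated zip header cannot bypass the limit.
func readCapped(r io.Reader, max uint64) ([]byte, error) {
	data, err := ioutil.ReadAll(io.LimitReader(r, int64(max)+1))
	if err != nil {
		return nil, err
	}
	if uint64(len(data)) > max {
		return nil, errors.New("entry larger than its declared size, refusing to unpack")
	}
	return data, nil
}

func main() {
	data, err := readCapped(bytes.NewReader([]byte("hello")), 4)
	fmt.Println(data, err) //err is non-nil: 5 bytes exceed the 4-byte cap
}

In Do, unzipData could then come from readCapped(zipFileReader, this.maxFileLength) in place of the plain ioutil.ReadAll.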