Example #1
func resumableUploadFileDemo(localFile, key, uptoken string) {
	// @gist resumableUploadFile
	var err error
	var ret rio.PutRet
	var extra = &rio.PutExtra{
		// Params:     params,
		// MimeType:   mimeType,
		// ChunkSize:  chunkSize,
		// TryTimes:   tryTimes,
		// Progresses: progresses,
		// Notify:     notify,
		// NotifyErr:  notifyErr,
	}

	// ret       receives the response; see resumable.io.PutRet for details
	// uptoken   is the upload token generated by the business server
	// key       is the key under which the file will be stored
	// localFile is the path of the local file to upload
	// extra     carries optional upload settings and may be nil; see resumable.io.PutExtra
	err = rio.PutFile(nil, &ret, uptoken, key, localFile, extra)

	if err != nil {
		//the upload failed
		log.Print("resumable.io.Put failed:", err)
		return
	}

	//the upload succeeded, handle the returned values
	log.Print(ret.Hash)
	// @endgist
}
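Example #1 assumes that uptoken has already been generated by the business server. For a self-contained test, the token can be produced the same way Example #2 below does, with rs.PutPolicy and digest.Mac. The following is only a minimal sketch under that assumption; the import paths and the placeholder credentials are guesses and may need adjusting to the SDK version in use.

package main

import (
	"log"

	"github.com/qiniu/api/auth/digest" // import paths are assumptions; adjust to your SDK version
	rio "github.com/qiniu/api/resumable/io"
	"github.com/qiniu/api/rs"
)

// makeUptoken is a hypothetical helper: it signs a bucket-scoped put policy,
// exactly as Example #2 does with the stored account credentials.
func makeUptoken(accessKey, secretKey, bucket string) string {
	mac := digest.Mac{accessKey, []byte(secretKey)}
	policy := rs.PutPolicy{}
	policy.Scope = bucket
	return policy.Token(&mac)
}

func main() {
	uptoken := makeUptoken("<access-key>", "<secret-key>", "<bucket>")
	var ret rio.PutRet
	// resumable upload, as in Example #1; a nil PutExtra uses the defaults
	err := rio.PutFile(nil, &ret, uptoken, "<key>", "/path/to/local/file", nil)
	if err != nil {
		log.Fatal("resumable.io.PutFile failed:", err)
	}
	log.Print(ret.Hash)
}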
Example #2
func ResumablePut(cmd string, params ...string) {
	//params: bucket, key, localFile, then optionally mimeType and/or upHost
	if len(params) == 3 || len(params) == 4 || len(params) == 5 {
		bucket := params[0]
		key := params[1]
		localFile := params[2]
		mimeType := ""
		upHost := "http://upload.qiniu.com"
		if len(params) == 4 {
			param := params[3]
			if strings.HasPrefix(param, "http") {
				upHost = param
			} else {
				mimeType = param
			}
		}
		if len(params) == 5 {
			mimeType = params[3]
			upHost = params[4]
		}
		//load account credentials and build a bucket-scoped put policy
		accountS.Get()
		mac := digest.Mac{accountS.AccessKey, []byte(accountS.SecretKey)}
		policy := rs.PutPolicy{}
		policy.Scope = bucket
		putExtra := rio.PutExtra{}
		if mimeType != "" {
			putExtra.MimeType = mimeType
		}
		conf.UP_HOST = upHost
		progressHandler := ProgressHandler{
			BlockIndices:    make([]int, 0),
			BlockProgresses: make(map[int]float32),
		}
		putExtra.Notify = progressHandler.Notify
		putExtra.NotifyErr = progressHandler.NotifyErr
		uptoken := policy.Token(&mac)
		putRet := rio.PutRet{}
		startTime := time.Now()
		fStat, statErr := os.Stat(localFile)
		if statErr != nil {
			log.Error("Local file error", statErr)
			return
		}
		fsize := fStat.Size()
		err := rio.PutFile(nil, &putRet, uptoken, key, localFile, &putExtra)
		if err != nil {
			log.Error("Put file error", err)
		} else {
			fmt.Println("\r\nPut file", localFile, "=>", bucket, ":", putRet.Key, "(", putRet.Hash, ")", "success!")
		}
		lastNano := time.Now().UnixNano() - startTime.UnixNano()
		lastTime := fmt.Sprintf("%.2f", float32(lastNano)/1e9)
		avgSpeed := fmt.Sprintf("%.1f", float32(fsize)*1e6/float32(lastNano))
		fmt.Println("Last time:", lastTime, "s, Average Speed:", avgSpeed, "KB/s")
	} else {
		CmdHelp(cmd)
	}
}
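ResumablePut above wires block-level progress callbacks into a ProgressHandler that is not shown. The type below is a rough reconstruction from the fields and methods the example uses; the Notify/NotifyErr signatures, the import path, and the package name are assumptions based on the rio.PutExtra fields listed in Example #1, so verify them against the SDK version in use.

package qshell // package name is an assumption

import (
	"fmt"

	rio "github.com/qiniu/api/resumable/io" // import path is an assumption
)

// ProgressHandler collects per-block upload progress.
// The callback signatures are assumed; check rio.PutExtra in your SDK version.
type ProgressHandler struct {
	BlockIndices    []int           // order in which blocks reported progress
	BlockProgresses map[int]float32 // per-block progress, in percent
}

func (p *ProgressHandler) Notify(blkIdx int, blkSize int, ret *rio.BlkputRet) {
	p.BlockIndices = append(p.BlockIndices, blkIdx)
	p.BlockProgresses[blkIdx] = 100 // treat a notified block as fully uploaded
	fmt.Printf("\rblock %d (%d bytes) uploaded", blkIdx, blkSize)
}

func (p *ProgressHandler) NotifyErr(blkIdx int, blkSize int, err error) {
	fmt.Printf("\rblock %d (%d bytes) failed: %v\n", blkIdx, blkSize, err)
}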
Example #3
func QiniuUpload(threadCount int, uploadConfigFile string) {
	fp, err := os.Open(uploadConfigFile)
	if err != nil {
		log.Error(fmt.Sprintf("Open upload config file `%s' error due to `%s'", uploadConfigFile, err))
		return
	}
	defer fp.Close()
	configData, err := ioutil.ReadAll(fp)
	if err != nil {
		log.Error(fmt.Sprintf("Read upload config file `%s' error due to `%s'", uploadConfigFile, err))
		return
	}
	var uploadConfig UploadConfig
	err = json.Unmarshal(configData, &uploadConfig)
	if err != nil {
		log.Error(fmt.Sprintf("Parse upload config file `%s' errror due to `%s'", uploadConfigFile, err))
		return
	}
	if _, err := os.Stat(uploadConfig.SrcDir); err != nil {
		log.Error("Upload config error for parameter `SrcDir`,", err)
		return
	}
	dirCache := DirCache{}
	currentUser, err := user.Current()
	if err != nil {
		log.Error("Failed to get current user", err)
		return
	}
	pathSep := string(os.PathSeparator)
	jobId := base64.URLEncoding.EncodeToString([]byte(uploadConfig.SrcDir + ":" + uploadConfig.Bucket))
	storePath := fmt.Sprintf("%s%s.qshell%squpload%s%s", currentUser.HomeDir, pathSep, pathSep, pathSep, jobId)
	err = os.MkdirAll(storePath, 0775)
	if err != nil {
		log.Error(fmt.Sprintf("Failed to mkdir `%s' due to `%s'", storePath, err))
		return
	}
	cacheFileName := fmt.Sprintf("%s%s%s.cache", storePath, pathSep, jobId)
	leveldbFileName := fmt.Sprintf("%s%s%s.ldb", storePath, pathSep, jobId)
	totalFileCount := dirCache.Cache(uploadConfig.SrcDir, cacheFileName)
	ldb, err := leveldb.OpenFile(leveldbFileName, nil)
	if err != nil {
		log.Error(fmt.Sprintf("Open leveldb `%s' failed due to `%s'", leveldbFileName, err))
		return
	}
	defer ldb.Close()
	//sync
	ufp, err := os.Open(cacheFileName)
	if err != nil {
		log.Error(fmt.Sprintf("Open cache file `%s' failed due to `%s'", cacheFileName, err))
		return
	}
	defer ufp.Close()
	bScanner := bufio.NewScanner(ufp)
	bScanner.Split(bufio.ScanLines)
	currentFileCount := 0
	ldbWOpt := opt.WriteOptions{
		Sync: true,
	}

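	// uploads are dispatched in batches: the WaitGroup tracks in-flight workers,
	// and every threadThreshold-th file waits for the current batch to finish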
	upWorkGroup := sync.WaitGroup{}
	upCounter := 0
	threadThreshold := threadCount + 1

	//use host if not empty
	if uploadConfig.UpHost != "" {
		conf.UP_HOST = uploadConfig.UpHost
	}
	//set settings
	rio.SetSettings(&upSettings)
	mac := digest.Mac{uploadConfig.AccessKey, []byte(uploadConfig.SecretKey)}
	//walk the cached file list and upload each entry
	for bScanner.Scan() {
		line := strings.TrimSpace(bScanner.Text())
		items := strings.Split(line, "\t")
		if len(items) > 2 {
			cacheFname := items[0]
			cacheFlmd, _ := strconv.Atoi(items[2])
			uploadFileKey := cacheFname
			if uploadConfig.IgnoreDir {
				if i := strings.LastIndex(uploadFileKey, pathSep); i != -1 {
					uploadFileKey = uploadFileKey[i+1:]
				}
			}
			if uploadConfig.KeyPrefix != "" {
				uploadFileKey = strings.Join([]string{uploadConfig.KeyPrefix, uploadFileKey}, "")
			}
			//convert \ to / under windows
			if runtime.GOOS == "windows" {
				uploadFileKey = strings.Replace(uploadFileKey, "\\", "/", -1)
			}
			cacheFilePath := strings.Join([]string{uploadConfig.SrcDir, cacheFname}, pathSep)
			fstat, err := os.Stat(cacheFilePath)
			if err != nil {
				log.Error(fmt.Sprintf("Error stat local file `%s' due to `%s'", cacheFilePath, err))
				return
			}
			fsize := fstat.Size()

			//check leveldb
			currentFileCount += 1
			ldbKey := fmt.Sprintf("%s => %s", cacheFilePath, uploadFileKey)
			log.Debug(fmt.Sprintf("Checking %s ...", ldbKey))
			//compare the file's last-modified time with the one recorded in leveldb;
			//ldb.Get returns ErrNotFound if the file was never uploaded before
			ldbFlmd, err := ldb.Get([]byte(ldbKey), nil)
			flmd, _ := strconv.Atoi(string(ldbFlmd))
			if err == nil && cacheFlmd == flmd {
				//unchanged since the last successful upload, skip it
				continue
			}

			fmt.Print("\033[2K\r")
			fmt.Printf("Uploading %s (%d/%d, %.1f%%) ...", ldbKey, currentFileCount, totalFileCount,
				float32(currentFileCount)*100/float32(totalFileCount))
			os.Stdout.Sync()
			rsClient := rs.New(&mac)
			//worker
			upCounter += 1
			if upCounter%threadThreshold == 0 {
				upWorkGroup.Wait()
			}
			upWorkGroup.Add(1)
			go func() {
				defer upWorkGroup.Done()
				//check exists
				if uploadConfig.CheckExists {
					rsEntry, checkErr := rsClient.Stat(nil, uploadConfig.Bucket, uploadFileKey)
					if checkErr != nil {
						log.Error(fmt.Sprintf("Stat `%s' error due to `%s'", uploadFileKey, checkErr))
						return
					} else if rsEntry.Fsize == fsize {
						log.Debug("File already exists in bucket, ignore this upload")
						return
					}
				}
				//upload
				policy := rs.PutPolicy{}
				policy.Scope = uploadConfig.Bucket
				if uploadConfig.Overwrite {
					policy.Scope = uploadConfig.Bucket + ":" + uploadFileKey
					policy.InsertOnly = 0
				}
				policy.Expires = 24 * 3600
				uptoken := policy.Token(&mac)
				if fsize > PUT_THRESHOLD {
					putRet := rio.PutRet{}
					err := rio.PutFile(nil, &putRet, uptoken, uploadFileKey, cacheFilePath, nil)
					if err != nil {
						log.Error(fmt.Sprintf("Put file `%s' => `%s' failed due to `%s'", cacheFilePath, uploadFileKey, err))
					} else {
						//record the last-modified time so unchanged files are skipped on the next run
						perr := ldb.Put([]byte(ldbKey), []byte(strconv.Itoa(cacheFlmd)), &ldbWOpt)
						if perr != nil {
							log.Error(fmt.Sprintf("Put key `%s' into leveldb error due to `%s'", ldbKey, perr))
						}
					}
				} else {
					putRet := fio.PutRet{}
					err := fio.PutFile(nil, &putRet, uptoken, uploadFileKey, cacheFilePath, nil)
					if err != nil {
						log.Error(fmt.Sprintf("Put file `%s' => `%s' failed due to `%s'", cacheFilePath, uploadFileKey, err))
					} else {
						perr := ldb.Put([]byte(ldbKey), []byte(strconv.Itoa(cacheFlmd)), &ldbWOpt)
						if perr != nil {
							log.Error(fmt.Sprintf("Put key `%s' into leveldb error due to `%s'", ldbKey, perr))
						}
					}
				}
			}()
		} else {
			log.Error(fmt.Sprintf("Error cache line `%s'", line))
		}
	}
	upWorkGroup.Wait()
	fmt.Println()
	fmt.Println("Upload done!")
}
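QiniuUpload unmarshals the config file into an UploadConfig value whose definition is not shown above. The struct below is reconstructed from the fields the function reads; the json tags, and therefore the key spelling in the config file, are assumptions and may differ from the real qshell definition.

package qshell // package name is an assumption

// UploadConfig, reconstructed from the fields QiniuUpload reads.
type UploadConfig struct {
	SrcDir      string `json:"src_dir"`
	Bucket      string `json:"bucket"`
	AccessKey   string `json:"access_key"`
	SecretKey   string `json:"secret_key"`
	UpHost      string `json:"up_host"`
	KeyPrefix   string `json:"key_prefix"`
	IgnoreDir   bool   `json:"ignore_dir"`
	Overwrite   bool   `json:"overwrite"`
	CheckExists bool   `json:"check_exists"`
}

With a config file written with those keys, a job with five concurrent workers would be started as QiniuUpload(5, "upload.conf").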