func qiniusave(file string) (url string, err error) {
	// The range loop leaves key holding the last path segment of file,
	// which becomes the storage key.
	var key string
	for _, key = range strings.Split(file, "/") {
	}
	conf.ACCESS_KEY = setting.AccessKeyID
	conf.SECRET_KEY = setting.AccessKeysecret
	url = "http://" + setting.Endpoint + "/" + key
	putPolicy := rs.PutPolicy{Scope: setting.Bucket}
	uptoken := putPolicy.Token(nil)
	var ret io.PutRet
	var extra = &io.PutExtra{}
	err = io.PutFile(nil, &ret, uptoken, key, file, extra)
	if err != nil {
		return "", err
	}
	return url, nil
}
func upFile(localFile, bucketName, key string) error {
	policy := rs.PutPolicy{
		Scope: bucketName + ":" + key,
	}
	return qio.PutFile(nil, nil, policy.Token(nil), key, localFile, nil)
}
func TestPrivateImageView(t *testing.T) {
	// First upload an image to test against.
	policy := rs.PutPolicy{
		Scope: bucket + ":" + key,
	}
	err := io.PutFile(nil, nil, policy.Token(nil), key, localFile, nil)
	if err != nil {
		t.Errorf("TestPrivateImageView failed: %v", err)
		return
	}
	rawUrl := makeUrl(key)
	iv := ImageView{
		Mode:    2,
		Height:  250,
		Quality: 80,
	}
	imageViewUrl := iv.MakeRequest(rawUrl)
	p := rs.GetPolicy{}
	imageViewUrlWithToken := p.MakeRequest(imageViewUrl, nil)
	resp, err := http.DefaultClient.Get(imageViewUrlWithToken)
	if err != nil {
		t.Errorf("TestPrivateImageView failed: %v", err)
		return
	}
	defer resp.Body.Close()
	if resp.StatusCode/100 != 2 {
		t.Errorf("TestPrivateImageView failed: resp.StatusCode = %v", resp.StatusCode)
		return
	}
}
func (helper *QiniuHelper) token(bucketName string) string {
	if helper.cache != nil {
		tk, ok := helper.cache.Get(fmt.Sprintf("%s:%s", helper.AccessKey, bucketName))
		if ok {
			return tk.(string)
		}
	}
	// Caveat: the cache duration equals the token lifetime, so a token
	// served near the end of the hour may already be expired.
	duration := time.Hour
	putPolicy := rs.PutPolicy{
		Scope:   bucketName,
		Expires: 3600,
		// CallbackUrl:  callbackUrl,
		// CallbackBody: callbackBody,
		// ReturnUrl:    returnUrl,
		// ReturnBody:   returnBody,
		// AsyncOps:     asyncOps,
		// EndUser:      endUser,
	}
	tk := putPolicy.Token(nil)
	if helper.cache != nil {
		helper.cache.Set(fmt.Sprintf("%s:%s", helper.AccessKey, bucketName), tk, duration)
	}
	return tk
}
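// The helper above depends on an unexported cache field whose type is not
// shown in this snippet. A minimal sketch (an assumption, not the original
// type) of the Get/Set interface it appears to use:
type tokenCache interface {
	Get(key string) (interface{}, bool)
	Set(key string, value interface{}, d time.Duration)
}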
func genUptoken(bucket, key string) string {
	gr := cfg.Gorelease
	if gr.Token != "" {
		log.Println("Use gorelease, key:", key)
		u := url.URL{
			Scheme: "http",
			Host:   gr.Host,
			Path:   "/uptoken",
		}
		query := u.Query()
		query.Set("private_token", gr.Token)
		query.Set("bucket", bucket)
		query.Set("key", key)
		u.RawQuery = query.Encode()
		resp, err := http.Get(u.String())
		if err != nil {
			log.Fatal(err)
		}
		defer resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			sysio.Copy(os.Stdout, resp.Body)
			log.Fatalf("status: %d", resp.StatusCode)
		}
		uptoken, _ := ioutil.ReadAll(resp.Body)
		return string(uptoken)
	}
	putPolicy := rs.PutPolicy{
		Scope: bucket + ":" + key,
	}
	return putPolicy.Token(nil)
}
func genUptoken(bucket string, key string) string {
	policy := rs.PutPolicy{
		Scope: bucket + ":" + key,
	}
	// Note: in the classic SDK, Token() appears to treat Expires as a
	// lifetime in seconds and add the current time itself, so assigning an
	// absolute timestamp here would roughly double the deadline; a plain
	// lifetime of 1800 may be what was intended.
	policy.Expires = uint32(time.Now().Unix()) + 1800
	policy.FsizeLimit = 20 << 20 // 20 MB upload size limit
	return policy.Token(nil)
}
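// A hypothetical caller (not from the original source) showing how the token
// from genUptoken would typically be fed to the form-upload API; `io` here is
// assumed to be the Qiniu form-upload package used in the other snippets.
func uploadWithToken(bucket, key, localFile string) error {
	uptoken := genUptoken(bucket, key)
	var ret io.PutRet
	return io.PutFile(nil, &ret, uptoken, key, localFile, nil)
}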
func ResumablePut(cmd string, params ...string) {
	if len(params) == 3 || len(params) == 4 || len(params) == 5 {
		bucket := params[0]
		key := params[1]
		localFile := params[2]
		mimeType := ""
		upHost := "http://upload.qiniu.com"
		if len(params) == 4 {
			param := params[3]
			if strings.HasPrefix(param, "http") {
				upHost = param
			} else {
				mimeType = param
			}
		}
		if len(params) == 5 {
			mimeType = params[3]
			upHost = params[4]
		}
		accountS.Get()
		mac := digest.Mac{accountS.AccessKey, []byte(accountS.SecretKey)}
		policy := rs.PutPolicy{}
		policy.Scope = bucket
		putExtra := rio.PutExtra{}
		if mimeType != "" {
			putExtra.MimeType = mimeType
		}
		conf.UP_HOST = upHost
		progressHandler := ProgressHandler{
			BlockIndices:    make([]int, 0),
			BlockProgresses: make(map[int]float32),
		}
		putExtra.Notify = progressHandler.Notify
		putExtra.NotifyErr = progressHandler.NotifyErr
		uptoken := policy.Token(&mac)
		putRet := rio.PutRet{}
		startTime := time.Now()
		fStat, statErr := os.Stat(localFile)
		if statErr != nil {
			log.Error("Local file error", statErr)
			return
		}
		fsize := fStat.Size()
		err := rio.PutFile(nil, &putRet, uptoken, key, localFile, &putExtra)
		if err != nil {
			log.Error("Put file error", err)
		} else {
			fmt.Println("\r\nPut file", localFile, "=>", bucket, ":", putRet.Key, "(", putRet.Hash, ")", "success!")
		}
		lastNano := time.Now().UnixNano() - startTime.UnixNano()
		lastTime := fmt.Sprintf("%.2f", float32(lastNano)/1e9)
		avgSpeed := fmt.Sprintf("%.1f", float32(fsize)*1e6/float32(lastNano))
		fmt.Println("Last time:", lastTime, "s, Average Speed:", avgSpeed, "KB/s")
	} else {
		CmdHelp(cmd)
	}
}
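// ProgressHandler is referenced above but not defined in this snippet. A
// minimal sketch (an assumption) of callbacks compatible with the classic
// resumable-upload SDK, where Notify reports per-block progress and NotifyErr
// reports per-block failures:
type ProgressHandler struct {
	BlockIndices    []int
	BlockProgresses map[int]float32
}

func (p ProgressHandler) Notify(blkIdx, blkSize int, ret *rio.BlkputRet) {
	// Offset is how many bytes of this block have been confirmed so far.
	p.BlockProgresses[blkIdx] = float32(ret.Offset) / float32(blkSize)
}

func (p ProgressHandler) NotifyErr(blkIdx, blkSize int, err error) {
	log.Error(fmt.Sprintf("block %d upload failed: %v", blkIdx, err))
}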
func UploadReader(reader io.Reader, name string) error {
	// Note: credentials are hardcoded here; in practice they should come
	// from configuration.
	ACCESS_KEY = "NPfcHtb0e2EH7lCmmJot21MRr0lCel81S-QlUaJF"
	SECRET_KEY = "6DzF_oVRYhBkq0mqb4txThza_IfQEUey107VXaPq"
	policy := qrs.PutPolicy{
		Scope: "isaac",
	}
	var ret qio.PutRet
	return qio.Put(nil, &ret, policy.Token(nil), name, reader, nil)
}
func uptoken(bucketName string) string {
	putPolicy := qnrs.PutPolicy{
		Scope: bucketName,
		// CallbackUrl:  callbackUrl,
		// CallbackBody: callbackBody,
		// ReturnUrl:    returnUrl,
		// ReturnBody:   returnBody,
		// AsyncOps:     asyncOps,
		// EndUser:      endUser,
		// Expires:      expires,
	}
	return putPolicy.Token(nil)
}
func UploadFile(localFile string, destName string) (addr string, err error) {
	policy := new(rs.PutPolicy)
	policy.Scope = QiniuScope
	uptoken := policy.Token(nil)
	var ret io.PutRet
	var extra = new(io.PutExtra)
	err = io.PutFile(nil, &ret, uptoken, destName, localFile, extra)
	if err != nil {
		return
	}
	addr = "http://" + QiniuScope + ".qiniudn.com/" + destName
	return
}
func InitQiniu() {
	conf.ACCESS_KEY = config.Config["qiniu_access_key"]
	conf.SECRET_KEY = config.Config["qiniu_secret_key"]
	putPolicy := rs.PutPolicy{
		Scope: config.Config["qiniu_bucket_name"],
		// CallbackUrl:  callbackUrl,
		// CallbackBody: callbackBody,
		// ReturnUrl:    returnUrl,
		// ReturnBody:   returnBody,
		// AsyncOps:     asyncOps,
		// EndUser:      endUser,
		// Expires:      expires,
	}
	uptoken = putPolicy.Token(nil)
}
func (driver *QiniuDriver) PutFile(key string, data io.Reader, appendData bool) (int64, error) {
	putPolicy := rs.PutPolicy{
		Scope: driver.bucket,
	}
	uptoken := putPolicy.Token(nil)
	var ret qio.PutRet
	var extra = &qio.PutExtra{}
	rd := CountReader(data)
	err := qio.Put(nil, &ret, uptoken, strings.TrimLeft(key, "/"), rd, extra)
	if err != nil {
		return 0, err
	}
	return int64(rd.Size()), nil
}
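// CountReader is referenced above but not defined in this snippet. A minimal
// sketch (an assumption) of an io.Reader wrapper that counts bytes as they
// are read through it, which is what the returned byte count relies on:
type countingReader struct {
	r io.Reader
	n int
}

func CountReader(r io.Reader) *countingReader {
	return &countingReader{r: r}
}

func (c *countingReader) Read(p []byte) (int, error) {
	n, err := c.r.Read(p)
	c.n += n
	return n, err
}

// Size reports the total number of bytes read so far.
func (c *countingReader) Size() int {
	return c.n
}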
// Upload to Qiniu and return the filename.
func uploadAvatarToQiniu(file io.ReadCloser, contentType string) (filename string, err error) {
	isValidateType := false
	for _, imgType := range []string{"image/png", "image/jpeg"} {
		if imgType == contentType {
			isValidateType = true
			break
		}
	}
	if !isValidateType {
		return "", errors.New("invalid file type")
	}
	filenameExtension := ".jpg"
	if contentType == "image/png" {
		filenameExtension = ".png"
	}
	// Filename: a 32-character UUID (dashes stripped) plus the extension.
	filename = strings.Replace(uuid.NewUUID().String(), "-", "", -1) + filenameExtension
	key := "avatar/" + filename
	ret := new(qiniuIo.PutRet)
	var policy = rs.PutPolicy{
		Scope: "gopher",
	}
	err = qiniuIo.Put(nil, ret, policy.Token(nil), key, file, nil)
	if err != nil {
		return "", err
	}
	return filename, nil
}
func TestAll(t *testing.T) {
	policy := rs.PutPolicy{
		Scope: bucket,
	}
	token := policy.Token(nil)
	params := map[string]string{"x:1": "1"}
	extra := &PutExtra{
		ChunkSize: 128,
		MimeType:  "text/plain",
		Notify:    blockNotify,
		Params:    params,
	}
	testPut(t, token, nil)
	testPutWithoutKey(t, token, extra)
	testPutFile(t, token, extra)
	testPutFileWithoutKey(t, token, extra)
	testXVar(t, token, extra)
}
// Create a new uptoken in memory (not persisted), valid for Life days from now.
func (this *Bucket) NewUptoken() error {
	if this.Name == "" || this.Ak == "" || this.Sk == "" {
		return errors.New("Bucket Name/Ak/Sk is empty; cannot generate an uptoken")
	}
	if this.Life == 0 {
		this.Life = 380
	}
	this.Expires = time.Now().Add(time.Duration(this.Life) * DAY)
	putPolicy := rs.PutPolicy{
		Scope: this.Name,
		// Note: in the classic SDK, Token() appears to treat Expires as a
		// lifetime in seconds and add the current time itself, so passing
		// an absolute timestamp here may push the deadline far past the
		// intended date.
		Expires: uint32(this.Expires.Unix()),
		// CallbackUrl:  callbackUrl,
		// CallbackBody: callbackBody,
		// ReturnUrl:    returnUrl,
		// ReturnBody:   returnBody,
		// AsyncOps:     asyncOps,
		// EndUser:      endUser,
	}
	this.Uptoken = putPolicy.Token(&digest.Mac{this.Ak, []byte(this.Sk)})
	this.HasError = false
	return nil
}
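// DAY is used above but not defined in this snippet; it is presumably a
// duration constant along these lines:
const DAY = 24 * time.Hour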
// Generate the upload token.
func (this *UploaderLogic) genUpToken() {
	// To tolerate clock drift between servers, refresh the token after 45 minutes.
	if this.uptoken != "" && this.tokenTime.Add(45*time.Minute).After(time.Now()) {
		return
	}
	putPolicy := rs.PutPolicy{
		Scope: this.bucketName,
		// CallbackUrl:  callbackUrl,
		// CallbackBody: callbackBody,
		// ReturnUrl:    returnUrl,
		// ReturnBody:   returnBody,
		// AsyncOps:     asyncOps,
		// EndUser:      endUser,
		// Expires: expires, // token lifetime (defaults to 1 hour)
	}
	this.locker.Lock()
	this.uptoken = putPolicy.Token(nil)
	this.locker.Unlock()
	this.tokenTime = time.Now()
}
func main() {
	ACCESS_KEY = "xxxxxxxx"
	SECRET_KEY = "xxxxxxxx"
	var ret qnio.PutRet
	var extra = &qnio.PutExtra{
		MimeType: "image/jpeg",
		CheckCrc: 0,
	}
	key := "1024x1024.jpg"
	scope := fmt.Sprintf("skypixeltest:%s", key)
	putPolicy := rs.PutPolicy{
		Scope: scope,
		// Expires: expires,
	}
	uptoken := putPolicy.Token(nil)
	fi, err := os.Open("/Users/qpzhang/Downloads/1024x1024.jpg")
	if err != nil {
		panic(err)
	}
	defer fi.Close()
	// Stat only after the error check, so fi is guaranteed to be non-nil.
	st, err := fi.Stat()
	if err != nil {
		panic(err)
	}
	data := bufio.NewReader(fi)
	fmt.Println("size ", st.Size())
	err = qnio.Put2(nil, &ret, uptoken, key, data, st.Size(), extra)
	if err != nil {
		fmt.Println(err)
	} else {
		fmt.Println("put success......", ret)
	}
}
// URL: /upload/image
// The editor uploads an image; the handler receives it and sends it to Qiniu.
func uploadImageHandler(handler *Handler) {
	defer dps.Persist()
	file, header, err := handler.Request.FormFile("editormd-image-file")
	if err != nil {
		panic(err)
	}
	defer file.Close()
	// Check that the file is a jpg, png, or gif.
	uploadFileType := header.Header["Content-Type"][0]
	filenameExtension := ""
	if uploadFileType == "image/jpeg" {
		filenameExtension = ".jpg"
	} else if uploadFileType == "image/png" {
		filenameExtension = ".png"
	} else if uploadFileType == "image/gif" {
		filenameExtension = ".gif"
	}
	if filenameExtension == "" {
		handler.renderJson(map[string]interface{}{
			"success": 0,
			"message": "unsupported file format; please upload a jpg/png/gif image",
		})
		return
	}
	// Upload to Qiniu.
	// Filename: a 32-character UUID plus the extension.
	filename := strings.Replace(uuid.NewUUID().String(), "-", "", -1) + filenameExtension
	key := "upload/image/" + filename
	ret := new(qiniuIo.PutRet)
	var policy = rs.PutPolicy{
		Scope: "gopher",
	}
	err = qiniuIo.Put(nil, ret, policy.Token(nil), key, file, nil)
	if err != nil {
		// Render the error response instead of panicking, which previously
		// made the JSON below unreachable.
		handler.renderJson(map[string]interface{}{
			"success": 0,
			"message": "uploading the image to Qiniu failed",
		})
		return
	}
	handler.renderJson(map[string]interface{}{
		"success": 1,
		"url":     "http://77fkk5.com1.z0.glb.clouddn.com/" + key,
	})
}
func QiniuUpload(threadCount int, uploadConfigFile string) {
	fp, err := os.Open(uploadConfigFile)
	if err != nil {
		log.Error(fmt.Sprintf("Open upload config file `%s' error due to `%s'", uploadConfigFile, err))
		return
	}
	defer fp.Close()
	configData, err := ioutil.ReadAll(fp)
	if err != nil {
		log.Error(fmt.Sprintf("Read upload config file `%s' error due to `%s'", uploadConfigFile, err))
		return
	}
	var uploadConfig UploadConfig
	err = json.Unmarshal(configData, &uploadConfig)
	if err != nil {
		log.Error(fmt.Sprintf("Parse upload config file `%s' error due to `%s'", uploadConfigFile, err))
		return
	}
	if _, err := os.Stat(uploadConfig.SrcDir); err != nil {
		log.Error("Upload config error for parameter `SrcDir`,", err)
		return
	}
	dirCache := DirCache{}
	currentUser, err := user.Current()
	if err != nil {
		log.Error("Failed to get current user", err)
		return
	}
	pathSep := string(os.PathSeparator)

	// Create the job id from the source dir and bucket.
	md5Hasher := md5.New()
	md5Hasher.Write([]byte(strings.TrimSuffix(uploadConfig.SrcDir, pathSep) + ":" + uploadConfig.Bucket))
	jobId := fmt.Sprintf("%x", md5Hasher.Sum(nil))

	// Local storage path.
	storePath := filepath.Join(currentUser.HomeDir, ".qshell", "qupload", jobId)
	err = os.MkdirAll(storePath, 0775)
	if err != nil {
		log.Error(fmt.Sprintf("Failed to mkdir `%s' due to `%s'", storePath, err))
		return
	}

	// Cache file and leveldb folder.
	cacheFileName := filepath.Join(storePath, jobId+".cache")
	leveldbFileName := filepath.Join(storePath, jobId+".ldb")
	totalFileCount := dirCache.Cache(uploadConfig.SrcDir, cacheFileName)
	ldb, err := leveldb.OpenFile(leveldbFileName, nil)
	if err != nil {
		log.Error(fmt.Sprintf("Open leveldb `%s' failed due to `%s'", leveldbFileName, err))
		return
	}
	defer ldb.Close()

	// Sync.
	ufp, err := os.Open(cacheFileName)
	if err != nil {
		log.Error(fmt.Sprintf("Open cache file `%s' failed due to `%s'", cacheFileName, err))
		return
	}
	defer ufp.Close()
	bScanner := bufio.NewScanner(ufp)
	bScanner.Split(bufio.ScanLines)

	var currentFileCount int64 = 0
	var successFileCount int64 = 0
	var failureFileCount int64 = 0
	var skippedFileCount int64 = 0
	ldbWOpt := opt.WriteOptions{
		Sync: true,
	}
	upWorkGroup := sync.WaitGroup{}
	upCounter := 0
	threadThreshold := threadCount + 1

	// Use the configured up host if not empty.
	if uploadConfig.UpHost != "" {
		conf.UP_HOST = uploadConfig.UpHost
	}
	// Set resumable upload settings.
	rio.SetSettings(&upSettings)
	mac := digest.Mac{uploadConfig.AccessKey, []byte(uploadConfig.SecretKey)}

	for bScanner.Scan() {
		line := strings.TrimSpace(bScanner.Text())
		items := strings.Split(line, "\t")
		if len(items) != 3 {
			log.Error(fmt.Sprintf("Invalid cache line `%s'", line))
			continue
		}
		localFname := items[0]
		currentFileCount += 1
		skip := false
		// Skip local files or folders by prefix.
		if uploadConfig.SkipPrefixes != "" {
			skipPrefixes := strings.Split(uploadConfig.SkipPrefixes, ",")
			for _, prefix := range skipPrefixes {
				if strings.HasPrefix(localFname, strings.TrimSpace(prefix)) {
					log.Debug(fmt.Sprintf("Skip by prefix '%s' for local file %s", strings.TrimSpace(prefix), localFname))
					skip = true
					skippedFileCount += 1
					break
				}
			}
			if skip {
				continue
			}
		}
		// Skip by suffix.
		if uploadConfig.SkipSuffixes != "" {
			skipSuffixes := strings.Split(uploadConfig.SkipSuffixes, ",")
			for _, suffix := range skipSuffixes {
				if strings.HasSuffix(localFname, strings.TrimSpace(suffix)) {
					log.Debug(fmt.Sprintf("Skip by suffix '%s' for local file %s", strings.TrimSpace(suffix), localFname))
					skip = true
					skippedFileCount += 1
					break
				}
			}
			if skip {
				continue
			}
		}
		// Pack the upload file key.
		localFlmd, _ := strconv.Atoi(items[2])
		uploadFileKey := localFname
		if uploadConfig.IgnoreDir {
			if i := strings.LastIndex(uploadFileKey, pathSep); i != -1 {
				uploadFileKey = uploadFileKey[i+1:]
			}
		}
		if uploadConfig.KeyPrefix != "" {
			uploadFileKey = strings.Join([]string{uploadConfig.KeyPrefix, uploadFileKey}, "")
		}
		// Convert \ to / under Windows.
		if runtime.GOOS == "windows" {
			uploadFileKey = strings.Replace(uploadFileKey, "\\", "/", -1)
		}
		localFilePath := filepath.Join(uploadConfig.SrcDir, localFname)
		fstat, err := os.Stat(localFilePath)
		if err != nil {
			log.Error(fmt.Sprintf("Error stat local file `%s' due to `%s'", localFilePath, err))
			return
		}
		fsize := fstat.Size()
		ldbKey := fmt.Sprintf("%s => %s", localFilePath, uploadFileKey)
		log.Info(fmt.Sprintf("Uploading %s (%d/%d, %.1f%%) ...", ldbKey, currentFileCount, totalFileCount,
			float32(currentFileCount)*100/float32(totalFileCount)))
		rsClient := rs.New(&mac)
		// Check whether the file already exists in the bucket.
		if uploadConfig.CheckExists {
			rsEntry, checkErr := rsClient.Stat(nil, uploadConfig.Bucket, uploadFileKey)
			if checkErr == nil {
				// Compare the hashes.
				localEtag, cErr := GetEtag(localFilePath)
				if cErr != nil {
					atomic.AddInt64(&failureFileCount, 1)
					log.Error("Calc local file hash failed,", cErr)
					continue
				}
				if rsEntry.Hash == localEtag {
					atomic.AddInt64(&skippedFileCount, 1)
					log.Debug(fmt.Sprintf("File %s already exists in bucket, ignore this upload", uploadFileKey))
					continue
				}
			} else {
				if _, ok := checkErr.(*rpc.ErrorInfo); !ok {
					// Not a logic error; should be a network error.
					atomic.AddInt64(&failureFileCount, 1)
					continue
				}
			}
		} else {
			// Check the last modified time in leveldb; Get returns
			// ErrNotFound when the key does not exist.
			ldbFlmd, err := ldb.Get([]byte(ldbKey), nil)
			flmd, _ := strconv.Atoi(string(ldbFlmd))
			if err == nil && localFlmd == flmd {
				log.Debug("Skip by local log for file", localFname)
				atomic.AddInt64(&skippedFileCount, 1)
				continue
			}
		}
		// Worker throttling.
		upCounter += 1
		if upCounter%threadThreshold == 0 {
			upWorkGroup.Wait()
		}
		upWorkGroup.Add(1)
		// Start to upload.
		go func() {
			defer upWorkGroup.Done()
			policy := rs.PutPolicy{}
			policy.Scope = uploadConfig.Bucket
			if uploadConfig.Overwrite {
				policy.Scope = uploadConfig.Bucket + ":" + uploadFileKey
				policy.InsertOnly = 0
			}
			policy.Expires = 24 * 3600
			uptoken := policy.Token(&mac)
			if fsize > PUT_THRESHOLD {
				putRet := rio.PutRet{}
				err := rio.PutFile(nil, &putRet, uptoken, uploadFileKey, localFilePath, nil)
				if err != nil {
					atomic.AddInt64(&failureFileCount, 1)
					if pErr, ok := err.(*rpc.ErrorInfo); ok {
						log.Error(fmt.Sprintf("Put file `%s' => `%s' failed due to `%s'", localFilePath, uploadFileKey, pErr.Err))
					} else {
						log.Error(fmt.Sprintf("Put file `%s' => `%s' failed due to `%s'", localFilePath, uploadFileKey, err))
					}
				} else {
					atomic.AddInt64(&successFileCount, 1)
					// Note: this branch stores "Y" rather than the mtime,
					// so the local-log skip above will not match for files
					// uploaded through the resumable path.
					perr := ldb.Put([]byte(ldbKey), []byte("Y"), &ldbWOpt)
					if perr != nil {
						log.Error(fmt.Sprintf("Put key `%s' into leveldb error due to `%s'", ldbKey, perr))
					}
				}
			} else {
				putRet := fio.PutRet{}
				err := fio.PutFile(nil, &putRet, uptoken, uploadFileKey, localFilePath, nil)
				if err != nil {
					atomic.AddInt64(&failureFileCount, 1)
					if pErr, ok := err.(*rpc.ErrorInfo); ok {
						log.Error(fmt.Sprintf("Put file `%s' => `%s' failed due to `%s'", localFilePath, uploadFileKey, pErr.Err))
					} else {
						log.Error(fmt.Sprintf("Put file `%s' => `%s' failed due to `%s'", localFilePath, uploadFileKey, err))
					}
				} else {
					atomic.AddInt64(&successFileCount, 1)
					perr := ldb.Put([]byte(ldbKey), []byte(strconv.Itoa(localFlmd)), &ldbWOpt)
					if perr != nil {
						log.Error(fmt.Sprintf("Put key `%s' into leveldb error due to `%s'", ldbKey, perr))
					}
				}
			}
		}()
	}
	upWorkGroup.Wait()

	log.Info("-------Upload Done-------")
	log.Info("Total:\t", currentFileCount)
	log.Info("Success:\t", successFileCount)
	log.Info("Failure:\t", failureFileCount)
	log.Info("Skipped:\t", skippedFileCount)
	log.Info("-------------------------")
}
func GenUptoken() string {
	putPolicy := rs.PutPolicy{
		Scope: setting.BucketName,
	}
	return putPolicy.Token(nil)
}
// upToken generates an upload token for the named bucket (storage space).
func upToken(bucketName string) string {
	putPolicy := rs.PutPolicy{
		Scope: bucketName,
	}
	return putPolicy.Token(nil)
}
func QiniuUpload(threadCount int, uploadConfigFile string) {
	fp, err := os.Open(uploadConfigFile)
	if err != nil {
		log.Error(fmt.Sprintf("Open upload config file `%s' error due to `%s'", uploadConfigFile, err))
		return
	}
	defer fp.Close()
	configData, err := ioutil.ReadAll(fp)
	if err != nil {
		log.Error(fmt.Sprintf("Read upload config file `%s' error due to `%s'", uploadConfigFile, err))
		return
	}
	var uploadConfig UploadConfig
	err = json.Unmarshal(configData, &uploadConfig)
	if err != nil {
		log.Error(fmt.Sprintf("Parse upload config file `%s' error due to `%s'", uploadConfigFile, err))
		return
	}
	if _, err := os.Stat(uploadConfig.SrcDir); err != nil {
		log.Error("Upload config error for parameter `SrcDir`,", err)
		return
	}
	dirCache := DirCache{}
	currentUser, err := user.Current()
	if err != nil {
		log.Error("Failed to get current user", err)
		return
	}
	pathSep := string(os.PathSeparator)

	// Create the job id.
	md5Hasher := md5.New()
	md5Hasher.Write([]byte(uploadConfig.SrcDir + ":" + uploadConfig.Bucket))
	jobId := fmt.Sprintf("%x", md5Hasher.Sum(nil))

	// Local storage path.
	storePath := fmt.Sprintf("%s%s.qshell%squpload%s%s", currentUser.HomeDir, pathSep, pathSep, pathSep, jobId)
	err = os.MkdirAll(storePath, 0775)
	if err != nil {
		log.Error(fmt.Sprintf("Failed to mkdir `%s' due to `%s'", storePath, err))
		return
	}

	// Cache file and leveldb folder.
	cacheFileName := fmt.Sprintf("%s%s%s.cache", storePath, pathSep, jobId)
	leveldbFileName := fmt.Sprintf("%s%s%s.ldb", storePath, pathSep, jobId)
	totalFileCount := dirCache.Cache(uploadConfig.SrcDir, cacheFileName)
	ldb, err := leveldb.OpenFile(leveldbFileName, nil)
	if err != nil {
		log.Error(fmt.Sprintf("Open leveldb `%s' failed due to `%s'", leveldbFileName, err))
		return
	}
	defer ldb.Close()

	// Sync.
	ufp, err := os.Open(cacheFileName)
	if err != nil {
		log.Error(fmt.Sprintf("Open cache file `%s' failed due to `%s'", cacheFileName, err))
		return
	}
	defer ufp.Close()
	bScanner := bufio.NewScanner(ufp)
	bScanner.Split(bufio.ScanLines)
	currentFileCount := 0
	ldbWOpt := opt.WriteOptions{
		Sync: true,
	}
	upWorkGroup := sync.WaitGroup{}
	upCounter := 0
	threadThreshold := threadCount + 1

	// Use the configured up host if not empty.
	if uploadConfig.UpHost != "" {
		conf.UP_HOST = uploadConfig.UpHost
	}
	// Set resumable upload settings.
	rio.SetSettings(&upSettings)
	mac := digest.Mac{uploadConfig.AccessKey, []byte(uploadConfig.SecretKey)}

	for bScanner.Scan() {
		line := strings.TrimSpace(bScanner.Text())
		items := strings.Split(line, "\t")
		// items[2] is read below, so require at least three fields (the
		// original `len(items) > 1` check could panic on two-field lines).
		if len(items) >= 3 {
			cacheFname := items[0]
			cacheFlmd, _ := strconv.Atoi(items[2])
			uploadFileKey := cacheFname
			if uploadConfig.IgnoreDir {
				if i := strings.LastIndex(uploadFileKey, pathSep); i != -1 {
					uploadFileKey = uploadFileKey[i+1:]
				}
			}
			if uploadConfig.KeyPrefix != "" {
				uploadFileKey = strings.Join([]string{uploadConfig.KeyPrefix, uploadFileKey}, "")
			}
			// Convert \ to / under Windows.
			if runtime.GOOS == "windows" {
				uploadFileKey = strings.Replace(uploadFileKey, "\\", "/", -1)
			}
			cacheFilePath := strings.Join([]string{uploadConfig.SrcDir, cacheFname}, pathSep)
			fstat, err := os.Stat(cacheFilePath)
			if err != nil {
				log.Error(fmt.Sprintf("Error stat local file `%s' due to `%s'", cacheFilePath, err))
				return
			}
			fsize := fstat.Size()
			currentFileCount += 1
			ldbKey := fmt.Sprintf("%s => %s", cacheFilePath, uploadFileKey)
			log.Debug(fmt.Sprintf("Checking %s ...", ldbKey))
			// Check the last modified time in leveldb; Get returns
			// ErrNotFound when the key does not exist.
			ldbFlmd, err := ldb.Get([]byte(ldbKey), nil)
			flmd, _ := strconv.Atoi(string(ldbFlmd))
			if err == nil && cacheFlmd == flmd {
				continue
			}
			fmt.Print("\033[2K\r")
			fmt.Printf("Uploading %s (%d/%d, %.1f%%) ...", ldbKey, currentFileCount, totalFileCount,
				float32(currentFileCount)*100/float32(totalFileCount))
			os.Stdout.Sync()
			rsClient := rs.New(&mac)
			// Worker throttling.
			upCounter += 1
			if upCounter%threadThreshold == 0 {
				upWorkGroup.Wait()
			}
			upWorkGroup.Add(1)
			go func() {
				defer upWorkGroup.Done()
				// Check whether the file already exists in the bucket.
				if uploadConfig.CheckExists {
					rsEntry, checkErr := rsClient.Stat(nil, uploadConfig.Bucket, uploadFileKey)
					if checkErr == nil {
						// Compare the hashes.
						localEtag, cErr := GetEtag(cacheFilePath)
						if cErr != nil {
							log.Error("Calc local file hash failed,", cErr)
							return
						}
						if rsEntry.Hash == localEtag {
							log.Info("File already exists in bucket, ignore this upload")
							return
						}
					} else {
						if _, ok := checkErr.(*rpc.ErrorInfo); !ok {
							// Not a logic error; should be a network error.
							return
						}
					}
				}
				// Upload.
				policy := rs.PutPolicy{}
				policy.Scope = uploadConfig.Bucket
				if uploadConfig.Overwrite {
					policy.Scope = uploadConfig.Bucket + ":" + uploadFileKey
					policy.InsertOnly = 0
				}
				policy.Expires = 24 * 3600
				uptoken := policy.Token(&mac)
				if fsize > PUT_THRESHOLD {
					putRet := rio.PutRet{}
					err := rio.PutFile(nil, &putRet, uptoken, uploadFileKey, cacheFilePath, nil)
					if err != nil {
						log.Error(fmt.Sprintf("Put file `%s' => `%s' failed due to `%s'", cacheFilePath, uploadFileKey, err))
					} else {
						// Note: stores "Y" rather than the mtime, so the
						// local-log skip above will not match this file later.
						perr := ldb.Put([]byte(ldbKey), []byte("Y"), &ldbWOpt)
						if perr != nil {
							log.Error(fmt.Sprintf("Put key `%s' into leveldb error due to `%s'", ldbKey, perr))
						}
					}
				} else {
					putRet := fio.PutRet{}
					err := fio.PutFile(nil, &putRet, uptoken, uploadFileKey, cacheFilePath, nil)
					if err != nil {
						log.Error(fmt.Sprintf("Put file `%s' => `%s' failed due to `%s'", cacheFilePath, uploadFileKey, err))
					} else {
						perr := ldb.Put([]byte(ldbKey), []byte(strconv.Itoa(cacheFlmd)), &ldbWOpt)
						if perr != nil {
							log.Error(fmt.Sprintf("Put key `%s' into leveldb error due to `%s'", ldbKey, perr))
						}
					}
				}
			}()
		} else {
			log.Error(fmt.Sprintf("Error cache line `%s'", line))
		}
	}
	upWorkGroup.Wait()
	fmt.Println()
	fmt.Println("Upload done!")
}
func (this *Unzipper) Do(req ufop.UfopRequest) (result interface{}, resultType int, contentType string, err error) {
	// Parse the command.
	bucket, prefix, overwrite, pErr := this.parse(req.Cmd)
	if pErr != nil {
		err = pErr
		return
	}
	// Check the mimetype.
	if req.Src.MimeType != "application/zip" {
		err = errors.New("unsupported mimetype to unzip")
		return
	}
	// Check the zip file length.
	if req.Src.Fsize > this.maxZipFileLength {
		err = errors.New("src zip file length exceeds the limit")
		return
	}
	// Fetch the resource.
	resUrl := req.Src.Url
	resResp, respErr := http.Get(resUrl)
	if respErr != nil || resResp.StatusCode != 200 {
		if respErr != nil {
			err = fmt.Errorf("retrieve resource data failed, %s", respErr.Error())
		} else {
			err = fmt.Errorf("retrieve resource data failed, %s", resResp.Status)
			if resResp.Body != nil {
				resResp.Body.Close()
			}
		}
		return
	}
	defer resResp.Body.Close()
	respData, readErr := ioutil.ReadAll(resResp.Body)
	if readErr != nil {
		err = fmt.Errorf("read resource data failed, %s", readErr.Error())
		return
	}
	// Read the zip.
	respReader := bytes.NewReader(respData)
	zipReader, zipErr := zip.NewReader(respReader, int64(respReader.Len()))
	if zipErr != nil {
		err = fmt.Errorf("invalid zip file, %s", zipErr.Error())
		return
	}
	zipFiles := zipReader.File
	// Check the file count.
	zipFileCount := len(zipFiles)
	if zipFileCount > this.maxFileCount {
		err = errors.New("zip files count exceeds the limit")
		return
	}
	// Check each file's uncompressed size.
	for _, zipFile := range zipFiles {
		fileSize := zipFile.UncompressedSize64
		if fileSize > this.maxFileLength {
			err = errors.New("zip file length exceeds the limit")
			return
		}
	}
	// Set the up host and resumable-upload settings.
	conf.UP_HOST = "http://up.qiniu.com"
	rputSettings := rio.Settings{
		ChunkSize: 4 * 1024 * 1024,
		Workers:   1,
	}
	rio.SetSettings(&rputSettings)
	var rputThreshold uint64 = 100 * 1024 * 1024
	policy := rs.PutPolicy{
		Scope: bucket,
	}
	var unzipResult UnzipResult
	unzipResult.Files = make([]UnzipFile, 0)
	var tErr error
	// Iterate over the zip entries.
	for _, zipFile := range zipFiles {
		fileInfo := zipFile.FileHeader.FileInfo()
		fileName := zipFile.FileHeader.Name
		fileSize := zipFile.UncompressedSize64
		if !utf8.Valid([]byte(fileName)) {
			fileName, tErr = utils.Gbk2Utf8(fileName)
			if tErr != nil {
				err = fmt.Errorf("unsupported file name encoding, %s", tErr.Error())
				return
			}
		}
		if fileInfo.IsDir() {
			continue
		}
		zipFileReader, zipErr := zipFile.Open()
		if zipErr != nil {
			err = fmt.Errorf("open zip file content failed, %s", zipErr.Error())
			return
		}
		unzipData, unzipErr := ioutil.ReadAll(zipFileReader)
		// Close the entry reader right away instead of deferring inside the
		// loop, so open readers do not pile up until the function returns.
		zipFileReader.Close()
		if unzipErr != nil {
			err = fmt.Errorf("unzip the file content failed, %s", unzipErr.Error())
			return
		}
		unzipReader := bytes.NewReader(unzipData)
		// Save the file to the bucket.
		fileName = prefix + fileName
		if overwrite {
			policy.Scope = bucket + ":" + fileName
		}
		uptoken := policy.Token(this.mac)
		var unzipFile UnzipFile
		unzipFile.Key = fileName
		if fileSize <= rputThreshold {
			var fputRet fio.PutRet
			fErr := fio.Put(nil, &fputRet, uptoken, fileName, unzipReader, nil)
			if fErr != nil {
				unzipFile.Error = fmt.Sprintf("save unzip file to bucket error, %s", fErr.Error())
			} else {
				unzipFile.Hash = fputRet.Hash
			}
		} else {
			var rputRet rio.PutRet
			rErr := rio.Put(nil, &rputRet, uptoken, fileName, unzipReader, int64(fileSize), nil)
			if rErr != nil {
				unzipFile.Error = fmt.Sprintf("save unzip file to bucket error, %s", rErr.Error())
			} else {
				unzipFile.Hash = rputRet.Hash
			}
		}
		unzipResult.Files = append(unzipResult.Files, unzipFile)
	}
	// Write the result.
	result = unzipResult
	resultType = ufop.RESULT_TYPE_JSON
	contentType = ufop.CONTENT_TYPE_JSON
	return
}