Example #1
func upFile(localFile, bucketName, key string) error {

	policy := PutPolicy{
		Scope: bucketName + ":" + key,
	}
	return io.PutFile(nil, nil, policy.Token(nil), key, localFile, nil)
}
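Note: Token(nil) in Example #1 signs the upload token with the SDK's package-level credentials, so those must be set before upFile is called. A minimal sketch of that setup, assuming the github.com/qiniu/api.v6/conf import path and placeholder keys (both assumptions, not part of the example above):

package main

import (
	"log"

	"github.com/qiniu/api.v6/conf" // assumed import path; match your SDK version
)

func main() {
	// Placeholder credentials; policy.Token(nil) reads these package-level values.
	conf.ACCESS_KEY = "<your-access-key>"
	conf.SECRET_KEY = "<your-secret-key>"

	if err := upFile("./photo.jpg", "mybucket", "photo.jpg"); err != nil {
		log.Fatal("upload failed: ", err)
	}
}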
Example #2
func uploadFileDemo(localFile, key, uptoken string) {
	// @gist uploadFile
	var err error
	var ret io.PutRet
	var extra = &io.PutExtra{
		// Params:   params,
		// MimeType: mimeType,
		// Crc32:    crc32,
		// CheckCrc: CheckCrc,
	}

	// ret       receives the response; see io.PutRet for details
	// uptoken   is the upload token generated by your business server
	// key       is the storage key that identifies the file
	// localFile is the path of the local file
	// extra     holds optional extra upload settings; see io.PutExtra
	err = io.PutFile(nil, &ret, uptoken, key, localFile, extra)

	if err != nil {
		// the upload failed
		log.Print("io.PutFile failed:", err)
		return
	}

	// the upload succeeded; handle the returned values
	log.Print(ret.Hash, ret.Key)
	// @endgist
}
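The uptoken that uploadFileDemo receives is produced on the business server from a PutPolicy, as several of the other examples do inline. A minimal sketch of that server-side step, assuming the SDK's rs package is imported alongside io (the helper name, bucket/key pair, and expiry are illustrative):

// makeUptoken builds an upload token scoped to a single bucket:key pair.
// Passing nil to Token uses the globally configured ACCESS_KEY/SECRET_KEY.
func makeUptoken(bucket, key string) string {
	policy := rs.PutPolicy{
		Scope:   bucket + ":" + key, // restrict the token to this exact key
		Expires: 3600,               // token lifetime in seconds
	}
	return policy.Token(nil)
}

uploadFileDemo(localFile, key, makeUptoken(bucket, key)) would then perform the upload.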
Example #3
func uploadLocalFile(key, fileName string) {
	var ret qiniuio.PutRet
	var extra = &qiniuio.PutExtra{
		//Params:    params,
		//MimeType:  mimeType,
		//Crc32:     crc32,
		//CheckCrc:  CheckCrc,
	}

	// ret       receives the response; see io.PutRet for details
	// uptoken   is the upload token generated by your business server
	// key       is the storage key that identifies the file
	// localFile is the path of the local file
	// extra     holds optional extra upload settings; see io.PutExtra
	err := qiniuio.PutFile(nil, &ret, uptoken(Bucket), key, fileName, extra)

	if err != nil {
		// the upload failed
		log.Print("io.PutFile failed:", err)
		return
	}

	// the upload succeeded; handle the returned values
	log.Printf("hash:%s,key:%s", ret.Hash, ret.Key)
}
Example #4
func uploadLocalFile(localFile, key string) (err error) {
	InitQiniu()
	var ret io.PutRet
	var extra = &io.PutExtra{
		// Params:   params,
		// MimeType: mimeType,
		// Crc32:    crc32,
		// CheckCrc: CheckCrc,
	}

	// ret       receives the response; see io.PutRet for details
	// uptoken   is the upload token generated by your business server
	// key       is the storage key that identifies the file (its filename)
	// localFile is the path of the local file
	// extra     holds optional extra upload settings; see io.PutExtra
	err = io.PutFile(nil, &ret, uptoken, key, localFile, extra)

	if err != nil {
		// the upload failed
		logger.Errorln("io.PutFile failed:", err)
		return
	}

	// the upload succeeded; handle the returned values
	logger.Debugln(ret.Hash, ret.Key)

	return
}
Example #5
func TestPrivateImageView(t *testing.T) {

	// first upload an image to use for this test
	policy := rs.PutPolicy{
		Scope: bucket + ":" + key,
	}
	err := io.PutFile(nil, nil, policy.Token(nil), key, localFile, nil)
	if err != nil {
		t.Errorf("TestPrivateImageView failed: %v", err)
		return
	}

	rawUrl := makeUrl(key)

	iv := ImageView{
		Mode:    2,
		Height:  250,
		Quality: 80,
	}
	imageViewUrl := iv.MakeRequest(rawUrl)
	p := rs.GetPolicy{}
	imageViewUrlWithToken := p.MakeRequest(imageViewUrl, nil)
	resp, err := http.DefaultClient.Get(imageViewUrlWithToken)
	if err != nil {
		t.Errorf("TestPrivateImageView failed: %v", err)
		return
	}
	defer resp.Body.Close()

	if (resp.StatusCode / 100) != 2 {
		t.Errorf("TestPrivateImageView failed: resp.StatusCode = %v", resp.StatusCode)
		return
	}
}
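makeUrl is not defined in this test snippet; a plausible minimal version just joins the bucket's download domain and the key, in the same spirit as Example #11 below. The domain constant here is hypothetical:

// domain is a placeholder for the bucket's bound download domain.
const domain = "mybucket.qiniudn.com"

// makeUrl builds the raw (unsigned) download URL for key; the test then
// appends image-view parameters and a download token to it.
func makeUrl(key string) string {
	return "http://" + domain + "/" + key
}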
Example #6
func upload() {
	policy := up.AuthPolicy{
		Scope:               "needkane",
		InsertOnly:          0, // if non-zero, the token only allows uploads of new files (no overwrite), regardless of the Scope form
		Deadline:            time.Now().Unix() + 3600,
		CallbackUrl:         "125.64.9.132:8090/callback",
		CallbackBody:        "callbackBody",
		PersistentNotifyUrl: "125.64.9.132:8090/callback",
		//CallbackBody:callbackBody,
		//ReturnUrl: "http://101.71.89.171:6666",
		//DetectMime: detectMime,
		PersistentOps:      "vframe/jpg/offset/2",
		PersistentPipeline: "kane",
	}
	uptoken := up.MakeAuthTokenString(ACCESS_KEY, SECRET_KEY, &policy)
	var ret qio.PutRet
	var extra = &qio.PutExtra{
		//Params: params,
		//MimeType: mimeType,
		//Crc32: crc32,
		//CheckCrc: CheckCrc,
	}
	err := qio.PutFile(nil, &ret, uptoken, "key123", "/home/qboxtest/Desktop/123.mp4", extra)

	if err != nil {
		// the upload failed
		log.Print("io.PutFile failed:", err)
		return
	}

	// the upload succeeded; handle the returned values
	log.Print(ret.Hash, ret.Key)
}
Example #7
func main() {
	uptoken := uptoken("aaaa")
	fmt.Printf("uptoken:%s\n", uptoken)

	var err error
	var ret io.PutRet
	var extra = &io.PutExtra{
		//Params:    params,
		//MimeType:  mimeType,
		//Crc32:     crc32,
		//CheckCrc:  CheckCrc,
	}

	var key = "communtiy_1"
	var localFile = "d:\\566594.jpg"

	// ret       receives the response; see io.PutRet for details
	// uptoken   is the upload token generated by your business server
	// key       is the storage key that identifies the file
	// localFile is the path of the local file
	// extra     holds optional extra upload settings; see io.PutExtra
	err = io.PutFile(nil, &ret, uptoken, key, localFile, extra)

	if err != nil {
		// the upload failed
		log.Print("io.PutFile failed:", err)
		return
	}

	// the upload succeeded; handle the returned values
	log.Print(ret.Hash, ret.Key)

}
Example #8
func upFile(bucket string, key string, localPath string) error {
	var ret qiniuio.PutRet

	policy := rs.PutPolicy{
		Scope: bucket + ":" + key,
	}
	err := qiniuio.PutFile(nil, &ret, policy.Token(mac), key, localPath, nil)
	log.Printf("ret : %+v", ret)
	return err
}
Example #9
// Deploys a site to QiniuCloudStorage.
func (s *Site) DeployToQiniu(key, secret, bucket string) error {
	q6cfg.ACCESS_KEY = key
	q6cfg.SECRET_KEY = secret

	// walks the _site directory and uploads each file to QiniuCloudStorage
	walker := func(fn string, fi os.FileInfo, err error) error {
		if fi.IsDir() {
			return nil
		}

		rel, _ := filepath.Rel(s.Dest, fn)
		logf(MsgUploadFile, rel)
		if err != nil {
			return err
		}

		key := filepath.ToSlash(rel)
		policy := q6rs.PutPolicy{
			Scope:   bucket + ":" + key,
			Expires: 60,
		}
		uptoken := policy.Token()

		ret := new(q6io.PutRet)
		extra := &q6io.PutExtra{MimeType: mime.TypeByExtension(filepath.Ext(rel)), Bucket: bucket}

		// try to upload the file ... sometimes this fails due to QiniuCloudStorage
		// issues. If so, we'll re-try
		if err := q6io.PutFile(nil, ret, uptoken, key, fn, extra); err != nil {
			time.Sleep(100 * time.Millisecond) // sleep so that we don't immediately retry
			return q6io.PutFile(nil, ret, uptoken, key, fn, extra)
		}

		// file upload was a success, return nil
		return nil
	}

	return filepath.Walk(s.Dest, walker)
}
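Example #9 retries a failed upload exactly once after a fixed 100 ms pause. If transient failures are frequent, the same idea extends to a small bounded-retry helper; this sketch reuses the q6io call from the example and adds simple linear backoff (the helper name, attempt count, and delay are illustrative, not part of the original):

// putFileWithRetry retries q6io.PutFile up to attempts times, sleeping a
// little longer before each retry, and returns the last error if all fail.
func putFileWithRetry(ret *q6io.PutRet, uptoken, key, fn string, extra *q6io.PutExtra, attempts int) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = q6io.PutFile(nil, ret, uptoken, key, fn, extra); err == nil {
			return nil
		}
		time.Sleep(time.Duration(i+1) * 100 * time.Millisecond)
	}
	return err
}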
Example #10
func FormPut(cmd string, params ...string) {
	if len(params) == 3 || len(params) == 4 || len(params) == 5 {
		bucket := params[0]
		key := params[1]
		localFile := params[2]
		mimeType := ""
		upHost := "http://upload.qiniu.com"
		if len(params) == 4 {
			param := params[3]
			if strings.HasPrefix(param, "http") {
				upHost = param
			} else {
				mimeType = param
			}
		}
		if len(params) == 5 {
			mimeType = params[3]
			upHost = params[4]
		}
		accountS.Get()
		mac := digest.Mac{accountS.AccessKey, []byte(accountS.SecretKey)}
		policy := rs.PutPolicy{}
		policy.Scope = bucket
		putExtra := fio.PutExtra{}
		if mimeType != "" {
			putExtra.MimeType = mimeType
		}
		conf.UP_HOST = upHost
		uptoken := policy.Token(&mac)
		putRet := fio.PutRet{}
		startTime := time.Now()
		fStat, statErr := os.Stat(localFile)
		if statErr != nil {
			log.Error("Local file error", statErr)
			return
		}
		fsize := fStat.Size()
		err := fio.PutFile(nil, &putRet, uptoken, key, localFile, &putExtra)
		if err != nil {
			log.Error("Put file error", err)
		} else {
			fmt.Println("Put file", localFile, "=>", bucket, ":", putRet.Key, "(", putRet.Hash, ")", "success!")
		}
		lastNano := time.Now().UnixNano() - startTime.UnixNano()
		lastTime := fmt.Sprintf("%.2f", float32(lastNano)/1e9)
		avgSpeed := fmt.Sprintf("%.1f", float32(fsize)*1e6/float32(lastNano))
		fmt.Println("Last time:", lastTime, "s, Average Speed:", avgSpeed, "KB/s")
	} else {
		CmdHelp(cmd)
	}
}
Example #11
func UploadFile(localFile string, destName string) (addr string, err error) {
	policy := new(rs.PutPolicy)
	policy.Scope = Bulket
	uptoken := policy.Token(nil)

	var ret io.PutRet
	var extra = new(io.PutExtra)
	err = io.PutFile(nil, &ret, uptoken, destName, localFile, extra)
	if err != nil {
		return
	}
	addr = "http://" + Bulket + ".qiniudn.com/" + destName
	return
}
Example #12
// Upload a file and verify that the returned hash matches the expected hash
func UploadFileWithHash(ctx *Context, localPath string, remotePath string, expectHash string) (err error) {
	var ret qiniuIo.PutRet
	var extra = &qiniuIo.PutExtra{
		CheckCrc: 1,
	}
	putPolicy := rs.PutPolicy{
		Scope: ctx.bucket + ":" + remotePath,
	}
	uptoken := putPolicy.Token(nil)
	err = qiniuIo.PutFile(nil, &ret, uptoken, remotePath, localPath, extra)
	//fmt.Println(localPath,remotePath,err)
	if err != nil {
		return
	}
	if ret.Hash != expectHash {
		return fmt.Errorf("[UploadFileWithHash][remotePath:%s] ret.Hash:[%s]!=expectHash[%s] ", remotePath, ret.Hash, expectHash)
	}

	return
}
Example #13
func qiniucloudsave(file string) (url string, err error) {

	var key string
	//get the filename from the path, e.g. "1.txt" from "/home/liugenping/1.txt"
	for _, key = range strings.Split(file, "/") {

	}

	url = "http://" + g_qiniuEndpoint + "/" + key

	putPolicy := rs.PutPolicy{Scope: g_qiniuBucket}
	uptoken := putPolicy.Token(nil)

	var ret io.PutRet
	var extra = &io.PutExtra{}
	err = io.PutFile(nil, &ret, uptoken, key, file, extra)
	if err != nil {
		return "", err
	} else {
		return url, nil
	}

}
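The ranged strings.Split loop in Example #13 only extracts the last path segment; the standard library does the same in one call. A small equivalent, assuming "path/filepath" is added to the imports:

// baseName returns the final element of a path, e.g. "1.txt" for
// "/home/liugenping/1.txt", replacing the manual split loop above.
func baseName(file string) string {
	return filepath.Base(file)
}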
Example #14
func QiniuUpload(threadCount int, uploadConfigFile string) {
	fp, err := os.Open(uploadConfigFile)
	if err != nil {
		log.Error(fmt.Sprintf("Open upload config file `%s' error due to `%s'", uploadConfigFile, err))
		return
	}
	defer fp.Close()
	configData, err := ioutil.ReadAll(fp)
	if err != nil {
		log.Error(fmt.Sprintf("Read upload config file `%s' error due to `%s'", uploadConfigFile, err))
		return
	}
	var uploadConfig UploadConfig
	err = json.Unmarshal(configData, &uploadConfig)
	if err != nil {
		log.Error(fmt.Sprintf("Parse upload config file `%s' errror due to `%s'", uploadConfigFile, err))
		return
	}
	if _, err := os.Stat(uploadConfig.SrcDir); err != nil {
		log.Error("Upload config error for parameter `SrcDir`,", err)
		return
	}
	dirCache := DirCache{}
	currentUser, err := user.Current()
	if err != nil {
		log.Error("Failed to get current user", err)
		return
	}
	pathSep := string(os.PathSeparator)
	jobId := base64.URLEncoding.EncodeToString([]byte(uploadConfig.SrcDir + ":" + uploadConfig.Bucket))
	storePath := fmt.Sprintf("%s%s.qshell%squpload%s%s", currentUser.HomeDir, pathSep, pathSep, pathSep, jobId)
	err = os.MkdirAll(storePath, 0775)
	if err != nil {
		log.Error(fmt.Sprintf("Failed to mkdir `%s' due to `%s'", storePath, err))
		return
	}
	cacheFileName := fmt.Sprintf("%s%s%s.cache", storePath, pathSep, jobId)
	leveldbFileName := fmt.Sprintf("%s%s%s.ldb", storePath, pathSep, jobId)
	totalFileCount := dirCache.Cache(uploadConfig.SrcDir, cacheFileName)
	ldb, err := leveldb.OpenFile(leveldbFileName, nil)
	if err != nil {
		log.Error(fmt.Sprintf("Open leveldb `%s' failed due to `%s'", leveldbFileName, err))
		return
	}
	defer ldb.Close()
	//sync
	ufp, err := os.Open(cacheFileName)
	if err != nil {
		log.Error(fmt.Sprintf("Open cache file `%s' failed due to `%s'", cacheFileName, err))
		return
	}
	defer ufp.Close()
	bScanner := bufio.NewScanner(ufp)
	bScanner.Split(bufio.ScanLines)
	currentFileCount := 0
	ldbWOpt := opt.WriteOptions{
		Sync: true,
	}

	upWorkGroup := sync.WaitGroup{}
	upCounter := 0
	threadThreshold := threadCount + 1

	//use host if not empty
	if uploadConfig.UpHost != "" {
		conf.UP_HOST = uploadConfig.UpHost
	}
	//set settings
	rio.SetSettings(&upSettings)
	mac := digest.Mac{uploadConfig.AccessKey, []byte(uploadConfig.SecretKey)}
	//check thread count
	for bScanner.Scan() {
		line := strings.TrimSpace(bScanner.Text())
		items := strings.Split(line, "\t")
		if len(items) > 2 { // need items[0] (name) and items[2] (last modified)
			cacheFname := items[0]
			cacheFlmd, _ := strconv.Atoi(items[2])
			uploadFileKey := cacheFname
			if uploadConfig.IgnoreDir {
				if i := strings.LastIndex(uploadFileKey, pathSep); i != -1 {
					uploadFileKey = uploadFileKey[i+1:]
				}
			}
			if uploadConfig.KeyPrefix != "" {
				uploadFileKey = strings.Join([]string{uploadConfig.KeyPrefix, uploadFileKey}, "")
			}
			//convert \ to / under windows
			if runtime.GOOS == "windows" {
				uploadFileKey = strings.Replace(uploadFileKey, "\\", "/", -1)
			}
			cacheFilePath := strings.Join([]string{uploadConfig.SrcDir, cacheFname}, pathSep)
			fstat, err := os.Stat(cacheFilePath)
			if err != nil {
				log.Error(fmt.Sprintf("Error stat local file `%s' due to `%s'", cacheFilePath, err))
				return
			}
			fsize := fstat.Size()

			//check leveldb
			currentFileCount += 1
			ldbKey := fmt.Sprintf("%s => %s", cacheFilePath, uploadFileKey)
			log.Debug(fmt.Sprintf("Checking %s ...", ldbKey))
			//check last modified
			ldbFlmd, err := ldb.Get([]byte(ldbKey), nil)
			flmd, _ := strconv.Atoi(string(ldbFlmd))
			//not exist, return ErrNotFound
			if err == nil && cacheFlmd == flmd {
				continue
			}

			fmt.Print("\033[2K\r")
			fmt.Printf("Uploading %s (%d/%d, %.1f%%) ...", ldbKey, currentFileCount, totalFileCount,
				float32(currentFileCount)*100/float32(totalFileCount))
			os.Stdout.Sync()
			rsClient := rs.New(&mac)
			//worker
			upCounter += 1
			if upCounter%threadThreshold == 0 {
				upWorkGroup.Wait()
			}
			upWorkGroup.Add(1)
			go func() {
				defer upWorkGroup.Done()
				//check exists
				if uploadConfig.CheckExists {
					rsEntry, checkErr := rsClient.Stat(nil, uploadConfig.Bucket, uploadFileKey)
					if checkErr != nil {
						log.Error(fmt.Sprintf("Stat `%s' error due to `%s'", uploadFileKey, checkErr))
						return
					} else if rsEntry.Fsize == fsize {
						log.Debug("File already exists in bucket, ignore this upload")
						return
					}
				}
				//upload
				policy := rs.PutPolicy{}
				policy.Scope = uploadConfig.Bucket
				if uploadConfig.Overwrite {
					policy.Scope = uploadConfig.Bucket + ":" + uploadFileKey
					policy.InsertOnly = 0
				}
				policy.Expires = 24 * 3600
				uptoken := policy.Token(&mac)
				if fsize > PUT_THRESHOLD {
					putRet := rio.PutRet{}
					err := rio.PutFile(nil, &putRet, uptoken, uploadFileKey, cacheFilePath, nil)
					if err != nil {
						log.Error(fmt.Sprintf("Put file `%s' => `%s' failed due to `%s'", cacheFilePath, uploadFileKey, err))
					} else {
						perr := ldb.Put([]byte(ldbKey), []byte("Y"), &ldbWOpt)
						if perr != nil {
							log.Error(fmt.Sprintf("Put key `%s' into leveldb error due to `%s'", ldbKey, perr))
						}
					}
				} else {
					putRet := fio.PutRet{}
					err := fio.PutFile(nil, &putRet, uptoken, uploadFileKey, cacheFilePath, nil)
					if err != nil {
						log.Error(fmt.Sprintf("Put file `%s' => `%s' failed due to `%s'", cacheFilePath, uploadFileKey, err))
					} else {
						perr := ldb.Put([]byte(ldbKey), []byte(strconv.Itoa(cacheFlmd)), &ldbWOpt)
						if perr != nil {
							log.Error(fmt.Sprintf("Put key `%s' into leveldb error due to `%s'", ldbKey, perr))
						}
					}
				}
			}()
		} else {
			log.Error(fmt.Sprintf("Error cache line `%s'", line))
		}
	}
	upWorkGroup.Wait()
	fmt.Println()
	fmt.Println("Upload done!")
}
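The upCounter%threadThreshold pattern in Example #14 waits for a whole batch of goroutines before starting the next one, so fewer than threadCount uploads may be in flight near a batch boundary. A buffered-channel semaphore is a common stdlib-only alternative that keeps the worker count steady; this sketch is illustrative and not part of qshell:

// uploadAll runs fn for every item with at most limit concurrent workers,
// using a buffered channel as a semaphore to bound concurrency.
func uploadAll(items []string, limit int, fn func(string)) {
	sem := make(chan struct{}, limit)
	var wg sync.WaitGroup
	for _, it := range items {
		wg.Add(1)
		sem <- struct{}{} // blocks once limit goroutines are in flight
		go func(it string) {
			defer wg.Done()
			defer func() { <-sem }()
			fn(it)
		}(it)
	}
	wg.Wait()
}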