Example #1
func upload() {
	policy := up.AuthPolicy{
		Scope:               "needkane",
		InsertOnly:          0, // if set to a non-zero value, the upload may only create new files (insert-only mode), regardless of how Scope is set
		Deadline:            time.Now().Unix() + 3600,
		CallbackUrl:         "125.64.9.132:8090/callback",
		CallbackBody:        "callbackBody",
		PersistentNotifyUrl: "125.64.9.132:8090/callback",
		//CallbackBody:callbackBody,
		//ReturnUrl: "http://101.71.89.171:6666",
		//DetectMime: detectMime,
		PersistentOps:      "vframe/jpg/offset/2",
		PersistentPipeline: "kane",
	}
	uptoken := up.MakeAuthTokenString(ACCESS_KEY, SECRET_KEY, &policy)
	var ret qio.PutRet
	var extra = &qio.PutExtra{
	//Params: params,
	//MimeType: mieType,
	//Crc32: crc32,
	//CheckCrc: CheckCrc,
	}
	err := qio.PutFile(nil, &ret, uptoken, "key123", "/home/qboxtest/Desktop/123.mp4", extra)

	if err != nil {
		// the upload failed
		log.Print("io.PutFile failed:", err)
		return
	}

	// upload succeeded; handle the returned values
	log.Print(ret.Hash, ret.Key)
}
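
Example #1 sets CallbackUrl to 125.64.9.132:8090/callback, so the upload service POSTs the rendered CallbackBody to that address once the file is stored. Below is a minimal sketch of what such a receiving endpoint might look like, assuming a plain HTTP handler; the handler name and the JSON acknowledgement it returns are illustrative and not part of the SDK.

package main

import (
	"io/ioutil"
	"log"
	"net/http"
)

// handleCallback is a hypothetical receiver for the CallbackUrl used above.
// It logs the callback body sent by the upload service and replies with a
// small JSON payload that the service relays back to the uploading client.
func handleCallback(w http.ResponseWriter, r *http.Request) {
	defer r.Body.Close()
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		http.Error(w, "read body failed", http.StatusBadRequest)
		return
	}
	log.Println("callback body:", string(body))

	w.Header().Set("Content-Type", "application/json")
	w.Write([]byte(`{"success":true}`))
}

func main() {
	http.HandleFunc("/callback", handleCallback)
	log.Fatal(http.ListenAndServe(":8090", nil))
}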
Example #2
func upload3() {
	ACCESS_KEY = ""
	SECRET_KEY = ""
	UP_HOST = "http://127.0.0.1:11200"
	file, err := os.Open("/home/qboxtest/Desktop/123.mp4")
	if err != nil {
		log.Println("os.Open failed:", err)
		return
	}
	defer file.Close()
	policy := up.AuthPolicy{
		Scope:               "need10",
		InsertOnly:          0, // if set to a non-zero value, the upload may only create new files (insert-only mode), regardless of how Scope is set
		Deadline:            time.Now().Unix() + 3600,
		CallbackUrl:         "125.64.9.132:8090/callback",
		CallbackBody:        "callbackBody",
		PersistentNotifyUrl: "125.64.9.132:8090/callback",
		//CallbackBody:callbackBody,
		//DetectMime: detectMime,
		PersistentOps:      "vframe/jpg/offset/2|saveas/bmVlZDEwOjEzMw==",
		PersistentPipeline: "need",
	}
	uptoken := up.MakeAuthTokenString(ACCESS_KEY, SECRET_KEY, &policy)

	url := UP_HOST
	// entry is in "bucket:filename" form
	entry := "needbc:132"
	//encodeEntryUri := base64.URLEncoding.EncodeToString([]byte(entry))
	//action := "/rs-put/" + encodeEntryUri
	extraParams := map[string]string{
		"token": uptoken,
		//"action": action,
		"key": "13v3",
	}
	filename := entry
	idx := strings.Index(entry, ":")
	if idx != -1 {
		filename = entry[idx+1:]
	}
	req, err := newUploadRequest(url, "file", filename, extraParams, file)
	if err != nil {
		log.Println("newUploadRequest failed:", err)
		return
	}
	hc := http.Client{}
	resp, err := hc.Do(req)
	if err != nil {
		log.Println("hc.Do failed:", err)
		return
	}
	defer resp.Body.Close()
	data, _ := ioutil.ReadAll(resp.Body)
	log.Println(resp.StatusCode, string(data))
}
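
Example #2 depends on a newUploadRequest helper that is not shown. A minimal sketch of what such a helper typically looks like follows, assuming it builds a multipart/form-data POST in which extraParams become ordinary form fields (e.g. "token", "key") and the file content is attached under the given field name; the signature is inferred from the call site and the real helper may differ.

// newUploadRequest builds a multipart/form-data POST request: each entry in
// extraParams is written as a form field, and the file is attached under
// paramName with the given filename. Sketch only, under the assumptions above.
func newUploadRequest(uri, paramName, filename string, extraParams map[string]string, file io.Reader) (*http.Request, error) {
	var buf bytes.Buffer
	w := multipart.NewWriter(&buf)

	// plain form fields such as "token" and "key"
	for k, v := range extraParams {
		if err := w.WriteField(k, v); err != nil {
			return nil, err
		}
	}

	// the file part itself
	part, err := w.CreateFormFile(paramName, filename)
	if err != nil {
		return nil, err
	}
	if _, err := io.Copy(part, file); err != nil {
		return nil, err
	}
	if err := w.Close(); err != nil {
		return nil, err
	}

	req, err := http.NewRequest("POST", uri, &buf)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", w.FormDataContentType())
	return req, nil
}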
Example #3
// WriteStream stores the contents of the provided io.ReadCloser at a
// location designated by the given path.
// May be used to resume writing a stream by providing a nonzero offset.
// The offset must be no larger than the CurrentSize for this path.
func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error) {

	uptoken := qiniuup.MakeAuthTokenString(d.client.AccessKey, d.client.SecretKey, &qiniuup.AuthPolicy{
		Scope:    d.bucket.Name + ":" + d.getKey(path),
		Deadline: 3600 + uint32(time.Now().Unix()),
		Accesses: []string{d.getKey(path)},
	})

	writeWholeFile := false

	pathNotFoundErr := storagedriver.PathNotFoundError{Path: path}

	stat, err := d.Stat(ctx, path)
	if err != nil {
		if err.Error() == pathNotFoundErr.Error() {
			writeWholeFile = true
		} else {
			return 0, err
		}
	}

	path = d.getKey(path)

	// spool the reader into a local temp file so it can be re-read and sized
	tmpF, err := ioutil.TempFile("/tmp", "qiniu_driver")
	if err != nil {
		return 0, err
	}

	defer os.Remove(tmpF.Name())
	defer tmpF.Close()

	written, err := io.Copy(tmpF, reader)
	if err != nil {
		return 0, err
	}
	tmpF.Sync()
	_, err = tmpF.Seek(0, io.SeekStart)
	if err != nil {
		return 0, err
	}

	//------------------------

	if !writeWholeFile {
		parts := make([]qiniurs.Part, 0)

		if offset == 0 {
			part_Reader := qiniurs.Part{
				FileName: "",
				R:        tmpF,
			}
			parts = append(parts, part_Reader)

			if written < stat.Size() {
				part_OriginFile2 := qiniurs.Part{
					Key:  path,
					From: written,
					To:   -1,
				}
				parts = append(parts, part_OriginFile2)
			}

		} else if offset == stat.Size() { // the parts API mishandles closed intervals, so treat offset == stat.Size() as a special case first
			part_OriginFile1 := qiniurs.Part{
				Key:  path,
				From: 0,
				To:   -1,
			}
			parts = append(parts, part_OriginFile1)

			part_Reader := qiniurs.Part{
				FileName: "",
				R:        tmpF,
			}
			parts = append(parts, part_Reader)
		} else if offset < stat.Size() {
			part_OriginFile1 := qiniurs.Part{
				Key:  path,
				From: 0,
				To:   offset,
			}
			parts = append(parts, part_OriginFile1)

			appendSize := written + offset
			part_Reader := qiniurs.Part{
				FileName: "",
				R:        tmpF,
			}
			parts = append(parts, part_Reader)

			if appendSize < stat.Size() {
				part_OriginFile2 := qiniurs.Part{
					Key:  path,
					From: appendSize,
					To:   -1,
				}
				parts = append(parts, part_OriginFile2)
			}
		} else if offset > stat.Size() {
			part_OriginFile1 := qiniurs.Part{
				Key:  path,
				From: 0,
				To:   -1,
			}
			parts = append(parts, part_OriginFile1)

			zeroBytes := make([]byte, offset-stat.Size())
			part_ZeroPart := qiniurs.Part{
				R: bytes.NewReader(zeroBytes),
			}
			parts = append(parts, part_ZeroPart)

			part_Reader := qiniurs.Part{
				R: tmpF,
			}
			parts = append(parts, part_Reader)
		}
		err = qiniurs.PutParts(nil, nil, uptoken, path, true, parts, nil)
		if err != nil {
			return 0, err
		}
	} else {
		err := d.bucket.PutFile(ctx, nil, path, tmpF.Name(), nil)
		if err != nil {
			return 0, err
		}
	}

	return written, nil
}
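
The WriteStream driver above repeatedly calls d.getKey(path) to map a storage-driver path to an object key in the bucket. A hedged sketch of what that mapping usually is in registry storage drivers follows; the rootDirectory field is an assumption and is not shown in the original code.

// getKey maps a storage-driver path such as "/docker/registry/v2/blobs/..."
// to an object key. Sketch only: it assumes the driver carries an optional
// rootDirectory prefix and that object keys must not start with a slash.
func (d *driver) getKey(path string) string {
	return strings.TrimPrefix(d.rootDirectory+path, "/")
}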