Example #1
func testXVar(t *testing.T, token string, extra *PutExtra) {

	type Ret struct {
		PutRet
		X1 string `json:"x:1"`
	}
	var ret Ret
	f, err := os.Open(testFile)
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()

	fi, err := f.Stat()
	if err != nil {
		t.Fatal(err)
	}

	err = Put(nil, &ret, token, testKey, f, fi.Size(), extra)
	if err != nil {
		t.Fatal(err)
	}
	defer rs.New(nil).Delete(nil, bucket, ret.Key)

	if ret.X1 != "1" {
		t.Fatal("test xVar failed:", ret.X1)
	}
}
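For the x:1 assertion above to pass, the caller presumably supplies the custom variable through the upload extras. A minimal sketch, assuming the Params field of the qiniu v6 io.PutExtra used by these tests:

	// hypothetical caller setup: the "x:1" form field is echoed back
	// by the server under the "x:1" key of the JSON response
	extra := &PutExtra{
		Params: map[string]string{
			"x:1": "1",
		},
	}
	testXVar(t, token, extra)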
Example #2
func GetBuckets(mac *digest.Mac) (buckets []string, err error) {
	buckets = make([]string, 0)
	client := rs.New(mac)
	bucketsUri := fmt.Sprintf("%s/buckets", conf.RS_HOST)
	err = client.Conn.Call(nil, &buckets, bucketsUri)
	return
}
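A minimal caller sketch for GetBuckets; the credential values are placeholders, and the field names follow the digest.Mac literals visible elsewhere in these examples:

	mac := digest.Mac{
		AccessKey: "your-access-key",         // placeholder
		SecretKey: []byte("your-secret-key"), // placeholder
	}
	buckets, err := GetBuckets(&mac)
	if err != nil {
		log.Println("GetBuckets failed:", err)
		return
	}
	for _, bucket := range buckets {
		fmt.Println(bucket)
	}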
Example #3
File: rs.go Project: micooz/qshell
func Copy(cmd string, params ...string) {
	if len(params) == 3 || len(params) == 4 {
		srcBucket := params[0]
		srcKey := params[1]
		destBucket := params[2]
		destKey := srcKey
		if len(params) == 4 {
			destKey = params[3]
		}
		accountS.Get()
		mac := digest.Mac{
			accountS.AccessKey,
			[]byte(accountS.SecretKey),
		}
		client := rs.New(&mac)
		err := client.Copy(nil, srcBucket, srcKey, destBucket, destKey)
		if err != nil {
			log.Error("Copy error,", err)
		} else {
			fmt.Println("Done!")
		}
	} else {
		CmdHelp(cmd)
	}
}
Example #4
File: rs.go Project: micooz/qshell
func BatchRename(cmd string, params ...string) {
	//confirm
	rcode := CreateRandString(6)
	if rcode == "" {
		log.Error("Create confirm code failed")
		return
	}

	rcode2 := ""
	fmt.Printf("\033[31m<DANGER>\033[0m Input \033[32m%s\033[0m to confirm operation: ", rcode)
	fmt.Scanln(&rcode2)

	if rcode != rcode2 {
		fmt.Println("Task quit!")
		return
	}

	if len(params) == 2 {
		bucket := params[0]
		oldNewKeyMapFile := params[1]
		accountS.Get()
		mac := digest.Mac{
			accountS.AccessKey,
			[]byte(accountS.SecretKey),
		}
		client := rs.New(&mac)
		fp, err := os.Open(oldNewKeyMapFile)
		if err != nil {
			log.Error("Open old new key map file error")
			return
		}
		defer fp.Close()
		scanner := bufio.NewScanner(fp)
		scanner.Split(bufio.ScanLines)
		entries := make([]qshell.RenameEntryPath, 0)
		for scanner.Scan() {
			line := strings.TrimSpace(scanner.Text())
			items := strings.Split(line, "\t")
			if len(items) == 2 {
				oldKey := items[0]
				newKey := items[1]
				if oldKey != "" && newKey != "" {
					entry := qshell.RenameEntryPath{bucket, oldKey, newKey}
					entries = append(entries, entry)
				}
			}
			if len(entries) == BATCH_ALLOW_MAX {
				batchRename(client, entries)
				entries = make([]qshell.RenameEntryPath, 0)
			}
		}
		if len(entries) > 0 {
			batchRename(client, entries)
		}
		fmt.Println("All Renamed!")
	} else {
		CmdHelp(cmd)
	}
}
Example #5
func GetDomainsOfBucket(mac *digest.Mac, bucket string) (domains []string, err error) {
	domains = make([]string, 0)
	client := rs.New(mac)
	getDomainsUrl := "http://api.qiniu.com/v6/domain/list"
	postData := map[string][]string{
		"tbl": []string{bucket},
	}
	err = client.Conn.CallWithForm(nil, &domains, getDomainsUrl, postData)
	return
}
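Usage mirrors GetBuckets; a hedged sketch that reuses the mac credential from the GetBuckets sketch above, with a placeholder bucket name:

	domains, err := GetDomainsOfBucket(&mac, "my-bucket") // "my-bucket" is a placeholder
	if err != nil {
		log.Println("GetDomainsOfBucket failed:", err)
		return
	}
	for _, domain := range domains {
		fmt.Println(domain) // each entry is a domain bound to the bucket
	}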
Example #6
func Fetch(mac *digest.Mac, remoteResUrl, bucket, key string) (fetchResult FetchResult, err error) {
	client := rs.New(mac)
	entry := bucket
	if key != "" {
		entry += ":" + key
	}
	fetchUri := fmt.Sprintf("/fetch/%s/to/%s",
		base64.URLEncoding.EncodeToString([]byte(remoteResUrl)),
		base64.URLEncoding.EncodeToString([]byte(entry)))
	err = client.Conn.Call(nil, &fetchResult, conf.IO_HOST+fetchUri)
	return
}
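The fetch URI encodes both the remote URL and the destination entry ("bucket" or "bucket:key") with URL-safe base64. A hedged usage sketch with placeholder values, again reusing the mac credential from the sketches above:

	result, err := Fetch(&mac, "http://example.com/logo.png", "my-bucket", "logo.png")
	if err != nil {
		log.Println("Fetch failed:", err)
		return
	}
	log.Println(result) // FetchResult describing the stored object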
Example #7
func init() {

	ACCESS_KEY = os.Getenv("QINIU_ACCESS_KEY")
	SECRET_KEY = os.Getenv("QINIU_SECRET_KEY")
	if ACCESS_KEY == "" || SECRET_KEY == "" {
		panic("require ACCESS_KEY & SECRET_KEY")
	}
	bucket = os.Getenv("QINIU_TEST_BUCKET")
	if bucket == "" {
		panic("require QINIU_TEST_BUCKET")
	}
	rs.New(nil).Delete(nil, bucket, testKey)
}
Example #8
func testPutFile(t *testing.T, token string, extra *PutExtra) {

	var ret PutRet
	f, err := os.Open(testFile)
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()

	err = PutFile(nil, &ret, token, testKey, testFile, extra)
	if err != nil {
		t.Fatal(err)
	}
	defer rs.New(nil).Delete(nil, bucket, ret.Key)
}
Example #9
func BatchDelete(cmd string, params ...string) {
	if len(params) == 2 {
		bucket := params[0]
		keyListFile := params[1]
		accountS.Get()
		mac := digest.Mac{
			accountS.AccessKey,
			[]byte(accountS.SecretKey),
		}
		client := rs.New(&mac)
		fp, err := os.Open(keyListFile)
		if err != nil {
			log.Error("Open key list file error", err)
			return
		}
		defer fp.Close()
		scanner := bufio.NewScanner(fp)
		scanner.Split(bufio.ScanLines)
		entries := make([]rs.EntryPath, 0)
		for scanner.Scan() {
			line := strings.TrimSpace(scanner.Text())
			items := strings.Split(line, "\t")
			if len(items) > 0 {
				key := items[0]
				if key != "" {
					entry := rs.EntryPath{
						bucket, key,
					}
					entries = append(entries, entry)
				}
			}
			//check 1000 limit
			if len(entries) == BATCH_ALLOW_MAX {
				batchDelete(client, entries)
				//reset slice
				entries = make([]rs.EntryPath, 0)
			}
		}
		//delete the last batch
		if len(entries) > 0 {
			batchDelete(client, entries)
		}
		fmt.Println("All deleted!")
	} else {
		CmdHelp(cmd)
	}
}
Example #10
func rsDemo(bucket, key, bucketSrc, keySrc, bucketDest, keyDest string) {

	// @gist rsPre
	// Before this operation, make sure accesskey and secretkey have been assigned correctly
	var rsCli = rs.New(nil)
	var err error
	// @endgist

	// @gist rsStat
	var ret rs.Entry
	ret, err = rsCli.Stat(nil, bucket, key)
	if err != nil {
		// an error occurred
		log.Println("rs.Stat failed:", err)
		return
	}
	// handle the return value
	log.Println(ret)
	// @endgist

	// @gist rsCopy
	err = rsCli.Copy(nil, bucketSrc, keySrc, bucketDest, keyDest)
	if err != nil {
		// an error occurred
		log.Println("rs.Copy failed:", err)
		return
	}
	// @endgist

	// @gist rsMove
	err = rsCli.Move(nil, bucketSrc, keySrc, bucketDest, keyDest)
	if err != nil {
		// an error occurred
		log.Println("rs.Move failed:", err)
		return
	}
	// @endgist

	// @gist rsDelete
	err = rsCli.Delete(nil, bucket, key)
	if err != nil {
		// an error occurred
		log.Println("rs.Delete failed:", err)
		return
	}
	// @endgist
}
Example #11
func BatchCopy(cmd string, params ...string) {
	if len(params) == 3 {
		srcBucket := params[0]
		destBucket := params[1]
		srcDestKeyMapFile := params[2]
		accountS.Get()
		mac := digest.Mac{
			accountS.AccessKey,
			[]byte(accountS.SecretKey),
		}
		client := rs.New(&mac)
		fp, err := os.Open(srcDestKeyMapFile)
		if err != nil {
			log.Error("Open src dest key map file error")
			return
		}
		defer fp.Close()
		scanner := bufio.NewScanner(fp)
		scanner.Split(bufio.ScanLines)
		entries := make([]qshell.CopyEntryPath, 0)
		for scanner.Scan() {
			line := strings.TrimSpace(scanner.Text())
			items := strings.Split(line, "\t")
			if len(items) == 1 || len(items) == 2 {
				srcKey := items[0]
				destKey := srcKey
				if len(items) == 2 {
					destKey = items[1]
				}
				if srcKey != "" && destKey != "" {
					entry := qshell.CopyEntryPath{srcBucket, destBucket, srcKey, destKey}
					entries = append(entries, entry)
				}
			}
			if len(entries) == BATCH_ALLOW_MAX {
				batchCopy(client, entries)
				entries = make([]qshell.CopyEntryPath, 0)
			}
		}
		if len(entries) > 0 {
			batchCopy(client, entries)
		}
		fmt.Println("All Copyed!")
	} else {
		CmdHelp(cmd)
	}
}
Example #12
func BatchRename(cmd string, params ...string) {
	if len(params) == 2 {
		bucket := params[0]
		oldNewKeyMapFile := params[1]
		accountS.Get()
		mac := digest.Mac{
			accountS.AccessKey,
			[]byte(accountS.SecretKey),
		}
		client := rs.New(&mac)
		fp, err := os.Open(oldNewKeyMapFile)
		if err != nil {
			log.Error("Open old new key map file error")
			return
		}
		defer fp.Close()
		scanner := bufio.NewScanner(fp)
		scanner.Split(bufio.ScanLines)
		entries := make([]qshell.RenameEntryPath, 0)
		for scanner.Scan() {
			line := strings.TrimSpace(scanner.Text())
			items := strings.Split(line, "\t")
			if len(items) == 2 {
				oldKey := items[0]
				newKey := items[1]
				if oldKey != "" && newKey != "" {
					entry := qshell.RenameEntryPath{bucket, oldKey, newKey}
					entries = append(entries, entry)
				}
			}
			if len(entries) == BATCH_ALLOW_MAX {
				batchRename(client, entries)
				entries = make([]qshell.RenameEntryPath, 0)
			}
		}
		if len(entries) > 0 {
			batchRename(client, entries)
		}
		fmt.Println("All Renamed!")
	} else {
		CmdHelp(cmd)
	}
}
Example #13
func DeleteHandlers() []martini.Handler {
	var bind = binding.Bind(DeleteReqData{})
	var deleteHandler = func(data DeleteReqData, w http.ResponseWriter) {
		index := strings.LastIndex(data.Url, "/")
		runes := []rune(data.Url)
		key := string(runes[index+1:])
		bucket, err := bucketdb.FindByName(data.Bucket)
		if err != nil {
			// bucket lookup failed; report the same callback error as below
			glog.Infoln("find bucket error:", err)
			io.WriteString(w, data.Callback+"({error:1})")
			return
		}
		rsCli := rs.New(&digest.Mac{bucket.Ak, []byte(bucket.Sk)})
		err = rsCli.Delete(nil, bucket.Name, key)
		if err != nil {
			// an error occurred
			glog.Infoln("delete image error:", err)
			io.WriteString(w, data.Callback+"({error:1})")
			return
		}
		io.WriteString(w, data.Callback+"({error:0})")
	}
	return []martini.Handler{bind, deleteHandler}
}
Example #14
File: rs.go Project: micooz/qshell
func M3u8Delete(cmd string, params ...string) {
	if len(params) == 2 || len(params) == 3 {
		bucket := params[0]
		m3u8Key := params[1]
		isPrivate := false
		if len(params) == 3 {
			isPrivate, _ = strconv.ParseBool(params[2])
		}
		accountS.Get()
		mac := digest.Mac{
			accountS.AccessKey,
			[]byte(accountS.SecretKey),
		}
		m3u8FileList, err := qshell.M3u8FileList(&mac, bucket, m3u8Key, isPrivate)
		if err != nil {
			log.Error(err)
			return
		}
		client := rs.New(&mac)
		entryCnt := len(m3u8FileList)
		if entryCnt == 0 {
			log.Error("no m3u8 slices found")
			return
		}
		if entryCnt <= BATCH_ALLOW_MAX {
			batchDelete(client, m3u8FileList)
		} else {
			//round up so the final partial batch is not dropped
			batchCnt := (entryCnt + BATCH_ALLOW_MAX - 1) / BATCH_ALLOW_MAX
			for i := 0; i < batchCnt; i++ {
				end := (i + 1) * BATCH_ALLOW_MAX
				if end > entryCnt {
					end = entryCnt
				}
				entriesToDelete := m3u8FileList[i*BATCH_ALLOW_MAX : end]
				batchDelete(client, entriesToDelete)
			}
		}
		fmt.Println("All deleted!")
	} else {
		CmdHelp(cmd)
	}
}
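The fixed-size batching pattern above recurs throughout these examples. A generic helper could centralize it; this is a sketch, not part of the qshell project:

// chunkEntries splits entries into slices of at most max items each,
// so every slice can be passed to a single batch API call.
func chunkEntries(entries []rs.EntryPath, max int) [][]rs.EntryPath {
	batches := make([][]rs.EntryPath, 0, (len(entries)+max-1)/max)
	for len(entries) > max {
		batches = append(batches, entries[:max])
		entries = entries[max:]
	}
	if len(entries) > 0 {
		batches = append(batches, entries)
	}
	return batches
}

With it, the size dispatch above collapses to iterating chunkEntries(m3u8FileList, BATCH_ALLOW_MAX) and calling batchDelete on each chunk.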
Example #15
File: rs.go Project: micooz/qshell
func Stat(cmd string, params ...string) {
	if len(params) == 2 {
		bucket := params[0]
		key := params[1]
		accountS.Get()
		mac := digest.Mac{
			accountS.AccessKey,
			[]byte(accountS.SecretKey),
		}
		client := rs.New(&mac)
		entry, err := client.Stat(nil, bucket, key)
		if err != nil {
			log.Error("Stat error,", err)
		} else {
			printStat(bucket, key, entry)
		}
	} else {
		CmdHelp(cmd)
	}
}
Example #16
func TestAll(t *testing.T) {

	// upload files to the bucket first as test setup
	for i := 0; i < 10; i++ {
		key := "rsf_test_put_" + strconv.Itoa(i)
		err := upFile("rsf_api.go", bucketName, key)
		if err != nil {
			t.Fatal(err)
		}
		keys = append(keys, key)
	}
	defer func() {
		for _, k := range keys {
			rs.New(nil).Delete(nil, bucketName, k)
		}
	}()

	testList(t)
	testEof(t)
}
Example #17
func DeleteFile(fileName string) bool {
	entryPathes := []qrs.EntryPath{
		qrs.EntryPath{
			Bucket: BulketName,
			Key:    fmt.Sprintf("%s.html", fileName),
		},
		qrs.EntryPath{
			Bucket: BulketName,
			Key:    fmt.Sprintf("%s_code.html", fileName),
		},
	}
	rs := qrs.New(nil)
	_, err := rs.BatchDelete(nil, entryPathes)
	if err != nil {
		// an error occurred
		revel.ERROR.Printf("rs.BatchDelete failed:%v", err)
		return false
	}
	return true
}
Example #18
File: rs.go Project: micooz/qshell
func Delete(cmd string, params ...string) {
	if len(params) == 2 {
		bucket := params[0]
		key := params[1]
		accountS.Get()
		mac := digest.Mac{
			accountS.AccessKey,
			[]byte(accountS.SecretKey),
		}
		client := rs.New(&mac)
		err := client.Delete(nil, bucket, key)
		if err != nil {
			log.Error("Delete error,", err)
		} else {
			fmt.Println("Done!")
		}
	} else {
		CmdHelp(cmd)
	}
}
Example #19
func testPutWithoutKey(t *testing.T, token string, extra *PutExtra) {

	var ret PutRet
	f, err := os.Open(testFile)
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()

	fi, err := f.Stat()
	if err != nil {
		t.Fatal(err)
	}

	err = PutWithoutKey(nil, &ret, token, f, fi.Size(), extra)
	if err != nil {
		t.Fatal(err)
	}
	defer rs.New(nil).Delete(nil, bucket, ret.Key)
}
Example #20
File: rs.go Project: micooz/qshell
func Chgm(cmd string, params ...string) {
	if len(params) == 3 {
		bucket := params[0]
		key := params[1]
		newMimeType := params[2]
		accountS.Get()
		mac := digest.Mac{
			accountS.AccessKey,
			[]byte(accountS.SecretKey),
		}
		client := rs.New(&mac)
		err := client.ChangeMime(nil, bucket, key, newMimeType)
		if err != nil {
			log.Error("Change mimeType error,", err)
		} else {
			fmt.Println("Done!")
		}
	} else {
		CmdHelp(cmd)
	}
}
Example #21
func TestAll(t *testing.T) {

	testPut(t, key1)
	k1 := testPutWithoutKey(t)
	testPutFile(t, localFile, key2)
	k2 := testPutFileWithoutKey(t, localFile)

	testPut(t, key3)
	k3 := testPutWithoutKey2(t)

	//clear all keys
	rs.New(nil).Delete(nil, bucket, key1)
	rs.New(nil).Delete(nil, bucket, key2)
	rs.New(nil).Delete(nil, bucket, key3)
	rs.New(nil).Delete(nil, bucket, k1)
	rs.New(nil).Delete(nil, bucket, k2)
	rs.New(nil).Delete(nil, bucket, k3)
}
Example #22
func (this *Mkziper) Do(req UfopRequest) (result interface{}, contentType string, err error) {
	//set mkzip check criteria
	if this.maxFileCount <= 0 {
		this.maxFileCount = MKZIP_MAX_FILE_COUNT
	}
	if this.maxFileLength <= 0 {
		this.maxFileLength = MKZIP_MAX_FILE_LENGTH
	}
	//parse command
	bucket, encoding, zipFiles, pErr := this.parse(req.Cmd)
	if pErr != nil {
		err = pErr
		return
	}

	//check file count
	if len(zipFiles) > this.maxFileCount {
		err = errors.New("zip file count exceeds the limit")
		return
	}
	if len(zipFiles) > MKZIP_MAX_FILE_LIMIT {
		err = errors.New("only support items less than 1000")
		return
	}
	//check whether file in bucket and exceeds the limit
	statItems := make([]rs.EntryPath, 0)
	for _, zipFile := range zipFiles {
		entryPath := rs.EntryPath{
			bucket, zipFile.key,
		}
		statItems = append(statItems, entryPath)
	}
	qclient := rs.New(this.mac)

	statRet, statErr := qclient.BatchStat(nil, statItems)
	if statErr != nil {
		err = errors.New(fmt.Sprintf("batch stat error, %s", statErr))
		return
	}
	for _, ret := range statRet {
		if ret.Error != "" {
			err = errors.New(fmt.Sprintf("stat resource in bucket error, %s", ret.Error))
			return
		}
		if ret.Data.Fsize > this.maxFileLength {
			err = errors.New(fmt.Sprintf("stat resource length exceeds the limit, %d", ret.Data.Fsize))
			return
		}
	}
	//retrieve resource and create zip file
	var tErr error
	zipBuffer := new(bytes.Buffer)
	zipWriter := zip.NewWriter(zipBuffer)

	for _, zipFile := range zipFiles {
		//convert encoding
		fname := zipFile.alias
		if encoding == "gbk" {
			fname, tErr = utf82GBK(fname)
			if tErr != nil {
				err = errors.New(fmt.Sprintf("unsupported encoding gbk, %s", tErr))
				return
			}
		}

		//create each zip file writer
		fw, fErr := zipWriter.Create(fname)
		if fErr != nil {
			err = errors.New(fmt.Sprintf("create zip file error, %s", fErr))
			return
		}
		//read data and write
		resResp, respErr := http.Get(zipFile.url)
		if respErr != nil || resResp.StatusCode != 200 {
			if respErr != nil {
				err = errors.New("get zip file resource error, " + respErr.Error())
			} else {
				err = errors.New(fmt.Sprintf("get zip file resource error, %s", resResp.Status))
				if resResp.Body != nil {
					resResp.Body.Close()
				}
			}
			return
		}
		respData, readErr := ioutil.ReadAll(resResp.Body)
		if readErr != nil {
			err = errors.New(fmt.Sprintf("read zip file resource content error, %s", readErr))
			return
		}
		resResp.Body.Close()

		_, writeErr := fw.Write(respData)
		if writeErr != nil {
			err = errors.New(fmt.Sprintf("write zip file content error, %s", writeErr))
			return
		}
	}
	//close zip file
	if cErr := zipWriter.Close(); cErr != nil {
		err = errors.New(fmt.Sprintf("close zip file error, %s", cErr))
		return
	}
	result = zipBuffer.Bytes()
	contentType = "application/zip"
	return
}
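Note that Do assembles the whole archive in memory: every resource body goes through ioutil.ReadAll and the finished zip lives in a bytes.Buffer. For large files, each body could be streamed straight into the zip entry instead; a sketch of the alternative read-and-write step, assuming the same fw and resResp as above:

	// stream the response body into the zip entry instead of buffering it
	if _, cpErr := io.Copy(fw, resResp.Body); cpErr != nil {
		resResp.Body.Close()
		err = fmt.Errorf("write zip file content error, %s", cpErr)
		return
	}
	resResp.Body.Close()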
Example #23
func Fetch(job, filePath, bucket, accessKey, secretKey string, worker int, zone string) {
	//open file
	fh, openErr := os.Open(filePath)
	if openErr != nil {
		fmt.Println("Open resource file error,", openErr)
		return
	}
	defer fh.Close()

	//open leveldb
	proFile := fmt.Sprintf(".%s.job", job)
	ldb, lerr := leveldb.OpenFile(proFile, nil)

	if lerr != nil {
		fmt.Println("Open fetch progress file error,", lerr)
		return
	}
	defer ldb.Close()
	//fetch prepare
	switch zone {
	case "bc":
		conf.IO_HOST = "http://iovip-z1.qbox.me"
	case "aws":
		conf.IO_HOST = "http://iovip.gdipper.com"
	default:
		conf.IO_HOST = "http://iovip.qbox.me"
	}

	mac := digest.Mac{
		accessKey, []byte(secretKey),
	}
	client := rs.New(&mac)
	wg := sync.WaitGroup{}
	var seq int = 0
	//scan each line
	bReader := bufio.NewScanner(fh)
	bReader.Split(bufio.ScanLines)
	for bReader.Scan() {
		line := strings.TrimSpace(bReader.Text())
		if line == "" {
			continue
		}

		items := strings.Split(line, "\t")
		if !(len(items) == 1 || len(items) == 2) {
			fmt.Println("Invalid resource line,", line)
			continue
		}

		resUrl := items[0]
		resKey := ""

		if len(items) == 1 {
			resUri, pErr := url.Parse(resUrl)
			if pErr != nil {
				fmt.Println("Invalid resource url", resUrl)
				continue
			}
			resKey = resUri.Path
			if strings.HasPrefix(resKey, "/") {
				resKey = resKey[1:]
			}
		} else if len(items) == 2 {
			resKey = items[1]
		}

		//check from leveldb whether it is done
		val, gErr := ldb.Get([]byte(resUrl), nil)
		if gErr == nil && string(val) == resKey {
			continue
		}

		//otherwise fetch it
		seq += 1
		if seq%worker == 0 {
			wg.Wait()
		}

		wg.Add(1)
		go func() {
			defer wg.Done()

			fErr := client.Fetch(nil, bucket, resKey, resUrl)
			if fErr == nil {
				ldb.Put([]byte(resUrl), []byte(resKey), nil)
			} else {
				fmt.Println("Fetch", resUrl, " error due to", fErr)
			}
		}()
	}
	wg.Wait()
}
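The seq%worker throttle above stalls the whole loop until every in-flight fetch finishes before starting the next group, which leaves workers idle. A buffered-channel semaphore keeps the pool full instead; a sketch, not part of the original:

	sem := make(chan struct{}, worker)
	// inside the scan loop, replacing the seq bookkeeping:
	sem <- struct{}{} // acquire a slot; blocks while `worker` fetches are in flight
	wg.Add(1)
	go func(resUrl, resKey string) {
		defer wg.Done()
		defer func() { <-sem }() // release the slot
		if fErr := client.Fetch(nil, bucket, resKey, resUrl); fErr == nil {
			ldb.Put([]byte(resUrl), []byte(resKey), nil)
		} else {
			fmt.Println("Fetch", resUrl, "error due to", fErr)
		}
	}(resUrl, resKey)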
Example #24
func batchDemo(bucket, key, bucket1, key1, bucket2, key2, bucket3, key3, bucket4, key4 string) {

	// @gist rsBatchPre
	// Before this operation, make sure accesskey and secretkey have been assigned correctly
	var rsCli = rs.New(nil)
	var err error
	// @endgist

	// @gist rsEntryPathes
	entryPathes := []rs.EntryPath{
		rs.EntryPath{
			Bucket: bucket1,
			Key:    key1,
		},
		rs.EntryPath{
			Bucket: bucket2,
			Key:    key2,
		},
	}
	// @endgist

	// @gist rsPathPairs
	// each copy operation contains a source file and a destination file
	entryPairs := []rs.EntryPathPair{
		rs.EntryPathPair{
			Src: rs.EntryPath{
				Bucket: bucket1,
				Key:    key1,
			},
			Dest: rs.EntryPath{
				Bucket: bucket2,
				Key:    key2,
			},
		}, rs.EntryPathPair{
			Src: rs.EntryPath{
				Bucket: bucket3,
				Key:    key3,
			},
			Dest: rs.EntryPath{
				Bucket: bucket4,
				Key:    key4,
			},
		},
	}
	// @endgist

	// @gist rsBatchStat
	var batchStatRets []rs.BatchStatItemRet
	batchStatRets, err = rsCli.BatchStat(nil, entryPathes) // []rs.BatchStatItemRet, error
	if err != nil {
		// an error occurred
		log.Println("rs.BatchStat failed:", err)
		return
	}
	// handle the return values
	for _, item := range batchStatRets {
		log.Println(item)
	}
	// @endgist

	// @gist rsBatchCopy
	var batchCopyRets []rs.BatchItemRet
	batchCopyRets, err = rsCli.BatchCopy(nil, entryPairs)
	if err != nil {
		// an error occurred
		log.Println("rs.BatchCopy failed:", err)
		return
	}
	for _, item := range batchCopyRets {
		// iterate over the result of each operation
		log.Println(item.Code, item.Error)
	}
	// @endgist

	// @gist rsBatchMove
	var batchMoveRets []rs.BatchItemRet
	batchMoveRets, err = rsCli.BatchMove(nil, entryPairs)
	if err != nil {
		// an error occurred
		log.Println("rs.BatchMove failed:", err)
		return
	}
	for _, item := range batchMoveRets {
		// iterate over the result of each operation
		log.Println(item.Code, item.Error)
	}
	// @endgist

	// @gist rsBatchDelete
	var batchDeleteRets []rs.BatchItemRet
	batchDeleteRets, err = rsCli.BatchDelete(nil, entryPathes)
	if err != nil {
		// an error occurred
		log.Println("rs.BatchDelete failed:", err)
		return
	}
	for _, item := range batchDeleteRets {
		// iterate over the result of each operation
		log.Println(item.Code, item.Error)
	}
	// @endgist

	// @gist rsBatchAdv
	ops := []string{
		rs.URIStat(bucket, key1),
		rs.URICopy(bucket, key1, bucket, key2), // copy key1 to key2
		rs.URIDelete(bucket, key1),             // delete key1
		rs.URIMove(bucket, key2, bucket, key1), // move key2 to key1
	}

	rets := new([]rs.BatchItemRet)
	err = rsCli.Batch(nil, rets, ops)
	if err != nil {
		// an error occurred
		log.Println("rs.Batch failed:", err)
		return
	}
	for _, ret := range *rets {
		log.Println(ret.Code, ret.Error)
	}
	// @endgist
}
Example #25
func QiniuUpload(threadCount int, uploadConfigFile string) {
	fp, err := os.Open(uploadConfigFile)
	if err != nil {
		log.Error(fmt.Sprintf("Open upload config file `%s' error due to `%s'", uploadConfigFile, err))
		return
	}
	defer fp.Close()
	configData, err := ioutil.ReadAll(fp)
	if err != nil {
		log.Error(fmt.Sprintf("Read upload config file `%s' error due to `%s'", uploadConfigFile, err))
		return
	}
	var uploadConfig UploadConfig
	err = json.Unmarshal(configData, &uploadConfig)
	if err != nil {
		log.Error(fmt.Sprintf("Parse upload config file `%s' errror due to `%s'", uploadConfigFile, err))
		return
	}
	if _, err := os.Stat(uploadConfig.SrcDir); err != nil {
		log.Error("Upload config error for parameter `SrcDir`,", err)
		return
	}
	dirCache := DirCache{}
	currentUser, err := user.Current()
	if err != nil {
		log.Error("Failed to get current user", err)
		return
	}

	pathSep := string(os.PathSeparator)
	//create job id
	md5Hasher := md5.New()
	md5Hasher.Write([]byte(strings.TrimSuffix(uploadConfig.SrcDir, pathSep) + ":" + uploadConfig.Bucket))
	jobId := fmt.Sprintf("%x", md5Hasher.Sum(nil))

	//local storage path
	storePath := filepath.Join(currentUser.HomeDir, ".qshell", "qupload", jobId)
	err = os.MkdirAll(storePath, 0775)
	if err != nil {
		log.Error(fmt.Sprintf("Failed to mkdir `%s' due to `%s'", storePath, err))
		return
	}

	//cache file
	cacheFileName := filepath.Join(storePath, jobId+".cache")
	//leveldb folder
	leveldbFileName := filepath.Join(storePath, jobId+".ldb")

	totalFileCount := dirCache.Cache(uploadConfig.SrcDir, cacheFileName)
	ldb, err := leveldb.OpenFile(leveldbFileName, nil)
	if err != nil {
		log.Error(fmt.Sprintf("Open leveldb `%s' failed due to `%s'", leveldbFileName, err))
		return
	}
	defer ldb.Close()
	//sync
	ufp, err := os.Open(cacheFileName)
	if err != nil {
		log.Error(fmt.Sprintf("Open cache file `%s' failed due to `%s'", cacheFileName, err))
		return
	}
	defer ufp.Close()
	bScanner := bufio.NewScanner(ufp)
	bScanner.Split(bufio.ScanLines)

	var currentFileCount int64 = 0
	var successFileCount int64 = 0
	var failureFileCount int64 = 0
	var skippedFileCount int64 = 0

	ldbWOpt := opt.WriteOptions{
		Sync: true,
	}

	upWorkGroup := sync.WaitGroup{}
	upCounter := 0
	threadThreshold := threadCount + 1

	//use host if not empty
	if uploadConfig.UpHost != "" {
		conf.UP_HOST = uploadConfig.UpHost
	}
	//set resume upload settings
	rio.SetSettings(&upSettings)
	mac := digest.Mac{uploadConfig.AccessKey, []byte(uploadConfig.SecretKey)}

	for bScanner.Scan() {
		line := strings.TrimSpace(bScanner.Text())
		items := strings.Split(line, "\t")
		if len(items) != 3 {
			log.Error(fmt.Sprintf("Invalid cache line `%s'", line))
			continue
		}

		localFname := items[0]
		currentFileCount += 1

		skip := false
		//check skip local file or folder
		if uploadConfig.SkipPrefixes != "" {
			//unpack skip prefix
			skipPrefixes := strings.Split(uploadConfig.SkipPrefixes, ",")
			for _, prefix := range skipPrefixes {
				if strings.HasPrefix(localFname, strings.TrimSpace(prefix)) {
					log.Debug(fmt.Sprintf("Skip by prefix '%s' for local file %s",
						strings.TrimSpace(prefix), localFname))
					skip = true
					skippedFileCount += 1
					break
				}
			}

			if skip {

				continue
			}
		}

		if uploadConfig.SkipSuffixes != "" {
			skipSuffixes := strings.Split(uploadConfig.SkipSuffixes, ",")
			for _, suffix := range skipSuffixes {
				if strings.HasSuffix(localFname, strings.TrimSpace(suffix)) {
					log.Debug(fmt.Sprintf("Skip by suffix '%s' for local file %s",
						strings.TrimSpace(suffix), localFname))
					skip = true
					skippedFileCount += 1
					break
				}
			}

			if skip {
				continue
			}
		}

		//pack the upload file key
		localFlmd, _ := strconv.Atoi(items[2])
		uploadFileKey := localFname

		if uploadConfig.IgnoreDir {
			if i := strings.LastIndex(uploadFileKey, pathSep); i != -1 {
				uploadFileKey = uploadFileKey[i+1:]
			}
		}
		if uploadConfig.KeyPrefix != "" {
			uploadFileKey = strings.Join([]string{uploadConfig.KeyPrefix, uploadFileKey}, "")
		}
		//convert \ to / under windows
		if runtime.GOOS == "windows" {
			uploadFileKey = strings.Replace(uploadFileKey, "\\", "/", -1)
		}

		localFilePath := filepath.Join(uploadConfig.SrcDir, localFname)
		fstat, err := os.Stat(localFilePath)
		if err != nil {
			log.Error(fmt.Sprintf("Error stat local file `%s' due to `%s'", localFilePath, err))
			return
		}

		fsize := fstat.Size()
		ldbKey := fmt.Sprintf("%s => %s", localFilePath, uploadFileKey)

		log.Info(fmt.Sprintf("Uploading %s (%d/%d, %.1f%%) ...", ldbKey, currentFileCount, totalFileCount,
			float32(currentFileCount)*100/float32(totalFileCount)))

		rsClient := rs.New(&mac)

		//check exists
		if uploadConfig.CheckExists {
			rsEntry, checkErr := rsClient.Stat(nil, uploadConfig.Bucket, uploadFileKey)
			if checkErr == nil {
				//compare hash
				localEtag, cErr := GetEtag(localFilePath)
				if cErr != nil {
					atomic.AddInt64(&failureFileCount, 1)
					log.Error("Calc local file hash failed,", cErr)
					continue
				}
				if rsEntry.Hash == localEtag {
					atomic.AddInt64(&skippedFileCount, 1)
					log.Debug(fmt.Sprintf("File %s already exists in bucket, ignore this upload", uploadFileKey))
					continue
				}
			} else {
				if _, ok := checkErr.(*rpc.ErrorInfo); !ok {
					//not logic error, should be network error
					atomic.AddInt64(&failureFileCount, 1)
					continue
				}
			}
		} else {
			//check leveldb
			ldbFlmd, err := ldb.Get([]byte(ldbKey), nil)
			flmd, _ := strconv.Atoi(string(ldbFlmd))
			//not exist, return ErrNotFound
			//check last modified

			if err == nil && localFlmd == flmd {
				log.Debug("Skip by local log for file", localFname)
				atomic.AddInt64(&skippedFileCount, 1)
				continue
			}
		}

		//worker
		upCounter += 1
		if upCounter%threadThreshold == 0 {
			upWorkGroup.Wait()
		}
		upWorkGroup.Add(1)

		//start to upload
		go func() {
			defer upWorkGroup.Done()

			policy := rs.PutPolicy{}
			policy.Scope = uploadConfig.Bucket
			if uploadConfig.Overwrite {
				policy.Scope = uploadConfig.Bucket + ":" + uploadFileKey
				policy.InsertOnly = 0
			}
			policy.Expires = 24 * 3600
			uptoken := policy.Token(&mac)
			if fsize > PUT_THRESHOLD {
				putRet := rio.PutRet{}
				err := rio.PutFile(nil, &putRet, uptoken, uploadFileKey, localFilePath, nil)
				if err != nil {
					atomic.AddInt64(&failureFileCount, 1)
					if pErr, ok := err.(*rpc.ErrorInfo); ok {
						log.Error(fmt.Sprintf("Put file `%s' => `%s' failed due to `%s'", localFilePath, uploadFileKey, pErr.Err))
					} else {
						log.Error(fmt.Sprintf("Put file `%s' => `%s' failed due to `%s'", localFilePath, uploadFileKey, err))
					}
				} else {
					atomic.AddInt64(&successFileCount, 1)
					perr := ldb.Put([]byte(ldbKey), []byte("Y"), &ldbWOpt)
					if perr != nil {
						log.Error(fmt.Sprintf("Put key `%s' into leveldb error due to `%s'", ldbKey, perr))
					}
				}
			} else {
				putRet := fio.PutRet{}
				err := fio.PutFile(nil, &putRet, uptoken, uploadFileKey, localFilePath, nil)
				if err != nil {
					atomic.AddInt64(&failureFileCount, 1)
					if pErr, ok := err.(*rpc.ErrorInfo); ok {
						log.Error(fmt.Sprintf("Put file `%s' => `%s' failed due to `%s'", localFilePath, uploadFileKey, pErr.Err))
					} else {
						log.Error(fmt.Sprintf("Put file `%s' => `%s' failed due to `%s'", localFilePath, uploadFileKey, err))
					}
				} else {
					atomic.AddInt64(&successFileCount, 1)
					perr := ldb.Put([]byte(ldbKey), []byte(strconv.Itoa(localFlmd)), &ldbWOpt)
					if perr != nil {
						log.Error(fmt.Sprintf("Put key `%s' into leveldb error due to `%s'", ldbKey, perr))
					}
				}
			}
		}()

	}
	upWorkGroup.Wait()

	log.Info("-------Upload Done-------")
	log.Info("Total:\t", currentFileCount)
	log.Info("Success:\t", successFileCount)
	log.Info("Failure:\t", failureFileCount)
	log.Info("Skipped:\t", skippedFileCount)
	log.Info("-------------------------")

}
Example #26
// DeleteArchive deletes an archive from QiNiu.
func DeleteArchive(key string) error {
	rsCli := rs.New(nil)
	return rsCli.Delete(nil, setting.BucketName, key)
}
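A hedged usage sketch; rs.New(nil) relies on the package-level access and secret keys having been configured, and the key below is a placeholder:

	if err := DeleteArchive("archives/demo.zip"); err != nil { // placeholder key
		log.Println("DeleteArchive failed:", err)
	}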
Example #27
func (helper *QiniuHelper) Del(bucket string, key string) error {
	client := rs.New(nil)
	return client.Delete(nil, bucket, key)
}
Example #28
func (this *Mkzipper) Do(req ufop.UfopRequest) (result interface{}, resultType int, contentType string, err error) {
	//parse command
	bucket, encoding, zipFiles, pErr := this.parse(req.Cmd)
	if pErr != nil {
		err = pErr
		return
	}

	//check file count
	if len(zipFiles) > this.maxFileCount {
		err = errors.New("zip file count exceeds the limit")
		return
	}
	if len(zipFiles) > MKZIP_MAX_FILE_LIMIT {
		err = errors.New("only support items less than 1000")
		return
	}
	//check whether file in bucket and exceeds the limit
	statItems := make([]rs.EntryPath, 0)
	statUrls := make([]string, 0)
	for _, zipFile := range zipFiles {
		entryPath := rs.EntryPath{
			bucket, zipFile.key,
		}
		statItems = append(statItems, entryPath)
		statUrls = append(statUrls, zipFile.url)
	}
	qclient := rs.New(this.mac)

	statRet, statErr := qclient.BatchStat(nil, statItems)

	if statErr != nil {
		if _, ok := statErr.(*rpc.ErrorInfo); !ok {
			err = errors.New(fmt.Sprintf("batch stat error, %s", statErr.Error()))
			return
		}
	}

	for index := 0; index < len(statRet); index++ {
		ret := statRet[index]
		if ret.Code != 200 {
			if ret.Code == 612 {
				err = errors.New(fmt.Sprintf("batch stat '%s' error, no such file or directory", statUrls[index]))
			} else if ret.Code == 631 {
				err = errors.New(fmt.Sprintf("batch stat '%s' error, no such bucket", statUrls[index]))
			} else {
				err = errors.New(fmt.Sprintf("batch stat '%s' error, %d", statUrls[index], ret.Code))
			}
			return
		}
	}

	//retrieve resource and create zip file
	var tErr error
	zipBuffer := new(bytes.Buffer)
	zipWriter := zip.NewWriter(zipBuffer)

	for _, zipFile := range zipFiles {
		//convert encoding
		fname := zipFile.alias
		if encoding == "gbk" {
			fname, tErr = utils.Utf82Gbk(fname)
			if tErr != nil {
				err = errors.New(fmt.Sprintf("unsupported encoding gbk, %s", tErr))
				return
			}
		}

		//create each zip file writer
		fw, fErr := zipWriter.Create(fname)
		if fErr != nil {
			err = errors.New(fmt.Sprintf("create zip file error, %s", fErr))
			return
		}
		//read data and write
		resResp, respErr := http.Get(zipFile.url)
		if respErr != nil || resResp.StatusCode != 200 {
			if respErr != nil {
				err = errors.New("get zip file resource error, " + respErr.Error())
			} else {
				err = errors.New(fmt.Sprintf("get zip file resource error, %s", resResp.Status))
				if resResp.Body != nil {
					resResp.Body.Close()
				}
			}
			return
		}
		respData, readErr := ioutil.ReadAll(resResp.Body)
		if readErr != nil {
			err = errors.New(fmt.Sprintf("read zip file resource content error, %s", readErr))
			return
		}
		resResp.Body.Close()

		_, writeErr := fw.Write(respData)
		if writeErr != nil {
			err = errors.New(fmt.Sprintf("write zip file content error, %s", writeErr))
			return
		}
	}
	//close zip file
	if cErr := zipWriter.Close(); cErr != nil {
		err = errors.New(fmt.Sprintf("close zip file error, %s", cErr))
		return
	}

	//write result
	result = zipBuffer.Bytes()
	resultType = ufop.RESULT_TYPE_OCTECT_BYTES
	contentType = "application/zip"
	return
}
Example #29
func QiniuUpload(threadCount int, uploadConfigFile string) {
	fp, err := os.Open(uploadConfigFile)
	if err != nil {
		log.Error(fmt.Sprintf("Open upload config file `%s' error due to `%s'", uploadConfigFile, err))
		return
	}
	defer fp.Close()
	configData, err := ioutil.ReadAll(fp)
	if err != nil {
		log.Error(fmt.Sprintf("Read upload config file `%s' error due to `%s'", uploadConfigFile, err))
		return
	}
	var uploadConfig UploadConfig
	err = json.Unmarshal(configData, &uploadConfig)
	if err != nil {
		log.Error(fmt.Sprintf("Parse upload config file `%s' errror due to `%s'", uploadConfigFile, err))
		return
	}
	if _, err := os.Stat(uploadConfig.SrcDir); err != nil {
		log.Error("Upload config error for parameter `SrcDir`,", err)
		return
	}
	dirCache := DirCache{}
	currentUser, err := user.Current()
	if err != nil {
		log.Error("Failed to get current user", err)
		return
	}
	pathSep := string(os.PathSeparator)
	//create job id
	md5Hasher := md5.New()
	md5Hasher.Write([]byte(uploadConfig.SrcDir + ":" + uploadConfig.Bucket))
	jobId := fmt.Sprintf("%x", md5Hasher.Sum(nil))

	//local storage path
	storePath := fmt.Sprintf("%s%s.qshell%squpload%s%s", currentUser.HomeDir, pathSep, pathSep, pathSep, jobId)
	err = os.MkdirAll(storePath, 0775)
	if err != nil {
		log.Error(fmt.Sprintf("Failed to mkdir `%s' due to `%s'", storePath, err))
		return
	}

	//cache file
	cacheFileName := fmt.Sprintf("%s%s%s.cache", storePath, pathSep, jobId)
	//leveldb folder
	leveldbFileName := fmt.Sprintf("%s%s%s.ldb", storePath, pathSep, jobId)

	totalFileCount := dirCache.Cache(uploadConfig.SrcDir, cacheFileName)
	ldb, err := leveldb.OpenFile(leveldbFileName, nil)
	if err != nil {
		log.Error(fmt.Sprintf("Open leveldb `%s' failed due to `%s'", leveldbFileName, err))
		return
	}
	defer ldb.Close()
	//sync
	ufp, err := os.Open(cacheFileName)
	if err != nil {
		log.Error(fmt.Sprintf("Open cache file `%s' failed due to `%s'", cacheFileName, err))
		return
	}
	defer ufp.Close()
	bScanner := bufio.NewScanner(ufp)
	bScanner.Split(bufio.ScanLines)
	currentFileCount := 0
	ldbWOpt := opt.WriteOptions{
		Sync: true,
	}

	upWorkGroup := sync.WaitGroup{}
	upCounter := 0
	threadThreshold := threadCount + 1

	//use host if not empty
	if uploadConfig.UpHost != "" {
		conf.UP_HOST = uploadConfig.UpHost
	}
	//set settings
	rio.SetSettings(&upSettings)
	mac := digest.Mac{uploadConfig.AccessKey, []byte(uploadConfig.SecretKey)}
	//check thread count
	for bScanner.Scan() {
		line := strings.TrimSpace(bScanner.Text())
		items := strings.Split(line, "\t")
		if len(items) == 3 { //items[2] (last modified) is read below
			cacheFname := items[0]
			cacheFlmd, _ := strconv.Atoi(items[2])
			uploadFileKey := cacheFname
			if uploadConfig.IgnoreDir {
				if i := strings.LastIndex(uploadFileKey, pathSep); i != -1 {
					uploadFileKey = uploadFileKey[i+1:]
				}
			}
			if uploadConfig.KeyPrefix != "" {
				uploadFileKey = strings.Join([]string{uploadConfig.KeyPrefix, uploadFileKey}, "")
			}
			//convert \ to / under windows
			if runtime.GOOS == "windows" {
				uploadFileKey = strings.Replace(uploadFileKey, "\\", "/", -1)
			}
			cacheFilePath := strings.Join([]string{uploadConfig.SrcDir, cacheFname}, pathSep)
			fstat, err := os.Stat(cacheFilePath)
			if err != nil {
				log.Error(fmt.Sprintf("Error stat local file `%s' due to `%s'", cacheFilePath, err))
				return
			}
			fsize := fstat.Size()

			//check leveldb
			currentFileCount += 1
			ldbKey := fmt.Sprintf("%s => %s", cacheFilePath, uploadFileKey)
			log.Debug(fmt.Sprintf("Checking %s ...", ldbKey))
			//check last modified
			ldbFlmd, err := ldb.Get([]byte(ldbKey), nil)
			flmd, _ := strconv.Atoi(string(ldbFlmd))
			//not exist, return ErrNotFound
			if err == nil && cacheFlmd == flmd {
				continue
			}

			fmt.Print("\033[2K\r")
			fmt.Printf("Uploading %s (%d/%d, %.1f%%) ...", ldbKey, currentFileCount, totalFileCount,
				float32(currentFileCount)*100/float32(totalFileCount))
			os.Stdout.Sync()
			rsClient := rs.New(&mac)
			//worker
			upCounter += 1
			if upCounter%threadThreshold == 0 {
				upWorkGroup.Wait()
			}
			upWorkGroup.Add(1)
			go func() {
				defer upWorkGroup.Done()
				//check exists
				if uploadConfig.CheckExists {
					rsEntry, checkErr := rsClient.Stat(nil, uploadConfig.Bucket, uploadFileKey)
					if checkErr == nil {
						//compare hash
						localEtag, cErr := GetEtag(cacheFilePath)
						if cErr != nil {
							log.Error("Calc local file hash failed,", cErr)
							return
						}
						if rsEntry.Hash == localEtag {
							log.Info("File already exists in bucket, ignore this upload")
							return
						}
					} else {
						if _, ok := checkErr.(*rpc.ErrorInfo); !ok {
							//not logic error, should be network error
							return
						}
					}
				}
				//upload
				policy := rs.PutPolicy{}
				policy.Scope = uploadConfig.Bucket
				if uploadConfig.Overwrite {
					policy.Scope = uploadConfig.Bucket + ":" + uploadFileKey
					policy.InsertOnly = 0
				}
				policy.Expires = 24 * 3600
				uptoken := policy.Token(&mac)
				if fsize > PUT_THRESHOLD {
					putRet := rio.PutRet{}
					err := rio.PutFile(nil, &putRet, uptoken, uploadFileKey, cacheFilePath, nil)
					if err != nil {
						log.Error(fmt.Sprintf("Put file `%s' => `%s' failed due to `%s'", cacheFilePath, uploadFileKey, err))
					} else {
						perr := ldb.Put([]byte(ldbKey), []byte("Y"), &ldbWOpt)
						if perr != nil {
							log.Error(fmt.Sprintf("Put key `%s' into leveldb error due to `%s'", ldbKey, perr))
						}
					}
				} else {
					putRet := fio.PutRet{}
					err := fio.PutFile(nil, &putRet, uptoken, uploadFileKey, cacheFilePath, nil)
					if err != nil {
						log.Error(fmt.Sprintf("Put file `%s' => `%s' failed due to `%s'", cacheFilePath, uploadFileKey, err))
					} else {
						perr := ldb.Put([]byte(ldbKey), []byte(strconv.Itoa(cacheFlmd)), &ldbWOpt)
						if perr != nil {
							log.Error(fmt.Sprintf("Put key `%s' into leveldb error due to `%s'", ldbKey, perr))
						}
					}
				}
			}()
		} else {
			log.Error(fmt.Sprintf("Error cache line `%s'", line))
		}
	}
	upWorkGroup.Wait()
	fmt.Println()
	fmt.Println("Upload done!")
}
Example #30
File: rs.go Project: micooz/qshell
func BatchDelete(cmd string, params ...string) {
	//confirm
	rcode := CreateRandString(6)
	if rcode == "" {
		log.Error("Create confirm code failed")
		return
	}

	rcode2 := ""
	fmt.Printf("\033[31m<DANGER>\033[0m Input \033[32m%s\033[0m to confirm operation: ", rcode)
	fmt.Scanln(&rcode2)

	if rcode != rcode2 {
		fmt.Println("Task quit!")
		return
	}

	if len(params) == 2 {
		bucket := params[0]
		keyListFile := params[1]
		accountS.Get()
		mac := digest.Mac{
			accountS.AccessKey,
			[]byte(accountS.SecretKey),
		}
		client := rs.New(&mac)
		fp, err := os.Open(keyListFile)
		if err != nil {
			log.Error("Open key list file error", err)
			return
		}
		defer fp.Close()
		scanner := bufio.NewScanner(fp)
		scanner.Split(bufio.ScanLines)
		entries := make([]rs.EntryPath, 0)
		for scanner.Scan() {
			line := strings.TrimSpace(scanner.Text())
			items := strings.Split(line, "\t")
			if len(items) > 0 {
				key := items[0]
				if key != "" {
					entry := rs.EntryPath{
						bucket, key,
					}
					entries = append(entries, entry)
				}
			}
			//check 1000 limit
			if len(entries) == BATCH_ALLOW_MAX {
				batchDelete(client, entries)
				//reset slice
				entries = make([]rs.EntryPath, 0)
			}
		}
		//delete the last batch
		if len(entries) > 0 {
			batchDelete(client, entries)
		}
		fmt.Println("All deleted!")
	} else {
		CmdHelp(cmd)
	}
}