Example #1
File: rs.go Project: mtinf/qshell
func Chgm(cmd string, params ...string) {
	if len(params) == 3 {
		bucket := params[0]
		key := params[1]
		newMimeType := params[2]

		gErr := accountS.Get()
		if gErr != nil {
			fmt.Println(gErr)
			return
		}

		mac := digest.Mac{
			accountS.AccessKey,
			[]byte(accountS.SecretKey),
		}
		client := rs.NewMac(&mac)
		err := client.ChangeMime(nil, bucket, key, newMimeType)
		if err != nil {
			if v, ok := err.(*rpc.ErrorInfo); ok {
				fmt.Println("Change mimetype error,", v.Code, v.Err)
			} else {
				fmt.Println("Change mimetype error,", err.Error())
			}
		} else {
			fmt.Println("Change mimetype done!")
		}
	} else {
		CmdHelp(cmd)
	}
}
Example #2
File: rs.go Project: mtinf/qshell
func Copy(cmd string, params ...string) {
	if len(params) == 3 || len(params) == 4 {
		srcBucket := params[0]
		srcKey := params[1]
		destBucket := params[2]
		destKey := srcKey
		if len(params) == 4 {
			destKey = params[3]
		}

		gErr := accountS.Get()
		if gErr != nil {
			fmt.Println(gErr)
			return
		}

		mac := digest.Mac{
			accountS.AccessKey,
			[]byte(accountS.SecretKey),
		}
		client := rs.NewMac(&mac)
		err := client.Copy(nil, srcBucket, srcKey, destBucket, destKey)
		if err != nil {
			if v, ok := err.(*rpc.ErrorInfo); ok {
				fmt.Println("Copy error,", v.Code, v.Err)
			} else {
				fmt.Println("Copy error,", err.Error())
			}
		} else {
			fmt.Println("Copy done!")
		}
	} else {
		CmdHelp(cmd)
	}
}
Example #3
File: rs.go Project: mtinf/qshell
func Stat(cmd string, params ...string) {
	if len(params) == 2 {
		bucket := params[0]
		key := params[1]

		gErr := accountS.Get()
		if gErr != nil {
			fmt.Println(gErr)
			return
		}

		mac := digest.Mac{
			accountS.AccessKey,
			[]byte(accountS.SecretKey),
		}
		client := rs.NewMac(&mac)
		entry, err := client.Stat(nil, bucket, key)
		if err != nil {
			if v, ok := err.(*rpc.ErrorInfo); ok {
				fmt.Println("Stat error,", v.Code, v.Err)
			} else {
				fmt.Println("Stat error,", err.Error())
			}
		} else {
			printStat(bucket, key, entry)
		}
	} else {
		CmdHelp(cmd)
	}
}
Example #4
func GetBuckets(mac *digest.Mac) (buckets []string, err error) {
	buckets = make([]string, 0)
	client := rs.NewMac(mac)
	bucketsUri := fmt.Sprintf("%s/buckets", conf.RS_HOST)
	err = client.Conn.Call(nil, &buckets, bucketsUri)
	return
}
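A minimal caller sketch for GetBuckets, assuming it lives in the same package as the snippet above (so fmt and the Qiniu digest package are already in scope); the keyed digest.Mac literal mirrors the unkeyed ones used throughout these examples, and the helper name is hypothetical:
//printBuckets is a hypothetical helper, not part of qshell itself.
func printBuckets(accessKey, secretKey string) {
	mac := digest.Mac{
		AccessKey: accessKey,
		SecretKey: []byte(secretKey),
	}
	buckets, err := GetBuckets(&mac)
	if err != nil {
		fmt.Println("Get buckets error,", err)
		return
	}
	for _, bucket := range buckets {
		fmt.Println(bucket)
	}
}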
Example #5
File: rs.go Project: auqf/qshell
func Move(cmd string, params ...string) {
	if len(params) == 4 {
		srcBucket := params[0]
		srcKey := params[1]
		destBucket := params[2]
		destKey := params[3]

		gErr := accountS.Get()
		if gErr != nil {
			fmt.Println(gErr)
			return
		}

		mac := digest.Mac{
			accountS.AccessKey,
			[]byte(accountS.SecretKey),
		}
		client := rs.NewMac(&mac)
		err := client.Move(nil, srcBucket, srcKey, destBucket, destKey)
		if err != nil {
			fmt.Println("Move error,", err)
		} else {
			fmt.Println("Done!")
		}
	} else {
		CmdHelp(cmd)
	}
}
Example #6
File: rs.go Project: auqf/qshell
func Delete(cmd string, params ...string) {
	if len(params) == 2 {
		bucket := params[0]
		key := params[1]

		gErr := accountS.Get()
		if gErr != nil {
			fmt.Println(gErr)
			return
		}

		mac := digest.Mac{
			accountS.AccessKey,
			[]byte(accountS.SecretKey),
		}
		client := rs.NewMac(&mac)
		err := client.Delete(nil, bucket, key)
		if err != nil {
			fmt.Println("Delete error,", err.Error())
		} else {
			fmt.Println("Done!")
		}
	} else {
		CmdHelp(cmd)
	}
}
Example #7
func GetDomainsOfBucket(mac *digest.Mac, bucket string) (domains []string, err error) {
	domains = make([]string, 0)
	client := rs.NewMac(mac)
	getDomainsUrl := fmt.Sprintf("%s/v6/domain/list", DEFAULT_API_HOST)
	postData := map[string][]string{
		"tbl": []string{bucket},
	}
	err = client.Conn.CallWithForm(nil, &domains, getDomainsUrl, postData)
	return
}
Example #8
File: rs.go Project: arkfang/qshell
func Fetch(mac *digest.Mac, remoteResUrl, bucket, key string) (fetchResult FetchResult, err error) {
	client := rs.NewMac(mac)
	entry := bucket
	if key != "" {
		entry += ":" + key
	}
	fetchUri := fmt.Sprintf("/fetch/%s/to/%s",
		base64.URLEncoding.EncodeToString([]byte(remoteResUrl)),
		base64.URLEncoding.EncodeToString([]byte(entry)))
	err = client.Conn.Call(nil, &fetchResult, conf.IO_HOST+fetchUri)
	return
}
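A hedged usage sketch for Fetch, again assuming the same package and imports as above; the URL, bucket, and key values are illustrative placeholders, and the FetchResult is only printed as a whole because its fields are not shown here:
//fetchRemote is a hypothetical caller, not part of qshell itself.
func fetchRemote(mac *digest.Mac) {
	result, err := Fetch(mac, "http://example.com/logo.png", "my-bucket", "logo.png")
	if err != nil {
		fmt.Println("Fetch error,", err)
		return
	}
	fmt.Println("Fetch done!", result)
}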
Example #9
File: rs.go Project: mtinf/qshell
func BatchStat(cmd string, params ...string) {
	if len(params) == 2 {
		bucket := params[0]
		keyListFile := params[1]

		gErr := accountS.Get()
		if gErr != nil {
			fmt.Println(gErr)
			return
		}

		mac := digest.Mac{
			accountS.AccessKey,
			[]byte(accountS.SecretKey),
		}
		client := rs.NewMac(&mac)
		fp, err := os.Open(keyListFile)
		if err != nil {
			fmt.Println("Open key list file error", err)
			return
		}
		defer fp.Close()
		scanner := bufio.NewScanner(fp)
		scanner.Split(bufio.ScanLines)
		entries := make([]rs.EntryPath, 0)
		for scanner.Scan() {
			line := strings.TrimSpace(scanner.Text())
			items := strings.Split(line, "\t")
			if len(items) > 0 {
				key := items[0]
				if key != "" {
					entry := rs.EntryPath{
						bucket, key,
					}
					entries = append(entries, entry)
				}
			}
			//check 1000 limit
			if len(entries) == BATCH_ALLOW_MAX {
				batchStat(client, entries)
				//reset slice
				entries = make([]rs.EntryPath, 0)
			}
		}
		//stat the last batch
		if len(entries) > 0 {
			batchStat(client, entries)
		}
	} else {
		CmdHelp(cmd)
	}
}
Example #10
File: rs.go Project: mtinf/qshell
func BatchRefresh(cmd string, params ...string) {
	if len(params) == 1 {
		urlListFile := params[0]

		gErr := accountS.Get()
		if gErr != nil {
			fmt.Println(gErr)
			return
		}

		mac := digest.Mac{
			accountS.AccessKey,
			[]byte(accountS.SecretKey),
		}

		client := rs.NewMac(&mac)
		fp, err := os.Open(urlListFile)
		if err != nil {
			fmt.Println("Open url list file error", err)
			return
		}
		defer fp.Close()
		scanner := bufio.NewScanner(fp)
		scanner.Split(bufio.ScanLines)

		urlsToRefresh := make([]string, 0, 10)
		for scanner.Scan() {
			url := strings.TrimSpace(scanner.Text())
			if url == "" {
				continue
			}
			urlsToRefresh = append(urlsToRefresh, url)

			if len(urlsToRefresh) == BATCH_CDN_REFRESH_ALLOW_MAX {
				batchRefresh(&client, urlsToRefresh)
				urlsToRefresh = make([]string, 0, 10)
			}
		}

		if len(urlsToRefresh) > 0 {
			batchRefresh(&client, urlsToRefresh)
		}

		fmt.Println("All refresh requests sent!")
	} else {
		CmdHelp(cmd)
	}
}
Example #11
File: rs.go Project: mtinf/qshell
func M3u8Delete(cmd string, params ...string) {
	if len(params) == 2 || len(params) == 3 {
		bucket := params[0]
		m3u8Key := params[1]
		isPrivate := false
		if len(params) == 3 {
			isPrivate, _ = strconv.ParseBool(params[2])
		}

		gErr := accountS.Get()
		if gErr != nil {
			fmt.Println(gErr)
			return
		}

		mac := digest.Mac{
			accountS.AccessKey,
			[]byte(accountS.SecretKey),
		}
		m3u8FileList, err := qshell.M3u8FileList(&mac, bucket, m3u8Key, isPrivate)
		if err != nil {
			fmt.Println(err)
			return
		}
		client := rs.NewMac(&mac)
		entryCnt := len(m3u8FileList)
		if entryCnt == 0 {
			fmt.Println("no m3u8 slices found")
			return
		}
		if entryCnt <= BATCH_ALLOW_MAX {
			batchDelete(client, m3u8FileList)
		} else {
			//round up so the last partial batch is not skipped
			batchCnt := (entryCnt + BATCH_ALLOW_MAX - 1) / BATCH_ALLOW_MAX
			for i := 0; i < batchCnt; i++ {
				end := (i + 1) * BATCH_ALLOW_MAX
				if end > entryCnt {
					end = entryCnt
				}
				entriesToDelete := m3u8FileList[i*BATCH_ALLOW_MAX : end]
				batchDelete(client, entriesToDelete)
			}
		}
		fmt.Println("All deleted!")
	} else {
		CmdHelp(cmd)
	}
}
Example #12
File: sync.go Project: auqf/qshell
func checkExists(mac *digest.Mac, bucket, key string) (exists bool, err error) {
	client := rs.NewMac(mac)
	entry, sErr := client.Stat(nil, bucket, key)
	if sErr != nil {
		v, ok := sErr.(*rpc.ErrorInfo)
		if !ok {
			err = fmt.Errorf("Check file exists error, %s", sErr.Error())
			return
		}
		if v.Code != 612 {
			err = fmt.Errorf("Check file exists error, %s", v.Err)
			return
		}
		//612 means the entry does not exist in the bucket
		exists = false
		return
	}

	if entry.Hash != "" {
		exists = true
	}

	return
}
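A small sketch of how checkExists might be driven from the same package, for example to decide whether an upload can be skipped; the helper name is hypothetical:
//shouldSkipUpload is a hypothetical helper built on checkExists above.
func shouldSkipUpload(mac *digest.Mac, bucket, key string) bool {
	exists, err := checkExists(mac, bucket, key)
	if err != nil {
		fmt.Println(err)
		return false
	}
	if exists {
		fmt.Println("File already exists in bucket, skip upload:", key)
	}
	return exists
}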
Example #13
File: rs.go Project: mtinf/qshell
func BatchCopy(cmd string, params ...string) {
	//confirm
	rcode := CreateRandString(6)
	if rcode == "" {
		fmt.Println("Create confirm code failed")
		return
	}

	rcode2 := ""
	if runtime.GOOS == "windows" {
		fmt.Print(fmt.Sprintf("<DANGER> Input %s to confirm operation: ", rcode))
	} else {
		fmt.Print(fmt.Sprintf("\033[31m<DANGER>\033[0m Input \033[32m%s\033[0m to confirm operation: ", rcode))
	}
	fmt.Scanln(&rcode2)

	if rcode != rcode2 {
		fmt.Println("Task quit!")
		return
	}

	if len(params) == 3 {
		srcBucket := params[0]
		destBucket := params[1]
		srcDestKeyMapFile := params[2]

		gErr := accountS.Get()
		if gErr != nil {
			fmt.Println(gErr)
			return
		}

		mac := digest.Mac{
			accountS.AccessKey,
			[]byte(accountS.SecretKey),
		}
		client := rs.NewMac(&mac)
		fp, err := os.Open(srcDestKeyMapFile)
		if err != nil {
			fmt.Println("Open src dest key map file error")
			return
		}
		defer fp.Close()
		scanner := bufio.NewScanner(fp)
		scanner.Split(bufio.ScanLines)
		entries := make([]qshell.CopyEntryPath, 0)
		for scanner.Scan() {
			line := strings.TrimSpace(scanner.Text())
			items := strings.Split(line, "\t")
			if len(items) == 1 || len(items) == 2 {
				srcKey := items[0]
				destKey := srcKey
				if len(items) == 2 {
					destKey = items[1]
				}
				if srcKey != "" && destKey != "" {
					entry := qshell.CopyEntryPath{srcBucket, destBucket, srcKey, destKey}
					entries = append(entries, entry)
				}
			}
			if len(entries) == BATCH_ALLOW_MAX {
				batchCopy(client, entries)
				entries = make([]qshell.CopyEntryPath, 0)
			}
		}
		if len(entries) > 0 {
			batchCopy(client, entries)
		}
		fmt.Println("All Copyed!")
	} else {
		CmdHelp(cmd)
	}
}
Example #14
File: rs.go Project: mtinf/qshell
func BatchDelete(cmd string, params ...string) {
	//confirm
	rcode := CreateRandString(6)
	if rcode == "" {
		fmt.Println("Create confirm code failed")
		return
	}

	rcode2 := ""
	if runtime.GOOS == "windows" {
		fmt.Print(fmt.Sprintf("<DANGER> Input %s to confirm operation: ", rcode))
	} else {
		fmt.Print(fmt.Sprintf("\033[31m<DANGER>\033[0m Input \033[32m%s\033[0m to confirm operation: ", rcode))
	}
	fmt.Scanln(&rcode2)

	if rcode != rcode2 {
		fmt.Println("Task quit!")
		return
	}

	if len(params) == 2 {
		bucket := params[0]
		keyListFile := params[1]

		gErr := accountS.Get()
		if gErr != nil {
			fmt.Println(gErr)
			return
		}

		mac := digest.Mac{
			accountS.AccessKey,
			[]byte(accountS.SecretKey),
		}
		client := rs.NewMac(&mac)
		fp, err := os.Open(keyListFile)
		if err != nil {
			fmt.Println("Open key list file error", err)
			return
		}
		defer fp.Close()
		scanner := bufio.NewScanner(fp)
		scanner.Split(bufio.ScanLines)
		entries := make([]rs.EntryPath, 0)
		for scanner.Scan() {
			line := strings.TrimSpace(scanner.Text())
			items := strings.Split(line, "\t")
			if len(items) > 0 {
				key := items[0]
				if key != "" {
					entry := rs.EntryPath{
						bucket, key,
					}
					entries = append(entries, entry)
				}
			}
			//check 1000 limit
			if len(entries) == BATCH_ALLOW_MAX {
				batchDelete(client, entries)
				//reset slice
				entries = make([]rs.EntryPath, 0)
			}
		}
		//delete the last batch
		if len(entries) > 0 {
			batchDelete(client, entries)
		}
		fmt.Println("All deleted!")
	} else {
		CmdHelp(cmd)
	}
}
Example #15
func QiniuUpload(threadCount int, uploadConfig *UploadConfig) {
	timeStart := time.Now()

	//make SrcDir the full path
	uploadConfig.SrcDir, _ = filepath.Abs(uploadConfig.SrcDir)

	dirCache := DirCache{}

	pathSep := string(os.PathSeparator)
	//create job id
	jobId := Md5Hex(fmt.Sprintf("%s:%s", uploadConfig.SrcDir, uploadConfig.Bucket))

	//local storage path
	storePath := filepath.Join(".qshell", "qupload", jobId)
	if err := os.MkdirAll(storePath, 0775); err != nil {
		log.Errorf("Failed to mkdir `%s' due to `%s'", storePath, err)
		return
	}

	//cache file
	rescanLocalDir := false
	cacheResultName := filepath.Join(storePath, jobId+".cache")
	cacheTempName := filepath.Join(storePath, jobId+".cache.temp")
	cacheCountName := filepath.Join(storePath, jobId+".count")

	if _, statErr := os.Stat(cacheResultName); statErr == nil {
		//file already exists
		rescanLocalDir = uploadConfig.RescanLocal
	} else {
		rescanLocalDir = true
	}

	var totalFileCount int64
	if rescanLocalDir {
		fmt.Println("Listing local sync dir, this can take a long time, please wait patiently...")
		totalFileCount = dirCache.Cache(uploadConfig.SrcDir, cacheTempName)
		if rErr := os.Remove(cacheResultName); rErr != nil {
			log.Debug("Remove the old cached file error", rErr)
		}
		if rErr := os.Rename(cacheTempName, cacheResultName); rErr != nil {
			fmt.Println("Rename the temp cached file error", rErr)
			return
		}
		//write the total count to local file
		if cFp, cErr := os.Create(cacheCountName); cErr == nil {
			func() {
				defer cFp.Close()
				uploadInfo := UploadInfo{
					TotalFileCount: totalFileCount,
				}
				uploadInfoBytes, mErr := json.Marshal(&uploadInfo)
				if mErr == nil {
					//log the actual write error; cFp is closed by the deferred Close above
					if _, wErr := cFp.Write(uploadInfoBytes); wErr != nil {
						log.Errorf("Write local cached count file error %s", wErr)
					}
				}
			}()
		} else {
			log.Errorf("Open local cached count file error %s", cErr)
		}
	} else {
		fmt.Println("Use the last cached local sync dir file list ...")
		//read from local cache
		if rFp, rErr := os.Open(cacheCountName); rErr == nil {
			func() {
				defer rFp.Close()
				uploadInfo := UploadInfo{}
				decoder := json.NewDecoder(rFp)
				if dErr := decoder.Decode(&uploadInfo); dErr == nil {
					totalFileCount = uploadInfo.TotalFileCount
				}
			}()
		} else {
			log.Warnf("Open local cached count file error %s", rErr)
		}
	}

	//leveldb folder
	leveldbFileName := filepath.Join(storePath, jobId+".ldb")
	ldb, err := leveldb.OpenFile(leveldbFileName, nil)
	if err != nil {
		log.Errorf("Open leveldb `%s' failed due to `%s'", leveldbFileName, err)
		return
	}
	defer ldb.Close()
	//sync
	ufp, err := os.Open(cacheResultName)
	if err != nil {
		log.Errorf("Open cache file `%s' failed due to `%s'", cacheResultName, err)
		return
	}
	defer ufp.Close()
	bScanner := bufio.NewScanner(ufp)
	bScanner.Split(bufio.ScanLines)

	var currentFileCount int64 = 0
	var successFileCount int64 = 0
	var failureFileCount int64 = 0
	var skippedFileCount int64 = 0

	ldbWOpt := opt.WriteOptions{
		Sync: true,
	}

	upWorkGroup := sync.WaitGroup{}
	upCounter := 0
	threadThreshold := threadCount + 1

	//chunk upload threshold
	putThreshold := DEFAULT_PUT_THRESHOLD
	if uploadConfig.PutThreshold > 0 {
		putThreshold = uploadConfig.PutThreshold
	}

	//check zone, default nb
	switch uploadConfig.Zone {
	case ZoneAWS:
		SetZone(ZoneAWSConfig)
	case ZoneBC:
		SetZone(ZoneBCConfig)
	default:
		SetZone(ZoneNBConfig)
	}

	//use host if not empty, overwrite the default config
	if uploadConfig.UpHost != "" {
		conf.UP_HOST = uploadConfig.UpHost
	}
	//set resume upload settings
	rio.SetSettings(&upSettings)
	mac := digest.Mac{uploadConfig.AccessKey, []byte(uploadConfig.SecretKey)}

	//check bind net interface card
	var transport *http.Transport
	var rsClient rs.Client
	if uploadConfig.BindNicIp != "" {
		transport = &http.Transport{
			Dial: (&net.Dialer{
				LocalAddr: &net.TCPAddr{
					IP: net.ParseIP(uploadConfig.BindNicIp),
				},
			}).Dial,
		}
	}

	if transport != nil {
		rsClient = rs.NewMacEx(&mac, transport, "")
	} else {
		rsClient = rs.NewMac(&mac)
	}

	//check remote rs ip bind
	if uploadConfig.BindRsIp != "" {
		rsClient.Conn.BindRemoteIp = uploadConfig.BindRsIp
	}

	//scan lines and upload
	for bScanner.Scan() {
		line := strings.TrimSpace(bScanner.Text())
		items := strings.Split(line, "\t")
		if len(items) != 3 {
			log.Errorf("Invalid cache line `%s'", line)
			continue
		}

		localFpath := items[0]
		currentFileCount += 1

		skip := false
		//check skip local file or folder
		if uploadConfig.SkipPathPrefixes != "" {
			//unpack skip prefix
			skipPathPrefixes := strings.Split(uploadConfig.SkipPathPrefixes, ",")
			for _, prefix := range skipPathPrefixes {
				if strings.HasPrefix(localFpath, strings.TrimSpace(prefix)) {
					log.Debug(fmt.Sprintf("Skip by path prefix '%s' for local file %s",
						strings.TrimSpace(prefix), localFpath))
					skip = true
					skippedFileCount += 1
					break
				}
			}

			if skip {
				continue
			}
		}

		if uploadConfig.SkipFilePrefixes != "" {
			//unpack skip prefix
			skipFilePrefixes := strings.Split(uploadConfig.SkipFilePrefixes, ",")
			for _, prefix := range skipFilePrefixes {
				localFname := filepath.Base(localFpath)
				if strings.HasPrefix(localFname, strings.TrimSpace(prefix)) {
					log.Debug(fmt.Sprintf("Skip by file prefix '%s' for local file %s",
						strings.TrimSpace(prefix), localFpath))
					skip = true
					skippedFileCount += 1
					break
				}
			}

			if skip {
				continue
			}
		}

		if uploadConfig.SkipSuffixes != "" {
			skipSuffixes := strings.Split(uploadConfig.SkipSuffixes, ",")
			for _, suffix := range skipSuffixes {
				if strings.HasSuffix(localFpath, strings.TrimSpace(suffix)) {
					log.Debug(fmt.Sprintf("Skip by suffix '%s' for local file %s",
						strings.TrimSpace(suffix), localFpath))
					skip = true
					skippedFileCount += 1
					break
				}
			}

			if skip {
				continue
			}
		}

		//pack the upload file key
		localFlmd, _ := strconv.ParseInt(items[2], 10, 64)
		uploadFileKey := localFpath

		if uploadConfig.IgnoreDir {
			if i := strings.LastIndex(uploadFileKey, pathSep); i != -1 {
				uploadFileKey = uploadFileKey[i+1:]
			}
		}
		if uploadConfig.KeyPrefix != "" {
			uploadFileKey = strings.Join([]string{uploadConfig.KeyPrefix, uploadFileKey}, "")
		}
		//convert \ to / under windows
		if runtime.GOOS == "windows" {
			uploadFileKey = strings.Replace(uploadFileKey, "\\", "/", -1)
		}

		localFilePath := filepath.Join(uploadConfig.SrcDir, localFpath)
		fstat, err := os.Stat(localFilePath)
		if err != nil {
			log.Errorf("Error stat local file `%s' due to `%s'", localFilePath, err)
			continue
		}

		fsize := fstat.Size()
		ldbKey := fmt.Sprintf("%s => %s", localFilePath, uploadFileKey)

		if totalFileCount != 0 {
			fmt.Println(fmt.Sprintf("Uploading %s [%d/%d, %.1f%%] ...", ldbKey, currentFileCount, totalFileCount,
				float32(currentFileCount)*100/float32(totalFileCount)))
		} else {
			fmt.Println(fmt.Sprintf("Uploading %s ...", ldbKey))
		}

		//check exists
		if uploadConfig.CheckExists {
			rsEntry, checkErr := rsClient.Stat(nil, uploadConfig.Bucket, uploadFileKey)
			if checkErr == nil {
				//compare hash
				localEtag, cErr := GetEtag(localFilePath)
				if cErr != nil {
					atomic.AddInt64(&failureFileCount, 1)
					log.Error("Calc local file hash failed,", cErr)
					continue
				}
				if rsEntry.Hash == localEtag {
					atomic.AddInt64(&skippedFileCount, 1)
					log.Debug(fmt.Sprintf("File %s already exists in bucket, ignore this upload", uploadFileKey))
					continue
				}
			} else {
				if _, ok := checkErr.(*rpc.ErrorInfo); !ok {
					//not logic error, should be network error
					atomic.AddInt64(&failureFileCount, 1)
					continue
				}
			}
		} else {
			//check leveldb
			ldbFlmd, err := ldb.Get([]byte(ldbKey), nil)
			flmd, _ := strconv.ParseInt(string(ldbFlmd), 10, 64)
			//not exist, return ErrNotFound
			//check last modified

			if err == nil && localFlmd == flmd {
				log.Debug("Skip by local log for file", localFpath)
				atomic.AddInt64(&skippedFileCount, 1)
				continue
			}
		}

		//worker
		upCounter += 1
		if upCounter%threadThreshold == 0 {
			upWorkGroup.Wait()
		}
		upWorkGroup.Add(1)

		//start to upload
		go func() {
			defer upWorkGroup.Done()

			policy := rs.PutPolicy{}
			policy.Scope = uploadConfig.Bucket
			if uploadConfig.Overwrite {
				policy.Scope = fmt.Sprintf("%s:%s", uploadConfig.Bucket, uploadFileKey)
				policy.InsertOnly = 0
			}
			policy.Expires = 30 * 24 * 3600
			uptoken := policy.Token(&mac)
			if fsize > putThreshold {
				var putClient rpc.Client
				if transport != nil {
					putClient = rio.NewClientEx(uptoken, transport, uploadConfig.BindUpIp)
				} else {
					putClient = rio.NewClient(uptoken, uploadConfig.BindUpIp)
				}

				putRet := rio.PutRet{}
				putExtra := rio.PutExtra{}
				progressFkey := Md5Hex(fmt.Sprintf("%s:%s|%s:%s", uploadConfig.SrcDir, uploadConfig.Bucket, localFpath, uploadFileKey))
				progressFname := fmt.Sprintf("%s.progress", progressFkey)
				progressFpath := filepath.Join(storePath, progressFname)
				putExtra.ProgressFile = progressFpath

				err := rio.PutFile(putClient, nil, &putRet, uploadFileKey, localFilePath, &putExtra)
				if err != nil {
					atomic.AddInt64(&failureFileCount, 1)
					if pErr, ok := err.(*rpc.ErrorInfo); ok {
						log.Errorf("Put file `%s' => `%s' failed due to `%s'", localFilePath, uploadFileKey, pErr.Err)
					} else {
						log.Errorf("Put file `%s' => `%s' failed due to `%s'", localFilePath, uploadFileKey, err)
					}
				} else {
					os.Remove(progressFpath)
					atomic.AddInt64(&successFileCount, 1)
					perr := ldb.Put([]byte(ldbKey), []byte(fmt.Sprintf("%d", localFlmd)), &ldbWOpt)
					if perr != nil {
						log.Errorf("Put key `%s' into leveldb error due to `%s'", ldbKey, perr)
					}
				}
			} else {
				var putClient rpc.Client
				if transport != nil {
					putClient = rpc.NewClientEx(transport, uploadConfig.BindUpIp)
				} else {
					putClient = rpc.NewClient(uploadConfig.BindUpIp)
				}

				putRet := fio.PutRet{}
				err := fio.PutFile(putClient, nil, &putRet, uptoken, uploadFileKey, localFilePath, nil)
				if err != nil {
					atomic.AddInt64(&failureFileCount, 1)
					if pErr, ok := err.(*rpc.ErrorInfo); ok {
						log.Errorf("Put file `%s' => `%s' failed due to `%s'", localFilePath, uploadFileKey, pErr.Err)
					} else {
						log.Errorf("Put file `%s' => `%s' failed due to `%s'", localFilePath, uploadFileKey, err)
					}
				} else {
					atomic.AddInt64(&successFileCount, 1)
					perr := ldb.Put([]byte(ldbKey), []byte(fmt.Sprintf("%d", localFlmd)), &ldbWOpt)
					if perr != nil {
						log.Errorf("Put key `%s' into leveldb error due to `%s'", ldbKey, perr)
					}
				}
			}
		}()

	}
	upWorkGroup.Wait()

	fmt.Println()
	fmt.Println("-------Upload Result-------")
	fmt.Println("Total:   \t", currentFileCount)
	fmt.Println("Success: \t", successFileCount)
	fmt.Println("Failure: \t", failureFileCount)
	fmt.Println("Skipped: \t", skippedFileCount)
	fmt.Println("Duration:\t", time.Since(timeStart))
	fmt.Println("-------------------------")

}
Example #16
func M3u8FileList(mac *digest.Mac, bucket string, m3u8Key string, isPrivate bool) (slicesToDelete []rs.EntryPath, err error) {
	client := rs.NewMac(mac)
	//check m3u8 file exists
	_, sErr := client.Stat(nil, bucket, m3u8Key)
	if sErr != nil {
		err = fmt.Errorf("stat m3u8 file error, %s", sErr.Error())
		return
	}
	//get domain list of bucket
	bucketDomainUrl := fmt.Sprintf("%s/v6/domain/list", DEFAULT_API_HOST)
	bucketDomainData := map[string][]string{
		"tbl": []string{bucket},
	}
	bucketDomains := BucketDomain{}
	bErr := client.Conn.CallWithForm(nil, &bucketDomains, bucketDomainUrl, bucketDomainData)
	if bErr != nil {
		err = fmt.Errorf("get domain of bucket failed due to, %s", bErr.Error())
		return
	}
	if len(bucketDomains) == 0 {
		err = errors.New("no domain found for the bucket")
		return
	}
	var domain string
	for _, d := range bucketDomains {
		if strings.HasSuffix(d, "qiniudn.com") ||
			strings.HasSuffix(d, "clouddn.com") ||
			strings.HasSuffix(d, "qiniucdn.com") {
			domain = d
			break
		}
	}

	//get first
	if domain == "" {
		domain = bucketDomains[0]
	}

	if domain == "" {
		err = errors.New("no valid domain found for the bucket")
		return
	}
	//create download link
	dnLink := fmt.Sprintf("http://%s/%s", domain, m3u8Key)
	if isPrivate {
		dnLink = PrivateUrl(mac, dnLink, time.Now().Add(time.Second*3600).Unix())
	}
	//get m3u8 file content
	m3u8Resp, m3u8Err := http.Get(dnLink)
	if m3u8Err != nil {
		err = fmt.Errorf("open url %s error due to, %s", dnLink, m3u8Err)
		return
	}
	defer m3u8Resp.Body.Close()
	if m3u8Resp.StatusCode != 200 {
		err = fmt.Errorf("download file error due to, %s", m3u8Resp.Status)
		return
	}
	m3u8Bytes, readErr := ioutil.ReadAll(m3u8Resp.Body)
	if readErr != nil {
		err = fmt.Errorf("read m3u8 file content error due to, %s", readErr.Error())
		return
	}
	//check content
	if !strings.HasPrefix(string(m3u8Bytes), "#EXTM3U") {
		err = errors.New("invalid m3u8 file")
		return
	}
	slicesToDelete = make([]rs.EntryPath, 0)
	bReader := bufio.NewScanner(bytes.NewReader(m3u8Bytes))
	bReader.Split(bufio.ScanLines)
	for bReader.Scan() {
		line := strings.TrimSpace(bReader.Text())
		if !strings.HasPrefix(line, "#") {
			var sliceKey string
			if strings.HasPrefix(line, "http://") ||
				strings.HasPrefix(line, "https://") {
				uri, pErr := url.Parse(line)
				if pErr != nil {
					log.Errorf("invalid url, %s", line)
					continue
				}
				sliceKey = strings.TrimPrefix(uri.Path, "/")
			} else {
				sliceKey = strings.TrimPrefix(line, "/")
			}
			//append to delete list
			slicesToDelete = append(slicesToDelete, rs.EntryPath{bucket, sliceKey})
		}
	}
	slicesToDelete = append(slicesToDelete, rs.EntryPath{bucket, m3u8Key})
	return
}
Example #17
File: rs.go Project: arkfang/qshell
func Prefetch(mac *digest.Mac, bucket, key string) (err error) {
	client := rs.NewMac(mac)
	prefetchUri := fmt.Sprintf("/prefetch/%s", base64.URLEncoding.EncodeToString([]byte(bucket+":"+key)))
	err = client.Conn.Call(nil, nil, conf.IO_HOST+prefetchUri)
	return
}
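And a hypothetical caller for Prefetch, under the same same-package assumption as the earlier sketches; it presumes the bucket has mirror storage configured, since that is what a prefetch request re-pulls:
//prefetchKey is a hypothetical wrapper around Prefetch above.
func prefetchKey(mac *digest.Mac, bucket, key string) {
	if err := Prefetch(mac, bucket, key); err != nil {
		fmt.Println("Prefetch error,", err)
		return
	}
	fmt.Println("Prefetch done!")
}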