Example #1
func (this *DirCache) Cache(cacheRootPath string, cacheResultFile string) (fileCount int64) {
	if _, err := os.Stat(cacheResultFile); err != nil {
		log.Debug(fmt.Sprintf("No cache file `%s' found, will create one", cacheResultFile))
	} else {
		os.Remove(cacheResultFile + ".old")
		if rErr := os.Rename(cacheResultFile, cacheResultFile+".old"); rErr != nil {
			log.Error(fmt.Sprintf("Unable to rename cache file, plz manually delete `%s' and `%s.old'",
				cacheResultFile, cacheResultFile))
			log.Error(rErr)
			return
		}
	}
	cacheResultFileH, err := os.OpenFile(cacheResultFile, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0666)
	if err != nil {
		log.Error(fmt.Sprintf("Failed to open cache file `%s'", cacheResultFile))
		return
	}
	defer cacheResultFileH.Close()
	bWriter := bufio.NewWriter(cacheResultFileH)
	walkStart := time.Now()
	log.Debug(fmt.Sprintf("Walk `%s' start from `%s'", cacheRootPath, walkStart.String()))
	if wErr := filepath.Walk(cacheRootPath, func(path string, fi os.FileInfo, err error) error {
		var retErr error
		//log.Debug(fmt.Sprintf("Walking through `%s'", cacheRootPath))
		if err != nil {
			retErr = err
		} else {
			if !fi.IsDir() {
				relPath := strings.TrimPrefix(strings.TrimPrefix(path, cacheRootPath), string(os.PathSeparator))
				fsize := fi.Size()
				//Unit is 100ns
				flmd := fi.ModTime().UnixNano() / 100
				//log.Debug(fmt.Sprintf("Hit file `%s' size: `%d' mode time: `%d`", relPath, fsize, flmd))
				fmeta := fmt.Sprintf("%s\t%d\t%d\n", relPath, fsize, flmd)
				if _, err := bWriter.WriteString(fmeta); err != nil {
					log.Error(fmt.Sprintf("Failed to write data `%s' to cache file", fmeta))
					retErr = err
				}
				fileCount += 1
			}
		}
		return retErr
	}); wErr != nil {
		log.Error(fmt.Sprintf("Walk `%s' error, %s", cacheRootPath, wErr))
	}
	if err := bWriter.Flush(); err != nil {
		log.Error(fmt.Sprintf("Failed to flush to cache file `%s'", cacheResultFile))
	}

	walkEnd := time.Now()
	log.Debug(fmt.Sprintf("Walk `%s' end at `%s'", cacheRootPath, walkEnd.String()))
	log.Debug(fmt.Sprintf("Walk `%s' last for `%s'", cacheRootPath, time.Since(walkStart)))
	return
}
Example #2
func (this *DirCache) Cache(cacheRootPath string, cacheResultFile string) (fileCount int64) {
	cacheResultFileH, err := os.Create(cacheResultFile)
	if err != nil {
		log.Errorf("Failed to open cache file `%s'", cacheResultFile)
		return
	}
	defer cacheResultFileH.Close()
	bWriter := bufio.NewWriter(cacheResultFileH)
	walkStart := time.Now()
	log.Debug(fmt.Sprintf("Walk `%s' start from `%s'", cacheRootPath, walkStart.String()))
	if wErr := filepath.Walk(cacheRootPath, func(path string, fi os.FileInfo, err error) error {
		var retErr error
		//log.Debug(fmt.Sprintf("Walking through `%s'", cacheRootPath))
		if err != nil {
			retErr = err
		} else {
			if !fi.IsDir() {
				relPath := strings.TrimPrefix(strings.TrimPrefix(path, cacheRootPath), string(os.PathSeparator))
				fsize := fi.Size()
				//Unit is 100ns
				flmd := fi.ModTime().UnixNano() / 100
				//log.Debug(fmt.Sprintf("Hit file `%s' size: `%d' mode time: `%d`", relPath, fsize, flmd))
				fmeta := fmt.Sprintf("%s\t%d\t%d\n", relPath, fsize, flmd)
				if _, err := bWriter.WriteString(fmeta); err != nil {
					log.Errorf("Failed to write data `%s' to cache file", fmeta)
					retErr = err
				}
				fileCount += 1
			}
		}
		return retErr
	}); wErr != nil {
		log.Error(fmt.Sprintf("Walk `%s' error, %s", cacheRootPath, wErr))
	}
	if err := bWriter.Flush(); err != nil {
		log.Errorf("Failed to flush to cache file `%s'", cacheResultFile)
	}

	walkEnd := time.Now()
	log.Debug(fmt.Sprintf("Walk `%s' end at `%s'", cacheRootPath, walkEnd.String()))
	log.Debug(fmt.Sprintf("Walk `%s' last for `%s'", cacheRootPath, time.Since(walkStart)))
	return
}
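Both variants above write one tab-separated record per file: relative path, size in bytes, and modification time in 100 ns units. A minimal sketch of reading such a record back (the sample values are illustrative, not from a real cache file):

package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

func main() {
	//one record in the format written by DirCache.Cache:
	//relPath \t fsize \t modTime (100 ns units)
	line := "docs/readme.txt\t2048\t14500000000000000"
	items := strings.Split(line, "\t")
	if len(items) != 3 {
		return
	}
	fsize, _ := strconv.ParseInt(items[1], 10, 64)
	flmd, _ := strconv.ParseInt(items[2], 10, 64)
	//multiply by 100 to recover nanoseconds
	fmt.Println(items[0], fsize, time.Unix(0, flmd*100))
}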
Example #3
func (this *AliListBucket) ListBucket(listResultFile string) (err error) {
	//open result file
	fp, openErr := os.Create(listResultFile)
	if openErr != nil {
		err = openErr
		return
	}
	defer fp.Close()
	bw := bufio.NewWriter(fp)
	//list bucket by prefix
	marker := ""
	prefixLen := len(this.Prefix)
	ossClient := oss.NewClient(this.DataCenter, this.AccessKeyId, this.AccessKeySecret, 0)
	maxRetryTimes := 5
	retryTimes := 1

	log.Info("Listing the oss bucket...")
	for {
		lbr, lbrErr := ossClient.GetBucket(this.Bucket, this.Prefix, marker, "", "")
		if lbrErr != nil {
			err = lbrErr
			log.Error("Parse list result error, ", "marker=[", marker, "]", lbrErr)
			if retryTimes <= maxRetryTimes {
				log.Debug("Retry marker=", marker, "] for ", retryTimes, "time...")
				retryTimes += 1
				continue
			} else {
				break
			}
		} else {
			retryTimes = 1
		}
		for _, object := range lbr.Contents {
			lmdTime, lmdPErr := time.Parse("2006-01-02T15:04:05.999Z", object.LastModified)
			if lmdPErr != nil {
				log.Error("Parse object last modified error, ", lmdPErr)
				lmdTime = time.Now()
			}
			bw.WriteString(fmt.Sprintf("%s\t%d\t%d\n", object.Key[prefixLen:], object.Size, lmdTime.UnixNano()/100))
		}
		if !lbr.IsTruncated {
			break
		}
		marker = lbr.NextMarker
	}
	fErr := bw.Flush()
	if fErr != nil {
		log.Error("Write data to buffer writer failed", fErr)
		err = fErr
		return
	}
	return err
}
Example #4
File: rs.go Project: mtinf/qshell
func batchRename(client rs.Client, entries []qshell.RenameEntryPath) {
	ret, err := qshell.BatchRename(client, entries)
	if err != nil {
		fmt.Println("Batch rename error", err)
	}
	if len(ret) > 0 {
		for i, entry := range entries {
			item := ret[i]
			if item.Data.Error != "" {
				log.Errorf("Rename '%s' => '%s' Failed, Code :%d", entry.OldKey, entry.NewKey, item.Code)
			} else {
				log.Debug(fmt.Sprintf("Rename '%s' => '%s' Success, Code :%d", entry.OldKey, entry.NewKey, item.Code))
			}
		}
	}
}
Example #5
File: rs.go Project: mtinf/qshell
func batchChgm(client rs.Client, entries []qshell.ChgmEntryPath) {
	ret, err := qshell.BatchChgm(client, entries)
	if err != nil {
		fmt.Println("Batch chgm error", err)
	}
	if len(ret) > 0 {
		for i, entry := range entries {
			item := ret[i]
			if item.Data.Error != "" {
				log.Errorf("Chgm '%s' => '%s' Failed, Code :%d", entry.Key, entry.MimeType, item.Code)
			} else {
				log.Debug(fmt.Sprintf("Chgm '%s' => '%s' Success, Code :%d", entry.Key, entry.MimeType, item.Code))
			}
		}
	}
}
Example #6
File: rs.go Project: mtinf/qshell
func batchDelete(client rs.Client, entries []rs.EntryPath) {
	ret, err := qshell.BatchDelete(client, entries)
	if err != nil {
		fmt.Println("Batch delete error", err)
	}
	if len(ret) > 0 {
		for i, entry := range entries {
			item := ret[i]
			if item.Data.Error != "" {
				log.Errorf("Delete '%s' => '%s' Failed, Code: %d", entry.Bucket, entry.Key, item.Code)
			} else {
				log.Debug(fmt.Sprintf("Delete '%s' => '%s' Success, Code: %d", entry.Bucket, entry.Key, item.Code))
			}
		}
	}
}
Example #7
File: rs.go Project: mtinf/qshell
func batchCopy(client rs.Client, entries []qshell.CopyEntryPath) {
	ret, err := qshell.BatchCopy(client, entries)
	if err != nil {
		fmt.Println("Batch move error", err)
	}
	if len(ret) > 0 {
		for i, entry := range entries {
			item := ret[i]
			if item.Data.Error != "" {
				log.Errorf("Copy '%s:%s' => '%s:%s' Failed, Code :%d",
					entry.SrcBucket, entry.SrcKey, entry.DestBucket, entry.DestKey, item.Code)
			} else {
				log.Debug(fmt.Sprintf("Copy '%s:%s' => '%s:%s' Success, Code :%d",
					entry.SrcBucket, entry.SrcKey, entry.DestBucket, entry.DestKey, item.Code))
			}
		}
	}
}
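The four batch helpers above share one shape: call the qshell batch API, then walk the per-entry results, indexing ret[i] by the entries index, which assumes the server returns exactly one item per entry in order. A minimal hedged call to batchDelete, assuming an already-configured rs.Client (bucket and keys are placeholders):

func deleteTwo(client rs.Client) {
	//rs.EntryPath field names follow the entry.Bucket/entry.Key
	//usage in batchDelete above; the values are placeholders
	entries := []rs.EntryPath{
		{Bucket: "my-bucket", Key: "path/to/a.txt"},
		{Bucket: "my-bucket", Key: "path/to/b.txt"},
	}
	batchDelete(client, entries)
}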
Example #8
func put(c rpc.Client, l rpc.Logger, ret interface{}, key string, hasKey bool, f io.ReaderAt, fsize int64, extra *PutExtra) error {

	once.Do(initWorkers)

	blockCnt := BlockCount(fsize)

	if extra == nil {
		extra = new(PutExtra)
	}

	//load the progress file
	var progressWLock = sync.RWMutex{}

	if extra.ProgressFile != "" {
		progressRecord := ProgressRecord{}
		if _, pStatErr := os.Stat(extra.ProgressFile); pStatErr == nil {
			progressFp, openErr := os.Open(extra.ProgressFile)
			if openErr == nil {
				func() {
					defer progressFp.Close()
					decoder := json.NewDecoder(progressFp)
					decodeErr := decoder.Decode(&progressRecord)
					if decodeErr != nil {
						log.Debug(fmt.Sprintf("resumable.Put decode progess record error, %s", decodeErr.Error()))
					}
				}()
			} else {
				log.Debug(fmt.Sprintf("resumable.Put open progress record error, %s", openErr.Error()))
			}
		}

		//load in progresses
		if len(progressRecord.Progresses) > 0 {
			//check the expire date of the first progress
			now := time.Now()
			first := progressRecord.Progresses[0]
			if now.Add(time.Hour*24).Unix() <= first.ExpiredAt {
				//not expired, go ahead
				extra.Progresses = progressRecord.Progresses
			}
		}
	}

	if extra.Progresses == nil {
		extra.Progresses = make([]BlkputRet, blockCnt)
	} else if len(extra.Progresses) != blockCnt {
		return ErrInvalidPutProgress
	}

	if extra.ChunkSize == 0 {
		extra.ChunkSize = settings.ChunkSize
	}
	if extra.TryTimes == 0 {
		extra.TryTimes = settings.TryTimes
	}
	if extra.Notify == nil {
		extra.Notify = notifyNil
	}
	if extra.NotifyErr == nil {
		extra.NotifyErr = notifyErrNil
	}

	var wg sync.WaitGroup
	wg.Add(blockCnt)

	last := blockCnt - 1
	blkSize := 1 << blockBits
	//tasks run concurrently on worker goroutines, so count failures atomically
	var nfails int32

	for i := 0; i < blockCnt; i++ {
		blkIdx := i
		blkSize1 := blkSize
		if i == last {
			offbase := int64(blkIdx) << blockBits
			blkSize1 = int(fsize - offbase)
		}
		task := func() {
			defer wg.Done()
			tryTimes := extra.TryTimes
		lzRetry:
			err := ResumableBlockput(c, l, &extra.Progresses[blkIdx], f, blkIdx, blkSize1, extra)
			if err != nil {
				if tryTimes > 1 {
					tryTimes--
					log.Info("resumable.Put retrying ...")
					goto lzRetry
				}
				log.Warn("resumable.Put", blkIdx, "failed:", err)
				extra.NotifyErr(blkIdx, blkSize1, err)
				atomic.AddInt32(&nfails, 1)
			} else {
				//record block progress
				if extra.ProgressFile != "" {
					progressWLock.Lock()
					func() {
						defer progressWLock.Unlock()
						progressRecord := ProgressRecord{
							Progresses: extra.Progresses,
						}
						mData, mErr := json.Marshal(progressRecord)
						if mErr == nil {
							wErr := ioutil.WriteFile(extra.ProgressFile, mData, 0644)
							if wErr != nil {
								log.Warn(fmt.Sprintf("resumable.Put record progress error, %s", wErr.Error()))
							}
						} else {
							log.Info(fmt.Sprintf("resumable.Put marshal progress record error, %s", mErr.Error()))
						}
					}()
				}
			}
		}
		tasks <- task
	}

	wg.Wait()
	if nfails != 0 {
		return ErrPutFailed
	}

	return Mkfile(c, l, ret, key, hasKey, fsize, extra)
}
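The block arithmetic above relies on blockBits and BlockCount, which are defined elsewhere in the package. A sketch of how blockCnt and the short final block fall out, assuming Qiniu's standard 4 MB block (blockBits = 22 is an assumption here, and blockCount is a plausible ceiling-division definition, not the package's verbatim code):

package main

import "fmt"

const blockBits = 22           //assumed 4 MB blocks
const blkSize = 1 << blockBits //4194304 bytes

//ceiling division of fsize by the block size
func blockCount(fsize int64) int {
	return int((fsize + blkSize - 1) >> blockBits)
}

func main() {
	var fsize int64 = 10 << 20 //a 10 MB file
	blockCnt := blockCount(fsize)
	last := blockCnt - 1
	offbase := int64(last) << blockBits
	//prints: 3 blocks, last block 2097152 bytes (2 MB)
	fmt.Println(blockCnt, "blocks, last block", fsize-offbase, "bytes")
}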
Example #9
func QiniuUpload(threadCount int, uploadConfig *UploadConfig) {
	timeStart := time.Now()

	//make SrcDir the full path
	uploadConfig.SrcDir, _ = filepath.Abs(uploadConfig.SrcDir)

	dirCache := DirCache{}

	pathSep := string(os.PathSeparator)
	//create job id
	jobId := Md5Hex(fmt.Sprintf("%s:%s", uploadConfig.SrcDir, uploadConfig.Bucket))

	//local storage path
	storePath := filepath.Join(".qshell", "qupload", jobId)
	if err := os.MkdirAll(storePath, 0775); err != nil {
		log.Errorf("Failed to mkdir `%s' due to `%s'", storePath, err)
		return
	}

	//cache file
	rescanLocalDir := false
	cacheResultName := filepath.Join(storePath, jobId+".cache")
	cacheTempName := filepath.Join(storePath, jobId+".cache.temp")
	cacheCountName := filepath.Join(storePath, jobId+".count")

	if _, statErr := os.Stat(cacheResultName); statErr == nil {
		//file already exists
		rescanLocalDir = uploadConfig.RescanLocal
	} else {
		rescanLocalDir = true
	}

	var totalFileCount int64
	if rescanLocalDir {
		fmt.Println("Listing local sync dir, this can take a long time, please wait patiently...")
		totalFileCount = dirCache.Cache(uploadConfig.SrcDir, cacheTempName)
		if rErr := os.Remove(cacheResultName); rErr != nil {
			log.Debug("Remove the old cached file error", rErr)
		}
		if rErr := os.Rename(cacheTempName, cacheResultName); rErr != nil {
			fmt.Println("Rename the temp cached file error", rErr)
			return
		}
		//write the total count to local file
		if cFp, cErr := os.Create(cacheCountName); cErr == nil {
			func() {
				defer cFp.Close()
				uploadInfo := UploadInfo{
					TotalFileCount: totalFileCount,
				}
				uploadInfoBytes, mErr := json.Marshal(&uploadInfo)
				if mErr == nil {
					if _, wErr := cFp.Write(uploadInfoBytes); wErr != nil {
						log.Errorf("Write local cached count file error %s", cErr)
					} else {
						cFp.Close()
					}
				}
			}()
		} else {
			log.Errorf("Open local cached count file error %s", cErr)
		}
	} else {
		fmt.Println("Use the last cached local sync dir file list ...")
		//read from local cache
		if rFp, rErr := os.Open(cacheCountName); rErr == nil {
			func() {
				defer rFp.Close()
				uploadInfo := UploadInfo{}
				decoder := json.NewDecoder(rFp)
				if dErr := decoder.Decode(&uploadInfo); dErr == nil {
					totalFileCount = uploadInfo.TotalFileCount
				}
			}()
		} else {
			log.Warnf("Open local cached count file error %s", rErr)
		}
	}

	//leveldb folder
	leveldbFileName := filepath.Join(storePath, jobId+".ldb")
	ldb, err := leveldb.OpenFile(leveldbFileName, nil)
	if err != nil {
		log.Errorf("Open leveldb `%s' failed due to `%s'", leveldbFileName, err)
		return
	}
	defer ldb.Close()
	//sync
	ufp, err := os.Open(cacheResultName)
	if err != nil {
		log.Errorf("Open cache file `%s' failed due to `%s'", cacheResultName, err)
		return
	}
	defer ufp.Close()
	bScanner := bufio.NewScanner(ufp)
	bScanner.Split(bufio.ScanLines)

	var currentFileCount int64 = 0
	var successFileCount int64 = 0
	var failureFileCount int64 = 0
	var skippedFileCount int64 = 0

	ldbWOpt := opt.WriteOptions{
		Sync: true,
	}

	upWorkGroup := sync.WaitGroup{}
	upCounter := 0
	threadThreshold := threadCount + 1

	//chunk upload threshold
	putThreshold := DEFAULT_PUT_THRESHOLD
	if uploadConfig.PutThreshold > 0 {
		putThreshold = uploadConfig.PutThreshold
	}

	//check zone, default nb
	switch uploadConfig.Zone {
	case ZoneAWS:
		SetZone(ZoneAWSConfig)
	case ZoneBC:
		SetZone(ZoneBCConfig)
	default:
		SetZone(ZoneNBConfig)
	}

	//use host if not empty, overwrite the default config
	if uploadConfig.UpHost != "" {
		conf.UP_HOST = uploadConfig.UpHost
	}
	//set resume upload settings
	rio.SetSettings(&upSettings)
	mac := digest.Mac{uploadConfig.AccessKey, []byte(uploadConfig.SecretKey)}

	//check bind net interface card
	var transport *http.Transport
	var rsClient rs.Client
	if uploadConfig.BindNicIp != "" {
		transport = &http.Transport{
			Dial: (&net.Dialer{
				LocalAddr: &net.TCPAddr{
					IP: net.ParseIP(uploadConfig.BindNicIp),
				},
			}).Dial,
		}
	}

	if transport != nil {
		rsClient = rs.NewMacEx(&mac, transport, "")
	} else {
		rsClient = rs.NewMac(&mac)
	}

	//check remote rs ip bind
	if uploadConfig.BindRsIp != "" {
		rsClient.Conn.BindRemoteIp = uploadConfig.BindRsIp
	}

	//scan lines and upload
	for bScanner.Scan() {
		line := strings.TrimSpace(bScanner.Text())
		items := strings.Split(line, "\t")
		if len(items) != 3 {
			log.Errorf("Invalid cache line `%s'", line)
			continue
		}

		localFpath := items[0]
		currentFileCount += 1

		skip := false
		//check skip local file or folder
		if uploadConfig.SkipPathPrefixes != "" {
			//unpack skip prefix
			skipPathPrefixes := strings.Split(uploadConfig.SkipPathPrefixes, ",")
			for _, prefix := range skipPathPrefixes {
				if strings.HasPrefix(localFpath, strings.TrimSpace(prefix)) {
					log.Debug(fmt.Sprintf("Skip by path prefix '%s' for local file %s",
						strings.TrimSpace(prefix), localFpath))
					skip = true
					skippedFileCount += 1
					break
				}
			}

			if skip {
				continue
			}
		}

		if uploadConfig.SkipFilePrefixes != "" {
			//unpack skip prefix
			skipFilePrefixes := strings.Split(uploadConfig.SkipFilePrefixes, ",")
			for _, prefix := range skipFilePrefixes {
				localFname := filepath.Base(localFpath)
				if strings.HasPrefix(localFname, strings.TrimSpace(prefix)) {
					log.Debug(fmt.Sprintf("Skip by file prefix '%s' for local file %s",
						strings.TrimSpace(prefix), localFpath))
					skip = true
					skippedFileCount += 1
					break
				}
			}

			if skip {
				continue
			}
		}

		if uploadConfig.SkipSuffixes != "" {
			skipSuffixes := strings.Split(uploadConfig.SkipSuffixes, ",")
			for _, suffix := range skipSuffixes {
				if strings.HasSuffix(localFpath, strings.TrimSpace(suffix)) {
					log.Debug(fmt.Sprintf("Skip by suffix '%s' for local file %s",
						strings.TrimSpace(suffix), localFpath))
					skip = true
					skippedFileCount += 1
					break
				}
			}

			if skip {
				continue
			}
		}

		//pack the upload file key
		localFlmd, _ := strconv.ParseInt(items[2], 10, 64)
		uploadFileKey := localFpath

		if uploadConfig.IgnoreDir {
			if i := strings.LastIndex(uploadFileKey, pathSep); i != -1 {
				uploadFileKey = uploadFileKey[i+1:]
			}
		}
		if uploadConfig.KeyPrefix != "" {
			uploadFileKey = strings.Join([]string{uploadConfig.KeyPrefix, uploadFileKey}, "")
		}
		//convert \ to / under windows
		if runtime.GOOS == "windows" {
			uploadFileKey = strings.Replace(uploadFileKey, "\\", "/", -1)
		}

		localFilePath := filepath.Join(uploadConfig.SrcDir, localFpath)
		fstat, err := os.Stat(localFilePath)
		if err != nil {
			log.Errorf("Error stat local file `%s' due to `%s'", localFilePath, err)
			continue
		}

		fsize := fstat.Size()
		ldbKey := fmt.Sprintf("%s => %s", localFilePath, uploadFileKey)

		if totalFileCount != 0 {
			fmt.Println(fmt.Sprintf("Uploading %s [%d/%d, %.1f%%] ...", ldbKey, currentFileCount, totalFileCount,
				float32(currentFileCount)*100/float32(totalFileCount)))
		} else {
			fmt.Println(fmt.Sprintf("Uploading %s ...", ldbKey))
		}

		//check exists
		if uploadConfig.CheckExists {
			rsEntry, checkErr := rsClient.Stat(nil, uploadConfig.Bucket, uploadFileKey)
			if checkErr == nil {
				//compare hash
				localEtag, cErr := GetEtag(localFilePath)
				if cErr != nil {
					atomic.AddInt64(&failureFileCount, 1)
					log.Error("Calc local file hash failed,", cErr)
					continue
				}
				if rsEntry.Hash == localEtag {
					atomic.AddInt64(&skippedFileCount, 1)
					log.Debug(fmt.Sprintf("File %s already exists in bucket, ignore this upload", uploadFileKey))
					continue
				}
			} else {
				if _, ok := checkErr.(*rpc.ErrorInfo); !ok {
					//not a logic error, should be a network error
					log.Error("Check file exists error,", checkErr)
					atomic.AddInt64(&failureFileCount, 1)
					continue
				}
			}
		} else {
			//check leveldb
			ldbFlmd, err := ldb.Get([]byte(ldbKey), nil)
			flmd, _ := strconv.ParseInt(string(ldbFlmd), 10, 64)
		//if the key is missing, Get returns leveldb.ErrNotFound;
		//otherwise compare the recorded last-modified time

			if err == nil && localFlmd == flmd {
				log.Debug("Skip by local log for file", localFpath)
				atomic.AddInt64(&skippedFileCount, 1)
				continue
			}
		}

		//worker
		upCounter += 1
		if upCounter%threadThreshold == 0 {
			upWorkGroup.Wait()
		}
		upWorkGroup.Add(1)

		//start to upload
		go func() {
			defer upWorkGroup.Done()

			policy := rs.PutPolicy{}
			policy.Scope = uploadConfig.Bucket
			if uploadConfig.Overwrite {
				policy.Scope = fmt.Sprintf("%s:%s", uploadConfig.Bucket, uploadFileKey)
				policy.InsertOnly = 0
			}
			policy.Expires = 30 * 24 * 3600
			uptoken := policy.Token(&mac)
			if fsize > putThreshold {
				var putClient rpc.Client
				if transport != nil {
					putClient = rio.NewClientEx(uptoken, transport, uploadConfig.BindUpIp)
				} else {
					putClient = rio.NewClient(uptoken, uploadConfig.BindUpIp)
				}

				putRet := rio.PutRet{}
				putExtra := rio.PutExtra{}
				progressFkey := Md5Hex(fmt.Sprintf("%s:%s|%s:%s", uploadConfig.SrcDir, uploadConfig.Bucket, localFpath, uploadFileKey))
				progressFname := fmt.Sprintf("%s.progress", progressFkey)
				progressFpath := filepath.Join(storePath, progressFname)
				putExtra.ProgressFile = progressFpath

				err := rio.PutFile(putClient, nil, &putRet, uploadFileKey, localFilePath, &putExtra)
				if err != nil {
					atomic.AddInt64(&failureFileCount, 1)
					if pErr, ok := err.(*rpc.ErrorInfo); ok {
						log.Errorf("Put file `%s' => `%s' failed due to `%s'", localFilePath, uploadFileKey, pErr.Err)
					} else {
						log.Errorf("Put file `%s' => `%s' failed due to `%s'", localFilePath, uploadFileKey, err)
					}
				} else {
					os.Remove(progressFpath)
					atomic.AddInt64(&successFileCount, 1)
					perr := ldb.Put([]byte(ldbKey), []byte(fmt.Sprintf("%d", localFlmd)), &ldbWOpt)
					if perr != nil {
						log.Errorf("Put key `%s' into leveldb error due to `%s'", ldbKey, perr)
					}
				}
			} else {
				var putClient rpc.Client
				if transport != nil {
					putClient = rpc.NewClientEx(transport, uploadConfig.BindUpIp)
				} else {
					putClient = rpc.NewClient(uploadConfig.BindUpIp)
				}

				putRet := fio.PutRet{}
				err := fio.PutFile(putClient, nil, &putRet, uptoken, uploadFileKey, localFilePath, nil)
				if err != nil {
					atomic.AddInt64(&failureFileCount, 1)
					if pErr, ok := err.(*rpc.ErrorInfo); ok {
						log.Errorf("Put file `%s' => `%s' failed due to `%s'", localFilePath, uploadFileKey, pErr.Err)
					} else {
						log.Errorf("Put file `%s' => `%s' failed due to `%s'", localFilePath, uploadFileKey, err)
					}
				} else {
					atomic.AddInt64(&successFileCount, 1)
					perr := ldb.Put([]byte(ldbKey), []byte(fmt.Sprintf("%d", localFlmd)), &ldbWOpt)
					if perr != nil {
						log.Errorf("Put key `%s' into leveldb error due to `%s'", ldbKey, perr)
					}
				}
			}
		}()

	}
	upWorkGroup.Wait()

	fmt.Println()
	fmt.Println("-------Upload Result-------")
	fmt.Println("Total:   \t", currentFileCount)
	fmt.Println("Success: \t", successFileCount)
	fmt.Println("Failure: \t", failureFileCount)
	fmt.Println("Skipped: \t", skippedFileCount)
	fmt.Println("Duration:\t", time.Since(timeStart))
	fmt.Println("-------------------------")

}
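A minimal sketch of driving QiniuUpload; the field names follow the uploadConfig usages above, every value here is a placeholder, and unset fields fall back to the defaults handled inside the function:

func uploadExample() {
	cfg := UploadConfig{
		SrcDir:    "/data/photos", //local dir to sync
		Bucket:    "my-bucket",    //target bucket
		AccessKey: "<access-key>", //placeholder credentials
		SecretKey: "<secret-key>",
		KeyPrefix: "photos/", //prepended to every upload key
		Overwrite: false,
	}
	//upload with 4 concurrent workers
	QiniuUpload(4, &cfg)
}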
Example #10
func (this *ListBucket) List(bucket string, prefix string, listResultFile string) (retErr error) {
	var fp *os.File
	if listResultFile == "stdout" {
		fp = os.Stdout
	} else {
		var openErr error
		fp, openErr = os.Create(listResultFile)
		if openErr != nil {
			retErr = openErr
			log.Errorf("Failed to open list result file `%s'", listResultFile)
			return
		}
	}
	defer fp.Close()
	bw := bufio.NewWriter(fp)

	mac := digest.Mac{this.AccessKey, []byte(this.SecretKey)}
	client := rsf.New(&mac)
	marker := ""
	limit := 1000
	run := true
	maxRetryTimes := 5
	retryTimes := 1
	for run {
		entries, markerOut, err := client.ListPrefix(nil, bucket, prefix, marker, limit)
		if err != nil {
			if err == io.EOF {
				run = false
			} else {
				log.Errorf("List error for marker `%s', %s", marker, err)
				if retryTimes <= maxRetryTimes {
					log.Debug(fmt.Sprintf("Retry list for marker `%s' for `%d' time", marker, retryTimes))
					retryTimes += 1
					continue
				} else {
					log.Errorf("List failed too many times for `%s'", marker)
					break
				}
			}
		} else {
			retryTimes = 1
			if markerOut == "" {
				run = false
			} else {
				marker = markerOut
			}
		}
		//append entries
		for _, entry := range entries {
			lineData := fmt.Sprintf("%s\t%d\t%s\t%d\t%s\t%s\r\n", entry.Key, entry.Fsize, entry.Hash, entry.PutTime, entry.MimeType, entry.EndUser)
			_, wErr := bw.WriteString(lineData)
			if wErr != nil {
				log.Errorf("Write line data `%s' to list result file failed.", lineData)
			}
		}
		fErr := bw.Flush()
		if fErr != nil {
			log.Error("Flush data to list result file error", err)
		}
	}
	return
}
Example #11
File: unzip.go Project: auqf/qshell
func Unzip(zipFilePath string, unzipPath string) (err error) {
	zipReader, zipErr := zip.OpenReader(zipFilePath)
	if zipErr != nil {
		err = errors.New(fmt.Sprintf("Open zip file error, %s", zipErr))
		return
	}
	defer zipReader.Close()

	zipFiles := zipReader.File

	//list dir
	for _, zipFile := range zipFiles {
		fileInfo := zipFile.FileHeader.FileInfo()
		fileName := zipFile.FileHeader.Name

		//check charset utf8 or gbk
		if !utf8.Valid([]byte(fileName)) {
			fileName, err = gbk2Utf8(fileName)
			if err != nil {
				err = errors.New("Unsupported filename encoding")
				continue
			}
		}

		fullPath := filepath.Join(unzipPath, fileName)
		if fileInfo.IsDir() {
			log.Debug("Mkdir", fullPath)
			mErr := os.MkdirAll(fullPath, 0775)
			if mErr != nil {
				err = errors.New(fmt.Sprintf("Mkdir error, %s", mErr))
				continue
			}
		}
	}

	//list file
	for _, zipFile := range zipFiles {
		fileInfo := zipFile.FileHeader.FileInfo()
		fileName := zipFile.FileHeader.Name

		//check charset utf8 or gbk
		if !utf8.Valid([]byte(fileName)) {
			fileName, err = gbk2Utf8(fileName)
			if err != nil {
				err = errors.New("Unsupported filename encoding")
				continue
			}
		}

		fullPath := filepath.Join(unzipPath, fileName)
		if !fileInfo.IsDir() {
			//to be compatible with pkzip(4.5)
			lastIndex := strings.LastIndex(fullPath, string(os.PathSeparator))
			if lastIndex != -1 {
				fullPathDir := fullPath[:lastIndex]
				mErr := os.MkdirAll(fullPathDir, 0775)
				if mErr != nil {
					err = errors.New(fmt.Sprintf("Mkdir error, %s", mErr))
					continue
				}
			}
			log.Debug("Creating file", fullPath)
			localFp, openErr := os.OpenFile(fullPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0666)
			if openErr != nil {
				err = fmt.Errorf("Open local file error, %s", openErr)
				continue
			}

			zipFp, openErr := zipFile.Open()
			if openErr != nil {
				localFp.Close()
				err = fmt.Errorf("Read zip content error, %s", openErr)
				continue
			}

			_, wErr := io.Copy(localFp, zipFp)
			//close both files inside the loop; defer would keep every
			//file open until Unzip returns
			zipFp.Close()
			localFp.Close()
			if wErr != nil {
				err = fmt.Errorf("Save zip content error, %s", wErr)
				continue
			}
		}
	}
	return
}
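A hedged usage sketch for Unzip; the archive path and target directory are placeholders:

func unzipExample() {
	if uErr := Unzip("archive.zip", "./extracted"); uErr != nil {
		log.Error("Unzip failed,", uErr)
	}
}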