func Base64Decode(cmd string, params ...string) { if len(params) == 1 || len(params) == 2 { urlSafe := true var err error var dataToDecode string if len(params) == 2 { urlSafe, err = strconv.ParseBool(params[0]) if err != nil { log.Error("Invalid bool value or <UrlSafe>, must true or false") return } dataToDecode = params[1] } else { dataToDecode = params[0] } var dataDecoded []byte if urlSafe { dataDecoded, err = base64.URLEncoding.DecodeString(dataToDecode) if err != nil { log.Error("Failed to decode `", dataToDecode, "' in url safe mode.") return } } else { dataDecoded, err = base64.StdEncoding.DecodeString(dataToDecode) if err != nil { log.Error("Failed to decode `", dataToDecode, "' in standard mode.") return } } fmt.Println(string(dataDecoded)) } else { CmdHelp(cmd) } }
func main() { if len(os.Args) < 2 { fmt.Println("qrsb <dir>") os.Exit(1) } p, err := NewRsf(os.Args[1]) if err != nil { log.Error("err:", err) os.Exit(2) } if p.isFirstRun() { p.firstRun() } // check pos pos := Pos{} err = loadJsonFile(&pos, p.baseDir+"/qrsb.pos") // pos.Marker = "" //置空,重新开始 if err != nil { log.Error("err:load qrsb.pos file failed,ABORT!") os.Exit(2) } err = p.Run(&pos) if err != nil { log.Error("err:", err) p.printResult() os.Exit(2) } log.Info("Done!") p.printResult() return }
func GetStream(sessionId, accessToken string, gResult *GetStreamResult) { userId, valid := CheckAuthValid(sessionId, accessToken, &gResult.ApiResult) if !valid { return } //get stream by user streamId, gErr := model.GetStreamIdOfUser(userId) if gErr != nil { gResult.SetCode(API_SERVER_ERROR) return } if streamId == "" { newStreamId, newStreamData, newStreamErr := pilis.CreateDynamicStream() if newStreamErr != nil { gResult.SetCode(API_SERVER_ERROR) return } //update user stream sErr := model.SetStreamIdOfUser(userId, newStreamId) if sErr != nil { gResult.SetCode(API_SERVER_ERROR) return } gResult.StreamId = streamId gResult.Stream = newStreamData } else { streamData, sCode, sErr := pilis.GetStream(streamId) if sErr != nil { if sCode == 404 { //the stream is recycled, try to create a new one newStreamId, newStreamData, newStreamErr := pilis.CreateDynamicStream() if newStreamErr != nil { log.Error(newStreamErr) gResult.SetCode(API_SERVER_ERROR) return } //update user stream sErr := model.SetStreamIdOfUser(userId, newStreamId) if sErr != nil { log.Error(sErr) gResult.SetCode(API_SERVER_ERROR) return } gResult.StreamId = streamId gResult.Stream = newStreamData } else { log.Error(sErr) gResult.SetCode(API_SERVER_ERROR) return } } gResult.StreamId = streamId gResult.Stream = streamData } gResult.SetOk() return }
//下载文件item.Name //retry:重试次数 func (p *Rsf) download(item rsf.DumpItem, retry int) (err error) { log.Info("downloading file :", item.Name, " ,file size = ", item.Fsize, "bytes") for i := 1; i <= retry; i++ { code, err := p.getFile(item.Name) if err == nil { log.Info("download completed!") return nil } log.Error("getFile err:", code, err) if code == 612 { break } log.Error("retry download ", item.Name, " ", i, " times") time.Sleep(SLEEP_TIME) } //重试retry次后,下载失败 ffail, err := os.OpenFile(p.baseDir+"/qrsb.failkeys", os.O_CREATE|os.O_RDWR|os.O_APPEND, 0660) if err == nil { defer ffail.Close() b, err := json.Marshal(item) if err == nil { ffail.WriteString(string(b)) } } else { log.Error("Open qrsb.failkeys err") return err } return }
func ReqId(cmd string, params ...string) { if len(params) == 1 { reqId := params[0] decodedBytes, err := base64.URLEncoding.DecodeString(reqId) if err != nil || len(decodedBytes) < 4 { log.Error("Invalid reqid", reqId, err) return } newBytes := decodedBytes[4:] newBytesLen := len(newBytes) newStr := "" for i := newBytesLen - 1; i >= 0; i-- { newStr += fmt.Sprintf("%02X", newBytes[i]) } unixNano, err := strconv.ParseInt(newStr, 16, 64) if err != nil { log.Error("Invalid reqid", reqId, err) return } dstDate := time.Unix(0, unixNano) fmt.Println(fmt.Sprintf("%04d-%02d-%02d/%02d-%02d", dstDate.Year(), dstDate.Month(), dstDate.Day(), dstDate.Hour(), dstDate.Minute())) } else { CmdHelp(cmd) } }
func BatchRename(cmd string, params ...string) { //confirm rcode := CreateRandString(6) if rcode == "" { log.Error("Create confirm code failed") return } rcode2 := "" fmt.Print(fmt.Sprintf("\033[31m<DANGER>\033[0m Input \033[32m%s\033[0m to confirm operation: ", rcode)) fmt.Scanln(&rcode2) if rcode != rcode2 { fmt.Println("Task quit!") return } if len(params) == 2 { bucket := params[0] oldNewKeyMapFile := params[1] accountS.Get() mac := digest.Mac{ accountS.AccessKey, []byte(accountS.SecretKey), } client := rs.New(&mac) fp, err := os.Open(oldNewKeyMapFile) if err != nil { log.Error("Open old new key map file error") return } defer fp.Close() scanner := bufio.NewScanner(fp) scanner.Split(bufio.ScanLines) entries := make([]qshell.RenameEntryPath, 0) for scanner.Scan() { line := strings.TrimSpace(scanner.Text()) items := strings.Split(line, "\t") if len(items) == 2 { oldKey := items[0] newKey := items[1] if oldKey != "" && newKey != "" { entry := qshell.RenameEntryPath{bucket, oldKey, newKey} entries = append(entries, entry) } } if len(entries) == BATCH_ALLOW_MAX { batchRename(client, entries) entries = make([]qshell.RenameEntryPath, 0) } } if len(entries) > 0 { batchRename(client, entries) } fmt.Println("All Renamed!") } else { CmdHelp(cmd) } }
// ResumablePut implements the rput sub command: upload <LocalFile> to
// <Bucket>:<Key> via the resumable upload API. Accepted forms:
//
//	rput <Bucket> <Key> <LocalFile>
//	rput <Bucket> <Key> <LocalFile> <MimeType|UpHost>
//	rput <Bucket> <Key> <LocalFile> <MimeType> <UpHost>
func ResumablePut(cmd string, params ...string) {
	if len(params) == 3 || len(params) == 4 || len(params) == 5 {
		bucket := params[0]
		key := params[1]
		localFile := params[2]
		mimeType := ""
		upHost := "http://upload.qiniu.com"
		if len(params) == 4 {
			// The single optional param is ambiguous: an "http" prefix marks
			// it as the upload host, anything else is taken as the mime type.
			param := params[3]
			if strings.HasPrefix(param, "http") {
				upHost = param
			} else {
				mimeType = param
			}
		}
		if len(params) == 5 {
			mimeType = params[3]
			upHost = params[4]
		}
		accountS.Get()
		mac := digest.Mac{accountS.AccessKey, []byte(accountS.SecretKey)}
		policy := rs.PutPolicy{}
		policy.Scope = bucket
		putExtra := rio.PutExtra{}
		if mimeType != "" {
			putExtra.MimeType = mimeType
		}
		// NOTE(review): this mutates a package-global; concurrent uploads
		// to different hosts would race — confirm single-use assumption.
		conf.UP_HOST = upHost
		// Per-block upload progress reporting.
		progressHandler := ProgressHandler{
			BlockIndices:    make([]int, 0),
			BlockProgresses: make(map[int]float32),
		}
		putExtra.Notify = progressHandler.Notify
		putExtra.NotifyErr = progressHandler.NotifyErr
		uptoken := policy.Token(&mac)
		putRet := rio.PutRet{}
		startTime := time.Now()
		fStat, statErr := os.Stat(localFile)
		if statErr != nil {
			log.Error("Local file error", statErr)
			return
		}
		fsize := fStat.Size()
		err := rio.PutFile(nil, &putRet, uptoken, key, localFile, &putExtra)
		if err != nil {
			log.Error("Put file error", err)
		} else {
			fmt.Println("\r\nPut file", localFile, "=>", bucket, ":", putRet.Key, "(", putRet.Hash, ")", "success!")
		}
		// Elapsed time and average speed: fsize is bytes, lastNano is ns,
		// so fsize*1e6/lastNano yields (decimal) kilobytes per second.
		lastNano := time.Now().UnixNano() - startTime.UnixNano()
		lastTime := fmt.Sprintf("%.2f", float32(lastNano)/1e9)
		avgSpeed := fmt.Sprintf("%.1f", float32(fsize)*1e6/float32(lastNano))
		fmt.Println("Last time:", lastTime, "s, Average Speed:", avgSpeed, "KB/s")
	} else {
		CmdHelp(cmd)
	}
}
func (this *ListBucket) List(bucket string, prefix string, listResultFile string) (retErr error) { fp, openErr := os.OpenFile(listResultFile, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666) if openErr != nil { retErr = openErr log.Error(fmt.Sprintf("Failed to open list result file `%s'", listResultFile)) return } defer fp.Close() bw := bufio.NewWriter(fp) mac := digest.Mac{this.AccessKey, []byte(this.SecretKey)} client := rsf.New(&mac) marker := "" limit := 1000 run := true maxRetryTimes := 5 retryTimes := 1 for run { entries, markerOut, err := client.ListPrefix(nil, bucket, prefix, marker, limit) if err != nil { if err == io.EOF { run = false } else { log.Error(fmt.Sprintf("List error for marker `%s'", marker), err) if retryTimes <= maxRetryTimes { log.Debug(fmt.Sprintf("Retry list for marker `%s' for `%d' time", marker, retryTimes)) retryTimes += 1 continue } else { log.Error(fmt.Sprintf("List failed too many times for `%s'", marker)) break } } } else { retryTimes = 1 if markerOut == "" { run = false } else { marker = markerOut } } //append entries for _, entry := range entries { lineData := fmt.Sprintf("%s\t%d\t%s\t%d\t%s\t%s\r\n", entry.Key, entry.Fsize, entry.Hash, entry.PutTime, entry.MimeType, entry.EndUser) _, wErr := bw.WriteString(lineData) if wErr != nil { log.Error(fmt.Sprintf("Write line data `%s' to list result file failed.", lineData)) } } fErr := bw.Flush() if fErr != nil { log.Error("Flush data to list result file error", err) } } return }
// ListBucket lists every object under this.Prefix in the configured aliyun
// OSS bucket and writes "<key-without-prefix>\t<size>\t<modtime>" lines into
// listResultFile (modtime in 100ns units, matching the dir cache format).
// Each failing page is retried up to maxRetryTimes.
func (this *AliListBucket) ListBucket(listResultFile string) (err error) {
	//open result file
	mode := os.O_CREATE | os.O_TRUNC | os.O_WRONLY
	fp, openErr := os.OpenFile(listResultFile, mode, 0666)
	if openErr != nil {
		err = openErr
		return
	}
	defer fp.Close()
	bw := bufio.NewWriter(fp)
	//list bucket by prefix
	marker := ""
	prefixLen := len(this.Prefix)
	ossClient := oss.NewClient(this.DataCenter, this.AccessKeyId, this.AccessKeySecret, 0)
	maxRetryTimes := 5
	retryTimes := 1
	for {
		lbr, lbrErr := ossClient.GetBucket(this.Bucket, this.Prefix, marker, "", "")
		if lbrErr != nil {
			err = lbrErr
			log.Error("Parse list result error, ", "marker=[", marker, "]", lbrErr)
			if retryTimes <= maxRetryTimes {
				// Retry the same marker.
				log.Debug("Retry marker=", marker, "] for ", retryTimes, "time...")
				retryTimes += 1
				continue
			} else {
				// Give up; err keeps the last list error.
				break
			}
		} else {
			retryTimes = 1
		}
		for _, object := range lbr.Contents {
			// LastModified is RFC3339-with-millis; fall back to "now" when it
			// does not parse so the entry is still written.
			lmdTime, lmdPErr := time.Parse("2006-01-02T15:04:05.999Z", object.LastModified)
			if lmdPErr != nil {
				log.Error("Parse object last modified error, ", lmdPErr)
				lmdTime = time.Now()
			}
			// Strip the configured prefix from the key; time unit is 100ns.
			bw.WriteString(fmt.Sprintln(fmt.Sprintf("%s\t%d\t%d", object.Key[prefixLen:], object.Size, lmdTime.UnixNano()/100)))
		}
		if !lbr.IsTruncated {
			break
		}
		marker = lbr.NextMarker
	}
	fErr := bw.Flush()
	if fErr != nil {
		log.Error("Write data to buffer writer failed", fErr)
		err = fErr
		return
	}
	return err
}
func loadJsonFile(i interface{}, file string) error { d, err := ioutil.ReadFile(file) if err != nil { log.Error("load json file", file, "err:", err) return err } err = json.Unmarshal(d, i) if err != nil { log.Error("unmarshal json file", file, "err:", err) return err } return nil }
func SendQueue(queue string, content string) { beanstalkd, err := lentil.Dial(Config.Genral.BeanstalkServer) if err != nil { log.Error(err) } else { err = beanstalkd.Use(queue) } if err != nil { log.Error(err) } else { beanstalkd.Put(0, 0, 30, []byte(content)) } }
func writeJsonResult(w http.ResponseWriter, statusCode int, result interface{}) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(statusCode) data, err := json.Marshal(result) if err != nil { log.Error("encode ufop result error,", err) writeJsonError(w, 500, "encode ufop result error") } else { _, err := io.WriteString(w, string(data)) if err != nil { log.Error("write json response error", err) } } }
func main() { log.SetOutput(os.Stdout) setQiniuHosts() args := os.Args argc := len(args) var configFilePath string switch argc { case 2: configFilePath = args[1] default: help() return } //load config ufopConf := &ufop.UfopConfig{} confErr := ufopConf.LoadFromFile(configFilePath) if confErr != nil { log.Error("load config file error,", confErr) return } ufopServ := ufop.NewServer(ufopConf) //register job handlers if err := ufopServ.RegisterJobHandler("amerge.conf", &amerge.AudioMerger{}); err != nil { log.Error(err) } if err := ufopServ.RegisterJobHandler("html2image.conf", &html2image.Html2Imager{}); err != nil { log.Error(err) } if err := ufopServ.RegisterJobHandler("html2pdf.conf", &html2pdf.Html2Pdfer{}); err != nil { log.Error(err) } if err := ufopServ.RegisterJobHandler("mkzip.conf", &mkzip.Mkzipper{}); err != nil { log.Error(err) } if err := ufopServ.RegisterJobHandler("unzip.conf", &unzip.Unzipper{}); err != nil { log.Error(err) } if err := ufopServ.RegisterJobHandler("imagecomp.conf", &imagecomp.ImageComposer{}); err != nil { log.Error(err) } if err := ufopServ.RegisterJobHandler("roundpic.conf", &roundpic.RoundPicer{}); err != nil { log.Error(err) } //listen ufopServ.Listen() }
func (this *DirCache) Cache(cacheRootPath string, cacheResultFile string) (fileCount int) { if _, err := os.Stat(cacheResultFile); err != nil { log.Debug(fmt.Sprintf("No cache file `%s' found, will create one", cacheResultFile)) } else { os.Remove(cacheResultFile + ".old") if rErr := os.Rename(cacheResultFile, cacheResultFile+".old"); rErr != nil { log.Error(fmt.Sprintf("Unable to rename cache file, plz manually delete `%s' and `%s.old'", cacheResultFile, cacheResultFile)) log.Error(rErr) return } } cacheResultFileH, err := os.OpenFile(cacheResultFile, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0666) if err != nil { log.Error(fmt.Sprintf("Failed to open cache file `%s'", cacheResultFile)) return } defer cacheResultFileH.Close() bWriter := bufio.NewWriter(cacheResultFileH) walkStart := time.Now() log.Debug(fmt.Sprintf("Walk `%s' start from `%s'", cacheRootPath, walkStart.String())) log.Debug(fmt.Sprintf("Save dir cache result to `%s' and may take some time...", cacheResultFile)) filepath.Walk(cacheRootPath, func(path string, fi os.FileInfo, err error) error { var retErr error log.Debug(fmt.Sprintf("Walking through `%s'", cacheRootPath)) if !fi.IsDir() { relPath := strings.TrimPrefix(strings.TrimPrefix(path, cacheRootPath), string(os.PathSeparator)) fsize := fi.Size() //Unit is 100ns flmd := fi.ModTime().UnixNano() / 100 log.Debug(fmt.Sprintf("Hit file `%s' size: `%d' mode time: `%d`", relPath, fsize, flmd)) fmeta := fmt.Sprintln(fmt.Sprintf("%s\t%d\t%d", relPath, fsize, flmd)) if _, err := bWriter.WriteString(fmeta); err != nil { log.Error(fmt.Sprintf("Failed to write data `%s' to cache file", fmeta)) retErr = err } fileCount += 1 } return retErr }) if err := bWriter.Flush(); err != nil { log.Error(fmt.Sprintf("Failed to flush to cache file `%s'", cacheResultFile)) } walkEnd := time.Now() log.Debug(fmt.Sprintf("Walk `%s' end at `%s'", cacheRootPath, walkEnd.String())) log.Debug(fmt.Sprintf("Walk `%s' last for `%s'", cacheRootPath, time.Since(walkStart))) return }
func batchRename(client rs.Client, entries []qshell.RenameEntryPath) { ret, err := qshell.BatchRename(client, entries) if err != nil { log.Error("Batch rename error", err) } if len(ret) > 0 { for i, entry := range entries { item := ret[i] if item.Data.Error != "" { log.Error(fmt.Sprintf("Rename '%s' => '%s' Failed, Code :%d", entry.OldKey, entry.NewKey, item.Code)) } else { log.Debug(fmt.Sprintf("Rename '%s' => '%s' Success, Code :%d", entry.OldKey, entry.NewKey, item.Code)) } } } }
func batchChgm(client rs.Client, entries []qshell.ChgmEntryPath) { ret, err := qshell.BatchChgm(client, entries) if err != nil { log.Error("Batch chgm error", err) } if len(ret) > 0 { for i, entry := range entries { item := ret[i] if item.Data.Error != "" { log.Error(fmt.Sprintf("Chgm '%s' => '%s' Failed, Code :%d", entry.Key, entry.MimeType, item.Code)) } else { log.Debug(fmt.Sprintf("Chgm '%s' => '%s' Success, Code :%d", entry.Key, entry.MimeType, item.Code)) } } } }
func Copy(cmd string, params ...string) { if len(params) == 3 || len(params) == 4 { srcBucket := params[0] srcKey := params[1] destBucket := params[2] destKey := srcKey if len(params) == 4 { destKey = params[3] } accountS.Get() mac := digest.Mac{ accountS.AccessKey, []byte(accountS.SecretKey), } client := rs.New(&mac) err := client.Copy(nil, srcBucket, srcKey, destBucket, destKey) if err != nil { log.Error("Copy error,", err) } else { fmt.Println("Done!") } } else { CmdHelp(cmd) } }
func QiniuUpload(cmd string, params ...string) { if len(params) == 1 || len(params) == 2 { var uploadConfigFile string var threadCount int64 var err error if len(params) == 2 { threadCount, err = strconv.ParseInt(params[0], 10, 64) if err != nil { log.Error("Invalid <ThreadCount> value,", params[0]) return } uploadConfigFile = params[1] } else { uploadConfigFile = params[0] } if threadCount < qshell.MIN_UPLOAD_THREAD_COUNT || threadCount > qshell.MAX_UPLOAD_THREAD_COUNT { log.Info("You can set <ThreadCount> value between 1 and 100 to improve speed") threadCount = qshell.MIN_UPLOAD_THREAD_COUNT } qshell.QiniuUpload(int(threadCount), uploadConfigFile) } else { CmdHelp(cmd) } }
func main() { var err error err = database.InitDB(opts.Driver, opts.DataSource) if err != nil { log.Fatal(err) } log.Info("gobuild service stated ...") http.Handle("/", m) http.Handle("/websocket/", websocket.Handler(WsBuildServer)) http.HandleFunc("/hello", HelloServer) if *secure { go func() { er := http.ListenAndServeTLS(":443", "bin/ssl.crt", "bin/ssl.key", nil) if er != nil { log.Error(er) } }() } err = http.ListenAndServe(opts.ListenAddr, nil) if err != nil { log.Fatal(err) } }
//是否需要重新下载文件 //判断准则:文件名->文件大小->文件HASH func (p *Rsf) isNeedReload(item rsf.DumpItem, pos *Pos) bool { if item.Time <= p.Config.StartTime { return false } var path string if p.Config.EncodeFname == 0 { path = p.baseDir + "/data/" + item.Name } else { key := base64.URLEncoding.EncodeToString([]byte(item.Name)) h := sha1.New() io.WriteString(h, key) s := hex.EncodeToString(h.Sum(nil)) dir := p.baseDir + "/data/" + s[:2] + "/" + s[2:4] path = dir + "/" + key } info, err := os.Stat(path) if err != nil || info.Size() != item.Fsize { return true } etag, err := GetEtag(path) if err != nil { log.Error("GET Etag err:", item) return true } return etag != item.Hash }
func Base64Encode(cmd string, params ...string) { if len(params) == 1 || len(params) == 2 { urlSafe := true var err error var dataToEncode string if len(params) == 2 { urlSafe, err = strconv.ParseBool(params[0]) if err != nil { log.Error("Invalid bool value or <UrlSafe>, must true or false") return } dataToEncode = params[1] } else { dataToEncode = params[0] } dataEncoded := "" if urlSafe { dataEncoded = base64.URLEncoding.EncodeToString([]byte(dataToEncode)) } else { dataEncoded = base64.StdEncoding.EncodeToString([]byte(dataToEncode)) } fmt.Println(dataEncoded) } else { CmdHelp(cmd) } }
func GetPublishingList(sessionId, accessToken string, vResult *PublishingVideoListResult) { if _, valid := CheckAuthValid(sessionId, accessToken, &vResult.ApiResult); !valid { return } videoList := make([]model.LiveVideo, 0) qErr := model.GetLiveStreamList(&videoList) if qErr != nil { log.Error("get live video list error,", qErr.Error()) vResult.SetCode(API_SERVER_ERROR) return } publishingVideoList := make([]PublishingVideo, 0) for _, video := range videoList { publishingVideo := PublishingVideo{ User: video.User.Name, Title: video.Title, PublishId: video.PublishId, CreateTime: video.CreateTime.Unix(), } publishingVideoList = append(publishingVideoList, publishingVideo) } vResult.VideoList = publishingVideoList vResult.SetOk() return }
func (p *Rsf) getFile(filename string) (int, error) { key := base64.URLEncoding.EncodeToString([]byte(filename)) // key2 := p.Bucket + ":" + string(filename) // data, code, err := p.rss.Get(key2, "") // if err != nil { // log.Error("rss.Get err:", code, err) // return code, err // } // resp, err := http.Get(data.URL) resp, err := p.HttpGet(filename) if err != nil { log.Error("get io url err:", err, filename) return 0, err } defer resp.Body.Close() xl := xlog.NewWith(resp.Header.Get("X-Reqid")) if resp.StatusCode != 200 { xl.Error("get io url code != 200:", resp.StatusCode) return 0, errors.New("get io url err:" + strconv.Itoa(resp.StatusCode)) } if p.EncodeFname == 0 { return 0, p.saveKeyPath(xl, filename, key, resp.Body) } return 0, p.saveKey(xl, key, resp.Body) }
func PrivateUrl(cmd string, params ...string) { if len(params) == 1 || len(params) == 2 { publicUrl := params[0] var deadline int64 if len(params) == 2 { if val, err := strconv.ParseInt(params[1], 10, 64); err != nil { log.Error("Invalid <Deadline>") return } else { deadline = val } } else { deadline = time.Now().Add(time.Second * 3600).Unix() } accountS.Get() mac := digest.Mac{ accountS.AccessKey, []byte(accountS.SecretKey), } url := qshell.PrivateUrl(&mac, publicUrl, deadline) fmt.Println(url) } else { CmdHelp(cmd) } }
func batchDelete(client rs.Client, entries []rs.EntryPath) { ret, err := qshell.BatchDelete(client, entries) if err != nil { log.Error("Batch delete error", err) } if len(ret) > 0 { for i, entry := range entries { item := ret[i] if item.Data.Error != "" { log.Error(fmt.Sprintf("Delete '%s' => '%s' Failed, Code: %d", entry.Bucket, entry.Key, item.Code)) } else { log.Debug(fmt.Sprintf("Delete '%s' => '%s' Success, Code: %d", entry.Bucket, entry.Key, item.Code)) } } } }
func QiniuDownload(cmd string, params ...string) { if len(params) == 1 || len(params) == 2 { var threadCount int64 = 5 var downConfig string var err error if len(params) == 1 { downConfig = params[0] } else { threadCount, err = strconv.ParseInt(params[0], 10, 64) if err != nil { log.Error("Invalid value for <ThreadCount>", params[0]) return } downConfig = params[1] } if threadCount < qshell.MIN_DOWNLOAD_THREAD_COUNT || threadCount > qshell.MAX_DOWNLOAD_THREAD_COUNT { log.Warn("<ThreadCount> can only between 1 and 100") threadCount = qshell.MIN_DOWNLOAD_THREAD_COUNT } qshell.QiniuDownload(int(threadCount), downConfig) } else { CmdHelp(cmd) } }
func AliListBucket(cmd string, params ...string) { if len(params) == 5 || len(params) == 6 { dataCenter := params[0] bucket := params[1] accessKeyId := params[2] accessKeySecret := params[3] listBucketResultFile := "" prefix := "" if len(params) == 6 { prefix = params[4] listBucketResultFile = params[5] } else { listBucketResultFile = params[4] } aliListBucket := qshell.AliListBucket{ DataCenter: dataCenter, Bucket: bucket, AccessKeyId: accessKeyId, AccessKeySecret: accessKeySecret, Prefix: prefix, } err := aliListBucket.ListBucket(listBucketResultFile) if err != nil { log.Error("List bucket error,", err) } } else { CmdHelp(cmd) } }
//get watermark operation parameters func (this *OSSImager) parseWatermarkOperation(oper string) (operation OSSImageOperation) { paramItems := strings.Split(oper, "&") params := map[string]string{} for _, paramItem := range paramItems { kvp := strings.Split(paramItem, "=") if len(kvp) == 2 { key := strings.TrimSpace(kvp[0]) value := strings.TrimSpace(kvp[1]) params[key] = value } } operation = OSSImageOperation{ Name: OSS_OPER_WATERMARK, } operation.WMType = this.wmInt(params["watermark"]) //wmText operation.WMText = this.wmBase64Decode("text", params["text"]) //wmFontType operation.WMFontType = this.wmBase64Decode("type", params["type"]) //wmFontColor operation.WMFontColor = this.wmBase64Decode("color", params["color"]) //wmFontSize if wmFontSize, pErr := strconv.Atoi(params["size"]); pErr != nil { log.Error(fmt.Sprintf("invalid watermark font size, '%s'", params["size"])) } else { operation.WMFontSize = wmFontSize } //wmImage operation.WMImage = this.wmBase64Decode("object", params["object"]) //position operation.WMGravity = this.wmInt(params["p"]) //dissolve operation.WMDissolve = this.wmInt(params["t"]) //offsetX operation.WMOffsetX = this.wmInt(params["x"]) //offsetY operation.WMOffsetY = this.wmInt(params["y"]) //voffset operation.WMVOffset = this.wmInt(params["voffset"]) //fix offset //{@link http://help.aliyun.com/document_detail/oss/oss-img-guide/watermark/basic-parameters.html} if _, ok := params["size"]; !ok { operation.WMFontSize = 40 } if _, ok := params["x"]; !ok { operation.WMOffsetX = 10 } if _, ok := params["y"]; !ok { operation.WMOffsetY = 10 } return }
// achieve and upload func (b *Builder) publish(file string) (addr string, err error) { var path string if b.framework == "" { path, err = b.pack([]string{file}, filepath.Join(b.srcDir, ".gobuild.yml")) } else { path, err = utils.TempFile("files", "tmp-", "-"+filepath.Base(file)) if err != nil { return } err = sh.Command("mv", "-v", file, path).Run() } if err != nil { return } // file ext<zip|tar.gz> suffix := ".zip" if strings.HasSuffix(path, ".tar.gz") { suffix = ".tar.gz" } go func() { defer func() { log.Debug("delete history:", b.tag) delete(history, b.tag) go func() { // leave 5min gap for unfinished downloading. time.Sleep(time.Minute * 5) //time.Sleep(time.Second * 5) os.Remove(path) }() }() // upload var cdnAddr string var err error if *environment == "development" { cdnAddr, err = UploadLocal(path) } else { name := fmt.Sprintf("%s-%s-%s-%s", filepath.Base(b.project), b.os, b.arch, b.ref) + suffix cdnAddr, err = UploadFile(path, uuid.New()+"/"+name) } if err != nil { return } log.Debug("upload ok:", cdnAddr) output := "" if b.wbc != nil { output = string(b.wbc.Bytes()) } err = database.AddFile(b.pid, b.tag, cdnAddr, output) if err != nil { log.Error(err) } }() tmpAddr := "http://" + opts.Hostname + "/" + path history[b.tag] = tmpAddr return tmpAddr, nil }
func batchCopy(client rs.Client, entries []qshell.CopyEntryPath) { ret, err := qshell.BatchCopy(client, entries) if err != nil { log.Error("Batch move error", err) } if len(ret) > 0 { for i, entry := range entries { item := ret[i] if item.Data.Error != "" { log.Error(fmt.Sprintf("Copy '%s:%s' => '%s:%s' Failed, Code :%d", entry.SrcBucket, entry.SrcKey, entry.DestBucket, entry.DestKey, item.Code)) } else { log.Debug(fmt.Sprintf("Copy '%s:%s' => '%s:%s' Success, Code :%d", entry.SrcBucket, entry.SrcKey, entry.DestBucket, entry.DestKey, item.Code)) } } } }