// download fetches the file item.Name.
// retry is the number of download attempts before the key is recorded as failed.
func (p *Rsf) download(item rsf.DumpItem, retry int) (err error) {
	log.Info("downloading file:", item.Name, ", file size =", item.Fsize, "bytes")
	for i := 1; i <= retry; i++ {
		code, err := p.getFile(item.Name)
		if err == nil {
			log.Info("download completed!")
			return nil
		}
		log.Error("getFile err:", code, err)
		if code == 612 { // 612: resource does not exist, retrying will not help
			break
		}
		log.Error("retry download ", item.Name, " ", i, " times")
		time.Sleep(SLEEP_TIME)
	}
	// all retries failed: record the item so it can be re-fetched later
	ffail, err := os.OpenFile(p.baseDir+"/qrsb.failkeys", os.O_CREATE|os.O_RDWR|os.O_APPEND, 0660)
	if err == nil {
		defer ffail.Close()
		b, err := json.Marshal(item)
		if err == nil {
			ffail.WriteString(string(b))
		}
	} else {
		log.Error("Open qrsb.failkeys err")
		return err
	}
	return
}
func main() {
	var err error
	err = database.InitDB(opts.Driver, opts.DataSource)
	if err != nil {
		log.Fatal(err)
	}
	log.Info("gobuild service started ...")

	http.Handle("/", m)
	http.Handle("/websocket/", websocket.Handler(WsBuildServer))
	http.HandleFunc("/hello", HelloServer)

	if *secure {
		go func() {
			er := http.ListenAndServeTLS(":443", "bin/ssl.crt", "bin/ssl.key", nil)
			if er != nil {
				log.Error(er)
			}
		}()
	}
	err = http.ListenAndServe(opts.ListenAddr, nil)
	if err != nil {
		log.Fatal(err)
	}
}
func (p *Instance) Run(addr string) error {
	mux := http.NewServeMux()
	mux.HandleFunc("/short", p.HandleShort)
	mux.HandleFunc("/", p.HandleRedirect)
	log.Info("running at:", addr)
	return http.ListenAndServe(addr, mux)
}
func (p *Program) RunWithRetry() {
	for p.retry = 0; p.retry < p.Info.StartRetries+1; p.retry++ {
		// wait for the program to exit
		errc := GoFunc(p.Run)
		var err error

	PROGRAM_WAIT:
		// RUNNING state: wait for exit, for the start timer, or for a stop request
		select {
		case err = <-errc:
			log.Info(p.Info.Name, err)
		case <-time.After(time.Second * time.Duration(p.Info.StartSeconds)):
			// the program stayed up long enough: reset the retry counter
			p.retry = 0
			goto PROGRAM_WAIT
		case <-p.stopc:
			return
		}

		// enter RETRY_WAIT state before the next attempt
		if p.retry < p.Info.StartRetries {
			p.setStatus(ST_RETRYWAIT)
			select {
			case <-p.stopc:
				return
			case <-time.After(time.Second * 2):
			}
		}
	}
	p.setStatus(ST_FATAL)
}
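// GoFunc is used above but not shown in this collection. A minimal sketch of
// what such a helper might look like, assuming it only has to run the supplied
// function in a goroutine and deliver its error on a channel; this is an
// assumption, not the project's actual implementation.
func GoFunc(f func() error) chan error {
	errc := make(chan error, 1) // buffered so the goroutine can exit even if nobody receives
	go func() {
		errc <- f()
	}()
	return errc
}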
func QiniuUpload(cmd string, params ...string) {
	if len(params) == 1 || len(params) == 2 {
		var uploadConfigFile string
		var threadCount int64
		var err error
		if len(params) == 2 {
			threadCount, err = strconv.ParseInt(params[0], 10, 64)
			if err != nil {
				log.Error("Invalid <ThreadCount> value,", params[0])
				return
			}
			uploadConfigFile = params[1]
		} else {
			uploadConfigFile = params[0]
		}
		if threadCount < qshell.MIN_UPLOAD_THREAD_COUNT || threadCount > qshell.MAX_UPLOAD_THREAD_COUNT {
			log.Info("You can set <ThreadCount> value between 1 and 100 to improve speed")
			threadCount = qshell.MIN_UPLOAD_THREAD_COUNT
		}
		qshell.QiniuUpload(int(threadCount), uploadConfigFile)
	} else {
		CmdHelp(cmd)
	}
}
func main() {
	if len(os.Args) < 2 {
		fmt.Println("qrsb <dir>")
		os.Exit(1)
	}
	p, err := NewRsf(os.Args[1])
	if err != nil {
		log.Error("err:", err)
		os.Exit(2)
	}
	if p.isFirstRun() {
		p.firstRun()
	}
	// load the saved listing position
	pos := Pos{}
	err = loadJsonFile(&pos, p.baseDir+"/qrsb.pos")
	// pos.Marker = "" // clear the marker to start over from the beginning
	if err != nil {
		log.Error("err: load qrsb.pos file failed, ABORT!")
		os.Exit(2)
	}
	err = p.Run(&pos)
	if err != nil {
		log.Error("err:", err)
		p.printResult()
		os.Exit(2)
	}
	log.Info("Done!")
	p.printResult()
}
func handleRead(ctx context.Context, c *client.Client) error {
	_, data, _, err := c.Read(ctx, readName, readOffset, readLen, readExpChecksum)
	if err != nil {
		log.Fatalf("Read err (%v)", err)
	}
	log.Info(string(data))
	return nil
}
func (r *RESTFullArgsParse) Parse(env *ReqEnv, typ reflect.Type) (vl reflect.Value, parsed bool) {
	parsed = strings.HasSuffix(typ.Name(), "ArgsRest")
	if !parsed {
		return
	}
	if _, ok := parserCache[typ]; !ok {
		parserCache[typ] = r
	}
	vl = reflect.New(typ).Elem()
	qrys := parseQuery(env.Req.URL.Path)
	if env.Req.Method == "POST" {
		env.Req.ParseForm()
	}
	post := func(key string) string {
		return env.Req.FormValue(key)
	}
	// look up the value first in the path query, then fall back to the POST form
	fun := func(qryName string) string {
		if qry, ok := qrys[qryName]; ok {
			return qry
		}
		return post(qryName)
	}
	for i := 0; i < typ.NumField(); i++ {
		qryName := strings.ToLower(typ.Field(i).Name)
		qry := fun(qryName)
		log.Info(qryName, qry)
		switch typ.Field(i).Type.Name() {
		case "string":
			vl.Field(i).SetString(qry)
		case "int":
			if ival, err := strconv.Atoi(qry); err == nil {
				vl.Field(i).SetInt(int64(ival))
			}
		case "int64":
			if ival, err := strconv.ParseInt(qry, 10, 64); err == nil {
				vl.Field(i).SetInt(ival)
			}
		case "bool":
			if ival, err := strconv.ParseBool(qry); err == nil {
				vl.Field(i).SetBool(ival)
			}
		case "float64":
			if ival, err := strconv.ParseFloat(qry, 64); err == nil {
				vl.Field(i).SetFloat(ival)
			}
		}
	}
	return
}
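// Hypothetical illustration of the parser above: any struct whose type name
// ends in "ArgsRest" is filled field by field, looking up the lower-cased field
// name first in the URL path query and then in the POST form. The type and
// field names below are made up for the example.
type listArgsRest struct {
	Bucket string  // bound from "bucket"
	Limit  int     // bound from "limit"
	Debug  bool    // bound from "debug"
	Ratio  float64 // bound from "ratio"
}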
func handleRemove(ctx context.Context, c *client.Client) error {
	err := c.Remove(ctx, removeName, removeAll)
	if err != nil {
		log.Fatalf("Remove err (%v)", err)
	}
	log.Info("remove succeeded")
	return nil
}
func (p *Rsf) saveKeyPath(xl *xlog.Logger, rawKey, key string, r io.Reader) error {
	filename := p.baseDir + "/data/" + rawKey
	dir := path.Dir(filename)
	err := os.MkdirAll(dir, 0700)
	if err != nil {
		log.Info("saveKeyPath os.MkdirAll fail:", err)
		return p.saveKey(xl, key, r)
	}
	f, err := os.Create(filename)
	if err != nil {
		log.Info("saveKeyPath os.Create fail:", err)
		return p.saveKey(xl, key, r)
	}
	defer f.Close()
	_, err = io.Copy(f, r)
	log.Info("save key (path) done:", rawKey, err)
	return err
}
func newLogger(logPath string) {
	os.MkdirAll(path.Dir(logPath), os.ModePerm)
	f, err := os.OpenFile(logPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, os.ModePerm)
	if err != nil {
		qlog.Fatal(err)
	}
	qlog.SetOutput(f)
	qlog.Info("Start logging serv...")
}
func hello(rw http.ResponseWriter) {
	// emit "hello." every two seconds
	ticker := time.NewTicker(2 * time.Second)
	defer ticker.Stop()
	for {
		<-ticker.C
		rw.Write(ToByte("hello."))
		log.Info("hello")
	}
}
func newUpdateLogger(execDir string) {
	logPath := execDir + "/log/update.log"
	os.MkdirAll(path.Dir(logPath), os.ModePerm)
	f, err := os.OpenFile(logPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, os.ModePerm)
	if err != nil {
		qlog.Fatal(err)
	}
	qlog.SetOutput(f)
	qlog.Info("Start logging update...")
}
func OpenChangeLog(stateDir, eigen string) (cl changelog.Logger, urls []string, err error) {
	h := md5.New()
	h.Write([]byte(eigen))
	// Sum appends the md5 digest to the prefix bytes "20"
	hash := h.Sum([]byte{'2', '0'})
	clname := stateDir + base64.URLEncoding.EncodeToString(hash) + ".log"
	log.Info("Processing file:", clname)
	cl, urls, err = changelog.Open(clname)
	return
}
func newLogger(execDir string) {
	logPath := execDir + "/log/serv.log"
	os.MkdirAll(path.Dir(logPath), os.ModePerm)
	f, err := os.OpenFile(logPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, os.ModePerm)
	if err != nil {
		qlog.Fatal(err)
	}
	qlog.SetOutput(f)
	//qlog.SetOutputLevel(qlog.Ldebug)
	qlog.Info("Start logging serv...")
}
// fetch lists one page of the bucket and downloads the files in it:
// 1) list a page of items with ListPrefix
// 2) download the file for every item on the page
// The bool result is true when the listing is exhausted and fetching is done.
func (p *Rsf) fetch(pos *Pos) (bool, error) {
	log.Info("fetching, marker=", pos.Marker)
	xl := xlog.NewDummy()
	ret, err := p.rsfs.ListPrefix(xl, p.Bucket, p.Config.Prefix, pos.Marker, MAX_LIMIT)
	if err != nil {
		xl.Error("fetch err:", err, pos, p.Bucket)
		return false, err
	}
	if len(ret.Items) == 0 {
		return true, nil
	}
	for _, i := range ret.Items {
		if p.MaxSize <= 0 || i.Fsize <= p.MaxSize {
			if p.isNeedReload(i, pos) {
				p.download(i, RETRY_TIMES)
			} else {
				log.Info(i.Name + " already exists, skip!")
			}
		}
	}
	pos.Marker = ret.Marker
	return len(ret.Items) < MAX_LIMIT, nil
}
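// The Run method called from main above is not included in this collection. A
// minimal sketch of a driver loop that keeps calling fetch until it reports
// completion, persisting the marker after every page so an interrupted run can
// resume; saveJsonFile is an assumed counterpart of loadJsonFile and not part
// of the original code.
func (p *Rsf) runSketch(pos *Pos) error {
	for {
		done, err := p.fetch(pos)
		if err != nil {
			return err
		}
		// save the current marker so a restart resumes from this page
		if err := saveJsonFile(pos, p.baseDir+"/qrsb.pos"); err != nil {
			return err
		}
		if done {
			return nil
		}
	}
}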
func (this *UfopServer) serveUfop(w http.ResponseWriter, req *http.Request) { //check method if req.Method != "POST" { writeJsonError(w, 405, "method not allowed") return } defer req.Body.Close() var err error var ufopReq UfopRequest var ufopResult interface{} var ufopResultType int var ufopResultContentType string ufopReqData, err := ioutil.ReadAll(req.Body) if err != nil { writeJsonError(w, 500, "read ufop request body error") return } reqId := utils.NewRequestId() log.Info(reqId, string(ufopReqData)) err = json.Unmarshal(ufopReqData, &ufopReq) if err != nil { writeJsonError(w, 500, "parse ufop request body error") return } ufopReq.ReqId = reqId ufopResult, ufopResultType, ufopResultContentType, err = handleJob(ufopReq, this.cfg.UfopPrefix, this.jobHandlers) if err != nil { ufopErr := UfopError{ Request: ufopReq, Error: err.Error(), } logBytes, _ := json.Marshal(&ufopErr) log.Error(reqId, string(logBytes)) writeJsonError(w, 400, err.Error()) } else { switch ufopResultType { case RESULT_TYPE_JSON: writeJsonResult(w, 200, ufopResult) case RESULT_TYPE_OCTECT_BYTES: writeOctetResultFromBytes(w, ufopResult, ufopResultContentType) case RESULT_TYPE_OCTECT_FILE: writeOctetResultFromFile(w, ufopResult, ufopResultContentType) case RESULT_TYPE_OCTECT_URL: writeOctectResultFromUrl(w, ufopResult) } } }
func NewRsf(baseDir string) (*Rsf, error) {
	// load config
	conf := Config{}
	err := loadJsonFile(&conf, baseDir+"/qrsb.conf")
	if err != nil {
		log.Error("load qrsb.conf file error!")
		return nil, err
	}
	log.Info("use conf of:", conf)
	t := digest_auth.NewTransport(conf.AccessKey, conf.SecretKey, nil)
	rsfs := rsf.New(t, conf.RsfHost)
	RS_HOST = conf.RsHost
	rss := rs.New(t)
	return &Rsf{&conf, baseDir, rsfs, rss}, nil
}
func (this *DirCache) Cache(cacheRootPath string, cacheResultFile string) (fileCount int) {
	if _, err := os.Stat(cacheResultFile); err != nil {
		log.Info(fmt.Sprintf("No cache file `%s' found, will create one", cacheResultFile))
	} else {
		os.Remove(cacheResultFile + ".old")
		if rErr := os.Rename(cacheResultFile, cacheResultFile+".old"); rErr != nil {
			log.Error(fmt.Sprintf("Unable to rename cache file, please manually delete `%s' and `%s.old'",
				cacheResultFile, cacheResultFile))
			log.Error(rErr)
			return
		}
	}
	cacheResultFileH, err := os.OpenFile(cacheResultFile, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0666)
	if err != nil {
		log.Error(fmt.Sprintf("Failed to open cache file `%s'", cacheResultFile))
		return
	}
	defer cacheResultFileH.Close()
	bWriter := bufio.NewWriter(cacheResultFileH)

	walkStart := time.Now()
	log.Debug(fmt.Sprintf("Walk `%s' start from `%s'", cacheRootPath, walkStart.String()))
	filepath.Walk(cacheRootPath, func(path string, fi os.FileInfo, err error) error {
		var retErr error
		if err != nil {
			// skip entries the walk could not visit; fi may be nil here
			return nil
		}
		if !fi.IsDir() {
			relPath := strings.TrimPrefix(strings.TrimPrefix(path, cacheRootPath), string(os.PathSeparator))
			fsize := fi.Size()
			// last-modified time, in units of 100ns
			flmd := fi.ModTime().UnixNano() / 100
			fmeta := fmt.Sprintln(fmt.Sprintf("%s\t%d\t%d", relPath, fsize, flmd))
			if _, err := bWriter.WriteString(fmeta); err != nil {
				log.Error(fmt.Sprintf("Failed to write data `%s' to cache file", fmeta))
				retErr = err
			}
			fileCount += 1
		}
		return retErr
	})
	if err := bWriter.Flush(); err != nil {
		log.Error(fmt.Sprintf("Failed to flush to cache file `%s'", cacheResultFile))
	}
	walkEnd := time.Now()
	log.Debug(fmt.Sprintf("Walk `%s' end at `%s'", cacheRootPath, walkEnd.String()))
	log.Debug(fmt.Sprintf("Walk `%s' last for `%s'", cacheRootPath, time.Since(walkStart)))
	return
}
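// Illustration only: each cache line written above has the form
// "<relPath>\t<size>\t<modTime in 100ns units>". parseCacheLine is a
// hypothetical helper, not part of the original code, showing how such a line
// could be read back.
func parseCacheLine(line string) (relPath string, fsize, flmd int64, err error) {
	parts := strings.Split(strings.TrimSuffix(line, "\n"), "\t")
	if len(parts) != 3 {
		err = fmt.Errorf("malformed cache line: %q", line)
		return
	}
	relPath = parts[0]
	if fsize, err = strconv.ParseInt(parts[1], 10, 64); err != nil {
		return
	}
	flmd, err = strconv.ParseInt(parts[2], 10, 64)
	return
}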
func (this *LiveServer) Listen() (err error) {
	log.Info("start server listening")
	endPoint := fmt.Sprintf("%s:%d", this.cfg.ListenHost, this.cfg.ListenPort)
	server := &http.Server{
		Addr:           endPoint,
		ReadTimeout:    time.Duration(this.cfg.ReadTimeout) * time.Second,
		WriteTimeout:   time.Duration(this.cfg.WriteTimeout) * time.Second,
		MaxHeaderBytes: this.cfg.MaxHeaderBytes,
		Handler:        this,
	}
	err = server.ListenAndServe()
	return
}
func pkgZip(root string, files []string) (path string, err error) {
	tmpFile, err := utils.TempFile("files", "tmp-", "-"+filepath.Base(root)+".zip")
	if err != nil {
		return
	}
	log.Info("package to zip:", tmpFile)
	z, err := zip.Create(tmpFile)
	if err != nil {
		return
	}
	for _, f := range files {
		var save string
		if f == "" {
			continue
		}
		// files under root are stored with their path relative to root;
		// anything else (e.g. a binary given by absolute path) is stored under its base name
		if strings.HasPrefix(f, root) {
			save = f[len(root):]
		} else {
			save = filepath.Base(f)
		}
		info, er := os.Stat(f)
		if er != nil {
			continue
		}
		log.Debug("add", save, f)
		if info.IsDir() {
			if err = z.AddDir(save, f); err != nil {
				return
			}
		} else {
			if err = z.AddFile(save, f); err != nil {
				return
			}
		}
	}
	if err = z.Close(); err != nil {
		log.Error(err)
		return
	}
	return tmpFile, nil
}
func InitOrm(cfg *config.OrmConfig) (err error) {
	log.Info("init orm")
	err = orm.RegisterDataBase("default", cfg.DriverName, cfg.DataSource, cfg.MaxIdleConn, cfg.MaxOpenConn)
	if err != nil {
		return
	}
	orm.Debug = cfg.DebugMode
	orm.RegisterModel(new(User))
	orm.RegisterModel(new(Session))
	orm.RegisterModel(new(LiveStream))
	orm.RegisterModel(new(LiveVideo))
	return
}
func (this *UfopServer) serveUfop(w http.ResponseWriter, req *http.Request) { //check method if req.Method != "POST" { writeJsonError(w, 405, "method not allowed") return } defer req.Body.Close() var err error var ufopReq UfopRequest var ufopResult interface{} var ufopResultContentType string ufopReqData, err := ioutil.ReadAll(req.Body) if err != nil { writeJsonError(w, 500, "read ufop request body error") return } log.Info(string(ufopReqData)) err = json.Unmarshal(ufopReqData, &ufopReq) if err != nil { writeJsonError(w, 500, "parse ufop request body error") return } ufopResult, ufopResultContentType, err = handleJob(ufopReq, this.cfg.UfopPrefix, this.jobHandlers) if err != nil { ufopErr := UfopError{ Request: ufopReq, Error: err.Error(), } logBytes, _ := json.Marshal(&ufopErr) log.Error(string(logBytes)) writeJsonError(w, 400, err.Error()) } else { switch ufopResultContentType { case "application/json": writeJsonResult(w, 200, ufopResult) default: writeOctetResultWithMime(w, 200, ufopResult, ufopResultContentType) } } }
func initLog(logLevel int, logFile string) (err error) {
	log.Info("init log")
	log.SetOutputLevel(logLevel)
	var logFp *os.File
	if logFile == "stdout" {
		logFp = os.Stdout
	} else {
		logFp, err = os.OpenFile(logFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
		if err != nil {
			return
		}
	}
	log.SetOutput(logFp)
	return
}
// CommitRepoAction adds a new action for committing to a repository.
func CommitRepoAction(userId, repoUserId int64, userName, actEmail string,
	repoId int64, repoUserName, repoName string, refName string, commit *base.PushCommits) error {
	// log.Trace("action.CommitRepoAction(start): %d/%s", userId, repoName)

	opType := OP_COMMIT_REPO
	// Check whether this is a tag push or a branch push.
	if strings.HasPrefix(refName, "refs/tags/") {
		opType = OP_PUSH_TAG
		commit = &base.PushCommits{}
	}
	refName = git.RefEndName(refName)

	bs, err := json.Marshal(commit)
	if err != nil {
		qlog.Error("action.CommitRepoAction(json): %d/%s", repoUserId, repoName)
		return err
	}

	// Change repository bare status and update the last-updated time.
	repo, err := GetRepositoryByName(repoUserId, repoName)
	if err != nil {
		qlog.Error("action.CommitRepoAction(GetRepositoryByName): %d/%s", repoUserId, repoName)
		return err
	}
	repo.IsBare = false
	if err = UpdateRepository(repo); err != nil {
		qlog.Error("action.CommitRepoAction(UpdateRepository): %d/%s", repoUserId, repoName)
		return err
	}

	if err = NotifyWatchers(&Action{ActUserId: userId, ActUserName: userName, ActEmail: actEmail,
		OpType: opType, Content: string(bs), RepoId: repoId, RepoUserName: repoUserName,
		RepoName: repoName, RefName: refName, IsPrivate: repo.IsPrivate}); err != nil {
		qlog.Error("action.CommitRepoAction(notify watchers): %d/%s", userId, repoName)
		return err
	}
	qlog.Info("action.CommitRepoAction(end): %d/%s", repoUserId, repoName)
	return nil
}
func root(rw http.ResponseWriter, req *http.Request) {
	// log when the client goes away
	notify := rw.(http.CloseNotifier).CloseNotify()
	go func() {
		<-notify
		log.Println("closed>", req.RemoteAddr)
	}()
	log.Info(req.RemoteAddr)
	// earlier versions streamed "hello." on a 2s ticker (see hello above); now write once
	rw.Write(ToByte("hello."))
}
func (p *Program) RunWithRetry() {
	for p.retry = 0; p.retry < p.Info.StartRetries; p.retry++ {
		// wait for the program to exit or for a stop request
		select {
		case err := <-GoFunc(p.Run):
			log.Info(p.Info.Name, err)
		case <-p.stopc:
			return
		}
		// enter RETRY_WAIT before the next attempt, unless this was the last one
		if p.retry+1 < p.Info.StartRetries {
			p.setStatus(ST_RETRYWAIT)
			select {
			case <-p.stopc:
				return
			case <-time.After(time.Second * 2):
			}
		}
	}
	p.setStatus(ST_FATAL)
}
func (s *Server) doUnlock(w http.ResponseWriter, r *http.Request) {
	/*
		if s.ReadOnly {
			w.WriteHeader(StatusForbidden)
			return
		}
		if s.isLockedRequest(r) {
			w.WriteHeader(StatusLocked)
			return
		}
		// TODO: unlock
		w.WriteHeader(StatusNotImplemented)
		return
	*/
	qlog.Info("UNLOCKing resource", r.Header)
	uri := r.RequestURI

	// the lock token must contain a dash
	lockToken := r.Header.Get("Lock-Token")
	if !strings.Contains(lockToken, "-") {
		w.WriteHeader(400)
		return
	}

	ifHeader := r.Header.Get("If")
	token := tokenFinder(lockToken)
	if s.isLocked(uri, ifHeader) {
		s.delLock(token)
	}
	w.WriteHeader(204) // No Content
}
func handleRevConn(pURL *url.URL, lis net.Listener) {
	switch pURL.Scheme {
	case "tcp":
		for {
			rconn, err := lis.Accept()
			if err != nil {
				log.Errorf("accept error: %v", err)
				return
			}
			log.Info("dial local:", pURL)
			lconn, err := net.Dial("tcp", pURL.Host)
			if err != nil {
				log.Println(err)
				rconn.Close()
				break
			}
			// start forwarding through the local proxy
			pc := &ProxyConn{
				lconn: lconn,
				rconn: rconn,
				stats: proxyStats,
			}
			go pc.start()
		}
	case "http", "https":
		remote := pURL
		rp := &httputil.ReverseProxy{
			Director: func(req *http.Request) {
				req.Host = remote.Host
				req.URL.Scheme = remote.Scheme
				req.URL.Host = remote.Host
			},
		}
		http.Serve(lis, rp)
	default:
		log.Println("Unknown protocol:", pURL.Scheme)
	}
}
func serveRevConn(proto ProxyProtocol, pAddr string, lis net.Listener) error {
	switch proto {
	case TCP:
		for {
			rconn, err := lis.Accept()
			if err != nil {
				log.Errorf("accept error: %v", err)
				return err
			}
			log.Info("local dial tcp", pAddr)
			lconn, err := net.Dial("tcp", pAddr)
			if err != nil {
				log.Warn(err)
				rconn.Close()
				return err
			}
			// start forwarding through the local proxy
			pc := &proxyConn{
				lconn: lconn,
				rconn: rconn,
				stats: proxyStats,
			}
			go pc.start()
		}
	case HTTP:
		rp := &httputil.ReverseProxy{
			Director: func(req *http.Request) {
				req.Host = pAddr
				req.URL.Scheme = "http"
				req.URL.Host = pAddr
			},
		}
		return http.Serve(lis, rp)
	default:
		log.Println("Unknown protocol:", proto)
		return ErrUnknownProtocol
	}
}
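// ProxyConn/proxyConn and proxyStats are not shown in this collection. A
// minimal sketch of what such a bidirectional forwarder could look like,
// assuming start() only needs to pipe bytes both ways and close both
// connections when either direction ends; the stats bookkeeping is omitted and
// the type name here is made up for the example.
type proxyConnSketch struct {
	lconn, rconn net.Conn
}

func (p *proxyConnSketch) start() {
	defer p.lconn.Close()
	defer p.rconn.Close()
	done := make(chan struct{}, 2)
	go func() { io.Copy(p.lconn, p.rconn); done <- struct{}{} }()
	go func() { io.Copy(p.rconn, p.lconn); done <- struct{}{} }()
	<-done // once one direction finishes, the deferred Close calls tear down both sides
}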