// publish archives (or moves) the built file, registers a local download
// address in the history map immediately, and uploads the archive to the
// CDN in the background. The returned addr serves the file from this host
// while the upload is in flight.
func (b *Builder) publish(file string) (addr string, err error) {
	var path string
	if b.framework == "" {
		// no framework: pack the binary together with the .gobuild.yml assembly
		path, err = b.pack([]string{file}, filepath.Join(b.srcDir, ".gobuild.yml"))
	} else {
		// framework build already produced an archive; just move it into files/
		path, err = utils.TempFile("files", "tmp-", "-"+filepath.Base(file))
		if err != nil {
			return
		}
		err = sh.Command("mv", "-v", file, path).Run()
	}
	if err != nil {
		return
	}
	// file ext <zip|tar.gz>
	suffix := ".zip"
	if strings.HasSuffix(path, ".tar.gz") {
		suffix = ".tar.gz"
	}
	go func() {
		// on exit: drop the temporary history entry and, after a grace
		// period, delete the local archive.
		defer func() {
			log.Debug("delete history:", b.tag)
			delete(history, b.tag)
			go func() {
				// leave 5min gap for unfinished downloading.
				time.Sleep(time.Minute * 5)
				//time.Sleep(time.Second * 5)
				os.Remove(path)
			}()
		}()
		// upload (local in development, CDN otherwise)
		var cdnAddr string
		var err error // shadows outer err on purpose; goroutine cannot report back
		if *environment == "development" {
			cdnAddr, err = UploadLocal(path)
		} else {
			name := fmt.Sprintf("%s-%s-%s-%s", filepath.Base(b.project), b.os, b.arch, b.ref) + suffix
			cdnAddr, err = UploadFile(path, uuid.New()+"/"+name)
		}
		if err != nil {
			return
		}
		log.Debug("upload ok:", cdnAddr)
		// persist the build record together with captured build output
		output := ""
		if b.wbc != nil {
			output = string(b.wbc.Bytes())
		}
		err = database.AddFile(b.pid, b.tag, cdnAddr, output)
		if err != nil {
			log.Error(err)
		}
	}()
	// serve from this host until the CDN upload completes
	tmpAddr := "http://" + opts.Hostname + "/" + path
	history[b.tag] = tmpAddr
	return tmpAddr, nil
}
// Listen and forward connections func NewTcpProxyListener(tunnel *Tunnel, port int) (listener *net.TCPListener, err error) { var laddr *net.TCPAddr if port != 0 { laddr, _ = net.ResolveTCPAddr("tcp", ":"+strconv.Itoa(port)) listener, err = net.ListenTCP("tcp", laddr) } else { laddr, listener, err = freeport.ListenTCP() } if err != nil { return nil, err } port = laddr.Port //port, listener, err = listenTcpInRangePort(port, TCP_MIN_PORT, TCP_MAX_PORT) //if err != nil { //return nil, err //} // hook here err = hook(HOOK_TCP_POST_CONNECT, []string{ "PORT=" + strconv.Itoa(port), "REMOTE_ADDR=" + tunnel.wsconn.RemoteAddr().String(), "CLIENT_ADDRESS=" + tunnel.wsconn.RemoteAddr().String(), "REMOTE_DATA=" + tunnel.data, }) if err != nil { listener.Close() return } go func() { for { rconn, err := listener.AcceptTCP() if err != nil { log.Warn(err) break } // find proxy to where log.Debug("Receive new connections from", rconn.RemoteAddr()) lconn, err := tunnel.RequestNewConn(rconn.RemoteAddr().String()) if err != nil { log.Debug("request new conn err:", err) rconn.Close() continue } log.Debug("request new conn:", lconn, err) pc := &ProxyConn{ lconn: lconn, rconn: rconn, stats: proxyStats, } go pc.start() } }() return listener, nil }
func StartAgent(pURL *url.URL, subdomain, serverAddr string, remoteListenPort int, data string) { log.Debug("start proxy", pURL) if !regexp.MustCompile("^http[s]://").MatchString(serverAddr) { serverAddr = "http://" + serverAddr } sURL, err := url.Parse(serverAddr) if err != nil { log.Fatal(err) } sURL.Path = "/ws" log.Debug("server host:", sURL.Host) conn, err := net.Dial("tcp", sURL.Host) if err != nil { log.Fatal(err) } // specify remote listen port sURL.Scheme = "ws" query := sURL.Query() query.Add("protocol", pURL.Scheme) query.Add("subdomain", subdomain) query.Add("data", data) if remoteListenPort != 0 { query.Add("port", strconv.Itoa(remoteListenPort)) } sURL.RawQuery = query.Encode() log.Debug(sURL) wsclient, _, err := websocket.NewClient(conn, sURL, nil, 1024, 1024) if err != nil { log.Fatal(err) } defer wsclient.Close() go idleWsSend(wsclient) for { var msg Msg if err := wsclient.ReadJSON(&msg); err != nil { fmt.Println("client exit: " + err.Error()) break } log.Debug("recv:", msg) // sURL: serverURL rnl := NewRevNetListener() go handleRevConn(pURL, rnl) handleWsMsg(msg, sURL, rnl) } }
// ServeHTTP dispatches a request: if a reverse proxy is registered for the
// request's Host it handles the request; otherwise the embedded handler
// lookup serves it.
func (p *ProxyServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// http://stackoverflow.com/questions/6899069/why-are-request-url-host-and-scheme-blank-in-the-development-server
	r.URL.Scheme = "http" // ??
	r.URL.Host = r.Host   // ??
	log.Debug("URL path:", r.URL.Path)
	log.Debugf("proxy lists: %v", p.revProxies)
	if rpx, ok := p.revProxies[r.Host]; ok {
		log.Debug("server http rev proxy")
		rpx.ServeHTTP(w, r)
		return
	}
	// no host-specific proxy registered: fall back to the default handler
	h, _ := p.Handler(r)
	h.ServeHTTP(w, r)
}
func (this *DirCache) Cache(cacheRootPath string, cacheResultFile string) (fileCount int) { if _, err := os.Stat(cacheResultFile); err != nil { log.Info(fmt.Sprintf("No cache file `%s' found, will create one", cacheResultFile)) } else { os.Remove(cacheResultFile + ".old") if rErr := os.Rename(cacheResultFile, cacheResultFile+".old"); rErr != nil { log.Error(fmt.Sprintf("Unable to rename cache file, plz manually delete `%s' and `%s.old'", cacheResultFile, cacheResultFile)) log.Error(rErr) return } } cacheResultFileH, err := os.OpenFile(cacheResultFile, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0666) if err != nil { log.Error(fmt.Sprintf("Failed to open cache file `%s'", cacheResultFile)) return } defer cacheResultFileH.Close() bWriter := bufio.NewWriter(cacheResultFileH) walkStart := time.Now() log.Debug(fmt.Sprintf("Walk `%s' start from `%s'", cacheRootPath, walkStart.String())) filepath.Walk(cacheRootPath, func(path string, fi os.FileInfo, err error) error { var retErr error //log.Debug(fmt.Sprintf("Walking through `%s'", cacheRootPath)) if !fi.IsDir() { relPath := strings.TrimPrefix(strings.TrimPrefix(path, cacheRootPath), string(os.PathSeparator)) fsize := fi.Size() //Unit is 100ns flmd := fi.ModTime().UnixNano() / 100 //log.Debug(fmt.Sprintf("Hit file `%s' size: `%d' mode time: `%d`", relPath, fsize, flmd)) fmeta := fmt.Sprintln(fmt.Sprintf("%s\t%d\t%d", relPath, fsize, flmd)) if _, err := bWriter.WriteString(fmeta); err != nil { log.Error(fmt.Sprintf("Failed to write data `%s' to cache file", fmeta)) retErr = err } fileCount += 1 } return retErr }) if err := bWriter.Flush(); err != nil { log.Error(fmt.Sprintf("Failed to flush to cache file `%s'", cacheResultFile)) } walkEnd := time.Now() log.Debug(fmt.Sprintf("Walk `%s' end at `%s'", cacheRootPath, walkEnd.String())) log.Debug(fmt.Sprintf("Walk `%s' last for `%s'", cacheRootPath, time.Since(walkStart))) return }
// Register scans self's exported methods for Python-callable wrappers:
// methods named "Py_<name>" whose signature matches the binary (2-in) or
// ternary (3-in) call-func type. Each match is wrapped in a Closure and
// registered into dict under <name> (logged as nsprefix+<name>). The
// returned ctx keeps the closures reachable so they are not collected.
func Register(dict *Dict, nsprefix string, self interface{}) (ctx RegisterCtx) {
	typ := reflect.TypeOf(self)
	selfv := reflect.ValueOf(self)
	nmethod := typ.NumMethod()
	for i := 0; i < nmethod; i++ {
		method := typ.Method(i)
		mtype := method.Type
		mname := method.Name
		// skip unexported methods (non-empty PkgPath) and non-Py_ names
		if mtype.PkgPath() != "" || !strings.HasPrefix(mname, "Py_") {
			continue
		}
		nin := mtype.NumIn()
		name := mname[3:] // strip the "Py_" prefix
		fullname := nsprefix + name
		if nin == 3 && sigMatches(mtype, typTernaryCallFunc) || nin == 2 && sigMatches(mtype, typBinaryCallFunc) {
			closure := &Closure{selfv, method.Func}
			f := closure.NewFunction(fullname, nin, "")
			dict.SetItemString(name, f)
			// the dict now holds its own reference; drop ours
			f.Decref()
			ctx = append(ctx, closure)
			log.Debug("Register", fullname)
		} else {
			log.Warnf("Invalid signature of method %s, register failed", fullname)
			continue
		}
	}
	return
}
// pipe copies bytes from src to dst until a read or write error, updating
// per-connection and shared stats counters as it goes. The terminating
// error is delivered on the returned channel (buffered, so the copy
// goroutine never blocks on send).
// NOTE(review): the stats counters are incremented without synchronization
// from this goroutine — confirm callers tolerate the race.
func (p *ProxyConn) pipe(src, dst net.Conn) chan error {
	//data direction
	errch := make(chan error, 1)
	islocal := src == p.lconn
	//directional copy (64k buffer)
	buff := make([]byte, 0xffff)
	go func() {
		for {
			n, err := src.Read(buff)
			if err != nil {
				errch <- err
				return
			}
			b := buff[:n]
			//write out result
			n, err = dst.Write(b)
			if err != nil {
				errch <- err
				log.Printf("Write failed '%s'\n", err)
				return
			}
			log.Debug("pipe --> local:", islocal, "write:", n) //, string(b[:n]))
			// account bytes to the direction they traveled
			if islocal {
				p.sentBytes += uint64(n)
				p.stats.sentBytes += uint64(n)
			} else {
				p.receivedBytes += uint64(n)
				p.stats.receivedBytes += uint64(n)
			}
		}
	}()
	return errch
}
// WsBuildServer streams build output over a websocket. It first receives a
// RecvMsg describing the requested build, replies with the output buffered
// so far, then relays the live stream in small SendMsg chunks until the
// stream ends or a send misses its 1s write deadline.
func WsBuildServer(ws *websocket.Conn) {
	defer ws.Close()
	var err error
	recvMsg := new(RecvMsg)
	sendMsg := new(SendMsg)
	err = websocket.JSON.Receive(ws, &recvMsg)
	if err != nil {
		// report the decode failure to the client before giving up
		sendMsg.Error = err
		websocket.JSON.Send(ws, sendMsg)
		utils.Debugf("read json error: %v", err)
		return
	}
	name := ws.RemoteAddr().String()
	log.Debug(name)
	sout := NewStreamOutput(recvMsg.Project, recvMsg.Branch, recvMsg.GOOS, recvMsg.GOARCH)
	defer sout.Close()
	// first message carries everything buffered before the client attached
	sendMsg.Data = sout.BufferStr
	err = websocket.JSON.Send(ws, sendMsg)
	if err != nil {
		utils.Debugf("send first sendMsg error: %v", err)
		return
	}
	// send the rest outputs
	buf := make([]byte, 100)
	for {
		n, err := sout.Reader.Read(buf)
		if n > 0 {
			sendMsg.Data = string(buf[:n])
			deadline := time.Now().Add(time.Second * 1)
			ws.SetWriteDeadline(deadline)
			if er := websocket.JSON.Send(ws, sendMsg); er != nil {
				log.Debug("write failed timeout, user logout")
				return
			}
		}
		if err != nil {
			// EOF or read failure: stream finished
			return
		}
	}
}
func (this *ListBucket) List(bucket string, prefix string, listResultFile string) (retErr error) { fp, openErr := os.OpenFile(listResultFile, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666) if openErr != nil { retErr = openErr log.Error(fmt.Sprintf("Failed to open list result file `%s'", listResultFile)) return } defer fp.Close() bw := bufio.NewWriter(fp) mac := digest.Mac{this.AccessKey, []byte(this.SecretKey)} client := rsf.New(&mac) marker := "" limit := 1000 run := true maxRetryTimes := 5 retryTimes := 1 for run { entries, markerOut, err := client.ListPrefix(nil, bucket, prefix, marker, limit) if err != nil { if err == io.EOF { run = false } else { log.Error(fmt.Sprintf("List error for marker `%s'", marker), err) if retryTimes <= maxRetryTimes { log.Debug(fmt.Sprintf("Retry list for marker `%s' for `%d' time", marker, retryTimes)) retryTimes += 1 continue } else { log.Error(fmt.Sprintf("List failed too many times for `%s'", marker)) break } } } else { retryTimes = 1 if markerOut == "" { run = false } else { marker = markerOut } } //append entries for _, entry := range entries { lineData := fmt.Sprintf("%s\t%d\t%s\t%d\t%s\t%s\r\n", entry.Key, entry.Fsize, entry.Hash, entry.PutTime, entry.MimeType, entry.EndUser) _, wErr := bw.WriteString(lineData) if wErr != nil { log.Error(fmt.Sprintf("Write line data `%s' to list result file failed.", lineData)) } } fErr := bw.Flush() if fErr != nil { log.Error("Flush data to list result file error", err) } } return }
func closeWrite(c net.Conn) error { if x, ok := c.(interface { CloseWrite() error }); ok { return x.CloseWrite() } else { log.Debug("force close", c) return c.Close() } }
// ListBucket pages through the Aliyun OSS bucket under this.Prefix and
// writes one "key\tsize\tmodTime" line per object into listResultFile,
// with the prefix stripped from keys and timestamps in 100ns units.
// Each failed page fetch is retried up to maxRetryTimes.
func (this *AliListBucket) ListBucket(listResultFile string) (err error) {
	//open result file
	mode := os.O_CREATE | os.O_TRUNC | os.O_WRONLY
	fp, openErr := os.OpenFile(listResultFile, mode, 0666)
	if openErr != nil {
		err = openErr
		return
	}
	defer fp.Close()
	bw := bufio.NewWriter(fp)
	//list bucket by prefix
	marker := ""
	prefixLen := len(this.Prefix)
	ossClient := oss.NewClient(this.DataCenter, this.AccessKeyId, this.AccessKeySecret, 0)
	maxRetryTimes := 5
	retryTimes := 1
	for {
		lbr, lbrErr := ossClient.GetBucket(this.Bucket, this.Prefix, marker, "", "")
		if lbrErr != nil {
			err = lbrErr
			log.Error("Parse list result error, ", "marker=[", marker, "]", lbrErr)
			if retryTimes <= maxRetryTimes {
				log.Debug("Retry marker=", marker, "] for ", retryTimes, "time...")
				retryTimes += 1
				continue
			} else {
				// retries exhausted: give up with err set to the last failure
				break
			}
		} else {
			retryTimes = 1
		}
		for _, object := range lbr.Contents {
			// fall back to the current time when the timestamp is unparseable
			lmdTime, lmdPErr := time.Parse("2006-01-02T15:04:05.999Z", object.LastModified)
			if lmdPErr != nil {
				log.Error("Parse object last modified error, ", lmdPErr)
				lmdTime = time.Now()
			}
			bw.WriteString(fmt.Sprintln(fmt.Sprintf("%s\t%d\t%d", object.Key[prefixLen:], object.Size, lmdTime.UnixNano()/100)))
		}
		if !lbr.IsTruncated {
			break
		}
		marker = lbr.NextMarker
	}
	fErr := bw.Flush()
	if fErr != nil {
		log.Error("Write data to buffer writer failed", fErr)
		err = fErr
		return
	}
	return err
}
// ServeHTTP dispatches a request: a Host with a registered reverse proxy is
// handled by that proxy; anything else falls through to the embedded
// handler lookup.
func (p *ProxyServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	//log.Println("request info:", r.Method, r.Host, r.RequestURI)
	//host, _, _ := net.SplitHostPort(r.Host)
	// http://stackoverflow.com/questions/6899069/why-are-request-url-host-and-scheme-blank-in-the-development-server
	r.URL.Scheme = "http" // ??
	r.URL.Host = r.Host   // ??
	log.Debug("URL path:", r.URL.Path)
	log.Debugf("proxy lists: %v", p.revProxies)
	if rpx, ok := p.revProxies[r.Host]; ok {
		log.Debug("server http rev proxy")
		rpx.ServeHTTP(w, r)
		return
	}
	//if p.domain != host {
	//	http.Error(w, fmt.Sprintf("%s not ready", host), 504)
	//	return
	//}
	// no host-specific proxy registered: fall back to the default handler
	h, _ := p.Handler(r)
	h.ServeHTTP(w, r)
}
// start bridges the local and remote connections: it disables Nagle on
// both TCP ends, runs one pipe per direction, half-closes each direction
// as its pipe finishes, and returns once both directions are done. Both
// connections are fully closed on return.
func (p *proxyConn) start() {
	defer p.lconn.Close()
	defer p.rconn.Close()
	// FIXME: may need to set a flag
	if tcpconn, ok := p.lconn.(*net.TCPConn); ok {
		tcpconn.SetNoDelay(true)
	}
	if tcpconn, ok := p.rconn.(*net.TCPConn); ok {
		tcpconn.SetNoDelay(true)
	}
	// p.lconn.SetNoDelay(true)
	// p.rconn.SetNoDelay(true)
	//display both ends
	// log.Printf("Opened %s >>> %s", p.lconn.RemoteAddr().String(), p.rconn.RemoteAddr().String())
	//bidirectional copy
	wg := sync.WaitGroup{}
	wg.Add(2)
	go func() {
		ch1 := p.pipe(p.lconn, p.rconn)
		<-ch1
		// local->remote finished: shut down that direction on both conns
		closeRead(p.lconn)
		closeWrite(p.rconn)
		log.Debug("close local -> remote")
		wg.Done()
	}()
	go func() {
		ch2 := p.pipe(p.rconn, p.lconn)
		<-ch2
		closeRead(p.rconn)
		closeWrite(p.lconn)
		log.Debug("close remote -> local")
		wg.Done()
	}()
	wg.Wait()
	//wait for close...
	// log.Printf("Closed (%d bytes sent, %d bytes recieved)", p.sentBytes, p.receivedBytes)
}
func (b *Builder) pack(bins []string, rcfile string) (path string, err error) { log.Debug(bins) log.Debug(rcfile) data, err := ioutil.ReadFile(rcfile) if err != nil { log.Debug("use default rc") data, err = ioutil.ReadFile("public/gobuildrc") if err != nil { log.Error(err) } } ass := new(Assembly) err = goyaml.Unmarshal(data, ass) if err != nil { return } dir := filepath.Dir(rcfile) fs, err := ioutil.ReadDir(dir) if err != nil { return } var includes = bins // this may change slice bins for _, f := range fs { var ok = false for _, patten := range ass.Includes { if match(patten, f.Name()) { ok = true break } } if ok { includes = append(includes, filepath.Join(dir, f.Name())) } } return pkgZip(dir, includes) }
func runShellScript(ctx *macaron.Context, script string) ([]byte, error) { ctx.Req.ParseForm() //log.Println(ctx.Req.Form) envs := map[string]string{ "REQ_PATH": ctx.Req.URL.Path, "REQ_URI": ctx.Req.RequestURI, "REQ_METHOD": ctx.Req.Method, } for key, vals := range ctx.Req.Form { log.Debug("Form value:", key, vals) envs["FORM_"+key] = vals[0] } for key, vals := range ctx.Req.PostForm { log.Debug("Form value:", key, vals) envs["POST_FORM_"+key] = vals[0] } environ := os.Environ() for key, val := range envs { environ = append(environ, key+"="+val) } cmd := exec.Command("/bin/bash", script) //.Output() cmd.Env = environ return cmd.Output() }
func batchChgm(client rs.Client, entries []qshell.ChgmEntryPath) { ret, err := qshell.BatchChgm(client, entries) if err != nil { log.Error("Batch chgm error", err) } if len(ret) > 0 { for i, entry := range entries { item := ret[i] if item.Data.Error != "" { log.Error(fmt.Sprintf("Chgm '%s' => '%s' Failed, Code :%d", entry.Key, entry.MimeType, item.Code)) } else { log.Debug(fmt.Sprintf("Chgm '%s' => '%s' Success, Code :%d", entry.Key, entry.MimeType, item.Code)) } } } }
func batchDelete(client rs.Client, entries []rs.EntryPath) { ret, err := qshell.BatchDelete(client, entries) if err != nil { log.Error("Batch delete error", err) } if len(ret) > 0 { for i, entry := range entries { item := ret[i] if item.Data.Error != "" { log.Error(fmt.Sprintf("Delete '%s' => '%s' Failed, Code: %d", entry.Bucket, entry.Key, item.Code)) } else { log.Debug(fmt.Sprintf("Delete '%s' => '%s' Success, Code: %d", entry.Bucket, entry.Key, item.Code)) } } } }
func batchRename(client rs.Client, entries []qshell.RenameEntryPath) { ret, err := qshell.BatchRename(client, entries) if err != nil { log.Error("Batch rename error", err) } if len(ret) > 0 { for i, entry := range entries { item := ret[i] if item.Data.Error != "" { log.Error(fmt.Sprintf("Rename '%s' => '%s' Failed, Code :%d", entry.OldKey, entry.NewKey, item.Code)) } else { log.Debug(fmt.Sprintf("Rename '%s' => '%s' Success, Code :%d", entry.OldKey, entry.NewKey, item.Code)) } } } }
// get downloads or updates the project source into b.srcDir via `go get`
// and git, records the checked-out commit sha in b.sha, and loads the
// project's .gobuild assembly config into b.rc (falling back to
// public/gobuildrc when the project has none).
func (b *Builder) get() (err error) {
	log.Debug("start get src to:", b.srcDir)
	exists := beeutils.FileExists(b.srcDir)
	b.sh.Command("go", "version").Run()
	if !exists {
		// first build of this project: fetch sources without compiling (-d)
		err = b.sh.Command("go", "get", "-v", "-d", b.project).Run()
		if err != nil {
			return
		}
	}
	b.sh.SetDir(b.srcDir)
	// "-" is shorthand for the default branch
	if b.ref == "-" {
		b.ref = "master"
	}
	// get code from remote
	if err = b.sh.Command("git", "fetch", "origin").Run(); err != nil {
		return
	}
	// change branch
	if err = b.sh.Command("git", "checkout", "-q", b.ref).Run(); err != nil {
		return
	}
	// update code; a merge failure is only warned about, not fatal
	if err = b.sh.Command("git", "merge", "origin/"+b.ref).Run(); err != nil {
		log.Warn("git merge error:", err)
		//return
	}
	// get sha
	out, err := sh.Command("git", "rev-parse", "HEAD", sh.Dir(b.srcDir)).Output()
	if err != nil {
		return
	}
	b.sha = strings.TrimSpace(string(out))
	// parse .gobuild
	b.rc = new(Assembly)
	rcfile := "public/gobuildrc"
	if b.sh.Test("f", ".gobuild") {
		rcfile = filepath.Join(b.srcDir, ".gobuild")
	}
	data, err := ioutil.ReadFile(rcfile)
	if err != nil {
		return
	}
	err = goyaml.Unmarshal(data, b.rc)
	return
}
func pkgZip(root string, files []string) (path string, err error) { log.Info("package to zip:", path) tmpFile, err := utils.TempFile("files", "tmp-", "-"+filepath.Base(root)+".zip") if err != nil { return } z, err := zip.Create(tmpFile) if err != nil { return } for _, f := range files { var save string if f == "" { continue } // binary file use abspath //fmt.Println(root, f) if strings.HasSuffix(f, root) { save = f[len(root):] } else { save = filepath.Base(f) } info, er := os.Stat(f) if er != nil { continue } log.Debug("add", save, f) if info.IsDir() { if err = z.AddDir(save, f); err != nil { return } } else { if err = z.AddFile(save, f); err != nil { return } } } if err = z.Close(); err != nil { log.Error(err) return } return tmpFile, nil }
func deployPackage(pkgName, path string, binDir string) error { cmd := exec.Command("unzip", "-o", "-d", getInsPath("opt", pkgName), path) log.Debug("zip command:", cmd.Args) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr if err := cmd.Run(); err != nil { return err } baseName := filepath.Base(pkgName) symlink := filepath.Join(GOBIN, baseName) prompt("Symlink %v", symlink) // for linux and darwin os.Remove(symlink) // TODO: need to resolve multi binaries return os.Symlink(getInsPath("opt", pkgName, filepath.Base(pkgName)), symlink) }
func batchCopy(client rs.Client, entries []qshell.CopyEntryPath) { ret, err := qshell.BatchCopy(client, entries) if err != nil { log.Error("Batch move error", err) } if len(ret) > 0 { for i, entry := range entries { item := ret[i] if item.Data.Error != "" { log.Error(fmt.Sprintf("Copy '%s:%s' => '%s:%s' Failed, Code :%d", entry.SrcBucket, entry.SrcKey, entry.DestBucket, entry.DestKey, item.Code)) } else { log.Debug(fmt.Sprintf("Copy '%s:%s' => '%s:%s' Success, Code :%d", entry.SrcBucket, entry.SrcKey, entry.DestBucket, entry.DestKey, item.Code)) } } } }
// http://dn-gobuild5.qbox.me/gorelease/gorelease/gorelease/master/darwin-amd64/gorelease.zip // Need to update func downloadSource(name string) (dest string, err error) { parts := strings.Split(name, "/") if len(parts) != 2 { return "", fmt.Errorf("name: %s can only contains on /", name) } owner, repo := parts[0], parts[1] osarch := runtime.GOOS + "-" + runtime.GOARCH url := fmt.Sprintf("http://dn-gobuild5.qbox.me/gorelease/%s/%s/master/%s/%s.zip", owner, repo, osarch, repo) prompt("Downloading %v", url) log.Debug("download:", url) dest = getInsPath("src", fmt.Sprintf("%s.zip", repo)) resp, err := http.Get(url) if err != nil { return } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { err = errors.New("Http status not 200") return } lengthStr := resp.Header.Get("Content-Length") if lengthStr == "" { err = errors.New("Content length is empty") return } length, _ := strconv.Atoi(lengthStr) bar := pb.New(length).SetUnits(pb.U_BYTES) outFd, err := os.Create(dest) if err != nil { return } defer outFd.Close() bar.Start() reader := bar.NewProxyReader(resp.Body) _, err = io.Copy(outFd, reader) return }
/* get image width or height */ func (this *OSSImager) getImageInfo(imageUrl string) (imageInfo *ImageInfo, err error) { imageInfoUrl := fmt.Sprintf("%s?imageInfo", imageUrl) log.Debug(imageInfoUrl) resp, respErr := http.Get(imageInfoUrl) if respErr != nil { err = respErr return } defer resp.Body.Close() buffer := bytes.NewBuffer(nil) _, cpErr := io.Copy(buffer, resp.Body) if cpErr != nil { err = cpErr return } imageInfo = &ImageInfo{} decodeErr := json.Unmarshal(buffer.Bytes(), imageInfo) if decodeErr != nil { err = decodeErr return } return }
// handleWsMsg reacts to a control message from the tunnel server:
// TYPE_NEWCONN dials the server and hijacks an HTTP connection for this
// tunnel, handing the raw conn to rnl; TYPE_MESSAGE prints the body;
// anything else is logged as unsupported.
func handleWsMsg(msg Msg, sURL *url.URL, rnl *RevNetListener) {
	u := sURL
	switch msg.Type {
	case TYPE_NEWCONN:
		log.Debug("dial remote:", u.Host)
		sconn, err := net.Dial("tcp", u.Host)
		if err != nil {
			log.Println(err)
			break
		}
		// hijack the connection; X-Proxy-For identifies this tunnel by name
		_, err = sconn.Write([]byte(fmt.Sprintf(
			"GET /proxyhijack HTTP/1.1\r\nX-Proxy-For: %s \r\n\r\n", msg.Name)))
		if err != nil {
			log.Println(err)
			break
		}
		rnl.connCh <- sconn
	case TYPE_MESSAGE:
		fmt.Printf("Recv Message: %v\n", msg.Body)
	default:
		log.Warnf("Type: %v not support", msg.Type)
	}
}
// InitRouter registers all HTTP routes on the global martini instance:
// index, on-demand project builds, download pages, live build pages,
// outbound-link redirects, static docs, and search.
func InitRouter() {
	m.Get("/", func(r render.Render) {
		r.HTML(200, "index", map[string]interface{}{
			"Hostname": opts.Hostname,
		})
	})
	// trigger a build for <project>@<ref> on <os>/<arch> and redirect to
	// the produced artifact
	m.Get("/github.com/**/:ref/(?P<os>(windows|linux|darwin))/:arch", func(p martini.Params, w http.ResponseWriter, r *http.Request) {
		project := "github.com/" + p["_1"]
		ref := p["ref"]
		os, arch := p["os"], p["arch"]
		fullname := strings.Join([]string{project, ref, os, arch}, "-")
		wb, _ := GetWriteBroadcaster(fullname)
		job := NewBuilder(project, ref, os, arch, wb)
		addr, err := job.Auto()
		if err != nil {
			log.Error("auto build error:", err)
			http.Error(w, "project build error: "+err.Error(), 500)
			return
		}
		http.Redirect(w, r, addr, http.StatusTemporaryRedirect)
	})
	// bare project URL redirects to its download page
	m.Get("/github.com/**", func(p martini.Params, w http.ResponseWriter, r *http.Request) {
		newAddr := "/download/github.com/" + p["_1"]
		http.Redirect(w, r, newAddr, http.StatusTemporaryRedirect)
	})
	m.Get("/download/github.com/**", func(params martini.Params, r render.Render) {
		addr := params["_1"]
		r.HTML(200, "download", map[string]interface{}{
			"Project":          "github.com/" + addr,
			"ProjectShortName": addr,
			"Hostname":         opts.Hostname,
			"Name":             filepath.Base(addr),
		})
	})
	// live build page streaming output over websocket
	m.Get("/build/**", func(params martini.Params, r render.Render) {
		project := params["_1"]
		log.Debug(project, "END")
		r.HTML(200, "build", map[string]string{
			"FullName": project,
			"WsServer": opts.Hostname + "/websocket",
		})
	})
	// all out links call redirect
	m.Get("/redirect", func(w http.ResponseWriter, r *http.Request) {
		url := r.FormValue("url")
		log.Info(url)
		http.Redirect(w, r, url, http.StatusTemporaryRedirect)
	})
	// about && document
	m.Get("/about", func(r render.Render) {
		r.HTML(200, "about", nil)
	})
	m.Get("/document", func(r render.Render) {
		// README.md is resolved relative to the executable's directory
		path := filepath.Join(filepath.Dir(os.Args[0]), "README.md")
		readme, err := ioutil.ReadFile(path)
		if err != nil {
			log.Error(err)
		}
		data := make(map[string]interface{}, 0)
		data["Readme"] = string(readme)
		r.HTML(200, "document", data)
	})
	m.Get("/search/**", func(params martini.Params, r render.Render) {
		packages, err := NewSearch(params["_1"])
		r.HTML(200, "search", map[string]interface{}{
			"Keyword":        params["keyword"],
			"Packages":       packages.Packages,
			"PackagesLength": len(packages.Packages),
			"Error":          err,
		})
	})
	initBadge()
}
func QiniuDownload(threadCount int, downloadConfigFile string) { cnfFp, err := os.Open(downloadConfigFile) if err != nil { log.Error("Open download config file", downloadConfigFile, "failed,", err) return } defer cnfFp.Close() cnfData, err := ioutil.ReadAll(cnfFp) if err != nil { log.Error("Read download config file error", err) return } downConfig := DownloadConfig{} cnfErr := json.Unmarshal(cnfData, &downConfig) if cnfErr != nil { log.Error("Parse download config error", err) return } cnfJson, _ := json.Marshal(&downConfig) jobId := fmt.Sprintf("%x", md5.Sum(cnfJson)) jobListName := fmt.Sprintf("%s.list.txt", jobId) acct := Account{ AccessKey: downConfig.AccessKey, SecretKey: downConfig.SecretKey, } bLister := ListBucket{ Account: acct, } log.Debug("List bucket...") listErr := bLister.List(downConfig.Bucket, downConfig.Prefix, jobListName) if listErr != nil { log.Error("List bucket error", listErr) return } listFp, openErr := os.Open(jobListName) if openErr != nil { log.Error("Open list file error", openErr) return } defer listFp.Close() listScanner := bufio.NewScanner(listFp) listScanner.Split(bufio.ScanLines) downWorkGroup := sync.WaitGroup{} downCounter := 0 threadThresold := threadCount + 1 for listScanner.Scan() { downCounter += 1 if downCounter%threadThresold == 0 { downWorkGroup.Wait() } line := strings.TrimSpace(listScanner.Text()) items := strings.Split(line, "\t") if len(items) > 2 { fileKey := items[0] //check suffix if downConfig.Suffix != "" && !strings.HasSuffix(fileKey, downConfig.Suffix) { continue } fileSize, _ := strconv.ParseInt(items[1], 10, 64) //not backup yet if !checkLocalDuplicate(downConfig.DestDir, fileKey, fileSize) { downWorkGroup.Add(1) go func() { defer downWorkGroup.Done() downloadFile(downConfig, fileKey) }() } } } downWorkGroup.Wait() fmt.Println("All downloaded!") }
// QiniuUpload uploads every file under the configured SrcDir to a Qiniu
// bucket with at most threadCount concurrent uploads. The file list comes
// from DirCache; per-file progress is recorded in a per-job leveldb so
// unchanged files are skipped on re-runs (or, with CheckExists, by
// comparing the remote hash against the local etag). Files above
// PUT_THRESHOLD use resumable upload (rio), smaller ones form upload (fio).
func QiniuUpload(threadCount int, uploadConfigFile string) {
	fp, err := os.Open(uploadConfigFile)
	if err != nil {
		log.Error(fmt.Sprintf("Open upload config file `%s' error due to `%s'", uploadConfigFile, err))
		return
	}
	defer fp.Close()
	configData, err := ioutil.ReadAll(fp)
	if err != nil {
		log.Error(fmt.Sprintf("Read upload config file `%s' error due to `%s'", uploadConfigFile, err))
		return
	}
	var uploadConfig UploadConfig
	err = json.Unmarshal(configData, &uploadConfig)
	if err != nil {
		log.Error(fmt.Sprintf("Parse upload config file `%s' errror due to `%s'", uploadConfigFile, err))
		return
	}
	if _, err := os.Stat(uploadConfig.SrcDir); err != nil {
		log.Error("Upload config error for parameter `SrcDir`,", err)
		return
	}
	dirCache := DirCache{}
	currentUser, err := user.Current()
	if err != nil {
		log.Error("Failed to get current user", err)
		return
	}
	pathSep := string(os.PathSeparator)
	//create job id (stable hash of srcdir+bucket so re-runs share state)
	md5Hasher := md5.New()
	md5Hasher.Write([]byte(strings.TrimSuffix(uploadConfig.SrcDir, pathSep) + ":" + uploadConfig.Bucket))
	jobId := fmt.Sprintf("%x", md5Hasher.Sum(nil))
	//local storage path
	storePath := filepath.Join(currentUser.HomeDir, ".qshell", "qupload", jobId)
	err = os.MkdirAll(storePath, 0775)
	if err != nil {
		log.Error(fmt.Sprintf("Failed to mkdir `%s' due to `%s'", storePath, err))
		return
	}
	//cache file
	cacheFileName := filepath.Join(storePath, jobId+".cache")
	//leveldb folder
	leveldbFileName := filepath.Join(storePath, jobId+".ldb")
	totalFileCount := dirCache.Cache(uploadConfig.SrcDir, cacheFileName)
	ldb, err := leveldb.OpenFile(leveldbFileName, nil)
	if err != nil {
		log.Error(fmt.Sprintf("Open leveldb `%s' failed due to `%s'", leveldbFileName, err))
		return
	}
	defer ldb.Close()
	//sync
	ufp, err := os.Open(cacheFileName)
	if err != nil {
		log.Error(fmt.Sprintf("Open cache file `%s' failed due to `%s'", cacheFileName, err))
		return
	}
	defer ufp.Close()
	bScanner := bufio.NewScanner(ufp)
	bScanner.Split(bufio.ScanLines)
	var currentFileCount int64 = 0
	var successFileCount int64 = 0
	var failureFileCount int64 = 0
	var skippedFileCount int64 = 0
	ldbWOpt := opt.WriteOptions{
		Sync: true,
	}
	upWorkGroup := sync.WaitGroup{}
	upCounter := 0
	// wait for the in-flight batch after every threadCount launches
	threadThreshold := threadCount + 1
	//use host if not empty
	if uploadConfig.UpHost != "" {
		conf.UP_HOST = uploadConfig.UpHost
	}
	//set resume upload settings
	rio.SetSettings(&upSettings)
	mac := digest.Mac{uploadConfig.AccessKey, []byte(uploadConfig.SecretKey)}
	for bScanner.Scan() {
		// cache line format: relPath \t size \t modTime(100ns)
		line := strings.TrimSpace(bScanner.Text())
		items := strings.Split(line, "\t")
		if len(items) != 3 {
			log.Error(fmt.Sprintf("Invalid cache line `%s'", line))
			continue
		}
		localFname := items[0]
		currentFileCount += 1
		skip := false
		//check skip local file or folder
		if uploadConfig.SkipPrefixes != "" {
			//unpack skip prefix
			skipPrefixes := strings.Split(uploadConfig.SkipPrefixes, ",")
			for _, prefix := range skipPrefixes {
				if strings.HasPrefix(localFname, strings.TrimSpace(prefix)) {
					log.Debug(fmt.Sprintf("Skip by prefix '%s' for local file %s", strings.TrimSpace(prefix), localFname))
					skip = true
					skippedFileCount += 1
					break
				}
			}
			if skip {
				continue
			}
		}
		if uploadConfig.SkipSuffixes != "" {
			skipSuffixes := strings.Split(uploadConfig.SkipSuffixes, ",")
			for _, suffix := range skipSuffixes {
				if strings.HasSuffix(localFname, strings.TrimSpace(suffix)) {
					log.Debug(fmt.Sprintf("Skip by suffix '%s' for local file %s", strings.TrimSpace(suffix), localFname))
					skip = true
					skippedFileCount += 1
					break
				}
			}
			if skip {
				continue
			}
		}
		//pack the upload file key
		localFlmd, _ := strconv.Atoi(items[2])
		uploadFileKey := localFname
		if uploadConfig.IgnoreDir {
			if i := strings.LastIndex(uploadFileKey, pathSep); i != -1 {
				uploadFileKey = uploadFileKey[i+1:]
			}
		}
		if uploadConfig.KeyPrefix != "" {
			uploadFileKey = strings.Join([]string{uploadConfig.KeyPrefix, uploadFileKey}, "")
		}
		//convert \ to / under windows
		if runtime.GOOS == "windows" {
			uploadFileKey = strings.Replace(uploadFileKey, "\\", "/", -1)
		}
		localFilePath := filepath.Join(uploadConfig.SrcDir, localFname)
		fstat, err := os.Stat(localFilePath)
		if err != nil {
			log.Error(fmt.Sprintf("Error stat local file `%s' due to `%s'", localFilePath, err))
			return
		}
		fsize := fstat.Size()
		ldbKey := fmt.Sprintf("%s => %s", localFilePath, uploadFileKey)
		log.Info(fmt.Sprintf("Uploading %s (%d/%d, %.1f%%) ...", ldbKey, currentFileCount, totalFileCount, float32(currentFileCount)*100/float32(totalFileCount)))
		rsClient := rs.New(&mac)
		//check exists
		if uploadConfig.CheckExists {
			rsEntry, checkErr := rsClient.Stat(nil, uploadConfig.Bucket, uploadFileKey)
			if checkErr == nil {
				//compare hash
				localEtag, cErr := GetEtag(localFilePath)
				if cErr != nil {
					atomic.AddInt64(&failureFileCount, 1)
					log.Error("Calc local file hash failed,", cErr)
					continue
				}
				if rsEntry.Hash == localEtag {
					atomic.AddInt64(&skippedFileCount, 1)
					log.Debug(fmt.Sprintf("File %s already exists in bucket, ignore this upload", uploadFileKey))
					continue
				}
			} else {
				if _, ok := checkErr.(*rpc.ErrorInfo); !ok {
					//not logic error, should be network error
					atomic.AddInt64(&failureFileCount, 1)
					continue
				}
			}
		} else {
			//check leveldb
			ldbFlmd, err := ldb.Get([]byte(ldbKey), nil)
			flmd, _ := strconv.Atoi(string(ldbFlmd))
			//not exist, return ErrNotFound
			//check last modified
			if err == nil && localFlmd == flmd {
				log.Debug("Skip by local log for file", localFname)
				atomic.AddInt64(&skippedFileCount, 1)
				continue
			}
		}
		//worker
		upCounter += 1
		if upCounter%threadThreshold == 0 {
			upWorkGroup.Wait()
		}
		upWorkGroup.Add(1)
		//start to upload (per-iteration locals, so the closure captures
		//this file's values safely)
		go func() {
			defer upWorkGroup.Done()
			policy := rs.PutPolicy{}
			policy.Scope = uploadConfig.Bucket
			if uploadConfig.Overwrite {
				policy.Scope = uploadConfig.Bucket + ":" + uploadFileKey
				policy.InsertOnly = 0
			}
			policy.Expires = 24 * 3600
			uptoken := policy.Token(&mac)
			if fsize > PUT_THRESHOLD {
				// resumable upload for large files
				putRet := rio.PutRet{}
				err := rio.PutFile(nil, &putRet, uptoken, uploadFileKey, localFilePath, nil)
				if err != nil {
					atomic.AddInt64(&failureFileCount, 1)
					if pErr, ok := err.(*rpc.ErrorInfo); ok {
						log.Error(fmt.Sprintf("Put file `%s' => `%s' failed due to `%s'", localFilePath, uploadFileKey, pErr.Err))
					} else {
						log.Error(fmt.Sprintf("Put file `%s' => `%s' failed due to `%s'", localFilePath, uploadFileKey, err))
					}
				} else {
					atomic.AddInt64(&successFileCount, 1)
					// NOTE(review): this branch records "Y" while the
					// small-file branch records the mtime — the leveldb
					// skip check above compares mtimes, so large files
					// will not be skipped on re-runs. Confirm intended.
					perr := ldb.Put([]byte(ldbKey), []byte("Y"), &ldbWOpt)
					if perr != nil {
						log.Error(fmt.Sprintf("Put key `%s' into leveldb error due to `%s'", ldbKey, perr))
					}
				}
			} else {
				// form upload for small files
				putRet := fio.PutRet{}
				err := fio.PutFile(nil, &putRet, uptoken, uploadFileKey, localFilePath, nil)
				if err != nil {
					atomic.AddInt64(&failureFileCount, 1)
					if pErr, ok := err.(*rpc.ErrorInfo); ok {
						log.Error(fmt.Sprintf("Put file `%s' => `%s' failed due to `%s'", localFilePath, uploadFileKey, pErr.Err))
					} else {
						log.Error(fmt.Sprintf("Put file `%s' => `%s' failed due to `%s'", localFilePath, uploadFileKey, err))
					}
				} else {
					atomic.AddInt64(&successFileCount, 1)
					perr := ldb.Put([]byte(ldbKey), []byte(strconv.Itoa(localFlmd)), &ldbWOpt)
					if perr != nil {
						log.Error(fmt.Sprintf("Put key `%s' into leveldb error due to `%s'", ldbKey, perr))
					}
				}
			}
		}()
	}
	upWorkGroup.Wait()
	log.Info("-------Upload Done-------")
	log.Info("Total:\t", currentFileCount)
	log.Info("Success:\t", successFileCount)
	log.Info("Failure:\t", failureFileCount)
	log.Info("Skipped:\t", skippedFileCount)
	log.Info("-------------------------")
}
func QiniuUpload(threadCount int, uploadConfigFile string) { fp, err := os.Open(uploadConfigFile) if err != nil { log.Error(fmt.Sprintf("Open upload config file `%s' error due to `%s'", uploadConfigFile, err)) return } defer fp.Close() configData, err := ioutil.ReadAll(fp) if err != nil { log.Error(fmt.Sprintf("Read upload config file `%s' error due to `%s'", uploadConfigFile, err)) return } var uploadConfig UploadConfig err = json.Unmarshal(configData, &uploadConfig) if err != nil { log.Error(fmt.Sprintf("Parse upload config file `%s' errror due to `%s'", uploadConfigFile, err)) return } if _, err := os.Stat(uploadConfig.SrcDir); err != nil { log.Error("Upload config error for parameter `SrcDir`,", err) return } dirCache := DirCache{} currentUser, err := user.Current() if err != nil { log.Error("Failed to get current user", err) return } pathSep := string(os.PathSeparator) //create job id md5Hasher := md5.New() md5Hasher.Write([]byte(uploadConfig.SrcDir + ":" + uploadConfig.Bucket)) jobId := fmt.Sprintf("%x", md5Hasher.Sum(nil)) //local storage path storePath := fmt.Sprintf("%s%s.qshell%squpload%s%s", currentUser.HomeDir, pathSep, pathSep, pathSep, jobId) err = os.MkdirAll(storePath, 0775) if err != nil { log.Error(fmt.Sprintf("Failed to mkdir `%s' due to `%s'", storePath, err)) return } //cache file cacheFileName := fmt.Sprintf("%s%s%s.cache", storePath, pathSep, jobId) //leveldb folder leveldbFileName := fmt.Sprintf("%s%s%s.ldb", storePath, pathSep, jobId) totalFileCount := dirCache.Cache(uploadConfig.SrcDir, cacheFileName) ldb, err := leveldb.OpenFile(leveldbFileName, nil) if err != nil { log.Error(fmt.Sprintf("Open leveldb `%s' failed due to `%s'", leveldbFileName, err)) return } defer ldb.Close() //sync ufp, err := os.Open(cacheFileName) if err != nil { log.Error(fmt.Sprintf("Open cache file `%s' failed due to `%s'", cacheFileName, err)) return } defer ufp.Close() bScanner := bufio.NewScanner(ufp) bScanner.Split(bufio.ScanLines) currentFileCount := 0 ldbWOpt 
:= opt.WriteOptions{ Sync: true, } upWorkGroup := sync.WaitGroup{} upCounter := 0 threadThreshold := threadCount + 1 //use host if not empty if uploadConfig.UpHost != "" { conf.UP_HOST = uploadConfig.UpHost } //set settings rio.SetSettings(&upSettings) mac := digest.Mac{uploadConfig.AccessKey, []byte(uploadConfig.SecretKey)} //check thread count for bScanner.Scan() { line := strings.TrimSpace(bScanner.Text()) items := strings.Split(line, "\t") if len(items) > 1 { cacheFname := items[0] cacheFlmd, _ := strconv.Atoi(items[2]) uploadFileKey := cacheFname if uploadConfig.IgnoreDir { if i := strings.LastIndex(uploadFileKey, pathSep); i != -1 { uploadFileKey = uploadFileKey[i+1:] } } if uploadConfig.KeyPrefix != "" { uploadFileKey = strings.Join([]string{uploadConfig.KeyPrefix, uploadFileKey}, "") } //convert \ to / under windows if runtime.GOOS == "windows" { uploadFileKey = strings.Replace(uploadFileKey, "\\", "/", -1) } cacheFilePath := strings.Join([]string{uploadConfig.SrcDir, cacheFname}, pathSep) fstat, err := os.Stat(cacheFilePath) if err != nil { log.Error(fmt.Sprintf("Error stat local file `%s' due to `%s'", cacheFilePath, err)) return } fsize := fstat.Size() //check leveldb currentFileCount += 1 ldbKey := fmt.Sprintf("%s => %s", cacheFilePath, uploadFileKey) log.Debug(fmt.Sprintf("Checking %s ...", ldbKey)) //check last modified ldbFlmd, err := ldb.Get([]byte(ldbKey), nil) flmd, _ := strconv.Atoi(string(ldbFlmd)) //not exist, return ErrNotFound if err == nil && cacheFlmd == flmd { continue } fmt.Print("\033[2K\r") fmt.Printf("Uploading %s (%d/%d, %.1f%%) ...", ldbKey, currentFileCount, totalFileCount, float32(currentFileCount)*100/float32(totalFileCount)) os.Stdout.Sync() rsClient := rs.New(&mac) //worker upCounter += 1 if upCounter%threadThreshold == 0 { upWorkGroup.Wait() } upWorkGroup.Add(1) go func() { defer upWorkGroup.Done() //check exists if uploadConfig.CheckExists { rsEntry, checkErr := rsClient.Stat(nil, uploadConfig.Bucket, uploadFileKey) if 
checkErr == nil { //compare hash localEtag, cErr := GetEtag(cacheFilePath) if cErr != nil { log.Error("Calc local file hash failed,", cErr) return } if rsEntry.Hash == localEtag { log.Info("File already exists in bucket, ignore this upload") return } } else { if _, ok := checkErr.(*rpc.ErrorInfo); !ok { //not logic error, should be network error return } } } //upload policy := rs.PutPolicy{} policy.Scope = uploadConfig.Bucket if uploadConfig.Overwrite { policy.Scope = uploadConfig.Bucket + ":" + uploadFileKey policy.InsertOnly = 0 } policy.Expires = 24 * 3600 uptoken := policy.Token(&mac) if fsize > PUT_THRESHOLD { putRet := rio.PutRet{} err := rio.PutFile(nil, &putRet, uptoken, uploadFileKey, cacheFilePath, nil) if err != nil { log.Error(fmt.Sprintf("Put file `%s' => `%s' failed due to `%s'", cacheFilePath, uploadFileKey, err)) } else { perr := ldb.Put([]byte(ldbKey), []byte("Y"), &ldbWOpt) if perr != nil { log.Error(fmt.Sprintf("Put key `%s' into leveldb error due to `%s'", ldbKey, perr)) } } } else { putRet := fio.PutRet{} err := fio.PutFile(nil, &putRet, uptoken, uploadFileKey, cacheFilePath, nil) if err != nil { log.Error(fmt.Sprintf("Put file `%s' => `%s' failed due to `%s'", cacheFilePath, uploadFileKey, err)) } else { perr := ldb.Put([]byte(ldbKey), []byte(strconv.Itoa(cacheFlmd)), &ldbWOpt) if perr != nil { log.Error(fmt.Sprintf("Put key `%s' into leveldb error due to `%s'", ldbKey, perr)) } } } }() } else { log.Error(fmt.Sprintf("Error cache line `%s'", line)) } } upWorkGroup.Wait() fmt.Println() fmt.Println("Upload done!") }
func (ps *ProxyServer) newControlHandler() func(w http.ResponseWriter, r *http.Request) { return func(w http.ResponseWriter, r *http.Request) { // read listen port from request protocol, subdomain, port := parseConnectRequest(r) log.Debugf("proxy listen proto: %v, subdomain: %v port: %v", protocol, subdomain, port) // create websocket connection conn, err := upgrader.Upgrade(w, r, nil) if err != nil { http.Error(w, err.Error(), 502) return } defer conn.Close() log.Debug("remote client addr:", conn.RemoteAddr()) tunnel := &Tunnel{ wsconn: conn, } // TCP: create new port to listen log.Infof("New %s proxy for %v", protocol, conn.RemoteAddr()) switch protocol { case "tcp": // proxyAddr := fmt.Sprintf("0.0.0.0:%d", port) listener, err := NewTcpProxyListener(tunnel, port) if err != nil { log.Warnf("new tcp proxy err: %v", err) http.Error(w, err.Error(), 501) return } defer listener.Close() _, port, _ := net.SplitHostPort(listener.Addr().String()) wsSendMessage(conn, fmt.Sprintf( "Local tcp conn is now publicly available via:\n%v:%v\n", ps.domain, port)) case "http", "https": tr := &http.Transport{ Dial: tunnel.generateTransportDial(), } revProxy := &httputil.ReverseProxy{ Director: func(req *http.Request) { log.Println("director:", req.RequestURI) }, Transport: tr, } // should hook here // hook(HOOK_CREATE_HTTP_SUBDOMAIN, subdomain) // generate a uniq domain if subdomain == "" { subdomain = uniqName(5) + ".t" } pxDomain := subdomain + "." 
+ ps.domain log.Println("http px use domain:", pxDomain) if _, exists := ps.revProxies[pxDomain]; exists { wsSendMessage(conn, fmt.Sprintf("subdomain [%s] has already been taken", pxDomain)) return } ps.Lock() ps.revProxies[pxDomain] = revProxy ps.Unlock() wsSendMessage(conn, fmt.Sprintf( "Local server is now publicly available via:\nhttp://%s\n", pxDomain)) defer func() { ps.Lock() delete(ps.revProxies, pxDomain) ps.Unlock() }() default: log.Warn("unknown protocol:", protocol) return } // HTTP: use httputil.ReverseProxy for { var msg Msg if err := conn.ReadJSON(&msg); err != nil { log.Warn(err) break } log.Info("recv json:", msg) } } }