//TCPStream handles TCP proxy streams, copying data between the source and destination connections
func TCPStream(he ErrorHandler) *TCP {
	return &TCP{
		NewProxyStream(func(c *ConnInsight, se NotifierError) {
			dest := c.Dest()
			src := c.Src()

			if dest == nil || src == nil {
				log.Info("Invalid Connections")
				go func() { se <- ErrBadConn }()
				return
			}

			c.open.Emit(true)

			ws := new(sync.WaitGroup)
			ws.Add(2)

			rdest := dest.Reader()
			wdest := dest.Writer()
			rsrc := src.Reader()
			wsrc := src.Writer()

			destwriter := NopWriter(io.MultiWriter(wdest, c.In()))
			srcwriter := NopWriter(io.MultiWriter(wsrc, c.Out()))

			flux.GoDefer("connCloser", func() {
				ws.Wait()
				c.Close()
			})

			flux.GoDefer("dest2src", func() {
				log.Info("Copying to destination for tcp")
				_, ex := io.Copy(destwriter, rsrc)
				if ex != nil {
					go func() { se <- ex }()
				}
				ws.Done()
			})

			flux.GoDefer("src2dest", func() {
				log.Info("Copying to src for tcp")
				_, ex := io.Copy(srcwriter, rdest)
				if ex != nil {
					go func() { se <- ex }()
				}
				ws.Done()
			})
		}, he),
	}
}
//DialServerConn serves in place of ssh.NewServerConn, expiring the handshake if it takes longer than the given duration; each auth attempt resets the timer.
func DialServerConn(ds time.Duration, con net.Conn, conf *ssh.ServerConfig) (sc *ssh.ServerConn, cs <-chan ssh.NewChannel, rs <-chan *ssh.Request, ex error) {
	done := make(chan struct{})
	reset := make(chan struct{})

	authlog := conf.AuthLogCallback

	// wrap the auth log callback so every auth attempt resets the expiration window
	logger := func(conn ssh.ConnMetadata, method string, err error) {
		flux.GoDefer("AuthLogCallback", func() {
			flux.GoDefer("AuthLog", func() {
				if authlog != nil {
					authlog(conn, method, err)
				}
			})
			reset <- struct{}{}
		})
	}

	conf.AuthLogCallback = logger

	flux.GoDefer("NewServerConn", func() {
		defer close(done)
		sc, cs, rs, ex = ssh.NewServerConn(con, conf)
	})

	expiration := threshold(ds)

	func() {
	nsloop:
		for {
			select {
			case <-done:
				expiration = nil
				break nsloop
			case <-reset:
				expiration = threshold(ds)
			case <-expiration:
				if sc != nil {
					sc.Close()
				}
				sc = nil
				cs = nil
				rs = nil
				ex = fmt.Errorf("Expired NewServerConn call for ip:%+s ", con.RemoteAddr())
				break nsloop
			}
		}
	}()

	return
}
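// Example (hypothetical): a minimal sketch of using DialServerConn inside an accept
// loop, assuming a net.Listener and a ready *ssh.ServerConfig; the 30-second timeout
// and the channel handling are illustrative only, not part of the package.
func acceptWithTimeout(ls net.Listener, conf *ssh.ServerConfig) {
	for {
		con, err := ls.Accept()
		if err != nil {
			return
		}

		go func(con net.Conn) {
			// behaves like ssh.NewServerConn but gives up after 30 seconds of inactivity
			sc, chans, reqs, err := DialServerConn(30*time.Second, con, conf)
			if err != nil {
				con.Close()
				return
			}
			defer sc.Close()

			go ssh.DiscardRequests(reqs)
			for ch := range chans {
				ch.Reject(ssh.UnknownChannelType, "not handled in this sketch")
			}
		}(con)
	}
}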
// NewSockets returns a websocket-handling FlatChain that upgrades the request with the supplied upgrader and headers, then hands the connection to the SocketHandler
func NewSockets(upgrader *websocket.Upgrader, headers http.Header, hs SocketHandler) FlatChains {
	return NewFlatChain(func(c *Context, nx NextHandler) {
		if headers != nil {
			origin, ok := c.Req.Header["Origin"]
			if ok {
				headers.Set("Access-Control-Allow-Credentials", "true")
				headers.Set("Access-Control-Allow-Origin", strings.Join(origin, ";"))
			} else {
				headers.Set("Access-Control-Allow-Origin", "*")
			}
		}

		conn, err := upgrader.Upgrade(c.Res, c.Req, headers)
		if err != nil {
			return
		}

		flux.GoDefer("WebSocketPort.Handler", func() {
			hs(NewSocketWorker(&Websocket{
				Conn: conn,
				Ctx:  c,
			}))
			nx(c)
		})
	})
}
//DialClient dials an SSH connection to the given address, returning the resulting ssh.Client or an error; the attempt expires after the supplied expire duration unless a signal on the retry channel resets the timer
func DialClient(dial, expire time.Duration, ip string, conf *ssh.ClientConfig, retry <-chan struct{}) (*ssh.Client, error) {
	flux.Report(nil, fmt.Sprintf("MakeDial for %s for dialing at %+s and expiring in %+s", conf.User, dial, expire))

	cons := make(chan *ssh.Client)
	errs := make(chan error)

	var con net.Conn
	var sc ssh.Conn
	var chans <-chan ssh.NewChannel
	var req <-chan *ssh.Request
	var err error

	flux.GoDefer("MakeDial", func() {
		con, err = net.DialTimeout("tcp", ip, dial)
		if err != nil {
			flux.Report(err, fmt.Sprintf("MakeDial:Before for %s net.DialTimeout", ip))
			errs <- err
			return
		}

		sc, chans, req, err = ssh.NewClientConn(con, ip, conf)
		if err != nil {
			flux.Report(err, fmt.Sprintf("MakeDial:After for %s ssh.NewClientConn", ip))
			errs <- err
			return
		}

		flux.Report(nil, fmt.Sprintf("MakeDial initiating NewClient for %s", ip))
		cons <- ssh.NewClient(sc, chans, req)
	})

	expiration := threshold(expire)

	// every retry signal pushes the expiration window forward
	go func() {
		for range retry {
			expiration = threshold(expire)
		}
	}()

	select {
	case err := <-errs:
		flux.Report(err, "NewClient Ending!")
		return nil, err
	case som := <-cons:
		flux.Report(nil, "NewClient Created!")
		expiration = nil
		return som, nil
	case <-expiration:
		flux.Report(nil, fmt.Sprintf("MakeDial Expired for %s!", ip))
		if con != nil {
			defer con.Close()
		}
		if sc != nil {
			sc.Close()
		}
		return nil, ErrTimeout
	}
}
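// Example (hypothetical): a minimal sketch of dialing through DialClient; the address
// is a placeholder and the caller supplies a ready *ssh.ClientConfig. Signalling on
// the retry channel pushes the expiration window forward while a slow handshake is
// still in flight.
func dialExample(conf *ssh.ClientConfig, retry chan struct{}) (*ssh.Client, error) {
	// allow 5 seconds for the TCP dial and 15 seconds overall unless retried
	return DialClient(5*time.Second, 15*time.Second, "example.com:22", conf, retry)
}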
func addCommander(pm *PluginManager) {
	pm.Add("commandWatch", func(config *BuildConfig, options Plugins, c chan bool) {
		/* Expects to receive a plugin config following this format:

		   tag: commandWatch
		   config:
		     path: "./static/less"
		   args:
		     - lessc ./static/less/main.less ./static/css/main.css
		     - lessc ./static/less/svg.less ./static/css/svg.css

		   where config.path is the path to be watched
		*/

		//get the current directory
		pwd, _ := os.Getwd()

		//get the dir we should watch
		dir := options.Config["path"]

		//get the commands we should run on change
		commands := options.Args

		if dir == "" {
			fmt.Printf("---> commandWatch.error: no path set in config map for plug")
			return
		}

		//get the absolute path
		absDir := filepath.Join(pwd, dir)

		//create the file watcher
		watcher := fs.Watch(fs.WatchConfig{
			Path: absDir,
		})

		watcher.React(flux.SimpleMuxer(func(root flux.Reactor, data interface{}) {
			if ev, ok := data.(fsnotify.Event); ok {
				fmt.Printf("--> commandWatch: File has changed: %+s\n", ev.String())
			}
		}), true)

		// create the command runner set to run the args
		watcher.Bind(builders.CommandLauncher(commands), true)

		flux.GoDefer("CommandWatch:kill", func() {
			<-c
			watcher.Close()
		})
	})
}
func addBuilder(pm *PluginManager) {
	//these are internally used
	pm.Add("builder", func(config *BuildConfig, options Plugins, c chan bool) {
		pwd, _ := os.Getwd()
		_, binName := filepath.Split(config.Package)

		var clientdir string
		outputdir := filepath.Join(pwd, config.Client.StaticDir)

		if config.Client.Dir != "" {
			clientdir = filepath.Join(pwd, config.Client.Dir)
		}

		goget := builders.GoInstallerWith("./")

		jsbuild := builders.JSLauncher(builders.JSBuildConfig{
			Package:    config.ClientPackage,
			Folder:     outputdir,
			FileName:   config.Client.Name,
			Tags:       config.Client.BuildTags,
			Verbose:    config.Client.UseVerbose,
			PackageDir: clientdir,
		})

		gobuild := builders.GoBuilderWith(builders.BuildConfig{
			Path: filepath.Join(pwd, config.Bin),
			Name: binName,
			Args: config.BinArgs,
		})

		goget.Bind(jsbuild, true)

		//send out the build command after js build
		jsbuild.React(func(root flux.Reactor, _ error, _ interface{}) {
			gobuild.Send(true)
		}, true)

		//run go installer
		goget.Send(true)

		flux.GoDefer("watchBuildRun:kill", func() {
			<-c
			//close our builders
			goget.Close()
			gobuild.Close()
		})
	})
}
// Messages returns a receive-only channel for socket messages
func (s *SocketWorker) Messages() <-chan *WebsocketMessage {
	if s.writing {
		return s.mesgs
	}

	flux.GoDefer("Socket:Message:Receiver", func() {
		for dag := range s.data {
			if mg, ok := dag.(*WebsocketMessage); ok {
				s.mesgs <- mg
			}
		}
	})

	return s.mesgs
}
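// Example (hypothetical): a minimal sketch of draining the Messages channel from a
// SocketWorker; the handler body is a placeholder.
func consumeMessages(s *SocketWorker) {
	for msg := range s.Messages() {
		// msg is a *WebsocketMessage; handle or route it here
		_ = msg
	}
}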
func (a *Engine) prepareServer() error {
	var err error
	var ls net.Listener

	//run the before-init function
	if a.BeforeInit != nil {
		a.BeforeInit(a)
	}

	ls, err = relay.MakeBaseListener(a.Addr, a.C.Certs)
	if err != nil {
		log.Fatalf("Server failed to start: %+s", err.Error())
		return err
	}

	a.ls = ls
	a.li = &graceful.Server{
		NoSignalHandling: true,
		Timeout:          a.stop,
		Server: &http.Server{
			Addr:    a.Addr,
			Handler: a,
		},
	}

	flux.GoDefer("ServerGracefulServer", func() {
		a.li.Serve(ls)
	})

	//load up configurations
	if err := a.loadup(); err != nil {
		return err
	}

	if a.AfterInit != nil {
		a.AfterInit(a)
	}

	return nil
}
//handleStreams initiates the processing of streams
func (p *ProxyStream) handleStreams(id int64) {
	go func() {
	hloop:
		for {
			select {
			case k := <-p.endWorker:
				if id == k {
					break hloop
				}
			case wk := <-p.work:
				flux.GoDefer("ProxyStreamProcessor", func() {
					p.Wait()
					p.processor(wk, p.errors)
				})
			case <-p.closer:
				break hloop
			}
		}
	}()
}
//Serve handles the operations of the servicer
func (s *SMTPService) Serve() {
	defer s.listener.Close()

	for {
		con, err := s.listener.Accept()
		if err != nil {
			log.Error("SMTPService Listener Error", err)
			continue
		}

		dcon, ok := con.(*DeferConn)
		if !ok {
			log.Error("SMTPService ConnType", con.Close())
			continue
		}

		flux.GoDefer("SMTPDeferConOp", func() {
			conf := s.config
			conf.Config = s.tlsconfig

			log.Debug("Creating SMTPProxyConnector for Remote:%s Local:%s", dcon.RemoteAddr().String(), dcon.LocalAddr().String())

			// use a local error so we do not race with the Accept loop's err variable
			if perr := (SMTPProxyConn{dcon, conf}).Proxy(); perr != nil {
				log.Error("Proxy Error for %s", con.RemoteAddr().String(), perr)
			}
		})
	}
}
func addWatchBuildRun(pm *PluginManager) {
	//these are internally used
	pm.Add("watchBuildRun", func(config *BuildConfig, options Plugins, c chan bool) {
		pwd, _ := os.Getwd()
		_, binName := filepath.Split(config.Package)
		binDir := filepath.Join(pwd, config.Bin)
		binfile := filepath.Join(binDir, binName)

		pkgs := append([]string{}, config.Package, "github.com/influx6/relay/relay", "github.com/influx6/relay/engine")

		packages, err := assets.GetAllPackageLists(pkgs)
		if err != nil {
			panic(err)
		}

		fmt.Printf("--> Retrieved package directories %s \n", config.Package)

		goget := builders.GoInstallerWith("./")

		goget.React(func(root flux.Reactor, err error, data interface{}) {
			if err != nil {
				fmt.Printf("---> goget.Error occurred: %s\n", err)
			} else {
				fmt.Printf("--> Sending signal for 'go get'\n")
			}
		}, true)

		buildbin := builders.BinaryBuildLauncher(builders.BinaryBuildConfig{
			Path:    binDir,
			Name:    binName,
			RunArgs: config.BinArgs,
		})

		buildbin.React(func(root flux.Reactor, err error, data interface{}) {
			if err != nil {
				fmt.Printf("---> buildbin.Error occurred: %s\n", err)
			} else {
				fmt.Printf("--> Building Binary\n")
			}
		}, true)

		goget.Bind(buildbin, true)

		fmt.Printf("--> Initializing File Watcher using package dependencies at %d\n", len(packages))

		watcher := fs.WatchSet(fs.WatchSetConfig{
			Path: packages,
			Validator: func(base string, info os.FileInfo) bool {
				if strings.Contains(base, ".git") {
					return false
				}
				if strings.Contains(base, binDir) || base == binDir {
					return false
				}
				if strings.Contains(base, binfile) || base == binfile {
					return false
				}
				if info != nil && info.IsDir() {
					return true
				}
				if filepath.Ext(base) != ".go" {
					return false
				}
				return true
			},
		})

		watcher.React(func(root flux.Reactor, err error, data interface{}) {
			if err != nil {
				fmt.Printf("---> watcher.Error occurred: %s\n", err)
			} else if ev, ok := data.(fsnotify.Event); ok {
				fmt.Printf("--> File has changed: %+s\n", ev.String())
			}
		}, true)

		watcher.Bind(buildbin, true)
		watcher.Bind(goget, true)

		//run go installer
		goget.Send(true)

		fmt.Printf("--> Initializing Interrupt Signal Watcher for %s@%s\n", binName, binfile)

		flux.GoDefer("watchBuildRun:kill", func() {
			<-c
			//close our builders
			watcher.Close()
			goget.Close()
			buildbin.Close()
		})
	})
}
// WatchSet, unlike Watch, is not limited to a single directory: a WatchSetConfig can
// supply multiple directories and files, which are sorted and watched. If all paths
// turn out to be invalid, the watcher and its task are closed and an invalid-file
// error is forwarded down the reactor chain.
func WatchSet(m WatchSetConfig) flux.Reactor {
	var running bool

	mo := flux.Reactive(func(root flux.Reactor, err error, _ interface{}) {
		if err != nil {
			root.ReplyError(err)
			return
		}

		if running {
			return
		}
		running = true

		var dirlistings []*assets.DirListing
		var files []string
		var dirsAdded = make(map[string]bool)

		for _, path := range m.Path {
			if dirsAdded[path] {
				continue
			}

			stat, err := os.Stat(path)
			if err != nil {
				root.ReplyError(err)
				continue
			}

			if stat.IsDir() {
				if dir, err := assets.DirListings(path, m.Validator, m.Mux); err == nil {
					dirsAdded[path] = true
					dirlistings = append(dirlistings, dir)
				} else {
					root.ReplyError(err)
				}
			} else if !dirsAdded[filepath.Dir(path)] {
				files = append(files, path)
			}
		}

		if len(dirlistings) <= 0 && len(files) <= 0 {
			log.Printf("no dirlistings, will close")
			go root.Close()
			return
		}

		flux.GoDefer("Watch", func() {
			defer root.Close()

			for {
				wo, err := fsnotify.NewWatcher()
				if err != nil {
					root.ReplyError(err)
					break
				}

				//reload all concerned directories into the watcher
				for _, dir := range dirlistings {
					dir.Listings.Wo.RLock()
					for _, files := range dir.Listings.Tree {
						wo.Add(files.AbsDir)
						files.Tree.Each(func(mod, real string) {
							rel, _ := filepath.Abs(real)
							wo.Add(rel)
						})
					}
					dir.Listings.Wo.RUnlock()
				}

				//reload all concerned files found in the path
				for _, file := range files {
					wo.Add(file)
				}

				select {
				case <-root.CloseNotify():
					wo.Close()
					return
				case ev, ok := <-wo.Events:
					if ok {
						if m.Validator != nil {
							file := filepath.Clean(ev.Name)
							if m.Validator(file, nil) {
								root.Reply(ev)
							}
						} else {
							root.Reply(ev)
						}
					}
				case erx, ok := <-wo.Errors:
					if ok {
						root.ReplyError(erx)
					}
				}

				wo.Close()

				//reload all concerned directories
				for _, dir := range dirlistings {
					dir.Reload()
				}
			}
		})
	})

	mo.Send(true)

	return mo
}
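// Example (hypothetical): a minimal sketch of wiring WatchSet to a reactor, assuming
// the watched directories exist; it mirrors how the build plugins use it and only
// reports changes to .go files outside of .git.
func watchSetExample(dirs []string) flux.Reactor {
	watcher := WatchSet(WatchSetConfig{
		Path: dirs,
		Validator: func(base string, info os.FileInfo) bool {
			if strings.Contains(base, ".git") {
				return false
			}
			if info != nil && info.IsDir() {
				return true
			}
			return filepath.Ext(base) == ".go"
		},
	})

	watcher.React(flux.SimpleMuxer(func(root flux.Reactor, data interface{}) {
		if ev, ok := data.(fsnotify.Event); ok {
			log.Printf("watchSetExample: %s changed", ev.Name)
		}
	}), true)

	return watcher
}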
func addJsClient(pm *PluginManager) {
	//these are internally used for js building
	pm.Add("jsClients", func(config *BuildConfig, options Plugins, c chan bool) {
		for _, pkg := range options.Args {
			var pg Plugins
			pg.Config = make(PluginConfig)
			pg.Tag = "jsClient"
			pg.Config["package"] = pkg
			pg.Args = nil
			pm.Activate(pg, config, c)
		}
	})

	pm.Add("jsClient", func(config *BuildConfig, options Plugins, c chan bool) {
		pkg := options.Config["package"]
		_, jsName := filepath.Split(pkg)

		pkgs := append([]string{}, pkg)

		packages, err := assets.GetAllPackageLists(pkgs)
		if err != nil {
			panic(err)
		}

		dir, err := assets.GetPackageDir(pkg)
		if err != nil {
			panic(err)
		}

		jsbuild := builders.JSLauncher(builders.JSBuildConfig{
			Package:  pkg,
			Folder:   dir,
			FileName: jsName,
			Tags:     options.Args,
			Verbose:  config.Client.UseVerbose,
		})

		jsbuild.React(func(root flux.Reactor, err error, _ interface{}) {
			if err != nil {
				fmt.Printf("--> Js.client.Build complete: Dir: %s \n -----> Error: %s \n", dir, err)
			}
		}, true)

		watcher := fs.WatchSet(fs.WatchSetConfig{
			Path: packages,
			Validator: func(base string, info os.FileInfo) bool {
				if strings.Contains(base, ".git") {
					return false
				}
				if info != nil && info.IsDir() {
					return true
				}
				if filepath.Ext(base) != ".go" {
					return false
				}
				return true
			},
		})

		watcher.React(flux.SimpleMuxer(func(root flux.Reactor, data interface{}) {
			if ev, ok := data.(fsnotify.Event); ok {
				fmt.Printf("--> Client: File has changed: %+s\n", ev.String())
			}
		}), true)

		watcher.Bind(jsbuild, true)

		jsbuild.Send(true)

		flux.GoDefer("jsClient:kill", func() {
			<-c
			watcher.Close()
			jsbuild.Close()
		})
	})
}
// Watch returns a task handler that watches a path for changes and passes down the file that changed
func Watch(m WatchConfig) flux.Reactor {
	var running bool

	mo := flux.Reactive(func(root flux.Reactor, err error, _ interface{}) {
		if err != nil {
			root.ReplyError(err)
			return
		}

		if running {
			return
		}

		stat, err := os.Stat(m.Path)
		if err != nil {
			root.ReplyError(err)
			go root.Close()
			return
		}

		running = true

		// a single file: watch just that path
		if !stat.IsDir() {
			flux.GoDefer("Watch", func() {
				defer root.Close()

				for {
					wo, err := fsnotify.NewWatcher()
					if err != nil {
						root.ReplyError(err)
						break
					}

					if err := wo.Add(m.Path); err != nil {
						wo.Close()
						break
					}

					select {
					case ev, ok := <-wo.Events:
						if ok {
							root.Reply(ev)
						}
					case erx, ok := <-wo.Errors:
						if ok {
							root.ReplyError(erx)
						}
					case <-root.CloseNotify():
						wo.Close()
						return
					}

					wo.Close()
				}
			})
			return
		}

		dir, err := assets.DirListings(m.Path, m.Validator, m.Mux)
		if err != nil {
			root.ReplyError(err)
			go root.Close()
			return
		}

		flux.GoDefer("Watch", func() {
			defer root.Close()

			for {
				wo, err := fsnotify.NewWatcher()
				if err != nil {
					root.ReplyError(err)
					break
				}

				dir.Listings.Wo.RLock()
				for _, files := range dir.Listings.Tree {
					wo.Add(files.AbsDir)
					files.Tree.Each(func(mod, real string) {
						rel, _ := filepath.Abs(real)
						wo.Add(rel)
					})
				}
				dir.Listings.Wo.RUnlock()

				select {
				case <-root.CloseNotify():
					wo.Close()
					return
				case ev, ok := <-wo.Events:
					if ok {
						file := filepath.Clean(ev.Name)
						if m.Validator != nil {
							if m.Validator(file, nil) {
								root.Reply(ev)
							}
						} else {
							root.Reply(ev)
						}
					}
				case erx, ok := <-wo.Errors:
					if ok {
						root.ReplyError(erx)
					}
				}

				wo.Close()

				if err = dir.Reload(); err != nil {
					root.ReplyError(err)
				}
			}
		})
	})

	mo.Send(true)

	return mo
}
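// Example (hypothetical): a minimal sketch of watching a single directory with Watch
// and reacting to change events; the directory path is a placeholder supplied by the
// caller.
func watchExample(dir string) flux.Reactor {
	watcher := Watch(WatchConfig{
		Path: dir,
	})

	watcher.React(flux.SimpleMuxer(func(root flux.Reactor, data interface{}) {
		if ev, ok := data.(fsnotify.Event); ok {
			log.Printf("watchExample: %s changed", ev.Name)
		}
	}), true)

	return watcher
}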
func addGoStaticBundle(pm *PluginManager) {
	pm.Add("goStatic", func(config *BuildConfig, options Plugins, c chan bool) {
		/* Expects to receive a plugin config following this format; you can control all
		   aspects of assets.BindFS with the following:

		   tag: gostatic
		   # add commands to run on file changes
		   args:
		     - touch ./templates/smirf.go
		   config:
		     in: ./markdown
		     out: ./templates
		     package: smirf
		     file: smirf
		     gzipped: true
		     nodecompression: true
		     production: true # generally you want to leave this to the cli to set

		   where config.in is the path to be watched
		*/

		//get the current directory
		pwd, _ := os.Getwd()

		//get the directories and names we should use
		inDir := options.Config["in"]
		outDir := options.Config["out"]
		packageName := options.Config["package"]
		fileName := options.Config["file"]
		ignore := options.Config["ignore"]

		absDir := filepath.Join(pwd, inDir)
		absFile := filepath.Join(pwd, outDir, fileName+".go")

		if inDir == "" || outDir == "" || packageName == "" || fileName == "" {
			fmt.Println("---> goStatic.error: the following keys(in,out,package,file) must not be empty")
			return
		}

		//set up the boolean values
		var prod bool
		var gzip bool
		var nodcom bool
		var err error

		if gz, err := strconv.ParseBool(options.Config["gzipped"]); err == nil {
			gzip = gz
		} else if config.Mode > 0 {
			gzip = true
		}

		if br, err := strconv.ParseBool(options.Config["nodecompression"]); err == nil {
			nodcom = br
		}

		if pr, err := strconv.ParseBool(options.Config["production"]); err == nil {
			prod = pr
		} else {
			prod = config.Mode > 0
		}

		var ignoreReg *regexp.Regexp

		if ignore != "" {
			ignoreReg = regexp.MustCompile(ignore)
		}

		gostatic, err := builders.BundleAssets(&assets.BindFSConfig{
			InDir:           inDir,
			OutDir:          outDir,
			Package:         packageName,
			File:            fileName,
			Gzipped:         gzip,
			NoDecompression: nodcom,
			Production:      prod,
		})
		if err != nil {
			fmt.Printf("---> goStatic.error: %s", err)
			return
		}

		gostatic.React(func(root flux.Reactor, err error, data interface{}) {
			fmt.Printf("--> goStatic.Reacted: State %t Error: (%+s)\n", data, err)
		}, true)

		//bundle up the assets for the main time
		gostatic.Send(true)

		var command []string

		if prod {
			if runtime.GOOS != "windows" {
				command = append(command, fmt.Sprintf("touch %s", absFile))
			} else {
				command = append(command, fmt.Sprintf("copy /b %s+,,", absFile))
			}
		}

		//add the args from the options
		command = append(command, options.Args...)

		//adds a CommandLauncher to touch the output file to force a file change notification
		touchCommand := builders.CommandLauncher(command)
		gostatic.Bind(touchCommand, true)

		//create the file watcher
		watcher := fs.Watch(fs.WatchConfig{
			Path: absDir,
			Validator: func(path string, info os.FileInfo) bool {
				if ignoreReg != nil && ignoreReg.MatchString(path) {
					return false
				}
				return true
			},
		})

		// create the command runner set to run the args
		watcher.Bind(gostatic, true)

		watcher.React(flux.SimpleMuxer(func(root flux.Reactor, data interface{}) {
			if ev, ok := data.(fsnotify.Event); ok {
				fmt.Printf("--> goStatic: File has changed: %+s\n", ev.String())
			}
		}), true)

		flux.GoDefer("goStatic:kill", func() {
			<-c
			gostatic.Close()
		})
	})
}
func addGoFriday(pm *PluginManager) {
	pm.Add("goFriday", func(config *BuildConfig, options Plugins, c chan bool) {
		/* Expects to receive a plugin config following this format:

		   tag: gofriday
		   config:
		     markdown: ./markdown
		     templates: ./templates

		   where config.markdown is the path to be watched
		*/

		//get the current directory
		pwd, _ := os.Getwd()

		//get the dirs we should watch and render into
		markdownDir := options.Config["markdown"]
		templateDir := options.Config["templates"]

		//optional args
		ext := options.Config["ext"]

		//must be a bool
		sanitizeString := options.Config["sanitize"]

		var sanitize bool

		if svz, err := strconv.ParseBool(sanitizeString); err == nil {
			sanitize = svz
		}

		if markdownDir == "" || templateDir == "" {
			fmt.Println("---> gofriday.error: expected to find keys (markdown and templates) in config map")
			return
		}

		//get the absolute paths
		absDir := filepath.Join(pwd, markdownDir)
		tbsDir := filepath.Join(pwd, templateDir)

		gofriday, err := builders.GoFridayStream(builders.MarkStreamConfig{
			InputDir: absDir,
			SaveDir:  tbsDir,
			Ext:      ext,
			Sanitize: sanitize,
		})
		if err != nil {
			fmt.Printf("---> gofriday.error: %s", err)
			return
		}

		//create the file watcher
		watcher := fs.Watch(fs.WatchConfig{
			Path: absDir,
		})

		watcher.React(flux.SimpleMuxer(func(root flux.Reactor, data interface{}) {
			if ev, ok := data.(fsnotify.Event); ok {
				fmt.Printf("--> goFriday: File has changed: %+s\n", ev.String())
			}
		}), true)

		// create the command runner set to run the args
		watcher.Bind(gofriday, true)

		flux.GoDefer("goFriday:kill", func() {
			<-c
			watcher.Close()
		})
	})
}
func addJSWatchBuild(pm *PluginManager) {
	//these are internally used for js building
	pm.Add("jsWatchBuild", func(config *BuildConfig, options Plugins, c chan bool) {
		pwd, _ := os.Getwd()
		_, binName := filepath.Split(config.Package)
		binDir := filepath.Join(pwd, config.Bin)
		binfile := filepath.Join(binDir, binName)

		pkgs := append([]string{}, config.ClientPackage)

		packages, err := assets.GetAllPackageLists(pkgs)
		if err != nil {
			panic(err)
		}

		fmt.Printf("--> Retrieved js package directories %s \n", config.Package)

		var clientdir string
		outputdir := filepath.Join(pwd, config.Client.StaticDir)

		if config.Client.Dir != "" {
			clientdir = filepath.Join(pwd, config.Client.Dir)
		}

		jsbuild := builders.JSLauncher(builders.JSBuildConfig{
			Package:    config.ClientPackage,
			Folder:     outputdir,
			FileName:   config.Client.Name,
			Tags:       config.Client.BuildTags,
			Verbose:    config.Client.UseVerbose,
			PackageDir: clientdir,
		})

		jsbuild.React(func(root flux.Reactor, err error, _ interface{}) {
			if err != nil {
				fmt.Printf("--> Js.client.Build complete: Dir: %s \n -----> Error: %s \n", clientdir, err)
			}
		}, true)

		fmt.Printf("--> Initializing File Watcher using js package dependencies at %d\n", len(packages))

		watcher := fs.WatchSet(fs.WatchSetConfig{
			Path: packages,
			Validator: func(base string, info os.FileInfo) bool {
				if strings.Contains(base, ".git") {
					return false
				}
				if strings.Contains(base, binDir) || base == binDir {
					return false
				}
				if strings.Contains(base, binfile) || base == binfile {
					return false
				}
				if info != nil && info.IsDir() {
					return true
				}
				if filepath.Ext(base) != ".go" {
					return false
				}
				return true
			},
		})

		watcher.React(flux.SimpleMuxer(func(root flux.Reactor, data interface{}) {
			if ev, ok := data.(fsnotify.Event); ok {
				fmt.Printf("--> Client: File has changed: %+s\n", ev.String())
			}
		}), true)

		watcher.Bind(jsbuild, true)

		jsbuild.Send(true)

		flux.GoDefer("jsWatchBuild:kill", func() {
			<-c
			//close our builders
			watcher.Close()
			jsbuild.Close()
		})
	})
}
//PlugPlay just runs a basic test case
func PlugPlay() {
	mc := somtp.MetaConfig{Hostname: "localhost"}

	so, err := somtp.NewSMTP(mc, "mailtrap.io:2525", ":3040", nil)
	if err != nil {
		log.Fatal(err)
	}

	flux.GoDefer("SMTPServiceServe", so.Serve)

	auth := smtp.PlainAuth("", "4200103e6055279ac", "52688238e8872b", "")

	// Connect to the local SMTP proxy.
	c, err := smtp.Dial(":3040")
	if err != nil {
		log.Fatal("Connecting to SMTP:", err)
	}

	if err := c.Auth(auth); err != nil {
		log.Fatal("Auth:Error ", err)
	}

	// Set the sender and recipient first
	if err := c.Mail("*****@*****.**"); err != nil {
		log.Fatal("Setting sender", err)
	}

	if err := c.Rcpt("*****@*****.**"); err != nil {
		log.Fatal("Setting receiver", err)
	}

	// Send the email body.
	wc, err := c.Data()
	if err != nil {
		log.Fatal("Sending data error: ", err)
	}

	log.Printf("Writing data to io.Writer")
	_, err = fmt.Fprintf(wc, "This is the email body")
	log.Printf("Written data to io.Writer")
	if err != nil {
		log.Fatal("Writing body", err)
	}

	log.Printf("Closing data to io.Writer")
	err = wc.Close()
	log.Printf("Closed data to io.Writer")
	if err != nil {
		log.Fatal("Closing body", err)
	}

	// Send the QUIT command and close the connection.
	log.Printf("Sending QUIT to close the connection")
	err = c.Quit()
	log.Printf("Connection closed")
	if err != nil {
		log.Fatal(err)
	}
}
//SSHStream returns a new SSH stream handler
func SSHStream(he ErrorHandler) *SSH {
	return &SSH{
		NewProxyStream(func(c *ConnInsight, se NotifierError) {
			src := c.Src()
			dest := c.Dest()

			kill := make(chan struct{})

			scon, ok := src.(*SSHServerConn)
			if !ok {
				flux.GoDefer("ErrSubmit", func() { se <- ErrBadConn })
				return
			}

			ccon, ok := dest.(*SSHClientConn)
			if !ok {
				flux.GoDefer("ErrSubmit", func() { se <- ErrBadConn })
				return
			}

			// c.open.Emit(true)
			c.closed.Listen(func(_ interface{}) {
				close(kill)
			})

			con, req := scon.Channels, scon.Requests
			cli := ccon.Client

			flux.GoDefer("DiscardRequests", func() {
				ssh.DiscardRequests(req)
			})

			flux.GoDefer("ProxySubmit", func() {
			ploop:
				for {
					select {
					case ch, ok := <-con:
						if !ok {
							break ploop
						}

						coc, ceq, err := ch.Accept()
						checkError(err, fmt.Sprintf("Accepting Channel %s", ch.ChannelType()))
						if err != nil {
							return
						}

						err = proxyChannel(c, coc, ceq, ch, cli, kill)
						checkError(err, fmt.Sprintf("Creating Proxy Strategy for %s", ch.ChannelType()))
						if err != nil {
							return
						}
					case <-kill:
						break ploop
					}
				}
			})
		}, he),
	}
}
func proxyChannel(c *ConnInsight, mcha ssh.Channel, mreq <-chan *ssh.Request, master ssh.NewChannel, client *SSHClient, killer <-chan struct{}) error {
	do := new(sync.Once)

	cochan, coreq, err := client.OpenChannel(master.ChannelType(), master.ExtraData())
	checkError(err, fmt.Sprintf("Creating Client Channel for %s", client.RemoteAddr().String()))
	if err != nil {
		return err
	}

	stop := make(chan struct{})
	endClose := func() { close(stop) }

	// forward out-of-band requests in both directions until either side exits
	flux.GoDefer("proxyChannelCopy", func() {
		defer cochan.Close()
		defer mcha.Close()

		func() {
		ploop:
			for {
				select {
				case <-stop:
					break ploop
				case <-killer:
					break ploop
				case slx, ok := <-coreq:
					if !ok {
						return
					}

					Reply(slx, mcha, c)

					switch slx.Type {
					case "exit-status":
						break ploop
					}
				case mlx, ok := <-mreq:
					if !ok {
						return
					}

					Reply(mlx, cochan, c)

					switch mlx.Type {
					case "exit-status":
						break ploop
					}
				}
			}
		}()
	})

	mastercloser := io.ReadCloser(mcha)
	slavecloser := io.ReadCloser(cochan)

	wrapmaster := io.MultiWriter(mcha, c.Out())
	wrapsl := io.MultiWriter(cochan, c.In())

	flux.GoDefer("CopyToSlave", func() {
		defer do.Do(endClose)
		io.Copy(wrapsl, mastercloser)
	})

	flux.GoDefer("CopyToMaster", func() {
		defer do.Do(endClose)
		io.Copy(wrapmaster, slavecloser)
	})

	flux.GoDefer("CopyCloser", func() {
		defer c.Close()
		<-stop

		mx := mastercloser.Close()
		checkError(mx, "Master Writer Closer")

		sx := slavecloser.Close()
		checkError(sx, "Slave Writer Closer")

		ex := client.Close()
		checkError(ex, "Client Writer Closer")
	})

	return nil
}
//handleOperations manages the operations and behaviours of the connserver
func (c *ConnServe) handleOperations() {
	var killsig int64

	flux.GoDefer(fmt.Sprintf("%s:ConnectionCycleManager", c.ID), func() {
		flux.Report(nil, fmt.Sprintf("ConnServe:Listener Connection Cycle Management Started for %s", c.ID))
		defer flux.Report(nil, fmt.Sprintf("ConnServe:Listener Closed for %s!", c.ID))

		func() {
			log.Info("ConnServer checking HealthCheck Status: %t For: %s", c.checkable, c.ID)

			defer func() {
				defer flux.Report(nil, fmt.Sprintf("ConnServe:Listener Setting Kill Signal for %s!", c.ID))
				atomic.StoreInt64(&killsig, 1)
			}()

		connloop:
			for {
				select {
				case <-c.director.HealthNotify():
					log.Info("ConnServer checking HealthCheck Status: %t For: %s", c.checkable, c.ID)
					if c.checkable {
						age := c.director.MaxAge()
						idle := time.Duration(c.idle.Unix())
						if idle > age {
							break connloop
						}
					}
				case <-c.closer:
					flux.Report(nil, fmt.Sprintf("ConnServe:Listener User Closing Operation for %s", c.ID))
					break connloop
				case <-c.director.CloseNotify():
					flux.Report(nil, fmt.Sprintf("ConnServe:Listener Director Closing Operation for %s", c.ID))
					break connloop
				}
			}
		}()
	})

	flux.GoDefer(fmt.Sprintf("%s:AcceptCycleHandler", c.ID), func() {
		flux.Report(nil, fmt.Sprintf("ConnServe:Listener Accept Cycle Started for %s", c.ID))

		for {
			if atomic.LoadInt64(&killsig) > 0 {
				break
			}

			defer flux.Report(nil, fmt.Sprintf("ConnServe:Listener Finished Handling Accept for %s", c.ID))

			con, err := c.listener.Accept()
			flux.Report(err, fmt.Sprintf("ConnServe:Listener Operation Processing for %s", c.ID))

			if err != nil {
				flux.GoDefer("ReportError", func() {
					if errs, ok := c.director.Errors(); ok {
						errs <- err
					}
				})
				return
			}

			c.idle = time.Now()

			//send the process into a goroutine, lets not block
			flux.GoDefer("Handling net.Conn from ConnServe", func() {
				c.director.Requests().Emit(con)
				c.director.Wait()

				err := c.target(con, c.director)
				flux.Report(err, fmt.Sprintf("Received On Request for %s by %s", con.RemoteAddr().String(), c.ID))

				if err != nil {
					fmt.Fprint(con, EOFV)
					flux.Report(con.Close(), fmt.Sprintf("Closing ConnServe Request for %s by %s", con.RemoteAddr().String(), c.ID))

					flux.GoDefer("ReportError", func() {
						if errs, ok := c.director.Errors(); ok {
							errs <- err
						}
					})
				}
			})
		}
	})
}
//ServeHTTPWith provides a different approach: instead of using the base net.Conn, it uses the http.Request and http.ResponseWriter as the means of proxying through the director
func ServeHTTPWith(t TargetReqResOp, addr string, d Directors, conf *tls.Config, check bool) (*HTTPServe, error) {
	var hs *http.Server
	var ls net.Listener
	var err error

	handler := http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {
		defer flux.Report(err, "Request Processor Completed!")

		flux.GoDefer("HttpServer", func() {
			d.Requests().Emit(true)
			flux.Report(err, "Request Process Begin!")

			err := t(res, req, d)
			flux.Report(err, "Request Processed Finished!")

			if err != nil {
				res.WriteHeader(404)
				res.Write([]byte(err.Error()))

				flux.GoDefer("ReportError", func() {
					eos, ok := d.Errors()
					if ok {
						eos <- err
					}
				})
			}
		})
	})

	if conf != nil {
		hs, ls, err = CreateTLS(addr, conf, handler)
	} else {
		hs, ls, err = CreateHTTP(addr, handler)
	}

	flux.Report(err, "HttpServe Listener Processor!")
	if err != nil {
		return nil, err
	}

	hps := &HTTPServe{
		listener: ls,
		server:   hs,
		director: d,
		idle:     time.Now(),
		closer:   make(Notifier),
	}

	go func() {
		defer ls.Close()
		flux.Report(nil, fmt.Sprintf("HttpServer HealthCheck Status %t", check))

	nloop:
		for {
			select {
			case <-d.HealthNotify():
				flux.Report(nil, fmt.Sprintf("HttpServer checking HealthCheck status %t", check))
				if check {
					age := hps.director.MaxAge()
					idle := time.Duration(hps.idle.Unix())
					if idle > age {
						break nloop
					}
				}
			case <-hps.closer:
				break nloop
			}
		}
	}()

	return hps, nil
}