func main() {
	eng := engine.New()
	c, err := net.Dial("unix", "beam.sock")
	if err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		return
	}
	defer c.Close()
	f, err := c.(*net.UnixConn).File()
	if err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		return
	}
	child, err := beam.FileConn(f)
	if err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		return
	}
	defer child.Close()
	sender := engine.NewSender(child)
	if err := sender.Install(eng); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		return
	}
	cmd := eng.Job(os.Args[1], os.Args[2:]...)
	cmd.Stdout.Add(os.Stdout)
	cmd.Stderr.Add(os.Stderr)
	if err := cmd.Run(); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
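The client above assumes a beam server is already listening on beam.sock. Below is a minimal sketch of the matching listener, using net.ListenUnix from the standard library together with the beam.FileConn and engine.NewReceiver calls that appear later in this section. The serve function and the myservice installer are hypothetical names for illustration; myservice stands in for any engine.Installer.

func serve() error {
	l, err := net.ListenUnix("unix", &net.UnixAddr{Name: "beam.sock", Net: "unix"})
	if err != nil {
		return err
	}
	defer l.Close()
	for {
		conn, err := l.AcceptUnix()
		if err != nil {
			return err
		}
		// conn.File() duplicates the fd, so the net.Conn can be closed.
		f, err := conn.File()
		conn.Close()
		if err != nil {
			continue
		}
		go func() {
			peer, err := beam.FileConn(f)
			f.Close()
			if err != nil {
				return
			}
			defer peer.Close()
			// Serve jobs from this client on a fresh engine, with
			// myservice's handlers installed (myservice is hypothetical).
			rcv := engine.NewReceiver(peer)
			if err := myservice.Install(rcv.Engine); err != nil {
				return
			}
			rcv.Run()
		}()
	}
}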
// Spawn starts a new Engine in a child process and returns
// a proxy Engine through which it can be controlled.
//
// The commands available on the child engine are determined
// by an earlier call to Init. It is important that Init be
// called at the very beginning of the current program - this
// allows it to be called as a re-execution hook in the child
// process.
//
// Long story short, if you want to expose `myservice` in a child
// process, do this:
//
//	func main() {
//		spawn.Init(myservice)
//		[..]
//		child, err := spawn.Spawn()
//		[..]
//		child.Job("dosomething").Run()
//	}
func Spawn() (*engine.Engine, error) {
	if !initCalled {
		return nil, fmt.Errorf("spawn.Init must be called at the top of the main() function")
	}
	cmd := exec.Command(utils.SelfPath())
	cmd.Env = append(cmd.Env, "ENGINESPAWN=1")
	local, remote, err := beam.SocketPair()
	if err != nil {
		return nil, err
	}
	child, err := beam.FileConn(local)
	if err != nil {
		local.Close()
		remote.Close()
		return nil, err
	}
	local.Close()
	cmd.ExtraFiles = append(cmd.ExtraFiles, remote)
	// FIXME: the beam/engine glue has no way to inform the caller
	// of the child's termination. The next call will simply return
	// an error.
	if err := cmd.Start(); err != nil {
		child.Close()
		return nil, err
	}
	// The child now owns remote; close our copy so EOF can be
	// detected when the child exits.
	remote.Close()
	eng := engine.New()
	if err := engine.NewSender(child).Install(eng); err != nil {
		child.Close()
		return nil, err
	}
	return eng, nil
}
// Init checks if the current process has been created by Spawn.
//
// If not, it returns nil and the original program can continue
// unmodified.
//
// If so, it hijacks the process to run as a child worker controlled
// by its parent over a beam connection, with f exposed as a remote
// service. In this case Init never returns.
//
// The hijacking process takes place as follows:
//   - Open file descriptor 3 as a beam endpoint. If this fails,
//     terminate the current process.
//   - Start a new engine.
//   - Call f.Install on the engine. Any handlers registered
//     will be available for remote invocation by the parent.
//   - Listen for beam messages from the parent and pass them to
//     the handlers.
//   - When the beam endpoint is closed by the parent, terminate
//     the current process.
//
// NOTE: Init must be called at the beginning of the same program
// calling Spawn. This is because Spawn approximates a "fork" by
// re-executing the current binary - where it expects spawn.Init
// to intercept the control flow and execute the worker code.
func Init(f engine.Installer) error {
	initCalled = true
	if os.Getenv("ENGINESPAWN") != "1" {
		return nil
	}
	fmt.Printf("[%d child]\n", os.Getpid())
	// Hijack the process
	childErr := func() error {
		fd3 := os.NewFile(3, "beam-introspect")
		introsp, err := beam.FileConn(fd3)
		if err != nil {
			return fmt.Errorf("beam introspection error: %v", err)
		}
		fd3.Close()
		defer introsp.Close()
		eng := engine.NewReceiver(introsp)
		if err := f.Install(eng.Engine); err != nil {
			return err
		}
		if err := eng.Run(); err != nil {
			return err
		}
		return nil
	}()
	if childErr != nil {
		os.Exit(1)
	}
	os.Exit(0)
	return nil // Never reached
}
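Putting Init and Spawn together, a parent program follows the pattern sketched below. This expands the usage example from the Spawn doc comment with error handling and output wiring, using only calls shown in this section; myservice is a hypothetical engine.Installer, and imports are omitted as in the surrounding snippets.

func main() {
	// Init must run first: in the re-executed child it hijacks the
	// process and never returns; in the parent it is a no-op.
	if err := spawn.Init(myservice); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
	// Past this point we are guaranteed to be the parent.
	child, err := spawn.Spawn()
	if err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
	// Jobs sent to the proxy engine execute in the child process.
	job := child.Job("dosomething")
	job.Stdout.Add(os.Stdout)
	job.Stderr.Add(os.Stderr)
	if err := job.Run(); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}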
func CmdExec(args []string, stdout, stderr io.Writer, in beam.Receiver, out beam.Sender) {
	cmd := exec.Command(args[1], args[2:]...)
	cmd.Stdout = stdout
	cmd.Stderr = stderr
	//cmd.Stdin = os.Stdin
	local, remote, err := beam.SocketPair()
	if err != nil {
		fmt.Fprintf(stderr, "%v\n", err)
		return
	}
	child, err := beam.FileConn(local)
	if err != nil {
		local.Close()
		remote.Close()
		fmt.Fprintf(stderr, "%v\n", err)
		return
	}
	local.Close()
	cmd.ExtraFiles = append(cmd.ExtraFiles, remote)
	var tasks sync.WaitGroup
	tasks.Add(1)
	go func() {
		defer Debugf("done copying to child\n")
		defer tasks.Done()
		defer child.CloseWrite()
		beam.Copy(child, in)
	}()
	tasks.Add(1)
	go func() {
		defer Debugf("done copying from child\n")
		defer tasks.Done()
		r := beam.NewRouter(out)
		r.NewRoute().All().Handler(func(p []byte, a *os.File) error {
			return out.Send(data.Message(p).Set("pid", fmt.Sprintf("%d", cmd.Process.Pid)).Bytes(), a)
		})
		beam.Copy(r, child)
	}()
	execErr := cmd.Run()
	// We can close both ends of the socket without worrying about data
	// stuck in the buffer, because unix socket writes are fully synchronous.
	child.Close()
	tasks.Wait()
	var status string
	if execErr != nil {
		status = execErr.Error()
	} else {
		status = "ok"
	}
	out.Send(data.Empty().Set("status", status).Set("cmd", args...).Bytes(), nil)
}
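CmdExec has the handler signature used by the dispatcher shown later in this section (args, stdout/stderr writers, and a beam receiver/sender pair). Below is a minimal sketch of driving it directly over a socket pair, using only the beam APIs that appear in these snippets; runLs is a hypothetical name for illustration.

func runLs() error {
	// One end stays with us; the other is handed to CmdExec as both
	// its beam input and output channel.
	local, remote, err := beam.USocketPair()
	if err != nil {
		return err
	}
	defer local.Close()
	go func() {
		defer remote.Close()
		CmdExec([]string{"exec", "ls", "-l"}, os.Stdout, os.Stderr, remote, remote)
	}()
	// We have nothing to send on the command's beam channel; close our
	// write side so CmdExec's input copier sees EOF.
	local.CloseWrite()
	// Drain the pid-tagged and status messages CmdExec sends back.
	r := beam.NewRouter(nil)
	r.NewRoute().All().Handler(func(p []byte, f *os.File) error {
		fmt.Printf("reply: %s\n", p)
		if f != nil {
			f.Close()
		}
		return nil
	})
	_, err = beam.Copy(r, local)
	return err
}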
func (rcv *Receiver) Run() error {
	r := beam.NewRouter(nil)
	r.NewRoute().KeyExists("cmd").Handler(func(p []byte, f *os.File) error {
		// Use the attachment as a beam return channel
		peer, err := beam.FileConn(f)
		if err != nil {
			f.Close()
			return err
		}
		f.Close()
		defer peer.Close()
		msg := data.Message(p)
		cmd := msg.Get("cmd")
		job := rcv.Engine.Job(cmd[0], cmd[1:]...)
		// Decode env
		env, err := data.Decode(msg.GetOne("env"))
		if err != nil {
			return fmt.Errorf("error decoding 'env': %v", err)
		}
		job.Env().InitMultiMap(env)
		stdout, err := beam.SendRPipe(peer, data.Empty().Set("cmd", "log", "stdout").Bytes())
		if err != nil {
			return err
		}
		job.Stdout.Add(stdout)
		stderr, err := beam.SendRPipe(peer, data.Empty().Set("cmd", "log", "stderr").Bytes())
		if err != nil {
			return err
		}
		job.Stderr.Add(stderr)
		stdin, err := beam.SendWPipe(peer, data.Empty().Set("cmd", "log", "stdin").Bytes())
		if err != nil {
			return err
		}
		job.Stdin.Add(stdin)
		// ignore error because we pass the raw status
		job.Run()
		err = peer.Send(data.Empty().Set("cmd", "status", fmt.Sprintf("%d", job.status)).Bytes(), nil)
		if err != nil {
			return err
		}
		return nil
	})
	_, err := beam.Copy(r, rcv.peer)
	return err
}
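The client half of the protocol that Receiver.Run implements looks roughly like the sketch below: a message carrying a "cmd" key, whose attachment becomes the return channel for the log pipes and the final status message. sendJob is a hypothetical helper built only from calls shown in these snippets; a real sender would also include the "env" field that Receiver.Run decodes, which this sketch omits.

func sendJob(peer beam.Sender, name string, args ...string) error {
	local, remote, err := beam.SocketPair()
	if err != nil {
		return err
	}
	// The remote end travels as the attachment; the receiver wraps it
	// with beam.FileConn and replies on it.
	if err := peer.Send(data.Empty().Set("cmd", append([]string{name}, args...)...).Bytes(), remote); err != nil {
		local.Close()
		return err
	}
	ret, err := beam.FileConn(local)
	if err != nil {
		local.Close()
		return err
	}
	local.Close()
	defer ret.Close()
	// Route the replies: "log" messages carry stdout/stderr/stdin pipes
	// as attachments, "status" carries the job's exit status.
	r := beam.NewRouter(nil)
	r.NewRoute().KeyExists("cmd").Handler(func(p []byte, f *os.File) error {
		fmt.Printf("reply: %v\n", data.Message(p).Get("cmd"))
		if f != nil {
			f.Close()
		}
		return nil
	})
	_, err = beam.Copy(r, ret)
	return err
}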
func (rcv *Receiver) Run() error {
	r := beam.NewRouter(nil)
	r.NewRoute().KeyExists("cmd").Handler(func(p []byte, f *os.File) error {
		// Use the attachment as a beam return channel
		peer, err := beam.FileConn(f)
		if err != nil {
			f.Close()
			return err
		}
		cmd := data.Message(p).Get("cmd")
		job := rcv.Engine.Job(cmd[0], cmd[1:]...)
		stdout, err := beam.SendPipe(peer, data.Empty().Set("cmd", "log", "stdout").Bytes())
		if err != nil {
			return err
		}
		job.Stdout.Add(stdout)
		stderr, err := beam.SendPipe(peer, data.Empty().Set("cmd", "log", "stderr").Bytes())
		if err != nil {
			return err
		}
		job.Stderr.Add(stderr)
		stdin, err := beam.SendPipe(peer, data.Empty().Set("cmd", "log", "stdin").Bytes())
		if err != nil {
			return err
		}
		job.Stdin.Add(stdin)
		// ignore error because we pass the raw status
		job.Run()
		err = peer.Send(data.Empty().Set("cmd", "status", fmt.Sprintf("%d", job.status)).Bytes(), nil)
		if err != nil {
			return err
		}
		return nil
	})
	_, err := beam.Copy(r, rcv.peer)
	return err
}
func main() {
	fd3 := os.NewFile(3, "beam-introspect")
	if introsp, err := beam.FileConn(fd3); err == nil {
		introspect = introsp
		Logf("introspection enabled\n")
	} else {
		Logf("introspection disabled\n")
	}
	fd3.Close()
	flag.BoolVar(&flX, "x", false, "print commands as they are being executed")
	flag.Parse()
	if flag.NArg() == 0 {
		if term.IsTerminal(0) {
			// No arguments, stdin is a terminal --> interactive mode
			input := bufio.NewScanner(os.Stdin)
			for {
				fmt.Printf("[%d] beamsh> ", os.Getpid())
				if !input.Scan() {
					break
				}
				line := input.Text()
				if len(line) != 0 {
					cmd, err := dockerscript.Parse(strings.NewReader(line))
					if err != nil {
						fmt.Fprintf(os.Stderr, "error: %v\n", err)
						continue
					}
					if err := executeRootScript(cmd); err != nil {
						Fatal(err)
					}
				}
			}
			// Scanner.Err returns nil on EOF, so any error here is fatal.
			if err := input.Err(); err != nil {
				Fatal(err)
			}
		} else {
			// No arguments, stdin is not a terminal --> batch mode
			script, err := dockerscript.Parse(os.Stdin)
			if err != nil {
				Fatal("parse error: %v\n", err)
			}
			if err := executeRootScript(script); err != nil {
				Fatal(err)
			}
		}
	} else {
		// 1+ arguments: parse them as script files
		for _, scriptpath := range flag.Args() {
			f, err := os.Open(scriptpath)
			if err != nil {
				Fatal(err)
			}
			script, err := dockerscript.Parse(f)
			f.Close()
			if err != nil {
				Fatal("parse error: %v\n", err)
			}
			if err := executeRootScript(script); err != nil {
				Fatal(err)
			}
		}
	}
}
func Handlers(sink beam.Sender) (*beam.UnixConn, error) {
	var tasks sync.WaitGroup
	pub, priv, err := beam.USocketPair()
	if err != nil {
		return nil, err
	}
	go func() {
		defer func() {
			Debugf("[handlers] closewrite() on endpoint\n")
			// FIXME: this is not yet necessary but will be once
			// there is synchronization over standard beam messages
			priv.CloseWrite()
			Debugf("[handlers] done closewrite() on endpoint\n")
		}()
		r := beam.NewRouter(sink)
		r.NewRoute().HasAttachment().KeyIncludes("type", "job").Handler(func(payload []byte, attachment *os.File) error {
			conn, err := beam.FileConn(attachment)
			if err != nil {
				attachment.Close()
				return err
			}
			// attachment.Close()
			tasks.Add(1)
			go func() {
				defer tasks.Done()
				defer func() {
					Debugf("[handlers] '%s' closewrite\n", payload)
					conn.CloseWrite()
					Debugf("[handlers] '%s' done closewrite\n", payload)
				}()
				cmd := data.Message(payload).Get("cmd")
				Debugf("[handlers] received %s\n", strings.Join(cmd, " "))
				if len(cmd) == 0 {
					return
				}
				handler := GetHandler(cmd[0])
				if handler == nil {
					return
				}
				stdout, err := beam.SendRPipe(conn, data.Empty().Set("cmd", "log", "stdout").Set("fromcmd", cmd...).Bytes())
				if err != nil {
					return
				}
				defer stdout.Close()
				stderr, err := beam.SendRPipe(conn, data.Empty().Set("cmd", "log", "stderr").Set("fromcmd", cmd...).Bytes())
				if err != nil {
					return
				}
				defer stderr.Close()
				Debugf("[handlers] calling %s\n", strings.Join(cmd, " "))
				handler(cmd, stdout, stderr, beam.Receiver(conn), beam.Sender(conn))
				Debugf("[handlers] returned: %s\n", strings.Join(cmd, " "))
			}()
			return nil
		})
		beam.Copy(r, priv)
		Debugf("[handlers] waiting for all tasks\n")
		tasks.Wait()
		Debugf("[handlers] all tasks returned\n")
	}()
	return pub, nil
}
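To dispatch a command through Handlers, a caller sends a message with type=job and a socket attachment; that socket becomes the handler's beam connection, over which the log pipes flow back. The sketch below uses only calls from these snippets, but it makes two assumptions worth flagging: the runThroughHandlers name is hypothetical, a nil sink is passed on the strength of beam.NewRouter(nil) appearing elsewhere in this section, and an "exec" handler (such as CmdExec above) is presumed to be registered with GetHandler's registry.

func runThroughHandlers() error {
	// The pub endpoint accepts job messages; replies flow back over
	// each job's own attachment socket.
	endpoint, err := Handlers(nil)
	if err != nil {
		return err
	}
	defer endpoint.Close()
	local, remote, err := beam.SocketPair()
	if err != nil {
		return err
	}
	msg := data.Empty().Set("type", "job").Set("cmd", "exec", "echo", "hello")
	if err := endpoint.Send(msg.Bytes(), remote); err != nil {
		local.Close()
		return err
	}
	reply, err := beam.FileConn(local)
	if err != nil {
		local.Close()
		return err
	}
	local.Close()
	defer reply.Close()
	// Print every reply message; log pipes arrive as attachments.
	r := beam.NewRouter(nil)
	r.NewRoute().All().Handler(func(p []byte, f *os.File) error {
		fmt.Printf("reply: %s\n", p)
		if f != nil {
			f.Close()
		}
		return nil
	})
	_, err = beam.Copy(r, reply)
	return err
}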