func TestSendConn(t *testing.T) { a, b, err := USocketPair() if err != nil { t.Fatal(err) } defer a.Close() defer b.Close() go func() { conn, err := SendConn(a, data.Empty().Set("type", "connection").Bytes()) if err != nil { t.Fatal(err) } if err := conn.Send(data.Empty().Set("foo", "bar").Bytes(), nil); err != nil { t.Fatal(err) } conn.CloseWrite() }() payload, conn, err := ReceiveConn(b) if err != nil { t.Fatal(err) } if val := data.Message(string(payload)).Get("type"); val == nil || val[0] != "connection" { t.Fatalf("%v != %v\n", val, "connection") } msg, _, err := conn.Receive() if err != nil { t.Fatal(err) } if val := data.Message(string(msg)).Get("foo"); val == nil || val[0] != "bar" { t.Fatalf("%v != %v\n", val, "bar") } }
func CmdRender(args []string, stdout, stderr io.Writer, in beam.Receiver, out beam.Sender) { if len(args) != 2 { fmt.Fprintf(stderr, "Usage: %s FORMAT\n", args[0]) out.Send(data.Empty().Set("status", "1").Bytes(), nil) return } txt := args[1] if !strings.HasSuffix(txt, "\n") { txt += "\n" } t := template.Must(template.New("render").Parse(txt)) for { payload, attachment, err := in.Receive() if err != nil { return } msg, err := data.Decode(string(payload)) if err != nil { fmt.Fprintf(stderr, "decode error: %v\n") } if err := t.Execute(stdout, msg); err != nil { fmt.Fprintf(stderr, "rendering error: %v\n", err) out.Send(data.Empty().Set("status", "1").Bytes(), nil) return } if err := out.Send(payload, attachment); err != nil { return } } }
func CmdListen(args []string, stdout, stderr io.Writer, in beam.Receiver, out beam.Sender) { if len(args) != 2 { out.Send(data.Empty().Set("status", "1").Set("message", "wrong number of arguments").Bytes(), nil) return } u, err := url.Parse(args[1]) if err != nil { out.Send(data.Empty().Set("status", "1").Set("message", err.Error()).Bytes(), nil) return } l, err := net.Listen(u.Scheme, u.Host) if err != nil { out.Send(data.Empty().Set("status", "1").Set("message", err.Error()).Bytes(), nil) return } for { conn, err := l.Accept() if err != nil { out.Send(data.Empty().Set("status", "1").Set("message", err.Error()).Bytes(), nil) return } f, err := connToFile(conn) if err != nil { conn.Close() continue } out.Send(data.Empty().Set("type", "socket").Set("remoteaddr", conn.RemoteAddr().String()).Bytes(), f) } }
func CmdConnect(args []string, stdout, stderr io.Writer, in beam.Receiver, out beam.Sender) { if len(args) != 2 { out.Send(data.Empty().Set("status", "1").Set("message", "wrong number of arguments").Bytes(), nil) return } u, err := url.Parse(args[1]) if err != nil { out.Send(data.Empty().Set("status", "1").Set("message", err.Error()).Bytes(), nil) return } var tasks sync.WaitGroup for { _, attachment, err := in.Receive() if err != nil { break } if attachment == nil { continue } Logf("connecting to %s/%s\n", u.Scheme, u.Host) conn, err := net.Dial(u.Scheme, u.Host) if err != nil { out.Send(data.Empty().Set("cmd", "msg", "connect error: "+err.Error()).Bytes(), nil) return } out.Send(data.Empty().Set("cmd", "msg", "connection established").Bytes(), nil) tasks.Add(1) go func(attachment *os.File, conn net.Conn) { defer tasks.Done() // even when successful, conn.File() returns a duplicate, // so we must close the original var iotasks sync.WaitGroup iotasks.Add(2) go func(attachment *os.File, conn net.Conn) { defer iotasks.Done() io.Copy(attachment, conn) }(attachment, conn) go func(attachment *os.File, conn net.Conn) { defer iotasks.Done() io.Copy(conn, attachment) }(attachment, conn) iotasks.Wait() conn.Close() attachment.Close() }(attachment, conn) } tasks.Wait() }
func CmdBeamreceive(args []string, stdout, stderr io.Writer, in beam.Receiver, out beam.Sender) { if len(args) != 2 { if err := out.Send(data.Empty().Set("status", "1").Set("message", "wrong number of arguments").Bytes(), nil); err != nil { Fatal(err) } return } var connector func(string) (chan net.Conn, error) connector = listener connections, err := connector(args[1]) if err != nil { out.Send(data.Empty().Set("status", "1").Set("message", err.Error()).Bytes(), nil) return } // Copy in to conn ReceiveFromConn(connections, out) }
// Run serves incoming beam messages on rcv.peer: for every message that
// carries a "cmd" key and a file attachment, it turns the attachment into
// a return channel, starts the corresponding engine job with env and
// stdio wired back to that channel, and finally reports the job's raw
// exit status as a "status" message. It blocks until the peer is drained.
func (rcv *Receiver) Run() error {
	r := beam.NewRouter(nil)
	r.NewRoute().KeyExists("cmd").Handler(func(p []byte, f *os.File) error {
		// Use the attachment as a beam return channel
		peer, err := beam.FileConn(f)
		if err != nil {
			f.Close()
			return err
		}
		// FileConn succeeded; the original fd is no longer needed
		// (presumably duplicated by FileConn — confirm).
		f.Close()
		defer peer.Close()
		msg := data.Message(p)
		cmd := msg.Get("cmd")
		job := rcv.Engine.Job(cmd[0], cmd[1:]...)
		// Decode env
		env, err := data.Decode(msg.GetOne("env"))
		if err != nil {
			return fmt.Errorf("error decoding 'env': %v", err)
		}
		job.Env().InitMultiMap(env)
		// Wire the job's stdout/stderr back to the caller as read pipes,
		// and stdin as a write pipe.
		stdout, err := beam.SendRPipe(peer, data.Empty().Set("cmd", "log", "stdout").Bytes())
		if err != nil {
			return err
		}
		job.Stdout.Add(stdout)
		stderr, err := beam.SendRPipe(peer, data.Empty().Set("cmd", "log", "stderr").Bytes())
		if err != nil {
			return err
		}
		job.Stderr.Add(stderr)
		stdin, err := beam.SendWPipe(peer, data.Empty().Set("cmd", "log", "stdin").Bytes())
		if err != nil {
			return err
		}
		job.Stdin.Add(stdin)
		// ignore error because we pass the raw status
		job.Run()
		err = peer.Send(data.Empty().Set("cmd", "status", fmt.Sprintf("%d", job.status)).Bytes(), nil)
		if err != nil {
			return err
		}
		return nil
	})
	_, err := beam.Copy(r, rcv.peer)
	return err
}
// Handle ships a job to the remote side over beam: it sends the command
// and encoded environment, then routes the replies — attaching stdout and
// stderr streams to the job's outputs, feeding job.Stdin into the remote
// stdin, and capturing the final "status" message as the return value.
func (s *Sender) Handle(job *Job) Status {
	cmd := append([]string{job.Name}, job.Args...)
	env := data.Encode(job.Env().MultiMap())
	msg := data.Empty().Set("cmd", cmd...).Set("env", env)
	peer, err := beam.SendConn(s, msg.Bytes())
	if err != nil {
		return job.Errorf("beamsend: %v", err)
	}
	defer peer.Close()
	// Wait for the stdout/stderr copy goroutines before returning
	// (deferred, so it runs after peer.Close above in LIFO order).
	var tasks sync.WaitGroup
	defer tasks.Wait()
	r := beam.NewRouter(nil)
	r.NewRoute().KeyStartsWith("cmd", "log", "stdout").HasAttachment().Handler(func(p []byte, stdout *os.File) error {
		tasks.Add(1)
		go func() {
			io.Copy(job.Stdout, stdout)
			stdout.Close()
			tasks.Done()
		}()
		return nil
	})
	r.NewRoute().KeyStartsWith("cmd", "log", "stderr").HasAttachment().Handler(func(p []byte, stderr *os.File) error {
		tasks.Add(1)
		go func() {
			io.Copy(job.Stderr, stderr)
			stderr.Close()
			tasks.Done()
		}()
		return nil
	})
	r.NewRoute().KeyStartsWith("cmd", "log", "stdin").HasAttachment().Handler(func(p []byte, stdin *os.File) error {
		// NOTE(review): this goroutine is not tracked by tasks, so Handle
		// may return while the stdin copy is still running — confirm this
		// is intentional (stdin may block indefinitely on job.Stdin).
		go func() {
			io.Copy(stdin, job.Stdin)
			stdin.Close()
		}()
		return nil
	})
	var status int
	r.NewRoute().KeyStartsWith("cmd", "status").Handler(func(p []byte, f *os.File) error {
		cmd := data.Message(p).Get("cmd")
		if len(cmd) != 2 {
			return fmt.Errorf("usage: %s <0-127>", cmd[0])
		}
		// NOTE(review): ParseUint with bitSize 8 accepts 0-255, wider
		// than the 0-127 range the usage string claims — confirm.
		s, err := strconv.ParseUint(cmd[1], 10, 8)
		if err != nil {
			return fmt.Errorf("usage: %s <0-127>", cmd[0])
		}
		status = int(s)
		return nil
	})
	if _, err := beam.Copy(r, peer); err != nil {
		return job.Errorf("%v", err)
	}
	return Status(status)
}
// executeRootScript runs a dockerscript program. If root plugins are
// configured, the script is first wrapped inside them: each plugin
// becomes a command whose children are the next plugin, with the original
// script as the innermost children. An optional introspection endpoint
// is serviced in the background while the script runs.
func executeRootScript(script []*dockerscript.Command) error {
	if len(rootPlugins) > 0 {
		// If there are root plugins, wrap the script inside them
		var (
			rootCmd *dockerscript.Command
			lastCmd *dockerscript.Command
		)
		// Build a chain: plugin1 -> plugin2 -> ... -> script.
		for _, plugin := range rootPlugins {
			pluginCmd := &dockerscript.Command{
				Args: []string{plugin},
			}
			if rootCmd == nil {
				rootCmd = pluginCmd
			} else {
				lastCmd.Children = []*dockerscript.Command{pluginCmd}
			}
			lastCmd = pluginCmd
		}
		lastCmd.Children = script
		script = []*dockerscript.Command{rootCmd}
	}
	handlers, err := Handlers(introspect)
	if err != nil {
		return err
	}
	defer handlers.Close()
	var tasks sync.WaitGroup
	// Deferred: block until the introspection goroutine (if any) drains.
	defer func() {
		Debugf("Waiting for introspection...\n")
		tasks.Wait()
		Debugf("DONE Waiting for introspection\n")
	}()
	if introspect != nil {
		tasks.Add(1)
		go func() {
			Debugf("starting introspection\n")
			defer Debugf("done with introspection\n")
			defer tasks.Done()
			introspect.Send(data.Empty().Set("cmd", "log", "stdout").Set("message", "introspection worked!").Bytes(), nil)
			Debugf("XXX starting reading introspection messages\n")
			// Log and forward every introspection message to handlers.
			r := beam.NewRouter(handlers)
			r.NewRoute().All().Handler(func(p []byte, a *os.File) error {
				Logf("[INTROSPECTION] %s\n", beam.MsgDesc(p, a))
				return handlers.Send(p, a)
			})
			n, err := beam.Copy(r, introspect)
			Debugf("XXX done reading %d introspection messages: %v\n", n, err)
		}()
	}
	if err := executeScript(handlers, script); err != nil {
		return err
	}
	return nil
}
func CmdOpenfile(args []string, stdout, stderr io.Writer, in beam.Receiver, out beam.Sender) { for _, name := range args { f, err := os.Open(name) if err != nil { continue } if err := out.Send(data.Empty().Set("path", name).Set("type", "file").Bytes(), f); err != nil { f.Close() } } }
func CmdExec(args []string, stdout, stderr io.Writer, in beam.Receiver, out beam.Sender) { cmd := exec.Command(args[1], args[2:]...) cmd.Stdout = stdout cmd.Stderr = stderr //cmd.Stdin = os.Stdin local, remote, err := beam.SocketPair() if err != nil { fmt.Fprintf(stderr, "%v\n", err) return } child, err := beam.FileConn(local) if err != nil { local.Close() remote.Close() fmt.Fprintf(stderr, "%v\n", err) return } local.Close() cmd.ExtraFiles = append(cmd.ExtraFiles, remote) var tasks sync.WaitGroup tasks.Add(1) go func() { defer Debugf("done copying to child\n") defer tasks.Done() defer child.CloseWrite() beam.Copy(child, in) }() tasks.Add(1) go func() { defer Debugf("done copying from child %d\n") defer tasks.Done() r := beam.NewRouter(out) r.NewRoute().All().Handler(func(p []byte, a *os.File) error { return out.Send(data.Message(p).Set("pid", fmt.Sprintf("%d", cmd.Process.Pid)).Bytes(), a) }) beam.Copy(r, child) }() execErr := cmd.Run() // We can close both ends of the socket without worrying about data stuck in the buffer, // because unix socket writes are fully synchronous. child.Close() tasks.Wait() var status string if execErr != nil { status = execErr.Error() } else { status = "ok" } out.Send(data.Empty().Set("status", status).Set("cmd", args...).Bytes(), nil) }
// Run serves incoming beam messages on rcv.peer: for every message that
// carries a "cmd" key and a file attachment, it turns the attachment into
// a return channel, starts the corresponding engine job with stdio wired
// back to that channel, and reports the job's raw exit status.
func (rcv *Receiver) Run() error {
	r := beam.NewRouter(nil)
	r.NewRoute().KeyExists("cmd").Handler(func(p []byte, f *os.File) error {
		// Use the attachment as a beam return channel
		peer, err := beam.FileConn(f)
		if err != nil {
			f.Close()
			return err
		}
		// NOTE(review): f is not closed on the success path, and peer is
		// never closed — compare with the variant of Run that closes both;
		// confirm whether FileConn takes ownership of the fd here.
		cmd := data.Message(p).Get("cmd")
		job := rcv.Engine.Job(cmd[0], cmd[1:]...)
		stdout, err := beam.SendPipe(peer, data.Empty().Set("cmd", "log", "stdout").Bytes())
		if err != nil {
			return err
		}
		job.Stdout.Add(stdout)
		stderr, err := beam.SendPipe(peer, data.Empty().Set("cmd", "log", "stderr").Bytes())
		if err != nil {
			return err
		}
		job.Stderr.Add(stderr)
		// NOTE(review): stdin uses SendPipe like stdout/stderr, while the
		// other Run variant uses SendWPipe for stdin — confirm direction.
		stdin, err := beam.SendPipe(peer, data.Empty().Set("cmd", "log", "stdin").Bytes())
		if err != nil {
			return err
		}
		job.Stdin.Add(stdin)
		// ignore error because we pass the raw status
		job.Run()
		err = peer.Send(data.Empty().Set("cmd", "status", fmt.Sprintf("%d", job.status)).Bytes(), nil)
		if err != nil {
			return err
		}
		return nil
	})
	_, err := beam.Copy(r, rcv.peer)
	return err
}
// 1) Find a handler for the command (if no handler, fail)
// 2) Attach new in & out pair to the handler
// 3) [in the background] Copy handler output to our own output
// 4) [in the background] Run the handler
// 5) Recursively executeScript() all children commands and wait for them to complete
// 6) Wait for handler to return and (shortly afterwards) output copy to complete
// 7) Profit
func executeCommand(out beam.Sender, cmd *dockerscript.Command) error {
	// With -x, echo the command (and its sub-lines) prefixed with "+ ".
	if flX {
		fmt.Printf("+ %v\n", strings.Replace(strings.TrimRight(cmd.String(), "\n"), "\n", "\n+ ", -1))
	}
	Debugf("executeCommand(%s)\n", strings.Join(cmd.Args, " "))
	defer Debugf("executeCommand(%s) DONE\n", strings.Join(cmd.Args, " "))
	if len(cmd.Args) == 0 {
		return fmt.Errorf("empty command")
	}
	Debugf("[executeCommand] sending job '%s'\n", strings.Join(cmd.Args, " "))
	// Ship the command as a "job" message; job is the nested connection
	// over which the handler's output comes back.
	job, err := beam.SendConn(out, data.Empty().Set("cmd", cmd.Args...).Set("type", "job").Bytes())
	if err != nil {
		return fmt.Errorf("%v\n", err)
	}
	var tasks sync.WaitGroup
	tasks.Add(1)
	Debugf("[executeCommand] spawning background copy of the output of '%s'\n", strings.Join(cmd.Args, " "))
	go func() {
		if out != nil {
			Debugf("[executeCommand] background copy of the output of '%s'\n", strings.Join(cmd.Args, " "))
			n, err := beam.Copy(out, job)
			if err != nil {
				Fatalf("[executeCommand] [%s] error during background copy: %v\n", strings.Join(cmd.Args, " "), err)
			}
			Debugf("[executeCommand] background copy done of the output of '%s': copied %d messages\n", strings.Join(cmd.Args, " "), n)
		}
		tasks.Done()
	}()
	// depth-first execution of children commands
	// executeScript() blocks until all commands are completed
	Debugf("[executeCommand] recursively running children of '%s'\n", strings.Join(cmd.Args, " "))
	executeScript(job, cmd.Children)
	Debugf("[executeCommand] DONE recursively running children of '%s'\n", strings.Join(cmd.Args, " "))
	// Closing our write side signals the handler that no more input is
	// coming, which lets the background copy (and the job) terminate.
	job.CloseWrite()
	Debugf("[executeCommand] closing the input of '%s' (all children are completed)\n", strings.Join(cmd.Args, " "))
	Debugf("[executeCommand] waiting for background copy of '%s' to complete...\n", strings.Join(cmd.Args, " "))
	tasks.Wait()
	Debugf("[executeCommand] background copy of '%s' complete! This means the job completed.\n", strings.Join(cmd.Args, " "))
	return nil
}
func CmdPrompt(args []string, stdout, stderr io.Writer, in beam.Receiver, out beam.Sender) { if len(args) < 2 { fmt.Fprintf(stderr, "usage: %s PROMPT...\n", args[0]) return } if !term.IsTerminal(0) { fmt.Fprintf(stderr, "can't prompt: no tty available...\n") return } fmt.Printf("%s: ", strings.Join(args[1:], " ")) oldState, _ := term.SaveState(0) term.DisableEcho(0, oldState) line, _, err := bufio.NewReader(os.Stdin).ReadLine() if err != nil { fmt.Fprintln(stderr, err.Error()) return } val := string(line) fmt.Printf("\n") term.RestoreTerminal(0, oldState) out.Send(data.Empty().Set("fromcmd", args...).Set("value", val).Bytes(), nil) }
// Handlers returns the public half of a unix socket pair; a background
// goroutine serves the private half, dispatching every "job" message with
// an attachment to its registered command handler. Each handler runs in
// its own goroutine with stdout/stderr pipes sent back to the caller; the
// private endpoint's write side is half-closed once all messages are
// routed, after which Handlers' goroutine waits for every handler task.
func Handlers(sink beam.Sender) (*beam.UnixConn, error) {
	var tasks sync.WaitGroup
	pub, priv, err := beam.USocketPair()
	if err != nil {
		return nil, err
	}
	go func() {
		defer func() {
			Debugf("[handlers] closewrite() on endpoint\n")
			// FIXME: this is not yet necessary but will be once
			// there is synchronization over standard beam messages
			priv.CloseWrite()
			Debugf("[handlers] done closewrite() on endpoint\n")
		}()
		// Route "job" messages with attachments; everything else falls
		// through to sink.
		r := beam.NewRouter(sink)
		r.NewRoute().HasAttachment().KeyIncludes("type", "job").Handler(func(payload []byte, attachment *os.File) error {
			conn, err := beam.FileConn(attachment)
			if err != nil {
				attachment.Close()
				return err
			}
			// attachment.Close()
			tasks.Add(1)
			go func() {
				defer tasks.Done()
				defer func() {
					Debugf("[handlers] '%s' closewrite\n", payload)
					conn.CloseWrite()
					Debugf("[handlers] '%s' done closewrite\n", payload)
				}()
				cmd := data.Message(payload).Get("cmd")
				Debugf("[handlers] received %s\n", strings.Join(cmd, " "))
				// Silently drop jobs with no command or no handler.
				if len(cmd) == 0 {
					return
				}
				handler := GetHandler(cmd[0])
				if handler == nil {
					return
				}
				// Give the handler stdout/stderr pipes that report back
				// over the job connection.
				stdout, err := beam.SendRPipe(conn, data.Empty().Set("cmd", "log", "stdout").Set("fromcmd", cmd...).Bytes())
				if err != nil {
					return
				}
				defer stdout.Close()
				stderr, err := beam.SendRPipe(conn, data.Empty().Set("cmd", "log", "stderr").Set("fromcmd", cmd...).Bytes())
				if err != nil {
					return
				}
				defer stderr.Close()
				Debugf("[handlers] calling %s\n", strings.Join(cmd, " "))
				handler(cmd, stdout, stderr, beam.Receiver(conn), beam.Sender(conn))
				Debugf("[handlers] returned: %s\n", strings.Join(cmd, " "))
			}()
			return nil
		})
		beam.Copy(r, priv)
		Debugf("[handlers] waiting for all tasks\n")
		tasks.Wait()
		Debugf("[handlers] all tasks returned\n")
	}()
	return pub, nil
}