// runLog fetches a job's log and copies it to the terminal, sending stderr
// to os.Stderr when split output is enabled.
func runLog(cmd *Command, args []string, client *controller.Client) error {
	if len(args) != 1 {
		cmd.printUsage(true)
	}
	rc, err := client.GetJobLog(mustApp(), args[0])
	if err != nil {
		return err
	}
	var stderr io.Writer
	if logSplitOut {
		stderr = os.Stderr
	}
	demultiplex.Copy(os.Stdout, stderr, rc)
	rc.Close()
	return nil
}
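
// demultiplex.Copy is not included in this listing. The sketch below is an
// assumption of roughly how it behaves, based on Docker's attach framing
// (1-byte stream ID, 3 padding bytes, 4-byte big-endian payload length,
// then the payload); the name copySketch and the exact framing details are
// assumptions, not the real implementation. Needs "encoding/binary", "io"
// and "io/ioutil".
func copySketch(stdout, stderr io.Writer, r io.Reader) error {
	var header [8]byte
	for {
		// Each frame starts with an 8-byte header.
		if _, err := io.ReadFull(r, header[:]); err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}
		dst := stdout
		if header[0] == 2 { // stream ID 2 is stderr
			dst = stderr
		}
		if dst == nil {
			dst = ioutil.Discard // tolerate callers that only want one stream
		}
		// Copy exactly the payload length to the chosen writer.
		if _, err := io.CopyN(dst, r, int64(binary.BigEndian.Uint32(header[4:]))); err != nil {
			return err
		}
	}
}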

// jobLog is the controller HTTP handler that attaches to a job on its host
// and streams the log back to the client, either as raw attach output or as
// Server-Sent Events, depending on the Accept header.
func jobLog(req *http.Request, app *ct.App, params martini.Params, cluster cluster.Host, w http.ResponseWriter, r ResponseHelper) {
	attachReq := &host.AttachReq{
		JobID: params["jobs_id"],
		Flags: host.AttachFlagStdout | host.AttachFlagStderr | host.AttachFlagLogs,
	}
	tail := req.FormValue("tail") != ""
	if tail {
		attachReq.Flags |= host.AttachFlagStream
	}
	stream, _, err := cluster.Attach(attachReq, false)
	if err != nil {
		// TODO: handle AttachWouldWait
		r.Error(err)
		return
	}
	defer stream.Close()

	sse := strings.Contains(req.Header.Get("Accept"), "text/event-stream")
	if sse {
		w.Header().Set("Content-Type", "text/event-stream; charset=utf-8")
	} else {
		w.Header().Set("Content-Type", "application/vnd.flynn.attach")
	}
	w.WriteHeader(200)
	// Send headers right away if tailing
	if wf, ok := w.(http.Flusher); ok && tail {
		wf.Flush()
	}

	// TODO: use http.CloseNotifier to clean up when client disconnects
	if sse {
		ssew := NewSSELogWriter(w)
		demultiplex.Copy(flushWriter{ssew.Stream("stdout"), tail}, flushWriter{ssew.Stream("stderr"), tail}, stream)
		// TODO: include exit code here if tailing
		flushWriter{w, tail}.Write([]byte("event: eof\ndata: {}\n\n"))
	} else {
		io.Copy(flushWriter{w, tail}, stream)
	}
}
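
// flushWriter is referenced above but not part of this excerpt. A minimal
// sketch of a plausible definition, assuming it flushes the response after
// every write while tailing so log lines reach the client immediately; the
// field names are assumptions.
type flushWriter struct {
	w       io.Writer
	enabled bool
}

func (f flushWriter) Write(p []byte) (int, error) {
	if f.enabled {
		// Flush after the write so streamed output is not buffered.
		defer func() {
			if fl, ok := f.w.(http.Flusher); ok {
				fl.Flush()
			}
		}()
	}
	return f.w.Write(p)
}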

// Attach connects the request's streams to the job's Docker container: it
// demultiplexes stdout/stderr for non-TTY jobs, resizes the TTY once the
// container is running, and, when streaming, waits for the job to stop and
// returns its exit status as an ExitError.
func (d *DockerBackend) Attach(req *AttachRequest) error {
	outR, outW := io.Pipe()
	opts := docker.AttachToContainerOptions{
		Container:    req.Job.ContainerID,
		InputStream:  req.Stdin,
		OutputStream: outW,
		Logs:         req.Logs,
		Stream:       req.Stream,
		Success:      req.Attached,
		Stdout:       req.Stdout != nil,
		Stderr:       req.Stderr != nil,
		Stdin:        req.Stdin != nil,
	}
	if req.Job.Job.Config.TTY {
		go func() {
			io.Copy(req.Stdout, outR)
			req.Stdout.Close()
		}()
	} else if req.Stdout != nil || req.Stderr != nil {
		go func() {
			demultiplex.Copy(req.Stdout, req.Stderr, outR)
			req.Stdout.Close()
			req.Stderr.Close()
		}()
	}

	if req.Job.Job.Config.TTY && opts.Stdin {
		resize := func() {
			d.docker.ResizeContainerTTY(req.Job.ContainerID, int(req.Height), int(req.Width))
		}
		if req.Job.Status == host.StatusRunning {
			resize()
		} else {
			var once sync.Once
			go func() {
				ch := d.state.AddListener(req.Job.Job.ID)
				defer d.state.RemoveListener(req.Job.Job.ID, ch)
				go func() {
					// There is a race that can result in the listener being
					// added after the container has started, so check the
					// status *after* subscribing.
					// This can deadlock if we try to get a state lock while an
					// event is being sent on the listen channel, so we do it
					// in the goroutine and wrap in a sync.Once.
					j := d.state.GetJob(req.Job.Job.ID)
					if j.Status == host.StatusRunning {
						once.Do(resize)
					}
				}()
				for event := range ch {
					if event.Event == "start" {
						once.Do(resize)
						return
					}
					if event.Event == "stop" {
						return
					}
				}
			}()
		}
	}

	err := d.docker.AttachToContainer(opts)
	outW.Close()
	if err != nil {
		return err
	}

	if req.Job.Job.Config.TTY || req.Stream {
		exited := make(chan struct{})
		ch := d.state.AddListener(req.Job.Job.ID)
		go func() {
			defer d.state.RemoveListener(req.Job.Job.ID, ch)
			for e := range ch {
				if e.Event == "stop" {
					close(exited)
					return
				}
			}
		}()
		job := d.state.GetJob(req.Job.Job.ID)
		if job.Status != host.StatusDone && job.Status != host.StatusCrashed {
			<-exited
			job = d.state.GetJob(req.Job.Job.ID)
		}
		return ExitError(job.ExitStatus)
	}
	return nil
}
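
// ExitError is returned above but not defined in this excerpt. A minimal
// sketch, assuming it is just the job's exit status carried as an error so
// callers can distinguish a non-zero exit from other failures; the message
// text is an assumption. Needs "fmt".
type ExitError int

func (e ExitError) Error() string {
	return fmt.Sprintf("job exited with status %d", int(e))
}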

// Start picks a host, subscribes to the job's events, attaches to its
// standard streams, and then adds the job to the host.
func (c *Cmd) Start() error {
	if c.started {
		return errors.New("exec: already started")
	}
	c.started = true
	if c.cluster == nil {
		var err error
		c.cluster, err = cluster.NewClient()
		if err != nil {
			return err
		}
		c.closeCluster = true
	}

	hosts, err := c.cluster.ListHosts()
	if err != nil {
		return err
	}
	if c.HostID == "" {
		// TODO: check if this is actually random
		for c.HostID = range hosts {
			break
		}
	}
	if c.JobID == "" {
		c.JobID = cluster.RandomJobID("")
	}

	job := &host.Job{
		ID: c.JobID,
		Config: &docker.Config{
			Image: c.Image,
			Cmd:   c.Cmd,
			Tty:   c.TTY,
			Env:   formatEnv(c.Env),
		},
		Attributes: c.Attrs,
	}
	if c.Stdout != nil || c.stdoutPipe != nil {
		job.Config.AttachStdout = true
	}
	if c.Stderr != nil || c.stderrPipe != nil {
		job.Config.AttachStderr = true
	}
	if c.Stdin != nil || c.stdinPipe != nil {
		job.Config.AttachStdin = true
		job.Config.OpenStdin = true
		job.Config.StdinOnce = true
	}

	c.host, err = c.cluster.DialHost(c.HostID)
	if err != nil {
		return err
	}

	// subscribe to host events
	ch := make(chan *host.Event)
	stream := c.host.StreamEvents(job.ID, ch)
	go func() {
		for event := range ch {
			if event.Event == "stop" || event.Event == "error" {
				close(c.done)
				return
			}
		}
		c.streamErr = stream.Err()
		close(c.done)
		// TODO: handle disconnections
	}()

	var rwc cluster.ReadWriteCloser
	var attachWait func() error
	if c.Stdout != nil || c.Stderr != nil || c.Stdin != nil || c.stdoutPipe != nil || c.stderrPipe != nil || c.stdinPipe != nil {
		req := &host.AttachReq{
			JobID:  job.ID,
			Height: c.TermHeight,
			Width:  c.TermWidth,
			Flags:  host.AttachFlagStream,
		}
		if job.Config.AttachStdout {
			req.Flags |= host.AttachFlagStdout
		}
		if job.Config.AttachStderr {
			req.Flags |= host.AttachFlagStderr
		}
		if job.Config.AttachStdin {
			req.Flags |= host.AttachFlagStdin
		}
		rwc, attachWait, err = c.host.Attach(req, true)
		if err != nil {
			c.close()
			return err
		}
	}

	goroutines := make([]func() error, 0, 4)
	c.attachConn = rwc
	if attachWait != nil {
		goroutines = append(goroutines, attachWait)
	}
	if c.stdinPipe != nil {
		c.stdinPipe.set(writeCloseCloser{rwc})
	} else if c.Stdin != nil {
		goroutines = append(goroutines, func() error {
			_, err := io.Copy(rwc, c.Stdin)
			rwc.CloseWrite()
			return err
		})
	}
	if !c.TTY {
		if c.stdoutPipe != nil || c.stderrPipe != nil {
			stdout, stderr := demultiplex.Streams(rwc)
			if c.stdoutPipe != nil {
				c.stdoutPipe.set(stdout)
			} else if c.Stdout != nil {
				goroutines = append(goroutines, cpFunc(c.Stdout, stdout))
			}
			if c.stderrPipe != nil {
				c.stderrPipe.set(stderr)
			} else if c.Stderr != nil {
				goroutines = append(goroutines, cpFunc(c.Stderr, stderr))
			}
		} else if c.Stdout != nil || c.Stderr != nil {
			goroutines = append(goroutines, func() error {
				return demultiplex.Copy(c.Stdout, c.Stderr, rwc)
			})
		}
	} else if c.stdoutPipe != nil {
		c.stdoutPipe.set(rwc)
	} else if c.Stdout != nil {
		goroutines = append(goroutines, cpFunc(c.Stdout, rwc))
	}
	c.errCh = make(chan error, len(goroutines))
	for _, fn := range goroutines {
		go func(fn func() error) {
			c.errCh <- fn()
		}(fn)
	}

	_, err = c.cluster.AddJobs(&host.AddJobsReq{HostJobs: map[string][]*host.Job{c.HostID: {job}}})
	return err
}
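
// cpFunc and demultiplex.Streams are used above but not shown. Plausible
// sketches under the same framing assumption as copySketch above; the names
// and shapes here are assumptions, not the real implementations.
func cpFunc(dst io.Writer, src io.Reader) func() error {
	return func() error {
		// Wrap io.Copy so its error can be collected on errCh.
		_, err := io.Copy(dst, src)
		return err
	}
}

func streamsSketch(r io.Reader) (stdout, stderr io.Reader) {
	outR, outW := io.Pipe()
	errR, errW := io.Pipe()
	go func() {
		// Demultiplex into two pipes and propagate any error to both readers.
		err := copySketch(outW, errW, r)
		outW.CloseWithError(err)
		errW.CloseWithError(err)
	}()
	return outR, errR
}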

// runRun runs a one-off command in the app's current (or specified) release,
// either detached or attached to the local terminal, using a TTY when stdin
// and stdout are both terminals.
func runRun(cmd *Command, args []string, client *controller.Client) error {
	if len(args) == 0 {
		cmd.printUsage(true)
	}
	if runRelease == "" {
		release, err := client.GetAppRelease(mustApp())
		if err == controller.ErrNotFound {
			return errors.New("No app release, specify a release with -release")
		}
		if err != nil {
			return err
		}
		runRelease = release.ID
	}
	req := &ct.NewJob{
		Cmd:       args,
		TTY:       term.IsTerminal(os.Stdin) && term.IsTerminal(os.Stdout) && !runDetached,
		ReleaseID: runRelease,
	}
	if req.TTY {
		cols, err := term.Cols()
		if err != nil {
			return err
		}
		lines, err := term.Lines()
		if err != nil {
			return err
		}
		req.Columns = cols
		req.Lines = lines
		req.Env = map[string]string{
			"COLUMNS": strconv.Itoa(cols),
			"LINES":   strconv.Itoa(lines),
			"TERM":    os.Getenv("TERM"),
		}
	}

	if runDetached {
		job, err := client.RunJobDetached(mustApp(), req)
		if err != nil {
			return err
		}
		log.Println(job.ID)
		return nil
	}

	rwc, err := client.RunJobAttached(mustApp(), req)
	if err != nil {
		return err
	}
	defer rwc.Close()

	if req.TTY {
		if err := term.MakeRaw(os.Stdin); err != nil {
			return err
		}
		defer term.Restore(os.Stdin)
	}

	go func() {
		io.Copy(rwc, os.Stdin)
		rwc.CloseWrite()
	}()
	if req.TTY {
		_, err = io.Copy(os.Stdout, rwc)
	} else {
		err = demultiplex.Copy(os.Stdout, os.Stderr, rwc)
	}
	// TODO: get exit code and use it
	return err
}