func (c *Cmd) Start() error { if c.started { return errors.New("exec: already started") } c.started = true if c.cluster == nil { var err error c.cluster, err = cluster.NewClient() if err != nil { return err } c.closeCluster = true } hosts, err := c.cluster.ListHosts() if err != nil { return err } if c.HostID == "" { // TODO: check if this is actually random for c.HostID = range hosts { break } } if c.JobID == "" { c.JobID = cluster.RandomJobID("") } job := &host.Job{ ID: c.JobID, Config: &docker.Config{ Image: c.Image, Cmd: c.Cmd, Tty: c.TTY, Env: formatEnv(c.Env), }, Attributes: c.Attrs, } if c.Stdout != nil || c.stdoutPipe != nil { job.Config.AttachStdout = true } if c.Stderr != nil || c.stderrPipe != nil { job.Config.AttachStderr = true } if c.Stdin != nil || c.stdinPipe != nil { job.Config.AttachStdin = true job.Config.OpenStdin = true job.Config.StdinOnce = true } c.host, err = c.cluster.DialHost(c.HostID) if err != nil { return err } // subscribe to host events ch := make(chan *host.Event) stream := c.host.StreamEvents(job.ID, ch) go func() { for event := range ch { if event.Event == "stop" || event.Event == "error" { close(c.done) return } } c.streamErr = stream.Err() close(c.done) // TODO: handle disconnections }() var rwc cluster.ReadWriteCloser var attachWait func() error if c.Stdout != nil || c.Stderr != nil || c.Stdin != nil || c.stdoutPipe != nil || c.stderrPipe != nil || c.stdinPipe != nil { req := &host.AttachReq{ JobID: job.ID, Height: c.TermHeight, Width: c.TermWidth, Flags: host.AttachFlagStream, } if job.Config.AttachStdout { req.Flags |= host.AttachFlagStdout } if job.Config.AttachStderr { req.Flags |= host.AttachFlagStderr } if job.Config.AttachStdin { req.Flags |= host.AttachFlagStdin } rwc, attachWait, err = c.host.Attach(req, true) if err != nil { c.close() return err } } goroutines := make([]func() error, 0, 4) c.attachConn = rwc if attachWait != nil { goroutines = append(goroutines, attachWait) } if c.stdinPipe != nil { 
c.stdinPipe.set(writeCloseCloser{rwc}) } else if c.Stdin != nil { goroutines = append(goroutines, func() error { _, err := io.Copy(rwc, c.Stdin) rwc.CloseWrite() return err }) } if !c.TTY { if c.stdoutPipe != nil || c.stderrPipe != nil { stdout, stderr := demultiplex.Streams(rwc) if c.stdoutPipe != nil { c.stdoutPipe.set(stdout) } else if c.Stdout != nil { goroutines = append(goroutines, cpFunc(c.Stdout, stdout)) } if c.stderrPipe != nil { c.stderrPipe.set(stderr) } else if c.Stderr != nil { goroutines = append(goroutines, cpFunc(c.Stderr, stderr)) } } else if c.Stdout != nil || c.Stderr != nil { goroutines = append(goroutines, func() error { return demultiplex.Copy(c.Stdout, c.Stderr, rwc) }) } } else if c.stdoutPipe != nil { c.stdoutPipe.set(rwc) } else if c.Stdout != nil { goroutines = append(goroutines, cpFunc(c.Stdout, rwc)) } c.errCh = make(chan error, len(goroutines)) for _, fn := range goroutines { go func(fn func() error) { c.errCh <- fn() }(fn) } _, err = c.cluster.AddJobs(&host.AddJobsReq{HostJobs: map[string][]*host.Job{c.HostID: {job}}}) return err }
// runJob schedules a one-off job for app on an arbitrarily chosen host.
// If the request's Accept header contains "application/vnd.flynn.attach",
// the HTTP connection is hijacked and bridged bidirectionally to the job's
// attach stream; otherwise a JSON ct.Job description is returned.
//
// NOTE(review): the attach path deliberately writes the 101 response via
// w.WriteHeader before hijacking; the exact ordering here interacts with
// net/http buffering — confirm before changing.
func runJob(app *ct.App, newJob ct.NewJob, releases *ReleaseRepo, artifacts *ArtifactRepo, cl clusterClient, req *http.Request, w http.ResponseWriter, r ResponseHelper) {
	// Resolve the release, then the artifact (docker image) it refers to.
	data, err := releases.Get(newJob.ReleaseID)
	if err != nil {
		r.Error(err)
		return
	}
	release := data.(*ct.Release)
	data, err = artifacts.Get(release.ArtifactID)
	if err != nil {
		r.Error(err)
		return
	}
	artifact := data.(*ct.Artifact)
	image, err := utils.DockerImage(artifact.URI)
	if err != nil {
		// A bad artifact URI is reported as a validation error, not a 500.
		log.Println("error parsing artifact uri", err)
		r.Error(ct.ValidationError{
			Field:   "artifact.uri",
			Message: "is invalid",
		})
		return
	}
	// The client opts into interactive I/O via the attach media type.
	attach := strings.Contains(req.Header.Get("Accept"), "application/vnd.flynn.attach")
	job := &host.Job{
		ID: cluster.RandomJobID(""),
		// Attributes link the job back to its controller app/release.
		Attributes: map[string]string{
			"flynn-controller.app":     app.ID,
			"flynn-controller.release": release.ID,
		},
		Config: &docker.Config{
			Cmd:          newJob.Cmd,
			Env:          utils.FormatEnv(release.Env, newJob.Env),
			Image:        image,
			AttachStdout: true,
			AttachStderr: true,
		},
	}
	if newJob.TTY {
		job.Config.Tty = true
	}
	if attach {
		// Interactive jobs also get stdin, closed after one attach.
		job.Config.AttachStdin = true
		job.Config.StdinOnce = true
		job.Config.OpenStdin = true
	}
	hosts, err := cl.ListHosts()
	if err != nil {
		r.Error(err)
		return
	}
	// pick a random host (map iteration order is unspecified, not
	// uniformly random)
	var hostID string
	for hostID = range hosts {
		break
	}
	if hostID == "" {
		r.Error(errors.New("no hosts found"))
		return
	}
	var attachConn cluster.ReadWriteCloser
	var attachWait func() error
	if attach {
		// Attach BEFORE scheduling so no early output is lost; attachWait
		// blocks until the stream is established.
		attachReq := &host.AttachReq{
			JobID:  job.ID,
			Flags:  host.AttachFlagStdout | host.AttachFlagStderr | host.AttachFlagStdin | host.AttachFlagStream,
			Height: newJob.Lines,
			Width:  newJob.Columns,
		}
		client, err := cl.DialHost(hostID)
		if err != nil {
			r.Error(fmt.Errorf("lorne connect failed: %s", err.Error()))
			return
		}
		defer client.Close()
		attachConn, attachWait, err = client.Attach(attachReq, true)
		if err != nil {
			r.Error(fmt.Errorf("attach failed: %s", err.Error()))
			return
		}
		defer attachConn.Close()
	}
	_, err = cl.AddJobs(&host.AddJobsReq{HostJobs: map[string][]*host.Job{hostID: {job}}})
	if err != nil {
		r.Error(fmt.Errorf("schedule failed: %s", err.Error()))
		return
	}
	if attach {
		// Wait for the attach stream to become live before switching
		// protocols on the HTTP connection.
		if err := attachWait(); err != nil {
			r.Error(fmt.Errorf("attach wait failed: %s", err.Error()))
			return
		}
		w.Header().Set("Content-Type", "application/vnd.flynn.attach")
		w.Header().Set("Content-Length", "0")
		w.WriteHeader(http.StatusSwitchingProtocols)
		conn, _, err := w.(http.Hijacker).Hijack()
		if err != nil {
			// Hijack failing after headers were written is a programmer
			// error; nothing sensible can be sent to the client now.
			panic(err)
		}
		defer conn.Close()
		// Bridge both directions; each copier half-closes its write side
		// on EOF and signals completion on done.
		done := make(chan struct{}, 2)
		cp := func(to cluster.ReadWriteCloser, from io.Reader) {
			io.Copy(to, from)
			to.CloseWrite()
			done <- struct{}{}
		}
		go cp(conn.(cluster.ReadWriteCloser), attachConn)
		go cp(attachConn, conn)
		// Wait for both directions to finish before the deferred closes run.
		<-done
		<-done
		return
	} else {
		// Non-attach: report the scheduled job; its ID embeds the host.
		r.JSON(200, &ct.Job{
			ID:        hostID + "-" + job.ID,
			ReleaseID: newJob.ReleaseID,
			Cmd:       newJob.Cmd,
		})
	}
}