// RunJob starts a one-off job for the app in the request context. If the
// client requests the flynn-attach/0 connection upgrade, the job's stdio is
// streamed back over the hijacked connection; otherwise the job metadata is
// returned as JSON.
func (c *controllerAPI) RunJob(ctx context.Context, w http.ResponseWriter, req *http.Request) {
	var newJob ct.NewJob
	if err := httphelper.DecodeJSON(req, &newJob); err != nil {
		respondWithError(w, err)
		return
	}
	if err := schema.Validate(newJob); err != nil {
		respondWithError(w, err)
		return
	}

	data, err := c.releaseRepo.Get(newJob.ReleaseID)
	if err != nil {
		respondWithError(w, err)
		return
	}
	release := data.(*ct.Release)
	data, err = c.artifactRepo.Get(release.ArtifactID)
	if err != nil {
		respondWithError(w, err)
		return
	}
	artifact := data.(*ct.Artifact)

	attach := strings.Contains(req.Header.Get("Upgrade"), "flynn-attach/0")

	hosts, err := c.clusterClient.ListHosts()
	if err != nil {
		respondWithError(w, err)
		return
	}
	if len(hosts) == 0 {
		respondWithError(w, errors.New("no hosts found"))
		return
	}
	hostID := schedutil.PickHost(hosts).ID
	id := cluster.RandomJobID("")
	app := c.getApp(ctx)

	// Build the job environment: FLYNN_* defaults first, then the release env
	// (only when requested), then per-job overrides.
	env := make(map[string]string, len(release.Env)+len(newJob.Env)+4)
	env["FLYNN_APP_ID"] = app.ID
	env["FLYNN_RELEASE_ID"] = release.ID
	env["FLYNN_PROCESS_TYPE"] = ""
	env["FLYNN_JOB_ID"] = hostID + "-" + id
	if newJob.ReleaseEnv {
		for k, v := range release.Env {
			env[k] = v
		}
	}
	for k, v := range newJob.Env {
		env[k] = v
	}

	metadata := make(map[string]string, len(newJob.Meta)+3)
	for k, v := range newJob.Meta {
		metadata[k] = v
	}
	metadata["flynn-controller.app"] = app.ID
	metadata["flynn-controller.app_name"] = app.Name
	metadata["flynn-controller.release"] = release.ID

	job := &host.Job{
		ID:       id,
		Metadata: metadata,
		Artifact: host.Artifact{
			Type: artifact.Type,
			URI:  artifact.URI,
		},
		Config: host.ContainerConfig{
			Cmd:        newJob.Cmd,
			Env:        env,
			TTY:        newJob.TTY,
			Stdin:      attach,
			DisableLog: newJob.DisableLog,
		},
	}
	if len(newJob.Entrypoint) > 0 {
		job.Config.Entrypoint = newJob.Entrypoint
	}

	var attachClient cluster.AttachClient
	if attach {
		attachReq := &host.AttachReq{
			JobID:  job.ID,
			Flags:  host.AttachFlagStdout | host.AttachFlagStderr | host.AttachFlagStdin | host.AttachFlagStream,
			Height: uint16(newJob.Lines),
			Width:  uint16(newJob.Columns),
		}
		client, err := c.clusterClient.DialHost(hostID)
		if err != nil {
			respondWithError(w, fmt.Errorf("host connect failed: %s", err.Error()))
			return
		}
		attachClient, err = client.Attach(attachReq, true)
		if err != nil {
			respondWithError(w, fmt.Errorf("attach failed: %s", err.Error()))
			return
		}
		defer attachClient.Close()
	}

	_, err = c.clusterClient.AddJobs(map[string][]*host.Job{hostID: {job}})
	if err != nil {
		respondWithError(w, fmt.Errorf("schedule failed: %s", err.Error()))
		return
	}

	if attach {
		if err := attachClient.Wait(); err != nil {
			respondWithError(w, fmt.Errorf("attach wait failed: %s", err.Error()))
			return
		}
		w.Header().Set("Connection", "upgrade")
		w.Header().Set("Upgrade", "flynn-attach/0")
		w.WriteHeader(http.StatusSwitchingProtocols)
		conn, _, err := w.(http.Hijacker).Hijack()
		if err != nil {
			panic(err)
		}
		defer conn.Close()

		// Proxy both directions between the client and the attach connection,
		// then wait for both copies to finish.
		done := make(chan struct{}, 2)
		cp := func(to io.Writer, from io.Reader) {
			io.Copy(to, from)
			done <- struct{}{}
		}
		go cp(conn, attachClient.Conn())
		go cp(attachClient.Conn(), conn)
		<-done
		<-done
		return
	} else {
		httphelper.JSON(w, 200, &ct.Job{
			ID:        hostID + "-" + job.ID,
			ReleaseID: newJob.ReleaseID,
			Cmd:       newJob.Cmd,
		})
	}
}
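// The environment merge above gives newJob.Env the last word: the FLYNN_*
// defaults are written first, release.Env is layered on only when
// newJob.ReleaseEnv is set, and newJob.Env overwrites both. A minimal
// standalone sketch of that precedence (the mergeEnv helper and the values
// are hypothetical, not Flynn code):
package main

import "fmt"

// mergeEnv layers the three sources in increasing priority; later writes to
// the map win.
func mergeEnv(defaults, releaseEnv, jobEnv map[string]string, useReleaseEnv bool) map[string]string {
	env := make(map[string]string, len(defaults)+len(releaseEnv)+len(jobEnv))
	for k, v := range defaults {
		env[k] = v
	}
	if useReleaseEnv {
		for k, v := range releaseEnv {
			env[k] = v // release values override the defaults
		}
	}
	for k, v := range jobEnv {
		env[k] = v // per-job values override everything else
	}
	return env
}

func main() {
	env := mergeEnv(
		map[string]string{"FLYNN_PROCESS_TYPE": "", "FOO": "default"},
		map[string]string{"FOO": "release"},
		map[string]string{"FOO": "job"},
		true,
	)
	fmt.Println(env["FOO"]) // prints "job"
}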
// runJob starts a one-off job for the given app on a randomly chosen host,
// optionally attaching to its stdio when the client accepts the
// application/vnd.flynn.attach media type.
func runJob(app *ct.App, newJob ct.NewJob, releases *ReleaseRepo, artifacts *ArtifactRepo, cl clusterClient, req *http.Request, w http.ResponseWriter, r ResponseHelper) {
	data, err := releases.Get(newJob.ReleaseID)
	if err != nil {
		r.Error(err)
		return
	}
	release := data.(*ct.Release)
	data, err = artifacts.Get(release.ArtifactID)
	if err != nil {
		r.Error(err)
		return
	}
	artifact := data.(*ct.Artifact)

	attach := strings.Contains(req.Header.Get("Accept"), "application/vnd.flynn.attach")

	// Merge the release env with per-job overrides, per-job values winning.
	env := make(map[string]string, len(release.Env)+len(newJob.Env))
	for k, v := range release.Env {
		env[k] = v
	}
	for k, v := range newJob.Env {
		env[k] = v
	}

	job := &host.Job{
		ID: cluster.RandomJobID(""),
		Metadata: map[string]string{
			"flynn-controller.app":      app.ID,
			"flynn-controller.app_name": app.Name,
			"flynn-controller.release":  release.ID,
		},
		Artifact: host.Artifact{
			Type: artifact.Type,
			URI:  artifact.URI,
		},
		Config: host.ContainerConfig{
			Cmd:   newJob.Cmd,
			Env:   env,
			TTY:   newJob.TTY,
			Stdin: attach,
		},
	}
	if len(newJob.Entrypoint) > 0 {
		job.Config.Entrypoint = newJob.Entrypoint
	}

	hosts, err := cl.ListHosts()
	if err != nil {
		r.Error(err)
		return
	}
	// pick a random host
	var hostID string
	for hostID = range hosts {
		break
	}
	if hostID == "" {
		r.Error(errors.New("no hosts found"))
		return
	}

	var attachClient cluster.AttachClient
	if attach {
		attachReq := &host.AttachReq{
			JobID:  job.ID,
			Flags:  host.AttachFlagStdout | host.AttachFlagStderr | host.AttachFlagStdin | host.AttachFlagStream,
			Height: uint16(newJob.Lines),
			Width:  uint16(newJob.Columns),
		}
		client, err := cl.DialHost(hostID)
		if err != nil {
			r.Error(fmt.Errorf("host connect failed: %s", err.Error()))
			return
		}
		defer client.Close()
		attachClient, err = client.Attach(attachReq, true)
		if err != nil {
			r.Error(fmt.Errorf("attach failed: %s", err.Error()))
			return
		}
		defer attachClient.Close()
	}

	_, err = cl.AddJobs(&host.AddJobsReq{HostJobs: map[string][]*host.Job{hostID: {job}}})
	if err != nil {
		r.Error(fmt.Errorf("schedule failed: %s", err.Error()))
		return
	}

	if attach {
		if err := attachClient.Wait(); err != nil {
			r.Error(fmt.Errorf("attach wait failed: %s", err.Error()))
			return
		}
		w.Header().Set("Content-Type", "application/vnd.flynn.attach")
		w.Header().Set("Content-Length", "0")
		w.WriteHeader(http.StatusSwitchingProtocols)
		conn, _, err := w.(http.Hijacker).Hijack()
		if err != nil {
			panic(err)
		}
		defer conn.Close()

		// Proxy both directions between the client and the attach connection,
		// then wait for both copies to finish.
		done := make(chan struct{}, 2)
		cp := func(to io.Writer, from io.Reader) {
			io.Copy(to, from)
			done <- struct{}{}
		}
		go cp(conn, attachClient.Conn())
		go cp(attachClient.Conn(), conn)
		<-done
		<-done
		return
	} else {
		r.JSON(200, &ct.Job{
			ID:        hostID + "-" + job.ID,
			ReleaseID: newJob.ReleaseID,
			Cmd:       newJob.Cmd,
		})
	}
}
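// The "pick a random host" loop above relies on Go's unspecified (randomized)
// map iteration order rather than an explicit choice, which is why it needs
// the empty-string check afterwards. A minimal standalone sketch of an
// explicit pick (the pickHostID helper is hypothetical, not the
// schedutil.PickHost / random.Math helpers the later revisions use):
package main

import (
	"fmt"
	"math/rand"
)

// pickHostID returns a pseudo-randomly chosen key from the hosts map, or ""
// if the map is empty. Copying the keys into a slice makes the selection
// explicit instead of depending on map iteration order.
func pickHostID(hosts map[string]struct{}) string {
	if len(hosts) == 0 {
		return ""
	}
	ids := make([]string, 0, len(hosts))
	for id := range hosts {
		ids = append(ids, id)
	}
	return ids[rand.Intn(len(ids))]
}

func main() {
	hosts := map[string]struct{}{"host0": {}, "host1": {}, "host2": {}}
	fmt.Println("picked:", pickHostID(hosts))
}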
// RunJob starts a one-off job for the app in the request context using the
// release's image artifact, scheduling it on a randomly chosen host. If the
// client requests the flynn-attach/0 connection upgrade, the job's stdio is
// streamed back over the hijacked connection; otherwise the job metadata is
// returned as JSON.
func (c *controllerAPI) RunJob(ctx context.Context, w http.ResponseWriter, req *http.Request) {
	var newJob ct.NewJob
	if err := httphelper.DecodeJSON(req, &newJob); err != nil {
		respondWithError(w, err)
		return
	}
	if err := schema.Validate(newJob); err != nil {
		respondWithError(w, err)
		return
	}

	data, err := c.releaseRepo.Get(newJob.ReleaseID)
	if err != nil {
		respondWithError(w, err)
		return
	}
	release := data.(*ct.Release)
	if release.ImageArtifactID() == "" {
		httphelper.ValidationError(w, "release.ImageArtifact", "must be set")
		return
	}

	attach := strings.Contains(req.Header.Get("Upgrade"), "flynn-attach/0")

	hosts, err := c.clusterClient.Hosts()
	if err != nil {
		respondWithError(w, err)
		return
	}
	if len(hosts) == 0 {
		respondWithError(w, errors.New("no hosts found"))
		return
	}
	client := hosts[random.Math.Intn(len(hosts))]
	uuid := random.UUID()
	hostID := client.ID()
	id := cluster.GenerateJobID(hostID, uuid)
	app := c.getApp(ctx)

	// Build the job environment: FLYNN_* defaults first, then the release env
	// (only when requested), then per-job overrides.
	env := make(map[string]string, len(release.Env)+len(newJob.Env)+4)
	env["FLYNN_APP_ID"] = app.ID
	env["FLYNN_RELEASE_ID"] = release.ID
	env["FLYNN_PROCESS_TYPE"] = ""
	env["FLYNN_JOB_ID"] = id
	if newJob.ReleaseEnv {
		for k, v := range release.Env {
			env[k] = v
		}
	}
	for k, v := range newJob.Env {
		env[k] = v
	}

	metadata := make(map[string]string, len(newJob.Meta)+3)
	for k, v := range newJob.Meta {
		metadata[k] = v
	}
	metadata["flynn-controller.app"] = app.ID
	metadata["flynn-controller.app_name"] = app.Name
	metadata["flynn-controller.release"] = release.ID

	job := &host.Job{
		ID:       id,
		Metadata: metadata,
		Config: host.ContainerConfig{
			Cmd:        newJob.Cmd,
			Env:        env,
			TTY:        newJob.TTY,
			Stdin:      attach,
			DisableLog: newJob.DisableLog,
		},
		Resources: newJob.Resources,
	}
	resource.SetDefaults(&job.Resources)
	if len(newJob.Entrypoint) > 0 {
		job.Config.Entrypoint = newJob.Entrypoint
	}
	if len(release.ArtifactIDs) > 0 {
		artifacts, err := c.artifactRepo.ListIDs(release.ArtifactIDs...)
		if err != nil {
			respondWithError(w, err)
			return
		}
		job.ImageArtifact = artifacts[release.ImageArtifactID()].HostArtifact()
		job.FileArtifacts = make([]*host.Artifact, len(release.FileArtifactIDs()))
		for i, id := range release.FileArtifactIDs() {
			job.FileArtifacts[i] = artifacts[id].HostArtifact()
		}
	}

	var attachClient cluster.AttachClient
	if attach {
		attachReq := &host.AttachReq{
			JobID:  job.ID,
			Flags:  host.AttachFlagStdout | host.AttachFlagStderr | host.AttachFlagStdin | host.AttachFlagStream,
			Height: uint16(newJob.Lines),
			Width:  uint16(newJob.Columns),
		}
		attachClient, err = client.Attach(attachReq, true)
		if err != nil {
			respondWithError(w, fmt.Errorf("attach failed: %s", err.Error()))
			return
		}
		defer attachClient.Close()
	}

	if err := client.AddJob(job); err != nil {
		respondWithError(w, fmt.Errorf("schedule failed: %s", err.Error()))
		return
	}

	if attach {
		if err := attachClient.Wait(); err != nil {
			respondWithError(w, fmt.Errorf("attach wait failed: %s", err.Error()))
			return
		}
		w.Header().Set("Connection", "upgrade")
		w.Header().Set("Upgrade", "flynn-attach/0")
		w.WriteHeader(http.StatusSwitchingProtocols)
		conn, _, err := w.(http.Hijacker).Hijack()
		if err != nil {
			panic(err)
		}
		defer conn.Close()

		// Proxy both directions between the client and the attach connection,
		// then wait for both copies to finish.
		done := make(chan struct{}, 2)
		cp := func(to io.Writer, from io.Reader) {
			io.Copy(to, from)
			done <- struct{}{}
		}
		go cp(conn, attachClient.Conn())
		go cp(attachClient.Conn(), conn)
		<-done
		<-done
		return
	} else {
		httphelper.JSON(w, 200, &ct.Job{
			ID:        job.ID,
			UUID:      uuid,
			HostID:    hostID,
			ReleaseID: newJob.ReleaseID,
			Cmd:       newJob.Cmd,
		})
	}
}
// RunJob starts a one-off job for the app in the request context. The job's
// artifacts default to the release's but can be overridden per job, the
// container entrypoint comes from the image metadata, and an optional /data
// volume can be provisioned. If the client requests the flynn-attach/0
// connection upgrade, the job's stdio is streamed back over the hijacked
// connection; otherwise the job metadata is returned as JSON.
func (c *controllerAPI) RunJob(ctx context.Context, w http.ResponseWriter, req *http.Request) {
	var newJob ct.NewJob
	if err := httphelper.DecodeJSON(req, &newJob); err != nil {
		respondWithError(w, err)
		return
	}
	if err := schema.Validate(newJob); err != nil {
		respondWithError(w, err)
		return
	}

	data, err := c.releaseRepo.Get(newJob.ReleaseID)
	if err != nil {
		respondWithError(w, err)
		return
	}
	release := data.(*ct.Release)

	// Resolve the artifacts to run, preferring per-job overrides.
	var artifactIDs []string
	if len(newJob.ArtifactIDs) > 0 {
		artifactIDs = newJob.ArtifactIDs
	} else if len(release.ArtifactIDs) > 0 {
		artifactIDs = release.ArtifactIDs
	} else {
		httphelper.ValidationError(w, "release.ArtifactIDs", "cannot be empty")
		return
	}
	artifacts := make([]*ct.Artifact, len(artifactIDs))
	artifactList, err := c.artifactRepo.ListIDs(artifactIDs...)
	if err != nil {
		respondWithError(w, err)
		return
	}
	for i, id := range artifactIDs {
		artifacts[i] = artifactList[id]
	}

	var entrypoint ct.ImageEntrypoint
	if e := utils.GetEntrypoint(artifacts, ""); e != nil {
		entrypoint = *e
	}

	attach := strings.Contains(req.Header.Get("Upgrade"), "flynn-attach/0")

	hosts, err := c.clusterClient.Hosts()
	if err != nil {
		respondWithError(w, err)
		return
	}
	if len(hosts) == 0 {
		respondWithError(w, errors.New("no hosts found"))
		return
	}
	client := hosts[random.Math.Intn(len(hosts))]
	uuid := random.UUID()
	hostID := client.ID()
	id := cluster.GenerateJobID(hostID, uuid)
	app := c.getApp(ctx)

	// Build the job environment: FLYNN_* defaults, then the image entrypoint
	// env, then the release env (only when requested), then per-job overrides.
	env := make(map[string]string, len(entrypoint.Env)+len(release.Env)+len(newJob.Env)+4)
	env["FLYNN_APP_ID"] = app.ID
	env["FLYNN_RELEASE_ID"] = release.ID
	env["FLYNN_PROCESS_TYPE"] = ""
	env["FLYNN_JOB_ID"] = id
	for k, v := range entrypoint.Env {
		env[k] = v
	}
	if newJob.ReleaseEnv {
		for k, v := range release.Env {
			env[k] = v
		}
	}
	for k, v := range newJob.Env {
		env[k] = v
	}

	metadata := make(map[string]string, len(newJob.Meta)+3)
	for k, v := range newJob.Meta {
		metadata[k] = v
	}
	metadata["flynn-controller.app"] = app.ID
	metadata["flynn-controller.app_name"] = app.Name
	metadata["flynn-controller.release"] = release.ID

	job := &host.Job{
		ID:       id,
		Metadata: metadata,
		Config: host.ContainerConfig{
			Args:       entrypoint.Args,
			Env:        env,
			WorkingDir: entrypoint.WorkingDir,
			Uid:        entrypoint.Uid,
			Gid:        entrypoint.Gid,
			TTY:        newJob.TTY,
			Stdin:      attach,
			DisableLog: newJob.DisableLog,
		},
		Resources: newJob.Resources,
		Partition: string(newJob.Partition),
	}
	resource.SetDefaults(&job.Resources)
	if len(newJob.Args) > 0 {
		job.Config.Args = newJob.Args
	}
	utils.SetupMountspecs(job, artifacts)

	// provision data volume if required
	if newJob.Data {
		vol := &ct.VolumeReq{Path: "/data", DeleteOnStop: true}
		if _, err := utils.ProvisionVolume(vol, client, job); err != nil {
			respondWithError(w, err)
			return
		}
	}

	var attachClient cluster.AttachClient
	if attach {
		attachReq := &host.AttachReq{
			JobID:  job.ID,
			Flags:  host.AttachFlagStdout | host.AttachFlagStderr | host.AttachFlagStdin | host.AttachFlagStream,
			Height: uint16(newJob.Lines),
			Width:  uint16(newJob.Columns),
		}
		attachClient, err = client.Attach(attachReq, true)
		if err != nil {
			respondWithError(w, fmt.Errorf("attach failed: %s", err.Error()))
			return
		}
		defer attachClient.Close()
	}

	if err := client.AddJob(job); err != nil {
		respondWithError(w, fmt.Errorf("schedule failed: %s", err.Error()))
		return
	}

	if attach {
		// TODO(titanous): This Wait could block indefinitely if something goes
		// wrong, a context should be threaded in that cancels if the client
		// goes away.
		if err := attachClient.Wait(); err != nil {
			respondWithError(w, fmt.Errorf("attach wait failed: %s", err.Error()))
			return
		}
		w.Header().Set("Connection", "upgrade")
		w.Header().Set("Upgrade", "flynn-attach/0")
		w.WriteHeader(http.StatusSwitchingProtocols)
		conn, _, err := w.(http.Hijacker).Hijack()
		if err != nil {
			panic(err)
		}
		defer conn.Close()

		done := make(chan struct{}, 2)
		cp := func(to io.Writer, from io.Reader) {
			io.Copy(to, from)
			done <- struct{}{}
		}
		go cp(conn, attachClient.Conn())
		go cp(attachClient.Conn(), conn)

		// Wait for one of the connections to be closed or interrupted. EOF is
		// framed inside the attach protocol, so a read/write error indicates
		// that we're done and should clean up.
		<-done
		return
	} else {
		httphelper.JSON(w, 200, &ct.Job{
			ID:        job.ID,
			UUID:      uuid,
			HostID:    hostID,
			ReleaseID: newJob.ReleaseID,
			Args:      newJob.Args,
		})
	}
}
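// The TODO above notes that attachClient.Wait can block indefinitely. A
// hedged sketch of the suggested fix, assuming only a blocking Wait() error
// method (the waiter, waitWithContext and slowWaiter names are hypothetical):
// run the wait in a goroutine and select on the request context so the
// handler stops waiting when the client goes away.
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// waiter stands in for anything with a blocking Wait, such as the attach
// client used above.
type waiter interface {
	Wait() error
}

// waitWithContext returns Wait's result, or the context's error if the
// context is cancelled first. The background goroutine still runs to
// completion; the caller simply stops waiting for it.
func waitWithContext(ctx context.Context, w waiter) error {
	errc := make(chan error, 1)
	go func() { errc <- w.Wait() }()
	select {
	case err := <-errc:
		return err
	case <-ctx.Done():
		return ctx.Err()
	}
}

// slowWaiter simulates a Wait that never returns in a reasonable time.
type slowWaiter struct{}

func (slowWaiter) Wait() error {
	time.Sleep(time.Hour)
	return errors.New("unreachable")
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	fmt.Println(waitWithContext(ctx, slowWaiter{})) // context deadline exceeded
}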
// RunJob starts a one-off job for the app in the request context using the
// release's image artifact. Releases created by git deploys are forced to run
// through /runner/init. If the client requests the flynn-attach/0 connection
// upgrade, the job's stdio is streamed back over the hijacked connection;
// otherwise the job metadata is returned as JSON.
func (c *controllerAPI) RunJob(ctx context.Context, w http.ResponseWriter, req *http.Request) {
	var newJob ct.NewJob
	if err := httphelper.DecodeJSON(req, &newJob); err != nil {
		respondWithError(w, err)
		return
	}
	if err := schema.Validate(newJob); err != nil {
		respondWithError(w, err)
		return
	}

	data, err := c.releaseRepo.Get(newJob.ReleaseID)
	if err != nil {
		respondWithError(w, err)
		return
	}
	release := data.(*ct.Release)
	if release.ImageArtifactID() == "" {
		httphelper.ValidationError(w, "release.ImageArtifact", "must be set")
		return
	}

	attach := strings.Contains(req.Header.Get("Upgrade"), "flynn-attach/0")

	hosts, err := c.clusterClient.Hosts()
	if err != nil {
		respondWithError(w, err)
		return
	}
	if len(hosts) == 0 {
		respondWithError(w, errors.New("no hosts found"))
		return
	}
	client := hosts[random.Math.Intn(len(hosts))]
	uuid := random.UUID()
	hostID := client.ID()
	id := cluster.GenerateJobID(hostID, uuid)
	app := c.getApp(ctx)

	// Build the job environment: FLYNN_* defaults first, then the release env
	// (only when requested), then per-job overrides.
	env := make(map[string]string, len(release.Env)+len(newJob.Env)+4)
	env["FLYNN_APP_ID"] = app.ID
	env["FLYNN_RELEASE_ID"] = release.ID
	env["FLYNN_PROCESS_TYPE"] = ""
	env["FLYNN_JOB_ID"] = id
	if newJob.ReleaseEnv {
		for k, v := range release.Env {
			env[k] = v
		}
	}
	for k, v := range newJob.Env {
		env[k] = v
	}

	metadata := make(map[string]string, len(newJob.Meta)+3)
	for k, v := range newJob.Meta {
		metadata[k] = v
	}
	metadata["flynn-controller.app"] = app.ID
	metadata["flynn-controller.app_name"] = app.Name
	metadata["flynn-controller.release"] = release.ID

	job := &host.Job{
		ID:       id,
		Metadata: metadata,
		Config: host.ContainerConfig{
			Env:        env,
			TTY:        newJob.TTY,
			Stdin:      attach,
			DisableLog: newJob.DisableLog,
		},
		Resources: newJob.Resources,
	}
	resource.SetDefaults(&job.Resources)
	if len(newJob.Args) > 0 {
		job.Config.Args = newJob.Args
	}
	if len(release.ArtifactIDs) > 0 {
		artifacts, err := c.artifactRepo.ListIDs(release.ArtifactIDs...)
		if err != nil {
			respondWithError(w, err)
			return
		}
		job.ImageArtifact = artifacts[release.ImageArtifactID()].HostArtifact()
		job.FileArtifacts = make([]*host.Artifact, len(release.FileArtifactIDs()))
		for i, id := range release.FileArtifactIDs() {
			job.FileArtifacts[i] = artifacts[id].HostArtifact()
		}
	}

	// ensure slug apps use /runner/init
	if release.IsGitDeploy() && (len(job.Config.Args) == 0 || job.Config.Args[0] != "/runner/init") {
		job.Config.Args = append([]string{"/runner/init"}, job.Config.Args...)
	}

	var attachClient cluster.AttachClient
	if attach {
		attachReq := &host.AttachReq{
			JobID:  job.ID,
			Flags:  host.AttachFlagStdout | host.AttachFlagStderr | host.AttachFlagStdin | host.AttachFlagStream,
			Height: uint16(newJob.Lines),
			Width:  uint16(newJob.Columns),
		}
		attachClient, err = client.Attach(attachReq, true)
		if err != nil {
			respondWithError(w, fmt.Errorf("attach failed: %s", err.Error()))
			return
		}
		defer attachClient.Close()
	}

	if err := client.AddJob(job); err != nil {
		respondWithError(w, fmt.Errorf("schedule failed: %s", err.Error()))
		return
	}

	if attach {
		// TODO(titanous): This Wait could block indefinitely if something goes
		// wrong, a context should be threaded in that cancels if the client
		// goes away.
		if err := attachClient.Wait(); err != nil {
			respondWithError(w, fmt.Errorf("attach wait failed: %s", err.Error()))
			return
		}
		w.Header().Set("Connection", "upgrade")
		w.Header().Set("Upgrade", "flynn-attach/0")
		w.WriteHeader(http.StatusSwitchingProtocols)
		conn, _, err := w.(http.Hijacker).Hijack()
		if err != nil {
			panic(err)
		}
		defer conn.Close()

		done := make(chan struct{}, 2)
		cp := func(to io.Writer, from io.Reader) {
			io.Copy(to, from)
			done <- struct{}{}
		}
		go cp(conn, attachClient.Conn())
		go cp(attachClient.Conn(), conn)

		// Wait for one of the connections to be closed or interrupted. EOF is
		// framed inside the attach protocol, so a read/write error indicates
		// that we're done and should clean up.
		<-done
		return
	} else {
		httphelper.JSON(w, 200, &ct.Job{
			ID:        job.ID,
			UUID:      uuid,
			HostID:    hostID,
			ReleaseID: newJob.ReleaseID,
			Args:      newJob.Args,
		})
	}
}
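// The attach branch above uses a common net/http pattern: write the 101
// response, hijack the underlying connection, and splice it to the backend
// with two io.Copy goroutines, returning once either direction finishes. A
// minimal standalone sketch of that splice (the backend address and the
// example/0 upgrade token are hypothetical, and no attach framing is done):
package main

import (
	"io"
	"log"
	"net"
	"net/http"
)

func proxy(w http.ResponseWriter, req *http.Request) {
	backend, err := net.Dial("tcp", "127.0.0.1:9000") // hypothetical backend
	if err != nil {
		http.Error(w, "backend connect failed", http.StatusBadGateway)
		return
	}
	defer backend.Close()

	hj, ok := w.(http.Hijacker)
	if !ok {
		http.Error(w, "hijacking not supported", http.StatusInternalServerError)
		return
	}
	w.Header().Set("Connection", "upgrade")
	w.Header().Set("Upgrade", "example/0")
	w.WriteHeader(http.StatusSwitchingProtocols)

	// Hijack flushes the buffered 101 response and hands back the raw
	// connection; from here on the HTTP server no longer manages it.
	conn, _, err := hj.Hijack()
	if err != nil {
		log.Println("hijack failed:", err)
		return
	}
	defer conn.Close()

	// The buffered channel lets both copy goroutines send without blocking;
	// receiving once returns as soon as either side stops, mirroring the
	// single <-done in the handler above. The deferred Closes then unblock
	// the remaining copy.
	done := make(chan struct{}, 2)
	cp := func(to io.Writer, from io.Reader) {
		io.Copy(to, from)
		done <- struct{}{}
	}
	go cp(conn, backend)
	go cp(backend, conn)
	<-done
}

func main() {
	log.Fatal(http.ListenAndServe(":8080", http.HandlerFunc(proxy)))
}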