func (r *ReleaseRepo) Add(data interface{}) error { release := data.(*ct.Release) releaseCopy := *release releaseCopy.ID = "" releaseCopy.ArtifactID = "" releaseCopy.CreatedAt = nil for typ, proc := range releaseCopy.Processes { resource.SetDefaults(&proc.Resources) releaseCopy.Processes[typ] = proc } data, err := json.Marshal(&releaseCopy) if err != nil { return err } if release.ID == "" { release.ID = random.UUID() } var artifactID *string if release.ArtifactID != "" { artifactID = &release.ArtifactID } err = r.db.QueryRow("INSERT INTO releases (release_id, artifact_id, data) VALUES ($1, $2, $3) RETURNING created_at", release.ID, artifactID, data).Scan(&release.CreatedAt) release.ID = postgres.CleanUUID(release.ID) if release.ArtifactID != "" { release.ArtifactID = postgres.CleanUUID(release.ArtifactID) } return err }
func testResources() resource.Resources { r := resource.Resources{ resource.TypeMemory: resource.Spec{Limit: typeconv.Int64Ptr(resourceMem)}, resource.TypeMaxFD: resource.Spec{Limit: typeconv.Int64Ptr(resourceMaxFD)}, } resource.SetDefaults(&r) return r }
func (r *ReleaseRepo) Add(data interface{}) error { release := data.(*ct.Release) releaseCopy := *release releaseCopy.ID = "" releaseCopy.ArtifactID = "" releaseCopy.CreatedAt = nil for typ, proc := range releaseCopy.Processes { resource.SetDefaults(&proc.Resources) releaseCopy.Processes[typ] = proc } data, err := json.Marshal(&releaseCopy) if err != nil { return err } if release.ID == "" { release.ID = random.UUID() } var artifactID *string if release.ArtifactID != "" { artifactID = &release.ArtifactID } tx, err := r.db.Begin() if err != nil { return err } err = tx.QueryRow("INSERT INTO releases (release_id, artifact_id, data) VALUES ($1, $2, $3) RETURNING created_at", release.ID, artifactID, data).Scan(&release.CreatedAt) if err != nil { tx.Rollback() return err } release.ID = postgres.CleanUUID(release.ID) if release.ArtifactID != "" { release.ArtifactID = postgres.CleanUUID(release.ArtifactID) } if err := createEvent(tx.Exec, &ct.Event{ ObjectID: release.ID, ObjectType: ct.EventTypeRelease, }, release); err != nil { tx.Rollback() return err } return tx.Commit() }
// TestRelease verifies `flynn release add`: a release created from a
// JSON file should round-trip its env and process types through the
// controller, scaling an unknown process type should be rejected, and a
// scaled "env" job should expose the expected environment variables in
// its logs.
func (s *CLISuite) TestRelease(t *c.C) {
	releaseJSON := []byte(`{ "env": {"GLOBAL": "FOO"}, "processes": { "echoer": { "cmd": ["/bin/echoer"], "env": {"ECHOER_ONLY": "BAR"} }, "env": { "cmd": ["sh", "-c", "env; while true; do sleep 60; done"], "env": {"ENV_ONLY": "BAZ"} } } }`)
	release := &ct.Release{}
	t.Assert(json.Unmarshal(releaseJSON, &release), c.IsNil)
	// The controller fills in default resource limits on creation, so
	// apply the same defaults to the expected value before comparing.
	for typ, proc := range release.Processes {
		resource.SetDefaults(&proc.Resources)
		release.Processes[typ] = proc
	}
	// Write the release JSON to a temp file for `release add -f`.
	file, err := ioutil.TempFile("", "")
	t.Assert(err, c.IsNil)
	file.Write(releaseJSON)
	file.Close()
	app := s.newCliTestApp(t)
	defer app.cleanup()
	t.Assert(app.flynn("release", "add", "-f", file.Name(), imageURIs["test-apps"]), Succeeds)
	r, err := s.controller.GetAppRelease(app.name)
	t.Assert(err, c.IsNil)
	t.Assert(r.Env, c.DeepEquals, release.Env)
	t.Assert(r.Processes, c.DeepEquals, release.Processes)
	// Scaling an undefined process type must fail with a clear error.
	scaleCmd := app.flynn("scale", "--no-wait", "env=1", "foo=1")
	t.Assert(scaleCmd, c.Not(Succeeds))
	t.Assert(scaleCmd, OutputContains, "ERROR: unknown process types: \"foo\"")
	// create a job watcher for the new release
	watcher, err := s.controllerClient(t).WatchJobEvents(app.name, r.ID)
	t.Assert(err, c.IsNil)
	defer watcher.Close()
	scaleCmd = app.flynn("scale", "--no-wait", "env=1")
	t.Assert(watcher.WaitFor(ct.JobEvents{"env": {ct.JobStateUp: 1}}, scaleTimeout, nil), c.IsNil)
	// The "env" job dumps its environment: global and own-process vars
	// should appear, but vars scoped to other process types should not.
	envLog := app.flynn("log")
	t.Assert(envLog, Succeeds)
	t.Assert(envLog, SuccessfulOutputContains, "GLOBAL=FOO")
	t.Assert(envLog, SuccessfulOutputContains, "ENV_ONLY=BAZ")
	t.Assert(envLog, c.Not(SuccessfulOutputContains), "ECHOER_ONLY=BAR")
}
func (r *ReleaseRepo) Add(data interface{}) error { release := data.(*ct.Release) for typ, proc := range release.Processes { resource.SetDefaults(&proc.Resources) release.Processes[typ] = proc } if release.ID == "" { release.ID = random.UUID() } if release.LegacyArtifactID != "" && len(release.ArtifactIDs) == 0 { release.ArtifactIDs = []string{release.LegacyArtifactID} } tx, err := r.db.Begin() if err != nil { return err } err = tx.QueryRow("release_insert", release.ID, release.Env, release.Processes, release.Meta).Scan(&release.CreatedAt) if err != nil { tx.Rollback() return err } for i, artifactID := range release.ArtifactIDs { if err := tx.Exec("release_artifacts_insert", release.ID, artifactID, i); err != nil { tx.Rollback() if e, ok := err.(pgx.PgError); ok && e.Code == postgres.CheckViolation { return ct.ValidationError{ Field: "artifacts", Message: e.Message, } } return err } } if err := createEvent(tx.Exec, &ct.Event{ ObjectID: release.ID, ObjectType: ct.EventTypeRelease, }, release); err != nil { tx.Rollback() return err } return tx.Commit() }
func (r *ReleaseRepo) Add(data interface{}) error { release := data.(*ct.Release) releaseCopy := *release releaseCopy.ID = "" releaseCopy.ArtifactID = "" releaseCopy.CreatedAt = nil releaseCopy.Meta = nil for typ, proc := range releaseCopy.Processes { resource.SetDefaults(&proc.Resources) releaseCopy.Processes[typ] = proc } if release.ID == "" { release.ID = random.UUID() } var artifactID *string if release.ArtifactID != "" { artifactID = &release.ArtifactID } tx, err := r.db.Begin() if err != nil { return err } err = tx.QueryRow("release_insert", release.ID, artifactID, release.Env, release.Processes, release.Meta).Scan(&release.CreatedAt) if err != nil { tx.Rollback() return err } if err := createEvent(tx.Exec, &ct.Event{ ObjectID: release.ID, ObjectType: ct.EventTypeRelease, }, release); err != nil { tx.Rollback() return err } return tx.Commit() }
func (r *ReleaseRepo) Add(data interface{}) error { release := data.(*ct.Release) for typ, proc := range release.Processes { // handle deprecated Entrypoint and Cmd if len(proc.DeprecatedEntrypoint) > 0 { proc.Args = proc.DeprecatedEntrypoint } if len(proc.DeprecatedCmd) > 0 { proc.Args = append(proc.Args, proc.DeprecatedCmd...) } resource.SetDefaults(&proc.Resources) release.Processes[typ] = proc } if release.ID == "" { release.ID = random.UUID() } if release.LegacyArtifactID != "" && len(release.ArtifactIDs) == 0 { release.ArtifactIDs = []string{release.LegacyArtifactID} } if value, ok := release.Env[""]; ok { return ct.ValidationError{ Field: "env", Message: fmt.Sprintf("you can't create an env var with an empty key (tried to set \"\"=%q)", value), } } tx, err := r.db.Begin() if err != nil { return err } err = tx.QueryRow("release_insert", release.ID, release.Env, release.Processes, release.Meta).Scan(&release.CreatedAt) if err != nil { tx.Rollback() return err } for i, artifactID := range release.ArtifactIDs { if err := tx.Exec("release_artifacts_insert", release.ID, artifactID, i); err != nil { tx.Rollback() if e, ok := err.(pgx.PgError); ok && e.Code == postgres.CheckViolation { return ct.ValidationError{ Field: "artifacts", Message: e.Message, } } return err } } if err := createEvent(tx.Exec, &ct.Event{ ObjectID: release.ID, ObjectType: ct.EventTypeRelease, }, release); err != nil { tx.Rollback() return err } return tx.Commit() }
// TestRelease verifies `flynn release update`: it creates a release,
// applies a JSON patch to it via the CLI, and checks that the merged
// env/processes match expectations, that scaling an unknown process
// type fails, and that a scaled "env" job sees the updated environment
// in its logs.
func (s *CLISuite) TestRelease(t *c.C) {
	app := s.newCliTestApp(t)
	defer app.cleanup()
	release := &ct.Release{
		ArtifactIDs: []string{s.createArtifact(t, "test-apps").ID},
		Env:         map[string]string{"GLOBAL": "FOO"},
		Processes: map[string]ct.ProcessType{
			"echoer": {
				Args: []string{"/bin/echoer"},
				Env:  map[string]string{"ECHOER_ONLY": "BAR"},
			},
			"env": {
				Args: []string{"sh", "-c", "env; while true; do sleep 60; done"},
				Env:  map[string]string{"ENV_ONLY": "BAZ"},
			},
		},
	}
	client := s.controllerClient(t)
	t.Assert(client.CreateRelease(release), c.IsNil)
	t.Assert(client.SetAppRelease(app.id, release.ID), c.IsNil)
	// Patch the release from a file: override one env var, add another.
	updateFile := filepath.Join(t.MkDir(), "updates.json")
	updateJSON := []byte(`{ "processes": { "echoer": { "env": {"ECHOER_ONLY": "BAT"} }, "env": { "env": {"ENV_UPDATE": "QUUX"} } } }`)
	t.Assert(ioutil.WriteFile(updateFile, updateJSON, 0644), c.IsNil)
	t.Assert(app.flynn("release", "update", updateFile), Succeeds)
	// Expected post-update release definition.
	resultJSON := []byte(`{ "env": {"GLOBAL": "FOO"}, "processes": { "echoer": { "args": ["/bin/echoer"], "env": { "ECHOER_ONLY": "BAT" } }, "env": { "args": ["sh", "-c", "env; while true; do sleep 60; done"], "env": { "ENV_ONLY": "BAZ", "ENV_UPDATE": "QUUX" } } } }`)
	result := &ct.Release{}
	t.Assert(json.Unmarshal(resultJSON, &result), c.IsNil)
	// The controller applies default resource limits on creation, so
	// mirror that in the expected value before comparing.
	for typ, proc := range result.Processes {
		resource.SetDefaults(&proc.Resources)
		result.Processes[typ] = proc
	}
	release, err := s.controller.GetAppRelease(app.name)
	t.Assert(err, c.IsNil)
	t.Assert(release.Env, c.DeepEquals, result.Env)
	t.Assert(release.Processes, c.DeepEquals, result.Processes)
	// Scaling an undefined process type must fail with a clear error.
	scaleCmd := app.flynn("scale", "--no-wait", "env=1", "foo=1")
	t.Assert(scaleCmd, c.Not(Succeeds))
	t.Assert(scaleCmd, OutputContains, "ERROR: unknown process types: \"foo\"")
	// create a job watcher for the new release
	watcher, err := client.WatchJobEvents(app.name, release.ID)
	t.Assert(err, c.IsNil)
	defer watcher.Close()
	scaleCmd = app.flynn("scale", "--no-wait", "env=1")
	t.Assert(watcher.WaitFor(ct.JobEvents{"env": {ct.JobStateUp: 1}}, scaleTimeout, nil), c.IsNil)
	// The "env" job dumps its environment: updated and own-process vars
	// should appear, but vars scoped to other process types should not.
	envLog := app.flynn("log")
	t.Assert(envLog, Succeeds)
	t.Assert(envLog, SuccessfulOutputContains, "GLOBAL=FOO")
	t.Assert(envLog, SuccessfulOutputContains, "ENV_ONLY=BAZ")
	t.Assert(envLog, SuccessfulOutputContains, "ENV_UPDATE=QUUX")
	t.Assert(envLog, c.Not(SuccessfulOutputContains), "ECHOER_ONLY=BAR")
	t.Assert(envLog, c.Not(SuccessfulOutputContains), "ECHOER_UPDATE=BAT")
}
// RunJob starts a one-off job for the release given in the request on a
// randomly chosen host. If the client requests a "flynn-attach/0"
// protocol upgrade, the handler hijacks the connection and proxies the
// job's stdio; otherwise it responds with the created job as JSON.
func (c *controllerAPI) RunJob(ctx context.Context, w http.ResponseWriter, req *http.Request) {
	var newJob ct.NewJob
	if err := httphelper.DecodeJSON(req, &newJob); err != nil {
		respondWithError(w, err)
		return
	}
	if err := schema.Validate(newJob); err != nil {
		respondWithError(w, err)
		return
	}
	data, err := c.releaseRepo.Get(newJob.ReleaseID)
	if err != nil {
		respondWithError(w, err)
		return
	}
	release := data.(*ct.Release)
	data, err = c.artifactRepo.Get(release.ArtifactID)
	if err != nil {
		respondWithError(w, err)
		return
	}
	artifact := data.(*ct.Artifact)
	// The client requests interactive attach via the Upgrade header.
	attach := strings.Contains(req.Header.Get("Upgrade"), "flynn-attach/0")
	hosts, err := c.clusterClient.Hosts()
	if err != nil {
		respondWithError(w, err)
		return
	}
	if len(hosts) == 0 {
		respondWithError(w, errors.New("no hosts found"))
		return
	}
	// Pick a random host to run the job on.
	client := hosts[random.Math.Intn(len(hosts))]
	id := cluster.GenerateJobID(client.ID(), "")
	app := c.getApp(ctx)
	// Env precedence (later writes win): Flynn identifiers, optionally
	// the release env, then per-request overrides.
	env := make(map[string]string, len(release.Env)+len(newJob.Env)+4)
	env["FLYNN_APP_ID"] = app.ID
	env["FLYNN_RELEASE_ID"] = release.ID
	env["FLYNN_PROCESS_TYPE"] = ""
	env["FLYNN_JOB_ID"] = id
	if newJob.ReleaseEnv {
		for k, v := range release.Env {
			env[k] = v
		}
	}
	for k, v := range newJob.Env {
		env[k] = v
	}
	// Request metadata plus the controller bookkeeping keys.
	metadata := make(map[string]string, len(newJob.Meta)+3)
	for k, v := range newJob.Meta {
		metadata[k] = v
	}
	metadata["flynn-controller.app"] = app.ID
	metadata["flynn-controller.app_name"] = app.Name
	metadata["flynn-controller.release"] = release.ID
	job := &host.Job{
		ID:       id,
		Metadata: metadata,
		Artifact: host.Artifact{
			Type: artifact.Type,
			URI:  artifact.URI,
		},
		Config: host.ContainerConfig{
			Cmd:        newJob.Cmd,
			Env:        env,
			TTY:        newJob.TTY,
			Stdin:      attach,
			DisableLog: newJob.DisableLog,
		},
		Resources: newJob.Resources,
	}
	resource.SetDefaults(&job.Resources)
	if len(newJob.Entrypoint) > 0 {
		job.Config.Entrypoint = newJob.Entrypoint
	}
	var attachClient cluster.AttachClient
	if attach {
		// Attach before scheduling so no output is missed.
		attachReq := &host.AttachReq{
			JobID:  job.ID,
			Flags:  host.AttachFlagStdout | host.AttachFlagStderr | host.AttachFlagStdin | host.AttachFlagStream,
			Height: uint16(newJob.Lines),
			Width:  uint16(newJob.Columns),
		}
		attachClient, err = client.Attach(attachReq, true)
		if err != nil {
			respondWithError(w, fmt.Errorf("attach failed: %s", err.Error()))
			return
		}
		defer attachClient.Close()
	}
	if err := client.AddJob(job); err != nil {
		respondWithError(w, fmt.Errorf("schedule failed: %s", err.Error()))
		return
	}
	if attach {
		if err := attachClient.Wait(); err != nil {
			respondWithError(w, fmt.Errorf("attach wait failed: %s", err.Error()))
			return
		}
		// Switch protocols and hijack the connection so the attach
		// stream can be proxied in both directions.
		w.Header().Set("Connection", "upgrade")
		w.Header().Set("Upgrade", "flynn-attach/0")
		w.WriteHeader(http.StatusSwitchingProtocols)
		conn, _, err := w.(http.Hijacker).Hijack()
		if err != nil {
			panic(err)
		}
		defer conn.Close()
		done := make(chan struct{}, 2)
		cp := func(to io.Writer, from io.Reader) {
			io.Copy(to, from)
			done <- struct{}{}
		}
		go cp(conn, attachClient.Conn())
		go cp(attachClient.Conn(), conn)
		// NOTE(review): this waits for BOTH copy directions to finish;
		// if one side half-closes, the other copy may never return and
		// this handler can block indefinitely. Other variants of this
		// handler wait for just one direction — confirm which is
		// intended for the attach protocol.
		<-done
		<-done
		return
	} else {
		httphelper.JSON(w, 200, &ct.Job{
			ID:        job.ID,
			ReleaseID: newJob.ReleaseID,
			Cmd:       newJob.Cmd,
		})
	}
}
// Run is a bootstrap action that provisions any configured external
// resources, interpolates the release, and starts the requested number
// of processes of each type across the cluster hosts.
func (a *RunAppAction) Run(s *State) error {
	// Inherit app/formation from a previous step when one is referenced.
	if a.AppStep != "" {
		data, err := getAppStep(s, a.AppStep)
		if err != nil {
			return err
		}
		a.App = data.App
		// NOTE(review): this save/restore of a.Processes around the
		// ExpandedFormation assignment looks like a no-op — confirm
		// whether it guards against something non-obvious.
		procs := a.Processes
		a.ExpandedFormation = data.ExpandedFormation
		a.Processes = procs
	}
	// Record this step's state so later steps can reference it.
	as := &RunAppState{
		ExpandedFormation: a.ExpandedFormation,
		Resources:         make([]*resource.Resource, 0, len(a.Resources)),
		Providers:         make([]*ct.Provider, 0, len(a.Resources)),
	}
	s.StepData[a.ID] = as
	if a.App == nil {
		a.App = &ct.App{}
	}
	if a.App.ID == "" {
		a.App.ID = random.UUID()
	}
	if a.ImageArtifact == nil {
		return errors.New("bootstrap: artifact must be set")
	}
	if a.ImageArtifact.ID == "" {
		a.ImageArtifact.ID = random.UUID()
	}
	if a.Release == nil {
		return errors.New("bootstrap: release must be set")
	}
	if a.Release.ID == "" {
		a.Release.ID = random.UUID()
	}
	a.Release.ArtifactIDs = []string{a.ImageArtifact.ID}
	if a.Release.Env == nil {
		a.Release.Env = make(map[string]string)
	}
	interpolateRelease(s, a.Release)
	// Provision each external resource and merge its env into the
	// release env.
	for _, p := range a.Resources {
		u, err := url.Parse(p.URL)
		if err != nil {
			return err
		}
		lookupDiscoverdURLHost(s, u, time.Second)
		res, err := resource.Provision(u.String(), nil)
		if err != nil {
			return err
		}
		as.Providers = append(as.Providers, p)
		as.Resources = append(as.Resources, res)
		for k, v := range res.Env {
			a.Release.Env[k] = v
		}
	}
	for typ, count := range a.Processes {
		// Singleton clusters never run more than one of each type.
		if s.Singleton && count > 1 {
			a.Processes[typ] = 1
			count = 1
		}
		hosts := s.ShuffledHosts()
		// Omni processes run one instance on every host.
		if a.ExpandedFormation.Release.Processes[typ].Omni {
			count = len(hosts)
		}
		for i := 0; i < count; i++ {
			// Round-robin jobs across the shuffled host list.
			host := hosts[i%len(hosts)]
			config := utils.JobConfig(a.ExpandedFormation, typ, host.ID(), "")
			hostresource.SetDefaults(&config.Resources)
			// Processes marked Data need a volume before starting.
			if a.ExpandedFormation.Release.Processes[typ].Data {
				if err := utils.ProvisionVolume(host, config); err != nil {
					return err
				}
			}
			if err := startJob(s, host, config); err != nil {
				return err
			}
		}
	}
	return nil
}
// RunJob starts a one-off job for the release given in the request on a
// randomly chosen host, resolving its artifacts and image entrypoint
// and optionally provisioning a /data volume. If the client requests a
// "flynn-attach/0" protocol upgrade, the handler hijacks the connection
// and proxies the job's stdio; otherwise it responds with the created
// job as JSON.
func (c *controllerAPI) RunJob(ctx context.Context, w http.ResponseWriter, req *http.Request) {
	var newJob ct.NewJob
	if err := httphelper.DecodeJSON(req, &newJob); err != nil {
		respondWithError(w, err)
		return
	}
	if err := schema.Validate(newJob); err != nil {
		respondWithError(w, err)
		return
	}
	data, err := c.releaseRepo.Get(newJob.ReleaseID)
	if err != nil {
		respondWithError(w, err)
		return
	}
	release := data.(*ct.Release)
	// Job artifacts: a request-level override wins, else the release's.
	var artifactIDs []string
	if len(newJob.ArtifactIDs) > 0 {
		artifactIDs = newJob.ArtifactIDs
	} else if len(release.ArtifactIDs) > 0 {
		artifactIDs = release.ArtifactIDs
	} else {
		httphelper.ValidationError(w, "release.ArtifactIDs", "cannot be empty")
		return
	}
	artifacts := make([]*ct.Artifact, len(artifactIDs))
	artifactList, err := c.artifactRepo.ListIDs(artifactIDs...)
	if err != nil {
		respondWithError(w, err)
		return
	}
	// Preserve the requested artifact order.
	for i, id := range artifactIDs {
		artifacts[i] = artifactList[id]
	}
	var entrypoint ct.ImageEntrypoint
	if e := utils.GetEntrypoint(artifacts, ""); e != nil {
		entrypoint = *e
	}
	// The client requests interactive attach via the Upgrade header.
	attach := strings.Contains(req.Header.Get("Upgrade"), "flynn-attach/0")
	hosts, err := c.clusterClient.Hosts()
	if err != nil {
		respondWithError(w, err)
		return
	}
	if len(hosts) == 0 {
		respondWithError(w, errors.New("no hosts found"))
		return
	}
	// Pick a random host to run the job on.
	client := hosts[random.Math.Intn(len(hosts))]
	uuid := random.UUID()
	hostID := client.ID()
	id := cluster.GenerateJobID(hostID, uuid)
	app := c.getApp(ctx)
	// Env precedence (later writes win): Flynn identifiers, image
	// entrypoint env, optionally the release env, request overrides.
	env := make(map[string]string, len(entrypoint.Env)+len(release.Env)+len(newJob.Env)+4)
	env["FLYNN_APP_ID"] = app.ID
	env["FLYNN_RELEASE_ID"] = release.ID
	env["FLYNN_PROCESS_TYPE"] = ""
	env["FLYNN_JOB_ID"] = id
	for k, v := range entrypoint.Env {
		env[k] = v
	}
	if newJob.ReleaseEnv {
		for k, v := range release.Env {
			env[k] = v
		}
	}
	for k, v := range newJob.Env {
		env[k] = v
	}
	// Request metadata plus the controller bookkeeping keys.
	metadata := make(map[string]string, len(newJob.Meta)+3)
	for k, v := range newJob.Meta {
		metadata[k] = v
	}
	metadata["flynn-controller.app"] = app.ID
	metadata["flynn-controller.app_name"] = app.Name
	metadata["flynn-controller.release"] = release.ID
	job := &host.Job{
		ID:       id,
		Metadata: metadata,
		Config: host.ContainerConfig{
			Args:       entrypoint.Args,
			Env:        env,
			WorkingDir: entrypoint.WorkingDir,
			Uid:        entrypoint.Uid,
			Gid:        entrypoint.Gid,
			TTY:        newJob.TTY,
			Stdin:      attach,
			DisableLog: newJob.DisableLog,
		},
		Resources: newJob.Resources,
		Partition: string(newJob.Partition),
	}
	resource.SetDefaults(&job.Resources)
	// Explicit request args replace the image entrypoint args.
	if len(newJob.Args) > 0 {
		job.Config.Args = newJob.Args
	}
	utils.SetupMountspecs(job, artifacts)

	// provision data volume if required
	if newJob.Data {
		vol := &ct.VolumeReq{Path: "/data", DeleteOnStop: true}
		if _, err := utils.ProvisionVolume(vol, client, job); err != nil {
			respondWithError(w, err)
			return
		}
	}
	var attachClient cluster.AttachClient
	if attach {
		// Attach before scheduling so no output is missed.
		attachReq := &host.AttachReq{
			JobID:  job.ID,
			Flags:  host.AttachFlagStdout | host.AttachFlagStderr | host.AttachFlagStdin | host.AttachFlagStream,
			Height: uint16(newJob.Lines),
			Width:  uint16(newJob.Columns),
		}
		attachClient, err = client.Attach(attachReq, true)
		if err != nil {
			respondWithError(w, fmt.Errorf("attach failed: %s", err.Error()))
			return
		}
		defer attachClient.Close()
	}
	if err := client.AddJob(job); err != nil {
		respondWithError(w, fmt.Errorf("schedule failed: %s", err.Error()))
		return
	}
	if attach {
		// TODO(titanous): This Wait could block indefinitely if something goes
		// wrong, a context should be threaded in that cancels if the client
		// goes away.
		if err := attachClient.Wait(); err != nil {
			respondWithError(w, fmt.Errorf("attach wait failed: %s", err.Error()))
			return
		}
		// Switch protocols and hijack the connection so the attach
		// stream can be proxied in both directions.
		w.Header().Set("Connection", "upgrade")
		w.Header().Set("Upgrade", "flynn-attach/0")
		w.WriteHeader(http.StatusSwitchingProtocols)
		conn, _, err := w.(http.Hijacker).Hijack()
		if err != nil {
			panic(err)
		}
		defer conn.Close()
		done := make(chan struct{}, 2)
		cp := func(to io.Writer, from io.Reader) {
			io.Copy(to, from)
			done <- struct{}{}
		}
		go cp(conn, attachClient.Conn())
		go cp(attachClient.Conn(), conn)
		// Wait for one of the connections to be closed or interrupted. EOF is
		// framed inside the attach protocol, so a read/write error indicates
		// that we're done and should clean up.
		<-done
		return
	} else {
		httphelper.JSON(w, 200, &ct.Job{
			ID:        job.ID,
			UUID:      uuid,
			HostID:    hostID,
			ReleaseID: newJob.ReleaseID,
			Args:      newJob.Args,
		})
	}
}
// RunJob starts a one-off job for the release given in the request on a
// randomly chosen host, using the release's image and file artifacts.
// If the client requests a "flynn-attach/0" protocol upgrade, the
// handler hijacks the connection and proxies the job's stdio; otherwise
// it responds with the created job as JSON.
func (c *controllerAPI) RunJob(ctx context.Context, w http.ResponseWriter, req *http.Request) {
	var newJob ct.NewJob
	if err := httphelper.DecodeJSON(req, &newJob); err != nil {
		respondWithError(w, err)
		return
	}
	if err := schema.Validate(newJob); err != nil {
		respondWithError(w, err)
		return
	}
	data, err := c.releaseRepo.Get(newJob.ReleaseID)
	if err != nil {
		respondWithError(w, err)
		return
	}
	release := data.(*ct.Release)
	// A job cannot run without an image artifact.
	if release.ImageArtifactID() == "" {
		httphelper.ValidationError(w, "release.ImageArtifact", "must be set")
		return
	}
	// The client requests interactive attach via the Upgrade header.
	attach := strings.Contains(req.Header.Get("Upgrade"), "flynn-attach/0")
	hosts, err := c.clusterClient.Hosts()
	if err != nil {
		respondWithError(w, err)
		return
	}
	if len(hosts) == 0 {
		respondWithError(w, errors.New("no hosts found"))
		return
	}
	// Pick a random host to run the job on.
	client := hosts[random.Math.Intn(len(hosts))]
	uuid := random.UUID()
	hostID := client.ID()
	id := cluster.GenerateJobID(hostID, uuid)
	app := c.getApp(ctx)
	// Env precedence (later writes win): Flynn identifiers, optionally
	// the release env, then per-request overrides.
	env := make(map[string]string, len(release.Env)+len(newJob.Env)+4)
	env["FLYNN_APP_ID"] = app.ID
	env["FLYNN_RELEASE_ID"] = release.ID
	env["FLYNN_PROCESS_TYPE"] = ""
	env["FLYNN_JOB_ID"] = id
	if newJob.ReleaseEnv {
		for k, v := range release.Env {
			env[k] = v
		}
	}
	for k, v := range newJob.Env {
		env[k] = v
	}
	// Request metadata plus the controller bookkeeping keys.
	metadata := make(map[string]string, len(newJob.Meta)+3)
	for k, v := range newJob.Meta {
		metadata[k] = v
	}
	metadata["flynn-controller.app"] = app.ID
	metadata["flynn-controller.app_name"] = app.Name
	metadata["flynn-controller.release"] = release.ID
	job := &host.Job{
		ID:       id,
		Metadata: metadata,
		Config: host.ContainerConfig{
			Env:        env,
			TTY:        newJob.TTY,
			Stdin:      attach,
			DisableLog: newJob.DisableLog,
		},
		Resources: newJob.Resources,
	}
	resource.SetDefaults(&job.Resources)
	if len(newJob.Args) > 0 {
		job.Config.Args = newJob.Args
	}
	// Resolve the release's artifacts into host artifacts: one image
	// artifact plus any file artifacts, in order.
	if len(release.ArtifactIDs) > 0 {
		artifacts, err := c.artifactRepo.ListIDs(release.ArtifactIDs...)
		if err != nil {
			respondWithError(w, err)
			return
		}
		job.ImageArtifact = artifacts[release.ImageArtifactID()].HostArtifact()
		job.FileArtifacts = make([]*host.Artifact, len(release.FileArtifactIDs()))
		for i, id := range release.FileArtifactIDs() {
			job.FileArtifacts[i] = artifacts[id].HostArtifact()
		}
	}
	// ensure slug apps use /runner/init
	if release.IsGitDeploy() && (len(job.Config.Args) == 0 || job.Config.Args[0] != "/runner/init") {
		job.Config.Args = append([]string{"/runner/init"}, job.Config.Args...)
	}
	var attachClient cluster.AttachClient
	if attach {
		// Attach before scheduling so no output is missed.
		attachReq := &host.AttachReq{
			JobID:  job.ID,
			Flags:  host.AttachFlagStdout | host.AttachFlagStderr | host.AttachFlagStdin | host.AttachFlagStream,
			Height: uint16(newJob.Lines),
			Width:  uint16(newJob.Columns),
		}
		attachClient, err = client.Attach(attachReq, true)
		if err != nil {
			respondWithError(w, fmt.Errorf("attach failed: %s", err.Error()))
			return
		}
		defer attachClient.Close()
	}
	if err := client.AddJob(job); err != nil {
		respondWithError(w, fmt.Errorf("schedule failed: %s", err.Error()))
		return
	}
	if attach {
		// TODO(titanous): This Wait could block indefinitely if something goes
		// wrong, a context should be threaded in that cancels if the client
		// goes away.
		if err := attachClient.Wait(); err != nil {
			respondWithError(w, fmt.Errorf("attach wait failed: %s", err.Error()))
			return
		}
		// Switch protocols and hijack the connection so the attach
		// stream can be proxied in both directions.
		w.Header().Set("Connection", "upgrade")
		w.Header().Set("Upgrade", "flynn-attach/0")
		w.WriteHeader(http.StatusSwitchingProtocols)
		conn, _, err := w.(http.Hijacker).Hijack()
		if err != nil {
			panic(err)
		}
		defer conn.Close()
		done := make(chan struct{}, 2)
		cp := func(to io.Writer, from io.Reader) {
			io.Copy(to, from)
			done <- struct{}{}
		}
		go cp(conn, attachClient.Conn())
		go cp(attachClient.Conn(), conn)
		// Wait for one of the connections to be closed or interrupted. EOF is
		// framed inside the attach protocol, so a read/write error indicates
		// that we're done and should clean up.
		<-done
		return
	} else {
		httphelper.JSON(w, 200, &ct.Job{
			ID:        job.ID,
			UUID:      uuid,
			HostID:    hostID,
			ReleaseID: newJob.ReleaseID,
			Args:      newJob.Args,
		})
	}
}
// run is the git-receiver entry point: it builds the pushed app with
// slugbuilder, uploads the slug to the blobstore, creates and deploys a
// new release, and — for a first-time web app — scales web=1 and waits
// for the service to come up.
func run() error {
	client, err := controller.NewClient("", os.Getenv("CONTROLLER_KEY"))
	if err != nil {
		return fmt.Errorf("Unable to connect to controller: %s", err)
	}
	usage := ` Usage: flynn-receiver <app> <rev> [-e <var>=<val>]... [-m <key>=<val>]... Options: -e,--env <var>=<val> -m,--meta <key>=<val> `[1:]
	args, _ := docopt.Parse(usage, nil, true, version.String(), false)
	appName := args.String["<app>"]
	env, err := parsePairs(args, "--env")
	if err != nil {
		return err
	}
	meta, err := parsePairs(args, "--meta")
	if err != nil {
		return err
	}
	// Resolve the builder and runner images up front so a bad
	// configuration fails before any work is done.
	slugBuilder, err := client.GetArtifact(os.Getenv("SLUGBUILDER_IMAGE_ID"))
	if err != nil {
		return fmt.Errorf("Error getting slugbuilder image: %s", err)
	}
	slugRunnerID := os.Getenv("SLUGRUNNER_IMAGE_ID")
	if _, err := client.GetArtifact(slugRunnerID); err != nil {
		return fmt.Errorf("Error getting slugrunner image: %s", err)
	}
	app, err := client.GetApp(appName)
	if err == controller.ErrNotFound {
		return fmt.Errorf("Unknown app %q", appName)
	} else if err != nil {
		return fmt.Errorf("Error retrieving app: %s", err)
	}
	// A first push has no release yet; treat that as an empty release.
	prevRelease, err := client.GetAppRelease(app.Name)
	if err == controller.ErrNotFound {
		prevRelease = &ct.Release{}
	} else if err != nil {
		return fmt.Errorf("Error getting current app release: %s", err)
	}
	fmt.Printf("-----> Building %s...\n", app.Name)
	// Build-job env: cache location plus buildpack selection, where the
	// pushed env takes precedence over the previous release's env.
	jobEnv := make(map[string]string)
	jobEnv["BUILD_CACHE_URL"] = fmt.Sprintf("%s/%s-cache.tgz", blobstoreURL, app.ID)
	if buildpackURL, ok := env["BUILDPACK_URL"]; ok {
		jobEnv["BUILDPACK_URL"] = buildpackURL
	} else if buildpackURL, ok := prevRelease.Env["BUILDPACK_URL"]; ok {
		jobEnv["BUILDPACK_URL"] = buildpackURL
	}
	for _, k := range []string{"SSH_CLIENT_KEY", "SSH_CLIENT_HOSTS"} {
		if v := os.Getenv(k); v != "" {
			jobEnv[k] = v
		}
	}
	slugURL := fmt.Sprintf("%s/%s/slug.tgz", blobstoreURL, random.UUID())
	job := &host.Job{
		Config: host.ContainerConfig{
			Args:       []string{"/tmp/builder/build.sh", slugURL},
			Env:        jobEnv,
			Stdin:      true,
			DisableLog: true,
		},
		Partition: "background",
		Metadata: map[string]string{
			"flynn-controller.app":      app.ID,
			"flynn-controller.app_name": app.Name,
			"flynn-controller.release":  prevRelease.ID,
			"flynn-controller.type":     "slugbuilder",
		},
	}
	// Build resources: reuse the app's slugbuilder settings if present,
	// otherwise fall back to the configured default memory limit.
	if sb, ok := prevRelease.Processes["slugbuilder"]; ok {
		job.Resources = sb.Resources
	} else if rawLimit := os.Getenv("SLUGBUILDER_DEFAULT_MEMORY_LIMIT"); rawLimit != "" {
		if limit, err := resource.ParseLimit(resource.TypeMemory, rawLimit); err == nil {
			r := make(resource.Resources)
			resource.SetDefaults(&r)
			r[resource.TypeMemory] = resource.Spec{Limit: &limit, Request: &limit}
			job.Resources = r
		}
	}
	cmd := exec.Job(*slugBuilder.HostArtifact(), job)
	// Capture stdout so the declared process types can be parsed out of
	// the build output while still echoing it to the user.
	var output bytes.Buffer
	cmd.Stdout = io.MultiWriter(os.Stdout, &output)
	cmd.Stderr = os.Stderr
	// Stream the pushed archive to the builder, appending the previous
	// release's env as an env dir when one exists.
	if len(prevRelease.Env) > 0 {
		stdin, err := cmd.StdinPipe()
		if err != nil {
			return err
		}
		go func() {
			if err := appendEnvDir(os.Stdin, stdin, prevRelease.Env); err != nil {
				log.Fatalln("ERROR:", err)
			}
		}()
	} else {
		cmd.Stdin = os.Stdin
	}
	shutdown.BeforeExit(func() { cmd.Kill() })
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("Build failed: %s", err)
	}
	// Parse the process types the buildpack declared in its output.
	var types []string
	if match := typesPattern.FindSubmatch(output.Bytes()); match != nil {
		types = strings.Split(string(match[1]), ", ")
	}
	fmt.Printf("-----> Creating release...\n")
	slugArtifact := &ct.Artifact{
		Type: host.ArtifactTypeFile,
		URI:  slugURL,
		Meta: map[string]string{"blobstore": "true"},
	}
	if err := client.CreateArtifact(slugArtifact); err != nil {
		return fmt.Errorf("Error creating slug artifact: %s", err)
	}
	// New release: runner image plus the slug, carrying over the
	// previous env/meta with the pushed values merged on top.
	release := &ct.Release{
		ArtifactIDs: []string{slugRunnerID, slugArtifact.ID},
		Env:         prevRelease.Env,
		Meta:        prevRelease.Meta,
	}
	if release.Meta == nil {
		release.Meta = make(map[string]string, len(meta))
	}
	if release.Env == nil {
		release.Env = make(map[string]string, len(env))
	}
	for k, v := range env {
		release.Env[k] = v
	}
	for k, v := range meta {
		release.Meta[k] = v
	}
	// Rebuild the process table from the declared types, keeping any
	// per-type settings from the previous release and exposing web
	// types as TCP services on port 8080.
	procs := make(map[string]ct.ProcessType)
	for _, t := range types {
		proc := prevRelease.Processes[t]
		proc.Args = []string{"/runner/init", "start", t}
		if t == "web" || strings.HasSuffix(t, "-web") {
			proc.Service = app.Name + "-" + t
			proc.Ports = []ct.Port{{
				Port:  8080,
				Proto: "tcp",
				Service: &host.Service{
					Name:   proc.Service,
					Create: true,
					Check:  &host.HealthCheck{Type: "tcp"},
				},
			}}
		}
		procs[t] = proc
	}
	release.Processes = procs
	if err := client.CreateRelease(release); err != nil {
		return fmt.Errorf("Error creating release: %s", err)
	}
	if err := client.DeployAppRelease(app.Name, release.ID, nil); err != nil {
		return fmt.Errorf("Error deploying app release: %s", err)
	}
	// if the app has a web job and has not been scaled before, create a
	// web=1 formation and wait for the "APPNAME-web" service to start
	// (whilst also watching job events so the deploy fails if the job
	// crashes)
	if needsDefaultScale(app.ID, prevRelease.ID, procs, client) {
		fmt.Println("=====> Scaling initial release to web=1")
		formation := &ct.Formation{
			AppID:     app.ID,
			ReleaseID: release.ID,
			Processes: map[string]int{"web": 1},
		}
		jobEvents := make(chan *ct.Job)
		jobStream, err := client.StreamJobEvents(app.ID, jobEvents)
		if err != nil {
			return fmt.Errorf("Error streaming job events: %s", err)
		}
		defer jobStream.Close()
		serviceEvents := make(chan *discoverd.Event)
		serviceStream, err := discoverd.NewService(app.Name + "-web").Watch(serviceEvents)
		if err != nil {
			return fmt.Errorf("Error streaming service events: %s", err)
		}
		defer serviceStream.Close()
		if err := client.PutFormation(formation); err != nil {
			return fmt.Errorf("Error putting formation: %s", err)
		}
		fmt.Println("-----> Waiting for initial web job to start...")
		// Wait for either the service instance of this release to come
		// up, a job to go down, or the deploy timeout to expire.
		err = func() error {
			for {
				select {
				case e, ok := <-serviceEvents:
					if !ok {
						return fmt.Errorf("Service stream closed unexpectedly: %s", serviceStream.Err())
					}
					if e.Kind == discoverd.EventKindUp && e.Instance.Meta["FLYNN_RELEASE_ID"] == release.ID {
						fmt.Println("=====> Initial web job started")
						return nil
					}
				case e, ok := <-jobEvents:
					if !ok {
						return fmt.Errorf("Job stream closed unexpectedly: %s", jobStream.Err())
					}
					if e.State == ct.JobStateDown {
						return errors.New("Initial web job failed to start")
					}
				case <-time.After(time.Duration(app.DeployTimeout) * time.Second):
					return errors.New("Timed out waiting for initial web job to start")
				}
			}
		}()
		if err != nil {
			// Best-effort scale back down so a broken job doesn't keep
			// restarting.
			fmt.Println("-----> WARN: scaling initial release down to web=0 due to error")
			formation.Processes["web"] = 0
			if err := client.PutFormation(formation); err != nil {
				// just print this error and return the original error
				fmt.Println("-----> WARN: could not scale the initial release down (it may continue to run):", err)
			}
			return err
		}
	}
	fmt.Println("=====> Application deployed")
	return nil
}