func randomHost(cc *cluster.Client) (string, error) { hosts, err := cc.ListHosts() if err != nil { return "", err } if len(hosts) == 0 { return "", cluster.ErrNoServers } return schedutil.PickHost(hosts).ID, nil }
func (s *HostSuite) TestSignalJob(t *c.C) { cluster := s.clusterClient(t) // pick a host to run the job on hosts, err := cluster.Hosts() t.Assert(err, c.IsNil) client := schedutil.PickHost(hosts) // start a signal-service job cmd := exec.JobUsingCluster(cluster, exec.DockerImage(imageURIs["test-apps"]), &host.Job{ Config: host.ContainerConfig{ Args: []string{"/bin/signal"}, DisableLog: true, }, }) cmd.HostID = client.ID() var out bytes.Buffer cmd.Stdout = &out t.Assert(cmd.Start(), c.IsNil) _, err = s.discoverdClient(t).Instances("signal-service", 10*time.Second) t.Assert(err, c.IsNil) // send the job a signal t.Assert(client.SignalJob(cmd.Job.ID, int(syscall.SIGTERM)), c.IsNil) // wait for the job to exit done := make(chan error) go func() { done <- cmd.Wait() }() select { case err := <-done: t.Assert(err, c.IsNil) case <-time.After(12 * time.Second): t.Fatal("timed out waiting for job to stop") } // check the output t.Assert(out.String(), c.Equals, "got signal: terminated") }
func (c *Cmd) Start() error { if c.started { return errors.New("exec: already started") } c.done = make(chan struct{}) c.started = true if c.host == nil && c.cluster == nil { var err error c.cluster = cluster.NewClient() if err != nil { return err } c.closeCluster = true } if c.HostID == "" { hosts, err := c.cluster.Hosts() if err != nil { return err } if len(hosts) == 0 { return errors.New("exec: no hosts found") } host := schedutil.PickHost(hosts) c.HostID = host.ID() c.host = host } // Use the pre-defined host.Job configuration if provided; // otherwise generate one from the fields on exec.Cmd that mirror stdlib's os.exec. if c.Job == nil { c.Job = &host.Job{ ImageArtifact: &c.ImageArtifact, Config: host.ContainerConfig{ Args: c.Args, TTY: c.TTY, Env: c.Env, Stdin: c.Stdin != nil || c.stdinPipe != nil, }, Metadata: c.Meta, } // if attaching to stdout / stderr, avoid round tripping the // streams via on-disk log files. if c.Stdout != nil || c.Stderr != nil { c.Job.Config.DisableLog = true } } else { c.Job.ImageArtifact = &c.ImageArtifact } if c.Job.ID == "" { c.Job.ID = cluster.GenerateJobID(c.HostID, "") } if c.host == nil { var err error c.host, err = c.cluster.Host(c.HostID) if err != nil { return err } } if c.Stdout != nil || c.Stderr != nil || c.Stdin != nil || c.stdinPipe != nil { req := &host.AttachReq{ JobID: c.Job.ID, Height: c.TermHeight, Width: c.TermWidth, Flags: host.AttachFlagStream, } if c.Stdout != nil { req.Flags |= host.AttachFlagStdout } if c.Stderr != nil { req.Flags |= host.AttachFlagStderr } if c.Job.Config.Stdin { req.Flags |= host.AttachFlagStdin } var err error c.attachClient, err = c.host.Attach(req, true) if err != nil { c.close() return err } } if c.stdinPipe != nil { c.stdinPipe.set(writeCloseCloser{c.attachClient}) } else if c.Stdin != nil { go func() { io.Copy(c.attachClient, c.Stdin) c.attachClient.CloseWrite() }() } if c.attachClient == nil { c.eventChan = make(chan *host.Event) var err error c.eventStream, err = 
c.host.StreamEvents(c.Job.ID, c.eventChan) if err != nil { return err } } go func() { defer close(c.done) if c.attachClient != nil { c.exitStatus, c.streamErr = c.attachClient.Receive(c.Stdout, c.Stderr) } else { outer: for e := range c.eventChan { switch e.Event { case "stop": c.exitStatus = *e.Job.ExitStatus break outer case "error": c.streamErr = errors.New(*e.Job.Error) break outer } } c.eventStream.Close() if c.streamErr == nil { c.streamErr = c.eventStream.Err() } } }() return c.host.AddJob(c.Job) }
// RunJob launches a one-off job for a release on a randomly picked
// host. If the request carries an "Upgrade: flynn-attach/0" header the
// connection is hijacked and the job's stdio is streamed over it;
// otherwise the created job is returned as JSON.
func (c *controllerAPI) RunJob(ctx context.Context, w http.ResponseWriter, req *http.Request) {
	// Decode and validate the job request body.
	var newJob ct.NewJob
	if err := httphelper.DecodeJSON(req, &newJob); err != nil {
		respondWithError(w, err)
		return
	}
	if err := schema.Validate(newJob); err != nil {
		respondWithError(w, err)
		return
	}
	// Resolve the release and its artifact from the repos.
	data, err := c.releaseRepo.Get(newJob.ReleaseID)
	if err != nil {
		respondWithError(w, err)
		return
	}
	release := data.(*ct.Release)
	data, err = c.artifactRepo.Get(release.ArtifactID)
	if err != nil {
		respondWithError(w, err)
		return
	}
	artifact := data.(*ct.Artifact)
	// The client asks for interactive stdio via a protocol upgrade header.
	attach := strings.Contains(req.Header.Get("Upgrade"), "flynn-attach/0")
	// Pick a host to run the job on.
	hosts, err := c.clusterClient.ListHosts()
	if err != nil {
		respondWithError(w, err)
		return
	}
	if len(hosts) == 0 {
		respondWithError(w, errors.New("no hosts found"))
		return
	}
	hostID := schedutil.PickHost(hosts).ID
	id := cluster.RandomJobID("")
	app := c.getApp(ctx)
	// Build the job environment: Flynn identity vars, optionally the
	// release env, then per-request overrides (highest precedence).
	env := make(map[string]string, len(release.Env)+len(newJob.Env)+4)
	env["FLYNN_APP_ID"] = app.ID
	env["FLYNN_RELEASE_ID"] = release.ID
	env["FLYNN_PROCESS_TYPE"] = ""
	env["FLYNN_JOB_ID"] = hostID + "-" + id
	if newJob.ReleaseEnv {
		for k, v := range release.Env {
			env[k] = v
		}
	}
	for k, v := range newJob.Env {
		env[k] = v
	}
	// Merge caller metadata with controller bookkeeping keys.
	metadata := make(map[string]string, len(newJob.Meta)+3)
	for k, v := range newJob.Meta {
		metadata[k] = v
	}
	metadata["flynn-controller.app"] = app.ID
	metadata["flynn-controller.app_name"] = app.Name
	metadata["flynn-controller.release"] = release.ID
	job := &host.Job{
		ID:       id,
		Metadata: metadata,
		Artifact: host.Artifact{
			Type: artifact.Type,
			URI:  artifact.URI,
		},
		Config: host.ContainerConfig{
			Cmd:        newJob.Cmd,
			Env:        env,
			TTY:        newJob.TTY,
			Stdin:      attach,
			DisableLog: newJob.DisableLog,
		},
	}
	if len(newJob.Entrypoint) > 0 {
		job.Config.Entrypoint = newJob.Entrypoint
	}
	// Attach to the job's stdio before scheduling so no output is missed.
	var attachClient cluster.AttachClient
	if attach {
		attachReq := &host.AttachReq{
			JobID:  job.ID,
			Flags:  host.AttachFlagStdout | host.AttachFlagStderr | host.AttachFlagStdin | host.AttachFlagStream,
			Height: uint16(newJob.Lines),
			Width:  uint16(newJob.Columns),
		}
		client, err := c.clusterClient.DialHost(hostID)
		if err != nil {
			respondWithError(w, fmt.Errorf("host connect failed: %s", err.Error()))
			return
		}
		attachClient, err = client.Attach(attachReq, true)
		if err != nil {
			respondWithError(w, fmt.Errorf("attach failed: %s", err.Error()))
			return
		}
		defer attachClient.Close()
	}
	// Schedule the job on the chosen host.
	_, err = c.clusterClient.AddJobs(map[string][]*host.Job{hostID: {job}})
	if err != nil {
		respondWithError(w, fmt.Errorf("schedule failed: %s", err.Error()))
		return
	}
	if attach {
		// Wait until the attach stream is ready, then switch protocols
		// and hijack the connection to proxy raw bytes both ways.
		if err := attachClient.Wait(); err != nil {
			respondWithError(w, fmt.Errorf("attach wait failed: %s", err.Error()))
			return
		}
		w.Header().Set("Connection", "upgrade")
		w.Header().Set("Upgrade", "flynn-attach/0")
		w.WriteHeader(http.StatusSwitchingProtocols)
		conn, _, err := w.(http.Hijacker).Hijack()
		if err != nil {
			panic(err)
		}
		defer conn.Close()
		// Copy in both directions; the buffered channel lets both
		// goroutines signal completion without blocking.
		done := make(chan struct{}, 2)
		cp := func(to io.Writer, from io.Reader) {
			io.Copy(to, from)
			done <- struct{}{}
		}
		go cp(conn, attachClient.Conn())
		go cp(attachClient.Conn(), conn)
		<-done
		<-done
		return
	} else {
		httphelper.JSON(w, 200, &ct.Job{
			ID:        hostID + "-" + job.ID,
			ReleaseID: newJob.ReleaseID,
			Cmd:       newJob.Cmd,
		})
	}
}