// sendStderrWarnings sends the last X lines of the stderr output to the job's warnings
// field.
func sendStderrWarnings(buffer io.Reader, job baseworker.Job, warningLines int) error {
	scanner := bufio.NewScanner(buffer)
	// Create a circular buffer for the last X lines.
	lastStderrLines := ring.New(warningLines)
	for scanner.Scan() {
		lastStderrLines = lastStderrLines.Next()
		// Copy the line: scanner.Bytes() returns a slice into the scanner's
		// internal buffer, which is overwritten by the next call to Scan.
		lastStderrLines.Value = append([]byte(nil), scanner.Bytes()...)
	}
	// Walk forward through the buffer to get all the last X entries. Note that we call Next first
	// so that we start at the oldest entry.
	for i := 0; i < lastStderrLines.Len(); i++ {
		if lastStderrLines = lastStderrLines.Next(); lastStderrLines.Value != nil {
			job.SendWarning(append(lastStderrLines.Value.([]byte), byte('\n')))
		}
	}
	return scanner.Err()
}
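// demoStderrWarnings is a hypothetical usage sketch, not part of the original
// package: it shows that with warningLines=2 only the final two lines of
// stderr are forwarded as job warnings, while earlier lines fall out of the
// ring buffer.
func demoStderrWarnings(job baseworker.Job) error {
	stderr := bytes.NewBufferString("first\nsecond\nthird\n")
	// Sends "second\n" and then "third\n" as warnings; "first" is dropped.
	return sendStderrWarnings(stderr, job, 2)
}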
func (conf TaskConfig) doProcess(job baseworker.Job, envVars []string, tryCount int) error {
	defer func() {
		// If we panicked then set the panic message as a warning. Gearman-go will
		// handle marking this job as failed. Use fmt.Sprint so that non-error
		// panic values don't cause a second panic here.
		if r := recover(); r != nil {
			job.SendWarning([]byte(fmt.Sprint(r)))
		}
	}()

	// shutdownTicker will effectively control the execution of the ticker.
	shutdownTicker := make(chan interface{})
	defer func() {
		shutdownTicker <- 1
	}()

	// Every minute we will output a heartbeat kayvee log for the job.
	tickUnit := time.Minute
	ticker := time.NewTicker(tickUnit)
	go func() {
		defer ticker.Stop()
		units := 0
		for {
			select {
			case <-shutdownTicker:
				close(shutdownTicker)
				return
			case <-ticker.C:
				units++
				lg.GaugeIntD("heartbeat", units, logger.M{
					"try_number": tryCount,
					"function":   job.Fn(),
					"job_id":     getJobID(job),
					"unit":       tickUnit.String(),
				})
			}
		}
	}()

	var args []string
	var err error
	if conf.ParseArgs {
		args, err = argsparser.ParseArgs(string(job.Data()))
		if err != nil {
			return fmt.Errorf("failed to parse args: %s", err.Error())
		}
	} else {
		args = []string{string(job.Data())}
	}
	cmd := exec.Command(conf.FunctionCmd, args...)

	// Insert the provided env vars into the job.
	cmd.Env = append(os.Environ(), envVars...)

	// Create a new pgid for this process so we can later kill all subprocesses launched by it.
	cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}

	// Write the stdout and stderr of the process to both this process' stdout and stderr
	// and also write it to a byte buffer so that we can return it with the Gearman job
	// data as necessary.
	var stderrbuf bytes.Buffer
	cmd.Stderr = io.MultiWriter(os.Stderr, &stderrbuf)
	defer sendStderrWarnings(&stderrbuf, job, conf.WarningLines)

	stdoutReader, stdoutWriter := io.Pipe()
	cmd.Stdout = io.MultiWriter(os.Stdout, stdoutWriter)

	done := make(chan error)
	// Track when the job has started so that we don't try to sigterm a nil process.
	started := make(chan struct{})

	go func() {
		defer close(done)
		finishedProcessingStdout := make(chan error)
		go func() {
			finishedProcessingStdout <- streamToGearman(stdoutReader, job)
		}()

		if err := cmd.Start(); err != nil {
			// Close started before sending on done: the main goroutine is
			// blocked on <-started, so sending first would deadlock.
			close(started)
			done <- err
			return
		}
		close(started)

		// Save the cmdErr. We want to process stdout and stderr before we return it.
		cmdErr := cmd.Wait()
		stdoutWriter.Close()

		stdoutErr := <-finishedProcessingStdout
		if cmdErr != nil {
			done <- cmdErr
		} else if stdoutErr != nil {
			done <- stdoutErr
		}
	}()
	<-started

	// No timeout
	if conf.CmdTimeout == 0 {
		select {
		case err := <-done:
			// Will be nil if the channel was closed without any errors.
			return err
		case <-conf.Halt:
			if err := stopProcess(cmd.Process, conf.CmdTimeout); err != nil {
				return fmt.Errorf("error stopping process: %s", err)
			}
			return fmt.Errorf("killed process due to sigterm")
		}
	}
	select {
	case err := <-done:
		// Will be nil if the channel was closed without any errors.
		return err
	case <-conf.Halt:
		if err := stopProcess(cmd.Process, conf.CmdTimeout); err != nil {
			return fmt.Errorf("error stopping process: %s", err)
		}
		return nil
	case <-time.After(conf.CmdTimeout):
		if err := stopProcess(cmd.Process, 0); err != nil {
			return fmt.Errorf("error timing out process after %s: %s",
				conf.CmdTimeout.String(), err)
		}
		return fmt.Errorf("process timed out after %s", conf.CmdTimeout.String())
	}
}
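// stopProcess is referenced above but defined elsewhere in this package.
// stopProcessSketch is a minimal, hypothetical sketch of what it might do,
// assuming the time.Duration argument is a grace period between SIGTERM and
// SIGKILL (that semantics is an assumption). Because doProcess sets Setpgid,
// signaling the negated pgid reaches the whole process group, including any
// subprocesses the command launched.
func stopProcessSketch(process *os.Process, grace time.Duration) error {
	pgid, err := syscall.Getpgid(process.Pid)
	if err != nil {
		return err
	}
	// Ask the whole process group to terminate gracefully.
	if err := syscall.Kill(-pgid, syscall.SIGTERM); err != nil {
		return err
	}
	// After the grace period, force-kill anything still running. ESRCH means
	// the group already exited, which counts as success.
	time.Sleep(grace)
	if err := syscall.Kill(-pgid, syscall.SIGKILL); err != nil && err != syscall.ESRCH {
		return err
	}
	return nil
}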