func tailFile(f io.ReadSeeker, logWatcher *logger.LogWatcher, tail int, since time.Time) { var rdr io.Reader = f if tail > 0 { ls, err := tailfile.TailFile(f, tail) if err != nil { logWatcher.Err <- err return } rdr = bytes.NewBuffer(bytes.Join(ls, []byte("\n"))) } dec := json.NewDecoder(rdr) l := &jsonlog.JSONLog{} for { msg, err := decodeLogLine(dec, l) if err != nil { if err != io.EOF { logWatcher.Err <- err } return } if !since.IsZero() && msg.Timestamp.Before(since) { continue } logWatcher.Msg <- msg } }
func tailN(n int, id string) (string, error) { job := jobs[id] logFilePath := job.logDir + "/log.log" file, err := os.Open(logFilePath) if err != nil { return "", err } byteMatrix, err := tailfile.TailFile(file, n) if err != nil { return "", err } out := bytes.Join(byteMatrix, []byte("\n")) return string(out) + "\n", err }
func (daemon *Daemon) ContainerLogs(name string, config *ContainerLogsConfig) error { var ( lines = -1 format string ) if !(config.UseStdout || config.UseStderr) { return fmt.Errorf("You must choose at least one stream") } if config.Timestamps { format = timeutils.RFC3339NanoFixed } if config.Tail == "" { config.Tail = "all" } container, err := daemon.Get(name) if err != nil { return err } var ( outStream = config.OutStream errStream io.Writer ) if !container.Config.Tty { errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) } else { errStream = outStream } if container.LogDriverType() != jsonfilelog.Name { return fmt.Errorf("\"logs\" endpoint is supported only for \"json-file\" logging driver") } logDriver, err := container.getLogger() cLog, err := logDriver.GetReader() if err != nil { logrus.Errorf("Error reading logs: %s", err) } else { // json-file driver if config.Tail != "all" { var err error lines, err = strconv.Atoi(config.Tail) if err != nil { logrus.Errorf("Failed to parse tail %s, error: %v, show all logs", config.Tail, err) lines = -1 } } if lines != 0 { if lines > 0 { f := cLog.(*os.File) ls, err := tailfile.TailFile(f, lines) if err != nil { return err } tmp := bytes.NewBuffer([]byte{}) for _, l := range ls { fmt.Fprintf(tmp, "%s\n", l) } cLog = tmp } dec := json.NewDecoder(cLog) l := &jsonlog.JSONLog{} for { l.Reset() if err := dec.Decode(l); err == io.EOF { break } else if err != nil { logrus.Errorf("Error streaming logs: %s", err) break } logLine := l.Log if !config.Since.IsZero() && l.Created.Before(config.Since) { continue } if config.Timestamps { // format can be "" or time format, so here can't be error logLine, _ = l.Format(format) } if l.Stream == "stdout" && config.UseStdout { io.WriteString(outStream, logLine) } if l.Stream == "stderr" && config.UseStderr { io.WriteString(errStream, logLine) } } } } if config.Follow && container.IsRunning() { chErr := make(chan error) var 
stdoutPipe, stderrPipe io.ReadCloser // write an empty chunk of data (this is to ensure that the // HTTP Response is sent immediatly, even if the container has // not yet produced any data) outStream.Write(nil) if config.UseStdout { stdoutPipe = container.StdoutLogPipe() go func() { logrus.Debug("logs: stdout stream begin") chErr <- jsonlog.WriteLog(stdoutPipe, outStream, format, config.Since) logrus.Debug("logs: stdout stream end") }() } if config.UseStderr { stderrPipe = container.StderrLogPipe() go func() { logrus.Debug("logs: stderr stream begin") chErr <- jsonlog.WriteLog(stderrPipe, errStream, format, config.Since) logrus.Debug("logs: stderr stream end") }() } err = <-chErr if stdoutPipe != nil { stdoutPipe.Close() } if stderrPipe != nil { stderrPipe.Close() } <-chErr // wait for 2nd goroutine to exit, otherwise bad things will happen if err != nil && err != io.EOF && err != io.ErrClosedPipe { if e, ok := err.(*net.OpError); ok && e.Err != syscall.EPIPE { logrus.Errorf("error streaming logs: %v", err) } } } return nil }
func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status { if len(job.Args) != 1 { return job.Errorf("Usage: %s CONTAINER\n", job.Name) } var ( name = job.Args[0] stdout = job.GetenvBool("stdout") stderr = job.GetenvBool("stderr") tail = job.Getenv("tail") follow = job.GetenvBool("follow") times = job.GetenvBool("timestamps") lines = -1 format string ) if !(stdout || stderr) { return job.Errorf("You must choose at least one stream") } if times { format = time.RFC3339Nano } if tail == "" { tail = "all" } container := daemon.Get(name) if container == nil { return job.Errorf("No such container: %s", name) } cLog, err := container.ReadLog("json") if err != nil && os.IsNotExist(err) { // Legacy logs utils.Debugf("Old logs format") if stdout { cLog, err := container.ReadLog("stdout") if err != nil { utils.Errorf("Error reading logs (stdout): %s", err) } else if _, err := io.Copy(job.Stdout, cLog); err != nil { utils.Errorf("Error streaming logs (stdout): %s", err) } } if stderr { cLog, err := container.ReadLog("stderr") if err != nil { utils.Errorf("Error reading logs (stderr): %s", err) } else if _, err := io.Copy(job.Stderr, cLog); err != nil { utils.Errorf("Error streaming logs (stderr): %s", err) } } } else if err != nil { utils.Errorf("Error reading logs (json): %s", err) } else { if tail != "all" { var err error lines, err = strconv.Atoi(tail) if err != nil { utils.Errorf("Failed to parse tail %s, error: %v, show all logs", err) lines = -1 } } if lines != 0 { if lines > 0 { f := cLog.(*os.File) ls, err := tailfile.TailFile(f, lines) if err != nil { return job.Error(err) } tmp := bytes.NewBuffer([]byte{}) for _, l := range ls { fmt.Fprintf(tmp, "%s\n", l) } cLog = tmp } dec := json.NewDecoder(cLog) for { l := &utils.JSONLog{} if err := dec.Decode(l); err == io.EOF { break } else if err != nil { utils.Errorf("Error streaming logs: %s", err) break } logLine := l.Log if times { logLine = fmt.Sprintf("%s %s", l.Created.Format(format), logLine) } if l.Stream == 
"stdout" && stdout { fmt.Fprintf(job.Stdout, "%s", logLine) } if l.Stream == "stderr" && stderr { fmt.Fprintf(job.Stderr, "%s", logLine) } } } } if follow { errors := make(chan error, 2) if stdout { stdoutPipe := container.StdoutLogPipe() go func() { errors <- utils.WriteLog(stdoutPipe, job.Stdout, format) }() } if stderr { stderrPipe := container.StderrLogPipe() go func() { errors <- utils.WriteLog(stderrPipe, job.Stderr, format) }() } err := <-errors if err != nil { utils.Errorf("%s", err) } } return engine.StatusOK }
// ContainerLogs writes a container's logs to the job's stdout/stderr
// streams. Env knobs read from the job: "stdout"/"stderr" (which streams
// to emit), "tail" (line count or "all"), "follow" (keep streaming while
// the container runs) and "timestamps" (prefix each line with its time).
func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
	}
	var (
		name   = job.Args[0]
		stdout = job.GetenvBool("stdout")
		stderr = job.GetenvBool("stderr")
		tail   = job.Getenv("tail")
		follow = job.GetenvBool("follow")
		times  = job.GetenvBool("timestamps")
		lines  = -1 // -1 means "no tail limit"
		format string
	)
	if !(stdout || stderr) {
		return job.Errorf("You must choose at least one stream")
	}
	if times {
		format = timeutils.RFC3339NanoFixed
	}
	if tail == "" {
		tail = "all"
	}
	container, err := daemon.Get(name)
	if err != nil {
		return job.Error(err)
	}
	cLog, err := container.ReadLog("json")
	if err != nil && os.IsNotExist(err) {
		// Legacy logs: separate raw stdout/stderr files are copied verbatim.
		log.Debugf("Old logs format")
		if stdout {
			cLog, err := container.ReadLog("stdout")
			if err != nil {
				log.Errorf("Error reading logs (stdout): %s", err)
			} else if _, err := io.Copy(job.Stdout, cLog); err != nil {
				log.Errorf("Error streaming logs (stdout): %s", err)
			}
		}
		if stderr {
			cLog, err := container.ReadLog("stderr")
			if err != nil {
				log.Errorf("Error reading logs (stderr): %s", err)
			} else if _, err := io.Copy(job.Stderr, cLog); err != nil {
				log.Errorf("Error streaming logs (stderr): %s", err)
			}
		}
	} else if err != nil {
		log.Errorf("Error reading logs (json): %s", err)
	} else {
		// JSON log format: one JSONLog object per line.
		if tail != "all" {
			var err error
			lines, err = strconv.Atoi(tail)
			if err != nil {
				log.Errorf("Failed to parse tail %s, error: %v, show all logs", tail, err)
				lines = -1
			}
		}
		if lines != 0 { // lines == 0 means emit nothing from the backlog
			if lines > 0 {
				// Keep only the last `lines` entries of the log file.
				f := cLog.(*os.File)
				ls, err := tailfile.TailFile(f, lines)
				if err != nil {
					return job.Error(err)
				}
				tmp := bytes.NewBuffer([]byte{})
				for _, l := range ls {
					fmt.Fprintf(tmp, "%s\n", l)
				}
				cLog = tmp
			}
			dec := json.NewDecoder(cLog)
			l := &jsonlog.JSONLog{}
			for {
				if err := dec.Decode(l); err == io.EOF {
					break
				} else if err != nil {
					log.Errorf("Error streaming logs: %s", err)
					break
				}
				logLine := l.Log
				if times {
					// format can be "" or time format, so here can't be error
					logLine, _ = l.Format(format)
				}
				if l.Stream == "stdout" && stdout {
					io.WriteString(job.Stdout, logLine)
				}
				if l.Stream == "stderr" && stderr {
					io.WriteString(job.Stderr, logLine)
				}
				// Reset the reused entry so stale fields don't leak into
				// the next decoded record.
				l.Reset()
			}
		}
	}
	if follow && container.IsRunning() {
		// Live streaming: one goroutine per requested stream. The buffered
		// channel (capacity 2) lets both goroutines send without blocking;
		// after wg.Wait all sends have happened, so close + range drains
		// every result.
		errors := make(chan error, 2)
		wg := sync.WaitGroup{}
		if stdout {
			wg.Add(1)
			stdoutPipe := container.StdoutLogPipe()
			defer stdoutPipe.Close()
			go func() {
				errors <- jsonlog.WriteLog(stdoutPipe, job.Stdout, format)
				wg.Done()
			}()
		}
		if stderr {
			wg.Add(1)
			stderrPipe := container.StderrLogPipe()
			defer stderrPipe.Close()
			go func() {
				errors <- jsonlog.WriteLog(stderrPipe, job.Stderr, format)
				wg.Done()
			}()
		}
		wg.Wait()
		close(errors)
		for err := range errors {
			if err != nil {
				log.Errorf("%s", err)
			}
		}
	}
	return engine.StatusOK
}
// ContainerLogs writes the logs of container `name` to the streams in
// config, honoring UseStdout/UseStderr, Tail, Timestamps and Follow.
// Only the json-file logging driver is supported; without a TTY the two
// streams are multiplexed onto the single output using stdcopy framing.
func (daemon *Daemon) ContainerLogs(name string, config *ContainerLogsConfig) error {
	var (
		lines  = -1 // -1 means "no tail limit"
		format string
	)
	if !(config.UseStdout || config.UseStderr) {
		return fmt.Errorf("You must choose at least one stream")
	}
	if config.Timestamps {
		format = timeutils.RFC3339NanoFixed
	}
	if config.Tail == "" {
		config.Tail = "all"
	}
	container, err := daemon.Get(name)
	if err != nil {
		return err
	}
	var (
		outStream = config.OutStream
		errStream io.Writer
	)
	if !container.Config.Tty {
		// Multiplex stderr and stdout frames over the one connection.
		errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr)
		outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
	} else {
		errStream = outStream
	}
	if container.LogDriverType() != "json-file" {
		return fmt.Errorf("\"logs\" endpoint is supported only for \"json-file\" logging driver")
	}
	cLog, err := container.ReadLog("json")
	if err != nil && os.IsNotExist(err) {
		// Legacy logs: separate raw stdout/stderr files are copied verbatim.
		logrus.Debugf("Old logs format")
		if config.UseStdout {
			cLog, err := container.ReadLog("stdout")
			if err != nil {
				logrus.Errorf("Error reading logs (stdout): %s", err)
			} else if _, err := io.Copy(outStream, cLog); err != nil {
				logrus.Errorf("Error streaming logs (stdout): %s", err)
			}
		}
		if config.UseStderr {
			cLog, err := container.ReadLog("stderr")
			if err != nil {
				logrus.Errorf("Error reading logs (stderr): %s", err)
			} else if _, err := io.Copy(errStream, cLog); err != nil {
				logrus.Errorf("Error streaming logs (stderr): %s", err)
			}
		}
	} else if err != nil {
		logrus.Errorf("Error reading logs (json): %s", err)
	} else {
		// JSON log format: one JSONLog object per line.
		if config.Tail != "all" {
			var err error
			lines, err = strconv.Atoi(config.Tail)
			if err != nil {
				logrus.Errorf("Failed to parse tail %s, error: %v, show all logs", config.Tail, err)
				lines = -1
			}
		}
		if lines != 0 { // lines == 0 means emit nothing from the backlog
			if lines > 0 {
				// Keep only the last `lines` entries of the log file.
				f := cLog.(*os.File)
				ls, err := tailfile.TailFile(f, lines)
				if err != nil {
					return err
				}
				tmp := bytes.NewBuffer([]byte{})
				for _, l := range ls {
					fmt.Fprintf(tmp, "%s\n", l)
				}
				cLog = tmp
			}
			dec := json.NewDecoder(cLog)
			l := &jsonlog.JSONLog{}
			for {
				if err := dec.Decode(l); err == io.EOF {
					break
				} else if err != nil {
					logrus.Errorf("Error streaming logs: %s", err)
					break
				}
				logLine := l.Log
				if config.Timestamps {
					// format can be "" or time format, so here can't be error
					logLine, _ = l.Format(format)
				}
				if l.Stream == "stdout" && config.UseStdout {
					io.WriteString(outStream, logLine)
				}
				if l.Stream == "stderr" && config.UseStderr {
					io.WriteString(errStream, logLine)
				}
				// Reset the reused entry so stale fields don't leak into
				// the next decoded record.
				l.Reset()
			}
		}
	}
	if config.Follow && container.IsRunning() {
		// Live streaming: one goroutine per requested stream. The buffered
		// channel (capacity 2) lets both goroutines send without blocking;
		// after wg.Wait all sends have happened, so close + range drains
		// every result.
		errors := make(chan error, 2)
		wg := sync.WaitGroup{}
		if config.UseStdout {
			wg.Add(1)
			stdoutPipe := container.StdoutLogPipe()
			defer stdoutPipe.Close()
			go func() {
				errors <- jsonlog.WriteLog(stdoutPipe, outStream, format)
				wg.Done()
			}()
		}
		if config.UseStderr {
			wg.Add(1)
			stderrPipe := container.StderrLogPipe()
			defer stderrPipe.Close()
			go func() {
				errors <- jsonlog.WriteLog(stderrPipe, errStream, format)
				wg.Done()
			}()
		}
		wg.Wait()
		close(errors)
		for err := range errors {
			if err != nil {
				logrus.Errorf("%s", err)
			}
		}
	}
	return nil
}