// TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start.
// postContainerExecStart starts a previously created exec instance. For
// non-detached execs it hijacks the HTTP connection and wires the client's
// stream to the exec's stdio (stdcopy-multiplexed unless a TTY is used).
func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := httputils.ParseForm(r); err != nil {
		return err
	}

	version := httputils.VersionFromContext(ctx)
	if versions.GreaterThan(version, "1.21") {
		// API > 1.21 requires a JSON content type on the request body.
		if err := httputils.CheckForJSON(r); err != nil {
			return err
		}
	}

	var (
		execName                  = vars["name"]
		stdin, inStream           io.ReadCloser
		stdout, stderr, outStream io.Writer
	)

	execStartCheck := &types.ExecStartCheck{}
	if err := json.NewDecoder(r.Body).Decode(execStartCheck); err != nil {
		return err
	}

	// Surface the backend error when the exec does not exist (err is
	// expected to be non-nil whenever exists is false).
	if exists, err := s.backend.ExecExists(execName); !exists {
		return err
	}

	if !execStartCheck.Detach {
		var err error
		// Setting up the streaming http interface.
		inStream, outStream, err = httputils.HijackConnection(w)
		if err != nil {
			return err
		}
		defer httputils.CloseStreams(inStream, outStream)

		// Write the raw-stream response header directly on the hijacked
		// connection; 101 when the client requested a protocol upgrade.
		if _, ok := r.Header["Upgrade"]; ok {
			fmt.Fprintf(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n")
		} else {
			fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
		}

		stdin = inStream
		stdout = outStream
		if !execStartCheck.Tty {
			// Without a TTY, stdout/stderr are multiplexed onto the single
			// connection using the stdcopy framing.
			stderr = stdcopy.NewStdWriter(outStream, stdcopy.Stderr)
			stdout = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
		}
	}

	// Now run the user process in container.
	if err := s.backend.ContainerExecStart(execName, stdin, stdout, stderr); err != nil {
		if execStartCheck.Detach {
			return err
		}
		// The hijacked connection already got a 200/101, so report the
		// error in-band on the stream instead of as an HTTP error.
		stdout.Write([]byte(err.Error() + "\r\n"))
		logrus.Errorf("Error running exec in container: %v", err)
	}
	return nil
}
func handleContainerLogs(w http.ResponseWriter, r *http.Request) { var outStream, errStream io.Writer outStream = ioutils.NewWriteFlusher(w) // not sure how to test follow if err := r.ParseForm(); err != nil { http.Error(w, err.Error(), 500) } stdout, stderr := getBoolValue(r.Form.Get("stdout")), getBoolValue(r.Form.Get("stderr")) if stderr { errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) } if stdout { outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) } var i int if tail, err := strconv.Atoi(r.Form.Get("tail")); err == nil && tail > 0 { i = 50 - tail if i < 0 { i = 0 } } for ; i < 50; i++ { line := fmt.Sprintf("line %d", i) if getBoolValue(r.Form.Get("timestamps")) { l := &jsonlog.JSONLog{Log: line, Created: time.Now()} line = fmt.Sprintf("%s %s", l.Created.Format(timeutils.RFC3339NanoFixed), line) } if i%2 == 0 && stderr { fmt.Fprintln(errStream, line) } else if i%2 == 1 && stdout { fmt.Fprintln(outStream, line) } } }
func (daemon *Daemon) ContainerAttachWithLogs(name string, c *ContainerAttachWithLogsConfig) error { container, err := daemon.Get(name) if err != nil { return err } var errStream io.Writer if !container.Config.Tty && c.Multiplex { errStream = stdcopy.NewStdWriter(c.OutStream, stdcopy.Stderr) c.OutStream = stdcopy.NewStdWriter(c.OutStream, stdcopy.Stdout) } else { errStream = c.OutStream } var stdin io.ReadCloser var stdout, stderr io.Writer if c.UseStdin { stdin = c.InStream } if c.UseStdout { stdout = c.OutStream } if c.UseStderr { stderr = errStream } return container.AttachWithLogs(stdin, stdout, stderr, c.Logs, c.Stream) }
func (daemon *Daemon) ContainerAttachWithLogs(container *Container, c *ContainerAttachWithLogsConfig) error { var errStream io.Writer if !container.Config.Tty { errStream = stdcopy.NewStdWriter(c.OutStream, stdcopy.Stderr) c.OutStream = stdcopy.NewStdWriter(c.OutStream, stdcopy.Stdout) } else { errStream = c.OutStream } var stdin io.ReadCloser var stdout, stderr io.Writer if c.UseStdin { stdin = c.InStream } if c.UseStdout { stdout = c.OutStream } if c.UseStderr { stderr = errStream } return container.AttachWithLogs(stdin, stdout, stderr, c.Logs, c.Stream) }
// attach connects the given stdio streams to the container inside the
// sandbox. When the container has no TTY, stderr/stdout are multiplexed
// over stdout with stdcopy framing. If rsp is non-nil, the attach
// completion result is delivered on it asynchronously.
func (c *Container) attach(stdin io.ReadCloser, stdout io.WriteCloser, winsize *hypervisor.WindowSize, rsp chan<- error) error {
	if c.p.sandbox == nil || c.descript == nil {
		err := fmt.Errorf("container not ready for attach")
		c.Log(ERROR, err)
		return err
	}

	tty := &hypervisor.TtyIO{
		Stdin:    stdin,
		Stdout:   stdout,
		Callback: make(chan *runvtypes.VmResponse, 1),
	}
	if stdout != nil {
		if !c.hasTty() {
			// No TTY: split the single stream into stdcopy-framed channels.
			tty.Stderr = stdcopy.NewStdWriter(stdout, stdcopy.Stderr)
			tty.Stdout = stdcopy.NewStdWriter(stdout, stdcopy.Stdout)
		}
	}

	if rsp != nil {
		// Report the final attach status without blocking the caller.
		go func() {
			rsp <- tty.WaitForFinish()
		}()
	}

	return c.p.sandbox.Attach(tty, c.Id(), winsize)
}
// TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start. func postContainerExecStart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return nil } var ( name = vars["name"] job = eng.Job("execStart", name) errOut io.Writer = os.Stderr ) if err := job.DecodeEnv(r.Body); err != nil { return err } if !job.GetenvBool("Detach") { // Setting up the streaming http interface. inStream, outStream, err := hijackServer(w) if err != nil { return err } defer func() { if tcpc, ok := inStream.(*net.TCPConn); ok { tcpc.CloseWrite() } else { inStream.Close() } }() defer func() { if tcpc, ok := outStream.(*net.TCPConn); ok { tcpc.CloseWrite() } else if closer, ok := outStream.(io.Closer); ok { closer.Close() } }() var errStream io.Writer fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") if !job.GetenvBool("Tty") && version.GreaterThanOrEqualTo("1.6") { errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) } else { errStream = outStream } job.Stdin.Add(inStream) job.Stdout.Add(outStream) job.Stderr.Set(errStream) errOut = outStream } // Now run the user process in container. job.SetCloseIO(false) if err := job.Run(); err != nil { fmt.Fprintf(errOut, "Error starting exec command in container %s: %s\n", name, err) return err } w.WriteHeader(http.StatusNoContent) return nil }
// StartExec hooks the given stdio streams up to an already-created exec
// instance and starts it in the sandbox, spawning a watcher goroutine that
// records the exit code and signals completion on the exec's finChan.
func (p *XPod) StartExec(stdin io.ReadCloser, stdout io.WriteCloser, containerId, execId string) error {
	p.statusLock.RLock()
	es, ok := p.execs[execId]
	p.statusLock.RUnlock()
	if !ok {
		err := fmt.Errorf("no exec %s exists for container %s", execId, containerId)
		p.Log(ERROR, err)
		return err
	}

	tty := &hypervisor.TtyIO{
		Stdin:    stdin,
		Stdout:   stdout,
		Callback: make(chan *types.VmResponse, 1),
	}

	if !es.Terminal && stdout != nil {
		// No terminal: multiplex stdout/stderr with stdcopy framing.
		tty.Stderr = stdcopy.NewStdWriter(stdout, stdcopy.Stderr)
		tty.Stdout = stdcopy.NewStdWriter(stdout, stdcopy.Stdout)
	}

	// Drain any stale completion signal from a previous run so the watcher
	// below can deliver this run's termination.
	var fin = true
	for fin {
		select {
		case fin = <-es.finChan:
			es.Log(DEBUG, "try to drain the sync chan")
		default:
			fin = false
			es.Log(DEBUG, "the sync chan is empty")
		}
	}

	// Watch for the exec process to terminate and publish its exit code.
	go func(es *Exec) {
		result := p.sandbox.WaitProcess(false, []string{execId}, -1)
		if result == nil {
			es.Log(ERROR, "can not wait exec")
			return
		}
		r, ok := <-result
		if !ok {
			es.Log(ERROR, "waiting exec interrupted")
			return
		}
		es.Log(DEBUG, "exec terminated at %v with code %d", r.FinishedAt, r.Code)
		es.ExitCode = uint8(r.Code)
		// Non-blocking notify in case a signal is already pending.
		select {
		case es.finChan <- true:
			es.Log(DEBUG, "wake exec stopped chan")
		default:
			es.Log(WARNING, "exec already set as stopped")
		}
	}(es)

	return p.sandbox.Exec(es.Container, es.Id, es.Cmds, es.Terminal, tty)
}
// postContainersAttach hijacks the HTTP connection and attaches the client
// to the container's stdio streams, multiplexing output via stdcopy for
// non-TTY containers on API >= 1.6.
func postContainersAttach(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := parseForm(r); err != nil {
		return err
	}
	if vars == nil {
		return fmt.Errorf("Missing parameter")
	}

	d := getDaemon(eng)
	cont, err := d.Get(vars["name"])
	if err != nil {
		return err
	}

	inStream, outStream, err := hijackServer(w)
	if err != nil {
		return err
	}
	defer closeStreams(inStream, outStream)

	var errStream io.Writer

	// Raw-stream response header; 101 when the client requested an upgrade.
	if _, ok := r.Header["Upgrade"]; ok {
		fmt.Fprintf(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n")
	} else {
		fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
	}

	if !cont.Config.Tty && version.GreaterThanOrEqualTo("1.6") {
		// No TTY: multiplex stdout/stderr with stdcopy framing.
		errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr)
		outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
	} else {
		errStream = outStream
	}

	logs := toBool(r.Form.Get("logs"))
	stream := toBool(r.Form.Get("stream"))

	// Only hand over the streams the client asked for.
	var stdin io.ReadCloser
	var stdout, stderr io.Writer
	if toBool(r.Form.Get("stdin")) {
		stdin = inStream
	}
	if toBool(r.Form.Get("stdout")) {
		stdout = outStream
	}
	if toBool(r.Form.Get("stderr")) {
		stderr = errStream
	}

	if err := cont.AttachWithLogs(stdin, stdout, stderr, logs, stream); err != nil {
		// The response header is already on the wire; report in-band.
		fmt.Fprintf(outStream, "Error attaching: %s\n", err)
	}
	return nil
}
// ContainerAttachWithLogs attaches to logs according to the config passed in. See ContainerAttachWithLogsConfig.
func (daemon *Daemon) ContainerAttachWithLogs(prefixOrName string, c *ContainerAttachWithLogsConfig) error {
	if c.Hijacker == nil {
		return derr.ErrorCodeNoHijackConnection.WithArgs(prefixOrName)
	}
	container, err := daemon.GetContainer(prefixOrName)
	if err != nil {
		return derr.ErrorCodeNoSuchContainer.WithArgs(prefixOrName)
	}
	if container.IsPaused() {
		return derr.ErrorCodePausedContainer.WithArgs(prefixOrName)
	}

	conn, _, err := c.Hijacker.Hijack()
	if err != nil {
		return err
	}
	defer conn.Close()
	// Flush the options to make sure the client sets the raw mode
	conn.Write([]byte{})
	inStream := conn.(io.ReadCloser)
	outStream := conn.(io.Writer)

	// Raw-stream response header; 101 when the client requested an upgrade.
	if c.Upgrade {
		fmt.Fprintf(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n")
	} else {
		fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
	}

	var errStream io.Writer
	if !container.Config.Tty {
		// No TTY: multiplex stdout/stderr with stdcopy framing.
		errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr)
		outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
	} else {
		errStream = outStream
	}

	// Only hand over the streams the caller opted into.
	var stdin io.ReadCloser
	var stdout, stderr io.Writer
	if c.UseStdin {
		stdin = inStream
	}
	if c.UseStdout {
		stdout = outStream
	}
	if c.UseStderr {
		stderr = errStream
	}

	if err := daemon.attachWithLogs(container, stdin, stdout, stderr, c.Logs, c.Stream, c.DetachKeys); err != nil {
		// Headers already sent; report the failure in-band on the stream.
		fmt.Fprintf(outStream, "Error attaching: %s\n", err)
	}
	return nil
}
// postContainersAttach (engine variant) inspects the container via an
// engine job to learn its TTY setting, hijacks the connection, and runs the
// "attach" job with the hijacked streams.
func postContainersAttach(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := parseForm(r); err != nil {
		return err
	}
	if vars == nil {
		return fmt.Errorf("Missing parameter")
	}

	var (
		job    = eng.Job("container_inspect", vars["name"])
		c, err = job.Stdout.AddEnv()
	)
	if err != nil {
		return err
	}
	if err = job.Run(); err != nil {
		return err
	}

	inStream, outStream, err := hijackServer(w)
	if err != nil {
		return err
	}
	defer closeStreams(inStream, outStream)

	var errStream io.Writer

	// Raw-stream response header; 101 when the client requested an upgrade.
	if _, ok := r.Header["Upgrade"]; ok {
		fmt.Fprintf(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n")
	} else {
		fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
	}

	// Multiplex only for non-TTY containers on API >= 1.6.
	if c.GetSubEnv("Config") != nil && !c.GetSubEnv("Config").GetBool("Tty") && version.GreaterThanOrEqualTo("1.6") {
		errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr)
		outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
	} else {
		errStream = outStream
	}

	// Re-use the job variable for the actual attach job.
	job = eng.Job("attach", vars["name"])
	job.Setenv("logs", r.Form.Get("logs"))
	job.Setenv("stream", r.Form.Get("stream"))
	job.Setenv("stdin", r.Form.Get("stdin"))
	job.Setenv("stdout", r.Form.Get("stdout"))
	job.Setenv("stderr", r.Form.Get("stderr"))
	job.Stdin.Add(inStream)
	job.Stdout.Add(outStream)
	job.Stderr.Set(errStream)
	if err := job.Run(); err != nil {
		// Headers already sent; report the failure in-band on the stream.
		fmt.Fprintf(outStream, "Error attaching: %s\n", err)
	}
	return nil
}
// TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start. func (s *Server) postContainerExecStart(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return nil } var ( execName = vars["name"] stdin io.ReadCloser stdout io.Writer stderr io.Writer ) execStartCheck := &types.ExecStartCheck{} if err := json.NewDecoder(r.Body).Decode(execStartCheck); err != nil { return err } if !execStartCheck.Detach { // Setting up the streaming http interface. inStream, outStream, err := hijackServer(w) if err != nil { return err } defer closeStreams(inStream, outStream) var errStream io.Writer if _, ok := r.Header["Upgrade"]; ok { fmt.Fprintf(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n") } else { fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") } if !execStartCheck.Tty && version.GreaterThanOrEqualTo("1.6") { errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) } else { errStream = outStream } stdin = inStream stdout = outStream stderr = errStream } // Now run the user process in container. if err := s.daemon.ContainerExecStart(execName, stdin, stdout, stderr); err != nil { logrus.Errorf("Error starting exec command in container %s: %s", execName, err) return err } w.WriteHeader(http.StatusNoContent) return nil }
func (daemon *Daemon) StartExec(stdin io.ReadCloser, stdout io.WriteCloser, containerId, execId string) error { tty := &hypervisor.TtyIO{ Stdin: stdin, Stdout: stdout, Callback: make(chan *types.VmResponse, 1), } glog.V(1).Infof("Get container id is %s", containerId) pod, _, err := daemon.GetPodByContainerIdOrName(containerId) if err != nil { return err } status := pod.Status() if status == nil || status.Status != types.S_POD_RUNNING { return fmt.Errorf("container %s is not running", containerId) } es := status.GetExec(execId) if es == nil { return fmt.Errorf("Can not find exec %s", execId) } vmId, err := daemon.GetVmByPodId(pod.Id) if err != nil { return err } vm, ok := daemon.VmList.Get(vmId) if !ok { err = fmt.Errorf("Can not find VM whose Id is %s!", vmId) return err } if !es.Terminal { tty.Stderr = stdcopy.NewStdWriter(stdout, stdcopy.Stderr) tty.Stdout = stdcopy.NewStdWriter(stdout, stdcopy.Stdout) tty.OutCloser = stdout } if err := vm.Exec(es.Container, es.Id, es.Cmds, es.Terminal, tty); err != nil { return err } defer func() { glog.V(2).Info("Defer function for exec!") }() return nil }
// TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start.
// postContainerExecStart starts a created exec instance. Attached execs get
// a hijacked raw stream; detached execs report errors on the plain
// response writer.
func (s *Server) postContainerExecStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := parseForm(r); err != nil {
		return err
	}
	if err := checkForJSON(r); err != nil {
		return err
	}
	var (
		execName                  = vars["name"]
		stdin, inStream           io.ReadCloser
		stdout, stderr, outStream io.Writer
	)

	execStartCheck := &types.ExecStartCheck{}
	if err := json.NewDecoder(r.Body).Decode(execStartCheck); err != nil {
		return err
	}

	if !execStartCheck.Detach {
		var err error
		// Setting up the streaming http interface.
		inStream, outStream, err = hijackServer(w)
		if err != nil {
			return err
		}
		defer closeStreams(inStream, outStream)

		// Raw-stream response header; 101 on client-requested upgrade.
		if _, ok := r.Header["Upgrade"]; ok {
			fmt.Fprintf(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n")
		} else {
			fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
		}

		stdin = inStream
		stdout = outStream
		if !execStartCheck.Tty {
			// No TTY: multiplex stdout/stderr with stdcopy framing.
			stderr = stdcopy.NewStdWriter(outStream, stdcopy.Stderr)
			stdout = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
		}
	} else {
		// Detached: errors go straight to the normal HTTP response writer.
		outStream = w
	}

	// Now run the user process in container.
	if err := s.daemon.ContainerExecStart(execName, stdin, stdout, stderr); err != nil {
		fmt.Fprintf(outStream, "Error running exec in container: %v\n", err)
	}
	return nil
}
// ContainerAttach attaches to logs according to the config passed in. See ContainerAttachConfig. func (daemon *Daemon) ContainerAttach(prefixOrName string, c *backend.ContainerAttachConfig) error { keys := []byte{} var err error if c.DetachKeys != "" { keys, err = term.ToBytes(c.DetachKeys) if err != nil { return fmt.Errorf("Invalid escape keys (%s) provided", c.DetachKeys) } } container, err := daemon.GetContainer(prefixOrName) if err != nil { return err } if container.IsPaused() { err := fmt.Errorf("Container %s is paused. Unpause the container before attach", prefixOrName) return errors.NewRequestConflictError(err) } inStream, outStream, errStream, err := c.GetStreams() if err != nil { return err } defer inStream.Close() if !container.Config.Tty && c.MuxStreams { errStream = stdcopy.NewStdWriter(errStream, stdcopy.Stderr) outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) } var stdin io.ReadCloser var stdout, stderr io.Writer if c.UseStdin { stdin = inStream } if c.UseStdout { stdout = outStream } if c.UseStderr { stderr = errStream } if err := daemon.containerAttach(container, stdin, stdout, stderr, c.Logs, c.Stream, keys); err != nil { fmt.Fprintf(outStream, "Error attaching: %s\n", err) } return nil }
// ContainerLogs hooks up a container's stdout and stderr streams
// configured with the given struct.
func (c *Container) ContainerLogs(name string, config *backend.ContainerLogsConfig, started chan struct{}) error {
	defer trace.End(trace.Begin(""))

	// Look up the container name in the metadata cache to get long ID
	vc := cache.ContainerCache().GetContainer(name)
	if vc == nil {
		return NotFoundError(name)
	}
	name = vc.ContainerID

	tailLines, since, err := c.validateContainerLogsConfig(vc, config)
	if err != nil {
		return err
	}

	// Outstream modification (from Docker's code) so the stream is streamed with the
	// necessary headers that the CLI expects. This is Docker's scheme.
	wf := ioutils.NewWriteFlusher(config.OutStream)
	defer wf.Close()
	wf.Flush()

	outStream := io.Writer(wf)
	if !vc.Config.Tty {
		// Non-TTY output is stdcopy-framed on the stdout channel.
		outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
	}

	// Make a call to our proxy to handle the remoting
	err = c.containerProxy.StreamContainerLogs(name, outStream, started, config.Timestamps, config.Follow, since, tailLines)

	return err
}
// getContainersLogs serves a container's logs via engine jobs: it inspects
// the container for its TTY setting, then streams the "logs" job output,
// stdcopy-framed for non-TTY containers on API >= 1.6.
func getContainersLogs(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := parseForm(r); err != nil {
		return err
	}
	if vars == nil {
		return fmt.Errorf("Missing parameter")
	}

	var (
		inspectJob = eng.Job("container_inspect", vars["name"])
		logsJob    = eng.Job("logs", vars["name"])
		c, err     = inspectJob.Stdout.AddEnv()
	)
	if err != nil {
		return err
	}
	logsJob.Setenv("follow", r.Form.Get("follow"))
	logsJob.Setenv("tail", r.Form.Get("tail"))
	logsJob.Setenv("stdout", r.Form.Get("stdout"))
	logsJob.Setenv("stderr", r.Form.Get("stderr"))
	logsJob.Setenv("timestamps", r.Form.Get("timestamps"))
	// Validate args here, because we can't return not StatusOK after job.Run() call
	stdout, stderr := logsJob.GetenvBool("stdout"), logsJob.GetenvBool("stderr")
	if !(stdout || stderr) {
		return fmt.Errorf("Bad parameters: you must choose at least one stream")
	}
	if err = inspectJob.Run(); err != nil {
		return err
	}

	var outStream, errStream io.Writer
	outStream = utils.NewWriteFlusher(w)

	if c.GetSubEnv("Config") != nil && !c.GetSubEnv("Config").GetBool("Tty") && version.GreaterThanOrEqualTo("1.6") {
		// No TTY: multiplex stdout/stderr with stdcopy framing.
		errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr)
		outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
	} else {
		errStream = outStream
	}

	logsJob.Stdout.Add(outStream)
	logsJob.Stderr.Set(errStream)
	if err := logsJob.Run(); err != nil {
		// The status is already committed; report the failure in-band.
		fmt.Fprintf(outStream, "Error running logs job: %s\n", err)
	}
	return nil
}
func (daemon *Daemon) Attach(stdin io.ReadCloser, stdout io.WriteCloser, container string) error { var ( vmId string err error ) tty := &hypervisor.TtyIO{ Stdin: stdin, Stdout: stdout, Callback: make(chan *types.VmResponse, 1), } pod, idx, err := daemon.GetPodByContainerIdOrName(container) if err != nil { return err } vmId, err = daemon.GetVmByPodId(pod.Id) if err != nil { return err } vm, ok := daemon.VmList.Get(vmId) if !ok { err = fmt.Errorf("Can find VM whose Id is %s!", vmId) return err } if !pod.Spec.Containers[idx].Tty { tty.Stderr = stdcopy.NewStdWriter(stdout, stdcopy.Stderr) tty.Stdout = stdcopy.NewStdWriter(stdout, stdcopy.Stdout) tty.OutCloser = stdout } err = vm.Attach(tty, container, nil) if err != nil { return err } defer func() { glog.V(2).Info("Defer function for attach!") }() err = tty.WaitForFinish() return err }
// StartPod starts the given pod in a (possibly lazily created) VM. When
// attach is true, the supplied stdio streams are hooked to the first
// container's tty and the call blocks until the session ends. Returns the
// exit code, a cause string, and an error.
func (daemon *Daemon) StartPod(stdin io.ReadCloser, stdout io.WriteCloser, podId, vmId string, attach bool) (int, string, error) {
	var ttys []*hypervisor.TtyIO = []*hypervisor.TtyIO{}

	glog.Infof("pod:%s, vm:%s", podId, vmId)
	p, ok := daemon.PodList.Get(podId)
	if !ok {
		return -1, "", fmt.Errorf("The pod(%s) can not be found, please create it first", podId)
	}

	// Lazy mode is only possible when the driver supports it and the caller
	// did not pin the pod to an existing VM.
	var lazy bool = hypervisor.HDriver.SupportLazyMode() && vmId == ""

	if attach {
		glog.V(1).Info("Run pod with tty attached")
		tty := &hypervisor.TtyIO{
			Stdin:    stdin,
			Stdout:   stdout,
			Callback: make(chan *types.VmResponse, 1),
		}
		if !p.Spec.Containers[0].Tty {
			// No TTY: multiplex stdout/stderr with stdcopy framing.
			tty.Stderr = stdcopy.NewStdWriter(stdout, stdcopy.Stderr)
			tty.Stdout = stdcopy.NewStdWriter(stdout, stdcopy.Stdout)
			tty.OutCloser = stdout
		}
		ttys = append(ttys, tty)
	}

	code, cause, err := daemon.StartInternal(p, vmId, nil, lazy, ttys)
	if err != nil {
		glog.Error(err.Error())
		return -1, "", err
	}

	if err := p.InitializeFinished(daemon); err != nil {
		glog.Error(err.Error())
		return -1, "", err
	}

	if len(ttys) > 0 {
		// Block until the attached session terminates.
		ttys[0].WaitForFinish()
	}

	return code, cause, nil
}
// ContainerAttach attaches to logs according to the config passed in. See ContainerAttachConfig. func (daemon *Daemon) ContainerAttach(prefixOrName string, c *backend.ContainerAttachConfig) error { container, err := daemon.GetContainer(prefixOrName) if err != nil { return derr.ErrorCodeNoSuchContainer.WithArgs(prefixOrName) } if container.IsPaused() { return derr.ErrorCodePausedContainer.WithArgs(prefixOrName) } inStream, outStream, errStream, err := c.GetStreams() if err != nil { return err } defer inStream.Close() if !container.Config.Tty && c.MuxStreams { errStream = stdcopy.NewStdWriter(errStream, stdcopy.Stderr) outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) } var stdin io.ReadCloser var stdout, stderr io.Writer if c.UseStdin { stdin = inStream } if c.UseStdout { stdout = outStream } if c.UseStderr { stderr = errStream } if err := daemon.containerAttach(container, stdin, stdout, stderr, c.Logs, c.Stream, c.DetachKeys); err != nil { fmt.Fprintf(outStream, "Error attaching: %s\n", err) } return nil }
// attachContainer is the test-server implementation of the attach endpoint:
// it hijacks the connection, writes a few canned stdcopy-framed lines, and,
// in stream mode, polls until the container has started and stopped.
func (s *DockerServer) attachContainer(w http.ResponseWriter, r *http.Request) {
	id := mux.Vars(r)["id"]
	container, _, err := s.findContainer(id)
	if err != nil {
		http.Error(w, err.Error(), http.StatusNotFound)
		return
	}
	hijacker, ok := w.(http.Hijacker)
	if !ok {
		http.Error(w, "cannot hijack connection", http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/vnd.docker.raw-stream")
	w.WriteHeader(http.StatusOK)
	conn, _, err := hijacker.Hijack()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	wg := sync.WaitGroup{}
	if r.URL.Query().Get("stdin") == "1" {
		// Drain anything the client sends on stdin in the background.
		wg.Add(1)
		go func() {
			ioutil.ReadAll(conn)
			wg.Done()
		}()
	}
	outStream := stdcopy.NewStdWriter(conn, stdcopy.Stdout)
	if container.State.Running {
		fmt.Fprintf(outStream, "Container is running\n")
	} else {
		fmt.Fprintf(outStream, "Container is not running\n")
	}
	fmt.Fprintln(outStream, "What happened?")
	fmt.Fprintln(outStream, "Something happened")
	wg.Wait()
	if r.URL.Query().Get("stream") == "1" {
		// Poll (1ms interval) under the read lock until the container has
		// both started and stopped.
		for {
			time.Sleep(1e6)
			s.cMut.RLock()
			if !container.State.StartedAt.IsZero() && !container.State.Running {
				s.cMut.RUnlock()
				break
			}
			s.cMut.RUnlock()
		}
	}
	conn.Close()
}
// ContainerAttach attaches to logs according to the config passed in. See ContainerAttachConfig.
func (c *Container) ContainerAttach(name string, ca *backend.ContainerAttachConfig) error {
	defer trace.End(trace.Begin(name))

	// Look up the container name in the metadata cache to get long ID
	vc := cache.ContainerCache().GetContainer(name)
	if vc == nil {
		return NotFoundError(name)
	}
	id := vc.ContainerID

	client := c.containerProxy.Client()
	handle, err := c.Handle(id, name)
	if err != nil {
		return err
	}

	// Bind the interaction layer to the container handle.
	bind, err := client.Interaction.InteractionBind(interaction.NewInteractionBindParamsWithContext(ctx).
		WithConfig(&models.InteractionBindConfig{
			Handle: handle,
		}))
	if err != nil {
		return InternalServerError(err.Error())
	}
	handle, ok := bind.Payload.Handle.(string)
	if !ok {
		return InternalServerError(fmt.Sprintf("Type assertion failed for %#+v", handle))
	}

	// commit the handle; this will reconfigure the vm
	_, err = client.Containers.Commit(containers.NewCommitParamsWithContext(ctx).WithHandle(handle))
	if err != nil {
		switch err := err.(type) {
		case *containers.CommitNotFound:
			return NotFoundError(name)
		case *containers.CommitConflict:
			return ConflictError(err.Error())
		case *containers.CommitDefault:
			return InternalServerError(err.Payload.Message)
		default:
			return InternalServerError(err.Error())
		}
	}

	clStdin, clStdout, clStderr, err := ca.GetStreams()
	if err != nil {
		return InternalServerError("Unable to get stdio streams for calling client")
	}
	defer clStdin.Close()

	if !vc.Config.Tty && ca.MuxStreams {
		// replace the stdout/stderr with Docker's multiplex stream
		// NOTE(review): UseStdout gates the stderr wrapper and UseStderr the
		// stdout wrapper — this looks swapped; confirm intent before changing.
		if ca.UseStdout {
			clStderr = stdcopy.NewStdWriter(clStderr, stdcopy.Stderr)
		}
		if ca.UseStderr {
			clStdout = stdcopy.NewStdWriter(clStdout, stdcopy.Stdout)
		}
	}

	err = c.containerProxy.AttachStreams(context.Background(), vc, clStdin, clStdout, clStderr, ca)
	if err != nil {
		if _, ok := err.(DetachError); ok {
			// Client detached: unbind the interaction layer and commit the
			// reconfigured handle before surfacing the error.
			log.Infof("Detach detected, tearing down connection")
			client = c.containerProxy.Client()
			handle, err = c.Handle(id, name)
			if err != nil {
				return err
			}

			unbind, err := client.Interaction.InteractionUnbind(interaction.NewInteractionUnbindParamsWithContext(ctx).
				WithConfig(&models.InteractionUnbindConfig{
					Handle: handle,
				}))
			if err != nil {
				return InternalServerError(err.Error())
			}

			handle, ok = unbind.Payload.Handle.(string)
			if !ok {
				return InternalServerError("type assertion failed")
			}

			// commit the handle; this will reconfigure the vm
			_, err = client.Containers.Commit(containers.NewCommitParamsWithContext(ctx).WithHandle(handle))
			if err != nil {
				switch err := err.(type) {
				case *containers.CommitNotFound:
					return NotFoundError(name)
				case *containers.CommitDefault:
					return InternalServerError(err.Payload.Message)
				default:
					return InternalServerError(err.Error())
				}
			}
		}
		return err
	}

	return nil
}
// ContainerLogs streams a json-file-logged container's stdout/stderr to
// config.OutStream, handling the legacy split-file log format, tail,
// timestamps, and optional follow mode.
func (daemon *Daemon) ContainerLogs(name string, config *ContainerLogsConfig) error {
	var (
		lines  = -1 // -1 means "all lines"
		format string
	)
	if !(config.UseStdout || config.UseStderr) {
		return fmt.Errorf("You must choose at least one stream")
	}
	if config.Timestamps {
		format = timeutils.RFC3339NanoFixed
	}
	if config.Tail == "" {
		config.Tail = "all"
	}

	container, err := daemon.Get(name)
	if err != nil {
		return err
	}

	var (
		outStream = config.OutStream
		errStream io.Writer
	)
	if !container.Config.Tty {
		// No TTY: multiplex stdout/stderr with stdcopy framing.
		errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr)
		outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
	} else {
		errStream = outStream
	}

	if container.LogDriverType() != "json-file" {
		return fmt.Errorf("\"logs\" endpoint is supported only for \"json-file\" logging driver")
	}

	cLog, err := container.ReadLog("json")
	if err != nil && os.IsNotExist(err) {
		// Legacy logs
		logrus.Debugf("Old logs format")
		// Old format kept stdout and stderr in separate files; copy each
		// requested stream straight through.
		if config.UseStdout {
			cLog, err := container.ReadLog("stdout")
			if err != nil {
				logrus.Errorf("Error reading logs (stdout): %s", err)
			} else if _, err := io.Copy(outStream, cLog); err != nil {
				logrus.Errorf("Error streaming logs (stdout): %s", err)
			}
		}
		if config.UseStderr {
			cLog, err := container.ReadLog("stderr")
			if err != nil {
				logrus.Errorf("Error reading logs (stderr): %s", err)
			} else if _, err := io.Copy(errStream, cLog); err != nil {
				logrus.Errorf("Error streaming logs (stderr): %s", err)
			}
		}
	} else if err != nil {
		logrus.Errorf("Error reading logs (json): %s", err)
	} else {
		if config.Tail != "all" {
			var err error
			lines, err = strconv.Atoi(config.Tail)
			if err != nil {
				logrus.Errorf("Failed to parse tail %s, error: %v, show all logs", config.Tail, err)
				lines = -1
			}
		}
		if lines != 0 {
			if lines > 0 {
				// Tail: keep only the last `lines` lines of the json log.
				f := cLog.(*os.File)
				ls, err := tailfile.TailFile(f, lines)
				if err != nil {
					return err
				}
				tmp := bytes.NewBuffer([]byte{})
				for _, l := range ls {
					fmt.Fprintf(tmp, "%s\n", l)
				}
				cLog = tmp
			}
			// Decode json log records and route each to the stream that
			// produced it.
			dec := json.NewDecoder(cLog)
			l := &jsonlog.JSONLog{}
			for {
				if err := dec.Decode(l); err == io.EOF {
					break
				} else if err != nil {
					logrus.Errorf("Error streaming logs: %s", err)
					break
				}
				logLine := l.Log
				if config.Timestamps {
					// format can be "" or time format, so here can't be error
					logLine, _ = l.Format(format)
				}
				if l.Stream == "stdout" && config.UseStdout {
					io.WriteString(outStream, logLine)
				}
				if l.Stream == "stderr" && config.UseStderr {
					io.WriteString(errStream, logLine)
				}
				l.Reset()
			}
		}
	}

	if config.Follow && container.IsRunning() {
		// Stream live output from the log pipes; one goroutine per stream,
		// results collected after both finish.
		errors := make(chan error, 2)
		wg := sync.WaitGroup{}

		if config.UseStdout {
			wg.Add(1)
			stdoutPipe := container.StdoutLogPipe()
			defer stdoutPipe.Close()
			go func() {
				errors <- jsonlog.WriteLog(stdoutPipe, outStream, format)
				wg.Done()
			}()
		}
		if config.UseStderr {
			wg.Add(1)
			stderrPipe := container.StderrLogPipe()
			defer stderrPipe.Close()
			go func() {
				errors <- jsonlog.WriteLog(stderrPipe, errStream, format)
				wg.Done()
			}()
		}

		wg.Wait()
		close(errors)

		for err := range errors {
			if err != nil {
				logrus.Errorf("%s", err)
			}
		}
	}
	return nil
}
// GetContainerLogs streams a container's logs from its log driver to
// config.OutStream, honoring tail/since/follow/timestamps options and
// stopping when config.Stop is signaled.
func (daemon *Daemon) GetContainerLogs(container string, config *ContainerLogsConfig) (err error) {
	var (
		tailLines int
	)

	p, id, ok := daemon.PodList.GetByContainerIdOrName(container)
	if !ok {
		err = fmt.Errorf("cannot find container %s", container)
		glog.Error(err)
		return err
	}

	l := p.ContainerLogger(id)
	if l == nil {
		err = fmt.Errorf("cannot get logger for container %s", container)
		glog.Error(err)
		return err
	}

	logReader, ok := l.(logger.LogReader)
	if !ok {
		err = fmt.Errorf("container %s: logger not support read", container)
		glog.Error(err)
		return err
	}

	// Only follow while the container is actually alive.
	follow := config.Follow && p.IsContainerAlive(id)
	tailLines, err = strconv.Atoi(config.Tail)
	if err != nil {
		// Unparsable tail means "all lines".
		tailLines = -1
	}

	readConfig := logger.ReadConfig{
		Since:  config.Since,
		Tail:   tailLines,
		Follow: follow,
	}
	logs := logReader.ReadLogs(readConfig)

	wf := ioutils.NewWriteFlusher(config.OutStream)
	defer wf.Close()
	wf.Flush()
	var outStream io.Writer = wf
	errStream := outStream
	if !p.ContainerHasTty(id) {
		// No TTY: multiplex stdout/stderr with stdcopy framing.
		errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr)
		outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
	}

	for {
		select {
		case <-config.Stop:
			return nil
		case e := <-logs.Err:
			glog.Errorf("Error streaming logs: %v", e)
			return nil
		case msg, ok := <-logs.Msg:
			if !ok {
				// Source channel closed: end of log stream.
				glog.V(1).Info("logs: end stream")
				logs.Close()
				return nil
			}
			logLine := msg.Line
			if config.Timestamps {
				logLine = append([]byte(msg.Timestamp.Format(logger.TimeFormat)+" "), logLine...)
			}
			if msg.Source == "stdout" && config.UseStdout {
				glog.V(2).Info("print stdout log: ", logLine)
				_, err := outStream.Write(logLine)
				if err != nil {
					// Writer gone (client disconnected); stop quietly.
					return nil
				}
			}
			if msg.Source == "stderr" && config.UseStderr {
				glog.V(2).Info("print stderr log: ", logLine)
				_, err := errStream.Write(logLine)
				if err != nil {
					return nil
				}
			}
		}
	}
}
func (daemon *Daemon) ContainerLogs(name string, config *ContainerLogsConfig) error { var ( lines = -1 format string ) if !(config.UseStdout || config.UseStderr) { return fmt.Errorf("You must choose at least one stream") } if config.Timestamps { format = timeutils.RFC3339NanoFixed } if config.Tail == "" { config.Tail = "all" } container, err := daemon.Get(name) if err != nil { return err } var ( outStream = config.OutStream errStream io.Writer ) if !container.Config.Tty { errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) } else { errStream = outStream } if container.LogDriverType() != jsonfilelog.Name { return fmt.Errorf("\"logs\" endpoint is supported only for \"json-file\" logging driver") } logDriver, err := container.getLogger() cLog, err := logDriver.GetReader() if err != nil { logrus.Errorf("Error reading logs: %s", err) } else { // json-file driver if config.Tail != "all" { var err error lines, err = strconv.Atoi(config.Tail) if err != nil { logrus.Errorf("Failed to parse tail %s, error: %v, show all logs", config.Tail, err) lines = -1 } } if lines != 0 { if lines > 0 { f := cLog.(*os.File) ls, err := tailfile.TailFile(f, lines) if err != nil { return err } tmp := bytes.NewBuffer([]byte{}) for _, l := range ls { fmt.Fprintf(tmp, "%s\n", l) } cLog = tmp } dec := json.NewDecoder(cLog) l := &jsonlog.JSONLog{} for { l.Reset() if err := dec.Decode(l); err == io.EOF { break } else if err != nil { logrus.Errorf("Error streaming logs: %s", err) break } logLine := l.Log if !config.Since.IsZero() && l.Created.Before(config.Since) { continue } if config.Timestamps { // format can be "" or time format, so here can't be error logLine, _ = l.Format(format) } if l.Stream == "stdout" && config.UseStdout { io.WriteString(outStream, logLine) } if l.Stream == "stderr" && config.UseStderr { io.WriteString(errStream, logLine) } } } } if config.Follow && container.IsRunning() { chErr := make(chan error) var 
stdoutPipe, stderrPipe io.ReadCloser // write an empty chunk of data (this is to ensure that the // HTTP Response is sent immediatly, even if the container has // not yet produced any data) outStream.Write(nil) if config.UseStdout { stdoutPipe = container.StdoutLogPipe() go func() { logrus.Debug("logs: stdout stream begin") chErr <- jsonlog.WriteLog(stdoutPipe, outStream, format, config.Since) logrus.Debug("logs: stdout stream end") }() } if config.UseStderr { stderrPipe = container.StderrLogPipe() go func() { logrus.Debug("logs: stderr stream begin") chErr <- jsonlog.WriteLog(stderrPipe, errStream, format, config.Since) logrus.Debug("logs: stderr stream end") }() } err = <-chErr if stdoutPipe != nil { stdoutPipe.Close() } if stderrPipe != nil { stderrPipe.Close() } <-chErr // wait for 2nd goroutine to exit, otherwise bad things will happen if err != nil && err != io.EOF && err != io.ErrClosedPipe { if e, ok := err.(*net.OpError); ok && e.Err != syscall.EPIPE { logrus.Errorf("error streaming logs: %v", err) } } } return nil }
// ServiceLogs collects service logs and writes them back to `config.OutStream`
// The `started` channel is closed once the subscription is established and
// the response is ready to stream, so the caller can send headers first.
func (c *Cluster) ServiceLogs(ctx context.Context, input string, config *backend.ContainerLogsConfig, started chan struct{}) error {
	// Hold the read lock only while touching manager state; it is released
	// on every early-error path below and before the streaming loop.
	c.RLock()
	if !c.isActiveManager() {
		c.RUnlock()
		return c.errNoManager()
	}

	service, err := getService(ctx, c.client, input)
	if err != nil {
		c.RUnlock()
		return err
	}

	// Subscribe to logs for this single service in follow mode.
	stream, err := c.logs.SubscribeLogs(ctx, &swarmapi.SubscribeLogsRequest{
		Selector: &swarmapi.LogSelector{
			ServiceIDs: []string{service.ID},
		},
		Options: &swarmapi.LogSubscriptionOptions{
			Follow: true,
		},
	})
	if err != nil {
		c.RUnlock()
		return err
	}

	wf := ioutils.NewWriteFlusher(config.OutStream)
	defer wf.Close()
	close(started)
	// Flush immediately so the HTTP response starts even before any data.
	wf.Flush()

	// Multiplex stdout/stderr records onto the single flushed stream.
	outStream := stdcopy.NewStdWriter(wf, stdcopy.Stdout)
	errStream := stdcopy.NewStdWriter(wf, stdcopy.Stderr)

	// Release the lock before starting the stream.
	c.RUnlock()
	for {
		// Check the context before doing anything.
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}

		subscribeMsg, err := stream.Recv()
		if err == io.EOF {
			// Server closed the subscription; normal termination.
			return nil
		}
		if err != nil {
			return err
		}

		for _, msg := range subscribeMsg.Messages {
			data := []byte{}

			if config.Timestamps {
				ts, err := ptypes.Timestamp(msg.Timestamp)
				if err != nil {
					return err
				}
				data = append(data, []byte(ts.Format(logger.TimeFormat)+" ")...)
			}

			// Prefix every line with node/service/task context so clients
			// can tell which task produced it.
			data = append(data, []byte(fmt.Sprintf("%s.node.id=%s,%s.service.id=%s,%s.task.id=%s ",
				contextPrefix, msg.Context.NodeID,
				contextPrefix, msg.Context.ServiceID,
				contextPrefix, msg.Context.TaskID,
			))...)

			data = append(data, msg.Data...)

			// Route to the matching multiplexed stream.
			switch msg.Stream {
			case swarmapi.LogStreamStdout:
				outStream.Write(data)
			case swarmapi.LogStreamStderr:
				errStream.Write(data)
			}
		}
	}
}
// postContainersAttach handles the attach API endpoint: it inspects the
// container via an engine job, hijacks the HTTP connection into a raw
// bidirectional stream, and wires the client's stdin/stdout/stderr to the
// container through an "attach" job.
func postContainersAttach(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := parseForm(r); err != nil {
		return err
	}
	if vars == nil {
		return fmt.Errorf("Missing parameter")
	}

	// Inspect the container; its env output is decoded into c.
	var (
		job    = eng.Job("container_inspect", vars["name"])
		c, err = job.Stdout.AddEnv()
	)
	if err != nil {
		return err
	}

	if err = job.Run(); err != nil {
		return err
	}

	// Externally-monitored containers are attached through their own
	// monitor socket instead of this daemon.
	monitorDriver := c.Get("MonitorDriver")
	if monitorDriver == daemon.MonitorExternal {
		Id := c.Get("Id")
		// Redirect to monitor socket
		return fmt.Errorf("redirect to:%s", fmt.Sprintf("unix://%s/%s.sock", daemon.MonitorSockDir, Id))
	}

	// Take over the underlying connection for raw streaming.
	inStream, outStream, err := hijackServer(w)
	if err != nil {
		return err
	}
	// Half-close TCP connections (CloseWrite) so the peer still gets any
	// in-flight data; fully close anything else.
	defer func() {
		if tcpc, ok := inStream.(*net.TCPConn); ok {
			tcpc.CloseWrite()
		} else {
			inStream.Close()
		}
	}()
	defer func() {
		if tcpc, ok := outStream.(*net.TCPConn); ok {
			tcpc.CloseWrite()
		} else if closer, ok := outStream.(io.Closer); ok {
			closer.Close()
		}
	}()

	var errStream io.Writer

	// Hand-written status line: the connection is hijacked, so the normal
	// http.ResponseWriter machinery no longer applies.
	fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")

	// For non-TTY containers on API >= 1.6, multiplex stdout/stderr with
	// stdcopy headers; otherwise both share the raw stream.
	if c.GetSubEnv("Config") != nil && !c.GetSubEnv("Config").GetBool("Tty") && version.GreaterThanOrEqualTo("1.6") {
		errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr)
		outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
	} else {
		errStream = outStream
	}

	// Forward the client's stream selection flags to the attach job.
	job = eng.Job("attach", vars["name"])
	job.Setenv("logs", r.Form.Get("logs"))
	job.Setenv("stream", r.Form.Get("stream"))
	job.Setenv("stdin", r.Form.Get("stdin"))
	job.Setenv("stdout", r.Form.Get("stdout"))
	job.Setenv("stderr", r.Form.Get("stderr"))
	job.Stdin.Add(inStream)
	job.Stdout.Add(outStream)
	job.Stderr.Set(errStream)
	if err := job.Run(); err != nil {
		// The connection is already hijacked, so report the error in-band.
		fmt.Fprintf(outStream, "Error attaching: %s\n", err)
	}
	return nil
}
// ContainerLogs hooks up a container's stdout and stderr streams
// configured with the given struct. The `started` channel is closed once
// the log watcher is attached and the response is ready to stream. The
// stream ends when the watcher closes its Msg channel, reports an error,
// or ctx is cancelled.
func (daemon *Daemon) ContainerLogs(ctx context.Context, containerName string, config *backend.ContainerLogsConfig, started chan struct{}) error {
	container, err := daemon.GetContainer(containerName)
	if err != nil {
		return err
	}

	if !(config.ShowStdout || config.ShowStderr) {
		return fmt.Errorf("You must choose at least one stream")
	}

	cLog, err := daemon.getLogger(container)
	if err != nil {
		return err
	}
	logReader, ok := cLog.(logger.LogReader)
	if !ok {
		// This driver cannot be read back from.
		return logger.ErrReadLogsNotSupported
	}

	// Only follow while the container is running.
	follow := config.Follow && container.IsRunning()
	tailLines, err := strconv.Atoi(config.Tail)
	if err != nil {
		// Non-numeric tail (e.g. "all") means no tail limit.
		tailLines = -1
	}

	logrus.Debug("logs: begin stream")

	var since time.Time
	if config.Since != "" {
		s, n, err := timetypes.ParseTimestamps(config.Since, 0)
		if err != nil {
			return err
		}
		since = time.Unix(s, n)
	}
	readConfig := logger.ReadConfig{
		Since:  since,
		Tail:   tailLines,
		Follow: follow,
	}
	logs := logReader.ReadLogs(readConfig)

	wf := ioutils.NewWriteFlusher(config.OutStream)
	defer wf.Close()
	close(started)
	// Flush immediately so response headers reach the client before data.
	wf.Flush()
	var outStream io.Writer
	outStream = wf
	errStream := outStream
	if !container.Config.Tty {
		// No TTY: multiplex stdout and stderr onto the single stream.
		errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr)
		outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
	}

	for {
		select {
		case err := <-logs.Err:
			logrus.Errorf("Error streaming logs: %v", err)
			return nil
		case <-ctx.Done():
			logs.Close()
			return nil
		case msg, ok := <-logs.Msg:
			if !ok {
				// Watcher closed its message channel: end of stream.
				logrus.Debug("logs: end stream")
				logs.Close()
				if cLog != container.LogDriver {
					// Since the logger isn't cached in the container, which occurs if it is running, it
					// must get explicitly closed here to avoid leaking it and any file handles it has.
					if err := cLog.Close(); err != nil {
						logrus.Errorf("Error closing logger: %v", err)
					}
				}
				return nil
			}
			// Prefixes are prepended front-most last: timestamp ends up
			// before the details attributes on the final line.
			logLine := msg.Line
			if config.Details {
				logLine = append([]byte(msg.Attrs.String()+" "), logLine...)
			}
			if config.Timestamps {
				logLine = append([]byte(msg.Timestamp.Format(logger.TimeFormat)+" "), logLine...)
			}
			if msg.Source == "stdout" && config.ShowStdout {
				outStream.Write(logLine)
			}
			if msg.Source == "stderr" && config.ShowStderr {
				errStream.Write(logLine)
			}
		}
	}
}
// ContainerLogs hooks up a container's stdout and stderr streams // configured with the given struct. func (daemon *Daemon) ContainerLogs(containerName string, config *backend.ContainerLogsConfig, started chan struct{}) error { container, err := daemon.GetContainer(containerName) if err != nil { return err } if !(config.ShowStdout || config.ShowStderr) { return fmt.Errorf("You must choose at least one stream") } cLog, err := daemon.getLogger(container) if err != nil { return err } logReader, ok := cLog.(logger.LogReader) if !ok { return logger.ErrReadLogsNotSupported } follow := config.Follow && container.IsRunning() tailLines, err := strconv.Atoi(config.Tail) if err != nil { tailLines = -1 } logrus.Debug("logs: begin stream") var since time.Time if config.Since != "" { s, n, err := timetypes.ParseTimestamps(config.Since, 0) if err != nil { return err } since = time.Unix(s, n) } readConfig := logger.ReadConfig{ Since: since, Tail: tailLines, Follow: follow, } logs := logReader.ReadLogs(readConfig) wf := ioutils.NewWriteFlusher(config.OutStream) defer wf.Close() close(started) wf.Flush() var outStream io.Writer = wf errStream := outStream if !container.Config.Tty { errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) } for { select { case err := <-logs.Err: logrus.Errorf("Error streaming logs: %v", err) return nil case <-config.Stop: logs.Close() return nil case msg, ok := <-logs.Msg: if !ok { logrus.Debugf("logs: end stream") return nil } logLine := msg.Line if config.Timestamps { logLine = append([]byte(msg.Timestamp.Format(logger.TimeFormat)+" "), logLine...) } if msg.Source == "stdout" && config.ShowStdout { outStream.Write(logLine) } if msg.Source == "stderr" && config.ShowStderr { errStream.Write(logLine) } } } }
// ContainerLogs hooks up a container's stdout and stderr streams // configured with the given struct. func (daemon *Daemon) ContainerLogs(container *Container, config *ContainerLogsConfig) error { if !(config.UseStdout || config.UseStderr) { return fmt.Errorf("You must choose at least one stream") } outStream := config.OutStream errStream := outStream if !container.Config.Tty { errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) } config.OutStream = outStream cLog, err := container.getLogger() if err != nil { return err } logReader, ok := cLog.(logger.LogReader) if !ok { return logger.ErrReadLogsNotSupported } follow := config.Follow && container.IsRunning() tailLines, err := strconv.Atoi(config.Tail) if err != nil { tailLines = -1 } logrus.Debug("logs: begin stream") readConfig := logger.ReadConfig{ Since: config.Since, Tail: tailLines, Follow: follow, } logs := logReader.ReadLogs(readConfig) for { select { case err := <-logs.Err: logrus.Errorf("Error streaming logs: %v", err) return nil case <-config.Stop: logs.Close() return nil case msg, ok := <-logs.Msg: if !ok { logrus.Debugf("logs: end stream") return nil } logLine := msg.Line if config.Timestamps { logLine = append([]byte(msg.Timestamp.Format(logger.TimeFormat)+" "), logLine...) } if msg.Source == "stdout" && config.UseStdout { outStream.Write(logLine) } if msg.Source == "stderr" && config.UseStderr { errStream.Write(logLine) } } } }
func (daemon *Daemon) GetContainerLogs(container string, config *ContainerLogsConfig) (err error) { var ( pod *Pod cidx int tailLines int ) pod, cidx, err = daemon.GetPodByContainerIdOrName(container) if err != nil { return err } err = pod.getLogger(daemon) if err != nil { return err } logReader, ok := pod.PodStatus.Containers[cidx].Logs.Driver.(logger.LogReader) if !ok { return fmt.Errorf("logger not support read") } follow := config.Follow && (pod.PodStatus.Status == types.S_POD_RUNNING) tailLines, err = strconv.Atoi(config.Tail) if err != nil { tailLines = -1 } readConfig := logger.ReadConfig{ Since: config.Since, Tail: tailLines, Follow: follow, } logs := logReader.ReadLogs(readConfig) wf := ioutils.NewWriteFlusher(config.OutStream) defer wf.Close() wf.Flush() var outStream io.Writer = wf errStream := outStream if !pod.Spec.Containers[cidx].Tty { errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) } for { select { case <-config.Stop: return nil case e := <-logs.Err: glog.Errorf("Error streaming logs: %v", e) return nil case msg, ok := <-logs.Msg: if !ok { glog.V(1).Info("logs: end stream") logs.Close() return nil } logLine := msg.Line if config.Timestamps { logLine = append([]byte(msg.Timestamp.Format(logger.TimeFormat)+" "), logLine...) } if msg.Source == "stdout" && config.UseStdout { glog.V(2).Info("print stdout log: ", logLine) _, err := outStream.Write(logLine) if err != nil { return nil } } if msg.Source == "stderr" && config.UseStderr { glog.V(2).Info("print stderr log: ", logLine) _, err := errStream.Write(logLine) if err != nil { return nil } } } } }