Code Example #1
func (c *libvirtContainer) followLogs(g *grohl.Context, buffer host.LogBuffer) error {
	c.l.logStreamMtx.Lock()
	defer c.l.logStreamMtx.Unlock()
	if _, ok := c.l.logStreams[c.job.ID]; ok {
		return nil
	}

	g.Log(grohl.Data{"at": "get_stdout"})
	stdout, stderr, initLog, err := c.Client.GetStreams()
	if err != nil {
		g.Log(grohl.Data{"at": "get_streams", "status": "error", "err": err.Error()})
		return err
	}

	nonblocking := func(file *os.File) (net.Conn, error) {
		// convert to a net.Conn so we do non-blocking I/O on the fd and Close
		// will make calls to Read return straight away (using read(2) would
		// not have this same behaviour, meaning we could potentially read
		// from the stream after we have closed and returned the buffer).
		defer file.Close()
		return net.FileConn(file)
	}

	muxConfig := logmux.Config{
		AppID:   c.job.Metadata["flynn-controller.app"],
		HostID:  c.l.state.id,
		JobType: c.job.Metadata["flynn-controller.type"],
		JobID:   c.job.ID,
	}

	logStreams := make(map[string]*logmux.LogStream, 3)
	stdoutR, err := nonblocking(stdout)
	if err != nil {
		g.Log(grohl.Data{"at": "log_stream", "type": "stdout", "status": "error", "err": err.Error()})
		return err
	}
	logStreams["stdout"] = c.l.mux.Follow(stdoutR, buffer["stdout"], 1, muxConfig)

	stderrR, err := nonblocking(stderr)
	if err != nil {
		g.Log(grohl.Data{"at": "log_stream", "type": "stderr", "status": "error", "err": err.Error()})
		return err
	}
	logStreams["stderr"] = c.l.mux.Follow(stderrR, buffer["stderr"], 2, muxConfig)

	initLogR, err := nonblocking(initLog)
	if err != nil {
		g.Log(grohl.Data{"at": "log_stream", "type": "initLog", "status": "error", "err": err.Error()})
		return err
	}
	logStreams["initLog"] = c.l.mux.Follow(initLogR, buffer["initLog"], 3, muxConfig)
	c.l.logStreams[c.job.ID] = logStreams

	return nil
}
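
Note on the nonblocking helper above: net.FileConn only works when the *os.File wraps a socket descriptor, so this assumes GetStreams hands back unix-socket fds. The sketch below (unix-only; the names local and remote are placeholders standing in for the container's stream fds) shows why the conversion matters: once the fd is registered with the runtime poller via net.FileConn, Close unblocks a pending Read, which a plain read(2) on the raw fd would not do.

package main

import (
	"fmt"
	"net"
	"os"
	"syscall"
	"time"
)

func main() {
	// A connected pair of unix-domain sockets stands in for the stream fd
	// handed back by the container.
	fds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM, 0)
	if err != nil {
		panic(err)
	}
	local := os.NewFile(uintptr(fds[0]), "local")
	remote := os.NewFile(uintptr(fds[1]), "remote")

	// Same conversion as the nonblocking helper: FileConn dups the fd and
	// registers it with the runtime poller, so the *os.File can be closed
	// straight away.
	conn, err := net.FileConn(local)
	local.Close()
	if err != nil {
		panic(err)
	}

	done := make(chan struct{})
	go func() {
		buf := make([]byte, 64)
		for {
			n, err := conn.Read(buf)
			if err != nil {
				// Read returns promptly once conn.Close is called.
				fmt.Println("read loop finished:", err)
				close(done)
				return
			}
			fmt.Printf("read %q\n", buf[:n])
		}
	}()

	remote.Write([]byte("hello from the container\n"))
	time.Sleep(100 * time.Millisecond)

	// Closing the net.Conn makes the blocked Read return immediately; a raw
	// read(2) on the original fd would keep blocking until more data or EOF.
	conn.Close()
	<-done
	remote.Close()
}
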
Code Example #2
File: libvirt_lxc_backend.go Project: joshteng/flynn
func (c *libvirtContainer) followLogs(g *grohl.Context, buffer host.LogBuffer) error {
	c.l.logStreamMtx.Lock()
	defer c.l.logStreamMtx.Unlock()
	if _, ok := c.l.logStreams[c.job.ID]; ok {
		return nil
	}

	g.Log(grohl.Data{"at": "get_stdout"})
	stdout, stderr, initLog, err := c.Client.GetStreams()
	if err != nil {
		g.Log(grohl.Data{"at": "get_streams", "status": "error", "err": err.Error()})
		return err
	}

	var stdoutR, stderrR, initLogR io.Reader
	logStreams := make(map[string]*logStream, 3)
	logStreams["stdout"], stdoutR, err = newLogStream(stdout, buffer["stdout"])
	if err != nil {
		g.Log(grohl.Data{"at": "log_stream", "type": "stdout", "status": "error", "err": err.Error()})
		return err
	}
	logStreams["stderr"], stderrR, err = newLogStream(stderr, buffer["stderr"])
	if err != nil {
		g.Log(grohl.Data{"at": "log_stream", "type": "stderr", "status": "error", "err": err.Error()})
		return err
	}
	logStreams["initLog"], initLogR, err = newLogStream(initLog, buffer["initLog"])
	if err != nil {
		g.Log(grohl.Data{"at": "log_stream", "type": "initLog", "status": "error", "err": err.Error()})
		return err
	}
	c.l.logStreams[c.job.ID] = logStreams

	log := c.l.openLog(c.job.ID)
	go func() {
		// close the log once all logStreams have finished
		var wg sync.WaitGroup
		wg.Add(len(logStreams))
		for _, s := range logStreams {
			go func(s *logStream) {
				<-s.done
				wg.Done()
			}(s)
		}
		wg.Wait()
		log.Close()
	}()

	muxConfig := logmux.Config{
		AppID:   c.job.Metadata["flynn-controller.app"],
		HostID:  c.l.state.id,
		JobType: c.job.Metadata["flynn-controller.type"],
		JobID:   c.job.ID,
	}

	// TODO(benburkert): remove file logging once attach proto uses logaggregator
	streams := []io.Reader{stdoutR, stderrR}
	for i, stream := range streams {
		fd := i + 1
		bufr, bufw := io.Pipe()
		muxr, muxw := io.Pipe()
		go func(r io.Reader, pw1, pw2 *io.PipeWriter, fd int) {
			mw := io.MultiWriter(pw1, pw2)
			// declare err with := so each copy goroutine has its own error
			// value instead of racing on the enclosing err
			_, err := io.Copy(mw, r)
			pw1.CloseWithError(err)
			pw2.CloseWithError(err)
		}(stream, bufw, muxw, fd)

		go log.Follow(fd, bufr)
		go c.l.mux.Follow(muxr, fd, muxConfig)
	}

	go log.Follow(3, initLogR)
	return nil
}
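
Note on the fan-out loop above: each stream is copied once through io.MultiWriter into two io.Pipes, one feeding the on-disk job log and one feeding the log mux. A minimal standalone sketch of that pattern (src and the consumer names are placeholders, not part of the Flynn code):

package main

import (
	"fmt"
	"io"
	"strings"
	"sync"
)

func main() {
	// Stand-in for a container's stdout stream.
	src := strings.NewReader("line one\nline two\n")

	// One copy goroutine feeds two independent consumers via in-memory pipes.
	bufr, bufw := io.Pipe()
	muxr, muxw := io.Pipe()

	go func() {
		mw := io.MultiWriter(bufw, muxw)
		// err is local to this goroutine; CloseWithError(nil) delivers EOF
		// to both readers on success.
		_, err := io.Copy(mw, src)
		bufw.CloseWithError(err)
		muxw.CloseWithError(err)
	}()

	var wg sync.WaitGroup
	consume := func(name string, r io.Reader) {
		defer wg.Done()
		b, _ := io.ReadAll(r)
		fmt.Printf("%s got: %q\n", name, b)
	}
	wg.Add(2)
	go consume("file log", bufr)
	go consume("log mux", muxr)
	wg.Wait()
}
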