func (s *Server) drainSyslogConn(conn net.Conn) { connDone := make(chan struct{}) defer close(connDone) go func() { select { case <-connDone: case <-s.shutdown: } conn.Close() }() sc := bufio.NewScanner(conn) sc.Split(rfc6587.Split) for sc.Scan() { msgBytes := sc.Bytes() // slice in msgBytes could get modified on next Scan(), need to copy it msgCopy := make([]byte, len(msgBytes)) copy(msgCopy, msgBytes) msg, cursor, err := utils.ParseMessage(msgCopy) if err != nil { log15.Error("rfc5424 parse error", "err", err) } else { s.Cursors.Update(string(msg.Hostname), cursor) s.Aggregator.Feed(msg) } if s.testMessageHook != nil { s.testMessageHook <- struct{}{} } } }
// streamWithHistory sends the app's log messages to ch, replaying what is
// already stored in the on-disk log files before (if follow is true)
// switching over to the live subscription. If jobID is non-empty, only
// messages whose ProcID ends with that job ID are forwarded, and the
// stream ends once the job is done and the history has been read.
// Stopping the returned stream shuts down both goroutines; reader errors
// are reported via the stream's Error field.
func (m *Mux) streamWithHistory(appID, jobID string, follow bool, ch chan<- *rfc5424.Message) (stream.Stream, error) {
	l := m.logger.New("fn", "streamWithHistory", "app.id", appID, "job.id", jobID)
	logs, err := m.logFiles(appID)
	if err != nil {
		return nil, err
	}
	if len(logs) == 0 {
		// no history on disk, just stream live messages
		return m.followLog(appID, jobID, ch)
	}
	msgs := make(chan message)
	// unsubscribeFn hands the live-subscription cancel func from the file
	// reader goroutine to the forwarding goroutine (buffered so the send
	// doesn't block the reader)
	unsubscribeFn := make(chan func(), 1)
	s := stream.New()
	var jobDone <-chan struct{}
	if jobID != "" {
		jobDone = m.jobDoneCh(jobID, s.StopCh)
	}
	// forwarding goroutine: filters and dedupes messages from msgs and
	// sends them to ch until the stream is stopped or input is exhausted
	go func() {
		var cursor *utils.HostCursor
		var unsubscribe func()
		var done bool
		defer func() {
			close(ch)
			if unsubscribe != nil {
				unsubscribe()
			}
		}()
		for {
			select {
			case msg, ok := <-msgs:
				if !ok {
					return
				}
				if jobID != "" && !strings.HasSuffix(string(msg.Message.Header.ProcID), jobID) {
					// skip messages that aren't from the job we care about
					continue
				}
				if cursor != nil && !msg.HostCursor.After(*cursor) {
					// skip messages with old cursors
					continue
				}
				cursor = msg.HostCursor
				select {
				case ch <- msg.Message:
				case <-s.StopCh:
					return
				}
			case <-jobDone:
				if unsubscribe != nil {
					// already switched to the live stream, so the
					// historical logs have been fully read
					return
				}
				// we haven't finished reading the historical logs, exit when finished
				done = true
				jobDone = nil // don't select on the closed channel again
			case fn, ok := <-unsubscribeFn:
				if !ok {
					// reader goroutine finished without subscribing live
					if done {
						// historical logs done, and job already exited
						return
					}
					unsubscribeFn = nil
					continue
				}
				unsubscribe = fn
			case <-s.StopCh:
				return
			}
		}
	}()
	// reader goroutine: replays each log file into msgs, subscribing to
	// the live stream just before the final read of the last file
	go func() {
		defer close(unsubscribeFn)
		for i, name := range logs[appID] {
			if err := func() (err error) {
				l := l.New("log", name)
				f, err := os.Open(name)
				if err != nil {
					l.Error("error reading log", "error", err)
					return err
				}
				defer f.Close()
				sc := bufio.NewScanner(f)
				sc.Split(rfc6587.SplitWithNewlines)
				var eof bool
			scan:
				for sc.Scan() {
					msgBytes := sc.Bytes()
					// slice in msgBytes could get modified on next Scan(), need to copy it
					// (len-1 drops the trailing newline that SplitWithNewlines
					// includes in the token)
					msgCopy := make([]byte, len(msgBytes)-1)
					copy(msgCopy, msgBytes)
					msg, cursor, err := utils.ParseMessage(msgCopy)
					if err != nil {
						l.Error("error parsing log message", "error", err)
						return err
					}
					select {
					case msgs <- message{cursor, msg}:
					case <-s.StopCh:
						return nil
					}
				}
				if err := sc.Err(); err != nil {
					l.Error("error scanning log message", "error", err)
					return err
				}
				if follow && !eof && i == len(logs[appID])-1 {
					// got EOF on last file, subscribe to stream
					eof = true
					unsubscribeFn <- m.subscribe(appID, msgs)
					// read to end of file again
					goto scan
				}
				return nil
			}(); err != nil {
				close(msgs)
				s.Error = err
				return
			}
		}
		if !follow {
			close(msgs)
		}
	}()
	return s, nil
}
// addAggregator dials the log aggregator at addr, replays any local log
// messages the aggregator hasn't seen (based on the cursors it reports),
// and then keeps forwarding live messages from the firehose subscription.
// Messages for apps with on-disk logs are deduplicated against the file
// replay using per-app buffer cursors.
func (m *Mux) addAggregator(addr string) {
	l := m.logger.New("fn", "addAggregator", "addr", addr)
	// TODO(titanous): add dial timeout
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		l.Error("failed to connect to aggregator", "error", err)
		return
	}
	l.Info("connected to aggregator")
	// ask the aggregator's HTTP API which cursors it already has
	host, _, _ := net.SplitHostPort(addr)
	c, _ := client.New("http://" + host)
	cursors, err := c.GetCursors()
	if err != nil {
		// TODO(titanous): retry
		l.Error("failed to get cursors from aggregator", "error", err)
		conn.Close()
		return
	}
	// aggCursor is the aggregator's cursor for this host, if any; only
	// messages after it need to be replayed from the local files
	var aggCursor *utils.HostCursor
	if c, ok := cursors[m.hostID]; ok {
		aggCursor = &c
	}
	if aggCursor != nil {
		l.Info("got cursor", "cursor.timestamp", aggCursor.Time, "cursor.seq", aggCursor.Seq)
	} else {
		l.Info("no cursor for host")
	}
	appLogs, err := m.logFiles("")
	if err != nil {
		l.Error("failed to get local log files", "error", err)
		conn.Close()
		return
	}
	bufferedMessages := make(chan message)
	firehose := make(chan message)
	done := make(chan struct{})

	// subscribe to all messages
	unsubscribe := m.subscribe(firehoseApp, firehose)
	// bufferCursors records, per app, the cursor of the last message read
	// from that app's files; the writer goroutine uses it to drop
	// firehose messages already covered by the file replay
	bufferCursors := make(map[string]utils.HostCursor)
	var bufferCursorsMtx sync.Mutex
	// writer goroutine: sends replayed and live messages to the aggregator
	go func() {
		l := m.logger.New("fn", "sendToAggregator", "addr", addr)
		defer unsubscribe()
		defer conn.Close()
		defer close(done)
		bm := bufferedMessages // make a copy so we can nil it later
		for {
			var m message
			var ok bool
			select {
			case m, ok = <-bm:
				if !ok {
					// file replay finished; live messages only from now on
					bm = nil
					continue
				}
			case m, ok = <-firehose:
				if !ok {
					return
				}
				// if app in list of app logs and cursor from reading files, skip
				// (no saved cursor yet means the replay for this app is
				// still in progress and will deliver this message itself)
				appID := string(m.Message.AppName)
				if _, ok := appLogs[appID]; ok {
					bufferCursorsMtx.Lock()
					c, ok := bufferCursors[appID]
					bufferCursorsMtx.Unlock()
					if !ok || c.After(*m.HostCursor) {
						continue
					}
				}
			}
			if _, err := conn.Write(rfc6587.Bytes(m.Message)); err != nil {
				l.Error("failed to write message", "error", err)
				return
			}
		}
	}()
	// replay local log files, skipping anything at or before the
	// aggregator's cursor
	for appID, logs := range appLogs {
		for i, name := range logs {
			func() {
				l := l.New("log", name)
				f, err := os.Open(name)
				if err != nil {
					l.Error("failed to open log file", "error", err)
					return
				}
				defer f.Close()
				sc := bufio.NewScanner(f)
				sc.Split(rfc6587.SplitWithNewlines)
				var cursor *utils.HostCursor
				cursorSaved := false
			scan:
				for sc.Scan() {
					msgBytes := sc.Bytes()
					// slice in msgBytes could get modified on next Scan(), need to copy it
					msgCopy := make([]byte, len(msgBytes)-1)
					copy(msgCopy, msgBytes)
					var msg *rfc5424.Message
					msg, cursor, err = utils.ParseMessage(msgCopy)
					if err != nil {
						l.Error("failed to parse message", "msg", string(msgCopy), "error", err)
						continue
					}
					if aggCursor != nil && !cursor.After(*aggCursor) {
						// aggregator already has this message
						continue
					}
					select {
					case bufferedMessages <- message{cursor, msg}:
					case <-done:
						// writer goroutine exited, stop replaying
						return
					}
				}
				if err := sc.Err(); err != nil {
					l.Error("failed to scan message", "error", err)
					return
				}
				if !cursorSaved && i == len(appLogs[appID])-1 {
					// last file, send cursor to processing goroutine
					// NOTE(review): cursor is nil here if the last file
					// contained no parseable messages — *cursor would
					// panic; verify log files are never empty
					bufferCursorsMtx.Lock()
					bufferCursors[appID] = *cursor
					bufferCursorsMtx.Unlock()
					cursorSaved = true
					// read to end of file again
					goto scan
				}
			}()
		}
	}
	close(bufferedMessages)
}