// Close closes a journal opened with NewJournal.
func (j *Journal) Close() error {
	j.mu.Lock()
	C.sd_journal_close(j.cjournal)
	j.mu.Unlock()
	return nil
}
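// The Journal type and its constructor are not shown above; the following is a
// minimal sketch of what Close pairs with, assuming a struct that wraps the cgo
// journal handle and the mutex used in Close. The constructor name and error
// wording are illustrative assumptions, not the package's confirmed API.
type Journal struct {
	cjournal *C.sd_journal
	mu       sync.Mutex
}

// NewJournal opens the local journal and wraps the handle; the caller is
// responsible for calling Close when finished with it.
func NewJournal() (*Journal, error) {
	j := &Journal{}
	if rc := C.sd_journal_open(&j.cjournal, C.int(0)); rc < 0 {
		return nil, fmt.Errorf("error opening journal: %s", C.GoString(C.strerror(-rc)))
	}
	return j, nil
}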
func (s *journald) followJournal(logWatcher *logger.LogWatcher, config logger.ReadConfig, j *C.sd_journal, pfd [2]C.int, cursor string) {
	s.readers.mu.Lock()
	s.readers.readers[logWatcher] = logWatcher
	s.readers.mu.Unlock()
	go func() {
		// Keep copying journal data out until we're notified to stop
		// or we hit an error.
		status := C.wait_for_data_or_close(j, pfd[0])
		for status == 1 {
			cursor = s.drainJournal(logWatcher, config, j, cursor)
			status = C.wait_for_data_or_close(j, pfd[0])
		}
		if status < 0 {
			cerrstr := C.strerror(C.int(-status))
			errstr := C.GoString(cerrstr)
			fmtstr := "error %q while attempting to follow journal for container %q"
			logrus.Errorf(fmtstr, errstr, s.vars["CONTAINER_ID_FULL"])
		}
		// Clean up.
		C.close(pfd[0])
		s.readers.mu.Lock()
		delete(s.readers.readers, logWatcher)
		s.readers.mu.Unlock()
		C.sd_journal_close(j)
		close(logWatcher.Msg)
	}()
	// Wait until we're told to stop.
	select {
	case <-logWatcher.WatchClose():
		// Notify the other goroutine that its work is done.
		C.close(pfd[1])
	}
}
func (s *journald) followJournal(logWatcher *logger.LogWatcher, config logger.ReadConfig, j *C.sd_journal, pfd [2]C.int, cursor string) {
	s.readers.mu.Lock()
	s.readers.readers[logWatcher] = logWatcher
	s.readers.mu.Unlock()
	go func() {
		// Keep copying journal data out until we're notified to stop.
		for C.wait_for_data_or_close(j, pfd[0]) == 1 {
			cursor = s.drainJournal(logWatcher, config, j, cursor)
		}
		// Clean up.
		C.close(pfd[0])
		s.readers.mu.Lock()
		delete(s.readers.readers, logWatcher)
		s.readers.mu.Unlock()
		C.sd_journal_close(j)
		close(logWatcher.Msg)
	}()
	// Wait until we're told to stop.
	select {
	case <-logWatcher.WatchClose():
		// Notify the other goroutine that its work is done.
		C.close(pfd[1])
	}
}
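// The cgo helper wait_for_data_or_close used above lives in this file's C
// preamble and is not shown here. The function below is only a pure-Go sketch
// of the wake-up pattern it is assumed to implement: poll the journal's
// descriptor and the read end of the notification pipe together, return 1 when
// journal data is available, 0 once the writer closes the pipe, and a negative
// errno on failure. It assumes the golang.org/x/sys/unix package and is
// illustrative, not the driver's actual helper.
func waitForDataOrClose(journalFD, pipeFD int) int {
	fds := []unix.PollFd{
		{Fd: int32(journalFD), Events: unix.POLLIN},
		{Fd: int32(pipeFD), Events: unix.POLLIN},
	}
	for {
		if _, err := unix.Poll(fds, -1); err != nil {
			if err == unix.EINTR {
				continue // interrupted by a signal; retry
			}
			if errno, ok := err.(unix.Errno); ok {
				return -int(errno) // negative errno, matching the C helper's convention
			}
			return -1
		}
		// The writer closed its end of the pipe: time to stop following.
		if fds[1].Revents&(unix.POLLHUP|unix.POLLERR) != 0 {
			return 0
		}
		// The journal descriptor became readable: new entries may be available.
		if fds[0].Revents&unix.POLLIN != 0 {
			return 1
		}
	}
}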
func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) {
	var j *C.sd_journal
	var cmatch *C.char
	var stamp C.uint64_t
	var sinceUnixMicro uint64
	var pipes [2]C.int
	cursor := ""

	defer close(logWatcher.Msg)
	// Get a handle to the journal.
	rc := C.sd_journal_open(&j, C.int(0))
	if rc != 0 {
		logWatcher.Err <- fmt.Errorf("error opening journal")
		return
	}
	defer C.sd_journal_close(j)
	// Remove limits on the size of data items that we'll retrieve.
	rc = C.sd_journal_set_data_threshold(j, C.size_t(0))
	if rc != 0 {
		logWatcher.Err <- fmt.Errorf("error setting journal data threshold")
		return
	}
	// Add a match to have the library do the searching for us.
	cmatch = C.CString("CONTAINER_ID_FULL=" + s.vars["CONTAINER_ID_FULL"])
	defer C.free(unsafe.Pointer(cmatch))
	rc = C.sd_journal_add_match(j, unsafe.Pointer(cmatch), C.strlen(cmatch))
	if rc != 0 {
		logWatcher.Err <- fmt.Errorf("error setting journal match")
		return
	}
	// If we have a cutoff time, convert it to Unix time once.
	if !config.Since.IsZero() {
		nano := config.Since.UnixNano()
		sinceUnixMicro = uint64(nano / 1000)
	}
	if config.Tail > 0 {
		lines := config.Tail
		// Start at the end of the journal.
		if C.sd_journal_seek_tail(j) < 0 {
			logWatcher.Err <- fmt.Errorf("error seeking to end of journal")
			return
		}
		if C.sd_journal_previous(j) < 0 {
			logWatcher.Err <- fmt.Errorf("error backtracking to previous journal entry")
			return
		}
		// Walk backward.
		for lines > 0 {
			// Stop if the entry time is before our cutoff.
			// We'll need the entry time if it isn't, so go
			// ahead and parse it now.
			if C.sd_journal_get_realtime_usec(j, &stamp) != 0 {
				break
			} else {
				// Compare the timestamp on the entry
				// to our threshold value.
				if sinceUnixMicro != 0 && sinceUnixMicro > uint64(stamp) {
					break
				}
			}
			lines--
			// If we're at the start of the journal, or
			// don't need to back up past any more entries,
			// stop.
			if lines == 0 || C.sd_journal_previous(j) <= 0 {
				break
			}
		}
	} else {
		// Start at the beginning of the journal.
		if C.sd_journal_seek_head(j) < 0 {
			logWatcher.Err <- fmt.Errorf("error seeking to start of journal")
			return
		}
		// If we have a cutoff date, fast-forward to it.
		if sinceUnixMicro != 0 && C.sd_journal_seek_realtime_usec(j, C.uint64_t(sinceUnixMicro)) != 0 {
			logWatcher.Err <- fmt.Errorf("error seeking to start time in journal")
			return
		}
		if C.sd_journal_next(j) < 0 {
			logWatcher.Err <- fmt.Errorf("error skipping to next journal entry")
			return
		}
	}
	cursor = s.drainJournal(logWatcher, config, j, "")
	if config.Follow {
		// Create a pipe that we can poll at the same time as the journald descriptor.
		if C.pipe(&pipes[0]) == C.int(-1) {
			logWatcher.Err <- fmt.Errorf("error opening journald close notification pipe")
		} else {
			s.followJournal(logWatcher, config, j, pipes, cursor)
		}
	}
	return
}
func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) {
	var j *C.sd_journal
	var cmatch *C.char
	var stamp C.uint64_t
	var sinceUnixMicro uint64
	var pipes [2]C.int
	cursor := ""

	// Get a handle to the journal.
	rc := C.sd_journal_open(&j, C.int(0))
	if rc != 0 {
		logWatcher.Err <- fmt.Errorf("error opening journal")
		close(logWatcher.Msg)
		return
	}
	// If we end up following the log, we can set the journal context
	// pointer and the channel pointer to nil so that we won't close them
	// here, potentially while the goroutine that uses them is still
	// running.  Otherwise, close them when we return from this function.
	following := false
	defer func(pfollowing *bool) {
		if !*pfollowing {
			C.sd_journal_close(j)
			close(logWatcher.Msg)
		}
	}(&following)
	// Remove limits on the size of data items that we'll retrieve.
	rc = C.sd_journal_set_data_threshold(j, C.size_t(0))
	if rc != 0 {
		logWatcher.Err <- fmt.Errorf("error setting journal data threshold")
		return
	}
	// Add a match to have the library do the searching for us.
	cmatch = C.CString("CONTAINER_ID_FULL=" + s.vars["CONTAINER_ID_FULL"])
	defer C.free(unsafe.Pointer(cmatch))
	rc = C.sd_journal_add_match(j, unsafe.Pointer(cmatch), C.strlen(cmatch))
	if rc != 0 {
		logWatcher.Err <- fmt.Errorf("error setting journal match")
		return
	}
	// If we have a cutoff time, convert it to Unix time once.
	if !config.Since.IsZero() {
		nano := config.Since.UnixNano()
		sinceUnixMicro = uint64(nano / 1000)
	}
	if config.Tail > 0 {
		lines := config.Tail
		// Start at the end of the journal.
		if C.sd_journal_seek_tail(j) < 0 {
			logWatcher.Err <- fmt.Errorf("error seeking to end of journal")
			return
		}
		if C.sd_journal_previous(j) < 0 {
			logWatcher.Err <- fmt.Errorf("error backtracking to previous journal entry")
			return
		}
		// Walk backward.
		for lines > 0 {
			// Stop if the entry time is before our cutoff.
			// We'll need the entry time if it isn't, so go
			// ahead and parse it now.
			if C.sd_journal_get_realtime_usec(j, &stamp) != 0 {
				break
			} else {
				// Compare the timestamp on the entry
				// to our threshold value.
				if sinceUnixMicro != 0 && sinceUnixMicro > uint64(stamp) {
					break
				}
			}
			lines--
			// If we're at the start of the journal, or
			// don't need to back up past any more entries,
			// stop.
			if lines == 0 || C.sd_journal_previous(j) <= 0 {
				break
			}
		}
	} else {
		// Start at the beginning of the journal.
		if C.sd_journal_seek_head(j) < 0 {
			logWatcher.Err <- fmt.Errorf("error seeking to start of journal")
			return
		}
		// If we have a cutoff date, fast-forward to it.
		if sinceUnixMicro != 0 && C.sd_journal_seek_realtime_usec(j, C.uint64_t(sinceUnixMicro)) != 0 {
			logWatcher.Err <- fmt.Errorf("error seeking to start time in journal")
			return
		}
		if C.sd_journal_next(j) < 0 {
			logWatcher.Err <- fmt.Errorf("error skipping to next journal entry")
			return
		}
	}
	cursor = s.drainJournal(logWatcher, config, j, "")
	if config.Follow {
		// Allocate a descriptor for following the journal, if we'll
		// need one.  Do it here so that we can report if it fails.
		if fd := C.sd_journal_get_fd(j); fd < C.int(0) {
			logWatcher.Err <- fmt.Errorf("error opening journald follow descriptor: %q", C.GoString(C.strerror(-fd)))
		} else {
			// Create a pipe that we can poll at the same time as
			// the journald descriptor.
			if C.pipe(&pipes[0]) == C.int(-1) {
				logWatcher.Err <- fmt.Errorf("error opening journald close notification pipe")
			} else {
				s.followJournal(logWatcher, config, j, pipes, cursor)
				// Let followJournal handle freeing the journal context
				// object and closing the channel.
				following = true
			}
		}
	}
	return
}
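// A minimal sketch of how readLogs is assumed to be driven: the exported
// ReadLogs method required by the logger package's log-reading interface
// creates the watcher, starts readLogs in a goroutine, and returns the watcher
// to the caller, which then consumes logWatcher.Msg and logWatcher.Err until
// the message channel is closed. The wrapper below is illustrative; only
// readLogs itself appears above.
func (s *journald) ReadLogs(config logger.ReadConfig) *logger.LogWatcher {
	logWatcher := logger.NewLogWatcher()
	go s.readLogs(logWatcher, config)
	return logWatcher
}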