func (s *journald) drainJournal(logWatcher *logger.LogWatcher, config logger.ReadConfig, j *C.sd_journal, oldCursor string) string {
	var msg, cursor *C.char
	var length C.size_t
	var stamp C.uint64_t
	var priority C.int

	// Convert the old cursor to a C string once, instead of allocating a new
	// one (and deferring its free) on every pass through the loop below.
	var ccursor *C.char
	if oldCursor != "" {
		ccursor = C.CString(oldCursor)
		defer C.free(unsafe.Pointer(ccursor))
	}

	// Walk the journal from here forward until we run out of new entries.
drain:
	for {
		// Try not to send a given entry twice.
		if ccursor != nil {
			for C.sd_journal_test_cursor(j, ccursor) > 0 {
				if C.sd_journal_next(j) <= 0 {
					break drain
				}
			}
		}
		// Read and send the logged message, if there is one to read.
		i := C.get_message(j, &msg, &length)
		if i != -C.ENOENT && i != -C.EADDRNOTAVAIL {
			// Read the entry's timestamp.
			if C.sd_journal_get_realtime_usec(j, &stamp) != 0 {
				break
			}
			// Set up the time and text of the entry.
			timestamp := time.Unix(int64(stamp)/1000000, (int64(stamp)%1000000)*1000)
			line := append(C.GoBytes(unsafe.Pointer(msg), C.int(length)), "\n"...)
			// Recover the stream name by mapping from the journal priority
			// back to the stream that we would have assigned that value.
			source := ""
			if C.get_priority(j, &priority) != 0 {
				source = ""
			} else if priority == C.int(journal.PriErr) {
				source = "stderr"
			} else if priority == C.int(journal.PriInfo) {
				source = "stdout"
			}
			// Send the log message.
			cid := s.vars["CONTAINER_ID_FULL"]
			logWatcher.Msg <- &logger.Message{ContainerID: cid, Line: line, Source: source, Timestamp: timestamp}
		}
		// If we're at the end of the journal, we're done (for now).
		if C.sd_journal_next(j) <= 0 {
			break
		}
	}

	retCursor := ""
	if C.sd_journal_get_cursor(j, &cursor) == 0 {
		retCursor = C.GoString(cursor)
		C.free(unsafe.Pointer(cursor))
	}
	return retCursor
}
// Next advances the read pointer into the journal by one entry.
func (j *Journal) Next() (int, error) {
	j.mu.Lock()
	r := C.sd_journal_next(j.cjournal)
	j.mu.Unlock()

	if r < 0 {
		return int(r), fmt.Errorf("failed to iterate journal: %d", r)
	}

	return int(r), nil
}
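For illustration, here is one way a caller might drive the wrapper above: keep calling Next until it reports that no entries remain, then return so the caller can wait for new data and try again. Only Next comes from the code shown; drainOnce and its handle callback are hypothetical names, assumed to live in the same package as Journal.

// drainOnce is a sketch, not part of the wrapper above: it advances the
// journal entry by entry, calling handle for each one, and returns once
// Next reports that no further entries are available.
func drainOnce(j *Journal, handle func(*Journal) error) error {
	for {
		n, err := j.Next()
		if err != nil {
			return err
		}
		if n == 0 {
			// End of the journal for now; the caller can wait for new
			// entries and call drainOnce again.
			return nil
		}
		if err := handle(j); err != nil {
			return err
		}
	}
}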
func (s *Service) ProcessStream(hostname *string) {
	s.Indexer.Start()
	defer s.Indexer.Stop()

	for {
		r := C.sd_journal_next(s.Journal)
		if r < 0 {
			panic(fmt.Sprintf("failed to iterate to next entry: %s", C.GoString(C.strerror(-r))))
		}
		if r == 0 {
			r = C.sd_journal_wait(s.Journal, 1000000)
			if r < 0 {
				panic(fmt.Sprintf("failed to wait for changes: %s", C.GoString(C.strerror(-r))))
			}
			continue
		}
		s.ProcessEntry(hostname)
	}
}
func (s *Service) ProcessStream() {
	s.Indexer.Start()
	defer s.Indexer.Stop()

	for {
		r := C.sd_journal_next(s.Journal)
		if r < 0 {
			log.Fatalf("failed to iterate to next entry: %s", C.GoString(C.strerror(-r)))
		}
		if r == 0 {
			r = C.sd_journal_wait(s.Journal, 1000000)
			if r < 0 {
				log.Fatalf("failed to wait for changes: %s", C.GoString(C.strerror(-r)))
			}
			continue
		}
		s.ProcessEntry()
	}
}
func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) {
	var j *C.sd_journal
	var cmatch *C.char
	var stamp C.uint64_t
	var sinceUnixMicro uint64
	var pipes [2]C.int
	cursor := ""

	defer close(logWatcher.Msg)
	// Get a handle to the journal.
	rc := C.sd_journal_open(&j, C.int(0))
	if rc != 0 {
		logWatcher.Err <- fmt.Errorf("error opening journal")
		return
	}
	defer C.sd_journal_close(j)
	// Remove limits on the size of data items that we'll retrieve.
	rc = C.sd_journal_set_data_threshold(j, C.size_t(0))
	if rc != 0 {
		logWatcher.Err <- fmt.Errorf("error setting journal data threshold")
		return
	}
	// Add a match to have the library do the searching for us.
	cmatch = C.CString("CONTAINER_ID_FULL=" + s.vars["CONTAINER_ID_FULL"])
	defer C.free(unsafe.Pointer(cmatch))
	rc = C.sd_journal_add_match(j, unsafe.Pointer(cmatch), C.strlen(cmatch))
	if rc != 0 {
		logWatcher.Err <- fmt.Errorf("error setting journal match")
		return
	}
	// If we have a cutoff time, convert it to Unix time once.
	if !config.Since.IsZero() {
		nano := config.Since.UnixNano()
		sinceUnixMicro = uint64(nano / 1000)
	}
	if config.Tail > 0 {
		lines := config.Tail
		// Start at the end of the journal.
		if C.sd_journal_seek_tail(j) < 0 {
			logWatcher.Err <- fmt.Errorf("error seeking to end of journal")
			return
		}
		if C.sd_journal_previous(j) < 0 {
			logWatcher.Err <- fmt.Errorf("error backtracking to previous journal entry")
			return
		}
		// Walk backward.
		for lines > 0 {
			// Stop if the entry time is before our cutoff. We'll need the
			// entry time if it isn't, so go ahead and parse it now.
			if C.sd_journal_get_realtime_usec(j, &stamp) != 0 {
				break
			}
			// Compare the timestamp on the entry to our threshold value.
			if sinceUnixMicro != 0 && sinceUnixMicro > uint64(stamp) {
				break
			}
			lines--
			// If we're at the start of the journal, or don't need to back up
			// past any more entries, stop.
			if lines == 0 || C.sd_journal_previous(j) <= 0 {
				break
			}
		}
	} else {
		// Start at the beginning of the journal.
		if C.sd_journal_seek_head(j) < 0 {
			logWatcher.Err <- fmt.Errorf("error seeking to start of journal")
			return
		}
		// If we have a cutoff date, fast-forward to it.
		if sinceUnixMicro != 0 && C.sd_journal_seek_realtime_usec(j, C.uint64_t(sinceUnixMicro)) != 0 {
			logWatcher.Err <- fmt.Errorf("error seeking to start time in journal")
			return
		}
		if C.sd_journal_next(j) < 0 {
			logWatcher.Err <- fmt.Errorf("error skipping to next journal entry")
			return
		}
	}
	cursor = s.drainJournal(logWatcher, config, j, "")
	if config.Follow {
		// Create a pipe that we can poll at the same time as the journald descriptor.
		if C.pipe(&pipes[0]) == C.int(-1) {
			logWatcher.Err <- fmt.Errorf("error opening journald close notification pipe")
		} else {
			s.followJournal(logWatcher, config, j, pipes, cursor)
		}
	}
	return
}
func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) {
	var j *C.sd_journal
	var cmatch *C.char
	var stamp C.uint64_t
	var sinceUnixMicro uint64
	var pipes [2]C.int
	cursor := ""

	// Get a handle to the journal.
	rc := C.sd_journal_open(&j, C.int(0))
	if rc != 0 {
		logWatcher.Err <- fmt.Errorf("error opening journal")
		close(logWatcher.Msg)
		return
	}
	// If we end up following the log, the follower goroutine takes ownership
	// of the journal context and the message channel, so we must not close
	// them here while it may still be using them. Otherwise, close both when
	// we return from this function.
	following := false
	defer func(pfollowing *bool) {
		if !*pfollowing {
			C.sd_journal_close(j)
			close(logWatcher.Msg)
		}
	}(&following)
	// Remove limits on the size of data items that we'll retrieve.
	rc = C.sd_journal_set_data_threshold(j, C.size_t(0))
	if rc != 0 {
		logWatcher.Err <- fmt.Errorf("error setting journal data threshold")
		return
	}
	// Add a match to have the library do the searching for us.
	cmatch = C.CString("CONTAINER_ID_FULL=" + s.vars["CONTAINER_ID_FULL"])
	defer C.free(unsafe.Pointer(cmatch))
	rc = C.sd_journal_add_match(j, unsafe.Pointer(cmatch), C.strlen(cmatch))
	if rc != 0 {
		logWatcher.Err <- fmt.Errorf("error setting journal match")
		return
	}
	// If we have a cutoff time, convert it to Unix time once.
	if !config.Since.IsZero() {
		nano := config.Since.UnixNano()
		sinceUnixMicro = uint64(nano / 1000)
	}
	if config.Tail > 0 {
		lines := config.Tail
		// Start at the end of the journal.
		if C.sd_journal_seek_tail(j) < 0 {
			logWatcher.Err <- fmt.Errorf("error seeking to end of journal")
			return
		}
		if C.sd_journal_previous(j) < 0 {
			logWatcher.Err <- fmt.Errorf("error backtracking to previous journal entry")
			return
		}
		// Walk backward.
		for lines > 0 {
			// Stop if the entry time is before our cutoff. We'll need the
			// entry time if it isn't, so go ahead and parse it now.
			if C.sd_journal_get_realtime_usec(j, &stamp) != 0 {
				break
			}
			// Compare the timestamp on the entry to our threshold value.
			if sinceUnixMicro != 0 && sinceUnixMicro > uint64(stamp) {
				break
			}
			lines--
			// If we're at the start of the journal, or don't need to back up
			// past any more entries, stop.
			if lines == 0 || C.sd_journal_previous(j) <= 0 {
				break
			}
		}
	} else {
		// Start at the beginning of the journal.
		if C.sd_journal_seek_head(j) < 0 {
			logWatcher.Err <- fmt.Errorf("error seeking to start of journal")
			return
		}
		// If we have a cutoff date, fast-forward to it.
		if sinceUnixMicro != 0 && C.sd_journal_seek_realtime_usec(j, C.uint64_t(sinceUnixMicro)) != 0 {
			logWatcher.Err <- fmt.Errorf("error seeking to start time in journal")
			return
		}
		if C.sd_journal_next(j) < 0 {
			logWatcher.Err <- fmt.Errorf("error skipping to next journal entry")
			return
		}
	}
	cursor = s.drainJournal(logWatcher, config, j, "")
	if config.Follow {
		// Allocate a descriptor for following the journal, if we'll
		// need one. Do it here so that we can report if it fails.
		if fd := C.sd_journal_get_fd(j); fd < C.int(0) {
			logWatcher.Err <- fmt.Errorf("error opening journald follow descriptor: %q", C.GoString(C.strerror(-fd)))
		} else {
			// Create a pipe that we can poll at the same time as
			// the journald descriptor.
			if C.pipe(&pipes[0]) == C.int(-1) {
				logWatcher.Err <- fmt.Errorf("error opening journald close notification pipe")
			} else {
				s.followJournal(logWatcher, config, j, pipes, cursor)
				// Let followJournal handle freeing the journal context
				// object and closing the channel.
				following = true
			}
		}
	}
	return
}
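followJournal is referenced above but not shown. Both readLogs variants hand it the journal handle plus a pipe created with C.pipe, so the follower can be woken either by new journal data or by the driver closing the write end of the pipe at shutdown. The sketch below only illustrates that wait step under those assumptions; the package name, function name, and structure are mine, not the driver's.

package journalfollow // illustrative name, not the driver's package

/*
#cgo pkg-config: libsystemd
#include <poll.h>
#include <systemd/sd-journal.h>
*/
import "C"

import "fmt"

// waitForData blocks until the journal reports activity or the notification
// pipe's read end becomes readable/hung up, which we treat as a request to
// stop following. It returns stop=true in the latter case.
func waitForData(j *C.sd_journal, notifyFd C.int) (stop bool, err error) {
	fds := [2]C.struct_pollfd{
		{fd: notifyFd, events: C.short(C.POLLIN | C.POLLHUP)},
		{fd: C.sd_journal_get_fd(j), events: C.short(C.sd_journal_get_events(j))},
	}
	if rc := C.poll(&fds[0], C.nfds_t(len(fds)), -1); rc < 0 {
		return false, fmt.Errorf("poll failed: %d", rc)
	}
	if fds[0].revents != 0 {
		// The driver closed (or wrote to) the pipe: time to stop.
		return true, nil
	}
	// New journal data: let the library update its internal state, then the
	// caller can drain the journal again.
	C.sd_journal_process(j)
	return false, nil
}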
func (s *journald) drainJournal(logWatcher *logger.LogWatcher, config logger.ReadConfig, j *C.sd_journal, oldCursor string) string {
	var msg, data, cursor *C.char
	var length C.size_t
	var stamp C.uint64_t
	var priority C.int

	// Convert the old cursor to a C string once, instead of allocating a new
	// one (and deferring its free) on every pass through the loop below.
	var ccursor *C.char
	if oldCursor != "" {
		ccursor = C.CString(oldCursor)
		defer C.free(unsafe.Pointer(ccursor))
	}

	// Walk the journal from here forward until we run out of new entries.
drain:
	for {
		// Try not to send a given entry twice.
		if ccursor != nil {
			for C.sd_journal_test_cursor(j, ccursor) > 0 {
				if C.sd_journal_next(j) <= 0 {
					break drain
				}
			}
		}
		// Read and send the logged message, if there is one to read.
		i := C.get_message(j, &msg, &length)
		if i != -C.ENOENT && i != -C.EADDRNOTAVAIL {
			// Read the entry's timestamp.
			if C.sd_journal_get_realtime_usec(j, &stamp) != 0 {
				break
			}
			// Set up the time and text of the entry.
			timestamp := time.Unix(int64(stamp)/1000000, (int64(stamp)%1000000)*1000)
			line := append(C.GoBytes(unsafe.Pointer(msg), C.int(length)), "\n"...)
			// Recover the stream name by mapping from the journal priority
			// back to the stream that we would have assigned that value.
			source := ""
			if C.get_priority(j, &priority) != 0 {
				source = ""
			} else if priority == C.int(journal.PriErr) {
				source = "stderr"
			} else if priority == C.int(journal.PriInfo) {
				source = "stdout"
			}
			// Retrieve the values of any variables we're adding to the journal.
			attrs := make(map[string]string)
			C.sd_journal_restart_data(j)
			for C.get_attribute_field(j, &data, &length) > C.int(0) {
				kv := strings.SplitN(C.GoStringN(data, C.int(length)), "=", 2)
				attrs[kv[0]] = kv[1]
			}
			if len(attrs) == 0 {
				attrs = nil
			}
			// Send the log message.
			logWatcher.Msg <- &logger.Message{
				Line:      line,
				Source:    source,
				Timestamp: timestamp.In(time.UTC),
				Attrs:     attrs,
			}
		}
		// If we're at the end of the journal, we're done (for now).
		if C.sd_journal_next(j) <= 0 {
			break
		}
	}

	retCursor := ""
	if C.sd_journal_get_cursor(j, &cursor) == 0 {
		retCursor = C.GoString(cursor)
		C.free(unsafe.Pointer(cursor))
	}
	return retCursor
}
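The two drainJournal variants above rely on small C helpers (get_message, get_priority, get_attribute_field) defined in the driver's cgo preamble, which is not reproduced here. As a rough illustration of what the message helper has to accomplish, the Go-side sketch below fetches the current entry's MESSAGE field with sd_journal_get_data and strips the "MESSAGE=" prefix; it is an approximation under that assumption, not the driver's actual helper.

package journalread // illustrative name, not the driver's package

/*
#cgo pkg-config: libsystemd
#include <stdlib.h>
#include <systemd/sd-journal.h>
*/
import "C"

import "unsafe"

// readMessage returns the current entry's MESSAGE value, if present.
// sd_journal_get_data hands back a "MESSAGE=<text>" buffer that is only
// valid until the journal position changes, so the bytes are copied into
// Go memory before returning.
func readMessage(j *C.sd_journal) (string, bool) {
	var data unsafe.Pointer
	var length C.size_t
	field := C.CString("MESSAGE")
	defer C.free(unsafe.Pointer(field))

	if C.sd_journal_get_data(j, field, &data, &length) != 0 {
		return "", false
	}
	raw := C.GoBytes(data, C.int(length))
	const prefix = "MESSAGE="
	if len(raw) < len(prefix) {
		return "", false
	}
	return string(raw[len(prefix):]), true
}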