// SetDataThreshold sets the data field size threshold for data returned by
// GetData. To retrieve the complete data fields this threshold should be
// turned off by setting it to 0, so that the library always returns the
// complete data objects.
func (j *Journal) SetDataThreshold(threshold uint64) error {
	j.mu.Lock()
	r := C.sd_journal_set_data_threshold(j.cjournal, C.size_t(threshold))
	j.mu.Unlock()

	if r < 0 {
		return fmt.Errorf("failed to set data threshold: %d", r)
	}

	return nil
}
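// Hypothetical usage sketch (not part of the original source): disabling the
// threshold before reading, so subsequent data calls return complete field
// values. NewJournal and Close are assumed to be the usual constructor and
// destructor of this wrapper type.
func exampleDisableThreshold() error {
	j, err := NewJournal()
	if err != nil {
		return err
	}
	defer j.Close()

	// A threshold of 0 turns truncation off entirely, so large fields
	// (long MESSAGE values, for example) come back whole.
	return j.SetDataThreshold(0)
}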
func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) {
	var j *C.sd_journal
	var cmatch *C.char
	var stamp C.uint64_t
	var sinceUnixMicro uint64
	var pipes [2]C.int
	cursor := ""

	defer close(logWatcher.Msg)
	// Get a handle to the journal.
	rc := C.sd_journal_open(&j, C.int(0))
	if rc != 0 {
		logWatcher.Err <- fmt.Errorf("error opening journal")
		return
	}
	defer C.sd_journal_close(j)
	// Remove limits on the size of data items that we'll retrieve.
	rc = C.sd_journal_set_data_threshold(j, C.size_t(0))
	if rc != 0 {
		logWatcher.Err <- fmt.Errorf("error setting journal data threshold")
		return
	}
	// Add a match to have the library do the searching for us.
	cmatch = C.CString("CONTAINER_ID_FULL=" + s.vars["CONTAINER_ID_FULL"])
	defer C.free(unsafe.Pointer(cmatch))
	rc = C.sd_journal_add_match(j, unsafe.Pointer(cmatch), C.strlen(cmatch))
	if rc != 0 {
		logWatcher.Err <- fmt.Errorf("error setting journal match")
		return
	}
	// If we have a cutoff time, convert it to Unix time once.
	if !config.Since.IsZero() {
		nano := config.Since.UnixNano()
		sinceUnixMicro = uint64(nano / 1000)
	}
	if config.Tail > 0 {
		lines := config.Tail
		// Start at the end of the journal.
		if C.sd_journal_seek_tail(j) < 0 {
			logWatcher.Err <- fmt.Errorf("error seeking to end of journal")
			return
		}
		if C.sd_journal_previous(j) < 0 {
			logWatcher.Err <- fmt.Errorf("error backtracking to previous journal entry")
			return
		}
		// Walk backward.
		for lines > 0 {
			// Stop if the entry time is before our cutoff.
			// We'll need the entry time if it isn't, so go
			// ahead and parse it now.
			if C.sd_journal_get_realtime_usec(j, &stamp) != 0 {
				break
			} else {
				// Compare the timestamp on the entry
				// to our threshold value.
				if sinceUnixMicro != 0 && sinceUnixMicro > uint64(stamp) {
					break
				}
			}
			lines--
			// If we're at the start of the journal, or
			// don't need to back up past any more entries,
			// stop.
			if lines == 0 || C.sd_journal_previous(j) <= 0 {
				break
			}
		}
	} else {
		// Start at the beginning of the journal.
		if C.sd_journal_seek_head(j) < 0 {
			logWatcher.Err <- fmt.Errorf("error seeking to start of journal")
			return
		}
		// If we have a cutoff date, fast-forward to it.
		if sinceUnixMicro != 0 && C.sd_journal_seek_realtime_usec(j, C.uint64_t(sinceUnixMicro)) != 0 {
			logWatcher.Err <- fmt.Errorf("error seeking to start time in journal")
			return
		}
		if C.sd_journal_next(j) < 0 {
			logWatcher.Err <- fmt.Errorf("error skipping to next journal entry")
			return
		}
	}
	cursor = s.drainJournal(logWatcher, config, j, "")
	if config.Follow {
		// Create a pipe that we can poll at the same time as the
		// journald descriptor.
		if C.pipe(&pipes[0]) == C.int(-1) {
			logWatcher.Err <- fmt.Errorf("error opening journald close notification pipe")
		} else {
			s.followJournal(logWatcher, config, j, pipes, cursor)
		}
	}
	return
}
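// Illustrative sketch (an assumption, not part of the driver): the cutoff
// arithmetic used by readLogs above. Journal realtime stamps are microseconds
// since the epoch, so config.Since is converted once and each entry's stamp
// is compared against it while walking backward from the tail.
func sinceUnixMicroOf(since time.Time) uint64 {
	if since.IsZero() {
		return 0 // zero means "no cutoff"
	}
	return uint64(since.UnixNano() / 1000)
}

// entryBeforeCutoff reports whether an entry with the given realtime stamp
// (in microseconds) falls before the cutoff, which ends the backward walk.
func entryBeforeCutoff(stamp, cutoff uint64) bool {
	return cutoff != 0 && cutoff > stamp
}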
func (j *Journal) GetDataAll() (JournalEntry, error) {
	data := make(JournalEntry)
	var d unsafe.Pointer
	var l C.size_t
	var cboot_id C.sd_id128_t
	// 33-byte scratch buffer for sd_id128_to_string (32 hex chars + NUL).
	var csid = C.CString("123456789012345678901234567890123")
	defer C.free(unsafe.Pointer(csid))
	var crealtime C.uint64_t
	var cmonotonic C.uint64_t
	var ccursor *C.char

	j.mu.Lock()
	// The address metadata (__CURSOR, timestamps, boot ID) is not returned
	// by field enumeration, so fetch it explicitly.
	C.sd_journal_set_data_threshold(j.cjournal, 0)
	C.sd_journal_get_realtime_usec(j.cjournal, &crealtime)
	C.sd_journal_get_monotonic_usec(j.cjournal, &cmonotonic, &cboot_id)
	C.sd_id128_to_string(cboot_id, csid)
	C.sd_journal_get_cursor(j.cjournal, (**C.char)(&ccursor))
	defer C.free(unsafe.Pointer(ccursor))
	// Reset the enumeration so the loop below starts from the first field.
	C.sd_journal_restart_data(j.cjournal)
	j.mu.Unlock()

	realtime := uint64(crealtime)
	monotonic := uint64(cmonotonic)
	cursor := C.GoString(ccursor)
	bootid := C.GoString(csid)

	data["__CURSOR"] = cursor
	data["__REALTIME_TIMESTAMP"] = realtime
	data["__MONOTONIC_TIMESTAMP"] = monotonic
	data["__BOOT_ID"] = bootid

	for {
		// Retrieve the next field of the current entry.
		j.mu.Lock()
		r := C.sd_journal_enumerate_data(j.cjournal, &d, &l)
		j.mu.Unlock()
		if r <= 0 {
			break
		}
		fieldData := C.GoBytes(d, C.int(l))
		name, value := splitNameValue(fieldData)
		addToMap(data, name, value)
	}

	// Add catalog data as well if there is a MESSAGE_ID.
	if _, ok := data["MESSAGE_ID"]; ok {
		catalogEntry, err := j.GetCatalog()
		if err == nil {
			data["CATALOG_ENTRY"] = catalogEntry
		}
	}

	return data, nil
}
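// Hypothetical usage sketch (not from the original source): advancing the
// read pointer and collecting every field of the current entry. Next is
// assumed to wrap sd_journal_next in the style of this package, returning
// the number of entries advanced.
func exampleReadEntry(j *Journal) error {
	n, err := j.Next()
	if err != nil {
		return err
	}
	if n == 0 {
		return nil // no entry available yet
	}
	entry, err := j.GetDataAll()
	if err != nil {
		return err
	}
	fmt.Printf("%s: %v\n", entry["__CURSOR"], entry["MESSAGE"])
	return nil
}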
func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) {
	var j *C.sd_journal
	var cmatch *C.char
	var stamp C.uint64_t
	var sinceUnixMicro uint64
	var pipes [2]C.int
	cursor := ""

	// Get a handle to the journal.
	rc := C.sd_journal_open(&j, C.int(0))
	if rc != 0 {
		logWatcher.Err <- fmt.Errorf("error opening journal")
		close(logWatcher.Msg)
		return
	}
	// If we end up following the log, the goroutine started by
	// followJournal takes ownership of the journal context and the
	// message channel, so we must not close them here while it may
	// still be running. Otherwise, close them when we return from
	// this function.
	following := false
	defer func(pfollowing *bool) {
		if !*pfollowing {
			C.sd_journal_close(j)
			close(logWatcher.Msg)
		}
	}(&following)
	// Remove limits on the size of data items that we'll retrieve.
	rc = C.sd_journal_set_data_threshold(j, C.size_t(0))
	if rc != 0 {
		logWatcher.Err <- fmt.Errorf("error setting journal data threshold")
		return
	}
	// Add a match to have the library do the searching for us.
	cmatch = C.CString("CONTAINER_ID_FULL=" + s.vars["CONTAINER_ID_FULL"])
	defer C.free(unsafe.Pointer(cmatch))
	rc = C.sd_journal_add_match(j, unsafe.Pointer(cmatch), C.strlen(cmatch))
	if rc != 0 {
		logWatcher.Err <- fmt.Errorf("error setting journal match")
		return
	}
	// If we have a cutoff time, convert it to Unix time once.
	if !config.Since.IsZero() {
		nano := config.Since.UnixNano()
		sinceUnixMicro = uint64(nano / 1000)
	}
	if config.Tail > 0 {
		lines := config.Tail
		// Start at the end of the journal.
		if C.sd_journal_seek_tail(j) < 0 {
			logWatcher.Err <- fmt.Errorf("error seeking to end of journal")
			return
		}
		if C.sd_journal_previous(j) < 0 {
			logWatcher.Err <- fmt.Errorf("error backtracking to previous journal entry")
			return
		}
		// Walk backward.
		for lines > 0 {
			// Stop if the entry time is before our cutoff.
			// We'll need the entry time if it isn't, so go
			// ahead and parse it now.
			if C.sd_journal_get_realtime_usec(j, &stamp) != 0 {
				break
			} else {
				// Compare the timestamp on the entry
				// to our threshold value.
				if sinceUnixMicro != 0 && sinceUnixMicro > uint64(stamp) {
					break
				}
			}
			lines--
			// If we're at the start of the journal, or
			// don't need to back up past any more entries,
			// stop.
			if lines == 0 || C.sd_journal_previous(j) <= 0 {
				break
			}
		}
	} else {
		// Start at the beginning of the journal.
		if C.sd_journal_seek_head(j) < 0 {
			logWatcher.Err <- fmt.Errorf("error seeking to start of journal")
			return
		}
		// If we have a cutoff date, fast-forward to it.
		if sinceUnixMicro != 0 && C.sd_journal_seek_realtime_usec(j, C.uint64_t(sinceUnixMicro)) != 0 {
			logWatcher.Err <- fmt.Errorf("error seeking to start time in journal")
			return
		}
		if C.sd_journal_next(j) < 0 {
			logWatcher.Err <- fmt.Errorf("error skipping to next journal entry")
			return
		}
	}
	cursor = s.drainJournal(logWatcher, config, j, "")
	if config.Follow {
		// Allocate a descriptor for following the journal, if we'll
		// need one. Do it here so that we can report if it fails.
		if fd := C.sd_journal_get_fd(j); fd < C.int(0) {
			logWatcher.Err <- fmt.Errorf("error opening journald follow descriptor: %q", C.GoString(C.strerror(-fd)))
		} else {
			// Create a pipe that we can poll at the same time as
			// the journald descriptor.
			if C.pipe(&pipes[0]) == C.int(-1) {
				logWatcher.Err <- fmt.Errorf("error opening journald close notification pipe")
			} else {
				s.followJournal(logWatcher, config, j, pipes, cursor)
				// Let followJournal handle freeing the journal
				// context object and closing the channel.
				following = true
			}
		}
	}
	return
}
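// Sketch of the follow-side wait (an assumption about followJournal, which
// is not shown here): poll the descriptor from sd_journal_get_fd together
// with the read end of the notification pipe. Closing the pipe's write end
// raises POLLHUP on the read end and wakes the poll, so the follower can
// exit promptly instead of blocking on the journal indefinitely. Uses
// golang.org/x/sys/unix for poll(2).
func waitForDataOrClose(j *C.sd_journal, pipeRead C.int) (closed bool, err error) {
	fds := []unix.PollFd{
		// sd_journal_get_events reports which poll events the journal
		// descriptor needs.
		{Fd: int32(C.sd_journal_get_fd(j)), Events: int16(C.sd_journal_get_events(j))},
		// POLLHUP is delivered even when not requested in Events.
		{Fd: int32(pipeRead), Events: 0},
	}
	for {
		if _, err := unix.Poll(fds, -1); err != nil {
			if err == unix.EINTR {
				continue
			}
			return false, err
		}
		if fds[1].Revents&unix.POLLHUP != 0 {
			return true, nil // write end closed: stop following
		}
		// Let the library consume the wakeup and report what changed;
		// the caller then drains any newly appended entries.
		if C.sd_journal_process(j) >= 0 {
			return false, nil
		}
	}
}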