func (s *Service) ProcessEntry(hostname *string) {
	var realtime C.uint64_t
	r := C.sd_journal_get_realtime_usec(s.Journal, &realtime)
	if r < 0 {
		panic(fmt.Sprintf("failed to get realtime timestamp: %s", C.GoString(C.strerror(-r))))
	}

	var cursor *C.char
	r = C.sd_journal_get_cursor(s.Journal, &cursor)
	if r < 0 {
		panic(fmt.Sprintf("failed to get cursor: %s", C.GoString(C.strerror(-r))))
	}

	row := make(map[string]interface{})
	// realtime is microseconds since the epoch; convert the remainder to nanoseconds.
	timestamp := time.Unix(int64(realtime/1000000), int64(realtime%1000000)*1000).UTC()
	row["ts"] = timestamp.Format("2006-01-02T15:04:05Z")
	row["host"] = hostname

	s.ProcessEntryFields(row)

	message, _ := json.Marshal(row)

	indexName := fmt.Sprintf("%v-%v", s.Config.IndexPrefix, timestamp.Format("2006-01-02"))
	cursorId := C.GoString(cursor)

	s.Indexer.Index(
		indexName,       // index
		"journal",       // type
		cursorId,        // id
		"",              // ttl
		nil,             // date
		string(message), // content
		false)           // immediate index refresh
}
func (s *journald) drainJournal(logWatcher *logger.LogWatcher, config logger.ReadConfig, j *C.sd_journal, oldCursor string) string {
	var msg, cursor *C.char
	var length C.size_t
	var stamp C.uint64_t
	var priority C.int

	// Walk the journal from here forward until we run out of new entries.
drain:
	for {
		// Try not to send a given entry twice.
		if oldCursor != "" {
			ccursor := C.CString(oldCursor)
			defer C.free(unsafe.Pointer(ccursor))
			for C.sd_journal_test_cursor(j, ccursor) > 0 {
				if C.sd_journal_next(j) <= 0 {
					break drain
				}
			}
		}
		// Read and send the logged message, if there is one to read.
		i := C.get_message(j, &msg, &length)
		if i != -C.ENOENT && i != -C.EADDRNOTAVAIL {
			// Read the entry's timestamp.
			if C.sd_journal_get_realtime_usec(j, &stamp) != 0 {
				break
			}
			// Set up the time and text of the entry.
			timestamp := time.Unix(int64(stamp)/1000000, (int64(stamp)%1000000)*1000)
			line := append(C.GoBytes(unsafe.Pointer(msg), C.int(length)), "\n"...)
			// Recover the stream name by mapping from the journal
			// priority back to the stream that we would have
			// assigned that value.
			source := ""
			if C.get_priority(j, &priority) != 0 {
				source = ""
			} else if priority == C.int(journal.PriErr) {
				source = "stderr"
			} else if priority == C.int(journal.PriInfo) {
				source = "stdout"
			}
			// Send the log message.
			cid := s.vars["CONTAINER_ID_FULL"]
			logWatcher.Msg <- &logger.Message{ContainerID: cid, Line: line, Source: source, Timestamp: timestamp}
		}
		// If we're at the end of the journal, we're done (for now).
		if C.sd_journal_next(j) <= 0 {
			break
		}
	}
	retCursor := ""
	if C.sd_journal_get_cursor(j, &cursor) == 0 {
		retCursor = C.GoString(cursor)
		C.free(unsafe.Pointer(cursor))
	}
	return retCursor
}
// GetRealtimeUsec gets the realtime (wallclock) timestamp of the current
// journal entry.
func (j *Journal) GetRealtimeUsec() (uint64, error) {
	var usec C.uint64_t

	j.mu.Lock()
	r := C.sd_journal_get_realtime_usec(j.cjournal, &usec)
	j.mu.Unlock()

	if r < 0 {
		return 0, fmt.Errorf("error getting timestamp for entry: %d", r)
	}

	return uint64(usec), nil
}
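A minimal usage sketch for the wrapper above. The NewJournal, Next, and Close names and the import path are assumptions (they match what go-systemd's sdjournal package exposes, but they are not part of the snippet); GetRealtimeUsec itself returns microseconds since the Unix epoch.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/go-systemd/sdjournal" // assumed import path for the Journal wrapper
)

func main() {
	j, err := sdjournal.NewJournal() // open the local journal (assumed constructor)
	if err != nil {
		log.Fatal(err)
	}
	defer j.Close()

	for {
		n, err := j.Next() // advance the read pointer; n == 0 means no more entries
		if err != nil {
			log.Fatal(err)
		}
		if n == 0 {
			break
		}
		usec, err := j.GetRealtimeUsec() // wallclock timestamp in microseconds
		if err != nil {
			log.Fatal(err)
		}
		// Convert microseconds since the epoch into a time.Time.
		fmt.Println(time.Unix(int64(usec/1000000), int64(usec%1000000)*1000).UTC())
	}
}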
func (j *Journal) GetDataAll() (JournalEntry, error) {
	data := make(JournalEntry)

	var d unsafe.Pointer
	var l C.size_t
	var cboot_id C.sd_id128_t
	var csid = C.CString("123456789012345678901234567890123")
	defer C.free(unsafe.Pointer(csid))
	var crealtime C.uint64_t
	var cmonotonic C.uint64_t
	var ccursor *C.char

	j.mu.Lock()
	// These address fields are not returned by the field enumeration below,
	// so fetch them explicitly.
	C.sd_journal_set_data_threshold(j.cjournal, 0)
	C.sd_journal_get_realtime_usec(j.cjournal, &crealtime)
	C.sd_journal_get_monotonic_usec(j.cjournal, &cmonotonic, &cboot_id)
	C.sd_id128_to_string(cboot_id, csid)
	C.sd_journal_get_cursor(j.cjournal, (**C.char)(&ccursor))
	defer C.free(unsafe.Pointer(ccursor))
	// Reset the field iterator so the enumeration loop below starts from the
	// first field.
	C.sd_journal_restart_data(j.cjournal)
	j.mu.Unlock()

	realtime := uint64(crealtime)
	monotonic := uint64(cmonotonic)
	cursor := C.GoString(ccursor)
	bootid := C.GoString(csid)

	data["__CURSOR"] = cursor
	data["__REALTIME_TIMESTAMP"] = realtime
	data["__MONOTONIC_TIMESTAMP"] = monotonic
	data["__BOOT_ID"] = bootid

	for {
		// Retrieve the next FIELD=value pair.
		j.mu.Lock()
		r := C.sd_journal_enumerate_data(j.cjournal, &d, &l)
		j.mu.Unlock()
		if r <= 0 {
			break
		}

		fieldData := C.GoBytes(d, C.int(l))
		name, value := splitNameValue(fieldData)
		addToMap(data, name, value)
	}

	// Add catalog data as well if there is a MESSAGE_ID.
	if _, ok := data["MESSAGE_ID"]; ok {
		catalogEntry, err := j.GetCatalog()
		if err == nil {
			data["CATALOG_ENTRY"] = catalogEntry
		}
	}

	return data, nil
}
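GetDataAll relies on splitNameValue and addToMap, which are not shown here. Below is a hedged sketch of what they might look like, assuming JournalEntry is a map[string]interface{} and that repeated field names should be collected rather than overwritten; both are assumptions, not confirmed by the snippet.

package sdjournal // assumed package name

import "bytes"

// JournalEntry is assumed to map field names to values; GetDataAll above
// stores both strings and uint64s in it.
type JournalEntry map[string]interface{}

// splitNameValue splits a raw "FIELD=value" blob returned by
// sd_journal_enumerate_data into its name and value parts.
func splitNameValue(field []byte) (string, string) {
	i := bytes.IndexByte(field, '=')
	if i < 0 {
		return string(field), ""
	}
	return string(field[:i]), string(field[i+1:])
}

// addToMap stores a field value; journald permits repeated field names, so
// duplicates are collected into a []string instead of overwriting.
func addToMap(entry JournalEntry, name, value string) {
	switch existing := entry[name].(type) {
	case nil:
		entry[name] = value
	case string:
		entry[name] = []string{existing, value}
	case []string:
		entry[name] = append(existing, value)
	}
}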
func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) {
	var j *C.sd_journal
	var cmatch *C.char
	var stamp C.uint64_t
	var sinceUnixMicro uint64
	var pipes [2]C.int
	cursor := ""

	defer close(logWatcher.Msg)
	// Get a handle to the journal.
	rc := C.sd_journal_open(&j, C.int(0))
	if rc != 0 {
		logWatcher.Err <- fmt.Errorf("error opening journal")
		return
	}
	defer C.sd_journal_close(j)
	// Remove limits on the size of data items that we'll retrieve.
	rc = C.sd_journal_set_data_threshold(j, C.size_t(0))
	if rc != 0 {
		logWatcher.Err <- fmt.Errorf("error setting journal data threshold")
		return
	}
	// Add a match to have the library do the searching for us.
	cmatch = C.CString("CONTAINER_ID_FULL=" + s.vars["CONTAINER_ID_FULL"])
	defer C.free(unsafe.Pointer(cmatch))
	rc = C.sd_journal_add_match(j, unsafe.Pointer(cmatch), C.strlen(cmatch))
	if rc != 0 {
		logWatcher.Err <- fmt.Errorf("error setting journal match")
		return
	}
	// If we have a cutoff time, convert it to Unix time once.
	if !config.Since.IsZero() {
		nano := config.Since.UnixNano()
		sinceUnixMicro = uint64(nano / 1000)
	}
	if config.Tail > 0 {
		lines := config.Tail
		// Start at the end of the journal.
		if C.sd_journal_seek_tail(j) < 0 {
			logWatcher.Err <- fmt.Errorf("error seeking to end of journal")
			return
		}
		if C.sd_journal_previous(j) < 0 {
			logWatcher.Err <- fmt.Errorf("error backtracking to previous journal entry")
			return
		}
		// Walk backward.
		for lines > 0 {
			// Stop if the entry time is before our cutoff.
			// We'll need the entry time if it isn't, so go
			// ahead and parse it now.
			if C.sd_journal_get_realtime_usec(j, &stamp) != 0 {
				break
			} else {
				// Compare the timestamp on the entry to our
				// threshold value.
				if sinceUnixMicro != 0 && sinceUnixMicro > uint64(stamp) {
					break
				}
			}
			lines--
			// If we're at the start of the journal, or don't need
			// to back up past any more entries, stop.
			if lines == 0 || C.sd_journal_previous(j) <= 0 {
				break
			}
		}
	} else {
		// Start at the beginning of the journal.
		if C.sd_journal_seek_head(j) < 0 {
			logWatcher.Err <- fmt.Errorf("error seeking to start of journal")
			return
		}
		// If we have a cutoff date, fast-forward to it.
		if sinceUnixMicro != 0 && C.sd_journal_seek_realtime_usec(j, C.uint64_t(sinceUnixMicro)) != 0 {
			logWatcher.Err <- fmt.Errorf("error seeking to start time in journal")
			return
		}
		if C.sd_journal_next(j) < 0 {
			logWatcher.Err <- fmt.Errorf("error skipping to next journal entry")
			return
		}
	}
	cursor = s.drainJournal(logWatcher, config, j, "")
	if config.Follow {
		// Create a pipe that we can poll at the same time as the
		// journald descriptor.
		if C.pipe(&pipes[0]) == C.int(-1) {
			logWatcher.Err <- fmt.Errorf("error opening journald close notification pipe")
		} else {
			s.followJournal(logWatcher, config, j, pipes, cursor)
		}
	}
	return
}
func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) {
	var j *C.sd_journal
	var cmatch *C.char
	var stamp C.uint64_t
	var sinceUnixMicro uint64
	var pipes [2]C.int
	cursor := ""

	// Get a handle to the journal.
	rc := C.sd_journal_open(&j, C.int(0))
	if rc != 0 {
		logWatcher.Err <- fmt.Errorf("error opening journal")
		close(logWatcher.Msg)
		return
	}
	// If we end up following the log, the goroutine that does the following
	// takes ownership of the journal context and the message channel, so we
	// must not close them here while it may still be using them. Otherwise,
	// close them when we return from this function.
	following := false
	defer func(pfollowing *bool) {
		if !*pfollowing {
			C.sd_journal_close(j)
			close(logWatcher.Msg)
		}
	}(&following)
	// Remove limits on the size of data items that we'll retrieve.
	rc = C.sd_journal_set_data_threshold(j, C.size_t(0))
	if rc != 0 {
		logWatcher.Err <- fmt.Errorf("error setting journal data threshold")
		return
	}
	// Add a match to have the library do the searching for us.
	cmatch = C.CString("CONTAINER_ID_FULL=" + s.vars["CONTAINER_ID_FULL"])
	defer C.free(unsafe.Pointer(cmatch))
	rc = C.sd_journal_add_match(j, unsafe.Pointer(cmatch), C.strlen(cmatch))
	if rc != 0 {
		logWatcher.Err <- fmt.Errorf("error setting journal match")
		return
	}
	// If we have a cutoff time, convert it to Unix time once.
	if !config.Since.IsZero() {
		nano := config.Since.UnixNano()
		sinceUnixMicro = uint64(nano / 1000)
	}
	if config.Tail > 0 {
		lines := config.Tail
		// Start at the end of the journal.
		if C.sd_journal_seek_tail(j) < 0 {
			logWatcher.Err <- fmt.Errorf("error seeking to end of journal")
			return
		}
		if C.sd_journal_previous(j) < 0 {
			logWatcher.Err <- fmt.Errorf("error backtracking to previous journal entry")
			return
		}
		// Walk backward.
		for lines > 0 {
			// Stop if the entry time is before our cutoff.
			// We'll need the entry time if it isn't, so go
			// ahead and parse it now.
			if C.sd_journal_get_realtime_usec(j, &stamp) != 0 {
				break
			} else {
				// Compare the timestamp on the entry to our
				// threshold value.
				if sinceUnixMicro != 0 && sinceUnixMicro > uint64(stamp) {
					break
				}
			}
			lines--
			// If we're at the start of the journal, or don't need
			// to back up past any more entries, stop.
			if lines == 0 || C.sd_journal_previous(j) <= 0 {
				break
			}
		}
	} else {
		// Start at the beginning of the journal.
		if C.sd_journal_seek_head(j) < 0 {
			logWatcher.Err <- fmt.Errorf("error seeking to start of journal")
			return
		}
		// If we have a cutoff date, fast-forward to it.
		if sinceUnixMicro != 0 && C.sd_journal_seek_realtime_usec(j, C.uint64_t(sinceUnixMicro)) != 0 {
			logWatcher.Err <- fmt.Errorf("error seeking to start time in journal")
			return
		}
		if C.sd_journal_next(j) < 0 {
			logWatcher.Err <- fmt.Errorf("error skipping to next journal entry")
			return
		}
	}
	cursor = s.drainJournal(logWatcher, config, j, "")
	if config.Follow {
		// Allocate a descriptor for following the journal, if we'll
		// need one. Do it here so that we can report if it fails.
		if fd := C.sd_journal_get_fd(j); fd < C.int(0) {
			logWatcher.Err <- fmt.Errorf("error opening journald follow descriptor: %q", C.GoString(C.strerror(-fd)))
		} else {
			// Create a pipe that we can poll at the same time as
			// the journald descriptor.
			if C.pipe(&pipes[0]) == C.int(-1) {
				logWatcher.Err <- fmt.Errorf("error opening journald close notification pipe")
			} else {
				s.followJournal(logWatcher, config, j, pipes, cursor)
				// Let followJournal handle freeing the journal
				// context object and closing the channel.
				following = true
			}
		}
	}
	return
}
func (s *journald) drainJournal(logWatcher *logger.LogWatcher, config logger.ReadConfig, j *C.sd_journal, oldCursor string) string {
	var msg, data, cursor *C.char
	var length C.size_t
	var stamp C.uint64_t
	var priority C.int

	// Walk the journal from here forward until we run out of new entries.
drain:
	for {
		// Try not to send a given entry twice.
		if oldCursor != "" {
			ccursor := C.CString(oldCursor)
			defer C.free(unsafe.Pointer(ccursor))
			for C.sd_journal_test_cursor(j, ccursor) > 0 {
				if C.sd_journal_next(j) <= 0 {
					break drain
				}
			}
		}
		// Read and send the logged message, if there is one to read.
		i := C.get_message(j, &msg, &length)
		if i != -C.ENOENT && i != -C.EADDRNOTAVAIL {
			// Read the entry's timestamp.
			if C.sd_journal_get_realtime_usec(j, &stamp) != 0 {
				break
			}
			// Set up the time and text of the entry.
			timestamp := time.Unix(int64(stamp)/1000000, (int64(stamp)%1000000)*1000)
			line := append(C.GoBytes(unsafe.Pointer(msg), C.int(length)), "\n"...)
			// Recover the stream name by mapping from the journal
			// priority back to the stream that we would have
			// assigned that value.
			source := ""
			if C.get_priority(j, &priority) != 0 {
				source = ""
			} else if priority == C.int(journal.PriErr) {
				source = "stderr"
			} else if priority == C.int(journal.PriInfo) {
				source = "stdout"
			}
			// Retrieve the values of any variables we're adding to the journal.
			attrs := make(map[string]string)
			C.sd_journal_restart_data(j)
			for C.get_attribute_field(j, &data, &length) > C.int(0) {
				kv := strings.SplitN(C.GoStringN(data, C.int(length)), "=", 2)
				attrs[kv[0]] = kv[1]
			}
			if len(attrs) == 0 {
				attrs = nil
			}
			// Send the log message.
			logWatcher.Msg <- &logger.Message{
				Line:      line,
				Source:    source,
				Timestamp: timestamp.In(time.UTC),
				Attrs:     attrs,
			}
		}
		// If we're at the end of the journal, we're done (for now).
		if C.sd_journal_next(j) <= 0 {
			break
		}
	}
	retCursor := ""
	if C.sd_journal_get_cursor(j, &cursor) == 0 {
		retCursor = C.GoString(cursor)
		C.free(unsafe.Pointer(cursor))
	}
	return retCursor
}
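Both drainJournal variants call C helpers (get_message, get_priority, get_attribute_field) that live in the cgo preamble of the same Go file rather than in libsystemd itself. Below is a minimal sketch of such a preamble, assuming the helpers are thin wrappers around the documented sd_journal_get_data and sd_journal_enumerate_data calls; the real get_attribute_field also filters out the well-known journal fields, which this sketch does not.

package journald // assumed package name; the preamble must sit in the same file that calls C.get_message

/*
#cgo pkg-config: libsystemd
#include <errno.h>
#include <string.h>
#include <systemd/sd-journal.h>

// Fetch the MESSAGE field and strip the "MESSAGE=" prefix so the Go side
// receives only the log text.
static int get_message(sd_journal *j, const char **msg, size_t *length)
{
	const char prefix[] = "MESSAGE=";
	int rc;
	*msg = NULL;
	*length = 0;
	rc = sd_journal_get_data(j, "MESSAGE", (const void **) msg, length);
	if (rc == 0) {
		if (*length > sizeof(prefix) - 1) {
			*msg += sizeof(prefix) - 1;
			*length -= sizeof(prefix) - 1;
		} else {
			*msg = NULL;
			*length = 0;
			rc = -ENOENT;
		}
	}
	return rc;
}

// Fetch the PRIORITY field and convert its single-digit value to an int.
static int get_priority(sd_journal *j, int *priority)
{
	const void *data;
	size_t length;
	int rc;
	*priority = -1;
	rc = sd_journal_get_data(j, "PRIORITY", &data, &length);
	if (rc == 0 && length > strlen("PRIORITY="))
		*priority = ((const char *) data)[strlen("PRIORITY=")] - '0';
	return rc;
}

// Enumerate the remaining FIELD=value pairs. NOTE: a real implementation
// would skip MESSAGE, PRIORITY, and the other well-known fields here; this
// sketch returns every enumerated field.
static int get_attribute_field(sd_journal *j, const char **data, size_t *length)
{
	return sd_journal_enumerate_data(j, (const void **) data, length);
}
*/
import "C"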