// Logs returns the log entries parsed from the log files stored on
// the server. Log entries are returned in reverse chronological order. The
// following options are available:
// * "starttime" query parameter filters the log entries to only ones that
//   occurred on or after the "starttime". Defaults to a day ago.
// * "endtime" query parameter filters the log entries to only ones that
//   occurred on or before the "endtime". Defaults to the current time.
// * "pattern" query parameter filters the log entries by the provided regexp
//   pattern if it exists. Defaults to nil.
// * "max" query parameter is the hard limit of the number of returned log
//   entries. Defaults to defaultMaxLogEntries.
// * "level" query parameter filters the log entries to be those of the
//   corresponding severity level or worse. Defaults to "info".
func (s *statusServer) Logs(ctx context.Context, req *serverpb.LogsRequest) (*serverpb.JSONResponse, error) {
	log.Flush()

	var sev log.Severity
	if len(req.Level) == 0 {
		sev = log.InfoLog
	} else {
		var sevFound bool
		sev, sevFound = log.SeverityByName(req.Level)
		if !sevFound {
			return nil, fmt.Errorf("level could not be determined: %s", req.Level)
		}
	}

	startTimestamp, err := parseInt64WithDefault(
		req.StartTime, timeutil.Now().AddDate(0, 0, -1).UnixNano())
	if err != nil {
		return nil, grpc.Errorf(codes.InvalidArgument, "StartTime could not be parsed: %s", err)
	}

	endTimestamp, err := parseInt64WithDefault(req.EndTime, timeutil.Now().UnixNano())
	if err != nil {
		return nil, grpc.Errorf(codes.InvalidArgument, "EndTime could not be parsed: %s", err)
	}

	if startTimestamp > endTimestamp {
		return nil, grpc.Errorf(codes.InvalidArgument, "StartTime: %d should not be greater than EndTime: %d",
			startTimestamp, endTimestamp)
	}

	maxEntries, err := parseInt64WithDefault(req.Max, defaultMaxLogEntries)
	if err != nil {
		return nil, grpc.Errorf(codes.InvalidArgument, "Max could not be parsed: %s", err)
	}
	if maxEntries < 1 {
		return nil, grpc.Errorf(codes.InvalidArgument, "Max: %d should be set to a value greater than 0", maxEntries)
	}

	var regex *regexp.Regexp
	if len(req.Pattern) > 0 {
		if regex, err = regexp.Compile(req.Pattern); err != nil {
			return nil, grpc.Errorf(codes.InvalidArgument, "regex pattern could not be compiled: %s", err)
		}
	}

	entries, err := log.FetchEntriesFromFiles(sev, startTimestamp, endTimestamp, int(maxEntries), regex)
	if err != nil {
		return nil, err
	}

	return marshalJSONResponse(entries)
}
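// Illustrative only: a minimal sketch of how a caller might invoke Logs. The
// field names mirror the accesses above (Level, StartTime, EndTime, Pattern,
// and Max are parsed from strings); "statusClient" is a hypothetical
// serverpb.StatusClient and is not defined in this file.
//
//	req := &serverpb.LogsRequest{
//		Level:     "warning",
//		StartTime: strconv.FormatInt(timeutil.Now().Add(-time.Hour).UnixNano(), 10),
//		Pattern:   "gossip",
//		Max:       "100",
//	}
//	resp, err := statusClient.Logs(ctx, req)
//	if err != nil {
//		// Validation failures above surface as codes.InvalidArgument errors.
//	}
//	// resp wraps the matching log entries as JSON (see marshalJSONResponse).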
// handleLocalLog returns the log entries parsed from the log files stored on
// the server. Log entries are returned in reverse chronological order. The
// following options are available:
// * "starttime" query parameter filters the log entries to only ones that
//   occurred on or after the "starttime". Defaults to a day ago.
// * "endtime" query parameter filters the log entries to only ones that
//   occurred on or before the "endtime". Defaults to the current time.
// * "pattern" query parameter filters the log entries by the provided regexp
//   pattern if it exists. Defaults to nil.
// * "max" query parameter is the hard limit of the number of returned log
//   entries. Defaults to defaultMaxLogEntries.
// * "level" which is an optional part of the URL filters the log entries to be
//   those of the corresponding severity level or worse. Defaults to "info".
func (s *statusServer) handleLocalLog(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	log.Flush()

	level := ps.ByName("level")
	var sev log.Severity
	if len(level) == 0 {
		sev = log.InfoLog
	} else {
		var sevFound bool
		sev, sevFound = log.SeverityByName(level)
		if !sevFound {
			w.WriteHeader(http.StatusBadRequest)
			fmt.Fprintf(w, "could not determine level %s", level)
			return
		}
	}

	startTimestamp, err := parseInt64WithDefault(
		r.URL.Query().Get("starttime"),
		time.Now().AddDate(0, 0, -1).UnixNano())
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		fmt.Fprintf(w, "starttime could not be parsed: %s", err)
		return
	}

	endTimestamp, err := parseInt64WithDefault(
		r.URL.Query().Get("endtime"), time.Now().UnixNano())
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		fmt.Fprintf(w, "endtime could not be parsed: %s", err)
		return
	}

	if startTimestamp > endTimestamp {
		w.WriteHeader(http.StatusBadRequest)
		fmt.Fprintf(w, "starttime: %d is greater than endtime: %d", startTimestamp, endTimestamp)
		return
	}

	maxEntries, err := parseInt64WithDefault(
		r.URL.Query().Get("max"), defaultMaxLogEntries)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		fmt.Fprintf(w, "max could not be parsed: %s", err)
		return
	}
	if maxEntries < 1 {
		w.WriteHeader(http.StatusBadRequest)
		fmt.Fprint(w, "max must be set to a value greater than 0")
		return
	}

	pattern := r.URL.Query().Get("pattern")
	var regex *regexp.Regexp
	if len(pattern) > 0 {
		if regex, err = regexp.Compile(pattern); err != nil {
			w.WriteHeader(http.StatusBadRequest)
			fmt.Fprintf(w, "could not compile regex pattern: %s", err)
			return
		}
	}

	entries, err := log.FetchEntriesFromFiles(sev, startTimestamp, endTimestamp, int(maxEntries), regex)
	if err != nil {
		log.Error(err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	b, contentType, err := util.MarshalResponse(r, entries, []util.EncodingType{util.JSONEncoding})
	if err != nil {
		log.Error(err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	w.Header().Set(util.ContentTypeHeader, contentType)
	w.Write(b)
}
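// Illustrative only: the route for handleLocalLog is registered elsewhere in
// this package, so the path below is an assumption; the query parameters
// match the ones read above. A request for the last day of warnings
// mentioning "gossip", capped at 50 entries, might look like:
//
//	GET /_status/logs/local/warning?pattern=gossip&max=50
//
// Omitting the "level" path segment defaults the severity filter to "info",
// and omitting "starttime"/"endtime" defaults the window to the trailing
// 24 hours, per the doc comment above.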