func ReadFile(filename string, stream chan<- string) {
	seekInfo := tail.SeekInfo{Whence: os.SEEK_END}
	t, err := tail.TailFile(filename, tail.Config{Follow: true, Location: &seekInfo})
	if err != nil {
		// the original ignored this error, which panics below when t is nil
		log.Printf("cannot tail %s: %v", filename, err)
		return
	}
	for line := range t.Lines {
		stream <- line.Text + "\n"
	}
}
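// A minimal caller sketch for ReadFile above (hypothetical, not from the
// original source): ReadFile blocks while tailing, so it runs in a goroutine
// and the caller drains the channel. The path is illustrative only.
func exampleReadFile() {
	stream := make(chan string)
	go ReadFile("/var/log/app.log", stream)
	for text := range stream {
		fmt.Print(text) // each entry already ends with "\n"
	}
}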
func runAgent(aSender sender, fileToTail string) {
	// prepare filters
	compileFilters()
	if len(fileToTail) > 0 {
		// keep watching the file
		for {
			if t, err := tail.TailFile(fileToTail, tail.Config{Follow: true, MustExist: true}); err != nil {
				fmt.Printf("Error in receive: %v\n", err)
			} else {
				//filterAndSend(zmqSender, t.Lines)
				filterAndBulkSend(aSender, t.Lines)
			}
			// The ReOpen option is not working in the TailFile config,
			// so reopen after 1s in case the file is lost.
			timer := time.NewTimer(time.Second * 1)
			<-timer.C
		}
	} else {
		// read from stdin
		fmt.Println("Reading from STDIN")
		lines := make(chan *tail.Line)
		go func(lines chan *tail.Line) {
			bio := bufio.NewReader(os.Stdin)
			for {
				line, _, err := bio.ReadLine()
				lines <- &tail.Line{Text: string(line), Time: time.Now(), Err: err}
			}
		}(lines)
		filterAndBulkSend(aSender, lines)
	}
}
func NewFile(fpath string) (*File, error) {
	// start reading from the end of the file
	seekInfo := &tail.SeekInfo{Offset: 0, Whence: os.SEEK_END}
	file := &File{}
	var err error
	file.Tail, err = tail.TailFile(fpath, tail.Config{Follow: true, ReOpen: true, Location: seekInfo})
	return file, err
}
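// A hedged usage sketch for NewFile above (illustrative, not from the
// original source): assuming File exposes its Tail field as shown, a caller
// can drain lines appended after startup like this.
func exampleNewFile() {
	f, err := NewFile("/var/log/app.log")
	if err != nil {
		log.Fatal(err)
	}
	for line := range f.Tail.Lines {
		fmt.Println(line.Text)
	}
}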
func tailLog(cs chan []byte, filePath string, w io.Writer, closechan chan bool) {
	t, err := tail.TailFile(filePath, tail.Config{Follow: true})
	if err != nil {
		closechan <- true
		return
	}
	for line := range t.Lines {
		// avoid using line.Text as a format string; it may contain '%'
		fmt.Fprint(w, line.Text)
	}
	closechan <- true
}
func main() {
	var token = flag.String("token", "nil", "log token")
	var logfile = flag.String("logfile", "/tmp/foo.txt", "log file to follow")
	var seekInfoOnStart = &tail.SeekInfo{Offset: 0, Whence: os.SEEK_END}
	flag.Parse()
	fmt.Println("using token: ", *token)
	if _, err := os.Stat(*logfile); os.IsNotExist(err) {
		fmt.Printf("no such file or directory: %s\n", *logfile)
		return
	}
	// connect to the log endpoint with the supplied token
	le, err := le_go.Connect(*token)
	if err != nil {
		panic(err)
	}
	defer le.Close()
	t, err := tail.TailFile(*logfile, tail.Config{Follow: true, ReOpen: true, Location: seekInfoOnStart, Logger: tail.DiscardingLogger})
	if err != nil {
		panic(err)
	}
	for line := range t.Lines {
		le.Println(line.Text)
	}
}
func tailFile(ctx context.Context, file string, dest *os.File) {
	defer wg.Done()
	t, err := tail.TailFile(file, tail.Config{
		Follow: true,
		ReOpen: true,
		//Poll: true,
		Logger: tail.DiscardingLogger,
	})
	if err != nil {
		log.Fatalf("unable to tail %s: %s", file, err)
	}
	// main loop
	for {
		select {
		// if the context is cancelled, stop tailing and exit the loop
		case <-ctx.Done():
			t.Stop()
			tail.Cleanup()
			return
		// otherwise, get the next log line and echo it to dest
		case line := <-t.Lines:
			fmt.Fprintln(dest, line.Text)
		}
	}
}
// tailOne tails a single file.
func tailOne(file string, excludePatterns []*regexp.Regexp, logger *syslog.Logger, wr *WorkerRegistry, severity syslog.Priority, facility syslog.Priority, poll bool) {
	defer wr.Remove(file)
	wr.Add(file)
	tailConfig := tail.Config{
		ReOpen:    true,
		Follow:    true,
		MustExist: true,
		Poll:      poll,
		Location:  &tail.SeekInfo{Offset: 0, Whence: os.SEEK_END},
	}
	t, err := tail.TailFile(file, tailConfig)
	if err != nil {
		log.Errorf("%s", err)
		return
	}
	for line := range t.Lines {
		if !matchExps(line.Text, excludePatterns) {
			logger.Packets <- syslog.Packet{
				Severity: severity,
				Facility: facility,
				Time:     time.Now(),
				Hostname: logger.ClientHostname,
				Tag:      path.Base(file),
				Message:  line.Text,
			}
			log.Tracef("Forwarding: %s", line.Text)
		} else {
			log.Tracef("Not forwarding: %s", line.Text)
		}
	}
	log.Errorf("Tail worker exited abnormally")
}
func main() {
	flag.Parse()
	config := loadConfig()

	// Determine the parsing function for this log file
	parserFn, err := parsingFunctionForType(config.Parser)
	if err != nil {
		log.Fatal(err)
	}

	// Connect to BigQuery & create tables
	tabledataService := connectToBigquery(&config)

	// Start tailing the log file & parsing the entries
	seek := tail.SeekInfo{Offset: 0, Whence: os.SEEK_END}
	t, err := tail.TailFile(config.LogFilename, tail.Config{
		Location: &seek,
		Follow:   true,
		Logger:   tail.DiscardingLogger,
	})
	if err != nil {
		log.Fatal(err)
	}
	for line := range t.Lines {
		// strip backslashes before parsing
		parsed, err := parserFn(config.Host, strings.Replace(line.Text, "\\", "", -1))
		if err == nil {
			go stream(&config, tabledataService, parsed)
		}
	}
}
func main() {
	parseCommandLine()
	t, err := tail.TailFile(accessLog, tail.Config{Follow: true, ReOpen: true, MustExist: false})
	if err != nil {
		panic(err)
	}
	hits := make(chan *loghit.LogHit)
	defer close(hits)
	errors := make(chan error)
	defer close(errors)
	for i := 0; i < *parserRoutines; i++ {
		go parseLines(t.Lines, hits, errors)
	}
	for i := 0; i < *posterRoutines; i++ {
		go postStats(*statPrefix, ezKey, hits)
	}
	logWriter, err := syslog.New(syslog.LOG_ERR, "nginx2stathat")
	if err != nil {
		panic(err)
	}
	for err := range errors {
		logWriter.Err(err.Error())
	}
}
func tailFile(path string) {
	mkfifo(path)
	t, err := tail.TailFile(path, tail.Config{Follow: true})
	if err != nil {
		// the original ignored this error, which panics below when t is nil
		log.Error(err)
		return
	}
	for line := range t.Lines {
		log.Info(line.Text)
	}
}
func main() {
	hsLogFile := flag.String("log", "no-log-file-specified", "The file path to the Hearthstone log file.")
	hsUsername := flag.String("username", "no-username-specified", "Your battlenet ID (without the #1234).")
	flag.Parse()
	createManaUpdateParser(*hsUsername)
	// renamed from `log` to avoid shadowing the log package; handle the
	// error the original discarded
	logTail, err := tail.TailFile(*hsLogFile, tail.Config{Follow: true})
	if err != nil {
		fmt.Println("ERROR:", err)
		return
	}
	gs := GameState{}
	gs.resetGameState()
	solutionChan := make(chan *DecisionTreeNode)
	seenUsername := false
	var deepestSolution, shortestSolution *DecisionTreeNode
	var abortChan *chan time.Time
	for {
		select {
		case line := <-logTail.Lines:
			if !seenUsername && strings.Contains(line.Text, *hsUsername) {
				seenUsername = true
			}
			if turnStart, somethingHappened := ParseHearthstoneLogLine(line.Text, &gs); turnStart || somethingHappened {
				if !seenUsername {
					fmt.Println("WARN: Waiting to see --username before looking for solutions.")
					continue
				}
				//fmt.Println("It is the start of turn for:", gs.LastManaAdjustPlayer)
				if abortChan != nil {
					*abortChan <- time.Now()
					abortChan = nil
					deepestSolution = nil
					shortestSolution = nil
				}
				newAbortChan := make(chan time.Time, 1)
				abortChan = &newAbortChan
				go WalkDecisionTree(gs.DeepCopy(), solutionChan, newAbortChan)
			}
		case solution := <-solutionChan:
			if deepestSolution == nil {
				deepestSolution = solution
				shortestSolution = solution
				fmt.Println("INFO: Solution found")
				prettyPrintDecisionTreeNode(solution)
			}
			if len(deepestSolution.Moves) < len(solution.Moves) {
				deepestSolution = solution
				fmt.Println("INFO: Another solution with more BM:")
				prettyPrintDecisionTreeNode(solution)
			}
			if len(shortestSolution.Moves) > len(solution.Moves) {
				shortestSolution = solution
				fmt.Println("INFO: Another solution with fewer steps:")
				prettyPrintDecisionTreeNode(solution)
			}
		}
	}
}
func fileToTail(fileName string) *tail.Tail {
	// named t rather than `tail` to avoid shadowing the tail package
	t, err := tail.TailFile(fileName, tail.Config{
		Follow: true,
		ReOpen: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	return t
}
func liveStatus(c web.C, w http.ResponseWriter, r *http.Request) {
	log.Printf("Running Task Id => %s\n", c.URLParams["id"])
	id, err := strconv.ParseInt(c.URLParams["id"], 10, 32)
	if err != nil {
		http.Error(w, "Invalid id", http.StatusInternalServerError)
		return
	}
	runningTask := gSubakoCtx.RunningTasks.Get(int(id))
	if runningTask == nil {
		http.Error(w, "task is nil", http.StatusInternalServerError)
		return
	}
	// check the flusher and open the log before writing any headers,
	// so failures can still produce a proper error status
	flusher, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "Failed to cast to http.Flusher", http.StatusInternalServerError)
		return
	}
	t, err := tail.TailFile(runningTask.LogFilePath, tail.Config{Follow: true})
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
	w.Header().Set("X-Content-Type-Options", "nosniff") // important
	w.WriteHeader(http.StatusOK)
	// stop when the timeout expires
	go func() {
		time.Sleep(60 * time.Second) // timeout: 60sec
		t.Stop()
	}()
	// stop when the task finishes
	go func() {
		for {
			if !runningTask.IsActive() {
				t.Stop()
				break
			}
			time.Sleep(1 * time.Second)
		}
	}()
	// stream the logs
	for line := range t.Lines {
		fmt.Fprintln(w, line.Text)
		flusher.Flush() // trigger "chunked" encoding and send a chunk
	}
	fmt.Fprintf(w, "Current Status => %s\n", runningTask.Status)
	log.Printf("Task %d has finished", runningTask.Id)
}
// readLogsFromFile reads log lines from a file and sends them to `queue`.
// It notifies `shutdown` when the file has been completely read.
func readLogsFromFile(fname string, queue chan<- Logline, shutdown chan<- string, savestate <-chan bool) {
	var statefile string
	var offset int64
	var inode uint64
	var doFollowFile bool = !*options.nofollow
	if *options.verbose {
		log.Printf("readLogsFromFile: dofollow=%v", doFollowFile)
	}
	if doFollowFile {
		statefile = fname + ".state"
		inode = readFileInode(fname)
		offset = readStateFile(fname, statefile, inode)
	}
	// set up the tail config
	config := tail.Config{
		Follow:    doFollowFile,
		ReOpen:    doFollowFile,
		MustExist: true,
		Logger:    tail.DiscardingLogger,
		Location: &tail.SeekInfo{
			Offset: offset,
			Whence: 0,
		},
	}
	t, err := tail.TailFile(fname, config)
	if err != nil {
		shutdown <- fmt.Sprintf("cannot tail file %s: %v", fname, err)
		// the original fell through here and dereferenced a nil tail
		return
	} else if *options.verbose {
		log.Printf("opened log file %s", fname)
	}
	// now just wait for input and control channels
	for {
		select {
		case line := <-t.Lines:
			if line != nil {
				queue <- Logline(line.Text)
			} else {
				shutdown <- "Logfile closed"
				return
			}
		case <-savestate:
			offset, _ := t.Tell()
			if doFollowFile {
				writeStateFile(statefile, inode, offset)
			}
			if *options.verbose {
				log.Printf("reading %s, now at offset %d", fname, offset)
			}
		}
	}
}
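// A hypothetical driver for readLogsFromFile above (not from the original
// source): it assumes Logline's underlying type is string, and shows the
// checkpoint pattern — a ticker periodically signals `savestate` so the
// reader persists its current offset.
func exampleReadLogsDriver() {
	queue := make(chan Logline, 100)
	shutdown := make(chan string)
	savestate := make(chan bool)
	go readLogsFromFile("/var/log/app.log", queue, shutdown, savestate)
	ticker := time.NewTicker(30 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case line := <-queue:
			fmt.Println(string(line))
		case <-ticker.C:
			savestate <- true
		case msg := <-shutdown:
			fmt.Println("reader stopped:", msg)
			return
		}
	}
}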
func TailServiceLog(service *services.Service, wg *sync.WaitGroup) {
	defer wg.Done()
	spacingLength := services.MaxServiceNameLength + 2 - len(service.Name)
	t, err := tail.TailFile(service.LogFilePath, tail.Config{Follow: true})
	if err != nil {
		log.Error(err.Error())
		return
	}
	for line := range t.Lines {
		logReceiver <- fmt.Sprintf("@{%s}%s@{|}%s| %s\n", service.Color, service.Name, strings.Repeat(" ", spacingLength), line.Text)
	}
}
func main() {
	t, err := tail.TailFile("/tmp/test/log.log", tail.Config{Follow: true})
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	for line := range t.Lines {
		processLine(line)
	}
}
// NewFile creates a File object.
func NewFile(fpath string) (*File, error) {
	file := &File{}
	var err error
	// decide how the file should be read
	if Conf.ReadWholeLog && Conf.ReadOnce {
		// read the whole file exactly once
		Conf.Logger.Printf("read whole file once %+v", fpath)
		file.Tail, err = tail.TailFile(fpath, tail.Config{})
	} else if Conf.ReadWholeLog {
		// read the whole file, then keep following appended content
		Conf.Logger.Printf("read whole file and continue %+v", fpath)
		file.Tail, err = tail.TailFile(fpath, tail.Config{Follow: true, ReOpen: true})
	} else {
		// start reading from the end of the file;
		// Offset is the file-pointer position, and Whence is the reference
		// point: 0 = start of file, 1 = current position, 2 = end of file
		seekInfo := &tail.SeekInfo{Offset: 0, Whence: 2}
		file.Tail, err = tail.TailFile(fpath, tail.Config{Follow: true, ReOpen: true, Location: seekInfo})
	}
	return file, err
}
func (t *Tailer) AddFile(filename string) error {
	if _, ok := t.files[filename]; ok {
		// already tailing this file
		return nil
	}
	config := tail.Config{Location: &tail.SeekInfo{Whence: os.SEEK_END}, Follow: true}
	tf, err := tail.TailFile(filename, config)
	if err != nil {
		return err
	}
	t.files[filename] = tf
	return nil
}
func tailFile(logfile string, readall bool) (*tail.Tail, error) {
	whence := os.SEEK_END
	if readall {
		whence = os.SEEK_SET
	}
	return tail.TailFile(logfile, tail.Config{
		MustExist: true,                   // Fail early if the file does not exist
		ReOpen:    true,                   // Reopen recreated files (tail -F)
		Follow:    true,                   // Continue looking for new lines (tail -f)
		Logger:    tail.DiscardingLogger,  // Disable logging
		Location:  &tail.SeekInfo{Offset: 0, Whence: whence}, // Start at the beginning or end of the file
	})
}
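// A minimal usage sketch for tailFile above (assumed, not part of the
// original source): readall=true replays the whole file from the start,
// while readall=false reports only lines appended after the tail starts.
func exampleTailFile() {
	t, err := tailFile("/var/log/app.log", false)
	if err != nil {
		log.Fatal(err)
	}
	for line := range t.Lines {
		fmt.Println(line.Text)
	}
}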
// Filewatcher tails a given file and adds appended lines to a Message queue.
func Filewatcher(comp Component, conf Configuration, outgoing *fifo.Queue) {
	t, err := tail.TailFile(comp.File, tail.Config{Follow: true})
	if err != nil {
		// the original ignored this error, which panics below when t is nil
		return
	}
	for line := range t.Lines {
		outgoing.Add(&Message{
			Project:   conf.Project,
			Env:       conf.Env,
			Component: comp.Name,
			Text:      line.Text,
			Timestamp: time.Now().Unix(),
		})
	}
}
func WatchFile(filepath string, shippingChan chan<- string) {
	tailed, err := tail.TailFile(filepath, tail.Config{
		Location: &tail.SeekInfo{Whence: os.SEEK_END},
		Follow:   true,
		ReOpen:   true,
	})
	checkErr(err)
	for line := range tailed.Lines {
		if len(line.Text) > 0 {
			shippingChan <- line.Text
		}
	}
}
func main() {
	log.Println("Started!")
	readconf("./conf/parse.conf")
	fmt.Println("config:", config)
	con, err := init_fluxdb(config.Backend_influxdb)
	if err != nil {
		fmt.Println("init_fluxdb error:", err)
	}
	config.con = con
	t, err := tail.TailFile(config.Filepath, tail.Config{Follow: true})
	if err != nil {
		log.Fatal(err)
	}
	for line := range t.Lines {
		processlog(line.Text)
	}
}
func readFileWithLines(filename string) (string, error) {
	// with Follow disabled, the Lines channel closes at EOF,
	// so this reads the whole file exactly once
	t, err := tail.TailFile(filename, tail.Config{Follow: false, Poll: true})
	if err != nil {
		return "", err
	}
	var data strings.Builder
	for line := range t.Lines {
		data.WriteString(line.Text)
		data.WriteString("\n") // tail strips the newline from each line
	}
	return data.String(), nil
}
func (instance *Instance) tailFile(name, filename string, stopCh chan bool, tracker storage.Tracker) {
	var err error
	var location *tail.SeekInfo
	var limit int64
	var shouldInitialize bool

	pub := logyard.Broker.NewPublisherMust()
	defer pub.Stop()

	if tracker.IsChildNodeInitialized(instance.getShortDockerId(), filename) {
		offset := tracker.GetFileCachedOffset(instance.getShortDockerId(), filename)
		location = &tail.SeekInfo{Offset: offset, Whence: os.SEEK_SET}
	} else {
		limit, err = instance.getReadLimit(pub, name, filename)
		location = &tail.SeekInfo{Offset: -limit, Whence: os.SEEK_END}
		shouldInitialize = true
	}
	if err != nil {
		log.Warn(err)
		instance.SendTimelineEvent("WARN -- %v", err)
		return
	}

	rateLimiter := GetConfig().GetLeakyBucket()
	t, err := tail.TailFile(filename, tail.Config{
		MaxLineSize: GetConfig().MaxRecordSize,
		MustExist:   true,
		Follow:      true,
		Location:    location,
		ReOpen:      false,
		Poll:        false,
		RateLimiter: rateLimiter})

	// IMPORTANT: this registration happens every time the app restarts
	if shouldInitialize {
		tracker.InitializeChildNode(instance.getShortDockerId(), filename, INITIAL_OFFSET)
	}

	if err != nil {
		log.Warnf("Cannot tail file (%s); %s", filename, err)
		instance.SendTimelineEvent("ERROR -- Cannot tail file (%s); %s", name, err)
		return
	}

	instance.readFromTail(t, pub, name, stopCh, filename, tracker)
}
func readFileBytes(filename string) ([]byte, error) {
	// collect the chunks that are read
	var data []byte
	t, err := tail.TailFile(filename, tail.Config{Follow: false, Poll: true})
	if err != nil {
		// the original called log.Fatal here, making the error return unreachable
		log.Println("reading " + filename)
		return nil, err
	}
	for line := range t.Lines {
		// note: tail strips the trailing newline from each line
		data = append(data, line.Text...)
	}
	return data, nil
}
func tailFile(filename string, config tail.Config, done chan bool) {
	defer func() { done <- true }()
	t, err := tail.TailFile(filename, config)
	if err != nil {
		fmt.Println(err)
		return
	}
	for line := range t.Lines {
		fmt.Println(line.Text)
	}
	err = t.Wait()
	if err != nil {
		fmt.Println(err)
	}
}
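// A sketch of the done-channel pattern used by tailFile above (assumed usage,
// not from the original source): start one tail per file and block until
// every goroutine signals completion.
func exampleTailAll() {
	files := []string{"/tmp/a.log", "/tmp/b.log"}
	done := make(chan bool)
	for _, f := range files {
		go tailFile(f, tail.Config{Follow: false}, done)
	}
	for range files {
		<-done
	}
}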
func monitorTunnels(url, ngrokLogPath string) {
	update, err := tail.TailFile(ngrokLogPath, tail.Config{
		Follow: true,
		ReOpen: true})
	if err != nil {
		// the original ignored this error, which panics below when update is nil
		Logger.Printf("cannot tail %s: %v", ngrokLogPath, err)
		return
	}
	for line := range update.Lines {
		if strings.Contains(line.Text, "[INFO] [client] Tunnel established at") {
			terms := strings.Split(line.Text, " ")
			tunnel := terms[len(terms)-1]
			Logger.Printf("Found new tunnel: %s", tunnel)
			if tunnel != "" {
				patchTunnel(url, tunnel)
			}
		}
	}
}
func (self *ShellExecutor) tailf(fileName string, taskId string) *tail.Tail {
	t, err := tail.TailFile(fileName, tail.Config{Follow: true, ReOpen: true})
	if err != nil {
		// log.Fatal exits, so no return is needed here
		log.Fatal(err)
	}
	go func() {
		for line := range t.Lines {
			fmt.Println(line.Text)
			self.sendStatusUpdate(taskId, mesos.TaskState_TASK_RUNNING, line.Text)
		}
	}()
	return t
}
func newTailer(filename string, colorCode int, maxWidth int) (*Tailer, error) {
	t, err := tail.TailFile(filename, tail.Config{
		Follow:   true,
		Location: seekInfoOnStart,
		Logger:   tail.DiscardingLogger,
	})
	if err != nil {
		return nil, err
	}
	return &Tailer{
		Tail:      t,
		colorCode: colorCode,
		maxWidth:  maxWidth,
	}, nil
}
func tailLogFile(name string, filepath string, nodeid string) (*tail.Tail, error) {
	if filepath == "" {
		filepath = fmt.Sprintf("/s/logs/%s.log", name)
	}
	log.Info("Tailing... ", filepath)

	t, err := tail.TailFile(filepath, tail.Config{
		MaxLineSize: systail.GetConfig().MaxRecordSize,
		MustExist:   false,
		Follow:      true,
		// ignore existing content, to support subsequent re-runs of systail
		Location: &tail.SeekInfo{Offset: 0, Whence: os.SEEK_END},
		ReOpen:   true,
		Poll:     false})
	if err != nil {
		return nil, err
	}

	go func(name string, tail *tail.Tail) {
		pub := logyard.Broker.NewPublisherMust()
		defer pub.Stop()
		for line := range tail.Lines {
			// JSON must be a valid UTF-8 string; re-encoding via []rune
			// replaces invalid bytes with the Unicode replacement character
			if !utf8.ValidString(line.Text) {
				line.Text = string([]rune(line.Text))
			}
			data, err := json.Marshal(systail.Message{
				name,
				common.NewMessageCommon(line.Text, line.Time, nodeid),
			})
			if err != nil {
				tail.Killf("Failed to encode to JSON: %v", err)
				break
			}
			pub.MustPublish("systail."+name+"."+nodeid, string(data))
		}
	}(name, t)

	return t, nil
}