// tailStream follows a container's stdout/stderr stream via the Docker remote
// API on localhost:4243 and publishes each line to logyard.
func (instance *Instance) tailStream(stream string, filename string, stopCh chan bool, tracker storage.Tracker) {
	var err error
	pub := logyard.Broker.NewPublisherMust()
	defer pub.Stop()

	limit, err := instance.getReadLimit(pub, stream, filename)
	if err != nil {
		log.Warn(err)
		instance.SendTimelineEvent("WARN -- %v", err)
		return
	}

	rateLimiter := GetConfig().GetLeakyBucket()

	reqUrl, err := url.Parse(fmt.Sprintf("http://localhost:4243/containers/%s/logs", instance.DockerId))
	if err != nil {
		log.Warn(err)
		return
	}
	q := reqUrl.Query()
	q.Set(stream, "true")
	q.Set("follow", "true")
	reqUrl.RawQuery = q.Encode()

	resp, err := http.Get(reqUrl.String())
	if err != nil {
		log.Warn(err)
		instance.SendTimelineEvent("WARN -- %v", err)
		return
	}
	if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusBadRequest {
		log.Warnf("HTTP error response %v from %v", resp.Status, reqUrl)
		return
	}

	t, err := tail.TailReader(util.WrapReadSeekClose(resp.Body), tail.Config{
		MaxLineSize: GetConfig().MaxRecordSize,
		MustExist:   false,
		Follow:      true,
		Location:    &tail.SeekInfo{-limit, os.SEEK_END},
		ReOpen:      false,
		Poll:        false,
		RateLimiter: rateLimiter})
	if err != nil {
		log.Warnf("Cannot tail docker stream (%s); %s", stream, err)
		instance.SendTimelineEvent("ERROR -- Cannot tail file (%s); %s", stream, err)
		return
	}

	instance.readFromTail(t, pub, stream, stopCh, filename, tracker)
}
// Stop stops the cron scheduler and waits for in-flight jobs to finish.
func (c *Cron) Stop() {
	log.Warn("Stopping cron")
	c.Cron.Stop()
	log.Info("Waiting for running cron jobs to finish")
	c.wg.Wait()
	log.Info("Cron stopped")
}
// readFromTail pumps lines from the tail into the publisher until the tail
// ends or a stop is requested, recording the current file offset in the
// tracker after each line.
func (instance *Instance) readFromTail(t *tail.Tail, pub *zmqpubsub.Publisher, name string, stopCh chan bool, filename string, tracker storage.Tracker) {
	var err error
FORLOOP:
	for {
		select {
		case line, ok := <-t.Lines:
			if !ok {
				err = t.Wait()
				break FORLOOP
			}
			currentOffset, offsetErr := t.Tell()
			if offsetErr != nil {
				log.Error(offsetErr.Error())
			}
			tracker.Update(instance.getShortDockerId(), filename, currentOffset)
			instance.publishLine(pub, name, line)
		case <-stopCh:
			err = t.Stop()
			break FORLOOP
		}
	}
	if err != nil {
		log.Warn(err)
		instance.SendTimelineEvent("WARN -- Error tailing file (%s); %s", name, err)
	}
	log.Infof("Completed tailing %v log for %v", name, instance.Identifier())
}
// BlockUntilContainerStops blocks until the docker container with the given
// ID exits, as signalled via the listener's waiter channel.
func (l *dockerListener) BlockUntilContainerStops(id string) {
	var total int
	ch := make(chan bool)

	// Validate before truncating; slicing a too-short ID would panic.
	if len(id) < ID_LENGTH {
		common.Fatal("Invalid docker ID length: %v", len(id))
	}
	id = id[:ID_LENGTH]

	// Add a wait channel
	func() {
		l.mux.Lock()
		if _, ok := l.waiters[id]; ok {
			log.Warnf("Waiter for container %v already added", id)
		} else {
			l.waiters[id] = ch
		}
		total = len(l.waiters)
		l.mux.Unlock()
		runtime.Gosched()
	}()

	// Wait
	log.Infof("Waiting for container %v to exit (total waiters: %d)", id, total)
	<-ch
}
// tailFile follows a log file on disk, resuming from the tracker's cached
// offset when one exists, and publishes each line to logyard.
func (instance *Instance) tailFile(name, filename string, stopCh chan bool, tracker storage.Tracker) {
	var err error
	var location *tail.SeekInfo
	var limit int64
	var shouldInitialize bool

	pub := logyard.Broker.NewPublisherMust()
	defer pub.Stop()

	if tracker.IsChildNodeInitialized(instance.getShortDockerId(), filename) {
		offset := tracker.GetFileCachedOffset(instance.getShortDockerId(), filename)
		location = &tail.SeekInfo{offset, os.SEEK_SET}
	} else {
		limit, err = instance.getReadLimit(pub, name, filename)
		location = &tail.SeekInfo{-limit, os.SEEK_END}
		shouldInitialize = true
	}

	if err != nil {
		log.Warn(err)
		instance.SendTimelineEvent("WARN -- %v", err)
		return
	}

	rateLimiter := GetConfig().GetLeakyBucket()

	t, err := tail.TailFile(filename, tail.Config{
		MaxLineSize: GetConfig().MaxRecordSize,
		MustExist:   true,
		Follow:      true,
		Location:    location,
		ReOpen:      false,
		Poll:        false,
		RateLimiter: rateLimiter})

	// IMPORTANT: this registration happens every time the app restarts.
	if shouldInitialize {
		tracker.InitializeChildNode(instance.getShortDockerId(), filename, INITIAL_OFFSET)
	}

	if err != nil {
		log.Warnf("Cannot tail file (%s); %s", filename, err)
		instance.SendTimelineEvent("ERROR -- Cannot tail file (%s); %s", name, err)
		return
	}

	instance.readFromTail(t, pub, name, stopCh, filename, tracker)
}
// Make relevant cloud events available in application logs. Heroku style.
func MonitorCloudEvents() {
	sub := logyard.Broker.Subscribe("event.timeline")
	defer sub.Stop()

	pub := logyard.Broker.NewPublisherMust()
	defer pub.Stop()

	log.Info("Listening for app relevant cloud events...")
	for msg := range sub.Ch {
		var event sieve.Event
		err := json.Unmarshal([]byte(msg.Value), &event)
		if err != nil {
			common.Fatal("%v", err) // not expected at all
		}

		// Re-parse the event json record into a TimelineEvent structure.
		var t TimelineEvent
		if data, err := json.Marshal(event.Info); err != nil {
			common.Fatal("%v", err)
		} else {
			err = json.Unmarshal(data, &t)
			if err != nil {
				common.Fatal("Invalid timeline event: %v", err)
			}
		}

		var source string
		brandname := util.GetBrandName()
		if t.InstanceIndex > -1 {
			source = fmt.Sprintf("%v[%v.%v]", brandname, event.Process, t.InstanceIndex)
		} else {
			source = fmt.Sprintf("%v[%v]", brandname, event.Process)
		}

		PublishAppLog(pub, t, source, &event)
	}
	log.Warn("Finished listening for app relevant cloud events.")

	err := sub.Wait()
	if err != nil {
		common.Fatal("%v", err)
	}
}