func LoadConfig() { var err error c, err = server.NewConfig("systail", Config{}) if err != nil { common.Fatal("Unable to load systail config; %v", err) } }
func (instance *Instance) publishLineAs(pub *zmqpubsub.Publisher, source string, logname string, line *tail.Line) { if line == nil { panic("line is nil") } msg := &message.Message{ LogFilename: logname, Source: source, InstanceIndex: instance.Index, AppGUID: instance.AppGUID, AppName: instance.AppName, AppSpace: instance.AppSpace, MessageCommon: common.NewMessageCommon(line.Text, line.Time, util.LocalNodeId()), } if line.Err != nil { // Mark this as a special error record, as it is // coming from tail, not the app. msg.Source = fmt.Sprintf("%v[apptail]", util.GetBrandName()) msg.LogFilename = "" log.Warnf("[%s] %s", instance.AppName, line.Text) } err := msg.Publish(pub, false) if err != nil { common.Fatal("Unable to publish: %v", err) } }
func loadConfig() { var err error c, err = server.NewConfig("cloud_controller_ng", Config{}) if err != nil { common.Fatal("Unable to load cc_ng config; %v", err) } }
func (l *dockerListener) BlockUntilContainerStops(id string) { var total int ch := make(chan bool) id = id[:ID_LENGTH] if len(id) != ID_LENGTH { common.Fatal("Invalid docker ID length: %v", len(id)) } // Add a wait channel func() { l.mux.Lock() if _, ok := l.waiters[id]; ok { log.Warn("already added") } else { l.waiters[id] = ch } total = len(l.waiters) l.mux.Unlock() runtime.Gosched() }() // Wait log.Infof("Waiting for container %v to exit (total waiters: %d)", id, total) <-ch }
func main() { go common.RegisterTailCleanup() major, minor, patch := gozmq.Version() log.Infof("Starting systail (zeromq %d.%d.%d)", major, minor, patch) systail.LoadConfig() nodeid, err := server.LocalIP() if err != nil { common.Fatal("Failed to determine IP addr: %v", err) } log.Info("Host IP: ", nodeid) tailers := []*tail.Tail{} logFiles := systail.GetConfig().LogFiles fmt.Printf("%+v\n", logFiles) if len(logFiles) == 0 { common.Fatal("No log files exist in configuration.") } for name, logfile := range logFiles { t, err := tailLogFile(name, logfile, nodeid) if err != nil { common.Fatal("%v", err) } tailers = append(tailers, t) } server.MarkRunning("systail") for _, tail := range tailers { err := tail.Wait() if err != nil { log.Errorf("Cannot tail [%s]: %s", tail.Filename, err) } } // we don't expect any of the tailers to exit with or without // error. log.Error("No file left to tail; exiting.") os.Exit(1) }
// Make relevant cloud events available in application logs. Heroku style.
// MonitorCloudEvents subscribes to the "event.timeline" stream, converts
// each event into a TimelineEvent, and republishes it as an app log line
// attributed to "<brand>[<process>]" (with ".<index>" appended when the
// event carries an instance index). Blocks until the subscription ends;
// any decode or subscription error is fatal to the process.
func MonitorCloudEvents() {
	sub := logyard.Broker.Subscribe("event.timeline")
	defer sub.Stop()
	pub := logyard.Broker.NewPublisherMust()
	defer pub.Stop()
	log.Info("Listening for app relevant cloud events...")
	for msg := range sub.Ch {
		var event sieve.Event
		err := json.Unmarshal([]byte(msg.Value), &event)
		if err != nil {
			common.Fatal("%v", err) // not expected at all
		}
		// Re-parse the event json record into a TimelineEvent structure.
		// (Marshal event.Info back to JSON, then unmarshal into the
		// concrete struct — a simple structural conversion.)
		var t TimelineEvent
		if data, err := json.Marshal(event.Info); err != nil {
			common.Fatal("%v", err)
		} else {
			err = json.Unmarshal(data, &t)
			if err != nil {
				common.Fatal("Invalid timeline event: %v", err)
			}
		}
		// Build the Heroku-style source label; include the instance index
		// only when the event actually has one (> -1 sentinel).
		var source string
		brandname := util.GetBrandName()
		if t.InstanceIndex > -1 {
			source = fmt.Sprintf("%v[%v.%v]", brandname, event.Process, t.InstanceIndex)
		} else {
			source = fmt.Sprintf("%v[%v]", brandname, event.Process)
		}
		PublishAppLog(pub, t, source, &event)
	}
	log.Warn("Finished listening for app relevant cloud events.")
	// Surface the reason the subscription channel closed, if any.
	err := sub.Wait()
	if err != nil {
		common.Fatal("%v", err)
	}
}
// LocalNodeId returns the node ID of the local node. func LocalNodeId() string { once.Do(func() { var err error nodeid, err = server.LocalIP() if err != nil { common.Fatal("Failed to determine IP addr: %v", err) } log.Info("Local Node ID: ", nodeid) }) return nodeid }
// getUID returns the UID of the aggregator running on this node. the UID is // also shared between the local dea/stager, so that we send/receive messages // only from the local dea/stagers. func getUID() string { var UID string uidFile := "/tmp/logyard.uid" if _, err := os.Stat(uidFile); os.IsNotExist(err) { uid, err := uuid.NewV4() if err != nil { common.Fatal("%v", err) } UID = uid.String() if err = ioutil.WriteFile(uidFile, []byte(UID), 0644); err != nil { common.Fatal("%v", err) } } else { data, err := ioutil.ReadFile(uidFile) if err != nil { common.Fatal("%v", err) } UID = string(data) } log.Infof("detected logyard UID: %s\n", UID) return UID }
func (l *dockerListener) Listen() { for evt := range docker_events.Stream() { id := evt.Id[:ID_LENGTH] if len(id) != ID_LENGTH { common.Fatal("Invalid docker ID length: %v (orig: %v)", len(id), len(evt.Id)) } // Notify container stop events by closing the appropriate ch. if !(evt.Status == "die" || evt.Status == "kill") { continue } l.mux.Lock() if ch, ok := l.waiters[id]; ok { close(ch) delete(l.waiters, id) } l.mux.Unlock() } }
func PublishAppLog( pub *zmqpubsub.Publisher, t TimelineEvent, source string, event *sieve.Event) { err := (&message.Message{ LogFilename: "", Source: source, InstanceIndex: t.InstanceIndex, AppGUID: t.App.GUID, AppName: t.App.Name, AppSpace: t.App.Space, MessageCommon: common.NewMessageCommon(event.Desc, time.Unix(event.UnixTime, 0), util.LocalNodeId()), }).Publish(pub, true) if err != nil { common.Fatal("%v", err) } }
func (pubch *PubChannel) loop(stopCh chan bool) { if pubch.pub != nil { panic("loop called twice?") } pubch.pub = logyard.Broker.NewPublisherMust() select { // XXX: this delay is unfortunately required, else the publish calls // (instance.notify) below for warnings will get ignored. case <-time.After(100 * time.Millisecond): case <-stopCh: return } for data := range pubch.Ch { b, err := json.Marshal(data) if err != nil { common.Fatal("%v", err) } pubch.pub.MustPublish(pubch.key, string(b)) } }