// Stop stops the cron scheduler and waits for any in-flight jobs to
// finish before returning.
func (c *Cron) Stop() {
	log.Warn("Stopping cron")
	c.Cron.Stop()
	log.Info("Waiting")
	c.wg.Wait()
	log.Info("Exiting")
}
// delete removes a container from the set of started instances.
func (s *StartedInstance) delete(dockerId string, mux *sync.Mutex) {
	mux.Lock()
	delete(*s, dockerId)
	log.Info("available instances: ", *s)
	mux.Unlock()
	runtime.Gosched()
}
// LoadConfig loads the logyard_sieve configuration, exiting on failure.
func LoadConfig() {
	var err error
	c, err = server.NewConfig("logyard_sieve", Config{})
	if err != nil {
		log.Fatalf("Unable to load logyard_sieve config; %v", err)
	}
	log.Info(getConfig().Events)
}
// LocalNodeId returns the node ID of the local node.
func LocalNodeId() string {
	once.Do(func() {
		var err error
		nodeid, err = server.LocalIP()
		if err != nil {
			common.Fatal("Failed to determine IP addr: %v", err)
		}
		log.Info("Local Node ID: ", nodeid)
	})
	return nodeid
}
func main() { log.Info("Starting docker_events") pub := logyard.Broker.NewPublisherMust() defer pub.Stop() server.MarkRunning("docker_events") for event := range docker_events.Stream() { SendToLogyard(pub, event) } }
// checkInstanceAndUpdate records a container against its instance index
// if it is not already tracked. It returns true if the entry was newly
// added, false if the container was already known.
func (s *StartedInstance) checkInstanceAndUpdate(n int, dockerId string, mux *sync.Mutex) bool {
	var exist bool
	mux.Lock()
	if _, ok := (*s)[dockerId]; !ok {
		(*s)[dockerId] = n
		log.Info("all available instances:", *s)
		exist = true
	} else {
		exist = false
	}
	mux.Unlock()
	runtime.Gosched()
	return exist
}
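// The two methods above dereference s as a map keyed by container ID, so
// StartedInstance is evidently a map type. A minimal usage sketch, assuming
// a map[string]int declaration (the shape, container ID, and instance index
// below are illustrative, not confirmed by this file):
//
//	type StartedInstance map[string]int
//
//	var (
//		instances = StartedInstance{}
//		mux       sync.Mutex
//	)
//
//	// Track instance index 0 for a newly seen container.
//	if instances.checkInstanceAndUpdate(0, "deadbeef", &mux) {
//		// ... start streaming its logs ...
//	}
//	// Later, when the container exits:
//	instances.delete("deadbeef", &mux)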
func main() {
	go common.RegisterTailCleanup()

	major, minor, patch := gozmq.Version()
	log.Infof("Starting systail (zeromq %d.%d.%d)", major, minor, patch)

	systail.LoadConfig()

	nodeid, err := server.LocalIP()
	if err != nil {
		common.Fatal("Failed to determine IP addr: %v", err)
	}
	log.Info("Host IP: ", nodeid)

	tailers := []*tail.Tail{}
	logFiles := systail.GetConfig().LogFiles
	log.Infof("Log files in configuration: %+v", logFiles)
	if len(logFiles) == 0 {
		common.Fatal("No log files exist in configuration.")
	}

	for name, logfile := range logFiles {
		t, err := tailLogFile(name, logfile, nodeid)
		if err != nil {
			common.Fatal("%v", err)
		}
		tailers = append(tailers, t)
	}

	server.MarkRunning("systail")

	for _, t := range tailers {
		if err := t.Wait(); err != nil {
			log.Errorf("Cannot tail [%s]: %s", t.Filename, err)
		}
	}

	// We don't expect any of the tailers to exit, with or without error.
	log.Error("No file left to tail; exiting.")
	os.Exit(1)
}
// NewCron creates a Cron that runs the given command on the given
// schedule, tracking in-flight runs so Stop can wait for them.
func NewCron(schedule string, command string, args []string) *Cron {
	log.Infof("Running per schedule: %v", schedule)
	c := &Cron{cron.New(), &sync.WaitGroup{}}
	c.AddFunc(schedule, func() {
		c.wg.Add(1)
		log.Infof("Executing: %v %v", command, strings.Join(args, " "))
		if err := execute(command, args); err != nil {
			log.Warnf("Failed: %v", err)
		} else {
			log.Info("Succeeded")
		}
		c.wg.Done()
	})
	return c
}
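// A minimal usage sketch, assuming Cron embeds *cron.Cron (robfig/cron),
// so Start is promoted from the embedded scheduler; the schedule string
// and command below are illustrative:
//
//	c := NewCron("@every 1h", "logrotate", []string{"/etc/logrotate.conf"})
//	c.Start()
//	// ... run until shutdown is requested ...
//	c.Stop() // stops scheduling, then waits for any in-flight run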
func tailLogFile(
	name string,
	filepath string,
	nodeid string) (*tail.Tail, error) {
	if filepath == "" {
		filepath = fmt.Sprintf("/s/logs/%s.log", name)
	}
	log.Info("Tailing... ", filepath)

	t, err := tail.TailFile(filepath, tail.Config{
		MaxLineSize: systail.GetConfig().MaxRecordSize,
		MustExist:   false,
		Follow:      true,
		// Ignore existing content, to support subsequent re-runs of systail.
		Location: &tail.SeekInfo{Offset: 0, Whence: os.SEEK_END},
		ReOpen:   true,
		Poll:     false})
	if err != nil {
		return nil, err
	}

	go func(name string, tail *tail.Tail) {
		pub := logyard.Broker.NewPublisherMust()
		defer pub.Stop()
		for line := range tail.Lines {
			// JSON must be a valid UTF-8 string.
			if !utf8.ValidString(line.Text) {
				line.Text = string([]rune(line.Text))
			}
			data, err := json.Marshal(systail.Message{
				name,
				common.NewMessageCommon(line.Text, line.Time, nodeid),
			})
			if err != nil {
				tail.Killf("Failed to encode to JSON: %v", err)
				break
			}
			pub.MustPublish("systail."+name+"."+nodeid, string(data))
		}
	}(name, t)

	return t, nil
}
// MonitorCloudEvents makes relevant cloud events available in
// application logs, Heroku style.
func MonitorCloudEvents() {
	sub := logyard.Broker.Subscribe("event.timeline")
	defer sub.Stop()
	pub := logyard.Broker.NewPublisherMust()
	defer pub.Stop()
	log.Info("Listening for app relevant cloud events...")
	for msg := range sub.Ch {
		var event sieve.Event
		err := json.Unmarshal([]byte(msg.Value), &event)
		if err != nil {
			common.Fatal("%v", err) // not expected at all
		}

		// Re-parse the event JSON record into a TimelineEvent structure.
		var t TimelineEvent
		if data, err := json.Marshal(event.Info); err != nil {
			common.Fatal("%v", err)
		} else {
			err = json.Unmarshal(data, &t)
			if err != nil {
				common.Fatal("Invalid timeline event: %v", err)
			}
		}

		var source string
		brandname := util.GetBrandName()
		if t.InstanceIndex > -1 {
			source = fmt.Sprintf("%v[%v.%v]", brandname, event.Process, t.InstanceIndex)
		} else {
			source = fmt.Sprintf("%v[%v]", brandname, event.Process)
		}

		PublishAppLog(pub, t, source, &event)
	}
	log.Warn("Finished listening for app relevant cloud events.")
	if err := sub.Wait(); err != nil {
		common.Fatal("%v", err)
	}
}
func main() {
	major, minor, patch := gozmq.Version()
	log.Infof("Starting logyard_sieve (zeromq %d.%d.%d)", major, minor, patch)

	LoadConfig()

	parser := sieve.NewStackatoParser(getConfig().Events)
	parser.DeleteSamples()

	pub := logyard.Broker.NewPublisherMust()
	defer pub.Stop()
	sub := logyard.Broker.Subscribe("systail")
	defer sub.Stop()

	server.MarkRunning("logyard_sieve")
	log.Info("Watching the systail stream on this node")

	for message := range sub.Ch {
		var record systail.Message
		err := json.Unmarshal([]byte(message.Value), &record)
		if err != nil {
			log.Warnf("failed to parse json: %s; ignoring record: %s",
				err, message.Value)
			continue
		}

		event, err := parser.Parse(record.Name, record.Text)
		if err != nil {
			log.Warnf("failed to parse event from %s: %s -- source: %s",
				record.Name, err, record.Text)
			continue
		}
		if event != nil {
			event.MessageCommon = common.NewMessageCommon(
				event.Desc, time.Unix(record.UnixTime, 0), record.NodeID)
			event.MustPublish(pub)
		}
	}
}
func (h *webSocketHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	ws, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		var errString string
		// Report handshake failures as such; anything else is unexpected.
		// (The original type assertion was inverted, mislabelling the two cases.)
		if _, ok := err.(websocket.HandshakeError); ok {
			errString = fmt.Sprintf("Handshake error: %v", err)
		} else {
			errString = fmt.Sprintf("Unknown websocket error: %v", err)
		}
		log.Info(errString)
		http.Error(w, errString, 500)
		return
	}
	log.Infof("wsutil.ServeWS start - %v", getWsConnId(r, ws))
	defer log.Infof("wsutil.ServeWS finish - %v", getWsConnId(r, ws))
	h.handler.ServeWS(w, r, &WebSocketStream{ws})
	ws.Close()
}
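// A minimal wiring sketch, assuming webSocketHandler's only field is the
// wrapped handler and that it implements a ServeWS interface; the
// WebSocketHandler interface name, NewWebSocketHandler constructor, and
// /tail route below are illustrative, not part of this file:
//
//	func NewWebSocketHandler(h WebSocketHandler) http.Handler {
//		return &webSocketHandler{h}
//	}
//
//	http.Handle("/tail", NewWebSocketHandler(myStreamHandler))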
// cleanup closes any inotify watches opened by the tail package.
func cleanup() {
	log.Info("cleanup: closing open inotify watches")
	tail.Cleanup()
}