Example #1
func main() {
	flag.Parse()
	go common.RegisterTailCleanup()

	apptail.LoadConfig()

	fstorage := storage.NewFileStorage(*stateFile_path)
	tracker := storage.NewTracker(fstorage, *debug)
	tracker.LoadTailers()

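	// Periodically persist tail positions at the configured interval.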
	interval := time.Duration(apptail.GetConfig().PersistPositionIntervalSeconds) * time.Second
	go tracker.StartSubmissionTimer(interval)

	major, minor, patch := gozmq.Version()
	log.Infof("Starting apptail (zeromq %d.%d.%d)", major, minor, patch)

	log.Infof("Config: %+v", apptail.GetConfig())

	uid := getUID()

	natsclient := server.NewNatsClient(3)

	mux := &sync.Mutex{}

	n := 0
	started_instances := StartedInstance{}

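	// For each new app instance announced over NATS, start a tailer
	// unless its container is already being tailed.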
	natsclient.Subscribe("logyard."+uid+".newinstance", func(instance *apptail.Instance) {

		n++
		if started_instances.checkInstanceAndUpdate(n, instance.DockerId, mux) {
			go func() {
				instance.Tail(tracker)
				started_instances.delete(instance.DockerId, mux)
			}()
		}

	})

	natsclient.Publish("logyard."+uid+".start", []byte("{}"))
	log.Infof("Waiting for app instances ...")

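	// Watch for Docker container lifecycle events in the background.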
	go docker.DockerListener.Listen()

	// clean up the cache after restart
	docker.DockerListener.TrackerCleanUp(tracker)

	server.MarkRunning("apptail")

	apptail_event.MonitorCloudEvents()

}
Example #2
func main() {
	go common.RegisterTailCleanup()

	major, minor, patch := gozmq.Version()
	log.Infof("Starting systail (zeromq %d.%d.%d)", major, minor, patch)

	systail.LoadConfig()

	nodeid, err := server.LocalIP()
	if err != nil {
		common.Fatal("Failed to determine IP addr: %v", err)
	}
	log.Info("Host IP: ", nodeid)

	tailers := []*tail.Tail{}

	logFiles := systail.GetConfig().LogFiles

	log.Infof("Configured log files: %+v", logFiles)
	if len(logFiles) == 0 {
		common.Fatal("No log files exist in configuration.")
	}

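	// Start one tailer per configured log file; any failure to tail is fatal.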
	for name, logfile := range logFiles {
		t, err := tailLogFile(name, logfile, nodeid)
		if err != nil {
			common.Fatal("%v", err)
		}
		tailers = append(tailers, t)
	}

	server.MarkRunning("systail")

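	// Block on each tailer; they are expected to run indefinitely.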
	for _, t := range tailers {
		err := t.Wait()
		if err != nil {
			log.Errorf("Cannot tail [%s]: %s", t.Filename, err)
		}
	}

	// None of the tailers is expected to exit, with or without an
	// error; reaching this point means nothing is left to tail.
	log.Error("No file left to tail; exiting.")
	os.Exit(1)
}
Example #3
func main() {
	major, minor, patch := gozmq.Version()
	log.Infof("Starting logyard_sieve (zeromq %d.%d.%d)", major, minor, patch)

	LoadConfig()

	parser := sieve.NewStackatoParser(getConfig().Events)
	parser.DeleteSamples()

	pub := logyard.Broker.NewPublisherMust()
	defer pub.Stop()
	sub := logyard.Broker.Subscribe("systail")
	defer sub.Stop()

	server.MarkRunning("logyard_sieve")

	log.Info("Watching the systail stream on this node")
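	// Decode each systail record, run it through the parser, and
	// publish any resulting event.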
	for message := range sub.Ch {
		var record systail.Message
		err := json.Unmarshal([]byte(message.Value), &record)
		if err != nil {
			log.Warnf("failed to parse json: %s; ignoring record: %s",
				err, message.Value)
			continue
		}

		event, err := parser.Parse(record.Name, record.Text)
		if err != nil {
			log.Warnf(
				"failed to parse event from %s: %s -- source: %s",
				record.Name, err, record.Text)
			continue
		}
		if event != nil {
			event.MessageCommon = common.NewMessageCommon(
				event.Desc, time.Unix(record.UnixTime, 0), record.NodeID)
			event.MustPublish(pub)
		}

	}
}
Example #4
func main() {
	major, minor, patch := gozmq.Version()
	log.Infof("Starting logyard (Go %s; ZeroMQ %d.%d.%d)",
		runtime.Version(), major, minor, patch)

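	// The drain manager runs the configured log drains in the background.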
	m := drain.NewDrainManager()
	log.Info("Starting drain manager")
	go m.Run()
	// SIGTERM handle for stopping running drains.
	go func() {
		// signal.Notify needs a buffered channel so a signal delivered
		// before the receive is not dropped.
		sigchan := make(chan os.Signal, 1)
		signal.Notify(sigchan, syscall.SIGTERM)
		<-sigchan
		log.Info("Stopping all drains before exiting")
		m.Stop()
		log.Info("Exiting now.")
		os.Exit(0)
	}()

	server.MarkRunning("logyard")

	log.Info("Running pubsub broker")
	log.Fatal(logyard.Broker.Run())
}
Example #5
func (t *TransportZmq) Init() (err error) {
	// Only initialise once
	if t.ready {
		// Already initialised: just ask the poller whether we can send again
		t.bridge_chan <- []byte(zmq_signal_output)
		return nil
	}

	t.context, err = zmq.NewContext()
	if err != nil {
		return fmt.Errorf("Failed to create ZMQ context: %s", err)
	}
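	// The deferred cleanups below fire only if a later step fails,
	// unwinding any partially-initialised sockets.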
	defer func() {
		if err != nil {
			t.context.Close()
		}
	}()

	// Control sockets to connect bridge to poller
	bridge_in, err := t.context.NewSocket(zmq.PUSH)
	if err != nil {
		return fmt.Errorf("Failed to create internal ZMQ PUSH socket: %s", err)
	}
	defer func() {
		if err != nil {
			bridge_in.Close()
		}
	}()

	if err = bridge_in.Bind("inproc://notify"); err != nil {
		return fmt.Errorf("Failed to bind internal ZMQ PUSH socket: %s", err)
	}

	bridge_out, err := t.context.NewSocket(zmq.PULL)
	if err != nil {
		return fmt.Errorf("Failed to create internal ZMQ PULL socket: %s", err)
	}
	defer func() {
		if err != nil {
			bridge_out.Close()
		}
	}()

	if err = bridge_out.Connect("inproc://notify"); err != nil {
		return fmt.Errorf("Failed to connect internal ZMQ PULL socket: %s", err)
	}

	// Outbound dealer socket will fair-queue load balance amongst peers
	if t.dealer, err = t.context.NewSocket(zmq.DEALER); err != nil {
		return fmt.Errorf("Failed to create ZMQ DEALER socket: %s", err)
	}
	defer func() {
		if err != nil {
			t.dealer.Close()
		}
	}()

	if err = t.dealer.Monitor("inproc://monitor", zmq.EVENT_ALL); err != nil {
		return fmt.Errorf("Failed to bind DEALER socket to monitor: %s", err)
	}

	if err = t.configureSocket(); err != nil {
		return fmt.Errorf("Failed to configure DEALER socket: %s", err)
	}

	// Configure reconnect interval
	if err = t.dealer.SetReconnectIvlMax(t.net_config.Reconnect); err != nil {
		return fmt.Errorf("Failed to set ZMQ reconnect interval: %s", err)
	}

	// We should not LINGER. If we do, socket Close and also context Close
	// will block indefinitely until the message queue is flushed. Set to 0
	// to discard all messages immediately when we call Close.
	if err = t.dealer.SetLinger(0); err != nil {
		return fmt.Errorf("Failed to set ZMQ linger period: %s", err)
	}

	// Set the outbound queue
	if err = t.dealer.SetSndHWM(int(t.config.PeerSendQueue)); err != nil {
		return fmt.Errorf("Failed to set ZMQ send highwater: %s", err)
	}

	// Monitor socket
	if t.monitor, err = t.context.NewSocket(zmq.PULL); err != nil {
		return fmt.Errorf("Failed to create monitor ZMQ PULL socket: %s", err)
	}
	defer func() {
		if err != nil {
			t.monitor.Close()
		}
	}()

	if err = t.monitor.Connect("inproc://monitor"); err != nil {
		return fmt.Errorf("Failed to connect monitor ZMQ PULL socket: %s", err)
	}

	// Register endpoints
	pool := NewAddressPool(t.net_config.Servers)
	endpoints := 0

	if t.net_config.Rfc2782Srv {
		pool.SetRfc2782(true, t.net_config.Rfc2782Service)
	}

	for {
		addressport, err := pool.NextServer()
		if err != nil {
			return err
		}

		if err = t.dealer.Connect("tcp://" + addressport); err != nil {
			log.Warning("Failed to register %s with ZMQ, skipping", addressport)
			goto NextAddress
		}

		log.Info("Registered %s with ZMQ", addressport)
		endpoints++

	NextAddress:
		if pool.IsLastServer() {
			break
		}
	}

	if endpoints == 0 {
		return errors.New("Failed to register any of the specified endpoints.")
	}

	major, minor, patch := zmq.Version()
	log.Info("libzmq version %d.%d.%d", major, minor, patch)

	// Signal channels
	t.bridge_chan = make(chan []byte, 1)
	t.send_chan = make(chan *ZMQMessage, 2)
	t.recv_chan = make(chan interface{}, 1)
	t.recv_bridge_chan = make(chan interface{}, 1)
	t.can_send = make(chan int, 1)

	// WaitGroup used to wait for the bridge and poller goroutines at shutdown
	t.wait.Add(2)

	// Bridge between channels and ZMQ
	go t.bridge(bridge_in)

	// The poller
	go t.poller(bridge_out)

	t.ready = true
	t.send_buff = nil
	t.recv_buff = nil
	t.recv_body = false

	return nil
}
Example #6
func main() {
	major, minor, patch := zmq.Version()
	fmt.Printf("Current 0MQ version is %d.%d.%d\n", major, minor, patch)
}