Code Example #1
File: main.go Project: shageman/loggregator
func main() {
	flag.Parse()

	if *version {
		fmt.Printf("\n\nversion: %s\ngitSha: %s\n\n", versionNumber, gitSha)
		return
	}

	level := gosteno.LOG_INFO

	if *logLevel {
		level = gosteno.LOG_DEBUG
	}

	loggingConfig := &gosteno.Config{
		Sinks:     make([]gosteno.Sink, 1),
		Level:     level,
		Codec:     gosteno.NewJsonCodec(),
		EnableLOC: true,
	}
	if strings.TrimSpace(*logFilePath) == "" {
		loggingConfig.Sinks[0] = gosteno.NewIOSink(os.Stdout)
	} else {
		loggingConfig.Sinks[0] = gosteno.NewFileSink(*logFilePath)
	}
	gosteno.Init(loggingConfig)
	logger := gosteno.NewLogger("deaagent")

	loggregatorClient := loggregatorclient.NewLoggregatorClient(*loggregatorAddress, logger, 4096)

	agent := deaagent.NewAgent(*instancesJsonFilePath, logger)
	agent.Start(loggregatorClient)
}
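All of these variants dereference package-level flags (*version, *logLevel, *logFilePath, *instancesJsonFilePath, *loggregatorAddress, *configFile) that the excerpts omit. As a rough sketch only (the flag names and default values shown here are assumptions, not taken from these excerpts):

var (
	version               = flag.Bool("version", false, "Version info")
	logFilePath           = flag.String("logFile", "", "The agent log file; defaults to STDOUT")
	logLevel              = flag.Bool("debug", false, "Verbose (debug) logging")
	instancesJsonFilePath = flag.String("instancesFile", "/var/vcap/data/dea_next/db/instances.json", "The DEA instances JSON file")
	loggregatorAddress    = flag.String("server", "localhost:3456", "The loggregator host:port to forward logs to")
	configFile            = flag.String("config", "config/dea_logging_agent.json", "Location of the agent config JSON file")
)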
Code Example #2
File: main.go Project: lyuyun/loggregator
func main() {
	flag.Parse()

	// ** Config Setup
	config, err := readConfig(*configFile)
	if err != nil {
		panic(err)
	}

	dropsonde.Initialize(config.MetronAddress, "dea_logging_agent")

	log := logger.NewLogger(*logLevel, *logFilePath, "deaagent", config.Syslog)
	log.Info("Startup: Setting up the loggregator dea logging agent")
	// ** END Config Setup

	agent := deaagent.NewAgent(*instancesJsonFilePath, log)

	go agent.Start()

	killChan := signalmanager.RegisterKillSignalChannel()
	dumpChan := signalmanager.RegisterGoRoutineDumpSignalChannel()

	for {
		select {
		case <-dumpChan:
			signalmanager.DumpGoRoutine()
		case <-killChan:
			log.Info("Shutting down")
			os.Exit(0)
		}
	}
}
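The readConfig helper used here (and in Example #5) is not part of the excerpt. A minimal sketch, assuming a JSON config file and only the two fields this variant actually reads (encoding/json and io/ioutil imports assumed):

type Config struct {
	MetronAddress string
	Syslog        string
}

func readConfig(configFile string) (*Config, error) {
	config := &Config{}
	bytes, err := ioutil.ReadFile(configFile)
	if err != nil {
		return nil, err
	}
	// Populate the config struct from the JSON on disk.
	return config, json.Unmarshal(bytes, config)
}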
Code Example #3
File: agent_test.go Project: robsonmwoc/loggregator
func xTestThatFunctionContinuesToPollWhenFileCantBeOpened(t *testing.T) {

	task1StdoutSocketPath := filepath.Join("tmp", "jobs", "56", "stdout.sock")
	task1StderrSocketPath := filepath.Join("tmp", "jobs", "56", "stderr.sock")

	os.MkdirAll(filepath.Join("tmp", "jobs", "56"), 0777)

	os.Remove(filePath())
	os.Remove(task1StdoutSocketPath)
	os.Remove(task1StderrSocketPath)
	agent := deaagent.NewAgent(filePath(), loggertesthelper.StdOutLogger())
	mockLoggregatorEmitter := new(MockLoggregatorEmitter)

	mockLoggregatorEmitter.received = make(chan *logmessage.LogMessage, 2)

	go agent.Start(mockLoggregatorEmitter)

	select {
	case <-mockLoggregatorEmitter.received:
		t.Error("Should not have any messages, the file doesn't exist")
	case <-time.After(2 * time.Second):
		// OK
	}

	println("done with the empty file")

	task1StdoutListener, err := net.Listen("unix", task1StdoutSocketPath)
	assert.NoError(t, err)
	defer task1StdoutListener.Close()
	task1StderrListener, err := net.Listen("unix", task1StderrSocketPath)
	assert.NoError(t, err)
	defer task1StderrListener.Close()

	go func() {
		task1Connection, err := task1StdoutListener.Accept()
		println("got a socket connection")
		if err != nil {
			println(err.Error())
			assert.NoError(t, err)
			return
		}

		println("writing to the socket connection")
		task1Connection.Write([]byte("a log line\n"))
		println("wrote to the socket connection")
	}()

	println("created all the sockets")

	writeToFile(t, `{"instances": [{"state": "RUNNING", "application_id": "1234", "warden_job_id": 56, "warden_container_path": "/tmp", "instance_index": 3}]}`, false)

	println("updated instances.json")

	select {
	case logMessage := <-mockLoggregatorEmitter.received:
		assert.Equal(t, "a log line", string(logMessage.GetMessage()))
	case <-time.After(2 * time.Second):
		t.Error("Should have gotten a message by now.")
	}
}
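MockLoggregatorEmitter appears in this test only through its received channel. A minimal sketch that would satisfy this usage, under the assumption that the emitter interface agent.Start expects includes an EmitLogMessage method (the interface itself is not shown in these excerpts):

type MockLoggregatorEmitter struct {
	received chan *logmessage.LogMessage
}

// EmitLogMessage forwards every emitted message to the received channel so
// the test can assert on what the agent produced.
func (m *MockLoggregatorEmitter) EmitLogMessage(message *logmessage.LogMessage) {
	m.received <- message
}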
Code Example #4
File: main.go Project: Amit-PivotalLabs/loggregator
func main() {
	flag.Parse()

	// ** Config Setup
	config := &Config{}
	err := cfcomponent.ReadConfigInto(config, *configFile)
	if err != nil {
		panic(err)
	}

	dropsonde.Initialize(config.MetronAddress, "dea_logging_agent")

	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			panic(err)
		}
		pprof.StartCPUProfile(f)
		defer func() {
			pprof.StopCPUProfile()
			f.Close()
		}()
	}

	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		if err != nil {
			panic(err)
		}
		go func() {
			defer f.Close()
			ticker := time.NewTicker(time.Second * 1)
			defer ticker.Stop()
			for {
				<-ticker.C
				pprof.WriteHeapProfile(f)
			}
		}()
	}

	logger := cfcomponent.NewLogger(*logLevel, *logFilePath, "deaagent", config.Config)
	logger.Info("Startup: Setting up the loggregator dea logging agent")

	if len(config.NatsHosts) == 0 {
		logger.Warn("Startup: Did not receive a NATS host - not going to register component")
		cfcomponent.DefaultYagnatsClientProvider = func(logger *gosteno.Logger, c *cfcomponent.Config) (yagnats.NATSConn, error) {
			return fakeyagnats.Connect(), nil
		}
	}

	err = config.validate(logger)
	if err != nil {
		panic(err)
	}
	// ** END Config Setup

	agent := deaagent.NewAgent(*instancesJsonFilePath, logger)

	go agent.Start()

	killChan := make(chan os.Signal, 1) // buffered: signal.Notify never blocks when delivering
	signal.Notify(killChan, os.Interrupt)

	dumpChan := cfcomponent.RegisterGoRoutineDumpSignalChannel()
	for {
		select {
		case <-dumpChan:
			cfcomponent.DumpGoRoutine()
		case <-killChan:
			logger.Info("Shutting down")
			return
		}
	}
}
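The cpuprofile and memprofile flags used in Examples #4, #5, and #7 are also defined outside the excerpts; plausible (assumed) declarations:

var (
	cpuprofile = flag.String("cpuprofile", "", "write CPU profile to this file")
	memprofile = flag.String("memprofile", "", "write heap profiles to this file")
)

Note that the memprofile goroutine above writes a heap profile into the same open file once per second, so the output accumulates successive profiles for the lifetime of the process.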
Code Example #5
File: main.go Project: hpcloud/loggregator-1
func main() {
	flag.Parse()

	// ** Config Setup
	config, err := readConfig(*configFile)
	if err != nil {
		panic(err)
	}

	dropsonde.Initialize(config.MetronAddress, "dea_logging_agent")

	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			panic(err)
		}
		pprof.StartCPUProfile(f)
		defer func() {
			pprof.StopCPUProfile()
			f.Close()
		}()
	}

	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		if err != nil {
			panic(err)
		}
		go func() {
			defer f.Close()
			ticker := time.NewTicker(time.Second * 1)
			defer ticker.Stop()
			for {
				<-ticker.C
				pprof.WriteHeapProfile(f)
			}
		}()
	}

	log := logger.NewLogger(*logLevel, *logFilePath, "deaagent", config.Syslog)
	log.Info("Startup: Setting up the loggregator dea logging agent")
	// ** END Config Setup

	agent := deaagent.NewAgent(*instancesJsonFilePath, log)

	go agent.Start()

	killChan := make(chan os.Signal, 1) // buffered: signal.Notify never blocks when delivering
	signal.Notify(killChan, os.Interrupt)

	dumpChan := registerGoRoutineDumpSignalChannel()

	for {
		select {
		case <-dumpChan:
			logger.DumpGoRoutine()
		case <-killChan:
			log.Info("Shutting down")
			return
		}
	}
}
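registerGoRoutineDumpSignalChannel and the package-level logger.DumpGoRoutine it pairs with are not shown in this fork's excerpt. By analogy with signalmanager in Example #2, a sketch of both halves (the choice of SIGUSR1 and the use of runtime/pprof are assumptions):

func registerGoRoutineDumpSignalChannel() chan os.Signal {
	threadDumpChan := make(chan os.Signal, 1)
	signal.Notify(threadDumpChan, syscall.SIGUSR1)
	return threadDumpChan
}

func dumpGoRoutine() {
	// Mode 2 prints every goroutine's stack in the unstructured text format.
	pprof.Lookup("goroutine").WriteTo(os.Stdout, 2)
}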
Code Example #6
File: main.go Project: uabassguy/loggregator
func main() {
	flag.Parse()

	if *version {
		fmt.Printf("version: %s\ngitSha: %s\nsourceUrl: https://github.com/cloudfoundry/loggregator/tree/%s\n\n",
			versionNumber, gitSha, gitSha)
		return
	}

	// ** Config Setup
	config := &Config{}
	err := cfcomponent.ReadConfigInto(config, *configFile)
	if err != nil {
		panic(err)
	}

	logger := cfcomponent.NewLogger(*logLevel, *logFilePath, "deaagent", config.Config)
	logger.Info("Startup: Setting up the loggregator dea logging agent")

	err = config.validate(logger)
	if err != nil {
		panic(err)
	}
	// ** END Config Setup

	loggregatorEmitter, err := emitter.NewEmitter(config.LoggregatorAddress, "APP", "NA", config.SharedSecret, logger)

	if err != nil {
		panic(err)
	}

	agent := deaagent.NewAgent(*instancesJsonFilePath, logger)

	cfc, err := cfcomponent.NewComponent(
		logger,
		"LoggregatorDeaAgent",
		config.Index,
		&DeaAgentHealthMonitor{},
		config.VarzPort,
		[]string{config.VarzUser, config.VarzPass},
		[]instrumentation.Instrumentable{loggregatorEmitter.LoggregatorClient},
	)

	if err != nil {
		panic(err)
	}

	cr := collectorregistrar.NewCollectorRegistrar(config.MbusClient, logger)
	err = cr.RegisterWithCollector(cfc)
	if err != nil {
		panic(err)
	}

	go func() {
		err := cfc.StartMonitoringEndpoints()
		if err != nil {
			panic(err)
		}
	}()
	go agent.Start(loggregatorEmitter)

	dumpChan := cfcomponent.RegisterGoRoutineDumpSignalChannel()
	for {
		select {
		case <-dumpChan:
			cfcomponent.DumpGoRoutine()
		}
	}
}
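DeaAgentHealthMonitor, handed to cfcomponent.NewComponent in Examples #6, #7, and #10, is not shown either. Assuming the health-monitor contract is a single Ok method (an inference from the name, not from these excerpts), a trivial sketch:

type DeaAgentHealthMonitor struct{}

// Ok reports health through the component's varz/monitoring endpoints; the
// agent has no failure state to surface here, so it unconditionally passes.
func (hm DeaAgentHealthMonitor) Ok() bool {
	return true
}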
Code Example #7
File: main.go Project: robsonmwoc/loggregator
func main() {
	flag.Parse()

	if *version {
		fmt.Printf("version: %s\ngitSha: %s\nsourceUrl: https://github.com/cloudfoundry/loggregator/tree/%s\n\n",
			versionNumber, gitSha, gitSha)
		return
	}

	// ** Config Setup
	config := &Config{}
	err := cfcomponent.ReadConfigInto(config, *configFile)
	if err != nil {
		panic(err)
	}

	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			panic(err)
		}
		pprof.StartCPUProfile(f)
		defer func() {
			pprof.StopCPUProfile()
			f.Close()
		}()
	}

	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		if err != nil {
			panic(err)
		}
		go func() {
			defer f.Close()
			ticker := time.NewTicker(time.Second * 1)
			defer ticker.Stop()
			for {
				<-ticker.C
				pprof.WriteHeapProfile(f)
			}
		}()
	}

	logger := cfcomponent.NewLogger(*logLevel, *logFilePath, "deaagent", config.Config)
	logger.Info("Startup: Setting up the loggregator dea logging agent")

	err = config.validate(logger)
	if err != nil {
		panic(err)
	}
	// ** END Config Setup

	loggregatorEmitter, err := emitter.NewEmitter(config.LoggregatorAddress, "APP", "NA", config.SharedSecret, logger)

	if err != nil {
		panic(err)
	}

	agent := deaagent.NewAgent(*instancesJsonFilePath, logger)

	cfc, err := cfcomponent.NewComponent(
		logger,
		"LoggregatorDeaAgent",
		config.Index,
		&DeaAgentHealthMonitor{},
		config.VarzPort,
		[]string{config.VarzUser, config.VarzPass},
		[]instrumentation.Instrumentable{loggregatorEmitter.LoggregatorClient},
	)

	if err != nil {
		panic(err)
	}

	cr := collectorregistrar.NewCollectorRegistrar(config.MbusClient, logger)
	err = cr.RegisterWithCollector(cfc)
	if err != nil {
		panic(err)
	}

	go func() {
		err := cfc.StartMonitoringEndpoints()
		if err != nil {
			panic(err)
		}
	}()
	go agent.Start(loggregatorEmitter)

	killChan := make(chan os.Signal, 1) // buffered: signal.Notify never blocks when delivering
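	// Note: os.Kill (SIGKILL) cannot be caught on POSIX systems, so in
	// practice only os.Interrupt triggers the shutdown below.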
	signal.Notify(killChan, os.Kill, os.Interrupt)

	dumpChan := cfcomponent.RegisterGoRoutineDumpSignalChannel()
	for {
		select {
		case <-dumpChan:
			cfcomponent.DumpGoRoutine()
		case <-killChan:
			logger.Info("Shutting down")
			return
		}
	}
}
Code Example #8
File: agent_test.go Project: lyuyun/loggregator
		task1StdoutListener, task1StderrListener = setupTaskSockets(helperTask1)

		helperTask2 := &domain.Task{
			ApplicationId:       "3456",
			SourceName:          "App",
			WardenJobId:         59,
			WardenContainerPath: tmpdir,
			Index:               1,
		}

		task2StdoutListener, task2StderrListener = setupTaskSockets(helperTask2)

		writeToFile(`{"instances": [{"state": "RUNNING", "application_id": "1234", "warden_job_id": 56, "warden_container_path":"`+tmpdir+`", "instance_index": 3, "syslog_drain_urls": ["url1"]},
	                                {"state": "RUNNING", "application_id": "3456", "warden_job_id": 59, "warden_container_path":"`+tmpdir+`", "instance_index": 1}]}`, true)

		agent = deaagent.NewAgent(filePath, loggertesthelper.Logger())

		fakeMetricSender = fake.NewFakeMetricSender()
		metrics.Initialize(fakeMetricSender, metricbatcher.New(fakeMetricSender, 10*time.Millisecond))
	})

	AfterEach(func() {
		task1StdoutListener.Close()
		task1StderrListener.Close()
		task2StdoutListener.Close()
		task2StderrListener.Close()
		agent.Stop()
	})

	Describe("instances.json polling", func() {
		Context("at startup", func() {
Code Example #9
File: agent_test.go Project: robsonmwoc/loggregator
func TestTheAgentMonitorsChangesInTasks(t *testing.T) {
	helperTask1 := &domain.Task{
		ApplicationId:       "1234",
		SourceName:          "App",
		WardenJobId:         56,
		WardenContainerPath: tmpdir,
		Index:               3}
	os.MkdirAll(helperTask1.Identifier(), 0777)

	task1StdoutSocketPath := filepath.Join(helperTask1.Identifier(), "stdout.sock")
	task1StderrSocketPath := filepath.Join(helperTask1.Identifier(), "stderr.sock")
	os.Remove(task1StdoutSocketPath)
	os.Remove(task1StderrSocketPath)
	task1StdoutListener, err := net.Listen("unix", task1StdoutSocketPath)
	assert.NoError(t, err)
	defer task1StdoutListener.Close()
	task1StderrListener, err := net.Listen("unix", task1StderrSocketPath)
	assert.NoError(t, err)
	defer task1StderrListener.Close()

	helperTask2 := &domain.Task{
		ApplicationId:       "5678",
		SourceName:          "App",
		WardenJobId:         58,
		WardenContainerPath: tmpdir,
		Index:               0}
	os.MkdirAll(helperTask2.Identifier(), 0777)

	task2StdoutSocketPath := filepath.Join(helperTask2.Identifier(), "stdout.sock")
	task2StderrSocketPath := filepath.Join(helperTask2.Identifier(), "stderr.sock")
	os.Remove(task2StdoutSocketPath)
	os.Remove(task2StderrSocketPath)
	task2StdoutListener, err := net.Listen("unix", task2StdoutSocketPath)
	assert.NoError(t, err)
	defer task2StdoutListener.Close()
	task2StderrListener, err := net.Listen("unix", task2StderrSocketPath)
	assert.NoError(t, err)
	defer task2StderrListener.Close()

	expectedMessage := "Some Output"

	mockLoggregatorEmitter := new(MockLoggregatorEmitter)

	mockLoggregatorEmitter.received = make(chan *logmessage.LogMessage, 2)

	writeToFile(t, `{"instances": [{"state": "RUNNING", "application_id": "1234", "warden_job_id": 56, "warden_container_path":"`+tmpdir+`", "instance_index": 3}]}`, true)

	agent := deaagent.NewAgent(filePath(), loggertesthelper.Logger())
	go agent.Start(mockLoggregatorEmitter)

	task1Connection, err := task1StdoutListener.Accept()
	assert.NoError(t, err)
	defer task1Connection.Close()

	writeToFile(t, `{"instances": [{"state": "RUNNING", "application_id": "1234", "warden_job_id": 56, "warden_container_path":"`+tmpdir+`", "instance_index": 3},
								   {"state": "RUNNING", "application_id": "5678", "warden_job_id": 58, "warden_container_path":"`+tmpdir+`", "instance_index": 0},
	                               {"state": "RUNNING", "application_id": "1234", "warden_job_id": 57, "warden_container_path":"`+tmpdir+`", "instance_index": 2},
	                               {"state": "RUNNING", "application_id": "3456", "warden_job_id": 59, "warden_container_path":"`+tmpdir+`", "instance_index": 1}
	                               ]}`, true)

	connectionChannel := make(chan net.Conn)
	go func() {
		task2Connection, _ := task2StdoutListener.Accept()
		connectionChannel <- task2Connection
	}()
	var task2Connection net.Conn
	select {
	case task2Connection = <-connectionChannel:
		defer task2Connection.Close()
	case <-time.After(1 * time.Second):
		t.Fatal("Should have been able to open the socket listener")
	}

	_, err = task1Connection.Write([]byte(SOCKET_PREFIX + expectedMessage))
	assert.NoError(t, err)
	_, err = task1Connection.Write([]byte("\n"))
	assert.NoError(t, err)

	_, err = task2Connection.Write([]byte(SOCKET_PREFIX + expectedMessage))
	assert.NoError(t, err)
	_, err = task2Connection.Write([]byte("\n"))
	assert.NoError(t, err)

	receivedMessages := make(map[string]*logmessage.LogMessage)

	receivedMessage := <-mockLoggregatorEmitter.received
	receivedMessages[receivedMessage.GetAppId()] = receivedMessage

	receivedMessage = <-mockLoggregatorEmitter.received
	receivedMessages[receivedMessage.GetAppId()] = receivedMessage

	assert.Equal(t, 2, len(receivedMessages))

	assert.NotNil(t, receivedMessages["1234"])
	assert.Equal(t, "App", receivedMessages["1234"].GetSourceName())
	assert.Equal(t, logmessage.LogMessage_OUT, receivedMessages["1234"].GetMessageType())
	assert.Equal(t, expectedMessage, string(receivedMessages["1234"].GetMessage()))

	assert.NotNil(t, receivedMessages["5678"])
	assert.Equal(t, "App", receivedMessages["5678"].GetSourceName())
	assert.Equal(t, logmessage.LogMessage_OUT, receivedMessages["5678"].GetMessageType())
	assert.Equal(t, expectedMessage, string(receivedMessages["5678"].GetMessage()))
}
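writeToFile, used in Examples #3, #8, and #9, is another unshown test helper that rewrites the instances.json fixture at filePath(). A sketch under the assumption that the boolean selects truncate-versus-append behavior (the Ginkgo variant in Example #8 would simply drop the *testing.T parameter):

func writeToFile(t *testing.T, text string, truncate bool) {
	flags := os.O_CREATE | os.O_WRONLY | os.O_APPEND
	if truncate {
		flags = os.O_CREATE | os.O_WRONLY | os.O_TRUNC
	}

	file, err := os.OpenFile(filePath(), flags, 0644)
	assert.NoError(t, err)
	defer file.Close()

	_, err = file.WriteString(text)
	assert.NoError(t, err)
}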
Code Example #10
File: main.go Project: narayana1208/loggregator
func main() {
	flag.Parse()

	if *version {
		fmt.Printf("\n\nversion: %s\ngitSha: %s\n\n", versionNumber, gitSha)
		return
	}

	logger := cfcomponent.NewLogger(*logLevel, *logFilePath, "deaagent")

	// ** Config Setup
	config := &Config{}
	err := cfcomponent.ReadConfigInto(config, *configFile)
	if err != nil {
		panic(err)
	}

	err = config.validate(logger)
	if err != nil {
		panic(err)
	}

	// ** END Config Setup

	loggregatorClient := loggregatorclient.NewLoggregatorClient(config.LoggregatorAddress, logger, 4096)

	agent := deaagent.NewAgent(*instancesJsonFilePath, logger)

	cfc, err := cfcomponent.NewComponent(
		0,
		"LoggregatorDeaAgent",
		config.Index,
		&DeaAgentHealthMonitor{},
		config.VarzPort,
		[]string{config.VarzUser, config.VarzPass},
		[]instrumentation.Instrumentable{loggregatorClient},
	)

	if err != nil {
		panic(err)
	}

	cr := collectorregistrar.NewCollectorRegistrar(config.MbusClient, logger)
	err = cr.RegisterWithCollector(cfc)
	if err != nil {
		panic(err)
	}

	go func() {
		err := cfc.StartMonitoringEndpoints()
		if err != nil {
			panic(err)
		}
	}()
	go agent.Start(loggregatorClient)

	dumpChan := cfcomponent.RegisterGoRoutineDumpSignalChannel()
	for {
		select {
		case <-dumpChan:
			cfcomponent.DumpGoRoutine()
		}
	}
}
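Taken together, the main.go variants read roughly the following Config shape; the exact split between direct fields and the embedded cfcomponent.Config, and all types, are assumptions reconstructed from usage:

type Config struct {
	cfcomponent.Config // assumed source of Syslog, Index, VarzPort, VarzUser, VarzPass, MbusClient, NatsHosts

	LoggregatorAddress string // host:port the agent forwards log lines to
	SharedSecret       string // secret the emitter uses to sign messages
	MetronAddress      string // dropsonde destination in the metron-based variants
}

// validate(logger), called in several variants, presumably sanity-checks
// these fields and returns an error when required values are missing.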