Example #1
func initLogger() bool {
	var flags int
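	// If a setting is missing or unreadable from config, fall back to enabling that output (and to "log.txt" for the filename).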

	toConsole, err := g_config.GetBool("log", "console")
	if err != nil || toConsole {
		flags = logger.L_CONSOLE
	}
	toFile, err := g_config.GetBool("log", "file")
	if err != nil || toFile {
		flags = flags | logger.L_FILE
	}

	logFile, err := g_config.GetString("log", "filename")
	if err != nil || len(logFile) <= 0 {
		logFile = "log.txt"
	}
	myLog, err := logger.NewLogger(logFile, flags)
	if err != nil || myLog == nil {
		fmt.Printf("[Error] Could not initialize logger: %v\n\r", err)
		return false
	}
	g_logger = log.New(myLog, "", log.Ltime)
	if toFile {
		fmt.Printf(" - Start logging to file: %v\n\r", logFile)
	}

	return true
}
Example #2
func main() {
	// Metron is intended to be light-weight so we occupy only one core
	runtime.GOMAXPROCS(1)

	flag.Parse()
	config, err := config.ParseConfig(*configFilePath)
	if err != nil {
		panic(err)
	}

	log := logger.NewLogger(*debug, *logFilePath, "metron", config.Syslog)
	log.Info("Startup: Setting up the Metron agent")

	dopplerClientPool, err := initializeClientPool(config, log)
	if err != nil {
		log.Errorf("Error while initializing client pool: %s", err.Error())
		os.Exit(-1)
	}

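	// Each stage wraps the next: unmarshalled events flow aggregator -> tagger -> marshaller -> signer -> forwarder on their way to Doppler.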
	dopplerForwarder := dopplerforwarder.New(dopplerClientPool, log)
	byteSigner := signer.New(config.SharedSecret, dopplerForwarder)
	marshaller := eventmarshaller.New(byteSigner, log)
	messageTagger := tagger.New(config.Deployment, config.Job, config.Index, marshaller)
	aggregator := messageaggregator.New(messageTagger, log)

	initializeMetrics(byteSigner, config, log)

	dropsondeUnmarshaller := eventunmarshaller.New(aggregator, log)
	dropsondeReader := networkreader.New(fmt.Sprintf("localhost:%d", config.DropsondeIncomingMessagesPort), "dropsondeAgentListener", dropsondeUnmarshaller, log)

	log.Info("metron started")

	dropsondeReader.Start()
}
Example #3
File: main.go Project: Jonty/loggregator
func main() {
	// Metron is intended to be light-weight so we occupy only one core
	runtime.GOMAXPROCS(1)

	flag.Parse()
	config, err := config.ParseConfig(*configFilePath)
	if err != nil {
		panic(err)
	}

	log := logger.NewLogger(*debug, *logFilePath, "metron", config.Syslog)
	log.Info("Startup: Setting up the Metron agent")

	dopplerClientPool := initializeClientPool(config, log)

	dopplerForwarder := dopplerforwarder.New(dopplerClientPool, log)
	byteSigner := signer.New(config.SharedSecret, dopplerForwarder)
	marshaller := eventmarshaller.New(byteSigner, log)
	messageTagger := tagger.New(config.Deployment, config.Job, config.Index, marshaller)
	aggregator := messageaggregator.New(messageTagger, log)

	initializeMetrics(byteSigner, config, log)

	dropsondeUnmarshaller := eventunmarshaller.New(aggregator, log)
	dropsondeReader := networkreader.New(fmt.Sprintf("localhost:%d", config.DropsondeIncomingMessagesPort), "dropsondeAgentListener", dropsondeUnmarshaller, log)

	// TODO: remove next four lines when legacy support is removed (or extracted to injector)
	legacyMarshaller := eventmarshaller.New(byteSigner, log)
	legacyMessageTagger := tagger.New(config.Deployment, config.Job, config.Index, legacyMarshaller)
	legacyUnmarshaller := legacyunmarshaller.New(legacyMessageTagger, log)
	legacyReader := networkreader.New(fmt.Sprintf("localhost:%d", config.LegacyIncomingMessagesPort), "legacyAgentListener", legacyUnmarshaller, log)

	go legacyReader.Start()
	dropsondeReader.Start()
}
Example #4
File: main.go Project: lyuyun/loggregator
func main() {
	flag.Parse()

	// ** Config Setup
	config, err := readConfig(*configFile)
	if err != nil {
		panic(err)
	}

	dropsonde.Initialize(config.MetronAddress, "dea_logging_agent")

	log := logger.NewLogger(*logLevel, *logFilePath, "deaagent", config.Syslog)
	log.Info("Startup: Setting up the loggregator dea logging agent")
	// ** END Config Setup

	agent := deaagent.NewAgent(*instancesJsonFilePath, log)

	go agent.Start()

	killChan := signalmanager.RegisterKillSignalChannel()
	dumpChan := signalmanager.RegisterGoRoutineDumpSignalChannel()

	for {
		select {
		case <-dumpChan:
			signalmanager.DumpGoRoutine()
		case <-killChan:
			log.Info("Shutting down")
			os.Exit(0)
		}
	}
}
Example #5
func main() {
	// Metron is intended to be light-weight so we occupy only one core
	runtime.GOMAXPROCS(1)

	flag.Parse()
	config, err := config.ParseConfig(*configFilePath)
	if err != nil {
		panic(fmt.Errorf("Unable to parse config: %s", err))
	}

	localIp, err := localip.LocalIP()
	if err != nil {
		panic(fmt.Errorf("Unable to resolve own IP address: %s", err))
	}

	log := logger.NewLogger(*debug, *logFilePath, "metron", config.Syslog)

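	// Serve pprof endpoints using the default mux (the handlers are presumably registered by a blank import of net/http/pprof elsewhere in this file).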
	go func() {
		err := http.ListenAndServe(net.JoinHostPort(localIp, pprofPort), nil)
		if err != nil {
			log.Errorf("Error starting pprof server: %s", err.Error())
		}
	}()

	log.Info("Startup: Setting up the Metron agent")
	marshaller, err := initializeDopplerPool(config, log)
	if err != nil {
		panic(fmt.Errorf("Could not initialize doppler connection pool: %s", err))
	}
	messageTagger := tagger.New(config.Deployment, config.Job, config.Index, marshaller)
	aggregator := messageaggregator.New(messageTagger, log)

	statsStopChan := make(chan struct{})
	initializeMetrics(messageTagger, config, statsStopChan, log)

	dropsondeUnmarshaller := eventunmarshaller.New(aggregator, log)
	metronAddress := fmt.Sprintf("127.0.0.1:%d", config.IncomingUDPPort)
	dropsondeReader, err := networkreader.New(metronAddress, "dropsondeAgentListener", dropsondeUnmarshaller, log)
	if err != nil {
		panic(fmt.Errorf("Failed to listen on %s: %s", metronAddress, err))
	}

	log.Info("metron started")
	go dropsondeReader.Start()

	dumpChan := signalmanager.RegisterGoRoutineDumpSignalChannel()
	killChan := signalmanager.RegisterKillSignalChannel()

	for {
		select {
		case <-dumpChan:
			signalmanager.DumpGoRoutine()
		case <-killChan:
			log.Info("Shutting down")
			close(statsStopChan)
			return
		}
	}
}
Example #6
func main() {
	flag.Parse()

	config, err := config.ParseConfig(*logLevel, *configFile, *logFilePath)
	if err != nil {
		panic(err)
	}

	log := logger.NewLogger(*logLevel, *logFilePath, "loggregator trafficcontroller", config.Syslog)
	log.Info("Startup: Setting up the loggregator traffic controller")

	dropsonde.Initialize("localhost:"+strconv.Itoa(config.MetronPort), "LoggregatorTrafficController")

	profiler := profiler.NewProfiler(*cpuprofile, *memprofile, 1*time.Second, log)
	profiler.Profile()
	defer profiler.Stop()

	uptimeMonitor := monitor.NewUptimeMonitor(time.Duration(config.MonitorIntervalSeconds) * time.Second)
	go uptimeMonitor.Start()
	defer uptimeMonitor.Stop()

	dopplerAdapter := DefaultStoreAdapterProvider(config.EtcdUrls, config.EtcdMaxConcurrentRequests)
	dopplerAdapter.Connect()

	legacyAdapter := DefaultStoreAdapterProvider(config.EtcdUrls, config.EtcdMaxConcurrentRequests)
	legacyAdapter.Connect()

	ipAddress, err := localip.LocalIP()
	if err != nil {
		panic(err)
	}

	dopplerProxy := makeDopplerProxy(dopplerAdapter, config, log)
	startOutgoingDopplerProxy(net.JoinHostPort(ipAddress, strconv.FormatUint(uint64(config.OutgoingDropsondePort), 10)), dopplerProxy)

	legacyProxy := makeLegacyProxy(legacyAdapter, config, log)
	startOutgoingProxy(net.JoinHostPort(ipAddress, strconv.FormatUint(uint64(config.OutgoingPort), 10)), legacyProxy)

	killChan := make(chan os.Signal)
	signal.Notify(killChan, os.Kill, os.Interrupt)

	dumpChan := registerGoRoutineDumpSignalChannel()

	for {
		select {
		case <-dumpChan:
			logger.DumpGoRoutine()
		case <-killChan:
			// a bare break here would only exit the select, not the for loop; return so the controller actually shuts down
			return
		}
	}
}
Example #7
func main() {
	// Metron is intended to be light-weight so we occupy only one core
	runtime.GOMAXPROCS(1)

	flag.Parse()
	config, err := config.ParseConfig(*configFilePath)
	if err != nil {
		panic(err)
	}

	log := logger.NewLogger(*debug, *logFilePath, "metron", config.Syslog)
	log.Info("Startup: Setting up the Metron agent")

	dopplerClientPool, err := initializeDopplerPool(config, log)
	if err != nil {
		log.Errorf("Failed to initialize the doppler pool: %s", err.Error())
		os.Exit(-1)
	}

	dopplerForwarder := dopplerforwarder.New(dopplerClientPool, []byte(config.SharedSecret), uint(config.BufferSize), log)
	messageTagger := tagger.New(config.Deployment, config.Job, config.Index, dopplerForwarder)
	aggregator := messageaggregator.New(messageTagger, log)

	initializeMetrics(messageTagger, config, log)

	dropsondeUnmarshaller := eventunmarshaller.New(aggregator, log)
	metronAddress := fmt.Sprintf("127.0.0.1:%d", config.DropsondeIncomingMessagesPort)
	dropsondeReader, err := networkreader.New(metronAddress, "dropsondeAgentListener", dropsondeUnmarshaller, log)
	if err != nil {
		log.Errorf("Failed to listen on %s: %s", metronAddress, err)
		os.Exit(1)
	}

	log.Info("metron started")
	go dopplerForwarder.Run()
	dropsondeReader.Start()
	dopplerForwarder.Stop()
}
Example #8
File: main.go Project: Jonty/loggregator
func main() {
	seed := time.Now().UnixNano()
	rand.Seed(seed)

	flag.Parse()

	runtime.GOMAXPROCS(runtime.NumCPU())

	localIp, err := localip.LocalIP()
	if err != nil {
		panic(errors.New("Unable to resolve own IP address: " + err.Error()))
	}

	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			panic(err)
		}
		pprof.StartCPUProfile(f)
		defer func() {
			pprof.StopCPUProfile()
			f.Close()
		}()
	}

	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		if err != nil {
			panic(err)
		}
		go func() {
			defer f.Close()
			ticker := time.NewTicker(time.Second * 1)
			defer ticker.Stop()
			for {
				<-ticker.C
				pprof.WriteHeapProfile(f)
			}
		}()
	}

	conf, err := config.ParseConfig(*configFile)
	if err != nil {
		panic(err)
	}

	log := logger.NewLogger(*logLevel, *logFilePath, "doppler", conf.Syslog)
	log.Info("Startup: Setting up the doppler server")

	dropsonde.Initialize(conf.MetronAddress, DOPPLER_ORIGIN)
	dopplerStoreAdapter := NewStoreAdapter(conf.EtcdUrls, conf.EtcdMaxConcurrentRequests)
	legacyStoreAdapter := NewStoreAdapter(conf.EtcdUrls, conf.EtcdMaxConcurrentRequests)

	doppler := New(localIp, conf, log, dopplerStoreAdapter, conf.MessageDrainBufferSize, DOPPLER_ORIGIN, time.Duration(conf.SinkDialTimeoutSeconds)*time.Second)

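	// Note: err still holds the result of config.ParseConfig, which was already checked above; New in this snippet returns only the doppler value, so this branch is dead code.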
	if err != nil {
		panic(err)
	}

	go doppler.Start()
	log.Info("Startup: doppler server started.")

	killChan := make(chan os.Signal)
	signal.Notify(killChan, os.Kill, os.Interrupt)

	dumpChan := registerGoRoutineDumpSignalChannel()

	releaseNodeChan := announcer.Announce(localIp, config.HeartbeatInterval, conf, dopplerStoreAdapter, log)
	legacyReleaseNodeChan := announcer.AnnounceLegacy(localIp, config.HeartbeatInterval, conf, legacyStoreAdapter, log)

	for {
		select {
		case <-dumpChan:
			logger.DumpGoRoutine()
		case <-killChan:
			log.Info("Shutting down")
			doppler.Stop()
			close(releaseNodeChan)
			close(legacyReleaseNodeChan)
			return
		}
	}
}
Example #9
// Interface for setting and getting pvdata
type PvStore interface {
	Set(plantkey string, pv *PvData)
	Get(plantkey string) PvData
}

type InitiateData struct {
	PlantKey string
	UserName string
	Password string
	PlantNo  string
	Address  string
}

var log = logger.NewLogger(logger.DEBUG, "Dataprovider: generic: ")

type TerminateCallback func()

type UpdatePvData func(i *InitiateData, pv *PvData) error

// List of all known dataproviders
// This would be nice if we could autodetect them somehow
const (
	FJY         = iota
	SunnyPortal = iota
	Suntrol     = iota
	Danfoss     = iota
	Kostal      = iota
)
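
Since PvData itself is not shown in this snippet, here is a minimal sketch of how the PvStore interface above could be satisfied with an in-memory map; the PvData stand-in, its field, the mutex, and the constructor name are illustrative assumptions, not part of the original dataproviders package.

package main

import (
	"fmt"
	"sync"
)

// PvData stand-in for this sketch only; the real struct lives in the dataproviders package.
type PvData struct {
	PowerAcW float64 // illustrative field
}

// mapPvStore is a hypothetical in-memory implementation of the PvStore interface.
type mapPvStore struct {
	mu   sync.RWMutex
	data map[string]PvData
}

func newMapPvStore() *mapPvStore {
	return &mapPvStore{data: map[string]PvData{}}
}

// Set stores a copy of the given PvData under the plant key.
func (s *mapPvStore) Set(plantkey string, pv *PvData) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.data[plantkey] = *pv
}

// Get returns the stored PvData (or the zero value if the plant key is unknown).
func (s *mapPvStore) Get(plantkey string) PvData {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.data[plantkey]
}

func main() {
	store := newMapPvStore()
	store.Set("plant-1", &PvData{PowerAcW: 1234.5})
	fmt.Println(store.Get("plant-1").PowerAcW)
}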
Example #10
File: app.go Project: jaybora/solarcompare
package startup

import (
	"appengine"
	"appengine/memcache"
	"appengine/urlfetch"
	"dataproviders"
	"fmt"
	"logger"
	"net/http"
	"net/url"
	"time"
	"web"
)

var log = logger.NewLogger(logger.INFO, "app.go ")

func init() {
	http.HandleFunc("/plant/", planthandler)
}

func planthandler(w http.ResponseWriter, r *http.Request) {
	plantkey := web.PlantKey(r.URL.String(), appengine.IsDevAppServer())
	if plantkey == "" {
		http.Error(w, fmt.Sprintf("No plant specified"),
			http.StatusNotFound)
		return
	}
	c := appengine.NewContext(r)

	i, err := memcache.Get(c, "pvdata:"+plantkey)
Example #11
func main() {
	seed := time.Now().UnixNano()
	rand.Seed(seed)

	flag.Parse()

	runtime.GOMAXPROCS(runtime.NumCPU())

	localIp, err := localip.LocalIP()
	if err != nil {
		panic(fmt.Errorf("Unable to resolve own IP address: %s", err))
	}

	conf, err := config.ParseConfig(*configFile)
	if err != nil {
		panic(fmt.Errorf("Unable to parse config: %s", err))
	}

	log := logger.NewLogger(*logLevel, *logFilePath, "doppler", conf.Syslog)

	go func() {
		err := http.ListenAndServe(net.JoinHostPort(localIp, pprofPort), nil)
		if err != nil {
			log.Errorf("Error starting pprof server: %s", err.Error())
		}
	}()

	log.Info("Startup: Setting up the doppler server")
	dropsonde.Initialize(conf.MetronAddress, DOPPLER_ORIGIN)
	storeAdapter := NewStoreAdapter(conf.EtcdUrls, conf.EtcdMaxConcurrentRequests)

	doppler, err := New(log, localIp, conf, storeAdapter, conf.MessageDrainBufferSize, DOPPLER_ORIGIN, time.Duration(conf.WebsocketWriteTimeoutSeconds)*time.Second, time.Duration(conf.SinkDialTimeoutSeconds)*time.Second)

	if err != nil {
		panic(fmt.Errorf("Failed to create doppler: %s", err))
	}

	go doppler.Start()
	log.Info("Startup: doppler server started.")

	killChan := signalmanager.RegisterKillSignalChannel()
	dumpChan := signalmanager.RegisterGoRoutineDumpSignalChannel()

	releaseNodeChan := dopplerservice.Announce(localIp, config.HeartbeatInterval, conf, storeAdapter, log)
	legacyReleaseNodeChan := dopplerservice.AnnounceLegacy(localIp, config.HeartbeatInterval, conf, storeAdapter, log)

	for {
		select {
		case <-dumpChan:
			signalmanager.DumpGoRoutine()
		case <-killChan:
			log.Info("Shutting down")

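			// Hand each announcer a done-channel via its release channel, then wait on both before exiting.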
			stopped := make(chan bool)
			legacyStopped := make(chan bool)
			releaseNodeChan <- stopped
			legacyReleaseNodeChan <- legacyStopped

			doppler.Stop()

			<-stopped
			<-legacyStopped

			return
		}
	}
}
Example #12
func main() {
	flag.Parse()

	config, err := config.ParseConfig(*logLevel, *configFile, *logFilePath)
	if err != nil {
		panic(err)
	}

	log := logger.NewLogger(*logLevel, *logFilePath, "loggregator trafficcontroller", config.Syslog)
	log.Info("Startup: Setting up the loggregator traffic controller")

	dropsonde.Initialize("127.0.0.1:"+strconv.Itoa(config.MetronPort), "LoggregatorTrafficController")

	profiler := profiler.NewProfiler(*cpuprofile, *memprofile, 1*time.Second, log)
	profiler.Profile()
	defer profiler.Stop()

	uptimeMonitor := monitor.NewUptimeMonitor(time.Duration(config.MonitorIntervalSeconds) * time.Second)
	go uptimeMonitor.Start()
	defer uptimeMonitor.Stop()

	etcdAdapter := DefaultStoreAdapterProvider(config.EtcdUrls, config.EtcdMaxConcurrentRequests)
	err = etcdAdapter.Connect()
	if err != nil {
		log.Errorf("Cannot connect to ETCD: %s", err.Error())
		os.Exit(-1)
	}

	ipAddress, err := localip.LocalIP()
	if err != nil {
		panic(err)
	}

	logAuthorizer := authorization.NewLogAccessAuthorizer(*disableAccessControl, config.ApiHost, config.SkipCertVerify)

	uaaClient := uaa_client.NewUaaClient(config.UaaHost, config.UaaClientId, config.UaaClientSecret, config.SkipCertVerify)
	adminAuthorizer := authorization.NewAdminAccessAuthorizer(*disableAccessControl, &uaaClient)

	preferredServers := func(string) bool { return false }
	finder := dopplerservice.NewLegacyFinder(etcdAdapter, int(config.DopplerPort), preferredServers, nil, log)
	finder.Start()

	dopplerProxy := makeProxy(etcdAdapter, config, log, marshaller.DropsondeLogMessage, dopplerproxy.TranslateFromDropsondePath,
		newDropsondeWebsocketListener, finder, logAuthorizer, adminAuthorizer, "doppler."+config.SystemDomain)
	startOutgoingDopplerProxy(net.JoinHostPort(ipAddress, strconv.FormatUint(uint64(config.OutgoingDropsondePort), 10)), dopplerProxy)

	legacyProxy := makeProxy(etcdAdapter, config, log, marshaller.LoggregatorLogMessage, dopplerproxy.TranslateFromLegacyPath,
		newLegacyWebsocketListener, finder, logAuthorizer, adminAuthorizer, "loggregator."+config.SystemDomain)
	startOutgoingProxy(net.JoinHostPort(ipAddress, strconv.FormatUint(uint64(config.OutgoingPort), 10)), legacyProxy)

	killChan := make(chan os.Signal)
	signal.Notify(killChan, os.Kill, os.Interrupt)

	dumpChan := registerGoRoutineDumpSignalChannel()

	for {
		select {
		case <-dumpChan:
			logger.DumpGoRoutine()
		case <-killChan:
			// a bare break here would only exit the select, not the for loop; return so the controller actually shuts down
			return
		}
	}
}
Example #13
File: main.go Project: lyuyun/loggregator
func main() {
	seed := time.Now().UnixNano()
	rand.Seed(seed)

	// Put os.Exit in a deferred statement so that other defers get executed prior to
	// the os.Exit call.
	exitCode := 0
	defer func() {
		os.Exit(exitCode)
	}()

	flag.Parse()

	runtime.GOMAXPROCS(runtime.NumCPU())

	localIp, err := localip.LocalIP()
	if err != nil {
		panic(errors.New("Unable to resolve own IP address: " + err.Error()))
	}

	conf, err := config.ParseConfig(*configFile)
	if err != nil {
		panic(err)
	}

	log := logger.NewLogger(*logLevel, *logFilePath, "doppler", conf.Syslog)
	log.Info("Startup: Setting up the doppler server")

	profiler := profiler.New(*cpuprofile, *memprofile, 1*time.Second, log)
	profiler.Profile()
	defer profiler.Stop()

	dropsonde.Initialize(conf.MetronAddress, DOPPLER_ORIGIN)
	storeAdapter := NewStoreAdapter(conf.EtcdUrls, conf.EtcdMaxConcurrentRequests)

	doppler, err := New(log, localIp, conf, storeAdapter, conf.MessageDrainBufferSize, DOPPLER_ORIGIN, time.Duration(conf.WebsocketWriteTimeoutSeconds)*time.Second, time.Duration(conf.SinkDialTimeoutSeconds)*time.Second)

	if err != nil {
		log.Errorf("Failed to create doppler: %s", err.Error())
		exitCode = -1
		return
	}

	go doppler.Start()
	log.Info("Startup: doppler server started.")

	killChan := signalmanager.RegisterKillSignalChannel()
	dumpChan := signalmanager.RegisterGoRoutineDumpSignalChannel()

	releaseNodeChan := dopplerservice.Announce(localIp, config.HeartbeatInterval, conf, storeAdapter, log)
	legacyReleaseNodeChan := dopplerservice.AnnounceLegacy(localIp, config.HeartbeatInterval, conf, storeAdapter, log)

	for {
		select {
		case <-dumpChan:
			signalmanager.DumpGoRoutine()
		case <-killChan:
			log.Info("Shutting down")

			stopped := make(chan bool)
			legacyStopped := make(chan bool)
			releaseNodeChan <- stopped
			legacyReleaseNodeChan <- legacyStopped

			doppler.Stop()

			<-stopped
			<-legacyStopped

			return
		}
	}
}
Example #14
func main() {
	flag.Parse()

	config, err := config.ParseConfig(*logLevel, *configFile)
	if err != nil {
		panic(fmt.Errorf("Unable to parse config: %s", err))
	}

	transport := &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: config.SkipCertVerify}}
	http.DefaultClient.Transport = transport

	ipAddress, err := localip.LocalIP()
	if err != nil {
		panic(fmt.Errorf("Unable to resolve own IP address: %s", err))
	}

	log := logger.NewLogger(*logLevel, *logFilePath, "loggregator trafficcontroller", config.Syslog)
	log.Info("Startup: Setting up the loggregator traffic controller")

	dropsonde.Initialize(net.JoinHostPort(config.MetronHost, strconv.Itoa(config.MetronPort)), "LoggregatorTrafficController")

	go func() {
		err := http.ListenAndServe(net.JoinHostPort(ipAddress, pprofPort), nil)
		if err != nil {
			log.Errorf("Error starting pprof server: %s", err.Error())
		}
	}()

	uptimeMonitor := monitor.NewUptimeMonitor(time.Duration(config.MonitorIntervalSeconds) * time.Second)
	go uptimeMonitor.Start()
	defer uptimeMonitor.Stop()

	etcdAdapter := defaultStoreAdapterProvider(config.EtcdUrls, config.EtcdMaxConcurrentRequests)
	err = etcdAdapter.Connect()
	if err != nil {
		panic(fmt.Errorf("Unable to connect to ETCD: %s", err))
	}

	logAuthorizer := authorization.NewLogAccessAuthorizer(*disableAccessControl, config.ApiHost)

	uaaClient := uaa_client.NewUaaClient(config.UaaHost, config.UaaClientId, config.UaaClientSecret)
	adminAuthorizer := authorization.NewAdminAccessAuthorizer(*disableAccessControl, &uaaClient)

	// TODO: The preferredProtocol of udp tells the finder to pull out the Doppler URLs from the legacy ETCD endpoint.
	// Eventually we'll have a separate websocket client pool
	finder := dopplerservice.NewFinder(etcdAdapter, int(config.DopplerPort), []string{"udp"}, "", log)
	finder.Start()
	// Draining the finder's events channel in order to not block the finder from handling etcd events.
	go func() {
		for {
			finder.Next()
		}
	}()

	var accessMiddleware, legacyAccessMiddleware func(middleware.HttpHandler) *middleware.AccessHandler
	if config.SecurityEventLog != "" {
		accessLog, err := os.OpenFile(config.SecurityEventLog, os.O_APPEND|os.O_WRONLY, os.ModeAppend)
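		// Without os.O_CREATE the security event log file must already exist, otherwise OpenFile returns an error.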
		if err != nil {
			panic(fmt.Errorf("Unable to open access log: %s", err))
		}
		defer func() {
			accessLog.Sync()
			accessLog.Close()
		}()
		accessLogger := accesslogger.New(accessLog, log)
		accessMiddleware = middleware.Access(accessLogger, ipAddress, config.OutgoingDropsondePort, log)
		legacyAccessMiddleware = middleware.Access(accessLogger, ipAddress, config.OutgoingPort, log)
	}

	dopplerCgc := channel_group_connector.NewChannelGroupConnector(finder, newDropsondeWebsocketListener, marshaller.DropsondeLogMessage, log)
	dopplerHandler := http.Handler(dopplerproxy.NewDopplerProxy(logAuthorizer, adminAuthorizer, dopplerCgc, dopplerproxy.TranslateFromDropsondePath, "doppler."+config.SystemDomain, log))
	if accessMiddleware != nil {
		dopplerHandler = accessMiddleware(dopplerHandler)
	}
	startOutgoingProxy(net.JoinHostPort(ipAddress, strconv.FormatUint(uint64(config.OutgoingDropsondePort), 10)), dopplerHandler)

	legacyCgc := channel_group_connector.NewChannelGroupConnector(finder, newLegacyWebsocketListener, marshaller.LoggregatorLogMessage, log)
	legacyHandler := http.Handler(dopplerproxy.NewDopplerProxy(logAuthorizer, adminAuthorizer, legacyCgc, dopplerproxy.TranslateFromLegacyPath, "loggregator."+config.SystemDomain, log))
	if legacyAccessMiddleware != nil {
		legacyHandler = legacyAccessMiddleware(legacyHandler)
	}
	startOutgoingProxy(net.JoinHostPort(ipAddress, strconv.FormatUint(uint64(config.OutgoingPort), 10)), legacyHandler)

	killChan := signalmanager.RegisterKillSignalChannel()
	dumpChan := signalmanager.RegisterGoRoutineDumpSignalChannel()

	for {
		select {
		case <-dumpChan:
			signalmanager.DumpGoRoutine()
		case <-killChan:
			log.Info("Shutting down")
			return
		}
	}
}
Example #15
File: main.go Project: lyuyun/loggregator
func main() {
	// Put os.Exit in a deferred statement so that other defers get executed prior to
	// the os.Exit call.
	exitCode := 0
	defer func() {
		os.Exit(exitCode)
	}()

	flag.Parse()

	config, err := config.ParseConfig(*logLevel, *configFile, *logFilePath)
	if err != nil {
		panic(err)
	}

	log := logger.NewLogger(*logLevel, *logFilePath, "loggregator trafficcontroller", config.Syslog)
	log.Info("Startup: Setting up the loggregator traffic controller")

	dropsonde.Initialize("127.0.0.1:"+strconv.Itoa(config.MetronPort), "LoggregatorTrafficController")

	profiler := profiler.New(*cpuprofile, *memprofile, 1*time.Second, log)
	profiler.Profile()
	defer profiler.Stop()

	uptimeMonitor := monitor.NewUptimeMonitor(time.Duration(config.MonitorIntervalSeconds) * time.Second)
	go uptimeMonitor.Start()
	defer uptimeMonitor.Stop()

	etcdAdapter := DefaultStoreAdapterProvider(config.EtcdUrls, config.EtcdMaxConcurrentRequests)
	err = etcdAdapter.Connect()
	if err != nil {
		log.Errorf("Cannot connect to ETCD: %s", err.Error())
		exitCode = -1
		return
	}

	ipAddress, err := localip.LocalIP()
	if err != nil {
		panic(err)
	}

	logAuthorizer := authorization.NewLogAccessAuthorizer(*disableAccessControl, config.ApiHost, config.SkipCertVerify)

	uaaClient := uaa_client.NewUaaClient(config.UaaHost, config.UaaClientId, config.UaaClientSecret, config.SkipCertVerify)
	adminAuthorizer := authorization.NewAdminAccessAuthorizer(*disableAccessControl, &uaaClient)

	preferredServers := func(string) bool { return false }
	finder := dopplerservice.NewLegacyFinder(etcdAdapter, int(config.DopplerPort), preferredServers, nil, log)
	finder.Start()

	dopplerCgc := channel_group_connector.NewChannelGroupConnector(finder, newDropsondeWebsocketListener, marshaller.DropsondeLogMessage, log)
	dopplerProxy := dopplerproxy.NewDopplerProxy(logAuthorizer, adminAuthorizer, dopplerCgc, dopplerproxy.TranslateFromDropsondePath, "doppler."+config.SystemDomain, log)
	startOutgoingProxy(net.JoinHostPort(ipAddress, strconv.FormatUint(uint64(config.OutgoingDropsondePort), 10)), dopplerProxy)

	legacyCgc := channel_group_connector.NewChannelGroupConnector(finder, newLegacyWebsocketListener, marshaller.LoggregatorLogMessage, log)
	legacyProxy := dopplerproxy.NewDopplerProxy(logAuthorizer, adminAuthorizer, legacyCgc, dopplerproxy.TranslateFromLegacyPath, "loggregator."+config.SystemDomain, log)
	startOutgoingProxy(net.JoinHostPort(ipAddress, strconv.FormatUint(uint64(config.OutgoingPort), 10)), legacyProxy)

	killChan := signalmanager.RegisterKillSignalChannel()
	dumpChan := signalmanager.RegisterGoRoutineDumpSignalChannel()

	for {
		select {
		case <-dumpChan:
			signalmanager.DumpGoRoutine()
		case <-killChan:
			log.Info("Shutting down")
			return
		}
	}
}
Example #16
	"io/ioutil"
	"logger"
	"net/http"
	"net/url"
	"regexp"
	"strconv"
	"time"
)

type dataProvider struct {
	InitiateData dataproviders.InitiateData
	latestErr    error
	client       *http.Client
}

var log = logger.NewLogger(logger.DEBUG, "Dataprovider: Danfoss:")

const MAX_ERRORS = 5

const urlTemplate = "http://%s/%s"
const loginUrl = "cgi-bin/handle_login.tcl"
const logoutUrl = "cgi-bin/logout.tcl?sid=%s"
const forceLogoutUrl = "cgi-bin/closed_for_now.tcl?useTheForce=1"
const pacUrl = "cgi-bin/overview.tcl?sid=%s"

const sidRegEx = "sid=[0-9]+"
const curPwr1RegEx = "<td id=\"curr_power\" class=\"parValue\" style=\"width:25%;\">[0-9|\\.]+ [kW|W]"
const numberRegEx = ">[0-9|\\.]+"
const etodayRegEx = "<td id=\"prod_today\" class=\"parValue\">[0-9|\\.]+ [kW|W]"
const etotalRegEx = "<td id=\"total_yield\" class=\"parValue\">[0-9|\\.]+"
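// Note: the bracketed parts above (e.g. [kW|W]) are regexp character classes, so they match a single character from the set rather than the literal alternatives "kW" or "W".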
Example #17
func main() {
	// Put os.Exit in a deferred statement so that other defers get executed prior to
	// the os.Exit call.
	exitCode := 0
	defer func() {
		os.Exit(exitCode)
	}()

	// Metron is intended to be light-weight so we occupy only one core
	runtime.GOMAXPROCS(1)

	flag.Parse()
	config, err := config.ParseConfig(*configFilePath)
	if err != nil {
		panic(err)
	}

	log := logger.NewLogger(*debug, *logFilePath, "metron", config.Syslog)
	log.Info("Startup: Setting up the Metron agent")

	profiler := profiler.New(*cpuprofile, *memprofile, 1*time.Second, log)
	profiler.Profile()
	defer profiler.Stop()

	dopplerClientPool, err := initializeDopplerPool(config, log)
	if err != nil {
		log.Errorf("Failed to initialize the doppler pool: %s", err.Error())
		os.Exit(-1)
	}

	dopplerForwarder := dopplerforwarder.New(dopplerClientPool, []byte(config.SharedSecret), uint(config.BufferSize), config.EnableBuffer, log)
	messageTagger := tagger.New(config.Deployment, config.Job, config.Index, dopplerForwarder)
	aggregator := messageaggregator.New(messageTagger, log)

	statsStopChan := make(chan struct{})
	initializeMetrics(messageTagger, config, statsStopChan, log)

	dropsondeUnmarshaller := eventunmarshaller.New(aggregator, log)
	metronAddress := fmt.Sprintf("127.0.0.1:%d", config.DropsondeIncomingMessagesPort)
	dropsondeReader, err := networkreader.New(metronAddress, "dropsondeAgentListener", dropsondeUnmarshaller, log)
	if err != nil {
		log.Errorf("Failed to listen on %s: %s", metronAddress, err)
		exitCode = 1
		return
	}

	log.Info("metron started")
	go dopplerForwarder.Run()
	go dropsondeReader.Start()

	dumpChan := signalmanager.RegisterGoRoutineDumpSignalChannel()
	killChan := signalmanager.RegisterKillSignalChannel()

	for {
		select {
		case <-dumpChan:
			signalmanager.DumpGoRoutine()
		case <-killChan:
			log.Info("Shutting down")
			dopplerForwarder.Stop()
			close(statsStopChan)
			return
		}
	}
}
Example #18
File: main.go Project: Jonty/loggregator
func main() {
	flag.Parse()
	config, err := parseConfig(*debug, *configFile, *logFilePath)
	if err != nil {
		panic(err)
	}
	log := logger.NewLogger(*debug, *logFilePath, "syslog_drain_binder", config.Syslog)

	dropsonde.Initialize(config.MetronAddress, "syslog_drain_binder")

	workPool, err := workpool.NewWorkPool(config.EtcdMaxConcurrentRequests)
	if err != nil {
		panic(err)
	}

	options := &etcdstoreadapter.ETCDOptions{
		ClusterUrls: config.EtcdUrls,
	}
	adapter, err := etcdstoreadapter.New(options, workPool)
	if err != nil {
		panic(err)
	}

	updateInterval := time.Duration(config.UpdateIntervalSeconds) * time.Second
	politician := elector.NewElector(config.InstanceName, adapter, updateInterval, log)

	drainTTL := time.Duration(config.DrainUrlTtlSeconds) * time.Second
	store := etcd_syslog_drain_store.NewEtcdSyslogDrainStore(adapter, drainTTL, log)

	dumpChan := registerGoRoutineDumpSignalChannel()
	ticker := time.NewTicker(updateInterval)
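	// Each tick: keep or (re)acquire leadership, poll the Cloud Controller for syslog drain URLs, and write them to etcd, vacating leadership on any error.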
	for {
		select {
		case <-dumpChan:
			logger.DumpGoRoutine()
		case <-ticker.C:
			if politician.IsLeader() {
				err = politician.StayAsLeader()
				if err != nil {
					log.Errorf("Error when staying leader: %s", err.Error())
					politician.Vacate()
					continue
				}
			} else {
				err = politician.RunForElection()

				if err != nil {
					log.Errorf("Error when running for leader: %s", err.Error())
					politician.Vacate()
					continue
				}
			}

			log.Debugf("Polling %s for updates", config.CloudControllerAddress)
			drainUrls, err := Poll(config.CloudControllerAddress, config.BulkApiUsername, config.BulkApiPassword, config.PollingBatchSize, config.SkipCertVerify)
			if err != nil {
				log.Errorf("Error when polling cloud controller: %s", err.Error())
				politician.Vacate()
				continue
			}

			metrics.IncrementCounter("pollCount")

			var totalDrains int
			for _, drainList := range drainUrls {
				totalDrains += len(drainList)
			}

			metrics.SendValue("totalDrains", float64(totalDrains), "drains")

			log.Debugf("Updating drain URLs for %d application(s)", len(drainUrls))
			err = store.UpdateDrains(drainUrls)
			if err != nil {
				log.Errorf("Error when updating ETCD: %s", err.Error())
				politician.Vacate()
				continue
			}
		}
	}
}
Example #19
func main() {
	flag.Parse()

	// ** Config Setup
	config, err := readConfig(*configFile)
	if err != nil {
		panic(err)
	}

	dropsonde.Initialize(config.MetronAddress, "dea_logging_agent")

	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			panic(err)
		}
		pprof.StartCPUProfile(f)
		defer func() {
			pprof.StopCPUProfile()
			f.Close()
		}()
	}

	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		if err != nil {
			panic(err)
		}
		go func() {
			defer f.Close()
			ticker := time.NewTicker(time.Second * 1)
			defer ticker.Stop()
			for {
				<-ticker.C
				pprof.WriteHeapProfile(f)
			}
		}()
	}

	log := logger.NewLogger(*logLevel, *logFilePath, "deaagent", config.Syslog)
	log.Info("Startup: Setting up the loggregator dea logging agent")
	// ** END Config Setup

	agent := deaagent.NewAgent(*instancesJsonFilePath, log)

	go agent.Start()

	killChan := make(chan os.Signal)
	signal.Notify(killChan, os.Interrupt)

	dumpChan := registerGoRoutineDumpSignalChannel()

	for {
		select {
		case <-dumpChan:
			logger.DumpGoRoutine()
		case <-killChan:
			log.Info("Shutting down")
			return
		}
	}
}
Example #20
type dataPart struct {
	Value float32 `json:"value"`
}

type chartData struct {
	DataPart []dataPart `json:"data"`
}

const requestDateFormat = "2006-01"

// 1st parameter is building pid / PlantNo
// 2nd parameter is yyyy-mm
const MonthUrl = "http://suntrol-portal.com/en/plant/graph-json/month/p/1/pid/%s/date/%s/size/page/chart/Column3D/axis/static/output/real"

var log = logger.NewLogger(logger.INFO, "Dataprovider: Suntrol:")

const MAX_ERRORS = 10
const INACTIVE_TIMOUT = 30 //secs

func (dp *dataProvider) Name() string {
	return "Suntrol"
}

func NewDataProvider(initiateData dataproviders.InitiateData,
	term dataproviders.TerminateCallback,
	client *http.Client,
	pvStore dataproviders.PvStore,
	statsStore dataproviders.PlantStatsStore) dataProvider {
	log.Debug("New dataprovider")