func main() {
	flag.Parse()

	//make the out dir
	logger.Component = "SIMULATOR"
	if outDir == "" {
		logger.Fatal("out.dir.unspecified")
	}
	err := os.MkdirAll(outDir, 0777)
	if err != nil {
		logger.Fatal("out.dir.creation.failed", err)
	}

	//set up logging
	outputFile, err := os.Create(filepath.Join(outDir, "simulator.log"))
	if err != nil {
		logger.Fatal("failed.to.create.simulator.log", err)
	}
	logger.Writer = io.MultiWriter(os.Stdout, outputFile)
	cleanup.Register(func() {
		outputFile.Sync()
	})

	//compile the executor
	logger.Info("compiling.executor")
	output, err := exec.Command("go", "install", "github.com/cloudfoundry-incubator/simulator/game_executor").CombinedOutput()
	if err != nil {
		logger.Fatal("failed.to.compile.executor", string(output))
	}

	//write info to the output dir
	writeInfo()

	//start etcd
	logger.Info("starting.etcd", etcdNodes)
	etcd = etcdstorerunner.NewETCDClusterRunner(4001, etcdNodes)
	etcd.Start()

	//set up the bbs
	pool := workerpool.NewWorkerPool(50)
	etcdAdapter = etcdstoreadapter.NewETCDStoreAdapter(etcd.NodeURLS(), pool)
	etcdAdapter.Connect()
	bbs = Bbs.New(etcdAdapter, timeprovider.NewTimeProvider())

	//monitor etcd
	monitorETCD()

	//start executors
	startExecutors()

	cleanup.Register(func() {
		logger.Info("stopping.etcd", etcdNodes)
		etcd.Stop()
	})

	//run the simulator
	runSimulation()

	cleanup.Exit(0)
}
func main() {
	flag.Parse()

	runtime.GOMAXPROCS(runtime.NumCPU())

	table := map[string]Route{}

	transport := &http.Transport{
		ResponseHeaderTimeout: 10 * time.Second,
	}

	handler := &Handler{
		table:     table,
		transport: transport,
	}

	etcdAdapter := etcdstoreadapter.NewETCDStoreAdapter(
		strings.Split(*etcdCluster, ","),
		workerpool.NewWorkerPool(10),
	)

	err := etcdAdapter.Connect()
	if err != nil {
		log.Fatalln("can't connect to etcd:", err)
	}

	go handler.syncTable(etcdAdapter, *syncInterval)

	http.Handle("/", handler)

	//ListenAndServe only returns on failure, so treat its error as fatal
	err = http.ListenAndServe(*listenAddr, nil)
	if err != nil {
		log.Fatalln("server exited:", err)
	}
}
func NewETCD(nodeURLs []string) etcd {
	workpool := workpool.NewWorkPool(1)
	storeAdapter := etcdstoreadapter.NewETCDStoreAdapter(nodeURLs, workpool)

	return etcd{
		storeAdapter: storeAdapter,
	}
}
func New(host string, config *Config, logger *gosteno.Logger) *Loggregator {
	cfcomponent.Logger = logger
	keepAliveInterval := 30 * time.Second

	listener, incomingLogChan := agentlistener.NewAgentListener(fmt.Sprintf("%s:%d", host, config.IncomingPort), logger)
	unmarshaller, messageChan := unmarshaller.NewLogMessageUnmarshaller(config.SharedSecret, incomingLogChan)
	blacklist := blacklist.New(config.BlackListIps)
	sinkManager, appStoreInputChan := sinkmanager.NewSinkManager(config.MaxRetainedLogMessages, config.SkipCertVerify, blacklist, logger)

	workerPool := workerpool.NewWorkerPool(config.EtcdMaxConcurrentRequests)
	storeAdapter := etcdstoreadapter.NewETCDStoreAdapter(config.EtcdUrls, workerPool)

	appStoreCache := cache.NewAppServiceCache()
	appStoreWatcher, newAppServiceChan, deletedAppServiceChan := store.NewAppServiceStoreWatcher(storeAdapter, appStoreCache)
	appStore := store.NewAppServiceStore(storeAdapter, appStoreWatcher)

	return &Loggregator{
		Logger:                logger,
		listener:              listener,
		unmarshaller:          unmarshaller,
		sinkManager:           sinkManager,
		messageChan:           messageChan,
		appStoreInputChan:     appStoreInputChan,
		appStore:              appStore,
		messageRouter:         sinkserver.NewMessageRouter(sinkManager, logger),
		websocketServer:       websocketserver.New(fmt.Sprintf("%s:%d", host, config.OutgoingPort), sinkManager, keepAliveInterval, config.WSMessageBufferSize, logger),
		newAppServiceChan:     newAppServiceChan,
		deletedAppServiceChan: deletedAppServiceChan,
		appStoreWatcher:       appStoreWatcher,
		storeAdapter:          storeAdapter,
	}
}
func (etcd *ETCDClusterRunner) Adapter() storeadapter.StoreAdapter {
	pool, err := workpool.NewWorkPool(10)
	Expect(err).NotTo(HaveOccurred())

	adapter := etcdstoreadapter.NewETCDStoreAdapter(etcd.NodeURLS(), pool)
	adapter.Connect()
	return adapter
}
func storeAdapterProvider(urls []string, concurrentRequests int) storeadapter.StoreAdapter {
	workPool, err := workpool.NewWorkPool(concurrentRequests)
	if err != nil {
		panic(err)
	}

	return etcdstoreadapter.NewETCDStoreAdapter(urls, workPool)
}
func NewStoreAdapter(urls []string, concurrentRequests int) storeadapter.StoreAdapter {
	workPool, err := workpool.NewWorkPool(concurrentRequests)
	if err != nil {
		panic(err)
	}

	etcdStoreAdapter := etcdstoreadapter.NewETCDStoreAdapter(urls, workPool)
	etcdStoreAdapter.Connect()
	return etcdStoreAdapter
}
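// A minimal, hypothetical sketch of how a caller might exercise the adapter
// returned by NewStoreAdapter above. Create, Get, Disconnect, and the
// StoreNode fields come from the cloudfoundry/storeadapter interface; the
// key, value, and TTL are illustrative only (assumes "fmt" and the
// storeadapter packages are imported).
func exampleStoreAdapterUsage(urls []string) error {
	adapter := NewStoreAdapter(urls, 10)
	defer adapter.Disconnect()

	// Write a node with a 60-second TTL...
	err := adapter.Create(storeadapter.StoreNode{
		Key:   "/example/key",
		Value: []byte("example-value"),
		TTL:   60,
	})
	if err != nil {
		return err
	}

	// ...then read it back.
	node, err := adapter.Get("/example/key")
	if err != nil {
		return err
	}

	fmt.Printf("read %s => %s\n", node.Key, node.Value)
	return nil
}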
func (coordinator *MCATCoordinator) StartETCD() {
	etcdPort := 5000 + (coordinator.ParallelNode-1)*10
	coordinator.StoreRunner = etcdstorerunner.NewETCDClusterRunner(etcdPort, 1)
	coordinator.StoreRunner.Start()

	coordinator.StoreAdapter = etcdstoreadapter.NewETCDStoreAdapter(coordinator.StoreRunner.NodeURLS(), workerpool.NewWorkerPool(coordinator.Conf.StoreMaxConcurrentRequests))
	err := coordinator.StoreAdapter.Connect()
	Ω(err).ShouldNot(HaveOccurred())
}
func NewETCD(nodeURLs []string, maxWorkers uint) (*etcd, error) {
	workpool, err := workpool.NewWorkPool(int(maxWorkers))
	if err != nil {
		return nil, err
	}

	storeAdapter := etcdstoreadapter.NewETCDStoreAdapter(nodeURLs, workpool)

	return &etcd{
		storeAdapter: storeAdapter,
	}, nil
}
func connectToStoreAdapter(l logger.Logger, conf *config.Config) (storeadapter.StoreAdapter, metricsaccountant.UsageTracker) {
	var adapter storeadapter.StoreAdapter
	workerPool := workerpool.NewWorkerPool(conf.StoreMaxConcurrentRequests)
	adapter = etcdstoreadapter.NewETCDStoreAdapter(conf.StoreURLs, workerPool)
	err := adapter.Connect()
	if err != nil {
		l.Error("Failed to connect to the store", err)
		os.Exit(1)
	}

	return adapter, workerPool
}
func main() {
	flag.Parse()

	datadogApiKey := os.Getenv("DATADOG_API_KEY")
	datadogAppKey := os.Getenv("DATADOG_APP_KEY")

	var datadogClient *datadog.Client
	if datadogApiKey != "" {
		datadogClient = datadog.NewClient(datadogApiKey, datadogAppKey)
	}

	store := etcdstoreadapter.NewETCDStoreAdapter(
		strings.Split(*etcdMachines, ","),
		workerpool.NewWorkerPool(50),
	)

	err := store.Connect()
	if err != nil {
		log.Fatalln("failed to connect to store:", err)
	}

	if *emitStates {
		// logging mode
		etcd_logger.EmitRunOnceStates(datadogClient, store, strings.Split(*etcdMachines, ","))
	} else {
		// stampede mode
		runOnce := &models.RunOnce{
			Actions: []models.ExecutorAction{
				{
					models.RunAction{
						Script:  *script,
						Timeout: *timeout,
					},
				},
			},
			Stack:    *stack,
			MemoryMB: *memoryMB,
			DiskMB:   *diskMB,
		}

		if *logGuid != "" {
			runOnce.Log = models.LogConfig{
				Guid:       *logGuid,
				SourceName: *logSourceName,
			}
		}

		stampede.RunonceStampede(bbs.New(store, timeprovider.NewTimeProvider()), datadogClient, runOnce, *runOnceCount)
	}
}
func (etcd *ETCDClusterRunner) RetryableAdapter(workPoolSize int) storeadapter.StoreAdapter {
	pool, err := workpool.NewWorkPool(workPoolSize)
	Expect(err).NotTo(HaveOccurred())

	adapter := storeadapter.NewRetryable(
		etcdstoreadapter.NewETCDStoreAdapter(etcd.NodeURLS(), pool),
		clock.NewClock(),
		storeadapter.ExponentialRetryPolicy{},
	)
	adapter.Connect()

	return adapter
}
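// A hedged sketch of a custom retry policy that could stand in for the
// storeadapter.ExponentialRetryPolicy used above. It assumes the
// RetryPolicy contract is DelayFor(attempt), returning the delay before
// the next attempt and whether to retry at all; verify the method
// signature against the storeadapter version actually vendored.
type fixedRetryPolicy struct {
	delay    time.Duration
	maxTries uint
}

func (p fixedRetryPolicy) DelayFor(attempt uint) (time.Duration, bool) {
	if attempt >= p.maxTries {
		return 0, false // give up after maxTries attempts
	}
	return p.delay, true // wait a constant delay, then retry
}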
func connectToStoreAdapter(l logger.Logger, conf *config.Config, usage *usageTracker) storeadapter.StoreAdapter {
	var adapter storeadapter.StoreAdapter

	var around workpool.AroundWork = workpool.DefaultAround
	if usage != nil {
		around = usage
	}

	workPool := workpool.New(conf.StoreMaxConcurrentRequests, 0, around)
	adapter = etcdstoreadapter.NewETCDStoreAdapter(conf.StoreURLs, workPool)
	err := adapter.Connect()
	if err != nil {
		l.Error("Failed to connect to the store", err)
		os.Exit(1)
	}

	return adapter
}
func connectToStoreAdapter(l logger.Logger, conf *config.Config) (storeadapter.StoreAdapter, metricsaccountant.UsageTracker) {
	var adapter storeadapter.StoreAdapter
	workerPool := workerpool.NewWorkerPool(conf.StoreMaxConcurrentRequests)

	if conf.StoreType == "etcd" {
		adapter = etcdstoreadapter.NewETCDStoreAdapter(conf.StoreURLs, workerPool)
	} else if conf.StoreType == "ZooKeeper" {
		adapter = zookeeperstoreadapter.NewZookeeperStoreAdapter(conf.StoreURLs, workerPool, buildTimeProvider(l), time.Second)
	} else {
		l.Error(fmt.Sprintf("Unknown store type %s. Choose one of 'etcd' or 'ZooKeeper'", conf.StoreType), fmt.Errorf("unknown store type"))
		os.Exit(1)
	}

	err := adapter.Connect()
	if err != nil {
		l.Error("Failed to connect to the store", err)
		os.Exit(1)
	}

	return adapter, workerPool
}
func main() {
	flag.Parse()

	cleanup.Register(func() {
		logger.Info("executor.shuttingdown")
		close(stop)
		tasks.Wait()
		logger.Info("executor.shutdown")
	})

	logger.Component = fmt.Sprintf("EXECUTOR %s", *executorID)

	lock = &sync.Mutex{}
	currentMemory = *maxMemory

	etcdAdapter := etcdstoreadapter.NewETCDStoreAdapter(
		strings.Split(*etcdCluster, ","),
		workerpool.NewWorkerPool(10),
	)
	err := etcdAdapter.Connect()
	if err != nil {
		logger.Fatal("etcd.connect.fatal", err)
	}

	tasks = &sync.WaitGroup{}
	stop = make(chan bool)

	bbs := Bbs.New(etcdAdapter, timeprovider.NewTimeProvider())

	ready := make(chan bool, 1)
	err = maintainPresence(bbs, ready)
	if err != nil {
		logger.Fatal("executor.initializing-presence.failed", err)
	}

	go handleRunOnces(bbs)
	go convergeRunOnces(bbs)

	<-ready

	logger.Info("executor.up")

	select {}
}
func main() {
	var err error

	runtime.GOMAXPROCS(runtime.NumCPU())
	rand.Seed(time.Now().UnixNano())

	flag.Parse()

	executorUUID, err := uuid.NewV4()
	if err != nil {
		log.Fatalln("could not generate guid:", err)
	}
	executorID = executorUUID.String()

	cleanup.Register(func() {
		once.Do(func() {
			logger.Info("shutting-down", map[string]interface{}{})
			close(stop)
			tasks.Wait()
			logger.Info("shutdown", map[string]interface{}{})
		})
	})

	natsClient := yagnats.NewClient()

	natsMembers := []yagnats.ConnectionProvider{}
	for _, addr := range strings.Split(*natsAddresses, ",") {
		natsMembers = append(
			natsMembers,
			&yagnats.ConnectionInfo{addr, *natsUsername, *natsPassword},
		)
	}

	natsInfo := &yagnats.ConnectionCluster{Members: natsMembers}

	err = logger.Connect(natsInfo)
	if err != nil {
		log.Fatalln("could not connect logger:", err)
	}

	err = natsClient.Connect(natsInfo)
	if err != nil {
		log.Fatalln("could not connect to nats:", err)
	}

	logger.Component = fmt.Sprintf("executor.%s", executorID)

	etcdAdapter := etcdstoreadapter.NewETCDStoreAdapter(
		strings.Split(*etcdCluster, ","),
		workerpool.NewWorkerPool(10),
	)
	err = etcdAdapter.Connect()
	if err != nil {
		logger.Fatal("etcd.connect-failed", map[string]interface{}{
			"error": err.Error(),
		})
	}

	bbs := bbs.New(bbs.NewHurlerKicker(*hurlerAddress), etcdAdapter, timeprovider.NewTimeProvider())

	ready := make(chan bool, 1)

	err = maintainPresence(bbs, ready)
	if err != nil {
		logger.Fatal("initializing-presence", map[string]interface{}{
			"error": err.Error(),
		})
	}

	err = registerHandler(etcdAdapter, *listenAddr, ready)
	if err != nil {
		logger.Fatal("initializing-route", map[string]interface{}{
			"error": err.Error(),
		})
	}

	go handleTasks(bbs, *listenAddr)
	go convergeTasks(bbs)

	<-ready
	<-ready

	logger.Info("up", map[string]interface{}{
		"executor": executorID,
	})

	select {}
}
func (etcd *ETCDClusterRunner) Adapter() storeadapter.StoreAdapter {
	pool := workpool.NewWorkPool(10)
	adapter := etcdstoreadapter.NewETCDStoreAdapter(etcd.NodeURLS(), pool)
	adapter.Connect()
	return adapter
}
func main() {
	flag.Parse()
	runtime.GOMAXPROCS(runtime.NumCPU())

	//make the out dir
	logger.Component = "SIMULATOR"
	if outDir == "" {
		logger.Fatal("out.dir.unspecified")
	}
	err := os.MkdirAll(outDir, 0777)
	if err != nil {
		logger.Fatal("out.dir.creation.failed", err)
	}

	//set up logging
	outputFile, err := os.Create(filepath.Join(outDir, "simulator.log"))
	if err != nil {
		logger.Fatal("failed.to.create.simulator.log", err)
	}
	logger.Writer = io.MultiWriter(os.Stdout, outputFile)
	cleanup.Register(func() {
		outputFile.Sync()
	})

	//connect to nats
	natsClient := yagnats.NewClient()

	natsMembers := []yagnats.ConnectionProvider{}
	for _, addr := range strings.Split(*natsAddresses, ",") {
		natsMembers = append(
			natsMembers,
			&yagnats.ConnectionInfo{addr, *natsUsername, *natsPassword},
		)
	}

	natsInfo := &yagnats.ConnectionCluster{Members: natsMembers}

	err = natsClient.Connect(natsInfo)
	if err != nil {
		logger.Fatal("could not connect to nats:", err)
	}

	logger.Component = "simulator"

	//connect to etcd
	etcdAdapter := etcdstoreadapter.NewETCDStoreAdapter(
		strings.Split(*etcdCluster, ","),
		workerpool.NewWorkerPool(10),
	)
	err = etcdAdapter.Connect()
	if err != nil {
		logger.Fatal("etcd.connect-failed", map[string]interface{}{
			"error": err.Error(),
		})
	}

	//write info to the output dir
	writeInfo()

	//monitor etcd
	monitorETCD(etcdAdapter)

	//run the simulator
	runSimulation(natsClient)

	cleanup.Exit(0)
}
"trafficcontroller/config" "trafficcontroller/dopplerproxy" "trafficcontroller/listener" "trafficcontroller/marshaller" "trafficcontroller/profiler" "trafficcontroller/serveraddressprovider" "trafficcontroller/uaa_client" ) var DefaultStoreAdapterProvider = func(urls []string, concurrentRequests int) storeadapter.StoreAdapter { workPool, err := workpool.NewWorkPool(concurrentRequests) if err != nil { panic(err) } return etcdstoreadapter.NewETCDStoreAdapter(urls, workPool) } const EtcdQueryInterval = 5 * time.Second var ( logFilePath = flag.String("logFile", "", "The agent log file, defaults to STDOUT") logLevel = flag.Bool("debug", false, "Debug logging") disableAccessControl = flag.Bool("disableAccessControl", false, "always all access to app logs") configFile = flag.String("config", "config/loggregator_trafficcontroller.json", "Location of the loggregator trafficcontroller config json file") cpuprofile = flag.String("cpuprofile", "", "write cpu profile to file") memprofile = flag.String("memprofile", "", "write memory profile to this file") ) func main() { flag.Parse()
	store        Store
	storeAdapter storeadapter.StoreAdapter
	conf         *config.Config
	dea          appfixture.DeaFixture
	app1         appfixture.AppFixture
	app2         appfixture.AppFixture
	app3         appfixture.AppFixture
	app4         appfixture.AppFixture
	crashCount   []models.CrashCount
)

conf, _ = config.DefaultConfig()

BeforeEach(func() {
	storeAdapter = etcdstoreadapter.NewETCDStoreAdapter(etcdRunner.NodeURLS(), workerpool.NewWorkerPool(conf.StoreMaxConcurrentRequests))
	err := storeAdapter.Connect()
	Ω(err).ShouldNot(HaveOccurred())

	store = NewStore(conf, storeAdapter, fakelogger.NewFakeLogger())

	dea = appfixture.NewDeaFixture()

	app1 = dea.GetApp(0)
	app2 = dea.GetApp(1)
	app3 = dea.GetApp(2)
	app4 = dea.GetApp(3)

	actualState := []models.InstanceHeartbeat{
		app1.InstanceAtIndex(0).Heartbeat(),
		app1.InstanceAtIndex(1).Heartbeat(),
		app1.InstanceAtIndex(2).Heartbeat(),
func main() {
	flag.Parse()

	config, logger := parseConfig(*debug, *configFile, *logFilePath)
	dropsonde.Initialize(config.MetronAddress, "syslog_drain_binder")

	workPool, err := workpool.NewWorkPool(config.EtcdMaxConcurrentRequests)
	if err != nil {
		panic(err)
	}

	adapter := etcdstoreadapter.NewETCDStoreAdapter(config.EtcdUrls, workPool)

	updateInterval := time.Duration(config.UpdateIntervalSeconds) * time.Second
	politician := elector.NewElector(config.InstanceName, adapter, updateInterval, logger)

	drainTTL := time.Duration(config.DrainUrlTtlSeconds) * time.Second
	store := etcd_syslog_drain_store.NewEtcdSyslogDrainStore(adapter, drainTTL, logger)

	ticker := time.NewTicker(updateInterval)
	for {
		select {
		case <-cfcomponent.RegisterGoRoutineDumpSignalChannel():
			cfcomponent.DumpGoRoutine()
		case <-ticker.C:
			if politician.IsLeader() {
				err = politician.StayAsLeader()
				if err != nil {
					logger.Errorf("Error when staying leader: %s", err.Error())
					politician.Vacate()
					continue
				}
			} else {
				err = politician.RunForElection()
				if err != nil {
					logger.Errorf("Error when running for leader: %s", err.Error())
					politician.Vacate()
					continue
				}
			}

			logger.Debugf("Polling %s for updates", config.CloudControllerAddress)
			drainUrls, err := Poll(config.CloudControllerAddress, config.BulkApiUsername, config.BulkApiPassword, config.PollingBatchSize, config.SkipCertVerify)
			if err != nil {
				logger.Errorf("Error when polling cloud controller: %s", err.Error())
				politician.Vacate()
				continue
			}

			metrics.IncrementCounter("pollCount")

			var totalDrains int
			for _, drainList := range drainUrls {
				totalDrains += len(drainList)
			}

			metrics.SendValue("totalDrains", float64(totalDrains), "drains")

			logger.Debugf("Updating drain URLs for %d application(s)", len(drainUrls))

			err = store.UpdateDrains(drainUrls)
			if err != nil {
				logger.Errorf("Error when updating ETCD: %s", err.Error())
				politician.Vacate()
				continue
			}
		}
	}
}