// connectToStoreAdapter builds an etcd-backed store adapter from the given
// configuration and connects to it, exiting the process on any failure.
func connectToStoreAdapter(l logger.Logger, conf *config.Config) storeadapter.StoreAdapter {
	var adapter storeadapter.StoreAdapter

	workPool, err := workpool.NewWorkPool(conf.StoreMaxConcurrentRequests)
	if err != nil {
		l.Error("Failed to create workpool", err)
		os.Exit(1)
	}

	options := &etcdstoreadapter.ETCDOptions{
		ClusterUrls: conf.StoreURLs,
	}
	adapter, err = etcdstoreadapter.New(options, workPool)
	if err != nil {
		l.Error("Failed to create the store adapter", err)
		os.Exit(1)
	}

	err = adapter.Connect()
	if err != nil {
		l.Error("Failed to connect to the store", err)
		os.Exit(1)
	}

	return adapter
}
// initializeReceptorBBS wires up the etcd adapter and consul session that back
// the receptor BBS, logging fatally if any dependency fails to construct.
func initializeReceptorBBS(etcdOptions *etcdstoreadapter.ETCDOptions, logger lager.Logger) Bbs.ReceptorBBS {
	workPool, err := workpool.NewWorkPool(100)
	if err != nil {
		logger.Fatal("failed-to-construct-etcd-adapter-workpool", err, lager.Data{"num-workers": 100}) // should never happen
	}

	etcdAdapter, err := etcdstoreadapter.New(etcdOptions, workPool)
	if err != nil {
		logger.Fatal("failed-to-construct-etcd-tls-client", err)
	}

	client, err := consuladapter.NewClient(*consulCluster)
	if err != nil {
		logger.Fatal("new-client-failed", err)
	}

	sessionMgr := consuladapter.NewSessionManager(client)
	consulSession, err := consuladapter.NewSession("receptor", *lockTTL, client, sessionMgr)
	if err != nil {
		logger.Fatal("consul-session-failed", err)
	}

	return Bbs.NewReceptorBBS(etcdAdapter, consulSession, *taskHandlerAddress, clock.NewClock(), logger)
}
// StartETCD boots a single-node etcd cluster on a port derived from the
// parallel test node index and connects a store adapter to it.
func (coordinator *MCATCoordinator) StartETCD() {
	etcdPort := 5000 + (coordinator.ParallelNode-1)*10
	coordinator.StoreRunner = etcdstorerunner.NewETCDClusterRunner(etcdPort, 1, nil)
	coordinator.StoreRunner.Start()

	pool, err := workpool.NewWorkPool(coordinator.Conf.StoreMaxConcurrentRequests)
	Expect(err).NotTo(HaveOccurred())

	coordinator.StoreAdapter, err = etcdstoreadapter.New(
		&etcdstoreadapter.ETCDOptions{ClusterUrls: coordinator.StoreRunner.NodeURLS()},
		pool,
	)
	Expect(err).NotTo(HaveOccurred())

	err = coordinator.StoreAdapter.Connect()
	Expect(err).NotTo(HaveOccurred())
}
// NewETCD constructs an etcd store backed by a work pool of maxWorkers
// workers. (The pool variable is renamed from the original to avoid shadowing
// the workpool package.)
func NewETCD(nodeURLs []string, maxWorkers uint) (*etcd, error) {
	workPool, err := workpool.NewWorkPool(int(maxWorkers))
	if err != nil {
		return nil, err
	}

	storeAdapter, err := etcdstoreadapter.New(&etcdstoreadapter.ETCDOptions{ClusterUrls: nodeURLs}, workPool)
	if err != nil {
		return nil, err
	}

	return &etcd{
		storeAdapter: storeAdapter,
	}, nil
}
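A hedged usage sketch for NewETCD: the node URL, worker count, and caller-side logging below are placeholders for illustration, not values from the original codebase.

// Hypothetical caller: construct the etcd wrapper and handle the error
// instead of panicking. URL and worker count are placeholder values.
store, err := NewETCD([]string{"http://127.0.0.1:4001"}, 50)
if err != nil {
	log.Fatalf("failed to construct etcd store: %v", err) // stdlib "log", for illustration
}
_ = store // use the store via its exported methods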
// defaultStoreAdapterProvider builds an etcd store adapter, panicking on any
// construction failure.
func defaultStoreAdapterProvider(urls []string, concurrentRequests int) storeadapter.StoreAdapter {
	workPool, err := workpool.NewWorkPool(concurrentRequests)
	if err != nil {
		panic(err)
	}

	options := &etcdstoreadapter.ETCDOptions{
		ClusterUrls: urls,
	}
	etcdStoreAdapter, err := etcdstoreadapter.New(options, workPool)
	if err != nil {
		panic(err)
	}

	return etcdStoreAdapter
}
// storeAdapterProvider is the error-returning counterpart of
// defaultStoreAdapterProvider: the caller decides how to handle failure.
func storeAdapterProvider(urls []string, concurrentRequests int) (storeadapter.StoreAdapter, error) {
	workPool, err := workpool.NewWorkPool(concurrentRequests)
	if err != nil {
		return nil, err
	}

	options := &etcdstoreadapter.ETCDOptions{
		ClusterUrls: urls,
	}
	etcdAdapter, err := etcdstoreadapter.New(options, workPool)
	if err != nil {
		return nil, err
	}

	return etcdAdapter, nil
}
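A minimal caller-side sketch for storeAdapterProvider, assuming placeholder URLs and concurrency; Connect is part of the storeadapter.StoreAdapter interface used throughout these snippets.

// Hypothetical error handling at the call site; all values are placeholders.
adapter, err := storeAdapterProvider([]string{"http://127.0.0.1:4001"}, 30)
if err != nil {
	return fmt.Errorf("creating store adapter: %s", err)
}
if err := adapter.Connect(); err != nil {
	return fmt.Errorf("connecting to store: %s", err)
}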
// newAdapter builds a store adapter for the running cluster, enabling TLS
// only when client SSL configuration is provided.
func (etcd *ETCDClusterRunner) newAdapter(clientSSL *SSLConfig) storeadapter.StoreAdapter {
	pool, err := workpool.NewWorkPool(10)
	Expect(err).NotTo(HaveOccurred())

	options := &etcdstoreadapter.ETCDOptions{
		ClusterUrls: etcd.NodeURLS(),
		IsSSL:       false,
	}
	if clientSSL != nil {
		options.CertFile = clientSSL.CertFile
		options.KeyFile = clientSSL.KeyFile
		options.CAFile = clientSSL.CAFile
		options.IsSSL = true
	}

	adapter, err := etcdstoreadapter.New(options, pool)
	Expect(err).NotTo(HaveOccurred())
	return adapter
}
// defaultStoreAdapterProvider builds an etcd store adapter from the config,
// enabling client TLS when EtcdRequireTLS is set; it panics on failure.
func defaultStoreAdapterProvider(conf *config.Config) storeadapter.StoreAdapter {
	workPool, err := workpool.NewWorkPool(conf.EtcdMaxConcurrentRequests)
	if err != nil {
		panic(err)
	}

	options := &etcdstoreadapter.ETCDOptions{
		ClusterUrls: conf.EtcdUrls,
	}
	if conf.EtcdRequireTLS {
		options.IsSSL = true
		options.CertFile = conf.EtcdTLSClientConfig.CertFile
		options.KeyFile = conf.EtcdTLSClientConfig.KeyFile
		options.CAFile = conf.EtcdTLSClientConfig.CAFile
	}

	etcdStoreAdapter, err := etcdstoreadapter.New(options, workPool)
	if err != nil {
		panic(err)
	}

	return etcdStoreAdapter
}
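For comparison, the fully populated TLS options this provider ends up building when EtcdRequireTLS is set; every URL and file path below is a placeholder, not a value from any real deployment.

// Illustrative only: all values are placeholders.
options := &etcdstoreadapter.ETCDOptions{
	ClusterUrls: []string{"https://etcd.example.internal:4001"},
	IsSSL:       true,
	CertFile:    "/path/to/client.crt",
	KeyFile:     "/path/to/client.key",
	CAFile:      "/path/to/ca.crt",
}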
	updateNode        storeadapter.StoreNode
	updateCallback    func(all map[string]string, preferred map[string]string)
	callbackCount     *int32
	preferredCallback func(key string) bool
	preferredCount    *int32
)

BeforeEach(func() {
	workPool, err := workpool.NewWorkPool(10)
	Expect(err).NotTo(HaveOccurred())

	options := &etcdstoreadapter.ETCDOptions{
		ClusterUrls: etcdRunner.NodeURLS(),
	}
	storeAdapter, err = etcdstoreadapter.New(options, workPool)
	Expect(err).NotTo(HaveOccurred())
	err = storeAdapter.Connect()
	Expect(err).NotTo(HaveOccurred())

	node = storeadapter.StoreNode{
		Key:   dopplerservice.LEGACY_ROOT + "/z1/loggregator_z1/0",
		Value: []byte("10.0.0.1"),
	}

	callbackCount = new(int32)
	count := callbackCount
	updateCallback = func(a map[string]string, p map[string]string) {
		atomic.AddInt32(count, 1)
	}
	store        Store
	storeAdapter storeadapter.StoreAdapter
	conf         *config.Config
	app1         appfixture.AppFixture
	app2         appfixture.AppFixture
	app3         appfixture.AppFixture
)

BeforeEach(func() {
	var err error
	conf, err = config.DefaultConfig()
	Expect(err).NotTo(HaveOccurred())

	wpool, err := workpool.NewWorkPool(conf.StoreMaxConcurrentRequests)
	Expect(err).NotTo(HaveOccurred())
	storeAdapter, err = etcdstoreadapter.New(
		&etcdstoreadapter.ETCDOptions{ClusterUrls: etcdRunner.NodeURLS()},
		wpool,
	)
	Expect(err).NotTo(HaveOccurred())
	err = storeAdapter.Connect()
	Expect(err).NotTo(HaveOccurred())

	app1 = appfixture.NewAppFixture()
	app2 = appfixture.NewAppFixture()
	app3 = appfixture.NewAppFixture()

	store = NewStore(conf, storeAdapter, fakelogger.NewFakeLogger())
})

AfterEach(func() {
	storeAdapter.Disconnect()
})
func main() {
	flag.Parse()

	config, err := parseConfig(*debug, *configFile, *logFilePath)
	if err != nil {
		panic(err)
	}

	log := logger.NewLogger(*debug, *logFilePath, "syslog_drain_binder", config.Syslog)
	dropsonde.Initialize(config.MetronAddress, "syslog_drain_binder")

	workPool, err := workpool.NewWorkPool(config.EtcdMaxConcurrentRequests)
	if err != nil {
		panic(err)
	}

	options := &etcdstoreadapter.ETCDOptions{
		ClusterUrls: config.EtcdUrls,
	}
	adapter, err := etcdstoreadapter.New(options, workPool)
	if err != nil {
		panic(err)
	}

	updateInterval := time.Duration(config.UpdateIntervalSeconds) * time.Second
	politician := elector.NewElector(config.InstanceName, adapter, updateInterval, log)

	drainTTL := time.Duration(config.DrainUrlTtlSeconds) * time.Second
	store := etcd_syslog_drain_store.NewEtcdSyslogDrainStore(adapter, drainTTL, log)

	dumpChan := registerGoRoutineDumpSignalChannel()
	ticker := time.NewTicker(updateInterval)
	for {
		select {
		case <-dumpChan:
			logger.DumpGoRoutine()
		case <-ticker.C:
			// Only the current leader proceeds to poll Cloud Controller and
			// write to etcd; other instances keep running for election.
			if politician.IsLeader() {
				err = politician.StayAsLeader()
				if err != nil {
					log.Errorf("Error when staying leader: %s", err.Error())
					politician.Vacate()
					continue
				}
			} else {
				err = politician.RunForElection()
				if err != nil {
					log.Errorf("Error when running for leader: %s", err.Error())
					politician.Vacate()
					continue
				}
			}

			log.Debugf("Polling %s for updates", config.CloudControllerAddress)
			drainUrls, err := Poll(config.CloudControllerAddress, config.BulkApiUsername, config.BulkApiPassword, config.PollingBatchSize, config.SkipCertVerify)
			if err != nil {
				log.Errorf("Error when polling cloud controller: %s", err.Error())
				politician.Vacate()
				continue
			}

			metrics.IncrementCounter("pollCount")

			var totalDrains int
			for _, drainList := range drainUrls {
				totalDrains += len(drainList)
			}
			metrics.SendValue("totalDrains", float64(totalDrains), "drains")

			log.Debugf("Updating drain URLs for %d application(s)", len(drainUrls))
			err = store.UpdateDrains(drainUrls)
			if err != nil {
				log.Errorf("Error when updating ETCD: %s", err.Error())
				politician.Vacate()
				continue
			}
		}
	}
}