Example #1
func NewETCD(nodeURLs []string) etcd {
	workpool := workpool.NewWorkPool(1)
	storeAdapter := etcdstoreadapter.NewETCDStoreAdapter(nodeURLs, workpool)
	return etcd{
		storeAdapter: storeAdapter,
	}
}
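This example predates the two-value form of NewWorkPool. Examples 10 and 13 below show later revisions of this same constructor that receive and propagate the error the newer NewWorkPool returns.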
Example #2
func (etcd *ETCDClusterRunner) Adapter() storeadapter.StoreAdapter {
	pool, err := workpool.NewWorkPool(10)
	Expect(err).NotTo(HaveOccurred())
	adapter := etcdstoreadapter.NewETCDStoreAdapter(etcd.NodeURLS(), pool)
	err = adapter.Connect()
	Expect(err).NotTo(HaveOccurred())
	return adapter
}
Example #3
func NewGardenStore(
	gardenClient garden.Client,
	containerOwnerName string,
	containerMaxCPUShares uint64,
	containerInodeLimit uint64,
	healthyMonitoringInterval time.Duration,
	unhealthyMonitoringInterval time.Duration,
	transformer *transformer.Transformer,
	clock clock.Clock,
	eventEmitter EventEmitter,
	healthCheckWorkPoolSize int,
) (*GardenStore, error) {
	workPool, err := workpool.NewWorkPool(healthCheckWorkPoolSize)
	if err != nil {
		return nil, err
	}

	return &GardenStore{
		gardenClient:       gardenClient,
		exchanger:          NewExchanger(containerOwnerName, containerMaxCPUShares, containerInodeLimit),
		containerOwnerName: containerOwnerName,

		healthyMonitoringInterval:   healthyMonitoringInterval,
		unhealthyMonitoringInterval: unhealthyMonitoringInterval,

		transformer: transformer,
		clock:       clock,

		eventEmitter: eventEmitter,

		runningProcesses: map[string]ifrit.Process{},

		workPool: workPool,
	}, nil
}
Example #4
func initializeReceptorBBS(etcdOptions *etcdstoreadapter.ETCDOptions, logger lager.Logger) Bbs.ReceptorBBS {
	workPool, err := workpool.NewWorkPool(100)
	if err != nil {
		logger.Fatal("failed-to-construct-etcd-adapter-workpool", err, lager.Data{"num-workers": 100}) // should never happen
	}

	etcdAdapter, err := etcdstoreadapter.New(etcdOptions, workPool)

	if err != nil {
		logger.Fatal("failed-to-construct-etcd-tls-client", err)
	}

	client, err := consuladapter.NewClient(*consulCluster)
	if err != nil {
		logger.Fatal("new-client-failed", err)
	}

	sessionMgr := consuladapter.NewSessionManager(client)
	consulSession, err := consuladapter.NewSession("receptor", *lockTTL, client, sessionMgr)
	if err != nil {
		logger.Fatal("consul-session-failed", err)
	}

	return Bbs.NewReceptorBBS(etcdAdapter, consulSession, *taskHandlerAddress, clock.NewClock(), logger)
}
Example #5
func connectToStoreAdapter(l logger.Logger, conf *config.Config) storeadapter.StoreAdapter {
	var adapter storeadapter.StoreAdapter
	workPool, err := workpool.NewWorkPool(conf.StoreMaxConcurrentRequests)
	if err != nil {
		l.Error("Failed to create workpool", err)
		os.Exit(1)
	}

	options := &etcdstoreadapter.ETCDOptions{
		ClusterUrls: conf.StoreURLs,
	}
	adapter, err = etcdstoreadapter.New(options, workPool)
	if err != nil {
		l.Error("Failed to create the store adapter", err)
		os.Exit(1)
	}

	err = adapter.Connect()
	if err != nil {
		l.Error("Failed to connect to the store", err)
		os.Exit(1)
	}

	return adapter
}
Example #6
func initializeNatsEmitter(natsClient diegonats.NATSClient, logger lager.Logger) nats_emitter.NATSEmitter {
	workPool, err := workpool.NewWorkPool(*routeEmittingWorkers)
	if err != nil {
		logger.Fatal("failed-to-construct-nats-emitter-workpool", err, lager.Data{"num-workers": *routeEmittingWorkers}) // should never happen
	}

	return nats_emitter.New(natsClient, workPool, logger)
}
Example #7
func storeAdapterProvider(urls []string, concurrentRequests int) storeadapter.StoreAdapter {
	workPool, err := workpool.NewWorkPool(concurrentRequests)
	if err != nil {
		panic(err)
	}

	return etcdstoreadapter.NewETCDStoreAdapter(urls, workPool)
}
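Example 17 below is a later revision of this same provider that returns the error to its caller instead of panicking.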
Example #8
func NewStoreAdapter(urls []string, concurrentRequests int) storeadapter.StoreAdapter {
	workPool, err := workpool.NewWorkPool(concurrentRequests)
	if err != nil {
		panic(err)
	}
	etcdStoreAdapter := etcdstoreadapter.NewETCDStoreAdapter(urls, workPool)
	err = etcdStoreAdapter.Connect()
	if err != nil {
		panic(err)
	}
	return etcdStoreAdapter
}
Example #9
func (coordinator *MCATCoordinator) StartETCD() {
	etcdPort := 5000 + (coordinator.ParallelNode-1)*10
	coordinator.StoreRunner = etcdstorerunner.NewETCDClusterRunner(etcdPort, 1)
	coordinator.StoreRunner.Start()

	coordinator.StoreAdapter = etcdstoreadapter.NewETCDStoreAdapter(coordinator.StoreRunner.NodeURLS(),
		workpool.NewWorkPool(coordinator.Conf.StoreMaxConcurrentRequests))
	err := coordinator.StoreAdapter.Connect()
	Ω(err).ShouldNot(HaveOccurred())
}
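Example 11 below shows the later revision of this coordinator method, updated for the two-value NewWorkPool and the options-based etcdstoreadapter.New constructor.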
Example #10
func NewETCD(nodeURLs []string, maxWorkers uint) (*etcd, error) {
	workpool, err := workpool.NewWorkPool(int(maxWorkers))
	if err != nil {
		return nil, err
	}

	storeAdapter := etcdstoreadapter.NewETCDStoreAdapter(nodeURLs, workpool)
	return &etcd{
		storeAdapter: storeAdapter,
	}, nil
}
Example #11
func (coordinator *MCATCoordinator) StartETCD() {
	etcdPort := 5000 + (coordinator.ParallelNode-1)*10
	coordinator.StoreRunner = etcdstorerunner.NewETCDClusterRunner(etcdPort, 1, nil)
	coordinator.StoreRunner.Start()

	pool, err := workpool.NewWorkPool(coordinator.Conf.StoreMaxConcurrentRequests)
	Expect(err).NotTo(HaveOccurred())

	coordinator.StoreAdapter, err = etcdstoreadapter.New(&etcdstoreadapter.ETCDOptions{ClusterUrls: coordinator.StoreRunner.NodeURLS()}, pool)
	Expect(err).NotTo(HaveOccurred())
	err = coordinator.StoreAdapter.Connect()
	Expect(err).NotTo(HaveOccurred())
}
Example #12
func NewClientProvider(
	totalCapacity executor.ExecutorResources,
	allocationStore AllocationStore,
	gardenStore GardenStore,
	eventHub event.Hub,
	lockManager keyed_lock.LockManager,
	workPoolSettings executor.WorkPoolSettings,
) (executor.ClientProvider, error) {
	creationWorkPool, err := workpool.NewWorkPool(workPoolSettings.CreateWorkPoolSize)
	if err != nil {
		return nil, err
	}
	deletionWorkPool, err := workpool.NewWorkPool(workPoolSettings.DeleteWorkPoolSize)
	if err != nil {
		return nil, err
	}
	readWorkPool, err := workpool.NewWorkPool(workPoolSettings.ReadWorkPoolSize)
	if err != nil {
		return nil, err
	}
	metricsWorkPool, err := workpool.NewWorkPool(workPoolSettings.MetricsWorkPoolSize)
	if err != nil {
		return nil, err
	}

	return &clientProvider{
		totalCapacity:        totalCapacity,
		allocationStore:      allocationStore,
		gardenStore:          gardenStore,
		eventHub:             eventHub,
		containerLockManager: lockManager,
		resourcesLock:        new(sync.Mutex),
		creationWorkPool:     creationWorkPool,
		deletionWorkPool:     deletionWorkPool,
		readWorkPool:         readWorkPool,
		metricsWorkPool:      metricsWorkPool,
		healthy:              true,
	}, nil
}
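One caveat: if a later NewWorkPool call fails here, the pools already created are dropped without being stopped. A defensive variant would stop them before returning. A sketch of the second step rewritten that way, assuming the Stop method shown in Example 14:

	deletionWorkPool, err := workpool.NewWorkPool(workPoolSettings.DeleteWorkPoolSize)
	if err != nil {
		creationWorkPool.Stop() // release the pool created above before bailing out
		return nil, err
	}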
Example #13
func NewETCD(nodeURLs []string, maxWorkers uint) (*etcd, error) {
	workpool, err := workpool.NewWorkPool(int(maxWorkers))
	if err != nil {
		return nil, err
	}

	storeAdapter, err := etcdstoreadapter.New(&etcdstoreadapter.ETCDOptions{ClusterUrls: nodeURLs}, workpool)
	if err != nil {
		return nil, err
	}
	return &etcd{
		storeAdapter: storeAdapter,
	}, nil
}
Example #14
func (twp *TaskCompletionWorkPool) Run(signals <-chan os.Signal, ready chan<- struct{}) error {
	cbWorkPool, err := workpool.NewWorkPool(twp.maxWorkers)
	if err != nil {
		twp.logger.Error("callback-workpool-creation-failed", err)
		return err
	}
	twp.callbackWorkPool = cbWorkPool
	close(ready)

	<-signals
	go twp.callbackWorkPool.Stop()

	return nil
}
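This Run method follows the ifrit.Runner contract already visible in Example 3's ifrit.Process field: initialize, close(ready) to signal readiness, then block on signals and shut down. A caller would typically drive it like this (a sketch, assuming the github.com/tedsuo/ifrit package these projects use):

	process := ifrit.Invoke(twp) // calls Run and waits for ready to close
	process.Signal(os.Interrupt) // delivered on the signals channel
	<-process.Wait()             // blocks until Run returns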
Example #15
func (etcd *ETCDClusterRunner) RetryableAdapter(workPoolSize int) storeadapter.StoreAdapter {
	pool, err := workpool.NewWorkPool(workPoolSize)
	Expect(err).NotTo(HaveOccurred())

	adapter := storeadapter.NewRetryable(
		etcdstoreadapter.NewETCDStoreAdapter(etcd.NodeURLS(), pool),
		clock.NewClock(),
		storeadapter.ExponentialRetryPolicy{},
	)

	err = adapter.Connect()
	Expect(err).NotTo(HaveOccurred())

	return adapter
}
Example #16
func defaultStoreAdapterProvider(urls []string, concurrentRequests int) storeadapter.StoreAdapter {
	workPool, err := workpool.NewWorkPool(concurrentRequests)
	if err != nil {
		panic(err)
	}
	options := &etcdstoreadapter.ETCDOptions{
		ClusterUrls: urls,
	}
	etcdStoreAdapter, err := etcdstoreadapter.New(options, workPool)
	if err != nil {
		panic(err)
	}
	return etcdStoreAdapter
}
Example #17
func storeAdapterProvider(urls []string, concurrentRequests int) (storeadapter.StoreAdapter, error) {
	workPool, err := workpool.NewWorkPool(concurrentRequests)
	if err != nil {
		return nil, err
	}

	options := &etcdstoreadapter.ETCDOptions{
		ClusterUrls: urls,
	}
	etcdAdapter, err := etcdstoreadapter.New(options, workPool)
	if err != nil {
		return nil, err
	}

	return etcdAdapter, nil
}
Example #18
func NewWatcher(
	logger lager.Logger,
	workPoolSize int,
	bbsClient bbs.Client,
	ccClient cc_client.CcClient,
) (*Watcher, error) {
	workPool, err := workpool.NewWorkPool(workPoolSize)
	if err != nil {
		return nil, err
	}

	return &Watcher{
		bbsClient: bbsClient,
		ccClient:  ccClient,
		logger:    logger,

		pool: workPool,
	}, nil
}
Example #19
func (twp *TaskCompletionWorkPool) Run(signals <-chan os.Signal, ready chan<- struct{}) error {
	cbWorkPool, err := workpool.NewWorkPool(twp.maxWorkers)
	logger := twp.logger
	logger.Info("starting")

	if err != nil {
		logger.Error("creation-failed", err)
		return err
	}
	twp.callbackWorkPool = cbWorkPool
	close(ready)
	logger.Info("started")
	defer logger.Info("finished")

	<-signals
	go twp.callbackWorkPool.Stop()

	return nil
}
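This is the same Run loop as Example 14, extended with lifecycle logging around startup and shutdown.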
Example #20
func initializeAuctionRunner(logger lager.Logger, cellStateTimeout time.Duration, bbsClient bbs.Client, serviceClient bbs.ServiceClient) auctiontypes.AuctionRunner {
	httpClient := cf_http.NewClient()
	stateClient := cf_http.NewCustomTimeoutClient(cellStateTimeout)
	repClientFactory := rep.NewClientFactory(httpClient, stateClient)

	delegate := auctionrunnerdelegate.New(repClientFactory, bbsClient, serviceClient, logger)
	metricEmitter := auctionmetricemitterdelegate.New()
	workPool, err := workpool.NewWorkPool(*auctionRunnerWorkers)
	if err != nil {
		logger.Fatal("failed-to-construct-auction-runner-workpool", err, lager.Data{"num-workers": *auctionRunnerWorkers}) // should never happen
	}

	return auctionrunner.New(
		delegate,
		metricEmitter,
		clock.NewClock(),
		workPool,
		logger,
	)
}
Example #21
func (etcd *ETCDClusterRunner) newAdapter(clientSSL *SSLConfig) storeadapter.StoreAdapter {
	pool, err := workpool.NewWorkPool(10)
	Expect(err).NotTo(HaveOccurred())

	options := &etcdstoreadapter.ETCDOptions{
		ClusterUrls: etcd.NodeURLS(),
		IsSSL:       false,
	}

	if clientSSL != nil {
		options.CertFile = clientSSL.CertFile
		options.KeyFile = clientSSL.KeyFile
		options.CAFile = clientSSL.CAFile
		options.IsSSL = true
	}

	adapter, err := etcdstoreadapter.New(options, pool)
	Expect(err).NotTo(HaveOccurred())
	return adapter
}
Example #22
	logger.RegisterSink(lager.NewWriterSink(GinkgoWriter, lager.DEBUG))

	sessionsToTerminate = []*gexec.Session{}
	switch communicationMode {
	case InProcess:
		cells = buildInProcessReps()
	case HTTP:
		cells = launchExternalHTTPReps()
	default:
		panic(fmt.Sprintf("unknown communication mode: %s", communicationMode))
	}
})

var _ = BeforeEach(func() {
	var err error
	workPool, err = workpool.NewWorkPool(workers)
	Expect(err).NotTo(HaveOccurred())

	wg := &sync.WaitGroup{}
	wg.Add(len(cells))
	for _, cell := range cells {
		cell := cell
		workPool.Submit(func() {
			cell.Reset()
			wg.Done()
		})
	}
	wg.Wait()

	util.ResetGuids()
Example #23
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = Describe("ZoneBuilder", func() {
	var repA, repB, repC *repfakes.FakeSimClient
	var clients map[string]rep.Client
	var workPool *workpool.WorkPool
	var logger lager.Logger
	var metricEmitter *fakes.FakeAuctionMetricEmitterDelegate

	BeforeEach(func() {
		logger = lagertest.NewTestLogger("test")

		var err error
		workPool, err = workpool.NewWorkPool(5)
		Expect(err).NotTo(HaveOccurred())

		repA = new(repfakes.FakeSimClient)
		repB = new(repfakes.FakeSimClient)
		repC = new(repfakes.FakeSimClient)

		clients = map[string]rep.Client{
			"A": repA,
			"B": repB,
			"C": repC,
		}

		repA.StateReturns(BuildCellState("the-zone", 100, 200, 100, false, linuxOnlyRootFSProviders, nil), nil)
		repB.StateReturns(BuildCellState("the-zone", 10, 10, 100, false, linuxOnlyRootFSProviders, nil), nil)
		repC.StateReturns(BuildCellState("other-zone", 100, 10, 100, false, linuxOnlyRootFSProviders, nil), nil)
Example #24
var _ = Describe("Desired State", func() {
	var (
		store        Store
		storeAdapter storeadapter.StoreAdapter
		conf         *config.Config
		app1         appfixture.AppFixture
		app2         appfixture.AppFixture
		app3         appfixture.AppFixture
	)

	BeforeEach(func() {
		var err error
		conf, err = config.DefaultConfig()
		Expect(err).NotTo(HaveOccurred())
		wpool, err := workpool.NewWorkPool(conf.StoreMaxConcurrentRequests)
		Expect(err).NotTo(HaveOccurred())
		storeAdapter, err = etcdstoreadapter.New(
			&etcdstoreadapter.ETCDOptions{ClusterUrls: etcdRunner.NodeURLS()},
			wpool,
		)
		Expect(err).NotTo(HaveOccurred())
		err = storeAdapter.Connect()
		Expect(err).NotTo(HaveOccurred())

		app1 = appfixture.NewAppFixture()
		app2 = appfixture.NewAppFixture()
		app3 = appfixture.NewAppFixture()

		store = NewStore(conf, storeAdapter, fakelogger.NewFakeLogger())
	})
Example #25
	"github.com/cloudfoundry/loggregatorlib/servicediscovery"
	"github.com/cloudfoundry/storeadapter"
	"github.com/cloudfoundry/storeadapter/etcdstoreadapter"
	"github.com/pivotal-golang/localip"
	"trafficcontroller/channel_group_connector"
	"trafficcontroller/config"
	"trafficcontroller/dopplerproxy"
	"trafficcontroller/listener"
	"trafficcontroller/marshaller"
	"trafficcontroller/profiler"
	"trafficcontroller/serveraddressprovider"
	"trafficcontroller/uaa_client"
)

var DefaultStoreAdapterProvider = func(urls []string, concurrentRequests int) storeadapter.StoreAdapter {
	workPool, err := workpool.NewWorkPool(concurrentRequests)
	if err != nil {
		panic(err)
	}

	return etcdstoreadapter.NewETCDStoreAdapter(urls, workPool)
}

const EtcdQueryInterval = 5 * time.Second

var (
	logFilePath          = flag.String("logFile", "", "The agent log file, defaults to STDOUT")
	logLevel             = flag.Bool("debug", false, "Debug logging")
	disableAccessControl = flag.Bool("disableAccessControl", false, "always allow access to app logs")
	configFile           = flag.String("config", "config/loggregator_trafficcontroller.json", "Location of the loggregator trafficcontroller config json file")
	cpuprofile           = flag.String("cpuprofile", "", "write cpu profile to file")
Example #26
func GardenContainers(gardenAddr string, gardenNetwork string, raw bool, out io.Writer) error {
	client := client.New(connection.New(gardenNetwork, gardenAddr))
	containers, err := client.Containers(nil)
	if err != nil {
		return err
	}

	workPool, err := workpool.NewWorkPool(32)
	if err != nil {
		return err
	}

	lock := &sync.Mutex{}
	wg := &sync.WaitGroup{}
	wg.Add(len(containers))

	containerInfos := []ContainerInfo{}
	for _, container := range containers {
		container := container
		workPool.Submit(func() {
			defer wg.Done()
			info, err := container.Info()
			if err != nil {
				say.Println(1, say.Red("Failed to fetch container info: %s\n", container.Handle()))
				return
			}
			metrics, err := container.Metrics()
			if err != nil {
				say.Println(1, say.Red("Failed to fetch container metrics: %s\n", container.Handle()))
				return
			}

			lock.Lock()
			defer lock.Unlock()
			containerInfos = append(containerInfos, ContainerInfo{
				container.Handle(),
				info,
				metrics,
			})
		})
	}
	wg.Wait()

	if raw {
		encoded, err := json.MarshalIndent(containerInfos, "", "  ")

		if err != nil {
			return err
		}

		out.Write(encoded)
		return nil
	}

	if len(containerInfos) == 0 {
		say.Println(0, say.Red("No Containers"))
	}
	for _, containerInfo := range containerInfos {
		printContainer(out, containerInfo)
	}
	return nil
}
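This is the clearest fan-out pattern in the listing: the pool caps concurrent Garden API calls at 32, the WaitGroup tracks completion, and the mutex guards the shared containerInfos slice because the submitted functions run concurrently on pool goroutines.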
Example #27
		clock = fakeclock.NewFakeClock(time.Now())

		fakeStreamer = newFakeStreamer()

		checkFunc = func() steps.Step {
			return <-checkSteps
		}

		logger = lagertest.NewTestLogger("test")
	})

	JustBeforeEach(func() {
		hasBecomeHealthyChannel := make(chan struct{}, 1000)
		hasBecomeHealthy = hasBecomeHealthyChannel

		workPool, err := workpool.NewWorkPool(numOfConcurrentMonitorSteps)
		Expect(err).NotTo(HaveOccurred())

		step = steps.NewMonitor(
			checkFunc,
			hasBecomeHealthyChannel,
			logger,
			clock,
			fakeStreamer,
			startTimeout,
			healthyInterval,
			unhealthyInterval,
			workPool,
		)
	})
Example #28
func main() {
	flag.Parse()
	config, logger := parseConfig(*debug, *configFile, *logFilePath)

	dropsonde.Initialize(config.MetronAddress, "syslog_drain_binder")

	workPool, err := workpool.NewWorkPool(config.EtcdMaxConcurrentRequests)
	if err != nil {
		panic(err)
	}

	adapter := etcdstoreadapter.NewETCDStoreAdapter(config.EtcdUrls, workPool)

	updateInterval := time.Duration(config.UpdateIntervalSeconds) * time.Second
	politician := elector.NewElector(config.InstanceName, adapter, updateInterval, logger)

	drainTTL := time.Duration(config.DrainUrlTtlSeconds) * time.Second
	store := etcd_syslog_drain_store.NewEtcdSyslogDrainStore(adapter, drainTTL, logger)

	ticker := time.NewTicker(updateInterval)
	for {
		select {
		case <-cfcomponent.RegisterGoRoutineDumpSignalChannel():
			cfcomponent.DumpGoRoutine()
		case <-ticker.C:
			if politician.IsLeader() {
				err = politician.StayAsLeader()
				if err != nil {
					logger.Errorf("Error when staying leader: %s", err.Error())
					politician.Vacate()
					continue
				}
			} else {
				err = politician.RunForElection()

				if err != nil {
					logger.Errorf("Error when running for leader: %s", err.Error())
					politician.Vacate()
					continue
				}
			}

			logger.Debugf("Polling %s for updates", config.CloudControllerAddress)
			drainUrls, err := Poll(config.CloudControllerAddress, config.BulkApiUsername, config.BulkApiPassword, config.PollingBatchSize, config.SkipCertVerify)
			if err != nil {
				logger.Errorf("Error when polling cloud controller: %s", err.Error())
				politician.Vacate()
				continue
			}

			metrics.IncrementCounter("pollCount")

			var totalDrains int
			for _, drainList := range drainUrls {
				totalDrains += len(drainList)
			}

			metrics.SendValue("totalDrains", float64(totalDrains), "drains")

			logger.Debugf("Updating drain URLs for %d application(s)", len(drainUrls))
			err = store.UpdateDrains(drainUrls)
			if err != nil {
				logger.Errorf("Error when updating ETCD: %s", err.Error())
				politician.Vacate()
				continue
			}
		}
	}
}
Example #29
	Context("with a real etcd", func() {
		var (
			storeAdapter storeadapter.StoreAdapter
			node         storeadapter.StoreNode
			updateNode   storeadapter.StoreNode

			updateCallback func(all map[string]string, preferred map[string]string)
			callbackCount  *int32

			preferredCallback func(key string) bool
			preferredCount    *int32
		)

		BeforeEach(func() {
			workPool, err := workpool.NewWorkPool(10)
			Expect(err).NotTo(HaveOccurred())
			options := &etcdstoreadapter.ETCDOptions{
				ClusterUrls: etcdRunner.NodeURLS(),
			}
			storeAdapter, err = etcdstoreadapter.New(options, workPool)
			Expect(err).NotTo(HaveOccurred())

			err = storeAdapter.Connect()
			Expect(err).NotTo(HaveOccurred())

			node = storeadapter.StoreNode{
				Key:   dopplerservice.LEGACY_ROOT + "/z1/loggregator_z1/0",
				Value: []byte("10.0.0.1"),
			}
Example #30
func (etcd *ETCDClusterRunner) Adapter() storeadapter.StoreAdapter {
	pool := workpool.NewWorkPool(10)
	adapter := etcdstoreadapter.NewETCDStoreAdapter(etcd.NodeURLS(), pool)
	err := adapter.Connect()
	Expect(err).NotTo(HaveOccurred())
	return adapter
}
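Across all of these examples the life cycle is the same: construct the pool with a bounded worker count, handle the error (the one-value NewWorkPool seen in Examples 1, 9, and 30 is the older API), submit work, and stop the pool when finished. A minimal self-contained sketch of that pattern, assuming the two-value NewWorkPool and the Submit and Stop methods used in Examples 14, 22, and 26; the import path is an assumption, since these projects vendor the package from different locations:

package main

import (
	"fmt"
	"sync"

	"github.com/cloudfoundry/gunk/workpool" // assumed path; adjust to your vendored copy
)

func main() {
	// At most 4 submitted functions run concurrently.
	pool, err := workpool.NewWorkPool(4)
	if err != nil {
		panic(err)
	}
	defer pool.Stop() // release the pool's goroutines when done

	var wg sync.WaitGroup
	wg.Add(10)
	for i := 0; i < 10; i++ {
		i := i // capture the loop variable, as Examples 22 and 26 do
		pool.Submit(func() {
			defer wg.Done()
			fmt.Println("job", i)
		})
	}
	wg.Wait() // Submit is asynchronous, so wait before exiting
}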