Example 1
// Start brings the cache service online: it allocates the worker pool
// and launches the react event loop in its own goroutine, then returns.
func (c *Cache) Start() {
	mlog.Info("starting cache service ...")

	// 4 workers, queue capacity 2000 — tuning values; confirm against load.
	c.workpool = workpool.New(4, 2000)

	// react runs for the lifetime of the service.
	// NOTE(review): no shutdown signal is visible here — confirm how this
	// goroutine is expected to stop.
	go c.react()

	mlog.Info("cache service started")
}
Example 2
// Start brings the scraper service online: it creates the TMDB API client,
// allocates the worker pool, and launches the react event loop goroutine.
// It terminates the process (mlog.Fatalf) if the TMDB client cannot be built.
func (s *Scraper) Start() {
	mlog.Info("starting scraper service ...")

	var err error
	// SECURITY(review): the TMDB API key is hardcoded in source. It should
	// be loaded from configuration or the environment and rotated; left
	// unchanged here to preserve behavior.
	s.tmdb, err = tmdb.NewClient("e610ded10c3f47d05fe797961d90fea6", false)
	if err != nil {
		mlog.Fatalf("unable to create tmdb client: %s", err)
	}

	// 12 workers, queue capacity 4000 — tuning values; confirm against load.
	s.workpool = workpool.New(12, 4000)

	// react runs for the lifetime of the service.
	// NOTE(review): no shutdown signal is visible here — confirm lifecycle.
	go s.react()

	// go s.workpool.Balance()

	mlog.Info("scraper service started")
}
Example 3
// NewFile opens filename and memory-maps it read-only, returning a *WZFile
// configured with a CPU-scaled worker pool and lazy loading enabled.
// Errors from opening or mapping the file are returned unwrapped.
func NewFile(filename string) (*WZFile, error) {
	file, err := os.Open(filename)
	if err != nil {
		return nil, err
	}

	filemap, err := mmap.Map(file, mmap.RDONLY, 0)
	if err != nil {
		// Fix: release the opened file when mapping fails; previously the
		// descriptor leaked until GC finalization on this error path.
		file.Close()
		return nil, err
	}
	// NOTE(review): on success the *os.File is left open. Many mmap
	// packages allow closing the file once the mapping exists — confirm
	// against the mmap package in use before adding a close here.

	wz := new(WZFile)
	wz.filemap = filemap
	wz.Debug = false
	wz.Filename = filename
	// NumCPU*2 workers with a 7000-entry queue — tuning; confirm under load.
	wz.workPool = workpool.New(runtime.NumCPU()*2, 7000)
	wz.mainBlob = NewWZFileBlob(wz.filemap, nil, wz)
	wz.LazyLoading = true

	return wz, nil
}
Example 4
// main is the scraper's long-running entry point. It discovers Kubernetes
// nodes/services, builds the scrape work list, then starts three goroutines:
// one that forwards collected datapoints to SignalFx, one that periodically
// posts collection work (every paramDataSendRate), and one that periodically
// re-discovers nodes/services (every paramNodeServiceDiscoveryRate).
// It blocks until the forwarding goroutine reports that all channels closed,
// then stops both tickers and returns that error.
func (p *prometheusScraper) main(paramDataSendRate, paramNodeServiceDiscoveryRate time.Duration) (err error) {

	kubeClient, err := newKubeClient(p.cfg)
	if err != nil {
		return err
	}

	// Initial discovery: pod->service mapping, node host IP->name mapping,
	// and the list of cAdvisor endpoints derived from the nodes.
	podToServiceMap := updateServices(kubeClient)
	hostIPtoNameMap, nodeIPs := updateNodes(kubeClient, p.cfg.CadvisorPort)
	p.cfg.CadvisorURL = nodeIPs

	// Validate every cAdvisor URL up front so a malformed one fails fast.
	cadvisorServers := make([]*url.URL, len(p.cfg.CadvisorURL))
	for i, serverURL := range p.cfg.CadvisorURL {
		cadvisorServers[i], err = url.Parse(serverURL)
		if err != nil {
			return err
		}
	}

	printVersion()
	// Error deliberately ignored: config logging is best-effort.
	cfg, _ := json.MarshalIndent(p.cfg, "", "  ")
	glog.Infof("Scrapper started with following params:\n%v\n", string(cfg))

	scrapWorkCache := newScrapWorkCache(p.cfg, p.forwarder)
	stop := make(chan error, 1) // buffered so the sender never blocks

	scrapWorkCache.setPodToServiceMap(podToServiceMap)
	scrapWorkCache.setHostIPtoNameMap(hostIPtoNameMap)

	scrapWorkCache.buildWorkList(p.cfg.CadvisorURL)

	// Wait on channel input and forward datapoints to SignalFx
	go func() {
		scrapWorkCache.waitAndForward()                // Blocking call!
		stop <- errors.New("all channels were closed") // Stop all timers
	}()

	// Pool sized to CPUs; queue sized to hold one work item per endpoint
	// plus one slack slot.
	workPool := workpool.New(runtime.NumCPU(), int32(len(p.cfg.CadvisorURL)+1))

	// Collect data from nodes
	scrapWorkTicker := time.NewTicker(paramDataSendRate)
	go func() {
		// NOTE(review): ranging over a stopped Ticker's channel never
		// terminates, so this goroutine leaks after Stop below — confirm
		// whether process exit is relied upon here.
		for range scrapWorkTicker.C {

			scrapWorkCache.foreachWork(func(i int, w *scrapWork2) bool {
				workPool.PostWork("CollectDataWork", w)
				return true
			})
		}
	}()

	// New nodes and services discovery
	updateNodeAndPodTimer := time.NewTicker(paramNodeServiceDiscoveryRate)
	go func() {
		// NOTE(review): same leak caveat as the collection goroutine above.

		for range updateNodeAndPodTimer.C {

			podMap := updateServices(kubeClient)
			hostMap, _ := updateNodes(kubeClient, p.cfg.CadvisorPort)

			// Copy so we can subtract known nodes without mutating hostMap.
			hostMapCopy := make(map[string]kubeAPI.Node)
			for k, v := range hostMap {
				hostMapCopy[k] = v
			}

			// Remove known nodes
			scrapWorkCache.foreachWork(func(i int, w *scrapWork2) bool {
				delete(hostMapCopy, w.serverURL)
				return true
			})

			// Anything left in the copy is a node we are not scraping yet.
			if len(hostMapCopy) != 0 {
				scrapWorkCache.setHostIPtoNameMap(hostMap)

				// Add new(remaining) nodes to monitoring
				for serverURL := range hostMapCopy {
					cadvisorClient, localERR := client.NewClient(serverURL)
					if localERR != nil {
						// Best-effort: skip unreachable nodes, keep going.
						glog.Errorf("Failed connect to server: %v\n", localERR)
						continue
					}

					scrapWorkCache.addWork(&scrapWork2{
						serverURL:  serverURL,
						collector:  NewCadvisorCollector(newCadvisorInfoProvider(cadvisorClient), nameToLabel),
						chRecvOnly: make(chan datapoint.Datapoint),
					})
				}
			}

			scrapWorkCache.setPodToServiceMap(podMap)
		}
	}()

	err = <-stop // Block here till stopped

	updateNodeAndPodTimer.Stop()
	scrapWorkTicker.Stop()

	return
}