Example #1
// newAlertmanagerSet builds an alertmanagerSet from the given configuration:
// it constructs the set's HTTP client and attaches a discovery target set
// that keeps the set's Alertmanager targets up to date.
func newAlertmanagerSet(cfg *config.AlertmanagerConfig) (*alertmanagerSet, error) {
	client, err := retrieval.NewHTTPClient(cfg.HTTPClientConfig)
	if err != nil {
		return nil, err
	}
	s := &alertmanagerSet{
		client: client,
		cfg:    cfg,
	}
	// The set itself receives the target updates produced by service discovery.
	s.ts = discovery.NewTargetSet(s)

	return s, nil
}
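The set returned above is constructed but not yet started. Going by the TargetSet API visible in Example #2 (Run blocks until its context is canceled, UpdateProviders supplies the discovery providers), a caller might drive it roughly as sketched below; the helper name and the ServiceDiscoveryConfig field access are illustrative assumptions, not taken from the original source.

// Hypothetical caller, sketched from the TargetSet API used in Example #2.
func startAlertmanagerSet(ctx context.Context, cfg *config.AlertmanagerConfig) (*alertmanagerSet, error) {
	s, err := newAlertmanagerSet(cfg)
	if err != nil {
		return nil, err
	}
	// Hand the set its service discovery providers (field name is assumed).
	s.ts.UpdateProviders(discovery.ProvidersFromConfig(cfg.ServiceDiscoveryConfig))
	// Run blocks until ctx is canceled, so drive the set from its own goroutine.
	go s.ts.Run(ctx)
	return s, nil
}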
Example #2
// reload starts a target set for every new scrape configuration, reloads the
// scrape pools of existing ones, and tears down target sets whose jobs no
// longer appear in the configuration.
func (tm *TargetManager) reload() {
	jobs := map[string]struct{}{}

	// Start new target sets and update existing ones.
	for _, scfg := range tm.scrapeConfigs {
		jobs[scfg.JobName] = struct{}{}

		ts, ok := tm.targetSets[scfg.JobName]
		if !ok {
			ctx, cancel := context.WithCancel(tm.ctx)
			ts = &targetSet{
				ctx:    ctx,
				cancel: cancel,
				sp:     newScrapePool(ctx, scfg, tm.appender),
			}
			ts.ts = discovery.NewTargetSet(ts.sp)

			tm.targetSets[scfg.JobName] = ts

			tm.wg.Add(1)

			go func(ts *targetSet) {
				// Run target set, which blocks until its context is canceled.
				// Gracefully shut down pending scrapes in the scrape pool afterwards.
				ts.ts.Run(ctx)
				ts.sp.stop()
				tm.wg.Done()
			}(ts)
		} else {
			// The job already exists: apply the updated scrape configuration to its pool.
			ts.sp.reload(scfg)
		}
		// New or existing, refresh the set's service discovery providers from the config.
		ts.ts.UpdateProviders(discovery.ProvidersFromConfig(scfg.ServiceDiscoveryConfig))
	}

	// Remove old target sets. Waiting for scrape pools to complete pending
	// scrape inserts is already guaranteed by the goroutine that started the target set.
	for name, ts := range tm.targetSets {
		if _, ok := jobs[name]; !ok {
			ts.cancel()
			delete(tm.targetSets, name)
		}
	}
}
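The targetSet wrapper used throughout Example #2 is only implied by the fields the code touches. A minimal sketch of that struct follows, with the sp and ts field types assumed, since the return types of newScrapePool and discovery.NewTargetSet are not shown here.

// Sketch only: the sp and ts field types are assumptions.
type targetSet struct {
	ctx    context.Context      // cancellation scope for this job
	cancel context.CancelFunc   // invoked by reload() when the job is removed
	sp     *scrapePool          // assumed return type of newScrapePool
	ts     *discovery.TargetSet // assumed return type of discovery.NewTargetSet
}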