Example #1
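// report updates the target's scrape status and appends synthetic scrape-health and scrape-duration samples through the reporting appender.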
func (t *Target) report(app storage.SampleAppender, start time.Time, duration time.Duration, err error) {
	t.status.setLastScrape(start)
	t.status.setLastError(err)

	ts := model.TimeFromUnixNano(start.UnixNano())

	var health model.SampleValue
	if err == nil {
		health = 1
	}

	healthSample := &model.Sample{
		Metric: model.Metric{
			model.MetricNameLabel: scrapeHealthMetricName,
		},
		Timestamp: ts,
		Value:     health,
	}
	durationSample := &model.Sample{
		Metric: model.Metric{
			model.MetricNameLabel: scrapeDurationMetricName,
		},
		Timestamp: ts,
		Value:     model.SampleValue(float64(duration) / float64(time.Second)),
	}

	app = t.wrapReportingAppender(app)

	app.Append(healthSample)
	app.Append(durationSample)
}
Example #2
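// recordScrapeHealth appends synthetic scrape-health and scrape-duration samples, carrying the target's base labels, to the given appender.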
func recordScrapeHealth(
	sampleAppender storage.SampleAppender,
	timestamp time.Time,
	baseLabels model.LabelSet,
	health TargetHealth,
	scrapeDuration time.Duration,
) {
	healthMetric := make(model.Metric, len(baseLabels)+1)
	durationMetric := make(model.Metric, len(baseLabels)+1)

	healthMetric[model.MetricNameLabel] = scrapeHealthMetricName
	durationMetric[model.MetricNameLabel] = scrapeDurationMetricName

	for ln, lv := range baseLabels {
		healthMetric[ln] = lv
		durationMetric[ln] = lv
	}

	ts := model.TimeFromUnixNano(timestamp.UnixNano())

	healthSample := &model.Sample{
		Metric:    healthMetric,
		Timestamp: ts,
		Value:     health.value(),
	}
	durationSample := &model.Sample{
		Metric:    durationMetric,
		Timestamp: ts,
		Value:     model.SampleValue(float64(scrapeDuration) / float64(time.Second)),
	}

	sampleAppender.Append(healthSample)
	sampleAppender.Append(durationSample)
}
Example #3
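// scrape fetches the target's metrics over HTTP, merges the base labels into each decoded sample, applies metric relabeling, and appends the result; the deferred call records scrape health.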
func (t *Target) scrape(sampleAppender storage.SampleAppender) (err error) {
	start := time.Now()
	baseLabels := t.BaseLabels()

	defer func() {
		t.status.setLastError(err)
		recordScrapeHealth(sampleAppender, clientmodel.TimestampFromTime(start), baseLabels, t.status.Health(), time.Since(start))
	}()

	req, err := http.NewRequest("GET", t.URL(), nil)
	if err != nil {
		panic(err)
	}
	req.Header.Add("Accept", acceptHeader)

	resp, err := t.httpClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("server returned HTTP status %s", resp.Status)
	}

	processor, err := extraction.ProcessorForRequestHeader(resp.Header)
	if err != nil {
		return err
	}

	t.ingestedSamples = make(chan clientmodel.Samples, ingestedSamplesCap)

	processOptions := &extraction.ProcessOptions{
		Timestamp: clientmodel.TimestampFromTime(start),
	}
	go func() {
		err = processor.ProcessSingle(resp.Body, t, processOptions)
		close(t.ingestedSamples)
	}()

	for samples := range t.ingestedSamples {
		for _, s := range samples {
			s.Metric.MergeFromLabelSet(baseLabels, clientmodel.ExporterLabelPrefix)
			// Avoid the copy in Relabel if there are no configs.
			if len(t.metricRelabelConfigs) > 0 {
				labels, err := Relabel(clientmodel.LabelSet(s.Metric), t.metricRelabelConfigs...)
				if err != nil {
					log.Errorf("error while relabeling metric %s of instance %s: ", s.Metric, t.url, err)
					continue
				}
				// Check if the timeseries was dropped.
				if labels == nil {
					continue
				}
				s.Metric = clientmodel.Metric(labels)
			}
			sampleAppender.Append(s)
		}
	}
	return err
}
Example #4
// RunScraper implements Target.
func (t *Target) RunScraper(sampleAppender storage.SampleAppender) {
	defer close(t.scraperStopped)

	lastScrapeInterval := t.interval()

	log.Debugf("Starting scraper for target %v...", t)

	select {
	case <-time.After(t.offset(lastScrapeInterval)):
		// Continue after scraping offset.
	case <-t.scraperStopping:
		return
	}

	ticker := time.NewTicker(lastScrapeInterval)
	defer ticker.Stop()

	t.scrape(sampleAppender)

	// Explanation of the contraption below:
	//
	// In case t.scraperStopping has something to receive, we want to read
	// from that channel rather than starting a new scrape (which might take very
	// long). That's why the outer select has no ticker.C. Should t.scraperStopping
	// not have anything to receive, we go into the inner select, where ticker.C
	// is in the mix.
	for {
		select {
		case <-t.scraperStopping:
			return
		default:
			select {
			case <-t.scraperStopping:
				return
			case <-ticker.C:
				took := time.Since(t.status.LastScrape())

				intervalStr := lastScrapeInterval.String()

				// On changed scrape interval the new interval becomes effective
				// after the next scrape.
				if iv := t.interval(); iv != lastScrapeInterval {
					ticker.Stop()
					ticker = time.NewTicker(iv)
					lastScrapeInterval = iv
				}

				targetIntervalLength.WithLabelValues(intervalStr).Observe(
					float64(took) / float64(time.Second), // Sub-second precision.
				)
				if sampleAppender.NeedsThrottling() {
					targetSkippedScrapes.WithLabelValues(intervalStr).Inc()
					t.status.setLastError(errSkippedScrape)
					continue
				}
				t.scrape(sampleAppender)
			}
		}
	}
}
Example #5
// append the defined time series to the storage.
func (cmd *loadCmd) append(a storage.SampleAppender) {
	for fp, samples := range cmd.defs {
		met := cmd.metrics[fp]
		for _, smpl := range samples {
			s := &clientmodel.Sample{
				Metric:    met,
				Value:     smpl.Value,
				Timestamp: smpl.Timestamp,
			}
			a.Append(s)
		}
	}
}
Example #6
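// recordScrapeHealth appends synthetic scrape-health (1 for HealthGood, 0 otherwise) and scrape-duration samples, carrying the target's base labels.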
func recordScrapeHealth(
	sampleAppender storage.SampleAppender,
	timestamp clientmodel.Timestamp,
	baseLabels clientmodel.LabelSet,
	health TargetHealth,
	scrapeDuration time.Duration,
) {
	healthMetric := make(clientmodel.Metric, len(baseLabels)+1)
	durationMetric := make(clientmodel.Metric, len(baseLabels)+1)

	healthMetric[clientmodel.MetricNameLabel] = clientmodel.LabelValue(scrapeHealthMetricName)
	durationMetric[clientmodel.MetricNameLabel] = clientmodel.LabelValue(scrapeDurationMetricName)

	for label, value := range baseLabels {
		healthMetric[label] = value
		durationMetric[label] = value
	}

	healthValue := clientmodel.SampleValue(0)
	if health == HealthGood {
		healthValue = clientmodel.SampleValue(1)
	}

	healthSample := &clientmodel.Sample{
		Metric:    healthMetric,
		Timestamp: timestamp,
		Value:     healthValue,
	}
	durationSample := &clientmodel.Sample{
		Metric:    durationMetric,
		Timestamp: timestamp,
		Value:     clientmodel.SampleValue(float64(scrapeDuration) / float64(time.Second)),
	}

	sampleAppender.Append(healthSample)
	sampleAppender.Append(durationSample)
}
Example #7
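// scrape fetches and decodes the target's metrics, routing each sample through the relabeling and label-handling appender wrappers before appending; the deferred call records scrape health.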
func (t *Target) scrape(appender storage.SampleAppender) (err error) {
	start := time.Now()
	baseLabels := t.BaseLabels()

	defer func(appender storage.SampleAppender) {
		t.status.setLastError(err)
		recordScrapeHealth(appender, start, baseLabels, t.status.Health(), time.Since(start))
	}(appender)

	t.RLock()

	// The relabelAppender has to be inside the label-modifying appenders
	// so the relabeling rules are applied to the correct label set.
	if len(t.metricRelabelConfigs) > 0 {
		appender = relabelAppender{
			app:         appender,
			relabelings: t.metricRelabelConfigs,
		}
	}

	if t.honorLabels {
		appender = honorLabelsAppender{
			app:    appender,
			labels: baseLabels,
		}
	} else {
		appender = ruleLabelsAppender{
			app:    appender,
			labels: baseLabels,
		}
	}

	httpClient := t.httpClient

	t.RUnlock()

	req, err := http.NewRequest("GET", t.URL().String(), nil)
	if err != nil {
		return err
	}
	req.Header.Add("Accept", acceptHeader)

	resp, err := httpClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("server returned HTTP status %s", resp.Status)
	}

	dec := expfmt.NewDecoder(resp.Body, expfmt.ResponseFormat(resp.Header))

	sdec := expfmt.SampleDecoder{
		Dec: dec,
		Opts: &expfmt.DecodeOptions{
			Timestamp: model.TimeFromUnixNano(start.UnixNano()),
		},
	}

	t.ingestedSamples = make(chan model.Vector, ingestedSamplesCap)

	go func() {
		for {
			// TODO(fabxc): Change the SampleAppender interface to return an error
			// so we can proceed based on the status and don't leak goroutines trying
			// to append a single sample after dropping all the other ones.
			//
			// This will also allow us to reuse this vector and save allocations.
			var samples model.Vector
			if err = sdec.Decode(&samples); err != nil {
				break
			}
			if err = t.ingest(samples); err != nil {
				break
			}
		}
		close(t.ingestedSamples)
	}()

	for samples := range t.ingestedSamples {
		for _, s := range samples {
			appender.Append(s)
		}
	}

	if err == io.EOF {
		return nil
	}
	return err
}
Example #8
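// scrape fetches the target's metrics, merges base labels according to honorLabels, applies metric relabeling, and appends the resulting samples; the deferred call records scrape health.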
func (t *Target) scrape(sampleAppender storage.SampleAppender) (err error) {
	start := time.Now()
	baseLabels := t.BaseLabels()

	defer func() {
		t.status.setLastError(err)
		recordScrapeHealth(sampleAppender, clientmodel.TimestampFromTime(start), baseLabels, t.status.Health(), time.Since(start))
	}()

	req, err := http.NewRequest("GET", t.URL().String(), nil)
	if err != nil {
		panic(err)
	}
	req.Header.Add("Accept", acceptHeader)

	resp, err := t.httpClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("server returned HTTP status %s", resp.Status)
	}

	processor, err := extraction.ProcessorForRequestHeader(resp.Header)
	if err != nil {
		return err
	}

	t.ingestedSamples = make(chan clientmodel.Samples, ingestedSamplesCap)

	processOptions := &extraction.ProcessOptions{
		Timestamp: clientmodel.TimestampFromTime(start),
	}
	go func() {
		err = processor.ProcessSingle(resp.Body, t, processOptions)
		close(t.ingestedSamples)
	}()

	for samples := range t.ingestedSamples {
		for _, s := range samples {
			if t.honorLabels {
				// Merge the metric with the baseLabels for labels not already set in the
				// metric. This also considers labels explicitly set to the empty string.
				for ln, lv := range baseLabels {
					if _, ok := s.Metric[ln]; !ok {
						s.Metric[ln] = lv
					}
				}
			} else {
				// Merge the ingested metric with the base label set. On a collision the
				// value of the label is stored in a label prefixed with the exported prefix.
				for ln, lv := range baseLabels {
					if v, ok := s.Metric[ln]; ok && v != "" {
						s.Metric[clientmodel.ExportedLabelPrefix+ln] = v
					}
					s.Metric[ln] = lv
				}
			}
			// Avoid the copy in Relabel if there are no configs.
			if len(t.metricRelabelConfigs) > 0 {
				labels, err := Relabel(clientmodel.LabelSet(s.Metric), t.metricRelabelConfigs...)
				if err != nil {
					log.Errorf("Error while relabeling metric %s of instance %s: %s", s.Metric, t.url, err)
					continue
				}
				// Check if the timeseries was dropped.
				if labels == nil {
					continue
				}
				s.Metric = clientmodel.Metric(labels)
			}
			sampleAppender.Append(s)
		}
	}
	return err
}
Example #9
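// scrape fetches the target's metrics with a per-scrape timeout, decodes them, and appends each sample directly, counting dropped out-of-order samples; the deferred report records health and duration.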
func (t *Target) scrape(appender storage.SampleAppender) error {
	var (
		err   error
		start = time.Now()
	)
	defer func(appender storage.SampleAppender) {
		t.report(appender, start, time.Since(start), err)
	}(appender)

	t.RLock()

	appender = t.wrapAppender(appender)

	client := t.httpClient
	t.RUnlock()

	req, err := http.NewRequest("GET", t.URL().String(), nil)
	if err != nil {
		return err
	}
	req.Header.Add("Accept", acceptHeader)

	ctx, cancel := context.WithTimeout(context.Background(), t.timeout())
	defer cancel()
	resp, err := ctxhttp.Do(ctx, client, req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("server returned HTTP status %s", resp.Status)
	}

	dec := expfmt.NewDecoder(resp.Body, expfmt.ResponseFormat(resp.Header))

	sdec := expfmt.SampleDecoder{
		Dec: dec,
		Opts: &expfmt.DecodeOptions{
			Timestamp: model.TimeFromUnixNano(start.UnixNano()),
		},
	}

	var (
		samples       model.Vector
		numOutOfOrder int
		logger        = log.With("target", t.InstanceIdentifier())
	)
	for {
		if err = sdec.Decode(&samples); err != nil {
			break
		}
		for _, s := range samples {
			err := appender.Append(s)
			if err != nil {
				if err == local.ErrOutOfOrderSample {
					numOutOfOrder++
				} else {
					logger.With("sample", s).Warnf("Error inserting sample: %s", err)
				}
			}

		}
	}
	if numOutOfOrder > 0 {
		logger.With("numDropped", numOutOfOrder).Warn("Error on ingesting out-of-order samples")
	}

	if err == io.EOF {
		// Set err to nil since it is used in the scrape health recording.
		err = nil
	}
	return err
}
Example #10
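// scrape snapshots the target's settings under the read lock, fetches and decodes the metrics, merges base labels according to honorLabels, applies relabeling, and appends; the deferred call records scrape health.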
func (t *Target) scrape(sampleAppender storage.SampleAppender) (err error) {
	start := time.Now()
	baseLabels := t.BaseLabels()

	t.RLock()
	var (
		honorLabels          = t.honorLabels
		httpClient           = t.httpClient
		metricRelabelConfigs = t.metricRelabelConfigs
	)
	t.RUnlock()

	defer func() {
		t.status.setLastError(err)
		recordScrapeHealth(sampleAppender, start, baseLabels, t.status.Health(), time.Since(start))
	}()

	req, err := http.NewRequest("GET", t.URL().String(), nil)
	if err != nil {
		panic(err)
	}
	req.Header.Add("Accept", acceptHeader)

	resp, err := httpClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("server returned HTTP status %s", resp.Status)
	}

	dec, err := expfmt.NewDecoder(resp.Body, resp.Header)
	if err != nil {
		return err
	}

	sdec := expfmt.SampleDecoder{
		Dec: dec,
		Opts: &expfmt.DecodeOptions{
			Timestamp: model.TimeFromUnixNano(start.UnixNano()),
		},
	}

	t.ingestedSamples = make(chan model.Vector, ingestedSamplesCap)

	go func() {
		for {
			// TODO(fabxc): Change the SampleAppender interface to return an error
			// so we can proceed based on the status and don't leak goroutines trying
			// to append a single sample after dropping all the other ones.
			//
			// This will also allow us to reuse this vector and save allocations.
			var samples model.Vector
			if err = sdec.Decode(&samples); err != nil {
				break
			}
			if err = t.ingest(samples); err != nil {
				break
			}
		}
		close(t.ingestedSamples)
	}()

	for samples := range t.ingestedSamples {
		for _, s := range samples {
			if honorLabels {
				// Merge the metric with the baseLabels for labels not already set in the
				// metric. This also considers labels explicitly set to the empty string.
				for ln, lv := range baseLabels {
					if _, ok := s.Metric[ln]; !ok {
						s.Metric[ln] = lv
					}
				}
			} else {
				// Merge the ingested metric with the base label set. On a collision the
				// value of the label is stored in a label prefixed with the exported prefix.
				for ln, lv := range baseLabels {
					if v, ok := s.Metric[ln]; ok && v != "" {
						s.Metric[model.ExportedLabelPrefix+ln] = v
					}
					s.Metric[ln] = lv
				}
			}
			// Avoid the copy in Relabel if there are no configs.
			if len(metricRelabelConfigs) > 0 {
				labels, err := Relabel(model.LabelSet(s.Metric), metricRelabelConfigs...)
				if err != nil {
					log.Errorf("Error while relabeling metric %s of instance %s: %s", s.Metric, req.URL, err)
					continue
				}
				// Check if the timeseries was dropped.
				if labels == nil {
					continue
				}
				s.Metric = model.Metric(labels)
			}
			sampleAppender.Append(s)
		}
	}

	if err == io.EOF {
		return nil
	}
	return err
}
Example #11
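// scrape fetches and decodes the target's metrics, routing samples through the relabeling and label-handling appender wrappers, and counts dropped out-of-order samples; the deferred call records scrape health.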
func (t *Target) scrape(appender storage.SampleAppender) (err error) {
	start := time.Now()
	baseLabels := t.BaseLabels()

	defer func(appender storage.SampleAppender) {
		t.status.setLastError(err)
		recordScrapeHealth(appender, start, baseLabels, t.status.Health(), time.Since(start))
	}(appender)

	t.RLock()

	// The relabelAppender has to be inside the label-modifying appenders
	// so the relabeling rules are applied to the correct label set.
	if len(t.metricRelabelConfigs) > 0 {
		appender = relabelAppender{
			SampleAppender: appender,
			relabelings:    t.metricRelabelConfigs,
		}
	}

	if t.honorLabels {
		appender = honorLabelsAppender{
			SampleAppender: appender,
			labels:         baseLabels,
		}
	} else {
		appender = ruleLabelsAppender{
			SampleAppender: appender,
			labels:         baseLabels,
		}
	}

	httpClient := t.httpClient

	t.RUnlock()

	req, err := http.NewRequest("GET", t.URL().String(), nil)
	if err != nil {
		return err
	}
	req.Header.Add("Accept", acceptHeader)

	resp, err := httpClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("server returned HTTP status %s", resp.Status)
	}

	dec := expfmt.NewDecoder(resp.Body, expfmt.ResponseFormat(resp.Header))

	sdec := expfmt.SampleDecoder{
		Dec: dec,
		Opts: &expfmt.DecodeOptions{
			Timestamp: model.TimeFromUnixNano(start.UnixNano()),
		},
	}

	var (
		samples       model.Vector
		numOutOfOrder int
		logger        = log.With("target", t.InstanceIdentifier())
	)
	for {
		if err = sdec.Decode(&samples); err != nil {
			break
		}
		for _, s := range samples {
			err := appender.Append(s)
			if err != nil {
				if err == local.ErrOutOfOrderSample {
					numOutOfOrder++
				} else {
					logger.With("sample", s).Warnf("Error inserting sample: %s", err)
				}
			}

		}
	}
	if numOutOfOrder > 0 {
		logger.With("numDropped", numOutOfOrder).Warn("Error on ingesting out-of-order samples")
	}

	if err == io.EOF {
		return nil
	}
	return err
}