Example 1
// parseMetrics decodes Prometheus text-format metrics from data. Samples whose
// names appear in knownMetrics or CommonMetrics are appended to output; any
// other names are recorded in unknownMetrics (if non-nil). Decode errors are
// logged and skipped.
func parseMetrics(data string, knownMetrics map[string][]string, output *Metrics, unknownMetrics sets.String) error {
	dec := expfmt.NewDecoder(strings.NewReader(data), expfmt.FmtText)
	decoder := expfmt.SampleDecoder{
		Dec:  dec,
		Opts: &expfmt.DecodeOptions{},
	}

	for {
		var v model.Vector
		if err := decoder.Decode(&v); err != nil {
			if err == io.EOF {
				// Expected loop termination condition.
				return nil
			}
			glog.Warningf("Invalid Decode. Skipping.")
			continue
		}
		for _, metric := range v {
			name := string(metric.Metric[model.MetricNameLabel])
			_, isCommonMetric := CommonMetrics[name]
			_, isKnownMetric := knownMetrics[name]
			if isKnownMetric || isCommonMetric {
				(*output)[name] = append((*output)[name], metric)
			} else {
				if unknownMetrics != nil {
					unknownMetrics.Insert(name)
				}
			}
		}
	}
}
Example 2
// parseMetrics takes the text format for prometheus metrics, and converts
// them into our Metrics object.
func (c *Metrics) parseMetrics(data string) error {
	dec := expfmt.NewDecoder(strings.NewReader(data), expfmt.FmtText)
	decoder := expfmt.SampleDecoder{
		Dec:  dec,
		Opts: &expfmt.DecodeOptions{},
	}

	for {
		var v model.Vector
		if err := decoder.Decode(&v); err != nil {
			if err == io.EOF {
				// Expected loop termination condition.
				return nil
			}
			return fmt.Errorf("Invalid decode: %v", err)
		}
		for _, metric := range v {
			switch name := string(metric.Metric[model.MetricNameLabel]); name {
			case "node_collector_evictions_number":
				c.NodeEvictions = int64(metric.Value)
			case "process_start_time_seconds":
				c.CreateTime = int64(metric.Value)
			}
		}
	}
}
Example 3
func (s *targetScraper) scrape(ctx context.Context, ts time.Time) (model.Samples, error) {
	req, err := http.NewRequest("GET", s.URL().String(), nil)
	if err != nil {
		return nil, err
	}
	req.Header.Add("Accept", acceptHeader)

	resp, err := ctxhttp.Do(ctx, s.client, req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("server returned HTTP status %s", resp.Status)
	}

	var (
		allSamples = make(model.Samples, 0, 200)
		decSamples = make(model.Vector, 0, 50)
	)
	sdec := expfmt.SampleDecoder{
		Dec: expfmt.NewDecoder(resp.Body, expfmt.ResponseFormat(resp.Header)),
		Opts: &expfmt.DecodeOptions{
			Timestamp: model.TimeFromUnixNano(ts.UnixNano()),
		},
	}

	for {
		if err = sdec.Decode(&decSamples); err != nil {
			break
		}
		allSamples = append(allSamples, decSamples...)
		decSamples = decSamples[:0]
	}

	if err == io.EOF {
		// Set err to nil since it is used in the scrape health recording.
		err = nil
	}
	return allSamples, err
}
Example 4
// extractMetricSamples parses the prometheus metric samples from the input string.
func extractMetricSamples(metricsBlob string) ([]*model.Sample, error) {
	dec := expfmt.NewDecoder(strings.NewReader(metricsBlob), expfmt.FmtText)
	decoder := expfmt.SampleDecoder{
		Dec:  dec,
		Opts: &expfmt.DecodeOptions{},
	}

	var samples []*model.Sample
	for {
		var v model.Vector
		if err := decoder.Decode(&v); err != nil {
			if err == io.EOF {
				// Expected loop termination condition.
				return samples, nil
			}
			return nil, err
		}
		samples = append(samples, v...)
	}
}
Example 5
// parseMetrics decodes Prometheus text-format metrics from data and appends
// every sample to output, keyed by metric name. Decode errors are logged and
// skipped.
func parseMetrics(data string, output *Metrics) error {
	dec := expfmt.NewDecoder(strings.NewReader(data), expfmt.FmtText)
	decoder := expfmt.SampleDecoder{
		Dec:  dec,
		Opts: &expfmt.DecodeOptions{},
	}

	for {
		var v model.Vector
		if err := decoder.Decode(&v); err != nil {
			if err == io.EOF {
				// Expected loop termination condition.
				return nil
			}
			glog.Warningf("Invalid Decode. Skipping.")
			continue
		}
		for _, metric := range v {
			name := string(metric.Metric[model.MetricNameLabel])
			(*output)[name] = append((*output)[name], metric)
		}
	}
}
Example 6
func (t *Target) scrape(appender storage.SampleAppender) (err error) {
	start := time.Now()
	baseLabels := t.BaseLabels()

	defer func(appender storage.SampleAppender) {
		t.status.setLastError(err)
		recordScrapeHealth(appender, start, baseLabels, t.status.Health(), time.Since(start))
	}(appender)

	t.RLock()

	// The relabelAppender has to be inside the label-modifying appenders
	// so the relabeling rules are applied to the correct label set.
	if len(t.metricRelabelConfigs) > 0 {
		appender = relabelAppender{
			app:         appender,
			relabelings: t.metricRelabelConfigs,
		}
	}

	if t.honorLabels {
		appender = honorLabelsAppender{
			app:    appender,
			labels: baseLabels,
		}
	} else {
		appender = ruleLabelsAppender{
			app:    appender,
			labels: baseLabels,
		}
	}

	httpClient := t.httpClient

	t.RUnlock()

	req, err := http.NewRequest("GET", t.URL().String(), nil)
	if err != nil {
		return err
	}
	req.Header.Add("Accept", acceptHeader)

	resp, err := httpClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("server returned HTTP status %s", resp.Status)
	}

	dec := expfmt.NewDecoder(resp.Body, expfmt.ResponseFormat(resp.Header))

	sdec := expfmt.SampleDecoder{
		Dec: dec,
		Opts: &expfmt.DecodeOptions{
			Timestamp: model.TimeFromUnixNano(start.UnixNano()),
		},
	}

	t.ingestedSamples = make(chan model.Vector, ingestedSamplesCap)

	go func() {
		for {
			// TODO(fabxc): Change the SampleAppender interface to return an error
			// so we can proceed based on the status and don't leak goroutines trying
			// to append a single sample after dropping all the other ones.
			//
			// This will also allow us to reuse this vector and save allocations.
			var samples model.Vector
			if err = sdec.Decode(&samples); err != nil {
				break
			}
			if err = t.ingest(samples); err != nil {
				break
			}
		}
		close(t.ingestedSamples)
	}()

	for samples := range t.ingestedSamples {
		for _, s := range samples {
			appender.Append(s)
		}
	}

	if err == io.EOF {
		return nil
	}
	return err
}
Example 7
func (t *Target) scrape(appender storage.SampleAppender) error {
	var (
		err   error
		start = time.Now()
	)
	defer func(appender storage.SampleAppender) {
		t.report(appender, start, time.Since(start), err)
	}(appender)

	t.RLock()

	appender = t.wrapAppender(appender)

	client := t.httpClient
	t.RUnlock()

	req, err := http.NewRequest("GET", t.URL().String(), nil)
	if err != nil {
		return err
	}
	req.Header.Add("Accept", acceptHeader)

	ctx, cancel := context.WithTimeout(context.Background(), t.timeout())
	defer cancel()
	resp, err := ctxhttp.Do(ctx, client, req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("server returned HTTP status %s", resp.Status)
	}

	dec := expfmt.NewDecoder(resp.Body, expfmt.ResponseFormat(resp.Header))

	sdec := expfmt.SampleDecoder{
		Dec: dec,
		Opts: &expfmt.DecodeOptions{
			Timestamp: model.TimeFromUnixNano(start.UnixNano()),
		},
	}

	var (
		samples       model.Vector
		numOutOfOrder int
		logger        = log.With("target", t.InstanceIdentifier())
	)
	for {
		if err = sdec.Decode(&samples); err != nil {
			break
		}
		for _, s := range samples {
			err := appender.Append(s)
			if err != nil {
				if err == local.ErrOutOfOrderSample {
					numOutOfOrder++
				} else {
					logger.With("sample", s).Warnf("Error inserting sample: %s", err)
				}
			}

		}
	}
	if numOutOfOrder > 0 {
		logger.With("numDropped", numOutOfOrder).Warn("Error on ingesting out-of-order samples")
	}

	if err == io.EOF {
		// Set err to nil since it is used in the scrape health recording.
		err = nil
	}
	return err
}
Example 8
func (t *Target) scrape(sampleAppender storage.SampleAppender) (err error) {
	start := time.Now()
	baseLabels := t.BaseLabels()

	t.RLock()
	var (
		honorLabels          = t.honorLabels
		httpClient           = t.httpClient
		metricRelabelConfigs = t.metricRelabelConfigs
	)
	t.RUnlock()

	defer func() {
		t.status.setLastError(err)
		recordScrapeHealth(sampleAppender, start, baseLabels, t.status.Health(), time.Since(start))
	}()

	req, err := http.NewRequest("GET", t.URL().String(), nil)
	if err != nil {
		panic(err)
	}
	req.Header.Add("Accept", acceptHeader)

	resp, err := httpClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("server returned HTTP status %s", resp.Status)
	}

	dec, err := expfmt.NewDecoder(resp.Body, resp.Header)
	if err != nil {
		return err
	}

	sdec := expfmt.SampleDecoder{
		Dec: dec,
		Opts: &expfmt.DecodeOptions{
			Timestamp: model.TimeFromUnixNano(start.UnixNano()),
		},
	}

	t.ingestedSamples = make(chan model.Vector, ingestedSamplesCap)

	go func() {
		for {
			// TODO(fabxc): Change the SampleAppender interface to return an error
			// so we can proceed based on the status and don't leak goroutines trying
			// to append a single sample after dropping all the other ones.
			//
			// This will also allow us to reuse this vector and save allocations.
			var samples model.Vector
			if err = sdec.Decode(&samples); err != nil {
				break
			}
			if err = t.ingest(samples); err != nil {
				break
			}
		}
		close(t.ingestedSamples)
	}()

	for samples := range t.ingestedSamples {
		for _, s := range samples {
			if honorLabels {
				// Merge the metric with the baseLabels for labels not already set in the
				// metric. This also considers labels explicitly set to the empty string.
				for ln, lv := range baseLabels {
					if _, ok := s.Metric[ln]; !ok {
						s.Metric[ln] = lv
					}
				}
			} else {
				// Merge the ingested metric with the base label set. On a collision the
				// value of the label is stored in a label prefixed with the exported prefix.
				for ln, lv := range baseLabels {
					if v, ok := s.Metric[ln]; ok && v != "" {
						s.Metric[model.ExportedLabelPrefix+ln] = v
					}
					s.Metric[ln] = lv
				}
			}
			// Avoid the copy in Relabel if there are no configs.
			if len(metricRelabelConfigs) > 0 {
				labels, err := Relabel(model.LabelSet(s.Metric), metricRelabelConfigs...)
				if err != nil {
					log.Errorf("Error while relabeling metric %s of instance %s: %s", s.Metric, req.URL, err)
					continue
				}
				// Check if the timeseries was dropped.
				if labels == nil {
					continue
				}
				s.Metric = model.Metric(labels)
			}
			sampleAppender.Append(s)
		}
	}

	if err == io.EOF {
		return nil
	}
	return err
}
Example 9
func (t *Target) scrape(appender storage.SampleAppender) (err error) {
	start := time.Now()
	baseLabels := t.BaseLabels()

	defer func(appender storage.SampleAppender) {
		t.status.setLastError(err)
		recordScrapeHealth(appender, start, baseLabels, t.status.Health(), time.Since(start))
	}(appender)

	t.RLock()

	// The relabelAppender has to be inside the label-modifying appenders
	// so the relabeling rules are applied to the correct label set.
	if len(t.metricRelabelConfigs) > 0 {
		appender = relabelAppender{
			SampleAppender: appender,
			relabelings:    t.metricRelabelConfigs,
		}
	}

	if t.honorLabels {
		appender = honorLabelsAppender{
			SampleAppender: appender,
			labels:         baseLabels,
		}
	} else {
		appender = ruleLabelsAppender{
			SampleAppender: appender,
			labels:         baseLabels,
		}
	}

	httpClient := t.httpClient

	t.RUnlock()

	req, err := http.NewRequest("GET", t.URL().String(), nil)
	if err != nil {
		return err
	}
	req.Header.Add("Accept", acceptHeader)

	resp, err := httpClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("server returned HTTP status %s", resp.Status)
	}

	dec := expfmt.NewDecoder(resp.Body, expfmt.ResponseFormat(resp.Header))

	sdec := expfmt.SampleDecoder{
		Dec: dec,
		Opts: &expfmt.DecodeOptions{
			Timestamp: model.TimeFromUnixNano(start.UnixNano()),
		},
	}

	var (
		samples       model.Vector
		numOutOfOrder int
		logger        = log.With("target", t.InstanceIdentifier())
	)
	for {
		if err = sdec.Decode(&samples); err != nil {
			break
		}
		for _, s := range samples {
			err := appender.Append(s)
			if err != nil {
				if err == local.ErrOutOfOrderSample {
					numOutOfOrder++
				} else {
					logger.With("sample", s).Warnf("Error inserting sample: %s", err)
				}
			}

		}
	}
	if numOutOfOrder > 0 {
		logger.With("numDropped", numOutOfOrder).Warn("Error on ingesting out-of-order samples")
	}

	if err == io.EOF {
		return nil
	}
	return err
}
Example 10
// Returns collected metrics and the next collection time of the collector
func (collector *PrometheusCollector) Collect(metrics map[string][]v1.MetricVal) (time.Time, map[string][]v1.MetricVal, error) {
	currentTime := time.Now()
	nextCollectionTime := currentTime.Add(time.Duration(collector.pollingFrequency))

	uri := collector.configFile.Endpoint.URL
	response, err := collector.httpClient.Get(uri)
	if err != nil {
		return nextCollectionTime, nil, err
	}
	defer response.Body.Close()

	if response.StatusCode != http.StatusOK {
		return nextCollectionTime, nil, fmt.Errorf("server returned HTTP status %s", response.Status)
	}

	sdec := expfmt.SampleDecoder{
		Dec: expfmt.NewDecoder(response.Body, expfmt.ResponseFormat(response.Header)),
		Opts: &expfmt.DecodeOptions{
			Timestamp: model.TimeFromUnixNano(currentTime.UnixNano()),
		},
	}

	var (
		// 50 is chosen as a reasonable guesstimate at a number of metrics we can
		// expect from virtually any endpoint to try to save allocations.
		decSamples = make(model.Vector, 0, 50)
		newMetrics = make(map[string][]v1.MetricVal)
	)
	for {
		if err = sdec.Decode(&decSamples); err != nil {
			break
		}

		for _, sample := range decSamples {
			metName := string(sample.Metric[model.MetricNameLabel])
			if len(metName) == 0 {
				continue
			}
			// If metrics to collect is specified, skip any metrics not in the list to collect.
			if _, ok := collector.metricsSet[metName]; collector.metricsSet != nil && !ok {
				continue
			}
			// TODO Handle multiple labels nicer. Prometheus metrics can have multiple
			// labels, cadvisor only accepts a single string for the metric label.
			label := prometheusLabelSetToCadvisorLabel(sample.Metric)

			metric := v1.MetricVal{
				FloatValue: float64(sample.Value),
				Timestamp:  sample.Timestamp.Time(),
				Label:      label,
			}
			newMetrics[metName] = append(newMetrics[metName], metric)
			if len(newMetrics) > collector.metricCountLimit {
				return nextCollectionTime, nil, fmt.Errorf("too many metrics to collect")
			}
		}
		decSamples = decSamples[:0]
	}

	if err != nil && err != io.EOF {
		return nextCollectionTime, nil, err
	}

	for key, val := range newMetrics {
		metrics[key] = append(metrics[key], val...)
	}

	return nextCollectionTime, metrics, nil
}
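
All of the examples above share the same core pattern: wrap an expfmt decoder in an expfmt.SampleDecoder, call Decode into a model.Vector in a loop, and treat io.EOF as the normal termination condition rather than an error. Below is a minimal, self-contained sketch of that pattern; the function and variable names (parsePrometheusText, payload) are illustrative and do not come from any of the sources above.

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

// parsePrometheusText decodes Prometheus text-format metrics into a flat
// slice of samples. io.EOF marks the end of the input, not a failure.
func parsePrometheusText(data string) ([]*model.Sample, error) {
	sdec := expfmt.SampleDecoder{
		Dec:  expfmt.NewDecoder(strings.NewReader(data), expfmt.FmtText),
		Opts: &expfmt.DecodeOptions{Timestamp: model.Now()},
	}

	var all []*model.Sample
	for {
		var v model.Vector
		if err := sdec.Decode(&v); err != nil {
			if err == io.EOF {
				// Expected loop termination condition.
				return all, nil
			}
			return nil, err
		}
		all = append(all, v...)
	}
}

func main() {
	const payload = "some_metric{label=\"a\"} 42\nanother_metric 7\n"

	samples, err := parsePrometheusText(payload)
	if err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	for _, s := range samples {
		fmt.Println(s.Metric, s.Value)
	}
}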