Code example #1
// Append implements storage.SampleAppender. Always returns nil.
func (s *ReloadableStorage) Append(smpl *model.Sample) error {
	s.mtx.RLock()
	defer s.mtx.RUnlock()

	if s.queue == nil {
		return nil
	}

	var snew model.Sample
	snew = *smpl
	snew.Metric = smpl.Metric.Clone()

	for ln, lv := range s.externalLabels {
		if _, ok := smpl.Metric[ln]; !ok {
			snew.Metric[ln] = lv
		}
	}
	snew.Metric = model.Metric(
		relabel.Process(model.LabelSet(snew.Metric), s.conf.WriteRelabelConfigs...))

	if snew.Metric == nil {
		return nil
	}
	s.queue.Append(&snew)
	return nil
}
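The loop over s.externalLabels adds a label only when the scraped sample does not already carry it. A minimal, self-contained sketch of that merge rule, using invented label names and values:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// Hypothetical external labels and an incoming sample's metric.
	external := model.LabelSet{"region": "eu-west-1", "replica": "a"}
	metric := model.Metric{model.MetricNameLabel: "up", "region": "us-east-1"}

	// Same merge rule as in Append: external labels never overwrite
	// labels already present on the sample.
	for ln, lv := range external {
		if _, ok := metric[ln]; !ok {
			metric[ln] = lv
		}
	}

	fmt.Println(metric) // keeps region="us-east-1", gains replica="a"
}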
Code example #2
File: remote.go Project: PrFalken/prometheus
// Append implements storage.SampleAppender. Always returns nil.
func (s *Storage) Append(smpl *model.Sample) error {
	s.mtx.RLock()

	var snew model.Sample
	snew = *smpl
	snew.Metric = smpl.Metric.Clone()

	for ln, lv := range s.externalLabels {
		if _, ok := smpl.Metric[ln]; !ok {
			snew.Metric[ln] = lv
		}
	}
	snew.Metric = model.Metric(
		relabel.Process(model.LabelSet(snew.Metric), s.relabelConfigs...))
	s.mtx.RUnlock()

	if snew.Metric == nil {
		return nil
	}

	for _, q := range s.queues {
		q.Append(&snew)
	}
	return nil
}
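Both Append variants copy the sample and call Clone() on its metric before mutating it. That matters because model.Metric is a map type: copying the Sample struct alone still shares the underlying label map with the caller. A small sketch of the difference, with illustrative labels:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	orig := &model.Sample{Metric: model.Metric{"job": "node"}}

	// Struct copy only: the label map is still shared with orig.
	shallow := *orig
	shallow.Metric["extra"] = "x"
	fmt.Println(orig.Metric) // orig now carries "extra" as well

	// Struct copy plus Clone(), as in the snippets above: mutations stay local.
	safe := *orig
	safe.Metric = orig.Metric.Clone()
	safe.Metric["added"] = "y"
	fmt.Println(orig.Metric) // unchanged by this second mutation
}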
Code example #3
File: alerting.go Project: nicr9/prometheus
// eval evaluates the rule expression and then creates pending alerts and fires
// or removes previously pending alerts accordingly.
func (rule *AlertingRule) eval(timestamp model.Time, engine *promql.Engine) (model.Vector, error) {
	query, err := engine.NewInstantQuery(rule.vector.String(), timestamp)
	if err != nil {
		return nil, err
	}
	exprResult, err := query.Exec().Vector()
	if err != nil {
		return nil, err
	}

	rule.mutex.Lock()
	defer rule.mutex.Unlock()

	// Create pending alerts for any new vector elements in the alert expression
	// or update the expression value for existing elements.
	resultFPs := map[model.Fingerprint]struct{}{}
	for _, sample := range exprResult {
		fp := sample.Metric.Fingerprint()
		resultFPs[fp] = struct{}{}

		if alert, ok := rule.activeAlerts[fp]; !ok {
			labels := model.LabelSet(sample.Metric.Clone())
			labels = labels.Merge(rule.labels)
			if _, ok := labels[model.MetricNameLabel]; ok {
				delete(labels, model.MetricNameLabel)
			}
			rule.activeAlerts[fp] = &Alert{
				Name:        rule.name,
				Labels:      labels,
				State:       StatePending,
				ActiveSince: timestamp,
				Value:       sample.Value,
			}
		} else {
			alert.Value = sample.Value
		}
	}

	var vector model.Vector

	// Check if any pending alerts should be removed or fire now. Write out alert timeseries.
	for fp, activeAlert := range rule.activeAlerts {
		if _, ok := resultFPs[fp]; !ok {
			vector = append(vector, activeAlert.sample(timestamp, 0))
			delete(rule.activeAlerts, fp)
			continue
		}

		if activeAlert.State == StatePending && timestamp.Sub(activeAlert.ActiveSince) >= rule.holdDuration {
			vector = append(vector, activeAlert.sample(timestamp, 0))
			activeAlert.State = StateFiring
		}

		vector = append(vector, activeAlert.sample(timestamp, 1))
	}

	return vector, nil
}
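The pending-to-firing transition hinges on the timestamp.Sub(activeAlert.ActiveSince) >= rule.holdDuration check. A rough, self-contained sketch of just that comparison, with an invented hold duration and activation time:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	// Illustrative values: a 5m hold duration and an alert pending for 6m.
	holdDuration := 5 * time.Minute
	now := model.Now()
	activeSince := now.Add(-6 * time.Minute)

	if now.Sub(activeSince) >= holdDuration {
		// eval would move the alert from StatePending to StateFiring here.
		fmt.Println("alert has been pending long enough to fire")
	}
}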
Code example #4
File: target.go Project: tommyulfsparre/prometheus
func (app relabelAppender) Append(s *model.Sample) error {
	labels := relabel.Process(model.LabelSet(s.Metric), app.relabelings...)

	// Check if the timeseries was dropped.
	if labels == nil {
		return nil
	}
	s.Metric = model.Metric(labels)

	return app.SampleAppender.Append(s)
}
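relabelAppender is a decorator: it rewrites the sample's labels and then forwards it to the wrapped SampleAppender. A self-contained sketch of the same shape; the SampleAppender interface is redefined locally to mirror the Append(*model.Sample) error signature above, and the wrapping appender and the label it sets are made up:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

// SampleAppender mirrors the storage.SampleAppender shape used above.
type SampleAppender interface {
	Append(*model.Sample) error
}

// printAppender is a stand-in terminal appender.
type printAppender struct{}

func (printAppender) Append(s *model.Sample) error {
	fmt.Println(s.Metric)
	return nil
}

// rewriteAppender rewrites the sample before forwarding it, like relabelAppender.
type rewriteAppender struct {
	next SampleAppender
}

func (a rewriteAppender) Append(s *model.Sample) error {
	s.Metric["env"] = "staging" // illustrative rewrite
	return a.next.Append(s)
}

func main() {
	app := rewriteAppender{next: printAppender{}}
	app.Append(&model.Sample{Metric: model.Metric{model.MetricNameLabel: "up"}})
}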
Code example #5
File: target.go Project: izogain/prometheus
func (app relabelAppender) Append(s *model.Sample) error {
	labels, err := Relabel(model.LabelSet(s.Metric), app.relabelings...)
	if err != nil {
		return fmt.Errorf("metric relabeling error %s: %s", s.Metric, err)
	}
	// Check if the timeseries was dropped.
	if labels == nil {
		return nil
	}
	s.Metric = model.Metric(labels)

	return app.SampleAppender.Append(s)
}
Code example #6
File: target.go Project: RobertKielty/prometheus
func (app relabelAppender) Append(s *model.Sample) {
	labels, err := Relabel(model.LabelSet(s.Metric), app.relabelings...)
	if err != nil {
		log.Errorf("Error while relabeling metric %s: %s", s.Metric, err)
		return
	}
	// Check if the timeseries was dropped.
	if labels == nil {
		return
	}
	s.Metric = model.Metric(labels)

	app.app.Append(s)
}
Code example #7
File: alerting.go Project: hvnsweeting/prometheus
// eval evaluates the rule expression and then creates pending alerts and fires
// or removes previously pending alerts accordingly.
func (r *AlertingRule) eval(ts model.Time, engine *promql.Engine) (model.Vector, error) {
	query, err := engine.NewInstantQuery(r.vector.String(), ts)
	if err != nil {
		return nil, err
	}
	res, err := query.Exec().Vector()
	if err != nil {
		return nil, err
	}

	r.mtx.Lock()
	defer r.mtx.Unlock()

	// Create pending alerts for any new vector elements in the alert expression
	// or update the expression value for existing elements.
	resultFPs := map[model.Fingerprint]struct{}{}

	for _, smpl := range res {
		fp := smpl.Metric.Fingerprint()
		resultFPs[fp] = struct{}{}

		if alert, ok := r.active[fp]; ok {
			alert.Value = smpl.Value
			continue
		}

		delete(smpl.Metric, model.MetricNameLabel)

		r.active[fp] = &Alert{
			Labels:   model.LabelSet(smpl.Metric),
			ActiveAt: ts,
			State:    StatePending,
			Value:    smpl.Value,
		}
	}

	var vec model.Vector
	// Check if any pending alerts should be removed or fire now. Write out alert timeseries.
	for fp, a := range r.active {
		if _, ok := resultFPs[fp]; !ok {
			if a.State != StateInactive {
				vec = append(vec, r.sample(a, ts, false))
			}
			// If the alert was previously firing, keep it around for a given
			// retention time so it is reported as resolved to the AlertManager.
			if a.State == StatePending || (a.ResolvedAt != 0 && ts.Sub(a.ResolvedAt) > resolvedRetention) {
				delete(r.active, fp)
			}
			if a.State != StateInactive {
				a.State = StateInactive
				a.ResolvedAt = ts
			}
			continue
		}

		if a.State == StatePending && ts.Sub(a.ActiveAt) >= r.holdDuration {
			vec = append(vec, r.sample(a, ts, false))
			a.State = StateFiring
		}

		vec = append(vec, r.sample(a, ts, true))
	}

	return vec, nil
}
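Both eval variants key their active-alert maps on model.Fingerprint, a hash of the full label set, so a series that shows up again in the query result maps back to its existing alert. A tiny sketch of that property, with illustrative metrics:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	m1 := model.Metric{model.MetricNameLabel: "up", "job": "node"}
	m2 := model.Metric{"job": "node", model.MetricNameLabel: "up"}

	// Fingerprints depend only on the label set, not on map ordering.
	fmt.Println(m1.Fingerprint() == m2.Fingerprint()) // true

	// The same bookkeeping pattern eval uses for resultFPs and r.active.
	seen := map[model.Fingerprint]struct{}{}
	seen[m1.Fingerprint()] = struct{}{}
	_, ok := seen[m2.Fingerprint()]
	fmt.Println(ok) // true: the series is recognised as already active
}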
Code example #8
File: target.go Project: robert-zaremba/prometheus
func (t *Target) scrape(sampleAppender storage.SampleAppender) (err error) {
	start := time.Now()
	baseLabels := t.BaseLabels()

	t.RLock()
	var (
		honorLabels          = t.honorLabels
		httpClient           = t.httpClient
		metricRelabelConfigs = t.metricRelabelConfigs
	)
	t.RUnlock()

	defer func() {
		t.status.setLastError(err)
		recordScrapeHealth(sampleAppender, start, baseLabels, t.status.Health(), time.Since(start))
	}()

	req, err := http.NewRequest("GET", t.URL().String(), nil)
	if err != nil {
		panic(err)
	}
	req.Header.Add("Accept", acceptHeader)

	resp, err := httpClient.Do(req)
	if err != nil {
		return err
	}
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("server returned HTTP status %s", resp.Status)
	}

	dec, err := expfmt.NewDecoder(resp.Body, resp.Header)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	sdec := expfmt.SampleDecoder{
		Dec: dec,
		Opts: &expfmt.DecodeOptions{
			Timestamp: model.TimeFromUnixNano(start.UnixNano()),
		},
	}

	t.ingestedSamples = make(chan model.Vector, ingestedSamplesCap)

	go func() {
		for {
			// TODO(fabxc): Change the SampleAppender interface to return an error
			// so we can proceed based on the status and don't leak goroutines trying
			// to append a single sample after dropping all the other ones.
			//
			// This will also allow us to reuse this vector and save allocations.
			var samples model.Vector
			if err = sdec.Decode(&samples); err != nil {
				break
			}
			if err = t.ingest(samples); err != nil {
				break
			}
		}
		close(t.ingestedSamples)
	}()

	for samples := range t.ingestedSamples {
		for _, s := range samples {
			if honorLabels {
				// Merge the metric with the baseLabels for labels not already set in the
				// metric. This also considers labels explicitly set to the empty string.
				for ln, lv := range baseLabels {
					if _, ok := s.Metric[ln]; !ok {
						s.Metric[ln] = lv
					}
				}
			} else {
				// Merge the ingested metric with the base label set. On a collision the
				// value of the label is stored in a label prefixed with the exported prefix.
				for ln, lv := range baseLabels {
					if v, ok := s.Metric[ln]; ok && v != "" {
						s.Metric[model.ExportedLabelPrefix+ln] = v
					}
					s.Metric[ln] = lv
				}
			}
			// Avoid the copy in Relabel if there are no configs.
			if len(metricRelabelConfigs) > 0 {
				labels, err := Relabel(model.LabelSet(s.Metric), metricRelabelConfigs...)
				if err != nil {
					log.Errorf("Error while relabeling metric %s of instance %s: %s", s.Metric, req.URL, err)
					continue
				}
				// Check if the timeseries was dropped.
				if labels == nil {
					continue
				}
				s.Metric = model.Metric(labels)
			}
			sampleAppender.Append(s)
		}
	}

	if err == io.EOF {
		return nil
	}
	return err
}
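The core of scrape() is the SampleDecoder loop that turns the exposition-format response body into model.Vector batches. A minimal sketch of that loop against an in-memory payload; it assumes the current github.com/prometheus/common/expfmt API, where NewDecoder takes a Format rather than the response header used in the older snippet above:

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	const payload = `# TYPE http_requests_total counter
http_requests_total{code="200"} 1027
http_requests_total{code="500"} 3
`

	dec := expfmt.NewDecoder(strings.NewReader(payload), expfmt.FmtText)
	sdec := expfmt.SampleDecoder{
		Dec:  dec,
		Opts: &expfmt.DecodeOptions{Timestamp: model.Now()},
	}

	var all model.Vector
	for {
		var samples model.Vector
		if err := sdec.Decode(&samples); err != nil {
			break // io.EOF ends the payload, mirroring the goroutine above
		}
		all = append(all, samples...)
	}
	fmt.Println(len(all), "samples decoded")
}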