Example #1
func (self *influxdbSink) metricToSeries(timeseries *sink_api.Timeseries) *influxdb.Series {
	columns := []string{}
	values := []interface{}{}
	// TODO: move labels to tags once v0.9.0 is released.
	seriesName := timeseries.Point.Name
	if timeseries.MetricDescriptor.Units.String() != "" {
		seriesName = fmt.Sprintf("%s_%s", seriesName, timeseries.MetricDescriptor.Units.String())
	}
	if timeseries.MetricDescriptor.Type.String() != "" {
		seriesName = fmt.Sprintf("%s_%s", seriesName, timeseries.MetricDescriptor.Type.String())
	}

	// Add the real metric value.
	columns = append(columns, "value")
	values = append(values, timeseries.Point.Value)
	// Append labels.
	if !self.c.avoidColumns {
		for key, value := range timeseries.Point.Labels {
			columns = append(columns, key)
			values = append(values, value)
		}
	} else {
		seriesName = strings.Replace(seriesName, "/", "_", -1)
		seriesName = fmt.Sprintf("%s_%s", util.LabelsToString(timeseries.Point.Labels, "_"), seriesName)
	}
	// Add timestamp.
	columns = append(columns, "time")
	values = append(values, timeseries.Point.End.Unix())
	// Add sequence number.
	columns = append(columns, "sequence_number")
	values = append(values, self.seqNum.Get(seriesName))

	return self.newSeries(seriesName, columns, values)
}
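Every example on this page builds series names or cache keys with util.LabelsToString, whose source is not shown here. A minimal sketch of what it presumably does, assuming it renders the map as sorted key:value pairs joined by the caller's separator (uses fmt, sort, and strings; sorting is what makes the output deterministic, since Go map iteration order is random):

// LabelsToString flattens a label map into one deterministic string.
// Callers above use the result as a series name or as a map key, so the
// pairs must be sorted before joining.
func LabelsToString(labels map[string]string, separator string) string {
	pairs := make([]string, 0, len(labels))
	for key, value := range labels {
		pairs = append(pairs, fmt.Sprintf("%s:%s", key, value))
	}
	sort.Strings(pairs)
	return strings.Join(pairs, separator)
}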
Example #2
func (sink *influxdbSink) storeEventNoColumns(event kube_api.Event) (*influxdb.Series, error) {
	// Append labels to seriesName instead of adding extra columns
	seriesName := strings.Replace(eventsSeriesName, "/", "_", -1)
	labels := make(map[string]string)
	if event.InvolvedObject.Kind == "Pod" {
		labels[sink_api.LabelPodId.Key] = string(event.InvolvedObject.UID)
		labels[sink_api.LabelPodName.Key] = event.InvolvedObject.Name
	}
	labels[sink_api.LabelHostname.Key] = event.Source.Host
	seriesName = fmt.Sprintf("%s_%s", util.LabelsToString(labels, "_"), seriesName)

	columns := []string{}
	columns = append(columns, "time")            // Column 0
	columns = append(columns, "value")           // Column 1
	columns = append(columns, "sequence_number") // Column 2

	value, err := getEventValue(&event)
	if err != nil {
		return nil, err
	}

	// There's only one point per series for no columns
	points := make([][]interface{}, 1)
	points[0] = make([]interface{}, len(columns))
	points[0][0] = event.LastTimestamp.Time.Round(time.Millisecond).Unix() // Column 0 - time
	points[0][1] = value                                                   // Column 1 - value
	points[0][2] = sink.seqNum.Get(eventsSeriesName)                       // Column 2 - sequence_number
	return &influxdb.Series{
		Name:    seriesName,
		Columns: columns,
		Points:  points,
	}, nil
}
Example #3
func (self *defaultDecoder) getContainerMetrics(container *source_api.Container, labels map[string]string) []sinksV1Api.Timeseries {
	if container == nil {
		return nil
	}
	labels[sinksV1Api.LabelContainerName.Key] = container.Name
	// One metric value per data point.
	var result []sinksV1Api.Timeseries
	labelsAsString := util.LabelsToString(labels, ",")
	for _, stat := range container.Stats {
		if stat == nil {
			continue
		}
		// Add all supported metrics that have values.
		for index, supported := range self.supportedStatMetrics {
			// Finest allowed granularity is seconds.
			stat.Timestamp = stat.Timestamp.Round(time.Second)
			key := timeseriesKey{
				Name:   supported.Name,
				Labels: labelsAsString,
			}
			// TODO: remove this once the heapster source is tested to not provide duplicate stats.
			if data, ok := self.lastExported[key]; ok && data.After(stat.Timestamp) {
				continue
			}

			if supported.HasValue(&container.Spec) {
				// Cumulative stats have container creation time as their start time.
				var startTime time.Time
				if supported.Type == sinksV1Api.MetricCumulative {
					startTime = container.Spec.CreationTime
				} else {
					startTime = stat.Timestamp
				}
				points := supported.GetValue(&container.Spec, stat)
				for _, point := range points {
					labels := util.CopyLabels(labels)
					for name, value := range point.Labels {
						labels[name] = value
					}
					timeseries := sinksV1Api.Timeseries{
						MetricDescriptor: &self.supportedStatMetrics[index].MetricDescriptor,
						Point: &sinksV1Api.Point{
							Name:   supported.Name,
							Labels: labels,
							Start:  startTime.Round(time.Second),
							End:    stat.Timestamp,
							Value:  point.Value,
						},
					}
					result = append(result, timeseries)
				}
			}
			self.lastExported[key] = stat.Timestamp
		}
	}

	return result
}
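The inner loop above rebinds labels through util.CopyLabels before merging in point-level labels; without a copy, per-point labels would leak into the shared map the caller passed in. CopyLabels is not shown in these snippets; a plausible sketch, assuming a plain shallow copy:

// CopyLabels returns a shallow copy of a label map so that per-point
// mutations do not contaminate the caller's map.
func CopyLabels(labels map[string]string) map[string]string {
	copied := make(map[string]string, len(labels))
	for key, value := range labels {
		copied[key] = value
	}
	return copied
}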
Example #4
func (self *GcmCore) GetEquivalentRateMetric(metric *sink_api.Point) (*Timeseries, error) {
	// TODO(vmarmol): Validation and cleanup of data.
	// TODO(vmarmol): Handle non-int64 data types. There is an issue with using omitempty since 0 is a valid value for us.
	value, ok := metric.Value.(int64)
	if !ok {
		return nil, fmt.Errorf("non-int64 data not implemented. Seen for metric %q", metric.Name)
	}

	// Use full label names.
	labels := make(map[string]string, len(metric.Labels))
	for key, value := range metric.Labels {
		labels[FullLabelName(key)] = value
	}

	rateMetric, exists := gcmRateMetrics[metric.Name]
	if !exists {
		return nil, nil
	}
	key := lastValueKey{
		metricName: FullMetricName(rateMetric.name),
		labels:     util.LabelsToString(labels, ","),
	}
	lastValueRaw := self.lastValue.Get(key)
	self.lastValue.Put(key, lastValueData{
		value:     value,
		timestamp: metric.End,
	})

	// We need two metrics to do a delta, skip first value.
	if lastValueRaw == nil {
		return nil, nil
	}
	lastValue, ok := lastValueRaw.(lastValueData)
	if !ok {
		return nil, nil
	}
	doubleValue := float64(value-lastValue.value) / float64(metric.End.UnixNano()-lastValue.timestamp.UnixNano()) * float64(time.Second)

	// Translate to a float using the custom translation function.
	if transFunc, ok := translationFuncs[rateMetric.name]; ok {
		doubleValue = transFunc(doubleValue)
	}
	return &Timeseries{
		TimeseriesDescriptor: timeseriesDescriptor{
			Metric: FullMetricName(rateMetric.name),
			Labels: labels,
		},
		Point: point{
			Start:       metric.End,
			End:         metric.End,
			DoubleValue: &doubleValue,
		},
	}, nil
}
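The conversion above is a plain difference quotient: the delta between two cumulative samples divided by the elapsed time in nanoseconds, scaled by time.Second to get a per-second rate. A standalone sketch of the same arithmetic (the rate helper and the figures are illustrative, not from the source):

// rate mirrors the computation in GetEquivalentRateMetric: a per-second
// rate derived from two cumulative samples.
func rate(prev, cur int64, prevT, curT time.Time) float64 {
	return float64(cur-prev) / float64(curT.UnixNano()-prevT.UnixNano()) * float64(time.Second)
}

// Example: 3e9 ns of cumulative CPU time accrued over 30 s gives
// rate(0, 3e9, t, t.Add(30*time.Second)) == 1e8, i.e. 0.1 CPU-seconds per second.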
Example #5
// Generate the labels.
func (self *decoder) getPodLabels(pod *cache.PodElement) map[string]string {
	labels := make(map[string]string)
	labels[LabelPodId.Key] = pod.UID
	labels[LabelPodNamespace.Key] = pod.Namespace
	labels[LabelPodNamespaceUID.Key] = pod.NamespaceUID
	labels[LabelPodName.Key] = pod.Name
	labels[LabelLabels.Key] = util.LabelsToString(pod.Labels, ",")
	labels[LabelHostname.Key] = pod.Hostname
	labels[LabelHostID.Key] = pod.ExternalID

	return labels
}
Example #6
// Generate the labels.
func (self *defaultDecoder) getPodLabels(pod *source_api.Pod) map[string]string {
	labels := make(map[string]string)
	labels[sinksV1Api.LabelPodId.Key] = pod.ID
	labels[sinksV1Api.LabelPodNamespace.Key] = pod.Namespace
	labels[sinksV1Api.LabelPodNamespaceUID.Key] = pod.NamespaceUID
	labels[sinksV1Api.LabelPodName.Key] = pod.Name
	labels[sinksV1Api.LabelLabels.Key] = util.LabelsToString(pod.Labels, ",")
	labels[sinksV1Api.LabelHostname.Key] = pod.Hostname
	labels[sinksV1Api.LabelHostID.Key] = pod.ExternalID

	return labels
}
Example #7
func (a *Api) exportMetrics(request *restful.Request, response *restful.Response) {
	points, err := a.manager.ExportMetrics()
	if err != nil {
		response.WriteError(http.StatusInternalServerError, err)
		return
	}

	// Group points by target labels.
	timeseriesForTargetLabels := map[string]*Timeseries{}
	for _, point := range points {
		targetLabels, otherLabels := separateLabels(point.Labels)
		labelsStr := util.LabelsToString(targetLabels, ",")

		// Add timeseries if it does not exist.
		timeseries, ok := timeseriesForTargetLabels[labelsStr]
		if !ok {
			timeseries = &Timeseries{
				Metrics: map[string][]Point{},
				Labels:  targetLabels,
			}
			timeseriesForTargetLabels[labelsStr] = timeseries
		}

		// Add point to this timeseries.
		timeseries.Metrics[point.Name] = append(timeseries.Metrics[point.Name], Point{
			Start:  point.Start,
			End:    point.End,
			Labels: otherLabels,
			Value:  point.Value,
		})
	}

	// Turn into a slice.
	timeseries := make([]*Timeseries, 0, len(timeseriesForTargetLabels))
	for _, val := range timeseriesForTargetLabels {
		timeseries = append(timeseries, val)
	}

	response.WriteEntity(timeseries)
}
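separateLabels is not shown in this snippet; judging by the comments, it partitions a point's labels into the identity ("target") labels that define a timeseries and the remaining per-point labels. A hypothetical sketch, assuming a fixed set of target label keys (the key set is an illustration, not from the source):

// separateLabels splits labels into target labels, which identify the
// timeseries, and other labels, which stay on the individual point.
// targetLabelKeys is an assumed set for illustration only.
var targetLabelKeys = map[string]bool{
	"hostname":       true,
	"pod_id":         true,
	"container_name": true,
}

func separateLabels(labels map[string]string) (target, other map[string]string) {
	target = map[string]string{}
	other = map[string]string{}
	for key, value := range labels {
		if targetLabelKeys[key] {
			target[key] = value
		} else {
			other[key] = value
		}
	}
	return target, other
}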
Example #8
func (self *decoder) getContainerMetrics(container *cache.ContainerElement, labels map[string]string) []Timeseries {
	if container == nil {
		return nil
	}
	labels[LabelContainerName.Key] = container.Name
	if _, exists := labels[LabelHostID.Key]; !exists {
		labels[LabelHostID.Key] = container.ExternalID
	}
	// One metric value per data point.
	var result []Timeseries
	labelsAsString := util.LabelsToString(labels, ",")
	for _, metric := range container.Metrics {
		if metric == nil || metric.Spec == nil || metric.Stats == nil {
			continue
		}
		// Add all supported metrics that have values.
		for index, supported := range self.supportedStatMetrics {
			// Finest allowed granularity is seconds.
			metric.Stats.Timestamp = metric.Stats.Timestamp.Round(time.Second)
			key := timeseriesKey{
				Name:   supported.Name,
				Labels: labelsAsString,
			}
			// TODO: remove this once the heapster source is tested to not provide duplicate stats.
			if data, ok := self.lastExported[key]; ok && data.After(metric.Stats.Timestamp) {
				continue
			}

			if supported.HasValue(metric.Spec) {
				// Cumulative stats have container creation time as their start time.
				var startTime time.Time
				if supported.Type == MetricCumulative {
					startTime = metric.Spec.CreationTime
				} else {
					startTime = metric.Stats.Timestamp
				}
				points := supported.GetValue(metric.Spec, metric.Stats)
				for _, point := range points {
					labels := util.CopyLabels(labels)
					for name, value := range point.Labels {
						labels[name] = value
					}
					timeseries := Timeseries{
						MetricDescriptor: &self.supportedStatMetrics[index].MetricDescriptor,
						Point: &Point{
							Name:   supported.Name,
							Labels: labels,
							Start:  startTime.Round(time.Second),
							End:    metric.Stats.Timestamp,
							Value:  point.Value,
						},
					}
					result = append(result, timeseries)
				}
			}
			self.lastExported[key] = metric.Stats.Timestamp
		}
	}

	return result
}