Example #1
func ProcessDerivative(dep_el chains.ChainEl) (our_el chains.ChainEl) {
	our_el = *chains.NewChainEl()
	go func(our_el chains.ChainEl, dep_el chains.ChainEl) {
		from := <-our_el.Settings
		until := <-our_el.Settings
		// ask the dependency for one extra step of history so the first requested
		// point has a predecessor to diff against
		dep_el.Settings <- from - 60
		dep_el.Settings <- until
		var last_dp *metrics.Datapoint

		for {
			d := <-dep_el.Link
			if last_dp == nil {
				last_dp = &d
				continue
			}
			if d.Known && last_dp.Known {
				our_el.Link <- *metrics.NewDatapoint(d.Ts, d.Value-last_dp.Value, true)
			} else {
				our_el.Link <- *metrics.NewDatapoint(d.Ts, 0.0, false)
			}
			last_dp = &d
			if d.Ts >= until {
				return
			}
		}
	}(our_el, dep_el)
	return
}
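A minimal sketch of how a caller might drive one of these chain elements; the store variable, metric name, and window bounds are assumptions for illustration, not part of the code above:

// hypothetical driver: request a window and drain the resulting datapoints
func consumeDerivative(store TextStore, from, until int32) {
	source, err := store.Get("web1.requests") // hypothetical metric name
	if err != nil {
		panic(err)
	}
	deriv := ProcessDerivative(*source)
	deriv.Settings <- from  // window start (unix seconds, assumed 60s-aligned)
	deriv.Settings <- until // window end
	for {
		d := <-deriv.Link
		fmt.Println(d.Ts, d.Value, d.Known)
		if d.Ts >= until {
			break
		}
	}
}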
Example #2
func (t TextStore) Get(name string) (our_el *chains.ChainEl, err error) {
	our_el = chains.NewChainEl()

	go func(our_el *chains.ChainEl) {
		// the outer Get() has already returned by the time this goroutine runs, so
		// use a local err: failures here can only panic, they can never reach the
		// caller, and assigning the named return from here would be a data race
		path := t.path(name)
		file, err := os.Open(path)
		if err != nil {
			panic(err)
		}
		defer file.Close()
		from := <-our_el.Settings
		until := <-our_el.Settings

		scanner := bufio.NewScanner(file)
		first := true
		// this will be used to fill the potential gap between last datapoint and until,
		// but also if there were no (matching) datapoints in the file at all.
		last_ts := from - 60
		for scanner.Scan() {
			line := scanner.Text()
			parts := strings.Split(line, " ")
			ts, _ := strconv.ParseInt(parts[0], 10, 32)
			val, _ := strconv.ParseFloat(parts[1], 64)
			known, _ := strconv.ParseBool(parts[2])
			dp := metrics.NewDatapoint(int32(ts), val, known)
			if first {
				if from < dp.Ts {
					for new_ts := from; new_ts < dp.Ts; new_ts += 60 {
						our_el.Link <- *metrics.NewDatapoint(new_ts, 0.0, false)
					}
				}
			}
			if dp.Ts >= from && dp.Ts <= until {
				our_el.Link <- *dp
				last_ts = dp.Ts
			}
			first = false
		}
		if err := scanner.Err(); err != nil {
			panic(fmt.Sprintf("error reading %s: %s", path, err.Error()))
		}
		if last_ts < until {
			for new_ts := last_ts + 60; new_ts <= until+60; new_ts += 60 {
				our_el.Link <- *metrics.NewDatapoint(new_ts, 0.0, false)
			}
		}
	}(our_el)
	return our_el, nil
}
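For reference, the on-disk format this reader expects is one datapoint per line: a unix timestamp, a float value, and a known flag ("true"/"false"), separated by single spaces. A hypothetical metric file, at the 60-second step assumed throughout, might look like:

1380000000 10.5 true
1380000060 11.0 true
1380000120 0 false
1380000180 12.25 true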
Example #3
func ProcessIntegral(dep_el chains.ChainEl) (our_el chains.ChainEl) {
	our_el = *chains.NewChainEl()
	go func(our_el chains.ChainEl, dep_el chains.ChainEl) {
		from := <-our_el.Settings
		until := <-our_el.Settings
		dep_el.Settings <- from - 60
		dep_el.Settings <- until
		sum := float64(0)
		// the first point only serves to establish last_ts (it sits one step before
		// the requested window, see the from-60 above); its value is not accumulated
		d := <-dep_el.Link
		last_ts := d.Ts

		for {
			d = <-dep_el.Link
			if d.Known {
				sum += d.Value * float64(d.Ts-last_ts)
			}
			our_el.Link <- *metrics.NewDatapoint(d.Ts, sum, true)
			last_ts = d.Ts
			if d.Ts >= until {
				return
			}
		}
	}(our_el, dep_el)
	return
}
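As a quick worked check of the accumulation rule (sum += value * Δt): with the 60-second step assumed throughout and a constant known input of 2.0, each step contributes 2.0 * 60 = 120, so a window starting at from yields (from, 120), (from+60, 240), (from+120, 360), and so on; the extra point fetched at from-60 only seeds last_ts and is never added to the sum.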
Example #4
func ReadTextMetric(name string) (our_el *chains.ChainEl, err error) {
	var file *os.File
	path := GetTextMetricPath(name)
	if file, err = os.Open(path); err != nil {
		return nil, err
	}
	defer file.Close()

	scanner := bufio.NewScanner(file)
	datapoints := make([]*metrics.Datapoint, 0)
	for scanner.Scan() {
		line := scanner.Text()
		parts := strings.Split(line, " ")
		ts, _ := strconv.ParseInt(parts[0], 10, 32)
		val, _ := strconv.ParseFloat(parts[1], 64)
		known, _ := strconv.ParseBool(parts[2])
		dp := metrics.NewDatapoint(int32(ts), val, known)
		datapoints = append(datapoints, dp)
	}
	if err := scanner.Err(); err != nil {
		return nil, fmt.Errorf("error reading %s: %s", path, err.Error())
	}
	metric := metrics.NewMetric(name, datapoints)

	our_el = chains.NewChainEl()
	go func(our_el *chains.ChainEl, metric *metrics.Metric) {
		from := <-our_el.Settings
		until := <-our_el.Settings
		// if we don't have enough data to cover the requested timespan, fill with nils
		// (and if the file contained no datapoints at all, the entire range is unknown)
		if len(metric.Data) == 0 {
			for new_ts := from; new_ts <= until+60; new_ts += 60 {
				our_el.Link <- *metrics.NewDatapoint(new_ts, 0.0, false)
			}
			return
		}
		if metric.Data[0].Ts > from {
			for new_ts := from; new_ts < metric.Data[0].Ts; new_ts += 60 {
				our_el.Link <- *metrics.NewDatapoint(new_ts, 0.0, false)
			}
		}
		for _, d := range metric.Data {
			if d.Ts >= from && d.Ts <= until {
				our_el.Link <- *d
			}
		}
		if metric.Data[len(metric.Data)-1].Ts < until {
			for new_ts := metric.Data[len(metric.Data)-1].Ts + 60; new_ts <= until+60; new_ts += 60 {
				our_el.Link <- *metrics.NewDatapoint(new_ts, 0.0, false)
			}
		}
	}(our_el, metric)
	return our_el, nil
}
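Unlike TextStore.Get above, this variant reads and parses the whole file before the goroutine starts, so I/O failures can be returned to the caller instead of panicking inside the goroutine. A hypothetical call site (from and until are int32 unix timestamps as in the earlier sketch):

el, err := ReadTextMetric("web1.requests") // hypothetical metric name
if err != nil {
	panic(err)
}
el.Settings <- from
el.Settings <- until
// drain el.Link as in the earlier driver sketch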
Example #5
// like with graphite, it is assumed datapoints from different inputs are time synchronized
// at some point we might lift that and take it into account in individual functions
func ProcessSum(dep_els ...chains.ChainEl) (our_el chains.ChainEl) {
	our_el = *chains.NewChainEl()
	go func(our_el chains.ChainEl, dep_els []chains.ChainEl) {
		from := <-our_el.Settings
		until := <-our_el.Settings
		for _, dep_el := range dep_els {
			dep_el.Settings <- from
			dep_el.Settings <- until
		}
		var sum float64
		var known bool
		// for every point in time (can't iterate over them here, they come from the channels)
		for {
			// sum the datapoints from the different channels together (each dp from each chan is one term)
			// if one or more of the points is !known, the resulting sum is not known
			// we're done once every channel has delivered its datapoint for ts >= until;
			// checking that after the inner loop keeps all channels drained even when
			// the sum already went unknown earlier in the timestep
			var ts int32
			for i, c := range dep_els {
				// first term in the sum, reset the data that will go into datapoint
				if i == 0 {
					known = true
					sum = 0.0
				}
				d := <-c.Link
				ts = d.Ts
				if known {
					if !d.Known {
						known = false
						our_el.Link <- *metrics.NewDatapoint(d.Ts, 0.0, false)
					} else {
						sum += d.Value
						if i == len(dep_els)-1 {
							our_el.Link <- *metrics.NewDatapoint(d.Ts, sum, true)
						}
					}
				}
			}
			if ts >= until {
				return
			}
		}
	}(our_el, dep_els)
	return
}
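A sketch of how the variadic signature might be used to add two series together; the store variable, metric names, and window bounds are assumptions:

// hypothetical: sum two series fetched from the same store
a, _ := store.Get("web1.requests")
b, _ := store.Get("web2.requests")
total := ProcessSum(*a, *b)
total.Settings <- from
total.Settings <- until
// drain total.Link as in the earlier driver sketch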
Example #6
func (i InfluxdbStore) Get(name string) (our_el *chains.ChainEl, err error) {

	our_el = chains.NewChainEl()
	go func(our_el *chains.ChainEl) {
		from := <-our_el.Settings
		until := <-our_el.Settings

		query := fmt.Sprintf("select time, value from %s where time > %ds and time < %ds order asc", name, from, until)
		series, err := i.client.Query(query)
		if err != nil {
			panic(err)
		}
		// len(series) can be 0 if there's no datapoints matching the range.
		// so it's up to the caller to make sure the store is supposed to have the data
		// if we don't have enough data to cover the requested timespan, fill with nils
		if len(series) > 0 {
			points := series[0].Points
			oldest_dp := int32(points[0][0].(float64) / 1000)
			latest_dp := int32(points[len(points)-1][0].(float64) / 1000)
			if oldest_dp > from {
				for new_ts := from; new_ts < oldest_dp; new_ts += 60 {
					our_el.Link <- *metrics.NewDatapoint(new_ts, 0.0, false)
				}
			}
			for _, values := range points {
				ts := int32(values[0].(float64) / 1000) // influxdb reports time in milliseconds
				val := values[2].(float64)              // values[1] is the sequence number, so the value sits at index 2
				dp := metrics.NewDatapoint(ts, val, true)
				our_el.Link <- *dp
			}
			if latest_dp < until {
				for new_ts := latest_dp + 60; new_ts <= until+60; new_ts += 60 {
					our_el.Link <- *metrics.NewDatapoint(new_ts, 0.0, false)
				}
			}
		} else {
			for ts := from; ts <= until+60; ts += 60 {
				our_el.Link <- *metrics.NewDatapoint(ts, 0.0, false)
			}
		}
	}(our_el)
	return our_el, nil
}
Example #7
// todo: allow N inputs and outputs
func ProcessScale(dep_el chains.ChainEl, multiplier float64) (our_el chains.ChainEl) {
	our_el = *chains.NewChainEl()
	go func(our_el chains.ChainEl, dep_el chains.ChainEl, multiplier float64) {
		from := <-our_el.Settings
		until := <-our_el.Settings
		dep_el.Settings <- from
		dep_el.Settings <- until
		for {
			d := <-dep_el.Link
			if !d.Known {
				our_el.Link <- *metrics.NewDatapoint(d.Ts, 0.0, false)
				if d.Ts >= until {
					return
				}
				continue
			}
			our_el.Link <- *metrics.NewDatapoint(d.Ts, d.Value*multiplier, true)
			if d.Ts >= until {
				return
			}
		}
	}(our_el, dep_el, multiplier)
	return
}
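Because each processing function both consumes and returns a chains.ChainEl, they compose directly. A sketch, assuming the 60-second step used throughout these examples and a source element obtained from a store as above, turning a counter into a per-second rate:

// hypothetical composition: counter -> per-step delta -> per-second rate
rate := ProcessScale(ProcessDerivative(*source), 1.0/60.0)
rate.Settings <- from
rate.Settings <- until
// drain rate.Link as in the earlier driver sketch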