func (sink *InfluxDBSink) run() {
	log.Printf("Sink run")
	go sink.aggregator.run()
	for {
		log.Printf("Sink wait for aggregated")
		batchRequest := <-sink.aggregated
		log.Printf("Sink got aggregated")
		bp, err := client.NewBatchPoints(sink.BatchPointsConfig)
		if err != nil {
			log.Fatal(err)
		}
		for _, req := range batchRequest {
			fields := make(map[string]interface{})
			log.Printf("%v", req)
			fields["request_time"] = req.GetRequestTime()
			fields["ru_utime"] = req.GetRuUtime()
			fields["ru_stime"] = req.GetRuStime()
			tags := make(map[string]string)
			tags["server_name"] = req.GetServerName()
			tags["hostname"] = req.GetHostname()
			tags["script_name"] = req.GetScriptName()
			point, err := client.NewPoint("request", tags, fields, time.Now())
			if err != nil {
				log.Fatal(err)
			}
			bp.AddPoint(point)
		}
		err = sink.client.Write(bp)
		if err != nil {
			log.Fatal(err)
		}
	}
}
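The sink above assumes an already-configured client and an aggregated channel feeding it batches. A minimal setup sketch, assuming the InfluxDB client/v2 package; the InfluxDBSink fields and values here are inferred from the method above, not defined by the library:

c, err := client.NewHTTPClient(client.HTTPConfig{
	Addr: "http://localhost:8086", // hypothetical address
})
if err != nil {
	log.Fatal(err)
}
sink := &InfluxDBSink{
	client: c, // field names inferred from the method above
	BatchPointsConfig: client.BatchPointsConfig{
		Database:  "requests", // hypothetical database
		Precision: "s",
	},
	// aggregator and aggregated omitted; their types are not shown above
}
go sink.run()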
Example #2
func syncDeliveryServiceStat(sourceClient influx.Client, targetClient influx.Client, statName string, days int) {

	db := "deliveryservice_stats"
	bps, err := influx.NewBatchPoints(influx.BatchPointsConfig{
		Database:        db,
		Precision:       "ms",
		RetentionPolicy: "monthly",
	})
	if err != nil {
		fmt.Printf("An error occurred creating batch points for %s: %v\n", statName, err)
		return
	}

	queryString := fmt.Sprintf("select time, cachegroup, cdn, deliveryservice, value from \"monthly\".\"%s\"", statName)
	if days > 0 {
		queryString += fmt.Sprintf(" where time > now() - %dd", days)
	}
	fmt.Println("queryString ", queryString)
	res, err := queryDB(sourceClient, queryString, db)
	if err != nil {
		fmt.Printf("An error occured getting %s records from sourceDb: %v\n", statName, err)
		return
	}
	sourceStats := getDeliveryServiceStats(res)
	// get value from target DB
	targetRes, err := queryDB(targetClient, queryString, db)
	if err != nil {
		fmt.Printf("An error occured getting %s record from target db: %v\n", statName, err)
		return
	}
	targetStats := getDeliveryServiceStats(targetRes)

	for ssKey := range sourceStats {
		ts := targetStats[ssKey]
		ss := sourceStats[ssKey]
		if ts.value >= ss.value {
			fmt.Printf("target value %v is at least equal to source value %v\n", ts.value, ss.value)
			continue // target value is already up to date, so leave it
		}
		statTime, err := time.Parse(time.RFC3339, ss.t)
		if err != nil {
			fmt.Printf("error parsing time %q for %s: %v\n", ss.t, statName, err)
			continue
		}
		tags := map[string]string{
			"cdn":             ss.cdn,
			"cachegroup":      ss.cacheGroup,
			"deliveryservice": ss.deliveryService,
		}
		fields := map[string]interface{}{
			"value": ss.value,
		}
		pt, err := influx.NewPoint(
			statName,
			tags,
			fields,
			statTime,
		)
		if err != nil {
			fmt.Printf("error adding creating point for %v...%v\n", statName, err)
			continue
		}
		bps.AddPoint(pt)
	}
	if err := targetClient.Write(bps); err != nil {
		fmt.Printf("An error occurred writing %s records to target db: %v\n", statName, err)
	}
}
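Both this example and Example #8 below call a queryDB helper that is not shown; a minimal version, modeled on the canonical helper from the InfluxDB client/v2 documentation, might look like:

func queryDB(clnt influx.Client, cmd string, db string) ([]influx.Result, error) {
	// Run one query against the given database and unwrap the response.
	q := influx.Query{
		Command:  cmd,
		Database: db,
	}
	response, err := clnt.Query(q)
	if err != nil {
		return nil, err
	}
	if response.Error() != nil {
		return nil, response.Error()
	}
	return response.Results, nil
}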
Example #3
func (d *DavisSi1000) storeReports(reportChan <-chan WxReport, ic influx.Client) {
	for report := range reportChan {
		// Create an InfluxDB batch of points.  We only receive readings every
		// 2.5s so there's no need to batch more than one at a time.
		bp, err := influx.NewBatchPoints(influx.BatchPointsConfig{
			Database:  d.config.InfluxDB.InfluxDBName,
			Precision: "s",
		})
		if err != nil {
			log.Println("Error logging report to InfluxDB:", err)
			continue
		}

		tags := map[string]string{"transmitter-id": string(report.TransmitterID)}
		fields := map[string]interface{}{
			"wind_speed":      report.WindSpeed,
			"wind_dir":        report.WindDir,
			"temperature":     report.Temperature,
			"humidity":        report.Humidity,
			"dewpoint":        report.Dewpoint,
			"heat_index":      report.HeatIndex,
			"wind_chill":      report.WindChill,
			"uv_index":        report.UVIndex,
			"solar_radiation": report.SolarRadiation,
			"rainfall":        report.Rainfall,
		}

		// Build our InfluxDB point from our tags and fields...
		pt, err := influx.NewPoint("wxreport", tags, fields, time.Now())
		if err != nil {
			log.Println("Error creating InfluxDB point:", err)
			continue
		}
		// ...and add it to our batch
		bp.AddPoint(pt)

		// Write the batch to the InfluxDB client
		if err = ic.Write(bp); err != nil {
			log.Println("Error logging data point to InfluxDB:", err)
			continue
		}
		// Log this report to the console
		log.Printf("Received report: %+v\n", report)
	}
}
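Because storeReports ranges over reportChan, it returns when the producer closes the channel. A hypothetical usage sketch, assuming a configured *DavisSi1000 d and influx client ic:

reports := make(chan WxReport)
go d.storeReports(reports, ic)
reports <- WxReport{ /* readings arrive every 2.5s */ }
// ...
close(reports) // lets the storeReports goroutine exit cleanly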
Example #4
func (i *InfluxDBOutNode) write(db, rp string, batch models.Batch) error {
	if i.i.Database != "" {
		db = i.i.Database
	}
	if i.i.RetentionPolicy != "" {
		rp = i.i.RetentionPolicy
	}
	name := i.i.Measurement
	if name == "" {
		name = batch.Name
	}

	var err error
	points := make([]*client.Point, len(batch.Points))
	for j, p := range batch.Points {
		var tags models.Tags
		if len(i.i.Tags) > 0 {
			tags = make(models.Tags, len(p.Tags)+len(i.i.Tags))
			for k, v := range p.Tags {
				tags[k] = v
			}
			for k, v := range i.i.Tags {
				tags[k] = v
			}
		} else {
			tags = p.Tags
		}
		points[j], err = client.NewPoint(
			name,
			tags,
			p.Fields,
			p.Time,
		)
		if err != nil {
			return err
		}
	}
	bpc := client.BatchPointsConfig{
		Database:         db,
		RetentionPolicy:  rp,
		WriteConsistency: i.i.WriteConsistency,
		Precision:        i.i.Precision,
	}
	i.wb.enqueue(bpc, points)
	return nil
}
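The method above only stages the write: the points and their BatchPointsConfig are handed to a queue (i.wb.enqueue) and written later. A hypothetical consumer for one queued entry, ignoring the buffering and flush logic Kapacitor's real write buffer adds:

func writeQueued(c client.Client, bpc client.BatchPointsConfig, points []*client.Point) error {
	bp, err := client.NewBatchPoints(bpc)
	if err != nil {
		return err
	}
	bp.AddPoints(points) // append the whole slice to the batch
	return c.Write(bp)
}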
Example #5
func Flush() error {
	bo := backoff.NewExponentialBackOff()
	bo.MaxElapsedTime = 5 * time.Minute
	for {
		// Start a fresh batch for each group of ten points; the batch
		// from the previous iteration has already been written.
		bp, err := client.NewBatchPoints(client.BatchPointsConfig{
			Database: MyDB,
		})
		if err != nil {
			return err
		}
		for i := 0; i < 10; i++ {
			point := <-cPoints
			tags := map[string]string{"interface": point.Interface}
			fields := map[string]interface{}{
				"error":   point.Error,
				"latency": point.Latency.Seconds(),
			}
			pt, err := client.NewPoint("network", tags, fields, point.Time)
			if err != nil {
				return err
			}
			bp.AddPoint(pt)
		}
		err = backoff.Retry(func() error {
			return clnt.Write(bp)
		}, bo)
		if err != nil {
			bo.Reset()
			return err
		}
	}
}
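Flush drains a package-level cPoints channel and writes through a package-level clnt client, neither of which is shown. A hypothetical declaration of the point type, with fields inferred from the usage above:

type NetworkPoint struct {
	Interface string
	Error     bool // inferred; InfluxDB also accepts string or numeric fields here
	Latency   time.Duration
	Time      time.Time
}

var cPoints = make(chan NetworkPoint, 100) // buffered so probes don't block on writes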
Example #6
func calcCacheValues(trafmonData []byte, cdnName string, sampleTime int64, cacheGroupMap map[string]string, config StartupConfig) error {

	type CacheStatsJSON struct {
		Pp     string `json:"pp"`
		Date   string `json:"date"`
		Caches map[string]map[string][]struct {
			Index uint64 `json:"index"`
			Time  int    `json:"time"`
			Value string `json:"value"`
			Span  uint64 `json:"span"`
		} `json:"caches"`
	}
	var jData CacheStatsJSON
	err := json.Unmarshal(trafmonData, &jData)
	errHndlr(err, ERROR)

	statCount := 0
	bps, err := influx.NewBatchPoints(influx.BatchPointsConfig{
		Database:        "cache_stats",
		Precision:       "ms",
		RetentionPolicy: config.CacheRetentionPolicy,
	})
	if err != nil {
		errHndlr(err, ERROR)
	}
	for cacheName, cacheData := range jData.Caches {
		for statName, statData := range cacheData {
			dataKey := statName
			dataKey = strings.Replace(dataKey, ".bandwidth", ".kbps", 1)
			dataKey = strings.Replace(dataKey, "-", "_", -1)

			// The stat time is in milliseconds since the epoch
			msInt := int64(statData[0].Time)

			newTime := time.Unix(0, msInt*int64(time.Millisecond))
			//Get the stat value and convert to float
			statValue := statData[0].Value
			statFloatValue, err := strconv.ParseFloat(statValue, 64)
			if err != nil {
				statFloatValue = 0.00
			}
			tags := map[string]string{
				"cachegroup": cacheGroupMap[cacheName],
				"hostname":   cacheName,
				"cdn":        cdnName,
			}

			fields := map[string]interface{}{
				"value": statFloatValue,
			}
			pt, err := influx.NewPoint(
				dataKey,
				tags,
				fields,
				newTime,
			)
			if err != nil {
				errHndlr(err, ERROR)
				continue
			}
			bps.AddPoint(pt)
			statCount++
		}
	}
	config.BpsChan <- bps
	log.Debug("Collected ", statCount, " cache stats values for ", cdnName, " @ ", sampleTime)
	return nil
}
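On Go 1.17 and later, the millisecond-epoch conversion above has a direct standard-library equivalent:

newTime := time.UnixMilli(msInt) // same as time.Unix(0, msInt*int64(time.Millisecond))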
Example #7
/* the ds json looks like:
{
  "deliveryService": {
    "linear-gbr-hls-sbr": {
      "location.us-ma-woburn.kbps": [{
        "index": 520281,
        "time": 1398893383605,
        "value": "0",
        "span": 520024
      }],
      "location.us-de-newcastle.kbps": [{
        "index": 520281,
        "time": 1398893383605,
        "value": "0",
        "span": 517707
      }]
    }
  }
}
*/
func calcDsValues(rascalData []byte, cdnName string, sampleTime int64, config StartupConfig) error {
	type DsStatsJSON struct {
		Pp              string `json:"pp"`
		Date            string `json:"date"`
		DeliveryService map[string]map[string][]struct {
			Index uint64 `json:"index"`
			Time  int    `json:"time"`
			Value string `json:"value"`
			Span  uint64 `json:"span"`
		} `json:"deliveryService"`
	}

	var jData DsStatsJSON
	err := json.Unmarshal(rascalData, &jData)
	errHndlr(err, ERROR)

	statCount := 0
	bps, err := influx.NewBatchPoints(influx.BatchPointsConfig{
		Database:        "deliveryservice_stats",
		Precision:       "ms",
		RetentionPolicy: config.DsRetentionPolicy,
	})
	if err != nil {
		errHndlr(err, ERROR)
	}
	for dsName, dsData := range jData.DeliveryService {
		for dsMetric, dsMetricData := range dsData {
			//create dataKey (influxDb series)
			var cachegroup, statName string
			if strings.Contains(dsMetric, "total.") {
				s := strings.Split(dsMetric, ".")
				cachegroup, statName = s[0], s[1]
			} else {
				s := strings.Split(dsMetric, ".")
				cachegroup, statName = s[1], s[2]
			}

			// The stat time is in milliseconds since the epoch
			msInt := int64(dsMetricData[0].Time)

			newTime := time.Unix(0, msInt*int64(time.Millisecond))
			//convert stat value to float
			statValue := dsMetricData[0].Value
			statFloatValue, err := strconv.ParseFloat(statValue, 64)
			if err != nil {
				statFloatValue = 0.0
			}
			tags := map[string]string{
				"deliveryservice": dsName,
				"cdn":             cdnName,
				"cachegroup":      cachegroup,
			}

			fields := map[string]interface{}{
				"value": statFloatValue,
			}
			pt, err := influx.NewPoint(
				statName,
				tags,
				fields,
				newTime,
			)
			if err != nil {
				errHndlr(err, ERROR)
				continue
			}
			bps.AddPoint(pt)
			statCount++
		}
	}
	config.BpsChan <- bps
	log.Info("Collected ", statCount, " deliveryservice stats values for ", cdnName, " @ ", sampleTime)
	return nil
}
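The cachegroup/statName split above depends on the shape of the metric name; for example:

s := strings.Split("location.us-de-newcastle.kbps", ".")
// s[1] == "us-de-newcastle" (cachegroup), s[2] == "kbps" (statName)
s = strings.Split("total.kbps", ".")
// s[0] == "total" (cachegroup), s[1] == "kbps" (statName)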
Example #8
func calcDailySummary(now time.Time, config StartupConfig, runningConfig RunningConfig) {
	log.Infof("lastSummaryTime is %v", runningConfig.LastSummaryTime)
	if runningConfig.LastSummaryTime.Day() != now.Day() {
		startTime := now.Truncate(24 * time.Hour).Add(-24 * time.Hour)
		endTime := startTime.Add(24 * time.Hour)
		log.Info("Summarizing from ", startTime, " (", startTime.Unix(), ") to ", endTime, " (", endTime.Unix(), ")")

		// influx connection
		influxClient, err := influxConnect(config, runningConfig)
		if err != nil {
			log.Error("Could not connect to InfluxDb to get daily summary stats!!")
			errHndlr(err, ERROR)
			return
		}

		//create influxdb query
		q := fmt.Sprintf("SELECT sum(value)/6 FROM bandwidth where time > '%s' and time < '%s' group by time(60s), cdn fill(0)", startTime.Format(time.RFC3339), endTime.Format(time.RFC3339))
		log.Info(q)
		res, err := queryDB(influxClient, q, "cache_stats")
		if err != nil {
			errHndlr(err, ERROR)
			return
		}

		bp, err := influx.NewBatchPoints(influx.BatchPointsConfig{
			Database:        "daily_stats",
			Precision:       "s",
			RetentionPolicy: config.DailySummaryRetentionPolicy,
		})
		if err != nil {
			errHndlr(err, ERROR)
			return
		}
		for _, row := range res[0].Series {
			prevtime := startTime
			max := float64(0)
			bytesServed := float64(0)
			cdn := row.Tags["cdn"]
			for _, record := range row.Values {
				kbps, err := record[1].(json.Number).Float64()
				if err != nil {
					errHndlr(err, ERROR)
					continue
				}
				sampleTime, err := time.Parse(time.RFC3339, record[0].(string))
				if err != nil {
					errHndlr(err, ERROR)
					continue
				}
				max = FloatMax(max, kbps)
				duration := sampleTime.Unix() - prevtime.Unix()
				bytesServed += float64(duration) * kbps / 8
				prevtime = sampleTime
			}
			maxGbps := max / 1000000
			bytesServedTb := bytesServed / 1000000000
			log.Infof("max gbps for cdn %v = %v", cdn, maxGbps)
			log.Infof("Tbytes served for cdn %v = %v", cdn, bytesServedTb)

			// write daily_maxgbps to traffic_ops
			var statsSummary traffic_ops.StatsSummary
			statsSummary.CdnName = cdn
			statsSummary.DeliveryService = "all"
			statsSummary.StatName = "daily_maxgbps"
			statsSummary.StatValue = strconv.FormatFloat(maxGbps, 'f', 2, 64)
			statsSummary.SummaryTime = now.Format(time.RFC3339)
			statsSummary.StatDate = startTime.Format("2006-01-02")
			go writeSummaryStats(config, statsSummary)

			tags := map[string]string{
				"deliveryservice": statsSummary.DeliveryService,
				"cdn":             statsSummary.CdnName,
			}

			fields := map[string]interface{}{
				"value": maxGbps,
			}
			pt, err := influx.NewPoint(
				statsSummary.StatName,
				tags,
				fields,
				startTime,
			)
			if err != nil {
				errHndlr(err, ERROR)
				continue
			}
			bp.AddPoint(pt)

			// write bytes served data to traffic_ops and InfluxDB
			statsSummary.StatName = "daily_bytesserved"
			statsSummary.StatValue = strconv.FormatFloat(bytesServedTb, 'f', 2, 64)
			go writeSummaryStats(config, statsSummary)

			fields = map[string]interface{}{
				"value": bytesServedTb,
			}
			pt, err = influx.NewPoint(
				statsSummary.StatName,
				tags,
				fields,
				startTime,
			)
			if err != nil {
				errHndlr(err, ERROR)
				continue
			}
			bp.AddPoint(pt)
		}
		config.BpsChan <- bp
		log.Info("Collected daily stats @ ", now)
	}
}
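The units in the summary loop are easy to misread. Assuming the bandwidth series stores kbps sampled six times per 60s window (which is presumably why the query divides sum(value) by 6), one window works out as:

kbps := 8000.0
seconds := 60.0
kilobytes := seconds * kbps / 8 // 60,000 KB served in this window
_ = kilobytes / 1e9             // running KB total / 1e9 -> terabytes (bytesServedTb)
_ = kbps / 1e6                  // peak kbps / 1e6 -> Gbps (maxGbps)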