func syncDeliveryServiceStat(sourceClient influx.Client, targetClient influx.Client, statName string, days int) {
	db := "deliveryservice_stats"
	bps, err := influx.NewBatchPoints(influx.BatchPointsConfig{
		Database:        db,
		Precision:       "ms",
		RetentionPolicy: "monthly",
	})
	if err != nil {
		fmt.Printf("An error occurred creating batch points for %s: %v\n", statName, err)
		return
	}
	queryString := fmt.Sprintf("select time, cachegroup, cdn, deliveryservice, value from \"monthly\".\"%s\"", statName)
	if days > 0 {
		queryString += fmt.Sprintf(" where time > now() - %dd", days)
	}
	fmt.Println("queryString ", queryString)
	res, err := queryDB(sourceClient, queryString, db)
	if err != nil {
		fmt.Printf("An error occurred getting %s records from source db: %v\n", statName, err)
		return
	}
	sourceStats := getDeliveryServiceStats(res)
	// get value from target DB
	targetRes, err := queryDB(targetClient, queryString, db)
	if err != nil {
		fmt.Printf("An error occurred getting %s records from target db: %v\n", statName, err)
		return
	}
	targetStats := getDeliveryServiceStats(targetRes)

	for ssKey := range sourceStats {
		ts := targetStats[ssKey]
		ss := sourceStats[ssKey]
		if ts.value > ss.value {
			fmt.Printf("target value %v is greater than source value %v\n", ts.value, ss.value)
			continue // target value is bigger, so leave it
		}
		statTime, err := time.Parse(time.RFC3339, ss.t)
		if err != nil {
			fmt.Printf("error parsing time %q for %s: %v\n", ss.t, statName, err)
			continue
		}
		tags := map[string]string{
			"cdn":             ss.cdn,
			"cachegroup":      ss.cacheGroup,
			"deliveryservice": ss.deliveryService,
		}
		fields := map[string]interface{}{
			"value": ss.value,
		}
		pt, err := influx.NewPoint(
			statName,
			tags,
			fields,
			statTime,
		)
		if err != nil {
			fmt.Printf("error creating point for %v...%v\n", statName, err)
			continue
		}
		bps.AddPoint(pt)
	}
	if err := targetClient.Write(bps); err != nil {
		fmt.Printf("error writing %s points to target db: %v\n", statName, err)
	}
}
func queryDB(con influx.Client, cmd string, database string) ([]influx.Result, error) {
	q := influx.Query{
		Command:  cmd,
		Database: database,
	}
	response, err := con.Query(q)
	if err != nil {
		// Previously this error was shadowed inside an if-statement and silently
		// dropped; return it so callers can see query failures.
		return nil, err
	}
	if response.Error() != nil {
		return nil, response.Error()
	}
	return response.Results, nil
}
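// NOTE: getDeliveryServiceStats and the deliveryServiceStats struct used by
// syncDeliveryServiceStat above are not shown in this listing. The version
// below is a hypothetical sketch, not the original implementation: it assumes
// the query returns columns in the order time, cachegroup, cdn,
// deliveryservice, value, that time comes back as an RFC3339 string, and that
// value is a json.Number (requires the encoding/json import). The struct
// fields simply mirror the names used in syncDeliveryServiceStat.
type deliveryServiceStats struct {
	t               string
	cacheGroup      string
	cdn             string
	deliveryService string
	value           float64
}

func getDeliveryServiceStats(res []influx.Result) map[string]deliveryServiceStats {
	stats := map[string]deliveryServiceStats{}
	if len(res) == 0 {
		return stats
	}
	for _, row := range res[0].Series {
		for _, record := range row.Values {
			ds := deliveryServiceStats{
				t:               record[0].(string),
				cacheGroup:      record[1].(string),
				cdn:             record[2].(string),
				deliveryService: record[3].(string),
			}
			value, err := record[4].(json.Number).Float64()
			if err != nil {
				fmt.Printf("couldn't parse value from record %v: %v\n", record, err)
				continue
			}
			ds.value = value
			// Key on everything except the value so source and target rows line up.
			stats[ds.t+ds.cacheGroup+ds.cdn+ds.deliveryService] = ds
		}
	}
	return stats
}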
func (s *Service) execQuery(cli client.Client, q string) (*client.Response, error) {
	query := client.Query{
		Command: q,
	}
	resp, err := cli.Query(query)
	if err != nil {
		return nil, err
	}
	if err := resp.Error(); err != nil {
		return nil, err
	}
	return resp, nil
}
func (d *DavisSi1000) storeReports(reportChan <-chan WxReport, ic influx.Client) {
	for {
		select {
		case report := <-reportChan:
			// Create an InfluxDB batch of points. We only receive readings every
			// 2.5s so there's no need to batch more than one at a time.
			bp, err := influx.NewBatchPoints(influx.BatchPointsConfig{
				Database:  d.config.InfluxDB.InfluxDBName,
				Precision: "s",
			})
			if err != nil {
				log.Println("Error logging report to InfluxDB:", err)
				continue
			}

			tags := map[string]string{"transmitter-id": string(report.TransmitterID)}
			fields := map[string]interface{}{
				"wind_speed":      report.WindSpeed,
				"wind_dir":        report.WindDir,
				"temperature":     report.Temperature,
				"humidity":        report.Humidity,
				"dewpoint":        report.Dewpoint,
				"heat_index":      report.HeatIndex,
				"wind_chill":      report.WindChill,
				"uv_index":        report.UVIndex,
				"solar_radiation": report.SolarRadiation,
				"rainfall":        report.Rainfall,
			}

			// Build our InfluxDB point from our tags and fields...
			// (NewPoint in client/v2 returns a point and an error.)
			pt, err := influx.NewPoint("wxreport", tags, fields, time.Now())
			if err != nil {
				log.Println("Error creating InfluxDB point:", err)
				continue
			}
			// ...and add it to our batch
			bp.AddPoint(pt)

			// Write the batch to the InfluxDB client
			err = ic.Write(bp)
			if err != nil {
				log.Println("Error logging data point to InfluxDB:", err)
				continue
			}

			// Log this report to the console
			log.Printf("Received report: %+v\n", report)
		}
	}
}
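// Hypothetical wiring sketch, not part of the original code: it only
// illustrates how an InfluxDB HTTP client from the client/v2 package could be
// constructed and handed to storeReports. The address, the function name, and
// the way the DavisSi1000 value and report channel are created elsewhere are
// all assumptions.
func runStoreReports(d *DavisSi1000, reportChan <-chan WxReport) error {
	ic, err := influx.NewHTTPClient(influx.HTTPConfig{
		Addr: "http://localhost:8086", // assumed local InfluxDB instance
	})
	if err != nil {
		return err
	}
	defer ic.Close()

	// storeReports blocks, consuming reports until the process exits.
	d.storeReports(reportChan, ic)
	return nil
}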