// Write implements the client Write method for tests: it records every point
// in the batch and calls m.Done() once per point so tests can wait for delivery.
func (m *mockClient) Write(bp stdinflux.BatchPoints) error {
	for _, p := range bp.Points() {
		m.Points = append(m.Points, *p)
		m.Done()
	}
	return nil
}
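// The m.Done() call above implies mockClient embeds a sync.WaitGroup so a
// test can block until an expected number of points has been recorded. The
// type below is an assumed sketch of that scaffolding, not the actual source;
// stdinflux is presumably an alias for the standard client package, e.g.
// stdinflux "github.com/influxdata/influxdb/client/v2".
type mockClient struct {
	sync.WaitGroup
	Points []stdinflux.Point
}

// Assumed usage in a test: arm the WaitGroup, write, then wait for delivery.
//	m := &mockClient{}
//	m.Add(len(bp.Points()))
//	go m.Write(bp)
//	m.Wait()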
func calcDailyBytesServed(client influx.Client, bp influx.BatchPoints, startTime time.Time, endTime time.Time, config StartupConfig) {
	bytesToTerabytes := 1000000000.00
	sampleTimeSecs := 60.00
	bitsTobytes := 8.00
	queryString := fmt.Sprintf(`select mean(value) from "monthly"."bandwidth.cdn.1min" where time > '%s' and time < '%s' group by time(1m), cdn`, startTime.Format(time.RFC3339), endTime.Format(time.RFC3339))
	log.Infof("queryString = %v\n", queryString)
	res, err := queryDB(client, queryString, "cache_stats")
	if err != nil {
		log.Errorf("An error occurred getting daily bytes served! %v\n", err)
		return
	}
	if res != nil && len(res[0].Series) > 0 {
		for _, row := range res[0].Series {
			bytesServed := float64(0)
			cdn := row.Tags["cdn"]
			for _, record := range row.Values {
				if record[1] != nil {
					value, err := record[1].(json.Number).Float64()
					if err != nil {
						log.Errorf("Couldn't parse value from record %v\n", record)
						continue
					}
					// convert each 1m mean rate sample into a volume
					bytesServed += value * sampleTimeSecs / bitsTobytes
				}
			}
			bytesServedTB := bytesServed / bytesToTerabytes
			log.Infof("TBytes served for cdn %v = %v", cdn, bytesServedTB)
			// write to Traffic Ops
			var statsSummary traffic_ops.StatsSummary
			statsSummary.CDNName = cdn
			statsSummary.DeliveryService = "all"
			statsSummary.StatName = "daily_bytesserved"
			statsSummary.StatValue = strconv.FormatFloat(bytesServedTB, 'f', 2, 64)
			statsSummary.SummaryTime = time.Now().Format(time.RFC3339)
			statsSummary.StatDate = startTime.Format("2006-01-02")
			go writeSummaryStats(config, statsSummary)
			// write to InfluxDB
			tags := map[string]string{"cdn": cdn, "deliveryservice": "all"}
			fields := map[string]interface{}{
				"value": bytesServedTB, // converted to TB
			}
			pt, err := influx.NewPoint(
				"daily_bytesserved",
				tags,
				fields,
				startTime,
			)
			if err != nil {
				log.Errorf("error creating data point for daily bytes served...%v\n", err)
				continue
			}
			bp.AddPoint(pt)
		}
		config.BpsChan <- bp
	}
}
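// This function and calcDailyMaxGbps below both call a queryDB helper that is
// not shown in this section. Against the v2 InfluxDB client (presumably
// imported as influx "github.com/influxdata/influxdb/client/v2"), it is
// typically a thin wrapper like this sketch, a plausible reconstruction
// rather than the exact source:
func queryDB(client influx.Client, cmd string, db string) ([]influx.Result, error) {
	q := influx.Query{
		Command:  cmd,
		Database: db,
	}
	response, err := client.Query(q)
	if err != nil {
		return nil, err
	}
	if response.Error() != nil {
		return nil, response.Error()
	}
	return response.Results, nil
}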
func calcDailyMaxGbps(client influx.Client, bp influx.BatchPoints, startTime time.Time, endTime time.Time, config StartupConfig) {
	kilobitsToGigabits := 1000000.00
	queryString := fmt.Sprintf(`select time, cdn, max(value) from "monthly"."bandwidth.cdn.1min" where time > '%s' and time < '%s' group by cdn`, startTime.Format(time.RFC3339), endTime.Format(time.RFC3339))
	log.Infof("queryString = %v\n", queryString)
	res, err := queryDB(client, queryString, "cache_stats")
	if err != nil {
		log.Errorf("An error occurred getting max bandwidth! %v\n", err)
		return
	}
	if res != nil && len(res[0].Series) > 0 {
		for _, row := range res[0].Series {
			for _, record := range row.Values {
				t := record[0].(string)
				if record[1] != nil {
					cdn := record[1].(string)
					value, err := record[2].(json.Number).Float64()
					if err != nil {
						log.Errorf("Couldn't parse value from record %v\n", record)
						continue
					}
					value = value / kilobitsToGigabits
					statTime, err := time.Parse(time.RFC3339, t)
					if err != nil {
						log.Errorf("Couldn't parse time from record %v\n", record)
						continue
					}
					log.Infof("max gbps for cdn %v = %v", cdn, value)
					var statsSummary traffic_ops.StatsSummary
					statsSummary.CDNName = cdn
					statsSummary.DeliveryService = "all"
					statsSummary.StatName = "daily_maxgbps"
					statsSummary.StatValue = strconv.FormatFloat(value, 'f', 2, 64)
					statsSummary.SummaryTime = time.Now().Format(time.RFC3339)
					statsSummary.StatDate = statTime.Format("2006-01-02")
					go writeSummaryStats(config, statsSummary)
					// write to InfluxDB
					tags := map[string]string{"cdn": cdn, "deliveryservice": "all"}
					fields := map[string]interface{}{
						"value": value,
					}
					pt, err := influx.NewPoint(
						"daily_maxgbps",
						tags,
						fields,
						statTime,
					)
					if err != nil {
						log.Errorf("error creating data point for max Gbps...%v\n", err)
						continue
					}
					bp.AddPoint(pt)
				}
			}
		}
	}
	config.BpsChan <- bp
}
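// Neither calculator writes its batch directly; both hand it to
// config.BpsChan. A goroutine elsewhere presumably drains that channel and
// publishes, along these lines (a hypothetical consumer loop, names assumed;
// sendMetrics appears later in this section):
//	for bps := range config.BpsChan {
//		go sendMetrics(config, runningConfig, bps, true)
//	}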
// Batches incoming Result.Point and sends them if the batch reaches 5k in size
func (st *StressTest) batcher(pt *influx.Point, bp influx.BatchPoints) influx.BatchPoints {
	if len(bp.Points()) <= 5000 {
		bp.AddPoint(pt)
	} else {
		err := st.ResultsClient.Write(bp)
		if err != nil {
			log.Fatalf("Error writing performance stats\n error: %v\n", err)
		}
		bp = st.NewResultsPointBatch()
		// keep the incoming point instead of dropping it with the flushed batch
		bp.AddPoint(pt)
	}
	return bp
}
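// Callers are expected to thread the returned batch into the next call so the
// flush-and-reset stays transparent. A hypothetical results loop (the Results
// channel and the point's tags and fields are assumptions, not from the source):
//	bp := st.NewResultsPointBatch()
//	for r := range st.Results {
//		pt, err := influx.NewPoint("result", r.Tags, r.Fields, time.Now())
//		if err != nil {
//			continue
//		}
//		bp = st.batcher(pt, bp)
//	}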
// Batches incoming Result.Point and sends them if the batch reaches 5k in size
func (sf *StoreFront) batcher(pt *influx.Point, bp influx.BatchPoints, bpconf influx.BatchPointsConfig) influx.BatchPoints {
	// If fewer than 5k, add the point and return
	if len(bp.Points()) <= 5000 {
		bp.AddPoint(pt)
	} else {
		// Otherwise send the batch
		err := sf.ResultsClient.Write(bp)
		// Check error
		if err != nil {
			log.Fatalf("Error writing performance stats\n error: %v\n", err)
		}
		// Reset the batch of points and keep the incoming point
		bp, err = influx.NewBatchPoints(bpconf)
		if err != nil {
			log.Fatalf("Error creating new batch of points\n error: %v\n", err)
		}
		bp.AddPoint(pt)
	}
	return bp
}
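// The bpconf parameter lets this variant rebuild batches without a dedicated
// constructor. A plausible configuration using the v2 client's exported
// fields (the database name and precision here are assumptions):
//	bpconf := influx.BatchPointsConfig{
//		Database:  "performance",
//		Precision: "s",
//	}
//	bp, err := influx.NewBatchPoints(bpconf)
//	if err != nil {
//		log.Fatalf("Error creating batch of points\n error: %v\n", err)
//	}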
func sendMetrics(config StartupConfig, runningConfig RunningConfig, bps influx.BatchPoints, retry bool) {
	influxClient, err := influxConnect(config, runningConfig)
	if err != nil {
		if retry {
			config.BpsChan <- bps
		}
		errHndlr(err, ERROR)
		return
	}
	pts := bps.Points()
	for len(pts) > 0 {
		chunkBps, err := influx.NewBatchPoints(influx.BatchPointsConfig{
			Database:        bps.Database(),
			Precision:       bps.Precision(),
			RetentionPolicy: bps.RetentionPolicy(),
		})
		if err != nil {
			// chunkBps is nil on this error path, so there is nothing worth
			// requeueing; bail out rather than dereference a nil batch below.
			errHndlr(err, ERROR)
			return
		}
		chunkSize := intMin(config.MaxPublishSize, len(pts))
		for _, p := range pts[:chunkSize] {
			chunkBps.AddPoint(p)
		}
		pts = pts[chunkSize:]
		err = influxClient.Write(chunkBps)
		if err != nil {
			if retry {
				config.BpsChan <- chunkBps
			}
			errHndlr(err, ERROR)
		} else {
			log.Infof("Sent %v stats for %v", len(chunkBps.Points()), chunkBps.Database())
		}
	}
}
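// sendMetrics chunks the batch with an intMin helper that is not shown here.
// Go versions before 1.21 have no built-in integer min, so it is presumably
// the usual two-liner:
func intMin(a, b int) int {
	if a < b {
		return a
	}
	return b
}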
// Write implements the client Write method against an in-memory buffer: each
// point's line-protocol representation is appended instead of being sent.
func (w *bufWriter) Write(bp influxdb.BatchPoints) error {
	for _, p := range bp.Points() {
		// use an explicit format verb; the point string may itself contain '%'
		fmt.Fprintf(&w.buf, "%s\n", p.String())
	}
	return nil
}
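// For the method above to compile, bufWriter only needs a buffer; a minimal
// assumed definition (the zero value is ready to use, and a test can call
// w.buf.String() to assert on the line protocol that would have been sent):
type bufWriter struct {
	buf bytes.Buffer
}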