// WriteTo flushes the buffered content of the metrics to the writer, in an
// Influx BatchPoints format. WriteTo abides best-effort semantics, so
// observations are lost if there is a problem with the write. Clients should be
// sure to call WriteTo regularly, ideally through the WriteLoop helper method.
func (in *Influx) WriteTo(w BatchPointsWriter) (err error) {
	bp, err := influxdb.NewBatchPoints(in.conf)
	if err != nil {
		return err
	}
	// All points emitted by this flush share a single timestamp.
	now := time.Now()
	// Counters: Reset() drains the buffered observations; each series becomes
	// one point whose "count" field is the sum of the observations.
	in.counters.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool {
		fields := fieldsFrom(lvs)
		fields["count"] = sum(values)
		var p *influxdb.Point
		// err is the named return; returning false stops the walk so the
		// error can be reported after the Walk call.
		p, err = influxdb.NewPoint(name, in.tags, fields, now)
		if err != nil {
			return false
		}
		bp.AddPoint(p)
		return true
	})
	if err != nil {
		return err
	}
	// Gauges: only the most recent observation per series is kept ("value").
	in.gauges.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool {
		fields := fieldsFrom(lvs)
		fields["value"] = last(values)
		var p *influxdb.Point
		p, err = influxdb.NewPoint(name, in.tags, fields, now)
		if err != nil {
			return false
		}
		bp.AddPoint(p)
		return true
	})
	if err != nil {
		return err
	}
	// Histograms: one point per raw observation.
	// NOTE(review): the same fields map is mutated on every iteration; this
	// assumes influxdb.NewPoint captures/encodes the fields at construction
	// time rather than retaining the map — confirm against the client
	// library version in use, otherwise all points would carry the last value.
	in.histograms.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool {
		fields := fieldsFrom(lvs)
		ps := make([]*influxdb.Point, len(values))
		for i, v := range values {
			fields["value"] = v // overwrite each time
			ps[i], err = influxdb.NewPoint(name, in.tags, fields, now)
			if err != nil {
				return false
			}
		}
		bp.AddPoints(ps)
		return true
	})
	if err != nil {
		return err
	}
	return w.Write(bp)
}
func createInfluxDBMetrics(ping Ping) (influxdbclient.BatchPoints, error) { var err error bp, err := influxdbclient.NewBatchPoints(influxdbclient.BatchPointsConfig{ Database: receiverDatabaseFlag, Precision: "s", }) if err != nil { return nil, err } tags := map[string]string{ "origin": ping.origin, "destination": ping.destination, } fields := map[string]interface{}{ "loss": ping.stats.loss, "min": ping.stats.min, "avg": ping.stats.avg, "max": ping.stats.max, "mdev": ping.stats.mdev, } pt, err := influxdbclient.NewPoint("ping", tags, fields, time.Unix(ping.time, 0)) if err != nil { return nil, err } bp.AddPoint(pt) return bp, nil }
func (t *tickData) TickHandler(ctx context.Context, tick *tickRecorder.Tick) error { var err error t.log.Infoln("Received data") tags := map[string]string{"pair": "AUDUSD"} fields := map[string]interface{}{ "bid": tick.Bid, "ask": tick.Ask, "last": tick.Last, } point, err := influx.NewPoint("tick_data", tags, fields, time.Unix(0, tick.Time)) bp, err := influx.NewBatchPoints(influx.BatchPointsConfig{ Database: "tick", Precision: "ns", }) bp.AddPoint(point) t.log.Infoln("Created batch point:", bp) if influxErr := t.influx.Write(bp); influxErr != nil { t.log.Error(influxErr) } return err }
func syncDeliveryServiceStat(sourceClient influx.Client, targetClient influx.Client, statName string, days int) { db := "deliveryservice_stats" bps, _ := influx.NewBatchPoints(influx.BatchPointsConfig{ Database: db, Precision: "ms", RetentionPolicy: "monthly", }) queryString := fmt.Sprintf("select time, cachegroup, cdn, deliveryservice, value from \"monthly\".\"%s\"", statName) if days > 0 { queryString += fmt.Sprintf(" where time > now() - %dd", days) } fmt.Println("queryString ", queryString) res, err := queryDB(sourceClient, queryString, db) if err != nil { errorMessage = fmt.Sprintf("An error occured getting %s records from sourceDb: %v\n", statName, err) fmt.Println(errorMessage) return } sourceStats := getDeliveryServiceStats(res) // get value from target DB targetRes, err := queryDB(targetClient, queryString, db) if err != nil { errorMessage = fmt.Sprintf("An error occured getting %s record from target db: %v\n", statName, err) fmt.Println(errorMessage) return } targetStats := getDeliveryServiceStats(targetRes) for ssKey := range sourceStats { ts := targetStats[ssKey] ss := sourceStats[ssKey] if ts.value > ss.value { //fmt.Printf("target value %v is at least equal to source value %v\n", ts.value, ss.value) continue //target value is bigger so leave it } statTime, _ := time.Parse(time.RFC3339, ss.t) tags := map[string]string{ "cdn": ss.cdn, "cachegroup": ss.cacheGroup, "deliveryservice": ss.deliveryService, } fields := map[string]interface{}{ "value": ss.value, } pt, err := influx.NewPoint( statName, tags, fields, statTime, ) if err != nil { fmt.Printf("error adding creating point for %v...%v\n", statName, err) continue } bps.AddPoint(pt) } targetClient.Write(bps) }
// Perform the batch write func (w *Writer) writeEntries(entries []Entry) { bps, err := client.NewBatchPoints(client.BatchPointsConfig{ Database: w.DB, RetentionPolicy: "default", Precision: "s", // WriteConsistency: string, }) Checkerr(err) for _, entry := range entries { name := "watt" // Measurement tags := map[string]string{ /*"ted1k",...*/ } fields := map[string]interface{}{ "value": entry.Watt, } pt, err := client.NewPoint(name, tags, fields, entry.Stamp) Checkerr(err) bps.AddPoint(pt) // fmt.Printf("point: %v\n", pt) } // TODO(daneroo): retry, if error is timeout? err = w.con.Write(bps) Checkerr(err) }
// Fire adds a new InfluxDB point based off of Logrus entry func (hook *InfluxDBHook) Fire(entry *logrus.Entry) (err error) { // If passing a "message" field then it will be overridden by the entry Message entry.Data["message"] = entry.Message measurement := hook.measurement if result, ok := getTag(entry.Data, "measurement"); ok { measurement = result } tags := make(map[string]string) // Set the level of the entry tags["level"] = entry.Level.String() // getAndDel and getAndDelRequest are taken from https://github.com/evalphobia/logrus_sentry if logger, ok := getTag(entry.Data, "logger"); ok { tags["logger"] = logger } for _, tag := range hook.tagList { if tagValue, ok := getTag(entry.Data, tag); ok { tags[tag] = tagValue } } pt, err := influxdb.NewPoint(measurement, tags, entry.Data, entry.Time) if err != nil { return fmt.Errorf("Fire: %v", err) } return hook.addPoint(pt) }
func (self *Server) influxWriter(influxClient influxdb.Client, options InfluxOptions) { defer close(self.influxChan) defer influxClient.Close() for stat := range self.influxChan { tags := map[string]string{ "id": stat.ID.String(), "family": stat.ID.Family(), "name": stat.SensorConfig.String(), } fields := map[string]interface{}{ "temperature": stat.Temperature.Float64(), } // write point, err := influxdb.NewPoint("onewire", tags, fields, stat.Time) if err != nil { log.Printf("server.Server: influxWriter: influxdb.NewPoint: %v\n", err) } points, err := options.batchPoints() if err != nil { log.Printf("server.Server: influxWriter: newBatchPoints: %v\n", err) continue } points.AddPoint(point) if err := influxClient.Write(points); err != nil { log.Printf("server.Server: influxWriter: influxdb.Client %v: Write %v: %v\n", influxClient, points, err) continue } } }
// Point stamps out a new influx data point and COPIES the tags, and CLEARS // the fields for this Capture so that it can be fields with new values. // The new data point is stamped with the current time. func (mm *Measurement) Point() (*influx.Point, error) { tags := mm.tags fields := mm.fields mm.tags = make(map[string]string) mm.fields = make(map[string]interface{}) for k, v := range tags { mm.tags[k] = v } for k, v := range fields { mm.fields[k] = v } pt, err := influx.NewPoint( mm.measurement, mm.tags, mm.fields, time.Now(), ) if err != nil { return nil, err } else { return pt, nil } }
// InfluxDB writes interest processing time to influxDB. // // The data collected can be viewed with: SELECT "value" FROM :name WHERE "name" = ':interest_name'. func InfluxDB(client influxdb.Client, db, name string, tags map[string]string) mux.Middleware { return func(next mux.Handler) mux.Handler { return mux.HandlerFunc(func(w ndn.Sender, i *ndn.Interest) { before := time.Now() next.ServeNDN(w, i) t := make(map[string]string) for k, v := range tags { t[k] = v } t["name"] = i.Name.String() pt, _ := influxdb.NewPoint(name, t, map[string]interface{}{ "value": float64(time.Since(before)) / float64(time.Millisecond), }, time.Now()) bp, _ := influxdb.NewBatchPoints(influxdb.BatchPointsConfig{ Database: db, }) bp.AddPoint(pt) err := client.Write(bp) if err != nil { log.Println(err) return } }) } }
func NewBlankTestPoint() *influx.Point { meas := "measurement" tags := map[string]string{"fooTag": "fooTagValue"} fields := map[string]interface{}{"value": 5920} utc, _ := time.LoadLocation("UTC") timestamp := time.Date(2016, time.Month(4), 20, 0, 0, 0, 0, utc) pt, _ := influx.NewPoint(meas, tags, fields, timestamp) return pt }
func calcDailyBytesServed(client influx.Client, bp influx.BatchPoints, startTime time.Time, endTime time.Time, config StartupConfig) { bytesToTerabytes := 1000000000.00 sampleTimeSecs := 60.00 bitsTobytes := 8.00 queryString := fmt.Sprintf(`select mean(value) from "monthly"."bandwidth.cdn.1min" where time > '%s' and time < '%s' group by time(1m), cdn`, startTime.Format(time.RFC3339), endTime.Format(time.RFC3339)) log.Infof("queryString = %v\n", queryString) res, err := queryDB(client, queryString, "cache_stats") if err != nil { log.Error("An error occured getting max bandwidth!\n") return } if res != nil && len(res[0].Series) > 0 { for _, row := range res[0].Series { bytesServed := float64(0) cdn := row.Tags["cdn"] for _, record := range row.Values { if record[1] != nil { value, err := record[1].(json.Number).Float64() if err != nil { log.Errorf("Couldn't parse value from record %v\n", record) continue } bytesServed += value * sampleTimeSecs / bitsTobytes } } bytesServedTB := bytesServed / bytesToTerabytes log.Infof("TBytes served for cdn %v = %v", cdn, bytesServedTB) //write to Traffic Ops var statsSummary traffic_ops.StatsSummary statsSummary.CDNName = cdn statsSummary.DeliveryService = "all" statsSummary.StatName = "daily_bytesserved" statsSummary.StatValue = strconv.FormatFloat(bytesServedTB, 'f', 2, 64) statsSummary.SummaryTime = time.Now().Format(time.RFC3339) statsSummary.StatDate = startTime.Format("2006-01-02") go writeSummaryStats(config, statsSummary) //write to Influxdb tags := map[string]string{"cdn": cdn, "deliveryservice": "all"} fields := map[string]interface{}{ "value": bytesServedTB, //converted to TB } pt, err := influx.NewPoint( "daily_bytesserved", tags, fields, startTime, ) if err != nil { log.Errorf("error adding creating data point for max Gbps...%v\n", err) continue } bp.AddPoint(pt) } config.BpsChan <- bp } }
// Returns a point representation of the report to be written to the ResultsDB func (qr *queryReport) Point() *influx.Point { measurement := "testDefault" tags := map[string]string{} fields := map[string]interface{}{"field": "blank"} point, err := influx.NewPoint(measurement, tags, fields, time.Now()) if err != nil { log.Fatalf("Error creating queryReport point\n measurement: %v\n tags: %v\n fields: %v\n error: %v\n", measurement, tags, fields, err) } return point }
func calcDailyMaxGbps(client influx.Client, bp influx.BatchPoints, startTime time.Time, endTime time.Time, config StartupConfig) { kilobitsToGigabits := 1000000.00 queryString := fmt.Sprintf(`select time, cdn, max(value) from "monthly"."bandwidth.cdn.1min" where time > '%s' and time < '%s' group by cdn`, startTime.Format(time.RFC3339), endTime.Format(time.RFC3339)) log.Infof("queryString = %v\n", queryString) res, err := queryDB(client, queryString, "cache_stats") if err != nil { log.Errorf("An error occured getting max bandwidth! %v\n", err) return } if res != nil && len(res[0].Series) > 0 { for _, row := range res[0].Series { for _, record := range row.Values { t := record[0].(string) if record[1] != nil { cdn := record[1].(string) value, err := record[2].(json.Number).Float64() if err != nil { log.Errorf("Couldn't parse value from record %v\n", record) continue } value = value / kilobitsToGigabits statTime, _ := time.Parse(time.RFC3339, t) log.Infof("max gbps for cdn %v = %v", cdn, value) var statsSummary traffic_ops.StatsSummary statsSummary.CDNName = cdn statsSummary.DeliveryService = "all" statsSummary.StatName = "daily_maxgbps" statsSummary.StatValue = strconv.FormatFloat(value, 'f', 2, 64) statsSummary.SummaryTime = time.Now().Format(time.RFC3339) statsSummary.StatDate = statTime.Format("2006-01-02") go writeSummaryStats(config, statsSummary) //write to influxdb tags := map[string]string{"cdn": cdn, "deliveryservice": "all"} fields := map[string]interface{}{ "value": value, } pt, err := influx.NewPoint( "daily_maxgbps", tags, fields, statTime, ) if err != nil { fmt.Printf("error adding creating data point for max Gbps...%v\n", err) continue } bp.AddPoint(pt) } } } } config.BpsChan <- bp }
func (g *gauge) createPoint() { tags := map[string]string{} for _, tag := range g.tags { tags[tag.Key] = tag.Value } fields := map[string]interface{}{} for _, field := range g.fields { fields[field.Key] = field.Value } fields["value"] = g.value pt, _ := stdinflux.NewPoint(g.key, tags, fields, time.Now()) g.bp.AddPoint(pt) }
func blankResponse() stressClient.Response { // Points must have at least one field fields := map[string]interface{}{"done": true} // Make a 'blank' point p, err := influx.NewPoint("done", make(map[string]string), fields, time.Now()) // Panic on error if err != nil { log.Fatalf("Error creating blank response point\n error: %v\n", err) } // Add a tracer to prevent program from returning too early tracer := stressClient.NewTracer(make(map[string]string)) // Add to the WaitGroup tracer.Add(1) // Make a new response with the point and the tracer resp := stressClient.NewResponse(p, tracer) return resp }
func (c *counter) Add(delta uint64) { c.value = c.value + delta tags := map[string]string{} for _, tag := range c.tags { tags[tag.Key] = tag.Value } fields := map[string]interface{}{} for _, field := range c.fields { fields[field.Key] = field.Value } fields["value"] = c.value pt, _ := stdinflux.NewPoint(c.key, tags, fields, time.Now()) c.bp.AddPoint(pt) }
// This function makes a *client.Point for reporting on queries func (pe *ponyExpress) queryPoint(statementID string, body []byte, statusCode int, responseTime time.Duration, addedTags map[string]string) *influx.Point { tags := sumTags(pe.tags(statementID), addedTags) fields := map[string]interface{}{ "status_code": statusCode, "num_bytes": len(body), "response_time_ns": responseTime.Nanoseconds(), } point, err := influx.NewPoint("query", tags, fields, time.Now()) if err != nil { log.Fatalf("Error creating query results point\n error: %v\n", err) } return point }
// This function makes a *client.Point for reporting on writes func (pe *ponyExpress) writePoint(retries int, statementID string, statusCode int, responseTime time.Duration, addedTags map[string]string, writeBytes int) *influx.Point { tags := sumTags(pe.tags(statementID), addedTags) fields := map[string]interface{}{ "status_code": statusCode, "response_time_ns": responseTime.Nanoseconds(), "num_bytes": writeBytes, } point, err := influx.NewPoint("write", tags, fields, time.Now()) if err != nil { log.Fatalf("Error creating write results point\n error: %v\n", err) } return point }
func (self *InfluxdbStorage) AddStats(infotype string, measurement string, content interface{}) error { if infotype == Infotypepacket { //transfer interface into HttpTransaction httpinstance, ok := content.(*metrics.HttpTransaction) if !ok { return errors.New("fail in transformation") } influxclient := self.Influxclient //create bp point write bp point bp, _ := influxpackage.NewBatchPoints(influxpackage.BatchPointsConfig{ Database: self.Database, Precision: "us", }) fields := map[string]interface{}{ "respondtime": httpinstance.Respondtime, } tags := map[string]string{ "Srcip": httpinstance.Srcip, "Srcport": httpinstance.Srcport, "Destip": httpinstance.Destip, "Destport": httpinstance.Destport, // problems in processing nesting problem // "Requestdetail": httpinstance.Packetdetail.Requestdetail, // "Responddetail": httpinstance.Packetdetail.Responddetail, } fmt.Println("**the measurement**", measurement) point, err := influxpackage.NewPoint(measurement, tags, fields, time.Now()) if err != nil { return err } fmt.Println("the point name:", point.Name()) bp.AddPoint(point) influxclient.Write(bp) } //common metric info if infotype == Infotypemetric { } return nil }
// AddTags adds additional tags to the point held in Response and returns the point func (resp Response) AddTags(newTags map[string]string) *influx.Point { // Pull off the current tags tags := resp.Point.Tags() // Add the new tags to the current tags for tag, tagValue := range newTags { tags[tag] = tagValue } // Make a new point pt, err := influx.NewPoint(resp.Point.Name(), tags, resp.Point.Fields(), resp.Point.Time()) // panic on error if err != nil { log.Fatalf("Error adding tags to response point\n point: %v\n tags:%v\n error: %v\n", resp.Point, newTags, err) } return pt }
// WriteMetrics is called from a handler to write the eru and qos metrics to the // influxdb database. func WriteMetrics(eru, qos float64, trafficPattern string) error { influxClient, err := createClient() if err != nil { log.Println("Error getting the HTTP client for Influx.") return err } // After this function terminates, close the HTTP client connection. defer influxClient.Close() bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ Database: databaseName, // @TODO Does "s" make sense Precision: "s", }) tags := map[string]string{ "sm": scalingMethod, "tp": trafficPattern, "pit": podInitializationTime, "ver": version, } fields := map[string]interface{}{ "eru": eru, "qos": qos, } pt, err := client.NewPoint(PointName, tags, fields, time.Now()) if err != nil { log.Println("Error creating the new point.") return err } bp.AddPoint(pt) return influxClient.Write(bp) }
// Msg2Series converts MQTT-style messages into a one-second-precision batch
// for the given database. The topic's first segment becomes the "device" tag
// and the remaining segments, joined with "_", become the measurement name;
// the parsed payload supplies the fields.
func Msg2Series(msgs []Message, database string) client.BatchPoints {
	// Create a new point batch
	bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
		Database:  database,
		Precision: "s",
	})

	for _, msg := range msgs {
		// NOTE(review): empty messages, short topics and point-construction
		// failures all `break` (abandoning the rest of the slice) while parse
		// failures `continue` — confirm whether the breaks are intentional
		// (e.g. zero-padded tail) or should also be continues.
		if msg.Topic == "" && len(msg.Payload) == 0 {
			break
		}
		tokens := strings.Split(msg.Topic, "/")
		if len(tokens) < 2 {
			break
		}
		j, err := MsgParse(msg.Payload)
		if err != nil {
			log.Warn(err)
			continue
		}
		//name := strings.Replace(msg.Topic, "/", ".", -1)
		name := strings.Join(tokens[1:], "_")
		tags := map[string]string{
			"device": tokens[0],
		}
		pt, err := client.NewPoint(name, tags, j, time.Now())
		if err != nil {
			break
		}
		bp.AddPoint(pt)
		fmt.Printf("%+v\n", pt)
	}
	return bp
}
func (s *Stats) newPoint(name string, values ...interface{}) { var fields map[string]interface{} if len(values) == 1 { fields = map[string]interface{}{name: values[0]} } else if len(values)%2 == 0 { fields = make(map[string]interface{}) for i := 0; i < len(values)/2; i++ { fields[values[i*2].(string)] = values[i*2+1] } } else { panic("invalid number of args") } pt, err := influx.NewPoint( strings.Replace(name, "_", "-", -1), map[string]string{}, fields, time.Now(), ) s.bp.AddPoint(pt) if err != nil { log.Error(err) } }
func StatisticHandler(c *gin.Context) { if !conf.StatisticEnable { return } now := time.Now() c.Next() clientData := getClientData(c) if clientData == nil { return } respTime := time.Now().Sub(now) tags := map[string]string{ "node": conf.ClientAddr, "app": clientData.AppKey, } fields := map[string]interface{}{ "status": getServiceStatus(c), "error_code": getServiceErrorCode(c), "ip": clientData.Ip, "lang": clientData.Lang, "os": clientData.OSType, "osv": clientData.OSVersion, "appv": clientData.AppVersion, "deviceid": clientData.DeviceId, "resp_time": int(respTime / microSecondUnit), } p, _ := influx.NewPoint("client_request", tags, fields, now) select { case statisticCh <- p: default: log.Println("failed to send influx point to channel") } }
func Crawl(searchUrl string, depth int, fetcher Fetcher, redisClient *redis.Client, influxClient influx.Client) { throttle <- 1 if depth <= 0 { return } fmt.Printf("Depth: %d Crawling: %s\n", depth, searchUrl) bp, _ := influx.NewBatchPoints(influx.BatchPointsConfig{ Database: "crawler", Precision: "s", }) host, err := url.Parse(searchUrl) // Send this to our redis queue for indexing if err != nil { redisClient.LPush("unknown_url_crawler_queue", searchUrl) } else { redisClient.LPush(host.Host+"_crawler_queue", searchUrl) } urlcache.lock.Lock() urlcache.m[searchUrl] = crawlError urlcache.lock.Unlock() // let's determine how long it is taking to fetch all urls on a page startFetch := time.Now() urls, err := fetcher.Fetch(searchUrl) crawlTime := time.Since(startFetch) if err != nil { fmt.Printf("Error fetching results from %s: %s\n", searchUrl, err.Error()) } else { fmt.Printf("Finished crawling %s in %.2f seconds\n", searchUrl, crawlTime.Seconds()) } tags := map[string]string{ "domain": host.String(), } fields := map[string]interface{}{ "urls_found": len(urls), "crawl_time": crawlTime.Nanoseconds(), "total_urls_crawled": len(urlcache.m), "urls_by_page": len(urls), } point, _ := influx.NewPoint( "crawl_usage", tags, fields, time.Now(), ) // add data point to influx bp.AddPoint(point) if err := influxClient.Write(bp); err != nil { log.Printf("Unable to write batch point to influxdb: %s\n", err.Error()) } var wg sync.WaitGroup for _, u := range urls { if !urlTest.MatchString(u) { u = "http://" + host.Host + u } urlcache.lock.Lock() _, crawled := urlcache.m[u] urlcache.lock.Unlock() if validURL.MatchString(u) && urlTest.MatchString(u) && !crawled { wg.Add(1) go func(u string, depth int, fetcher Fetcher, redisClient *redis.Client, influxClient influx.Client) { defer wg.Done() Crawl(u, depth-1, fetcher, redisClient, influxClient) }(u, depth, fetcher, redisClient, influxClient) } } <-throttle wg.Wait() }
// calcCacheValues parses a Traffic Monitor cache-stats JSON payload and turns
// every per-cache stat into an InfluxDB point in the "cache_stats" database
// (millisecond precision, configured retention policy). The completed batch is
// sent on config.BpsChan for a downstream writer to flush.
func calcCacheValues(trafmonData []byte, cdnName string, sampleTime int64, cacheMap map[string]traffic_ops.Server, config StartupConfig) error {
	// Shape of the incoming JSON: caches -> stat name -> samples.
	type CacheStatsJSON struct {
		Pp     string `json:"pp"`
		Date   string `json:"date"`
		Caches map[string]map[string][]struct {
			Index uint64 `json:"index"`
			Time  int    `json:"time"`
			Value string `json:"value"`
			Span  uint64 `json:"span"`
		} `json:"caches"`
	}
	var jData CacheStatsJSON
	err := json.Unmarshal(trafmonData, &jData)
	if err != nil {
		return fmt.Errorf("could not unmarshall cache stats JSON - %v", err)
	}

	statCount := 0
	bps, err := influx.NewBatchPoints(influx.BatchPointsConfig{
		Database:        "cache_stats",
		Precision:       "ms",
		RetentionPolicy: config.CacheRetentionPolicy,
	})
	if err != nil {
		// NOTE(review): the error is reported but execution continues with
		// bps — confirm errHndlr aborts on ERROR or that a nil batch is safe.
		errHndlr(err, ERROR)
	}

	for cacheName, cacheData := range jData.Caches {
		// Missing caches yield a zero-value Server (empty tags).
		cache := cacheMap[cacheName]

		for statName, statData := range cacheData {
			// Normalize the measurement name: bandwidth is stored as kbps,
			// dashes become underscores.
			dataKey := statName
			dataKey = strings.Replace(dataKey, ".bandwidth", ".kbps", 1)
			dataKey = strings.Replace(dataKey, "-", "_", -1)

			//Get the stat time and convert to epoch
			statTime := strconv.Itoa(statData[0].Time)
			msInt, err := strconv.ParseInt(statTime, 10, 64)
			if err != nil {
				errHndlr(err, ERROR)
			}
			newTime := time.Unix(0, msInt*int64(time.Millisecond))

			//Get the stat value and convert to float; unparseable values
			//are recorded as 0.
			statValue := statData[0].Value
			statFloatValue, err := strconv.ParseFloat(statValue, 64)
			if err != nil {
				statFloatValue = 0.00
			}

			tags := map[string]string{
				"cachegroup": cache.Cachegroup,
				"hostname":   cacheName,
				"cdn":        cdnName,
				"type":       cache.Type,
			}
			fields := map[string]interface{}{
				"value": statFloatValue,
			}
			pt, err := influx.NewPoint(
				dataKey,
				tags,
				fields,
				newTime,
			)
			if err != nil {
				errHndlr(err, ERROR)
				continue
			}
			bps.AddPoint(pt)
			statCount++
		}
	}

	// Hand the batch to the writer goroutine.
	config.BpsChan <- bps
	log.Info("Collected ", statCount, " cache stats values for ", cdnName, " @ ", sampleTime)
	return nil
}
/* the ds json looks like:
{
  "deliveryService": {
    "linear-gbr-hls-sbr": {
      "  .us-ma-woburn.kbps": [{
        "index": 520281,
        "time": 1398893383605,
        "value": "0",
        "span": 520024
      }],
      "location.us-de-newcastle.kbps": [{
        "index": 520281,
        "time": 1398893383605,
        "value": "0",
        "span": 517707
      }],
    }
  }
}
*/
// calcDsValues parses a Traffic Router (rascal) deliveryservice-stats JSON
// payload and turns every metric sample into an InfluxDB point in the
// "deliveryservice_stats" database (millisecond precision, configured
// retention policy). The completed batch is sent on config.BpsChan.
func calcDsValues(rascalData []byte, cdnName string, sampleTime int64, config StartupConfig) error {
	// Shape of the incoming JSON: deliveryService -> metric name -> samples.
	type DsStatsJSON struct {
		Pp              string `json:"pp"`
		Date            string `json:"date"`
		DeliveryService map[string]map[string][]struct {
			Index uint64 `json:"index"`
			Time  int    `json:"time"`
			Value string `json:"value"`
			Span  uint64 `json:"span"`
		} `json:"deliveryService"`
	}
	var jData DsStatsJSON
	err := json.Unmarshal(rascalData, &jData)
	if err != nil {
		return fmt.Errorf("could not unmarshall deliveryservice stats JSON - %v", err)
	}

	statCount := 0
	// NOTE(review): the batch-points error is discarded here, unlike the
	// cache-stats sibling — confirm this is intentional.
	bps, _ := influx.NewBatchPoints(influx.BatchPointsConfig{
		Database:        "deliveryservice_stats",
		Precision:       "ms",
		RetentionPolicy: config.DsRetentionPolicy,
	})

	for dsName, dsData := range jData.DeliveryService {
		for dsMetric, dsMetricData := range dsData {
			var cachegroup, statName string
			tags := map[string]string{
				"deliveryservice": dsName,
				"cdn":             cdnName,
			}

			// The metric name encodes cachegroup/type/stat in dot-separated
			// segments; three known layouts are handled here.
			s := strings.Split(dsMetric, ".")
			if strings.Contains(dsMetric, "type.") {
				cachegroup = "all"
				statName = s[2]
				tags["type"] = s[1]
			} else if strings.Contains(dsMetric, "total.") {
				cachegroup, statName = s[0], s[1]
			} else {
				cachegroup, statName = s[1], s[2]
			}
			tags["cachegroup"] = cachegroup

			//convert stat time to epoch
			statTime := strconv.Itoa(dsMetricData[0].Time)
			msInt, err := strconv.ParseInt(statTime, 10, 64)
			if err != nil {
				errHndlr(err, ERROR)
			}
			newTime := time.Unix(0, msInt*int64(time.Millisecond))

			//convert stat value to float; unparseable values become 0.
			statValue := dsMetricData[0].Value
			statFloatValue, err := strconv.ParseFloat(statValue, 64)
			if err != nil {
				statFloatValue = 0.0
			}

			fields := map[string]interface{}{
				"value": statFloatValue,
			}
			pt, err := influx.NewPoint(
				statName,
				tags,
				fields,
				newTime,
			)
			if err != nil {
				errHndlr(err, ERROR)
				continue
			}
			bps.AddPoint(pt)
			statCount++
		}
	}

	// Hand the batch to the writer goroutine.
	config.BpsChan <- bps
	log.Info("Collected ", statCount, " deliveryservice stats values for ", cdnName, " @ ", sampleTime)
	return nil
}
func calcDailySummary(now time.Time, config StartupConfig, runningConfig RunningConfig) { log.Infof("lastSummaryTime is %v", runningConfig.LastSummaryTime) if runningConfig.LastSummaryTime.Day() != now.Day() { startTime := now.Truncate(24 * time.Hour).Add(-24 * time.Hour) endTime := startTime.Add(24 * time.Hour) log.Info("Summarizing from ", startTime, " (", startTime.Unix(), ") to ", endTime, " (", endTime.Unix(), ")") // influx connection influxClient, err := influxConnect(config, runningConfig) if err != nil { log.Error("Could not connect to InfluxDb to get daily summary stats!!") errHndlr(err, ERROR) return } //create influxdb query q := fmt.Sprintf("SELECT sum(value)/6 FROM bandwidth where time > '%s' and time < '%s' group by time(60s), cdn fill(0)", startTime.Format(time.RFC3339), endTime.Format(time.RFC3339)) log.Infof(q) res, err := queryDB(influxClient, q, "cache_stats") if err != nil { errHndlr(err, ERROR) return } bp, _ := influx.NewBatchPoints(influx.BatchPointsConfig{ Database: "daily_stats", Precision: "s", RetentionPolicy: config.DailySummaryRetentionPolicy, }) for _, row := range res[0].Series { prevtime := startTime max := float64(0) bytesServed := float64(0) cdn := row.Tags["cdn"] for _, record := range row.Values { kbps, err := record[1].(json.Number).Float64() if err != nil { errHndlr(err, ERROR) continue } sampleTime, err := time.Parse(time.RFC3339, record[0].(string)) if err != nil { errHndlr(err, ERROR) continue } max = floatMax(max, kbps) duration := sampleTime.Unix() - prevtime.Unix() bytesServed += float64(duration) * kbps / 8 prevtime = sampleTime } maxGbps := max / 1000000 bytesServedTb := bytesServed / 1000000000 log.Infof("max gbps for cdn %v = %v", cdn, maxGbps) log.Infof("Tbytes served for cdn %v = %v", cdn, bytesServedTb) //write daily_maxgbps in traffic_ops var statsSummary traffic_ops.StatsSummary statsSummary.CDNName = cdn statsSummary.DeliveryService = "all" statsSummary.StatName = "daily_maxgbps" statsSummary.StatValue = 
strconv.FormatFloat(maxGbps, 'f', 2, 64) statsSummary.SummaryTime = now.Format(time.RFC3339) statsSummary.StatDate = startTime.Format("2006-01-02") go writeSummaryStats(config, statsSummary) tags := map[string]string{ "deliveryservice": statsSummary.DeliveryService, "cdn": statsSummary.CDNName, } fields := map[string]interface{}{ "value": maxGbps, } pt, err := influx.NewPoint( statsSummary.StatName, tags, fields, startTime, ) if err != nil { errHndlr(err, ERROR) continue } bp.AddPoint(pt) // write bytes served data to traffic_ops statsSummary.StatName = "daily_bytesserved" statsSummary.StatValue = strconv.FormatFloat(bytesServedTb, 'f', 2, 64) go writeSummaryStats(config, statsSummary) fields = map[string]interface{}{ "value": bytesServedTb, } pt, err = influx.NewPoint( statsSummary.StatName, tags, fields, startTime, ) if err != nil { errHndlr(err, ERROR) continue } bp.AddPoint(pt) } config.BpsChan <- bp log.Info("Collected daily stats @ ", now) } }
// NewSender returns a function that will accept datapoints to send to influxdb func NewSender( config interface{}, batch client.BatchPointsConfig, batchSize int, queueSize int, flush int, errFunc func(error), ) (Sender, error) { if batchSize <= 0 { batchSize = DefaultBatchSize } if queueSize <= 0 { queueSize = DefaultQueueSize } if flush <= 0 { flush = DefaultFlush } var conn client.Client var err error switch conf := config.(type) { case client.HTTPConfig: conn, err = client.NewHTTPClient(conf) if err != nil { return nil, errors.Wrap(err, "error creating HTTPClient") } _, _, err = conn.Ping(conf.Timeout) if err != nil { return nil, fmt.Errorf("cannot ping influxdb server: %s", conf.Addr) } if err := dbCheck(conn, batch.Database); err != nil { return nil, errors.Wrapf(err, "check for database %s failed", batch.Database) } case client.UDPConfig: conn, err = client.NewUDPClient(conf) if err != nil { return nil, errors.Wrap(err, "error creating UDPClient") } } pts := make(chan *client.Point, queueSize) bp, err := client.NewBatchPoints(batch) if err != nil { return nil, errors.Wrap(err, "batchpoints error") } go func() { delay := time.Duration(flush) * time.Second tick := time.Tick(delay) count := 0 for { select { case p := <-pts: bp.AddPoint(p) count++ if count < batchSize { continue } case <-tick: if len(bp.Points()) == 0 { continue } } for { if err := conn.Write(bp); err != nil { if errFunc != nil { errFunc(err) } time.Sleep(retry) continue } bp, _ = client.NewBatchPoints(batch) count = 0 break } } }() return func(key string, tags map[string]string, fields map[string]interface{}, ts time.Time) error { pt, err := client.NewPoint(key, tags, fields, ts) if err != nil { return err } pts <- pt return nil }, nil }