Example #1
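// WriteStock converts a slice of Stock values into points and writes them
// to the stock_daily measurement in a single batch.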
func WriteStock(conn *client.Client, stks []Stock) error {
	pts := make([]client.Point, len(stks))
	for i := 0; i < len(stks); i++ {
		pts[i] = client.Point{
			Measurement: "stock_daily",
			Tags: map[string]string{
				"Name": stks[i].Name,
				"Code": stks[i].Code,
				"Area": stks[i].Area,
			},
			Fields: map[string]interface{}{
				"HighPrice":  stks[i].HighPrice,
				"LowPrice":   stks[i].LowPrice,
				"OpenPrice":  stks[i].OpenPrice,
				"ClosePrice": stks[i].ClosePrice,
			},
			Time: stks[i].Time,
		}
	}

	bth := client.BatchPoints{
		Points:          pts,
		Database:        DBName,
		RetentionPolicy: "default",
	}

	if _, err := conn.Write(bth); err != nil {
		// Don't log.Fatal here: it exits the process and made the
		// return below unreachable. Log and return the error instead.
		log.Println(err)
		return err
	}

	return nil
}
Example #2
// Track is middleware that assigns a visitor ID in the session and records
// a "view" event in InfluxDB.
func Track(i *client.Client) ace.HandlerFunc {
	return func(c *ace.C) {
		session := c.Sessions(Tracker)
		_vid := session.GetString(V_ID, "")
		if _vid == "" {
			_vid = uuid.New()
			session.Set(V_ID, _vid)
		}

		var pts = make([]client.Point, 1)

		pts[0] = client.Point{
			Measurement: Measurement,
			Tags: map[string]string{
				"event": "view",
				"vid":   _vid,
			},
			Fields: map[string]interface{}{
				"value": 1,
			},
			// Time: time.Now(),
			Precision: "s",
		}

		bps := client.BatchPoints{
			Points:          pts,
			Database:        "mixevents",
			RetentionPolicy: "default",
		}
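		// Fire-and-forget: the write runs on its own goroutine and any
		// error it returns is discarded.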
		go i.Write(bps)
		c.Next()
	}
}
Example #3
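// writePoints builds a batch of random host/metric points and writes them
// to InfluxDB as one load-test batch.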
func writePoints(con *client.Client, l *LoadTest) {
	var (
		hosts     = []string{"host1", "host2", "host3", "host4", "host5", "host6"}
		metrics   = []string{
			"com.addthis.Service.total._red_pjson__.1MinuteRate",
			"com.addthis.Service.total._red_lojson_100eng.json.1MinuteRate",
			"com.addthis.Service.total._red_lojson_300lo.json.1MinuteRate",
		}
		batchSize = *l.batchSize
		pts       = make([]client.Point, batchSize)
	)

	for i := 0; i < batchSize; i++ {
		pts[i] = client.Point{
			Measurement: *l.measurement,
			Tags: map[string]string{
				"host":   hosts[rand.Intn(len(hosts))],
				"metric": metrics[rand.Intn(len(metrics))],
			},
			Fields: map[string]interface{}{
				"value": rand.Float64(),
			},
			Time:      time.Now(),
			Precision: "n",
		}
	}

	bps := client.BatchPoints{
		Points:          pts,
		Database:        *l.db,
		RetentionPolicy: *l.retentionPolicy,
	}

	_, err := con.Write(bps)
	if err != nil {
		l.errorMeter.Mark(1)
		log.Println(err)
	}
}
Example #4
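// WriteDB writes a prepared batch of points to the database, printing any error.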
func WriteDB(c *client.Client, b client.BatchPoints) {

	_, err := c.Write(b)
	if err != nil {
		fmt.Printf("Fail to write to database, error: %s\n", err.Error())
	}
}
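For context, here is how a caller might construct the client and batch that WriteDB expects. This is a minimal sketch, assuming the legacy github.com/influxdata/influxdb/client package; the server address and the "mydb" database name are placeholders, not taken from the example above.

package main

import (
	"fmt"
	"net/url"
	"time"

	"github.com/influxdata/influxdb/client"
)

func main() {
	// Placeholder address of a local InfluxDB instance.
	u, err := url.Parse("http://localhost:8086")
	if err != nil {
		panic(err)
	}

	con, err := client.NewClient(client.Config{URL: *u})
	if err != nil {
		panic(err)
	}

	bp := client.BatchPoints{
		Points: []client.Point{{
			Measurement: "example",
			Fields:      map[string]interface{}{"value": 42},
			Time:        time.Now(),
		}},
		Database:        "mydb", // placeholder database name
		RetentionPolicy: "default",
	}

	// Assumes WriteDB (above) lives in the same package.
	WriteDB(con, bp)
	fmt.Println("write attempted")
}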
Example #5
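// writePoints records a single response-time value, tagged with the user
// and service it belongs to.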
func writePoints(con *client.Client, point ResponseTimePoint) {
	var (
		sampleSize = 1
		pts        = make([]client.Point, sampleSize)
	)

	for i := 0; i < sampleSize; i++ {
		pts[i] = client.Point{
			Measurement: "response_time",
			Tags: map[string]string{
				"user_id":    fmt.Sprintf("%d", point.UserId),
				"service_id": fmt.Sprintf("%d", point.ServiceId),
			},
			Fields: map[string]interface{}{
				"value": point.Value,
			},
			Time:      time.Now(),
			Precision: "ms",
		}
	}

	bps := client.BatchPoints{
		Points: pts,
		//@TODO make this flexible via environment var
		Database: "noty",
		//@TODO Create a better retention policy based on
		// plan of each user and use it when we write point
		RetentionPolicy: "default",
	}
	_, err := con.Write(bps)
	if err != nil {
		log.Println("Write point error")
		log.Fatal(err)
	}
}
Example #6
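// send writes the given points to the database named by databaseFlag,
// applying retentionPolicyFlag when it is set.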
func send(client *influxClient.Client, series []influxClient.Point) error {
	w := influxClient.BatchPoints{Database: databaseFlag, Points: series}

	if retentionPolicyFlag != "" {
		w.RetentionPolicy = retentionPolicyFlag
	}

	_, err := client.Write(w)
	return err
}
Example #7
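// write_influxdb writes the given points to the database named in the
// config, under the default retention policy.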
func write_influxdb(con *client.Client, conf InfluxdbConf, pts []client.Point) {

	bps := client.BatchPoints{
		Points:          pts,
		Database:        conf.Database,
		RetentionPolicy: "default",
	}
	_, err := con.Write(bps)
	if err != nil {
		log.Fatal(err)
	}
}
Example #8
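// send snapshots every Timer in the registry and writes its count, rates,
// and percentiles as one point per timer.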
func send(r metrics.Registry, client *influxClient.Client, db string) error {
	points := make([]influxClient.Point, 0)

	r.Each(func(name string, i interface{}) {
		now := time.Now()
		switch metric := i.(type) {
		case metrics.Timer:
			h := metric.Snapshot()
			ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
			points = append(points, influxClient.Point{
				Measurement: fmt.Sprintf("%s", name),
				Time:        now,
				Fields: map[string]interface{}{"count": h.Count(),
					"min":            h.Min(),
					"max":            h.Max(),
					"mean":           h.Mean(),
					"std-dev":        h.StdDev(),
					"50-percentile":  ps[0],
					"75-percentile":  ps[1],
					"95-percentile":  ps[2],
					"99-percentile":  ps[3],
					"999-percentile": ps[4],
					"one-minute":     h.Rate1(),
					"five-minute":    h.Rate5(),
					"fifteen-minute": h.Rate15(),
					"mean-rate":      h.RateMean(),
				},
			})
		}
	})

	bp := influxClient.BatchPoints{
		Points:          points,
		Database:        db,
		RetentionPolicy: "default",
	}
	if _, err := client.Write(bp); err != nil {
		log.Println(err)
		return err
	}
	return nil
}
Example #9
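// writePoints writes the requested number of random shape/color samples to
// the shapes measurement.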
func writePoints(con *client.Client, samples int) {
	var (
		shapes     = []string{"circle", "rectangle", "square", "triangle"}
		colors     = []string{"red", "blue", "green", "black", "purple", "magenta", "pink", "maroon"}
		sampleSize = samples
		pts        = make([]client.Point, sampleSize)
	)

	rand.Seed(42)
	for i := 0; i < sampleSize; i++ {
		pts[i] = client.Point{
			Measurement: "shapes",
			Tags: map[string]string{
				"color": strconv.Itoa(rand.Intn(len(colors))),
				"shape": strconv.Itoa(rand.Intn(len(shapes))),
			},
			Fields: map[string]interface{}{
				"value": rand.Intn(100000),
			},
			Time:      time.Now(),
			Precision: "s",
		}
	}

	bps := client.BatchPoints{
		Points:          pts,
		Database:        MyDB,
		RetentionPolicy: "default",
	}
	_, err := con.Write(bps)
	if err != nil {
		panic(err)
	}

	fmt.Printf("Wrote %d points\n", len(pts))

}
Example #10
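// storeCacheValues unmarshals traffic monitor cache stats and writes one
// point per cache/stat pair to the cache_stats database.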
func storeCacheValues(trafmonData []byte, cdnName string, sampleTime int64, cacheGroupMap map[string]string, influxClient *influx.Client) error {
	/* note about the data:
	keys are cdnName:deliveryService:cacheGroup:cacheName:statName
	*/

	type CacheStatsJson struct {
		Pp     string `json:"pp"`
		Date   string `json:"date"`
		Caches map[string]map[string][]struct {
			Index uint64 `json:"index"`
			Time  int    `json:"time"`
			Value string `json:"value"`
			Span  uint64 `json:"span"`
		} `json:"caches"`
	}
	var jData CacheStatsJson
	err := json.Unmarshal(trafmonData, &jData)
	errHndlr(err, ERROR)
	statCount := 0
	pts := make([]influx.Point, 0)
	for cacheName, cacheData := range jData.Caches {
		for statName, statData := range cacheData {
			dataKey := statName
			dataKey = strings.Replace(dataKey, ".bandwidth", ".kbps", 1)
			dataKey = strings.Replace(dataKey, "-", "_", -1)
			//The stat time is epoch milliseconds; convert it to a time.Time
			msInt := int64(statData[0].Time)
			newTime := time.Unix(0, msInt*int64(time.Millisecond))
			//Get the stat value and convert to float
			statValue := statData[0].Value
			statFloatValue, err := strconv.ParseFloat(statValue, 64)
			if err != nil {
				statFloatValue = 0.00
			}
			//add stat data to pts array
			pts = append(pts,
				influx.Point{
					Measurement: dataKey,
					Tags: map[string]string{
						"cachegroup": cacheGroupMap[cacheName],
						"hostname":   cacheName,
						"cdn":        cdnName,
					},
					Fields: map[string]interface{}{
						"value": statFloatValue,
					},
					Time:      newTime,
					Precision: "ms",
				},
			)
			statCount++
		}
	}
	//create influxdb batch of points
	// TODO: make retention policy configurable
	bps := influx.BatchPoints{
		Points:          pts,
		Database:        "cache_stats",
		RetentionPolicy: "weekly",
	}
	//write to influxdb
	_, err = influxClient.Write(bps)
	if err != nil {
		errHndlr(err, ERROR)
	}
	log.Info("Saved ", statCount, " cache stats values for ", cdnName, " @ ", sampleTime)
	return nil
}
Example #11
/* the ds json looks like:
{
  "deliveryService": {
    "linear-gbr-hls-sbr": {
      "	.us-ma-woburn.kbps": [{
        "index": 520281,
        "time": 1398893383605,
        "value": "0",
        "span": 520024
      }],
      "location.us-de-newcastle.kbps": [{
        "index": 520281,
        "time": 1398893383605,
        "value": "0",
        "span": 517707
      }]
    }
 }
*/
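// storeDsValues unmarshals delivery service stats and writes one point per
// metric to the deliveryservice_stats database.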
func storeDsValues(rascalData []byte, cdnName string, sampleTime int64, influxClient *influx.Client) error {
	type DsStatsJson struct {
		Pp              string `json:"pp"`
		Date            string `json:"date"`
		DeliveryService map[string]map[string][]struct {
			Index uint64 `json:"index"`
			Time  int    `json:"time"`
			Value string `json:"value"`
			Span  uint64 `json:"span"`
		} `json:"deliveryService"`
	}

	var jData DsStatsJson
	err := json.Unmarshal(rascalData, &jData)
	errHndlr(err, ERROR)
	statCount := 0
	pts := make([]influx.Point, 0)

	for dsName, dsData := range jData.DeliveryService {
		for dsMetric, dsMetricData := range dsData {
			//create dataKey (influxDb series)
			var cachegroup, statName string
			if strings.Contains(dsMetric, "total.") {
				s := strings.Split(dsMetric, ".")
				cachegroup, statName = s[0], s[1]
			} else {
				s := strings.Split(dsMetric, ".")
				cachegroup, statName = s[1], s[2]
			}
			//the stat time is epoch milliseconds; convert it to a time.Time
			msInt := int64(dsMetricData[0].Time)
			newTime := time.Unix(0, msInt*int64(time.Millisecond))
			//convert stat value to float
			statValue := dsMetricData[0].Value
			statFloatValue, err := strconv.ParseFloat(statValue, 64)
			if err != nil {
				statFloatValue = 0.0
			}
			pts = append(pts,
				influx.Point{
					Measurement: statName,
					Tags: map[string]string{
						"deliveryservice": dsName,
						"cdn":             cdnName,
						"cachegroup":      cachegroup,
					},
					Fields: map[string]interface{}{
						"value": statFloatValue,
					},
					Time:      newTime,
					Precision: "ms",
				},
			)
			statCount++
		}
	}
	bps := influx.BatchPoints{
		Points:          pts,
		Database:        "deliveryservice_stats",
		RetentionPolicy: "weekly",
	}
	_, err = influxClient.Write(bps)
	if err != nil {
		errHndlr(err, ERROR)
	}
	log.Info("Saved ", statCount, " deliveryservice stats values for ", cdnName, " @ ", sampleTime)
	return nil
}
Example #12
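// send walks the metrics registry and writes one point per metric, with
// fields matching each metric type.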
func send(client *influx.Client, database string, r metrics.Registry) error {

	var (
		points []influx.Point
		now    = time.Now()
	)

	r.Each(func(name string, i interface{}) {

		switch metric := i.(type) {
		case metrics.Counter:
			points = append(points, influx.Point{
				Measurement: fmt.Sprintf("%s.count", name),
				Time:        now,
				Fields: map[string]interface{}{
					"count": metric.Count(),
				},
			})
		case metrics.Gauge:
			points = append(points, influx.Point{
				Measurement: fmt.Sprintf("%s.value", name),
				Time:        now,
				Fields: map[string]interface{}{
					"value": metric.Value(),
				},
			})
		case metrics.GaugeFloat64:
			points = append(points, influx.Point{
				Measurement: fmt.Sprintf("%s.value", name),
				Time:        now,
				Fields: map[string]interface{}{
					"value": metric.Value(),
				},
			})
		case metrics.Histogram:
			sn := metric.Snapshot()
			ps := sn.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
			points = append(points, influx.Point{
				Measurement: fmt.Sprintf("%s.histogram", name),
				Time:        now,
				Fields: map[string]interface{}{
					"count":          sn.Count,
					"min":            sn.Min(),
					"max":            sn.Max(),
					"mean":           sn.Mean(),
					"std-dev":        sn.StdDev(),
					"50-percentile":  ps[0],
					"75-percentile":  ps[1],
					"95-percentile":  ps[2],
					"99-percentile":  ps[3],
					"999-percentile": ps[4],
				},
			})
		case metrics.Meter:
			sn := metric.Snapshot()
			points = append(points, influx.Point{
				Measurement: fmt.Sprintf("%s.meter", name),
				Time:        now,
				Fields: map[string]interface{}{
					"count":          sn.Count(),
					"one-minute":     sn.Rate1(),
					"five-minute":    sn.Rate5(),
					"fifteen-minute": sn.Rate15(),
					"mean":           sn.RateMean(),
				},
			})
		case metrics.Timer:
			sn := metric.Snapshot()
			ps := sn.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
			points = append(points, influx.Point{
				Measurement: fmt.Sprintf("%s.timer", name),
				Fields: map[string]interface{}{
					"count":          sn.Count(),
					"min":            sn.Min(),
					"max":            sn.Max(),
					"mean":           sn.Mean(),
					"std-dev":        sn.StdDev(),
					"50-percentile":  ps[0],
					"75-percentile":  ps[1],
					"95-percentile":  ps[2],
					"99-percentile":  ps[3],
					"999-percentile": ps[4],
					"one-minute":     sn.Rate1(),
					"five-minute":    sn.Rate5(),
					"fifteen-minute": sn.Rate15(),
					"mean-rate":      sn.RateMean(),
				},
			})
		}
	})

	batch := influx.BatchPoints{
		Points:   points,
		Database: database,
		Time:     now,
	}

	_, err := client.Write(batch)
	return err
}
Example #13
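// writeRooms simulates a month of per-minute temperature and humidity
// readings for eight rooms across ten floors, writing them to InfluxDB
// in batches.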
func writeRooms(con *client.Client, myDB string) {
	var (
		roomname   = []string{"exec", "singleone", "singletwo", "doubleone", "doubletwo", "doublethree", "meetingone", "balcony"}
		sampleSize = 10000
		pts        = make([]client.Point, sampleSize)
	)

	rand.Seed(42)

	// We'll track where we are in the current batch of points with batchptr
	batchptr := 0

	// Let's start by looping through all the rooms then through each floor.
	for j := 0; j < 8; j++ {
		for k := 0; k < 10; k++ {

			// Now we'll pick a "starting temperature and humidity"
			temp := float64(rand.Intn(100))/10 + 20.0
			humid := float64(rand.Intn(200))/10 + 20.0
			// This all starts from a month ago, so we'll find now and take a month off
			now := time.Now()
			secs := now.Unix() - (60 * 60 * 24 * 30)

			// Now we travel forward in time back to now...
			for i := 0; i < (60 * 24 * 30); i++ {
				// We decide on our temperature behavior here. This uses a switch with
				// a random number in case we want to make the behavior richer/more
				// erratic. For now, most of the time, the temperature goes up or down by
				// up to .5 degrees

				switch rand.Intn(10) {
				case 0, 1, 2, 3, 4, 5, 6:
					// Use float division so the drift (up to ±0.5)
					// isn't truncated to zero by integer division.
					tchange := float64(rand.Intn(11)-5) / 10
					temp = temp - tchange
					if temp < 0 {
						temp = 0.0
					}
					if temp > 40 {
						temp = 40.0
					}
				default:
				}

				// And we do something similar for the humidity
				switch rand.Intn(10) {
				case 0, 1, 2, 3, 4, 5, 6:
					hchange := float64(rand.Intn(11)-5) / 10
					humid = humid - hchange
					if humid < 5 {
						humid = 5.0
					}
					if humid > 40 {
						humid = 40.0
					}
				default:
				}

				// Now we can create the point of data at batchptr. We fill
				// in the room with a name from our array above, set the
				// level, and set our created timestamp.

				pts[batchptr] = client.Point{
					Measurement: "temperature",
					Tags: map[string]string{
						"room":  roomname[j],
						"level": strconv.Itoa(k),
					},
					Fields: map[string]interface{}{
						"temp":  temp,
						"humid": humid,
					},
					Time:      time.Unix(secs, 0),
					Precision: "s",
				}

				// We increment the pointer now...
				batchptr = batchptr + 1

				// And if we hit the sampleSize, it's time to send
				if batchptr == sampleSize {
					// We create a BatchPoints structure with our points, set the db
					// it is heading for...
					bps := client.BatchPoints{
						Points:          pts,
						Database:        myDB,
						RetentionPolicy: "default",
					}

					// And then we write it
					_, err := con.Write(bps)

					// If that fails, we bomb out here with an error
					if err != nil {
						log.Fatal(err)
					}

					// Otherwise we reallocate the pts (letting the GC clean up) and
					// reset the batchptr
					pts = make([]client.Point, sampleSize)
					batchptr = 0
				}

				// Now we bump the clock on 60 seconds
				secs = secs + 60
			}
		}
	}

	// Tidy up by copying the incomplete batch to the server
	if batchptr > 0 {
		newpts := make([]client.Point, batchptr)
		copy(newpts, pts)

		bps := client.BatchPoints{
			Points:          newpts,
			Database:        myDB,
			RetentionPolicy: "default",
		}
		_, err := con.Write(bps)
		if err != nil {
			log.Fatal(err)
		}
	}
}