Example #1
0
// WriteDB writes one batch of points to InfluxDB. A failed write is
// reported on stdout; the error itself is not propagated to the caller.
func WriteDB(c *client.Client, b client.BatchPoints) {
	if _, err := c.Write(b); err != nil {
		fmt.Printf("Fail to write to database, error: %s\n", err.Error())
	}
}
Example #2
0
// WriteStock converts the given stocks into "stock_daily" points and
// writes them to DBName in a single batch.
//
// The write error, if any, is logged and returned. The original called
// log.Fatal, which terminates the process and made the `return err`
// unreachable — callers could never see the error.
func WriteStock(conn *client.Client, stks []Stock) error {
	pts := make([]client.Point, len(stks))
	for i := range stks {
		pts[i] = client.Point{
			Measurement: "stock_daily",
			Tags: map[string]string{
				"Name": stks[i].Name,
				"Code": stks[i].Code,
				"Area": stks[i].Area,
			},
			Fields: map[string]interface{}{
				"HighPrice":  stks[i].HighPrice,
				"LowPrice":   stks[i].LowPrice,
				"OpenPrice":  stks[i].OpenPrice,
				"ClosePrice": stks[i].ClosePrice,
			},
			Time: stks[i].Time,
		}
	}

	bth := client.BatchPoints{
		Points:          pts,
		Database:        DBName,
		RetentionPolicy: "default",
	}

	if _, err := conn.Write(bth); err != nil {
		// Log and propagate instead of exiting the process.
		log.Println(err)
		return err
	}

	return nil
}
Example #3
0
// writePoints builds one batch of random load-test points (random host
// and metric tags, random float value) and writes it to the configured
// database and retention policy. Write failures are counted on the
// error meter and logged.
func writePoints(con *client.Client, l *LoadTest) {
	hosts := []string{"host1", "host2", "host3", "host4", "host5", "host6"}
	metrics := []string{
		"com.addthis.Service.total._red_pjson__.1MinuteRate",
		"com.addthis.Service.total._red_lojson_100eng.json.1MinuteRate",
		"com.addthis.Service.total._red_lojson_300lo.json.1MinuteRate",
	}

	n := *l.batchSize
	batch := make([]client.Point, n)
	for idx := range batch {
		batch[idx] = client.Point{
			Measurement: *l.measurement,
			Tags: map[string]string{
				"host":   hosts[rand.Intn(len(hosts))],
				"metric": metrics[rand.Intn(len(metrics))],
			},
			Fields: map[string]interface{}{
				"value": rand.Float64(),
			},
			Time:      time.Now(),
			Precision: "n",
		}
	}

	bps := client.BatchPoints{
		Points:          batch,
		Database:        *l.db,
		RetentionPolicy: *l.retentionPolicy,
	}
	if _, err := con.Write(bps); err != nil {
		l.errorMeter.Mark(1)
		log.Println(err)
	}
}
Example #4
0
// writePoints records a single response-time sample for a user/service
// pair in the "response_time" measurement.
//
// Fixes: the original ran log.Fatal on a failed write, killing the whole
// process over one dropped metric point; it now just logs. The
// single-iteration sampleSize loop was also removed — it only ever
// built one point.
func writePoints(con *client.Client, point ResponseTimePoint) {
	pts := []client.Point{{
		Measurement: "response_time",
		Tags: map[string]string{
			"user_id":    fmt.Sprintf("%d", point.UserId),
			"service_id": fmt.Sprintf("%d", point.ServiceId),
		},
		Fields: map[string]interface{}{
			"value": point.Value,
		},
		Time:      time.Now(),
		Precision: "ms",
	}}

	bps := client.BatchPoints{
		Points: pts,
		//@TODO make this flexible via environment var
		Database: "noty",
		//@TODO Create a better retention policy based on
		// plan of each user and use it when we write point
		RetentionPolicy: "default",
	}
	if _, err := con.Write(bps); err != nil {
		log.Println("Write point error")
		log.Println(err)
	}
}
Example #5
0
// writeSeries pushes the buffered series to InfluxDB, logging (but not
// propagating) any write failure.
func writeSeries(client *influxdb.Client, buf []*influxdb.Series) {
	if err := client.WriteSeries(buf); err != nil {
		Conf.Logger.Printf("influxdb can't write series %+v", err)
	}
}
Example #6
0
// Middleware sets Tracking cookie
func Track(i *client.Client) ace.HandlerFunc {
	return func(c *ace.C) {
		session := c.Sessions(Tracker)

		// Assign a visitor id on first sight and persist it in the session.
		visitorID := session.GetString(V_ID, "")
		if visitorID == "" {
			visitorID = uuid.New()
			session.Set(V_ID, visitorID)
		}

		point := client.Point{
			Measurement: Measurement,
			Tags: map[string]string{
				"event": "view",
				"vid":   visitorID,
			},
			Fields: map[string]interface{}{
				"value": 1,
			},
			// Time: time.Now(),
			Precision: "s",
		}

		batch := client.BatchPoints{
			Points:          []client.Point{point},
			Database:        "mixevents",
			RetentionPolicy: "default",
		}

		// Fire-and-forget write so tracking never blocks the request.
		go i.Write(batch)
		c.Next()
	}
}
Example #7
0
// ReadDB runs cmd against the source database sdb and converts every
// returned series into client.Point values, collected into a
// client.BatchPoints targeted at the destination database ddb with the
// "default" retention policy.
//
// Fix: on a query error (or empty response) the function now returns
// early; the original went on to read response.Results, which panics
// when Query fails and leaves response nil.
func ReadDB(c *client.Client, sdb, ddb, cmd string) client.BatchPoints {

	q := client.Query{
		Command:  cmd,
		Database: sdb,
	}

	//get type client.BatchPoints
	var batchpoints client.BatchPoints

	response, err := c.Query(q)
	if err != nil {
		fmt.Printf("Fail to get response from database, read database error: %s\n", err.Error())
		return batchpoints
	}

	res := response.Results
	if len(res) == 0 {
		fmt.Printf("The response of database is null, read database error!\n")
		return batchpoints
	}

	for k := 0; k < len(res); k++ {

		//show progress of reading series
		bar := pb.StartNew(len(res[k].Series))
		for _, ser := range res[k].Series {

			//get type client.Point
			var point client.Point

			point.Measurement = ser.Name
			point.Tags = ser.Tags
			for _, v := range ser.Values {
				// Column 0 is always the RFC3339 timestamp.
				point.Time, _ = time.Parse(time.RFC3339, v[0].(string))

				field := make(map[string]interface{})
				for i := 1; i < len(v); i++ {
					if v[i] != nil {
						field[ser.Columns[i]] = v[i]
					}
				}
				point.Fields = field
				point.Precision = "s"
				batchpoints.Points = append(batchpoints.Points, point)
			}
			bar.Increment()
			time.Sleep(3 * time.Millisecond)
		}
		bar.FinishPrint("Read series has finished!\n")
	}
	batchpoints.Database = ddb
	batchpoints.RetentionPolicy = "default"
	return batchpoints
}
Example #8
0
// send writes the collected points to the database named by
// databaseFlag, applying retentionPolicyFlag when one was given, and
// returns the write error.
func send(client *influxClient.Client, series []influxClient.Point) error {
	batch := influxClient.BatchPoints{
		Database: databaseFlag,
		Points:   series,
	}
	if retentionPolicyFlag != "" {
		batch.RetentionPolicy = retentionPolicyFlag
	}

	_, err := client.Write(batch)
	return err
}
Example #9
0
// resetDB drops the given database. A "database not found" error is
// treated as success so the call is idempotent.
func resetDB(c *client.Client, database string) error {
	_, err := c.Query(client.Query{
		// Quote the identifier so names containing hyphens or mixed
		// case work, matching the quoting style used by the other
		// database-management queries in this codebase.
		Command: fmt.Sprintf("DROP DATABASE \"%s\"", database),
	})

	if err != nil && !strings.Contains(err.Error(), "database not found") {
		return err
	}

	return nil
}
Example #10
0
// write_influxdb sends the given points to the database named in conf,
// using the "default" retention policy.
//
// NOTE(review): log.Fatal terminates the entire process on a single
// failed write — confirm that is intended; if this runs inside a
// long-lived service, logging and continuing (or returning the error)
// is likely preferable.
func write_influxdb(con *client.Client, conf InfluxdbConf, pts []client.Point) {

	bps := client.BatchPoints{
		Points:          pts,
		Database:        conf.Database,
		RetentionPolicy: "default",
	}
	_, err := con.Write(bps)
	if err != nil {
		log.Fatal(err)
	}
}
Example #11
0
// queryDB convenience function to query the database.
//
// Fix: the original shadowed err inside the if-initializer
// (`if response, err := con.Query(q); err == nil`), so a failed Query
// fell through to `return res, nil` and the error was silently lost.
// The error is now propagated to the caller.
func queryDB(con *client.Client, cmd string) (res []client.Result, err error) {
	q := client.Query{
		Command:  cmd,
		Database: MyDB,
	}
	response, err := con.Query(q)
	if err != nil {
		return res, err
	}
	if response.Error() != nil {
		return res, response.Error()
	}
	res = response.Results
	return res, nil
}
/*
	Send a query to InfluxDB server
*/
func queryDB(con *influxdb.Client, cmd string) (res []influxdb.Result, err error) {
	q := influxdb.Query{
		Command:  cmd,
		Database: DGConfig.DockerGuard.InfluxDB.DB,
	}
	// Assign to the named return instead of shadowing it: the original
	// if-initializer declared a new err, so a failed Query returned a
	// nil error and hid connection failures from the caller.
	response, err := con.Query(q)
	if err != nil {
		return
	}
	if response.Error() != nil {
		return res, response.Error()
	}
	res = response.Results
	return
}
// queryDB runs cmd against the given database and returns the results.
//
// Fix: the original shadowed err in the if-initializer, so a failed
// Query returned a nil error; the error is now propagated.
func queryDB(con *influx.Client, cmd string, database string) (res []influx.Result, err error) {
	q := influx.Query{
		Command:  cmd,
		Database: database,
	}
	response, err := con.Query(q)
	if err != nil {
		return
	}
	if response.Error() != nil {
		return res, response.Error()
	}
	res = response.Results
	return
}
Example #14
0
// execQuery runs the raw query string q through the given client and
// returns the response, surfacing both transport-level and
// response-level errors.
func (s *Service) execQuery(cli *client.Client, q string) (*client.Response, error) {
	resp, err := cli.Query(client.Query{Command: q})
	switch {
	case err != nil:
		return nil, err
	case resp.Err != nil:
		return nil, resp.Err
	default:
		return resp, nil
	}
}
Example #15
0
// createDatabase issues "create database" for the load test's target
// database (a no-op on the server side if it already exists) and
// panics if the query cannot be executed.
func createDatabase(con *client.Client, l *LoadTest) {
	name := *l.db
	l.Logger.Printf("creating database %s, if doesn't already exist", name)

	query := client.Query{
		Command:  fmt.Sprintf("create database %s", name),
		Database: name,
	}
	_, err := con.Query(query)
	if err != nil {
		panic(err)
	}
}
Example #16
0
// QueryDB runs cmd against DBName and returns the query results.
//
// Fix: the transport error is checked before touching the response.
// The original called response.Error() first, which panics when Query
// fails and returns a nil response.
func QueryDB(conn *client.Client, cmd string) (res []client.Result, err error) {
	q := client.Query{
		Command:  cmd,
		Database: DBName,
	}
	response, err := conn.Query(q)
	if err != nil {
		return res, err
	}
	if response.Error() != nil {
		return res, response.Error()
	}
	res = response.Results
	return
}
Example #17
0
// createDatabase creates databaseName on the InfluxDB server unless it
// already appears in the server's database list. A failure to fetch
// the list is ignored and creation is attempted anyway (the create
// call itself reports any real problem).
func createDatabase(databaseName string, client *influxdb.Client) error {
	needed := true
	if databases, err := client.GetDatabaseList(); err == nil {
		for _, database := range databases {
			if database["name"] == databaseName {
				needed = false
				break
			}
		}
	}
	if !needed {
		return nil
	}
	if err := client.CreateDatabase(databaseName); err != nil {
		return fmt.Errorf("Database creation failed: %v", err)
	}
	glog.Infof("Created database %q on influxdb", databaseName)
	return nil
}
Example #18
0
// queryInfluxDB polls "list series" until the first series contains at
// least as many points as the supported stat metrics (minus one),
// retrying up to maxInfluxdbRetries times with a 30s pause between
// attempts.
//
// Fix: a len(series) guard prevents the panic the original hit when
// the query succeeded but returned no series at all.
func queryInfluxDB(t *testing.T, client *influxdb.Client) {
	var series []*influxdb.Series
	var err error
	success := false
	for i := 0; i < maxInfluxdbRetries; i++ {
		if series, err = client.Query("list series", influxdb.Second); err == nil && len(series) > 0 {
			glog.V(1).Infof("query:' list series' - output %+v from influxdb", series[0].Points)
			if len(series[0].Points) >= (len(sink_api.SupportedStatMetrics()) - 1) {
				success = true
				break
			}
		}
		glog.V(2).Infof("influxdb test case failed. Retrying")
		time.Sleep(30 * time.Second)
	}
	require.NoError(t, err, "failed to list series in Influxdb")
	require.True(t, success, "list series test case failed.")
}
Example #19
0
// send snapshots every metrics.Timer registered in r and writes one
// point per timer to db under the "default" retention policy.
//
// Fix: the write error is now logged AND returned; the original always
// returned nil, so callers could never detect a failed flush.
func send(r metrics.Registry, client *influxClient.Client, db string) error {
	points := make([]influxClient.Point, 0)

	r.Each(func(name string, i interface{}) {
		now := time.Now()
		switch metric := i.(type) {
		case metrics.Timer:
			h := metric.Snapshot()
			ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
			points = append(points, influxClient.Point{
				Measurement: fmt.Sprintf("%s", name),
				Time:        now,
				Fields: map[string]interface{}{"count": h.Count(),
					"min":            h.Min(),
					"max":            h.Max(),
					"mean":           h.Mean(),
					"std-dev":        h.StdDev(),
					"50-percentile":  ps[0],
					"75-percentile":  ps[1],
					"95-percentile":  ps[2],
					"99-percentile":  ps[3],
					"999-percentile": ps[4],
					"one-minute":     h.Rate1(),
					"five-minute":    h.Rate5(),
					"fifteen-minute": h.Rate15(),
					"mean-rate":      h.RateMean(),
				},
			})
		}
	})

	bp := influxClient.BatchPoints{
		Points:          points,
		Database:        db,
		RetentionPolicy: "default",
	}
	if _, err := client.Write(bp); err != nil {
		log.Println(err)
		return err
	}
	return nil
}
// ensureSeriesExists makes sure a continuous query writing into
// seriesName exists and matches contQuery: an outdated continuous
// query targeting the series is dropped, and when no matching query
// remains the series is dropped and the continuous query re-created.
func ensureSeriesExists(conn *influxdb.Client, existingQueries *influxdb.Series, seriesName, contQuery string) error {
	found := false
	for _, row := range existingQueries.GetPoints() {
		id := row[1].(float64)
		query := row[2].(string)
		if !strings.Contains(query, "into "+seriesName) {
			continue
		}
		if query == contQuery {
			found = true
			continue
		}
		// Stale definition: drop it so the current one can take over.
		if _, err := conn.Query(fmt.Sprintf("drop continuous query %v", id), influxdb.Second); err != nil {
			return err
		}
	}
	if found {
		return nil
	}
	if _, err := conn.Query("drop series "+seriesName, influxdb.Second); err != nil {
		return err
	}
	_, err := conn.Query(contQuery, influxdb.Second)
	return err
}
// getInfluxData fetches the latest position sample (lat, lon, altitude,
// track) for the named flight.
//
// Fixes: the query error is now included in the fatal log message; the
// os.Exit(1) after log.Fatalf was removed (Fatalf already exits, the
// call was unreachable); and an empty result set returns early instead
// of indexing infRes[0] and panicking.
func getInfluxData(flightName string, influxCl *client.Client) InfluxResults {
	query := "select lat, lon, altitude, track from /^flight." + flightName + "/ limit 1"

	infRes, err := influxCl.Query(query)
	if err != nil {
		log.Fatalf("Query %s failed: %v", query, err)
	}

	if len(infRes) != 1 {
		log.Printf("No series present?!")
		return infRes
	}

	pointsCount := len(infRes[0].GetPoints())
	var _ = pointsCount
	//log.Printf("Series count: %d\n", pointsCount)
	//log.Printf("DATA: %v", infRes[0].GetPoints());

	return infRes
}
// writePoints writes `samples` random points to the "shapes"
// measurement in MyDB, each tagged with a random color and shape.
//
// Fix: the tags now carry the values from the colors/shapes slices.
// The original stored the random *index* as a string (e.g. "3"),
// leaving the declared names entirely unused.
func writePoints(con *client.Client, samples int) {
	var (
		shapes = []string{"circle", "rectangle", "square", "triangle"}
		colors = []string{"red", "blue", "green", "black", "purple", "magenta", "pink", "maroon"}
		pts    = make([]client.Point, samples)
	)

	// Fixed seed keeps runs reproducible.
	rand.Seed(42)
	for i := 0; i < samples; i++ {
		pts[i] = client.Point{
			Measurement: "shapes",
			Tags: map[string]string{
				"color": colors[rand.Intn(len(colors))],
				"shape": shapes[rand.Intn(len(shapes))],
			},
			Fields: map[string]interface{}{
				"value": rand.Intn(100000),
			},
			Time:      time.Now(),
			Precision: "s",
		}
	}

	bps := client.BatchPoints{
		Points:          pts,
		Database:        MyDB,
		RetentionPolicy: "default",
	}
	resp, err := con.Write(bps)
	if err != nil {
		panic(err)
	}

	if resp != nil {
		fmt.Printf("Wrote %d points\n", len(resp.Results))
	}

}
Example #23
0
// Getmeasurements runs cmd (typically "SHOW MEASUREMENTS") against sdb
// and returns the measurement names found in the first series of the
// first result.
//
// Fix: query failures and empty responses now return early. The
// original dereferenced the (possibly nil) response and indexed res[0]
// unconditionally, both of which panic on error.
func Getmeasurements(c *client.Client, sdb, cmd string) []string {

	//get measurements from database
	q := client.Query{
		Command:  cmd,
		Database: sdb,
	}

	var measurements []string

	response, err := c.Query(q)
	if err != nil {
		fmt.Printf("Fail to get response from database, get measurements error: %s\n", err.Error())
		return measurements
	}

	res := response.Results
	if len(res) == 0 || len(res[0].Series) == 0 {
		fmt.Printf("The response of database is null, get measurements error!\n")
		return measurements
	}

	values := res[0].Series[0].Values

	//show progress of getting measurements
	bar := pb.StartNew(len(values))

	for _, row := range values {
		measurements = append(measurements, fmt.Sprintf("%v", row[0]))
		bar.Increment()
		time.Sleep(3 * time.Millisecond)
	}
	bar.FinishPrint("Get measurements has finished!\n")

	return measurements
}
// getInfluxdbData executes the query and returns the names found in
// the second column of the single expected series, each mapped to
// false. Structural mismatches in the response fail the test via
// Failf; a wrong series count is returned as an error.
func getInfluxdbData(c *influxdb.Client, query string) (map[string]bool, error) {
	series, err := c.Query(query, influxdb.Second)
	if err != nil {
		return nil, err
	}
	if len(series) != 1 {
		return nil, fmt.Errorf("expected only one series from Influxdb for query %q. Got %+v", query, series)
	}
	if len(series[0].GetColumns()) != 2 {
		Failf("Expected two columns for query %q. Found %v", query, series[0].GetColumns())
	}
	names := map[string]bool{}
	for _, point := range series[0].GetPoints() {
		if len(point) != 2 {
			Failf("Expected only two entries in a point for query %q. Got %v", query, point)
		}
		name, ok := point[1].(string)
		if !ok {
			Failf("expected %v to be a string, but it is %T", point[1], point[1])
		}
		names[name] = false
	}
	return names, nil
}
Example #25
0
// write_in_batches writes data as line protocol in chunks of `size`
// lines and signals wg when done (deferred, so waiters are released
// even if a write panics).
//
// Bug fix: the final chunk was clamped to n-1 (`end = n - 1`), which
// silently dropped the last line of data because slicing is
// end-exclusive; it is now clamped to n so every line is written.
func write_in_batches(data []string, client *client.Client, size int, db string, wg *sync.WaitGroup) {
	defer wg.Done()

	start := time.Now()
	n := len(data)
	for index := 0; index < n; index += size {
		end := index + size
		if end > n {
			end = n
		}
		txt := strings.Join(data[index:end], "\n")
		if _, err := client.WriteLineProtocol(txt, db, "", "", ""); err != nil {
			panic(err)
		}
	}
	seelog.Infof("Write done in %s", time.Since(start))
}
Example #26
0
// prepareDatabase drops and re-creates the named database, then
// installs a 1h "default" retention policy.
// A default retention policy must always be present.
// Depending on the InfluxDB configuration it may be created automatically with the database or not.
// TODO create ret. policy only if not present
func prepareDatabase(client *influxdb.Client, database string) error {
	statements := []influxdb.Query{
		{Command: fmt.Sprintf("drop database \"%v\"", database)},
		{Command: fmt.Sprintf("create database \"%v\"", database)},
		{Command: fmt.Sprintf("create retention policy \"default\" on \"%v\" duration 1h replication 1 default", database)},
	}
	for _, q := range statements {
		if _, err := client.Query(q); err != nil {
			return err
		}
	}
	return nil
}
// Handles incoming requests.
//
// Reads a single JSON event from conn, parses each line of the check
// output as "<prefix>.<key> <value> <timestamp>", groups samples by
// metric key, and writes them to InfluxDB as series with columns
// (time, host, ip, value) at second precision. Malformed lines are
// skipped with a message on stdout.
func handleRequest(conn net.Conn, c *client.Client) {
	defer conn.Close()
	// Make a buffer to hold incoming data.
	// NOTE(review): a single Read into a fixed 100 KiB buffer assumes
	// the whole JSON payload arrives in one read; larger or fragmented
	// payloads would be truncated — confirm the sender's framing.
	buf := make([]byte, 102400)
	// Read the incoming connection into the buffer.
	numbytes, err := conn.Read(buf)
	if err != nil {
		fmt.Println("Error reading:", err.Error())
		return
	}

	var evt Event
	err = json.Unmarshal(buf[:numbytes], &evt)
	if err != nil {
		fmt.Printf("Error unmarshalling event: %s for input %s\n", err.Error(), string(buf[:numbytes]))
		return
	}

	// One candidate metric sample per line of check output.
	outputlines := strings.Split(strings.TrimSpace(evt.Check.Output), "\n")

	// metric key -> accumulated rows of (time, host, ip, value).
	seriesdata := make(map[string][][]interface{})

	for _, l := range outputlines {
		line := strings.TrimSpace(l)
		pieces := strings.Split(line, " ")
		if len(pieces) != 3 {
			continue
		}
		// Drop the first dotted component; the remainder is the metric key.
		keys := strings.SplitN(pieces[0], ".", 2)
		if len(keys) != 2 {
			continue
		}
		keyraw := keys[1]
		// Remaining dots are flattened to underscores for the series name.
		key := strings.Replace(keyraw, ".", "_", -1)

		val, verr := strconv.ParseFloat(pieces[1], 64)
		if verr != nil {
			fmt.Printf("Error parsing value (%s): %s\n", pieces[1], verr.Error())
			continue
		}

		// NOTE(review): this local shadows the `time` package for the
		// rest of the loop body (harmless here, but easy to trip over).
		time, terr := strconv.ParseInt(pieces[2], 10, 64)
		if terr != nil {
			fmt.Printf("Error parsing time (%s): %s\n", pieces[2], terr.Error())
			continue
		}

		seriesdata[key] = append(seriesdata[key], []interface{}{time, evt.Client.Name, evt.Client.Address, val})
	}

	// Convert the accumulated rows into one series per metric key.
	serieses := make([]*client.Series, 0)
	for key, points := range seriesdata {
		series := &client.Series{
			Name:    key,
			Columns: []string{"time", "host", "ip", "value"},
			Points:  points,
		}
		serieses = append(serieses, series)
	}

	if err := c.WriteSeriesWithTimePrecision(serieses, client.Second); err != nil {
		fmt.Printf("Error sending data to influx: %s, data: %+v\n", err.Error(), serieses)
	}

}
Example #28
0
// send snapshots every metric in the registry, converts each supported
// kind (Counter, Gauge, GaugeFloat64, Histogram, Meter, Timer) to an
// InfluxDB series with the appropriate columns, and writes the whole
// batch with WriteSeries.
//
// Fix: the write error is now logged AND returned; the original always
// returned nil, hiding failed flushes from the caller.
//
// NOTE(review): the Meter and Timer series omit the "time" column that
// the other kinds include — confirm that is intended.
func send(r metrics.Registry, client *influxClient.Client) error {
	series := []*influxClient.Series{}

	r.Each(func(name string, i interface{}) {
		now := getCurrentTime()
		switch metric := i.(type) {
		case metrics.Counter:
			series = append(series, &influxClient.Series{
				Name:    fmt.Sprintf("%s.count", name),
				Columns: []string{"time", "count"},
				Points: [][]interface{}{
					{now, metric.Count()},
				},
			})
		case metrics.Gauge:
			series = append(series, &influxClient.Series{
				Name:    fmt.Sprintf("%s.value", name),
				Columns: []string{"time", "value"},
				Points: [][]interface{}{
					{now, metric.Value()},
				},
			})
		case metrics.GaugeFloat64:
			series = append(series, &influxClient.Series{
				Name:    fmt.Sprintf("%s.value", name),
				Columns: []string{"time", "value"},
				Points: [][]interface{}{
					{now, metric.Value()},
				},
			})
		case metrics.Histogram:
			h := metric.Snapshot()
			ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
			series = append(series, &influxClient.Series{
				Name: fmt.Sprintf("%s.histogram", name),
				Columns: []string{"time", "count", "min", "max", "mean", "std-dev",
					"50-percentile", "75-percentile", "95-percentile",
					"99-percentile", "999-percentile"},
				Points: [][]interface{}{
					{now, h.Count(), h.Min(), h.Max(), h.Mean(), h.StdDev(),
						ps[0], ps[1], ps[2], ps[3], ps[4]},
				},
			})
		case metrics.Meter:
			m := metric.Snapshot()
			series = append(series, &influxClient.Series{
				Name: fmt.Sprintf("%s.meter", name),
				Columns: []string{"count", "one-minute",
					"five-minute", "fifteen-minute", "mean"},
				Points: [][]interface{}{
					{m.Count(), m.Rate1(), m.Rate5(), m.Rate15(), m.RateMean()},
				},
			})
		case metrics.Timer:
			h := metric.Snapshot()
			ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
			series = append(series, &influxClient.Series{
				Name: fmt.Sprintf("%s.timer", name),
				Columns: []string{"count", "min", "max", "mean", "std-dev",
					"50-percentile", "75-percentile", "95-percentile",
					"99-percentile", "999-percentile", "one-minute", "five-minute", "fifteen-minute", "mean-rate"},
				Points: [][]interface{}{
					{h.Count(), h.Min(), h.Max(), h.Mean(), h.StdDev(),
						ps[0], ps[1], ps[2], ps[3], ps[4],
						h.Rate1(), h.Rate5(), h.Rate15(), h.RateMean()},
				},
			})
		}
	})
	if err := client.WriteSeries(series); err != nil {
		log.Println(err)
		return err
	}
	return nil
}
// storeCacheValues parses Traffic Monitor cache-stats JSON and writes
// one point per (cache, stat) pair into the "cache_stats" database
// under the "weekly" retention policy.
//
// Fix: the stat time (epoch milliseconds as an int) is converted
// directly to int64 instead of round-tripping int -> string -> int64
// through strconv.Itoa/ParseInt, which could never fail anyway.
func storeCacheValues(trafmonData []byte, cdnName string, sampleTime int64, cacheGroupMap map[string]string, influxClient *influx.Client) error {
	/* note about the data:
	keys are cdnName:deliveryService:cacheGroup:cacheName:statName
	*/

	type CacheStatsJson struct {
		Pp     string `json:"pp"`
		Date   string `json:"date"`
		Caches map[string]map[string][]struct {
			Index uint64 `json:"index"`
			Time  int    `json:"time"`
			Value string `json:"value"`
			Span  uint64 `json:"span"`
		} `json:"caches"`
	}
	var jData CacheStatsJson
	err := json.Unmarshal(trafmonData, &jData)
	errHndlr(err, ERROR)
	statCount := 0
	pts := make([]influx.Point, 0)
	for cacheName, cacheData := range jData.Caches {
		for statName, statData := range cacheData {
			// Normalize the stat name into the series/measurement key.
			dataKey := statName
			dataKey = strings.Replace(dataKey, ".bandwidth", ".kbps", 1)
			dataKey = strings.Replace(dataKey, "-", "_", -1)
			// statData[0].Time is epoch milliseconds.
			newTime := time.Unix(0, int64(statData[0].Time)*int64(time.Millisecond))
			//Get the stat value and convert to float
			statValue := statData[0].Value
			statFloatValue, err := strconv.ParseFloat(statValue, 64)
			if err != nil {
				// Unparseable values are recorded as zero.
				statFloatValue = 0.00
			}
			//add stat data to pts array
			pts = append(pts,
				influx.Point{
					Measurement: dataKey,
					Tags: map[string]string{
						"cachegroup": cacheGroupMap[cacheName],
						"hostname":   cacheName,
						"cdn":        cdnName,
					},
					Fields: map[string]interface{}{
						"value": statFloatValue,
					},
					Time:      newTime,
					Precision: "ms",
				},
			)
			statCount++
		}
	}
	//create influxdb batch of points
	// TODO: make retention policy configurable
	bps := influx.BatchPoints{
		Points:          pts,
		Database:        "cache_stats",
		RetentionPolicy: "weekly",
	}
	//write to influxdb
	_, err = influxClient.Write(bps)
	if err != nil {
		errHndlr(err, ERROR)
	}
	log.Info("Saved ", statCount, " cache stats values for ", cdnName, " @ ", sampleTime)
	return nil
}
/* the ds json looks like:
{
  "deliveryService": {
    "linear-gbr-hls-sbr": {
      "	.us-ma-woburn.kbps": [{
        "index": 520281,
        "time": 1398893383605,
        "value": "0",
        "span": 520024
      }],
      "location.us-de-newcastle.kbps": [{
        "index": 520281,
        "time": 1398893383605,
        "value": "0",
        "span": 517707
      }],
    }
 }
*/
// storeDsValues parses Traffic Monitor delivery-service stats JSON and
// writes one point per (deliveryservice, cachegroup, stat) into the
// "deliveryservice_stats" database under the "weekly" retention policy.
//
// Fixes: the stat time is converted directly from its millisecond int
// instead of round-tripping through a string; and metric names that do
// not split into enough dotted components are skipped instead of
// panicking on an out-of-range index.
func storeDsValues(rascalData []byte, cdnName string, sampleTime int64, influxClient *influx.Client) error {
	type DsStatsJson struct {
		Pp              string `json:"pp"`
		Date            string `json:"date"`
		DeliveryService map[string]map[string][]struct {
			Index uint64 `json:"index"`
			Time  int    `json:"time"`
			Value string `json:"value"`
			Span  uint64 `json:"span"`
		} `json:"deliveryService"`
	}

	var jData DsStatsJson
	err := json.Unmarshal(rascalData, &jData)
	errHndlr(err, ERROR)
	statCount := 0
	pts := make([]influx.Point, 0)

	for dsName, dsData := range jData.DeliveryService {
		for dsMetric, dsMetricData := range dsData {
			//create dataKey (influxDb series)
			var cachegroup, statName string
			s := strings.Split(dsMetric, ".")
			if strings.Contains(dsMetric, "total.") {
				if len(s) < 2 {
					continue
				}
				cachegroup, statName = s[0], s[1]
			} else {
				if len(s) < 3 {
					continue
				}
				cachegroup, statName = s[1], s[2]
			}
			// dsMetricData[0].Time is epoch milliseconds.
			newTime := time.Unix(0, int64(dsMetricData[0].Time)*int64(time.Millisecond))
			//convert stat value to float
			statValue := dsMetricData[0].Value
			statFloatValue, err := strconv.ParseFloat(statValue, 64)
			if err != nil {
				// Unparseable values are recorded as zero.
				statFloatValue = 0.0
			}
			pts = append(pts,
				influx.Point{
					Measurement: statName,
					Tags: map[string]string{
						"deliveryservice": dsName,
						"cdn":             cdnName,
						"cachegroup":      cachegroup,
					},
					Fields: map[string]interface{}{
						"value": statFloatValue,
					},
					Time:      newTime,
					Precision: "ms",
				},
			)
			statCount++
		}
	}
	bps := influx.BatchPoints{
		Points:          pts,
		Database:        "deliveryservice_stats",
		RetentionPolicy: "weekly",
	}
	_, err = influxClient.Write(bps)
	if err != nil {
		errHndlr(err, ERROR)
	}
	log.Info("Saved ", statCount, " deliveryservice stats values for ", cdnName, " @ ", sampleTime)
	return nil
}