Example #1
func init() {
	if conf.StatisticEnable {
		var err error
		influxClient, err = influx.NewHTTPClient(
			influx.HTTPConfig{
				Addr:     conf.InfluxURL,
				Username: conf.InfluxUser,
				Password: conf.InfluxPassword,
			})
		if err != nil {
			log.Println("Failed to init influx client: ", err.Error())
			os.Exit(1)
		}

		influxBatchPoints, err = influx.NewBatchPoints(influx.BatchPointsConfig{
			Database:  conf.InfluxDB,
			Precision: "s",
		})
		if err != nil {
			log.Println("Failed to init influx batch points: ", err.Error())
			os.Exit(1)
		}
		go logStatisticTask()
	}
}
Example #2
File: flux.go Project: daneroo/go-ted1k
// Perform the batch write
func (w *Writer) writeEntries(entries []Entry) {
	bps, err := client.NewBatchPoints(client.BatchPointsConfig{
		Database:        w.DB,
		RetentionPolicy: "default",
		Precision:       "s",
		// WriteConsistency: string,
	})
	Checkerr(err)

	for _, entry := range entries {
		name := "watt" // Measurement
		tags := map[string]string{ /*"ted1k",...*/ }
		fields := map[string]interface{}{
			"value": entry.Watt,
		}
		pt, err := client.NewPoint(name, tags, fields, entry.Stamp)
		Checkerr(err)
		bps.AddPoint(pt)

		// fmt.Printf("point: %v\n", pt)
	}

	// TODO(daneroo): retry, if error is timeout?
	err = w.con.Write(bps)
	Checkerr(err)
}
Example #3
File: influxdb_test.go Project: crezam/kit
func TestHistogramWithTags(t *testing.T) {
	expectedName := "test_histogram"
	expectedTags := map[string]string{
		"key1": "value1",
		"key2": "value2",
	}
	expectedFields := []map[string]map[string]interface{}{
		{
			"test_histogram_p50": {"value": 5.0},
			"test_histogram_p90": {"value": 5.0},
			"test_histogram_p95": {"value": 5.0},
			"test_histogram_p99": {"value": 5.0},
		},
		{
			"test_histogram_p50": {"Test": "Test", "value": 5.0},
			"test_histogram_p90": {"Test": "Test", "value": 10.0},
			"test_histogram_p95": {"Test": "Test", "value": 10.0},
			"test_histogram_p99": {"Test": "Test", "value": 10.0},
		},
		{
			"test_histogram_p50": {"Test": "Test", "value": 5.0},
			"test_histogram_p90": {"Test": "Test", "value": 10.0},
			"test_histogram_p95": {"Test": "Test", "value": 10.0},
			"test_histogram_p99": {"Test": "Test", "value": 10.0},
		},
	}
	quantiles := []int{50, 90, 95, 99}

	cl := &mockClient{}
	cl.Add(12)
	bp, _ := stdinflux.NewBatchPoints(stdinflux.BatchPointsConfig{
		Database:  "testing",
		Precision: "s",
	})

	tags := []metrics.Field{}
	for key, value := range expectedTags {
		tags = append(tags, metrics.Field{Key: key, Value: value})
	}

	triggerChan := make(chan time.Time)
	histogram := influxdb.NewHistogramTick(cl, bp, expectedName, tags, triggerChan, 0, 100, 3, quantiles...)
	histogram.Observe(5)
	histogram = histogram.With(metrics.Field{Key: "Test", Value: "Test"})
	histogram.Observe(10)
	histogram.Observe(4)
	triggerChan <- time.Now()
	cl.Wait()

	for i := 0; i <= 11; i++ {
		actualName := cl.Points[i].Name()
		givenName := expectedName + actualName[len(actualName)-4:]
		givenPoint := mockPoint{
			Name:   givenName,
			Tags:   expectedTags,
			Fields: expectedFields[i/4][actualName],
		}
		comparePoint(t, i, givenPoint, cl.Points[i])
	}
}
Example #4
func syncDeliveryServiceStat(sourceClient influx.Client, targetClient influx.Client, statName string, days int) {

	db := "deliveryservice_stats"
	bps, _ := influx.NewBatchPoints(influx.BatchPointsConfig{
		Database:        db,
		Precision:       "ms",
		RetentionPolicy: "monthly",
	})

	queryString := fmt.Sprintf("select time, cachegroup, cdn, deliveryservice, value from \"monthly\".\"%s\"", statName)
	if days > 0 {
		queryString += fmt.Sprintf(" where time > now() - %dd", days)
	}
	fmt.Println("queryString ", queryString)
	res, err := queryDB(sourceClient, queryString, db)
	if err != nil {
		errorMessage = fmt.Sprintf("An error occured getting %s records from sourceDb: %v\n", statName, err)
		fmt.Println(errorMessage)
		return
	}
	sourceStats := getDeliveryServiceStats(res)
	// get value from target DB
	targetRes, err := queryDB(targetClient, queryString, db)
	if err != nil {
		errorMessage = fmt.Sprintf("An error occured getting %s record from target db: %v\n", statName, err)
		fmt.Println(errorMessage)
		return
	}
	targetStats := getDeliveryServiceStats(targetRes)

	for ssKey := range sourceStats {
		ts := targetStats[ssKey]
		ss := sourceStats[ssKey]
		if ts.value > ss.value {
			//fmt.Printf("target value %v is at least equal to source value %v\n", ts.value, ss.value)
			continue //target value is bigger so leave it
		}
		statTime, _ := time.Parse(time.RFC3339, ss.t)
		tags := map[string]string{
			"cdn":             ss.cdn,
			"cachegroup":      ss.cacheGroup,
			"deliveryservice": ss.deliveryService,
		}
		fields := map[string]interface{}{
			"value": ss.value,
		}
		pt, err := influx.NewPoint(
			statName,
			tags,
			fields,
			statTime,
		)
		if err != nil {
			fmt.Printf("error adding creating point for %v...%v\n", statName, err)
			continue
		}
		bps.AddPoint(pt)
	}
	targetClient.Write(bps)
}
Example #5
File: middleware.go Project: go-ndn/health
// InfluxDB writes interest processing time to InfluxDB.
//
// The data collected can be viewed with: SELECT "value" FROM :name WHERE "name" = ':interest_name'.
func InfluxDB(client influxdb.Client, db, name string, tags map[string]string) mux.Middleware {
	return func(next mux.Handler) mux.Handler {
		return mux.HandlerFunc(func(w ndn.Sender, i *ndn.Interest) {
			before := time.Now()
			next.ServeNDN(w, i)
			t := make(map[string]string)
			for k, v := range tags {
				t[k] = v
			}
			t["name"] = i.Name.String()
			pt, _ := influxdb.NewPoint(name, t, map[string]interface{}{
				"value": float64(time.Since(before)) / float64(time.Millisecond),
			}, time.Now())
			bp, _ := influxdb.NewBatchPoints(influxdb.BatchPointsConfig{
				Database: db,
			})
			bp.AddPoint(pt)

			err := client.Write(bp)
			if err != nil {
				log.Println(err)
				return
			}
		})
	}
}
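The doc comment above mentions reading the data back with an InfluxQL SELECT. A minimal sketch of issuing that query with the same client package, assuming the client, db, and name values are the ones passed to InfluxDB(); the interest name is a placeholder:
// Sketch: read back the recorded processing times for a single interest name.
// The interest name '/example/interest' is a placeholder value.
func queryProcessingTimes(client influxdb.Client, db, name string) error {
	cmd := fmt.Sprintf(`SELECT "value" FROM %q WHERE "name" = '/example/interest'`, name)
	resp, err := client.Query(influxdb.NewQuery(cmd, db, ""))
	if err != nil {
		return err
	}
	if resp.Error() != nil {
		return resp.Error()
	}
	for _, result := range resp.Results {
		for _, row := range result.Series {
			fmt.Println(row.Name, row.Values)
		}
	}
	return nil
}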
Example #6
func calcDailySummary(now time.Time, config StartupConfig, runningConfig RunningConfig) {
	log.Infof("lastSummaryTime is %v", runningConfig.LastSummaryTime)
	if runningConfig.LastSummaryTime.Day() != now.Day() {
		startTime := now.Truncate(24 * time.Hour).Add(-24 * time.Hour)
		endTime := startTime.Add(24 * time.Hour)
		log.Info("Summarizing from ", startTime, " (", startTime.Unix(), ") to ", endTime, " (", endTime.Unix(), ")")

		// influx connection
		influxClient, err := influxConnect(config, runningConfig)
		if err != nil {
			log.Error("Could not connect to InfluxDb to get daily summary stats!!")
			errHndlr(err, ERROR)
			return
		}

		bp, _ := influx.NewBatchPoints(influx.BatchPointsConfig{
			Database:        "daily_stats",
			Precision:       "s",
			RetentionPolicy: config.DailySummaryRetentionPolicy,
		})

		calcDailyMaxGbps(influxClient, bp, startTime, endTime, config)
		calcDailyBytesServed(influxClient, bp, startTime, endTime, config)
		log.Info("Collected daily stats @ ", now)
	}
}
Example #7
// NewResultsPointBatch creates a new batch of points for the results
func (st *StressTest) NewResultsPointBatch() influx.BatchPoints {
	bp, _ := influx.NewBatchPoints(influx.BatchPointsConfig{
		Database:  st.TestDB,
		Precision: "ns",
	})
	return bp
}
Example #8
File: tick.go Project: tamseo/nii-finance
func (t *tickData) TickHandler(ctx context.Context, tick *tickRecorder.Tick) error {
	var err error
	t.log.Infoln("Received data")
	tags := map[string]string{"pair": "AUDUSD"}
	fields := map[string]interface{}{
		"bid":  tick.Bid,
		"ask":  tick.Ask,
		"last": tick.Last,
	}

	point, err := influx.NewPoint("tick_data", tags, fields, time.Unix(0, tick.Time))

	bp, err := influx.NewBatchPoints(influx.BatchPointsConfig{
		Database:  "tick",
		Precision: "ns",
	})

	bp.AddPoint(point)
	t.log.Infoln("Created batch point:", bp)
	if influxErr := t.influx.Write(bp); influxErr != nil {
		t.log.Error(influxErr)
	}

	return err
}
Example #9
func createInfluxDBMetrics(ping Ping) (influxdbclient.BatchPoints, error) {
	var err error
	bp, err := influxdbclient.NewBatchPoints(influxdbclient.BatchPointsConfig{
		Database:  receiverDatabaseFlag,
		Precision: "s",
	})
	if err != nil {
		return nil, err
	}

	tags := map[string]string{
		"origin":      ping.origin,
		"destination": ping.destination,
	}
	fields := map[string]interface{}{
		"loss": ping.stats.loss,
		"min":  ping.stats.min,
		"avg":  ping.stats.avg,
		"max":  ping.stats.max,
		"mdev": ping.stats.mdev,
	}
	pt, err := influxdbclient.NewPoint("ping", tags, fields, time.Unix(ping.time, 0))
	if err != nil {
		return nil, err
	}

	bp.AddPoint(pt)
	return bp, nil
}
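A minimal usage sketch completing the flow, assuming a Ping value and an influxdbclient.Client created elsewhere:
// Sketch: build the batch for one ping result and write it with an existing client.
func reportPing(c influxdbclient.Client, p Ping) error {
	bp, err := createInfluxDBMetrics(p)
	if err != nil {
		return err
	}
	return c.Write(bp)
}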
Example #10
File: influx.go Project: basvanbeek/kit
// WriteTo flushes the buffered content of the metrics to the writer, in an
// Influx BatchPoints format. WriteTo abides best-effort semantics, so
// observations are lost if there is a problem with the write. Clients should be
// sure to call WriteTo regularly, ideally through the WriteLoop helper method.
func (in *Influx) WriteTo(w BatchPointsWriter) (err error) {
	bp, err := influxdb.NewBatchPoints(in.conf)
	if err != nil {
		return err
	}

	now := time.Now()

	in.counters.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool {
		fields := fieldsFrom(lvs)
		fields["count"] = sum(values)
		var p *influxdb.Point
		p, err = influxdb.NewPoint(name, in.tags, fields, now)
		if err != nil {
			return false
		}
		bp.AddPoint(p)
		return true
	})
	if err != nil {
		return err
	}

	in.gauges.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool {
		fields := fieldsFrom(lvs)
		fields["value"] = last(values)
		var p *influxdb.Point
		p, err = influxdb.NewPoint(name, in.tags, fields, now)
		if err != nil {
			return false
		}
		bp.AddPoint(p)
		return true
	})
	if err != nil {
		return err
	}

	in.histograms.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool {
		fields := fieldsFrom(lvs)
		ps := make([]*influxdb.Point, len(values))
		for i, v := range values {
			fields["value"] = v // overwrite each time
			ps[i], err = influxdb.NewPoint(name, in.tags, fields, now)
			if err != nil {
				return false
			}
		}
		bp.AddPoints(ps)
		return true
	})
	if err != nil {
		return err
	}

	return w.Write(bp)
}
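The WriteLoop helper mentioned in the doc comment is not part of this listing; a minimal sketch of driving WriteTo on a ticker instead, assuming an *Influx value and any BatchPointsWriter (for example an influxdb.Client); the 10-second interval is an assumption:
// Sketch: flush buffered metrics on a fixed interval until the context is cancelled.
func reportLoop(ctx context.Context, in *Influx, w BatchPointsWriter) {
	ticker := time.NewTicker(10 * time.Second) // assumed reporting period
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			if err := in.WriteTo(w); err != nil {
				log.Println("influx write failed:", err)
			}
		case <-ctx.Done():
			return
		}
	}
}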
Example #11
func NewReporter(conf flux.BatchPointsConfig, stats chan flux.BatchPoints, points chan *flux.Point) (*Reporter, error) {
	bp, err := flux.NewBatchPoints(conf)
	if err != nil {
		return nil, err
	}
	r := &Reporter{
		accum:  stats,
		batch:  bp,
		conf:   conf,
		points: points,
	}
	return r, nil
}
Example #12
func TestStoreFrontBatcher(t *testing.T) {
	sf, _, _ := NewTestStoreFront()
	bpconf := influx.BatchPointsConfig{
		Database:  fmt.Sprintf("_%v", sf.TestName),
		Precision: "ns",
	}
	bp, _ := influx.NewBatchPoints(bpconf)
	pt := NewBlankTestPoint()
	bp = sf.batcher(pt, bp, bpconf)
	if len(bp.Points()) != 1 {
		t.Fail()
	}
}
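The NewBlankTestPoint helper used by this test (and the next one) is not shown; a hypothetical sketch of such a helper, with placeholder measurement and field names:
// Hypothetical sketch: a throwaway point with no tags and a single field.
func NewBlankTestPoint() *influx.Point {
	pt, _ := influx.NewPoint(
		"test_point", // placeholder measurement name
		map[string]string{},
		map[string]interface{}{"value": 0},
		time.Now(),
	)
	return pt
}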
Example #13
func TestStressTestBatcher(t *testing.T) {
	sf, _, _ := NewTestStressTest()
	bpconf := influx.BatchPointsConfig{
		Database:  sf.TestDB,
		Precision: "ns",
	}
	bp, _ := influx.NewBatchPoints(bpconf)
	pt := NewBlankTestPoint()
	bp = sf.batcher(pt, bp)
	if len(bp.Points()) != 1 {
		t.Fail()
	}
}
Example #14
func (hook *InfluxDBHook) newBatchPoints() (err error) {
	// make sure we're only creating new batch points when we don't already have them
	if hook.batchP != nil {
		return nil
	}
	hook.batchP, err = influxdb.NewBatchPoints(influxdb.BatchPointsConfig{
		Database:  hook.database,
		Precision: hook.precision,
	})
	if err != nil {
		return err
	}
	return nil
}
Example #15
func logStatistic(p *influx.Point) {
	influxBatchPoints.AddPoint(p)
	if len(influxBatchPoints.Points()) < conf.InfluxBatchPointsCount {
		return
	}

	if err := influxClient.Write(influxBatchPoints); err != nil {
		log.Println("Failed to dump point to influxdb: ", err.Error())
	}
	influxBatchPoints, _ = influx.NewBatchPoints(
		influx.BatchPointsConfig{
			Database:  conf.InfluxDB,
			Precision: "s",
		})
}
Example #16
func (a *Reporter) Start() hitman.KillChannel {
	done := hitman.NewKillChannel()
	tripwire := 1 * time.Second
	tic := time.NewTicker(tripwire).C
	last := time.Now().UnixNano()
	_1000ms := time.Duration(1000)
	fuzz := time.Duration(10)

	go func() {
		for {
			select {
			case cleaner := <-done:
				cleaner.WaitGroup.Done()
				fmt.Println("Closing reporter")
				return
			case p1 := <-a.points:
				a.batch.AddPoint(p1)
				n := len(a.points)
				for i := 0; i < n; i++ {
					a.batch.AddPoint(<-a.points)
				}
			case <-tic:
				now := time.Now().UnixNano()
				diff := now - last
				delta := time.Duration(diff) / time.Millisecond
				isOverdue := _1000ms < (delta + fuzz)

				if !isOverdue {
					continue
				}
				fmt.Println(len(a.accum), len(a.points), len(a.batch.Points()))
				bp := a.batch
				newPts, err := flux.NewBatchPoints(a.conf)
				if err != nil {
					fmt.Println(err)
				}
				a.batch = newPts
				a.accum <- bp
				last = time.Now().UnixNano()
			}
		}
	}()

	return done
}
Example #17
// Batches incoming Result.Point values and sends them once the batch reaches 5k in size
func (sf *StoreFront) batcher(pt *influx.Point, bp influx.BatchPoints, bpconf influx.BatchPointsConfig) influx.BatchPoints {
	// If the batch holds fewer than 5k points, add the point and return
	if len(bp.Points()) <= 5000 {
		bp.AddPoint(pt)
	} else {
		// Otherwise send the batch
		err := sf.ResultsClient.Write(bp)

		// Check error
		if err != nil {
			log.Fatalf("Error writing performance stats\n  error: %v\n", err)
		}

		// Reset the batch of points
		bp, _ = influx.NewBatchPoints(bpconf)
	}
	return bp
}
Example #18
File: stats.go Project: Pixelgaffer/dicod
func (s *Stats) initClient() (err error) {
	log.WithField("Addr", s.influxConfig.Addr).Debug("connecting to influxdb")
	s.client, err = influx.NewHTTPClient(s.influxConfig)
	if err != nil {
		return err
	}
	defer s.client.Close()
	ping, _, err := s.client.Ping(time.Second)
	if err != nil {
		return err
	}
	log.WithField("ping", ping).Debug("influx ping")

	s.bp, err = influx.NewBatchPoints(influx.BatchPointsConfig{
		Database:  "dicod",
		Precision: "s",
	})
	return err
}
Example #19
File: influxdb_test.go Project: crezam/kit
func TestCounterWithTags(t *testing.T) {
	expectedName := "test_counter"
	expectedTags := map[string]string{
		"key1": "value1",
		"key2": "value2",
	}
	expectedFields := []map[string]interface{}{
		{"value": "2"},
		{"Test": "Test", "value": "7"},
		{"Test": "Test", "value": "10"},
	}

	cl := &mockClient{}
	cl.Add(3)
	bp, _ := stdinflux.NewBatchPoints(stdinflux.BatchPointsConfig{
		Database:  "testing",
		Precision: "s",
	})

	tags := []metrics.Field{}
	for key, value := range expectedTags {
		tags = append(tags, metrics.Field{Key: key, Value: value})
	}

	triggerChan := make(chan time.Time)
	counter := influxdb.NewCounterTick(cl, bp, expectedName, tags, triggerChan)
	counter.Add(2)
	counter = counter.With(metrics.Field{Key: "Test", Value: "Test"})
	counter.Add(5)
	counter.Add(3)

	triggerChan <- time.Now()
	cl.Wait()

	for i := 0; i <= 2; i++ {
		givenPoint := mockPoint{
			Name:   expectedName,
			Tags:   expectedTags,
			Fields: expectedFields[i],
		}
		comparePoint(t, i, givenPoint, cl.Points[i])
	}
}
Example #20
func (self *InfluxdbStorage) AddStats(infotype string, measurement string, content interface{}) error {
	if infotype == Infotypepacket {
		// assert the interface value to *metrics.HttpTransaction
		httpinstance, ok := content.(*metrics.HttpTransaction)
		if !ok {
			return errors.New("failed to convert content to *metrics.HttpTransaction")
		}
		influxclient := self.Influxclient
		// create a batch of points and write it

		bp, _ := influxpackage.NewBatchPoints(influxpackage.BatchPointsConfig{
			Database:  self.Database,
			Precision: "us",
		})
		fields := map[string]interface{}{
			"respondtime": httpinstance.Respondtime,
		}
		tags := map[string]string{
			"Srcip":    httpinstance.Srcip,
			"Srcport":  httpinstance.Srcport,
			"Destip":   httpinstance.Destip,
			"Destport": httpinstance.Destport,
			// omitted: nested request/response detail structs are not flattened here
			// "Requestdetail": httpinstance.Packetdetail.Requestdetail,
			// "Responddetail": httpinstance.Packetdetail.Responddetail,
		}
		fmt.Println("**the measurement**", measurement)
		point, err := influxpackage.NewPoint(measurement, tags, fields, time.Now())
		if err != nil {
			return err
		}
		fmt.Println("the point name:", point.Name())
		bp.AddPoint(point)
		influxclient.Write(bp)
	}
	//common metric info
	if infotype == Infotypemetric {

	}

	return nil
}
Example #21
// Starts a goroutine that listens for Results
func (sf *StoreFront) resultsListen() {

	// Make sure databases for results are created
	sf.createDatabase(fmt.Sprintf("_%v", sf.TestName))
	sf.createDatabase(sf.TestName)

	// Listen for Responses
	go func() {

		// Prepare a BatchPointsConfig
		bpconf := influx.BatchPointsConfig{
			Database:  fmt.Sprintf("_%v", sf.TestName),
			Precision: "ns",
		}

		// Prepare the first batch of points
		bp, _ := influx.NewBatchPoints(bpconf)

		// TODO: Panics on resp.Tracer.Done() if there are too many 500s in a row
		// Loop over ResultsChan
		for resp := range sf.ResultsChan {
			switch resp.Point.Name() {
			// If the done point comes down the channel write the results
			case "done":
				sf.ResultsClient.Write(bp)
				// Decrement the tracer
				resp.Tracer.Done()
			// By default fall back to the batcher
			default:
				// Add the StoreFront tags
				pt := resp.AddTags(sf.tags())
				// Add the point to the batch
				bp = sf.batcher(pt, bp, bpconf)
				// Decrement the tracer
				resp.Tracer.Done()
			}
		}

	}()
}
Example #22
func sendMetrics(config StartupConfig, runningConfig RunningConfig, bps influx.BatchPoints, retry bool) {
	influxClient, err := influxConnect(config, runningConfig)
	if err != nil {
		if retry {
			config.BpsChan <- bps
		}
		errHndlr(err, ERROR)
		return
	}

	pts := bps.Points()
	for len(pts) > 0 {
		chunkBps, err := influx.NewBatchPoints(influx.BatchPointsConfig{
			Database:        bps.Database(),
			Precision:       bps.Precision(),
			RetentionPolicy: bps.RetentionPolicy(),
		})
		if err != nil {
			if retry {
				config.BpsChan <- chunkBps
			}
			errHndlr(err, ERROR)
		}
		for _, p := range pts[:intMin(config.MaxPublishSize, len(pts))] {
			chunkBps.AddPoint(p)
		}
		pts = pts[intMin(config.MaxPublishSize, len(pts)):]

		err = influxClient.Write(chunkBps)
		if err != nil {
			if retry {
				config.BpsChan <- chunkBps
			}
			errHndlr(err, ERROR)
		} else {
			log.Info(fmt.Sprintf("Sent %v stats for %v", len(chunkBps.Points()), chunkBps.Database()))
		}
	}
}
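The chunking loop above relies on an intMin helper that is not included in the example; a minimal sketch of the assumed helper:
// Sketch of the assumed helper: the smaller of two ints, used to cap each
// chunk at config.MaxPublishSize points.
func intMin(a, b int) int {
	if a < b {
		return a
	}
	return b
}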
Example #23
File: influxdb_test.go Project: crezam/kit
func TestGauge(t *testing.T) {
	expectedName := "test_gauge"
	expectedTags := map[string]string{}
	expectedFields := []map[string]interface{}{
		{"value": 2.1},
		{"value": 1.0},
		{"value": 10.5},
	}

	cl := &mockClient{}
	cl.Add(3)
	bp, _ := stdinflux.NewBatchPoints(stdinflux.BatchPointsConfig{
		Database:  "testing",
		Precision: "s",
	})

	tags := []metrics.Field{}
	for key, value := range expectedTags {
		tags = append(tags, metrics.Field{Key: key, Value: value})
	}

	triggerChan := make(chan time.Time)
	counter := influxdb.NewGaugeTick(cl, bp, expectedName, tags, triggerChan)
	counter.Add(2.1)
	counter.Set(1)
	counter.Add(9.5)

	triggerChan <- time.Now()
	cl.Wait()

	for i := 0; i <= 2; i++ {
		givenPoint := mockPoint{
			Name:   expectedName,
			Tags:   expectedTags,
			Fields: expectedFields[i],
		}
		comparePoint(t, i, givenPoint, cl.Points[i])
	}
}
Example #24
func Msg2Series(msgs []Message, database string) client.BatchPoints {

	// Create a new point batch
	bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
		Database:  database,
		Precision: "s",
	})

	for _, msg := range msgs {
		if msg.Topic == "" && len(msg.Payload) == 0 {
			break
		}
		tokens := strings.Split(msg.Topic, "/")
		if len(tokens) < 2 {
			break
		}
		j, err := MsgParse(msg.Payload)
		if err != nil {
			log.Warn(err)
			continue
		}
		//name := strings.Replace(msg.Topic, "/", ".", -1)
		name := strings.Join(tokens[1:], "_")
		tags := map[string]string{
			"device": tokens[0],
		}
		pt, err := client.NewPoint(name, tags, j, time.Now())

		if err != nil {
			break
		}
		bp.AddPoint(pt)

		fmt.Printf("%+v\n", pt)
	}

	return bp
}
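A minimal usage sketch writing the returned batch, assuming a slice of parsed Message values; the address and database name are placeholders:
// Sketch: convert messages to a batch and write it to an InfluxDB instance.
func writeMessages(msgs []Message) error {
	c, err := client.NewHTTPClient(client.HTTPConfig{
		Addr: "http://localhost:8086", // placeholder address
	})
	if err != nil {
		return err
	}
	defer c.Close()

	bp := Msg2Series(msgs, "sensors") // "sensors" is a placeholder database name
	return c.Write(bp)
}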
Example #25
// WriteMetrics is called from a handler to write the eru and qos metrics to the
// influxdb database.
func WriteMetrics(eru, qos float64, trafficPattern string) error {
	influxClient, err := createClient()

	if err != nil {
		log.Println("Error getting the HTTP client for Influx.")
		return err
	}

	// After this function terminates, close the HTTP client connection.
	defer influxClient.Close()

	bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
		Database: databaseName,
		// @TODO Does "s" make sense
		Precision: "s",
	})

	tags := map[string]string{
		"sm":  scalingMethod,
		"tp":  trafficPattern,
		"pit": podInitializationTime,
		"ver": version,
	}
	fields := map[string]interface{}{
		"eru": eru,
		"qos": qos,
	}

	pt, err := client.NewPoint(PointName, tags, fields, time.Now())
	if err != nil {
		log.Println("Error creating the new point.")
		return err
	}

	bp.AddPoint(pt)

	return influxClient.Write(bp)
}
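The createClient helper is not part of this example; a hypothetical sketch of what it might look like, with placeholder connection settings:
// Hypothetical sketch: build an HTTP client for InfluxDB from fixed settings.
func createClient() (client.Client, error) {
	return client.NewHTTPClient(client.HTTPConfig{
		Addr:     "http://localhost:8086", // placeholder
		Username: "user",                  // placeholder
		Password: "pass",                  // placeholder
	})
}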
Example #26
func calcCacheValues(trafmonData []byte, cdnName string, sampleTime int64, cacheMap map[string]traffic_ops.Server, config StartupConfig) error {

	type CacheStatsJSON struct {
		Pp     string `json:"pp"`
		Date   string `json:"date"`
		Caches map[string]map[string][]struct {
			Index uint64 `json:"index"`
			Time  int    `json:"time"`
			Value string `json:"value"`
			Span  uint64 `json:"span"`
		} `json:"caches"`
	}
	var jData CacheStatsJSON
	err := json.Unmarshal(trafmonData, &jData)
	if err != nil {
		return fmt.Errorf("could not unmarshall cache stats JSON - %v", err)
	}

	statCount := 0
	bps, err := influx.NewBatchPoints(influx.BatchPointsConfig{
		Database:        "cache_stats",
		Precision:       "ms",
		RetentionPolicy: config.CacheRetentionPolicy,
	})
	if err != nil {
		errHndlr(err, ERROR)
	}
	for cacheName, cacheData := range jData.Caches {
		cache := cacheMap[cacheName]

		for statName, statData := range cacheData {
			dataKey := statName
			dataKey = strings.Replace(dataKey, ".bandwidth", ".kbps", 1)
			dataKey = strings.Replace(dataKey, "-", "_", -1)

			//Get the stat time and convert to epoch
			statTime := strconv.Itoa(statData[0].Time)
			msInt, err := strconv.ParseInt(statTime, 10, 64)
			if err != nil {
				errHndlr(err, ERROR)
			}

			newTime := time.Unix(0, msInt*int64(time.Millisecond))
			//Get the stat value and convert to float
			statValue := statData[0].Value
			statFloatValue, err := strconv.ParseFloat(statValue, 64)
			if err != nil {
				statFloatValue = 0.00
			}
			tags := map[string]string{
				"cachegroup": cache.Cachegroup,
				"hostname":   cacheName,
				"cdn":        cdnName,
				"type":       cache.Type,
			}

			fields := map[string]interface{}{
				"value": statFloatValue,
			}
			pt, err := influx.NewPoint(
				dataKey,
				tags,
				fields,
				newTime,
			)
			if err != nil {
				errHndlr(err, ERROR)
				continue
			}
			bps.AddPoint(pt)
			statCount++
		}
	}
	config.BpsChan <- bps
	log.Info("Collected ", statCount, " cache stats values for ", cdnName, " @ ", sampleTime)
	return nil
}
Example #27
/* the ds json looks like:
{
  "deliveryService": {
    "linear-gbr-hls-sbr": {
      "	.us-ma-woburn.kbps": [{
        "index": 520281,
        "time": 1398893383605,
        "value": "0",
        "span": 520024
      }],
      "location.us-de-newcastle.kbps": [{
        "index": 520281,
        "time": 1398893383605,
        "value": "0",
        "span": 517707
      }],
    }
 }
*/
func calcDsValues(rascalData []byte, cdnName string, sampleTime int64, config StartupConfig) error {
	type DsStatsJSON struct {
		Pp              string `json:"pp"`
		Date            string `json:"date"`
		DeliveryService map[string]map[string][]struct {
			Index uint64 `json:"index"`
			Time  int    `json:"time"`
			Value string `json:"value"`
			Span  uint64 `json:"span"`
		} `json:"deliveryService"`
	}

	var jData DsStatsJSON
	err := json.Unmarshal(rascalData, &jData)
	if err != nil {
		return fmt.Errorf("could not unmarshall deliveryservice stats JSON - %v", err)
	}

	statCount := 0
	bps, _ := influx.NewBatchPoints(influx.BatchPointsConfig{
		Database:        "deliveryservice_stats",
		Precision:       "ms",
		RetentionPolicy: config.DsRetentionPolicy,
	})
	for dsName, dsData := range jData.DeliveryService {
		for dsMetric, dsMetricData := range dsData {
			var cachegroup, statName string
			tags := map[string]string{
				"deliveryservice": dsName,
				"cdn":             cdnName,
			}

			s := strings.Split(dsMetric, ".")
			if strings.Contains(dsMetric, "type.") {
				cachegroup = "all"
				statName = s[2]
				tags["type"] = s[1]
			} else if strings.Contains(dsMetric, "total.") {
				cachegroup, statName = s[0], s[1]
			} else {
				cachegroup, statName = s[1], s[2]
			}

			tags["cachegroup"] = cachegroup

			//convert stat time to epoch
			statTime := strconv.Itoa(dsMetricData[0].Time)
			msInt, err := strconv.ParseInt(statTime, 10, 64)
			if err != nil {
				errHndlr(err, ERROR)
			}

			newTime := time.Unix(0, msInt*int64(time.Millisecond))
			//convert stat value to float
			statValue := dsMetricData[0].Value
			statFloatValue, err := strconv.ParseFloat(statValue, 64)
			if err != nil {
				statFloatValue = 0.0
			}
			fields := map[string]interface{}{
				"value": statFloatValue,
			}
			pt, err := influx.NewPoint(
				statName,
				tags,
				fields,
				newTime,
			)
			if err != nil {
				errHndlr(err, ERROR)
				continue
			}
			bps.AddPoint(pt)
			statCount++
		}
	}
	config.BpsChan <- bps
	log.Info("Collected ", statCount, " deliveryservice stats values for ", cdnName, " @ ", sampleTime)
	return nil
}
Example #28
func calcDailySummary(now time.Time, config StartupConfig, runningConfig RunningConfig) {
	log.Infof("lastSummaryTime is %v", runningConfig.LastSummaryTime)
	if runningConfig.LastSummaryTime.Day() != now.Day() {
		startTime := now.Truncate(24 * time.Hour).Add(-24 * time.Hour)
		endTime := startTime.Add(24 * time.Hour)
		log.Info("Summarizing from ", startTime, " (", startTime.Unix(), ") to ", endTime, " (", endTime.Unix(), ")")

		// influx connection
		influxClient, err := influxConnect(config, runningConfig)
		if err != nil {
			log.Error("Could not connect to InfluxDb to get daily summary stats!!")
			errHndlr(err, ERROR)
			return
		}

		//create influxdb query
		q := fmt.Sprintf("SELECT sum(value)/6 FROM bandwidth where time > '%s' and time < '%s' group by time(60s), cdn fill(0)", startTime.Format(time.RFC3339), endTime.Format(time.RFC3339))
		log.Infof(q)
		res, err := queryDB(influxClient, q, "cache_stats")
		if err != nil {
			errHndlr(err, ERROR)
			return
		}

		bp, _ := influx.NewBatchPoints(influx.BatchPointsConfig{
			Database:        "daily_stats",
			Precision:       "s",
			RetentionPolicy: config.DailySummaryRetentionPolicy,
		})
		for _, row := range res[0].Series {
			prevtime := startTime
			max := float64(0)
			bytesServed := float64(0)
			cdn := row.Tags["cdn"]
			for _, record := range row.Values {
				kbps, err := record[1].(json.Number).Float64()
				if err != nil {
					errHndlr(err, ERROR)
					continue
				}
				sampleTime, err := time.Parse(time.RFC3339, record[0].(string))
				if err != nil {
					errHndlr(err, ERROR)
					continue
				}
				max = floatMax(max, kbps)
				duration := sampleTime.Unix() - prevtime.Unix()
				bytesServed += float64(duration) * kbps / 8
				prevtime = sampleTime
			}
			maxGbps := max / 1000000
			bytesServedTb := bytesServed / 1000000000
			log.Infof("max gbps for cdn %v = %v", cdn, maxGbps)
			log.Infof("Tbytes served for cdn %v = %v", cdn, bytesServedTb)

			//write daily_maxgbps in traffic_ops
			var statsSummary traffic_ops.StatsSummary
			statsSummary.CDNName = cdn
			statsSummary.DeliveryService = "all"
			statsSummary.StatName = "daily_maxgbps"
			statsSummary.StatValue = strconv.FormatFloat(maxGbps, 'f', 2, 64)
			statsSummary.SummaryTime = now.Format(time.RFC3339)
			statsSummary.StatDate = startTime.Format("2006-01-02")
			go writeSummaryStats(config, statsSummary)

			tags := map[string]string{
				"deliveryservice": statsSummary.DeliveryService,
				"cdn":             statsSummary.CDNName,
			}

			fields := map[string]interface{}{
				"value": maxGbps,
			}
			pt, err := influx.NewPoint(
				statsSummary.StatName,
				tags,
				fields,
				startTime,
			)
			if err != nil {
				errHndlr(err, ERROR)
				continue
			}
			bp.AddPoint(pt)

			// write bytes served data to traffic_ops
			statsSummary.StatName = "daily_bytesserved"
			statsSummary.StatValue = strconv.FormatFloat(bytesServedTb, 'f', 2, 64)
			go writeSummaryStats(config, statsSummary)
			fields = map[string]interface{}{
				"value": bytesServedTb,
			}
			pt, err = influx.NewPoint(
				statsSummary.StatName,
				tags,
				fields,
				startTime,
			)
			if err != nil {
				errHndlr(err, ERROR)
				continue
			}
			bp.AddPoint(pt)
		}
		config.BpsChan <- bp
		log.Info("Collected daily stats @ ", now)
	}
}
Example #29
// NewSender returns a function that will accept datapoints to send to influxdb
func NewSender(
	config interface{},
	batch client.BatchPointsConfig,
	batchSize int,
	queueSize int,
	flush int,
	errFunc func(error),
) (Sender, error) {
	if batchSize <= 0 {
		batchSize = DefaultBatchSize
	}
	if queueSize <= 0 {
		queueSize = DefaultQueueSize
	}
	if flush <= 0 {
		flush = DefaultFlush
	}

	var conn client.Client
	var err error

	switch conf := config.(type) {
	case client.HTTPConfig:
		conn, err = client.NewHTTPClient(conf)
		if err != nil {
			return nil, errors.Wrap(err, "error creating HTTPClient")
		}

		_, _, err = conn.Ping(conf.Timeout)
		if err != nil {
			return nil, fmt.Errorf("cannot ping influxdb server: %s", conf.Addr)
		}

		if err := dbCheck(conn, batch.Database); err != nil {
			return nil, errors.Wrapf(err, "check for database %s failed", batch.Database)
		}
	case client.UDPConfig:
		conn, err = client.NewUDPClient(conf)
		if err != nil {
			return nil, errors.Wrap(err, "error creating UDPClient")
		}
	}

	pts := make(chan *client.Point, queueSize)

	bp, err := client.NewBatchPoints(batch)
	if err != nil {
		return nil, errors.Wrap(err, "batchpoints error")
	}

	go func() {
		delay := time.Duration(flush) * time.Second
		tick := time.Tick(delay)
		count := 0
		for {
			select {
			case p := <-pts:
				bp.AddPoint(p)
				count++
				if count < batchSize {
					continue
				}
			case <-tick:
				if len(bp.Points()) == 0 {
					continue
				}
			}
			for {
				if err := conn.Write(bp); err != nil {
					if errFunc != nil {
						errFunc(err)
					}
					time.Sleep(retry)
					continue
				}
				bp, _ = client.NewBatchPoints(batch)
				count = 0
				break
			}
		}
	}()

	return func(key string, tags map[string]string, fields map[string]interface{}, ts time.Time) error {
		pt, err := client.NewPoint(key, tags, fields, ts)
		if err != nil {
			return err
		}
		pts <- pt
		return nil
	}, nil
}
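A minimal usage sketch, assuming placeholder connection and database settings; zero values for the size and flush arguments fall back to the package defaults as shown above:
// Sketch: construct a sender over HTTP and queue a single datapoint.
func example() error {
	send, err := NewSender(
		client.HTTPConfig{Addr: "http://localhost:8086"}, // placeholder address
		client.BatchPointsConfig{Database: "metrics", Precision: "s"},
		0, 0, 0, // use DefaultBatchSize, DefaultQueueSize, DefaultFlush
		func(err error) { log.Println("influx write error:", err) },
	)
	if err != nil {
		return err
	}
	// The background goroutine batches this point and writes it on the next flush.
	return send("cpu_load",
		map[string]string{"host": "web01"},
		map[string]interface{}{"value": 0.64},
		time.Now())
}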
Example #30
File: main.go Project: cpurta/go-crawler
func Crawl(searchUrl string, depth int, fetcher Fetcher, redisClient *redis.Client, influxClient influx.Client) {
	throttle <- 1

	if depth <= 0 {
		return
	}

	fmt.Printf("Depth: %d Crawling: %s\n", depth, searchUrl)

	bp, _ := influx.NewBatchPoints(influx.BatchPointsConfig{
		Database:  "crawler",
		Precision: "s",
	})

	host, err := url.Parse(searchUrl)

	// Send this to our redis queue for indexing
	if err != nil {
		redisClient.LPush("unknown_url_crawler_queue", searchUrl)
	} else {
		redisClient.LPush(host.Host+"_crawler_queue", searchUrl)
	}

	urlcache.lock.Lock()
	urlcache.m[searchUrl] = crawlError
	urlcache.lock.Unlock()

	// measure how long it takes to fetch all URLs on the page
	startFetch := time.Now()
	urls, err := fetcher.Fetch(searchUrl)
	crawlTime := time.Since(startFetch)

	if err != nil {
		fmt.Printf("Error fetching results from %s: %s\n", searchUrl, err.Error())

	} else {
		fmt.Printf("Finished crawling %s in %.2f seconds\n", searchUrl, crawlTime.Seconds())
	}

	tags := map[string]string{
		"domain": host.String(),
	}

	fields := map[string]interface{}{
		"urls_found":         len(urls),
		"crawl_time":         crawlTime.Nanoseconds(),
		"total_urls_crawled": len(urlcache.m),
		"urls_by_page":       len(urls),
	}

	point, _ := influx.NewPoint(
		"crawl_usage",
		tags,
		fields,
		time.Now(),
	)

	// add data point to influx
	bp.AddPoint(point)

	if err := influxClient.Write(bp); err != nil {
		log.Printf("Unable to write batch point to influxdb: %s\n", err.Error())
	}

	var wg sync.WaitGroup

	for _, u := range urls {
		if !urlTest.MatchString(u) {
			u = "http://" + host.Host + u
		}

		urlcache.lock.Lock()
		_, crawled := urlcache.m[u]
		urlcache.lock.Unlock()

		if validURL.MatchString(u) && urlTest.MatchString(u) && !crawled {
			wg.Add(1)
			go func(u string, depth int, fetcher Fetcher, redisClient *redis.Client, influxClient influx.Client) {
				defer wg.Done()
				Crawl(u, depth-1, fetcher, redisClient, influxClient)
			}(u, depth, fetcher, redisClient, influxClient)
		}
	}

	<-throttle
	wg.Wait()
}