func syncDeliveryServiceStat(sourceClient influx.Client, targetClient influx.Client, statName string, days int) {

	db := "deliveryservice_stats"
	bps, err := influx.NewBatchPoints(influx.BatchPointsConfig{
		Database:        db,
		Precision:       "ms",
		RetentionPolicy: "monthly",
	})
	if err != nil {
		errorMessage = fmt.Sprintf("An error occurred creating batch points for %s: %v\n", statName, err)
		fmt.Println(errorMessage)
		return
	}

	queryString := fmt.Sprintf("select time, cachegroup, cdn, deliveryservice, value from \"monthly\".\"%s\"", statName)
	if days > 0 {
		queryString += fmt.Sprintf(" where time > now() - %dd", days)
	}
	fmt.Println("queryString ", queryString)
	res, err := queryDB(sourceClient, queryString, db)
	if err != nil {
		errorMessage = fmt.Sprintf("An error occured getting %s records from sourceDb: %v\n", statName, err)
		fmt.Println(errorMessage)
		return
	}
	sourceStats := getDeliveryServiceStats(res)
	// get value from target DB
	targetRes, err := queryDB(targetClient, queryString, db)
	if err != nil {
		errorMessage = fmt.Sprintf("An error occured getting %s record from target db: %v\n", statName, err)
		fmt.Println(errorMessage)
		return
	}
	targetStats := getDeliveryServiceStats(targetRes)

	for ssKey := range sourceStats {
		ts := targetStats[ssKey]
		ss := sourceStats[ssKey]
		if ts.value > ss.value {
			//fmt.Printf("target value %v is at least equal to source value %v\n", ts.value, ss.value)
			continue //target value is bigger so leave it
		}
		statTime, err := time.Parse(time.RFC3339, ss.t)
		if err != nil {
			fmt.Printf("error parsing time %q for %v: %v\n", ss.t, statName, err)
			continue
		}
		tags := map[string]string{
			"cdn":             ss.cdn,
			"cachegroup":      ss.cacheGroup,
			"deliveryservice": ss.deliveryService,
		}
		fields := map[string]interface{}{
			"value": ss.value,
		}
		pt, err := influx.NewPoint(
			statName,
			tags,
			fields,
			statTime,
		)
		if err != nil {
			fmt.Printf("error adding creating point for %v...%v\n", statName, err)
			continue
		}
		bps.AddPoint(pt)
	}
	if err := targetClient.Write(bps); err != nil {
		fmt.Printf("An error occurred writing %s points to target db: %v\n", statName, err)
	}
}
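getDeliveryServiceStats is not shown in this example. Below is a minimal sketch of what it could look like, assuming the struct fields used in the loop above, that the query columns come back in the order they were selected (time, cachegroup, cdn, deliveryservice, value), and that the client decodes numeric values as json.Number; the struct name and map key format are hypothetical.

type deliveryServiceStat struct {
	t               string
	value           float64
	cdn             string
	cacheGroup      string
	deliveryService string
}

func getDeliveryServiceStats(res []influx.Result) map[string]deliveryServiceStat {
	stats := map[string]deliveryServiceStat{}
	if len(res) == 0 {
		return stats
	}
	asString := func(v interface{}) string {
		s, _ := v.(string)
		return s
	}
	for _, series := range res[0].Series {
		for _, row := range series.Values {
			// column order matches the select: time, cachegroup, cdn, deliveryservice, value
			if len(row) < 5 || row[4] == nil {
				continue
			}
			num, ok := row[4].(json.Number)
			if !ok {
				continue
			}
			value, err := num.Float64()
			if err != nil {
				continue
			}
			stat := deliveryServiceStat{
				t:               asString(row[0]),
				cacheGroup:      asString(row[1]),
				cdn:             asString(row[2]),
				deliveryService: asString(row[3]),
				value:           value,
			}
			// key the map so source and target rows for the same series line up
			stats[stat.t+":"+stat.cdn+":"+stat.cacheGroup+":"+stat.deliveryService] = stat
		}
	}
	return stats
}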
Example #2
// InfluxDB writes interest processing time to influxDB.
//
// The data collected can be viewed with: SELECT "value" FROM :name WHERE "name" = ':interest_name'.
func InfluxDB(client influxdb.Client, db, name string, tags map[string]string) mux.Middleware {
	return func(next mux.Handler) mux.Handler {
		return mux.HandlerFunc(func(w ndn.Sender, i *ndn.Interest) {
			before := time.Now()
			next.ServeNDN(w, i)
			t := make(map[string]string)
			for k, v := range tags {
				t[k] = v
			}
			t["name"] = i.Name.String()
			pt, err := influxdb.NewPoint(name, t, map[string]interface{}{
				"value": float64(time.Since(before)) / float64(time.Millisecond),
			}, time.Now())
			if err != nil {
				log.Println(err)
				return
			}
			bp, err := influxdb.NewBatchPoints(influxdb.BatchPointsConfig{
				Database: db,
			})
			if err != nil {
				log.Println(err)
				return
			}
			bp.AddPoint(pt)

			if err := client.Write(bp); err != nil {
				log.Println(err)
			}
		})
	}
}
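A brief usage sketch for this middleware. It relies only on the signature above; the client address, database, measurement, tag, and the serveContent handler are illustrative assumptions.

client, err := influxdb.NewHTTPClient(influxdb.HTTPConfig{Addr: "http://localhost:8086"})
if err != nil {
	log.Fatal(err)
}
// Wrap an existing handler (serveContent is a stand-in for any mux.Handler)
// so every interest it serves gets its processing time recorded.
timed := InfluxDB(client, "ndn", "interest_latency", map[string]string{"node": "router-1"})(serveContent)
_ = timed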
Example #3
func (self *Server) influxWriter(influxClient influxdb.Client, options InfluxOptions) {
	defer close(self.influxChan)
	defer influxClient.Close()

	for stat := range self.influxChan {
		tags := map[string]string{
			"id":     stat.ID.String(),
			"family": stat.ID.Family(),
			"name":   stat.SensorConfig.String(),
		}
		fields := map[string]interface{}{
			"temperature": stat.Temperature.Float64(),
		}

		// write
		// write
		point, err := influxdb.NewPoint("onewire", tags, fields, stat.Time)
		if err != nil {
			log.Printf("server.Server: influxWriter: influxdb.NewPoint: %v\n", err)
			continue
		}

		points, err := options.batchPoints()
		if err != nil {
			log.Printf("server.Server: influxWriter: newBatchPoints: %v\n", err)
			continue
		}

		points.AddPoint(point)

		if err := influxClient.Write(points); err != nil {
			log.Printf("server.Server: influxWriter: influxdb.Client %v: Write %v: %v\n", influxClient, points, err)
			continue
		}
	}
}
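options.batchPoints() is not defined in this excerpt. One plausible sketch, assuming InfluxOptions simply carries the batch configuration (the field names here are hypothetical):

type InfluxOptions struct {
	Database        string
	RetentionPolicy string
}

// batchPoints builds a fresh batch for each stat using the configured database.
func (options InfluxOptions) batchPoints() (influxdb.BatchPoints, error) {
	return influxdb.NewBatchPoints(influxdb.BatchPointsConfig{
		Database:        options.Database,
		RetentionPolicy: options.RetentionPolicy,
	})
}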
Example #4
func processPing(c <-chan Ping) error {
	var err error
	var carbonReceiver *carbon.Carbon
	var ic influxdbclient.Client

	switch receiverTypeFlag {
	case "carbon":
		carbonReceiver, err = carbon.NewCarbon(receiverHostFlag, receiverPortFlag, receiverNoopFlag, verboseFlag)
	case "influxdb":
		if receiverDatabaseFlag == "" {
			log.Fatalln("An InfluxDB database was not specified on the command line.")
		}
		if len(receiverUsernameFlag) > 0 {
			ic, err = influxdbclient.NewHTTPClient(
				influxdbclient.HTTPConfig{
					Addr:     fmt.Sprintf("http://%v:%v", receiverHostFlag, receiverPortFlag),
					Username: receiverUsernameFlag,
					Password: receiverPasswordFlag,
				})
		} else {
			ic, err = influxdbclient.NewHTTPClient(
				influxdbclient.HTTPConfig{
					Addr: fmt.Sprintf("http://%v:%v", receiverHostFlag, receiverPortFlag),
				})
		}
	}

	if err != nil {
		log.Println("error in creating a connection, but ignoring")
		//		return err
	}
	for {
		pingResult := <-c
		if !isReceiverFullyDefined() {
			log.Printf("Receiver is not fully defined: host: %v Port %v\n.",
				receiverHostFlag, receiverPortFlag)
			continue
		}
		switch receiverTypeFlag {
		case "carbon":
			err := carbonReceiver.SendMetrics(createCarbonMetrics(pingResult))
			if err != nil {
				log.Printf("Error sending metrics to Carbon: %v.\n", err)
			}
		case "influxdb":
			ret, err := createInfluxDBMetrics(pingResult)
			if err != nil {
				log.Fatalln(err)
			}
			err = ic.Write(ret)
			if err != nil {
				log.Printf("Error writing metrics to Influxdb: %v.\n", err)
			}
		}
		if verboseFlag {
			log.Printf("Successfully published %v metrics to %v.\n", receiverTypeFlag, receiverHostFlag)
		}
	}
	return nil
}
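createInfluxDBMetrics is referenced above but not shown. A rough sketch of what it could do, assuming a Ping result exposes the target host and round-trip time (Host and RTT are hypothetical field names):

func createInfluxDBMetrics(p Ping) (influxdbclient.BatchPoints, error) {
	bp, err := influxdbclient.NewBatchPoints(influxdbclient.BatchPointsConfig{
		Database:  receiverDatabaseFlag,
		Precision: "s",
	})
	if err != nil {
		return nil, err
	}
	// Host and RTT are assumed fields on Ping; adjust to the real struct.
	pt, err := influxdbclient.NewPoint(
		"ping",
		map[string]string{"host": p.Host},
		map[string]interface{}{"rtt_ms": float64(p.RTT) / float64(time.Millisecond)},
		time.Now(),
	)
	if err != nil {
		return nil, err
	}
	bp.AddPoint(pt)
	return bp, nil
}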
Example #5
func queryDB(client influx.Client, cmd string) (res []influx.Result, err error) {
	q := influx.Query{
		Command:  cmd,
		Database: "",
	}
	response, err := client.Query(q)
	if err != nil {
		return res, err
	}
	if response.Error() != nil {
		return res, response.Error()
	}
	res = response.Results
	return res, nil
}
Example #6
func queryDB(clnt influxpackage.Client, cmd string, dbname string) (res []influxpackage.Result, err error) {
	q := influxpackage.Query{
		Command:  cmd,
		Database: dbname,
	}
	if response, err := clnt.Query(q); err == nil {
		if response.Error() != nil {
			return res, response.Error()
		}
		res = response.Results
	} else {
		return res, err
	}
	return res, nil
}
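A short usage sketch for this helper. The database and query are illustrative; with the InfluxDB v1 client, each result carries its series rows in the same column order as the SELECT:

res, err := queryDB(clnt, `SELECT "value" FROM "cpu_usage" LIMIT 10`, "telegraf")
if err != nil {
	log.Fatal(err)
}
for _, result := range res {
	for _, series := range result.Series {
		for _, row := range series.Values {
			// row values line up with series.Columns
			fmt.Println(series.Name, row)
		}
	}
}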
Example #7
// NewInfluxDB returns a new InfluxDBHook.
func NewInfluxDB(config *Config, clients ...influxdb.Client) (hook *InfluxDBHook, err error) {
	if config == nil {
		config = &Config{}
	}
	config.defaults()

	var client influxdb.Client
	if len(clients) == 0 {
		client, err = hook.newInfluxDBClient(config)
		if err != nil {
			return nil, fmt.Errorf("NewInfluxDB: Error creating InfluxDB Client, %v", err)
		}
	} else if len(clients) == 1 {
		client = clients[0]
	} else {
		return nil, fmt.Errorf("NewInfluxDB: Error creating InfluxDB Client, %d is too many influxdb clients", len(clients))
	}

	// Make sure that we can connect to InfluxDB
	_, _, err = client.Ping(5 * time.Second) // if this takes more than 5 seconds then influxdb is probably down
	if err != nil {
		return nil, fmt.Errorf("NewInfluxDB: Error connecting to InfluxDB, %v", err)
	}

	hook = &InfluxDBHook{
		client:        client,
		database:      config.Database,
		measurement:   config.Measurement,
		tagList:       config.Tags,
		batchInterval: config.BatchInterval,
		batchCount:    config.BatchCount,
		precision:     config.Precision,
	}

	err = hook.autocreateDatabase()
	if err != nil {
		return nil, err
	}
	go hook.handleBatch()

	return hook, nil
}
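A usage sketch for the hook. The config values are illustrative, and it assumes InfluxDBHook satisfies the logrus.Hook interface (which its name and batching fields suggest, but which this excerpt does not confirm):

hook, err := NewInfluxDB(&Config{
	Database:    "logs",
	Measurement: "app_events",
})
if err != nil {
	log.Fatal(err)
}
// Attach the hook so every log entry is also written to InfluxDB (assumes logrus).
logrus.AddHook(hook)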
Example #8
// dbCheck ensures the given database exists
func dbCheck(conn client.Client, database string) error {
	if len(database) == 0 {
		return fmt.Errorf("no database specified")
	}
	q := client.Query{Command: "show databases"}
	resp, err := conn.Query(q)
	if err != nil {
		return err
	}

	for _, r := range resp.Results {
		for _, s := range r.Series {
			for _, v := range s.Values {
				for _, d := range v {
					if name, ok := d.(string); ok && name == database {
						return nil
					}
				}
			}
		}
	}
	return fmt.Errorf("database %s does not exist", database)
}
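If a missing database should be created rather than reported, a caller could pair dbCheck with something like the sketch below; the function name is hypothetical and the query is plain InfluxQL:

// dbEnsure creates the database when dbCheck reports it missing.
func dbEnsure(conn client.Client, database string) error {
	if err := dbCheck(conn, database); err == nil {
		return nil
	}
	q := client.Query{Command: fmt.Sprintf("CREATE DATABASE %q", database)}
	resp, err := conn.Query(q)
	if err != nil {
		return err
	}
	return resp.Error()
}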
Example #9
// NewSender returns a function that will accept datapoints to send to influxdb
func NewSender(
	config interface{},
	batch client.BatchPointsConfig,
	batchSize int,
	queueSize int,
	flush int,
	errFunc func(error),
) (Sender, error) {
	if batchSize <= 0 {
		batchSize = DefaultBatchSize
	}
	if queueSize <= 0 {
		queueSize = DefaultQueueSize
	}
	if flush <= 0 {
		flush = DefaultFlush
	}

	var conn client.Client
	var err error

	switch conf := config.(type) {
	case client.HTTPConfig:
		conn, err = client.NewHTTPClient(conf)
		if err != nil {
			return nil, errors.Wrap(err, "error creating HTTPClient")
		}

		_, _, err = conn.Ping(conf.Timeout)
		if err != nil {
			return nil, fmt.Errorf("cannot ping influxdb server: %s", conf.Addr)
		}

		if err := dbCheck(conn, batch.Database); err != nil {
			return nil, errors.Wrapf(err, "check for database %s failed", batch.Database)
		}
	case client.UDPConfig:
		conn, err = client.NewUDPClient(conf)
		if err != nil {
			return nil, errors.Wrap(err, "error creating UDPClient")
		}
	}

	pts := make(chan *client.Point, queueSize)

	bp, err := client.NewBatchPoints(batch)
	if err != nil {
		return nil, errors.Wrap(err, "batchpoints error")
	}

	go func() {
		delay := time.Duration(flush) * time.Second
		tick := time.Tick(delay)
		count := 0
		for {
			select {
			case p := <-pts:
				bp.AddPoint(p)
				count++
				if count < batchSize {
					continue
				}
			case <-tick:
				if len(bp.Points()) == 0 {
					continue
				}
			}
			for {
				if err := conn.Write(bp); err != nil {
					if errFunc != nil {
						errFunc(err)
					}
					time.Sleep(retry)
					continue
				}
				bp, _ = client.NewBatchPoints(batch)
				count = 0
				break
			}
		}
	}()

	return func(key string, tags map[string]string, fields map[string]interface{}, ts time.Time) error {
		pt, err := client.NewPoint(key, tags, fields, ts)
		if err != nil {
			return err
		}
		pts <- pt
		return nil
	}, nil
}
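A usage sketch for the returned Sender; the address, database, and measurement are illustrative:

send, err := NewSender(
	client.HTTPConfig{Addr: "http://localhost:8086", Timeout: 5 * time.Second},
	client.BatchPointsConfig{Database: "metrics", Precision: "s"},
	0, 0, 0, // zero values fall back to DefaultBatchSize, DefaultQueueSize, DefaultFlush
	func(err error) { log.Println("influx write:", err) },
)
if err != nil {
	log.Fatal(err)
}
if err := send("requests", map[string]string{"handler": "api"},
	map[string]interface{}{"count": 1}, time.Now()); err != nil {
	log.Println(err)
}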
Example #10
func Crawl(searchUrl string, depth int, fetcher Fetcher, redisClient *redis.Client, influxClient influx.Client) {
	if depth <= 0 {
		return
	}

	// acquire a throttle slot only after the depth check so an early return cannot leak it
	throttle <- 1

	fmt.Printf("Depth: %d Crawling: %s\n", depth, searchUrl)

	bp, _ := influx.NewBatchPoints(influx.BatchPointsConfig{
		Database:  "crawler",
		Precision: "s",
	})

	host, err := url.Parse(searchUrl)

	// Send this to our redis queue for indexing
	if err != nil {
		redisClient.LPush("unknown_url_crawler_queue", searchUrl)
	} else {
		redisClient.LPush(host.Host+"_crawler_queue", searchUrl)
	}

	urlcache.lock.Lock()
	urlcache.m[searchUrl] = crawlError
	urlcache.lock.Unlock()

	// let's determine how long it is taking to fetch all urls on a page
	startFetch := time.Now()
	urls, err := fetcher.Fetch(searchUrl)
	crawlTime := time.Since(startFetch)

	if err != nil {
		fmt.Printf("Error fetching results from %s: %s\n", searchUrl, err.Error())

	} else {
		fmt.Printf("Finished crawling %s in %.2f seconds\n", searchUrl, crawlTime.Seconds())
	}

	domain := searchUrl
	if host != nil {
		domain = host.String()
	}
	tags := map[string]string{
		"domain": domain,
	}

	fields := map[string]interface{}{
		"urls_found":         len(urls),
		"crawl_time":         crawlTime.Nanoseconds(),
		"total_urls_crawled": len(urlcache.m),
		"urls_by_page":       len(urls),
	}

	point, err := influx.NewPoint(
		"crawl_usage",
		tags,
		fields,
		time.Now(),
	)
	if err != nil {
		log.Printf("Unable to create influxdb point: %s\n", err.Error())
	} else {
		// add data point to influx and flush the batch
		bp.AddPoint(point)

		if err := influxClient.Write(bp); err != nil {
			log.Printf("Unable to write batch point to influxdb: %s\n", err.Error())
		}
	}

	var wg sync.WaitGroup

	for _, u := range urls {
		if !urlTest.MatchString(u) {
			u = "http://" + host.Host + u
		}

		urlcache.lock.Lock()
		_, crawled := urlcache.m[u]
		urlcache.lock.Unlock()

		if validURL.MatchString(u) && urlTest.MatchString(u) && !crawled {
			wg.Add(1)
			go func(u string, depth int, fetcher Fetcher, redisClient *redis.Client, influxClient influx.Client) {
				defer wg.Done()
				Crawl(u, depth-1, fetcher, redisClient, influxClient)
			}(u, depth, fetcher, redisClient, influxClient)
		}
	}

	<-throttle
	wg.Wait()
}
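throttle, urlcache, crawlError, and the two regexps are package-level values that this excerpt does not show. One plausible set of declarations, with the concurrency limit, map value type, and patterns all being illustrative guesses:

var (
	// throttle caps how many Crawl calls run concurrently.
	throttle = make(chan int, 8)

	// urlcache remembers every URL that has been (or is being) crawled.
	urlcache = struct {
		m    map[string]error
		lock sync.Mutex
	}{m: map[string]error{}}

	crawlError = errors.New("crawl pending or failed")

	// urlTest matches absolute http(s) URLs; validURL is whatever extra filter the crawler applies.
	urlTest  = regexp.MustCompile(`^https?://`)
	validURL = regexp.MustCompile(`^https?://\S+$`)
)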
Example #11
func watch(client stdinflux.Client, bp stdinflux.BatchPoints, reportTicker <-chan time.Time) {
	for range reportTicker {
		if err := client.Write(bp); err != nil {
			log.Println(err)
		}
	}
}
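A usage sketch, assuming stdinflux is an alias for the InfluxDB v1 client package; the address, database, and interval are illustrative:

client, err := stdinflux.NewHTTPClient(stdinflux.HTTPConfig{Addr: "http://localhost:8086"})
if err != nil {
	log.Fatal(err)
}
bp, err := stdinflux.NewBatchPoints(stdinflux.BatchPointsConfig{Database: "metrics", Precision: "s"})
if err != nil {
	log.Fatal(err)
}
ticker := time.NewTicker(10 * time.Second)
defer ticker.Stop()
// watch flushes the same batch on every tick until the ticker channel is closed or the program exits.
go watch(client, bp, ticker.C)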