// sinceLastUpdate returns the time elapsed since the RRD database at
// dbfile was last updated.
//
// On any failure — rrd.Info returning an error, or a missing/mistyped
// "last_update" entry — it returns 0. The original ignored the Info
// error and then panicked on the failed .(uint) type assertion.
func sinceLastUpdate(dbfile string) time.Duration {
	ri, err := rrd.Info(dbfile)
	if err != nil {
		return 0
	}
	lastUpdate, ok := ri["last_update"].(uint)
	if !ok {
		return 0
	}
	return time.Since(time.Unix(int64(lastUpdate), 0))
}
// infoDB prints every key/value pair (with its dynamic Go type)
// reported by rrd.Info for the database at dbfile.
func infoDB(dbfile string) (err error) {
	info, err := rrd.Info(dbfile)
	if err != nil {
		return err
	}
	for key, value := range info {
		fmt.Printf("%s (%T): %v\n", key, value, value)
	}
	return nil
}
Exemple #3
0
// main reads the RRD database named in /etc/conf.json, fetches the last
// 288 AVERAGE samples, and writes them to "<devicename>_<start>.csv".
// Each row holds the sample time and two data-source values converted
// from bits/s to Mbps.
func main() {
	JSONStruct := &JSONData{}
	err := JSONStruct.FromJSON("/etc/conf.json")
	if err != nil {
		panic(err)
	}

	inf, err := rrd.Info(JSONStruct.Dbfile)
	if err != nil {
		log.Fatal(err)
	}

	// Fetch a 288-step window ending at the database's last update.
	end := time.Unix(int64(inf["last_update"].(uint)), 0)
	start := end.Add(-288 * step * time.Second)
	fetchRes, err := rrd.Fetch(JSONStruct.Dbfile, "AVERAGE", start, end, step*time.Second)
	if err != nil {
		log.Fatal(err)
	}
	defer fetchRes.FreeValues()

	num := start.Format("2006-01-02_15:04:05")
	fileName := JSONStruct.Devicename + "_" + num + ".csv"

	buf := new(bytes.Buffer)
	w := csv.NewWriter(buf)

	// 289 rows: one per step boundary over the inclusive fetch window.
	for i := 0; i <= 288; i++ {
		// Convert the two data sources from bits/s to Mbps.
		v1 := fetchRes.ValueAt(0, i) * 8 / 1024 / 1024
		v2 := fetchRes.ValueAt(1, i) * 8 / 1024 / 1024
		ts := start.Add(time.Duration((i+1)*step) * time.Second)
		record := []string{
			ts.String(),
			FloatToString(v1) + "Mbps",
			FloatToString(v2) + "Mbps",
		}
		if err := w.Write(record); err != nil {
			log.Fatal(err)
		}
	}
	// Flush once after the loop (the original flushed per row) and
	// surface any buffered write error, which was silently ignored.
	w.Flush()
	if err := w.Error(); err != nil {
		log.Fatal(err)
	}

	fout, err := os.Create(fileName)
	if err != nil {
		fmt.Println(fileName, err)
		return
	}
	// BUG FIX: the original deferred fout.Close() BEFORE checking the
	// os.Create error, dereferencing a nil *os.File on failure.
	defer fout.Close()

	if _, err := fout.Write(buf.Bytes()); err != nil {
		fmt.Println(fileName, err)
		return
	}
	fmt.Printf("\n")

}
// fetchDB prints the fetch parameters and every AVERAGE row of the RRD
// database at dbfile covering the last 20 steps before its final update.
func fetchDB(dbfile string, step uint) (err error) {
	info, err := rrd.Info(dbfile)
	if err != nil {
		return err
	}

	stepDur := time.Duration(step) * time.Second
	end := time.Unix(int64(info["last_update"].(uint)), 0)
	start := end.Add(-20 * stepDur)
	fmt.Printf("Fetch Params:\n")
	fmt.Printf("Start: %s\n", start)
	fmt.Printf("End: %s\n", end)
	fmt.Printf("Step: %s\n", stepDur)
	result, err := rrd.Fetch(dbfile, "AVERAGE", start, end, stepDur)
	if err != nil {
		return err
	}
	defer result.FreeValues()
	fmt.Printf("FetchResult:\n")
	fmt.Printf("Start: %s\n", result.Start)
	fmt.Printf("End: %s\n", result.End)
	fmt.Printf("Step: %s\n", result.Step)
	for _, name := range result.DsNames {
		fmt.Printf("\t%s", name)
	}
	fmt.Printf("\n")

	// Walk rows from the first step after Start through End inclusive.
	row := 0
	for ts := result.Start.Add(result.Step); !ts.After(end); ts = ts.Add(result.Step) {
		fmt.Printf("%s / %d", ts, ts.Unix())
		for ds := 0; ds < len(result.DsNames); ds++ {
			fmt.Printf("\t%e", result.ValueAt(ds, row))
		}
		fmt.Printf("\n")
		row++
	}
	return nil
}
Exemple #5
0
// Refresh triggers a full connector data update.
//
// It walks the connector's base path, matches each regular file against
// the configured series pattern to derive a (source, metric) pair, reads
// the file's RRD metadata, and registers one metric per data-source ×
// consolidation-function combination, sending a catalog record for each
// on outputChan. Per-file problems are logged and skipped so one bad
// file does not abort the whole walk; only WalkDir itself can fail.
func (c *RRDConnector) Refresh(originName string, outputChan chan<- *catalog.Record) error {
	// Search for files and parse their path for source/metric pairs
	walkFunc := func(filePath string, fileInfo os.FileInfo, err error) error {
		var sourceName, metricName string

		// Report errors (log and continue: returning nil keeps the walk going)
		if err != nil {
			logger.Log(logger.LevelWarning, "connector", "rrd[%s]: error while walking: %s", c.name, err)
			return nil
		}

		// Skip non-files (directories, symlinks, devices, ...)
		mode := fileInfo.Mode() & os.ModeType
		if mode != 0 {
			return nil
		}

		// Get pattern matches on the path relative to the connector root
		m, err := matchSeriesPattern(c.re, strings.TrimPrefix(filePath, c.path+"/"))
		if err != nil {
			logger.Log(logger.LevelInfo, "connector", "rrd[%s]: file `%s' does not match pattern, ignoring", c.name,
				filePath)
			return nil
		}

		sourceName, metricName = m[0], m[1]

		if _, ok := c.metrics[sourceName]; !ok {
			c.metrics[sourceName] = make(map[string]*rrdMetric)
		}

		// Extract metric information from .rrd file
		info, err := rrd.Info(filePath)
		if err != nil {
			logger.Log(logger.LevelWarning, "connector", "rrd[%s]: %s", c.name, err)
			return nil
		}

		// Extract consolidation functions list (e.g. AVERAGE, MAX) from
		// the file's RRA definitions; non-string entries are ignored.
		cfSet := set.New(set.ThreadSafe)

		if cf, ok := info["rra.cf"].([]interface{}); ok {
			for _, entry := range cf {
				if name, ok := entry.(string); ok {
					cfSet.Add(name)
				}
			}
		}

		cfList := set.StringSlice(cfSet)

		if _, ok := info["ds.index"]; ok {
			indexes, ok := info["ds.index"].(map[string]interface{})
			if !ok {
				return nil
			}

			// One metric per (data source, consolidation function) pair,
			// named "<metric>/<ds>/<cf-lowercase>".
			for dsName := range indexes {
				for _, cfName := range cfList {
					metricFullName := metricName + "/" + dsName + "/" + strings.ToLower(cfName)

					// NOTE(review): panics if "step" is absent or not a
					// uint — presumably rrd.Info always provides it; confirm.
					c.metrics[sourceName][metricFullName] = &rrdMetric{
						Dataset:  dsName,
						FilePath: filePath,
						Step:     time.Duration(info["step"].(uint)) * time.Second,
						Cf:       cfName,
					}

					outputChan <- &catalog.Record{
						Origin:    originName,
						Source:    sourceName,
						Metric:    metricFullName,
						Connector: c,
					}
				}
			}
		}

		return nil
	}

	if err := utils.WalkDir(c.path, walkFunc); err != nil {
		return err
	}

	return nil
}
Exemple #6
0
// main replays airFiber event documents from MongoDB into one RRD file
// per host. For each distinct host it resumes after the existing RRD's
// last_update timestamp (or imports the full history when no file
// exists), lazily creating the database from the first event seen.
func main() {
	flag.Parse()

	s, err := mgo.Dial(mongo)
	if err != nil {
		log.Err.Fatal(err)
	}
	defer s.Close()

	var hosts []string

	collection := s.DB("af24").C("event")
	err = collection.Find(nil).Distinct("host", &hosts)
	if err != nil {
		log.Err.Fatal(err)
	}

	for _, host := range hosts {
		filename := path.Join(output, host+".rrd")
		log.Info.Printf("processing %s", host)

		// Build the query: if the RRD already exists, only fetch events
		// newer than its last update; otherwise fetch everything.
		var q bson.M
		info, err := rrd.Info(filename)
		if err != nil {
			q = bson.M{"type": "aflog", "host": host}
		} else {
			lasti, ok := info["last_update"]
			if !ok {
				log.Err.Fatal("rrd file exists, but no last update")
			}
			last, ok := lasti.(uint)
			if !ok {
				log.Err.Fatalf("last update is of wrong type: %#v", lasti)
			}
			utime := time.Unix(int64(last), 0)
			q = bson.M{"type": "aflog", "host": host, "timestamp": bson.M{"$gt": utime}}
		}

		it := collection.Find(q).Sort("timestamp").Iter()
		if it == nil {
			log.Err.Fatal("iterator is nil!")
		}

		// Creator/updater are initialized lazily from the first event so
		// the RRD start time matches the earliest sample for this host.
		var creator *rrd.Creator
		var updater *rrd.Updater
		var e event.Event
		for it.Next(&e) {
			if creator == nil {
				start := e["timestamp"].(time.Time)
				creator = rrd.NewCreator(filename, start, 10)
				creator.DS("rx0", "GAUGE", 60, -120, 0)
				creator.DS("rx1", "GAUGE", 60, -120, 0)
				creator.DS("cap", "GAUGE", 60, 0, 1e9)
				creator.DS("pow", "GAUGE", 60, -50, 50)
				creator.RRA("AVERAGE", 0, 1, 6*60*24*365)
				err = creator.Create(overwrite)
				// An already-existing file is fine: we are appending.
				if err != nil && !os.IsExist(err) {
					log.Err.Fatal(err)
				}
			}
			if updater == nil {
				updater = rrd.NewUpdater(filename)
			}
			timestamp := e["timestamp"].(time.Time)

			// Validate the "local" section and pull the three local
			// measurements; malformed events are logged and skipped.
			locali, ok := e["local"]
			if !ok {
				log.Warn.Printf("[%v] no local section", e["uuid"])
				continue
			}
			local, ok := locali.(event.Event)
			if !ok {
				log.Warn.Printf("[%v] local section has wrong type: %#v", e["uuid"], locali)
				continue
			}

			rx0i, ok := local["rxpower0"]
			if !ok {
				log.Warn.Printf("[%v] local rxpower0 absent", e["uuid"])
				continue
			}
			rx0, ok := rx0i.(int64)
			if !ok {
				log.Warn.Printf("[%v] local rxpower0 has wrong type: %#v", e["uuid"], rx0i)
				continue
			}

			rx1i, ok := local["rxpower1"]
			if !ok {
				log.Warn.Printf("[%v] local rxpower1 absent", e["uuid"])
				continue
			}
			rx1, ok := rx1i.(int64)
			if !ok {
				log.Warn.Printf("[%v] local rxpower1 has wrong type: %#v", e["uuid"], rx1i)
				continue
			}

			rxcapi, ok := local["rxcapacity"]
			if !ok {
				log.Warn.Printf("[%v] local rxcapacity absent", e["uuid"])
				continue
			}
			rxcap, ok := rxcapi.(int64)
			if !ok {
				log.Warn.Printf("[%v] local rxcapacity has wrong type: %#v", e["uuid"], rxcapi)
				continue
			}

			// Validate the "remote" section and pull its power reading.
			remotei, ok := e["remote"]
			if !ok {
				log.Warn.Printf("[%v] no remote section", e["uuid"])
				continue
			}
			remote, ok := remotei.(event.Event)
			if !ok {
				log.Warn.Printf("[%v] remote section has wrong type: %#v", e["uuid"], remotei)
				continue
			}

			poweri, ok := remote["rpowerout"]
			if !ok {
				log.Warn.Printf("[%v] remote rpowerout is absent", e["uuid"])
				continue
			}
			power, ok := poweri.(int64)
			if !ok {
				log.Warn.Printf("[%v] remote rpowerout has wrong type: %v", e["uuid"], poweri)
				continue
			}

			err = updater.Update(timestamp.Unix(), rx0, rx1, rxcap, power)
			if err != nil {
				log.Err.Print(err)
				continue
			}
		}
		// BUG FIX: the original never closed the iterator or checked its
		// accumulated error, silently dropping query/cursor failures.
		if err := it.Close(); err != nil {
			log.Err.Print(err)
		}
	}
}
// main is an end-to-end demo of the rrd bindings: it creates a small
// two-DS database, feeds it samples (directly and via the update cache),
// dumps its info, renders two graphs, and finally prints the last 20
// seconds of data via both Fetch and Xport.
func main() {
	// Create
	const (
		dbfile    = "/tmp/test.rrd"
		step      = 1
		heartbeat = 2 * step
	)

	c := rrd.NewCreator(dbfile, time.Now(), step)
	c.RRA("AVERAGE", 0.5, 1, 100)
	c.RRA("AVERAGE", 0.5, 5, 100)
	c.DS("cnt", "COUNTER", heartbeat, 0, 100)
	c.DS("g", "GAUGE", heartbeat, 0, 60)
	err := c.Create(true)
	if err != nil {
		panic(err)
	}

	// Update: one sample per step for ten steps.
	u := rrd.NewUpdater(dbfile)
	for i := 0; i < 10; i++ {
		time.Sleep(step * time.Second)
		err := u.Update(time.Now(), i, 1.5*float64(i))
		if err != nil {
			panic(err)
		}
	}

	// Update with cache: batch ten more samples, then flush them at once.
	for i := 10; i < 20; i++ {
		time.Sleep(step * time.Second)
		u.Cache(time.Now(), i, 2*float64(i))
	}
	err = u.Update()
	if err != nil {
		panic(err)
	}

	// Info: dump every metadata key with its dynamic type.
	inf, err := rrd.Info(dbfile)
	if err != nil {
		panic(err)
	}
	for k, v := range inf {
		fmt.Printf("%s (%T): %v\n", k, v, v)
	}

	// Graph: render both data sources with derived max/avg annotations.
	g := rrd.NewGrapher()
	g.SetTitle("Test")
	g.SetVLabel("some variable")
	g.SetSize(800, 300)
	g.SetWatermark("some watermark")
	g.Def("v1", dbfile, "g", "AVERAGE")
	g.Def("v2", dbfile, "cnt", "AVERAGE")
	g.VDef("max1", "v1,MAXIMUM")
	g.VDef("avg2", "v2,AVERAGE")
	g.Line(1, "v1", "ff0000", "var 1")
	g.Area("v2", "0000ff", "var 2")
	g.GPrintT("max1", "max1 at %c")
	g.GPrint("avg2", "avg2=%lf")
	g.PrintT("max1", "max1 at %c")
	g.Print("avg2", "avg2=%lf")

	now := time.Now()

	// BUG FIX: check the error BEFORE printing the returned graph info;
	// the original used the result first and only then checked err.
	i, err := g.SaveGraph("/tmp/test_rrd1.png", now.Add(-20*time.Second), now)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", i)
	i, buf, err := g.Graph(now.Add(-20*time.Second), now)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", i)
	err = ioutil.WriteFile("/tmp/test_rrd2.png", buf, 0666)
	if err != nil {
		panic(err)
	}

	// Fetch: print the raw AVERAGE rows of the last 20 steps.
	end := time.Unix(int64(inf["last_update"].(uint)), 0)
	start := end.Add(-20 * step * time.Second)
	fmt.Printf("Fetch Params:\n")
	fmt.Printf("Start: %s\n", start)
	fmt.Printf("End: %s\n", end)
	fmt.Printf("Step: %s\n", step*time.Second)
	fetchRes, err := rrd.Fetch(dbfile, "AVERAGE", start, end, step*time.Second)
	if err != nil {
		panic(err)
	}
	defer fetchRes.FreeValues()
	fmt.Printf("FetchResult:\n")
	fmt.Printf("Start: %s\n", fetchRes.Start)
	fmt.Printf("End: %s\n", fetchRes.End)
	fmt.Printf("Step: %s\n", fetchRes.Step)
	for _, dsName := range fetchRes.DsNames {
		fmt.Printf("\t%s", dsName)
	}
	fmt.Printf("\n")

	row := 0
	for ti := fetchRes.Start.Add(fetchRes.Step); ti.Before(end) || ti.Equal(end); ti = ti.Add(fetchRes.Step) {
		fmt.Printf("%s / %d", ti, ti.Unix())
		for i := 0; i < len(fetchRes.DsNames); i++ {
			v := fetchRes.ValueAt(i, row)
			fmt.Printf("\t%e", v)
		}
		fmt.Printf("\n")
		row++
	}

	// Xport: same window exported with a computed sum column.
	end = time.Unix(int64(inf["last_update"].(uint)), 0)
	start = end.Add(-20 * step * time.Second)
	fmt.Printf("Xport Params:\n")
	fmt.Printf("Start: %s\n", start)
	fmt.Printf("End: %s\n", end)
	fmt.Printf("Step: %s\n", step*time.Second)

	e := rrd.NewExporter()
	e.Def("def1", dbfile, "cnt", "AVERAGE")
	e.Def("def2", dbfile, "g", "AVERAGE")
	e.CDef("vdef1", "def1,def2,+")
	e.XportDef("def1", "cnt")
	e.XportDef("def2", "g")
	e.XportDef("vdef1", "sum")

	xportRes, err := e.Xport(start, end, step*time.Second)
	if err != nil {
		panic(err)
	}
	defer xportRes.FreeValues()
	fmt.Printf("XportResult:\n")
	fmt.Printf("Start: %s\n", xportRes.Start)
	fmt.Printf("End: %s\n", xportRes.End)
	fmt.Printf("Step: %s\n", xportRes.Step)
	for _, legend := range xportRes.Legends {
		fmt.Printf("\t%s", legend)
	}
	fmt.Printf("\n")

	row = 0
	for ti := xportRes.Start.Add(xportRes.Step); ti.Before(end) || ti.Equal(end); ti = ti.Add(xportRes.Step) {
		fmt.Printf("%s / %d", ti, ti.Unix())
		for i := 0; i < len(xportRes.Legends); i++ {
			v := xportRes.ValueAt(i, row)
			fmt.Printf("\t%e", v)
		}
		fmt.Printf("\n")
		row++
	}
}