// call by ioWorker func ioRrdFetch(filename string, cf string, start, end int64, step int) ([]*specs.RRDData, error) { start_t := time.Unix(start, 0) end_t := time.Unix(end, 0) step_t := time.Duration(step) * time.Second statInc(ST_RRD_FETCH, 1) fetchRes, err := rrdlite.Fetch(filename, cf, start_t, end_t, step_t) if err != nil { statInc(ST_RRD_FETCH_ERR, 1) return []*specs.RRDData{}, err } defer fetchRes.FreeValues() values := fetchRes.Values() size := len(values) ret := make([]*specs.RRDData, size) start_ts := fetchRes.Start.Unix() step_s := fetchRes.Step.Seconds() for i, val := range values { ts := start_ts + int64(i+1)*int64(step_s) d := &specs.RRDData{ Ts: ts, V: specs.JsonFloat(val), } ret[i] = d } if err != nil { err = fmt.Errorf("filename:%s %s", filename, err) } return ret, nil }
// 非法值: ts=0,value无意义 func (p *Backend) getLast(csum string) *specs.RRDData { nan := &specs.RRDData{Ts: 0, V: specs.JsonFloat(0.0)} e := p.cache.get(csum) if e == nil { return nan } e.RLock() defer e.RUnlock() typ := e.typ() if typ == specs.GAUGE { if e.e.dataId == 0 { return nan } idx := uint32(e.e.dataId-1) & CACHE_SIZE_MASK return &specs.RRDData{ Ts: int64(e.e.time[idx]), V: specs.JsonFloat(e.e.value[idx]), } } if typ == specs.COUNTER || typ == specs.DERIVE { if e.e.dataId < 2 { return nan } data, _ := e._getData(uint32(e.e.dataId)-2, uint32(e.e.dataId)) delta_ts := data[0].Ts - data[1].Ts delta_v := data[0].V - data[1].V if delta_ts != int64(e.e.step) || delta_ts <= 0 { return nan } if delta_v < 0 { // when cnt restarted, new cnt value would be zero, so fix it here delta_v = 0 } return &specs.RRDData{Ts: data[0].Ts, V: specs.JsonFloat(float64(delta_v) / float64(delta_ts))} } return nan }
func (p *Backend) getLastRaw(csum string) *specs.RRDData { nan := &specs.RRDData{Ts: 0, V: specs.JsonFloat(0.0)} e := p.cache.get(csum) if e == nil { return nan } e.RLock() defer e.RUnlock() if e.typ() == specs.GAUGE { if e.e.dataId == 0 { return nan } idx := uint32(e.e.dataId-1) & CACHE_SIZE_MASK return &specs.RRDData{ Ts: int64(e.e.time[idx]), V: specs.JsonFloat(e.e.value[idx]), } } return nan }
/*
 * a older than b
 * c = a <- b
 */
// queryMergeData merges two datapoint series into one result c:
// a holds data fetched from the rrd file (older), b holds data still in
// the write cache (newer). Points of a inside [start, end] are kept; the
// gap (if any) between the newest kept point of a and b[0] is padded
// with NaN at step intervals; then b is overlaid onto the tail of c,
// overwriting overlapping slots (a NaN cached value keeps the file value).
func queryMergeData(a, b []*specs.RRDData, start, end, step int64) []*specs.RRDData {
	// do merging
	c := make([]*specs.RRDData, 0)
	if len(a) > 0 {
		for _, v := range a {
			if v.Ts >= start && v.Ts <= end {
				// timestamps returned by rrdtool are contiguous, no gaps
				c = append(c, v)
			}
		}
	}

	bl := len(b)
	if bl > 0 {
		cl := len(c)
		lastTs := b[0].Ts

		// find junction: scan backwards for the newest point of c that is
		// strictly older than b[0]; i is left at that index (or -1 when
		// every point of c overlaps the cached range)
		i := 0
		for i = cl - 1; i >= 0; i-- {
			if c[i].Ts < b[0].Ts {
				lastTs = c[i].Ts
				break
			}
		}

		// fix missing: pad the gap between the file data and the cache
		// with NaN placeholders, one per step
		for ts := lastTs + step; ts < b[0].Ts; ts += step {
			c = append(c, &specs.RRDData{Ts: ts, V: specs.JsonFloat(math.NaN())})
		}

		// merge cached items to result: i now points at the first slot of
		// c that the cache overlaps; overwrite in place until c runs out,
		// then append the rest
		i += 1
		for j := 0; j < bl; j++ {
			if i < cl {
				if !math.IsNaN(float64(b[j].V)) {
					c[i] = b[j]
				}
			} else {
				c = append(c, b[j])
			}
			i++
		}
	}
	return c
}
// queryPruneCache converts raw cached datapoints into a regular series
// aligned on e.e.step and restricted to [start, end].
// COUNTER/DERIVE entries: adjacent raw points exactly one step apart are
// differentiated into a per-second rate (negative rates — counter
// resets — become NaN). GAUGE entries: values are copied through.
// Any grid slot with no matching raw point becomes NaN.
// NOTE(review): assumes items is non-empty and sorted ascending by Ts —
// confirm at the caller.
func queryPruneCache(items []*specs.RRDData, e *cacheEntry, start, end int64) (ret []*specs.RRDData) {
	// prune cached items
	var val specs.JsonFloat
	ts := items[0].Ts
	n := len(items)
	last := items[n-1].Ts
	i := 0
	typ := e.typ()
	if typ == specs.DERIVE || typ == specs.COUNTER {
		// strict '<' bound: the last raw point has no successor to
		// differentiate against, so it produces no output slot
		for ts < last {
			if i < n-1 && ts == items[i].Ts && ts == items[i+1].Ts-int64(e.e.step) {
				// two consecutive raw points exactly one step apart:
				// differentiate into a per-second rate
				val = specs.JsonFloat(items[i+1].V-items[i].V) / specs.JsonFloat(e.e.step)
				if val < 0 {
					// counter reset: negative rate is meaningless, mark missing
					val = specs.JsonFloat(math.NaN())
				}
				i++
			} else {
				// missing
				val = specs.JsonFloat(math.NaN())
			}
			if ts >= start && ts <= end {
				ret = append(ret, &specs.RRDData{Ts: ts, V: val})
			}
			ts = ts + int64(e.e.step)
		}
	} else if typ == specs.GAUGE {
		// inclusive bound: gauge values need no successor
		for ts <= last {
			if i < n && ts == items[i].Ts {
				val = specs.JsonFloat(items[i].V)
				i++
			} else {
				// missing
				val = specs.JsonFloat(math.NaN())
			}
			if ts >= start && ts <= end {
				ret = append(ret, &specs.RRDData{Ts: ts, V: val})
			}
			ts = ts + int64(e.e.step)
		}
	}
	return ret
}
func queryPruneRet(a []*specs.RRDData, start, end, step int64) []*specs.RRDData { // prune result n := int((end - start) / step) ret := make([]*specs.RRDData, n) j := 0 ts := start al := len(a) for i := 0; i < n; i++ { if j < al && ts == a[j].Ts { ret[i] = a[j] j++ } else { ret[i] = &specs.RRDData{Ts: ts, V: specs.JsonFloat(math.NaN())} } ts += step } return ret }
// return [l, h) // h - l <= CACHE_SIZE func (p *cacheEntry) _getData(l, h uint32) (ret []*specs.RRDData, overrun int) { size := h - l if size > CACHE_SIZE { overrun = int(size - CACHE_SIZE) size = CACHE_SIZE l = h - CACHE_SIZE } if size == 0 { return } ret = make([]*specs.RRDData, size) //H := h & CACHE_SIZE_MASK L := l & CACHE_SIZE_MASK for i := uint32(0); i < size; i++ { idx := (L + i) & CACHE_SIZE_MASK ret[i] = &specs.RRDData{ Ts: int64(p.e.time[idx]), V: specs.JsonFloat(p.e.value[idx]), } } /* if H > L { copy(ret, p.data[L:H]) } else { copy(ret[:CACHE_SIZE-L], p.data[L:]) copy(ret[CACHE_SIZE-L:], p.data[:H]) } */ return }