func fetch(filename string, cf string, start, end int64, step int) ([]*cmodel.RRDData, error) {
	start_t := time.Unix(start, 0)
	end_t := time.Unix(end, 0)
	step_t := time.Duration(step) * time.Second

	fetchRes, err := rrdlite.Fetch(filename, cf, start_t, end_t, step_t)
	if err != nil {
		return []*cmodel.RRDData{}, err
	}
	defer fetchRes.FreeValues()

	values := fetchRes.Values()
	size := len(values)
	ret := make([]*cmodel.RRDData, size)

	start_ts := fetchRes.Start.Unix()
	step_s := fetchRes.Step.Seconds()

	for i, val := range values {
		ts := start_ts + int64(i+1)*int64(step_s)
		d := &cmodel.RRDData{
			Timestamp: ts,
			Value:     cmodel.JsonFloat(val),
		}
		ret[i] = d
	}

	return ret, nil
}
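// A minimal, runnable sketch (hypothetical start/step values, no rrdlite or
// cmodel dependency) of the timestamp arithmetic above: the i-th fetched
// value is stamped start + (i+1)*step, i.e. each point is labeled with the
// end of its step window.
package main

import "fmt"

func main() {
	startTs := int64(1460000000) // stands in for fetchRes.Start.Unix()
	step := int64(60)            // stands in for fetchRes.Step in seconds
	values := []float64{1.0, 2.0, 3.0}

	for i, v := range values {
		ts := startTs + int64(i+1)*step
		fmt.Printf("ts=%d val=%v\n", ts, v)
	}
}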
// If the requested counters were collected with different periods or
// frequencies, the merged result may be inaccurate.
func (this *EChartsData) GetEchartsData(datas []*cmodel.GraphQueryResponse) {
	if this.Data == nil {
		this.Data = make(map[string]([]interface{}))
	}

	// use the series with the most points as the timestamp axis
	var max int
	var index int
	for i := range datas {
		if max < len(datas[i].Values) {
			max = len(datas[i].Values)
			index = i
		}
	}

	counter := datas[index].Counter
	for _, val := range datas[index].Values {
		this.Timestamp = append(this.Timestamp, val.Timestamp)
	}

	for i := range datas {
		counter = datas[i].Counter
		for j, val := range datas[i].Values {
			if val.Timestamp == this.Timestamp[j] {
				this.Data[counter] = append(this.Data[counter], val.Value)
			} else {
				this.Data[counter] = append(this.Data[counter], cmodel.JsonFloat(math.NaN())) // no data
			}
		}
	}
}
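// A self-contained sketch (simplified types, hypothetical data) of the
// alignment rule in GetEchartsData: the timestamp axis comes from the longest
// series, and a point is kept only when its timestamp matches its slot on
// that axis; otherwise NaN is emitted, which is the inaccuracy the comment
// above warns about.
package main

import (
	"fmt"
	"math"
)

type point struct {
	Ts  int64
	Val float64
}

func main() {
	axis := []int64{60, 120, 180}         // timestamps from the longest series
	series := []point{{60, 1}, {150, 2}}  // second point is misaligned

	out := make([]float64, 0, len(series))
	for j, p := range series {
		if p.Ts == axis[j] {
			out = append(out, p.Val)
		} else {
			out = append(out, math.NaN()) // timestamp mismatch: emit a gap
		}
	}
	fmt.Println(out) // [1 NaN]
}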
func QueryOne(para cmodel.GraphQueryParam) (resp *cmodel.GraphQueryResponse, err error) {
	start, end := para.Start, para.End
	endpoint, counter := para.Endpoint, para.Counter

	pool, addr, err := selectPool(endpoint, counter)
	if err != nil {
		return nil, err
	}

	conn, err := pool.Fetch()
	if err != nil {
		return nil, err
	}

	rpcConn := conn.(spool.RpcClient)
	if rpcConn.Closed() {
		pool.ForceClose(conn)
		return nil, errors.New("conn closed")
	}

	type ChResult struct {
		Err  error
		Resp *cmodel.GraphQueryResponse
	}

	ch := make(chan *ChResult, 1)
	go func() {
		resp := &cmodel.GraphQueryResponse{}
		err := rpcConn.Call("Graph.Query", para, resp)
		ch <- &ChResult{Err: err, Resp: resp}
	}()

	select {
	case <-time.After(time.Duration(g.Config().Graph.CallTimeout) * time.Millisecond):
		pool.ForceClose(conn)
		return nil, fmt.Errorf("%s, call timeout. proc: %s", addr, pool.Proc())
	case r := <-ch:
		if r.Err != nil {
			pool.ForceClose(conn)
			return r.Resp, fmt.Errorf("%s, call failed, err %v. proc: %s", addr, r.Err, pool.Proc())
		} else {
			pool.Release(conn)

			if len(r.Resp.Values) < 1 {
				return r.Resp, nil
			}

			// TODO: the query module should not have to do this; it means
			// graph did not clean the data properly.
			fixed := []*cmodel.RRDData{}
			for _, v := range r.Resp.Values {
				if v == nil || !(v.Timestamp >= start && v.Timestamp <= end) {
					continue
				}
				// FIXME: filter out all negative values at query time,
				// because transfer used to set the rrd minimum to U (unknown).
				if (r.Resp.DsType == "DERIVE" || r.Resp.DsType == "COUNTER") && v.Value < 0 {
					fixed = append(fixed, &cmodel.RRDData{Timestamp: v.Timestamp, Value: cmodel.JsonFloat(math.NaN())})
				} else {
					fixed = append(fixed, v)
				}
			}
			r.Resp.Values = fixed
		}
		return r.Resp, nil
	}
}
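// A minimal sketch (hypothetical slowCall stand-in, arbitrary timeout) of the
// call-with-timeout pattern used by QueryOne: run the blocking RPC in a
// goroutine, send the result on a buffered channel, and select against
// time.After. The buffer of size 1 lets the goroutine finish even after the
// timeout has fired, so it does not leak.
package main

import (
	"fmt"
	"time"
)

type result struct {
	Err  error
	Resp string
}

// slowCall stands in for rpcConn.Call("Graph.Query", ...).
func slowCall() (string, error) {
	time.Sleep(200 * time.Millisecond)
	return "ok", nil
}

func main() {
	ch := make(chan *result, 1) // buffered: sender never blocks forever
	go func() {
		resp, err := slowCall()
		ch <- &result{Err: err, Resp: resp}
	}()

	select {
	case <-time.After(100 * time.Millisecond):
		fmt.Println("call timeout") // here QueryOne force-closes the pooled conn
	case r := <-ch:
		fmt.Println("call returned:", r.Resp, r.Err)
	}
}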
func (this *Graph) Query(param cmodel.GraphQueryParam, resp *cmodel.GraphQueryResponse) error {
	// statistics
	proc.GraphQueryCnt.Incr()

	resp.Values = []*cmodel.RRDData{}

	dsType, step, exists := index.GetTypeAndStep(param.Endpoint, param.Counter)
	if !exists {
		return nil
	}

	md5 := cutils.Md5(param.Endpoint + "/" + param.Counter)
	filename := fmt.Sprintf("%s/%s/%s_%s_%d.rrd", g.Config().RRD.Storage, md5[0:2], md5, dsType, step)

	datas, err := rrdtool.Fetch(filename, param.ConsolFun, param.Start, param.End, step)
	if err != nil {
		if store.GraphItems.LenOf(md5) <= 2 {
			return nil
		}
		// TODO not atomic, fix me
		items := store.GraphItems.PopAll(md5)
		size := len(items)
		if size > 2 {
			filename := fmt.Sprintf("%s/%s/%s_%s_%d.rrd", g.Config().RRD.Storage, md5[0:2], md5,
				items[0].DsType, items[0].Step)
			err := rrdtool.Flush(filename, items)
			if err != nil && g.Config().Debug && g.Config().DebugChecksum == md5 {
				log.Println("flush fail:", err, "filename:", filename)
			}
		} else {
			return nil
		}
	}

	items := store.GraphItems.FetchAll(md5)

	// merge
	items_size := len(items)
	datas_size := len(datas)
	if items_size > 1 && datas_size > 2 &&
		int(datas[1].Timestamp-datas[0].Timestamp) == step &&
		items[items_size-1].Timestamp > datas[0].Timestamp {
		var val cmodel.JsonFloat
		cache_size := int(items[items_size-1].Timestamp-items[0].Timestamp)/step + 1
		cache := make([]*cmodel.RRDData, cache_size)

		// fix items
		items_idx := 0
		ts := items[0].Timestamp
		if dsType == g.DERIVE || dsType == g.COUNTER {
			for i := 0; i < cache_size; i++ {
				if items_idx < items_size-1 && ts == items[items_idx].Timestamp &&
					ts != items[items_idx+1].Timestamp {
					val = cmodel.JsonFloat(items[items_idx+1].Value-items[items_idx].Value) /
						cmodel.JsonFloat(items[items_idx+1].Timestamp-items[items_idx].Timestamp)
					if val < 0 {
						val = cmodel.JsonFloat(math.NaN())
					}
					items_idx++
				} else {
					// miss
					val = cmodel.JsonFloat(math.NaN())
				}
				cache[i] = &cmodel.RRDData{Timestamp: ts, Value: val}
				ts = ts + int64(step)
			}
		} else if dsType == g.GAUGE {
			for i := 0; i < cache_size; i++ {
				if items_idx < items_size && ts == items[items_idx].Timestamp {
					val = cmodel.JsonFloat(items[items_idx].Value)
					items_idx++
				} else {
					// miss
					val = cmodel.JsonFloat(math.NaN())
				}
				cache[i] = &cmodel.RRDData{Timestamp: ts, Value: val}
				ts = ts + int64(step)
			}
		} else {
			log.Println("not support dstype")
			return nil
		}

		size := int(items[items_size-1].Timestamp-datas[0].Timestamp)/step + 1
		ret := make([]*cmodel.RRDData, size)
		cache_idx := 0
		ts = datas[0].Timestamp
		if g.Config().Debug && g.Config().DebugChecksum == md5 {
			log.Println("param.start", param.Start, "param.End:", param.End, "items:", items, "datas:", datas)
		}
		for i := 0; i < size; i++ {
			if g.Config().Debug && g.Config().DebugChecksum == md5 {
				log.Println("i", i, "size:", size, "items_idx:", items_idx, "ts:", ts)
			}
			if i < datas_size {
				if ts == cache[cache_idx].Timestamp {
					if math.IsNaN(float64(cache[cache_idx].Value)) {
						val = datas[i].Value
					} else {
						val = cache[cache_idx].Value
					}
					cache_idx++
				} else {
					val = datas[i].Value
				}
			} else {
				if cache_idx < cache_size && ts == cache[cache_idx].Timestamp {
					val = cache[cache_idx].Value
					cache_idx++
				} else {
					// miss
					val = cmodel.JsonFloat(math.NaN())
				}
			}
			ret[i] = &cmodel.RRDData{Timestamp: ts, Value: val}
			ts = ts + int64(step)
		}
		resp.Values = ret
	} else {
		resp.Values = datas
	}

	resp.Endpoint = param.Endpoint
	resp.Counter = param.Counter
	resp.DsType = dsType
	resp.Step = step

	// statistics
	proc.GraphQueryItemCnt.IncrBy(int64(len(resp.Values)))

	return nil
}
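// A small sketch (hypothetical samples, simplified types) of the
// DERIVE/COUNTER conversion done in the cache-fix loop above: adjacent raw
// samples become a per-second rate, (next.Value - cur.Value) /
// (next.Timestamp - cur.Timestamp), and negative rates, e.g. from a counter
// reset, are blanked to NaN.
package main

import (
	"fmt"
	"math"
)

type sample struct {
	Ts  int64
	Val float64
}

func main() {
	items := []sample{{60, 100}, {120, 220}, {180, 10}} // third sample: counter reset

	for i := 0; i+1 < len(items); i++ {
		rate := (items[i+1].Val - items[i].Val) / float64(items[i+1].Ts-items[i].Ts)
		if rate < 0 {
			rate = math.NaN() // counter went backwards: treat as unknown
		}
		fmt.Printf("ts=%d rate=%v\n", items[i].Ts, rate)
	}
}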
func QueryOne(start, end int64, cf, endpoint, counter string) (r *model.GraphQueryResponse, err error) {
	pool, err := selectPool(endpoint, counter)
	if err != nil {
		return nil, err
	}

	conn, err := pool.Get()
	if err != nil {
		return nil, err
	}

	rpc_conn := conn.(RpcConn)
	if rpc_conn.cli == nil {
		pool.CloseClean(conn)
		return nil, errors.New("nil rpc conn")
	}

	type ChResult struct {
		Err  error
		Resp *model.GraphQueryResponse
	}

	ch := make(chan *ChResult, 1)
	go func() {
		param := model.GraphQueryParam{
			Start:     start,
			End:       end,
			ConsolFun: cf,
			Endpoint:  endpoint,
			Counter:   counter,
		}
		resp := &model.GraphQueryResponse{}
		err := rpc_conn.cli.Call("Graph.Query", param, resp)
		r := &ChResult{
			Err:  err,
			Resp: resp,
		}
		ch <- r
	}()

	cfg := g.Config().Graph

	select {
	case r := <-ch:
		if r.Err != nil {
			pool.CloseClean(conn)
			return nil, r.Err
		} else {
			pool.Release(conn)
			logger.Trace("graph: query graph resp: %v, addr: %v", r.Resp, pool.Name)

			fixedResp := &model.GraphQueryResponse{
				Endpoint: r.Resp.Endpoint,
				Counter:  r.Resp.Counter,
				DsType:   r.Resp.DsType,
				Step:     r.Resp.Step,
			}

			size := len(r.Resp.Values)
			// NOTICE: the last point tends to be a bad one and gets filtered;
			// possibly an rrdtool bug.
			if size < 1 {
				return fixedResp, nil
			} else {
				dsType := r.Resp.DsType
				fixedValues := []*model.RRDData{}
				for _, v := range r.Resp.Values[0:size] {
					if v == nil {
						continue
					}
					if v.Timestamp < start || v.Timestamp > end {
						continue
					}
					// FIXME: filter out all negative values at query time,
					// because transfer used to set the rrd minimum to U (unknown).
					if (dsType == "DERIVE" || dsType == "COUNTER") && v.Value < 0 {
						fixedValues = append(fixedValues, &model.RRDData{
							Timestamp: v.Timestamp,
							Value:     model.JsonFloat(math.NaN()),
						})
					} else {
						fixedValues = append(fixedValues, v)
					}
				}
				fixedResp.Values = fixedValues
				return fixedResp, nil
			}
		}
	case <-time.After(time.Duration(cfg.Timeout) * time.Millisecond):
		pool.Release(conn) // the cmodel-based QueryOne variant force-closes the conn here instead
		logger.Trace("query graph timeout err: i/o timeout, addr: %v", pool.Name)
		return nil, errors.New("i/o timeout")
	}
}
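// A minimal sketch (simplified types, hypothetical data) of the fix-up pass
// shared by both QueryOne versions: points outside [start, end] are dropped,
// while negative DERIVE/COUNTER values keep their slot but are blanked to NaN
// so the series stays aligned.
package main

import (
	"fmt"
	"math"
)

type rrdData struct {
	Ts  int64
	Val float64
}

func fixValues(in []rrdData, start, end int64, dsType string) []rrdData {
	out := make([]rrdData, 0, len(in))
	for _, v := range in {
		if v.Ts < start || v.Ts > end {
			continue // outside the requested window
		}
		if (dsType == "DERIVE" || dsType == "COUNTER") && v.Val < 0 {
			out = append(out, rrdData{Ts: v.Ts, Val: math.NaN()}) // blank, don't drop
		} else {
			out = append(out, v)
		}
	}
	return out
}

func main() {
	in := []rrdData{{30, 1}, {60, -5}, {120, 2}, {999, 3}}
	fmt.Println(fixValues(in, 60, 180, "COUNTER")) // [{60 NaN} {120 2}]
}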
func (this *Graph) Query(param cmodel.GraphQueryParam, resp *cmodel.GraphQueryResponse) error {
	// statistics
	proc.GraphQueryCnt.Incr()

	// form empty response
	resp.Values = []*cmodel.RRDData{}
	resp.Endpoint = param.Endpoint
	resp.Counter = param.Counter

	dsType, step, exists := index.GetTypeAndStep(param.Endpoint, param.Counter) // complete dsType and step
	if !exists {
		return nil
	}
	resp.DsType = dsType
	resp.Step = step

	start_ts := param.Start - param.Start%int64(step)
	end_ts := param.End - param.End%int64(step) + int64(step)
	if end_ts-start_ts-int64(step) < 1 {
		return nil
	}

	md5 := cutils.Md5(param.Endpoint + "/" + param.Counter)
	ckey := g.FormRrdCacheKey(md5, dsType, step)
	filename := g.RrdFileName(g.Config().RRD.Storage, md5, dsType, step)

	// read data from rrd file
	datas, _ := rrdtool.Fetch(filename, param.ConsolFun, start_ts, end_ts, step)
	datas_size := len(datas)

	// read cached items
	items := store.GraphItems.FetchAll(ckey)
	items_size := len(items)

	nowTs := time.Now().Unix()
	lastUpTs := nowTs - nowTs%int64(step)
	rra1StartTs := lastUpTs - int64(rrdtool.RRA1PointCnt*step)

	// consolidated, do not merge
	if start_ts < rra1StartTs {
		resp.Values = datas
		goto _RETURN_OK
	}

	// no cached items, do not merge
	if items_size < 1 {
		resp.Values = datas
		goto _RETURN_OK
	}

	// merge
	{
		// fmt cached items
		var val cmodel.JsonFloat
		cache := make([]*cmodel.RRDData, 0)
		ts := items[0].Timestamp
		itemEndTs := items[items_size-1].Timestamp
		itemIdx := 0
		if dsType == g.DERIVE || dsType == g.COUNTER {
			for ts < itemEndTs {
				if itemIdx < items_size-1 && ts == items[itemIdx].Timestamp &&
					ts == items[itemIdx+1].Timestamp-int64(step) {
					val = cmodel.JsonFloat(items[itemIdx+1].Value-items[itemIdx].Value) / cmodel.JsonFloat(step)
					if val < 0 {
						val = cmodel.JsonFloat(math.NaN())
					}
					itemIdx++
				} else {
					// missing
					val = cmodel.JsonFloat(math.NaN())
				}

				if ts >= start_ts && ts <= end_ts {
					cache = append(cache, &cmodel.RRDData{Timestamp: ts, Value: val})
				}
				ts = ts + int64(step)
			}
		} else if dsType == g.GAUGE {
			for ts <= itemEndTs {
				if itemIdx < items_size && ts == items[itemIdx].Timestamp {
					val = cmodel.JsonFloat(items[itemIdx].Value)
					itemIdx++
				} else {
					// missing
					val = cmodel.JsonFloat(math.NaN())
				}

				if ts >= start_ts && ts <= end_ts {
					cache = append(cache, &cmodel.RRDData{Timestamp: ts, Value: val})
				}
				ts = ts + int64(step)
			}
		}
		cache_size := len(cache)

		// do merging
		merged := make([]*cmodel.RRDData, 0)
		if datas_size > 0 {
			for _, val := range datas {
				if val.Timestamp >= start_ts && val.Timestamp <= end_ts {
					merged = append(merged, val) // data returned by rrdtool has consecutive timestamps, no gaps
				}
			}
		}

		if cache_size > 0 {
			rrdDataSize := len(merged)
			lastTs := cache[0].Timestamp

			// find junction
			rrdDataIdx := 0
			for rrdDataIdx = rrdDataSize - 1; rrdDataIdx >= 0; rrdDataIdx-- {
				if merged[rrdDataIdx].Timestamp < cache[0].Timestamp {
					lastTs = merged[rrdDataIdx].Timestamp
					break
				}
			}

			// fix missing
			for ts := lastTs + int64(step); ts < cache[0].Timestamp; ts += int64(step) {
				merged = append(merged, &cmodel.RRDData{Timestamp: ts, Value: cmodel.JsonFloat(math.NaN())})
			}

			// merge cached items to result
			rrdDataIdx += 1
			for cacheIdx := 0; cacheIdx < cache_size; cacheIdx++ {
				if rrdDataIdx < rrdDataSize {
					if !math.IsNaN(float64(cache[cacheIdx].Value)) {
						merged[rrdDataIdx] = cache[cacheIdx]
					}
				} else {
					merged = append(merged, cache[cacheIdx])
				}
				rrdDataIdx++
			}
		}
		mergedSize := len(merged)

		// fmt result
		ret_size := int((end_ts - start_ts) / int64(step))
		ret := make([]*cmodel.RRDData, ret_size)
		mergedIdx := 0
		ts = start_ts
		for i := 0; i < ret_size; i++ {
			if mergedIdx < mergedSize && ts == merged[mergedIdx].Timestamp {
				ret[i] = merged[mergedIdx]
				mergedIdx++
			} else {
				ret[i] = &cmodel.RRDData{Timestamp: ts, Value: cmodel.JsonFloat(math.NaN())}
			}
			ts += int64(step)
		}
		resp.Values = ret
	}

_RETURN_OK:
	// statistics
	proc.GraphQueryItemCnt.IncrBy(int64(len(resp.Values)))
	return nil
}
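// A compact sketch (simplified types, hypothetical series) of the final
// formatting loop above: walk a fixed [start_ts, end_ts) grid in step
// increments, take the merged point when its timestamp matches the slot, and
// emit NaN otherwise, so the response is always dense and evenly spaced.
package main

import (
	"fmt"
	"math"
)

type rrdData struct {
	Ts  int64
	Val float64
}

func densify(merged []rrdData, start, end, step int64) []rrdData {
	ret := make([]rrdData, 0, (end-start)/step)
	idx := 0
	for ts := start; ts < end; ts += step {
		if idx < len(merged) && merged[idx].Ts == ts {
			ret = append(ret, merged[idx])
			idx++
		} else {
			ret = append(ret, rrdData{Ts: ts, Val: math.NaN()}) // gap in the grid
		}
	}
	return ret
}

func main() {
	merged := []rrdData{{60, 1}, {180, 3}}    // the 120 slot is missing
	fmt.Println(densify(merged, 60, 240, 60)) // [{60 1} {120 NaN} {180 3}]
}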