func fetch(filename string, cf string, start, end int64, step int) ([]*cmodel.RRDData, error) { start_t := time.Unix(start, 0) end_t := time.Unix(end, 0) step_t := time.Duration(step) * time.Second fetchRes, err := rrdlite.Fetch(filename, cf, start_t, end_t, step_t) if err != nil { return []*cmodel.RRDData{}, err } defer fetchRes.FreeValues() values := fetchRes.Values() size := len(values) ret := make([]*cmodel.RRDData, size) start_ts := fetchRes.Start.Unix() step_s := fetchRes.Step.Seconds() for i, val := range values { ts := start_ts + int64(i+1)*int64(step_s) d := &cmodel.RRDData{ Timestamp: ts, Value: cmodel.JsonFloat(val), } ret[i] = d } return ret, nil }
func makeOneFakeResult(body GraphHistoryParam) *cmodel.GraphQueryResponse { for _, ec := range body.EndpointCounters { if !strings.Contains(ec.Counter, "packet-loss-rate") && !strings.Contains(ec.Counter, "average") { fakeResult := graphQueryOne(ec, body, "", "") for i := range fakeResult.Values { fakeResult.Values[i].Value = cmodel.JsonFloat(0.0) } return fakeResult } } return nil }
func nqmData(body GraphHistoryParam, nqmDataCounter string, rawDataCounter string) []*cmodel.GraphQueryResponse { result := makeOneFakeResult(body) if result == nil { return nil } data := []*cmodel.GraphQueryResponse{} packetSentCount := make([]cmodel.JsonFloat, len(result.Values)) for i := range result.Values { packetSentCount[i] = cmodel.JsonFloat(0.0) } for _, ec := range body.EndpointCounters { if !strings.Contains(ec.Counter, rawDataCounter) { continue } resultRaw := graphQueryOne(ec, body, "", "") if resultRaw == nil { continue } data = append(data, resultRaw) if rawDataCounter == "packets-sent" { counter := strings.Replace(ec.Counter, "packets-sent", "packets-received", 1) if resultAdditional := graphQueryOne(ec, body, "", counter); resultAdditional != nil { data = append(data, resultAdditional) for i := range resultRaw.Values { packetLossCount := (resultRaw.Values[i].Value - resultAdditional.Values[i].Value) result.Values[i].Value += packetLossCount packetSentCount[i] += resultRaw.Values[i].Value } } } else if rawDataCounter == "transmission-time" { counter := strings.Replace(ec.Counter, "transmission-time", "packets-sent", 1) if resultAdditional := graphQueryOne(ec, body, "", counter); resultAdditional != nil { data = append(data, resultAdditional) for i := range resultAdditional.Values { result.Values[i].Value += resultAdditional.Values[i].Value * resultRaw.Values[i].Value packetSentCount[i] += resultAdditional.Values[i].Value } } } } for i := range result.Values { result.Values[i].Value = result.Values[i].Value / packetSentCount[i] } result.Endpoint = "all-endpoints" result.Counter = nqmDataCounter data = append(data, result) return data }
// QueryOne issues a single Graph.Query RPC for the endpoint/counter in para,
// selecting a backend via the consistent pool, enforcing the configured call
// timeout, and post-filtering the returned points to the requested time range.
// On timeout or RPC error the connection is force-closed; on success it is
// released back to the pool.
func QueryOne(para cmodel.GraphQueryParam) (resp *cmodel.GraphQueryResponse, err error) {
	// select backend
	start, end := para.Start, para.End
	endpoint, counter := para.Endpoint, para.Counter

	pool, addr, err := selectPool(endpoint, counter)
	if err != nil {
		return nil, err
	}

	conn, err := pool.Fetch()
	if err != nil {
		return nil, err
	}

	rpcConn := conn.(spool.RpcClient)
	if rpcConn.Closed() {
		// stale connection: drop it rather than returning it to the pool
		pool.ForceClose(conn)
		return nil, errors.New("conn closed")
	}

	type ChResult struct {
		Err  error
		Resp *cmodel.GraphQueryResponse
	}

	// Buffered (size 1) so the RPC goroutine can always send and exit,
	// even if the timeout branch below wins the select.
	ch := make(chan *ChResult, 1)
	go func() {
		resp := &cmodel.GraphQueryResponse{}
		err := rpcConn.Call("Graph.Query", para, resp)
		ch <- &ChResult{Err: err, Resp: resp}
	}()

	select {
	case <-time.After(time.Duration(g.Config().Graph.CallTimeout) * time.Millisecond):
		// timed out: the connection may still be mid-call, so force-close it
		pool.ForceClose(conn)
		return nil, fmt.Errorf("%s, call timeout. proc: %s", addr, pool.Proc())
	case r := <-ch:
		if r.Err != nil {
			pool.ForceClose(conn)
			return r.Resp, fmt.Errorf("%s, call failed, err %v. proc: %s", addr, r.Err, pool.Proc())
		} else {
			pool.Release(conn)

			if len(r.Resp.Values) < 1 {
				r.Resp.Values = []*cmodel.RRDData{}
				return r.Resp, nil
			}

			// TODO: query should not have to do this; it means graph is not
			// doing its job properly.
			fixed := []*cmodel.RRDData{}
			for _, v := range r.Resp.Values {
				// drop nil points and points outside the requested window
				if v == nil || !(v.Timestamp >= start && v.Timestamp <= end) {
					continue
				}
				// FIXME: when querying data, filter out all negative values,
				// because transfer previously set the minimum value to U.
				if (r.Resp.DsType == "DERIVE" || r.Resp.DsType == "COUNTER") && v.Value < 0 {
					fixed = append(fixed, &cmodel.RRDData{Timestamp: v.Timestamp, Value: cmodel.JsonFloat(math.NaN())})
				} else {
					fixed = append(fixed, v)
				}
			}
			r.Resp.Values = fixed
		}
		return r.Resp, nil
	}
}
// Query answers a graph RPC query by combining data from the local RRD file
// (or a remote node during migration) with not-yet-flushed cached items, then
// normalizing the result onto a fixed [start_ts, end_ts) grid of step-sized
// slots, filling gaps with NaN.
func (this *Graph) Query(param cmodel.GraphQueryParam, resp *cmodel.GraphQueryResponse) error {
	var (
		datas      []*cmodel.RRDData
		datas_size int
	)

	// statistics
	proc.GraphQueryCnt.Incr()

	cfg := g.Config()

	// form empty response
	resp.Values = []*cmodel.RRDData{}
	resp.Endpoint = param.Endpoint
	resp.Counter = param.Counter
	// complete dsType and step
	dsType, step, exists := index.GetTypeAndStep(param.Endpoint, param.Counter)
	// honor an explicit step from the caller, otherwise use the indexed one
	qstep := step
	if param.Step != 0 {
		qstep = param.Step
	}
	if !exists {
		return nil
	}
	resp.DsType = dsType
	resp.Step = qstep

	// align the requested window to step boundaries (end rounds up)
	start_ts := param.Start - param.Start%int64(step)
	end_ts := param.End - param.End%int64(step) + int64(step)
	// window must cover at least one step
	if end_ts-start_ts-int64(step) < 1 {
		return nil
	}

	md5 := cutils.Md5(param.Endpoint + "/" + param.Counter)
	key := g.FormRrdCacheKey(md5, dsType, step)
	filename := g.RrdFileName(cfg.RRD.Storage, md5, dsType, step)

	// read cached items
	items, flag := store.GraphItems.FetchAll(key)
	items_size := len(items)

	if cfg.Migrate.Enabled && flag&g.GRAPH_F_MISS != 0 {
		// migration in progress and the rrd file is missing locally:
		// ask the owning node for the data over the net-task channel
		node, _ := rrdtool.Consistent.Get(param.Endpoint + "/" + param.Counter)
		done := make(chan error, 1)
		res := &cmodel.GraphAccurateQueryResponse{}
		rrdtool.Net_task_ch[node] <- &rrdtool.Net_task_t{
			Method: rrdtool.NET_TASK_M_QUERY,
			Done:   done,
			Args:   param,
			Reply:  res,
		}
		<-done
		// fetch data from remote
		datas = res.Values
		datas_size = len(datas)
	} else {
		// read data from rrd file
		datas, _ = rrdtool.Fetch(filename, param.ConsolFun, start_ts, end_ts, qstep)
		datas_size = len(datas)
	}

	nowTs := time.Now().Unix()
	lastUpTs := nowTs - nowTs%int64(step)
	// earliest timestamp still covered by the highest-resolution RRA
	rra1StartTs := lastUpTs - int64(rrdtool.RRA1PointCnt*step)

	// consolidated, do not merge
	if start_ts < rra1StartTs {
		resp.Values = datas
		goto _RETURN_OK
	}

	// no cached items, do not merge
	if items_size < 1 {
		resp.Values = datas
		goto _RETURN_OK
	}

	// merge
	{
		// fmt cached items: re-sample the raw cached items onto the step
		// grid, deriving rates for DERIVE/COUNTER and copying for GAUGE
		var val cmodel.JsonFloat
		cache := make([]*cmodel.RRDData, 0)

		ts := items[0].Timestamp
		itemEndTs := items[items_size-1].Timestamp
		itemIdx := 0
		if dsType == g.DERIVE || dsType == g.COUNTER {
			for ts < itemEndTs {
				// a rate needs two adjacent samples exactly one step apart
				if itemIdx < items_size-1 && ts == items[itemIdx].Timestamp &&
					ts == items[itemIdx+1].Timestamp-int64(step) {
					val = cmodel.JsonFloat(items[itemIdx+1].Value-items[itemIdx].Value) / cmodel.JsonFloat(step)
					// counters never go negative; treat a decrease as missing
					if val < 0 {
						val = cmodel.JsonFloat(math.NaN())
					}
					itemIdx++
				} else {
					// missing
					val = cmodel.JsonFloat(math.NaN())
				}

				if ts >= start_ts && ts <= end_ts {
					cache = append(cache, &cmodel.RRDData{Timestamp: ts, Value: val})
				}
				ts = ts + int64(step)
			}
		} else if dsType == g.GAUGE {
			for ts <= itemEndTs {
				if itemIdx < items_size && ts == items[itemIdx].Timestamp {
					val = cmodel.JsonFloat(items[itemIdx].Value)
					itemIdx++
				} else {
					// missing
					val = cmodel.JsonFloat(math.NaN())
				}

				if ts >= start_ts && ts <= end_ts {
					cache = append(cache, &cmodel.RRDData{Timestamp: ts, Value: val})
				}
				ts = ts + int64(step)
			}
		}
		cache_size := len(cache)

		// do merging
		merged := make([]*cmodel.RRDData, 0)
		if datas_size > 0 {
			for _, val := range datas {
				if val.Timestamp >= start_ts && val.Timestamp <= end_ts {
					// data returned by rrdtool has contiguous timestamps
					// with no gaps
					merged = append(merged, val)
				}
			}
		}

		if cache_size > 0 {
			rrdDataSize := len(merged)
			lastTs := cache[0].Timestamp

			// find junction: last rrd point strictly before the cache begins
			rrdDataIdx := 0
			for rrdDataIdx = rrdDataSize - 1; rrdDataIdx >= 0; rrdDataIdx-- {
				if merged[rrdDataIdx].Timestamp < cache[0].Timestamp {
					lastTs = merged[rrdDataIdx].Timestamp
					break
				}
			}

			// fix missing: pad NaN points between the junction and the cache
			for ts := lastTs + int64(step); ts < cache[0].Timestamp; ts += int64(step) {
				merged = append(merged, &cmodel.RRDData{Timestamp: ts, Value: cmodel.JsonFloat(math.NaN())})
			}

			// merge cached items to result; cached non-NaN values overwrite
			// overlapping rrd points, the rest are appended
			rrdDataIdx += 1
			for cacheIdx := 0; cacheIdx < cache_size; cacheIdx++ {
				if rrdDataIdx < rrdDataSize {
					if !math.IsNaN(float64(cache[cacheIdx].Value)) {
						merged[rrdDataIdx] = cache[cacheIdx]
					}
				} else {
					merged = append(merged, cache[cacheIdx])
				}
				rrdDataIdx++
			}
		}
		mergedSize := len(merged)

		// fmt result: project merged points onto the fixed output grid,
		// one slot per step, NaN where nothing matched
		ret_size := int((end_ts - start_ts) / int64(step))
		ret := make([]*cmodel.RRDData, ret_size, ret_size)
		mergedIdx := 0
		ts = start_ts
		for i := 0; i < ret_size; i++ {
			if mergedIdx < mergedSize && ts == merged[mergedIdx].Timestamp {
				ret[i] = merged[mergedIdx]
				mergedIdx++
			} else {
				ret[i] = &cmodel.RRDData{Timestamp: ts, Value: cmodel.JsonFloat(math.NaN())}
			}
			ts += int64(step)
		}
		resp.Values = ret
	}

_RETURN_OK:
	// statistics
	proc.GraphQueryItemCnt.IncrBy(int64(len(resp.Values)))
	return nil
}