func (p *ConnPool) Get() (conn io.Closer, err error) {
	// optionally record how long it takes to obtain a connection
	if slowLogEnabled {
		start_t := time.Now()
		defer func() {
			end_t := time.Now()
			diff := float64(end_t.UnixNano()-start_t.UnixNano()) / 1000000
			if diff >= float64(slowLogLimit) {
				logger.Debug("%s get conn from pool cost too much, duration: %f ms, pool: %+v", LOG_TAG, diff, p)
			}
		}()
	}

	p.Lock()
	if p.conns >= p.MaxConns && len(p.free) == 0 {
		p.Unlock()
		return nil, ErrMaxConn
	}

	new_conn := false
	if len(p.free) > 0 {
		// return the first free connection in the pool
		conn = p.free[0]
		p.free = p.free[1:]
	} else {
		conn, err = p.New()
		if err != nil {
			p.Unlock()
			return nil, err
		}
		new_conn = true
	}
	p.Unlock()

	// verify the connection is still usable before handing it out
	err = p.Ping(conn)
	if err != nil {
		p.Lock()
		logger.Error("%s ping conn fail: %v, pool: %+v", LOG_TAG, err, p)
		if !new_conn && p.conns > 0 {
			p.conns -= 1
		}
		p.Unlock()
		conn.Close()
		return nil, err
	}

	if new_conn {
		p.Lock()
		p.conns += 1
		logger.Trace("%s open new conn: %v, pool: %+v", LOG_TAG, conn, p)
		p.Unlock()
	} else {
		logger.Trace("%s get existent conn: %v, pool: %+v", LOG_TAG, conn, p)
	}
	return conn, nil
}
func (p *ConnPool) Release(conn io.Closer) error {
	p.Lock()
	if len(p.free) >= p.MaxIdle {
		logger.Trace("%s auto close conn: %v, pool: %+v", LOG_TAG, conn, p)
		if conn != nil {
			conn.Close()
		}
		p.conns -= 1
	} else {
		p.free = append(p.free, conn)
	}
	logger.Trace("%s release conn: %v, pool: %+v", LOG_TAG, conn, p)
	p.Unlock()
	return nil
}
func (p *ConnPool) Destroy() {
	p.Lock()
	defer p.Unlock()
	for _, conn := range p.free {
		if conn != nil {
			logger.Trace("%s destroy conn: %v, pool: %+v", LOG_TAG, conn, p)
			conn.Close()
		}
	}
	// reset the pool state; note that assigning nil to the receiver itself
	// (p = nil) only clears the local copy and has no effect on callers
	p.free = nil
	p.conns = 0
}
func (p *ConnPool) CloseClean(conn io.Closer) error {
	if conn != nil {
		conn.Close()
	}
	p.Lock()
	if p.conns > 0 {
		p.conns -= 1
	}
	logger.Trace("%s close_clean conn: %v, pool: %+v", LOG_TAG, conn, p)
	p.Unlock()
	return nil
}
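// The ConnPool type and its slow-log switches are not shown in this section.
// The declarations below are a minimal sketch inferred from how the methods
// above use them, not the original definitions: the field names match the
// usage, while the choice of sync.Mutex, the slowLogLimit type, and the
// EnableSlowLog/NewConnPool signatures are assumptions.
type ConnPool struct {
	sync.Mutex

	Name     string // backend address the pool dials, e.g. "host:port"
	MaxConns int    // Get returns ErrMaxConn once this many conns are open and none are free
	MaxIdle  int    // Release closes connections beyond this many idle ones

	New  func() (io.Closer, error)  // dials and wraps a new connection
	Ping func(conn io.Closer) error // health-checks a connection before it is handed out

	conns int         // connections currently accounted for by the pool
	free  []io.Closer // idle connections available for reuse
}

var (
	// ErrMaxConn is returned by Get when the pool is saturated.
	ErrMaxConn = errors.New("maximum connections reached")

	slowLogEnabled bool
	slowLogLimit   int // threshold in milliseconds for the slow log in Get
)

// EnableSlowLog turns on slow-log reporting in Get; the signature is assumed
// from the call conn_pool.EnableSlowLog(true, cfg.SlowLog) in initConnPools.
func EnableSlowLog(enabled bool, limit int) {
	slowLogEnabled = enabled
	slowLogLimit = limit
}

// NewConnPool is assumed from the call conn_pool.NewConnPool(addr, c.MaxConns, c.MaxIdle);
// callers are expected to set New and Ping before the first Get.
func NewConnPool(name string, maxConns, maxIdle int) *ConnPool {
	return &ConnPool{Name: name, MaxConns: maxConns, MaxIdle: maxIdle}
}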
func initConnPools() error {
	cfg := g.Config()
	if cfg.LogLevel == "trace" || cfg.LogLevel == "debug" {
		conn_pool.EnableSlowLog(true, cfg.SlowLog)
	}

	var (
		tmp_addrs map[string][]string
		tmp_pools map[string]*conn_pool.ConnPool
	)
	backend.RLock()
	tmp_addrs = backend.Addrs
	tmp_pools = backend.Pools
	backend.RUnlock()

	c := cfg.Graph
	for name, addr_list := range tmp_addrs {
		for _, addr := range addr_list {
			if _, ok := tmp_pools[addr]; !ok {
				pool := conn_pool.NewConnPool(addr, c.MaxConns, c.MaxIdle)
				pool.New = func() (io.Closer, error) {
					// validate the address before dialing; the resolved address itself is discarded
					_, err := net.ResolveTCPAddr("tcp", pool.Name)
					if err != nil {
						return nil, err
					}
					conn, err := net.DialTimeout("tcp", pool.Name, time.Duration(c.Timeout)*time.Millisecond)
					if err != nil {
						return nil, err
					}
					return RpcConn{rpc.NewClient(conn)}, nil
				}
				pool.Ping = func(conn io.Closer) error {
					rpc_conn := conn.(RpcConn)
					if rpc_conn.cli == nil {
						return errors.New("nil conn")
					}
					resp := &model.SimpleRpcResponse{}
					err := rpc_conn.cli.Call("Graph.Ping", model.NullRpcRequest{}, resp)
					logger.Trace("Graph.Ping resp: %v", resp)
					return err
				}
				tmp_pools[addr] = pool
				logger.Info("create the pool: %s %s", name, addr)
			} else {
				logger.Trace("keep the pool: %s %s", name, addr)
			}
		}
	}

	backend.Lock()
	defer backend.Unlock()
	backend.Pools = tmp_pools
	return nil
}
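// RpcConn is referenced above but not defined in this section. Below is a
// minimal sketch consistent with its usage (constructed as
// RpcConn{rpc.NewClient(conn)} and called through rpc_conn.cli.Call): a single
// *rpc.Client field plus a Close method so the wrapper satisfies io.Closer and
// can live in the pool. This is an assumption inferred from usage, not the
// original definition.
type RpcConn struct {
	cli *rpc.Client
}

// Close shuts down the underlying net/rpc client, tolerating a nil client.
func (c RpcConn) Close() error {
	if c.cli != nil {
		return c.cli.Close()
	}
	return nil
}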
func Info(endpoint, counter string) (r *model.GraphFullyInfo, err error) {
	pool, err := selectPool(endpoint, counter)
	if err != nil {
		return nil, err
	}

	conn, err := pool.Get()
	if err != nil {
		return nil, err
	}

	rpc_conn := conn.(RpcConn)
	if rpc_conn.cli == nil {
		pool.CloseClean(conn)
		return nil, errors.New("nil rpc conn")
	}

	type ChResult struct {
		Err  error
		Resp *model.GraphInfoResp
	}
	// buffered so the RPC goroutine can still send and exit after a timeout
	ch := make(chan *ChResult, 1)
	go func() {
		param := model.GraphInfoParam{
			Endpoint: endpoint,
			Counter:  counter,
		}
		resp := &model.GraphInfoResp{}
		err := rpc_conn.cli.Call("Graph.Info", param, resp)
		r := &ChResult{
			Err:  err,
			Resp: resp,
		}
		ch <- r
	}()

	cfg := g.Config().Graph
	select {
	case r := <-ch:
		if r.Err != nil {
			pool.CloseClean(conn)
			return nil, r.Err
		} else {
			pool.Release(conn)
			logger.Trace("graph.info resp: %v, addr: %v", r.Resp, pool.Name)
			fullyInfo := model.GraphFullyInfo{
				Endpoint:  endpoint,
				Counter:   counter,
				ConsolFun: r.Resp.ConsolFun,
				Step:      r.Resp.Step,
				Filename:  r.Resp.Filename,
				Addr:      pool.Name,
			}
			return &fullyInfo, nil
		}
	case <-time.After(time.Duration(cfg.Timeout) * time.Millisecond):
		pool.Release(conn)
		logger.Trace("graph.info timeout err: i/o timeout, addr: %v", pool.Name)
		return nil, errors.New("i/o timeout")
	}
}
func QueryOne(start, end int64, cf, endpoint, counter string) (r *model.GraphQueryResponse, err error) {
	pool, err := selectPool(endpoint, counter)
	if err != nil {
		return nil, err
	}

	conn, err := pool.Get()
	if err != nil {
		return nil, err
	}

	rpc_conn := conn.(RpcConn)
	if rpc_conn.cli == nil {
		pool.CloseClean(conn)
		return nil, errors.New("nil rpc conn")
	}

	type ChResult struct {
		Err  error
		Resp *model.GraphQueryResponse
	}
	// buffered so the RPC goroutine can still send and exit after a timeout
	ch := make(chan *ChResult, 1)
	go func() {
		param := model.GraphQueryParam{
			Start:     start,
			End:       end,
			ConsolFun: cf,
			Endpoint:  endpoint,
			Counter:   counter,
		}
		resp := &model.GraphQueryResponse{}
		err := rpc_conn.cli.Call("Graph.Query", param, resp)
		r := &ChResult{
			Err:  err,
			Resp: resp,
		}
		ch <- r
	}()

	cfg := g.Config().Graph
	select {
	case r := <-ch:
		if r.Err != nil {
			pool.CloseClean(conn)
			return nil, r.Err
		} else {
			pool.Release(conn)
			logger.Trace("graph: query graph resp: %v, addr: %v", r.Resp, pool.Name)

			fixedResp := &model.GraphQueryResponse{
				Endpoint: r.Resp.Endpoint,
				Counter:  r.Resp.Counter,
				DsType:   r.Resp.DsType,
				Step:     r.Resp.Step,
			}

			size := len(r.Resp.Values)
			// NOTICE: the last point may be a bad point and needs filtering,
			// possibly due to an rrdtool bug
			if size < 1 {
				return fixedResp, nil
			} else {
				dsType := r.Resp.DsType
				fixedValues := []*model.RRDData{}
				for _, v := range r.Resp.Values[0:size] {
					if v == nil {
						continue
					}
					if v.Timestamp < start || v.Timestamp > end {
						continue
					}
					// FIXME: filter out all negative values at query time, because
					// transfer previously set the minimum value to U (unknown)
					if (dsType == "DERIVE" || dsType == "COUNTER") && v.Value < 0 {
						fixedValues = append(fixedValues, &model.RRDData{
							Timestamp: v.Timestamp,
							Value:     model.JsonFloat(math.NaN()),
						})
					} else {
						fixedValues = append(fixedValues, v)
					}
				}
				fixedResp.Values = fixedValues
				return fixedResp, nil
			}
		}
	case <-time.After(time.Duration(cfg.Timeout) * time.Millisecond):
		pool.Release(conn)
		logger.Trace("query graph timeout err: i/o timeout, addr: %v", pool.Name)
		return nil, errors.New("i/o timeout")
	}
}
func configGraphRoutes() {
	// method:post
	http.HandleFunc("/graph/history", func(w http.ResponseWriter, r *http.Request) {
		var body GraphHistoryParam
		decoder := json.NewDecoder(r.Body)
		err := decoder.Decode(&body)
		if err != nil {
			StdRender(w, "", err)
			return
		}

		if len(body.EndpointCounters) == 0 {
			StdRender(w, "", errors.New("empty_payload"))
			return
		}

		data := []*model.GraphQueryResponse{}
		for _, ec := range body.EndpointCounters {
			result, err := graph.QueryOne(int64(body.Start), int64(body.End), body.CF, ec.Endpoint, ec.Counter)
			if err != nil {
				logger.Error("query one fail: %v", err)
			}
			data = append(data, result)
		}
		StdRender(w, data, nil)
	})

	// method:get
	http.HandleFunc("/graph/history/one", func(w http.ResponseWriter, r *http.Request) {
		start := r.FormValue("start")
		end := r.FormValue("end")
		cf := r.FormValue("cf")
		endpoint := r.FormValue("endpoint")
		counter := r.FormValue("counter")

		if endpoint == "" || counter == "" {
			StdRender(w, "", errors.New("empty_endpoint_counter"))
			return
		}
		if cf != "AVERAGE" && cf != "MAX" && cf != "MIN" {
			StdRender(w, "", errors.New("invalid_cf"))
			return
		}

		now := time.Now()
		start_i64, err := strconv.ParseInt(start, 10, 64)
		if err != nil {
			start_i64 = now.Unix() - 3600
		}
		end_i64, err := strconv.ParseInt(end, 10, 64)
		if err != nil {
			end_i64 = now.Unix()
		}

		result, err := graph.QueryOne(start_i64, end_i64, cf, endpoint, counter)
		logger.Trace("query one result: %v, err: %v", result, err)
		if err != nil {
			StdRender(w, "", err)
			return
		}
		StdRender(w, result, nil)
	})

	// get, info
	http.HandleFunc("/graph/info/one", func(w http.ResponseWriter, r *http.Request) {
		endpoint := r.FormValue("endpoint")
		counter := r.FormValue("counter")

		if endpoint == "" || counter == "" {
			StdRender(w, "", errors.New("empty_endpoint_counter"))
			return
		}

		result, err := graph.Info(endpoint, counter)
		logger.Trace("graph.info result: %v, err: %v", result, err)
		if err != nil {
			StdRender(w, "", err)
			return
		}
		StdRender(w, result, nil)
	})

	// post, info
	http.HandleFunc("/graph/info", func(w http.ResponseWriter, r *http.Request) {
		var body []*model.GraphInfoParam
		decoder := json.NewDecoder(r.Body)
		err := decoder.Decode(&body)
		if err != nil {
			StdRender(w, "", err)
			return
		}

		if len(body) == 0 {
			StdRender(w, "", errors.New("empty_payload"))
			return
		}

		data := []*model.GraphFullyInfo{}
		for _, param := range body {
			info, err := graph.Info(param.Endpoint, param.Counter)
			if err != nil {
				logger.Trace("graph.info fail, resp: %v, err: %v", info, err)
			} else {
				logger.Trace("graph.info result: %v, err: %v", info, err)
			}
			data = append(data, info)
		}
		StdRender(w, data, nil)
	})

	// post, last
	http.HandleFunc("/graph/last", func(w http.ResponseWriter, r *http.Request) {
		var body []*model.GraphLastParam
		decoder := json.NewDecoder(r.Body)
		err := decoder.Decode(&body)
		if err != nil {
			StdRender(w, "", err)
			return
		}

		if len(body) == 0 {
			StdRender(w, "", errors.New("empty_payload"))
			return
		}

		data := []*model.GraphLastResp{}
		for _, param := range body {
			last, err := graph.Last(param.Endpoint, param.Counter)
			if err != nil {
				logger.Trace("graph.last fail, resp: %v, err: %v", last, err)
				continue
			}
			data = append(data, last)
		}
		StdRender(w, data, nil)
	})
}
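// Example (sketch): exercising the GET route /graph/history/one registered
// above from a standalone client. The listen address 127.0.0.1:9966, the
// endpoint/counter values, and the shape of the rendered response are
// assumptions, not taken from this section; adjust them to the actual HTTP
// config before running.
//
// package main
//
// import (
// 	"fmt"
// 	"io"
// 	"log"
// 	"net/http"
// 	"net/url"
// 	"strconv"
// 	"time"
// )
//
// func main() {
// 	now := time.Now().Unix()
// 	params := url.Values{}
// 	params.Set("endpoint", "host01")  // hypothetical endpoint name
// 	params.Set("counter", "cpu.idle") // hypothetical counter name
// 	params.Set("cf", "AVERAGE")       // must be AVERAGE, MAX or MIN
// 	params.Set("start", strconv.FormatInt(now-3600, 10))
// 	params.Set("end", strconv.FormatInt(now, 10))
//
// 	resp, err := http.Get("http://127.0.0.1:9966/graph/history/one?" + params.Encode())
// 	if err != nil {
// 		log.Fatal(err)
// 	}
// 	defer resp.Body.Close()
//
// 	body, err := io.ReadAll(resp.Body)
// 	if err != nil {
// 		log.Fatal(err)
// 	}
// 	fmt.Println(string(body))
// }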