Example #1
File: cop_handler.go  Project: pingcap/tidb
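// handleCopRequest serves a coprocessor request: for Select and Index requests it
// unmarshals the tipb.SelectRequest, builds a selectContext with the columns needed
// by the where clause and the aggregates, fetches the result chunks, and marshals
// the tipb.SelectResponse into the coprocessor.Response.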
func (h *rpcHandler) handleCopRequest(req *coprocessor.Request) (*coprocessor.Response, error) {
	resp := &coprocessor.Response{}
	if err := h.checkContext(req.GetContext()); err != nil {
		resp.RegionError = err
		return resp, nil
	}
	if len(req.Ranges) == 0 {
		return resp, nil
	}
	if req.GetTp() == kv.ReqTypeSelect || req.GetTp() == kv.ReqTypeIndex {
		sel := new(tipb.SelectRequest)
		err := proto.Unmarshal(req.Data, sel)
		if err != nil {
			return nil, errors.Trace(err)
		}
		ctx := &selectContext{
			sel:       sel,
			keyRanges: req.Ranges,
			sc:        xeval.FlagsToStatementContext(sel.Flags),
		}
		ctx.eval = xeval.NewEvaluator(ctx.sc)
		if sel.Where != nil {
			ctx.whereColumns = make(map[int64]*tipb.ColumnInfo)
			collectColumnsInExpr(sel.Where, ctx, ctx.whereColumns)
		}
		ctx.aggregate = len(sel.Aggregates) > 0 || len(sel.GetGroupBy()) > 0
		if ctx.aggregate {
			// compose aggregateFuncExpr
			ctx.aggregates = make([]*aggregateFuncExpr, 0, len(sel.Aggregates))
			ctx.aggColumns = make(map[int64]*tipb.ColumnInfo)
			for _, agg := range sel.Aggregates {
				aggExpr := &aggregateFuncExpr{expr: agg}
				ctx.aggregates = append(ctx.aggregates, aggExpr)
				collectColumnsInExpr(agg, ctx, ctx.aggColumns)
			}
			ctx.groups = make(map[string]bool)
			ctx.groupKeys = make([][]byte, 0)
			for _, item := range ctx.sel.GetGroupBy() {
				collectColumnsInExpr(item.Expr, ctx, ctx.aggColumns)
			}
			for k := range ctx.whereColumns {
				// It will be handled in where.
				delete(ctx.aggColumns, k)
			}
		}

		var chunks []tipb.Chunk
		if req.GetTp() == kv.ReqTypeSelect {
			chunks, err = h.getChunksFromSelectReq(ctx)
		} else {
			// The PKHandle column info has been collected in ctx, so we can remove it from IndexInfo.
			length := len(sel.IndexInfo.Columns)
			if sel.IndexInfo.Columns[length-1].GetPkHandle() {
				sel.IndexInfo.Columns = sel.IndexInfo.Columns[:length-1]
			}
			chunks, err = h.getChunksFromIndexReq(ctx)
		}

		selResp := new(tipb.SelectResponse)
		selResp.Error = toPBError(err)
		selResp.Chunks = chunks
		if err != nil {
			if locked, ok := errors.Cause(err).(*ErrLocked); ok {
				resp.Locked = &kvrpcpb.LockInfo{
					Key:         locked.Key,
					PrimaryLock: locked.Primary,
					LockVersion: locked.StartTS,
					LockTtl:     locked.TTL,
				}
			} else {
				resp.OtherError = err.Error()
			}
		}
		data, err := proto.Marshal(selResp)
		if err != nil {
			return nil, errors.Trace(err)
		}
		resp.Data = data
	}
	return resp, nil
}
Example #2
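// Handle executes a region request against the local store: it decodes the
// tipb.SelectRequest, builds a selectContext with the columns needed by the where
// clause, top-N, and aggregates, collects the rows, and returns the marshaled
// tipb.SelectResponse. It also returns the region's current key range when it
// extends beyond the requested range.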
func (rs *localRegion) Handle(req *regionRequest) (*regionResponse, error) {
	resp := &regionResponse{
		req: req,
	}
	if req.Tp == kv.ReqTypeSelect || req.Tp == kv.ReqTypeIndex {
		sel := new(tipb.SelectRequest)
		err := proto.Unmarshal(req.data, sel)
		if err != nil {
			return nil, errors.Trace(err)
		}
		txn := newTxn(rs.store, kv.Version{Ver: uint64(sel.StartTs)})
		ctx := &selectContext{
			sel:       sel,
			txn:       txn,
			keyRanges: req.ranges,
			sc:        xeval.FlagsToStatementContext(sel.Flags),
		}
		ctx.eval = xeval.NewEvaluator(ctx.sc)
		if sel.Where != nil {
			ctx.whereColumns = make(map[int64]*tipb.ColumnInfo)
			collectColumnsInExpr(sel.Where, ctx, ctx.whereColumns)
		}
		if len(sel.OrderBy) > 0 {
			if sel.OrderBy[0].Expr == nil {
				ctx.descScan = sel.OrderBy[0].Desc
			} else {
				if sel.Limit == nil {
					return nil, errors.New("we don't support pushing down Sort without Limit")
				}
				ctx.topn = true
				ctx.topnHeap = &topnHeap{
					totalCount: int(*sel.Limit),
					topnSorter: topnSorter{
						orderByItems: sel.OrderBy,
						ctx:          ctx,
					},
				}
				ctx.topnColumns = make(map[int64]*tipb.ColumnInfo)
				for _, item := range sel.OrderBy {
					collectColumnsInExpr(item.Expr, ctx, ctx.topnColumns)
				}
				for k := range ctx.whereColumns {
					// It will be handled in where.
					delete(ctx.topnColumns, k)
				}
			}
		}
		ctx.aggregate = len(sel.Aggregates) > 0 || len(sel.GetGroupBy()) > 0
		if ctx.aggregate {
			// compose aggregateFuncExpr
			ctx.aggregates = make([]*aggregateFuncExpr, 0, len(sel.Aggregates))
			ctx.aggColumns = make(map[int64]*tipb.ColumnInfo)
			for _, agg := range sel.Aggregates {
				aggExpr := &aggregateFuncExpr{expr: agg}
				ctx.aggregates = append(ctx.aggregates, aggExpr)
				collectColumnsInExpr(agg, ctx, ctx.aggColumns)
			}
			ctx.groups = make(map[string]bool)
			ctx.groupKeys = make([][]byte, 0)
			for _, item := range ctx.sel.GetGroupBy() {
				collectColumnsInExpr(item.Expr, ctx, ctx.aggColumns)
			}
			for k := range ctx.whereColumns {
				// It will be handled in where.
				delete(ctx.aggColumns, k)
			}
		}
		if req.Tp == kv.ReqTypeSelect {
			err = rs.getRowsFromSelectReq(ctx)
		} else {
			// The PKHandle column info has been collected in ctx, so we can remove it from IndexInfo.
			length := len(sel.IndexInfo.Columns)
			if sel.IndexInfo.Columns[length-1].GetPkHandle() {
				sel.IndexInfo.Columns = sel.IndexInfo.Columns[:length-1]
			}
			err = rs.getRowsFromIndexReq(ctx)
		}
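		// Rows kept by the top-N heap during the scan are copied into ctx.chunks here.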
		if ctx.topn {
			rs.setTopNDataForCtx(ctx)
		}
		selResp := new(tipb.SelectResponse)
		selResp.Error = toPBError(err)
		selResp.Chunks = ctx.chunks
		resp.err = err
		data, err := proto.Marshal(selResp)
		if err != nil {
			return nil, errors.Trace(err)
		}
		resp.data = data
	}
	if bytes.Compare(rs.startKey, req.startKey) < 0 || bytes.Compare(rs.endKey, req.endKey) > 0 {
		resp.newStartKey = rs.startKey
		resp.newEndKey = rs.endKey
	}
	return resp, nil
}