Example #1
// HashCode implements the Expression interface.
func (sf *ScalarFunction) HashCode() []byte {
	var bytes []byte
	v := make([]types.Datum, 0, len(sf.Args)+1)
	bytes, _ = codec.EncodeValue(bytes, types.NewStringDatum(sf.FuncName.L))
	v = append(v, types.NewBytesDatum(bytes))
	for _, arg := range sf.Args {
		v = append(v, types.NewBytesDatum(arg.HashCode()))
	}
	bytes = bytes[:0]
	bytes, _ = codec.EncodeValue(bytes, v...)
	return bytes
}
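Usage note (not from the original source): callers typically compare these hash codes byte-wise to decide whether two expressions can be treated as identical. A minimal sketch under that assumption, with a hypothetical helper in the same package:

// exprEqualByHash is a hypothetical helper: it treats byte-equal hash codes
// as meaning "same expression", which is how HashCode results are commonly
// consumed for deduplication.
func exprEqualByHash(a, b Expression) bool {
	return bytes.Equal(a.HashCode(), b.HashCode())
}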
Example #2
func (h *rpcHandler) getRowByHandle(ctx *selectContext, handle int64) (*tipb.Row, error) {
	tid := ctx.sel.TableInfo.GetTableId()
	columns := ctx.sel.TableInfo.Columns
	row := new(tipb.Row)
	var d types.Datum
	d.SetInt64(handle)
	var err error
	row.Handle, err = codec.EncodeValue(nil, d)
	if err != nil {
		return nil, errors.Trace(err)
	}
	for _, col := range columns {
		if col.GetPkHandle() {
			if mysql.HasUnsignedFlag(uint(col.GetFlag())) {
				row.Data, err = codec.EncodeValue(row.Data, types.NewUintDatum(uint64(handle)))
				if err != nil {
					return nil, errors.Trace(err)
				}
			} else {
				row.Data = append(row.Data, row.Handle...)
			}
		} else {
			colID := col.GetColumnId()
			if ctx.whereColumns[colID] != nil {
				// The column value is stored in the evaluator, so use it directly.
				datum := ctx.eval.Row[colID]
				row.Data, err = codec.EncodeValue(row.Data, datum)
				if err != nil {
					return nil, errors.Trace(err)
				}
			} else {
				key := tablecodec.EncodeColumnKey(tid, handle, colID)
				data, err1 := h.mvccStore.Get(key, ctx.sel.GetStartTs())
				if err1 != nil {
					return nil, errors.Trace(err1)
				}
				if data == nil {
					if mysql.HasNotNullFlag(uint(col.GetFlag())) {
						return nil, errors.Trace(kv.ErrNotExist)
					}
					row.Data = append(row.Data, codec.NilFlag)
				} else {
					row.Data = append(row.Data, data...)
				}
			}
		}
	}
	return row, nil
}
Example #3
func (t *Table) addUpdateBinlog(ctx context.Context, h int64, old []types.Datum, newValue []byte, colIDs []int64) error {
	mutation := t.getMutation(ctx)
	hasPK := false
	if t.meta.PKIsHandle {
		hasPK = true
	} else {
		for _, idx := range t.meta.Indices {
			if idx.Primary {
				hasPK = true
				break
			}
		}
	}
	var bin []byte
	if hasPK {
		handleData, _ := codec.EncodeValue(nil, types.NewIntDatum(h))
		bin = append(handleData, newValue...)
	} else {
		oldData, err := tablecodec.EncodeRow(old, colIDs)
		if err != nil {
			return errors.Trace(err)
		}
		bin = append(oldData, newValue...)
	}
	mutation.UpdatedRows = append(mutation.UpdatedRows, bin)
	mutation.Sequence = append(mutation.Sequence, binlog.MutationType_Update)
	return nil
}
Example #4
// handleRowData deals with raw row data:
//	1. Decodes the row from the raw byte slice.
//	2. Checks whether it matches the where condition.
//	3. Updates the aggregate functions.
// It returns true if a row was produced.
func (rs *localRegion) handleRowData(ctx *selectContext, handle int64, value []byte) (bool, error) {
	columns := ctx.sel.TableInfo.Columns
	values, err := rs.getRowData(value, ctx.colTps)
	if err != nil {
		return false, errors.Trace(err)
	}
	// Fill handle and null columns.
	for _, col := range columns {
		if col.GetPkHandle() {
			var handleDatum types.Datum
			if mysql.HasUnsignedFlag(uint(col.Flag)) {
				// The PK column is unsigned.
				handleDatum = types.NewUintDatum(uint64(handle))
			} else {
				handleDatum = types.NewIntDatum(handle)
			}
			handleData, err1 := codec.EncodeValue(nil, handleDatum)
			if err1 != nil {
				return false, errors.Trace(err1)
			}
			values[col.GetColumnId()] = handleData
		} else {
			_, ok := values[col.GetColumnId()]
			if !ok {
				if mysql.HasNotNullFlag(uint(col.GetFlag())) {
					return false, errors.New("missing column")
				}
				values[col.GetColumnId()] = []byte{codec.NilFlag}
			}
		}
	}
	return rs.valuesToRow(ctx, handle, values)
}
Example #5
// meetNewGroup returns whether the row belongs to a new group, i.e. whether its group-by key differs from that of the last group.
func (e *StreamAggExec) meetNewGroup(row *Row) (bool, error) {
	if len(e.GroupByItems) == 0 {
		return false, nil
	}
	e.tmpGroupKey = e.tmpGroupKey[:0]
	matched, firstGroup := true, false
	if len(e.curGroupKey) == 0 {
		matched, firstGroup = false, true
	}
	sc := e.Ctx.GetSessionVars().StmtCtx
	for i, item := range e.GroupByItems {
		v, err := item.Eval(row.Data, e.Ctx)
		if err != nil {
			return false, errors.Trace(err)
		}
		if matched {
			c, err := v.CompareDatum(sc, e.curGroupKey[i])
			if err != nil {
				return false, errors.Trace(err)
			}
			matched = c == 0
		}
		e.tmpGroupKey = append(e.tmpGroupKey, v)
	}
	if matched {
		return false, nil
	}
	e.curGroupKey = e.tmpGroupKey
	var err error
	e.curGroupEncodedKey, err = codec.EncodeValue(e.curGroupEncodedKey[0:0:cap(e.curGroupEncodedKey)], e.curGroupKey...)
	if err != nil {
		return false, errors.Trace(err)
	}
	return !firstGroup, nil
}
Example #6
// HashCode implements the Expression interface.
func (col *Column) HashCode() []byte {
	if len(col.hashcode) != 0 {
		return col.hashcode
	}
	col.hashcode, _ = codec.EncodeValue(col.hashcode, types.NewStringDatum(col.FromID), types.NewIntDatum(int64(col.Position)))
	return col.hashcode
}
Example #7
func (ps *perfSchema) initRecords(tbName string, records [][]interface{}) error {
	lastLsn := atomic.AddUint64(ps.lsns[tbName], uint64(len(records)))

	batch := pool.Get().(*leveldb.Batch)
	defer func() {
		batch.Reset()
		pool.Put(batch)
	}()

	for i, rec := range records {
		lsn := lastLsn - uint64(len(records)) + uint64(i)
		rawKey := []interface{}{uint64(lsn)}
		key, err := codec.EncodeKey(nil, rawKey...)
		if err != nil {
			return errors.Trace(err)
		}
		val, err := codec.EncodeValue(nil, rec...)
		if err != nil {
			return errors.Trace(err)
		}
		batch.Put(key, val)
	}

	err := ps.stores[tbName].Write(batch, nil)
	return errors.Trace(err)
}
Example #8
/*
 * Convert aggregate partial result to rows.
 * Data layout example:
 *	SQL:	select count(c1), sum(c2), avg(c3) from t;
 *	Aggs:	count(c1), sum(c2), avg(c3)
 *	Rows:	groupKey1, count1, value2, count3, value3
 *		groupKey2, count1, value2, count3, value3
 */
func (rs *localRegion) getRowsFromAgg(ctx *selectContext) error {
	for _, gk := range ctx.groupKeys {
		chunk := rs.getChunk(ctx)
		// Each aggregate partial result will be converted to one or two datums.
		rowData := make([]types.Datum, 0, 1+2*len(ctx.aggregates))
		// The first column is group key.
		rowData = append(rowData, types.NewBytesDatum(gk))
		for _, agg := range ctx.aggregates {
			agg.currentGroup = gk
			ds, err := agg.toDatums()
			if err != nil {
				return errors.Trace(err)
			}
			rowData = append(rowData, ds...)
		}
		var err error
		beforeLen := len(chunk.RowsData)
		chunk.RowsData, err = codec.EncodeValue(chunk.RowsData, rowData...)
		if err != nil {
			return errors.Trace(err)
		}
		var rowMeta tipb.RowMeta
		rowMeta.Length = int64(len(chunk.RowsData) - beforeLen)
		chunk.RowsMeta = append(chunk.RowsMeta, rowMeta)
	}
	return nil
}
Example #9
func (e *HashAggExec) getGroupKey(row *Row) ([]byte, error) {
	if e.aggType == plan.FinalAgg {
		val, err := e.GroupByItems[0].Eval(row.Data, e.ctx)
		if err != nil {
			return nil, errors.Trace(err)
		}
		return val.GetBytes(), nil
	}
	if !e.hasGby {
		return []byte{}, nil
	}
	vals := make([]types.Datum, 0, len(e.GroupByItems))
	for _, item := range e.GroupByItems {
		v, err := item.Eval(row.Data, e.ctx)
		if err != nil {
			return nil, errors.Trace(err)
		}
		vals = append(vals, v)
	}
	bs, err := codec.EncodeValue([]byte{}, vals...)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return bs, nil
}
Example #10
func (h *rpcHandler) getRowsFromAgg(ctx *selectContext) ([]*tipb.Row, error) {
	rows := make([]*tipb.Row, 0, len(ctx.groupKeys))
	for _, gk := range ctx.groupKeys {
		row := new(tipb.Row)
		// Each aggregate partial result will be converted to one or two datums.
		rowData := make([]types.Datum, 0, 1+2*len(ctx.aggregates))
		// The first column is group key.
		rowData = append(rowData, types.NewBytesDatum(gk))
		for _, agg := range ctx.aggregates {
			agg.currentGroup = gk
			ds, err := agg.toDatums()
			if err != nil {
				return nil, errors.Trace(err)
			}
			rowData = append(rowData, ds...)
		}
		var err error
		row.Data, err = codec.EncodeValue(nil, rowData...)
		if err != nil {
			return nil, errors.Trace(err)
		}
		rows = append(rows, row)
	}
	return rows, nil
}
Example #11
func encodeColumnKV(tid, handle, cid int64, value types.Datum) (kv.Key, []byte, error) {
	key := tablecodec.EncodeColumnKey(tid, handle, cid)
	val, err := codec.EncodeValue(nil, value)
	if err != nil {
		return nil, nil, errors.Trace(err)
	}
	return key, val, nil
}
Example #12
// EncodeValue implements the table.Table EncodeValue interface.
func (t *Table) EncodeValue(raw interface{}) ([]byte, error) {
	v, err := t.flatten(raw)
	if err != nil {
		return nil, errors.Trace(err)
	}
	b, err := codec.EncodeValue(nil, v)
	return b, errors.Trace(err)
}
Example #13
// EncodeValue encodes a Go value into bytes.
func EncodeValue(raw types.Datum) ([]byte, error) {
	v, err := flatten(raw)
	if err != nil {
		return nil, errors.Trace(err)
	}
	b, err := codec.EncodeValue(nil, v)
	return b, errors.Trace(err)
}
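A minimal usage sketch of the wrapper above; encodeInt64 is a hypothetical caller, and only the EncodeValue signature shown here is assumed:

// encodeInt64 is a hypothetical caller: it wraps an int64 in a Datum and
// delegates to the EncodeValue wrapper defined above.
func encodeInt64(v int64) ([]byte, error) {
	b, err := EncodeValue(types.NewIntDatum(v))
	if err != nil {
		return nil, errors.Trace(err)
	}
	return b, nil
}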
Example #14
// getHashKey encodes a requiredProperty to a unique hash code.
func (p *requiredProperty) getHashKey() ([]byte, error) {
	datums := make([]types.Datum, 0, len(p.props)*3+1)
	datums = append(datums, types.NewDatum(p.sortKeyLen))
	for _, c := range p.props {
		datums = append(datums, types.NewDatum(c.desc), types.NewDatum(c.col.FromID), types.NewDatum(c.col.Index))
	}
	bytes, err := codec.EncodeValue(nil, datums...)
	return bytes, errors.Trace(err)
}
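A sketch of how such a hash key might be consumed; cacheKey is a hypothetical helper that simply turns the encoded bytes into a map-friendly string key:

// cacheKey is a hypothetical helper: the bytes returned by getHashKey are
// converted to a string so they can index a cache keyed by required property.
func cacheKey(p *requiredProperty) (string, error) {
	k, err := p.getHashKey()
	if err != nil {
		return "", errors.Trace(err)
	}
	return string(k), nil
}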
Example #15
func inExpr(target interface{}, list ...interface{}) *tipb.Expr {
	targetDatum := types.NewDatum(target)
	var listDatums []types.Datum
	for _, v := range list {
		listDatums = append(listDatums, types.NewDatum(v))
	}
	types.SortDatums(listDatums)
	targetExpr := datumExpr(targetDatum)
	val, _ := codec.EncodeValue(nil, listDatums...)
	listExpr := &tipb.Expr{Tp: tipb.ExprType_ValueList, Val: val}
	return &tipb.Expr{Tp: tipb.ExprType_In, Children: []*tipb.Expr{targetExpr, listExpr}}
}
Example #16
func indexRangesToPBRanges(ranges []*plan.IndexRange) ([]*tipb.KeyRange, error) {
	keyRanges := make([]*tipb.KeyRange, 0, len(ranges))
	for _, ran := range ranges {
		low, err := codec.EncodeValue(nil, ran.LowVal...)
		if err != nil {
			return nil, errors.Trace(err)
		}
		if ran.LowExclude {
			low = []byte(kv.Key(low).PartialNext())
		}
		high, err := codec.EncodeValue(nil, ran.HighVal...)
		if err != nil {
			return nil, errors.Trace(err)
		}
		if !ran.HighExclude {
			high = []byte(kv.Key(high).PartialNext())
		}
		keyRanges = append(keyRanges, &tipb.KeyRange{Low: low, High: high})
	}
	return keyRanges, nil
}
Example #17
func (s *testKeySuite) TestPartialNext(c *C) {
	// keyA represents a multi-column index.
	keyA, err := codec.EncodeValue(nil, types.NewDatum("abc"), types.NewDatum("def"))
	c.Check(err, IsNil)
	keyB, err := codec.EncodeValue(nil, types.NewDatum("abca"), types.NewDatum("def"))
	c.Check(err, IsNil)

	// We only use the first column value to seek.
	seekKey, err := codec.EncodeValue(nil, types.NewDatum("abc"))
	c.Check(err, IsNil)

	nextKey := Key(seekKey).Next()
	cmp := bytes.Compare(nextKey, keyA)
	c.Assert(cmp, Equals, -1)

	// Using the next partial key, we can skip all index keys whose first column value equals "abc".
	nextPartialKey := Key(seekKey).PartialNext()
	cmp = bytes.Compare(nextPartialKey, keyA)
	c.Assert(cmp, Equals, 1)

	cmp = bytes.Compare(nextPartialKey, keyB)
	c.Assert(cmp, Equals, -1)
}
Example #18
// Check checks whether the given values are distinct from all previously checked values.
func (d *Checker) Check(values []interface{}) (bool, error) {
	bs, err := codec.EncodeValue([]byte{}, values...)
	if err != nil {
		return false, errors.Trace(err)
	}
	key := string(bs)
	_, ok := d.existingKeys[key]
	if ok {
		return false, nil
	}
	d.existingKeys[key] = true
	return true, nil
}
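A sketch of driving the checker over several value tuples; checkAllDistinct and rows are hypothetical, and the Checker is assumed to arrive with its existingKeys map already initialized:

// checkAllDistinct is a hypothetical helper built only on Checker.Check:
// it reports whether every value tuple in rows is distinct.
func checkAllDistinct(d *Checker, rows [][]interface{}) (bool, error) {
	for _, vals := range rows {
		ok, err := d.Check(vals)
		if err != nil {
			return false, errors.Trace(err)
		}
		if !ok {
			return false, nil
		}
	}
	return true, nil
}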
Example #19
func (e *HashJoinExec) getHashKey(exprs []*expression.Column, row *Row) ([]byte, error) {
	vals := make([]types.Datum, 0, len(exprs))
	for _, expr := range exprs {
		v, err := expr.Eval(row.Data, e.ctx)
		if err != nil {
			return nil, errors.Trace(err)
		}
		vals = append(vals, v)
	}
	if len(vals) == 0 {
		return []byte{}, nil
	}

	return codec.EncodeValue([]byte{}, vals...)
}
Example #20
// EncodeRow encodes row data and column IDs into a byte slice.
// Row layout: colID1, value1, colID2, value2, ...
func EncodeRow(row []types.Datum, colIDs []int64) ([]byte, error) {
	if len(row) != len(colIDs) {
		return nil, errors.Errorf("EncodeRow error: data and columnID count not match %d vs %d", len(row), len(colIDs))
	}
	values := make([]types.Datum, 2*len(row))
	for i, c := range row {
		id := colIDs[i]
		idv := types.NewIntDatum(id)
		values[2*i] = idv
		fc, err := flatten(c)
		if err != nil {
			return nil, errors.Trace(err)
		}
		values[2*i+1] = fc
	}
	return codec.EncodeValue(nil, values...)
}
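A sketch of a call site for the layout described above; the column IDs and values are hypothetical:

// encodeSampleRow is a hypothetical caller: it encodes a two-column row
// where column ID 1 holds an int and column ID 2 a string, producing the
// interleaved layout colID1, value1, colID2, value2.
func encodeSampleRow() ([]byte, error) {
	row := []types.Datum{types.NewIntDatum(100), types.NewStringDatum("abc")}
	colIDs := []int64{1, 2}
	return EncodeRow(row, colIDs)
}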
Example #21
func (e *AggregateExec) getGroupKey() ([]byte, error) {
	if len(e.GroupByItems) == 0 {
		return singleGroup, nil
	}
	vals := make([]types.Datum, 0, len(e.GroupByItems))
	for _, item := range e.GroupByItems {
		v, err := evaluator.Eval(e.ctx, item.Expr)
		if err != nil {
			return nil, errors.Trace(err)
		}
		vals = append(vals, v)
	}
	bs, err := codec.EncodeValue([]byte{}, vals...)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return bs, nil
}
Example #22
func (e *AggregationExec) getGroupKey(row *Row) ([]byte, error) {
	if len(e.GroupByItems) == 0 {
		return []byte{}, nil
	}
	vals := make([]types.Datum, 0, len(e.GroupByItems))
	for _, item := range e.GroupByItems {
		v, err := item.Eval(row.Data, e.ctx)
		if err != nil {
			return nil, errors.Trace(err)
		}
		vals = append(vals, v)
	}
	bs, err := codec.EncodeValue([]byte{}, vals...)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return bs, nil
}
Example #23
func (rs *localRegion) getGroupKey(ctx *selectContext) ([]byte, error) {
	items := ctx.sel.GetGroupBy()
	if len(items) == 0 {
		return singleGroup, nil
	}
	vals := make([]types.Datum, 0, len(items))
	for _, item := range items {
		v, err := ctx.eval.Eval(item.Expr)
		if err != nil {
			return nil, errors.Trace(err)
		}
		vals = append(vals, v)
	}
	bs, err := codec.EncodeValue(nil, vals...)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return bs, nil
}
Example #24
// ToPB converts Table to TablePB.
func (t *Table) ToPB() (*TablePB, error) {
	tblPB := &TablePB{
		Id:      proto.Int64(t.info.ID),
		Ts:      proto.Int64(t.TS),
		Count:   proto.Int64(t.Count),
		Columns: make([]*ColumnPB, len(t.Columns)),
	}
	for i, col := range t.Columns {
		data, err := codec.EncodeValue(nil, col.Values...)
		if err != nil {
			return nil, errors.Trace(err)
		}
		tblPB.Columns[i] = &ColumnPB{
			Id:      proto.Int64(col.ID),
			Ndv:     proto.Int64(col.NDV),
			Numbers: col.Numbers,
			Value:   data,
			Repeats: col.Repeats,
		}
	}
	return tblPB, nil
}
Example #25
// getHashKey gets the hash key for the given row and hash columns.
// It returns a boolean indicating whether the hash key contains a NULL value, and a byte slice holding the resulting hash code.
func getHashKey(cols []*expression.Column, row *Row, targetTypes []*types.FieldType, vals []types.Datum, bytes []byte) (bool, []byte, error) {
	var err error
	for i, col := range cols {
		vals[i], err = col.Eval(row.Data, nil)
		if err != nil {
			return false, nil, errors.Trace(err)
		}
		if vals[i].IsNull() {
			return true, nil, nil
		}
		if targetTypes[i].Tp != col.RetType.Tp {
			vals[i], err = vals[i].ConvertTo(targetTypes[i])
			if err != nil {
				return false, nil, errors.Trace(err)
			}
		}
	}
	if len(vals) == 0 {
		return false, nil, nil
	}
	bytes, err = codec.EncodeValue(bytes, vals...)
	return false, bytes, errors.Trace(err)
}
Example #26
func (pc pbConverter) datumsToValueList(datums []types.Datum) *tipb.Expr {
	// Don't push down a value list whose datums have different kinds.
	prevKind := types.KindNull
	for _, d := range datums {
		if prevKind == types.KindNull {
			prevKind = d.Kind()
		}
		if !d.IsNull() && d.Kind() != prevKind {
			return nil
		}
	}
	err := types.SortDatums(pc.sc, datums)
	if err != nil {
		log.Error(err.Error())
		return nil
	}
	val, err := codec.EncodeValue(nil, datums...)
	if err != nil {
		log.Error(err.Error())
		return nil
	}
	return &tipb.Expr{Tp: tipb.ExprType_ValueList, Val: val}
}
Example #27
func (b *executorBuilder) datumsToValueList(datums []types.Datum) *tipb.Expr {
	// Don't push down a value list whose datums have different kinds.
	prevKind := types.KindNull
	for _, d := range datums {
		if prevKind == types.KindNull {
			prevKind = d.Kind()
		}
		if !d.IsNull() && d.Kind() != prevKind {
			return nil
		}
	}
	err := types.SortDatums(datums)
	if err != nil {
		b.err = errors.Trace(err)
		return nil
	}
	val, err := codec.EncodeValue(nil, datums...)
	if err != nil {
		b.err = errors.Trace(err)
		return nil
	}
	return &tipb.Expr{Tp: tipb.ExprType_ValueList.Enum(), Val: val}
}
Example #28
func (h *rpcHandler) valuesToRow(ctx *selectContext, handle int64, values map[int64][]byte) (*tipb.Row, error) {
	var columns []*tipb.ColumnInfo
	if ctx.sel.TableInfo != nil {
		columns = ctx.sel.TableInfo.Columns
	} else {
		columns = ctx.sel.IndexInfo.Columns
	}
	// Evaluate the where condition.
	match, err := h.evalWhereForRow(ctx, handle, values)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if !match {
		return nil, nil
	}
	var row *tipb.Row
	if ctx.aggregate {
		// Update aggregate functions.
		err = h.aggregate(ctx, handle, values)
		if err != nil {
			return nil, errors.Trace(err)
		}
	} else {
		var handleData []byte
		handleData, err = codec.EncodeValue(nil, types.NewIntDatum(handle))
		if err != nil {
			return nil, errors.Trace(err)
		}
		row = new(tipb.Row)
		row.Handle = handleData
		// Without aggregate functions, just return the raw row data.
		for _, col := range columns {
			row.Data = append(row.Data, values[col.GetColumnId()]...)
		}
	}
	return row, nil
}
Example #29
// getHashKey gets the hash key for the given row and hash columns.
// It returns a boolean indicating whether the hash key contains a NULL value, and a byte slice holding the resulting hash code.
func getHashKey(exprs []*expression.Column, row *Row, targetTypes []*types.FieldType) (bool, []byte, error) {
	vals := make([]types.Datum, 0, len(exprs))
	for i, expr := range exprs {
		v, err := expr.Eval(row.Data, nil)
		if err != nil {
			return false, nil, errors.Trace(err)
		}
		if v.IsNull() {
			return true, nil, nil
		}
		if targetTypes[i].Tp != expr.RetType.Tp {
			v, err = v.ConvertTo(targetTypes[i])
			if err != nil {
				return false, nil, errors.Trace(err)
			}
		}
		vals = append(vals, v)
	}
	if len(vals) == 0 {
		return false, nil, nil
	}
	bytes, err := codec.EncodeValue([]byte{}, vals...)
	return false, bytes, errors.Trace(err)
}
Example #30
func (s *testXAPISuite) TestSelect(c *C) {
	defer testleak.AfterTest(c)()
	store := createMemStore(time.Now().Nanosecond())
	count := int64(10)
	err := prepareTableData(store, tbInfo, count, genValues)
	c.Check(err, IsNil)

	// Select Table request.
	txn, err := store.Begin()
	c.Check(err, IsNil)
	client := store.GetClient()
	req, err := prepareSelectRequest(tbInfo, txn.StartTS())
	c.Check(err, IsNil)
	resp := client.Send(req)
	subResp, err := resp.Next()
	c.Check(err, IsNil)
	data, err := ioutil.ReadAll(subResp)
	c.Check(err, IsNil)
	selResp := new(tipb.SelectResponse)
	err = proto.Unmarshal(data, selResp)
	c.Check(err, IsNil)
	c.Check(selResp.Chunks, HasLen, 1)
	chunk := &selResp.Chunks[0]
	c.Check(chunk.RowsMeta, HasLen, int(count))
	var dataOffset int64
	for i, rowMeta := range chunk.RowsMeta {
		handle := int64(i + 1)
		expectedDatums := []types.Datum{types.NewDatum(handle)}
		expectedDatums = append(expectedDatums, genValues(handle, tbInfo)...)
		var expectedEncoded []byte
		expectedEncoded, err = codec.EncodeValue(nil, expectedDatums...)
		c.Assert(err, IsNil)
		c.Assert(chunk.RowsData[dataOffset:dataOffset+rowMeta.Length], BytesEquals, expectedEncoded)
		dataOffset += rowMeta.Length
	}
	c.Check(txn.Commit(), IsNil)

	// Select Index request.
	txn, err = store.Begin()
	c.Check(err, IsNil)
	client = store.GetClient()
	req, err = prepareIndexRequest(tbInfo, txn.StartTS())
	c.Check(err, IsNil)
	resp = client.Send(req)
	subResp, err = resp.Next()
	c.Check(err, IsNil)
	data, err = ioutil.ReadAll(subResp)
	c.Check(err, IsNil)
	idxResp := new(tipb.SelectResponse)
	err = proto.Unmarshal(data, idxResp)
	c.Check(err, IsNil)
	chunk = &idxResp.Chunks[0]
	c.Check(chunk.RowsMeta, HasLen, int(count))
	handles := make([]int, 0, 10)
	for _, rowMeta := range chunk.RowsMeta {
		handles = append(handles, int(rowMeta.Handle))
	}
	sort.Ints(handles)
	for i, h := range handles {
		c.Assert(h, Equals, i+1)
	}
	c.Check(txn.Commit(), IsNil)

	store.Close()
}