func indexRangesToKVRanges(tid, idxID int64, ranges []*plan.IndexRange, fieldTypes []*types.FieldType) ([]kv.KeyRange, error) { krs := make([]kv.KeyRange, 0, len(ranges)) for _, ran := range ranges { err := convertIndexRangeTypes(ran, fieldTypes) if err != nil { return nil, errors.Trace(err) } low, err := codec.EncodeKey(nil, ran.LowVal...) if err != nil { return nil, errors.Trace(err) } if ran.LowExclude { low = []byte(kv.Key(low).PrefixNext()) } high, err := codec.EncodeKey(nil, ran.HighVal...) if err != nil { return nil, errors.Trace(err) } if !ran.HighExclude { high = []byte(kv.Key(high).PrefixNext()) } startKey := tablecodec.EncodeIndexSeekKey(tid, idxID, low) endKey := tablecodec.EncodeIndexSeekKey(tid, idxID, high) krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey}) } return krs, nil }
func indexRangesToPBRanges(ranges []*plan.IndexRange, fieldTypes []*types.FieldType) ([]*tipb.KeyRange, error) { keyRanges := make([]*tipb.KeyRange, 0, len(ranges)) for _, ran := range ranges { err := convertIndexRangeTypes(ran, fieldTypes) if err != nil { return nil, errors.Trace(err) } low, err := codec.EncodeKey(nil, ran.LowVal...) if err != nil { return nil, errors.Trace(err) } if ran.LowExclude { low = []byte(kv.Key(low).PrefixNext()) } high, err := codec.EncodeKey(nil, ran.HighVal...) if err != nil { return nil, errors.Trace(err) } if !ran.HighExclude { high = []byte(kv.Key(high).PrefixNext()) } keyRanges = append(keyRanges, &tipb.KeyRange{Low: low, High: high}) } return keyRanges, nil }
// doTableRequest builds and sends a pushed-down table-scan request for the
// rows identified by handles, returning the streaming result.
// NOTE(review): e.txn is assumed to already be a started transaction — confirm
// with callers of this method.
func (e *NewXSelectIndexExec) doTableRequest(handles []int64) (xapi.SelectResult, error) {
	// The handles are not in original index order, so we can't push limit here.
	selTableReq := new(tipb.SelectRequest)
	selTableReq.StartTs = e.txn.StartTS()
	selTableReq.TableInfo = &tipb.TableInfo{
		TableId: e.table.Meta().ID,
	}
	selTableReq.TableInfo.Columns = xapi.ColumnsToProto(e.indexPlan.Columns, e.table.Meta().PKIsHandle)
	for _, h := range handles {
		if h == math.MaxInt64 {
			// We can't convert MaxInt64 into an left closed, right open range.
			continue
		}
		// Each handle becomes a single-row range [EncodeInt(h), PrefixNext()).
		pbRange := new(tipb.KeyRange)
		pbRange.Low = codec.EncodeInt(nil, h)
		pbRange.High = kv.Key(pbRange.Low).PrefixNext()
		selTableReq.Ranges = append(selTableReq.Ranges, pbRange)
	}
	selTableReq.Where = e.where
	// Aggregate info pushed down with the scan.
	selTableReq.Aggregates = e.aggFuncs
	selTableReq.GroupBy = e.byItems
	resp, err := xapi.Select(e.txn.GetClient(), selTableReq, defaultConcurrency, false)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if e.aggregate {
		// The returned rows should be aggregate partial result.
		resp.SetFields(e.aggFields)
	}
	resp.Fetch()
	return resp, nil
}
// doTableRequest builds and sends a pushed-down table-scan request for the
// rows identified by handles, returning the streaming result. This variant
// fetches the transaction from the session context and uses pointer-typed
// protobuf fields (proto.Int64 / *StartTs).
func (e *NewXSelectIndexExec) doTableRequest(handles []int64) (*xapi.SelectResult, error) {
	txn, err := e.ctx.GetTxn(false)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// The handles are not in original index order, so we can't push limit here.
	selTableReq := new(tipb.SelectRequest)
	startTs := txn.StartTS()
	selTableReq.StartTs = &startTs
	selTableReq.TableInfo = &tipb.TableInfo{
		TableId: proto.Int64(e.table.Meta().ID),
	}
	selTableReq.TableInfo.Columns = xapi.ColumnsToProto(e.indexPlan.Columns, e.table.Meta().PKIsHandle)
	for _, h := range handles {
		if h == math.MaxInt64 {
			// We can't convert MaxInt64 into an left closed, right open range.
			continue
		}
		// Each handle becomes a single-row range [EncodeInt(h), PrefixNext()).
		pbRange := new(tipb.KeyRange)
		pbRange.Low = codec.EncodeInt(nil, h)
		pbRange.High = kv.Key(pbRange.Low).PrefixNext()
		selTableReq.Ranges = append(selTableReq.Ranges, pbRange)
	}
	selTableReq.Where = e.where
	resp, err := xapi.Select(txn.GetClient(), selTableReq, defaultConcurrency)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return resp, nil
}
// doCommit commits the transaction carried by cmd: it locks the written keys,
// stamps the transaction with a fresh commit version, writes every buffered
// mutation as an MVCC-versioned key in one batch, then releases the locks.
// The final outcome (nil or a traced error) is delivered on cmd.done.
func (s *dbStore) doCommit(cmd *command) {
	txn := cmd.txn
	curVer, err := globalVersionProvider.CurrentVersion()
	if err != nil {
		// Version provider failure is unrecoverable for this store.
		log.Fatal(err)
	}
	err = s.tryLock(txn)
	if err != nil {
		cmd.done <- errors.Trace(err)
		return
	}
	// Update commit version.
	txn.version = curVer
	b := s.db.NewBatch()
	// The callback below always returns nil, so WalkBuffer's own error
	// return is intentionally ignored here.
	txn.us.WalkBuffer(func(k kv.Key, value []byte) error {
		mvccKey := MvccEncodeVersionKey(kv.Key(k), curVer)
		if len(value) == 0 { // Deleted marker
			b.Put(mvccKey, nil)
			s.compactor.OnDelete(k)
		} else {
			b.Put(mvccKey, value)
			s.compactor.OnSet(k)
		}
		return nil
	})
	err = s.writeBatch(b)
	// Locks are released whether or not the batch write succeeded.
	s.unLockKeys(txn)
	cmd.done <- errors.Trace(err)
}
// NewIndex builds a new Index object. func NewIndex(tableInfo *model.TableInfo, indexInfo *model.IndexInfo) table.Index { index := &index{ tblInfo: tableInfo, idxInfo: indexInfo, prefix: kv.Key(tablecodec.EncodeTableIndexPrefix(tableInfo.ID, indexInfo.ID)), } return index }
func (s *testLockSuite) TestLockTTL(c *C) { txn, err := s.store.Begin() c.Assert(err, IsNil) txn.Set(kv.Key("key"), []byte("value")) s.prewriteTxn(c, txn.(*tikvTxn)) l := s.mustGetLock(c, []byte("key")) c.Assert(l.TTL, Equals, defaultLockTTL) // Huge txn has a greater TTL. txn, err = s.store.Begin() txn.Set(kv.Key("key"), []byte("value")) for i := 0; i < 2048; i++ { k, v := randKV(1024, 1024) txn.Set(kv.Key(k), []byte(v)) } s.prewriteTxn(c, txn.(*tikvTxn)) l = s.mustGetLock(c, []byte("key")) c.Assert(l.TTL, Equals, uint64(ttlFactor*2)) }
// extractKVRanges extracts kv.KeyRanges slice from a SelectRequest, and also returns if it is in descending order.
// Each request range is encoded into a row key or index seek key and then
// clipped to this handler's [h.startKey, h.endKey) region; ranges entirely
// before the region are skipped, and iteration stops at the first range
// starting at or past the region end (ranges are assumed sorted ascending).
func (h *rpcHandler) extractKVRanges(sel *tipb.SelectRequest) (kvRanges []kv.KeyRange, desc bool) {
	var (
		tid   int64
		idxID int64
	)
	if sel.IndexInfo != nil {
		tid = sel.IndexInfo.GetTableId()
		idxID = sel.IndexInfo.GetIndexId()
	} else {
		tid = sel.TableInfo.GetTableId()
	}
	for _, kran := range sel.Ranges {
		var upperKey, lowerKey kv.Key
		// idxID == 0 means a table (row-key) request; otherwise index keys.
		if idxID == 0 {
			upperKey = tablecodec.EncodeRowKey(tid, kran.GetHigh())
			if bytes.Compare(upperKey, h.startKey) <= 0 {
				// Range ends before this region begins.
				continue
			}
			lowerKey = tablecodec.EncodeRowKey(tid, kran.GetLow())
		} else {
			upperKey = tablecodec.EncodeIndexSeekKey(tid, idxID, kran.GetHigh())
			if bytes.Compare(upperKey, h.startKey) <= 0 {
				// Range ends before this region begins.
				continue
			}
			lowerKey = tablecodec.EncodeIndexSeekKey(tid, idxID, kran.GetLow())
		}
		if len(h.endKey) != 0 && bytes.Compare([]byte(lowerKey), h.endKey) >= 0 {
			// Range starts at or past the region end; later ones will too.
			break
		}
		var kvr kv.KeyRange
		kvr.StartKey = kv.Key(maxStartKey(lowerKey, h.startKey))
		kvr.EndKey = kv.Key(minEndKey(upperKey, h.endKey))
		kvRanges = append(kvRanges, kvr)
	}
	if sel.OrderBy != nil {
		desc = *sel.OrderBy[0].Desc
	}
	if desc {
		reverseKVRanges(kvRanges)
	}
	return
}
// doCommit commits the transaction: it verifies lazy condition pairs, acquires
// condition locks for every key read through the snapshot, then, with the
// version provider frozen, writes each buffered key as a meta entry plus an
// MVCC-versioned entry in a single batch. The deferred unlock releases any
// locks taken, on both success and failure paths.
func (txn *dbTxn) doCommit() error {
	b := txn.store.newBatch()
	keysLocked := make([]string, 0, len(txn.snapshotVals))
	defer func() {
		for _, key := range keysLocked {
			txn.store.unLockKeys(key)
		}
	}()
	// check lazy condition pairs
	if err := txn.UnionStore.CheckLazyConditionPairs(); err != nil {
		return errors.Trace(err)
	}
	txn.Snapshot.Release()
	// Check locked keys
	for k := range txn.snapshotVals {
		err := txn.store.tryConditionLockKey(txn.tid, k)
		if err != nil {
			return errors.Trace(err)
		}
		keysLocked = append(keysLocked, k)
	}
	// disable version provider temporarily
	providerMu.Lock()
	defer providerMu.Unlock()
	curVer, err := globalVersionProvider.CurrentVersion()
	if err != nil {
		return errors.Trace(err)
	}
	err = txn.each(func(iter kv.Iterator) error {
		metaKey := codec.EncodeBytes(nil, []byte(iter.Key()))
		// put dummy meta key, write current version
		b.Put(metaKey, codec.EncodeUint(nil, curVer.Ver))
		mvccKey := MvccEncodeVersionKey(kv.Key(iter.Key()), curVer)
		if len(iter.Value()) == 0 { // Deleted marker
			b.Put(mvccKey, nil)
		} else {
			b.Put(mvccKey, iter.Value())
		}
		return nil
	})
	if err != nil {
		return errors.Trace(err)
	}
	// Update commit version.
	txn.version = curVer
	return txn.store.writeBatch(b)
}
func (s *Scanner) resolveCurrentLock(bo *Backoffer) error { current := s.cache[s.idx] if current.GetError() == nil { return nil } val, err := s.snapshot.get(bo, kv.Key(current.Key)) if err != nil { return errors.Trace(err) } current.Error = nil current.Value = val return nil }
func (s *testLockSuite) TestBatchGetLock(c *C) { var allKeys []kv.Key for ch := byte('a'); ch <= byte('z'); ch++ { k := []byte{ch} s.lockKey(c, k, k, k, k, false) allKeys = append(allKeys, kv.Key(k)) } ver, err := s.store.CurrentVersion() c.Assert(err, IsNil) snapshot := newTiKVSnapshot(s.store, ver) _, err = snapshot.BatchGet(allKeys) c.Assert(err, IsNil) }
// extractKVRanges extracts kv.KeyRanges slice from a SelectRequest, and also returns if it is in descending order. func (h *rpcHandler) extractKVRanges(ctx *selectContext) (kvRanges []kv.KeyRange, desc bool) { sel := ctx.sel for _, kran := range ctx.keyRanges { upperKey := kran.GetEnd() if bytes.Compare(upperKey, h.startKey) <= 0 { continue } lowerKey := kran.GetStart() if len(h.endKey) != 0 && bytes.Compare([]byte(lowerKey), h.endKey) >= 0 { break } var kvr kv.KeyRange kvr.StartKey = kv.Key(maxStartKey(lowerKey, h.startKey)) kvr.EndKey = kv.Key(minEndKey(upperKey, h.endKey)) kvRanges = append(kvRanges, kvr) } if sel.OrderBy != nil { desc = sel.OrderBy[0].Desc } if desc { reverseKVRanges(kvRanges) } return }
func (s *testSnapshotSuite) TestBatchGetNotExist(c *C) { for _, rowNum := range s.rowNums { log.Debugf("Test BatchGetNotExist with length[%d]", rowNum) txn := s.beginTxn(c) for i := 0; i < rowNum; i++ { k := encodeKey(s.prefix, s08d("key", i)) err := txn.Set(k, valueBytes(i)) c.Assert(err, IsNil) } err := txn.Commit() c.Assert(err, IsNil) keys := makeKeys(rowNum, s.prefix) keys = append(keys, kv.Key("noSuchKey")) s.checkAll(keys, c) s.deleteKeys(keys, c) } }
// Next implements Executor Next interface.
// On the first call it materializes all source rows, then updates one fetched
// row per call, applying each table's new column values at that table's
// offset. A row key already present in updatedRowKeys is skipped so a row
// matched multiple times is updated only once.
func (e *UpdateExec) Next() (*Row, error) {
	if !e.fetched {
		err := e.fetchRows()
		if err != nil {
			return nil, errors.Trace(err)
		}
		e.fetched = true
	}
	if e.cursor >= len(e.rows) {
		// All fetched rows processed.
		return nil, nil
	}
	if e.updatedRowKeys == nil {
		e.updatedRowKeys = map[string]bool{}
	}
	row := e.rows[e.cursor]
	newData := e.newRowsData[e.cursor]
	for _, entry := range row.RowKeys {
		tbl := entry.Tbl
		offset := e.getTableOffset(tbl)
		k := entry.Key
		// Slice out this table's columns from the joined row data.
		oldData := row.Data[offset : offset+len(tbl.Cols())]
		newTableData := newData[offset : offset+len(tbl.Cols())]
		_, ok := e.updatedRowKeys[k]
		if ok {
			// Each matching row is updated once, even if it matches the conditions multiple times.
			continue
		}
		// Update row
		handle, err1 := tables.DecodeRecordKeyHandle(kv.Key(k))
		if err1 != nil {
			return nil, errors.Trace(err1)
		}
		err1 = e.updateRecord(handle, oldData, newTableData, tbl, offset, false)
		if err1 != nil {
			return nil, errors.Trace(err1)
		}
		e.updatedRowKeys[k] = true
	}
	e.cursor++
	return &Row{}, nil
}
// doTableRequest builds and sends a pushed-down table-scan request for the
// rows identified by handles, returning the streaming result. Only columns
// actually referenced by the index plan's fields are included in the request.
func (e *XSelectIndexExec) doTableRequest(handles []int64) (*xapi.SelectResult, error) {
	txn, err := e.ctx.GetTxn(false)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// The handles are not in original index order, so we can't push limit here.
	selTableReq := new(tipb.SelectRequest)
	startTs := txn.StartTS()
	selTableReq.StartTs = &startTs
	// Keep only referenced columns to shrink the request/response payload.
	columns := make([]*model.ColumnInfo, 0, len(e.indexPlan.Fields()))
	for _, v := range e.indexPlan.Fields() {
		if v.Referenced {
			columns = append(columns, v.Column)
		}
	}
	selTableReq.TableInfo = &tipb.TableInfo{
		TableId: proto.Int64(e.table.Meta().ID),
	}
	selTableReq.TableInfo.Columns = xapi.ColumnsToProto(columns, e.table.Meta().PKIsHandle)
	selTableReq.Fields = resultFieldsToPBExpression(e.indexPlan.Fields())
	for _, h := range handles {
		if h == math.MaxInt64 {
			// We can't convert MaxInt64 into an left closed, right open range.
			continue
		}
		// Each handle becomes a single-row range [EncodeInt(h), PrefixNext()).
		pbRange := new(tipb.KeyRange)
		pbRange.Low = codec.EncodeInt(nil, h)
		pbRange.High = kv.Key(pbRange.Low).PrefixNext()
		selTableReq.Ranges = append(selTableReq.Ranges, pbRange)
	}
	selTableReq.Where = e.where
	// Aggregate Info
	selTableReq.Aggregates = e.aggFuncs
	selTableReq.GroupBy = e.byItems
	resp, err := xapi.Select(txn.GetClient(), selTableReq, defaultConcurrency)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if e.aggregate {
		// The returned rows should be aggregate partial result.
		resp.SetFields(e.aggFields)
	}
	return resp, nil
}
// doCommit commits the transaction: it condition-locks every snapshot-read key
// (verifying the value seen is still current), then writes each buffered key
// as a meta entry plus an MVCC-versioned entry in one batch. The deferred
// unlock releases acquired locks on both success and failure paths.
func (txn *dbTxn) doCommit() error {
	b := txn.store.newBatch()
	keysLocked := make([]string, 0, len(txn.snapshotVals))
	defer func() {
		for _, key := range keysLocked {
			txn.store.unLockKeys(key)
		}
	}()
	// Check locked keys
	for k, v := range txn.snapshotVals {
		err := txn.store.tryConditionLockKey(txn.tid, k, v)
		if err != nil {
			return errors.Trace(err)
		}
		keysLocked = append(keysLocked, k)
	}
	// Check dirty store
	curVer, err := globalVersionProvider.CurrentVersion()
	if err != nil {
		return errors.Trace(err)
	}
	err = txn.each(func(iter kv.Iterator) error {
		metaKey := codec.EncodeBytes(nil, []byte(iter.Key()))
		// put dummy meta key, write current version
		b.Put(metaKey, codec.EncodeUint(nil, curVer.Ver))
		mvccKey := MvccEncodeVersionKey(kv.Key(iter.Key()), curVer)
		if len(iter.Value()) == 0 { // Deleted marker
			b.Put(mvccKey, nil)
		} else {
			b.Put(mvccKey, iter.Value())
		}
		return nil
	})
	if err != nil {
		return errors.Trace(err)
	}
	// Update commit version.
	txn.version = curVer
	// Release read lock before write. Workaround for BoltDB.
	txn.Snapshot.Release()
	return txn.store.writeBatch(b)
}
// TestRowKeyCodec round-trips record keys through EncodeRecordKey /
// DecodeRecordKey / DecodeRecordKeyHandle for several (tableID, handle,
// columnID) triples, then checks that malformed key strings are rejected.
func (ts *testSuite) TestRowKeyCodec(c *C) {
	table := []struct {
		tableID int64
		h       int64
		ID      int64
	}{
		{1, 1234567890, 0},
		{2, 1, 0},
		{3, -1, 0},
		// Negative handle with a non-zero column ID.
		{4, -1, 1},
	}

	for _, t := range table {
		b := tables.EncodeRecordKey(t.tableID, t.h, t.ID)
		tableID, handle, columnID, err := tables.DecodeRecordKey(b)
		c.Assert(err, IsNil)
		c.Assert(tableID, Equals, t.tableID)
		c.Assert(handle, Equals, t.h)
		c.Assert(columnID, Equals, t.ID)

		handle, err = tables.DecodeRecordKeyHandle(b)
		c.Assert(err, IsNil)
		c.Assert(handle, Equals, t.h)
	}

	// test error: truncated or malformed keys must fail to decode.
	tbl := []string{
		"",
		"x",
		"t1",
		"t12345678",
		"t12345678_i",
		"t12345678_r1",
		"t12345678_r1234567",
		"t12345678_r123456781",
	}

	for _, t := range tbl {
		_, err := tables.DecodeRecordKeyHandle(kv.Key(t))
		c.Assert(err, NotNil)
	}
}
// Next implements plan.Plan Next interface. func (r *SelectLockPlan) Next(ctx context.Context) (row *plan.Row, err error) { row, err = r.Src.Next(ctx) if row == nil || err != nil { return nil, errors.Trace(err) } if len(row.RowKeys) != 0 && r.Lock == coldef.SelectLockForUpdate { forupdate.SetForUpdate(ctx) txn, err := ctx.GetTxn(false) if err != nil { return nil, errors.Trace(err) } for _, k := range row.RowKeys { err = txn.LockKeys(kv.Key(k.Key)) if err != nil { return nil, errors.Trace(err) } } } return }
// getData fetches the next batch of key-value pairs into the scanner cache,
// retrying on region errors under the region-miss backoff policy. Pairs that
// carry a key error (locks) have their Key restored from the lock info so a
// later resolve step can handle them. When fewer than batchSize pairs come
// back, the next fetch continues from the region's end key (or EOF).
func (s *Scanner) getData() error {
	log.Debugf("txn getData nextStartKey[%q], txn %d", s.nextStartKey, s.startTS())
	var backoffErr error
	for backoff := regionMissBackoff(); backoffErr == nil; backoffErr = backoff() {
		region, err := s.snapshot.store.regionCache.GetRegion(s.nextStartKey)
		if err != nil {
			return errors.Trace(err)
		}
		req := &pb.Request{
			Type: pb.MessageType_CmdScan.Enum(),
			CmdScanReq: &pb.CmdScanRequest{
				StartKey: []byte(s.nextStartKey),
				Limit:    proto.Uint32(uint32(s.batchSize)),
				Version:  proto.Uint64(s.startTS()),
			},
		}
		resp, err := s.snapshot.store.SendKVReq(req, region.VerID())
		if err != nil {
			return errors.Trace(err)
		}
		if regionErr := resp.GetRegionError(); regionErr != nil {
			// Stale region info; back off and retry the loop.
			log.Warnf("scanner getData failed: %s", regionErr)
			continue
		}
		cmdScanResp := resp.GetCmdScanResp()
		if cmdScanResp == nil {
			return errors.Trace(errBodyMissing)
		}
		kvPairs := cmdScanResp.Pairs
		// Check if kvPair contains error, it should be a Lock.
		for _, pair := range kvPairs {
			if keyErr := pair.GetError(); keyErr != nil {
				lock, err := extractLockInfoFromKeyErr(keyErr)
				if err != nil {
					return errors.Trace(err)
				}
				pair.Key = lock.Key
			}
		}
		s.cache, s.idx = kvPairs, 0
		if len(kvPairs) < s.batchSize {
			// No more data in current Region. Next getData() starts
			// from current Region's endKey.
			s.nextStartKey = region.EndKey()
			if len(region.EndKey()) == 0 {
				// Current Region is the last one.
				s.eof = true
			}
			return nil
		}
		// next getData() starts from the last key in kvPairs (but skip
		// it by appending a '\x00' to the key). Note that next getData()
		// may get an empty response if the Region in fact does not have
		// more data.
		lastKey := kvPairs[len(kvPairs)-1].GetKey()
		s.nextStartKey = kv.Key(lastKey).Next()
		return nil
	}
	return errors.Annotate(backoffErr, txnRetryableMark)
}
// Next implements Executor Next interface.
// It runs once: drains the source executor, collects the distinct row keys
// (filtered to the named tables for multi-table DELETE), then removes each
// row. The deferred flag makes subsequent calls return immediately.
func (e *DeleteExec) Next() (*Row, error) {
	if e.finished {
		return nil, nil
	}
	defer func() {
		e.finished = true
	}()
	if e.IsMultiTable && len(e.Tables) == 0 {
		return &Row{}, nil
	}
	tblIDMap := make(map[int64]bool, len(e.Tables))
	// Get table alias map.
	tblNames := make(map[string]string)
	rowKeyMap := make(map[string]table.Table)
	if e.IsMultiTable {
		// Delete from multiple tables should consider table ident list.
		fs := e.SelectExec.Fields()
		for _, f := range fs {
			if len(f.TableAsName.L) > 0 {
				tblNames[f.TableAsName.L] = f.TableName.Name.L
			} else {
				tblNames[f.TableName.Name.L] = f.TableName.Name.L
			}
		}
		for _, t := range e.Tables {
			// Consider DBName.
			_, ok := tblNames[t.Name.L]
			if !ok {
				return nil, errors.Errorf("Unknown table '%s' in MULTI DELETE", t.Name.O)
			}
			tblIDMap[t.TableInfo.ID] = true
		}
	}
	// Drain the source and dedupe row keys; a map avoids deleting a row twice.
	for {
		row, err := e.SelectExec.Next()
		if err != nil {
			return nil, errors.Trace(err)
		}
		if row == nil {
			break
		}
		for _, entry := range row.RowKeys {
			if e.IsMultiTable {
				tid := entry.Tbl.TableID()
				if _, ok := tblIDMap[tid]; !ok {
					// Row belongs to a table not named in the DELETE list.
					continue
				}
			}
			rowKeyMap[entry.Key] = entry.Tbl
		}
	}
	for k, t := range rowKeyMap {
		handle, err := tables.DecodeRecordKeyHandle(kv.Key(k))
		if err != nil {
			return nil, errors.Trace(err)
		}
		data, err := t.Row(e.ctx, handle)
		if err != nil {
			return nil, errors.Trace(err)
		}
		err = e.removeRow(e.ctx, t, handle, data)
		if err != nil {
			return nil, errors.Trace(err)
		}
	}
	return nil, nil
}
// Exec implements the stmt.Statement Exec interface.
// It executes a DELETE: builds the plan, resolves table aliases to original
// names for multi-table DELETE, drains the plan to collect distinct row keys
// (filtered to the named tables), then removes each corresponding row.
func (s *DeleteStmt) Exec(ctx context.Context) (_ rset.Recordset, err error) {
	if s.MultiTable && len(s.TableIdents) == 0 {
		return nil, nil
	}
	p, err := s.plan(ctx)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if p == nil {
		return nil, nil
	}
	defer p.Close()
	tblIDMap := make(map[int64]bool, len(s.TableIdents))
	// Get table alias map.
	tblNames := make(map[string]string)
	if s.MultiTable {
		// Delete from multiple tables should consider table ident list.
		fs := p.GetFields()
		for _, f := range fs {
			// Map alias -> original table name (or identity when unaliased).
			if f.TableName != f.OrgTableName {
				tblNames[f.TableName] = f.OrgTableName
			} else {
				tblNames[f.TableName] = f.TableName
			}
		}
		for _, t := range s.TableIdents {
			// Consider DBName.
			oname, ok := tblNames[t.Name.O]
			if !ok {
				return nil, errors.Errorf("Unknown table '%s' in MULTI DELETE", t.Name.O)
			}
			// Rewrite the ident to the original table name before lookup.
			t.Name.O = oname
			t.Name.L = strings.ToLower(oname)
			var tbl table.Table
			tbl, err = getTable(ctx, t)
			if err != nil {
				return nil, errors.Trace(err)
			}
			tblIDMap[tbl.TableID()] = true
		}
	}
	// Dedupe row keys so a row matched multiple times is deleted once.
	rowKeyMap := make(map[string]table.Table)
	for {
		row, err1 := p.Next(ctx)
		if err1 != nil {
			return nil, errors.Trace(err1)
		}
		if row == nil {
			break
		}
		for _, entry := range row.RowKeys {
			if s.MultiTable {
				tid := entry.Tbl.TableID()
				if _, ok := tblIDMap[tid]; !ok {
					continue
				}
			}
			rowKeyMap[entry.Key] = entry.Tbl
		}
	}
	for k, t := range rowKeyMap {
		handle, err := tables.DecodeRecordKeyHandle(kv.Key(k))
		if err != nil {
			return nil, errors.Trace(err)
		}
		data, err := t.Row(ctx, handle)
		if err != nil {
			return nil, errors.Trace(err)
		}
		err = removeRow(ctx, t, handle, data)
		if err != nil {
			return nil, errors.Trace(err)
		}
	}
	return nil, nil
}
func (h *rpcHandler) getIndexRowFromRange(ctx *selectContext, ran kv.KeyRange, desc bool, limit int64) ([]*tipb.Row, error) { idxInfo := ctx.sel.IndexInfo startKey := maxStartKey(ran.StartKey, h.startKey) endKey := minEndKey(ran.EndKey, h.endKey) if limit == 0 || bytes.Compare(startKey, endKey) >= 0 { return nil, nil } var rows []*tipb.Row var seekKey kv.Key if desc { seekKey = endKey } else { seekKey = startKey } ids := make([]int64, len(idxInfo.Columns)) for i, col := range idxInfo.Columns { ids[i] = col.GetColumnId() } for { if limit == 0 { break } var ( pairs []Pair pair Pair err error ) if desc { pairs = h.mvccStore.ReverseScan(startKey, seekKey, 1, ctx.sel.GetStartTs()) } else { pairs = h.mvccStore.Scan(seekKey, endKey, 1, ctx.sel.GetStartTs()) } if len(pairs) > 0 { pair = pairs[0] } if pair.Err != nil { // TODO: handle lock error. return nil, errors.Trace(pair.Err) } if pair.Key == nil { break } if desc { if bytes.Compare(pair.Key, startKey) < 0 { break } seekKey = pair.Key } else { if bytes.Compare(pair.Key, endKey) >= 0 { break } seekKey = []byte(kv.Key(pair.Key).PrefixNext()) } values, b, err := tablecodec.CutIndexKey(pair.Key, ids) var handle int64 if len(b) > 0 { var handleDatum types.Datum _, handleDatum, err = codec.DecodeOne(b) if err != nil { return nil, errors.Trace(err) } handle = handleDatum.GetInt64() } else { handle, err = decodeHandle(pair.Value) if err != nil { return nil, errors.Trace(err) } } row, err := h.valuesToRow(ctx, handle, values) if err != nil { return nil, errors.Trace(err) } if row != nil { rows = append(rows, row) limit-- } } return rows, nil }
func (h *rpcHandler) getRowsFromRange(ctx *selectContext, ran kv.KeyRange, limit int64, desc bool) ([]*tipb.Row, error) { startKey := maxStartKey(ran.StartKey, h.startKey) endKey := minEndKey(ran.EndKey, h.endKey) if limit == 0 || bytes.Compare(startKey, endKey) >= 0 { return nil, nil } var rows []*tipb.Row if ran.IsPoint() { val, err := h.mvccStore.Get(startKey, ctx.sel.GetStartTs()) if len(val) == 0 { return nil, nil } else if err != nil { return nil, errors.Trace(err) } handle, err := tablecodec.DecodeRowKey(kv.Key(startKey)) if err != nil { return nil, errors.Trace(err) } row, err := h.handleRowData(ctx, handle, val) if err != nil { return nil, errors.Trace(err) } if row != nil { rows = append(rows, row) } return rows, nil } var seekKey []byte if desc { seekKey = endKey } else { seekKey = startKey } for { if limit == 0 { break } var ( pairs []Pair pair Pair err error ) if desc { pairs = h.mvccStore.ReverseScan(startKey, seekKey, 1, ctx.sel.GetStartTs()) } else { pairs = h.mvccStore.Scan(seekKey, endKey, 1, ctx.sel.GetStartTs()) } if len(pairs) > 0 { pair = pairs[0] } if pair.Err != nil { // TODO: handle lock error. return nil, errors.Trace(pair.Err) } if pair.Key == nil { break } if desc { if bytes.Compare(pair.Key, startKey) < 0 { break } seekKey = []byte(tablecodec.TruncateToRowKeyLen(kv.Key(pair.Key))) } else { if bytes.Compare(pair.Key, endKey) >= 0 { break } seekKey = []byte(kv.Key(pair.Key).PrefixNext()) } handle, err := tablecodec.DecodeRowKey(pair.Key) if err != nil { return nil, errors.Trace(err) } row, err := h.handleRowData(ctx, handle, pair.Value) if err != nil { return nil, errors.Trace(err) } if row != nil { rows = append(rows, row) limit-- } } return rows, nil }
// getData fetches the next batch of key-value pairs into the scanner cache,
// locating the region via the region cache and retrying region errors through
// the supplied Backoffer. Pairs carrying a key error (locks) have their Key
// restored from the lock info so a later resolve step can handle them. A
// short batch means the region is exhausted; the next fetch continues from
// the region's end key (or sets EOF on the last region).
func (s *Scanner) getData(bo *Backoffer) error {
	log.Debugf("txn getData nextStartKey[%q], txn %d", s.nextStartKey, s.startTS())
	for {
		loc, err := s.snapshot.store.regionCache.LocateKey(bo, s.nextStartKey)
		if err != nil {
			return errors.Trace(err)
		}
		req := &pb.Request{
			Type: pb.MessageType_CmdScan,
			CmdScanReq: &pb.CmdScanRequest{
				StartKey: []byte(s.nextStartKey),
				Limit:    uint32(s.batchSize),
				Version:  s.startTS(),
			},
		}
		resp, err := s.snapshot.store.SendKVReq(bo, req, loc.Region, readTimeoutMedium)
		if err != nil {
			return errors.Trace(err)
		}
		if regionErr := resp.GetRegionError(); regionErr != nil {
			// Stale region info; back off, then retry with a fresh location.
			log.Warnf("scanner getData failed: %s", regionErr)
			err = bo.Backoff(boRegionMiss, errors.New(regionErr.String()))
			if err != nil {
				return errors.Trace(err)
			}
			continue
		}
		cmdScanResp := resp.GetCmdScanResp()
		if cmdScanResp == nil {
			return errors.Trace(errBodyMissing)
		}
		kvPairs := cmdScanResp.Pairs
		// Check if kvPair contains error, it should be a Lock.
		for _, pair := range kvPairs {
			if keyErr := pair.GetError(); keyErr != nil {
				lock, err := extractLockFromKeyErr(keyErr)
				if err != nil {
					return errors.Trace(err)
				}
				pair.Key = lock.Key
			}
		}
		s.cache, s.idx = kvPairs, 0
		if len(kvPairs) < s.batchSize {
			// No more data in current Region. Next getData() starts
			// from current Region's endKey.
			s.nextStartKey = loc.EndKey
			if len(loc.EndKey) == 0 {
				// Current Region is the last one.
				s.eof = true
			}
			return nil
		}
		// next getData() starts from the last key in kvPairs (but skip
		// it by appending a '\x00' to the key). Note that next getData()
		// may get an empty response if the Region in fact does not have
		// more data.
		lastKey := kvPairs[len(kvPairs)-1].GetKey()
		s.nextStartKey = kv.Key(lastKey).Next()
		return nil
	}
}
// Exec implements the stmt.Statement Exec interface.
// It executes an UPDATE: materializes all matching rows first (so updates do
// not disturb the ongoing scan), then applies the assignment list to each
// table's column slice within every row. A row key already present in
// updatedRowKeys is skipped so a row matched multiple times is updated once.
func (s *UpdateStmt) Exec(ctx context.Context) (_ rset.Recordset, err error) {
	p, err := s.plan(ctx)
	if err != nil {
		return nil, errors.Trace(err)
	}
	defer p.Close()
	fs := p.GetFields()
	columns, err := getUpdateColumns(s.List, fs)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// Collect every row before mutating anything.
	var records []*plan.Row
	for {
		row, err1 := p.Next(ctx)
		if err1 != nil {
			return nil, errors.Trace(err1)
		}
		if row == nil {
			break
		}
		if len(row.RowKeys) == 0 {
			// Nothing to update
			continue
		}
		records = append(records, row)
	}
	evalMap := map[interface{}]interface{}{}
	updatedRowKeys := make(map[string]bool)
	for _, row := range records {
		rowData := row.Data
		// Set ExprEvalIdentReferFunc.
		evalMap[expression.ExprEvalIdentReferFunc] = func(name string, scope int, index int) (interface{}, error) {
			return rowData[index], nil
		}
		// Update rows.
		offset := 0
		for _, entry := range row.RowKeys {
			tbl := entry.Tbl
			k := entry.Key
			// Slice out this table's columns from the joined row data.
			lastOffset := offset
			offset += len(tbl.Cols())
			data := rowData[lastOffset:offset]
			_, ok := updatedRowKeys[k]
			if ok {
				// Each matching row is updated once, even if it matches the conditions multiple times.
				continue
			}
			// Update row
			handle, err1 := tables.DecodeRecordKeyHandle(kv.Key(k))
			if err1 != nil {
				return nil, errors.Trace(err1)
			}
			err1 = updateRecord(ctx, handle, data, tbl, columns, evalMap, lastOffset, false)
			if err1 != nil {
				return nil, errors.Trace(err1)
			}
			updatedRowKeys[k] = true
		}
	}
	return nil, nil
}
// doCommit commits txn against the store. Only one commit runs at a time: the
// loop spins (sleeping a microsecond) until it can atomically claim the
// committing slot and a commit version under s.mu; the deferred func releases
// the slot. It then locks the written keys, flushes all buffered mutations as
// MVCC-versioned entries in one batch, unlocks, and periodically cleans one
// segment of the recent-updates cache.
func (s *dbStore) doCommit(txn *dbTxn) error {
	var commitVer kv.Version
	var err error
	for {
		// Atomically get commit version
		s.mu.Lock()
		closed := s.closed
		committing := s.committingTS != 0
		if !closed && !committing {
			commitVer, err = globalVersionProvider.CurrentVersion()
			if err != nil {
				s.mu.Unlock()
				return errors.Trace(err)
			}
			s.committingTS = commitVer.Ver
			s.wg.Add(1)
		}
		s.mu.Unlock()
		if closed {
			return ErrDBClosed
		}
		if committing {
			// Another commit is in flight; wait briefly and retry.
			time.Sleep(time.Microsecond)
			continue
		}
		break
	}
	defer func() {
		s.mu.Lock()
		s.committingTS = 0
		s.wg.Done()
		s.mu.Unlock()
	}()
	// Here we are sure no concurrent committing happens.
	err = s.tryLock(txn)
	if err != nil {
		return errors.Trace(err)
	}
	b := s.db.NewBatch()
	// The callback below always returns nil, so WalkBuffer's own error
	// return is intentionally ignored here.
	txn.us.WalkBuffer(func(k kv.Key, value []byte) error {
		mvccKey := MvccEncodeVersionKey(kv.Key(k), commitVer)
		if len(value) == 0 { // Deleted marker
			b.Put(mvccKey, nil)
			s.compactor.OnDelete(k)
		} else {
			b.Put(mvccKey, value)
			s.compactor.OnSet(k)
		}
		return nil
	})
	err = s.writeBatch(b)
	if err != nil {
		return errors.Trace(err)
	}
	// Update commit version.
	txn.version = commitVer
	err = s.unLockKeys(txn)
	if err != nil {
		return errors.Trace(err)
	}
	// Clean recent updates.
	now := time.Now()
	if now.Sub(s.lastCleanTime) > time.Second {
		// Clean one segment per second, cycling through all segments.
		s.cleanRecentUpdates(s.cleanIdx)
		s.cleanIdx++
		if s.cleanIdx == s.recentUpdates.SegmentCount() {
			s.cleanIdx = 0
		}
		s.lastCleanTime = now
	}
	return nil
}
// getIndexRowFromRange scans index entries in ran (clipped to this handler's
// region), ascending or descending, up to limit rows. It fully decodes each
// index key into datums; the handle comes from the trailing datum when the
// key has more datums than index columns, otherwise from the entry's value.
// Decoded columns and the handle are re-encoded into a tipb.Row.
func (h *rpcHandler) getIndexRowFromRange(sel *tipb.SelectRequest, ran kv.KeyRange, desc bool, limit int64) ([]*tipb.Row, error) {
	startKey := maxStartKey(ran.StartKey, h.startKey)
	endKey := minEndKey(ran.EndKey, h.endKey)
	if limit == 0 || bytes.Compare(startKey, endKey) >= 0 {
		return nil, nil
	}
	var rows []*tipb.Row
	var seekKey kv.Key
	if desc {
		seekKey = endKey
	} else {
		seekKey = startKey
	}
	for {
		if limit == 0 {
			break
		}
		var (
			pairs []Pair
			pair  Pair
			err   error
		)
		// Fetch one pair per iteration, moving the seek key each time.
		if desc {
			pairs = h.mvccStore.ReverseScan(startKey, seekKey, 1, sel.GetStartTs())
		} else {
			pairs = h.mvccStore.Scan(seekKey, endKey, 1, sel.GetStartTs())
		}
		if len(pairs) > 0 {
			pair = pairs[0]
		}
		if pair.Err != nil {
			// TODO: handle lock error.
			return nil, errors.Trace(pair.Err)
		}
		if pair.Key == nil {
			break
		}
		if desc {
			if bytes.Compare(pair.Key, startKey) < 0 {
				break
			}
			seekKey = pair.Key
		} else {
			if bytes.Compare(pair.Key, endKey) >= 0 {
				break
			}
			seekKey = []byte(kv.Key(pair.Key).PrefixNext())
		}
		datums, err := tablecodec.DecodeIndexKey(pair.Key)
		if err != nil {
			return nil, errors.Trace(err)
		}
		var handle types.Datum
		columns := sel.IndexInfo.Columns
		if len(datums) > len(columns) {
			// Non-unique index: the handle is the extra trailing datum.
			handle = datums[len(columns)]
			datums = datums[:len(columns)]
		} else {
			// Unique index: the handle is stored in the entry's value.
			var intHandle int64
			intHandle, err = decodeHandle(pair.Value)
			if err != nil {
				return nil, errors.Trace(err)
			}
			handle.SetInt64(intHandle)
		}
		data, err := codec.EncodeValue(nil, datums...)
		if err != nil {
			return nil, errors.Trace(err)
		}
		handleData, err := codec.EncodeValue(nil, handle)
		if err != nil {
			return nil, errors.Trace(err)
		}
		row := &tipb.Row{Handle: handleData, Data: data}
		rows = append(rows, row)
		limit--
	}
	return rows, nil
}