// backfillIndexInTxn deals with a part of backfilling index data in a Transaction. // This part of the index data rows is defaultSmallBatchCnt. func (d *ddl) backfillIndexInTxn(t table.Table, kvIdx table.Index, handles []int64, txn kv.Transaction) (int64, error) { idxRecords, err := d.fetchRowColVals(txn, t, handles, kvIdx.Meta()) if err != nil { return 0, errors.Trace(err) } for _, idxRecord := range idxRecords { log.Debug("[ddl] backfill index...", idxRecord.handle) err = txn.LockKeys(idxRecord.key) if err != nil { return 0, errors.Trace(err) } // Create the index. handle, err := kvIdx.Create(txn, idxRecord.vals, idxRecord.handle) if err != nil { if terror.ErrorEqual(err, kv.ErrKeyExists) && idxRecord.handle == handle { // Index already exists, skip it. continue } return 0, errors.Trace(err) } } return idxRecords[len(idxRecords)-1].handle, nil }
// Next implements plan.Plan Next interface.
// It returns the next row of the table scan, or (nil, nil) once the iterator
// is exhausted or leaves this table's key range.
func (r *TableDefaultPlan) Next(ctx context.Context) (row *plan.Row, err error) {
	// Lazily open the iterator at the table's first key on the first call.
	if r.iter == nil {
		var txn kv.Transaction
		txn, err = ctx.GetTxn(false)
		if err != nil {
			return nil, errors.Trace(err)
		}
		r.iter, err = txn.Seek([]byte(r.T.FirstKey()))
		if err != nil {
			return nil, errors.Trace(err)
		}
	}
	// Stop when the iterator is invalid or has moved past this table's keys;
	// the naked return yields (nil, nil), which ends the iteration.
	if !r.iter.Valid() || !strings.HasPrefix(r.iter.Key(), r.T.KeyPrefix()) {
		return
	}
	// TODO: check if lock valid
	// the record layout in storage (key -> value):
	// r1 -> lock-version
	// r1_col1 -> r1 col1 value
	// r1_col2 -> r1 col2 value
	// r2 -> lock-version
	// r2_col1 -> r2 col1 value
	// r2_col2 -> r2 col2 value
	// ...
	rowKey := r.iter.Key()
	handle, err := util.DecodeHandleFromRowKey(rowKey)
	if err != nil {
		return nil, errors.Trace(err)
	}
	txn, err := ctx.GetTxn(false)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// It is very likely that we will fetch rows after current row later, enable the RangePrefetchOnCacheMiss
	// option may help reducing RPC calls.
	// TODO: choose a wiser option value.
	txn.SetOption(kv.RangePrefetchOnCacheMiss, nil)
	// The option only needs to cover the r.T.Row fetch below; clear it on return.
	defer txn.DelOption(kv.RangePrefetchOnCacheMiss)
	// TODO: we could just fetch mentioned columns' values
	row = &plan.Row{}
	row.Data, err = r.T.Row(ctx, handle)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// Put rowKey to the tail of record row
	rke := &plan.RowKeyEntry{
		Tbl: r.T,
		Key: rowKey,
	}
	row.RowKeys = append(row.RowKeys, rke)
	// Advance past all keys sharing this row's prefix so the next call starts
	// at the following record.
	rk := r.T.RecordKey(handle, nil)
	err = kv.NextUntil(r.iter, util.RowKeyPrefixFilter(rk))
	if err != nil {
		return nil, errors.Trace(err)
	}
	return
}
// GetTableRecordsCount returns the total number of table records from startHandle.
// If startHandle = 0, returns the total number of table records.
// It counts one record per handle: after decoding a handle it re-seeks to
// handle+1, skipping over that row's remaining keys instead of iterating them.
func GetTableRecordsCount(txn kv.Transaction, t table.Table, startHandle int64) (int64, error) {
	startKey := t.RecordKey(startHandle, nil)
	it, err := txn.Seek(startKey)
	if err != nil {
		return 0, errors.Trace(err)
	}
	var cnt int64
	prefix := t.RecordPrefix()
	for it.Valid() && it.Key().HasPrefix(prefix) {
		handle, err := tables.DecodeRecordKeyHandle(it.Key())
		if err != nil {
			// NOTE(review): `it` is not closed on this error path — confirm
			// whether Close is required before returning here.
			return 0, errors.Trace(err)
		}
		// Close the current iterator before re-seeking to the next handle.
		it.Close()
		rk := t.RecordKey(handle+1, nil)
		it, err = txn.Seek(rk)
		if err != nil {
			return 0, errors.Trace(err)
		}
		cnt++
	}
	it.Close()
	return cnt, nil
}
func lockRow(txn kv.Transaction, t table.Table, h int64) error { // Get row lock key lockKey := t.RecordKey(h, nil) // set row lock key to current txn err := txn.Set(lockKey, []byte(txn.String())) return errors.Trace(err) }
// ScanMetaWithPrefix scans metadata with the prefix. func ScanMetaWithPrefix(txn kv.Transaction, prefix string, filter func([]byte, []byte) bool) error { iter, err := txn.Seek([]byte(prefix)) if err != nil { return errors.Trace(err) } defer iter.Close() for { if err != nil { return errors.Trace(err) } if iter.Valid() && strings.HasPrefix(iter.Key(), prefix) { if !filter([]byte(iter.Key()), iter.Value()) { break } err = iter.Next() if err != nil { return errors.Trace(err) } } else { break } } return nil }
func mustNotGet(c *C, txn kv.Transaction) { for i := startIndex; i < testCount; i++ { s := encodeInt(i * indexStep) _, err := txn.Get(s) c.Assert(err, NotNil) } }
func fetchRowColVals(txn kv.Transaction, t table.Table, handle int64, indexInfo *model.IndexInfo) ( kv.Key, []types.Datum, error) { // fetch datas cols := t.Cols() colMap := make(map[int64]*types.FieldType) for _, v := range indexInfo.Columns { col := cols[v.Offset] colMap[col.ID] = &col.FieldType } rowKey := tablecodec.EncodeRecordKey(t.RecordPrefix(), handle) rowVal, err := txn.Get(rowKey) if err != nil { return nil, nil, errors.Trace(err) } row, err := tablecodec.DecodeRow(rowVal, colMap) if err != nil { return nil, nil, errors.Trace(err) } vals := make([]types.Datum, 0, len(indexInfo.Columns)) for _, v := range indexInfo.Columns { col := cols[v.Offset] vals = append(vals, row[col.ID]) } return rowKey, vals, nil }
func insertData(c *C, txn kv.Transaction) { for i := startIndex; i < testCount; i++ { val := encodeInt(i * indexStep) err := txn.Set(val, val) c.Assert(err, IsNil) } }
// Next implements plan.Plan Next interface. func (r *TableNilPlan) Next(ctx context.Context) (row *plan.Row, err error) { if r.iter == nil { var txn kv.Transaction txn, err = ctx.GetTxn(false) if err != nil { return nil, errors.Trace(err) } r.iter, err = txn.Seek([]byte(r.T.FirstKey()), nil) if err != nil { return nil, errors.Trace(err) } } if !r.iter.Valid() || !strings.HasPrefix(r.iter.Key(), r.T.KeyPrefix()) { return } id, err := util.DecodeHandleFromRowKey(r.iter.Key()) if err != nil { return nil, errors.Trace(err) } rk := r.T.RecordKey(id, nil) // Even though the data is nil, we should return not nil row, // or the iteration will stop. row = &plan.Row{} r.iter, err = kv.NextUntil(r.iter, util.RowKeyPrefixFilter(rk)) return }
func setRow(txn kv.Transaction, handle int64, tbl *simpleTableInfo, gen genValueFunc) error { rowKey := tablecodec.EncodeRowKey(tbl.tID, codec.EncodeInt(nil, handle)) columnValues := gen(handle, tbl) value, err := tablecodec.EncodeRow(columnValues, tbl.cIDs) if err != nil { return errors.Trace(err) } err = txn.Set(rowKey, value) if err != nil { return errors.Trace(err) } for i, idxCol := range tbl.indices { idxVal := columnValues[idxCol] encoded, err := codec.EncodeKey(nil, idxVal, types.NewDatum(handle)) if err != nil { return errors.Trace(err) } idxKey := tablecodec.EncodeIndexSeekKey(tbl.tID, tbl.iIDs[i], encoded) err = txn.Set(idxKey, []byte{0}) if err != nil { return errors.Trace(err) } } return nil }
func mustDel(c *C, txn kv.Transaction) { for i := startIndex; i < testCount; i++ { val := encodeInt(i * indexStep) err := txn.Delete(val) c.Assert(err, IsNil) } }
// SetDDLBinlog sets DDL binlog in the kv.Transaction. func SetDDLBinlog(txn kv.Transaction, jobID int64, ddlQuery string) { bin := &binlog.Binlog{ Tp: binlog.BinlogType_Prewrite, DdlJobId: jobID, DdlQuery: []byte(ddlQuery), } txn.SetOption(kv.BinlogData, bin) }
func setColValue(c *C, txn kv.Transaction, key kv.Key, v types.Datum) { row := []types.Datum{v, {}} colIDs := []int64{2, 3} value, err := tablecodec.EncodeRow(row, colIDs) c.Assert(err, IsNil) err = txn.Set(key, value) c.Assert(err, IsNil) }
func mustGet(c *C, txn kv.Transaction) { for i := startIndex; i < testCount; i++ { s := encodeInt(i * indexStep) val, err := txn.Get(s) c.Assert(err, IsNil) c.Assert(string(val), Equals, string(s)) } }
// SetColValue implements table.Table SetColValue interface. func (t *Table) SetColValue(txn kv.Transaction, key []byte, data interface{}) error { v, err := t.EncodeValue(data) if err != nil { return errors.Trace(err) } if err := txn.Set(key, v); err != nil { return errors.Trace(err) } return nil }
func (e *XSelectIndexExec) doIndexRequest(txn kv.Transaction) (*xapi.SelectResult, error) { selIdxReq := new(tipb.SelectRequest) startTs := txn.StartTS() selIdxReq.StartTs = &startTs selIdxReq.IndexInfo = tablecodec.IndexToProto(e.table.Meta(), e.indexPlan.Index) var err error selIdxReq.Ranges, err = indexRangesToPBRanges(e.indexPlan.Ranges) if err != nil { return nil, errors.Trace(err) } return xapi.Select(txn.GetClient(), selIdxReq, 1) }
// Next implements plan.Plan Next interface.
// It returns the next row of a full table scan, or (nil, nil) once the
// iterator is exhausted or leaves the table's record range. Range scans are
// delegated to rangeNext.
func (r *TableDefaultPlan) Next(ctx context.Context) (row *plan.Row, err error) {
	if r.rangeScan {
		return r.rangeNext(ctx)
	}
	// Lazily open the iterator at the table's first key on the first call.
	if r.iter == nil {
		var txn kv.Transaction
		txn, err = ctx.GetTxn(false)
		if err != nil {
			return nil, errors.Trace(err)
		}
		r.iter, err = txn.Seek(r.T.FirstKey())
		if err != nil {
			return nil, errors.Trace(err)
		}
	}
	// Stop when the iterator is invalid or has moved past this table's
	// records; the naked return yields (nil, nil), ending the iteration.
	if !r.iter.Valid() || !r.iter.Key().HasPrefix(r.T.RecordPrefix()) {
		return
	}
	// TODO: check if lock valid
	// the record layout in storage (key -> value):
	// r1 -> lock-version
	// r1_col1 -> r1 col1 value
	// r1_col2 -> r1 col2 value
	// r2 -> lock-version
	// r2_col1 -> r2 col1 value
	// r2_col2 -> r2 col2 value
	// ...
	rowKey := r.iter.Key()
	handle, err := tables.DecodeRecordKeyHandle(rowKey)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// TODO: we could just fetch mentioned columns' values
	row = &plan.Row{}
	row.Data, err = r.T.Row(ctx, handle)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// Put rowKey to the tail of record row
	rke := &plan.RowKeyEntry{
		Tbl: r.T,
		Key: string(rowKey),
	}
	row.RowKeys = append(row.RowKeys, rke)
	// Skip the remaining keys of the current row so the next call starts at
	// the next record.
	rk := r.T.RecordKey(handle, nil)
	err = kv.NextUntil(r.iter, util.RowKeyPrefixFilter(rk))
	if err != nil {
		return nil, errors.Trace(err)
	}
	return
}
func checkRowExist(txn kv.Transaction, t table.Table, handle int64) (bool, error) { _, err := txn.Get(t.RecordKey(handle, nil)) if terror.ErrorEqual(err, kv.ErrNotExist) { // If row doesn't exist, we may have deleted the row already, // no need to add index again. return false, nil } else if err != nil { return false, errors.Trace(err) } return true, nil }
// loadInfoSchema loads the info schema (databases and their public tables)
// from txn and stores it into the domain's info handle, unless the cached
// schema version is already up to date. On failure it records the txn's
// start TS as the last failed timestamp.
func (do *Domain) loadInfoSchema(txn kv.Transaction) (err error) {
	// Record the failure timestamp via the named return value.
	defer func() {
		if err != nil {
			do.SchemaValidity.setLastFailedTS(txn.StartTS())
		}
	}()
	m := meta.NewMeta(txn)
	schemaMetaVersion, err := m.GetSchemaVersion()
	if err != nil {
		return errors.Trace(err)
	}
	info := do.infoHandle.Get()
	if info != nil && schemaMetaVersion <= info.SchemaMetaVersion() {
		// info may be changed by other txn, so here its version may be bigger than schema version,
		// so we don't need to reload.
		log.Debugf("[ddl] schema version is still %d, no need reload", schemaMetaVersion)
		return nil
	}
	schemas, err := m.ListDatabases()
	if err != nil {
		return errors.Trace(err)
	}
	for _, di := range schemas {
		if di.State != model.StatePublic {
			// schema is not public, can't be used outside.
			continue
		}
		tables, err1 := m.ListTables(di.ID)
		if err1 != nil {
			err = err1
			return errors.Trace(err1)
		}
		di.Tables = make([]*model.TableInfo, 0, len(tables))
		for _, tbl := range tables {
			if tbl.State != model.StatePublic {
				// table is not public, can't be used outside.
				continue
			}
			di.Tables = append(di.Tables, tbl)
		}
	}
	log.Infof("[ddl] loadInfoSchema %d", schemaMetaVersion)
	err = do.infoHandle.Set(schemas, schemaMetaVersion)
	return errors.Trace(err)
}
// fetchRowColVals batch-fetches the rows for the given handles from a
// snapshot taken at the transaction's start timestamp, and extracts the
// values of the columns referenced by indexInfo. Handles whose row no longer
// exists are skipped, so the result may contain fewer records than handles.
func (d *ddl) fetchRowColVals(txn kv.Transaction, t table.Table, handles []int64, indexInfo *model.IndexInfo) (
	[]*indexRecord, error) {
	// Through handles access to get all row keys.
	handlesLen := len(handles)
	rowKeys := make([]kv.Key, 0, handlesLen)
	for _, h := range handles {
		rowKey := tablecodec.EncodeRecordKey(t.RecordPrefix(), h)
		rowKeys = append(rowKeys, rowKey)
	}
	// Get corresponding raw values for rowKeys from a snapshot so that all
	// reads observe data as of the txn's start TS.
	ver := kv.Version{Ver: txn.StartTS()}
	snap, err := d.store.GetSnapshot(ver)
	if err != nil {
		return nil, errors.Trace(err)
	}
	pairMap, err := snap.BatchGet(rowKeys)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// Build a column-ID -> field-type map restricted to the indexed columns.
	cols := t.Cols()
	colMap := make(map[int64]*types.FieldType)
	for _, v := range indexInfo.Columns {
		col := cols[v.Offset]
		colMap[col.ID] = &col.FieldType
	}
	idxRecords := make([]*indexRecord, 0, handlesLen)
	for i, rowKey := range rowKeys {
		rawVal, ok := pairMap[string(rowKey)]
		if !ok {
			// Row doesn't exist, skip it.
			continue
		}
		row, err := tablecodec.DecodeRow(rawVal, colMap)
		if err != nil {
			return nil, errors.Trace(err)
		}
		// Gather the indexed columns' datums in index-column order.
		rowVal := make([]types.Datum, 0, len(indexInfo.Columns))
		for _, v := range indexInfo.Columns {
			col := cols[v.Offset]
			rowVal = append(rowVal, row[col.ID])
		}
		idxRecord := &indexRecord{handle: handles[i], key: rowKey, vals: rowVal}
		idxRecords = append(idxRecords, idxRecord)
	}
	return idxRecords, nil
}
func (d *ddl) writeSchemaInfo(info *model.DBInfo, txn kv.Transaction) error { var b []byte b, err := json.Marshal(info) if err != nil { return errors.Trace(err) } key := []byte(meta.DBMetaKey(info.ID)) if err := txn.LockKeys(key); err != nil { return errors.Trace(err) } txn.Set(key, b) log.Warn("save schema", string(b)) return errors.Trace(err) }
// GenID adds step to the value for key and returns the sum. func GenID(txn kv.Transaction, key []byte, step int) (int64, error) { if len(key) == 0 { return 0, errors.New("Invalid key") } err := txn.LockKeys(key) if err != nil { return 0, err } id, err := txn.Inc(key, int64(step)) if err != nil { return 0, errors.Trace(err) } return id, errors.Trace(err) }
// backfillColumnInTxn deals with a part of backfilling column data in a Transaction. // This part of the column data rows is defaultSmallBatchCnt. func (d *ddl) backfillColumnInTxn(t table.Table, colID int64, handles []int64, colMap map[int64]*types.FieldType, defaultVal types.Datum, txn kv.Transaction) (int64, error) { nextHandle := handles[0] for _, handle := range handles { log.Debug("[ddl] backfill column...", handle) rowKey := t.RecordKey(handle) rowVal, err := txn.Get(rowKey) if terror.ErrorEqual(err, kv.ErrNotExist) { // If row doesn't exist, skip it. continue } if err != nil { return 0, errors.Trace(err) } rowColumns, err := tablecodec.DecodeRow(rowVal, colMap) if err != nil { return 0, errors.Trace(err) } if _, ok := rowColumns[colID]; ok { // The column is already added by update or insert statement, skip it. continue } newColumnIDs := make([]int64, 0, len(rowColumns)+1) newRow := make([]types.Datum, 0, len(rowColumns)+1) for colID, val := range rowColumns { newColumnIDs = append(newColumnIDs, colID) newRow = append(newRow, val) } newColumnIDs = append(newColumnIDs, colID) newRow = append(newRow, defaultVal) newRowVal, err := tablecodec.EncodeRow(newRow, newColumnIDs) if err != nil { return 0, errors.Trace(err) } err = txn.Set(rowKey, newRowVal) if err != nil { return 0, errors.Trace(err) } } return nextHandle, nil }
func (e *XSelectIndexExec) doTableRequest(txn kv.Transaction, handles []int64) (*xapi.SelectResult, error) { selTableReq := new(tipb.SelectRequest) startTs := txn.StartTS() selTableReq.StartTs = &startTs selTableReq.TableInfo = tablecodec.TableToProto(e.indexPlan.Table) selTableReq.Fields = resultFieldsToPBExpression(e.indexPlan.Fields()) for _, h := range handles { if h == math.MaxInt64 { // We can't convert MaxInt64 into an left closed, right open range. continue } pbRange := new(tipb.KeyRange) pbRange.Low = codec.EncodeInt(nil, h) pbRange.High = codec.EncodeInt(nil, h) selTableReq.Ranges = append(selTableReq.Ranges, pbRange) } selTableReq.Where = conditionsToPBExpression(e.indexPlan.FilterConditions...) return xapi.Select(txn.GetClient(), selTableReq, 10) }
func (d *ddl) isReorgRunnable(txn kv.Transaction, flag JobType) error { if d.isClosed() { // worker is closed, can't run reorganization. return errors.Trace(errInvalidWorker.Gen("worker is closed")) } t := meta.NewMeta(txn) owner, err := d.getJobOwner(t, flag) if err != nil { return errors.Trace(err) } if owner == nil || owner.OwnerID != d.uuid { // if no owner, we will try later, so here just return error. // or another server is owner, return error too. log.Infof("[ddl] %s job, self id %s owner %s, txnTS:%d", flag, d.uuid, owner, txn.StartTS()) return errors.Trace(errNotOwner) } return nil }
func fetchRowColVals(txn kv.Transaction, t table.Table, handle int64, indexInfo *model.IndexInfo) ([]types.Datum, error) { // fetch datas cols := t.Cols() vals := make([]types.Datum, 0, len(indexInfo.Columns)) for _, v := range indexInfo.Columns { col := cols[v.Offset] k := t.RecordKey(handle, col) data, err := txn.Get(k) if err != nil { return nil, errors.Trace(err) } val, err := tables.DecodeValue(data, &col.FieldType) if err != nil { return nil, errors.Trace(err) } vals = append(vals, val) } return vals, nil }
func checkSeek(c *C, txn kv.Transaction) { for i := startIndex; i < testCount; i++ { val := encodeInt(i) iter, err := txn.Seek(val, nil) c.Assert(err, IsNil) c.Assert(iter.Key(), Equals, string(val)) c.Assert(decodeInt([]byte(valToStr(c, iter))), Equals, i) iter.Close() } // Test iterator Next() for i := startIndex; i < testCount-1; i++ { val := encodeInt(i) iter, err := txn.Seek(val, nil) c.Assert(err, IsNil) c.Assert(iter.Key(), Equals, string(val)) c.Assert(valToStr(c, iter), Equals, string(val)) next, err := iter.Next(nil) c.Assert(err, IsNil) c.Assert(next.Valid(), IsTrue) val = encodeInt(i + 1) c.Assert(next.Key(), Equals, string(val)) c.Assert(valToStr(c, next), Equals, string(val)) iter.Close() } // Non exist seek test iter, err := txn.Seek(encodeInt(testCount), nil) c.Assert(err, IsNil) c.Assert(iter.Valid(), IsFalse) iter.Close() }
func (do *Domain) loadInfoSchema(txn kv.Transaction) (err error) { var schemas []*model.DBInfo err = util.ScanMetaWithPrefix(txn, meta.SchemaMetaPrefix, func(key []byte, value []byte) bool { di := &model.DBInfo{} err := json.Unmarshal(value, di) if err != nil { log.Fatal(err) } schemas = append(schemas, di) return true }) if err != nil { return errors.Trace(err) } schemaMetaVersion, err := txn.GetInt64(meta.SchemaMetaVersionKey) if err != nil { return } log.Info("loadInfoSchema %d", schemaMetaVersion) do.infoHandle.Set(schemas, schemaMetaVersion) return }
func fetchRowColVals(txn kv.Transaction, t table.Table, handle int64, indexInfo *model.IndexInfo) ([]interface{}, error) { // fetch datas cols := t.Cols() var vals []interface{} for _, v := range indexInfo.Columns { var val interface{} col := cols[v.Offset] k := t.RecordKey(handle, col) data, err := txn.Get(k) if err != nil { return nil, errors.Trace(err) } val, err = t.DecodeValue(data, col) if err != nil { return nil, errors.Trace(err) } vals = append(vals, val) } return vals, nil }
func fetchRowColVals(txn kv.Transaction, t table.Table, handle int64, indexInfo *model.IndexInfo) ([]types.Datum, error) { // fetch datas cols := t.Cols() vals := make([]types.Datum, 0, len(indexInfo.Columns)) for _, v := range indexInfo.Columns { col := cols[v.Offset] k := t.RecordKey(handle, col) data, err := txn.Get(k) if err != nil { if terror.ErrorEqual(err, kv.ErrNotExist) && !mysql.HasNotNullFlag(col.Flag) { vals = append(vals, types.Datum{}) continue } return nil, errors.Trace(err) } val, err := tables.DecodeValue(data, &col.FieldType) if err != nil { return nil, errors.Trace(err) } vals = append(vals, val) } return vals, nil }