func (s *testKVSuite) TestBoltDBDeadlock(c *C) { d := Driver{ boltdb.Driver{}, } path := "boltdb_test" defer os.Remove(path) store, err := d.Open(path) c.Assert(err, IsNil) defer store.Close() kv.RunInNewTxn(store, false, func(txn kv.Transaction) error { txn.Set([]byte("a"), []byte("0")) txn.Inc([]byte("a"), 1) kv.RunInNewTxn(store, false, func(txn kv.Transaction) error { txn.Set([]byte("b"), []byte("0")) txn.Inc([]byte("b"), 1) return nil }) return nil }) kv.RunInNewTxn(store, false, func(txn kv.Transaction) error { n, err := txn.GetInt64([]byte("a")) c.Assert(err, IsNil) c.Assert(n, Equals, int64(1)) n, err = txn.GetInt64([]byte("b")) c.Assert(err, IsNil) c.Assert(n, Equals, int64(1)) return nil }) }
func (s *testDBSuite) TestUpdateMultipleTable(c *C) { defer testleak.AfterTest(c) store, err := tidb.NewStore("memory://update_multiple_table") c.Assert(err, IsNil) tk := testkit.NewTestKit(c, store) tk.MustExec("use test") tk.MustExec("create table t1 (c1 int, c2 int)") tk.MustExec("insert t1 values (1, 1), (2, 2)") tk.MustExec("create table t2 (c1 int, c2 int)") tk.MustExec("insert t2 values (1, 3), (2, 5)") ctx := tk.Se.(context.Context) domain := sessionctx.GetDomain(ctx) is := domain.InfoSchema() db, ok := is.SchemaByName(model.NewCIStr("test")) c.Assert(ok, IsTrue) t1Tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t1")) c.Assert(err, IsNil) t1Info := t1Tbl.Meta() // Add a new column in write only state. newColumn := &model.ColumnInfo{ ID: 100, Name: model.NewCIStr("c3"), Offset: 2, DefaultValue: 9, FieldType: *types.NewFieldType(mysql.TypeLonglong), State: model.StateWriteOnly, } t1Info.Columns = append(t1Info.Columns, newColumn) kv.RunInNewTxn(store, false, func(txn kv.Transaction) error { m := meta.NewMeta(txn) _, err = m.GenSchemaVersion() c.Assert(err, IsNil) c.Assert(m.UpdateTable(db.ID, t1Info), IsNil) return nil }) err = domain.Reload() c.Assert(err, IsNil) tk.MustExec("update t1, t2 set t1.c1 = 8, t2.c2 = 10 where t1.c2 = t2.c1") tk.MustQuery("select * from t1").Check(testkit.Rows("8 1", "8 2")) tk.MustQuery("select * from t2").Check(testkit.Rows("1 10", "2 10")) newColumn.State = model.StatePublic kv.RunInNewTxn(store, false, func(txn kv.Transaction) error { m := meta.NewMeta(txn) _, err = m.GenSchemaVersion() c.Assert(err, IsNil) c.Assert(m.UpdateTable(db.ID, t1Info), IsNil) return nil }) err = domain.Reload() c.Assert(err, IsNil) tk.MustQuery("select * from t1").Check(testkit.Rows("8 1 9", "8 2 9")) }
func (d *ddl) dropTableIndex(t table.Table, indexInfo *model.IndexInfo) error { prefix := kv.GenIndexPrefix(t.IndexPrefix(), indexInfo.Name.L) prefixBytes := []byte(prefix) keys := make([]string, maxBatchSize) for { keys := keys[0:0] err := kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error { iter, err := txn.Seek(prefixBytes) if err != nil { return errors.Trace(err) } defer iter.Close() for i := 0; i < maxBatchSize; i++ { if iter.Valid() && strings.HasPrefix(iter.Key(), prefix) { keys = append(keys, iter.Key()) err = iter.Next() if err != nil { return errors.Trace(err) } } else { break } } return nil }) // if err or delete no keys, return. if err != nil || len(keys) == 0 { return errors.Trace(err) } // delete index key one by one for _, key := range keys { err = kv.RunInNewTxn(d.store, true, func(txn kv.Transaction) error { if err := d.isReorgRunnable(txn); err != nil { return errors.Trace(err) } err1 := txn.Delete([]byte(key)) // if key doesn't exist, skip this error. if err1 != nil && !terror.ErrorEqual(err1, kv.ErrNotExist) { return errors.Trace(err1) } return nil }) if err != nil { return errors.Trace(err) } } } }
// Reload reloads InfoSchema.
func (do *Domain) Reload() error {
	// for test
	// When MockReloadFailed is set, record the txn start TS as the last
	// failed timestamp and fail deliberately so callers can exercise the
	// reload-failure path.
	if do.SchemaValidity.MockReloadFailed {
		err := kv.RunInNewTxn(do.store, false, func(txn kv.Transaction) error {
			do.SchemaValidity.setLastFailedTS(txn.StartTS())
			return nil
		})
		if err != nil {
			log.Errorf("mock reload failed err:%v", err)
			return errors.Trace(err)
		}
		return errors.New("mock reload failed")
	}

	// lock here for only once at same time.
	do.m.Lock()
	defer do.m.Unlock()

	// Bound the reload by half the DDL lease, but never below the minimum.
	timeout := do.ddl.GetLease() / 2
	if timeout < defaultMinReloadTimeout {
		timeout = defaultMinReloadTimeout
	}

	// exit is set to 1 atomically when the caller times out, telling the
	// retry goroutine to stop instead of sending on an abandoned channel.
	exit := int32(0)
	// done is buffered (cap 1) so the goroutine's send cannot block even if
	// the caller has already returned via the timeout branch.
	done := make(chan error, 1)
	go func() {
		var err error
		for {
			err = kv.RunInNewTxn(do.store, false, do.loadInfoSchema)
			if err == nil {
				// Record the successful load time for lease checks.
				atomic.StoreInt64(&do.lastLeaseTS, time.Now().UnixNano())
				break
			}
			log.Errorf("[ddl] load schema err %v, retry again", errors.ErrorStack(err))
			if atomic.LoadInt32(&exit) == 1 {
				// Caller gave up; stop retrying.
				return
			}
			// TODO: use a backoff algorithm.
			time.Sleep(500 * time.Millisecond)
			continue
		}
		done <- err
	}()

	select {
	case err := <-done:
		return errors.Trace(err)
	case <-time.After(timeout):
		atomic.StoreInt32(&exit, 1)
		return ErrLoadSchemaTimeOut
	}
}
func (t *testIsolationSuite) TestMultiInc(c *C) { store, err := tidb.NewStore("memory://test/test_isolation") c.Assert(err, IsNil) defer store.Close() threadCnt := 4 incCnt := 100 keyCnt := 4 keys := make([][]byte, 0, keyCnt) for i := 0; i < keyCnt; i++ { keys = append(keys, []byte(fmt.Sprintf("test_key_%d", i))) } var wg sync.WaitGroup wg.Add(threadCnt) for i := 0; i < threadCnt; i++ { go func() { defer wg.Done() for j := 0; j < incCnt; j++ { err1 := kv.RunInNewTxn(store, true, func(txn kv.Transaction) error { for _, key := range keys { _, err2 := kv.IncInt64(txn, key, 1) if err2 != nil { return err2 } } return nil }) c.Assert(err1, IsNil) } }() } wg.Wait() for i := 0; i < keyCnt; i++ { err = kv.RunInNewTxn(store, false, func(txn kv.Transaction) error { for _, key := range keys { id, err1 := kv.GetInt64(txn, key) if err1 != nil { return err1 } c.Assert(id, Equals, int64(threadCnt*incCnt)) } return nil }) c.Assert(err, IsNil) } }
func (s *testKVSuite) TestIsolationMultiInc(c *C) { defer testleak.AfterTest(c)() threadCnt := 4 incCnt := 100 keyCnt := 4 keys := make([][]byte, 0, keyCnt) for i := 0; i < keyCnt; i++ { keys = append(keys, []byte(fmt.Sprintf("test_key_%d", i))) } var wg sync.WaitGroup wg.Add(threadCnt) for i := 0; i < threadCnt; i++ { go func() { defer wg.Done() for j := 0; j < incCnt; j++ { err := kv.RunInNewTxn(s.s, true, func(txn kv.Transaction) error { for _, key := range keys { _, err1 := kv.IncInt64(txn, key, 1) if err1 != nil { return err1 } } return nil }) c.Assert(err, IsNil) } }() } wg.Wait() err := kv.RunInNewTxn(s.s, false, func(txn kv.Transaction) error { for _, key := range keys { id, err1 := kv.GetInt64(txn, key) if err1 != nil { return err1 } c.Assert(id, Equals, int64(threadCnt*incCnt)) txn.Delete(key) } return nil }) c.Assert(err, IsNil) }
func (*testSuite) TestT(c *C) { driver := localstore.Driver{Driver: goleveldb.MemoryDriver{}} store, err := driver.Open("memory") c.Assert(err, IsNil) defer store.Close() err = kv.RunInNewTxn(store, false, func(txn kv.Transaction) error { m := meta.NewMeta(txn) err = m.CreateDatabase(&model.DBInfo{ID: 1, Name: model.NewCIStr("a")}) c.Assert(err, IsNil) err = m.CreateTable(1, &model.TableInfo{ID: 1, Name: model.NewCIStr("t")}) c.Assert(err, IsNil) return nil }) c.Assert(err, IsNil) alloc := autoid.NewAllocator(store, 1) c.Assert(alloc, NotNil) id, err := alloc.Alloc(1) c.Assert(err, IsNil) c.Assert(id, Equals, int64(1)) id, err = alloc.Alloc(1) c.Assert(err, IsNil) c.Assert(id, Equals, int64(2)) id, err = alloc.Alloc(0) c.Assert(err, NotNil) }
// Alloc allocs the next autoID for table with tableID. // It gets a batch of autoIDs at a time. So it does not need to access storage for each call. func (alloc *allocator) Alloc(tableID int64) (int64, error) { if tableID == 0 { return 0, errors.New("Invalid tableID") } metaKey := meta.AutoIDKey(tableID) alloc.mu.Lock() defer alloc.mu.Unlock() if alloc.base == alloc.end { // step err := kv.RunInNewTxn(alloc.store, true, func(txn kv.Transaction) error { end, err := meta.GenID(txn, []byte(metaKey), step) if err != nil { return errors.Trace(err) } alloc.end = end alloc.base = alloc.end - step return nil }) if err != nil { return 0, errors.Trace(err) } } alloc.base++ log.Infof("Alloc id %d, table ID:%d, from %p, store ID:%s", alloc.base, tableID, alloc, alloc.store.UUID()) return alloc.base, nil }
func (s *testDDLSuite) TestDropSchemaError(c *C) { defer testleak.AfterTest(c)() store := testCreateStore(c, "test_drop_schema") defer store.Close() lease := 50 * time.Millisecond d := newDDL(store, nil, nil, lease) defer d.close() job := &model.Job{ SchemaID: 1, Type: model.ActionDropSchema, Args: []interface{}{&model.DBInfo{ Name: model.CIStr{O: "test"}, }}, } err := kv.RunInNewTxn(store, false, func(txn kv.Transaction) error { t := meta.NewMeta(txn) return d.prepareBgJob(t, job) }) c.Check(err, IsNil) d.startBgJob(job.Type) time.Sleep(lease) verifyBgJobState(c, d, job, model.JobDone) }
// dropTableColumn deletes the stored values of the dropped column colInfo
// for every row of table t, batch by batch, resuming from reorgInfo.Handle.
func (d *ddl) dropTableColumn(t table.Table, colInfo *model.ColumnInfo, reorgInfo *reorgInfo) error {
	version := reorgInfo.SnapshotVer
	seekHandle := reorgInfo.Handle
	col := &column.Col{ColumnInfo: *colInfo}
	for {
		// Scan the next batch of row handles from the snapshot at `version`.
		handles, err := d.getSnapshotRows(t, version, seekHandle)
		if err != nil {
			return errors.Trace(err)
		} else if len(handles) == 0 {
			// No rows left: the whole table has been processed.
			return nil
		}

		// The next batch starts just after the last handle seen.
		seekHandle = handles[len(handles)-1] + 1

		err = kv.RunInNewTxn(d.store, true, func(txn kv.Transaction) error {
			// Abort early if the reorg job was cancelled or ownership moved.
			if err1 := d.isReorgRunnable(txn); err1 != nil {
				return errors.Trace(err1)
			}

			var h int64
			for _, h = range handles {
				key := t.RecordKey(h, col)
				err1 := txn.Delete(key)
				// if key doesn't exist, skip this error.
				if err1 != nil && !terror.ErrorEqual(err1, kv.ErrNotExist) {
					return errors.Trace(err1)
				}
			}

			// Persist the last processed handle so a restart resumes here.
			// handles is non-empty here (checked above), so h is always set.
			return errors.Trace(reorgInfo.UpdateHandle(txn, h))
		})
		if err != nil {
			return errors.Trace(err)
		}
	}
}
func (d *ddl) CreateSchema(ctx context.Context, schema model.CIStr) (err error) { is := d.GetInformationSchema() _, ok := is.SchemaByName(schema) if ok { return errors.Trace(ErrExists) } info := &model.DBInfo{Name: schema} info.ID, err = d.genGlobalID() if err != nil { return errors.Trace(err) } err = kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error { t := meta.NewMeta(txn) err := d.verifySchemaMetaVersion(t, is.SchemaMetaVersion()) if err != nil { return errors.Trace(err) } err = t.CreateDatabase(info) log.Warnf("save schema %s", info) return errors.Trace(err) }) if d.onDDLChange != nil { err = d.onDDLChange(err) } return errors.Trace(err) }
func isBoostrapped(store kv.Storage) bool { // check in memory _, ok := storeBootstrapped[store.UUID()] if ok { return true } // check in kv store err := kv.RunInNewTxn(store, false, func(txn kv.Transaction) error { var err error t := meta.NewMeta(txn) ok, err = t.IsBootstrapped() return errors.Trace(err) }) if err != nil { log.Fatalf("check bootstrapped err %v", err) } if ok { // here mean memory is not ok, but other server has already finished it storeBootstrapped[store.UUID()] = true } return ok }
// Alloc allocs the next autoID for table with tableID. // It gets a batch of autoIDs at a time. So it does not need to access storage for each call. func (alloc *allocator) Alloc(tableID int64) (int64, error) { if tableID == 0 { return 0, errors.New("Invalid tableID") } alloc.mu.Lock() defer alloc.mu.Unlock() if alloc.base == alloc.end { // step err := kv.RunInNewTxn(alloc.store, true, func(txn kv.Transaction) error { m := meta.NewMeta(txn) // err1 is used for passing `go tool vet --shadow` check. end, err1 := m.GenAutoTableID(alloc.dbID, tableID, step) if err1 != nil { return errors.Trace(err1) } alloc.end = end alloc.base = alloc.end - step return nil }) if err != nil { return 0, errors.Trace(err) } } alloc.base++ log.Infof("Alloc id %d, table ID:%d, from %p, database ID:%d", alloc.base, tableID, alloc, alloc.dbID) return alloc.base, nil }
func (s *testDDLSuite) TestDropTableError(c *C) { defer testleak.AfterTest(c)() store := testCreateStore(c, "test_drop_table") defer store.Close() d := newDDL(store, nil, nil, testLease) defer d.close() dbInfo := testSchemaInfo(c, d, "test") testCreateSchema(c, mock.NewContext(), d, dbInfo) job := &model.Job{ SchemaID: dbInfo.ID, Type: model.ActionDropTable, Args: []interface{}{&model.TableInfo{ ID: 1, Name: model.CIStr{O: "t"}, }}, } err := kv.RunInNewTxn(store, false, func(txn kv.Transaction) error { t := meta.NewMeta(txn) return d.prepareBgJob(t, job) }) c.Check(err, IsNil) d.startBgJob(job.Type) time.Sleep(testLease * 3) verifyBgJobState(c, d, job, model.JobDone) }
func (t *testIsolationSuite) TestInc(c *C) { store, err := tidb.NewStore("memory://test/test_isolation") c.Assert(err, IsNil) defer store.Close() threadCnt := 4 ids := make(map[int64]struct{}, threadCnt*100) var m sync.Mutex var wg sync.WaitGroup wg.Add(threadCnt) for i := 0; i < threadCnt; i++ { go func() { defer wg.Done() for j := 0; j < 100; j++ { var id int64 err := kv.RunInNewTxn(store, true, func(txn kv.Transaction) error { var err1 error id, err1 = kv.IncInt64(txn, []byte("key"), 1) return err1 }) c.Assert(err, IsNil) m.Lock() _, ok := ids[id] ids[id] = struct{}{} m.Unlock() c.Assert(ok, IsFalse) } }() } wg.Wait() }
func testCheckSchemaState(c *C, d *ddl, dbInfo *model.DBInfo, state model.SchemaState) { isDropped := true for { kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error { t := meta.NewMeta(txn) info, err := t.GetDatabase(dbInfo.ID) c.Assert(err, IsNil) if state == model.StateNone { isDropped = checkDrop(c, t) if !isDropped { return nil } c.Assert(info, IsNil) return nil } c.Assert(info.Name, DeepEquals, dbInfo.Name) c.Assert(info.State, Equals, state) return nil }) if isDropped { break } } }
func (d *ddl) CreateSchema(ctx context.Context, schema model.CIStr) (err error) { is := d.GetInformationSchema() _, ok := is.SchemaByName(schema) if ok { return errors.Trace(ErrExists) } info := &model.DBInfo{Name: schema} info.ID, err = meta.GenGlobalID(d.store) if err != nil { return errors.Trace(err) } err = kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error { err := d.verifySchemaMetaVersion(txn, is.SchemaMetaVersion()) if err != nil { return errors.Trace(err) } err = d.writeSchemaInfo(info, txn) return errors.Trace(err) }) if d.onDDLChange != nil { err = d.onDDLChange(err) } return errors.Trace(err) }
func getStoreBootstrapVersion(store kv.Storage) int64 { // check in memory _, ok := storeBootstrapped[store.UUID()] if ok { return currentBootstrapVersion } var ver int64 // check in kv store err := kv.RunInNewTxn(store, false, func(txn kv.Transaction) error { var err error t := meta.NewMeta(txn) ver, err = t.GetBootstrapVersion() return errors.Trace(err) }) if err != nil { log.Fatalf("check bootstrapped err %v", err) } if ver > notBootstrapped { // here mean memory is not ok, but other server has already finished it storeBootstrapped[store.UUID()] = true } return ver }
// DropTable will proceed even if some table in the list does not exists. func (d *ddl) DropTable(ctx context.Context, ti table.Ident) (err error) { is := d.GetInformationSchema() schema, ok := is.SchemaByName(ti.Schema) if !ok { return errors.Trace(qerror.ErrDatabaseNotExist) } tb, err := is.TableByName(ti.Schema, ti.Name) if err != nil { return errors.Trace(err) } err = kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error { t := meta.NewMeta(txn) err := d.verifySchemaMetaVersion(t, is.SchemaMetaVersion()) if err != nil { return errors.Trace(err) } err = t.DropTable(schema.ID, tb.Meta().ID) return errors.Trace(err) }) if d.onDDLChange != nil { err = d.onDDLChange(err) if err != nil { return errors.Trace(err) } } err = d.deleteTableData(ctx, tb) return errors.Trace(err) }
func (d *ddl) startJob(ctx context.Context, job *model.Job) error { // for every DDL, we must commit current transaction. if err := ctx.FinishTxn(false); err != nil { return errors.Trace(err) } // Create a new job and queue it. err := kv.RunInNewTxn(d.store, true, func(txn kv.Transaction) error { t := meta.NewMeta(txn) var err error job.ID, err = t.GenGlobalID() if err != nil { return errors.Trace(err) } err = t.EnQueueDDLJob(job) return errors.Trace(err) }) if err != nil { return errors.Trace(err) } // notice worker that we push a new job and wait the job done. asyncNotify(d.jobCh) log.Warnf("start DDL job %v", job) jobID := job.ID var historyJob *model.Job // for a job from start to end, the state of it will be none -> delete only -> write only -> reorganization -> public // for every state change, we will wait as lease 2 * lease time, so here the ticker check is 10 * lease. ticker := time.NewTicker(chooseLeaseTime(10*d.lease, 10*time.Second)) defer ticker.Stop() for { select { case <-d.jobDoneCh: case <-ticker.C: } historyJob, err = d.getHistoryJob(jobID) if err != nil { log.Errorf("get history job err %v, check again", err) continue } else if historyJob == nil { log.Warnf("job %d is not in history, maybe not run", jobID) continue } // if a job is a history table, the state must be JobDone or JobCancel. if historyJob.State == model.JobDone { return nil } return errors.Errorf(historyJob.Error) } }
func (d *ddl) checkOwner() error { err := kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error { t := meta.NewMeta(txn) return errors.Trace(d.verifyOwner(t)) }) return errors.Trace(err) }
// backfillColumnData writes the new column's default value into each existing
// row listed in handles, skipping rows that already carry a value for the
// column, and records progress through reorgInfo after each row.
func (d *ddl) backfillColumnData(t table.Table, columnInfo *model.ColumnInfo, handles []int64, reorgInfo *reorgInfo) error {
	// The value stored for rows that predate the column.
	defaultVal, _, err := table.GetColDefaultValue(nil, columnInfo)
	if err != nil {
		return errors.Trace(err)
	}

	// Column ID -> field type map, needed to decode existing row values.
	colMap := make(map[int64]*types.FieldType)
	for _, col := range t.Meta().Columns {
		colMap[col.ID] = &col.FieldType
	}

	for _, handle := range handles {
		log.Info("[ddl] backfill column...", handle)

		// Each row is processed in its own retryable transaction.
		err := kv.RunInNewTxn(d.store, true, func(txn kv.Transaction) error {
			// Stop early if the reorganization was cancelled.
			if err := d.isReorgRunnable(txn); err != nil {
				return errors.Trace(err)
			}

			rowKey := t.RecordKey(handle)
			rowVal, err := txn.Get(rowKey)
			if terror.ErrorEqual(err, kv.ErrNotExist) {
				// If row doesn't exist, skip it.
				return nil
			}
			if err != nil {
				return errors.Trace(err)
			}

			rowColumns, err := tablecodec.DecodeRow(rowVal, colMap)
			if err != nil {
				return errors.Trace(err)
			}
			if _, ok := rowColumns[columnInfo.ID]; ok {
				// The column is already added by update or insert statement, skip it.
				return nil
			}

			// Re-encode the row with the new column's default value appended.
			newColumnIDs := make([]int64, 0, len(rowColumns)+1)
			newRow := make([]types.Datum, 0, len(rowColumns)+1)
			for colID, val := range rowColumns {
				newColumnIDs = append(newColumnIDs, colID)
				newRow = append(newRow, val)
			}
			newColumnIDs = append(newColumnIDs, columnInfo.ID)
			newRow = append(newRow, defaultVal)
			newRowVal, err := tablecodec.EncodeRow(newRow, newColumnIDs)
			if err != nil {
				return errors.Trace(err)
			}
			err = txn.Set(rowKey, newRowVal)
			if err != nil {
				return errors.Trace(err)
			}

			// Persist the last processed handle so backfill can resume here.
			return errors.Trace(reorgInfo.UpdateHandle(txn, handle))
		})
		if err != nil {
			return errors.Trace(err)
		}
	}

	return nil
}
// backfillColumnData writes the new column's default value (stored under its
// own column key) for each existing row in handles, skipping rows that are
// gone or already backfilled, and records progress through reorgInfo.
func (d *ddl) backfillColumnData(t table.Table, columnInfo *model.ColumnInfo, handles []int64, reorgInfo *reorgInfo) error {
	for _, handle := range handles {
		log.Info("[ddl] backfill column...", handle)

		// Each row is processed in its own retryable transaction.
		err := kv.RunInNewTxn(d.store, true, func(txn kv.Transaction) error {
			// Stop early if the reorganization was cancelled.
			if err := d.isReorgRunnable(txn); err != nil {
				return errors.Trace(err)
			}

			// First check if row exists.
			exist, err := checkRowExist(txn, t, handle)
			if err != nil {
				return errors.Trace(err)
			} else if !exist {
				// If row doesn't exist, skip it.
				return nil
			}

			// Skip rows whose column value was already written (e.g. by a
			// concurrent insert/update).
			backfillKey := t.RecordKey(handle, &column.Col{ColumnInfo: *columnInfo})
			backfillValue, err := txn.Get(backfillKey)
			if err != nil && !kv.IsErrNotFound(err) {
				return errors.Trace(err)
			}
			if backfillValue != nil {
				return nil
			}

			value, _, err := tables.GetColDefaultValue(nil, columnInfo)
			if err != nil {
				return errors.Trace(err)
			}

			// must convert to the column field type.
			v, err := types.Convert(value, &columnInfo.FieldType)
			if err != nil {
				return errors.Trace(err)
			}

			// Lock the row so the write does not race with DML on it.
			err = lockRow(txn, t, handle)
			if err != nil {
				return errors.Trace(err)
			}

			err = t.SetColValue(txn, backfillKey, v)
			if err != nil {
				return errors.Trace(err)
			}

			// Persist the last processed handle so backfill can resume here.
			return errors.Trace(reorgInfo.UpdateHandle(txn, handle))
		})
		if err != nil {
			return errors.Trace(err)
		}
	}

	return nil
}
func testCheckJobCancelled(c *C, d *ddl, job *model.Job) { kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error { t := meta.NewMeta(txn) historyJob, err := t.GetHistoryDDLJob(job.ID) c.Assert(err, IsNil) c.Assert(historyJob.State, Equals, model.JobCancelled) return nil }) }
func genGlobalID(store kv.Storage) (int64, error) { var globalID int64 err := kv.RunInNewTxn(store, true, func(txn kv.Transaction) error { var err error globalID, err = meta.NewMeta(txn).GenGlobalID() return errors.Trace(err) }) return globalID, errors.Trace(err) }
func (do *Domain) reload() error { err := kv.RunInNewTxn(do.store, false, do.loadInfoSchema) if err != nil { return errors.Trace(err) } atomic.StoreInt64(&do.lastLeaseTS, time.Now().UnixNano()) return nil }
// backfillTableIndex creates index entries for the rows listed in handles,
// skipping rows that are gone or already indexed, and records progress
// through reorgInfo after each row.
func (d *ddl) backfillTableIndex(t table.Table, indexInfo *model.IndexInfo, handles []int64, reorgInfo *reorgInfo) error {
	kvX := kv.NewKVIndex(t.IndexPrefix(), indexInfo.Name.L, indexInfo.ID, indexInfo.Unique)

	for _, handle := range handles {
		log.Debug("[ddl] building index...", handle)

		// Each row is indexed in its own retryable transaction.
		err := kv.RunInNewTxn(d.store, true, func(txn kv.Transaction) error {
			// Stop early if the reorganization was cancelled.
			if err := d.isReorgRunnable(txn); err != nil {
				return errors.Trace(err)
			}

			// first check row exists
			exist, err := checkRowExist(txn, t, handle)
			if err != nil {
				return errors.Trace(err)
			} else if !exist {
				// row doesn't exist, skip it.
				return nil
			}

			// Fetch the row's values for the indexed columns.
			var vals []interface{}
			vals, err = fetchRowColVals(txn, t, handle, indexInfo)
			if err != nil {
				return errors.Trace(err)
			}

			exist, _, err = kvX.Exist(txn, vals, handle)
			if err != nil {
				return errors.Trace(err)
			} else if exist {
				// index already exists, skip it.
				return nil
			}

			// Lock the row so the index write does not race with DML on it.
			err = lockRow(txn, t, handle)
			if err != nil {
				return errors.Trace(err)
			}

			// create the index.
			err = kvX.Create(txn, vals, handle)
			if err != nil {
				return errors.Trace(err)
			}

			// update reorg next handle
			return errors.Trace(reorgInfo.UpdateHandle(txn, handle))
		})
		if err != nil {
			return errors.Trace(err)
		}
	}

	return nil
}
func verifyBgJobState(c *C, d *ddl, job *model.Job, state model.JobState) { kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error { t := meta.NewMeta(txn) historyBgJob, err := t.GetHistoryBgJob(job.ID) c.Assert(err, IsNil) c.Assert(historyBgJob.State, Equals, state) return nil }) }
// DropSchema removes the named database: it truncates each of its tables,
// drops their indices, and finally deletes the schema's meta key in a fresh
// transaction. The InfoSchema check up front rejects unknown schemas.
func (d *ddl) DropSchema(ctx context.Context, schema model.CIStr) (err error) {
	is := d.GetInformationSchema()
	old, ok := is.SchemaByName(schema)
	if !ok {
		return errors.Trace(ErrNotExists)
	}

	// Update InfoSchema
	// NOTE(review): newInfo is built here (old minus the dropped schema) but
	// never used afterwards in this function — presumably dead code or
	// consumed by a mechanism outside this view; confirm before removing.
	oldInfo := is.Clone()
	var newInfo []*model.DBInfo
	for _, v := range oldInfo {
		if v.Name.L != schema.L {
			newInfo = append(newInfo, v)
		}
	}

	// Remove data.
	txn, err := ctx.GetTxn(true)
	if err != nil {
		return errors.Trace(err)
	}
	tables := is.SchemaTables(schema)
	for _, t := range tables {
		// Drop all rows of the table.
		err = t.Truncate(ctx)
		if err != nil {
			return errors.Trace(err)
		}
		// Remove indices.
		for _, v := range t.Indices() {
			if v != nil && v.X != nil {
				if err = v.X.Drop(txn); err != nil {
					return errors.Trace(err)
				}
			}
		}
	}

	// Delete meta key.
	err = kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error {
		// Fail if another server changed the schema meta version meanwhile.
		err := d.verifySchemaMetaVersion(txn, is.SchemaMetaVersion())
		if err != nil {
			return errors.Trace(err)
		}

		key := []byte(meta.DBMetaKey(old.ID))
		// Lock the key before deleting it to guard against concurrent writers.
		if err := txn.LockKeys(key); err != nil {
			return errors.Trace(err)
		}
		return txn.Delete(key)
	})
	if d.onDDLChange != nil {
		err = d.onDDLChange(err)
	}
	return errors.Trace(err)
}
// TestReorgOwner checks that when the reorg owner (d1) is closed in the
// middle of a drop-schema reorganization, another ddl instance (d2) takes
// over and the schema is still fully removed.
func (s *testDDLSuite) TestReorgOwner(c *C) {
	defer testleak.AfterTest(c)()
	store := testCreateStore(c, "test_reorg_owner")
	defer store.Close()

	lease := 50 * time.Millisecond

	// d1 becomes the initial job owner.
	d1 := newDDL(store, nil, nil, lease)
	defer d1.close()

	ctx := testNewContext(c, d1)

	testCheckOwner(c, d1, true, ddlJobFlag)

	// d2 stands by to take over when d1 dies.
	d2 := newDDL(store, nil, nil, lease)
	defer d2.close()

	dbInfo := testSchemaInfo(c, d1, "test")
	testCreateSchema(c, ctx, d1, dbInfo)

	tblInfo := testTableInfo(c, d1, "t", 3)
	testCreateTable(c, ctx, d1, dbInfo, tblInfo)
	t := testGetTable(c, d1, dbInfo.ID, tblInfo.ID)

	// Populate some rows so the drop has data to reorganize.
	num := 10
	for i := 0; i < num; i++ {
		_, err := t.AddRecord(ctx, types.MakeDatums(i, i, i))
		c.Assert(err, IsNil)
	}

	err := ctx.CommitTxn()
	c.Assert(err, IsNil)

	// Kill d1 exactly when the job reaches the delete-reorganization state,
	// forcing an ownership hand-off mid-reorg.
	tc := &testDDLCallback{}
	tc.onJobRunBefore = func(job *model.Job) {
		if job.SchemaState == model.StateDeleteReorganization {
			d1.close()
		}
	}

	d1.hook = tc

	testDropSchema(c, ctx, d1, dbInfo)

	// The schema must be gone despite the owner switch.
	err = kv.RunInNewTxn(d1.store, false, func(txn kv.Transaction) error {
		t := meta.NewMeta(txn)
		db, err1 := t.GetDatabase(dbInfo.ID)
		c.Assert(err1, IsNil)
		c.Assert(db, IsNil)
		return nil
	})
	c.Assert(err, IsNil)
}