func (s *testSuite) TestSnapshot(c *C) {
	defer testleak.AfterTest(c)()
	driver := localstore.Driver{Driver: goleveldb.MemoryDriver{}}
	store, err := driver.Open("memory")
	c.Assert(err, IsNil)
	defer store.Close()

	txn, _ := store.Begin()
	m := meta.NewMeta(txn)
	m.GenGlobalID()
	n, _ := m.GetGlobalID()
	c.Assert(n, Equals, int64(1))
	txn.Commit()

	ver1, _ := store.CurrentVersion()
	time.Sleep(time.Millisecond)

	txn, _ = store.Begin()
	m = meta.NewMeta(txn)
	m.GenGlobalID()
	n, _ = m.GetGlobalID()
	c.Assert(n, Equals, int64(2))
	txn.Commit()

	snapshot, _ := store.GetSnapshot(ver1)
	snapMeta := meta.NewSnapshotMeta(snapshot)
	n, _ = snapMeta.GetGlobalID()
	c.Assert(n, Equals, int64(1))
	_, err = snapMeta.GenGlobalID()
	c.Assert(err, NotNil)
}
func (s *testDBSuite) TestUpdateMultipleTable(c *C) {
	defer testleak.AfterTest(c)()
	store, err := tidb.NewStore("memory://update_multiple_table")
	c.Assert(err, IsNil)
	tk := testkit.NewTestKit(c, store)
	tk.MustExec("use test")
	tk.MustExec("create table t1 (c1 int, c2 int)")
	tk.MustExec("insert t1 values (1, 1), (2, 2)")
	tk.MustExec("create table t2 (c1 int, c2 int)")
	tk.MustExec("insert t2 values (1, 3), (2, 5)")
	ctx := tk.Se.(context.Context)
	domain := sessionctx.GetDomain(ctx)
	is := domain.InfoSchema()
	db, ok := is.SchemaByName(model.NewCIStr("test"))
	c.Assert(ok, IsTrue)

	t1Tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t1"))
	c.Assert(err, IsNil)
	t1Info := t1Tbl.Meta()

	// Add a new column in write-only state.
	newColumn := &model.ColumnInfo{
		ID:           100,
		Name:         model.NewCIStr("c3"),
		Offset:       2,
		DefaultValue: 9,
		FieldType:    *types.NewFieldType(mysql.TypeLonglong),
		State:        model.StateWriteOnly,
	}
	t1Info.Columns = append(t1Info.Columns, newColumn)

	kv.RunInNewTxn(store, false, func(txn kv.Transaction) error {
		m := meta.NewMeta(txn)
		_, err = m.GenSchemaVersion()
		c.Assert(err, IsNil)
		c.Assert(m.UpdateTable(db.ID, t1Info), IsNil)
		return nil
	})
	err = domain.Reload()
	c.Assert(err, IsNil)

	tk.MustExec("update t1, t2 set t1.c1 = 8, t2.c2 = 10 where t1.c2 = t2.c1")
	tk.MustQuery("select * from t1").Check(testkit.Rows("8 1", "8 2"))
	tk.MustQuery("select * from t2").Check(testkit.Rows("1 10", "2 10"))

	newColumn.State = model.StatePublic

	kv.RunInNewTxn(store, false, func(txn kv.Transaction) error {
		m := meta.NewMeta(txn)
		_, err = m.GenSchemaVersion()
		c.Assert(err, IsNil)
		c.Assert(m.UpdateTable(db.ID, t1Info), IsNil)
		return nil
	})
	err = domain.Reload()
	c.Assert(err, IsNil)

	tk.MustQuery("select * from t1").Check(testkit.Rows("8 1 9", "8 2 9"))
}
func isBoostrapped(store kv.Storage) bool {
	// Check the in-memory flag first.
	_, ok := storeBootstrapped[store.UUID()]
	if ok {
		return true
	}

	// Check the kv store.
	err := kv.RunInNewTxn(store, false, func(txn kv.Transaction) error {
		var err error
		t := meta.NewMeta(txn)
		ok, err = t.IsBootstrapped()
		return errors.Trace(err)
	})
	if err != nil {
		log.Fatalf("check bootstrapped err %v", err)
	}

	if ok {
		// The in-memory flag is missing, but another server has already finished bootstrap;
		// record it so we skip the kv lookup next time.
		storeBootstrapped[store.UUID()] = true
	}
	return ok
}
func checkHistoryJobArgs(c *C, ctx context.Context, id int64, args *historyJobArgs) {
	txn, err := ctx.GetTxn(true)
	c.Assert(err, IsNil)
	t := meta.NewMeta(txn)
	historyJob, err := t.GetHistoryDDLJob(id)
	c.Assert(err, IsNil)

	var v int64
	var ids []int64
	tbl := &model.TableInfo{}
	if args.tbl != nil {
		historyJob.DecodeArgs(&v, &tbl)
		c.Assert(v, Equals, args.ver)
		checkEqualTable(c, tbl, args.tbl)
		return
	}

	// Only for the create schema job.
	db := &model.DBInfo{}
	if args.db != nil && len(args.tblIDs) == 0 {
		historyJob.DecodeArgs(&v, &db)
		c.Assert(v, Equals, args.ver)
		c.Assert(db, DeepEquals, args.db)
		return
	}

	// Only for the drop schema job.
	historyJob.DecodeArgs(&v, &db, &ids)
	c.Assert(v, Equals, args.ver)
	c.Assert(db, DeepEquals, args.db)
	for _, id := range ids {
		c.Assert(args.tblIDs, HasKey, id)
		delete(args.tblIDs, id)
	}
	c.Assert(len(args.tblIDs), Equals, 0)
}
func (do *Domain) loadInfoSchema(txn kv.Transaction) (err error) {
	m := meta.NewMeta(txn)
	schemaMetaVersion, err := m.GetSchemaVersion()
	if err != nil {
		return errors.Trace(err)
	}

	info := do.infoHandle.Get()
	if info != nil && schemaMetaVersion > 0 && schemaMetaVersion == info.SchemaMetaVersion() {
		log.Debugf("schema version is still %d, no need reload", schemaMetaVersion)
		return nil
	}

	schemas, err := m.ListDatabases()
	if err != nil {
		return errors.Trace(err)
	}

	for _, di := range schemas {
		tables, err := m.ListTables(di.ID)
		if err != nil {
			return errors.Trace(err)
		}
		di.Tables = tables
	}

	log.Infof("loadInfoSchema %d", schemaMetaVersion)
	do.infoHandle.Set(schemas, schemaMetaVersion)
	return
}
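// Usage sketch (not part of the original source; the Domain `store` field and the
// `reloadSketch` wrapper are assumptions for illustration): loadInfoSchema matches the
// func(kv.Transaction) error signature that kv.RunInNewTxn expects, so a schema reload
// would typically be driven by passing the method value directly.
func (do *Domain) reloadSketch() error {
	// Run the load inside a fresh transaction created by RunInNewTxn.
	err := kv.RunInNewTxn(do.store, false, do.loadInfoSchema)
	return errors.Trace(err)
}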
func (d *ddl) CreateSchema(ctx context.Context, schema model.CIStr) (err error) {
	is := d.GetInformationSchema()
	_, ok := is.SchemaByName(schema)
	if ok {
		return errors.Trace(ErrExists)
	}

	info := &model.DBInfo{Name: schema}
	info.ID, err = d.genGlobalID()
	if err != nil {
		return errors.Trace(err)
	}

	err = kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error {
		t := meta.NewMeta(txn)
		err := d.verifySchemaMetaVersion(t, is.SchemaMetaVersion())
		if err != nil {
			return errors.Trace(err)
		}
		err = t.CreateDatabase(info)
		log.Warnf("save schema %s", info)
		return errors.Trace(err)
	})
	if d.onDDLChange != nil {
		err = d.onDDLChange(err)
	}
	return errors.Trace(err)
}
func (s *testDDLSuite) TestDropTableError(c *C) {
	defer testleak.AfterTest(c)()
	store := testCreateStore(c, "test_drop_table")
	defer store.Close()

	d := newDDL(store, nil, nil, testLease)
	defer d.close()

	dbInfo := testSchemaInfo(c, d, "test")
	testCreateSchema(c, mock.NewContext(), d, dbInfo)

	job := &model.Job{
		SchemaID: dbInfo.ID,
		Type:     model.ActionDropTable,
		Args: []interface{}{&model.TableInfo{
			ID:   1,
			Name: model.CIStr{O: "t"},
		}},
	}
	err := kv.RunInNewTxn(store, false, func(txn kv.Transaction) error {
		t := meta.NewMeta(txn)
		return d.prepareBgJob(t, job)
	})
	c.Check(err, IsNil)

	d.startBgJob(job.Type)
	time.Sleep(testLease * 3)
	verifyBgJobState(c, d, job, model.JobDone)
}
func testCheckSchemaState(c *C, d *ddl, dbInfo *model.DBInfo, state model.SchemaState) {
	isDropped := true

	for {
		kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error {
			t := meta.NewMeta(txn)
			info, err := t.GetDatabase(dbInfo.ID)
			c.Assert(err, IsNil)

			if state == model.StateNone {
				isDropped = checkDrop(c, t)
				if !isDropped {
					return nil
				}
				c.Assert(info, IsNil)
				return nil
			}

			c.Assert(info.Name, DeepEquals, dbInfo.Name)
			c.Assert(info.State, Equals, state)
			return nil
		})

		if isDropped {
			break
		}
	}
}
// Alloc allocates the next auto ID for the table with tableID.
// It reserves a batch of IDs at a time, so it does not need to access storage on every call.
func (alloc *allocator) Alloc(tableID int64) (int64, error) {
	if tableID == 0 {
		return 0, errors.New("Invalid tableID")
	}
	alloc.mu.Lock()
	defer alloc.mu.Unlock()
	if alloc.base == alloc.end {
		// The cached batch is used up; reserve the next `step` IDs.
		err := kv.RunInNewTxn(alloc.store, true, func(txn kv.Transaction) error {
			m := meta.NewMeta(txn)
			// err1 is used for passing `go tool vet --shadow` check.
			end, err1 := m.GenAutoTableID(alloc.dbID, tableID, step)
			if err1 != nil {
				return errors.Trace(err1)
			}
			alloc.end = end
			alloc.base = alloc.end - step
			return nil
		})
		if err != nil {
			return 0, errors.Trace(err)
		}
	}

	alloc.base++
	log.Infof("Alloc id %d, table ID:%d, from %p, database ID:%d", alloc.base, tableID, alloc, alloc.dbID)
	return alloc.base, nil
}
func (*testSuite) TestT(c *C) {
	driver := localstore.Driver{Driver: goleveldb.MemoryDriver{}}
	store, err := driver.Open("memory")
	c.Assert(err, IsNil)
	defer store.Close()

	m := meta.NewMeta(store)
	err = m.RunInNewTxn(false, func(m *meta.TMeta) error {
		err = m.CreateDatabase(&model.DBInfo{ID: 1, Name: model.NewCIStr("a")})
		c.Assert(err, IsNil)
		err = m.CreateTable(1, &model.TableInfo{ID: 1, Name: model.NewCIStr("t")})
		c.Assert(err, IsNil)
		return nil
	})
	c.Assert(err, IsNil)

	alloc := autoid.NewAllocator(m, 1)
	c.Assert(alloc, NotNil)

	id, err := alloc.Alloc(1)
	c.Assert(err, IsNil)
	c.Assert(id, Equals, int64(1))

	id, err = alloc.Alloc(1)
	c.Assert(err, IsNil)
	c.Assert(id, Equals, int64(2))

	id, err = alloc.Alloc(0)
	c.Assert(err, NotNil)
}
func (s *testSuite) TestGetDDLInfo(c *C) {
	defer testleak.AfterTest(c)()
	txn, err := s.store.Begin()
	c.Assert(err, IsNil)

	t := meta.NewMeta(txn)

	owner := &model.Owner{OwnerID: "owner"}
	err = t.SetDDLJobOwner(owner)
	c.Assert(err, IsNil)

	dbInfo2 := &model.DBInfo{
		ID:    2,
		Name:  model.NewCIStr("b"),
		State: model.StateNone,
	}
	job := &model.Job{
		SchemaID: dbInfo2.ID,
		Type:     model.ActionCreateSchema,
	}
	err = t.EnQueueDDLJob(job)
	c.Assert(err, IsNil)

	info, err := GetDDLInfo(txn)
	c.Assert(err, IsNil)
	c.Assert(info.Owner, DeepEquals, owner)
	c.Assert(info.Job, DeepEquals, job)
	c.Assert(info.ReorgHandle, Equals, int64(0))

	err = txn.Commit()
	c.Assert(err, IsNil)
}
func getStoreBootstrapVersion(store kv.Storage) int64 {
	// Check the in-memory flag first.
	_, ok := storeBootstrapped[store.UUID()]
	if ok {
		return currentBootstrapVersion
	}

	var ver int64
	// Check the kv store.
	err := kv.RunInNewTxn(store, false, func(txn kv.Transaction) error {
		var err error
		t := meta.NewMeta(txn)
		ver, err = t.GetBootstrapVersion()
		return errors.Trace(err)
	})
	if err != nil {
		log.Fatalf("check bootstrapped err %v", err)
	}

	if ver > notBootstrapped {
		// The in-memory flag is missing, but another server has already finished bootstrap;
		// record it so we skip the kv lookup next time.
		storeBootstrapped[store.UUID()] = true
	}

	return ver
}
func (s *testDDLSuite) TestDropSchemaError(c *C) {
	defer testleak.AfterTest(c)()
	store := testCreateStore(c, "test_drop_schema")
	defer store.Close()

	lease := 50 * time.Millisecond
	d := newDDL(store, nil, nil, lease)
	defer d.close()

	job := &model.Job{
		SchemaID: 1,
		Type:     model.ActionDropSchema,
		Args: []interface{}{&model.DBInfo{
			Name: model.CIStr{O: "test"},
		}},
	}
	err := kv.RunInNewTxn(store, false, func(txn kv.Transaction) error {
		t := meta.NewMeta(txn)
		return d.prepareBgJob(t, job)
	})
	c.Check(err, IsNil)

	d.startBgJob(job.Type)
	time.Sleep(lease)
	verifyBgJobState(c, d, job, model.JobDone)
}
// GetDDLInfo returns DDL information.
func GetDDLInfo(txn kv.Transaction) (*DDLInfo, error) {
	var err error
	info := &DDLInfo{}
	t := meta.NewMeta(txn)

	info.Owner, err = t.GetDDLJobOwner()
	if err != nil {
		return nil, errors.Trace(err)
	}
	info.Job, err = t.GetDDLJob(0)
	if err != nil {
		return nil, errors.Trace(err)
	}
	info.SchemaVer, err = t.GetSchemaVersion()
	if err != nil {
		return nil, errors.Trace(err)
	}
	if info.Job == nil {
		return info, nil
	}

	info.ReorgHandle, err = t.GetDDLReorgHandle(info.Job)
	if err != nil {
		return nil, errors.Trace(err)
	}

	return info, nil
}
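// Usage sketch (not part of the original source; showDDLInfoSketch is a hypothetical caller):
// fetch the current DDL owner, pending job, and reorg handle inside the caller's own
// transaction, following the same ctx.GetTxn(true) pattern used elsewhere in these snippets.
func showDDLInfoSketch(ctx context.Context) (*DDLInfo, error) {
	txn, err := ctx.GetTxn(true)
	if err != nil {
		return nil, errors.Trace(err)
	}
	info, err := GetDDLInfo(txn)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return info, nil
}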
// DropTable will proceed even if some table in the list does not exist.
func (d *ddl) DropTable(ctx context.Context, ti table.Ident) (err error) {
	is := d.GetInformationSchema()
	schema, ok := is.SchemaByName(ti.Schema)
	if !ok {
		return errors.Trace(qerror.ErrDatabaseNotExist)
	}

	tb, err := is.TableByName(ti.Schema, ti.Name)
	if err != nil {
		return errors.Trace(err)
	}

	err = kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error {
		t := meta.NewMeta(txn)
		err := d.verifySchemaMetaVersion(t, is.SchemaMetaVersion())
		if err != nil {
			return errors.Trace(err)
		}
		err = t.DropTable(schema.ID, tb.Meta().ID)
		return errors.Trace(err)
	})
	if d.onDDLChange != nil {
		err = d.onDDLChange(err)
		if err != nil {
			return errors.Trace(err)
		}
	}
	err = d.deleteTableData(ctx, tb)
	return errors.Trace(err)
}
func (d *ddl) startJob(ctx context.Context, job *model.Job) error {
	// For every DDL, we must commit the current transaction first.
	if err := ctx.FinishTxn(false); err != nil {
		return errors.Trace(err)
	}

	// Create a new job and queue it.
	err := kv.RunInNewTxn(d.store, true, func(txn kv.Transaction) error {
		t := meta.NewMeta(txn)
		var err error
		job.ID, err = t.GenGlobalID()
		if err != nil {
			return errors.Trace(err)
		}

		err = t.EnQueueDDLJob(job)
		return errors.Trace(err)
	})
	if err != nil {
		return errors.Trace(err)
	}

	// Notify the worker that a new job is queued, then wait for the job to be done.
	asyncNotify(d.jobCh)
	log.Warnf("start DDL job %v", job)

	jobID := job.ID
	var historyJob *model.Job

	// A job goes through the states none -> delete only -> write only -> reorganization -> public.
	// Every state change waits at most 2 * lease, so the ticker here checks every 10 * lease.
	ticker := time.NewTicker(chooseLeaseTime(10*d.lease, 10*time.Second))
	defer ticker.Stop()

	for {
		select {
		case <-d.jobDoneCh:
		case <-ticker.C:
		}

		historyJob, err = d.getHistoryJob(jobID)
		if err != nil {
			log.Errorf("get history job err %v, check again", err)
			continue
		} else if historyJob == nil {
			log.Warnf("job %d is not in history, maybe not run", jobID)
			continue
		}

		// If the job is in the history queue, its state must be JobDone or JobCancelled.
		if historyJob.State == model.JobDone {
			return nil
		}

		return errors.Errorf(historyJob.Error)
	}
}
func (d *ddl) checkOwner() error {
	err := kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error {
		t := meta.NewMeta(txn)
		return errors.Trace(d.verifyOwner(t))
	})

	return errors.Trace(err)
}
func genGlobalID(store kv.Storage) (int64, error) {
	var globalID int64
	err := kv.RunInNewTxn(store, true, func(txn kv.Transaction) error {
		var err error
		globalID, err = meta.NewMeta(txn).GenGlobalID()
		return errors.Trace(err)
	})

	return globalID, errors.Trace(err)
}
func testCheckJobCancelled(c *C, d *ddl, job *model.Job) {
	kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error {
		t := meta.NewMeta(txn)
		historyJob, err := t.GetHistoryDDLJob(job.ID)
		c.Assert(err, IsNil)
		c.Assert(historyJob.State, Equals, model.JobCancelled)
		return nil
	})
}
func getSchemaVer(c *C, ctx context.Context) int64 {
	txn, err := ctx.GetTxn(true)
	c.Assert(err, IsNil)
	c.Assert(txn, NotNil)
	m := meta.NewMeta(txn)
	ver, err := m.GetSchemaVersion()
	c.Assert(err, IsNil)
	return ver
}
// Test case for upgrade.
func (s *testSessionSuite) TestUpgrade(c *C) {
	defer testleak.AfterTest(c)()
	store := newStore(c, s.dbName)
	se := newSession(c, store, s.dbName)
	mustExecSQL(c, se, "USE mysql;")

	// Bootstrap with currentBootstrapVersion.
	r := mustExecSQL(c, se, `SELECT VARIABLE_VALUE from mysql.TiDB where VARIABLE_NAME="tidb_server_version";`)
	row, err := r.Next()
	c.Assert(err, IsNil)
	c.Assert(row, NotNil)
	c.Assert(row.Data, HasLen, 1)
	c.Assert(row.Data[0].GetBytes(), BytesEquals, []byte(fmt.Sprintf("%d", currentBootstrapVersion)))

	se1 := newSession(c, store, s.dbName)
	ver, err := getBootstrapVersion(se1)
	c.Assert(err, IsNil)
	c.Assert(ver, Equals, int64(currentBootstrapVersion))

	// Do something to downgrade the store.
	// Downgrade the meta bootstrap version.
	txn, err := store.Begin()
	c.Assert(err, IsNil)
	m := meta.NewMeta(txn)
	err = m.FinishBootstrap(int64(1))
	c.Assert(err, IsNil)
	err = txn.Commit()
	c.Assert(err, IsNil)
	mustExecSQL(c, se1, `delete from mysql.TiDB where VARIABLE_NAME="tidb_server_version";`)
	mustExecSQL(c, se1, fmt.Sprintf(`delete from mysql.global_variables where VARIABLE_NAME="%s" or VARIABLE_NAME="%s";`,
		variable.DistSQLScanConcurrencyVar, variable.DistSQLJoinConcurrencyVar))
	mustExecSQL(c, se1, `commit;`)
	delete(storeBootstrapped, store.UUID())

	// Make sure the version is downgraded.
	r = mustExecSQL(c, se1, `SELECT VARIABLE_VALUE from mysql.TiDB where VARIABLE_NAME="tidb_server_version";`)
	row, err = r.Next()
	c.Assert(err, IsNil)
	c.Assert(row, IsNil)

	ver, err = getBootstrapVersion(se1)
	c.Assert(err, IsNil)
	c.Assert(ver, Equals, int64(0))

	// Create a new session, then upgrade() will run automatically.
	se2 := newSession(c, store, s.dbName)
	r = mustExecSQL(c, se2, `SELECT VARIABLE_VALUE from mysql.TiDB where VARIABLE_NAME="tidb_server_version";`)
	row, err = r.Next()
	c.Assert(err, IsNil)
	c.Assert(row, NotNil)
	c.Assert(row.Data, HasLen, 1)
	c.Assert(row.Data[0].GetBytes(), BytesEquals, []byte(fmt.Sprintf("%d", currentBootstrapVersion)))

	ver, err = getBootstrapVersion(se2)
	c.Assert(err, IsNil)
	c.Assert(ver, Equals, int64(currentBootstrapVersion))
}
func verifyBgJobState(c *C, d *ddl, job *model.Job, state model.JobState) {
	kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error {
		t := meta.NewMeta(txn)
		historyBgJob, err := t.GetHistoryBgJob(job.ID)
		c.Assert(err, IsNil)
		c.Assert(historyBgJob.State, Equals, state)
		return nil
	})
}
func (s *testDDLSuite) TestReorgOwner(c *C) {
	defer testleak.AfterTest(c)()
	store := testCreateStore(c, "test_reorg_owner")
	defer store.Close()

	lease := 50 * time.Millisecond

	d1 := newDDL(store, nil, nil, lease)
	defer d1.close()

	ctx := testNewContext(c, d1)

	testCheckOwner(c, d1, true, ddlJobFlag)

	d2 := newDDL(store, nil, nil, lease)
	defer d2.close()

	dbInfo := testSchemaInfo(c, d1, "test")
	testCreateSchema(c, ctx, d1, dbInfo)

	tblInfo := testTableInfo(c, d1, "t", 3)
	testCreateTable(c, ctx, d1, dbInfo, tblInfo)
	t := testGetTable(c, d1, dbInfo.ID, tblInfo.ID)

	num := 10
	for i := 0; i < num; i++ {
		_, err := t.AddRecord(ctx, types.MakeDatums(i, i, i))
		c.Assert(err, IsNil)
	}

	err := ctx.CommitTxn()
	c.Assert(err, IsNil)

	tc := &testDDLCallback{}
	tc.onJobRunBefore = func(job *model.Job) {
		if job.SchemaState == model.StateDeleteReorganization {
			d1.close()
		}
	}

	d1.hook = tc

	testDropSchema(c, ctx, d1, dbInfo)

	err = kv.RunInNewTxn(d1.store, false, func(txn kv.Transaction) error {
		t := meta.NewMeta(txn)
		db, err1 := t.GetDatabase(dbInfo.ID)
		c.Assert(err1, IsNil)
		c.Assert(db, IsNil)
		return nil
	})
	c.Assert(err, IsNil)
}
func (s *testSuite) TestDDL(c *C) {
	driver := localstore.Driver{Driver: goleveldb.MemoryDriver{}}
	store, err := driver.Open("memory")
	c.Assert(err, IsNil)
	defer store.Close()

	m := meta.NewMeta(store)

	t, err := m.Begin()
	c.Assert(err, IsNil)
	defer t.Rollback()

	owner := &model.Owner{OwnerID: "1"}
	err = t.SetDDLOwner(owner)
	c.Assert(err, IsNil)
	ov, err := t.GetDDLOwner()
	c.Assert(err, IsNil)
	c.Assert(owner, DeepEquals, ov)

	job := &model.Job{ID: 1}
	err = t.EnQueueDDLJob(job)
	c.Assert(err, IsNil)
	n, err := t.DDLJobLength()
	c.Assert(err, IsNil)
	c.Assert(n, Equals, int64(1))

	v, err := t.GetDDLJob(0)
	c.Assert(err, IsNil)
	c.Assert(v, DeepEquals, job)
	v, err = t.GetDDLJob(1)
	c.Assert(err, IsNil)
	c.Assert(v, IsNil)

	job.ID = 2
	err = t.UpdateDDLJob(0, job)
	c.Assert(err, IsNil)

	v, err = t.DeQueueDDLJob()
	c.Assert(err, IsNil)
	c.Assert(v, DeepEquals, job)

	err = t.AddHistoryDDLJob(job)
	c.Assert(err, IsNil)
	v, err = t.GetHistoryDDLJob(2)
	c.Assert(err, IsNil)
	c.Assert(v, DeepEquals, job)

	err = t.Commit()
	c.Assert(err, IsNil)
}
func (s *testColumnChangeSuite) SetUpSuite(c *C) {
	s.store = testCreateStore(c, "test_column_change")
	s.dbInfo = &model.DBInfo{
		Name: model.NewCIStr("test_column_change"),
		ID:   1,
	}
	err := kv.RunInNewTxn(s.store, true, func(txn kv.Transaction) error {
		t := meta.NewMeta(txn)
		return errors.Trace(t.CreateDatabase(s.dbInfo))
	})
	c.Check(err, IsNil)
}
func finishBoostrap(store kv.Storage) {
	storeBootstrapped[store.UUID()] = true

	err := kv.RunInNewTxn(store, true, func(txn kv.Transaction) error {
		t := meta.NewMeta(txn)
		err := t.FinishBootstrap()
		return errors.Trace(err)
	})
	if err != nil {
		log.Fatalf("finish bootstrap err %v", err)
	}
}
func (d *ddl) getHistoryJob(id int64) (*model.Job, error) {
	var job *model.Job

	err := kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error {
		t := meta.NewMeta(txn)
		var err1 error
		job, err1 = t.GetHistoryDDLJob(id)
		return errors.Trace(err1)
	})

	return job, errors.Trace(err)
}
func (d *ddl) DropSchema(ctx context.Context, schema model.CIStr) (err error) {
	is := d.GetInformationSchema()
	old, ok := is.SchemaByName(schema)
	if !ok {
		return errors.Trace(ErrNotExists)
	}

	// Update InfoSchema.
	oldInfo := is.Clone()
	var newInfo []*model.DBInfo
	for _, v := range oldInfo {
		if v.Name.L != schema.L {
			newInfo = append(newInfo, v)
		}
	}

	// Remove data.
	txn, err := ctx.GetTxn(true)
	if err != nil {
		return errors.Trace(err)
	}
	tables := is.SchemaTables(schema)
	for _, t := range tables {
		err = t.Truncate(ctx)
		if err != nil {
			return errors.Trace(err)
		}
		// Remove indices.
		for _, v := range t.Indices() {
			if v != nil && v.X != nil {
				if err = v.X.Drop(txn); err != nil {
					return errors.Trace(err)
				}
			}
		}
	}

	err = kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error {
		t := meta.NewMeta(txn)
		err := d.verifySchemaMetaVersion(t, is.SchemaMetaVersion())
		if err != nil {
			return errors.Trace(err)
		}
		err = t.DropDatabase(old.ID)
		return errors.Trace(err)
	})
	if d.onDDLChange != nil {
		err = d.onDDLChange(err)
	}
	return errors.Trace(err)
}
func (d *ddl) startJob(ctx context.Context, job *model.Job) error {
	// For every DDL, we must commit the current transaction first.
	if err := ctx.FinishTxn(false); err != nil {
		return errors.Trace(err)
	}

	// Create a new job and queue it.
	err := kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error {
		t := meta.NewMeta(txn)
		var err error
		job.ID, err = t.GenGlobalID()
		if err != nil {
			return errors.Trace(err)
		}

		err = t.EnQueueDDLJob(job)
		return errors.Trace(err)
	})
	if err != nil {
		return errors.Trace(err)
	}

	// Notify the worker that a new job is queued, then wait for the job to be done.
	asyncNotify(d.jobCh)

	jobID := job.ID

	ticker := time.NewTicker(10 * time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-d.jobDoneCh:
		case <-ticker.C:
		}

		job, err = d.getHistoryJob(jobID)
		if err != nil {
			log.Errorf("get history job err %v, check again", err)
			continue
		} else if job == nil {
			log.Warnf("job %d is not in history, maybe not run", jobID)
			continue
		}

		// If the job is in the history queue, its state must be JobDone or JobCancelled.
		if job.State == model.JobDone {
			return nil
		}

		return errors.Errorf("job is %s, err :%v", job.State, job.Error)
	}
}
func (d *ddl) Stop() error {
	d.m.Lock()
	defer d.m.Unlock()

	d.close()

	err := kv.RunInNewTxn(d.store, true, func(txn kv.Transaction) error {
		t := meta.NewMeta(txn)
		owner, err1 := t.GetDDLJobOwner()
		if err1 != nil {
			return errors.Trace(err1)
		}
		if owner == nil || owner.OwnerID != d.uuid {
			return nil
		}

		// DDL job's owner is me, clean it so other servers can complete it quickly.
		return t.SetDDLJobOwner(&model.Owner{})
	})
	if err != nil {
		return errors.Trace(err)
	}

	err = kv.RunInNewTxn(d.store, true, func(txn kv.Transaction) error {
		t := meta.NewMeta(txn)
		owner, err1 := t.GetBgJobOwner()
		if err1 != nil {
			return errors.Trace(err1)
		}
		if owner == nil || owner.OwnerID != d.uuid {
			return nil
		}

		// Background job's owner is me, clean it so other servers can complete it quickly.
		return t.SetBgJobOwner(&model.Owner{})
	})

	return errors.Trace(err)
}
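// The snippets above share one pattern: wrap a kv.Transaction with meta.NewMeta and read or
// mutate schema metadata inside kv.RunInNewTxn. A minimal standalone sketch of that pattern
// (readSchemaVersion is a hypothetical helper, not part of the original source):
func readSchemaVersion(store kv.Storage) (int64, error) {
	var ver int64
	err := kv.RunInNewTxn(store, false, func(txn kv.Transaction) error {
		var err1 error
		// GetSchemaVersion is the same meta API used by loadInfoSchema and GetDDLInfo above.
		ver, err1 = meta.NewMeta(txn).GetSchemaVersion()
		return errors.Trace(err1)
	})
	return ver, errors.Trace(err)
}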