// tryLoadSchemaDiffs tries to load the latest schema changes only.
// Returns true if the schema is loaded successfully.
// Returns false if the schema cannot be loaded by schema diffs, in which case we need to do a full load.
func (do *Domain) tryLoadSchemaDiffs(m *meta.Meta, usedVersion, newVersion int64) (bool, error) {
	if usedVersion == initialVersion || newVersion-usedVersion > maxNumberOfDiffsToLoad {
		// If there isn't any used version, or the used version is too old, we do a full load.
		return false, nil
	}
	if usedVersion > newVersion {
		// When the user uses the History Read feature, a historical schema is loaded.
		// usedVersion may be larger than newVersion, so a full load is needed.
		return false, nil
	}
	var diffs []*model.SchemaDiff
	for usedVersion < newVersion {
		usedVersion++
		diff, err := m.GetSchemaDiff(usedVersion)
		if err != nil {
			return false, errors.Trace(err)
		}
		if diff == nil {
			// If a diff is missing for any version between the used and the new version, we fall back to a full reload.
			return false, nil
		}
		diffs = append(diffs, diff)
	}
	builder := infoschema.NewBuilder(do.infoHandle).InitWithOldInfoSchema()
	for _, diff := range diffs {
		err := builder.ApplyDiff(m, diff)
		if err != nil {
			return false, errors.Trace(err)
		}
	}
	builder.Build()
	return true, nil
}
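// A minimal, self-contained sketch of the guard conditions above. The constants
// here are illustrative assumptions standing in for the package's initialVersion
// and maxNumberOfDiffsToLoad; the helper name canLoadByDiff is hypothetical and
// not part of the domain package.
const (
	assumedInitialVersion         = 0   // assumed sentinel for "nothing loaded yet"
	assumedMaxNumberOfDiffsToLoad = 100 // assumed cap on incremental diffs
)

// canLoadByDiff reports whether an incremental (diff-based) schema load is even
// worth attempting; any false return forces a full reload.
func canLoadByDiff(usedVersion, newVersion int64) bool {
	if usedVersion == assumedInitialVersion || newVersion-usedVersion > assumedMaxNumberOfDiffsToLoad {
		return false // never loaded before, or too far behind
	}
	if usedVersion > newVersion {
		return false // a History Read loaded an older snapshot
	}
	return true
}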
func (do *Domain) getAllSchemasWithTablesFromMeta(m *meta.Meta) ([]*model.DBInfo, error) {
	schemas, err := m.ListDatabases()
	if err != nil {
		return nil, errors.Trace(err)
	}
	for _, di := range schemas {
		if di.State != model.StatePublic {
			// schema is not public, can't be used outside.
			continue
		}
		tables, err1 := m.ListTables(di.ID)
		if err1 != nil {
			return nil, errors.Trace(err1)
		}
		di.Tables = make([]*model.TableInfo, 0, len(tables))
		for _, tbl := range tables {
			if tbl.State != model.StatePublic {
				// table is not public, can't be used outside.
				continue
			}
			di.Tables = append(di.Tables, tbl)
		}
	}
	return schemas, nil
}
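// Hypothetical helper (not in the domain package) that captures the filtering
// rule above: only objects that have reached StatePublic are visible to queries;
// anything else is still in the middle of a DDL state change.
func publicTables(tables []*model.TableInfo) []*model.TableInfo {
	visible := make([]*model.TableInfo, 0, len(tables))
	for _, tbl := range tables {
		if tbl.State == model.StatePublic {
			visible = append(visible, tbl)
		}
	}
	return visible
}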
func (d *ddl) checkOwner(t *meta.Meta) (*model.Owner, error) {
	owner, err := t.GetDDLOwner()
	if err != nil {
		return nil, errors.Trace(err)
	}
	if owner == nil {
		owner = &model.Owner{}
		// try to set owner
		owner.OwnerID = d.uuid
	}

	now := time.Now().UnixNano()
	// We must wait 2 * lease time to guarantee other servers update the schema.
	// The owner updates its status every 2 * lease time, so here we use
	// 4 * lease to check whether it has timed out.
	maxTimeout := int64(4 * d.lease)
	if owner.OwnerID == d.uuid || now-owner.LastUpdateTS > maxTimeout {
		owner.OwnerID = d.uuid
		owner.LastUpdateTS = now
		// update status.
		if err = t.SetDDLOwner(owner); err != nil {
			return nil, errors.Trace(err)
		}
		log.Debugf("become owner %s", owner.OwnerID)
	}

	if owner.OwnerID != d.uuid {
		log.Debugf("not owner, owner is %s", owner.OwnerID)
		return nil, errors.Trace(ErrNotOwner)
	}
	return owner, nil
}
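// Hedged sketch of the timeout arithmetic above, assuming d.lease is a
// time.Duration: UnixNano() and a Duration are both nanosecond counts, so the
// elapsed time since the owner's last update can be compared directly with
// int64(4 * lease). The helper name ownerTimedOut is illustrative only.
// (Assumes "time" is imported.)
func ownerTimedOut(lastUpdateTS int64, lease time.Duration) bool {
	// A live owner refreshes its timestamp every 2 * lease, so anything older
	// than 4 * lease is treated as a dead owner that may be taken over.
	return time.Now().UnixNano()-lastUpdateTS > int64(4*lease)
}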
func (d *ddl) verifyOwner(t *meta.Meta) error {
	owner, err := t.GetDDLOwner()
	if err != nil {
		return errors.Trace(err)
	}
	if owner == nil {
		owner = &model.Owner{}
		// try to set owner
		owner.OwnerID = d.uuid
	}

	// Use nanoseconds so the comparison with maxTimeout (derived from the lease
	// duration) matches how LastUpdateTS is written in checkOwner.
	now := time.Now().UnixNano()
	maxTimeout := int64(4 * d.lease)
	if owner.OwnerID == d.uuid || now-owner.LastUpdateTS > maxTimeout {
		owner.OwnerID = d.uuid
		owner.LastUpdateTS = now
		// update or try to set itself as owner.
		if err = t.SetDDLOwner(owner); err != nil {
			return errors.Trace(err)
		}
	}

	if owner.OwnerID != d.uuid {
		return errors.Trace(ErrNotOwner)
	}
	return nil
}
func (d *ddl) onDropForeignKey(t *meta.Meta, job *model.Job) error {
	schemaID := job.SchemaID
	tblInfo, err := d.getTableInfo(t, job)
	if err != nil {
		return errors.Trace(err)
	}

	var (
		fkName model.CIStr
		found  bool
		fkInfo model.FKInfo
	)
	err = job.DecodeArgs(&fkName)
	if err != nil {
		job.State = model.JobCancelled
		return errors.Trace(err)
	}

	for _, fk := range tblInfo.ForeignKeys {
		if fk.Name.L == fkName.L {
			found = true
			fkInfo = *fk
		}
	}

	if !found {
		// The foreign key doesn't exist; cancel the job so it isn't retried.
		job.State = model.JobCancelled
		return infoschema.ErrForeignKeyNotExists.Gen("foreign key %s doesn't exist", fkName)
	}

	// Filter the dropped foreign key out in place, reusing the backing array.
	nfks := tblInfo.ForeignKeys[:0]
	for _, fk := range tblInfo.ForeignKeys {
		if fk.Name.L != fkName.L {
			nfks = append(nfks, fk)
		}
	}
	tblInfo.ForeignKeys = nfks

	_, err = t.GenSchemaVersion()
	if err != nil {
		return errors.Trace(err)
	}

	switch fkInfo.State {
	case model.StatePublic:
		// We only record the foreign key, so we move it straight to none.
		// public -> none
		job.SchemaState = model.StateNone
		fkInfo.State = model.StateNone
		err = t.UpdateTable(schemaID, tblInfo)
		if err != nil {
			return errors.Trace(err)
		}
		// finish this job
		job.State = model.JobDone
		return nil
	default:
		return ErrInvalidForeignKeyState.Gen("invalid fk state %v", fkInfo.State)
	}
}
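// Generic sketch of the zero-allocation filter idiom used above: reslicing a
// slice to [:0] keeps its backing array, so the elements that survive the
// filter are compacted in place without allocating a second slice. removeFK is
// a hypothetical helper, not part of the ddl package.
func removeFK(fks []*model.FKInfo, name model.CIStr) []*model.FKInfo {
	kept := fks[:0]
	for _, fk := range fks {
		if fk.Name.L != name.L {
			kept = append(kept, fk)
		}
	}
	return kept
}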
func (d *ddl) onModifyColumn(t *meta.Meta, job *model.Job) error {
	tblInfo, err := d.getTableInfo(t, job)
	if err != nil {
		return errors.Trace(err)
	}

	newCol := &model.ColumnInfo{}
	oldColName := &model.CIStr{}
	err = job.DecodeArgs(newCol, oldColName)
	if err != nil {
		job.State = model.JobCancelled
		return errors.Trace(err)
	}

	oldCol := findCol(tblInfo.Columns, oldColName.L)
	if oldCol == nil || oldCol.State != model.StatePublic {
		job.State = model.JobCancelled
		return infoschema.ErrColumnNotExists.GenByArgs(newCol.Name, tblInfo.Name)
	}
	*oldCol = *newCol

	err = t.UpdateTable(job.SchemaID, tblInfo)
	if err != nil {
		job.State = model.JobCancelled
		return errors.Trace(err)
	}
	ver, err := updateSchemaVersion(t, job)
	if err != nil {
		return errors.Trace(err)
	}
	job.SchemaState = model.StatePublic
	job.State = model.JobDone
	addTableHistoryInfo(job, ver, tblInfo)
	return nil
}
func (b *Builder) applyCreateTable(m *meta.Meta, roDBInfo *model.DBInfo, tableID int64, alloc autoid.Allocator) error {
	tblInfo, err := m.GetTable(roDBInfo.ID, tableID)
	if err != nil {
		return errors.Trace(err)
	}
	if tblInfo == nil {
		// When we apply an old schema diff, the table may have been dropped already,
		// so we need to fall back to a full load.
		return ErrTableNotExists
	}
	if alloc == nil {
		alloc = autoid.NewAllocator(b.handle.store, roDBInfo.ID)
	}
	tbl, err := tables.TableFromMeta(alloc, tblInfo)
	if err != nil {
		return errors.Trace(err)
	}
	tableNames := b.is.schemaMap[roDBInfo.Name.L]
	tableNames.tables[tblInfo.Name.L] = tbl
	bucketIdx := tableBucketIdx(tableID)
	sortedTables := b.is.sortedTablesBuckets[bucketIdx]
	sortedTables = append(sortedTables, tbl)
	sort.Sort(sortedTables)
	b.is.sortedTablesBuckets[bucketIdx] = sortedTables
	return nil
}
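// Illustrative sketch (not the infoschema package's code) of the bucketed,
// sorted index maintained above, using plain table IDs in place of table.Table
// values; the modulo bucketing and sort.Slice stand in for the real
// tableBucketIdx helper and the sort.Interface that sortedTables implements.
// (Assumes "sort" is imported.)
func addToBucket(buckets [][]int64, tableID int64) {
	i := int(tableID % int64(len(buckets)))
	buckets[i] = append(buckets[i], tableID)
	// Keep each bucket sorted so a table can later be found by binary search.
	sort.Slice(buckets[i], func(a, b int) bool { return buckets[i][a] < buckets[i][b] })
}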
func (d *ddl) delReorgSchema(t *meta.Meta, job *model.Job) error {
	dbInfo := &model.DBInfo{}
	if err := job.DecodeArgs(dbInfo); err != nil {
		// arg error, cancel this job.
		job.State = model.JobCancelled
		return errors.Trace(err)
	}

	tables, err := t.ListTables(dbInfo.ID)
	if terror.ErrorEqual(meta.ErrDBNotExists, err) {
		job.State = model.JobDone
		return nil
	}
	if err != nil {
		return errors.Trace(err)
	}

	if err = d.dropSchemaData(dbInfo, tables); err != nil {
		return errors.Trace(err)
	}

	// finish this background job
	job.SchemaState = model.StateNone
	job.State = model.JobDone
	return nil
}
func (d *ddl) getReorgInfo(t *meta.Meta, job *model.Job) (*reorgInfo, error) {
	var err error
	info := &reorgInfo{
		Job:   job,
		d:     d,
		first: job.SnapshotVer == 0,
	}

	if info.first {
		// Get the current version for reorganization if we don't have one yet.
		var ver kv.Version
		ver, err = d.store.CurrentVersion()
		if err != nil {
			return nil, errors.Trace(err)
		} else if ver.Ver <= 0 {
			return nil, errors.Errorf("invalid storage current version %d", ver.Ver)
		}
		job.SnapshotVer = ver.Ver
	} else {
		info.Handle, err = t.GetDDLReorgHandle(job)
		if err != nil {
			return nil, errors.Trace(err)
		}
	}

	if info.Handle > 0 {
		// We have already handled this handle, so start from the next one.
		info.Handle++
	}
	return info, errors.Trace(err)
}
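// Hypothetical illustration of the resume logic above: a reorganization either
// starts fresh against a new storage snapshot or resumes after the last handle
// it persisted, since that handle was already processed. nextReorgHandle is
// not part of the ddl package.
func nextReorgHandle(savedHandle int64, first bool) int64 {
	if first || savedHandle <= 0 {
		return 0 // start from the beginning of the table
	}
	return savedHandle + 1 // savedHandle itself is already done
}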
// every time we enter another state, we must call this function.
func (d *ddl) updateJob(t *meta.Meta, job *model.Job) error {
	err := d.verifyOwner(t)
	if err != nil {
		return errors.Trace(err)
	}

	err = t.UpdateDDLJob(0, job)
	return errors.Trace(err)
}
func checkDrop(c *C, t *meta.Meta) bool {
	bgJob, err := t.GetBgJob(0)
	c.Assert(err, IsNil)
	if bgJob == nil {
		return true
	}

	time.Sleep(5 * time.Millisecond)
	return false
}
// finishBgJob finishes a background job.
func (d *ddl) finishBgJob(t *meta.Meta, job *model.Job) error {
	log.Warnf("[ddl] finish background job %v", job)
	if _, err := t.DeQueueBgJob(); err != nil {
		return errors.Trace(err)
	}

	err := t.AddHistoryBgJob(job)
	return errors.Trace(err)
}
func checkDrop(c *C, t *meta.Meta) bool {
	bgJob, err := t.GetBgJob(0)
	c.Assert(err, IsNil)
	if bgJob == nil {
		return true
	}

	time.Sleep(testLease)
	return false
}
func (d *ddl) finishJob(t *meta.Meta, job *model.Job) error {
	log.Warnf("finish DDL job %v", job)
	// The job is done: dequeue it, record it in history, and let the next job run.
	_, err := t.DeQueueDDLJob()
	if err != nil {
		return errors.Trace(err)
	}

	err = t.AddHistoryDDLJob(job)
	return errors.Trace(err)
}
// prepareBgJob prepares a background job.
func (d *ddl) prepareBgJob(t *meta.Meta, ddlJob *model.Job) error {
	job := &model.Job{
		ID:       ddlJob.ID,
		SchemaID: ddlJob.SchemaID,
		TableID:  ddlJob.TableID,
		Type:     ddlJob.Type,
		Args:     ddlJob.Args,
	}

	err := t.EnQueueBgJob(job)
	return errors.Trace(err)
}
func (d *ddl) onDropTable(t *meta.Meta, job *model.Job) error {
	schemaID := job.SchemaID
	tableID := job.TableID

	// Check this table's database.
	tblInfo, err := t.GetTable(schemaID, tableID)
	if terror.ErrorEqual(err, meta.ErrDBNotExists) {
		job.State = model.JobCancelled
		return errors.Trace(infoschema.ErrDatabaseNotExists)
	} else if err != nil {
		return errors.Trace(err)
	}

	// Check the table.
	if tblInfo == nil {
		job.State = model.JobCancelled
		return errors.Trace(infoschema.ErrTableNotExists)
	}

	ver, err := updateSchemaVersion(t, job)
	if err != nil {
		return errors.Trace(err)
	}

	switch tblInfo.State {
	case model.StatePublic:
		// public -> write only
		job.SchemaState = model.StateWriteOnly
		tblInfo.State = model.StateWriteOnly
		err = t.UpdateTable(schemaID, tblInfo)
	case model.StateWriteOnly:
		// write only -> delete only
		job.SchemaState = model.StateDeleteOnly
		tblInfo.State = model.StateDeleteOnly
		err = t.UpdateTable(schemaID, tblInfo)
	case model.StateDeleteOnly:
		// delete only -> none
		tblInfo.State = model.StateNone
		if err = t.UpdateTable(schemaID, tblInfo); err != nil {
			break
		}
		if err = t.DropTable(job.SchemaID, job.TableID); err != nil {
			break
		}
		// Finish this job.
		job.State = model.JobDone
		job.SchemaState = model.StateNone
		addTableHistoryInfo(job, ver, tblInfo)
		startKey := tablecodec.EncodeTablePrefix(tableID)
		job.Args = append(job.Args, startKey)
	default:
		err = ErrInvalidTableState.Gen("invalid table state %v", tblInfo.State)
	}

	return errors.Trace(err)
}
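// Illustrative sketch (not part of the ddl package) of the schema-state ladder
// that onDropTable walks, one job run per step; nextDropTableState is a
// hypothetical helper name, and this assumes model.SchemaState is the type of
// the StatePublic/StateWriteOnly/StateDeleteOnly/StateNone constants above.
func nextDropTableState(s model.SchemaState) (next model.SchemaState, done bool) {
	switch s {
	case model.StatePublic:
		return model.StateWriteOnly, false // public -> write only
	case model.StateWriteOnly:
		return model.StateDeleteOnly, false // write only -> delete only
	case model.StateDeleteOnly:
		return model.StateNone, true // delete only -> none; the metadata can now be dropped
	default:
		return s, false
	}
}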
func (d *ddl) onCreateTable(t *meta.Meta, job *model.Job) error {
	schemaID := job.SchemaID
	tbInfo := &model.TableInfo{}
	if err := job.DecodeArgs(tbInfo); err != nil {
		// arg error, cancel this job.
		job.State = model.JobCancelled
		return errors.Trace(err)
	}

	tbInfo.State = model.StateNone

	tables, err := t.ListTables(schemaID)
	if terror.ErrorEqual(err, meta.ErrDBNotExists) {
		job.State = model.JobCancelled
		return errors.Trace(infoschema.ErrDatabaseNotExists)
	} else if err != nil {
		return errors.Trace(err)
	}

	for _, tbl := range tables {
		if tbl.Name.L == tbInfo.Name.L {
			if tbl.ID != tbInfo.ID {
				// A different table with this name already exists, so we can't create it; cancel this job now.
				job.State = model.JobCancelled
				return errors.Trace(infoschema.ErrTableExists)
			}
			tbInfo = tbl
		}
	}

	ver, err := updateSchemaVersion(t, job)
	if err != nil {
		return errors.Trace(err)
	}

	switch tbInfo.State {
	case model.StateNone:
		// none -> public
		job.SchemaState = model.StatePublic
		tbInfo.State = model.StatePublic
		err = t.CreateTable(schemaID, tbInfo)
		if err != nil {
			return errors.Trace(err)
		}
		// finish this job
		job.State = model.JobDone
		addTableHistoryInfo(job, ver, tbInfo)
		return nil
	default:
		return ErrInvalidTableState.Gen("invalid table state %v", tbInfo.State)
	}
}
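// Sketch of the re-entrancy check above: a create-table job may be re-run, so
// an existing table with the same name is only a conflict when it has a
// different ID; the same ID means this very job already wrote the metadata.
// createConflicts is a hypothetical helper, not part of the ddl package.
func createConflicts(existing []*model.TableInfo, want *model.TableInfo) bool {
	for _, tbl := range existing {
		if tbl.Name.L == want.Name.L && tbl.ID != want.ID {
			return true // a genuinely different table already uses this name
		}
	}
	return false
}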
func (b *Builder) applyCreateSchema(m *meta.Meta, diff *model.SchemaDiff) error {
	di, err := m.GetDatabase(diff.SchemaID)
	if err != nil {
		return errors.Trace(err)
	}
	if di == nil {
		// When we apply an old schema diff, the database may have been dropped already,
		// so we need to fall back to a full load.
		return ErrDatabaseNotExists
	}
	b.is.schemaMap[di.Name.L] = &schemaTables{dbInfo: di, tables: make(map[string]table.Table)}
	return nil
}
func (d *ddl) verifySchemaMetaVersion(t *meta.Meta, schemaMetaVersion int64) error {
	curVer, err := t.GetSchemaVersion()
	if err != nil {
		return errors.Trace(err)
	}
	if curVer != schemaMetaVersion {
		return errors.Errorf("Schema changed, our version %d, but got %d", schemaMetaVersion, curVer)
	}

	// Increment version.
	_, err = t.GenSchemaVersion()
	return errors.Trace(err)
}
func (d *ddl) onCreateSchema(t *meta.Meta, job *model.Job) error {
	schemaID := job.SchemaID
	dbInfo := &model.DBInfo{}
	if err := job.DecodeArgs(dbInfo); err != nil {
		// arg error, cancel this job.
		job.State = model.JobCancelled
		return errors.Trace(err)
	}

	dbInfo.ID = schemaID
	dbInfo.State = model.StateNone

	dbs, err := t.ListDatabases()
	if err != nil {
		return errors.Trace(err)
	}

	for _, db := range dbs {
		if db.Name.L == dbInfo.Name.L {
			if db.ID != schemaID {
				// The database already exists, so we can't create it; cancel this job now.
				job.State = model.JobCancelled
				return errors.Trace(infoschema.ErrDatabaseExists)
			}
			dbInfo = db
		}
	}

	ver, err := updateSchemaVersion(t, job)
	if err != nil {
		return errors.Trace(err)
	}

	switch dbInfo.State {
	case model.StateNone:
		// none -> public
		job.SchemaState = model.StatePublic
		dbInfo.State = model.StatePublic
		err = t.CreateDatabase(dbInfo)
		if err != nil {
			return errors.Trace(err)
		}
		// finish this job
		job.State = model.JobDone
		addDBHistoryInfo(job, ver, dbInfo)
		return nil
	default:
		// We can't reach here.
		return errors.Errorf("invalid db state %v", dbInfo.State)
	}
}
func (b *Builder) applyCreateSchema(m *meta.Meta, diff *model.SchemaDiff) error {
	di, err := m.GetDatabase(diff.SchemaID)
	if err != nil {
		return errors.Trace(err)
	}
	if di == nil {
		// When we apply an old schema diff, the database may have been dropped already,
		// so we need to fall back to a full load.
		return ErrDatabaseNotExists
	}
	b.is.schemas[di.ID] = di
	b.is.schemaNameToID[di.Name.L] = di.ID
	return nil
}
func (d *ddl) getJobOwner(t *meta.Meta, flag JobType) (*model.Owner, error) {
	var owner *model.Owner
	var err error
	switch flag {
	case ddlJobFlag:
		owner, err = t.GetDDLJobOwner()
	case bgJobFlag:
		owner, err = t.GetBgJobOwner()
	default:
		err = errInvalidJobFlag
	}
	return owner, errors.Trace(err)
}
func (d *ddl) convert2RollbackJob(t *meta.Meta, job *model.Job, tblInfo *model.TableInfo, indexInfo *model.IndexInfo) error {
	job.State = model.JobRollback
	job.Args = []interface{}{indexInfo.Name}
	// If an add-index job rolls back in the write-reorganization state, all keys that have
	// already been added need to be deleted, which is the same work a drop-index job does.
	// The write-reorganization state of an add-index job corresponds to the write-only state
	// of a drop-index job, so the next state is delete only.
	indexInfo.State = model.StateDeleteOnly
	job.SchemaState = model.StateDeleteOnly
	err := t.UpdateTable(job.SchemaID, tblInfo)
	if err != nil {
		return errors.Trace(err)
	}

	err = kv.ErrKeyExists.Gen("Duplicate for key %s", indexInfo.Name.O)
	return errors.Trace(err)
}
func (d *ddl) finishDDLJob(t *meta.Meta, job *model.Job) error {
	log.Warnf("[ddl] finish DDL job %v", job)
	// The job is done: dequeue it, record it in history, and let the next job run.
	_, err := t.DeQueueDDLJob()
	if err != nil {
		return errors.Trace(err)
	}

	switch job.Type {
	case model.ActionDropSchema, model.ActionDropTable:
		if err = d.prepareBgJob(t, job); err != nil {
			return errors.Trace(err)
		}
	}

	err = t.AddHistoryDDLJob(job)
	return errors.Trace(err)
}
// prepareBgJob prepares a background job.
func (d *ddl) prepareBgJob(t *meta.Meta, ddlJob *model.Job) error {
	job := &model.Job{
		ID:       ddlJob.ID,
		SchemaID: ddlJob.SchemaID,
		TableID:  ddlJob.TableID,
		Type:     ddlJob.Type,
	}
	if len(ddlJob.Args) >= 2 {
		// ddlJob.Args[0] is the schema version, which the background job doesn't need, and
		// ddlJob.Args[1] is the table or database information. Carrying them along would make
		// the background job for dropping a schema more complicated to handle, so drop both.
		job.Args = ddlJob.Args[2:]
	}

	err := t.EnQueueBgJob(job)
	return errors.Trace(err)
}
func (do *Domain) fetchAllSchemasWithTables(m *meta.Meta) ([]*model.DBInfo, error) {
	allSchemas, err := m.ListDatabases()
	if err != nil {
		return nil, errors.Trace(err)
	}
	splittedSchemas := do.splitForConcurrentFetch(allSchemas)
	doneCh := make(chan error, len(splittedSchemas))
	for _, schemas := range splittedSchemas {
		go do.fetchSchemasWithTables(schemas, m, doneCh)
	}
	for range splittedSchemas {
		err = <-doneCh
		if err != nil {
			return nil, errors.Trace(err)
		}
	}
	return allSchemas, nil
}
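// A minimal, generic sketch of the fan-out/fan-in pattern above: split the work
// into batches, fetch each batch in its own goroutine, and collect the results
// over a channel buffered to the number of batches so workers never block even
// if the caller stops reading early. The helper name fetchConcurrently and the
// fetch callback are assumptions, not the domain package's API; unlike the code
// above, this variant drains every worker before returning.
func fetchConcurrently(batches [][]*model.DBInfo, fetch func([]*model.DBInfo) error) error {
	doneCh := make(chan error, len(batches))
	for _, b := range batches {
		go func(b []*model.DBInfo) { doneCh <- fetch(b) }(b)
	}
	var firstErr error
	for range batches {
		if err := <-doneCh; err != nil && firstErr == nil {
			firstErr = err // keep the first failure, but still wait for everyone
		}
	}
	return firstErr
}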
func (d *ddl) getTableInfo(t *meta.Meta, job *model.Job) (*model.TableInfo, error) {
	schemaID := job.SchemaID
	tableID := job.TableID
	tblInfo, err := t.GetTable(schemaID, tableID)
	if terror.ErrorEqual(err, meta.ErrDBNotExists) {
		job.State = model.JobCancelled
		return nil, errors.Trace(infoschema.ErrDatabaseNotExists)
	} else if err != nil {
		return nil, errors.Trace(err)
	} else if tblInfo == nil {
		job.State = model.JobCancelled
		return nil, errors.Trace(infoschema.ErrTableNotExists)
	}

	if tblInfo.State != model.StatePublic {
		job.State = model.JobCancelled
		return nil, ErrInvalidTableState.Gen("table %s is not in public, but %s", tblInfo.Name.L, tblInfo.State)
	}

	return tblInfo, nil
}
func (d *ddl) checkOwner(t *meta.Meta, flag JobType) (*model.Owner, error) {
	var owner *model.Owner
	var err error
	switch flag {
	case ddlJobFlag:
		owner, err = t.GetDDLJobOwner()
	case bgJobFlag:
		owner, err = t.GetBgJobOwner()
	default:
		err = errInvalidJobFlag
	}
	if err != nil {
		return nil, errors.Trace(err)
	}
	if owner == nil {
		owner = &model.Owner{}
		// try to set owner
		owner.OwnerID = d.uuid
	}

	now := time.Now().UnixNano()
	// We must wait 2 * lease time to guarantee other servers update the schema.
	// The owner updates its status every 2 * lease time, so here we use
	// 4 * lease to check whether it has timed out.
	maxTimeout := int64(4 * d.lease)
	if owner.OwnerID == d.uuid || now-owner.LastUpdateTS > maxTimeout {
		owner.OwnerID = d.uuid
		owner.LastUpdateTS = now
		// update status.
		switch flag {
		case ddlJobFlag:
			err = t.SetDDLJobOwner(owner)
		case bgJobFlag:
			err = t.SetBgJobOwner(owner)
		}
		if err != nil {
			return nil, errors.Trace(err)
		}
		log.Debugf("[ddl] become %s job owner %s", flag, owner.OwnerID)
	}

	if owner.OwnerID != d.uuid {
		log.Debugf("[ddl] not %s job owner, owner is %s", flag, owner.OwnerID)
		return nil, errors.Trace(errNotOwner)
	}
	return owner, nil
}
func (b *Builder) applyCreateTable(m *meta.Meta, roDBInfo *model.DBInfo, tableID int64, alloc autoid.Allocator) error {
	tblInfo, err := m.GetTable(roDBInfo.ID, tableID)
	if err != nil {
		return errors.Trace(err)
	}
	if tblInfo == nil {
		// When we apply an old schema diff, the table may have been dropped already,
		// so we need to fall back to a full load.
		return ErrTableNotExists
	}
	if alloc == nil {
		alloc = autoid.NewAllocator(b.handle.store, roDBInfo.ID)
	}
	tbl, err := tables.TableFromMeta(alloc, tblInfo)
	if err != nil {
		return errors.Trace(err)
	}
	b.is.tables[tblInfo.ID] = tbl
	tn := makeTableName(roDBInfo.Name.L, tblInfo.Name.L)
	b.is.tableNameToID[string(tn)] = tblInfo.ID
	return nil
}
func (do *Domain) fetchSchemasWithTables(schemas []*model.DBInfo, m *meta.Meta, done chan error) {
	for _, di := range schemas {
		if di.State != model.StatePublic {
			// schema is not public, can't be used outside.
			continue
		}
		tables, err := m.ListTables(di.ID)
		if err != nil {
			done <- err
			return
		}
		di.Tables = make([]*model.TableInfo, 0, len(tables))
		for _, tbl := range tables {
			if tbl.State != model.StatePublic {
				// table is not public, can't be used outside.
				continue
			}
			di.Tables = append(di.Tables, tbl)
		}
	}
	done <- nil
}