func (*testSuite) TestT(c *C) {
	driver := localstore.Driver{Driver: goleveldb.MemoryDriver{}}
	store, err := driver.Open("memory")
	c.Assert(err, IsNil)
	defer store.Close()

	m := meta.NewMeta(store)
	err = m.RunInNewTxn(false, func(m *meta.TMeta) error {
		err = m.CreateDatabase(&model.DBInfo{ID: 1, Name: model.NewCIStr("a")})
		c.Assert(err, IsNil)
		err = m.CreateTable(1, &model.TableInfo{ID: 1, Name: model.NewCIStr("t")})
		c.Assert(err, IsNil)
		return nil
	})
	c.Assert(err, IsNil)

	alloc := autoid.NewAllocator(m, 1)
	c.Assert(alloc, NotNil)

	id, err := alloc.Alloc(1)
	c.Assert(err, IsNil)
	c.Assert(id, Equals, int64(1))

	id, err = alloc.Alloc(1)
	c.Assert(err, IsNil)
	c.Assert(id, Equals, int64(2))

	id, err = alloc.Alloc(0)
	c.Assert(err, NotNil)
}
func (b *Builder) applyCreateTable(m *meta.Meta, roDBInfo *model.DBInfo, tableID int64, alloc autoid.Allocator) error {
	tblInfo, err := m.GetTable(roDBInfo.ID, tableID)
	if err != nil {
		return errors.Trace(err)
	}
	if tblInfo == nil {
		// When we apply an old schema diff, the table may have been dropped already,
		// so we need to fall back to a full load.
		return ErrTableNotExists
	}
	if alloc == nil {
		alloc = autoid.NewAllocator(b.handle.store, roDBInfo.ID)
	}
	tbl, err := tables.TableFromMeta(alloc, tblInfo)
	if err != nil {
		return errors.Trace(err)
	}
	tableNames := b.is.schemaMap[roDBInfo.Name.L]
	tableNames.tables[tblInfo.Name.L] = tbl
	bucketIdx := tableBucketIdx(tableID)
	sortedTables := b.is.sortedTablesBuckets[bucketIdx]
	sortedTables = append(sortedTables, tbl)
	sort.Sort(sortedTables)
	b.is.sortedTablesBuckets[bucketIdx] = sortedTables
	return nil
}
// Set sets DBInfo to information schema.
func (h *Handle) Set(newInfo []*model.DBInfo) {
	info := &infoSchema{
		schemaNameToID: map[string]int64{},
		tableNameToID:  map[tableName]int64{},
		columnNameToID: map[columnName]int64{},
		schemas:        map[int64]*model.DBInfo{},
		tables:         map[int64]table.Table{},
		columns:        map[int64]*model.ColumnInfo{},
		indices:        map[indexName]*model.IndexInfo{},
		columnIndices:  map[int64][]*model.IndexInfo{},
	}
	for _, di := range newInfo {
		info.schemas[di.ID] = di
		info.schemaNameToID[di.Name.L] = di.ID
		for _, t := range di.Tables {
			alloc := autoid.NewAllocator(h.store)
			info.tables[t.ID] = table.TableFromMeta(di.Name.L, alloc, t)
			tname := tableName{di.Name.L, t.Name.L}
			info.tableNameToID[tname] = t.ID
			for _, c := range t.Columns {
				info.columns[c.ID] = c
				info.columnNameToID[columnName{tname, c.Name.L}] = c.ID
			}
			for _, idx := range t.Indices {
				info.indices[indexName{tname, idx.Name.L}] = idx
				for _, idxCol := range idx.Columns {
					columnID := t.Columns[idxCol.Offset].ID
					columnIndices := info.columnIndices[columnID]
					info.columnIndices[columnID] = append(columnIndices, idx)
				}
			}
		}
	}
	h.value.Store(info)
}
func (d *ddl) dropSchemaData(dbInfo *model.DBInfo, tables []*model.TableInfo) error {
	for _, tblInfo := range tables {
		alloc := autoid.NewAllocator(d.store, dbInfo.ID)
		t := table.TableFromMeta(alloc, tblInfo)
		err := d.dropTableData(t)
		if err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}
func testGetTable(c *C, d *ddl, schemaID int64, tableID int64) table.Table {
	var tblInfo *model.TableInfo
	kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error {
		t := meta.NewMeta(txn)
		var err error
		tblInfo, err = t.GetTable(schemaID, tableID)
		c.Assert(err, IsNil)
		c.Assert(tblInfo, NotNil)
		return nil
	})
	alloc := autoid.NewAllocator(d.store, schemaID)
	tbl := table.TableFromMeta(alloc, tblInfo)
	return tbl
}
// If the table is created with an auto_increment option, we should rebase the tableAutoIncID value.
func (d *ddl) handleAutoIncID(tbInfo *model.TableInfo, schemaID int64) error {
	alloc := autoid.NewAllocator(d.store, schemaID)
	tbInfo.State = model.StatePublic
	tb, err := table.TableFromMeta(alloc, tbInfo)
	if err != nil {
		return errors.Trace(err)
	}
	// Subtract 1 so that the requested value itself is not consumed here;
	// the next Alloc operation will return it. This is consistent with MySQL.
	if err = tb.RebaseAutoID(tbInfo.AutoIncID-1, false); err != nil {
		return errors.Trace(err)
	}
	return nil
}
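// A minimal sketch of the minus-one rebase semantics described in the comment
// above. This is illustrative only: it assumes the NewAllocator(store, dbID),
// Rebase(tableID, base, allocIDs) and Alloc(tableID) signatures used by the
// other snippets in this section; exampleRebaseSemantics, store, dbID and
// tableID are hypothetical placeholders, not part of the original code.
func exampleRebaseSemantics(store kv.Storage, dbID, tableID int64) {
	alloc := autoid.NewAllocator(store, dbID)
	// For CREATE TABLE ... AUTO_INCREMENT = 100, rebase to 99 so that 100
	// itself stays unused; the next Alloc then returns exactly 100.
	if err := alloc.Rebase(tableID, 100-1, false); err != nil {
		return
	}
	id, _ := alloc.Alloc(tableID) // id == 100
	_ = id
}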
func (*testSuite) TestT(c *C) {
	driver := localstore.Driver{Driver: goleveldb.MemoryDriver{}}
	store, err := driver.Open("memory")
	c.Assert(err, IsNil)
	defer store.Close()

	alloc := autoid.NewAllocator(store)
	c.Assert(alloc, NotNil)

	id, err := alloc.Alloc(1)
	c.Assert(err, IsNil)
	c.Assert(id, Equals, int64(1))

	id, err = alloc.Alloc(1)
	c.Assert(err, IsNil)
	c.Assert(id, Equals, int64(2))

	id, err = alloc.Alloc(0)
	c.Assert(err, NotNil)
}
func (b *Builder) createSchemaTablesForDB(di *model.DBInfo) error {
	schTbls := &schemaTables{
		dbInfo: di,
		tables: make(map[string]table.Table, len(di.Tables)),
	}
	b.is.schemaMap[di.Name.L] = schTbls
	for _, t := range di.Tables {
		alloc := autoid.NewAllocator(b.handle.store, di.ID)
		var tbl table.Table
		tbl, err := tables.TableFromMeta(alloc, t)
		if err != nil {
			return errors.Trace(err)
		}
		schTbls.tables[t.Name.L] = tbl
		sortedTables := b.is.sortedTablesBuckets[tableBucketIdx(t.ID)]
		b.is.sortedTablesBuckets[tableBucketIdx(t.ID)] = append(sortedTables, tbl)
	}
	return nil
}
func (d *ddl) handleTableOptions(options []*ast.TableOption, tbInfo *model.TableInfo, schemaID int64) error {
	for _, op := range options {
		if op.Tp == ast.TableOptionAutoIncrement {
			alloc := autoid.NewAllocator(d.store, schemaID)
			tbInfo.State = model.StatePublic
			tb, err := table.TableFromMeta(alloc, tbInfo)
			if err != nil {
				return errors.Trace(err)
			}
			// Subtract 1 so that the requested value itself is not consumed here;
			// the next Alloc operation will return it. This is consistent with MySQL.
			if err = tb.RebaseAutoID(int64(op.UintValue-1), false); err != nil {
				return errors.Trace(err)
			}
		}
	}
	return nil
}
// Set sets DBInfo to information schema.
func (h *Handle) Set(newInfo []*model.DBInfo, schemaMetaVersion int64) error {
	info := &infoSchema{
		schemaNameToID:    map[string]int64{},
		tableNameToID:     map[tableName]int64{},
		columnNameToID:    map[columnName]int64{},
		schemas:           map[int64]*model.DBInfo{},
		tables:            map[int64]table.Table{},
		columns:           map[int64]*model.ColumnInfo{},
		indices:           map[indexName]*model.IndexInfo{},
		columnIndices:     map[int64][]*model.IndexInfo{},
		schemaMetaVersion: schemaMetaVersion,
	}
	var err error
	for _, di := range newInfo {
		info.schemas[di.ID] = di
		info.schemaNameToID[di.Name.L] = di.ID
		for _, t := range di.Tables {
			alloc := autoid.NewAllocator(h.store, di.ID)
			info.tables[t.ID], err = table.TableFromMeta(alloc, t)
			if err != nil {
				return errors.Trace(err)
			}
			tname := tableName{di.Name.L, t.Name.L}
			info.tableNameToID[tname] = t.ID
			for _, c := range t.Columns {
				info.columns[c.ID] = c
				info.columnNameToID[columnName{tname, c.Name.L}] = c.ID
			}
			for _, idx := range t.Indices {
				info.indices[indexName{tname, idx.Name.L}] = idx
				for _, idxCol := range idx.Columns {
					columnID := t.Columns[idxCol.Offset].ID
					columnIndices := info.columnIndices[columnID]
					info.columnIndices[columnID] = append(columnIndices, idx)
				}
			}
		}
	}
	h.value.Store(info)
	return nil
}
func getCurrentTable(d *ddl, schemaID, tableID int64) (table.Table, error) {
	var tblInfo *model.TableInfo
	err := kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error {
		t := meta.NewMeta(txn)
		var err error
		tblInfo, err = t.GetTable(schemaID, tableID)
		if err != nil {
			return errors.Trace(err)
		}
		return nil
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	alloc := autoid.NewAllocator(d.store, schemaID)
	tbl, err := table.TableFromMeta(alloc, tblInfo)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return tbl, err
}
func (b *Builder) applyCreateTable(m *meta.Meta, roDBInfo *model.DBInfo, tableID int64, alloc autoid.Allocator) error {
	tblInfo, err := m.GetTable(roDBInfo.ID, tableID)
	if err != nil {
		return errors.Trace(err)
	}
	if tblInfo == nil {
		// When we apply an old schema diff, the table may have been dropped already,
		// so we need to fall back to a full load.
		return ErrTableNotExists
	}
	if alloc == nil {
		alloc = autoid.NewAllocator(b.handle.store, roDBInfo.ID)
	}
	tbl, err := tables.TableFromMeta(alloc, tblInfo)
	if err != nil {
		return errors.Trace(err)
	}
	b.is.tables[tblInfo.ID] = tbl
	tn := makeTableName(roDBInfo.Name.L, tblInfo.Name.L)
	b.is.tableNameToID[string(tn)] = tblInfo.ID
	return nil
}
// InitWithDBInfos initializes an empty new InfoSchema with a slice of DBInfo and schema version.
func (b *Builder) InitWithDBInfos(dbInfos []*model.DBInfo, schemaVersion int64) (*Builder, error) {
	err := b.initMemorySchemas()
	if err != nil {
		return nil, errors.Trace(err)
	}
	info := b.is
	info.schemaMetaVersion = schemaVersion
	for _, di := range dbInfos {
		info.schemas[di.ID] = di
		info.schemaNameToID[di.Name.L] = di.ID
		for _, t := range di.Tables {
			alloc := autoid.NewAllocator(b.handle.store, di.ID)
			var tbl table.Table
			tbl, err = table.TableFromMeta(alloc, t)
			if err != nil {
				return nil, errors.Trace(err)
			}
			info.tables[t.ID] = tbl
			tname := makeTableName(di.Name.L, t.Name.L)
			info.tableNameToID[string(tname)] = t.ID
		}
	}
	return b, nil
}
func (s *testSuite) TestScan(c *C) {
	defer testleak.AfterTest(c)()
	alloc := autoid.NewAllocator(s.store, s.dbInfo.ID)
	tb, err := tables.TableFromMeta(alloc, s.tbInfo)
	c.Assert(err, IsNil)
	indices := tb.Indices()

	_, err = tb.AddRecord(s.ctx, types.MakeDatums(10, 11))
	c.Assert(err, IsNil)
	s.ctx.CommitTxn()

	record1 := &RecordData{Handle: int64(1), Values: types.MakeDatums(int64(10), int64(11))}
	record2 := &RecordData{Handle: int64(2), Values: types.MakeDatums(int64(20), int64(21))}

	ver, err := s.store.CurrentVersion()
	c.Assert(err, IsNil)
	records, _, err := ScanSnapshotTableRecord(s.store, ver, tb, int64(1), 1)
	c.Assert(err, IsNil)
	c.Assert(records, DeepEquals, []*RecordData{record1})

	_, err = tb.AddRecord(s.ctx, record2.Values)
	c.Assert(err, IsNil)
	s.ctx.CommitTxn()

	txn, err := s.store.Begin()
	c.Assert(err, IsNil)

	records, nextHandle, err := ScanTableRecord(txn, tb, int64(1), 1)
	c.Assert(err, IsNil)
	c.Assert(records, DeepEquals, []*RecordData{record1})
	records, nextHandle, err = ScanTableRecord(txn, tb, nextHandle, 1)
	c.Assert(err, IsNil)
	c.Assert(records, DeepEquals, []*RecordData{record2})
	startHandle := nextHandle
	records, nextHandle, err = ScanTableRecord(txn, tb, startHandle, 1)
	c.Assert(err, IsNil)
	c.Assert(records, IsNil)
	c.Assert(nextHandle, Equals, startHandle)

	idxRow1 := &RecordData{Handle: int64(1), Values: types.MakeDatums(int64(10))}
	idxRow2 := &RecordData{Handle: int64(2), Values: types.MakeDatums(int64(20))}
	kvIndex := tables.NewIndex(tb.Meta(), indices[0].Meta())
	idxRows, nextVals, err := ScanIndexData(txn, kvIndex, idxRow1.Values, 2)
	c.Assert(err, IsNil)
	c.Assert(idxRows, DeepEquals, []*RecordData{idxRow1, idxRow2})
	idxRows, nextVals, err = ScanIndexData(txn, kvIndex, idxRow1.Values, 1)
	c.Assert(err, IsNil)
	c.Assert(idxRows, DeepEquals, []*RecordData{idxRow1})
	idxRows, nextVals, err = ScanIndexData(txn, kvIndex, nextVals, 1)
	c.Assert(err, IsNil)
	c.Assert(idxRows, DeepEquals, []*RecordData{idxRow2})
	idxRows, nextVals, err = ScanIndexData(txn, kvIndex, nextVals, 1)
	c.Assert(idxRows, IsNil)
	c.Assert(nextVals, DeepEquals, types.MakeDatums(nil))
	c.Assert(err, IsNil)

	s.testTableData(c, tb, []*RecordData{record1, record2})
	s.testIndex(c, tb, tb.Indices()[0])

	err = tb.RemoveRecord(s.ctx, 1, record1.Values)
	c.Assert(err, IsNil)
	err = tb.RemoveRecord(s.ctx, 2, record2.Values)
	c.Assert(err, IsNil)
}
func (s *testSuite) TestScan(c *C) {
	alloc := autoid.NewAllocator(s.store, s.dbInfo.ID)
	tb, err := tables.TableFromMeta(alloc, s.tbInfo)
	c.Assert(err, IsNil)
	indices := tb.Indices()

	_, err = tb.AddRecord(s.ctx, []interface{}{10, 11})
	c.Assert(err, IsNil)
	s.ctx.FinishTxn(false)

	record1 := &RecordData{Handle: int64(1), Values: []interface{}{int64(10), int64(11)}}
	record2 := &RecordData{Handle: int64(2), Values: []interface{}{int64(20), int64(21)}}

	ver, err := s.store.CurrentVersion()
	c.Assert(err, IsNil)
	records, _, err := ScanSnapshotTableRecord(s.store, ver, tb, int64(1), 1)
	c.Assert(err, IsNil)
	c.Assert(records, DeepEquals, []*RecordData{record1})

	_, err = tb.AddRecord(s.ctx, record2.Values)
	c.Assert(err, IsNil)
	s.ctx.FinishTxn(false)

	txn, err := s.store.Begin()
	c.Assert(err, IsNil)

	records, nextHandle, err := ScanTableRecord(txn, tb, int64(1), 1)
	c.Assert(err, IsNil)
	c.Assert(records, DeepEquals, []*RecordData{record1})
	records, nextHandle, err = ScanTableRecord(txn, tb, nextHandle, 1)
	c.Assert(err, IsNil)
	c.Assert(records, DeepEquals, []*RecordData{record2})
	startHandle := nextHandle
	records, nextHandle, err = ScanTableRecord(txn, tb, startHandle, 1)
	c.Assert(err, IsNil)
	c.Assert(records, IsNil)
	c.Assert(nextHandle, Equals, startHandle)

	idxRow1 := &RecordData{Handle: int64(1), Values: []interface{}{int64(10)}}
	idxRow2 := &RecordData{Handle: int64(2), Values: []interface{}{int64(20)}}
	kvIndex := kv.NewKVIndex(tb.IndexPrefix(), indices[0].Name.L, indices[0].ID, indices[0].Unique)
	idxRows, nextVals, err := ScanIndexData(txn, kvIndex, idxRow1.Values, 2)
	c.Assert(err, IsNil)
	c.Assert(idxRows, DeepEquals, []*RecordData{idxRow1, idxRow2})
	idxRows, nextVals, err = ScanIndexData(txn, kvIndex, idxRow1.Values, 1)
	c.Assert(err, IsNil)
	c.Assert(idxRows, DeepEquals, []*RecordData{idxRow1})
	idxRows, nextVals, err = ScanIndexData(txn, kvIndex, nextVals, 1)
	c.Assert(err, IsNil)
	c.Assert(idxRows, DeepEquals, []*RecordData{idxRow2})
	idxRows, nextVals, err = ScanIndexData(txn, kvIndex, nextVals, 1)
	c.Assert(idxRows, IsNil)
	c.Assert(nextVals, DeepEquals, []interface{}{nil})
	c.Assert(err, IsNil)

	s.testTableData(c, tb, []*RecordData{record1, record2})
	s.testIndex(c, tb, tb.Indices()[0])

	err = tb.RemoveRecord(s.ctx, 1, record1.Values)
	c.Assert(err, IsNil)
	err = tb.RemoveRecord(s.ctx, 2, record2.Values)
	c.Assert(err, IsNil)
}
func (d *ddl) getTable(t *meta.Meta, schemaID int64, tblInfo *model.TableInfo) (table.Table, error) {
	alloc := autoid.NewAllocator(d.store, schemaID)
	tbl := table.TableFromMeta(alloc, tblInfo)
	return tbl, nil
}
func (*testSuite) TestT(c *C) {
	driver := localstore.Driver{Driver: goleveldb.MemoryDriver{}}
	store, err := driver.Open("memory")
	c.Assert(err, IsNil)
	defer store.Close()

	err = kv.RunInNewTxn(store, false, func(txn kv.Transaction) error {
		m := meta.NewMeta(txn)
		err = m.CreateDatabase(&model.DBInfo{ID: 1, Name: model.NewCIStr("a")})
		c.Assert(err, IsNil)
		err = m.CreateTable(1, &model.TableInfo{ID: 1, Name: model.NewCIStr("t")})
		c.Assert(err, IsNil)
		err = m.CreateTable(1, &model.TableInfo{ID: 2, Name: model.NewCIStr("t1")})
		c.Assert(err, IsNil)
		err = m.CreateTable(1, &model.TableInfo{ID: 3, Name: model.NewCIStr("t1")})
		c.Assert(err, IsNil)
		return nil
	})
	c.Assert(err, IsNil)

	alloc := autoid.NewAllocator(store, 1)
	c.Assert(alloc, NotNil)

	id, err := alloc.Alloc(1)
	c.Assert(err, IsNil)
	c.Assert(id, Equals, int64(1))
	id, err = alloc.Alloc(1)
	c.Assert(err, IsNil)
	c.Assert(id, Equals, int64(2))
	id, err = alloc.Alloc(0)
	c.Assert(err, NotNil)

	// rebase
	err = alloc.Rebase(1, int64(1), true)
	c.Assert(err, IsNil)
	id, err = alloc.Alloc(1)
	c.Assert(err, IsNil)
	c.Assert(id, Equals, int64(3))
	err = alloc.Rebase(1, int64(3), true)
	c.Assert(err, IsNil)
	id, err = alloc.Alloc(1)
	c.Assert(err, IsNil)
	c.Assert(id, Equals, int64(4))
	err = alloc.Rebase(1, int64(10), true)
	c.Assert(err, IsNil)
	id, err = alloc.Alloc(1)
	c.Assert(err, IsNil)
	c.Assert(id, Equals, int64(11))
	err = alloc.Rebase(1, int64(3010), true)
	c.Assert(err, IsNil)
	id, err = alloc.Alloc(1)
	c.Assert(err, IsNil)
	c.Assert(id, Equals, int64(3011))

	alloc = autoid.NewAllocator(store, 1)
	c.Assert(alloc, NotNil)
	id, err = alloc.Alloc(1)
	c.Assert(err, IsNil)
	c.Assert(id, Equals, int64(autoid.GetStep()+1))

	alloc = autoid.NewAllocator(store, 1)
	c.Assert(alloc, NotNil)
	err = alloc.Rebase(2, int64(1), false)
	c.Assert(err, IsNil)
	id, err = alloc.Alloc(2)
	c.Assert(err, IsNil)
	c.Assert(id, Equals, int64(2))

	alloc = autoid.NewAllocator(store, 1)
	c.Assert(alloc, NotNil)
	err = alloc.Rebase(3, int64(3210), false)
	c.Assert(err, IsNil)
	alloc = autoid.NewAllocator(store, 1)
	c.Assert(alloc, NotNil)
	err = alloc.Rebase(3, int64(3000), false)
	c.Assert(err, IsNil)
	id, err = alloc.Alloc(3)
	c.Assert(err, IsNil)
	c.Assert(id, Equals, int64(3211))
	err = alloc.Rebase(3, int64(6543), false)
	c.Assert(err, IsNil)
	id, err = alloc.Alloc(3)
	c.Assert(err, IsNil)
	c.Assert(id, Equals, int64(6544))
}
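// A minimal sketch of the batching behaviour that the test above asserts with
// autoid.GetStep(): each allocator reserves a window of GetStep() consecutive
// IDs from the store, so a fresh allocator for the same table continues after
// the window already reserved by the previous one. exampleAllocatorWindows and
// store are hypothetical placeholders; the snippet assumes the same meta setup
// as the test above (database 1 and table 1 already created) and the
// NewAllocator(store, dbID)/Alloc(tableID) signatures used there.
func exampleAllocatorWindows(store kv.Storage) {
	a1 := autoid.NewAllocator(store, 1)
	first, _ := a1.Alloc(1) // first == 1; a1 now holds the window 1..GetStep()
	a2 := autoid.NewAllocator(store, 1)
	next, _ := a2.Alloc(1) // next == GetStep()+1, i.e. just past a1's window
	_, _ = first, next
}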
func (d *ddl) getTable(schemaID int64, tblInfo *model.TableInfo) (table.Table, error) {
	alloc := autoid.NewAllocator(d.store, schemaID)
	tbl, err := table.TableFromMeta(alloc, tblInfo)
	return tbl, errors.Trace(err)
}
// Set sets DBInfo to information schema.
func (h *Handle) Set(newInfo []*model.DBInfo, schemaMetaVersion int64) error {
	info := &infoSchema{
		schemaNameToID:    map[string]int64{},
		tableNameToID:     map[tableName]int64{},
		columnNameToID:    map[columnName]int64{},
		schemas:           map[int64]*model.DBInfo{},
		tables:            map[int64]table.Table{},
		tableAllocators:   map[int64]autoid.Allocator{},
		columns:           map[int64]*model.ColumnInfo{},
		indices:           map[indexName]*model.IndexInfo{},
		columnIndices:     map[int64][]*model.IndexInfo{},
		schemaMetaVersion: schemaMetaVersion,
	}
	var err error
	var hasOldInfo bool
	infoschema := h.Get()
	if infoschema != nil {
		hasOldInfo = true
	}
	for _, di := range newInfo {
		info.schemas[di.ID] = di
		info.schemaNameToID[di.Name.L] = di.ID
		for _, t := range di.Tables {
			alloc := autoid.NewAllocator(h.store, di.ID)
			if hasOldInfo {
				val, ok := infoschema.AllocByID(t.ID)
				if ok {
					alloc = val
				}
			}
			info.tableAllocators[t.ID] = alloc
			info.tables[t.ID], err = table.TableFromMeta(alloc, t)
			if err != nil {
				return errors.Trace(err)
			}
			tname := tableName{di.Name.L, t.Name.L}
			info.tableNameToID[tname] = t.ID
			for _, c := range t.Columns {
				info.columns[c.ID] = c
				info.columnNameToID[columnName{tname, c.Name.L}] = c.ID
			}
			for _, idx := range t.Indices {
				info.indices[indexName{tname, idx.Name.L}] = idx
				for _, idxCol := range idx.Columns {
					columnID := t.Columns[idxCol.Offset].ID
					columnIndices := info.columnIndices[columnID]
					info.columnIndices[columnID] = append(columnIndices, idx)
				}
			}
		}
	}
	// Build Information_Schema.
	info.schemaNameToID[h.memSchema.isDB.Name.L] = h.memSchema.isDB.ID
	info.schemas[h.memSchema.isDB.ID] = h.memSchema.isDB
	for _, t := range h.memSchema.isDB.Tables {
		tbl, ok := h.memSchema.nameToTable[t.Name.L]
		if !ok {
			return ErrTableNotExists.Gen("table `%s` is missing.", t.Name)
		}
		info.tables[t.ID] = tbl
		tname := tableName{h.memSchema.isDB.Name.L, t.Name.L}
		info.tableNameToID[tname] = t.ID
		for _, c := range t.Columns {
			info.columns[c.ID] = c
			info.columnNameToID[columnName{tname, c.Name.L}] = c.ID
		}
	}
	// Add Performance_Schema.
	psDB := h.memSchema.perfHandle.GetDBMeta()
	info.schemaNameToID[psDB.Name.L] = psDB.ID
	info.schemas[psDB.ID] = psDB
	for _, t := range psDB.Tables {
		tbl, ok := h.memSchema.perfHandle.GetTable(t.Name.O)
		if !ok {
			return ErrTableNotExists.Gen("table `%s` is missing.", t.Name)
		}
		info.tables[t.ID] = tbl
		tname := tableName{psDB.Name.L, t.Name.L}
		info.tableNameToID[tname] = t.ID
		for _, c := range t.Columns {
			info.columns[c.ID] = c
			info.columnNameToID[columnName{tname, c.Name.L}] = c.ID
		}
	}
	// Refill some tables in Information_Schema:
	// schemata/tables/columns/statistics.
	dbNames := make([]string, 0, len(info.schemas))
	dbInfos := make([]*model.DBInfo, 0, len(info.schemas))
	for _, v := range info.schemas {
		dbNames = append(dbNames, v.Name.L)
		dbInfos = append(dbInfos, v)
	}
	err = refillTable(h.memSchema.schemataTbl, dataForSchemata(dbNames))
	if err != nil {
		return errors.Trace(err)
	}
	err = refillTable(h.memSchema.tablesTbl, dataForTables(dbInfos))
	if err != nil {
		return errors.Trace(err)
	}
	err = refillTable(h.memSchema.columnsTbl, dataForColumns(dbInfos))
	if err != nil {
		return errors.Trace(err)
	}
	err = refillTable(h.memSchema.statisticsTbl, dataForStatistics(dbInfos))
	if err != nil {
		return errors.Trace(err)
	}
	h.value.Store(info)
	return nil
}
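// A minimal sketch of the allocator-reuse decision made in the loop above,
// extracted for clarity. reuseAllocator is a hypothetical helper, not part of
// the original code; it only relies on the Handle.Get, AllocByID and
// autoid.NewAllocator calls already used in Set. Reusing the allocator cached
// in the previous infoSchema keeps its in-memory ID window alive across schema
// reloads, instead of re-reading the persisted base for every table on every
// Set call.
func reuseAllocator(h *Handle, dbID, tableID int64) autoid.Allocator {
	if old := h.Get(); old != nil {
		if alloc, ok := old.AllocByID(tableID); ok {
			return alloc // keep the window already reserved for this table
		}
	}
	return autoid.NewAllocator(h.store, dbID) // table not seen before
}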