// TestIssue461 checks that commit-time duplicate-key detection (lazy
// constraint checking) reports the right error type and message for both the
// primary key and a unique secondary index when three sessions insert
// overlapping rows.
func (s *testSessionSuite) TestIssue461(c *C) {
	store := newStore(c, s.dbName)
	se1 := newSession(c, store, s.dbName)
	mustExecSQL(c, se1, `CREATE TABLE test ( id int(11) UNSIGNED NOT NULL AUTO_INCREMENT, val int UNIQUE, PRIMARY KEY (id)); `)
	mustExecSQL(c, se1, "begin;")
	mustExecSQL(c, se1, "insert into test(id, val) values(1, 1);")
	se2 := newSession(c, store, s.dbName)
	mustExecSQL(c, se2, "begin;")
	mustExecSQL(c, se2, "insert into test(id, val) values(2, 2);")
	se3 := newSession(c, store, s.dbName)
	mustExecSQL(c, se3, "begin;")
	mustExecSQL(c, se3, "insert into test(id, val) values(1, 2);")
	// se3 commits first and claims id=1 and val=2.
	mustExecSQL(c, se3, "commit;")
	// se1 now conflicts on the primary key (id=1).
	_, err := se1.Execute("commit")
	c.Assert(err, NotNil)
	// Check error type and error message
	c.Assert(terror.ErrorEqual(err, kv.ErrKeyExists), IsTrue)
	c.Assert(err.Error(), Equals, "[kv:3]Duplicate entry '1' for key 'PRIMARY'")
	// se2 conflicts on the unique index (val=2).
	_, err = se2.Execute("commit")
	c.Assert(err, NotNil)
	c.Assert(terror.ErrorEqual(err, kv.ErrKeyExists), IsTrue)
	c.Assert(err.Error(), Equals, "[kv:3]Duplicate entry '2' for key 'val'")
	se := newSession(c, store, s.dbName)
	mustExecSQL(c, se, "drop table test;")
}
// ScanIndexData scans up to limit index entries (handle plus column values)
// starting from startVals. It returns the rows read together with the values
// to resume the scan from; once the index is exhausted the returned resume
// values are past the last row, so a subsequent call yields no data. With
// startVals = nil and limit = -1 it returns the whole index.
func ScanIndexData(txn kv.Transaction, kvIndex table.Index, startVals []types.Datum, limit int64) (
	[]*RecordData, []types.Datum, error) {
	it, _, err := kvIndex.Seek(txn, startVals)
	if err != nil {
		return nil, nil, errors.Trace(err)
	}
	defer it.Close()

	var idxRows []*RecordData
	var curVals []types.Datum
	// limit == -1 never reaches 0, so the loop runs until EOF.
	for limit != 0 {
		val, h, err1 := it.Next()
		if terror.ErrorEqual(err1, io.EOF) {
			// Index exhausted: resume point is just past the last values seen.
			return idxRows, nextIndexVals(curVals), nil
		} else if err1 != nil {
			return nil, nil, errors.Trace(err1)
		}
		idxRows = append(idxRows, &RecordData{Handle: h, Values: val})
		limit--
		curVals = val
	}

	// Peek one more entry so the caller gets the exact next values when
	// more data remains.
	nextVals, _, err := it.Next()
	if terror.ErrorEqual(err, io.EOF) {
		return idxRows, nextIndexVals(curVals), nil
	} else if err != nil {
		return nil, nil, errors.Trace(err)
	}
	return idxRows, nextVals, nil
}
// TestGetValidFloat checks that getValidFloatPrefix extracts the longest
// parseable float prefix of a string, and that floatStrToIntStr reports
// overflow for out-of-range exponents.
func (s *testTypeConvertSuite) TestGetValidFloat(c *C) {
	cases := []struct {
		origin string
		valid  string
	}{
		{"-100", "-100"},
		{"1abc", "1"},
		{"-1-1", "-1"},
		{"+1+1", "+1"},
		{"123..34", "123."},
		{"123.23E-10", "123.23E-10"},
		{"1.1e1.3", "1.1e1"},
		{"1.1e-13a", "1.1e-13"},
		{"1.", "1."},
		{".1", ".1"},
		{"", "0"},
		{"123e+", "123"},
		{"123.e", "123."},
	}
	sc := new(variable.StatementContext)
	for _, ca := range cases {
		prefix, _ := getValidFloatPrefix(sc, ca.origin)
		c.Assert(prefix, Equals, ca.valid)
		// Every extracted prefix must itself be a valid float.
		_, err := strconv.ParseFloat(prefix, 64)
		c.Assert(err, IsNil)
	}

	_, err := floatStrToIntStr("1e9223372036854775807")
	c.Assert(terror.ErrorEqual(err, ErrOverflow), IsTrue)
	_, err = floatStrToIntStr("1e21")
	c.Assert(terror.ErrorEqual(err, ErrOverflow), IsTrue)
}
// reverseMvccSeek seeks for the first key in db which has a k < key and a version <=
// snapshot's version, returns kv.ErrNotExist if such key is not found.
func (s *dbSnapshot) reverseMvccSeek(key kv.Key) (kv.Key, []byte, error) {
	for {
		var mvccKey []byte
		if len(key) != 0 {
			// An empty key means "seek backward from the end of the store";
			// otherwise start just before the encoded version key.
			mvccKey = MvccEncodeVersionKey(key, kv.MaxVersion)
		}
		revMvccKey, _, err := s.store.SeekReverse(mvccKey, s.version.Ver)
		if err != nil {
			if terror.ErrorEqual(err, engine.ErrNotFound) {
				return nil, nil, kv.ErrNotExist
			}
			return nil, nil, errors.Trace(err)
		}
		revKey, _, err := MvccDecode(revMvccKey)
		if err != nil {
			return nil, nil, errors.Trace(err)
		}
		// Seek at the decoded key to pick the newest version visible to
		// this snapshot.
		resultKey, v, err := s.mvccSeek(revKey, true)
		if terror.ErrorEqual(err, kv.ErrNotExist) {
			// No visible version at revKey (e.g. deleted); keep walking
			// backward from it.
			key = revKey
			continue
		}
		return resultKey, v, errors.Trace(err)
	}
}
// addIndices adds data into indices: it writes index entries for row r with
// handle recordID into every writable index of t. On a unique-key conflict
// it returns the conflicting row's handle together with a duplicate-key
// error.
func (t *Table) addIndices(ctx context.Context, recordID int64, r []interface{}, bs *kv.BufferStore) (int64, error) {
	txn, err := ctx.GetTxn(false)
	if err != nil {
		return 0, errors.Trace(err)
	}
	// Clean up lazy check error environment
	defer txn.DelOption(kv.PresumeKeyNotExistsError)
	if t.meta.PKIsHandle {
		// Check key exists.
		recordKey := t.RecordKey(recordID, nil)
		e := kv.ErrKeyExists.Gen("Duplicate entry '%d' for key 'PRIMARY'", recordID)
		txn.SetOption(kv.PresumeKeyNotExistsError, e)
		_, err = txn.Get(recordKey)
		if err == nil {
			return recordID, errors.Trace(e)
		} else if !terror.ErrorEqual(err, kv.ErrNotExist) {
			return 0, errors.Trace(err)
		}
		txn.DelOption(kv.PresumeKeyNotExistsError)
	}
	for _, v := range t.indices {
		if v == nil || v.State == model.StateDeleteOnly || v.State == model.StateDeleteReorganization {
			// if index is in delete only or delete reorganization state, we can't add it.
			continue
		}
		colVals, _ := v.FetchValues(r)
		var dupKeyErr error
		if v.Unique || v.Primary {
			entryKey, err1 := t.genIndexKeyStr(colVals)
			if err1 != nil {
				return 0, errors.Trace(err1)
			}
			dupKeyErr = kv.ErrKeyExists.Gen("Duplicate entry '%s' for key '%s'", entryKey, v.Name)
			txn.SetOption(kv.PresumeKeyNotExistsError, dupKeyErr)
		}
		if err = v.X.Create(bs, colVals, recordID); err != nil {
			if terror.ErrorEqual(err, kv.ErrKeyExists) {
				// Get the duplicate row handle
				// For insert on duplicate syntax, we should update the row
				iter, _, err1 := v.X.Seek(bs, colVals)
				if err1 != nil {
					return 0, errors.Trace(err1)
				}
				_, h, err1 := iter.Next()
				if err1 != nil {
					return 0, errors.Trace(err1)
				}
				return h, errors.Trace(dupKeyErr)
			}
			return 0, errors.Trace(err)
		}
		txn.DelOption(kv.PresumeKeyNotExistsError)
	}
	return 0, nil
}
// IsRetryableError checks if the err is a fatal error and the under going operation is worth to retry. func IsRetryableError(err error) bool { if err == nil { return false } if terror.ErrorEqual(err, ErrLockConflict) || terror.ErrorEqual(err, ErrConditionNotMatch) { return true } return false }
func mayExit(err error, l string) bool { if terror.ErrorEqual(err, liner.ErrPromptAborted) || terror.ErrorEqual(err, io.EOF) { fmt.Println("\nBye") saveHistory() return true } if err != nil { log.Fatal(errors.ErrorStack(err)) } return false }
// addIndices adds data into indices: it writes index entries for row r with
// handle recordID into all writable indices of t. Unless
// SkipConstraintCheck is set, a unique-key conflict returns the existing
// row's handle together with a duplicate-key error.
func (t *Table) addIndices(ctx context.Context, recordID int64, r []types.Datum, bs *kv.BufferStore) (int64, error) {
	txn, err := ctx.GetTxn(false)
	if err != nil {
		return 0, errors.Trace(err)
	}
	// Clean up lazy check error environment
	defer txn.DelOption(kv.PresumeKeyNotExistsError)
	skipCheck := ctx.GetSessionVars().SkipConstraintCheck
	if t.meta.PKIsHandle && !skipCheck {
		// Check key exists.
		recordKey := t.RecordKey(recordID)
		e := kv.ErrKeyExists.FastGen("Duplicate entry '%d' for key 'PRIMARY'", recordID)
		txn.SetOption(kv.PresumeKeyNotExistsError, e)
		_, err = txn.Get(recordKey)
		if err == nil {
			return recordID, errors.Trace(e)
		} else if !terror.ErrorEqual(err, kv.ErrNotExist) {
			return 0, errors.Trace(err)
		}
		txn.DelOption(kv.PresumeKeyNotExistsError)
	}

	for _, v := range t.indices {
		if v == nil || v.Meta().State == model.StateDeleteOnly || v.Meta().State == model.StateDeleteReorganization {
			// if index is in delete only or delete reorganization state, we can't add it.
			continue
		}
		colVals, err2 := v.FetchValues(r)
		if err2 != nil {
			return 0, errors.Trace(err2)
		}
		var dupKeyErr error
		if !skipCheck && (v.Meta().Unique || v.Meta().Primary) {
			entryKey, err1 := t.genIndexKeyStr(colVals)
			if err1 != nil {
				return 0, errors.Trace(err1)
			}
			dupKeyErr = kv.ErrKeyExists.FastGen("Duplicate entry '%s' for key '%s'", entryKey, v.Meta().Name)
			txn.SetOption(kv.PresumeKeyNotExistsError, dupKeyErr)
		}
		if dupHandle, err := v.Create(bs, colVals, recordID); err != nil {
			if terror.ErrorEqual(err, kv.ErrKeyExists) {
				return dupHandle, errors.Trace(dupKeyErr)
			}
			return 0, errors.Trace(err)
		}
		txn.DelOption(kv.PresumeKeyNotExistsError)
	}
	return 0, nil
}
// IsRetryableError checks if the err is a fatal error and the under going operation is worth to retry. func IsRetryableError(err error) bool { if err == nil { return false } if terror.ErrorEqual(err, ErrRetryable) || terror.ErrorEqual(err, ErrLockConflict) || terror.ErrorEqual(err, ErrConditionNotMatch) || // TiKV exception message will tell you if you should retry or not strings.Contains(err.Error(), "try again later") { return true } return false }
// delReorgSchema deletes the actual table data of a dropped schema as the
// background part of a drop-schema job, then marks the job done.
func (d *ddl) delReorgSchema(t *meta.Meta, job *model.Job) error {
	dbInfo := &model.DBInfo{}
	if err := job.DecodeArgs(dbInfo); err != nil {
		// arg error, cancel this job.
		job.State = model.JobCancelled
		return errors.Trace(err)
	}

	tables, err := t.ListTables(dbInfo.ID)
	if terror.ErrorEqual(meta.ErrDBNotExists, err) {
		// The schema meta is already gone, so there is nothing to clean up.
		job.State = model.JobDone
		return nil
	}
	if err != nil {
		return errors.Trace(err)
	}

	if err = d.dropSchemaData(dbInfo, tables); err != nil {
		return errors.Trace(err)
	}

	// finish this background job
	job.SchemaState = model.StateNone
	job.State = model.JobDone
	return nil
}
// Exec implements the stmt.Statement Exec interface. func (s *DropDatabaseStmt) Exec(ctx context.Context) (rset.Recordset, error) { err := sessionctx.GetDomain(ctx).DDL().DropSchema(ctx, model.NewCIStr(s.Name)) if terror.ErrorEqual(err, ddl.ErrNotExists) && s.IfExists { err = nil } return nil, errors.Trace(err) }
// TestSetPwd exercises SET PASSWORD: creating a user with an empty
// password, updating it via SET PASSWORD FOR, and the error cases for an
// empty or non-existent session user.
func (s *testSuite) TestSetPwd(c *C) {
	defer testleak.AfterTest(c)()
	tk := testkit.NewTestKit(c, s.store)

	createUserSQL := `CREATE USER 'testpwd'@'localhost' IDENTIFIED BY '';`
	tk.MustExec(createUserSQL)
	result := tk.MustQuery(`SELECT Password FROM mysql.User WHERE User="******" and Host="localhost"`)
	rowStr := fmt.Sprintf("%v", []byte(""))
	result.Check(testkit.Rows(rowStr))

	// set password for
	tk.MustExec(`SET PASSWORD FOR 'testpwd'@'localhost' = 'password';`)
	result = tk.MustQuery(`SELECT Password FROM mysql.User WHERE User="******" and Host="localhost"`)
	rowStr = fmt.Sprintf("%v", []byte(util.EncodePassword("password")))
	result.Check(testkit.Rows(rowStr))

	// set password
	setPwdSQL := `SET PASSWORD = '******'`
	// Session user is empty.
	_, err := tk.Exec(setPwdSQL)
	c.Check(err, NotNil)
	tk.Se, err = tidb.CreateSession(s.store)
	c.Check(err, IsNil)
	ctx := tk.Se.(context.Context)
	ctx.GetSessionVars().User = "******"
	// Session user doesn't exist.
	_, err = tk.Exec(setPwdSQL)
	c.Check(terror.ErrorEqual(err, executor.ErrPasswordNoMatch), IsTrue)
	// normal
	ctx.GetSessionVars().User = "******"
	tk.MustExec(setPwdSQL)
	result = tk.MustQuery(`SELECT Password FROM mysql.User WHERE User="******" and Host="localhost"`)
	rowStr = fmt.Sprintf("%v", []byte(util.EncodePassword("pwd")))
	result.Check(testkit.Rows(rowStr))
}
// removeRowData locks row h and deletes its per-column data keys and its
// row lock key.
func (t *Table) removeRowData(ctx context.Context, h int64) error {
	if err := t.LockRow(ctx, h); err != nil {
		return errors.Trace(err)
	}
	txn, err := ctx.GetTxn(false)
	if err != nil {
		return errors.Trace(err)
	}
	// Remove the row's columns one by one.
	for _, col := range t.Columns {
		k := t.RecordKey(h, col)
		err = txn.Delete([]byte(k))
		if err != nil {
			if col.State != model.StatePublic && terror.ErrorEqual(err, kv.ErrNotExist) {
				// If the column is not in public state, we may have not added the column,
				// or already deleted the column, so skip ErrNotExist error.
				continue
			}
			return errors.Trace(err)
		}
	}
	// Remove row lock
	err = txn.Delete([]byte(t.RecordKey(h, nil)))
	if err != nil {
		return errors.Trace(err)
	}
	return nil
}
// Run is the connection's main loop: it reads client packets and dispatches
// them until the client disconnects or a fatal error occurs. Panics in the
// loop are logged with a stack trace, and the connection is always closed
// on exit.
func (cc *clientConn) Run() {
	defer func() {
		r := recover()
		if r != nil {
			const size = 4096
			buf := make([]byte, size)
			buf = buf[:runtime.Stack(buf, false)]
			log.Errorf("lastCmd %s, %v, %s", cc.lastCmd, r, buf)
		}
		cc.Close()
	}()

	for {
		cc.alloc.Reset()
		data, err := cc.readPacket()
		if err != nil {
			// EOF means the client hung up; don't log that as an error.
			if terror.ErrorNotEqual(err, io.EOF) {
				log.Error(err)
			}
			return
		}

		if err := cc.dispatch(data); err != nil {
			if terror.ErrorEqual(err, io.EOF) {
				return
			}
			log.Errorf("dispatch error %s, %s", errors.ErrorStack(err), cc)
			log.Errorf("cmd: %s", string(data[1:]))
			cc.writeError(err)
		}
		// Reset the packet sequence number for the next command.
		cc.pkg.sequence = 0
	}
}
// checkRecordAndIndex iterates all records of t and verifies that each one
// has a matching entry in index idx (same column values and same handle).
func checkRecordAndIndex(txn kv.Transaction, t table.Table, idx *column.IndexedCol) error {
	cols := make([]*column.Col, len(idx.Columns))
	for i, col := range idx.Columns {
		cols[i] = t.Cols()[col.Offset]
	}

	startKey := t.RecordKey(0, nil)
	kvIndex := kv.NewKVIndex(t.IndexPrefix(), idx.Name.L, idx.ID, idx.Unique)
	filterFunc := func(h1 int64, vals1 []interface{}, cols []*column.Col) (bool, error) {
		isExist, h2, err := kvIndex.Exist(txn, vals1, h1)
		if terror.ErrorEqual(err, kv.ErrKeyExists) {
			// The values exist in the index but under a different handle.
			record1 := &RecordData{Handle: h1, Values: vals1}
			record2 := &RecordData{Handle: h2, Values: vals1}
			return false, errors.Errorf("index:%v != record:%v", record2, record1)
		}
		if err != nil {
			return false, errors.Trace(err)
		}
		if !isExist {
			// The record has no index entry at all.
			record := &RecordData{Handle: h1, Values: vals1}
			return false, errors.Errorf("index:%v != record:%v", nil, record)
		}
		return true, nil
	}
	err := t.IterRecords(txn, startKey, cols, filterFunc)
	if err != nil {
		return errors.Trace(err)
	}

	return nil
}
// loadSchemaInLoop reloads the schema on every lease tick, and resets the
// ticker whenever a new lease duration arrives on do.leaseCh.
func (do *Domain) loadSchemaInLoop(lease time.Duration) {
	ticker := time.NewTicker(lease)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			err := do.Reload()
			// we may close store in test, but the domain load schema loop is still checking,
			// so we can't panic for ErrDBClosed and just return here.
			if terror.ErrorEqual(err, localstore.ErrDBClosed) {
				return
			} else if err != nil {
				log.Fatalf("[ddl] reload schema err %v", errors.ErrorStack(err))
			}
		case newLease := <-do.leaseCh:
			if lease == newLease {
				// nothing to do
				continue
			}

			lease = newLease
			// reset ticker too.
			ticker.Stop()
			ticker = time.NewTicker(lease)
		}
	}
}
func newDBIter(s *dbSnapshot, key kv.Key, reverse bool) (*dbIter, error) { var ( k kv.Key v []byte err error ) if reverse { k, v, err = s.reverseMvccSeek(key) } else { k, v, err = s.mvccSeek(key, false) } if err != nil { if terror.ErrorEqual(err, kv.ErrNotExist) { err = nil } return &dbIter{valid: false}, errors.Trace(err) } return &dbIter{ s: s, valid: true, k: k, v: v, reverse: reverse, }, nil }
// TestValidator feeds a set of statements through optimizer.Validate and
// checks that each produces the expected validation error (or none).
func (s *testValidatorSuite) TestValidator(c *C) {
	cases := []struct {
		sql       string
		inPrepare bool
		err       error
	}{
		{"select ?", false, parser.ErrSyntax},
		{"select ?", true, nil},
		{"create table t(id int not null auto_increment default 2, key (id))", true,
			errors.New("Invalid default value for 'id'")},
		{"create table t(id int not null default 2 auto_increment, key (id))", true,
			errors.New("Invalid default value for 'id'")},
		{"create table t(id int not null auto_increment)", true,
			errors.New("Incorrect table definition; there can be only one auto column and it must be defined as a key")},
		{"create table t(id int not null auto_increment, c int auto_increment, key (id, c))", true,
			errors.New("Incorrect table definition; there can be only one auto column and it must be defined as a key")},
		{"create table t(id int not null auto_increment, c int, key (c, id))", true,
			errors.New("Incorrect table definition; there can be only one auto column and it must be defined as a key")},
		{"create table t(id decimal auto_increment, key (id))", true,
			errors.New("Incorrect column specifier for column 'id'")},
		{"create table t(id float auto_increment, key (id))", true, nil},
	}
	store, err := tidb.NewStore(tidb.EngineGoLevelDBMemory)
	c.Assert(err, IsNil)
	se, err := tidb.CreateSession(store)
	c.Assert(err, IsNil)
	for _, ca := range cases {
		stmts, err1 := tidb.Parse(se.(context.Context), ca.sql)
		c.Assert(err1, IsNil)
		c.Assert(stmts, HasLen, 1)
		stmt := stmts[0]
		err = optimizer.Validate(stmt, ca.inPrepare)
		c.Assert(terror.ErrorEqual(err, ca.err), IsTrue)
	}
}
// TestSchema runs a schema create/drop lifecycle through the DDL worker and
// then verifies that dropping a no-longer-existing schema fails with
// DatabaseNotExists.
func (s *testSchemaSuite) TestSchema(c *C) {
	store := testCreateStore(c, "test_schema")
	defer store.Close()

	lease := 100 * time.Millisecond
	d1 := newDDL(store, nil, nil, lease)
	defer d1.close()

	ctx := mock.NewContext()
	dbInfo := testSchemaInfo(c, d1, "test")

	job := testCreateSchema(c, ctx, d1, dbInfo)
	testCheckSchemaState(c, d1, dbInfo, model.StatePublic)
	testCheckJobDone(c, d1, job, true)

	job = testDropSchema(c, ctx, d1, dbInfo)
	testCheckSchemaState(c, d1, dbInfo, model.StateNone)
	testCheckJobDone(c, d1, job, false)

	// Dropping the schema a second time must fail: it no longer exists.
	job = &model.Job{
		SchemaID: dbInfo.ID,
		Type:     model.ActionDropSchema,
	}
	err := d1.startDDLJob(ctx, job)
	c.Assert(terror.ErrorEqual(err, infoschema.DatabaseNotExists), IsTrue)
}
// dropTableColumn deletes the stored data of colInfo for every row of t,
// batch by batch, resuming from reorgInfo.Handle after an interruption.
func (d *ddl) dropTableColumn(t table.Table, colInfo *model.ColumnInfo, reorgInfo *reorgInfo) error {
	version := reorgInfo.SnapshotVer
	seekHandle := reorgInfo.Handle
	col := &column.Col{ColumnInfo: *colInfo}
	for {
		// Fetch the next batch of row handles from the snapshot.
		handles, err := d.getSnapshotRows(t, version, seekHandle)
		if err != nil {
			return errors.Trace(err)
		} else if len(handles) == 0 {
			return nil
		}

		seekHandle = handles[len(handles)-1] + 1
		err = kv.RunInNewTxn(d.store, true, func(txn kv.Transaction) error {
			if err1 := d.isReorgRunnable(txn); err1 != nil {
				return errors.Trace(err1)
			}

			var h int64
			for _, h = range handles {
				key := t.RecordKey(h, col)
				err1 := txn.Delete(key)
				// The column value may never have been written for this
				// row, so a missing key is acceptable.
				if err1 != nil && !terror.ErrorEqual(err1, kv.ErrNotExist) {
					return errors.Trace(err1)
				}
			}
			// Persist progress so a restart resumes after the last handle.
			return errors.Trace(reorgInfo.UpdateHandle(txn, h))
		})
		if err != nil {
			return errors.Trace(err)
		}
	}
}
// Get returns the value associated with key. func (m *memDbBuffer) Get(k Key) ([]byte, error) { v, err := m.db.Get(k) if terror.ErrorEqual(err, leveldb.ErrNotFound) { return nil, ErrNotExist } return v, nil }
// TestSchema runs a schema create/drop lifecycle through the DDL worker and
// then verifies that dropping a no-longer-existing schema fails with
// ErrDatabaseDropExists.
func (s *testSchemaSuite) TestSchema(c *C) {
	defer testleak.AfterTest(c)()
	store := testCreateStore(c, "test_schema")
	defer store.Close()
	d1 := newDDL(store, nil, nil, testLease)
	defer d1.close()

	ctx := mock.NewContext()
	dbInfo := testSchemaInfo(c, d1, "test")

	job := testCreateSchema(c, ctx, d1, dbInfo)
	testCheckSchemaState(c, d1, dbInfo, model.StatePublic)
	testCheckJobDone(c, d1, job, true)

	job = testDropSchema(c, ctx, d1, dbInfo)
	testCheckSchemaState(c, d1, dbInfo, model.StateNone)
	testCheckJobDone(c, d1, job, false)

	// Dropping the schema a second time must fail: it no longer exists.
	job = &model.Job{
		SchemaID: dbInfo.ID,
		Type:     model.ActionDropSchema,
	}
	err := d1.doDDLJob(ctx, job)
	c.Assert(terror.ErrorEqual(err, infoschema.ErrDatabaseDropExists), IsTrue)
}
// checkColumnKVExist reads the row with the given handle and verifies that
// col's value is present and equals columnValue, or is absent, according to
// isExist.
func (s *testColumnSuite) checkColumnKVExist(ctx context.Context, t table.Table, handle int64, col *table.Column, columnValue interface{}, isExist bool) error {
	txn, err := ctx.GetTxn(true)
	if err != nil {
		return errors.Trace(err)
	}
	defer ctx.CommitTxn()
	key := t.RecordKey(handle)
	data, err := txn.Get(key)
	if !isExist {
		// A completely missing row satisfies "value must be absent".
		if terror.ErrorEqual(err, kv.ErrNotExist) {
			return nil
		}
	}
	if err != nil {
		return errors.Trace(err)
	}
	colMap := make(map[int64]*types.FieldType)
	colMap[col.ID] = &col.FieldType
	rowMap, err := tablecodec.DecodeRow(data, colMap)
	if err != nil {
		return errors.Trace(err)
	}
	val, ok := rowMap[col.ID]
	if isExist {
		if !ok || val.GetValue() != columnValue {
			return errors.Errorf("%v is not equal to %v", val.GetValue(), columnValue)
		}
	} else {
		if ok {
			return errors.Errorf("column value should not exists")
		}
	}
	return nil
}
// checkSchemaValid verifies that the schema version used by the current
// transaction is still valid, retrying a bounded number of times since the
// schema may be reloading concurrently.
func (s *session) checkSchemaValid() error {
	var ts uint64
	if s.txn != nil {
		ts = s.txn.StartTS()
	} else {
		// No active transaction: forget the previously recorded version.
		s.schemaVerInCurrTxn = 0
	}

	var err error
	var currSchemaVer int64
	for i := 0; i < schemaExpiredRetryTimes; i++ {
		currSchemaVer, err = sessionctx.GetDomain(s).SchemaValidity.Check(ts, s.schemaVerInCurrTxn)
		if err == nil {
			if s.txn == nil {
				// Record the version so the next transaction is checked
				// against it.
				s.schemaVerInCurrTxn = currSchemaVer
			}
			return nil
		}
		log.Infof("schema version original %d, current %d, sleep time %v",
			s.schemaVerInCurrTxn, currSchemaVer, checkSchemaValiditySleepTime)
		if terror.ErrorEqual(err, domain.ErrInfoSchemaChanged) {
			// The schema actually changed under us; retrying cannot help.
			break
		}
		time.Sleep(checkSchemaValiditySleepTime)
	}
	return errors.Trace(err)
}
// rowWithCols reads the values of cols for the row with handle h, decoding
// each column from its own record key. A missing value for a nullable
// column is left as the zero Datum.
func rowWithCols(txn kv.Retriever, t table.Table, h int64, cols []*table.Column) ([]types.Datum, error) {
	v := make([]types.Datum, len(cols))
	for i, col := range cols {
		if col.State != model.StatePublic {
			return nil, errInvalidColumnState.Gen("Cannot use none public column - %v", cols)
		}
		if col.IsPKHandleColumn(t.Meta()) {
			// The handle itself is the primary key value.
			v[i].SetInt64(h)
			continue
		}

		k := t.RecordKey(h, col)
		data, err := txn.Get(k)
		if terror.ErrorEqual(err, kv.ErrNotExist) && !mysql.HasNotNullFlag(col.Flag) {
			continue
		} else if err != nil {
			return nil, errors.Trace(err)
		}

		val, err := tables.DecodeValue(data, &col.FieldType)
		if err != nil {
			return nil, errors.Trace(err)
		}
		v[i] = val
	}
	return v, nil
}
// backfillIndexInTxn deals with a part of backfilling index data in a Transaction. // This part of the index data rows is defaultSmallBatchCnt. func (d *ddl) backfillIndexInTxn(t table.Table, kvIdx table.Index, handles []int64, txn kv.Transaction) (int64, error) { idxRecords, err := d.fetchRowColVals(txn, t, handles, kvIdx.Meta()) if err != nil { return 0, errors.Trace(err) } for _, idxRecord := range idxRecords { log.Debug("[ddl] backfill index...", idxRecord.handle) err = txn.LockKeys(idxRecord.key) if err != nil { return 0, errors.Trace(err) } // Create the index. handle, err := kvIdx.Create(txn, idxRecord.vals, idxRecord.handle) if err != nil { if terror.ErrorEqual(err, kv.ErrKeyExists) && idxRecord.handle == handle { // Index already exists, skip it. continue } return 0, errors.Trace(err) } } return idxRecords[len(idxRecords)-1].handle, nil }
// checkRecordAndIndex iterates all records of t and verifies that each one
// has a matching entry (same values and same handle) in index idx.
func checkRecordAndIndex(txn kv.Transaction, t table.Table, idx table.Index) error {
	cols := make([]*table.Column, len(idx.Meta().Columns))
	for i, col := range idx.Meta().Columns {
		cols[i] = t.Cols()[col.Offset]
	}

	startKey := t.RecordKey(0, nil)
	filterFunc := func(h1 int64, vals1 []types.Datum, cols []*table.Column) (bool, error) {
		isExist, h2, err := idx.Exist(txn, vals1, h1)
		if terror.ErrorEqual(err, kv.ErrKeyExists) {
			// The same values exist in the index under a different handle.
			record1 := &RecordData{Handle: h1, Values: vals1}
			record2 := &RecordData{Handle: h2, Values: vals1}
			return false, errDateNotEqual.Gen("index:%v != record:%v", record2, record1)
		}
		if err != nil {
			return false, errors.Trace(err)
		}
		if !isExist {
			// The record has no index entry at all.
			record := &RecordData{Handle: h1, Values: vals1}
			return false, errDateNotEqual.Gen("index:%v != record:%v", nil, record)
		}
		return true, nil
	}
	err := iterRecords(txn, t, startKey, cols, filterFunc)
	if err != nil {
		return errors.Trace(err)
	}

	return nil
}
// IsErrNotFound checks if err is a kind of NotFound error. func IsErrNotFound(err error) bool { if terror.ErrorEqual(err, ErrNotExist) { return true } return false }
// HGet gets the value of a hash field. func (t *TxStructure) HGet(key []byte, field []byte) ([]byte, error) { dataKey := t.encodeHashDataKey(key, field) value, err := t.reader.Get(dataKey) if terror.ErrorEqual(err, kv.ErrNotExist) { err = nil } return value, errors.Trace(err) }
// Exec implements the stmt.Statement Exec interface. func (s *DropIndexStmt) Exec(ctx context.Context) (rset.Recordset, error) { err := sessionctx.GetDomain(ctx).DDL().DropIndex(ctx, s.TableIdent.Full(ctx), model.NewCIStr(s.IndexName)) if (terror.ErrorEqual(err, ddl.ErrNotExists) || terror.DatabaseNotExists.Equal(err)) && s.IfExists { err = nil } return nil, errors.Trace(err) }