// TestDropIndexInterleaved drops an index that is interleaved into another
// table's data and verifies both the physical key counts and the descriptor.
func TestDropIndexInterleaved(t *testing.T) {
	defer leaktest.AfterTest(t)()
	const chunkSize = 200
	params, _ := createTestServerParams()
	params.Knobs = base.TestingKnobs{
		SQLSchemaChanger: &sql.SchemaChangerTestingKnobs{
			BackfillChunkSize: chunkSize,
		},
	}
	s, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()
	// Enough rows that the schema change needs more than two backfill chunks.
	numRows := 2*chunkSize + 1
	createKVInterleavedTable(t, sqlDB, numRows)

	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "kv")
	tablePrefix := roachpb.Key(keys.MakeTablePrefix(uint32(tableDesc.ID)))
	// 3 keys per row across the interleaved hierarchy (presumably parent row,
	// interleaved row, and interleaved index entry — see
	// createKVInterleavedTable for the exact layout).
	checkKeyCount(t, kvDB, tablePrefix, 3*numRows)
	if _, err := sqlDB.Exec(`DROP INDEX t.intlv@intlv_idx`); err != nil {
		t.Fatal(err)
	}
	// Dropping the index should remove exactly one key per row.
	checkKeyCount(t, kvDB, tablePrefix, 2*numRows)

	// Ensure that index is not active: the descriptor must no longer know it.
	tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "intlv")
	if _, _, err := tableDesc.FindIndexByName("intlv_idx"); err == nil {
		t.Fatalf("table descriptor still contains index after index is dropped")
	}
}
// Test that table names are not treated as case sensitive by the name cache. func TestTableNameNotCaseSensitive(t *testing.T) { defer leaktest.AfterTest(t)() s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop() leaseManager := s.LeaseManager().(*LeaseManager) if _, err := db.Exec(` CREATE DATABASE t; CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); `); err != nil { t.Fatal(err) } // Populate the name cache. if _, err := db.Exec("SELECT * FROM t.test;"); err != nil { t.Fatal(err) } tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") // Check that we can get the table by a different name. lease := leaseManager.tableNames.get(tableDesc.ParentID, "tEsT", s.Clock()) if lease == nil { t.Fatalf("no name cache entry") } if err := leaseManager.Release(lease); err != nil { t.Fatal(err) } }
// TestDropTableInterleaved tests dropping a table that is interleaved within
// another table.
func TestDropTableInterleaved(t *testing.T) {
	defer leaktest.AfterTest(t)()
	params, _ := createTestServerParams()
	s, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()
	// Enough rows that the table data is deleted in more than one truncate
	// chunk.
	numRows := 2*sql.TableTruncateChunkSize + 1
	createKVInterleavedTable(t, sqlDB, numRows)

	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "kv")
	tablePrefix := roachpb.Key(keys.MakeTablePrefix(uint32(tableDesc.ID)))
	checkKeyCount(t, kvDB, tablePrefix, 3*numRows)
	if _, err := sqlDB.Exec(`DROP TABLE t.intlv`); err != nil {
		t.Fatal(err)
	}
	// Only the parent table's keys should remain.
	checkKeyCount(t, kvDB, tablePrefix, numRows)

	// Test that deleted table cannot be used. This prevents regressions where
	// name -> descriptor ID caches might make this statement erroneously work.
	if _, err := sqlDB.Exec(`SELECT * FROM t.intlv`); !testutils.IsError(
		err, `table "t.intlv" does not exist`,
	) {
		t.Fatalf("different error than expected: %v", err)
	}
}
// Test that abruptly closing a pgwire connection releases all leases held by
// that session.
func TestPGWireConnectionCloseReleasesLeases(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, _, kvDB := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()
	// Note: the local `url` shadows the net/url package after this statement;
	// the RHS `url.User` still resolves to the package.
	url, cleanupConn := sqlutils.PGUrl(t, s.ServingAddr(), "SetupServer", url.User(security.RootUser))
	defer cleanupConn()
	// Use the raw lib/pq driver so the connection can be closed abruptly,
	// bypassing database/sql connection pooling.
	conn, err := pq.Open(url.String())
	if err != nil {
		t.Fatal(err)
	}
	ex := conn.(driver.Execer)
	if _, err := ex.Exec("CREATE DATABASE test", nil); err != nil {
		t.Fatal(err)
	}
	if _, err := ex.Exec("CREATE TABLE test.t (i INT PRIMARY KEY)", nil); err != nil {
		t.Fatal(err)
	}
	// Start a txn so leases are accumulated by queries.
	if _, err := ex.Exec("BEGIN", nil); err != nil {
		t.Fatal(err)
	}
	// Get a table lease.
	if _, err := ex.Exec("SELECT * FROM test.t", nil); err != nil {
		t.Fatal(err)
	}
	// Abruptly close the connection.
	if err := conn.Close(); err != nil {
		t.Fatal(err)
	}
	// Verify that there are no leases held.
	tableDesc := sqlbase.GetTableDescriptor(kvDB, "test", "t")
	lm := s.LeaseManager().(*LeaseManager)
	// Looking for a table state validates that there used to be a lease on the
	// table.
	ts := lm.findTableState(tableDesc.ID, false /* create */)
	if ts == nil {
		t.Fatal("table state not found")
	}
	ts.mu.Lock()
	leases := ts.active.data
	ts.mu.Unlock()
	if len(leases) != 1 {
		t.Fatalf("expected one lease, found: %d", len(leases))
	}
	// Wait for the lease to be released; release happens asynchronously after
	// the connection teardown, hence the retry loop.
	util.SucceedsSoon(t, func() error {
		ts.mu.Lock()
		refcount := ts.active.data[0].refcount
		ts.mu.Unlock()
		if refcount != 0 {
			return errors.Errorf(
				"expected lease to be unused, found refcount: %d", refcount)
		}
		return nil
	})
}
// Test that we fail to lease a table that was marked for deletion. func TestCantLeaseDeletedTable(testingT *testing.T) { defer leaktest.AfterTest(testingT)() var mu syncutil.Mutex clearSchemaChangers := false params, _ := createTestServerParams() params.Knobs = base.TestingKnobs{ SQLSchemaChanger: &csql.SchemaChangerTestingKnobs{ SyncFilter: func(tscc csql.TestingSchemaChangerCollection) { mu.Lock() defer mu.Unlock() if clearSchemaChangers { tscc.ClearSchemaChangers() } }, AsyncExecNotification: asyncSchemaChangerDisabled, }, } t := newLeaseTest(testingT, params) defer t.cleanup() sql := ` CREATE DATABASE test; CREATE TABLE test.t(a INT PRIMARY KEY); ` _, err := t.db.Exec(sql) if err != nil { t.Fatal(err) } // Block schema changers so that the table we're about to DROP is not actually // dropped; it will be left in a "deleted" state. mu.Lock() clearSchemaChangers = true mu.Unlock() // DROP the table _, err = t.db.Exec(`DROP TABLE test.t`) if err != nil { t.Fatal(err) } // Make sure we can't get a lease on the descriptor. tableDesc := sqlbase.GetTableDescriptor(t.kvDB, "test", "t") // try to acquire at a bogus version to make sure we don't get back a lease we // already had. _, err = t.acquire(1, tableDesc.ID, tableDesc.Version+1) if !testutils.IsError(err, "table is being dropped") { t.Fatalf("got a different error than expected: %v", err) } }
// TestDropIndex drops a secondary index and verifies both that its keys are
// deleted and that it disappears from the table descriptor.
func TestDropIndex(t *testing.T) {
	defer leaktest.AfterTest(t)()
	const chunkSize = 200
	params, _ := createTestServerParams()
	params.Knobs = base.TestingKnobs{
		SQLSchemaChanger: &sql.SchemaChangerTestingKnobs{
			BackfillChunkSize: chunkSize,
		},
	}
	s, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()
	// Enough rows that the schema change needs more than two backfill chunks.
	numRows := 2*chunkSize + 1
	createKVTable(t, sqlDB, numRows)

	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "kv")
	// Sanity-check that the index exists and is live before dropping it.
	status, i, err := tableDesc.FindIndexByName("foo")
	if err != nil {
		t.Fatal(err)
	}
	if status != sqlbase.DescriptorActive {
		t.Fatal("Index 'foo' is not active.")
	}
	indexPrefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(tableDesc, tableDesc.Indexes[i].ID))

	checkKeyCount(t, kvDB, indexPrefix, numRows)
	if _, err := sqlDB.Exec(`DROP INDEX t.kv@foo`); err != nil {
		t.Fatal(err)
	}
	// All index keys must be gone.
	checkKeyCount(t, kvDB, indexPrefix, 0)

	tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "kv")
	if _, _, err := tableDesc.FindIndexByName("foo"); err == nil {
		t.Fatalf("table descriptor still contains index after index is dropped")
	}
}
// setupRanges creates table t.test, inserts a few spaced-out rows, splits each
// row into its own range, and warms the range caches. It returns the resulting
// range descriptors (one per row) and the table descriptor.
func setupRanges(
	db *gosql.DB, s *server.TestServer, cdb *client.DB, t *testing.T,
) ([]roachpb.RangeDescriptor, *sqlbase.TableDescriptor) {
	if _, err := db.Exec(`CREATE DATABASE t`); err != nil {
		t.Fatal(err)
	}
	if _, err := db.Exec(`CREATE TABLE test (k INT PRIMARY KEY)`); err != nil {
		t.Fatal(err)
	}
	values := []int{0, 10, 20}
	for _, val := range values {
		// Multiply by 10 to space out the values so we can easily construct keys
		// that fall within the range.
		if _, err := db.Exec("INSERT INTO test VALUES ($1)", val*10); err != nil {
			t.Fatal(err)
		}
	}
	tableDesc := sqlbase.GetTableDescriptor(cdb, "t", "test")
	// Split every SQL row to its own range.
	rowRanges := make([]roachpb.RangeDescriptor, len(values))
	for i, val := range values {
		var err error
		var l roachpb.RangeDescriptor
		// splitRangeAtVal returns the left- and right-hand ranges of the split;
		// the left side updates the previous row's descriptor, since that split
		// shrank it.
		l, rowRanges[i], err = splitRangeAtVal(s, tableDesc, val)
		if err != nil {
			t.Fatal(err)
		}
		if i > 0 {
			rowRanges[i-1] = l
		}
	}

	// TODO(andrei): The sleep below serves to remove the noise that the
	// RangeCache might encounter, clobbering descriptors with old versions.
	// Remove once all the causes of such clobbering, listed in #10751, have been
	// fixed.
	time.Sleep(300 * time.Millisecond)

	// Run a select across the whole table to populate the caches with all the
	// ranges.
	if _, err := db.Exec(`SELECT COUNT(1) from test`); err != nil {
		t.Fatal(err)
	}
	return rowRanges, tableDesc
}
// TestAddingFKs checks the behavior of a table in the non-public `ADD` state.
// Being non-public, it should not be visible to clients, and is therefore
// assumed to be empty (e.g. by foreign key checks), since no one could have
// written to it yet.
func TestAddingFKs(t *testing.T) {
	defer leaktest.AfterTest(t)()
	params, _ := createTestServerParams()
	s, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()

	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.products (id INT PRIMARY KEY);
INSERT INTO t.products VALUES (1), (2);
CREATE TABLE t.orders (id INT PRIMARY KEY, product INT REFERENCES t.products, INDEX (product));
`); err != nil {
		t.Fatal(err)
	}

	// Step the referencing table back to the ADD state by rewriting its
	// descriptor directly in the KV store.
	ordersDesc := sqlbase.GetTableDescriptor(kvDB, "t", "orders")
	ordersDesc.State = sqlbase.TableDescriptor_ADD
	ordersDesc.Version++
	if err := kvDB.Put(
		context.TODO(),
		sqlbase.MakeDescMetadataKey(ordersDesc.ID),
		sqlbase.WrapDescriptor(ordersDesc),
	); err != nil {
		t.Fatal(err)
	}

	// Generally a referenced table needs to lookup referencing tables to check
	// FKs during delete operations, but referencing tables in the ADD state are
	// given special treatment: they are assumed empty, so the delete succeeds.
	if _, err := sqlDB.Exec(`DELETE FROM t.products`); err != nil {
		t.Fatal(err)
	}

	// Client should not see the orders table.
	if _, err := sqlDB.Exec(
		`SELECT * FROM t.orders`,
	); !testutils.IsError(err, "table is being added") {
		t.Fatal(err)
	}
}
// verifyTables ensures that the correct number of tables were created and that
// they all correspond to individual table descriptor IDs in the correct range
// of values. Table indices are consumed from the `completed` channel (which
// must be closed by the producer for the loop to terminate).
func verifyTables(
	t *testing.T,
	tc *testcluster.TestCluster,
	completed chan int,
	expectedNumOfTables int,
	descIDStart int64,
) {
	descIDEnd := descIDStart + int64(expectedNumOfTables)
	usedTableIDs := make(map[sqlbase.ID]string)
	var count int
	for id := range completed {
		count++
		tableName := fmt.Sprintf("table_%d", id)
		// Rotate through the cluster's KV clients to spread the descriptor reads.
		kvDB := tc.Servers[count%tc.NumServers()].KVClient().(*client.DB)
		tableDesc := sqlbase.GetTableDescriptor(kvDB, "test", tableName)
		if int64(tableDesc.ID) < descIDStart || int64(tableDesc.ID) >= descIDEnd {
			t.Fatalf(
				"table %s's ID %d is not within the expected range of %d to %d",
				tableName,
				tableDesc.ID,
				descIDStart,
				descIDEnd,
			)
		}
		usedTableIDs[tableDesc.ID] = tableName
	}
	// The map also guarantees the IDs were all distinct.
	if e, a := expectedNumOfTables, len(usedTableIDs); e != a {
		t.Fatalf("expected %d tables created, only got %d", e, a)
	}
	// Finally, the descriptor ID generator must sit exactly at the end of the
	// consumed range.
	kvDB := tc.Servers[count%tc.NumServers()].KVClient().(*client.DB)
	if descID, err := kvDB.Get(context.Background(), keys.DescIDGenerator); err != nil {
		t.Fatal(err)
	} else {
		if e, a := descIDEnd, descID.ValueInt(); e != a {
			t.Fatalf("expected next descriptor ID to be %d, got %d", e, a)
		}
	}
}
// Tests that a name cache entry backed by an expired lease is not returned.
func TestNameCacheEntryDoesntReturnExpiredLease(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()
	leaseManager := s.LeaseManager().(*LeaseManager)

	const tableName = "test"

	if _, err := db.Exec(fmt.Sprintf(`
CREATE DATABASE t;
CREATE TABLE t.%s (k CHAR PRIMARY KEY, v CHAR);
`, tableName)); err != nil {
		t.Fatal(err)
	}

	// Populate the name cache.
	if _, err := db.Exec("SELECT * FROM t.test;"); err != nil {
		t.Fatal(err)
	}

	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", tableName)

	// Check the assumptions this tests makes: that there is a cache entry
	// (with a valid lease).
	if lease := leaseManager.tableNames.get(tableDesc.ParentID, tableName, s.Clock()); lease == nil {
		t.Fatalf("name cache has no unexpired entry for (%d, %s)", tableDesc.ParentID, tableName)
	} else {
		if err := leaseManager.Release(lease); err != nil {
			t.Fatal(err)
		}
	}

	// Force-expire all leases, then verify the cache stops returning the entry.
	leaseManager.ExpireLeases(s.Clock())

	// Check the name no longer resolves.
	if lease := leaseManager.tableNames.get(tableDesc.ParentID, tableName, s.Clock()); lease != nil {
		t.Fatalf("name cache has unexpired entry for (%d, %s): %s", tableDesc.ParentID, tableName, lease)
	}
}
// TestAcquireFreshestFromStoreRaces runs // LeaseManager.acquireFreshestFromStore() in parallel to test for races. func TestAcquireFreshestFromStoreRaces(t *testing.T) { defer leaktest.AfterTest(t)() s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop() leaseManager := s.LeaseManager().(*LeaseManager) if _, err := db.Exec(` CREATE DATABASE t; CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); `); err != nil { t.Fatal(err) } tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") var wg sync.WaitGroup numRoutines := 10 wg.Add(numRoutines) for i := 0; i < numRoutines; i++ { go func() { defer wg.Done() err := kvDB.Txn(context.TODO(), func(txn *client.Txn) error { lease, err := leaseManager.acquireFreshestFromStore(txn, tableDesc.ID) if err != nil { return err } if err := leaseManager.Release(lease); err != nil { return err } return nil }) if err != nil { t.Error(err) } }() } wg.Wait() }
// Test schema change backfills are not affected by various operations
// that run simultaneously.
func TestRaceWithBackfill(t *testing.T) {
	defer leaktest.AfterTest(t)()
	var backfillNotification chan bool
	params, _ := createTestServerParams()
	// Disable asynchronous schema change execution to allow synchronous path
	// to trigger start of backfill notification.
	params.Knobs = base.TestingKnobs{
		SQLSchemaChanger: &csql.SchemaChangerTestingKnobs{
			RunBeforeBackfillChunk: func(sp roachpb.Span) error {
				if backfillNotification != nil {
					// Close channel to notify that the backfill has started.
					close(backfillNotification)
					backfillNotification = nil
				}
				return nil
			},
			AsyncExecNotification: asyncSchemaChangerDisabled,
		},
	}
	server, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer server.Stopper().Stop()

	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k INT PRIMARY KEY, v INT, pi DECIMAL DEFAULT (DECIMAL '3.14'));
CREATE UNIQUE INDEX vidx ON t.test (v);
`); err != nil {
		t.Fatal(err)
	}

	// Bulk insert.
	maxValue := 4000
	if err := bulkInsertIntoTable(sqlDB, maxValue); err != nil {
		t.Fatal(err)
	}

	// Read table descriptor for version.
	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")
	tablePrefix := roachpb.Key(keys.MakeTablePrefix(uint32(tableDesc.ID)))
	tableEnd := tablePrefix.PrefixEnd()
	// number of keys == 3 * number of rows; 2 column families and 1 index entry
	// for each row.
	if kvs, err := kvDB.Scan(context.TODO(), tablePrefix, tableEnd, 0); err != nil {
		t.Fatal(err)
	} else if e := 3 * (maxValue + 1); len(kvs) != e {
		t.Fatalf("expected %d key value pairs, but got %d", e, len(kvs))
	}

	// Run some schema changes with operations. The keyMultiple argument is the
	// expected number of keys per row after each change completes.

	// Add column.
	backfillNotification = make(chan bool)
	runSchemaChangeWithOperations(
		t,
		sqlDB,
		kvDB,
		"ALTER TABLE t.test ADD COLUMN x DECIMAL DEFAULT (DECIMAL '1.4')",
		maxValue,
		4,
		backfillNotification)

	// Drop column.
	backfillNotification = make(chan bool)
	runSchemaChangeWithOperations(
		t,
		sqlDB,
		kvDB,
		"ALTER TABLE t.test DROP pi",
		maxValue,
		3,
		backfillNotification)

	// Add index.
	backfillNotification = make(chan bool)
	runSchemaChangeWithOperations(
		t,
		sqlDB,
		kvDB,
		"CREATE UNIQUE INDEX foo ON t.test (v)",
		maxValue,
		4,
		backfillNotification)

	// Drop index.
	backfillNotification = make(chan bool)
	runSchemaChangeWithOperations(
		t,
		sqlDB,
		kvDB,
		"DROP INDEX t.test@vidx",
		maxValue,
		3,
		backfillNotification)

	// Verify that the index foo over v is consistent, and that column x has
	// been backfilled properly.
	rows, err := sqlDB.Query(`SELECT v, x from t.test@foo`)
	if err != nil {
		t.Fatal(err)
	}
	count := 0
	for ; rows.Next(); count++ {
		var val int
		var x float64
		if err := rows.Scan(&val, &x); err != nil {
			t.Errorf("row %d scan failed: %s", count, err)
			continue
		}
		if count != val {
			t.Errorf("e = %d, v = %d", count, val)
		}
		if 1.4 != x {
			t.Errorf("e = %f, v = %f", 1.4, x)
		}
	}
	if err := rows.Err(); err != nil {
		t.Fatal(err)
	}
	eCount := maxValue + 1
	if eCount != count {
		t.Fatalf("read the wrong number of rows: e = %d, v = %d", eCount, count)
	}

	// Verify that a table delete in the middle of a backfill works properly.
	// The backfill will terminate in the middle, and the delete will
	// successfully delete all the table data.
	//
	// This test could be made its own test but is placed here to speed up the
	// testing.
	notification := make(chan bool)
	backfillNotification = notification
	// Run the schema change in a separate goroutine.
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		// Start schema change that eventually runs a backfill.
		if _, err := sqlDB.Exec("CREATE UNIQUE INDEX bar ON t.test (v)"); err != nil {
			t.Error(err)
		}
		wg.Done()
	}()
	// Wait until the schema change backfill starts.
	<-notification
	// Wait for a short bit to ensure that the backfill has likely progressed
	// and written some data, but not long enough that the backfill has
	// completed.
	time.Sleep(10 * time.Millisecond)
	if _, err := sqlDB.Exec("DROP TABLE t.test"); err != nil {
		t.Fatal(err)
	}
	// Wait until the schema change is done.
	wg.Wait()
	// Ensure that the table data has been deleted.
	if kvs, err := kvDB.Scan(context.TODO(), tablePrefix, tableEnd, 0); err != nil {
		t.Fatal(err)
	} else if len(kvs) != 0 {
		t.Fatalf("expected %d key value pairs, but got %d", 0, len(kvs))
	}
}
// Run a particular schema change and run some OLTP operations in parallel, as
// soon as the schema change starts executing its backfill. The OLTP
// operations are net-zero (every update/delete is undone), so after the
// schema change completes the table should hold keyMultiple keys per row for
// the original maxValue+1 rows plus the temporary inserts.
func runSchemaChangeWithOperations(
	t *testing.T,
	sqlDB *gosql.DB,
	kvDB *client.DB,
	schemaChange string,
	maxValue int,
	keyMultiple int,
	backfillNotification chan bool,
) {
	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")

	// Run the schema change in a separate goroutine.
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		start := timeutil.Now()
		// Start schema change that eventually runs a backfill.
		if _, err := sqlDB.Exec(schemaChange); err != nil {
			t.Error(err)
		}
		t.Logf("schema change %s took %v", schemaChange, timeutil.Since(start))
		wg.Done()
	}()

	// Wait until the schema change backfill starts.
	<-backfillNotification

	// Run a variety of operations during the backfill.

	// Grabbing a schema change lease on the table will fail, disallowing
	// another schema change from being simultaneously executed.
	sc := csql.NewSchemaChangerForTesting(tableDesc.ID, 0, 0, *kvDB, nil)
	if l, err := sc.AcquireLease(); err == nil {
		t.Fatalf("schema change lease acquisition on table %d succeeded: %v", tableDesc.ID, l)
	}

	// Update some rows.
	var updatedKeys []int
	for i := 0; i < 10; i++ {
		k := rand.Intn(maxValue)
		v := maxValue + i + 1
		if _, err := sqlDB.Exec(`UPDATE t.test SET v = $1 WHERE k = $2`, v, k); err != nil {
			t.Error(err)
		}
		updatedKeys = append(updatedKeys, k)
	}
	// Reupdate updated values back to what they were before.
	for _, k := range updatedKeys {
		if _, err := sqlDB.Exec(`UPDATE t.test SET v = $1 WHERE k = $2`, maxValue-k, k); err != nil {
			t.Error(err)
		}
	}

	// Delete some rows.
	deleteStartKey := rand.Intn(maxValue - 10)
	for i := 0; i < 10; i++ {
		if _, err := sqlDB.Exec(`DELETE FROM t.test WHERE k = $1`, deleteStartKey+i); err != nil {
			t.Error(err)
		}
	}
	// Reinsert deleted rows.
	for i := 0; i < 10; i++ {
		k := deleteStartKey + i
		if _, err := sqlDB.Exec(`INSERT INTO t.test VALUES($1, $2)`, k, maxValue-k); err != nil {
			t.Error(err)
		}
	}

	// Insert some new rows.
	numInserts := 10
	for i := 0; i < numInserts; i++ {
		k := maxValue + i + 1
		if _, err := sqlDB.Exec(`INSERT INTO t.test VALUES($1, $1)`, k); err != nil {
			t.Error(err)
		}
	}

	wg.Wait() // for schema change to complete.

	// Verify the number of keys left behind in the table to validate schema
	// change operations.
	tablePrefix := roachpb.Key(keys.MakeTablePrefix(uint32(tableDesc.ID)))
	tableEnd := tablePrefix.PrefixEnd()
	if kvs, err := kvDB.Scan(context.TODO(), tablePrefix, tableEnd, 0); err != nil {
		t.Fatal(err)
	} else if e := keyMultiple * (maxValue + numInserts + 1); len(kvs) != e {
		for _, kv := range kvs {
			t.Errorf("key %s, value %s", kv.Key, kv.Value)
		}
		t.Fatalf("expected %d key value pairs, but got %d", e, len(kvs))
	}

	// Delete the rows inserted, restoring the table to its original contents.
	for i := 0; i < numInserts; i++ {
		if _, err := sqlDB.Exec(`DELETE FROM t.test WHERE k = $1`, maxValue+i+1); err != nil {
			t.Error(err)
		}
	}
}
// TestAsyncSchemaChanger verifies that the asynchronous schema changer
// executes schema changes (index creation, renames, and many concurrent
// changes) when the synchronous path is disabled.
func TestAsyncSchemaChanger(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// The descriptor changes made must have an immediate effect
	// so disable leases on tables.
	defer csql.TestDisableTableLeases()()
	// Disable synchronous schema change execution so the asynchronous schema
	// changer executes all schema changes.
	params, _ := createTestServerParams()
	params.Knobs = base.TestingKnobs{
		SQLSchemaChanger: &csql.SchemaChangerTestingKnobs{
			SyncFilter: func(tscc csql.TestingSchemaChangerCollection) {
				tscc.ClearSchemaChangers()
			},
			AsyncExecQuickly: true,
		},
	}
	s, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()

	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR);
INSERT INTO t.test VALUES ('a', 'b'), ('c', 'd');
`); err != nil {
		t.Fatal(err)
	}

	// Read table descriptor for version.
	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")
	// A long running schema change operation runs through
	// a state machine that increments the version by 3.
	expectedVersion := tableDesc.Version + 3

	// Run some schema change
	if _, err := sqlDB.Exec(`
CREATE INDEX foo ON t.test (v)
`); err != nil {
		t.Fatal(err)
	}

	retryOpts := retry.Options{
		InitialBackoff: 20 * time.Millisecond,
		MaxBackoff:     200 * time.Millisecond,
		Multiplier:     2,
	}

	// Wait until index is created.
	for r := retry.Start(retryOpts); r.Next(); {
		tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test")
		if len(tableDesc.Indexes) == 1 {
			break
		}
	}

	// Ensure that the indexes have been created.
	mTest := makeMutationTest(t, kvDB, sqlDB, tableDesc)
	indexQuery := `SELECT v FROM t.test@foo`
	mTest.CheckQueryResults(indexQuery, [][]string{{"b"}, {"d"}})

	// Ensure that the version has been incremented.
	tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test")
	newVersion := tableDesc.Version
	if newVersion != expectedVersion {
		t.Fatalf("bad version; e = %d, v = %d", expectedVersion, newVersion)
	}

	// Apply a schema change that only sets the UpVersion bit.
	expectedVersion = newVersion + 1

	mTest.Exec(`ALTER INDEX t.test@foo RENAME TO ufo`)

	for r := retry.Start(retryOpts); r.Next(); {
		// Ensure that the version gets incremented.
		tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test")
		name := tableDesc.Indexes[0].Name
		if name != "ufo" {
			t.Fatalf("bad index name %s", name)
		}
		newVersion = tableDesc.Version
		if newVersion == expectedVersion {
			break
		}
	}

	// Run many schema changes simultaneously and check
	// that they all get executed.
	count := 5
	for i := 0; i < count; i++ {
		mTest.Exec(fmt.Sprintf(`CREATE INDEX foo%d ON t.test (v)`, i))
	}
	// Wait until indexes are created.
	for r := retry.Start(retryOpts); r.Next(); {
		tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test")
		// count new indexes plus the renamed "ufo" index.
		if len(tableDesc.Indexes) == count+1 {
			break
		}
	}
	for i := 0; i < count; i++ {
		indexQuery := fmt.Sprintf(`SELECT v FROM t.test@foo%d`, i)
		mTest.CheckQueryResults(indexQuery, [][]string{{"b"}, {"d"}})
	}
}
// TestSchemaChangeProcess exercises the low-level schema changer machinery
// directly: MaybeIncrementVersion, IsDone, and RunStateMachineBeforeBackfill,
// including running the mutation state machine in both the ADD and DROP
// directions.
func TestSchemaChangeProcess(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// The descriptor changes made must have an immediate effect
	// so disable leases on tables.
	defer csql.TestDisableTableLeases()()

	params, _ := createTestServerParams()
	// Disable external processing of mutations.
	params.Knobs.SQLSchemaChanger = &csql.SchemaChangerTestingKnobs{
		AsyncExecNotification: asyncSchemaChangerDisabled,
	}
	s, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()

	var id = sqlbase.ID(keys.MaxReservedDescID + 2)
	var node = roachpb.NodeID(2)
	stopper := stop.NewStopper()
	leaseMgr := csql.NewLeaseManager(
		&base.NodeIDContainer{},
		*kvDB,
		hlc.NewClock(hlc.UnixNano, time.Nanosecond),
		csql.LeaseManagerTestingKnobs{},
		stopper,
		&csql.MemoryMetrics{},
	)
	defer stopper.Stop()
	changer := csql.NewSchemaChangerForTesting(id, 0, node, *kvDB, leaseMgr)

	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR, INDEX foo(v));
INSERT INTO t.test VALUES ('a', 'b'), ('c', 'd');
`); err != nil {
		t.Fatal(err)
	}

	// Read table descriptor for version.
	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")
	expectedVersion := tableDesc.Version

	// With no UpVersion bit set, MaybeIncrementVersion must be a no-op.
	desc, err := changer.MaybeIncrementVersion()
	if err != nil {
		t.Fatal(err)
	}
	tableDesc = desc.GetTable()
	newVersion := tableDesc.Version
	if newVersion != expectedVersion {
		t.Fatalf("bad version; e = %d, v = %d", expectedVersion, newVersion)
	}
	isDone, err := changer.IsDone()
	if err != nil {
		t.Fatal(err)
	}
	if !isDone {
		t.Fatalf("table expected to not have an outstanding schema change: %v", tableDesc)
	}

	// Check that MaybeIncrementVersion increments the version
	// correctly.
	expectedVersion++
	tableDesc.UpVersion = true
	if err := kvDB.Put(
		context.TODO(),
		sqlbase.MakeDescMetadataKey(tableDesc.ID),
		sqlbase.WrapDescriptor(tableDesc),
	); err != nil {
		t.Fatal(err)
	}
	isDone, err = changer.IsDone()
	if err != nil {
		t.Fatal(err)
	}
	if isDone {
		t.Fatalf("table expected to have an outstanding schema change: %v", desc.GetTable())
	}
	desc, err = changer.MaybeIncrementVersion()
	if err != nil {
		t.Fatal(err)
	}
	tableDesc = desc.GetTable()
	// The version bump must be visible both in the returned descriptor and in
	// the one persisted to the KV store.
	savedTableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")
	newVersion = tableDesc.Version
	if newVersion != expectedVersion {
		t.Fatalf("bad version in returned desc; e = %d, v = %d", expectedVersion, newVersion)
	}
	newVersion = savedTableDesc.Version
	if newVersion != expectedVersion {
		t.Fatalf("bad version in saved desc; e = %d, v = %d", expectedVersion, newVersion)
	}
	isDone, err = changer.IsDone()
	if err != nil {
		t.Fatal(err)
	}
	if !isDone {
		t.Fatalf("table expected to not have an outstanding schema change: %v", tableDesc)
	}

	// Check that RunStateMachineBeforeBackfill doesn't do anything
	// if there are no mutations queued.
	if err := changer.RunStateMachineBeforeBackfill(); err != nil {
		t.Fatal(err)
	}
	tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test")
	newVersion = tableDesc.Version
	if newVersion != expectedVersion {
		t.Fatalf("bad version; e = %d, v = %d", expectedVersion, newVersion)
	}

	// Check that RunStateMachineBeforeBackfill functions properly.
	expectedVersion = tableDesc.Version
	// Make a copy of the index for use in a mutation.
	index := protoutil.Clone(&tableDesc.Indexes[0]).(*sqlbase.IndexDescriptor)
	index.Name = "bar"
	index.ID = tableDesc.NextIndexID
	tableDesc.NextIndexID++
	changer = csql.NewSchemaChangerForTesting(id, tableDesc.NextMutationID, node, *kvDB, leaseMgr)
	tableDesc.Mutations = append(tableDesc.Mutations, sqlbase.DescriptorMutation{
		Descriptor_: &sqlbase.DescriptorMutation_Index{Index: index},
		Direction:   sqlbase.DescriptorMutation_ADD,
		State:       sqlbase.DescriptorMutation_DELETE_ONLY,
		MutationID:  tableDesc.NextMutationID,
	})
	tableDesc.NextMutationID++

	// Run state machine in both directions.
	for _, direction := range []sqlbase.DescriptorMutation_Direction{sqlbase.DescriptorMutation_ADD, sqlbase.DescriptorMutation_DROP} {
		tableDesc.Mutations[0].Direction = direction
		expectedVersion++
		if err := kvDB.Put(
			context.TODO(),
			sqlbase.MakeDescMetadataKey(tableDesc.ID),
			sqlbase.WrapDescriptor(tableDesc),
		); err != nil {
			t.Fatal(err)
		}
		// The expected end state.
		expectedState := sqlbase.DescriptorMutation_WRITE_ONLY
		if direction == sqlbase.DescriptorMutation_DROP {
			expectedState = sqlbase.DescriptorMutation_DELETE_ONLY
		}
		// Run two times to ensure idempotency of operations.
		for i := 0; i < 2; i++ {
			if err := changer.RunStateMachineBeforeBackfill(); err != nil {
				t.Fatal(err)
			}
			tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test")
			newVersion = tableDesc.Version
			if newVersion != expectedVersion {
				t.Fatalf("bad version; e = %d, v = %d", expectedVersion, newVersion)
			}
			state := tableDesc.Mutations[0].State
			if state != expectedState {
				t.Fatalf("bad state; e = %d, v = %d", expectedState, state)
			}
		}
	}
	// RunStateMachineBeforeBackfill() doesn't complete the schema change.
	isDone, err = changer.IsDone()
	if err != nil {
		t.Fatal(err)
	}
	if isDone {
		t.Fatalf("table expected to have an outstanding schema change: %v", tableDesc)
	}
}
// TestSchemaChangeReverseMutations tests that schema changes get reversed
// correctly when one of them violates a constraint.
func TestSchemaChangeReverseMutations(t *testing.T) {
	defer leaktest.AfterTest(t)()
	params, _ := createTestServerParams()
	const chunkSize = 200
	// Disable synchronous schema change processing so that the mutations get
	// processed asynchronously.
	var enableAsyncSchemaChanges uint32
	params.Knobs = base.TestingKnobs{
		SQLSchemaChanger: &csql.SchemaChangerTestingKnobs{
			SyncFilter: func(tscc csql.TestingSchemaChangerCollection) {
				tscc.ClearSchemaChangers()
			},
			AsyncExecNotification: func() error {
				if enable := atomic.LoadUint32(&enableAsyncSchemaChanges); enable == 0 {
					return errors.New("async schema changes are disabled")
				}
				return nil
			},
			AsyncExecQuickly:  true,
			BackfillChunkSize: chunkSize,
		},
	}
	s, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()

	// Create a k-v table.
	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k INT PRIMARY KEY, v INT);
`); err != nil {
		t.Fatal(err)
	}

	// Add some data.
	const maxValue = chunkSize + 1
	if err := bulkInsertIntoTable(sqlDB, maxValue); err != nil {
		t.Fatal(err)
	}

	// Create a column that is not NULL. This schema change doesn't return an
	// error only because we've turned off the synchronous execution path; it
	// will eventually fail when run by the asynchronous path.
	if _, err := sqlDB.Exec(`ALTER TABLE t.test ADD a INT NOT NULL, ADD c INT`); err != nil {
		t.Fatal(err)
	}
	// Add an index over a column that will be purged. This index will
	// eventually not get added.
	if _, err := sqlDB.Exec(`CREATE UNIQUE INDEX idx_a ON t.test (a)`); err != nil {
		t.Fatal(err)
	}

	// The purge of column 'a' doesn't influence these schema changes.

	// Drop column 'v' moves along just fine. The constraint 'foo' will not be
	// enforced because c is not added.
	if _, err := sqlDB.Exec(
		`ALTER TABLE t.test DROP v, ADD CONSTRAINT foo UNIQUE (c)`,
	); err != nil {
		t.Fatal(err)
	}
	// Add unique column 'b' moves along creating column b and the index on
	// it.
	if _, err := sqlDB.Exec(`ALTER TABLE t.test ADD b INT UNIQUE`); err != nil {
		t.Fatal(err)
	}

	// All of the above statements queued mutations; verify the expected count
	// before letting the async changer loose on them.
	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")
	if e := 7; e != len(tableDesc.Mutations) {
		t.Fatalf("e = %d, v = %d", e, len(tableDesc.Mutations))
	}

	// Enable async schema change processing.
	atomic.StoreUint32(&enableAsyncSchemaChanges, 1)

	// Wait until all the mutations have been processed.
	var rows *gosql.Rows
	expectedCols := []string{"k", "b"}
	testutils.SucceedsSoon(t, func() error {
		// Read table descriptor.
		tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test")
		if len(tableDesc.Mutations) > 0 {
			return errors.Errorf("%d mutations remaining", len(tableDesc.Mutations))
		}

		// Verify that t.test has the expected data. Read the table data while
		// ensuring that the correct table lease is in use.
		var err error
		rows, err = sqlDB.Query(`SELECT * from t.test`)
		if err != nil {
			t.Fatal(err)
		}
		cols, err := rows.Columns()
		if err != nil {
			t.Fatal(err)
		}

		// Ensure that sql is using the correct table lease.
		if len(cols) != len(expectedCols) {
			return errors.Errorf("incorrect columns: %v, expected: %v", cols, expectedCols)
		}
		if cols[0] != expectedCols[0] || cols[1] != expectedCols[1] {
			t.Fatalf("incorrect columns: %v", cols)
		}
		return nil
	})

	// rows contains the data; verify that it's the right data: k counts up
	// from 0, and b is NULL everywhere.
	vals := make([]interface{}, len(expectedCols))
	for i := range vals {
		vals[i] = new(interface{})
	}
	var count int64
	for ; rows.Next(); count++ {
		if err := rows.Scan(vals...); err != nil {
			t.Errorf("row %d scan failed: %s", count, err)
			continue
		}
		for j, v := range vals {
			if j == 0 {
				if val := *v.(*interface{}); val != nil {
					switch k := val.(type) {
					case int64:
						if count != k {
							t.Errorf("k = %d, expected %d", k, count)
						}
					default:
						t.Errorf("error input of type %T", k)
					}
				} else {
					t.Error("received nil value for column 'k'")
				}
			} else {
				if val := *v.(*interface{}); val != nil {
					t.Error("received non NULL value for column 'b'")
				}
			}
		}
	}
	if err := rows.Err(); err != nil {
		t.Fatal(err)
	}
	if eCount := int64(maxValue + 1); eCount != count {
		t.Fatalf("read the wrong number of rows: e = %d, v = %d", eCount, count)
	}

	// Check that the index on b eventually goes live even though a schema
	// change in front of it in the queue got purged.
	rows, err := sqlDB.Query(`SELECT * from t.test@test_b_key`)
	if err != nil {
		t.Fatal(err)
	}
	count = 0
	for ; rows.Next(); count++ {
	}
	if err := rows.Err(); err != nil {
		t.Fatal(err)
	}
	if eCount := int64(maxValue + 1); eCount != count {
		t.Fatalf("read the wrong number of rows: e = %d, v = %d", eCount, count)
	}

	// Check that the index on c gets purged.
	if _, err = sqlDB.Query(`SELECT * from t.test@foo`); err == nil {
		t.Fatal("SELECT over index 'foo' works")
	}

	// Check that the number of k-v pairs is accurate.
	tablePrefix := roachpb.Key(keys.MakeTablePrefix(uint32(tableDesc.ID)))
	tableEnd := tablePrefix.PrefixEnd()
	if kvs, err := kvDB.Scan(context.TODO(), tablePrefix, tableEnd, 0); err != nil {
		t.Fatal(err)
	} else if e := 2 * (maxValue + 1); len(kvs) != e {
		t.Fatalf("expected %d key value pairs, but got %d", e, len(kvs))
	}
}
// Test schema change purge failure doesn't leave DB in a bad state.
//
// The test injects a failure into the backfill of a unique index that
// violates a uniqueness constraint, then injects a second failure into the
// purge (rollback) of that schema change, and finally lets the async schema
// changer clean up the leftover garbage.
func TestSchemaChangePurgeFailure(t *testing.T) {
	defer leaktest.AfterTest(t)()
	params, _ := createTestServerParams()
	const chunkSize = 200
	// Disable the async schema changer.
	var enableAsyncSchemaChanges uint32
	attempts := 0
	// attempt 1: write the first chunk of the index.
	// attempt 2: write the second chunk and hit a unique constraint
	// violation; purge the schema change.
	// attempt 3: return an error while purging the schema change.
	expectedAttempts := 3
	params.Knobs = base.TestingKnobs{
		SQLSchemaChanger: &csql.SchemaChangerTestingKnobs{
			RunBeforeBackfillChunk: func(sp roachpb.Span) error {
				attempts++
				// Return a deadline exceeded error during the third attempt
				// which attempts to clean up the schema change.
				if attempts == expectedAttempts {
					return context.DeadlineExceeded
				}
				return nil
			},
			AsyncExecNotification: func() error {
				// Gate the async schema changer on the test flag so the
				// cleanup only runs once the test flips it on below.
				if enable := atomic.LoadUint32(&enableAsyncSchemaChanges); enable == 0 {
					return errors.New("async schema changes are disabled")
				}
				return nil
			},
			// Speed up evaluation of async schema changes so that it
			// processes a purged schema change quickly.
			AsyncExecQuickly:  true,
			BackfillChunkSize: chunkSize,
		},
	}
	server, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer server.Stopper().Stop()

	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k INT PRIMARY KEY, v INT);
`); err != nil {
		t.Fatal(err)
	}

	// Bulk insert. maxValue is just above one chunk so the backfill needs
	// two chunks, matching the attempt schedule described above.
	const maxValue = chunkSize + 1
	if err := bulkInsertIntoTable(sqlDB, maxValue); err != nil {
		t.Fatal(err)
	}

	// Add a row with a duplicate value for v
	if _, err := sqlDB.Exec(
		`INSERT INTO t.test VALUES ($1, $2)`, maxValue+1, maxValue,
	); err != nil {
		t.Fatal(err)
	}

	// A schema change that violates integrity constraints.
	if _, err := sqlDB.Exec(
		"CREATE UNIQUE INDEX foo ON t.test (v)",
	); !testutils.IsError(err, "violates unique constraint") {
		t.Fatal(err)
	}

	// The deadline exceeded error in the schema change purge results in no
	// retry attempts of the purge.
	if attempts != expectedAttempts {
		t.Fatalf("%d retries, despite allowing only (schema change + reverse) = %d", attempts, expectedAttempts)
	}

	// The index doesn't exist
	if _, err := sqlDB.Query(
		`SELECT v from t.test@foo`,
	); !testutils.IsError(err, "index .* not found") {
		t.Fatal(err)
	}

	// Read table descriptor.
	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")

	// There is still a mutation hanging off of it.
	if e := 1; len(tableDesc.Mutations) != e {
		t.Fatalf("the table has %d instead of %d mutations", len(tableDesc.Mutations), e)
	}
	// The mutation is for a DROP.
	if tableDesc.Mutations[0].Direction != sqlbase.DescriptorMutation_DROP {
		t.Fatalf("the table has mutation %v instead of a DROP", tableDesc.Mutations[0])
	}

	// There is still some garbage index data that needs to be purged. All the
	// rows from k = 0 to k = maxValue have index values. The k = maxValue + 1
	// row with the conflict doesn't contain an index value.
	numGarbageValues := chunkSize
	tablePrefix := roachpb.Key(keys.MakeTablePrefix(uint32(tableDesc.ID)))
	tableEnd := tablePrefix.PrefixEnd()
	if kvs, err := kvDB.Scan(context.TODO(), tablePrefix, tableEnd, 0); err != nil {
		t.Fatal(err)
	} else if e := 1*(maxValue+2) + numGarbageValues; len(kvs) != e {
		t.Fatalf("expected %d key value pairs, but got %d", e, len(kvs))
	}

	// Enable async schema change processing to ensure that it cleans up the
	// above garbage left behind.
	atomic.StoreUint32(&enableAsyncSchemaChanges, 1)
	testutils.SucceedsSoon(t, func() error {
		tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test")
		if len(tableDesc.Mutations) > 0 {
			return errors.Errorf("%d mutations remaining", len(tableDesc.Mutations))
		}
		return nil
	})

	// No garbage left behind.
	numGarbageValues = 0
	if kvs, err := kvDB.Scan(context.TODO(), tablePrefix, tableEnd, 0); err != nil {
		t.Fatal(err)
	} else if e := 1*(maxValue+2) + numGarbageValues; len(kvs) != e {
		t.Fatalf("expected %d key value pairs, but got %d", e, len(kvs))
	}

	// A new attempt cleans up a chunk of data.
	if attempts != expectedAttempts+1 {
		t.Fatalf("%d chunk ops, despite allowing only (schema change + reverse) = %d", attempts, expectedAttempts)
	}
}
func TestJoinReader(t *testing.T) { defer leaktest.AfterTest(t)() s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop() // Create a table where each row is: // // | a | b | sum | s | // |-----------------------------------------------------------------| // | rowId/10 | rowId%10 | rowId/10 + rowId%10 | IntToEnglish(rowId) | aFn := func(row int) parser.Datum { return parser.NewDInt(parser.DInt(row / 10)) } bFn := func(row int) parser.Datum { return parser.NewDInt(parser.DInt(row % 10)) } sumFn := func(row int) parser.Datum { return parser.NewDInt(parser.DInt(row/10 + row%10)) } sqlutils.CreateTable(t, sqlDB, "t", "a INT, b INT, sum INT, s STRING, PRIMARY KEY (a,b), INDEX bs (b,s)", 99, sqlutils.ToRowFn(aFn, bFn, sumFn, sqlutils.RowEnglishFn)) td := sqlbase.GetTableDescriptor(kvDB, "test", "t") testCases := []struct { spec JoinReaderSpec input [][]parser.Datum expected string }{ { spec: JoinReaderSpec{ OutputColumns: []uint32{0, 1, 2}, }, input: [][]parser.Datum{ {aFn(2), bFn(2)}, {aFn(5), bFn(5)}, {aFn(10), bFn(10)}, {aFn(15), bFn(15)}, }, expected: "[[0 2 2] [0 5 5] [1 0 1] [1 5 6]]", }, { spec: JoinReaderSpec{ Filter: Expression{Expr: "$2 <= 5"}, // sum <= 5 OutputColumns: []uint32{3}, }, input: [][]parser.Datum{ {aFn(1), bFn(1)}, {aFn(25), bFn(25)}, {aFn(5), bFn(5)}, {aFn(21), bFn(21)}, {aFn(34), bFn(34)}, {aFn(13), bFn(13)}, {aFn(51), bFn(51)}, {aFn(50), bFn(50)}, }, expected: "[['one'] ['five'] ['two-one'] ['one-three'] ['five-zero']]", }, } for _, c := range testCases { js := c.spec js.Table = *td txn := client.NewTxn(context.Background(), *kvDB) flowCtx := FlowCtx{ Context: context.Background(), evalCtx: &parser.EvalContext{}, txn: txn, } in := &RowBuffer{} for _, row := range c.input { encRow := make(sqlbase.EncDatumRow, len(row)) for i, d := range row { encRow[i].SetDatum(sqlbase.ColumnType_INT, d) } in.rows = append(in.rows, encRow) } out := &RowBuffer{} jr, err := newJoinReader(&flowCtx, &js, in, out) if err != nil { 
t.Fatal(err) } jr.Run(nil) if out.err != nil { t.Fatal(out.err) } if !in.done { t.Fatal("joinReader stopped accepting rows") } if !out.closed { t.Fatalf("output RowReceiver not closed") } if result := out.rows.String(); result != c.expected { t.Errorf("invalid results: %s, expected %s'", result, c.expected) } } }
func TestServer(t *testing.T) { defer leaktest.AfterTest(t)() s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop() conn, err := s.RPCContext().GRPCDial(s.ServingAddr()) if err != nil { t.Fatal(err) } r := sqlutils.MakeSQLRunner(t, sqlDB) r.Exec(`CREATE DATABASE test`) r.Exec(`CREATE TABLE test.t (a INT PRIMARY KEY, b INT)`) r.Exec(`INSERT INTO test.t VALUES (1, 10), (2, 20), (3, 30)`) td := sqlbase.GetTableDescriptor(kvDB, "test", "t") ts := TableReaderSpec{ Table: *td, IndexIdx: 0, Reverse: false, Spans: nil, Filter: Expression{Expr: "$0 != 2"}, // a != 2 OutputColumns: []uint32{0, 1}, // a } txn := client.NewTxn(context.Background(), *kvDB) req := &SetupFlowRequest{Txn: txn.Proto} req.Flow = FlowSpec{ Processors: []ProcessorSpec{{ Core: ProcessorCoreUnion{TableReader: &ts}, Output: []OutputRouterSpec{{ Type: OutputRouterSpec_MIRROR, Streams: []StreamEndpointSpec{{Mailbox: &MailboxSpec{SimpleResponse: true}}}, }}, }}, } distSQLClient := NewDistSQLClient(conn) stream, err := distSQLClient.RunSimpleFlow(context.Background(), req) if err != nil { t.Fatal(err) } var decoder StreamDecoder var rows sqlbase.EncDatumRows for { msg, err := stream.Recv() if err != nil { if err == io.EOF { break } t.Fatal(err) } err = decoder.AddMessage(msg) if err != nil { t.Fatal(err) } rows = testGetDecodedRows(t, &decoder, rows) } if done, trailerErr := decoder.IsDone(); !done { t.Fatal("stream not done") } else if trailerErr != nil { t.Fatal("error in the stream trailer:", trailerErr) } str := rows.String() expected := "[[1 10] [3 30]]" if str != expected { t.Errorf("invalid results: %s, expected %s'", str, expected) } }
// Test aborting a schema change backfill transaction and check that the
// backfill is completed correctly. The backfill transaction is aborted at a
// time when it thinks it has processed all the rows of the table. Later,
// before the transaction is retried, the table is populated with more rows
// that a backfill chunk, requiring the backfill to forget that it is at the
// end of its processing and needs to continue on to process two more chunks
// of data.
func TestAbortSchemaChangeBackfill(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// backfillNotification is closed by the backfill once it has finished a
	// chunk; commandsDone is closed by the test once the pushing commands
	// have run. Both are re-created for each subtest below.
	var backfillNotification, commandsDone chan struct{}
	var dontAbortBackfill uint32
	params, _ := createTestServerParams()
	const maxValue = 100
	backfillCount := int64(0)
	retriedBackfill := int64(0)
	var retriedSpan roachpb.Span

	// Disable asynchronous schema change execution to allow synchronous path
	// to trigger start of backfill notification.
	params.Knobs = base.TestingKnobs{
		SQLExecutor: &csql.ExecutorTestingKnobs{
			// Fix the priority to guarantee that a high priority transaction
			// pushes a lower priority one.
			FixTxnPriority: true,
		},
		SQLSchemaChanger: &csql.SchemaChangerTestingKnobs{
			RunBeforeBackfillChunk: func(sp roachpb.Span) error {
				switch atomic.LoadInt64(&backfillCount) {
				case 0:
					// Keep track of the span provided with the first backfill
					// attempt.
					retriedSpan = sp
				case 1:
					// Ensure that the second backfill attempt provides the
					// same span as the first.
					if sp.Equal(retriedSpan) {
						atomic.AddInt64(&retriedBackfill, 1)
					}
				}
				return nil
			},
			RunAfterBackfillChunk: func() {
				atomic.AddInt64(&backfillCount, 1)
				// Only the first chunk of each subtest blocks here; the swap
				// makes every later invocation return immediately.
				if atomic.SwapUint32(&dontAbortBackfill, 1) == 1 {
					return
				}
				// Close channel to notify that the backfill has been
				// completed but hasn't yet committed.
				close(backfillNotification)
				// Receive signal that the commands that push the backfill
				// transaction have completed; The backfill will attempt
				// to commit and will abort.
				<-commandsDone
			},
			AsyncExecNotification: asyncSchemaChangerDisabled,
			BackfillChunkSize:     maxValue,
		},
	}
	server, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer server.Stopper().Stop()

	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k INT PRIMARY KEY, v INT);
`); err != nil {
		t.Fatal(err)
	}

	// Bulk insert enough rows to exceed the chunk size.
	inserts := make([]string, maxValue+1)
	for i := 0; i < maxValue+1; i++ {
		inserts[i] = fmt.Sprintf(`(%d, %d)`, i, i)
	}
	if _, err := sqlDB.Exec(`INSERT INTO t.test VALUES ` + strings.Join(inserts, ",")); err != nil {
		t.Fatal(err)
	}

	// The two drop cases (column and index) do not need to be tested here
	// because the INSERT down below will not insert an entry for a dropped
	// column or index, however, it's still nice to have them just in case
	// INSERT gets messed up.
	testCases := []struct {
		sql string
		// Each schema change adds/drops a schema element that affects the
		// number of keys representing a table row.
		expectedNumKeysPerRow int
	}{
		{"ALTER TABLE t.test ADD COLUMN x DECIMAL DEFAULT (DECIMAL '1.4')", 2},
		{"ALTER TABLE t.test DROP x", 1},
		{"CREATE UNIQUE INDEX foo ON t.test (v)", 2},
		{"DROP INDEX t.test@foo", 1},
	}

	for i, testCase := range testCases {
		t.Run(testCase.sql, func(t *testing.T) {
			// Delete two rows so that the table size is smaller than a backfill
			// chunk. The two values will be added later to make the table larger
			// than a backfill chunk after the schema change backfill is aborted.
			for i := 0; i < 2; i++ {
				if _, err := sqlDB.Exec(`DELETE FROM t.test WHERE k = $1`, i); err != nil {
					t.Fatal(err)
				}
			}

			backfillNotification = make(chan struct{})
			commandsDone = make(chan struct{})
			atomic.StoreUint32(&dontAbortBackfill, 0)
			// Run the column schema change in a separate goroutine.
			var wg sync.WaitGroup
			wg.Add(1)
			go func() {
				// Start schema change that eventually runs a backfill.
				if _, err := sqlDB.Exec(testCase.sql); err != nil {
					t.Error(err)
				}
				wg.Done()
			}()

			// Wait until the schema change backfill has finished writing its
			// intents.
			<-backfillNotification

			// Delete a row that will push the backfill transaction.
			if _, err := sqlDB.Exec(`
BEGIN TRANSACTION PRIORITY HIGH;
DELETE FROM t.test WHERE k = 2;
COMMIT;
`); err != nil {
				t.Fatal(err)
			}

			// Add missing rows so that the table exceeds the size of a
			// backfill chunk.
			for i := 0; i < 3; i++ {
				if _, err := sqlDB.Exec(`INSERT INTO t.test VALUES($1, $2)`, i, i); err != nil {
					t.Fatal(err)
				}
			}

			// Release backfill so that it can try to commit and in the
			// process discover that it was aborted.
			close(commandsDone)

			wg.Wait() // for schema change to complete

			// Backfill retry happened.
			if count, e := atomic.SwapInt64(&retriedBackfill, 0), int64(1); count != e {
				t.Fatalf("expected = %d, found = %d", e, count)
			}
			// 1 failed + 2 retried backfill chunks.
			expectNumBackfills := int64(3)
			if i == len(testCases)-1 {
				// The DROP INDEX case: The above INSERTs do not add any index
				// entries for the inserted rows, so the index remains smaller
				// than a backfill chunk and is dropped in a single retried
				// backfill chunk.
				expectNumBackfills = 2
			}
			if count := atomic.SwapInt64(&backfillCount, 0); count != expectNumBackfills {
				t.Fatalf("expected = %d, found = %d", expectNumBackfills, count)
			}

			// Verify the number of keys left behind in the table to validate
			// schema change operations.
			tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")
			tablePrefix := roachpb.Key(keys.MakeTablePrefix(uint32(tableDesc.ID)))
			tableEnd := tablePrefix.PrefixEnd()
			if kvs, err := kvDB.Scan(context.TODO(), tablePrefix, tableEnd, 0); err != nil {
				t.Fatal(err)
			} else if e := testCase.expectedNumKeysPerRow * (maxValue + 1); len(kvs) != e {
				t.Fatalf("expected %d key value pairs, but got %d", e, len(kvs))
			}
		})
	}
}
// Test that changing a descriptor's name updates the name cache. func TestNameCacheIsUpdated(t *testing.T) { defer leaktest.AfterTest(t)() s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop() leaseManager := s.LeaseManager().(*LeaseManager) if _, err := db.Exec(` CREATE DATABASE t; CREATE DATABASE t1; CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); `); err != nil { t.Fatal(err) } // Populate the name cache. if _, err := db.Exec("SELECT * FROM t.test;"); err != nil { t.Fatal(err) } tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") // Rename. if _, err := db.Exec("ALTER TABLE t.test RENAME TO t.test2;"); err != nil { t.Fatal(err) } // Check that the cache has been updated. if leaseManager.tableNames.get(tableDesc.ParentID, "test", s.Clock()) != nil { t.Fatalf("old name still in cache") } lease := leaseManager.tableNames.get(tableDesc.ParentID, "test2", s.Clock()) if lease == nil { t.Fatalf("new name not found in cache") } if lease.ID != tableDesc.ID { t.Fatalf("new name has wrong ID: %d (expected: %d)", lease.ID, tableDesc.ID) } if err := leaseManager.Release(lease); err != nil { t.Fatal(err) } // Rename to a different database. if _, err := db.Exec("ALTER TABLE t.test2 RENAME TO t1.test2;"); err != nil { t.Fatal(err) } // Re-read the descriptor, to get the new ParentID. newTableDesc := sqlbase.GetTableDescriptor(kvDB, "t1", "test2") if tableDesc.ParentID == newTableDesc.ParentID { t.Fatalf("database didn't change") } // Check that the cache has been updated. if leaseManager.tableNames.get(tableDesc.ParentID, "test2", s.Clock()) != nil { t.Fatalf("old name still in cache") } lease = leaseManager.tableNames.get(newTableDesc.ParentID, "test2", s.Clock()) if lease == nil { t.Fatalf("new name not found in cache") } if lease.ID != tableDesc.ID { t.Fatalf("new name has wrong ID: %d (expected: %d)", lease.ID, tableDesc.ID) } if err := leaseManager.Release(lease); err != nil { t.Fatal(err) } }
func TestPurgeOldLeases(t *testing.T) { defer leaktest.AfterTest(t)() // We're going to block gossip so it doesn't come randomly and clear up the // leases we're artificially setting up. gossipSem := make(chan struct{}, 1) serverParams := base.TestServerArgs{ Knobs: base.TestingKnobs{ SQLLeaseManager: &LeaseManagerTestingKnobs{ GossipUpdateEvent: func(cfg config.SystemConfig) { gossipSem <- struct{}{} <-gossipSem }, }, }, } s, db, kvDB := serverutils.StartServer(t, serverParams) defer s.Stopper().Stop() leaseManager := s.LeaseManager().(*LeaseManager) // Block gossip. gossipSem <- struct{}{} defer func() { // Unblock gossip. <-gossipSem }() if _, err := db.Exec(` CREATE DATABASE t; CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); `); err != nil { t.Fatal(err) } tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") var leases []*LeaseState err := kvDB.Txn(context.TODO(), func(txn *client.Txn) error { for i := 0; i < 3; i++ { lease, err := leaseManager.acquireFreshestFromStore(txn, tableDesc.ID) if err != nil { t.Fatal(err) } leases = append(leases, lease) if err := leaseManager.Release(lease); err != nil { t.Fatal(err) } } return nil }) if err != nil { t.Fatal(err) } ts := leaseManager.findTableState(tableDesc.ID, false) if numLeases := getNumLeases(ts); numLeases != 3 { t.Fatalf("found %d leases instead of 3", numLeases) } if err := ts.purgeOldLeases( kvDB, false, 1 /* minVersion */, leaseManager.LeaseStore); err != nil { t.Fatal(err) } if numLeases := getNumLeases(ts); numLeases != 1 { t.Fatalf("found %d leases instead of 1", numLeases) } ts.mu.Lock() correctLease := ts.active.data[0] == leases[2] ts.mu.Unlock() if !correctLease { t.Fatalf("wrong lease survived purge") } }
func TestTableReader(t *testing.T) { defer leaktest.AfterTest(t)() s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop() // Create a table where each row is: // // | a | b | sum | s | // |-----------------------------------------------------------------| // | rowId/10 | rowId%10 | rowId/10 + rowId%10 | IntToEnglish(rowId) | aFn := func(row int) parser.Datum { return parser.NewDInt(parser.DInt(row / 10)) } bFn := func(row int) parser.Datum { return parser.NewDInt(parser.DInt(row % 10)) } sumFn := func(row int) parser.Datum { return parser.NewDInt(parser.DInt(row/10 + row%10)) } sqlutils.CreateTable(t, sqlDB, "t", "a INT, b INT, sum INT, s STRING, PRIMARY KEY (a,b), INDEX bs (b,s)", 99, sqlutils.ToRowFn(aFn, bFn, sumFn, sqlutils.RowEnglishFn)) td := sqlbase.GetTableDescriptor(kvDB, "test", "t") makeIndexSpan := func(start, end int) TableReaderSpan { var span roachpb.Span prefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(td, td.Indexes[0].ID)) span.Key = append(prefix, encoding.EncodeVarintAscending(nil, int64(start))...) span.EndKey = append(span.EndKey, prefix...) span.EndKey = append(span.EndKey, encoding.EncodeVarintAscending(nil, int64(end))...) 
return TableReaderSpan{Span: span} } testCases := []struct { spec TableReaderSpec expected string }{ { spec: TableReaderSpec{ Filter: Expression{Expr: "@3 < 5 AND @2 != 3"}, // sum < 5 && b != 3 OutputColumns: []uint32{0, 1}, }, expected: "[[0 1] [0 2] [0 4] [1 0] [1 1] [1 2] [2 0] [2 1] [2 2] [3 0] [3 1] [4 0]]", }, { spec: TableReaderSpec{ Filter: Expression{Expr: "@3 < 5 AND @2 != 3"}, OutputColumns: []uint32{3}, // s HardLimit: 4, }, expected: "[['one'] ['two'] ['four'] ['one-zero']]", }, { spec: TableReaderSpec{ IndexIdx: 1, Reverse: true, Spans: []TableReaderSpan{makeIndexSpan(4, 6)}, Filter: Expression{Expr: "@1 < 3"}, // sum < 8 OutputColumns: []uint32{0, 1}, SoftLimit: 1, }, expected: "[[2 5] [1 5] [0 5] [2 4] [1 4] [0 4]]", }, } for _, c := range testCases { ts := c.spec ts.Table = *td flowCtx := FlowCtx{ Context: context.Background(), evalCtx: &parser.EvalContext{}, txnProto: &roachpb.Transaction{}, clientDB: kvDB, } out := &RowBuffer{} tr, err := newTableReader(&flowCtx, &ts, out) if err != nil { t.Fatal(err) } tr.Run(nil) if out.err != nil { t.Fatal(out.err) } if !out.closed { t.Fatalf("output RowReceiver not closed") } if result := out.rows.String(); result != c.expected { t.Errorf("invalid results: %s, expected %s'", result, c.expected) } } }
// TestManualReplication starts a 3-node cluster in manual-replication mode,
// splits a table into its own range, up-replicates that range to all three
// nodes, and transfers the range lease from node 0 to node 1, verifying the
// cluster state at each step.
func TestManualReplication(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := StartTestCluster(t, 3,
		base.TestClusterArgs{
			ReplicationMode: base.ReplicationManual,
			ServerArgs: base.TestServerArgs{
				UseDatabase: "t",
			},
		})
	defer tc.Stopper().Stop()

	s0 := sqlutils.MakeSQLRunner(t, tc.Conns[0])
	s1 := sqlutils.MakeSQLRunner(t, tc.Conns[1])
	s2 := sqlutils.MakeSQLRunner(t, tc.Conns[2])

	// Exercise SQL connections to all three nodes.
	s0.Exec(`CREATE DATABASE t`)
	s0.Exec(`CREATE TABLE test (k INT PRIMARY KEY, v INT)`)
	s0.Exec(`INSERT INTO test VALUES (5, 1), (4, 2), (1, 2)`)

	if r := s1.Query(`SELECT * FROM test WHERE k = 5`); !r.Next() {
		t.Fatal("no rows")
	}

	s2.ExecRowsAffected(3, `DELETE FROM test`)

	// Split the table to a new range.
	kvDB := tc.Servers[0].DB()
	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")

	tableStartKey := keys.MakeRowSentinelKey(keys.MakeTablePrefix(uint32(tableDesc.ID)))
	leftRangeDesc, tableRangeDesc, err := tc.SplitRange(tableStartKey)
	if err != nil {
		t.Fatal(err)
	}
	log.Infof(context.Background(), "After split got ranges: %+v and %+v.", leftRangeDesc, tableRangeDesc)
	// With manual replication the fresh range should live only on node 1.
	if len(tableRangeDesc.Replicas) == 0 {
		t.Fatalf(
			"expected replica on node 1, got no replicas: %+v", tableRangeDesc.Replicas)
	}
	if tableRangeDesc.Replicas[0].NodeID != 1 {
		t.Fatalf(
			"expected replica on node 1, got replicas: %+v", tableRangeDesc.Replicas)
	}

	// Replicate the table's range to all the nodes.
	tableRangeDesc, err = tc.AddReplicas(
		tableRangeDesc.StartKey.AsRawKey(), tc.Target(1), tc.Target(2),
	)
	if err != nil {
		t.Fatal(err)
	}
	if len(tableRangeDesc.Replicas) != 3 {
		t.Fatalf("expected 3 replicas, got %+v", tableRangeDesc.Replicas)
	}
	for i := 0; i < 3; i++ {
		if _, ok := tableRangeDesc.GetReplicaDescriptor(
			tc.Servers[i].GetFirstStoreID()); !ok {
			t.Fatalf("expected replica on store %d, got %+v",
				tc.Servers[i].GetFirstStoreID(), tableRangeDesc.Replicas)
		}
	}

	// Transfer the lease to node 1.
	leaseHolder, err := tc.FindRangeLeaseHolder(
		tableRangeDesc,
		&ReplicationTarget{
			NodeID:  tc.Servers[0].GetNode().Descriptor.NodeID,
			StoreID: tc.Servers[0].GetFirstStoreID(),
		})
	if err != nil {
		t.Fatal(err)
	}
	if leaseHolder.StoreID != tc.Servers[0].GetFirstStoreID() {
		t.Fatalf("expected initial lease on server idx 0, but is on node: %+v",
			leaseHolder)
	}

	err = tc.TransferRangeLease(tableRangeDesc, tc.Target(1))
	if err != nil {
		t.Fatal(err)
	}

	// Check that the lease holder has changed. We'll use the old lease holder as
	// the hint, since it's guaranteed that the old lease holder has applied the
	// new lease.
	leaseHolder, err = tc.FindRangeLeaseHolder(
		tableRangeDesc,
		&ReplicationTarget{
			NodeID:  tc.Servers[0].GetNode().Descriptor.NodeID,
			StoreID: tc.Servers[0].GetFirstStoreID(),
		})
	if err != nil {
		t.Fatal(err)
	}
	if leaseHolder.StoreID != tc.Servers[1].GetFirstStoreID() {
		t.Fatalf("expected lease on server idx 1 (node: %d store: %d), but is on node: %+v",
			tc.Servers[1].GetNode().Descriptor.NodeID,
			tc.Servers[1].GetFirstStoreID(),
			leaseHolder)
	}
}
// Test schema changes are retried and complete properly. This also checks
// that a mutation checkpoint reduces the number of chunks operated on during
// a retry.
//
// Three schema changes are run back to back (add index, add column, drop
// column); each has a deadline-exceeded error injected into its third
// backfill chunk and must recover via retry. The knob callback also asserts
// that a retried backfill resumes past already-processed keys (checkpointing)
// rather than reprocessing them.
func TestSchemaChangeRetry(t *testing.T) {
	defer leaktest.AfterTest(t)()
	params, _ := createTestServerParams()
	attempts := 0
	// seenSpan tracks the last chunk span handed to the backfill; reset
	// before each schema change below.
	seenSpan := roachpb.Span{}
	params.Knobs = base.TestingKnobs{
		SQLSchemaChanger: &csql.SchemaChangerTestingKnobs{
			RunBeforeBackfillChunk: func(sp roachpb.Span) error {
				attempts++
				// Fail somewhere in the middle.
				if attempts == 3 {
					return context.DeadlineExceeded
				}
				if seenSpan.Key != nil {
					// Check that the keys are never reevaluated
					if seenSpan.Key.Compare(sp.Key) >= 0 {
						t.Errorf("reprocessing span %s, already seen span %s", sp, seenSpan)
					}
					if !seenSpan.EndKey.Equal(sp.EndKey) {
						t.Errorf("different EndKey: span %s, already seen span %s", sp, seenSpan)
					}
				}
				seenSpan = sp
				return nil
			},
			// Disable asynchronous schema change execution to allow
			// synchronous path to run schema changes.
			AsyncExecNotification: asyncSchemaChangerDisabled,
			// Checkpoint after every chunk so a retry can resume mid-backfill.
			WriteCheckpointInterval: time.Nanosecond,
		},
	}
	s, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()

	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k INT PRIMARY KEY, v INT);
`); err != nil {
		t.Fatal(err)
	}

	// Bulk insert.
	maxValue := 5000
	if err := bulkInsertIntoTable(sqlDB, maxValue); err != nil {
		t.Fatal(err)
	}

	// Add an index and check that it succeeds.
	if _, err := sqlDB.Exec("CREATE UNIQUE INDEX foo ON t.test (v)"); err != nil {
		t.Fatal(err)
	}

	// The schema change succeeded. Verify that the index foo over v is
	// consistent.
	rows, err := sqlDB.Query(`SELECT v from t.test@foo`)
	if err != nil {
		t.Fatal(err)
	}
	count := 0
	for ; rows.Next(); count++ {
		var val int
		if err := rows.Scan(&val); err != nil {
			t.Errorf("row %d scan failed: %s", count, err)
			continue
		}
		// Rows were inserted with k == v, so scanning the index on v in
		// order should yield consecutive values.
		if count != val {
			t.Errorf("e = %d, v = %d", count, val)
		}
	}
	if err := rows.Err(); err != nil {
		t.Fatal(err)
	}
	if eCount := maxValue + 1; eCount != count {
		t.Fatalf("read the wrong number of rows: e = %d, v = %d", eCount, count)
	}

	// Each row now has a primary index key and an entry in index foo.
	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")
	tablePrefix := roachpb.Key(keys.MakeTablePrefix(uint32(tableDesc.ID)))
	tableEnd := tablePrefix.PrefixEnd()
	numKeysPerRow := 2
	if kvs, err := kvDB.Scan(context.TODO(), tablePrefix, tableEnd, 0); err != nil {
		t.Fatal(err)
	} else if e := numKeysPerRow * (maxValue + 1); len(kvs) != e {
		t.Fatalf("expected %d key value pairs, but got %d", e, len(kvs))
	}

	// Add a column and check that it works.
	attempts = 0
	seenSpan = roachpb.Span{}
	if _, err := sqlDB.Exec("ALTER TABLE t.test ADD COLUMN x DECIMAL DEFAULT (DECIMAL '1.4')"); err != nil {
		t.Fatal(err)
	}
	rows, err = sqlDB.Query(`SELECT x from t.test`)
	if err != nil {
		t.Fatal(err)
	}
	count = 0
	for ; rows.Next(); count++ {
		var val float64
		if err := rows.Scan(&val); err != nil {
			t.Errorf("row %d scan failed: %s", count, err)
			continue
		}
		if e := 1.4; e != val {
			t.Errorf("e = %f, v = %f", e, val)
		}
	}
	if err := rows.Err(); err != nil {
		t.Fatal(err)
	}
	if eCount := maxValue + 1; eCount != count {
		t.Fatalf("read the wrong number of rows: e = %d, v = %d", eCount, count)
	}
	numKeysPerRow++
	if kvs, err := kvDB.Scan(context.TODO(), tablePrefix, tableEnd, 0); err != nil {
		t.Fatal(err)
	} else if e := numKeysPerRow * (maxValue + 1); len(kvs) != e {
		t.Fatalf("expected %d key value pairs, but got %d", e, len(kvs))
	}

	// Delete a column and check that it works.
	attempts = 0
	seenSpan = roachpb.Span{}
	if _, err := sqlDB.Exec("ALTER TABLE t.test DROP x"); err != nil {
		t.Fatal(err)
	}
	numKeysPerRow--
	if kvs, err := kvDB.Scan(context.TODO(), tablePrefix, tableEnd, 0); err != nil {
		t.Fatal(err)
	} else if e := numKeysPerRow * (maxValue + 1); len(kvs) != e {
		t.Fatalf("expected %d key value pairs, but got %d", e, len(kvs))
	}
}
func TestDropTable(t *testing.T) { defer leaktest.AfterTest(t)() params, _ := createTestServerParams() s, sqlDB, kvDB := serverutils.StartServer(t, params) defer s.Stopper().Stop() ctx := context.TODO() numRows := 2*sql.TableTruncateChunkSize + 1 createKVTable(t, sqlDB, numRows) tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "kv") nameKey := sqlbase.MakeNameMetadataKey(keys.MaxReservedDescID+1, "kv") gr, err := kvDB.Get(ctx, nameKey) if err != nil { t.Fatal(err) } if !gr.Exists() { t.Fatalf("Name entry %q does not exist", nameKey) } descKey := sqlbase.MakeDescMetadataKey(sqlbase.ID(gr.ValueInt())) // Add a zone config for the table. cfg := config.DefaultZoneConfig() buf, err := protoutil.Marshal(&cfg) if err != nil { t.Fatal(err) } if _, err := sqlDB.Exec(`INSERT INTO system.zones VALUES ($1, $2)`, tableDesc.ID, buf); err != nil { t.Fatal(err) } zoneKey := sqlbase.MakeZoneKey(tableDesc.ID) if gr, err := kvDB.Get(ctx, zoneKey); err != nil { t.Fatal(err) } else if !gr.Exists() { t.Fatalf("zone config entry not found") } tablePrefix := roachpb.Key(keys.MakeTablePrefix(uint32(tableDesc.ID))) checkKeyCount(t, kvDB, tablePrefix, 3*numRows) if _, err := sqlDB.Exec(`DROP TABLE t.kv`); err != nil { t.Fatal(err) } checkKeyCount(t, kvDB, tablePrefix, 0) // Test that deleted table cannot be used. This prevents regressions where // name -> descriptor ID caches might make this statement erronously work. 
if _, err := sqlDB.Exec(`SELECT * FROM t.kv`); !testutils.IsError(err, `table "t.kv" does not exist`) { t.Fatalf("different error than expected: %v", err) } if gr, err := kvDB.Get(ctx, descKey); err != nil { t.Fatal(err) } else if gr.Exists() { t.Fatalf("table descriptor still exists after the table is dropped") } if gr, err := kvDB.Get(ctx, nameKey); err != nil { t.Fatal(err) } else if gr.Exists() { t.Fatalf("table namekey still exists after the table is dropped") } if gr, err := kvDB.Get(ctx, zoneKey); err != nil { t.Fatal(err) } else if gr.Exists() { t.Fatalf("zone config entry still exists after the table is dropped") } }
// TestSchemaChangeCommandsWithPendingMutations tests how schema change
// commands behave when they are referencing schema elements that are
// mutations that are not yet live. Each scenario writes a synthetic
// mutation directly into the table descriptor, runs a DDL statement
// against it, checks the error (or lack of one), and then makes the
// mutation live again so the next scenario starts from a clean state.
func TestSchemaChangeCommandsWithPendingMutations(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// The descriptor changes made must have an immediate effect
	// so disable leases on tables.
	defer csql.TestDisableTableLeases()()
	// Disable external processing of mutations so the synthetic mutations
	// written below are not resolved behind the test's back.
	params, _ := createTestServerParams()
	params.Knobs.SQLSchemaChanger = &csql.SchemaChangerTestingKnobs{
		AsyncExecNotification: asyncSchemaChangerDisabled,
	}
	server, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer server.Stopper().Stop()

	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (a CHAR PRIMARY KEY, b CHAR, c CHAR, INDEX foo (c));
`); err != nil {
		t.Fatal(err)
	}

	// Read the table descriptor so mutations can be injected into it.
	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")
	mt := mutationTest{
		T:         t,
		kvDB:      kvDB,
		sqlDB:     sqlDB,
		tableDesc: tableDesc,
	}

	// Test CREATE INDEX in the presence of mutations.

	// Add index DROP mutation "foo".
	mt.writeIndexMutation("foo", sqlbase.DescriptorMutation{Direction: sqlbase.DescriptorMutation_DROP})
	if _, err := sqlDB.Exec(`CREATE INDEX foo ON t.test (c)`); !testutils.IsError(err, `index "foo" being dropped, try again later`) {
		t.Fatal(err)
	}
	// Make "foo" live.
	mt.makeMutationsActive()

	// "foo" is being added.
	mt.writeIndexMutation("foo", sqlbase.DescriptorMutation{Direction: sqlbase.DescriptorMutation_ADD})
	if _, err := sqlDB.Exec(`CREATE INDEX foo ON t.test (c)`); !testutils.IsError(err, `duplicate index name: "foo"`) {
		t.Fatal(err)
	}
	// Make "foo" live.
	mt.makeMutationsActive()

	// Add column DROP mutation "b".
	mt.writeColumnMutation("b", sqlbase.DescriptorMutation{Direction: sqlbase.DescriptorMutation_DROP})
	// A column being dropped cannot be referenced by a new index.
	if _, err := sqlDB.Exec(`CREATE INDEX bar ON t.test (b)`); !testutils.IsError(err, `index "bar" contains unknown column "b"`) {
		t.Fatal(err)
	}
	// Make "b" live.
	mt.makeMutationsActive()

	// "b" is being added.
	mt.writeColumnMutation("b", sqlbase.DescriptorMutation{Direction: sqlbase.DescriptorMutation_ADD})
	// An index referencing a column mutation that is being added
	// is allowed to be added.
	if _, err := sqlDB.Exec(`CREATE INDEX bar ON t.test (b)`); err != nil {
		t.Fatal(err)
	}
	// Make "b" live.
	mt.makeMutationsActive()

	// Test DROP INDEX in the presence of mutations.

	// Add index DROP mutation "foo".
	mt.writeIndexMutation("foo", sqlbase.DescriptorMutation{Direction: sqlbase.DescriptorMutation_DROP})
	// Dropping an index that is already being dropped is a noop.
	if _, err := sqlDB.Exec(`DROP INDEX t.test@foo`); err != nil {
		t.Fatal(err)
	}
	// Make "foo" live.
	mt.makeMutationsActive()

	// "foo" is being added.
	mt.writeIndexMutation("foo", sqlbase.DescriptorMutation{Direction: sqlbase.DescriptorMutation_ADD})
	if _, err := sqlDB.Exec(`DROP INDEX t.test@foo`); !testutils.IsError(err, `index "foo" in the middle of being added, try again later`) {
		t.Fatal(err)
	}
	// Make "foo" live.
	mt.makeMutationsActive()

	// Test ALTER TABLE ADD/DROP column in the presence of mutations.

	// Add column DROP mutation "b".
	mt.writeColumnMutation("b", sqlbase.DescriptorMutation{Direction: sqlbase.DescriptorMutation_DROP})
	if _, err := sqlDB.Exec(`ALTER TABLE t.test ADD b CHAR`); !testutils.IsError(err, `column "b" being dropped, try again later`) {
		t.Fatal(err)
	}
	// Dropping a column that is already being dropped is a noop.
	if _, err := sqlDB.Exec(`ALTER TABLE t.test DROP b`); err != nil {
		t.Fatal(err)
	}
	// Make "b" live.
	mt.makeMutationsActive()

	// "b" is being added.
	mt.writeColumnMutation("b", sqlbase.DescriptorMutation{Direction: sqlbase.DescriptorMutation_ADD})
	if _, err := sqlDB.Exec(`ALTER TABLE t.test ADD b CHAR`); !testutils.IsError(err, `duplicate column name: "b"`) {
		t.Fatal(err)
	}
	if _, err := sqlDB.Exec(`ALTER TABLE t.test DROP b`); !testutils.IsError(err, `column "b" in the middle of being added, try again later`) {
		t.Fatal(err)
	}
	// Make "b" live.
	mt.makeMutationsActive()

	// Test ALTER TABLE ADD CONSTRAINT in the presence of mutations.

	// Add index DROP mutation "foo".
	mt.writeIndexMutation("foo", sqlbase.DescriptorMutation{Direction: sqlbase.DescriptorMutation_DROP})
	if _, err := sqlDB.Exec(`ALTER TABLE t.test ADD CONSTRAINT foo UNIQUE (c)`); !testutils.IsError(err, `index "foo" being dropped, try again later`) {
		t.Fatal(err)
	}
	// Make "foo" live.
	mt.makeMutationsActive()

	// "foo" is being added.
	mt.writeIndexMutation("foo", sqlbase.DescriptorMutation{Direction: sqlbase.DescriptorMutation_ADD})
	if _, err := sqlDB.Exec(`ALTER TABLE t.test ADD CONSTRAINT foo UNIQUE (c)`); !testutils.IsError(err, `duplicate index name: "foo"`) {
		t.Fatal(err)
	}
	// Make "foo" live.
	mt.makeMutationsActive()

	// Add column DROP mutation "b".
	mt.writeColumnMutation("b", sqlbase.DescriptorMutation{Direction: sqlbase.DescriptorMutation_DROP})
	if _, err := sqlDB.Exec(`ALTER TABLE t.test ADD CONSTRAINT bar UNIQUE (b)`); !testutils.IsError(err, `index "bar" contains unknown column "b"`) {
		t.Fatal(err)
	}
	// Make "b" live.
	mt.makeMutationsActive()

	// "b" is being added.
	mt.writeColumnMutation("b", sqlbase.DescriptorMutation{Direction: sqlbase.DescriptorMutation_ADD})
	// A unique constraint over a column that is being added is allowed.
	if _, err := sqlDB.Exec(`ALTER TABLE t.test ADD CONSTRAINT bar UNIQUE (b)`); err != nil {
		t.Fatal(err)
	}
	// Make "b" live.
	mt.makeMutationsActive()

	// Test DROP CONSTRAINT in the presence of mutations.

	// Add index DROP mutation "foo".
	mt.writeIndexMutation("foo", sqlbase.DescriptorMutation{Direction: sqlbase.DescriptorMutation_DROP})
	// Dropping an index that is already being dropped is a noop.
	if _, err := sqlDB.Exec(`DROP INDEX t.test@foo`); err != nil {
		t.Fatal(err)
	}
	// Make "foo" live.
	mt.makeMutationsActive()

	// "foo" is being added.
	mt.writeIndexMutation("foo", sqlbase.DescriptorMutation{Direction: sqlbase.DescriptorMutation_ADD})
	if _, err := sqlDB.Exec(`DROP INDEX t.test@foo`); !testutils.IsError(err, `index "foo" in the middle of being added, try again later`) {
		t.Fatal(err)
	}
	// Make "foo" live.
	mt.makeMutationsActive()

	// Rename column/index, while index is under mutation.

	// Add index mutation "foo".
	mt.writeIndexMutation("foo", sqlbase.DescriptorMutation{})
	if _, err := sqlDB.Exec(`ALTER INDEX t.test@foo RENAME to ufo`); err != nil {
		mt.Fatal(err)
	}
	if _, err := sqlDB.Exec(`ALTER TABLE t.test RENAME COLUMN c TO d`); err != nil {
		mt.Fatal(err)
	}
	// The mutation in the table descriptor has changed and we would like
	// to update our copy to make it live.
	mt.tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test")
	// Make "ufo" live.
	mt.makeMutationsActive()
	// The index has been renamed to ufo, and the column to d.
	_ = mt.checkQueryResponse("SHOW INDEXES FROM t.test", [][]string{{"test", "primary", "true", "1", "a", "ASC", "false"}, {"test", "ufo", "false", "1", "d", "ASC", "false"}})

	// Rename column under mutation works properly.

	// Add column mutation "b".
	mt.writeColumnMutation("b", sqlbase.DescriptorMutation{})
	if _, err := sqlDB.Exec(`ALTER TABLE t.test RENAME COLUMN b TO e`); err != nil {
		mt.Fatal(err)
	}
	// The mutation in the table descriptor has changed and we would like
	// to update our copy to make it live.
	mt.tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test")
	// Make column "e" live.
	mt.makeMutationsActive()
	// Column "b" was renamed to "e" (and "c" to "d" above).
	_ = mt.checkQueryResponse("SHOW COLUMNS FROM t.test", [][]string{{"a", "STRING", "false", "NULL"}, {"d", "STRING", "true", "NULL"}, {"e", "STRING", "true", "NULL"}})

	// Try to change column defaults while column is under mutation.
	mt.writeColumnMutation("e", sqlbase.DescriptorMutation{Direction: sqlbase.DescriptorMutation_ADD})
	if _, err := sqlDB.Exec(`ALTER TABLE t.test ALTER COLUMN e SET DEFAULT 'a'`); !testutils.IsError(
		err, `column "e" in the middle of being added`) {
		t.Fatal(err)
	}
	mt.makeMutationsActive()
	mt.writeColumnMutation("e", sqlbase.DescriptorMutation{Direction: sqlbase.DescriptorMutation_DROP})
	if _, err := sqlDB.Exec(`ALTER TABLE t.test ALTER COLUMN e SET DEFAULT 'a'`); !testutils.IsError(
		err, `column "e" in the middle of being dropped`) {
		t.Fatal(err)
	}
	mt.makeMutationsActive()
}
// TestTableMutationQueue tests that schema elements when added are // assigned the correct start state and mutation id. func TestTableMutationQueue(t *testing.T) { defer leaktest.AfterTest(t)() // Disable synchronous and asynchronous schema change processing so that // the mutations get queued up. params, _ := createTestServerParams() params.Knobs = base.TestingKnobs{ SQLSchemaChanger: &csql.SchemaChangerTestingKnobs{ SyncFilter: func(tscc csql.TestingSchemaChangerCollection) { tscc.ClearSchemaChangers() }, AsyncExecNotification: asyncSchemaChangerDisabled, }, } server, sqlDB, kvDB := serverutils.StartServer(t, params) defer server.Stopper().Stop() // Create a table with column i and an index on v and i. if _, err := sqlDB.Exec(` CREATE DATABASE t; CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); `); err != nil { t.Fatal(err) } // Run some schema changes. // This single command creates three columns and two indexes sharing the // same mutation ID. if _, err := sqlDB.Exec( `ALTER TABLE t.test ADD d INT UNIQUE, ADD e INT UNIQUE, ADD f INT`, ); err != nil { t.Fatal(err) } // This command creates two mutations sharing the same mutation ID. if _, err := sqlDB.Exec( `ALTER TABLE t.test ADD g INT, ADD CONSTRAINT idx_f UNIQUE (f)`, ); err != nil { t.Fatal(err) } // This command creates a single mutation. if _, err := sqlDB.Exec(`CREATE UNIQUE INDEX idx_g ON t.test (g)`); err != nil { t.Fatal(err) } // This command created a drop mutation. 
if _, err := sqlDB.Exec(`ALTER TABLE t.test DROP v`); err != nil { t.Fatal(err) } // read table descriptor tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") expected := []struct { name string id sqlbase.MutationID state sqlbase.DescriptorMutation_State }{ {"d", 1, sqlbase.DescriptorMutation_DELETE_ONLY}, {"test_d_key", 1, sqlbase.DescriptorMutation_DELETE_ONLY}, {"e", 1, sqlbase.DescriptorMutation_DELETE_ONLY}, {"test_e_key", 1, sqlbase.DescriptorMutation_DELETE_ONLY}, {"f", 1, sqlbase.DescriptorMutation_DELETE_ONLY}, // Second schema change. {"g", 2, sqlbase.DescriptorMutation_DELETE_ONLY}, {"idx_f", 2, sqlbase.DescriptorMutation_DELETE_ONLY}, // Third. {"idx_g", 3, sqlbase.DescriptorMutation_DELETE_ONLY}, // Drop mutations start off in the WRITE_ONLY state. {"v", 4, sqlbase.DescriptorMutation_WRITE_ONLY}, } if len(tableDesc.Mutations) != len(expected) { t.Fatalf("%d mutations, instead of expected %d", len(tableDesc.Mutations), len(expected)) } for i, m := range tableDesc.Mutations { name := expected[i].name if col := m.GetColumn(); col != nil { if col.Name != name { t.Errorf("%d entry: name %s, expected %s", i, col.Name, name) } } if idx := m.GetIndex(); idx != nil { if idx.Name != name { t.Errorf("%d entry: name %s, expected %s", i, idx.Name, name) } } if id := expected[i].id; m.MutationID != id { t.Errorf("%d entry: id %d, expected %d", i, m.MutationID, id) } if state := expected[i].state; m.State != state { t.Errorf("%d entry: state %s, expected %s", i, m.State, state) } } }
// Test that there's no deadlock between AcquireByName and Release.
// We used to have one due to lock inversion between the tableNameCache lock and
// the leaseState lock, triggered when the same lease was Release()d after the
// table had been deleted (which means it's removed from the tableNameCache) and
// AcquireByName()d at the same time.
func TestReleaseAcquireByNameDeadlock(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Track lease removals so the test can wait for a lease to be fully gone
	// from the store before reusing it (see the comment inside the loop).
	removalTracker := NewLeaseRemovalTracker()
	testingKnobs := base.TestingKnobs{
		SQLLeaseManager: &LeaseManagerTestingKnobs{
			LeaseStoreTestingKnobs: LeaseStoreTestingKnobs{
				LeaseReleasedEvent: removalTracker.LeaseRemovedNotification,
			},
		},
	}
	s, sqlDB, kvDB := serverutils.StartServer(
		t, base.TestServerArgs{Knobs: testingKnobs})
	defer s.Stopper().Stop()
	leaseManager := s.LeaseManager().(*LeaseManager)

	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR);
`); err != nil {
		t.Fatal(err)
	}

	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")

	// Populate the name cache.
	var lease *LeaseState
	if err := kvDB.Txn(context.TODO(), func(txn *client.Txn) error {
		var err error
		lease, err = leaseManager.AcquireByName(txn, tableDesc.ParentID, "test")
		return err
	}); err != nil {
		t.Fatal(err)
	}
	if err := leaseManager.Release(lease); err != nil {
		t.Fatal(err)
	}

	// Pretend the table has been deleted, so that when we release leases on it,
	// they are removed from the tableNameCache too.
	tableState := leaseManager.findTableState(tableDesc.ID, true)
	tableState.deleted = true

	// Try to trigger the race repeatedly: race an AcquireByName against a
	// Release.
	// leaseChan acts as a barrier, synchronizing the two routines at every
	// iteration.
	leaseChan := make(chan *LeaseState)
	errChan := make(chan error)
	go func() {
		// Release every lease the main goroutine hands over on leaseChan.
		for lease := range leaseChan {
			// Move errors to the main goroutine.
			errChan <- leaseManager.Release(lease)
		}
	}()

	for i := 0; i < 50; i++ {
		var leaseByName *LeaseState
		if err := kvDB.Txn(context.TODO(), func(txn *client.Txn) error {
			var err error
			// NB: this deliberately shadows the outer `lease`; each iteration
			// works with a freshly acquired lease.
			lease, err := leaseManager.AcquireByName(txn, tableDesc.ParentID, "test")
			if err != nil {
				t.Fatal(err)
			}
			// This test will need to wait until leases are removed from the store
			// before creating new leases because the jitter used in the leases'
			// expiration causes duplicate key errors when trying to create new
			// leases. This is not a problem in production, since leases are not
			// removed from the store until they expire, and the jitter is small
			// compared to their lifetime, but it is a problem in this test because
			// we churn through leases quickly.
			tracker := removalTracker.TrackRemoval(lease)
			// Start the race: signal the other guy to release, and we do another
			// acquire at the same time.
			leaseChan <- lease
			leaseByName, err = leaseManager.AcquireByName(txn, tableDesc.ParentID, "test")
			if err != nil {
				t.Fatal(err)
			}
			tracker2 := removalTracker.TrackRemoval(leaseByName)
			// See if there was an error releasing lease.
			err = <-errChan
			if err != nil {
				t.Fatal(err)
			}

			// Depending on how the race went, there are two cases - either the
			// AcquireByName ran first, and got the same lease as we already had,
			// or the Release ran first and so we got a new lease.
			if leaseByName == lease {
				if lease.Refcount() != 1 {
					t.Fatalf("expected refcount 1, got %d", lease.Refcount())
				}
				if err := leaseManager.Release(lease); err != nil {
					t.Fatal(err)
				}
				if err := tracker.WaitForRemoval(); err != nil {
					t.Fatal(err)
				}
			} else {
				if lease.Refcount() != 0 {
					t.Fatalf("expected refcount 0, got %d", lease.Refcount())
				}
				if err := leaseManager.Release(leaseByName); err != nil {
					t.Fatal(err)
				}
				if err := tracker2.WaitForRemoval(); err != nil {
					t.Fatal(err)
				}
			}
			return nil
		}); err != nil {
			t.Fatal(err)
		}
	}
	// Closing leaseChan terminates the releasing goroutine.
	close(leaseChan)
}
// TestOperationsWithColumnAndIndexMutation tests the INSERT, UPDATE, UPSERT,
// and DELETE operations while an index mutation refers to a column mutation.
// It iterates over every valid combination of column/index mutation states
// (DELETE_ONLY and WRITE_ONLY), re-initializing the table before each run.
func TestOperationsWithColumnAndIndexMutation(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// The descriptor changes made must have an immediate effect
	// so disable leases on tables.
	defer csql.TestDisableTableLeases()()
	// Disable external processing of mutations so the synthetic mutation
	// states written below stay in place for the duration of each check.
	params, _ := createTestServerParams()
	params.Knobs.SQLSchemaChanger = &csql.SchemaChangerTestingKnobs{
		AsyncExecNotification: asyncSchemaChangerDisabled,
	}
	server, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer server.Stopper().Stop()

	// Create a table with column i and an index on v and i. Fix the column
	// families so the key counts below don't change if the family heuristics
	// are updated.
	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR, i CHAR, INDEX foo (i, v), FAMILY (k), FAMILY (v), FAMILY (i));
`); err != nil {
		t.Fatal(err)
	}

	// Read the table descriptor so mutations can be injected into it.
	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")
	mTest := mutationTest{
		T:         t,
		kvDB:      kvDB,
		sqlDB:     sqlDB,
		tableDesc: tableDesc,
	}

	starQuery := `SELECT * FROM t.test`
	indexQuery := `SELECT i FROM t.test@foo`
	for _, useUpsert := range []bool{true, false} {
		// Run the tests for both states for a column and an index.
		for _, state := range []sqlbase.DescriptorMutation_State{
			sqlbase.DescriptorMutation_DELETE_ONLY,
			sqlbase.DescriptorMutation_WRITE_ONLY,
		} {
			for _, idxState := range []sqlbase.DescriptorMutation_State{
				sqlbase.DescriptorMutation_DELETE_ONLY,
				sqlbase.DescriptorMutation_WRITE_ONLY,
			} {
				// Ignore the impossible column in DELETE_ONLY state while index
				// is in the WRITE_ONLY state.
				if state == sqlbase.DescriptorMutation_DELETE_ONLY &&
					idxState == sqlbase.DescriptorMutation_WRITE_ONLY {
					continue
				}
				// Init table to start state.
				if _, err := sqlDB.Exec(`TRUNCATE TABLE t.test`); err != nil {
					t.Fatal(err)
				}
				initRows := [][]string{{"a", "z", "q"}, {"b", "y", "r"}}
				for _, row := range initRows {
					if useUpsert {
						if _, err := sqlDB.Exec(`UPSERT INTO t.test VALUES ($1, $2, $3)`, row[0], row[1], row[2]); err != nil {
							t.Fatal(err)
						}
					} else {
						if _, err := sqlDB.Exec(`INSERT INTO t.test VALUES ($1, $2, $3)`, row[0], row[1], row[2]); err != nil {
							t.Fatal(err)
						}
					}
				}
				// Check that the table only contains the initRows.
				_ = mTest.checkQueryResponse(starQuery, initRows)

				// Add index "foo" as a mutation.
				mTest.writeIndexMutation("foo", sqlbase.DescriptorMutation{State: idxState})
				// Make column "i" a mutation.
				mTest.writeColumnMutation("i", sqlbase.DescriptorMutation{State: state})

				// Insert a row into the table; the mutation column "i" is not
				// specified and so gets no value.
				if useUpsert {
					if _, err := sqlDB.Exec(`UPSERT INTO t.test VALUES ('c', 'x')`); err != nil {
						t.Error(err)
					}
				} else {
					if _, err := sqlDB.Exec(`INSERT INTO t.test VALUES ('c', 'x')`); err != nil {
						t.Error(err)
					}
				}

				// Make column "i" and index "foo" live.
				mTest.makeMutationsActive()
				// Column "i" has no entry for the new row.
				_ = mTest.checkQueryResponse(starQuery, [][]string{{"a", "z", "q"}, {"b", "y", "r"}, {"c", "x", "NULL"}})
				if idxState == sqlbase.DescriptorMutation_DELETE_ONLY {
					// No index entry for row "c".
					_ = mTest.checkQueryResponse(indexQuery, [][]string{{"q"}, {"r"}})
				} else {
					// Index entry for row "c".
					_ = mTest.checkQueryResponse(indexQuery, [][]string{{"NULL"}, {"q"}, {"r"}})
				}

				// Add index "foo" as a mutation.
				mTest.writeIndexMutation("foo", sqlbase.DescriptorMutation{State: idxState})
				// Make column "i" a mutation.
				mTest.writeColumnMutation("i", sqlbase.DescriptorMutation{State: state})

				// Updating column "i" for a row fails because the column is not
				// visible while it is a mutation.
				if useUpsert {
					if _, err := sqlDB.Exec(`UPSERT INTO t.test VALUES ('a', 'u', 'u')`); !testutils.IsError(err, `table t.test has 2 columns but 3 values were supplied`) {
						t.Error(err)
					}
				} else {
					if _, err := sqlDB.Exec(`UPDATE t.test SET (v, i) = ('u', 'u') WHERE k = 'a'`); !testutils.IsError(err, `column "i" does not exist`) {
						t.Error(err)
					}
				}

				// Update a row without specifying mutation column "i".
				if useUpsert {
					if _, err := sqlDB.Exec(`UPSERT INTO t.test VALUES ('a', 'u')`); err != nil {
						t.Error(err)
					}
				} else {
					if _, err := sqlDB.Exec(`UPDATE t.test SET v = 'u' WHERE k = 'a'`); err != nil {
						t.Error(err)
					}
				}
				// Make column "i" and index "foo" live.
				mTest.makeMutationsActive()

				// The update to column "v" is seen; there is no effect on column "i".
				_ = mTest.checkQueryResponse(starQuery, [][]string{{"a", "u", "q"}, {"b", "y", "r"}, {"c", "x", "NULL"}})
				if idxState == sqlbase.DescriptorMutation_DELETE_ONLY {
					// Index entry for row "a" is deleted.
					_ = mTest.checkQueryResponse(indexQuery, [][]string{{"r"}})
				} else {
					// No change in index "foo".
					_ = mTest.checkQueryResponse(indexQuery, [][]string{{"NULL"}, {"q"}, {"r"}})
				}

				// Add index "foo" as a mutation.
				mTest.writeIndexMutation("foo", sqlbase.DescriptorMutation{State: idxState})
				// Make column "i" a mutation.
				mTest.writeColumnMutation("i", sqlbase.DescriptorMutation{State: state})

				// Delete row "b".
				if _, err := sqlDB.Exec(`DELETE FROM t.test WHERE k = 'b'`); err != nil {
					t.Error(err)
				}
				// Make column "i" and index "foo" live.
				mTest.makeMutationsActive()
				// Row "b" is deleted. numVals is the number of non-NULL values seen,
				// or the number of KV values belonging to all the rows in the table
				// excluding row "b" since it's deleted.
				numVals := mTest.checkQueryResponse(starQuery, [][]string{{"a", "u", "q"}, {"c", "x", "NULL"}})
				// idxVals is the number of index values seen.
				var idxVals int
				if idxState == sqlbase.DescriptorMutation_DELETE_ONLY {
					// Index entry for row "b" is deleted.
					idxVals = mTest.checkQueryResponse(indexQuery, [][]string{})
				} else {
					// Index entry for row "b" is deleted. idxVals doesn't account for
					// the NULL value seen.
					idxVals = mTest.checkQueryResponse(indexQuery, [][]string{{"NULL"}, {"q"}})
					// Increment idxVals to account for the NULL value seen above.
					idxVals++
				}
				// Check that there are no hidden KV values for row "b", and column
				// "i" for row "b" was deleted. Also check that the index values are
				// all accounted for.
				mTest.checkTableSize(numVals + idxVals)
			}
		}
	}
}